mirascope 2.0.0a2__py3-none-any.whl → 2.0.0a3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (204)
  1. mirascope/__init__.py +2 -2
  2. mirascope/api/__init__.py +6 -0
  3. mirascope/api/_generated/README.md +207 -0
  4. mirascope/api/_generated/__init__.py +85 -0
  5. mirascope/api/_generated/client.py +155 -0
  6. mirascope/api/_generated/core/__init__.py +52 -0
  7. mirascope/api/_generated/core/api_error.py +23 -0
  8. mirascope/api/_generated/core/client_wrapper.py +58 -0
  9. mirascope/api/_generated/core/datetime_utils.py +30 -0
  10. mirascope/api/_generated/core/file.py +70 -0
  11. mirascope/api/_generated/core/force_multipart.py +16 -0
  12. mirascope/api/_generated/core/http_client.py +619 -0
  13. mirascope/api/_generated/core/http_response.py +55 -0
  14. mirascope/api/_generated/core/jsonable_encoder.py +102 -0
  15. mirascope/api/_generated/core/pydantic_utilities.py +310 -0
  16. mirascope/api/_generated/core/query_encoder.py +60 -0
  17. mirascope/api/_generated/core/remove_none_from_dict.py +11 -0
  18. mirascope/api/_generated/core/request_options.py +35 -0
  19. mirascope/api/_generated/core/serialization.py +282 -0
  20. mirascope/api/_generated/docs/__init__.py +4 -0
  21. mirascope/api/_generated/docs/client.py +95 -0
  22. mirascope/api/_generated/docs/raw_client.py +132 -0
  23. mirascope/api/_generated/environment.py +9 -0
  24. mirascope/api/_generated/errors/__init__.py +7 -0
  25. mirascope/api/_generated/errors/bad_request_error.py +15 -0
  26. mirascope/api/_generated/health/__init__.py +7 -0
  27. mirascope/api/_generated/health/client.py +96 -0
  28. mirascope/api/_generated/health/raw_client.py +129 -0
  29. mirascope/api/_generated/health/types/__init__.py +8 -0
  30. mirascope/api/_generated/health/types/health_check_response.py +24 -0
  31. mirascope/api/_generated/health/types/health_check_response_status.py +5 -0
  32. mirascope/api/_generated/reference.md +167 -0
  33. mirascope/api/_generated/traces/__init__.py +55 -0
  34. mirascope/api/_generated/traces/client.py +162 -0
  35. mirascope/api/_generated/traces/raw_client.py +168 -0
  36. mirascope/api/_generated/traces/types/__init__.py +95 -0
  37. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item.py +36 -0
  38. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource.py +31 -0
  39. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item.py +25 -0
  40. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value.py +54 -0
  41. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_array_value.py +23 -0
  42. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value.py +28 -0
  43. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value_values_item.py +24 -0
  44. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item.py +35 -0
  45. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope.py +35 -0
  46. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item.py +27 -0
  47. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value.py +54 -0
  48. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_array_value.py +23 -0
  49. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value.py +28 -0
  50. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value_values_item.py +24 -0
  51. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item.py +60 -0
  52. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item.py +29 -0
  53. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value.py +54 -0
  54. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_array_value.py +23 -0
  55. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value.py +28 -0
  56. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value_values_item.py +24 -0
  57. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_status.py +24 -0
  58. mirascope/api/_generated/traces/types/traces_create_response.py +27 -0
  59. mirascope/api/_generated/traces/types/traces_create_response_partial_success.py +28 -0
  60. mirascope/api/_generated/types/__init__.py +21 -0
  61. mirascope/api/_generated/types/http_api_decode_error.py +31 -0
  62. mirascope/api/_generated/types/http_api_decode_error_tag.py +5 -0
  63. mirascope/api/_generated/types/issue.py +44 -0
  64. mirascope/api/_generated/types/issue_tag.py +17 -0
  65. mirascope/api/_generated/types/property_key.py +7 -0
  66. mirascope/api/_generated/types/property_key_tag.py +29 -0
  67. mirascope/api/_generated/types/property_key_tag_tag.py +5 -0
  68. mirascope/api/client.py +255 -0
  69. mirascope/api/settings.py +81 -0
  70. mirascope/llm/__init__.py +41 -11
  71. mirascope/llm/calls/calls.py +81 -57
  72. mirascope/llm/calls/decorator.py +121 -115
  73. mirascope/llm/content/__init__.py +3 -2
  74. mirascope/llm/context/_utils.py +19 -6
  75. mirascope/llm/exceptions.py +30 -16
  76. mirascope/llm/formatting/_utils.py +9 -5
  77. mirascope/llm/formatting/format.py +2 -2
  78. mirascope/llm/formatting/from_call_args.py +2 -2
  79. mirascope/llm/messages/message.py +13 -5
  80. mirascope/llm/models/__init__.py +2 -2
  81. mirascope/llm/models/models.py +189 -81
  82. mirascope/llm/prompts/__init__.py +13 -12
  83. mirascope/llm/prompts/_utils.py +27 -24
  84. mirascope/llm/prompts/decorator.py +133 -204
  85. mirascope/llm/prompts/prompts.py +424 -0
  86. mirascope/llm/prompts/protocols.py +25 -59
  87. mirascope/llm/providers/__init__.py +38 -0
  88. mirascope/llm/{clients → providers}/_missing_import_stubs.py +8 -6
  89. mirascope/llm/providers/anthropic/__init__.py +24 -0
  90. mirascope/llm/{clients → providers}/anthropic/_utils/decode.py +5 -4
  91. mirascope/llm/{clients → providers}/anthropic/_utils/encode.py +31 -10
  92. mirascope/llm/providers/anthropic/model_id.py +40 -0
  93. mirascope/llm/{clients/anthropic/clients.py → providers/anthropic/provider.py} +33 -418
  94. mirascope/llm/{clients → providers}/base/__init__.py +3 -3
  95. mirascope/llm/{clients → providers}/base/_utils.py +10 -7
  96. mirascope/llm/{clients/base/client.py → providers/base/base_provider.py} +255 -126
  97. mirascope/llm/providers/google/__init__.py +21 -0
  98. mirascope/llm/{clients → providers}/google/_utils/decode.py +6 -4
  99. mirascope/llm/{clients → providers}/google/_utils/encode.py +30 -24
  100. mirascope/llm/providers/google/model_id.py +28 -0
  101. mirascope/llm/providers/google/provider.py +438 -0
  102. mirascope/llm/providers/load_provider.py +48 -0
  103. mirascope/llm/providers/mlx/__init__.py +24 -0
  104. mirascope/llm/providers/mlx/_utils.py +107 -0
  105. mirascope/llm/providers/mlx/encoding/__init__.py +8 -0
  106. mirascope/llm/providers/mlx/encoding/base.py +69 -0
  107. mirascope/llm/providers/mlx/encoding/transformers.py +131 -0
  108. mirascope/llm/providers/mlx/mlx.py +237 -0
  109. mirascope/llm/providers/mlx/model_id.py +17 -0
  110. mirascope/llm/providers/mlx/provider.py +411 -0
  111. mirascope/llm/providers/model_id.py +16 -0
  112. mirascope/llm/providers/openai/__init__.py +6 -0
  113. mirascope/llm/providers/openai/completions/__init__.py +20 -0
  114. mirascope/llm/{clients/openai/responses → providers/openai/completions}/_utils/__init__.py +2 -0
  115. mirascope/llm/{clients → providers}/openai/completions/_utils/decode.py +5 -3
  116. mirascope/llm/{clients → providers}/openai/completions/_utils/encode.py +33 -23
  117. mirascope/llm/providers/openai/completions/provider.py +456 -0
  118. mirascope/llm/providers/openai/model_id.py +31 -0
  119. mirascope/llm/providers/openai/model_info.py +246 -0
  120. mirascope/llm/providers/openai/provider.py +386 -0
  121. mirascope/llm/providers/openai/responses/__init__.py +21 -0
  122. mirascope/llm/{clients → providers}/openai/responses/_utils/decode.py +5 -3
  123. mirascope/llm/{clients → providers}/openai/responses/_utils/encode.py +28 -17
  124. mirascope/llm/providers/openai/responses/provider.py +470 -0
  125. mirascope/llm/{clients → providers}/openai/shared/_utils.py +7 -3
  126. mirascope/llm/providers/provider_id.py +13 -0
  127. mirascope/llm/providers/provider_registry.py +167 -0
  128. mirascope/llm/responses/base_response.py +10 -5
  129. mirascope/llm/responses/base_stream_response.py +10 -5
  130. mirascope/llm/responses/response.py +24 -13
  131. mirascope/llm/responses/root_response.py +7 -12
  132. mirascope/llm/responses/stream_response.py +35 -23
  133. mirascope/llm/tools/__init__.py +9 -2
  134. mirascope/llm/tools/_utils.py +12 -3
  135. mirascope/llm/tools/protocols.py +4 -4
  136. mirascope/llm/tools/tool_schema.py +44 -9
  137. mirascope/llm/tools/tools.py +10 -9
  138. mirascope/ops/__init__.py +156 -0
  139. mirascope/ops/_internal/__init__.py +5 -0
  140. mirascope/ops/_internal/closure.py +1118 -0
  141. mirascope/ops/_internal/configuration.py +126 -0
  142. mirascope/ops/_internal/context.py +76 -0
  143. mirascope/ops/_internal/exporters/__init__.py +26 -0
  144. mirascope/ops/_internal/exporters/exporters.py +342 -0
  145. mirascope/ops/_internal/exporters/processors.py +104 -0
  146. mirascope/ops/_internal/exporters/types.py +165 -0
  147. mirascope/ops/_internal/exporters/utils.py +29 -0
  148. mirascope/ops/_internal/instrumentation/__init__.py +8 -0
  149. mirascope/ops/_internal/instrumentation/llm/__init__.py +8 -0
  150. mirascope/ops/_internal/instrumentation/llm/encode.py +238 -0
  151. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/__init__.py +38 -0
  152. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_input_messages.py +31 -0
  153. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_output_messages.py +38 -0
  154. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_system_instructions.py +18 -0
  155. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/shared.py +100 -0
  156. mirascope/ops/_internal/instrumentation/llm/llm.py +1288 -0
  157. mirascope/ops/_internal/propagation.py +198 -0
  158. mirascope/ops/_internal/protocols.py +51 -0
  159. mirascope/ops/_internal/session.py +139 -0
  160. mirascope/ops/_internal/spans.py +232 -0
  161. mirascope/ops/_internal/traced_calls.py +371 -0
  162. mirascope/ops/_internal/traced_functions.py +394 -0
  163. mirascope/ops/_internal/tracing.py +276 -0
  164. mirascope/ops/_internal/types.py +13 -0
  165. mirascope/ops/_internal/utils.py +75 -0
  166. mirascope/ops/_internal/versioned_calls.py +512 -0
  167. mirascope/ops/_internal/versioned_functions.py +346 -0
  168. mirascope/ops/_internal/versioning.py +303 -0
  169. mirascope/ops/exceptions.py +21 -0
  170. {mirascope-2.0.0a2.dist-info → mirascope-2.0.0a3.dist-info}/METADATA +76 -1
  171. mirascope-2.0.0a3.dist-info/RECORD +206 -0
  172. {mirascope-2.0.0a2.dist-info → mirascope-2.0.0a3.dist-info}/WHEEL +1 -1
  173. mirascope/graphs/__init__.py +0 -22
  174. mirascope/graphs/finite_state_machine.py +0 -625
  175. mirascope/llm/agents/__init__.py +0 -15
  176. mirascope/llm/agents/agent.py +0 -97
  177. mirascope/llm/agents/agent_template.py +0 -45
  178. mirascope/llm/agents/decorator.py +0 -176
  179. mirascope/llm/calls/base_call.py +0 -33
  180. mirascope/llm/clients/__init__.py +0 -34
  181. mirascope/llm/clients/anthropic/__init__.py +0 -25
  182. mirascope/llm/clients/anthropic/model_ids.py +0 -8
  183. mirascope/llm/clients/google/__init__.py +0 -20
  184. mirascope/llm/clients/google/clients.py +0 -853
  185. mirascope/llm/clients/google/model_ids.py +0 -15
  186. mirascope/llm/clients/openai/__init__.py +0 -25
  187. mirascope/llm/clients/openai/completions/__init__.py +0 -28
  188. mirascope/llm/clients/openai/completions/_utils/model_features.py +0 -81
  189. mirascope/llm/clients/openai/completions/clients.py +0 -833
  190. mirascope/llm/clients/openai/completions/model_ids.py +0 -8
  191. mirascope/llm/clients/openai/responses/__init__.py +0 -26
  192. mirascope/llm/clients/openai/responses/_utils/model_features.py +0 -87
  193. mirascope/llm/clients/openai/responses/clients.py +0 -832
  194. mirascope/llm/clients/openai/responses/model_ids.py +0 -8
  195. mirascope/llm/clients/providers.py +0 -175
  196. mirascope-2.0.0a2.dist-info/RECORD +0 -102
  197. /mirascope/llm/{clients → providers}/anthropic/_utils/__init__.py +0 -0
  198. /mirascope/llm/{clients → providers}/base/kwargs.py +0 -0
  199. /mirascope/llm/{clients → providers}/base/params.py +0 -0
  200. /mirascope/llm/{clients → providers}/google/_utils/__init__.py +0 -0
  201. /mirascope/llm/{clients → providers}/google/message.py +0 -0
  202. /mirascope/llm/{clients/openai/completions → providers/openai/responses}/_utils/__init__.py +0 -0
  203. /mirascope/llm/{clients → providers}/openai/shared/__init__.py +0 -0
  204. {mirascope-2.0.0a2.dist-info → mirascope-2.0.0a3.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,255 @@
1
+ """Client interfaces and factory for Mirascope SDK.
2
+
3
+ This module provides interfaces and factory functions for creating Mirascope clients
4
+ that support both the Fern-generated API client and OpenTelemetry exporters.
5
+ """
6
+
7
+ from __future__ import annotations
8
+
9
+ import asyncio
10
+ import logging
11
+ import weakref
12
+ from collections.abc import Callable
13
+ from functools import lru_cache
14
+ from typing import ParamSpec, TypeAlias, TypeVar
15
+
16
+ import httpx
17
+
18
+ from ._generated.client import (
19
+ AsyncMirascope as _BaseAsyncMirascope,
20
+ Mirascope as _BaseMirascope,
21
+ )
22
+ from .settings import get_settings
23
+
24
+ ApiKey: TypeAlias = str
25
+ BaseUrl: TypeAlias = str
26
+ Token: TypeAlias = str | Callable[[], str] | None
27
+ _P = ParamSpec("_P")
28
+ _R = TypeVar("_R")
29
+
30
+ logger = logging.getLogger(__name__)
31
+
32
+
33
class Mirascope(_BaseMirascope):
    """Enhanced Mirascope client with error handling.

    Wraps the Fern-generated sync client, resolving ``api_key``/``base_url``
    from :func:`get_settings` when not given explicitly, and attaching a
    Bearer ``Authorization`` header to the underlying ``httpx.Client``.

    NOTE(review): the ``token`` parameter is accepted but never used in this
    initializer — confirm whether it is reserved for future auth flows.
    """

    def __init__(
        self,
        *,
        base_url: str | None = None,
        api_key: str | None = None,
        token: Token = None,
        timeout: float | None = None,
        follow_redirects: bool | None = True,
        httpx_client: httpx.Client | None = None,
    ) -> None:
        """Initialize the enhanced Mirascope client.

        Raises:
            RuntimeError: if initialization fails for any reason (including a
                missing API key, which is surfaced as the wrapped cause).
        """
        try:
            settings = get_settings()
            # Explicit arguments take precedence over context settings.
            self.api_key = api_key or settings.api_key
            if not self.api_key:
                raise ValueError("`Mirascope` client requires `api_key`.")

            self.base_url = base_url or settings.base_url

            auth_headers = {"Authorization": f"Bearer {self.api_key}"}
            if httpx_client:
                # Caller supplied a client: merge auth headers into it.
                if hasattr(httpx_client, "headers"):
                    httpx_client.headers.update(auth_headers)
            else:
                # Build our own client with sensible defaults.
                httpx_client = httpx.Client(
                    headers=auth_headers,
                    timeout=timeout or 30.0,
                    follow_redirects=(
                        True if follow_redirects is None else follow_redirects
                    ),
                )

            super().__init__(
                base_url=self.base_url,
                timeout=timeout,
                follow_redirects=follow_redirects,
                httpx_client=httpx_client,
            )

        except Exception as e:
            logger.error("Failed to initialize Mirascope client: %s", e)
            raise RuntimeError(f"Client initialization failed: {e}") from e

    def close(self) -> None:
        """Close the underlying synchronous HTTP client.

        Unwraps the generated client wrapper defensively with ``getattr`` so a
        differently-shaped wrapper degrades to a no-op instead of raising.
        """
        wrapper = getattr(self._client_wrapper, "httpx_client", None)
        raw_client = getattr(wrapper, "httpx_client", None)
        if raw_client is not None:
            raw_client.close()
89
+
90
+
91
class AsyncMirascope(_BaseAsyncMirascope):
    """Enhanced async Mirascope client with error handling.

    Async counterpart of :class:`Mirascope`: resolves credentials from
    :func:`get_settings` when not given explicitly and attaches a Bearer
    ``Authorization`` header to the underlying ``httpx.AsyncClient``.

    NOTE(review): the ``token`` parameter is accepted but never used in this
    initializer — confirm whether it is reserved for future auth flows.
    """

    def __init__(
        self,
        *,
        base_url: str | None = None,
        api_key: str | None = None,
        token: Token = None,
        timeout: float | None = None,
        follow_redirects: bool | None = True,
        httpx_client: httpx.AsyncClient | None = None,
    ) -> None:
        """Initialize the enhanced async Mirascope client.

        Raises:
            RuntimeError: if initialization fails for any reason (including a
                missing API key, which is surfaced as the wrapped cause).
        """
        try:
            settings = get_settings()
            # Explicit arguments take precedence over context settings.
            self.api_key = api_key or settings.api_key
            if not self.api_key:
                raise ValueError("`AsyncMirascope` client requires `api_key`.")

            self.base_url = base_url or settings.base_url

            auth_headers = {"Authorization": f"Bearer {self.api_key}"}
            if httpx_client:
                # Caller supplied a client: merge auth headers into it.
                if hasattr(httpx_client, "headers"):
                    httpx_client.headers.update(auth_headers)
            else:
                # Build our own async client with sensible defaults.
                httpx_client = httpx.AsyncClient(
                    headers=auth_headers,
                    timeout=timeout or 30.0,
                    follow_redirects=(
                        True if follow_redirects is None else follow_redirects
                    ),
                )

            super().__init__(
                base_url=self.base_url,
                timeout=timeout,
                follow_redirects=follow_redirects,
                httpx_client=httpx_client,
            )

        except Exception as e:
            logger.error("Failed to initialize AsyncMirascope client: %s", e)
            raise RuntimeError(f"Async client initialization failed: {e}") from e

    async def aclose(self) -> None:
        """Close the underlying asynchronous HTTP client.

        Unwraps the generated client wrapper defensively with ``getattr`` so a
        differently-shaped wrapper degrades to a no-op instead of raising.
        """
        wrapper = getattr(self._client_wrapper, "httpx_client", None)
        raw_client = getattr(wrapper, "httpx_client", None)
        if raw_client is not None:
            await raw_client.aclose()
147
+
148
+
149
@lru_cache(maxsize=256)
def _sync_singleton(api_key: str | None, base_url: str | None) -> Mirascope:
    """Return the cached process-wide sync client for this credential pair.

    ``lru_cache`` keys on ``(api_key, base_url)``, so repeated calls with the
    same credentials share a single :class:`Mirascope` instance.

    Raises:
        RuntimeError: if constructing the client fails.
    """
    try:
        # API key is masked in the log on purpose; only the URL is useful.
        logger.debug("Creating sync client with api_key=*****, base_url=%s", base_url)
        return Mirascope(api_key=api_key, base_url=base_url)
    except Exception as e:
        logger.error("Failed to create singleton Mirascope client: %s", e)
        raise RuntimeError(f"Failed to create cached client: {e}") from e
158
+
159
+
160
def get_sync_client(
    api_key: str | None = None,
    base_url: str | None = None,
) -> Mirascope:
    """Get or create a cached synchronous client.

    Args:
        api_key: API key for authentication; falls back to settings.
        base_url: Base URL for the API; falls back to settings.

    Returns:
        The cached :class:`Mirascope` instance for the resolved credentials.
    """
    cfg = get_settings()
    resolved_key = api_key or cfg.api_key
    resolved_url = base_url or cfg.base_url
    return _sync_singleton(resolved_key, resolved_url)
179
+
180
+
181
@lru_cache(maxsize=256)
def _async_singleton(
    _loop_id_for_cache: int, api_key: str | None, base_url: str | None
) -> AsyncMirascope:
    """Return the loop-specific async client, creating one if none yet exists.

    The cache key includes the running event loop's id so each loop gets its
    own client. A finalizer registered on the loop clears the whole cache when
    the loop is garbage-collected, so a recycled loop id cannot resurrect a
    client bound to a dead loop.

    Raises:
        RuntimeError: if constructing the client fails.
    """
    try:
        # API key is masked in the log on purpose; only the URL is useful.
        logger.debug("Creating async client with api_key=*****, base_url=%s", base_url)
        running_loop = asyncio.get_running_loop()
        instance = AsyncMirascope(api_key=api_key, base_url=base_url)
        weakref.finalize(running_loop, _async_singleton.cache_clear)
        return instance
    except Exception as e:
        logger.error("Failed to create singleton AsyncMirascope client: %s", e)
        raise RuntimeError(f"Failed to create cached async client: {e}") from e
195
+
196
+
197
def get_async_client(
    api_key: str | None = None,
    base_url: str | None = None,
) -> AsyncMirascope:
    """Get or create a cached asynchronous client for the current event loop.

    Args:
        api_key: API key for authentication; falls back to settings.
        base_url: Base URL for the API; falls back to settings.

    Returns:
        The cached :class:`AsyncMirascope` instance for this loop/credentials.

    Raises:
        RuntimeError: if called outside a running event loop.
    """
    try:
        loop = asyncio.get_running_loop()
    except RuntimeError as exc:
        raise RuntimeError(
            "get_async_client() must be called from within an active event loop."
        ) from exc

    cfg = get_settings()
    resolved_key = api_key or cfg.api_key
    resolved_url = base_url or cfg.base_url
    # The loop id scopes the cache entry to this event loop.
    return _async_singleton(id(loop), resolved_key, resolved_url)
224
+
225
+
226
def create_export_client(
    *,
    base_url: str | None = None,
    api_key: str | None = None,
    timeout: float = 30.0,
    httpx_client: httpx.Client | None = None,
) -> Mirascope:
    """Create a fresh (non-cached) client suitable for OpenTelemetry export.

    Args:
        base_url: Base URL for the API.
        api_key: API key for authentication.
        timeout: Request timeout in seconds (default 30.0).
        httpx_client: Optional custom httpx client to use instead of a new one.

    Returns:
        A new :class:`Mirascope` client configured for export use.
    """
    export_client = Mirascope(
        base_url=base_url,
        api_key=api_key,
        timeout=timeout,
        httpx_client=httpx_client,
    )
    return export_client
250
+
251
+
252
def close_cached_clients() -> None:
    """Clear all cached client instances.

    NOTE(review): this only clears the ``lru_cache`` registries — it does not
    call ``close()``/``aclose()`` on the evicted clients, so their underlying
    HTTP connections are left to garbage collection. Confirm this is
    intentional before relying on it for deterministic shutdown.
    """
    _sync_singleton.cache_clear()
    _async_singleton.cache_clear()
@@ -0,0 +1,81 @@
1
+ """Settings and configuration for Mirascope SDK."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from collections.abc import Iterator
6
+ from contextlib import contextmanager
7
+ from contextvars import ContextVar
8
+ from functools import cache
9
+ from typing import Any
10
+
11
+ from pydantic import Field
12
+ from pydantic_settings import BaseSettings, SettingsConfigDict
13
+
14
+
15
class Settings(BaseSettings):
    """Global settings for Mirascope SDK.

    Values are loaded from the environment using the ``MIRASCOPE_`` prefix
    (e.g. ``MIRASCOPE_API_KEY``, ``MIRASCOPE_BASE_URL``).
    """

    model_config = SettingsConfigDict(env_prefix="MIRASCOPE_")

    # Default API endpoint; override via MIRASCOPE_BASE_URL.
    base_url: str = Field(default="https://v2.mirascope.com")
    # No default: must come from the environment or be set explicitly.
    api_key: str | None = None

    def update(self, **kwargs: Any) -> None:  # noqa: ANN401
        """Update non-None fields in place; unknown names are ignored."""
        for name, value in kwargs.items():
            if value is not None and hasattr(self, name):
                setattr(self, name, value)
28
+
29
+
30
@cache
def _default_settings() -> Settings:
    """Build the process-wide default ``Settings`` once and memoize it."""
    return Settings()
33
+
34
+
35
+ CURRENT_SETTINGS: ContextVar[Settings | None] = ContextVar(
36
+ "CURRENT_SETTINGS", default=None
37
+ )
38
+
39
+
40
def get_settings() -> Settings:
    """Return Settings for the current context.

    Falls back to (and installs) the memoized process-wide defaults when the
    current context has no settings yet.
    """
    current = CURRENT_SETTINGS.get()
    if current is not None:
        return current
    current = _default_settings()
    CURRENT_SETTINGS.set(current)
    return current
47
+
48
+
49
@contextmanager
def settings(
    base_url: str | None = None,
    api_key: str | None = None,
) -> Iterator[Settings]:
    """Context manager for temporarily overriding settings.

    Builds a new ``Settings`` layered over the current (or default) one and
    installs it in the context variable for the duration of the ``with``
    block, restoring the previous value afterwards.

    Args:
        base_url: Override the base URL for API calls.
        api_key: Override the API key.

    Yields:
        Settings instance with overrides applied.

    Example:
        with settings(base_url="https://api.example.com", api_key="test-key"):
            # Use custom settings within this context
            client = MirascopeClient()
    """
    base = CURRENT_SETTINGS.get()
    if base is None:
        base = _default_settings()

    # None/empty overrides fall back to the values currently in effect.
    override = Settings(
        base_url=base_url or base.base_url,
        api_key=api_key or base.api_key,
    )

    reset_token = CURRENT_SETTINGS.set(override)
    try:
        yield override
    finally:
        CURRENT_SETTINGS.reset(reset_token)
mirascope/llm/__init__.py CHANGED
@@ -1,4 +1,4 @@
1
- """The `llm` module for writing provider-agnostic LLM Generations.
1
+ """LLM abstractions that aren't obstructions.
2
2
 
3
3
  This module provides a unified interface for interacting with different LLM providers,
4
4
  including messages, tools, response formatting, and streaming. It allows you to write
@@ -12,13 +12,13 @@ from contextlib import suppress
12
12
 
13
13
  from . import (
14
14
  calls,
15
- clients,
16
15
  content,
17
16
  exceptions,
18
17
  formatting,
19
18
  messages,
20
19
  models,
21
20
  prompts,
21
+ providers,
22
22
  responses,
23
23
  tools,
24
24
  types,
@@ -26,8 +26,7 @@ from . import (
26
26
 
27
27
  with suppress(ImportError):
28
28
  from . import mcp
29
- from .calls import call
30
- from .clients import ModelId, Params, Provider, client, get_client
29
+ from .calls import AsyncCall, AsyncContextCall, Call, CallDecorator, ContextCall, call
31
30
  from .content import (
32
31
  AssistantContentChunk,
33
32
  AssistantContentPart,
@@ -60,7 +59,8 @@ from .exceptions import (
60
59
  ConnectionError,
61
60
  FeatureNotSupportedError,
62
61
  FormattingModeNotSupportedError,
63
- MirascopeError,
62
+ MirascopeLLMError,
63
+ NoRegisteredProviderError,
64
64
  NotFoundError,
65
65
  PermissionError,
66
66
  RateLimitError,
@@ -78,8 +78,23 @@ from .messages import (
78
78
  UserContent,
79
79
  UserMessage,
80
80
  )
81
- from .models import Model, model, use_model
82
- from .prompts import prompt
81
+ from .models import Model, model, model_from_context, use_model
82
+ from .prompts import (
83
+ AsyncContextPrompt,
84
+ AsyncPrompt,
85
+ ContextPrompt,
86
+ Prompt,
87
+ PromptDecorator,
88
+ prompt,
89
+ )
90
+ from .providers import (
91
+ ModelId,
92
+ Params,
93
+ Provider,
94
+ ProviderId,
95
+ load_provider,
96
+ register_provider,
97
+ )
83
98
  from .responses import (
84
99
  AsyncChunkIterator,
85
100
  AsyncContextResponse,
@@ -121,11 +136,15 @@ __all__ = [
121
136
  "AssistantContentChunk",
122
137
  "AssistantContentPart",
123
138
  "AssistantMessage",
139
+ "AsyncCall",
124
140
  "AsyncChunkIterator",
141
+ "AsyncContextCall",
142
+ "AsyncContextPrompt",
125
143
  "AsyncContextResponse",
126
144
  "AsyncContextStreamResponse",
127
145
  "AsyncContextTool",
128
146
  "AsyncContextToolkit",
147
+ "AsyncPrompt",
129
148
  "AsyncResponse",
130
149
  "AsyncStream",
131
150
  "AsyncStreamResponse",
@@ -139,9 +158,13 @@ __all__ = [
139
158
  "BadRequestError",
140
159
  "Base64AudioSource",
141
160
  "Base64ImageSource",
161
+ "Call",
162
+ "CallDecorator",
142
163
  "ChunkIterator",
143
164
  "ConnectionError",
144
165
  "Context",
166
+ "ContextCall",
167
+ "ContextPrompt",
145
168
  "ContextResponse",
146
169
  "ContextStreamResponse",
147
170
  "ContextTool",
@@ -154,12 +177,18 @@ __all__ = [
154
177
  "FormattingModeNotSupportedError",
155
178
  "Image",
156
179
  "Message",
157
- "MirascopeError",
180
+ "MirascopeLLMError",
158
181
  "Model",
182
+ "ModelId",
183
+ "NoRegisteredProviderError",
159
184
  "NotFoundError",
160
185
  "Params",
161
186
  "Partial",
162
187
  "PermissionError",
188
+ "Prompt",
189
+ "PromptDecorator",
190
+ "Provider",
191
+ "ProviderId",
163
192
  "RateLimitError",
164
193
  "RawMessageChunk",
165
194
  "Response",
@@ -195,19 +224,20 @@ __all__ = [
195
224
  "UserMessage",
196
225
  "call",
197
226
  "calls",
198
- "client",
199
- "clients",
200
227
  "content",
201
228
  "exceptions",
202
229
  "format",
203
230
  "formatting",
204
- "get_client",
231
+ "load_provider",
205
232
  "mcp",
206
233
  "messages",
207
234
  "model",
235
+ "model_from_context",
208
236
  "models",
209
237
  "prompt",
210
238
  "prompts",
239
+ "providers",
240
+ "register_provider",
211
241
  "responses",
212
242
  "tool",
213
243
  "tools",
@@ -5,6 +5,7 @@ from typing import Generic, overload
5
5
 
6
6
  from ..context import Context, DepsT
7
7
  from ..formatting import FormattableT
8
+ from ..models import Model, use_model
8
9
  from ..prompts import (
9
10
  AsyncContextPrompt,
10
11
  AsyncPrompt,
@@ -21,19 +22,37 @@ from ..responses import (
21
22
  Response,
22
23
  StreamResponse,
23
24
  )
24
- from ..tools import (
25
- AsyncContextToolkit,
26
- AsyncToolkit,
27
- ContextToolkit,
28
- Toolkit,
29
- )
30
25
  from ..types import P
31
- from .base_call import BaseCall
32
26
 
33
27
 
34
28
  @dataclass
35
- class Call(BaseCall[P, Prompt, Toolkit, FormattableT], Generic[P, FormattableT]):
36
- """A class for generating responses using LLMs."""
29
+ class BaseCall:
30
+ """Base class for all Call types with shared model functionality."""
31
+
32
+ default_model: Model
33
+ """The default model that will be used if no model is set in context."""
34
+
35
+ @property
36
+ def model(self) -> Model:
37
+ """The model used for generating responses. May be overwritten via `with llm.model(...)`."""
38
+ return use_model(self.default_model)
39
+
40
+
41
+ @dataclass
42
+ class Call(BaseCall, Generic[P, FormattableT]):
43
+ """A call that directly generates LLM responses without requiring a model argument.
44
+
45
+ Created by decorating a `MessageTemplate` with `llm.call`. The decorated function
46
+ becomes directly callable to generate responses, with the `Model` bundled in.
47
+
48
+ A `Call` is essentially: `MessageTemplate` + tools + format + `Model`.
49
+ It can be invoked directly: `call(*args, **kwargs)` (no model argument needed).
50
+
51
+ The model can be overridden at runtime using `with llm.model(...)` context manager.
52
+ """
53
+
54
+ prompt: Prompt[P, FormattableT]
55
+ """The underlying Prompt instance that generates messages with tools and format."""
37
56
 
38
57
  @overload
39
58
  def __call__(
@@ -63,10 +82,7 @@ class Call(BaseCall[P, Prompt, Toolkit, FormattableT], Generic[P, FormattableT])
63
82
  self, *args: P.args, **kwargs: P.kwargs
64
83
  ) -> Response | Response[FormattableT]:
65
84
  """Generates a response using the LLM."""
66
- messages = self.fn(*args, **kwargs)
67
- return self.model.call(
68
- messages=messages, tools=self.toolkit, format=self.format
69
- )
85
+ return self.prompt.call(self.model, *args, **kwargs)
70
86
 
71
87
  @overload
72
88
  def stream(
@@ -82,18 +98,24 @@ class Call(BaseCall[P, Prompt, Toolkit, FormattableT], Generic[P, FormattableT])
82
98
  self, *args: P.args, **kwargs: P.kwargs
83
99
  ) -> StreamResponse | StreamResponse[FormattableT]:
84
100
  """Generates a streaming response using the LLM."""
85
- messages = self.fn(*args, **kwargs)
86
- return self.model.stream(
87
- messages=messages, tools=self.toolkit, format=self.format
88
- )
101
+ return self.prompt.stream(self.model, *args, **kwargs)
89
102
 
90
103
 
91
104
  @dataclass
92
- class AsyncCall(
93
- BaseCall[P, AsyncPrompt, AsyncToolkit, FormattableT],
94
- Generic[P, FormattableT],
95
- ):
96
- """A class for generating responses using LLMs asynchronously."""
105
+ class AsyncCall(BaseCall, Generic[P, FormattableT]):
106
+ """An async call that directly generates LLM responses without requiring a model argument.
107
+
108
+ Created by decorating an async `MessageTemplate` with `llm.call`. The decorated async
109
+ function becomes directly callable to generate responses asynchronously, with the `Model` bundled in.
110
+
111
+ An `AsyncCall` is essentially: async `MessageTemplate` + tools + format + `Model`.
112
+ It can be invoked directly: `await call(*args, **kwargs)` (no model argument needed).
113
+
114
+ The model can be overridden at runtime using `with llm.model(...)` context manager.
115
+ """
116
+
117
+ prompt: AsyncPrompt[P, FormattableT]
118
+ """The underlying AsyncPrompt instance that generates messages with tools and format."""
97
119
 
98
120
  @overload
99
121
  async def __call__(
@@ -108,7 +130,7 @@ class AsyncCall(
108
130
  async def __call__(
109
131
  self, *args: P.args, **kwargs: P.kwargs
110
132
  ) -> AsyncResponse | AsyncResponse[FormattableT]:
111
- """Generates a Asyncresponse using the LLM asynchronously."""
133
+ """Generates a response using the LLM asynchronously."""
112
134
  return await self.call(*args, **kwargs)
113
135
 
114
136
  @overload
@@ -125,10 +147,7 @@ class AsyncCall(
125
147
  self, *args: P.args, **kwargs: P.kwargs
126
148
  ) -> AsyncResponse | AsyncResponse[FormattableT]:
127
149
  """Generates a response using the LLM asynchronously."""
128
- messages = await self.fn(*args, **kwargs)
129
- return await self.model.call_async(
130
- messages=messages, tools=self.toolkit, format=self.format
131
- )
150
+ return await self.prompt.call(self.model, *args, **kwargs)
132
151
 
133
152
  @overload
134
153
  async def stream(
@@ -144,18 +163,25 @@ class AsyncCall(
144
163
  self, *args: P.args, **kwargs: P.kwargs
145
164
  ) -> AsyncStreamResponse[FormattableT] | AsyncStreamResponse:
146
165
  """Generates a streaming response using the LLM asynchronously."""
147
- messages = await self.fn(*args, **kwargs)
148
- return await self.model.stream_async(
149
- messages=messages, tools=self.toolkit, format=self.format
150
- )
166
+ return await self.prompt.stream(self.model, *args, **kwargs)
151
167
 
152
168
 
153
169
  @dataclass
154
- class ContextCall(
155
- BaseCall[P, ContextPrompt, ContextToolkit[DepsT], FormattableT],
156
- Generic[P, DepsT, FormattableT],
157
- ):
158
- """A class for generating responses using LLMs."""
170
+ class ContextCall(BaseCall, Generic[P, DepsT, FormattableT]):
171
+ """A context-aware call that directly generates LLM responses without requiring a model argument.
172
+
173
+ Created by decorating a `ContextMessageTemplate` with `llm.call`. The decorated function
174
+ (with first parameter `'ctx'` of type `Context[DepsT]`) becomes directly callable to generate
175
+ responses with context dependencies, with the `Model` bundled in.
176
+
177
+ A `ContextCall` is essentially: `ContextMessageTemplate` + tools + format + `Model`.
178
+ It can be invoked directly: `call(ctx, *args, **kwargs)` (no model argument needed).
179
+
180
+ The model can be overridden at runtime using `with llm.model(...)` context manager.
181
+ """
182
+
183
+ prompt: ContextPrompt[P, DepsT, FormattableT]
184
+ """The underlying ContextPrompt instance that generates messages with tools and format."""
159
185
 
160
186
  @overload
161
187
  def __call__(
@@ -199,10 +225,7 @@ class ContextCall(
199
225
  self, ctx: Context[DepsT], *args: P.args, **kwargs: P.kwargs
200
226
  ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
201
227
  """Generates a response using the LLM."""
202
- messages = self.fn(ctx, *args, **kwargs)
203
- return self.model.context_call(
204
- ctx=ctx, messages=messages, tools=self.toolkit, format=self.format
205
- )
228
+ return self.prompt.call(self.model, ctx, *args, **kwargs)
206
229
 
207
230
  @overload
208
231
  def stream(
@@ -226,18 +249,25 @@ class ContextCall(
226
249
  ContextStreamResponse[DepsT, None] | ContextStreamResponse[DepsT, FormattableT]
227
250
  ):
228
251
  """Generates a streaming response using the LLM."""
229
- messages = self.fn(ctx, *args, **kwargs)
230
- return self.model.context_stream(
231
- ctx=ctx, messages=messages, tools=self.toolkit, format=self.format
232
- )
252
+ return self.prompt.stream(self.model, ctx, *args, **kwargs)
233
253
 
234
254
 
235
255
  @dataclass
236
- class AsyncContextCall(
237
- BaseCall[P, AsyncContextPrompt, AsyncContextToolkit[DepsT], FormattableT],
238
- Generic[P, DepsT, FormattableT],
239
- ):
240
- """A class for generating responses using LLMs asynchronously."""
256
+ class AsyncContextCall(BaseCall, Generic[P, DepsT, FormattableT]):
257
+ """An async context-aware call that directly generates LLM responses without requiring a model argument.
258
+
259
+ Created by decorating an async `ContextMessageTemplate` with `llm.call`. The decorated async
260
+ function (with first parameter `'ctx'` of type `Context[DepsT]`) becomes directly callable to generate
261
+ responses asynchronously with context dependencies, with the `Model` bundled in.
262
+
263
+ An `AsyncContextCall` is essentially: async `ContextMessageTemplate` + tools + format + `Model`.
264
+ It can be invoked directly: `await call(ctx, *args, **kwargs)` (no model argument needed).
265
+
266
+ The model can be overridden at runtime using `with llm.model(...)` context manager.
267
+ """
268
+
269
+ prompt: AsyncContextPrompt[P, DepsT, FormattableT]
270
+ """The underlying AsyncContextPrompt instance that generates messages with tools and format."""
241
271
 
242
272
  @overload
243
273
  async def __call__(
@@ -281,10 +311,7 @@ class AsyncContextCall(
281
311
  self, ctx: Context[DepsT], *args: P.args, **kwargs: P.kwargs
282
312
  ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
283
313
  """Generates a response using the LLM asynchronously."""
284
- messages = await self.fn(ctx, *args, **kwargs)
285
- return await self.model.context_call_async(
286
- ctx=ctx, messages=messages, tools=self.toolkit, format=self.format
287
- )
314
+ return await self.prompt.call(self.model, ctx, *args, **kwargs)
288
315
 
289
316
  @overload
290
317
  async def stream(
@@ -309,7 +336,4 @@ class AsyncContextCall(
309
336
  | AsyncContextStreamResponse[DepsT, FormattableT]
310
337
  ):
311
338
  """Generates a streaming response using the LLM asynchronously."""
312
- messages = await self.fn(ctx, *args, **kwargs)
313
- return await self.model.context_stream_async(
314
- ctx=ctx, messages=messages, tools=self.toolkit, format=self.format
315
- )
339
+ return await self.prompt.stream(self.model, ctx, *args, **kwargs)