mirascope 2.0.0a2__py3-none-any.whl → 2.0.0a4__py3-none-any.whl

This diff shows the content of publicly available package versions as released to a supported public registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that registry.
Files changed (252)
  1. mirascope/__init__.py +2 -2
  2. mirascope/api/__init__.py +6 -0
  3. mirascope/api/_generated/README.md +207 -0
  4. mirascope/api/_generated/__init__.py +141 -0
  5. mirascope/api/_generated/client.py +163 -0
  6. mirascope/api/_generated/core/__init__.py +52 -0
  7. mirascope/api/_generated/core/api_error.py +23 -0
  8. mirascope/api/_generated/core/client_wrapper.py +58 -0
  9. mirascope/api/_generated/core/datetime_utils.py +30 -0
  10. mirascope/api/_generated/core/file.py +70 -0
  11. mirascope/api/_generated/core/force_multipart.py +16 -0
  12. mirascope/api/_generated/core/http_client.py +619 -0
  13. mirascope/api/_generated/core/http_response.py +55 -0
  14. mirascope/api/_generated/core/jsonable_encoder.py +102 -0
  15. mirascope/api/_generated/core/pydantic_utilities.py +310 -0
  16. mirascope/api/_generated/core/query_encoder.py +60 -0
  17. mirascope/api/_generated/core/remove_none_from_dict.py +11 -0
  18. mirascope/api/_generated/core/request_options.py +35 -0
  19. mirascope/api/_generated/core/serialization.py +282 -0
  20. mirascope/api/_generated/docs/__init__.py +4 -0
  21. mirascope/api/_generated/docs/client.py +95 -0
  22. mirascope/api/_generated/docs/raw_client.py +132 -0
  23. mirascope/api/_generated/environment.py +9 -0
  24. mirascope/api/_generated/errors/__init__.py +17 -0
  25. mirascope/api/_generated/errors/bad_request_error.py +15 -0
  26. mirascope/api/_generated/errors/conflict_error.py +15 -0
  27. mirascope/api/_generated/errors/forbidden_error.py +15 -0
  28. mirascope/api/_generated/errors/internal_server_error.py +15 -0
  29. mirascope/api/_generated/errors/not_found_error.py +15 -0
  30. mirascope/api/_generated/health/__init__.py +7 -0
  31. mirascope/api/_generated/health/client.py +96 -0
  32. mirascope/api/_generated/health/raw_client.py +129 -0
  33. mirascope/api/_generated/health/types/__init__.py +8 -0
  34. mirascope/api/_generated/health/types/health_check_response.py +24 -0
  35. mirascope/api/_generated/health/types/health_check_response_status.py +5 -0
  36. mirascope/api/_generated/organizations/__init__.py +25 -0
  37. mirascope/api/_generated/organizations/client.py +380 -0
  38. mirascope/api/_generated/organizations/raw_client.py +876 -0
  39. mirascope/api/_generated/organizations/types/__init__.py +23 -0
  40. mirascope/api/_generated/organizations/types/organizations_create_response.py +24 -0
  41. mirascope/api/_generated/organizations/types/organizations_create_response_role.py +7 -0
  42. mirascope/api/_generated/organizations/types/organizations_get_response.py +24 -0
  43. mirascope/api/_generated/organizations/types/organizations_get_response_role.py +7 -0
  44. mirascope/api/_generated/organizations/types/organizations_list_response_item.py +24 -0
  45. mirascope/api/_generated/organizations/types/organizations_list_response_item_role.py +7 -0
  46. mirascope/api/_generated/organizations/types/organizations_update_response.py +24 -0
  47. mirascope/api/_generated/organizations/types/organizations_update_response_role.py +7 -0
  48. mirascope/api/_generated/projects/__init__.py +17 -0
  49. mirascope/api/_generated/projects/client.py +458 -0
  50. mirascope/api/_generated/projects/raw_client.py +1016 -0
  51. mirascope/api/_generated/projects/types/__init__.py +15 -0
  52. mirascope/api/_generated/projects/types/projects_create_response.py +30 -0
  53. mirascope/api/_generated/projects/types/projects_get_response.py +30 -0
  54. mirascope/api/_generated/projects/types/projects_list_response_item.py +30 -0
  55. mirascope/api/_generated/projects/types/projects_update_response.py +30 -0
  56. mirascope/api/_generated/reference.md +753 -0
  57. mirascope/api/_generated/traces/__init__.py +55 -0
  58. mirascope/api/_generated/traces/client.py +162 -0
  59. mirascope/api/_generated/traces/raw_client.py +168 -0
  60. mirascope/api/_generated/traces/types/__init__.py +95 -0
  61. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item.py +36 -0
  62. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource.py +31 -0
  63. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item.py +25 -0
  64. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value.py +54 -0
  65. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_array_value.py +23 -0
  66. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value.py +28 -0
  67. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value_values_item.py +24 -0
  68. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item.py +35 -0
  69. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope.py +35 -0
  70. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item.py +27 -0
  71. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value.py +54 -0
  72. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_array_value.py +23 -0
  73. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value.py +28 -0
  74. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value_values_item.py +24 -0
  75. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item.py +60 -0
  76. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item.py +29 -0
  77. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value.py +54 -0
  78. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_array_value.py +23 -0
  79. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value.py +28 -0
  80. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value_values_item.py +24 -0
  81. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_status.py +24 -0
  82. mirascope/api/_generated/traces/types/traces_create_response.py +27 -0
  83. mirascope/api/_generated/traces/types/traces_create_response_partial_success.py +28 -0
  84. mirascope/api/_generated/types/__init__.py +37 -0
  85. mirascope/api/_generated/types/already_exists_error.py +24 -0
  86. mirascope/api/_generated/types/already_exists_error_tag.py +5 -0
  87. mirascope/api/_generated/types/database_error.py +24 -0
  88. mirascope/api/_generated/types/database_error_tag.py +5 -0
  89. mirascope/api/_generated/types/http_api_decode_error.py +29 -0
  90. mirascope/api/_generated/types/http_api_decode_error_tag.py +5 -0
  91. mirascope/api/_generated/types/issue.py +40 -0
  92. mirascope/api/_generated/types/issue_tag.py +17 -0
  93. mirascope/api/_generated/types/not_found_error_body.py +24 -0
  94. mirascope/api/_generated/types/not_found_error_tag.py +5 -0
  95. mirascope/api/_generated/types/permission_denied_error.py +24 -0
  96. mirascope/api/_generated/types/permission_denied_error_tag.py +7 -0
  97. mirascope/api/_generated/types/property_key.py +7 -0
  98. mirascope/api/_generated/types/property_key_key.py +27 -0
  99. mirascope/api/_generated/types/property_key_key_tag.py +5 -0
  100. mirascope/api/client.py +255 -0
  101. mirascope/api/settings.py +81 -0
  102. mirascope/llm/__init__.py +45 -11
  103. mirascope/llm/calls/calls.py +81 -57
  104. mirascope/llm/calls/decorator.py +121 -115
  105. mirascope/llm/content/__init__.py +3 -2
  106. mirascope/llm/context/_utils.py +19 -6
  107. mirascope/llm/exceptions.py +30 -16
  108. mirascope/llm/formatting/_utils.py +9 -5
  109. mirascope/llm/formatting/format.py +2 -2
  110. mirascope/llm/formatting/from_call_args.py +2 -2
  111. mirascope/llm/messages/message.py +13 -5
  112. mirascope/llm/models/__init__.py +2 -2
  113. mirascope/llm/models/models.py +189 -81
  114. mirascope/llm/prompts/__init__.py +13 -12
  115. mirascope/llm/prompts/_utils.py +27 -24
  116. mirascope/llm/prompts/decorator.py +133 -204
  117. mirascope/llm/prompts/prompts.py +424 -0
  118. mirascope/llm/prompts/protocols.py +25 -59
  119. mirascope/llm/providers/__init__.py +44 -0
  120. mirascope/llm/{clients → providers}/_missing_import_stubs.py +8 -6
  121. mirascope/llm/providers/anthropic/__init__.py +29 -0
  122. mirascope/llm/providers/anthropic/_utils/__init__.py +23 -0
  123. mirascope/llm/providers/anthropic/_utils/beta_decode.py +271 -0
  124. mirascope/llm/providers/anthropic/_utils/beta_encode.py +216 -0
  125. mirascope/llm/{clients → providers}/anthropic/_utils/decode.py +44 -11
  126. mirascope/llm/providers/anthropic/_utils/encode.py +356 -0
  127. mirascope/llm/providers/anthropic/beta_provider.py +322 -0
  128. mirascope/llm/providers/anthropic/model_id.py +23 -0
  129. mirascope/llm/providers/anthropic/model_info.py +87 -0
  130. mirascope/llm/providers/anthropic/provider.py +416 -0
  131. mirascope/llm/{clients → providers}/base/__init__.py +3 -3
  132. mirascope/llm/{clients → providers}/base/_utils.py +25 -8
  133. mirascope/llm/{clients/base/client.py → providers/base/base_provider.py} +255 -126
  134. mirascope/llm/providers/google/__init__.py +21 -0
  135. mirascope/llm/{clients → providers}/google/_utils/decode.py +61 -7
  136. mirascope/llm/{clients → providers}/google/_utils/encode.py +44 -30
  137. mirascope/llm/providers/google/model_id.py +22 -0
  138. mirascope/llm/providers/google/model_info.py +62 -0
  139. mirascope/llm/providers/google/provider.py +442 -0
  140. mirascope/llm/providers/load_provider.py +54 -0
  141. mirascope/llm/providers/mlx/__init__.py +24 -0
  142. mirascope/llm/providers/mlx/_utils.py +129 -0
  143. mirascope/llm/providers/mlx/encoding/__init__.py +8 -0
  144. mirascope/llm/providers/mlx/encoding/base.py +69 -0
  145. mirascope/llm/providers/mlx/encoding/transformers.py +147 -0
  146. mirascope/llm/providers/mlx/mlx.py +237 -0
  147. mirascope/llm/providers/mlx/model_id.py +17 -0
  148. mirascope/llm/providers/mlx/provider.py +415 -0
  149. mirascope/llm/providers/model_id.py +16 -0
  150. mirascope/llm/providers/ollama/__init__.py +19 -0
  151. mirascope/llm/providers/ollama/provider.py +71 -0
  152. mirascope/llm/providers/openai/__init__.py +6 -0
  153. mirascope/llm/providers/openai/completions/__init__.py +25 -0
  154. mirascope/llm/{clients → providers}/openai/completions/_utils/__init__.py +2 -0
  155. mirascope/llm/{clients → providers}/openai/completions/_utils/decode.py +60 -6
  156. mirascope/llm/{clients → providers}/openai/completions/_utils/encode.py +37 -26
  157. mirascope/llm/providers/openai/completions/base_provider.py +513 -0
  158. mirascope/llm/providers/openai/completions/provider.py +22 -0
  159. mirascope/llm/providers/openai/model_id.py +31 -0
  160. mirascope/llm/providers/openai/model_info.py +303 -0
  161. mirascope/llm/providers/openai/provider.py +398 -0
  162. mirascope/llm/providers/openai/responses/__init__.py +21 -0
  163. mirascope/llm/{clients → providers}/openai/responses/_utils/decode.py +59 -6
  164. mirascope/llm/{clients → providers}/openai/responses/_utils/encode.py +34 -23
  165. mirascope/llm/providers/openai/responses/provider.py +469 -0
  166. mirascope/llm/providers/provider_id.py +23 -0
  167. mirascope/llm/providers/provider_registry.py +169 -0
  168. mirascope/llm/providers/together/__init__.py +19 -0
  169. mirascope/llm/providers/together/provider.py +40 -0
  170. mirascope/llm/responses/__init__.py +3 -0
  171. mirascope/llm/responses/base_response.py +14 -5
  172. mirascope/llm/responses/base_stream_response.py +35 -6
  173. mirascope/llm/responses/finish_reason.py +1 -0
  174. mirascope/llm/responses/response.py +33 -13
  175. mirascope/llm/responses/root_response.py +12 -13
  176. mirascope/llm/responses/stream_response.py +35 -23
  177. mirascope/llm/responses/usage.py +95 -0
  178. mirascope/llm/tools/__init__.py +9 -2
  179. mirascope/llm/tools/_utils.py +12 -3
  180. mirascope/llm/tools/protocols.py +4 -4
  181. mirascope/llm/tools/tool_schema.py +44 -9
  182. mirascope/llm/tools/tools.py +10 -9
  183. mirascope/ops/__init__.py +156 -0
  184. mirascope/ops/_internal/__init__.py +5 -0
  185. mirascope/ops/_internal/closure.py +1118 -0
  186. mirascope/ops/_internal/configuration.py +126 -0
  187. mirascope/ops/_internal/context.py +76 -0
  188. mirascope/ops/_internal/exporters/__init__.py +26 -0
  189. mirascope/ops/_internal/exporters/exporters.py +342 -0
  190. mirascope/ops/_internal/exporters/processors.py +104 -0
  191. mirascope/ops/_internal/exporters/types.py +165 -0
  192. mirascope/ops/_internal/exporters/utils.py +29 -0
  193. mirascope/ops/_internal/instrumentation/__init__.py +8 -0
  194. mirascope/ops/_internal/instrumentation/llm/__init__.py +8 -0
  195. mirascope/ops/_internal/instrumentation/llm/encode.py +238 -0
  196. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/__init__.py +38 -0
  197. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_input_messages.py +31 -0
  198. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_output_messages.py +38 -0
  199. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_system_instructions.py +18 -0
  200. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/shared.py +100 -0
  201. mirascope/ops/_internal/instrumentation/llm/llm.py +1288 -0
  202. mirascope/ops/_internal/propagation.py +198 -0
  203. mirascope/ops/_internal/protocols.py +51 -0
  204. mirascope/ops/_internal/session.py +139 -0
  205. mirascope/ops/_internal/spans.py +232 -0
  206. mirascope/ops/_internal/traced_calls.py +371 -0
  207. mirascope/ops/_internal/traced_functions.py +394 -0
  208. mirascope/ops/_internal/tracing.py +276 -0
  209. mirascope/ops/_internal/types.py +13 -0
  210. mirascope/ops/_internal/utils.py +75 -0
  211. mirascope/ops/_internal/versioned_calls.py +512 -0
  212. mirascope/ops/_internal/versioned_functions.py +346 -0
  213. mirascope/ops/_internal/versioning.py +303 -0
  214. mirascope/ops/exceptions.py +21 -0
  215. {mirascope-2.0.0a2.dist-info → mirascope-2.0.0a4.dist-info}/METADATA +78 -3
  216. mirascope-2.0.0a4.dist-info/RECORD +247 -0
  217. {mirascope-2.0.0a2.dist-info → mirascope-2.0.0a4.dist-info}/WHEEL +1 -1
  218. mirascope/graphs/__init__.py +0 -22
  219. mirascope/graphs/finite_state_machine.py +0 -625
  220. mirascope/llm/agents/__init__.py +0 -15
  221. mirascope/llm/agents/agent.py +0 -97
  222. mirascope/llm/agents/agent_template.py +0 -45
  223. mirascope/llm/agents/decorator.py +0 -176
  224. mirascope/llm/calls/base_call.py +0 -33
  225. mirascope/llm/clients/__init__.py +0 -34
  226. mirascope/llm/clients/anthropic/__init__.py +0 -25
  227. mirascope/llm/clients/anthropic/_utils/encode.py +0 -243
  228. mirascope/llm/clients/anthropic/clients.py +0 -819
  229. mirascope/llm/clients/anthropic/model_ids.py +0 -8
  230. mirascope/llm/clients/google/__init__.py +0 -20
  231. mirascope/llm/clients/google/clients.py +0 -853
  232. mirascope/llm/clients/google/model_ids.py +0 -15
  233. mirascope/llm/clients/openai/__init__.py +0 -25
  234. mirascope/llm/clients/openai/completions/__init__.py +0 -28
  235. mirascope/llm/clients/openai/completions/_utils/model_features.py +0 -81
  236. mirascope/llm/clients/openai/completions/clients.py +0 -833
  237. mirascope/llm/clients/openai/completions/model_ids.py +0 -8
  238. mirascope/llm/clients/openai/responses/__init__.py +0 -26
  239. mirascope/llm/clients/openai/responses/_utils/__init__.py +0 -13
  240. mirascope/llm/clients/openai/responses/_utils/model_features.py +0 -87
  241. mirascope/llm/clients/openai/responses/clients.py +0 -832
  242. mirascope/llm/clients/openai/responses/model_ids.py +0 -8
  243. mirascope/llm/clients/openai/shared/__init__.py +0 -7
  244. mirascope/llm/clients/openai/shared/_utils.py +0 -55
  245. mirascope/llm/clients/providers.py +0 -175
  246. mirascope-2.0.0a2.dist-info/RECORD +0 -102
  247. /mirascope/llm/{clients → providers}/base/kwargs.py +0 -0
  248. /mirascope/llm/{clients → providers}/base/params.py +0 -0
  249. /mirascope/llm/{clients/anthropic → providers/google}/_utils/__init__.py +0 -0
  250. /mirascope/llm/{clients → providers}/google/message.py +0 -0
  251. /mirascope/llm/{clients/google → providers/openai/responses}/_utils/__init__.py +0 -0
  252. {mirascope-2.0.0a2.dist-info → mirascope-2.0.0a4.dist-info}/licenses/LICENSE +0 -0
mirascope/llm/providers/openai/provider.py
@@ -0,0 +1,398 @@
+"""Unified OpenAI client implementation."""
+
+from collections.abc import Sequence
+from typing_extensions import Unpack
+
+from openai import OpenAI
+
+from ...context import Context, DepsT
+from ...formatting import Format, FormattableT
+from ...messages import Message
+from ...responses import (
+    AsyncContextResponse,
+    AsyncContextStreamResponse,
+    AsyncResponse,
+    AsyncStreamResponse,
+    ContextResponse,
+    ContextStreamResponse,
+    Response,
+    StreamResponse,
+)
+from ...tools import (
+    AsyncContextTool,
+    AsyncContextToolkit,
+    AsyncTool,
+    AsyncToolkit,
+    ContextTool,
+    ContextToolkit,
+    Tool,
+    Toolkit,
+)
+from ..base import BaseProvider, Params
+from .completions import OpenAICompletionsProvider
+from .model_id import OPENAI_KNOWN_MODELS, OpenAIModelId
+from .responses import OpenAIResponsesProvider
+
+
+def _has_audio_content(messages: Sequence[Message]) -> bool:
+    """Returns whether a sequence of messages contains any audio content."""
+    for message in messages:
+        if message.role == "system":
+            continue
+        for content in message.content:
+            if content.type == "audio":
+                return True
+    return False
+
+
+def choose_api_mode(model_id: OpenAIModelId, messages: Sequence[Message]) -> str:
+    """Choose between the 'responses' and 'completions' API based on model_id and messages.
+
+    Args:
+        model_id: The model identifier.
+        messages: The messages to send to the LLM.
+
+    Returns:
+        Either "responses" or "completions" depending on the model and message content.
+
+    If the user manually specified an API mode (by appending it as a suffix to the
+    model id), then we use it.
+
+    Otherwise, we prefer the responses API where supported (because it has better
+    reasoning support and better prompt caching). However, we will use the completions
+    API if the messages contain any audio content, as audio content is not yet
+    supported in the responses API.
+    """
+    if model_id.endswith(":completions"):
+        return "completions"
+    elif model_id.endswith(":responses"):
+        return "responses"
+
+    if _has_audio_content(messages):
+        return "completions"
+
+    if f"{model_id}:responses" in OPENAI_KNOWN_MODELS:
+        # Prefer the responses API when we know it is available
+        return "responses"
+    elif f"{model_id}:completions" in OPENAI_KNOWN_MODELS:
+        # If we know from testing that the completions API is available, and
+        # (implied by the above) that responses wasn't, then we should use completions
+        return "completions"
+
+    # If we have neither :responses nor :completions in the known models, this is
+    # likely a new model we haven't tested. We default to the responses API for
+    # openai/ models (on the assumption that they are new models and OpenAI prefers
+    # the responses API) but completions for other models (on the assumption that
+    # they are other models routing through the OpenAI completions API).
+    if model_id.startswith("openai/"):
+        return "responses"
+    else:
+        return "completions"
+
+
+class OpenAIRoutedCompletionsProvider(OpenAICompletionsProvider):
+    """OpenAI completions provider that reports provider_id as 'openai'."""
+
+    id = "openai"
+
+
+class OpenAIRoutedResponsesProvider(OpenAIResponsesProvider):
+    """OpenAI responses provider that reports provider_id as 'openai'."""
+
+    id = "openai"
+
+
+class OpenAIProvider(BaseProvider[OpenAI]):
+    """Unified provider for OpenAI that routes to the Completions or Responses API based on model_id."""
+
+    id = "openai"
+    default_scope = "openai/"
+
+    def __init__(
+        self, *, api_key: str | None = None, base_url: str | None = None
+    ) -> None:
+        """Initialize the OpenAI provider with both subproviders."""
+        self._completions_provider = OpenAIRoutedCompletionsProvider(
+            api_key=api_key, base_url=base_url
+        )
+        self._responses_provider = OpenAIRoutedResponsesProvider(
+            api_key=api_key, base_url=base_url
+        )
+        # Use the completions provider's underlying OpenAI client as the main one
+        self.client = self._completions_provider.client
+
+    def _choose_subprovider(
+        self, model_id: OpenAIModelId, messages: Sequence[Message]
+    ) -> OpenAICompletionsProvider | OpenAIResponsesProvider:
+        """Choose the appropriate subprovider based on model_id and messages.
+
+        Args:
+            model_id: The model identifier.
+            messages: The messages to send to the LLM.
+
+        Returns:
+            The responses or completions subprovider.
+        """
+        api_mode = choose_api_mode(model_id, messages)
+        if api_mode == "responses":
+            return self._responses_provider
+        return self._completions_provider
+
+    def _call(
+        self,
+        *,
+        model_id: OpenAIModelId,
+        messages: Sequence[Message],
+        tools: Sequence[Tool] | Toolkit | None = None,
+        format: type[FormattableT] | Format[FormattableT] | None = None,
+        **params: Unpack[Params],
+    ) -> Response | Response[FormattableT]:
+        """Generate an `llm.Response` by synchronously calling the OpenAI API.
+
+        Args:
+            model_id: Model identifier to use.
+            messages: Messages to send to the LLM.
+            tools: Optional tools that the model may invoke.
+            format: Optional response format specifier.
+            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+        Returns:
+            An `llm.Response` object containing the LLM-generated content.
+        """
+        client = self._choose_subprovider(model_id, messages)
+        return client.call(
+            model_id=model_id,
+            messages=messages,
+            tools=tools,
+            format=format,
+            **params,
+        )
+
+    def _context_call(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: OpenAIModelId,
+        messages: Sequence[Message],
+        tools: Sequence[Tool | ContextTool[DepsT]]
+        | ContextToolkit[DepsT]
+        | None = None,
+        format: type[FormattableT] | Format[FormattableT] | None = None,
+        **params: Unpack[Params],
+    ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
+        """Generate an `llm.ContextResponse` by synchronously calling the OpenAI API.
+
+        Args:
+            ctx: Context object with dependencies for tools.
+            model_id: Model identifier to use.
+            messages: Messages to send to the LLM.
+            tools: Optional tools that the model may invoke.
+            format: Optional response format specifier.
+            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+        Returns:
+            An `llm.ContextResponse` object containing the LLM-generated content.
+        """
+        client = self._choose_subprovider(model_id, messages)
+        return client.context_call(
+            ctx=ctx,
+            model_id=model_id,
+            messages=messages,
+            tools=tools,
+            format=format,
+            **params,
+        )
+
+    async def _call_async(
+        self,
+        *,
+        model_id: OpenAIModelId,
+        messages: Sequence[Message],
+        tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
+        format: type[FormattableT] | Format[FormattableT] | None = None,
+        **params: Unpack[Params],
+    ) -> AsyncResponse | AsyncResponse[FormattableT]:
+        """Generate an `llm.AsyncResponse` by asynchronously calling the OpenAI API.
+
+        Args:
+            model_id: Model identifier to use.
+            messages: Messages to send to the LLM.
+            tools: Optional tools that the model may invoke.
+            format: Optional response format specifier.
+            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+        Returns:
+            An `llm.AsyncResponse` object containing the LLM-generated content.
+        """
+        return await self._choose_subprovider(model_id, messages).call_async(
+            model_id=model_id,
+            messages=messages,
+            tools=tools,
+            format=format,
+            **params,
+        )
+
+    async def _context_call_async(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: OpenAIModelId,
+        messages: Sequence[Message],
+        tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
+        | AsyncContextToolkit[DepsT]
+        | None = None,
+        format: type[FormattableT] | Format[FormattableT] | None = None,
+        **params: Unpack[Params],
+    ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
+        """Generate an `llm.AsyncContextResponse` by asynchronously calling the OpenAI API.
+
+        Args:
+            ctx: Context object with dependencies for tools.
+            model_id: Model identifier to use.
+            messages: Messages to send to the LLM.
+            tools: Optional tools that the model may invoke.
+            format: Optional response format specifier.
+            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+        Returns:
+            An `llm.AsyncContextResponse` object containing the LLM-generated content.
+        """
+        return await self._choose_subprovider(model_id, messages).context_call_async(
+            ctx=ctx,
+            model_id=model_id,
+            messages=messages,
+            tools=tools,
+            format=format,
+            **params,
+        )
+
+    def _stream(
+        self,
+        *,
+        model_id: OpenAIModelId,
+        messages: Sequence[Message],
+        tools: Sequence[Tool] | Toolkit | None = None,
+        format: type[FormattableT] | Format[FormattableT] | None = None,
+        **params: Unpack[Params],
+    ) -> StreamResponse | StreamResponse[FormattableT]:
+        """Generate an `llm.StreamResponse` by synchronously streaming from the OpenAI API.
+
+        Args:
+            model_id: Model identifier to use.
+            messages: Messages to send to the LLM.
+            tools: Optional tools that the model may invoke.
+            format: Optional response format specifier.
+            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+        Returns:
+            An `llm.StreamResponse` object for iterating over the LLM-generated content.
+        """
+        client = self._choose_subprovider(model_id, messages)
+        return client.stream(
+            model_id=model_id,
+            messages=messages,
+            tools=tools,
+            format=format,
+            **params,
+        )
+
+    def _context_stream(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: OpenAIModelId,
+        messages: Sequence[Message],
+        tools: Sequence[Tool | ContextTool[DepsT]]
+        | ContextToolkit[DepsT]
+        | None = None,
+        format: type[FormattableT] | Format[FormattableT] | None = None,
+        **params: Unpack[Params],
+    ) -> ContextStreamResponse[DepsT] | ContextStreamResponse[DepsT, FormattableT]:
+        """Generate an `llm.ContextStreamResponse` by synchronously streaming from the OpenAI API.
+
+        Args:
+            ctx: Context object with dependencies for tools.
+            model_id: Model identifier to use.
+            messages: Messages to send to the LLM.
+            tools: Optional tools that the model may invoke.
+            format: Optional response format specifier.
+            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+        Returns:
+            An `llm.ContextStreamResponse` object for iterating over the LLM-generated content.
+        """
+        client = self._choose_subprovider(model_id, messages)
+        return client.context_stream(
+            ctx=ctx,
+            model_id=model_id,
+            messages=messages,
+            tools=tools,
+            format=format,
+            **params,
+        )
+
+    async def _stream_async(
+        self,
+        *,
+        model_id: OpenAIModelId,
+        messages: Sequence[Message],
+        tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
+        format: type[FormattableT] | Format[FormattableT] | None = None,
+        **params: Unpack[Params],
+    ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
+        """Generate an `llm.AsyncStreamResponse` by asynchronously streaming from the OpenAI API.
+
+        Args:
+            model_id: Model identifier to use.
+            messages: Messages to send to the LLM.
+            tools: Optional tools that the model may invoke.
+            format: Optional response format specifier.
+            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+        Returns:
+            An `llm.AsyncStreamResponse` object for asynchronously iterating over the LLM-generated content.
+        """
+        return await self._choose_subprovider(model_id, messages).stream_async(
+            model_id=model_id,
+            messages=messages,
+            tools=tools,
+            format=format,
+            **params,
+        )
+
+    async def _context_stream_async(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: OpenAIModelId,
+        messages: Sequence[Message],
+        tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
+        | AsyncContextToolkit[DepsT]
+        | None = None,
+        format: type[FormattableT] | Format[FormattableT] | None = None,
+        **params: Unpack[Params],
+    ) -> (
+        AsyncContextStreamResponse[DepsT]
+        | AsyncContextStreamResponse[DepsT, FormattableT]
+    ):
+        """Generate an `llm.AsyncContextStreamResponse` by asynchronously streaming from the OpenAI API.
+
+        Args:
+            ctx: Context object with dependencies for tools.
+            model_id: Model identifier to use.
+            messages: Messages to send to the LLM.
+            tools: Optional tools that the model may invoke.
+            format: Optional response format specifier.
+            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+        Returns:
+            An `llm.AsyncContextStreamResponse` object for asynchronously iterating over the LLM-generated content.
+        """
+        return await self._choose_subprovider(model_id, messages).context_stream_async(
+            ctx=ctx,
+            model_id=model_id,
+            messages=messages,
+            tools=tools,
+            format=format,
+            **params,
+        )
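The suffix override above makes the routing directly testable. A minimal sketch of the routing rules, using hypothetical model ids; the unsuffixed cases additionally assume that neither suffixed form appears in OPENAI_KNOWN_MODELS, whose contents this diff does not show:

# Illustrative sketch only; the model ids below are hypothetical.
from mirascope.llm.providers.openai.provider import choose_api_mode

# An explicit API-mode suffix always wins, regardless of messages or the registry.
assert choose_api_mode("openai/gpt-4o:completions", []) == "completions"
assert choose_api_mode("openai/gpt-4o:responses", []) == "responses"

# With no suffix, no audio content, and no registry entry, an "openai/"-scoped
# model defaults to the responses API and anything else to completions.
assert choose_api_mode("openai/brand-new-model", []) == "responses"
assert choose_api_mode("another-providers-model", []) == "completions"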
mirascope/llm/providers/openai/responses/__init__.py
@@ -0,0 +1,21 @@
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from .provider import OpenAIResponsesProvider
+else:
+    try:
+        from .provider import OpenAIResponsesProvider
+    except ImportError:  # pragma: no cover
+        from ..._missing_import_stubs import (
+            create_import_error_stub,
+            create_provider_stub,
+        )
+
+        OpenAIResponsesProvider = create_provider_stub(
+            "openai", "OpenAIResponsesProvider"
+        )
+
+
+__all__ = [
+    "OpenAIResponsesProvider",
+]
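The stub fallback above lets the package import cleanly even when the optional SDK it wraps is not installed, deferring the failure to first use while type checkers still see the real class. A minimal self-contained sketch of the same pattern, with hypothetical names (optional_backend, Backend) standing in for Mirascope's _missing_import_stubs helpers:

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Type checkers always resolve the real class, so annotations keep working.
    from optional_backend import Backend
else:
    try:
        from optional_backend import Backend
    except ImportError:  # The optional dependency is missing at runtime.

        class Backend:
            """Stub that defers the ImportError until the class is actually used."""

            def __init__(self, *args: object, **kwargs: object) -> None:
                raise ImportError(
                    "Backend requires the optional 'optional_backend' dependency."
                )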
mirascope/llm/providers/openai/responses/_utils/decode.py
@@ -29,8 +29,10 @@ from .....responses import (
     FinishReasonChunk,
     RawMessageChunk,
     RawStreamEventChunk,
+    Usage,
+    UsageDeltaChunk,
 )
-from ..model_ids import OpenAIResponsesModelId
+from ...model_id import OpenAIModelId, model_name
 
 INCOMPLETE_DETAILS_TO_FINISH_REASON = {
     "max_output_tokens": FinishReason.MAX_TOKENS,
@@ -38,6 +40,33 @@ INCOMPLETE_DETAILS_TO_FINISH_REASON = {
 }
 
 
+def _decode_usage(
+    usage: openai_types.ResponseUsage | None,
+) -> Usage | None:
+    """Convert OpenAI ResponseUsage to Mirascope Usage."""
+    if usage is None:  # pragma: no cover
+        return None
+
+    return Usage(
+        input_tokens=usage.input_tokens,
+        output_tokens=usage.output_tokens,
+        cache_read_tokens=(
+            usage.input_tokens_details.cached_tokens
+            if usage.input_tokens_details
+            else None
+        )
+        or 0,
+        cache_write_tokens=0,
+        reasoning_tokens=(
+            usage.output_tokens_details.reasoning_tokens
+            if usage.output_tokens_details
+            else None
+        )
+        or 0,
+        raw=usage,
+    )
+
+
 def _serialize_output_item(
     item: openai_types.ResponseOutputItem,
 ) -> dict[str, Any]:
@@ -47,9 +76,10 @@
 
 def decode_response(
     response: openai_types.Response,
-    model_id: OpenAIResponsesModelId,
-) -> tuple[AssistantMessage, FinishReason | None]:
-    """Convert OpenAI Responses Response to mirascope AssistantMessage."""
+    model_id: OpenAIModelId,
+    provider_id: str,
+) -> tuple[AssistantMessage, FinishReason | None, Usage | None]:
+    """Convert OpenAI Responses Response to mirascope AssistantMessage and usage."""
     parts: list[AssistantContentPart] = []
     finish_reason: FinishReason | None = None
     refused = False
@@ -91,14 +121,16 @@
 
     assistant_message = AssistantMessage(
         content=parts,
-        provider="openai:responses",
+        provider_id=provider_id,
         model_id=model_id,
+        provider_model_name=model_name(model_id, "responses"),
        raw_message=[
             _serialize_output_item(output_item) for output_item in response.output
        ],
    )
 
-    return assistant_message, finish_reason
+    usage = _decode_usage(response.usage)
+    return assistant_message, finish_reason, usage
 
 
 class _OpenAIResponsesChunkProcessor:
@@ -174,6 +206,27 @@ class _OpenAIResponsesChunkProcessor:
         if self.refusal_encountered:
             yield FinishReasonChunk(finish_reason=FinishReason.REFUSAL)
 
+        # Emit usage delta if present
+        if event.response.usage:
+            usage = event.response.usage
+            yield UsageDeltaChunk(
+                input_tokens=usage.input_tokens,
+                output_tokens=usage.output_tokens,
+                cache_read_tokens=(
+                    usage.input_tokens_details.cached_tokens
+                    if usage.input_tokens_details
+                    else None
+                )
+                or 0,
+                cache_write_tokens=0,
+                reasoning_tokens=(
+                    usage.output_tokens_details.reasoning_tokens
+                    if usage.output_tokens_details
+                    else None
+                )
+                or 0,
+            )
+
 
 def decode_stream(
     openai_stream: Stream[ResponseStreamEvent],
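Both _decode_usage and the streaming usage-delta block above rely on the same coalescing idiom: the conditional expression maps a missing details object to None, and the trailing `or 0` then maps both that None and a None token count to zero. A small self-contained sketch (InputTokensDetails here is a hypothetical stand-in, not the OpenAI SDK type):

from dataclasses import dataclass


@dataclass
class InputTokensDetails:
    # Hypothetical stand-in for the SDK's input token details object.
    cached_tokens: int | None = None


def cache_read_tokens(details: InputTokensDetails | None) -> int:
    # Mirrors the decode logic: a missing details object and a missing
    # cached_tokens count both normalize to 0.
    return (details.cached_tokens if details else None) or 0


assert cache_read_tokens(None) == 0
assert cache_read_tokens(InputTokensDetails()) == 0
assert cache_read_tokens(InputTokensDetails(cached_tokens=42)) == 42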
mirascope/llm/providers/openai/responses/_utils/encode.py
@@ -37,11 +37,14 @@ from .....formatting import (
     resolve_format,
 )
 from .....messages import AssistantMessage, Message, UserMessage
-from .....tools import FORMAT_TOOL_NAME, BaseToolkit, ToolSchema
+from .....tools import FORMAT_TOOL_NAME, AnyToolSchema, BaseToolkit
 from ....base import Params, _utils as _base_utils
-from ...shared import _utils as _shared_utils
-from ..model_ids import OpenAIResponsesModelId
-from .model_features import NON_REASONING_MODELS
+from ...model_id import OpenAIModelId, model_name
+from ...model_info import (
+    MODELS_WITHOUT_JSON_OBJECT_SUPPORT,
+    MODELS_WITHOUT_JSON_SCHEMA_SUPPORT,
+    NON_REASONING_MODELS,
+)
 
 
 class ResponseCreateKwargs(TypedDict, total=False):
@@ -107,8 +110,8 @@
         elif part.type == "audio":
             raise FeatureNotSupportedError(
                 "audio input",
-                "openai:responses",
-                message='provider "openai:responses" does not support audio inputs. Try using "openai:completions" instead',
+                "openai",
+                message='provider "openai" does not support audio inputs when using :responses api. Try appending :completions to your model instead.',
             )
         else:
             raise NotImplementedError(
@@ -163,7 +166,7 @@
 
 
 def _encode_message(
-    message: Message, model_id: OpenAIResponsesModelId, encode_thoughts: bool
+    message: Message, model_id: OpenAIModelId, encode_thoughts: bool
 ) -> ResponseInputParam:
     """Convert a Mirascope Message to OpenAI Responses input items.
 
@@ -179,8 +182,9 @@
 
     if (
         message.role == "assistant"
-        and message.provider == "openai:responses"
-        and message.model_id == model_id
+        and message.provider_id in ("openai", "openai:responses")
+        and message.provider_model_name
+        == model_name(model_id=model_id, api_mode="responses")
         and message.raw_message
         and not encode_thoughts
     ):
@@ -192,11 +196,11 @@
     return _encode_user_message(message)
 
 
-def _convert_tool_to_function_tool_param(tool: ToolSchema) -> FunctionToolParam:
+def _convert_tool_to_function_tool_param(tool: AnyToolSchema) -> FunctionToolParam:
     """Convert a Mirascope ToolSchema to OpenAI Responses FunctionToolParam."""
     schema_dict = tool.parameters.model_dump(by_alias=True, exclude_none=True)
     schema_dict["type"] = "object"
-    _shared_utils._ensure_additional_properties_false(schema_dict)
+    _base_utils.ensure_additional_properties_false(schema_dict)
 
     return FunctionToolParam(
         type="function",
@@ -219,7 +223,7 @@
         ResponseFormatTextJSONSchemaConfigParam for strict structured outputs
     """
     schema = format.schema.copy()
-    _shared_utils._ensure_additional_properties_false(schema)
+    _base_utils.ensure_additional_properties_false(schema)
 
     response_format: ResponseFormatTextJSONSchemaConfigParam = {
         "type": "json_schema",
@@ -243,23 +247,33 @@ def _compute_reasoning(thinking: bool) -> Reasoning:
 
 def encode_request(
     *,
-    model_id: OpenAIResponsesModelId,
+    model_id: OpenAIModelId,
     messages: Sequence[Message],
-    tools: Sequence[ToolSchema] | BaseToolkit | None,
+    tools: Sequence[AnyToolSchema] | BaseToolkit[AnyToolSchema] | None,
     format: type[FormattableT] | Format[FormattableT] | None,
     params: Params,
 ) -> tuple[Sequence[Message], Format[FormattableT] | None, ResponseCreateKwargs]:
     """Prepares a request for the `OpenAI.responses.create` method."""
+    if model_id.endswith(":completions"):
+        raise FeatureNotSupportedError(
+            feature="completions API",
+            provider_id="openai:responses",
+            model_id=model_id,
+            message=f"Cannot use completions model with responses client: {model_id}",
+        )
+
+    base_model_name = model_name(model_id, None)
+
     kwargs: ResponseCreateKwargs = ResponseCreateKwargs(
         {
-            "model": model_id,
+            "model": base_model_name,
         }
     )
     encode_thoughts = False
 
     with _base_utils.ensure_all_params_accessed(
         params=params,
-        provider="openai:responses",
+        provider_id="openai",
         unsupported_params=["top_k", "seed", "stop_sequences"],
     ) as param_accessor:
         if param_accessor.temperature is not None:
@@ -269,9 +283,9 @@
         if param_accessor.top_p is not None:
             kwargs["top_p"] = param_accessor.top_p
         if param_accessor.thinking is not None:
-            if model_id in NON_REASONING_MODELS:
+            if base_model_name in NON_REASONING_MODELS:
                 param_accessor.emit_warning_for_unused_param(
-                    "thinking", param_accessor.thinking, "openai:responses", model_id
+                    "thinking", param_accessor.thinking, "openai", model_id
                 )
             else:
                 # Assume model supports reasoning unless explicitly listed as non-reasoning
@@ -283,9 +297,7 @@
     tools = tools.tools if isinstance(tools, BaseToolkit) else tools or []
     openai_tools = [_convert_tool_to_function_tool_param(tool) for tool in tools]
 
-    model_supports_strict = (
-        model_id not in _shared_utils.MODELS_WITHOUT_JSON_SCHEMA_SUPPORT
-    )
+    model_supports_strict = model_id not in MODELS_WITHOUT_JSON_SCHEMA_SUPPORT
     default_mode = "strict" if model_supports_strict else "tool"
 
     format = resolve_format(format, default_mode=default_mode)
@@ -312,8 +324,7 @@
             name=FORMAT_TOOL_NAME,
         )
     elif (
-        format.mode == "json"
-        and model_id not in _shared_utils.MODELS_WITHOUT_JSON_OBJECT_SUPPORT
+        format.mode == "json" and model_id not in MODELS_WITHOUT_JSON_OBJECT_SUPPORT
     ):
         kwargs["text"] = {"format": ResponseFormatJSONObject(type="json_object")}
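Two details of encode_request above are worth calling out: :completions-suffixed ids are rejected with FeatureNotSupportedError, and model_name(model_id, None) produces the bare model name that is sent as the "model" field. A sketch of what that normalization appears to do, as a hypothetical re-implementation (the real helper lives in the new model_id.py, which this diff section does not include; whether it also strips a provider scope like "openai/" is not shown):

def base_model_name(model_id: str) -> str:
    # Hypothetical stand-in for model_name(model_id, None): strip a trailing
    # API-mode suffix so the wire request carries the plain model name.
    for suffix in (":responses", ":completions"):
        if model_id.endswith(suffix):
            return model_id.removesuffix(suffix)
    return model_id


assert base_model_name("gpt-4o:responses") == "gpt-4o"
assert base_model_name("gpt-4o") == "gpt-4o"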