mirascope 2.0.0a2__py3-none-any.whl → 2.0.0a3__py3-none-any.whl

This diff compares two publicly released versions of the package, as published to its public registry. It is provided for informational purposes only.
Files changed (204)
  1. mirascope/__init__.py +2 -2
  2. mirascope/api/__init__.py +6 -0
  3. mirascope/api/_generated/README.md +207 -0
  4. mirascope/api/_generated/__init__.py +85 -0
  5. mirascope/api/_generated/client.py +155 -0
  6. mirascope/api/_generated/core/__init__.py +52 -0
  7. mirascope/api/_generated/core/api_error.py +23 -0
  8. mirascope/api/_generated/core/client_wrapper.py +58 -0
  9. mirascope/api/_generated/core/datetime_utils.py +30 -0
  10. mirascope/api/_generated/core/file.py +70 -0
  11. mirascope/api/_generated/core/force_multipart.py +16 -0
  12. mirascope/api/_generated/core/http_client.py +619 -0
  13. mirascope/api/_generated/core/http_response.py +55 -0
  14. mirascope/api/_generated/core/jsonable_encoder.py +102 -0
  15. mirascope/api/_generated/core/pydantic_utilities.py +310 -0
  16. mirascope/api/_generated/core/query_encoder.py +60 -0
  17. mirascope/api/_generated/core/remove_none_from_dict.py +11 -0
  18. mirascope/api/_generated/core/request_options.py +35 -0
  19. mirascope/api/_generated/core/serialization.py +282 -0
  20. mirascope/api/_generated/docs/__init__.py +4 -0
  21. mirascope/api/_generated/docs/client.py +95 -0
  22. mirascope/api/_generated/docs/raw_client.py +132 -0
  23. mirascope/api/_generated/environment.py +9 -0
  24. mirascope/api/_generated/errors/__init__.py +7 -0
  25. mirascope/api/_generated/errors/bad_request_error.py +15 -0
  26. mirascope/api/_generated/health/__init__.py +7 -0
  27. mirascope/api/_generated/health/client.py +96 -0
  28. mirascope/api/_generated/health/raw_client.py +129 -0
  29. mirascope/api/_generated/health/types/__init__.py +8 -0
  30. mirascope/api/_generated/health/types/health_check_response.py +24 -0
  31. mirascope/api/_generated/health/types/health_check_response_status.py +5 -0
  32. mirascope/api/_generated/reference.md +167 -0
  33. mirascope/api/_generated/traces/__init__.py +55 -0
  34. mirascope/api/_generated/traces/client.py +162 -0
  35. mirascope/api/_generated/traces/raw_client.py +168 -0
  36. mirascope/api/_generated/traces/types/__init__.py +95 -0
  37. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item.py +36 -0
  38. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource.py +31 -0
  39. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item.py +25 -0
  40. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value.py +54 -0
  41. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_array_value.py +23 -0
  42. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value.py +28 -0
  43. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value_values_item.py +24 -0
  44. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item.py +35 -0
  45. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope.py +35 -0
  46. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item.py +27 -0
  47. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value.py +54 -0
  48. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_array_value.py +23 -0
  49. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value.py +28 -0
  50. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value_values_item.py +24 -0
  51. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item.py +60 -0
  52. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item.py +29 -0
  53. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value.py +54 -0
  54. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_array_value.py +23 -0
  55. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value.py +28 -0
  56. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value_values_item.py +24 -0
  57. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_status.py +24 -0
  58. mirascope/api/_generated/traces/types/traces_create_response.py +27 -0
  59. mirascope/api/_generated/traces/types/traces_create_response_partial_success.py +28 -0
  60. mirascope/api/_generated/types/__init__.py +21 -0
  61. mirascope/api/_generated/types/http_api_decode_error.py +31 -0
  62. mirascope/api/_generated/types/http_api_decode_error_tag.py +5 -0
  63. mirascope/api/_generated/types/issue.py +44 -0
  64. mirascope/api/_generated/types/issue_tag.py +17 -0
  65. mirascope/api/_generated/types/property_key.py +7 -0
  66. mirascope/api/_generated/types/property_key_tag.py +29 -0
  67. mirascope/api/_generated/types/property_key_tag_tag.py +5 -0
  68. mirascope/api/client.py +255 -0
  69. mirascope/api/settings.py +81 -0
  70. mirascope/llm/__init__.py +41 -11
  71. mirascope/llm/calls/calls.py +81 -57
  72. mirascope/llm/calls/decorator.py +121 -115
  73. mirascope/llm/content/__init__.py +3 -2
  74. mirascope/llm/context/_utils.py +19 -6
  75. mirascope/llm/exceptions.py +30 -16
  76. mirascope/llm/formatting/_utils.py +9 -5
  77. mirascope/llm/formatting/format.py +2 -2
  78. mirascope/llm/formatting/from_call_args.py +2 -2
  79. mirascope/llm/messages/message.py +13 -5
  80. mirascope/llm/models/__init__.py +2 -2
  81. mirascope/llm/models/models.py +189 -81
  82. mirascope/llm/prompts/__init__.py +13 -12
  83. mirascope/llm/prompts/_utils.py +27 -24
  84. mirascope/llm/prompts/decorator.py +133 -204
  85. mirascope/llm/prompts/prompts.py +424 -0
  86. mirascope/llm/prompts/protocols.py +25 -59
  87. mirascope/llm/providers/__init__.py +38 -0
  88. mirascope/llm/{clients → providers}/_missing_import_stubs.py +8 -6
  89. mirascope/llm/providers/anthropic/__init__.py +24 -0
  90. mirascope/llm/{clients → providers}/anthropic/_utils/decode.py +5 -4
  91. mirascope/llm/{clients → providers}/anthropic/_utils/encode.py +31 -10
  92. mirascope/llm/providers/anthropic/model_id.py +40 -0
  93. mirascope/llm/{clients/anthropic/clients.py → providers/anthropic/provider.py} +33 -418
  94. mirascope/llm/{clients → providers}/base/__init__.py +3 -3
  95. mirascope/llm/{clients → providers}/base/_utils.py +10 -7
  96. mirascope/llm/{clients/base/client.py → providers/base/base_provider.py} +255 -126
  97. mirascope/llm/providers/google/__init__.py +21 -0
  98. mirascope/llm/{clients → providers}/google/_utils/decode.py +6 -4
  99. mirascope/llm/{clients → providers}/google/_utils/encode.py +30 -24
  100. mirascope/llm/providers/google/model_id.py +28 -0
  101. mirascope/llm/providers/google/provider.py +438 -0
  102. mirascope/llm/providers/load_provider.py +48 -0
  103. mirascope/llm/providers/mlx/__init__.py +24 -0
  104. mirascope/llm/providers/mlx/_utils.py +107 -0
  105. mirascope/llm/providers/mlx/encoding/__init__.py +8 -0
  106. mirascope/llm/providers/mlx/encoding/base.py +69 -0
  107. mirascope/llm/providers/mlx/encoding/transformers.py +131 -0
  108. mirascope/llm/providers/mlx/mlx.py +237 -0
  109. mirascope/llm/providers/mlx/model_id.py +17 -0
  110. mirascope/llm/providers/mlx/provider.py +411 -0
  111. mirascope/llm/providers/model_id.py +16 -0
  112. mirascope/llm/providers/openai/__init__.py +6 -0
  113. mirascope/llm/providers/openai/completions/__init__.py +20 -0
  114. mirascope/llm/{clients/openai/responses → providers/openai/completions}/_utils/__init__.py +2 -0
  115. mirascope/llm/{clients → providers}/openai/completions/_utils/decode.py +5 -3
  116. mirascope/llm/{clients → providers}/openai/completions/_utils/encode.py +33 -23
  117. mirascope/llm/providers/openai/completions/provider.py +456 -0
  118. mirascope/llm/providers/openai/model_id.py +31 -0
  119. mirascope/llm/providers/openai/model_info.py +246 -0
  120. mirascope/llm/providers/openai/provider.py +386 -0
  121. mirascope/llm/providers/openai/responses/__init__.py +21 -0
  122. mirascope/llm/{clients → providers}/openai/responses/_utils/decode.py +5 -3
  123. mirascope/llm/{clients → providers}/openai/responses/_utils/encode.py +28 -17
  124. mirascope/llm/providers/openai/responses/provider.py +470 -0
  125. mirascope/llm/{clients → providers}/openai/shared/_utils.py +7 -3
  126. mirascope/llm/providers/provider_id.py +13 -0
  127. mirascope/llm/providers/provider_registry.py +167 -0
  128. mirascope/llm/responses/base_response.py +10 -5
  129. mirascope/llm/responses/base_stream_response.py +10 -5
  130. mirascope/llm/responses/response.py +24 -13
  131. mirascope/llm/responses/root_response.py +7 -12
  132. mirascope/llm/responses/stream_response.py +35 -23
  133. mirascope/llm/tools/__init__.py +9 -2
  134. mirascope/llm/tools/_utils.py +12 -3
  135. mirascope/llm/tools/protocols.py +4 -4
  136. mirascope/llm/tools/tool_schema.py +44 -9
  137. mirascope/llm/tools/tools.py +10 -9
  138. mirascope/ops/__init__.py +156 -0
  139. mirascope/ops/_internal/__init__.py +5 -0
  140. mirascope/ops/_internal/closure.py +1118 -0
  141. mirascope/ops/_internal/configuration.py +126 -0
  142. mirascope/ops/_internal/context.py +76 -0
  143. mirascope/ops/_internal/exporters/__init__.py +26 -0
  144. mirascope/ops/_internal/exporters/exporters.py +342 -0
  145. mirascope/ops/_internal/exporters/processors.py +104 -0
  146. mirascope/ops/_internal/exporters/types.py +165 -0
  147. mirascope/ops/_internal/exporters/utils.py +29 -0
  148. mirascope/ops/_internal/instrumentation/__init__.py +8 -0
  149. mirascope/ops/_internal/instrumentation/llm/__init__.py +8 -0
  150. mirascope/ops/_internal/instrumentation/llm/encode.py +238 -0
  151. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/__init__.py +38 -0
  152. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_input_messages.py +31 -0
  153. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_output_messages.py +38 -0
  154. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_system_instructions.py +18 -0
  155. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/shared.py +100 -0
  156. mirascope/ops/_internal/instrumentation/llm/llm.py +1288 -0
  157. mirascope/ops/_internal/propagation.py +198 -0
  158. mirascope/ops/_internal/protocols.py +51 -0
  159. mirascope/ops/_internal/session.py +139 -0
  160. mirascope/ops/_internal/spans.py +232 -0
  161. mirascope/ops/_internal/traced_calls.py +371 -0
  162. mirascope/ops/_internal/traced_functions.py +394 -0
  163. mirascope/ops/_internal/tracing.py +276 -0
  164. mirascope/ops/_internal/types.py +13 -0
  165. mirascope/ops/_internal/utils.py +75 -0
  166. mirascope/ops/_internal/versioned_calls.py +512 -0
  167. mirascope/ops/_internal/versioned_functions.py +346 -0
  168. mirascope/ops/_internal/versioning.py +303 -0
  169. mirascope/ops/exceptions.py +21 -0
  170. {mirascope-2.0.0a2.dist-info → mirascope-2.0.0a3.dist-info}/METADATA +76 -1
  171. mirascope-2.0.0a3.dist-info/RECORD +206 -0
  172. {mirascope-2.0.0a2.dist-info → mirascope-2.0.0a3.dist-info}/WHEEL +1 -1
  173. mirascope/graphs/__init__.py +0 -22
  174. mirascope/graphs/finite_state_machine.py +0 -625
  175. mirascope/llm/agents/__init__.py +0 -15
  176. mirascope/llm/agents/agent.py +0 -97
  177. mirascope/llm/agents/agent_template.py +0 -45
  178. mirascope/llm/agents/decorator.py +0 -176
  179. mirascope/llm/calls/base_call.py +0 -33
  180. mirascope/llm/clients/__init__.py +0 -34
  181. mirascope/llm/clients/anthropic/__init__.py +0 -25
  182. mirascope/llm/clients/anthropic/model_ids.py +0 -8
  183. mirascope/llm/clients/google/__init__.py +0 -20
  184. mirascope/llm/clients/google/clients.py +0 -853
  185. mirascope/llm/clients/google/model_ids.py +0 -15
  186. mirascope/llm/clients/openai/__init__.py +0 -25
  187. mirascope/llm/clients/openai/completions/__init__.py +0 -28
  188. mirascope/llm/clients/openai/completions/_utils/model_features.py +0 -81
  189. mirascope/llm/clients/openai/completions/clients.py +0 -833
  190. mirascope/llm/clients/openai/completions/model_ids.py +0 -8
  191. mirascope/llm/clients/openai/responses/__init__.py +0 -26
  192. mirascope/llm/clients/openai/responses/_utils/model_features.py +0 -87
  193. mirascope/llm/clients/openai/responses/clients.py +0 -832
  194. mirascope/llm/clients/openai/responses/model_ids.py +0 -8
  195. mirascope/llm/clients/providers.py +0 -175
  196. mirascope-2.0.0a2.dist-info/RECORD +0 -102
  197. /mirascope/llm/{clients → providers}/anthropic/_utils/__init__.py +0 -0
  198. /mirascope/llm/{clients → providers}/base/kwargs.py +0 -0
  199. /mirascope/llm/{clients → providers}/base/params.py +0 -0
  200. /mirascope/llm/{clients → providers}/google/_utils/__init__.py +0 -0
  201. /mirascope/llm/{clients → providers}/google/message.py +0 -0
  202. /mirascope/llm/{clients/openai/completions → providers/openai/responses}/_utils/__init__.py +0 -0
  203. /mirascope/llm/{clients → providers}/openai/shared/__init__.py +0 -0
  204. {mirascope-2.0.0a2.dist-info → mirascope-2.0.0a3.dist-info}/licenses/LICENSE +0 -0
mirascope/llm/{clients → providers}/openai/responses/_utils/encode.py

@@ -37,11 +37,11 @@ from .....formatting import (
     resolve_format,
 )
 from .....messages import AssistantMessage, Message, UserMessage
-from .....tools import FORMAT_TOOL_NAME, BaseToolkit, ToolSchema
+from .....tools import FORMAT_TOOL_NAME, AnyToolSchema, BaseToolkit
 from ....base import Params, _utils as _base_utils
+from ...model_id import OpenAIModelId, model_name
+from ...model_info import NON_REASONING_MODELS
 from ...shared import _utils as _shared_utils
-from ..model_ids import OpenAIResponsesModelId
-from .model_features import NON_REASONING_MODELS
 
 
 class ResponseCreateKwargs(TypedDict, total=False):
@@ -107,8 +107,8 @@ def _encode_user_message(
         elif part.type == "audio":
             raise FeatureNotSupportedError(
                 "audio input",
-                "openai:responses",
-                message='provider "openai:responses" does not support audio inputs. Try using "openai:completions" instead',
+                "openai",
+                message='provider "openai" does not support audio inputs when using :responses api. Try appending :completions to your model instead.',
             )
         else:
             raise NotImplementedError(
@@ -163,7 +163,7 @@ def _encode_assistant_message(
 
 
 def _encode_message(
-    message: Message, model_id: OpenAIResponsesModelId, encode_thoughts: bool
+    message: Message, model_id: OpenAIModelId, encode_thoughts: bool
 ) -> ResponseInputParam:
     """Convert a Mirascope Message to OpenAI Responses input items.
 
@@ -179,8 +179,9 @@ def _encode_message(
 
     if (
         message.role == "assistant"
-        and message.provider == "openai:responses"
-        and message.model_id == model_id
+        and message.provider_id in ("openai", "openai:responses")
+        and message.provider_model_name
+        == model_name(model_id=model_id, api_mode="responses")
         and message.raw_message
         and not encode_thoughts
     ):
@@ -192,11 +193,11 @@ def _encode_message(
     return _encode_user_message(message)
 
 
-def _convert_tool_to_function_tool_param(tool: ToolSchema) -> FunctionToolParam:
+def _convert_tool_to_function_tool_param(tool: AnyToolSchema) -> FunctionToolParam:
     """Convert a Mirascope ToolSchema to OpenAI Responses FunctionToolParam."""
     schema_dict = tool.parameters.model_dump(by_alias=True, exclude_none=True)
     schema_dict["type"] = "object"
-    _shared_utils._ensure_additional_properties_false(schema_dict)
+    _shared_utils.ensure_additional_properties_false(schema_dict)
 
     return FunctionToolParam(
         type="function",
@@ -219,7 +220,7 @@ def _create_strict_response_format(
         ResponseFormatTextJSONSchemaConfigParam for strict structured outputs
     """
     schema = format.schema.copy()
-    _shared_utils._ensure_additional_properties_false(schema)
+    _shared_utils.ensure_additional_properties_false(schema)
 
     response_format: ResponseFormatTextJSONSchemaConfigParam = {
         "type": "json_schema",
@@ -243,23 +244,33 @@ def _compute_reasoning(thinking: bool) -> Reasoning:
 
 def encode_request(
     *,
-    model_id: OpenAIResponsesModelId,
+    model_id: OpenAIModelId,
     messages: Sequence[Message],
-    tools: Sequence[ToolSchema] | BaseToolkit | None,
+    tools: Sequence[AnyToolSchema] | BaseToolkit[AnyToolSchema] | None,
     format: type[FormattableT] | Format[FormattableT] | None,
     params: Params,
 ) -> tuple[Sequence[Message], Format[FormattableT] | None, ResponseCreateKwargs]:
     """Prepares a request for the `OpenAI.responses.create` method."""
+    if model_id.endswith(":completions"):
+        raise FeatureNotSupportedError(
+            feature="completions API",
+            provider_id="openai:responses",
+            model_id=model_id,
+            message=f"Cannot use completions model with responses client: {model_id}",
+        )
+
+    base_model_name = model_name(model_id, None)
+
     kwargs: ResponseCreateKwargs = ResponseCreateKwargs(
         {
-            "model": model_id,
+            "model": base_model_name,
         }
     )
     encode_thoughts = False
 
     with _base_utils.ensure_all_params_accessed(
         params=params,
-        provider="openai:responses",
+        provider_id="openai",
         unsupported_params=["top_k", "seed", "stop_sequences"],
     ) as param_accessor:
         if param_accessor.temperature is not None:
@@ -269,9 +280,9 @@ def encode_request(
         if param_accessor.top_p is not None:
             kwargs["top_p"] = param_accessor.top_p
         if param_accessor.thinking is not None:
-            if model_id in NON_REASONING_MODELS:
+            if base_model_name in NON_REASONING_MODELS:
                 param_accessor.emit_warning_for_unused_param(
-                    "thinking", param_accessor.thinking, "openai:responses", model_id
+                    "thinking", param_accessor.thinking, "openai", model_id
                 )
             else:
                 # Assume model supports reasoning unless explicitly listed as non-reasoning
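The recurring change across these hunks is the new model-id suffix convention: the single `openai` provider now accepts ids like `gpt-4o:responses` or `gpt-4o:completions`, and `model_name` resolves them per call site. Judging only from the call sites above (`model_name(model_id, None)` feeding the `"model"` kwarg, `model_name(model_id=..., api_mode="responses")` in the message-replay check), the helper plausibly behaves like this sketch; the shipped implementation lives in `mirascope/llm/providers/openai/model_id.py` and may differ.

# Hypothetical reconstruction from the call sites in this diff, not the
# shipped helper ("gpt-4o" below is just an example id).
def model_name(model_id: str, api_mode: str | None) -> str:
    """Resolve ids like 'gpt-4o:responses' for a given API mode."""
    base, _, _suffix = model_id.partition(":")
    if api_mode is None:
        return base  # encode_request sends the bare name as the "model" kwarg
    # Decode paths appear to tag messages with the API that served the request,
    # so replayed assistant messages can match model_name(..., "responses").
    return f"{base}:{api_mode}"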
mirascope/llm/providers/openai/responses/provider.py (new file)

@@ -0,0 +1,470 @@
+"""OpenAI Responses API client implementation."""
+
+from collections.abc import Sequence
+from typing import Literal
+from typing_extensions import Unpack
+
+from openai import AsyncOpenAI, OpenAI
+
+from ....context import Context, DepsT
+from ....formatting import Format, FormattableT
+from ....messages import Message
+from ....responses import (
+    AsyncContextResponse,
+    AsyncContextStreamResponse,
+    AsyncResponse,
+    AsyncStreamResponse,
+    ContextResponse,
+    ContextStreamResponse,
+    Response,
+    StreamResponse,
+)
+from ....tools import (
+    AsyncContextTool,
+    AsyncContextToolkit,
+    AsyncTool,
+    AsyncToolkit,
+    ContextTool,
+    ContextToolkit,
+    Tool,
+    Toolkit,
+)
+from ...base import BaseProvider, Params
+from ..model_id import OpenAIModelId, model_name
+from . import _utils
+
+
+class OpenAIResponsesProvider(BaseProvider[OpenAI]):
+    """The client for the OpenAI Responses API."""
+
+    id = "openai:responses"
+    default_scope = "openai/"
+
+    def __init__(
+        self,
+        *,
+        api_key: str | None = None,
+        base_url: str | None = None,
+        wrapped_by_openai_provider: bool = False,
+    ) -> None:
+        """Initialize the OpenAI Responses client."""
+        self.client = OpenAI(api_key=api_key, base_url=base_url)
+        self.async_client = AsyncOpenAI(api_key=api_key, base_url=base_url)
+        self.active_provider_id: Literal["openai", "openai:responses"] = (
+            "openai" if wrapped_by_openai_provider else "openai:responses"
+        )
+
+    def _call(
+        self,
+        *,
+        model_id: OpenAIModelId,
+        messages: Sequence[Message],
+        tools: Sequence[Tool] | Toolkit | None = None,
+        format: type[FormattableT] | Format[FormattableT] | None = None,
+        **params: Unpack[Params],
+    ) -> Response | Response[FormattableT]:
+        """Generate an `llm.Response` by synchronously calling the OpenAI Responses API.
+
+        Args:
+            model_id: Model identifier to use.
+            messages: Messages to send to the LLM.
+            tools: Optional tools that the model may invoke.
+            format: Optional response format specifier.
+            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+        Returns:
+            An `llm.Response` object containing the LLM-generated content.
+        """
+        messages, format, kwargs = _utils.encode_request(
+            model_id=model_id,
+            messages=messages,
+            tools=tools,
+            format=format,
+            params=params,
+        )
+
+        openai_response = self.client.responses.create(**kwargs)
+
+        assistant_message, finish_reason = _utils.decode_response(
+            openai_response, model_id, self.active_provider_id
+        )
+        provider_model_name = model_name(model_id, "responses")
+
+        return Response(
+            raw=openai_response,
+            provider_id=self.active_provider_id,
+            model_id=model_id,
+            provider_model_name=provider_model_name,
+            params=params,
+            tools=tools,
+            input_messages=messages,
+            assistant_message=assistant_message,
+            finish_reason=finish_reason,
+            format=format,
+        )
+
+    async def _call_async(
+        self,
+        *,
+        model_id: OpenAIModelId,
+        messages: Sequence[Message],
+        tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
+        format: type[FormattableT] | Format[FormattableT] | None = None,
+        **params: Unpack[Params],
+    ) -> AsyncResponse | AsyncResponse[FormattableT]:
+        """Generate an `llm.AsyncResponse` by asynchronously calling the OpenAI Responses API.
+
+        Args:
+            model_id: Model identifier to use.
+            messages: Messages to send to the LLM.
+            tools: Optional tools that the model may invoke.
+            format: Optional response format specifier.
+            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+        Returns:
+            An `llm.AsyncResponse` object containing the LLM-generated content.
+        """
+        messages, format, kwargs = _utils.encode_request(
+            model_id=model_id,
+            messages=messages,
+            tools=tools,
+            format=format,
+            params=params,
+        )
+
+        openai_response = await self.async_client.responses.create(**kwargs)
+
+        assistant_message, finish_reason = _utils.decode_response(
+            openai_response, model_id, self.active_provider_id
+        )
+        provider_model_name = model_name(model_id, "responses")
+
+        return AsyncResponse(
+            raw=openai_response,
+            provider_id=self.active_provider_id,
+            model_id=model_id,
+            provider_model_name=provider_model_name,
+            params=params,
+            tools=tools,
+            input_messages=messages,
+            assistant_message=assistant_message,
+            finish_reason=finish_reason,
+            format=format,
+        )
+
+    def _stream(
+        self,
+        *,
+        model_id: OpenAIModelId,
+        messages: Sequence[Message],
+        tools: Sequence[Tool] | Toolkit | None = None,
+        format: type[FormattableT] | Format[FormattableT] | None = None,
+        **params: Unpack[Params],
+    ) -> StreamResponse | StreamResponse[FormattableT]:
+        """Generate a `llm.StreamResponse` by synchronously streaming from the OpenAI Responses API.
+
+        Args:
+            model_id: Model identifier to use.
+            messages: Messages to send to the LLM.
+            tools: Optional tools that the model may invoke.
+            format: Optional response format specifier.
+            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+        Returns:
+            A `llm.StreamResponse` object containing the LLM-generated content stream.
+        """
+        messages, format, kwargs = _utils.encode_request(
+            model_id=model_id,
+            messages=messages,
+            tools=tools,
+            format=format,
+            params=params,
+        )
+
+        openai_stream = self.client.responses.create(
+            **kwargs,
+            stream=True,
+        )
+
+        chunk_iterator = _utils.decode_stream(
+            openai_stream,
+        )
+        provider_model_name = model_name(model_id, "responses")
+
+        return StreamResponse(
+            provider_id=self.active_provider_id,
+            model_id=model_id,
+            provider_model_name=provider_model_name,
+            params=params,
+            tools=tools,
+            input_messages=messages,
+            chunk_iterator=chunk_iterator,
+            format=format,
+        )
+
+    async def _stream_async(
+        self,
+        *,
+        model_id: OpenAIModelId,
+        messages: Sequence[Message],
+        tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
+        format: type[FormattableT] | Format[FormattableT] | None = None,
+        **params: Unpack[Params],
+    ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
+        """Generate a `llm.AsyncStreamResponse` by asynchronously streaming from the OpenAI Responses API.
+
+        Args:
+            model_id: Model identifier to use.
+            messages: Messages to send to the LLM.
+            tools: Optional tools that the model may invoke.
+            format: Optional response format specifier.
+            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+        Returns:
+            A `llm.AsyncStreamResponse` object containing the LLM-generated content stream.
+        """
+        messages, format, kwargs = _utils.encode_request(
+            model_id=model_id,
+            messages=messages,
+            tools=tools,
+            format=format,
+            params=params,
+        )
+
+        openai_stream = await self.async_client.responses.create(
+            **kwargs,
+            stream=True,
+        )
+
+        chunk_iterator = _utils.decode_async_stream(
+            openai_stream,
+        )
+        provider_model_name = model_name(model_id, "responses")
+
+        return AsyncStreamResponse(
+            provider_id=self.active_provider_id,
+            model_id=model_id,
+            provider_model_name=provider_model_name,
+            params=params,
+            tools=tools,
+            input_messages=messages,
+            chunk_iterator=chunk_iterator,
+            format=format,
+        )
+
+    def _context_call(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: OpenAIModelId,
+        messages: Sequence[Message],
+        tools: Sequence[Tool | ContextTool[DepsT]]
+        | ContextToolkit[DepsT]
+        | None = None,
+        format: type[FormattableT] | Format[FormattableT] | None = None,
+        **params: Unpack[Params],
+    ) -> ContextResponse[DepsT] | ContextResponse[DepsT, FormattableT]:
+        """Generate a `llm.ContextResponse` by synchronously calling the OpenAI Responses API with context.
+
+        Args:
+            ctx: The context object containing dependencies.
+            model_id: Model identifier to use.
+            messages: Messages to send to the LLM.
+            tools: Optional tools that the model may invoke.
+            format: Optional response format specifier.
+            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+        Returns:
+            A `llm.ContextResponse` object containing the LLM-generated content and context.
+        """
+        messages, format, kwargs = _utils.encode_request(
+            model_id=model_id,
+            messages=messages,
+            tools=tools,
+            format=format,
+            params=params,
+        )
+
+        openai_response = self.client.responses.create(**kwargs)
+
+        assistant_message, finish_reason = _utils.decode_response(
+            openai_response, model_id, self.active_provider_id
+        )
+        provider_model_name = model_name(model_id, "responses")
+
+        return ContextResponse(
+            raw=openai_response,
+            provider_id=self.active_provider_id,
+            model_id=model_id,
+            provider_model_name=provider_model_name,
+            params=params,
+            tools=tools,
+            input_messages=messages,
+            assistant_message=assistant_message,
+            finish_reason=finish_reason,
+            format=format,
+        )
+
+    async def _context_call_async(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: OpenAIModelId,
+        messages: Sequence[Message],
+        tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
+        | AsyncContextToolkit[DepsT]
+        | None = None,
+        format: type[FormattableT] | Format[FormattableT] | None = None,
+        **params: Unpack[Params],
+    ) -> AsyncContextResponse[DepsT] | AsyncContextResponse[DepsT, FormattableT]:
+        """Generate a `llm.AsyncContextResponse` by asynchronously calling the OpenAI Responses API with context.
+
+        Args:
+            ctx: The context object containing dependencies.
+            model_id: Model identifier to use.
+            messages: Messages to send to the LLM.
+            tools: Optional tools that the model may invoke.
+            format: Optional response format specifier.
+            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+        Returns:
+            A `llm.AsyncContextResponse` object containing the LLM-generated content and context.
+        """
+        messages, format, kwargs = _utils.encode_request(
+            model_id=model_id,
+            messages=messages,
+            tools=tools,
+            format=format,
+            params=params,
+        )
+
+        openai_response = await self.async_client.responses.create(**kwargs)
+
+        assistant_message, finish_reason = _utils.decode_response(
+            openai_response, model_id, self.active_provider_id
+        )
+        provider_model_name = model_name(model_id, "responses")
+
+        return AsyncContextResponse(
+            raw=openai_response,
+            provider_id=self.active_provider_id,
+            model_id=model_id,
+            provider_model_name=provider_model_name,
+            params=params,
+            tools=tools,
+            input_messages=messages,
+            assistant_message=assistant_message,
+            finish_reason=finish_reason,
+            format=format,
+        )
+
+    def _context_stream(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: OpenAIModelId,
+        messages: Sequence[Message],
+        tools: Sequence[Tool | ContextTool[DepsT]]
+        | ContextToolkit[DepsT]
+        | None = None,
+        format: type[FormattableT] | Format[FormattableT] | None = None,
+        **params: Unpack[Params],
+    ) -> ContextStreamResponse[DepsT] | ContextStreamResponse[DepsT, FormattableT]:
+        """Generate a `llm.ContextStreamResponse` by synchronously streaming from the OpenAI Responses API with context.
+
+        Args:
+            ctx: The context object containing dependencies.
+            model_id: Model identifier to use.
+            messages: Messages to send to the LLM.
+            tools: Optional tools that the model may invoke.
+            format: Optional response format specifier.
+            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+        Returns:
+            A `llm.ContextStreamResponse` object containing the LLM-generated content stream and context.
+        """
+        messages, format, kwargs = _utils.encode_request(
+            model_id=model_id,
+            messages=messages,
+            tools=tools,
+            format=format,
+            params=params,
+        )
+
+        openai_stream = self.client.responses.create(
+            **kwargs,
+            stream=True,
+        )
+
+        chunk_iterator = _utils.decode_stream(
+            openai_stream,
+        )
+        provider_model_name = model_name(model_id, "responses")
+
+        return ContextStreamResponse(
+            provider_id=self.active_provider_id,
+            model_id=model_id,
+            provider_model_name=provider_model_name,
+            params=params,
+            tools=tools,
+            input_messages=messages,
+            chunk_iterator=chunk_iterator,
+            format=format,
+        )
+
+    async def _context_stream_async(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: OpenAIModelId,
+        messages: Sequence[Message],
+        tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
+        | AsyncContextToolkit[DepsT]
+        | None = None,
+        format: type[FormattableT] | Format[FormattableT] | None = None,
+        **params: Unpack[Params],
+    ) -> (
+        AsyncContextStreamResponse[DepsT]
+        | AsyncContextStreamResponse[DepsT, FormattableT]
+    ):
+        """Generate a `llm.AsyncContextStreamResponse` by asynchronously streaming from the OpenAI Responses API with context.
+
+        Args:
+            ctx: The context object containing dependencies.
+            model_id: Model identifier to use.
+            messages: Messages to send to the LLM.
+            tools: Optional tools that the model may invoke.
+            format: Optional response format specifier.
+            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+        Returns:
+            A `llm.AsyncContextStreamResponse` object containing the LLM-generated content stream and context.
+        """
+        messages, format, kwargs = _utils.encode_request(
+            model_id=model_id,
+            messages=messages,
+            tools=tools,
+            format=format,
+            params=params,
+        )
+
+        openai_stream = await self.async_client.responses.create(
+            **kwargs,
+            stream=True,
+        )
+
+        chunk_iterator = _utils.decode_async_stream(
+            openai_stream,
+        )
+        provider_model_name = model_name(model_id, "responses")
+
+        return AsyncContextStreamResponse(
+            provider_id=self.active_provider_id,
+            model_id=model_id,
+            provider_model_name=provider_model_name,
+            params=params,
+            tools=tools,
+            input_messages=messages,
+            chunk_iterator=chunk_iterator,
+            format=format,
+        )
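The provider class itself is mechanical: every entry point encodes the request via `_utils.encode_request`, calls `responses.create`, and decodes into the matching response type. The one behavioral subtlety is the `wrapped_by_openai_provider` flag; the sketch below shows its effect (the constructor shape comes from the hunk above, while the delegation from `OpenAIProvider` is inferred from the flag name and `providers/openai/provider.py` in the file list).

# Sketch of the active_provider_id switch; the constructor eagerly builds
# OpenAI clients, so an api_key (or OPENAI_API_KEY) must be available.
from mirascope.llm.providers.openai.responses.provider import (
    OpenAIResponsesProvider,
)

direct = OpenAIResponsesProvider(api_key="sk-...")
assert direct.active_provider_id == "openai:responses"

# Presumably how OpenAIProvider delegates: responses are then tagged with
# the umbrella "openai" id instead of "openai:responses".
wrapped = OpenAIResponsesProvider(api_key="sk-...", wrapped_by_openai_provider=True)
assert wrapped.active_provider_id == "openai"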
mirascope/llm/{clients → providers}/openai/shared/_utils.py

@@ -1,5 +1,7 @@
 """Shared utils for all OpenAI clients."""
 
+from typing import cast
+
 MODELS_WITHOUT_JSON_SCHEMA_SUPPORT = {
     "chatgpt-4o-latest",
     "gpt-3.5-turbo",
@@ -43,13 +45,15 @@ MODELS_WITHOUT_JSON_OBJECT_SUPPORT = {
 }
 
 
-def _ensure_additional_properties_false(obj: object) -> None:
+def ensure_additional_properties_false(obj: object) -> None:
     """Recursively adds additionalProperties = False to a schema, required by OpenAI API."""
     if isinstance(obj, dict):
+        obj = cast(dict[str, object], obj)
         if obj.get("type") == "object" and "additionalProperties" not in obj:
             obj["additionalProperties"] = False
         for value in obj.values():
-            _ensure_additional_properties_false(value)
+            ensure_additional_properties_false(value)
     elif isinstance(obj, list):
+        obj = cast(list[object], obj)
         for item in obj:
-            _ensure_additional_properties_false(item)
+            ensure_additional_properties_false(item)
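Renaming `_ensure_additional_properties_false` to `ensure_additional_properties_false` makes the helper part of the module's public surface for the encoders above; its in-place behavior is unchanged and fully visible in the hunk, so the following usage should hold:

from mirascope.llm.providers.openai.shared._utils import (
    ensure_additional_properties_false,
)

schema = {
    "type": "object",
    "properties": {"address": {"type": "object", "properties": {}}},
}
ensure_additional_properties_false(schema)
# Every nested object schema now rejects undeclared keys, as OpenAI's
# strict structured outputs require.
assert schema["additionalProperties"] is False
assert schema["properties"]["address"]["additionalProperties"] is False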
mirascope/llm/providers/provider_id.py (new file)

@@ -0,0 +1,13 @@
+"""Identifiers for all registered providers."""
+
+from typing import Literal, TypeAlias, get_args
+
+KnownProviderId: TypeAlias = Literal[
+    "anthropic",  # Anthropic provider via AnthropicProvider
+    "google",  # Google provider via GoogleProvider
+    "openai",  # OpenAI provider via OpenAIProvider
+    "mlx",  # Local inference powered by `mlx-lm`, via MLXProvider
+]
+KNOWN_PROVIDER_IDS = get_args(KnownProviderId)
+
+ProviderId = KnownProviderId | str
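Note that `KnownProviderId | str` is equivalent to plain `str` for a type checker, so the `ProviderId` alias mostly documents intent: known ids get `Literal` checking and completion, while custom ids (presumably registered via the new `provider_registry.py`) remain valid. A usage sketch:

from mirascope.llm.providers.provider_id import KNOWN_PROVIDER_IDS, ProviderId

# get_args preserves the declaration order of the Literal members.
assert KNOWN_PROVIDER_IDS == ("anthropic", "google", "openai", "mlx")

def is_known_provider(provider_id: ProviderId) -> bool:
    return provider_id in KNOWN_PROVIDER_IDS

assert is_known_provider("openai")
assert not is_known_provider("my-custom-provider")  # hypothetical custom id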