mirascope 2.0.0a1__py3-none-any.whl → 2.0.0a3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (205)
  1. mirascope/__init__.py +2 -2
  2. mirascope/api/__init__.py +6 -0
  3. mirascope/api/_generated/README.md +207 -0
  4. mirascope/api/_generated/__init__.py +85 -0
  5. mirascope/api/_generated/client.py +155 -0
  6. mirascope/api/_generated/core/__init__.py +52 -0
  7. mirascope/api/_generated/core/api_error.py +23 -0
  8. mirascope/api/_generated/core/client_wrapper.py +58 -0
  9. mirascope/api/_generated/core/datetime_utils.py +30 -0
  10. mirascope/api/_generated/core/file.py +70 -0
  11. mirascope/api/_generated/core/force_multipart.py +16 -0
  12. mirascope/api/_generated/core/http_client.py +619 -0
  13. mirascope/api/_generated/core/http_response.py +55 -0
  14. mirascope/api/_generated/core/jsonable_encoder.py +102 -0
  15. mirascope/api/_generated/core/pydantic_utilities.py +310 -0
  16. mirascope/api/_generated/core/query_encoder.py +60 -0
  17. mirascope/api/_generated/core/remove_none_from_dict.py +11 -0
  18. mirascope/api/_generated/core/request_options.py +35 -0
  19. mirascope/api/_generated/core/serialization.py +282 -0
  20. mirascope/api/_generated/docs/__init__.py +4 -0
  21. mirascope/api/_generated/docs/client.py +95 -0
  22. mirascope/api/_generated/docs/raw_client.py +132 -0
  23. mirascope/api/_generated/environment.py +9 -0
  24. mirascope/api/_generated/errors/__init__.py +7 -0
  25. mirascope/api/_generated/errors/bad_request_error.py +15 -0
  26. mirascope/api/_generated/health/__init__.py +7 -0
  27. mirascope/api/_generated/health/client.py +96 -0
  28. mirascope/api/_generated/health/raw_client.py +129 -0
  29. mirascope/api/_generated/health/types/__init__.py +8 -0
  30. mirascope/api/_generated/health/types/health_check_response.py +24 -0
  31. mirascope/api/_generated/health/types/health_check_response_status.py +5 -0
  32. mirascope/api/_generated/reference.md +167 -0
  33. mirascope/api/_generated/traces/__init__.py +55 -0
  34. mirascope/api/_generated/traces/client.py +162 -0
  35. mirascope/api/_generated/traces/raw_client.py +168 -0
  36. mirascope/api/_generated/traces/types/__init__.py +95 -0
  37. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item.py +36 -0
  38. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource.py +31 -0
  39. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item.py +25 -0
  40. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value.py +54 -0
  41. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_array_value.py +23 -0
  42. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value.py +28 -0
  43. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value_values_item.py +24 -0
  44. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item.py +35 -0
  45. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope.py +35 -0
  46. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item.py +27 -0
  47. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value.py +54 -0
  48. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_array_value.py +23 -0
  49. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value.py +28 -0
  50. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value_values_item.py +24 -0
  51. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item.py +60 -0
  52. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item.py +29 -0
  53. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value.py +54 -0
  54. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_array_value.py +23 -0
  55. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value.py +28 -0
  56. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value_values_item.py +24 -0
  57. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_status.py +24 -0
  58. mirascope/api/_generated/traces/types/traces_create_response.py +27 -0
  59. mirascope/api/_generated/traces/types/traces_create_response_partial_success.py +28 -0
  60. mirascope/api/_generated/types/__init__.py +21 -0
  61. mirascope/api/_generated/types/http_api_decode_error.py +31 -0
  62. mirascope/api/_generated/types/http_api_decode_error_tag.py +5 -0
  63. mirascope/api/_generated/types/issue.py +44 -0
  64. mirascope/api/_generated/types/issue_tag.py +17 -0
  65. mirascope/api/_generated/types/property_key.py +7 -0
  66. mirascope/api/_generated/types/property_key_tag.py +29 -0
  67. mirascope/api/_generated/types/property_key_tag_tag.py +5 -0
  68. mirascope/api/client.py +255 -0
  69. mirascope/api/settings.py +81 -0
  70. mirascope/llm/__init__.py +41 -11
  71. mirascope/llm/calls/calls.py +81 -57
  72. mirascope/llm/calls/decorator.py +121 -115
  73. mirascope/llm/content/__init__.py +3 -2
  74. mirascope/llm/context/_utils.py +19 -6
  75. mirascope/llm/exceptions.py +30 -16
  76. mirascope/llm/formatting/_utils.py +9 -5
  77. mirascope/llm/formatting/format.py +2 -2
  78. mirascope/llm/formatting/from_call_args.py +2 -2
  79. mirascope/llm/messages/message.py +13 -5
  80. mirascope/llm/models/__init__.py +2 -2
  81. mirascope/llm/models/models.py +189 -81
  82. mirascope/llm/prompts/__init__.py +13 -12
  83. mirascope/llm/prompts/_utils.py +27 -24
  84. mirascope/llm/prompts/decorator.py +133 -204
  85. mirascope/llm/prompts/prompts.py +424 -0
  86. mirascope/llm/prompts/protocols.py +25 -59
  87. mirascope/llm/providers/__init__.py +38 -0
  88. mirascope/llm/{clients → providers}/_missing_import_stubs.py +8 -6
  89. mirascope/llm/providers/anthropic/__init__.py +24 -0
  90. mirascope/llm/{clients → providers}/anthropic/_utils/decode.py +5 -4
  91. mirascope/llm/{clients → providers}/anthropic/_utils/encode.py +31 -10
  92. mirascope/llm/providers/anthropic/model_id.py +40 -0
  93. mirascope/llm/{clients/anthropic/clients.py → providers/anthropic/provider.py} +33 -418
  94. mirascope/llm/{clients → providers}/base/__init__.py +3 -3
  95. mirascope/llm/{clients → providers}/base/_utils.py +10 -7
  96. mirascope/llm/{clients/base/client.py → providers/base/base_provider.py} +255 -126
  97. mirascope/llm/providers/google/__init__.py +21 -0
  98. mirascope/llm/{clients → providers}/google/_utils/decode.py +6 -4
  99. mirascope/llm/{clients → providers}/google/_utils/encode.py +30 -24
  100. mirascope/llm/providers/google/model_id.py +28 -0
  101. mirascope/llm/providers/google/provider.py +438 -0
  102. mirascope/llm/providers/load_provider.py +48 -0
  103. mirascope/llm/providers/mlx/__init__.py +24 -0
  104. mirascope/llm/providers/mlx/_utils.py +107 -0
  105. mirascope/llm/providers/mlx/encoding/__init__.py +8 -0
  106. mirascope/llm/providers/mlx/encoding/base.py +69 -0
  107. mirascope/llm/providers/mlx/encoding/transformers.py +131 -0
  108. mirascope/llm/providers/mlx/mlx.py +237 -0
  109. mirascope/llm/providers/mlx/model_id.py +17 -0
  110. mirascope/llm/providers/mlx/provider.py +411 -0
  111. mirascope/llm/providers/model_id.py +16 -0
  112. mirascope/llm/providers/openai/__init__.py +6 -0
  113. mirascope/llm/providers/openai/completions/__init__.py +20 -0
  114. mirascope/llm/{clients/openai/responses → providers/openai/completions}/_utils/__init__.py +2 -0
  115. mirascope/llm/{clients → providers}/openai/completions/_utils/decode.py +5 -3
  116. mirascope/llm/{clients → providers}/openai/completions/_utils/encode.py +33 -23
  117. mirascope/llm/providers/openai/completions/provider.py +456 -0
  118. mirascope/llm/providers/openai/model_id.py +31 -0
  119. mirascope/llm/providers/openai/model_info.py +246 -0
  120. mirascope/llm/providers/openai/provider.py +386 -0
  121. mirascope/llm/providers/openai/responses/__init__.py +21 -0
  122. mirascope/llm/{clients → providers}/openai/responses/_utils/decode.py +5 -3
  123. mirascope/llm/{clients → providers}/openai/responses/_utils/encode.py +28 -17
  124. mirascope/llm/providers/openai/responses/provider.py +470 -0
  125. mirascope/llm/{clients → providers}/openai/shared/_utils.py +7 -3
  126. mirascope/llm/providers/provider_id.py +13 -0
  127. mirascope/llm/providers/provider_registry.py +167 -0
  128. mirascope/llm/responses/base_response.py +10 -5
  129. mirascope/llm/responses/base_stream_response.py +10 -5
  130. mirascope/llm/responses/response.py +24 -13
  131. mirascope/llm/responses/root_response.py +7 -12
  132. mirascope/llm/responses/stream_response.py +35 -23
  133. mirascope/llm/tools/__init__.py +9 -2
  134. mirascope/llm/tools/_utils.py +12 -3
  135. mirascope/llm/tools/decorator.py +10 -10
  136. mirascope/llm/tools/protocols.py +4 -4
  137. mirascope/llm/tools/tool_schema.py +44 -9
  138. mirascope/llm/tools/tools.py +12 -11
  139. mirascope/ops/__init__.py +156 -0
  140. mirascope/ops/_internal/__init__.py +5 -0
  141. mirascope/ops/_internal/closure.py +1118 -0
  142. mirascope/ops/_internal/configuration.py +126 -0
  143. mirascope/ops/_internal/context.py +76 -0
  144. mirascope/ops/_internal/exporters/__init__.py +26 -0
  145. mirascope/ops/_internal/exporters/exporters.py +342 -0
  146. mirascope/ops/_internal/exporters/processors.py +104 -0
  147. mirascope/ops/_internal/exporters/types.py +165 -0
  148. mirascope/ops/_internal/exporters/utils.py +29 -0
  149. mirascope/ops/_internal/instrumentation/__init__.py +8 -0
  150. mirascope/ops/_internal/instrumentation/llm/__init__.py +8 -0
  151. mirascope/ops/_internal/instrumentation/llm/encode.py +238 -0
  152. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/__init__.py +38 -0
  153. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_input_messages.py +31 -0
  154. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_output_messages.py +38 -0
  155. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_system_instructions.py +18 -0
  156. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/shared.py +100 -0
  157. mirascope/ops/_internal/instrumentation/llm/llm.py +1288 -0
  158. mirascope/ops/_internal/propagation.py +198 -0
  159. mirascope/ops/_internal/protocols.py +51 -0
  160. mirascope/ops/_internal/session.py +139 -0
  161. mirascope/ops/_internal/spans.py +232 -0
  162. mirascope/ops/_internal/traced_calls.py +371 -0
  163. mirascope/ops/_internal/traced_functions.py +394 -0
  164. mirascope/ops/_internal/tracing.py +276 -0
  165. mirascope/ops/_internal/types.py +13 -0
  166. mirascope/ops/_internal/utils.py +75 -0
  167. mirascope/ops/_internal/versioned_calls.py +512 -0
  168. mirascope/ops/_internal/versioned_functions.py +346 -0
  169. mirascope/ops/_internal/versioning.py +303 -0
  170. mirascope/ops/exceptions.py +21 -0
  171. {mirascope-2.0.0a1.dist-info → mirascope-2.0.0a3.dist-info}/METADATA +77 -1
  172. mirascope-2.0.0a3.dist-info/RECORD +206 -0
  173. {mirascope-2.0.0a1.dist-info → mirascope-2.0.0a3.dist-info}/WHEEL +1 -1
  174. mirascope/graphs/__init__.py +0 -22
  175. mirascope/graphs/finite_state_machine.py +0 -625
  176. mirascope/llm/agents/__init__.py +0 -15
  177. mirascope/llm/agents/agent.py +0 -97
  178. mirascope/llm/agents/agent_template.py +0 -45
  179. mirascope/llm/agents/decorator.py +0 -176
  180. mirascope/llm/calls/base_call.py +0 -33
  181. mirascope/llm/clients/__init__.py +0 -34
  182. mirascope/llm/clients/anthropic/__init__.py +0 -25
  183. mirascope/llm/clients/anthropic/model_ids.py +0 -8
  184. mirascope/llm/clients/google/__init__.py +0 -20
  185. mirascope/llm/clients/google/clients.py +0 -853
  186. mirascope/llm/clients/google/model_ids.py +0 -15
  187. mirascope/llm/clients/openai/__init__.py +0 -25
  188. mirascope/llm/clients/openai/completions/__init__.py +0 -28
  189. mirascope/llm/clients/openai/completions/_utils/model_features.py +0 -81
  190. mirascope/llm/clients/openai/completions/clients.py +0 -833
  191. mirascope/llm/clients/openai/completions/model_ids.py +0 -8
  192. mirascope/llm/clients/openai/responses/__init__.py +0 -26
  193. mirascope/llm/clients/openai/responses/_utils/model_features.py +0 -87
  194. mirascope/llm/clients/openai/responses/clients.py +0 -832
  195. mirascope/llm/clients/openai/responses/model_ids.py +0 -8
  196. mirascope/llm/clients/providers.py +0 -175
  197. mirascope-2.0.0a1.dist-info/RECORD +0 -102
  198. /mirascope/llm/{clients → providers}/anthropic/_utils/__init__.py +0 -0
  199. /mirascope/llm/{clients → providers}/base/kwargs.py +0 -0
  200. /mirascope/llm/{clients → providers}/base/params.py +0 -0
  201. /mirascope/llm/{clients → providers}/google/_utils/__init__.py +0 -0
  202. /mirascope/llm/{clients → providers}/google/message.py +0 -0
  203. /mirascope/llm/{clients/openai/completions → providers/openai/responses}/_utils/__init__.py +0 -0
  204. /mirascope/llm/{clients → providers}/openai/shared/__init__.py +0 -0
  205. {mirascope-2.0.0a1.dist-info → mirascope-2.0.0a3.dist-info}/licenses/LICENSE +0 -0
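The dominant structural change in this release is the rename of `mirascope.llm.clients` to `mirascope.llm.providers`: each per-provider `clients.py` becomes a `provider.py` built on the new `BaseProvider` base class, alongside new registry and loader modules (`provider_registry.py`, `load_provider.py`). A minimal sketch of the resulting import layout, with paths inferred from the file list above (higher-level re-exports may also exist):

    # Layout implied by the renames above; paths are inferred, not confirmed.
    from mirascope.llm.providers.base import BaseProvider, Params
    from mirascope.llm.providers.openai.completions.provider import (
        OpenAICompletionsProvider,  # reproduced in full below
    )

Two of the newly added files are reproduced below.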
mirascope/llm/providers/openai/completions/provider.py
@@ -0,0 +1,456 @@
+ """OpenAI Chat Completions provider implementation."""
+
+ from collections.abc import Sequence
+ from typing import Literal
+ from typing_extensions import Unpack
+
+ from openai import AsyncOpenAI, OpenAI
+
+ from ....context import Context, DepsT
+ from ....formatting import Format, FormattableT
+ from ....messages import Message
+ from ....responses import (
+     AsyncContextResponse,
+     AsyncContextStreamResponse,
+     AsyncResponse,
+     AsyncStreamResponse,
+     ContextResponse,
+     ContextStreamResponse,
+     Response,
+     StreamResponse,
+ )
+ from ....tools import (
+     AsyncContextTool,
+     AsyncContextToolkit,
+     AsyncTool,
+     AsyncToolkit,
+     ContextTool,
+     ContextToolkit,
+     Tool,
+     Toolkit,
+ )
+ from ...base import BaseProvider, Params
+ from ..model_id import OpenAIModelId, model_name
+ from . import _utils
+
+
+ class OpenAICompletionsProvider(BaseProvider[OpenAI]):
+     """The provider for the OpenAI Chat Completions API."""
+
+     id = "openai:completions"
+     default_scope = "openai/"
+
+     def __init__(
+         self,
+         *,
+         api_key: str | None = None,
+         base_url: str | None = None,
+         wrapped_by_openai_provider: bool = False,
+     ) -> None:
+         """Initialize the underlying sync and async OpenAI clients."""
+         self.client = OpenAI(api_key=api_key, base_url=base_url)
+         self.async_client = AsyncOpenAI(api_key=api_key, base_url=base_url)
+         self.active_provider_id: Literal["openai", "openai:completions"] = (
+             "openai" if wrapped_by_openai_provider else "openai:completions"
+         )
+
+     def _call(
+         self,
+         *,
+         model_id: OpenAIModelId,
+         messages: Sequence[Message],
+         tools: Sequence[Tool] | Toolkit | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None = None,
+         **params: Unpack[Params],
+     ) -> Response | Response[FormattableT]:
+         """Generate an `llm.Response` by synchronously calling the OpenAI Chat Completions API.
+
+         Args:
+             model_id: Model identifier to use.
+             messages: Messages to send to the LLM.
+             tools: Optional tools that the model may invoke.
+             format: Optional response format specifier.
+             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+         Returns:
+             An `llm.Response` object containing the LLM-generated content.
+         """
+         input_messages, format, kwargs = _utils.encode_request(
+             model_id=model_id,
+             messages=messages,
+             tools=tools,
+             format=format,
+             params=params,
+         )
+
+         openai_response = self.client.chat.completions.create(**kwargs)
+
+         assistant_message, finish_reason = _utils.decode_response(
+             openai_response, model_id, self.active_provider_id
+         )
+
+         return Response(
+             raw=openai_response,
+             provider_id=self.active_provider_id,
+             model_id=model_id,
+             provider_model_name=model_name(model_id, "completions"),
+             params=params,
+             tools=tools,
+             input_messages=input_messages,
+             assistant_message=assistant_message,
+             finish_reason=finish_reason,
+             format=format,
+         )
+
+     def _context_call(
+         self,
+         *,
+         ctx: Context[DepsT],
+         model_id: OpenAIModelId,
+         messages: Sequence[Message],
+         tools: Sequence[Tool | ContextTool[DepsT]]
+         | ContextToolkit[DepsT]
+         | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None = None,
+         **params: Unpack[Params],
+     ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
+         """Generate an `llm.ContextResponse` by synchronously calling the OpenAI Chat Completions API.
+
+         Args:
+             ctx: Context object with dependencies for tools.
+             model_id: Model identifier to use.
+             messages: Messages to send to the LLM.
+             tools: Optional tools that the model may invoke.
+             format: Optional response format specifier.
+             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+         Returns:
+             An `llm.ContextResponse` object containing the LLM-generated content.
+         """
+         input_messages, format, kwargs = _utils.encode_request(
+             model_id=model_id,
+             messages=messages,
+             tools=tools,
+             format=format,
+             params=params,
+         )
+
+         openai_response = self.client.chat.completions.create(**kwargs)
+
+         assistant_message, finish_reason = _utils.decode_response(
+             openai_response, model_id, self.active_provider_id
+         )
+
+         return ContextResponse(
+             raw=openai_response,
+             provider_id=self.active_provider_id,
+             model_id=model_id,
+             provider_model_name=model_name(model_id, "completions"),
+             params=params,
+             tools=tools,
+             input_messages=input_messages,
+             assistant_message=assistant_message,
+             finish_reason=finish_reason,
+             format=format,
+         )
+
+     async def _call_async(
+         self,
+         *,
+         model_id: OpenAIModelId,
+         messages: Sequence[Message],
+         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None = None,
+         **params: Unpack[Params],
+     ) -> AsyncResponse | AsyncResponse[FormattableT]:
+         """Generate an `llm.AsyncResponse` by asynchronously calling the OpenAI Chat Completions API.
+
+         Args:
+             model_id: Model identifier to use.
+             messages: Messages to send to the LLM.
+             tools: Optional tools that the model may invoke.
+             format: Optional response format specifier.
+             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+         Returns:
+             An `llm.AsyncResponse` object containing the LLM-generated content.
+         """
+
+         input_messages, format, kwargs = _utils.encode_request(
+             model_id=model_id,
+             params=params,
+             messages=messages,
+             tools=tools,
+             format=format,
+         )
+
+         openai_response = await self.async_client.chat.completions.create(**kwargs)
+
+         assistant_message, finish_reason = _utils.decode_response(
+             openai_response, model_id, self.active_provider_id
+         )
+
+         return AsyncResponse(
+             raw=openai_response,
+             provider_id=self.active_provider_id,
+             model_id=model_id,
+             provider_model_name=model_name(model_id, "completions"),
+             params=params,
+             tools=tools,
+             input_messages=input_messages,
+             assistant_message=assistant_message,
+             finish_reason=finish_reason,
+             format=format,
+         )
+
+     async def _context_call_async(
+         self,
+         *,
+         ctx: Context[DepsT],
+         model_id: OpenAIModelId,
+         messages: Sequence[Message],
+         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
+         | AsyncContextToolkit[DepsT]
+         | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None = None,
+         **params: Unpack[Params],
+     ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
+         """Generate an `llm.AsyncContextResponse` by asynchronously calling the OpenAI Chat Completions API.
+
+         Args:
+             ctx: Context object with dependencies for tools.
+             model_id: Model identifier to use.
+             messages: Messages to send to the LLM.
+             tools: Optional tools that the model may invoke.
+             format: Optional response format specifier.
+             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+         Returns:
+             An `llm.AsyncContextResponse` object containing the LLM-generated content.
+         """
+         input_messages, format, kwargs = _utils.encode_request(
+             model_id=model_id,
+             params=params,
+             messages=messages,
+             tools=tools,
+             format=format,
+         )
+
+         openai_response = await self.async_client.chat.completions.create(**kwargs)
+
+         assistant_message, finish_reason = _utils.decode_response(
+             openai_response, model_id, self.active_provider_id
+         )
+
+         return AsyncContextResponse(
+             raw=openai_response,
+             provider_id=self.active_provider_id,
+             model_id=model_id,
+             provider_model_name=model_name(model_id, "completions"),
+             params=params,
+             tools=tools,
+             input_messages=input_messages,
+             assistant_message=assistant_message,
+             finish_reason=finish_reason,
+             format=format,
+         )
+
+     def _stream(
+         self,
+         *,
+         model_id: OpenAIModelId,
+         messages: Sequence[Message],
+         tools: Sequence[Tool] | Toolkit | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None = None,
+         **params: Unpack[Params],
+     ) -> StreamResponse | StreamResponse[FormattableT]:
+         """Generate an `llm.StreamResponse` by synchronously streaming from the OpenAI Chat Completions API.
+
+         Args:
+             model_id: Model identifier to use.
+             messages: Messages to send to the LLM.
+             tools: Optional tools that the model may invoke.
+             format: Optional response format specifier.
+             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+         Returns:
+             An `llm.StreamResponse` object for iterating over the LLM-generated content.
+         """
+         input_messages, format, kwargs = _utils.encode_request(
+             model_id=model_id,
+             messages=messages,
+             tools=tools,
+             format=format,
+             params=params,
+         )
+
+         openai_stream = self.client.chat.completions.create(
+             **kwargs,
+             stream=True,
+         )
+
+         chunk_iterator = _utils.decode_stream(openai_stream)
+
+         return StreamResponse(
+             provider_id=self.active_provider_id,
+             model_id=model_id,
+             provider_model_name=model_name(model_id, "completions"),
+             params=params,
+             tools=tools,
+             input_messages=input_messages,
+             chunk_iterator=chunk_iterator,
+             format=format,
+         )
+
+     def _context_stream(
+         self,
+         *,
+         ctx: Context[DepsT],
+         model_id: OpenAIModelId,
+         messages: Sequence[Message],
+         tools: Sequence[Tool | ContextTool[DepsT]]
+         | ContextToolkit[DepsT]
+         | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None = None,
+         **params: Unpack[Params],
+     ) -> ContextStreamResponse[DepsT] | ContextStreamResponse[DepsT, FormattableT]:
+         """Generate an `llm.ContextStreamResponse` by synchronously streaming from the OpenAI Chat Completions API.
+
+         Args:
+             ctx: Context object with dependencies for tools.
+             model_id: Model identifier to use.
+             messages: Messages to send to the LLM.
+             tools: Optional tools that the model may invoke.
+             format: Optional response format specifier.
+             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+         Returns:
+             An `llm.ContextStreamResponse` object for iterating over the LLM-generated content.
+         """
+         input_messages, format, kwargs = _utils.encode_request(
+             model_id=model_id,
+             messages=messages,
+             tools=tools,
+             format=format,
+             params=params,
+         )
+
+         openai_stream = self.client.chat.completions.create(
+             **kwargs,
+             stream=True,
+         )
+
+         chunk_iterator = _utils.decode_stream(openai_stream)
+
+         return ContextStreamResponse(
+             provider_id=self.active_provider_id,
+             model_id=model_id,
+             provider_model_name=model_name(model_id, "completions"),
+             params=params,
+             tools=tools,
+             input_messages=input_messages,
+             chunk_iterator=chunk_iterator,
+             format=format,
+         )
+
+     async def _stream_async(
+         self,
+         *,
+         model_id: OpenAIModelId,
+         messages: Sequence[Message],
+         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None = None,
+         **params: Unpack[Params],
+     ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
+         """Generate an `llm.AsyncStreamResponse` by asynchronously streaming from the OpenAI Chat Completions API.
+
+         Args:
+             model_id: Model identifier to use.
+             messages: Messages to send to the LLM.
+             tools: Optional tools that the model may invoke.
+             format: Optional response format specifier.
+             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+         Returns:
+             An `llm.AsyncStreamResponse` object for asynchronously iterating over the LLM-generated content.
+         """
+
+         input_messages, format, kwargs = _utils.encode_request(
+             model_id=model_id,
+             messages=messages,
+             tools=tools,
+             format=format,
+             params=params,
+         )
+
+         openai_stream = await self.async_client.chat.completions.create(
+             **kwargs,
+             stream=True,
+         )
+
+         chunk_iterator = _utils.decode_async_stream(openai_stream)
+
+         return AsyncStreamResponse(
+             provider_id=self.active_provider_id,
+             model_id=model_id,
+             provider_model_name=model_name(model_id, "completions"),
+             params=params,
+             tools=tools,
+             input_messages=input_messages,
+             chunk_iterator=chunk_iterator,
+             format=format,
+         )
+
+     async def _context_stream_async(
+         self,
+         *,
+         ctx: Context[DepsT],
+         model_id: OpenAIModelId,
+         messages: Sequence[Message],
+         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
+         | AsyncContextToolkit[DepsT]
+         | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None = None,
+         **params: Unpack[Params],
+     ) -> (
+         AsyncContextStreamResponse[DepsT]
+         | AsyncContextStreamResponse[DepsT, FormattableT]
+     ):
+         """Generate an `llm.AsyncContextStreamResponse` by asynchronously streaming from the OpenAI Chat Completions API.
+
+         Args:
+             ctx: Context object with dependencies for tools.
+             model_id: Model identifier to use.
+             messages: Messages to send to the LLM.
+             tools: Optional tools that the model may invoke.
+             format: Optional response format specifier.
+             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+         Returns:
+             An `llm.AsyncContextStreamResponse` object for asynchronously iterating over the LLM-generated content.
+         """
+         input_messages, format, kwargs = _utils.encode_request(
+             model_id=model_id,
+             messages=messages,
+             tools=tools,
+             format=format,
+             params=params,
+         )
+
+         openai_stream = await self.async_client.chat.completions.create(
+             **kwargs,
+             stream=True,
+         )
+
+         chunk_iterator = _utils.decode_async_stream(openai_stream)
+
+         return AsyncContextStreamResponse(
+             provider_id=self.active_provider_id,
+             model_id=model_id,
+             provider_model_name=model_name(model_id, "completions"),
+             params=params,
+             tools=tools,
+             input_messages=input_messages,
+             chunk_iterator=chunk_iterator,
+             format=format,
+         )
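Every request method above follows the same pipeline: `_utils.encode_request` turns Mirascope messages, tools, format, and params into Chat Completions kwargs; the call goes through the OpenAI SDK (with `stream=True` for the streaming variants); and `_utils.decode_response` or `decode_stream`/`decode_async_stream` maps the result back into Mirascope types. The one construction-time switch is `wrapped_by_openai_provider`, which controls the provider id stamped on every response. A small illustrative sketch of that switch, assuming the class is importable from the module path shown above:

    # Illustrative only: the active_provider_id switch from __init__ above.
    from mirascope.llm.providers.openai.completions.provider import (
        OpenAICompletionsProvider,
    )

    direct = OpenAICompletionsProvider(api_key="sk-...")
    assert direct.active_provider_id == "openai:completions"

    # When wrapped by the umbrella OpenAI provider (providers/openai/provider.py),
    # responses report the generic "openai" id instead.
    wrapped = OpenAICompletionsProvider(api_key="sk-...", wrapped_by_openai_provider=True)
    assert wrapped.active_provider_id == "openai"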
mirascope/llm/providers/openai/model_id.py
@@ -0,0 +1,31 @@
+ """OpenAI model IDs and related utilities."""
+
+ from typing import Literal, TypeAlias, get_args
+
+ from .model_info import OpenAIKnownModels
+
+ OpenAIModelId = OpenAIKnownModels | str
+ """Valid OpenAI model IDs, including API-specific variants."""
+
+ OPENAI_KNOWN_MODELS: set[str] = set(get_args(OpenAIKnownModels))
+
+ ApiMode: TypeAlias = Literal["responses", "completions"]
+
+
+ def model_name(model_id: OpenAIModelId, api_mode: ApiMode | None) -> str:
+     """Extract the OpenAI model name from the model ID.
+
+     Args:
+         model_id: Full model ID (e.g. "openai/gpt-4o").
+         api_mode: API mode to append as a suffix ("responses" or "completions").
+             If None, no suffix is added (just the base model name).
+
+     Returns:
+         Provider-specific model name with the API suffix (e.g. "gpt-4o:responses").
+     """
+     base_name = (
+         model_id.split("/")[1].removesuffix(":responses").removesuffix(":completions")
+     )
+     if api_mode is None:
+         return base_name
+     return f"{base_name}:{api_mode}"
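`model_name` strips any existing `:responses` or `:completions` suffix before applying the requested one, so model IDs can be re-targeted between the two OpenAI APIs. A few worked examples, traced from the implementation above (import path assumed from the file location):

    from mirascope.llm.providers.openai.model_id import model_name

    # No existing suffix: split off the "openai/" scope and append the mode.
    assert model_name("openai/gpt-4o", "completions") == "gpt-4o:completions"

    # An existing suffix is stripped before the requested mode is applied.
    assert model_name("openai/gpt-4o:responses", "completions") == "gpt-4o:completions"

    # With api_mode=None, only the bare model name remains.
    assert model_name("openai/gpt-4o:responses", None) == "gpt-4o"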