mirascope 2.0.0a2__py3-none-any.whl → 2.0.0a4__py3-none-any.whl

Files changed (252)
  1. mirascope/__init__.py +2 -2
  2. mirascope/api/__init__.py +6 -0
  3. mirascope/api/_generated/README.md +207 -0
  4. mirascope/api/_generated/__init__.py +141 -0
  5. mirascope/api/_generated/client.py +163 -0
  6. mirascope/api/_generated/core/__init__.py +52 -0
  7. mirascope/api/_generated/core/api_error.py +23 -0
  8. mirascope/api/_generated/core/client_wrapper.py +58 -0
  9. mirascope/api/_generated/core/datetime_utils.py +30 -0
  10. mirascope/api/_generated/core/file.py +70 -0
  11. mirascope/api/_generated/core/force_multipart.py +16 -0
  12. mirascope/api/_generated/core/http_client.py +619 -0
  13. mirascope/api/_generated/core/http_response.py +55 -0
  14. mirascope/api/_generated/core/jsonable_encoder.py +102 -0
  15. mirascope/api/_generated/core/pydantic_utilities.py +310 -0
  16. mirascope/api/_generated/core/query_encoder.py +60 -0
  17. mirascope/api/_generated/core/remove_none_from_dict.py +11 -0
  18. mirascope/api/_generated/core/request_options.py +35 -0
  19. mirascope/api/_generated/core/serialization.py +282 -0
  20. mirascope/api/_generated/docs/__init__.py +4 -0
  21. mirascope/api/_generated/docs/client.py +95 -0
  22. mirascope/api/_generated/docs/raw_client.py +132 -0
  23. mirascope/api/_generated/environment.py +9 -0
  24. mirascope/api/_generated/errors/__init__.py +17 -0
  25. mirascope/api/_generated/errors/bad_request_error.py +15 -0
  26. mirascope/api/_generated/errors/conflict_error.py +15 -0
  27. mirascope/api/_generated/errors/forbidden_error.py +15 -0
  28. mirascope/api/_generated/errors/internal_server_error.py +15 -0
  29. mirascope/api/_generated/errors/not_found_error.py +15 -0
  30. mirascope/api/_generated/health/__init__.py +7 -0
  31. mirascope/api/_generated/health/client.py +96 -0
  32. mirascope/api/_generated/health/raw_client.py +129 -0
  33. mirascope/api/_generated/health/types/__init__.py +8 -0
  34. mirascope/api/_generated/health/types/health_check_response.py +24 -0
  35. mirascope/api/_generated/health/types/health_check_response_status.py +5 -0
  36. mirascope/api/_generated/organizations/__init__.py +25 -0
  37. mirascope/api/_generated/organizations/client.py +380 -0
  38. mirascope/api/_generated/organizations/raw_client.py +876 -0
  39. mirascope/api/_generated/organizations/types/__init__.py +23 -0
  40. mirascope/api/_generated/organizations/types/organizations_create_response.py +24 -0
  41. mirascope/api/_generated/organizations/types/organizations_create_response_role.py +7 -0
  42. mirascope/api/_generated/organizations/types/organizations_get_response.py +24 -0
  43. mirascope/api/_generated/organizations/types/organizations_get_response_role.py +7 -0
  44. mirascope/api/_generated/organizations/types/organizations_list_response_item.py +24 -0
  45. mirascope/api/_generated/organizations/types/organizations_list_response_item_role.py +7 -0
  46. mirascope/api/_generated/organizations/types/organizations_update_response.py +24 -0
  47. mirascope/api/_generated/organizations/types/organizations_update_response_role.py +7 -0
  48. mirascope/api/_generated/projects/__init__.py +17 -0
  49. mirascope/api/_generated/projects/client.py +458 -0
  50. mirascope/api/_generated/projects/raw_client.py +1016 -0
  51. mirascope/api/_generated/projects/types/__init__.py +15 -0
  52. mirascope/api/_generated/projects/types/projects_create_response.py +30 -0
  53. mirascope/api/_generated/projects/types/projects_get_response.py +30 -0
  54. mirascope/api/_generated/projects/types/projects_list_response_item.py +30 -0
  55. mirascope/api/_generated/projects/types/projects_update_response.py +30 -0
  56. mirascope/api/_generated/reference.md +753 -0
  57. mirascope/api/_generated/traces/__init__.py +55 -0
  58. mirascope/api/_generated/traces/client.py +162 -0
  59. mirascope/api/_generated/traces/raw_client.py +168 -0
  60. mirascope/api/_generated/traces/types/__init__.py +95 -0
  61. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item.py +36 -0
  62. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource.py +31 -0
  63. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item.py +25 -0
  64. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value.py +54 -0
  65. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_array_value.py +23 -0
  66. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value.py +28 -0
  67. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value_values_item.py +24 -0
  68. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item.py +35 -0
  69. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope.py +35 -0
  70. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item.py +27 -0
  71. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value.py +54 -0
  72. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_array_value.py +23 -0
  73. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value.py +28 -0
  74. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value_values_item.py +24 -0
  75. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item.py +60 -0
  76. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item.py +29 -0
  77. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value.py +54 -0
  78. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_array_value.py +23 -0
  79. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value.py +28 -0
  80. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value_values_item.py +24 -0
  81. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_status.py +24 -0
  82. mirascope/api/_generated/traces/types/traces_create_response.py +27 -0
  83. mirascope/api/_generated/traces/types/traces_create_response_partial_success.py +28 -0
  84. mirascope/api/_generated/types/__init__.py +37 -0
  85. mirascope/api/_generated/types/already_exists_error.py +24 -0
  86. mirascope/api/_generated/types/already_exists_error_tag.py +5 -0
  87. mirascope/api/_generated/types/database_error.py +24 -0
  88. mirascope/api/_generated/types/database_error_tag.py +5 -0
  89. mirascope/api/_generated/types/http_api_decode_error.py +29 -0
  90. mirascope/api/_generated/types/http_api_decode_error_tag.py +5 -0
  91. mirascope/api/_generated/types/issue.py +40 -0
  92. mirascope/api/_generated/types/issue_tag.py +17 -0
  93. mirascope/api/_generated/types/not_found_error_body.py +24 -0
  94. mirascope/api/_generated/types/not_found_error_tag.py +5 -0
  95. mirascope/api/_generated/types/permission_denied_error.py +24 -0
  96. mirascope/api/_generated/types/permission_denied_error_tag.py +7 -0
  97. mirascope/api/_generated/types/property_key.py +7 -0
  98. mirascope/api/_generated/types/property_key_key.py +27 -0
  99. mirascope/api/_generated/types/property_key_key_tag.py +5 -0
  100. mirascope/api/client.py +255 -0
  101. mirascope/api/settings.py +81 -0
  102. mirascope/llm/__init__.py +45 -11
  103. mirascope/llm/calls/calls.py +81 -57
  104. mirascope/llm/calls/decorator.py +121 -115
  105. mirascope/llm/content/__init__.py +3 -2
  106. mirascope/llm/context/_utils.py +19 -6
  107. mirascope/llm/exceptions.py +30 -16
  108. mirascope/llm/formatting/_utils.py +9 -5
  109. mirascope/llm/formatting/format.py +2 -2
  110. mirascope/llm/formatting/from_call_args.py +2 -2
  111. mirascope/llm/messages/message.py +13 -5
  112. mirascope/llm/models/__init__.py +2 -2
  113. mirascope/llm/models/models.py +189 -81
  114. mirascope/llm/prompts/__init__.py +13 -12
  115. mirascope/llm/prompts/_utils.py +27 -24
  116. mirascope/llm/prompts/decorator.py +133 -204
  117. mirascope/llm/prompts/prompts.py +424 -0
  118. mirascope/llm/prompts/protocols.py +25 -59
  119. mirascope/llm/providers/__init__.py +44 -0
  120. mirascope/llm/{clients → providers}/_missing_import_stubs.py +8 -6
  121. mirascope/llm/providers/anthropic/__init__.py +29 -0
  122. mirascope/llm/providers/anthropic/_utils/__init__.py +23 -0
  123. mirascope/llm/providers/anthropic/_utils/beta_decode.py +271 -0
  124. mirascope/llm/providers/anthropic/_utils/beta_encode.py +216 -0
  125. mirascope/llm/{clients → providers}/anthropic/_utils/decode.py +44 -11
  126. mirascope/llm/providers/anthropic/_utils/encode.py +356 -0
  127. mirascope/llm/providers/anthropic/beta_provider.py +322 -0
  128. mirascope/llm/providers/anthropic/model_id.py +23 -0
  129. mirascope/llm/providers/anthropic/model_info.py +87 -0
  130. mirascope/llm/providers/anthropic/provider.py +416 -0
  131. mirascope/llm/{clients → providers}/base/__init__.py +3 -3
  132. mirascope/llm/{clients → providers}/base/_utils.py +25 -8
  133. mirascope/llm/{clients/base/client.py → providers/base/base_provider.py} +255 -126
  134. mirascope/llm/providers/google/__init__.py +21 -0
  135. mirascope/llm/{clients → providers}/google/_utils/decode.py +61 -7
  136. mirascope/llm/{clients → providers}/google/_utils/encode.py +44 -30
  137. mirascope/llm/providers/google/model_id.py +22 -0
  138. mirascope/llm/providers/google/model_info.py +62 -0
  139. mirascope/llm/providers/google/provider.py +442 -0
  140. mirascope/llm/providers/load_provider.py +54 -0
  141. mirascope/llm/providers/mlx/__init__.py +24 -0
  142. mirascope/llm/providers/mlx/_utils.py +129 -0
  143. mirascope/llm/providers/mlx/encoding/__init__.py +8 -0
  144. mirascope/llm/providers/mlx/encoding/base.py +69 -0
  145. mirascope/llm/providers/mlx/encoding/transformers.py +147 -0
  146. mirascope/llm/providers/mlx/mlx.py +237 -0
  147. mirascope/llm/providers/mlx/model_id.py +17 -0
  148. mirascope/llm/providers/mlx/provider.py +415 -0
  149. mirascope/llm/providers/model_id.py +16 -0
  150. mirascope/llm/providers/ollama/__init__.py +19 -0
  151. mirascope/llm/providers/ollama/provider.py +71 -0
  152. mirascope/llm/providers/openai/__init__.py +6 -0
  153. mirascope/llm/providers/openai/completions/__init__.py +25 -0
  154. mirascope/llm/{clients → providers}/openai/completions/_utils/__init__.py +2 -0
  155. mirascope/llm/{clients → providers}/openai/completions/_utils/decode.py +60 -6
  156. mirascope/llm/{clients → providers}/openai/completions/_utils/encode.py +37 -26
  157. mirascope/llm/providers/openai/completions/base_provider.py +513 -0
  158. mirascope/llm/providers/openai/completions/provider.py +22 -0
  159. mirascope/llm/providers/openai/model_id.py +31 -0
  160. mirascope/llm/providers/openai/model_info.py +303 -0
  161. mirascope/llm/providers/openai/provider.py +398 -0
  162. mirascope/llm/providers/openai/responses/__init__.py +21 -0
  163. mirascope/llm/{clients → providers}/openai/responses/_utils/decode.py +59 -6
  164. mirascope/llm/{clients → providers}/openai/responses/_utils/encode.py +34 -23
  165. mirascope/llm/providers/openai/responses/provider.py +469 -0
  166. mirascope/llm/providers/provider_id.py +23 -0
  167. mirascope/llm/providers/provider_registry.py +169 -0
  168. mirascope/llm/providers/together/__init__.py +19 -0
  169. mirascope/llm/providers/together/provider.py +40 -0
  170. mirascope/llm/responses/__init__.py +3 -0
  171. mirascope/llm/responses/base_response.py +14 -5
  172. mirascope/llm/responses/base_stream_response.py +35 -6
  173. mirascope/llm/responses/finish_reason.py +1 -0
  174. mirascope/llm/responses/response.py +33 -13
  175. mirascope/llm/responses/root_response.py +12 -13
  176. mirascope/llm/responses/stream_response.py +35 -23
  177. mirascope/llm/responses/usage.py +95 -0
  178. mirascope/llm/tools/__init__.py +9 -2
  179. mirascope/llm/tools/_utils.py +12 -3
  180. mirascope/llm/tools/protocols.py +4 -4
  181. mirascope/llm/tools/tool_schema.py +44 -9
  182. mirascope/llm/tools/tools.py +10 -9
  183. mirascope/ops/__init__.py +156 -0
  184. mirascope/ops/_internal/__init__.py +5 -0
  185. mirascope/ops/_internal/closure.py +1118 -0
  186. mirascope/ops/_internal/configuration.py +126 -0
  187. mirascope/ops/_internal/context.py +76 -0
  188. mirascope/ops/_internal/exporters/__init__.py +26 -0
  189. mirascope/ops/_internal/exporters/exporters.py +342 -0
  190. mirascope/ops/_internal/exporters/processors.py +104 -0
  191. mirascope/ops/_internal/exporters/types.py +165 -0
  192. mirascope/ops/_internal/exporters/utils.py +29 -0
  193. mirascope/ops/_internal/instrumentation/__init__.py +8 -0
  194. mirascope/ops/_internal/instrumentation/llm/__init__.py +8 -0
  195. mirascope/ops/_internal/instrumentation/llm/encode.py +238 -0
  196. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/__init__.py +38 -0
  197. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_input_messages.py +31 -0
  198. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_output_messages.py +38 -0
  199. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_system_instructions.py +18 -0
  200. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/shared.py +100 -0
  201. mirascope/ops/_internal/instrumentation/llm/llm.py +1288 -0
  202. mirascope/ops/_internal/propagation.py +198 -0
  203. mirascope/ops/_internal/protocols.py +51 -0
  204. mirascope/ops/_internal/session.py +139 -0
  205. mirascope/ops/_internal/spans.py +232 -0
  206. mirascope/ops/_internal/traced_calls.py +371 -0
  207. mirascope/ops/_internal/traced_functions.py +394 -0
  208. mirascope/ops/_internal/tracing.py +276 -0
  209. mirascope/ops/_internal/types.py +13 -0
  210. mirascope/ops/_internal/utils.py +75 -0
  211. mirascope/ops/_internal/versioned_calls.py +512 -0
  212. mirascope/ops/_internal/versioned_functions.py +346 -0
  213. mirascope/ops/_internal/versioning.py +303 -0
  214. mirascope/ops/exceptions.py +21 -0
  215. {mirascope-2.0.0a2.dist-info → mirascope-2.0.0a4.dist-info}/METADATA +78 -3
  216. mirascope-2.0.0a4.dist-info/RECORD +247 -0
  217. {mirascope-2.0.0a2.dist-info → mirascope-2.0.0a4.dist-info}/WHEEL +1 -1
  218. mirascope/graphs/__init__.py +0 -22
  219. mirascope/graphs/finite_state_machine.py +0 -625
  220. mirascope/llm/agents/__init__.py +0 -15
  221. mirascope/llm/agents/agent.py +0 -97
  222. mirascope/llm/agents/agent_template.py +0 -45
  223. mirascope/llm/agents/decorator.py +0 -176
  224. mirascope/llm/calls/base_call.py +0 -33
  225. mirascope/llm/clients/__init__.py +0 -34
  226. mirascope/llm/clients/anthropic/__init__.py +0 -25
  227. mirascope/llm/clients/anthropic/_utils/encode.py +0 -243
  228. mirascope/llm/clients/anthropic/clients.py +0 -819
  229. mirascope/llm/clients/anthropic/model_ids.py +0 -8
  230. mirascope/llm/clients/google/__init__.py +0 -20
  231. mirascope/llm/clients/google/clients.py +0 -853
  232. mirascope/llm/clients/google/model_ids.py +0 -15
  233. mirascope/llm/clients/openai/__init__.py +0 -25
  234. mirascope/llm/clients/openai/completions/__init__.py +0 -28
  235. mirascope/llm/clients/openai/completions/_utils/model_features.py +0 -81
  236. mirascope/llm/clients/openai/completions/clients.py +0 -833
  237. mirascope/llm/clients/openai/completions/model_ids.py +0 -8
  238. mirascope/llm/clients/openai/responses/__init__.py +0 -26
  239. mirascope/llm/clients/openai/responses/_utils/__init__.py +0 -13
  240. mirascope/llm/clients/openai/responses/_utils/model_features.py +0 -87
  241. mirascope/llm/clients/openai/responses/clients.py +0 -832
  242. mirascope/llm/clients/openai/responses/model_ids.py +0 -8
  243. mirascope/llm/clients/openai/shared/__init__.py +0 -7
  244. mirascope/llm/clients/openai/shared/_utils.py +0 -55
  245. mirascope/llm/clients/providers.py +0 -175
  246. mirascope-2.0.0a2.dist-info/RECORD +0 -102
  247. /mirascope/llm/{clients → providers}/base/kwargs.py +0 -0
  248. /mirascope/llm/{clients → providers}/base/params.py +0 -0
  249. /mirascope/llm/{clients/anthropic → providers/google}/_utils/__init__.py +0 -0
  250. /mirascope/llm/{clients → providers}/google/message.py +0 -0
  251. /mirascope/llm/{clients/google → providers/openai/responses}/_utils/__init__.py +0 -0
  252. {mirascope-2.0.0a2.dist-info → mirascope-2.0.0a4.dist-info}/licenses/LICENSE +0 -0
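The headline change in this release is the move from `mirascope.llm.clients` to `mirascope.llm.providers`: the old per-provider client modules (deleted above) are replaced by `Provider` classes plus a registry (`providers/provider_registry.py`, `providers/load_provider.py`). A hedged sketch of what the import migration might look like for downstream code; the old path is taken from the deleted files above, while the registration call is an assumption inferred from an error message in `base_provider.py`, not a confirmed API:

```python
# Migration sketch -- illustrative only; the exact public surface of the new
# package is not shown in this diff.

# 2.0.0a2 (removed): provider integrations lived under mirascope.llm.clients.
# from mirascope.llm.clients.anthropic import ...  # no longer exists in 2.0.0a4

# 2.0.0a4: the same integrations live under mirascope.llm.providers.
from mirascope.llm import providers  # noqa: F401  # new package in this release

# The registry modules suggest providers are now looked up by ID at call time.
# base_provider.py's error message references register_provider(), so a call
# like this plausibly exists (name real, signature assumed):
# providers.register_provider("ollama", base_url="http://localhost:11434/v1")
```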
mirascope/llm/{clients → providers}/openai/completions/_utils/encode.py

@@ -19,11 +19,14 @@ from .....formatting import (
     resolve_format,
 )
 from .....messages import AssistantMessage, Message, UserMessage
-from .....tools import FORMAT_TOOL_NAME, BaseToolkit, ToolSchema
+from .....tools import FORMAT_TOOL_NAME, AnyToolSchema, BaseToolkit
 from ....base import Params, _utils as _base_utils
-from ...shared import _utils as _shared_utils
-from ..model_ids import OpenAICompletionsModelId
-from .model_features import MODEL_FEATURES
+from ...model_id import OpenAIModelId, model_name
+from ...model_info import (
+    MODELS_WITHOUT_AUDIO_SUPPORT,
+    MODELS_WITHOUT_JSON_OBJECT_SUPPORT,
+    MODELS_WITHOUT_JSON_SCHEMA_SUPPORT,
+)
 
 
 class ChatCompletionCreateKwargs(TypedDict, total=False):
@@ -49,7 +52,7 @@ class ChatCompletionCreateKwargs(TypedDict, total=False):
 
 def _encode_user_message(
     message: UserMessage,
-    model_id: OpenAICompletionsModelId,
+    model_id: OpenAIModelId,
 ) -> list[openai_types.ChatCompletionMessageParam]:
     """Convert Mirascope `UserMessage` to a list of OpenAI `ChatCompletionMessageParam`.
 
@@ -98,11 +101,11 @@ def _encode_user_message(
             )
             current_content.append(content)
         elif part.type == "audio":
-            model_status = MODEL_FEATURES.get(model_id)
-            if model_status == "no_audio_support":
+            base_model_name = model_name(model_id, None)
+            if base_model_name in MODELS_WITHOUT_AUDIO_SUPPORT:
                 raise FeatureNotSupportedError(
                     feature="Audio inputs",
-                    provider="openai:completions",
+                    provider_id="openai",
                     message=f"Model '{model_id}' does not support audio inputs.",
                 )
 
@@ -111,7 +114,7 @@ def _encode_user_message(
             if audio_format not in ("wav", "mp3"):
                 raise FeatureNotSupportedError(
                     feature=f"Audio format: {audio_format}",
-                    provider="openai:completions",
+                    provider_id="openai",
                     message="OpenAI only supports 'wav' and 'mp3' audio formats.",
                 )  # pragma: no cover
             audio_content = openai_types.ChatCompletionContentPartInputAudioParam(
@@ -141,13 +144,14 @@ def _encode_user_message(
 
 
 def _encode_assistant_message(
-    message: AssistantMessage, model_id: OpenAICompletionsModelId, encode_thoughts: bool
+    message: AssistantMessage, model_id: OpenAIModelId, encode_thoughts: bool
 ) -> openai_types.ChatCompletionAssistantMessageParam:
     """Convert Mirascope `AssistantMessage` to OpenAI `ChatCompletionAssistantMessageParam`."""
 
     if (
-        message.provider == "openai:completions"
-        and message.model_id == model_id
+        message.provider_id in ("openai", "openai:completions")
+        and message.provider_model_name
+        == model_name(model_id=model_id, api_mode="completions")
         and message.raw_message
         and not encode_thoughts
     ):
@@ -188,7 +192,7 @@ def _encode_assistant_message(
     elif text_params:
         content = text_params
 
-    message_params = {
+    message_params: openai_types.ChatCompletionAssistantMessageParam = {
         "role": "assistant",
         "content": content,
     }
@@ -199,7 +203,7 @@ def _encode_assistant_message(
 
 
 def _encode_message(
-    message: Message, model_id: OpenAICompletionsModelId, encode_thoughts: bool
+    message: Message, model_id: OpenAIModelId, encode_thoughts: bool
 ) -> list[openai_types.ChatCompletionMessageParam]:
     """Convert a Mirascope `Message` to OpenAI `ChatCompletionMessageParam` format.
 
@@ -227,12 +231,12 @@ def _encode_message(
 
 @lru_cache(maxsize=128)
 def _convert_tool_to_tool_param(
-    tool: ToolSchema,
+    tool: AnyToolSchema,
 ) -> openai_types.ChatCompletionToolParam:
     """Convert a single Mirascope `Tool` to OpenAI ChatCompletionToolParam with caching."""
     schema_dict = tool.parameters.model_dump(by_alias=True, exclude_none=True)
     schema_dict["type"] = "object"
-    _shared_utils._ensure_additional_properties_false(schema_dict)
+    _base_utils.ensure_additional_properties_false(schema_dict)
    return openai_types.ChatCompletionToolParam(
        type="function",
        function={
@@ -257,7 +261,7 @@ def _create_strict_response_format(
     """
     schema = format.schema.copy()
 
-    _shared_utils._ensure_additional_properties_false(schema)
+    _base_utils.ensure_additional_properties_false(schema)
 
     json_schema = JSONSchema(
         name=format.name,
@@ -274,23 +278,32 @@ def _create_strict_response_format(
 
 def encode_request(
     *,
-    model_id: OpenAICompletionsModelId,
+    model_id: OpenAIModelId,
     messages: Sequence[Message],
-    tools: Sequence[ToolSchema] | BaseToolkit | None,
+    tools: Sequence[AnyToolSchema] | BaseToolkit[AnyToolSchema] | None,
     format: type[FormattableT] | Format[FormattableT] | None,
     params: Params,
 ) -> tuple[Sequence[Message], Format[FormattableT] | None, ChatCompletionCreateKwargs]:
     """Prepares a request for the `OpenAI.chat.completions.create` method."""
+    if model_id.endswith(":responses"):
+        raise FeatureNotSupportedError(
+            feature="responses API",
+            provider_id="openai:completions",
+            model_id=model_id,
+            message=f"Can't use completions client for responses model: {model_id}",
+        )
+    base_model_name = model_name(model_id, None)
+
     kwargs: ChatCompletionCreateKwargs = ChatCompletionCreateKwargs(
         {
-            "model": model_id,
+            "model": base_model_name,
         }
     )
     encode_thoughts = False
 
     with _base_utils.ensure_all_params_accessed(
         params=params,
-        provider="openai:completions",
+        provider_id="openai",
         unsupported_params=["top_k", "thinking"],
     ) as param_accessor:
         if param_accessor.temperature is not None:
@@ -311,9 +324,7 @@
 
         openai_tools = [_convert_tool_to_tool_param(tool) for tool in tools]
 
-    model_supports_strict = (
-        model_id not in _shared_utils.MODELS_WITHOUT_JSON_SCHEMA_SUPPORT
-    )
+    model_supports_strict = base_model_name not in MODELS_WITHOUT_JSON_SCHEMA_SUPPORT
     default_mode = "strict" if model_supports_strict else "tool"
     format = resolve_format(format, default_mode=default_mode)
     if format is not None:
@@ -321,7 +332,7 @@
         if not model_supports_strict:
             raise FormattingModeNotSupportedError(
                 formatting_mode="strict",
-                provider="openai:completions",
+                provider_id="openai",
                 model_id=model_id,
             )
         kwargs["response_format"] = _create_strict_response_format(format)
@@ -338,7 +349,7 @@
         openai_tools.append(_convert_tool_to_tool_param(format_tool_schema))
     elif (
         format.mode == "json"
-        and model_id not in _shared_utils.MODELS_WITHOUT_JSON_OBJECT_SUPPORT
+        and base_model_name not in MODELS_WITHOUT_JSON_OBJECT_SUPPORT
     ):
         kwargs["response_format"] = {"type": "json_object"}
 
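A pattern repeats through these hunks: error constructors take `provider_id` instead of `provider`, and the per-API model-ID type (`OpenAICompletionsModelId`) collapses into a single `OpenAIModelId` whose optional `:completions`/`:responses` suffix selects the API, with `model_name()` stripping it before feature lookups and before the request itself. A toy re-implementation of that normalization, for illustration only; the real helper lives in `mirascope/llm/providers/openai/model_id.py` and its exact signature and behavior may differ:

```python
# Toy sketch of the suffix normalization implied by this diff; illustrative only.
def model_name(model_id: str, api_mode: str | None) -> str:
    """Strip a trailing API-mode suffix (':completions' or ':responses') if present."""
    # api_mode is accepted for signature parity with the real helper;
    # its actual role is not shown in this diff, so the sketch ignores it.
    for suffix in (":completions", ":responses"):
        if model_id.endswith(suffix):
            return model_id.removesuffix(suffix)
    return model_id  # already a bare model name


assert model_name("gpt-4o:completions", None) == "gpt-4o"
assert model_name("gpt-4o", None) == "gpt-4o"
```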
mirascope/llm/providers/openai/completions/base_provider.py (new file)

@@ -0,0 +1,513 @@
+"""Base class for OpenAI Completions-compatible providers."""
+
+import os
+from collections.abc import Sequence
+from typing import ClassVar
+from typing_extensions import Unpack
+
+from openai import AsyncOpenAI, OpenAI
+
+from ....context import Context, DepsT
+from ....formatting import Format, FormattableT
+from ....messages import Message
+from ....responses import (
+    AsyncContextResponse,
+    AsyncContextStreamResponse,
+    AsyncResponse,
+    AsyncStreamResponse,
+    ContextResponse,
+    ContextStreamResponse,
+    Response,
+    StreamResponse,
+)
+from ....tools import (
+    AsyncContextTool,
+    AsyncContextToolkit,
+    AsyncTool,
+    AsyncToolkit,
+    ContextTool,
+    ContextToolkit,
+    Tool,
+    Toolkit,
+)
+from ...base import BaseProvider, Params
+from ..model_id import model_name as openai_model_name
+from . import _utils
+
+
+class BaseOpenAICompletionsProvider(BaseProvider[OpenAI]):
+    """Base class for providers that use OpenAI Completions-compatible APIs."""
+
+    id: ClassVar[str]
+    default_scope: ClassVar[str | list[str]]
+    default_base_url: ClassVar[str | None] = None
+    api_key_env_var: ClassVar[str]
+    api_key_required: ClassVar[bool] = True
+    provider_name: ClassVar[str | None] = None
+
+    def __init__(
+        self,
+        *,
+        api_key: str | None = None,
+        base_url: str | None = None,
+    ) -> None:
+        """Initialize the OpenAI Completions-compatible provider client."""
+        resolved_api_key = api_key or os.environ.get(self.api_key_env_var)
+
+        if self.api_key_required and not resolved_api_key:
+            name = self.provider_name or self.id.split(":")[0].capitalize()
+            raise ValueError(
+                f"{name} API key is required. "
+                f"Set the {self.api_key_env_var} environment variable "
+                f"or pass the api_key parameter to register_provider()."
+            )
+
+        resolved_base_url = base_url or self.default_base_url
+
+        effective_api_key: str | None = resolved_api_key
+        if resolved_base_url is not None and not effective_api_key:
+            effective_api_key = "not-needed"
+
+        self.client = OpenAI(
+            api_key=effective_api_key,
+            base_url=resolved_base_url,
+        )
+        self.async_client = AsyncOpenAI(
+            api_key=effective_api_key,
+            base_url=resolved_base_url,
+        )
+
+    def _model_name(self, model_id: str) -> str:
+        """Extract the model name to send to the API."""
+        return openai_model_name(model_id, None)
+
+    def _provider_model_name(self, model_id: str) -> str:
+        """Get the model name for tracking in Response."""
+        return self._model_name(model_id)
+
+    def _call(
+        self,
+        *,
+        model_id: str,
+        messages: Sequence[Message],
+        tools: Sequence[Tool] | Toolkit | None = None,
+        format: type[FormattableT] | Format[FormattableT] | None = None,
+        **params: Unpack[Params],
+    ) -> Response | Response[FormattableT]:
+        """Generate an `llm.Response` by synchronously calling the API.
+
+        Args:
+            model_id: Model identifier to use.
+            messages: Messages to send to the LLM.
+            tools: Optional tools that the model may invoke.
+            format: Optional response format specifier.
+            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+        Returns:
+            An `llm.Response` object containing the LLM-generated content.
+        """
+        input_messages, format, kwargs = _utils.encode_request(
+            model_id=model_id,
+            messages=messages,
+            tools=tools,
+            format=format,
+            params=params,
+        )
+        kwargs["model"] = self._model_name(model_id)
+
+        openai_response = self.client.chat.completions.create(**kwargs)
+
+        assistant_message, finish_reason, usage = _utils.decode_response(
+            openai_response,
+            model_id,
+            self.id,
+            self._provider_model_name(model_id),
+        )
+
+        return Response(
+            raw=openai_response,
+            provider_id=self.id,
+            model_id=model_id,
+            provider_model_name=self._provider_model_name(model_id),
+            params=params,
+            tools=tools,
+            input_messages=input_messages,
+            assistant_message=assistant_message,
+            finish_reason=finish_reason,
+            usage=usage,
+            format=format,
+        )
+
+    def _context_call(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: str,
+        messages: Sequence[Message],
+        tools: Sequence[Tool | ContextTool[DepsT]]
+        | ContextToolkit[DepsT]
+        | None = None,
+        format: type[FormattableT] | Format[FormattableT] | None = None,
+        **params: Unpack[Params],
+    ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
+        """Generate an `llm.ContextResponse` by synchronously calling the API.
+
+        Args:
+            ctx: Context object with dependencies for tools.
+            model_id: Model identifier to use.
+            messages: Messages to send to the LLM.
+            tools: Optional tools that the model may invoke.
+            format: Optional response format specifier.
+            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+        Returns:
+            An `llm.ContextResponse` object containing the LLM-generated content.
+        """
+        input_messages, format, kwargs = _utils.encode_request(
+            model_id=model_id,
+            messages=messages,
+            tools=tools,
+            format=format,
+            params=params,
+        )
+        kwargs["model"] = self._model_name(model_id)
+
+        openai_response = self.client.chat.completions.create(**kwargs)
+
+        assistant_message, finish_reason, usage = _utils.decode_response(
+            openai_response,
+            model_id,
+            self.id,
+            self._provider_model_name(model_id),
+        )
+
+        return ContextResponse(
+            raw=openai_response,
+            provider_id=self.id,
+            model_id=model_id,
+            provider_model_name=self._provider_model_name(model_id),
+            params=params,
+            tools=tools,
+            input_messages=input_messages,
+            assistant_message=assistant_message,
+            finish_reason=finish_reason,
+            usage=usage,
+            format=format,
+        )
+
+    async def _call_async(
+        self,
+        *,
+        model_id: str,
+        messages: Sequence[Message],
+        tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
+        format: type[FormattableT] | Format[FormattableT] | None = None,
+        **params: Unpack[Params],
+    ) -> AsyncResponse | AsyncResponse[FormattableT]:
+        """Generate an `llm.AsyncResponse` by asynchronously calling the API.
+
+        Args:
+            model_id: Model identifier to use.
+            messages: Messages to send to the LLM.
+            tools: Optional tools that the model may invoke.
+            format: Optional response format specifier.
+            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+        Returns:
+            An `llm.AsyncResponse` object containing the LLM-generated content.
+        """
+        input_messages, format, kwargs = _utils.encode_request(
+            model_id=model_id,
+            params=params,
+            messages=messages,
+            tools=tools,
+            format=format,
+        )
+        kwargs["model"] = self._model_name(model_id)
+
+        openai_response = await self.async_client.chat.completions.create(**kwargs)
+
+        assistant_message, finish_reason, usage = _utils.decode_response(
+            openai_response,
+            model_id,
+            self.id,
+            self._provider_model_name(model_id),
+        )
+
+        return AsyncResponse(
+            raw=openai_response,
+            provider_id=self.id,
+            model_id=model_id,
+            provider_model_name=self._provider_model_name(model_id),
+            params=params,
+            tools=tools,
+            input_messages=input_messages,
+            assistant_message=assistant_message,
+            finish_reason=finish_reason,
+            usage=usage,
+            format=format,
+        )
+
+    async def _context_call_async(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: str,
+        messages: Sequence[Message],
+        tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
+        | AsyncContextToolkit[DepsT]
+        | None = None,
+        format: type[FormattableT] | Format[FormattableT] | None = None,
+        **params: Unpack[Params],
+    ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
+        """Generate an `llm.AsyncContextResponse` by asynchronously calling the API.
+
+        Args:
+            ctx: Context object with dependencies for tools.
+            model_id: Model identifier to use.
+            messages: Messages to send to the LLM.
+            tools: Optional tools that the model may invoke.
+            format: Optional response format specifier.
+            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+        Returns:
+            An `llm.AsyncContextResponse` object containing the LLM-generated content.
+        """
+        input_messages, format, kwargs = _utils.encode_request(
+            model_id=model_id,
+            params=params,
+            messages=messages,
+            tools=tools,
+            format=format,
+        )
+        kwargs["model"] = self._model_name(model_id)
+
+        openai_response = await self.async_client.chat.completions.create(**kwargs)
+
+        assistant_message, finish_reason, usage = _utils.decode_response(
+            openai_response,
+            model_id,
+            self.id,
+            self._provider_model_name(model_id),
+        )
+
+        return AsyncContextResponse(
+            raw=openai_response,
+            provider_id=self.id,
+            model_id=model_id,
+            provider_model_name=self._provider_model_name(model_id),
+            params=params,
+            tools=tools,
+            input_messages=input_messages,
+            assistant_message=assistant_message,
+            finish_reason=finish_reason,
+            usage=usage,
+            format=format,
+        )
+
+    def _stream(
+        self,
+        *,
+        model_id: str,
+        messages: Sequence[Message],
+        tools: Sequence[Tool] | Toolkit | None = None,
+        format: type[FormattableT] | Format[FormattableT] | None = None,
+        **params: Unpack[Params],
+    ) -> StreamResponse | StreamResponse[FormattableT]:
+        """Generate an `llm.StreamResponse` by synchronously streaming from the API.
+
+        Args:
+            model_id: Model identifier to use.
+            messages: Messages to send to the LLM.
+            tools: Optional tools that the model may invoke.
+            format: Optional response format specifier.
+            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+        Returns:
+            An `llm.StreamResponse` object for iterating over the LLM-generated content.
+        """
+        input_messages, format, kwargs = _utils.encode_request(
+            model_id=model_id,
+            messages=messages,
+            tools=tools,
+            format=format,
+            params=params,
+        )
+        kwargs["model"] = self._model_name(model_id)
+
+        openai_stream = self.client.chat.completions.create(
+            **kwargs,
+            stream=True,
+            stream_options={"include_usage": True},
+        )
+
+        chunk_iterator = _utils.decode_stream(openai_stream)
+
+        return StreamResponse(
+            provider_id=self.id,
+            model_id=model_id,
+            provider_model_name=self._provider_model_name(model_id),
+            params=params,
+            tools=tools,
+            input_messages=input_messages,
+            chunk_iterator=chunk_iterator,
+            format=format,
+        )
+
+    def _context_stream(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: str,
+        messages: Sequence[Message],
+        tools: Sequence[Tool | ContextTool[DepsT]]
+        | ContextToolkit[DepsT]
+        | None = None,
+        format: type[FormattableT] | Format[FormattableT] | None = None,
+        **params: Unpack[Params],
+    ) -> ContextStreamResponse[DepsT] | ContextStreamResponse[DepsT, FormattableT]:
+        """Generate an `llm.ContextStreamResponse` by synchronously streaming from the API.
+
+        Args:
+            ctx: Context object with dependencies for tools.
+            model_id: Model identifier to use.
+            messages: Messages to send to the LLM.
+            tools: Optional tools that the model may invoke.
+            format: Optional response format specifier.
+            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+        Returns:
+            An `llm.ContextStreamResponse` object for iterating over the LLM-generated content.
+        """
+        input_messages, format, kwargs = _utils.encode_request(
+            model_id=model_id,
+            messages=messages,
+            tools=tools,
+            format=format,
+            params=params,
+        )
+        kwargs["model"] = self._model_name(model_id)
+
+        openai_stream = self.client.chat.completions.create(
+            **kwargs,
+            stream=True,
+            stream_options={"include_usage": True},
+        )
+
+        chunk_iterator = _utils.decode_stream(openai_stream)
+
+        return ContextStreamResponse(
+            provider_id=self.id,
+            model_id=model_id,
+            provider_model_name=self._provider_model_name(model_id),
+            params=params,
+            tools=tools,
+            input_messages=input_messages,
+            chunk_iterator=chunk_iterator,
+            format=format,
+        )
+
+    async def _stream_async(
+        self,
+        *,
+        model_id: str,
+        messages: Sequence[Message],
+        tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
+        format: type[FormattableT] | Format[FormattableT] | None = None,
+        **params: Unpack[Params],
+    ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
+        """Generate an `llm.AsyncStreamResponse` by asynchronously streaming from the API.
+
+        Args:
+            model_id: Model identifier to use.
+            messages: Messages to send to the LLM.
+            tools: Optional tools that the model may invoke.
+            format: Optional response format specifier.
+            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+        Returns:
+            An `llm.AsyncStreamResponse` object for iterating over the LLM-generated content.
+        """
+        input_messages, format, kwargs = _utils.encode_request(
+            model_id=model_id,
+            messages=messages,
+            tools=tools,
+            format=format,
+            params=params,
+        )
+        kwargs["model"] = self._model_name(model_id)
+
+        openai_stream = await self.async_client.chat.completions.create(
+            **kwargs,
+            stream=True,
+            stream_options={"include_usage": True},
+        )
+
+        chunk_iterator = _utils.decode_async_stream(openai_stream)
+
+        return AsyncStreamResponse(
+            provider_id=self.id,
+            model_id=model_id,
+            provider_model_name=self._provider_model_name(model_id),
+            params=params,
+            tools=tools,
+            input_messages=input_messages,
+            chunk_iterator=chunk_iterator,
+            format=format,
+        )
+
+    async def _context_stream_async(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: str,
+        messages: Sequence[Message],
+        tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
+        | AsyncContextToolkit[DepsT]
+        | None = None,
+        format: type[FormattableT] | Format[FormattableT] | None = None,
+        **params: Unpack[Params],
+    ) -> (
+        AsyncContextStreamResponse[DepsT]
+        | AsyncContextStreamResponse[DepsT, FormattableT]
+    ):
+        """Generate an `llm.AsyncContextStreamResponse` by asynchronously streaming from the API.
+
+        Args:
+            ctx: Context object with dependencies for tools.
+            model_id: Model identifier to use.
+            messages: Messages to send to the LLM.
+            tools: Optional tools that the model may invoke.
+            format: Optional response format specifier.
+            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+        Returns:
+            An `llm.AsyncContextStreamResponse` object for iterating over the LLM-generated content.
+        """
+        input_messages, format, kwargs = _utils.encode_request(
+            model_id=model_id,
+            messages=messages,
+            tools=tools,
+            format=format,
+            params=params,
+        )
+        kwargs["model"] = self._model_name(model_id)
+
+        openai_stream = await self.async_client.chat.completions.create(
+            **kwargs,
+            stream=True,
+            stream_options={"include_usage": True},
+        )
+
+        chunk_iterator = _utils.decode_async_stream(openai_stream)
+
+        return AsyncContextStreamResponse(
+            provider_id=self.id,
+            model_id=model_id,
+            provider_model_name=self._provider_model_name(model_id),
+            params=params,
+            tools=tools,
+            input_messages=input_messages,
+            chunk_iterator=chunk_iterator,
+            format=format,
+        )
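This base class exists so that thin OpenAI-compatible integrations (this release adds `providers/ollama/provider.py` and `providers/together/provider.py`, at 71 and 40 lines respectively) can be little more than class-attribute declarations. A hedged sketch of such a subclass, assuming nothing beyond the `ClassVar`s defined above; every value below is illustrative and does not reflect the shipped Ollama or Together code:

```python
from typing import ClassVar

# Hypothetical subclass sketch; attribute values are assumptions, not the
# actual Ollama/Together implementations shipped in this release.
class LocalCompletionsProvider(BaseOpenAICompletionsProvider):
    """A provider for a local OpenAI-compatible server (illustrative only)."""

    id: ClassVar[str] = "local"
    default_scope: ClassVar[str | list[str]] = "local"
    # With default_base_url set and api_key_required = False, __init__ above
    # substitutes the placeholder key "not-needed" when no key is supplied.
    default_base_url: ClassVar[str | None] = "http://localhost:8080/v1"
    api_key_env_var: ClassVar[str] = "LOCAL_API_KEY"
    api_key_required: ClassVar[bool] = False
    provider_name: ClassVar[str | None] = "Local"
```

Everything else — the sync/async calls, both streaming paths, and the context variants — is inherited unchanged from `BaseOpenAICompletionsProvider`.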