mirascope 2.0.0a2__py3-none-any.whl → 2.0.0a3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (204)
  1. mirascope/__init__.py +2 -2
  2. mirascope/api/__init__.py +6 -0
  3. mirascope/api/_generated/README.md +207 -0
  4. mirascope/api/_generated/__init__.py +85 -0
  5. mirascope/api/_generated/client.py +155 -0
  6. mirascope/api/_generated/core/__init__.py +52 -0
  7. mirascope/api/_generated/core/api_error.py +23 -0
  8. mirascope/api/_generated/core/client_wrapper.py +58 -0
  9. mirascope/api/_generated/core/datetime_utils.py +30 -0
  10. mirascope/api/_generated/core/file.py +70 -0
  11. mirascope/api/_generated/core/force_multipart.py +16 -0
  12. mirascope/api/_generated/core/http_client.py +619 -0
  13. mirascope/api/_generated/core/http_response.py +55 -0
  14. mirascope/api/_generated/core/jsonable_encoder.py +102 -0
  15. mirascope/api/_generated/core/pydantic_utilities.py +310 -0
  16. mirascope/api/_generated/core/query_encoder.py +60 -0
  17. mirascope/api/_generated/core/remove_none_from_dict.py +11 -0
  18. mirascope/api/_generated/core/request_options.py +35 -0
  19. mirascope/api/_generated/core/serialization.py +282 -0
  20. mirascope/api/_generated/docs/__init__.py +4 -0
  21. mirascope/api/_generated/docs/client.py +95 -0
  22. mirascope/api/_generated/docs/raw_client.py +132 -0
  23. mirascope/api/_generated/environment.py +9 -0
  24. mirascope/api/_generated/errors/__init__.py +7 -0
  25. mirascope/api/_generated/errors/bad_request_error.py +15 -0
  26. mirascope/api/_generated/health/__init__.py +7 -0
  27. mirascope/api/_generated/health/client.py +96 -0
  28. mirascope/api/_generated/health/raw_client.py +129 -0
  29. mirascope/api/_generated/health/types/__init__.py +8 -0
  30. mirascope/api/_generated/health/types/health_check_response.py +24 -0
  31. mirascope/api/_generated/health/types/health_check_response_status.py +5 -0
  32. mirascope/api/_generated/reference.md +167 -0
  33. mirascope/api/_generated/traces/__init__.py +55 -0
  34. mirascope/api/_generated/traces/client.py +162 -0
  35. mirascope/api/_generated/traces/raw_client.py +168 -0
  36. mirascope/api/_generated/traces/types/__init__.py +95 -0
  37. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item.py +36 -0
  38. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource.py +31 -0
  39. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item.py +25 -0
  40. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value.py +54 -0
  41. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_array_value.py +23 -0
  42. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value.py +28 -0
  43. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value_values_item.py +24 -0
  44. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item.py +35 -0
  45. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope.py +35 -0
  46. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item.py +27 -0
  47. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value.py +54 -0
  48. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_array_value.py +23 -0
  49. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value.py +28 -0
  50. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value_values_item.py +24 -0
  51. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item.py +60 -0
  52. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item.py +29 -0
  53. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value.py +54 -0
  54. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_array_value.py +23 -0
  55. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value.py +28 -0
  56. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value_values_item.py +24 -0
  57. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_status.py +24 -0
  58. mirascope/api/_generated/traces/types/traces_create_response.py +27 -0
  59. mirascope/api/_generated/traces/types/traces_create_response_partial_success.py +28 -0
  60. mirascope/api/_generated/types/__init__.py +21 -0
  61. mirascope/api/_generated/types/http_api_decode_error.py +31 -0
  62. mirascope/api/_generated/types/http_api_decode_error_tag.py +5 -0
  63. mirascope/api/_generated/types/issue.py +44 -0
  64. mirascope/api/_generated/types/issue_tag.py +17 -0
  65. mirascope/api/_generated/types/property_key.py +7 -0
  66. mirascope/api/_generated/types/property_key_tag.py +29 -0
  67. mirascope/api/_generated/types/property_key_tag_tag.py +5 -0
  68. mirascope/api/client.py +255 -0
  69. mirascope/api/settings.py +81 -0
  70. mirascope/llm/__init__.py +41 -11
  71. mirascope/llm/calls/calls.py +81 -57
  72. mirascope/llm/calls/decorator.py +121 -115
  73. mirascope/llm/content/__init__.py +3 -2
  74. mirascope/llm/context/_utils.py +19 -6
  75. mirascope/llm/exceptions.py +30 -16
  76. mirascope/llm/formatting/_utils.py +9 -5
  77. mirascope/llm/formatting/format.py +2 -2
  78. mirascope/llm/formatting/from_call_args.py +2 -2
  79. mirascope/llm/messages/message.py +13 -5
  80. mirascope/llm/models/__init__.py +2 -2
  81. mirascope/llm/models/models.py +189 -81
  82. mirascope/llm/prompts/__init__.py +13 -12
  83. mirascope/llm/prompts/_utils.py +27 -24
  84. mirascope/llm/prompts/decorator.py +133 -204
  85. mirascope/llm/prompts/prompts.py +424 -0
  86. mirascope/llm/prompts/protocols.py +25 -59
  87. mirascope/llm/providers/__init__.py +38 -0
  88. mirascope/llm/{clients → providers}/_missing_import_stubs.py +8 -6
  89. mirascope/llm/providers/anthropic/__init__.py +24 -0
  90. mirascope/llm/{clients → providers}/anthropic/_utils/decode.py +5 -4
  91. mirascope/llm/{clients → providers}/anthropic/_utils/encode.py +31 -10
  92. mirascope/llm/providers/anthropic/model_id.py +40 -0
  93. mirascope/llm/{clients/anthropic/clients.py → providers/anthropic/provider.py} +33 -418
  94. mirascope/llm/{clients → providers}/base/__init__.py +3 -3
  95. mirascope/llm/{clients → providers}/base/_utils.py +10 -7
  96. mirascope/llm/{clients/base/client.py → providers/base/base_provider.py} +255 -126
  97. mirascope/llm/providers/google/__init__.py +21 -0
  98. mirascope/llm/{clients → providers}/google/_utils/decode.py +6 -4
  99. mirascope/llm/{clients → providers}/google/_utils/encode.py +30 -24
  100. mirascope/llm/providers/google/model_id.py +28 -0
  101. mirascope/llm/providers/google/provider.py +438 -0
  102. mirascope/llm/providers/load_provider.py +48 -0
  103. mirascope/llm/providers/mlx/__init__.py +24 -0
  104. mirascope/llm/providers/mlx/_utils.py +107 -0
  105. mirascope/llm/providers/mlx/encoding/__init__.py +8 -0
  106. mirascope/llm/providers/mlx/encoding/base.py +69 -0
  107. mirascope/llm/providers/mlx/encoding/transformers.py +131 -0
  108. mirascope/llm/providers/mlx/mlx.py +237 -0
  109. mirascope/llm/providers/mlx/model_id.py +17 -0
  110. mirascope/llm/providers/mlx/provider.py +411 -0
  111. mirascope/llm/providers/model_id.py +16 -0
  112. mirascope/llm/providers/openai/__init__.py +6 -0
  113. mirascope/llm/providers/openai/completions/__init__.py +20 -0
  114. mirascope/llm/{clients/openai/responses → providers/openai/completions}/_utils/__init__.py +2 -0
  115. mirascope/llm/{clients → providers}/openai/completions/_utils/decode.py +5 -3
  116. mirascope/llm/{clients → providers}/openai/completions/_utils/encode.py +33 -23
  117. mirascope/llm/providers/openai/completions/provider.py +456 -0
  118. mirascope/llm/providers/openai/model_id.py +31 -0
  119. mirascope/llm/providers/openai/model_info.py +246 -0
  120. mirascope/llm/providers/openai/provider.py +386 -0
  121. mirascope/llm/providers/openai/responses/__init__.py +21 -0
  122. mirascope/llm/{clients → providers}/openai/responses/_utils/decode.py +5 -3
  123. mirascope/llm/{clients → providers}/openai/responses/_utils/encode.py +28 -17
  124. mirascope/llm/providers/openai/responses/provider.py +470 -0
  125. mirascope/llm/{clients → providers}/openai/shared/_utils.py +7 -3
  126. mirascope/llm/providers/provider_id.py +13 -0
  127. mirascope/llm/providers/provider_registry.py +167 -0
  128. mirascope/llm/responses/base_response.py +10 -5
  129. mirascope/llm/responses/base_stream_response.py +10 -5
  130. mirascope/llm/responses/response.py +24 -13
  131. mirascope/llm/responses/root_response.py +7 -12
  132. mirascope/llm/responses/stream_response.py +35 -23
  133. mirascope/llm/tools/__init__.py +9 -2
  134. mirascope/llm/tools/_utils.py +12 -3
  135. mirascope/llm/tools/protocols.py +4 -4
  136. mirascope/llm/tools/tool_schema.py +44 -9
  137. mirascope/llm/tools/tools.py +10 -9
  138. mirascope/ops/__init__.py +156 -0
  139. mirascope/ops/_internal/__init__.py +5 -0
  140. mirascope/ops/_internal/closure.py +1118 -0
  141. mirascope/ops/_internal/configuration.py +126 -0
  142. mirascope/ops/_internal/context.py +76 -0
  143. mirascope/ops/_internal/exporters/__init__.py +26 -0
  144. mirascope/ops/_internal/exporters/exporters.py +342 -0
  145. mirascope/ops/_internal/exporters/processors.py +104 -0
  146. mirascope/ops/_internal/exporters/types.py +165 -0
  147. mirascope/ops/_internal/exporters/utils.py +29 -0
  148. mirascope/ops/_internal/instrumentation/__init__.py +8 -0
  149. mirascope/ops/_internal/instrumentation/llm/__init__.py +8 -0
  150. mirascope/ops/_internal/instrumentation/llm/encode.py +238 -0
  151. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/__init__.py +38 -0
  152. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_input_messages.py +31 -0
  153. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_output_messages.py +38 -0
  154. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_system_instructions.py +18 -0
  155. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/shared.py +100 -0
  156. mirascope/ops/_internal/instrumentation/llm/llm.py +1288 -0
  157. mirascope/ops/_internal/propagation.py +198 -0
  158. mirascope/ops/_internal/protocols.py +51 -0
  159. mirascope/ops/_internal/session.py +139 -0
  160. mirascope/ops/_internal/spans.py +232 -0
  161. mirascope/ops/_internal/traced_calls.py +371 -0
  162. mirascope/ops/_internal/traced_functions.py +394 -0
  163. mirascope/ops/_internal/tracing.py +276 -0
  164. mirascope/ops/_internal/types.py +13 -0
  165. mirascope/ops/_internal/utils.py +75 -0
  166. mirascope/ops/_internal/versioned_calls.py +512 -0
  167. mirascope/ops/_internal/versioned_functions.py +346 -0
  168. mirascope/ops/_internal/versioning.py +303 -0
  169. mirascope/ops/exceptions.py +21 -0
  170. {mirascope-2.0.0a2.dist-info → mirascope-2.0.0a3.dist-info}/METADATA +76 -1
  171. mirascope-2.0.0a3.dist-info/RECORD +206 -0
  172. {mirascope-2.0.0a2.dist-info → mirascope-2.0.0a3.dist-info}/WHEEL +1 -1
  173. mirascope/graphs/__init__.py +0 -22
  174. mirascope/graphs/finite_state_machine.py +0 -625
  175. mirascope/llm/agents/__init__.py +0 -15
  176. mirascope/llm/agents/agent.py +0 -97
  177. mirascope/llm/agents/agent_template.py +0 -45
  178. mirascope/llm/agents/decorator.py +0 -176
  179. mirascope/llm/calls/base_call.py +0 -33
  180. mirascope/llm/clients/__init__.py +0 -34
  181. mirascope/llm/clients/anthropic/__init__.py +0 -25
  182. mirascope/llm/clients/anthropic/model_ids.py +0 -8
  183. mirascope/llm/clients/google/__init__.py +0 -20
  184. mirascope/llm/clients/google/clients.py +0 -853
  185. mirascope/llm/clients/google/model_ids.py +0 -15
  186. mirascope/llm/clients/openai/__init__.py +0 -25
  187. mirascope/llm/clients/openai/completions/__init__.py +0 -28
  188. mirascope/llm/clients/openai/completions/_utils/model_features.py +0 -81
  189. mirascope/llm/clients/openai/completions/clients.py +0 -833
  190. mirascope/llm/clients/openai/completions/model_ids.py +0 -8
  191. mirascope/llm/clients/openai/responses/__init__.py +0 -26
  192. mirascope/llm/clients/openai/responses/_utils/model_features.py +0 -87
  193. mirascope/llm/clients/openai/responses/clients.py +0 -832
  194. mirascope/llm/clients/openai/responses/model_ids.py +0 -8
  195. mirascope/llm/clients/providers.py +0 -175
  196. mirascope-2.0.0a2.dist-info/RECORD +0 -102
  197. /mirascope/llm/{clients → providers}/anthropic/_utils/__init__.py +0 -0
  198. /mirascope/llm/{clients → providers}/base/kwargs.py +0 -0
  199. /mirascope/llm/{clients → providers}/base/params.py +0 -0
  200. /mirascope/llm/{clients → providers}/google/_utils/__init__.py +0 -0
  201. /mirascope/llm/{clients → providers}/google/message.py +0 -0
  202. /mirascope/llm/{clients/openai/completions → providers/openai/responses}/_utils/__init__.py +0 -0
  203. /mirascope/llm/{clients → providers}/openai/shared/__init__.py +0 -0
  204. {mirascope-2.0.0a2.dist-info → mirascope-2.0.0a3.dist-info}/licenses/LICENSE +0 -0
mirascope/llm/providers/openai/model_info.py
@@ -0,0 +1,246 @@
+ """OpenAI model information.
+
+ This file is auto-generated by scripts/model_features/codegen_openai.py
+ Do not edit manually - run the codegen script to update."""
+
+ from typing import Literal
+
+ OpenAIKnownModels = Literal[
+     "openai/chatgpt-4o-latest",
+     "openai/chatgpt-4o-latest:completions",
+     "openai/chatgpt-4o-latest:responses",
+     "openai/codex-mini-latest",
+     "openai/codex-mini-latest:responses",
+     "openai/gpt-3.5-turbo",
+     "openai/gpt-3.5-turbo:completions",
+     "openai/gpt-3.5-turbo:responses",
+     "openai/gpt-3.5-turbo-0125",
+     "openai/gpt-3.5-turbo-0125:completions",
+     "openai/gpt-3.5-turbo-0125:responses",
+     "openai/gpt-3.5-turbo-1106",
+     "openai/gpt-3.5-turbo-1106:completions",
+     "openai/gpt-3.5-turbo-1106:responses",
+     "openai/gpt-3.5-turbo-16k",
+     "openai/gpt-3.5-turbo-16k:completions",
+     "openai/gpt-4",
+     "openai/gpt-4:completions",
+     "openai/gpt-4:responses",
+     "openai/gpt-4-0125-preview",
+     "openai/gpt-4-0125-preview:completions",
+     "openai/gpt-4-0125-preview:responses",
+     "openai/gpt-4-0613",
+     "openai/gpt-4-0613:completions",
+     "openai/gpt-4-0613:responses",
+     "openai/gpt-4-1106-preview",
+     "openai/gpt-4-1106-preview:completions",
+     "openai/gpt-4-1106-preview:responses",
+     "openai/gpt-4-turbo",
+     "openai/gpt-4-turbo:completions",
+     "openai/gpt-4-turbo:responses",
+     "openai/gpt-4-turbo-2024-04-09",
+     "openai/gpt-4-turbo-2024-04-09:completions",
+     "openai/gpt-4-turbo-2024-04-09:responses",
+     "openai/gpt-4-turbo-preview",
+     "openai/gpt-4-turbo-preview:completions",
+     "openai/gpt-4-turbo-preview:responses",
+     "openai/gpt-4.1",
+     "openai/gpt-4.1:completions",
+     "openai/gpt-4.1:responses",
+     "openai/gpt-4.1-2025-04-14",
+     "openai/gpt-4.1-2025-04-14:completions",
+     "openai/gpt-4.1-2025-04-14:responses",
+     "openai/gpt-4.1-mini",
+     "openai/gpt-4.1-mini:completions",
+     "openai/gpt-4.1-mini:responses",
+     "openai/gpt-4.1-mini-2025-04-14",
+     "openai/gpt-4.1-mini-2025-04-14:completions",
+     "openai/gpt-4.1-mini-2025-04-14:responses",
+     "openai/gpt-4.1-nano",
+     "openai/gpt-4.1-nano:completions",
+     "openai/gpt-4.1-nano:responses",
+     "openai/gpt-4.1-nano-2025-04-14",
+     "openai/gpt-4.1-nano-2025-04-14:completions",
+     "openai/gpt-4.1-nano-2025-04-14:responses",
+     "openai/gpt-4o",
+     "openai/gpt-4o:completions",
+     "openai/gpt-4o:responses",
+     "openai/gpt-4o-2024-05-13",
+     "openai/gpt-4o-2024-05-13:completions",
+     "openai/gpt-4o-2024-05-13:responses",
+     "openai/gpt-4o-2024-08-06",
+     "openai/gpt-4o-2024-08-06:completions",
+     "openai/gpt-4o-2024-08-06:responses",
+     "openai/gpt-4o-2024-11-20",
+     "openai/gpt-4o-2024-11-20:completions",
+     "openai/gpt-4o-2024-11-20:responses",
+     "openai/gpt-4o-mini",
+     "openai/gpt-4o-mini:completions",
+     "openai/gpt-4o-mini:responses",
+     "openai/gpt-4o-mini-2024-07-18",
+     "openai/gpt-4o-mini-2024-07-18:completions",
+     "openai/gpt-4o-mini-2024-07-18:responses",
+     "openai/gpt-4o-mini-search-preview",
+     "openai/gpt-4o-mini-search-preview:completions",
+     "openai/gpt-4o-mini-search-preview-2025-03-11",
+     "openai/gpt-4o-mini-search-preview-2025-03-11:completions",
+     "openai/gpt-4o-search-preview",
+     "openai/gpt-4o-search-preview:completions",
+     "openai/gpt-4o-search-preview-2025-03-11",
+     "openai/gpt-4o-search-preview-2025-03-11:completions",
+     "openai/gpt-5",
+     "openai/gpt-5:completions",
+     "openai/gpt-5:responses",
+     "openai/gpt-5-2025-08-07",
+     "openai/gpt-5-2025-08-07:completions",
+     "openai/gpt-5-2025-08-07:responses",
+     "openai/gpt-5-chat-latest",
+     "openai/gpt-5-chat-latest:completions",
+     "openai/gpt-5-chat-latest:responses",
+     "openai/gpt-5-codex",
+     "openai/gpt-5-codex:responses",
+     "openai/gpt-5-mini",
+     "openai/gpt-5-mini:completions",
+     "openai/gpt-5-mini:responses",
+     "openai/gpt-5-mini-2025-08-07",
+     "openai/gpt-5-mini-2025-08-07:completions",
+     "openai/gpt-5-mini-2025-08-07:responses",
+     "openai/gpt-5-nano",
+     "openai/gpt-5-nano:completions",
+     "openai/gpt-5-nano:responses",
+     "openai/gpt-5-nano-2025-08-07",
+     "openai/gpt-5-nano-2025-08-07:completions",
+     "openai/gpt-5-nano-2025-08-07:responses",
+     "openai/gpt-5-pro",
+     "openai/gpt-5-pro:responses",
+     "openai/gpt-5-pro-2025-10-06",
+     "openai/gpt-5-pro-2025-10-06:responses",
+     "openai/gpt-5-search-api",
+     "openai/gpt-5-search-api:completions",
+     "openai/gpt-5-search-api-2025-10-14",
+     "openai/gpt-5-search-api-2025-10-14:completions",
+     "openai/gpt-5.1",
+     "openai/gpt-5.1:responses",
+     "openai/gpt-5.1-2025-11-13",
+     "openai/gpt-5.1-2025-11-13:responses",
+     "openai/gpt-5.1-chat-latest",
+     "openai/gpt-5.1-chat-latest:completions",
+     "openai/gpt-5.1-chat-latest:responses",
+     "openai/gpt-5.1-codex",
+     "openai/gpt-5.1-codex:responses",
+     "openai/gpt-5.1-codex-max",
+     "openai/gpt-5.1-codex-max:responses",
+     "openai/gpt-5.1-codex-mini",
+     "openai/gpt-5.1-codex-mini:responses",
+     "openai/o1",
+     "openai/o1:completions",
+     "openai/o1:responses",
+     "openai/o1-2024-12-17",
+     "openai/o1-2024-12-17:completions",
+     "openai/o1-2024-12-17:responses",
+     "openai/o1-pro",
+     "openai/o1-pro:responses",
+     "openai/o1-pro-2025-03-19",
+     "openai/o1-pro-2025-03-19:responses",
+     "openai/o3",
+     "openai/o3:completions",
+     "openai/o3:responses",
+     "openai/o3-2025-04-16",
+     "openai/o3-2025-04-16:completions",
+     "openai/o3-2025-04-16:responses",
+     "openai/o3-mini",
+     "openai/o3-mini:completions",
+     "openai/o3-mini:responses",
+     "openai/o3-mini-2025-01-31",
+     "openai/o3-mini-2025-01-31:completions",
+     "openai/o3-mini-2025-01-31:responses",
+     "openai/o3-pro",
+     "openai/o3-pro:responses",
+     "openai/o3-pro-2025-06-10",
+     "openai/o3-pro-2025-06-10:responses",
+     "openai/o4-mini",
+     "openai/o4-mini:completions",
+     "openai/o4-mini:responses",
+     "openai/o4-mini-2025-04-16",
+     "openai/o4-mini-2025-04-16:completions",
+     "openai/o4-mini-2025-04-16:responses",
+ ]
+ """Valid OpenAI model IDs including API-specific variants."""
+
+
+ MODELS_WITHOUT_AUDIO_SUPPORT: set[str] = {
+     "gpt-3.5-turbo",
+     "gpt-3.5-turbo-0125",
+     "gpt-3.5-turbo-1106",
+     "gpt-4",
+     "gpt-4-0125-preview",
+     "gpt-4-0613",
+     "gpt-4-1106-preview",
+     "gpt-4-turbo",
+     "gpt-4-turbo-2024-04-09",
+     "gpt-4-turbo-preview",
+     "gpt-4.1",
+     "gpt-4.1-mini",
+     "gpt-4.1-nano",
+     "gpt-4.1-nano-2025-04-14",
+     "gpt-4o",
+     "gpt-4o-2024-05-13",
+     "gpt-4o-2024-08-06",
+     "gpt-4o-mini",
+     "gpt-4o-mini-2024-07-18",
+     "gpt-4o-mini-search-preview",
+     "gpt-4o-search-preview",
+     "gpt-4o-search-preview-2025-03-11",
+     "gpt-5",
+     "gpt-5-2025-08-07",
+     "gpt-5-chat-latest",
+     "gpt-5-mini",
+     "gpt-5-mini-2025-08-07",
+     "gpt-5-nano",
+     "gpt-5-nano-2025-08-07",
+     "gpt-5-search-api",
+     "gpt-5-search-api-2025-10-14",
+     "gpt-5.1-chat-latest",
+     "o1",
+     "o1-2024-12-17",
+     "o3",
+     "o3-2025-04-16",
+     "o3-mini",
+     "o3-mini-2025-01-31",
+     "o4-mini",
+     "o4-mini-2025-04-16",
+ }
+ """Models that do not support audio inputs.
+
+ Models not in this set are assumed to support audio (optimistic default).
+ """
+
+ NON_REASONING_MODELS: set[str] = {
+     "chatgpt-4o-latest",
+     "gpt-3.5-turbo",
+     "gpt-3.5-turbo-0125",
+     "gpt-3.5-turbo-1106",
+     "gpt-4",
+     "gpt-4-0125-preview",
+     "gpt-4-0613",
+     "gpt-4-1106-preview",
+     "gpt-4-turbo",
+     "gpt-4-turbo-2024-04-09",
+     "gpt-4-turbo-preview",
+     "gpt-4.1",
+     "gpt-4.1-2025-04-14",
+     "gpt-4.1-mini",
+     "gpt-4.1-mini-2025-04-14",
+     "gpt-4.1-nano",
+     "gpt-4.1-nano-2025-04-14",
+     "gpt-4o",
+     "gpt-4o-2024-05-13",
+     "gpt-4o-2024-08-06",
+     "gpt-4o-2024-11-20",
+     "gpt-4o-mini",
+     "gpt-4o-mini-2024-07-18",
+     "gpt-5-chat-latest",
+ }
+ """Models that do not support the reasoning parameter.
+
+ Models not in this set are assumed to support reasoning (optimistic default).
+ """
mirascope/llm/providers/openai/provider.py
@@ -0,0 +1,386 @@
+ """Unified OpenAI client implementation."""
+
+ from collections.abc import Sequence
+ from typing_extensions import Unpack
+
+ from openai import OpenAI
+
+ from ...context import Context, DepsT
+ from ...formatting import Format, FormattableT
+ from ...messages import Message
+ from ...responses import (
+     AsyncContextResponse,
+     AsyncContextStreamResponse,
+     AsyncResponse,
+     AsyncStreamResponse,
+     ContextResponse,
+     ContextStreamResponse,
+     Response,
+     StreamResponse,
+ )
+ from ...tools import (
+     AsyncContextTool,
+     AsyncContextToolkit,
+     AsyncTool,
+     AsyncToolkit,
+     ContextTool,
+     ContextToolkit,
+     Tool,
+     Toolkit,
+ )
+ from ..base import BaseProvider, Params
+ from .completions import OpenAICompletionsProvider
+ from .model_id import OPENAI_KNOWN_MODELS, OpenAIModelId
+ from .responses import OpenAIResponsesProvider
+
+
+ def _has_audio_content(messages: Sequence[Message]) -> bool:
+     """Returns whether a sequence of messages contains any audio content."""
+     for message in messages:
+         if message.role == "system":
+             continue
+         for content in message.content:
+             if content.type == "audio":
+                 return True
+     return False
+
+
+ def choose_api_mode(model_id: OpenAIModelId, messages: Sequence[Message]) -> str:
+     """Choose between 'responses' or 'completions' API based on model_id and messages.
+
+     Args:
+         model_id: The model identifier.
+         messages: The messages to send to the LLM.
+
+     Returns:
+         Either "responses" or "completions" depending on the model and message content.
+
+     If the user manually specified an api mode (by appending it as a suffix to the model
+     id), then we use it.
+
+     Otherwise, we prefer the responses API where supported (because it has better
+     reasoning support and better prompt caching). However we will use the :completions api
+     if the messages contain any audio content, as audio content is not yet supported in
+     the responses API.
+     """
+     if model_id.endswith(":completions"):
+         return "completions"
+     elif model_id.endswith(":responses"):
+         return "responses"
+
+     if _has_audio_content(messages):
+         return "completions"
+
+     if f"{model_id}:responses" in OPENAI_KNOWN_MODELS:
+         # Prefer responses api when we know it is available
+         return "responses"
+     elif f"{model_id}:completions" in OPENAI_KNOWN_MODELS:
+         # If we know from testing that the completions api is available, and
+         # (implied by above) that responses wasn't, then we should use completions
+         return "completions"
+
+     # If we don't have either :responses or :completions in the known_models, it's
+     # likely that this is a new model we haven't tested. We default to responses api for
+     # openai/ models (on the assumption that they are new models and OpenAI prefers
+     # the responses API) but completions for other models (on the assumption that they
+     # are other models routing through the OpenAI completions API)
+     if model_id.startswith("openai/"):
+         return "responses"
+     else:
+         return "completions"
+
+
+ class OpenAIProvider(BaseProvider[OpenAI]):
+     """Unified provider for OpenAI that routes to Completions or Responses API based on model_id."""
+
+     id = "openai"
+     default_scope = "openai/"
+
+     def __init__(
+         self, *, api_key: str | None = None, base_url: str | None = None
+     ) -> None:
+         """Initialize the OpenAI provider with both subclients."""
+         self._completions_provider = OpenAICompletionsProvider(
+             api_key=api_key, base_url=base_url, wrapped_by_openai_provider=True
+         )
+         self._responses_provider = OpenAIResponsesProvider(
+             api_key=api_key, base_url=base_url, wrapped_by_openai_provider=True
+         )
+         # Use completions client's underlying OpenAI client as the main one
+         self.client = self._completions_provider.client
+
+     def _choose_subprovider(
+         self, model_id: OpenAIModelId, messages: Sequence[Message]
+     ) -> OpenAICompletionsProvider | OpenAIResponsesProvider:
+         """Choose the appropriate provider based on model_id and messages.
+
+         Args:
+             model_id: The model identifier.
+             messages: The messages to send to the LLM.
+
+         Returns:
+             The responses or completions subclient.
+         """
+         api_mode = choose_api_mode(model_id, messages)
+         if api_mode == "responses":
+             return self._responses_provider
+         return self._completions_provider
+
+     def _call(
+         self,
+         *,
+         model_id: OpenAIModelId,
+         messages: Sequence[Message],
+         tools: Sequence[Tool] | Toolkit | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None = None,
+         **params: Unpack[Params],
+     ) -> Response | Response[FormattableT]:
+         """Generate an `llm.Response` by synchronously calling the OpenAI API.
+
+         Args:
+             model_id: Model identifier to use.
+             messages: Messages to send to the LLM.
+             tools: Optional tools that the model may invoke.
+             format: Optional response format specifier.
+             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+         Returns:
+             An `llm.Response` object containing the LLM-generated content.
+         """
+         client = self._choose_subprovider(model_id, messages)
+         return client.call(
+             model_id=model_id,
+             messages=messages,
+             tools=tools,
+             format=format,
+             **params,
+         )
+
+     def _context_call(
+         self,
+         *,
+         ctx: Context[DepsT],
+         model_id: OpenAIModelId,
+         messages: Sequence[Message],
+         tools: Sequence[Tool | ContextTool[DepsT]]
+         | ContextToolkit[DepsT]
+         | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None = None,
+         **params: Unpack[Params],
+     ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
+         """Generate an `llm.ContextResponse` by synchronously calling the OpenAI API.
+
+         Args:
+             ctx: Context object with dependencies for tools.
+             model_id: Model identifier to use.
+             messages: Messages to send to the LLM.
+             tools: Optional tools that the model may invoke.
+             format: Optional response format specifier.
+             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+         Returns:
+             An `llm.ContextResponse` object containing the LLM-generated content.
+         """
+         client = self._choose_subprovider(model_id, messages)
+         return client.context_call(
+             ctx=ctx,
+             model_id=model_id,
+             messages=messages,
+             tools=tools,
+             format=format,
+             **params,
+         )
+
+     async def _call_async(
+         self,
+         *,
+         model_id: OpenAIModelId,
+         messages: Sequence[Message],
+         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None = None,
+         **params: Unpack[Params],
+     ) -> AsyncResponse | AsyncResponse[FormattableT]:
+         """Generate an `llm.AsyncResponse` by asynchronously calling the OpenAI API.
+
+         Args:
+             model_id: Model identifier to use.
+             messages: Messages to send to the LLM.
+             tools: Optional tools that the model may invoke.
+             format: Optional response format specifier.
+             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+         Returns:
+             An `llm.AsyncResponse` object containing the LLM-generated content.
+         """
+         return await self._choose_subprovider(model_id, messages).call_async(
+             model_id=model_id,
+             messages=messages,
+             tools=tools,
+             format=format,
+             **params,
+         )
+
+     async def _context_call_async(
+         self,
+         *,
+         ctx: Context[DepsT],
+         model_id: OpenAIModelId,
+         messages: Sequence[Message],
+         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
+         | AsyncContextToolkit[DepsT]
+         | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None = None,
+         **params: Unpack[Params],
+     ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
+         """Generate an `llm.AsyncContextResponse` by asynchronously calling the OpenAI API.
+
+         Args:
+             ctx: Context object with dependencies for tools.
+             model_id: Model identifier to use.
+             messages: Messages to send to the LLM.
+             tools: Optional tools that the model may invoke.
+             format: Optional response format specifier.
+             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+         Returns:
+             An `llm.AsyncContextResponse` object containing the LLM-generated content.
+         """
+         return await self._choose_subprovider(model_id, messages).context_call_async(
+             ctx=ctx,
+             model_id=model_id,
+             messages=messages,
+             tools=tools,
+             format=format,
+             **params,
+         )
+
+     def _stream(
+         self,
+         *,
+         model_id: OpenAIModelId,
+         messages: Sequence[Message],
+         tools: Sequence[Tool] | Toolkit | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None = None,
+         **params: Unpack[Params],
+     ) -> StreamResponse | StreamResponse[FormattableT]:
+         """Generate an `llm.StreamResponse` by synchronously streaming from the OpenAI API.
+
+         Args:
+             model_id: Model identifier to use.
+             messages: Messages to send to the LLM.
+             tools: Optional tools that the model may invoke.
+             format: Optional response format specifier.
+             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+         Returns:
+             An `llm.StreamResponse` object for iterating over the LLM-generated content.
+         """
+         client = self._choose_subprovider(model_id, messages)
+         return client.stream(
+             model_id=model_id,
+             messages=messages,
+             tools=tools,
+             format=format,
+             **params,
+         )
+
+     def _context_stream(
+         self,
+         *,
+         ctx: Context[DepsT],
+         model_id: OpenAIModelId,
+         messages: Sequence[Message],
+         tools: Sequence[Tool | ContextTool[DepsT]]
+         | ContextToolkit[DepsT]
+         | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None = None,
+         **params: Unpack[Params],
+     ) -> ContextStreamResponse[DepsT] | ContextStreamResponse[DepsT, FormattableT]:
+         """Generate an `llm.ContextStreamResponse` by synchronously streaming from the OpenAI API.
+
+         Args:
+             ctx: Context object with dependencies for tools.
+             model_id: Model identifier to use.
+             messages: Messages to send to the LLM.
+             tools: Optional tools that the model may invoke.
+             format: Optional response format specifier.
+             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+         Returns:
+             An `llm.ContextStreamResponse` object for iterating over the LLM-generated content.
+         """
+         client = self._choose_subprovider(model_id, messages)
+         return client.context_stream(
+             ctx=ctx,
+             model_id=model_id,
+             messages=messages,
+             tools=tools,
+             format=format,
+             **params,
+         )
+
+     async def _stream_async(
+         self,
+         *,
+         model_id: OpenAIModelId,
+         messages: Sequence[Message],
+         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None = None,
+         **params: Unpack[Params],
+     ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
+         """Generate an `llm.AsyncStreamResponse` by asynchronously streaming from the OpenAI API.
+
+         Args:
+             model_id: Model identifier to use.
+             messages: Messages to send to the LLM.
+             tools: Optional tools that the model may invoke.
+             format: Optional response format specifier.
+             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+         Returns:
+             An `llm.AsyncStreamResponse` object for asynchronously iterating over the LLM-generated content.
+         """
+         return await self._choose_subprovider(model_id, messages).stream_async(
+             model_id=model_id,
+             messages=messages,
+             tools=tools,
+             format=format,
+             **params,
+         )
+
+     async def _context_stream_async(
+         self,
+         *,
+         ctx: Context[DepsT],
+         model_id: OpenAIModelId,
+         messages: Sequence[Message],
+         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
+         | AsyncContextToolkit[DepsT]
+         | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None = None,
+         **params: Unpack[Params],
+     ) -> (
+         AsyncContextStreamResponse[DepsT]
+         | AsyncContextStreamResponse[DepsT, FormattableT]
+     ):
+         """Generate an `llm.AsyncContextStreamResponse` by asynchronously streaming from the OpenAI API.
+
+         Args:
+             ctx: Context object with dependencies for tools.
+             model_id: Model identifier to use.
+             messages: Messages to send to the LLM.
+             tools: Optional tools that the model may invoke.
+             format: Optional response format specifier.
+             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+         Returns:
+             An `llm.AsyncContextStreamResponse` object for asynchronously iterating over the LLM-generated content.
+         """
+         return await self._choose_subprovider(model_id, messages).context_stream_async(
+             ctx=ctx,
+             model_id=model_id,
+             messages=messages,
+             tools=tools,
+             format=format,
+             **params,
+         )
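To make the routing concrete, here is an illustrative sketch (not part of the package) of what choose_api_mode returns for a few model IDs, assuming the messages contain no audio content and that OPENAI_KNOWN_MODELS mirrors the OpenAIKnownModels literal from model_info.py:

from mirascope.llm.providers.openai.provider import choose_api_mode

messages = []  # no audio content anywhere

# An explicit suffix always wins:
assert choose_api_mode("openai/gpt-4o:completions", messages) == "completions"

# o1-pro is known only as a :responses variant:
assert choose_api_mode("openai/o1-pro", messages) == "responses"

# gpt-3.5-turbo-16k is known only as a :completions variant:
assert choose_api_mode("openai/gpt-3.5-turbo-16k", messages) == "completions"

# Unknown models default by scope: responses for openai/, completions otherwise
# (the model IDs here are hypothetical examples):
assert choose_api_mode("openai/some-future-model", messages) == "responses"
assert choose_api_mode("other/some-proxied-model", messages) == "completions"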
mirascope/llm/providers/openai/responses/__init__.py
@@ -0,0 +1,21 @@
+ from typing import TYPE_CHECKING
+
+ if TYPE_CHECKING:
+     from .provider import OpenAIResponsesProvider
+ else:
+     try:
+         from .provider import OpenAIResponsesProvider
+     except ImportError:  # pragma: no cover
+         from ..._missing_import_stubs import (
+             create_import_error_stub,
+             create_provider_stub,
+         )
+
+         OpenAIResponsesProvider = create_provider_stub(
+             "openai", "OpenAIResponsesProvider"
+         )
+
+
+ __all__ = [
+     "OpenAIResponsesProvider",
+ ]
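The try/except above defers a missing openai dependency from import time to use time. The real create_provider_stub lives in _missing_import_stubs.py and is not shown in this diff; a hypothetical minimal version of the same pattern might look like:

def create_provider_stub(provider: str, class_name: str) -> type:
    # Hypothetical sketch only: return a placeholder class that raises
    # ImportError lazily, when instantiated, rather than at import time.
    class _MissingProviderStub:
        def __init__(self, *args: object, **kwargs: object) -> None:
            raise ImportError(
                f"{class_name} requires the '{provider}' extra; "
                f"install it with `pip install mirascope[{provider}]`."
            )

    _MissingProviderStub.__name__ = class_name
    return _MissingProviderStub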
mirascope/llm/{clients → providers}/openai/responses/_utils/decode.py
@@ -30,7 +30,7 @@ from .....responses import (
      RawMessageChunk,
      RawStreamEventChunk,
  )
- from ..model_ids import OpenAIResponsesModelId
+ from ...model_id import OpenAIModelId, model_name

  INCOMPLETE_DETAILS_TO_FINISH_REASON = {
      "max_output_tokens": FinishReason.MAX_TOKENS,
@@ -47,7 +47,8 @@ def _serialize_output_item(

  def decode_response(
      response: openai_types.Response,
-     model_id: OpenAIResponsesModelId,
+     model_id: OpenAIModelId,
+     provider_id: Literal["openai", "openai:responses"],
  ) -> tuple[AssistantMessage, FinishReason | None]:
      """Convert OpenAI Responses Response to mirascope AssistantMessage."""
      parts: list[AssistantContentPart] = []
@@ -91,8 +92,9 @@ def decode_response(

      assistant_message = AssistantMessage(
          content=parts,
-         provider="openai:responses",
+         provider_id=provider_id,
          model_id=model_id,
+         provider_model_name=model_name(model_id, "responses"),
          raw_message=[
              _serialize_output_item(output_item) for output_item in response.output
          ],