mirascope 1.25.7__py3-none-any.whl → 2.0.0a0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (474)
  1. mirascope/__init__.py +3 -59
  2. mirascope/graphs/__init__.py +22 -0
  3. mirascope/{experimental/graphs → graphs}/finite_state_machine.py +70 -159
  4. mirascope/llm/__init__.py +206 -16
  5. mirascope/llm/agents/__init__.py +15 -0
  6. mirascope/llm/agents/agent.py +97 -0
  7. mirascope/llm/agents/agent_template.py +45 -0
  8. mirascope/llm/agents/decorator.py +176 -0
  9. mirascope/llm/calls/__init__.py +16 -0
  10. mirascope/llm/calls/base_call.py +33 -0
  11. mirascope/llm/calls/calls.py +315 -0
  12. mirascope/llm/calls/decorator.py +255 -0
  13. mirascope/llm/clients/__init__.py +34 -0
  14. mirascope/llm/clients/anthropic/__init__.py +11 -0
  15. mirascope/llm/clients/anthropic/_utils/__init__.py +13 -0
  16. mirascope/llm/clients/anthropic/_utils/decode.py +244 -0
  17. mirascope/llm/clients/anthropic/_utils/encode.py +243 -0
  18. mirascope/llm/clients/anthropic/clients.py +819 -0
  19. mirascope/llm/clients/anthropic/model_ids.py +8 -0
  20. mirascope/llm/clients/base/__init__.py +15 -0
  21. mirascope/llm/clients/base/_utils.py +192 -0
  22. mirascope/llm/clients/base/client.py +1256 -0
  23. mirascope/llm/clients/base/kwargs.py +12 -0
  24. mirascope/llm/clients/base/params.py +93 -0
  25. mirascope/llm/clients/google/__init__.py +6 -0
  26. mirascope/llm/clients/google/_utils/__init__.py +13 -0
  27. mirascope/llm/clients/google/_utils/decode.py +231 -0
  28. mirascope/llm/clients/google/_utils/encode.py +279 -0
  29. mirascope/llm/clients/google/clients.py +853 -0
  30. mirascope/llm/clients/google/message.py +7 -0
  31. mirascope/llm/clients/google/model_ids.py +15 -0
  32. mirascope/llm/clients/openai/__init__.py +25 -0
  33. mirascope/llm/clients/openai/completions/__init__.py +9 -0
  34. mirascope/llm/clients/openai/completions/_utils/__init__.py +13 -0
  35. mirascope/llm/clients/openai/completions/_utils/decode.py +187 -0
  36. mirascope/llm/clients/openai/completions/_utils/encode.py +358 -0
  37. mirascope/llm/clients/openai/completions/_utils/model_features.py +81 -0
  38. mirascope/llm/clients/openai/completions/clients.py +833 -0
  39. mirascope/llm/clients/openai/completions/model_ids.py +8 -0
  40. mirascope/llm/clients/openai/responses/__init__.py +9 -0
  41. mirascope/llm/clients/openai/responses/_utils/__init__.py +13 -0
  42. mirascope/llm/clients/openai/responses/_utils/decode.py +194 -0
  43. mirascope/llm/clients/openai/responses/_utils/encode.py +333 -0
  44. mirascope/llm/clients/openai/responses/_utils/model_features.py +87 -0
  45. mirascope/llm/clients/openai/responses/clients.py +832 -0
  46. mirascope/llm/clients/openai/responses/model_ids.py +8 -0
  47. mirascope/llm/clients/openai/shared/__init__.py +7 -0
  48. mirascope/llm/clients/openai/shared/_utils.py +55 -0
  49. mirascope/llm/clients/providers.py +175 -0
  50. mirascope/llm/content/__init__.py +70 -0
  51. mirascope/llm/content/audio.py +173 -0
  52. mirascope/llm/content/document.py +94 -0
  53. mirascope/llm/content/image.py +206 -0
  54. mirascope/llm/content/text.py +47 -0
  55. mirascope/llm/content/thought.py +58 -0
  56. mirascope/llm/content/tool_call.py +63 -0
  57. mirascope/llm/content/tool_output.py +26 -0
  58. mirascope/llm/context/__init__.py +6 -0
  59. mirascope/llm/context/_utils.py +28 -0
  60. mirascope/llm/context/context.py +24 -0
  61. mirascope/llm/exceptions.py +105 -0
  62. mirascope/llm/formatting/__init__.py +22 -0
  63. mirascope/llm/formatting/_utils.py +74 -0
  64. mirascope/llm/formatting/format.py +104 -0
  65. mirascope/llm/formatting/from_call_args.py +30 -0
  66. mirascope/llm/formatting/partial.py +58 -0
  67. mirascope/llm/formatting/types.py +109 -0
  68. mirascope/llm/mcp/__init__.py +5 -0
  69. mirascope/llm/mcp/client.py +118 -0
  70. mirascope/llm/messages/__init__.py +32 -0
  71. mirascope/llm/messages/message.py +182 -0
  72. mirascope/llm/models/__init__.py +16 -0
  73. mirascope/llm/models/models.py +1243 -0
  74. mirascope/llm/prompts/__init__.py +33 -0
  75. mirascope/llm/prompts/_utils.py +60 -0
  76. mirascope/llm/prompts/decorator.py +286 -0
  77. mirascope/llm/prompts/protocols.py +99 -0
  78. mirascope/llm/responses/__init__.py +57 -0
  79. mirascope/llm/responses/_utils.py +56 -0
  80. mirascope/llm/responses/base_response.py +91 -0
  81. mirascope/llm/responses/base_stream_response.py +697 -0
  82. mirascope/llm/responses/finish_reason.py +27 -0
  83. mirascope/llm/responses/response.py +345 -0
  84. mirascope/llm/responses/root_response.py +177 -0
  85. mirascope/llm/responses/stream_response.py +572 -0
  86. mirascope/llm/responses/streams.py +363 -0
  87. mirascope/llm/tools/__init__.py +40 -0
  88. mirascope/llm/tools/_utils.py +25 -0
  89. mirascope/llm/tools/decorator.py +175 -0
  90. mirascope/llm/tools/protocols.py +96 -0
  91. mirascope/llm/tools/tool_schema.py +246 -0
  92. mirascope/llm/tools/toolkit.py +152 -0
  93. mirascope/llm/tools/tools.py +169 -0
  94. mirascope/llm/types/__init__.py +22 -0
  95. mirascope/llm/types/dataclass.py +9 -0
  96. mirascope/llm/types/jsonable.py +44 -0
  97. mirascope/llm/types/type_vars.py +19 -0
  98. mirascope-2.0.0a0.dist-info/METADATA +117 -0
  99. mirascope-2.0.0a0.dist-info/RECORD +101 -0
  100. mirascope/beta/__init__.py +0 -3
  101. mirascope/beta/openai/__init__.py +0 -17
  102. mirascope/beta/openai/realtime/__init__.py +0 -13
  103. mirascope/beta/openai/realtime/_utils/__init__.py +0 -3
  104. mirascope/beta/openai/realtime/_utils/_audio.py +0 -74
  105. mirascope/beta/openai/realtime/_utils/_protocols.py +0 -50
  106. mirascope/beta/openai/realtime/realtime.py +0 -500
  107. mirascope/beta/openai/realtime/recording.py +0 -98
  108. mirascope/beta/openai/realtime/tool.py +0 -113
  109. mirascope/beta/rag/__init__.py +0 -24
  110. mirascope/beta/rag/base/__init__.py +0 -22
  111. mirascope/beta/rag/base/chunkers/__init__.py +0 -2
  112. mirascope/beta/rag/base/chunkers/base_chunker.py +0 -37
  113. mirascope/beta/rag/base/chunkers/text_chunker.py +0 -33
  114. mirascope/beta/rag/base/config.py +0 -8
  115. mirascope/beta/rag/base/document.py +0 -11
  116. mirascope/beta/rag/base/embedders.py +0 -35
  117. mirascope/beta/rag/base/embedding_params.py +0 -18
  118. mirascope/beta/rag/base/embedding_response.py +0 -30
  119. mirascope/beta/rag/base/query_results.py +0 -7
  120. mirascope/beta/rag/base/vectorstore_params.py +0 -18
  121. mirascope/beta/rag/base/vectorstores.py +0 -37
  122. mirascope/beta/rag/chroma/__init__.py +0 -11
  123. mirascope/beta/rag/chroma/types.py +0 -62
  124. mirascope/beta/rag/chroma/vectorstores.py +0 -121
  125. mirascope/beta/rag/cohere/__init__.py +0 -11
  126. mirascope/beta/rag/cohere/embedders.py +0 -87
  127. mirascope/beta/rag/cohere/embedding_params.py +0 -29
  128. mirascope/beta/rag/cohere/embedding_response.py +0 -29
  129. mirascope/beta/rag/cohere/py.typed +0 -0
  130. mirascope/beta/rag/openai/__init__.py +0 -11
  131. mirascope/beta/rag/openai/embedders.py +0 -144
  132. mirascope/beta/rag/openai/embedding_params.py +0 -18
  133. mirascope/beta/rag/openai/embedding_response.py +0 -14
  134. mirascope/beta/rag/openai/py.typed +0 -0
  135. mirascope/beta/rag/pinecone/__init__.py +0 -19
  136. mirascope/beta/rag/pinecone/types.py +0 -143
  137. mirascope/beta/rag/pinecone/vectorstores.py +0 -148
  138. mirascope/beta/rag/weaviate/__init__.py +0 -6
  139. mirascope/beta/rag/weaviate/types.py +0 -92
  140. mirascope/beta/rag/weaviate/vectorstores.py +0 -103
  141. mirascope/core/__init__.py +0 -109
  142. mirascope/core/anthropic/__init__.py +0 -31
  143. mirascope/core/anthropic/_call.py +0 -67
  144. mirascope/core/anthropic/_call_kwargs.py +0 -13
  145. mirascope/core/anthropic/_thinking.py +0 -70
  146. mirascope/core/anthropic/_utils/__init__.py +0 -16
  147. mirascope/core/anthropic/_utils/_convert_common_call_params.py +0 -25
  148. mirascope/core/anthropic/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -21
  149. mirascope/core/anthropic/_utils/_convert_message_params.py +0 -102
  150. mirascope/core/anthropic/_utils/_get_json_output.py +0 -31
  151. mirascope/core/anthropic/_utils/_handle_stream.py +0 -113
  152. mirascope/core/anthropic/_utils/_message_param_converter.py +0 -154
  153. mirascope/core/anthropic/_utils/_setup_call.py +0 -146
  154. mirascope/core/anthropic/call_params.py +0 -44
  155. mirascope/core/anthropic/call_response.py +0 -226
  156. mirascope/core/anthropic/call_response_chunk.py +0 -152
  157. mirascope/core/anthropic/dynamic_config.py +0 -40
  158. mirascope/core/anthropic/py.typed +0 -0
  159. mirascope/core/anthropic/stream.py +0 -204
  160. mirascope/core/anthropic/tool.py +0 -101
  161. mirascope/core/azure/__init__.py +0 -31
  162. mirascope/core/azure/_call.py +0 -67
  163. mirascope/core/azure/_call_kwargs.py +0 -13
  164. mirascope/core/azure/_utils/__init__.py +0 -14
  165. mirascope/core/azure/_utils/_convert_common_call_params.py +0 -26
  166. mirascope/core/azure/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -21
  167. mirascope/core/azure/_utils/_convert_message_params.py +0 -121
  168. mirascope/core/azure/_utils/_get_credential.py +0 -33
  169. mirascope/core/azure/_utils/_get_json_output.py +0 -27
  170. mirascope/core/azure/_utils/_handle_stream.py +0 -130
  171. mirascope/core/azure/_utils/_message_param_converter.py +0 -117
  172. mirascope/core/azure/_utils/_setup_call.py +0 -183
  173. mirascope/core/azure/call_params.py +0 -59
  174. mirascope/core/azure/call_response.py +0 -215
  175. mirascope/core/azure/call_response_chunk.py +0 -105
  176. mirascope/core/azure/dynamic_config.py +0 -30
  177. mirascope/core/azure/py.typed +0 -0
  178. mirascope/core/azure/stream.py +0 -147
  179. mirascope/core/azure/tool.py +0 -93
  180. mirascope/core/base/__init__.py +0 -86
  181. mirascope/core/base/_call_factory.py +0 -256
  182. mirascope/core/base/_create.py +0 -253
  183. mirascope/core/base/_extract.py +0 -175
  184. mirascope/core/base/_extract_with_tools.py +0 -189
  185. mirascope/core/base/_partial.py +0 -95
  186. mirascope/core/base/_utils/__init__.py +0 -92
  187. mirascope/core/base/_utils/_base_message_param_converter.py +0 -22
  188. mirascope/core/base/_utils/_base_type.py +0 -26
  189. mirascope/core/base/_utils/_convert_base_model_to_base_tool.py +0 -48
  190. mirascope/core/base/_utils/_convert_base_type_to_base_tool.py +0 -24
  191. mirascope/core/base/_utils/_convert_function_to_base_tool.py +0 -139
  192. mirascope/core/base/_utils/_convert_messages_to_message_params.py +0 -178
  193. mirascope/core/base/_utils/_convert_provider_finish_reason_to_finish_reason.py +0 -20
  194. mirascope/core/base/_utils/_default_tool_docstring.py +0 -6
  195. mirascope/core/base/_utils/_extract_tool_return.py +0 -42
  196. mirascope/core/base/_utils/_fn_is_async.py +0 -24
  197. mirascope/core/base/_utils/_format_template.py +0 -32
  198. mirascope/core/base/_utils/_get_audio_type.py +0 -18
  199. mirascope/core/base/_utils/_get_common_usage.py +0 -20
  200. mirascope/core/base/_utils/_get_create_fn_or_async_create_fn.py +0 -137
  201. mirascope/core/base/_utils/_get_document_type.py +0 -7
  202. mirascope/core/base/_utils/_get_dynamic_configuration.py +0 -69
  203. mirascope/core/base/_utils/_get_fields_from_call_args.py +0 -34
  204. mirascope/core/base/_utils/_get_fn_args.py +0 -23
  205. mirascope/core/base/_utils/_get_image_dimensions.py +0 -39
  206. mirascope/core/base/_utils/_get_image_type.py +0 -26
  207. mirascope/core/base/_utils/_get_metadata.py +0 -17
  208. mirascope/core/base/_utils/_get_possible_user_message_param.py +0 -21
  209. mirascope/core/base/_utils/_get_prompt_template.py +0 -28
  210. mirascope/core/base/_utils/_get_template_values.py +0 -51
  211. mirascope/core/base/_utils/_get_template_variables.py +0 -38
  212. mirascope/core/base/_utils/_get_unsupported_tool_config_keys.py +0 -10
  213. mirascope/core/base/_utils/_is_prompt_template.py +0 -24
  214. mirascope/core/base/_utils/_json_mode_content.py +0 -17
  215. mirascope/core/base/_utils/_messages_decorator.py +0 -121
  216. mirascope/core/base/_utils/_parse_content_template.py +0 -323
  217. mirascope/core/base/_utils/_parse_prompt_messages.py +0 -63
  218. mirascope/core/base/_utils/_pil_image_to_bytes.py +0 -13
  219. mirascope/core/base/_utils/_protocols.py +0 -901
  220. mirascope/core/base/_utils/_setup_call.py +0 -79
  221. mirascope/core/base/_utils/_setup_extract_tool.py +0 -30
  222. mirascope/core/base/call_kwargs.py +0 -13
  223. mirascope/core/base/call_params.py +0 -36
  224. mirascope/core/base/call_response.py +0 -338
  225. mirascope/core/base/call_response_chunk.py +0 -130
  226. mirascope/core/base/dynamic_config.py +0 -82
  227. mirascope/core/base/from_call_args.py +0 -30
  228. mirascope/core/base/merge_decorators.py +0 -59
  229. mirascope/core/base/message_param.py +0 -175
  230. mirascope/core/base/messages.py +0 -116
  231. mirascope/core/base/metadata.py +0 -13
  232. mirascope/core/base/prompt.py +0 -497
  233. mirascope/core/base/response_model_config_dict.py +0 -9
  234. mirascope/core/base/stream.py +0 -479
  235. mirascope/core/base/stream_config.py +0 -11
  236. mirascope/core/base/structured_stream.py +0 -296
  237. mirascope/core/base/tool.py +0 -214
  238. mirascope/core/base/toolkit.py +0 -176
  239. mirascope/core/base/types.py +0 -344
  240. mirascope/core/bedrock/__init__.py +0 -34
  241. mirascope/core/bedrock/_call.py +0 -68
  242. mirascope/core/bedrock/_call_kwargs.py +0 -12
  243. mirascope/core/bedrock/_types.py +0 -104
  244. mirascope/core/bedrock/_utils/__init__.py +0 -14
  245. mirascope/core/bedrock/_utils/_convert_common_call_params.py +0 -39
  246. mirascope/core/bedrock/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -23
  247. mirascope/core/bedrock/_utils/_convert_message_params.py +0 -111
  248. mirascope/core/bedrock/_utils/_get_json_output.py +0 -30
  249. mirascope/core/bedrock/_utils/_handle_stream.py +0 -104
  250. mirascope/core/bedrock/_utils/_message_param_converter.py +0 -172
  251. mirascope/core/bedrock/_utils/_setup_call.py +0 -258
  252. mirascope/core/bedrock/call_params.py +0 -38
  253. mirascope/core/bedrock/call_response.py +0 -248
  254. mirascope/core/bedrock/call_response_chunk.py +0 -111
  255. mirascope/core/bedrock/dynamic_config.py +0 -37
  256. mirascope/core/bedrock/py.typed +0 -0
  257. mirascope/core/bedrock/stream.py +0 -154
  258. mirascope/core/bedrock/tool.py +0 -100
  259. mirascope/core/cohere/__init__.py +0 -30
  260. mirascope/core/cohere/_call.py +0 -67
  261. mirascope/core/cohere/_call_kwargs.py +0 -11
  262. mirascope/core/cohere/_types.py +0 -20
  263. mirascope/core/cohere/_utils/__init__.py +0 -14
  264. mirascope/core/cohere/_utils/_convert_common_call_params.py +0 -26
  265. mirascope/core/cohere/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -24
  266. mirascope/core/cohere/_utils/_convert_message_params.py +0 -32
  267. mirascope/core/cohere/_utils/_get_json_output.py +0 -30
  268. mirascope/core/cohere/_utils/_handle_stream.py +0 -35
  269. mirascope/core/cohere/_utils/_message_param_converter.py +0 -54
  270. mirascope/core/cohere/_utils/_setup_call.py +0 -150
  271. mirascope/core/cohere/call_params.py +0 -62
  272. mirascope/core/cohere/call_response.py +0 -205
  273. mirascope/core/cohere/call_response_chunk.py +0 -125
  274. mirascope/core/cohere/dynamic_config.py +0 -32
  275. mirascope/core/cohere/py.typed +0 -0
  276. mirascope/core/cohere/stream.py +0 -113
  277. mirascope/core/cohere/tool.py +0 -93
  278. mirascope/core/costs/__init__.py +0 -5
  279. mirascope/core/costs/_anthropic_calculate_cost.py +0 -219
  280. mirascope/core/costs/_azure_calculate_cost.py +0 -11
  281. mirascope/core/costs/_bedrock_calculate_cost.py +0 -15
  282. mirascope/core/costs/_cohere_calculate_cost.py +0 -44
  283. mirascope/core/costs/_gemini_calculate_cost.py +0 -67
  284. mirascope/core/costs/_google_calculate_cost.py +0 -427
  285. mirascope/core/costs/_groq_calculate_cost.py +0 -156
  286. mirascope/core/costs/_litellm_calculate_cost.py +0 -11
  287. mirascope/core/costs/_mistral_calculate_cost.py +0 -64
  288. mirascope/core/costs/_openai_calculate_cost.py +0 -416
  289. mirascope/core/costs/_vertex_calculate_cost.py +0 -67
  290. mirascope/core/costs/_xai_calculate_cost.py +0 -104
  291. mirascope/core/costs/calculate_cost.py +0 -86
  292. mirascope/core/gemini/__init__.py +0 -40
  293. mirascope/core/gemini/_call.py +0 -67
  294. mirascope/core/gemini/_call_kwargs.py +0 -12
  295. mirascope/core/gemini/_utils/__init__.py +0 -14
  296. mirascope/core/gemini/_utils/_convert_common_call_params.py +0 -39
  297. mirascope/core/gemini/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -23
  298. mirascope/core/gemini/_utils/_convert_message_params.py +0 -156
  299. mirascope/core/gemini/_utils/_get_json_output.py +0 -35
  300. mirascope/core/gemini/_utils/_handle_stream.py +0 -33
  301. mirascope/core/gemini/_utils/_message_param_converter.py +0 -209
  302. mirascope/core/gemini/_utils/_setup_call.py +0 -149
  303. mirascope/core/gemini/call_params.py +0 -52
  304. mirascope/core/gemini/call_response.py +0 -216
  305. mirascope/core/gemini/call_response_chunk.py +0 -100
  306. mirascope/core/gemini/dynamic_config.py +0 -26
  307. mirascope/core/gemini/stream.py +0 -120
  308. mirascope/core/gemini/tool.py +0 -104
  309. mirascope/core/google/__init__.py +0 -29
  310. mirascope/core/google/_call.py +0 -67
  311. mirascope/core/google/_call_kwargs.py +0 -13
  312. mirascope/core/google/_utils/__init__.py +0 -14
  313. mirascope/core/google/_utils/_convert_common_call_params.py +0 -38
  314. mirascope/core/google/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -27
  315. mirascope/core/google/_utils/_convert_message_params.py +0 -297
  316. mirascope/core/google/_utils/_get_json_output.py +0 -37
  317. mirascope/core/google/_utils/_handle_stream.py +0 -58
  318. mirascope/core/google/_utils/_message_param_converter.py +0 -200
  319. mirascope/core/google/_utils/_setup_call.py +0 -201
  320. mirascope/core/google/_utils/_validate_media_type.py +0 -58
  321. mirascope/core/google/call_params.py +0 -22
  322. mirascope/core/google/call_response.py +0 -255
  323. mirascope/core/google/call_response_chunk.py +0 -135
  324. mirascope/core/google/dynamic_config.py +0 -26
  325. mirascope/core/google/stream.py +0 -199
  326. mirascope/core/google/tool.py +0 -146
  327. mirascope/core/groq/__init__.py +0 -30
  328. mirascope/core/groq/_call.py +0 -67
  329. mirascope/core/groq/_call_kwargs.py +0 -13
  330. mirascope/core/groq/_utils/__init__.py +0 -14
  331. mirascope/core/groq/_utils/_convert_common_call_params.py +0 -26
  332. mirascope/core/groq/_utils/_convert_message_params.py +0 -112
  333. mirascope/core/groq/_utils/_get_json_output.py +0 -27
  334. mirascope/core/groq/_utils/_handle_stream.py +0 -123
  335. mirascope/core/groq/_utils/_message_param_converter.py +0 -89
  336. mirascope/core/groq/_utils/_setup_call.py +0 -132
  337. mirascope/core/groq/call_params.py +0 -52
  338. mirascope/core/groq/call_response.py +0 -213
  339. mirascope/core/groq/call_response_chunk.py +0 -104
  340. mirascope/core/groq/dynamic_config.py +0 -29
  341. mirascope/core/groq/py.typed +0 -0
  342. mirascope/core/groq/stream.py +0 -135
  343. mirascope/core/groq/tool.py +0 -80
  344. mirascope/core/litellm/__init__.py +0 -28
  345. mirascope/core/litellm/_call.py +0 -67
  346. mirascope/core/litellm/_utils/__init__.py +0 -5
  347. mirascope/core/litellm/_utils/_setup_call.py +0 -109
  348. mirascope/core/litellm/call_params.py +0 -10
  349. mirascope/core/litellm/call_response.py +0 -24
  350. mirascope/core/litellm/call_response_chunk.py +0 -14
  351. mirascope/core/litellm/dynamic_config.py +0 -8
  352. mirascope/core/litellm/py.typed +0 -0
  353. mirascope/core/litellm/stream.py +0 -86
  354. mirascope/core/litellm/tool.py +0 -13
  355. mirascope/core/mistral/__init__.py +0 -36
  356. mirascope/core/mistral/_call.py +0 -65
  357. mirascope/core/mistral/_call_kwargs.py +0 -19
  358. mirascope/core/mistral/_utils/__init__.py +0 -14
  359. mirascope/core/mistral/_utils/_convert_common_call_params.py +0 -24
  360. mirascope/core/mistral/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -22
  361. mirascope/core/mistral/_utils/_convert_message_params.py +0 -122
  362. mirascope/core/mistral/_utils/_get_json_output.py +0 -34
  363. mirascope/core/mistral/_utils/_handle_stream.py +0 -139
  364. mirascope/core/mistral/_utils/_message_param_converter.py +0 -176
  365. mirascope/core/mistral/_utils/_setup_call.py +0 -164
  366. mirascope/core/mistral/call_params.py +0 -36
  367. mirascope/core/mistral/call_response.py +0 -205
  368. mirascope/core/mistral/call_response_chunk.py +0 -105
  369. mirascope/core/mistral/dynamic_config.py +0 -33
  370. mirascope/core/mistral/py.typed +0 -0
  371. mirascope/core/mistral/stream.py +0 -120
  372. mirascope/core/mistral/tool.py +0 -81
  373. mirascope/core/openai/__init__.py +0 -31
  374. mirascope/core/openai/_call.py +0 -67
  375. mirascope/core/openai/_call_kwargs.py +0 -13
  376. mirascope/core/openai/_utils/__init__.py +0 -14
  377. mirascope/core/openai/_utils/_convert_common_call_params.py +0 -26
  378. mirascope/core/openai/_utils/_convert_message_params.py +0 -148
  379. mirascope/core/openai/_utils/_get_json_output.py +0 -31
  380. mirascope/core/openai/_utils/_handle_stream.py +0 -138
  381. mirascope/core/openai/_utils/_message_param_converter.py +0 -105
  382. mirascope/core/openai/_utils/_setup_call.py +0 -155
  383. mirascope/core/openai/call_params.py +0 -92
  384. mirascope/core/openai/call_response.py +0 -273
  385. mirascope/core/openai/call_response_chunk.py +0 -139
  386. mirascope/core/openai/dynamic_config.py +0 -34
  387. mirascope/core/openai/py.typed +0 -0
  388. mirascope/core/openai/stream.py +0 -185
  389. mirascope/core/openai/tool.py +0 -101
  390. mirascope/core/py.typed +0 -0
  391. mirascope/core/vertex/__init__.py +0 -45
  392. mirascope/core/vertex/_call.py +0 -62
  393. mirascope/core/vertex/_call_kwargs.py +0 -12
  394. mirascope/core/vertex/_utils/__init__.py +0 -14
  395. mirascope/core/vertex/_utils/_convert_common_call_params.py +0 -37
  396. mirascope/core/vertex/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -23
  397. mirascope/core/vertex/_utils/_convert_message_params.py +0 -171
  398. mirascope/core/vertex/_utils/_get_json_output.py +0 -36
  399. mirascope/core/vertex/_utils/_handle_stream.py +0 -33
  400. mirascope/core/vertex/_utils/_message_param_converter.py +0 -133
  401. mirascope/core/vertex/_utils/_setup_call.py +0 -160
  402. mirascope/core/vertex/call_params.py +0 -24
  403. mirascope/core/vertex/call_response.py +0 -206
  404. mirascope/core/vertex/call_response_chunk.py +0 -99
  405. mirascope/core/vertex/dynamic_config.py +0 -28
  406. mirascope/core/vertex/stream.py +0 -119
  407. mirascope/core/vertex/tool.py +0 -101
  408. mirascope/core/xai/__init__.py +0 -28
  409. mirascope/core/xai/_call.py +0 -67
  410. mirascope/core/xai/_utils/__init__.py +0 -5
  411. mirascope/core/xai/_utils/_setup_call.py +0 -113
  412. mirascope/core/xai/call_params.py +0 -10
  413. mirascope/core/xai/call_response.py +0 -16
  414. mirascope/core/xai/call_response_chunk.py +0 -14
  415. mirascope/core/xai/dynamic_config.py +0 -8
  416. mirascope/core/xai/py.typed +0 -0
  417. mirascope/core/xai/stream.py +0 -57
  418. mirascope/core/xai/tool.py +0 -13
  419. mirascope/experimental/graphs/__init__.py +0 -5
  420. mirascope/integrations/__init__.py +0 -16
  421. mirascope/integrations/_middleware_factory.py +0 -403
  422. mirascope/integrations/langfuse/__init__.py +0 -3
  423. mirascope/integrations/langfuse/_utils.py +0 -114
  424. mirascope/integrations/langfuse/_with_langfuse.py +0 -70
  425. mirascope/integrations/logfire/__init__.py +0 -3
  426. mirascope/integrations/logfire/_utils.py +0 -225
  427. mirascope/integrations/logfire/_with_logfire.py +0 -63
  428. mirascope/integrations/otel/__init__.py +0 -10
  429. mirascope/integrations/otel/_utils.py +0 -270
  430. mirascope/integrations/otel/_with_hyperdx.py +0 -60
  431. mirascope/integrations/otel/_with_otel.py +0 -59
  432. mirascope/integrations/tenacity.py +0 -14
  433. mirascope/llm/_call.py +0 -401
  434. mirascope/llm/_context.py +0 -384
  435. mirascope/llm/_override.py +0 -3639
  436. mirascope/llm/_protocols.py +0 -500
  437. mirascope/llm/_response_metaclass.py +0 -31
  438. mirascope/llm/call_response.py +0 -158
  439. mirascope/llm/call_response_chunk.py +0 -66
  440. mirascope/llm/stream.py +0 -162
  441. mirascope/llm/tool.py +0 -64
  442. mirascope/mcp/__init__.py +0 -7
  443. mirascope/mcp/_utils.py +0 -288
  444. mirascope/mcp/client.py +0 -167
  445. mirascope/mcp/server.py +0 -356
  446. mirascope/mcp/tools.py +0 -110
  447. mirascope/py.typed +0 -0
  448. mirascope/retries/__init__.py +0 -11
  449. mirascope/retries/fallback.py +0 -131
  450. mirascope/retries/tenacity.py +0 -50
  451. mirascope/tools/__init__.py +0 -37
  452. mirascope/tools/base.py +0 -98
  453. mirascope/tools/system/__init__.py +0 -0
  454. mirascope/tools/system/_docker_operation.py +0 -166
  455. mirascope/tools/system/_file_system.py +0 -267
  456. mirascope/tools/web/__init__.py +0 -0
  457. mirascope/tools/web/_duckduckgo.py +0 -111
  458. mirascope/tools/web/_httpx.py +0 -125
  459. mirascope/tools/web/_parse_url_content.py +0 -94
  460. mirascope/tools/web/_requests.py +0 -54
  461. mirascope/v0/__init__.py +0 -43
  462. mirascope/v0/anthropic.py +0 -54
  463. mirascope/v0/base/__init__.py +0 -12
  464. mirascope/v0/base/calls.py +0 -118
  465. mirascope/v0/base/extractors.py +0 -122
  466. mirascope/v0/base/ops_utils.py +0 -207
  467. mirascope/v0/base/prompts.py +0 -48
  468. mirascope/v0/base/types.py +0 -14
  469. mirascope/v0/base/utils.py +0 -21
  470. mirascope/v0/openai.py +0 -54
  471. mirascope-1.25.7.dist-info/METADATA +0 -169
  472. mirascope-1.25.7.dist-info/RECORD +0 -378
  473. {mirascope-1.25.7.dist-info → mirascope-2.0.0a0.dist-info}/WHEEL +0 -0
  474. {mirascope-1.25.7.dist-info → mirascope-2.0.0a0.dist-info}/licenses/LICENSE +0 -0

mirascope/llm/clients/openai/completions/model_ids.py
@@ -0,0 +1,8 @@
+ """OpenAI ChatCompletions registered LLM models."""
+
+ from typing import TypeAlias
+
+ from openai.types import ChatModel
+
+ OpenAICompletionsModelId: TypeAlias = ChatModel | str
+ """The OpenAI ChatCompletions model ids registered with Mirascope."""

mirascope/llm/clients/openai/responses/__init__.py
@@ -0,0 +1,9 @@
+ from .clients import OpenAIResponsesClient, client, get_client
+ from .model_ids import OpenAIResponsesModelId
+
+ __all__ = [
+     "OpenAIResponsesClient",
+     "OpenAIResponsesModelId",
+     "client",
+     "get_client",
+ ]

mirascope/llm/clients/openai/responses/_utils/__init__.py
@@ -0,0 +1,13 @@
+ from .decode import (
+     decode_async_stream,
+     decode_response,
+     decode_stream,
+ )
+ from .encode import encode_request
+
+ __all__ = [
+     "decode_async_stream",
+     "decode_response",
+     "decode_stream",
+     "encode_request",
+ ]

mirascope/llm/clients/openai/responses/_utils/decode.py
@@ -0,0 +1,194 @@
+ """OpenAI Responses response decoding."""
+
+ from typing import Any, Literal
+
+ from openai import AsyncStream, Stream
+ from openai.types import responses as openai_types
+ from openai.types.responses.response_stream_event import ResponseStreamEvent
+
+ from .....content import (
+     AssistantContentPart,
+     Text,
+     TextChunk,
+     TextEndChunk,
+     TextStartChunk,
+     Thought,
+     ThoughtChunk,
+     ThoughtEndChunk,
+     ThoughtStartChunk,
+     ToolCall,
+     ToolCallChunk,
+     ToolCallEndChunk,
+     ToolCallStartChunk,
+ )
+ from .....messages import AssistantMessage
+ from .....responses import (
+     AsyncChunkIterator,
+     ChunkIterator,
+     FinishReason,
+     FinishReasonChunk,
+     RawMessageChunk,
+     RawStreamEventChunk,
+ )
+ from ..model_ids import OpenAIResponsesModelId
+
+ INCOMPLETE_DETAILS_TO_FINISH_REASON = {
+     "max_output_tokens": FinishReason.MAX_TOKENS,
+     "content_filter": FinishReason.REFUSAL,
+ }
+
+
+ def _serialize_output_item(
+     item: openai_types.ResponseOutputItem,
+ ) -> dict[str, Any]:
+     """Returns the item serialized as a dictionary."""
+     return {key: value for key, value in item.model_dump().items() if value is not None}
+
+
+ def decode_response(
+     response: openai_types.Response,
+     model_id: OpenAIResponsesModelId,
+ ) -> tuple[AssistantMessage, FinishReason | None]:
+     """Convert OpenAI Responses Response to mirascope AssistantMessage."""
+     parts: list[AssistantContentPart] = []
+     finish_reason: FinishReason | None = None
+     refused = False
+
+     for output_item in response.output:
+         if output_item.type == "message":
+             for content in output_item.content:
+                 if content.type == "output_text":
+                     parts.append(Text(text=content.text))
+                 elif content.type == "refusal":
+                     parts.append(Text(text=content.refusal))
+                     refused = True
+         elif output_item.type == "function_call":
+             parts.append(
+                 ToolCall(
+                     id=output_item.call_id,
+                     name=output_item.name,
+                     args=output_item.arguments,
+                 )
+             )
+         elif output_item.type == "reasoning":
+             for summary_part in output_item.summary:
+                 if summary_part.type == "summary_text":
+                     parts.append(Thought(thought=summary_part.text))
+             if output_item.content:  # pragma: no cover
+                 # TODO: Add test case covering this
+                 # (Likely their open-source models output reasoning_text rather than summaries)
+                 for reasoning_content in output_item.content:
+                     if reasoning_content.type == "reasoning_text":
+                         parts.append(Thought(thought=reasoning_content.text))
+
+         else:
+             raise NotImplementedError(f"Unsupported output item: {output_item.type}")
+
+     if refused:
+         finish_reason = FinishReason.REFUSAL
+     elif details := response.incomplete_details:
+         finish_reason = INCOMPLETE_DETAILS_TO_FINISH_REASON.get(details.reason or "")
+
+     assistant_message = AssistantMessage(
+         content=parts,
+         provider="openai:responses",
+         model_id=model_id,
+         raw_message=[
+             _serialize_output_item(output_item) for output_item in response.output
+         ],
+     )
+
+     return assistant_message, finish_reason
+
+
+ class _OpenAIResponsesChunkProcessor:
+     """Processes OpenAI Responses streaming events and maintains state across chunks."""
+
+     def __init__(self) -> None:
+         self.current_content_type: Literal["text", "tool_call", "thought"] | None = None
+         self.refusal_encountered = False
+
+     def process_chunk(self, event: ResponseStreamEvent) -> ChunkIterator:
+         """Process a single OpenAI Responses stream event and yield the appropriate content chunks."""
+         yield RawStreamEventChunk(raw_stream_event=event)
+
+         if hasattr(event, "type"):
+             if event.type == "response.output_text.delta":
+                 if not self.current_content_type:
+                     yield TextStartChunk()
+                     self.current_content_type = "text"
+                 yield TextChunk(delta=event.delta)
+             elif event.type == "response.output_text.done":
+                 yield TextEndChunk()
+                 self.current_content_type = None
+             if event.type == "response.refusal.delta":
+                 if not self.current_content_type:
+                     yield TextStartChunk()
+                     self.current_content_type = "text"
+                 yield TextChunk(delta=event.delta)
+             elif event.type == "response.refusal.done":
+                 yield TextEndChunk()
+                 self.refusal_encountered = True
+                 self.current_content_type = None
+             elif event.type == "response.output_item.added":
+                 item = event.item
+                 if item.type == "function_call":
+                     self.current_tool_call_id = item.call_id
+                     self.current_tool_call_name = item.name
+                     yield ToolCallStartChunk(
+                         id=item.call_id,
+                         name=item.name,
+                     )
+                     self.current_content_type = "tool_call"
+             elif event.type == "response.function_call_arguments.delta":
+                 yield ToolCallChunk(delta=event.delta)
+             elif event.type == "response.function_call_arguments.done":
+                 yield ToolCallEndChunk()
+                 self.current_content_type = None
+             elif (
+                 event.type == "response.reasoning_text.delta"
+                 or event.type == "response.reasoning_summary_text.delta"
+             ):
+                 if not self.current_content_type:
+                     yield ThoughtStartChunk()
+                     self.current_content_type = "thought"
+                 yield ThoughtChunk(delta=event.delta)
+             elif (
+                 event.type == "response.reasoning_summary_text.done"
+                 or event.type == "response.reasoning_text.done"
+             ):
+                 yield ThoughtEndChunk()
+                 self.current_content_type = None
+             elif event.type == "response.incomplete":
+                 details = event.response.incomplete_details
+                 reason = (details and details.reason) or ""
+                 finish_reason = INCOMPLETE_DETAILS_TO_FINISH_REASON.get(reason)
+                 if finish_reason:
+                     yield FinishReasonChunk(finish_reason=finish_reason)
+             elif event.type == "response.completed":
+                 yield RawMessageChunk(
+                     raw_message=[
+                         _serialize_output_item(item) for item in event.response.output
+                     ]
+                 )
+                 if self.refusal_encountered:
+                     yield FinishReasonChunk(finish_reason=FinishReason.REFUSAL)
+
+
+ def decode_stream(
+     openai_stream: Stream[ResponseStreamEvent],
+ ) -> ChunkIterator:
+     """Returns a ChunkIterator converted from an OpenAI Stream[ResponseStreamEvent]"""
+     processor = _OpenAIResponsesChunkProcessor()
+     for event in openai_stream:
+         yield from processor.process_chunk(event)
+
+
+ async def decode_async_stream(
+     openai_stream: AsyncStream[ResponseStreamEvent],
+ ) -> AsyncChunkIterator:
+     """Returns an AsyncChunkIterator converted from an OpenAI AsyncStream[ResponseStreamEvent]"""
+     processor = _OpenAIResponsesChunkProcessor()
+     async for event in openai_stream:
+         for item in processor.process_chunk(event):
+             yield item
1
+ """OpenAI Responses message encoding and request preparation."""
2
+
3
+ from collections.abc import Sequence
4
+ from typing import TypedDict, cast
5
+
6
+ from openai import Omit
7
+ from openai.types.responses import (
8
+ FunctionToolParam,
9
+ ResponseFormatTextJSONSchemaConfigParam,
10
+ ResponseFunctionToolCallParam,
11
+ ResponseInputContentParam,
12
+ ResponseInputItemParam,
13
+ ResponseInputParam,
14
+ ResponseInputTextParam,
15
+ ResponseTextConfigParam,
16
+ ToolChoiceAllowedParam,
17
+ ToolChoiceFunctionParam,
18
+ response_create_params,
19
+ )
20
+ from openai.types.responses.easy_input_message_param import EasyInputMessageParam
21
+ from openai.types.responses.response_input_image_param import ResponseInputImageParam
22
+ from openai.types.responses.response_input_param import (
23
+ FunctionCallOutput,
24
+ Message as ResponseInputMessageParam,
25
+ )
26
+ from openai.types.shared_params import Reasoning
27
+ from openai.types.shared_params.response_format_json_object import (
28
+ ResponseFormatJSONObject,
29
+ )
30
+ from openai.types.shared_params.responses_model import ResponsesModel
31
+
32
+ from .....exceptions import FeatureNotSupportedError
33
+ from .....formatting import (
34
+ Format,
35
+ FormattableT,
36
+ _utils as _formatting_utils,
37
+ resolve_format,
38
+ )
39
+ from .....messages import AssistantMessage, Message, UserMessage
40
+ from .....tools import FORMAT_TOOL_NAME, BaseToolkit, ToolSchema
41
+ from ....base import Params, _utils as _base_utils
42
+ from ...shared import _utils as _shared_utils
43
+ from ..model_ids import OpenAIResponsesModelId
44
+ from .model_features import NON_REASONING_MODELS
45
+
46
+
47
+ class ResponseCreateKwargs(TypedDict, total=False):
48
+ """Kwargs to the OpenAI `client.responses.create` method."""
49
+
50
+ model: ResponsesModel
51
+ input: str | ResponseInputParam
52
+ instructions: str
53
+ temperature: float
54
+ max_output_tokens: int
55
+ top_p: float
56
+ tools: list[FunctionToolParam] | Omit
57
+ tool_choice: response_create_params.ToolChoice | Omit
58
+ text: ResponseTextConfigParam
59
+ reasoning: Reasoning | Omit
60
+
61
+
62
+ def _encode_user_message(
63
+ message: UserMessage,
64
+ ) -> ResponseInputParam:
65
+ if len(message.content) == 1 and (first := message.content[0]).type == "text":
66
+ return [EasyInputMessageParam(content=first.text, role="user")]
67
+
68
+ current_content: list[ResponseInputContentParam] = []
69
+ result: ResponseInputParam = []
70
+
71
+ def flush_message_content() -> None:
72
+ nonlocal current_content
73
+ if current_content:
74
+ result.append(
75
+ ResponseInputMessageParam(
76
+ content=current_content, role="user", type="message"
77
+ )
78
+ )
79
+ current_content = []
80
+
81
+ for part in message.content:
82
+ if part.type == "text":
83
+ current_content.append(
84
+ ResponseInputTextParam(text=part.text, type="input_text")
85
+ )
86
+ elif part.type == "image":
87
+ image_url = (
88
+ part.source.url
89
+ if part.source.type == "url_image_source"
90
+ else f"data:{part.source.mime_type};base64,{part.source.data}"
91
+ )
92
+
93
+ current_content.append(
94
+ ResponseInputImageParam(
95
+ image_url=image_url, detail="auto", type="input_image"
96
+ )
97
+ )
98
+ elif part.type == "tool_output":
99
+ flush_message_content()
100
+ result.append(
101
+ FunctionCallOutput(
102
+ call_id=part.id,
103
+ output=str(part.value),
104
+ type="function_call_output",
105
+ )
106
+ )
107
+ elif part.type == "audio":
108
+ raise FeatureNotSupportedError(
109
+ "audio input",
110
+ "openai:responses",
111
+ message='provider "openai:responses" does not support audio inputs. Try using "openai:completions" instead',
112
+ )
113
+ else:
114
+ raise NotImplementedError(
115
+ f"Unsupported user content part type: {part.type}"
116
+ )
117
+ flush_message_content()
118
+
119
+ return result
120
+
121
+
122
+ def _encode_assistant_message(
123
+ message: AssistantMessage, encode_thoughts: bool
124
+ ) -> ResponseInputParam:
125
+ result: ResponseInputParam = []
126
+
127
+ # Note: OpenAI does not provide any way to encode multiplie pieces of assistant-generated
128
+ # text as adjacent content within the same Message, except as part of
129
+ # ResponseOutputMessageParam which requires OpenAI-provided `id` and `status` on the message,
130
+ # and `annotations` and `logprobs` on the output text.
131
+ # Rather than generating a fake or nonexistent fields and triggering potentially undefined
132
+ # server-side behavior, we use `EasyInputMessageParam` for assistant generated text,
133
+ # with the caveat that assistant messages containing multiple text parts will be encoded
134
+ # as though they are separate messages.
135
+ # (It would seem as though the `Message` class in `response_input_param.py` would be suitable,
136
+ # especially as it supports the "assistant" role; however attempting to use it triggers a server
137
+ # error when text of type input_text is passed as part of an assistant message.)
138
+ for part in message.content:
139
+ if part.type == "text":
140
+ result.append(EasyInputMessageParam(content=part.text, role="assistant"))
141
+ elif part.type == "thought":
142
+ if encode_thoughts:
143
+ result.append(
144
+ EasyInputMessageParam(
145
+ content="**Thinking:** " + part.thought, role="assistant"
146
+ )
147
+ )
148
+ elif part.type == "tool_call":
149
+ result.append(
150
+ ResponseFunctionToolCallParam(
151
+ call_id=part.id,
152
+ name=part.name,
153
+ arguments=part.args,
154
+ type="function_call",
155
+ )
156
+ )
157
+ else:
158
+ raise NotImplementedError(
159
+ f"Unsupported assistant content part type: {part.type}"
160
+ )
161
+
162
+ return result
163
+
164
+
165
+ def _encode_message(
166
+ message: Message, model_id: OpenAIResponsesModelId, encode_thoughts: bool
167
+ ) -> ResponseInputParam:
168
+ """Convert a Mirascope Message to OpenAI Responses input items.
169
+
170
+ Returns a list because tool calls and tool outputs become separate input items
171
+ in the Responses API, not part of message content.
172
+ """
173
+
174
+ if message.role == "system":
175
+ # Responses API allows multiple "developer" messages, so rather than using the
176
+ # instructions field, we convert system messages as we find them.
177
+ # Unlike other LLM APIs, the system message does not need to be the first message.
178
+ return [EasyInputMessageParam(role="developer", content=message.content.text)]
179
+
180
+ if (
181
+ message.role == "assistant"
182
+ and message.provider == "openai:responses"
183
+ and message.model_id == model_id
184
+ and message.raw_message
185
+ and not encode_thoughts
186
+ ):
187
+ return cast(ResponseInputParam, message.raw_message)
188
+
189
+ if message.role == "assistant":
190
+ return _encode_assistant_message(message, encode_thoughts)
191
+ else:
192
+ return _encode_user_message(message)
193
+
194
+
195
+ def _convert_tool_to_function_tool_param(tool: ToolSchema) -> FunctionToolParam:
196
+ """Convert a Mirascope ToolSchema to OpenAI Responses FunctionToolParam."""
197
+ schema_dict = tool.parameters.model_dump(by_alias=True, exclude_none=True)
198
+ schema_dict["type"] = "object"
199
+ _shared_utils._ensure_additional_properties_false(schema_dict)
200
+
201
+ return FunctionToolParam(
202
+ type="function",
203
+ name=tool.name,
204
+ description=tool.description,
205
+ parameters=schema_dict,
206
+ strict=tool.strict,
207
+ )
208
+
209
+
210
+ def _create_strict_response_format(
211
+ format: Format[FormattableT],
212
+ ) -> ResponseFormatTextJSONSchemaConfigParam:
213
+ """Create OpenAI Responses strict response format from a Mirascope Format.
214
+
215
+ Args:
216
+ format: The `Format` instance containing schema and metadata
217
+
218
+ Returns:
219
+ ResponseFormatTextJSONSchemaConfigParam for strict structured outputs
220
+ """
221
+ schema = format.schema.copy()
222
+ _shared_utils._ensure_additional_properties_false(schema)
223
+
224
+ response_format: ResponseFormatTextJSONSchemaConfigParam = {
225
+ "type": "json_schema",
226
+ "name": format.name,
227
+ "schema": schema,
228
+ }
229
+ if format.description:
230
+ response_format["description"] = format.description
231
+ response_format["strict"] = True
232
+
233
+ return response_format
234
+
235
+
236
+ def _compute_reasoning(thinking: bool) -> Reasoning:
237
+ """Compute the OpenAI `Reasoning` config based on thinking settings."""
238
+ if thinking:
239
+ return {"effort": "medium", "summary": "auto"}
240
+ else:
241
+ return {"effort": "minimal"}
242
+
243
+
244
+ def encode_request(
245
+ *,
246
+ model_id: OpenAIResponsesModelId,
247
+ messages: Sequence[Message],
248
+ tools: Sequence[ToolSchema] | BaseToolkit | None,
249
+ format: type[FormattableT] | Format[FormattableT] | None,
250
+ params: Params,
251
+ ) -> tuple[Sequence[Message], Format[FormattableT] | None, ResponseCreateKwargs]:
252
+ """Prepares a request for the `OpenAI.responses.create` method."""
253
+ kwargs: ResponseCreateKwargs = ResponseCreateKwargs(
254
+ {
255
+ "model": model_id,
256
+ }
257
+ )
258
+ encode_thoughts = False
259
+
260
+ with _base_utils.ensure_all_params_accessed(
261
+ params=params,
262
+ provider="openai:responses",
263
+ unsupported_params=["top_k", "seed", "stop_sequences"],
264
+ ) as param_accessor:
265
+ if param_accessor.temperature is not None:
266
+ kwargs["temperature"] = param_accessor.temperature
267
+ if param_accessor.max_tokens is not None:
268
+ kwargs["max_output_tokens"] = param_accessor.max_tokens
269
+ if param_accessor.top_p is not None:
270
+ kwargs["top_p"] = param_accessor.top_p
271
+ if param_accessor.thinking is not None:
272
+ if model_id in NON_REASONING_MODELS:
273
+ param_accessor.emit_warning_for_unused_param(
274
+ "thinking", param_accessor.thinking, "openai:responses", model_id
275
+ )
276
+ else:
277
+ # Assume model supports reasoning unless explicitly listed as non-reasoning
278
+ # This ensures new reasoning models work immediately without code updates
279
+ kwargs["reasoning"] = _compute_reasoning(param_accessor.thinking)
280
+ if param_accessor.encode_thoughts_as_text:
281
+ encode_thoughts = True
282
+
283
+ tools = tools.tools if isinstance(tools, BaseToolkit) else tools or []
284
+ openai_tools = [_convert_tool_to_function_tool_param(tool) for tool in tools]
285
+
286
+ model_supports_strict = (
287
+ model_id not in _shared_utils.MODELS_WITHOUT_JSON_SCHEMA_SUPPORT
288
+ )
289
+ default_mode = "strict" if model_supports_strict else "tool"
290
+
291
+ format = resolve_format(format, default_mode=default_mode)
292
+ if format is not None:
293
+ if format.mode == "strict":
294
+ kwargs["text"] = {"format": _create_strict_response_format(format)}
295
+ elif format.mode == "tool":
296
+ format_tool_shared_utils = _formatting_utils.create_tool_schema(format)
297
+ openai_tools.append(
298
+ _convert_tool_to_function_tool_param(format_tool_shared_utils)
299
+ )
300
+ if tools:
301
+ kwargs["tool_choice"] = ToolChoiceAllowedParam(
302
+ type="allowed_tools",
303
+ mode="required",
304
+ tools=[
305
+ {"type": "function", "name": tool["name"]}
306
+ for tool in openai_tools
307
+ ],
308
+ )
309
+ else:
310
+ kwargs["tool_choice"] = ToolChoiceFunctionParam(
311
+ type="function",
312
+ name=FORMAT_TOOL_NAME,
313
+ )
314
+ elif (
315
+ format.mode == "json"
316
+ and model_id not in _shared_utils.MODELS_WITHOUT_JSON_OBJECT_SUPPORT
317
+ ):
318
+ kwargs["text"] = {"format": ResponseFormatJSONObject(type="json_object")}
319
+
320
+ if format.formatting_instructions:
321
+ messages = _base_utils.add_system_instructions(
322
+ messages, format.formatting_instructions
323
+ )
324
+
325
+ encoded_messages: list[ResponseInputItemParam] = []
326
+ for message in messages:
327
+ encoded_messages.extend(_encode_message(message, model_id, encode_thoughts))
328
+ kwargs["input"] = encoded_messages
329
+
330
+ if openai_tools:
331
+ kwargs["tools"] = openai_tools
332
+
333
+ return messages, format, kwargs
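
As a quick reference, the parameter handling in `encode_request` amounts to renaming Mirascope's generic sampling params onto `client.responses.create` kwargs, with `thinking` mapped through `_compute_reasoning`. A standalone sketch of that mapping follows; the plain-dict input and the helper name are illustrative stand-ins for the real `Params` accessor, not the package's API:

def map_params_sketch(params: dict) -> dict:
    """Illustrative mapping of generic params onto Responses API kwargs."""
    kwargs: dict = {}
    if (temperature := params.get("temperature")) is not None:
        kwargs["temperature"] = temperature
    if (max_tokens := params.get("max_tokens")) is not None:
        kwargs["max_output_tokens"] = max_tokens  # renamed for the Responses API
    if (top_p := params.get("top_p")) is not None:
        kwargs["top_p"] = top_p
    if (thinking := params.get("thinking")) is not None:
        # Mirrors _compute_reasoning above: medium effort with summaries when
        # thinking is enabled, minimal effort otherwise.
        kwargs["reasoning"] = (
            {"effort": "medium", "summary": "auto"}
            if thinking
            else {"effort": "minimal"}
        )
    return kwargs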

mirascope/llm/clients/openai/responses/_utils/model_features.py
@@ -0,0 +1,87 @@
+ """OpenAI Responses models categorized by reasoning support.
+
+ This file is auto-generated by scripts/update_openai_responses_model_features.py
+ Run that script to update these sets when OpenAI releases new models.
+ """
+
+ REASONING_MODELS: set[str] = {
+     "codex-mini-latest",
+     "gpt-5",
+     "gpt-5-2025-08-07",
+     "gpt-5-mini",
+     "gpt-5-mini-2025-08-07",
+     "gpt-5-nano",
+     "gpt-5-nano-2025-08-07",
+     "o1",
+     "o1-2024-12-17",
+     "o1-pro",
+     "o1-pro-2025-03-19",
+     "o3",
+     "o3-2025-04-16",
+     "o3-mini",
+     "o3-mini-2025-01-31",
+     "o3-pro",
+     "o3-pro-2025-06-10",
+     "o4-mini",
+     "o4-mini-2025-04-16",
+ }
+ """Models that have been tested and confirmed to support the reasoning parameter."""
+
+ NON_REASONING_MODELS: set[str] = {
+     "chatgpt-4o-latest",
+     "gpt-3.5-turbo",
+     "gpt-3.5-turbo-0125",
+     "gpt-3.5-turbo-1106",
+     "gpt-4",
+     "gpt-4-0125-preview",
+     "gpt-4-0314",
+     "gpt-4-0613",
+     "gpt-4-1106-preview",
+     "gpt-4-turbo",
+     "gpt-4-turbo-2024-04-09",
+     "gpt-4-turbo-preview",
+     "gpt-4.1",
+     "gpt-4.1-2025-04-14",
+     "gpt-4.1-mini",
+     "gpt-4.1-mini-2025-04-14",
+     "gpt-4.1-nano",
+     "gpt-4.1-nano-2025-04-14",
+     "gpt-4o",
+     "gpt-4o-2024-05-13",
+     "gpt-4o-2024-08-06",
+     "gpt-4o-2024-11-20",
+     "gpt-4o-mini",
+     "gpt-4o-mini-2024-07-18",
+     "gpt-5-chat-latest",
+ }
+ """Models that have been tested and confirmed to NOT support the reasoning parameter."""
+
+ NON_EXISTENT_MODELS: set[str] = {
+     "gpt-3.5-turbo-0301",
+     "gpt-3.5-turbo-0613",
+     "gpt-3.5-turbo-16k",
+     "gpt-3.5-turbo-16k-0613",
+     "gpt-4-32k",
+     "gpt-4-32k-0314",
+     "gpt-4-32k-0613",
+     "gpt-4-vision-preview",
+ }
+ """Models that are listed in OpenAI's types but no longer exist in their API."""
+
+ NO_RESPONSES_API_SUPPORT_MODELS: set[str] = {
+     "gpt-4o-audio-preview",
+     "gpt-4o-audio-preview-2024-10-01",
+     "gpt-4o-audio-preview-2024-12-17",
+     "gpt-4o-audio-preview-2025-06-03",
+     "gpt-4o-mini-audio-preview",
+     "gpt-4o-mini-audio-preview-2024-12-17",
+     "gpt-4o-mini-search-preview",
+     "gpt-4o-mini-search-preview-2025-03-11",
+     "gpt-4o-search-preview",
+     "gpt-4o-search-preview-2025-03-11",
+     "o1-mini",
+     "o1-mini-2024-09-12",
+     "o1-preview",
+     "o1-preview-2024-09-12",
+ }
+ """Models that do not support the Responses API."""