mirascope 1.25.7__py3-none-any.whl → 2.0.0a0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (474)
  1. mirascope/__init__.py +3 -59
  2. mirascope/graphs/__init__.py +22 -0
  3. mirascope/{experimental/graphs → graphs}/finite_state_machine.py +70 -159
  4. mirascope/llm/__init__.py +206 -16
  5. mirascope/llm/agents/__init__.py +15 -0
  6. mirascope/llm/agents/agent.py +97 -0
  7. mirascope/llm/agents/agent_template.py +45 -0
  8. mirascope/llm/agents/decorator.py +176 -0
  9. mirascope/llm/calls/__init__.py +16 -0
  10. mirascope/llm/calls/base_call.py +33 -0
  11. mirascope/llm/calls/calls.py +315 -0
  12. mirascope/llm/calls/decorator.py +255 -0
  13. mirascope/llm/clients/__init__.py +34 -0
  14. mirascope/llm/clients/anthropic/__init__.py +11 -0
  15. mirascope/llm/clients/anthropic/_utils/__init__.py +13 -0
  16. mirascope/llm/clients/anthropic/_utils/decode.py +244 -0
  17. mirascope/llm/clients/anthropic/_utils/encode.py +243 -0
  18. mirascope/llm/clients/anthropic/clients.py +819 -0
  19. mirascope/llm/clients/anthropic/model_ids.py +8 -0
  20. mirascope/llm/clients/base/__init__.py +15 -0
  21. mirascope/llm/clients/base/_utils.py +192 -0
  22. mirascope/llm/clients/base/client.py +1256 -0
  23. mirascope/llm/clients/base/kwargs.py +12 -0
  24. mirascope/llm/clients/base/params.py +93 -0
  25. mirascope/llm/clients/google/__init__.py +6 -0
  26. mirascope/llm/clients/google/_utils/__init__.py +13 -0
  27. mirascope/llm/clients/google/_utils/decode.py +231 -0
  28. mirascope/llm/clients/google/_utils/encode.py +279 -0
  29. mirascope/llm/clients/google/clients.py +853 -0
  30. mirascope/llm/clients/google/message.py +7 -0
  31. mirascope/llm/clients/google/model_ids.py +15 -0
  32. mirascope/llm/clients/openai/__init__.py +25 -0
  33. mirascope/llm/clients/openai/completions/__init__.py +9 -0
  34. mirascope/llm/clients/openai/completions/_utils/__init__.py +13 -0
  35. mirascope/llm/clients/openai/completions/_utils/decode.py +187 -0
  36. mirascope/llm/clients/openai/completions/_utils/encode.py +358 -0
  37. mirascope/llm/clients/openai/completions/_utils/model_features.py +81 -0
  38. mirascope/llm/clients/openai/completions/clients.py +833 -0
  39. mirascope/llm/clients/openai/completions/model_ids.py +8 -0
  40. mirascope/llm/clients/openai/responses/__init__.py +9 -0
  41. mirascope/llm/clients/openai/responses/_utils/__init__.py +13 -0
  42. mirascope/llm/clients/openai/responses/_utils/decode.py +194 -0
  43. mirascope/llm/clients/openai/responses/_utils/encode.py +333 -0
  44. mirascope/llm/clients/openai/responses/_utils/model_features.py +87 -0
  45. mirascope/llm/clients/openai/responses/clients.py +832 -0
  46. mirascope/llm/clients/openai/responses/model_ids.py +8 -0
  47. mirascope/llm/clients/openai/shared/__init__.py +7 -0
  48. mirascope/llm/clients/openai/shared/_utils.py +55 -0
  49. mirascope/llm/clients/providers.py +175 -0
  50. mirascope/llm/content/__init__.py +70 -0
  51. mirascope/llm/content/audio.py +173 -0
  52. mirascope/llm/content/document.py +94 -0
  53. mirascope/llm/content/image.py +206 -0
  54. mirascope/llm/content/text.py +47 -0
  55. mirascope/llm/content/thought.py +58 -0
  56. mirascope/llm/content/tool_call.py +63 -0
  57. mirascope/llm/content/tool_output.py +26 -0
  58. mirascope/llm/context/__init__.py +6 -0
  59. mirascope/llm/context/_utils.py +28 -0
  60. mirascope/llm/context/context.py +24 -0
  61. mirascope/llm/exceptions.py +105 -0
  62. mirascope/llm/formatting/__init__.py +22 -0
  63. mirascope/llm/formatting/_utils.py +74 -0
  64. mirascope/llm/formatting/format.py +104 -0
  65. mirascope/llm/formatting/from_call_args.py +30 -0
  66. mirascope/llm/formatting/partial.py +58 -0
  67. mirascope/llm/formatting/types.py +109 -0
  68. mirascope/llm/mcp/__init__.py +5 -0
  69. mirascope/llm/mcp/client.py +118 -0
  70. mirascope/llm/messages/__init__.py +32 -0
  71. mirascope/llm/messages/message.py +182 -0
  72. mirascope/llm/models/__init__.py +16 -0
  73. mirascope/llm/models/models.py +1243 -0
  74. mirascope/llm/prompts/__init__.py +33 -0
  75. mirascope/llm/prompts/_utils.py +60 -0
  76. mirascope/llm/prompts/decorator.py +286 -0
  77. mirascope/llm/prompts/protocols.py +99 -0
  78. mirascope/llm/responses/__init__.py +57 -0
  79. mirascope/llm/responses/_utils.py +56 -0
  80. mirascope/llm/responses/base_response.py +91 -0
  81. mirascope/llm/responses/base_stream_response.py +697 -0
  82. mirascope/llm/responses/finish_reason.py +27 -0
  83. mirascope/llm/responses/response.py +345 -0
  84. mirascope/llm/responses/root_response.py +177 -0
  85. mirascope/llm/responses/stream_response.py +572 -0
  86. mirascope/llm/responses/streams.py +363 -0
  87. mirascope/llm/tools/__init__.py +40 -0
  88. mirascope/llm/tools/_utils.py +25 -0
  89. mirascope/llm/tools/decorator.py +175 -0
  90. mirascope/llm/tools/protocols.py +96 -0
  91. mirascope/llm/tools/tool_schema.py +246 -0
  92. mirascope/llm/tools/toolkit.py +152 -0
  93. mirascope/llm/tools/tools.py +169 -0
  94. mirascope/llm/types/__init__.py +22 -0
  95. mirascope/llm/types/dataclass.py +9 -0
  96. mirascope/llm/types/jsonable.py +44 -0
  97. mirascope/llm/types/type_vars.py +19 -0
  98. mirascope-2.0.0a0.dist-info/METADATA +117 -0
  99. mirascope-2.0.0a0.dist-info/RECORD +101 -0
  100. mirascope/beta/__init__.py +0 -3
  101. mirascope/beta/openai/__init__.py +0 -17
  102. mirascope/beta/openai/realtime/__init__.py +0 -13
  103. mirascope/beta/openai/realtime/_utils/__init__.py +0 -3
  104. mirascope/beta/openai/realtime/_utils/_audio.py +0 -74
  105. mirascope/beta/openai/realtime/_utils/_protocols.py +0 -50
  106. mirascope/beta/openai/realtime/realtime.py +0 -500
  107. mirascope/beta/openai/realtime/recording.py +0 -98
  108. mirascope/beta/openai/realtime/tool.py +0 -113
  109. mirascope/beta/rag/__init__.py +0 -24
  110. mirascope/beta/rag/base/__init__.py +0 -22
  111. mirascope/beta/rag/base/chunkers/__init__.py +0 -2
  112. mirascope/beta/rag/base/chunkers/base_chunker.py +0 -37
  113. mirascope/beta/rag/base/chunkers/text_chunker.py +0 -33
  114. mirascope/beta/rag/base/config.py +0 -8
  115. mirascope/beta/rag/base/document.py +0 -11
  116. mirascope/beta/rag/base/embedders.py +0 -35
  117. mirascope/beta/rag/base/embedding_params.py +0 -18
  118. mirascope/beta/rag/base/embedding_response.py +0 -30
  119. mirascope/beta/rag/base/query_results.py +0 -7
  120. mirascope/beta/rag/base/vectorstore_params.py +0 -18
  121. mirascope/beta/rag/base/vectorstores.py +0 -37
  122. mirascope/beta/rag/chroma/__init__.py +0 -11
  123. mirascope/beta/rag/chroma/types.py +0 -62
  124. mirascope/beta/rag/chroma/vectorstores.py +0 -121
  125. mirascope/beta/rag/cohere/__init__.py +0 -11
  126. mirascope/beta/rag/cohere/embedders.py +0 -87
  127. mirascope/beta/rag/cohere/embedding_params.py +0 -29
  128. mirascope/beta/rag/cohere/embedding_response.py +0 -29
  129. mirascope/beta/rag/cohere/py.typed +0 -0
  130. mirascope/beta/rag/openai/__init__.py +0 -11
  131. mirascope/beta/rag/openai/embedders.py +0 -144
  132. mirascope/beta/rag/openai/embedding_params.py +0 -18
  133. mirascope/beta/rag/openai/embedding_response.py +0 -14
  134. mirascope/beta/rag/openai/py.typed +0 -0
  135. mirascope/beta/rag/pinecone/__init__.py +0 -19
  136. mirascope/beta/rag/pinecone/types.py +0 -143
  137. mirascope/beta/rag/pinecone/vectorstores.py +0 -148
  138. mirascope/beta/rag/weaviate/__init__.py +0 -6
  139. mirascope/beta/rag/weaviate/types.py +0 -92
  140. mirascope/beta/rag/weaviate/vectorstores.py +0 -103
  141. mirascope/core/__init__.py +0 -109
  142. mirascope/core/anthropic/__init__.py +0 -31
  143. mirascope/core/anthropic/_call.py +0 -67
  144. mirascope/core/anthropic/_call_kwargs.py +0 -13
  145. mirascope/core/anthropic/_thinking.py +0 -70
  146. mirascope/core/anthropic/_utils/__init__.py +0 -16
  147. mirascope/core/anthropic/_utils/_convert_common_call_params.py +0 -25
  148. mirascope/core/anthropic/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -21
  149. mirascope/core/anthropic/_utils/_convert_message_params.py +0 -102
  150. mirascope/core/anthropic/_utils/_get_json_output.py +0 -31
  151. mirascope/core/anthropic/_utils/_handle_stream.py +0 -113
  152. mirascope/core/anthropic/_utils/_message_param_converter.py +0 -154
  153. mirascope/core/anthropic/_utils/_setup_call.py +0 -146
  154. mirascope/core/anthropic/call_params.py +0 -44
  155. mirascope/core/anthropic/call_response.py +0 -226
  156. mirascope/core/anthropic/call_response_chunk.py +0 -152
  157. mirascope/core/anthropic/dynamic_config.py +0 -40
  158. mirascope/core/anthropic/py.typed +0 -0
  159. mirascope/core/anthropic/stream.py +0 -204
  160. mirascope/core/anthropic/tool.py +0 -101
  161. mirascope/core/azure/__init__.py +0 -31
  162. mirascope/core/azure/_call.py +0 -67
  163. mirascope/core/azure/_call_kwargs.py +0 -13
  164. mirascope/core/azure/_utils/__init__.py +0 -14
  165. mirascope/core/azure/_utils/_convert_common_call_params.py +0 -26
  166. mirascope/core/azure/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -21
  167. mirascope/core/azure/_utils/_convert_message_params.py +0 -121
  168. mirascope/core/azure/_utils/_get_credential.py +0 -33
  169. mirascope/core/azure/_utils/_get_json_output.py +0 -27
  170. mirascope/core/azure/_utils/_handle_stream.py +0 -130
  171. mirascope/core/azure/_utils/_message_param_converter.py +0 -117
  172. mirascope/core/azure/_utils/_setup_call.py +0 -183
  173. mirascope/core/azure/call_params.py +0 -59
  174. mirascope/core/azure/call_response.py +0 -215
  175. mirascope/core/azure/call_response_chunk.py +0 -105
  176. mirascope/core/azure/dynamic_config.py +0 -30
  177. mirascope/core/azure/py.typed +0 -0
  178. mirascope/core/azure/stream.py +0 -147
  179. mirascope/core/azure/tool.py +0 -93
  180. mirascope/core/base/__init__.py +0 -86
  181. mirascope/core/base/_call_factory.py +0 -256
  182. mirascope/core/base/_create.py +0 -253
  183. mirascope/core/base/_extract.py +0 -175
  184. mirascope/core/base/_extract_with_tools.py +0 -189
  185. mirascope/core/base/_partial.py +0 -95
  186. mirascope/core/base/_utils/__init__.py +0 -92
  187. mirascope/core/base/_utils/_base_message_param_converter.py +0 -22
  188. mirascope/core/base/_utils/_base_type.py +0 -26
  189. mirascope/core/base/_utils/_convert_base_model_to_base_tool.py +0 -48
  190. mirascope/core/base/_utils/_convert_base_type_to_base_tool.py +0 -24
  191. mirascope/core/base/_utils/_convert_function_to_base_tool.py +0 -139
  192. mirascope/core/base/_utils/_convert_messages_to_message_params.py +0 -178
  193. mirascope/core/base/_utils/_convert_provider_finish_reason_to_finish_reason.py +0 -20
  194. mirascope/core/base/_utils/_default_tool_docstring.py +0 -6
  195. mirascope/core/base/_utils/_extract_tool_return.py +0 -42
  196. mirascope/core/base/_utils/_fn_is_async.py +0 -24
  197. mirascope/core/base/_utils/_format_template.py +0 -32
  198. mirascope/core/base/_utils/_get_audio_type.py +0 -18
  199. mirascope/core/base/_utils/_get_common_usage.py +0 -20
  200. mirascope/core/base/_utils/_get_create_fn_or_async_create_fn.py +0 -137
  201. mirascope/core/base/_utils/_get_document_type.py +0 -7
  202. mirascope/core/base/_utils/_get_dynamic_configuration.py +0 -69
  203. mirascope/core/base/_utils/_get_fields_from_call_args.py +0 -34
  204. mirascope/core/base/_utils/_get_fn_args.py +0 -23
  205. mirascope/core/base/_utils/_get_image_dimensions.py +0 -39
  206. mirascope/core/base/_utils/_get_image_type.py +0 -26
  207. mirascope/core/base/_utils/_get_metadata.py +0 -17
  208. mirascope/core/base/_utils/_get_possible_user_message_param.py +0 -21
  209. mirascope/core/base/_utils/_get_prompt_template.py +0 -28
  210. mirascope/core/base/_utils/_get_template_values.py +0 -51
  211. mirascope/core/base/_utils/_get_template_variables.py +0 -38
  212. mirascope/core/base/_utils/_get_unsupported_tool_config_keys.py +0 -10
  213. mirascope/core/base/_utils/_is_prompt_template.py +0 -24
  214. mirascope/core/base/_utils/_json_mode_content.py +0 -17
  215. mirascope/core/base/_utils/_messages_decorator.py +0 -121
  216. mirascope/core/base/_utils/_parse_content_template.py +0 -323
  217. mirascope/core/base/_utils/_parse_prompt_messages.py +0 -63
  218. mirascope/core/base/_utils/_pil_image_to_bytes.py +0 -13
  219. mirascope/core/base/_utils/_protocols.py +0 -901
  220. mirascope/core/base/_utils/_setup_call.py +0 -79
  221. mirascope/core/base/_utils/_setup_extract_tool.py +0 -30
  222. mirascope/core/base/call_kwargs.py +0 -13
  223. mirascope/core/base/call_params.py +0 -36
  224. mirascope/core/base/call_response.py +0 -338
  225. mirascope/core/base/call_response_chunk.py +0 -130
  226. mirascope/core/base/dynamic_config.py +0 -82
  227. mirascope/core/base/from_call_args.py +0 -30
  228. mirascope/core/base/merge_decorators.py +0 -59
  229. mirascope/core/base/message_param.py +0 -175
  230. mirascope/core/base/messages.py +0 -116
  231. mirascope/core/base/metadata.py +0 -13
  232. mirascope/core/base/prompt.py +0 -497
  233. mirascope/core/base/response_model_config_dict.py +0 -9
  234. mirascope/core/base/stream.py +0 -479
  235. mirascope/core/base/stream_config.py +0 -11
  236. mirascope/core/base/structured_stream.py +0 -296
  237. mirascope/core/base/tool.py +0 -214
  238. mirascope/core/base/toolkit.py +0 -176
  239. mirascope/core/base/types.py +0 -344
  240. mirascope/core/bedrock/__init__.py +0 -34
  241. mirascope/core/bedrock/_call.py +0 -68
  242. mirascope/core/bedrock/_call_kwargs.py +0 -12
  243. mirascope/core/bedrock/_types.py +0 -104
  244. mirascope/core/bedrock/_utils/__init__.py +0 -14
  245. mirascope/core/bedrock/_utils/_convert_common_call_params.py +0 -39
  246. mirascope/core/bedrock/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -23
  247. mirascope/core/bedrock/_utils/_convert_message_params.py +0 -111
  248. mirascope/core/bedrock/_utils/_get_json_output.py +0 -30
  249. mirascope/core/bedrock/_utils/_handle_stream.py +0 -104
  250. mirascope/core/bedrock/_utils/_message_param_converter.py +0 -172
  251. mirascope/core/bedrock/_utils/_setup_call.py +0 -258
  252. mirascope/core/bedrock/call_params.py +0 -38
  253. mirascope/core/bedrock/call_response.py +0 -248
  254. mirascope/core/bedrock/call_response_chunk.py +0 -111
  255. mirascope/core/bedrock/dynamic_config.py +0 -37
  256. mirascope/core/bedrock/py.typed +0 -0
  257. mirascope/core/bedrock/stream.py +0 -154
  258. mirascope/core/bedrock/tool.py +0 -100
  259. mirascope/core/cohere/__init__.py +0 -30
  260. mirascope/core/cohere/_call.py +0 -67
  261. mirascope/core/cohere/_call_kwargs.py +0 -11
  262. mirascope/core/cohere/_types.py +0 -20
  263. mirascope/core/cohere/_utils/__init__.py +0 -14
  264. mirascope/core/cohere/_utils/_convert_common_call_params.py +0 -26
  265. mirascope/core/cohere/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -24
  266. mirascope/core/cohere/_utils/_convert_message_params.py +0 -32
  267. mirascope/core/cohere/_utils/_get_json_output.py +0 -30
  268. mirascope/core/cohere/_utils/_handle_stream.py +0 -35
  269. mirascope/core/cohere/_utils/_message_param_converter.py +0 -54
  270. mirascope/core/cohere/_utils/_setup_call.py +0 -150
  271. mirascope/core/cohere/call_params.py +0 -62
  272. mirascope/core/cohere/call_response.py +0 -205
  273. mirascope/core/cohere/call_response_chunk.py +0 -125
  274. mirascope/core/cohere/dynamic_config.py +0 -32
  275. mirascope/core/cohere/py.typed +0 -0
  276. mirascope/core/cohere/stream.py +0 -113
  277. mirascope/core/cohere/tool.py +0 -93
  278. mirascope/core/costs/__init__.py +0 -5
  279. mirascope/core/costs/_anthropic_calculate_cost.py +0 -219
  280. mirascope/core/costs/_azure_calculate_cost.py +0 -11
  281. mirascope/core/costs/_bedrock_calculate_cost.py +0 -15
  282. mirascope/core/costs/_cohere_calculate_cost.py +0 -44
  283. mirascope/core/costs/_gemini_calculate_cost.py +0 -67
  284. mirascope/core/costs/_google_calculate_cost.py +0 -427
  285. mirascope/core/costs/_groq_calculate_cost.py +0 -156
  286. mirascope/core/costs/_litellm_calculate_cost.py +0 -11
  287. mirascope/core/costs/_mistral_calculate_cost.py +0 -64
  288. mirascope/core/costs/_openai_calculate_cost.py +0 -416
  289. mirascope/core/costs/_vertex_calculate_cost.py +0 -67
  290. mirascope/core/costs/_xai_calculate_cost.py +0 -104
  291. mirascope/core/costs/calculate_cost.py +0 -86
  292. mirascope/core/gemini/__init__.py +0 -40
  293. mirascope/core/gemini/_call.py +0 -67
  294. mirascope/core/gemini/_call_kwargs.py +0 -12
  295. mirascope/core/gemini/_utils/__init__.py +0 -14
  296. mirascope/core/gemini/_utils/_convert_common_call_params.py +0 -39
  297. mirascope/core/gemini/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -23
  298. mirascope/core/gemini/_utils/_convert_message_params.py +0 -156
  299. mirascope/core/gemini/_utils/_get_json_output.py +0 -35
  300. mirascope/core/gemini/_utils/_handle_stream.py +0 -33
  301. mirascope/core/gemini/_utils/_message_param_converter.py +0 -209
  302. mirascope/core/gemini/_utils/_setup_call.py +0 -149
  303. mirascope/core/gemini/call_params.py +0 -52
  304. mirascope/core/gemini/call_response.py +0 -216
  305. mirascope/core/gemini/call_response_chunk.py +0 -100
  306. mirascope/core/gemini/dynamic_config.py +0 -26
  307. mirascope/core/gemini/stream.py +0 -120
  308. mirascope/core/gemini/tool.py +0 -104
  309. mirascope/core/google/__init__.py +0 -29
  310. mirascope/core/google/_call.py +0 -67
  311. mirascope/core/google/_call_kwargs.py +0 -13
  312. mirascope/core/google/_utils/__init__.py +0 -14
  313. mirascope/core/google/_utils/_convert_common_call_params.py +0 -38
  314. mirascope/core/google/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -27
  315. mirascope/core/google/_utils/_convert_message_params.py +0 -297
  316. mirascope/core/google/_utils/_get_json_output.py +0 -37
  317. mirascope/core/google/_utils/_handle_stream.py +0 -58
  318. mirascope/core/google/_utils/_message_param_converter.py +0 -200
  319. mirascope/core/google/_utils/_setup_call.py +0 -201
  320. mirascope/core/google/_utils/_validate_media_type.py +0 -58
  321. mirascope/core/google/call_params.py +0 -22
  322. mirascope/core/google/call_response.py +0 -255
  323. mirascope/core/google/call_response_chunk.py +0 -135
  324. mirascope/core/google/dynamic_config.py +0 -26
  325. mirascope/core/google/stream.py +0 -199
  326. mirascope/core/google/tool.py +0 -146
  327. mirascope/core/groq/__init__.py +0 -30
  328. mirascope/core/groq/_call.py +0 -67
  329. mirascope/core/groq/_call_kwargs.py +0 -13
  330. mirascope/core/groq/_utils/__init__.py +0 -14
  331. mirascope/core/groq/_utils/_convert_common_call_params.py +0 -26
  332. mirascope/core/groq/_utils/_convert_message_params.py +0 -112
  333. mirascope/core/groq/_utils/_get_json_output.py +0 -27
  334. mirascope/core/groq/_utils/_handle_stream.py +0 -123
  335. mirascope/core/groq/_utils/_message_param_converter.py +0 -89
  336. mirascope/core/groq/_utils/_setup_call.py +0 -132
  337. mirascope/core/groq/call_params.py +0 -52
  338. mirascope/core/groq/call_response.py +0 -213
  339. mirascope/core/groq/call_response_chunk.py +0 -104
  340. mirascope/core/groq/dynamic_config.py +0 -29
  341. mirascope/core/groq/py.typed +0 -0
  342. mirascope/core/groq/stream.py +0 -135
  343. mirascope/core/groq/tool.py +0 -80
  344. mirascope/core/litellm/__init__.py +0 -28
  345. mirascope/core/litellm/_call.py +0 -67
  346. mirascope/core/litellm/_utils/__init__.py +0 -5
  347. mirascope/core/litellm/_utils/_setup_call.py +0 -109
  348. mirascope/core/litellm/call_params.py +0 -10
  349. mirascope/core/litellm/call_response.py +0 -24
  350. mirascope/core/litellm/call_response_chunk.py +0 -14
  351. mirascope/core/litellm/dynamic_config.py +0 -8
  352. mirascope/core/litellm/py.typed +0 -0
  353. mirascope/core/litellm/stream.py +0 -86
  354. mirascope/core/litellm/tool.py +0 -13
  355. mirascope/core/mistral/__init__.py +0 -36
  356. mirascope/core/mistral/_call.py +0 -65
  357. mirascope/core/mistral/_call_kwargs.py +0 -19
  358. mirascope/core/mistral/_utils/__init__.py +0 -14
  359. mirascope/core/mistral/_utils/_convert_common_call_params.py +0 -24
  360. mirascope/core/mistral/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -22
  361. mirascope/core/mistral/_utils/_convert_message_params.py +0 -122
  362. mirascope/core/mistral/_utils/_get_json_output.py +0 -34
  363. mirascope/core/mistral/_utils/_handle_stream.py +0 -139
  364. mirascope/core/mistral/_utils/_message_param_converter.py +0 -176
  365. mirascope/core/mistral/_utils/_setup_call.py +0 -164
  366. mirascope/core/mistral/call_params.py +0 -36
  367. mirascope/core/mistral/call_response.py +0 -205
  368. mirascope/core/mistral/call_response_chunk.py +0 -105
  369. mirascope/core/mistral/dynamic_config.py +0 -33
  370. mirascope/core/mistral/py.typed +0 -0
  371. mirascope/core/mistral/stream.py +0 -120
  372. mirascope/core/mistral/tool.py +0 -81
  373. mirascope/core/openai/__init__.py +0 -31
  374. mirascope/core/openai/_call.py +0 -67
  375. mirascope/core/openai/_call_kwargs.py +0 -13
  376. mirascope/core/openai/_utils/__init__.py +0 -14
  377. mirascope/core/openai/_utils/_convert_common_call_params.py +0 -26
  378. mirascope/core/openai/_utils/_convert_message_params.py +0 -148
  379. mirascope/core/openai/_utils/_get_json_output.py +0 -31
  380. mirascope/core/openai/_utils/_handle_stream.py +0 -138
  381. mirascope/core/openai/_utils/_message_param_converter.py +0 -105
  382. mirascope/core/openai/_utils/_setup_call.py +0 -155
  383. mirascope/core/openai/call_params.py +0 -92
  384. mirascope/core/openai/call_response.py +0 -273
  385. mirascope/core/openai/call_response_chunk.py +0 -139
  386. mirascope/core/openai/dynamic_config.py +0 -34
  387. mirascope/core/openai/py.typed +0 -0
  388. mirascope/core/openai/stream.py +0 -185
  389. mirascope/core/openai/tool.py +0 -101
  390. mirascope/core/py.typed +0 -0
  391. mirascope/core/vertex/__init__.py +0 -45
  392. mirascope/core/vertex/_call.py +0 -62
  393. mirascope/core/vertex/_call_kwargs.py +0 -12
  394. mirascope/core/vertex/_utils/__init__.py +0 -14
  395. mirascope/core/vertex/_utils/_convert_common_call_params.py +0 -37
  396. mirascope/core/vertex/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -23
  397. mirascope/core/vertex/_utils/_convert_message_params.py +0 -171
  398. mirascope/core/vertex/_utils/_get_json_output.py +0 -36
  399. mirascope/core/vertex/_utils/_handle_stream.py +0 -33
  400. mirascope/core/vertex/_utils/_message_param_converter.py +0 -133
  401. mirascope/core/vertex/_utils/_setup_call.py +0 -160
  402. mirascope/core/vertex/call_params.py +0 -24
  403. mirascope/core/vertex/call_response.py +0 -206
  404. mirascope/core/vertex/call_response_chunk.py +0 -99
  405. mirascope/core/vertex/dynamic_config.py +0 -28
  406. mirascope/core/vertex/stream.py +0 -119
  407. mirascope/core/vertex/tool.py +0 -101
  408. mirascope/core/xai/__init__.py +0 -28
  409. mirascope/core/xai/_call.py +0 -67
  410. mirascope/core/xai/_utils/__init__.py +0 -5
  411. mirascope/core/xai/_utils/_setup_call.py +0 -113
  412. mirascope/core/xai/call_params.py +0 -10
  413. mirascope/core/xai/call_response.py +0 -16
  414. mirascope/core/xai/call_response_chunk.py +0 -14
  415. mirascope/core/xai/dynamic_config.py +0 -8
  416. mirascope/core/xai/py.typed +0 -0
  417. mirascope/core/xai/stream.py +0 -57
  418. mirascope/core/xai/tool.py +0 -13
  419. mirascope/experimental/graphs/__init__.py +0 -5
  420. mirascope/integrations/__init__.py +0 -16
  421. mirascope/integrations/_middleware_factory.py +0 -403
  422. mirascope/integrations/langfuse/__init__.py +0 -3
  423. mirascope/integrations/langfuse/_utils.py +0 -114
  424. mirascope/integrations/langfuse/_with_langfuse.py +0 -70
  425. mirascope/integrations/logfire/__init__.py +0 -3
  426. mirascope/integrations/logfire/_utils.py +0 -225
  427. mirascope/integrations/logfire/_with_logfire.py +0 -63
  428. mirascope/integrations/otel/__init__.py +0 -10
  429. mirascope/integrations/otel/_utils.py +0 -270
  430. mirascope/integrations/otel/_with_hyperdx.py +0 -60
  431. mirascope/integrations/otel/_with_otel.py +0 -59
  432. mirascope/integrations/tenacity.py +0 -14
  433. mirascope/llm/_call.py +0 -401
  434. mirascope/llm/_context.py +0 -384
  435. mirascope/llm/_override.py +0 -3639
  436. mirascope/llm/_protocols.py +0 -500
  437. mirascope/llm/_response_metaclass.py +0 -31
  438. mirascope/llm/call_response.py +0 -158
  439. mirascope/llm/call_response_chunk.py +0 -66
  440. mirascope/llm/stream.py +0 -162
  441. mirascope/llm/tool.py +0 -64
  442. mirascope/mcp/__init__.py +0 -7
  443. mirascope/mcp/_utils.py +0 -288
  444. mirascope/mcp/client.py +0 -167
  445. mirascope/mcp/server.py +0 -356
  446. mirascope/mcp/tools.py +0 -110
  447. mirascope/py.typed +0 -0
  448. mirascope/retries/__init__.py +0 -11
  449. mirascope/retries/fallback.py +0 -131
  450. mirascope/retries/tenacity.py +0 -50
  451. mirascope/tools/__init__.py +0 -37
  452. mirascope/tools/base.py +0 -98
  453. mirascope/tools/system/__init__.py +0 -0
  454. mirascope/tools/system/_docker_operation.py +0 -166
  455. mirascope/tools/system/_file_system.py +0 -267
  456. mirascope/tools/web/__init__.py +0 -0
  457. mirascope/tools/web/_duckduckgo.py +0 -111
  458. mirascope/tools/web/_httpx.py +0 -125
  459. mirascope/tools/web/_parse_url_content.py +0 -94
  460. mirascope/tools/web/_requests.py +0 -54
  461. mirascope/v0/__init__.py +0 -43
  462. mirascope/v0/anthropic.py +0 -54
  463. mirascope/v0/base/__init__.py +0 -12
  464. mirascope/v0/base/calls.py +0 -118
  465. mirascope/v0/base/extractors.py +0 -122
  466. mirascope/v0/base/ops_utils.py +0 -207
  467. mirascope/v0/base/prompts.py +0 -48
  468. mirascope/v0/base/types.py +0 -14
  469. mirascope/v0/base/utils.py +0 -21
  470. mirascope/v0/openai.py +0 -54
  471. mirascope-1.25.7.dist-info/METADATA +0 -169
  472. mirascope-1.25.7.dist-info/RECORD +0 -378
  473. {mirascope-1.25.7.dist-info → mirascope-2.0.0a0.dist-info}/WHEEL +0 -0
  474. {mirascope-1.25.7.dist-info → mirascope-2.0.0a0.dist-info}/licenses/LICENSE +0 -0
mirascope/llm/clients/openai/completions/clients.py (new file)
@@ -0,0 +1,833 @@
+ """OpenAI client implementation."""
+
+ import os
+ from collections.abc import Sequence
+ from contextvars import ContextVar
+ from functools import lru_cache
+ from typing import overload
+ from typing_extensions import Unpack
+
+ from openai import AsyncOpenAI, OpenAI
+
+ from ....context import Context, DepsT
+ from ....formatting import Format, FormattableT
+ from ....messages import Message
+ from ....responses import (
+     AsyncContextResponse,
+     AsyncContextStreamResponse,
+     AsyncResponse,
+     AsyncStreamResponse,
+     ContextResponse,
+     ContextStreamResponse,
+     Response,
+     StreamResponse,
+ )
+ from ....tools import (
+     AsyncContextTool,
+     AsyncContextToolkit,
+     AsyncTool,
+     AsyncToolkit,
+     ContextTool,
+     ContextToolkit,
+     Tool,
+     Toolkit,
+ )
+ from ...base import BaseClient, Params
+ from . import _utils
+ from .model_ids import OpenAICompletionsModelId
+
+ OPENAI_COMPLETIONS_CLIENT_CONTEXT: ContextVar["OpenAICompletionsClient | None"] = (
+     ContextVar("OPENAI_COMPLETIONS_CLIENT_CONTEXT", default=None)
+ )
+
+
+ @lru_cache(maxsize=256)
+ def _openai_singleton(
+     api_key: str | None, base_url: str | None
+ ) -> "OpenAICompletionsClient":
+     """Return a cached OpenAI client instance for the given parameters."""
+     return OpenAICompletionsClient(api_key=api_key, base_url=base_url)
+
+
+ def client(
+     *, api_key: str | None = None, base_url: str | None = None
+ ) -> "OpenAICompletionsClient":
+     """Create or retrieve an OpenAI client with the given parameters.
+
+     If a client has already been created with these parameters, it will be
+     retrieved from cache and returned.
+
+     Args:
+         api_key: API key for authentication. If None, uses OPENAI_API_KEY env var.
+         base_url: Base URL for the API. If None, uses OPENAI_BASE_URL env var.
+
+     Returns:
+         An OpenAI client instance.
+     """
+     api_key = api_key or os.getenv("OPENAI_API_KEY")
+     base_url = base_url or os.getenv("OPENAI_BASE_URL")
+     return _openai_singleton(api_key, base_url)
+
+
+ def get_client() -> "OpenAICompletionsClient":
+     """Retrieve the current OpenAI client from context, or a global default.
+
+     Returns:
+         The current OpenAI client from context if available, otherwise
+         a global default client based on environment variables.
+     """
+     ctx_client = OPENAI_COMPLETIONS_CLIENT_CONTEXT.get()
+     return ctx_client or client()
+
+
+ class OpenAICompletionsClient(BaseClient[OpenAICompletionsModelId, OpenAI]):
+     """The client for the OpenAI LLM model."""
+
+     @property
+     def _context_var(self) -> ContextVar["OpenAICompletionsClient | None"]:
+         return OPENAI_COMPLETIONS_CLIENT_CONTEXT
+
+     def __init__(
+         self, *, api_key: str | None = None, base_url: str | None = None
+     ) -> None:
+         """Initialize the OpenAI client."""
+         self.client = OpenAI(api_key=api_key, base_url=base_url)
+         self.async_client = AsyncOpenAI(api_key=api_key, base_url=base_url)
+
+     @overload
+     def call(
+         self,
+         *,
+         model_id: OpenAICompletionsModelId,
+         messages: Sequence[Message],
+         tools: Sequence[Tool] | Toolkit | None = None,
+         format: None = None,
+         **params: Unpack[Params],
+     ) -> Response:
+         """Generate an `llm.Response` without a response format."""
+         ...
+
+     @overload
+     def call(
+         self,
+         *,
+         model_id: OpenAICompletionsModelId,
+         messages: Sequence[Message],
+         tools: Sequence[Tool] | Toolkit | None = None,
+         format: type[FormattableT] | Format[FormattableT],
+         **params: Unpack[Params],
+     ) -> Response[FormattableT]:
+         """Generate an `llm.Response` with a response format."""
+         ...
+
+     @overload
+     def call(
+         self,
+         *,
+         model_id: OpenAICompletionsModelId,
+         messages: Sequence[Message],
+         tools: Sequence[Tool] | Toolkit | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None,
+         **params: Unpack[Params],
+     ) -> Response | Response[FormattableT]:
+         """Generate an `llm.Response` with an optional response format."""
+         ...
+
+     def call(
+         self,
+         *,
+         model_id: OpenAICompletionsModelId,
+         messages: Sequence[Message],
+         tools: Sequence[Tool] | Toolkit | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None = None,
+         **params: Unpack[Params],
+     ) -> Response | Response[FormattableT]:
+         """Generate an `llm.Response` by synchronously calling the OpenAI ChatCompletions API.
+
+         Args:
+             model_id: Model identifier to use.
+             messages: Messages to send to the LLM.
+             tools: Optional tools that the model may invoke.
+             format: Optional response format specifier.
+             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+         Returns:
+             An `llm.Response` object containing the LLM-generated content.
+         """
+         input_messages, format, kwargs = _utils.encode_request(
+             model_id=model_id,
+             messages=messages,
+             tools=tools,
+             format=format,
+             params=params,
+         )
+
+         openai_response = self.client.chat.completions.create(**kwargs)
+
+         assistant_message, finish_reason = _utils.decode_response(
+             openai_response, model_id
+         )
+
+         return Response(
+             raw=openai_response,
+             provider="openai:completions",
+             model_id=model_id,
+             params=params,
+             tools=tools,
+             input_messages=input_messages,
+             assistant_message=assistant_message,
+             finish_reason=finish_reason,
+             format=format,
+         )
+
+     @overload
+     def context_call(
+         self,
+         *,
+         ctx: Context[DepsT],
+         model_id: OpenAICompletionsModelId,
+         messages: Sequence[Message],
+         tools: Sequence[Tool | ContextTool[DepsT]]
+         | ContextToolkit[DepsT]
+         | None = None,
+         format: None = None,
+         **params: Unpack[Params],
+     ) -> ContextResponse[DepsT, None]:
+         """Generate an `llm.ContextResponse` without a response format."""
+         ...
+
+     @overload
+     def context_call(
+         self,
+         *,
+         ctx: Context[DepsT],
+         model_id: OpenAICompletionsModelId,
+         messages: Sequence[Message],
+         tools: Sequence[Tool | ContextTool[DepsT]]
+         | ContextToolkit[DepsT]
+         | None = None,
+         format: type[FormattableT] | Format[FormattableT],
+         **params: Unpack[Params],
+     ) -> ContextResponse[DepsT, FormattableT]:
+         """Generate an `llm.ContextResponse` with a response format."""
+         ...
+
+     @overload
+     def context_call(
+         self,
+         *,
+         ctx: Context[DepsT],
+         model_id: OpenAICompletionsModelId,
+         messages: Sequence[Message],
+         tools: Sequence[Tool | ContextTool[DepsT]]
+         | ContextToolkit[DepsT]
+         | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None,
+         **params: Unpack[Params],
+     ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
+         """Generate an `llm.ContextResponse` with an optional response format."""
+         ...
+
+     def context_call(
+         self,
+         *,
+         ctx: Context[DepsT],
+         model_id: OpenAICompletionsModelId,
+         messages: Sequence[Message],
+         tools: Sequence[Tool | ContextTool[DepsT]]
+         | ContextToolkit[DepsT]
+         | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None = None,
+         **params: Unpack[Params],
+     ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
+         """Generate an `llm.ContextResponse` by synchronously calling the OpenAI ChatCompletions API.
+
+         Args:
+             ctx: Context object with dependencies for tools.
+             model_id: Model identifier to use.
+             messages: Messages to send to the LLM.
+             tools: Optional tools that the model may invoke.
+             format: Optional response format specifier.
+             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+         Returns:
+             An `llm.ContextResponse` object containing the LLM-generated content.
+         """
+         input_messages, format, kwargs = _utils.encode_request(
+             model_id=model_id,
+             messages=messages,
+             tools=tools,
+             format=format,
+             params=params,
+         )
+
+         openai_response = self.client.chat.completions.create(**kwargs)
+
+         assistant_message, finish_reason = _utils.decode_response(
+             openai_response, model_id
+         )
+
+         return ContextResponse(
+             raw=openai_response,
+             provider="openai:completions",
+             model_id=model_id,
+             params=params,
+             tools=tools,
+             input_messages=input_messages,
+             assistant_message=assistant_message,
+             finish_reason=finish_reason,
+             format=format,
+         )
+
+     @overload
+     async def call_async(
+         self,
+         *,
+         model_id: OpenAICompletionsModelId,
+         messages: Sequence[Message],
+         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
+         format: None = None,
+         **params: Unpack[Params],
+     ) -> AsyncResponse:
+         """Generate an `llm.AsyncResponse` without a response format."""
+         ...
+
+     @overload
+     async def call_async(
+         self,
+         *,
+         model_id: OpenAICompletionsModelId,
+         messages: Sequence[Message],
+         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
+         format: type[FormattableT] | Format[FormattableT],
+         **params: Unpack[Params],
+     ) -> AsyncResponse[FormattableT]:
+         """Generate an `llm.AsyncResponse` with a response format."""
+         ...
+
+     @overload
+     async def call_async(
+         self,
+         *,
+         model_id: OpenAICompletionsModelId,
+         messages: Sequence[Message],
+         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None,
+         **params: Unpack[Params],
+     ) -> AsyncResponse | AsyncResponse[FormattableT]:
+         """Generate an `llm.AsyncResponse` with an optional response format."""
+         ...
+
+     async def call_async(
+         self,
+         *,
+         model_id: OpenAICompletionsModelId,
+         messages: Sequence[Message],
+         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None = None,
+         **params: Unpack[Params],
+     ) -> AsyncResponse | AsyncResponse[FormattableT]:
+         """Generate an `llm.AsyncResponse` by asynchronously calling the OpenAI ChatCompletions API.
+
+         Args:
+             model_id: Model identifier to use.
+             messages: Messages to send to the LLM.
+             tools: Optional tools that the model may invoke.
+             format: Optional response format specifier.
+             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+         Returns:
+             An `llm.AsyncResponse` object containing the LLM-generated content.
+         """
+
+         input_messages, format, kwargs = _utils.encode_request(
+             model_id=model_id,
+             params=params,
+             messages=messages,
+             tools=tools,
+             format=format,
+         )
+
+         openai_response = await self.async_client.chat.completions.create(**kwargs)
+
+         assistant_message, finish_reason = _utils.decode_response(
+             openai_response, model_id
+         )
+
+         return AsyncResponse(
+             raw=openai_response,
+             provider="openai:completions",
+             model_id=model_id,
+             params=params,
+             tools=tools,
+             input_messages=input_messages,
+             assistant_message=assistant_message,
+             finish_reason=finish_reason,
+             format=format,
+         )
+
+     @overload
+     async def context_call_async(
+         self,
+         *,
+         ctx: Context[DepsT],
+         model_id: OpenAICompletionsModelId,
+         messages: Sequence[Message],
+         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
+         | AsyncContextToolkit[DepsT]
+         | None = None,
+         format: None = None,
+         **params: Unpack[Params],
+     ) -> AsyncContextResponse[DepsT, None]:
+         """Generate an `llm.AsyncContextResponse` without a response format."""
+         ...
+
+     @overload
+     async def context_call_async(
+         self,
+         *,
+         ctx: Context[DepsT],
+         model_id: OpenAICompletionsModelId,
+         messages: Sequence[Message],
+         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
+         | AsyncContextToolkit[DepsT]
+         | None = None,
+         format: type[FormattableT] | Format[FormattableT],
+         **params: Unpack[Params],
+     ) -> AsyncContextResponse[DepsT, FormattableT]:
+         """Generate an `llm.AsyncContextResponse` with a response format."""
+         ...
+
+     @overload
+     async def context_call_async(
+         self,
+         *,
+         ctx: Context[DepsT],
+         model_id: OpenAICompletionsModelId,
+         messages: Sequence[Message],
+         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
+         | AsyncContextToolkit[DepsT]
+         | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None,
+         **params: Unpack[Params],
+     ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
+         """Generate an `llm.AsyncContextResponse` with an optional response format."""
+         ...
+
+     async def context_call_async(
+         self,
+         *,
+         ctx: Context[DepsT],
+         model_id: OpenAICompletionsModelId,
+         messages: Sequence[Message],
+         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
+         | AsyncContextToolkit[DepsT]
+         | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None = None,
+         **params: Unpack[Params],
+     ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
+         """Generate an `llm.AsyncContextResponse` by asynchronously calling the OpenAI ChatCompletions API.
+
+         Args:
+             ctx: Context object with dependencies for tools.
+             model_id: Model identifier to use.
+             messages: Messages to send to the LLM.
+             tools: Optional tools that the model may invoke.
+             format: Optional response format specifier.
+             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+         Returns:
+             An `llm.AsyncContextResponse` object containing the LLM-generated content.
+         """
+         input_messages, format, kwargs = _utils.encode_request(
+             model_id=model_id,
+             params=params,
+             messages=messages,
+             tools=tools,
+             format=format,
+         )
+
+         openai_response = await self.async_client.chat.completions.create(**kwargs)
+
+         assistant_message, finish_reason = _utils.decode_response(
+             openai_response, model_id
+         )
+
+         return AsyncContextResponse(
+             raw=openai_response,
+             provider="openai:completions",
+             model_id=model_id,
+             params=params,
+             tools=tools,
+             input_messages=input_messages,
+             assistant_message=assistant_message,
+             finish_reason=finish_reason,
+             format=format,
+         )
+
+     @overload
+     def stream(
+         self,
+         *,
+         model_id: OpenAICompletionsModelId,
+         messages: Sequence[Message],
+         tools: Sequence[Tool] | Toolkit | None = None,
+         format: None = None,
+         **params: Unpack[Params],
+     ) -> StreamResponse:
+         """Stream an `llm.StreamResponse` without a response format."""
+         ...
+
+     @overload
+     def stream(
+         self,
+         *,
+         model_id: OpenAICompletionsModelId,
+         messages: Sequence[Message],
+         tools: Sequence[Tool] | Toolkit | None = None,
+         format: type[FormattableT] | Format[FormattableT],
+         **params: Unpack[Params],
+     ) -> StreamResponse[FormattableT]:
+         """Stream an `llm.StreamResponse` with a response format."""
+         ...
+
+     @overload
+     def stream(
+         self,
+         *,
+         model_id: OpenAICompletionsModelId,
+         messages: Sequence[Message],
+         tools: Sequence[Tool] | Toolkit | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None,
+         **params: Unpack[Params],
+     ) -> StreamResponse | StreamResponse[FormattableT]:
+         """Stream an `llm.StreamResponse` with an optional response format."""
+         ...
+
+     def stream(
+         self,
+         *,
+         model_id: OpenAICompletionsModelId,
+         messages: Sequence[Message],
+         tools: Sequence[Tool] | Toolkit | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None = None,
+         **params: Unpack[Params],
+     ) -> StreamResponse | StreamResponse[FormattableT]:
+         """Generate an `llm.StreamResponse` by synchronously streaming from the OpenAI ChatCompletions API.
+
+         Args:
+             model_id: Model identifier to use.
+             messages: Messages to send to the LLM.
+             tools: Optional tools that the model may invoke.
+             format: Optional response format specifier.
+             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+         Returns:
+             An `llm.StreamResponse` object for iterating over the LLM-generated content.
+         """
+         input_messages, format, kwargs = _utils.encode_request(
+             model_id=model_id,
+             messages=messages,
+             tools=tools,
+             format=format,
+             params=params,
+         )
+
+         openai_stream = self.client.chat.completions.create(
+             **kwargs,
+             stream=True,
+         )
+
+         chunk_iterator = _utils.decode_stream(openai_stream)
+
+         return StreamResponse(
+             provider="openai:completions",
+             model_id=model_id,
+             params=params,
+             tools=tools,
+             input_messages=input_messages,
+             chunk_iterator=chunk_iterator,
+             format=format,
+         )
+
+     @overload
+     def context_stream(
+         self,
+         *,
+         ctx: Context[DepsT],
+         model_id: OpenAICompletionsModelId,
+         messages: Sequence[Message],
+         tools: Sequence[Tool | ContextTool[DepsT]]
+         | ContextToolkit[DepsT]
+         | None = None,
+         format: None = None,
+         **params: Unpack[Params],
+     ) -> ContextStreamResponse[DepsT]:
+         """Stream an `llm.ContextStreamResponse` without a response format."""
+         ...
+
+     @overload
+     def context_stream(
+         self,
+         *,
+         ctx: Context[DepsT],
+         model_id: OpenAICompletionsModelId,
+         messages: Sequence[Message],
+         tools: Sequence[Tool | ContextTool[DepsT]]
+         | ContextToolkit[DepsT]
+         | None = None,
+         format: type[FormattableT] | Format[FormattableT],
+         **params: Unpack[Params],
+     ) -> ContextStreamResponse[DepsT, FormattableT]:
+         """Stream an `llm.ContextStreamResponse` with a response format."""
+         ...
+
+     @overload
+     def context_stream(
+         self,
+         *,
+         ctx: Context[DepsT],
+         model_id: OpenAICompletionsModelId,
+         messages: Sequence[Message],
+         tools: Sequence[Tool | ContextTool[DepsT]]
+         | ContextToolkit[DepsT]
+         | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None,
+         **params: Unpack[Params],
+     ) -> ContextStreamResponse[DepsT] | ContextStreamResponse[DepsT, FormattableT]:
+         """Stream an `llm.ContextStreamResponse` with an optional response format."""
+         ...
+
+     def context_stream(
+         self,
+         *,
+         ctx: Context[DepsT],
+         model_id: OpenAICompletionsModelId,
+         messages: Sequence[Message],
+         tools: Sequence[Tool | ContextTool[DepsT]]
+         | ContextToolkit[DepsT]
+         | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None = None,
+         **params: Unpack[Params],
+     ) -> ContextStreamResponse[DepsT] | ContextStreamResponse[DepsT, FormattableT]:
+         """Generate an `llm.ContextStreamResponse` by synchronously streaming from the OpenAI ChatCompletions API.
+
+         Args:
+             ctx: Context object with dependencies for tools.
+             model_id: Model identifier to use.
+             messages: Messages to send to the LLM.
+             tools: Optional tools that the model may invoke.
+             format: Optional response format specifier.
+             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+         Returns:
+             An `llm.ContextStreamResponse` object for iterating over the LLM-generated content.
+         """
+         input_messages, format, kwargs = _utils.encode_request(
+             model_id=model_id,
+             messages=messages,
+             tools=tools,
+             format=format,
+             params=params,
+         )
+
+         openai_stream = self.client.chat.completions.create(
+             **kwargs,
+             stream=True,
+         )
+
+         chunk_iterator = _utils.decode_stream(openai_stream)
+
+         return ContextStreamResponse(
+             provider="openai:completions",
+             model_id=model_id,
+             params=params,
+             tools=tools,
+             input_messages=input_messages,
+             chunk_iterator=chunk_iterator,
+             format=format,
+         )
+
+     @overload
+     async def stream_async(
+         self,
+         *,
+         model_id: OpenAICompletionsModelId,
+         messages: Sequence[Message],
+         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
+         format: None = None,
+         **params: Unpack[Params],
+     ) -> AsyncStreamResponse:
+         """Stream an `llm.AsyncStreamResponse` without a response format."""
+         ...
+
+     @overload
+     async def stream_async(
+         self,
+         *,
+         model_id: OpenAICompletionsModelId,
+         messages: Sequence[Message],
+         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
+         format: type[FormattableT] | Format[FormattableT],
+         **params: Unpack[Params],
+     ) -> AsyncStreamResponse[FormattableT]:
+         """Stream an `llm.AsyncStreamResponse` with a response format."""
+         ...
+
+     @overload
+     async def stream_async(
+         self,
+         *,
+         model_id: OpenAICompletionsModelId,
+         messages: Sequence[Message],
+         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None,
+         **params: Unpack[Params],
+     ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
+         """Stream an `llm.AsyncStreamResponse` with an optional response format."""
+         ...
+
+     async def stream_async(
+         self,
+         *,
+         model_id: OpenAICompletionsModelId,
+         messages: Sequence[Message],
+         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None = None,
+         **params: Unpack[Params],
+     ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
+         """Generate an `llm.AsyncStreamResponse` by asynchronously streaming from the OpenAI ChatCompletions API.
+
+         Args:
+             model_id: Model identifier to use.
+             messages: Messages to send to the LLM.
+             tools: Optional tools that the model may invoke.
+             format: Optional response format specifier.
+             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+         Returns:
+             An `llm.AsyncStreamResponse` object for asynchronously iterating over the LLM-generated content.
+         """
+
+         input_messages, format, kwargs = _utils.encode_request(
+             model_id=model_id,
+             messages=messages,
+             tools=tools,
+             format=format,
+             params=params,
+         )
+
+         openai_stream = await self.async_client.chat.completions.create(
+             **kwargs,
+             stream=True,
+         )
+
+         chunk_iterator = _utils.decode_async_stream(openai_stream)
+
+         return AsyncStreamResponse(
+             provider="openai:completions",
+             model_id=model_id,
+             params=params,
+             tools=tools,
+             input_messages=input_messages,
+             chunk_iterator=chunk_iterator,
+             format=format,
+         )
+
+     @overload
+     async def context_stream_async(
+         self,
+         *,
+         ctx: Context[DepsT],
+         model_id: OpenAICompletionsModelId,
+         messages: Sequence[Message],
+         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
+         | AsyncContextToolkit[DepsT]
+         | None = None,
+         format: None = None,
+         **params: Unpack[Params],
+     ) -> AsyncContextStreamResponse[DepsT]:
+         """Stream an `llm.AsyncContextStreamResponse` without a response format."""
+         ...
+
+     @overload
+     async def context_stream_async(
+         self,
+         *,
+         ctx: Context[DepsT],
+         model_id: OpenAICompletionsModelId,
+         messages: Sequence[Message],
+         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
+         | AsyncContextToolkit[DepsT]
+         | None = None,
+         format: type[FormattableT] | Format[FormattableT],
+         **params: Unpack[Params],
+     ) -> AsyncContextStreamResponse[DepsT, FormattableT]:
+         """Stream an `llm.AsyncContextStreamResponse` with a response format."""
+         ...
+
+     @overload
+     async def context_stream_async(
+         self,
+         *,
+         ctx: Context[DepsT],
+         model_id: OpenAICompletionsModelId,
+         messages: Sequence[Message],
+         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
+         | AsyncContextToolkit[DepsT]
+         | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None,
+         **params: Unpack[Params],
+     ) -> AsyncContextStreamResponse | AsyncContextStreamResponse[DepsT, FormattableT]:
+         """Stream an `llm.AsyncContextStreamResponse` with an optional response format."""
+         ...
+
+     async def context_stream_async(
+         self,
+         *,
+         ctx: Context[DepsT],
+         model_id: OpenAICompletionsModelId,
+         messages: Sequence[Message],
+         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
+         | AsyncContextToolkit[DepsT]
+         | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None = None,
+         **params: Unpack[Params],
+     ) -> AsyncContextStreamResponse | AsyncContextStreamResponse[DepsT, FormattableT]:
+         """Generate an `llm.AsyncContextStreamResponse` by asynchronously streaming from the OpenAI ChatCompletions API.
+
+         Args:
+             ctx: Context object with dependencies for tools.
+             model_id: Model identifier to use.
+             messages: Messages to send to the LLM.
+             tools: Optional tools that the model may invoke.
+             format: Optional response format specifier.
+             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+         Returns:
+             An `llm.AsyncContextStreamResponse` object for asynchronously iterating over the LLM-generated content.
+         """
+         input_messages, format, kwargs = _utils.encode_request(
+             model_id=model_id,
+             messages=messages,
+             tools=tools,
+             format=format,
+             params=params,
+         )
+
+         openai_stream = await self.async_client.chat.completions.create(
+             **kwargs,
+             stream=True,
+         )
+
+         chunk_iterator = _utils.decode_async_stream(openai_stream)
+
+         return AsyncContextStreamResponse(
+             provider="openai:completions",
+             model_id=model_id,
+             params=params,
+             tools=tools,
+             input_messages=input_messages,
+             chunk_iterator=chunk_iterator,
+             format=format,
+         )
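
For orientation, a minimal usage sketch of the module above. Assumptions not confirmed by this diff: it imports straight from the `clients.py` module shown here rather than from a higher-level re-export, the API key is a placeholder, and the model id string is illustrative; `Message` construction is elided since its API lives in `mirascope/llm/messages/message.py`.

    from mirascope.llm.clients.openai.completions.clients import client, get_client

    # client() memoizes on (api_key, base_url) via the lru_cache'd singleton,
    # so equal parameters return the same instance.
    a = client(api_key="sk-...")  # placeholder key
    b = client(api_key="sk-...")
    assert a is b

    # With no arguments, OPENAI_API_KEY / OPENAI_BASE_URL are read from the
    # environment; get_client() prefers a context-local client if one is active.
    default = get_client()

    response = default.call(
        model_id="gpt-4o-mini",  # illustrative model id
        messages=[...],  # a Sequence[Message]
    )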