mirascope 1.25.7__py3-none-any.whl → 2.0.0a0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (474)
  1. mirascope/__init__.py +3 -59
  2. mirascope/graphs/__init__.py +22 -0
  3. mirascope/{experimental/graphs → graphs}/finite_state_machine.py +70 -159
  4. mirascope/llm/__init__.py +206 -16
  5. mirascope/llm/agents/__init__.py +15 -0
  6. mirascope/llm/agents/agent.py +97 -0
  7. mirascope/llm/agents/agent_template.py +45 -0
  8. mirascope/llm/agents/decorator.py +176 -0
  9. mirascope/llm/calls/__init__.py +16 -0
  10. mirascope/llm/calls/base_call.py +33 -0
  11. mirascope/llm/calls/calls.py +315 -0
  12. mirascope/llm/calls/decorator.py +255 -0
  13. mirascope/llm/clients/__init__.py +34 -0
  14. mirascope/llm/clients/anthropic/__init__.py +11 -0
  15. mirascope/llm/clients/anthropic/_utils/__init__.py +13 -0
  16. mirascope/llm/clients/anthropic/_utils/decode.py +244 -0
  17. mirascope/llm/clients/anthropic/_utils/encode.py +243 -0
  18. mirascope/llm/clients/anthropic/clients.py +819 -0
  19. mirascope/llm/clients/anthropic/model_ids.py +8 -0
  20. mirascope/llm/clients/base/__init__.py +15 -0
  21. mirascope/llm/clients/base/_utils.py +192 -0
  22. mirascope/llm/clients/base/client.py +1256 -0
  23. mirascope/llm/clients/base/kwargs.py +12 -0
  24. mirascope/llm/clients/base/params.py +93 -0
  25. mirascope/llm/clients/google/__init__.py +6 -0
  26. mirascope/llm/clients/google/_utils/__init__.py +13 -0
  27. mirascope/llm/clients/google/_utils/decode.py +231 -0
  28. mirascope/llm/clients/google/_utils/encode.py +279 -0
  29. mirascope/llm/clients/google/clients.py +853 -0
  30. mirascope/llm/clients/google/message.py +7 -0
  31. mirascope/llm/clients/google/model_ids.py +15 -0
  32. mirascope/llm/clients/openai/__init__.py +25 -0
  33. mirascope/llm/clients/openai/completions/__init__.py +9 -0
  34. mirascope/llm/clients/openai/completions/_utils/__init__.py +13 -0
  35. mirascope/llm/clients/openai/completions/_utils/decode.py +187 -0
  36. mirascope/llm/clients/openai/completions/_utils/encode.py +358 -0
  37. mirascope/llm/clients/openai/completions/_utils/model_features.py +81 -0
  38. mirascope/llm/clients/openai/completions/clients.py +833 -0
  39. mirascope/llm/clients/openai/completions/model_ids.py +8 -0
  40. mirascope/llm/clients/openai/responses/__init__.py +9 -0
  41. mirascope/llm/clients/openai/responses/_utils/__init__.py +13 -0
  42. mirascope/llm/clients/openai/responses/_utils/decode.py +194 -0
  43. mirascope/llm/clients/openai/responses/_utils/encode.py +333 -0
  44. mirascope/llm/clients/openai/responses/_utils/model_features.py +87 -0
  45. mirascope/llm/clients/openai/responses/clients.py +832 -0
  46. mirascope/llm/clients/openai/responses/model_ids.py +8 -0
  47. mirascope/llm/clients/openai/shared/__init__.py +7 -0
  48. mirascope/llm/clients/openai/shared/_utils.py +55 -0
  49. mirascope/llm/clients/providers.py +175 -0
  50. mirascope/llm/content/__init__.py +70 -0
  51. mirascope/llm/content/audio.py +173 -0
  52. mirascope/llm/content/document.py +94 -0
  53. mirascope/llm/content/image.py +206 -0
  54. mirascope/llm/content/text.py +47 -0
  55. mirascope/llm/content/thought.py +58 -0
  56. mirascope/llm/content/tool_call.py +63 -0
  57. mirascope/llm/content/tool_output.py +26 -0
  58. mirascope/llm/context/__init__.py +6 -0
  59. mirascope/llm/context/_utils.py +28 -0
  60. mirascope/llm/context/context.py +24 -0
  61. mirascope/llm/exceptions.py +105 -0
  62. mirascope/llm/formatting/__init__.py +22 -0
  63. mirascope/llm/formatting/_utils.py +74 -0
  64. mirascope/llm/formatting/format.py +104 -0
  65. mirascope/llm/formatting/from_call_args.py +30 -0
  66. mirascope/llm/formatting/partial.py +58 -0
  67. mirascope/llm/formatting/types.py +109 -0
  68. mirascope/llm/mcp/__init__.py +5 -0
  69. mirascope/llm/mcp/client.py +118 -0
  70. mirascope/llm/messages/__init__.py +32 -0
  71. mirascope/llm/messages/message.py +182 -0
  72. mirascope/llm/models/__init__.py +16 -0
  73. mirascope/llm/models/models.py +1243 -0
  74. mirascope/llm/prompts/__init__.py +33 -0
  75. mirascope/llm/prompts/_utils.py +60 -0
  76. mirascope/llm/prompts/decorator.py +286 -0
  77. mirascope/llm/prompts/protocols.py +99 -0
  78. mirascope/llm/responses/__init__.py +57 -0
  79. mirascope/llm/responses/_utils.py +56 -0
  80. mirascope/llm/responses/base_response.py +91 -0
  81. mirascope/llm/responses/base_stream_response.py +697 -0
  82. mirascope/llm/responses/finish_reason.py +27 -0
  83. mirascope/llm/responses/response.py +345 -0
  84. mirascope/llm/responses/root_response.py +177 -0
  85. mirascope/llm/responses/stream_response.py +572 -0
  86. mirascope/llm/responses/streams.py +363 -0
  87. mirascope/llm/tools/__init__.py +40 -0
  88. mirascope/llm/tools/_utils.py +25 -0
  89. mirascope/llm/tools/decorator.py +175 -0
  90. mirascope/llm/tools/protocols.py +96 -0
  91. mirascope/llm/tools/tool_schema.py +246 -0
  92. mirascope/llm/tools/toolkit.py +152 -0
  93. mirascope/llm/tools/tools.py +169 -0
  94. mirascope/llm/types/__init__.py +22 -0
  95. mirascope/llm/types/dataclass.py +9 -0
  96. mirascope/llm/types/jsonable.py +44 -0
  97. mirascope/llm/types/type_vars.py +19 -0
  98. mirascope-2.0.0a0.dist-info/METADATA +117 -0
  99. mirascope-2.0.0a0.dist-info/RECORD +101 -0
  100. mirascope/beta/__init__.py +0 -3
  101. mirascope/beta/openai/__init__.py +0 -17
  102. mirascope/beta/openai/realtime/__init__.py +0 -13
  103. mirascope/beta/openai/realtime/_utils/__init__.py +0 -3
  104. mirascope/beta/openai/realtime/_utils/_audio.py +0 -74
  105. mirascope/beta/openai/realtime/_utils/_protocols.py +0 -50
  106. mirascope/beta/openai/realtime/realtime.py +0 -500
  107. mirascope/beta/openai/realtime/recording.py +0 -98
  108. mirascope/beta/openai/realtime/tool.py +0 -113
  109. mirascope/beta/rag/__init__.py +0 -24
  110. mirascope/beta/rag/base/__init__.py +0 -22
  111. mirascope/beta/rag/base/chunkers/__init__.py +0 -2
  112. mirascope/beta/rag/base/chunkers/base_chunker.py +0 -37
  113. mirascope/beta/rag/base/chunkers/text_chunker.py +0 -33
  114. mirascope/beta/rag/base/config.py +0 -8
  115. mirascope/beta/rag/base/document.py +0 -11
  116. mirascope/beta/rag/base/embedders.py +0 -35
  117. mirascope/beta/rag/base/embedding_params.py +0 -18
  118. mirascope/beta/rag/base/embedding_response.py +0 -30
  119. mirascope/beta/rag/base/query_results.py +0 -7
  120. mirascope/beta/rag/base/vectorstore_params.py +0 -18
  121. mirascope/beta/rag/base/vectorstores.py +0 -37
  122. mirascope/beta/rag/chroma/__init__.py +0 -11
  123. mirascope/beta/rag/chroma/types.py +0 -62
  124. mirascope/beta/rag/chroma/vectorstores.py +0 -121
  125. mirascope/beta/rag/cohere/__init__.py +0 -11
  126. mirascope/beta/rag/cohere/embedders.py +0 -87
  127. mirascope/beta/rag/cohere/embedding_params.py +0 -29
  128. mirascope/beta/rag/cohere/embedding_response.py +0 -29
  129. mirascope/beta/rag/cohere/py.typed +0 -0
  130. mirascope/beta/rag/openai/__init__.py +0 -11
  131. mirascope/beta/rag/openai/embedders.py +0 -144
  132. mirascope/beta/rag/openai/embedding_params.py +0 -18
  133. mirascope/beta/rag/openai/embedding_response.py +0 -14
  134. mirascope/beta/rag/openai/py.typed +0 -0
  135. mirascope/beta/rag/pinecone/__init__.py +0 -19
  136. mirascope/beta/rag/pinecone/types.py +0 -143
  137. mirascope/beta/rag/pinecone/vectorstores.py +0 -148
  138. mirascope/beta/rag/weaviate/__init__.py +0 -6
  139. mirascope/beta/rag/weaviate/types.py +0 -92
  140. mirascope/beta/rag/weaviate/vectorstores.py +0 -103
  141. mirascope/core/__init__.py +0 -109
  142. mirascope/core/anthropic/__init__.py +0 -31
  143. mirascope/core/anthropic/_call.py +0 -67
  144. mirascope/core/anthropic/_call_kwargs.py +0 -13
  145. mirascope/core/anthropic/_thinking.py +0 -70
  146. mirascope/core/anthropic/_utils/__init__.py +0 -16
  147. mirascope/core/anthropic/_utils/_convert_common_call_params.py +0 -25
  148. mirascope/core/anthropic/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -21
  149. mirascope/core/anthropic/_utils/_convert_message_params.py +0 -102
  150. mirascope/core/anthropic/_utils/_get_json_output.py +0 -31
  151. mirascope/core/anthropic/_utils/_handle_stream.py +0 -113
  152. mirascope/core/anthropic/_utils/_message_param_converter.py +0 -154
  153. mirascope/core/anthropic/_utils/_setup_call.py +0 -146
  154. mirascope/core/anthropic/call_params.py +0 -44
  155. mirascope/core/anthropic/call_response.py +0 -226
  156. mirascope/core/anthropic/call_response_chunk.py +0 -152
  157. mirascope/core/anthropic/dynamic_config.py +0 -40
  158. mirascope/core/anthropic/py.typed +0 -0
  159. mirascope/core/anthropic/stream.py +0 -204
  160. mirascope/core/anthropic/tool.py +0 -101
  161. mirascope/core/azure/__init__.py +0 -31
  162. mirascope/core/azure/_call.py +0 -67
  163. mirascope/core/azure/_call_kwargs.py +0 -13
  164. mirascope/core/azure/_utils/__init__.py +0 -14
  165. mirascope/core/azure/_utils/_convert_common_call_params.py +0 -26
  166. mirascope/core/azure/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -21
  167. mirascope/core/azure/_utils/_convert_message_params.py +0 -121
  168. mirascope/core/azure/_utils/_get_credential.py +0 -33
  169. mirascope/core/azure/_utils/_get_json_output.py +0 -27
  170. mirascope/core/azure/_utils/_handle_stream.py +0 -130
  171. mirascope/core/azure/_utils/_message_param_converter.py +0 -117
  172. mirascope/core/azure/_utils/_setup_call.py +0 -183
  173. mirascope/core/azure/call_params.py +0 -59
  174. mirascope/core/azure/call_response.py +0 -215
  175. mirascope/core/azure/call_response_chunk.py +0 -105
  176. mirascope/core/azure/dynamic_config.py +0 -30
  177. mirascope/core/azure/py.typed +0 -0
  178. mirascope/core/azure/stream.py +0 -147
  179. mirascope/core/azure/tool.py +0 -93
  180. mirascope/core/base/__init__.py +0 -86
  181. mirascope/core/base/_call_factory.py +0 -256
  182. mirascope/core/base/_create.py +0 -253
  183. mirascope/core/base/_extract.py +0 -175
  184. mirascope/core/base/_extract_with_tools.py +0 -189
  185. mirascope/core/base/_partial.py +0 -95
  186. mirascope/core/base/_utils/__init__.py +0 -92
  187. mirascope/core/base/_utils/_base_message_param_converter.py +0 -22
  188. mirascope/core/base/_utils/_base_type.py +0 -26
  189. mirascope/core/base/_utils/_convert_base_model_to_base_tool.py +0 -48
  190. mirascope/core/base/_utils/_convert_base_type_to_base_tool.py +0 -24
  191. mirascope/core/base/_utils/_convert_function_to_base_tool.py +0 -139
  192. mirascope/core/base/_utils/_convert_messages_to_message_params.py +0 -178
  193. mirascope/core/base/_utils/_convert_provider_finish_reason_to_finish_reason.py +0 -20
  194. mirascope/core/base/_utils/_default_tool_docstring.py +0 -6
  195. mirascope/core/base/_utils/_extract_tool_return.py +0 -42
  196. mirascope/core/base/_utils/_fn_is_async.py +0 -24
  197. mirascope/core/base/_utils/_format_template.py +0 -32
  198. mirascope/core/base/_utils/_get_audio_type.py +0 -18
  199. mirascope/core/base/_utils/_get_common_usage.py +0 -20
  200. mirascope/core/base/_utils/_get_create_fn_or_async_create_fn.py +0 -137
  201. mirascope/core/base/_utils/_get_document_type.py +0 -7
  202. mirascope/core/base/_utils/_get_dynamic_configuration.py +0 -69
  203. mirascope/core/base/_utils/_get_fields_from_call_args.py +0 -34
  204. mirascope/core/base/_utils/_get_fn_args.py +0 -23
  205. mirascope/core/base/_utils/_get_image_dimensions.py +0 -39
  206. mirascope/core/base/_utils/_get_image_type.py +0 -26
  207. mirascope/core/base/_utils/_get_metadata.py +0 -17
  208. mirascope/core/base/_utils/_get_possible_user_message_param.py +0 -21
  209. mirascope/core/base/_utils/_get_prompt_template.py +0 -28
  210. mirascope/core/base/_utils/_get_template_values.py +0 -51
  211. mirascope/core/base/_utils/_get_template_variables.py +0 -38
  212. mirascope/core/base/_utils/_get_unsupported_tool_config_keys.py +0 -10
  213. mirascope/core/base/_utils/_is_prompt_template.py +0 -24
  214. mirascope/core/base/_utils/_json_mode_content.py +0 -17
  215. mirascope/core/base/_utils/_messages_decorator.py +0 -121
  216. mirascope/core/base/_utils/_parse_content_template.py +0 -323
  217. mirascope/core/base/_utils/_parse_prompt_messages.py +0 -63
  218. mirascope/core/base/_utils/_pil_image_to_bytes.py +0 -13
  219. mirascope/core/base/_utils/_protocols.py +0 -901
  220. mirascope/core/base/_utils/_setup_call.py +0 -79
  221. mirascope/core/base/_utils/_setup_extract_tool.py +0 -30
  222. mirascope/core/base/call_kwargs.py +0 -13
  223. mirascope/core/base/call_params.py +0 -36
  224. mirascope/core/base/call_response.py +0 -338
  225. mirascope/core/base/call_response_chunk.py +0 -130
  226. mirascope/core/base/dynamic_config.py +0 -82
  227. mirascope/core/base/from_call_args.py +0 -30
  228. mirascope/core/base/merge_decorators.py +0 -59
  229. mirascope/core/base/message_param.py +0 -175
  230. mirascope/core/base/messages.py +0 -116
  231. mirascope/core/base/metadata.py +0 -13
  232. mirascope/core/base/prompt.py +0 -497
  233. mirascope/core/base/response_model_config_dict.py +0 -9
  234. mirascope/core/base/stream.py +0 -479
  235. mirascope/core/base/stream_config.py +0 -11
  236. mirascope/core/base/structured_stream.py +0 -296
  237. mirascope/core/base/tool.py +0 -214
  238. mirascope/core/base/toolkit.py +0 -176
  239. mirascope/core/base/types.py +0 -344
  240. mirascope/core/bedrock/__init__.py +0 -34
  241. mirascope/core/bedrock/_call.py +0 -68
  242. mirascope/core/bedrock/_call_kwargs.py +0 -12
  243. mirascope/core/bedrock/_types.py +0 -104
  244. mirascope/core/bedrock/_utils/__init__.py +0 -14
  245. mirascope/core/bedrock/_utils/_convert_common_call_params.py +0 -39
  246. mirascope/core/bedrock/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -23
  247. mirascope/core/bedrock/_utils/_convert_message_params.py +0 -111
  248. mirascope/core/bedrock/_utils/_get_json_output.py +0 -30
  249. mirascope/core/bedrock/_utils/_handle_stream.py +0 -104
  250. mirascope/core/bedrock/_utils/_message_param_converter.py +0 -172
  251. mirascope/core/bedrock/_utils/_setup_call.py +0 -258
  252. mirascope/core/bedrock/call_params.py +0 -38
  253. mirascope/core/bedrock/call_response.py +0 -248
  254. mirascope/core/bedrock/call_response_chunk.py +0 -111
  255. mirascope/core/bedrock/dynamic_config.py +0 -37
  256. mirascope/core/bedrock/py.typed +0 -0
  257. mirascope/core/bedrock/stream.py +0 -154
  258. mirascope/core/bedrock/tool.py +0 -100
  259. mirascope/core/cohere/__init__.py +0 -30
  260. mirascope/core/cohere/_call.py +0 -67
  261. mirascope/core/cohere/_call_kwargs.py +0 -11
  262. mirascope/core/cohere/_types.py +0 -20
  263. mirascope/core/cohere/_utils/__init__.py +0 -14
  264. mirascope/core/cohere/_utils/_convert_common_call_params.py +0 -26
  265. mirascope/core/cohere/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -24
  266. mirascope/core/cohere/_utils/_convert_message_params.py +0 -32
  267. mirascope/core/cohere/_utils/_get_json_output.py +0 -30
  268. mirascope/core/cohere/_utils/_handle_stream.py +0 -35
  269. mirascope/core/cohere/_utils/_message_param_converter.py +0 -54
  270. mirascope/core/cohere/_utils/_setup_call.py +0 -150
  271. mirascope/core/cohere/call_params.py +0 -62
  272. mirascope/core/cohere/call_response.py +0 -205
  273. mirascope/core/cohere/call_response_chunk.py +0 -125
  274. mirascope/core/cohere/dynamic_config.py +0 -32
  275. mirascope/core/cohere/py.typed +0 -0
  276. mirascope/core/cohere/stream.py +0 -113
  277. mirascope/core/cohere/tool.py +0 -93
  278. mirascope/core/costs/__init__.py +0 -5
  279. mirascope/core/costs/_anthropic_calculate_cost.py +0 -219
  280. mirascope/core/costs/_azure_calculate_cost.py +0 -11
  281. mirascope/core/costs/_bedrock_calculate_cost.py +0 -15
  282. mirascope/core/costs/_cohere_calculate_cost.py +0 -44
  283. mirascope/core/costs/_gemini_calculate_cost.py +0 -67
  284. mirascope/core/costs/_google_calculate_cost.py +0 -427
  285. mirascope/core/costs/_groq_calculate_cost.py +0 -156
  286. mirascope/core/costs/_litellm_calculate_cost.py +0 -11
  287. mirascope/core/costs/_mistral_calculate_cost.py +0 -64
  288. mirascope/core/costs/_openai_calculate_cost.py +0 -416
  289. mirascope/core/costs/_vertex_calculate_cost.py +0 -67
  290. mirascope/core/costs/_xai_calculate_cost.py +0 -104
  291. mirascope/core/costs/calculate_cost.py +0 -86
  292. mirascope/core/gemini/__init__.py +0 -40
  293. mirascope/core/gemini/_call.py +0 -67
  294. mirascope/core/gemini/_call_kwargs.py +0 -12
  295. mirascope/core/gemini/_utils/__init__.py +0 -14
  296. mirascope/core/gemini/_utils/_convert_common_call_params.py +0 -39
  297. mirascope/core/gemini/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -23
  298. mirascope/core/gemini/_utils/_convert_message_params.py +0 -156
  299. mirascope/core/gemini/_utils/_get_json_output.py +0 -35
  300. mirascope/core/gemini/_utils/_handle_stream.py +0 -33
  301. mirascope/core/gemini/_utils/_message_param_converter.py +0 -209
  302. mirascope/core/gemini/_utils/_setup_call.py +0 -149
  303. mirascope/core/gemini/call_params.py +0 -52
  304. mirascope/core/gemini/call_response.py +0 -216
  305. mirascope/core/gemini/call_response_chunk.py +0 -100
  306. mirascope/core/gemini/dynamic_config.py +0 -26
  307. mirascope/core/gemini/stream.py +0 -120
  308. mirascope/core/gemini/tool.py +0 -104
  309. mirascope/core/google/__init__.py +0 -29
  310. mirascope/core/google/_call.py +0 -67
  311. mirascope/core/google/_call_kwargs.py +0 -13
  312. mirascope/core/google/_utils/__init__.py +0 -14
  313. mirascope/core/google/_utils/_convert_common_call_params.py +0 -38
  314. mirascope/core/google/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -27
  315. mirascope/core/google/_utils/_convert_message_params.py +0 -297
  316. mirascope/core/google/_utils/_get_json_output.py +0 -37
  317. mirascope/core/google/_utils/_handle_stream.py +0 -58
  318. mirascope/core/google/_utils/_message_param_converter.py +0 -200
  319. mirascope/core/google/_utils/_setup_call.py +0 -201
  320. mirascope/core/google/_utils/_validate_media_type.py +0 -58
  321. mirascope/core/google/call_params.py +0 -22
  322. mirascope/core/google/call_response.py +0 -255
  323. mirascope/core/google/call_response_chunk.py +0 -135
  324. mirascope/core/google/dynamic_config.py +0 -26
  325. mirascope/core/google/stream.py +0 -199
  326. mirascope/core/google/tool.py +0 -146
  327. mirascope/core/groq/__init__.py +0 -30
  328. mirascope/core/groq/_call.py +0 -67
  329. mirascope/core/groq/_call_kwargs.py +0 -13
  330. mirascope/core/groq/_utils/__init__.py +0 -14
  331. mirascope/core/groq/_utils/_convert_common_call_params.py +0 -26
  332. mirascope/core/groq/_utils/_convert_message_params.py +0 -112
  333. mirascope/core/groq/_utils/_get_json_output.py +0 -27
  334. mirascope/core/groq/_utils/_handle_stream.py +0 -123
  335. mirascope/core/groq/_utils/_message_param_converter.py +0 -89
  336. mirascope/core/groq/_utils/_setup_call.py +0 -132
  337. mirascope/core/groq/call_params.py +0 -52
  338. mirascope/core/groq/call_response.py +0 -213
  339. mirascope/core/groq/call_response_chunk.py +0 -104
  340. mirascope/core/groq/dynamic_config.py +0 -29
  341. mirascope/core/groq/py.typed +0 -0
  342. mirascope/core/groq/stream.py +0 -135
  343. mirascope/core/groq/tool.py +0 -80
  344. mirascope/core/litellm/__init__.py +0 -28
  345. mirascope/core/litellm/_call.py +0 -67
  346. mirascope/core/litellm/_utils/__init__.py +0 -5
  347. mirascope/core/litellm/_utils/_setup_call.py +0 -109
  348. mirascope/core/litellm/call_params.py +0 -10
  349. mirascope/core/litellm/call_response.py +0 -24
  350. mirascope/core/litellm/call_response_chunk.py +0 -14
  351. mirascope/core/litellm/dynamic_config.py +0 -8
  352. mirascope/core/litellm/py.typed +0 -0
  353. mirascope/core/litellm/stream.py +0 -86
  354. mirascope/core/litellm/tool.py +0 -13
  355. mirascope/core/mistral/__init__.py +0 -36
  356. mirascope/core/mistral/_call.py +0 -65
  357. mirascope/core/mistral/_call_kwargs.py +0 -19
  358. mirascope/core/mistral/_utils/__init__.py +0 -14
  359. mirascope/core/mistral/_utils/_convert_common_call_params.py +0 -24
  360. mirascope/core/mistral/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -22
  361. mirascope/core/mistral/_utils/_convert_message_params.py +0 -122
  362. mirascope/core/mistral/_utils/_get_json_output.py +0 -34
  363. mirascope/core/mistral/_utils/_handle_stream.py +0 -139
  364. mirascope/core/mistral/_utils/_message_param_converter.py +0 -176
  365. mirascope/core/mistral/_utils/_setup_call.py +0 -164
  366. mirascope/core/mistral/call_params.py +0 -36
  367. mirascope/core/mistral/call_response.py +0 -205
  368. mirascope/core/mistral/call_response_chunk.py +0 -105
  369. mirascope/core/mistral/dynamic_config.py +0 -33
  370. mirascope/core/mistral/py.typed +0 -0
  371. mirascope/core/mistral/stream.py +0 -120
  372. mirascope/core/mistral/tool.py +0 -81
  373. mirascope/core/openai/__init__.py +0 -31
  374. mirascope/core/openai/_call.py +0 -67
  375. mirascope/core/openai/_call_kwargs.py +0 -13
  376. mirascope/core/openai/_utils/__init__.py +0 -14
  377. mirascope/core/openai/_utils/_convert_common_call_params.py +0 -26
  378. mirascope/core/openai/_utils/_convert_message_params.py +0 -148
  379. mirascope/core/openai/_utils/_get_json_output.py +0 -31
  380. mirascope/core/openai/_utils/_handle_stream.py +0 -138
  381. mirascope/core/openai/_utils/_message_param_converter.py +0 -105
  382. mirascope/core/openai/_utils/_setup_call.py +0 -155
  383. mirascope/core/openai/call_params.py +0 -92
  384. mirascope/core/openai/call_response.py +0 -273
  385. mirascope/core/openai/call_response_chunk.py +0 -139
  386. mirascope/core/openai/dynamic_config.py +0 -34
  387. mirascope/core/openai/py.typed +0 -0
  388. mirascope/core/openai/stream.py +0 -185
  389. mirascope/core/openai/tool.py +0 -101
  390. mirascope/core/py.typed +0 -0
  391. mirascope/core/vertex/__init__.py +0 -45
  392. mirascope/core/vertex/_call.py +0 -62
  393. mirascope/core/vertex/_call_kwargs.py +0 -12
  394. mirascope/core/vertex/_utils/__init__.py +0 -14
  395. mirascope/core/vertex/_utils/_convert_common_call_params.py +0 -37
  396. mirascope/core/vertex/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -23
  397. mirascope/core/vertex/_utils/_convert_message_params.py +0 -171
  398. mirascope/core/vertex/_utils/_get_json_output.py +0 -36
  399. mirascope/core/vertex/_utils/_handle_stream.py +0 -33
  400. mirascope/core/vertex/_utils/_message_param_converter.py +0 -133
  401. mirascope/core/vertex/_utils/_setup_call.py +0 -160
  402. mirascope/core/vertex/call_params.py +0 -24
  403. mirascope/core/vertex/call_response.py +0 -206
  404. mirascope/core/vertex/call_response_chunk.py +0 -99
  405. mirascope/core/vertex/dynamic_config.py +0 -28
  406. mirascope/core/vertex/stream.py +0 -119
  407. mirascope/core/vertex/tool.py +0 -101
  408. mirascope/core/xai/__init__.py +0 -28
  409. mirascope/core/xai/_call.py +0 -67
  410. mirascope/core/xai/_utils/__init__.py +0 -5
  411. mirascope/core/xai/_utils/_setup_call.py +0 -113
  412. mirascope/core/xai/call_params.py +0 -10
  413. mirascope/core/xai/call_response.py +0 -16
  414. mirascope/core/xai/call_response_chunk.py +0 -14
  415. mirascope/core/xai/dynamic_config.py +0 -8
  416. mirascope/core/xai/py.typed +0 -0
  417. mirascope/core/xai/stream.py +0 -57
  418. mirascope/core/xai/tool.py +0 -13
  419. mirascope/experimental/graphs/__init__.py +0 -5
  420. mirascope/integrations/__init__.py +0 -16
  421. mirascope/integrations/_middleware_factory.py +0 -403
  422. mirascope/integrations/langfuse/__init__.py +0 -3
  423. mirascope/integrations/langfuse/_utils.py +0 -114
  424. mirascope/integrations/langfuse/_with_langfuse.py +0 -70
  425. mirascope/integrations/logfire/__init__.py +0 -3
  426. mirascope/integrations/logfire/_utils.py +0 -225
  427. mirascope/integrations/logfire/_with_logfire.py +0 -63
  428. mirascope/integrations/otel/__init__.py +0 -10
  429. mirascope/integrations/otel/_utils.py +0 -270
  430. mirascope/integrations/otel/_with_hyperdx.py +0 -60
  431. mirascope/integrations/otel/_with_otel.py +0 -59
  432. mirascope/integrations/tenacity.py +0 -14
  433. mirascope/llm/_call.py +0 -401
  434. mirascope/llm/_context.py +0 -384
  435. mirascope/llm/_override.py +0 -3639
  436. mirascope/llm/_protocols.py +0 -500
  437. mirascope/llm/_response_metaclass.py +0 -31
  438. mirascope/llm/call_response.py +0 -158
  439. mirascope/llm/call_response_chunk.py +0 -66
  440. mirascope/llm/stream.py +0 -162
  441. mirascope/llm/tool.py +0 -64
  442. mirascope/mcp/__init__.py +0 -7
  443. mirascope/mcp/_utils.py +0 -288
  444. mirascope/mcp/client.py +0 -167
  445. mirascope/mcp/server.py +0 -356
  446. mirascope/mcp/tools.py +0 -110
  447. mirascope/py.typed +0 -0
  448. mirascope/retries/__init__.py +0 -11
  449. mirascope/retries/fallback.py +0 -131
  450. mirascope/retries/tenacity.py +0 -50
  451. mirascope/tools/__init__.py +0 -37
  452. mirascope/tools/base.py +0 -98
  453. mirascope/tools/system/__init__.py +0 -0
  454. mirascope/tools/system/_docker_operation.py +0 -166
  455. mirascope/tools/system/_file_system.py +0 -267
  456. mirascope/tools/web/__init__.py +0 -0
  457. mirascope/tools/web/_duckduckgo.py +0 -111
  458. mirascope/tools/web/_httpx.py +0 -125
  459. mirascope/tools/web/_parse_url_content.py +0 -94
  460. mirascope/tools/web/_requests.py +0 -54
  461. mirascope/v0/__init__.py +0 -43
  462. mirascope/v0/anthropic.py +0 -54
  463. mirascope/v0/base/__init__.py +0 -12
  464. mirascope/v0/base/calls.py +0 -118
  465. mirascope/v0/base/extractors.py +0 -122
  466. mirascope/v0/base/ops_utils.py +0 -207
  467. mirascope/v0/base/prompts.py +0 -48
  468. mirascope/v0/base/types.py +0 -14
  469. mirascope/v0/base/utils.py +0 -21
  470. mirascope/v0/openai.py +0 -54
  471. mirascope-1.25.7.dist-info/METADATA +0 -169
  472. mirascope-1.25.7.dist-info/RECORD +0 -378
  473. {mirascope-1.25.7.dist-info → mirascope-2.0.0a0.dist-info}/WHEEL +0 -0
  474. {mirascope-1.25.7.dist-info → mirascope-2.0.0a0.dist-info}/licenses/LICENSE +0 -0
mirascope/llm/models/models.py
@@ -0,0 +1,1243 @@
+ """The model context manager for the `llm` module."""
+
+ from __future__ import annotations
+
+ from collections.abc import Iterator, Sequence
+ from contextlib import contextmanager
+ from contextvars import ContextVar
+ from typing import TYPE_CHECKING, overload
+ from typing_extensions import Unpack
+
+ from ..clients import PROVIDERS, get_client
+ from ..context import Context, DepsT
+ from ..formatting import Format, FormattableT
+ from ..messages import Message, UserContent
+ from ..responses import (
+     AsyncContextResponse,
+     AsyncContextStreamResponse,
+     AsyncResponse,
+     AsyncStreamResponse,
+     ContextResponse,
+     ContextStreamResponse,
+     Response,
+     StreamResponse,
+ )
+ from ..tools import (
+     AsyncContextTool,
+     AsyncContextToolkit,
+     AsyncTool,
+     AsyncToolkit,
+     ContextTool,
+     ContextToolkit,
+     Tool,
+     Toolkit,
+ )
+
+ if TYPE_CHECKING:
+     from ..clients import (
+         ModelId,
+         Params,
+         Provider,
+     )
+
+
+ MODEL_CONTEXT: ContextVar[Model | None] = ContextVar("MODEL_CONTEXT", default=None)
+
+
+ def get_model_from_context() -> Model | None:
+     """Get the LLM currently set via context, if any."""
+     return MODEL_CONTEXT.get()
+
+
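A minimal usage sketch for the context lookup above, assuming (per the `Model` docstring below) that the `llm.model()` context manager is what sets `MODEL_CONTEXT`, and that `get_model_from_context` is importable from the module path this diff adds:

```python
from mirascope import llm
from mirascope.llm.models.models import get_model_from_context

# No model has been set via context yet.
assert get_model_from_context() is None

# Inside an `llm.model()` block, the context-local model becomes visible;
# `llm.use_model()` is documented below to prefer it over its own default.
with llm.model(provider="openai", model_id="gpt-4o-mini"):
    model = get_model_from_context()
    assert model is not None
    print(model.provider, model.model_id)  # openai gpt-4o-mini
```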
+ class Model:
+     """The unified LLM interface that delegates to provider-specific clients.
+
+     This class provides a consistent interface for interacting with language models
+     from various providers. It handles the common operations like generating responses,
+     streaming, and async variants by delegating to the appropriate client methods.
+
+     **Usage Note:** In most cases, you should use `llm.use_model()` instead of instantiating
+     `Model` directly. This preserves the ability to override the model at runtime using
+     the `llm.model()` context manager. Only instantiate `Model` directly if you want to
+     hardcode a specific model and prevent it from being overridden by context.
+
+     Example (recommended - allows override):
+
+     ```python
+     from mirascope import llm
+
+     def recommend_book(genre: str) -> llm.Response:
+         # Uses context model if available, otherwise creates default
+         model = llm.use_model(provider="openai", model_id="gpt-4o-mini")
+         message = llm.messages.user(f"Please recommend a book in {genre}.")
+         return model.call(messages=[message])
+
+     # Uses default model
+     response = recommend_book("fantasy")
+
+     # Override with different model
+     with llm.model(provider="anthropic", model_id="claude-sonnet-4-0"):
+         response = recommend_book("fantasy")  # Uses Claude
+     ```
+
+     Example (direct instantiation - prevents override):
+
+     ```python
+     from mirascope import llm
+
+     def recommend_book(genre: str) -> llm.Response:
+         # Hardcoded model, cannot be overridden by context
+         model = llm.Model(provider="openai", model_id="gpt-4o-mini")
+         message = llm.messages.user(f"Please recommend a book in {genre}.")
+         return model.call(messages=[message])
+     ```
+     """
+
+     provider: Provider
+     """The provider being used (e.g. `openai`)."""
+
+     model_id: ModelId
+     """The model being used (e.g. `gpt-4o-mini`)."""
+
+     params: Params
+     """The default parameters for the model (temperature, max_tokens, etc.)."""
+
+     def __init__(
+         self,
+         provider: Provider,
+         model_id: ModelId,
+         **params: Unpack[Params],
+     ) -> None:
+         """Initialize the Model with provider, model_id, and optional params."""
+         if provider not in PROVIDERS:
+             raise ValueError(f"Unknown provider: {provider}")
+         self.provider = provider
+         self.model_id = model_id
+         self.params = params
+
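For reference, a short construction sketch. `provider` and `model_id` are validated and stored as shown above; keyword params become the model's defaults. The specific `Params` keys are assumptions here (the docstring above names `temperature` and `max_tokens` as examples; the full set lives in `mirascope/llm/clients/base/params.py`, file 24 in this diff):

```python
from mirascope import llm

# Keyword params given at construction are forwarded to every client call.
model = llm.Model(
    provider="anthropic",
    model_id="claude-sonnet-4-0",
    temperature=0.7,  # assumed Params key
    max_tokens=1024,  # assumed Params key
)

# An unrecognized provider fails fast in __init__.
try:
    llm.Model(provider="not-a-provider", model_id="gpt-4o-mini")  # type: ignore[arg-type]
except ValueError as error:
    print(error)  # Unknown provider: not-a-provider
```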
+     @overload
+     def call(
+         self,
+         *,
+         messages: Sequence[Message],
+         tools: Sequence[Tool] | Toolkit | None = None,
+         format: None = None,
+     ) -> Response:
+         """Generate an `llm.Response` without a response format."""
+         ...
+
+     @overload
+     def call(
+         self,
+         *,
+         messages: Sequence[Message],
+         tools: Sequence[Tool] | Toolkit | None = None,
+         format: type[FormattableT] | Format[FormattableT],
+     ) -> Response[FormattableT]:
+         """Generate an `llm.Response` with a response format."""
+         ...
+
+     @overload
+     def call(
+         self,
+         *,
+         messages: Sequence[Message],
+         tools: Sequence[Tool] | Toolkit | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None,
+     ) -> Response | Response[FormattableT]:
+         """Generate an `llm.Response` with an optional response format."""
+         ...
+
+     def call(
+         self,
+         *,
+         messages: Sequence[Message],
+         tools: Sequence[Tool] | Toolkit | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None = None,
+     ) -> Response | Response[FormattableT]:
+         """Generate an `llm.Response` by synchronously calling this model's LLM provider.
+
+         Args:
+             messages: Messages to send to the LLM.
+             tools: Optional tools that the model may invoke.
+             format: Optional response format specifier.
+
+         Returns:
+             An `llm.Response` object containing the LLM-generated content.
+         """
+         return get_client(self.provider).call(
+             model_id=self.model_id,
+             messages=messages,
+             tools=tools,
+             format=format,
+             **self.params,
+         )
+
+     @overload
+     async def call_async(
+         self,
+         *,
+         messages: Sequence[Message],
+         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
+         format: None = None,
+     ) -> AsyncResponse:
+         """Generate an `llm.AsyncResponse` without a response format."""
+         ...
+
+     @overload
+     async def call_async(
+         self,
+         *,
+         messages: Sequence[Message],
+         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
+         format: type[FormattableT] | Format[FormattableT],
+     ) -> AsyncResponse[FormattableT]:
+         """Generate an `llm.AsyncResponse` with a response format."""
+         ...
+
+     @overload
+     async def call_async(
+         self,
+         *,
+         messages: Sequence[Message],
+         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None,
+     ) -> AsyncResponse | AsyncResponse[FormattableT]:
+         """Generate an `llm.AsyncResponse` with an optional response format."""
+         ...
+
+     async def call_async(
+         self,
+         *,
+         messages: Sequence[Message],
+         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None = None,
+     ) -> AsyncResponse | AsyncResponse[FormattableT]:
+         """Generate an `llm.AsyncResponse` by asynchronously calling this model's LLM provider.
+
+         Args:
+             messages: Messages to send to the LLM.
+             tools: Optional tools that the model may invoke.
+             format: Optional response format specifier.
+
+         Returns:
+             An `llm.AsyncResponse` object containing the LLM-generated content.
+         """
+         return await get_client(self.provider).call_async(
+             model_id=self.model_id,
+             messages=messages,
+             tools=tools,
+             **self.params,
+             format=format,
+         )
+
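The async variant mirrors `call` exactly but must be awaited; a minimal sketch, assuming an event loop driven by `asyncio` and the `llm.messages.user` helper shown in the class docstring:

```python
import asyncio

from mirascope import llm


async def main() -> None:
    model = llm.Model(provider="openai", model_id="gpt-4o-mini")
    message = llm.messages.user("Please recommend a fantasy book.")
    # Same keyword-only signature as `call`, awaited instead of blocking.
    response = await model.call_async(messages=[message])
    print(response)


asyncio.run(main())
```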
+     @overload
+     def stream(
+         self,
+         *,
+         messages: Sequence[Message],
+         tools: Sequence[Tool] | Toolkit | None = None,
+         format: None = None,
+     ) -> StreamResponse:
+         """Stream an `llm.StreamResponse` without a response format."""
+         ...
+
+     @overload
+     def stream(
+         self,
+         *,
+         messages: Sequence[Message],
+         tools: Sequence[Tool] | Toolkit | None = None,
+         format: type[FormattableT] | Format[FormattableT],
+     ) -> StreamResponse[FormattableT]:
+         """Stream an `llm.StreamResponse` with a response format."""
+         ...
+
+     @overload
+     def stream(
+         self,
+         *,
+         messages: Sequence[Message],
+         tools: Sequence[Tool] | Toolkit | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None,
+     ) -> StreamResponse | StreamResponse[FormattableT]:
+         """Stream an `llm.StreamResponse` with an optional response format."""
+         ...
+
+     def stream(
+         self,
+         *,
+         messages: Sequence[Message],
+         tools: Sequence[Tool] | Toolkit | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None = None,
+     ) -> StreamResponse | StreamResponse[FormattableT]:
+         """Generate an `llm.StreamResponse` by synchronously streaming from this model's LLM provider.
+
+         Args:
+             messages: Messages to send to the LLM.
+             tools: Optional tools that the model may invoke.
+             format: Optional response format specifier.
+
+         Returns:
+             An `llm.StreamResponse` object for iterating over the LLM-generated content.
+         """
+         return get_client(self.provider).stream(
+             model_id=self.model_id,
+             messages=messages,
+             tools=tools,
+             format=format,
+             **self.params,
+         )
+
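Unlike `call`, `stream` returns before generation finishes; a minimal sketch, assuming the returned `StreamResponse` is iterable and yields chunks of generated text (the iteration protocol is defined in `mirascope/llm/responses/stream_response.py`, file 85 in this diff, not in this hunk):

```python
from mirascope import llm

model = llm.Model(provider="openai", model_id="gpt-4o-mini")
message = llm.messages.user("Please recommend a fantasy book.")

# The provider stream is consumed as we iterate, so output appears incrementally.
stream = model.stream(messages=[message])
for chunk in stream:  # iteration protocol assumed
    print(chunk, end="", flush=True)
```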
+     @overload
+     async def stream_async(
+         self,
+         *,
+         messages: list[Message],
+         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
+         format: None = None,
+     ) -> AsyncStreamResponse:
+         """Stream an `llm.AsyncStreamResponse` without a response format."""
+         ...
+
+     @overload
+     async def stream_async(
+         self,
+         *,
+         messages: list[Message],
+         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
+         format: type[FormattableT] | Format[FormattableT],
+     ) -> AsyncStreamResponse[FormattableT]:
+         """Stream an `llm.AsyncStreamResponse` with a response format."""
+         ...
+
+     @overload
+     async def stream_async(
+         self,
+         *,
+         messages: list[Message],
+         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None,
+     ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
+         """Stream an `llm.AsyncStreamResponse` with an optional response format."""
+         ...
+
+     async def stream_async(
+         self,
+         *,
+         messages: list[Message],
+         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None = None,
+     ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
+         """Generate an `llm.AsyncStreamResponse` by asynchronously streaming from this model's LLM provider.
+
+         Args:
+             messages: Messages to send to the LLM.
+             tools: Optional tools that the model may invoke.
+             format: Optional response format specifier.
+
+         Returns:
+             An `llm.AsyncStreamResponse` object for asynchronously iterating over the LLM-generated content.
+         """
+         return await get_client(self.provider).stream_async(
+             model_id=self.model_id,
+             messages=messages,
+             tools=tools,
+             format=format,
+             **self.params,
+         )
+
+     @overload
+     def context_call(
+         self,
+         *,
+         ctx: Context[DepsT],
+         messages: Sequence[Message],
+         tools: Sequence[Tool | ContextTool[DepsT]]
+         | ContextToolkit[DepsT]
+         | None = None,
+         format: None = None,
+     ) -> ContextResponse[DepsT, None]:
+         """Generate an `llm.ContextResponse` without a response format."""
+         ...
+
+     @overload
+     def context_call(
+         self,
+         *,
+         ctx: Context[DepsT],
+         messages: Sequence[Message],
+         tools: Sequence[Tool | ContextTool[DepsT]]
+         | ContextToolkit[DepsT]
+         | None = None,
+         format: type[FormattableT] | Format[FormattableT],
+     ) -> ContextResponse[DepsT, FormattableT]:
+         """Generate an `llm.ContextResponse` with a response format."""
+         ...
+
+     @overload
+     def context_call(
+         self,
+         *,
+         ctx: Context[DepsT],
+         messages: Sequence[Message],
+         tools: Sequence[Tool | ContextTool[DepsT]]
+         | ContextToolkit[DepsT]
+         | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None,
+     ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
+         """Generate an `llm.ContextResponse` with an optional response format."""
+         ...
+
+     def context_call(
+         self,
+         *,
+         ctx: Context[DepsT],
+         messages: Sequence[Message],
+         tools: Sequence[Tool | ContextTool[DepsT]]
+         | ContextToolkit[DepsT]
+         | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None = None,
+     ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
+         """Generate an `llm.ContextResponse` by synchronously calling this model's LLM provider.
+
+         Args:
+             ctx: Context object with dependencies for tools.
+             messages: Messages to send to the LLM.
+             tools: Optional tools that the model may invoke.
+             format: Optional response format specifier.
+
+         Returns:
+             An `llm.ContextResponse` object containing the LLM-generated content.
+         """
+         return get_client(self.provider).context_call(
+             ctx=ctx,
+             model_id=self.model_id,
+             messages=messages,
+             tools=tools,
+             format=format,
+             **self.params,
+         )
+
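The `context_call` variant threads a typed dependency object through to context-aware tools, which is what the `Context[DepsT]` parameter and the `Sequence[Tool | ContextTool[DepsT]]` annotation above express. A sketch under stated assumptions: that `llm.Context` accepts a `deps` argument (its constructor lives in `mirascope/llm/context/context.py`, file 60 in this diff) and that plain strings are valid user message content:

```python
from dataclasses import dataclass

from mirascope import llm


@dataclass
class BookDeps:
    owned_books: list[str]


# Assumed constructor; only the Context[DepsT] annotation appears in this hunk.
ctx = llm.Context(deps=BookDeps(owned_books=["Mistborn", "The Hobbit"]))

model = llm.Model(provider="openai", model_id="gpt-4o-mini")
message = llm.messages.user("Recommend one of the books I already own.")

# Context-aware tools passed via `tools=` would receive `ctx` when invoked.
response = model.context_call(ctx=ctx, messages=[message])
print(response)
```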
+     @overload
+     async def context_call_async(
+         self,
+         *,
+         ctx: Context[DepsT],
+         messages: Sequence[Message],
+         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
+         | AsyncContextToolkit[DepsT]
+         | None = None,
+         format: None = None,
+     ) -> AsyncContextResponse[DepsT, None]:
+         """Generate an `llm.AsyncContextResponse` without a response format."""
+         ...
+
+     @overload
+     async def context_call_async(
+         self,
+         *,
+         ctx: Context[DepsT],
+         messages: Sequence[Message],
+         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
+         | AsyncContextToolkit[DepsT]
+         | None = None,
+         format: type[FormattableT] | Format[FormattableT],
+     ) -> AsyncContextResponse[DepsT, FormattableT]:
+         """Generate an `llm.AsyncContextResponse` with a response format."""
+         ...
+
+     @overload
+     async def context_call_async(
+         self,
+         *,
+         ctx: Context[DepsT],
+         messages: Sequence[Message],
+         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
+         | AsyncContextToolkit[DepsT]
+         | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None,
+     ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
+         """Generate an `llm.AsyncContextResponse` with an optional response format."""
+         ...
+
+     async def context_call_async(
+         self,
+         *,
+         ctx: Context[DepsT],
+         messages: Sequence[Message],
+         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
+         | AsyncContextToolkit[DepsT]
+         | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None = None,
+     ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
+         """Generate an `llm.AsyncContextResponse` by asynchronously calling this model's LLM provider.
+
+         Args:
+             ctx: Context object with dependencies for tools.
+             messages: Messages to send to the LLM.
+             tools: Optional tools that the model may invoke.
+             format: Optional response format specifier.
+
+         Returns:
+             An `llm.AsyncContextResponse` object containing the LLM-generated content.
+         """
+         return await get_client(self.provider).context_call_async(
+             ctx=ctx,
+             model_id=self.model_id,
+             messages=messages,
+             tools=tools,
+             format=format,
+             **self.params,
+         )
+
+     @overload
+     def context_stream(
+         self,
+         *,
+         ctx: Context[DepsT],
+         messages: Sequence[Message],
+         tools: Sequence[Tool | ContextTool[DepsT]]
+         | ContextToolkit[DepsT]
+         | None = None,
+         format: None = None,
+     ) -> ContextStreamResponse[DepsT, None]:
+         """Stream an `llm.ContextStreamResponse` without a response format."""
+         ...
+
+     @overload
+     def context_stream(
+         self,
+         *,
+         ctx: Context[DepsT],
+         messages: Sequence[Message],
+         tools: Sequence[Tool | ContextTool[DepsT]]
+         | ContextToolkit[DepsT]
+         | None = None,
+         format: type[FormattableT] | Format[FormattableT],
+     ) -> ContextStreamResponse[DepsT, FormattableT]:
+         """Stream an `llm.ContextStreamResponse` with a response format."""
+         ...
+
+     @overload
+     def context_stream(
+         self,
+         *,
+         ctx: Context[DepsT],
+         messages: Sequence[Message],
+         tools: Sequence[Tool | ContextTool[DepsT]]
+         | ContextToolkit[DepsT]
+         | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None,
+     ) -> (
+         ContextStreamResponse[DepsT, None] | ContextStreamResponse[DepsT, FormattableT]
+     ):
+         """Stream an `llm.ContextStreamResponse` with an optional response format."""
+         ...
+
+     def context_stream(
+         self,
+         *,
+         ctx: Context[DepsT],
+         messages: Sequence[Message],
+         tools: Sequence[Tool | ContextTool[DepsT]]
+         | ContextToolkit[DepsT]
+         | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None = None,
+     ) -> (
+         ContextStreamResponse[DepsT, None] | ContextStreamResponse[DepsT, FormattableT]
+     ):
+         """Generate an `llm.ContextStreamResponse` by synchronously streaming from this model's LLM provider.
+
+         Args:
+             ctx: Context object with dependencies for tools.
+             messages: Messages to send to the LLM.
+             tools: Optional tools that the model may invoke.
+             format: Optional response format specifier.
+
+         Returns:
+             An `llm.ContextStreamResponse` object for iterating over the LLM-generated content.
+         """
+         return get_client(self.provider).context_stream(
+             ctx=ctx,
+             model_id=self.model_id,
+             messages=messages,
+             tools=tools,
+             format=format,
+             **self.params,
+         )
+
+     @overload
+     async def context_stream_async(
+         self,
+         *,
+         ctx: Context[DepsT],
+         messages: list[Message],
+         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
+         | AsyncContextToolkit[DepsT]
+         | None = None,
+         format: None = None,
+     ) -> AsyncContextStreamResponse[DepsT, None]:
+         """Stream an `llm.AsyncContextStreamResponse` without a response format."""
+         ...
+
+     @overload
+     async def context_stream_async(
+         self,
+         *,
+         ctx: Context[DepsT],
+         messages: list[Message],
+         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
+         | AsyncContextToolkit[DepsT]
+         | None = None,
+         format: type[FormattableT] | Format[FormattableT],
+     ) -> AsyncContextStreamResponse[DepsT, FormattableT]:
+         """Stream an `llm.AsyncContextStreamResponse` with a response format."""
+         ...
+
+     @overload
+     async def context_stream_async(
+         self,
+         *,
+         ctx: Context[DepsT],
+         messages: list[Message],
+         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
+         | AsyncContextToolkit[DepsT]
+         | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None,
+     ) -> (
+         AsyncContextStreamResponse[DepsT, None]
+         | AsyncContextStreamResponse[DepsT, FormattableT]
+     ):
+         """Stream an `llm.AsyncContextStreamResponse` with an optional response format."""
+         ...
+
+     async def context_stream_async(
+         self,
+         *,
+         ctx: Context[DepsT],
+         messages: list[Message],
+         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
+         | AsyncContextToolkit[DepsT]
+         | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None = None,
+     ) -> (
+         AsyncContextStreamResponse[DepsT, None]
+         | AsyncContextStreamResponse[DepsT, FormattableT]
+     ):
+         """Generate an `llm.AsyncContextStreamResponse` by asynchronously streaming from this model's LLM provider.
+
+         Args:
+             ctx: Context object with dependencies for tools.
+             messages: Messages to send to the LLM.
+             tools: Optional tools that the model may invoke.
+             format: Optional response format specifier.
+
+         Returns:
+             An `llm.AsyncContextStreamResponse` object for asynchronously iterating over the LLM-generated content.
+         """
+         return await get_client(self.provider).context_stream_async(
+             ctx=ctx,
+             model_id=self.model_id,
+             messages=messages,
+             tools=tools,
+             format=format,
+             **self.params,
+         )
+
+     @overload
+     def resume(
+         self,
+         *,
+         response: Response,
+         content: UserContent,
+     ) -> Response:
+         """Resume an `llm.Response` without a response format."""
+         ...
+
+     @overload
+     def resume(
+         self,
+         *,
+         response: Response[FormattableT],
+         content: UserContent,
+     ) -> Response[FormattableT]:
+         """Resume an `llm.Response` with a response format."""
+         ...
+
+     @overload
+     def resume(
+         self,
+         *,
+         response: Response | Response[FormattableT],
+         content: UserContent,
+     ) -> Response | Response[FormattableT]:
+         """Resume an `llm.Response` with an optional response format."""
+         ...
+
+     def resume(
+         self,
+         *,
+         response: Response | Response[FormattableT],
+         content: UserContent,
+     ) -> Response | Response[FormattableT]:
+         """Generate a new `llm.Response` by extending another response's messages with additional user content.
+
+         Uses the previous response's tools and output format, and this model's params.
+
+         Depending on the client, this may be a wrapper around using client call methods
+         with the response's messages and the new content, or it may use a provider-specific
+         API for resuming an existing interaction.
+
+         Args:
+             response: Previous response to extend.
+             content: Additional user content to append.
+
+         Returns:
+             A new `llm.Response` object containing the extended conversation.
+         """
+         return get_client(self.provider).resume(
+             model_id=self.model_id,
+             response=response,
+             content=content,
+             **self.params,
+         )
+
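`resume` turns a finished response back into a conversation turn; a minimal sketch, assuming a plain string is accepted as `UserContent` (that alias is defined in `mirascope/llm/messages`, not in this hunk):

```python
from mirascope import llm

model = llm.Model(provider="openai", model_id="gpt-4o-mini")
first = model.call(messages=[llm.messages.user("Recommend a fantasy book.")])

# Re-send the prior exchange plus new user content; tools and format come from
# `first`, while params come from this model, per the docstring above.
followup = model.resume(response=first, content="Something shorter, please.")
print(followup)
```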
706
+ @overload
707
+ async def resume_async(
708
+ self,
709
+ *,
710
+ response: AsyncResponse,
711
+ content: UserContent,
712
+ ) -> AsyncResponse:
713
+ """Resume an `llm.AsyncResponse` without a response format."""
714
+ ...
715
+
716
+ @overload
717
+ async def resume_async(
718
+ self,
719
+ *,
720
+ response: AsyncResponse[FormattableT],
721
+ content: UserContent,
722
+ ) -> AsyncResponse[FormattableT]:
723
+ """Resume an `llm.AsyncResponse` with a response format."""
724
+ ...
725
+
726
+ @overload
727
+ async def resume_async(
728
+ self,
729
+ *,
730
+ response: AsyncResponse | AsyncResponse[FormattableT],
731
+ content: UserContent,
732
+ ) -> AsyncResponse | AsyncResponse[FormattableT]:
733
+ """Resume an `llm.AsyncResponse` with an optional response format."""
734
+ ...
735
+
736
+ async def resume_async(
737
+ self,
738
+ *,
739
+ response: AsyncResponse | AsyncResponse[FormattableT],
740
+ content: UserContent,
741
+ ) -> AsyncResponse | AsyncResponse[FormattableT]:
742
+ """Generate a new `llm.AsyncResponse` by extending another response's messages with additional user content.
743
+
744
+ Uses the previous response's tools and output format, and this model's params.
745
+
746
+ Depending on the client, this may be a wrapper around using client call methods
747
+ with the response's messages and the new content, or it may use a provider-specific
748
+ API for resuming an existing interaction.
749
+
750
+ Args:
751
+ response: Previous async response to extend.
752
+ content: Additional user content to append.
753
+
754
+ Returns:
755
+ A new `llm.AsyncResponse` object containing the extended conversation.
756
+ """
757
+ return await get_client(self.provider).resume_async(
758
+ model_id=self.model_id,
759
+ response=response,
760
+ content=content,
761
+ **self.params,
762
+ )
763
+
764
+ @overload
765
+ def context_resume(
766
+ self,
767
+ *,
768
+ ctx: Context[DepsT],
769
+ response: ContextResponse[DepsT, None],
770
+ content: UserContent,
771
+ ) -> ContextResponse[DepsT, None]:
772
+ """Resume an `llm.ContextResponse` without a response format."""
773
+ ...
774
+
775
+ @overload
776
+ def context_resume(
777
+ self,
778
+ *,
779
+ ctx: Context[DepsT],
780
+ response: ContextResponse[DepsT, FormattableT],
781
+ content: UserContent,
782
+ ) -> ContextResponse[DepsT, FormattableT]:
783
+ """Resume an `llm.ContextResponse` with a response format."""
784
+ ...
785
+
786
+ @overload
787
+ def context_resume(
788
+ self,
789
+ *,
790
+ ctx: Context[DepsT],
791
+ response: ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT],
792
+ content: UserContent,
793
+ ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
794
+ """Resume an `llm.ContextResponse` with an optional response format."""
795
+ ...
796
+
797
+ def context_resume(
798
+ self,
799
+ *,
800
+ ctx: Context[DepsT],
801
+ response: ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT],
802
+ content: UserContent,
803
+ ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
804
+ """Generate a new `llm.ContextResponse` by extending another response's messages with additional user content.
805
+
806
+ Uses the previous response's tools and output format, and this model's params.
807
+
808
+ Depending on the client, this may be a wrapper around using client call methods
809
+ with the response's messages and the new content, or it may use a provider-specific
810
+ API for resuming an existing interaction.
811
+
812
+ Args:
813
+ ctx: Context object with dependencies for tools.
814
+ response: Previous context response to extend.
815
+ content: Additional user content to append.
816
+
817
+ Returns:
818
+ A new `llm.ContextResponse` object containing the extended conversation.
819
+ """
820
+ return get_client(self.provider).context_resume(
821
+ ctx=ctx,
822
+ model_id=self.model_id,
823
+ response=response,
824
+ content=content,
825
+ **self.params,
826
+ )
827
+
828
+ @overload
829
+ async def context_resume_async(
830
+ self,
831
+ *,
832
+ ctx: Context[DepsT],
833
+ response: AsyncContextResponse[DepsT, None],
834
+ content: UserContent,
835
+ ) -> AsyncContextResponse[DepsT, None]:
836
+ """Resume an `llm.AsyncContextResponse` without a response format."""
837
+ ...
838
+
839
+ @overload
840
+ async def context_resume_async(
841
+ self,
842
+ *,
843
+ ctx: Context[DepsT],
844
+ response: AsyncContextResponse[DepsT, FormattableT],
845
+ content: UserContent,
846
+ ) -> AsyncContextResponse[DepsT, FormattableT]:
847
+ """Resume an `llm.AsyncContextResponse` with a response format."""
848
+ ...
849
+
850
+ @overload
851
+ async def context_resume_async(
852
+ self,
853
+ *,
854
+ ctx: Context[DepsT],
855
+ response: AsyncContextResponse[DepsT, None]
856
+ | AsyncContextResponse[DepsT, FormattableT],
857
+ content: UserContent,
858
+ ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
859
+ """Resume an `llm.AsyncContextResponse` with an optional response format."""
860
+ ...
861
+
862
+ async def context_resume_async(
863
+ self,
864
+ *,
865
+ ctx: Context[DepsT],
866
+ response: AsyncContextResponse[DepsT, None]
867
+ | AsyncContextResponse[DepsT, FormattableT],
868
+ content: UserContent,
869
+ ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
870
+ """Generate a new `llm.AsyncContextResponse` by extending another response's messages with additional user content.
871
+
872
+ Uses the previous response's tools and output format, and this model's params.
873
+
874
+ Depending on the client, this may be a wrapper around using client call methods
875
+ with the response's messages and the new content, or it may use a provider-specific
876
+ API for resuming an existing interaction.
877
+
878
+ Args:
879
+ ctx: Context object with dependencies for tools.
880
+ response: Previous async context response to extend.
881
+ content: Additional user content to append.
882
+
883
+ Returns:
884
+ A new `llm.AsyncContextResponse` object containing the extended conversation.
885
+ """
886
+ return await get_client(self.provider).context_resume_async(
887
+ ctx=ctx,
888
+ model_id=self.model_id,
889
+ response=response,
890
+ content=content,
891
+ **self.params,
892
+ )
893
+
+    @overload
+    def resume_stream(
+        self,
+        *,
+        response: StreamResponse,
+        content: UserContent,
+    ) -> StreamResponse:
+        """Resume an `llm.StreamResponse` without a response format."""
+        ...
+
+    @overload
+    def resume_stream(
+        self,
+        *,
+        response: StreamResponse[FormattableT],
+        content: UserContent,
+    ) -> StreamResponse[FormattableT]:
+        """Resume an `llm.StreamResponse` with a response format."""
+        ...
+
+    @overload
+    def resume_stream(
+        self,
+        *,
+        response: StreamResponse | StreamResponse[FormattableT],
+        content: UserContent,
+    ) -> StreamResponse | StreamResponse[FormattableT]:
+        """Resume an `llm.StreamResponse` with an optional response format."""
+        ...
+
+    def resume_stream(
+        self,
+        *,
+        response: StreamResponse | StreamResponse[FormattableT],
+        content: UserContent,
+    ) -> StreamResponse | StreamResponse[FormattableT]:
+        """Generate a new `llm.StreamResponse` by extending another response's messages with additional user content.
+
+        Uses the previous response's tools and output format, and this model's params.
+
+        Depending on the client, this may wrap the client's call methods, passing
+        the response's messages plus the new content, or it may use a
+        provider-specific API for resuming an existing interaction.
+
+        Args:
+            response: Previous stream response to extend.
+            content: Additional user content to append.
+
+        Returns:
+            A new `llm.StreamResponse` object for streaming the extended conversation.
+        """
+        return get_client(self.provider).resume_stream(
+            model_id=self.model_id,
+            response=response,
+            content=content,
+            **self.params,
+        )
+
+    @overload
+    async def resume_stream_async(
+        self,
+        *,
+        response: AsyncStreamResponse,
+        content: UserContent,
+    ) -> AsyncStreamResponse:
+        """Resume an `llm.AsyncStreamResponse` without a response format."""
+        ...
+
+    @overload
+    async def resume_stream_async(
+        self,
+        *,
+        response: AsyncStreamResponse[FormattableT],
+        content: UserContent,
+    ) -> AsyncStreamResponse[FormattableT]:
+        """Resume an `llm.AsyncStreamResponse` with a response format."""
+        ...
+
+    @overload
+    async def resume_stream_async(
+        self,
+        *,
+        response: AsyncStreamResponse | AsyncStreamResponse[FormattableT],
+        content: UserContent,
+    ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
+        """Resume an `llm.AsyncStreamResponse` with an optional response format."""
+        ...
+
+    async def resume_stream_async(
+        self,
+        *,
+        response: AsyncStreamResponse | AsyncStreamResponse[FormattableT],
+        content: UserContent,
+    ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
+        """Generate a new `llm.AsyncStreamResponse` by extending another response's messages with additional user content.
+
+        Uses the previous response's tools and output format, and this model's params.
+
+        Depending on the client, this may wrap the client's call methods, passing
+        the response's messages plus the new content, or it may use a
+        provider-specific API for resuming an existing interaction.
+
+        Args:
+            response: Previous async stream response to extend.
+            content: Additional user content to append.
+
+        Returns:
+            A new `llm.AsyncStreamResponse` object for asynchronously streaming the extended conversation.
+        """
+        return await get_client(self.provider).resume_stream_async(
+            model_id=self.model_id,
+            response=response,
+            content=content,
+            **self.params,
+        )
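A sketch for the async variant (editorial, not part of the diff). Note that `resume_stream_async` is itself awaited, and the returned `AsyncStreamResponse` is then consumed with `async for`; the `model.stream_async` counterpart and the iteration protocol are assumptions.

```python
# Hypothetical sketch. Assumed: model.stream_async and async chunk
# iteration; confirmed by the diff: resume_stream_async.
import asyncio
import mirascope.llm as llm


async def main() -> None:
    model = llm.use_model(provider="openai", model_id="gpt-4o-mini")
    stream = await model.stream_async(  # assumed counterpart
        messages=[llm.messages.user("Tell me a short story.")]
    )
    async for chunk in stream:  # assumed iteration protocol
        print(chunk, end="")
    # The resume call is awaited; the result streams asynchronously.
    resumed = await model.resume_stream_async(
        response=stream, content="Continue the story."
    )
    async for chunk in resumed:
        print(chunk, end="")


asyncio.run(main())
```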
+
+    @overload
+    def context_resume_stream(
+        self,
+        *,
+        ctx: Context[DepsT],
+        response: ContextStreamResponse[DepsT, None],
+        content: UserContent,
+    ) -> ContextStreamResponse[DepsT, None]:
+        """Resume an `llm.ContextStreamResponse` without a response format."""
+        ...
+
+    @overload
+    def context_resume_stream(
+        self,
+        *,
+        ctx: Context[DepsT],
+        response: ContextStreamResponse[DepsT, FormattableT],
+        content: UserContent,
+    ) -> ContextStreamResponse[DepsT, FormattableT]:
+        """Resume an `llm.ContextStreamResponse` with a response format."""
+        ...
+
+    @overload
+    def context_resume_stream(
+        self,
+        *,
+        ctx: Context[DepsT],
+        response: ContextStreamResponse[DepsT, None]
+        | ContextStreamResponse[DepsT, FormattableT],
+        content: UserContent,
+    ) -> (
+        ContextStreamResponse[DepsT, None] | ContextStreamResponse[DepsT, FormattableT]
+    ):
+        """Resume an `llm.ContextStreamResponse` with an optional response format."""
+        ...
+
+    def context_resume_stream(
+        self,
+        *,
+        ctx: Context[DepsT],
+        response: ContextStreamResponse[DepsT, None]
+        | ContextStreamResponse[DepsT, FormattableT],
+        content: UserContent,
+    ) -> (
+        ContextStreamResponse[DepsT, None] | ContextStreamResponse[DepsT, FormattableT]
+    ):
+        """Generate a new `llm.ContextStreamResponse` by extending another response's messages with additional user content.
+
+        Uses the previous response's tools and output format, and this model's params.
+
+        Depending on the client, this may wrap the client's call methods, passing
+        the response's messages plus the new content, or it may use a
+        provider-specific API for resuming an existing interaction.
+
+        Args:
+            ctx: Context object with dependencies for tools.
+            response: Previous context stream response to extend.
+            content: Additional user content to append.
+
+        Returns:
+            A new `llm.ContextStreamResponse` object for streaming the extended conversation.
+        """
+        return get_client(self.provider).context_resume_stream(
+            ctx=ctx,
+            model_id=self.model_id,
+            response=response,
+            content=content,
+            **self.params,
+        )
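A sketch for `context_resume_stream` (editorial, not part of the diff): the same `ctx` is passed to both the initial call and the resume so tool calls keep seeing the same dependencies. `llm.Context(deps=...)`, a `context_stream` counterpart, and chunk iteration are assumptions.

```python
# Hypothetical sketch. Assumed: llm.Context(deps=...), model.context_stream,
# and chunk iteration; confirmed by the diff: context_resume_stream.
import mirascope.llm as llm


class Deps:
    """Dependencies made available to tools via the context."""


model = llm.use_model(provider="openai", model_id="gpt-4o-mini")
ctx = llm.Context(deps=Deps())  # assumed constructor
stream = model.context_stream(  # assumed counterpart
    ctx=ctx, messages=[llm.messages.user("Recommend a fantasy book.")]
)
for chunk in stream:  # assumed iteration protocol
    print(chunk, end="")

# Pass the same ctx again so resumed tool calls share the same deps.
resumed = model.context_resume_stream(
    ctx=ctx, response=stream, content="Why did you pick that one?"
)
for chunk in resumed:
    print(chunk, end="")
```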
+
+    @overload
+    async def context_resume_stream_async(
+        self,
+        *,
+        ctx: Context[DepsT],
+        response: AsyncContextStreamResponse[DepsT, None],
+        content: UserContent,
+    ) -> AsyncContextStreamResponse[DepsT, None]:
+        """Resume an `llm.AsyncContextStreamResponse` without a response format."""
+        ...
+
+    @overload
+    async def context_resume_stream_async(
+        self,
+        *,
+        ctx: Context[DepsT],
+        response: AsyncContextStreamResponse[DepsT, FormattableT],
+        content: UserContent,
+    ) -> AsyncContextStreamResponse[DepsT, FormattableT]:
+        """Resume an `llm.AsyncContextStreamResponse` with a response format."""
+        ...
+
+    @overload
+    async def context_resume_stream_async(
+        self,
+        *,
+        ctx: Context[DepsT],
+        response: AsyncContextStreamResponse[DepsT, None]
+        | AsyncContextStreamResponse[DepsT, FormattableT],
+        content: UserContent,
+    ) -> (
+        AsyncContextStreamResponse[DepsT, None]
+        | AsyncContextStreamResponse[DepsT, FormattableT]
+    ):
+        """Resume an `llm.AsyncContextStreamResponse` with an optional response format."""
+        ...
+
+    async def context_resume_stream_async(
+        self,
+        *,
+        ctx: Context[DepsT],
+        response: AsyncContextStreamResponse[DepsT, None]
+        | AsyncContextStreamResponse[DepsT, FormattableT],
+        content: UserContent,
+    ) -> (
+        AsyncContextStreamResponse[DepsT, None]
+        | AsyncContextStreamResponse[DepsT, FormattableT]
+    ):
+        """Generate a new `llm.AsyncContextStreamResponse` by extending another response's messages with additional user content.
+
+        Uses the previous response's tools and output format, and this model's params.
+
+        Depending on the client, this may wrap the client's call methods, passing
+        the response's messages plus the new content, or it may use a
+        provider-specific API for resuming an existing interaction.
+
+        Args:
+            ctx: Context object with dependencies for tools.
+            response: Previous async context stream response to extend.
+            content: Additional user content to append.
+
+        Returns:
+            A new `llm.AsyncContextStreamResponse` object for asynchronously streaming the extended conversation.
+        """
+        return await get_client(self.provider).context_resume_stream_async(
+            ctx=ctx,
+            model_id=self.model_id,
+            response=response,
+            content=content,
+            **self.params,
+        )
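For completeness, a sketch of the async context-streaming resume (editorial, not part of the diff); the assumptions mirror the sync sketch above, with a `context_stream_async` counterpart assumed.

```python
# Hypothetical sketch. Assumed: llm.Context(deps=...),
# model.context_stream_async, and async chunk iteration; confirmed by
# the diff: context_resume_stream_async.
import asyncio
import mirascope.llm as llm


class Deps:
    """Dependencies made available to tools via the context."""


async def main() -> None:
    model = llm.use_model(provider="openai", model_id="gpt-4o-mini")
    ctx = llm.Context(deps=Deps())  # assumed constructor
    stream = await model.context_stream_async(  # assumed counterpart
        ctx=ctx, messages=[llm.messages.user("Recommend a fantasy book.")]
    )
    async for chunk in stream:  # assumed iteration protocol
        print(chunk, end="")
    resumed = await model.context_resume_stream_async(
        ctx=ctx, response=stream, content="Why that one?"
    )
    async for chunk in resumed:
        print(chunk, end="")


asyncio.run(main())
```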
+
+
+@contextmanager
+def model(
+    *,
+    provider: Provider,
+    model_id: ModelId,
+    **params: Unpack[Params],
+) -> Iterator[None]:
+    """Set a model in context for the duration of the context manager.
+
+    This context manager sets a model that will be used by `llm.use_model()` calls
+    within the context. This allows you to override the default model at runtime.
+
+    Args:
+        provider: The LLM provider to use (e.g., "openai:completions", "anthropic", "google").
+        model_id: The specific model identifier for the chosen provider.
+        **params: Additional parameters to configure the model (e.g. temperature). See `llm.Params`.
+
+    Raises:
+        ValueError: If the specified provider is not supported.
+
+    Example:
+
+    ```python
+    import mirascope.llm as llm
+
+    def recommend_book(genre: str) -> llm.Response:
+        model = llm.use_model(provider="openai", model_id="gpt-4o-mini")
+        message = llm.messages.user(f"Please recommend a book in {genre}.")
+        return model.call(messages=[message])
+
+    # Override the default model at runtime
+    with llm.model(provider="anthropic", model_id="claude-sonnet-4-0"):
+        response = recommend_book("fantasy")  # Uses Claude instead of GPT
+    ```
+    """
+    token = MODEL_CONTEXT.set(Model(provider, model_id, **params))
+    try:
+        yield
+    finally:
+        MODEL_CONTEXT.reset(token)
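Because `model()` stores the override in a ContextVar and restores it via the saved token, nested overrides compose and unwind correctly. A small editorial sketch (the google model id is illustrative, not taken from the diff):

```python
# Nested llm.model() overrides; innermost wins, and exiting each block
# restores the previous override via ContextVar token reset.
import mirascope.llm as llm

with llm.model(provider="anthropic", model_id="claude-sonnet-4-0"):
    m1 = llm.use_model(provider="openai", model_id="gpt-4o-mini")  # -> Claude
    with llm.model(provider="google", model_id="gemini-2.0-flash"):  # illustrative id
        m2 = llm.use_model(provider="openai", model_id="gpt-4o-mini")  # -> Gemini
    m3 = llm.use_model(provider="openai", model_id="gpt-4o-mini")  # -> Claude again
```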
+
+
+def use_model(
+    *,
+    provider: Provider,
+    model_id: ModelId,
+    **params: Unpack[Params],
+) -> Model:
+    """Get the model from context if available, otherwise create a new Model.
+
+    This function checks whether a model has been set in the context (via the
+    `llm.model()` context manager). If a model is found in the context, it
+    returns that model. Otherwise, it creates and returns a new `llm.Model`
+    instance with the provided arguments as defaults.
+
+    This allows you to write functions that work with a default model but can be
+    overridden at runtime using the `llm.model()` context manager.
+
+    Args:
+        provider: The LLM provider to use (e.g., "openai:completions", "anthropic", "google").
+        model_id: The specific model identifier for the chosen provider.
+        **params: Additional parameters to configure the model (e.g. temperature). See `llm.Params`.
+
+    Returns:
+        An `llm.Model` instance from context or a new instance with the specified settings.
+
+    Raises:
+        ValueError: If the specified provider is not supported.
+
+    Example:
+
+    ```python
+    import mirascope.llm as llm
+
+    def recommend_book(genre: str) -> llm.Response:
+        model = llm.use_model(provider="openai", model_id="gpt-4o-mini")
+        message = llm.messages.user(f"Please recommend a book in {genre}.")
+        return model.call(messages=[message])
+
+    # Uses the default model (gpt-4o-mini)
+    response = recommend_book("fantasy")
+
+    # Override with a different model
+    with llm.model(provider="anthropic", model_id="claude-sonnet-4-0"):
+        response = recommend_book("fantasy")  # Uses Claude instead
+    ```
+    """
+    context_model = get_model_from_context()
+    if context_model is not None:
+        return context_model
+    return Model(provider, model_id, **params)