mirascope 1.25.7__py3-none-any.whl → 2.0.0a0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (474)
  1. mirascope/__init__.py +3 -59
  2. mirascope/graphs/__init__.py +22 -0
  3. mirascope/{experimental/graphs → graphs}/finite_state_machine.py +70 -159
  4. mirascope/llm/__init__.py +206 -16
  5. mirascope/llm/agents/__init__.py +15 -0
  6. mirascope/llm/agents/agent.py +97 -0
  7. mirascope/llm/agents/agent_template.py +45 -0
  8. mirascope/llm/agents/decorator.py +176 -0
  9. mirascope/llm/calls/__init__.py +16 -0
  10. mirascope/llm/calls/base_call.py +33 -0
  11. mirascope/llm/calls/calls.py +315 -0
  12. mirascope/llm/calls/decorator.py +255 -0
  13. mirascope/llm/clients/__init__.py +34 -0
  14. mirascope/llm/clients/anthropic/__init__.py +11 -0
  15. mirascope/llm/clients/anthropic/_utils/__init__.py +13 -0
  16. mirascope/llm/clients/anthropic/_utils/decode.py +244 -0
  17. mirascope/llm/clients/anthropic/_utils/encode.py +243 -0
  18. mirascope/llm/clients/anthropic/clients.py +819 -0
  19. mirascope/llm/clients/anthropic/model_ids.py +8 -0
  20. mirascope/llm/clients/base/__init__.py +15 -0
  21. mirascope/llm/clients/base/_utils.py +192 -0
  22. mirascope/llm/clients/base/client.py +1256 -0
  23. mirascope/llm/clients/base/kwargs.py +12 -0
  24. mirascope/llm/clients/base/params.py +93 -0
  25. mirascope/llm/clients/google/__init__.py +6 -0
  26. mirascope/llm/clients/google/_utils/__init__.py +13 -0
  27. mirascope/llm/clients/google/_utils/decode.py +231 -0
  28. mirascope/llm/clients/google/_utils/encode.py +279 -0
  29. mirascope/llm/clients/google/clients.py +853 -0
  30. mirascope/llm/clients/google/message.py +7 -0
  31. mirascope/llm/clients/google/model_ids.py +15 -0
  32. mirascope/llm/clients/openai/__init__.py +25 -0
  33. mirascope/llm/clients/openai/completions/__init__.py +9 -0
  34. mirascope/llm/clients/openai/completions/_utils/__init__.py +13 -0
  35. mirascope/llm/clients/openai/completions/_utils/decode.py +187 -0
  36. mirascope/llm/clients/openai/completions/_utils/encode.py +358 -0
  37. mirascope/llm/clients/openai/completions/_utils/model_features.py +81 -0
  38. mirascope/llm/clients/openai/completions/clients.py +833 -0
  39. mirascope/llm/clients/openai/completions/model_ids.py +8 -0
  40. mirascope/llm/clients/openai/responses/__init__.py +9 -0
  41. mirascope/llm/clients/openai/responses/_utils/__init__.py +13 -0
  42. mirascope/llm/clients/openai/responses/_utils/decode.py +194 -0
  43. mirascope/llm/clients/openai/responses/_utils/encode.py +333 -0
  44. mirascope/llm/clients/openai/responses/_utils/model_features.py +87 -0
  45. mirascope/llm/clients/openai/responses/clients.py +832 -0
  46. mirascope/llm/clients/openai/responses/model_ids.py +8 -0
  47. mirascope/llm/clients/openai/shared/__init__.py +7 -0
  48. mirascope/llm/clients/openai/shared/_utils.py +55 -0
  49. mirascope/llm/clients/providers.py +175 -0
  50. mirascope/llm/content/__init__.py +70 -0
  51. mirascope/llm/content/audio.py +173 -0
  52. mirascope/llm/content/document.py +94 -0
  53. mirascope/llm/content/image.py +206 -0
  54. mirascope/llm/content/text.py +47 -0
  55. mirascope/llm/content/thought.py +58 -0
  56. mirascope/llm/content/tool_call.py +63 -0
  57. mirascope/llm/content/tool_output.py +26 -0
  58. mirascope/llm/context/__init__.py +6 -0
  59. mirascope/llm/context/_utils.py +28 -0
  60. mirascope/llm/context/context.py +24 -0
  61. mirascope/llm/exceptions.py +105 -0
  62. mirascope/llm/formatting/__init__.py +22 -0
  63. mirascope/llm/formatting/_utils.py +74 -0
  64. mirascope/llm/formatting/format.py +104 -0
  65. mirascope/llm/formatting/from_call_args.py +30 -0
  66. mirascope/llm/formatting/partial.py +58 -0
  67. mirascope/llm/formatting/types.py +109 -0
  68. mirascope/llm/mcp/__init__.py +5 -0
  69. mirascope/llm/mcp/client.py +118 -0
  70. mirascope/llm/messages/__init__.py +32 -0
  71. mirascope/llm/messages/message.py +182 -0
  72. mirascope/llm/models/__init__.py +16 -0
  73. mirascope/llm/models/models.py +1243 -0
  74. mirascope/llm/prompts/__init__.py +33 -0
  75. mirascope/llm/prompts/_utils.py +60 -0
  76. mirascope/llm/prompts/decorator.py +286 -0
  77. mirascope/llm/prompts/protocols.py +99 -0
  78. mirascope/llm/responses/__init__.py +57 -0
  79. mirascope/llm/responses/_utils.py +56 -0
  80. mirascope/llm/responses/base_response.py +91 -0
  81. mirascope/llm/responses/base_stream_response.py +697 -0
  82. mirascope/llm/responses/finish_reason.py +27 -0
  83. mirascope/llm/responses/response.py +345 -0
  84. mirascope/llm/responses/root_response.py +177 -0
  85. mirascope/llm/responses/stream_response.py +572 -0
  86. mirascope/llm/responses/streams.py +363 -0
  87. mirascope/llm/tools/__init__.py +40 -0
  88. mirascope/llm/tools/_utils.py +25 -0
  89. mirascope/llm/tools/decorator.py +175 -0
  90. mirascope/llm/tools/protocols.py +96 -0
  91. mirascope/llm/tools/tool_schema.py +246 -0
  92. mirascope/llm/tools/toolkit.py +152 -0
  93. mirascope/llm/tools/tools.py +169 -0
  94. mirascope/llm/types/__init__.py +22 -0
  95. mirascope/llm/types/dataclass.py +9 -0
  96. mirascope/llm/types/jsonable.py +44 -0
  97. mirascope/llm/types/type_vars.py +19 -0
  98. mirascope-2.0.0a0.dist-info/METADATA +117 -0
  99. mirascope-2.0.0a0.dist-info/RECORD +101 -0
  100. mirascope/beta/__init__.py +0 -3
  101. mirascope/beta/openai/__init__.py +0 -17
  102. mirascope/beta/openai/realtime/__init__.py +0 -13
  103. mirascope/beta/openai/realtime/_utils/__init__.py +0 -3
  104. mirascope/beta/openai/realtime/_utils/_audio.py +0 -74
  105. mirascope/beta/openai/realtime/_utils/_protocols.py +0 -50
  106. mirascope/beta/openai/realtime/realtime.py +0 -500
  107. mirascope/beta/openai/realtime/recording.py +0 -98
  108. mirascope/beta/openai/realtime/tool.py +0 -113
  109. mirascope/beta/rag/__init__.py +0 -24
  110. mirascope/beta/rag/base/__init__.py +0 -22
  111. mirascope/beta/rag/base/chunkers/__init__.py +0 -2
  112. mirascope/beta/rag/base/chunkers/base_chunker.py +0 -37
  113. mirascope/beta/rag/base/chunkers/text_chunker.py +0 -33
  114. mirascope/beta/rag/base/config.py +0 -8
  115. mirascope/beta/rag/base/document.py +0 -11
  116. mirascope/beta/rag/base/embedders.py +0 -35
  117. mirascope/beta/rag/base/embedding_params.py +0 -18
  118. mirascope/beta/rag/base/embedding_response.py +0 -30
  119. mirascope/beta/rag/base/query_results.py +0 -7
  120. mirascope/beta/rag/base/vectorstore_params.py +0 -18
  121. mirascope/beta/rag/base/vectorstores.py +0 -37
  122. mirascope/beta/rag/chroma/__init__.py +0 -11
  123. mirascope/beta/rag/chroma/types.py +0 -62
  124. mirascope/beta/rag/chroma/vectorstores.py +0 -121
  125. mirascope/beta/rag/cohere/__init__.py +0 -11
  126. mirascope/beta/rag/cohere/embedders.py +0 -87
  127. mirascope/beta/rag/cohere/embedding_params.py +0 -29
  128. mirascope/beta/rag/cohere/embedding_response.py +0 -29
  129. mirascope/beta/rag/cohere/py.typed +0 -0
  130. mirascope/beta/rag/openai/__init__.py +0 -11
  131. mirascope/beta/rag/openai/embedders.py +0 -144
  132. mirascope/beta/rag/openai/embedding_params.py +0 -18
  133. mirascope/beta/rag/openai/embedding_response.py +0 -14
  134. mirascope/beta/rag/openai/py.typed +0 -0
  135. mirascope/beta/rag/pinecone/__init__.py +0 -19
  136. mirascope/beta/rag/pinecone/types.py +0 -143
  137. mirascope/beta/rag/pinecone/vectorstores.py +0 -148
  138. mirascope/beta/rag/weaviate/__init__.py +0 -6
  139. mirascope/beta/rag/weaviate/types.py +0 -92
  140. mirascope/beta/rag/weaviate/vectorstores.py +0 -103
  141. mirascope/core/__init__.py +0 -109
  142. mirascope/core/anthropic/__init__.py +0 -31
  143. mirascope/core/anthropic/_call.py +0 -67
  144. mirascope/core/anthropic/_call_kwargs.py +0 -13
  145. mirascope/core/anthropic/_thinking.py +0 -70
  146. mirascope/core/anthropic/_utils/__init__.py +0 -16
  147. mirascope/core/anthropic/_utils/_convert_common_call_params.py +0 -25
  148. mirascope/core/anthropic/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -21
  149. mirascope/core/anthropic/_utils/_convert_message_params.py +0 -102
  150. mirascope/core/anthropic/_utils/_get_json_output.py +0 -31
  151. mirascope/core/anthropic/_utils/_handle_stream.py +0 -113
  152. mirascope/core/anthropic/_utils/_message_param_converter.py +0 -154
  153. mirascope/core/anthropic/_utils/_setup_call.py +0 -146
  154. mirascope/core/anthropic/call_params.py +0 -44
  155. mirascope/core/anthropic/call_response.py +0 -226
  156. mirascope/core/anthropic/call_response_chunk.py +0 -152
  157. mirascope/core/anthropic/dynamic_config.py +0 -40
  158. mirascope/core/anthropic/py.typed +0 -0
  159. mirascope/core/anthropic/stream.py +0 -204
  160. mirascope/core/anthropic/tool.py +0 -101
  161. mirascope/core/azure/__init__.py +0 -31
  162. mirascope/core/azure/_call.py +0 -67
  163. mirascope/core/azure/_call_kwargs.py +0 -13
  164. mirascope/core/azure/_utils/__init__.py +0 -14
  165. mirascope/core/azure/_utils/_convert_common_call_params.py +0 -26
  166. mirascope/core/azure/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -21
  167. mirascope/core/azure/_utils/_convert_message_params.py +0 -121
  168. mirascope/core/azure/_utils/_get_credential.py +0 -33
  169. mirascope/core/azure/_utils/_get_json_output.py +0 -27
  170. mirascope/core/azure/_utils/_handle_stream.py +0 -130
  171. mirascope/core/azure/_utils/_message_param_converter.py +0 -117
  172. mirascope/core/azure/_utils/_setup_call.py +0 -183
  173. mirascope/core/azure/call_params.py +0 -59
  174. mirascope/core/azure/call_response.py +0 -215
  175. mirascope/core/azure/call_response_chunk.py +0 -105
  176. mirascope/core/azure/dynamic_config.py +0 -30
  177. mirascope/core/azure/py.typed +0 -0
  178. mirascope/core/azure/stream.py +0 -147
  179. mirascope/core/azure/tool.py +0 -93
  180. mirascope/core/base/__init__.py +0 -86
  181. mirascope/core/base/_call_factory.py +0 -256
  182. mirascope/core/base/_create.py +0 -253
  183. mirascope/core/base/_extract.py +0 -175
  184. mirascope/core/base/_extract_with_tools.py +0 -189
  185. mirascope/core/base/_partial.py +0 -95
  186. mirascope/core/base/_utils/__init__.py +0 -92
  187. mirascope/core/base/_utils/_base_message_param_converter.py +0 -22
  188. mirascope/core/base/_utils/_base_type.py +0 -26
  189. mirascope/core/base/_utils/_convert_base_model_to_base_tool.py +0 -48
  190. mirascope/core/base/_utils/_convert_base_type_to_base_tool.py +0 -24
  191. mirascope/core/base/_utils/_convert_function_to_base_tool.py +0 -139
  192. mirascope/core/base/_utils/_convert_messages_to_message_params.py +0 -178
  193. mirascope/core/base/_utils/_convert_provider_finish_reason_to_finish_reason.py +0 -20
  194. mirascope/core/base/_utils/_default_tool_docstring.py +0 -6
  195. mirascope/core/base/_utils/_extract_tool_return.py +0 -42
  196. mirascope/core/base/_utils/_fn_is_async.py +0 -24
  197. mirascope/core/base/_utils/_format_template.py +0 -32
  198. mirascope/core/base/_utils/_get_audio_type.py +0 -18
  199. mirascope/core/base/_utils/_get_common_usage.py +0 -20
  200. mirascope/core/base/_utils/_get_create_fn_or_async_create_fn.py +0 -137
  201. mirascope/core/base/_utils/_get_document_type.py +0 -7
  202. mirascope/core/base/_utils/_get_dynamic_configuration.py +0 -69
  203. mirascope/core/base/_utils/_get_fields_from_call_args.py +0 -34
  204. mirascope/core/base/_utils/_get_fn_args.py +0 -23
  205. mirascope/core/base/_utils/_get_image_dimensions.py +0 -39
  206. mirascope/core/base/_utils/_get_image_type.py +0 -26
  207. mirascope/core/base/_utils/_get_metadata.py +0 -17
  208. mirascope/core/base/_utils/_get_possible_user_message_param.py +0 -21
  209. mirascope/core/base/_utils/_get_prompt_template.py +0 -28
  210. mirascope/core/base/_utils/_get_template_values.py +0 -51
  211. mirascope/core/base/_utils/_get_template_variables.py +0 -38
  212. mirascope/core/base/_utils/_get_unsupported_tool_config_keys.py +0 -10
  213. mirascope/core/base/_utils/_is_prompt_template.py +0 -24
  214. mirascope/core/base/_utils/_json_mode_content.py +0 -17
  215. mirascope/core/base/_utils/_messages_decorator.py +0 -121
  216. mirascope/core/base/_utils/_parse_content_template.py +0 -323
  217. mirascope/core/base/_utils/_parse_prompt_messages.py +0 -63
  218. mirascope/core/base/_utils/_pil_image_to_bytes.py +0 -13
  219. mirascope/core/base/_utils/_protocols.py +0 -901
  220. mirascope/core/base/_utils/_setup_call.py +0 -79
  221. mirascope/core/base/_utils/_setup_extract_tool.py +0 -30
  222. mirascope/core/base/call_kwargs.py +0 -13
  223. mirascope/core/base/call_params.py +0 -36
  224. mirascope/core/base/call_response.py +0 -338
  225. mirascope/core/base/call_response_chunk.py +0 -130
  226. mirascope/core/base/dynamic_config.py +0 -82
  227. mirascope/core/base/from_call_args.py +0 -30
  228. mirascope/core/base/merge_decorators.py +0 -59
  229. mirascope/core/base/message_param.py +0 -175
  230. mirascope/core/base/messages.py +0 -116
  231. mirascope/core/base/metadata.py +0 -13
  232. mirascope/core/base/prompt.py +0 -497
  233. mirascope/core/base/response_model_config_dict.py +0 -9
  234. mirascope/core/base/stream.py +0 -479
  235. mirascope/core/base/stream_config.py +0 -11
  236. mirascope/core/base/structured_stream.py +0 -296
  237. mirascope/core/base/tool.py +0 -214
  238. mirascope/core/base/toolkit.py +0 -176
  239. mirascope/core/base/types.py +0 -344
  240. mirascope/core/bedrock/__init__.py +0 -34
  241. mirascope/core/bedrock/_call.py +0 -68
  242. mirascope/core/bedrock/_call_kwargs.py +0 -12
  243. mirascope/core/bedrock/_types.py +0 -104
  244. mirascope/core/bedrock/_utils/__init__.py +0 -14
  245. mirascope/core/bedrock/_utils/_convert_common_call_params.py +0 -39
  246. mirascope/core/bedrock/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -23
  247. mirascope/core/bedrock/_utils/_convert_message_params.py +0 -111
  248. mirascope/core/bedrock/_utils/_get_json_output.py +0 -30
  249. mirascope/core/bedrock/_utils/_handle_stream.py +0 -104
  250. mirascope/core/bedrock/_utils/_message_param_converter.py +0 -172
  251. mirascope/core/bedrock/_utils/_setup_call.py +0 -258
  252. mirascope/core/bedrock/call_params.py +0 -38
  253. mirascope/core/bedrock/call_response.py +0 -248
  254. mirascope/core/bedrock/call_response_chunk.py +0 -111
  255. mirascope/core/bedrock/dynamic_config.py +0 -37
  256. mirascope/core/bedrock/py.typed +0 -0
  257. mirascope/core/bedrock/stream.py +0 -154
  258. mirascope/core/bedrock/tool.py +0 -100
  259. mirascope/core/cohere/__init__.py +0 -30
  260. mirascope/core/cohere/_call.py +0 -67
  261. mirascope/core/cohere/_call_kwargs.py +0 -11
  262. mirascope/core/cohere/_types.py +0 -20
  263. mirascope/core/cohere/_utils/__init__.py +0 -14
  264. mirascope/core/cohere/_utils/_convert_common_call_params.py +0 -26
  265. mirascope/core/cohere/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -24
  266. mirascope/core/cohere/_utils/_convert_message_params.py +0 -32
  267. mirascope/core/cohere/_utils/_get_json_output.py +0 -30
  268. mirascope/core/cohere/_utils/_handle_stream.py +0 -35
  269. mirascope/core/cohere/_utils/_message_param_converter.py +0 -54
  270. mirascope/core/cohere/_utils/_setup_call.py +0 -150
  271. mirascope/core/cohere/call_params.py +0 -62
  272. mirascope/core/cohere/call_response.py +0 -205
  273. mirascope/core/cohere/call_response_chunk.py +0 -125
  274. mirascope/core/cohere/dynamic_config.py +0 -32
  275. mirascope/core/cohere/py.typed +0 -0
  276. mirascope/core/cohere/stream.py +0 -113
  277. mirascope/core/cohere/tool.py +0 -93
  278. mirascope/core/costs/__init__.py +0 -5
  279. mirascope/core/costs/_anthropic_calculate_cost.py +0 -219
  280. mirascope/core/costs/_azure_calculate_cost.py +0 -11
  281. mirascope/core/costs/_bedrock_calculate_cost.py +0 -15
  282. mirascope/core/costs/_cohere_calculate_cost.py +0 -44
  283. mirascope/core/costs/_gemini_calculate_cost.py +0 -67
  284. mirascope/core/costs/_google_calculate_cost.py +0 -427
  285. mirascope/core/costs/_groq_calculate_cost.py +0 -156
  286. mirascope/core/costs/_litellm_calculate_cost.py +0 -11
  287. mirascope/core/costs/_mistral_calculate_cost.py +0 -64
  288. mirascope/core/costs/_openai_calculate_cost.py +0 -416
  289. mirascope/core/costs/_vertex_calculate_cost.py +0 -67
  290. mirascope/core/costs/_xai_calculate_cost.py +0 -104
  291. mirascope/core/costs/calculate_cost.py +0 -86
  292. mirascope/core/gemini/__init__.py +0 -40
  293. mirascope/core/gemini/_call.py +0 -67
  294. mirascope/core/gemini/_call_kwargs.py +0 -12
  295. mirascope/core/gemini/_utils/__init__.py +0 -14
  296. mirascope/core/gemini/_utils/_convert_common_call_params.py +0 -39
  297. mirascope/core/gemini/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -23
  298. mirascope/core/gemini/_utils/_convert_message_params.py +0 -156
  299. mirascope/core/gemini/_utils/_get_json_output.py +0 -35
  300. mirascope/core/gemini/_utils/_handle_stream.py +0 -33
  301. mirascope/core/gemini/_utils/_message_param_converter.py +0 -209
  302. mirascope/core/gemini/_utils/_setup_call.py +0 -149
  303. mirascope/core/gemini/call_params.py +0 -52
  304. mirascope/core/gemini/call_response.py +0 -216
  305. mirascope/core/gemini/call_response_chunk.py +0 -100
  306. mirascope/core/gemini/dynamic_config.py +0 -26
  307. mirascope/core/gemini/stream.py +0 -120
  308. mirascope/core/gemini/tool.py +0 -104
  309. mirascope/core/google/__init__.py +0 -29
  310. mirascope/core/google/_call.py +0 -67
  311. mirascope/core/google/_call_kwargs.py +0 -13
  312. mirascope/core/google/_utils/__init__.py +0 -14
  313. mirascope/core/google/_utils/_convert_common_call_params.py +0 -38
  314. mirascope/core/google/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -27
  315. mirascope/core/google/_utils/_convert_message_params.py +0 -297
  316. mirascope/core/google/_utils/_get_json_output.py +0 -37
  317. mirascope/core/google/_utils/_handle_stream.py +0 -58
  318. mirascope/core/google/_utils/_message_param_converter.py +0 -200
  319. mirascope/core/google/_utils/_setup_call.py +0 -201
  320. mirascope/core/google/_utils/_validate_media_type.py +0 -58
  321. mirascope/core/google/call_params.py +0 -22
  322. mirascope/core/google/call_response.py +0 -255
  323. mirascope/core/google/call_response_chunk.py +0 -135
  324. mirascope/core/google/dynamic_config.py +0 -26
  325. mirascope/core/google/stream.py +0 -199
  326. mirascope/core/google/tool.py +0 -146
  327. mirascope/core/groq/__init__.py +0 -30
  328. mirascope/core/groq/_call.py +0 -67
  329. mirascope/core/groq/_call_kwargs.py +0 -13
  330. mirascope/core/groq/_utils/__init__.py +0 -14
  331. mirascope/core/groq/_utils/_convert_common_call_params.py +0 -26
  332. mirascope/core/groq/_utils/_convert_message_params.py +0 -112
  333. mirascope/core/groq/_utils/_get_json_output.py +0 -27
  334. mirascope/core/groq/_utils/_handle_stream.py +0 -123
  335. mirascope/core/groq/_utils/_message_param_converter.py +0 -89
  336. mirascope/core/groq/_utils/_setup_call.py +0 -132
  337. mirascope/core/groq/call_params.py +0 -52
  338. mirascope/core/groq/call_response.py +0 -213
  339. mirascope/core/groq/call_response_chunk.py +0 -104
  340. mirascope/core/groq/dynamic_config.py +0 -29
  341. mirascope/core/groq/py.typed +0 -0
  342. mirascope/core/groq/stream.py +0 -135
  343. mirascope/core/groq/tool.py +0 -80
  344. mirascope/core/litellm/__init__.py +0 -28
  345. mirascope/core/litellm/_call.py +0 -67
  346. mirascope/core/litellm/_utils/__init__.py +0 -5
  347. mirascope/core/litellm/_utils/_setup_call.py +0 -109
  348. mirascope/core/litellm/call_params.py +0 -10
  349. mirascope/core/litellm/call_response.py +0 -24
  350. mirascope/core/litellm/call_response_chunk.py +0 -14
  351. mirascope/core/litellm/dynamic_config.py +0 -8
  352. mirascope/core/litellm/py.typed +0 -0
  353. mirascope/core/litellm/stream.py +0 -86
  354. mirascope/core/litellm/tool.py +0 -13
  355. mirascope/core/mistral/__init__.py +0 -36
  356. mirascope/core/mistral/_call.py +0 -65
  357. mirascope/core/mistral/_call_kwargs.py +0 -19
  358. mirascope/core/mistral/_utils/__init__.py +0 -14
  359. mirascope/core/mistral/_utils/_convert_common_call_params.py +0 -24
  360. mirascope/core/mistral/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -22
  361. mirascope/core/mistral/_utils/_convert_message_params.py +0 -122
  362. mirascope/core/mistral/_utils/_get_json_output.py +0 -34
  363. mirascope/core/mistral/_utils/_handle_stream.py +0 -139
  364. mirascope/core/mistral/_utils/_message_param_converter.py +0 -176
  365. mirascope/core/mistral/_utils/_setup_call.py +0 -164
  366. mirascope/core/mistral/call_params.py +0 -36
  367. mirascope/core/mistral/call_response.py +0 -205
  368. mirascope/core/mistral/call_response_chunk.py +0 -105
  369. mirascope/core/mistral/dynamic_config.py +0 -33
  370. mirascope/core/mistral/py.typed +0 -0
  371. mirascope/core/mistral/stream.py +0 -120
  372. mirascope/core/mistral/tool.py +0 -81
  373. mirascope/core/openai/__init__.py +0 -31
  374. mirascope/core/openai/_call.py +0 -67
  375. mirascope/core/openai/_call_kwargs.py +0 -13
  376. mirascope/core/openai/_utils/__init__.py +0 -14
  377. mirascope/core/openai/_utils/_convert_common_call_params.py +0 -26
  378. mirascope/core/openai/_utils/_convert_message_params.py +0 -148
  379. mirascope/core/openai/_utils/_get_json_output.py +0 -31
  380. mirascope/core/openai/_utils/_handle_stream.py +0 -138
  381. mirascope/core/openai/_utils/_message_param_converter.py +0 -105
  382. mirascope/core/openai/_utils/_setup_call.py +0 -155
  383. mirascope/core/openai/call_params.py +0 -92
  384. mirascope/core/openai/call_response.py +0 -273
  385. mirascope/core/openai/call_response_chunk.py +0 -139
  386. mirascope/core/openai/dynamic_config.py +0 -34
  387. mirascope/core/openai/py.typed +0 -0
  388. mirascope/core/openai/stream.py +0 -185
  389. mirascope/core/openai/tool.py +0 -101
  390. mirascope/core/py.typed +0 -0
  391. mirascope/core/vertex/__init__.py +0 -45
  392. mirascope/core/vertex/_call.py +0 -62
  393. mirascope/core/vertex/_call_kwargs.py +0 -12
  394. mirascope/core/vertex/_utils/__init__.py +0 -14
  395. mirascope/core/vertex/_utils/_convert_common_call_params.py +0 -37
  396. mirascope/core/vertex/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -23
  397. mirascope/core/vertex/_utils/_convert_message_params.py +0 -171
  398. mirascope/core/vertex/_utils/_get_json_output.py +0 -36
  399. mirascope/core/vertex/_utils/_handle_stream.py +0 -33
  400. mirascope/core/vertex/_utils/_message_param_converter.py +0 -133
  401. mirascope/core/vertex/_utils/_setup_call.py +0 -160
  402. mirascope/core/vertex/call_params.py +0 -24
  403. mirascope/core/vertex/call_response.py +0 -206
  404. mirascope/core/vertex/call_response_chunk.py +0 -99
  405. mirascope/core/vertex/dynamic_config.py +0 -28
  406. mirascope/core/vertex/stream.py +0 -119
  407. mirascope/core/vertex/tool.py +0 -101
  408. mirascope/core/xai/__init__.py +0 -28
  409. mirascope/core/xai/_call.py +0 -67
  410. mirascope/core/xai/_utils/__init__.py +0 -5
  411. mirascope/core/xai/_utils/_setup_call.py +0 -113
  412. mirascope/core/xai/call_params.py +0 -10
  413. mirascope/core/xai/call_response.py +0 -16
  414. mirascope/core/xai/call_response_chunk.py +0 -14
  415. mirascope/core/xai/dynamic_config.py +0 -8
  416. mirascope/core/xai/py.typed +0 -0
  417. mirascope/core/xai/stream.py +0 -57
  418. mirascope/core/xai/tool.py +0 -13
  419. mirascope/experimental/graphs/__init__.py +0 -5
  420. mirascope/integrations/__init__.py +0 -16
  421. mirascope/integrations/_middleware_factory.py +0 -403
  422. mirascope/integrations/langfuse/__init__.py +0 -3
  423. mirascope/integrations/langfuse/_utils.py +0 -114
  424. mirascope/integrations/langfuse/_with_langfuse.py +0 -70
  425. mirascope/integrations/logfire/__init__.py +0 -3
  426. mirascope/integrations/logfire/_utils.py +0 -225
  427. mirascope/integrations/logfire/_with_logfire.py +0 -63
  428. mirascope/integrations/otel/__init__.py +0 -10
  429. mirascope/integrations/otel/_utils.py +0 -270
  430. mirascope/integrations/otel/_with_hyperdx.py +0 -60
  431. mirascope/integrations/otel/_with_otel.py +0 -59
  432. mirascope/integrations/tenacity.py +0 -14
  433. mirascope/llm/_call.py +0 -401
  434. mirascope/llm/_context.py +0 -384
  435. mirascope/llm/_override.py +0 -3639
  436. mirascope/llm/_protocols.py +0 -500
  437. mirascope/llm/_response_metaclass.py +0 -31
  438. mirascope/llm/call_response.py +0 -158
  439. mirascope/llm/call_response_chunk.py +0 -66
  440. mirascope/llm/stream.py +0 -162
  441. mirascope/llm/tool.py +0 -64
  442. mirascope/mcp/__init__.py +0 -7
  443. mirascope/mcp/_utils.py +0 -288
  444. mirascope/mcp/client.py +0 -167
  445. mirascope/mcp/server.py +0 -356
  446. mirascope/mcp/tools.py +0 -110
  447. mirascope/py.typed +0 -0
  448. mirascope/retries/__init__.py +0 -11
  449. mirascope/retries/fallback.py +0 -131
  450. mirascope/retries/tenacity.py +0 -50
  451. mirascope/tools/__init__.py +0 -37
  452. mirascope/tools/base.py +0 -98
  453. mirascope/tools/system/__init__.py +0 -0
  454. mirascope/tools/system/_docker_operation.py +0 -166
  455. mirascope/tools/system/_file_system.py +0 -267
  456. mirascope/tools/web/__init__.py +0 -0
  457. mirascope/tools/web/_duckduckgo.py +0 -111
  458. mirascope/tools/web/_httpx.py +0 -125
  459. mirascope/tools/web/_parse_url_content.py +0 -94
  460. mirascope/tools/web/_requests.py +0 -54
  461. mirascope/v0/__init__.py +0 -43
  462. mirascope/v0/anthropic.py +0 -54
  463. mirascope/v0/base/__init__.py +0 -12
  464. mirascope/v0/base/calls.py +0 -118
  465. mirascope/v0/base/extractors.py +0 -122
  466. mirascope/v0/base/ops_utils.py +0 -207
  467. mirascope/v0/base/prompts.py +0 -48
  468. mirascope/v0/base/types.py +0 -14
  469. mirascope/v0/base/utils.py +0 -21
  470. mirascope/v0/openai.py +0 -54
  471. mirascope-1.25.7.dist-info/METADATA +0 -169
  472. mirascope-1.25.7.dist-info/RECORD +0 -378
  473. {mirascope-1.25.7.dist-info → mirascope-2.0.0a0.dist-info}/WHEEL +0 -0
  474. {mirascope-1.25.7.dist-info → mirascope-2.0.0a0.dist-info}/licenses/LICENSE +0 -0
mirascope/llm/clients/base/client.py
@@ -0,0 +1,1256 @@
+ """Base abstract interface for provider clients."""
+
+ from __future__ import annotations
+
+ from abc import ABC, abstractmethod
+ from collections.abc import Sequence
+ from contextvars import ContextVar, Token
+ from types import TracebackType
+ from typing import Generic, overload
+ from typing_extensions import Self, TypeVar, Unpack
+
+ from ...context import Context, DepsT
+ from ...formatting import Format, FormattableT
+ from ...messages import Message, UserContent, user
+ from ...responses import (
+     AsyncContextResponse,
+     AsyncContextStreamResponse,
+     AsyncResponse,
+     AsyncStreamResponse,
+     ContextResponse,
+     ContextStreamResponse,
+     Response,
+     StreamResponse,
+ )
+ from ...tools import (
+     AsyncContextTool,
+     AsyncContextToolkit,
+     AsyncTool,
+     AsyncToolkit,
+     ContextTool,
+     ContextToolkit,
+     Tool,
+     Toolkit,
+ )
+ from .params import Params
+
+ ModelIdT = TypeVar("ModelIdT", bound=str)
+ ProviderClientT = TypeVar("ProviderClientT")
+
+ ClientT = TypeVar("ClientT", bound="BaseClient")
+ """Type variable for an LLM client."""
+
+
+ class BaseClient(Generic[ModelIdT, ProviderClientT], ABC):
+     """Base abstract client for provider-specific implementations.
+
+     This class defines explicit methods for each type of call, eliminating
+     the need for complex overloads in provider implementations.
+     """
+
+     client: ProviderClientT
+     _token: Token | None = None
+
+     @property
+     @abstractmethod
+     def _context_var(self) -> ContextVar:
+         """The ContextVar for this client type."""
+         ...
+
+     def __enter__(self) -> Self:
+         """Sets the client context and stores the token."""
+         self._token = self._context_var.set(self)
+         return self
+
+     def __exit__(
+         self,
+         exc_type: type[BaseException] | None,
+         exc_val: BaseException | None,
+         exc_tb: TracebackType | None,
+     ) -> None:
+         """Restores the client context to the token from the last setting."""
+         if self._token is not None:
+             self._context_var.reset(self._token)
+             self._token = None
+
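# A minimal usage sketch of the context-manager protocol above. Entering a
# client stores a token and sets the client on its ContextVar so nested code
# can discover the ambient client; exiting restores the prior value. The
# `AnthropicClient` import path and the model id are illustrative assumptions,
# not confirmed by this hunk; `user` is the message constructor imported above.
from mirascope.llm.clients.anthropic import AnthropicClient  # assumed export
from mirascope.llm.messages import user

client = AnthropicClient()
with client:
    # Inside the block, `client` is the ambient client for this context.
    response = client.call(
        model_id="claude-sonnet-4-0",  # placeholder model id
        messages=[user("Hello!")],
    )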
+     @overload
+     @abstractmethod
+     def call(
+         self,
+         *,
+         model_id: ModelIdT,
+         messages: Sequence[Message],
+         tools: Sequence[Tool] | Toolkit | None = None,
+         format: None = None,
+         **params: Unpack[Params],
+     ) -> Response:
+         """Generate an `llm.Response` without a response format."""
+         ...
+
+     @overload
+     @abstractmethod
+     def call(
+         self,
+         *,
+         model_id: ModelIdT,
+         messages: Sequence[Message],
+         tools: Sequence[Tool] | Toolkit | None = None,
+         format: type[FormattableT] | Format[FormattableT],
+         **params: Unpack[Params],
+     ) -> Response[FormattableT]:
+         """Generate an `llm.Response` with a response format."""
+         ...
+
+     @overload
+     @abstractmethod
+     def call(
+         self,
+         *,
+         model_id: ModelIdT,
+         messages: Sequence[Message],
+         tools: Sequence[Tool] | Toolkit | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None,
+         **params: Unpack[Params],
+     ) -> Response | Response[FormattableT]:
+         """Generate an `llm.Response` with an optional response format."""
+         ...
+
+     @abstractmethod
+     def call(
+         self,
+         *,
+         model_id: ModelIdT,
+         messages: Sequence[Message],
+         tools: Sequence[Tool] | Toolkit | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None = None,
+         **params: Unpack[Params],
+     ) -> Response | Response[FormattableT]:
+         """Generate an `llm.Response` by synchronously calling this client's LLM provider.
+
+         Args:
+             model_id: Model identifier to use.
+             messages: Messages to send to the LLM.
+             tools: Optional tools that the model may invoke.
+             format: Optional response format specifier.
+             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+         Returns:
+             An `llm.Response` object containing the LLM-generated content.
+         """
+         ...
+
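# A hedged sketch of the `call` overloads above, continuing the example client
# from the previous sketch: with `format=None` the result is a plain
# `Response`; passing a formattable type narrows it to `Response[FormattableT]`.
# Using a pydantic model as the formattable type is an assumption; how the
# parsed value is read back is not shown in this hunk.
from pydantic import BaseModel

class Book(BaseModel):
    title: str
    author: str

plain = client.call(  # -> Response
    model_id="claude-sonnet-4-0",  # placeholder model id
    messages=[user("Recommend a fantasy book")],
)
formatted = client.call(  # -> Response[Book]
    model_id="claude-sonnet-4-0",
    messages=[user("Recommend a fantasy book")],
    format=Book,
)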
+     @overload
+     @abstractmethod
+     def context_call(
+         self,
+         *,
+         ctx: Context[DepsT],
+         model_id: ModelIdT,
+         messages: Sequence[Message],
+         tools: Sequence[Tool | ContextTool[DepsT]]
+         | ContextToolkit[DepsT]
+         | None = None,
+         format: None = None,
+         **params: Unpack[Params],
+     ) -> ContextResponse[DepsT, None]:
+         """Generate an `llm.ContextResponse` without a response format."""
+         ...
+
+     @overload
+     @abstractmethod
+     def context_call(
+         self,
+         *,
+         ctx: Context[DepsT],
+         model_id: ModelIdT,
+         messages: Sequence[Message],
+         tools: Sequence[Tool | ContextTool[DepsT]]
+         | ContextToolkit[DepsT]
+         | None = None,
+         format: type[FormattableT] | Format[FormattableT],
+         **params: Unpack[Params],
+     ) -> ContextResponse[DepsT, FormattableT]:
+         """Generate an `llm.ContextResponse` with a response format."""
+         ...
+
+     @overload
+     @abstractmethod
+     def context_call(
+         self,
+         *,
+         ctx: Context[DepsT],
+         model_id: ModelIdT,
+         messages: Sequence[Message],
+         tools: Sequence[Tool | ContextTool[DepsT]]
+         | ContextToolkit[DepsT]
+         | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None,
+         **params: Unpack[Params],
+     ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
+         """Generate an `llm.ContextResponse` with an optional response format."""
+         ...
+
+     @abstractmethod
+     def context_call(
+         self,
+         *,
+         ctx: Context[DepsT],
+         model_id: ModelIdT,
+         messages: Sequence[Message],
+         tools: Sequence[Tool | ContextTool[DepsT]]
+         | ContextToolkit[DepsT]
+         | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None = None,
+         **params: Unpack[Params],
+     ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
+         """Generate an `llm.ContextResponse` by synchronously calling this client's LLM provider.
+
+         Args:
+             ctx: Context object with dependencies for tools.
+             model_id: Model identifier to use.
+             messages: Messages to send to the LLM.
+             tools: Optional tools that the model may invoke.
+             format: Optional response format specifier.
+             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+         Returns:
+             An `llm.ContextResponse` object containing the LLM-generated content.
+         """
+         ...
+
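# A sketch of `context_call`: `ctx` carries typed dependencies that context
# tools receive. The `Context(deps=...)` constructor shape and the
# `lookup_user` tool are assumptions for illustration; the `Context` import
# path mirrors the relative import used above.
from dataclasses import dataclass

from mirascope.llm.context import Context

@dataclass
class Deps:
    db: object  # e.g. a database handle the tool needs

ctx = Context(deps=Deps(db=...))  # assumed constructor shape
response = client.context_call(
    ctx=ctx,
    model_id="claude-sonnet-4-0",  # placeholder model id
    messages=[user("Look up the account owner")],
    tools=[lookup_user],  # a ContextTool[Deps] defined elsewhere; name illustrative
)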
+     @overload
+     @abstractmethod
+     async def call_async(
+         self,
+         *,
+         model_id: ModelIdT,
+         messages: Sequence[Message],
+         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
+         format: None = None,
+         **params: Unpack[Params],
+     ) -> AsyncResponse:
+         """Generate an `llm.AsyncResponse` without a response format."""
+         ...
+
+     @overload
+     @abstractmethod
+     async def call_async(
+         self,
+         *,
+         model_id: ModelIdT,
+         messages: Sequence[Message],
+         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
+         format: type[FormattableT] | Format[FormattableT],
+         **params: Unpack[Params],
+     ) -> AsyncResponse[FormattableT]:
+         """Generate an `llm.AsyncResponse` with a response format."""
+         ...
+
+     @overload
+     @abstractmethod
+     async def call_async(
+         self,
+         *,
+         model_id: ModelIdT,
+         messages: Sequence[Message],
+         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None,
+         **params: Unpack[Params],
+     ) -> AsyncResponse | AsyncResponse[FormattableT]:
+         """Generate an `llm.AsyncResponse` with an optional response format."""
+         ...
+
+     @abstractmethod
+     async def call_async(
+         self,
+         *,
+         model_id: ModelIdT,
+         messages: Sequence[Message],
+         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None = None,
+         **params: Unpack[Params],
+     ) -> AsyncResponse | AsyncResponse[FormattableT]:
+         """Generate an `llm.AsyncResponse` by asynchronously calling this client's LLM provider.
+
+         Args:
+             model_id: Model identifier to use.
+             messages: Messages to send to the LLM.
+             tools: Optional tools that the model may invoke.
+             format: Optional response format specifier.
+             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+         Returns:
+             An `llm.AsyncResponse` object containing the LLM-generated content.
+         """
+         ...
+
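# Async counterpart sketch: the same keyword-only shape as `call`, awaited.
# `temperature` follows the docstring's own example of an `llm.Params` key;
# the model id remains a placeholder.
import asyncio

async def main() -> None:
    response = await client.call_async(
        model_id="claude-sonnet-4-0",
        messages=[user("Hello!")],
        temperature=0.7,
    )

asyncio.run(main())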
+     @overload
+     @abstractmethod
+     async def context_call_async(
+         self,
+         *,
+         ctx: Context[DepsT],
+         model_id: ModelIdT,
+         messages: Sequence[Message],
+         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
+         | AsyncContextToolkit[DepsT]
+         | None = None,
+         format: None = None,
+         **params: Unpack[Params],
+     ) -> AsyncContextResponse[DepsT, None]:
+         """Generate an `llm.AsyncContextResponse` without a response format."""
+         ...
+
+     @overload
+     @abstractmethod
+     async def context_call_async(
+         self,
+         *,
+         ctx: Context[DepsT],
+         model_id: ModelIdT,
+         messages: Sequence[Message],
+         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
+         | AsyncContextToolkit[DepsT]
+         | None = None,
+         format: type[FormattableT] | Format[FormattableT],
+         **params: Unpack[Params],
+     ) -> AsyncContextResponse[DepsT, FormattableT]:
+         """Generate an `llm.AsyncContextResponse` with a response format."""
+         ...
+
+     @overload
+     @abstractmethod
+     async def context_call_async(
+         self,
+         *,
+         ctx: Context[DepsT],
+         model_id: ModelIdT,
+         messages: Sequence[Message],
+         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
+         | AsyncContextToolkit[DepsT]
+         | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None,
+         **params: Unpack[Params],
+     ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
+         """Generate an `llm.AsyncContextResponse` with an optional response format."""
+         ...
+
+     @abstractmethod
+     async def context_call_async(
+         self,
+         *,
+         ctx: Context[DepsT],
+         model_id: ModelIdT,
+         messages: Sequence[Message],
+         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
+         | AsyncContextToolkit[DepsT]
+         | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None = None,
+         **params: Unpack[Params],
+     ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
+         """Generate an `llm.AsyncContextResponse` by asynchronously calling this client's LLM provider.
+
+         Args:
+             ctx: Context object with dependencies for tools.
+             model_id: Model identifier to use.
+             messages: Messages to send to the LLM.
+             tools: Optional tools that the model may invoke.
+             format: Optional response format specifier.
+             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+         Returns:
+             An `llm.AsyncContextResponse` object containing the LLM-generated content.
+         """
+         ...
+
+     @overload
+     @abstractmethod
+     def stream(
+         self,
+         *,
+         model_id: ModelIdT,
+         messages: Sequence[Message],
+         tools: Sequence[Tool] | Toolkit | None = None,
+         format: None = None,
+         **params: Unpack[Params],
+     ) -> StreamResponse:
+         """Stream an `llm.StreamResponse` without a response format."""
+         ...
+
+     @overload
+     @abstractmethod
+     def stream(
+         self,
+         *,
+         model_id: ModelIdT,
+         messages: Sequence[Message],
+         tools: Sequence[Tool] | Toolkit | None = None,
+         format: type[FormattableT] | Format[FormattableT],
+         **params: Unpack[Params],
+     ) -> StreamResponse[FormattableT]:
+         """Stream an `llm.StreamResponse` with a response format."""
+         ...
+
+     @overload
+     @abstractmethod
+     def stream(
+         self,
+         *,
+         model_id: ModelIdT,
+         messages: Sequence[Message],
+         tools: Sequence[Tool] | Toolkit | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None,
+         **params: Unpack[Params],
+     ) -> StreamResponse | StreamResponse[FormattableT]:
+         """Stream an `llm.StreamResponse` with an optional response format."""
+         ...
+
+     @abstractmethod
+     def stream(
+         self,
+         *,
+         model_id: ModelIdT,
+         messages: Sequence[Message],
+         tools: Sequence[Tool] | Toolkit | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None = None,
+         **params: Unpack[Params],
+     ) -> StreamResponse | StreamResponse[FormattableT]:
+         """Generate an `llm.StreamResponse` by synchronously streaming from this client's LLM provider.
+
+         Args:
+             model_id: Model identifier to use.
+             messages: Messages to send to the LLM.
+             tools: Optional tools that the model may invoke.
+             format: Optional response format specifier.
+             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+         Returns:
+             An `llm.StreamResponse` object for iterating over the LLM-generated content.
+         """
+         ...
+
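# Streaming sketch: `stream` returns a `StreamResponse` that is iterated for
# incrementally generated content. The chunk/iteration API lives in
# `responses/stream_response.py` and is not shown in this hunk, so the loop
# below is an assumption.
stream = client.stream(
    model_id="claude-sonnet-4-0",  # placeholder model id
    messages=[user("Tell me a story")],
)
for chunk in stream:  # assumed: StreamResponse is iterable over chunks
    print(chunk, end="")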
+     @overload
+     @abstractmethod
+     def context_stream(
+         self,
+         *,
+         ctx: Context[DepsT],
+         model_id: ModelIdT,
+         messages: Sequence[Message],
+         tools: Sequence[Tool | ContextTool[DepsT]]
+         | ContextToolkit[DepsT]
+         | None = None,
+         format: None = None,
+         **params: Unpack[Params],
+     ) -> ContextStreamResponse[DepsT, None]:
+         """Stream an `llm.ContextStreamResponse` without a response format."""
+         ...
+
+     @overload
+     @abstractmethod
+     def context_stream(
+         self,
+         *,
+         ctx: Context[DepsT],
+         model_id: ModelIdT,
+         messages: Sequence[Message],
+         tools: Sequence[Tool | ContextTool[DepsT]]
+         | ContextToolkit[DepsT]
+         | None = None,
+         format: type[FormattableT] | Format[FormattableT],
+         **params: Unpack[Params],
+     ) -> ContextStreamResponse[DepsT, FormattableT]:
+         """Stream an `llm.ContextStreamResponse` with a response format."""
+         ...
+
+     @overload
+     @abstractmethod
+     def context_stream(
+         self,
+         *,
+         ctx: Context[DepsT],
+         model_id: ModelIdT,
+         messages: Sequence[Message],
+         tools: Sequence[Tool | ContextTool[DepsT]]
+         | ContextToolkit[DepsT]
+         | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None,
+         **params: Unpack[Params],
+     ) -> (
+         ContextStreamResponse[DepsT, None] | ContextStreamResponse[DepsT, FormattableT]
+     ):
+         """Stream an `llm.ContextStreamResponse` with an optional response format."""
+         ...
+
+     @abstractmethod
+     def context_stream(
+         self,
+         *,
+         ctx: Context[DepsT],
+         model_id: ModelIdT,
+         messages: Sequence[Message],
+         tools: Sequence[Tool | ContextTool[DepsT]]
+         | ContextToolkit[DepsT]
+         | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None = None,
+         **params: Unpack[Params],
+     ) -> (
+         ContextStreamResponse[DepsT, None] | ContextStreamResponse[DepsT, FormattableT]
+     ):
+         """Generate an `llm.ContextStreamResponse` by synchronously streaming from this client's LLM provider.
+
+         Args:
+             ctx: Context object with dependencies for tools.
+             model_id: Model identifier to use.
+             messages: Messages to send to the LLM.
+             tools: Optional tools that the model may invoke.
+             format: Optional response format specifier.
+             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+         Returns:
+             An `llm.ContextStreamResponse` object for iterating over the LLM-generated content.
+         """
+         ...
+
+     @overload
+     @abstractmethod
+     async def stream_async(
+         self,
+         *,
+         model_id: ModelIdT,
+         messages: Sequence[Message],
+         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
+         format: None = None,
+         **params: Unpack[Params],
+     ) -> AsyncStreamResponse:
+         """Stream an `llm.AsyncStreamResponse` without a response format."""
+         ...
+
+     @overload
+     @abstractmethod
+     async def stream_async(
+         self,
+         *,
+         model_id: ModelIdT,
+         messages: Sequence[Message],
+         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
+         format: type[FormattableT] | Format[FormattableT],
+         **params: Unpack[Params],
+     ) -> AsyncStreamResponse[FormattableT]:
+         """Stream an `llm.AsyncStreamResponse` with a response format."""
+         ...
+
+     @overload
+     @abstractmethod
+     async def stream_async(
+         self,
+         *,
+         model_id: ModelIdT,
+         messages: Sequence[Message],
+         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None,
+         **params: Unpack[Params],
+     ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
+         """Stream an `llm.AsyncStreamResponse` with an optional response format."""
+         ...
+
+     @abstractmethod
+     async def stream_async(
+         self,
+         *,
+         model_id: ModelIdT,
+         messages: Sequence[Message],
+         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None = None,
+         **params: Unpack[Params],
+     ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
+         """Generate an `llm.AsyncStreamResponse` by asynchronously streaming from this client's LLM provider.
+
+         Args:
+             model_id: Model identifier to use.
+             messages: Messages to send to the LLM.
+             tools: Optional tools that the model may invoke.
+             format: Optional response format specifier.
+             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+         Returns:
+             An `llm.AsyncStreamResponse` object for asynchronously iterating over the LLM-generated content.
+         """
+         ...
+
+     @overload
+     @abstractmethod
+     async def context_stream_async(
+         self,
+         *,
+         ctx: Context[DepsT],
+         model_id: ModelIdT,
+         messages: Sequence[Message],
+         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
+         | AsyncContextToolkit[DepsT]
+         | None = None,
+         format: None = None,
+         **params: Unpack[Params],
+     ) -> AsyncContextStreamResponse[DepsT, None]:
+         """Stream an `llm.AsyncContextStreamResponse` without a response format."""
+         ...
+
+     @overload
+     @abstractmethod
+     async def context_stream_async(
+         self,
+         *,
+         ctx: Context[DepsT],
+         model_id: ModelIdT,
+         messages: Sequence[Message],
+         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
+         | AsyncContextToolkit[DepsT]
+         | None = None,
+         format: type[FormattableT] | Format[FormattableT],
+         **params: Unpack[Params],
+     ) -> AsyncContextStreamResponse[DepsT, FormattableT]:
+         """Stream an `llm.AsyncContextStreamResponse` with a response format."""
+         ...
+
+     @overload
+     @abstractmethod
+     async def context_stream_async(
+         self,
+         *,
+         ctx: Context[DepsT],
+         model_id: ModelIdT,
+         messages: Sequence[Message],
+         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
+         | AsyncContextToolkit[DepsT]
+         | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None,
+         **params: Unpack[Params],
+     ) -> (
+         AsyncContextStreamResponse[DepsT, None]
+         | AsyncContextStreamResponse[DepsT, FormattableT]
+     ):
+         """Stream an `llm.AsyncContextStreamResponse` with an optional response format."""
+         ...
+
+     @abstractmethod
+     async def context_stream_async(
+         self,
+         *,
+         ctx: Context[DepsT],
+         model_id: ModelIdT,
+         messages: Sequence[Message],
+         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
+         | AsyncContextToolkit[DepsT]
+         | None = None,
+         format: type[FormattableT] | Format[FormattableT] | None = None,
+         **params: Unpack[Params],
+     ) -> (
+         AsyncContextStreamResponse[DepsT, None]
+         | AsyncContextStreamResponse[DepsT, FormattableT]
+     ):
+         """Generate an `llm.AsyncContextStreamResponse` by asynchronously streaming from this client's LLM provider.
+
+         Args:
+             ctx: Context object with dependencies for tools.
+             model_id: Model identifier to use.
+             messages: Messages to send to the LLM.
+             tools: Optional tools that the model may invoke.
+             format: Optional response format specifier.
+             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+         Returns:
+             An `llm.AsyncContextStreamResponse` object for asynchronously iterating over the LLM-generated content.
+         """
+         ...
+
+     @overload
+     def resume(
+         self,
+         *,
+         model_id: ModelIdT,
+         response: Response,
+         content: UserContent,
+         **params: Unpack[Params],
+     ) -> Response:
+         """Resume an `llm.Response` without a response format."""
+         ...
+
+     @overload
+     def resume(
+         self,
+         *,
+         model_id: ModelIdT,
+         response: Response[FormattableT],
+         content: UserContent,
+         **params: Unpack[Params],
+     ) -> Response[FormattableT]:
+         """Resume an `llm.Response` with a response format."""
+         ...
+
+     @overload
+     def resume(
+         self,
+         *,
+         model_id: ModelIdT,
+         response: Response | Response[FormattableT],
+         content: UserContent,
+         **params: Unpack[Params],
+     ) -> Response | Response[FormattableT]:
+         """Resume an `llm.Response` with an optional response format."""
+         ...
+
+     def resume(
+         self,
+         *,
+         model_id: ModelIdT,
+         response: Response | Response[FormattableT],
+         content: UserContent,
+         **params: Unpack[Params],
+     ) -> Response | Response[FormattableT]:
+         """Generate a new `llm.Response` by extending another response's messages with additional user content.
+
+         Args:
+             model_id: Model identifier to use.
+             response: Previous response to extend.
+             content: Additional user content to append.
+             **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+         Returns:
+             A new `llm.Response` object containing the extended conversation.
+
+         Note:
+             Uses the previous response's tools and output format. This base method wraps
+             around calling `client.call()` with a messages array derived from the response
+             messages. However, clients may override this with first-class resume logic.
+         """
+         messages = response.messages + [user(content)]
+         return self.call(
+             model_id=model_id,
+             messages=messages,
+             tools=response.toolkit,
+             format=response.format,
+             **params,
+         )
+
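# `resume` composes the primitives above: it appends `user(content)` to the
# prior response's messages and calls `call` again with the same toolkit and
# format, so a follow-up keeps the conversation, tools, and output format.
# Sketch (placeholder model id):
first = client.call(
    model_id="claude-sonnet-4-0",
    messages=[user("Recommend a fantasy book")],
)
followup = client.resume(
    model_id="claude-sonnet-4-0",
    response=first,
    content="Now recommend a nonfiction one.",
)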
735
+ @overload
736
+ async def resume_async(
737
+ self,
738
+ *,
739
+ model_id: ModelIdT,
740
+ response: AsyncResponse,
741
+ content: UserContent,
742
+ **params: Unpack[Params],
743
+ ) -> AsyncResponse:
744
+ """Resume an `llm.AsyncResponse` without a response format."""
745
+ ...
746
+
747
+ @overload
748
+ async def resume_async(
749
+ self,
750
+ *,
751
+ model_id: ModelIdT,
752
+ response: AsyncResponse[FormattableT],
753
+ content: UserContent,
754
+ **params: Unpack[Params],
755
+ ) -> AsyncResponse[FormattableT]:
756
+ """Resume an `llm.AsyncResponse` with a response format."""
757
+ ...
758
+
759
+ @overload
760
+ async def resume_async(
761
+ self,
762
+ *,
763
+ model_id: ModelIdT,
764
+ response: AsyncResponse | AsyncResponse[FormattableT],
765
+ content: UserContent,
766
+ **params: Unpack[Params],
767
+ ) -> AsyncResponse | AsyncResponse[FormattableT]:
768
+ """Resume an `llm.AsyncResponse` with an optional response format."""
769
+ ...
770
+
771
+ async def resume_async(
772
+ self,
773
+ *,
774
+ model_id: ModelIdT,
775
+ response: AsyncResponse | AsyncResponse[FormattableT],
776
+ content: UserContent,
777
+ **params: Unpack[Params],
778
+ ) -> AsyncResponse | AsyncResponse[FormattableT]:
779
+ """Generate a new `llm.AsyncResponse` by extending another response's messages with additional user content.
780
+
781
+ Args:
782
+ model_id: Model identifier to use.
783
+ response: Previous async response to extend.
784
+ content: Additional user content to append.
785
+ **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
786
+
787
+ Returns:
788
+ A new `llm.AsyncResponse` object containing the extended conversation.
789
+
790
+ Note:
791
+ Uses the previous response's tools and output format. This base method wraps
792
+ around calling `client.call_async()` with a messages array derived from the response
793
+ messages. However, clients may override this with first-class resume logic.
794
+ """
795
+ messages = response.messages + [user(content)]
796
+ return await self.call_async(
797
+ model_id=model_id,
798
+ messages=messages,
799
+ tools=response.toolkit,
800
+ format=response.format,
801
+ **params,
802
+ )
803
+
804
+ @overload
805
+ def context_resume(
806
+ self,
807
+ *,
808
+ ctx: Context[DepsT],
809
+ model_id: ModelIdT,
810
+ response: ContextResponse[DepsT, None],
811
+ content: UserContent,
812
+ **params: Unpack[Params],
813
+ ) -> ContextResponse[DepsT, None]:
814
+ """Resume an `llm.ContextResponse` without a response format."""
815
+ ...
816
+
817
+ @overload
818
+ def context_resume(
819
+ self,
820
+ *,
821
+ ctx: Context[DepsT],
822
+ model_id: ModelIdT,
823
+ response: ContextResponse[DepsT, FormattableT],
824
+ content: UserContent,
825
+ **params: Unpack[Params],
826
+ ) -> ContextResponse[DepsT, FormattableT]:
827
+ """Resume an `llm.ContextResponse` with a response format."""
828
+ ...
829
+
830
+ @overload
831
+ def context_resume(
832
+ self,
833
+ *,
834
+ ctx: Context[DepsT],
835
+ model_id: ModelIdT,
836
+ response: ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT],
837
+ content: UserContent,
838
+ **params: Unpack[Params],
839
+ ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
840
+ """Resume an `llm.ContextResponse` with an optional response format."""
841
+ ...
842
+
843
+ def context_resume(
844
+ self,
845
+ *,
846
+ ctx: Context[DepsT],
847
+ model_id: ModelIdT,
848
+ response: ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT],
849
+ content: UserContent,
850
+ **params: Unpack[Params],
851
+ ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
852
+ """Generate a new `llm.ContextResponse` by extending another response's messages with additional user content.
853
+
854
+ Args:
855
+ ctx: Context object with dependencies for tools.
856
+ model_id: Model identifier to use.
857
+ response: Previous context response to extend.
858
+ content: Additional user content to append.
859
+ **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
860
+
861
+ Returns:
862
+ A new `llm.ContextResponse` object containing the extended conversation.
863
+
864
+ Note:
865
+ Uses the previous response's tools and output format. This base method wraps
866
+ around calling `client.context_call()` with a messages array derived from the response
867
+ messages. However, clients may override this with first-class resume logic.
868
+ """
869
+ messages = response.messages + [user(content)]
870
+ return self.context_call(
871
+ ctx=ctx,
872
+ model_id=model_id,
873
+ messages=messages,
874
+ tools=response.toolkit,
875
+ format=response.format,
876
+ **params,
877
+ )
878
+
879
+ @overload
880
+ async def context_resume_async(
881
+ self,
882
+ *,
883
+ ctx: Context[DepsT],
884
+ model_id: ModelIdT,
885
+ response: AsyncContextResponse[DepsT, None],
886
+ content: UserContent,
887
+ **params: Unpack[Params],
888
+ ) -> AsyncContextResponse[DepsT, None]:
889
+ """Resume an `llm.AsyncContextResponse` without a response format."""
890
+ ...
891
+
892
+ @overload
893
+ async def context_resume_async(
894
+ self,
895
+ *,
896
+ ctx: Context[DepsT],
897
+ model_id: ModelIdT,
898
+ response: AsyncContextResponse[DepsT, FormattableT],
899
+ content: UserContent,
900
+ **params: Unpack[Params],
901
+ ) -> AsyncContextResponse[DepsT, FormattableT]:
902
+ """Resume an `llm.AsyncContextResponse` with a response format."""
903
+ ...
904
+
905
+ @overload
906
+ async def context_resume_async(
907
+ self,
908
+ *,
909
+ ctx: Context[DepsT],
910
+ model_id: ModelIdT,
911
+ response: AsyncContextResponse[DepsT, None]
912
+ | AsyncContextResponse[DepsT, FormattableT],
913
+ content: UserContent,
914
+ **params: Unpack[Params],
915
+ ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
916
+ """Resume an `llm.AsyncContextResponse` with an optional response format."""
917
+ ...
918
+
+    async def context_resume_async(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: ModelIdT,
+        response: AsyncContextResponse[DepsT, None]
+        | AsyncContextResponse[DepsT, FormattableT],
+        content: UserContent,
+        **params: Unpack[Params],
+    ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
+        """Generate a new `llm.AsyncContextResponse` by extending another response's messages with additional user content.
+
+        Args:
+            ctx: Context object with dependencies for tools.
+            model_id: Model identifier to use.
+            response: Previous async context response to extend.
+            content: Additional user content to append.
+            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+        Returns:
+            A new `llm.AsyncContextResponse` object containing the extended conversation.
+
+        Note:
+            Reuses the previous response's tools and output format. This base method
+            delegates to `client.context_call_async()` with a messages list derived from
+            the previous response's messages; clients may override it with first-class resume logic.
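+
+        Example:
+            A minimal sketch of resuming an async conversation; the client
+            instance, model id, and prompts shown are illustrative assumptions:
+
+                first = await client.context_call_async(
+                    ctx=ctx,
+                    model_id="some-model-id",  # assumed model id
+                    messages=[user("What is 6 x 7?")],
+                )
+                followup = await client.context_resume_async(
+                    ctx=ctx,
+                    model_id="some-model-id",
+                    response=first,
+                    content="Now double it.",
+                )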
+        """
+        messages = response.messages + [user(content)]
+        return await self.context_call_async(
+            ctx=ctx,
+            model_id=model_id,
+            messages=messages,
+            tools=response.toolkit,
+            format=response.format,
+            **params,
+        )
+
+    @overload
+    def resume_stream(
+        self,
+        *,
+        model_id: ModelIdT,
+        response: StreamResponse,
+        content: UserContent,
+        **params: Unpack[Params],
+    ) -> StreamResponse:
+        """Resume an `llm.StreamResponse` without a response format."""
+        ...
+
+    @overload
+    def resume_stream(
+        self,
+        *,
+        model_id: ModelIdT,
+        response: StreamResponse[FormattableT],
+        content: UserContent,
+        **params: Unpack[Params],
+    ) -> StreamResponse[FormattableT]:
+        """Resume an `llm.StreamResponse` with a response format."""
+        ...
+
+    @overload
+    def resume_stream(
+        self,
+        *,
+        model_id: ModelIdT,
+        response: StreamResponse | StreamResponse[FormattableT],
+        content: UserContent,
+        **params: Unpack[Params],
+    ) -> StreamResponse | StreamResponse[FormattableT]:
+        """Resume an `llm.StreamResponse` with an optional response format."""
+        ...
+
+    def resume_stream(
+        self,
+        *,
+        model_id: ModelIdT,
+        response: StreamResponse | StreamResponse[FormattableT],
+        content: UserContent,
+        **params: Unpack[Params],
+    ) -> StreamResponse | StreamResponse[FormattableT]:
+        """Generate a new `llm.StreamResponse` by extending another response's messages with additional user content.
+
+        Args:
+            model_id: Model identifier to use.
+            response: Previous stream response to extend.
+            content: Additional user content to append.
+            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+        Returns:
+            A new `llm.StreamResponse` object for streaming the extended conversation.
+
+        Note:
+            Reuses the previous response's tools and output format. This base method
+            delegates to `client.stream()` with a messages list derived from the
+            previous response's messages; clients may override it with first-class resume logic.
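+
+        Example:
+            A minimal sketch of resuming a stream; the client instance, model id,
+            and prompts shown are illustrative assumptions:
+
+                stream = client.stream(
+                    model_id="some-model-id",  # assumed model id
+                    messages=[user("Tell me a story.")],
+                )
+                # ... consume `stream` ...
+                continued = client.resume_stream(
+                    model_id="some-model-id",
+                    response=stream,
+                    content="Continue the story.",
+                )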
+        """
+        messages = response.messages + [user(content)]
+        return self.stream(
+            model_id=model_id,
+            messages=messages,
+            tools=response.toolkit,
+            format=response.format,
+            **params,
+        )
+
+    @overload
+    async def resume_stream_async(
+        self,
+        *,
+        model_id: ModelIdT,
+        response: AsyncStreamResponse,
+        content: UserContent,
+        **params: Unpack[Params],
+    ) -> AsyncStreamResponse:
+        """Resume an `llm.AsyncStreamResponse` without a response format."""
+        ...
+
+    @overload
+    async def resume_stream_async(
+        self,
+        *,
+        model_id: ModelIdT,
+        response: AsyncStreamResponse[FormattableT],
+        content: UserContent,
+        **params: Unpack[Params],
+    ) -> AsyncStreamResponse[FormattableT]:
+        """Resume an `llm.AsyncStreamResponse` with a response format."""
+        ...
+
+    @overload
+    async def resume_stream_async(
+        self,
+        *,
+        model_id: ModelIdT,
+        response: AsyncStreamResponse | AsyncStreamResponse[FormattableT],
+        content: UserContent,
+        **params: Unpack[Params],
+    ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
+        """Resume an `llm.AsyncStreamResponse` with an optional response format."""
+        ...
+
+    async def resume_stream_async(
+        self,
+        *,
+        model_id: ModelIdT,
+        response: AsyncStreamResponse | AsyncStreamResponse[FormattableT],
+        content: UserContent,
+        **params: Unpack[Params],
+    ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
+        """Generate a new `llm.AsyncStreamResponse` by extending another response's messages with additional user content.
+
+        Args:
+            model_id: Model identifier to use.
+            response: Previous async stream response to extend.
+            content: Additional user content to append.
+            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+        Returns:
+            A new `llm.AsyncStreamResponse` object for asynchronously streaming the extended conversation.
+
+        Note:
+            Reuses the previous response's tools and output format. This base method
+            delegates to `client.stream_async()` with a messages list derived from the
+            previous response's messages; clients may override it with first-class resume logic.
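+
+        Example:
+            A minimal sketch of resuming an async stream; the client instance,
+            model id, and prompts shown are illustrative assumptions:
+
+                stream = await client.stream_async(
+                    model_id="some-model-id",  # assumed model id
+                    messages=[user("Tell me a story.")],
+                )
+                # ... consume `stream` ...
+                continued = await client.resume_stream_async(
+                    model_id="some-model-id",
+                    response=stream,
+                    content="Continue the story.",
+                )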
+        """
+        messages = response.messages + [user(content)]
+        return await self.stream_async(
+            model_id=model_id,
+            messages=messages,
+            tools=response.toolkit,
+            format=response.format,
+            **params,
+        )
+
+    @overload
+    def context_resume_stream(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: ModelIdT,
+        response: ContextStreamResponse[DepsT, None],
+        content: UserContent,
+        **params: Unpack[Params],
+    ) -> ContextStreamResponse[DepsT, None]:
+        """Resume an `llm.ContextStreamResponse` without a response format."""
+        ...
+
+    @overload
+    def context_resume_stream(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: ModelIdT,
+        response: ContextStreamResponse[DepsT, FormattableT],
+        content: UserContent,
+        **params: Unpack[Params],
+    ) -> ContextStreamResponse[DepsT, FormattableT]:
+        """Resume an `llm.ContextStreamResponse` with a response format."""
+        ...
+
+    @overload
+    def context_resume_stream(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: ModelIdT,
+        response: ContextStreamResponse[DepsT, None]
+        | ContextStreamResponse[DepsT, FormattableT],
+        content: UserContent,
+        **params: Unpack[Params],
+    ) -> (
+        ContextStreamResponse[DepsT, None] | ContextStreamResponse[DepsT, FormattableT]
+    ):
+        """Resume an `llm.ContextStreamResponse` with an optional response format."""
+        ...
+
+    def context_resume_stream(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: ModelIdT,
+        response: ContextStreamResponse[DepsT, None]
+        | ContextStreamResponse[DepsT, FormattableT],
+        content: UserContent,
+        **params: Unpack[Params],
+    ) -> (
+        ContextStreamResponse[DepsT, None] | ContextStreamResponse[DepsT, FormattableT]
+    ):
+        """Generate a new `llm.ContextStreamResponse` by extending another response's messages with additional user content.
+
+        Args:
+            ctx: Context object with dependencies for tools.
+            model_id: Model identifier to use.
+            response: Previous context stream response to extend.
+            content: Additional user content to append.
+            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+        Returns:
+            A new `llm.ContextStreamResponse` object for streaming the extended conversation.
+
+        Note:
+            Reuses the previous response's tools and output format. This base method
+            delegates to `client.context_stream()` with a messages list derived from the
+            previous response's messages; clients may override it with first-class resume logic.
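+
+        Example:
+            A minimal sketch of resuming a context stream; the client instance,
+            model id, and prompts shown are illustrative assumptions:
+
+                stream = client.context_stream(
+                    ctx=ctx,
+                    model_id="some-model-id",  # assumed model id
+                    messages=[user("Tell me a story.")],
+                )
+                # ... consume `stream` ...
+                continued = client.context_resume_stream(
+                    ctx=ctx,
+                    model_id="some-model-id",
+                    response=stream,
+                    content="Continue the story.",
+                )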
+        """
+        messages = response.messages + [user(content)]
+        return self.context_stream(
+            ctx=ctx,
+            model_id=model_id,
+            messages=messages,
+            tools=response.toolkit,
+            format=response.format,
+            **params,
+        )
+
+    @overload
+    async def context_resume_stream_async(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: ModelIdT,
+        response: AsyncContextStreamResponse[DepsT, None],
+        content: UserContent,
+        **params: Unpack[Params],
+    ) -> AsyncContextStreamResponse[DepsT, None]:
+        """Resume an `llm.AsyncContextStreamResponse` without a response format."""
+        ...
+
+    @overload
+    async def context_resume_stream_async(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: ModelIdT,
+        response: AsyncContextStreamResponse[DepsT, FormattableT],
+        content: UserContent,
+        **params: Unpack[Params],
+    ) -> AsyncContextStreamResponse[DepsT, FormattableT]:
+        """Resume an `llm.AsyncContextStreamResponse` with a response format."""
+        ...
+
+    @overload
+    async def context_resume_stream_async(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: ModelIdT,
+        response: AsyncContextStreamResponse[DepsT, None]
+        | AsyncContextStreamResponse[DepsT, FormattableT],
+        content: UserContent,
+        **params: Unpack[Params],
+    ) -> (
+        AsyncContextStreamResponse[DepsT, None]
+        | AsyncContextStreamResponse[DepsT, FormattableT]
+    ):
+        """Resume an `llm.AsyncContextStreamResponse` with an optional response format."""
+        ...
+
+    async def context_resume_stream_async(
+        self,
+        *,
+        ctx: Context[DepsT],
+        model_id: ModelIdT,
+        response: AsyncContextStreamResponse[DepsT, None]
+        | AsyncContextStreamResponse[DepsT, FormattableT],
+        content: UserContent,
+        **params: Unpack[Params],
+    ) -> (
+        AsyncContextStreamResponse[DepsT, None]
+        | AsyncContextStreamResponse[DepsT, FormattableT]
+    ):
+        """Generate a new `llm.AsyncContextStreamResponse` by extending another response's messages with additional user content.
+
+        Args:
+            ctx: Context object with dependencies for tools.
+            model_id: Model identifier to use.
+            response: Previous async context stream response to extend.
+            content: Additional user content to append.
+            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
+
+        Returns:
+            A new `llm.AsyncContextStreamResponse` object for asynchronously streaming the extended conversation.
+
+        Note:
+            Reuses the previous response's tools and output format. This base method
+            delegates to `client.context_stream_async()` with a messages list derived from
+            the previous response's messages; clients may override it with first-class resume logic.
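+
+        Example:
+            A minimal sketch of resuming an async context stream; the client
+            instance, model id, and prompts shown are illustrative assumptions:
+
+                stream = await client.context_stream_async(
+                    ctx=ctx,
+                    model_id="some-model-id",  # assumed model id
+                    messages=[user("Tell me a story.")],
+                )
+                # ... consume `stream` ...
+                continued = await client.context_resume_stream_async(
+                    ctx=ctx,
+                    model_id="some-model-id",
+                    response=stream,
+                    content="Continue the story.",
+                )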
+        """
+        messages = response.messages + [user(content)]
+        return await self.context_stream_async(
+            ctx=ctx,
+            model_id=model_id,
+            messages=messages,
+            tools=response.toolkit,
+            format=response.format,
+            **params,
+        )