mirascope 1.25.7__py3-none-any.whl → 2.0.0a0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (474)
  1. mirascope/__init__.py +3 -59
  2. mirascope/graphs/__init__.py +22 -0
  3. mirascope/{experimental/graphs → graphs}/finite_state_machine.py +70 -159
  4. mirascope/llm/__init__.py +206 -16
  5. mirascope/llm/agents/__init__.py +15 -0
  6. mirascope/llm/agents/agent.py +97 -0
  7. mirascope/llm/agents/agent_template.py +45 -0
  8. mirascope/llm/agents/decorator.py +176 -0
  9. mirascope/llm/calls/__init__.py +16 -0
  10. mirascope/llm/calls/base_call.py +33 -0
  11. mirascope/llm/calls/calls.py +315 -0
  12. mirascope/llm/calls/decorator.py +255 -0
  13. mirascope/llm/clients/__init__.py +34 -0
  14. mirascope/llm/clients/anthropic/__init__.py +11 -0
  15. mirascope/llm/clients/anthropic/_utils/__init__.py +13 -0
  16. mirascope/llm/clients/anthropic/_utils/decode.py +244 -0
  17. mirascope/llm/clients/anthropic/_utils/encode.py +243 -0
  18. mirascope/llm/clients/anthropic/clients.py +819 -0
  19. mirascope/llm/clients/anthropic/model_ids.py +8 -0
  20. mirascope/llm/clients/base/__init__.py +15 -0
  21. mirascope/llm/clients/base/_utils.py +192 -0
  22. mirascope/llm/clients/base/client.py +1256 -0
  23. mirascope/llm/clients/base/kwargs.py +12 -0
  24. mirascope/llm/clients/base/params.py +93 -0
  25. mirascope/llm/clients/google/__init__.py +6 -0
  26. mirascope/llm/clients/google/_utils/__init__.py +13 -0
  27. mirascope/llm/clients/google/_utils/decode.py +231 -0
  28. mirascope/llm/clients/google/_utils/encode.py +279 -0
  29. mirascope/llm/clients/google/clients.py +853 -0
  30. mirascope/llm/clients/google/message.py +7 -0
  31. mirascope/llm/clients/google/model_ids.py +15 -0
  32. mirascope/llm/clients/openai/__init__.py +25 -0
  33. mirascope/llm/clients/openai/completions/__init__.py +9 -0
  34. mirascope/llm/clients/openai/completions/_utils/__init__.py +13 -0
  35. mirascope/llm/clients/openai/completions/_utils/decode.py +187 -0
  36. mirascope/llm/clients/openai/completions/_utils/encode.py +358 -0
  37. mirascope/llm/clients/openai/completions/_utils/model_features.py +81 -0
  38. mirascope/llm/clients/openai/completions/clients.py +833 -0
  39. mirascope/llm/clients/openai/completions/model_ids.py +8 -0
  40. mirascope/llm/clients/openai/responses/__init__.py +9 -0
  41. mirascope/llm/clients/openai/responses/_utils/__init__.py +13 -0
  42. mirascope/llm/clients/openai/responses/_utils/decode.py +194 -0
  43. mirascope/llm/clients/openai/responses/_utils/encode.py +333 -0
  44. mirascope/llm/clients/openai/responses/_utils/model_features.py +87 -0
  45. mirascope/llm/clients/openai/responses/clients.py +832 -0
  46. mirascope/llm/clients/openai/responses/model_ids.py +8 -0
  47. mirascope/llm/clients/openai/shared/__init__.py +7 -0
  48. mirascope/llm/clients/openai/shared/_utils.py +55 -0
  49. mirascope/llm/clients/providers.py +175 -0
  50. mirascope/llm/content/__init__.py +70 -0
  51. mirascope/llm/content/audio.py +173 -0
  52. mirascope/llm/content/document.py +94 -0
  53. mirascope/llm/content/image.py +206 -0
  54. mirascope/llm/content/text.py +47 -0
  55. mirascope/llm/content/thought.py +58 -0
  56. mirascope/llm/content/tool_call.py +63 -0
  57. mirascope/llm/content/tool_output.py +26 -0
  58. mirascope/llm/context/__init__.py +6 -0
  59. mirascope/llm/context/_utils.py +28 -0
  60. mirascope/llm/context/context.py +24 -0
  61. mirascope/llm/exceptions.py +105 -0
  62. mirascope/llm/formatting/__init__.py +22 -0
  63. mirascope/llm/formatting/_utils.py +74 -0
  64. mirascope/llm/formatting/format.py +104 -0
  65. mirascope/llm/formatting/from_call_args.py +30 -0
  66. mirascope/llm/formatting/partial.py +58 -0
  67. mirascope/llm/formatting/types.py +109 -0
  68. mirascope/llm/mcp/__init__.py +5 -0
  69. mirascope/llm/mcp/client.py +118 -0
  70. mirascope/llm/messages/__init__.py +32 -0
  71. mirascope/llm/messages/message.py +182 -0
  72. mirascope/llm/models/__init__.py +16 -0
  73. mirascope/llm/models/models.py +1243 -0
  74. mirascope/llm/prompts/__init__.py +33 -0
  75. mirascope/llm/prompts/_utils.py +60 -0
  76. mirascope/llm/prompts/decorator.py +286 -0
  77. mirascope/llm/prompts/protocols.py +99 -0
  78. mirascope/llm/responses/__init__.py +57 -0
  79. mirascope/llm/responses/_utils.py +56 -0
  80. mirascope/llm/responses/base_response.py +91 -0
  81. mirascope/llm/responses/base_stream_response.py +697 -0
  82. mirascope/llm/responses/finish_reason.py +27 -0
  83. mirascope/llm/responses/response.py +345 -0
  84. mirascope/llm/responses/root_response.py +177 -0
  85. mirascope/llm/responses/stream_response.py +572 -0
  86. mirascope/llm/responses/streams.py +363 -0
  87. mirascope/llm/tools/__init__.py +40 -0
  88. mirascope/llm/tools/_utils.py +25 -0
  89. mirascope/llm/tools/decorator.py +175 -0
  90. mirascope/llm/tools/protocols.py +96 -0
  91. mirascope/llm/tools/tool_schema.py +246 -0
  92. mirascope/llm/tools/toolkit.py +152 -0
  93. mirascope/llm/tools/tools.py +169 -0
  94. mirascope/llm/types/__init__.py +22 -0
  95. mirascope/llm/types/dataclass.py +9 -0
  96. mirascope/llm/types/jsonable.py +44 -0
  97. mirascope/llm/types/type_vars.py +19 -0
  98. mirascope-2.0.0a0.dist-info/METADATA +117 -0
  99. mirascope-2.0.0a0.dist-info/RECORD +101 -0
  100. mirascope/beta/__init__.py +0 -3
  101. mirascope/beta/openai/__init__.py +0 -17
  102. mirascope/beta/openai/realtime/__init__.py +0 -13
  103. mirascope/beta/openai/realtime/_utils/__init__.py +0 -3
  104. mirascope/beta/openai/realtime/_utils/_audio.py +0 -74
  105. mirascope/beta/openai/realtime/_utils/_protocols.py +0 -50
  106. mirascope/beta/openai/realtime/realtime.py +0 -500
  107. mirascope/beta/openai/realtime/recording.py +0 -98
  108. mirascope/beta/openai/realtime/tool.py +0 -113
  109. mirascope/beta/rag/__init__.py +0 -24
  110. mirascope/beta/rag/base/__init__.py +0 -22
  111. mirascope/beta/rag/base/chunkers/__init__.py +0 -2
  112. mirascope/beta/rag/base/chunkers/base_chunker.py +0 -37
  113. mirascope/beta/rag/base/chunkers/text_chunker.py +0 -33
  114. mirascope/beta/rag/base/config.py +0 -8
  115. mirascope/beta/rag/base/document.py +0 -11
  116. mirascope/beta/rag/base/embedders.py +0 -35
  117. mirascope/beta/rag/base/embedding_params.py +0 -18
  118. mirascope/beta/rag/base/embedding_response.py +0 -30
  119. mirascope/beta/rag/base/query_results.py +0 -7
  120. mirascope/beta/rag/base/vectorstore_params.py +0 -18
  121. mirascope/beta/rag/base/vectorstores.py +0 -37
  122. mirascope/beta/rag/chroma/__init__.py +0 -11
  123. mirascope/beta/rag/chroma/types.py +0 -62
  124. mirascope/beta/rag/chroma/vectorstores.py +0 -121
  125. mirascope/beta/rag/cohere/__init__.py +0 -11
  126. mirascope/beta/rag/cohere/embedders.py +0 -87
  127. mirascope/beta/rag/cohere/embedding_params.py +0 -29
  128. mirascope/beta/rag/cohere/embedding_response.py +0 -29
  129. mirascope/beta/rag/cohere/py.typed +0 -0
  130. mirascope/beta/rag/openai/__init__.py +0 -11
  131. mirascope/beta/rag/openai/embedders.py +0 -144
  132. mirascope/beta/rag/openai/embedding_params.py +0 -18
  133. mirascope/beta/rag/openai/embedding_response.py +0 -14
  134. mirascope/beta/rag/openai/py.typed +0 -0
  135. mirascope/beta/rag/pinecone/__init__.py +0 -19
  136. mirascope/beta/rag/pinecone/types.py +0 -143
  137. mirascope/beta/rag/pinecone/vectorstores.py +0 -148
  138. mirascope/beta/rag/weaviate/__init__.py +0 -6
  139. mirascope/beta/rag/weaviate/types.py +0 -92
  140. mirascope/beta/rag/weaviate/vectorstores.py +0 -103
  141. mirascope/core/__init__.py +0 -109
  142. mirascope/core/anthropic/__init__.py +0 -31
  143. mirascope/core/anthropic/_call.py +0 -67
  144. mirascope/core/anthropic/_call_kwargs.py +0 -13
  145. mirascope/core/anthropic/_thinking.py +0 -70
  146. mirascope/core/anthropic/_utils/__init__.py +0 -16
  147. mirascope/core/anthropic/_utils/_convert_common_call_params.py +0 -25
  148. mirascope/core/anthropic/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -21
  149. mirascope/core/anthropic/_utils/_convert_message_params.py +0 -102
  150. mirascope/core/anthropic/_utils/_get_json_output.py +0 -31
  151. mirascope/core/anthropic/_utils/_handle_stream.py +0 -113
  152. mirascope/core/anthropic/_utils/_message_param_converter.py +0 -154
  153. mirascope/core/anthropic/_utils/_setup_call.py +0 -146
  154. mirascope/core/anthropic/call_params.py +0 -44
  155. mirascope/core/anthropic/call_response.py +0 -226
  156. mirascope/core/anthropic/call_response_chunk.py +0 -152
  157. mirascope/core/anthropic/dynamic_config.py +0 -40
  158. mirascope/core/anthropic/py.typed +0 -0
  159. mirascope/core/anthropic/stream.py +0 -204
  160. mirascope/core/anthropic/tool.py +0 -101
  161. mirascope/core/azure/__init__.py +0 -31
  162. mirascope/core/azure/_call.py +0 -67
  163. mirascope/core/azure/_call_kwargs.py +0 -13
  164. mirascope/core/azure/_utils/__init__.py +0 -14
  165. mirascope/core/azure/_utils/_convert_common_call_params.py +0 -26
  166. mirascope/core/azure/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -21
  167. mirascope/core/azure/_utils/_convert_message_params.py +0 -121
  168. mirascope/core/azure/_utils/_get_credential.py +0 -33
  169. mirascope/core/azure/_utils/_get_json_output.py +0 -27
  170. mirascope/core/azure/_utils/_handle_stream.py +0 -130
  171. mirascope/core/azure/_utils/_message_param_converter.py +0 -117
  172. mirascope/core/azure/_utils/_setup_call.py +0 -183
  173. mirascope/core/azure/call_params.py +0 -59
  174. mirascope/core/azure/call_response.py +0 -215
  175. mirascope/core/azure/call_response_chunk.py +0 -105
  176. mirascope/core/azure/dynamic_config.py +0 -30
  177. mirascope/core/azure/py.typed +0 -0
  178. mirascope/core/azure/stream.py +0 -147
  179. mirascope/core/azure/tool.py +0 -93
  180. mirascope/core/base/__init__.py +0 -86
  181. mirascope/core/base/_call_factory.py +0 -256
  182. mirascope/core/base/_create.py +0 -253
  183. mirascope/core/base/_extract.py +0 -175
  184. mirascope/core/base/_extract_with_tools.py +0 -189
  185. mirascope/core/base/_partial.py +0 -95
  186. mirascope/core/base/_utils/__init__.py +0 -92
  187. mirascope/core/base/_utils/_base_message_param_converter.py +0 -22
  188. mirascope/core/base/_utils/_base_type.py +0 -26
  189. mirascope/core/base/_utils/_convert_base_model_to_base_tool.py +0 -48
  190. mirascope/core/base/_utils/_convert_base_type_to_base_tool.py +0 -24
  191. mirascope/core/base/_utils/_convert_function_to_base_tool.py +0 -139
  192. mirascope/core/base/_utils/_convert_messages_to_message_params.py +0 -178
  193. mirascope/core/base/_utils/_convert_provider_finish_reason_to_finish_reason.py +0 -20
  194. mirascope/core/base/_utils/_default_tool_docstring.py +0 -6
  195. mirascope/core/base/_utils/_extract_tool_return.py +0 -42
  196. mirascope/core/base/_utils/_fn_is_async.py +0 -24
  197. mirascope/core/base/_utils/_format_template.py +0 -32
  198. mirascope/core/base/_utils/_get_audio_type.py +0 -18
  199. mirascope/core/base/_utils/_get_common_usage.py +0 -20
  200. mirascope/core/base/_utils/_get_create_fn_or_async_create_fn.py +0 -137
  201. mirascope/core/base/_utils/_get_document_type.py +0 -7
  202. mirascope/core/base/_utils/_get_dynamic_configuration.py +0 -69
  203. mirascope/core/base/_utils/_get_fields_from_call_args.py +0 -34
  204. mirascope/core/base/_utils/_get_fn_args.py +0 -23
  205. mirascope/core/base/_utils/_get_image_dimensions.py +0 -39
  206. mirascope/core/base/_utils/_get_image_type.py +0 -26
  207. mirascope/core/base/_utils/_get_metadata.py +0 -17
  208. mirascope/core/base/_utils/_get_possible_user_message_param.py +0 -21
  209. mirascope/core/base/_utils/_get_prompt_template.py +0 -28
  210. mirascope/core/base/_utils/_get_template_values.py +0 -51
  211. mirascope/core/base/_utils/_get_template_variables.py +0 -38
  212. mirascope/core/base/_utils/_get_unsupported_tool_config_keys.py +0 -10
  213. mirascope/core/base/_utils/_is_prompt_template.py +0 -24
  214. mirascope/core/base/_utils/_json_mode_content.py +0 -17
  215. mirascope/core/base/_utils/_messages_decorator.py +0 -121
  216. mirascope/core/base/_utils/_parse_content_template.py +0 -323
  217. mirascope/core/base/_utils/_parse_prompt_messages.py +0 -63
  218. mirascope/core/base/_utils/_pil_image_to_bytes.py +0 -13
  219. mirascope/core/base/_utils/_protocols.py +0 -901
  220. mirascope/core/base/_utils/_setup_call.py +0 -79
  221. mirascope/core/base/_utils/_setup_extract_tool.py +0 -30
  222. mirascope/core/base/call_kwargs.py +0 -13
  223. mirascope/core/base/call_params.py +0 -36
  224. mirascope/core/base/call_response.py +0 -338
  225. mirascope/core/base/call_response_chunk.py +0 -130
  226. mirascope/core/base/dynamic_config.py +0 -82
  227. mirascope/core/base/from_call_args.py +0 -30
  228. mirascope/core/base/merge_decorators.py +0 -59
  229. mirascope/core/base/message_param.py +0 -175
  230. mirascope/core/base/messages.py +0 -116
  231. mirascope/core/base/metadata.py +0 -13
  232. mirascope/core/base/prompt.py +0 -497
  233. mirascope/core/base/response_model_config_dict.py +0 -9
  234. mirascope/core/base/stream.py +0 -479
  235. mirascope/core/base/stream_config.py +0 -11
  236. mirascope/core/base/structured_stream.py +0 -296
  237. mirascope/core/base/tool.py +0 -214
  238. mirascope/core/base/toolkit.py +0 -176
  239. mirascope/core/base/types.py +0 -344
  240. mirascope/core/bedrock/__init__.py +0 -34
  241. mirascope/core/bedrock/_call.py +0 -68
  242. mirascope/core/bedrock/_call_kwargs.py +0 -12
  243. mirascope/core/bedrock/_types.py +0 -104
  244. mirascope/core/bedrock/_utils/__init__.py +0 -14
  245. mirascope/core/bedrock/_utils/_convert_common_call_params.py +0 -39
  246. mirascope/core/bedrock/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -23
  247. mirascope/core/bedrock/_utils/_convert_message_params.py +0 -111
  248. mirascope/core/bedrock/_utils/_get_json_output.py +0 -30
  249. mirascope/core/bedrock/_utils/_handle_stream.py +0 -104
  250. mirascope/core/bedrock/_utils/_message_param_converter.py +0 -172
  251. mirascope/core/bedrock/_utils/_setup_call.py +0 -258
  252. mirascope/core/bedrock/call_params.py +0 -38
  253. mirascope/core/bedrock/call_response.py +0 -248
  254. mirascope/core/bedrock/call_response_chunk.py +0 -111
  255. mirascope/core/bedrock/dynamic_config.py +0 -37
  256. mirascope/core/bedrock/py.typed +0 -0
  257. mirascope/core/bedrock/stream.py +0 -154
  258. mirascope/core/bedrock/tool.py +0 -100
  259. mirascope/core/cohere/__init__.py +0 -30
  260. mirascope/core/cohere/_call.py +0 -67
  261. mirascope/core/cohere/_call_kwargs.py +0 -11
  262. mirascope/core/cohere/_types.py +0 -20
  263. mirascope/core/cohere/_utils/__init__.py +0 -14
  264. mirascope/core/cohere/_utils/_convert_common_call_params.py +0 -26
  265. mirascope/core/cohere/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -24
  266. mirascope/core/cohere/_utils/_convert_message_params.py +0 -32
  267. mirascope/core/cohere/_utils/_get_json_output.py +0 -30
  268. mirascope/core/cohere/_utils/_handle_stream.py +0 -35
  269. mirascope/core/cohere/_utils/_message_param_converter.py +0 -54
  270. mirascope/core/cohere/_utils/_setup_call.py +0 -150
  271. mirascope/core/cohere/call_params.py +0 -62
  272. mirascope/core/cohere/call_response.py +0 -205
  273. mirascope/core/cohere/call_response_chunk.py +0 -125
  274. mirascope/core/cohere/dynamic_config.py +0 -32
  275. mirascope/core/cohere/py.typed +0 -0
  276. mirascope/core/cohere/stream.py +0 -113
  277. mirascope/core/cohere/tool.py +0 -93
  278. mirascope/core/costs/__init__.py +0 -5
  279. mirascope/core/costs/_anthropic_calculate_cost.py +0 -219
  280. mirascope/core/costs/_azure_calculate_cost.py +0 -11
  281. mirascope/core/costs/_bedrock_calculate_cost.py +0 -15
  282. mirascope/core/costs/_cohere_calculate_cost.py +0 -44
  283. mirascope/core/costs/_gemini_calculate_cost.py +0 -67
  284. mirascope/core/costs/_google_calculate_cost.py +0 -427
  285. mirascope/core/costs/_groq_calculate_cost.py +0 -156
  286. mirascope/core/costs/_litellm_calculate_cost.py +0 -11
  287. mirascope/core/costs/_mistral_calculate_cost.py +0 -64
  288. mirascope/core/costs/_openai_calculate_cost.py +0 -416
  289. mirascope/core/costs/_vertex_calculate_cost.py +0 -67
  290. mirascope/core/costs/_xai_calculate_cost.py +0 -104
  291. mirascope/core/costs/calculate_cost.py +0 -86
  292. mirascope/core/gemini/__init__.py +0 -40
  293. mirascope/core/gemini/_call.py +0 -67
  294. mirascope/core/gemini/_call_kwargs.py +0 -12
  295. mirascope/core/gemini/_utils/__init__.py +0 -14
  296. mirascope/core/gemini/_utils/_convert_common_call_params.py +0 -39
  297. mirascope/core/gemini/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -23
  298. mirascope/core/gemini/_utils/_convert_message_params.py +0 -156
  299. mirascope/core/gemini/_utils/_get_json_output.py +0 -35
  300. mirascope/core/gemini/_utils/_handle_stream.py +0 -33
  301. mirascope/core/gemini/_utils/_message_param_converter.py +0 -209
  302. mirascope/core/gemini/_utils/_setup_call.py +0 -149
  303. mirascope/core/gemini/call_params.py +0 -52
  304. mirascope/core/gemini/call_response.py +0 -216
  305. mirascope/core/gemini/call_response_chunk.py +0 -100
  306. mirascope/core/gemini/dynamic_config.py +0 -26
  307. mirascope/core/gemini/stream.py +0 -120
  308. mirascope/core/gemini/tool.py +0 -104
  309. mirascope/core/google/__init__.py +0 -29
  310. mirascope/core/google/_call.py +0 -67
  311. mirascope/core/google/_call_kwargs.py +0 -13
  312. mirascope/core/google/_utils/__init__.py +0 -14
  313. mirascope/core/google/_utils/_convert_common_call_params.py +0 -38
  314. mirascope/core/google/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -27
  315. mirascope/core/google/_utils/_convert_message_params.py +0 -297
  316. mirascope/core/google/_utils/_get_json_output.py +0 -37
  317. mirascope/core/google/_utils/_handle_stream.py +0 -58
  318. mirascope/core/google/_utils/_message_param_converter.py +0 -200
  319. mirascope/core/google/_utils/_setup_call.py +0 -201
  320. mirascope/core/google/_utils/_validate_media_type.py +0 -58
  321. mirascope/core/google/call_params.py +0 -22
  322. mirascope/core/google/call_response.py +0 -255
  323. mirascope/core/google/call_response_chunk.py +0 -135
  324. mirascope/core/google/dynamic_config.py +0 -26
  325. mirascope/core/google/stream.py +0 -199
  326. mirascope/core/google/tool.py +0 -146
  327. mirascope/core/groq/__init__.py +0 -30
  328. mirascope/core/groq/_call.py +0 -67
  329. mirascope/core/groq/_call_kwargs.py +0 -13
  330. mirascope/core/groq/_utils/__init__.py +0 -14
  331. mirascope/core/groq/_utils/_convert_common_call_params.py +0 -26
  332. mirascope/core/groq/_utils/_convert_message_params.py +0 -112
  333. mirascope/core/groq/_utils/_get_json_output.py +0 -27
  334. mirascope/core/groq/_utils/_handle_stream.py +0 -123
  335. mirascope/core/groq/_utils/_message_param_converter.py +0 -89
  336. mirascope/core/groq/_utils/_setup_call.py +0 -132
  337. mirascope/core/groq/call_params.py +0 -52
  338. mirascope/core/groq/call_response.py +0 -213
  339. mirascope/core/groq/call_response_chunk.py +0 -104
  340. mirascope/core/groq/dynamic_config.py +0 -29
  341. mirascope/core/groq/py.typed +0 -0
  342. mirascope/core/groq/stream.py +0 -135
  343. mirascope/core/groq/tool.py +0 -80
  344. mirascope/core/litellm/__init__.py +0 -28
  345. mirascope/core/litellm/_call.py +0 -67
  346. mirascope/core/litellm/_utils/__init__.py +0 -5
  347. mirascope/core/litellm/_utils/_setup_call.py +0 -109
  348. mirascope/core/litellm/call_params.py +0 -10
  349. mirascope/core/litellm/call_response.py +0 -24
  350. mirascope/core/litellm/call_response_chunk.py +0 -14
  351. mirascope/core/litellm/dynamic_config.py +0 -8
  352. mirascope/core/litellm/py.typed +0 -0
  353. mirascope/core/litellm/stream.py +0 -86
  354. mirascope/core/litellm/tool.py +0 -13
  355. mirascope/core/mistral/__init__.py +0 -36
  356. mirascope/core/mistral/_call.py +0 -65
  357. mirascope/core/mistral/_call_kwargs.py +0 -19
  358. mirascope/core/mistral/_utils/__init__.py +0 -14
  359. mirascope/core/mistral/_utils/_convert_common_call_params.py +0 -24
  360. mirascope/core/mistral/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -22
  361. mirascope/core/mistral/_utils/_convert_message_params.py +0 -122
  362. mirascope/core/mistral/_utils/_get_json_output.py +0 -34
  363. mirascope/core/mistral/_utils/_handle_stream.py +0 -139
  364. mirascope/core/mistral/_utils/_message_param_converter.py +0 -176
  365. mirascope/core/mistral/_utils/_setup_call.py +0 -164
  366. mirascope/core/mistral/call_params.py +0 -36
  367. mirascope/core/mistral/call_response.py +0 -205
  368. mirascope/core/mistral/call_response_chunk.py +0 -105
  369. mirascope/core/mistral/dynamic_config.py +0 -33
  370. mirascope/core/mistral/py.typed +0 -0
  371. mirascope/core/mistral/stream.py +0 -120
  372. mirascope/core/mistral/tool.py +0 -81
  373. mirascope/core/openai/__init__.py +0 -31
  374. mirascope/core/openai/_call.py +0 -67
  375. mirascope/core/openai/_call_kwargs.py +0 -13
  376. mirascope/core/openai/_utils/__init__.py +0 -14
  377. mirascope/core/openai/_utils/_convert_common_call_params.py +0 -26
  378. mirascope/core/openai/_utils/_convert_message_params.py +0 -148
  379. mirascope/core/openai/_utils/_get_json_output.py +0 -31
  380. mirascope/core/openai/_utils/_handle_stream.py +0 -138
  381. mirascope/core/openai/_utils/_message_param_converter.py +0 -105
  382. mirascope/core/openai/_utils/_setup_call.py +0 -155
  383. mirascope/core/openai/call_params.py +0 -92
  384. mirascope/core/openai/call_response.py +0 -273
  385. mirascope/core/openai/call_response_chunk.py +0 -139
  386. mirascope/core/openai/dynamic_config.py +0 -34
  387. mirascope/core/openai/py.typed +0 -0
  388. mirascope/core/openai/stream.py +0 -185
  389. mirascope/core/openai/tool.py +0 -101
  390. mirascope/core/py.typed +0 -0
  391. mirascope/core/vertex/__init__.py +0 -45
  392. mirascope/core/vertex/_call.py +0 -62
  393. mirascope/core/vertex/_call_kwargs.py +0 -12
  394. mirascope/core/vertex/_utils/__init__.py +0 -14
  395. mirascope/core/vertex/_utils/_convert_common_call_params.py +0 -37
  396. mirascope/core/vertex/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -23
  397. mirascope/core/vertex/_utils/_convert_message_params.py +0 -171
  398. mirascope/core/vertex/_utils/_get_json_output.py +0 -36
  399. mirascope/core/vertex/_utils/_handle_stream.py +0 -33
  400. mirascope/core/vertex/_utils/_message_param_converter.py +0 -133
  401. mirascope/core/vertex/_utils/_setup_call.py +0 -160
  402. mirascope/core/vertex/call_params.py +0 -24
  403. mirascope/core/vertex/call_response.py +0 -206
  404. mirascope/core/vertex/call_response_chunk.py +0 -99
  405. mirascope/core/vertex/dynamic_config.py +0 -28
  406. mirascope/core/vertex/stream.py +0 -119
  407. mirascope/core/vertex/tool.py +0 -101
  408. mirascope/core/xai/__init__.py +0 -28
  409. mirascope/core/xai/_call.py +0 -67
  410. mirascope/core/xai/_utils/__init__.py +0 -5
  411. mirascope/core/xai/_utils/_setup_call.py +0 -113
  412. mirascope/core/xai/call_params.py +0 -10
  413. mirascope/core/xai/call_response.py +0 -16
  414. mirascope/core/xai/call_response_chunk.py +0 -14
  415. mirascope/core/xai/dynamic_config.py +0 -8
  416. mirascope/core/xai/py.typed +0 -0
  417. mirascope/core/xai/stream.py +0 -57
  418. mirascope/core/xai/tool.py +0 -13
  419. mirascope/experimental/graphs/__init__.py +0 -5
  420. mirascope/integrations/__init__.py +0 -16
  421. mirascope/integrations/_middleware_factory.py +0 -403
  422. mirascope/integrations/langfuse/__init__.py +0 -3
  423. mirascope/integrations/langfuse/_utils.py +0 -114
  424. mirascope/integrations/langfuse/_with_langfuse.py +0 -70
  425. mirascope/integrations/logfire/__init__.py +0 -3
  426. mirascope/integrations/logfire/_utils.py +0 -225
  427. mirascope/integrations/logfire/_with_logfire.py +0 -63
  428. mirascope/integrations/otel/__init__.py +0 -10
  429. mirascope/integrations/otel/_utils.py +0 -270
  430. mirascope/integrations/otel/_with_hyperdx.py +0 -60
  431. mirascope/integrations/otel/_with_otel.py +0 -59
  432. mirascope/integrations/tenacity.py +0 -14
  433. mirascope/llm/_call.py +0 -401
  434. mirascope/llm/_context.py +0 -384
  435. mirascope/llm/_override.py +0 -3639
  436. mirascope/llm/_protocols.py +0 -500
  437. mirascope/llm/_response_metaclass.py +0 -31
  438. mirascope/llm/call_response.py +0 -158
  439. mirascope/llm/call_response_chunk.py +0 -66
  440. mirascope/llm/stream.py +0 -162
  441. mirascope/llm/tool.py +0 -64
  442. mirascope/mcp/__init__.py +0 -7
  443. mirascope/mcp/_utils.py +0 -288
  444. mirascope/mcp/client.py +0 -167
  445. mirascope/mcp/server.py +0 -356
  446. mirascope/mcp/tools.py +0 -110
  447. mirascope/py.typed +0 -0
  448. mirascope/retries/__init__.py +0 -11
  449. mirascope/retries/fallback.py +0 -131
  450. mirascope/retries/tenacity.py +0 -50
  451. mirascope/tools/__init__.py +0 -37
  452. mirascope/tools/base.py +0 -98
  453. mirascope/tools/system/__init__.py +0 -0
  454. mirascope/tools/system/_docker_operation.py +0 -166
  455. mirascope/tools/system/_file_system.py +0 -267
  456. mirascope/tools/web/__init__.py +0 -0
  457. mirascope/tools/web/_duckduckgo.py +0 -111
  458. mirascope/tools/web/_httpx.py +0 -125
  459. mirascope/tools/web/_parse_url_content.py +0 -94
  460. mirascope/tools/web/_requests.py +0 -54
  461. mirascope/v0/__init__.py +0 -43
  462. mirascope/v0/anthropic.py +0 -54
  463. mirascope/v0/base/__init__.py +0 -12
  464. mirascope/v0/base/calls.py +0 -118
  465. mirascope/v0/base/extractors.py +0 -122
  466. mirascope/v0/base/ops_utils.py +0 -207
  467. mirascope/v0/base/prompts.py +0 -48
  468. mirascope/v0/base/types.py +0 -14
  469. mirascope/v0/base/utils.py +0 -21
  470. mirascope/v0/openai.py +0 -54
  471. mirascope-1.25.7.dist-info/METADATA +0 -169
  472. mirascope-1.25.7.dist-info/RECORD +0 -378
  473. {mirascope-1.25.7.dist-info → mirascope-2.0.0a0.dist-info}/WHEEL +0 -0
  474. {mirascope-1.25.7.dist-info → mirascope-2.0.0a0.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,572 @@
1
+ """StreamResponse and AsyncStreamResponse to stream content from LLMs."""
2
+
3
+ import asyncio
4
+ from collections.abc import Sequence
5
+ from typing import TYPE_CHECKING, Generic, overload
6
+
7
+ from ..content import ToolOutput
8
+ from ..context import Context, DepsT
9
+ from ..formatting import Format, FormattableT
10
+ from ..messages import Message, UserContent
11
+ from ..tools import (
12
+ AsyncContextTool,
13
+ AsyncContextToolkit,
14
+ AsyncTool,
15
+ AsyncToolkit,
16
+ ContextTool,
17
+ ContextToolkit,
18
+ Tool,
19
+ Toolkit,
20
+ )
21
+ from .base_stream_response import (
22
+ AsyncChunkIterator,
23
+ BaseAsyncStreamResponse,
24
+ BaseSyncStreamResponse,
25
+ ChunkIterator,
26
+ )
27
+
28
+ if TYPE_CHECKING:
29
+ from ..clients import ModelId, Params, Provider
30
+
31
+
32
class StreamResponse(BaseSyncStreamResponse[Toolkit, FormattableT]):
    """A `StreamResponse` wraps response content from the LLM with a streaming interface.

    This class supports iteration to process chunks as they arrive from the model.

    Content can be streamed in one of four ways:
      - Via `.streams()`, which provides an iterator of streams, where each
        stream contains chunks of streamed data. The chunks contain `delta`s (new content
        in that particular chunk), and the stream itself accumulates the collected state
        of all the chunks processed thus far.
      - Via `.chunk_stream()` which allows iterating over Mirascope's provider-
        agnostic chunk representation.
      - Via `.pretty_stream()` a helper method which provides all response content
        as `str` deltas. Iterating through `pretty_stream` will yield text content and
        optionally placeholder representations for other content types, but it will still
        consume the full stream.
      - Via `.structured_stream()`, a helper method which provides partial
        structured outputs from a response (useful when FormatT is set). Iterating through
        `structured_stream` will only yield structured partials, but it will still consume
        the full stream.

    As chunks are consumed, they are collected in-memory on the `StreamResponse`, and they
    become available in `.content`, `.messages`, `.tool_calls`, etc. All of the stream
    iterators can be restarted after the stream has been consumed, in which case they
    will yield chunks from memory in the original sequence that came from the LLM. If
    the stream is only partially consumed, a fresh iterator will first iterate through
    in-memory content, and then will continue consuming fresh chunks from the LLM.

    In the specific case of text chunks, they are included in the response content as soon
    as they become available, via an `llm.Text` part that updates as more deltas come in.
    This enables the behavior where resuming a partially-streamed response will include
    as much text as the model generated.

    For other chunks, like `Thinking` or `ToolCall`, they are only added to response
    content once the corresponding part has fully streamed. This avoids issues like
    adding incomplete tool calls, or thinking blocks missing signatures, to the response.

    For each iterator, fully iterating through the iterator will consume the whole
    LLM stream. You can pause stream execution midway by breaking out of the iterator,
    and you can safely resume execution from the same iterator if desired.


    Example:
        ```python
        from mirascope import llm

        @llm.call(
            provider="openai:completions",
            model_id="gpt-4o-mini",
        )
        def answer_question(question: str) -> str:
            return f"Answer this question: {question}"

        stream_response = answer_question.stream("What is the capital of France?")

        for chunk in stream_response.pretty_stream():
            print(chunk, end="", flush=True)
        print()
        ```
    """

    def __init__(
        self,
        *,
        provider: "Provider",
        model_id: "ModelId",
        params: "Params",
        tools: Sequence[Tool] | Toolkit | None = None,
        format: Format[FormattableT] | None = None,
        input_messages: Sequence[Message],
        chunk_iterator: ChunkIterator,
    ) -> None:
        """Initialize a `StreamResponse`."""
        # Normalize the `tools` argument: accept an already-built Toolkit as-is,
        # otherwise wrap the (possibly None) sequence of tools in a new Toolkit.
        toolkit = tools if isinstance(tools, Toolkit) else Toolkit(tools=tools)
        super().__init__(
            provider=provider,
            model_id=model_id,
            params=params,
            toolkit=toolkit,
            format=format,
            input_messages=input_messages,
            chunk_iterator=chunk_iterator,
        )

    def execute_tools(self) -> Sequence[ToolOutput]:
        """Execute and return all of the tool calls in the response.

        Returns:
            A sequence containing a `ToolOutput` for every tool call in the order they appeared.

        Raises:
            ToolNotFoundError: If one of the response's tool calls has no matching tool.
            Exception: If one of the tools throws an exception.
        """
        return [self.toolkit.execute(tool_call) for tool_call in self.tool_calls]

    @overload
    def resume(self: "StreamResponse", content: UserContent) -> "StreamResponse": ...

    @overload
    def resume(
        self: "StreamResponse[FormattableT]", content: UserContent
    ) -> "StreamResponse[FormattableT]": ...

    def resume(
        self, content: UserContent
    ) -> "StreamResponse | StreamResponse[FormattableT]":
        """Generate a new `StreamResponse` using this response's messages with additional user content.

        Uses this response's tools and format type. Also uses this response's provider,
        model, client, and params, unless the model context manager is being used to
        provide a new LLM as an override.

        Args:
            content: The new user message content to append to the message history.

        Returns:
            A new `StreamResponse` instance generated from the extended message history.
        """
        return self.model.resume_stream(
            response=self,
            content=content,
        )
157
class AsyncStreamResponse(BaseAsyncStreamResponse[AsyncToolkit, FormattableT]):
    """An `AsyncStreamResponse` wraps response content from the LLM with a streaming interface.

    This class supports iteration to process chunks as they arrive from the model.

    Content can be streamed in one of four ways:
    - Via `.streams()`, which provides an iterator of streams, where each
      stream contains chunks of streamed data. The chunks contain `delta`s (new content
      in that particular chunk), and the stream itself accumulates the collected state
      of all the chunks processed thus far.
    - Via `.chunk_stream()` which allows iterating over Mirascope's provider-
      agnostic chunk representation.
    - Via `.pretty_stream()` a helper method which provides all response content
      as `str` deltas. Iterating through `pretty_stream` will yield text content and
      optionally placeholder representations for other content types, but it will still
      consume the full stream.
    - Via `.structured_stream()`, a helper method which provides partial
      structured outputs from a response (useful when FormatT is set). Iterating through
      `structured_stream` will only yield structured partials, but it will still consume
      the full stream.

    As chunks are consumed, they are collected in-memory on the `AsyncStreamResponse`, and they
    become available in `.content`, `.messages`, `.tool_calls`, etc. All of the stream
    iterators can be restarted after the stream has been consumed, in which case they
    will yield chunks from memory in the original sequence that came from the LLM. If
    the stream is only partially consumed, a fresh iterator will first iterate through
    in-memory content, and then will continue consuming fresh chunks from the LLM.

    In the specific case of text chunks, they are included in the response content as soon
    as they become available, via an `llm.Text` part that updates as more deltas come in.
    This enables the behavior where resuming a partially-streamed response will include
    as much text as the model generated.

    For other chunks, like `Thinking` or `ToolCall`, they are only added to response
    content once the corresponding part has fully streamed. This avoids issues like
    adding incomplete tool calls, or thinking blocks missing signatures, to the response.

    For each iterator, fully iterating through the iterator will consume the whole
    LLM stream. You can pause stream execution midway by breaking out of the iterator,
    and you can safely resume execution from the same iterator if desired.

    Example:
        ```python
        from mirascope import llm

        @llm.call(
            provider="openai:completions",
            model_id="gpt-4o-mini",
        )
        async def answer_question(question: str) -> str:
            return f"Answer this question: {question}"

        stream_response = await answer_question.stream("What is the capital of France?")

        async for chunk in stream_response.pretty_stream():
            print(chunk, end="", flush=True)
        print()
        ```
    """

    def __init__(
        self,
        *,
        provider: "Provider",
        model_id: "ModelId",
        params: "Params",
        tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
        format: Format[FormattableT] | None = None,
        input_messages: Sequence[Message],
        chunk_iterator: AsyncChunkIterator,
    ) -> None:
        """Initialize an `AsyncStreamResponse`.

        Args:
            provider: The provider that generated the response.
            model_id: The id of the model that generated the response.
            params: The params used when generating the response.
            tools: The tools (or an existing toolkit) available to the model, if any.
            format: The format for structured output, if any.
            input_messages: The messages that were sent to the LLM.
            chunk_iterator: The async iterator of raw stream chunks.
        """
        # Normalize `tools` so the base class always receives an `AsyncToolkit`.
        toolkit = (
            tools if isinstance(tools, AsyncToolkit) else AsyncToolkit(tools=tools)
        )
        super().__init__(
            provider=provider,
            model_id=model_id,
            params=params,
            toolkit=toolkit,
            format=format,
            input_messages=input_messages,
            chunk_iterator=chunk_iterator,
        )

    async def execute_tools(self) -> Sequence[ToolOutput]:
        """Execute and return all of the tool calls in the response.

        Tool executions are awaited concurrently via `asyncio.gather`; results are
        returned in the order the tool calls appeared.

        Returns:
            A sequence containing a `ToolOutput` for every tool call in the order they appeared.

        Raises:
            ToolNotFoundError: If one of the response's tool calls has no matching tool.
            Exception: If one of the tools throws an exception.
        """
        tasks = [self.toolkit.execute(tool_call) for tool_call in self.tool_calls]
        return await asyncio.gather(*tasks)

    @overload
    async def resume(
        self: "AsyncStreamResponse", content: UserContent
    ) -> "AsyncStreamResponse": ...

    @overload
    async def resume(
        self: "AsyncStreamResponse[FormattableT]", content: UserContent
    ) -> "AsyncStreamResponse[FormattableT]": ...

    async def resume(
        self, content: UserContent
    ) -> "AsyncStreamResponse | AsyncStreamResponse[FormattableT]":
        """Generate a new `AsyncStreamResponse` using this response's messages with additional user content.

        Uses this response's tools and format type. Also uses this response's provider,
        model, client, and params, unless the model context manager is being used to
        provide a new LLM as an override.

        Args:
            content: The new user message content to append to the message history.

        Returns:
            A new `AsyncStreamResponse` instance generated from the extended message history.
        """
        return await self.model.resume_stream_async(
            response=self,
            content=content,
        )
285
+
286
+
287
class ContextStreamResponse(
    BaseSyncStreamResponse[ContextToolkit, FormattableT], Generic[DepsT, FormattableT]
):
    """A `ContextStreamResponse` wraps response content from the LLM with a streaming interface.

    This class supports iteration to process chunks as they arrive from the model.

    Content can be streamed in one of four ways:
    - Via `.streams()`, which provides an iterator of streams, where each
      stream contains chunks of streamed data. The chunks contain `delta`s (new content
      in that particular chunk), and the stream itself accumulates the collected state
      of all the chunks processed thus far.
    - Via `.chunk_stream()` which allows iterating over Mirascope's provider-
      agnostic chunk representation.
    - Via `.pretty_stream()` a helper method which provides all response content
      as `str` deltas. Iterating through `pretty_stream` will yield text content and
      optionally placeholder representations for other content types, but it will still
      consume the full stream.
    - Via `.structured_stream()`, a helper method which provides partial
      structured outputs from a response (useful when FormatT is set). Iterating through
      `structured_stream` will only yield structured partials, but it will still consume
      the full stream.

    As chunks are consumed, they are collected in-memory on the `ContextStreamResponse`, and they
    become available in `.content`, `.messages`, `.tool_calls`, etc. All of the stream
    iterators can be restarted after the stream has been consumed, in which case they
    will yield chunks from memory in the original sequence that came from the LLM. If
    the stream is only partially consumed, a fresh iterator will first iterate through
    in-memory content, and then will continue consuming fresh chunks from the LLM.

    In the specific case of text chunks, they are included in the response content as soon
    as they become available, via an `llm.Text` part that updates as more deltas come in.
    This enables the behavior where resuming a partially-streamed response will include
    as much text as the model generated.

    For other chunks, like `Thinking` or `ToolCall`, they are only added to response
    content once the corresponding part has fully streamed. This avoids issues like
    adding incomplete tool calls, or thinking blocks missing signatures, to the response.

    For each iterator, fully iterating through the iterator will consume the whole
    LLM stream. You can pause stream execution midway by breaking out of the iterator,
    and you can safely resume execution from the same iterator if desired.

    Example:
        ```python
        from mirascope import llm

        @llm.call(
            provider="openai:completions",
            model_id="gpt-4o-mini",
        )
        def answer_question(ctx: llm.Context, question: str) -> str:
            return f"Answer this question: {question}"

        ctx = llm.Context()
        stream_response = answer_question.stream(ctx, "What is the capital of France?")

        for chunk in stream_response.pretty_stream():
            print(chunk, end="", flush=True)
        print()
        ```
    """

    def __init__(
        self,
        *,
        provider: "Provider",
        model_id: "ModelId",
        params: "Params",
        tools: Sequence[Tool | ContextTool[DepsT]]
        | ContextToolkit[DepsT]
        | None = None,
        format: Format[FormattableT] | None = None,
        input_messages: Sequence[Message],
        chunk_iterator: ChunkIterator,
    ) -> None:
        """Initialize a `ContextStreamResponse`.

        Args:
            provider: The provider that generated the response.
            model_id: The id of the model that generated the response.
            params: The params used when generating the response.
            tools: The tools (or an existing toolkit) available to the model, if any.
            format: The format for structured output, if any.
            input_messages: The messages that were sent to the LLM.
            chunk_iterator: The iterator of raw stream chunks.
        """
        # Normalize `tools` so the base class always receives a `ContextToolkit`.
        toolkit = (
            tools if isinstance(tools, ContextToolkit) else ContextToolkit(tools=tools)
        )
        super().__init__(
            provider=provider,
            model_id=model_id,
            params=params,
            toolkit=toolkit,
            format=format,
            input_messages=input_messages,
            chunk_iterator=chunk_iterator,
        )

    def execute_tools(self, ctx: Context[DepsT]) -> Sequence[ToolOutput]:
        """Execute and return all of the tool calls in the response.

        Args:
            ctx: A `Context` with the required deps type.

        Returns:
            A sequence containing a `ToolOutput` for every tool call in the order they appeared.

        Raises:
            ToolNotFoundError: If one of the response's tool calls has no matching tool.
            Exception: If one of the tools throws an exception.
        """
        return [self.toolkit.execute(ctx, tool_call) for tool_call in self.tool_calls]

    @overload
    def resume(
        self: "ContextStreamResponse[DepsT]", ctx: Context[DepsT], content: UserContent
    ) -> "ContextStreamResponse[DepsT]": ...

    @overload
    def resume(
        self: "ContextStreamResponse[DepsT, FormattableT]",
        ctx: Context[DepsT],
        content: UserContent,
    ) -> "ContextStreamResponse[DepsT, FormattableT]": ...

    def resume(
        self, ctx: Context[DepsT], content: UserContent
    ) -> "ContextStreamResponse[DepsT] | ContextStreamResponse[DepsT, FormattableT]":
        """Generate a new `ContextStreamResponse` using this response's messages with additional user content.

        Uses this response's tools and format type. Also uses this response's provider,
        model, client, and params, unless the model context manager is being used to
        provide a new LLM as an override.

        Args:
            ctx: A Context with the required deps type.
            content: The new user message content to append to the message history.

        Returns:
            A new `ContextStreamResponse` instance generated from the extended message history.
        """
        return self.model.context_resume_stream(
            ctx=ctx,
            response=self,
            content=content,
        )
426
+
427
+
428
class AsyncContextStreamResponse(
    BaseAsyncStreamResponse[AsyncContextToolkit, FormattableT],
    Generic[DepsT, FormattableT],
):
    """An `AsyncContextStreamResponse` wraps response content from the LLM with a streaming interface.

    This class supports iteration to process chunks as they arrive from the model.

    Content can be streamed in one of four ways:
    - Via `.streams()`, which provides an iterator of streams, where each
      stream contains chunks of streamed data. The chunks contain `delta`s (new content
      in that particular chunk), and the stream itself accumulates the collected state
      of all the chunks processed thus far.
    - Via `.chunk_stream()` which allows iterating over Mirascope's provider-
      agnostic chunk representation.
    - Via `.pretty_stream()` a helper method which provides all response content
      as `str` deltas. Iterating through `pretty_stream` will yield text content and
      optionally placeholder representations for other content types, but it will still
      consume the full stream.
    - Via `.structured_stream()`, a helper method which provides partial
      structured outputs from a response (useful when FormatT is set). Iterating through
      `structured_stream` will only yield structured partials, but it will still consume
      the full stream.

    As chunks are consumed, they are collected in-memory on the `AsyncContextStreamResponse`, and they
    become available in `.content`, `.messages`, `.tool_calls`, etc. All of the stream
    iterators can be restarted after the stream has been consumed, in which case they
    will yield chunks from memory in the original sequence that came from the LLM. If
    the stream is only partially consumed, a fresh iterator will first iterate through
    in-memory content, and then will continue consuming fresh chunks from the LLM.

    In the specific case of text chunks, they are included in the response content as soon
    as they become available, via an `llm.Text` part that updates as more deltas come in.
    This enables the behavior where resuming a partially-streamed response will include
    as much text as the model generated.

    For other chunks, like `Thinking` or `ToolCall`, they are only added to response
    content once the corresponding part has fully streamed. This avoids issues like
    adding incomplete tool calls, or thinking blocks missing signatures, to the response.

    For each iterator, fully iterating through the iterator will consume the whole
    LLM stream. You can pause stream execution midway by breaking out of the iterator,
    and you can safely resume execution from the same iterator if desired.

    Example:
        ```python
        from mirascope import llm

        @llm.call(
            provider="openai:completions",
            model_id="gpt-4o-mini",
        )
        async def answer_question(ctx: llm.Context, question: str) -> str:
            return f"Answer this question: {question}"

        ctx = llm.Context()
        stream_response = await answer_question.stream(ctx, "What is the capital of France?")

        async for chunk in stream_response.pretty_stream():
            print(chunk, end="", flush=True)
        print()
        ```
    """

    def __init__(
        self,
        *,
        provider: "Provider",
        model_id: "ModelId",
        params: "Params",
        tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
        | AsyncContextToolkit[DepsT]
        | None = None,
        format: Format[FormattableT] | None = None,
        input_messages: Sequence[Message],
        chunk_iterator: AsyncChunkIterator,
    ) -> None:
        """Initialize an `AsyncContextStreamResponse`.

        Args:
            provider: The provider that generated the response.
            model_id: The id of the model that generated the response.
            params: The params used when generating the response.
            tools: The tools (or an existing toolkit) available to the model, if any.
            format: The format for structured output, if any.
            input_messages: The messages that were sent to the LLM.
            chunk_iterator: The async iterator of raw stream chunks.
        """
        # Normalize `tools` so the base class always receives an `AsyncContextToolkit`.
        toolkit = (
            tools
            if isinstance(tools, AsyncContextToolkit)
            else AsyncContextToolkit(tools=tools)
        )
        super().__init__(
            provider=provider,
            model_id=model_id,
            params=params,
            toolkit=toolkit,
            format=format,
            input_messages=input_messages,
            chunk_iterator=chunk_iterator,
        )

    async def execute_tools(self, ctx: Context[DepsT]) -> Sequence[ToolOutput]:
        """Execute and return all of the tool calls in the response.

        Tool executions are awaited concurrently via `asyncio.gather`; results are
        returned in the order the tool calls appeared.

        Args:
            ctx: A `Context` with the required deps type.

        Returns:
            A sequence containing a `ToolOutput` for every tool call in the order they appeared.

        Raises:
            ToolNotFoundError: If one of the response's tool calls has no matching tool.
            Exception: If one of the tools throws an exception.
        """
        tasks = [self.toolkit.execute(ctx, tool_call) for tool_call in self.tool_calls]
        return await asyncio.gather(*tasks)

    @overload
    async def resume(
        self: "AsyncContextStreamResponse[DepsT]",
        ctx: Context[DepsT],
        content: UserContent,
    ) -> "AsyncContextStreamResponse[DepsT]": ...

    @overload
    async def resume(
        self: "AsyncContextStreamResponse[DepsT, FormattableT]",
        ctx: Context[DepsT],
        content: UserContent,
    ) -> "AsyncContextStreamResponse[DepsT, FormattableT]": ...

    async def resume(
        self, ctx: Context[DepsT], content: UserContent
    ) -> "AsyncContextStreamResponse[DepsT] | AsyncContextStreamResponse[DepsT, FormattableT]":
        """Generate a new `AsyncContextStreamResponse` using this response's messages with additional user content.

        Uses this response's tools and format type. Also uses this response's provider,
        model, client, and params, unless the model context manager is being used to
        provide a new LLM as an override.

        Args:
            ctx: A Context with the required deps type.
            content: The new user message content to append to the message history.

        Returns:
            A new `AsyncContextStreamResponse` instance generated from the extended message history.
        """
        return await self.model.context_resume_stream_async(
            ctx=ctx,
            response=self,
            content=content,
        )