mirascope 1.25.7__py3-none-any.whl → 2.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (797)
  1. mirascope/__init__.py +5 -52
  2. mirascope/_stubs.py +363 -0
  3. mirascope/api/__init__.py +14 -0
  4. mirascope/api/_generated/README.md +207 -0
  5. mirascope/api/_generated/__init__.py +440 -0
  6. mirascope/api/_generated/annotations/__init__.py +33 -0
  7. mirascope/api/_generated/annotations/client.py +506 -0
  8. mirascope/api/_generated/annotations/raw_client.py +1414 -0
  9. mirascope/api/_generated/annotations/types/__init__.py +31 -0
  10. mirascope/api/_generated/annotations/types/annotations_create_request_label.py +5 -0
  11. mirascope/api/_generated/annotations/types/annotations_create_response.py +48 -0
  12. mirascope/api/_generated/annotations/types/annotations_create_response_label.py +5 -0
  13. mirascope/api/_generated/annotations/types/annotations_get_response.py +48 -0
  14. mirascope/api/_generated/annotations/types/annotations_get_response_label.py +5 -0
  15. mirascope/api/_generated/annotations/types/annotations_list_request_label.py +5 -0
  16. mirascope/api/_generated/annotations/types/annotations_list_response.py +21 -0
  17. mirascope/api/_generated/annotations/types/annotations_list_response_annotations_item.py +50 -0
  18. mirascope/api/_generated/annotations/types/annotations_list_response_annotations_item_label.py +5 -0
  19. mirascope/api/_generated/annotations/types/annotations_update_request_label.py +5 -0
  20. mirascope/api/_generated/annotations/types/annotations_update_response.py +48 -0
  21. mirascope/api/_generated/annotations/types/annotations_update_response_label.py +5 -0
  22. mirascope/api/_generated/api_keys/__init__.py +17 -0
  23. mirascope/api/_generated/api_keys/client.py +530 -0
  24. mirascope/api/_generated/api_keys/raw_client.py +1236 -0
  25. mirascope/api/_generated/api_keys/types/__init__.py +15 -0
  26. mirascope/api/_generated/api_keys/types/api_keys_create_response.py +28 -0
  27. mirascope/api/_generated/api_keys/types/api_keys_get_response.py +27 -0
  28. mirascope/api/_generated/api_keys/types/api_keys_list_all_for_org_response_item.py +40 -0
  29. mirascope/api/_generated/api_keys/types/api_keys_list_response_item.py +27 -0
  30. mirascope/api/_generated/client.py +211 -0
  31. mirascope/api/_generated/core/__init__.py +52 -0
  32. mirascope/api/_generated/core/api_error.py +23 -0
  33. mirascope/api/_generated/core/client_wrapper.py +46 -0
  34. mirascope/api/_generated/core/datetime_utils.py +28 -0
  35. mirascope/api/_generated/core/file.py +67 -0
  36. mirascope/api/_generated/core/force_multipart.py +16 -0
  37. mirascope/api/_generated/core/http_client.py +543 -0
  38. mirascope/api/_generated/core/http_response.py +55 -0
  39. mirascope/api/_generated/core/jsonable_encoder.py +100 -0
  40. mirascope/api/_generated/core/pydantic_utilities.py +255 -0
  41. mirascope/api/_generated/core/query_encoder.py +58 -0
  42. mirascope/api/_generated/core/remove_none_from_dict.py +11 -0
  43. mirascope/api/_generated/core/request_options.py +35 -0
  44. mirascope/api/_generated/core/serialization.py +276 -0
  45. mirascope/api/_generated/docs/__init__.py +4 -0
  46. mirascope/api/_generated/docs/client.py +91 -0
  47. mirascope/api/_generated/docs/raw_client.py +178 -0
  48. mirascope/api/_generated/environment.py +9 -0
  49. mirascope/api/_generated/environments/__init__.py +23 -0
  50. mirascope/api/_generated/environments/client.py +649 -0
  51. mirascope/api/_generated/environments/raw_client.py +1567 -0
  52. mirascope/api/_generated/environments/types/__init__.py +25 -0
  53. mirascope/api/_generated/environments/types/environments_create_response.py +24 -0
  54. mirascope/api/_generated/environments/types/environments_get_analytics_response.py +60 -0
  55. mirascope/api/_generated/environments/types/environments_get_analytics_response_top_functions_item.py +24 -0
  56. mirascope/api/_generated/environments/types/environments_get_analytics_response_top_models_item.py +22 -0
  57. mirascope/api/_generated/environments/types/environments_get_response.py +24 -0
  58. mirascope/api/_generated/environments/types/environments_list_response_item.py +24 -0
  59. mirascope/api/_generated/environments/types/environments_update_response.py +24 -0
  60. mirascope/api/_generated/errors/__init__.py +25 -0
  61. mirascope/api/_generated/errors/bad_request_error.py +14 -0
  62. mirascope/api/_generated/errors/conflict_error.py +14 -0
  63. mirascope/api/_generated/errors/forbidden_error.py +11 -0
  64. mirascope/api/_generated/errors/internal_server_error.py +10 -0
  65. mirascope/api/_generated/errors/not_found_error.py +11 -0
  66. mirascope/api/_generated/errors/payment_required_error.py +15 -0
  67. mirascope/api/_generated/errors/service_unavailable_error.py +14 -0
  68. mirascope/api/_generated/errors/too_many_requests_error.py +15 -0
  69. mirascope/api/_generated/errors/unauthorized_error.py +11 -0
  70. mirascope/api/_generated/functions/__init__.py +39 -0
  71. mirascope/api/_generated/functions/client.py +647 -0
  72. mirascope/api/_generated/functions/raw_client.py +1890 -0
  73. mirascope/api/_generated/functions/types/__init__.py +53 -0
  74. mirascope/api/_generated/functions/types/functions_create_request_dependencies_value.py +20 -0
  75. mirascope/api/_generated/functions/types/functions_create_response.py +37 -0
  76. mirascope/api/_generated/functions/types/functions_create_response_dependencies_value.py +20 -0
  77. mirascope/api/_generated/functions/types/functions_find_by_hash_response.py +39 -0
  78. mirascope/api/_generated/functions/types/functions_find_by_hash_response_dependencies_value.py +20 -0
  79. mirascope/api/_generated/functions/types/functions_get_by_env_response.py +53 -0
  80. mirascope/api/_generated/functions/types/functions_get_by_env_response_dependencies_value.py +22 -0
  81. mirascope/api/_generated/functions/types/functions_get_response.py +37 -0
  82. mirascope/api/_generated/functions/types/functions_get_response_dependencies_value.py +20 -0
  83. mirascope/api/_generated/functions/types/functions_list_by_env_response.py +25 -0
  84. mirascope/api/_generated/functions/types/functions_list_by_env_response_functions_item.py +56 -0
  85. mirascope/api/_generated/functions/types/functions_list_by_env_response_functions_item_dependencies_value.py +22 -0
  86. mirascope/api/_generated/functions/types/functions_list_response.py +21 -0
  87. mirascope/api/_generated/functions/types/functions_list_response_functions_item.py +41 -0
  88. mirascope/api/_generated/functions/types/functions_list_response_functions_item_dependencies_value.py +20 -0
  89. mirascope/api/_generated/health/__init__.py +7 -0
  90. mirascope/api/_generated/health/client.py +92 -0
  91. mirascope/api/_generated/health/raw_client.py +175 -0
  92. mirascope/api/_generated/health/types/__init__.py +8 -0
  93. mirascope/api/_generated/health/types/health_check_response.py +22 -0
  94. mirascope/api/_generated/health/types/health_check_response_status.py +5 -0
  95. mirascope/api/_generated/organization_invitations/__init__.py +33 -0
  96. mirascope/api/_generated/organization_invitations/client.py +546 -0
  97. mirascope/api/_generated/organization_invitations/raw_client.py +1519 -0
  98. mirascope/api/_generated/organization_invitations/types/__init__.py +53 -0
  99. mirascope/api/_generated/organization_invitations/types/organization_invitations_accept_response.py +34 -0
  100. mirascope/api/_generated/organization_invitations/types/organization_invitations_accept_response_role.py +7 -0
  101. mirascope/api/_generated/organization_invitations/types/organization_invitations_create_request_role.py +7 -0
  102. mirascope/api/_generated/organization_invitations/types/organization_invitations_create_response.py +48 -0
  103. mirascope/api/_generated/organization_invitations/types/organization_invitations_create_response_role.py +7 -0
  104. mirascope/api/_generated/organization_invitations/types/organization_invitations_create_response_status.py +7 -0
  105. mirascope/api/_generated/organization_invitations/types/organization_invitations_get_response.py +48 -0
  106. mirascope/api/_generated/organization_invitations/types/organization_invitations_get_response_role.py +7 -0
  107. mirascope/api/_generated/organization_invitations/types/organization_invitations_get_response_status.py +7 -0
  108. mirascope/api/_generated/organization_invitations/types/organization_invitations_list_response_item.py +48 -0
  109. mirascope/api/_generated/organization_invitations/types/organization_invitations_list_response_item_role.py +7 -0
  110. mirascope/api/_generated/organization_invitations/types/organization_invitations_list_response_item_status.py +7 -0
  111. mirascope/api/_generated/organization_memberships/__init__.py +19 -0
  112. mirascope/api/_generated/organization_memberships/client.py +302 -0
  113. mirascope/api/_generated/organization_memberships/raw_client.py +736 -0
  114. mirascope/api/_generated/organization_memberships/types/__init__.py +27 -0
  115. mirascope/api/_generated/organization_memberships/types/organization_memberships_list_response_item.py +33 -0
  116. mirascope/api/_generated/organization_memberships/types/organization_memberships_list_response_item_role.py +7 -0
  117. mirascope/api/_generated/organization_memberships/types/organization_memberships_update_request_role.py +7 -0
  118. mirascope/api/_generated/organization_memberships/types/organization_memberships_update_response.py +31 -0
  119. mirascope/api/_generated/organization_memberships/types/organization_memberships_update_response_role.py +7 -0
  120. mirascope/api/_generated/organizations/__init__.py +51 -0
  121. mirascope/api/_generated/organizations/client.py +869 -0
  122. mirascope/api/_generated/organizations/raw_client.py +2593 -0
  123. mirascope/api/_generated/organizations/types/__init__.py +71 -0
  124. mirascope/api/_generated/organizations/types/organizations_create_payment_intent_response.py +24 -0
  125. mirascope/api/_generated/organizations/types/organizations_create_response.py +26 -0
  126. mirascope/api/_generated/organizations/types/organizations_create_response_role.py +5 -0
  127. mirascope/api/_generated/organizations/types/organizations_get_response.py +26 -0
  128. mirascope/api/_generated/organizations/types/organizations_get_response_role.py +5 -0
  129. mirascope/api/_generated/organizations/types/organizations_list_response_item.py +26 -0
  130. mirascope/api/_generated/organizations/types/organizations_list_response_item_role.py +5 -0
  131. mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_request_target_plan.py +7 -0
  132. mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_response.py +47 -0
  133. mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_response_validation_errors_item.py +33 -0
  134. mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_response_validation_errors_item_resource.py +7 -0
  135. mirascope/api/_generated/organizations/types/organizations_router_balance_response.py +24 -0
  136. mirascope/api/_generated/organizations/types/organizations_subscription_response.py +53 -0
  137. mirascope/api/_generated/organizations/types/organizations_subscription_response_current_plan.py +7 -0
  138. mirascope/api/_generated/organizations/types/organizations_subscription_response_payment_method.py +26 -0
  139. mirascope/api/_generated/organizations/types/organizations_subscription_response_scheduled_change.py +34 -0
  140. mirascope/api/_generated/organizations/types/organizations_subscription_response_scheduled_change_target_plan.py +7 -0
  141. mirascope/api/_generated/organizations/types/organizations_update_response.py +26 -0
  142. mirascope/api/_generated/organizations/types/organizations_update_response_role.py +5 -0
  143. mirascope/api/_generated/organizations/types/organizations_update_subscription_request_target_plan.py +7 -0
  144. mirascope/api/_generated/organizations/types/organizations_update_subscription_response.py +35 -0
  145. mirascope/api/_generated/project_memberships/__init__.py +25 -0
  146. mirascope/api/_generated/project_memberships/client.py +437 -0
  147. mirascope/api/_generated/project_memberships/raw_client.py +1039 -0
  148. mirascope/api/_generated/project_memberships/types/__init__.py +29 -0
  149. mirascope/api/_generated/project_memberships/types/project_memberships_create_request_role.py +7 -0
  150. mirascope/api/_generated/project_memberships/types/project_memberships_create_response.py +35 -0
  151. mirascope/api/_generated/project_memberships/types/project_memberships_create_response_role.py +7 -0
  152. mirascope/api/_generated/project_memberships/types/project_memberships_list_response_item.py +33 -0
  153. mirascope/api/_generated/project_memberships/types/project_memberships_list_response_item_role.py +7 -0
  154. mirascope/api/_generated/project_memberships/types/project_memberships_update_request_role.py +7 -0
  155. mirascope/api/_generated/project_memberships/types/project_memberships_update_response.py +35 -0
  156. mirascope/api/_generated/project_memberships/types/project_memberships_update_response_role.py +7 -0
  157. mirascope/api/_generated/projects/__init__.py +7 -0
  158. mirascope/api/_generated/projects/client.py +428 -0
  159. mirascope/api/_generated/projects/raw_client.py +1302 -0
  160. mirascope/api/_generated/projects/types/__init__.py +10 -0
  161. mirascope/api/_generated/projects/types/projects_create_response.py +25 -0
  162. mirascope/api/_generated/projects/types/projects_get_response.py +25 -0
  163. mirascope/api/_generated/projects/types/projects_list_response_item.py +25 -0
  164. mirascope/api/_generated/projects/types/projects_update_response.py +25 -0
  165. mirascope/api/_generated/reference.md +4915 -0
  166. mirascope/api/_generated/tags/__init__.py +19 -0
  167. mirascope/api/_generated/tags/client.py +504 -0
  168. mirascope/api/_generated/tags/raw_client.py +1288 -0
  169. mirascope/api/_generated/tags/types/__init__.py +17 -0
  170. mirascope/api/_generated/tags/types/tags_create_response.py +41 -0
  171. mirascope/api/_generated/tags/types/tags_get_response.py +41 -0
  172. mirascope/api/_generated/tags/types/tags_list_response.py +23 -0
  173. mirascope/api/_generated/tags/types/tags_list_response_tags_item.py +41 -0
  174. mirascope/api/_generated/tags/types/tags_update_response.py +41 -0
  175. mirascope/api/_generated/token_cost/__init__.py +7 -0
  176. mirascope/api/_generated/token_cost/client.py +160 -0
  177. mirascope/api/_generated/token_cost/raw_client.py +264 -0
  178. mirascope/api/_generated/token_cost/types/__init__.py +8 -0
  179. mirascope/api/_generated/token_cost/types/token_cost_calculate_request_usage.py +54 -0
  180. mirascope/api/_generated/token_cost/types/token_cost_calculate_response.py +52 -0
  181. mirascope/api/_generated/traces/__init__.py +97 -0
  182. mirascope/api/_generated/traces/client.py +1103 -0
  183. mirascope/api/_generated/traces/raw_client.py +2322 -0
  184. mirascope/api/_generated/traces/types/__init__.py +155 -0
  185. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item.py +29 -0
  186. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource.py +27 -0
  187. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item.py +23 -0
  188. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value.py +38 -0
  189. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_array_value.py +19 -0
  190. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value.py +22 -0
  191. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value_values_item.py +20 -0
  192. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item.py +29 -0
  193. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope.py +31 -0
  194. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item.py +23 -0
  195. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value.py +38 -0
  196. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_array_value.py +19 -0
  197. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value.py +22 -0
  198. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value_values_item.py +22 -0
  199. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item.py +48 -0
  200. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item.py +23 -0
  201. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value.py +38 -0
  202. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_array_value.py +19 -0
  203. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value.py +24 -0
  204. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value_values_item.py +22 -0
  205. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_status.py +20 -0
  206. mirascope/api/_generated/traces/types/traces_create_response.py +24 -0
  207. mirascope/api/_generated/traces/types/traces_create_response_partial_success.py +22 -0
  208. mirascope/api/_generated/traces/types/traces_get_analytics_summary_response.py +60 -0
  209. mirascope/api/_generated/traces/types/traces_get_analytics_summary_response_top_functions_item.py +24 -0
  210. mirascope/api/_generated/traces/types/traces_get_analytics_summary_response_top_models_item.py +22 -0
  211. mirascope/api/_generated/traces/types/traces_get_trace_detail_by_env_response.py +33 -0
  212. mirascope/api/_generated/traces/types/traces_get_trace_detail_by_env_response_spans_item.py +88 -0
  213. mirascope/api/_generated/traces/types/traces_get_trace_detail_response.py +33 -0
  214. mirascope/api/_generated/traces/types/traces_get_trace_detail_response_spans_item.py +88 -0
  215. mirascope/api/_generated/traces/types/traces_list_by_function_hash_response.py +25 -0
  216. mirascope/api/_generated/traces/types/traces_list_by_function_hash_response_traces_item.py +44 -0
  217. mirascope/api/_generated/traces/types/traces_search_by_env_request_attribute_filters_item.py +26 -0
  218. mirascope/api/_generated/traces/types/traces_search_by_env_request_attribute_filters_item_operator.py +7 -0
  219. mirascope/api/_generated/traces/types/traces_search_by_env_request_sort_by.py +7 -0
  220. mirascope/api/_generated/traces/types/traces_search_by_env_request_sort_order.py +7 -0
  221. mirascope/api/_generated/traces/types/traces_search_by_env_response.py +26 -0
  222. mirascope/api/_generated/traces/types/traces_search_by_env_response_spans_item.py +50 -0
  223. mirascope/api/_generated/traces/types/traces_search_request_attribute_filters_item.py +26 -0
  224. mirascope/api/_generated/traces/types/traces_search_request_attribute_filters_item_operator.py +7 -0
  225. mirascope/api/_generated/traces/types/traces_search_request_sort_by.py +7 -0
  226. mirascope/api/_generated/traces/types/traces_search_request_sort_order.py +5 -0
  227. mirascope/api/_generated/traces/types/traces_search_response.py +26 -0
  228. mirascope/api/_generated/traces/types/traces_search_response_spans_item.py +50 -0
  229. mirascope/api/_generated/types/__init__.py +85 -0
  230. mirascope/api/_generated/types/already_exists_error.py +22 -0
  231. mirascope/api/_generated/types/already_exists_error_tag.py +5 -0
  232. mirascope/api/_generated/types/bad_request_error_body.py +50 -0
  233. mirascope/api/_generated/types/click_house_error.py +22 -0
  234. mirascope/api/_generated/types/database_error.py +22 -0
  235. mirascope/api/_generated/types/database_error_tag.py +5 -0
  236. mirascope/api/_generated/types/date.py +3 -0
  237. mirascope/api/_generated/types/http_api_decode_error.py +27 -0
  238. mirascope/api/_generated/types/http_api_decode_error_tag.py +5 -0
  239. mirascope/api/_generated/types/immutable_resource_error.py +22 -0
  240. mirascope/api/_generated/types/internal_server_error_body.py +49 -0
  241. mirascope/api/_generated/types/issue.py +38 -0
  242. mirascope/api/_generated/types/issue_tag.py +10 -0
  243. mirascope/api/_generated/types/not_found_error_body.py +22 -0
  244. mirascope/api/_generated/types/not_found_error_tag.py +5 -0
  245. mirascope/api/_generated/types/number_from_string.py +3 -0
  246. mirascope/api/_generated/types/permission_denied_error.py +22 -0
  247. mirascope/api/_generated/types/permission_denied_error_tag.py +5 -0
  248. mirascope/api/_generated/types/plan_limit_exceeded_error.py +32 -0
  249. mirascope/api/_generated/types/plan_limit_exceeded_error_tag.py +7 -0
  250. mirascope/api/_generated/types/pricing_unavailable_error.py +23 -0
  251. mirascope/api/_generated/types/property_key.py +7 -0
  252. mirascope/api/_generated/types/property_key_key.py +25 -0
  253. mirascope/api/_generated/types/property_key_key_tag.py +5 -0
  254. mirascope/api/_generated/types/rate_limit_error.py +31 -0
  255. mirascope/api/_generated/types/rate_limit_error_tag.py +5 -0
  256. mirascope/api/_generated/types/service_unavailable_error_body.py +24 -0
  257. mirascope/api/_generated/types/service_unavailable_error_tag.py +7 -0
  258. mirascope/api/_generated/types/stripe_error.py +20 -0
  259. mirascope/api/_generated/types/subscription_past_due_error.py +31 -0
  260. mirascope/api/_generated/types/subscription_past_due_error_tag.py +7 -0
  261. mirascope/api/_generated/types/unauthorized_error_body.py +21 -0
  262. mirascope/api/_generated/types/unauthorized_error_tag.py +5 -0
  263. mirascope/api/client.py +255 -0
  264. mirascope/api/settings.py +99 -0
  265. mirascope/llm/__init__.py +290 -15
  266. mirascope/llm/calls/__init__.py +17 -0
  267. mirascope/llm/calls/calls.py +341 -0
  268. mirascope/llm/calls/decorator.py +275 -0
  269. mirascope/llm/content/__init__.py +71 -0
  270. mirascope/llm/content/audio.py +173 -0
  271. mirascope/llm/content/document.py +94 -0
  272. mirascope/llm/content/image.py +206 -0
  273. mirascope/llm/content/text.py +47 -0
  274. mirascope/llm/content/thought.py +58 -0
  275. mirascope/llm/content/tool_call.py +69 -0
  276. mirascope/llm/content/tool_output.py +43 -0
  277. mirascope/llm/context/__init__.py +6 -0
  278. mirascope/llm/context/_utils.py +41 -0
  279. mirascope/llm/context/context.py +24 -0
  280. mirascope/llm/exceptions.py +360 -0
  281. mirascope/llm/formatting/__init__.py +39 -0
  282. mirascope/llm/formatting/format.py +293 -0
  283. mirascope/llm/formatting/from_call_args.py +30 -0
  284. mirascope/llm/formatting/output_parser.py +178 -0
  285. mirascope/llm/formatting/partial.py +131 -0
  286. mirascope/llm/formatting/primitives.py +192 -0
  287. mirascope/llm/formatting/types.py +66 -0
  288. mirascope/llm/mcp/__init__.py +5 -0
  289. mirascope/llm/mcp/mcp_client.py +130 -0
  290. mirascope/llm/messages/__init__.py +35 -0
  291. mirascope/llm/messages/_utils.py +34 -0
  292. mirascope/llm/messages/message.py +190 -0
  293. mirascope/llm/models/__init__.py +21 -0
  294. mirascope/llm/models/models.py +1419 -0
  295. mirascope/llm/models/params.py +72 -0
  296. mirascope/llm/models/thinking_config.py +61 -0
  297. mirascope/llm/prompts/__init__.py +34 -0
  298. mirascope/llm/prompts/_utils.py +31 -0
  299. mirascope/llm/prompts/decorator.py +226 -0
  300. mirascope/llm/prompts/prompts.py +487 -0
  301. mirascope/llm/prompts/protocols.py +65 -0
  302. mirascope/llm/providers/__init__.py +62 -0
  303. mirascope/llm/providers/anthropic/__init__.py +11 -0
  304. mirascope/llm/providers/anthropic/_utils/__init__.py +27 -0
  305. mirascope/llm/providers/anthropic/_utils/beta_decode.py +282 -0
  306. mirascope/llm/providers/anthropic/_utils/beta_encode.py +266 -0
  307. mirascope/llm/providers/anthropic/_utils/decode.py +288 -0
  308. mirascope/llm/providers/anthropic/_utils/encode.py +418 -0
  309. mirascope/llm/providers/anthropic/_utils/errors.py +46 -0
  310. mirascope/llm/providers/anthropic/beta_provider.py +374 -0
  311. mirascope/llm/providers/anthropic/model_id.py +23 -0
  312. mirascope/llm/providers/anthropic/model_info.py +87 -0
  313. mirascope/llm/providers/anthropic/provider.py +479 -0
  314. mirascope/llm/providers/base/__init__.py +14 -0
  315. mirascope/llm/providers/base/_utils.py +253 -0
  316. mirascope/llm/providers/base/base_provider.py +1579 -0
  317. mirascope/llm/providers/base/kwargs.py +12 -0
  318. mirascope/llm/providers/google/__init__.py +6 -0
  319. mirascope/llm/providers/google/_utils/__init__.py +17 -0
  320. mirascope/llm/providers/google/_utils/decode.py +307 -0
  321. mirascope/llm/providers/google/_utils/encode.py +401 -0
  322. mirascope/llm/providers/google/_utils/errors.py +50 -0
  323. mirascope/llm/providers/google/message.py +7 -0
  324. mirascope/llm/providers/google/model_id.py +22 -0
  325. mirascope/llm/providers/google/model_info.py +63 -0
  326. mirascope/llm/providers/google/provider.py +492 -0
  327. mirascope/llm/providers/mirascope/__init__.py +5 -0
  328. mirascope/llm/providers/mirascope/_utils.py +73 -0
  329. mirascope/llm/providers/mirascope/provider.py +349 -0
  330. mirascope/llm/providers/mlx/__init__.py +9 -0
  331. mirascope/llm/providers/mlx/_utils.py +141 -0
  332. mirascope/llm/providers/mlx/encoding/__init__.py +8 -0
  333. mirascope/llm/providers/mlx/encoding/base.py +72 -0
  334. mirascope/llm/providers/mlx/encoding/transformers.py +150 -0
  335. mirascope/llm/providers/mlx/mlx.py +254 -0
  336. mirascope/llm/providers/mlx/model_id.py +17 -0
  337. mirascope/llm/providers/mlx/provider.py +452 -0
  338. mirascope/llm/providers/model_id.py +16 -0
  339. mirascope/llm/providers/ollama/__init__.py +7 -0
  340. mirascope/llm/providers/ollama/provider.py +71 -0
  341. mirascope/llm/providers/openai/__init__.py +15 -0
  342. mirascope/llm/providers/openai/_utils/__init__.py +5 -0
  343. mirascope/llm/providers/openai/_utils/errors.py +46 -0
  344. mirascope/llm/providers/openai/completions/__init__.py +7 -0
  345. mirascope/llm/providers/openai/completions/_utils/__init__.py +15 -0
  346. mirascope/llm/providers/openai/completions/_utils/decode.py +252 -0
  347. mirascope/llm/providers/openai/completions/_utils/encode.py +376 -0
  348. mirascope/llm/providers/openai/completions/base_provider.py +542 -0
  349. mirascope/llm/providers/openai/completions/provider.py +22 -0
  350. mirascope/llm/providers/openai/model_id.py +31 -0
  351. mirascope/llm/providers/openai/model_info.py +303 -0
  352. mirascope/llm/providers/openai/provider.py +441 -0
  353. mirascope/llm/providers/openai/responses/__init__.py +5 -0
  354. mirascope/llm/providers/openai/responses/_utils/__init__.py +15 -0
  355. mirascope/llm/providers/openai/responses/_utils/decode.py +260 -0
  356. mirascope/llm/providers/openai/responses/_utils/encode.py +384 -0
  357. mirascope/llm/providers/openai/responses/provider.py +513 -0
  358. mirascope/llm/providers/provider_id.py +24 -0
  359. mirascope/llm/providers/provider_registry.py +299 -0
  360. mirascope/llm/providers/together/__init__.py +7 -0
  361. mirascope/llm/providers/together/provider.py +40 -0
  362. mirascope/llm/responses/__init__.py +65 -0
  363. mirascope/llm/responses/_utils.py +146 -0
  364. mirascope/llm/responses/base_response.py +103 -0
  365. mirascope/llm/responses/base_stream_response.py +820 -0
  366. mirascope/llm/responses/finish_reason.py +28 -0
  367. mirascope/llm/responses/response.py +366 -0
  368. mirascope/llm/responses/root_response.py +248 -0
  369. mirascope/llm/responses/stream_response.py +581 -0
  370. mirascope/llm/responses/streams.py +363 -0
  371. mirascope/llm/responses/usage.py +95 -0
  372. mirascope/llm/tools/__init__.py +47 -0
  373. mirascope/llm/tools/_utils.py +34 -0
  374. mirascope/llm/tools/decorator.py +184 -0
  375. mirascope/llm/tools/protocols.py +96 -0
  376. mirascope/llm/tools/tool_schema.py +314 -0
  377. mirascope/llm/tools/toolkit.py +160 -0
  378. mirascope/llm/tools/tools.py +263 -0
  379. mirascope/llm/types/__init__.py +22 -0
  380. mirascope/llm/types/dataclass.py +9 -0
  381. mirascope/llm/types/jsonable.py +44 -0
  382. mirascope/llm/types/type_vars.py +19 -0
  383. mirascope/ops/__init__.py +111 -0
  384. mirascope/ops/_internal/__init__.py +5 -0
  385. mirascope/ops/_internal/closure.py +1169 -0
  386. mirascope/ops/_internal/configuration.py +177 -0
  387. mirascope/ops/_internal/context.py +76 -0
  388. mirascope/ops/_internal/exporters/__init__.py +26 -0
  389. mirascope/ops/_internal/exporters/exporters.py +395 -0
  390. mirascope/ops/_internal/exporters/processors.py +104 -0
  391. mirascope/ops/_internal/exporters/types.py +165 -0
  392. mirascope/ops/_internal/exporters/utils.py +29 -0
  393. mirascope/ops/_internal/instrumentation/__init__.py +8 -0
  394. mirascope/ops/_internal/instrumentation/llm/__init__.py +8 -0
  395. mirascope/ops/_internal/instrumentation/llm/common.py +530 -0
  396. mirascope/ops/_internal/instrumentation/llm/cost.py +190 -0
  397. mirascope/ops/_internal/instrumentation/llm/encode.py +238 -0
  398. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/__init__.py +38 -0
  399. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_input_messages.py +31 -0
  400. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_output_messages.py +38 -0
  401. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_system_instructions.py +18 -0
  402. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/shared.py +100 -0
  403. mirascope/ops/_internal/instrumentation/llm/llm.py +161 -0
  404. mirascope/ops/_internal/instrumentation/llm/model.py +1798 -0
  405. mirascope/ops/_internal/instrumentation/llm/response.py +521 -0
  406. mirascope/ops/_internal/instrumentation/llm/serialize.py +300 -0
  407. mirascope/ops/_internal/propagation.py +198 -0
  408. mirascope/ops/_internal/protocols.py +133 -0
  409. mirascope/ops/_internal/session.py +139 -0
  410. mirascope/ops/_internal/spans.py +232 -0
  411. mirascope/ops/_internal/traced_calls.py +375 -0
  412. mirascope/ops/_internal/traced_functions.py +523 -0
  413. mirascope/ops/_internal/tracing.py +353 -0
  414. mirascope/ops/_internal/types.py +13 -0
  415. mirascope/ops/_internal/utils.py +123 -0
  416. mirascope/ops/_internal/versioned_calls.py +512 -0
  417. mirascope/ops/_internal/versioned_functions.py +357 -0
  418. mirascope/ops/_internal/versioning.py +303 -0
  419. mirascope/ops/exceptions.py +21 -0
  420. mirascope-2.0.0.dist-info/METADATA +203 -0
  421. mirascope-2.0.0.dist-info/RECORD +423 -0
  422. {mirascope-1.25.7.dist-info → mirascope-2.0.0.dist-info}/WHEEL +1 -1
  423. {mirascope-1.25.7.dist-info → mirascope-2.0.0.dist-info}/licenses/LICENSE +1 -1
  424. mirascope/beta/__init__.py +0 -3
  425. mirascope/beta/openai/__init__.py +0 -17
  426. mirascope/beta/openai/realtime/__init__.py +0 -13
  427. mirascope/beta/openai/realtime/_utils/__init__.py +0 -3
  428. mirascope/beta/openai/realtime/_utils/_audio.py +0 -74
  429. mirascope/beta/openai/realtime/_utils/_protocols.py +0 -50
  430. mirascope/beta/openai/realtime/realtime.py +0 -500
  431. mirascope/beta/openai/realtime/recording.py +0 -98
  432. mirascope/beta/openai/realtime/tool.py +0 -113
  433. mirascope/beta/rag/__init__.py +0 -24
  434. mirascope/beta/rag/base/__init__.py +0 -22
  435. mirascope/beta/rag/base/chunkers/__init__.py +0 -2
  436. mirascope/beta/rag/base/chunkers/base_chunker.py +0 -37
  437. mirascope/beta/rag/base/chunkers/text_chunker.py +0 -33
  438. mirascope/beta/rag/base/config.py +0 -8
  439. mirascope/beta/rag/base/document.py +0 -11
  440. mirascope/beta/rag/base/embedders.py +0 -35
  441. mirascope/beta/rag/base/embedding_params.py +0 -18
  442. mirascope/beta/rag/base/embedding_response.py +0 -30
  443. mirascope/beta/rag/base/query_results.py +0 -7
  444. mirascope/beta/rag/base/vectorstore_params.py +0 -18
  445. mirascope/beta/rag/base/vectorstores.py +0 -37
  446. mirascope/beta/rag/chroma/__init__.py +0 -11
  447. mirascope/beta/rag/chroma/types.py +0 -62
  448. mirascope/beta/rag/chroma/vectorstores.py +0 -121
  449. mirascope/beta/rag/cohere/__init__.py +0 -11
  450. mirascope/beta/rag/cohere/embedders.py +0 -87
  451. mirascope/beta/rag/cohere/embedding_params.py +0 -29
  452. mirascope/beta/rag/cohere/embedding_response.py +0 -29
  453. mirascope/beta/rag/cohere/py.typed +0 -0
  454. mirascope/beta/rag/openai/__init__.py +0 -11
  455. mirascope/beta/rag/openai/embedders.py +0 -144
  456. mirascope/beta/rag/openai/embedding_params.py +0 -18
  457. mirascope/beta/rag/openai/embedding_response.py +0 -14
  458. mirascope/beta/rag/openai/py.typed +0 -0
  459. mirascope/beta/rag/pinecone/__init__.py +0 -19
  460. mirascope/beta/rag/pinecone/types.py +0 -143
  461. mirascope/beta/rag/pinecone/vectorstores.py +0 -148
  462. mirascope/beta/rag/weaviate/__init__.py +0 -6
  463. mirascope/beta/rag/weaviate/types.py +0 -92
  464. mirascope/beta/rag/weaviate/vectorstores.py +0 -103
  465. mirascope/core/__init__.py +0 -109
  466. mirascope/core/anthropic/__init__.py +0 -31
  467. mirascope/core/anthropic/_call.py +0 -67
  468. mirascope/core/anthropic/_call_kwargs.py +0 -13
  469. mirascope/core/anthropic/_thinking.py +0 -70
  470. mirascope/core/anthropic/_utils/__init__.py +0 -16
  471. mirascope/core/anthropic/_utils/_convert_common_call_params.py +0 -25
  472. mirascope/core/anthropic/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -21
  473. mirascope/core/anthropic/_utils/_convert_message_params.py +0 -102
  474. mirascope/core/anthropic/_utils/_get_json_output.py +0 -31
  475. mirascope/core/anthropic/_utils/_handle_stream.py +0 -113
  476. mirascope/core/anthropic/_utils/_message_param_converter.py +0 -154
  477. mirascope/core/anthropic/_utils/_setup_call.py +0 -146
  478. mirascope/core/anthropic/call_params.py +0 -44
  479. mirascope/core/anthropic/call_response.py +0 -226
  480. mirascope/core/anthropic/call_response_chunk.py +0 -152
  481. mirascope/core/anthropic/dynamic_config.py +0 -40
  482. mirascope/core/anthropic/py.typed +0 -0
  483. mirascope/core/anthropic/stream.py +0 -204
  484. mirascope/core/anthropic/tool.py +0 -101
  485. mirascope/core/azure/__init__.py +0 -31
  486. mirascope/core/azure/_call.py +0 -67
  487. mirascope/core/azure/_call_kwargs.py +0 -13
  488. mirascope/core/azure/_utils/__init__.py +0 -14
  489. mirascope/core/azure/_utils/_convert_common_call_params.py +0 -26
  490. mirascope/core/azure/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -21
  491. mirascope/core/azure/_utils/_convert_message_params.py +0 -121
  492. mirascope/core/azure/_utils/_get_credential.py +0 -33
  493. mirascope/core/azure/_utils/_get_json_output.py +0 -27
  494. mirascope/core/azure/_utils/_handle_stream.py +0 -130
  495. mirascope/core/azure/_utils/_message_param_converter.py +0 -117
  496. mirascope/core/azure/_utils/_setup_call.py +0 -183
  497. mirascope/core/azure/call_params.py +0 -59
  498. mirascope/core/azure/call_response.py +0 -215
  499. mirascope/core/azure/call_response_chunk.py +0 -105
  500. mirascope/core/azure/dynamic_config.py +0 -30
  501. mirascope/core/azure/py.typed +0 -0
  502. mirascope/core/azure/stream.py +0 -147
  503. mirascope/core/azure/tool.py +0 -93
  504. mirascope/core/base/__init__.py +0 -86
  505. mirascope/core/base/_call_factory.py +0 -256
  506. mirascope/core/base/_create.py +0 -253
  507. mirascope/core/base/_extract.py +0 -175
  508. mirascope/core/base/_extract_with_tools.py +0 -189
  509. mirascope/core/base/_partial.py +0 -95
  510. mirascope/core/base/_utils/__init__.py +0 -92
  511. mirascope/core/base/_utils/_base_message_param_converter.py +0 -22
  512. mirascope/core/base/_utils/_base_type.py +0 -26
  513. mirascope/core/base/_utils/_convert_base_model_to_base_tool.py +0 -48
  514. mirascope/core/base/_utils/_convert_base_type_to_base_tool.py +0 -24
  515. mirascope/core/base/_utils/_convert_function_to_base_tool.py +0 -139
  516. mirascope/core/base/_utils/_convert_messages_to_message_params.py +0 -178
  517. mirascope/core/base/_utils/_convert_provider_finish_reason_to_finish_reason.py +0 -20
  518. mirascope/core/base/_utils/_default_tool_docstring.py +0 -6
  519. mirascope/core/base/_utils/_extract_tool_return.py +0 -42
  520. mirascope/core/base/_utils/_fn_is_async.py +0 -24
  521. mirascope/core/base/_utils/_format_template.py +0 -32
  522. mirascope/core/base/_utils/_get_audio_type.py +0 -18
  523. mirascope/core/base/_utils/_get_common_usage.py +0 -20
  524. mirascope/core/base/_utils/_get_create_fn_or_async_create_fn.py +0 -137
  525. mirascope/core/base/_utils/_get_document_type.py +0 -7
  526. mirascope/core/base/_utils/_get_dynamic_configuration.py +0 -69
  527. mirascope/core/base/_utils/_get_fields_from_call_args.py +0 -34
  528. mirascope/core/base/_utils/_get_fn_args.py +0 -23
  529. mirascope/core/base/_utils/_get_image_dimensions.py +0 -39
  530. mirascope/core/base/_utils/_get_image_type.py +0 -26
  531. mirascope/core/base/_utils/_get_metadata.py +0 -17
  532. mirascope/core/base/_utils/_get_possible_user_message_param.py +0 -21
  533. mirascope/core/base/_utils/_get_prompt_template.py +0 -28
  534. mirascope/core/base/_utils/_get_template_values.py +0 -51
  535. mirascope/core/base/_utils/_get_template_variables.py +0 -38
  536. mirascope/core/base/_utils/_get_unsupported_tool_config_keys.py +0 -10
  537. mirascope/core/base/_utils/_is_prompt_template.py +0 -24
  538. mirascope/core/base/_utils/_json_mode_content.py +0 -17
  539. mirascope/core/base/_utils/_messages_decorator.py +0 -121
  540. mirascope/core/base/_utils/_parse_content_template.py +0 -323
  541. mirascope/core/base/_utils/_parse_prompt_messages.py +0 -63
  542. mirascope/core/base/_utils/_pil_image_to_bytes.py +0 -13
  543. mirascope/core/base/_utils/_protocols.py +0 -901
  544. mirascope/core/base/_utils/_setup_call.py +0 -79
  545. mirascope/core/base/_utils/_setup_extract_tool.py +0 -30
  546. mirascope/core/base/call_kwargs.py +0 -13
  547. mirascope/core/base/call_params.py +0 -36
  548. mirascope/core/base/call_response.py +0 -338
  549. mirascope/core/base/call_response_chunk.py +0 -130
  550. mirascope/core/base/dynamic_config.py +0 -82
  551. mirascope/core/base/from_call_args.py +0 -30
  552. mirascope/core/base/merge_decorators.py +0 -59
  553. mirascope/core/base/message_param.py +0 -175
  554. mirascope/core/base/messages.py +0 -116
  555. mirascope/core/base/metadata.py +0 -13
  556. mirascope/core/base/prompt.py +0 -497
  557. mirascope/core/base/response_model_config_dict.py +0 -9
  558. mirascope/core/base/stream.py +0 -479
  559. mirascope/core/base/stream_config.py +0 -11
  560. mirascope/core/base/structured_stream.py +0 -296
  561. mirascope/core/base/tool.py +0 -214
  562. mirascope/core/base/toolkit.py +0 -176
  563. mirascope/core/base/types.py +0 -344
  564. mirascope/core/bedrock/__init__.py +0 -34
  565. mirascope/core/bedrock/_call.py +0 -68
  566. mirascope/core/bedrock/_call_kwargs.py +0 -12
  567. mirascope/core/bedrock/_types.py +0 -104
  568. mirascope/core/bedrock/_utils/__init__.py +0 -14
  569. mirascope/core/bedrock/_utils/_convert_common_call_params.py +0 -39
  570. mirascope/core/bedrock/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -23
  571. mirascope/core/bedrock/_utils/_convert_message_params.py +0 -111
  572. mirascope/core/bedrock/_utils/_get_json_output.py +0 -30
  573. mirascope/core/bedrock/_utils/_handle_stream.py +0 -104
  574. mirascope/core/bedrock/_utils/_message_param_converter.py +0 -172
  575. mirascope/core/bedrock/_utils/_setup_call.py +0 -258
  576. mirascope/core/bedrock/call_params.py +0 -38
  577. mirascope/core/bedrock/call_response.py +0 -248
  578. mirascope/core/bedrock/call_response_chunk.py +0 -111
  579. mirascope/core/bedrock/dynamic_config.py +0 -37
  580. mirascope/core/bedrock/py.typed +0 -0
  581. mirascope/core/bedrock/stream.py +0 -154
  582. mirascope/core/bedrock/tool.py +0 -100
  583. mirascope/core/cohere/__init__.py +0 -30
  584. mirascope/core/cohere/_call.py +0 -67
  585. mirascope/core/cohere/_call_kwargs.py +0 -11
  586. mirascope/core/cohere/_types.py +0 -20
  587. mirascope/core/cohere/_utils/__init__.py +0 -14
  588. mirascope/core/cohere/_utils/_convert_common_call_params.py +0 -26
  589. mirascope/core/cohere/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -24
  590. mirascope/core/cohere/_utils/_convert_message_params.py +0 -32
  591. mirascope/core/cohere/_utils/_get_json_output.py +0 -30
  592. mirascope/core/cohere/_utils/_handle_stream.py +0 -35
  593. mirascope/core/cohere/_utils/_message_param_converter.py +0 -54
  594. mirascope/core/cohere/_utils/_setup_call.py +0 -150
  595. mirascope/core/cohere/call_params.py +0 -62
  596. mirascope/core/cohere/call_response.py +0 -205
  597. mirascope/core/cohere/call_response_chunk.py +0 -125
  598. mirascope/core/cohere/dynamic_config.py +0 -32
  599. mirascope/core/cohere/py.typed +0 -0
  600. mirascope/core/cohere/stream.py +0 -113
  601. mirascope/core/cohere/tool.py +0 -93
  602. mirascope/core/costs/__init__.py +0 -5
  603. mirascope/core/costs/_anthropic_calculate_cost.py +0 -219
  604. mirascope/core/costs/_azure_calculate_cost.py +0 -11
  605. mirascope/core/costs/_bedrock_calculate_cost.py +0 -15
  606. mirascope/core/costs/_cohere_calculate_cost.py +0 -44
  607. mirascope/core/costs/_gemini_calculate_cost.py +0 -67
  608. mirascope/core/costs/_google_calculate_cost.py +0 -427
  609. mirascope/core/costs/_groq_calculate_cost.py +0 -156
  610. mirascope/core/costs/_litellm_calculate_cost.py +0 -11
  611. mirascope/core/costs/_mistral_calculate_cost.py +0 -64
  612. mirascope/core/costs/_openai_calculate_cost.py +0 -416
  613. mirascope/core/costs/_vertex_calculate_cost.py +0 -67
  614. mirascope/core/costs/_xai_calculate_cost.py +0 -104
  615. mirascope/core/costs/calculate_cost.py +0 -86
  616. mirascope/core/gemini/__init__.py +0 -40
  617. mirascope/core/gemini/_call.py +0 -67
  618. mirascope/core/gemini/_call_kwargs.py +0 -12
  619. mirascope/core/gemini/_utils/__init__.py +0 -14
  620. mirascope/core/gemini/_utils/_convert_common_call_params.py +0 -39
  621. mirascope/core/gemini/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -23
  622. mirascope/core/gemini/_utils/_convert_message_params.py +0 -156
  623. mirascope/core/gemini/_utils/_get_json_output.py +0 -35
  624. mirascope/core/gemini/_utils/_handle_stream.py +0 -33
  625. mirascope/core/gemini/_utils/_message_param_converter.py +0 -209
  626. mirascope/core/gemini/_utils/_setup_call.py +0 -149
  627. mirascope/core/gemini/call_params.py +0 -52
  628. mirascope/core/gemini/call_response.py +0 -216
  629. mirascope/core/gemini/call_response_chunk.py +0 -100
  630. mirascope/core/gemini/dynamic_config.py +0 -26
  631. mirascope/core/gemini/stream.py +0 -120
  632. mirascope/core/gemini/tool.py +0 -104
  633. mirascope/core/google/__init__.py +0 -29
  634. mirascope/core/google/_call.py +0 -67
  635. mirascope/core/google/_call_kwargs.py +0 -13
  636. mirascope/core/google/_utils/__init__.py +0 -14
  637. mirascope/core/google/_utils/_convert_common_call_params.py +0 -38
  638. mirascope/core/google/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -27
  639. mirascope/core/google/_utils/_convert_message_params.py +0 -297
  640. mirascope/core/google/_utils/_get_json_output.py +0 -37
  641. mirascope/core/google/_utils/_handle_stream.py +0 -58
  642. mirascope/core/google/_utils/_message_param_converter.py +0 -200
  643. mirascope/core/google/_utils/_setup_call.py +0 -201
  644. mirascope/core/google/_utils/_validate_media_type.py +0 -58
  645. mirascope/core/google/call_params.py +0 -22
  646. mirascope/core/google/call_response.py +0 -255
  647. mirascope/core/google/call_response_chunk.py +0 -135
  648. mirascope/core/google/dynamic_config.py +0 -26
  649. mirascope/core/google/stream.py +0 -199
  650. mirascope/core/google/tool.py +0 -146
  651. mirascope/core/groq/__init__.py +0 -30
  652. mirascope/core/groq/_call.py +0 -67
  653. mirascope/core/groq/_call_kwargs.py +0 -13
  654. mirascope/core/groq/_utils/__init__.py +0 -14
  655. mirascope/core/groq/_utils/_convert_common_call_params.py +0 -26
  656. mirascope/core/groq/_utils/_convert_message_params.py +0 -112
  657. mirascope/core/groq/_utils/_get_json_output.py +0 -27
  658. mirascope/core/groq/_utils/_handle_stream.py +0 -123
  659. mirascope/core/groq/_utils/_message_param_converter.py +0 -89
  660. mirascope/core/groq/_utils/_setup_call.py +0 -132
  661. mirascope/core/groq/call_params.py +0 -52
  662. mirascope/core/groq/call_response.py +0 -213
  663. mirascope/core/groq/call_response_chunk.py +0 -104
  664. mirascope/core/groq/dynamic_config.py +0 -29
  665. mirascope/core/groq/py.typed +0 -0
  666. mirascope/core/groq/stream.py +0 -135
  667. mirascope/core/groq/tool.py +0 -80
  668. mirascope/core/litellm/__init__.py +0 -28
  669. mirascope/core/litellm/_call.py +0 -67
  670. mirascope/core/litellm/_utils/__init__.py +0 -5
  671. mirascope/core/litellm/_utils/_setup_call.py +0 -109
  672. mirascope/core/litellm/call_params.py +0 -10
  673. mirascope/core/litellm/call_response.py +0 -24
  674. mirascope/core/litellm/call_response_chunk.py +0 -14
  675. mirascope/core/litellm/dynamic_config.py +0 -8
  676. mirascope/core/litellm/py.typed +0 -0
  677. mirascope/core/litellm/stream.py +0 -86
  678. mirascope/core/litellm/tool.py +0 -13
  679. mirascope/core/mistral/__init__.py +0 -36
  680. mirascope/core/mistral/_call.py +0 -65
  681. mirascope/core/mistral/_call_kwargs.py +0 -19
  682. mirascope/core/mistral/_utils/__init__.py +0 -14
  683. mirascope/core/mistral/_utils/_convert_common_call_params.py +0 -24
  684. mirascope/core/mistral/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -22
  685. mirascope/core/mistral/_utils/_convert_message_params.py +0 -122
  686. mirascope/core/mistral/_utils/_get_json_output.py +0 -34
  687. mirascope/core/mistral/_utils/_handle_stream.py +0 -139
  688. mirascope/core/mistral/_utils/_message_param_converter.py +0 -176
  689. mirascope/core/mistral/_utils/_setup_call.py +0 -164
  690. mirascope/core/mistral/call_params.py +0 -36
  691. mirascope/core/mistral/call_response.py +0 -205
  692. mirascope/core/mistral/call_response_chunk.py +0 -105
  693. mirascope/core/mistral/dynamic_config.py +0 -33
  694. mirascope/core/mistral/py.typed +0 -0
  695. mirascope/core/mistral/stream.py +0 -120
  696. mirascope/core/mistral/tool.py +0 -81
  697. mirascope/core/openai/__init__.py +0 -31
  698. mirascope/core/openai/_call.py +0 -67
  699. mirascope/core/openai/_call_kwargs.py +0 -13
  700. mirascope/core/openai/_utils/__init__.py +0 -14
  701. mirascope/core/openai/_utils/_convert_common_call_params.py +0 -26
  702. mirascope/core/openai/_utils/_convert_message_params.py +0 -148
  703. mirascope/core/openai/_utils/_get_json_output.py +0 -31
  704. mirascope/core/openai/_utils/_handle_stream.py +0 -138
  705. mirascope/core/openai/_utils/_message_param_converter.py +0 -105
  706. mirascope/core/openai/_utils/_setup_call.py +0 -155
  707. mirascope/core/openai/call_params.py +0 -92
  708. mirascope/core/openai/call_response.py +0 -273
  709. mirascope/core/openai/call_response_chunk.py +0 -139
  710. mirascope/core/openai/dynamic_config.py +0 -34
  711. mirascope/core/openai/py.typed +0 -0
  712. mirascope/core/openai/stream.py +0 -185
  713. mirascope/core/openai/tool.py +0 -101
  714. mirascope/core/py.typed +0 -0
  715. mirascope/core/vertex/__init__.py +0 -45
  716. mirascope/core/vertex/_call.py +0 -62
  717. mirascope/core/vertex/_call_kwargs.py +0 -12
  718. mirascope/core/vertex/_utils/__init__.py +0 -14
  719. mirascope/core/vertex/_utils/_convert_common_call_params.py +0 -37
  720. mirascope/core/vertex/_utils/_convert_finish_reason_to_common_finish_reasons.py +0 -23
  721. mirascope/core/vertex/_utils/_convert_message_params.py +0 -171
  722. mirascope/core/vertex/_utils/_get_json_output.py +0 -36
  723. mirascope/core/vertex/_utils/_handle_stream.py +0 -33
  724. mirascope/core/vertex/_utils/_message_param_converter.py +0 -133
  725. mirascope/core/vertex/_utils/_setup_call.py +0 -160
  726. mirascope/core/vertex/call_params.py +0 -24
  727. mirascope/core/vertex/call_response.py +0 -206
  728. mirascope/core/vertex/call_response_chunk.py +0 -99
  729. mirascope/core/vertex/dynamic_config.py +0 -28
  730. mirascope/core/vertex/stream.py +0 -119
  731. mirascope/core/vertex/tool.py +0 -101
  732. mirascope/core/xai/__init__.py +0 -28
  733. mirascope/core/xai/_call.py +0 -67
  734. mirascope/core/xai/_utils/__init__.py +0 -5
  735. mirascope/core/xai/_utils/_setup_call.py +0 -113
  736. mirascope/core/xai/call_params.py +0 -10
  737. mirascope/core/xai/call_response.py +0 -16
  738. mirascope/core/xai/call_response_chunk.py +0 -14
  739. mirascope/core/xai/dynamic_config.py +0 -8
  740. mirascope/core/xai/py.typed +0 -0
  741. mirascope/core/xai/stream.py +0 -57
  742. mirascope/core/xai/tool.py +0 -13
  743. mirascope/experimental/graphs/__init__.py +0 -5
  744. mirascope/experimental/graphs/finite_state_machine.py +0 -714
  745. mirascope/integrations/__init__.py +0 -16
  746. mirascope/integrations/_middleware_factory.py +0 -403
  747. mirascope/integrations/langfuse/__init__.py +0 -3
  748. mirascope/integrations/langfuse/_utils.py +0 -114
  749. mirascope/integrations/langfuse/_with_langfuse.py +0 -70
  750. mirascope/integrations/logfire/__init__.py +0 -3
  751. mirascope/integrations/logfire/_utils.py +0 -225
  752. mirascope/integrations/logfire/_with_logfire.py +0 -63
  753. mirascope/integrations/otel/__init__.py +0 -10
  754. mirascope/integrations/otel/_utils.py +0 -270
  755. mirascope/integrations/otel/_with_hyperdx.py +0 -60
  756. mirascope/integrations/otel/_with_otel.py +0 -59
  757. mirascope/integrations/tenacity.py +0 -14
  758. mirascope/llm/_call.py +0 -401
  759. mirascope/llm/_context.py +0 -384
  760. mirascope/llm/_override.py +0 -3639
  761. mirascope/llm/_protocols.py +0 -500
  762. mirascope/llm/_response_metaclass.py +0 -31
  763. mirascope/llm/call_response.py +0 -158
  764. mirascope/llm/call_response_chunk.py +0 -66
  765. mirascope/llm/stream.py +0 -162
  766. mirascope/llm/tool.py +0 -64
  767. mirascope/mcp/__init__.py +0 -7
  768. mirascope/mcp/_utils.py +0 -288
  769. mirascope/mcp/client.py +0 -167
  770. mirascope/mcp/server.py +0 -356
  771. mirascope/mcp/tools.py +0 -110
  772. mirascope/py.typed +0 -0
  773. mirascope/retries/__init__.py +0 -11
  774. mirascope/retries/fallback.py +0 -131
  775. mirascope/retries/tenacity.py +0 -50
  776. mirascope/tools/__init__.py +0 -37
  777. mirascope/tools/base.py +0 -98
  778. mirascope/tools/system/__init__.py +0 -0
  779. mirascope/tools/system/_docker_operation.py +0 -166
  780. mirascope/tools/system/_file_system.py +0 -267
  781. mirascope/tools/web/__init__.py +0 -0
  782. mirascope/tools/web/_duckduckgo.py +0 -111
  783. mirascope/tools/web/_httpx.py +0 -125
  784. mirascope/tools/web/_parse_url_content.py +0 -94
  785. mirascope/tools/web/_requests.py +0 -54
  786. mirascope/v0/__init__.py +0 -43
  787. mirascope/v0/anthropic.py +0 -54
  788. mirascope/v0/base/__init__.py +0 -12
  789. mirascope/v0/base/calls.py +0 -118
  790. mirascope/v0/base/extractors.py +0 -122
  791. mirascope/v0/base/ops_utils.py +0 -207
  792. mirascope/v0/base/prompts.py +0 -48
  793. mirascope/v0/base/types.py +0 -14
  794. mirascope/v0/base/utils.py +0 -21
  795. mirascope/v0/openai.py +0 -54
  796. mirascope-1.25.7.dist-info/METADATA +0 -169
  797. mirascope-1.25.7.dist-info/RECORD +0 -378
@@ -0,0 +1,1419 @@
1
+ """The model context manager for the `llm` module."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from collections.abc import Sequence
6
+ from contextvars import ContextVar, Token
7
+ from types import TracebackType
8
+ from typing import overload
9
+ from typing_extensions import Unpack
10
+
11
+ from ..context import Context, DepsT
12
+ from ..formatting import Format, FormattableT, OutputParser
13
+ from ..messages import Message, UserContent, promote_to_messages
14
+ from ..providers import (
15
+ ModelId,
16
+ Provider,
17
+ ProviderId,
18
+ get_provider_for_model,
19
+ )
20
+ from ..responses import (
21
+ AsyncContextResponse,
22
+ AsyncContextStreamResponse,
23
+ AsyncResponse,
24
+ AsyncStreamResponse,
25
+ ContextResponse,
26
+ ContextStreamResponse,
27
+ Response,
28
+ StreamResponse,
29
+ )
30
+ from ..tools import (
31
+ AsyncContextTool,
32
+ AsyncContextToolkit,
33
+ AsyncTool,
34
+ AsyncToolkit,
35
+ ContextTool,
36
+ ContextToolkit,
37
+ Tool,
38
+ Toolkit,
39
+ )
40
+ from .params import Params
41
+
42
# Holds the Model most recently activated as a context manager (None when no
# model context is active).
MODEL_CONTEXT: ContextVar[Model | None] = ContextVar("MODEL_CONTEXT", default=None)


def model_from_context() -> Model | None:
    """Return the `Model` currently active via context, or `None` if unset."""
    return MODEL_CONTEXT.get()
48
+
49
+
50
+ class Model:
51
+ """The unified LLM interface that delegates to provider-specific clients.
52
+
53
+ This class provides a consistent interface for interacting with language models
54
+ from various providers. It handles the common operations like generating responses,
55
+ streaming, and async variants by delegating to the appropriate client methods.
56
+
57
+ **Usage Note:** In most cases, you should use `llm.use_model()` instead of instantiating
58
+ `Model` directly. This preserves the ability to override the model at runtime using
59
+ the `llm.model()` context manager. Only instantiate `Model` directly if you want to
60
+ hardcode a specific model and prevent it from being overridden by context.
61
+
62
+ Example (recommended - allows override):
63
+
64
+ ```python
65
+ from mirascope import llm
66
+
67
+ def recommend_book(genre: str) -> llm.Response:
68
+ # Uses context model if available, otherwise creates default
69
+ model = llm.use_model("openai/gpt-5-mini")
70
+ return model.call(f"Please recommend a book in {genre}.")
71
+
72
+ # Uses default model
73
+ response = recommend_book("fantasy")
74
+
75
+ # Override with different model
76
+ with llm.model(provider="anthropic", model_id="anthropic/claude-sonnet-4-5"):
77
+ response = recommend_book("fantasy") # Uses Claude
78
+ ```
79
+
80
+ Example (direct instantiation - prevents override):
81
+
82
+ ```python
83
+ from mirascope import llm
84
+
85
+ def recommend_book(genre: str) -> llm.Response:
86
+ # Hardcoded model, cannot be overridden by context
87
+ model = llm.Model("openai/gpt-5-mini")
88
+ return model.call(f"Please recommend a book in {genre}.")
89
+ ```
90
+ """
91
+
92
+ model_id: ModelId
93
+ """The model being used (e.g. `"openai/gpt-4o-mini"`)."""
94
+
95
+ params: Params
96
+ """The default parameters for the model (temperature, max_tokens, etc.)."""
97
+
98
+ def __init__(
99
+ self,
100
+ model_id: ModelId,
101
+ **params: Unpack[Params],
102
+ ) -> None:
103
+ """Initialize the Model with model_id and optional params."""
104
+ if "/" not in model_id:
105
+ raise ValueError(
106
+ "Invalid model_id format. Expected format: 'provider/model-name' "
107
+ f"(e.g., 'openai/gpt-4'). Got: '{model_id}'"
108
+ )
109
+ self.model_id = model_id
110
+ self.params = params
111
+ self._token_stack: list[Token[Model | None]] = []
112
+
113
+ @property
114
+ def provider(self) -> Provider:
115
+ """The provider being used (e.g. an `OpenAIProvider`).
116
+
117
+ This property dynamically looks up the provider from the registry based on
118
+ the current model_id. This allows provider overrides via `llm.register_provider()`
119
+ to take effect even after the model instance is created.
120
+
121
+ Raises:
122
+ NoRegisteredProviderError: If no provider is available for the model_id
123
+ """
124
+ return get_provider_for_model(self.model_id)
125
+
126
+ @property
127
+ def provider_id(self) -> ProviderId:
128
+ """The string id of the provider being used (e.g. `"openai"`).
129
+
130
+ This property returns the `id` field of the dynamically resolved provider.
131
+
132
+ Raises:
133
+ NoRegisteredProviderError: If no provider is available for the model_id
134
+ """
135
+ return self.provider.id
136
+
137
+ def __enter__(self) -> Model:
138
+ """Enter the context manager, setting this model in context."""
139
+ token = MODEL_CONTEXT.set(self)
140
+ self._token_stack.append(token)
141
+ return self
142
+
143
+ def __exit__(
144
+ self,
145
+ exc_type: type[BaseException] | None,
146
+ exc_val: BaseException | None,
147
+ exc_tb: TracebackType | None,
148
+ ) -> None:
149
+ """Exit the context manager, resetting the model context."""
150
+ if self._token_stack:
151
+ token = self._token_stack.pop()
152
+ MODEL_CONTEXT.reset(token)
153
+
154
+ @overload
155
+ def call(
156
+ self,
157
+ content: UserContent | Sequence[Message],
158
+ *,
159
+ tools: Sequence[Tool] | Toolkit | None = None,
160
+ format: None = None,
161
+ ) -> Response:
162
+ """Generate an `llm.Response` without a response format."""
163
+ ...
164
+
165
+ @overload
166
+ def call(
167
+ self,
168
+ content: UserContent | Sequence[Message],
169
+ *,
170
+ tools: Sequence[Tool] | Toolkit | None = None,
171
+ format: type[FormattableT] | Format[FormattableT],
172
+ ) -> Response[FormattableT]:
173
+ """Generate an `llm.Response` with a response format."""
174
+ ...
175
+
176
+ @overload
177
+ def call(
178
+ self,
179
+ content: UserContent | Sequence[Message],
180
+ *,
181
+ tools: Sequence[Tool] | Toolkit | None = None,
182
+ format: type[FormattableT]
183
+ | Format[FormattableT]
184
+ | OutputParser[FormattableT]
185
+ | None,
186
+ ) -> Response | Response[FormattableT]:
187
+ """Generate an `llm.Response` with an optional response format."""
188
+ ...
189
+
190
+ def call(
191
+ self,
192
+ content: UserContent | Sequence[Message],
193
+ *,
194
+ tools: Sequence[Tool] | Toolkit | None = None,
195
+ format: type[FormattableT]
196
+ | Format[FormattableT]
197
+ | OutputParser[FormattableT]
198
+ | None = None,
199
+ ) -> Response | Response[FormattableT]:
200
+ """Generate an `llm.Response` by synchronously calling this model's LLM provider.
201
+
202
+ Args:
203
+ content: Content to send to the LLM. Can be a string (converted to user
204
+ message), UserContent, a sequence of UserContent, or a sequence of
205
+ Messages for full control.
206
+ tools: Optional tools that the model may invoke.
207
+ format: Optional response format specifier.
208
+
209
+ Returns:
210
+ An `llm.Response` object containing the LLM-generated content.
211
+ """
212
+ messages = promote_to_messages(content)
213
+ return self.provider.call(
214
+ model_id=self.model_id,
215
+ messages=messages,
216
+ tools=tools,
217
+ format=format,
218
+ **self.params,
219
+ )
220
+
221
+ @overload
222
+ async def call_async(
223
+ self,
224
+ content: UserContent | Sequence[Message],
225
+ *,
226
+ tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
227
+ format: None = None,
228
+ ) -> AsyncResponse:
229
+ """Generate an `llm.AsyncResponse` without a response format."""
230
+ ...
231
+
232
+ @overload
233
+ async def call_async(
234
+ self,
235
+ content: UserContent | Sequence[Message],
236
+ *,
237
+ tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
238
+ format: type[FormattableT] | Format[FormattableT],
239
+ ) -> AsyncResponse[FormattableT]:
240
+ """Generate an `llm.AsyncResponse` with a response format."""
241
+ ...
242
+
243
+ @overload
244
+ async def call_async(
245
+ self,
246
+ content: UserContent | Sequence[Message],
247
+ *,
248
+ tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
249
+ format: type[FormattableT]
250
+ | Format[FormattableT]
251
+ | OutputParser[FormattableT]
252
+ | None,
253
+ ) -> AsyncResponse | AsyncResponse[FormattableT]:
254
+ """Generate an `llm.AsyncResponse` with an optional response format."""
255
+ ...
256
+
257
+ async def call_async(
258
+ self,
259
+ content: UserContent | Sequence[Message],
260
+ *,
261
+ tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
262
+ format: type[FormattableT]
263
+ | Format[FormattableT]
264
+ | OutputParser[FormattableT]
265
+ | None = None,
266
+ ) -> AsyncResponse | AsyncResponse[FormattableT]:
267
+ """Generate an `llm.AsyncResponse` by asynchronously calling this model's LLM provider.
268
+
269
+ Args:
270
+ content: Content to send to the LLM. Can be a string (converted to user
271
+ message), UserContent, a sequence of UserContent, or a sequence of
272
+ Messages for full control.
273
+ tools: Optional tools that the model may invoke.
274
+ format: Optional response format specifier.
275
+
276
+ Returns:
277
+ An `llm.AsyncResponse` object containing the LLM-generated content.
278
+ """
279
+ messages = promote_to_messages(content)
280
+ return await self.provider.call_async(
281
+ model_id=self.model_id,
282
+ messages=messages,
283
+ tools=tools,
284
+ **self.params,
285
+ format=format,
286
+ )
287
+
288
+ @overload
289
+ def stream(
290
+ self,
291
+ content: UserContent | Sequence[Message],
292
+ *,
293
+ tools: Sequence[Tool] | Toolkit | None = None,
294
+ format: None = None,
295
+ ) -> StreamResponse:
296
+ """Stream an `llm.StreamResponse` without a response format."""
297
+ ...
298
+
299
+ @overload
300
+ def stream(
301
+ self,
302
+ content: UserContent | Sequence[Message],
303
+ *,
304
+ tools: Sequence[Tool] | Toolkit | None = None,
305
+ format: type[FormattableT] | Format[FormattableT],
306
+ ) -> StreamResponse[FormattableT]:
307
+ """Stream an `llm.StreamResponse` with a response format."""
308
+ ...
309
+
310
+ @overload
311
+ def stream(
312
+ self,
313
+ content: UserContent | Sequence[Message],
314
+ *,
315
+ tools: Sequence[Tool] | Toolkit | None = None,
316
+ format: type[FormattableT]
317
+ | Format[FormattableT]
318
+ | OutputParser[FormattableT]
319
+ | None,
320
+ ) -> StreamResponse | StreamResponse[FormattableT]:
321
+ """Stream an `llm.StreamResponse` with an optional response format."""
322
+ ...
323
+
324
+ def stream(
325
+ self,
326
+ content: UserContent | Sequence[Message],
327
+ *,
328
+ tools: Sequence[Tool] | Toolkit | None = None,
329
+ format: type[FormattableT]
330
+ | Format[FormattableT]
331
+ | OutputParser[FormattableT]
332
+ | None = None,
333
+ ) -> StreamResponse | StreamResponse[FormattableT]:
334
+ """Generate an `llm.StreamResponse` by synchronously streaming from this model's LLM provider.
335
+
336
+ Args:
337
+ content: Content to send to the LLM. Can be a string (converted to user
338
+ message), UserContent, a sequence of UserContent, or a sequence of
339
+ Messages for full control.
340
+ tools: Optional tools that the model may invoke.
341
+ format: Optional response format specifier.
342
+
343
+ Returns:
344
+ An `llm.StreamResponse` object for iterating over the LLM-generated content.
345
+ """
346
+ messages = promote_to_messages(content)
347
+ return self.provider.stream(
348
+ model_id=self.model_id,
349
+ messages=messages,
350
+ tools=tools,
351
+ format=format,
352
+ **self.params,
353
+ )
354
+
355
+ @overload
356
+ async def stream_async(
357
+ self,
358
+ content: UserContent | Sequence[Message],
359
+ *,
360
+ tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
361
+ format: None = None,
362
+ ) -> AsyncStreamResponse:
363
+ """Stream an `llm.AsyncStreamResponse` without a response format."""
364
+ ...
365
+
366
+ @overload
367
+ async def stream_async(
368
+ self,
369
+ content: UserContent | Sequence[Message],
370
+ *,
371
+ tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
372
+ format: type[FormattableT] | Format[FormattableT],
373
+ ) -> AsyncStreamResponse[FormattableT]:
374
+ """Stream an `llm.AsyncStreamResponse` with a response format."""
375
+ ...
376
+
377
+ @overload
378
+ async def stream_async(
379
+ self,
380
+ content: UserContent | Sequence[Message],
381
+ *,
382
+ tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
383
+ format: type[FormattableT]
384
+ | Format[FormattableT]
385
+ | OutputParser[FormattableT]
386
+ | None,
387
+ ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
388
+ """Stream an `llm.AsyncStreamResponse` with an optional response format."""
389
+ ...
390
+
391
+ async def stream_async(
392
+ self,
393
+ content: UserContent | Sequence[Message],
394
+ *,
395
+ tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
396
+ format: type[FormattableT]
397
+ | Format[FormattableT]
398
+ | OutputParser[FormattableT]
399
+ | None = None,
400
+ ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
401
+ """Generate an `llm.AsyncStreamResponse` by asynchronously streaming from this model's LLM provider.
402
+
403
+ Args:
404
+ content: Content to send to the LLM. Can be a string (converted to user
405
+ message), UserContent, a sequence of UserContent, or a sequence of
406
+ Messages for full control.
407
+ tools: Optional tools that the model may invoke.
408
+ format: Optional response format specifier.
409
+
410
+ Returns:
411
+ An `llm.AsyncStreamResponse` object for asynchronously iterating over the LLM-generated content.
412
+ """
413
+ messages = promote_to_messages(content)
414
+ return await self.provider.stream_async(
415
+ model_id=self.model_id,
416
+ messages=messages,
417
+ tools=tools,
418
+ format=format,
419
+ **self.params,
420
+ )
421
+
422
+ @overload
423
+ def context_call(
424
+ self,
425
+ content: UserContent | Sequence[Message],
426
+ *,
427
+ ctx: Context[DepsT],
428
+ tools: Sequence[Tool | ContextTool[DepsT]]
429
+ | ContextToolkit[DepsT]
430
+ | None = None,
431
+ format: None = None,
432
+ ) -> ContextResponse[DepsT, None]:
433
+ """Generate an `llm.ContextResponse` without a response format."""
434
+ ...
435
+
436
+ @overload
437
+ def context_call(
438
+ self,
439
+ content: UserContent | Sequence[Message],
440
+ *,
441
+ ctx: Context[DepsT],
442
+ tools: Sequence[Tool | ContextTool[DepsT]]
443
+ | ContextToolkit[DepsT]
444
+ | None = None,
445
+ format: type[FormattableT] | Format[FormattableT],
446
+ ) -> ContextResponse[DepsT, FormattableT]:
447
+ """Generate an `llm.ContextResponse` with a response format."""
448
+ ...
449
+
450
+ @overload
451
+ def context_call(
452
+ self,
453
+ content: UserContent | Sequence[Message],
454
+ *,
455
+ ctx: Context[DepsT],
456
+ tools: Sequence[Tool | ContextTool[DepsT]]
457
+ | ContextToolkit[DepsT]
458
+ | None = None,
459
+ format: type[FormattableT]
460
+ | Format[FormattableT]
461
+ | OutputParser[FormattableT]
462
+ | None,
463
+ ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
464
+ """Generate an `llm.ContextResponse` with an optional response format."""
465
+ ...
466
+
467
+ def context_call(
468
+ self,
469
+ content: UserContent | Sequence[Message],
470
+ *,
471
+ ctx: Context[DepsT],
472
+ tools: Sequence[Tool | ContextTool[DepsT]]
473
+ | ContextToolkit[DepsT]
474
+ | None = None,
475
+ format: type[FormattableT]
476
+ | Format[FormattableT]
477
+ | OutputParser[FormattableT]
478
+ | None = None,
479
+ ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
480
+ """Generate an `llm.ContextResponse` by synchronously calling this model's LLM provider.
481
+
482
+ Args:
483
+ content: Content to send to the LLM. Can be a string (converted to user
484
+ message), UserContent, a sequence of UserContent, or a sequence of
485
+ Messages for full control.
486
+ ctx: Context object with dependencies for tools.
487
+ tools: Optional tools that the model may invoke.
488
+ format: Optional response format specifier.
489
+
490
+ Returns:
491
+ An `llm.ContextResponse` object containing the LLM-generated content.
492
+ """
493
+ messages = promote_to_messages(content)
494
+ return self.provider.context_call(
495
+ ctx=ctx,
496
+ model_id=self.model_id,
497
+ messages=messages,
498
+ tools=tools,
499
+ format=format,
500
+ **self.params,
501
+ )
502
+
503
+ @overload
504
+ async def context_call_async(
505
+ self,
506
+ content: UserContent | Sequence[Message],
507
+ *,
508
+ ctx: Context[DepsT],
509
+ tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
510
+ | AsyncContextToolkit[DepsT]
511
+ | None = None,
512
+ format: None = None,
513
+ ) -> AsyncContextResponse[DepsT, None]:
514
+ """Generate an `llm.AsyncContextResponse` without a response format."""
515
+ ...
516
+
517
+ @overload
518
+ async def context_call_async(
519
+ self,
520
+ content: UserContent | Sequence[Message],
521
+ *,
522
+ ctx: Context[DepsT],
523
+ tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
524
+ | AsyncContextToolkit[DepsT]
525
+ | None = None,
526
+ format: type[FormattableT] | Format[FormattableT],
527
+ ) -> AsyncContextResponse[DepsT, FormattableT]:
528
+ """Generate an `llm.AsyncContextResponse` with a response format."""
529
+ ...
530
+
531
+ @overload
532
+ async def context_call_async(
533
+ self,
534
+ content: UserContent | Sequence[Message],
535
+ *,
536
+ ctx: Context[DepsT],
537
+ tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
538
+ | AsyncContextToolkit[DepsT]
539
+ | None = None,
540
+ format: type[FormattableT]
541
+ | Format[FormattableT]
542
+ | OutputParser[FormattableT]
543
+ | None,
544
+ ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
545
+ """Generate an `llm.AsyncContextResponse` with an optional response format."""
546
+ ...
547
+
548
+ async def context_call_async(
549
+ self,
550
+ content: UserContent | Sequence[Message],
551
+ *,
552
+ ctx: Context[DepsT],
553
+ tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
554
+ | AsyncContextToolkit[DepsT]
555
+ | None = None,
556
+ format: type[FormattableT]
557
+ | Format[FormattableT]
558
+ | OutputParser[FormattableT]
559
+ | None = None,
560
+ ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
561
+ """Generate an `llm.AsyncContextResponse` by asynchronously calling this model's LLM provider.
562
+
563
+ Args:
564
+ content: Content to send to the LLM. Can be a string (converted to user
565
+ message), UserContent, a sequence of UserContent, or a sequence of
566
+ Messages for full control.
567
+ ctx: Context object with dependencies for tools.
568
+ tools: Optional tools that the model may invoke.
569
+ format: Optional response format specifier.
570
+
571
+ Returns:
572
+ An `llm.AsyncContextResponse` object containing the LLM-generated content.
573
+ """
574
+ messages = promote_to_messages(content)
575
+ return await self.provider.context_call_async(
576
+ ctx=ctx,
577
+ model_id=self.model_id,
578
+ messages=messages,
579
+ tools=tools,
580
+ format=format,
581
+ **self.params,
582
+ )
583
+
584
+ @overload
585
+ def context_stream(
586
+ self,
587
+ content: UserContent | Sequence[Message],
588
+ *,
589
+ ctx: Context[DepsT],
590
+ tools: Sequence[Tool | ContextTool[DepsT]]
591
+ | ContextToolkit[DepsT]
592
+ | None = None,
593
+ format: None = None,
594
+ ) -> ContextStreamResponse[DepsT, None]:
595
+ """Stream an `llm.ContextStreamResponse` without a response format."""
596
+ ...
597
+
598
+ @overload
599
+ def context_stream(
600
+ self,
601
+ content: UserContent | Sequence[Message],
602
+ *,
603
+ ctx: Context[DepsT],
604
+ tools: Sequence[Tool | ContextTool[DepsT]]
605
+ | ContextToolkit[DepsT]
606
+ | None = None,
607
+ format: type[FormattableT] | Format[FormattableT],
608
+ ) -> ContextStreamResponse[DepsT, FormattableT]:
609
+ """Stream an `llm.ContextStreamResponse` with a response format."""
610
+ ...
611
+
612
+ @overload
613
+ def context_stream(
614
+ self,
615
+ content: UserContent | Sequence[Message],
616
+ *,
617
+ ctx: Context[DepsT],
618
+ tools: Sequence[Tool | ContextTool[DepsT]]
619
+ | ContextToolkit[DepsT]
620
+ | None = None,
621
+ format: type[FormattableT]
622
+ | Format[FormattableT]
623
+ | OutputParser[FormattableT]
624
+ | None,
625
+ ) -> (
626
+ ContextStreamResponse[DepsT, None] | ContextStreamResponse[DepsT, FormattableT]
627
+ ):
628
+ """Stream an `llm.ContextStreamResponse` with an optional response format."""
629
+ ...
630
+
631
+ def context_stream(
632
+ self,
633
+ content: UserContent | Sequence[Message],
634
+ *,
635
+ ctx: Context[DepsT],
636
+ tools: Sequence[Tool | ContextTool[DepsT]]
637
+ | ContextToolkit[DepsT]
638
+ | None = None,
639
+ format: type[FormattableT]
640
+ | Format[FormattableT]
641
+ | OutputParser[FormattableT]
642
+ | None = None,
643
+ ) -> (
644
+ ContextStreamResponse[DepsT, None] | ContextStreamResponse[DepsT, FormattableT]
645
+ ):
646
+ """Generate an `llm.ContextStreamResponse` by synchronously streaming from this model's LLM provider.
647
+
648
+ Args:
649
+ content: Content to send to the LLM. Can be a string (converted to user
650
+ message), UserContent, a sequence of UserContent, or a sequence of
651
+ Messages for full control.
652
+ ctx: Context object with dependencies for tools.
653
+ tools: Optional tools that the model may invoke.
654
+ format: Optional response format specifier.
655
+
656
+ Returns:
657
+ An `llm.ContextStreamResponse` object for iterating over the LLM-generated content.
658
+ """
659
+ messages = promote_to_messages(content)
660
+ return self.provider.context_stream(
661
+ ctx=ctx,
662
+ model_id=self.model_id,
663
+ messages=messages,
664
+ tools=tools,
665
+ format=format,
666
+ **self.params,
667
+ )
668
+
669
+ @overload
670
+ async def context_stream_async(
671
+ self,
672
+ content: UserContent | Sequence[Message],
673
+ *,
674
+ ctx: Context[DepsT],
675
+ tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
676
+ | AsyncContextToolkit[DepsT]
677
+ | None = None,
678
+ format: None = None,
679
+ ) -> AsyncContextStreamResponse[DepsT, None]:
680
+ """Stream an `llm.AsyncContextStreamResponse` without a response format."""
681
+ ...
682
+
683
+ @overload
684
+ async def context_stream_async(
685
+ self,
686
+ content: UserContent | Sequence[Message],
687
+ *,
688
+ ctx: Context[DepsT],
689
+ tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
690
+ | AsyncContextToolkit[DepsT]
691
+ | None = None,
692
+ format: type[FormattableT] | Format[FormattableT],
693
+ ) -> AsyncContextStreamResponse[DepsT, FormattableT]:
694
+ """Stream an `llm.AsyncContextStreamResponse` with a response format."""
695
+ ...
696
+
697
+ @overload
698
+ async def context_stream_async(
699
+ self,
700
+ content: UserContent | Sequence[Message],
701
+ *,
702
+ ctx: Context[DepsT],
703
+ tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
704
+ | AsyncContextToolkit[DepsT]
705
+ | None = None,
706
+ format: type[FormattableT]
707
+ | Format[FormattableT]
708
+ | OutputParser[FormattableT]
709
+ | None,
710
+ ) -> (
711
+ AsyncContextStreamResponse[DepsT, None]
712
+ | AsyncContextStreamResponse[DepsT, FormattableT]
713
+ ):
714
+ """Stream an `llm.AsyncContextStreamResponse` with an optional response format."""
715
+ ...
716
+
717
+ async def context_stream_async(
718
+ self,
719
+ content: UserContent | Sequence[Message],
720
+ *,
721
+ ctx: Context[DepsT],
722
+ tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
723
+ | AsyncContextToolkit[DepsT]
724
+ | None = None,
725
+ format: type[FormattableT]
726
+ | Format[FormattableT]
727
+ | OutputParser[FormattableT]
728
+ | None = None,
729
+ ) -> (
730
+ AsyncContextStreamResponse[DepsT, None]
731
+ | AsyncContextStreamResponse[DepsT, FormattableT]
732
+ ):
733
+ """Generate an `llm.AsyncContextStreamResponse` by asynchronously streaming from this model's LLM provider.
734
+
735
+ Args:
736
+ content: Content to send to the LLM. Can be a string (converted to user
737
+ message), UserContent, a sequence of UserContent, or a sequence of
738
+ Messages for full control.
739
+ ctx: Context object with dependencies for tools.
740
+ tools: Optional tools that the model may invoke.
741
+ format: Optional response format specifier.
742
+
743
+ Returns:
744
+ An `llm.AsyncContextStreamResponse` object for asynchronously iterating over the LLM-generated content.
745
+ """
746
+ messages = promote_to_messages(content)
747
+ return await self.provider.context_stream_async(
748
+ ctx=ctx,
749
+ model_id=self.model_id,
750
+ messages=messages,
751
+ tools=tools,
752
+ format=format,
753
+ **self.params,
754
+ )
755
+
756
+ @overload
757
+ def resume(
758
+ self,
759
+ *,
760
+ response: Response,
761
+ content: UserContent,
762
+ ) -> Response:
763
+ """Resume an `llm.Response` without a response format."""
764
+ ...
765
+
766
+ @overload
767
+ def resume(
768
+ self,
769
+ *,
770
+ response: Response[FormattableT],
771
+ content: UserContent,
772
+ ) -> Response[FormattableT]:
773
+ """Resume an `llm.Response` with a response format."""
774
+ ...
775
+
776
+ @overload
777
+ def resume(
778
+ self,
779
+ *,
780
+ response: Response | Response[FormattableT],
781
+ content: UserContent,
782
+ ) -> Response | Response[FormattableT]:
783
+ """Resume an `llm.Response` with an optional response format."""
784
+ ...
785
+
786
+ def resume(
787
+ self,
788
+ *,
789
+ response: Response | Response[FormattableT],
790
+ content: UserContent,
791
+ ) -> Response | Response[FormattableT]:
792
+ """Generate a new `llm.Response` by extending another response's messages with additional user content.
793
+
794
+ Uses the previous response's tools and output format, and this model's params.
795
+
796
+ Depending on the client, this may be a wrapper around using client call methods
797
+ with the response's messages and the new content, or it may use a provider-specific
798
+ API for resuming an existing interaction.
799
+
800
+ Args:
801
+ response: Previous response to extend.
802
+ content: Additional user content to append.
803
+
804
+ Returns:
805
+ A new `llm.Response` object containing the extended conversation.
806
+ """
807
+ return self.provider.resume(
808
+ model_id=self.model_id,
809
+ response=response,
810
+ content=content,
811
+ **self.params,
812
+ )
813
+
814
+ @overload
815
+ async def resume_async(
816
+ self,
817
+ *,
818
+ response: AsyncResponse,
819
+ content: UserContent,
820
+ ) -> AsyncResponse:
821
+ """Resume an `llm.AsyncResponse` without a response format."""
822
+ ...
823
+
824
+ @overload
825
+ async def resume_async(
826
+ self,
827
+ *,
828
+ response: AsyncResponse[FormattableT],
829
+ content: UserContent,
830
+ ) -> AsyncResponse[FormattableT]:
831
+ """Resume an `llm.AsyncResponse` with a response format."""
832
+ ...
833
+
834
+ @overload
835
+ async def resume_async(
836
+ self,
837
+ *,
838
+ response: AsyncResponse | AsyncResponse[FormattableT],
839
+ content: UserContent,
840
+ ) -> AsyncResponse | AsyncResponse[FormattableT]:
841
+ """Resume an `llm.AsyncResponse` with an optional response format."""
842
+ ...
843
+
844
+ async def resume_async(
845
+ self,
846
+ *,
847
+ response: AsyncResponse | AsyncResponse[FormattableT],
848
+ content: UserContent,
849
+ ) -> AsyncResponse | AsyncResponse[FormattableT]:
850
+ """Generate a new `llm.AsyncResponse` by extending another response's messages with additional user content.
851
+
852
+ Uses the previous response's tools and output format, and this model's params.
853
+
854
+ Depending on the client, this may be a wrapper around using client call methods
855
+ with the response's messages and the new content, or it may use a provider-specific
856
+ API for resuming an existing interaction.
857
+
858
+ Args:
859
+ response: Previous async response to extend.
860
+ content: Additional user content to append.
861
+
862
+ Returns:
863
+ A new `llm.AsyncResponse` object containing the extended conversation.
864
+ """
865
+ return await self.provider.resume_async(
866
+ model_id=self.model_id,
867
+ response=response,
868
+ content=content,
869
+ **self.params,
870
+ )
871
+
872
+ @overload
873
+ def context_resume(
874
+ self,
875
+ *,
876
+ ctx: Context[DepsT],
877
+ response: ContextResponse[DepsT, None],
878
+ content: UserContent,
879
+ ) -> ContextResponse[DepsT, None]:
880
+ """Resume an `llm.ContextResponse` without a response format."""
881
+ ...
882
+
883
+ @overload
884
+ def context_resume(
885
+ self,
886
+ *,
887
+ ctx: Context[DepsT],
888
+ response: ContextResponse[DepsT, FormattableT],
889
+ content: UserContent,
890
+ ) -> ContextResponse[DepsT, FormattableT]:
891
+ """Resume an `llm.ContextResponse` with a response format."""
892
+ ...
893
+
894
+ @overload
895
+ def context_resume(
896
+ self,
897
+ *,
898
+ ctx: Context[DepsT],
899
+ response: ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT],
900
+ content: UserContent,
901
+ ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
902
+ """Resume an `llm.ContextResponse` with an optional response format."""
903
+ ...
904
+
905
+ def context_resume(
906
+ self,
907
+ *,
908
+ ctx: Context[DepsT],
909
+ response: ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT],
910
+ content: UserContent,
911
+ ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
912
+ """Generate a new `llm.ContextResponse` by extending another response's messages with additional user content.
913
+
914
+ Uses the previous response's tools and output format, and this model's params.
915
+
916
+ Depending on the client, this may be a wrapper around using client call methods
917
+ with the response's messages and the new content, or it may use a provider-specific
918
+ API for resuming an existing interaction.
919
+
920
+ Args:
921
+ ctx: Context object with dependencies for tools.
922
+ response: Previous context response to extend.
923
+ content: Additional user content to append.
924
+
925
+ Returns:
926
+ A new `llm.ContextResponse` object containing the extended conversation.
927
+ """
928
+ return self.provider.context_resume(
929
+ ctx=ctx,
930
+ model_id=self.model_id,
931
+ response=response,
932
+ content=content,
933
+ **self.params,
934
+ )
935
+
936
+ @overload
937
+ async def context_resume_async(
938
+ self,
939
+ *,
940
+ ctx: Context[DepsT],
941
+ response: AsyncContextResponse[DepsT, None],
942
+ content: UserContent,
943
+ ) -> AsyncContextResponse[DepsT, None]:
944
+ """Resume an `llm.AsyncContextResponse` without a response format."""
945
+ ...
946
+
947
+ @overload
948
+ async def context_resume_async(
949
+ self,
950
+ *,
951
+ ctx: Context[DepsT],
952
+ response: AsyncContextResponse[DepsT, FormattableT],
953
+ content: UserContent,
954
+ ) -> AsyncContextResponse[DepsT, FormattableT]:
955
+ """Resume an `llm.AsyncContextResponse` with a response format."""
956
+ ...
957
+
958
+ @overload
959
+ async def context_resume_async(
960
+ self,
961
+ *,
962
+ ctx: Context[DepsT],
963
+ response: AsyncContextResponse[DepsT, None]
964
+ | AsyncContextResponse[DepsT, FormattableT],
965
+ content: UserContent,
966
+ ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
967
+ """Resume an `llm.AsyncContextResponse` with an optional response format."""
968
+ ...
969
+
970
+ async def context_resume_async(
971
+ self,
972
+ *,
973
+ ctx: Context[DepsT],
974
+ response: AsyncContextResponse[DepsT, None]
975
+ | AsyncContextResponse[DepsT, FormattableT],
976
+ content: UserContent,
977
+ ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
978
+ """Generate a new `llm.AsyncContextResponse` by extending another response's messages with additional user content.
979
+
980
+ Uses the previous response's tools and output format, and this model's params.
981
+
982
+ Depending on the client, this may be a wrapper around using client call methods
983
+ with the response's messages and the new content, or it may use a provider-specific
984
+ API for resuming an existing interaction.
985
+
986
+ Args:
987
+ ctx: Context object with dependencies for tools.
988
+ response: Previous async context response to extend.
989
+ content: Additional user content to append.
990
+
991
+ Returns:
992
+ A new `llm.AsyncContextResponse` object containing the extended conversation.
993
+ """
994
+ return await self.provider.context_resume_async(
995
+ ctx=ctx,
996
+ model_id=self.model_id,
997
+ response=response,
998
+ content=content,
999
+ **self.params,
1000
+ )
1001
+
1002
+ @overload
1003
+ def resume_stream(
1004
+ self,
1005
+ *,
1006
+ response: StreamResponse,
1007
+ content: UserContent,
1008
+ ) -> StreamResponse:
1009
+ """Resume an `llm.StreamResponse` without a response format."""
1010
+ ...
1011
+
1012
+ @overload
1013
+ def resume_stream(
1014
+ self,
1015
+ *,
1016
+ response: StreamResponse[FormattableT],
1017
+ content: UserContent,
1018
+ ) -> StreamResponse[FormattableT]:
1019
+ """Resume an `llm.StreamResponse` with a response format."""
1020
+ ...
1021
+
1022
+ @overload
1023
+ def resume_stream(
1024
+ self,
1025
+ *,
1026
+ response: StreamResponse | StreamResponse[FormattableT],
1027
+ content: UserContent,
1028
+ ) -> StreamResponse | StreamResponse[FormattableT]:
1029
+ """Resume an `llm.StreamResponse` with an optional response format."""
1030
+ ...
1031
+
1032
+ def resume_stream(
1033
+ self,
1034
+ *,
1035
+ response: StreamResponse | StreamResponse[FormattableT],
1036
+ content: UserContent,
1037
+ ) -> StreamResponse | StreamResponse[FormattableT]:
1038
+ """Generate a new `llm.StreamResponse` by extending another response's messages with additional user content.
1039
+
1040
+ Uses the previous response's tools and output format, and this model's params.
1041
+
1042
+ Depending on the client, this may be a wrapper around using client call methods
1043
+ with the response's messages and the new content, or it may use a provider-specific
1044
+ API for resuming an existing interaction.
1045
+
1046
+ Args:
1047
+ response: Previous stream response to extend.
1048
+ content: Additional user content to append.
1049
+
1050
+ Returns:
1051
+ A new `llm.StreamResponse` object for streaming the extended conversation.
1052
+ """
1053
+ return self.provider.resume_stream(
1054
+ model_id=self.model_id,
1055
+ response=response,
1056
+ content=content,
1057
+ **self.params,
1058
+ )
1059
+
1060
+ @overload
1061
+ async def resume_stream_async(
1062
+ self,
1063
+ *,
1064
+ response: AsyncStreamResponse,
1065
+ content: UserContent,
1066
+ ) -> AsyncStreamResponse:
1067
+ """Resume an `llm.AsyncStreamResponse` without a response format."""
1068
+ ...
1069
+
1070
+ @overload
1071
+ async def resume_stream_async(
1072
+ self,
1073
+ *,
1074
+ response: AsyncStreamResponse[FormattableT],
1075
+ content: UserContent,
1076
+ ) -> AsyncStreamResponse[FormattableT]:
1077
+ """Resume an `llm.AsyncStreamResponse` with a response format."""
1078
+ ...
1079
+
1080
+ @overload
1081
+ async def resume_stream_async(
1082
+ self,
1083
+ *,
1084
+ response: AsyncStreamResponse | AsyncStreamResponse[FormattableT],
1085
+ content: UserContent,
1086
+ ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
1087
+ """Resume an `llm.AsyncStreamResponse` with an optional response format."""
1088
+ ...
1089
+
1090
+ async def resume_stream_async(
1091
+ self,
1092
+ *,
1093
+ response: AsyncStreamResponse | AsyncStreamResponse[FormattableT],
1094
+ content: UserContent,
1095
+ ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
1096
+ """Generate a new `llm.AsyncStreamResponse` by extending another response's messages with additional user content.
1097
+
1098
+ Uses the previous response's tools and output format, and this model's params.
1099
+
1100
+ Depending on the client, this may be a wrapper around using client call methods
1101
+ with the response's messages and the new content, or it may use a provider-specific
1102
+ API for resuming an existing interaction.
1103
+
1104
+ Args:
1105
+ response: Previous async stream response to extend.
1106
+ content: Additional user content to append.
1107
+
1108
+ Returns:
1109
+ A new `llm.AsyncStreamResponse` object for asynchronously streaming the extended conversation.
1110
+ """
1111
+ return await self.provider.resume_stream_async(
1112
+ model_id=self.model_id,
1113
+ response=response,
1114
+ content=content,
1115
+ **self.params,
1116
+ )
1117
+
1118
+ @overload
1119
+ def context_resume_stream(
1120
+ self,
1121
+ *,
1122
+ ctx: Context[DepsT],
1123
+ response: ContextStreamResponse[DepsT, None],
1124
+ content: UserContent,
1125
+ ) -> ContextStreamResponse[DepsT, None]:
1126
+ """Resume an `llm.ContextStreamResponse` without a response format."""
1127
+ ...
1128
+
1129
+ @overload
1130
+ def context_resume_stream(
1131
+ self,
1132
+ *,
1133
+ ctx: Context[DepsT],
1134
+ response: ContextStreamResponse[DepsT, FormattableT],
1135
+ content: UserContent,
1136
+ ) -> ContextStreamResponse[DepsT, FormattableT]:
1137
+ """Resume an `llm.ContextStreamResponse` with a response format."""
1138
+ ...
1139
+
1140
+ @overload
1141
+ def context_resume_stream(
1142
+ self,
1143
+ *,
1144
+ ctx: Context[DepsT],
1145
+ response: ContextStreamResponse[DepsT, None]
1146
+ | ContextStreamResponse[DepsT, FormattableT],
1147
+ content: UserContent,
1148
+ ) -> (
1149
+ ContextStreamResponse[DepsT, None] | ContextStreamResponse[DepsT, FormattableT]
1150
+ ):
1151
+ """Resume an `llm.ContextStreamResponse` with an optional response format."""
1152
+ ...
1153
+
1154
+ def context_resume_stream(
1155
+ self,
1156
+ *,
1157
+ ctx: Context[DepsT],
1158
+ response: ContextStreamResponse[DepsT, None]
1159
+ | ContextStreamResponse[DepsT, FormattableT],
1160
+ content: UserContent,
1161
+ ) -> (
1162
+ ContextStreamResponse[DepsT, None] | ContextStreamResponse[DepsT, FormattableT]
1163
+ ):
1164
+ """Generate a new `llm.ContextStreamResponse` by extending another response's messages with additional user content.
1165
+
1166
+ Uses the previous response's tools and output format, and this model's params.
1167
+
1168
+ Depending on the client, this may be a wrapper around using client call methods
1169
+ with the response's messages and the new content, or it may use a provider-specific
1170
+ API for resuming an existing interaction.
1171
+
1172
+ Args:
1173
+ ctx: Context object with dependencies for tools.
1174
+ response: Previous context stream response to extend.
1175
+ content: Additional user content to append.
1176
+
1177
+ Returns:
1178
+ A new `llm.ContextStreamResponse` object for streaming the extended conversation.
1179
+ """
1180
+ return self.provider.context_resume_stream(
1181
+ ctx=ctx,
1182
+ model_id=self.model_id,
1183
+ response=response,
1184
+ content=content,
1185
+ **self.params,
1186
+ )
1187
+
1188
+ @overload
1189
+ async def context_resume_stream_async(
1190
+ self,
1191
+ *,
1192
+ ctx: Context[DepsT],
1193
+ response: AsyncContextStreamResponse[DepsT, None],
1194
+ content: UserContent,
1195
+ ) -> AsyncContextStreamResponse[DepsT, None]:
1196
+ """Resume an `llm.AsyncContextStreamResponse` without a response format."""
1197
+ ...
1198
+
1199
+ @overload
1200
+ async def context_resume_stream_async(
1201
+ self,
1202
+ *,
1203
+ ctx: Context[DepsT],
1204
+ response: AsyncContextStreamResponse[DepsT, FormattableT],
1205
+ content: UserContent,
1206
+ ) -> AsyncContextStreamResponse[DepsT, FormattableT]:
1207
+ """Resume an `llm.AsyncContextStreamResponse` with a response format."""
1208
+ ...
1209
+
1210
+ @overload
1211
+ async def context_resume_stream_async(
1212
+ self,
1213
+ *,
1214
+ ctx: Context[DepsT],
1215
+ response: AsyncContextStreamResponse[DepsT, None]
1216
+ | AsyncContextStreamResponse[DepsT, FormattableT],
1217
+ content: UserContent,
1218
+ ) -> (
1219
+ AsyncContextStreamResponse[DepsT, None]
1220
+ | AsyncContextStreamResponse[DepsT, FormattableT]
1221
+ ):
1222
+ """Resume an `llm.AsyncContextStreamResponse` with an optional response format."""
1223
+ ...
1224
+
1225
+ async def context_resume_stream_async(
1226
+ self,
1227
+ *,
1228
+ ctx: Context[DepsT],
1229
+ response: AsyncContextStreamResponse[DepsT, None]
1230
+ | AsyncContextStreamResponse[DepsT, FormattableT],
1231
+ content: UserContent,
1232
+ ) -> (
1233
+ AsyncContextStreamResponse[DepsT, None]
1234
+ | AsyncContextStreamResponse[DepsT, FormattableT]
1235
+ ):
1236
+ """Generate a new `llm.AsyncContextStreamResponse` by extending another response's messages with additional user content.
1237
+
1238
+ Uses the previous response's tools and output format, and this model's params.
1239
+
1240
+ Depending on the client, this may be a wrapper around using client call methods
1241
+ with the response's messages and the new content, or it may use a provider-specific
1242
+ API for resuming an existing interaction.
1243
+
1244
+ Args:
1245
+ ctx: Context object with dependencies for tools.
1246
+ response: Previous async context stream response to extend.
1247
+ content: Additional user content to append.
1248
+
1249
+ Returns:
1250
+ A new `llm.AsyncContextStreamResponse` object for asynchronously streaming the extended conversation.
1251
+ """
1252
+ return await self.provider.context_resume_stream_async(
1253
+ ctx=ctx,
1254
+ model_id=self.model_id,
1255
+ response=response,
1256
+ content=content,
1257
+ **self.params,
1258
+ )
1259
+
1260
+
1261
def model(
    model_id: ModelId,
    **params: Unpack[Params],
) -> Model:
    """Create a `Model` instance (usable directly or as a context manager).

    This is a convenience alias for the `Model` constructor. The returned
    `Model` implements the context manager protocol: inside a `with` block it
    becomes the context model used by both `llm.use_model()` and `llm.call()`,
    which lets you override the default model at runtime without touching
    function definitions. A context model completely overrides any model ID or
    parameters given to `llm.use_model()` / `llm.call()` — its own parameters
    take precedence, and anything unset falls back to defaults.

    The instance can also be stored and reused:

    ```python
    m = llm.model("openai/gpt-4o")
    response = m.call("Hello!")  # direct use
    with m:                      # or as a context manager
        response = recommend_book("fantasy")
    ```

    Args:
        model_id: A model ID string (e.g., "openai/gpt-4").
        **params: Additional model configuration (e.g. temperature). See `llm.Params`.

    Returns:
        A Model instance that can be used as a context manager.

    Raises:
        ValueError: If the specified provider is not supported.

    Example:
        Overriding the model used by `llm.use_model()`

        ```python
        import mirascope.llm as llm

        def recommend_book(genre: str) -> llm.Response:
            model = llm.use_model("openai/gpt-5-mini")
            return model.call(f"Please recommend a book in {genre}.")

        with llm.model("anthropic/claude-sonnet-4-5"):
            response = recommend_book("fantasy")  # Uses Claude instead of GPT
        ```

    Example:
        Overriding the model used by `llm.call()`

        ```python
        import mirascope.llm as llm

        @llm.call("openai/gpt-5-mini")
        def recommend_book(genre: str):
            return f"Please recommend a {genre} book."

        with llm.model("anthropic/claude-sonnet-4-0"):
            response = recommend_book("fantasy")  # Uses Claude instead of GPT
        ```
    """
    return Model(model_id, **params)
1347
+
1348
+
1349
@overload
def use_model(
    model: ModelId,
    **params: Unpack[Params],
) -> Model:
    """Get the model from context if available, otherwise create a new `Model`.

    This overload accepts a model ID string (e.g. "openai/gpt-4") and allows
    additional `llm.Params` keyword arguments used when constructing the
    fallback `Model`.
    """
    ...
1359
+
1360
+
1361
@overload
def use_model(
    model: Model,
) -> Model:
    """Get the model from context if available, otherwise use the provided `Model`.

    This overload accepts an already-constructed `Model` instance and therefore
    does not allow additional params.
    """
    ...
1370
+
1371
+
1372
def use_model(
    model: Model | ModelId,
    **params: Unpack[Params],
) -> Model:
    """Get the model from context if available, otherwise create a new `Model`.

    This function checks if a model has been set in the context (via `llm.model()`
    context manager). If a model is found in the context, it returns that model,
    ignoring any model ID or parameters passed to this function. Otherwise, it creates
    and returns a new `llm.Model` instance with the provided arguments.

    This allows you to write functions that work with a default model but can be
    overridden at runtime using the `llm.model()` context manager.

    Args:
        model: A model ID string (e.g., "openai/gpt-4") or a Model instance
        **params: Additional parameters to configure the model (e.g. temperature). See `llm.Params`.
            Only available when passing a model ID string

    Returns:
        An `llm.Model` instance from context (if set) or a new instance with the specified settings.

    Raises:
        ValueError: If the specified provider is not supported.

    Example:

        ```python
        import mirascope.llm as llm

        def recommend_book(genre: str) -> llm.Response:
            model = llm.use_model("openai/gpt-5-mini")
            return model.call(f"Please recommend a book in {genre}.")

        # Uses the default model (gpt-5-mini)
        response = recommend_book("fantasy")

        # Override with a different model
        with llm.model("anthropic/claude-sonnet-4-5"):
            response = recommend_book("fantasy")  # Uses Claude instead
        ```
    """
    # A model set via the `llm.model()` context manager always wins.
    context_model = model_from_context()
    if context_model is not None:
        return context_model
    # No context override: build a Model from the ID, or pass through the
    # instance unchanged (params are only valid with a string ID, per overloads).
    if isinstance(model, str):
        return Model(model, **params)
    return model