mirascope 1.0.5__py3-none-any.whl → 2.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (632)
  1. mirascope/__init__.py +6 -6
  2. mirascope/_stubs.py +384 -0
  3. mirascope/_utils.py +34 -0
  4. mirascope/api/__init__.py +14 -0
  5. mirascope/api/_generated/README.md +207 -0
  6. mirascope/api/_generated/__init__.py +444 -0
  7. mirascope/api/_generated/annotations/__init__.py +33 -0
  8. mirascope/api/_generated/annotations/client.py +506 -0
  9. mirascope/api/_generated/annotations/raw_client.py +1414 -0
  10. mirascope/api/_generated/annotations/types/__init__.py +31 -0
  11. mirascope/api/_generated/annotations/types/annotations_create_request_label.py +5 -0
  12. mirascope/api/_generated/annotations/types/annotations_create_response.py +48 -0
  13. mirascope/api/_generated/annotations/types/annotations_create_response_label.py +5 -0
  14. mirascope/api/_generated/annotations/types/annotations_get_response.py +48 -0
  15. mirascope/api/_generated/annotations/types/annotations_get_response_label.py +5 -0
  16. mirascope/api/_generated/annotations/types/annotations_list_request_label.py +5 -0
  17. mirascope/api/_generated/annotations/types/annotations_list_response.py +21 -0
  18. mirascope/api/_generated/annotations/types/annotations_list_response_annotations_item.py +50 -0
  19. mirascope/api/_generated/annotations/types/annotations_list_response_annotations_item_label.py +5 -0
  20. mirascope/api/_generated/annotations/types/annotations_update_request_label.py +5 -0
  21. mirascope/api/_generated/annotations/types/annotations_update_response.py +48 -0
  22. mirascope/api/_generated/annotations/types/annotations_update_response_label.py +5 -0
  23. mirascope/api/_generated/api_keys/__init__.py +17 -0
  24. mirascope/api/_generated/api_keys/client.py +530 -0
  25. mirascope/api/_generated/api_keys/raw_client.py +1236 -0
  26. mirascope/api/_generated/api_keys/types/__init__.py +15 -0
  27. mirascope/api/_generated/api_keys/types/api_keys_create_response.py +28 -0
  28. mirascope/api/_generated/api_keys/types/api_keys_get_response.py +27 -0
  29. mirascope/api/_generated/api_keys/types/api_keys_list_all_for_org_response_item.py +40 -0
  30. mirascope/api/_generated/api_keys/types/api_keys_list_response_item.py +27 -0
  31. mirascope/api/_generated/client.py +211 -0
  32. mirascope/api/_generated/core/__init__.py +52 -0
  33. mirascope/api/_generated/core/api_error.py +23 -0
  34. mirascope/api/_generated/core/client_wrapper.py +46 -0
  35. mirascope/api/_generated/core/datetime_utils.py +28 -0
  36. mirascope/api/_generated/core/file.py +67 -0
  37. mirascope/api/_generated/core/force_multipart.py +16 -0
  38. mirascope/api/_generated/core/http_client.py +543 -0
  39. mirascope/api/_generated/core/http_response.py +55 -0
  40. mirascope/api/_generated/core/jsonable_encoder.py +100 -0
  41. mirascope/api/_generated/core/pydantic_utilities.py +255 -0
  42. mirascope/api/_generated/core/query_encoder.py +58 -0
  43. mirascope/api/_generated/core/remove_none_from_dict.py +11 -0
  44. mirascope/api/_generated/core/request_options.py +35 -0
  45. mirascope/api/_generated/core/serialization.py +276 -0
  46. mirascope/api/_generated/docs/__init__.py +4 -0
  47. mirascope/api/_generated/docs/client.py +91 -0
  48. mirascope/api/_generated/docs/raw_client.py +178 -0
  49. mirascope/api/_generated/environment.py +9 -0
  50. mirascope/api/_generated/environments/__init__.py +23 -0
  51. mirascope/api/_generated/environments/client.py +649 -0
  52. mirascope/api/_generated/environments/raw_client.py +1567 -0
  53. mirascope/api/_generated/environments/types/__init__.py +25 -0
  54. mirascope/api/_generated/environments/types/environments_create_response.py +24 -0
  55. mirascope/api/_generated/environments/types/environments_get_analytics_response.py +60 -0
  56. mirascope/api/_generated/environments/types/environments_get_analytics_response_top_functions_item.py +24 -0
  57. mirascope/api/_generated/environments/types/environments_get_analytics_response_top_models_item.py +22 -0
  58. mirascope/api/_generated/environments/types/environments_get_response.py +24 -0
  59. mirascope/api/_generated/environments/types/environments_list_response_item.py +24 -0
  60. mirascope/api/_generated/environments/types/environments_update_response.py +24 -0
  61. mirascope/api/_generated/errors/__init__.py +25 -0
  62. mirascope/api/_generated/errors/bad_request_error.py +14 -0
  63. mirascope/api/_generated/errors/conflict_error.py +14 -0
  64. mirascope/api/_generated/errors/forbidden_error.py +11 -0
  65. mirascope/api/_generated/errors/internal_server_error.py +10 -0
  66. mirascope/api/_generated/errors/not_found_error.py +11 -0
  67. mirascope/api/_generated/errors/payment_required_error.py +15 -0
  68. mirascope/api/_generated/errors/service_unavailable_error.py +14 -0
  69. mirascope/api/_generated/errors/too_many_requests_error.py +15 -0
  70. mirascope/api/_generated/errors/unauthorized_error.py +11 -0
  71. mirascope/api/_generated/functions/__init__.py +39 -0
  72. mirascope/api/_generated/functions/client.py +647 -0
  73. mirascope/api/_generated/functions/raw_client.py +1890 -0
  74. mirascope/api/_generated/functions/types/__init__.py +53 -0
  75. mirascope/api/_generated/functions/types/functions_create_request_dependencies_value.py +20 -0
  76. mirascope/api/_generated/functions/types/functions_create_response.py +37 -0
  77. mirascope/api/_generated/functions/types/functions_create_response_dependencies_value.py +20 -0
  78. mirascope/api/_generated/functions/types/functions_find_by_hash_response.py +39 -0
  79. mirascope/api/_generated/functions/types/functions_find_by_hash_response_dependencies_value.py +20 -0
  80. mirascope/api/_generated/functions/types/functions_get_by_env_response.py +53 -0
  81. mirascope/api/_generated/functions/types/functions_get_by_env_response_dependencies_value.py +22 -0
  82. mirascope/api/_generated/functions/types/functions_get_response.py +37 -0
  83. mirascope/api/_generated/functions/types/functions_get_response_dependencies_value.py +20 -0
  84. mirascope/api/_generated/functions/types/functions_list_by_env_response.py +25 -0
  85. mirascope/api/_generated/functions/types/functions_list_by_env_response_functions_item.py +56 -0
  86. mirascope/api/_generated/functions/types/functions_list_by_env_response_functions_item_dependencies_value.py +22 -0
  87. mirascope/api/_generated/functions/types/functions_list_response.py +21 -0
  88. mirascope/api/_generated/functions/types/functions_list_response_functions_item.py +41 -0
  89. mirascope/api/_generated/functions/types/functions_list_response_functions_item_dependencies_value.py +20 -0
  90. mirascope/api/_generated/health/__init__.py +7 -0
  91. mirascope/api/_generated/health/client.py +92 -0
  92. mirascope/api/_generated/health/raw_client.py +175 -0
  93. mirascope/api/_generated/health/types/__init__.py +8 -0
  94. mirascope/api/_generated/health/types/health_check_response.py +22 -0
  95. mirascope/api/_generated/health/types/health_check_response_status.py +5 -0
  96. mirascope/api/_generated/organization_invitations/__init__.py +33 -0
  97. mirascope/api/_generated/organization_invitations/client.py +546 -0
  98. mirascope/api/_generated/organization_invitations/raw_client.py +1519 -0
  99. mirascope/api/_generated/organization_invitations/types/__init__.py +53 -0
  100. mirascope/api/_generated/organization_invitations/types/organization_invitations_accept_response.py +34 -0
  101. mirascope/api/_generated/organization_invitations/types/organization_invitations_accept_response_role.py +7 -0
  102. mirascope/api/_generated/organization_invitations/types/organization_invitations_create_request_role.py +7 -0
  103. mirascope/api/_generated/organization_invitations/types/organization_invitations_create_response.py +48 -0
  104. mirascope/api/_generated/organization_invitations/types/organization_invitations_create_response_role.py +7 -0
  105. mirascope/api/_generated/organization_invitations/types/organization_invitations_create_response_status.py +7 -0
  106. mirascope/api/_generated/organization_invitations/types/organization_invitations_get_response.py +48 -0
  107. mirascope/api/_generated/organization_invitations/types/organization_invitations_get_response_role.py +7 -0
  108. mirascope/api/_generated/organization_invitations/types/organization_invitations_get_response_status.py +7 -0
  109. mirascope/api/_generated/organization_invitations/types/organization_invitations_list_response_item.py +48 -0
  110. mirascope/api/_generated/organization_invitations/types/organization_invitations_list_response_item_role.py +7 -0
  111. mirascope/api/_generated/organization_invitations/types/organization_invitations_list_response_item_status.py +7 -0
  112. mirascope/api/_generated/organization_memberships/__init__.py +19 -0
  113. mirascope/api/_generated/organization_memberships/client.py +302 -0
  114. mirascope/api/_generated/organization_memberships/raw_client.py +736 -0
  115. mirascope/api/_generated/organization_memberships/types/__init__.py +27 -0
  116. mirascope/api/_generated/organization_memberships/types/organization_memberships_list_response_item.py +33 -0
  117. mirascope/api/_generated/organization_memberships/types/organization_memberships_list_response_item_role.py +7 -0
  118. mirascope/api/_generated/organization_memberships/types/organization_memberships_update_request_role.py +7 -0
  119. mirascope/api/_generated/organization_memberships/types/organization_memberships_update_response.py +31 -0
  120. mirascope/api/_generated/organization_memberships/types/organization_memberships_update_response_role.py +7 -0
  121. mirascope/api/_generated/organizations/__init__.py +51 -0
  122. mirascope/api/_generated/organizations/client.py +869 -0
  123. mirascope/api/_generated/organizations/raw_client.py +2593 -0
  124. mirascope/api/_generated/organizations/types/__init__.py +71 -0
  125. mirascope/api/_generated/organizations/types/organizations_create_payment_intent_response.py +24 -0
  126. mirascope/api/_generated/organizations/types/organizations_create_response.py +26 -0
  127. mirascope/api/_generated/organizations/types/organizations_create_response_role.py +5 -0
  128. mirascope/api/_generated/organizations/types/organizations_get_response.py +26 -0
  129. mirascope/api/_generated/organizations/types/organizations_get_response_role.py +5 -0
  130. mirascope/api/_generated/organizations/types/organizations_list_response_item.py +26 -0
  131. mirascope/api/_generated/organizations/types/organizations_list_response_item_role.py +5 -0
  132. mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_request_target_plan.py +7 -0
  133. mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_response.py +47 -0
  134. mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_response_validation_errors_item.py +33 -0
  135. mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_response_validation_errors_item_resource.py +7 -0
  136. mirascope/api/_generated/organizations/types/organizations_router_balance_response.py +24 -0
  137. mirascope/api/_generated/organizations/types/organizations_subscription_response.py +53 -0
  138. mirascope/api/_generated/organizations/types/organizations_subscription_response_current_plan.py +7 -0
  139. mirascope/api/_generated/organizations/types/organizations_subscription_response_payment_method.py +26 -0
  140. mirascope/api/_generated/organizations/types/organizations_subscription_response_scheduled_change.py +34 -0
  141. mirascope/api/_generated/organizations/types/organizations_subscription_response_scheduled_change_target_plan.py +7 -0
  142. mirascope/api/_generated/organizations/types/organizations_update_response.py +26 -0
  143. mirascope/api/_generated/organizations/types/organizations_update_response_role.py +5 -0
  144. mirascope/api/_generated/organizations/types/organizations_update_subscription_request_target_plan.py +7 -0
  145. mirascope/api/_generated/organizations/types/organizations_update_subscription_response.py +35 -0
  146. mirascope/api/_generated/project_memberships/__init__.py +29 -0
  147. mirascope/api/_generated/project_memberships/client.py +528 -0
  148. mirascope/api/_generated/project_memberships/raw_client.py +1278 -0
  149. mirascope/api/_generated/project_memberships/types/__init__.py +33 -0
  150. mirascope/api/_generated/project_memberships/types/project_memberships_create_request_role.py +7 -0
  151. mirascope/api/_generated/project_memberships/types/project_memberships_create_response.py +35 -0
  152. mirascope/api/_generated/project_memberships/types/project_memberships_create_response_role.py +7 -0
  153. mirascope/api/_generated/project_memberships/types/project_memberships_get_response.py +33 -0
  154. mirascope/api/_generated/project_memberships/types/project_memberships_get_response_role.py +7 -0
  155. mirascope/api/_generated/project_memberships/types/project_memberships_list_response_item.py +33 -0
  156. mirascope/api/_generated/project_memberships/types/project_memberships_list_response_item_role.py +7 -0
  157. mirascope/api/_generated/project_memberships/types/project_memberships_update_request_role.py +7 -0
  158. mirascope/api/_generated/project_memberships/types/project_memberships_update_response.py +35 -0
  159. mirascope/api/_generated/project_memberships/types/project_memberships_update_response_role.py +7 -0
  160. mirascope/api/_generated/projects/__init__.py +7 -0
  161. mirascope/api/_generated/projects/client.py +428 -0
  162. mirascope/api/_generated/projects/raw_client.py +1302 -0
  163. mirascope/api/_generated/projects/types/__init__.py +10 -0
  164. mirascope/api/_generated/projects/types/projects_create_response.py +25 -0
  165. mirascope/api/_generated/projects/types/projects_get_response.py +25 -0
  166. mirascope/api/_generated/projects/types/projects_list_response_item.py +25 -0
  167. mirascope/api/_generated/projects/types/projects_update_response.py +25 -0
  168. mirascope/api/_generated/reference.md +4987 -0
  169. mirascope/api/_generated/tags/__init__.py +19 -0
  170. mirascope/api/_generated/tags/client.py +504 -0
  171. mirascope/api/_generated/tags/raw_client.py +1288 -0
  172. mirascope/api/_generated/tags/types/__init__.py +17 -0
  173. mirascope/api/_generated/tags/types/tags_create_response.py +41 -0
  174. mirascope/api/_generated/tags/types/tags_get_response.py +41 -0
  175. mirascope/api/_generated/tags/types/tags_list_response.py +23 -0
  176. mirascope/api/_generated/tags/types/tags_list_response_tags_item.py +41 -0
  177. mirascope/api/_generated/tags/types/tags_update_response.py +41 -0
  178. mirascope/api/_generated/token_cost/__init__.py +7 -0
  179. mirascope/api/_generated/token_cost/client.py +160 -0
  180. mirascope/api/_generated/token_cost/raw_client.py +264 -0
  181. mirascope/api/_generated/token_cost/types/__init__.py +8 -0
  182. mirascope/api/_generated/token_cost/types/token_cost_calculate_request_usage.py +54 -0
  183. mirascope/api/_generated/token_cost/types/token_cost_calculate_response.py +52 -0
  184. mirascope/api/_generated/traces/__init__.py +97 -0
  185. mirascope/api/_generated/traces/client.py +1103 -0
  186. mirascope/api/_generated/traces/raw_client.py +2322 -0
  187. mirascope/api/_generated/traces/types/__init__.py +155 -0
  188. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item.py +29 -0
  189. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource.py +27 -0
  190. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item.py +23 -0
  191. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value.py +38 -0
  192. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_array_value.py +19 -0
  193. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value.py +22 -0
  194. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value_values_item.py +20 -0
  195. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item.py +29 -0
  196. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope.py +31 -0
  197. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item.py +23 -0
  198. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value.py +38 -0
  199. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_array_value.py +19 -0
  200. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value.py +22 -0
  201. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value_values_item.py +22 -0
  202. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item.py +48 -0
  203. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item.py +23 -0
  204. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value.py +38 -0
  205. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_array_value.py +19 -0
  206. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value.py +24 -0
  207. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value_values_item.py +22 -0
  208. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_status.py +20 -0
  209. mirascope/api/_generated/traces/types/traces_create_response.py +24 -0
  210. mirascope/api/_generated/traces/types/traces_create_response_partial_success.py +22 -0
  211. mirascope/api/_generated/traces/types/traces_get_analytics_summary_response.py +60 -0
  212. mirascope/api/_generated/traces/types/traces_get_analytics_summary_response_top_functions_item.py +24 -0
  213. mirascope/api/_generated/traces/types/traces_get_analytics_summary_response_top_models_item.py +22 -0
  214. mirascope/api/_generated/traces/types/traces_get_trace_detail_by_env_response.py +33 -0
  215. mirascope/api/_generated/traces/types/traces_get_trace_detail_by_env_response_spans_item.py +88 -0
  216. mirascope/api/_generated/traces/types/traces_get_trace_detail_response.py +33 -0
  217. mirascope/api/_generated/traces/types/traces_get_trace_detail_response_spans_item.py +88 -0
  218. mirascope/api/_generated/traces/types/traces_list_by_function_hash_response.py +25 -0
  219. mirascope/api/_generated/traces/types/traces_list_by_function_hash_response_traces_item.py +44 -0
  220. mirascope/api/_generated/traces/types/traces_search_by_env_request_attribute_filters_item.py +26 -0
  221. mirascope/api/_generated/traces/types/traces_search_by_env_request_attribute_filters_item_operator.py +7 -0
  222. mirascope/api/_generated/traces/types/traces_search_by_env_request_sort_by.py +7 -0
  223. mirascope/api/_generated/traces/types/traces_search_by_env_request_sort_order.py +7 -0
  224. mirascope/api/_generated/traces/types/traces_search_by_env_response.py +26 -0
  225. mirascope/api/_generated/traces/types/traces_search_by_env_response_spans_item.py +50 -0
  226. mirascope/api/_generated/traces/types/traces_search_request_attribute_filters_item.py +26 -0
  227. mirascope/api/_generated/traces/types/traces_search_request_attribute_filters_item_operator.py +7 -0
  228. mirascope/api/_generated/traces/types/traces_search_request_sort_by.py +7 -0
  229. mirascope/api/_generated/traces/types/traces_search_request_sort_order.py +5 -0
  230. mirascope/api/_generated/traces/types/traces_search_response.py +26 -0
  231. mirascope/api/_generated/traces/types/traces_search_response_spans_item.py +50 -0
  232. mirascope/api/_generated/types/__init__.py +85 -0
  233. mirascope/api/_generated/types/already_exists_error.py +22 -0
  234. mirascope/api/_generated/types/already_exists_error_tag.py +5 -0
  235. mirascope/api/_generated/types/bad_request_error_body.py +50 -0
  236. mirascope/api/_generated/types/click_house_error.py +22 -0
  237. mirascope/api/_generated/types/database_error.py +22 -0
  238. mirascope/api/_generated/types/database_error_tag.py +5 -0
  239. mirascope/api/_generated/types/date.py +3 -0
  240. mirascope/api/_generated/types/http_api_decode_error.py +27 -0
  241. mirascope/api/_generated/types/http_api_decode_error_tag.py +5 -0
  242. mirascope/api/_generated/types/immutable_resource_error.py +22 -0
  243. mirascope/api/_generated/types/internal_server_error_body.py +49 -0
  244. mirascope/api/_generated/types/issue.py +38 -0
  245. mirascope/api/_generated/types/issue_tag.py +10 -0
  246. mirascope/api/_generated/types/not_found_error_body.py +22 -0
  247. mirascope/api/_generated/types/not_found_error_tag.py +5 -0
  248. mirascope/api/_generated/types/number_from_string.py +3 -0
  249. mirascope/api/_generated/types/permission_denied_error.py +22 -0
  250. mirascope/api/_generated/types/permission_denied_error_tag.py +5 -0
  251. mirascope/api/_generated/types/plan_limit_exceeded_error.py +32 -0
  252. mirascope/api/_generated/types/plan_limit_exceeded_error_tag.py +7 -0
  253. mirascope/api/_generated/types/pricing_unavailable_error.py +23 -0
  254. mirascope/api/_generated/types/property_key.py +7 -0
  255. mirascope/api/_generated/types/property_key_key.py +25 -0
  256. mirascope/api/_generated/types/property_key_key_tag.py +5 -0
  257. mirascope/api/_generated/types/rate_limit_error.py +31 -0
  258. mirascope/api/_generated/types/rate_limit_error_tag.py +5 -0
  259. mirascope/api/_generated/types/service_unavailable_error_body.py +24 -0
  260. mirascope/api/_generated/types/service_unavailable_error_tag.py +7 -0
  261. mirascope/api/_generated/types/stripe_error.py +20 -0
  262. mirascope/api/_generated/types/subscription_past_due_error.py +31 -0
  263. mirascope/api/_generated/types/subscription_past_due_error_tag.py +7 -0
  264. mirascope/api/_generated/types/unauthorized_error_body.py +21 -0
  265. mirascope/api/_generated/types/unauthorized_error_tag.py +5 -0
  266. mirascope/api/client.py +255 -0
  267. mirascope/api/settings.py +99 -0
  268. mirascope/llm/__init__.py +316 -0
  269. mirascope/llm/calls/__init__.py +17 -0
  270. mirascope/llm/calls/calls.py +348 -0
  271. mirascope/llm/calls/decorator.py +268 -0
  272. mirascope/llm/content/__init__.py +71 -0
  273. mirascope/llm/content/audio.py +173 -0
  274. mirascope/llm/content/document.py +94 -0
  275. mirascope/llm/content/image.py +206 -0
  276. mirascope/llm/content/text.py +47 -0
  277. mirascope/llm/content/thought.py +58 -0
  278. mirascope/llm/content/tool_call.py +69 -0
  279. mirascope/llm/content/tool_output.py +43 -0
  280. mirascope/llm/context/__init__.py +6 -0
  281. mirascope/llm/context/_utils.py +41 -0
  282. mirascope/llm/context/context.py +24 -0
  283. mirascope/llm/exceptions.py +360 -0
  284. mirascope/llm/formatting/__init__.py +39 -0
  285. mirascope/llm/formatting/format.py +291 -0
  286. mirascope/llm/formatting/from_call_args.py +30 -0
  287. mirascope/llm/formatting/output_parser.py +178 -0
  288. mirascope/llm/formatting/partial.py +131 -0
  289. mirascope/llm/formatting/primitives.py +192 -0
  290. mirascope/llm/formatting/types.py +83 -0
  291. mirascope/llm/mcp/__init__.py +5 -0
  292. mirascope/llm/mcp/mcp_client.py +130 -0
  293. mirascope/llm/messages/__init__.py +35 -0
  294. mirascope/llm/messages/_utils.py +34 -0
  295. mirascope/llm/messages/message.py +190 -0
  296. mirascope/llm/models/__init__.py +21 -0
  297. mirascope/llm/models/models.py +1339 -0
  298. mirascope/llm/models/params.py +72 -0
  299. mirascope/llm/models/thinking_config.py +61 -0
  300. mirascope/llm/prompts/__init__.py +34 -0
  301. mirascope/llm/prompts/_utils.py +31 -0
  302. mirascope/llm/prompts/decorator.py +215 -0
  303. mirascope/llm/prompts/prompts.py +484 -0
  304. mirascope/llm/prompts/protocols.py +65 -0
  305. mirascope/llm/providers/__init__.py +65 -0
  306. mirascope/llm/providers/anthropic/__init__.py +11 -0
  307. mirascope/llm/providers/anthropic/_utils/__init__.py +27 -0
  308. mirascope/llm/providers/anthropic/_utils/beta_decode.py +297 -0
  309. mirascope/llm/providers/anthropic/_utils/beta_encode.py +272 -0
  310. mirascope/llm/providers/anthropic/_utils/decode.py +326 -0
  311. mirascope/llm/providers/anthropic/_utils/encode.py +431 -0
  312. mirascope/llm/providers/anthropic/_utils/errors.py +46 -0
  313. mirascope/llm/providers/anthropic/beta_provider.py +338 -0
  314. mirascope/llm/providers/anthropic/model_id.py +23 -0
  315. mirascope/llm/providers/anthropic/model_info.py +87 -0
  316. mirascope/llm/providers/anthropic/provider.py +440 -0
  317. mirascope/llm/providers/base/__init__.py +14 -0
  318. mirascope/llm/providers/base/_utils.py +248 -0
  319. mirascope/llm/providers/base/base_provider.py +1463 -0
  320. mirascope/llm/providers/base/kwargs.py +12 -0
  321. mirascope/llm/providers/google/__init__.py +6 -0
  322. mirascope/llm/providers/google/_utils/__init__.py +17 -0
  323. mirascope/llm/providers/google/_utils/decode.py +357 -0
  324. mirascope/llm/providers/google/_utils/encode.py +418 -0
  325. mirascope/llm/providers/google/_utils/errors.py +50 -0
  326. mirascope/llm/providers/google/message.py +7 -0
  327. mirascope/llm/providers/google/model_id.py +22 -0
  328. mirascope/llm/providers/google/model_info.py +63 -0
  329. mirascope/llm/providers/google/provider.py +456 -0
  330. mirascope/llm/providers/mirascope/__init__.py +5 -0
  331. mirascope/llm/providers/mirascope/_utils.py +73 -0
  332. mirascope/llm/providers/mirascope/provider.py +313 -0
  333. mirascope/llm/providers/mlx/__init__.py +9 -0
  334. mirascope/llm/providers/mlx/_utils.py +141 -0
  335. mirascope/llm/providers/mlx/encoding/__init__.py +8 -0
  336. mirascope/llm/providers/mlx/encoding/base.py +69 -0
  337. mirascope/llm/providers/mlx/encoding/transformers.py +146 -0
  338. mirascope/llm/providers/mlx/mlx.py +242 -0
  339. mirascope/llm/providers/mlx/model_id.py +17 -0
  340. mirascope/llm/providers/mlx/provider.py +416 -0
  341. mirascope/llm/providers/model_id.py +16 -0
  342. mirascope/llm/providers/ollama/__init__.py +7 -0
  343. mirascope/llm/providers/ollama/provider.py +71 -0
  344. mirascope/llm/providers/openai/__init__.py +15 -0
  345. mirascope/llm/providers/openai/_utils/__init__.py +5 -0
  346. mirascope/llm/providers/openai/_utils/errors.py +46 -0
  347. mirascope/llm/providers/openai/completions/__init__.py +7 -0
  348. mirascope/llm/providers/openai/completions/_utils/__init__.py +18 -0
  349. mirascope/llm/providers/openai/completions/_utils/decode.py +252 -0
  350. mirascope/llm/providers/openai/completions/_utils/encode.py +390 -0
  351. mirascope/llm/providers/openai/completions/_utils/feature_info.py +50 -0
  352. mirascope/llm/providers/openai/completions/base_provider.py +522 -0
  353. mirascope/llm/providers/openai/completions/provider.py +28 -0
  354. mirascope/llm/providers/openai/model_id.py +31 -0
  355. mirascope/llm/providers/openai/model_info.py +303 -0
  356. mirascope/llm/providers/openai/provider.py +405 -0
  357. mirascope/llm/providers/openai/responses/__init__.py +5 -0
  358. mirascope/llm/providers/openai/responses/_utils/__init__.py +15 -0
  359. mirascope/llm/providers/openai/responses/_utils/decode.py +289 -0
  360. mirascope/llm/providers/openai/responses/_utils/encode.py +399 -0
  361. mirascope/llm/providers/openai/responses/provider.py +472 -0
  362. mirascope/llm/providers/openrouter/__init__.py +5 -0
  363. mirascope/llm/providers/openrouter/provider.py +67 -0
  364. mirascope/llm/providers/provider_id.py +26 -0
  365. mirascope/llm/providers/provider_registry.py +305 -0
  366. mirascope/llm/providers/together/__init__.py +7 -0
  367. mirascope/llm/providers/together/provider.py +40 -0
  368. mirascope/llm/responses/__init__.py +66 -0
  369. mirascope/llm/responses/_utils.py +146 -0
  370. mirascope/llm/responses/base_response.py +103 -0
  371. mirascope/llm/responses/base_stream_response.py +824 -0
  372. mirascope/llm/responses/finish_reason.py +28 -0
  373. mirascope/llm/responses/response.py +362 -0
  374. mirascope/llm/responses/root_response.py +248 -0
  375. mirascope/llm/responses/stream_response.py +577 -0
  376. mirascope/llm/responses/streams.py +363 -0
  377. mirascope/llm/responses/usage.py +139 -0
  378. mirascope/llm/tools/__init__.py +71 -0
  379. mirascope/llm/tools/_utils.py +34 -0
  380. mirascope/llm/tools/decorator.py +184 -0
  381. mirascope/llm/tools/protocols.py +96 -0
  382. mirascope/llm/tools/provider_tools.py +18 -0
  383. mirascope/llm/tools/tool_schema.py +321 -0
  384. mirascope/llm/tools/toolkit.py +178 -0
  385. mirascope/llm/tools/tools.py +263 -0
  386. mirascope/llm/tools/types.py +112 -0
  387. mirascope/llm/tools/web_search_tool.py +32 -0
  388. mirascope/llm/types/__init__.py +22 -0
  389. mirascope/llm/types/dataclass.py +9 -0
  390. mirascope/llm/types/jsonable.py +44 -0
  391. mirascope/llm/types/type_vars.py +19 -0
  392. mirascope/ops/__init__.py +129 -0
  393. mirascope/ops/_internal/__init__.py +5 -0
  394. mirascope/ops/_internal/closure.py +1172 -0
  395. mirascope/ops/_internal/configuration.py +177 -0
  396. mirascope/ops/_internal/context.py +76 -0
  397. mirascope/ops/_internal/exporters/__init__.py +26 -0
  398. mirascope/ops/_internal/exporters/exporters.py +362 -0
  399. mirascope/ops/_internal/exporters/processors.py +104 -0
  400. mirascope/ops/_internal/exporters/types.py +165 -0
  401. mirascope/ops/_internal/exporters/utils.py +66 -0
  402. mirascope/ops/_internal/instrumentation/__init__.py +28 -0
  403. mirascope/ops/_internal/instrumentation/llm/__init__.py +8 -0
  404. mirascope/ops/_internal/instrumentation/llm/common.py +500 -0
  405. mirascope/ops/_internal/instrumentation/llm/cost.py +190 -0
  406. mirascope/ops/_internal/instrumentation/llm/encode.py +238 -0
  407. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/__init__.py +38 -0
  408. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_input_messages.py +31 -0
  409. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_output_messages.py +38 -0
  410. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_system_instructions.py +18 -0
  411. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/shared.py +100 -0
  412. mirascope/ops/_internal/instrumentation/llm/llm.py +161 -0
  413. mirascope/ops/_internal/instrumentation/llm/model.py +1777 -0
  414. mirascope/ops/_internal/instrumentation/llm/response.py +521 -0
  415. mirascope/ops/_internal/instrumentation/llm/serialize.py +324 -0
  416. mirascope/ops/_internal/instrumentation/providers/__init__.py +29 -0
  417. mirascope/ops/_internal/instrumentation/providers/anthropic.py +78 -0
  418. mirascope/ops/_internal/instrumentation/providers/base.py +179 -0
  419. mirascope/ops/_internal/instrumentation/providers/google_genai.py +85 -0
  420. mirascope/ops/_internal/instrumentation/providers/openai.py +82 -0
  421. mirascope/ops/_internal/propagation.py +198 -0
  422. mirascope/ops/_internal/protocols.py +133 -0
  423. mirascope/ops/_internal/session.py +139 -0
  424. mirascope/ops/_internal/spans.py +232 -0
  425. mirascope/ops/_internal/traced_calls.py +389 -0
  426. mirascope/ops/_internal/traced_functions.py +528 -0
  427. mirascope/ops/_internal/tracing.py +353 -0
  428. mirascope/ops/_internal/types.py +13 -0
  429. mirascope/ops/_internal/utils.py +131 -0
  430. mirascope/ops/_internal/versioned_calls.py +512 -0
  431. mirascope/ops/_internal/versioned_functions.py +357 -0
  432. mirascope/ops/_internal/versioning.py +303 -0
  433. mirascope/ops/exceptions.py +21 -0
  434. mirascope-2.1.1.dist-info/METADATA +231 -0
  435. mirascope-2.1.1.dist-info/RECORD +437 -0
  436. {mirascope-1.0.5.dist-info → mirascope-2.1.1.dist-info}/WHEEL +1 -1
  437. {mirascope-1.0.5.dist-info → mirascope-2.1.1.dist-info}/licenses/LICENSE +1 -1
  438. mirascope/beta/__init__.py +0 -0
  439. mirascope/beta/openai/__init__.py +0 -5
  440. mirascope/beta/openai/parse.py +0 -129
  441. mirascope/beta/rag/__init__.py +0 -24
  442. mirascope/beta/rag/base/__init__.py +0 -22
  443. mirascope/beta/rag/base/chunkers/__init__.py +0 -2
  444. mirascope/beta/rag/base/chunkers/base_chunker.py +0 -37
  445. mirascope/beta/rag/base/chunkers/text_chunker.py +0 -33
  446. mirascope/beta/rag/base/config.py +0 -8
  447. mirascope/beta/rag/base/document.py +0 -11
  448. mirascope/beta/rag/base/embedders.py +0 -35
  449. mirascope/beta/rag/base/embedding_params.py +0 -18
  450. mirascope/beta/rag/base/embedding_response.py +0 -30
  451. mirascope/beta/rag/base/query_results.py +0 -7
  452. mirascope/beta/rag/base/vectorstore_params.py +0 -18
  453. mirascope/beta/rag/base/vectorstores.py +0 -37
  454. mirascope/beta/rag/chroma/__init__.py +0 -11
  455. mirascope/beta/rag/chroma/types.py +0 -57
  456. mirascope/beta/rag/chroma/vectorstores.py +0 -97
  457. mirascope/beta/rag/cohere/__init__.py +0 -11
  458. mirascope/beta/rag/cohere/embedders.py +0 -87
  459. mirascope/beta/rag/cohere/embedding_params.py +0 -29
  460. mirascope/beta/rag/cohere/embedding_response.py +0 -29
  461. mirascope/beta/rag/cohere/py.typed +0 -0
  462. mirascope/beta/rag/openai/__init__.py +0 -11
  463. mirascope/beta/rag/openai/embedders.py +0 -144
  464. mirascope/beta/rag/openai/embedding_params.py +0 -18
  465. mirascope/beta/rag/openai/embedding_response.py +0 -14
  466. mirascope/beta/rag/openai/py.typed +0 -0
  467. mirascope/beta/rag/pinecone/__init__.py +0 -19
  468. mirascope/beta/rag/pinecone/types.py +0 -143
  469. mirascope/beta/rag/pinecone/vectorstores.py +0 -148
  470. mirascope/beta/rag/weaviate/__init__.py +0 -6
  471. mirascope/beta/rag/weaviate/types.py +0 -92
  472. mirascope/beta/rag/weaviate/vectorstores.py +0 -103
  473. mirascope/core/__init__.py +0 -55
  474. mirascope/core/anthropic/__init__.py +0 -21
  475. mirascope/core/anthropic/_call.py +0 -71
  476. mirascope/core/anthropic/_utils/__init__.py +0 -16
  477. mirascope/core/anthropic/_utils/_calculate_cost.py +0 -63
  478. mirascope/core/anthropic/_utils/_convert_message_params.py +0 -54
  479. mirascope/core/anthropic/_utils/_get_json_output.py +0 -34
  480. mirascope/core/anthropic/_utils/_handle_stream.py +0 -89
  481. mirascope/core/anthropic/_utils/_setup_call.py +0 -76
  482. mirascope/core/anthropic/call_params.py +0 -36
  483. mirascope/core/anthropic/call_response.py +0 -158
  484. mirascope/core/anthropic/call_response_chunk.py +0 -104
  485. mirascope/core/anthropic/dynamic_config.py +0 -26
  486. mirascope/core/anthropic/py.typed +0 -0
  487. mirascope/core/anthropic/stream.py +0 -140
  488. mirascope/core/anthropic/tool.py +0 -77
  489. mirascope/core/base/__init__.py +0 -40
  490. mirascope/core/base/_call_factory.py +0 -323
  491. mirascope/core/base/_create.py +0 -167
  492. mirascope/core/base/_extract.py +0 -139
  493. mirascope/core/base/_partial.py +0 -63
  494. mirascope/core/base/_utils/__init__.py +0 -64
  495. mirascope/core/base/_utils/_base_type.py +0 -17
  496. mirascope/core/base/_utils/_convert_base_model_to_base_tool.py +0 -45
  497. mirascope/core/base/_utils/_convert_base_type_to_base_tool.py +0 -24
  498. mirascope/core/base/_utils/_convert_function_to_base_tool.py +0 -126
  499. mirascope/core/base/_utils/_default_tool_docstring.py +0 -6
  500. mirascope/core/base/_utils/_extract_tool_return.py +0 -36
  501. mirascope/core/base/_utils/_format_template.py +0 -29
  502. mirascope/core/base/_utils/_get_audio_type.py +0 -18
  503. mirascope/core/base/_utils/_get_fn_args.py +0 -14
  504. mirascope/core/base/_utils/_get_image_type.py +0 -26
  505. mirascope/core/base/_utils/_get_metadata.py +0 -17
  506. mirascope/core/base/_utils/_get_possible_user_message_param.py +0 -21
  507. mirascope/core/base/_utils/_get_prompt_template.py +0 -25
  508. mirascope/core/base/_utils/_get_template_values.py +0 -52
  509. mirascope/core/base/_utils/_get_template_variables.py +0 -38
  510. mirascope/core/base/_utils/_json_mode_content.py +0 -15
  511. mirascope/core/base/_utils/_parse_content_template.py +0 -157
  512. mirascope/core/base/_utils/_parse_prompt_messages.py +0 -51
  513. mirascope/core/base/_utils/_protocols.py +0 -215
  514. mirascope/core/base/_utils/_setup_call.py +0 -64
  515. mirascope/core/base/_utils/_setup_extract_tool.py +0 -24
  516. mirascope/core/base/call_params.py +0 -6
  517. mirascope/core/base/call_response.py +0 -189
  518. mirascope/core/base/call_response_chunk.py +0 -91
  519. mirascope/core/base/dynamic_config.py +0 -55
  520. mirascope/core/base/message_param.py +0 -61
  521. mirascope/core/base/metadata.py +0 -13
  522. mirascope/core/base/prompt.py +0 -415
  523. mirascope/core/base/stream.py +0 -365
  524. mirascope/core/base/structured_stream.py +0 -251
  525. mirascope/core/base/tool.py +0 -126
  526. mirascope/core/base/toolkit.py +0 -146
  527. mirascope/core/cohere/__init__.py +0 -21
  528. mirascope/core/cohere/_call.py +0 -71
  529. mirascope/core/cohere/_utils/__init__.py +0 -16
  530. mirascope/core/cohere/_utils/_calculate_cost.py +0 -39
  531. mirascope/core/cohere/_utils/_convert_message_params.py +0 -31
  532. mirascope/core/cohere/_utils/_get_json_output.py +0 -31
  533. mirascope/core/cohere/_utils/_handle_stream.py +0 -33
  534. mirascope/core/cohere/_utils/_setup_call.py +0 -89
  535. mirascope/core/cohere/call_params.py +0 -57
  536. mirascope/core/cohere/call_response.py +0 -167
  537. mirascope/core/cohere/call_response_chunk.py +0 -101
  538. mirascope/core/cohere/dynamic_config.py +0 -24
  539. mirascope/core/cohere/py.typed +0 -0
  540. mirascope/core/cohere/stream.py +0 -113
  541. mirascope/core/cohere/tool.py +0 -92
  542. mirascope/core/gemini/__init__.py +0 -21
  543. mirascope/core/gemini/_call.py +0 -71
  544. mirascope/core/gemini/_utils/__init__.py +0 -16
  545. mirascope/core/gemini/_utils/_calculate_cost.py +0 -8
  546. mirascope/core/gemini/_utils/_convert_message_params.py +0 -74
  547. mirascope/core/gemini/_utils/_get_json_output.py +0 -33
  548. mirascope/core/gemini/_utils/_handle_stream.py +0 -33
  549. mirascope/core/gemini/_utils/_setup_call.py +0 -68
  550. mirascope/core/gemini/call_params.py +0 -28
  551. mirascope/core/gemini/call_response.py +0 -173
  552. mirascope/core/gemini/call_response_chunk.py +0 -85
  553. mirascope/core/gemini/dynamic_config.py +0 -26
  554. mirascope/core/gemini/stream.py +0 -121
  555. mirascope/core/gemini/tool.py +0 -104
  556. mirascope/core/groq/__init__.py +0 -21
  557. mirascope/core/groq/_call.py +0 -71
  558. mirascope/core/groq/_utils/__init__.py +0 -16
  559. mirascope/core/groq/_utils/_calculate_cost.py +0 -68
  560. mirascope/core/groq/_utils/_convert_message_params.py +0 -23
  561. mirascope/core/groq/_utils/_get_json_output.py +0 -27
  562. mirascope/core/groq/_utils/_handle_stream.py +0 -121
  563. mirascope/core/groq/_utils/_setup_call.py +0 -67
  564. mirascope/core/groq/call_params.py +0 -51
  565. mirascope/core/groq/call_response.py +0 -160
  566. mirascope/core/groq/call_response_chunk.py +0 -89
  567. mirascope/core/groq/dynamic_config.py +0 -26
  568. mirascope/core/groq/py.typed +0 -0
  569. mirascope/core/groq/stream.py +0 -136
  570. mirascope/core/groq/tool.py +0 -79
  571. mirascope/core/litellm/__init__.py +0 -6
  572. mirascope/core/litellm/_call.py +0 -73
  573. mirascope/core/litellm/_utils/__init__.py +0 -5
  574. mirascope/core/litellm/_utils/_setup_call.py +0 -46
  575. mirascope/core/litellm/py.typed +0 -0
  576. mirascope/core/mistral/__init__.py +0 -21
  577. mirascope/core/mistral/_call.py +0 -69
  578. mirascope/core/mistral/_utils/__init__.py +0 -16
  579. mirascope/core/mistral/_utils/_calculate_cost.py +0 -47
  580. mirascope/core/mistral/_utils/_convert_message_params.py +0 -23
  581. mirascope/core/mistral/_utils/_get_json_output.py +0 -28
  582. mirascope/core/mistral/_utils/_handle_stream.py +0 -121
  583. mirascope/core/mistral/_utils/_setup_call.py +0 -86
  584. mirascope/core/mistral/call_params.py +0 -36
  585. mirascope/core/mistral/call_response.py +0 -156
  586. mirascope/core/mistral/call_response_chunk.py +0 -84
  587. mirascope/core/mistral/dynamic_config.py +0 -24
  588. mirascope/core/mistral/py.typed +0 -0
  589. mirascope/core/mistral/stream.py +0 -117
  590. mirascope/core/mistral/tool.py +0 -77
  591. mirascope/core/openai/__init__.py +0 -21
  592. mirascope/core/openai/_call.py +0 -71
  593. mirascope/core/openai/_utils/__init__.py +0 -16
  594. mirascope/core/openai/_utils/_calculate_cost.py +0 -110
  595. mirascope/core/openai/_utils/_convert_message_params.py +0 -53
  596. mirascope/core/openai/_utils/_get_json_output.py +0 -27
  597. mirascope/core/openai/_utils/_handle_stream.py +0 -125
  598. mirascope/core/openai/_utils/_setup_call.py +0 -62
  599. mirascope/core/openai/call_params.py +0 -54
  600. mirascope/core/openai/call_response.py +0 -162
  601. mirascope/core/openai/call_response_chunk.py +0 -90
  602. mirascope/core/openai/dynamic_config.py +0 -26
  603. mirascope/core/openai/py.typed +0 -0
  604. mirascope/core/openai/stream.py +0 -148
  605. mirascope/core/openai/tool.py +0 -79
  606. mirascope/core/py.typed +0 -0
  607. mirascope/integrations/__init__.py +0 -20
  608. mirascope/integrations/_middleware_factory.py +0 -277
  609. mirascope/integrations/langfuse/__init__.py +0 -3
  610. mirascope/integrations/langfuse/_utils.py +0 -114
  611. mirascope/integrations/langfuse/_with_langfuse.py +0 -71
  612. mirascope/integrations/logfire/__init__.py +0 -3
  613. mirascope/integrations/logfire/_utils.py +0 -188
  614. mirascope/integrations/logfire/_with_logfire.py +0 -60
  615. mirascope/integrations/otel/__init__.py +0 -5
  616. mirascope/integrations/otel/_utils.py +0 -268
  617. mirascope/integrations/otel/_with_hyperdx.py +0 -61
  618. mirascope/integrations/otel/_with_otel.py +0 -60
  619. mirascope/integrations/tenacity.py +0 -50
  620. mirascope/py.typed +0 -0
  621. mirascope/v0/__init__.py +0 -43
  622. mirascope/v0/anthropic.py +0 -54
  623. mirascope/v0/base/__init__.py +0 -12
  624. mirascope/v0/base/calls.py +0 -118
  625. mirascope/v0/base/extractors.py +0 -122
  626. mirascope/v0/base/ops_utils.py +0 -207
  627. mirascope/v0/base/prompts.py +0 -48
  628. mirascope/v0/base/types.py +0 -14
  629. mirascope/v0/base/utils.py +0 -21
  630. mirascope/v0/openai.py +0 -54
  631. mirascope-1.0.5.dist-info/METADATA +0 -519
  632. mirascope-1.0.5.dist-info/RECORD +0 -198
@@ -0,0 +1,1463 @@
1
+ """Base abstract interface for provider clients."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from abc import ABC, abstractmethod
6
+ from collections.abc import Callable, Generator, Mapping, Sequence
7
+ from contextlib import contextmanager
8
+ from typing import TYPE_CHECKING, Any, ClassVar, Generic, TypeAlias, cast, overload
9
+ from typing_extensions import TypeVar, Unpack
10
+
11
+ from ...context import Context, DepsT
12
+ from ...exceptions import APIError, ProviderError
13
+ from ...formatting import FormatSpec, FormattableT
14
+ from ...messages import Message, UserContent, user
15
+ from ...responses import (
16
+ AsyncChunkIterator,
17
+ AsyncContextResponse,
18
+ AsyncContextStreamResponse,
19
+ AsyncResponse,
20
+ AsyncStreamResponse,
21
+ ChunkIterator,
22
+ ContextResponse,
23
+ ContextStreamResponse,
24
+ Response,
25
+ StreamResponse,
26
+ )
27
+ from ...tools import (
28
+ AsyncContextToolkit,
29
+ AsyncToolkit,
30
+ ContextToolkit,
31
+ Toolkit,
32
+ )
33
+
34
+ if TYPE_CHECKING:
35
+ from ...models import Params
36
+ from ..provider_id import ProviderId
37
+
38
ProviderClientT = TypeVar("ProviderClientT")
# Provider-specific SDK client type wrapped by `BaseProvider` (e.g. an OpenAI
# or Anthropic client instance).

Provider: TypeAlias = "BaseProvider[Any]"
"""Type alias for `BaseProvider` with any client type."""

ProviderErrorMap: TypeAlias = Mapping[
    type[Exception],
    "type[ProviderError] | Callable[[Exception], type[ProviderError]]",
]
"""Mapping from provider SDK exceptions to Mirascope error types.

Keys are provider SDK exception types (e.g., OpenAIError, AnthropicError).
Values can be:
    - Error type: Simple 1:1 mapping (e.g., RateLimitError)
    - Callable: Transform function returning error type based on exception details
"""
54
+
55
+
56
class BaseProvider(Generic[ProviderClientT], ABC):
    """Base abstract provider for LLM interactions.

    This class defines explicit methods for each type of call, eliminating
    the need for complex overloads in provider implementations.
    """

    id: ClassVar[ProviderId]
    """Provider identifier (e.g., "anthropic", "openai")."""

    default_scope: ClassVar[str | list[str]]
    """Default scope(s) for this provider when explicitly registered.

    Can be a single scope string or a list of scopes. For example:
        - "anthropic/" - Single scope
        - ["anthropic/", "openai/"] - Multiple scopes (e.g., for AWS Bedrock)
    """

    error_map: ClassVar[ProviderErrorMap]
    """Mapping from provider SDK exceptions to Mirascope error types.

    Values can be:
        - Error type: Simple 1:1 mapping (e.g., AnthropicRateLimitError -> RateLimitError)
        - Callable: Transform function returning error type based on exception details
          (e.g., lambda e: NotFoundError if e.code == "model_not_found" else BadRequestError)

    The mapping is walked via the exception's MRO, allowing both specific error handling
    and fallback to base SDK error types (e.g., AnthropicError -> ProviderError).
    """

    # Underlying provider SDK client instance used to issue API calls.
    client: ProviderClientT
87
+
88
+ @contextmanager
89
+ def _wrap_errors(self) -> Generator[None, None, None]:
90
+ """Wrap provider API calls and convert errors to Mirascope exceptions.
91
+
92
+ Walks the exception's MRO to find the first matching error type in the
93
+ provider's error_map, allowing both specific error handling and fallback
94
+ to base SDK error types (e.g., AnthropicError -> ProviderError).
95
+ """
96
+ try:
97
+ yield
98
+ except Exception as e:
99
+ # Walk MRO to find first matching error type in provider's error_map
100
+ for error_class in type(e).__mro__:
101
+ if error_class in self.error_map:
102
+ error_type_or_fn = self.error_map[error_class]
103
+
104
+ if isinstance(error_type_or_fn, type):
105
+ error_type = cast(type[ProviderError], error_type_or_fn)
106
+ else:
107
+ error_type = error_type_or_fn(e)
108
+
109
+ # Construct Mirascope error with metadata
110
+ if issubclass(error_type, APIError):
111
+ error: ProviderError = error_type(
112
+ str(e),
113
+ provider=self.id,
114
+ status_code=self.get_error_status(e),
115
+ original_exception=e,
116
+ )
117
+ else:
118
+ error = error_type(
119
+ str(e),
120
+ provider=self.id,
121
+ original_exception=e,
122
+ )
123
+ raise error from e
124
+
125
+ # Not in error_map - not a provider error, re-raise as-is
126
+ raise
127
+
128
+ def _wrap_iterator_errors(self, iterator: ChunkIterator) -> ChunkIterator:
129
+ """Wrap sync chunk iterator to handle errors during iteration."""
130
+ # TODO: Consider moving this logic into BaseSyncStreamResponse if appropriate.
131
+ with self._wrap_errors():
132
+ yield from iterator
133
+
134
+ async def _wrap_async_iterator_errors(
135
+ self, iterator: AsyncChunkIterator
136
+ ) -> AsyncChunkIterator:
137
+ """Wrap async chunk iterator to handle errors during iteration."""
138
+ # TODO: Consider moving this logic into BaseAsyncStreamResponse if appropriate.
139
+ with self._wrap_errors():
140
+ async for chunk in iterator:
141
+ yield chunk
142
+
143
+ @overload
144
+ def call(
145
+ self,
146
+ *,
147
+ model_id: str,
148
+ messages: Sequence[Message],
149
+ toolkit: Toolkit,
150
+ format: None = None,
151
+ **params: Unpack[Params],
152
+ ) -> Response:
153
+ """Generate an `llm.Response` without a response format."""
154
+ ...
155
+
156
+ @overload
157
+ def call(
158
+ self,
159
+ *,
160
+ model_id: str,
161
+ messages: Sequence[Message],
162
+ toolkit: Toolkit,
163
+ format: FormatSpec[FormattableT],
164
+ **params: Unpack[Params],
165
+ ) -> Response[FormattableT]:
166
+ """Generate an `llm.Response` with a response format."""
167
+ ...
168
+
169
+ @overload
170
+ def call(
171
+ self,
172
+ *,
173
+ model_id: str,
174
+ messages: Sequence[Message],
175
+ toolkit: Toolkit,
176
+ format: FormatSpec[FormattableT] | None,
177
+ **params: Unpack[Params],
178
+ ) -> Response | Response[FormattableT]:
179
+ """Generate an `llm.Response` with an optional response format."""
180
+ ...
181
+
182
+ def call(
183
+ self,
184
+ *,
185
+ model_id: str,
186
+ messages: Sequence[Message],
187
+ toolkit: Toolkit,
188
+ format: FormatSpec[FormattableT] | None = None,
189
+ **params: Unpack[Params],
190
+ ) -> Response | Response[FormattableT]:
191
+ """Generate an `llm.Response` by synchronously calling this client's LLM provider.
192
+
193
+ Args:
194
+ model_id: Model identifier to use.
195
+ messages: Messages to send to the LLM.
196
+ tools: Optional tools that the model may invoke.
197
+ format: Optional response format specifier.
198
+ **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
199
+
200
+ Returns:
201
+ An `llm.Response` object containing the LLM-generated content.
202
+ """
203
+ with self._wrap_errors():
204
+ return self._call(
205
+ model_id=model_id,
206
+ messages=messages,
207
+ toolkit=toolkit,
208
+ format=format,
209
+ **params,
210
+ )
211
+
212
+ @abstractmethod
213
+ def _call(
214
+ self,
215
+ *,
216
+ model_id: str,
217
+ messages: Sequence[Message],
218
+ toolkit: Toolkit,
219
+ format: FormatSpec[FormattableT] | None = None,
220
+ **params: Unpack[Params],
221
+ ) -> Response | Response[FormattableT]:
222
+ """Implementation for call(). Subclasses override this method."""
223
+ ...
224
+
225
+ @overload
226
+ def context_call(
227
+ self,
228
+ *,
229
+ ctx: Context[DepsT],
230
+ model_id: str,
231
+ messages: Sequence[Message],
232
+ toolkit: ContextToolkit[DepsT],
233
+ format: None = None,
234
+ **params: Unpack[Params],
235
+ ) -> ContextResponse[DepsT, None]:
236
+ """Generate an `llm.ContextResponse` without a response format."""
237
+ ...
238
+
239
+ @overload
240
+ def context_call(
241
+ self,
242
+ *,
243
+ ctx: Context[DepsT],
244
+ model_id: str,
245
+ messages: Sequence[Message],
246
+ toolkit: ContextToolkit[DepsT],
247
+ format: FormatSpec[FormattableT],
248
+ **params: Unpack[Params],
249
+ ) -> ContextResponse[DepsT, FormattableT]:
250
+ """Generate an `llm.ContextResponse` with a response format."""
251
+ ...
252
+
253
+ @overload
254
+ def context_call(
255
+ self,
256
+ *,
257
+ ctx: Context[DepsT],
258
+ model_id: str,
259
+ messages: Sequence[Message],
260
+ toolkit: ContextToolkit[DepsT],
261
+ format: FormatSpec[FormattableT] | None,
262
+ **params: Unpack[Params],
263
+ ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
264
+ """Generate an `llm.ContextResponse` with an optional response format."""
265
+ ...
266
+
267
+ def context_call(
268
+ self,
269
+ *,
270
+ ctx: Context[DepsT],
271
+ model_id: str,
272
+ messages: Sequence[Message],
273
+ toolkit: ContextToolkit[DepsT],
274
+ format: FormatSpec[FormattableT] | None = None,
275
+ **params: Unpack[Params],
276
+ ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
277
+ """Generate an `llm.ContextResponse` by synchronously calling this client's LLM provider.
278
+
279
+ Args:
280
+ ctx: Context object with dependencies for tools.
281
+ model_id: Model identifier to use.
282
+ messages: Messages to send to the LLM.
283
+ tools: Optional tools that the model may invoke.
284
+ format: Optional response format specifier.
285
+ **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
286
+
287
+ Returns:
288
+ An `llm.ContextResponse` object containing the LLM-generated content.
289
+ """
290
+ with self._wrap_errors():
291
+ return self._context_call(
292
+ ctx=ctx,
293
+ model_id=model_id,
294
+ messages=messages,
295
+ toolkit=toolkit,
296
+ format=format,
297
+ **params,
298
+ )
299
+
300
+ @abstractmethod
301
+ def _context_call(
302
+ self,
303
+ *,
304
+ ctx: Context[DepsT],
305
+ model_id: str,
306
+ messages: Sequence[Message],
307
+ toolkit: ContextToolkit[DepsT],
308
+ format: FormatSpec[FormattableT] | None = None,
309
+ **params: Unpack[Params],
310
+ ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
311
+ """Implementation for context_call(). Subclasses override this method."""
312
+ ...
313
+
314
+ @overload
315
+ async def call_async(
316
+ self,
317
+ *,
318
+ model_id: str,
319
+ messages: Sequence[Message],
320
+ toolkit: AsyncToolkit,
321
+ format: None = None,
322
+ **params: Unpack[Params],
323
+ ) -> AsyncResponse:
324
+ """Generate an `llm.AsyncResponse` without a response format."""
325
+ ...
326
+
327
+ @overload
328
+ async def call_async(
329
+ self,
330
+ *,
331
+ model_id: str,
332
+ messages: Sequence[Message],
333
+ toolkit: AsyncToolkit,
334
+ format: FormatSpec[FormattableT],
335
+ **params: Unpack[Params],
336
+ ) -> AsyncResponse[FormattableT]:
337
+ """Generate an `llm.AsyncResponse` with a response format."""
338
+ ...
339
+
340
+ @overload
341
+ async def call_async(
342
+ self,
343
+ *,
344
+ model_id: str,
345
+ messages: Sequence[Message],
346
+ toolkit: AsyncToolkit,
347
+ format: FormatSpec[FormattableT] | None,
348
+ **params: Unpack[Params],
349
+ ) -> AsyncResponse | AsyncResponse[FormattableT]:
350
+ """Generate an `llm.AsyncResponse` with an optional response format."""
351
+ ...
352
+
353
+ async def call_async(
354
+ self,
355
+ *,
356
+ model_id: str,
357
+ messages: Sequence[Message],
358
+ toolkit: AsyncToolkit,
359
+ format: FormatSpec[FormattableT] | None = None,
360
+ **params: Unpack[Params],
361
+ ) -> AsyncResponse | AsyncResponse[FormattableT]:
362
+ """Generate an `llm.AsyncResponse` by asynchronously calling this client's LLM provider.
363
+
364
+ Args:
365
+ model_id: Model identifier to use.
366
+ messages: Messages to send to the LLM.
367
+ tools: Optional tools that the model may invoke.
368
+ format: Optional response format specifier.
369
+ **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
370
+
371
+ Returns:
372
+ An `llm.AsyncResponse` object containing the LLM-generated content.
373
+ """
374
+ with self._wrap_errors():
375
+ return await self._call_async(
376
+ model_id=model_id,
377
+ messages=messages,
378
+ toolkit=toolkit,
379
+ format=format,
380
+ **params,
381
+ )
382
+
383
+ @abstractmethod
384
+ async def _call_async(
385
+ self,
386
+ *,
387
+ model_id: str,
388
+ messages: Sequence[Message],
389
+ toolkit: AsyncToolkit,
390
+ format: FormatSpec[FormattableT] | None = None,
391
+ **params: Unpack[Params],
392
+ ) -> AsyncResponse | AsyncResponse[FormattableT]:
393
+ """Implementation for call_async(). Subclasses override this method."""
394
+ ...
395
+
396
+ @overload
397
+ async def context_call_async(
398
+ self,
399
+ *,
400
+ ctx: Context[DepsT],
401
+ model_id: str,
402
+ messages: Sequence[Message],
403
+ toolkit: AsyncContextToolkit[DepsT],
404
+ format: None = None,
405
+ **params: Unpack[Params],
406
+ ) -> AsyncContextResponse[DepsT, None]:
407
+ """Generate an `llm.AsyncContextResponse` without a response format."""
408
+ ...
409
+
410
+ @overload
411
+ async def context_call_async(
412
+ self,
413
+ *,
414
+ ctx: Context[DepsT],
415
+ model_id: str,
416
+ messages: Sequence[Message],
417
+ toolkit: AsyncContextToolkit[DepsT],
418
+ format: FormatSpec[FormattableT],
419
+ **params: Unpack[Params],
420
+ ) -> AsyncContextResponse[DepsT, FormattableT]:
421
+ """Generate an `llm.AsyncContextResponse` with a response format."""
422
+ ...
423
+
424
+ @overload
425
+ async def context_call_async(
426
+ self,
427
+ *,
428
+ ctx: Context[DepsT],
429
+ model_id: str,
430
+ messages: Sequence[Message],
431
+ toolkit: AsyncContextToolkit[DepsT],
432
+ format: FormatSpec[FormattableT] | None,
433
+ **params: Unpack[Params],
434
+ ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
435
+ """Generate an `llm.AsyncContextResponse` with an optional response format."""
436
+ ...
437
+
438
+ async def context_call_async(
439
+ self,
440
+ *,
441
+ ctx: Context[DepsT],
442
+ model_id: str,
443
+ messages: Sequence[Message],
444
+ toolkit: AsyncContextToolkit[DepsT],
445
+ format: FormatSpec[FormattableT] | None = None,
446
+ **params: Unpack[Params],
447
+ ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
448
+ """Generate an `llm.AsyncContextResponse` by asynchronously calling this client's LLM provider.
449
+
450
+ Args:
451
+ ctx: Context object with dependencies for tools.
452
+ model_id: Model identifier to use.
453
+ messages: Messages to send to the LLM.
454
+ tools: Optional tools that the model may invoke.
455
+ format: Optional response format specifier.
456
+ **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
457
+
458
+ Returns:
459
+ An `llm.AsyncContextResponse` object containing the LLM-generated content.
460
+ """
461
+ with self._wrap_errors():
462
+ return await self._context_call_async(
463
+ ctx=ctx,
464
+ model_id=model_id,
465
+ messages=messages,
466
+ toolkit=toolkit,
467
+ format=format,
468
+ **params,
469
+ )
470
+
471
+ @abstractmethod
472
+ async def _context_call_async(
473
+ self,
474
+ *,
475
+ ctx: Context[DepsT],
476
+ model_id: str,
477
+ messages: Sequence[Message],
478
+ toolkit: AsyncContextToolkit[DepsT],
479
+ format: FormatSpec[FormattableT] | None = None,
480
+ **params: Unpack[Params],
481
+ ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
482
+ """Implementation for context_call_async(). Subclasses override this method."""
483
+ ...
484
+
485
+ @overload
486
+ def stream(
487
+ self,
488
+ *,
489
+ model_id: str,
490
+ messages: Sequence[Message],
491
+ toolkit: Toolkit,
492
+ format: None = None,
493
+ **params: Unpack[Params],
494
+ ) -> StreamResponse:
495
+ """Stream an `llm.StreamResponse` without a response format."""
496
+ ...
497
+
498
+ @overload
499
+ def stream(
500
+ self,
501
+ *,
502
+ model_id: str,
503
+ messages: Sequence[Message],
504
+ toolkit: Toolkit,
505
+ format: FormatSpec[FormattableT],
506
+ **params: Unpack[Params],
507
+ ) -> StreamResponse[FormattableT]:
508
+ """Stream an `llm.StreamResponse` with a response format."""
509
+ ...
510
+
511
+ @overload
512
+ def stream(
513
+ self,
514
+ *,
515
+ model_id: str,
516
+ messages: Sequence[Message],
517
+ toolkit: Toolkit,
518
+ format: FormatSpec[FormattableT] | None,
519
+ **params: Unpack[Params],
520
+ ) -> StreamResponse | StreamResponse[FormattableT]:
521
+ """Stream an `llm.StreamResponse` with an optional response format."""
522
+ ...
523
+
524
+ def stream(
525
+ self,
526
+ *,
527
+ model_id: str,
528
+ messages: Sequence[Message],
529
+ toolkit: Toolkit,
530
+ format: FormatSpec[FormattableT] | None = None,
531
+ **params: Unpack[Params],
532
+ ) -> StreamResponse | StreamResponse[FormattableT]:
533
+ """Generate an `llm.StreamResponse` by synchronously streaming from this client's LLM provider.
534
+
535
+ Args:
536
+ model_id: Model identifier to use.
537
+ messages: Messages to send to the LLM.
538
+ tools: Optional tools that the model may invoke.
539
+ format: Optional response format specifier.
540
+ **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
541
+
542
+ Returns:
543
+ An `llm.StreamResponse` object for iterating over the LLM-generated content.
544
+ """
545
+ with self._wrap_errors():
546
+ stream_response = self._stream(
547
+ model_id=model_id,
548
+ messages=messages,
549
+ toolkit=toolkit,
550
+ format=format,
551
+ **params,
552
+ )
553
+ stream_response._chunk_iterator = self._wrap_iterator_errors( # pyright: ignore[reportPrivateUsage]
554
+ stream_response._chunk_iterator # pyright: ignore[reportPrivateUsage]
555
+ )
556
+ return stream_response
557
+
558
+ @abstractmethod
559
+ def _stream(
560
+ self,
561
+ *,
562
+ model_id: str,
563
+ messages: Sequence[Message],
564
+ toolkit: Toolkit,
565
+ format: FormatSpec[FormattableT] | None = None,
566
+ **params: Unpack[Params],
567
+ ) -> StreamResponse | StreamResponse[FormattableT]:
568
+ """Implementation for stream(). Subclasses override this method."""
569
+ ...
570
+
571
+ @overload
572
+ def context_stream(
573
+ self,
574
+ *,
575
+ ctx: Context[DepsT],
576
+ model_id: str,
577
+ messages: Sequence[Message],
578
+ toolkit: ContextToolkit[DepsT],
579
+ format: None = None,
580
+ **params: Unpack[Params],
581
+ ) -> ContextStreamResponse[DepsT, None]:
582
+ """Stream an `llm.ContextStreamResponse` without a response format."""
583
+ ...
584
+
585
    @overload
    def context_stream(
        self,
        *,
        ctx: Context[DepsT],
        model_id: str,
        messages: Sequence[Message],
        toolkit: ContextToolkit[DepsT],
        format: FormatSpec[FormattableT],
        **params: Unpack[Params],
    ) -> ContextStreamResponse[DepsT, FormattableT]:
        """Stream an `llm.ContextStreamResponse` with a response format."""
        ...

    @overload
    def context_stream(
        self,
        *,
        ctx: Context[DepsT],
        model_id: str,
        messages: Sequence[Message],
        toolkit: ContextToolkit[DepsT],
        format: FormatSpec[FormattableT] | None,
        **params: Unpack[Params],
    ) -> (
        ContextStreamResponse[DepsT, None] | ContextStreamResponse[DepsT, FormattableT]
    ):
        """Stream an `llm.ContextStreamResponse` with an optional response format."""
        ...

    def context_stream(
        self,
        *,
        ctx: Context[DepsT],
        model_id: str,
        messages: Sequence[Message],
        toolkit: ContextToolkit[DepsT],
        format: FormatSpec[FormattableT] | None = None,
        **params: Unpack[Params],
    ) -> (
        ContextStreamResponse[DepsT, None] | ContextStreamResponse[DepsT, FormattableT]
    ):
        """Generate an `llm.ContextStreamResponse` by synchronously streaming from this client's LLM provider.

        Args:
            ctx: Context object with dependencies for tools.
            model_id: Model identifier to use.
            messages: Messages to send to the LLM.
            toolkit: Context toolkit of tools that the model may invoke.
            format: Optional response format specifier.
            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.

        Returns:
            An `llm.ContextStreamResponse` object for iterating over the LLM-generated content.
        """
        # Translate provider-specific exceptions raised while opening the stream.
        with self._wrap_errors():
            stream_response = self._context_stream(
                ctx=ctx,
                model_id=model_id,
                messages=messages,
                toolkit=toolkit,
                format=format,
                **params,
            )
            # Also wrap the chunk iterator so exceptions raised mid-stream
            # receive the same error translation as the initial request.
            stream_response._chunk_iterator = self._wrap_iterator_errors(  # pyright: ignore[reportPrivateUsage]
                stream_response._chunk_iterator  # pyright: ignore[reportPrivateUsage]
            )
            return stream_response
653
+
654
    @abstractmethod
    def _context_stream(
        self,
        *,
        ctx: Context[DepsT],
        model_id: str,
        messages: Sequence[Message],
        toolkit: ContextToolkit[DepsT],
        format: FormatSpec[FormattableT] | None = None,
        **params: Unpack[Params],
    ) -> (
        ContextStreamResponse[DepsT, None] | ContextStreamResponse[DepsT, FormattableT]
    ):
        """Provider-specific implementation for `context_stream()`.

        Subclasses override this method. Callers should use `context_stream()`,
        which adds error translation around this call and its chunk iterator.
        """
        ...
669
+
670
    @overload
    async def stream_async(
        self,
        *,
        model_id: str,
        messages: Sequence[Message],
        toolkit: AsyncToolkit,
        format: None = None,
        **params: Unpack[Params],
    ) -> AsyncStreamResponse:
        """Stream an `llm.AsyncStreamResponse` without a response format."""
        ...

    @overload
    async def stream_async(
        self,
        *,
        model_id: str,
        messages: Sequence[Message],
        toolkit: AsyncToolkit,
        format: FormatSpec[FormattableT],
        **params: Unpack[Params],
    ) -> AsyncStreamResponse[FormattableT]:
        """Stream an `llm.AsyncStreamResponse` with a response format."""
        ...

    @overload
    async def stream_async(
        self,
        *,
        model_id: str,
        messages: Sequence[Message],
        toolkit: AsyncToolkit,
        format: FormatSpec[FormattableT] | None,
        **params: Unpack[Params],
    ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
        """Stream an `llm.AsyncStreamResponse` with an optional response format."""
        ...

    async def stream_async(
        self,
        *,
        model_id: str,
        messages: Sequence[Message],
        toolkit: AsyncToolkit,
        format: FormatSpec[FormattableT] | None = None,
        **params: Unpack[Params],
    ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
        """Generate an `llm.AsyncStreamResponse` by asynchronously streaming from this client's LLM provider.

        Args:
            model_id: Model identifier to use.
            messages: Messages to send to the LLM.
            toolkit: Async toolkit of tools that the model may invoke.
            format: Optional response format specifier.
            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.

        Returns:
            An `llm.AsyncStreamResponse` object for asynchronously iterating over the LLM-generated content.
        """
        # Translate provider-specific exceptions raised while opening the stream.
        with self._wrap_errors():
            stream_response = await self._stream_async(
                model_id=model_id,
                messages=messages,
                toolkit=toolkit,
                format=format,
                **params,
            )
            # Also wrap the async chunk iterator so exceptions raised
            # mid-stream receive the same error translation.
            stream_response._chunk_iterator = self._wrap_async_iterator_errors(  # pyright: ignore[reportPrivateUsage]
                stream_response._chunk_iterator  # pyright: ignore[reportPrivateUsage]
            )
            return stream_response
742
+
743
    @abstractmethod
    async def _stream_async(
        self,
        *,
        model_id: str,
        messages: Sequence[Message],
        toolkit: AsyncToolkit,
        format: FormatSpec[FormattableT] | None = None,
        **params: Unpack[Params],
    ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
        """Provider-specific implementation for `stream_async()`.

        Subclasses override this method. Callers should use `stream_async()`,
        which adds error translation around this call and its chunk iterator.
        """
        ...
755
+
756
    @overload
    async def context_stream_async(
        self,
        *,
        ctx: Context[DepsT],
        model_id: str,
        messages: Sequence[Message],
        toolkit: AsyncContextToolkit[DepsT],
        format: None = None,
        **params: Unpack[Params],
    ) -> AsyncContextStreamResponse[DepsT, None]:
        """Stream an `llm.AsyncContextStreamResponse` without a response format."""
        ...

    @overload
    async def context_stream_async(
        self,
        *,
        ctx: Context[DepsT],
        model_id: str,
        messages: Sequence[Message],
        toolkit: AsyncContextToolkit[DepsT],
        format: FormatSpec[FormattableT],
        **params: Unpack[Params],
    ) -> AsyncContextStreamResponse[DepsT, FormattableT]:
        """Stream an `llm.AsyncContextStreamResponse` with a response format."""
        ...

    @overload
    async def context_stream_async(
        self,
        *,
        ctx: Context[DepsT],
        model_id: str,
        messages: Sequence[Message],
        toolkit: AsyncContextToolkit[DepsT],
        format: FormatSpec[FormattableT] | None,
        **params: Unpack[Params],
    ) -> (
        AsyncContextStreamResponse[DepsT, None]
        | AsyncContextStreamResponse[DepsT, FormattableT]
    ):
        """Stream an `llm.AsyncContextStreamResponse` with an optional response format."""
        ...

    async def context_stream_async(
        self,
        *,
        ctx: Context[DepsT],
        model_id: str,
        messages: Sequence[Message],
        toolkit: AsyncContextToolkit[DepsT],
        format: FormatSpec[FormattableT] | None = None,
        **params: Unpack[Params],
    ) -> (
        AsyncContextStreamResponse[DepsT, None]
        | AsyncContextStreamResponse[DepsT, FormattableT]
    ):
        """Generate an `llm.AsyncContextStreamResponse` by asynchronously streaming from this client's LLM provider.

        Args:
            ctx: Context object with dependencies for tools.
            model_id: Model identifier to use.
            messages: Messages to send to the LLM.
            toolkit: Async context toolkit of tools that the model may invoke.
            format: Optional response format specifier.
            **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.

        Returns:
            An `llm.AsyncContextStreamResponse` object for asynchronously iterating over the LLM-generated content.
        """
        # Translate provider-specific exceptions raised while opening the stream.
        with self._wrap_errors():
            stream_response = await self._context_stream_async(
                ctx=ctx,
                model_id=model_id,
                messages=messages,
                toolkit=toolkit,
                format=format,
                **params,
            )
            # Also wrap the async chunk iterator so exceptions raised
            # mid-stream receive the same error translation.
            stream_response._chunk_iterator = self._wrap_async_iterator_errors(  # pyright: ignore[reportPrivateUsage]
                stream_response._chunk_iterator  # pyright: ignore[reportPrivateUsage]
            )
            return stream_response
840
+
841
    @abstractmethod
    async def _context_stream_async(
        self,
        *,
        ctx: Context[DepsT],
        model_id: str,
        messages: Sequence[Message],
        toolkit: AsyncContextToolkit[DepsT],
        format: FormatSpec[FormattableT] | None = None,
        **params: Unpack[Params],
    ) -> (
        AsyncContextStreamResponse[DepsT, None]
        | AsyncContextStreamResponse[DepsT, FormattableT]
    ):
        """Provider-specific implementation for `context_stream_async()`.

        Subclasses override this method. Callers should use
        `context_stream_async()`, which adds error translation around this
        call and its chunk iterator.
        """
        ...
857
+
858
+ @overload
859
+ def resume(
860
+ self,
861
+ *,
862
+ model_id: str,
863
+ response: Response,
864
+ content: UserContent,
865
+ **params: Unpack[Params],
866
+ ) -> Response:
867
+ """Resume an `llm.Response` without a response format."""
868
+ ...
869
+
870
+ @overload
871
+ def resume(
872
+ self,
873
+ *,
874
+ model_id: str,
875
+ response: Response[FormattableT],
876
+ content: UserContent,
877
+ **params: Unpack[Params],
878
+ ) -> Response[FormattableT]:
879
+ """Resume an `llm.Response` with a response format."""
880
+ ...
881
+
882
+ @overload
883
+ def resume(
884
+ self,
885
+ *,
886
+ model_id: str,
887
+ response: Response | Response[FormattableT],
888
+ content: UserContent,
889
+ **params: Unpack[Params],
890
+ ) -> Response | Response[FormattableT]:
891
+ """Resume an `llm.Response` with an optional response format."""
892
+ ...
893
+
894
+ def resume(
895
+ self,
896
+ *,
897
+ model_id: str,
898
+ response: Response | Response[FormattableT],
899
+ content: UserContent,
900
+ **params: Unpack[Params],
901
+ ) -> Response | Response[FormattableT]:
902
+ """Generate a new `llm.Response` by extending another response's messages with additional user content.
903
+
904
+ Args:
905
+ model_id: Model identifier to use.
906
+ response: Previous response to extend.
907
+ content: Additional user content to append.
908
+ **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
909
+
910
+ Returns:
911
+ A new `llm.Response` object containing the extended conversation.
912
+
913
+ Note:
914
+ Uses the previous response's tools and output format. This base method wraps
915
+ around calling `client.call()` with a messages array derived from the response
916
+ messages. However, clients may override this with first-class resume logic.
917
+ """
918
+ messages = response.messages + [user(content)]
919
+ return self.call(
920
+ model_id=model_id,
921
+ messages=messages,
922
+ toolkit=response.toolkit,
923
+ format=response.format,
924
+ **params,
925
+ )
926
+
927
+ @overload
928
+ async def resume_async(
929
+ self,
930
+ *,
931
+ model_id: str,
932
+ response: AsyncResponse,
933
+ content: UserContent,
934
+ **params: Unpack[Params],
935
+ ) -> AsyncResponse:
936
+ """Resume an `llm.AsyncResponse` without a response format."""
937
+ ...
938
+
939
+ @overload
940
+ async def resume_async(
941
+ self,
942
+ *,
943
+ model_id: str,
944
+ response: AsyncResponse[FormattableT],
945
+ content: UserContent,
946
+ **params: Unpack[Params],
947
+ ) -> AsyncResponse[FormattableT]:
948
+ """Resume an `llm.AsyncResponse` with a response format."""
949
+ ...
950
+
951
+ @overload
952
+ async def resume_async(
953
+ self,
954
+ *,
955
+ model_id: str,
956
+ response: AsyncResponse | AsyncResponse[FormattableT],
957
+ content: UserContent,
958
+ **params: Unpack[Params],
959
+ ) -> AsyncResponse | AsyncResponse[FormattableT]:
960
+ """Resume an `llm.AsyncResponse` with an optional response format."""
961
+ ...
962
+
963
+ async def resume_async(
964
+ self,
965
+ *,
966
+ model_id: str,
967
+ response: AsyncResponse | AsyncResponse[FormattableT],
968
+ content: UserContent,
969
+ **params: Unpack[Params],
970
+ ) -> AsyncResponse | AsyncResponse[FormattableT]:
971
+ """Generate a new `llm.AsyncResponse` by extending another response's messages with additional user content.
972
+
973
+ Args:
974
+ model_id: Model identifier to use.
975
+ response: Previous async response to extend.
976
+ content: Additional user content to append.
977
+ **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
978
+
979
+ Returns:
980
+ A new `llm.AsyncResponse` object containing the extended conversation.
981
+
982
+ Note:
983
+ Uses the previous response's tools and output format. This base method wraps
984
+ around calling `client.call_async()` with a messages array derived from the response
985
+ messages. However, clients may override this with first-class resume logic.
986
+ """
987
+ messages = response.messages + [user(content)]
988
+ return await self.call_async(
989
+ model_id=model_id,
990
+ messages=messages,
991
+ toolkit=response.toolkit,
992
+ format=response.format,
993
+ **params,
994
+ )
995
+
996
+ @overload
997
+ def context_resume(
998
+ self,
999
+ *,
1000
+ ctx: Context[DepsT],
1001
+ model_id: str,
1002
+ response: ContextResponse[DepsT, None],
1003
+ content: UserContent,
1004
+ **params: Unpack[Params],
1005
+ ) -> ContextResponse[DepsT, None]:
1006
+ """Resume an `llm.ContextResponse` without a response format."""
1007
+ ...
1008
+
1009
+ @overload
1010
+ def context_resume(
1011
+ self,
1012
+ *,
1013
+ ctx: Context[DepsT],
1014
+ model_id: str,
1015
+ response: ContextResponse[DepsT, FormattableT],
1016
+ content: UserContent,
1017
+ **params: Unpack[Params],
1018
+ ) -> ContextResponse[DepsT, FormattableT]:
1019
+ """Resume an `llm.ContextResponse` with a response format."""
1020
+ ...
1021
+
1022
+ @overload
1023
+ def context_resume(
1024
+ self,
1025
+ *,
1026
+ ctx: Context[DepsT],
1027
+ model_id: str,
1028
+ response: ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT],
1029
+ content: UserContent,
1030
+ **params: Unpack[Params],
1031
+ ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
1032
+ """Resume an `llm.ContextResponse` with an optional response format."""
1033
+ ...
1034
+
1035
+ def context_resume(
1036
+ self,
1037
+ *,
1038
+ ctx: Context[DepsT],
1039
+ model_id: str,
1040
+ response: ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT],
1041
+ content: UserContent,
1042
+ **params: Unpack[Params],
1043
+ ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
1044
+ """Generate a new `llm.ContextResponse` by extending another response's messages with additional user content.
1045
+
1046
+ Args:
1047
+ ctx: Context object with dependencies for tools.
1048
+ model_id: Model identifier to use.
1049
+ response: Previous context response to extend.
1050
+ content: Additional user content to append.
1051
+ **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
1052
+
1053
+ Returns:
1054
+ A new `llm.ContextResponse` object containing the extended conversation.
1055
+
1056
+ Note:
1057
+ Uses the previous response's tools and output format. This base method wraps
1058
+ around calling `client.context_call()` with a messages array derived from the response
1059
+ messages. However, clients may override this with first-class resume logic.
1060
+ """
1061
+ messages = response.messages + [user(content)]
1062
+ return self.context_call(
1063
+ ctx=ctx,
1064
+ model_id=model_id,
1065
+ messages=messages,
1066
+ toolkit=response.toolkit,
1067
+ format=response.format,
1068
+ **params,
1069
+ )
1070
+
1071
+ @overload
1072
+ async def context_resume_async(
1073
+ self,
1074
+ *,
1075
+ ctx: Context[DepsT],
1076
+ model_id: str,
1077
+ response: AsyncContextResponse[DepsT, None],
1078
+ content: UserContent,
1079
+ **params: Unpack[Params],
1080
+ ) -> AsyncContextResponse[DepsT, None]:
1081
+ """Resume an `llm.AsyncContextResponse` without a response format."""
1082
+ ...
1083
+
1084
+ @overload
1085
+ async def context_resume_async(
1086
+ self,
1087
+ *,
1088
+ ctx: Context[DepsT],
1089
+ model_id: str,
1090
+ response: AsyncContextResponse[DepsT, FormattableT],
1091
+ content: UserContent,
1092
+ **params: Unpack[Params],
1093
+ ) -> AsyncContextResponse[DepsT, FormattableT]:
1094
+ """Resume an `llm.AsyncContextResponse` with a response format."""
1095
+ ...
1096
+
1097
+ @overload
1098
+ async def context_resume_async(
1099
+ self,
1100
+ *,
1101
+ ctx: Context[DepsT],
1102
+ model_id: str,
1103
+ response: AsyncContextResponse[DepsT, None]
1104
+ | AsyncContextResponse[DepsT, FormattableT],
1105
+ content: UserContent,
1106
+ **params: Unpack[Params],
1107
+ ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
1108
+ """Resume an `llm.AsyncContextResponse` with an optional response format."""
1109
+ ...
1110
+
1111
+ async def context_resume_async(
1112
+ self,
1113
+ *,
1114
+ ctx: Context[DepsT],
1115
+ model_id: str,
1116
+ response: AsyncContextResponse[DepsT, None]
1117
+ | AsyncContextResponse[DepsT, FormattableT],
1118
+ content: UserContent,
1119
+ **params: Unpack[Params],
1120
+ ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
1121
+ """Generate a new `llm.AsyncContextResponse` by extending another response's messages with additional user content.
1122
+
1123
+ Args:
1124
+ ctx: Context object with dependencies for tools.
1125
+ model_id: Model identifier to use.
1126
+ response: Previous async context response to extend.
1127
+ content: Additional user content to append.
1128
+ **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
1129
+
1130
+ Returns:
1131
+ A new `llm.AsyncContextResponse` object containing the extended conversation.
1132
+
1133
+ Note:
1134
+ Uses the previous response's tools and output format. This base method wraps
1135
+ around calling `client.context_call_async()` with a messages array derived from the response
1136
+ messages. However, clients may override this with first-class resume logic.
1137
+ """
1138
+ messages = response.messages + [user(content)]
1139
+ return await self.context_call_async(
1140
+ ctx=ctx,
1141
+ model_id=model_id,
1142
+ messages=messages,
1143
+ toolkit=response.toolkit,
1144
+ format=response.format,
1145
+ **params,
1146
+ )
1147
+
1148
+ @overload
1149
+ def resume_stream(
1150
+ self,
1151
+ *,
1152
+ model_id: str,
1153
+ response: StreamResponse,
1154
+ content: UserContent,
1155
+ **params: Unpack[Params],
1156
+ ) -> StreamResponse:
1157
+ """Resume an `llm.StreamResponse` without a response format."""
1158
+ ...
1159
+
1160
+ @overload
1161
+ def resume_stream(
1162
+ self,
1163
+ *,
1164
+ model_id: str,
1165
+ response: StreamResponse[FormattableT],
1166
+ content: UserContent,
1167
+ **params: Unpack[Params],
1168
+ ) -> StreamResponse[FormattableT]:
1169
+ """Resume an `llm.StreamResponse` with a response format."""
1170
+ ...
1171
+
1172
+ @overload
1173
+ def resume_stream(
1174
+ self,
1175
+ *,
1176
+ model_id: str,
1177
+ response: StreamResponse | StreamResponse[FormattableT],
1178
+ content: UserContent,
1179
+ **params: Unpack[Params],
1180
+ ) -> StreamResponse | StreamResponse[FormattableT]:
1181
+ """Resume an `llm.StreamResponse` with an optional response format."""
1182
+ ...
1183
+
1184
+ def resume_stream(
1185
+ self,
1186
+ *,
1187
+ model_id: str,
1188
+ response: StreamResponse | StreamResponse[FormattableT],
1189
+ content: UserContent,
1190
+ **params: Unpack[Params],
1191
+ ) -> StreamResponse | StreamResponse[FormattableT]:
1192
+ """Generate a new `llm.StreamResponse` by extending another response's messages with additional user content.
1193
+
1194
+ Args:
1195
+ model_id: Model identifier to use.
1196
+ response: Previous stream response to extend.
1197
+ content: Additional user content to append.
1198
+ **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
1199
+
1200
+ Returns:
1201
+ A new `llm.StreamResponse` object for streaming the extended conversation.
1202
+
1203
+ Note:
1204
+ Uses the previous response's tools and output format. This base method wraps
1205
+ around calling `client.stream()` with a messages array derived from the response
1206
+ messages. However, clients may override this with first-class resume logic.
1207
+ """
1208
+ messages = response.messages + [user(content)]
1209
+ return self.stream(
1210
+ model_id=model_id,
1211
+ messages=messages,
1212
+ toolkit=response.toolkit,
1213
+ format=response.format,
1214
+ **params,
1215
+ )
1216
+
1217
+ @overload
1218
+ async def resume_stream_async(
1219
+ self,
1220
+ *,
1221
+ model_id: str,
1222
+ response: AsyncStreamResponse,
1223
+ content: UserContent,
1224
+ **params: Unpack[Params],
1225
+ ) -> AsyncStreamResponse:
1226
+ """Resume an `llm.AsyncStreamResponse` without a response format."""
1227
+ ...
1228
+
1229
+ @overload
1230
+ async def resume_stream_async(
1231
+ self,
1232
+ *,
1233
+ model_id: str,
1234
+ response: AsyncStreamResponse[FormattableT],
1235
+ content: UserContent,
1236
+ **params: Unpack[Params],
1237
+ ) -> AsyncStreamResponse[FormattableT]:
1238
+ """Resume an `llm.AsyncStreamResponse` with a response format."""
1239
+ ...
1240
+
1241
+ @overload
1242
+ async def resume_stream_async(
1243
+ self,
1244
+ *,
1245
+ model_id: str,
1246
+ response: AsyncStreamResponse | AsyncStreamResponse[FormattableT],
1247
+ content: UserContent,
1248
+ **params: Unpack[Params],
1249
+ ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
1250
+ """Resume an `llm.AsyncStreamResponse` with an optional response format."""
1251
+ ...
1252
+
1253
+ async def resume_stream_async(
1254
+ self,
1255
+ *,
1256
+ model_id: str,
1257
+ response: AsyncStreamResponse | AsyncStreamResponse[FormattableT],
1258
+ content: UserContent,
1259
+ **params: Unpack[Params],
1260
+ ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
1261
+ """Generate a new `llm.AsyncStreamResponse` by extending another response's messages with additional user content.
1262
+
1263
+ Args:
1264
+ model_id: Model identifier to use.
1265
+ response: Previous async stream response to extend.
1266
+ content: Additional user content to append.
1267
+ **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
1268
+
1269
+ Returns:
1270
+ A new `llm.AsyncStreamResponse` object for asynchronously streaming the extended conversation.
1271
+
1272
+ Note:
1273
+ Uses the previous response's tools and output format. This base method wraps
1274
+ around calling `client.stream_async()` with a messages array derived from the response
1275
+ messages. However, clients may override this with first-class resume logic.
1276
+ """
1277
+ messages = response.messages + [user(content)]
1278
+ return await self.stream_async(
1279
+ model_id=model_id,
1280
+ messages=messages,
1281
+ toolkit=response.toolkit,
1282
+ format=response.format,
1283
+ **params,
1284
+ )
1285
+
1286
+ @overload
1287
+ def context_resume_stream(
1288
+ self,
1289
+ *,
1290
+ ctx: Context[DepsT],
1291
+ model_id: str,
1292
+ response: ContextStreamResponse[DepsT, None],
1293
+ content: UserContent,
1294
+ **params: Unpack[Params],
1295
+ ) -> ContextStreamResponse[DepsT, None]:
1296
+ """Resume an `llm.ContextStreamResponse` without a response format."""
1297
+ ...
1298
+
1299
+ @overload
1300
+ def context_resume_stream(
1301
+ self,
1302
+ *,
1303
+ ctx: Context[DepsT],
1304
+ model_id: str,
1305
+ response: ContextStreamResponse[DepsT, FormattableT],
1306
+ content: UserContent,
1307
+ **params: Unpack[Params],
1308
+ ) -> ContextStreamResponse[DepsT, FormattableT]:
1309
+ """Resume an `llm.ContextStreamResponse` with a response format."""
1310
+ ...
1311
+
1312
+ @overload
1313
+ def context_resume_stream(
1314
+ self,
1315
+ *,
1316
+ ctx: Context[DepsT],
1317
+ model_id: str,
1318
+ response: ContextStreamResponse[DepsT, None]
1319
+ | ContextStreamResponse[DepsT, FormattableT],
1320
+ content: UserContent,
1321
+ **params: Unpack[Params],
1322
+ ) -> (
1323
+ ContextStreamResponse[DepsT, None] | ContextStreamResponse[DepsT, FormattableT]
1324
+ ):
1325
+ """Resume an `llm.ContextStreamResponse` with an optional response format."""
1326
+ ...
1327
+
1328
+ def context_resume_stream(
1329
+ self,
1330
+ *,
1331
+ ctx: Context[DepsT],
1332
+ model_id: str,
1333
+ response: ContextStreamResponse[DepsT, None]
1334
+ | ContextStreamResponse[DepsT, FormattableT],
1335
+ content: UserContent,
1336
+ **params: Unpack[Params],
1337
+ ) -> (
1338
+ ContextStreamResponse[DepsT, None] | ContextStreamResponse[DepsT, FormattableT]
1339
+ ):
1340
+ """Generate a new `llm.ContextStreamResponse` by extending another response's messages with additional user content.
1341
+
1342
+ Args:
1343
+ ctx: Context object with dependencies for tools.
1344
+ model_id: Model identifier to use.
1345
+ response: Previous context stream response to extend.
1346
+ content: Additional user content to append.
1347
+ **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
1348
+
1349
+ Returns:
1350
+ A new `llm.ContextStreamResponse` object for streaming the extended conversation.
1351
+
1352
+ Note:
1353
+ Uses the previous response's tools and output format. This base method wraps
1354
+ around calling `client.context_stream()` with a messages array derived from the response
1355
+ messages. However, clients may override this with first-class resume logic.
1356
+ """
1357
+ messages = response.messages + [user(content)]
1358
+ return self.context_stream(
1359
+ ctx=ctx,
1360
+ model_id=model_id,
1361
+ messages=messages,
1362
+ toolkit=response.toolkit,
1363
+ format=response.format,
1364
+ **params,
1365
+ )
1366
+
1367
+ @overload
1368
+ async def context_resume_stream_async(
1369
+ self,
1370
+ *,
1371
+ ctx: Context[DepsT],
1372
+ model_id: str,
1373
+ response: AsyncContextStreamResponse[DepsT, None],
1374
+ content: UserContent,
1375
+ **params: Unpack[Params],
1376
+ ) -> AsyncContextStreamResponse[DepsT, None]:
1377
+ """Resume an `llm.AsyncContextStreamResponse` without a response format."""
1378
+ ...
1379
+
1380
+ @overload
1381
+ async def context_resume_stream_async(
1382
+ self,
1383
+ *,
1384
+ ctx: Context[DepsT],
1385
+ model_id: str,
1386
+ response: AsyncContextStreamResponse[DepsT, FormattableT],
1387
+ content: UserContent,
1388
+ **params: Unpack[Params],
1389
+ ) -> AsyncContextStreamResponse[DepsT, FormattableT]:
1390
+ """Resume an `llm.AsyncContextStreamResponse` with a response format."""
1391
+ ...
1392
+
1393
+ @overload
1394
+ async def context_resume_stream_async(
1395
+ self,
1396
+ *,
1397
+ ctx: Context[DepsT],
1398
+ model_id: str,
1399
+ response: AsyncContextStreamResponse[DepsT, None]
1400
+ | AsyncContextStreamResponse[DepsT, FormattableT],
1401
+ content: UserContent,
1402
+ **params: Unpack[Params],
1403
+ ) -> (
1404
+ AsyncContextStreamResponse[DepsT, None]
1405
+ | AsyncContextStreamResponse[DepsT, FormattableT]
1406
+ ):
1407
+ """Resume an `llm.AsyncContextStreamResponse` with an optional response format."""
1408
+ ...
1409
+
1410
+ async def context_resume_stream_async(
1411
+ self,
1412
+ *,
1413
+ ctx: Context[DepsT],
1414
+ model_id: str,
1415
+ response: AsyncContextStreamResponse[DepsT, None]
1416
+ | AsyncContextStreamResponse[DepsT, FormattableT],
1417
+ content: UserContent,
1418
+ **params: Unpack[Params],
1419
+ ) -> (
1420
+ AsyncContextStreamResponse[DepsT, None]
1421
+ | AsyncContextStreamResponse[DepsT, FormattableT]
1422
+ ):
1423
+ """Generate a new `llm.AsyncContextStreamResponse` by extending another response's messages with additional user content.
1424
+
1425
+ Args:
1426
+ ctx: Context object with dependencies for tools.
1427
+ model_id: Model identifier to use.
1428
+ response: Previous async context stream response to extend.
1429
+ content: Additional user content to append.
1430
+ **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
1431
+
1432
+ Returns:
1433
+ A new `llm.AsyncContextStreamResponse` object for asynchronously streaming the extended conversation.
1434
+
1435
+ Note:
1436
+ Uses the previous response's tools and output format. This base method wraps
1437
+ around calling `client.context_stream_async()` with a messages array derived from the response
1438
+ messages. However, clients may override this with first-class resume logic.
1439
+ """
1440
+ messages = response.messages + [user(content)]
1441
+ return await self.context_stream_async(
1442
+ ctx=ctx,
1443
+ model_id=model_id,
1444
+ messages=messages,
1445
+ toolkit=response.toolkit,
1446
+ format=response.format,
1447
+ **params,
1448
+ )
1449
+
1450
    @abstractmethod
    def get_error_status(self, e: Exception) -> int | None:
        """Extract the HTTP status code from a provider-specific exception.

        Different SDKs store status codes differently (e.g., `.status_code`
        vs `.code`), so each provider implements this to handle its own SDK's
        convention.

        Args:
            e: The exception to extract a status code from.

        Returns:
            The HTTP status code if available, None otherwise (e.g. for
            exceptions that did not originate from an HTTP response).
        """
        ...