mirascope 2.0.0a5__py3-none-any.whl → 2.0.1__py3-none-any.whl

This diff shows the changes between publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the packages as they appear in their respective public registries.
Files changed (349)
  1. mirascope/__init__.py +10 -1
  2. mirascope/_stubs.py +363 -0
  3. mirascope/api/__init__.py +8 -0
  4. mirascope/api/_generated/__init__.py +285 -2
  5. mirascope/api/_generated/annotations/__init__.py +33 -0
  6. mirascope/api/_generated/annotations/client.py +506 -0
  7. mirascope/api/_generated/annotations/raw_client.py +1414 -0
  8. mirascope/api/_generated/annotations/types/__init__.py +31 -0
  9. mirascope/api/_generated/annotations/types/annotations_create_request_label.py +5 -0
  10. mirascope/api/_generated/annotations/types/annotations_create_response.py +48 -0
  11. mirascope/api/_generated/annotations/types/annotations_create_response_label.py +5 -0
  12. mirascope/api/_generated/annotations/types/annotations_get_response.py +48 -0
  13. mirascope/api/_generated/annotations/types/annotations_get_response_label.py +5 -0
  14. mirascope/api/_generated/annotations/types/annotations_list_request_label.py +5 -0
  15. mirascope/api/_generated/annotations/types/annotations_list_response.py +21 -0
  16. mirascope/api/_generated/annotations/types/annotations_list_response_annotations_item.py +50 -0
  17. mirascope/api/_generated/annotations/types/annotations_list_response_annotations_item_label.py +5 -0
  18. mirascope/api/_generated/annotations/types/annotations_update_request_label.py +5 -0
  19. mirascope/api/_generated/annotations/types/annotations_update_response.py +48 -0
  20. mirascope/api/_generated/annotations/types/annotations_update_response_label.py +5 -0
  21. mirascope/api/_generated/api_keys/__init__.py +12 -2
  22. mirascope/api/_generated/api_keys/client.py +77 -0
  23. mirascope/api/_generated/api_keys/raw_client.py +422 -39
  24. mirascope/api/_generated/api_keys/types/__init__.py +7 -1
  25. mirascope/api/_generated/api_keys/types/api_keys_create_response.py +4 -12
  26. mirascope/api/_generated/api_keys/types/api_keys_get_response.py +4 -12
  27. mirascope/api/_generated/api_keys/types/api_keys_list_all_for_org_response_item.py +40 -0
  28. mirascope/api/_generated/api_keys/types/api_keys_list_response_item.py +4 -12
  29. mirascope/api/_generated/client.py +42 -0
  30. mirascope/api/_generated/core/client_wrapper.py +2 -14
  31. mirascope/api/_generated/core/datetime_utils.py +1 -3
  32. mirascope/api/_generated/core/file.py +2 -5
  33. mirascope/api/_generated/core/http_client.py +36 -112
  34. mirascope/api/_generated/core/jsonable_encoder.py +1 -3
  35. mirascope/api/_generated/core/pydantic_utilities.py +19 -74
  36. mirascope/api/_generated/core/query_encoder.py +1 -3
  37. mirascope/api/_generated/core/serialization.py +4 -10
  38. mirascope/api/_generated/docs/client.py +2 -6
  39. mirascope/api/_generated/docs/raw_client.py +51 -5
  40. mirascope/api/_generated/environment.py +3 -3
  41. mirascope/api/_generated/environments/__init__.py +6 -0
  42. mirascope/api/_generated/environments/client.py +117 -0
  43. mirascope/api/_generated/environments/raw_client.py +530 -51
  44. mirascope/api/_generated/environments/types/__init__.py +10 -0
  45. mirascope/api/_generated/environments/types/environments_create_response.py +1 -3
  46. mirascope/api/_generated/environments/types/environments_get_analytics_response.py +60 -0
  47. mirascope/api/_generated/environments/types/environments_get_analytics_response_top_functions_item.py +24 -0
  48. mirascope/api/_generated/environments/types/environments_get_analytics_response_top_models_item.py +22 -0
  49. mirascope/api/_generated/environments/types/environments_get_response.py +1 -3
  50. mirascope/api/_generated/environments/types/environments_list_response_item.py +1 -3
  51. mirascope/api/_generated/environments/types/environments_update_response.py +1 -3
  52. mirascope/api/_generated/errors/__init__.py +8 -0
  53. mirascope/api/_generated/errors/bad_request_error.py +1 -2
  54. mirascope/api/_generated/errors/conflict_error.py +1 -2
  55. mirascope/api/_generated/errors/forbidden_error.py +1 -5
  56. mirascope/api/_generated/errors/internal_server_error.py +1 -6
  57. mirascope/api/_generated/errors/not_found_error.py +1 -5
  58. mirascope/api/_generated/errors/payment_required_error.py +15 -0
  59. mirascope/api/_generated/errors/service_unavailable_error.py +14 -0
  60. mirascope/api/_generated/errors/too_many_requests_error.py +15 -0
  61. mirascope/api/_generated/errors/unauthorized_error.py +11 -0
  62. mirascope/api/_generated/functions/__init__.py +39 -0
  63. mirascope/api/_generated/functions/client.py +647 -0
  64. mirascope/api/_generated/functions/raw_client.py +1890 -0
  65. mirascope/api/_generated/functions/types/__init__.py +53 -0
  66. mirascope/api/_generated/functions/types/functions_create_request_dependencies_value.py +20 -0
  67. mirascope/api/_generated/functions/types/functions_create_response.py +37 -0
  68. mirascope/api/_generated/functions/types/functions_create_response_dependencies_value.py +20 -0
  69. mirascope/api/_generated/functions/types/functions_find_by_hash_response.py +39 -0
  70. mirascope/api/_generated/functions/types/functions_find_by_hash_response_dependencies_value.py +20 -0
  71. mirascope/api/_generated/functions/types/functions_get_by_env_response.py +53 -0
  72. mirascope/api/_generated/functions/types/functions_get_by_env_response_dependencies_value.py +22 -0
  73. mirascope/api/_generated/functions/types/functions_get_response.py +37 -0
  74. mirascope/api/_generated/functions/types/functions_get_response_dependencies_value.py +20 -0
  75. mirascope/api/_generated/functions/types/functions_list_by_env_response.py +25 -0
  76. mirascope/api/_generated/functions/types/functions_list_by_env_response_functions_item.py +56 -0
  77. mirascope/api/_generated/functions/types/functions_list_by_env_response_functions_item_dependencies_value.py +22 -0
  78. mirascope/api/_generated/functions/types/functions_list_response.py +21 -0
  79. mirascope/api/_generated/functions/types/functions_list_response_functions_item.py +41 -0
  80. mirascope/api/_generated/functions/types/functions_list_response_functions_item_dependencies_value.py +20 -0
  81. mirascope/api/_generated/health/client.py +2 -6
  82. mirascope/api/_generated/health/raw_client.py +51 -5
  83. mirascope/api/_generated/health/types/health_check_response.py +1 -3
  84. mirascope/api/_generated/organization_invitations/__init__.py +33 -0
  85. mirascope/api/_generated/organization_invitations/client.py +546 -0
  86. mirascope/api/_generated/organization_invitations/raw_client.py +1519 -0
  87. mirascope/api/_generated/organization_invitations/types/__init__.py +53 -0
  88. mirascope/api/_generated/organization_invitations/types/organization_invitations_accept_response.py +34 -0
  89. mirascope/api/_generated/organization_invitations/types/organization_invitations_accept_response_role.py +7 -0
  90. mirascope/api/_generated/organization_invitations/types/organization_invitations_create_request_role.py +7 -0
  91. mirascope/api/_generated/organization_invitations/types/organization_invitations_create_response.py +48 -0
  92. mirascope/api/_generated/organization_invitations/types/organization_invitations_create_response_role.py +7 -0
  93. mirascope/api/_generated/organization_invitations/types/organization_invitations_create_response_status.py +7 -0
  94. mirascope/api/_generated/organization_invitations/types/organization_invitations_get_response.py +48 -0
  95. mirascope/api/_generated/organization_invitations/types/organization_invitations_get_response_role.py +7 -0
  96. mirascope/api/_generated/organization_invitations/types/organization_invitations_get_response_status.py +7 -0
  97. mirascope/api/_generated/organization_invitations/types/organization_invitations_list_response_item.py +48 -0
  98. mirascope/api/_generated/organization_invitations/types/organization_invitations_list_response_item_role.py +7 -0
  99. mirascope/api/_generated/organization_invitations/types/organization_invitations_list_response_item_status.py +7 -0
  100. mirascope/api/_generated/organization_memberships/__init__.py +19 -0
  101. mirascope/api/_generated/organization_memberships/client.py +302 -0
  102. mirascope/api/_generated/organization_memberships/raw_client.py +736 -0
  103. mirascope/api/_generated/organization_memberships/types/__init__.py +27 -0
  104. mirascope/api/_generated/organization_memberships/types/organization_memberships_list_response_item.py +33 -0
  105. mirascope/api/_generated/organization_memberships/types/organization_memberships_list_response_item_role.py +7 -0
  106. mirascope/api/_generated/organization_memberships/types/organization_memberships_update_request_role.py +7 -0
  107. mirascope/api/_generated/organization_memberships/types/organization_memberships_update_response.py +31 -0
  108. mirascope/api/_generated/organization_memberships/types/organization_memberships_update_response_role.py +7 -0
  109. mirascope/api/_generated/organizations/__init__.py +26 -0
  110. mirascope/api/_generated/organizations/client.py +465 -0
  111. mirascope/api/_generated/organizations/raw_client.py +1799 -108
  112. mirascope/api/_generated/organizations/types/__init__.py +48 -0
  113. mirascope/api/_generated/organizations/types/organizations_create_payment_intent_response.py +24 -0
  114. mirascope/api/_generated/organizations/types/organizations_create_response.py +4 -3
  115. mirascope/api/_generated/organizations/types/organizations_create_response_role.py +1 -3
  116. mirascope/api/_generated/organizations/types/organizations_get_response.py +4 -3
  117. mirascope/api/_generated/organizations/types/organizations_get_response_role.py +1 -3
  118. mirascope/api/_generated/organizations/types/organizations_list_response_item.py +4 -3
  119. mirascope/api/_generated/organizations/types/organizations_list_response_item_role.py +1 -3
  120. mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_request_target_plan.py +7 -0
  121. mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_response.py +47 -0
  122. mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_response_validation_errors_item.py +33 -0
  123. mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_response_validation_errors_item_resource.py +7 -0
  124. mirascope/api/_generated/organizations/types/organizations_router_balance_response.py +24 -0
  125. mirascope/api/_generated/organizations/types/organizations_subscription_response.py +53 -0
  126. mirascope/api/_generated/organizations/types/organizations_subscription_response_current_plan.py +7 -0
  127. mirascope/api/_generated/organizations/types/organizations_subscription_response_payment_method.py +26 -0
  128. mirascope/api/_generated/organizations/types/organizations_subscription_response_scheduled_change.py +34 -0
  129. mirascope/api/_generated/organizations/types/organizations_subscription_response_scheduled_change_target_plan.py +7 -0
  130. mirascope/api/_generated/organizations/types/organizations_update_response.py +4 -3
  131. mirascope/api/_generated/organizations/types/organizations_update_response_role.py +1 -3
  132. mirascope/api/_generated/organizations/types/organizations_update_subscription_request_target_plan.py +7 -0
  133. mirascope/api/_generated/organizations/types/organizations_update_subscription_response.py +35 -0
  134. mirascope/api/_generated/project_memberships/__init__.py +25 -0
  135. mirascope/api/_generated/project_memberships/client.py +437 -0
  136. mirascope/api/_generated/project_memberships/raw_client.py +1039 -0
  137. mirascope/api/_generated/project_memberships/types/__init__.py +29 -0
  138. mirascope/api/_generated/project_memberships/types/project_memberships_create_request_role.py +7 -0
  139. mirascope/api/_generated/project_memberships/types/project_memberships_create_response.py +35 -0
  140. mirascope/api/_generated/project_memberships/types/project_memberships_create_response_role.py +7 -0
  141. mirascope/api/_generated/project_memberships/types/project_memberships_list_response_item.py +33 -0
  142. mirascope/api/_generated/project_memberships/types/project_memberships_list_response_item_role.py +7 -0
  143. mirascope/api/_generated/project_memberships/types/project_memberships_update_request_role.py +7 -0
  144. mirascope/api/_generated/project_memberships/types/project_memberships_update_response.py +35 -0
  145. mirascope/api/_generated/project_memberships/types/project_memberships_update_response_role.py +7 -0
  146. mirascope/api/_generated/projects/__init__.py +2 -12
  147. mirascope/api/_generated/projects/client.py +17 -71
  148. mirascope/api/_generated/projects/raw_client.py +295 -51
  149. mirascope/api/_generated/projects/types/__init__.py +1 -6
  150. mirascope/api/_generated/projects/types/projects_create_response.py +3 -9
  151. mirascope/api/_generated/projects/types/projects_get_response.py +3 -9
  152. mirascope/api/_generated/projects/types/projects_list_response_item.py +3 -9
  153. mirascope/api/_generated/projects/types/projects_update_response.py +3 -9
  154. mirascope/api/_generated/reference.md +3619 -182
  155. mirascope/api/_generated/tags/__init__.py +19 -0
  156. mirascope/api/_generated/tags/client.py +504 -0
  157. mirascope/api/_generated/tags/raw_client.py +1288 -0
  158. mirascope/api/_generated/tags/types/__init__.py +17 -0
  159. mirascope/api/_generated/tags/types/tags_create_response.py +41 -0
  160. mirascope/api/_generated/tags/types/tags_get_response.py +41 -0
  161. mirascope/api/_generated/tags/types/tags_list_response.py +23 -0
  162. mirascope/api/_generated/tags/types/tags_list_response_tags_item.py +41 -0
  163. mirascope/api/_generated/tags/types/tags_update_response.py +41 -0
  164. mirascope/api/_generated/token_cost/__init__.py +7 -0
  165. mirascope/api/_generated/token_cost/client.py +160 -0
  166. mirascope/api/_generated/token_cost/raw_client.py +264 -0
  167. mirascope/api/_generated/token_cost/types/__init__.py +8 -0
  168. mirascope/api/_generated/token_cost/types/token_cost_calculate_request_usage.py +54 -0
  169. mirascope/api/_generated/token_cost/types/token_cost_calculate_response.py +52 -0
  170. mirascope/api/_generated/traces/__init__.py +42 -0
  171. mirascope/api/_generated/traces/client.py +941 -0
  172. mirascope/api/_generated/traces/raw_client.py +2177 -23
  173. mirascope/api/_generated/traces/types/__init__.py +60 -0
  174. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item.py +4 -11
  175. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource.py +2 -6
  176. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item.py +1 -3
  177. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value.py +8 -24
  178. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_array_value.py +2 -6
  179. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value.py +3 -9
  180. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value_values_item.py +2 -6
  181. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item.py +3 -9
  182. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope.py +4 -8
  183. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item.py +2 -6
  184. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value.py +8 -24
  185. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_array_value.py +2 -6
  186. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value.py +3 -9
  187. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value_values_item.py +1 -3
  188. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item.py +6 -18
  189. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item.py +3 -9
  190. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value.py +8 -24
  191. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_array_value.py +2 -6
  192. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value.py +2 -6
  193. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value_values_item.py +1 -3
  194. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_status.py +2 -6
  195. mirascope/api/_generated/traces/types/traces_create_response.py +2 -5
  196. mirascope/api/_generated/traces/types/traces_create_response_partial_success.py +3 -9
  197. mirascope/api/_generated/traces/types/traces_get_analytics_summary_response.py +60 -0
  198. mirascope/api/_generated/traces/types/traces_get_analytics_summary_response_top_functions_item.py +24 -0
  199. mirascope/api/_generated/traces/types/traces_get_analytics_summary_response_top_models_item.py +22 -0
  200. mirascope/api/_generated/traces/types/traces_get_trace_detail_by_env_response.py +33 -0
  201. mirascope/api/_generated/traces/types/traces_get_trace_detail_by_env_response_spans_item.py +88 -0
  202. mirascope/api/_generated/traces/types/traces_get_trace_detail_response.py +33 -0
  203. mirascope/api/_generated/traces/types/traces_get_trace_detail_response_spans_item.py +88 -0
  204. mirascope/api/_generated/traces/types/traces_list_by_function_hash_response.py +25 -0
  205. mirascope/api/_generated/traces/types/traces_list_by_function_hash_response_traces_item.py +44 -0
  206. mirascope/api/_generated/traces/types/traces_search_by_env_request_attribute_filters_item.py +26 -0
  207. mirascope/api/_generated/traces/types/traces_search_by_env_request_attribute_filters_item_operator.py +7 -0
  208. mirascope/api/_generated/traces/types/traces_search_by_env_request_sort_by.py +7 -0
  209. mirascope/api/_generated/traces/types/traces_search_by_env_request_sort_order.py +7 -0
  210. mirascope/api/_generated/traces/types/traces_search_by_env_response.py +26 -0
  211. mirascope/api/_generated/traces/types/traces_search_by_env_response_spans_item.py +50 -0
  212. mirascope/api/_generated/traces/types/traces_search_request_attribute_filters_item.py +26 -0
  213. mirascope/api/_generated/traces/types/traces_search_request_attribute_filters_item_operator.py +7 -0
  214. mirascope/api/_generated/traces/types/traces_search_request_sort_by.py +7 -0
  215. mirascope/api/_generated/traces/types/traces_search_request_sort_order.py +5 -0
  216. mirascope/api/_generated/traces/types/traces_search_response.py +26 -0
  217. mirascope/api/_generated/traces/types/traces_search_response_spans_item.py +50 -0
  218. mirascope/api/_generated/types/__init__.py +48 -0
  219. mirascope/api/_generated/types/already_exists_error.py +1 -3
  220. mirascope/api/_generated/types/bad_request_error_body.py +50 -0
  221. mirascope/api/_generated/types/click_house_error.py +22 -0
  222. mirascope/api/_generated/types/database_error.py +1 -3
  223. mirascope/api/_generated/types/date.py +3 -0
  224. mirascope/api/_generated/types/http_api_decode_error.py +1 -3
  225. mirascope/api/_generated/types/immutable_resource_error.py +22 -0
  226. mirascope/api/_generated/types/internal_server_error_body.py +49 -0
  227. mirascope/api/_generated/types/issue.py +1 -3
  228. mirascope/api/_generated/types/issue_tag.py +1 -8
  229. mirascope/api/_generated/types/not_found_error_body.py +1 -3
  230. mirascope/api/_generated/types/number_from_string.py +3 -0
  231. mirascope/api/_generated/types/permission_denied_error.py +1 -3
  232. mirascope/api/_generated/types/permission_denied_error_tag.py +1 -3
  233. mirascope/api/_generated/types/plan_limit_exceeded_error.py +32 -0
  234. mirascope/api/_generated/types/plan_limit_exceeded_error_tag.py +7 -0
  235. mirascope/api/_generated/types/pricing_unavailable_error.py +23 -0
  236. mirascope/api/_generated/types/property_key_key.py +1 -3
  237. mirascope/api/_generated/types/rate_limit_error.py +31 -0
  238. mirascope/api/_generated/types/rate_limit_error_tag.py +5 -0
  239. mirascope/api/_generated/types/service_unavailable_error_body.py +24 -0
  240. mirascope/api/_generated/types/service_unavailable_error_tag.py +7 -0
  241. mirascope/api/_generated/types/stripe_error.py +20 -0
  242. mirascope/api/_generated/types/subscription_past_due_error.py +31 -0
  243. mirascope/api/_generated/types/subscription_past_due_error_tag.py +7 -0
  244. mirascope/api/_generated/types/unauthorized_error_body.py +21 -0
  245. mirascope/api/_generated/types/unauthorized_error_tag.py +5 -0
  246. mirascope/api/settings.py +19 -1
  247. mirascope/llm/__init__.py +55 -8
  248. mirascope/llm/calls/__init__.py +2 -1
  249. mirascope/llm/calls/calls.py +3 -1
  250. mirascope/llm/calls/decorator.py +21 -7
  251. mirascope/llm/content/tool_call.py +6 -0
  252. mirascope/llm/content/tool_output.py +22 -5
  253. mirascope/llm/exceptions.py +284 -71
  254. mirascope/llm/formatting/__init__.py +19 -2
  255. mirascope/llm/formatting/format.py +219 -30
  256. mirascope/llm/formatting/output_parser.py +178 -0
  257. mirascope/llm/formatting/partial.py +80 -7
  258. mirascope/llm/formatting/primitives.py +192 -0
  259. mirascope/llm/formatting/types.py +21 -64
  260. mirascope/llm/mcp/__init__.py +2 -2
  261. mirascope/llm/mcp/mcp_client.py +130 -0
  262. mirascope/llm/messages/__init__.py +3 -0
  263. mirascope/llm/messages/_utils.py +34 -0
  264. mirascope/llm/models/__init__.py +5 -0
  265. mirascope/llm/models/models.py +137 -69
  266. mirascope/llm/{providers/base → models}/params.py +16 -37
  267. mirascope/llm/models/thinking_config.py +61 -0
  268. mirascope/llm/prompts/_utils.py +0 -32
  269. mirascope/llm/prompts/decorator.py +16 -5
  270. mirascope/llm/prompts/prompts.py +131 -68
  271. mirascope/llm/providers/__init__.py +18 -2
  272. mirascope/llm/providers/anthropic/__init__.py +3 -21
  273. mirascope/llm/providers/anthropic/_utils/__init__.py +2 -0
  274. mirascope/llm/providers/anthropic/_utils/beta_decode.py +22 -11
  275. mirascope/llm/providers/anthropic/_utils/beta_encode.py +75 -25
  276. mirascope/llm/providers/anthropic/_utils/decode.py +22 -11
  277. mirascope/llm/providers/anthropic/_utils/encode.py +82 -20
  278. mirascope/llm/providers/anthropic/_utils/errors.py +2 -2
  279. mirascope/llm/providers/anthropic/beta_provider.py +64 -18
  280. mirascope/llm/providers/anthropic/provider.py +91 -33
  281. mirascope/llm/providers/base/__init__.py +0 -2
  282. mirascope/llm/providers/base/_utils.py +55 -11
  283. mirascope/llm/providers/base/base_provider.py +116 -37
  284. mirascope/llm/providers/google/__init__.py +2 -17
  285. mirascope/llm/providers/google/_utils/__init__.py +2 -0
  286. mirascope/llm/providers/google/_utils/decode.py +37 -15
  287. mirascope/llm/providers/google/_utils/encode.py +127 -19
  288. mirascope/llm/providers/google/_utils/errors.py +3 -2
  289. mirascope/llm/providers/google/model_info.py +1 -0
  290. mirascope/llm/providers/google/provider.py +68 -19
  291. mirascope/llm/providers/mirascope/__init__.py +5 -0
  292. mirascope/llm/providers/mirascope/_utils.py +73 -0
  293. mirascope/llm/providers/mirascope/provider.py +349 -0
  294. mirascope/llm/providers/mlx/__init__.py +2 -17
  295. mirascope/llm/providers/mlx/_utils.py +8 -3
  296. mirascope/llm/providers/mlx/encoding/base.py +5 -2
  297. mirascope/llm/providers/mlx/encoding/transformers.py +5 -2
  298. mirascope/llm/providers/mlx/mlx.py +23 -6
  299. mirascope/llm/providers/mlx/provider.py +42 -13
  300. mirascope/llm/providers/ollama/__init__.py +1 -13
  301. mirascope/llm/providers/openai/_utils/errors.py +2 -2
  302. mirascope/llm/providers/openai/completions/__init__.py +2 -20
  303. mirascope/llm/providers/openai/completions/_utils/decode.py +14 -3
  304. mirascope/llm/providers/openai/completions/_utils/encode.py +35 -28
  305. mirascope/llm/providers/openai/completions/base_provider.py +40 -11
  306. mirascope/llm/providers/openai/provider.py +40 -10
  307. mirascope/llm/providers/openai/responses/__init__.py +1 -17
  308. mirascope/llm/providers/openai/responses/_utils/__init__.py +2 -0
  309. mirascope/llm/providers/openai/responses/_utils/decode.py +21 -8
  310. mirascope/llm/providers/openai/responses/_utils/encode.py +59 -19
  311. mirascope/llm/providers/openai/responses/provider.py +56 -18
  312. mirascope/llm/providers/provider_id.py +1 -0
  313. mirascope/llm/providers/provider_registry.py +96 -19
  314. mirascope/llm/providers/together/__init__.py +1 -13
  315. mirascope/llm/responses/__init__.py +6 -1
  316. mirascope/llm/responses/_utils.py +102 -12
  317. mirascope/llm/responses/base_response.py +5 -2
  318. mirascope/llm/responses/base_stream_response.py +139 -45
  319. mirascope/llm/responses/response.py +2 -1
  320. mirascope/llm/responses/root_response.py +89 -17
  321. mirascope/llm/responses/stream_response.py +6 -9
  322. mirascope/llm/tools/decorator.py +17 -8
  323. mirascope/llm/tools/tool_schema.py +43 -10
  324. mirascope/llm/tools/toolkit.py +35 -27
  325. mirascope/llm/tools/tools.py +123 -30
  326. mirascope/ops/__init__.py +64 -109
  327. mirascope/ops/_internal/configuration.py +82 -31
  328. mirascope/ops/_internal/exporters/exporters.py +64 -11
  329. mirascope/ops/_internal/instrumentation/llm/common.py +530 -0
  330. mirascope/ops/_internal/instrumentation/llm/cost.py +190 -0
  331. mirascope/ops/_internal/instrumentation/llm/encode.py +1 -1
  332. mirascope/ops/_internal/instrumentation/llm/llm.py +116 -1243
  333. mirascope/ops/_internal/instrumentation/llm/model.py +1798 -0
  334. mirascope/ops/_internal/instrumentation/llm/response.py +521 -0
  335. mirascope/ops/_internal/instrumentation/llm/serialize.py +300 -0
  336. mirascope/ops/_internal/protocols.py +83 -1
  337. mirascope/ops/_internal/traced_calls.py +4 -0
  338. mirascope/ops/_internal/traced_functions.py +141 -12
  339. mirascope/ops/_internal/tracing.py +78 -1
  340. mirascope/ops/_internal/utils.py +52 -4
  341. mirascope/ops/_internal/versioned_functions.py +54 -43
  342. {mirascope-2.0.0a5.dist-info → mirascope-2.0.1.dist-info}/METADATA +14 -13
  343. mirascope-2.0.1.dist-info/RECORD +423 -0
  344. {mirascope-2.0.0a5.dist-info → mirascope-2.0.1.dist-info}/licenses/LICENSE +1 -1
  345. mirascope/llm/formatting/_utils.py +0 -78
  346. mirascope/llm/mcp/client.py +0 -118
  347. mirascope/llm/providers/_missing_import_stubs.py +0 -49
  348. mirascope-2.0.0a5.dist-info/RECORD +0 -265
  349. {mirascope-2.0.0a5.dist-info → mirascope-2.0.1.dist-info}/WHEEL +0 -0
@@ -16,12 +16,12 @@ from openai import (
 )
 
 from ....exceptions import (
-    APIError,
     AuthenticationError,
     BadRequestError,
     ConnectionError,
     NotFoundError,
     PermissionError,
+    ProviderError,
     RateLimitError,
     ResponseValidationError,
     ServerError,
@@ -42,5 +42,5 @@ OPENAI_ERROR_MAP: ProviderErrorMap = {
     OpenAIAPITimeoutError: TimeoutError,
     OpenAIAPIConnectionError: ConnectionError,
     OpenAIAPIResponseValidationError: ResponseValidationError,
-    OpenAIError: APIError,  # Catch-all for unknown OpenAI errors
+    OpenAIError: ProviderError,  # Catch-all for unknown OpenAI errors
 }
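
The net effect of these two hunks: OpenAI SDK errors with no more specific mapping now surface as ProviderError rather than the removed APIError. A minimal, illustrative sketch of what that means for caller code, assuming only that ProviderError is importable from mirascope.llm.exceptions (the module these hunks import from); the wrapped callable is hypothetical:

from mirascope.llm.exceptions import ProviderError


def call_with_fallback(call, fallback=None):
    """`call` is any zero-argument callable that ends up in the OpenAI provider."""
    try:
        return call()
    except ProviderError as exc:
        # In 2.0.1, unknown OpenAI SDK errors are wrapped as ProviderError.
        print(f"provider-level failure: {exc}")
        return fallback
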
@@ -1,23 +1,5 @@
-from typing import TYPE_CHECKING
-
-if TYPE_CHECKING:
-    from .base_provider import BaseOpenAICompletionsProvider
-    from .provider import OpenAICompletionsProvider
-else:
-    try:
-        from .base_provider import BaseOpenAICompletionsProvider
-        from .provider import OpenAICompletionsProvider
-    except ImportError:  # pragma: no cover
-        from ..._missing_import_stubs import (
-            create_provider_stub,
-        )
-
-        BaseOpenAICompletionsProvider = create_provider_stub(
-            "openai", "BaseOpenAICompletionsProvider"
-        )
-        OpenAICompletionsProvider = create_provider_stub(
-            "openai", "OpenAICompletionsProvider"
-        )
+from .base_provider import BaseOpenAICompletionsProvider
+from .provider import OpenAICompletionsProvider
 
 __all__ = [
     "BaseOpenAICompletionsProvider",
@@ -117,6 +117,7 @@ class _OpenAIChunkProcessor:
     def __init__(self) -> None:
         self.current_content_type: Literal["text", "tool_call"] | None = None
         self.current_tool_index: int | None = None
+        self.current_tool_id: str | None = None
         self.refusal_encountered = False
 
     def process_chunk(self, chunk: openai_types.ChatCompletionChunk) -> ChunkIterator:
@@ -180,7 +181,9 @@ class _OpenAIChunkProcessor:
                 self.current_tool_index is not None
                 and self.current_tool_index < index
             ):
-                yield ToolCallEndChunk()
+                if self.current_tool_id is None:  # pragma: no cover
+                    raise RuntimeError("No current_tool_id for ToolCallChunk")
+                yield ToolCallEndChunk(id=self.current_tool_id)
                 self.current_tool_index = None
 
             if self.current_tool_index is None:
@@ -201,15 +204,23 @@ class _OpenAIChunkProcessor:
                     id=tool_id,
                     name=name,
                 )
+                self.current_tool_id = tool_id
 
             if tool_call_delta.function and tool_call_delta.function.arguments:
-                yield ToolCallChunk(delta=tool_call_delta.function.arguments)
+                if self.current_tool_id is None:  # pragma: no cover
+                    raise RuntimeError("No current_tool_id for ToolCallChunk")
+                yield ToolCallChunk(
+                    id=self.current_tool_id,
+                    delta=tool_call_delta.function.arguments,
+                )
 
         if choice.finish_reason:
             if self.current_content_type == "text":
                 yield TextEndChunk()
             elif self.current_content_type == "tool_call":
-                yield ToolCallEndChunk()
+                if self.current_tool_id is None:  # pragma: no cover
+                    raise RuntimeError("No current_tool_id for ToolCallChunk")
+                yield ToolCallEndChunk(id=self.current_tool_id)
             elif self.current_content_type is not None:  # pragma: no cover
                 raise NotImplementedError()
 
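
These hunks thread the tool call's id through the streamed chunks so that consumers can correlate argument deltas with the call they belong to. A hedged sketch of one way a consumer might use that id, relying only on the id and delta attributes shown above (the chunk classes are matched by name here to avoid guessing their import path):

from collections import defaultdict
from collections.abc import Iterable
from typing import Any


def collect_tool_call_arguments(chunks: Iterable[Any]) -> dict[str, str]:
    """Accumulate streamed tool-call argument deltas keyed by tool call id."""
    buffers: dict[str, list[str]] = defaultdict(list)
    completed: dict[str, str] = {}
    for chunk in chunks:
        kind = type(chunk).__name__
        if kind == "ToolCallChunk":
            buffers[chunk.id].append(chunk.delta)
        elif kind == "ToolCallEndChunk":
            completed[chunk.id] = "".join(buffers.pop(chunk.id, []))
    return completed
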
@@ -1,26 +1,20 @@
 """OpenAI completions message encoding and request preparation."""
 
+from __future__ import annotations
+
 from collections.abc import Sequence
 from functools import lru_cache
-from typing import TypedDict, cast
+from typing import TYPE_CHECKING, TypedDict, cast
 
 from openai import Omit
 from openai.types import chat as openai_types, shared_params as shared_openai_types
 from openai.types.shared_params.response_format_json_schema import JSONSchema
 
-from .....exceptions import (
-    FeatureNotSupportedError,
-    FormattingModeNotSupportedError,
-)
-from .....formatting import (
-    Format,
-    FormattableT,
-    _utils as _formatting_utils,
-    resolve_format,
-)
+from .....exceptions import FeatureNotSupportedError
+from .....formatting import Format, FormattableT, OutputParser, resolve_format
 from .....messages import AssistantMessage, Message, UserMessage
 from .....tools import FORMAT_TOOL_NAME, AnyToolSchema, BaseToolkit
-from ....base import Params, _utils as _base_utils
+from ....base import _utils as _base_utils
 from ...model_id import OpenAIModelId, model_name
 from ...model_info import (
@@ -28,6 +22,9 @@ from ...model_info import (
     MODELS_WITHOUT_JSON_SCHEMA_SUPPORT,
 )
 
+if TYPE_CHECKING:
+    from .....models import Params
+
 
 class ChatCompletionCreateKwargs(TypedDict, total=False):
     """Kwargs for OpenAI ChatCompletion.create method."""
@@ -130,7 +127,7 @@ def _encode_user_message(
             result.append(
                 openai_types.ChatCompletionToolMessageParam(
                     role="tool",
-                    content=str(part.value),
+                    content=str(part.result),
                     tool_call_id=part.id,
                 )
             )
@@ -144,7 +141,7 @@ def _encode_user_message(
 
 
 def _encode_assistant_message(
-    message: AssistantMessage, model_id: OpenAIModelId, encode_thoughts: bool
+    message: AssistantMessage, model_id: OpenAIModelId, encode_thoughts_as_text: bool
 ) -> openai_types.ChatCompletionAssistantMessageParam:
     """Convert Mirascope `AssistantMessage` to OpenAI `ChatCompletionAssistantMessageParam`."""
 
@@ -153,7 +150,7 @@ def _encode_assistant_message(
         and message.provider_model_name
         == model_name(model_id=model_id, api_mode="completions")
         and message.raw_message
-        and not encode_thoughts
+        and not encode_thoughts_as_text
     ):
         return cast(
             openai_types.ChatCompletionAssistantMessageParam, message.raw_message
@@ -177,7 +174,7 @@ def _encode_assistant_message(
                 )
             )
         elif part.type == "thought":
-            if encode_thoughts:
+            if encode_thoughts_as_text:
                 text_params.append(
                     openai_types.ChatCompletionContentPartTextParam(
                         text="**Thinking:** " + part.thought, type="text"
@@ -203,7 +200,7 @@ def _encode_assistant_message(
 
 
 def _encode_message(
-    message: Message, model_id: OpenAIModelId, encode_thoughts: bool
+    message: Message, model_id: OpenAIModelId, encode_thoughts_as_text: bool
 ) -> list[openai_types.ChatCompletionMessageParam]:
     """Convert a Mirascope `Message` to OpenAI `ChatCompletionMessageParam` format.
 
@@ -224,7 +221,7 @@ def _encode_message(
     elif message.role == "user":
         return _encode_user_message(message, model_id)
     elif message.role == "assistant":
-        return [_encode_assistant_message(message, model_id, encode_thoughts)]
+        return [_encode_assistant_message(message, model_id, encode_thoughts_as_text)]
     else:
         raise ValueError(f"Unsupported role: {message.role}")  # pragma: no cover
 
@@ -237,13 +234,16 @@ def _convert_tool_to_tool_param(
     schema_dict = tool.parameters.model_dump(by_alias=True, exclude_none=True)
     schema_dict["type"] = "object"
     _base_utils.ensure_additional_properties_false(schema_dict)
+    strict = True if tool.strict is None else tool.strict
+    if strict:
+        _base_utils.ensure_all_properties_required(schema_dict)
     return openai_types.ChatCompletionToolParam(
         type="function",
         function={
             "name": tool.name,
             "description": tool.description,
             "parameters": schema_dict,
-            "strict": tool.strict,
+            "strict": strict,
         },
     )
 
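
The _convert_tool_to_tool_param hunk changes the default for tools that never set strict: None now resolves to True, and strict schemas additionally get every property marked required. A tiny worked illustration of just that defaulting rule (the helper name is mine, not part of the package):

def resolve_strict(tool_strict: bool | None) -> bool:
    # Mirror of the `strict = True if tool.strict is None else tool.strict` line above.
    return True if tool_strict is None else tool_strict


assert resolve_strict(None) is True    # unset now means strict
assert resolve_strict(True) is True    # explicit strict unchanged
assert resolve_strict(False) is False  # explicit opt-out still honored
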
@@ -281,7 +281,10 @@ def encode_request(
     model_id: OpenAIModelId,
     messages: Sequence[Message],
     tools: Sequence[AnyToolSchema] | BaseToolkit[AnyToolSchema] | None,
-    format: type[FormattableT] | Format[FormattableT] | None,
+    format: type[FormattableT]
+    | Format[FormattableT]
+    | OutputParser[FormattableT]
+    | None,
     params: Params,
 ) -> tuple[Sequence[Message], Format[FormattableT] | None, ChatCompletionCreateKwargs]:
     """Prepares a request for the `OpenAI.chat.completions.create` method."""
@@ -299,12 +302,12 @@ def encode_request(
             "model": base_model_name,
         }
     )
-    encode_thoughts = False
+    encode_thoughts_as_text = False
 
     with _base_utils.ensure_all_params_accessed(
         params=params,
         provider_id="openai",
-        unsupported_params=["top_k", "thinking"],
+        unsupported_params=["top_k"],
     ) as param_accessor:
         if param_accessor.temperature is not None:
             kwargs["temperature"] = param_accessor.temperature
@@ -317,8 +320,10 @@ def encode_request(
             kwargs["seed"] = param_accessor.seed
         if param_accessor.stop_sequences is not None:
             kwargs["stop"] = param_accessor.stop_sequences
-        if param_accessor.encode_thoughts_as_text is not None:
-            encode_thoughts = True
+        if param_accessor.thinking is not None:
+            thinking = param_accessor.thinking
+            if thinking.get("encode_thoughts_as_text"):
+                encode_thoughts_as_text = True
 
     tools = tools.tools if isinstance(tools, BaseToolkit) else tools or []
 
@@ -330,8 +335,8 @@ def encode_request(
     if format is not None:
         if format.mode == "strict":
             if not model_supports_strict:
-                raise FormattingModeNotSupportedError(
-                    formatting_mode="strict",
+                raise FeatureNotSupportedError(
+                    feature="formatting_mode:strict",
                     provider_id="openai",
                     model_id=model_id,
                 )
@@ -345,7 +350,7 @@ def encode_request(
                 "function": {"name": FORMAT_TOOL_NAME},
             }
             kwargs["parallel_tool_calls"] = False
-            format_tool_schema = _formatting_utils.create_tool_schema(format)
+            format_tool_schema = format.create_tool_schema()
             openai_tools.append(_convert_tool_to_tool_param(format_tool_schema))
         elif (
             format.mode == "json"
@@ -363,7 +368,9 @@ def encode_request(
 
     encoded_messages: list[openai_types.ChatCompletionMessageParam] = []
     for message in messages:
-        encoded_messages.extend(_encode_message(message, model_id, encode_thoughts))
+        encoded_messages.extend(
+            _encode_message(message, model_id, encode_thoughts_as_text)
+        )
     kwargs["messages"] = encoded_messages
 
     return messages, format, kwargs
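
Thought handling is no longer rejected as an unsupported completions param: the encoder now reads an encode_thoughts_as_text flag out of the thinking param. A hedged sketch of the only key this hunk actually consumes (the full ThinkingConfig shape lives in the new mirascope/llm/models/thinking_config.py and is not shown in this diff; the helper below is mine, not part of the package):

from collections.abc import Mapping
from typing import Any


def wants_thoughts_as_text(thinking: Mapping[str, Any] | None) -> bool:
    """Mirror of how the encoder above interprets the `thinking` param."""
    return bool(thinking and thinking.get("encode_thoughts_as_text"))


assert wants_thoughts_as_text(None) is False
assert wants_thoughts_as_text({"encode_thoughts_as_text": True}) is True

When the flag is set, prior assistant thought parts are re-encoded as "**Thinking:** ..." text blocks instead of being dropped from the request, per the _encode_assistant_message hunk above.
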
@@ -1,14 +1,16 @@
 """Base class for OpenAI Completions-compatible providers."""
 
+from __future__ import annotations
+
 import os
 from collections.abc import Sequence
-from typing import ClassVar
+from typing import TYPE_CHECKING, ClassVar
 from typing_extensions import Unpack
 
 from openai import AsyncOpenAI, OpenAI
 
 from ....context import Context, DepsT
-from ....formatting import Format, FormattableT
+from ....formatting import Format, FormattableT, OutputParser
 from ....messages import Message
 from ....responses import (
     AsyncContextResponse,
@@ -30,11 +32,14 @@ from ....tools import (
     Tool,
     Toolkit,
 )
-from ...base import BaseProvider, Params
+from ...base import BaseProvider
 from .. import _utils as _shared_utils
 from ..model_id import model_name as openai_model_name
 from . import _utils
 
+if TYPE_CHECKING:
+    from ....models import Params
+
 
 class BaseOpenAICompletionsProvider(BaseProvider[OpenAI]):
     """Base class for providers that use OpenAI Completions-compatible APIs."""
@@ -97,7 +102,10 @@ class BaseOpenAICompletionsProvider(BaseProvider[OpenAI]):
         model_id: str,
         messages: Sequence[Message],
         tools: Sequence[Tool] | Toolkit | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> Response | Response[FormattableT]:
         """Generate an `llm.Response` by synchronously calling the API.
@@ -152,7 +160,10 @@ class BaseOpenAICompletionsProvider(BaseProvider[OpenAI]):
         tools: Sequence[Tool | ContextTool[DepsT]]
         | ContextToolkit[DepsT]
         | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
         """Generate an `llm.ContextResponse` by synchronously calling the API.
@@ -205,7 +216,10 @@ class BaseOpenAICompletionsProvider(BaseProvider[OpenAI]):
         model_id: str,
         messages: Sequence[Message],
         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> AsyncResponse | AsyncResponse[FormattableT]:
         """Generate an `llm.AsyncResponse` by asynchronously calling the API.
@@ -260,7 +274,10 @@ class BaseOpenAICompletionsProvider(BaseProvider[OpenAI]):
         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
         | AsyncContextToolkit[DepsT]
         | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
         """Generate an `llm.AsyncContextResponse` by asynchronously calling the API.
@@ -313,7 +330,10 @@ class BaseOpenAICompletionsProvider(BaseProvider[OpenAI]):
         model_id: str,
         messages: Sequence[Message],
         tools: Sequence[Tool] | Toolkit | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> StreamResponse | StreamResponse[FormattableT]:
         """Generate an `llm.StreamResponse` by synchronously streaming from the API.
@@ -364,7 +384,10 @@ class BaseOpenAICompletionsProvider(BaseProvider[OpenAI]):
         tools: Sequence[Tool | ContextTool[DepsT]]
         | ContextToolkit[DepsT]
         | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> ContextStreamResponse[DepsT] | ContextStreamResponse[DepsT, FormattableT]:
         """Generate an `llm.ContextStreamResponse` by synchronously streaming from the API.
@@ -414,7 +437,10 @@ class BaseOpenAICompletionsProvider(BaseProvider[OpenAI]):
         model_id: str,
         messages: Sequence[Message],
         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
         """Generate an `llm.AsyncStreamResponse` by asynchronously streaming from the API.
@@ -465,7 +491,10 @@ class BaseOpenAICompletionsProvider(BaseProvider[OpenAI]):
         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
        | AsyncContextToolkit[DepsT]
        | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> (
         AsyncContextStreamResponse[DepsT]
@@ -1,13 +1,16 @@
 """Unified OpenAI client implementation."""
 
+from __future__ import annotations
+
 from collections.abc import Sequence
+from typing import TYPE_CHECKING
 from typing_extensions import Unpack
 
 from openai import BadRequestError as OpenAIBadRequestError, OpenAI
 
 from ...context import Context, DepsT
 from ...exceptions import BadRequestError, NotFoundError
-from ...formatting import Format, FormattableT
+from ...formatting import Format, FormattableT, OutputParser
 from ...messages import Message
 from ...responses import (
     AsyncContextResponse,
@@ -29,12 +32,15 @@ from ...tools import (
     Tool,
     Toolkit,
 )
-from ..base import BaseProvider, Params
+from ..base import BaseProvider
 from . import _utils
 from .completions import OpenAICompletionsProvider
 from .model_id import OPENAI_KNOWN_MODELS, OpenAIModelId
 from .responses import OpenAIResponsesProvider
 
+if TYPE_CHECKING:
+    from ...models import Params
+
 
 def _has_audio_content(messages: Sequence[Message]) -> bool:
     """Returns whether a sequence of messages contains any audio content."""
@@ -157,7 +163,10 @@ class OpenAIProvider(BaseProvider[OpenAI]):
         model_id: OpenAIModelId,
         messages: Sequence[Message],
         tools: Sequence[Tool] | Toolkit | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> Response | Response[FormattableT]:
         """Generate an `llm.Response` by synchronously calling the OpenAI API.
@@ -190,7 +199,10 @@ class OpenAIProvider(BaseProvider[OpenAI]):
         tools: Sequence[Tool | ContextTool[DepsT]]
         | ContextToolkit[DepsT]
         | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
         """Generate an `llm.ContextResponse` by synchronously calling the OpenAI API.
@@ -222,7 +234,10 @@ class OpenAIProvider(BaseProvider[OpenAI]):
         model_id: OpenAIModelId,
         messages: Sequence[Message],
         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> AsyncResponse | AsyncResponse[FormattableT]:
         """Generate an `llm.AsyncResponse` by asynchronously calling the OpenAI API.
@@ -254,7 +269,10 @@ class OpenAIProvider(BaseProvider[OpenAI]):
         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
         | AsyncContextToolkit[DepsT]
         | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
         """Generate an `llm.AsyncContextResponse` by asynchronously calling the OpenAI API.
@@ -285,7 +303,10 @@ class OpenAIProvider(BaseProvider[OpenAI]):
         model_id: OpenAIModelId,
         messages: Sequence[Message],
         tools: Sequence[Tool] | Toolkit | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> StreamResponse | StreamResponse[FormattableT]:
         """Generate an `llm.StreamResponse` by synchronously streaming from the OpenAI API.
@@ -318,7 +339,10 @@ class OpenAIProvider(BaseProvider[OpenAI]):
         tools: Sequence[Tool | ContextTool[DepsT]]
         | ContextToolkit[DepsT]
         | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> ContextStreamResponse[DepsT] | ContextStreamResponse[DepsT, FormattableT]:
         """Generate an `llm.ContextStreamResponse` by synchronously streaming from the OpenAI API.
@@ -350,7 +374,10 @@ class OpenAIProvider(BaseProvider[OpenAI]):
         model_id: OpenAIModelId,
         messages: Sequence[Message],
         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
         """Generate an `llm.AsyncStreamResponse` by asynchronously streaming from the OpenAI API.
@@ -382,7 +409,10 @@ class OpenAIProvider(BaseProvider[OpenAI]):
         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
         | AsyncContextToolkit[DepsT]
         | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> (
         AsyncContextStreamResponse[DepsT]
@@ -1,20 +1,4 @@
-from typing import TYPE_CHECKING
-
-if TYPE_CHECKING:
-    from .provider import OpenAIResponsesProvider
-else:
-    try:
-        from .provider import OpenAIResponsesProvider
-    except ImportError:  # pragma: no cover
-        from ..._missing_import_stubs import (
-            create_import_error_stub,
-            create_provider_stub,
-        )
-
-        OpenAIResponsesProvider = create_provider_stub(
-            "openai", "OpenAIResponsesProvider"
-        )
-
+from .provider import OpenAIResponsesProvider
 
 __all__ = [
     "OpenAIResponsesProvider",
@@ -1,3 +1,4 @@
+from ....base._utils import get_include_thoughts
 from .decode import (
     decode_async_stream,
     decode_response,
@@ -10,4 +11,5 @@ __all__ = [
     "decode_response",
     "decode_stream",
     "encode_request",
+    "get_include_thoughts",
 ]
@@ -78,6 +78,8 @@ def decode_response(
     response: openai_types.Response,
     model_id: OpenAIModelId,
     provider_id: str,
+    *,
+    include_thoughts: bool,
 ) -> tuple[AssistantMessage, FinishReason | None, Usage | None]:
     """Convert OpenAI Responses Response to mirascope AssistantMessage and usage."""
     parts: list[AssistantContentPart] = []
@@ -114,6 +116,9 @@
         else:
             raise NotImplementedError(f"Unsupported output item: {output_item.type}")
 
+    if not include_thoughts:
+        parts = [part for part in parts if part.type != "thought"]
+
 
     if refused:
         finish_reason = FinishReason.REFUSAL
@@ -136,9 +141,10 @@
 class _OpenAIResponsesChunkProcessor:
     """Processes OpenAI Responses streaming events and maintains state across chunks."""
 
-    def __init__(self) -> None:
+    def __init__(self, *, include_thoughts: bool) -> None:
         self.current_content_type: Literal["text", "tool_call", "thought"] | None = None
         self.refusal_encountered = False
+        self.include_thoughts = include_thoughts
 
     def process_chunk(self, event: ResponseStreamEvent) -> ChunkIterator:
         """Process a single OpenAI Responses stream event and yield the appropriate content chunks."""
@@ -173,23 +179,26 @@
             )
             self.current_content_type = "tool_call"
         elif event.type == "response.function_call_arguments.delta":
-            yield ToolCallChunk(delta=event.delta)
+            yield ToolCallChunk(id=self.current_tool_call_id, delta=event.delta)
         elif event.type == "response.function_call_arguments.done":
-            yield ToolCallEndChunk()
+            yield ToolCallEndChunk(id=self.current_tool_call_id)
             self.current_content_type = None
         elif (
             event.type == "response.reasoning_text.delta"
             or event.type == "response.reasoning_summary_text.delta"
         ):
             if not self.current_content_type:
-                yield ThoughtStartChunk()
+                if self.include_thoughts:
+                    yield ThoughtStartChunk()
                 self.current_content_type = "thought"
-            yield ThoughtChunk(delta=event.delta)
+            if self.include_thoughts:
+                yield ThoughtChunk(delta=event.delta)
         elif (
             event.type == "response.reasoning_summary_text.done"
             or event.type == "response.reasoning_text.done"
         ):
-            yield ThoughtEndChunk()
+            if self.include_thoughts:
+                yield ThoughtEndChunk()
             self.current_content_type = None
         elif event.type == "response.incomplete":
             details = event.response.incomplete_details
@@ -230,18 +239,22 @@
 
 def decode_stream(
     openai_stream: Stream[ResponseStreamEvent],
+    *,
+    include_thoughts: bool,
 ) -> ChunkIterator:
     """Returns a ChunkIterator converted from an OpenAI Stream[ResponseStreamEvent]"""
-    processor = _OpenAIResponsesChunkProcessor()
+    processor = _OpenAIResponsesChunkProcessor(include_thoughts=include_thoughts)
     for event in openai_stream:
         yield from processor.process_chunk(event)
 
 
 async def decode_async_stream(
     openai_stream: AsyncStream[ResponseStreamEvent],
+    *,
+    include_thoughts: bool,
 ) -> AsyncChunkIterator:
     """Returns an AsyncChunkIterator converted from an OpenAI AsyncStream[ResponseStreamEvent]"""
-    processor = _OpenAIResponsesChunkProcessor()
+    processor = _OpenAIResponsesChunkProcessor(include_thoughts=include_thoughts)
     async for event in openai_stream:
         for item in processor.process_chunk(event):
             yield item
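
Both Responses decoders now require the keyword-only include_thoughts flag and pass it through to the chunk processor. A hedged usage sketch (the import path follows the re-export added to responses/_utils/__init__.py above; the stream argument is whatever the OpenAI SDK returned):

from mirascope.llm.providers.openai.responses._utils import decode_stream


def drop_thoughts(openai_stream):
    # With include_thoughts=False, ThoughtStartChunk, ThoughtChunk, and
    # ThoughtEndChunk items are suppressed, while text and tool-call chunks
    # pass through unchanged (see the processor hunks above).
    yield from decode_stream(openai_stream, include_thoughts=False)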