mirascope 2.0.0__py3-none-any.whl → 2.0.0a0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (442)
  1. mirascope/__init__.py +2 -11
  2. mirascope/graphs/__init__.py +22 -0
  3. mirascope/graphs/finite_state_machine.py +625 -0
  4. mirascope/llm/__init__.py +16 -101
  5. mirascope/llm/agents/__init__.py +15 -0
  6. mirascope/llm/agents/agent.py +97 -0
  7. mirascope/llm/agents/agent_template.py +45 -0
  8. mirascope/llm/agents/decorator.py +176 -0
  9. mirascope/llm/calls/__init__.py +1 -2
  10. mirascope/llm/calls/base_call.py +33 -0
  11. mirascope/llm/calls/calls.py +58 -84
  12. mirascope/llm/calls/decorator.py +120 -140
  13. mirascope/llm/clients/__init__.py +34 -0
  14. mirascope/llm/clients/anthropic/__init__.py +11 -0
  15. mirascope/llm/{providers/openai/completions → clients/anthropic}/_utils/__init__.py +0 -2
  16. mirascope/llm/{providers → clients}/anthropic/_utils/decode.py +22 -66
  17. mirascope/llm/clients/anthropic/_utils/encode.py +243 -0
  18. mirascope/llm/clients/anthropic/clients.py +819 -0
  19. mirascope/llm/clients/anthropic/model_ids.py +8 -0
  20. mirascope/llm/{providers → clients}/base/__init__.py +5 -4
  21. mirascope/llm/{providers → clients}/base/_utils.py +17 -78
  22. mirascope/llm/{providers/base/base_provider.py → clients/base/client.py} +145 -468
  23. mirascope/llm/{models → clients/base}/params.py +37 -16
  24. mirascope/llm/clients/google/__init__.py +6 -0
  25. mirascope/llm/{providers/openai/responses → clients/google}/_utils/__init__.py +0 -2
  26. mirascope/llm/{providers → clients}/google/_utils/decode.py +22 -98
  27. mirascope/llm/{providers → clients}/google/_utils/encode.py +46 -168
  28. mirascope/llm/clients/google/clients.py +853 -0
  29. mirascope/llm/clients/google/model_ids.py +15 -0
  30. mirascope/llm/clients/openai/__init__.py +25 -0
  31. mirascope/llm/clients/openai/completions/__init__.py +9 -0
  32. mirascope/llm/{providers/google → clients/openai/completions}/_utils/__init__.py +0 -4
  33. mirascope/llm/{providers → clients}/openai/completions/_utils/decode.py +9 -74
  34. mirascope/llm/{providers → clients}/openai/completions/_utils/encode.py +52 -70
  35. mirascope/llm/clients/openai/completions/_utils/model_features.py +81 -0
  36. mirascope/llm/clients/openai/completions/clients.py +833 -0
  37. mirascope/llm/clients/openai/completions/model_ids.py +8 -0
  38. mirascope/llm/clients/openai/responses/__init__.py +9 -0
  39. mirascope/llm/clients/openai/responses/_utils/__init__.py +13 -0
  40. mirascope/llm/{providers → clients}/openai/responses/_utils/decode.py +14 -80
  41. mirascope/llm/{providers → clients}/openai/responses/_utils/encode.py +41 -92
  42. mirascope/llm/clients/openai/responses/_utils/model_features.py +87 -0
  43. mirascope/llm/clients/openai/responses/clients.py +832 -0
  44. mirascope/llm/clients/openai/responses/model_ids.py +8 -0
  45. mirascope/llm/clients/openai/shared/__init__.py +7 -0
  46. mirascope/llm/clients/openai/shared/_utils.py +55 -0
  47. mirascope/llm/clients/providers.py +175 -0
  48. mirascope/llm/content/__init__.py +2 -3
  49. mirascope/llm/content/tool_call.py +0 -6
  50. mirascope/llm/content/tool_output.py +5 -22
  51. mirascope/llm/context/_utils.py +6 -19
  52. mirascope/llm/exceptions.py +43 -298
  53. mirascope/llm/formatting/__init__.py +2 -19
  54. mirascope/llm/formatting/_utils.py +74 -0
  55. mirascope/llm/formatting/format.py +30 -219
  56. mirascope/llm/formatting/from_call_args.py +2 -2
  57. mirascope/llm/formatting/partial.py +7 -80
  58. mirascope/llm/formatting/types.py +64 -21
  59. mirascope/llm/mcp/__init__.py +2 -2
  60. mirascope/llm/mcp/client.py +118 -0
  61. mirascope/llm/messages/__init__.py +0 -3
  62. mirascope/llm/messages/message.py +5 -13
  63. mirascope/llm/models/__init__.py +2 -7
  64. mirascope/llm/models/models.py +139 -315
  65. mirascope/llm/prompts/__init__.py +12 -13
  66. mirascope/llm/prompts/_utils.py +43 -14
  67. mirascope/llm/prompts/decorator.py +204 -144
  68. mirascope/llm/prompts/protocols.py +59 -25
  69. mirascope/llm/responses/__init__.py +1 -9
  70. mirascope/llm/responses/_utils.py +12 -102
  71. mirascope/llm/responses/base_response.py +6 -18
  72. mirascope/llm/responses/base_stream_response.py +50 -173
  73. mirascope/llm/responses/finish_reason.py +0 -1
  74. mirascope/llm/responses/response.py +13 -34
  75. mirascope/llm/responses/root_response.py +29 -100
  76. mirascope/llm/responses/stream_response.py +31 -40
  77. mirascope/llm/tools/__init__.py +2 -9
  78. mirascope/llm/tools/_utils.py +3 -12
  79. mirascope/llm/tools/decorator.py +16 -25
  80. mirascope/llm/tools/protocols.py +4 -4
  81. mirascope/llm/tools/tool_schema.py +19 -87
  82. mirascope/llm/tools/toolkit.py +27 -35
  83. mirascope/llm/tools/tools.py +41 -135
  84. {mirascope-2.0.0.dist-info → mirascope-2.0.0a0.dist-info}/METADATA +9 -95
  85. mirascope-2.0.0a0.dist-info/RECORD +101 -0
  86. {mirascope-2.0.0.dist-info → mirascope-2.0.0a0.dist-info}/WHEEL +1 -1
  87. {mirascope-2.0.0.dist-info → mirascope-2.0.0a0.dist-info}/licenses/LICENSE +1 -1
  88. mirascope/_stubs.py +0 -363
  89. mirascope/api/__init__.py +0 -14
  90. mirascope/api/_generated/README.md +0 -207
  91. mirascope/api/_generated/__init__.py +0 -440
  92. mirascope/api/_generated/annotations/__init__.py +0 -33
  93. mirascope/api/_generated/annotations/client.py +0 -506
  94. mirascope/api/_generated/annotations/raw_client.py +0 -1414
  95. mirascope/api/_generated/annotations/types/__init__.py +0 -31
  96. mirascope/api/_generated/annotations/types/annotations_create_request_label.py +0 -5
  97. mirascope/api/_generated/annotations/types/annotations_create_response.py +0 -48
  98. mirascope/api/_generated/annotations/types/annotations_create_response_label.py +0 -5
  99. mirascope/api/_generated/annotations/types/annotations_get_response.py +0 -48
  100. mirascope/api/_generated/annotations/types/annotations_get_response_label.py +0 -5
  101. mirascope/api/_generated/annotations/types/annotations_list_request_label.py +0 -5
  102. mirascope/api/_generated/annotations/types/annotations_list_response.py +0 -21
  103. mirascope/api/_generated/annotations/types/annotations_list_response_annotations_item.py +0 -50
  104. mirascope/api/_generated/annotations/types/annotations_list_response_annotations_item_label.py +0 -5
  105. mirascope/api/_generated/annotations/types/annotations_update_request_label.py +0 -5
  106. mirascope/api/_generated/annotations/types/annotations_update_response.py +0 -48
  107. mirascope/api/_generated/annotations/types/annotations_update_response_label.py +0 -5
  108. mirascope/api/_generated/api_keys/__init__.py +0 -17
  109. mirascope/api/_generated/api_keys/client.py +0 -530
  110. mirascope/api/_generated/api_keys/raw_client.py +0 -1236
  111. mirascope/api/_generated/api_keys/types/__init__.py +0 -15
  112. mirascope/api/_generated/api_keys/types/api_keys_create_response.py +0 -28
  113. mirascope/api/_generated/api_keys/types/api_keys_get_response.py +0 -27
  114. mirascope/api/_generated/api_keys/types/api_keys_list_all_for_org_response_item.py +0 -40
  115. mirascope/api/_generated/api_keys/types/api_keys_list_response_item.py +0 -27
  116. mirascope/api/_generated/client.py +0 -211
  117. mirascope/api/_generated/core/__init__.py +0 -52
  118. mirascope/api/_generated/core/api_error.py +0 -23
  119. mirascope/api/_generated/core/client_wrapper.py +0 -46
  120. mirascope/api/_generated/core/datetime_utils.py +0 -28
  121. mirascope/api/_generated/core/file.py +0 -67
  122. mirascope/api/_generated/core/force_multipart.py +0 -16
  123. mirascope/api/_generated/core/http_client.py +0 -543
  124. mirascope/api/_generated/core/http_response.py +0 -55
  125. mirascope/api/_generated/core/jsonable_encoder.py +0 -100
  126. mirascope/api/_generated/core/pydantic_utilities.py +0 -255
  127. mirascope/api/_generated/core/query_encoder.py +0 -58
  128. mirascope/api/_generated/core/remove_none_from_dict.py +0 -11
  129. mirascope/api/_generated/core/request_options.py +0 -35
  130. mirascope/api/_generated/core/serialization.py +0 -276
  131. mirascope/api/_generated/docs/__init__.py +0 -4
  132. mirascope/api/_generated/docs/client.py +0 -91
  133. mirascope/api/_generated/docs/raw_client.py +0 -178
  134. mirascope/api/_generated/environment.py +0 -9
  135. mirascope/api/_generated/environments/__init__.py +0 -23
  136. mirascope/api/_generated/environments/client.py +0 -649
  137. mirascope/api/_generated/environments/raw_client.py +0 -1567
  138. mirascope/api/_generated/environments/types/__init__.py +0 -25
  139. mirascope/api/_generated/environments/types/environments_create_response.py +0 -24
  140. mirascope/api/_generated/environments/types/environments_get_analytics_response.py +0 -60
  141. mirascope/api/_generated/environments/types/environments_get_analytics_response_top_functions_item.py +0 -24
  142. mirascope/api/_generated/environments/types/environments_get_analytics_response_top_models_item.py +0 -22
  143. mirascope/api/_generated/environments/types/environments_get_response.py +0 -24
  144. mirascope/api/_generated/environments/types/environments_list_response_item.py +0 -24
  145. mirascope/api/_generated/environments/types/environments_update_response.py +0 -24
  146. mirascope/api/_generated/errors/__init__.py +0 -25
  147. mirascope/api/_generated/errors/bad_request_error.py +0 -14
  148. mirascope/api/_generated/errors/conflict_error.py +0 -14
  149. mirascope/api/_generated/errors/forbidden_error.py +0 -11
  150. mirascope/api/_generated/errors/internal_server_error.py +0 -10
  151. mirascope/api/_generated/errors/not_found_error.py +0 -11
  152. mirascope/api/_generated/errors/payment_required_error.py +0 -15
  153. mirascope/api/_generated/errors/service_unavailable_error.py +0 -14
  154. mirascope/api/_generated/errors/too_many_requests_error.py +0 -15
  155. mirascope/api/_generated/errors/unauthorized_error.py +0 -11
  156. mirascope/api/_generated/functions/__init__.py +0 -39
  157. mirascope/api/_generated/functions/client.py +0 -647
  158. mirascope/api/_generated/functions/raw_client.py +0 -1890
  159. mirascope/api/_generated/functions/types/__init__.py +0 -53
  160. mirascope/api/_generated/functions/types/functions_create_request_dependencies_value.py +0 -20
  161. mirascope/api/_generated/functions/types/functions_create_response.py +0 -37
  162. mirascope/api/_generated/functions/types/functions_create_response_dependencies_value.py +0 -20
  163. mirascope/api/_generated/functions/types/functions_find_by_hash_response.py +0 -39
  164. mirascope/api/_generated/functions/types/functions_find_by_hash_response_dependencies_value.py +0 -20
  165. mirascope/api/_generated/functions/types/functions_get_by_env_response.py +0 -53
  166. mirascope/api/_generated/functions/types/functions_get_by_env_response_dependencies_value.py +0 -22
  167. mirascope/api/_generated/functions/types/functions_get_response.py +0 -37
  168. mirascope/api/_generated/functions/types/functions_get_response_dependencies_value.py +0 -20
  169. mirascope/api/_generated/functions/types/functions_list_by_env_response.py +0 -25
  170. mirascope/api/_generated/functions/types/functions_list_by_env_response_functions_item.py +0 -56
  171. mirascope/api/_generated/functions/types/functions_list_by_env_response_functions_item_dependencies_value.py +0 -22
  172. mirascope/api/_generated/functions/types/functions_list_response.py +0 -21
  173. mirascope/api/_generated/functions/types/functions_list_response_functions_item.py +0 -41
  174. mirascope/api/_generated/functions/types/functions_list_response_functions_item_dependencies_value.py +0 -20
  175. mirascope/api/_generated/health/__init__.py +0 -7
  176. mirascope/api/_generated/health/client.py +0 -92
  177. mirascope/api/_generated/health/raw_client.py +0 -175
  178. mirascope/api/_generated/health/types/__init__.py +0 -8
  179. mirascope/api/_generated/health/types/health_check_response.py +0 -22
  180. mirascope/api/_generated/health/types/health_check_response_status.py +0 -5
  181. mirascope/api/_generated/organization_invitations/__init__.py +0 -33
  182. mirascope/api/_generated/organization_invitations/client.py +0 -546
  183. mirascope/api/_generated/organization_invitations/raw_client.py +0 -1519
  184. mirascope/api/_generated/organization_invitations/types/__init__.py +0 -53
  185. mirascope/api/_generated/organization_invitations/types/organization_invitations_accept_response.py +0 -34
  186. mirascope/api/_generated/organization_invitations/types/organization_invitations_accept_response_role.py +0 -7
  187. mirascope/api/_generated/organization_invitations/types/organization_invitations_create_request_role.py +0 -7
  188. mirascope/api/_generated/organization_invitations/types/organization_invitations_create_response.py +0 -48
  189. mirascope/api/_generated/organization_invitations/types/organization_invitations_create_response_role.py +0 -7
  190. mirascope/api/_generated/organization_invitations/types/organization_invitations_create_response_status.py +0 -7
  191. mirascope/api/_generated/organization_invitations/types/organization_invitations_get_response.py +0 -48
  192. mirascope/api/_generated/organization_invitations/types/organization_invitations_get_response_role.py +0 -7
  193. mirascope/api/_generated/organization_invitations/types/organization_invitations_get_response_status.py +0 -7
  194. mirascope/api/_generated/organization_invitations/types/organization_invitations_list_response_item.py +0 -48
  195. mirascope/api/_generated/organization_invitations/types/organization_invitations_list_response_item_role.py +0 -7
  196. mirascope/api/_generated/organization_invitations/types/organization_invitations_list_response_item_status.py +0 -7
  197. mirascope/api/_generated/organization_memberships/__init__.py +0 -19
  198. mirascope/api/_generated/organization_memberships/client.py +0 -302
  199. mirascope/api/_generated/organization_memberships/raw_client.py +0 -736
  200. mirascope/api/_generated/organization_memberships/types/__init__.py +0 -27
  201. mirascope/api/_generated/organization_memberships/types/organization_memberships_list_response_item.py +0 -33
  202. mirascope/api/_generated/organization_memberships/types/organization_memberships_list_response_item_role.py +0 -7
  203. mirascope/api/_generated/organization_memberships/types/organization_memberships_update_request_role.py +0 -7
  204. mirascope/api/_generated/organization_memberships/types/organization_memberships_update_response.py +0 -31
  205. mirascope/api/_generated/organization_memberships/types/organization_memberships_update_response_role.py +0 -7
  206. mirascope/api/_generated/organizations/__init__.py +0 -51
  207. mirascope/api/_generated/organizations/client.py +0 -869
  208. mirascope/api/_generated/organizations/raw_client.py +0 -2593
  209. mirascope/api/_generated/organizations/types/__init__.py +0 -71
  210. mirascope/api/_generated/organizations/types/organizations_create_payment_intent_response.py +0 -24
  211. mirascope/api/_generated/organizations/types/organizations_create_response.py +0 -26
  212. mirascope/api/_generated/organizations/types/organizations_create_response_role.py +0 -5
  213. mirascope/api/_generated/organizations/types/organizations_get_response.py +0 -26
  214. mirascope/api/_generated/organizations/types/organizations_get_response_role.py +0 -5
  215. mirascope/api/_generated/organizations/types/organizations_list_response_item.py +0 -26
  216. mirascope/api/_generated/organizations/types/organizations_list_response_item_role.py +0 -5
  217. mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_request_target_plan.py +0 -7
  218. mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_response.py +0 -47
  219. mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_response_validation_errors_item.py +0 -33
  220. mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_response_validation_errors_item_resource.py +0 -7
  221. mirascope/api/_generated/organizations/types/organizations_router_balance_response.py +0 -24
  222. mirascope/api/_generated/organizations/types/organizations_subscription_response.py +0 -53
  223. mirascope/api/_generated/organizations/types/organizations_subscription_response_current_plan.py +0 -7
  224. mirascope/api/_generated/organizations/types/organizations_subscription_response_payment_method.py +0 -26
  225. mirascope/api/_generated/organizations/types/organizations_subscription_response_scheduled_change.py +0 -34
  226. mirascope/api/_generated/organizations/types/organizations_subscription_response_scheduled_change_target_plan.py +0 -7
  227. mirascope/api/_generated/organizations/types/organizations_update_response.py +0 -26
  228. mirascope/api/_generated/organizations/types/organizations_update_response_role.py +0 -5
  229. mirascope/api/_generated/organizations/types/organizations_update_subscription_request_target_plan.py +0 -7
  230. mirascope/api/_generated/organizations/types/organizations_update_subscription_response.py +0 -35
  231. mirascope/api/_generated/project_memberships/__init__.py +0 -25
  232. mirascope/api/_generated/project_memberships/client.py +0 -437
  233. mirascope/api/_generated/project_memberships/raw_client.py +0 -1039
  234. mirascope/api/_generated/project_memberships/types/__init__.py +0 -29
  235. mirascope/api/_generated/project_memberships/types/project_memberships_create_request_role.py +0 -7
  236. mirascope/api/_generated/project_memberships/types/project_memberships_create_response.py +0 -35
  237. mirascope/api/_generated/project_memberships/types/project_memberships_create_response_role.py +0 -7
  238. mirascope/api/_generated/project_memberships/types/project_memberships_list_response_item.py +0 -33
  239. mirascope/api/_generated/project_memberships/types/project_memberships_list_response_item_role.py +0 -7
  240. mirascope/api/_generated/project_memberships/types/project_memberships_update_request_role.py +0 -7
  241. mirascope/api/_generated/project_memberships/types/project_memberships_update_response.py +0 -35
  242. mirascope/api/_generated/project_memberships/types/project_memberships_update_response_role.py +0 -7
  243. mirascope/api/_generated/projects/__init__.py +0 -7
  244. mirascope/api/_generated/projects/client.py +0 -428
  245. mirascope/api/_generated/projects/raw_client.py +0 -1302
  246. mirascope/api/_generated/projects/types/__init__.py +0 -10
  247. mirascope/api/_generated/projects/types/projects_create_response.py +0 -25
  248. mirascope/api/_generated/projects/types/projects_get_response.py +0 -25
  249. mirascope/api/_generated/projects/types/projects_list_response_item.py +0 -25
  250. mirascope/api/_generated/projects/types/projects_update_response.py +0 -25
  251. mirascope/api/_generated/reference.md +0 -4915
  252. mirascope/api/_generated/tags/__init__.py +0 -19
  253. mirascope/api/_generated/tags/client.py +0 -504
  254. mirascope/api/_generated/tags/raw_client.py +0 -1288
  255. mirascope/api/_generated/tags/types/__init__.py +0 -17
  256. mirascope/api/_generated/tags/types/tags_create_response.py +0 -41
  257. mirascope/api/_generated/tags/types/tags_get_response.py +0 -41
  258. mirascope/api/_generated/tags/types/tags_list_response.py +0 -23
  259. mirascope/api/_generated/tags/types/tags_list_response_tags_item.py +0 -41
  260. mirascope/api/_generated/tags/types/tags_update_response.py +0 -41
  261. mirascope/api/_generated/token_cost/__init__.py +0 -7
  262. mirascope/api/_generated/token_cost/client.py +0 -160
  263. mirascope/api/_generated/token_cost/raw_client.py +0 -264
  264. mirascope/api/_generated/token_cost/types/__init__.py +0 -8
  265. mirascope/api/_generated/token_cost/types/token_cost_calculate_request_usage.py +0 -54
  266. mirascope/api/_generated/token_cost/types/token_cost_calculate_response.py +0 -52
  267. mirascope/api/_generated/traces/__init__.py +0 -97
  268. mirascope/api/_generated/traces/client.py +0 -1103
  269. mirascope/api/_generated/traces/raw_client.py +0 -2322
  270. mirascope/api/_generated/traces/types/__init__.py +0 -155
  271. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item.py +0 -29
  272. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource.py +0 -27
  273. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item.py +0 -23
  274. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value.py +0 -38
  275. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_array_value.py +0 -19
  276. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value.py +0 -22
  277. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value_values_item.py +0 -20
  278. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item.py +0 -29
  279. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope.py +0 -31
  280. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item.py +0 -23
  281. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value.py +0 -38
  282. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_array_value.py +0 -19
  283. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value.py +0 -22
  284. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value_values_item.py +0 -22
  285. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item.py +0 -48
  286. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item.py +0 -23
  287. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value.py +0 -38
  288. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_array_value.py +0 -19
  289. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value.py +0 -24
  290. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value_values_item.py +0 -22
  291. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_status.py +0 -20
  292. mirascope/api/_generated/traces/types/traces_create_response.py +0 -24
  293. mirascope/api/_generated/traces/types/traces_create_response_partial_success.py +0 -22
  294. mirascope/api/_generated/traces/types/traces_get_analytics_summary_response.py +0 -60
  295. mirascope/api/_generated/traces/types/traces_get_analytics_summary_response_top_functions_item.py +0 -24
  296. mirascope/api/_generated/traces/types/traces_get_analytics_summary_response_top_models_item.py +0 -22
  297. mirascope/api/_generated/traces/types/traces_get_trace_detail_by_env_response.py +0 -33
  298. mirascope/api/_generated/traces/types/traces_get_trace_detail_by_env_response_spans_item.py +0 -88
  299. mirascope/api/_generated/traces/types/traces_get_trace_detail_response.py +0 -33
  300. mirascope/api/_generated/traces/types/traces_get_trace_detail_response_spans_item.py +0 -88
  301. mirascope/api/_generated/traces/types/traces_list_by_function_hash_response.py +0 -25
  302. mirascope/api/_generated/traces/types/traces_list_by_function_hash_response_traces_item.py +0 -44
  303. mirascope/api/_generated/traces/types/traces_search_by_env_request_attribute_filters_item.py +0 -26
  304. mirascope/api/_generated/traces/types/traces_search_by_env_request_attribute_filters_item_operator.py +0 -7
  305. mirascope/api/_generated/traces/types/traces_search_by_env_request_sort_by.py +0 -7
  306. mirascope/api/_generated/traces/types/traces_search_by_env_request_sort_order.py +0 -7
  307. mirascope/api/_generated/traces/types/traces_search_by_env_response.py +0 -26
  308. mirascope/api/_generated/traces/types/traces_search_by_env_response_spans_item.py +0 -50
  309. mirascope/api/_generated/traces/types/traces_search_request_attribute_filters_item.py +0 -26
  310. mirascope/api/_generated/traces/types/traces_search_request_attribute_filters_item_operator.py +0 -7
  311. mirascope/api/_generated/traces/types/traces_search_request_sort_by.py +0 -7
  312. mirascope/api/_generated/traces/types/traces_search_request_sort_order.py +0 -5
  313. mirascope/api/_generated/traces/types/traces_search_response.py +0 -26
  314. mirascope/api/_generated/traces/types/traces_search_response_spans_item.py +0 -50
  315. mirascope/api/_generated/types/__init__.py +0 -85
  316. mirascope/api/_generated/types/already_exists_error.py +0 -22
  317. mirascope/api/_generated/types/already_exists_error_tag.py +0 -5
  318. mirascope/api/_generated/types/bad_request_error_body.py +0 -50
  319. mirascope/api/_generated/types/click_house_error.py +0 -22
  320. mirascope/api/_generated/types/database_error.py +0 -22
  321. mirascope/api/_generated/types/database_error_tag.py +0 -5
  322. mirascope/api/_generated/types/date.py +0 -3
  323. mirascope/api/_generated/types/http_api_decode_error.py +0 -27
  324. mirascope/api/_generated/types/http_api_decode_error_tag.py +0 -5
  325. mirascope/api/_generated/types/immutable_resource_error.py +0 -22
  326. mirascope/api/_generated/types/internal_server_error_body.py +0 -49
  327. mirascope/api/_generated/types/issue.py +0 -38
  328. mirascope/api/_generated/types/issue_tag.py +0 -10
  329. mirascope/api/_generated/types/not_found_error_body.py +0 -22
  330. mirascope/api/_generated/types/not_found_error_tag.py +0 -5
  331. mirascope/api/_generated/types/number_from_string.py +0 -3
  332. mirascope/api/_generated/types/permission_denied_error.py +0 -22
  333. mirascope/api/_generated/types/permission_denied_error_tag.py +0 -5
  334. mirascope/api/_generated/types/plan_limit_exceeded_error.py +0 -32
  335. mirascope/api/_generated/types/plan_limit_exceeded_error_tag.py +0 -7
  336. mirascope/api/_generated/types/pricing_unavailable_error.py +0 -23
  337. mirascope/api/_generated/types/property_key.py +0 -7
  338. mirascope/api/_generated/types/property_key_key.py +0 -25
  339. mirascope/api/_generated/types/property_key_key_tag.py +0 -5
  340. mirascope/api/_generated/types/rate_limit_error.py +0 -31
  341. mirascope/api/_generated/types/rate_limit_error_tag.py +0 -5
  342. mirascope/api/_generated/types/service_unavailable_error_body.py +0 -24
  343. mirascope/api/_generated/types/service_unavailable_error_tag.py +0 -7
  344. mirascope/api/_generated/types/stripe_error.py +0 -20
  345. mirascope/api/_generated/types/subscription_past_due_error.py +0 -31
  346. mirascope/api/_generated/types/subscription_past_due_error_tag.py +0 -7
  347. mirascope/api/_generated/types/unauthorized_error_body.py +0 -21
  348. mirascope/api/_generated/types/unauthorized_error_tag.py +0 -5
  349. mirascope/api/client.py +0 -255
  350. mirascope/api/settings.py +0 -99
  351. mirascope/llm/formatting/output_parser.py +0 -178
  352. mirascope/llm/formatting/primitives.py +0 -192
  353. mirascope/llm/mcp/mcp_client.py +0 -130
  354. mirascope/llm/messages/_utils.py +0 -34
  355. mirascope/llm/models/thinking_config.py +0 -61
  356. mirascope/llm/prompts/prompts.py +0 -487
  357. mirascope/llm/providers/__init__.py +0 -62
  358. mirascope/llm/providers/anthropic/__init__.py +0 -11
  359. mirascope/llm/providers/anthropic/_utils/__init__.py +0 -27
  360. mirascope/llm/providers/anthropic/_utils/beta_decode.py +0 -282
  361. mirascope/llm/providers/anthropic/_utils/beta_encode.py +0 -266
  362. mirascope/llm/providers/anthropic/_utils/encode.py +0 -418
  363. mirascope/llm/providers/anthropic/_utils/errors.py +0 -46
  364. mirascope/llm/providers/anthropic/beta_provider.py +0 -374
  365. mirascope/llm/providers/anthropic/model_id.py +0 -23
  366. mirascope/llm/providers/anthropic/model_info.py +0 -87
  367. mirascope/llm/providers/anthropic/provider.py +0 -479
  368. mirascope/llm/providers/google/__init__.py +0 -6
  369. mirascope/llm/providers/google/_utils/errors.py +0 -50
  370. mirascope/llm/providers/google/model_id.py +0 -22
  371. mirascope/llm/providers/google/model_info.py +0 -63
  372. mirascope/llm/providers/google/provider.py +0 -492
  373. mirascope/llm/providers/mirascope/__init__.py +0 -5
  374. mirascope/llm/providers/mirascope/_utils.py +0 -73
  375. mirascope/llm/providers/mirascope/provider.py +0 -349
  376. mirascope/llm/providers/mlx/__init__.py +0 -9
  377. mirascope/llm/providers/mlx/_utils.py +0 -141
  378. mirascope/llm/providers/mlx/encoding/__init__.py +0 -8
  379. mirascope/llm/providers/mlx/encoding/base.py +0 -72
  380. mirascope/llm/providers/mlx/encoding/transformers.py +0 -150
  381. mirascope/llm/providers/mlx/mlx.py +0 -254
  382. mirascope/llm/providers/mlx/model_id.py +0 -17
  383. mirascope/llm/providers/mlx/provider.py +0 -452
  384. mirascope/llm/providers/model_id.py +0 -16
  385. mirascope/llm/providers/ollama/__init__.py +0 -7
  386. mirascope/llm/providers/ollama/provider.py +0 -71
  387. mirascope/llm/providers/openai/__init__.py +0 -15
  388. mirascope/llm/providers/openai/_utils/__init__.py +0 -5
  389. mirascope/llm/providers/openai/_utils/errors.py +0 -46
  390. mirascope/llm/providers/openai/completions/__init__.py +0 -7
  391. mirascope/llm/providers/openai/completions/base_provider.py +0 -542
  392. mirascope/llm/providers/openai/completions/provider.py +0 -22
  393. mirascope/llm/providers/openai/model_id.py +0 -31
  394. mirascope/llm/providers/openai/model_info.py +0 -303
  395. mirascope/llm/providers/openai/provider.py +0 -441
  396. mirascope/llm/providers/openai/responses/__init__.py +0 -5
  397. mirascope/llm/providers/openai/responses/provider.py +0 -513
  398. mirascope/llm/providers/provider_id.py +0 -24
  399. mirascope/llm/providers/provider_registry.py +0 -299
  400. mirascope/llm/providers/together/__init__.py +0 -7
  401. mirascope/llm/providers/together/provider.py +0 -40
  402. mirascope/llm/responses/usage.py +0 -95
  403. mirascope/ops/__init__.py +0 -111
  404. mirascope/ops/_internal/__init__.py +0 -5
  405. mirascope/ops/_internal/closure.py +0 -1169
  406. mirascope/ops/_internal/configuration.py +0 -177
  407. mirascope/ops/_internal/context.py +0 -76
  408. mirascope/ops/_internal/exporters/__init__.py +0 -26
  409. mirascope/ops/_internal/exporters/exporters.py +0 -395
  410. mirascope/ops/_internal/exporters/processors.py +0 -104
  411. mirascope/ops/_internal/exporters/types.py +0 -165
  412. mirascope/ops/_internal/exporters/utils.py +0 -29
  413. mirascope/ops/_internal/instrumentation/__init__.py +0 -8
  414. mirascope/ops/_internal/instrumentation/llm/__init__.py +0 -8
  415. mirascope/ops/_internal/instrumentation/llm/common.py +0 -530
  416. mirascope/ops/_internal/instrumentation/llm/cost.py +0 -190
  417. mirascope/ops/_internal/instrumentation/llm/encode.py +0 -238
  418. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/__init__.py +0 -38
  419. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_input_messages.py +0 -31
  420. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_output_messages.py +0 -38
  421. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_system_instructions.py +0 -18
  422. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/shared.py +0 -100
  423. mirascope/ops/_internal/instrumentation/llm/llm.py +0 -161
  424. mirascope/ops/_internal/instrumentation/llm/model.py +0 -1798
  425. mirascope/ops/_internal/instrumentation/llm/response.py +0 -521
  426. mirascope/ops/_internal/instrumentation/llm/serialize.py +0 -300
  427. mirascope/ops/_internal/propagation.py +0 -198
  428. mirascope/ops/_internal/protocols.py +0 -133
  429. mirascope/ops/_internal/session.py +0 -139
  430. mirascope/ops/_internal/spans.py +0 -232
  431. mirascope/ops/_internal/traced_calls.py +0 -375
  432. mirascope/ops/_internal/traced_functions.py +0 -523
  433. mirascope/ops/_internal/tracing.py +0 -353
  434. mirascope/ops/_internal/types.py +0 -13
  435. mirascope/ops/_internal/utils.py +0 -123
  436. mirascope/ops/_internal/versioned_calls.py +0 -512
  437. mirascope/ops/_internal/versioned_functions.py +0 -357
  438. mirascope/ops/_internal/versioning.py +0 -303
  439. mirascope/ops/exceptions.py +0 -21
  440. mirascope-2.0.0.dist-info/RECORD +0 -423
  441. /mirascope/llm/{providers → clients}/base/kwargs.py +0 -0
  442. /mirascope/llm/{providers → clients}/google/message.py +0 -0
@@ -0,0 +1,832 @@
1
+ """OpenAI Responses API client implementation."""
2
+
3
+ import os
4
+ from collections.abc import Sequence
5
+ from contextvars import ContextVar
6
+ from functools import lru_cache
7
+ from typing import overload
8
+ from typing_extensions import Unpack
9
+
10
+ from openai import AsyncOpenAI, OpenAI
11
+
12
+ from ....context import Context, DepsT
13
+ from ....formatting import Format, FormattableT
14
+ from ....messages import Message
15
+ from ....responses import (
16
+ AsyncContextResponse,
17
+ AsyncContextStreamResponse,
18
+ AsyncResponse,
19
+ AsyncStreamResponse,
20
+ ContextResponse,
21
+ ContextStreamResponse,
22
+ Response,
23
+ StreamResponse,
24
+ )
25
+ from ....tools import (
26
+ AsyncContextTool,
27
+ AsyncContextToolkit,
28
+ AsyncTool,
29
+ AsyncToolkit,
30
+ ContextTool,
31
+ ContextToolkit,
32
+ Tool,
33
+ Toolkit,
34
+ )
35
+ from ...base import BaseClient, Params
36
+ from . import _utils
37
+ from .model_ids import OpenAIResponsesModelId
38
+
39
+ OPENAI_RESPONSES_CLIENT_CONTEXT: ContextVar["OpenAIResponsesClient | None"] = (
40
+ ContextVar("OPENAI_RESPONSES_CLIENT_CONTEXT", default=None)
41
+ )
42
+
43
+
44
+ @lru_cache(maxsize=256)
45
+ def _openai_responses_singleton(
46
+ api_key: str | None, base_url: str | None
47
+ ) -> "OpenAIResponsesClient":
48
+ """Return a cached `OpenAIResponsesClient` instance for the given parameters."""
49
+ return OpenAIResponsesClient(api_key=api_key, base_url=base_url)
50
+
51
+
52
+ def client(
53
+ *, api_key: str | None = None, base_url: str | None = None
54
+ ) -> "OpenAIResponsesClient":
55
+ """Return an `OpenAIResponsesClient`."""
56
+ api_key = api_key or os.getenv("OPENAI_API_KEY")
57
+ base_url = base_url or os.getenv("OPENAI_BASE_URL")
58
+ return _openai_responses_singleton(api_key, base_url)
59
+
60
+
61
+ def get_client() -> "OpenAIResponsesClient":
62
+ """Get the current `OpenAIResponsesClient` from context."""
63
+ current_client = OPENAI_RESPONSES_CLIENT_CONTEXT.get()
64
+ if current_client is None:
65
+ current_client = client()
66
+ OPENAI_RESPONSES_CLIENT_CONTEXT.set(current_client)
67
+ return current_client
68
+
69
+
70
+ class OpenAIResponsesClient(BaseClient[OpenAIResponsesModelId, OpenAI]):
71
+ """The client for the OpenAI Responses API."""
72
+
73
+ @property
74
+ def _context_var(self) -> ContextVar["OpenAIResponsesClient | None"]:
75
+ return OPENAI_RESPONSES_CLIENT_CONTEXT
76
+
77
+ def __init__(
78
+ self, *, api_key: str | None = None, base_url: str | None = None
79
+ ) -> None:
80
+ """Initialize the OpenAI Responses client."""
81
+ self.client = OpenAI(api_key=api_key, base_url=base_url)
82
+ self.async_client = AsyncOpenAI(api_key=api_key, base_url=base_url)
83
+
84
+ @overload
85
+ def call(
86
+ self,
87
+ *,
88
+ model_id: OpenAIResponsesModelId,
89
+ messages: Sequence[Message],
90
+ tools: Sequence[Tool] | Toolkit | None = None,
91
+ format: None = None,
92
+ **params: Unpack[Params],
93
+ ) -> Response:
94
+ """Generate an `llm.Response` without a response format."""
95
+ ...
96
+
97
+ @overload
98
+ def call(
99
+ self,
100
+ *,
101
+ model_id: OpenAIResponsesModelId,
102
+ messages: Sequence[Message],
103
+ tools: Sequence[Tool] | Toolkit | None = None,
104
+ format: type[FormattableT] | Format[FormattableT],
105
+ **params: Unpack[Params],
106
+ ) -> Response[FormattableT]:
107
+ """Generate an `llm.Response` with a response format."""
108
+ ...
109
+
110
+ @overload
111
+ def call(
112
+ self,
113
+ *,
114
+ model_id: OpenAIResponsesModelId,
115
+ messages: Sequence[Message],
116
+ tools: Sequence[Tool] | Toolkit | None = None,
117
+ format: type[FormattableT] | Format[FormattableT] | None = None,
118
+ **params: Unpack[Params],
119
+ ) -> Response | Response[FormattableT]:
120
+ """Generate an `llm.Response` with optional response format."""
121
+ ...
122
+
123
+ def call(
124
+ self,
125
+ *,
126
+ model_id: OpenAIResponsesModelId,
127
+ messages: Sequence[Message],
128
+ tools: Sequence[Tool] | Toolkit | None = None,
129
+ format: type[FormattableT] | Format[FormattableT] | None = None,
130
+ **params: Unpack[Params],
131
+ ) -> Response | Response[FormattableT]:
132
+ """Generate an `llm.Response` by synchronously calling the OpenAI Responses API.
133
+
134
+ Args:
135
+ model_id: Model identifier to use.
136
+ messages: Messages to send to the LLM.
137
+ tools: Optional tools that the model may invoke.
138
+ format: Optional response format specifier.
139
+ **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
140
+
141
+ Returns:
142
+ An `llm.Response` object containing the LLM-generated content.
143
+ """
144
+ messages, format, kwargs = _utils.encode_request(
145
+ model_id=model_id,
146
+ messages=messages,
147
+ tools=tools,
148
+ format=format,
149
+ params=params,
150
+ )
151
+
152
+ openai_response = self.client.responses.create(**kwargs)
153
+
154
+ assistant_message, finish_reason = _utils.decode_response(
155
+ openai_response, model_id
156
+ )
157
+
158
+ return Response(
159
+ raw=openai_response,
160
+ provider="openai:responses",
161
+ model_id=model_id,
162
+ params=params,
163
+ tools=tools,
164
+ input_messages=messages,
165
+ assistant_message=assistant_message,
166
+ finish_reason=finish_reason,
167
+ format=format,
168
+ )
169
+
170
+ @overload
171
+ async def call_async(
172
+ self,
173
+ *,
174
+ model_id: OpenAIResponsesModelId,
175
+ messages: Sequence[Message],
176
+ tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
177
+ format: None = None,
178
+ **params: Unpack[Params],
179
+ ) -> AsyncResponse:
180
+ """Generate an `llm.AsyncResponse` without a response format."""
181
+ ...
182
+
183
+ @overload
184
+ async def call_async(
185
+ self,
186
+ *,
187
+ model_id: OpenAIResponsesModelId,
188
+ messages: Sequence[Message],
189
+ tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
190
+ format: type[FormattableT] | Format[FormattableT],
191
+ **params: Unpack[Params],
192
+ ) -> AsyncResponse[FormattableT]:
193
+ """Generate an `llm.AsyncResponse` with a response format."""
194
+ ...
195
+
196
+ @overload
197
+ async def call_async(
198
+ self,
199
+ *,
200
+ model_id: OpenAIResponsesModelId,
201
+ messages: Sequence[Message],
202
+ tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
203
+ format: type[FormattableT] | Format[FormattableT] | None = None,
204
+ **params: Unpack[Params],
205
+ ) -> AsyncResponse | AsyncResponse[FormattableT]:
206
+ """Generate an `llm.AsyncResponse` with optional response format."""
207
+ ...
208
+
209
+ async def call_async(
210
+ self,
211
+ *,
212
+ model_id: OpenAIResponsesModelId,
213
+ messages: Sequence[Message],
214
+ tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
215
+ format: type[FormattableT] | Format[FormattableT] | None = None,
216
+ **params: Unpack[Params],
217
+ ) -> AsyncResponse | AsyncResponse[FormattableT]:
218
+ """Generate an `llm.AsyncResponse` by asynchronously calling the OpenAI Responses API.
219
+
220
+ Args:
221
+ model_id: Model identifier to use.
222
+ messages: Messages to send to the LLM.
223
+ tools: Optional tools that the model may invoke.
224
+ format: Optional response format specifier.
225
+ **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
226
+
227
+ Returns:
228
+ An `llm.AsyncResponse` object containing the LLM-generated content.
229
+ """
230
+ messages, format, kwargs = _utils.encode_request(
231
+ model_id=model_id,
232
+ messages=messages,
233
+ tools=tools,
234
+ format=format,
235
+ params=params,
236
+ )
237
+
238
+ openai_response = await self.async_client.responses.create(**kwargs)
239
+
240
+ assistant_message, finish_reason = _utils.decode_response(
241
+ openai_response, model_id
242
+ )
243
+
244
+ return AsyncResponse(
245
+ raw=openai_response,
246
+ provider="openai:responses",
247
+ model_id=model_id,
248
+ params=params,
249
+ tools=tools,
250
+ input_messages=messages,
251
+ assistant_message=assistant_message,
252
+ finish_reason=finish_reason,
253
+ format=format,
254
+ )
255
+
256
+ @overload
257
+ def stream(
258
+ self,
259
+ *,
260
+ model_id: OpenAIResponsesModelId,
261
+ messages: Sequence[Message],
262
+ tools: Sequence[Tool] | Toolkit | None = None,
263
+ format: None = None,
264
+ **params: Unpack[Params],
265
+ ) -> StreamResponse:
266
+ """Generate a `llm.StreamResponse` without a response format."""
267
+ ...
268
+
269
+ @overload
270
+ def stream(
271
+ self,
272
+ *,
273
+ model_id: OpenAIResponsesModelId,
274
+ messages: Sequence[Message],
275
+ tools: Sequence[Tool] | Toolkit | None = None,
276
+ format: type[FormattableT] | Format[FormattableT],
277
+ **params: Unpack[Params],
278
+ ) -> StreamResponse[FormattableT]:
279
+ """Generate a `llm.StreamResponse` with a response format."""
280
+ ...
281
+
282
+ @overload
283
+ def stream(
284
+ self,
285
+ *,
286
+ model_id: OpenAIResponsesModelId,
287
+ messages: Sequence[Message],
288
+ tools: Sequence[Tool] | Toolkit | None = None,
289
+ format: type[FormattableT] | Format[FormattableT] | None = None,
290
+ **params: Unpack[Params],
291
+ ) -> StreamResponse | StreamResponse[FormattableT]:
292
+ """Generate a `llm.StreamResponse` with optional response format."""
293
+ ...
294
+
295
+ def stream(
296
+ self,
297
+ *,
298
+ model_id: OpenAIResponsesModelId,
299
+ messages: Sequence[Message],
300
+ tools: Sequence[Tool] | Toolkit | None = None,
301
+ format: type[FormattableT] | Format[FormattableT] | None = None,
302
+ **params: Unpack[Params],
303
+ ) -> StreamResponse | StreamResponse[FormattableT]:
304
+ """Generate a `llm.StreamResponse` by synchronously streaming from the OpenAI Responses API.
305
+
306
+ Args:
307
+ model_id: Model identifier to use.
308
+ messages: Messages to send to the LLM.
309
+ tools: Optional tools that the model may invoke.
310
+ format: Optional response format specifier.
311
+ **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
312
+
313
+ Returns:
314
+ A `llm.StreamResponse` object containing the LLM-generated content stream.
315
+ """
316
+ messages, format, kwargs = _utils.encode_request(
317
+ model_id=model_id,
318
+ messages=messages,
319
+ tools=tools,
320
+ format=format,
321
+ params=params,
322
+ )
323
+
324
+ openai_stream = self.client.responses.create(
325
+ **kwargs,
326
+ stream=True,
327
+ )
328
+
329
+ chunk_iterator = _utils.decode_stream(
330
+ openai_stream,
331
+ )
332
+
333
+ return StreamResponse(
334
+ provider="openai:responses",
335
+ model_id=model_id,
336
+ params=params,
337
+ tools=tools,
338
+ input_messages=messages,
339
+ chunk_iterator=chunk_iterator,
340
+ format=format,
341
+ )
342
+
343
+ @overload
344
+ async def stream_async(
345
+ self,
346
+ *,
347
+ model_id: OpenAIResponsesModelId,
348
+ messages: Sequence[Message],
349
+ tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
350
+ format: None = None,
351
+ **params: Unpack[Params],
352
+ ) -> AsyncStreamResponse:
353
+ """Generate a `llm.AsyncStreamResponse` without a response format."""
354
+ ...
355
+
356
+ @overload
357
+ async def stream_async(
358
+ self,
359
+ *,
360
+ model_id: OpenAIResponsesModelId,
361
+ messages: Sequence[Message],
362
+ tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
363
+ format: type[FormattableT] | Format[FormattableT],
364
+ **params: Unpack[Params],
365
+ ) -> AsyncStreamResponse[FormattableT]:
366
+ """Generate a `llm.AsyncStreamResponse` with a response format."""
367
+ ...
368
+
369
+ @overload
370
+ async def stream_async(
371
+ self,
372
+ *,
373
+ model_id: OpenAIResponsesModelId,
374
+ messages: Sequence[Message],
375
+ tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
376
+ format: type[FormattableT] | Format[FormattableT] | None = None,
377
+ **params: Unpack[Params],
378
+ ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
379
+ """Generate a `llm.AsyncStreamResponse` with optional response format."""
380
+ ...
381
+
382
+ async def stream_async(
383
+ self,
384
+ *,
385
+ model_id: OpenAIResponsesModelId,
386
+ messages: Sequence[Message],
387
+ tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
388
+ format: type[FormattableT] | Format[FormattableT] | None = None,
389
+ **params: Unpack[Params],
390
+ ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
391
+ """Generate a `llm.AsyncStreamResponse` by asynchronously streaming from the OpenAI Responses API.
392
+
393
+ Args:
394
+ model_id: Model identifier to use.
395
+ messages: Messages to send to the LLM.
396
+ tools: Optional tools that the model may invoke.
397
+ format: Optional response format specifier.
398
+ **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
399
+
400
+ Returns:
401
+ A `llm.AsyncStreamResponse` object containing the LLM-generated content stream.
402
+ """
403
+ messages, format, kwargs = _utils.encode_request(
404
+ model_id=model_id,
405
+ messages=messages,
406
+ tools=tools,
407
+ format=format,
408
+ params=params,
409
+ )
410
+
411
+ openai_stream = await self.async_client.responses.create(
412
+ **kwargs,
413
+ stream=True,
414
+ )
415
+
416
+ chunk_iterator = _utils.decode_async_stream(
417
+ openai_stream,
418
+ )
419
+
420
+ return AsyncStreamResponse(
421
+ provider="openai:responses",
422
+ model_id=model_id,
423
+ params=params,
424
+ tools=tools,
425
+ input_messages=messages,
426
+ chunk_iterator=chunk_iterator,
427
+ format=format,
428
+ )
429
+
430
+ @overload
431
+ def context_call(
432
+ self,
433
+ *,
434
+ ctx: Context[DepsT],
435
+ model_id: OpenAIResponsesModelId,
436
+ messages: Sequence[Message],
437
+ tools: Sequence[Tool | ContextTool[DepsT]]
438
+ | ContextToolkit[DepsT]
439
+ | None = None,
440
+ format: None = None,
441
+ **params: Unpack[Params],
442
+ ) -> ContextResponse[DepsT]:
443
+ """Generate a `llm.ContextResponse` without a response format."""
444
+ ...
445
+
446
+ @overload
447
+ def context_call(
448
+ self,
449
+ *,
450
+ ctx: Context[DepsT],
451
+ model_id: OpenAIResponsesModelId,
452
+ messages: Sequence[Message],
453
+ tools: Sequence[Tool | ContextTool[DepsT]]
454
+ | ContextToolkit[DepsT]
455
+ | None = None,
456
+ format: type[FormattableT] | Format[FormattableT],
457
+ **params: Unpack[Params],
458
+ ) -> ContextResponse[DepsT, FormattableT]:
459
+ """Generate a `llm.ContextResponse` with a response format."""
460
+ ...
461
+
462
+ @overload
463
+ def context_call(
464
+ self,
465
+ *,
466
+ ctx: Context[DepsT],
467
+ model_id: OpenAIResponsesModelId,
468
+ messages: Sequence[Message],
469
+ tools: Sequence[Tool | ContextTool[DepsT]]
470
+ | ContextToolkit[DepsT]
471
+ | None = None,
472
+ format: type[FormattableT] | Format[FormattableT] | None = None,
473
+ **params: Unpack[Params],
474
+ ) -> ContextResponse[DepsT] | ContextResponse[DepsT, FormattableT]:
475
+ """Generate a `llm.ContextResponse` with optional response format."""
476
+ ...
477
+
478
+ def context_call(
479
+ self,
480
+ *,
481
+ ctx: Context[DepsT],
482
+ model_id: OpenAIResponsesModelId,
483
+ messages: Sequence[Message],
484
+ tools: Sequence[Tool | ContextTool[DepsT]]
485
+ | ContextToolkit[DepsT]
486
+ | None = None,
487
+ format: type[FormattableT] | Format[FormattableT] | None = None,
488
+ **params: Unpack[Params],
489
+ ) -> ContextResponse[DepsT] | ContextResponse[DepsT, FormattableT]:
490
+ """Generate a `llm.ContextResponse` by synchronously calling the OpenAI Responses API with context.
491
+
492
+ Args:
493
+ ctx: The context object containing dependencies.
494
+ model_id: Model identifier to use.
495
+ messages: Messages to send to the LLM.
496
+ tools: Optional tools that the model may invoke.
497
+ format: Optional response format specifier.
498
+ **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
499
+
500
+ Returns:
501
+ A `llm.ContextResponse` object containing the LLM-generated content and context.
502
+ """
503
+ messages, format, kwargs = _utils.encode_request(
504
+ model_id=model_id,
505
+ messages=messages,
506
+ tools=tools,
507
+ format=format,
508
+ params=params,
509
+ )
510
+
511
+ openai_response = self.client.responses.create(**kwargs)
512
+
513
+ assistant_message, finish_reason = _utils.decode_response(
514
+ openai_response, model_id
515
+ )
516
+
517
+ return ContextResponse(
518
+ raw=openai_response,
519
+ provider="openai:responses",
520
+ model_id=model_id,
521
+ params=params,
522
+ tools=tools,
523
+ input_messages=messages,
524
+ assistant_message=assistant_message,
525
+ finish_reason=finish_reason,
526
+ format=format,
527
+ )
528
+
529
+ @overload
530
+ async def context_call_async(
531
+ self,
532
+ *,
533
+ ctx: Context[DepsT],
534
+ model_id: OpenAIResponsesModelId,
535
+ messages: Sequence[Message],
536
+ tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
537
+ | AsyncContextToolkit[DepsT]
538
+ | None = None,
539
+ format: None = None,
540
+ **params: Unpack[Params],
541
+ ) -> AsyncContextResponse[DepsT]:
542
+ """Generate a `llm.AsyncContextResponse` without a response format."""
543
+ ...
544
+
545
+ @overload
546
+ async def context_call_async(
547
+ self,
548
+ *,
549
+ ctx: Context[DepsT],
550
+ model_id: OpenAIResponsesModelId,
551
+ messages: Sequence[Message],
552
+ tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
553
+ | AsyncContextToolkit[DepsT]
554
+ | None = None,
555
+ format: type[FormattableT] | Format[FormattableT],
556
+ **params: Unpack[Params],
557
+ ) -> AsyncContextResponse[DepsT, FormattableT]:
558
+ """Generate a `llm.AsyncContextResponse` with a response format."""
559
+ ...
560
+
561
+ @overload
562
+ async def context_call_async(
563
+ self,
564
+ *,
565
+ ctx: Context[DepsT],
566
+ model_id: OpenAIResponsesModelId,
567
+ messages: Sequence[Message],
568
+ tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
569
+ | AsyncContextToolkit[DepsT]
570
+ | None = None,
571
+ format: type[FormattableT] | Format[FormattableT] | None = None,
572
+ **params: Unpack[Params],
573
+ ) -> AsyncContextResponse[DepsT] | AsyncContextResponse[DepsT, FormattableT]:
574
+ """Generate a `llm.AsyncContextResponse` with optional response format."""
575
+ ...
576
+
577
+ async def context_call_async(
578
+ self,
579
+ *,
580
+ ctx: Context[DepsT],
581
+ model_id: OpenAIResponsesModelId,
582
+ messages: Sequence[Message],
583
+ tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
584
+ | AsyncContextToolkit[DepsT]
585
+ | None = None,
586
+ format: type[FormattableT] | Format[FormattableT] | None = None,
587
+ **params: Unpack[Params],
588
+ ) -> AsyncContextResponse[DepsT] | AsyncContextResponse[DepsT, FormattableT]:
589
+ """Generate a `llm.AsyncContextResponse` by asynchronously calling the OpenAI Responses API with context.
590
+
591
+ Args:
592
+ ctx: The context object containing dependencies.
593
+ model_id: Model identifier to use.
594
+ messages: Messages to send to the LLM.
595
+ tools: Optional tools that the model may invoke.
596
+ format: Optional response format specifier.
597
+ **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
598
+
599
+ Returns:
600
+ A `llm.AsyncContextResponse` object containing the LLM-generated content and context.
601
+ """
602
+ messages, format, kwargs = _utils.encode_request(
603
+ model_id=model_id,
604
+ messages=messages,
605
+ tools=tools,
606
+ format=format,
607
+ params=params,
608
+ )
609
+
610
+ openai_response = await self.async_client.responses.create(**kwargs)
611
+
612
+ assistant_message, finish_reason = _utils.decode_response(
613
+ openai_response, model_id
614
+ )
615
+
616
+ return AsyncContextResponse(
617
+ raw=openai_response,
618
+ provider="openai:responses",
619
+ model_id=model_id,
620
+ params=params,
621
+ tools=tools,
622
+ input_messages=messages,
623
+ assistant_message=assistant_message,
624
+ finish_reason=finish_reason,
625
+ format=format,
626
+ )
627
+
628
+ @overload
629
+ def context_stream(
630
+ self,
631
+ *,
632
+ ctx: Context[DepsT],
633
+ model_id: OpenAIResponsesModelId,
634
+ messages: Sequence[Message],
635
+ tools: Sequence[Tool | ContextTool[DepsT]]
636
+ | ContextToolkit[DepsT]
637
+ | None = None,
638
+ format: None = None,
639
+ **params: Unpack[Params],
640
+ ) -> ContextStreamResponse[DepsT]:
641
+ """Generate a `llm.ContextStreamResponse` without a response format."""
642
+ ...
643
+
644
+ @overload
645
+ def context_stream(
646
+ self,
647
+ *,
648
+ ctx: Context[DepsT],
649
+ model_id: OpenAIResponsesModelId,
650
+ messages: Sequence[Message],
651
+ tools: Sequence[Tool | ContextTool[DepsT]]
652
+ | ContextToolkit[DepsT]
653
+ | None = None,
654
+ format: type[FormattableT] | Format[FormattableT],
655
+ **params: Unpack[Params],
656
+ ) -> ContextStreamResponse[DepsT, FormattableT]:
657
+ """Generate a `llm.ContextStreamResponse` with a response format."""
658
+ ...
659
+
660
+ @overload
661
+ def context_stream(
662
+ self,
663
+ *,
664
+ ctx: Context[DepsT],
665
+ model_id: OpenAIResponsesModelId,
666
+ messages: Sequence[Message],
667
+ tools: Sequence[Tool | ContextTool[DepsT]]
668
+ | ContextToolkit[DepsT]
669
+ | None = None,
670
+ format: type[FormattableT] | Format[FormattableT] | None = None,
671
+ **params: Unpack[Params],
672
+ ) -> ContextStreamResponse[DepsT] | ContextStreamResponse[DepsT, FormattableT]:
673
+ """Generate a `llm.ContextStreamResponse` with optional response format."""
674
+ ...
675
+
676
+ def context_stream(
677
+ self,
678
+ *,
679
+ ctx: Context[DepsT],
680
+ model_id: OpenAIResponsesModelId,
681
+ messages: Sequence[Message],
682
+ tools: Sequence[Tool | ContextTool[DepsT]]
683
+ | ContextToolkit[DepsT]
684
+ | None = None,
685
+ format: type[FormattableT] | Format[FormattableT] | None = None,
686
+ **params: Unpack[Params],
687
+ ) -> ContextStreamResponse[DepsT] | ContextStreamResponse[DepsT, FormattableT]:
688
+ """Generate a `llm.ContextStreamResponse` by synchronously streaming from the OpenAI Responses API with context.
689
+
690
+ Args:
691
+ ctx: The context object containing dependencies.
692
+ model_id: Model identifier to use.
693
+ messages: Messages to send to the LLM.
694
+ tools: Optional tools that the model may invoke.
695
+ format: Optional response format specifier.
696
+ **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
697
+
698
+ Returns:
699
+ A `llm.ContextStreamResponse` object containing the LLM-generated content stream and context.
700
+ """
701
+ messages, format, kwargs = _utils.encode_request(
702
+ model_id=model_id,
703
+ messages=messages,
704
+ tools=tools,
705
+ format=format,
706
+ params=params,
707
+ )
708
+
709
+ openai_stream = self.client.responses.create(
710
+ **kwargs,
711
+ stream=True,
712
+ )
713
+
714
+ chunk_iterator = _utils.decode_stream(
715
+ openai_stream,
716
+ )
717
+
718
+ return ContextStreamResponse(
719
+ provider="openai:responses",
720
+ model_id=model_id,
721
+ params=params,
722
+ tools=tools,
723
+ input_messages=messages,
724
+ chunk_iterator=chunk_iterator,
725
+ format=format,
726
+ )
727
+
728
+ @overload
729
+ async def context_stream_async(
730
+ self,
731
+ *,
732
+ ctx: Context[DepsT],
733
+ model_id: OpenAIResponsesModelId,
734
+ messages: Sequence[Message],
735
+ tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
736
+ | AsyncContextToolkit[DepsT]
737
+ | None = None,
738
+ format: None = None,
739
+ **params: Unpack[Params],
740
+ ) -> AsyncContextStreamResponse[DepsT]:
741
+ """Generate a `llm.AsyncContextStreamResponse` without a response format."""
742
+ ...
743
+
744
+ @overload
745
+ async def context_stream_async(
746
+ self,
747
+ *,
748
+ ctx: Context[DepsT],
749
+ model_id: OpenAIResponsesModelId,
750
+ messages: Sequence[Message],
751
+ tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
752
+ | AsyncContextToolkit[DepsT]
753
+ | None = None,
754
+ format: type[FormattableT] | Format[FormattableT],
755
+ **params: Unpack[Params],
756
+ ) -> AsyncContextStreamResponse[DepsT, FormattableT]:
757
+ """Generate a `llm.AsyncContextStreamResponse` with a response format."""
758
+ ...
759
+
760
+ @overload
761
+ async def context_stream_async(
762
+ self,
763
+ *,
764
+ ctx: Context[DepsT],
765
+ model_id: OpenAIResponsesModelId,
766
+ messages: Sequence[Message],
767
+ tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
768
+ | AsyncContextToolkit[DepsT]
769
+ | None = None,
770
+ format: type[FormattableT] | Format[FormattableT] | None = None,
771
+ **params: Unpack[Params],
772
+ ) -> (
773
+ AsyncContextStreamResponse[DepsT]
774
+ | AsyncContextStreamResponse[DepsT, FormattableT]
775
+ ):
776
+ """Generate a `llm.AsyncContextStreamResponse` with optional response format."""
777
+ ...
778
+
779
+ async def context_stream_async(
780
+ self,
781
+ *,
782
+ ctx: Context[DepsT],
783
+ model_id: OpenAIResponsesModelId,
784
+ messages: Sequence[Message],
785
+ tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
786
+ | AsyncContextToolkit[DepsT]
787
+ | None = None,
788
+ format: type[FormattableT] | Format[FormattableT] | None = None,
789
+ **params: Unpack[Params],
790
+ ) -> (
791
+ AsyncContextStreamResponse[DepsT]
792
+ | AsyncContextStreamResponse[DepsT, FormattableT]
793
+ ):
794
+ """Generate a `llm.AsyncContextStreamResponse` by asynchronously streaming from the OpenAI Responses API with context.
795
+
796
+ Args:
797
+ ctx: The context object containing dependencies.
798
+ model_id: Model identifier to use.
799
+ messages: Messages to send to the LLM.
800
+ tools: Optional tools that the model may invoke.
801
+ format: Optional response format specifier.
802
+ **params: Additional parameters to configure output (e.g. temperature). See `llm.Params`.
803
+
804
+ Returns:
805
+ A `llm.AsyncContextStreamResponse` object containing the LLM-generated content stream and context.
806
+ """
807
+ messages, format, kwargs = _utils.encode_request(
808
+ model_id=model_id,
809
+ messages=messages,
810
+ tools=tools,
811
+ format=format,
812
+ params=params,
813
+ )
814
+
815
+ openai_stream = await self.async_client.responses.create(
816
+ **kwargs,
817
+ stream=True,
818
+ )
819
+
820
+ chunk_iterator = _utils.decode_async_stream(
821
+ openai_stream,
822
+ )
823
+
824
+ return AsyncContextStreamResponse(
825
+ provider="openai:responses",
826
+ model_id=model_id,
827
+ params=params,
828
+ tools=tools,
829
+ input_messages=messages,
830
+ chunk_iterator=chunk_iterator,
831
+ format=format,
832
+ )