mirascope 2.0.0__py3-none-any.whl → 2.0.0a1__py3-none-any.whl

This diff compares two publicly released versions of the package as published to a supported registry. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.
Files changed (443)
  1. mirascope/__init__.py +2 -11
  2. mirascope/graphs/__init__.py +22 -0
  3. mirascope/graphs/finite_state_machine.py +625 -0
  4. mirascope/llm/__init__.py +15 -96
  5. mirascope/llm/agents/__init__.py +15 -0
  6. mirascope/llm/agents/agent.py +97 -0
  7. mirascope/llm/agents/agent_template.py +45 -0
  8. mirascope/llm/agents/decorator.py +176 -0
  9. mirascope/llm/calls/__init__.py +1 -2
  10. mirascope/llm/calls/base_call.py +33 -0
  11. mirascope/llm/calls/calls.py +58 -84
  12. mirascope/llm/calls/decorator.py +120 -140
  13. mirascope/llm/clients/__init__.py +34 -0
  14. mirascope/llm/clients/_missing_import_stubs.py +47 -0
  15. mirascope/llm/clients/anthropic/__init__.py +25 -0
  16. mirascope/llm/{providers/openai/completions → clients/anthropic}/_utils/__init__.py +0 -2
  17. mirascope/llm/{providers → clients}/anthropic/_utils/decode.py +22 -66
  18. mirascope/llm/clients/anthropic/_utils/encode.py +243 -0
  19. mirascope/llm/clients/anthropic/clients.py +819 -0
  20. mirascope/llm/clients/anthropic/model_ids.py +8 -0
  21. mirascope/llm/{providers → clients}/base/__init__.py +5 -4
  22. mirascope/llm/{providers → clients}/base/_utils.py +17 -78
  23. mirascope/llm/{providers/base/base_provider.py → clients/base/client.py} +145 -468
  24. mirascope/llm/{models → clients/base}/params.py +37 -16
  25. mirascope/llm/clients/google/__init__.py +20 -0
  26. mirascope/llm/{providers/openai/responses → clients/google}/_utils/__init__.py +0 -2
  27. mirascope/llm/{providers → clients}/google/_utils/decode.py +22 -98
  28. mirascope/llm/{providers → clients}/google/_utils/encode.py +46 -168
  29. mirascope/llm/clients/google/clients.py +853 -0
  30. mirascope/llm/clients/google/model_ids.py +15 -0
  31. mirascope/llm/clients/openai/__init__.py +25 -0
  32. mirascope/llm/clients/openai/completions/__init__.py +28 -0
  33. mirascope/llm/{providers/google → clients/openai/completions}/_utils/__init__.py +0 -4
  34. mirascope/llm/{providers → clients}/openai/completions/_utils/decode.py +9 -74
  35. mirascope/llm/{providers → clients}/openai/completions/_utils/encode.py +52 -70
  36. mirascope/llm/clients/openai/completions/_utils/model_features.py +81 -0
  37. mirascope/llm/clients/openai/completions/clients.py +833 -0
  38. mirascope/llm/clients/openai/completions/model_ids.py +8 -0
  39. mirascope/llm/clients/openai/responses/__init__.py +26 -0
  40. mirascope/llm/clients/openai/responses/_utils/__init__.py +13 -0
  41. mirascope/llm/{providers → clients}/openai/responses/_utils/decode.py +14 -80
  42. mirascope/llm/{providers → clients}/openai/responses/_utils/encode.py +41 -92
  43. mirascope/llm/clients/openai/responses/_utils/model_features.py +87 -0
  44. mirascope/llm/clients/openai/responses/clients.py +832 -0
  45. mirascope/llm/clients/openai/responses/model_ids.py +8 -0
  46. mirascope/llm/clients/openai/shared/__init__.py +7 -0
  47. mirascope/llm/clients/openai/shared/_utils.py +55 -0
  48. mirascope/llm/clients/providers.py +175 -0
  49. mirascope/llm/content/__init__.py +2 -3
  50. mirascope/llm/content/tool_call.py +0 -6
  51. mirascope/llm/content/tool_output.py +5 -22
  52. mirascope/llm/context/_utils.py +6 -19
  53. mirascope/llm/exceptions.py +43 -298
  54. mirascope/llm/formatting/__init__.py +2 -19
  55. mirascope/llm/formatting/_utils.py +74 -0
  56. mirascope/llm/formatting/format.py +30 -219
  57. mirascope/llm/formatting/from_call_args.py +2 -2
  58. mirascope/llm/formatting/partial.py +7 -80
  59. mirascope/llm/formatting/types.py +64 -21
  60. mirascope/llm/mcp/__init__.py +2 -2
  61. mirascope/llm/mcp/client.py +118 -0
  62. mirascope/llm/messages/__init__.py +0 -3
  63. mirascope/llm/messages/message.py +5 -13
  64. mirascope/llm/models/__init__.py +2 -7
  65. mirascope/llm/models/models.py +139 -315
  66. mirascope/llm/prompts/__init__.py +12 -13
  67. mirascope/llm/prompts/_utils.py +43 -14
  68. mirascope/llm/prompts/decorator.py +204 -144
  69. mirascope/llm/prompts/protocols.py +59 -25
  70. mirascope/llm/responses/__init__.py +1 -9
  71. mirascope/llm/responses/_utils.py +12 -102
  72. mirascope/llm/responses/base_response.py +6 -18
  73. mirascope/llm/responses/base_stream_response.py +50 -173
  74. mirascope/llm/responses/finish_reason.py +0 -1
  75. mirascope/llm/responses/response.py +13 -34
  76. mirascope/llm/responses/root_response.py +29 -100
  77. mirascope/llm/responses/stream_response.py +31 -40
  78. mirascope/llm/tools/__init__.py +2 -9
  79. mirascope/llm/tools/_utils.py +3 -12
  80. mirascope/llm/tools/decorator.py +16 -25
  81. mirascope/llm/tools/protocols.py +4 -4
  82. mirascope/llm/tools/tool_schema.py +19 -87
  83. mirascope/llm/tools/toolkit.py +27 -35
  84. mirascope/llm/tools/tools.py +41 -135
  85. {mirascope-2.0.0.dist-info → mirascope-2.0.0a1.dist-info}/METADATA +13 -90
  86. mirascope-2.0.0a1.dist-info/RECORD +102 -0
  87. {mirascope-2.0.0.dist-info → mirascope-2.0.0a1.dist-info}/WHEEL +1 -1
  88. {mirascope-2.0.0.dist-info → mirascope-2.0.0a1.dist-info}/licenses/LICENSE +1 -1
  89. mirascope/_stubs.py +0 -363
  90. mirascope/api/__init__.py +0 -14
  91. mirascope/api/_generated/README.md +0 -207
  92. mirascope/api/_generated/__init__.py +0 -440
  93. mirascope/api/_generated/annotations/__init__.py +0 -33
  94. mirascope/api/_generated/annotations/client.py +0 -506
  95. mirascope/api/_generated/annotations/raw_client.py +0 -1414
  96. mirascope/api/_generated/annotations/types/__init__.py +0 -31
  97. mirascope/api/_generated/annotations/types/annotations_create_request_label.py +0 -5
  98. mirascope/api/_generated/annotations/types/annotations_create_response.py +0 -48
  99. mirascope/api/_generated/annotations/types/annotations_create_response_label.py +0 -5
  100. mirascope/api/_generated/annotations/types/annotations_get_response.py +0 -48
  101. mirascope/api/_generated/annotations/types/annotations_get_response_label.py +0 -5
  102. mirascope/api/_generated/annotations/types/annotations_list_request_label.py +0 -5
  103. mirascope/api/_generated/annotations/types/annotations_list_response.py +0 -21
  104. mirascope/api/_generated/annotations/types/annotations_list_response_annotations_item.py +0 -50
  105. mirascope/api/_generated/annotations/types/annotations_list_response_annotations_item_label.py +0 -5
  106. mirascope/api/_generated/annotations/types/annotations_update_request_label.py +0 -5
  107. mirascope/api/_generated/annotations/types/annotations_update_response.py +0 -48
  108. mirascope/api/_generated/annotations/types/annotations_update_response_label.py +0 -5
  109. mirascope/api/_generated/api_keys/__init__.py +0 -17
  110. mirascope/api/_generated/api_keys/client.py +0 -530
  111. mirascope/api/_generated/api_keys/raw_client.py +0 -1236
  112. mirascope/api/_generated/api_keys/types/__init__.py +0 -15
  113. mirascope/api/_generated/api_keys/types/api_keys_create_response.py +0 -28
  114. mirascope/api/_generated/api_keys/types/api_keys_get_response.py +0 -27
  115. mirascope/api/_generated/api_keys/types/api_keys_list_all_for_org_response_item.py +0 -40
  116. mirascope/api/_generated/api_keys/types/api_keys_list_response_item.py +0 -27
  117. mirascope/api/_generated/client.py +0 -211
  118. mirascope/api/_generated/core/__init__.py +0 -52
  119. mirascope/api/_generated/core/api_error.py +0 -23
  120. mirascope/api/_generated/core/client_wrapper.py +0 -46
  121. mirascope/api/_generated/core/datetime_utils.py +0 -28
  122. mirascope/api/_generated/core/file.py +0 -67
  123. mirascope/api/_generated/core/force_multipart.py +0 -16
  124. mirascope/api/_generated/core/http_client.py +0 -543
  125. mirascope/api/_generated/core/http_response.py +0 -55
  126. mirascope/api/_generated/core/jsonable_encoder.py +0 -100
  127. mirascope/api/_generated/core/pydantic_utilities.py +0 -255
  128. mirascope/api/_generated/core/query_encoder.py +0 -58
  129. mirascope/api/_generated/core/remove_none_from_dict.py +0 -11
  130. mirascope/api/_generated/core/request_options.py +0 -35
  131. mirascope/api/_generated/core/serialization.py +0 -276
  132. mirascope/api/_generated/docs/__init__.py +0 -4
  133. mirascope/api/_generated/docs/client.py +0 -91
  134. mirascope/api/_generated/docs/raw_client.py +0 -178
  135. mirascope/api/_generated/environment.py +0 -9
  136. mirascope/api/_generated/environments/__init__.py +0 -23
  137. mirascope/api/_generated/environments/client.py +0 -649
  138. mirascope/api/_generated/environments/raw_client.py +0 -1567
  139. mirascope/api/_generated/environments/types/__init__.py +0 -25
  140. mirascope/api/_generated/environments/types/environments_create_response.py +0 -24
  141. mirascope/api/_generated/environments/types/environments_get_analytics_response.py +0 -60
  142. mirascope/api/_generated/environments/types/environments_get_analytics_response_top_functions_item.py +0 -24
  143. mirascope/api/_generated/environments/types/environments_get_analytics_response_top_models_item.py +0 -22
  144. mirascope/api/_generated/environments/types/environments_get_response.py +0 -24
  145. mirascope/api/_generated/environments/types/environments_list_response_item.py +0 -24
  146. mirascope/api/_generated/environments/types/environments_update_response.py +0 -24
  147. mirascope/api/_generated/errors/__init__.py +0 -25
  148. mirascope/api/_generated/errors/bad_request_error.py +0 -14
  149. mirascope/api/_generated/errors/conflict_error.py +0 -14
  150. mirascope/api/_generated/errors/forbidden_error.py +0 -11
  151. mirascope/api/_generated/errors/internal_server_error.py +0 -10
  152. mirascope/api/_generated/errors/not_found_error.py +0 -11
  153. mirascope/api/_generated/errors/payment_required_error.py +0 -15
  154. mirascope/api/_generated/errors/service_unavailable_error.py +0 -14
  155. mirascope/api/_generated/errors/too_many_requests_error.py +0 -15
  156. mirascope/api/_generated/errors/unauthorized_error.py +0 -11
  157. mirascope/api/_generated/functions/__init__.py +0 -39
  158. mirascope/api/_generated/functions/client.py +0 -647
  159. mirascope/api/_generated/functions/raw_client.py +0 -1890
  160. mirascope/api/_generated/functions/types/__init__.py +0 -53
  161. mirascope/api/_generated/functions/types/functions_create_request_dependencies_value.py +0 -20
  162. mirascope/api/_generated/functions/types/functions_create_response.py +0 -37
  163. mirascope/api/_generated/functions/types/functions_create_response_dependencies_value.py +0 -20
  164. mirascope/api/_generated/functions/types/functions_find_by_hash_response.py +0 -39
  165. mirascope/api/_generated/functions/types/functions_find_by_hash_response_dependencies_value.py +0 -20
  166. mirascope/api/_generated/functions/types/functions_get_by_env_response.py +0 -53
  167. mirascope/api/_generated/functions/types/functions_get_by_env_response_dependencies_value.py +0 -22
  168. mirascope/api/_generated/functions/types/functions_get_response.py +0 -37
  169. mirascope/api/_generated/functions/types/functions_get_response_dependencies_value.py +0 -20
  170. mirascope/api/_generated/functions/types/functions_list_by_env_response.py +0 -25
  171. mirascope/api/_generated/functions/types/functions_list_by_env_response_functions_item.py +0 -56
  172. mirascope/api/_generated/functions/types/functions_list_by_env_response_functions_item_dependencies_value.py +0 -22
  173. mirascope/api/_generated/functions/types/functions_list_response.py +0 -21
  174. mirascope/api/_generated/functions/types/functions_list_response_functions_item.py +0 -41
  175. mirascope/api/_generated/functions/types/functions_list_response_functions_item_dependencies_value.py +0 -20
  176. mirascope/api/_generated/health/__init__.py +0 -7
  177. mirascope/api/_generated/health/client.py +0 -92
  178. mirascope/api/_generated/health/raw_client.py +0 -175
  179. mirascope/api/_generated/health/types/__init__.py +0 -8
  180. mirascope/api/_generated/health/types/health_check_response.py +0 -22
  181. mirascope/api/_generated/health/types/health_check_response_status.py +0 -5
  182. mirascope/api/_generated/organization_invitations/__init__.py +0 -33
  183. mirascope/api/_generated/organization_invitations/client.py +0 -546
  184. mirascope/api/_generated/organization_invitations/raw_client.py +0 -1519
  185. mirascope/api/_generated/organization_invitations/types/__init__.py +0 -53
  186. mirascope/api/_generated/organization_invitations/types/organization_invitations_accept_response.py +0 -34
  187. mirascope/api/_generated/organization_invitations/types/organization_invitations_accept_response_role.py +0 -7
  188. mirascope/api/_generated/organization_invitations/types/organization_invitations_create_request_role.py +0 -7
  189. mirascope/api/_generated/organization_invitations/types/organization_invitations_create_response.py +0 -48
  190. mirascope/api/_generated/organization_invitations/types/organization_invitations_create_response_role.py +0 -7
  191. mirascope/api/_generated/organization_invitations/types/organization_invitations_create_response_status.py +0 -7
  192. mirascope/api/_generated/organization_invitations/types/organization_invitations_get_response.py +0 -48
  193. mirascope/api/_generated/organization_invitations/types/organization_invitations_get_response_role.py +0 -7
  194. mirascope/api/_generated/organization_invitations/types/organization_invitations_get_response_status.py +0 -7
  195. mirascope/api/_generated/organization_invitations/types/organization_invitations_list_response_item.py +0 -48
  196. mirascope/api/_generated/organization_invitations/types/organization_invitations_list_response_item_role.py +0 -7
  197. mirascope/api/_generated/organization_invitations/types/organization_invitations_list_response_item_status.py +0 -7
  198. mirascope/api/_generated/organization_memberships/__init__.py +0 -19
  199. mirascope/api/_generated/organization_memberships/client.py +0 -302
  200. mirascope/api/_generated/organization_memberships/raw_client.py +0 -736
  201. mirascope/api/_generated/organization_memberships/types/__init__.py +0 -27
  202. mirascope/api/_generated/organization_memberships/types/organization_memberships_list_response_item.py +0 -33
  203. mirascope/api/_generated/organization_memberships/types/organization_memberships_list_response_item_role.py +0 -7
  204. mirascope/api/_generated/organization_memberships/types/organization_memberships_update_request_role.py +0 -7
  205. mirascope/api/_generated/organization_memberships/types/organization_memberships_update_response.py +0 -31
  206. mirascope/api/_generated/organization_memberships/types/organization_memberships_update_response_role.py +0 -7
  207. mirascope/api/_generated/organizations/__init__.py +0 -51
  208. mirascope/api/_generated/organizations/client.py +0 -869
  209. mirascope/api/_generated/organizations/raw_client.py +0 -2593
  210. mirascope/api/_generated/organizations/types/__init__.py +0 -71
  211. mirascope/api/_generated/organizations/types/organizations_create_payment_intent_response.py +0 -24
  212. mirascope/api/_generated/organizations/types/organizations_create_response.py +0 -26
  213. mirascope/api/_generated/organizations/types/organizations_create_response_role.py +0 -5
  214. mirascope/api/_generated/organizations/types/organizations_get_response.py +0 -26
  215. mirascope/api/_generated/organizations/types/organizations_get_response_role.py +0 -5
  216. mirascope/api/_generated/organizations/types/organizations_list_response_item.py +0 -26
  217. mirascope/api/_generated/organizations/types/organizations_list_response_item_role.py +0 -5
  218. mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_request_target_plan.py +0 -7
  219. mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_response.py +0 -47
  220. mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_response_validation_errors_item.py +0 -33
  221. mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_response_validation_errors_item_resource.py +0 -7
  222. mirascope/api/_generated/organizations/types/organizations_router_balance_response.py +0 -24
  223. mirascope/api/_generated/organizations/types/organizations_subscription_response.py +0 -53
  224. mirascope/api/_generated/organizations/types/organizations_subscription_response_current_plan.py +0 -7
  225. mirascope/api/_generated/organizations/types/organizations_subscription_response_payment_method.py +0 -26
  226. mirascope/api/_generated/organizations/types/organizations_subscription_response_scheduled_change.py +0 -34
  227. mirascope/api/_generated/organizations/types/organizations_subscription_response_scheduled_change_target_plan.py +0 -7
  228. mirascope/api/_generated/organizations/types/organizations_update_response.py +0 -26
  229. mirascope/api/_generated/organizations/types/organizations_update_response_role.py +0 -5
  230. mirascope/api/_generated/organizations/types/organizations_update_subscription_request_target_plan.py +0 -7
  231. mirascope/api/_generated/organizations/types/organizations_update_subscription_response.py +0 -35
  232. mirascope/api/_generated/project_memberships/__init__.py +0 -25
  233. mirascope/api/_generated/project_memberships/client.py +0 -437
  234. mirascope/api/_generated/project_memberships/raw_client.py +0 -1039
  235. mirascope/api/_generated/project_memberships/types/__init__.py +0 -29
  236. mirascope/api/_generated/project_memberships/types/project_memberships_create_request_role.py +0 -7
  237. mirascope/api/_generated/project_memberships/types/project_memberships_create_response.py +0 -35
  238. mirascope/api/_generated/project_memberships/types/project_memberships_create_response_role.py +0 -7
  239. mirascope/api/_generated/project_memberships/types/project_memberships_list_response_item.py +0 -33
  240. mirascope/api/_generated/project_memberships/types/project_memberships_list_response_item_role.py +0 -7
  241. mirascope/api/_generated/project_memberships/types/project_memberships_update_request_role.py +0 -7
  242. mirascope/api/_generated/project_memberships/types/project_memberships_update_response.py +0 -35
  243. mirascope/api/_generated/project_memberships/types/project_memberships_update_response_role.py +0 -7
  244. mirascope/api/_generated/projects/__init__.py +0 -7
  245. mirascope/api/_generated/projects/client.py +0 -428
  246. mirascope/api/_generated/projects/raw_client.py +0 -1302
  247. mirascope/api/_generated/projects/types/__init__.py +0 -10
  248. mirascope/api/_generated/projects/types/projects_create_response.py +0 -25
  249. mirascope/api/_generated/projects/types/projects_get_response.py +0 -25
  250. mirascope/api/_generated/projects/types/projects_list_response_item.py +0 -25
  251. mirascope/api/_generated/projects/types/projects_update_response.py +0 -25
  252. mirascope/api/_generated/reference.md +0 -4915
  253. mirascope/api/_generated/tags/__init__.py +0 -19
  254. mirascope/api/_generated/tags/client.py +0 -504
  255. mirascope/api/_generated/tags/raw_client.py +0 -1288
  256. mirascope/api/_generated/tags/types/__init__.py +0 -17
  257. mirascope/api/_generated/tags/types/tags_create_response.py +0 -41
  258. mirascope/api/_generated/tags/types/tags_get_response.py +0 -41
  259. mirascope/api/_generated/tags/types/tags_list_response.py +0 -23
  260. mirascope/api/_generated/tags/types/tags_list_response_tags_item.py +0 -41
  261. mirascope/api/_generated/tags/types/tags_update_response.py +0 -41
  262. mirascope/api/_generated/token_cost/__init__.py +0 -7
  263. mirascope/api/_generated/token_cost/client.py +0 -160
  264. mirascope/api/_generated/token_cost/raw_client.py +0 -264
  265. mirascope/api/_generated/token_cost/types/__init__.py +0 -8
  266. mirascope/api/_generated/token_cost/types/token_cost_calculate_request_usage.py +0 -54
  267. mirascope/api/_generated/token_cost/types/token_cost_calculate_response.py +0 -52
  268. mirascope/api/_generated/traces/__init__.py +0 -97
  269. mirascope/api/_generated/traces/client.py +0 -1103
  270. mirascope/api/_generated/traces/raw_client.py +0 -2322
  271. mirascope/api/_generated/traces/types/__init__.py +0 -155
  272. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item.py +0 -29
  273. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource.py +0 -27
  274. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item.py +0 -23
  275. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value.py +0 -38
  276. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_array_value.py +0 -19
  277. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value.py +0 -22
  278. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value_values_item.py +0 -20
  279. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item.py +0 -29
  280. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope.py +0 -31
  281. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item.py +0 -23
  282. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value.py +0 -38
  283. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_array_value.py +0 -19
  284. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value.py +0 -22
  285. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value_values_item.py +0 -22
  286. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item.py +0 -48
  287. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item.py +0 -23
  288. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value.py +0 -38
  289. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_array_value.py +0 -19
  290. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value.py +0 -24
  291. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value_values_item.py +0 -22
  292. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_status.py +0 -20
  293. mirascope/api/_generated/traces/types/traces_create_response.py +0 -24
  294. mirascope/api/_generated/traces/types/traces_create_response_partial_success.py +0 -22
  295. mirascope/api/_generated/traces/types/traces_get_analytics_summary_response.py +0 -60
  296. mirascope/api/_generated/traces/types/traces_get_analytics_summary_response_top_functions_item.py +0 -24
  297. mirascope/api/_generated/traces/types/traces_get_analytics_summary_response_top_models_item.py +0 -22
  298. mirascope/api/_generated/traces/types/traces_get_trace_detail_by_env_response.py +0 -33
  299. mirascope/api/_generated/traces/types/traces_get_trace_detail_by_env_response_spans_item.py +0 -88
  300. mirascope/api/_generated/traces/types/traces_get_trace_detail_response.py +0 -33
  301. mirascope/api/_generated/traces/types/traces_get_trace_detail_response_spans_item.py +0 -88
  302. mirascope/api/_generated/traces/types/traces_list_by_function_hash_response.py +0 -25
  303. mirascope/api/_generated/traces/types/traces_list_by_function_hash_response_traces_item.py +0 -44
  304. mirascope/api/_generated/traces/types/traces_search_by_env_request_attribute_filters_item.py +0 -26
  305. mirascope/api/_generated/traces/types/traces_search_by_env_request_attribute_filters_item_operator.py +0 -7
  306. mirascope/api/_generated/traces/types/traces_search_by_env_request_sort_by.py +0 -7
  307. mirascope/api/_generated/traces/types/traces_search_by_env_request_sort_order.py +0 -7
  308. mirascope/api/_generated/traces/types/traces_search_by_env_response.py +0 -26
  309. mirascope/api/_generated/traces/types/traces_search_by_env_response_spans_item.py +0 -50
  310. mirascope/api/_generated/traces/types/traces_search_request_attribute_filters_item.py +0 -26
  311. mirascope/api/_generated/traces/types/traces_search_request_attribute_filters_item_operator.py +0 -7
  312. mirascope/api/_generated/traces/types/traces_search_request_sort_by.py +0 -7
  313. mirascope/api/_generated/traces/types/traces_search_request_sort_order.py +0 -5
  314. mirascope/api/_generated/traces/types/traces_search_response.py +0 -26
  315. mirascope/api/_generated/traces/types/traces_search_response_spans_item.py +0 -50
  316. mirascope/api/_generated/types/__init__.py +0 -85
  317. mirascope/api/_generated/types/already_exists_error.py +0 -22
  318. mirascope/api/_generated/types/already_exists_error_tag.py +0 -5
  319. mirascope/api/_generated/types/bad_request_error_body.py +0 -50
  320. mirascope/api/_generated/types/click_house_error.py +0 -22
  321. mirascope/api/_generated/types/database_error.py +0 -22
  322. mirascope/api/_generated/types/database_error_tag.py +0 -5
  323. mirascope/api/_generated/types/date.py +0 -3
  324. mirascope/api/_generated/types/http_api_decode_error.py +0 -27
  325. mirascope/api/_generated/types/http_api_decode_error_tag.py +0 -5
  326. mirascope/api/_generated/types/immutable_resource_error.py +0 -22
  327. mirascope/api/_generated/types/internal_server_error_body.py +0 -49
  328. mirascope/api/_generated/types/issue.py +0 -38
  329. mirascope/api/_generated/types/issue_tag.py +0 -10
  330. mirascope/api/_generated/types/not_found_error_body.py +0 -22
  331. mirascope/api/_generated/types/not_found_error_tag.py +0 -5
  332. mirascope/api/_generated/types/number_from_string.py +0 -3
  333. mirascope/api/_generated/types/permission_denied_error.py +0 -22
  334. mirascope/api/_generated/types/permission_denied_error_tag.py +0 -5
  335. mirascope/api/_generated/types/plan_limit_exceeded_error.py +0 -32
  336. mirascope/api/_generated/types/plan_limit_exceeded_error_tag.py +0 -7
  337. mirascope/api/_generated/types/pricing_unavailable_error.py +0 -23
  338. mirascope/api/_generated/types/property_key.py +0 -7
  339. mirascope/api/_generated/types/property_key_key.py +0 -25
  340. mirascope/api/_generated/types/property_key_key_tag.py +0 -5
  341. mirascope/api/_generated/types/rate_limit_error.py +0 -31
  342. mirascope/api/_generated/types/rate_limit_error_tag.py +0 -5
  343. mirascope/api/_generated/types/service_unavailable_error_body.py +0 -24
  344. mirascope/api/_generated/types/service_unavailable_error_tag.py +0 -7
  345. mirascope/api/_generated/types/stripe_error.py +0 -20
  346. mirascope/api/_generated/types/subscription_past_due_error.py +0 -31
  347. mirascope/api/_generated/types/subscription_past_due_error_tag.py +0 -7
  348. mirascope/api/_generated/types/unauthorized_error_body.py +0 -21
  349. mirascope/api/_generated/types/unauthorized_error_tag.py +0 -5
  350. mirascope/api/client.py +0 -255
  351. mirascope/api/settings.py +0 -99
  352. mirascope/llm/formatting/output_parser.py +0 -178
  353. mirascope/llm/formatting/primitives.py +0 -192
  354. mirascope/llm/mcp/mcp_client.py +0 -130
  355. mirascope/llm/messages/_utils.py +0 -34
  356. mirascope/llm/models/thinking_config.py +0 -61
  357. mirascope/llm/prompts/prompts.py +0 -487
  358. mirascope/llm/providers/__init__.py +0 -62
  359. mirascope/llm/providers/anthropic/__init__.py +0 -11
  360. mirascope/llm/providers/anthropic/_utils/__init__.py +0 -27
  361. mirascope/llm/providers/anthropic/_utils/beta_decode.py +0 -282
  362. mirascope/llm/providers/anthropic/_utils/beta_encode.py +0 -266
  363. mirascope/llm/providers/anthropic/_utils/encode.py +0 -418
  364. mirascope/llm/providers/anthropic/_utils/errors.py +0 -46
  365. mirascope/llm/providers/anthropic/beta_provider.py +0 -374
  366. mirascope/llm/providers/anthropic/model_id.py +0 -23
  367. mirascope/llm/providers/anthropic/model_info.py +0 -87
  368. mirascope/llm/providers/anthropic/provider.py +0 -479
  369. mirascope/llm/providers/google/__init__.py +0 -6
  370. mirascope/llm/providers/google/_utils/errors.py +0 -50
  371. mirascope/llm/providers/google/model_id.py +0 -22
  372. mirascope/llm/providers/google/model_info.py +0 -63
  373. mirascope/llm/providers/google/provider.py +0 -492
  374. mirascope/llm/providers/mirascope/__init__.py +0 -5
  375. mirascope/llm/providers/mirascope/_utils.py +0 -73
  376. mirascope/llm/providers/mirascope/provider.py +0 -349
  377. mirascope/llm/providers/mlx/__init__.py +0 -9
  378. mirascope/llm/providers/mlx/_utils.py +0 -141
  379. mirascope/llm/providers/mlx/encoding/__init__.py +0 -8
  380. mirascope/llm/providers/mlx/encoding/base.py +0 -72
  381. mirascope/llm/providers/mlx/encoding/transformers.py +0 -150
  382. mirascope/llm/providers/mlx/mlx.py +0 -254
  383. mirascope/llm/providers/mlx/model_id.py +0 -17
  384. mirascope/llm/providers/mlx/provider.py +0 -452
  385. mirascope/llm/providers/model_id.py +0 -16
  386. mirascope/llm/providers/ollama/__init__.py +0 -7
  387. mirascope/llm/providers/ollama/provider.py +0 -71
  388. mirascope/llm/providers/openai/__init__.py +0 -15
  389. mirascope/llm/providers/openai/_utils/__init__.py +0 -5
  390. mirascope/llm/providers/openai/_utils/errors.py +0 -46
  391. mirascope/llm/providers/openai/completions/__init__.py +0 -7
  392. mirascope/llm/providers/openai/completions/base_provider.py +0 -542
  393. mirascope/llm/providers/openai/completions/provider.py +0 -22
  394. mirascope/llm/providers/openai/model_id.py +0 -31
  395. mirascope/llm/providers/openai/model_info.py +0 -303
  396. mirascope/llm/providers/openai/provider.py +0 -441
  397. mirascope/llm/providers/openai/responses/__init__.py +0 -5
  398. mirascope/llm/providers/openai/responses/provider.py +0 -513
  399. mirascope/llm/providers/provider_id.py +0 -24
  400. mirascope/llm/providers/provider_registry.py +0 -299
  401. mirascope/llm/providers/together/__init__.py +0 -7
  402. mirascope/llm/providers/together/provider.py +0 -40
  403. mirascope/llm/responses/usage.py +0 -95
  404. mirascope/ops/__init__.py +0 -111
  405. mirascope/ops/_internal/__init__.py +0 -5
  406. mirascope/ops/_internal/closure.py +0 -1169
  407. mirascope/ops/_internal/configuration.py +0 -177
  408. mirascope/ops/_internal/context.py +0 -76
  409. mirascope/ops/_internal/exporters/__init__.py +0 -26
  410. mirascope/ops/_internal/exporters/exporters.py +0 -395
  411. mirascope/ops/_internal/exporters/processors.py +0 -104
  412. mirascope/ops/_internal/exporters/types.py +0 -165
  413. mirascope/ops/_internal/exporters/utils.py +0 -29
  414. mirascope/ops/_internal/instrumentation/__init__.py +0 -8
  415. mirascope/ops/_internal/instrumentation/llm/__init__.py +0 -8
  416. mirascope/ops/_internal/instrumentation/llm/common.py +0 -530
  417. mirascope/ops/_internal/instrumentation/llm/cost.py +0 -190
  418. mirascope/ops/_internal/instrumentation/llm/encode.py +0 -238
  419. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/__init__.py +0 -38
  420. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_input_messages.py +0 -31
  421. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_output_messages.py +0 -38
  422. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_system_instructions.py +0 -18
  423. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/shared.py +0 -100
  424. mirascope/ops/_internal/instrumentation/llm/llm.py +0 -161
  425. mirascope/ops/_internal/instrumentation/llm/model.py +0 -1798
  426. mirascope/ops/_internal/instrumentation/llm/response.py +0 -521
  427. mirascope/ops/_internal/instrumentation/llm/serialize.py +0 -300
  428. mirascope/ops/_internal/propagation.py +0 -198
  429. mirascope/ops/_internal/protocols.py +0 -133
  430. mirascope/ops/_internal/session.py +0 -139
  431. mirascope/ops/_internal/spans.py +0 -232
  432. mirascope/ops/_internal/traced_calls.py +0 -375
  433. mirascope/ops/_internal/traced_functions.py +0 -523
  434. mirascope/ops/_internal/tracing.py +0 -353
  435. mirascope/ops/_internal/types.py +0 -13
  436. mirascope/ops/_internal/utils.py +0 -123
  437. mirascope/ops/_internal/versioned_calls.py +0 -512
  438. mirascope/ops/_internal/versioned_functions.py +0 -357
  439. mirascope/ops/_internal/versioning.py +0 -303
  440. mirascope/ops/exceptions.py +0 -21
  441. mirascope-2.0.0.dist-info/RECORD +0 -423
  442. /mirascope/llm/{providers → clients}/base/kwargs.py +0 -0
  443. /mirascope/llm/{providers → clients}/google/message.py +0 -0
mirascope/llm/calls/calls.py
@@ -1,11 +1,10 @@
 """The Call module for generating responses using LLMs."""
 
 from dataclasses import dataclass
-from typing import Generic, TypeVar, overload
+from typing import Generic, overload
 
 from ..context import Context, DepsT
 from ..formatting import FormattableT
-from ..models import Model, use_model
 from ..prompts import (
     AsyncContextPrompt,
     AsyncPrompt,
@@ -22,39 +21,19 @@ from ..responses import (
     Response,
     StreamResponse,
 )
+from ..tools import (
+    AsyncContextToolkit,
+    AsyncToolkit,
+    ContextToolkit,
+    Toolkit,
+)
 from ..types import P
-
-CallT = TypeVar("CallT", bound="BaseCall")
-
-
-@dataclass
-class BaseCall:
-    """Base class for all Call types with shared model functionality."""
-
-    default_model: Model
-    """The default model that will be used if no model is set in context."""
-
-    @property
-    def model(self) -> Model:
-        """The model used for generating responses. May be overwritten via `with llm.model(...)`."""
-        return use_model(self.default_model)
+from .base_call import BaseCall
 
 
 @dataclass
-class Call(BaseCall, Generic[P, FormattableT]):
-    """A call that directly generates LLM responses without requiring a model argument.
-
-    Created by decorating a `MessageTemplate` with `llm.call`. The decorated function
-    becomes directly callable to generate responses, with the `Model` bundled in.
-
-    A `Call` is essentially: `MessageTemplate` + tools + format + `Model`.
-    It can be invoked directly: `call(*args, **kwargs)` (no model argument needed).
-
-    The model can be overridden at runtime using `with llm.model(...)` context manager.
-    """
-
-    prompt: Prompt[P, FormattableT]
-    """The underlying Prompt instance that generates messages with tools and format."""
+class Call(BaseCall[P, Prompt, Toolkit, FormattableT], Generic[P, FormattableT]):
+    """A class for generating responses using LLMs."""
 
     @overload
     def __call__(
@@ -84,7 +63,10 @@ class Call(BaseCall, Generic[P, FormattableT]):
         self, *args: P.args, **kwargs: P.kwargs
     ) -> Response | Response[FormattableT]:
         """Generates a response using the LLM."""
-        return self.prompt.call(self.model, *args, **kwargs)
+        messages = self.fn(*args, **kwargs)
+        return self.model.call(
+            messages=messages, tools=self.toolkit, format=self.format
+        )
 
 
     @overload
@@ -100,24 +82,18 @@ class Call(BaseCall, Generic[P, FormattableT]):
         self, *args: P.args, **kwargs: P.kwargs
     ) -> StreamResponse | StreamResponse[FormattableT]:
         """Generates a streaming response using the LLM."""
-        return self.prompt.stream(self.model, *args, **kwargs)
+        messages = self.fn(*args, **kwargs)
+        return self.model.stream(
+            messages=messages, tools=self.toolkit, format=self.format
+        )
 
 
 @dataclass
-class AsyncCall(BaseCall, Generic[P, FormattableT]):
-    """An async call that directly generates LLM responses without requiring a model argument.
-
-    Created by decorating an async `MessageTemplate` with `llm.call`. The decorated async
-    function becomes directly callable to generate responses asynchronously, with the `Model` bundled in.
-
-    An `AsyncCall` is essentially: async `MessageTemplate` + tools + format + `Model`.
-    It can be invoked directly: `await call(*args, **kwargs)` (no model argument needed).
-
-    The model can be overridden at runtime using `with llm.model(...)` context manager.
-    """
-
-    prompt: AsyncPrompt[P, FormattableT]
-    """The underlying AsyncPrompt instance that generates messages with tools and format."""
+class AsyncCall(
+    BaseCall[P, AsyncPrompt, AsyncToolkit, FormattableT],
+    Generic[P, FormattableT],
+):
+    """A class for generating responses using LLMs asynchronously."""
 
     @overload
     async def __call__(
@@ -132,7 +108,7 @@ class AsyncCall(BaseCall, Generic[P, FormattableT]):
     async def __call__(
         self, *args: P.args, **kwargs: P.kwargs
    ) -> AsyncResponse | AsyncResponse[FormattableT]:
-        """Generates a response using the LLM asynchronously."""
+        """Generates a Asyncresponse using the LLM asynchronously."""
         return await self.call(*args, **kwargs)
 
     @overload
@@ -149,7 +125,10 @@ class AsyncCall(BaseCall, Generic[P, FormattableT]):
         self, *args: P.args, **kwargs: P.kwargs
     ) -> AsyncResponse | AsyncResponse[FormattableT]:
         """Generates a response using the LLM asynchronously."""
-        return await self.prompt.call(self.model, *args, **kwargs)
+        messages = await self.fn(*args, **kwargs)
+        return await self.model.call_async(
+            messages=messages, tools=self.toolkit, format=self.format
+        )
 
     @overload
     async def stream(
@@ -165,25 +144,18 @@ class AsyncCall(BaseCall, Generic[P, FormattableT]):
         self, *args: P.args, **kwargs: P.kwargs
     ) -> AsyncStreamResponse[FormattableT] | AsyncStreamResponse:
         """Generates a streaming response using the LLM asynchronously."""
-        return await self.prompt.stream(self.model, *args, **kwargs)
+        messages = await self.fn(*args, **kwargs)
+        return await self.model.stream_async(
+            messages=messages, tools=self.toolkit, format=self.format
+        )
 
 
 @dataclass
-class ContextCall(BaseCall, Generic[P, DepsT, FormattableT]):
-    """A context-aware call that directly generates LLM responses without requiring a model argument.
-
-    Created by decorating a `ContextMessageTemplate` with `llm.call`. The decorated function
-    (with first parameter `'ctx'` of type `Context[DepsT]`) becomes directly callable to generate
-    responses with context dependencies, with the `Model` bundled in.
-
-    A `ContextCall` is essentially: `ContextMessageTemplate` + tools + format + `Model`.
-    It can be invoked directly: `call(ctx, *args, **kwargs)` (no model argument needed).
-
-    The model can be overridden at runtime using `with llm.model(...)` context manager.
-    """
-
-    prompt: ContextPrompt[P, DepsT, FormattableT]
-    """The underlying ContextPrompt instance that generates messages with tools and format."""
+class ContextCall(
+    BaseCall[P, ContextPrompt, ContextToolkit[DepsT], FormattableT],
+    Generic[P, DepsT, FormattableT],
+):
+    """A class for generating responses using LLMs."""
 
     @overload
     def __call__(
@@ -227,7 +199,10 @@ class ContextCall(BaseCall, Generic[P, DepsT, FormattableT]):
         self, ctx: Context[DepsT], *args: P.args, **kwargs: P.kwargs
     ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
         """Generates a response using the LLM."""
-        return self.prompt.call(self.model, ctx, *args, **kwargs)
+        messages = self.fn(ctx, *args, **kwargs)
+        return self.model.context_call(
+            ctx=ctx, messages=messages, tools=self.toolkit, format=self.format
+        )
 
     @overload
     def stream(
@@ -251,25 +226,18 @@ class ContextCall(BaseCall, Generic[P, DepsT, FormattableT]):
         ContextStreamResponse[DepsT, None] | ContextStreamResponse[DepsT, FormattableT]
     ):
         """Generates a streaming response using the LLM."""
-        return self.prompt.stream(self.model, ctx, *args, **kwargs)
+        messages = self.fn(ctx, *args, **kwargs)
+        return self.model.context_stream(
+            ctx=ctx, messages=messages, tools=self.toolkit, format=self.format
+        )
 
 
 @dataclass
-class AsyncContextCall(BaseCall, Generic[P, DepsT, FormattableT]):
-    """An async context-aware call that directly generates LLM responses without requiring a model argument.
-
-    Created by decorating an async `ContextMessageTemplate` with `llm.call`. The decorated async
-    function (with first parameter `'ctx'` of type `Context[DepsT]`) becomes directly callable to generate
-    responses asynchronously with context dependencies, with the `Model` bundled in.
-
-    An `AsyncContextCall` is essentially: async `ContextMessageTemplate` + tools + format + `Model`.
-    It can be invoked directly: `await call(ctx, *args, **kwargs)` (no model argument needed).
-
-    The model can be overridden at runtime using `with llm.model(...)` context manager.
-    """
-
-    prompt: AsyncContextPrompt[P, DepsT, FormattableT]
-    """The underlying AsyncContextPrompt instance that generates messages with tools and format."""
+class AsyncContextCall(
+    BaseCall[P, AsyncContextPrompt, AsyncContextToolkit[DepsT], FormattableT],
+    Generic[P, DepsT, FormattableT],
+):
+    """A class for generating responses using LLMs asynchronously."""
 
     @overload
     async def __call__(
@@ -313,7 +281,10 @@ class AsyncContextCall(BaseCall, Generic[P, DepsT, FormattableT]):
         self, ctx: Context[DepsT], *args: P.args, **kwargs: P.kwargs
    ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
         """Generates a response using the LLM asynchronously."""
-        return await self.prompt.call(self.model, ctx, *args, **kwargs)
+        messages = await self.fn(ctx, *args, **kwargs)
+        return await self.model.context_call_async(
+            ctx=ctx, messages=messages, tools=self.toolkit, format=self.format
+        )
 
     @overload
     async def stream(
@@ -338,4 +309,7 @@ class AsyncContextCall(BaseCall, Generic[P, DepsT, FormattableT]):
         | AsyncContextStreamResponse[DepsT, FormattableT]
     ):
         """Generates a streaming response using the LLM asynchronously."""
-        return await self.prompt.stream(self.model, ctx, *args, **kwargs)
+        messages = await self.fn(ctx, *args, **kwargs)
+        return await self.model.context_stream_async(
+            ctx=ctx, messages=messages, tools=self.toolkit, format=self.format
+        )
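In the hunks above, `BaseCall` moves to `base_call.py`, and each `Call` variant now holds the decorated prompt function (`fn`), a toolkit, and a `format` directly, delegating execution to the bound model. The following is a minimal, self-contained sketch of that delegation pattern using toy stand-ins; `ToyModel` and `ToyCall` are illustrative only and not the actual mirascope classes (the real `Call` also resolves the active model through a `model` property rather than using `default_model` directly).

```python
# Illustrative sketch of the new call flow: build messages from fn, then
# delegate to model.call(messages=..., tools=..., format=...).
from dataclasses import dataclass
from typing import Callable


@dataclass
class ToyModel:
    model_id: str

    def call(self, *, messages: list[str], tools: object, format: object) -> str:
        # Stand-in for the provider round trip performed by the real Model.
        return f"{self.model_id} -> {messages!r}"


@dataclass
class ToyCall:
    fn: Callable[..., list[str]]  # the decorated prompt function
    default_model: ToyModel
    toolkit: object = None
    format: object = None

    def __call__(self, *args, **kwargs) -> str:
        messages = self.fn(*args, **kwargs)  # prompt function -> messages
        return self.default_model.call(      # delegate to the bound model
            messages=messages, tools=self.toolkit, format=self.format
        )


call = ToyCall(
    fn=lambda topic: [f"Tell me about {topic}"],
    default_model=ToyModel("gpt-4o-mini"),
)
print(call("diff viewers"))
```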
mirascope/llm/calls/decorator.py
@@ -4,24 +4,29 @@ from __future__ import annotations
 
 from collections.abc import Sequence
 from dataclasses import dataclass
-from typing import TYPE_CHECKING, Generic, cast, overload
+from typing import Generic, Literal, cast, overload
 from typing_extensions import Unpack
 
+from ..clients import (
+    AnthropicModelId,
+    GoogleModelId,
+    ModelId,
+    OpenAICompletionsModelId,
+    OpenAIResponsesModelId,
+    Params,
+    Provider,
+)
 from ..context import DepsT
-from ..formatting import Format, FormattableT, OutputParser
+from ..formatting import Format, FormattableT
 from ..models import Model
 from ..prompts import (
-    AsyncContextMessageTemplate,
-    AsyncContextPrompt,
-    AsyncMessageTemplate,
-    AsyncPrompt,
-    ContextMessageTemplate,
-    ContextPrompt,
-    MessageTemplate,
-    Prompt,
-    _utils,
+    AsyncContextPromptable,
+    AsyncPromptable,
+    ContextPromptable,
+    Promptable,
+    _utils as _prompt_utils,
+    prompt,
 )
-from ..providers import ModelId
 from ..tools import (
     AsyncContextTool,
     AsyncContextToolkit,
@@ -36,40 +41,19 @@ from ..tools import (
 from ..types import P
 from .calls import AsyncCall, AsyncContextCall, Call, ContextCall
 
-if TYPE_CHECKING:
-    from ..models import Params
-
 
 @dataclass(kw_only=True)
 class CallDecorator(Generic[ToolT, FormattableT]):
-    """Decorator for converting a `MessageTemplate` into a `Call`.
-
-    Takes a raw prompt function that returns message content and wraps it with tools,
-    format, and a model to create a `Call` that can be invoked directly without needing
-    to pass a model argument.
-
-    The decorator automatically detects whether the function is async or context-aware
-    and creates the appropriate `Call` variant (`Call`, `AsyncCall`, `ContextCall`, or `AsyncContextCall`).
-
-    Conceptually: `CallDecorator` = `PromptDecorator` + `Model`
-    Result: `Call` = `MessageTemplate` + tools + format + `Model`
-    """
+    """A decorator for converting prompts to calls."""
 
     model: Model
-    """The default model to use with this call. May be overridden."""
-
     tools: Sequence[ToolT] | None
-    """The tools that are included in the prompt, if any."""
-
-    format: (
-        type[FormattableT] | Format[FormattableT] | OutputParser[FormattableT] | None
-    )
-    """The structured output format off the prompt, if any."""
+    format: type[FormattableT] | Format[FormattableT] | None
 
     @overload
     def __call__(
         self: CallDecorator[AsyncTool | AsyncContextTool[DepsT], FormattableT],
-        fn: AsyncContextMessageTemplate[P, DepsT],
+        fn: AsyncContextPromptable[P, DepsT],
     ) -> AsyncContextCall[P, DepsT, FormattableT]:
         """Decorate an async context prompt into an AsyncContextCall."""
         ...
@@ -77,31 +61,31 @@ class CallDecorator(Generic[ToolT, FormattableT]):
     @overload
     def __call__(
         self: CallDecorator[Tool | ContextTool[DepsT], FormattableT],
-        fn: ContextMessageTemplate[P, DepsT],
+        fn: ContextPromptable[P, DepsT],
     ) -> ContextCall[P, DepsT, FormattableT]:
         """Decorate a context prompt into a ContextCall."""
         ...
 
     @overload
     def __call__(
-        self: CallDecorator[AsyncTool, FormattableT], fn: AsyncMessageTemplate[P]
+        self: CallDecorator[AsyncTool, FormattableT], fn: AsyncPromptable[P]
     ) -> AsyncCall[P, FormattableT]:
         """Decorate an async prompt into an AsyncCall."""
         ...
 
     @overload
     def __call__(
-        self: CallDecorator[Tool, FormattableT], fn: MessageTemplate[P]
+        self: CallDecorator[Tool, FormattableT], fn: Promptable[P]
     ) -> Call[P, FormattableT]:
         """Decorate a prompt into a Call."""
         ...
 
     def __call__(
         self,
-        fn: ContextMessageTemplate[P, DepsT]
-        | AsyncContextMessageTemplate[P, DepsT]
-        | MessageTemplate[P]
-        | AsyncMessageTemplate[P],
+        fn: ContextPromptable[P, DepsT]
+        | AsyncContextPromptable[P, DepsT]
+        | Promptable[P]
+        | AsyncPromptable[P],
     ) -> (
         ContextCall[P, DepsT, FormattableT]
         | AsyncContextCall[P, DepsT, FormattableT]
@@ -109,131 +93,123 @@ class CallDecorator(Generic[ToolT, FormattableT]):
         | AsyncCall[P, FormattableT]
     ):
         """Decorates a prompt into a Call or ContextCall."""
-        is_context = _utils.is_context_promptable(fn)
-        is_async = _utils.is_async_promptable(fn)
+        is_context = _prompt_utils.is_context_promptable(fn)
+        is_async = _prompt_utils.is_async_promptable(fn)
 
         if is_context and is_async:
             tools = cast(
                 Sequence[AsyncTool | AsyncContextTool[DepsT]] | None, self.tools
             )
-            prompt = AsyncContextPrompt(
-                fn=fn,
-                toolkit=AsyncContextToolkit(tools=tools),
-                format=self.format,
-            )
             return AsyncContextCall(
-                prompt=prompt,
+                fn=prompt(fn),
                 default_model=self.model,
+                format=self.format,
+                toolkit=AsyncContextToolkit(tools=tools),
             )
         elif is_context:
             tools = cast(Sequence[Tool | ContextTool[DepsT]] | None, self.tools)
-            prompt = ContextPrompt(
-                fn=fn,
-                toolkit=ContextToolkit(tools=tools),
-                format=self.format,
-            )
             return ContextCall(
-                prompt=prompt,
+                fn=prompt(fn),
                 default_model=self.model,
+                format=self.format,
+                toolkit=ContextToolkit(tools=tools),
            )
         elif is_async:
             tools = cast(Sequence[AsyncTool] | None, self.tools)
-            prompt = AsyncPrompt(
-                fn=fn, toolkit=AsyncToolkit(tools=tools), format=self.format
-            )
             return AsyncCall(
-                prompt=prompt,
+                fn=prompt(fn),
                 default_model=self.model,
+                format=self.format,
+                toolkit=AsyncToolkit(tools=tools),
             )
         else:
             tools = cast(Sequence[Tool] | None, self.tools)
-            prompt = Prompt(fn=fn, toolkit=Toolkit(tools=tools), format=self.format)
             return Call(
-                prompt=prompt,
+                fn=prompt(fn),
                 default_model=self.model,
+                format=self.format,
+                toolkit=Toolkit(tools=tools),
            )
 
 
 @overload
 def call(
-    model: ModelId,
     *,
-    tools: Sequence[ToolT] | None = None,
-    format: type[FormattableT]
-    | Format[FormattableT]
-    | OutputParser[FormattableT]
-    | None = None,
+    provider: Literal["anthropic"],
+    model_id: AnthropicModelId,
+    tools: list[ToolT] | None = None,
+    format: type[FormattableT] | Format[FormattableT] | None = None,
     **params: Unpack[Params],
 ) -> CallDecorator[ToolT, FormattableT]:
-    """Decorator for converting prompt functions into LLM calls.
+    """Decorate a prompt into a Call using Anthropic models."""
+    ...
 
-    This overload accepts a model ID string and allows additional params.
-    """
+
+@overload
+def call(
+    *,
+    provider: Literal["google"],
+    model_id: GoogleModelId,
+    tools: list[ToolT] | None = None,
+    format: type[FormattableT] | Format[FormattableT] | None = None,
+    **params: Unpack[Params],
+) -> CallDecorator[ToolT, FormattableT]:
+    """Decorate a prompt into a Call using Google models."""
     ...
 
 
 @overload
 def call(
-    model: Model,
     *,
-    tools: Sequence[ToolT] | None = None,
-    format: type[FormattableT]
-    | Format[FormattableT]
-    | OutputParser[FormattableT]
-    | None = None,
+    provider: Literal["openai:completions"],
+    model_id: OpenAICompletionsModelId,
+    tools: list[ToolT] | None = None,
+    format: type[FormattableT] | Format[FormattableT] | None = None,
+    **params: Unpack[Params],
 ) -> CallDecorator[ToolT, FormattableT]:
-    """Decorator for converting prompt functions into LLM calls.
+    """Decorate a prompt into a Call using OpenAI models."""
+    ...
 
-    This overload accepts a Model instance and does not allow additional params.
-    """
+
+@overload
+def call(
+    *,
+    provider: Literal["openai:responses", "openai"],
+    model_id: OpenAIResponsesModelId,
+    tools: list[ToolT] | None = None,
+    format: type[FormattableT] | Format[FormattableT] | None = None,
+    **params: Unpack[Params],
+) -> CallDecorator[ToolT, FormattableT]:
+    """Decorate a prompt into a Call using OpenAI models (Responses API)."""
+    ...
+
+
+@overload
+def call(
+    *,
+    provider: Provider,
+    model_id: ModelId,
+    tools: list[ToolT] | None = None,
+    format: type[FormattableT] | Format[FormattableT] | None = None,
+    **params: Unpack[Params],
+) -> CallDecorator[ToolT, FormattableT]:
+    """Decorate a prompt into a Call using a generic provider and model."""
    ...
 
 
 def call(
-    model: ModelId | Model,
     *,
-    tools: Sequence[ToolT] | None = None,
-    format: type[FormattableT]
-    | Format[FormattableT]
-    | OutputParser[FormattableT]
-    | None = None,
+    provider: Provider,
+    model_id: ModelId,
+    tools: list[ToolT] | None = None,
+    format: type[FormattableT] | Format[FormattableT] | None = None,
     **params: Unpack[Params],
 ) -> CallDecorator[ToolT, FormattableT]:
-    """Decorates a `MessageTemplate` to create a `Call` that can be invoked directly.
-
-    The `llm.call` decorator is the most convenient way to use Mirascope. It transforms
-    a raw prompt function (that returns message content) into a `Call` object that bundles
-    the function with tools, format, and a model. The resulting `Call` can be invoked
-    directly to generate LLM responses without needing to pass a model argument.
-
-    The decorator automatically detects the function type:
-    - If the first parameter is named `'ctx'` with type `llm.Context[T]` (or a subclass thereof),
-      creates a `ContextCall`
-    - If the function is async, creates an `AsyncCall` or `AsyncContextCall`
-    - Otherwise, creates a regular `Call`
-
-    The model specified in the decorator can be overridden at runtime using the
-    `llm.model()` context manager. When overridden, the context model completely
-    replaces the decorated model, including all parameters.
-
-    Conceptual flow:
-    - `MessageTemplate`: raw function returning content
-    - `@llm.prompt`: `MessageTemplate` → `Prompt`
-      Includes tools and format, if applicable. Can be called by providing a `Model`.
-    - `@llm.call`: `MessageTemplate` → `Call`. Includes a model, tools, and format. The
-      model may be created on the fly from a model identifier and optional params, or
-      provided outright.
-
-    Args:
-        model: A model ID string (e.g., "openai/gpt-4") or a `Model` instance
-        tools: Optional `Sequence` of tools to make available to the LLM
-        format: Optional response format class (`BaseModel`) or Format instance
-        **params: Additional call parameters (temperature, max_tokens, etc.)
-            Only available when passing a model ID string
-
-    Returns:
-        A `CallDecorator` that converts prompt functions into `Call` variants
-        (`Call`, `AsyncCall`, `ContextCall`, or `AsyncContextCall`)
+    """Returns a decorator for turning prompt template functions into generations.
+
+    This decorator creates a `Call` or `ContextCall` that can be used with prompt functions.
+    If the first parameter is typed as `llm.Context[T]`, it creates a ContextCall.
+    Otherwise, it creates a regular Call.
 
     Example:
 
@@ -241,12 +217,15 @@ def call(
     ```python
     from mirascope import llm
 
-    @llm.call("openai/gpt-4")
-    def recommend_book(genre: str):
-        return f"Please recommend a book in {genre}."
+    @llm.call(
+        provider="openai:completions",
+        model_id="gpt-4o-mini",
+    )
+    def answer_question(question: str) -> str:
+        return f"Answer this question: {question}"
 
-    response: llm.Response = recommend_book("fantasy")
-    print(response.pretty())
+    response: llm.Response = answer_question("What is the capital of France?")
+    print(response)
     ```
 
     Example:
@@ -257,19 +236,20 @@ def call(
     from mirascope import llm
 
     @dataclass
-    class User:
-        name: str
-        age: int
-
-    @llm.call("openai/gpt-4")
-    def recommend_book(ctx: llm.Context[User], genre: str):
-        return f"Recommend a {genre} book for {ctx.deps.name}, age {ctx.deps.age}."
-
-    ctx = llm.Context(deps=User(name="Alice", age=15))
-    response = recommend_book(ctx, "fantasy")
-    print(response.pretty())
+    class Personality:
+        vibe: str
+
+    @llm.call(
+        provider="openai:completions",
+        model_id="gpt-4o-mini",
+    )
+    def answer_question(ctx: llm.Context[Personality], question: str) -> str:
+        return f"Your vibe is {ctx.deps.vibe}. Answer this question: {question}"
+
+    ctx = llm.Context(deps=Personality(vibe="snarky"))
+    response = answer_question(ctx, "What is the capital of France?")
+    print(response)
     ```
     """
-    if isinstance(model, str):
-        model = Model(model, **params)
+    model = Model(provider=provider, model_id=model_id, **params)
     return CallDecorator(model=model, tools=tools, format=format)
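The decorator change above replaces the single positional model identifier with explicit `provider` and `model_id` keyword arguments, with per-provider overloads for Anthropic, Google, and the OpenAI Completions/Responses APIs. A hedged migration sketch, reusing the provider and model ID shown in the docstring example above; other provider/model combinations follow the same pattern:

```python
from mirascope import llm

# mirascope 2.0.0 took a single model string:
#     @llm.call("openai/gpt-4")
# mirascope 2.0.0a1 takes an explicit provider plus model_id:
@llm.call(
    provider="openai:completions",
    model_id="gpt-4o-mini",
)
def recommend_book(genre: str) -> str:
    return f"Please recommend a book in {genre}."


response: llm.Response = recommend_book("fantasy")
print(response)
```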
mirascope/llm/clients/__init__.py
@@ -0,0 +1,34 @@
+"""Client interfaces for LLM providers."""
+
+from .anthropic import (
+    AnthropicClient,
+    AnthropicModelId,
+)
+from .base import BaseClient, ClientT, Params
+from .google import GoogleClient, GoogleModelId
+from .openai import (
+    OpenAICompletionsClient,
+    OpenAICompletionsModelId,
+    OpenAIResponsesClient,
+    OpenAIResponsesModelId,
+)
+from .providers import PROVIDERS, ModelId, Provider, client, get_client
+
+__all__ = [
+    "PROVIDERS",
+    "AnthropicClient",
+    "AnthropicModelId",
+    "BaseClient",
+    "ClientT",
+    "GoogleClient",
+    "GoogleModelId",
+    "ModelId",
+    "OpenAICompletionsClient",
+    "OpenAICompletionsModelId",
+    "OpenAIResponsesClient",
+    "OpenAIResponsesModelId",
+    "Params",
+    "Provider",
+    "client",
+    "get_client",
+]
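The public surface of the new `mirascope.llm.clients` package is the `__all__` list above; names such as `ModelId`, `Provider`, and `Params` that 2.0.0 imported from `mirascope.llm.providers` now come from here (see the import change in `decorator.py` above). The sketch below shows imports only; how these clients are constructed and configured is not part of this diff.

```python
# Import sketch based solely on the __all__ declared above.
from mirascope.llm.clients import (
    AnthropicClient,
    GoogleClient,
    ModelId,
    OpenAICompletionsClient,
    OpenAIResponsesClient,
    Params,
    Provider,
    get_client,
)
```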