mirascope 2.0.0__py3-none-any.whl → 2.0.0a0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (442)
  1. mirascope/__init__.py +2 -11
  2. mirascope/graphs/__init__.py +22 -0
  3. mirascope/graphs/finite_state_machine.py +625 -0
  4. mirascope/llm/__init__.py +16 -101
  5. mirascope/llm/agents/__init__.py +15 -0
  6. mirascope/llm/agents/agent.py +97 -0
  7. mirascope/llm/agents/agent_template.py +45 -0
  8. mirascope/llm/agents/decorator.py +176 -0
  9. mirascope/llm/calls/__init__.py +1 -2
  10. mirascope/llm/calls/base_call.py +33 -0
  11. mirascope/llm/calls/calls.py +58 -84
  12. mirascope/llm/calls/decorator.py +120 -140
  13. mirascope/llm/clients/__init__.py +34 -0
  14. mirascope/llm/clients/anthropic/__init__.py +11 -0
  15. mirascope/llm/{providers/openai/completions → clients/anthropic}/_utils/__init__.py +0 -2
  16. mirascope/llm/{providers → clients}/anthropic/_utils/decode.py +22 -66
  17. mirascope/llm/clients/anthropic/_utils/encode.py +243 -0
  18. mirascope/llm/clients/anthropic/clients.py +819 -0
  19. mirascope/llm/clients/anthropic/model_ids.py +8 -0
  20. mirascope/llm/{providers → clients}/base/__init__.py +5 -4
  21. mirascope/llm/{providers → clients}/base/_utils.py +17 -78
  22. mirascope/llm/{providers/base/base_provider.py → clients/base/client.py} +145 -468
  23. mirascope/llm/{models → clients/base}/params.py +37 -16
  24. mirascope/llm/clients/google/__init__.py +6 -0
  25. mirascope/llm/{providers/openai/responses → clients/google}/_utils/__init__.py +0 -2
  26. mirascope/llm/{providers → clients}/google/_utils/decode.py +22 -98
  27. mirascope/llm/{providers → clients}/google/_utils/encode.py +46 -168
  28. mirascope/llm/clients/google/clients.py +853 -0
  29. mirascope/llm/clients/google/model_ids.py +15 -0
  30. mirascope/llm/clients/openai/__init__.py +25 -0
  31. mirascope/llm/clients/openai/completions/__init__.py +9 -0
  32. mirascope/llm/{providers/google → clients/openai/completions}/_utils/__init__.py +0 -4
  33. mirascope/llm/{providers → clients}/openai/completions/_utils/decode.py +9 -74
  34. mirascope/llm/{providers → clients}/openai/completions/_utils/encode.py +52 -70
  35. mirascope/llm/clients/openai/completions/_utils/model_features.py +81 -0
  36. mirascope/llm/clients/openai/completions/clients.py +833 -0
  37. mirascope/llm/clients/openai/completions/model_ids.py +8 -0
  38. mirascope/llm/clients/openai/responses/__init__.py +9 -0
  39. mirascope/llm/clients/openai/responses/_utils/__init__.py +13 -0
  40. mirascope/llm/{providers → clients}/openai/responses/_utils/decode.py +14 -80
  41. mirascope/llm/{providers → clients}/openai/responses/_utils/encode.py +41 -92
  42. mirascope/llm/clients/openai/responses/_utils/model_features.py +87 -0
  43. mirascope/llm/clients/openai/responses/clients.py +832 -0
  44. mirascope/llm/clients/openai/responses/model_ids.py +8 -0
  45. mirascope/llm/clients/openai/shared/__init__.py +7 -0
  46. mirascope/llm/clients/openai/shared/_utils.py +55 -0
  47. mirascope/llm/clients/providers.py +175 -0
  48. mirascope/llm/content/__init__.py +2 -3
  49. mirascope/llm/content/tool_call.py +0 -6
  50. mirascope/llm/content/tool_output.py +5 -22
  51. mirascope/llm/context/_utils.py +6 -19
  52. mirascope/llm/exceptions.py +43 -298
  53. mirascope/llm/formatting/__init__.py +2 -19
  54. mirascope/llm/formatting/_utils.py +74 -0
  55. mirascope/llm/formatting/format.py +30 -219
  56. mirascope/llm/formatting/from_call_args.py +2 -2
  57. mirascope/llm/formatting/partial.py +7 -80
  58. mirascope/llm/formatting/types.py +64 -21
  59. mirascope/llm/mcp/__init__.py +2 -2
  60. mirascope/llm/mcp/client.py +118 -0
  61. mirascope/llm/messages/__init__.py +0 -3
  62. mirascope/llm/messages/message.py +5 -13
  63. mirascope/llm/models/__init__.py +2 -7
  64. mirascope/llm/models/models.py +139 -315
  65. mirascope/llm/prompts/__init__.py +12 -13
  66. mirascope/llm/prompts/_utils.py +43 -14
  67. mirascope/llm/prompts/decorator.py +204 -144
  68. mirascope/llm/prompts/protocols.py +59 -25
  69. mirascope/llm/responses/__init__.py +1 -9
  70. mirascope/llm/responses/_utils.py +12 -102
  71. mirascope/llm/responses/base_response.py +6 -18
  72. mirascope/llm/responses/base_stream_response.py +50 -173
  73. mirascope/llm/responses/finish_reason.py +0 -1
  74. mirascope/llm/responses/response.py +13 -34
  75. mirascope/llm/responses/root_response.py +29 -100
  76. mirascope/llm/responses/stream_response.py +31 -40
  77. mirascope/llm/tools/__init__.py +2 -9
  78. mirascope/llm/tools/_utils.py +3 -12
  79. mirascope/llm/tools/decorator.py +16 -25
  80. mirascope/llm/tools/protocols.py +4 -4
  81. mirascope/llm/tools/tool_schema.py +19 -87
  82. mirascope/llm/tools/toolkit.py +27 -35
  83. mirascope/llm/tools/tools.py +41 -135
  84. {mirascope-2.0.0.dist-info → mirascope-2.0.0a0.dist-info}/METADATA +9 -95
  85. mirascope-2.0.0a0.dist-info/RECORD +101 -0
  86. {mirascope-2.0.0.dist-info → mirascope-2.0.0a0.dist-info}/WHEEL +1 -1
  87. {mirascope-2.0.0.dist-info → mirascope-2.0.0a0.dist-info}/licenses/LICENSE +1 -1
  88. mirascope/_stubs.py +0 -363
  89. mirascope/api/__init__.py +0 -14
  90. mirascope/api/_generated/README.md +0 -207
  91. mirascope/api/_generated/__init__.py +0 -440
  92. mirascope/api/_generated/annotations/__init__.py +0 -33
  93. mirascope/api/_generated/annotations/client.py +0 -506
  94. mirascope/api/_generated/annotations/raw_client.py +0 -1414
  95. mirascope/api/_generated/annotations/types/__init__.py +0 -31
  96. mirascope/api/_generated/annotations/types/annotations_create_request_label.py +0 -5
  97. mirascope/api/_generated/annotations/types/annotations_create_response.py +0 -48
  98. mirascope/api/_generated/annotations/types/annotations_create_response_label.py +0 -5
  99. mirascope/api/_generated/annotations/types/annotations_get_response.py +0 -48
  100. mirascope/api/_generated/annotations/types/annotations_get_response_label.py +0 -5
  101. mirascope/api/_generated/annotations/types/annotations_list_request_label.py +0 -5
  102. mirascope/api/_generated/annotations/types/annotations_list_response.py +0 -21
  103. mirascope/api/_generated/annotations/types/annotations_list_response_annotations_item.py +0 -50
  104. mirascope/api/_generated/annotations/types/annotations_list_response_annotations_item_label.py +0 -5
  105. mirascope/api/_generated/annotations/types/annotations_update_request_label.py +0 -5
  106. mirascope/api/_generated/annotations/types/annotations_update_response.py +0 -48
  107. mirascope/api/_generated/annotations/types/annotations_update_response_label.py +0 -5
  108. mirascope/api/_generated/api_keys/__init__.py +0 -17
  109. mirascope/api/_generated/api_keys/client.py +0 -530
  110. mirascope/api/_generated/api_keys/raw_client.py +0 -1236
  111. mirascope/api/_generated/api_keys/types/__init__.py +0 -15
  112. mirascope/api/_generated/api_keys/types/api_keys_create_response.py +0 -28
  113. mirascope/api/_generated/api_keys/types/api_keys_get_response.py +0 -27
  114. mirascope/api/_generated/api_keys/types/api_keys_list_all_for_org_response_item.py +0 -40
  115. mirascope/api/_generated/api_keys/types/api_keys_list_response_item.py +0 -27
  116. mirascope/api/_generated/client.py +0 -211
  117. mirascope/api/_generated/core/__init__.py +0 -52
  118. mirascope/api/_generated/core/api_error.py +0 -23
  119. mirascope/api/_generated/core/client_wrapper.py +0 -46
  120. mirascope/api/_generated/core/datetime_utils.py +0 -28
  121. mirascope/api/_generated/core/file.py +0 -67
  122. mirascope/api/_generated/core/force_multipart.py +0 -16
  123. mirascope/api/_generated/core/http_client.py +0 -543
  124. mirascope/api/_generated/core/http_response.py +0 -55
  125. mirascope/api/_generated/core/jsonable_encoder.py +0 -100
  126. mirascope/api/_generated/core/pydantic_utilities.py +0 -255
  127. mirascope/api/_generated/core/query_encoder.py +0 -58
  128. mirascope/api/_generated/core/remove_none_from_dict.py +0 -11
  129. mirascope/api/_generated/core/request_options.py +0 -35
  130. mirascope/api/_generated/core/serialization.py +0 -276
  131. mirascope/api/_generated/docs/__init__.py +0 -4
  132. mirascope/api/_generated/docs/client.py +0 -91
  133. mirascope/api/_generated/docs/raw_client.py +0 -178
  134. mirascope/api/_generated/environment.py +0 -9
  135. mirascope/api/_generated/environments/__init__.py +0 -23
  136. mirascope/api/_generated/environments/client.py +0 -649
  137. mirascope/api/_generated/environments/raw_client.py +0 -1567
  138. mirascope/api/_generated/environments/types/__init__.py +0 -25
  139. mirascope/api/_generated/environments/types/environments_create_response.py +0 -24
  140. mirascope/api/_generated/environments/types/environments_get_analytics_response.py +0 -60
  141. mirascope/api/_generated/environments/types/environments_get_analytics_response_top_functions_item.py +0 -24
  142. mirascope/api/_generated/environments/types/environments_get_analytics_response_top_models_item.py +0 -22
  143. mirascope/api/_generated/environments/types/environments_get_response.py +0 -24
  144. mirascope/api/_generated/environments/types/environments_list_response_item.py +0 -24
  145. mirascope/api/_generated/environments/types/environments_update_response.py +0 -24
  146. mirascope/api/_generated/errors/__init__.py +0 -25
  147. mirascope/api/_generated/errors/bad_request_error.py +0 -14
  148. mirascope/api/_generated/errors/conflict_error.py +0 -14
  149. mirascope/api/_generated/errors/forbidden_error.py +0 -11
  150. mirascope/api/_generated/errors/internal_server_error.py +0 -10
  151. mirascope/api/_generated/errors/not_found_error.py +0 -11
  152. mirascope/api/_generated/errors/payment_required_error.py +0 -15
  153. mirascope/api/_generated/errors/service_unavailable_error.py +0 -14
  154. mirascope/api/_generated/errors/too_many_requests_error.py +0 -15
  155. mirascope/api/_generated/errors/unauthorized_error.py +0 -11
  156. mirascope/api/_generated/functions/__init__.py +0 -39
  157. mirascope/api/_generated/functions/client.py +0 -647
  158. mirascope/api/_generated/functions/raw_client.py +0 -1890
  159. mirascope/api/_generated/functions/types/__init__.py +0 -53
  160. mirascope/api/_generated/functions/types/functions_create_request_dependencies_value.py +0 -20
  161. mirascope/api/_generated/functions/types/functions_create_response.py +0 -37
  162. mirascope/api/_generated/functions/types/functions_create_response_dependencies_value.py +0 -20
  163. mirascope/api/_generated/functions/types/functions_find_by_hash_response.py +0 -39
  164. mirascope/api/_generated/functions/types/functions_find_by_hash_response_dependencies_value.py +0 -20
  165. mirascope/api/_generated/functions/types/functions_get_by_env_response.py +0 -53
  166. mirascope/api/_generated/functions/types/functions_get_by_env_response_dependencies_value.py +0 -22
  167. mirascope/api/_generated/functions/types/functions_get_response.py +0 -37
  168. mirascope/api/_generated/functions/types/functions_get_response_dependencies_value.py +0 -20
  169. mirascope/api/_generated/functions/types/functions_list_by_env_response.py +0 -25
  170. mirascope/api/_generated/functions/types/functions_list_by_env_response_functions_item.py +0 -56
  171. mirascope/api/_generated/functions/types/functions_list_by_env_response_functions_item_dependencies_value.py +0 -22
  172. mirascope/api/_generated/functions/types/functions_list_response.py +0 -21
  173. mirascope/api/_generated/functions/types/functions_list_response_functions_item.py +0 -41
  174. mirascope/api/_generated/functions/types/functions_list_response_functions_item_dependencies_value.py +0 -20
  175. mirascope/api/_generated/health/__init__.py +0 -7
  176. mirascope/api/_generated/health/client.py +0 -92
  177. mirascope/api/_generated/health/raw_client.py +0 -175
  178. mirascope/api/_generated/health/types/__init__.py +0 -8
  179. mirascope/api/_generated/health/types/health_check_response.py +0 -22
  180. mirascope/api/_generated/health/types/health_check_response_status.py +0 -5
  181. mirascope/api/_generated/organization_invitations/__init__.py +0 -33
  182. mirascope/api/_generated/organization_invitations/client.py +0 -546
  183. mirascope/api/_generated/organization_invitations/raw_client.py +0 -1519
  184. mirascope/api/_generated/organization_invitations/types/__init__.py +0 -53
  185. mirascope/api/_generated/organization_invitations/types/organization_invitations_accept_response.py +0 -34
  186. mirascope/api/_generated/organization_invitations/types/organization_invitations_accept_response_role.py +0 -7
  187. mirascope/api/_generated/organization_invitations/types/organization_invitations_create_request_role.py +0 -7
  188. mirascope/api/_generated/organization_invitations/types/organization_invitations_create_response.py +0 -48
  189. mirascope/api/_generated/organization_invitations/types/organization_invitations_create_response_role.py +0 -7
  190. mirascope/api/_generated/organization_invitations/types/organization_invitations_create_response_status.py +0 -7
  191. mirascope/api/_generated/organization_invitations/types/organization_invitations_get_response.py +0 -48
  192. mirascope/api/_generated/organization_invitations/types/organization_invitations_get_response_role.py +0 -7
  193. mirascope/api/_generated/organization_invitations/types/organization_invitations_get_response_status.py +0 -7
  194. mirascope/api/_generated/organization_invitations/types/organization_invitations_list_response_item.py +0 -48
  195. mirascope/api/_generated/organization_invitations/types/organization_invitations_list_response_item_role.py +0 -7
  196. mirascope/api/_generated/organization_invitations/types/organization_invitations_list_response_item_status.py +0 -7
  197. mirascope/api/_generated/organization_memberships/__init__.py +0 -19
  198. mirascope/api/_generated/organization_memberships/client.py +0 -302
  199. mirascope/api/_generated/organization_memberships/raw_client.py +0 -736
  200. mirascope/api/_generated/organization_memberships/types/__init__.py +0 -27
  201. mirascope/api/_generated/organization_memberships/types/organization_memberships_list_response_item.py +0 -33
  202. mirascope/api/_generated/organization_memberships/types/organization_memberships_list_response_item_role.py +0 -7
  203. mirascope/api/_generated/organization_memberships/types/organization_memberships_update_request_role.py +0 -7
  204. mirascope/api/_generated/organization_memberships/types/organization_memberships_update_response.py +0 -31
  205. mirascope/api/_generated/organization_memberships/types/organization_memberships_update_response_role.py +0 -7
  206. mirascope/api/_generated/organizations/__init__.py +0 -51
  207. mirascope/api/_generated/organizations/client.py +0 -869
  208. mirascope/api/_generated/organizations/raw_client.py +0 -2593
  209. mirascope/api/_generated/organizations/types/__init__.py +0 -71
  210. mirascope/api/_generated/organizations/types/organizations_create_payment_intent_response.py +0 -24
  211. mirascope/api/_generated/organizations/types/organizations_create_response.py +0 -26
  212. mirascope/api/_generated/organizations/types/organizations_create_response_role.py +0 -5
  213. mirascope/api/_generated/organizations/types/organizations_get_response.py +0 -26
  214. mirascope/api/_generated/organizations/types/organizations_get_response_role.py +0 -5
  215. mirascope/api/_generated/organizations/types/organizations_list_response_item.py +0 -26
  216. mirascope/api/_generated/organizations/types/organizations_list_response_item_role.py +0 -5
  217. mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_request_target_plan.py +0 -7
  218. mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_response.py +0 -47
  219. mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_response_validation_errors_item.py +0 -33
  220. mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_response_validation_errors_item_resource.py +0 -7
  221. mirascope/api/_generated/organizations/types/organizations_router_balance_response.py +0 -24
  222. mirascope/api/_generated/organizations/types/organizations_subscription_response.py +0 -53
  223. mirascope/api/_generated/organizations/types/organizations_subscription_response_current_plan.py +0 -7
  224. mirascope/api/_generated/organizations/types/organizations_subscription_response_payment_method.py +0 -26
  225. mirascope/api/_generated/organizations/types/organizations_subscription_response_scheduled_change.py +0 -34
  226. mirascope/api/_generated/organizations/types/organizations_subscription_response_scheduled_change_target_plan.py +0 -7
  227. mirascope/api/_generated/organizations/types/organizations_update_response.py +0 -26
  228. mirascope/api/_generated/organizations/types/organizations_update_response_role.py +0 -5
  229. mirascope/api/_generated/organizations/types/organizations_update_subscription_request_target_plan.py +0 -7
  230. mirascope/api/_generated/organizations/types/organizations_update_subscription_response.py +0 -35
  231. mirascope/api/_generated/project_memberships/__init__.py +0 -25
  232. mirascope/api/_generated/project_memberships/client.py +0 -437
  233. mirascope/api/_generated/project_memberships/raw_client.py +0 -1039
  234. mirascope/api/_generated/project_memberships/types/__init__.py +0 -29
  235. mirascope/api/_generated/project_memberships/types/project_memberships_create_request_role.py +0 -7
  236. mirascope/api/_generated/project_memberships/types/project_memberships_create_response.py +0 -35
  237. mirascope/api/_generated/project_memberships/types/project_memberships_create_response_role.py +0 -7
  238. mirascope/api/_generated/project_memberships/types/project_memberships_list_response_item.py +0 -33
  239. mirascope/api/_generated/project_memberships/types/project_memberships_list_response_item_role.py +0 -7
  240. mirascope/api/_generated/project_memberships/types/project_memberships_update_request_role.py +0 -7
  241. mirascope/api/_generated/project_memberships/types/project_memberships_update_response.py +0 -35
  242. mirascope/api/_generated/project_memberships/types/project_memberships_update_response_role.py +0 -7
  243. mirascope/api/_generated/projects/__init__.py +0 -7
  244. mirascope/api/_generated/projects/client.py +0 -428
  245. mirascope/api/_generated/projects/raw_client.py +0 -1302
  246. mirascope/api/_generated/projects/types/__init__.py +0 -10
  247. mirascope/api/_generated/projects/types/projects_create_response.py +0 -25
  248. mirascope/api/_generated/projects/types/projects_get_response.py +0 -25
  249. mirascope/api/_generated/projects/types/projects_list_response_item.py +0 -25
  250. mirascope/api/_generated/projects/types/projects_update_response.py +0 -25
  251. mirascope/api/_generated/reference.md +0 -4915
  252. mirascope/api/_generated/tags/__init__.py +0 -19
  253. mirascope/api/_generated/tags/client.py +0 -504
  254. mirascope/api/_generated/tags/raw_client.py +0 -1288
  255. mirascope/api/_generated/tags/types/__init__.py +0 -17
  256. mirascope/api/_generated/tags/types/tags_create_response.py +0 -41
  257. mirascope/api/_generated/tags/types/tags_get_response.py +0 -41
  258. mirascope/api/_generated/tags/types/tags_list_response.py +0 -23
  259. mirascope/api/_generated/tags/types/tags_list_response_tags_item.py +0 -41
  260. mirascope/api/_generated/tags/types/tags_update_response.py +0 -41
  261. mirascope/api/_generated/token_cost/__init__.py +0 -7
  262. mirascope/api/_generated/token_cost/client.py +0 -160
  263. mirascope/api/_generated/token_cost/raw_client.py +0 -264
  264. mirascope/api/_generated/token_cost/types/__init__.py +0 -8
  265. mirascope/api/_generated/token_cost/types/token_cost_calculate_request_usage.py +0 -54
  266. mirascope/api/_generated/token_cost/types/token_cost_calculate_response.py +0 -52
  267. mirascope/api/_generated/traces/__init__.py +0 -97
  268. mirascope/api/_generated/traces/client.py +0 -1103
  269. mirascope/api/_generated/traces/raw_client.py +0 -2322
  270. mirascope/api/_generated/traces/types/__init__.py +0 -155
  271. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item.py +0 -29
  272. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource.py +0 -27
  273. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item.py +0 -23
  274. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value.py +0 -38
  275. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_array_value.py +0 -19
  276. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value.py +0 -22
  277. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value_values_item.py +0 -20
  278. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item.py +0 -29
  279. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope.py +0 -31
  280. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item.py +0 -23
  281. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value.py +0 -38
  282. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_array_value.py +0 -19
  283. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value.py +0 -22
  284. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value_values_item.py +0 -22
  285. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item.py +0 -48
  286. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item.py +0 -23
  287. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value.py +0 -38
  288. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_array_value.py +0 -19
  289. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value.py +0 -24
  290. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value_values_item.py +0 -22
  291. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_status.py +0 -20
  292. mirascope/api/_generated/traces/types/traces_create_response.py +0 -24
  293. mirascope/api/_generated/traces/types/traces_create_response_partial_success.py +0 -22
  294. mirascope/api/_generated/traces/types/traces_get_analytics_summary_response.py +0 -60
  295. mirascope/api/_generated/traces/types/traces_get_analytics_summary_response_top_functions_item.py +0 -24
  296. mirascope/api/_generated/traces/types/traces_get_analytics_summary_response_top_models_item.py +0 -22
  297. mirascope/api/_generated/traces/types/traces_get_trace_detail_by_env_response.py +0 -33
  298. mirascope/api/_generated/traces/types/traces_get_trace_detail_by_env_response_spans_item.py +0 -88
  299. mirascope/api/_generated/traces/types/traces_get_trace_detail_response.py +0 -33
  300. mirascope/api/_generated/traces/types/traces_get_trace_detail_response_spans_item.py +0 -88
  301. mirascope/api/_generated/traces/types/traces_list_by_function_hash_response.py +0 -25
  302. mirascope/api/_generated/traces/types/traces_list_by_function_hash_response_traces_item.py +0 -44
  303. mirascope/api/_generated/traces/types/traces_search_by_env_request_attribute_filters_item.py +0 -26
  304. mirascope/api/_generated/traces/types/traces_search_by_env_request_attribute_filters_item_operator.py +0 -7
  305. mirascope/api/_generated/traces/types/traces_search_by_env_request_sort_by.py +0 -7
  306. mirascope/api/_generated/traces/types/traces_search_by_env_request_sort_order.py +0 -7
  307. mirascope/api/_generated/traces/types/traces_search_by_env_response.py +0 -26
  308. mirascope/api/_generated/traces/types/traces_search_by_env_response_spans_item.py +0 -50
  309. mirascope/api/_generated/traces/types/traces_search_request_attribute_filters_item.py +0 -26
  310. mirascope/api/_generated/traces/types/traces_search_request_attribute_filters_item_operator.py +0 -7
  311. mirascope/api/_generated/traces/types/traces_search_request_sort_by.py +0 -7
  312. mirascope/api/_generated/traces/types/traces_search_request_sort_order.py +0 -5
  313. mirascope/api/_generated/traces/types/traces_search_response.py +0 -26
  314. mirascope/api/_generated/traces/types/traces_search_response_spans_item.py +0 -50
  315. mirascope/api/_generated/types/__init__.py +0 -85
  316. mirascope/api/_generated/types/already_exists_error.py +0 -22
  317. mirascope/api/_generated/types/already_exists_error_tag.py +0 -5
  318. mirascope/api/_generated/types/bad_request_error_body.py +0 -50
  319. mirascope/api/_generated/types/click_house_error.py +0 -22
  320. mirascope/api/_generated/types/database_error.py +0 -22
  321. mirascope/api/_generated/types/database_error_tag.py +0 -5
  322. mirascope/api/_generated/types/date.py +0 -3
  323. mirascope/api/_generated/types/http_api_decode_error.py +0 -27
  324. mirascope/api/_generated/types/http_api_decode_error_tag.py +0 -5
  325. mirascope/api/_generated/types/immutable_resource_error.py +0 -22
  326. mirascope/api/_generated/types/internal_server_error_body.py +0 -49
  327. mirascope/api/_generated/types/issue.py +0 -38
  328. mirascope/api/_generated/types/issue_tag.py +0 -10
  329. mirascope/api/_generated/types/not_found_error_body.py +0 -22
  330. mirascope/api/_generated/types/not_found_error_tag.py +0 -5
  331. mirascope/api/_generated/types/number_from_string.py +0 -3
  332. mirascope/api/_generated/types/permission_denied_error.py +0 -22
  333. mirascope/api/_generated/types/permission_denied_error_tag.py +0 -5
  334. mirascope/api/_generated/types/plan_limit_exceeded_error.py +0 -32
  335. mirascope/api/_generated/types/plan_limit_exceeded_error_tag.py +0 -7
  336. mirascope/api/_generated/types/pricing_unavailable_error.py +0 -23
  337. mirascope/api/_generated/types/property_key.py +0 -7
  338. mirascope/api/_generated/types/property_key_key.py +0 -25
  339. mirascope/api/_generated/types/property_key_key_tag.py +0 -5
  340. mirascope/api/_generated/types/rate_limit_error.py +0 -31
  341. mirascope/api/_generated/types/rate_limit_error_tag.py +0 -5
  342. mirascope/api/_generated/types/service_unavailable_error_body.py +0 -24
  343. mirascope/api/_generated/types/service_unavailable_error_tag.py +0 -7
  344. mirascope/api/_generated/types/stripe_error.py +0 -20
  345. mirascope/api/_generated/types/subscription_past_due_error.py +0 -31
  346. mirascope/api/_generated/types/subscription_past_due_error_tag.py +0 -7
  347. mirascope/api/_generated/types/unauthorized_error_body.py +0 -21
  348. mirascope/api/_generated/types/unauthorized_error_tag.py +0 -5
  349. mirascope/api/client.py +0 -255
  350. mirascope/api/settings.py +0 -99
  351. mirascope/llm/formatting/output_parser.py +0 -178
  352. mirascope/llm/formatting/primitives.py +0 -192
  353. mirascope/llm/mcp/mcp_client.py +0 -130
  354. mirascope/llm/messages/_utils.py +0 -34
  355. mirascope/llm/models/thinking_config.py +0 -61
  356. mirascope/llm/prompts/prompts.py +0 -487
  357. mirascope/llm/providers/__init__.py +0 -62
  358. mirascope/llm/providers/anthropic/__init__.py +0 -11
  359. mirascope/llm/providers/anthropic/_utils/__init__.py +0 -27
  360. mirascope/llm/providers/anthropic/_utils/beta_decode.py +0 -282
  361. mirascope/llm/providers/anthropic/_utils/beta_encode.py +0 -266
  362. mirascope/llm/providers/anthropic/_utils/encode.py +0 -418
  363. mirascope/llm/providers/anthropic/_utils/errors.py +0 -46
  364. mirascope/llm/providers/anthropic/beta_provider.py +0 -374
  365. mirascope/llm/providers/anthropic/model_id.py +0 -23
  366. mirascope/llm/providers/anthropic/model_info.py +0 -87
  367. mirascope/llm/providers/anthropic/provider.py +0 -479
  368. mirascope/llm/providers/google/__init__.py +0 -6
  369. mirascope/llm/providers/google/_utils/errors.py +0 -50
  370. mirascope/llm/providers/google/model_id.py +0 -22
  371. mirascope/llm/providers/google/model_info.py +0 -63
  372. mirascope/llm/providers/google/provider.py +0 -492
  373. mirascope/llm/providers/mirascope/__init__.py +0 -5
  374. mirascope/llm/providers/mirascope/_utils.py +0 -73
  375. mirascope/llm/providers/mirascope/provider.py +0 -349
  376. mirascope/llm/providers/mlx/__init__.py +0 -9
  377. mirascope/llm/providers/mlx/_utils.py +0 -141
  378. mirascope/llm/providers/mlx/encoding/__init__.py +0 -8
  379. mirascope/llm/providers/mlx/encoding/base.py +0 -72
  380. mirascope/llm/providers/mlx/encoding/transformers.py +0 -150
  381. mirascope/llm/providers/mlx/mlx.py +0 -254
  382. mirascope/llm/providers/mlx/model_id.py +0 -17
  383. mirascope/llm/providers/mlx/provider.py +0 -452
  384. mirascope/llm/providers/model_id.py +0 -16
  385. mirascope/llm/providers/ollama/__init__.py +0 -7
  386. mirascope/llm/providers/ollama/provider.py +0 -71
  387. mirascope/llm/providers/openai/__init__.py +0 -15
  388. mirascope/llm/providers/openai/_utils/__init__.py +0 -5
  389. mirascope/llm/providers/openai/_utils/errors.py +0 -46
  390. mirascope/llm/providers/openai/completions/__init__.py +0 -7
  391. mirascope/llm/providers/openai/completions/base_provider.py +0 -542
  392. mirascope/llm/providers/openai/completions/provider.py +0 -22
  393. mirascope/llm/providers/openai/model_id.py +0 -31
  394. mirascope/llm/providers/openai/model_info.py +0 -303
  395. mirascope/llm/providers/openai/provider.py +0 -441
  396. mirascope/llm/providers/openai/responses/__init__.py +0 -5
  397. mirascope/llm/providers/openai/responses/provider.py +0 -513
  398. mirascope/llm/providers/provider_id.py +0 -24
  399. mirascope/llm/providers/provider_registry.py +0 -299
  400. mirascope/llm/providers/together/__init__.py +0 -7
  401. mirascope/llm/providers/together/provider.py +0 -40
  402. mirascope/llm/responses/usage.py +0 -95
  403. mirascope/ops/__init__.py +0 -111
  404. mirascope/ops/_internal/__init__.py +0 -5
  405. mirascope/ops/_internal/closure.py +0 -1169
  406. mirascope/ops/_internal/configuration.py +0 -177
  407. mirascope/ops/_internal/context.py +0 -76
  408. mirascope/ops/_internal/exporters/__init__.py +0 -26
  409. mirascope/ops/_internal/exporters/exporters.py +0 -395
  410. mirascope/ops/_internal/exporters/processors.py +0 -104
  411. mirascope/ops/_internal/exporters/types.py +0 -165
  412. mirascope/ops/_internal/exporters/utils.py +0 -29
  413. mirascope/ops/_internal/instrumentation/__init__.py +0 -8
  414. mirascope/ops/_internal/instrumentation/llm/__init__.py +0 -8
  415. mirascope/ops/_internal/instrumentation/llm/common.py +0 -530
  416. mirascope/ops/_internal/instrumentation/llm/cost.py +0 -190
  417. mirascope/ops/_internal/instrumentation/llm/encode.py +0 -238
  418. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/__init__.py +0 -38
  419. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_input_messages.py +0 -31
  420. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_output_messages.py +0 -38
  421. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_system_instructions.py +0 -18
  422. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/shared.py +0 -100
  423. mirascope/ops/_internal/instrumentation/llm/llm.py +0 -161
  424. mirascope/ops/_internal/instrumentation/llm/model.py +0 -1798
  425. mirascope/ops/_internal/instrumentation/llm/response.py +0 -521
  426. mirascope/ops/_internal/instrumentation/llm/serialize.py +0 -300
  427. mirascope/ops/_internal/propagation.py +0 -198
  428. mirascope/ops/_internal/protocols.py +0 -133
  429. mirascope/ops/_internal/session.py +0 -139
  430. mirascope/ops/_internal/spans.py +0 -232
  431. mirascope/ops/_internal/traced_calls.py +0 -375
  432. mirascope/ops/_internal/traced_functions.py +0 -523
  433. mirascope/ops/_internal/tracing.py +0 -353
  434. mirascope/ops/_internal/types.py +0 -13
  435. mirascope/ops/_internal/utils.py +0 -123
  436. mirascope/ops/_internal/versioned_calls.py +0 -512
  437. mirascope/ops/_internal/versioned_functions.py +0 -357
  438. mirascope/ops/_internal/versioning.py +0 -303
  439. mirascope/ops/exceptions.py +0 -21
  440. mirascope-2.0.0.dist-info/RECORD +0 -423
  441. /mirascope/llm/{providers → clients}/base/kwargs.py +0 -0
  442. /mirascope/llm/{providers → clients}/google/message.py +0 -0

mirascope/llm/{models → clients/base}/params.py

@@ -2,8 +2,6 @@
 
 from typing import TypedDict
 
-from .thinking_config import ThinkingConfig
-
 
 class Params(TypedDict, total=False):
     """Common parameters shared across LLM providers.
@@ -25,8 +23,8 @@ class Params(TypedDict, total=False):
 
     top_p: float
     """Nucleus sampling parameter (0.0 to 1.0).
-
-    Tokens are selected from the most to least probable until the sum of their
+
+    Tokens are selected from the most to least probable until the sum of their
     probabilities equals this value. Use a lower value for less random responses and a
     higher value for more random responses.
     """
@@ -43,7 +41,7 @@ class Params(TypedDict, total=False):
 
     seed: int
     """Random seed for reproducibility.
-
+
     When ``seed`` is fixed to a specific number, the model makes a best
     effort to provide the same response for repeated requests.
 
@@ -52,21 +50,44 @@ class Params(TypedDict, total=False):
 
     stop_sequences: list[str]
     """Stop sequences to end generation.
-
+
     The model will stop generating text if one of these strings is encountered in the
     response.
     """
 
-    thinking: ThinkingConfig | None
-    """Configuration for extended reasoning/thinking.
+    thinking: bool
+    """Configures whether the model should use thinking.
+
+    Thinking is a process where the model spends additional tokens thinking about the
+    prompt before generating a response. You may configure thinking either by passing
+    a bool to enable or disable it.
+
+    If `params.thinking` is `True`, then thinking and thought summaries will be enabled
+    (if supported by the model/provider), with a default budget for thinking tokens.
+
+    If `params.thinking` is `False`, then thinking will be wholly disabled, assuming
+    the model allows this (some models, e.g. `google:gemini-2.5-pro`, do not allow
+    disabling thinking).
+
+    If `params.thinking` is unset (or `None`), then we will use provider-specific default
+    behavior for the chosen model.
+    """
+
+    encode_thoughts_as_text: bool
+    """Configures whether `Thought` content should be re-encoded as text for model consumption.
+
+    If `True`, then when an `AssistantMessage` contains `Thoughts` and is being passed back
+    to an LLM, those `Thoughts` will be encoded as `Text`, so that the assistant can read
+    those thoughts. That ensures the assistant has access to (at least the summarized output of)
+    its reasoning process, and contrasts with provider default behaviors which may ignore
+    prior thoughts, particularly if tool calls are not involved.
+
+    When `True`, we will always re-encode Mirascope messages being passed to the provider,
+    rather than reusing raw provider response content. This may disable provider-specific
+    behavior like cached reasoning tokens.
 
-    Pass a `ThinkingConfig` to configure thinking behavior. The `level` field controls
-    whether thinking is enabled and how much reasoning to use. Level may be one of
-    "minimal", "low", "medium", or "high". If level is unset, then thinking is enabled
-    with a provider-specific default level.
+    If `False`, then `Thoughts` will not be encoded as text, and whether reasoning context
+    is available to the model depends entirely on the provider's behavior.
 
-    `ThinkingConfig` can also include `encode_thoughts_as_text`, which is an advanced
-    feature for providing past thoughts back to the model as text content. This is
-    primarily useful for making thoughts transferable when passing a conversation
-    to a different model or provider than the one that generated the thinking.
+    Defaults to `False` if unset.
     """

mirascope/llm/clients/google/__init__.py

@@ -0,0 +1,6 @@
+"""Google client implementation."""
+
+from .clients import GoogleClient, client, get_client
+from .model_ids import GoogleModelId
+
+__all__ = ["GoogleClient", "GoogleModelId", "client", "get_client"]

mirascope/llm/{providers/openai/responses → clients/google}/_utils/__init__.py

@@ -1,4 +1,3 @@
-from ....base._utils import get_include_thoughts
 from .decode import (
     decode_async_stream,
     decode_response,
@@ -11,5 +10,4 @@ __all__ = [
     "decode_response",
     "decode_stream",
     "encode_request",
-    "get_include_thoughts",
 ]

mirascope/llm/{providers → clients}/google/_utils/decode.py

@@ -29,10 +29,8 @@ from ....responses import (
     FinishReasonChunk,
     RawMessageChunk,
     RawStreamEventChunk,
-    Usage,
-    UsageDeltaChunk,
 )
-from ..model_id import GoogleModelId, model_name
+from ..model_ids import GoogleModelId
 from .encode import UNKNOWN_TOOL_ID
 
 GOOGLE_FINISH_REASON_MAP = {
@@ -45,30 +43,6 @@ GOOGLE_FINISH_REASON_MAP = {
 }
 
 
-def _decode_usage(
-    usage: genai_types.GenerateContentResponseUsageMetadata | None,
-) -> Usage | None:
-    """Convert Google UsageMetadata to Mirascope Usage."""
-    if (
-        usage is None
-        or usage.prompt_token_count is None
-        or usage.candidates_token_count is None
-    ):  # pragma: no cover
-        return None
-
-    reasoning_tokens = usage.thoughts_token_count or 0
-    output_tokens = usage.candidates_token_count + reasoning_tokens
-
-    return Usage(
-        input_tokens=usage.prompt_token_count,
-        output_tokens=output_tokens,
-        cache_read_tokens=usage.cached_content_token_count or 0,
-        cache_write_tokens=0,
-        reasoning_tokens=usage.thoughts_token_count or 0,
-        raw=usage,
-    )
-
-
 def _decode_content_part(part: genai_types.Part) -> AssistantContentPart | None:
     """Returns an `AssistantContentPart` (or `None`) decoded from a google `Part`"""
     if part.thought and part.text:
@@ -114,7 +88,7 @@ def _decode_candidate_content(
     candidate: genai_types.Candidate,
 ) -> Sequence[AssistantContentPart]:
     """Returns a sequence of `AssistantContentPart` decoded from a google `Candidate`"""
-    content_parts: list[AssistantContentPart] = []
+    content_parts = []
     if candidate.content and candidate.content.parts:
         for part in candidate.content.parts:
             decoded_part = _decode_content_part(part)
@@ -124,20 +98,15 @@
 
 
 def decode_response(
-    response: genai_types.GenerateContentResponse,
-    model_id: GoogleModelId,
-    *,
-    include_thoughts: bool,
-) -> tuple[AssistantMessage, FinishReason | None, Usage | None]:
-    """Returns an `AssistantMessage`, `FinishReason`, and `Usage` extracted from a `GenerateContentResponse`"""
+    response: genai_types.GenerateContentResponse, model_id: GoogleModelId
+) -> tuple[AssistantMessage, FinishReason | None]:
+    """Returns an `AssistantMessage` and `FinishReason` extracted from a `GenerateContentResponse`"""
     content: Sequence[AssistantContentPart] = []
     candidate_content: genai_types.Content | None = None
     finish_reason: FinishReason | None = None
 
     if response.candidates and (candidate := response.candidates[0]):
         content = _decode_candidate_content(candidate)
-        if not include_thoughts:
-            content = [part for part in content if part.type != "thought"]
         candidate_content = candidate.content
         if candidate.finish_reason:
             finish_reason = GOOGLE_FINISH_REASON_MAP.get(candidate.finish_reason)
@@ -146,26 +115,21 @@
 
     assistant_message = AssistantMessage(
         content=content,
-        provider_id="google",
+        provider="google",
         model_id=model_id,
-        provider_model_name=model_name(model_id),
         raw_message=candidate_content.model_dump(),
     )
 
-    usage = _decode_usage(response.usage_metadata)
-    return assistant_message, finish_reason, usage
+    return assistant_message, finish_reason
 
 
 class _GoogleChunkProcessor:
     """Processes Google stream chunks and maintains state across chunks."""
 
-    def __init__(self, *, include_thoughts: bool) -> None:
+    def __init__(self) -> None:
         self.current_content_type: Literal["text", "tool_call", "thought"] | None = None
         self.accumulated_parts: list[genai_types.Part] = []
         self.reconstructed_content = genai_types.Content(parts=[])
-        # Track previous cumulative usage to compute deltas
-        self.prev_usage = Usage()
-        self.include_thoughts = include_thoughts
 
     def process_chunk(
         self, chunk: genai_types.GenerateContentResponse
@@ -180,34 +144,27 @@ class _GoogleChunkProcessor:
             for part in candidate.content.parts:
                 self.accumulated_parts.append(part)
                 if self.current_content_type == "thought" and not part.thought:
-                    if self.include_thoughts:
-                        yield ThoughtEndChunk()
+                    yield ThoughtEndChunk()
                     self.current_content_type = None
-                elif (
-                    self.current_content_type == "text" and not part.text
-                ):  # pragma: no cover
-                    yield TextEndChunk()
-                    self.current_content_type = None
-                elif (
-                    self.current_content_type == "tool_call" and not part.function_call
-                ):  # pragma: no cover
+                elif self.current_content_type == "text" and not part.text:
+                    yield TextEndChunk()  # pragma: no cover
+                    self.current_content_type = None  # pragma: no cover
+                elif self.current_content_type == "tool_call" and not part.function_call:
                     # In testing, Gemini never emits tool calls and text in the same message
                     # (even when specifically asked in system and user prompt), so
                     # the following code is uncovered but included for completeness
-                    yield ToolCallEndChunk(id=UNKNOWN_TOOL_ID)
-                    self.current_content_type = None
+                    yield ToolCallEndChunk()  # pragma: no cover
+                    self.current_content_type = None  # pragma: no cover
 
                 if part.thought:
                     if self.current_content_type is None:
-                        if self.include_thoughts:
-                            yield ThoughtStartChunk()
+                        yield ThoughtStartChunk()
                         self.current_content_type = "thought"
                     if not part.text:
                         raise ValueError(
                             "Inside thought part with no text content"
                         )  # pragma: no cover
-                    if self.include_thoughts:
-                        yield ThoughtChunk(delta=part.text)
+                    yield ThoughtChunk(delta=part.text)
 
                 elif part.text:
                     if self.current_content_type is None:
@@ -222,29 +179,23 @@
                             "Required name missing on Google function call"
                         )  # pragma: no cover
 
-                    tool_id = function_call.id or UNKNOWN_TOOL_ID
-                    self.current_content_type = "tool_call"
-
                     yield ToolCallStartChunk(
-                        id=tool_id,
+                        id=function_call.id or UNKNOWN_TOOL_ID,
                         name=function_call.name,
                     )
 
                     yield ToolCallChunk(
-                        id=tool_id,
                         delta=json.dumps(function_call.args)
                         if function_call.args
                         else "{}",
                     )
-                    yield ToolCallEndChunk(id=tool_id)
-                    self.current_content_type = None
+                    yield ToolCallEndChunk()
 
             if candidate.finish_reason:
                 if self.current_content_type == "text":
                     yield TextEndChunk()
                 elif self.current_content_type == "thought":
-                    if self.include_thoughts:  # pragma: no cover
-                        yield ThoughtEndChunk()
+                    yield ThoughtEndChunk()  # pragma: no cover
                 elif self.current_content_type is not None:
                     raise NotImplementedError
 
@@ -254,29 +205,6 @@ class _GoogleChunkProcessor:
         if finish_reason is not None:
             yield FinishReasonChunk(finish_reason=finish_reason)
 
-        # Emit usage delta if usage metadata is present
-        if chunk.usage_metadata:
-            usage_metadata = chunk.usage_metadata
-            current_input = usage_metadata.prompt_token_count or 0
-            current_output = usage_metadata.candidates_token_count or 0
-            current_cache_read = usage_metadata.cached_content_token_count or 0
-            current_reasoning = usage_metadata.thoughts_token_count or 0
-
-            yield UsageDeltaChunk(
-                input_tokens=current_input - self.prev_usage.input_tokens,
-                output_tokens=current_output - self.prev_usage.output_tokens,
-                cache_read_tokens=current_cache_read
-                - self.prev_usage.cache_read_tokens,
-                cache_write_tokens=0,
-                reasoning_tokens=current_reasoning - self.prev_usage.reasoning_tokens,
-            )
-
-            # Update previous usage
-            self.prev_usage.input_tokens = current_input
-            self.prev_usage.output_tokens = current_output
-            self.prev_usage.cache_read_tokens = current_cache_read
-            self.prev_usage.reasoning_tokens = current_reasoning
-
     def raw_message_chunk(self) -> RawMessageChunk:
         content = genai_types.Content(role="model", parts=self.accumulated_parts)
         return RawMessageChunk(raw_message=content.model_dump())
@@ -284,11 +212,9 @@
 
 def decode_stream(
     google_stream: Iterator[genai_types.GenerateContentResponse],
-    *,
-    include_thoughts: bool,
 ) -> ChunkIterator:
     """Returns a ChunkIterator converted from a Google stream."""
-    processor = _GoogleChunkProcessor(include_thoughts=include_thoughts)
+    processor = _GoogleChunkProcessor()
     for chunk in google_stream:
         yield from processor.process_chunk(chunk)
     yield processor.raw_message_chunk()
@@ -296,11 +222,9 @@ def decode_stream(
 
 async def decode_async_stream(
     google_stream: AsyncIterator[genai_types.GenerateContentResponse],
-    *,
-    include_thoughts: bool,
 ) -> AsyncChunkIterator:
     """Returns an AsyncChunkIterator converted from a Google async stream."""
-    processor = _GoogleChunkProcessor(include_thoughts=include_thoughts)
+    processor = _GoogleChunkProcessor()
    async for chunk in google_stream:
        for item in processor.process_chunk(chunk):
            yield item
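
Standalone sketch (not part of the diff): the stream-usage plumbing removed above turns Google's cumulative per-chunk token counts into per-chunk deltas. The names below are illustrative stand-ins, not the mirascope `Usage`/`UsageDeltaChunk` types, but the subtraction mirrors the removed logic.

from dataclasses import dataclass

@dataclass
class _RunningUsage:
    input_tokens: int = 0
    output_tokens: int = 0

def usage_delta(prev: _RunningUsage, prompt_tokens: int, candidate_tokens: int) -> dict[str, int]:
    # Google reports cumulative totals on each chunk; a delta is the current total
    # minus what was already seen, after which the running totals are updated.
    delta = {
        "input_tokens": prompt_tokens - prev.input_tokens,
        "output_tokens": candidate_tokens - prev.output_tokens,
    }
    prev.input_tokens, prev.output_tokens = prompt_tokens, candidate_tokens
    return delta

prev = _RunningUsage()
print(usage_delta(prev, 12, 5))  # first chunk: {'input_tokens': 12, 'output_tokens': 5}
print(usage_delta(prev, 12, 9))  # cumulative output grew to 9 -> delta of 4
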

mirascope/llm/{providers → clients}/google/_utils/encode.py

@@ -1,13 +1,10 @@
 """Google message encoding and request preparation."""
 
-from __future__ import annotations
-
 import base64
 import json
 from collections.abc import Sequence
 from functools import lru_cache
-from typing import TYPE_CHECKING, Any, TypedDict, cast
-from typing_extensions import Required
+from typing import Any, cast
 
 from google.genai import types as genai_types
 
@@ -16,116 +13,19 @@ from ....exceptions import FeatureNotSupportedError
 from ....formatting import (
     Format,
     FormattableT,
-    OutputParser,
+    _utils as _formatting_utils,
     resolve_format,
 )
 from ....messages import AssistantMessage, Message, UserMessage
-from ....tools import FORMAT_TOOL_NAME, AnyToolSchema, BaseToolkit
-from ...base import _utils as _base_utils
-from ..model_id import GoogleModelId, model_name
-from ..model_info import MODELS_WITHOUT_STRUCTURED_OUTPUT_AND_TOOLS_SUPPORT
-
-if TYPE_CHECKING:
-    from ....models import Params, ThinkingConfig, ThinkingLevel
+from ....tools import FORMAT_TOOL_NAME, BaseToolkit, ToolSchema
+from ...base import BaseKwargs, Params, _utils as _base_utils
+from ..model_ids import GoogleModelId
 
 UNKNOWN_TOOL_ID = "google_unknown_tool_id"
 
-# Thinking level to a float multiplier % of max tokens (for 2.5 models using budget)
-THINKING_LEVEL_TO_BUDGET_MULTIPLIER: dict[ThinkingLevel, float] = {
-    "none": 0,
-    "minimal": 0.1,
-    "low": 0.2,
-    "medium": 0.4,
-    "high": 0.6,
-    "max": 0.8,
-}
-
-# Gemini 3 Pro supports only LOW or HIGH
-# https://ai.google.dev/gemini-api/docs/gemini-3#thinking_level
-THINKING_LEVEL_FOR_GEMINI_3_PRO: dict[ThinkingLevel, genai_types.ThinkingLevel] = {
-    "default": genai_types.ThinkingLevel.THINKING_LEVEL_UNSPECIFIED,
-    "none": genai_types.ThinkingLevel.LOW,
-    "minimal": genai_types.ThinkingLevel.LOW,
-    "low": genai_types.ThinkingLevel.LOW,
-    "medium": genai_types.ThinkingLevel.HIGH,
-    "high": genai_types.ThinkingLevel.HIGH,
-    "max": genai_types.ThinkingLevel.HIGH,
-}
-
-# Gemini 3 Flash supports MINIMAL, LOW, MEDIUM, HIGH
-# https://ai.google.dev/gemini-api/docs/gemini-3#thinking_level
-THINKING_LEVEL_FOR_GEMINI_3_FLASH: dict[ThinkingLevel, genai_types.ThinkingLevel] = {
-    "default": genai_types.ThinkingLevel.THINKING_LEVEL_UNSPECIFIED,
-    "none": genai_types.ThinkingLevel.MINIMAL,
-    "minimal": genai_types.ThinkingLevel.MINIMAL,
-    "low": genai_types.ThinkingLevel.LOW,
-    "medium": genai_types.ThinkingLevel.MEDIUM,
-    "high": genai_types.ThinkingLevel.HIGH,
-    "max": genai_types.ThinkingLevel.HIGH,
-}
-
-
-def google_thinking_config(
-    thinking_config: ThinkingConfig,
-    max_tokens: int | None,
-    model_id: GoogleModelId,
-) -> genai_types.ThinkingConfigDict:
-    """Compute Google thinking configuration based on model version.
-
-    Args:
-        thinking_config: The ThinkingConfig from params
-        max_tokens: Max output tokens (used to compute budget for 2.5 models)
-        model_id: The Google model ID to determine version
-
-    Returns:
-        ThinkingConfigDict with either thinking_level or thinking_budget set.
-
-    Notes:
-        - Gemini 2.5 models use thinking_budget (token count)
-        - Gemini 3.0 Pro supports thinking_level "low" or "high"
-        - Gemini 3.0 Flash supports thinking_level "minimal", "low", "medium", "high"
-
-    See: https://ai.google.dev/gemini-api/docs/gemini-3#thinking_level
-    """
-    level: ThinkingLevel = thinking_config.get("level", "default")
-    include_thoughts = thinking_config.get("include_thoughts")
-
-    result = genai_types.ThinkingConfigDict()
-
-    if "gemini-3-flash" in model_id:
-        result["thinking_level"] = THINKING_LEVEL_FOR_GEMINI_3_FLASH.get(
-            level, genai_types.ThinkingLevel.THINKING_LEVEL_UNSPECIFIED
-        )
-    elif "gemini-3-pro" in model_id:
-        result["thinking_level"] = THINKING_LEVEL_FOR_GEMINI_3_PRO.get(
-            level, genai_types.ThinkingLevel.THINKING_LEVEL_UNSPECIFIED
-        )
-    else:  # Fall back to 2.5-style budgets
-        # 2.5 models use thinking_budget
-        if level == "default":
-            budget = -1  # Dynamic budget
-        elif level == "none":
-            budget = 0  # Disable thinking
-        else:
-            # Compute budget as percentage of max_tokens
-            if max_tokens is None:
-                max_tokens = 16000
-            multiplier = THINKING_LEVEL_TO_BUDGET_MULTIPLIER.get(level, 0.4)
-            budget = int(multiplier * max_tokens)
-
-        result["thinking_budget"] = budget
-    if include_thoughts is not None:
-        result["include_thoughts"] = include_thoughts
-
-    return result
 
-
-class GoogleKwargs(TypedDict, total=False):
-    """Kwargs for Google's generate_content method."""
-
-    model: Required[str]
-    contents: Required[genai_types.ContentListUnionDict]
-    config: genai_types.GenerateContentConfigDict
+class GoogleKwargs(BaseKwargs, genai_types.GenerateContentConfigDict):
+    """Google's `GenerateContentConfigDict` typed dict, subclassing BaseKwargs for type safety."""
 
 
 def _resolve_refs(
@@ -152,7 +52,7 @@ def _encode_content(
     content: Sequence[ContentPart], encode_thoughts: bool
 ) -> list[genai_types.PartDict]:
     """Returns a list of google `PartDicts` converted from a sequence of Mirascope `ContentPart`s"""
-    result: list[genai_types.PartDict] = []
+    result = []
 
     for part in content:
         if part.type == "text":
@@ -199,7 +99,7 @@ def _encode_content(
                     function_response=genai_types.FunctionResponseDict(
                         id=part.id if part.id != UNKNOWN_TOOL_ID else None,
                         name=part.name,
-                        response={"output": str(part.result)},
+                        response={"output": str(part.value)},
                     )
                 )
             )
@@ -221,7 +121,7 @@ def _encode_message(
     """Returns a Google `ContentDict` converted from a Mirascope `Message`"""
     if (
         message.role == "assistant"
-        and message.provider_id == "google"
+        and message.provider == "google"
         and message.model_id == model_id
         and message.raw_message
        and not encode_thoughts
@@ -244,7 +144,7 @@ def _encode_messages(
 
 @lru_cache(maxsize=128)
 def _convert_tool_to_function_declaration(
-    tool: AnyToolSchema,
+    tool: ToolSchema,
 ) -> genai_types.FunctionDeclarationDict:
     """Convert a single Mirascope tool to Google FunctionDeclaration format with caching."""
     schema_dict = tool.parameters.model_dump(by_alias=True, exclude_none=True)
@@ -270,25 +170,21 @@ def encode_request(
     *,
     model_id: GoogleModelId,
     messages: Sequence[Message],
-    tools: Sequence[AnyToolSchema] | BaseToolkit[AnyToolSchema] | None,
-    format: type[FormattableT]
-    | Format[FormattableT]
-    | OutputParser[FormattableT]
-    | None,
+    tools: Sequence[ToolSchema] | BaseToolkit | None,
+    format: type[FormattableT] | Format[FormattableT] | None,
     params: Params,
-) -> tuple[Sequence[Message], Format[FormattableT] | None, GoogleKwargs]:
+) -> tuple[
+    Sequence[Message],
+    Format[FormattableT] | None,
+    genai_types.ContentListUnionDict,
+    GoogleKwargs,
+]:
     """Prepares a request for the genai `Client.models.generate_content` method."""
-    if not model_id.startswith("google/"):  # pragma: no cover
-        raise ValueError(f"Model ID must start with 'google/' prefix, got: {model_id}")
-
-    google_config: genai_types.GenerateContentConfigDict = (
-        genai_types.GenerateContentConfigDict()
-    )
-    encode_thoughts_as_text = False
-    google_model_name = model_name(model_id)
+    google_config: GoogleKwargs = GoogleKwargs()
+    encode_thoughts = False
 
     with _base_utils.ensure_all_params_accessed(
-        params=params, provider_id="google"
+        params=params, provider="google"
     ) as param_accessor:
         if param_accessor.temperature is not None:
             google_config["temperature"] = param_accessor.temperature
@@ -303,54 +199,39 @@ def encode_request(
         if param_accessor.stop_sequences is not None:
             google_config["stop_sequences"] = param_accessor.stop_sequences
         if param_accessor.thinking is not None:
-            thinking_config = param_accessor.thinking
-
-            # Compute thinking config based on model version
-            google_config["thinking_config"] = google_thinking_config(
-                thinking_config, param_accessor.max_tokens, model_id
-            )
-
-            # Handle encode_thoughts_as_text from ThinkingConfig
-            if thinking_config.get("encode_thoughts_as_text"):
-                encode_thoughts_as_text = True
+            if param_accessor.thinking:
+                google_config["thinking_config"] = genai_types.ThinkingConfigDict(
+                    thinking_budget=-1,  # automatic budget
+                    include_thoughts=True,
+                )
+            else:
+                google_config["thinking_config"] = genai_types.ThinkingConfigDict(
+                    include_thoughts=False, thinking_budget=0
+                )
+        if param_accessor.encode_thoughts_as_text:
+            encode_thoughts = True
 
     tools = tools.tools if isinstance(tools, BaseToolkit) else tools or []
-
-    if _base_utils.has_strict_tools(tools):
-        raise FeatureNotSupportedError(
-            feature="strict tools",
-            provider_id="google",
-            model_id=model_id,
-            message="Google does not support strict mode for tools. "
-            "Set strict=False on your tools or omit the strict parameter.",
-        )
-
     google_tools: list[genai_types.ToolDict] = []
 
-    allows_strict_mode_with_tools = (
-        google_model_name not in MODELS_WITHOUT_STRUCTURED_OUTPUT_AND_TOOLS_SUPPORT
+    format = resolve_format(
+        format,
+        # Google does not support strict outputs when tools are present
+        # (Gemini 2.5 will error, 2.0 and below will ignore tools)
+        default_mode="strict" if not tools else "tool",
     )
-    # Older google models do not allow strict mode when using tools; if so, we use tool
-    # mode when tools are present by default for compatibility. Otherwise, prefer strict mode.
-    default_mode = "tool" if tools and not allows_strict_mode_with_tools else "strict"
-    format = resolve_format(format, default_mode=default_mode)
     if format is not None:
-        if (
-            format.mode in ("strict", "json")
-            and tools
-            and not allows_strict_mode_with_tools
-        ):
+        if format.mode in ("strict", "json") and tools:
             raise FeatureNotSupportedError(
                 feature=f"formatting_mode:{format.mode} with tools",
-                provider_id="google",
-                model_id=model_id,
+                provider="google",
             )
 
         if format.mode == "strict":
             google_config["response_mime_type"] = "application/json"
             google_config["response_schema"] = format.schema
         elif format.mode == "tool":
-            format_tool_schema = format.create_tool_schema()
+            format_tool_schema = _formatting_utils.create_tool_schema(format)
             format_tool = _convert_tool_to_function_declaration(format_tool_schema)
             google_tools.append(
                 genai_types.ToolDict(function_declarations=[format_tool])
@@ -390,12 +271,9 @@ def encode_request(
     if system_message_content:
         google_config["system_instruction"] = system_message_content
 
-    kwargs = GoogleKwargs(
-        model=model_name(model_id),
-        contents=_encode_messages(
-            remaining_messages, model_id, encode_thoughts_as_text
-        ),
-        config=google_config,
+    return (
+        messages,
+        format,
+        _encode_messages(remaining_messages, model_id, encode_thoughts),
+        google_config,
     )
-
-    return messages, format, kwargs
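
Worked example (not part of the diff): the removed `google_thinking_config` helper maps a `ThinkingLevel` to a token budget for budget-based (Gemini 2.5-style) models as `int(multiplier * max_tokens)`, with `max_tokens` defaulting to 16000 when unset. The sketch below restates only that arithmetic, using the multiplier table shown above.

THINKING_LEVEL_TO_BUDGET_MULTIPLIER = {
    "none": 0,
    "minimal": 0.1,
    "low": 0.2,
    "medium": 0.4,
    "high": 0.6,
    "max": 0.8,
}

def thinking_budget(level: str, max_tokens: int | None) -> int:
    # Mirrors the 2.5-style branch of the removed helper.
    if level == "default":
        return -1  # dynamic budget
    if level == "none":
        return 0  # disable thinking
    if max_tokens is None:
        max_tokens = 16000
    return int(THINKING_LEVEL_TO_BUDGET_MULTIPLIER.get(level, 0.4) * max_tokens)

print(thinking_budget("medium", None))  # 0.4 * 16000 = 6400
print(thinking_budget("high", 2000))    # 0.6 * 2000 = 1200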