mirascope 2.0.0a2__py3-none-any.whl → 2.0.0a4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (252)
  1. mirascope/__init__.py +2 -2
  2. mirascope/api/__init__.py +6 -0
  3. mirascope/api/_generated/README.md +207 -0
  4. mirascope/api/_generated/__init__.py +141 -0
  5. mirascope/api/_generated/client.py +163 -0
  6. mirascope/api/_generated/core/__init__.py +52 -0
  7. mirascope/api/_generated/core/api_error.py +23 -0
  8. mirascope/api/_generated/core/client_wrapper.py +58 -0
  9. mirascope/api/_generated/core/datetime_utils.py +30 -0
  10. mirascope/api/_generated/core/file.py +70 -0
  11. mirascope/api/_generated/core/force_multipart.py +16 -0
  12. mirascope/api/_generated/core/http_client.py +619 -0
  13. mirascope/api/_generated/core/http_response.py +55 -0
  14. mirascope/api/_generated/core/jsonable_encoder.py +102 -0
  15. mirascope/api/_generated/core/pydantic_utilities.py +310 -0
  16. mirascope/api/_generated/core/query_encoder.py +60 -0
  17. mirascope/api/_generated/core/remove_none_from_dict.py +11 -0
  18. mirascope/api/_generated/core/request_options.py +35 -0
  19. mirascope/api/_generated/core/serialization.py +282 -0
  20. mirascope/api/_generated/docs/__init__.py +4 -0
  21. mirascope/api/_generated/docs/client.py +95 -0
  22. mirascope/api/_generated/docs/raw_client.py +132 -0
  23. mirascope/api/_generated/environment.py +9 -0
  24. mirascope/api/_generated/errors/__init__.py +17 -0
  25. mirascope/api/_generated/errors/bad_request_error.py +15 -0
  26. mirascope/api/_generated/errors/conflict_error.py +15 -0
  27. mirascope/api/_generated/errors/forbidden_error.py +15 -0
  28. mirascope/api/_generated/errors/internal_server_error.py +15 -0
  29. mirascope/api/_generated/errors/not_found_error.py +15 -0
  30. mirascope/api/_generated/health/__init__.py +7 -0
  31. mirascope/api/_generated/health/client.py +96 -0
  32. mirascope/api/_generated/health/raw_client.py +129 -0
  33. mirascope/api/_generated/health/types/__init__.py +8 -0
  34. mirascope/api/_generated/health/types/health_check_response.py +24 -0
  35. mirascope/api/_generated/health/types/health_check_response_status.py +5 -0
  36. mirascope/api/_generated/organizations/__init__.py +25 -0
  37. mirascope/api/_generated/organizations/client.py +380 -0
  38. mirascope/api/_generated/organizations/raw_client.py +876 -0
  39. mirascope/api/_generated/organizations/types/__init__.py +23 -0
  40. mirascope/api/_generated/organizations/types/organizations_create_response.py +24 -0
  41. mirascope/api/_generated/organizations/types/organizations_create_response_role.py +7 -0
  42. mirascope/api/_generated/organizations/types/organizations_get_response.py +24 -0
  43. mirascope/api/_generated/organizations/types/organizations_get_response_role.py +7 -0
  44. mirascope/api/_generated/organizations/types/organizations_list_response_item.py +24 -0
  45. mirascope/api/_generated/organizations/types/organizations_list_response_item_role.py +7 -0
  46. mirascope/api/_generated/organizations/types/organizations_update_response.py +24 -0
  47. mirascope/api/_generated/organizations/types/organizations_update_response_role.py +7 -0
  48. mirascope/api/_generated/projects/__init__.py +17 -0
  49. mirascope/api/_generated/projects/client.py +458 -0
  50. mirascope/api/_generated/projects/raw_client.py +1016 -0
  51. mirascope/api/_generated/projects/types/__init__.py +15 -0
  52. mirascope/api/_generated/projects/types/projects_create_response.py +30 -0
  53. mirascope/api/_generated/projects/types/projects_get_response.py +30 -0
  54. mirascope/api/_generated/projects/types/projects_list_response_item.py +30 -0
  55. mirascope/api/_generated/projects/types/projects_update_response.py +30 -0
  56. mirascope/api/_generated/reference.md +753 -0
  57. mirascope/api/_generated/traces/__init__.py +55 -0
  58. mirascope/api/_generated/traces/client.py +162 -0
  59. mirascope/api/_generated/traces/raw_client.py +168 -0
  60. mirascope/api/_generated/traces/types/__init__.py +95 -0
  61. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item.py +36 -0
  62. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource.py +31 -0
  63. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item.py +25 -0
  64. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value.py +54 -0
  65. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_array_value.py +23 -0
  66. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value.py +28 -0
  67. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_resource_attributes_item_value_kvlist_value_values_item.py +24 -0
  68. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item.py +35 -0
  69. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope.py +35 -0
  70. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item.py +27 -0
  71. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value.py +54 -0
  72. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_array_value.py +23 -0
  73. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value.py +28 -0
  74. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_scope_attributes_item_value_kvlist_value_values_item.py +24 -0
  75. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item.py +60 -0
  76. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item.py +29 -0
  77. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value.py +54 -0
  78. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_array_value.py +23 -0
  79. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value.py +28 -0
  80. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_attributes_item_value_kvlist_value_values_item.py +24 -0
  81. mirascope/api/_generated/traces/types/traces_create_request_resource_spans_item_scope_spans_item_spans_item_status.py +24 -0
  82. mirascope/api/_generated/traces/types/traces_create_response.py +27 -0
  83. mirascope/api/_generated/traces/types/traces_create_response_partial_success.py +28 -0
  84. mirascope/api/_generated/types/__init__.py +37 -0
  85. mirascope/api/_generated/types/already_exists_error.py +24 -0
  86. mirascope/api/_generated/types/already_exists_error_tag.py +5 -0
  87. mirascope/api/_generated/types/database_error.py +24 -0
  88. mirascope/api/_generated/types/database_error_tag.py +5 -0
  89. mirascope/api/_generated/types/http_api_decode_error.py +29 -0
  90. mirascope/api/_generated/types/http_api_decode_error_tag.py +5 -0
  91. mirascope/api/_generated/types/issue.py +40 -0
  92. mirascope/api/_generated/types/issue_tag.py +17 -0
  93. mirascope/api/_generated/types/not_found_error_body.py +24 -0
  94. mirascope/api/_generated/types/not_found_error_tag.py +5 -0
  95. mirascope/api/_generated/types/permission_denied_error.py +24 -0
  96. mirascope/api/_generated/types/permission_denied_error_tag.py +7 -0
  97. mirascope/api/_generated/types/property_key.py +7 -0
  98. mirascope/api/_generated/types/property_key_key.py +27 -0
  99. mirascope/api/_generated/types/property_key_key_tag.py +5 -0
  100. mirascope/api/client.py +255 -0
  101. mirascope/api/settings.py +81 -0
  102. mirascope/llm/__init__.py +45 -11
  103. mirascope/llm/calls/calls.py +81 -57
  104. mirascope/llm/calls/decorator.py +121 -115
  105. mirascope/llm/content/__init__.py +3 -2
  106. mirascope/llm/context/_utils.py +19 -6
  107. mirascope/llm/exceptions.py +30 -16
  108. mirascope/llm/formatting/_utils.py +9 -5
  109. mirascope/llm/formatting/format.py +2 -2
  110. mirascope/llm/formatting/from_call_args.py +2 -2
  111. mirascope/llm/messages/message.py +13 -5
  112. mirascope/llm/models/__init__.py +2 -2
  113. mirascope/llm/models/models.py +189 -81
  114. mirascope/llm/prompts/__init__.py +13 -12
  115. mirascope/llm/prompts/_utils.py +27 -24
  116. mirascope/llm/prompts/decorator.py +133 -204
  117. mirascope/llm/prompts/prompts.py +424 -0
  118. mirascope/llm/prompts/protocols.py +25 -59
  119. mirascope/llm/providers/__init__.py +44 -0
  120. mirascope/llm/{clients → providers}/_missing_import_stubs.py +8 -6
  121. mirascope/llm/providers/anthropic/__init__.py +29 -0
  122. mirascope/llm/providers/anthropic/_utils/__init__.py +23 -0
  123. mirascope/llm/providers/anthropic/_utils/beta_decode.py +271 -0
  124. mirascope/llm/providers/anthropic/_utils/beta_encode.py +216 -0
  125. mirascope/llm/{clients → providers}/anthropic/_utils/decode.py +44 -11
  126. mirascope/llm/providers/anthropic/_utils/encode.py +356 -0
  127. mirascope/llm/providers/anthropic/beta_provider.py +322 -0
  128. mirascope/llm/providers/anthropic/model_id.py +23 -0
  129. mirascope/llm/providers/anthropic/model_info.py +87 -0
  130. mirascope/llm/providers/anthropic/provider.py +416 -0
  131. mirascope/llm/{clients → providers}/base/__init__.py +3 -3
  132. mirascope/llm/{clients → providers}/base/_utils.py +25 -8
  133. mirascope/llm/{clients/base/client.py → providers/base/base_provider.py} +255 -126
  134. mirascope/llm/providers/google/__init__.py +21 -0
  135. mirascope/llm/{clients → providers}/google/_utils/decode.py +61 -7
  136. mirascope/llm/{clients → providers}/google/_utils/encode.py +44 -30
  137. mirascope/llm/providers/google/model_id.py +22 -0
  138. mirascope/llm/providers/google/model_info.py +62 -0
  139. mirascope/llm/providers/google/provider.py +442 -0
  140. mirascope/llm/providers/load_provider.py +54 -0
  141. mirascope/llm/providers/mlx/__init__.py +24 -0
  142. mirascope/llm/providers/mlx/_utils.py +129 -0
  143. mirascope/llm/providers/mlx/encoding/__init__.py +8 -0
  144. mirascope/llm/providers/mlx/encoding/base.py +69 -0
  145. mirascope/llm/providers/mlx/encoding/transformers.py +147 -0
  146. mirascope/llm/providers/mlx/mlx.py +237 -0
  147. mirascope/llm/providers/mlx/model_id.py +17 -0
  148. mirascope/llm/providers/mlx/provider.py +415 -0
  149. mirascope/llm/providers/model_id.py +16 -0
  150. mirascope/llm/providers/ollama/__init__.py +19 -0
  151. mirascope/llm/providers/ollama/provider.py +71 -0
  152. mirascope/llm/providers/openai/__init__.py +6 -0
  153. mirascope/llm/providers/openai/completions/__init__.py +25 -0
  154. mirascope/llm/{clients → providers}/openai/completions/_utils/__init__.py +2 -0
  155. mirascope/llm/{clients → providers}/openai/completions/_utils/decode.py +60 -6
  156. mirascope/llm/{clients → providers}/openai/completions/_utils/encode.py +37 -26
  157. mirascope/llm/providers/openai/completions/base_provider.py +513 -0
  158. mirascope/llm/providers/openai/completions/provider.py +22 -0
  159. mirascope/llm/providers/openai/model_id.py +31 -0
  160. mirascope/llm/providers/openai/model_info.py +303 -0
  161. mirascope/llm/providers/openai/provider.py +398 -0
  162. mirascope/llm/providers/openai/responses/__init__.py +21 -0
  163. mirascope/llm/{clients → providers}/openai/responses/_utils/decode.py +59 -6
  164. mirascope/llm/{clients → providers}/openai/responses/_utils/encode.py +34 -23
  165. mirascope/llm/providers/openai/responses/provider.py +469 -0
  166. mirascope/llm/providers/provider_id.py +23 -0
  167. mirascope/llm/providers/provider_registry.py +169 -0
  168. mirascope/llm/providers/together/__init__.py +19 -0
  169. mirascope/llm/providers/together/provider.py +40 -0
  170. mirascope/llm/responses/__init__.py +3 -0
  171. mirascope/llm/responses/base_response.py +14 -5
  172. mirascope/llm/responses/base_stream_response.py +35 -6
  173. mirascope/llm/responses/finish_reason.py +1 -0
  174. mirascope/llm/responses/response.py +33 -13
  175. mirascope/llm/responses/root_response.py +12 -13
  176. mirascope/llm/responses/stream_response.py +35 -23
  177. mirascope/llm/responses/usage.py +95 -0
  178. mirascope/llm/tools/__init__.py +9 -2
  179. mirascope/llm/tools/_utils.py +12 -3
  180. mirascope/llm/tools/protocols.py +4 -4
  181. mirascope/llm/tools/tool_schema.py +44 -9
  182. mirascope/llm/tools/tools.py +10 -9
  183. mirascope/ops/__init__.py +156 -0
  184. mirascope/ops/_internal/__init__.py +5 -0
  185. mirascope/ops/_internal/closure.py +1118 -0
  186. mirascope/ops/_internal/configuration.py +126 -0
  187. mirascope/ops/_internal/context.py +76 -0
  188. mirascope/ops/_internal/exporters/__init__.py +26 -0
  189. mirascope/ops/_internal/exporters/exporters.py +342 -0
  190. mirascope/ops/_internal/exporters/processors.py +104 -0
  191. mirascope/ops/_internal/exporters/types.py +165 -0
  192. mirascope/ops/_internal/exporters/utils.py +29 -0
  193. mirascope/ops/_internal/instrumentation/__init__.py +8 -0
  194. mirascope/ops/_internal/instrumentation/llm/__init__.py +8 -0
  195. mirascope/ops/_internal/instrumentation/llm/encode.py +238 -0
  196. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/__init__.py +38 -0
  197. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_input_messages.py +31 -0
  198. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_output_messages.py +38 -0
  199. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/gen_ai_system_instructions.py +18 -0
  200. mirascope/ops/_internal/instrumentation/llm/gen_ai_types/shared.py +100 -0
  201. mirascope/ops/_internal/instrumentation/llm/llm.py +1288 -0
  202. mirascope/ops/_internal/propagation.py +198 -0
  203. mirascope/ops/_internal/protocols.py +51 -0
  204. mirascope/ops/_internal/session.py +139 -0
  205. mirascope/ops/_internal/spans.py +232 -0
  206. mirascope/ops/_internal/traced_calls.py +371 -0
  207. mirascope/ops/_internal/traced_functions.py +394 -0
  208. mirascope/ops/_internal/tracing.py +276 -0
  209. mirascope/ops/_internal/types.py +13 -0
  210. mirascope/ops/_internal/utils.py +75 -0
  211. mirascope/ops/_internal/versioned_calls.py +512 -0
  212. mirascope/ops/_internal/versioned_functions.py +346 -0
  213. mirascope/ops/_internal/versioning.py +303 -0
  214. mirascope/ops/exceptions.py +21 -0
  215. {mirascope-2.0.0a2.dist-info → mirascope-2.0.0a4.dist-info}/METADATA +78 -3
  216. mirascope-2.0.0a4.dist-info/RECORD +247 -0
  217. {mirascope-2.0.0a2.dist-info → mirascope-2.0.0a4.dist-info}/WHEEL +1 -1
  218. mirascope/graphs/__init__.py +0 -22
  219. mirascope/graphs/finite_state_machine.py +0 -625
  220. mirascope/llm/agents/__init__.py +0 -15
  221. mirascope/llm/agents/agent.py +0 -97
  222. mirascope/llm/agents/agent_template.py +0 -45
  223. mirascope/llm/agents/decorator.py +0 -176
  224. mirascope/llm/calls/base_call.py +0 -33
  225. mirascope/llm/clients/__init__.py +0 -34
  226. mirascope/llm/clients/anthropic/__init__.py +0 -25
  227. mirascope/llm/clients/anthropic/_utils/encode.py +0 -243
  228. mirascope/llm/clients/anthropic/clients.py +0 -819
  229. mirascope/llm/clients/anthropic/model_ids.py +0 -8
  230. mirascope/llm/clients/google/__init__.py +0 -20
  231. mirascope/llm/clients/google/clients.py +0 -853
  232. mirascope/llm/clients/google/model_ids.py +0 -15
  233. mirascope/llm/clients/openai/__init__.py +0 -25
  234. mirascope/llm/clients/openai/completions/__init__.py +0 -28
  235. mirascope/llm/clients/openai/completions/_utils/model_features.py +0 -81
  236. mirascope/llm/clients/openai/completions/clients.py +0 -833
  237. mirascope/llm/clients/openai/completions/model_ids.py +0 -8
  238. mirascope/llm/clients/openai/responses/__init__.py +0 -26
  239. mirascope/llm/clients/openai/responses/_utils/__init__.py +0 -13
  240. mirascope/llm/clients/openai/responses/_utils/model_features.py +0 -87
  241. mirascope/llm/clients/openai/responses/clients.py +0 -832
  242. mirascope/llm/clients/openai/responses/model_ids.py +0 -8
  243. mirascope/llm/clients/openai/shared/__init__.py +0 -7
  244. mirascope/llm/clients/openai/shared/_utils.py +0 -55
  245. mirascope/llm/clients/providers.py +0 -175
  246. mirascope-2.0.0a2.dist-info/RECORD +0 -102
  247. /mirascope/llm/{clients → providers}/base/kwargs.py +0 -0
  248. /mirascope/llm/{clients → providers}/base/params.py +0 -0
  249. /mirascope/llm/{clients/anthropic → providers/google}/_utils/__init__.py +0 -0
  250. /mirascope/llm/{clients → providers}/google/message.py +0 -0
  251. /mirascope/llm/{clients/google → providers/openai/responses}/_utils/__init__.py +0 -0
  252. {mirascope-2.0.0a2.dist-info → mirascope-2.0.0a4.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,87 @@
1
"""Anthropic model information.

This file is auto-generated by scripts/model_features/codegen_anthropic.py
Do not edit manually - run the codegen script to update."""

from typing import Literal

# Known "anthropic/<model>" identifiers accepted by the Anthropic provider.
# Auto-generated: update by re-running the codegen script, not by hand-editing.
AnthropicKnownModels = Literal[
    "anthropic/claude-3-5-haiku",
    "anthropic/claude-3-5-haiku-20241022",
    "anthropic/claude-3-5-haiku-latest",
    "anthropic/claude-3-7-sonnet",
    "anthropic/claude-3-7-sonnet-20250219",
    "anthropic/claude-3-7-sonnet-latest",
    "anthropic/claude-3-haiku",
    "anthropic/claude-3-haiku-20240307",
    "anthropic/claude-3-haiku-latest",
    "anthropic/claude-3-opus",
    "anthropic/claude-3-opus-20240229",
    "anthropic/claude-3-opus-latest",
    "anthropic/claude-haiku-4-5",
    "anthropic/claude-haiku-4-5-0",
    "anthropic/claude-haiku-4-5-0-20251001",
    "anthropic/claude-haiku-4-5-0-latest",
    "anthropic/claude-haiku-4-5-20251001",
    "anthropic/claude-haiku-4-5-latest",
    "anthropic/claude-opus-4",
    "anthropic/claude-opus-4-0",
    "anthropic/claude-opus-4-0-20250514",
    "anthropic/claude-opus-4-0-latest",
    "anthropic/claude-opus-4-1",
    "anthropic/claude-opus-4-1-0",
    "anthropic/claude-opus-4-1-0-20250805",
    "anthropic/claude-opus-4-1-0-latest",
    "anthropic/claude-opus-4-1-20250805",
    "anthropic/claude-opus-4-1-latest",
    "anthropic/claude-opus-4-20250514",
    "anthropic/claude-opus-4-5",
    "anthropic/claude-opus-4-5-0",
    "anthropic/claude-opus-4-5-0-20251101",
    "anthropic/claude-opus-4-5-0-latest",
    "anthropic/claude-opus-4-5-20251101",
    "anthropic/claude-opus-4-5-latest",
    "anthropic/claude-opus-4-latest",
    "anthropic/claude-sonnet-4",
    "anthropic/claude-sonnet-4-0",
    "anthropic/claude-sonnet-4-0-20250514",
    "anthropic/claude-sonnet-4-0-latest",
    "anthropic/claude-sonnet-4-20250514",
    "anthropic/claude-sonnet-4-5",
    "anthropic/claude-sonnet-4-5-0",
    "anthropic/claude-sonnet-4-5-0-20250929",
    "anthropic/claude-sonnet-4-5-0-latest",
    "anthropic/claude-sonnet-4-5-20250929",
    "anthropic/claude-sonnet-4-5-latest",
    "anthropic/claude-sonnet-4-latest",
]
"""Valid Anthropic model IDs."""


# Bare model names (no "anthropic/" prefix, matching `model_name(...)` output)
# that lack strict structured output support. Consulted when deciding whether
# a strict-mode format can be served via the beta API.
MODELS_WITHOUT_STRICT_STRUCTURED_OUTPUTS: set[str] = {
    "claude-3-5-haiku",
    "claude-3-5-haiku-20241022",
    "claude-3-5-haiku-latest",
    "claude-3-7-sonnet",
    "claude-3-7-sonnet-20250219",
    "claude-3-7-sonnet-latest",
    "claude-3-haiku",
    "claude-3-haiku-20240307",
    "claude-3-haiku-latest",
    "claude-3-opus",
    "claude-3-opus-20240229",
    "claude-3-opus-latest",
    "claude-opus-4",
    "claude-opus-4-0",
    "claude-opus-4-0-20250514",
    "claude-opus-4-0-latest",
    "claude-opus-4-20250514",
    "claude-opus-4-latest",
    "claude-sonnet-4",
    "claude-sonnet-4-0",
    "claude-sonnet-4-0-20250514",
    "claude-sonnet-4-0-latest",
    "claude-sonnet-4-20250514",
    "claude-sonnet-4-latest",
}
"""Models that do not support strict structured outputs (strict mode tools)."""
@@ -0,0 +1,416 @@
1
+ """Anthropic client implementation."""
2
+
3
+ from collections.abc import Sequence
4
+ from typing_extensions import Unpack
5
+
6
+ from anthropic import Anthropic, AsyncAnthropic
7
+
8
+ from ...context import Context, DepsT
9
+ from ...formatting import Format, FormattableT, resolve_format
10
+ from ...messages import Message
11
+ from ...responses import (
12
+ AsyncContextResponse,
13
+ AsyncContextStreamResponse,
14
+ AsyncResponse,
15
+ AsyncStreamResponse,
16
+ ContextResponse,
17
+ ContextStreamResponse,
18
+ Response,
19
+ StreamResponse,
20
+ )
21
+ from ...tools import (
22
+ AsyncContextTool,
23
+ AsyncContextToolkit,
24
+ AsyncTool,
25
+ AsyncToolkit,
26
+ ContextTool,
27
+ ContextToolkit,
28
+ Tool,
29
+ Toolkit,
30
+ )
31
+ from ..base import BaseProvider, Params
32
+ from . import _utils
33
+ from .beta_provider import AnthropicBetaProvider
34
+ from .model_id import AnthropicModelId, model_name
35
+ from .model_info import MODELS_WITHOUT_STRICT_STRUCTURED_OUTPUTS
36
+
37
+
38
def _should_use_beta(
    model_id: AnthropicModelId,
    format: type[FormattableT] | Format[FormattableT] | None,
) -> bool:
    """Decide whether the beta API should serve this request.

    The beta provider is used only when the requested format resolves to
    strict mode AND the model is not on the known list of models lacking
    strict structured output support.
    """
    resolved = resolve_format(format, default_mode=_utils.DEFAULT_FORMAT_MODE)
    wants_strict = resolved is not None and resolved.mode == "strict"
    return (
        wants_strict
        and model_name(model_id) not in MODELS_WITHOUT_STRICT_STRUCTURED_OUTPUTS
    )
51
+
52
+
53
class AnthropicProvider(BaseProvider[Anthropic]):
    """The client for the Anthropic LLM model.

    Wraps the Anthropic SDK (sync `Anthropic` and async `AsyncAnthropic`
    clients) and delegates to `AnthropicBetaProvider` whenever
    `_should_use_beta` determines that strict structured outputs require
    the beta Messages API.
    """

    # Provider identifier reported on responses (provider_id="anthropic").
    id = "anthropic"
    # Model-ID scope prefix handled by this provider ("anthropic/..." IDs).
    default_scope = "anthropic/"
    # Delegate used for strict structured outputs via the beta Messages API.
    _beta_provider: AnthropicBetaProvider

    def __init__(
        self, *, api_key: str | None = None, base_url: str | None = None
    ) -> None:
        """Initialize the Anthropic client.

        Args:
            api_key: Anthropic API key; when None the SDK's own resolution
                (e.g. environment variables) applies — TODO confirm.
            base_url: Optional override for the API endpoint.
        """
        self.client = Anthropic(api_key=api_key, base_url=base_url)
        self.async_client = AsyncAnthropic(api_key=api_key, base_url=base_url)
        # The beta delegate is constructed with the same credentials so
        # beta-path requests hit the same account/endpoint.
        self._beta_provider = AnthropicBetaProvider(api_key=api_key, base_url=base_url)

    def _call(
        self,
        *,
        model_id: AnthropicModelId,
        messages: Sequence[Message],
        tools: Sequence[Tool] | Toolkit | None = None,
        format: type[FormattableT] | Format[FormattableT] | None = None,
        **params: Unpack[Params],
    ) -> Response | Response[FormattableT]:
        """Generate an `llm.Response` by synchronously calling the Anthropic Messages API."""
        # Strict-mode structured outputs are served by the beta provider.
        if _should_use_beta(model_id, format):
            return self._beta_provider.call(
                model_id=model_id,
                messages=messages,
                tools=tools,
                format=format,
                **params,
            )

        # Translate mirascope messages/tools/format/params into SDK kwargs.
        input_messages, resolved_format, kwargs = _utils.encode_request(
            model_id=model_id,
            messages=messages,
            tools=tools,
            format=format,
            params=params,
        )
        anthropic_response = self.client.messages.create(**kwargs)
        # Decode the raw SDK response into mirascope message/usage types.
        assistant_message, finish_reason, usage = _utils.decode_response(
            anthropic_response, model_id
        )
        return Response(
            raw=anthropic_response,
            provider_id="anthropic",
            model_id=model_id,
            provider_model_name=model_name(model_id),
            params=params,
            tools=tools,
            input_messages=input_messages,
            assistant_message=assistant_message,
            finish_reason=finish_reason,
            usage=usage,
            format=resolved_format,
        )

    def _context_call(
        self,
        *,
        ctx: Context[DepsT],
        model_id: AnthropicModelId,
        messages: Sequence[Message],
        tools: Sequence[Tool | ContextTool[DepsT]]
        | ContextToolkit[DepsT]
        | None = None,
        format: type[FormattableT] | Format[FormattableT] | None = None,
        **params: Unpack[Params],
    ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
        """Generate an `llm.ContextResponse` by synchronously calling the Anthropic Messages API."""
        if _should_use_beta(model_id, format):
            return self._beta_provider.context_call(
                ctx=ctx,
                model_id=model_id,
                messages=messages,
                tools=tools,
                format=format,
                **params,
            )

        # NOTE(review): `ctx` is only forwarded on the beta path; the
        # non-beta request below does not use it directly.
        input_messages, resolved_format, kwargs = _utils.encode_request(
            model_id=model_id,
            messages=messages,
            tools=tools,
            format=format,
            params=params,
        )
        anthropic_response = self.client.messages.create(**kwargs)
        assistant_message, finish_reason, usage = _utils.decode_response(
            anthropic_response, model_id
        )
        return ContextResponse(
            raw=anthropic_response,
            provider_id="anthropic",
            model_id=model_id,
            provider_model_name=model_name(model_id),
            params=params,
            tools=tools,
            input_messages=input_messages,
            assistant_message=assistant_message,
            finish_reason=finish_reason,
            usage=usage,
            format=resolved_format,
        )

    async def _call_async(
        self,
        *,
        model_id: AnthropicModelId,
        messages: Sequence[Message],
        tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
        format: type[FormattableT] | Format[FormattableT] | None = None,
        **params: Unpack[Params],
    ) -> AsyncResponse | AsyncResponse[FormattableT]:
        """Generate an `llm.AsyncResponse` by asynchronously calling the Anthropic Messages API."""
        if _should_use_beta(model_id, format):
            return await self._beta_provider.call_async(
                model_id=model_id,
                messages=messages,
                tools=tools,
                format=format,
                **params,
            )

        input_messages, resolved_format, kwargs = _utils.encode_request(
            model_id=model_id,
            messages=messages,
            tools=tools,
            format=format,
            params=params,
        )
        # Same flow as `_call`, but through the async SDK client.
        anthropic_response = await self.async_client.messages.create(**kwargs)
        assistant_message, finish_reason, usage = _utils.decode_response(
            anthropic_response, model_id
        )
        return AsyncResponse(
            raw=anthropic_response,
            provider_id="anthropic",
            model_id=model_id,
            provider_model_name=model_name(model_id),
            params=params,
            tools=tools,
            input_messages=input_messages,
            assistant_message=assistant_message,
            finish_reason=finish_reason,
            usage=usage,
            format=resolved_format,
        )

    async def _context_call_async(
        self,
        *,
        ctx: Context[DepsT],
        model_id: AnthropicModelId,
        messages: Sequence[Message],
        tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
        | AsyncContextToolkit[DepsT]
        | None = None,
        format: type[FormattableT] | Format[FormattableT] | None = None,
        **params: Unpack[Params],
    ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
        """Generate an `llm.AsyncContextResponse` by asynchronously calling the Anthropic Messages API."""
        if _should_use_beta(model_id, format):
            return await self._beta_provider.context_call_async(
                ctx=ctx,
                model_id=model_id,
                messages=messages,
                tools=tools,
                format=format,
                **params,
            )

        input_messages, resolved_format, kwargs = _utils.encode_request(
            model_id=model_id,
            messages=messages,
            tools=tools,
            format=format,
            params=params,
        )
        anthropic_response = await self.async_client.messages.create(**kwargs)
        assistant_message, finish_reason, usage = _utils.decode_response(
            anthropic_response, model_id
        )
        return AsyncContextResponse(
            raw=anthropic_response,
            provider_id="anthropic",
            model_id=model_id,
            provider_model_name=model_name(model_id),
            params=params,
            tools=tools,
            input_messages=input_messages,
            assistant_message=assistant_message,
            finish_reason=finish_reason,
            usage=usage,
            format=resolved_format,
        )

    def _stream(
        self,
        *,
        model_id: AnthropicModelId,
        messages: Sequence[Message],
        tools: Sequence[Tool] | Toolkit | None = None,
        format: type[FormattableT] | Format[FormattableT] | None = None,
        **params: Unpack[Params],
    ) -> StreamResponse | StreamResponse[FormattableT]:
        """Generate an `llm.StreamResponse` by synchronously streaming from the Anthropic Messages API."""
        if _should_use_beta(model_id, format):
            return self._beta_provider.stream(
                model_id=model_id,
                messages=messages,
                tools=tools,
                format=format,
                **params,
            )

        input_messages, resolved_format, kwargs = _utils.encode_request(
            model_id=model_id,
            messages=messages,
            tools=tools,
            format=format,
            params=params,
        )
        # Streaming: wrap the SDK stream in a chunk iterator; no `raw`
        # response object exists until the stream is consumed.
        anthropic_stream = self.client.messages.stream(**kwargs)
        chunk_iterator = _utils.decode_stream(anthropic_stream)
        return StreamResponse(
            provider_id="anthropic",
            model_id=model_id,
            provider_model_name=model_name(model_id),
            params=params,
            tools=tools,
            input_messages=input_messages,
            chunk_iterator=chunk_iterator,
            format=resolved_format,
        )

    def _context_stream(
        self,
        *,
        ctx: Context[DepsT],
        model_id: AnthropicModelId,
        messages: Sequence[Message],
        tools: Sequence[Tool | ContextTool[DepsT]]
        | ContextToolkit[DepsT]
        | None = None,
        format: type[FormattableT] | Format[FormattableT] | None = None,
        **params: Unpack[Params],
    ) -> ContextStreamResponse[DepsT] | ContextStreamResponse[DepsT, FormattableT]:
        """Generate an `llm.ContextStreamResponse` by synchronously streaming from the Anthropic Messages API."""
        if _should_use_beta(model_id, format):
            return self._beta_provider.context_stream(
                ctx=ctx,
                model_id=model_id,
                messages=messages,
                tools=tools,
                format=format,
                **params,
            )

        input_messages, resolved_format, kwargs = _utils.encode_request(
            model_id=model_id,
            messages=messages,
            tools=tools,
            format=format,
            params=params,
        )
        anthropic_stream = self.client.messages.stream(**kwargs)
        chunk_iterator = _utils.decode_stream(anthropic_stream)
        return ContextStreamResponse(
            provider_id="anthropic",
            model_id=model_id,
            provider_model_name=model_name(model_id),
            params=params,
            tools=tools,
            input_messages=input_messages,
            chunk_iterator=chunk_iterator,
            format=resolved_format,
        )

    async def _stream_async(
        self,
        *,
        model_id: AnthropicModelId,
        messages: Sequence[Message],
        tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
        format: type[FormattableT] | Format[FormattableT] | None = None,
        **params: Unpack[Params],
    ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
        """Generate an `llm.AsyncStreamResponse` by asynchronously streaming from the Anthropic Messages API."""
        if _should_use_beta(model_id, format):
            return await self._beta_provider.stream_async(
                model_id=model_id,
                messages=messages,
                tools=tools,
                format=format,
                **params,
            )
        input_messages, resolved_format, kwargs = _utils.encode_request(
            model_id=model_id,
            messages=messages,
            tools=tools,
            format=format,
            params=params,
        )
        # `stream(...)` returns a stream manager; it is not awaited here —
        # consumption happens inside the decoded chunk iterator.
        anthropic_stream = self.async_client.messages.stream(**kwargs)
        chunk_iterator = _utils.decode_async_stream(anthropic_stream)
        return AsyncStreamResponse(
            provider_id="anthropic",
            model_id=model_id,
            provider_model_name=model_name(model_id),
            params=params,
            tools=tools,
            input_messages=input_messages,
            chunk_iterator=chunk_iterator,
            format=resolved_format,
        )

    async def _context_stream_async(
        self,
        *,
        ctx: Context[DepsT],
        model_id: AnthropicModelId,
        messages: Sequence[Message],
        tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
        | AsyncContextToolkit[DepsT]
        | None = None,
        format: type[FormattableT] | Format[FormattableT] | None = None,
        **params: Unpack[Params],
    ) -> (
        AsyncContextStreamResponse[DepsT]
        | AsyncContextStreamResponse[DepsT, FormattableT]
    ):
        """Generate an `llm.AsyncContextStreamResponse` by asynchronously streaming from the Anthropic Messages API."""
        if _should_use_beta(model_id, format):
            return await self._beta_provider.context_stream_async(
                ctx=ctx,
                model_id=model_id,
                messages=messages,
                tools=tools,
                format=format,
                **params,
            )

        input_messages, resolved_format, kwargs = _utils.encode_request(
            model_id=model_id,
            messages=messages,
            tools=tools,
            format=format,
            params=params,
        )
        anthropic_stream = self.async_client.messages.stream(**kwargs)
        chunk_iterator = _utils.decode_async_stream(anthropic_stream)
        return AsyncContextStreamResponse(
            provider_id="anthropic",
            model_id=model_id,
            provider_model_name=model_name(model_id),
            params=params,
            tools=tools,
            input_messages=input_messages,
            chunk_iterator=chunk_iterator,
            format=resolved_format,
        )
@@ -1,15 +1,15 @@
1
1
  """Base client interfaces and types."""
2
2
 
3
3
  from . import _utils
4
- from .client import BaseClient, ClientT
4
+ from .base_provider import BaseProvider, Provider
5
5
  from .kwargs import BaseKwargs, KwargsT
6
6
  from .params import Params
7
7
 
8
8
  __all__ = [
9
- "BaseClient",
10
9
  "BaseKwargs",
11
- "ClientT",
10
+ "BaseProvider",
12
11
  "KwargsT",
13
12
  "Params",
13
+ "Provider",
14
14
  "_utils",
15
15
  ]
@@ -1,20 +1,35 @@
1
1
  import logging
2
2
  from collections.abc import Generator, Sequence
3
3
  from contextlib import contextmanager
4
- from typing import TYPE_CHECKING, TypeAlias, get_type_hints
4
+ from typing import TYPE_CHECKING, TypeAlias, cast, get_type_hints
5
5
 
6
6
  from ...content import Text
7
7
  from ...messages import AssistantMessage, Message, SystemMessage, UserMessage
8
+ from ..provider_id import ProviderId
8
9
  from .params import Params
9
10
 
10
11
  if TYPE_CHECKING:
11
- from ..providers import ModelId, Provider
12
+ from ..model_id import ModelId
12
13
 
13
14
  logger = logging.getLogger(__name__)
14
15
 
15
16
  SystemMessageContent: TypeAlias = str | None
16
17
 
17
18
 
19
+ def ensure_additional_properties_false(obj: object) -> None:
20
+ """Recursively adds additionalProperties = False to a schema, required for strict mode."""
21
+ if isinstance(obj, dict):
22
+ obj = cast(dict[str, object], obj)
23
+ if obj.get("type") == "object" and "additionalProperties" not in obj:
24
+ obj["additionalProperties"] = False
25
+ for value in obj.values():
26
+ ensure_additional_properties_false(value)
27
+ elif isinstance(obj, list):
28
+ obj = cast(list[object], obj)
29
+ for item in obj:
30
+ ensure_additional_properties_false(item)
31
+
32
+
18
33
  def add_system_instructions(
19
34
  messages: Sequence[Message], additional_system_instructions: str
20
35
  ) -> Sequence[Message]:
@@ -61,8 +76,8 @@ def extract_system_message(
61
76
  This is intended for use in clients where the system message is not included in the
62
77
  input messages, but passed as an additional argument or metadata.
63
78
  """
64
- system_message_content = None
65
- remaining_messages = []
79
+ system_message_content: SystemMessageContent = None
80
+ remaining_messages: list[UserMessage | AssistantMessage] = []
66
81
 
67
82
  for i, message in enumerate(messages):
68
83
  if message.role == "system":
@@ -138,10 +153,10 @@ class SafeParamsAccessor:
138
153
  self,
139
154
  param_name: str,
140
155
  param_value: object,
141
- provider: "Provider",
156
+ provider_id: "ProviderId",
142
157
  model_id: "ModelId | None" = None,
143
158
  ) -> None:
144
- unsupported_by = f"provider: {provider}"
159
+ unsupported_by = f"provider: {provider_id}"
145
160
  if model_id:
146
161
  unsupported_by += f" with model_id: {model_id}"
147
162
  logger.warning(
@@ -159,7 +174,7 @@ class SafeParamsAccessor:
159
174
  def ensure_all_params_accessed(
160
175
  *,
161
176
  params: Params,
162
- provider: "Provider",
177
+ provider_id: "ProviderId",
163
178
  unsupported_params: list[str] | None = None,
164
179
  ) -> Generator[SafeParamsAccessor, None, None]:
165
180
  """Context manager that ensures all parameters are accessed.
@@ -185,7 +200,9 @@ def ensure_all_params_accessed(
185
200
  unsupported_params = unsupported_params or []
186
201
  for unsupported in unsupported_params:
187
202
  if (val := params.get(unsupported)) is not None:
188
- accessor.emit_warning_for_unused_param(unsupported, val, provider=provider)
203
+ accessor.emit_warning_for_unused_param(
204
+ unsupported, val, provider_id=provider_id
205
+ )
189
206
  try:
190
207
  yield accessor
191
208
  finally: