mirascope 2.0.0a6__py3-none-any.whl → 2.0.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (226)
  1. mirascope/api/_generated/__init__.py +186 -5
  2. mirascope/api/_generated/annotations/client.py +38 -6
  3. mirascope/api/_generated/annotations/raw_client.py +366 -47
  4. mirascope/api/_generated/annotations/types/annotations_create_response.py +19 -6
  5. mirascope/api/_generated/annotations/types/annotations_get_response.py +19 -6
  6. mirascope/api/_generated/annotations/types/annotations_list_response_annotations_item.py +22 -7
  7. mirascope/api/_generated/annotations/types/annotations_update_response.py +19 -6
  8. mirascope/api/_generated/api_keys/__init__.py +12 -2
  9. mirascope/api/_generated/api_keys/client.py +107 -6
  10. mirascope/api/_generated/api_keys/raw_client.py +486 -38
  11. mirascope/api/_generated/api_keys/types/__init__.py +7 -1
  12. mirascope/api/_generated/api_keys/types/api_keys_list_all_for_org_response_item.py +40 -0
  13. mirascope/api/_generated/client.py +36 -0
  14. mirascope/api/_generated/docs/raw_client.py +71 -9
  15. mirascope/api/_generated/environment.py +3 -3
  16. mirascope/api/_generated/environments/__init__.py +6 -0
  17. mirascope/api/_generated/environments/client.py +158 -9
  18. mirascope/api/_generated/environments/raw_client.py +620 -52
  19. mirascope/api/_generated/environments/types/__init__.py +10 -0
  20. mirascope/api/_generated/environments/types/environments_get_analytics_response.py +60 -0
  21. mirascope/api/_generated/environments/types/environments_get_analytics_response_top_functions_item.py +24 -0
  22. mirascope/api/_generated/{organizations/types/organizations_credits_response.py → environments/types/environments_get_analytics_response_top_models_item.py} +6 -3
  23. mirascope/api/_generated/errors/__init__.py +6 -0
  24. mirascope/api/_generated/errors/bad_request_error.py +5 -2
  25. mirascope/api/_generated/errors/conflict_error.py +5 -2
  26. mirascope/api/_generated/errors/payment_required_error.py +15 -0
  27. mirascope/api/_generated/errors/service_unavailable_error.py +14 -0
  28. mirascope/api/_generated/errors/too_many_requests_error.py +15 -0
  29. mirascope/api/_generated/functions/__init__.py +10 -0
  30. mirascope/api/_generated/functions/client.py +222 -8
  31. mirascope/api/_generated/functions/raw_client.py +975 -134
  32. mirascope/api/_generated/functions/types/__init__.py +28 -4
  33. mirascope/api/_generated/functions/types/functions_get_by_env_response.py +53 -0
  34. mirascope/api/_generated/functions/types/functions_get_by_env_response_dependencies_value.py +22 -0
  35. mirascope/api/_generated/functions/types/functions_list_by_env_response.py +25 -0
  36. mirascope/api/_generated/functions/types/functions_list_by_env_response_functions_item.py +56 -0
  37. mirascope/api/_generated/functions/types/functions_list_by_env_response_functions_item_dependencies_value.py +22 -0
  38. mirascope/api/_generated/health/raw_client.py +74 -10
  39. mirascope/api/_generated/organization_invitations/__init__.py +33 -0
  40. mirascope/api/_generated/organization_invitations/client.py +546 -0
  41. mirascope/api/_generated/organization_invitations/raw_client.py +1519 -0
  42. mirascope/api/_generated/organization_invitations/types/__init__.py +53 -0
  43. mirascope/api/_generated/organization_invitations/types/organization_invitations_accept_response.py +34 -0
  44. mirascope/api/_generated/organization_invitations/types/organization_invitations_accept_response_role.py +7 -0
  45. mirascope/api/_generated/organization_invitations/types/organization_invitations_create_request_role.py +7 -0
  46. mirascope/api/_generated/organization_invitations/types/organization_invitations_create_response.py +48 -0
  47. mirascope/api/_generated/organization_invitations/types/organization_invitations_create_response_role.py +7 -0
  48. mirascope/api/_generated/organization_invitations/types/organization_invitations_create_response_status.py +7 -0
  49. mirascope/api/_generated/organization_invitations/types/organization_invitations_get_response.py +48 -0
  50. mirascope/api/_generated/organization_invitations/types/organization_invitations_get_response_role.py +7 -0
  51. mirascope/api/_generated/organization_invitations/types/organization_invitations_get_response_status.py +7 -0
  52. mirascope/api/_generated/organization_invitations/types/organization_invitations_list_response_item.py +48 -0
  53. mirascope/api/_generated/organization_invitations/types/organization_invitations_list_response_item_role.py +7 -0
  54. mirascope/api/_generated/organization_invitations/types/organization_invitations_list_response_item_status.py +7 -0
  55. mirascope/api/_generated/organization_memberships/__init__.py +19 -0
  56. mirascope/api/_generated/organization_memberships/client.py +302 -0
  57. mirascope/api/_generated/organization_memberships/raw_client.py +736 -0
  58. mirascope/api/_generated/organization_memberships/types/__init__.py +27 -0
  59. mirascope/api/_generated/organization_memberships/types/organization_memberships_list_response_item.py +33 -0
  60. mirascope/api/_generated/organization_memberships/types/organization_memberships_list_response_item_role.py +7 -0
  61. mirascope/api/_generated/organization_memberships/types/organization_memberships_update_request_role.py +7 -0
  62. mirascope/api/_generated/organization_memberships/types/organization_memberships_update_response.py +31 -0
  63. mirascope/api/_generated/organization_memberships/types/organization_memberships_update_response_role.py +7 -0
  64. mirascope/api/_generated/organizations/__init__.py +26 -2
  65. mirascope/api/_generated/organizations/client.py +442 -20
  66. mirascope/api/_generated/organizations/raw_client.py +1763 -164
  67. mirascope/api/_generated/organizations/types/__init__.py +48 -2
  68. mirascope/api/_generated/organizations/types/organizations_create_payment_intent_response.py +24 -0
  69. mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_request_target_plan.py +7 -0
  70. mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_response.py +47 -0
  71. mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_response_validation_errors_item.py +33 -0
  72. mirascope/api/_generated/organizations/types/organizations_preview_subscription_change_response_validation_errors_item_resource.py +7 -0
  73. mirascope/api/_generated/organizations/types/organizations_router_balance_response.py +24 -0
  74. mirascope/api/_generated/organizations/types/organizations_subscription_response.py +53 -0
  75. mirascope/api/_generated/organizations/types/organizations_subscription_response_current_plan.py +7 -0
  76. mirascope/api/_generated/organizations/types/organizations_subscription_response_payment_method.py +26 -0
  77. mirascope/api/_generated/organizations/types/organizations_subscription_response_scheduled_change.py +34 -0
  78. mirascope/api/_generated/organizations/types/organizations_subscription_response_scheduled_change_target_plan.py +7 -0
  79. mirascope/api/_generated/organizations/types/organizations_update_subscription_request_target_plan.py +7 -0
  80. mirascope/api/_generated/organizations/types/organizations_update_subscription_response.py +35 -0
  81. mirascope/api/_generated/project_memberships/__init__.py +25 -0
  82. mirascope/api/_generated/project_memberships/client.py +437 -0
  83. mirascope/api/_generated/project_memberships/raw_client.py +1039 -0
  84. mirascope/api/_generated/project_memberships/types/__init__.py +29 -0
  85. mirascope/api/_generated/project_memberships/types/project_memberships_create_request_role.py +7 -0
  86. mirascope/api/_generated/project_memberships/types/project_memberships_create_response.py +35 -0
  87. mirascope/api/_generated/project_memberships/types/project_memberships_create_response_role.py +7 -0
  88. mirascope/api/_generated/project_memberships/types/project_memberships_list_response_item.py +33 -0
  89. mirascope/api/_generated/project_memberships/types/project_memberships_list_response_item_role.py +7 -0
  90. mirascope/api/_generated/project_memberships/types/project_memberships_update_request_role.py +7 -0
  91. mirascope/api/_generated/project_memberships/types/project_memberships_update_response.py +35 -0
  92. mirascope/api/_generated/project_memberships/types/project_memberships_update_response_role.py +7 -0
  93. mirascope/api/_generated/projects/raw_client.py +415 -58
  94. mirascope/api/_generated/reference.md +2767 -397
  95. mirascope/api/_generated/tags/__init__.py +19 -0
  96. mirascope/api/_generated/tags/client.py +504 -0
  97. mirascope/api/_generated/tags/raw_client.py +1288 -0
  98. mirascope/api/_generated/tags/types/__init__.py +17 -0
  99. mirascope/api/_generated/tags/types/tags_create_response.py +41 -0
  100. mirascope/api/_generated/tags/types/tags_get_response.py +41 -0
  101. mirascope/api/_generated/tags/types/tags_list_response.py +23 -0
  102. mirascope/api/_generated/tags/types/tags_list_response_tags_item.py +41 -0
  103. mirascope/api/_generated/tags/types/tags_update_response.py +41 -0
  104. mirascope/api/_generated/token_cost/__init__.py +7 -0
  105. mirascope/api/_generated/token_cost/client.py +160 -0
  106. mirascope/api/_generated/token_cost/raw_client.py +264 -0
  107. mirascope/api/_generated/token_cost/types/__init__.py +8 -0
  108. mirascope/api/_generated/token_cost/types/token_cost_calculate_request_usage.py +54 -0
  109. mirascope/api/_generated/token_cost/types/token_cost_calculate_response.py +52 -0
  110. mirascope/api/_generated/traces/__init__.py +20 -0
  111. mirascope/api/_generated/traces/client.py +543 -0
  112. mirascope/api/_generated/traces/raw_client.py +1366 -96
  113. mirascope/api/_generated/traces/types/__init__.py +28 -0
  114. mirascope/api/_generated/traces/types/traces_get_analytics_summary_response.py +6 -0
  115. mirascope/api/_generated/traces/types/traces_get_trace_detail_by_env_response.py +33 -0
  116. mirascope/api/_generated/traces/types/traces_get_trace_detail_by_env_response_spans_item.py +88 -0
  117. mirascope/api/_generated/traces/types/traces_get_trace_detail_response_spans_item.py +0 -2
  118. mirascope/api/_generated/traces/types/traces_list_by_function_hash_response.py +25 -0
  119. mirascope/api/_generated/traces/types/traces_list_by_function_hash_response_traces_item.py +44 -0
  120. mirascope/api/_generated/traces/types/traces_search_by_env_request_attribute_filters_item.py +26 -0
  121. mirascope/api/_generated/traces/types/traces_search_by_env_request_attribute_filters_item_operator.py +7 -0
  122. mirascope/api/_generated/traces/types/traces_search_by_env_request_sort_by.py +7 -0
  123. mirascope/api/_generated/traces/types/traces_search_by_env_request_sort_order.py +7 -0
  124. mirascope/api/_generated/traces/types/traces_search_by_env_response.py +26 -0
  125. mirascope/api/_generated/traces/types/traces_search_by_env_response_spans_item.py +50 -0
  126. mirascope/api/_generated/traces/types/traces_search_response_spans_item.py +10 -1
  127. mirascope/api/_generated/types/__init__.py +32 -2
  128. mirascope/api/_generated/types/bad_request_error_body.py +50 -0
  129. mirascope/api/_generated/types/date.py +3 -0
  130. mirascope/api/_generated/types/immutable_resource_error.py +22 -0
  131. mirascope/api/_generated/types/internal_server_error_body.py +3 -3
  132. mirascope/api/_generated/types/plan_limit_exceeded_error.py +32 -0
  133. mirascope/api/_generated/types/plan_limit_exceeded_error_tag.py +7 -0
  134. mirascope/api/_generated/types/pricing_unavailable_error.py +23 -0
  135. mirascope/api/_generated/types/rate_limit_error.py +31 -0
  136. mirascope/api/_generated/types/rate_limit_error_tag.py +5 -0
  137. mirascope/api/_generated/types/service_unavailable_error_body.py +24 -0
  138. mirascope/api/_generated/types/service_unavailable_error_tag.py +7 -0
  139. mirascope/api/_generated/types/subscription_past_due_error.py +31 -0
  140. mirascope/api/_generated/types/subscription_past_due_error_tag.py +7 -0
  141. mirascope/api/settings.py +19 -1
  142. mirascope/llm/__init__.py +53 -10
  143. mirascope/llm/calls/__init__.py +2 -1
  144. mirascope/llm/calls/calls.py +3 -1
  145. mirascope/llm/calls/decorator.py +21 -7
  146. mirascope/llm/content/tool_output.py +22 -5
  147. mirascope/llm/exceptions.py +284 -71
  148. mirascope/llm/formatting/__init__.py +17 -0
  149. mirascope/llm/formatting/format.py +112 -35
  150. mirascope/llm/formatting/output_parser.py +178 -0
  151. mirascope/llm/formatting/partial.py +80 -7
  152. mirascope/llm/formatting/primitives.py +192 -0
  153. mirascope/llm/formatting/types.py +20 -8
  154. mirascope/llm/messages/__init__.py +3 -0
  155. mirascope/llm/messages/_utils.py +34 -0
  156. mirascope/llm/models/__init__.py +5 -0
  157. mirascope/llm/models/models.py +137 -69
  158. mirascope/llm/{providers/base → models}/params.py +7 -57
  159. mirascope/llm/models/thinking_config.py +61 -0
  160. mirascope/llm/prompts/_utils.py +0 -32
  161. mirascope/llm/prompts/decorator.py +16 -5
  162. mirascope/llm/prompts/prompts.py +131 -68
  163. mirascope/llm/providers/__init__.py +1 -4
  164. mirascope/llm/providers/anthropic/_utils/__init__.py +2 -0
  165. mirascope/llm/providers/anthropic/_utils/beta_decode.py +18 -9
  166. mirascope/llm/providers/anthropic/_utils/beta_encode.py +62 -13
  167. mirascope/llm/providers/anthropic/_utils/decode.py +18 -9
  168. mirascope/llm/providers/anthropic/_utils/encode.py +26 -7
  169. mirascope/llm/providers/anthropic/_utils/errors.py +2 -2
  170. mirascope/llm/providers/anthropic/beta_provider.py +64 -18
  171. mirascope/llm/providers/anthropic/provider.py +91 -33
  172. mirascope/llm/providers/base/__init__.py +0 -4
  173. mirascope/llm/providers/base/_utils.py +55 -6
  174. mirascope/llm/providers/base/base_provider.py +116 -37
  175. mirascope/llm/providers/google/_utils/__init__.py +2 -0
  176. mirascope/llm/providers/google/_utils/decode.py +20 -7
  177. mirascope/llm/providers/google/_utils/encode.py +26 -7
  178. mirascope/llm/providers/google/_utils/errors.py +3 -2
  179. mirascope/llm/providers/google/provider.py +64 -18
  180. mirascope/llm/providers/mirascope/_utils.py +13 -17
  181. mirascope/llm/providers/mirascope/provider.py +49 -18
  182. mirascope/llm/providers/mlx/_utils.py +7 -2
  183. mirascope/llm/providers/mlx/encoding/base.py +5 -2
  184. mirascope/llm/providers/mlx/encoding/transformers.py +5 -2
  185. mirascope/llm/providers/mlx/mlx.py +23 -6
  186. mirascope/llm/providers/mlx/provider.py +42 -13
  187. mirascope/llm/providers/openai/_utils/errors.py +2 -2
  188. mirascope/llm/providers/openai/completions/_utils/encode.py +20 -16
  189. mirascope/llm/providers/openai/completions/base_provider.py +40 -11
  190. mirascope/llm/providers/openai/provider.py +40 -10
  191. mirascope/llm/providers/openai/responses/_utils/__init__.py +2 -0
  192. mirascope/llm/providers/openai/responses/_utils/decode.py +19 -6
  193. mirascope/llm/providers/openai/responses/_utils/encode.py +22 -10
  194. mirascope/llm/providers/openai/responses/provider.py +56 -18
  195. mirascope/llm/providers/provider_registry.py +93 -19
  196. mirascope/llm/responses/__init__.py +6 -1
  197. mirascope/llm/responses/_utils.py +102 -12
  198. mirascope/llm/responses/base_response.py +5 -2
  199. mirascope/llm/responses/base_stream_response.py +115 -25
  200. mirascope/llm/responses/response.py +2 -1
  201. mirascope/llm/responses/root_response.py +89 -17
  202. mirascope/llm/responses/stream_response.py +6 -9
  203. mirascope/llm/tools/decorator.py +9 -4
  204. mirascope/llm/tools/tool_schema.py +12 -6
  205. mirascope/llm/tools/toolkit.py +35 -27
  206. mirascope/llm/tools/tools.py +45 -20
  207. mirascope/ops/__init__.py +4 -0
  208. mirascope/ops/_internal/configuration.py +82 -31
  209. mirascope/ops/_internal/exporters/exporters.py +64 -11
  210. mirascope/ops/_internal/instrumentation/llm/common.py +530 -0
  211. mirascope/ops/_internal/instrumentation/llm/cost.py +190 -0
  212. mirascope/ops/_internal/instrumentation/llm/encode.py +1 -1
  213. mirascope/ops/_internal/instrumentation/llm/llm.py +116 -1242
  214. mirascope/ops/_internal/instrumentation/llm/model.py +1798 -0
  215. mirascope/ops/_internal/instrumentation/llm/response.py +521 -0
  216. mirascope/ops/_internal/instrumentation/llm/serialize.py +300 -0
  217. mirascope/ops/_internal/protocols.py +83 -1
  218. mirascope/ops/_internal/traced_calls.py +4 -0
  219. mirascope/ops/_internal/traced_functions.py +118 -8
  220. mirascope/ops/_internal/tracing.py +78 -1
  221. mirascope/ops/_internal/utils.py +52 -4
  222. {mirascope-2.0.0a6.dist-info → mirascope-2.0.1.dist-info}/METADATA +12 -11
  223. mirascope-2.0.1.dist-info/RECORD +423 -0
  224. {mirascope-2.0.0a6.dist-info → mirascope-2.0.1.dist-info}/licenses/LICENSE +1 -1
  225. mirascope-2.0.0a6.dist-info/RECORD +0 -316
  226. {mirascope-2.0.0a6.dist-info → mirascope-2.0.1.dist-info}/WHEEL +0 -0
mirascope/llm/providers/anthropic/provider.py
@@ -1,12 +1,15 @@
 """Anthropic client implementation."""
 
+from __future__ import annotations
+
 from collections.abc import Sequence
+from typing import TYPE_CHECKING
 from typing_extensions import Unpack
 
 from anthropic import Anthropic, AsyncAnthropic
 
 from ...context import Context, DepsT
-from ...formatting import Format, FormattableT, resolve_format
+from ...formatting import Format, FormattableT, OutputParser, resolve_format
 from ...messages import Message
 from ...responses import (
     AsyncContextResponse,
@@ -19,35 +22,50 @@ from ...responses import (
     StreamResponse,
 )
 from ...tools import (
+    AnyToolSchema,
     AsyncContextTool,
     AsyncContextToolkit,
     AsyncTool,
     AsyncToolkit,
+    BaseToolkit,
     ContextTool,
     ContextToolkit,
     Tool,
     Toolkit,
 )
-from ..base import BaseProvider, Params
+from ..base import BaseProvider, _utils as _base_utils
 from . import _utils
 from .beta_provider import AnthropicBetaProvider
 from .model_id import AnthropicModelId, model_name
 from .model_info import MODELS_WITHOUT_STRICT_STRUCTURED_OUTPUTS
 
+if TYPE_CHECKING:
+    from ...models import Params
+
 
 def _should_use_beta(
     model_id: AnthropicModelId,
-    format: type[FormattableT] | Format[FormattableT] | None,
+    format: type[FormattableT]
+    | Format[FormattableT]
+    | OutputParser[FormattableT]
+    | None,
+    tools: Sequence[AnyToolSchema] | BaseToolkit[AnyToolSchema] | None,
 ) -> bool:
-    """Determine whether to use the beta API based on format mode.
+    """Determine whether to use the beta API based on format mode or strict tools.
 
-    If the format resolves to strict mode, and the model plausibly has
-    strict structured output support, then we will use the beta provider.
+    If the format resolves to strict mode, or any tools have strict=True,
+    and the model plausibly has strict structured output support, then we
+    will use the beta provider.
     """
-    resolved = resolve_format(format, default_mode=_utils.DEFAULT_FORMAT_MODE)
-    if resolved is None or resolved.mode != "strict":
+    if model_name(model_id) in MODELS_WITHOUT_STRICT_STRUCTURED_OUTPUTS:
         return False
-    return model_name(model_id) not in MODELS_WITHOUT_STRICT_STRUCTURED_OUTPUTS
+
+    # Check if format requires strict mode
+    resolved = resolve_format(format, default_mode=_utils.DEFAULT_FORMAT_MODE)
+    if resolved is not None and resolved.mode == "strict":
+        return True
+
+    return _base_utils.has_strict_tools(tools)
 
 
 class AnthropicProvider(BaseProvider[Anthropic]):
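For reference, the new routing above reduces to roughly the following standalone sketch; the function and parameter names here are illustrative stand-ins, not mirascope APIs:

def should_use_beta_sketch(
    model_supports_strict: bool,   # i.e. model is not in MODELS_WITHOUT_STRICT_STRUCTURED_OUTPUTS
    format_is_strict: bool,        # resolved format mode == "strict"
    any_tool_is_strict: bool,      # some tool schema has strict=True
) -> bool:
    """Mirror of the decision order in _should_use_beta above."""
    if not model_supports_strict:
        return False  # unsupported models never route to the beta API
    if format_is_strict:
        return True  # strict structured output requires the beta API
    return any_tool_is_strict  # strict tools also require the beta API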
@@ -76,11 +94,14 @@ class AnthropicProvider(BaseProvider[Anthropic]):
         model_id: AnthropicModelId,
         messages: Sequence[Message],
         tools: Sequence[Tool] | Toolkit | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> Response | Response[FormattableT]:
         """Generate an `llm.Response` by synchronously calling the Anthropic Messages API."""
-        if _should_use_beta(model_id, format):
+        if _should_use_beta(model_id, format, tools):
             return self._beta_provider.call(
                 model_id=model_id,
                 messages=messages,
@@ -97,8 +118,9 @@ class AnthropicProvider(BaseProvider[Anthropic]):
             params=params,
         )
         anthropic_response = self.client.messages.create(**kwargs)
+        include_thoughts = _utils.get_include_thoughts(params)
         assistant_message, finish_reason, usage = _utils.decode_response(
-            anthropic_response, model_id
+            anthropic_response, model_id, include_thoughts=include_thoughts
         )
         return Response(
             raw=anthropic_response,
@@ -123,11 +145,14 @@ class AnthropicProvider(BaseProvider[Anthropic]):
         tools: Sequence[Tool | ContextTool[DepsT]]
         | ContextToolkit[DepsT]
         | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> ContextResponse[DepsT, None] | ContextResponse[DepsT, FormattableT]:
         """Generate an `llm.ContextResponse` by synchronously calling the Anthropic Messages API."""
-        if _should_use_beta(model_id, format):
+        if _should_use_beta(model_id, format, tools):
             return self._beta_provider.context_call(
                 ctx=ctx,
                 model_id=model_id,
@@ -145,8 +170,9 @@ class AnthropicProvider(BaseProvider[Anthropic]):
             params=params,
         )
         anthropic_response = self.client.messages.create(**kwargs)
+        include_thoughts = _utils.get_include_thoughts(params)
         assistant_message, finish_reason, usage = _utils.decode_response(
-            anthropic_response, model_id
+            anthropic_response, model_id, include_thoughts=include_thoughts
         )
         return ContextResponse(
             raw=anthropic_response,
@@ -168,11 +194,14 @@ class AnthropicProvider(BaseProvider[Anthropic]):
         model_id: AnthropicModelId,
         messages: Sequence[Message],
         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> AsyncResponse | AsyncResponse[FormattableT]:
         """Generate an `llm.AsyncResponse` by asynchronously calling the Anthropic Messages API."""
-        if _should_use_beta(model_id, format):
+        if _should_use_beta(model_id, format, tools):
             return await self._beta_provider.call_async(
                 model_id=model_id,
                 messages=messages,
@@ -189,8 +218,9 @@ class AnthropicProvider(BaseProvider[Anthropic]):
             params=params,
         )
         anthropic_response = await self.async_client.messages.create(**kwargs)
+        include_thoughts = _utils.get_include_thoughts(params)
         assistant_message, finish_reason, usage = _utils.decode_response(
-            anthropic_response, model_id
+            anthropic_response, model_id, include_thoughts=include_thoughts
         )
         return AsyncResponse(
             raw=anthropic_response,
@@ -215,11 +245,14 @@ class AnthropicProvider(BaseProvider[Anthropic]):
         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
         | AsyncContextToolkit[DepsT]
         | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> AsyncContextResponse[DepsT, None] | AsyncContextResponse[DepsT, FormattableT]:
         """Generate an `llm.AsyncContextResponse` by asynchronously calling the Anthropic Messages API."""
-        if _should_use_beta(model_id, format):
+        if _should_use_beta(model_id, format, tools):
             return await self._beta_provider.context_call_async(
                 ctx=ctx,
                 model_id=model_id,
@@ -237,8 +270,9 @@ class AnthropicProvider(BaseProvider[Anthropic]):
             params=params,
         )
         anthropic_response = await self.async_client.messages.create(**kwargs)
+        include_thoughts = _utils.get_include_thoughts(params)
         assistant_message, finish_reason, usage = _utils.decode_response(
-            anthropic_response, model_id
+            anthropic_response, model_id, include_thoughts=include_thoughts
         )
         return AsyncContextResponse(
             raw=anthropic_response,
@@ -260,11 +294,14 @@ class AnthropicProvider(BaseProvider[Anthropic]):
         model_id: AnthropicModelId,
         messages: Sequence[Message],
         tools: Sequence[Tool] | Toolkit | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> StreamResponse | StreamResponse[FormattableT]:
         """Generate an `llm.StreamResponse` by synchronously streaming from the Anthropic Messages API."""
-        if _should_use_beta(model_id, format):
+        if _should_use_beta(model_id, format, tools):
             return self._beta_provider.stream(
                 model_id=model_id,
                 messages=messages,
@@ -281,7 +318,10 @@ class AnthropicProvider(BaseProvider[Anthropic]):
             params=params,
         )
         anthropic_stream = self.client.messages.stream(**kwargs)
-        chunk_iterator = _utils.decode_stream(anthropic_stream)
+        include_thoughts = _utils.get_include_thoughts(params)
+        chunk_iterator = _utils.decode_stream(
+            anthropic_stream, include_thoughts=include_thoughts
+        )
         return StreamResponse(
             provider_id="anthropic",
             model_id=model_id,
@@ -302,11 +342,14 @@ class AnthropicProvider(BaseProvider[Anthropic]):
         tools: Sequence[Tool | ContextTool[DepsT]]
         | ContextToolkit[DepsT]
         | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> ContextStreamResponse[DepsT] | ContextStreamResponse[DepsT, FormattableT]:
         """Generate an `llm.ContextStreamResponse` by synchronously streaming from the Anthropic Messages API."""
-        if _should_use_beta(model_id, format):
+        if _should_use_beta(model_id, format, tools):
             return self._beta_provider.context_stream(
                 ctx=ctx,
                 model_id=model_id,
@@ -324,7 +367,10 @@ class AnthropicProvider(BaseProvider[Anthropic]):
             params=params,
         )
         anthropic_stream = self.client.messages.stream(**kwargs)
-        chunk_iterator = _utils.decode_stream(anthropic_stream)
+        include_thoughts = _utils.get_include_thoughts(params)
+        chunk_iterator = _utils.decode_stream(
+            anthropic_stream, include_thoughts=include_thoughts
+        )
         return ContextStreamResponse(
             provider_id="anthropic",
             model_id=model_id,
@@ -342,11 +388,14 @@ class AnthropicProvider(BaseProvider[Anthropic]):
         model_id: AnthropicModelId,
         messages: Sequence[Message],
         tools: Sequence[AsyncTool] | AsyncToolkit | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> AsyncStreamResponse | AsyncStreamResponse[FormattableT]:
         """Generate an `llm.AsyncStreamResponse` by asynchronously streaming from the Anthropic Messages API."""
-        if _should_use_beta(model_id, format):
+        if _should_use_beta(model_id, format, tools):
             return await self._beta_provider.stream_async(
                 model_id=model_id,
                 messages=messages,
@@ -362,7 +411,10 @@ class AnthropicProvider(BaseProvider[Anthropic]):
             params=params,
         )
         anthropic_stream = self.async_client.messages.stream(**kwargs)
-        chunk_iterator = _utils.decode_async_stream(anthropic_stream)
+        include_thoughts = _utils.get_include_thoughts(params)
+        chunk_iterator = _utils.decode_async_stream(
+            anthropic_stream, include_thoughts=include_thoughts
+        )
         return AsyncStreamResponse(
             provider_id="anthropic",
             model_id=model_id,
@@ -383,14 +435,17 @@ class AnthropicProvider(BaseProvider[Anthropic]):
         tools: Sequence[AsyncTool | AsyncContextTool[DepsT]]
         | AsyncContextToolkit[DepsT]
         | None = None,
-        format: type[FormattableT] | Format[FormattableT] | None = None,
+        format: type[FormattableT]
+        | Format[FormattableT]
+        | OutputParser[FormattableT]
+        | None = None,
         **params: Unpack[Params],
     ) -> (
         AsyncContextStreamResponse[DepsT]
         | AsyncContextStreamResponse[DepsT, FormattableT]
     ):
         """Generate an `llm.AsyncContextStreamResponse` by asynchronously streaming from the Anthropic Messages API."""
-        if _should_use_beta(model_id, format):
+        if _should_use_beta(model_id, format, tools):
             return await self._beta_provider.context_stream_async(
                 ctx=ctx,
                 model_id=model_id,
@@ -408,7 +463,10 @@ class AnthropicProvider(BaseProvider[Anthropic]):
             params=params,
         )
         anthropic_stream = self.async_client.messages.stream(**kwargs)
-        chunk_iterator = _utils.decode_async_stream(anthropic_stream)
+        include_thoughts = _utils.get_include_thoughts(params)
+        chunk_iterator = _utils.decode_async_stream(
+            anthropic_stream, include_thoughts=include_thoughts
+        )
         return AsyncContextStreamResponse(
             provider_id="anthropic",
             model_id=model_id,
mirascope/llm/providers/base/__init__.py
@@ -3,16 +3,12 @@
 from . import _utils
 from .base_provider import BaseProvider, Provider, ProviderErrorMap
 from .kwargs import BaseKwargs, KwargsT
-from .params import Params, ThinkingConfig, ThinkingLevel
 
 __all__ = [
     "BaseKwargs",
     "BaseProvider",
     "KwargsT",
-    "Params",
     "Provider",
     "ProviderErrorMap",
-    "ThinkingConfig",
-    "ThinkingLevel",
     "_utils",
 ]
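Per entries 156-159 in the file list above, Params and the thinking configuration moved from providers/base to the models package (provider.py now imports them via `from ...models import Params`). A minimal sketch of the relocated import; whether these names are re-exported at this exact path is an assumption, and only the include_thoughts key is confirmed by this diff:

from mirascope.llm.models import Params, ThinkingConfig  # assumed new public location

# Params behaves like a TypedDict here; "thinking" is read by SafeParamsAccessor.thinking
# and get_include_thoughts below.
params: Params = {"thinking": {"include_thoughts": True}}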
mirascope/llm/providers/base/_utils.py
@@ -1,3 +1,5 @@
+from __future__ import annotations
+
 import logging
 from collections.abc import Generator, Sequence
 from contextlib import contextmanager
@@ -5,18 +7,44 @@ from typing import TYPE_CHECKING, TypeAlias, cast, get_type_hints
 
 from ...content import Text
 from ...messages import AssistantMessage, Message, SystemMessage, UserMessage
+from ...models.params import (
+    Params,  # Import directly from params.py to avoid circular dependency
+)
+from ...tools import AnyToolSchema, BaseToolkit
 from ..provider_id import ProviderId
-from .params import Params
 
 if TYPE_CHECKING:
+    from ...models import ThinkingConfig
     from ..model_id import ModelId
-    from .params import ThinkingConfig
 
 logger = logging.getLogger(__name__)
 
 SystemMessageContent: TypeAlias = str | None
 
 
+def get_include_thoughts(params: Params) -> bool:
+    """Extract include_thoughts from params thinking config."""
+    thinking_config = params.get("thinking")
+    return (thinking_config or {}).get("include_thoughts", False)
+
+
+def has_strict_tools(
+    tools: Sequence[AnyToolSchema] | BaseToolkit[AnyToolSchema] | None,
+) -> bool:
+    """Check if any tools have strict=True explicitly set.
+
+    Args:
+        tools: The tools to check, either a sequence or a toolkit
+
+    Returns:
+        True if any tool has strict=True, False otherwise
+    """
+    if tools is None:
+        return False
+    tools_list = tools.tools if isinstance(tools, BaseToolkit) else tools
+    return any(tool.strict is True for tool in tools_list)
+
+
 def ensure_additional_properties_false(obj: object) -> None:
     """Recursively adds additionalProperties = False to a schema, required for strict mode."""
     if isinstance(obj, dict):
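A quick illustration of the has_strict_tools rule added above, using a stand-in dataclass rather than mirascope's tool schema types (for demonstration only):

from dataclasses import dataclass

@dataclass
class FakeTool:
    strict: bool | None = None  # stand-in for a tool schema's strict flag

def demo_has_strict_tools(tools: list[FakeTool] | None) -> bool:
    # Same rule as has_strict_tools above: only an explicit strict=True counts.
    if tools is None:
        return False
    return any(tool.strict is True for tool in tools)

assert demo_has_strict_tools(None) is False
assert demo_has_strict_tools([FakeTool(), FakeTool(strict=False)]) is False
assert demo_has_strict_tools([FakeTool(strict=True), FakeTool()]) is True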
@@ -31,6 +59,27 @@ def ensure_additional_properties_false(obj: object) -> None:
             ensure_additional_properties_false(item)
 
 
+def ensure_all_properties_required(obj: object) -> None:
+    """Recursively ensures all properties are in required array, needed for OpenAI strict mode.
+
+    OpenAI's strict mode requires that all properties in an object schema are listed
+    in the 'required' array, even if they have default values.
+    """
+    if isinstance(obj, dict):
+        obj = cast(dict[str, object], obj)
+        if obj.get("type") == "object" and "properties" in obj:
+            properties = obj.get("properties")
+            if isinstance(properties, dict):
+                property_keys = cast(dict[str, object], properties)
+                obj["required"] = list(property_keys.keys())
+        for value in obj.values():
+            ensure_all_properties_required(value)
+    elif isinstance(obj, list):
+        obj = cast(list[object], obj)
+        for item in obj:
+            ensure_all_properties_required(item)
+
+
 def add_system_instructions(
     messages: Sequence[Message], additional_system_instructions: str
 ) -> Sequence[Message]:
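To make the strict-mode requirement concrete, here is roughly what these two schema helpers do when combined; this is a standalone re-implementation for illustration, not an import from mirascope:

def make_strict(obj: object) -> None:
    """Close object schemas and require every property, as the helpers above do."""
    if isinstance(obj, dict):
        if obj.get("type") == "object":
            obj["additionalProperties"] = False
            if isinstance(obj.get("properties"), dict):
                obj["required"] = list(obj["properties"].keys())
        for value in obj.values():
            make_strict(value)
    elif isinstance(obj, list):
        for item in obj:
            make_strict(item)

schema = {
    "type": "object",
    "properties": {
        "name": {"type": "string"},
        "age": {"type": "integer", "default": 0},
    },
}
make_strict(schema)
# schema now carries additionalProperties=False and required=["name", "age"],
# which is what OpenAI-style strict structured outputs expect.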
@@ -139,7 +188,7 @@ class SafeParamsAccessor:
         return self._params.get("stop_sequences")
 
     @property
-    def thinking(self) -> "ThinkingConfig | None":
+    def thinking(self) -> ThinkingConfig | None:
         """Access the thinking parameter."""
         self._unaccessed.discard("thinking")
         return self._params.get("thinking")
@@ -148,8 +197,8 @@ class SafeParamsAccessor:
         self,
         param_name: str,
         param_value: object,
-        provider_id: "ProviderId",
-        model_id: "ModelId | None" = None,
+        provider_id: ProviderId,
+        model_id: ModelId | None = None,
     ) -> None:
         unsupported_by = f"provider: {provider_id}"
         if model_id:
@@ -169,7 +218,7 @@
 def ensure_all_params_accessed(
     *,
     params: Params,
-    provider_id: "ProviderId",
+    provider_id: ProviderId,
     unsupported_params: list[str] | None = None,
 ) -> Generator[SafeParamsAccessor, None, None]:
     """Context manager that ensures all parameters are accessed.