anthropic 0.75.0__py3-none-any.whl → 0.77.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (81)
  1. anthropic/_base_client.py +145 -13
  2. anthropic/_client.py +4 -12
  3. anthropic/_compat.py +3 -3
  4. anthropic/_models.py +16 -1
  5. anthropic/_streaming.py +78 -76
  6. anthropic/_types.py +12 -2
  7. anthropic/_utils/_json.py +35 -0
  8. anthropic/_version.py +1 -1
  9. anthropic/lib/_parse/_response.py +29 -1
  10. anthropic/lib/streaming/__init__.py +3 -0
  11. anthropic/lib/streaming/_messages.py +74 -40
  12. anthropic/lib/streaming/_types.py +42 -2
  13. anthropic/lib/tools/_beta_compaction_control.py +2 -2
  14. anthropic/lib/tools/_beta_runner.py +17 -0
  15. anthropic/resources/beta/messages/messages.py +229 -83
  16. anthropic/resources/messages/messages.py +409 -5
  17. anthropic/types/__init__.py +7 -0
  18. anthropic/types/beta/beta_code_execution_tool_20250522_param.py +1 -0
  19. anthropic/types/beta/beta_code_execution_tool_20250825_param.py +1 -0
  20. anthropic/types/beta/beta_container.py +4 -0
  21. anthropic/types/beta/beta_container_params.py +2 -0
  22. anthropic/types/beta/beta_container_upload_block.py +2 -0
  23. anthropic/types/beta/beta_container_upload_block_param.py +5 -0
  24. anthropic/types/beta/beta_direct_caller.py +2 -0
  25. anthropic/types/beta/beta_direct_caller_param.py +2 -0
  26. anthropic/types/beta/beta_mcp_tool_config_param.py +2 -0
  27. anthropic/types/beta/beta_mcp_tool_default_config_param.py +2 -0
  28. anthropic/types/beta/beta_mcp_toolset_param.py +6 -0
  29. anthropic/types/beta/beta_memory_tool_20250818_param.py +1 -0
  30. anthropic/types/beta/beta_output_config_param.py +15 -1
  31. anthropic/types/beta/beta_server_tool_caller.py +2 -0
  32. anthropic/types/beta/beta_server_tool_caller_param.py +2 -0
  33. anthropic/types/beta/beta_server_tool_use_block.py +4 -4
  34. anthropic/types/beta/beta_skill.py +2 -0
  35. anthropic/types/beta/beta_skill_params.py +2 -0
  36. anthropic/types/beta/beta_tool_bash_20241022_param.py +1 -0
  37. anthropic/types/beta/beta_tool_bash_20250124_param.py +1 -0
  38. anthropic/types/beta/beta_tool_choice_any_param.py +2 -0
  39. anthropic/types/beta/beta_tool_choice_auto_param.py +2 -0
  40. anthropic/types/beta/beta_tool_choice_none_param.py +2 -0
  41. anthropic/types/beta/beta_tool_choice_tool_param.py +2 -0
  42. anthropic/types/beta/beta_tool_computer_use_20241022_param.py +1 -0
  43. anthropic/types/beta/beta_tool_computer_use_20250124_param.py +1 -0
  44. anthropic/types/beta/beta_tool_computer_use_20251124_param.py +1 -0
  45. anthropic/types/beta/beta_tool_param.py +6 -0
  46. anthropic/types/beta/beta_tool_reference_block_param.py +2 -0
  47. anthropic/types/beta/beta_tool_search_tool_bm25_20251119_param.py +1 -0
  48. anthropic/types/beta/beta_tool_search_tool_regex_20251119_param.py +1 -0
  49. anthropic/types/beta/beta_tool_text_editor_20241022_param.py +1 -0
  50. anthropic/types/beta/beta_tool_text_editor_20250124_param.py +1 -0
  51. anthropic/types/beta/beta_tool_text_editor_20250429_param.py +1 -0
  52. anthropic/types/beta/beta_tool_text_editor_20250728_param.py +1 -0
  53. anthropic/types/beta/beta_web_fetch_tool_20250910_param.py +1 -0
  54. anthropic/types/beta/beta_web_search_tool_20250305_param.py +6 -0
  55. anthropic/types/beta/beta_web_search_tool_result_error_code.py +1 -1
  56. anthropic/types/beta/message_count_tokens_params.py +9 -5
  57. anthropic/types/beta/message_create_params.py +9 -5
  58. anthropic/types/beta/messages/batch_create_params.py +2 -9
  59. anthropic/types/beta/messages/beta_message_batch_individual_response.py +4 -0
  60. anthropic/types/json_output_format_param.py +15 -0
  61. anthropic/types/message_count_tokens_params.py +4 -0
  62. anthropic/types/message_create_params.py +4 -0
  63. anthropic/types/messages/message_batch_individual_response.py +4 -0
  64. anthropic/types/output_config_param.py +19 -0
  65. anthropic/types/parsed_message.py +56 -0
  66. anthropic/types/tool_bash_20250124_param.py +3 -0
  67. anthropic/types/tool_choice_any_param.py +2 -0
  68. anthropic/types/tool_choice_auto_param.py +2 -0
  69. anthropic/types/tool_choice_none_param.py +2 -0
  70. anthropic/types/tool_choice_tool_param.py +2 -0
  71. anthropic/types/tool_param.py +8 -0
  72. anthropic/types/tool_text_editor_20250124_param.py +3 -0
  73. anthropic/types/tool_text_editor_20250429_param.py +3 -0
  74. anthropic/types/tool_text_editor_20250728_param.py +3 -0
  75. anthropic/types/web_search_tool_20250305_param.py +8 -0
  76. anthropic/types/web_search_tool_request_error_param.py +8 -1
  77. anthropic/types/web_search_tool_result_error.py +8 -1
  78. {anthropic-0.75.0.dist-info → anthropic-0.77.0.dist-info}/METADATA +4 -2
  79. {anthropic-0.75.0.dist-info → anthropic-0.77.0.dist-info}/RECORD +81 -77
  80. {anthropic-0.75.0.dist-info → anthropic-0.77.0.dist-info}/WHEEL +0 -0
  81. {anthropic-0.75.0.dist-info → anthropic-0.77.0.dist-info}/licenses/LICENSE +0 -0
@@ -3,11 +3,12 @@
  from __future__ import annotations

  import warnings
- from typing import Union, Iterable, Optional
+ from typing import Type, Union, Iterable, Optional, cast
  from functools import partial
  from typing_extensions import Literal, overload

  import httpx
+ import pydantic

  from ... import _legacy_response
  from ...types import (
@@ -26,21 +27,28 @@ from .batches import (
  from ..._types import NOT_GIVEN, Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
  from ..._utils import is_given, required_args, maybe_transform, async_maybe_transform
  from ..._compat import cached_property
+ from ..._models import TypeAdapter
  from ..._resource import SyncAPIResource, AsyncAPIResource
  from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
  from ..._constants import DEFAULT_TIMEOUT, MODEL_NONSTREAMING_TOKENS
  from ..._streaming import Stream, AsyncStream
  from ..._base_client import make_request_options
+ from ..._utils._utils import is_dict
  from ...lib.streaming import MessageStreamManager, AsyncMessageStreamManager
  from ...types.message import Message
  from ...types.model_param import ModelParam
  from ...types.message_param import MessageParam
+ from ...lib._parse._response import ResponseFormatT, parse_response
  from ...types.metadata_param import MetadataParam
+ from ...types.parsed_message import ParsedMessage
+ from ...lib._parse._transform import transform_schema
  from ...types.text_block_param import TextBlockParam
  from ...types.tool_union_param import ToolUnionParam
  from ...types.tool_choice_param import ToolChoiceParam
+ from ...types.output_config_param import OutputConfigParam
  from ...types.message_tokens_count import MessageTokensCount
  from ...types.thinking_config_param import ThinkingConfigParam
+ from ...types.json_output_format_param import JSONOutputFormatParam
  from ...types.raw_message_stream_event import RawMessageStreamEvent
  from ...types.message_count_tokens_tool_param import MessageCountTokensToolParam

@@ -59,6 +67,8 @@ DEPRECATED_MODELS = {
      "claude-2.0": "July 21st, 2025",
      "claude-3-7-sonnet-latest": "February 19th, 2026",
      "claude-3-7-sonnet-20250219": "February 19th, 2026",
+     "claude-3-5-haiku-latest": "February 19th, 2026",
+     "claude-3-5-haiku-20241022": "February 19th, 2026",
  }

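The two Claude 3.5 Haiku aliases above are newly added to the deprecation table; passing either one to `create`, `stream`, or the new `parse` helper now emits a `DeprecationWarning` (the check is visible in the hunks below). A minimal sketch of surfacing that warning, assuming `ANTHROPIC_API_KEY` is set in the environment:

```python
import warnings

from anthropic import Anthropic

client = Anthropic()  # reads ANTHROPIC_API_KEY from the environment

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    client.messages.create(
        model="claude-3-5-haiku-latest",  # newly deprecated in this release
        max_tokens=64,
        messages=[{"role": "user", "content": "Hello"}],
    )

# caught now includes a DeprecationWarning naming the
# February 19th, 2026 end-of-life date from DEPRECATED_MODELS
assert any(issubclass(w.category, DeprecationWarning) for w in caught)
```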
@@ -94,6 +104,7 @@ class Messages(SyncAPIResource):
          messages: Iterable[MessageParam],
          model: ModelParam,
          metadata: MetadataParam | Omit = omit,
+         output_config: OutputConfigParam | Omit = omit,
          service_tier: Literal["auto", "standard_only"] | Omit = omit,
          stop_sequences: SequenceNotStr[str] | Omit = omit,
          stream: Literal[False] | Omit = omit,
@@ -202,6 +213,8 @@

            metadata: An object describing metadata about the request.

+           output_config: Configuration options for the model's output, such as the output format.
+
            service_tier: Determines whether to use priority capacity (if available) or standard capacity
                for this request.

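Each `create` overload below gains the same `output_config` parameter and docstring entry. For orientation, a sketch of calling `create` with an explicit `output_config`; the client setup and model name are placeholder assumptions, and the `{"type": "json_schema", "schema": ...}` shape follows the `JSONOutputFormatParam` construction shown in the later hunks:

```python
from anthropic import Anthropic

client = Anthropic()  # reads ANTHROPIC_API_KEY from the environment

message = client.messages.create(
    model="claude-sonnet-4-5",  # placeholder model name
    max_tokens=1024,
    messages=[{"role": "user", "content": "Extract: Jane Doe, age 34."}],
    output_config={
        "format": {
            "type": "json_schema",
            "schema": {
                "type": "object",
                "properties": {
                    "name": {"type": "string"},
                    "age": {"type": "integer"},
                },
                "required": ["name", "age"],
            },
        }
    },
)
```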
@@ -363,6 +376,7 @@
          model: ModelParam,
          stream: Literal[True],
          metadata: MetadataParam | Omit = omit,
+         output_config: OutputConfigParam | Omit = omit,
          service_tier: Literal["auto", "standard_only"] | Omit = omit,
          stop_sequences: SequenceNotStr[str] | Omit = omit,
          system: Union[str, Iterable[TextBlockParam]] | Omit = omit,
@@ -474,6 +488,8 @@

            metadata: An object describing metadata about the request.

+           output_config: Configuration options for the model's output, such as the output format.
+
            service_tier: Determines whether to use priority capacity (if available) or standard capacity
                for this request.

@@ -631,6 +647,7 @@
          model: ModelParam,
          stream: bool,
          metadata: MetadataParam | Omit = omit,
+         output_config: OutputConfigParam | Omit = omit,
          service_tier: Literal["auto", "standard_only"] | Omit = omit,
          stop_sequences: SequenceNotStr[str] | Omit = omit,
          system: Union[str, Iterable[TextBlockParam]] | Omit = omit,
@@ -742,6 +759,8 @@

            metadata: An object describing metadata about the request.

+           output_config: Configuration options for the model's output, such as the output format.
+
            service_tier: Determines whether to use priority capacity (if available) or standard capacity
                for this request.

@@ -898,6 +917,7 @@
          messages: Iterable[MessageParam],
          model: ModelParam,
          metadata: MetadataParam | Omit = omit,
+         output_config: OutputConfigParam | Omit = omit,
          service_tier: Literal["auto", "standard_only"] | Omit = omit,
          stop_sequences: SequenceNotStr[str] | Omit = omit,
          stream: Literal[False] | Literal[True] | Omit = omit,
@@ -935,6 +955,7 @@
                      "messages": messages,
                      "model": model,
                      "metadata": metadata,
+                     "output_config": output_config,
                      "service_tier": service_tier,
                      "stop_sequences": stop_sequences,
                      "stream": stream,
@@ -965,6 +986,8 @@
          messages: Iterable[MessageParam],
          model: ModelParam,
          metadata: MetadataParam | Omit = omit,
+         output_config: OutputConfigParam | Omit = omit,
+         output_format: None | JSONOutputFormatParam | type[ResponseFormatT] | Omit = omit,
          container: Optional[str] | Omit = omit,
          service_tier: Literal["auto", "standard_only"] | Omit = omit,
          stop_sequences: SequenceNotStr[str] | Omit = omit,
@@ -981,7 +1004,7 @@
          extra_query: Query | None = None,
          extra_body: Body | None = None,
          timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-     ) -> MessageStreamManager:
+     ) -> MessageStreamManager[ResponseFormatT]:
          """Create a Message stream"""
          if model in DEPRECATED_MODELS:
              warnings.warn(
@@ -995,6 +1018,35 @@
              "X-Stainless-Stream-Helper": "messages",
              **(extra_headers or {}),
          }
+
+         transformed_output_format: Optional[JSONOutputFormatParam] | NotGiven = NOT_GIVEN
+
+         if is_dict(output_format):
+             transformed_output_format = cast(JSONOutputFormatParam, output_format)
+         elif is_given(output_format) and output_format is not None:
+             adapted_type: TypeAdapter[ResponseFormatT] = TypeAdapter(output_format)
+
+             try:
+                 schema = adapted_type.json_schema()
+                 transformed_output_format = JSONOutputFormatParam(schema=transform_schema(schema), type="json_schema")
+             except pydantic.errors.PydanticSchemaGenerationError as e:
+                 raise TypeError(
+                     (
+                         "Could not generate JSON schema for the given `output_format` type. "
+                         "Use a type that works with `pydantic.TypeAdapter`"
+                     )
+                 ) from e
+
+         # Merge output_format into output_config
+         merged_output_config: OutputConfigParam | Omit = omit
+         if is_given(transformed_output_format):
+             if is_given(output_config):
+                 merged_output_config = {**output_config, "format": transformed_output_format}
+             else:
+                 merged_output_config = {"format": transformed_output_format}
+         elif is_given(output_config):
+             merged_output_config = output_config
+
          make_request = partial(
              self._post,
              "/v1/messages",
@@ -1004,6 +1056,7 @@
                      "messages": messages,
                      "model": model,
                      "metadata": metadata,
+                     "output_config": merged_output_config,
                      "container": container,
                      "service_tier": service_tier,
                      "stop_sequences": stop_sequences,
@@ -1025,13 +1078,129 @@
              stream=True,
              stream_cls=Stream[RawMessageStreamEvent],
          )
-         return MessageStreamManager(make_request)
+         return MessageStreamManager(
+             make_request,
+             output_format=NOT_GIVEN if is_dict(output_format) else cast(ResponseFormatT, output_format),
+         )
+
+     def parse(
+         self,
+         *,
+         max_tokens: int,
+         messages: Iterable[MessageParam],
+         model: ModelParam,
+         metadata: MetadataParam | Omit = omit,
+         output_config: OutputConfigParam | Omit = omit,
+         output_format: Optional[type[ResponseFormatT]] | Omit = omit,
+         service_tier: Literal["auto", "standard_only"] | Omit = omit,
+         stop_sequences: SequenceNotStr[str] | Omit = omit,
+         stream: Literal[False] | Literal[True] | Omit = omit,
+         system: Union[str, Iterable[TextBlockParam]] | Omit = omit,
+         temperature: float | Omit = omit,
+         thinking: ThinkingConfigParam | Omit = omit,
+         tool_choice: ToolChoiceParam | Omit = omit,
+         tools: Iterable[ToolUnionParam] | Omit = omit,
+         top_k: int | Omit = omit,
+         top_p: float | Omit = omit,
+         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+         # The extra values given here take precedence over values defined on the client or passed to this method.
+         extra_headers: Headers | None = None,
+         extra_query: Query | None = None,
+         extra_body: Body | None = None,
+         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+     ) -> ParsedMessage[ResponseFormatT]:
+         if not stream and not is_given(timeout) and self._client.timeout == DEFAULT_TIMEOUT:
+             timeout = self._client._calculate_nonstreaming_timeout(
+                 max_tokens, MODEL_NONSTREAMING_TOKENS.get(model, None)
+             )
+
+         if model in DEPRECATED_MODELS:
+             warnings.warn(
+                 f"The model '{model}' is deprecated and will reach end-of-life on {DEPRECATED_MODELS[model]}.\nPlease migrate to a newer model. Visit https://docs.anthropic.com/en/docs/resources/model-deprecations for more information.",
+                 DeprecationWarning,
+                 stacklevel=3,
+             )
+
+         extra_headers = {
+             "X-Stainless-Helper": "messages.parse",
+             **(extra_headers or {}),
+         }
+
+         transformed_output_format: Optional[JSONOutputFormatParam] | NotGiven = NOT_GIVEN
+
+         if is_given(output_format) and output_format is not None:
+             adapted_type: TypeAdapter[ResponseFormatT] = TypeAdapter(output_format)
+
+             try:
+                 schema = adapted_type.json_schema()
+                 transformed_output_format = JSONOutputFormatParam(schema=transform_schema(schema), type="json_schema")
+             except pydantic.errors.PydanticSchemaGenerationError as e:
+                 raise TypeError(
+                     (
+                         "Could not generate JSON schema for the given `output_format` type. "
+                         "Use a type that works with `pydantic.TypeAdapter`"
+                     )
+                 ) from e
+
+         def parser(response: Message) -> ParsedMessage[ResponseFormatT]:
+             return parse_response(
+                 response=response,
+                 output_format=cast(
+                     ResponseFormatT,
+                     output_format if is_given(output_format) and output_format is not None else NOT_GIVEN,
+                 ),
+             )
+
+         # Merge output_format into output_config
+         merged_output_config: OutputConfigParam | Omit = omit
+         if is_given(transformed_output_format):
+             if is_given(output_config):
+                 merged_output_config = {**output_config, "format": transformed_output_format}
+             else:
+                 merged_output_config = {"format": transformed_output_format}
+         elif is_given(output_config):
+             merged_output_config = output_config
+
+         return self._post(
+             "/v1/messages",
+             body=maybe_transform(
+                 {
+                     "max_tokens": max_tokens,
+                     "messages": messages,
+                     "model": model,
+                     "metadata": metadata,
+                     "output_config": merged_output_config,
+                     "service_tier": service_tier,
+                     "stop_sequences": stop_sequences,
+                     "stream": stream,
+                     "system": system,
+                     "temperature": temperature,
+                     "thinking": thinking,
+                     "tool_choice": tool_choice,
+                     "tools": tools,
+                     "top_k": top_k,
+                     "top_p": top_p,
+                 },
+                 message_create_params.MessageCreateParamsNonStreaming,
+             ),
+             options=make_request_options(
+                 extra_headers=extra_headers,
+                 extra_query=extra_query,
+                 extra_body=extra_body,
+                 timeout=timeout,
+                 post_parser=parser,
+             ),
+             cast_to=cast(Type[ParsedMessage[ResponseFormatT]], Message),
+             stream=False,
+         )

      def count_tokens(
          self,
          *,
          messages: Iterable[MessageParam],
          model: ModelParam,
+         output_config: OutputConfigParam | Omit = omit,
+         output_format: None | JSONOutputFormatParam | type | Omit = omit,
          system: Union[str, Iterable[TextBlockParam]] | Omit = omit,
          thinking: ThinkingConfigParam | Omit = omit,
          tool_choice: ToolChoiceParam | Omit = omit,
@@ -1123,6 +1292,14 @@
              [models](https://docs.anthropic.com/en/docs/models-overview) for additional
              details and options.

+           output_config: Configuration options for the model's output, such as the output format.
+
+           output_format: A Pydantic model, JSON schema dictionary, or type that will be
+               converted to a JSON schema for structured output. This is a convenience parameter
+               that will be merged into output_config.format. See
+               [structured outputs](https://docs.anthropic.com/en/docs/build-with-claude/structured-outputs)
+               for more details.
+
            system: System prompt.

              A system prompt is a way of providing context and instructions to Claude, such
@@ -1226,12 +1403,42 @@

            timeout: Override the client-level default timeout for this request, in seconds
          """
+         # Transform output_format if provided
+         transformed_output_format: Optional[JSONOutputFormatParam] | NotGiven = NOT_GIVEN
+
+         if is_dict(output_format):
+             transformed_output_format = cast(JSONOutputFormatParam, output_format)
+         elif is_given(output_format) and output_format is not None:
+             adapted_type: TypeAdapter[type] = TypeAdapter(output_format)
+
+             try:
+                 schema = adapted_type.json_schema()
+                 transformed_output_format = JSONOutputFormatParam(schema=transform_schema(schema), type="json_schema")
+             except pydantic.errors.PydanticSchemaGenerationError as e:
+                 raise TypeError(
+                     (
+                         "Could not generate JSON schema for the given `output_format` type. "
+                         "Use a type that works with `pydantic.TypeAdapter`"
+                     )
+                 ) from e
+
+         # Merge output_format into output_config
+         merged_output_config: OutputConfigParam | Omit = omit
+         if is_given(transformed_output_format):
+             if is_given(output_config):
+                 merged_output_config = {**output_config, "format": transformed_output_format}
+             else:
+                 merged_output_config = {"format": transformed_output_format}
+         elif is_given(output_config):
+             merged_output_config = output_config
+
          return self._post(
              "/v1/messages/count_tokens",
              body=maybe_transform(
                  {
                      "messages": messages,
                      "model": model,
+                     "output_config": merged_output_config,
                      "system": system,
                      "thinking": thinking,
                      "tool_choice": tool_choice,
@@ -1278,6 +1485,7 @@ class AsyncMessages(AsyncAPIResource):
          messages: Iterable[MessageParam],
          model: ModelParam,
          metadata: MetadataParam | Omit = omit,
+         output_config: OutputConfigParam | Omit = omit,
          service_tier: Literal["auto", "standard_only"] | Omit = omit,
          stop_sequences: SequenceNotStr[str] | Omit = omit,
          stream: Literal[False] | Omit = omit,
@@ -1386,6 +1594,8 @@

            metadata: An object describing metadata about the request.

+           output_config: Configuration options for the model's output, such as the output format.
+
            service_tier: Determines whether to use priority capacity (if available) or standard capacity
                for this request.

@@ -1547,6 +1757,7 @@
          model: ModelParam,
          stream: Literal[True],
          metadata: MetadataParam | Omit = omit,
+         output_config: OutputConfigParam | Omit = omit,
          service_tier: Literal["auto", "standard_only"] | Omit = omit,
          stop_sequences: SequenceNotStr[str] | Omit = omit,
          system: Union[str, Iterable[TextBlockParam]] | Omit = omit,
@@ -1658,6 +1869,8 @@

            metadata: An object describing metadata about the request.

+           output_config: Configuration options for the model's output, such as the output format.
+
            service_tier: Determines whether to use priority capacity (if available) or standard capacity
                for this request.

@@ -1815,6 +2028,7 @@
          model: ModelParam,
          stream: bool,
          metadata: MetadataParam | Omit = omit,
+         output_config: OutputConfigParam | Omit = omit,
          service_tier: Literal["auto", "standard_only"] | Omit = omit,
          stop_sequences: SequenceNotStr[str] | Omit = omit,
          system: Union[str, Iterable[TextBlockParam]] | Omit = omit,
@@ -1926,6 +2140,8 @@

            metadata: An object describing metadata about the request.

+           output_config: Configuration options for the model's output, such as the output format.
+
            service_tier: Determines whether to use priority capacity (if available) or standard capacity
                for this request.

@@ -2082,6 +2298,7 @@
          messages: Iterable[MessageParam],
          model: ModelParam,
          metadata: MetadataParam | Omit = omit,
+         output_config: OutputConfigParam | Omit = omit,
          service_tier: Literal["auto", "standard_only"] | Omit = omit,
          stop_sequences: SequenceNotStr[str] | Omit = omit,
          stream: Literal[False] | Literal[True] | Omit = omit,
@@ -2119,6 +2336,7 @@
                      "messages": messages,
                      "model": model,
                      "metadata": metadata,
+                     "output_config": output_config,
                      "service_tier": service_tier,
                      "stop_sequences": stop_sequences,
                      "stream": stream,
@@ -2149,6 +2367,8 @@
          messages: Iterable[MessageParam],
          model: ModelParam,
          metadata: MetadataParam | Omit = omit,
+         output_config: OutputConfigParam | Omit = omit,
+         output_format: None | JSONOutputFormatParam | type[ResponseFormatT] | Omit = omit,
          container: Optional[str] | Omit = omit,
          service_tier: Literal["auto", "standard_only"] | Omit = omit,
          stop_sequences: SequenceNotStr[str] | Omit = omit,
@@ -2165,7 +2385,7 @@
          extra_query: Query | None = None,
          extra_body: Body | None = None,
          timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-     ) -> AsyncMessageStreamManager:
+     ) -> AsyncMessageStreamManager[ResponseFormatT]:
          """Create a Message stream"""
          if model in DEPRECATED_MODELS:
              warnings.warn(
@@ -2179,6 +2399,35 @@
              "X-Stainless-Stream-Helper": "messages",
              **(extra_headers or {}),
          }
+
+         transformed_output_format: Optional[JSONOutputFormatParam] | NotGiven = NOT_GIVEN
+
+         if is_dict(output_format):
+             transformed_output_format = cast(JSONOutputFormatParam, output_format)
+         elif is_given(output_format) and output_format is not None:
+             adapted_type: TypeAdapter[ResponseFormatT] = TypeAdapter(output_format)
+
+             try:
+                 schema = adapted_type.json_schema()
+                 transformed_output_format = JSONOutputFormatParam(schema=transform_schema(schema), type="json_schema")
+             except pydantic.errors.PydanticSchemaGenerationError as e:
+                 raise TypeError(
+                     (
+                         "Could not generate JSON schema for the given `output_format` type. "
+                         "Use a type that works with `pydantic.TypeAdapter`"
+                     )
+                 ) from e
+
+         # Merge output_format into output_config
+         merged_output_config: OutputConfigParam | Omit = omit
+         if is_given(transformed_output_format):
+             if is_given(output_config):
+                 merged_output_config = {**output_config, "format": transformed_output_format}
+             else:
+                 merged_output_config = {"format": transformed_output_format}
+         elif is_given(output_config):
+             merged_output_config = output_config
+
          request = self._post(
              "/v1/messages",
              body=maybe_transform(
@@ -2187,6 +2436,7 @@
                      "messages": messages,
                      "model": model,
                      "metadata": metadata,
+                     "output_config": merged_output_config,
                      "container": container,
                      "service_tier": service_tier,
                      "stop_sequences": stop_sequences,
@@ -2208,13 +2458,129 @@
              stream=True,
              stream_cls=AsyncStream[RawMessageStreamEvent],
          )
-         return AsyncMessageStreamManager(request)
+         return AsyncMessageStreamManager(
+             request,
+             output_format=NOT_GIVEN if is_dict(output_format) else cast(ResponseFormatT, output_format),
+         )
+
+     async def parse(
+         self,
+         *,
+         max_tokens: int,
+         messages: Iterable[MessageParam],
+         model: ModelParam,
+         metadata: MetadataParam | Omit = omit,
+         output_config: OutputConfigParam | Omit = omit,
+         output_format: Optional[type[ResponseFormatT]] | Omit = omit,
+         service_tier: Literal["auto", "standard_only"] | Omit = omit,
+         stop_sequences: SequenceNotStr[str] | Omit = omit,
+         stream: Literal[False] | Literal[True] | Omit = omit,
+         system: Union[str, Iterable[TextBlockParam]] | Omit = omit,
+         temperature: float | Omit = omit,
+         thinking: ThinkingConfigParam | Omit = omit,
+         tool_choice: ToolChoiceParam | Omit = omit,
+         tools: Iterable[ToolUnionParam] | Omit = omit,
+         top_k: int | Omit = omit,
+         top_p: float | Omit = omit,
+         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+         # The extra values given here take precedence over values defined on the client or passed to this method.
+         extra_headers: Headers | None = None,
+         extra_query: Query | None = None,
+         extra_body: Body | None = None,
+         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+     ) -> ParsedMessage[ResponseFormatT]:
+         if not stream and not is_given(timeout) and self._client.timeout == DEFAULT_TIMEOUT:
+             timeout = self._client._calculate_nonstreaming_timeout(
+                 max_tokens, MODEL_NONSTREAMING_TOKENS.get(model, None)
+             )
+
+         if model in DEPRECATED_MODELS:
+             warnings.warn(
+                 f"The model '{model}' is deprecated and will reach end-of-life on {DEPRECATED_MODELS[model]}.\nPlease migrate to a newer model. Visit https://docs.anthropic.com/en/docs/resources/model-deprecations for more information.",
+                 DeprecationWarning,
+                 stacklevel=3,
+             )
+
+         extra_headers = {
+             "X-Stainless-Helper": "messages.parse",
+             **(extra_headers or {}),
+         }
+
+         transformed_output_format: Optional[JSONOutputFormatParam] | NotGiven = NOT_GIVEN
+
+         if is_given(output_format) and output_format is not None:
+             adapted_type: TypeAdapter[ResponseFormatT] = TypeAdapter(output_format)
+
+             try:
+                 schema = adapted_type.json_schema()
+                 transformed_output_format = JSONOutputFormatParam(schema=transform_schema(schema), type="json_schema")
+             except pydantic.errors.PydanticSchemaGenerationError as e:
+                 raise TypeError(
+                     (
+                         "Could not generate JSON schema for the given `output_format` type. "
+                         "Use a type that works with `pydantic.TypeAdapter`"
+                     )
+                 ) from e
+
+         def parser(response: Message) -> ParsedMessage[ResponseFormatT]:
+             return parse_response(
+                 response=response,
+                 output_format=cast(
+                     ResponseFormatT,
+                     output_format if is_given(output_format) and output_format is not None else NOT_GIVEN,
+                 ),
+             )
+
+         # Merge output_format into output_config
+         merged_output_config: OutputConfigParam | Omit = omit
+         if is_given(transformed_output_format):
+             if is_given(output_config):
+                 merged_output_config = {**output_config, "format": transformed_output_format}
+             else:
+                 merged_output_config = {"format": transformed_output_format}
+         elif is_given(output_config):
+             merged_output_config = output_config
+
+         return await self._post(
+             "/v1/messages",
+             body=await async_maybe_transform(
+                 {
+                     "max_tokens": max_tokens,
+                     "messages": messages,
+                     "model": model,
+                     "metadata": metadata,
+                     "output_config": merged_output_config,
+                     "service_tier": service_tier,
+                     "stop_sequences": stop_sequences,
+                     "stream": stream,
+                     "system": system,
+                     "temperature": temperature,
+                     "thinking": thinking,
+                     "tool_choice": tool_choice,
+                     "tools": tools,
+                     "top_k": top_k,
+                     "top_p": top_p,
+                 },
+                 message_create_params.MessageCreateParamsNonStreaming,
+             ),
+             options=make_request_options(
+                 extra_headers=extra_headers,
+                 extra_query=extra_query,
+                 extra_body=extra_body,
+                 timeout=timeout,
+                 post_parser=parser,
+             ),
+             cast_to=cast(Type[ParsedMessage[ResponseFormatT]], Message),
+             stream=False,
+         )

      async def count_tokens(
          self,
          *,
          messages: Iterable[MessageParam],
          model: ModelParam,
+         output_config: OutputConfigParam | Omit = omit,
+         output_format: None | JSONOutputFormatParam | type | Omit = omit,
          system: Union[str, Iterable[TextBlockParam]] | Omit = omit,
          thinking: ThinkingConfigParam | Omit = omit,
          tool_choice: ToolChoiceParam | Omit = omit,
@@ -2306,6 +2672,14 @@
              [models](https://docs.anthropic.com/en/docs/models-overview) for additional
              details and options.

+           output_config: Configuration options for the model's output, such as the output format.
+
+           output_format: A Pydantic model, JSON schema dictionary, or type that will be
+               converted to a JSON schema for structured output. This is a convenience parameter
+               that will be merged into output_config.format. See
+               [structured outputs](https://docs.anthropic.com/en/docs/build-with-claude/structured-outputs)
+               for more details.
+
            system: System prompt.

              A system prompt is a way of providing context and instructions to Claude, such
@@ -2409,12 +2783,42 @@

            timeout: Override the client-level default timeout for this request, in seconds
          """
+         # Transform output_format if provided
+         transformed_output_format: Optional[JSONOutputFormatParam] | NotGiven = NOT_GIVEN
+
+         if is_dict(output_format):
+             transformed_output_format = cast(JSONOutputFormatParam, output_format)
+         elif is_given(output_format) and output_format is not None:
+             adapted_type: TypeAdapter[type] = TypeAdapter(output_format)
+
+             try:
+                 schema = adapted_type.json_schema()
+                 transformed_output_format = JSONOutputFormatParam(schema=transform_schema(schema), type="json_schema")
+             except pydantic.errors.PydanticSchemaGenerationError as e:
+                 raise TypeError(
+                     (
+                         "Could not generate JSON schema for the given `output_format` type. "
+                         "Use a type that works with `pydantic.TypeAdapter`"
+                     )
+                 ) from e
+
+         # Merge output_format into output_config
+         merged_output_config: OutputConfigParam | Omit = omit
+         if is_given(transformed_output_format):
+             if is_given(output_config):
+                 merged_output_config = {**output_config, "format": transformed_output_format}
+             else:
+                 merged_output_config = {"format": transformed_output_format}
+         elif is_given(output_config):
+             merged_output_config = output_config
+
          return await self._post(
              "/v1/messages/count_tokens",
              body=await async_maybe_transform(
                  {
                      "messages": messages,
                      "model": model,
+                     "output_config": merged_output_config,
                      "system": system,
                      "thinking": thinking,
                      "tool_choice": tool_choice,