anthropic 0.76.0__py3-none-any.whl → 0.77.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- anthropic/_base_client.py +5 -2
- anthropic/_compat.py +3 -3
- anthropic/_utils/_json.py +35 -0
- anthropic/_version.py +1 -1
- anthropic/lib/_parse/_response.py +29 -1
- anthropic/lib/streaming/__init__.py +3 -0
- anthropic/lib/streaming/_messages.py +74 -40
- anthropic/lib/streaming/_types.py +42 -2
- anthropic/resources/beta/messages/messages.py +170 -59
- anthropic/resources/messages/messages.py +407 -5
- anthropic/types/__init__.py +7 -0
- anthropic/types/beta/beta_code_execution_tool_20250522_param.py +1 -0
- anthropic/types/beta/beta_code_execution_tool_20250825_param.py +1 -0
- anthropic/types/beta/beta_memory_tool_20250818_param.py +1 -0
- anthropic/types/beta/beta_output_config_param.py +15 -1
- anthropic/types/beta/beta_server_tool_use_block.py +4 -4
- anthropic/types/beta/beta_tool_bash_20241022_param.py +1 -0
- anthropic/types/beta/beta_tool_bash_20250124_param.py +1 -0
- anthropic/types/beta/beta_tool_computer_use_20241022_param.py +1 -0
- anthropic/types/beta/beta_tool_computer_use_20250124_param.py +1 -0
- anthropic/types/beta/beta_tool_computer_use_20251124_param.py +1 -0
- anthropic/types/beta/beta_tool_param.py +1 -0
- anthropic/types/beta/beta_tool_search_tool_bm25_20251119_param.py +1 -0
- anthropic/types/beta/beta_tool_search_tool_regex_20251119_param.py +1 -0
- anthropic/types/beta/beta_tool_text_editor_20241022_param.py +1 -0
- anthropic/types/beta/beta_tool_text_editor_20250124_param.py +1 -0
- anthropic/types/beta/beta_tool_text_editor_20250429_param.py +1 -0
- anthropic/types/beta/beta_tool_text_editor_20250728_param.py +1 -0
- anthropic/types/beta/beta_web_fetch_tool_20250910_param.py +1 -0
- anthropic/types/beta/beta_web_search_tool_20250305_param.py +1 -0
- anthropic/types/beta/beta_web_search_tool_result_error_code.py +1 -1
- anthropic/types/beta/message_count_tokens_params.py +9 -5
- anthropic/types/beta/message_create_params.py +9 -5
- anthropic/types/beta/messages/batch_create_params.py +2 -9
- anthropic/types/json_output_format_param.py +15 -0
- anthropic/types/message_count_tokens_params.py +4 -0
- anthropic/types/message_create_params.py +4 -0
- anthropic/types/output_config_param.py +19 -0
- anthropic/types/parsed_message.py +56 -0
- anthropic/types/tool_bash_20250124_param.py +3 -0
- anthropic/types/tool_param.py +3 -0
- anthropic/types/tool_text_editor_20250124_param.py +3 -0
- anthropic/types/tool_text_editor_20250429_param.py +3 -0
- anthropic/types/tool_text_editor_20250728_param.py +3 -0
- anthropic/types/web_search_tool_20250305_param.py +3 -0
- anthropic/types/web_search_tool_request_error_param.py +8 -1
- anthropic/types/web_search_tool_result_error.py +8 -1
- {anthropic-0.76.0.dist-info → anthropic-0.77.1.dist-info}/METADATA +1 -1
- {anthropic-0.76.0.dist-info → anthropic-0.77.1.dist-info}/RECORD +51 -47
- {anthropic-0.76.0.dist-info → anthropic-0.77.1.dist-info}/WHEEL +0 -0
- {anthropic-0.76.0.dist-info → anthropic-0.77.1.dist-info}/licenses/LICENSE +0 -0
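The bulk of the change is structured-output support in the Messages API: a new `output_config` parameter on `create()`, `stream()`, and `count_tokens()`, a convenience `output_format` parameter, and new `parse()` helpers returning a typed `ParsedMessage`. A minimal usage sketch, inferred from the signatures in this diff rather than from official docs (the model name and prompt are illustrative):

```python
# Sketch based on the parse() signature added in this diff; not official
# documentation. Any pydantic-compatible type can serve as output_format.
from pydantic import BaseModel

import anthropic

client = anthropic.Anthropic()


class Recipe(BaseModel):
    name: str
    ingredients: list[str]


# parse() converts Recipe to a JSON schema, sends it as
# output_config["format"], and returns a ParsedMessage[Recipe].
message = client.messages.parse(
    model="claude-sonnet-4-5",  # illustrative model name
    max_tokens=1024,
    messages=[{"role": "user", "content": "Give me a pancake recipe."}],
    output_format=Recipe,
)
```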
The hunks below cover `anthropic/resources/messages/messages.py` (+407 −5), the largest change in this release.

```diff
@@ -3,11 +3,12 @@
 from __future__ import annotations
 
 import warnings
-from typing import Union, Iterable, Optional
+from typing import Type, Union, Iterable, Optional, cast
 from functools import partial
 from typing_extensions import Literal, overload
 
 import httpx
+import pydantic
 
 from ... import _legacy_response
 from ...types import (
@@ -26,21 +27,28 @@ from .batches import (
 from ..._types import NOT_GIVEN, Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
 from ..._utils import is_given, required_args, maybe_transform, async_maybe_transform
 from ..._compat import cached_property
+from ..._models import TypeAdapter
 from ..._resource import SyncAPIResource, AsyncAPIResource
 from ..._response import to_streamed_response_wrapper, async_to_streamed_response_wrapper
 from ..._constants import DEFAULT_TIMEOUT, MODEL_NONSTREAMING_TOKENS
 from ..._streaming import Stream, AsyncStream
 from ..._base_client import make_request_options
+from ..._utils._utils import is_dict
 from ...lib.streaming import MessageStreamManager, AsyncMessageStreamManager
 from ...types.message import Message
 from ...types.model_param import ModelParam
 from ...types.message_param import MessageParam
+from ...lib._parse._response import ResponseFormatT, parse_response
 from ...types.metadata_param import MetadataParam
+from ...types.parsed_message import ParsedMessage
+from ...lib._parse._transform import transform_schema
 from ...types.text_block_param import TextBlockParam
 from ...types.tool_union_param import ToolUnionParam
 from ...types.tool_choice_param import ToolChoiceParam
+from ...types.output_config_param import OutputConfigParam
 from ...types.message_tokens_count import MessageTokensCount
 from ...types.thinking_config_param import ThinkingConfigParam
+from ...types.json_output_format_param import JSONOutputFormatParam
 from ...types.raw_message_stream_event import RawMessageStreamEvent
 from ...types.message_count_tokens_tool_param import MessageCountTokensToolParam
 
@@ -96,6 +104,7 @@ class Messages(SyncAPIResource):
         messages: Iterable[MessageParam],
         model: ModelParam,
         metadata: MetadataParam | Omit = omit,
+        output_config: OutputConfigParam | Omit = omit,
         service_tier: Literal["auto", "standard_only"] | Omit = omit,
         stop_sequences: SequenceNotStr[str] | Omit = omit,
         stream: Literal[False] | Omit = omit,
@@ -204,6 +213,8 @@
 
           metadata: An object describing metadata about the request.
 
+          output_config: Configuration options for the model's output, such as the output format.
+
           service_tier: Determines whether to use priority capacity (if available) or standard capacity
               for this request.
 
```
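The merge logic later in this diff builds `output_config` as a dict whose `format` key holds a `JSONOutputFormatParam` (`{"type": "json_schema", "schema": ...}`), so a hand-written schema can be passed directly without the `output_format` convenience parameter. A hedged sketch, continuing the `client` from the first example; the schema itself is illustrative:

```python
# Sketch only: pass a hand-written JSON schema through output_config,
# mirroring the {"format": {...}} shape the merge logic constructs.
message = client.messages.create(
    model="claude-sonnet-4-5",  # illustrative model name
    max_tokens=1024,
    messages=[{"role": "user", "content": "Name three primary colors."}],
    output_config={
        "format": {
            "type": "json_schema",
            "schema": {
                "type": "object",
                "properties": {
                    "colors": {"type": "array", "items": {"type": "string"}},
                },
                "required": ["colors"],
            },
        },
    },
)
```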
```diff
@@ -365,6 +376,7 @@
         model: ModelParam,
         stream: Literal[True],
         metadata: MetadataParam | Omit = omit,
+        output_config: OutputConfigParam | Omit = omit,
         service_tier: Literal["auto", "standard_only"] | Omit = omit,
         stop_sequences: SequenceNotStr[str] | Omit = omit,
         system: Union[str, Iterable[TextBlockParam]] | Omit = omit,
@@ -476,6 +488,8 @@
 
           metadata: An object describing metadata about the request.
 
+          output_config: Configuration options for the model's output, such as the output format.
+
           service_tier: Determines whether to use priority capacity (if available) or standard capacity
               for this request.
 
@@ -633,6 +647,7 @@
         model: ModelParam,
         stream: bool,
         metadata: MetadataParam | Omit = omit,
+        output_config: OutputConfigParam | Omit = omit,
         service_tier: Literal["auto", "standard_only"] | Omit = omit,
         stop_sequences: SequenceNotStr[str] | Omit = omit,
         system: Union[str, Iterable[TextBlockParam]] | Omit = omit,
@@ -744,6 +759,8 @@
 
           metadata: An object describing metadata about the request.
 
+          output_config: Configuration options for the model's output, such as the output format.
+
           service_tier: Determines whether to use priority capacity (if available) or standard capacity
               for this request.
 
@@ -900,6 +917,7 @@
         messages: Iterable[MessageParam],
         model: ModelParam,
         metadata: MetadataParam | Omit = omit,
+        output_config: OutputConfigParam | Omit = omit,
         service_tier: Literal["auto", "standard_only"] | Omit = omit,
         stop_sequences: SequenceNotStr[str] | Omit = omit,
         stream: Literal[False] | Literal[True] | Omit = omit,
@@ -937,6 +955,7 @@
                     "messages": messages,
                     "model": model,
                     "metadata": metadata,
+                    "output_config": output_config,
                     "service_tier": service_tier,
                     "stop_sequences": stop_sequences,
                     "stream": stream,
@@ -967,6 +986,8 @@
         messages: Iterable[MessageParam],
         model: ModelParam,
         metadata: MetadataParam | Omit = omit,
+        output_config: OutputConfigParam | Omit = omit,
+        output_format: None | JSONOutputFormatParam | type[ResponseFormatT] | Omit = omit,
         container: Optional[str] | Omit = omit,
         service_tier: Literal["auto", "standard_only"] | Omit = omit,
         stop_sequences: SequenceNotStr[str] | Omit = omit,
@@ -983,7 +1004,7 @@
         extra_query: Query | None = None,
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> MessageStreamManager:
+    ) -> MessageStreamManager[ResponseFormatT]:
         """Create a Message stream"""
         if model in DEPRECATED_MODELS:
             warnings.warn(
```
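Note the return-type change: `stream()` is now generic over `ResponseFormatT`, so passing `output_format` yields a typed `MessageStreamManager`. A sketch reusing `client` and `Recipe` from the first example (`text_stream` is the SDK's existing streaming helper):

```python
# Sketch: stream() accepts the same output_format convenience parameter
# and threads the type through to MessageStreamManager[ResponseFormatT].
with client.messages.stream(
    model="claude-sonnet-4-5",  # illustrative model name
    max_tokens=1024,
    messages=[{"role": "user", "content": "Give me a pancake recipe."}],
    output_format=Recipe,
) as stream:
    for text in stream.text_stream:
        print(text, end="", flush=True)
```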
```diff
@@ -997,6 +1018,35 @@
             "X-Stainless-Stream-Helper": "messages",
             **(extra_headers or {}),
         }
+
+        transformed_output_format: Optional[JSONOutputFormatParam] | NotGiven = NOT_GIVEN
+
+        if is_dict(output_format):
+            transformed_output_format = cast(JSONOutputFormatParam, output_format)
+        elif is_given(output_format) and output_format is not None:
+            adapted_type: TypeAdapter[ResponseFormatT] = TypeAdapter(output_format)
+
+            try:
+                schema = adapted_type.json_schema()
+                transformed_output_format = JSONOutputFormatParam(schema=transform_schema(schema), type="json_schema")
+            except pydantic.errors.PydanticSchemaGenerationError as e:
+                raise TypeError(
+                    (
+                        "Could not generate JSON schema for the given `output_format` type. "
+                        "Use a type that works with `pydantic.TypeAdapter`"
+                    )
+                ) from e
+
+        # Merge output_format into output_config
+        merged_output_config: OutputConfigParam | Omit = omit
+        if is_given(transformed_output_format):
+            if is_given(output_config):
+                merged_output_config = {**output_config, "format": transformed_output_format}
+            else:
+                merged_output_config = {"format": transformed_output_format}
+        elif is_given(output_config):
+            merged_output_config = output_config
+
         make_request = partial(
             self._post,
             "/v1/messages",
```
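The transformation above leans on one public pydantic API: `TypeAdapter.json_schema()`. The `transform_schema` helper (imported from `anthropic/lib/_parse/_transform` in the new imports) then adjusts that schema before it is wrapped as `{"type": "json_schema", "schema": ...}`. The pydantic half can be reproduced standalone:

```python
# Standalone demonstration of the schema-generation step; only pydantic
# is needed, and TypeAdapter.json_schema() is a stable public API.
from pydantic import BaseModel, TypeAdapter


class Recipe(BaseModel):
    name: str
    ingredients: list[str]


schema = TypeAdapter(Recipe).json_schema()
print(schema["required"])  # ['name', 'ingredients']
```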
```diff
@@ -1006,6 +1056,7 @@
                     "messages": messages,
                     "model": model,
                     "metadata": metadata,
+                    "output_config": merged_output_config,
                     "container": container,
                     "service_tier": service_tier,
                     "stop_sequences": stop_sequences,
@@ -1027,13 +1078,129 @@
             stream=True,
             stream_cls=Stream[RawMessageStreamEvent],
         )
-        return MessageStreamManager(make_request)
+        return MessageStreamManager(
+            make_request,
+            output_format=NOT_GIVEN if is_dict(output_format) else cast(ResponseFormatT, output_format),
+        )
+
+    def parse(
+        self,
+        *,
+        max_tokens: int,
+        messages: Iterable[MessageParam],
+        model: ModelParam,
+        metadata: MetadataParam | Omit = omit,
+        output_config: OutputConfigParam | Omit = omit,
+        output_format: Optional[type[ResponseFormatT]] | Omit = omit,
+        service_tier: Literal["auto", "standard_only"] | Omit = omit,
+        stop_sequences: SequenceNotStr[str] | Omit = omit,
+        stream: Literal[False] | Literal[True] | Omit = omit,
+        system: Union[str, Iterable[TextBlockParam]] | Omit = omit,
+        temperature: float | Omit = omit,
+        thinking: ThinkingConfigParam | Omit = omit,
+        tool_choice: ToolChoiceParam | Omit = omit,
+        tools: Iterable[ToolUnionParam] | Omit = omit,
+        top_k: int | Omit = omit,
+        top_p: float | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ParsedMessage[ResponseFormatT]:
+        if not stream and not is_given(timeout) and self._client.timeout == DEFAULT_TIMEOUT:
+            timeout = self._client._calculate_nonstreaming_timeout(
+                max_tokens, MODEL_NONSTREAMING_TOKENS.get(model, None)
+            )
+
+        if model in DEPRECATED_MODELS:
+            warnings.warn(
+                f"The model '{model}' is deprecated and will reach end-of-life on {DEPRECATED_MODELS[model]}.\nPlease migrate to a newer model. Visit https://docs.anthropic.com/en/docs/resources/model-deprecations for more information.",
+                DeprecationWarning,
+                stacklevel=3,
+            )
+
+        extra_headers = {
+            "X-Stainless-Helper": "messages.parse",
+            **(extra_headers or {}),
+        }
+
+        transformed_output_format: Optional[JSONOutputFormatParam] | NotGiven = NOT_GIVEN
+
+        if is_given(output_format) and output_format is not None:
+            adapted_type: TypeAdapter[ResponseFormatT] = TypeAdapter(output_format)
+
+            try:
+                schema = adapted_type.json_schema()
+                transformed_output_format = JSONOutputFormatParam(schema=transform_schema(schema), type="json_schema")
+            except pydantic.errors.PydanticSchemaGenerationError as e:
+                raise TypeError(
+                    (
+                        "Could not generate JSON schema for the given `output_format` type. "
+                        "Use a type that works with `pydantic.TypeAdapter`"
+                    )
+                ) from e
+
+        def parser(response: Message) -> ParsedMessage[ResponseFormatT]:
+            return parse_response(
+                response=response,
+                output_format=cast(
+                    ResponseFormatT,
+                    output_format if is_given(output_format) and output_format is not None else NOT_GIVEN,
+                ),
+            )
+
+        # Merge output_format into output_config
+        merged_output_config: OutputConfigParam | Omit = omit
+        if is_given(transformed_output_format):
+            if is_given(output_config):
+                merged_output_config = {**output_config, "format": transformed_output_format}
+            else:
+                merged_output_config = {"format": transformed_output_format}
+        elif is_given(output_config):
+            merged_output_config = output_config
+
+        return self._post(
+            "/v1/messages",
+            body=maybe_transform(
+                {
+                    "max_tokens": max_tokens,
+                    "messages": messages,
+                    "model": model,
+                    "metadata": metadata,
+                    "output_config": merged_output_config,
+                    "service_tier": service_tier,
+                    "stop_sequences": stop_sequences,
+                    "stream": stream,
+                    "system": system,
+                    "temperature": temperature,
+                    "thinking": thinking,
+                    "tool_choice": tool_choice,
+                    "tools": tools,
+                    "top_k": top_k,
+                    "top_p": top_p,
+                },
+                message_create_params.MessageCreateParamsNonStreaming,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                post_parser=parser,
+            ),
+            cast_to=cast(Type[ParsedMessage[ResponseFormatT]], Message),
+            stream=False,
+        )
 
     def count_tokens(
         self,
         *,
         messages: Iterable[MessageParam],
         model: ModelParam,
+        output_config: OutputConfigParam | Omit = omit,
+        output_format: None | JSONOutputFormatParam | type | Omit = omit,
         system: Union[str, Iterable[TextBlockParam]] | Omit = omit,
         thinking: ThinkingConfigParam | Omit = omit,
         tool_choice: ToolChoiceParam | Omit = omit,
```
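The merge rule, repeated verbatim in every method above, is simple: a transformed `output_format` always wins the `format` key, while any other keys on a caller-supplied `output_config` are preserved. Reduced to plain dicts:

```python
# The merge performed above, reduced to plain dicts. The "extra_key"
# entry is hypothetical, standing in for any other OutputConfigParam key.
output_config = {"extra_key": "value"}
transformed_output_format = {"type": "json_schema", "schema": {"type": "object"}}

merged = {**output_config, "format": transformed_output_format}
assert merged == {
    "extra_key": "value",
    "format": {"type": "json_schema", "schema": {"type": "object"}},
}
```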
```diff
@@ -1125,6 +1292,14 @@ class Messages(SyncAPIResource):
           [models](https://docs.anthropic.com/en/docs/models-overview) for additional
           details and options.
 
+          output_config: Configuration options for the model's output, such as the output format.
+
+          output_format: A Pydantic model, JSON schema dictionary, or type that will be
+              converted to a JSON schema for structured output. This is a convenience parameter
+              that will be merged into output_config.format. See
+              [structured outputs](https://docs.anthropic.com/en/docs/build-with-claude/structured-outputs)
+              for more details.
+
           system: System prompt.
 
           A system prompt is a way of providing context and instructions to Claude, such
@@ -1228,12 +1403,42 @@
 
           timeout: Override the client-level default timeout for this request, in seconds
         """
+        # Transform output_format if provided
+        transformed_output_format: Optional[JSONOutputFormatParam] | NotGiven = NOT_GIVEN
+
+        if is_dict(output_format):
+            transformed_output_format = cast(JSONOutputFormatParam, output_format)
+        elif is_given(output_format) and output_format is not None:
+            adapted_type: TypeAdapter[type] = TypeAdapter(output_format)
+
+            try:
+                schema = adapted_type.json_schema()
+                transformed_output_format = JSONOutputFormatParam(schema=transform_schema(schema), type="json_schema")
+            except pydantic.errors.PydanticSchemaGenerationError as e:
+                raise TypeError(
+                    (
+                        "Could not generate JSON schema for the given `output_format` type. "
+                        "Use a type that works with `pydantic.TypeAdapter`"
+                    )
+                ) from e
+
+        # Merge output_format into output_config
+        merged_output_config: OutputConfigParam | Omit = omit
+        if is_given(transformed_output_format):
+            if is_given(output_config):
+                merged_output_config = {**output_config, "format": transformed_output_format}
+            else:
+                merged_output_config = {"format": transformed_output_format}
+        elif is_given(output_config):
+            merged_output_config = output_config
+
         return self._post(
             "/v1/messages/count_tokens",
             body=maybe_transform(
                 {
                     "messages": messages,
                     "model": model,
+                    "output_config": merged_output_config,
                     "system": system,
                     "thinking": thinking,
                     "tool_choice": tool_choice,
```
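`count_tokens()` gains the same `output_config`/`output_format` pair, so a token estimate can account for the schema that the real request will carry. A sketch, reusing `client` and `Recipe` from the first example:

```python
# Sketch: token counting for a request that will use structured output.
count = client.messages.count_tokens(
    model="claude-sonnet-4-5",  # illustrative model name
    messages=[{"role": "user", "content": "Give me a pancake recipe."}],
    output_format=Recipe,
)
print(count.input_tokens)
```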
```diff
@@ -1280,6 +1485,7 @@ class AsyncMessages(AsyncAPIResource):
         messages: Iterable[MessageParam],
         model: ModelParam,
         metadata: MetadataParam | Omit = omit,
+        output_config: OutputConfigParam | Omit = omit,
         service_tier: Literal["auto", "standard_only"] | Omit = omit,
         stop_sequences: SequenceNotStr[str] | Omit = omit,
         stream: Literal[False] | Omit = omit,
@@ -1388,6 +1594,8 @@
 
           metadata: An object describing metadata about the request.
 
+          output_config: Configuration options for the model's output, such as the output format.
+
           service_tier: Determines whether to use priority capacity (if available) or standard capacity
               for this request.
 
@@ -1549,6 +1757,7 @@
         model: ModelParam,
         stream: Literal[True],
         metadata: MetadataParam | Omit = omit,
+        output_config: OutputConfigParam | Omit = omit,
         service_tier: Literal["auto", "standard_only"] | Omit = omit,
         stop_sequences: SequenceNotStr[str] | Omit = omit,
         system: Union[str, Iterable[TextBlockParam]] | Omit = omit,
@@ -1660,6 +1869,8 @@
 
           metadata: An object describing metadata about the request.
 
+          output_config: Configuration options for the model's output, such as the output format.
+
           service_tier: Determines whether to use priority capacity (if available) or standard capacity
               for this request.
 
@@ -1817,6 +2028,7 @@
         model: ModelParam,
         stream: bool,
         metadata: MetadataParam | Omit = omit,
+        output_config: OutputConfigParam | Omit = omit,
         service_tier: Literal["auto", "standard_only"] | Omit = omit,
         stop_sequences: SequenceNotStr[str] | Omit = omit,
         system: Union[str, Iterable[TextBlockParam]] | Omit = omit,
@@ -1928,6 +2140,8 @@
 
           metadata: An object describing metadata about the request.
 
+          output_config: Configuration options for the model's output, such as the output format.
+
           service_tier: Determines whether to use priority capacity (if available) or standard capacity
               for this request.
 
@@ -2084,6 +2298,7 @@
         messages: Iterable[MessageParam],
         model: ModelParam,
         metadata: MetadataParam | Omit = omit,
+        output_config: OutputConfigParam | Omit = omit,
         service_tier: Literal["auto", "standard_only"] | Omit = omit,
         stop_sequences: SequenceNotStr[str] | Omit = omit,
         stream: Literal[False] | Literal[True] | Omit = omit,
@@ -2121,6 +2336,7 @@
                     "messages": messages,
                     "model": model,
                     "metadata": metadata,
+                    "output_config": output_config,
                     "service_tier": service_tier,
                     "stop_sequences": stop_sequences,
                     "stream": stream,
@@ -2151,6 +2367,8 @@
         messages: Iterable[MessageParam],
         model: ModelParam,
         metadata: MetadataParam | Omit = omit,
+        output_config: OutputConfigParam | Omit = omit,
+        output_format: None | JSONOutputFormatParam | type[ResponseFormatT] | Omit = omit,
         container: Optional[str] | Omit = omit,
         service_tier: Literal["auto", "standard_only"] | Omit = omit,
         stop_sequences: SequenceNotStr[str] | Omit = omit,
@@ -2167,7 +2385,7 @@
         extra_query: Query | None = None,
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> AsyncMessageStreamManager:
+    ) -> AsyncMessageStreamManager[ResponseFormatT]:
         """Create a Message stream"""
         if model in DEPRECATED_MODELS:
             warnings.warn(
@@ -2181,6 +2399,35 @@
             "X-Stainless-Stream-Helper": "messages",
             **(extra_headers or {}),
         }
+
+        transformed_output_format: Optional[JSONOutputFormatParam] | NotGiven = NOT_GIVEN
+
+        if is_dict(output_format):
+            transformed_output_format = cast(JSONOutputFormatParam, output_format)
+        elif is_given(output_format) and output_format is not None:
+            adapted_type: TypeAdapter[ResponseFormatT] = TypeAdapter(output_format)
+
+            try:
+                schema = adapted_type.json_schema()
+                transformed_output_format = JSONOutputFormatParam(schema=transform_schema(schema), type="json_schema")
+            except pydantic.errors.PydanticSchemaGenerationError as e:
+                raise TypeError(
+                    (
+                        "Could not generate JSON schema for the given `output_format` type. "
+                        "Use a type that works with `pydantic.TypeAdapter`"
+                    )
+                ) from e
+
+        # Merge output_format into output_config
+        merged_output_config: OutputConfigParam | Omit = omit
+        if is_given(transformed_output_format):
+            if is_given(output_config):
+                merged_output_config = {**output_config, "format": transformed_output_format}
+            else:
+                merged_output_config = {"format": transformed_output_format}
+        elif is_given(output_config):
+            merged_output_config = output_config
+
         request = self._post(
             "/v1/messages",
             body=maybe_transform(
@@ -2189,6 +2436,7 @@
                     "messages": messages,
                     "model": model,
                     "metadata": metadata,
+                    "output_config": merged_output_config,
                     "container": container,
                     "service_tier": service_tier,
                     "stop_sequences": stop_sequences,
@@ -2210,13 +2458,129 @@
             stream=True,
             stream_cls=AsyncStream[RawMessageStreamEvent],
         )
-        return AsyncMessageStreamManager(request)
+        return AsyncMessageStreamManager(
+            request,
+            output_format=NOT_GIVEN if is_dict(output_format) else cast(ResponseFormatT, output_format),
+        )
+
+    async def parse(
+        self,
+        *,
+        max_tokens: int,
+        messages: Iterable[MessageParam],
+        model: ModelParam,
+        metadata: MetadataParam | Omit = omit,
+        output_config: OutputConfigParam | Omit = omit,
+        output_format: Optional[type[ResponseFormatT]] | Omit = omit,
+        service_tier: Literal["auto", "standard_only"] | Omit = omit,
+        stop_sequences: SequenceNotStr[str] | Omit = omit,
+        stream: Literal[False] | Literal[True] | Omit = omit,
+        system: Union[str, Iterable[TextBlockParam]] | Omit = omit,
+        temperature: float | Omit = omit,
+        thinking: ThinkingConfigParam | Omit = omit,
+        tool_choice: ToolChoiceParam | Omit = omit,
+        tools: Iterable[ToolUnionParam] | Omit = omit,
+        top_k: int | Omit = omit,
+        top_p: float | Omit = omit,
+        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+        # The extra values given here take precedence over values defined on the client or passed to this method.
+        extra_headers: Headers | None = None,
+        extra_query: Query | None = None,
+        extra_body: Body | None = None,
+        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
+    ) -> ParsedMessage[ResponseFormatT]:
+        if not stream and not is_given(timeout) and self._client.timeout == DEFAULT_TIMEOUT:
+            timeout = self._client._calculate_nonstreaming_timeout(
+                max_tokens, MODEL_NONSTREAMING_TOKENS.get(model, None)
+            )
+
+        if model in DEPRECATED_MODELS:
+            warnings.warn(
+                f"The model '{model}' is deprecated and will reach end-of-life on {DEPRECATED_MODELS[model]}.\nPlease migrate to a newer model. Visit https://docs.anthropic.com/en/docs/resources/model-deprecations for more information.",
+                DeprecationWarning,
+                stacklevel=3,
+            )
+
+        extra_headers = {
+            "X-Stainless-Helper": "messages.parse",
+            **(extra_headers or {}),
+        }
+
+        transformed_output_format: Optional[JSONOutputFormatParam] | NotGiven = NOT_GIVEN
+
+        if is_given(output_format) and output_format is not None:
+            adapted_type: TypeAdapter[ResponseFormatT] = TypeAdapter(output_format)
+
+            try:
+                schema = adapted_type.json_schema()
+                transformed_output_format = JSONOutputFormatParam(schema=transform_schema(schema), type="json_schema")
+            except pydantic.errors.PydanticSchemaGenerationError as e:
+                raise TypeError(
+                    (
+                        "Could not generate JSON schema for the given `output_format` type. "
+                        "Use a type that works with `pydantic.TypeAdapter`"
+                    )
+                ) from e
+
+        def parser(response: Message) -> ParsedMessage[ResponseFormatT]:
+            return parse_response(
+                response=response,
+                output_format=cast(
+                    ResponseFormatT,
+                    output_format if is_given(output_format) and output_format is not None else NOT_GIVEN,
+                ),
+            )
+
+        # Merge output_format into output_config
+        merged_output_config: OutputConfigParam | Omit = omit
+        if is_given(transformed_output_format):
+            if is_given(output_config):
+                merged_output_config = {**output_config, "format": transformed_output_format}
+            else:
+                merged_output_config = {"format": transformed_output_format}
+        elif is_given(output_config):
+            merged_output_config = output_config
+
+        return await self._post(
+            "/v1/messages",
+            body=await async_maybe_transform(
+                {
+                    "max_tokens": max_tokens,
+                    "messages": messages,
+                    "model": model,
+                    "metadata": metadata,
+                    "output_config": merged_output_config,
+                    "service_tier": service_tier,
+                    "stop_sequences": stop_sequences,
+                    "stream": stream,
+                    "system": system,
+                    "temperature": temperature,
+                    "thinking": thinking,
+                    "tool_choice": tool_choice,
+                    "tools": tools,
+                    "top_k": top_k,
+                    "top_p": top_p,
+                },
+                message_create_params.MessageCreateParamsNonStreaming,
+            ),
+            options=make_request_options(
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                post_parser=parser,
+            ),
+            cast_to=cast(Type[ParsedMessage[ResponseFormatT]], Message),
+            stream=False,
+        )
 
     async def count_tokens(
         self,
         *,
         messages: Iterable[MessageParam],
         model: ModelParam,
+        output_config: OutputConfigParam | Omit = omit,
+        output_format: None | JSONOutputFormatParam | type | Omit = omit,
         system: Union[str, Iterable[TextBlockParam]] | Omit = omit,
         thinking: ThinkingConfigParam | Omit = omit,
         tool_choice: ToolChoiceParam | Omit = omit,
```
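The async variant mirrors the sync `parse()` line for line, apart from `await` and `async_maybe_transform`. A self-contained sketch:

```python
# Sketch of the async parse() helper added above; mirrors the sync example.
import asyncio

from pydantic import BaseModel

import anthropic


class Recipe(BaseModel):
    name: str
    ingredients: list[str]


async def main() -> None:
    client = anthropic.AsyncAnthropic()
    # Returns a ParsedMessage[Recipe], same as the sync helper.
    message = await client.messages.parse(
        model="claude-sonnet-4-5",  # illustrative model name
        max_tokens=1024,
        messages=[{"role": "user", "content": "Give me a pancake recipe."}],
        output_format=Recipe,
    )
    print(message.model)


asyncio.run(main())
```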
```diff
@@ -2308,6 +2672,14 @@ class AsyncMessages(AsyncAPIResource):
           [models](https://docs.anthropic.com/en/docs/models-overview) for additional
           details and options.
 
+          output_config: Configuration options for the model's output, such as the output format.
+
+          output_format: A Pydantic model, JSON schema dictionary, or type that will be
+              converted to a JSON schema for structured output. This is a convenience parameter
+              that will be merged into output_config.format. See
+              [structured outputs](https://docs.anthropic.com/en/docs/build-with-claude/structured-outputs)
+              for more details.
+
           system: System prompt.
 
           A system prompt is a way of providing context and instructions to Claude, such
@@ -2411,12 +2783,42 @@
 
           timeout: Override the client-level default timeout for this request, in seconds
         """
+        # Transform output_format if provided
+        transformed_output_format: Optional[JSONOutputFormatParam] | NotGiven = NOT_GIVEN
+
+        if is_dict(output_format):
+            transformed_output_format = cast(JSONOutputFormatParam, output_format)
+        elif is_given(output_format) and output_format is not None:
+            adapted_type: TypeAdapter[type] = TypeAdapter(output_format)
+
+            try:
+                schema = adapted_type.json_schema()
+                transformed_output_format = JSONOutputFormatParam(schema=transform_schema(schema), type="json_schema")
+            except pydantic.errors.PydanticSchemaGenerationError as e:
+                raise TypeError(
+                    (
+                        "Could not generate JSON schema for the given `output_format` type. "
+                        "Use a type that works with `pydantic.TypeAdapter`"
+                    )
+                ) from e
+
+        # Merge output_format into output_config
+        merged_output_config: OutputConfigParam | Omit = omit
+        if is_given(transformed_output_format):
+            if is_given(output_config):
+                merged_output_config = {**output_config, "format": transformed_output_format}
+            else:
+                merged_output_config = {"format": transformed_output_format}
+        elif is_given(output_config):
+            merged_output_config = output_config
+
         return await self._post(
             "/v1/messages/count_tokens",
             body=await async_maybe_transform(
                 {
                     "messages": messages,
                     "model": model,
+                    "output_config": merged_output_config,
                     "system": system,
                     "thinking": thinking,
                     "tool_choice": tool_choice,
```