anthropic-0.69.0-py3-none-any.whl → anthropic-0.71.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. anthropic/_version.py +1 -1
  2. anthropic/lib/__init__.py +1 -0
  3. anthropic/lib/_files.py +42 -0
  4. anthropic/pagination.py +117 -1
  5. anthropic/resources/beta/__init__.py +14 -0
  6. anthropic/resources/beta/beta.py +32 -0
  7. anthropic/resources/beta/messages/messages.py +140 -122
  8. anthropic/resources/beta/skills/__init__.py +33 -0
  9. anthropic/resources/beta/skills/skills.py +680 -0
  10. anthropic/resources/beta/skills/versions.py +658 -0
  11. anthropic/resources/completions.py +36 -42
  12. anthropic/resources/messages/messages.py +90 -96
  13. anthropic/types/anthropic_beta_param.py +3 -0
  14. anthropic/types/beta/__init__.py +9 -0
  15. anthropic/types/beta/beta_container.py +5 -0
  16. anthropic/types/beta/beta_container_params.py +18 -0
  17. anthropic/types/beta/beta_message.py +4 -1
  18. anthropic/types/beta/beta_raw_message_delta_event.py +1 -1
  19. anthropic/types/beta/beta_skill.py +18 -0
  20. anthropic/types/beta/beta_skill_params.py +18 -0
  21. anthropic/types/beta/beta_thinking_config_enabled_param.py +1 -1
  22. anthropic/types/beta/message_count_tokens_params.py +14 -10
  23. anthropic/types/beta/message_create_params.py +25 -19
  24. anthropic/types/beta/messages/batch_create_params.py +1 -0
  25. anthropic/types/beta/skill_create_params.py +31 -0
  26. anthropic/types/beta/skill_create_response.py +49 -0
  27. anthropic/types/beta/skill_delete_response.py +19 -0
  28. anthropic/types/beta/skill_list_params.py +38 -0
  29. anthropic/types/beta/skill_list_response.py +49 -0
  30. anthropic/types/beta/skill_retrieve_response.py +49 -0
  31. anthropic/types/beta/skills/__init__.py +10 -0
  32. anthropic/types/beta/skills/version_create_params.py +24 -0
  33. anthropic/types/beta/skills/version_create_response.py +49 -0
  34. anthropic/types/beta/skills/version_delete_response.py +19 -0
  35. anthropic/types/beta/skills/version_list_params.py +25 -0
  36. anthropic/types/beta/skills/version_list_response.py +49 -0
  37. anthropic/types/beta/skills/version_retrieve_response.py +49 -0
  38. anthropic/types/completion_create_params.py +5 -6
  39. anthropic/types/message_count_tokens_params.py +9 -9
  40. anthropic/types/message_create_params.py +13 -15
  41. anthropic/types/messages/batch_create_params.py +1 -0
  42. anthropic/types/model.py +2 -0
  43. anthropic/types/model_param.py +2 -0
  44. anthropic/types/stop_reason.py +1 -3
  45. anthropic/types/thinking_config_enabled_param.py +1 -1
  46. {anthropic-0.69.0.dist-info → anthropic-0.71.0.dist-info}/METADATA +1 -1
  47. {anthropic-0.69.0.dist-info → anthropic-0.71.0.dist-info}/RECORD +49 -29
  48. {anthropic-0.69.0.dist-info → anthropic-0.71.0.dist-info}/WHEEL +0 -0
  49. {anthropic-0.69.0.dist-info → anthropic-0.71.0.dist-info}/licenses/LICENSE +0 -0
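The headline addition in this file list is the new beta Skills surface: anthropic/resources/beta/skills/skills.py, anthropic/resources/beta/skills/versions.py, and the request/response types under anthropic/types/beta/ and anthropic/types/beta/skills/. Below is a minimal usage sketch assuming the resource layout this SDK uses elsewhere; the client.beta.skills path, method names, and parameters are inferred from the new module names (skill_list_params, version_list_params, and so on) and are not shown directly in this diff.

```python
# Hypothetical sketch only: resource paths, method names, and fields are
# inferred from the new modules listed above, not confirmed by this diff.
from anthropic import Anthropic

client = Anthropic()  # reads ANTHROPIC_API_KEY from the environment

# skill_list_params.py plus the pagination.py changes (+117 lines) suggest
# a pageable list endpoint for skills.
for skill in client.beta.skills.list(limit=20):
    print(skill.id)

# versions.py and version_list_params.py suggest a nested versions resource
# keyed by the parent skill's identifier (placeholder id below).
for version in client.beta.skills.versions.list(skill_id="skill_012345"):
    print(version)
```

If the resource follows the same pattern as the existing messages batches resource, create, retrieve, and delete counterparts (suggested by skill_create_params.py, skill_retrieve_response.py, and skill_delete_response.py) would sit alongside list.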
@@ -92,7 +92,7 @@ class Messages(SyncAPIResource):
  max_tokens: int,
  messages: Iterable[BetaMessageParam],
  model: ModelParam,
- container: Optional[str] | Omit = omit,
+ container: Optional[message_create_params.Container] | Omit = omit,
  context_management: Optional[BetaContextManagementConfigParam] | Omit = omit,
  mcp_servers: Iterable[BetaRequestMCPServerURLDefinitionParam] | Omit = omit,
  metadata: BetaMetadataParam | Omit = omit,
@@ -130,7 +130,7 @@ class Messages(SyncAPIResource):
  only specifies the absolute maximum number of tokens to generate.

  Different models have different maximum values for this parameter. See
- [models](https://docs.anthropic.com/en/docs/models-overview) for details.
+ [models](https://docs.claude.com/en/docs/models-overview) for details.

  messages: Input messages.

@@ -189,12 +189,12 @@ class Messages(SyncAPIResource):
  { "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] }
  ```

- See [input examples](https://docs.anthropic.com/en/api/messages-examples).
+ See [input examples](https://docs.claude.com/en/api/messages-examples).

  Note that if you want to include a
- [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use
- the top-level `system` parameter — there is no `"system"` role for input
- messages in the Messages API.
+ [system prompt](https://docs.claude.com/en/docs/system-prompts), you can use the
+ top-level `system` parameter — there is no `"system"` role for input messages in
+ the Messages API.

  There is a limit of 100,000 messages in a single request.

@@ -204,7 +204,10 @@ class Messages(SyncAPIResource):

  container: Container identifier for reuse across requests.

- context_management: Configuration for context management operations.
+ context_management: Context management configuration.
+
+ This allows you to control how Claude manages context across multiple requests,
+ such as whether to clear function results or not.

  mcp_servers: MCP servers to be utilized in this request

@@ -214,7 +217,7 @@ class Messages(SyncAPIResource):
  for this request.

  Anthropic offers different levels of service for your API requests. See
- [service-tiers](https://docs.anthropic.com/en/api/service-tiers) for details.
+ [service-tiers](https://docs.claude.com/en/api/service-tiers) for details.

  stop_sequences: Custom text sequences that will cause the model to stop generating.

@@ -228,14 +231,13 @@ class Messages(SyncAPIResource):

  stream: Whether to incrementally stream the response using server-sent events.

- See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for
- details.
+ See [streaming](https://docs.claude.com/en/api/messages-streaming) for details.

  system: System prompt.

  A system prompt is a way of providing context and instructions to Claude, such
  as specifying a particular goal or role. See our
- [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts).
+ [guide to system prompts](https://docs.claude.com/en/docs/system-prompts).

  temperature: Amount of randomness injected into the response.

@@ -253,7 +255,7 @@ class Messages(SyncAPIResource):
  tokens and counts towards your `max_tokens` limit.

  See
- [extended thinking](https://docs.anthropic.com/en/docs/build-with-claude/extended-thinking)
+ [extended thinking](https://docs.claude.com/en/docs/build-with-claude/extended-thinking)
  for details.

  tool_choice: How the model should use the provided tools. The model can use a specific tool,
@@ -268,9 +270,9 @@ class Messages(SyncAPIResource):

  There are two types of tools: **client tools** and **server tools**. The
  behavior described below applies to client tools. For
- [server tools](https://docs.anthropic.com/en/docs/agents-and-tools/tool-use/overview#server-tools),
+ [server tools](https://docs.claude.com/en/docs/agents-and-tools/tool-use/overview#server-tools),
  see their individual documentation as each has its own behavior (e.g., the
- [web search tool](https://docs.anthropic.com/en/docs/agents-and-tools/tool-use/web-search-tool)).
+ [web search tool](https://docs.claude.com/en/docs/agents-and-tools/tool-use/web-search-tool)).

  Each tool definition includes:

@@ -333,7 +335,7 @@ class Messages(SyncAPIResource):
  functions, or more generally whenever you want the model to produce a particular
  JSON structure of output.

- See our [guide](https://docs.anthropic.com/en/docs/tool-use) for more details.
+ See our [guide](https://docs.claude.com/en/docs/tool-use) for more details.

  top_k: Only sample from the top K options for each subsequent token.

@@ -373,7 +375,7 @@ class Messages(SyncAPIResource):
  messages: Iterable[BetaMessageParam],
  model: ModelParam,
  stream: Literal[True],
- container: Optional[str] | Omit = omit,
+ container: Optional[message_create_params.Container] | Omit = omit,
  context_management: Optional[BetaContextManagementConfigParam] | Omit = omit,
  mcp_servers: Iterable[BetaRequestMCPServerURLDefinitionParam] | Omit = omit,
  metadata: BetaMetadataParam | Omit = omit,
@@ -410,7 +412,7 @@ class Messages(SyncAPIResource):
  only specifies the absolute maximum number of tokens to generate.

  Different models have different maximum values for this parameter. See
- [models](https://docs.anthropic.com/en/docs/models-overview) for details.
+ [models](https://docs.claude.com/en/docs/models-overview) for details.

  messages: Input messages.

@@ -469,12 +471,12 @@ class Messages(SyncAPIResource):
  { "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] }
  ```

- See [input examples](https://docs.anthropic.com/en/api/messages-examples).
+ See [input examples](https://docs.claude.com/en/api/messages-examples).

  Note that if you want to include a
- [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use
- the top-level `system` parameter — there is no `"system"` role for input
- messages in the Messages API.
+ [system prompt](https://docs.claude.com/en/docs/system-prompts), you can use the
+ top-level `system` parameter — there is no `"system"` role for input messages in
+ the Messages API.

  There is a limit of 100,000 messages in a single request.

@@ -484,12 +486,14 @@ class Messages(SyncAPIResource):

  stream: Whether to incrementally stream the response using server-sent events.

- See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for
- details.
+ See [streaming](https://docs.claude.com/en/api/messages-streaming) for details.

  container: Container identifier for reuse across requests.

- context_management: Configuration for context management operations.
+ context_management: Context management configuration.
+
+ This allows you to control how Claude manages context across multiple requests,
+ such as whether to clear function results or not.

  mcp_servers: MCP servers to be utilized in this request

@@ -499,7 +503,7 @@ class Messages(SyncAPIResource):
  for this request.

  Anthropic offers different levels of service for your API requests. See
- [service-tiers](https://docs.anthropic.com/en/api/service-tiers) for details.
+ [service-tiers](https://docs.claude.com/en/api/service-tiers) for details.

  stop_sequences: Custom text sequences that will cause the model to stop generating.

@@ -515,7 +519,7 @@ class Messages(SyncAPIResource):

  A system prompt is a way of providing context and instructions to Claude, such
  as specifying a particular goal or role. See our
- [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts).
+ [guide to system prompts](https://docs.claude.com/en/docs/system-prompts).

  temperature: Amount of randomness injected into the response.

@@ -533,7 +537,7 @@ class Messages(SyncAPIResource):
  tokens and counts towards your `max_tokens` limit.

  See
- [extended thinking](https://docs.anthropic.com/en/docs/build-with-claude/extended-thinking)
+ [extended thinking](https://docs.claude.com/en/docs/build-with-claude/extended-thinking)
  for details.

  tool_choice: How the model should use the provided tools. The model can use a specific tool,
@@ -548,9 +552,9 @@ class Messages(SyncAPIResource):

  There are two types of tools: **client tools** and **server tools**. The
  behavior described below applies to client tools. For
- [server tools](https://docs.anthropic.com/en/docs/agents-and-tools/tool-use/overview#server-tools),
+ [server tools](https://docs.claude.com/en/docs/agents-and-tools/tool-use/overview#server-tools),
  see their individual documentation as each has its own behavior (e.g., the
- [web search tool](https://docs.anthropic.com/en/docs/agents-and-tools/tool-use/web-search-tool)).
+ [web search tool](https://docs.claude.com/en/docs/agents-and-tools/tool-use/web-search-tool)).

  Each tool definition includes:

@@ -613,7 +617,7 @@ class Messages(SyncAPIResource):
  functions, or more generally whenever you want the model to produce a particular
  JSON structure of output.

- See our [guide](https://docs.anthropic.com/en/docs/tool-use) for more details.
+ See our [guide](https://docs.claude.com/en/docs/tool-use) for more details.

  top_k: Only sample from the top K options for each subsequent token.

@@ -653,7 +657,7 @@ class Messages(SyncAPIResource):
  messages: Iterable[BetaMessageParam],
  model: ModelParam,
  stream: bool,
- container: Optional[str] | Omit = omit,
+ container: Optional[message_create_params.Container] | Omit = omit,
  context_management: Optional[BetaContextManagementConfigParam] | Omit = omit,
  mcp_servers: Iterable[BetaRequestMCPServerURLDefinitionParam] | Omit = omit,
  metadata: BetaMetadataParam | Omit = omit,
@@ -690,7 +694,7 @@ class Messages(SyncAPIResource):
  only specifies the absolute maximum number of tokens to generate.

  Different models have different maximum values for this parameter. See
- [models](https://docs.anthropic.com/en/docs/models-overview) for details.
+ [models](https://docs.claude.com/en/docs/models-overview) for details.

  messages: Input messages.

@@ -749,12 +753,12 @@ class Messages(SyncAPIResource):
  { "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] }
  ```

- See [input examples](https://docs.anthropic.com/en/api/messages-examples).
+ See [input examples](https://docs.claude.com/en/api/messages-examples).

  Note that if you want to include a
- [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use
- the top-level `system` parameter — there is no `"system"` role for input
- messages in the Messages API.
+ [system prompt](https://docs.claude.com/en/docs/system-prompts), you can use the
+ top-level `system` parameter — there is no `"system"` role for input messages in
+ the Messages API.

  There is a limit of 100,000 messages in a single request.

@@ -764,12 +768,14 @@ class Messages(SyncAPIResource):

  stream: Whether to incrementally stream the response using server-sent events.

- See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for
- details.
+ See [streaming](https://docs.claude.com/en/api/messages-streaming) for details.

  container: Container identifier for reuse across requests.

- context_management: Configuration for context management operations.
+ context_management: Context management configuration.
+
+ This allows you to control how Claude manages context across multiple requests,
+ such as whether to clear function results or not.

  mcp_servers: MCP servers to be utilized in this request

@@ -779,7 +785,7 @@ class Messages(SyncAPIResource):
  for this request.

  Anthropic offers different levels of service for your API requests. See
- [service-tiers](https://docs.anthropic.com/en/api/service-tiers) for details.
+ [service-tiers](https://docs.claude.com/en/api/service-tiers) for details.

  stop_sequences: Custom text sequences that will cause the model to stop generating.

@@ -795,7 +801,7 @@ class Messages(SyncAPIResource):

  A system prompt is a way of providing context and instructions to Claude, such
  as specifying a particular goal or role. See our
- [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts).
+ [guide to system prompts](https://docs.claude.com/en/docs/system-prompts).

  temperature: Amount of randomness injected into the response.

@@ -813,7 +819,7 @@ class Messages(SyncAPIResource):
  tokens and counts towards your `max_tokens` limit.

  See
- [extended thinking](https://docs.anthropic.com/en/docs/build-with-claude/extended-thinking)
+ [extended thinking](https://docs.claude.com/en/docs/build-with-claude/extended-thinking)
  for details.

  tool_choice: How the model should use the provided tools. The model can use a specific tool,
@@ -828,9 +834,9 @@ class Messages(SyncAPIResource):

  There are two types of tools: **client tools** and **server tools**. The
  behavior described below applies to client tools. For
- [server tools](https://docs.anthropic.com/en/docs/agents-and-tools/tool-use/overview#server-tools),
+ [server tools](https://docs.claude.com/en/docs/agents-and-tools/tool-use/overview#server-tools),
  see their individual documentation as each has its own behavior (e.g., the
- [web search tool](https://docs.anthropic.com/en/docs/agents-and-tools/tool-use/web-search-tool)).
+ [web search tool](https://docs.claude.com/en/docs/agents-and-tools/tool-use/web-search-tool)).

  Each tool definition includes:

@@ -893,7 +899,7 @@ class Messages(SyncAPIResource):
  functions, or more generally whenever you want the model to produce a particular
  JSON structure of output.

- See our [guide](https://docs.anthropic.com/en/docs/tool-use) for more details.
+ See our [guide](https://docs.claude.com/en/docs/tool-use) for more details.

  top_k: Only sample from the top K options for each subsequent token.

@@ -932,7 +938,7 @@ class Messages(SyncAPIResource):
  max_tokens: int,
  messages: Iterable[BetaMessageParam],
  model: ModelParam,
- container: Optional[str] | Omit = omit,
+ container: Optional[message_create_params.Container] | Omit = omit,
  context_management: Optional[BetaContextManagementConfigParam] | Omit = omit,
  mcp_servers: Iterable[BetaRequestMCPServerURLDefinitionParam] | Omit = omit,
  metadata: BetaMetadataParam | Omit = omit,
@@ -1013,7 +1019,7 @@ class Messages(SyncAPIResource):
  model: ModelParam,
  tools: Iterable[BetaRunnableTool],
  max_iterations: int | Omit = omit,
- container: Optional[str] | Omit = omit,
+ container: Optional[message_create_params.Container] | Omit = omit,
  context_management: Optional[BetaContextManagementConfigParam] | Omit = omit,
  mcp_servers: Iterable[BetaRequestMCPServerURLDefinitionParam] | Omit = omit,
  metadata: BetaMetadataParam | Omit = omit,
@@ -1045,7 +1051,7 @@ class Messages(SyncAPIResource):
  tools: Iterable[BetaRunnableTool],
  stream: Literal[True],
  max_iterations: int | Omit = omit,
- container: Optional[str] | Omit = omit,
+ container: Optional[message_create_params.Container] | Omit = omit,
  context_management: Optional[BetaContextManagementConfigParam] | Omit = omit,
  mcp_servers: Iterable[BetaRequestMCPServerURLDefinitionParam] | Omit = omit,
  metadata: BetaMetadataParam | Omit = omit,
@@ -1076,7 +1082,7 @@ class Messages(SyncAPIResource):
  tools: Iterable[BetaRunnableTool],
  stream: bool,
  max_iterations: int | Omit = omit,
- container: Optional[str] | Omit = omit,
+ container: Optional[message_create_params.Container] | Omit = omit,
  context_management: Optional[BetaContextManagementConfigParam] | Omit = omit,
  mcp_servers: Iterable[BetaRequestMCPServerURLDefinitionParam] | Omit = omit,
  metadata: BetaMetadataParam | Omit = omit,
@@ -1105,7 +1111,7 @@ class Messages(SyncAPIResource):
  model: ModelParam,
  tools: Iterable[BetaRunnableTool],
  max_iterations: int | Omit = omit,
- container: Optional[str] | Omit = omit,
+ container: Optional[message_create_params.Container] | Omit = omit,
  context_management: Optional[BetaContextManagementConfigParam] | Omit = omit,
  mcp_servers: Iterable[BetaRequestMCPServerURLDefinitionParam] | Omit = omit,
  metadata: BetaMetadataParam | Omit = omit,
@@ -1194,7 +1200,7 @@ class Messages(SyncAPIResource):
  max_tokens: int,
  messages: Iterable[BetaMessageParam],
  model: ModelParam,
- container: Optional[str] | Omit = omit,
+ container: Optional[message_create_params.Container] | Omit = omit,
  context_management: Optional[BetaContextManagementConfigParam] | Omit = omit,
  mcp_servers: Iterable[BetaRequestMCPServerURLDefinitionParam] | Omit = omit,
  metadata: BetaMetadataParam | Omit = omit,
@@ -1348,12 +1354,12 @@ class Messages(SyncAPIResource):
  { "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] }
  ```

- See [input examples](https://docs.anthropic.com/en/api/messages-examples).
+ See [input examples](https://docs.claude.com/en/api/messages-examples).

  Note that if you want to include a
- [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use
- the top-level `system` parameter — there is no `"system"` role for input
- messages in the Messages API.
+ [system prompt](https://docs.claude.com/en/docs/system-prompts), you can use the
+ top-level `system` parameter — there is no `"system"` role for input messages in
+ the Messages API.

  There is a limit of 100,000 messages in a single request.

@@ -1361,7 +1367,10 @@ class Messages(SyncAPIResource):
  [models](https://docs.anthropic.com/en/docs/models-overview) for additional
  details and options.

- context_management: Configuration for context management operations.
+ context_management: Context management configuration.
+
+ This allows you to control how Claude manages context across multiple requests,
+ such as whether to clear function results or not.

  mcp_servers: MCP servers to be utilized in this request

@@ -1369,7 +1378,7 @@ class Messages(SyncAPIResource):

  A system prompt is a way of providing context and instructions to Claude, such
  as specifying a particular goal or role. See our
- [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts).
+ [guide to system prompts](https://docs.claude.com/en/docs/system-prompts).

  thinking: Configuration for enabling Claude's extended thinking.

@@ -1378,7 +1387,7 @@ class Messages(SyncAPIResource):
  tokens and counts towards your `max_tokens` limit.

  See
- [extended thinking](https://docs.anthropic.com/en/docs/build-with-claude/extended-thinking)
+ [extended thinking](https://docs.claude.com/en/docs/build-with-claude/extended-thinking)
  for details.

  tool_choice: How the model should use the provided tools. The model can use a specific tool,
@@ -1393,9 +1402,9 @@ class Messages(SyncAPIResource):

  There are two types of tools: **client tools** and **server tools**. The
  behavior described below applies to client tools. For
- [server tools](https://docs.anthropic.com/en/docs/agents-and-tools/tool-use/overview#server-tools),
+ [server tools](https://docs.claude.com/en/docs/agents-and-tools/tool-use/overview#server-tools),
  see their individual documentation as each has its own behavior (e.g., the
- [web search tool](https://docs.anthropic.com/en/docs/agents-and-tools/tool-use/web-search-tool)).
+ [web search tool](https://docs.claude.com/en/docs/agents-and-tools/tool-use/web-search-tool)).

  Each tool definition includes:

@@ -1458,7 +1467,7 @@ class Messages(SyncAPIResource):
  functions, or more generally whenever you want the model to produce a particular
  JSON structure of output.

- See our [guide](https://docs.anthropic.com/en/docs/tool-use) for more details.
+ See our [guide](https://docs.claude.com/en/docs/tool-use) for more details.

  betas: Optional header to specify the beta version(s) you want to use.

@@ -1534,7 +1543,7 @@ class AsyncMessages(AsyncAPIResource):
  max_tokens: int,
  messages: Iterable[BetaMessageParam],
  model: ModelParam,
- container: Optional[str] | Omit = omit,
+ container: Optional[message_create_params.Container] | Omit = omit,
  context_management: Optional[BetaContextManagementConfigParam] | Omit = omit,
  mcp_servers: Iterable[BetaRequestMCPServerURLDefinitionParam] | Omit = omit,
  metadata: BetaMetadataParam | Omit = omit,
@@ -1572,7 +1581,7 @@ class AsyncMessages(AsyncAPIResource):
  only specifies the absolute maximum number of tokens to generate.

  Different models have different maximum values for this parameter. See
- [models](https://docs.anthropic.com/en/docs/models-overview) for details.
+ [models](https://docs.claude.com/en/docs/models-overview) for details.

  messages: Input messages.

@@ -1631,12 +1640,12 @@ class AsyncMessages(AsyncAPIResource):
  { "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] }
  ```

- See [input examples](https://docs.anthropic.com/en/api/messages-examples).
+ See [input examples](https://docs.claude.com/en/api/messages-examples).

  Note that if you want to include a
- [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use
- the top-level `system` parameter — there is no `"system"` role for input
- messages in the Messages API.
+ [system prompt](https://docs.claude.com/en/docs/system-prompts), you can use the
+ top-level `system` parameter — there is no `"system"` role for input messages in
+ the Messages API.

  There is a limit of 100,000 messages in a single request.

@@ -1646,7 +1655,10 @@ class AsyncMessages(AsyncAPIResource):

  container: Container identifier for reuse across requests.

- context_management: Configuration for context management operations.
+ context_management: Context management configuration.
+
+ This allows you to control how Claude manages context across multiple requests,
+ such as whether to clear function results or not.

  mcp_servers: MCP servers to be utilized in this request

@@ -1656,7 +1668,7 @@ class AsyncMessages(AsyncAPIResource):
  for this request.

  Anthropic offers different levels of service for your API requests. See
- [service-tiers](https://docs.anthropic.com/en/api/service-tiers) for details.
+ [service-tiers](https://docs.claude.com/en/api/service-tiers) for details.

  stop_sequences: Custom text sequences that will cause the model to stop generating.

@@ -1670,14 +1682,13 @@ class AsyncMessages(AsyncAPIResource):

  stream: Whether to incrementally stream the response using server-sent events.

- See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for
- details.
+ See [streaming](https://docs.claude.com/en/api/messages-streaming) for details.

  system: System prompt.

  A system prompt is a way of providing context and instructions to Claude, such
  as specifying a particular goal or role. See our
- [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts).
+ [guide to system prompts](https://docs.claude.com/en/docs/system-prompts).

  temperature: Amount of randomness injected into the response.

@@ -1695,7 +1706,7 @@ class AsyncMessages(AsyncAPIResource):
  tokens and counts towards your `max_tokens` limit.

  See
- [extended thinking](https://docs.anthropic.com/en/docs/build-with-claude/extended-thinking)
+ [extended thinking](https://docs.claude.com/en/docs/build-with-claude/extended-thinking)
  for details.

  tool_choice: How the model should use the provided tools. The model can use a specific tool,
@@ -1710,9 +1721,9 @@ class AsyncMessages(AsyncAPIResource):

  There are two types of tools: **client tools** and **server tools**. The
  behavior described below applies to client tools. For
- [server tools](https://docs.anthropic.com/en/docs/agents-and-tools/tool-use/overview#server-tools),
+ [server tools](https://docs.claude.com/en/docs/agents-and-tools/tool-use/overview#server-tools),
  see their individual documentation as each has its own behavior (e.g., the
- [web search tool](https://docs.anthropic.com/en/docs/agents-and-tools/tool-use/web-search-tool)).
+ [web search tool](https://docs.claude.com/en/docs/agents-and-tools/tool-use/web-search-tool)).

  Each tool definition includes:

@@ -1775,7 +1786,7 @@ class AsyncMessages(AsyncAPIResource):
  functions, or more generally whenever you want the model to produce a particular
  JSON structure of output.

- See our [guide](https://docs.anthropic.com/en/docs/tool-use) for more details.
+ See our [guide](https://docs.claude.com/en/docs/tool-use) for more details.

  top_k: Only sample from the top K options for each subsequent token.

@@ -1815,7 +1826,7 @@ class AsyncMessages(AsyncAPIResource):
  messages: Iterable[BetaMessageParam],
  model: ModelParam,
  stream: Literal[True],
- container: Optional[str] | Omit = omit,
+ container: Optional[message_create_params.Container] | Omit = omit,
  context_management: Optional[BetaContextManagementConfigParam] | Omit = omit,
  mcp_servers: Iterable[BetaRequestMCPServerURLDefinitionParam] | Omit = omit,
  metadata: BetaMetadataParam | Omit = omit,
@@ -1852,7 +1863,7 @@ class AsyncMessages(AsyncAPIResource):
  only specifies the absolute maximum number of tokens to generate.

  Different models have different maximum values for this parameter. See
- [models](https://docs.anthropic.com/en/docs/models-overview) for details.
+ [models](https://docs.claude.com/en/docs/models-overview) for details.

  messages: Input messages.

@@ -1911,12 +1922,12 @@ class AsyncMessages(AsyncAPIResource):
  { "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] }
  ```

- See [input examples](https://docs.anthropic.com/en/api/messages-examples).
+ See [input examples](https://docs.claude.com/en/api/messages-examples).

  Note that if you want to include a
- [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use
- the top-level `system` parameter — there is no `"system"` role for input
- messages in the Messages API.
+ [system prompt](https://docs.claude.com/en/docs/system-prompts), you can use the
+ top-level `system` parameter — there is no `"system"` role for input messages in
+ the Messages API.

  There is a limit of 100,000 messages in a single request.

@@ -1926,12 +1937,14 @@ class AsyncMessages(AsyncAPIResource):

  stream: Whether to incrementally stream the response using server-sent events.

- See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for
- details.
+ See [streaming](https://docs.claude.com/en/api/messages-streaming) for details.

  container: Container identifier for reuse across requests.

- context_management: Configuration for context management operations.
+ context_management: Context management configuration.
+
+ This allows you to control how Claude manages context across multiple requests,
+ such as whether to clear function results or not.

  mcp_servers: MCP servers to be utilized in this request

@@ -1941,7 +1954,7 @@ class AsyncMessages(AsyncAPIResource):
  for this request.

  Anthropic offers different levels of service for your API requests. See
- [service-tiers](https://docs.anthropic.com/en/api/service-tiers) for details.
+ [service-tiers](https://docs.claude.com/en/api/service-tiers) for details.

  stop_sequences: Custom text sequences that will cause the model to stop generating.

@@ -1957,7 +1970,7 @@ class AsyncMessages(AsyncAPIResource):

  A system prompt is a way of providing context and instructions to Claude, such
  as specifying a particular goal or role. See our
- [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts).
+ [guide to system prompts](https://docs.claude.com/en/docs/system-prompts).

  temperature: Amount of randomness injected into the response.

@@ -1975,7 +1988,7 @@ class AsyncMessages(AsyncAPIResource):
  tokens and counts towards your `max_tokens` limit.

  See
- [extended thinking](https://docs.anthropic.com/en/docs/build-with-claude/extended-thinking)
+ [extended thinking](https://docs.claude.com/en/docs/build-with-claude/extended-thinking)
  for details.

  tool_choice: How the model should use the provided tools. The model can use a specific tool,
@@ -1990,9 +2003,9 @@ class AsyncMessages(AsyncAPIResource):

  There are two types of tools: **client tools** and **server tools**. The
  behavior described below applies to client tools. For
- [server tools](https://docs.anthropic.com/en/docs/agents-and-tools/tool-use/overview#server-tools),
+ [server tools](https://docs.claude.com/en/docs/agents-and-tools/tool-use/overview#server-tools),
  see their individual documentation as each has its own behavior (e.g., the
- [web search tool](https://docs.anthropic.com/en/docs/agents-and-tools/tool-use/web-search-tool)).
+ [web search tool](https://docs.claude.com/en/docs/agents-and-tools/tool-use/web-search-tool)).

  Each tool definition includes:

@@ -2055,7 +2068,7 @@ class AsyncMessages(AsyncAPIResource):
  functions, or more generally whenever you want the model to produce a particular
  JSON structure of output.

- See our [guide](https://docs.anthropic.com/en/docs/tool-use) for more details.
+ See our [guide](https://docs.claude.com/en/docs/tool-use) for more details.

  top_k: Only sample from the top K options for each subsequent token.

@@ -2095,7 +2108,7 @@ class AsyncMessages(AsyncAPIResource):
  messages: Iterable[BetaMessageParam],
  model: ModelParam,
  stream: bool,
- container: Optional[str] | Omit = omit,
+ container: Optional[message_create_params.Container] | Omit = omit,
  context_management: Optional[BetaContextManagementConfigParam] | Omit = omit,
  mcp_servers: Iterable[BetaRequestMCPServerURLDefinitionParam] | Omit = omit,
  metadata: BetaMetadataParam | Omit = omit,
@@ -2132,7 +2145,7 @@ class AsyncMessages(AsyncAPIResource):
  only specifies the absolute maximum number of tokens to generate.

  Different models have different maximum values for this parameter. See
- [models](https://docs.anthropic.com/en/docs/models-overview) for details.
+ [models](https://docs.claude.com/en/docs/models-overview) for details.

  messages: Input messages.

@@ -2191,12 +2204,12 @@ class AsyncMessages(AsyncAPIResource):
  { "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] }
  ```

- See [input examples](https://docs.anthropic.com/en/api/messages-examples).
+ See [input examples](https://docs.claude.com/en/api/messages-examples).

  Note that if you want to include a
- [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use
- the top-level `system` parameter — there is no `"system"` role for input
- messages in the Messages API.
+ [system prompt](https://docs.claude.com/en/docs/system-prompts), you can use the
+ top-level `system` parameter — there is no `"system"` role for input messages in
+ the Messages API.

  There is a limit of 100,000 messages in a single request.

@@ -2206,12 +2219,14 @@ class AsyncMessages(AsyncAPIResource):

  stream: Whether to incrementally stream the response using server-sent events.

- See [streaming](https://docs.anthropic.com/en/api/messages-streaming) for
- details.
+ See [streaming](https://docs.claude.com/en/api/messages-streaming) for details.

  container: Container identifier for reuse across requests.

- context_management: Configuration for context management operations.
+ context_management: Context management configuration.
+
+ This allows you to control how Claude manages context across multiple requests,
+ such as whether to clear function results or not.

  mcp_servers: MCP servers to be utilized in this request

@@ -2221,7 +2236,7 @@ class AsyncMessages(AsyncAPIResource):
  for this request.

  Anthropic offers different levels of service for your API requests. See
- [service-tiers](https://docs.anthropic.com/en/api/service-tiers) for details.
+ [service-tiers](https://docs.claude.com/en/api/service-tiers) for details.

  stop_sequences: Custom text sequences that will cause the model to stop generating.

@@ -2237,7 +2252,7 @@ class AsyncMessages(AsyncAPIResource):

  A system prompt is a way of providing context and instructions to Claude, such
  as specifying a particular goal or role. See our
- [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts).
+ [guide to system prompts](https://docs.claude.com/en/docs/system-prompts).

  temperature: Amount of randomness injected into the response.

@@ -2255,7 +2270,7 @@ class AsyncMessages(AsyncAPIResource):
  tokens and counts towards your `max_tokens` limit.

  See
- [extended thinking](https://docs.anthropic.com/en/docs/build-with-claude/extended-thinking)
+ [extended thinking](https://docs.claude.com/en/docs/build-with-claude/extended-thinking)
  for details.

  tool_choice: How the model should use the provided tools. The model can use a specific tool,
@@ -2270,9 +2285,9 @@ class AsyncMessages(AsyncAPIResource):

  There are two types of tools: **client tools** and **server tools**. The
  behavior described below applies to client tools. For
- [server tools](https://docs.anthropic.com/en/docs/agents-and-tools/tool-use/overview#server-tools),
+ [server tools](https://docs.claude.com/en/docs/agents-and-tools/tool-use/overview#server-tools),
  see their individual documentation as each has its own behavior (e.g., the
- [web search tool](https://docs.anthropic.com/en/docs/agents-and-tools/tool-use/web-search-tool)).
+ [web search tool](https://docs.claude.com/en/docs/agents-and-tools/tool-use/web-search-tool)).

  Each tool definition includes:

@@ -2335,7 +2350,7 @@ class AsyncMessages(AsyncAPIResource):
  functions, or more generally whenever you want the model to produce a particular
  JSON structure of output.

- See our [guide](https://docs.anthropic.com/en/docs/tool-use) for more details.
+ See our [guide](https://docs.claude.com/en/docs/tool-use) for more details.

  top_k: Only sample from the top K options for each subsequent token.

@@ -2374,7 +2389,7 @@ class AsyncMessages(AsyncAPIResource):
  max_tokens: int,
  messages: Iterable[BetaMessageParam],
  model: ModelParam,
- container: Optional[str] | Omit = omit,
+ container: Optional[message_create_params.Container] | Omit = omit,
  context_management: Optional[BetaContextManagementConfigParam] | Omit = omit,
  mcp_servers: Iterable[BetaRequestMCPServerURLDefinitionParam] | Omit = omit,
  metadata: BetaMetadataParam | Omit = omit,
@@ -2455,7 +2470,7 @@ class AsyncMessages(AsyncAPIResource):
  model: ModelParam,
  tools: Iterable[BetaAsyncRunnableTool],
  max_iterations: int | Omit = omit,
- container: Optional[str] | Omit = omit,
+ container: Optional[message_create_params.Container] | Omit = omit,
  context_management: Optional[BetaContextManagementConfigParam] | Omit = omit,
  mcp_servers: Iterable[BetaRequestMCPServerURLDefinitionParam] | Omit = omit,
  metadata: BetaMetadataParam | Omit = omit,
@@ -2487,7 +2502,7 @@ class AsyncMessages(AsyncAPIResource):
  tools: Iterable[BetaAsyncRunnableTool],
  stream: Literal[True],
  max_iterations: int | Omit = omit,
- container: Optional[str] | Omit = omit,
+ container: Optional[message_create_params.Container] | Omit = omit,
  context_management: Optional[BetaContextManagementConfigParam] | Omit = omit,
  mcp_servers: Iterable[BetaRequestMCPServerURLDefinitionParam] | Omit = omit,
  metadata: BetaMetadataParam | Omit = omit,
@@ -2518,7 +2533,7 @@ class AsyncMessages(AsyncAPIResource):
  tools: Iterable[BetaAsyncRunnableTool],
  stream: bool,
  max_iterations: int | Omit = omit,
- container: Optional[str] | Omit = omit,
+ container: Optional[message_create_params.Container] | Omit = omit,
  context_management: Optional[BetaContextManagementConfigParam] | Omit = omit,
  mcp_servers: Iterable[BetaRequestMCPServerURLDefinitionParam] | Omit = omit,
  metadata: BetaMetadataParam | Omit = omit,
@@ -2547,7 +2562,7 @@ class AsyncMessages(AsyncAPIResource):
  model: ModelParam,
  tools: Iterable[BetaAsyncRunnableTool],
  max_iterations: int | Omit = omit,
- container: Optional[str] | Omit = omit,
+ container: Optional[message_create_params.Container] | Omit = omit,
  context_management: Optional[BetaContextManagementConfigParam] | Omit = omit,
  mcp_servers: Iterable[BetaRequestMCPServerURLDefinitionParam] | Omit = omit,
  metadata: BetaMetadataParam | Omit = omit,
@@ -2637,7 +2652,7 @@ class AsyncMessages(AsyncAPIResource):
  messages: Iterable[BetaMessageParam],
  model: ModelParam,
  metadata: BetaMetadataParam | Omit = omit,
- container: Optional[str] | Omit = omit,
+ container: Optional[message_create_params.Container] | Omit = omit,
  context_management: Optional[BetaContextManagementConfigParam] | Omit = omit,
  mcp_servers: Iterable[BetaRequestMCPServerURLDefinitionParam] | Omit = omit,
  service_tier: Literal["auto", "standard_only"] | Omit = omit,
@@ -2788,12 +2803,12 @@ class AsyncMessages(AsyncAPIResource):
  { "role": "user", "content": [{ "type": "text", "text": "Hello, Claude" }] }
  ```

- See [input examples](https://docs.anthropic.com/en/api/messages-examples).
+ See [input examples](https://docs.claude.com/en/api/messages-examples).

  Note that if you want to include a
- [system prompt](https://docs.anthropic.com/en/docs/system-prompts), you can use
- the top-level `system` parameter — there is no `"system"` role for input
- messages in the Messages API.
+ [system prompt](https://docs.claude.com/en/docs/system-prompts), you can use the
+ top-level `system` parameter — there is no `"system"` role for input messages in
+ the Messages API.

  There is a limit of 100,000 messages in a single request.

@@ -2801,7 +2816,10 @@ class AsyncMessages(AsyncAPIResource):
  [models](https://docs.anthropic.com/en/docs/models-overview) for additional
  details and options.

- context_management: Configuration for context management operations.
+ context_management: Context management configuration.
+
+ This allows you to control how Claude manages context across multiple requests,
+ such as whether to clear function results or not.

  mcp_servers: MCP servers to be utilized in this request

@@ -2809,7 +2827,7 @@ class AsyncMessages(AsyncAPIResource):

  A system prompt is a way of providing context and instructions to Claude, such
  as specifying a particular goal or role. See our
- [guide to system prompts](https://docs.anthropic.com/en/docs/system-prompts).
+ [guide to system prompts](https://docs.claude.com/en/docs/system-prompts).

  thinking: Configuration for enabling Claude's extended thinking.

@@ -2818,7 +2836,7 @@ class AsyncMessages(AsyncAPIResource):
  tokens and counts towards your `max_tokens` limit.

  See
- [extended thinking](https://docs.anthropic.com/en/docs/build-with-claude/extended-thinking)
+ [extended thinking](https://docs.claude.com/en/docs/build-with-claude/extended-thinking)
  for details.

  tool_choice: How the model should use the provided tools. The model can use a specific tool,
@@ -2833,9 +2851,9 @@ class AsyncMessages(AsyncAPIResource):

  There are two types of tools: **client tools** and **server tools**. The
  behavior described below applies to client tools. For
- [server tools](https://docs.anthropic.com/en/docs/agents-and-tools/tool-use/overview#server-tools),
+ [server tools](https://docs.claude.com/en/docs/agents-and-tools/tool-use/overview#server-tools),
  see their individual documentation as each has its own behavior (e.g., the
- [web search tool](https://docs.anthropic.com/en/docs/agents-and-tools/tool-use/web-search-tool)).
+ [web search tool](https://docs.claude.com/en/docs/agents-and-tools/tool-use/web-search-tool)).

  Each tool definition includes:

@@ -2898,7 +2916,7 @@ class AsyncMessages(AsyncAPIResource):
  functions, or more generally whenever you want the model to produce a particular
  JSON structure of output.

- See our [guide](https://docs.anthropic.com/en/docs/tool-use) for more details.
+ See our [guide](https://docs.claude.com/en/docs/tool-use) for more details.

  betas: Optional header to specify the beta version(s) you want to use.
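The signature change repeated across the hunks above widens container on every beta Messages entry point from Optional[str] to Optional[message_create_params.Container], alongside the new beta_container_params.py type in the file list. Below is a minimal sketch of container reuse as the docstring describes it ("Container identifier for reuse across requests"); it assumes the new alias still accepts a plain container identifier string, that the response object exposes the container it ran in, and that a code-execution beta is what provisions the container in the first place. None of that is shown directly in this diff.

```python
# Hypothetical sketch only: the model id, beta flag, tool type, and the
# assumption that a plain container id string satisfies
# message_create_params.Container are not confirmed by this diff.
from anthropic import Anthropic

client = Anthropic()  # reads ANTHROPIC_API_KEY from the environment

first = client.beta.messages.create(
    model="claude-sonnet-4-5",  # any model id accepted by the Messages API
    max_tokens=1024,
    messages=[{"role": "user", "content": "Create a scratch file in the container."}],
    tools=[{"type": "code_execution_20250522", "name": "code_execution"}],
    betas=["code-execution-2025-05-22"],  # assumption: this beta provisions the container
)

# Feed the identifier from the first response back in so the follow-up turn
# runs against the same sandbox.
follow_up = client.beta.messages.create(
    model="claude-sonnet-4-5",
    max_tokens=1024,
    messages=[{"role": "user", "content": "Read the scratch file back."}],
    container=first.container.id if first.container else None,
)
```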