aws-sdk 2.1630.0 → 2.1632.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -834,6 +834,12 @@ declare namespace BedrockAgent {
  }
  export type BasePromptTemplate = string;
  export type BedrockEmbeddingModelArn = string;
+ export interface BedrockEmbeddingModelConfiguration {
+ /**
+ * The dimensions details for the vector configuration used on the Bedrock embeddings model.
+ */
+ dimensions?: Dimensions;
+ }
  export type Boolean = boolean;
  export type BucketOwnerAccountId = string;
  export interface ChunkingConfiguration {
@@ -1283,6 +1289,7 @@ declare namespace BedrockAgent {
  status: KnowledgeBaseStatus;
  }
  export type Description = string;
+ export type Dimensions = number;
  export interface DisassociateAgentKnowledgeBaseRequest {
  /**
  * The unique identifier of the agent from which to disassociate the knowledge base.
@@ -1300,6 +1307,12 @@ declare namespace BedrockAgent {
  export interface DisassociateAgentKnowledgeBaseResponse {
  }
  export type DraftVersion = string;
+ export interface EmbeddingModelConfiguration {
+ /**
+ * The vector configuration details on the Bedrock embeddings model.
+ */
+ bedrockEmbeddingModelConfiguration?: BedrockEmbeddingModelConfiguration;
+ }
  export type FailureReason = string;
  export type FailureReasons = FailureReason[];
  export type FieldName = string;
@@ -2576,6 +2589,10 @@ declare namespace BedrockAgent {
  * The Amazon Resource Name (ARN) of the model used to create vector embeddings for the knowledge base.
  */
  embeddingModelArn: BedrockEmbeddingModelArn;
+ /**
+ * The embeddings model configuration details for the vector model used in the knowledge base.
+ */
+ embeddingModelConfiguration?: EmbeddingModelConfiguration;
  }
  export type Version = string;
  /**
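Taken together, the BedrockAgent hunks above let a vector knowledge base pin the output dimension of its embeddings model. A minimal sketch of the new field in use, assuming the enclosing interface in the last hunk is VectorKnowledgeBaseConfiguration (the hunk does not show its name) and that the chosen model supports a 512-dimension output:

```typescript
import { BedrockAgent } from 'aws-sdk';

// Sketch only: the enclosing interface name and the model ARN are assumptions;
// the diff itself only adds the optional embeddingModelConfiguration field
// and the Dimensions alias it points at.
const vectorConfig: BedrockAgent.Types.VectorKnowledgeBaseConfiguration = {
  embeddingModelArn:
    'arn:aws:bedrock:us-east-1::foundation-model/amazon.titan-embed-text-v2:0',
  embeddingModelConfiguration: {
    bedrockEmbeddingModelConfiguration: {
      dimensions: 512, // Dimensions is a plain number alias
    },
  },
};
```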
@@ -12,6 +12,22 @@ declare class BedrockRuntime extends Service {
  */
  constructor(options?: BedrockRuntime.Types.ClientConfiguration)
  config: Config & BedrockRuntime.Types.ClientConfiguration;
+ /**
+ * Sends messages to the specified Amazon Bedrock model. Converse provides a consistent interface that works with all models that support messages. This allows you to write code once and use it with different models. Should a model have unique inference parameters, you can also pass those unique parameters to the model. For more information, see Run inference in the Bedrock User Guide. This operation requires permission for the bedrock:InvokeModel action.
+ */
+ converse(params: BedrockRuntime.Types.ConverseRequest, callback?: (err: AWSError, data: BedrockRuntime.Types.ConverseResponse) => void): Request<BedrockRuntime.Types.ConverseResponse, AWSError>;
+ /**
+ * Sends messages to the specified Amazon Bedrock model. Converse provides a consistent interface that works with all models that support messages. This allows you to write code once and use it with different models. Should a model have unique inference parameters, you can also pass those unique parameters to the model. For more information, see Run inference in the Bedrock User Guide. This operation requires permission for the bedrock:InvokeModel action.
+ */
+ converse(callback?: (err: AWSError, data: BedrockRuntime.Types.ConverseResponse) => void): Request<BedrockRuntime.Types.ConverseResponse, AWSError>;
+ /**
+ * Sends messages to the specified Amazon Bedrock model and returns the response in a stream. ConverseStream provides a consistent API that works with all Amazon Bedrock models that support messages. This allows you to write code once and use it with different models. Should a model have unique inference parameters, you can also pass those unique parameters to the model. For more information, see Run inference in the Bedrock User Guide. To find out if a model supports streaming, call GetFoundationModel and check the responseStreamingSupported field in the response. For example code, see Invoke model with streaming code example in the Amazon Bedrock User Guide. This operation requires permission for the bedrock:InvokeModelWithResponseStream action.
+ */
+ converseStream(params: BedrockRuntime.Types.ConverseStreamRequest, callback?: (err: AWSError, data: BedrockRuntime.Types.ConverseStreamResponse) => void): Request<BedrockRuntime.Types.ConverseStreamResponse, AWSError>;
+ /**
+ * Sends messages to the specified Amazon Bedrock model and returns the response in a stream. ConverseStream provides a consistent API that works with all Amazon Bedrock models that support messages. This allows you to write code once and use it with different models. Should a model have unique inference parameters, you can also pass those unique parameters to the model. For more information, see Run inference in the Bedrock User Guide. To find out if a model supports streaming, call GetFoundationModel and check the responseStreamingSupported field in the response. For example code, see Invoke model with streaming code example in the Amazon Bedrock User Guide. This operation requires permission for the bedrock:InvokeModelWithResponseStream action.
+ */
+ converseStream(callback?: (err: AWSError, data: BedrockRuntime.Types.ConverseStreamResponse) => void): Request<BedrockRuntime.Types.ConverseStreamResponse, AWSError>;
  /**
  * Invokes the specified Amazon Bedrock model to run inference using the prompt and inference parameters provided in the request body. You use model inference to generate text, images, and embeddings. For example code, see Invoke model code examples in the Amazon Bedrock User Guide. This operation requires permission for the bedrock:InvokeModel action.
  */
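The new converse overloads follow the usual aws-sdk v2 pattern: pass params with an optional callback, or chain .promise(). A minimal single-turn sketch, with the region and model ID as placeholders:

```typescript
import { BedrockRuntime } from 'aws-sdk';

const runtime = new BedrockRuntime({ region: 'us-east-1' }); // placeholder region

// Single-turn Converse call; the model ID is a placeholder. The caller needs
// bedrock:InvokeModel permission, per the doc comment above.
async function ask(question: string): Promise<string | undefined> {
  const resp = await runtime
    .converse({
      modelId: 'anthropic.claude-3-haiku-20240307-v1:0',
      messages: [{ role: 'user', content: [{ text: question }] }],
    })
    .promise();
  // output.message is optional; its content blocks can mix text and tool use.
  return resp.output.message?.content?.[0]?.text;
}
```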
@@ -30,9 +46,239 @@ declare class BedrockRuntime extends Service {
  invokeModelWithResponseStream(callback?: (err: AWSError, data: BedrockRuntime.Types.InvokeModelWithResponseStreamResponse) => void): Request<BedrockRuntime.Types.InvokeModelWithResponseStreamResponse, AWSError>;
  }
  declare namespace BedrockRuntime {
+ export interface AnyToolChoice {
+ }
+ export interface AutoToolChoice {
+ }
  export type Body = Buffer|Uint8Array|Blob|string;
+ export interface ContentBlock {
+ /**
+ * Text to include in the message.
+ */
+ text?: String;
+ /**
+ * Image to include in the message. This field is only supported by Anthropic Claude 3 models.
+ */
+ image?: ImageBlock;
+ /**
+ * Information about a tool use request from a model.
+ */
+ toolUse?: ToolUseBlock;
+ /**
+ * The result for a tool request that a model makes.
+ */
+ toolResult?: ToolResultBlock;
+ }
+ export interface ContentBlockDelta {
+ /**
+ * The content text.
+ */
+ text?: String;
+ /**
+ * Information about a tool that the model is requesting to use.
+ */
+ toolUse?: ToolUseBlockDelta;
+ }
+ export interface ContentBlockDeltaEvent {
+ /**
+ * The delta for a content block delta event.
+ */
+ delta: ContentBlockDelta;
+ /**
+ * The block index for a content block delta event.
+ */
+ contentBlockIndex: NonNegativeInteger;
+ }
+ export interface ContentBlockStart {
+ /**
+ * Information about a tool that the model is requesting to use.
+ */
+ toolUse?: ToolUseBlockStart;
+ }
+ export interface ContentBlockStartEvent {
+ /**
+ * Start information about a content block start event.
+ */
+ start: ContentBlockStart;
+ /**
+ * The index for a content block start event.
+ */
+ contentBlockIndex: NonNegativeInteger;
+ }
+ export interface ContentBlockStopEvent {
+ /**
+ * The index for a content block.
+ */
+ contentBlockIndex: NonNegativeInteger;
+ }
+ export type ContentBlocks = ContentBlock[];
+ export type ConversationRole = "user"|"assistant"|string;
+ export type ConversationalModelId = string;
+ export interface ConverseMetrics {
+ /**
+ * The latency of the call to Converse, in milliseconds.
+ */
+ latencyMs: Long;
+ }
+ export interface ConverseOutput {
+ /**
+ * The message that the model generates.
+ */
+ message?: Message;
+ }
+ export interface ConverseRequest {
+ /**
+ * The identifier for the model that you want to call. The modelId to provide depends on the type of model that you use: If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see Amazon Bedrock base model IDs (on-demand throughput) in the Amazon Bedrock User Guide. If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see Run inference using a Provisioned Throughput in the Amazon Bedrock User Guide. If you use a custom model, first purchase Provisioned Throughput for it. Then specify the ARN of the resulting provisioned model. For more information, see Use a custom model in Amazon Bedrock in the Amazon Bedrock User Guide.
+ */
+ modelId: ConversationalModelId;
+ /**
+ * The messages that you want to send to the model.
+ */
+ messages: Messages;
+ /**
+ * A system prompt to pass to the model.
+ */
+ system?: SystemContentBlocks;
+ /**
+ * Inference parameters to pass to the model. Converse supports a base set of inference parameters. If you need to pass additional parameters that the model supports, use the additionalModelRequestFields request field.
+ */
+ inferenceConfig?: InferenceConfiguration;
+ /**
+ * Configuration information for the tools that the model can use when generating a response. This field is only supported by Anthropic Claude 3, Cohere Command R, Cohere Command R+, and Mistral Large models.
+ */
+ toolConfig?: ToolConfiguration;
+ /**
+ * Additional inference parameters that the model supports, beyond the base set of inference parameters that Converse supports in the inferenceConfig field. For more information, see Model parameters.
+ */
+ additionalModelRequestFields?: Document;
+ /**
+ * Additional model parameters field paths to return in the response. Converse returns the requested fields as a JSON Pointer object in the additionalModelResultFields field. The following is example JSON for additionalModelResponseFieldPaths. [ "/stop_sequence" ] For information about the JSON Pointer syntax, see the Internet Engineering Task Force (IETF) documentation. Converse rejects an empty JSON Pointer or incorrectly structured JSON Pointer with a 400 error code. If the JSON Pointer is valid, but the requested field is not in the model response, it is ignored by Converse.
+ */
+ additionalModelResponseFieldPaths?: ConverseRequestAdditionalModelResponseFieldPathsList;
+ }
+ export type ConverseRequestAdditionalModelResponseFieldPathsList = ConverseRequestAdditionalModelResponseFieldPathsListMemberString[];
+ export type ConverseRequestAdditionalModelResponseFieldPathsListMemberString = string;
+ export interface ConverseResponse {
+ /**
+ * The result from the call to Converse.
+ */
+ output: ConverseOutput;
+ /**
+ * The reason why the model stopped generating output.
+ */
+ stopReason: StopReason;
+ /**
+ * The total number of tokens used in the call to Converse. The total includes the tokens input to the model and the tokens generated by the model.
+ */
+ usage: TokenUsage;
+ /**
+ * Metrics for the call to Converse.
+ */
+ metrics: ConverseMetrics;
+ /**
+ * Additional fields in the response that are unique to the model.
+ */
+ additionalModelResponseFields?: Document;
+ }
+ export interface ConverseStreamMetadataEvent {
+ /**
+ * Usage information for the conversation stream event.
+ */
+ usage: TokenUsage;
+ /**
+ * The metrics for the conversation stream metadata event.
+ */
+ metrics: ConverseStreamMetrics;
+ }
+ export interface ConverseStreamMetrics {
+ /**
+ * The latency for the streaming request, in milliseconds.
+ */
+ latencyMs: Long;
+ }
+ export type ConverseStreamOutput = EventStream<{messageStart?:MessageStartEvent,contentBlockStart?:ContentBlockStartEvent,contentBlockDelta?:ContentBlockDeltaEvent,contentBlockStop?:ContentBlockStopEvent,messageStop?:MessageStopEvent,metadata?:ConverseStreamMetadataEvent,internalServerException?:InternalServerException,modelStreamErrorException?:ModelStreamErrorException,validationException?:ValidationException,throttlingException?:ThrottlingException}>;
+ export interface ConverseStreamRequest {
+ /**
+ * The ID for the model. The modelId to provide depends on the type of model that you use: If you use a base model, specify the model ID or its ARN. For a list of model IDs for base models, see Amazon Bedrock base model IDs (on-demand throughput) in the Amazon Bedrock User Guide. If you use a provisioned model, specify the ARN of the Provisioned Throughput. For more information, see Run inference using a Provisioned Throughput in the Amazon Bedrock User Guide. If you use a custom model, first purchase Provisioned Throughput for it. Then specify the ARN of the resulting provisioned model. For more information, see Use a custom model in Amazon Bedrock in the Amazon Bedrock User Guide.
+ */
+ modelId: ConversationalModelId;
+ /**
+ * The messages that you want to send to the model.
+ */
+ messages: Messages;
+ /**
+ * A system prompt to send to the model.
+ */
+ system?: SystemContentBlocks;
+ /**
+ * Inference parameters to pass to the model. ConverseStream supports a base set of inference parameters. If you need to pass additional parameters that the model supports, use the additionalModelRequestFields request field.
+ */
+ inferenceConfig?: InferenceConfiguration;
+ /**
+ * Configuration information for the tools that the model can use when generating a response. This field is only supported by Anthropic Claude 3 models.
+ */
+ toolConfig?: ToolConfiguration;
+ /**
+ * Additional inference parameters that the model supports, beyond the base set of inference parameters that ConverseStream supports in the inferenceConfig field.
+ */
+ additionalModelRequestFields?: Document;
+ /**
+ * Additional model parameters field paths to return in the response. ConverseStream returns the requested fields as a JSON Pointer object in the additionalModelResultFields field. The following is example JSON for additionalModelResponseFieldPaths. [ "/stop_sequence" ] For information about the JSON Pointer syntax, see the Internet Engineering Task Force (IETF) documentation. ConverseStream rejects an empty JSON Pointer or incorrectly structured JSON Pointer with a 400 error code. If the JSON Pointer is valid, but the requested field is not in the model response, it is ignored by ConverseStream.
+ */
+ additionalModelResponseFieldPaths?: ConverseStreamRequestAdditionalModelResponseFieldPathsList;
+ }
+ export type ConverseStreamRequestAdditionalModelResponseFieldPathsList = ConverseStreamRequestAdditionalModelResponseFieldPathsListMemberString[];
+ export type ConverseStreamRequestAdditionalModelResponseFieldPathsListMemberString = string;
+ export interface ConverseStreamResponse {
+ /**
+ * The output stream that the model generated.
+ */
+ stream?: ConverseStreamOutput;
+ }
+ export interface Document {
+ }
  export type GuardrailIdentifier = string;
  export type GuardrailVersion = string;
+ export interface ImageBlock {
+ /**
+ * The format of the image.
+ */
+ format: ImageFormat;
+ /**
+ * The source for the image.
+ */
+ source: ImageSource;
+ }
+ export type ImageFormat = "png"|"jpeg"|"gif"|"webp"|string;
+ export interface ImageSource {
+ /**
+ * The raw image bytes for the image. If you use an AWS SDK, you don't need to base64 encode the image bytes.
+ */
+ bytes?: ImageSourceBytesBlob;
+ }
+ export type ImageSourceBytesBlob = Buffer|Uint8Array|Blob|string;
+ export interface InferenceConfiguration {
+ /**
+ * The maximum number of tokens to allow in the generated response. The default value is the maximum allowed value for the model that you are using. For more information, see Inference parameters for foundation models.
+ */
+ maxTokens?: InferenceConfigurationMaxTokensInteger;
+ /**
+ * The likelihood of the model selecting higher-probability options while generating a response. A lower value makes the model more likely to choose higher-probability options, while a higher value makes the model more likely to choose lower-probability options. The default value is the default value for the model that you are using. For more information, see Inference parameters for foundation models.
+ */
+ temperature?: InferenceConfigurationTemperatureFloat;
+ /**
+ * The percentage of most-likely candidates that the model considers for the next token. For example, if you choose a value of 0.8 for topP, the model selects from the top 80% of the probability distribution of tokens that could be next in the sequence. The default value is the default value for the model that you are using. For more information, see Inference parameters for foundation models.
+ */
+ topP?: InferenceConfigurationTopPFloat;
+ /**
+ * A list of stop sequences. A stop sequence is a sequence of characters that causes the model to stop generating the response.
+ */
+ stopSequences?: InferenceConfigurationStopSequencesList;
+ }
+ export type InferenceConfigurationMaxTokensInteger = number;
+ export type InferenceConfigurationStopSequencesList = NonEmptyString[];
+ export type InferenceConfigurationTemperatureFloat = number;
+ export type InferenceConfigurationTopPFloat = number;
  export interface InternalServerException {
  message?: NonBlankString;
  }
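InferenceConfiguration is the model-agnostic subset of parameters; anything model-specific rides along in additionalModelRequestFields, which is typed as the open Document interface and so accepts arbitrary JSON. A sketch of a full request shape, with top_k shown as an assumed Anthropic-specific parameter, not something this diff defines:

```typescript
import { BedrockRuntime } from 'aws-sdk';

// The base parameters Converse understands for every model.
const inferenceConfig: BedrockRuntime.Types.InferenceConfiguration = {
  maxTokens: 512,
  temperature: 0.5,
  topP: 0.9,
  stopSequences: ['END'],
};

// additionalModelRequestFields is an open Document, so model-specific knobs
// (top_k here is an assumption) pass through untyped.
const request: BedrockRuntime.Types.ConverseRequest = {
  modelId: 'anthropic.claude-3-haiku-20240307-v1:0', // placeholder
  messages: [{ role: 'user', content: [{ text: 'Hello' }] }],
  inferenceConfig,
  additionalModelRequestFields: { top_k: 200 },
};
```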
@@ -117,6 +363,34 @@ declare namespace BedrockRuntime {
  */
  contentType: MimeType;
  }
+ export type Long = number;
+ export interface Message {
+ /**
+ * The role that the message plays in the conversation.
+ */
+ role: ConversationRole;
+ /**
+ * The message content.
+ */
+ content: ContentBlocks;
+ }
+ export interface MessageStartEvent {
+ /**
+ * The role for the message.
+ */
+ role: ConversationRole;
+ }
+ export interface MessageStopEvent {
+ /**
+ * The reason why the model stopped generating output.
+ */
+ stopReason: StopReason;
+ /**
+ * The additional model response fields.
+ */
+ additionalModelResponseFields?: Document;
+ }
+ export type Messages = Message[];
  export type MimeType = string;
  export interface ModelStreamErrorException {
  message?: NonBlankString;
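These event interfaces describe the shape of a ConverseStream response: one messageStart, then interleaved content block start/delta/stop events, then messageStop. A consumption sketch, assuming the v2 EventStream can be iterated with for await (Node readable streams are async-iterable; a cast may be needed depending on the typings):

```typescript
import { BedrockRuntime } from 'aws-sdk';

const runtime = new BedrockRuntime({ region: 'us-east-1' }); // placeholder

// Prints text deltas as they arrive, then the final stop reason. Each event
// object carries exactly one member of the ConverseStreamOutput union.
async function streamReply(prompt: string): Promise<void> {
  const resp = await runtime
    .converseStream({
      modelId: 'anthropic.claude-3-haiku-20240307-v1:0', // placeholder
      messages: [{ role: 'user', content: [{ text: prompt }] }],
    })
    .promise();
  if (!resp.stream) return;
  for await (const event of resp.stream) {
    if (event.contentBlockDelta?.delta.text) {
      process.stdout.write(event.contentBlockDelta.delta.text);
    }
    if (event.messageStop) {
      console.log(`\nstop reason: ${event.messageStop.stopReason}`);
    }
  }
}
```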
@@ -133,6 +407,8 @@ declare namespace BedrockRuntime {
  message?: NonBlankString;
  }
  export type NonBlankString = string;
+ export type NonEmptyString = string;
+ export type NonNegativeInteger = number;
  export type PartBody = Buffer|Uint8Array|Blob|string;
  export interface PayloadPart {
  /**
@@ -141,10 +417,155 @@ declare namespace BedrockRuntime {
  bytes?: PartBody;
  }
  export type ResponseStream = EventStream<{chunk?:PayloadPart,internalServerException?:InternalServerException,modelStreamErrorException?:ModelStreamErrorException,validationException?:ValidationException,throttlingException?:ThrottlingException,modelTimeoutException?:ModelTimeoutException}>;
+ export interface SpecificToolChoice {
+ /**
+ * The name of the tool that the model must request.
+ */
+ name: ToolName;
+ }
  export type StatusCode = number;
+ export type StopReason = "end_turn"|"tool_use"|"max_tokens"|"stop_sequence"|"content_filtered"|string;
+ export type String = string;
+ export interface SystemContentBlock {
+ /**
+ * A system prompt for the model.
+ */
+ text?: NonEmptyString;
+ }
+ export type SystemContentBlocks = SystemContentBlock[];
  export interface ThrottlingException {
  message?: NonBlankString;
  }
+ export interface TokenUsage {
+ /**
+ * The number of tokens sent in the request to the model.
+ */
+ inputTokens: TokenUsageInputTokensInteger;
+ /**
+ * The number of tokens that the model generated for the request.
+ */
+ outputTokens: TokenUsageOutputTokensInteger;
+ /**
+ * The total of input tokens and tokens generated by the model.
+ */
+ totalTokens: TokenUsageTotalTokensInteger;
+ }
+ export type TokenUsageInputTokensInteger = number;
+ export type TokenUsageOutputTokensInteger = number;
+ export type TokenUsageTotalTokensInteger = number;
+ export interface Tool {
+ /**
+ * The specification for the tool.
+ */
+ toolSpec?: ToolSpecification;
+ }
+ export interface ToolChoice {
+ /**
+ * The model automatically decides whether to call a tool or to generate text instead.
+ */
+ auto?: AutoToolChoice;
+ /**
+ * The model must request at least one tool (no text is generated).
+ */
+ any?: AnyToolChoice;
+ /**
+ * The model must request the specified tool.
+ */
+ tool?: SpecificToolChoice;
+ }
+ export interface ToolConfiguration {
+ /**
+ * An array of tools that you want to pass to a model.
+ */
+ tools: ToolConfigurationToolsList;
+ /**
+ * If supported by the model, forces the model to request a tool.
+ */
+ toolChoice?: ToolChoice;
+ }
+ export type ToolConfigurationToolsList = Tool[];
+ export interface ToolInputSchema {
+ /**
+ * The JSON schema for the tool. For more information, see JSON Schema Reference.
+ */
+ json?: Document;
+ }
+ export type ToolName = string;
+ export interface ToolResultBlock {
+ /**
+ * The ID of the tool request that this is the result for.
+ */
+ toolUseId: ToolUseId;
+ /**
+ * The content for the tool result content block.
+ */
+ content: ToolResultContentBlocks;
+ /**
+ * The status for the tool result content block. This field is only supported by Anthropic Claude 3 models.
+ */
+ status?: ToolResultStatus;
+ }
+ export interface ToolResultContentBlock {
+ /**
+ * A tool result that is JSON format data.
+ */
+ json?: Document;
+ /**
+ * A tool result that is text.
+ */
+ text?: String;
+ /**
+ * A tool result that is an image. This field is only supported by Anthropic Claude 3 models.
+ */
+ image?: ImageBlock;
+ }
+ export type ToolResultContentBlocks = ToolResultContentBlock[];
+ export type ToolResultStatus = "success"|"error"|string;
+ export interface ToolSpecification {
+ /**
+ * The name for the tool.
+ */
+ name: ToolName;
+ /**
+ * The description for the tool.
+ */
+ description?: NonEmptyString;
+ /**
+ * The input schema for the tool in JSON format.
+ */
+ inputSchema: ToolInputSchema;
+ }
+ export interface ToolUseBlock {
+ /**
+ * The ID for the tool request.
+ */
+ toolUseId: ToolUseId;
+ /**
+ * The name of the tool that the model wants to use.
+ */
+ name: ToolName;
+ /**
+ * The input to pass to the tool.
+ */
+ input: Document;
+ }
+ export interface ToolUseBlockDelta {
+ /**
+ * The input for a requested tool.
+ */
+ input: String;
+ }
+ export interface ToolUseBlockStart {
+ /**
+ * The ID for the tool request.
+ */
+ toolUseId: ToolUseId;
+ /**
+ * The name of the tool that the model is requesting to use.
+ */
+ name: ToolName;
+ }
+ export type ToolUseId = string;
  export type Trace = "ENABLED"|"DISABLED"|string;
  export interface ValidationException {
  message?: NonBlankString;
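The tool types close the loop: declare a tool schema in ToolConfiguration, let the model request it via a toolUse block (stopReason "tool_use"), then answer with a toolResult carrying the matching toolUseId. A sketch reusing the get_weather schema from this release's documentation JSON; the weather value and the follow-up converse() call are placeholders:

```typescript
import { BedrockRuntime } from 'aws-sdk';

// Tool declaration; Document is an open interface, so the JSON schema passes
// through untyped.
const toolConfig: BedrockRuntime.Types.ToolConfiguration = {
  tools: [
    {
      toolSpec: {
        name: 'get_weather',
        description: 'Get weather',
        inputSchema: {
          json: {
            type: 'object',
            properties: {
              city: { type: 'string', description: 'City of location' },
              state: { type: 'string', description: 'State of location' },
            },
            required: ['city', 'state'],
          },
        },
      },
    },
  ],
  toolChoice: { auto: {} }, // let the model decide whether to call the tool
};

// When stopReason is "tool_use", each toolUse block is answered with a
// toolResult that echoes the same toolUseId; the caller then appends
// { role: 'user', content: [{ toolResult }] } and calls converse() again.
function toToolResult(
  block: BedrockRuntime.Types.ToolUseBlock
): BedrockRuntime.Types.ToolResultBlock {
  return {
    toolUseId: block.toolUseId,
    content: [{ json: { weather: '40' } }], // placeholder result
    status: 'success',
  };
}
```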