@aws-sdk/client-pipes 3.587.0 → 3.590.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -13,25 +13,29 @@ export declare const AssignPublicIp: {
13
13
  */
14
14
  export type AssignPublicIp = (typeof AssignPublicIp)[keyof typeof AssignPublicIp];
15
15
  /**
16
- * <p>This structure specifies the VPC subnets and security groups for the task, and whether a public IP address is to be used.
17
- * This structure is relevant only for ECS tasks that use the <code>awsvpc</code> network mode.</p>
16
+ * <p>This structure specifies the VPC subnets and security groups for the task, and whether a
17
+ * public IP address is to be used. This structure is relevant only for ECS tasks that use the
18
+ * <code>awsvpc</code> network mode.</p>
18
19
  * @public
19
20
  */
20
21
  export interface AwsVpcConfiguration {
21
22
  /**
22
- * <p>Specifies the subnets associated with the task. These subnets must all be in the same VPC. You can specify as many as 16 subnets.</p>
23
+ * <p>Specifies the subnets associated with the task. These subnets must all be in the same
24
+ * VPC. You can specify as many as 16 subnets.</p>
23
25
  * @public
24
26
  */
25
27
  Subnets: string[] | undefined;
26
28
  /**
27
- * <p>Specifies the security groups associated with the task. These security groups must all be in the same VPC. You can specify as many
28
- * as five security groups. If you do not specify a security group, the default security group for the VPC is used.</p>
29
+ * <p>Specifies the security groups associated with the task. These security groups must all
30
+ * be in the same VPC. You can specify as many as five security groups. If you do not specify
31
+ * a security group, the default security group for the VPC is used.</p>
29
32
  * @public
30
33
  */
31
34
  SecurityGroups?: string[];
32
35
  /**
33
- * <p>Specifies whether the task's elastic network interface receives a public IP address. You can specify <code>ENABLED</code> only when
34
- * <code>LaunchType</code> in <code>EcsParameters</code> is set to <code>FARGATE</code>.</p>
36
+ * <p>Specifies whether the task's elastic network interface receives a public IP address. You
37
+ * can specify <code>ENABLED</code> only when <code>LaunchType</code> in
38
+ * <code>EcsParameters</code> is set to <code>FARGATE</code>.</p>
35
39
  * @public
36
40
  */
37
41
  AssignPublicIp?: AssignPublicIp;
@@ -49,21 +53,25 @@ export interface BatchArrayProperties {
49
53
  Size?: number;
50
54
  }
51
55
  /**
52
- * <p>The environment variables to send to the container. You can add new environment variables, which are added to the container at launch, or you can override the existing
56
+ * <p>The environment variables to send to the container. You can add new environment
57
+ * variables, which are added to the container at launch, or you can override the existing
53
58
  * environment variables from the Docker image or the task definition.</p>
54
59
  * <note>
55
- * <p>Environment variables cannot start with "<code>Batch</code>". This naming convention is reserved for variables that Batch sets.</p>
60
+ * <p>Environment variables cannot start with "<code>Batch</code>". This
61
+ * naming convention is reserved for variables that Batch sets.</p>
56
62
  * </note>
57
63
  * @public
58
64
  */
59
65
  export interface BatchEnvironmentVariable {
60
66
  /**
61
- * <p>The name of the key-value pair. For environment variables, this is the name of the environment variable.</p>
67
+ * <p>The name of the key-value pair. For environment variables, this is the name of the
68
+ * environment variable.</p>
62
69
  * @public
63
70
  */
64
71
  Name?: string;
65
72
  /**
66
- * <p>The value of the key-value pair. For environment variables, this is the value of the environment variable.</p>
73
+ * <p>The value of the key-value pair. For environment variables, this is the value of the
74
+ * environment variable.</p>
67
75
  * @public
68
76
  */
69
77
  Value?: string;
@@ -82,48 +90,54 @@ export declare const BatchResourceRequirementType: {
82
90
  */
83
91
  export type BatchResourceRequirementType = (typeof BatchResourceRequirementType)[keyof typeof BatchResourceRequirementType];
84
92
  /**
85
- * <p>The type and amount of a resource to assign to a container. The supported resources include <code>GPU</code>, <code>MEMORY</code>, and <code>VCPU</code>.</p>
93
+ * <p>The type and amount of a resource to assign to a container. The supported resources
94
+ * include <code>GPU</code>, <code>MEMORY</code>, and <code>VCPU</code>.</p>
86
95
  * @public
87
96
  */
88
97
  export interface BatchResourceRequirement {
89
98
  /**
90
- * <p>The type of resource to assign to a container. The supported resources include <code>GPU</code>, <code>MEMORY</code>, and <code>VCPU</code>.</p>
99
+ * <p>The type of resource to assign to a container. The supported resources include
100
+ * <code>GPU</code>, <code>MEMORY</code>, and <code>VCPU</code>.</p>
91
101
  * @public
92
102
  */
93
103
  Type: BatchResourceRequirementType | undefined;
94
104
  /**
95
- * <p>The quantity of the specified resource to reserve for the container. The values vary based on the
96
- * <code>type</code> specified.</p>
105
+ * <p>The quantity of the specified resource to reserve for the container. The values vary
106
+ * based on the <code>type</code> specified.</p>
97
107
  * <dl>
98
108
  * <dt>type="GPU"</dt>
99
109
  * <dd>
100
- * <p>The number of physical GPUs to reserve for the container. Make sure that the number of GPUs reserved for all
101
- * containers in a job doesn't exceed the number of available GPUs on the compute resource that the job is launched
102
- * on.</p>
110
+ * <p>The number of physical GPUs to reserve for the container. Make sure that the
111
+ * number of GPUs reserved for all containers in a job doesn't exceed the number of
112
+ * available GPUs on the compute resource that the job is launched on.</p>
103
113
  * <note>
104
- * <p>GPUs aren't available for jobs that are running on Fargate resources.</p>
114
+ * <p>GPUs aren't available for jobs that are running on Fargate
115
+ * resources.</p>
105
116
  * </note>
106
117
  * </dd>
107
118
  * <dt>type="MEMORY"</dt>
108
119
  * <dd>
109
- * <p>The memory hard limit (in MiB) present to the container. This parameter is supported for jobs that are
110
- * running on EC2 resources. If your container attempts to exceed the memory specified, the container is terminated.
111
- * This parameter maps to <code>Memory</code> in the <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">
112
- * Create a container</a> section of the <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a>
113
- * and the <code>--memory</code> option to <a href="https://docs.docker.com/engine/reference/run/">docker run</a>.
114
- * You must specify at least 4 MiB of memory for a job. This is required but can be specified in several places for
115
- * multi-node parallel (MNP) jobs. It must be specified for each node at least once. This parameter maps to
116
- * <code>Memory</code> in the <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">
117
- * Create a container</a> section of the <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and the
118
- * <code>--memory</code> option to <a href="https://docs.docker.com/engine/reference/run/">docker run</a>.</p>
120
+ * <p>The memory hard limit (in MiB) present to the container. This parameter is
121
+ * supported for jobs that are running on EC2 resources. If your container attempts
122
+ * to exceed the memory specified, the container is terminated. This parameter maps
123
+ * to <code>Memory</code> in the <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container"> Create a
124
+ * container</a> section of the <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and
125
+ * the <code>--memory</code> option to <a href="https://docs.docker.com/engine/reference/run/">docker run</a>. You
126
+ * must specify at least 4 MiB of memory for a job. This is required but can be
127
+ * specified in several places for multi-node parallel (MNP) jobs. It must be
128
+ * specified for each node at least once. This parameter maps to <code>Memory</code>
129
+ * in the <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">
130
+ * Create a container</a> section of the <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and
131
+ * the <code>--memory</code> option to <a href="https://docs.docker.com/engine/reference/run/">docker run</a>.</p>
119
132
  * <note>
120
- * <p>If you're trying to maximize your resource utilization by providing your jobs as much memory as possible for
121
- * a particular instance type, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/memory-management.html">Memory
122
- * management</a> in the <i>Batch User Guide</i>.</p>
133
+ * <p>If you're trying to maximize your resource utilization by providing your
134
+ * jobs as much memory as possible for a particular instance type, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/memory-management.html">Memory management</a> in the <i>Batch User
135
+ * Guide</i>.</p>
123
136
  * </note>
124
- * <p>For jobs that are running on Fargate resources, then <code>value</code> is the hard limit (in MiB), and
125
- * must match one of the supported values and the <code>VCPU</code> values must be one of the values supported for
126
- * that memory value.</p>
137
+ * <p>For jobs that are running on Fargate resources, then
138
+ * <code>value</code> is the hard limit (in MiB), and must match one of the
139
+ * supported values and the <code>VCPU</code> values must be one of the values
140
+ * supported for that memory value.</p>
127
141
  * <dl>
128
142
  * <dt>value = 512</dt>
129
143
  * <dd>
@@ -170,7 +184,8 @@ export interface BatchResourceRequirement {
170
184
  * <p>
171
185
  * <code>VCPU</code> = 2, 4, or 8</p>
172
186
  * </dd>
173
- * <dt>value = 17408, 18432, 19456, 21504, 22528, 23552, 25600, 26624, 27648, 29696, or 30720</dt>
187
+ * <dt>value = 17408, 18432, 19456, 21504, 22528, 23552, 25600, 26624, 27648,
188
+ * 29696, or 30720</dt>
174
189
  * <dd>
175
190
  * <p>
176
191
  * <code>VCPU</code> = 4</p>
@@ -190,7 +205,8 @@ export interface BatchResourceRequirement {
190
205
  * <p>
191
206
  * <code>VCPU</code> = 8 or 16</p>
192
207
  * </dd>
193
- * <dt>value = 65536, 73728, 81920, 90112, 98304, 106496, 114688, or 122880</dt>
208
+ * <dt>value = 65536, 73728, 81920, 90112, 98304, 106496, 114688, or
209
+ * 122880</dt>
194
210
  * <dd>
195
211
  * <p>
196
212
  * <code>VCPU</code> = 16</p>
@@ -199,18 +215,21 @@ export interface BatchResourceRequirement {
199
215
  * </dd>
200
216
  * <dt>type="VCPU"</dt>
201
217
  * <dd>
202
- * <p>The number of vCPUs reserved for the container. This parameter maps to <code>CpuShares</code> in the
203
- * <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container">
204
- * Create a container</a> section of the <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a>
205
- * and the <code>--cpu-shares</code> option to
206
- * <a href="https://docs.docker.com/engine/reference/run/">docker run</a>. Each vCPU is equivalent to 1,024 CPU shares. For EC2
207
- * resources, you must specify at least one vCPU. This is required but can be specified in several places; it must be
208
- * specified for each node at least once.</p>
209
- * <p>The default for the Fargate On-Demand vCPU resource count quota is 6 vCPUs. For more information about
210
- * Fargate quotas, see <a href="https://docs.aws.amazon.com/general/latest/gr/ecs-service.html#service-quotas-fargate">Fargate quotas</a> in the <i>Amazon Web Services General Reference</i>.</p>
211
- * <p>For jobs that are running on Fargate resources, then <code>value</code> must match one of the supported
212
- * values and the <code>MEMORY</code> values must be one of the values supported for that <code>VCPU</code> value.
213
- * The supported values are 0.25, 0.5, 1, 2, 4, 8, and 16</p>
218
+ * <p>The number of vCPUs reserved for the container. This parameter maps to
219
+ * <code>CpuShares</code> in the <a href="https://docs.docker.com/engine/api/v1.23/#create-a-container"> Create a
220
+ * container</a> section of the <a href="https://docs.docker.com/engine/api/v1.23/">Docker Remote API</a> and
221
+ * the <code>--cpu-shares</code> option to <a href="https://docs.docker.com/engine/reference/run/">docker run</a>. Each
222
+ * vCPU is equivalent to 1,024 CPU shares. For EC2 resources, you must specify at
223
+ * least one vCPU. This is required but can be specified in several places; it must
224
+ * be specified for each node at least once.</p>
225
+ * <p>The default for the Fargate On-Demand vCPU resource count quota
226
+ * is 6 vCPUs. For more information about Fargate quotas, see <a href="https://docs.aws.amazon.com/general/latest/gr/ecs-service.html#service-quotas-fargate">Fargate quotas</a> in the <i>Amazon Web Services
227
+ * General Reference</i>.</p>
228
+ * <p>For jobs that are running on Fargate resources, then
229
+ * <code>value</code> must match one of the supported values and the
230
+ * <code>MEMORY</code> values must be one of the values supported for that
231
+ * <code>VCPU</code> value. The supported values are 0.25, 0.5, 1, 2, 4, 8, and
232
+ * 16</p>
214
233
  * <dl>
215
234
  * <dt>value = 0.25</dt>
216
235
  * <dd>
@@ -225,30 +244,33 @@ export interface BatchResourceRequirement {
225
244
  * <dt>value = 1</dt>
226
245
  * <dd>
227
246
  * <p>
228
- * <code>MEMORY</code> = 2048, 3072, 4096, 5120, 6144, 7168, or 8192</p>
247
+ * <code>MEMORY</code> = 2048, 3072, 4096, 5120, 6144, 7168, or
248
+ * 8192</p>
229
249
  * </dd>
230
250
  * <dt>value = 2</dt>
231
251
  * <dd>
232
252
  * <p>
233
- * <code>MEMORY</code> = 4096, 5120, 6144, 7168, 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360, or 16384</p>
253
+ * <code>MEMORY</code> = 4096, 5120, 6144, 7168, 8192, 9216, 10240,
254
+ * 11264, 12288, 13312, 14336, 15360, or 16384</p>
234
255
  * </dd>
235
256
  * <dt>value = 4</dt>
236
257
  * <dd>
237
258
  * <p>
238
- * <code>MEMORY</code> = 8192, 9216, 10240, 11264, 12288, 13312, 14336, 15360, 16384, 17408, 18432, 19456,
239
- * 20480, 21504, 22528, 23552, 24576, 25600, 26624, 27648, 28672, 29696, or 30720</p>
259
+ * <code>MEMORY</code> = 8192, 9216, 10240, 11264, 12288, 13312, 14336,
260
+ * 15360, 16384, 17408, 18432, 19456, 20480, 21504, 22528, 23552, 24576,
261
+ * 25600, 26624, 27648, 28672, 29696, or 30720</p>
240
262
  * </dd>
241
263
  * <dt>value = 8</dt>
242
264
  * <dd>
243
265
  * <p>
244
- * <code>MEMORY</code> = 16384, 20480, 24576, 28672, 32768, 36864, 40960, 45056, 49152, 53248, 57344, or 61440
245
- * </p>
266
+ * <code>MEMORY</code> = 16384, 20480, 24576, 28672, 32768, 36864, 40960,
267
+ * 45056, 49152, 53248, 57344, or 61440 </p>
246
268
  * </dd>
247
269
  * <dt>value = 16</dt>
248
270
  * <dd>
249
271
  * <p>
250
- * <code>MEMORY</code> = 32768, 40960, 49152, 57344, 65536, 73728, 81920, 90112, 98304, 106496, 114688, or 122880
251
- * </p>
272
+ * <code>MEMORY</code> = 32768, 40960, 49152, 57344, 65536, 73728, 81920,
273
+ * 90112, 98304, 106496, 114688, or 122880 </p>
252
274
  * </dd>
253
275
  * </dl>
254
276
  * </dd>
@@ -263,15 +285,18 @@ export interface BatchResourceRequirement {
263
285
  */
264
286
  export interface BatchContainerOverrides {
265
287
  /**
266
- * <p>The command to send to the container that overrides the default command from the Docker image or the task definition.</p>
288
+ * <p>The command to send to the container that overrides the default command from the Docker
289
+ * image or the task definition.</p>
267
290
  * @public
268
291
  */
269
292
  Command?: string[];
270
293
  /**
271
- * <p>The environment variables to send to the container. You can add new environment variables, which are added to the container at launch, or you can override the existing
294
+ * <p>The environment variables to send to the container. You can add new environment
295
+ * variables, which are added to the container at launch, or you can override the existing
272
296
  * environment variables from the Docker image or the task definition.</p>
273
297
  * <note>
274
- * <p>Environment variables cannot start with "<code>Batch</code>". This naming convention is reserved for variables that Batch sets.</p>
298
+ * <p>Environment variables cannot start with "<code>Batch</code>". This
299
+ * naming convention is reserved for variables that Batch sets.</p>
275
300
  * </note>
276
301
  * @public
277
302
  */
@@ -279,14 +304,16 @@ export interface BatchContainerOverrides {
279
304
  /**
280
305
  * <p>The instance type to use for a multi-node parallel job.</p>
281
306
  * <note>
282
- * <p>This parameter isn't applicable to single-node container jobs or jobs that run on Fargate resources, and shouldn't be provided.</p>
307
+ * <p>This parameter isn't applicable to single-node container jobs or jobs that run on
308
+ * Fargate resources, and shouldn't be provided.</p>
283
309
  * </note>
284
310
  * @public
285
311
  */
286
312
  InstanceType?: string;
287
313
  /**
288
- * <p>The type and amount of resources to assign to a container. This overrides the settings in the job definition. The supported resources include <code>GPU</code>, <code>MEMORY</code>,
289
- * and <code>VCPU</code>.</p>
314
+ * <p>The type and amount of resources to assign to a container. This overrides the settings
315
+ * in the job definition. The supported resources include <code>GPU</code>,
316
+ * <code>MEMORY</code>, and <code>VCPU</code>.</p>
290
317
  * @public
291
318
  */
292
319
  ResourceRequirements?: BatchResourceRequirement[];
@@ -309,7 +336,8 @@ export type BatchJobDependencyType = (typeof BatchJobDependencyType)[keyof typeo
309
336
  */
310
337
  export interface BatchJobDependency {
311
338
  /**
312
- * <p>The job ID of the Batch job that's associated with this dependency.</p>
339
+ * <p>The job ID of the Batch job that's associated with this
340
+ * dependency.</p>
313
341
  * @public
314
342
  */
315
343
  JobId?: string;
@@ -320,14 +348,15 @@ export interface BatchJobDependency {
320
348
  Type?: BatchJobDependencyType;
321
349
  }
322
350
  /**
323
- * <p>The retry strategy that's associated with a job. For more information, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/job_retries.html">
324
- * Automated job retries</a> in the <i>Batch User Guide</i>.</p>
351
+ * <p>The retry strategy that's associated with a job. For more information, see <a href="https://docs.aws.amazon.com/batch/latest/userguide/job_retries.html"> Automated job
352
+ * retries</a> in the <i>Batch User Guide</i>.</p>
325
353
  * @public
326
354
  */
327
355
  export interface BatchRetryStrategy {
328
356
  /**
329
- * <p>The number of times to move a job to the <code>RUNNABLE</code> status. If the value of <code>attempts</code> is greater than one, the job is retried on
330
- * failure the same number of attempts as the value.</p>
357
+ * <p>The number of times to move a job to the <code>RUNNABLE</code> status. If the value of
358
+ * <code>attempts</code> is greater than one, the job is retried on failure the same number
359
+ * of attempts as the value.</p>
331
360
  * @public
332
361
  */
333
362
  Attempts?: number;
@@ -343,16 +372,16 @@ export interface CapacityProviderStrategyItem {
343
372
  */
344
373
  capacityProvider: string | undefined;
345
374
  /**
346
- * <p>The weight value designates the relative percentage of the total number of tasks launched
347
- * that should use the specified capacity provider. The weight value is taken into consideration
348
- * after the base value, if defined, is satisfied.</p>
375
+ * <p>The weight value designates the relative percentage of the total number of tasks
376
+ * launched that should use the specified capacity provider. The weight value is taken into
377
+ * consideration after the base value, if defined, is satisfied.</p>
349
378
  * @public
350
379
  */
351
380
  weight?: number;
352
381
  /**
353
382
  * <p>The base value designates how many tasks, at a minimum, to run on the specified capacity
354
- * provider. Only one capacity provider in a capacity provider strategy can have a base defined.
355
- * If no value is specified, the default value of 0 is used. </p>
383
+ * provider. Only one capacity provider in a capacity provider strategy can have a base
384
+ * defined. If no value is specified, the default value of 0 is used. </p>
356
385
  * @public
357
386
  */
358
387
  base?: number;
@@ -414,28 +443,27 @@ export declare const RequestedPipeState: {
414
443
  */
415
444
  export type RequestedPipeState = (typeof RequestedPipeState)[keyof typeof RequestedPipeState];
416
445
  /**
417
- * <p>These are custom parameter to be used when the target is an API Gateway REST APIs or
418
- * EventBridge ApiDestinations. In the latter case, these are merged with any
419
- * InvocationParameters specified on the Connection, with any values from the Connection taking
420
- * precedence.</p>
446
+ * <p>These are custom parameters to be used when the target is an API Gateway REST API
447
+ * or EventBridge ApiDestinations. In the latter case, these are merged with any
448
+ * InvocationParameters specified on the Connection, with any values from the Connection
449
+ * taking precedence.</p>
421
450
  * @public
422
451
  */
423
452
  export interface PipeEnrichmentHttpParameters {
424
453
  /**
425
- * <p>The path parameter values to be used to populate API Gateway REST API or EventBridge
426
- * ApiDestination path wildcards ("*").</p>
454
+ * <p>The path parameter values to be used to populate API Gateway REST API or EventBridge ApiDestination path wildcards ("*").</p>
427
455
  * @public
428
456
  */
429
457
  PathParameterValues?: string[];
430
458
  /**
431
- * <p>The headers that need to be sent as part of request invoking the API Gateway REST API or
432
- * EventBridge ApiDestination.</p>
459
+ * <p>The headers that need to be sent as part of request invoking the API Gateway REST
460
+ * API or EventBridge ApiDestination.</p>
433
461
  * @public
434
462
  */
435
463
  HeaderParameters?: Record<string, string>;
436
464
  /**
437
- * <p>The query string keys/values that need to be sent as part of request invoking the API Gateway
438
- * REST API or EventBridge ApiDestination.</p>
465
+ * <p>The query string keys/values that need to be sent as part of request invoking the
466
+ * API Gateway REST API or EventBridge ApiDestination.</p>
439
467
  * @public
440
468
  */
441
469
  QueryStringParameters?: Record<string, string>;
@@ -447,31 +475,32 @@ export interface PipeEnrichmentHttpParameters {
447
475
  export interface PipeEnrichmentParameters {
448
476
  /**
449
477
  * <p>Valid JSON text passed to the enrichment. In this case, nothing from the event itself is
450
- * passed to the enrichment. For more information, see <a href="http://www.rfc-editor.org/rfc/rfc7159.txt">The JavaScript Object Notation (JSON) Data
451
- * Interchange Format</a>.</p>
478
+ * passed to the enrichment. For more information, see <a href="http://www.rfc-editor.org/rfc/rfc7159.txt">The JavaScript Object Notation (JSON)
479
+ * Data Interchange Format</a>.</p>
452
480
  * <p>To remove an input template, specify an empty string.</p>
453
481
  * @public
454
482
  */
455
483
  InputTemplate?: string;
456
484
  /**
457
- * <p>Contains the HTTP parameters to use when the target is a API Gateway REST endpoint or
458
- * EventBridge ApiDestination.</p>
459
- * <p>If you specify an API Gateway REST API or EventBridge ApiDestination as a target, you can
460
- * use this parameter to specify headers, path parameters, and query string keys/values as part
461
- * of your target invoking request. If you're using ApiDestinations, the corresponding Connection
462
- * can also have these values configured. In case of any conflicting keys, values from the
463
- * Connection take precedence.</p>
485
+ * <p>Contains the HTTP parameters to use when the target is an API Gateway REST
486
+ * endpoint or EventBridge ApiDestination.</p>
487
+ * <p>If you specify an API Gateway REST API or EventBridge ApiDestination as a
488
+ * target, you can use this parameter to specify headers, path parameters, and query string
489
+ * keys/values as part of your target invoking request. If you're using ApiDestinations, the
490
+ * corresponding Connection can also have these values configured. In case of any conflicting
491
+ * keys, values from the Connection take precedence.</p>
464
492
  * @public
465
493
  */
466
494
  HttpParameters?: PipeEnrichmentHttpParameters;
467
495
  }
468
496
  /**
469
- * <p>The Amazon Kinesis Data Firehose logging configuration settings for the pipe.</p>
497
+ * <p>The Amazon Data Firehose logging configuration settings for the pipe.</p>
470
498
  * @public
471
499
  */
472
500
  export interface FirehoseLogDestinationParameters {
473
501
  /**
474
- * <p>Specifies the Amazon Resource Name (ARN) of the Kinesis Data Firehose delivery stream to which EventBridge delivers the pipe log records.</p>
502
+ * <p>Specifies the Amazon Resource Name (ARN) of the Firehose delivery stream to
503
+ * which EventBridge delivers the pipe log records.</p>
475
504
  * @public
476
505
  */
477
506
  DeliveryStreamArn: string | undefined;
@@ -520,12 +549,14 @@ export type S3OutputFormat = (typeof S3OutputFormat)[keyof typeof S3OutputFormat
520
549
  */
521
550
  export interface S3LogDestinationParameters {
522
551
  /**
523
- * <p>Specifies the name of the Amazon S3 bucket to which EventBridge delivers the log records for the pipe.</p>
552
+ * <p>Specifies the name of the Amazon S3 bucket to which EventBridge delivers
553
+ * the log records for the pipe.</p>
524
554
  * @public
525
555
  */
526
556
  BucketName: string | undefined;
527
557
  /**
528
- * <p>Specifies the Amazon Web Services account that owns the Amazon S3 bucket to which EventBridge delivers the log records for the pipe.</p>
558
+ * <p>Specifies the Amazon Web Services account that owns the Amazon S3 bucket to which
559
+ * EventBridge delivers the log records for the pipe.</p>
529
560
  * @public
530
561
  */
531
562
  BucketOwner: string | undefined;
@@ -542,7 +573,8 @@ export interface S3LogDestinationParameters {
542
573
  * </li>
543
574
  * <li>
544
575
  * <p>
545
- * <code>w3c</code>: <a href="https://www.w3.org/TR/WD-logfile">W3C extended logging file format</a>
576
+ * <code>w3c</code>: <a href="https://www.w3.org/TR/WD-logfile">W3C extended
577
+ * logging file format</a>
546
578
  * </p>
547
579
  * </li>
548
580
  * </ul>
@@ -551,11 +583,11 @@ export interface S3LogDestinationParameters {
551
583
  OutputFormat?: S3OutputFormat;
552
584
  /**
553
585
  * <p>Specifies any prefix text with which to begin Amazon S3 log object names.</p>
554
- * <p>You can use prefixes to organize the data that you store in Amazon S3 buckets.
555
- * A prefix is a string of characters at the beginning of the object key name.
556
- * A prefix can be any length, subject to the maximum length of the object key name (1,024 bytes).
557
- * For more information, see <a href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-prefixes.html">Organizing objects using prefixes</a>
558
- * in the <i>Amazon Simple Storage Service User Guide</i>.</p>
586
+ * <p>You can use prefixes to organize the data that you store in Amazon S3 buckets. A
587
+ * prefix is a string of characters at the beginning of the object key name. A prefix can be
588
+ * any length, subject to the maximum length of the object key name (1,024 bytes). For more
589
+ * information, see <a href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-prefixes.html">Organizing objects using
590
+ * prefixes</a> in the <i>Amazon Simple Storage Service User Guide</i>.</p>
559
591
  * @public
560
592
  */
561
593
  Prefix?: string;
@@ -570,13 +602,13 @@ export interface S3LogDestinationParameters {
570
602
  * <code>FirehoseLogDestinationParameters</code>, or
571
603
  * <code>S3LogDestinationParameters</code>), EventBridge sets that field to its
572
604
  * system-default value during the update. </p>
573
- * <p>For example, suppose when you created the pipe you
574
- * specified a Kinesis Data Firehose stream log destination. You then update the pipe to add an
575
- * Amazon S3 log destination. In addition to specifying the
576
- * <code>S3LogDestinationParameters</code> for the new log destination, you must also
577
- * specify the fields in the <code>FirehoseLogDestinationParameters</code> object in order to
578
- * retain the Kinesis Data Firehose stream log destination. </p>
579
- * <p>For more information on generating pipe log records, see <a href="eventbridge/latest/userguide/eb-pipes-logs.html">Log EventBridge Pipes</a> in the <i>Amazon EventBridge User Guide</i>.</p>
605
+ * <p>For example, suppose when you created the pipe you specified a Firehose stream
606
+ * log destination. You then update the pipe to add an Amazon S3 log destination. In
607
+ * addition to specifying the <code>S3LogDestinationParameters</code> for the new log
608
+ * destination, you must also specify the fields in the
609
+ * <code>FirehoseLogDestinationParameters</code> object in order to retain the Firehose stream log destination. </p>
610
+ * <p>For more information on generating pipe log records, see <a href="https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes-logs.html">Log EventBridge
611
+ * Pipes</a> in the <i>Amazon EventBridge User Guide</i>.</p>
580
612
  * @public
581
613
  */
582
614
  export interface PipeLogConfigurationParameters {
@@ -586,7 +618,7 @@ export interface PipeLogConfigurationParameters {
586
618
  */
587
619
  S3LogDestination?: S3LogDestinationParameters;
588
620
  /**
589
- * <p>The Amazon Kinesis Data Firehose logging configuration settings for the pipe.</p>
621
+ * <p>The Amazon Data Firehose logging configuration settings for the pipe.</p>
590
622
  * @public
591
623
  */
592
624
  FirehoseLogDestination?: FirehoseLogDestinationParameters;
@@ -597,15 +629,20 @@ export interface PipeLogConfigurationParameters {
597
629
  CloudwatchLogsLogDestination?: CloudwatchLogsLogDestinationParameters;
598
630
  /**
599
631
  * <p>The level of logging detail to include. This applies to all log destinations for the pipe.</p>
600
- * <p>For more information, see <a href="https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes-logs.html#eb-pipes-logs-level">Specifying EventBridge Pipes log level</a> in the <i>Amazon EventBridge User Guide</i>.</p>
632
+ * <p>For more information, see <a href="https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes-logs.html#eb-pipes-logs-level">Specifying
633
+ * EventBridge Pipes log level</a> in the <i>Amazon EventBridge User
634
+ * Guide</i>.</p>
601
635
  * @public
602
636
  */
603
637
  Level: LogLevel | undefined;
604
638
  /**
605
- * <p>Specify <code>ON</code> to include the execution data (specifically, the <code>payload</code> and <code>awsRequest</code> fields) in the log messages for this pipe.</p>
639
+ * <p>Specify <code>ALL</code> to include the execution data (specifically, the
640
+ * <code>payload</code>, <code>awsRequest</code>, and <code>awsResponse</code> fields) in
641
+ * the log messages for this pipe.</p>
606
642
  * <p>This applies to all log destinations for the pipe.</p>
607
- * <p>For more information, see <a href="https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes-logs.html#eb-pipes-logs-execution-data">Including execution data in logs</a> in the <i>Amazon EventBridge User Guide</i>.</p>
608
- * <p>The default is <code>OFF</code>.</p>
643
+ * <p>For more information, see <a href="https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes-logs.html#eb-pipes-logs-execution-data">Including execution data in logs</a> in the <i>Amazon EventBridge User
644
+ * Guide</i>.</p>
645
+ * <p>By default, execution data is not included.</p>
609
646
  * @public
610
647
  */
611
648
  IncludeExecutionData?: IncludeExecutionDataOption[];
@@ -667,7 +704,8 @@ export interface PipeSourceActiveMQBrokerParameters {
667
704
  MaximumBatchingWindowInSeconds?: number;
668
705
  }
669
706
  /**
670
- * <p>A <code>DeadLetterConfig</code> object that contains information about a dead-letter queue configuration.</p>
707
+ * <p>A <code>DeadLetterConfig</code> object that contains information about a dead-letter
708
+ * queue configuration.</p>
671
709
  * @public
672
710
  */
673
711
  export interface DeadLetterConfig {
@@ -753,7 +791,7 @@ export interface PipeSourceDynamoDBStreamParameters {
753
791
  }
754
792
  /**
755
793
  * <p>Filter events using an event pattern. For more information, see <a href="https://docs.aws.amazon.com/eventbridge/latest/userguide/eventbridge-and-event-patterns.html">Events and Event
756
- * Patterns</a> in the <i>Amazon EventBridge User Guide</i>.</p>
794
+ * Patterns</a> in the <i>Amazon EventBridge User Guide</i>.</p>
757
795
  * @public
758
796
  */
759
797
  export interface Filter {
@@ -839,7 +877,8 @@ export interface PipeSourceKinesisStreamParameters {
839
877
  */
840
878
  StartingPosition: KinesisStreamStartPosition | undefined;
841
879
  /**
842
- * <p>With <code>StartingPosition</code> set to <code>AT_TIMESTAMP</code>, the time from which to start reading, in Unix time seconds.</p>
880
+ * <p>With <code>StartingPosition</code> set to <code>AT_TIMESTAMP</code>, the time from which
881
+ * to start reading, in Unix time seconds.</p>
843
882
  * @public
844
883
  */
845
884
  StartingPositionTimestamp?: Date;
@@ -1050,24 +1089,31 @@ export declare const SelfManagedKafkaStartPosition: {
1050
1089
  */
1051
1090
  export type SelfManagedKafkaStartPosition = (typeof SelfManagedKafkaStartPosition)[keyof typeof SelfManagedKafkaStartPosition];
1052
1091
  /**
1053
- * <p>This structure specifies the VPC subnets and security groups for the stream, and whether a public IP address is to be used.</p>
1092
+ * <p>This structure specifies the VPC subnets and security groups for the stream, and whether
1093
+ * a public IP address is to be used.</p>
1054
1094
  * @public
1055
1095
  */
1056
1096
  export interface SelfManagedKafkaAccessConfigurationVpc {
1057
1097
  /**
1058
- * <p>Specifies the subnets associated with the stream. These subnets must all be in the same VPC. You can specify as many as 16 subnets.</p>
1098
+ * <p>Specifies the subnets associated with the stream. These subnets must all be in the same
1099
+ * VPC. You can specify as many as 16 subnets.</p>
1059
1100
  * @public
1060
1101
  */
1061
1102
  Subnets?: string[];
1062
1103
  /**
1063
- * <p>Specifies the security groups associated with the stream. These security groups must all be in the same VPC. You can specify as many
1064
- * as five security groups. If you do not specify a security group, the default security group for the VPC is used.</p>
1104
+ * <p>Specifies the security groups associated with the stream. These security groups must all
1105
+ * be in the same VPC. You can specify as many as five security groups. If you do not specify
1106
+ * a security group, the default security group for the VPC is used.</p>
1065
1107
  * @public
1066
1108
  */
1067
1109
  SecurityGroup?: string[];
1068
1110
  }
1069
1111
  /**
1070
1112
  * <p>The parameters for using a self-managed Apache Kafka stream as a source.</p>
1113
+ * <p>A <i>self managed</i> cluster refers to any Apache Kafka cluster not hosted by Amazon Web Services.
1114
+ * This includes both clusters you manage yourself, as well as those hosted by a third-party
1115
+ * provider, such as <a href="https://www.confluent.io/">Confluent
1116
+ * Cloud</a>, <a href="https://www.cloudkarafka.com/">CloudKarafka</a>, or <a href="https://redpanda.com/">Redpanda</a>. For more information, see <a href="https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes-kafka.html">Apache Kafka streams as a source</a> in the <i>Amazon EventBridge User Guide</i>.</p>
1071
1117
  * @public
1072
1118
  */
1073
1119
  export interface PipeSourceSelfManagedKafkaParameters {
@@ -1178,6 +1224,10 @@ export interface PipeSourceParameters {
1178
1224
  ManagedStreamingKafkaParameters?: PipeSourceManagedStreamingKafkaParameters;
1179
1225
  /**
1180
1226
  * <p>The parameters for using a self-managed Apache Kafka stream as a source.</p>
1227
+ * <p>A <i>self managed</i> cluster refers to any Apache Kafka cluster not hosted by Amazon Web Services.
1228
+ * This includes both clusters you manage yourself, as well as those hosted by a third-party
1229
+ * provider, such as <a href="https://www.confluent.io/">Confluent
1230
+ * Cloud</a>, <a href="https://www.cloudkarafka.com/">CloudKarafka</a>, or <a href="https://redpanda.com/">Redpanda</a>. For more information, see <a href="https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes-kafka.html">Apache Kafka streams as a source</a> in the <i>Amazon EventBridge User Guide</i>.</p>
1181
1231
  * @public
1182
1232
  */
1183
1233
  SelfManagedKafkaParameters?: PipeSourceSelfManagedKafkaParameters;
@@ -1188,14 +1238,16 @@ export interface PipeSourceParameters {
1188
1238
  */
1189
1239
  export interface PipeTargetBatchJobParameters {
1190
1240
  /**
1191
- * <p>The job definition used by this job. This value can be one of <code>name</code>, <code>name:revision</code>, or the Amazon Resource Name (ARN) for the job definition.
1192
- * If name is specified without a revision then the latest active revision is used.</p>
1241
+ * <p>The job definition used by this job. This value can be one of <code>name</code>,
1242
+ * <code>name:revision</code>, or the Amazon Resource Name (ARN) for the job definition. If
1243
+ * name is specified without a revision then the latest active revision is used.</p>
1193
1244
  * @public
1194
1245
  */
1195
1246
  JobDefinition: string | undefined;
1196
1247
  /**
1197
- * <p>The name of the job. It can be up to 128 letters long. The first character must be alphanumeric, can contain uppercase and lowercase letters, numbers, hyphens (-),
1198
- * and underscores (_).</p>
1248
+ * <p>The name of the job. It can be up to 128 letters long. The first character must be
1249
+ * alphanumeric, can contain uppercase and lowercase letters, numbers, hyphens (-), and
1250
+ * underscores (_).</p>
1199
1251
  * @public
1200
1252
  */
1201
1253
  JobName: string | undefined;
@@ -1206,7 +1258,8 @@ export interface PipeTargetBatchJobParameters {
1206
1258
  */
1207
1259
  ArrayProperties?: BatchArrayProperties;
1208
1260
  /**
1209
- * <p>The retry strategy to use for failed jobs. When a retry strategy is specified here, it overrides the retry strategy defined in the job definition.</p>
1261
+ * <p>The retry strategy to use for failed jobs. When a retry strategy is specified here, it
1262
+ * overrides the retry strategy defined in the job definition.</p>
1210
1263
  * @public
1211
1264
  */
1212
1265
  RetryStrategy?: BatchRetryStrategy;
@@ -1216,16 +1269,20 @@ export interface PipeTargetBatchJobParameters {
1216
1269
  */
1217
1270
  ContainerOverrides?: BatchContainerOverrides;
1218
1271
  /**
1219
- * <p>A list of dependencies for the job. A job can depend upon a maximum of 20 jobs. You can specify a <code>SEQUENTIAL</code> type dependency without
1220
- * specifying a job ID for array jobs so that each child array job completes sequentially, starting at index 0. You can also specify an <code>N_TO_N</code>
1221
- * type dependency with a job ID for array jobs. In that case, each index child of this job must wait for the corresponding index child of each
1222
- * dependency to complete before it can begin.</p>
1272
+ * <p>A list of dependencies for the job. A job can depend upon a maximum of 20 jobs. You can
1273
+ * specify a <code>SEQUENTIAL</code> type dependency without specifying a job ID for array
1274
+ * jobs so that each child array job completes sequentially, starting at index 0. You can also
1275
+ * specify an <code>N_TO_N</code> type dependency with a job ID for array jobs. In that case,
1276
+ * each index child of this job must wait for the corresponding index child of each dependency
1277
+ * to complete before it can begin.</p>
1223
1278
  * @public
1224
1279
  */
1225
1280
  DependsOn?: BatchJobDependency[];
1226
1281
  /**
1227
- * <p>Additional parameters passed to the job that replace parameter substitution placeholders that are set in the job definition. Parameters are specified as a key and
1228
- * value pair mapping. Parameters included here override any corresponding parameter defaults from the job definition.</p>
1282
+ * <p>Additional parameters passed to the job that replace parameter substitution placeholders
1283
+ * that are set in the job definition. Parameters are specified as a key and value pair
1284
+ * mapping. Parameters included here override any corresponding parameter defaults from the
1285
+ * job definition.</p>
1229
1286
  * @public
1230
1287
  */
1231
1288
  Parameters?: Record<string, string>;
@@ -1241,7 +1298,8 @@ export interface PipeTargetCloudWatchLogsParameters {
1241
1298
  */
1242
1299
  LogStreamName?: string;
1243
1300
  /**
1244
- * <p>The time the event occurred, expressed as the number of milliseconds after Jan 1, 1970 00:00:00 UTC.</p>
1301
+ * <p>The time the event occurred, expressed as the number of milliseconds after Jan 1, 1970
1302
+ * 00:00:00 UTC.</p>
1245
1303
  * @public
1246
1304
  */
1247
1305
  Timestamp?: string;
@@ -1260,31 +1318,36 @@ export declare const LaunchType: {
1260
1318
  */
1261
1319
  export type LaunchType = (typeof LaunchType)[keyof typeof LaunchType];
1262
1320
  /**
1263
- * <p>This structure specifies the network configuration for an Amazon ECS task.</p>
1321
+ * <p>This structure specifies the network configuration for an Amazon ECS
1322
+ * task.</p>
1264
1323
  * @public
1265
1324
  */
1266
1325
  export interface NetworkConfiguration {
1267
1326
  /**
1268
1327
  * <p>Use this structure to specify the VPC subnets and security groups for the task, and
1269
- * whether a public IP address is to be used. This structure is relevant only for ECS tasks that
1270
- * use the <code>awsvpc</code> network mode.</p>
1328
+ * whether a public IP address is to be used. This structure is relevant only for ECS tasks
1329
+ * that use the <code>awsvpc</code> network mode.</p>
1271
1330
  * @public
1272
1331
  */
1273
1332
  awsvpcConfiguration?: AwsVpcConfiguration;
1274
1333
  }
1275
1334
  /**
1276
- * <p>The environment variables to send to the container. You can add new environment variables, which are added to the container at launch, or you can
1277
- * override the existing environment variables from the Docker image or the task definition. You must also specify a container name.</p>
1335
+ * <p>The environment variables to send to the container. You can add new environment
1336
+ * variables, which are added to the container at launch, or you can override the existing
1337
+ * environment variables from the Docker image or the task definition. You must also specify a
1338
+ * container name.</p>
1278
1339
  * @public
1279
1340
  */
1280
1341
  export interface EcsEnvironmentVariable {
1281
1342
  /**
1282
- * <p>The name of the key-value pair. For environment variables, this is the name of the environment variable.</p>
1343
+ * <p>The name of the key-value pair. For environment variables, this is the name of the
1344
+ * environment variable.</p>
1283
1345
  * @public
1284
1346
  */
1285
1347
  name?: string;
1286
1348
  /**
1287
- * <p>The value of the key-value pair. For environment variables, this is the value of the environment variable.</p>
1349
+ * <p>The value of the key-value pair. For environment variables, this is the value of the
1350
+ * environment variable.</p>
1288
1351
  * @public
1289
1352
  */
1290
1353
  value?: string;
@@ -1302,20 +1365,21 @@ export declare const EcsEnvironmentFileType: {
1302
1365
  export type EcsEnvironmentFileType = (typeof EcsEnvironmentFileType)[keyof typeof EcsEnvironmentFileType];
1303
1366
  /**
1304
1367
  * <p>A list of files containing the environment variables to pass to a container. You can
1305
- * specify up to ten environment files. The file must have a <code>.env</code> file
1306
- * extension. Each line in an environment file should contain an environment variable in
1307
- * <code>VARIABLE=VALUE</code> format. Lines beginning with <code>#</code> are treated
1308
- * as comments and are ignored. For more information about the environment variable file
1309
- * syntax, see <a href="https://docs.docker.com/compose/env-file/">Declare default
1310
- * environment variables in file</a>.</p>
1368
+ * specify up to ten environment files. The file must have a <code>.env</code> file extension.
1369
+ * Each line in an environment file should contain an environment variable in
1370
+ * <code>VARIABLE=VALUE</code> format. Lines beginning with <code>#</code> are treated as
1371
+ * comments and are ignored. For more information about the environment variable file syntax,
1372
+ * see <a href="https://docs.docker.com/compose/env-file/">Declare default environment
1373
+ * variables in file</a>.</p>
1311
1374
  * <p>If there are environment variables specified using the <code>environment</code>
1312
- * parameter in a container definition, they take precedence over the variables contained
1313
- * within an environment file. If multiple environment files are specified that contain the
1314
- * same variable, they're processed from the top down. We recommend that you use unique
1315
- * variable names. For more information, see <a href="https://docs.aws.amazon.com/AmazonECS/latest/developerguide/taskdef-envfiles.html">Specifying environment
1316
- * variables</a> in the <i>Amazon Elastic Container Service Developer Guide</i>.</p>
1375
+ * parameter in a container definition, they take precedence over the variables contained
1376
+ * within an environment file. If multiple environment files are specified that contain the
1377
+ * same variable, they're processed from the top down. We recommend that you use unique
1378
+ * variable names. For more information, see <a href="https://docs.aws.amazon.com/AmazonECS/latest/developerguide/taskdef-envfiles.html">Specifying environment
1379
+ * variables</a> in the <i>Amazon Elastic Container Service Developer
1380
+ * Guide</i>.</p>
1317
1381
  * <p>This parameter is only supported for tasks hosted on Fargate using the
1318
- * following platform versions:</p>
1382
+ * following platform versions:</p>
1319
1383
  * <ul>
1320
1384
  * <li>
1321
1385
  * <p>Linux platform version <code>1.4.0</code> or later.</p>
@@ -1333,7 +1397,8 @@ export interface EcsEnvironmentFile {
1333
1397
  */
1334
1398
  type: EcsEnvironmentFileType | undefined;
1335
1399
  /**
1336
- * <p>The Amazon Resource Name (ARN) of the Amazon S3 object containing the environment variable file.</p>
1400
+ * <p>The Amazon Resource Name (ARN) of the Amazon S3 object containing the
1401
+ * environment variable file.</p>
1337
1402
  * @public
1338
1403
  */
1339
1404
  value: string | undefined;
@@ -1351,80 +1416,90 @@ export declare const EcsResourceRequirementType: {
1351
1416
  */
1352
1417
  export type EcsResourceRequirementType = (typeof EcsResourceRequirementType)[keyof typeof EcsResourceRequirementType];
1353
1418
  /**
1354
- * <p>The type and amount of a resource to assign to a container. The supported resource
1355
- * types are GPUs and Elastic Inference accelerators. For more information, see <a href="https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-gpu.html">Working with
1356
- * GPUs on Amazon ECS</a> or <a href="https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-inference.html">Working with
1357
- * Amazon Elastic Inference on Amazon ECS</a> in the
1358
- * <i>Amazon Elastic Container Service Developer Guide</i>
1419
+ * <p>The type and amount of a resource to assign to a container. The supported resource types
1420
+ * are GPUs and Elastic Inference accelerators. For more information, see <a href="https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-gpu.html">Working with
1421
+ * GPUs on Amazon ECS</a> or <a href="https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-inference.html">Working with Amazon Elastic
1422
+ * Inference on Amazon ECS</a> in the <i>Amazon Elastic Container Service
1423
+ * Developer Guide</i>
1359
1424
  * </p>
1360
1425
  * @public
1361
1426
  */
1362
1427
  export interface EcsResourceRequirement {
1363
1428
  /**
1364
- * <p>The type of resource to assign to a container. The supported values are
1365
- * <code>GPU</code> or <code>InferenceAccelerator</code>.</p>
1429
+ * <p>The type of resource to assign to a container. The supported values are <code>GPU</code>
1430
+ * or <code>InferenceAccelerator</code>.</p>
1366
1431
  * @public
1367
1432
  */
1368
1433
  type: EcsResourceRequirementType | undefined;
1369
1434
  /**
1370
1435
  * <p>The value for the specified resource type.</p>
1371
1436
  * <p>If the <code>GPU</code> type is used, the value is the number of physical
1372
- * <code>GPUs</code> the Amazon ECS container agent reserves for the container. The number
1373
- * of GPUs that's reserved for all containers in a task can't exceed the number of
1437
+ * <code>GPUs</code> the Amazon ECS container agent reserves for the container. The
1438
+ * number of GPUs that's reserved for all containers in a task can't exceed the number of
1374
1439
  * available GPUs on the container instance that the task is launched on.</p>
1375
1440
  * <p>If the <code>InferenceAccelerator</code> type is used, the <code>value</code> matches
1376
- * the <code>deviceName</code> for an InferenceAccelerator specified in a
1377
- * task definition.</p>
1441
+ * the <code>deviceName</code> for an InferenceAccelerator specified in a task
1442
+ * definition.</p>
1378
1443
  * @public
1379
1444
  */
1380
1445
  value: string | undefined;
1381
1446
  }
1382
1447
  /**
1383
- * <p>The overrides that are sent to a container. An empty container override can be passed in. An example of an empty
1384
- * container override is <code>\{"containerOverrides": [ ] \}</code>. If a non-empty container override is specified, the <code>name</code> parameter must be included.</p>
1448
+ * <p>The overrides that are sent to a container. An empty container override can be passed
1449
+ * in. An example of an empty container override is <code>\{"containerOverrides": [ ] \}</code>.
1450
+ * If a non-empty container override is specified, the <code>name</code> parameter must be
1451
+ * included.</p>
1385
1452
  * @public
1386
1453
  */
1387
1454
  export interface EcsContainerOverride {
1388
1455
  /**
1389
- * <p>The command to send to the container that overrides the default command from the Docker image or the task definition. You must also specify a container name.</p>
1456
+ * <p>The command to send to the container that overrides the default command from the Docker
1457
+ * image or the task definition. You must also specify a container name.</p>
1390
1458
  * @public
1391
1459
  */
1392
1460
  Command?: string[];
1393
1461
  /**
1394
- * <p>The number of <code>cpu</code> units reserved for the container, instead of the default value from the task definition. You must also specify a container name.</p>
1462
+ * <p>The number of <code>cpu</code> units reserved for the container, instead of the default
1463
+ * value from the task definition. You must also specify a container name.</p>
1395
1464
  * @public
1396
1465
  */
1397
1466
  Cpu?: number;
1398
1467
  /**
1399
- * <p>The environment variables to send to the container. You can add new environment variables, which are added to the container at launch, or you can
1400
- * override the existing environment variables from the Docker image or the task definition. You must also specify a container name.</p>
1468
+ * <p>The environment variables to send to the container. You can add new environment
1469
+ * variables, which are added to the container at launch, or you can override the existing
1470
+ * environment variables from the Docker image or the task definition. You must also specify a
1471
+ * container name.</p>
1401
1472
  * @public
1402
1473
  */
1403
1474
  Environment?: EcsEnvironmentVariable[];
1404
1475
  /**
1405
- * <p>A list of files containing the environment variables to pass to a container, instead of the value from the container definition.</p>
1476
+ * <p>A list of files containing the environment variables to pass to a container, instead of
1477
+ * the value from the container definition.</p>
1406
1478
  * @public
1407
1479
  */
1408
1480
  EnvironmentFiles?: EcsEnvironmentFile[];
1409
1481
  /**
1410
- * <p>The hard limit (in MiB) of memory to present to the container, instead of the default value from the task definition.
1411
- * If your container attempts to exceed the memory specified here, the container is killed. You must also specify a container name.</p>
1482
+ * <p>The hard limit (in MiB) of memory to present to the container, instead of the default
1483
+ * value from the task definition. If your container attempts to exceed the memory specified
1484
+ * here, the container is killed. You must also specify a container name.</p>
1412
1485
  * @public
1413
1486
  */
1414
1487
  Memory?: number;
1415
1488
  /**
1416
- * <p>The soft limit (in MiB) of memory to reserve for the container, instead of the default value from the task definition.
1417
- * You must also specify a container name.</p>
1489
+ * <p>The soft limit (in MiB) of memory to reserve for the container, instead of the default
1490
+ * value from the task definition. You must also specify a container name.</p>
1418
1491
  * @public
1419
1492
  */
1420
1493
  MemoryReservation?: number;
1421
1494
  /**
1422
- * <p>The name of the container that receives the override. This parameter is required if any override is specified.</p>
1495
+ * <p>The name of the container that receives the override. This parameter is required if any
1496
+ * override is specified.</p>
1423
1497
  * @public
1424
1498
  */
1425
1499
  Name?: string;
1426
1500
  /**
1427
- * <p>The type and amount of a resource to assign to a container, instead of the default value from the task definition. The only supported resource is a GPU.</p>
1501
+ * <p>The type and amount of a resource to assign to a container, instead of the default value
1502
+ * from the task definition. The only supported resource is a GPU.</p>
1428
1503
  * @public
1429
1504
  */
1430
1505
  ResourceRequirements?: EcsResourceRequirement[];
@@ -1432,20 +1507,20 @@ export interface EcsContainerOverride {
1432
1507
  /**
1433
1508
  * <p>The amount of ephemeral storage to allocate for the task. This parameter is used to
1434
1509
  * expand the total amount of ephemeral storage available, beyond the default amount, for
1435
- * tasks hosted on Fargate. For more information, see <a href="https://docs.aws.amazon.com/AmazonECS/latest/userguide/using_data_volumes.html">Fargate task
1436
- * storage</a> in the <i>Amazon ECS User Guide for Fargate</i>.</p>
1510
+ * tasks hosted on Fargate. For more information, see <a href="https://docs.aws.amazon.com/AmazonECS/latest/userguide/using_data_volumes.html">Fargate task storage</a> in the <i>Amazon ECS User Guide
1511
+ * for Fargate</i>.</p>
1437
1512
  * <note>
1438
1513
  * <p>This parameter is only supported for tasks hosted on Fargate using
1439
- * Linux platform version <code>1.4.0</code> or later. This parameter is not supported
1440
- * for Windows containers on Fargate.</p>
1514
+ * Linux platform version <code>1.4.0</code> or later. This parameter is not supported for
1515
+ * Windows containers on Fargate.</p>
1441
1516
  * </note>
1442
1517
  * @public
1443
1518
  */
1444
1519
  export interface EcsEphemeralStorage {
1445
1520
  /**
1446
1521
  * <p>The total amount, in GiB, of ephemeral storage to set for the task. The minimum
1447
- * supported value is <code>21</code> GiB and the maximum supported value is
1448
- * <code>200</code> GiB.</p>
1522
+ * supported value is <code>21</code> GiB and the maximum supported value is <code>200</code>
1523
+ * GiB.</p>
1449
1524
  * @public
1450
1525
  */
1451
1526
  sizeInGiB: number | undefined;
@@ -1453,14 +1528,15 @@ export interface EcsEphemeralStorage {
1453
1528
  /**
1454
1529
  * <p>Details on an Elastic Inference accelerator task override. This parameter is used to
1455
1530
  * override the Elastic Inference accelerator specified in the task definition. For more
1456
- * information, see <a href="https://docs.aws.amazon.com/AmazonECS/latest/userguide/ecs-inference.html">Working with Amazon
1457
- * Elastic Inference on Amazon ECS</a> in the
1458
- * <i>Amazon Elastic Container Service Developer Guide</i>.</p>
1531
+ * information, see <a href="https://docs.aws.amazon.com/AmazonECS/latest/userguide/ecs-inference.html">Working with Amazon Elastic
1532
+ * Inference on Amazon ECS</a> in the <i>Amazon Elastic Container Service
1533
+ * Developer Guide</i>.</p>
1459
1534
  * @public
1460
1535
  */
1461
1536
  export interface EcsInferenceAcceleratorOverride {
1462
1537
  /**
1463
- * <p>The Elastic Inference accelerator device name to override for the task. This parameter must match a <code>deviceName</code> specified in the task definition.</p>
1538
+ * <p>The Elastic Inference accelerator device name to override for the task. This parameter
1539
+ * must match a <code>deviceName</code> specified in the task definition.</p>
1464
1540
  * @public
1465
1541
  */
1466
1542
  deviceName?: string;
@@ -1488,8 +1564,8 @@ export interface EcsTaskOverride {
1488
1564
  /**
1489
1565
  * <p>The ephemeral storage setting override for the task.</p>
1490
1566
  * <note>
1491
- * <p>This parameter is only supported for tasks hosted on Fargate that
1492
- * use the following platform versions:</p>
1567
+ * <p>This parameter is only supported for tasks hosted on Fargate that use
1568
+ * the following platform versions:</p>
1493
1569
  * <ul>
1494
1570
  * <li>
1495
1571
  * <p>Linux platform version <code>1.4.0</code> or later.</p>
@@ -1503,9 +1579,10 @@ export interface EcsTaskOverride {
1503
1579
  */
1504
1580
  EphemeralStorage?: EcsEphemeralStorage;
1505
1581
  /**
1506
- * <p>The Amazon Resource Name (ARN) of the task execution IAM role override for the task. For more
1507
- * information, see <a href="https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_execution_IAM_role.html">Amazon ECS task
1508
- * execution IAM role</a> in the <i>Amazon Elastic Container Service Developer Guide</i>.</p>
1582
+ * <p>The Amazon Resource Name (ARN) of the task execution IAM role override for the task. For
1583
+ * more information, see <a href="https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task_execution_IAM_role.html">Amazon ECS
1584
+ * task execution IAM role</a> in the <i>Amazon Elastic Container Service Developer
1585
+ * Guide</i>.</p>
1509
1586
  * @public
1510
1587
  */
1511
1588
  ExecutionRoleArn?: string;
@@ -1520,10 +1597,10 @@ export interface EcsTaskOverride {
1520
1597
  */
1521
1598
  Memory?: string;
1522
1599
  /**
1523
- * <p>The Amazon Resource Name (ARN) of the IAM role that containers in this task can assume. All containers
1524
- * in this task are granted the permissions that are specified in this role. For more
1525
- * information, see <a href="https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html">IAM Role for Tasks</a>
1526
- * in the <i>Amazon Elastic Container Service Developer Guide</i>.</p>
1600
+ * <p>The Amazon Resource Name (ARN) of the IAM role that containers in this task can assume.
1601
+ * All containers in this task are granted the permissions that are specified in this role.
1602
+ * For more information, see <a href="https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-iam-roles.html">IAM Role for Tasks</a> in
1603
+ * the <i>Amazon Elastic Container Service Developer Guide</i>.</p>
1527
1604
  * @public
1528
1605
  */
1529
1606
  TaskRoleArn?: string;
@@ -1541,22 +1618,23 @@ export declare const PlacementConstraintType: {
1541
1618
  */
1542
1619
  export type PlacementConstraintType = (typeof PlacementConstraintType)[keyof typeof PlacementConstraintType];
1543
1620
  /**
1544
- * <p>An object representing a constraint on task placement. To learn more, see <a href="https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-placement-constraints.html">Task Placement Constraints</a> in the Amazon Elastic Container Service Developer
1545
- * Guide.</p>
1621
+ * <p>An object representing a constraint on task placement. To learn more, see <a href="https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-placement-constraints.html">Task Placement
1622
+ * Constraints</a> in the Amazon Elastic Container Service Developer Guide.</p>
1546
1623
  * @public
1547
1624
  */
1548
1625
  export interface PlacementConstraint {
1549
1626
  /**
1550
1627
  * <p>The type of constraint. Use distinctInstance to ensure that each task in a particular
1551
- * group is running on a different container instance. Use memberOf to restrict the selection to
1552
- * a group of valid candidates. </p>
1628
+ * group is running on a different container instance. Use memberOf to restrict the selection
1629
+ * to a group of valid candidates. </p>
1553
1630
  * @public
1554
1631
  */
1555
1632
  type?: PlacementConstraintType;
1556
1633
  /**
1557
1634
  * <p>A cluster query language expression to apply to the constraint. You cannot specify an
1558
- * expression if the constraint type is <code>distinctInstance</code>. To learn more, see <a href="https://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-query-language.html">Cluster Query Language</a> in the Amazon Elastic Container Service Developer Guide.
1559
- * </p>
1635
+ * expression if the constraint type is <code>distinctInstance</code>. To learn more, see
1636
+ * <a href="https://docs.aws.amazon.com/AmazonECS/latest/developerguide/cluster-query-language.html">Cluster Query
1637
+ * Language</a> in the Amazon Elastic Container Service Developer Guide. </p>
1560
1638
  * @public
1561
1639
  */
1562
1640
  expression?: string;
@@ -1575,18 +1653,19 @@ export declare const PlacementStrategyType: {
1575
1653
  */
1576
1654
  export type PlacementStrategyType = (typeof PlacementStrategyType)[keyof typeof PlacementStrategyType];
1577
1655
  /**
1578
- * <p>The task placement strategy for a task or service. To learn more, see <a href="https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-placement-strategies.html">Task Placement Strategies</a> in the Amazon Elastic Container Service Service Developer
1579
- * Guide.</p>
1656
+ * <p>The task placement strategy for a task or service. To learn more, see <a href="https://docs.aws.amazon.com/AmazonECS/latest/developerguide/task-placement-strategies.html">Task Placement
1657
+ * Strategies</a> in the Amazon Elastic Container Service Service Developer Guide.</p>
1580
1658
  * @public
1581
1659
  */
1582
1660
  export interface PlacementStrategy {
1583
1661
  /**
1584
1662
  * <p>The type of placement strategy. The random placement strategy randomly places tasks on
1585
1663
  * available candidates. The spread placement strategy spreads placement across available
1586
- * candidates evenly based on the field parameter. The binpack strategy places tasks on available
1587
- * candidates that have the least available amount of the resource that is specified with the
1588
- * field parameter. For example, if you binpack on memory, a task is placed on the instance with
1589
- * the least amount of remaining memory (but still enough to run the task). </p>
1664
+ * candidates evenly based on the field parameter. The binpack strategy places tasks on
1665
+ * available candidates that have the least available amount of the resource that is specified
1666
+ * with the field parameter. For example, if you binpack on memory, a task is placed on the
1667
+ * instance with the least amount of remaining memory (but still enough to run the task).
1668
+ * </p>
1590
1669
  * @public
1591
1670
  */
1592
1671
  type?: PlacementStrategyType;
@@ -1594,8 +1673,8 @@ export interface PlacementStrategy {
1594
1673
  * <p>The field to apply the placement strategy against. For the spread placement strategy,
1595
1674
  * valid values are instanceId (or host, which has the same effect), or any platform or custom
1596
1675
  * attribute that is applied to a container instance, such as attribute:ecs.availability-zone.
1597
- * For the binpack placement strategy, valid values are cpu and memory. For the random placement
1598
- * strategy, this field is not used. </p>
1676
+ * For the binpack placement strategy, valid values are cpu and memory. For the random
1677
+ * placement strategy, this field is not used. </p>
1599
1678
  * @public
1600
1679
  */
1601
1680
  field?: string;
@@ -1612,8 +1691,8 @@ export declare const PropagateTags: {
1612
1691
  */
1613
1692
  export type PropagateTags = (typeof PropagateTags)[keyof typeof PropagateTags];
1614
1693
  /**
1615
- * <p>A key-value pair associated with an Amazon Web Services resource. In EventBridge, rules and event buses
1616
- * support tagging.</p>
1694
+ * <p>A key-value pair associated with an Amazon Web Services resource. In EventBridge,
1695
+ * rules and event buses support tagging.</p>
1617
1696
  * @public
1618
1697
  */
1619
1698
  export interface Tag {
@@ -1635,32 +1714,33 @@ export interface Tag {
1635
1714
  */
1636
1715
  export interface PipeTargetEcsTaskParameters {
1637
1716
  /**
1638
- * <p>The ARN of the task definition to use if the event target is an Amazon ECS task. </p>
1717
+ * <p>The ARN of the task definition to use if the event target is an Amazon ECS task.
1718
+ * </p>
1639
1719
  * @public
1640
1720
  */
1641
1721
  TaskDefinitionArn: string | undefined;
1642
1722
  /**
1643
- * <p>The number of tasks to create based on <code>TaskDefinition</code>. The default is 1.</p>
1723
+ * <p>The number of tasks to create based on <code>TaskDefinition</code>. The default is
1724
+ * 1.</p>
1644
1725
  * @public
1645
1726
  */
1646
1727
  TaskCount?: number;
1647
1728
  /**
1648
- * <p>Specifies the launch type on which your task is running. The launch type that you specify
1649
- * here must match one of the launch type (compatibilities) of the target task. The
1650
- * <code>FARGATE</code> value is supported only in the Regions where Fargate with Amazon ECS
1651
- * is supported. For more information, see <a href="https://docs.aws.amazon.com/AmazonECS/latest/developerguide/AWS-Fargate.html">Fargate on Amazon ECS</a> in
1652
- * the <i>Amazon Elastic Container Service Developer Guide</i>.</p>
1729
+ * <p>Specifies the launch type on which your task is running. The launch type that you
1730
+ * specify here must match one of the launch type (compatibilities) of the target task. The
1731
+ * <code>FARGATE</code> value is supported only in the Regions where Fargate with Amazon ECS is supported. For more information, see
1732
+ * <a href="https://docs.aws.amazon.com/AmazonECS/latest/developerguide/AWS-Fargate.html">Fargate on Amazon ECS</a> in the <i>Amazon Elastic Container Service Developer Guide</i>.</p>
1653
1733
  * @public
1654
1734
  */
1655
1735
  LaunchType?: LaunchType;
1656
1736
  /**
1657
- * <p>Use this structure if the Amazon ECS task uses the <code>awsvpc</code> network mode. This
1658
- * structure specifies the VPC subnets and security groups associated with the task, and whether
1659
- * a public IP address is to be used. This structure is required if <code>LaunchType</code> is
1660
- * <code>FARGATE</code> because the <code>awsvpc</code> mode is required for Fargate
1661
- * tasks.</p>
1662
- * <p>If you specify <code>NetworkConfiguration</code> when the target ECS task does not use the
1663
- * <code>awsvpc</code> network mode, the task fails.</p>
1737
+ * <p>Use this structure if the Amazon ECS task uses the <code>awsvpc</code> network
1738
+ * mode. This structure specifies the VPC subnets and security groups associated with the
1739
+ * task, and whether a public IP address is to be used. This structure is required if
1740
+ * <code>LaunchType</code> is <code>FARGATE</code> because the <code>awsvpc</code> mode is
1741
+ * required for Fargate tasks.</p>
1742
+ * <p>If you specify <code>NetworkConfiguration</code> when the target ECS task does not use
1743
+ * the <code>awsvpc</code> network mode, the task fails.</p>
1664
1744
  * @public
1665
1745
  */
1666
1746
  NetworkConfiguration?: NetworkConfiguration;
@@ -1668,14 +1748,15 @@ export interface PipeTargetEcsTaskParameters {
1668
1748
  * <p>Specifies the platform version for the task. Specify only the numeric portion of the
1669
1749
  * platform version, such as <code>1.1.0</code>.</p>
1670
1750
  * <p>This structure is used only if <code>LaunchType</code> is <code>FARGATE</code>. For more
1671
- * information about valid platform versions, see <a href="https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html">Fargate Platform
1672
- * Versions</a> in the <i>Amazon Elastic Container Service Developer
1673
- * Guide</i>.</p>
1751
+ * information about valid platform versions, see <a href="https://docs.aws.amazon.com/AmazonECS/latest/developerguide/platform_versions.html">Fargate
1752
+ * Platform Versions</a> in the <i>Amazon Elastic Container Service Developer
1753
+ * Guide</i>.</p>
1674
1754
  * @public
1675
1755
  */
1676
1756
  PlatformVersion?: string;
1677
1757
  /**
1678
- * <p>Specifies an Amazon ECS task group for the task. The maximum length is 255 characters.</p>
1758
+ * <p>Specifies an Amazon ECS task group for the task. The maximum length is 255
1759
+ * characters.</p>
1679
1760
  * @public
1680
1761
  */
1681
1762
  Group?: string;
@@ -1683,14 +1764,14 @@ export interface PipeTargetEcsTaskParameters {
1683
1764
  * <p>The capacity provider strategy to use for the task.</p>
1684
1765
  * <p>If a <code>capacityProviderStrategy</code> is specified, the <code>launchType</code>
1685
1766
  * parameter must be omitted. If no <code>capacityProviderStrategy</code> or launchType is
1686
- * specified, the <code>defaultCapacityProviderStrategy</code> for the cluster is used. </p>
1767
+ * specified, the <code>defaultCapacityProviderStrategy</code> for the cluster is used.
1768
+ * </p>
1687
1769
  * @public
1688
1770
  */
1689
1771
  CapacityProviderStrategy?: CapacityProviderStrategyItem[];
1690
1772
  /**
1691
- * <p>Specifies whether to enable Amazon ECS managed tags for the task. For more information,
1692
- * see <a href="https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-using-tags.html">Tagging Your Amazon ECS Resources</a> in the Amazon Elastic Container Service Developer
1693
- * Guide. </p>
1773
+ * <p>Specifies whether to enable Amazon ECS managed tags for the task. For more
1774
+ * information, see <a href="https://docs.aws.amazon.com/AmazonECS/latest/developerguide/ecs-using-tags.html">Tagging Your Amazon ECS Resources</a> in the Amazon Elastic Container Service Developer Guide. </p>
1694
1775
  * @public
1695
1776
  */
1696
1777
  EnableECSManagedTags?: boolean;
@@ -1715,9 +1796,10 @@ export interface PipeTargetEcsTaskParameters {
1715
1796
  */
1716
1797
  PlacementStrategy?: PlacementStrategy[];
1717
1798
  /**
1718
- * <p>Specifies whether to propagate the tags from the task definition to the task. If no value
1719
- * is specified, the tags are not propagated. Tags can only be propagated to the task during task
1720
- * creation. To add tags to a task after task creation, use the <code>TagResource</code> API action. </p>
1799
+ * <p>Specifies whether to propagate the tags from the task definition to the task. If no
1800
+ * value is specified, the tags are not propagated. Tags can only be propagated to the task
1801
+ * during task creation. To add tags to a task after task creation, use the
1802
+ * <code>TagResource</code> API action. </p>
1721
1803
  * @public
1722
1804
  */
1723
1805
  PropagateTags?: PropagateTags;
@@ -1732,8 +1814,9 @@ export interface PipeTargetEcsTaskParameters {
1732
1814
  */
1733
1815
  Overrides?: EcsTaskOverride;
1734
1816
  /**
1735
- * <p>The metadata that you apply to the task to help you categorize and organize them. Each tag
1736
- * consists of a key and an optional value, both of which you define. To learn more, see <a href="https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_RunTask.html#ECS-RunTask-request-tags">RunTask</a> in the Amazon ECS API Reference.</p>
1817
+ * <p>The metadata that you apply to the task to help you categorize and organize them. Each
1818
+ * tag consists of a key and an optional value, both of which you define. To learn more, see
1819
+ * <a href="https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_RunTask.html#ECS-RunTask-request-tags">RunTask</a> in the Amazon ECS API Reference.</p>
1737
1820
  * @public
1738
1821
  */
1739
1822
  Tags?: Tag[];
@@ -1744,12 +1827,15 @@ export interface PipeTargetEcsTaskParameters {
1744
1827
  */
1745
1828
  export interface PipeTargetEventBridgeEventBusParameters {
1746
1829
  /**
1747
- * <p>The URL subdomain of the endpoint. For example, if the URL for Endpoint is https://abcde.veo.endpoints.event.amazonaws.com, then the EndpointId is <code>abcde.veo</code>.</p>
1830
+ * <p>The URL subdomain of the endpoint. For example, if the URL for Endpoint is
1831
+ * https://abcde.veo.endpoints.event.amazonaws.com, then the EndpointId is
1832
+ * <code>abcde.veo</code>.</p>
1748
1833
  * @public
1749
1834
  */
1750
1835
  EndpointId?: string;
1751
1836
  /**
1752
- * <p>A free-form string, with a maximum of 128 characters, used to decide what fields to expect in the event detail.</p>
1837
+ * <p>A free-form string, with a maximum of 128 characters, used to decide what fields to
1838
+ * expect in the event detail.</p>
1753
1839
  * @public
1754
1840
  */
1755
1841
  DetailType?: string;
@@ -1759,8 +1845,8 @@ export interface PipeTargetEventBridgeEventBusParameters {
1759
1845
  */
1760
1846
  Source?: string;
1761
1847
  /**
1762
- * <p>Amazon Web Services resources, identified by Amazon Resource Name (ARN), which the event primarily
1763
- * concerns. Any number, including zero, may be present.</p>
1848
+ * <p>Amazon Web Services resources, identified by Amazon Resource Name (ARN), which the event
1849
+ * primarily concerns. Any number, including zero, may be present.</p>
1764
1850
  * @public
1765
1851
  */
1766
1852
  Resources?: string[];
@@ -1777,20 +1863,19 @@ export interface PipeTargetEventBridgeEventBusParameters {
1777
1863
  */
1778
1864
  export interface PipeTargetHttpParameters {
1779
1865
  /**
1780
- * <p>The path parameter values to be used to populate API Gateway REST API or EventBridge
1781
- * ApiDestination path wildcards ("*").</p>
1866
+ * <p>The path parameter values to be used to populate API Gateway REST API or EventBridge ApiDestination path wildcards ("*").</p>
1782
1867
  * @public
1783
1868
  */
1784
1869
  PathParameterValues?: string[];
1785
1870
  /**
1786
- * <p>The headers that need to be sent as part of request invoking the API Gateway REST API or
1787
- * EventBridge ApiDestination.</p>
1871
+ * <p>The headers that need to be sent as part of request invoking the API Gateway REST
1872
+ * API or EventBridge ApiDestination.</p>
1788
1873
  * @public
1789
1874
  */
1790
1875
  HeaderParameters?: Record<string, string>;
1791
1876
  /**
1792
- * <p>The query string keys/values that need to be sent as part of request invoking the API Gateway
1793
- * REST API or EventBridge ApiDestination.</p>
1877
+ * <p>The query string keys/values that need to be sent as part of request invoking the
1878
+ * API Gateway REST API or EventBridge ApiDestination.</p>
1794
1879
  * @public
1795
1880
  */
1796
1881
  QueryStringParameters?: Record<string, string>;
@@ -1801,10 +1886,12 @@ export interface PipeTargetHttpParameters {
1801
1886
  */
1802
1887
  export interface PipeTargetKinesisStreamParameters {
1803
1888
  /**
1804
- * <p>Determines which shard in the stream the data record is assigned to. Partition keys are Unicode strings with a maximum length limit of 256 characters
1805
- * for each key. Amazon Kinesis Data Streams uses the partition key as input to a hash function that maps the partition key and associated data to a specific shard.
1806
- * Specifically, an MD5 hash function is used to map partition keys to 128-bit integer values and to map associated data records to shards. As a result of this
1807
- * hashing mechanism, all data records with the same partition key map to the same shard within the stream.</p>
1889
+ * <p>Determines which shard in the stream the data record is assigned to. Partition keys are
1890
+ * Unicode strings with a maximum length limit of 256 characters for each key. Amazon Kinesis Data Streams uses the partition key as input to a hash function that maps the
1891
+ * partition key and associated data to a specific shard. Specifically, an MD5 hash function
1892
+ * is used to map partition keys to 128-bit integer values and to map associated data records
1893
+ * to shards. As a result of this hashing mechanism, all data records with the same partition
1894
+ * key map to the same shard within the stream.</p>
1808
1895
  * @public
1809
1896
  */
1810
1897
  PartitionKey: string | undefined;
@@ -1831,14 +1918,23 @@ export interface PipeTargetLambdaFunctionParameters {
1831
1918
  * <ul>
1832
1919
  * <li>
1833
1920
  * <p>
1834
- * <code>REQUEST_RESPONSE</code> (default) - Invoke synchronously. This corresponds to the <code>RequestResponse</code> option in the <code>InvocationType</code> parameter for the Lambda <a href="https://docs.aws.amazon.com/lambda/latest/dg/API_Invoke.html#API_Invoke_RequestSyntax">Invoke</a> API.</p>
1921
+ * <code>REQUEST_RESPONSE</code> (default) - Invoke synchronously. This corresponds
1922
+ * to the <code>RequestResponse</code> option in the <code>InvocationType</code>
1923
+ * parameter for the Lambda
1924
+ * <a href="https://docs.aws.amazon.com/lambda/latest/dg/API_Invoke.html#API_Invoke_RequestSyntax">Invoke</a>
1925
+ * API.</p>
1835
1926
  * </li>
1836
1927
  * <li>
1837
1928
  * <p>
1838
- * <code>FIRE_AND_FORGET</code> - Invoke asynchronously. This corresponds to the <code>Event</code> option in the <code>InvocationType</code> parameter for the Lambda <a href="https://docs.aws.amazon.com/lambda/latest/dg/API_Invoke.html#API_Invoke_RequestSyntax">Invoke</a> API.</p>
1929
+ * <code>FIRE_AND_FORGET</code> - Invoke asynchronously. This corresponds to the
1930
+ * <code>Event</code> option in the <code>InvocationType</code> parameter for the
1931
+ * Lambda
1932
+ * <a href="https://docs.aws.amazon.com/lambda/latest/dg/API_Invoke.html#API_Invoke_RequestSyntax">Invoke</a>
1933
+ * API.</p>
1839
1934
  * </li>
1840
1935
  * </ul>
1841
- * <p>For more information, see <a href="https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes.html#pipes-invocation">Invocation types</a> in the <i>Amazon EventBridge User Guide</i>.</p>
1936
+ * <p>For more information, see <a href="https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes.html#pipes-invocation">Invocation
1937
+ * types</a> in the <i>Amazon EventBridge User Guide</i>.</p>
1842
1938
  * @public
1843
1939
  */
1844
1940
  InvocationType?: PipeTargetInvocationType;
@@ -1856,7 +1952,8 @@ export interface PipeTargetRedshiftDataParameters {
1856
1952
  */
1857
1953
  SecretManagerArn?: string;
1858
1954
  /**
1859
- * <p>The name of the database. Required when authenticating using temporary credentials.</p>
1955
+ * <p>The name of the database. Required when authenticating using temporary
1956
+ * credentials.</p>
1860
1957
  * @public
1861
1958
  */
1862
1959
  Database: string | undefined;
@@ -1890,12 +1987,14 @@ export interface PipeTargetRedshiftDataParameters {
1890
1987
  */
1891
1988
  export interface SageMakerPipelineParameter {
1892
1989
  /**
1893
- * <p>Name of parameter to start execution of a SageMaker Model Building Pipeline.</p>
1990
+ * <p>Name of parameter to start execution of a SageMaker Model Building
1991
+ * Pipeline.</p>
1894
1992
  * @public
1895
1993
  */
1896
1994
  Name: string | undefined;
1897
1995
  /**
1898
- * <p>Value of parameter to start execution of a SageMaker Model Building Pipeline.</p>
1996
+ * <p>Value of parameter to start execution of a SageMaker Model Building
1997
+ * Pipeline.</p>
1899
1998
  * @public
1900
1999
  */
1901
2000
  Value: string | undefined;
@@ -1906,7 +2005,8 @@ export interface SageMakerPipelineParameter {
1906
2005
  */
1907
2006
  export interface PipeTargetSageMakerPipelineParameters {
1908
2007
  /**
1909
- * <p>List of Parameter names and values for SageMaker Model Building Pipeline execution.</p>
2008
+ * <p>List of Parameter names and values for SageMaker Model Building Pipeline
2009
+ * execution.</p>
1910
2010
  * @public
1911
2011
  */
1912
2012
  PipelineParameterList?: SageMakerPipelineParameter[];
@@ -1934,26 +2034,245 @@ export interface PipeTargetSqsQueueParameters {
1934
2034
  */
1935
2035
  export interface PipeTargetStateMachineParameters {
1936
2036
  /**
1937
- * <p>Specify whether to invoke the Step Functions state machine synchronously or asynchronously.</p>
2037
+ * <p>Specify whether to invoke the Step Functions state machine synchronously or
2038
+ * asynchronously.</p>
1938
2039
  * <ul>
1939
2040
  * <li>
1940
2041
  * <p>
1941
- * <code>REQUEST_RESPONSE</code> (default) - Invoke synchronously. For more information, see <a href="https://docs.aws.amazon.com/step-functions/latest/apireference/API_StartSyncExecution.html">StartSyncExecution</a> in the <i>Step Functions API Reference</i>.</p>
2042
+ * <code>REQUEST_RESPONSE</code> (default) - Invoke synchronously. For more
2043
+ * information, see <a href="https://docs.aws.amazon.com/step-functions/latest/apireference/API_StartSyncExecution.html">StartSyncExecution</a> in the <i>Step Functions API
2044
+ * Reference</i>.</p>
1942
2045
  * <note>
1943
2046
  * <p>
1944
- * <code>REQUEST_RESPONSE</code> is not supported for <code>STANDARD</code> state machine workflows.</p>
2047
+ * <code>REQUEST_RESPONSE</code> is not supported for <code>STANDARD</code> state
2048
+ * machine workflows.</p>
1945
2049
  * </note>
1946
2050
  * </li>
1947
2051
  * <li>
1948
2052
  * <p>
1949
- * <code>FIRE_AND_FORGET</code> - Invoke asynchronously. For more information, see <a href="https://docs.aws.amazon.com/step-functions/latest/apireference/API_StartExecution.html">StartExecution</a> in the <i>Step Functions API Reference</i>.</p>
2053
+ * <code>FIRE_AND_FORGET</code> - Invoke asynchronously. For more information, see
2054
+ * <a href="https://docs.aws.amazon.com/step-functions/latest/apireference/API_StartExecution.html">StartExecution</a> in the <i>Step Functions API
2055
+ * Reference</i>.</p>
1950
2056
  * </li>
1951
2057
  * </ul>
1952
- * <p>For more information, see <a href="https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes.html#pipes-invocation">Invocation types</a> in the <i>Amazon EventBridge User Guide</i>.</p>
2058
+ * <p>For more information, see <a href="https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes.html#pipes-invocation">Invocation
2059
+ * types</a> in the <i>Amazon EventBridge User Guide</i>.</p>
1953
2060
  * @public
1954
2061
  */
1955
2062
  InvocationType?: PipeTargetInvocationType;
1956
2063
  }
2064
+ /**
2065
+ * @public
2066
+ * @enum
2067
+ */
2068
+ export declare const DimensionValueType: {
2069
+ readonly VARCHAR: "VARCHAR";
2070
+ };
2071
+ /**
2072
+ * @public
2073
+ */
2074
+ export type DimensionValueType = (typeof DimensionValueType)[keyof typeof DimensionValueType];
2075
+ /**
2076
+ * <p>Maps source data to a dimension in the target Timestream for LiveAnalytics
2077
+ * table.</p>
2078
+ * <p>For more information, see <a href="https://docs.aws.amazon.com/timestream/latest/developerguide/concepts.html">Amazon Timestream for LiveAnalytics concepts</a>
2079
+ * </p>
2080
+ * @public
2081
+ */
2082
+ export interface DimensionMapping {
2083
+ /**
2084
+ * <p>Dynamic path to the dimension value in the source event.</p>
2085
+ * @public
2086
+ */
2087
+ DimensionValue: string | undefined;
2088
+ /**
2089
+ * <p>The data type of the dimension for the time-series data.</p>
2090
+ * @public
2091
+ */
2092
+ DimensionValueType: DimensionValueType | undefined;
2093
+ /**
2094
+ * <p>The metadata attributes of the time series. For example, the name and Availability Zone
2095
+ * of an Amazon EC2 instance or the name of the manufacturer of a wind turbine are
2096
+ * dimensions.</p>
2097
+ * @public
2098
+ */
2099
+ DimensionName: string | undefined;
2100
+ }
2101
+ /**
2102
+ * @public
2103
+ * @enum
2104
+ */
2105
+ export declare const EpochTimeUnit: {
2106
+ readonly MICROSECONDS: "MICROSECONDS";
2107
+ readonly MILLISECONDS: "MILLISECONDS";
2108
+ readonly NANOSECONDS: "NANOSECONDS";
2109
+ readonly SECONDS: "SECONDS";
2110
+ };
2111
+ /**
2112
+ * @public
2113
+ */
2114
+ export type EpochTimeUnit = (typeof EpochTimeUnit)[keyof typeof EpochTimeUnit];
2115
+ /**
2116
+ * @public
2117
+ * @enum
2118
+ */
2119
+ export declare const MeasureValueType: {
2120
+ readonly BIGINT: "BIGINT";
2121
+ readonly BOOLEAN: "BOOLEAN";
2122
+ readonly DOUBLE: "DOUBLE";
2123
+ readonly TIMESTAMP: "TIMESTAMP";
2124
+ readonly VARCHAR: "VARCHAR";
2125
+ };
2126
+ /**
2127
+ * @public
2128
+ */
2129
+ export type MeasureValueType = (typeof MeasureValueType)[keyof typeof MeasureValueType];
2130
+ /**
2131
+ * <p>A mapping of a source event data field to a measure in a Timestream for
2132
+ * LiveAnalytics record.</p>
2133
+ * @public
2134
+ */
2135
+ export interface MultiMeasureAttributeMapping {
2136
+ /**
2137
+ * <p>Dynamic path to the measurement attribute in the source event.</p>
2138
+ * @public
2139
+ */
2140
+ MeasureValue: string | undefined;
2141
+ /**
2142
+ * <p>Data type of the measurement attribute in the source event.</p>
2143
+ * @public
2144
+ */
2145
+ MeasureValueType: MeasureValueType | undefined;
2146
+ /**
2147
+ * <p>Target measure name to be used.</p>
2148
+ * @public
2149
+ */
2150
+ MultiMeasureAttributeName: string | undefined;
2151
+ }
2152
+ /**
2153
+ * <p>Maps multiple measures from the source event to the same Timestream for
2154
+ * LiveAnalytics record.</p>
2155
+ * <p>For more information, see <a href="https://docs.aws.amazon.com/timestream/latest/developerguide/concepts.html">Amazon Timestream for LiveAnalytics concepts</a>
2156
+ * </p>
2157
+ * @public
2158
+ */
2159
+ export interface MultiMeasureMapping {
2160
+ /**
2161
+ * <p>The name of the multiple measurements per record (multi-measure).</p>
2162
+ * @public
2163
+ */
2164
+ MultiMeasureName: string | undefined;
2165
+ /**
2166
+ * <p>Mappings that represent multiple source event fields mapped to measures in the same
2167
+ * Timestream for LiveAnalytics record.</p>
2168
+ * @public
2169
+ */
2170
+ MultiMeasureAttributeMappings: MultiMeasureAttributeMapping[] | undefined;
2171
+ }
2172
+ /**
2173
+ * <p>Maps a single source data field to a single record in the specified Timestream
2174
+ * for LiveAnalytics table.</p>
2175
+ * <p>For more information, see <a href="https://docs.aws.amazon.com/timestream/latest/developerguide/concepts.html">Amazon Timestream for LiveAnalytics concepts</a>
2176
+ * </p>
2177
+ * @public
2178
+ */
2179
+ export interface SingleMeasureMapping {
2180
+ /**
2181
+ * <p>Dynamic path of the source field to map to the measure in the record.</p>
2182
+ * @public
2183
+ */
2184
+ MeasureValue: string | undefined;
2185
+ /**
2186
+ * <p>Data type of the source field.</p>
2187
+ * @public
2188
+ */
2189
+ MeasureValueType: MeasureValueType | undefined;
2190
+ /**
2191
+ * <p>Target measure name for the measurement attribute in the Timestream table.</p>
2192
+ * @public
2193
+ */
2194
+ MeasureName: string | undefined;
2195
+ }
2196
+ /**
2197
+ * @public
2198
+ * @enum
2199
+ */
2200
+ export declare const TimeFieldType: {
2201
+ readonly EPOCH: "EPOCH";
2202
+ readonly TIMESTAMP_FORMAT: "TIMESTAMP_FORMAT";
2203
+ };
2204
+ /**
2205
+ * @public
2206
+ */
2207
+ export type TimeFieldType = (typeof TimeFieldType)[keyof typeof TimeFieldType];
2208
+ /**
2209
+ * <p>The parameters for using a Timestream for LiveAnalytics table as a
2210
+ * target.</p>
2211
+ * @public
2212
+ */
2213
+ export interface PipeTargetTimestreamParameters {
2214
+ /**
2215
+ * <p>Dynamic path to the source data field that represents the time value for your data.</p>
2216
+ * @public
2217
+ */
2218
+ TimeValue: string | undefined;
2219
+ /**
2220
+ * <p>The granularity of the time units used. Default is <code>MILLISECONDS</code>.</p>
2221
+ * <p>Required if <code>TimeFieldType</code> is specified as <code>EPOCH</code>.</p>
2222
+ * @public
2223
+ */
2224
+ EpochTimeUnit?: EpochTimeUnit;
2225
+ /**
2226
+ * <p>The type of time value used.</p>
2227
+ * <p>The default is <code>EPOCH</code>.</p>
2228
+ * @public
2229
+ */
2230
+ TimeFieldType?: TimeFieldType;
2231
+ /**
2232
+ * <p>How to format the timestamps. For example,
2233
+ * <code>YYYY-MM-DDThh:mm:ss.sssTZD</code>.</p>
2234
+ * <p>Required if <code>TimeFieldType</code> is specified as
2235
+ * <code>TIMESTAMP_FORMAT</code>.</p>
2236
+ * @public
2237
+ */
2238
+ TimestampFormat?: string;
2239
+ /**
2240
+ * <p>64 bit version value or source data field that represents the version value for your data.</p>
2241
+ * <p>Write requests with a higher version number will update the existing measure values of the record and version.
2242
+ * In cases where the measure value is the same, the version will still be updated. </p>
2243
+ * <p>Default value is 1. </p>
2244
+ * <p>Timestream for LiveAnalytics does not support updating partial measure values in a record.</p>
2245
+ * <p>Write requests for duplicate data with a
2246
+ * higher version number will update the existing measure value and version. In cases where
2247
+ * the measure value is the same, <code>Version</code> will still be updated. Default value is
2248
+ * <code>1</code>.</p>
2249
+ * <note>
2250
+ * <p>
2251
+ * <code>Version</code> must be <code>1</code> or greater, or you will receive a
2252
+ * <code>ValidationException</code> error.</p>
2253
+ * </note>
2254
+ * @public
2255
+ */
2256
+ VersionValue: string | undefined;
2257
+ /**
2258
+ * <p>Map source data to dimensions in the target Timestream for LiveAnalytics
2259
+ * table.</p>
2260
+ * <p>For more information, see <a href="https://docs.aws.amazon.com/timestream/latest/developerguide/concepts.html">Amazon Timestream for LiveAnalytics concepts</a>
2261
+ * </p>
2262
+ * @public
2263
+ */
2264
+ DimensionMappings: DimensionMapping[] | undefined;
2265
+ /**
2266
+ * <p>Mappings of single source data fields to individual records in the specified Timestream for LiveAnalytics table.</p>
2267
+ * @public
2268
+ */
2269
+ SingleMeasureMappings?: SingleMeasureMapping[];
2270
+ /**
2271
+ * <p>Maps multiple measures from the source event to the same record in the specified Timestream for LiveAnalytics table.</p>
2272
+ * @public
2273
+ */
2274
+ MultiMeasureMappings?: MultiMeasureMapping[];
2275
+ }
1957
2276
  /**
1958
2277
  * <p>The parameters required to set up a target for your pipe.</p>
1959
2278
  * <p>For more information about pipe target parameters, including how to use dynamic path parameters, see <a href="https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes-event-target.html">Target parameters</a> in the <i>Amazon EventBridge User Guide</i>.</p>
@@ -1962,8 +2281,8 @@ export interface PipeTargetStateMachineParameters {
1962
2281
  export interface PipeTargetParameters {
1963
2282
  /**
1964
2283
  * <p>Valid JSON text passed to the target. In this case, nothing from the event itself is
1965
- * passed to the target. For more information, see <a href="http://www.rfc-editor.org/rfc/rfc7159.txt">The JavaScript Object Notation (JSON) Data
1966
- * Interchange Format</a>.</p>
2284
+ * passed to the target. For more information, see <a href="http://www.rfc-editor.org/rfc/rfc7159.txt">The JavaScript Object Notation (JSON)
2285
+ * Data Interchange Format</a>.</p>
1967
2286
  * <p>To remove an input template, specify an empty string.</p>
1968
2287
  * @public
1969
2288
  */
@@ -2025,6 +2344,12 @@ export interface PipeTargetParameters {
2025
2344
  * @public
2026
2345
  */
2027
2346
  CloudWatchLogsParameters?: PipeTargetCloudWatchLogsParameters;
2347
+ /**
2348
+ * <p>The parameters for using a Timestream for LiveAnalytics table as a
2349
+ * target.</p>
2350
+ * @public
2351
+ */
2352
+ TimestreamParameters?: PipeTargetTimestreamParameters;
2028
2353
  }
2029
2354
  /**
2030
2355
  * @public
@@ -2160,7 +2485,8 @@ export declare class InternalException extends __BaseException {
2160
2485
  readonly name: "InternalException";
2161
2486
  readonly $fault: "server";
2162
2487
  /**
2163
- * <p>The number of seconds to wait before retrying the action that caused the exception.</p>
2488
+ * <p>The number of seconds to wait before retrying the action that caused the
2489
+ * exception.</p>
2164
2490
  * @public
2165
2491
  */
2166
2492
  retryAfterSeconds?: number;
@@ -2231,7 +2557,8 @@ export declare class ThrottlingException extends __BaseException {
2231
2557
  */
2232
2558
  quotaCode?: string;
2233
2559
  /**
2234
- * <p>The number of seconds to wait before retrying the action that caused the exception.</p>
2560
+ * <p>The number of seconds to wait before retrying the action that caused the
2561
+ * exception.</p>
2235
2562
  * @public
2236
2563
  */
2237
2564
  retryAfterSeconds?: number;
@@ -2264,7 +2591,8 @@ export declare class ValidationException extends __BaseException {
2264
2591
  readonly name: "ValidationException";
2265
2592
  readonly $fault: "client";
2266
2593
  /**
2267
- * <p>The list of fields for which validation failed and the corresponding failure messages.</p>
2594
+ * <p>The list of fields for which validation failed and the corresponding failure
2595
+ * messages.</p>
2268
2596
  * @public
2269
2597
  */
2270
2598
  fieldList?: ValidationExceptionField[];
@@ -2342,12 +2670,12 @@ export interface DescribePipeRequest {
2342
2670
  Name: string | undefined;
2343
2671
  }
2344
2672
  /**
2345
- * <p>The Amazon Kinesis Data Firehose logging configuration settings for the pipe.</p>
2673
+ * <p>The Amazon Data Firehose logging configuration settings for the pipe.</p>
2346
2674
  * @public
2347
2675
  */
2348
2676
  export interface FirehoseLogDestination {
2349
2677
  /**
2350
- * <p>The Amazon Resource Name (ARN) of the Kinesis Data Firehose delivery stream to which EventBridge delivers the pipe log records.</p>
2678
+ * <p>The Amazon Resource Name (ARN) of the Firehose delivery stream to which EventBridge delivers the pipe log records.</p>
2351
2679
  * @public
2352
2680
  */
2353
2681
  DeliveryStreamArn?: string;
@@ -2358,14 +2686,15 @@ export interface FirehoseLogDestination {
2358
2686
  */
2359
2687
  export interface S3LogDestination {
2360
2688
  /**
2361
- * <p>The name of the Amazon S3 bucket to which EventBridge delivers the log records for the pipe.</p>
2689
+ * <p>The name of the Amazon S3 bucket to which EventBridge delivers the log
2690
+ * records for the pipe.</p>
2362
2691
  * @public
2363
2692
  */
2364
2693
  BucketName?: string;
2365
2694
  /**
2366
2695
  * <p>The prefix text with which to begin Amazon S3 log object names.</p>
2367
- * <p>For more information, see <a href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-prefixes.html">Organizing objects using prefixes</a>
2368
- * in the <i>Amazon Simple Storage Service User Guide</i>.</p>
2696
+ * <p>For more information, see <a href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-prefixes.html">Organizing objects using
2697
+ * prefixes</a> in the <i>Amazon Simple Storage Service User Guide</i>.</p>
2369
2698
  * @public
2370
2699
  */
2371
2700
  Prefix?: string;
@@ -2387,7 +2716,8 @@ export interface S3LogDestination {
2387
2716
  * </li>
2388
2717
  * <li>
2389
2718
  * <p>
2390
- * <code>w3c</code>: <a href="https://www.w3.org/TR/WD-logfile">W3C extended logging file format</a>
2719
+ * <code>w3c</code>: <a href="https://www.w3.org/TR/WD-logfile">W3C extended
2720
+ * logging file format</a>
2391
2721
  * </p>
2392
2722
  * </li>
2393
2723
  * </ul>
@@ -2406,7 +2736,7 @@ export interface PipeLogConfiguration {
2406
2736
  */
2407
2737
  S3LogDestination?: S3LogDestination;
2408
2738
  /**
2409
- * <p>The Amazon Kinesis Data Firehose logging configuration settings for the pipe.</p>
2739
+ * <p>The Amazon Data Firehose logging configuration settings for the pipe.</p>
2410
2740
  * @public
2411
2741
  */
2412
2742
  FirehoseLogDestination?: FirehoseLogDestination;
@@ -2421,9 +2751,12 @@ export interface PipeLogConfiguration {
2421
2751
  */
2422
2752
  Level?: LogLevel;
2423
2753
  /**
2424
- * <p>Whether the execution data (specifically, the <code>payload</code>, <code>awsRequest</code>, and <code>awsResponse</code> fields) is included in the log messages for this pipe.</p>
2754
+ * <p>Whether the execution data (specifically, the <code>payload</code>,
2755
+ * <code>awsRequest</code>, and <code>awsResponse</code> fields) is included in the log
2756
+ * messages for this pipe.</p>
2425
2757
  * <p>This applies to all log destinations for the pipe.</p>
2426
- * <p>For more information, see <a href="https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes-logs.html#eb-pipes-logs-execution-data">Including execution data in logs</a> in the <i>Amazon EventBridge User Guide</i>.</p>
2758
+ * <p>For more information, see <a href="https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes-logs.html#eb-pipes-logs-execution-data">Including execution data in logs</a> in the <i>Amazon EventBridge User
2759
+ * Guide</i>.</p>
2427
2760
  * @public
2428
2761
  */
2429
2762
  IncludeExecutionData?: IncludeExecutionDataOption[];
@@ -2524,8 +2857,9 @@ export interface DescribePipeResponse {
2524
2857
  */
2525
2858
  export interface ListPipesRequest {
2526
2859
  /**
2527
- * <p>A value that will return a subset of the pipes associated with this account. For example, <code>"NamePrefix": "ABC"</code> will return
2528
- * all endpoints with "ABC" in the name.</p>
2860
+ * <p>A value that will return a subset of the pipes associated with this account. For
2861
+ * example, <code>"NamePrefix": "ABC"</code> will return all pipes with "ABC" in the
2862
+ * name.</p>
2529
2863
  * @public
2530
2864
  */
2531
2865
  NamePrefix?: string;
@@ -2563,7 +2897,8 @@ export interface ListPipesRequest {
2563
2897
  Limit?: number;
2564
2898
  }
2565
2899
  /**
2566
- * <p>An object that represents a pipe. Amazon EventBridgePipes connect event sources to targets and reduces the need for specialized knowledge and integration code.</p>
2900
+ * <p>An object that represents a pipe. Amazon EventBridge Pipes connect event sources to
2901
+ * targets and reduce the need for specialized knowledge and integration code.</p>
2567
2902
  * @public
2568
2903
  */
2569
2904
  export interface Pipe {
@@ -2898,6 +3233,10 @@ export interface UpdatePipeSourceRabbitMQBrokerParameters {
2898
3233
  }
2899
3234
  /**
2900
3235
  * <p>The parameters for using a self-managed Apache Kafka stream as a source.</p>
3236
+ * <p>A <i>self-managed</i> cluster refers to any Apache Kafka cluster not hosted by Amazon Web Services.
3237
+ * This includes both clusters you manage yourself, as well as those hosted by a third-party
3238
+ * provider, such as <a href="https://www.confluent.io/">Confluent
3239
+ * Cloud</a>, <a href="https://www.cloudkarafka.com/">CloudKarafka</a>, or <a href="https://redpanda.com/">Redpanda</a>. For more information, see <a href="https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes-kafka.html">Apache Kafka streams as a source</a> in the <i>Amazon EventBridge User Guide</i>.</p>
2901
3240
  * @public
2902
3241
  */
2903
3242
  export interface UpdatePipeSourceSelfManagedKafkaParameters {
@@ -2988,6 +3327,10 @@ export interface UpdatePipeSourceParameters {
2988
3327
  ManagedStreamingKafkaParameters?: UpdatePipeSourceManagedStreamingKafkaParameters;
2989
3328
  /**
2990
3329
  * <p>The parameters for using a self-managed Apache Kafka stream as a source.</p>
3330
+ * <p>A <i>self-managed</i> cluster refers to any Apache Kafka cluster not hosted by Amazon Web Services.
3331
+ * This includes both clusters you manage yourself, as well as those hosted by a third-party
3332
+ * provider, such as <a href="https://www.confluent.io/">Confluent
3333
+ * Cloud</a>, <a href="https://www.cloudkarafka.com/">CloudKarafka</a>, or <a href="https://redpanda.com/">Redpanda</a>. For more information, see <a href="https://docs.aws.amazon.com/eventbridge/latest/userguide/eb-pipes-kafka.html">Apache Kafka streams as a source</a> in the <i>Amazon EventBridge User Guide</i>.</p>
2991
3334
  * @public
2992
3335
  */
2993
3336
  SelfManagedKafkaParameters?: UpdatePipeSourceSelfManagedKafkaParameters;