@pulumi/aws-native 1.28.0-alpha.1745559155 → 1.28.0-alpha.1745848410

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (113) hide show
  1. package/apigatewayv2/api.d.ts +12 -0
  2. package/apigatewayv2/api.js +2 -0
  3. package/apigatewayv2/api.js.map +1 -1
  4. package/apigatewayv2/getApi.d.ts +6 -0
  5. package/apigatewayv2/getApi.js.map +1 -1
  6. package/apprunner/getService.d.ts +4 -0
  7. package/apprunner/getService.js.map +1 -1
  8. package/apprunner/service.d.ts +2 -2
  9. package/apprunner/service.js +1 -1
  10. package/apprunner/service.js.map +1 -1
  11. package/appsync/channelNamespace.d.ts +2 -0
  12. package/appsync/channelNamespace.js +2 -0
  13. package/appsync/channelNamespace.js.map +1 -1
  14. package/appsync/getChannelNamespace.d.ts +1 -0
  15. package/appsync/getChannelNamespace.js.map +1 -1
  16. package/aps/getWorkspace.d.ts +3 -0
  17. package/aps/getWorkspace.js.map +1 -1
  18. package/aps/workspace.d.ts +6 -0
  19. package/aps/workspace.js.map +1 -1
  20. package/autoscaling/autoScalingGroup.d.ts +1 -0
  21. package/autoscaling/autoScalingGroup.js +2 -0
  22. package/autoscaling/autoScalingGroup.js.map +1 -1
  23. package/autoscaling/getAutoScalingGroup.d.ts +1 -0
  24. package/autoscaling/getAutoScalingGroup.js.map +1 -1
  25. package/ce/anomalyMonitor.d.ts +2 -2
  26. package/ce/anomalySubscription.d.ts +2 -2
  27. package/cloudfront/connectionGroup.d.ts +71 -0
  28. package/cloudfront/connectionGroup.js +82 -0
  29. package/cloudfront/connectionGroup.js.map +1 -0
  30. package/cloudfront/distributionTenant.d.ts +75 -0
  31. package/cloudfront/distributionTenant.js +94 -0
  32. package/cloudfront/distributionTenant.js.map +1 -0
  33. package/cloudfront/getConnectionGroup.d.ts +42 -0
  34. package/cloudfront/getConnectionGroup.js +28 -0
  35. package/cloudfront/getConnectionGroup.js.map +1 -0
  36. package/cloudfront/getDistributionTenant.d.ts +41 -0
  37. package/cloudfront/getDistributionTenant.js +28 -0
  38. package/cloudfront/getDistributionTenant.js.map +1 -0
  39. package/cloudfront/index.d.ts +12 -0
  40. package/cloudfront/index.js +15 -1
  41. package/cloudfront/index.js.map +1 -1
  42. package/codebuild/fleet.d.ts +2 -2
  43. package/codebuild/getFleet.d.ts +1 -1
  44. package/cognito/getManagedLoginBranding.d.ts +1 -1
  45. package/cognito/managedLoginBranding.d.ts +2 -2
  46. package/cognito/userPoolDomain.d.ts +2 -2
  47. package/dynamodb/getTable.d.ts +4 -4
  48. package/dynamodb/getTable.js +2 -2
  49. package/dynamodb/table.d.ts +5 -5
  50. package/dynamodb/table.js +1 -1
  51. package/ecr/getRegistryScanningConfiguration.d.ts +28 -0
  52. package/ecr/getRegistryScanningConfiguration.js +28 -0
  53. package/ecr/getRegistryScanningConfiguration.js.map +1 -0
  54. package/ecr/index.d.ts +6 -0
  55. package/ecr/index.js +8 -1
  56. package/ecr/index.js.map +1 -1
  57. package/ecr/registryScanningConfiguration.d.ts +53 -0
  58. package/ecr/registryScanningConfiguration.js +66 -0
  59. package/ecr/registryScanningConfiguration.js.map +1 -0
  60. package/ecs/service.d.ts +4 -4
  61. package/events/archive.d.ts +22 -0
  62. package/events/archive.js.map +1 -1
  63. package/events/connection.d.ts +14 -0
  64. package/events/connection.js.map +1 -1
  65. package/events/getArchive.d.ts +11 -0
  66. package/events/getArchive.js.map +1 -1
  67. package/events/getConnection.d.ts +7 -0
  68. package/events/getConnection.js.map +1 -1
  69. package/kms/alias.d.ts +1 -1
  70. package/kms/alias.js +1 -1
  71. package/kms/getAlias.d.ts +2 -2
  72. package/kms/getAlias.js +2 -2
  73. package/kms/getKey.d.ts +17 -17
  74. package/kms/getKey.js +4 -4
  75. package/kms/key.d.ts +30 -30
  76. package/kms/key.js +2 -2
  77. package/omics/workflow.d.ts +2 -2
  78. package/package.json +3 -3
  79. package/package.json.dev +2 -2
  80. package/rds/dbInstance.d.ts +10 -2
  81. package/rds/dbInstance.js +1 -1
  82. package/rds/dbInstance.js.map +1 -1
  83. package/rds/getDbInstance.d.ts +3 -1
  84. package/rds/getDbInstance.js.map +1 -1
  85. package/redshift/cluster.d.ts +2 -2
  86. package/redshift/getCluster.d.ts +1 -1
  87. package/redshift/getClusterParameterGroup.d.ts +4 -0
  88. package/redshift/getClusterParameterGroup.js.map +1 -1
  89. package/types/enums/apprunner/index.d.ts +1 -0
  90. package/types/enums/apprunner/index.js +1 -0
  91. package/types/enums/apprunner/index.js.map +1 -1
  92. package/types/enums/appsync/index.d.ts +16 -0
  93. package/types/enums/appsync/index.js +9 -1
  94. package/types/enums/appsync/index.js.map +1 -1
  95. package/types/enums/bedrock/index.d.ts +7 -0
  96. package/types/enums/bedrock/index.js +8 -2
  97. package/types/enums/bedrock/index.js.map +1 -1
  98. package/types/enums/cloudfront/index.d.ts +31 -0
  99. package/types/enums/cloudfront/index.js +26 -1
  100. package/types/enums/cloudfront/index.js.map +1 -1
  101. package/types/enums/ecr/index.d.ts +23 -0
  102. package/types/enums/ecr/index.js +12 -1
  103. package/types/enums/ecr/index.js.map +1 -1
  104. package/types/enums/ecs/index.d.ts +3 -3
  105. package/types/enums/kms/index.d.ts +9 -9
  106. package/types/enums/wisdom/index.d.ts +2 -0
  107. package/types/enums/wisdom/index.js +2 -0
  108. package/types/enums/wisdom/index.js.map +1 -1
  109. package/types/input.d.ts +239 -89
  110. package/types/output.d.ts +243 -89
  111. package/types/output.js.map +1 -1
  112. package/wisdom/aiPrompt.d.ts +38 -2
  113. package/wisdom/aiPrompt.js.map +1 -1
package/types/output.d.ts CHANGED
@@ -2107,6 +2107,10 @@ export declare namespace apigatewayv2 {
2107
2107
  * The endpoint type.
2108
2108
  */
2109
2109
  endpointType?: string;
2110
+ /**
2111
+ * The IP address types that can invoke the domain name. Use `ipv4` to allow only IPv4 addresses to invoke your domain name, or use `dualstack` to allow both IPv4 and IPv6 addresses to invoke your domain name.
2112
+ */
2113
+ ipAddressType?: string;
2110
2114
  /**
2111
2115
  * The Amazon resource name (ARN) for the public certificate issued by ACMlong. This ARN is used to validate custom domain ownership. It's required only if you configure mutual TLS and use either an ACM-imported or a private CA certificate ARN as the regionalCertificateArn.
2112
2116
  */
@@ -5565,6 +5569,24 @@ export declare namespace appsync {
5565
5569
  interface ChannelNamespaceAuthMode {
5566
5570
  authType?: enums.appsync.ChannelNamespaceAuthenticationType;
5567
5571
  }
5572
+ interface ChannelNamespaceHandlerConfig {
5573
+ behavior: enums.appsync.ChannelNamespaceHandlerBehavior;
5574
+ integration: outputs.appsync.ChannelNamespaceIntegration;
5575
+ }
5576
+ interface ChannelNamespaceHandlerConfigs {
5577
+ onPublish?: outputs.appsync.ChannelNamespaceHandlerConfig;
5578
+ onSubscribe?: outputs.appsync.ChannelNamespaceHandlerConfig;
5579
+ }
5580
+ interface ChannelNamespaceIntegration {
5581
+ /**
5582
+ * Data source to invoke for this integration.
5583
+ */
5584
+ dataSourceName: string;
5585
+ lambdaConfig?: outputs.appsync.ChannelNamespaceLambdaConfig;
5586
+ }
5587
+ interface ChannelNamespaceLambdaConfig {
5588
+ invokeType: enums.appsync.ChannelNamespaceInvokeType;
5589
+ }
5568
5590
  interface DataSourceAuthorizationConfig {
5569
5591
  /**
5570
5592
  * The authorization type that the HTTP endpoint requires.
@@ -6022,6 +6044,9 @@ export declare namespace aps {
6022
6044
  * An array of series labels
6023
6045
  */
6024
6046
  labelSet: outputs.aps.WorkspaceLabel[];
6047
+ /**
6048
+ * This structure contains the information about the limits that apply to time series that match this label set.
6049
+ */
6025
6050
  limits: outputs.aps.WorkspaceLimitsPerLabelSetEntry;
6026
6051
  }
6027
6052
  /**
@@ -6453,20 +6478,26 @@ export declare namespace autoscaling {
6453
6478
  */
6454
6479
  min?: number;
6455
6480
  }
6481
+ /**
6482
+ * The baseline performance to consider, using an instance family as a baseline reference. The instance family establishes the lowest acceptable level of performance. Auto Scaling uses this baseline to guide instance type selection, but there is no guarantee that the selected instance types will always exceed the baseline for every application.
6483
+ * Currently, this parameter only supports CPU performance as a baseline performance factor. For example, specifying ``c6i`` uses the CPU performance of the ``c6i`` family as the baseline reference.
6484
+ */
6456
6485
  interface AutoScalingGroupBaselinePerformanceFactorsRequest {
6457
6486
  /**
6458
6487
  * The CPU performance to consider, using an instance family as the baseline reference.
6459
6488
  */
6460
6489
  cpu?: outputs.autoscaling.AutoScalingGroupCpuPerformanceFactorRequest;
6461
6490
  }
6491
+ /**
6492
+ * Describes the Capacity Reservation preference and targeting options. If you specify ``open`` or ``none`` for ``CapacityReservationPreference``, do not specify a ``CapacityReservationTarget``.
6493
+ */
6462
6494
  interface AutoScalingGroupCapacityReservationSpecification {
6463
6495
  /**
6464
6496
  * The capacity reservation preference. The following options are available:
6465
- *
6466
- * - `capacity-reservations-only` - Auto Scaling will only launch instances into a Capacity Reservation or Capacity Reservation resource group. If capacity isn't available, instances will fail to launch.
6467
- * - `capacity-reservations-first` - Auto Scaling will try to launch instances into a Capacity Reservation or Capacity Reservation resource group first. If capacity isn't available, instances will run in On-Demand capacity.
6468
- * - `none` - Auto Scaling will not launch instances into a Capacity Reservation. Instances will run in On-Demand capacity.
6469
- * - `default` - Auto Scaling uses the Capacity Reservation preference from your launch template or an open Capacity Reservation.
6497
+ * + ``capacity-reservations-only`` - Auto Scaling will only launch instances into a Capacity Reservation or Capacity Reservation resource group. If capacity isn't available, instances will fail to launch.
6498
+ * + ``capacity-reservations-first`` - Auto Scaling will try to launch instances into a Capacity Reservation or Capacity Reservation resource group first. If capacity isn't available, instances will run in On-Demand capacity.
6499
+ * + ``none`` - Auto Scaling will not launch instances into a Capacity Reservation. Instances will run in On-Demand capacity.
6500
+ * + ``default`` - Auto Scaling uses the Capacity Reservation preference from your launch template or an open Capacity Reservation.
6470
6501
  */
6471
6502
  capacityReservationPreference: string;
6472
6503
  /**
@@ -6474,6 +6505,9 @@ export declare namespace autoscaling {
6474
6505
  */
6475
6506
  capacityReservationTarget?: outputs.autoscaling.AutoScalingGroupCapacityReservationTarget;
6476
6507
  }
6508
+ /**
6509
+ * The target for the Capacity Reservation. Specify Capacity Reservations IDs or Capacity Reservation resource group ARNs.
6510
+ */
6477
6511
  interface AutoScalingGroupCapacityReservationTarget {
6478
6512
  /**
6479
6513
  * The Capacity Reservation IDs to launch instances into.
@@ -6484,11 +6518,13 @@ export declare namespace autoscaling {
6484
6518
  */
6485
6519
  capacityReservationResourceGroupArns?: string[];
6486
6520
  }
6521
+ /**
6522
+ * The CPU performance to consider, using an instance family as the baseline reference.
6523
+ */
6487
6524
  interface AutoScalingGroupCpuPerformanceFactorRequest {
6488
6525
  /**
6489
6526
  * Specify an instance family to use as the baseline reference for CPU performance. All instance types that match your specified attributes will be compared against the CPU performance of the referenced instance family, regardless of CPU manufacturer or architecture differences.
6490
- *
6491
- * > Currently only one instance family can be specified in the list.
6527
+ * Currently only one instance family can be specified in the list.
6492
6528
  */
6493
6529
  references?: outputs.autoscaling.AutoScalingGroupPerformanceFactorReferenceRequest[];
6494
6530
  }
@@ -6973,7 +7009,27 @@ export declare namespace autoscaling {
6973
7009
  */
6974
7010
  topicArn: string[];
6975
7011
  }
7012
+ /**
7013
+ * Specify an instance family to use as the baseline reference for CPU performance. All instance types that match your specified attributes will be compared against the CPU performance of the referenced instance family, regardless of CPU manufacturer or architecture differences.
7014
+ * Currently only one instance family can be specified in the list.
7015
+ */
6976
7016
  interface AutoScalingGroupPerformanceFactorReferenceRequest {
7017
+ /**
7018
+ * The instance family to use as a baseline reference.
7019
+ * Make sure that you specify the correct value for the instance family. The instance family is everything before the period (.) in the instance type name. For example, in the instance ``c6i.large``, the instance family is ``c6i``, not ``c6``. For more information, see [Amazon EC2 instance type naming conventions](https://docs.aws.amazon.com/ec2/latest/instancetypes/instance-type-names.html) in *Amazon EC2 Instance Types*.
7020
+ * The following instance types are *not supported* for performance protection.
7021
+ * + ``c1``
7022
+ * + ``g3| g3s``
7023
+ * + ``hpc7g``
7024
+ * + ``m1| m2``
7025
+ * + ``mac1 | mac2 | mac2-m1ultra | mac2-m2 | mac2-m2pro``
7026
+ * + ``p3dn | p4d | p5``
7027
+ * + ``t1``
7028
+ * + ``u-12tb1 | u-18tb1 | u-24tb1 | u-3tb1 | u-6tb1 | u-9tb1 | u7i-12tb | u7in-16tb | u7in-24tb | u7in-32tb``
7029
+ *
7030
+ * If you use performance protection by specifying a supported instance family, the returned instance types will exclude the preceding unsupported instance families.
7031
+ * If you specify an unsupported instance family as a value for baseline performance, the API returns an empty response.
7032
+ */
6977
7033
  instanceFamily?: string;
6978
7034
  }
6979
7035
  /**
@@ -8233,7 +8289,7 @@ export declare namespace batch {
8233
8289
  */
8234
8290
  jobExecutionTimeoutMinutes?: number;
8235
8291
  /**
8236
- * Specifies whether jobs are automatically terminated when the computer environment infrastructure is updated. The default value is `false` .
8292
+ * Specifies whether jobs are automatically terminated when the compute environment infrastructure is updated. The default value is `false` .
8237
8293
  */
8238
8294
  terminateJobsOnUpdate?: boolean;
8239
8295
  }
@@ -9858,6 +9914,9 @@ export declare namespace bedrock {
9858
9914
  */
9859
9915
  types?: enums.bedrock.DataAutomationProjectAudioExtractionCategoryType[];
9860
9916
  }
9917
+ interface DataAutomationProjectAudioOverrideConfiguration {
9918
+ modalityProcessing?: outputs.bedrock.DataAutomationProjectModalityProcessingConfiguration;
9919
+ }
9861
9920
  interface DataAutomationProjectAudioStandardExtraction {
9862
9921
  /**
9863
9922
  * Settings for generating data from audio.
@@ -9942,6 +10001,7 @@ export declare namespace bedrock {
9942
10001
  types?: enums.bedrock.DataAutomationProjectDocumentOutputTextFormatType[];
9943
10002
  }
9944
10003
  interface DataAutomationProjectDocumentOverrideConfiguration {
10004
+ modalityProcessing?: outputs.bedrock.DataAutomationProjectModalityProcessingConfiguration;
9945
10005
  /**
9946
10006
  * Whether document splitter is enabled for a project.
9947
10007
  */
@@ -9993,6 +10053,9 @@ export declare namespace bedrock {
9993
10053
  */
9994
10054
  types?: enums.bedrock.DataAutomationProjectImageExtractionCategoryType[];
9995
10055
  }
10056
+ interface DataAutomationProjectImageOverrideConfiguration {
10057
+ modalityProcessing?: outputs.bedrock.DataAutomationProjectModalityProcessingConfiguration;
10058
+ }
9996
10059
  interface DataAutomationProjectImageStandardExtraction {
9997
10060
  /**
9998
10061
  * Settings for generating bounding boxes.
@@ -10023,14 +10086,30 @@ export declare namespace bedrock {
10023
10086
  */
10024
10087
  generativeField?: outputs.bedrock.DataAutomationProjectImageStandardGenerativeField;
10025
10088
  }
10089
+ interface DataAutomationProjectModalityProcessingConfiguration {
10090
+ state?: enums.bedrock.DataAutomationProjectState;
10091
+ }
10092
+ /**
10093
+ * Modality routing configuration
10094
+ */
10095
+ interface DataAutomationProjectModalityRoutingConfiguration {
10096
+ jpeg?: enums.bedrock.DataAutomationProjectDesiredModality;
10097
+ mov?: enums.bedrock.DataAutomationProjectDesiredModality;
10098
+ mp4?: enums.bedrock.DataAutomationProjectDesiredModality;
10099
+ png?: enums.bedrock.DataAutomationProjectDesiredModality;
10100
+ }
10026
10101
  /**
10027
10102
  * Override configuration
10028
10103
  */
10029
10104
  interface DataAutomationProjectOverrideConfiguration {
10105
+ audio?: outputs.bedrock.DataAutomationProjectAudioOverrideConfiguration;
10030
10106
  /**
10031
10107
  * Additional settings for a project.
10032
10108
  */
10033
10109
  document?: outputs.bedrock.DataAutomationProjectDocumentOverrideConfiguration;
10110
+ image?: outputs.bedrock.DataAutomationProjectImageOverrideConfiguration;
10111
+ modalityRouting?: outputs.bedrock.DataAutomationProjectModalityRoutingConfiguration;
10112
+ video?: outputs.bedrock.DataAutomationProjectVideoOverrideConfiguration;
10034
10113
  }
10035
10114
  interface DataAutomationProjectSplitterConfiguration {
10036
10115
  /**
@@ -10075,6 +10154,9 @@ export declare namespace bedrock {
10075
10154
  */
10076
10155
  types?: enums.bedrock.DataAutomationProjectVideoExtractionCategoryType[];
10077
10156
  }
10157
+ interface DataAutomationProjectVideoOverrideConfiguration {
10158
+ modalityProcessing?: outputs.bedrock.DataAutomationProjectModalityProcessingConfiguration;
10159
+ }
10078
10160
  interface DataAutomationProjectVideoStandardExtraction {
10079
10161
  /**
10080
10162
  * Settings for generating bounding boxes.
@@ -13236,32 +13318,6 @@ export declare namespace cassandra {
13236
13318
  }
13237
13319
  }
13238
13320
  export declare namespace ce {
13239
- /**
13240
- * A key-value pair to associate with a resource.
13241
- */
13242
- interface AnomalyMonitorResourceTag {
13243
- /**
13244
- * The key name for the tag.
13245
- */
13246
- key: string;
13247
- /**
13248
- * The value for the tag.
13249
- */
13250
- value: string;
13251
- }
13252
- /**
13253
- * A key-value pair to associate with a resource.
13254
- */
13255
- interface AnomalySubscriptionResourceTag {
13256
- /**
13257
- * The key name for the tag.
13258
- */
13259
- key: string;
13260
- /**
13261
- * The value for the tag.
13262
- */
13263
- value: string;
13264
- }
13265
13321
  interface AnomalySubscriptionSubscriber {
13266
13322
  /**
13267
13323
  * The email address or SNS Topic Amazon Resource Name (ARN), depending on the `Type` .
@@ -14548,6 +14604,7 @@ export declare namespace cloudfront {
14548
14604
  * A comment to describe the distribution. The comment cannot be longer than 128 characters.
14549
14605
  */
14550
14606
  comment?: string;
14607
+ connectionMode?: enums.cloudfront.DistributionConnectionMode;
14551
14608
  /**
14552
14609
  * The identifier of a continuous deployment policy. For more information, see ``CreateContinuousDeploymentPolicy``.
14553
14610
  */
@@ -14633,6 +14690,7 @@ export declare namespace cloudfront {
14633
14690
  * A Boolean that indicates whether this is a staging distribution. When this value is ``true``, this is a staging distribution. When this value is ``false``, this is not a staging distribution.
14634
14691
  */
14635
14692
  staging?: boolean;
14693
+ tenantConfig?: outputs.cloudfront.DistributionConfigTenantConfigProperties;
14636
14694
  /**
14637
14695
  * A complex type that determines the distribution's SSL/TLS configuration for communicating with viewers.
14638
14696
  */
@@ -14643,6 +14701,9 @@ export declare namespace cloudfront {
14643
14701
  */
14644
14702
  webAclId?: string;
14645
14703
  }
14704
+ interface DistributionConfigTenantConfigProperties {
14705
+ parameterDefinitions?: outputs.cloudfront.DistributionParameterDefinition[];
14706
+ }
14646
14707
  /**
14647
14708
  * This field is deprecated. We recommend that you use a cache policy or an origin request policy instead of this field.
14648
14709
  * If you want to include cookies in the cache key, use a cache policy. For more information, see [Creating cache policies](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/controlling-the-cache-key.html#cache-key-create-cache-policy) in the *Amazon CloudFront Developer Guide*.
@@ -15182,6 +15243,18 @@ export declare namespace cloudfront {
15182
15243
  */
15183
15244
  originShieldRegion?: string;
15184
15245
  }
15246
+ interface DistributionParameterDefinition {
15247
+ definition: outputs.cloudfront.DistributionParameterDefinitionDefinitionProperties;
15248
+ name: string;
15249
+ }
15250
+ interface DistributionParameterDefinitionDefinitionProperties {
15251
+ stringSchema?: outputs.cloudfront.DistributionParameterDefinitionDefinitionPropertiesStringSchemaProperties;
15252
+ }
15253
+ interface DistributionParameterDefinitionDefinitionPropertiesStringSchemaProperties {
15254
+ comment?: string;
15255
+ defaultValue?: string;
15256
+ required: boolean;
15257
+ }
15185
15258
  /**
15186
15259
  * A complex type that identifies ways in which you want to restrict distribution of your content.
15187
15260
  */
@@ -15220,6 +15293,35 @@ export declare namespace cloudfront {
15220
15293
  */
15221
15294
  quantity: number;
15222
15295
  }
15296
+ interface DistributionTenantCertificate {
15297
+ arn?: string;
15298
+ }
15299
+ interface DistributionTenantCustomizations {
15300
+ certificate?: outputs.cloudfront.DistributionTenantCertificate;
15301
+ geoRestrictions?: outputs.cloudfront.DistributionTenantGeoRestrictionCustomization;
15302
+ webAcl?: outputs.cloudfront.DistributionTenantWebAclCustomization;
15303
+ }
15304
+ interface DistributionTenantDomainResult {
15305
+ domain?: string;
15306
+ status?: enums.cloudfront.DistributionTenantDomainResultStatus;
15307
+ }
15308
+ interface DistributionTenantGeoRestrictionCustomization {
15309
+ locations?: string[];
15310
+ restrictionType?: enums.cloudfront.DistributionTenantGeoRestrictionCustomizationRestrictionType;
15311
+ }
15312
+ interface DistributionTenantManagedCertificateRequest {
15313
+ certificateTransparencyLoggingPreference?: enums.cloudfront.DistributionTenantManagedCertificateRequestCertificateTransparencyLoggingPreference;
15314
+ primaryDomainName?: string;
15315
+ validationTokenHost?: enums.cloudfront.DistributionTenantManagedCertificateRequestValidationTokenHost;
15316
+ }
15317
+ interface DistributionTenantParameter {
15318
+ name?: string;
15319
+ value?: string;
15320
+ }
15321
+ interface DistributionTenantWebAclCustomization {
15322
+ action?: enums.cloudfront.DistributionTenantWebAclCustomizationAction;
15323
+ arn?: string;
15324
+ }
15223
15325
  /**
15224
15326
  * A complex type that determines the distribution's SSL/TLS configuration for communicating with viewers.
15225
15327
  * If the distribution doesn't use ``Aliases`` (also known as alternate domain names or CNAMEs)—that is, if the distribution uses the CloudFront domain name such as ``d111111abcdef8.cloudfront.net``—set ``CloudFrontDefaultCertificate`` to ``true`` and leave all other fields empty.
@@ -16548,6 +16650,10 @@ export declare namespace codepipeline {
16548
16650
  * The action's configuration. These are key-value pairs that specify input values for an action.
16549
16651
  */
16550
16652
  configuration?: any;
16653
+ /**
16654
+ * The list of environment variables that are input to a compute based action.
16655
+ */
16656
+ environmentVariables?: outputs.codepipeline.PipelineEnvironmentVariable[];
16551
16657
  /**
16552
16658
  * The name or ID of the artifact consumed by the action, such as a test or build artifact. While the field is not a required parameter, most actions have an action configuration that requires a specified quantity of input artifacts. To refer to the action configuration specification by action provider, see the [Action structure reference](https://docs.aws.amazon.com/codepipeline/latest/userguide/action-reference.html) in the *AWS CodePipeline User Guide* .
16553
16659
  *
@@ -16688,6 +16794,19 @@ export declare namespace codepipeline {
16688
16794
  */
16689
16795
  type: string;
16690
16796
  }
16797
+ /**
16798
+ * Represents information about the environment variable of an action.
16799
+ */
16800
+ interface PipelineEnvironmentVariable {
16801
+ /**
16802
+ * The name of the environment variable.
16803
+ */
16804
+ name: string;
16805
+ /**
16806
+ * The value of the environment variable.
16807
+ */
16808
+ value: string;
16809
+ }
16691
16810
  /**
16692
16811
  * The configuration that specifies the result, such as rollback, to occur upon stage failure
16693
16812
  */
@@ -23046,9 +23165,9 @@ export declare namespace dynamodb {
23046
23165
  attributeName: string;
23047
23166
  /**
23048
23167
  * The data type for the attribute, where:
23049
- * + ``S`` - the attribute is of type String
23050
- * + ``N`` - the attribute is of type Number
23051
- * + ``B`` - the attribute is of type Binary
23168
+ * + ``S`` - the attribute is of type String
23169
+ * + ``N`` - the attribute is of type Number
23170
+ * + ``B`` - the attribute is of type Binary
23052
23171
  */
23053
23172
  attributeType: string;
23054
23173
  }
@@ -23088,8 +23207,8 @@ export declare namespace dynamodb {
23088
23207
  indexName: string;
23089
23208
  /**
23090
23209
  * The complete key schema for a global secondary index, which consists of one or more pairs of attribute names and key types:
23091
- * + ``HASH`` - partition key
23092
- * + ``RANGE`` - sort key
23210
+ * + ``HASH`` - partition key
23211
+ * + ``RANGE`` - sort key
23093
23212
  *
23094
23213
  * The partition key of an item is also known as its *hash attribute*. The term "hash attribute" derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values.
23095
23214
  * The sort key of an item is also known as its *range attribute*. The term "range attribute" derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
@@ -23155,8 +23274,8 @@ export declare namespace dynamodb {
23155
23274
  attributeName: string;
23156
23275
  /**
23157
23276
  * The role that this key attribute will assume:
23158
- * + ``HASH`` - partition key
23159
- * + ``RANGE`` - sort key
23277
+ * + ``HASH`` - partition key
23278
+ * + ``RANGE`` - sort key
23160
23279
  *
23161
23280
  * The partition key of an item is also known as its *hash attribute*. The term "hash attribute" derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values.
23162
23281
  * The sort key of an item is also known as its *range attribute*. The term "range attribute" derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
@@ -23187,8 +23306,8 @@ export declare namespace dynamodb {
23187
23306
  indexName: string;
23188
23307
  /**
23189
23308
  * The complete key schema for the local secondary index, consisting of one or more pairs of attribute names and key types:
23190
- * + ``HASH`` - partition key
23191
- * + ``RANGE`` - sort key
23309
+ * + ``HASH`` - partition key
23310
+ * + ``RANGE`` - sort key
23192
23311
  *
23193
23312
  * The partition key of an item is also known as its *hash attribute*. The term "hash attribute" derives from DynamoDB's usage of an internal hash function to evenly distribute data items across partitions, based on their partition key values.
23194
23313
  * The sort key of an item is also known as its *range attribute*. The term "range attribute" derives from the way DynamoDB stores items with the same partition key physically close together, in sorted order by the sort key value.
@@ -23238,9 +23357,9 @@ export declare namespace dynamodb {
23238
23357
  nonKeyAttributes?: string[];
23239
23358
  /**
23240
23359
  * The set of attributes that are projected into the index:
23241
- * + ``KEYS_ONLY`` - Only the index and primary keys are projected into the index.
23242
- * + ``INCLUDE`` - In addition to the attributes described in ``KEYS_ONLY``, the secondary index will include other non-key attributes that you specify.
23243
- * + ``ALL`` - All of the table attributes are projected into the index.
23360
+ * + ``KEYS_ONLY`` - Only the index and primary keys are projected into the index.
23361
+ * + ``INCLUDE`` - In addition to the attributes described in ``KEYS_ONLY``, the secondary index will include other non-key attributes that you specify.
23362
+ * + ``ALL`` - All of the table attributes are projected into the index.
23244
23363
  *
23245
23364
  * When using the DynamoDB console, ``ALL`` is selected by default.
23246
23365
  */
@@ -23310,7 +23429,7 @@ export declare namespace dynamodb {
23310
23429
  sseEnabled: boolean;
23311
23430
  /**
23312
23431
  * Server-side encryption type. The only supported value is:
23313
- * + ``KMS`` - Server-side encryption that uses KMSlong. The key is stored in your account and is managed by KMS (KMS charges apply).
23432
+ * + ``KMS`` - Server-side encryption that uses KMSlong. The key is stored in your account and is managed by KMS (KMS charges apply).
23314
23433
  */
23315
23434
  sseType?: string;
23316
23435
  }
@@ -23325,10 +23444,10 @@ export declare namespace dynamodb {
23325
23444
  resourcePolicy?: outputs.dynamodb.TableResourcePolicy;
23326
23445
  /**
23327
23446
  * When an item in the table is modified, ``StreamViewType`` determines what information is written to the stream for this table. Valid values for ``StreamViewType`` are:
23328
- * + ``KEYS_ONLY`` - Only the key attributes of the modified item are written to the stream.
23329
- * + ``NEW_IMAGE`` - The entire item, as it appears after it was modified, is written to the stream.
23330
- * + ``OLD_IMAGE`` - The entire item, as it appeared before it was modified, is written to the stream.
23331
- * + ``NEW_AND_OLD_IMAGES`` - Both the new and the old item images of the item are written to the stream.
23447
+ * + ``KEYS_ONLY`` - Only the key attributes of the modified item are written to the stream.
23448
+ * + ``NEW_IMAGE`` - The entire item, as it appears after it was modified, is written to the stream.
23449
+ * + ``OLD_IMAGE`` - The entire item, as it appeared before it was modified, is written to the stream.
23450
+ * + ``NEW_AND_OLD_IMAGES`` - Both the new and the old item images of the item are written to the stream.
23332
23451
  */
23333
23452
  streamViewType: string;
23334
23453
  }
@@ -23712,6 +23831,7 @@ export declare namespace ec2 {
23712
23831
  *
23713
23832
  * - For instance types with FPGA accelerators, specify `fpga` .
23714
23833
  * - For instance types with GPU accelerators, specify `gpu` .
23834
+ * - For instance types with Inference accelerators, specify `inference` .
23715
23835
  *
23716
23836
  * Default: Any accelerator type
23717
23837
  */
@@ -26744,6 +26864,7 @@ export declare namespace ec2 {
26744
26864
  *
26745
26865
  * - For instance types with FPGA accelerators, specify `fpga` .
26746
26866
  * - For instance types with GPU accelerators, specify `gpu` .
26867
+ * - For instance types with Inference accelerators, specify `inference` .
26747
26868
  *
26748
26869
  * Default: Any accelerator type
26749
26870
  */
@@ -27954,6 +28075,23 @@ export declare namespace ec2 {
27954
28075
  }
27955
28076
  }
27956
28077
  export declare namespace ecr {
28078
+ /**
28079
+ * The details of a scanning repository filter.
28080
+ */
28081
+ interface RegistryScanningConfigurationRepositoryFilter {
28082
+ filter: string;
28083
+ filterType: enums.ecr.RegistryScanningConfigurationFilterType;
28084
+ }
28085
+ /**
28086
+ * A rule representing the details of a scanning configuration.
28087
+ */
28088
+ interface RegistryScanningConfigurationScanningRule {
28089
+ /**
28090
+ * The repository filters associated with the scanning configuration for a private registry.
28091
+ */
28092
+ repositoryFilters: outputs.ecr.RegistryScanningConfigurationRepositoryFilter[];
28093
+ scanFrequency: enums.ecr.RegistryScanningConfigurationScanFrequency;
28094
+ }
27957
28095
  /**
27958
28096
  * The replication configuration for a registry.
27959
28097
  */
@@ -28580,7 +28718,9 @@ export declare namespace ecs {
28580
28718
  /**
28581
28719
  * The configuration options to send to the log driver.
28582
28720
  * The options you can specify depend on the log driver. Some of the options you can specify when you use the ``awslogs`` log driver to route logs to Amazon CloudWatch include the following:
28583
- * + awslogs-create-group Required: No Specify whether you want the log group to be created automatically. If this option isn't specified, it defaults to false. Your IAM policy must include the logs:CreateLogGroup permission before you attempt to use awslogs-create-group. + awslogs-region Required: Yes Specify the Region that the awslogs log driver is to send your Docker logs to. You can choose to send all of your logs from clusters in different Regions to a single region in CloudWatch Logs. This is so that they're all visible in one location. Otherwise, you can separate them by Region for more granularity. Make sure that the specified log group exists in the Region that you specify with this option. + awslogs-group Required: Yes Make sure to specify a log group that the awslogs log driver sends its log streams to. + awslogs-stream-prefix Required: Yes, when using the Fargate launch type.Optional for the EC2 launch type, required for the Fargate launch type. Use the awslogs-stream-prefix option to associate a log stream with the specified prefix, the container name, and the ID of the Amazon ECS task that the container belongs to. If you specify a prefix with this option, then the log stream takes the format prefix-name/container-name/ecs-task-id. If you don't specify a prefix with this option, then the log stream is named after the container ID that's assigned by the Docker daemon on the container instance. Because it's difficult to trace logs back to the container that sent them with just the Docker container ID (which is only available on the container instance), we recommend that you specify a prefix with this option. For Amazon ECS services, you can use the service name as the prefix. Doing so, you can trace log streams to the service that the container belongs to, the name of the container that sent them, and the ID of the task that the container belongs to. 
You must specify a stream-prefix for your logs to have your logs appear in the Log pane when using the Amazon ECS console. + awslogs-datetime-format Required: No This option defines a multiline start pattern in Python strftime format. A log message consists of a line that matches the pattern and any following lines that don’t match the pattern. The matched line is the delimiter between log messages. One example of a use case for using this format is for parsing output such as a stack dump, which might otherwise be logged in multiple entries. The correct pattern allows it to be captured in a single entry. For more information, see awslogs-datetime-format. You cannot configure both the awslogs-datetime-format and awslogs-multiline-pattern options. Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance. + awslogs-multiline-pattern Required: No This option defines a multiline start pattern that uses a regular expression. A log message consists of a line that matches the pattern and any following lines that don’t match the pattern. The matched line is the delimiter between log messages. For more information, see awslogs-multiline-pattern. This option is ignored if awslogs-datetime-format is also configured. You cannot configure both the awslogs-datetime-format and awslogs-multiline-pattern options. Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance. + mode Required: No Valid values: non-blocking | blocking This option defines the delivery mode of log messages from the container to CloudWatch Logs. The delivery mode you choose affects application availability when the flow of logs from container to CloudWatch is interrupted. If you use the blocking mode and the flow of logs to CloudWatch is interrupted, calls from container code to write to the stdout and stderr streams will block. 
The logging thread of the application will block as a result. This may cause the application to become unresponsive and lead to container healthcheck failure. If you use the non-blocking mode, the container's logs are instead stored in an in-memory intermediate buffer configured with the max-buffer-size option. This prevents the application from becoming unresponsive when logs cannot be sent to CloudWatch. We recommend using this mode if you want to ensure service availability and are okay with some log loss. For more information, see Preventing log loss with non-blocking mode in the awslogs container log driver. + max-buffer-size Required: No Default value: 1m When non-blocking mode is used, the max-buffer-size log option controls the size of the buffer that's used for intermediate message storage. Make sure to specify an adequate buffer size based on your application. When the buffer fills up, further logs cannot be stored. Logs that cannot be stored are lost.
28721
+ * + awslogs-create-group Required: No Specify whether you want the log group to be created automatically. If this option isn't specified, it defaults to false. Your IAM policy must include the logs:CreateLogGroup permission before you attempt to use awslogs-create-group. + awslogs-region Required: Yes Specify the Region that the awslogs log driver is to send your Docker logs to. You can choose to send all of your logs from clusters in different Regions to a single region in CloudWatch Logs. This is so that they're all visible in one location. Otherwise, you can separate them by Region for more granularity. Make sure that the specified log group exists in the Region that you specify with this option. + awslogs-group Required: Yes Make sure to specify a log group that the awslogs log driver sends its log streams to. + awslogs-stream-prefix Required: Yes, when using Fargate.Optional when using EC2. Use the awslogs-stream-prefix option to associate a log stream with the specified prefix, the container name, and the ID of the Amazon ECS task that the container belongs to. If you specify a prefix with this option, then the log stream takes the format prefix-name/container-name/ecs-task-id. If you don't specify a prefix with this option, then the log stream is named after the container ID that's assigned by the Docker daemon on the container instance. Because it's difficult to trace logs back to the container that sent them with just the Docker container ID (which is only available on the container instance), we recommend that you specify a prefix with this option. For Amazon ECS services, you can use the service name as the prefix. Doing so, you can trace log streams to the service that the container belongs to, the name of the container that sent them, and the ID of the task that the container belongs to. You must specify a stream-prefix for your logs to have your logs appear in the Log pane when using the Amazon ECS console. 
+ awslogs-datetime-format Required: No This option defines a multiline start pattern in Python strftime format. A log message consists of a line that matches the pattern and any following lines that don’t match the pattern. The matched line is the delimiter between log messages. One example of a use case for using this format is for parsing output such as a stack dump, which might otherwise be logged in multiple entries. The correct pattern allows it to be captured in a single entry. For more information, see awslogs-datetime-format. You cannot configure both the awslogs-datetime-format and awslogs-multiline-pattern options. Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance. + awslogs-multiline-pattern Required: No This option defines a multiline start pattern that uses a regular expression. A log message consists of a line that matches the pattern and any following lines that don’t match the pattern. The matched line is the delimiter between log messages. For more information, see awslogs-multiline-pattern. This option is ignored if awslogs-datetime-format is also configured. You cannot configure both the awslogs-datetime-format and awslogs-multiline-pattern options. Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.
28722
+ * The following options apply to all supported log drivers.
28723
+ * + mode Required: No Valid values: non-blocking | blocking This option defines the delivery mode of log messages from the container to the log driver specified using logDriver. The delivery mode you choose affects application availability when the flow of logs from container is interrupted. If you use the blocking mode and the flow of logs is interrupted, calls from container code to write to the stdout and stderr streams will block. The logging thread of the application will block as a result. This may cause the application to become unresponsive and lead to container healthcheck failure. If you use the non-blocking mode, the container's logs are instead stored in an in-memory intermediate buffer configured with the max-buffer-size option. This prevents the application from becoming unresponsive when logs cannot be sent. We recommend using this mode if you want to ensure service availability and are okay with some log loss. For more information, see Preventing log loss with non-blocking mode in the awslogs container log driver. You can set a default mode for all containers in a specific Region by using the defaultLogDriverMode account setting. If you don't specify the mode option or configure the account setting, Amazon ECS will default to the blocking mode. For more information about the account setting, see Default log driver mode in the Amazon Elastic Container Service Developer Guide. + max-buffer-size Required: No Default value: 1m When non-blocking mode is used, the max-buffer-size log option controls the size of the buffer that's used for intermediate message storage. Make sure to specify an adequate buffer size based on your application. When the buffer fills up, further logs cannot be stored. Logs that cannot be stored are lost.
28584
28724
  * To route logs using the ``splunk`` log router, you need to specify a ``splunk-token`` and a ``splunk-url``.
28585
28725
  * When you use the ``awsfirelens`` log router to route logs to an AWS Service or AWS Partner Network destination for log storage and analytics, you can set the ``log-driver-buffer-limit`` option to limit the number of events that are buffered in memory, before being sent to the log router container. It can help to resolve potential log loss issue because high throughput might result in memory running out for the buffer inside of Docker.
28586
28726
  * Other options you can specify when using ``awsfirelens`` to route logs depend on the destination. When you export logs to Amazon Data Firehose, you can specify the AWS Region with ``region`` and a name for the log stream with ``delivery_stream``.
@@ -28616,9 +28756,9 @@ export declare namespace ecs {
28616
28756
  /**
28617
28757
  * The number of I/O operations per second (IOPS). For ``gp3``, ``io1``, and ``io2`` volumes, this represents the number of IOPS that are provisioned for the volume. For ``gp2`` volumes, this represents the baseline performance of the volume and the rate at which the volume accumulates I/O credits for bursting.
28618
28758
  * The following are the supported values for each volume type.
28619
- * + ``gp3``: 3,000 - 16,000 IOPS
28620
- * + ``io1``: 100 - 64,000 IOPS
28621
- * + ``io2``: 100 - 256,000 IOPS
28759
+ * + ``gp3``: 3,000 - 16,000 IOPS
28760
+ * + ``io1``: 100 - 64,000 IOPS
28761
+ * + ``io2``: 100 - 256,000 IOPS
28622
28762
  *
28623
28763
  * This parameter is required for ``io1`` and ``io2`` volume types. The default for ``gp3`` volumes is ``3,000 IOPS``. This parameter is not supported for ``st1``, ``sc1``, or ``standard`` volume types.
28624
28764
  * This parameter maps 1:1 with the ``Iops`` parameter of the [CreateVolume API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html) in the *Amazon EC2 API Reference*.
@@ -28626,7 +28766,7 @@ export declare namespace ecs {
28626
28766
  iops?: number;
28627
28767
  /**
28628
28768
  * The Amazon Resource Name (ARN) identifier of the AWS Key Management Service key to use for Amazon EBS encryption. When encryption is turned on and no AWS Key Management Service key is specified, the default AWS managed key for Amazon EBS volumes is used. This parameter maps 1:1 with the ``KmsKeyId`` parameter of the [CreateVolume API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html) in the *Amazon EC2 API Reference*.
28629
- * AWS authenticates the AWS Key Management Service key asynchronously. Therefore, if you specify an ID, alias, or ARN that is invalid, the action can appear to complete, but eventually fails.
28769
+ * AWS authenticates the AWS Key Management Service key asynchronously. Therefore, if you specify an ID, alias, or ARN that is invalid, the action can appear to complete, but eventually fails.
28630
28770
  */
28631
28771
  kmsKeyId?: string;
28632
28772
  /**
@@ -28636,10 +28776,10 @@ export declare namespace ecs {
28636
28776
  /**
28637
28777
  * The size of the volume in GiB. You must specify either a volume size or a snapshot ID. If you specify a snapshot ID, the snapshot size is used for the volume size by default. You can optionally specify a volume size greater than or equal to the snapshot size. This parameter maps 1:1 with the ``Size`` parameter of the [CreateVolume API](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_CreateVolume.html) in the *Amazon EC2 API Reference*.
28638
28778
  * The following are the supported volume size values for each volume type.
28639
- * + ``gp2`` and ``gp3``: 1-16,384
28640
- * + ``io1`` and ``io2``: 4-16,384
28641
- * + ``st1`` and ``sc1``: 125-16,384
28642
- * + ``standard``: 1-1,024
28779
+ * + ``gp2`` and ``gp3``: 1-16,384
28780
+ * + ``io1`` and ``io2``: 4-16,384
28781
+ * + ``st1`` and ``sc1``: 125-16,384
28782
+ * + ``standard``: 1-1,024
28643
28783
  */
28644
28784
  sizeInGiB?: number;
28645
28785
  /**
@@ -28786,7 +28926,7 @@ export declare namespace ecs {
28786
28926
  */
28787
28927
  idleTimeoutSeconds?: number;
28788
28928
  /**
28789
- * The amount of time waiting for the upstream to respond with a complete response per request. A value of ``0`` can be set to disable ``perRequestTimeout``. ``perRequestTimeout`` can only be set if Service Connect ``appProtocol`` isn't ``TCP``. Only ``idleTimeout`` is allowed for ``TCP`` ``appProtocol``.
28929
+ * The amount of time waiting for the upstream to respond with a complete response per request. A value of ``0`` can be set to disable ``perRequestTimeout``. ``perRequestTimeout`` can only be set if Service Connect ``appProtocol`` isn't ``TCP``. Only ``idleTimeout`` is allowed for ``TCP````appProtocol``.
28790
28930
  */
28791
28931
  perRequestTimeoutSeconds?: number;
28792
28932
  }
@@ -28848,9 +28988,9 @@ export declare namespace ecs {
28848
28988
  * You can determine the number of CPU units that are available per EC2 instance type by multiplying the vCPUs listed for that instance type on the [Amazon EC2 Instances](https://docs.aws.amazon.com/ec2/instance-types/) detail page by 1,024.
28849
28989
  * Linux containers share unallocated CPU units with other containers on the container instance with the same ratio as their allocated amount. For example, if you run a single-container task on a single-core instance type with 512 CPU units specified for that container, and that's the only task running on the container instance, that container could use the full 1,024 CPU unit share at any given time. However, if you launched another copy of the same task on that container instance, each task is guaranteed a minimum of 512 CPU units when needed. Moreover, each container could float to higher CPU usage if the other container was not using it. If both tasks were 100% active all of the time, they would be limited to 512 CPU units.
28850
28990
  * On Linux container instances, the Docker daemon on the container instance uses the CPU value to calculate the relative CPU share ratios for running containers. The minimum valid CPU share value that the Linux kernel allows is 2, and the maximum valid CPU share value that the Linux kernel allows is 262144. However, the CPU parameter isn't required, and you can use CPU values below 2 or above 262144 in your container definitions. For CPU values below 2 (including null) or above 262144, the behavior varies based on your Amazon ECS container agent version:
28851
- * + *Agent versions less than or equal to 1.1.0:* Null and zero CPU values are passed to Docker as 0, which Docker then converts to 1,024 CPU shares. CPU values of 1 are passed to Docker as 1, which the Linux kernel converts to two CPU shares.
28852
- * + *Agent versions greater than or equal to 1.2.0:* Null, zero, and CPU values of 1 are passed to Docker as 2.
28853
- * + *Agent versions greater than or equal to 1.84.0:* CPU values greater than 256 vCPU are passed to Docker as 256, which is equivalent to 262144 CPU shares.
28991
+ * + *Agent versions less than or equal to 1.1.0:* Null and zero CPU values are passed to Docker as 0, which Docker then converts to 1,024 CPU shares. CPU values of 1 are passed to Docker as 1, which the Linux kernel converts to two CPU shares.
28992
+ * + *Agent versions greater than or equal to 1.2.0:* Null, zero, and CPU values of 1 are passed to Docker as 2.
28993
+ * + *Agent versions greater than or equal to 1.84.0:* CPU values greater than 256 vCPU are passed to Docker as 256, which is equivalent to 262144 CPU shares.
28854
28994
  *
28855
28995
  * On Windows container instances, the CPU limit is enforced as an absolute limit, or a quota. Windows containers only have access to the specified amount of CPU that's described in the task definition. A null or zero CPU value is passed to Docker as ``0``, which Windows interprets as 1% of one CPU.
28856
28996
  */
@@ -29104,10 +29244,10 @@ export declare namespace ecs {
29104
29244
  interface TaskDefinitionContainerDependency {
29105
29245
  /**
29106
29246
  * The dependency condition of the container. The following are the available conditions and their behavior:
29107
- * + ``START`` - This condition emulates the behavior of links and volumes today. It validates that a dependent container is started before permitting other containers to start.
29108
- * + ``COMPLETE`` - This condition validates that a dependent container runs to completion (exits) before permitting other containers to start. This can be useful for nonessential containers that run a script and then exit. This condition can't be set on an essential container.
29109
- * + ``SUCCESS`` - This condition is the same as ``COMPLETE``, but it also requires that the container exits with a ``zero`` status. This condition can't be set on an essential container.
29110
- * + ``HEALTHY`` - This condition validates that the dependent container passes its Docker health check before permitting other containers to start. This requires that the dependent container has health checks configured. This condition is confirmed only at task startup.
29247
+ * + ``START`` - This condition emulates the behavior of links and volumes today. It validates that a dependent container is started before permitting other containers to start.
29248
+ * + ``COMPLETE`` - This condition validates that a dependent container runs to completion (exits) before permitting other containers to start. This can be useful for nonessential containers that run a script and then exit. This condition can't be set on an essential container.
29249
+ * + ``SUCCESS`` - This condition is the same as ``COMPLETE``, but it also requires that the container exits with a ``zero`` status. This condition can't be set on an essential container.
29250
+ * + ``HEALTHY`` - This condition validates that the dependent container passes its Docker health check before permitting other containers to start. This requires that the dependent container has health checks configured. This condition is confirmed only at task startup.
29111
29251
  */
29112
29252
  condition?: string;
29113
29253
  /**
@@ -29262,9 +29402,9 @@ export declare namespace ecs {
29262
29402
  /**
29263
29403
  * The options to use when configuring the log router. This field is optional and can be used to add additional metadata, such as the task, task definition, cluster, and container instance details to the log event.
29264
29404
  * If specified, valid option keys are:
29265
- * + ``enable-ecs-log-metadata``, which can be ``true`` or ``false``
29266
- * + ``config-file-type``, which can be ``s3`` or ``file``
29267
- * + ``config-file-value``, which is either an S3 ARN or a file path
29405
+ * + ``enable-ecs-log-metadata``, which can be ``true`` or ``false``
29406
+ * + ``config-file-type``, which can be ``s3`` or ``file``
29407
+ * + ``config-file-value``, which is either an S3 ARN or a file path
29268
29408
  */
29269
29409
  options?: {
29270
29410
  [key: string]: string;
@@ -29440,7 +29580,9 @@ export declare namespace ecs {
29440
29580
  /**
29441
29581
  * The configuration options to send to the log driver.
29442
29582
  * The options you can specify depend on the log driver. Some of the options you can specify when you use the ``awslogs`` log driver to route logs to Amazon CloudWatch include the following:
29443
- * + awslogs-create-group Required: No Specify whether you want the log group to be created automatically. If this option isn't specified, it defaults to false. Your IAM policy must include the logs:CreateLogGroup permission before you attempt to use awslogs-create-group. + awslogs-region Required: Yes Specify the Region that the awslogs log driver is to send your Docker logs to. You can choose to send all of your logs from clusters in different Regions to a single region in CloudWatch Logs. This is so that they're all visible in one location. Otherwise, you can separate them by Region for more granularity. Make sure that the specified log group exists in the Region that you specify with this option. + awslogs-group Required: Yes Make sure to specify a log group that the awslogs log driver sends its log streams to. + awslogs-stream-prefix Required: Yes, when using the Fargate launch type.Optional for the EC2 launch type, required for the Fargate launch type. Use the awslogs-stream-prefix option to associate a log stream with the specified prefix, the container name, and the ID of the Amazon ECS task that the container belongs to. If you specify a prefix with this option, then the log stream takes the format prefix-name/container-name/ecs-task-id. If you don't specify a prefix with this option, then the log stream is named after the container ID that's assigned by the Docker daemon on the container instance. Because it's difficult to trace logs back to the container that sent them with just the Docker container ID (which is only available on the container instance), we recommend that you specify a prefix with this option. For Amazon ECS services, you can use the service name as the prefix. Doing so, you can trace log streams to the service that the container belongs to, the name of the container that sent them, and the ID of the task that the container belongs to. 
You must specify a stream-prefix for your logs to have your logs appear in the Log pane when using the Amazon ECS console. + awslogs-datetime-format Required: No This option defines a multiline start pattern in Python strftime format. A log message consists of a line that matches the pattern and any following lines that don’t match the pattern. The matched line is the delimiter between log messages. One example of a use case for using this format is for parsing output such as a stack dump, which might otherwise be logged in multiple entries. The correct pattern allows it to be captured in a single entry. For more information, see awslogs-datetime-format. You cannot configure both the awslogs-datetime-format and awslogs-multiline-pattern options. Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance. + awslogs-multiline-pattern Required: No This option defines a multiline start pattern that uses a regular expression. A log message consists of a line that matches the pattern and any following lines that don’t match the pattern. The matched line is the delimiter between log messages. For more information, see awslogs-multiline-pattern. This option is ignored if awslogs-datetime-format is also configured. You cannot configure both the awslogs-datetime-format and awslogs-multiline-pattern options. Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance. + mode Required: No Valid values: non-blocking | blocking This option defines the delivery mode of log messages from the container to CloudWatch Logs. The delivery mode you choose affects application availability when the flow of logs from container to CloudWatch is interrupted. If you use the blocking mode and the flow of logs to CloudWatch is interrupted, calls from container code to write to the stdout and stderr streams will block. 
The logging thread of the application will block as a result. This may cause the application to become unresponsive and lead to container healthcheck failure. If you use the non-blocking mode, the container's logs are instead stored in an in-memory intermediate buffer configured with the max-buffer-size option. This prevents the application from becoming unresponsive when logs cannot be sent to CloudWatch. We recommend using this mode if you want to ensure service availability and are okay with some log loss. For more information, see Preventing log loss with non-blocking mode in the awslogs container log driver. + max-buffer-size Required: No Default value: 1m When non-blocking mode is used, the max-buffer-size log option controls the size of the buffer that's used for intermediate message storage. Make sure to specify an adequate buffer size based on your application. When the buffer fills up, further logs cannot be stored. Logs that cannot be stored are lost.
29583
+ * + awslogs-create-group Required: No Specify whether you want the log group to be created automatically. If this option isn't specified, it defaults to false. Your IAM policy must include the logs:CreateLogGroup permission before you attempt to use awslogs-create-group. + awslogs-region Required: Yes Specify the Region that the awslogs log driver is to send your Docker logs to. You can choose to send all of your logs from clusters in different Regions to a single region in CloudWatch Logs. This is so that they're all visible in one location. Otherwise, you can separate them by Region for more granularity. Make sure that the specified log group exists in the Region that you specify with this option. + awslogs-group Required: Yes Make sure to specify a log group that the awslogs log driver sends its log streams to. + awslogs-stream-prefix Required: Yes, when using Fargate.Optional when using EC2. Use the awslogs-stream-prefix option to associate a log stream with the specified prefix, the container name, and the ID of the Amazon ECS task that the container belongs to. If you specify a prefix with this option, then the log stream takes the format prefix-name/container-name/ecs-task-id. If you don't specify a prefix with this option, then the log stream is named after the container ID that's assigned by the Docker daemon on the container instance. Because it's difficult to trace logs back to the container that sent them with just the Docker container ID (which is only available on the container instance), we recommend that you specify a prefix with this option. For Amazon ECS services, you can use the service name as the prefix. Doing so, you can trace log streams to the service that the container belongs to, the name of the container that sent them, and the ID of the task that the container belongs to. You must specify a stream-prefix for your logs to have your logs appear in the Log pane when using the Amazon ECS console. 
+ awslogs-datetime-format Required: No This option defines a multiline start pattern in Python strftime format. A log message consists of a line that matches the pattern and any following lines that don’t match the pattern. The matched line is the delimiter between log messages. One example of a use case for using this format is for parsing output such as a stack dump, which might otherwise be logged in multiple entries. The correct pattern allows it to be captured in a single entry. For more information, see awslogs-datetime-format. You cannot configure both the awslogs-datetime-format and awslogs-multiline-pattern options. Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance. + awslogs-multiline-pattern Required: No This option defines a multiline start pattern that uses a regular expression. A log message consists of a line that matches the pattern and any following lines that don’t match the pattern. The matched line is the delimiter between log messages. For more information, see awslogs-multiline-pattern. This option is ignored if awslogs-datetime-format is also configured. You cannot configure both the awslogs-datetime-format and awslogs-multiline-pattern options. Multiline logging performs regular expression parsing and matching of all log messages. This might have a negative impact on logging performance.
29584
+ * The following options apply to all supported log drivers.
29585
+ * + mode Required: No Valid values: non-blocking | blocking This option defines the delivery mode of log messages from the container to the log driver specified using logDriver. The delivery mode you choose affects application availability when the flow of logs from container is interrupted. If you use the blocking mode and the flow of logs is interrupted, calls from container code to write to the stdout and stderr streams will block. The logging thread of the application will block as a result. This may cause the application to become unresponsive and lead to container healthcheck failure. If you use the non-blocking mode, the container's logs are instead stored in an in-memory intermediate buffer configured with the max-buffer-size option. This prevents the application from becoming unresponsive when logs cannot be sent. We recommend using this mode if you want to ensure service availability and are okay with some log loss. For more information, see Preventing log loss with non-blocking mode in the awslogs container log driver. You can set a default mode for all containers in a specific Region by using the defaultLogDriverMode account setting. If you don't specify the mode option or configure the account setting, Amazon ECS will default to the blocking mode. For more information about the account setting, see Default log driver mode in the Amazon Elastic Container Service Developer Guide. + max-buffer-size Required: No Default value: 1m When non-blocking mode is used, the max-buffer-size log option controls the size of the buffer that's used for intermediate message storage. Make sure to specify an adequate buffer size based on your application. When the buffer fills up, further logs cannot be stored. Logs that cannot be stored are lost.
29444
29586
  * To route logs using the ``splunk`` log router, you need to specify a ``splunk-token`` and a ``splunk-url``.
29445
29587
  * When you use the ``awsfirelens`` log router to route logs to an AWS Service or AWS Partner Network destination for log storage and analytics, you can set the ``log-driver-buffer-limit`` option to limit the number of events that are buffered in memory, before being sent to the log router container. It can help to resolve potential log loss issue because high throughput might result in memory running out for the buffer inside of Docker.
29446
29588
  * Other options you can specify when using ``awsfirelens`` to route logs depend on the destination. When you export logs to Amazon Data Firehose, you can specify the AWS Region with ``region`` and a name for the log stream with ``delivery_stream``.
@@ -29497,7 +29639,7 @@ export declare namespace ecs {
29497
29639
  /**
29498
29640
  * The application protocol that's used for the port mapping. This parameter only applies to Service Connect. We recommend that you set this parameter to be consistent with the protocol that your application uses. If you set this parameter, Amazon ECS adds protocol-specific connection handling to the Service Connect proxy. If you set this parameter, Amazon ECS adds protocol-specific telemetry in the Amazon ECS console and CloudWatch.
29499
29641
  * If you don't set a value for this parameter, then TCP is used. However, Amazon ECS doesn't add protocol-specific telemetry for TCP.
29500
- * ``appProtocol`` is immutable in a Service Connect service. Updating this field requires a service deletion and redeployment.
29642
+ * ``appProtocol`` is immutable in a Service Connect service. Updating this field requires a service deletion and redeployment.
29501
29643
  * Tasks that run in a namespace can use short names to connect to services in the namespace. Tasks can connect to services across all of the clusters in the namespace. Tasks connect through a managed proxy container that collects logs and metrics for increased visibility. Only the tasks that Amazon ECS services create are supported with Service Connect. For more information, see [Service Connect](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-connect.html) in the *Amazon Elastic Container Service Developer Guide*.
29502
29644
  */
29503
29645
  appProtocol?: enums.ecs.TaskDefinitionPortMappingAppProtocol;
@@ -29562,13 +29704,13 @@ export declare namespace ecs {
29562
29704
  containerName: string;
29563
29705
  /**
29564
29706
  * The set of network configuration parameters to provide the Container Network Interface (CNI) plugin, specified as key-value pairs.
29565
- * + ``IgnoredUID`` - (Required) The user ID (UID) of the proxy container as defined by the ``user`` parameter in a container definition. This is used to ensure the proxy ignores its own traffic. If ``IgnoredGID`` is specified, this field can be empty.
29566
- * + ``IgnoredGID`` - (Required) The group ID (GID) of the proxy container as defined by the ``user`` parameter in a container definition. This is used to ensure the proxy ignores its own traffic. If ``IgnoredUID`` is specified, this field can be empty.
29567
- * + ``AppPorts`` - (Required) The list of ports that the application uses. Network traffic to these ports is forwarded to the ``ProxyIngressPort`` and ``ProxyEgressPort``.
29568
- * + ``ProxyIngressPort`` - (Required) Specifies the port that incoming traffic to the ``AppPorts`` is directed to.
29569
- * + ``ProxyEgressPort`` - (Required) Specifies the port that outgoing traffic from the ``AppPorts`` is directed to.
29570
- * + ``EgressIgnoredPorts`` - (Required) The egress traffic going to the specified ports is ignored and not redirected to the ``ProxyEgressPort``. It can be an empty list.
29571
- * + ``EgressIgnoredIPs`` - (Required) The egress traffic going to the specified IP addresses is ignored and not redirected to the ``ProxyEgressPort``. It can be an empty list.
29707
+ * + ``IgnoredUID`` - (Required) The user ID (UID) of the proxy container as defined by the ``user`` parameter in a container definition. This is used to ensure the proxy ignores its own traffic. If ``IgnoredGID`` is specified, this field can be empty.
29708
+ * + ``IgnoredGID`` - (Required) The group ID (GID) of the proxy container as defined by the ``user`` parameter in a container definition. This is used to ensure the proxy ignores its own traffic. If ``IgnoredUID`` is specified, this field can be empty.
29709
+ * + ``AppPorts`` - (Required) The list of ports that the application uses. Network traffic to these ports is forwarded to the ``ProxyIngressPort`` and ``ProxyEgressPort``.
29710
+ * + ``ProxyIngressPort`` - (Required) Specifies the port that incoming traffic to the ``AppPorts`` is directed to.
29711
+ * + ``ProxyEgressPort`` - (Required) Specifies the port that outgoing traffic from the ``AppPorts`` is directed to.
29712
+ * + ``EgressIgnoredPorts`` - (Required) The egress traffic going to the specified ports is ignored and not redirected to the ``ProxyEgressPort``. It can be an empty list.
29713
+ * + ``EgressIgnoredIPs`` - (Required) The egress traffic going to the specified IP addresses is ignored and not redirected to the ``ProxyEgressPort``. It can be an empty list.
29572
29714
  */
29573
29715
  proxyConfigurationProperties?: outputs.ecs.TaskDefinitionKeyValuePair[];
29574
29716
  /**
@@ -50406,11 +50548,11 @@ export declare namespace mediapackagev2 {
50406
50548
  */
50407
50549
  interface OriginEndpointForceEndpointErrorConfiguration {
50408
50550
  /**
50409
- * <p>The failover settings for the endpoint. The options are:</p>
50551
+ * <p>The failover conditions for the endpoint. The options are:</p>
50410
50552
  * <ul>
50411
50553
  * <li>
50412
50554
  * <p>
50413
- * <code>STALE_MANIFEST</code> - The manifest stalled and there a no new segments or parts.</p>
50555
+ * <code>STALE_MANIFEST</code> - The manifest stalled and there are no new segments or parts.</p>
50414
50556
  * </li>
50415
50557
  * <li>
50416
50558
  * <p>
@@ -50420,6 +50562,10 @@ export declare namespace mediapackagev2 {
50420
50562
  * <p>
50421
50563
  * <code>MISSING_DRM_KEY</code> - Key rotation is enabled but we're unable to fetch the key for the current key period.</p>
50422
50564
  * </li>
50565
+ * <li>
50566
+ * <p>
50567
+ * <code>SLATE_INPUT</code> - The segments which contain slate content are considered to be missing content.</p>
50568
+ * </li>
50423
50569
  * </ul>
50424
50570
  */
50425
50571
  endpointErrorConditions?: enums.mediapackagev2.OriginEndpointEndpointErrorCondition[];
@@ -50444,8 +50590,7 @@ export declare namespace mediapackagev2 {
50444
50590
  /**
50445
50591
  * <p>Inserts EXT-X-PROGRAM-DATE-TIME tags in the output manifest at the interval that you specify. If you don't enter an interval,
50446
50592
  * EXT-X-PROGRAM-DATE-TIME tags aren't included in the manifest.
50447
- * The tags sync the stream to the wall clock so that viewers can seek to a specific time in the playback timeline on the player.
50448
- * ID3Timed metadata messages generate every 5 seconds whenever the content is ingested.</p>
50593
+ * The tags sync the stream to the wall clock so that viewers can seek to a specific time in the playback timeline on the player.</p>
50449
50594
  * <p>Irrespective of this parameter, if any ID3Timed metadata is in the HLS input, it is passed through to the HLS output.</p>
50450
50595
  */
50451
50596
  programDateTimeIntervalSeconds?: number;
@@ -50458,6 +50603,11 @@ export declare namespace mediapackagev2 {
50458
50603
  * <p>The egress domain URL for stream delivery from MediaPackage.</p>
50459
50604
  */
50460
50605
  url?: string;
50606
+ /**
50607
+ * <p>When enabled, MediaPackage URL-encodes the query string for API requests for HLS child manifests to comply with the Amazon Web Services Signature Version 4 (SigV4) signing protocol.
50608
+ * For more information, see <a href="https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_sigv.html">Amazon Web Services Signature Version 4 for API requests</a> in <i>Identity and Access Management User Guide</i>.</p>
50609
+ */
50610
+ urlEncodeChildManifest?: boolean;
50461
50611
  }
50462
50612
  /**
50463
50613
  * <p>Retrieve the low-latency HTTP live streaming (HLS) manifest configuration.</p>
@@ -50479,8 +50629,7 @@ export declare namespace mediapackagev2 {
50479
50629
  /**
50480
50630
  * <p>Inserts EXT-X-PROGRAM-DATE-TIME tags in the output manifest at the interval that you specify. If you don't enter an interval,
50481
50631
  * EXT-X-PROGRAM-DATE-TIME tags aren't included in the manifest.
50482
- * The tags sync the stream to the wall clock so that viewers can seek to a specific time in the playback timeline on the player.
50483
- * ID3Timed metadata messages generate every 5 seconds whenever the content is ingested.</p>
50632
+ * The tags sync the stream to the wall clock so that viewers can seek to a specific time in the playback timeline on the player.</p>
50484
50633
  * <p>Irrespective of this parameter, if any ID3Timed metadata is in the HLS input, it is passed through to the HLS output.</p>
50485
50634
  */
50486
50635
  programDateTimeIntervalSeconds?: number;
@@ -50493,6 +50642,11 @@ export declare namespace mediapackagev2 {
50493
50642
  * <p>The egress domain URL for stream delivery from MediaPackage.</p>
50494
50643
  */
50495
50644
  url?: string;
50645
+ /**
50646
+ * <p>When enabled, MediaPackage URL-encodes the query string for API requests for LL-HLS child manifests to comply with the Amazon Web Services Signature Version 4 (SigV4) signing protocol.
50647
+ * For more information, see <a href="https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_sigv.html">Amazon Web Services Signature Version 4 for API requests</a> in <i>Identity and Access Management User Guide</i>.</p>
50648
+ */
50649
+ urlEncodeChildManifest?: boolean;
50496
50650
  }
50497
50651
  /**
50498
50652
  * <p>The SCTE configuration.</p>
@@ -55654,7 +55808,7 @@ export declare namespace qbusiness {
55654
55808
  */
55655
55809
  invocationCondition?: outputs.qbusiness.DataSourceDocumentAttributeCondition;
55656
55810
  /**
55657
- * The Amazon Resource Name (ARN) of the Lambda function sduring ingestion. For more information, see [Using Lambda functions for Amazon Q Business document enrichment](https://docs.aws.amazon.com/amazonq/latest/qbusiness-ug/cde-lambda-operations.html) .
55811
+ * The Amazon Resource Name (ARN) of the Lambda function during ingestion. For more information, see [Using Lambda functions for Amazon Q Business document enrichment](https://docs.aws.amazon.com/amazonq/latest/qbusiness-ug/cde-lambda-operations.html) .
55658
55812
  */
55659
55813
  lambdaArn?: string;
55660
55814
  /**
@@ -96794,7 +96948,7 @@ export declare namespace vpclattice {
96794
96948
  }
96795
96949
  interface ListenerFixedResponse {
96796
96950
  /**
96797
- * The HTTP response code.
96951
+ * The HTTP response code. Only `404` and `500` status codes are supported.
96798
96952
  */
96799
96953
  statusCode: number;
96800
96954
  }
@@ -96862,7 +97016,7 @@ export declare namespace vpclattice {
96862
97016
  }
96863
97017
  interface RuleFixedResponse {
96864
97018
  /**
96865
- * The HTTP response code.
97019
+ * The HTTP response code. Only `404` and `500` status codes are supported.
96866
97020
  */
96867
97021
  statusCode: number;
96868
97022
  }