@pulumi/aws-native 1.32.0-alpha.1754026912 → 1.32.0-alpha.1754570277

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (211)
  1. package/appconfig/deployment.d.ts +5 -0
  2. package/appconfig/deployment.js +2 -0
  3. package/appconfig/deployment.js.map +1 -1
  4. package/appconfig/getDeployment.d.ts +5 -0
  5. package/appconfig/getDeployment.js.map +1 -1
  6. package/arcregionswitch/getPlan.d.ts +71 -0
  7. package/arcregionswitch/getPlan.js +28 -0
  8. package/arcregionswitch/getPlan.js.map +1 -0
  9. package/arcregionswitch/index.d.ts +7 -0
  10. package/arcregionswitch/index.js +41 -0
  11. package/arcregionswitch/index.js.map +1 -0
  12. package/arcregionswitch/plan.d.ts +144 -0
  13. package/arcregionswitch/plan.js +98 -0
  14. package/arcregionswitch/plan.js.map +1 -0
  15. package/batch/getJobQueue.d.ts +4 -0
  16. package/batch/getJobQueue.js.map +1 -1
  17. package/batch/getServiceEnvironment.d.ts +15 -0
  18. package/batch/getServiceEnvironment.js.map +1 -1
  19. package/batch/jobQueue.d.ts +18 -2
  20. package/batch/jobQueue.js +5 -4
  21. package/batch/jobQueue.js.map +1 -1
  22. package/batch/serviceEnvironment.d.ts +27 -0
  23. package/batch/serviceEnvironment.js.map +1 -1
  24. package/cloudwatch/alarm.d.ts +4 -4
  25. package/cloudwatch/getAlarm.d.ts +2 -2
  26. package/ec2/natGateway.d.ts +3 -3
  27. package/ec2/natGateway.js +0 -3
  28. package/ec2/natGateway.js.map +1 -1
  29. package/ecr/getRepository.d.ts +4 -0
  30. package/ecr/getRepository.js.map +1 -1
  31. package/ecr/getRepositoryCreationTemplate.d.ts +4 -0
  32. package/ecr/getRepositoryCreationTemplate.js.map +1 -1
  33. package/ecr/repository.d.ts +8 -0
  34. package/ecr/repository.js +2 -0
  35. package/ecr/repository.js.map +1 -1
  36. package/ecr/repositoryCreationTemplate.d.ts +8 -0
  37. package/ecr/repositoryCreationTemplate.js +2 -0
  38. package/ecr/repositoryCreationTemplate.js.map +1 -1
  39. package/ecs/getService.d.ts +2 -2
  40. package/ecs/service.d.ts +4 -4
  41. package/eks/cluster.d.ts +8 -0
  42. package/eks/cluster.js +2 -0
  43. package/eks/cluster.js.map +1 -1
  44. package/eks/getCluster.d.ts +4 -0
  45. package/eks/getCluster.js.map +1 -1
  46. package/elasticloadbalancingv2/getListener.d.ts +3 -2
  47. package/elasticloadbalancingv2/getListener.js.map +1 -1
  48. package/elasticloadbalancingv2/getLoadBalancer.d.ts +1 -1
  49. package/elasticloadbalancingv2/listener.d.ts +6 -4
  50. package/elasticloadbalancingv2/listener.js.map +1 -1
  51. package/elasticloadbalancingv2/loadBalancer.d.ts +2 -2
  52. package/entityresolution/getIdMappingWorkflow.d.ts +1 -1
  53. package/entityresolution/getMatchingWorkflow.d.ts +2 -2
  54. package/entityresolution/idMappingWorkflow.d.ts +2 -2
  55. package/entityresolution/matchingWorkflow.d.ts +4 -4
  56. package/evs/environment.d.ts +2 -2
  57. package/index.d.ts +2 -1
  58. package/index.js +8 -6
  59. package/index.js.map +1 -1
  60. package/iot/encryptionConfiguration.d.ts +44 -0
  61. package/iot/encryptionConfiguration.js +69 -0
  62. package/iot/encryptionConfiguration.js.map +1 -0
  63. package/iot/getEncryptionConfiguration.d.ts +25 -0
  64. package/iot/getEncryptionConfiguration.js +28 -0
  65. package/iot/getEncryptionConfiguration.js.map +1 -0
  66. package/iot/index.d.ts +6 -0
  67. package/iot/index.js +9 -2
  68. package/iot/index.js.map +1 -1
  69. package/iotsitewise/assetModel.d.ts +10 -2
  70. package/iotsitewise/assetModel.js +2 -0
  71. package/iotsitewise/assetModel.js.map +1 -1
  72. package/iotsitewise/getAssetModel.d.ts +4 -0
  73. package/iotsitewise/getAssetModel.js.map +1 -1
  74. package/logs/destination.d.ts +10 -0
  75. package/logs/destination.js +2 -0
  76. package/logs/destination.js.map +1 -1
  77. package/logs/getDestination.d.ts +5 -0
  78. package/logs/getDestination.js.map +1 -1
  79. package/logs/getLogGroup.d.ts +4 -0
  80. package/logs/getLogGroup.js.map +1 -1
  81. package/logs/logGroup.d.ts +8 -0
  82. package/logs/logGroup.js +2 -0
  83. package/logs/logGroup.js.map +1 -1
  84. package/mediapackagev2/getOriginEndpointPolicy.d.ts +3 -0
  85. package/mediapackagev2/getOriginEndpointPolicy.js.map +1 -1
  86. package/mediapackagev2/originEndpointPolicy.d.ts +6 -0
  87. package/mediapackagev2/originEndpointPolicy.js.map +1 -1
  88. package/networkfirewall/firewall.d.ts +30 -6
  89. package/networkfirewall/firewall.js +6 -6
  90. package/networkfirewall/firewall.js.map +1 -1
  91. package/networkfirewall/firewallPolicy.d.ts +2 -2
  92. package/networkfirewall/getFirewall.d.ts +16 -4
  93. package/networkfirewall/getFirewall.js.map +1 -1
  94. package/networkfirewall/getFirewallPolicy.d.ts +4 -4
  95. package/networkfirewall/getLoggingConfiguration.d.ts +3 -3
  96. package/networkfirewall/getRuleGroup.d.ts +11 -4
  97. package/networkfirewall/getRuleGroup.js.map +1 -1
  98. package/networkfirewall/getVpcEndpointAssociation.d.ts +3 -0
  99. package/networkfirewall/getVpcEndpointAssociation.js.map +1 -1
  100. package/networkfirewall/loggingConfiguration.d.ts +4 -4
  101. package/networkfirewall/ruleGroup.d.ts +16 -2
  102. package/networkfirewall/ruleGroup.js +2 -0
  103. package/networkfirewall/ruleGroup.js.map +1 -1
  104. package/networkfirewall/vpcEndpointAssociation.d.ts +3 -0
  105. package/networkfirewall/vpcEndpointAssociation.js.map +1 -1
  106. package/package.json +3 -3
  107. package/package.json.dev +2 -2
  108. package/qbusiness/getWebExperience.d.ts +1 -1
  109. package/qbusiness/webExperience.d.ts +2 -2
  110. package/rds/dbCluster.d.ts +8 -0
  111. package/rds/dbCluster.js +3 -1
  112. package/rds/dbCluster.js.map +1 -1
  113. package/rds/dbInstance.d.ts +24 -0
  114. package/rds/dbInstance.js +10 -0
  115. package/rds/dbInstance.js.map +1 -1
  116. package/rds/getDbInstance.d.ts +20 -0
  117. package/rds/getDbInstance.js.map +1 -1
  118. package/s3/accessPoint.d.ts +8 -0
  119. package/s3/accessPoint.js +2 -0
  120. package/s3/accessPoint.js.map +1 -1
  121. package/s3/bucket.d.ts +2 -2
  122. package/s3/getAccessPoint.d.ts +4 -0
  123. package/s3/getAccessPoint.js.map +1 -1
  124. package/s3/getBucket.d.ts +1 -1
  125. package/s3express/accessPoint.d.ts +2 -0
  126. package/s3express/accessPoint.js +2 -0
  127. package/s3express/accessPoint.js.map +1 -1
  128. package/s3express/getAccessPoint.d.ts +1 -0
  129. package/s3express/getAccessPoint.js.map +1 -1
  130. package/sagemaker/cluster.d.ts +5 -3
  131. package/sagemaker/cluster.js +3 -4
  132. package/sagemaker/cluster.js.map +1 -1
  133. package/sagemaker/getCluster.d.ts +1 -0
  134. package/sagemaker/getCluster.js.map +1 -1
  135. package/sagemaker/getProcessingJob.d.ts +68 -0
  136. package/sagemaker/getProcessingJob.js +28 -0
  137. package/sagemaker/getProcessingJob.js.map +1 -0
  138. package/sagemaker/index.d.ts +6 -0
  139. package/sagemaker/index.js +8 -1
  140. package/sagemaker/index.js.map +1 -1
  141. package/sagemaker/processingJob.d.ts +168 -0
  142. package/sagemaker/processingJob.js +109 -0
  143. package/sagemaker/processingJob.js.map +1 -0
  144. package/ssm/getParameter.d.ts +7 -4
  145. package/ssm/getParameter.js +2 -2
  146. package/ssm/getParameter.js.map +1 -1
  147. package/ssm/parameter.d.ts +9 -5
  148. package/ssm/parameter.js +1 -1
  149. package/ssm/parameter.js.map +1 -1
  150. package/types/enums/appconfig/index.d.ts +13 -0
  151. package/types/enums/appconfig/index.js +10 -1
  152. package/types/enums/appconfig/index.js.map +1 -1
  153. package/types/enums/arcregionswitch/index.d.ts +47 -0
  154. package/types/enums/arcregionswitch/index.js +45 -0
  155. package/types/enums/arcregionswitch/index.js.map +1 -0
  156. package/types/enums/batch/index.d.ts +1 -0
  157. package/types/enums/batch/index.js +1 -0
  158. package/types/enums/batch/index.js.map +1 -1
  159. package/types/enums/ecr/index.d.ts +15 -0
  160. package/types/enums/ecr/index.js +11 -1
  161. package/types/enums/ecr/index.js.map +1 -1
  162. package/types/enums/ecs/index.d.ts +36 -5
  163. package/types/enums/ecs/index.js.map +1 -1
  164. package/types/enums/entityresolution/index.d.ts +8 -8
  165. package/types/enums/glue/index.d.ts +6 -0
  166. package/types/enums/glue/index.js +6 -0
  167. package/types/enums/glue/index.js.map +1 -1
  168. package/types/enums/index.d.ts +2 -1
  169. package/types/enums/index.js +6 -4
  170. package/types/enums/index.js.map +1 -1
  171. package/types/enums/iot/index.d.ts +10 -0
  172. package/types/enums/iot/index.js +9 -1
  173. package/types/enums/iot/index.js.map +1 -1
  174. package/types/enums/networkfirewall/index.d.ts +9 -3
  175. package/types/enums/networkfirewall/index.js +6 -1
  176. package/types/enums/networkfirewall/index.js.map +1 -1
  177. package/types/enums/pcs/index.d.ts +8 -0
  178. package/types/enums/pcs/index.js +5 -1
  179. package/types/enums/pcs/index.js.map +1 -1
  180. package/types/enums/rds/index.d.ts +1 -0
  181. package/types/enums/rds/index.js +1 -0
  182. package/types/enums/rds/index.js.map +1 -1
  183. package/types/enums/s3/index.d.ts +5 -4
  184. package/types/enums/s3/index.js.map +1 -1
  185. package/types/enums/sagemaker/index.d.ts +226 -0
  186. package/types/enums/sagemaker/index.js +176 -2
  187. package/types/enums/sagemaker/index.js.map +1 -1
  188. package/types/enums/ssm/index.d.ts +1 -0
  189. package/types/enums/ssm/index.js.map +1 -1
  190. package/types/enums/workspacesweb/index.d.ts +30 -0
  191. package/types/enums/workspacesweb/index.js +28 -1
  192. package/types/enums/workspacesweb/index.js.map +1 -1
  193. package/types/input.d.ts +933 -144
  194. package/types/output.d.ts +946 -144
  195. package/types/output.js.map +1 -1
  196. package/verifiedpermissions/getPolicyStore.d.ts +1 -1
  197. package/verifiedpermissions/policyStore.d.ts +2 -2
  198. package/workspacesweb/getPortal.d.ts +1 -0
  199. package/workspacesweb/getPortal.js.map +1 -1
  200. package/workspacesweb/getSessionLogger.d.ts +25 -0
  201. package/workspacesweb/getSessionLogger.js +28 -0
  202. package/workspacesweb/getSessionLogger.js.map +1 -0
  203. package/workspacesweb/index.d.ts +6 -0
  204. package/workspacesweb/index.js +8 -1
  205. package/workspacesweb/index.js.map +1 -1
  206. package/workspacesweb/portal.d.ts +2 -0
  207. package/workspacesweb/portal.js +2 -0
  208. package/workspacesweb/portal.js.map +1 -1
  209. package/workspacesweb/sessionLogger.d.ts +54 -0
  210. package/workspacesweb/sessionLogger.js +80 -0
  211. package/workspacesweb/sessionLogger.js.map +1 -0
package/types/output.d.ts CHANGED
@@ -6274,6 +6274,241 @@ export declare namespace aps {
  destinations: outputs.aps.WorkspaceLoggingDestination[];
  }
  }
+ export declare namespace arcregionswitch {
+ interface PlanArcRoutingControlConfiguration {
+ crossAccountRole?: string;
+ externalId?: string;
+ regionAndRoutingControls: {
+ [key: string]: any;
+ };
+ timeoutMinutes?: number;
+ }
+ interface PlanAsg {
+ arn?: string;
+ crossAccountRole?: string;
+ externalId?: string;
+ }
+ interface PlanAssociatedAlarm {
+ /**
+ * The alarm type for an associated alarm. An associated CloudWatch alarm can be an application health alarm or a trigger alarm.
+ */
+ alarmType: enums.arcregionswitch.PlanAlarmType;
+ /**
+ * The cross account role for the configuration.
+ */
+ crossAccountRole?: string;
+ /**
+ * The external ID (secret key) for the configuration.
+ */
+ externalId?: string;
+ /**
+ * The resource identifier for alarms that you associate with a plan.
+ */
+ resourceIdentifier: string;
+ }
+ interface PlanCustomActionLambdaConfiguration {
+ lambdas: outputs.arcregionswitch.PlanLambdas[];
+ regionToRun: enums.arcregionswitch.PlanRegionToRunIn;
+ retryIntervalMinutes: number;
+ timeoutMinutes?: number;
+ ungraceful?: outputs.arcregionswitch.PlanLambdaUngraceful;
+ }
+ interface PlanEc2AsgCapacityIncreaseConfiguration {
+ asgs: outputs.arcregionswitch.PlanAsg[];
+ capacityMonitoringApproach?: any;
+ targetPercent?: number;
+ timeoutMinutes?: number;
+ ungraceful?: outputs.arcregionswitch.PlanEc2Ungraceful;
+ }
+ interface PlanEc2Ungraceful {
+ minimumSuccessPercentage: number;
+ }
+ interface PlanEcsCapacityIncreaseConfiguration {
+ capacityMonitoringApproach?: any;
+ services: outputs.arcregionswitch.PlanService[];
+ targetPercent?: number;
+ timeoutMinutes?: number;
+ ungraceful?: outputs.arcregionswitch.PlanEcsUngraceful;
+ }
+ interface PlanEcsUngraceful {
+ minimumSuccessPercentage: number;
+ }
+ interface PlanEksCluster {
+ clusterArn: string;
+ crossAccountRole?: string;
+ externalId?: string;
+ }
+ interface PlanEksResourceScalingConfiguration {
+ capacityMonitoringApproach?: any;
+ eksClusters?: outputs.arcregionswitch.PlanEksCluster[];
+ kubernetesResourceType: outputs.arcregionswitch.PlanKubernetesResourceType;
+ scalingResources?: {
+ [key: string]: any;
+ }[];
+ targetPercent?: number;
+ timeoutMinutes?: number;
+ ungraceful?: outputs.arcregionswitch.PlanEksResourceScalingUngraceful;
+ }
+ interface PlanEksResourceScalingUngraceful {
+ minimumSuccessPercentage: number;
+ }
+ interface PlanExecutionApprovalConfiguration {
+ approvalRole: string;
+ timeoutMinutes?: number;
+ }
+ interface PlanExecutionBlockConfiguration0Properties {
+ customActionLambdaConfig: outputs.arcregionswitch.PlanCustomActionLambdaConfiguration;
+ }
+ interface PlanExecutionBlockConfiguration1Properties {
+ ec2AsgCapacityIncreaseConfig: outputs.arcregionswitch.PlanEc2AsgCapacityIncreaseConfiguration;
+ }
+ interface PlanExecutionBlockConfiguration2Properties {
+ executionApprovalConfig: outputs.arcregionswitch.PlanExecutionApprovalConfiguration;
+ }
+ interface PlanExecutionBlockConfiguration3Properties {
+ arcRoutingControlConfig: outputs.arcregionswitch.PlanArcRoutingControlConfiguration;
+ }
+ interface PlanExecutionBlockConfiguration4Properties {
+ globalAuroraConfig: outputs.arcregionswitch.PlanGlobalAuroraConfiguration;
+ }
+ interface PlanExecutionBlockConfiguration5Properties {
+ parallelConfig: outputs.arcregionswitch.PlanParallelExecutionBlockConfiguration;
+ }
+ interface PlanExecutionBlockConfiguration6Properties {
+ regionSwitchPlanConfig: outputs.arcregionswitch.PlanRegionSwitchPlanConfiguration;
+ }
+ interface PlanExecutionBlockConfiguration7Properties {
+ ecsCapacityIncreaseConfig: outputs.arcregionswitch.PlanEcsCapacityIncreaseConfiguration;
+ }
+ interface PlanExecutionBlockConfiguration8Properties {
+ eksResourceScalingConfig: outputs.arcregionswitch.PlanEksResourceScalingConfiguration;
+ }
+ interface PlanExecutionBlockConfiguration9Properties {
+ route53HealthCheckConfig: outputs.arcregionswitch.PlanRoute53HealthCheckConfiguration;
+ }
+ interface PlanGlobalAuroraConfiguration {
+ behavior: any;
+ crossAccountRole?: string;
+ databaseClusterArns: string[];
+ externalId?: string;
+ globalClusterIdentifier: string;
+ timeoutMinutes?: number;
+ ungraceful?: outputs.arcregionswitch.PlanGlobalAuroraUngraceful;
+ }
+ interface PlanGlobalAuroraUngraceful {
+ ungraceful?: enums.arcregionswitch.PlanGlobalAuroraUngracefulBehavior;
+ }
+ interface PlanHealthCheckState {
+ healthCheckId?: string;
+ region?: string;
+ }
+ interface PlanKubernetesResourceType {
+ apiVersion: string;
+ kind: string;
+ }
+ interface PlanLambdaUngraceful {
+ behavior?: any;
+ }
+ interface PlanLambdas {
+ arn?: string;
+ crossAccountRole?: string;
+ externalId?: string;
+ }
+ interface PlanParallelExecutionBlockConfiguration {
+ steps: outputs.arcregionswitch.PlanStep[];
+ }
+ interface PlanRegionSwitchPlanConfiguration {
+ arn: string;
+ crossAccountRole?: string;
+ externalId?: string;
+ }
+ interface PlanRoute53HealthCheckConfiguration {
+ crossAccountRole?: string;
+ externalId?: string;
+ hostedZoneId: string;
+ recordName: string;
+ recordSets?: outputs.arcregionswitch.PlanRoute53ResourceRecordSet[];
+ timeoutMinutes?: number;
+ }
+ interface PlanRoute53ResourceRecordSet {
+ recordSetIdentifier?: string;
+ region?: string;
+ }
+ interface PlanService {
+ clusterArn?: string;
+ crossAccountRole?: string;
+ externalId?: string;
+ serviceArn?: string;
+ }
+ interface PlanStep {
+ /**
+ * The description of a step in a workflow.
+ */
+ description?: string;
+ /**
+ * The configuration for an execution block in a workflow.
+ */
+ executionBlockConfiguration: outputs.arcregionswitch.PlanExecutionBlockConfiguration0Properties | outputs.arcregionswitch.PlanExecutionBlockConfiguration1Properties | outputs.arcregionswitch.PlanExecutionBlockConfiguration2Properties | outputs.arcregionswitch.PlanExecutionBlockConfiguration3Properties | outputs.arcregionswitch.PlanExecutionBlockConfiguration4Properties | outputs.arcregionswitch.PlanExecutionBlockConfiguration5Properties | outputs.arcregionswitch.PlanExecutionBlockConfiguration6Properties | outputs.arcregionswitch.PlanExecutionBlockConfiguration7Properties | outputs.arcregionswitch.PlanExecutionBlockConfiguration8Properties | outputs.arcregionswitch.PlanExecutionBlockConfiguration9Properties;
+ /**
+ * The type of an execution block in a workflow.
+ */
+ executionBlockType: enums.arcregionswitch.PlanExecutionBlockType;
+ /**
+ * The name of a step in a workflow.
+ */
+ name: string;
+ }
+ interface PlanTrigger {
+ /**
+ * The action to perform when the trigger fires. Valid values include ACTIVATE and DEACTIVATE.
+ */
+ action: enums.arcregionswitch.PlanWorkflowTargetAction;
+ /**
+ * The conditions that must be met for the trigger to fire.
+ */
+ conditions: outputs.arcregionswitch.PlanTriggerCondition[];
+ /**
+ * The description for a trigger.
+ */
+ description?: string;
+ /**
+ * The minimum time, in minutes, that must elapse between automatic executions of the plan.
+ */
+ minDelayMinutesBetweenExecutions: number;
+ /**
+ * The AWS Region for a trigger.
+ */
+ targetRegion: string;
+ }
+ interface PlanTriggerCondition {
+ /**
+ * The name of the CloudWatch alarm associated with the condition.
+ */
+ associatedAlarmName: string;
+ /**
+ * The condition that must be met. Valid values include ALARM and OK.
+ */
+ condition: enums.arcregionswitch.PlanAlarmCondition;
+ }
+ interface PlanWorkflow {
+ /**
+ * The steps that make up the workflow.
+ */
+ steps?: outputs.arcregionswitch.PlanStep[];
+ /**
+ * The description of the workflow.
+ */
+ workflowDescription?: string;
+ /**
+ * The action that the workflow performs. Valid values include ACTIVATE and DEACTIVATE.
+ */
+ workflowTargetAction: enums.arcregionswitch.PlanWorkflowTargetAction;
+ /**
+ * The AWS Region that the workflow targets.
+ */
+ workflowTargetRegion?: string;
+ }
+ }
  export declare namespace arczonalshift {
  interface ZonalAutoshiftConfigurationControlCondition {
  /**
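
Note: the new arcregionswitch module (arcregionswitch/plan.d.ts, arcregionswitch/getPlan.d.ts and the output/enum types in the hunk above) exposes ARC Region Switch plans. A minimal TypeScript sketch of consuming the output shapes shown here, assuming the usual @pulumi/aws-native "types.output" layout; the resource's input side lives in types/input.d.ts and plan.d.ts, which are not expanded in this diff:

    import * as aws_native from "@pulumi/aws-native";

    // Summarize a resolved Region Switch workflow using only fields added in this
    // release (PlanWorkflow.steps, PlanStep.name, PlanStep.executionBlockType).
    function summarizeWorkflow(wf: aws_native.types.output.arcregionswitch.PlanWorkflow): string[] {
        return (wf.steps ?? []).map(step => `${step.name}: ${step.executionBlockType}`);
    }
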
@@ -6485,9 +6720,9 @@ export declare namespace athena {
  */
  encryptionConfiguration?: outputs.athena.WorkGroupEncryptionConfiguration;
  /**
- * The AWS account ID that you expect to be the owner of the Amazon S3 bucket specified by `ResultConfiguration$OutputLocation` . If set, Athena uses the value for `ExpectedBucketOwner` when it makes Amazon S3 calls to your specified output location. If the `ExpectedBucketOwner` AWS account ID does not match the actual owner of the Amazon S3 bucket, the call fails with a permissions error.
+ * The AWS account ID that you expect to be the owner of the Amazon S3 bucket specified by [](https://docs.aws.amazon.com/AWSCloudFormation/latest/TemplateReference/aws-properties-athena-workgroup-resultconfiguration.html#cfn-athena-workgroup-resultconfiguration-outputlocation) . If set, Athena uses the value for `ExpectedBucketOwner` when it makes Amazon S3 calls to your specified output location. If the `ExpectedBucketOwner` AWS account ID does not match the actual owner of the Amazon S3 bucket, the call fails with a permissions error.
  *
- * If workgroup settings override client-side settings, then the query uses the `ExpectedBucketOwner` setting that is specified for the workgroup, and also uses the location for storing query results specified in the workgroup. See `WorkGroupConfiguration$EnforceWorkGroupConfiguration` and [Workgroup Settings Override Client-Side Settings](https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html) .
+ * If workgroup settings override client-side settings, then the query uses the `ExpectedBucketOwner` setting that is specified for the workgroup, and also uses the location for storing query results specified in the workgroup. See [](https://docs.aws.amazon.com/AWSCloudFormation/latest/TemplateReference/aws-properties-athena-workgroup-workgroupconfiguration.html#cfn-athena-workgroup-workgroupconfiguration-enforceworkgroupconfiguration) and [Workgroup Settings Override Client-Side Settings](https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html) .
  */
  expectedBucketOwner?: string;
  /**
@@ -6503,7 +6738,7 @@ export declare namespace athena {
  */
  removeEncryptionConfiguration?: boolean;
  /**
- * If set to "true", removes the AWS account ID previously specified for `ResultConfiguration$ExpectedBucketOwner` . If set to "false" or not set, and a value is present in the `ExpectedBucketOwner` in `ResultConfigurationUpdates` (the client-side setting), the `ExpectedBucketOwner` in the workgroup's `ResultConfiguration` is updated with the new value. For more information, see [Workgroup Settings Override Client-Side Settings](https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html) .
+ * If set to "true", removes the AWS account ID previously specified for [](https://docs.aws.amazon.com/AWSCloudFormation/latest/TemplateReference/aws-properties-athena-workgroup-resultconfiguration.html#cfn-athena-workgroup-resultconfiguration-expectedbucketowner) . If set to "false" or not set, and a value is present in the `ExpectedBucketOwner` in `ResultConfigurationUpdates` (the client-side setting), the `ExpectedBucketOwner` in the workgroup's `ResultConfiguration` is updated with the new value. For more information, see [Workgroup Settings Override Client-Side Settings](https://docs.aws.amazon.com/athena/latest/ug/workgroups-settings-override.html) .
  */
  removeExpectedBucketOwner?: boolean;
  /**
@@ -9798,6 +10033,16 @@ export declare namespace batch {
  */
  state: enums.batch.JobQueueJobStateTimeLimitActionState;
  }
+ interface JobQueueServiceEnvironmentOrder {
+ /**
+ * The order of the service environment. Job queues with a higher priority are evaluated first when associated with the same service environment.
+ */
+ order: number;
+ /**
+ * The name or ARN of the service environment.
+ */
+ serviceEnvironment: string;
+ }
  /**
  * Fair Share Policy for the Job Queue.
  */
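
Note: a sketch of the new JobQueueServiceEnvironmentOrder shape added above, assuming the usual @pulumi/aws-native "types.output" layout. The values are placeholders, and the matching AWS::Batch::JobQueue input is in batch/jobQueue.d.ts (+18 -2), which is not expanded in this diff.

    import * as aws_native from "@pulumi/aws-native";

    // Order entry tying a job queue to a service environment (illustrative values).
    const envOrder: aws_native.types.output.batch.JobQueueServiceEnvironmentOrder = {
        order: 1,                                     // relative ordering within the service environment
        serviceEnvironment: "my-service-environment", // name or ARN (hypothetical)
    };
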
@@ -15989,6 +16234,13 @@ export declare namespace cloudfront {
  * For more information, see [Using Origin Shield](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/origin-shield.html) in the *Amazon CloudFront Developer Guide*.
  */
  originShield?: outputs.cloudfront.DistributionOriginShield;
+ /**
+ * The time (in seconds) that a request from CloudFront to the origin can stay open and wait for a response. If the complete response isn't received from the origin by this time, CloudFront ends the connection.
+ *
+ * The value for `ResponseCompletionTimeout` must be equal to or greater than the value for `OriginReadTimeout` . If you don't set a value for `ResponseCompletionTimeout` , CloudFront doesn't enforce a maximum value.
+ *
+ * For more information, see [Response completion timeout](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/DownloadDistValuesOrigin.html#response-completion-timeout) in the *Amazon CloudFront Developer Guide* .
+ */
  responseCompletionTimeout?: number;
  /**
  * Use this type to specify an origin that is an Amazon S3 bucket that is not configured with static website hosting. To specify any other type of origin, including an Amazon S3 bucket that is configured with static website hosting, use the ``CustomOriginConfig`` type instead.
@@ -16143,6 +16395,11 @@ export declare namespace cloudfront {
  * For more information about the origin access identity, see [Serving Private Content through CloudFront](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/PrivateContent.html) in the *Amazon CloudFront Developer Guide*.
  */
  originAccessIdentity?: string;
+ /**
+ * Specifies how long, in seconds, CloudFront waits for a response from the origin. This is also known as the *origin response timeout* . The minimum timeout is 1 second, the maximum is 120 seconds, and the default (if you don't specify otherwise) is 30 seconds.
+ *
+ * For more information, see [Response timeout](https://docs.aws.amazon.com/AmazonCloudFront/latest/DeveloperGuide/DownloadDistValuesOrigin.html#DownloadDistValuesOriginResponseTimeout) in the *Amazon CloudFront Developer Guide* .
+ */
  originReadTimeout?: number;
  }
  /**
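
Note: the two CloudFront docstrings above imply a simple relationship between the origin timeouts: ResponseCompletionTimeout, when set, must be greater than or equal to OriginReadTimeout (which ranges from 1 to 120 seconds and defaults to 30). A small hedged TypeScript helper expressing just that documented rule, independent of any particular resource shape:

    // Returns true when the documented CloudFront constraint holds.
    // originReadTimeout defaults to 30 seconds when unset; an unset
    // responseCompletionTimeout means no maximum is enforced.
    function timeoutsAreConsistent(responseCompletionTimeout?: number, originReadTimeout: number = 30): boolean {
        return responseCompletionTimeout === undefined || responseCompletionTimeout >= originReadTimeout;
    }
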
@@ -17162,7 +17419,7 @@ export declare namespace cloudtrail {
  }
  export declare namespace cloudwatch {
  /**
- * Dimension is an embedded property of the ``AWS::CloudWatch::Alarm`` type. Dimensions are name/value pairs that can be associated with a CW metric. You can specify a maximum of 10 dimensions for a given metric.
+ * Dimension is an embedded property of the ``AWS::CloudWatch::Alarm`` type. Dimensions are name/value pairs that can be associated with a CW metric. You can specify a maximum of 30 dimensions for a given metric.
  */
  interface AlarmDimension {
  /**
@@ -17219,7 +17476,7 @@ export declare namespace cloudwatch {
  */
  metricStat?: outputs.cloudwatch.AlarmMetricStat;
  /**
- * The granularity, in seconds, of the returned data points. For metrics with regular resolution, a period can be as short as one minute (60 seconds) and must be a multiple of 60. For high-resolution metrics that are collected at intervals of less than one minute, the period can be 1, 5, 10, 30, 60, or any multiple of 60. High-resolution metrics are those metrics stored by a ``PutMetricData`` operation that includes a ``StorageResolution of 1 second``.
+ * The granularity, in seconds, of the returned data points. For metrics with regular resolution, a period can be as short as one minute (60 seconds) and must be a multiple of 60. For high-resolution metrics that are collected at intervals of less than one minute, the period can be 1, 5, 10, 20, 30, 60, or any multiple of 60. High-resolution metrics are those metrics stored by a ``PutMetricData`` operation that includes a ``StorageResolution of 1 second``.
  */
  period?: number;
  /**
@@ -17231,7 +17488,7 @@ export declare namespace cloudwatch {
  }
  /**
  * This structure defines the metric to be returned, along with the statistics, period, and units.
- * ``MetricStat`` is a property of the [MetricDataQuery](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudwatch-alarm-metricdataquery.html) property type.
+ * ``MetricStat`` is a property of the [MetricDataQuery](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-cloudwatch-alarm-metricdataquery.html) property type.
  */
  interface AlarmMetricStat {
  /**
@@ -17239,7 +17496,7 @@ export declare namespace cloudwatch {
  */
  metric: outputs.cloudwatch.AlarmMetric;
  /**
- * The granularity, in seconds, of the returned data points. For metrics with regular resolution, a period can be as short as one minute (60 seconds) and must be a multiple of 60. For high-resolution metrics that are collected at intervals of less than one minute, the period can be 1, 5, 10, 30, 60, or any multiple of 60. High-resolution metrics are those metrics stored by a ``PutMetricData`` call that includes a ``StorageResolution`` of 1 second.
+ * The granularity, in seconds, of the returned data points. For metrics with regular resolution, a period can be as short as one minute (60 seconds) and must be a multiple of 60. For high-resolution metrics that are collected at intervals of less than one minute, the period can be 1, 5, 10, 20, 30, 60, or any multiple of 60. High-resolution metrics are those metrics stored by a ``PutMetricData`` call that includes a ``StorageResolution`` of 1 second.
  * If the ``StartTime`` parameter specifies a time stamp that is greater than 3 hours ago, you must specify the period as follows or no data points in that time range is returned:
  * + Start time between 3 hours and 15 days ago - Use a multiple of 60 seconds (1 minute).
  * + Start time between 15 and 63 days ago - Use a multiple of 300 seconds (5 minutes).
@@ -23541,6 +23798,7 @@ export declare namespace deadline {
  instanceCapabilities: outputs.deadline.FleetServiceManagedEc2InstanceCapabilities;
  instanceMarketOptions: outputs.deadline.FleetServiceManagedEc2InstanceMarketOptions;
  storageProfileId?: string;
+ vpcConfiguration?: outputs.deadline.FleetVpcConfiguration;
  }
  interface FleetServiceManagedEc2InstanceCapabilities {
  acceleratorCapabilities?: outputs.deadline.FleetAcceleratorCapabilities;
@@ -23561,6 +23819,9 @@ export declare namespace deadline {
  max?: number;
  min: number;
  }
+ interface FleetVpcConfiguration {
+ resourceConfigurationArns?: string[];
+ }
  interface QueueJobAttachmentSettings {
  /**
  * The root prefix.
@@ -25748,17 +26009,17 @@ export declare namespace ec2 {
  }
  interface InstanceElasticGpuSpecification {
  /**
- * The type of Elastic Graphics accelerator.
+ * The type of Elastic Graphics accelerator. Amazon Elastic Graphics is no longer available.
  */
  type: string;
  }
  interface InstanceElasticInferenceAccelerator {
  /**
- * The number of elastic inference accelerators to attach to the instance.
+ * The number of elastic inference accelerators to attach to the instance. Amazon Elastic Inference is no longer available.
  */
  count?: number;
  /**
- * The type of elastic inference accelerator.
+ * The type of elastic inference accelerator. Amazon Elastic Inference is no longer available.
  */
  type: string;
  }
@@ -26216,17 +26477,6 @@ export declare namespace ec2 {
  * Indicates whether the instance is optimized for Amazon EBS I/O. This optimization provides dedicated throughput to Amazon EBS and an optimized configuration stack to provide optimal Amazon EBS I/O performance. This optimization isn't available with all instance types. Additional usage charges apply when using an EBS-optimized instance.
  */
  ebsOptimized?: boolean;
- /**
- * Deprecated.
- * Amazon Elastic Graphics reached end of life on January 8, 2024.
- */
- elasticGpuSpecifications?: outputs.ec2.LaunchTemplateElasticGpuSpecification[];
- /**
- * Amazon Elastic Inference is no longer available.
- * An elastic inference accelerator to associate with the instance. Elastic inference accelerators are a resource you can attach to your Amazon EC2 instances to accelerate your Deep Learning (DL) inference workloads.
- * You cannot specify accelerators from different generations in the same request.
- */
- elasticInferenceAccelerators?: outputs.ec2.LaunchTemplateElasticInferenceAccelerator[];
  /**
  * Indicates whether the instance is enabled for AWS Nitro Enclaves. For more information, see [What is Nitro Enclaves?](https://docs.aws.amazon.com/enclaves/latest/user/nitro-enclave.html) in the *Nitro Enclaves User Guide*.
  * You can't enable AWS Nitro Enclaves and hibernation on the same instance.
@@ -26408,33 +26658,6 @@ export declare namespace ec2 {
  */
  volumeType?: string;
  }
- /**
- * Amazon Elastic Graphics reached end of life on January 8, 2024.
- * Specifies a specification for an Elastic GPU for an Amazon EC2 launch template.
- * ``ElasticGpuSpecification`` is a property of [AWS::EC2::LaunchTemplate LaunchTemplateData](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-launchtemplate-launchtemplatedata.html).
- */
- interface LaunchTemplateElasticGpuSpecification {
- /**
- * The type of Elastic Graphics accelerator.
- */
- type?: string;
- }
- /**
- * Amazon Elastic Inference is no longer available.
- * Specifies an elastic inference accelerator.
- * ``LaunchTemplateElasticInferenceAccelerator`` is a property of [AWS::EC2::LaunchTemplate LaunchTemplateData](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-ec2-launchtemplate-launchtemplatedata.html).
- */
- interface LaunchTemplateElasticInferenceAccelerator {
- /**
- * The number of elastic inference accelerators to attach to the instance.
- * Default: 1
- */
- count?: number;
- /**
- * The type of elastic inference accelerator. The possible values are eia1.medium, eia1.large, and eia1.xlarge.
- */
- type?: string;
- }
  /**
  * ENA Express uses AWS Scalable Reliable Datagram (SRD) technology to increase the maximum bandwidth used per stream and minimize tail latency of network traffic between EC2 instances. With ENA Express, you can communicate between two EC2 instances in the same subnet within the same account, or in different accounts. Both sending and receiving instances must have ENA Express enabled.
  * To improve the reliability of network packet delivery, ENA Express reorders network packets on the receiving end by default. However, some UDP-based applications are designed to handle network packets that are out of order to reduce the overhead for packet delivery at the network layer. When ENA Express is enabled, you can specify whether UDP network traffic uses it.
@@ -26873,6 +27096,7 @@ export declare namespace ec2 {
  * If you create a launch template that includes secondary network interfaces but no primary network interface, and you specify it using the ``LaunchTemplate`` property of ``AWS::EC2::Instance``, then you must include a primary network interface using the ``NetworkInterfaces`` property of ``AWS::EC2::Instance``.
  */
  deviceIndex?: number;
+ enaQueueCount?: number;
  /**
  * The ENA Express configuration for the network interface.
  */
@@ -29580,6 +29804,13 @@ export declare namespace ecr {
  */
  kmsKey?: string;
  }
+ /**
+ * Overrides the default image tag mutability setting of the repository for image tags that match the specified filters.
+ */
+ interface RepositoryCreationTemplateImageTagMutabilityExclusionFilter {
+ imageTagMutabilityExclusionFilterType: enums.ecr.RepositoryCreationTemplateImageTagMutabilityExclusionFilterImageTagMutabilityExclusionFilterType;
+ imageTagMutabilityExclusionFilterValue: string;
+ }
  /**
  * The metadata to apply to a resource to help you categorize and organize them. Each tag consists of a key and a value, both of which you define. Tag keys can have a maximum character length of 128 characters, and tag values can have a maximum length of 256 characters.
  */
@@ -29625,6 +29856,10 @@ export declare namespace ecr {
  */
  scanOnPush?: boolean;
  }
+ interface RepositoryImageTagMutabilityExclusionFilter {
+ imageTagMutabilityExclusionFilterType: enums.ecr.RepositoryImageTagMutabilityExclusionFilterType;
+ imageTagMutabilityExclusionFilterValue: string;
+ }
  /**
  * The ``LifecyclePolicy`` property type specifies a lifecycle policy. For information about lifecycle policy syntax, see [Lifecycle policy template](https://docs.aws.amazon.com/AmazonECR/latest/userguide/LifecyclePolicies.html) in the *Amazon ECR User Guide*.
  */
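
Note: both new ECR filter interfaces above share the same two fields. A TypeScript sketch of the repository-level variant; the enum members come from types/enums/ecr (+15 lines, not expanded here), so the filter type below is an assumed placeholder value.

    // Exclude tags matching a pattern from the repository's default tag mutability setting.
    // Shaped like ecr.RepositoryImageTagMutabilityExclusionFilter (see interface above).
    const exclusionFilter = {
        imageTagMutabilityExclusionFilterType: "WILDCARD",   // assumed enum value; see types/enums/ecr in this release
        imageTagMutabilityExclusionFilterValue: "release-*", // hypothetical tag pattern
    };
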
@@ -29827,6 +30062,9 @@ export declare namespace ecs {
  */
  value?: string;
  }
+ /**
+ * The advanced settings for a load balancer used in blue/green deployments. Specify the alternate target group, listener rules, and IAM role required for traffic shifting during blue/green deployments. For more information, see [Required resources for Amazon ECS blue/green deployments](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/blue-green-deployment-implementation.html) in the *Amazon Elastic Container Service Developer Guide*.
+ */
  interface ServiceAdvancedConfiguration {
  /**
  * The Amazon Resource Name (ARN) of the alternate target group for Amazon ECS blue/green deployments.
@@ -29978,6 +30216,10 @@ export declare namespace ecs {
  */
  tls?: outputs.ecs.ServiceConnectTlsConfiguration;
  }
+ /**
+ * The test traffic routing configuration for Amazon ECS blue/green deployments. This configuration allows you to define rules for routing specific traffic to the new service revision during the deployment process, allowing for safe testing before full production traffic shift.
+ * For more information, see [Service Connect for Amazon ECS blue/green deployments](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/service-connect-blue-green.html) in the *Amazon Elastic Container Service Developer Guide*.
+ */
  interface ServiceConnectTestTrafficRules {
  /**
  * The HTTP header-based routing rules that determine which requests should be routed to the new service version during blue/green deployment testing. These rules provide fine-grained control over test traffic routing based on request headers.
@@ -30020,7 +30262,7 @@ export declare namespace ecs {
  /**
  * One of the methods which provide a way for you to quickly identify when a deployment has failed, and then to optionally roll back the failure to the last working deployment.
  * When the alarms are generated, Amazon ECS sets the service deployment to failed. Set the rollback parameter to have Amazon ECS to roll back your service to the last completed deployment after a failure.
- * You can only use the ``DeploymentAlarms`` method to detect failures when the ``DeploymentController`` is set to ``ECS`` (rolling update).
+ * You can only use the ``DeploymentAlarms`` method to detect failures when the ``DeploymentController`` is set to ``ECS``.
  * For more information, see [Rolling update](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-type-ecs.html) in the *Amazon Elastic Container Service Developer Guide*.
  */
  interface ServiceDeploymentAlarms {
@@ -30062,12 +30304,10 @@ export declare namespace ecs {
  alarms?: outputs.ecs.ServiceDeploymentAlarms;
  /**
  * The duration when both blue and green service revisions are running simultaneously after the production traffic has shifted.
- *
- * The following rules apply when you don't specify a value:
- *
- * - For rolling deployments, the value is set to 3 hours (180 minutes).
- * - When you use an external deployment controller ( `EXTERNAL` ), or the CodeDeploy blue/green deployment controller ( `CODE_DEPLOY` ), the value is set to 3 hours (180 minutes).
- * - For all other cases, the value is set to 36 hours (2160 minutes).
+ * The following rules apply when you don't specify a value:
+ * + For rolling deployments, the value is set to 3 hours (180 minutes).
+ * + When you use an external deployment controller (``EXTERNAL``), or the ACD blue/green deployment controller (``CODE_DEPLOY``), the value is set to 3 hours (180 minutes).
+ * + For all other cases, the value is set to 36 hours (2160 minutes).
  */
  bakeTimeInMinutes?: number;
  /**
@@ -30108,9 +30348,8 @@ export declare namespace ecs {
  minimumHealthyPercent?: number;
  /**
  * The deployment strategy for the service. Choose from these valid values:
- *
- * - `ROLLING` - When you create a service which uses the rolling update ( `ROLLING` ) deployment strategy, the Amazon ECS service scheduler replaces the currently running tasks with new tasks. The number of tasks that Amazon ECS adds or removes from the service during a rolling update is controlled by the service deployment configuration.
- * - `BLUE_GREEN` - A blue/green deployment strategy ( `BLUE_GREEN` ) is a release methodology that reduces downtime and risk by running two identical production environments called blue and green. With Amazon ECS blue/green deployments, you can validate new service revisions before directing production traffic to them. This approach provides a safer way to deploy changes with the ability to quickly roll back if needed.
+ * + ``ROLLING`` - When you create a service which uses the rolling update (``ROLLING``) deployment strategy, the Amazon ECS service scheduler replaces the currently running tasks with new tasks. The number of tasks that Amazon ECS adds or removes from the service during a rolling update is controlled by the service deployment configuration.
+ * + ``BLUE_GREEN`` - A blue/green deployment strategy (``BLUE_GREEN``) is a release methodology that reduces downtime and risk by running two identical production environments called blue and green. With Amazon ECS blue/green deployments, you can validate new service revisions before directing production traffic to them. This approach provides a safer way to deploy changes with the ability to quickly roll back if needed.
  */
  strategy?: enums.ecs.ServiceDeploymentConfigurationStrategy;
  }
@@ -30119,64 +30358,83 @@ export declare namespace ecs {
  */
  interface ServiceDeploymentController {
  /**
- * The deployment controller type to use. There are three deployment controller types available:
- * + ECS The rolling update (ECS) deployment type involves replacing the current running version of the container with the latest version. The number of containers Amazon ECS adds or removes from the service during a rolling update is controlled by adjusting the minimum and maximum number of healthy tasks allowed during a service deployment, as specified in the DeploymentConfiguration. + CODE_DEPLOY The blue/green (CODE_DEPLOY) deployment type uses the blue/green deployment model powered by , which allows you to verify a new deployment of a service before sending production traffic to it. + EXTERNAL The external (EXTERNAL) deployment type enables you to use any third-party deployment controller for full control over the deployment process for an Amazon ECS service.
+ * The deployment controller type to use.
+ * The deployment controller is the mechanism that determines how tasks are deployed for your service. The valid options are:
+ * + ECS
+ * When you create a service which uses the ``ECS`` deployment controller, you can choose between the following deployment strategies:
+ * + ``ROLLING``: When you create a service which uses the *rolling update* (``ROLLING``) deployment strategy, the ECS service scheduler replaces the currently running tasks with new tasks. The number of tasks that ECS adds or removes from the service during a rolling update is controlled by the service deployment configuration.
+ * Rolling update deployments are best suited for the following scenarios:
+ * + Gradual service updates: You need to update your service incrementally without taking the entire service offline at once.
+ * + Limited resource requirements: You want to avoid the additional resource costs of running two complete environments simultaneously (as required by blue/green deployments).
+ * + Acceptable deployment time: Your application can tolerate a longer deployment process, as rolling updates replace tasks one by one.
+ * + No need for instant roll back: Your service can tolerate a rollback process that takes minutes rather than seconds.
+ * + Simple deployment process: You prefer a straightforward deployment approach without the complexity of managing multiple environments, target groups, and listeners.
+ * + No load balancer requirement: Your service doesn't use or require a load balancer, ALB, NLB, or Service Connect (which are required for blue/green deployments).
+ * + Stateful applications: Your application maintains state that makes it difficult to run two parallel environments.
+ * + Cost sensitivity: You want to minimize deployment costs by not running duplicate environments during deployment.
+ *
+ * Rolling updates are the default deployment strategy for services and provide a balance between deployment safety and resource efficiency for many common application scenarios.
+ * + ``BLUE_GREEN``: A *blue/green* deployment strategy (``BLUE_GREEN``) is a release methodology that reduces downtime and risk by running two identical production environments called blue and green. With ECS blue/green deployments, you can validate new service revisions before directing production traffic to them. This approach provides a safer way to deploy changes with the ability to quickly roll back if needed.
+ * ECS blue/green deployments are best suited for the following scenarios:
+ * + Service validation: When you need to validate new service revisions before directing production traffic to them
+ * + Zero downtime: When your service requires zero-downtime deployments
+ * + Instant roll back: When you need the ability to quickly roll back if issues are detected
+ * + Load balancer requirement: When your service uses ALB, NLB, or Service Connect
+ *
+ *
+ * + External
+ * Use a third-party deployment controller.
+ * + Blue/green deployment (powered by ACD)
+ * ACD installs an updated version of the application as a new replacement task set and reroutes production traffic from the original application task set to the replacement task set. The original task set is terminated after a successful deployment. Use this deployment controller to verify a new deployment of a service before sending production traffic to it.
+ *
+ * When updating the deployment controller for a service, consider the following depending on the type of migration you're performing.
+ * + If you have a template that contains the ``EXTERNAL`` deployment controller information as well as ``TaskSet`` and ``PrimaryTaskSet`` resources, and you remove the task set resources from the template when updating from ``EXTERNAL`` to ``ECS``, the ``DescribeTaskSet`` and ``DeleteTaskSet`` API calls will return a 400 error after the deployment controller is updated to ``ECS``. This results in a delete failure on the task set resources, even though the stack transitions to ``UPDATE_COMPLETE`` status. For more information, see [Resource removed from stack but not deleted](https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/troubleshooting.html#troubleshooting-errors-resource-removed-not-deleted) in the CFNlong User Guide. To fix this issue, delete the task sets directly using the ECS``DeleteTaskSet`` API. For more information about how to delete a task set, see [DeleteTaskSet](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_DeleteTaskSet.html) in the ECSlong API Reference.
+ * + If you're migrating from ``CODE_DEPLOY`` to ``ECS`` with a new task definition and CFN performs a rollback operation, the ECS``UpdateService`` request fails with the following error:
+ * Resource handler returned message: "Invalid request provided: Unable to update task definition on services with a CODE_DEPLOY deployment controller.
+ * + After a successful migration from ``ECS`` to ``EXTERNAL`` deployment controller, you need to manually remove the ``ACTIVE`` task set, because ECS no longer manages the deployment. For information about how to delete a task set, see [DeleteTaskSet](https://docs.aws.amazon.com/AmazonECS/latest/APIReference/API_DeleteTaskSet.html) in the ECSlong API Reference.
  */
  type?: enums.ecs.ServiceDeploymentControllerType;
  }
+ /**
+ * A deployment lifecycle hook runs custom logic at specific stages of the deployment process. Currently, you can use Lambda functions as hook targets.
+ * For more information, see [Lifecycle hooks for Amazon ECS service deployments](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/deployment-lifecycle-hooks.html) in the *Amazon Elastic Container Service Developer Guide*.
+ */
  interface ServiceDeploymentLifecycleHook {
  /**
  * The Amazon Resource Name (ARN) of the hook target. Currently, only Lambda function ARNs are supported.
- *
- * You must provide this parameter when configuring a deployment lifecycle hook.
+ * You must provide this parameter when configuring a deployment lifecycle hook.
  */
  hookTargetArn: string;
  /**
  * The lifecycle stages at which to run the hook. Choose from these valid values:
- *
- * - RECONCILE_SERVICE
- *
- * The reconciliation stage that only happens when you start a new service deployment with more than 1 service revision in an ACTIVE state.
- *
- * You can use a lifecycle hook for this stage.
- * - PRE_SCALE_UP
- *
- * The green service revision has not started. The blue service revision is handling 100% of the production traffic. There is no test traffic.
- *
- * You can use a lifecycle hook for this stage.
- * - POST_SCALE_UP
- *
- * The green service revision has started. The blue service revision is handling 100% of the production traffic. There is no test traffic.
- *
- * You can use a lifecycle hook for this stage.
- * - TEST_TRAFFIC_SHIFT
- *
- * The blue and green service revisions are running. The blue service revision handles 100% of the production traffic. The green service revision is migrating from 0% to 100% of test traffic.
- *
- * You can use a lifecycle hook for this stage.
- * - POST_TEST_TRAFFIC_SHIFT
- *
- * The test traffic shift is complete. The green service revision handles 100% of the test traffic.
- *
- * You can use a lifecycle hook for this stage.
- * - PRODUCTION_TRAFFIC_SHIFT
- *
- * Production traffic is shifting to the green service revision. The green service revision is migrating from 0% to 100% of production traffic.
- *
- * You can use a lifecycle hook for this stage.
- * - POST_PRODUCTION_TRAFFIC_SHIFT
- *
- * The production traffic shift is complete.
- *
- * You can use a lifecycle hook for this stage.
- *
- * You must provide this parameter when configuring a deployment lifecycle hook.
+ * + RECONCILE_SERVICE
+ * The reconciliation stage that only happens when you start a new service deployment with more than 1 service revision in an ACTIVE state.
+ * You can use a lifecycle hook for this stage.
+ * + PRE_SCALE_UP
+ * The green service revision has not started. The blue service revision is handling 100% of the production traffic. There is no test traffic.
+ * You can use a lifecycle hook for this stage.
+ * + POST_SCALE_UP
+ * The green service revision has started. The blue service revision is handling 100% of the production traffic. There is no test traffic.
+ * You can use a lifecycle hook for this stage.
+ * + TEST_TRAFFIC_SHIFT
+ * The blue and green service revisions are running. The blue service revision handles 100% of the production traffic. The green service revision is migrating from 0% to 100% of test traffic.
+ * You can use a lifecycle hook for this stage.
+ * + POST_TEST_TRAFFIC_SHIFT
+ * The test traffic shift is complete. The green service revision handles 100% of the test traffic.
+ * You can use a lifecycle hook for this stage.
+ * + PRODUCTION_TRAFFIC_SHIFT
+ * Production traffic is shifting to the green service revision. The green service revision is migrating from 0% to 100% of production traffic.
+ * You can use a lifecycle hook for this stage.
+ * + POST_PRODUCTION_TRAFFIC_SHIFT
+ * The production traffic shift is complete.
+ * You can use a lifecycle hook for this stage.
+ *
+ * You must provide this parameter when configuring a deployment lifecycle hook.
  */
  lifecycleStages: enums.ecs.ServiceDeploymentLifecycleHookLifecycleStagesItem[];
  /**
  * The Amazon Resource Name (ARN) of the IAM role that grants Amazon ECS permission to call Lambda functions on your behalf.
- *
- * For more information, see [Permissions required for Lambda functions in Amazon ECS blue/green deployments](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/blue-green-permissions.html) in the *Amazon Elastic Container Service Developer Guide* .
+ * For more information, see [Permissions required for Lambda functions in Amazon ECS blue/green deployments](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/blue-green-permissions.html) in the *Amazon Elastic Container Service Developer Guide*.
  */
  roleArn: string;
  }
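
Note: a TypeScript sketch of a deployment lifecycle hook shaped like ServiceDeploymentLifecycleHook above, using a stage name taken from the documentation in this hunk; the ARNs are placeholders, and the enum members for lifecycleStages live in types/enums/ecs (+36 -5, not expanded here).

    // Run a Lambda-based validation hook after test traffic has shifted to the green revision.
    const lifecycleHook = {
        hookTargetArn: "arn:aws:lambda:us-east-1:123456789012:function:validate-green", // placeholder Lambda ARN
        lifecycleStages: ["POST_TEST_TRAFFIC_SHIFT"],                                   // stage documented above
        roleArn: "arn:aws:iam::123456789012:role/ecs-bluegreen-hook-role",              // placeholder IAM role
    };
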
@@ -32793,7 +33051,7 @@ export declare namespace elasticloadbalancingv2 {
  */
  interface ListenerRuleHostHeaderConfig {
  /**
- * The host names. The maximum size of each name is 128 characters. The comparison is case insensitive. The following wildcard characters are supported: * (matches 0 or more characters) and ? (matches exactly 1 character).
+ * The host names. The maximum size of each name is 128 characters. The comparison is case insensitive. The following wildcard characters are supported: * (matches 0 or more characters) and ? (matches exactly 1 character). You must include at least one "." character. You can include only alphabetical characters after the final "." character.
  * If you specify multiple strings, the condition is satisfied if one of the strings matches the host name.
  */
  values?: string[];
@@ -32968,7 +33226,7 @@ export declare namespace elasticloadbalancingv2 {
32968
33226
  */
32969
33227
  interface ListenerRuleTargetGroupStickinessConfig {
32970
33228
  /**
32971
- * The time period, in seconds, during which requests from a client should be routed to the same target group. The range is 1-604800 seconds (7 days).
33229
+ * The time period, in seconds, during which requests from a client should be routed to the same target group. The range is 1-604800 seconds (7 days). You must specify this value when enabling target group stickiness.
32972
33230
  */
32973
33231
  durationSeconds?: number;
32974
33232
  /**
@@ -32994,7 +33252,7 @@ export declare namespace elasticloadbalancingv2 {
32994
33252
  */
32995
33253
  interface ListenerTargetGroupStickinessConfig {
32996
33254
  /**
32997
- * The time period, in seconds, during which requests from a client should be routed to the same target group. The range is 1-604800 seconds (7 days).
33255
+ * The time period, in seconds, during which requests from a client should be routed to the same target group. The range is 1-604800 seconds (7 days). You must specify this value when enabling target group stickiness.
32998
33256
  */
32999
33257
  durationSeconds?: number;
33000
33258
  /**
@@ -33447,9 +33705,9 @@ export declare namespace entityresolution {
33447
33705
  /**
33448
33706
  * The comparison type. You can either choose `ONE_TO_ONE` or `MANY_TO_MANY` as the `attributeMatchingModel` .
33449
33707
  *
33450
- * If you choose `MANY_TO_MANY` , the system can match attributes across the sub-types of an attribute type. For example, if the value of the `Email` field of Profile A matches the value of the `BusinessEmail` field of Profile B, the two profiles are matched on the `Email` attribute type.
33451
- *
33452
33708
  * If you choose `ONE_TO_ONE` , the system can only match attributes if the sub-types are an exact match. For example, for the `Email` attribute type, the system will only consider it a match if the value of the `Email` field of Profile A matches the value of the `Email` field of Profile B.
33709
+ *
33710
+ * If you choose `MANY_TO_MANY` , the system can match attributes across the sub-types of an attribute type. For example, if the value of the `Email` field of Profile A matches the value of the `BusinessEmail` field of Profile B, the two profiles are matched on the `Email` attribute type.
33453
33711
  */
33454
33712
  attributeMatchingModel: enums.entityresolution.IdMappingWorkflowIdMappingRuleBasedPropertiesAttributeMatchingModel;
33455
33713
  /**
@@ -33583,9 +33841,9 @@ export declare namespace entityresolution {
33583
33841
  /**
33584
33842
  * The comparison type. You can either choose `ONE_TO_ONE` or `MANY_TO_MANY` as the `attributeMatchingModel` .
33585
33843
  *
33586
- * If you choose `MANY_TO_MANY` , the system can match attributes across the sub-types of an attribute type. For example, if the value of the `Email` field of Profile A matches the value of `BusinessEmail` field of Profile B, the two profiles are matched on the `Email` attribute type.
33587
- *
33588
33844
  * If you choose `ONE_TO_ONE` , the system can only match attributes if the sub-types are an exact match. For example, for the `Email` attribute type, the system will only consider it a match if the value of the `Email` field of Profile A matches the value of the `Email` field of Profile B.
33845
+ *
33846
+ * If you choose `MANY_TO_MANY` , the system can match attributes across the sub-types of an attribute type. For example, if the value of the `Email` field of Profile A matches the value of `BusinessEmail` field of Profile B, the two profiles are matched on the `Email` attribute type.
33589
33847
  */
33590
33848
  attributeMatchingModel?: enums.entityresolution.IdNamespaceNamespaceRuleBasedPropertiesAttributeMatchingModel;
33591
33849
  /**
@@ -33619,7 +33877,7 @@ export declare namespace entityresolution {
33619
33877
  /**
33620
33878
  * The type of incremental run. The only valid value is `IMMEDIATE` . This appears as "Automatic" in the console.
33621
33879
  *
33622
- * > For workflows where `resolutionType` is `ML_MATCHING` , incremental processing is not supported.
33880
+ * > For workflows where `resolutionType` is `ML_MATCHING` or `PROVIDER` , incremental processing is not supported.
33623
33881
  */
33624
33882
  incrementalRunType: enums.entityresolution.MatchingWorkflowIncrementalRunConfigIncrementalRunType;
33625
33883
  }
@@ -33693,13 +33951,21 @@ export declare namespace entityresolution {
33693
33951
  */
33694
33952
  providerProperties?: outputs.entityresolution.MatchingWorkflowProviderProperties;
33695
33953
  /**
33696
- * The type of matching. There are three types of matching: `RULE_MATCHING` , `ML_MATCHING` , and `PROVIDER` .
33954
+ * The type of matching workflow to create. Specify one of the following types:
33955
+ *
33956
+ * - `RULE_MATCHING` : Match records using configurable rule-based criteria
33957
+ * - `ML_MATCHING` : Match records using machine learning models
33958
+ * - `PROVIDER` : Match records using a third-party matching provider
33697
33959
  */
33698
33960
  resolutionType?: enums.entityresolution.MatchingWorkflowResolutionType;
33699
33961
  /**
33700
- * An object which defines the list of matching rules to run and has a field `Rules` , which is a list of rule objects.
33962
+ * An object which defines the list of matching rules to run and has a field `rules` , which is a list of rule objects.
33701
33963
  */
33702
33964
  ruleBasedProperties?: outputs.entityresolution.MatchingWorkflowRuleBasedProperties;
33965
+ /**
33966
+ * An object containing the `rules` for a matching workflow.
33967
+ */
33968
+ ruleConditionProperties?: outputs.entityresolution.MatchingWorkflowRuleConditionProperties;
33703
33969
  }
33704
33970
  interface MatchingWorkflowRule {
33705
33971
  /**
@@ -33713,11 +33979,11 @@ export declare namespace entityresolution {
33713
33979
  }
33714
33980
  interface MatchingWorkflowRuleBasedProperties {
33715
33981
  /**
33716
- * The comparison type. You can either choose `ONE_TO_ONE` or `MANY_TO_MANY` as the `attributeMatchingModel` .
33717
- *
33718
- * If you choose `MANY_TO_MANY` , the system can match attributes across the sub-types of an attribute type. For example, if the value of the `Email` field of Profile A and the value of `BusinessEmail` field of Profile B matches, the two profiles are matched on the `Email` attribute type.
33982
+ * The comparison type. You can choose `ONE_TO_ONE` or `MANY_TO_MANY` as the `attributeMatchingModel` .
33719
33983
  *
33720
33984
  * If you choose `ONE_TO_ONE` , the system can only match attributes if the sub-types are an exact match. For example, for the `Email` attribute type, the system will only consider it a match if the value of the `Email` field of Profile A matches the value of the `Email` field of Profile B.
33985
+ *
33986
+ * If you choose `MANY_TO_MANY` , the system can match attributes across the sub-types of an attribute type. For example, if the value of the `Email` field of Profile A and the value of `BusinessEmail` field of Profile B matches, the two profiles are matched on the `Email` attribute type.
33721
33987
  */
33722
33988
  attributeMatchingModel: enums.entityresolution.MatchingWorkflowRuleBasedPropertiesAttributeMatchingModel;
33723
33989
  /**
@@ -33733,6 +33999,32 @@ export declare namespace entityresolution {
33733
33999
  */
33734
34000
  rules: outputs.entityresolution.MatchingWorkflowRule[];
33735
34001
  }
34002
+ interface MatchingWorkflowRuleCondition {
34003
+ /**
34004
+ * A statement that specifies the conditions for a matching rule.
34005
+ *
34006
+ * If your data is accurate, use an Exact matching function: `Exact` or `ExactManyToMany` .
34007
+ *
34008
+ * If your data has variations in spelling or pronunciation, use a Fuzzy matching function: `Cosine` , `Levenshtein` , or `Soundex` .
34009
+ *
34010
+ * Use operators if you want to combine ( `AND` ), separate ( `OR` ), or group matching functions `(...)` .
34011
+ *
34012
+ * For example: `(Cosine(a, 10) AND Exact(b, true)) OR ExactManyToMany(c, d)`
34013
+ */
34014
+ condition?: string;
34015
+ /**
34016
+ * A name for the matching rule.
34017
+ *
34018
+ * For example: `Rule1`
34019
+ */
34020
+ ruleName?: string;
34021
+ }
34022
+ interface MatchingWorkflowRuleConditionProperties {
34023
+ /**
34024
+ * A list of rule objects, each of which has fields `ruleName` and `condition` .
34025
+ */
34026
+ rules: outputs.entityresolution.MatchingWorkflowRuleCondition[];
34027
+ }
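The two new interfaces above define the rule-condition flavor of rule-based matching. A minimal sketch of the value they describe, assuming the corresponding input on `entityresolution.MatchingWorkflow` accepts the same shape under `resolutionTechniques`:

// Hypothetical resolutionTechniques fragment using the new rule-condition shape.
const resolutionTechniques = {
    resolutionType: "RULE_MATCHING",
    ruleConditionProperties: {
        rules: [{
            ruleName: "Rule1",
            condition: "(Cosine(a, 10) AND Exact(b, true)) OR ExactManyToMany(c, d)",
        }],
    },
};

The example condition string is the one quoted in the documentation above; combining exact and fuzzy matching functions with operators is what distinguishes rule conditions from the plain `rules` list in `MatchingWorkflowRuleBasedProperties`.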
33736
34028
  interface SchemaMappingSchemaInputAttribute {
33737
34029
  fieldName: string;
33738
34030
  groupName?: string;
@@ -39547,6 +39839,11 @@ export declare namespace iot {
39547
39839
  content?: string;
39548
39840
  contentType?: string;
39549
39841
  }
39842
+ interface ConfigurationDetailsProperties {
39843
+ configurationStatus?: enums.iot.EncryptionConfigurationConfigurationDetailsPropertiesConfigurationStatus;
39844
+ errorCode?: string;
39845
+ errorMessage?: string;
39846
+ }
39550
39847
  interface DomainConfigurationAuthorizerConfig {
39551
39848
  /**
39552
39849
  * A Boolean that specifies whether the domain configuration's authorization service can be overridden.
@@ -42664,6 +42961,36 @@ export declare namespace iotsitewise {
42664
42961
  */
42665
42962
  type: string;
42666
42963
  }
42964
+ /**
42965
+ * Contains information about enforced interface property and asset model property
42966
+ */
42967
+ interface AssetModelEnforcedAssetModelInterfacePropertyMapping {
42968
+ /**
42969
+ * The external ID of the enforced asset model property
42970
+ */
42971
+ assetModelPropertyExternalId?: string;
42972
+ /**
42973
+ * The logical ID of the enforced asset model property
42974
+ */
42975
+ assetModelPropertyLogicalId?: string;
42976
+ /**
42977
+ * The external ID of the enforced interface property
42978
+ */
42979
+ interfaceAssetModelPropertyExternalId: string;
42980
+ }
42981
+ /**
42982
+ * Contains information about enforced interface hierarchy and asset model hierarchy
42983
+ */
42984
+ interface AssetModelEnforcedAssetModelInterfaceRelationship {
42985
+ /**
42986
+ * The ID of the interface that is enforced to the asset model
42987
+ */
42988
+ interfaceAssetModelId?: string;
42989
+ /**
42990
+ * Contains information about enforced interface property and asset model property
42991
+ */
42992
+ propertyMappings?: outputs.iotsitewise.AssetModelEnforcedAssetModelInterfacePropertyMapping[];
42993
+ }
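A hedged sketch of the relationship these two interfaces describe, written as the plain object an `iotsitewise.AssetModel` would presumably carry; the exact input property that accepts it is not shown in this diff, and the IDs are placeholders.

// Hypothetical enforced-interface relationship for an asset model.
const enforcedInterfaceRelationship = {
    interfaceAssetModelId: "ifc-asset-model-0123456789abcdef",          // placeholder interface asset model ID
    propertyMappings: [{
        interfaceAssetModelPropertyExternalId: "interface-temperature", // required mapping key
        assetModelPropertyExternalId: "boiler-temperature",             // map by external ID...
        // assetModelPropertyLogicalId: "...",                          // ...or by logical ID instead
    }],
};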
42667
42994
  interface AssetModelExpressionVariable {
42668
42995
  /**
42669
42996
  * The friendly name of the variable to be used in the expression.
@@ -52407,6 +52734,10 @@ export declare namespace mediapackagev2 {
52407
52734
  * <p>When true, AWS Elemental MediaPackage performs input switching based on the MQCS. Default is true. This setting is valid only when <code>InputType</code> is <code>CMAF</code>.</p>
52408
52735
  */
52409
52736
  mqcsInputSwitching?: boolean;
52737
+ /**
52738
+ * For CMAF inputs, indicates which input MediaPackage should prefer when both inputs have equal MQCS scores. Select `1` to prefer the first ingest endpoint, or `2` to prefer the second ingest endpoint. If you don't specify a preferred input, MediaPackage uses its default switching behavior when MQCS scores are equal.
52739
+ */
52740
+ preferredInput?: number;
52410
52741
  }
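A short, hedged sketch of the input-switching settings the new `preferredInput` property belongs to; the surrounding `inputSwitchConfiguration` name on the MediaPackage v2 channel resource is assumed, not confirmed by this diff.

// Hypothetical CMAF input switching: follow MQCS, prefer ingest endpoint 1 on ties.
const inputSwitchConfiguration = {
    mqcsInputSwitching: true,
    preferredInput: 1,
};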
52411
52742
  /**
52412
52743
  * <p>The settings for what common media server data (CMSD) headers AWS Elemental MediaPackage includes in responses to the CDN.</p>
@@ -52620,6 +52951,10 @@ export declare namespace mediapackagev2 {
52620
52951
  * <p>The parameters for encrypting content.</p>
52621
52952
  */
52622
52953
  interface OriginEndpointEncryption {
52954
+ /**
52955
+ * <p>Excludes SEIG and SGPD boxes from segment metadata in CMAF containers.</p> <p>When set to <code>true</code>, MediaPackage omits these DRM metadata boxes from CMAF segments, which can improve compatibility with certain devices and players that don't support these boxes.</p> <p>Important considerations:</p> <ul> <li> <p>This setting only affects CMAF container formats</p> </li> <li> <p>Key rotation can still be handled through media playlist signaling</p> </li> <li> <p>PSSH and TENC boxes remain unaffected</p> </li> <li> <p>Default behavior is preserved when this setting is disabled</p> </li> </ul> <p>Valid values: <code>true</code> | <code>false</code> </p> <p>Default: <code>false</code> </p>
52956
+ */
52957
+ cmafExcludeSegmentDrmMetadata?: boolean;
52623
52958
  /**
52624
52959
  * <p>A 128-bit, 16-byte hex value represented by a 32-character string, used in conjunction with the key for encrypting content. If you don't specify a value, then MediaPackage creates the constant initialization vector (IV).</p>
52625
52960
  */
@@ -52803,7 +53138,13 @@ export declare namespace mediapackagev2 {
52803
53138
  urlEncodeChildManifest?: boolean;
52804
53139
  }
52805
53140
  interface OriginEndpointPolicyCdnAuthConfiguration {
53141
+ /**
53142
+ * The ARN for the secret in Secrets Manager that your CDN uses for authorization to access the endpoint.
53143
+ */
52806
53144
  cdnIdentifierSecretArns: string[];
53145
+ /**
53146
+ * The ARN for the IAM role that gives MediaPackage read access to Secrets Manager and AWS KMS for CDN authorization.
53147
+ */
52807
53148
  secretsRoleArn: string;
52808
53149
  }
52809
53150
  /**
@@ -53843,6 +54184,12 @@ export declare namespace neptunegraph {
53843
54184
  }
53844
54185
  }
53845
54186
  export declare namespace networkfirewall {
54187
+ interface FirewallAvailabilityZoneMapping {
54188
+ /**
54189
+ * An Availability Zone
54190
+ */
54191
+ availabilityZone: string;
54192
+ }
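A hedged sketch of the value the new `FirewallAvailabilityZoneMapping` type describes; the `availabilityZoneMappings` input name on `networkfirewall.Firewall` is an assumption based on this type's name, and the zone names are placeholders.

// Hypothetical Availability Zone mappings for a firewall.
const availabilityZoneMappings = [
    { availabilityZone: "us-east-1a" },
    { availabilityZone: "us-east-1b" },
];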
53846
54193
  interface FirewallPolicy {
53847
54194
  /**
53848
54195
  * Contains variables that you can use to override default Suricata settings in your firewall policy.
@@ -53970,12 +54317,18 @@ export declare namespace networkfirewall {
53970
54317
  action?: enums.networkfirewall.FirewallPolicyOverrideAction;
53971
54318
  }
53972
54319
  interface FirewallPolicyStatefulRuleGroupReference {
54320
+ /**
54321
+ * AWS Network Firewall plans to augment the active threat defense managed rule group with an additional deep threat inspection capability. When this capability is released, AWS will analyze service logs of network traffic processed by these rule groups to identify threat indicators across customers. AWS will use these threat indicators to improve the active threat defense managed rule groups and protect the security of AWS customers and services.
54322
+ *
54323
+ * > Customers can opt-out of deep threat inspection at any time through the AWS Network Firewall console or API. When customers opt out, AWS Network Firewall will not use the network traffic processed by those customers' active threat defense rule groups for rule group improvement.
54324
+ */
54325
+ deepThreatInspection?: boolean;
53973
54326
  /**
53974
54327
  * The action that allows the policy owner to override the behavior of the rule group within a policy.
53975
54328
  */
53976
54329
  override?: outputs.networkfirewall.FirewallPolicyStatefulRuleGroupOverride;
53977
54330
  /**
53978
- * An integer setting that indicates the order in which to run the stateful rule groups in a single `FirewallPolicy` . This setting only applies to firewall policies that specify the `STRICT_ORDER` rule order in the stateful engine options settings.
54331
+ * An integer setting that indicates the order in which to run the stateful rule groups in a single firewall policy. This setting only applies to firewall policies that specify the `STRICT_ORDER` rule order in the stateful engine options settings.
53979
54332
  *
53980
54333
 * Network Firewall evaluates each stateful rule group against a packet starting with the group that has the lowest priority setting. You must ensure that the priority settings are unique within each policy.
53981
54334
  *
@@ -53989,7 +54342,7 @@ export declare namespace networkfirewall {
53989
54342
  }
53990
54343
  interface FirewallPolicyStatelessRuleGroupReference {
53991
54344
  /**
53992
- * An integer setting that indicates the order in which to run the stateless rule groups in a single `FirewallPolicy` . Network Firewall applies each stateless rule group to a packet starting with the group that has the lowest priority setting. You must ensure that the priority settings are unique within each policy.
54345
+ * An integer setting that indicates the order in which to run the stateless rule groups in a single firewall policy. Network Firewall applies each stateless rule group to a packet starting with the group that has the lowest priority setting. You must ensure that the priority settings are unique within each policy.
53993
54346
  */
53994
54347
  priority: number;
53995
54348
  /**
@@ -54204,7 +54557,7 @@ export declare namespace networkfirewall {
54204
54557
  /**
54205
54558
  * The actions to take on a packet that matches one of the stateless rule definition's match attributes. You must specify a standard action and you can add custom actions.
54206
54559
  *
54207
- * > Network Firewall only forwards a packet for stateful rule inspection if you specify `aws:forward_to_sfe` for a rule that the packet matches, or if the packet doesn't match any stateless rule and you specify `aws:forward_to_sfe` for the `StatelessDefaultActions` setting for the `FirewallPolicy` .
54560
+ * > Network Firewall only forwards a packet for stateful rule inspection if you specify `aws:forward_to_sfe` for a rule that the packet matches, or if the packet doesn't match any stateless rule and you specify `aws:forward_to_sfe` for the `StatelessDefaultActions` setting for the firewall policy.
54208
54561
  *
54209
54562
  * For every rule, you must specify exactly one of the following standard actions.
54210
54563
  *
@@ -54256,7 +54609,7 @@ export declare namespace networkfirewall {
54256
54609
  *
54257
54610
  * These rules contain the inspection criteria and the action to take for traffic that matches the criteria, so this type of rule group doesn't have a separate action setting.
54258
54611
  *
54259
- * > You can't use the `priority` keyword if the `RuleOrder` option in `StatefulRuleOptions` is set to `STRICT_ORDER` .
54612
+ * > You can't use the `priority` keyword if the `RuleOrder` option in StatefulRuleOptions is set to `STRICT_ORDER` .
54260
54613
  */
54261
54614
  rulesString?: string;
54262
54615
  /**
@@ -54292,12 +54645,12 @@ export declare namespace networkfirewall {
54292
54645
  * The actions for a stateful rule are defined as follows:
54293
54646
  *
54294
54647
  * - *PASS* - Permits the packets to go to the intended destination.
54295
- * - *DROP* - Blocks the packets from going to the intended destination and sends an alert log message, if alert logging is configured in the `Firewall` `LoggingConfiguration` .
54648
+ * - *DROP* - Blocks the packets from going to the intended destination and sends an alert log message, if alert logging is configured in the firewall logging configuration.
54296
54649
  * - *REJECT* - Drops traffic that matches the conditions of the stateful rule and sends a TCP reset packet back to sender of the packet. A TCP reset packet is a packet with no payload and a `RST` bit contained in the TCP header flags. `REJECT` is available only for TCP traffic.
54297
- * - *ALERT* - Permits the packets to go to the intended destination and sends an alert log message, if alert logging is configured in the `Firewall` `LoggingConfiguration` .
54650
+ * - *ALERT* - Permits the packets to go to the intended destination and sends an alert log message, if alert logging is configured in the firewall logging configuration.
54298
54651
  *
54299
54652
  * You can use this action to test a rule that you intend to use to drop traffic. You can enable the rule with `ALERT` action, verify in the logs that the rule is filtering as you want, then change the action to `DROP` .
54300
- * - *REJECT* - Drops TCP traffic that matches the conditions of the stateful rule, and sends a TCP reset packet back to sender of the packet. A TCP reset packet is a packet with no payload and a `RST` bit contained in the TCP header flags. Also sends an alert log mesage if alert logging is configured in the `Firewall` `LoggingConfiguration` .
54653
+ * - *REJECT* - Drops TCP traffic that matches the conditions of the stateful rule, and sends a TCP reset packet back to sender of the packet. A TCP reset packet is a packet with no payload and a `RST` bit contained in the TCP header flags. Also sends an alert log message if alert logging is configured in the firewall logging configuration.
54301
54654
  *
54302
54655
  * `REJECT` isn't currently available for use with IMAP and FTP protocols.
54303
54656
  */
@@ -54333,7 +54686,7 @@ export declare namespace networkfirewall {
54333
54686
  }
54334
54687
  interface RuleGroupStatelessRulesAndCustomActions {
54335
54688
  /**
54336
- * Defines an array of individual custom action definitions that are available for use by the stateless rules in this `StatelessRulesAndCustomActions` specification. You name each custom action that you define, and then you can use it by name in your stateless rule `RuleGroup.RuleDefinition` `Actions` specification.
54689
+ * Defines an array of individual custom action definitions that are available for use by the stateless rules in this `StatelessRulesAndCustomActions` specification. You name each custom action that you define, and then you can use it by name in your stateless rule definition `Actions` specification.
54337
54690
  */
54338
54691
  customActions?: outputs.networkfirewall.RuleGroupCustomAction[];
54339
54692
  /**
@@ -54356,6 +54709,18 @@ export declare namespace networkfirewall {
54356
54709
  */
54357
54710
  masks?: enums.networkfirewall.RuleGroupTcpFlag[];
54358
54711
  }
54712
+ /**
54713
+ * A complex type containing the currently selected rule option fields that will be displayed for rule summarization returned by `DescribeRuleGroupSummary` .
54714
+ *
54715
+ * - The `RuleOptions` specified in `SummaryConfiguration`
54716
+ * - Rule metadata organization preferences
54717
+ */
54718
+ interface SummaryConfigurationProperties {
54719
+ /**
54720
+ * Specifies the selected rule options returned by `DescribeRuleGroupSummary` .
54721
+ */
54722
+ ruleOptions?: enums.networkfirewall.RuleGroupSummaryRuleOption[];
54723
+ }
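A hedged sketch of the value this type describes, assuming it is supplied through a `summaryConfiguration`-style input on a Network Firewall rule group; the specific `RuleGroupSummaryRuleOption` values shown are assumptions.

// Hypothetical summary configuration selecting which Suricata rule options
// DescribeRuleGroupSummary should return.
const summaryConfiguration = {
    ruleOptions: ["SID", "MSG", "METADATA"], // assumed enum member values
};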
54359
54724
  interface TlsInspectionConfigurationAddress {
54360
54725
  /**
54361
54726
  * Specify an IP address or a block of IP addresses in Classless Inter-Domain Routing (CIDR) notation. Network Firewall supports all address ranges for IPv4 and IPv6.
@@ -56233,6 +56598,10 @@ export declare namespace pcs {
56233
56598
  * An endpoint available for interaction with the scheduler.
56234
56599
  */
56235
56600
  interface ClusterEndpoint {
56601
+ /**
56602
+ * The endpoint's IPv6 address.
56603
+ */
56604
+ ipv6Address?: string;
56236
56605
  /**
56237
56606
  * The endpoint's connection port number.
56238
56607
  */
@@ -56315,6 +56684,10 @@ export declare namespace pcs {
56315
56684
  * The networking configuration for the cluster's control plane.
56316
56685
  */
56317
56686
  interface NetworkingProperties {
56687
+ /**
56688
+ * The network type of the cluster (IPV4 or IPV6)
56689
+ */
56690
+ networkType?: enums.pcs.ClusterNetworkingPropertiesNetworkType;
56318
56691
  /**
56319
56692
  * The list of security group IDs associated with the Elastic Network Interface (ENI) created in subnets.
56320
56693
  */
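A hedged sketch of a PCS cluster networking block using the new `networkType` field; the `"IPV6"` enum string and the sibling property names are assumptions based on the declarations above, and the IDs are placeholders.

// Hypothetical control-plane networking for an IPv6 PCS cluster.
const networking = {
    networkType: "IPV6",                        // assumed ClusterNetworkingPropertiesNetworkType value
    securityGroupIds: ["sg-0123456789abcdef0"], // placeholder
    subnetIds: ["subnet-0123456789abcdef0"],    // placeholder (property name assumed)
};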
@@ -86990,6 +87363,24 @@ export declare namespace rds {
86990
87363
  */
86991
87364
  roleArn: string;
86992
87365
  }
87366
+ interface DbInstanceDbInstanceStatusInfo {
87367
+ /**
87368
+ * Details of the error if there is an error for the instance. If the instance isn't in an error state, this value is blank.
87369
+ */
87370
+ message?: string;
87371
+ /**
87372
+ * Indicates whether the instance is operating normally (TRUE) or is in an error state (FALSE).
87373
+ */
87374
+ normal?: boolean;
87375
+ /**
87376
+ * The status of the DB instance. For a StatusType of read replica, the values can be replicating, replication stop point set, replication stop point reached, error, stopped, or terminated.
87377
+ */
87378
+ status?: string;
87379
+ /**
87380
+ * The status type of the DB instance.
87381
+ */
87382
+ statusType?: string;
87383
+ }
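The new `DbInstanceDbInstanceStatusInfo` type is read-only status data reported back for a DB instance. A hedged sketch of the shape a single entry takes for a read replica; the field values are illustrative only.

// Illustrative status entry for a healthy read replica.
const statusInfo = {
    statusType: "read replication",
    status: "replicating",
    normal: true,
    message: "", // blank when the instance is not in an error state
};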
86993
87384
  /**
86994
87385
  * This data type represents the information you need to connect to an Amazon RDS DB instance. This data type is used as a response element in the following actions:
86995
87386
  * + ``CreateDBInstance``
@@ -88951,7 +89342,7 @@ export declare namespace s3 {
88951
89342
  tierings: outputs.s3.BucketTiering[];
88952
89343
  }
88953
89344
  /**
88954
- * Specifies the inventory configuration for an Amazon S3 bucket. For more information, see [GET Bucket inventory](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html) in the *Amazon S3 API Reference*.
89345
+ * Specifies the S3 Inventory configuration for an Amazon S3 bucket. For more information, see [GET Bucket inventory](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html) in the *Amazon S3 API Reference*.
88955
89346
  */
88956
89347
  interface BucketInventoryConfiguration {
88957
89348
  /**
@@ -88983,9 +89374,12 @@ export declare namespace s3 {
88983
89374
  */
88984
89375
  scheduleFrequency: enums.s3.BucketInventoryConfigurationScheduleFrequency;
88985
89376
  }
89377
+ /**
89378
+ * The inventory table configuration for an S3 Metadata configuration.
89379
+ */
88986
89380
  interface BucketInventoryTableConfiguration {
88987
89381
  /**
88988
- * Specifies whether inventory table configuration is enabled or disabled.
89382
+ * The configuration state of the inventory table, indicating whether the inventory table is enabled or disabled.
88989
89383
  */
88990
89384
  configurationState: enums.s3.BucketInventoryTableConfigurationConfigurationState;
88991
89385
  /**
@@ -88993,7 +89387,7 @@ export declare namespace s3 {
88993
89387
  */
88994
89388
  encryptionConfiguration?: outputs.s3.BucketMetadataTableEncryptionConfiguration;
88995
89389
  /**
88996
- * The ARN of the inventory table.
89390
+ * The Amazon Resource Name (ARN) for the inventory table.
88997
89391
  */
88998
89392
  tableArn?: string;
88999
89393
  /**
@@ -89001,6 +89395,9 @@ export declare namespace s3 {
89001
89395
  */
89002
89396
  tableName?: string;
89003
89397
  }
89398
+ /**
89399
+ * The journal table configuration for an S3 Metadata configuration.
89400
+ */
89004
89401
  interface BucketJournalTableConfiguration {
89005
89402
  /**
89006
89403
  * The encryption configuration for the journal table.
@@ -89011,7 +89408,7 @@ export declare namespace s3 {
89011
89408
  */
89012
89409
  recordExpiration: outputs.s3.BucketRecordExpiration;
89013
89410
  /**
89014
- * The ARN of the journal table.
89411
+ * The Amazon Resource Name (ARN) for the journal table.
89015
89412
  */
89016
89413
  tableArn?: string;
89017
89414
  /**
@@ -89072,36 +89469,44 @@ export declare namespace s3 {
89072
89469
  */
89073
89470
  targetObjectKeyFormat?: outputs.s3.BucketTargetObjectKeyFormat;
89074
89471
  }
89472
+ /**
89473
+ * Creates a V2 S3 Metadata configuration of a general purpose bucket. For more information, see [Accelerating data discovery with S3 Metadata](https://docs.aws.amazon.com/AmazonS3/latest/userguide/metadata-tables-overview.html) in the *Amazon S3 User Guide*.
89474
+ */
89075
89475
  interface BucketMetadataConfiguration {
89076
89476
  /**
89077
- * The destination information for the metadata configuration.
89477
+ * The destination information for the S3 Metadata configuration.
89078
89478
  */
89079
89479
  destination?: outputs.s3.BucketMetadataDestination;
89080
89480
  /**
89081
- * The configuration for the inventory table.
89481
+ * The inventory table configuration for a metadata configuration.
89082
89482
  */
89083
89483
  inventoryTableConfiguration?: outputs.s3.BucketInventoryTableConfiguration;
89084
89484
  /**
89085
- * The configuration for the journal table.
89485
+ * The journal table configuration for a metadata configuration.
89086
89486
  */
89087
89487
  journalTableConfiguration: outputs.s3.BucketJournalTableConfiguration;
89088
89488
  }
89489
+ /**
89490
+ * The destination information for the S3 Metadata configuration.
89491
+ */
89089
89492
  interface BucketMetadataDestination {
89090
89493
  /**
89091
- * The ARN of the table bucket.
89494
+ * The Amazon Resource Name (ARN) of the table bucket where the metadata configuration is stored.
89092
89495
  */
89093
89496
  tableBucketArn?: string;
89094
89497
  /**
89095
- * The type of the table bucket.
89498
+ * The type of the table bucket where the metadata configuration is stored. The ``aws`` value indicates an AWS managed table bucket, and the ``customer`` value indicates a customer-managed table bucket. V2 metadata configurations are stored in AWS managed table buckets, and V1 metadata configurations are stored in customer-managed table buckets.
89096
89499
  */
89097
89500
  tableBucketType: enums.s3.BucketMetadataDestinationTableBucketType;
89098
89501
  /**
89099
- * The namespace of the table.
89502
+ * The namespace in the table bucket where the metadata tables for a metadata configuration are stored.
89100
89503
  */
89101
89504
  tableNamespace?: string;
89102
89505
  }
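A minimal sketch of a V2 S3 Metadata configuration on a general purpose bucket, assuming the `metadataConfiguration` input on `s3.Bucket` mirrors the output declarations above; the `"ENABLED"` enum strings are assumptions.

import * as aws_native from "@pulumi/aws-native";

// Hypothetical V2 metadata configuration: expiring journal table plus a live inventory table.
const bucket = new aws_native.s3.Bucket("metadata-example", {
    metadataConfiguration: {                 // assumed input name
        journalTableConfiguration: {
            recordExpiration: {
                expiration: "ENABLED",
                days: 30,                    // journal records must be retained at least 7 days
            },
        },
        inventoryTableConfiguration: {
            configurationState: "ENABLED",
        },
    },
});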
89103
89506
  /**
89104
- * The metadata table configuration of an S3 general purpose bucket. For more information, see [Accelerating data discovery with S3 Metadata](https://docs.aws.amazon.com/AmazonS3/latest/userguide/metadata-tables-overview.html) and [Setting up permissions for configuring metadata tables](https://docs.aws.amazon.com/AmazonS3/latest/userguide/metadata-tables-permissions.html).
89507
+ * We recommend that you create your S3 Metadata configurations by using the V2 [MetadataConfiguration](https://docs.aws.amazon.com/AWSCloudFormation/latest/TemplateReference/aws-properties-s3-bucket-metadataconfiguration.html) resource type. We no longer recommend using the V1 ``MetadataTableConfiguration`` resource type.
89508
+ * If you created your S3 Metadata configuration before July 15, 2025, we recommend that you delete and re-create your configuration by using the [MetadataConfiguration](https://docs.aws.amazon.com/AWSCloudFormation/latest/TemplateReference/aws-properties-s3-bucket-metadataconfiguration.html) resource type so that you can expire journal table records and create a live inventory table.
89509
+ * Creates a V1 S3 Metadata configuration for a general purpose bucket. For more information, see [Accelerating data discovery with S3 Metadata](https://docs.aws.amazon.com/AmazonS3/latest/userguide/metadata-tables-overview.html) in the *Amazon S3 User Guide*.
89105
89510
  */
89106
89511
  interface BucketMetadataTableConfiguration {
89107
89512
  /**
@@ -89109,13 +89514,16 @@ export declare namespace s3 {
89109
89514
  */
89110
89515
  s3TablesDestination: outputs.s3.BucketS3TablesDestination;
89111
89516
  }
89517
+ /**
89518
+ * The encryption settings for an S3 Metadata journal table or inventory table configuration.
89519
+ */
89112
89520
  interface BucketMetadataTableEncryptionConfiguration {
89113
89521
  /**
89114
- * The ARN of the KMS key. Required if SseAlgorithm is aws:kms.
89522
+ * If server-side encryption with AWS Key Management Service (AWS KMS) keys (SSE-KMS) is specified, you must also specify the KMS key Amazon Resource Name (ARN). You must specify a customer-managed KMS key that's located in the same Region as the general purpose bucket that corresponds to the metadata table configuration.
89115
89523
  */
89116
89524
  kmsKeyArn?: string;
89117
89525
  /**
89118
- * Specifies the server-side encryption algorithm to use for encrypting tables.
89526
+ * The encryption type specified for a metadata table. To specify server-side encryption with AWS Key Management Service (AWS KMS) keys (SSE-KMS), use the ``aws:kms`` value. To specify server-side encryption with Amazon S3 managed keys (SSE-S3), use the ``AES256`` value.
89119
89527
  */
89120
89528
  sseAlgorithm: enums.s3.BucketMetadataTableEncryptionConfigurationSseAlgorithm;
89121
89529
  }
@@ -89305,13 +89713,16 @@ export declare namespace s3 {
89305
89713
  */
89306
89714
  queue: string;
89307
89715
  }
89716
+ /**
89717
+ * The journal table record expiration settings for a journal table in an S3 Metadata configuration.
89718
+ */
89308
89719
  interface BucketRecordExpiration {
89309
89720
  /**
89310
- * The number of days after which records expire. Required if Expiration is ENABLED.
89721
+ * If you enable journal table record expiration, you can set the number of days to retain your journal table records. Journal table records must be retained for a minimum of 7 days. To set this value, specify any whole number from ``7`` to ``2147483647``. For example, to retain your journal table records for one year, set this value to ``365``.
89311
89722
  */
89312
89723
  days?: number;
89313
89724
  /**
89314
- * Specifies whether record expiration is enabled or disabled.
89725
+ * Specifies whether journal table record expiration is enabled or disabled.
89315
89726
  */
89316
89727
  expiration: enums.s3.BucketRecordExpirationExpiration;
89317
89728
  }
@@ -89410,6 +89821,7 @@ export declare namespace s3 {
89410
89821
  /**
89411
89822
  * The storage class to use when replicating objects, such as S3 Standard or reduced redundancy. By default, Amazon S3 uses the storage class of the source object to create the object replica.
89412
89823
  * For valid values, see the ``StorageClass`` element of the [PUT Bucket replication](https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html) action in the *Amazon S3 API Reference*.
89824
+ * ``FSX_OPENZFS`` is not an accepted value when replicating objects.
89413
89825
  */
89414
89826
  storageClass?: enums.s3.BucketReplicationDestinationStorageClass;
89415
89827
  }
@@ -89625,7 +90037,7 @@ export declare namespace s3 {
89625
90037
  rules: outputs.s3.BucketFilterRule[];
89626
90038
  }
89627
90039
  /**
89628
- * The destination information for the metadata table configuration. The destination table bucket must be in the same Region and AWS-account as the general purpose bucket. The specified metadata table name must be unique within the ``aws_s3_metadata`` namespace in the destination table bucket.
90040
+ * The destination information for a V1 S3 Metadata configuration. The destination table bucket must be in the same Region and AWS-account as the general purpose bucket. The specified metadata table name must be unique within the ``aws_s3_metadata`` namespace in the destination table bucket.
89629
90041
  */
89630
90042
  interface BucketS3TablesDestination {
89631
90043
  /**
@@ -90767,6 +91179,25 @@ export declare namespace sagemaker {
90767
91179
  */
90768
91180
  sageMakerImageVersionArn?: string;
90769
91181
  }
91182
+ /**
91183
+ * The configuration for the restricted instance groups (RIG) environment.
91184
+ */
91185
+ interface ClusterEnvironmentConfig {
91186
+ fSxLustreConfig?: outputs.sagemaker.ClusterFSxLustreConfig;
91187
+ }
91188
+ /**
91189
+ * Configuration settings for an Amazon FSx for Lustre file system to be used with the cluster.
91190
+ */
91191
+ interface ClusterFSxLustreConfig {
91192
+ /**
91193
+ * The throughput capacity of the FSx for Lustre file system, measured in MB/s per TiB of storage.
91194
+ */
91195
+ perUnitStorageThroughput: number;
91196
+ /**
91197
+ * The storage capacity of the FSx for Lustre file system, specified in gibibytes (GiB).
91198
+ */
91199
+ sizeInGiB: number;
91200
+ }
90770
91201
  /**
90771
91202
  * Details of an instance group in a SageMaker HyperPod cluster.
90772
91203
  */
@@ -90831,6 +91262,34 @@ export declare namespace sagemaker {
90831
91262
  */
90832
91263
  clusterArn: string;
90833
91264
  }
91265
+ /**
91266
+ * Details of a restricted instance group in a SageMaker HyperPod cluster.
91267
+ */
91268
+ interface ClusterRestrictedInstanceGroup {
91269
+ /**
91270
+ * The number of instances that are currently in the restricted instance group of a SageMaker HyperPod cluster.
91271
+ */
91272
+ currentCount?: number;
91273
+ environmentConfig: outputs.sagemaker.ClusterEnvironmentConfig;
91274
+ executionRole: string;
91275
+ /**
91276
+ * The number of instances you specified to add to the restricted instance group of a SageMaker HyperPod cluster.
91277
+ */
91278
+ instanceCount: number;
91279
+ instanceGroupName: string;
91280
+ instanceStorageConfigs?: outputs.sagemaker.ClusterInstanceStorageConfig[];
91281
+ instanceType: string;
91282
+ onStartDeepHealthChecks?: enums.sagemaker.ClusterDeepHealthCheckType[];
91283
+ overrideVpcConfig?: outputs.sagemaker.ClusterVpcConfig;
91284
+ /**
91285
+ * The number you specified for ThreadsPerCore in CreateCluster for enabling or disabling multithreading. For instance types that support multithreading, you can specify 1 for disabling multithreading and 2 for enabling multithreading.
91286
+ */
91287
+ threadsPerCore?: number;
91288
+ /**
91289
+ * The Amazon Resource Name (ARN) of the training plan to use for this cluster restricted instance group. For more information about how to reserve GPU capacity for your SageMaker HyperPod clusters using Amazon SageMaker Training Plan, see CreateTrainingPlan.
91290
+ */
91291
+ trainingPlanArn?: string;
91292
+ }
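A hedged sketch of a restricted instance group definition, written as the plain object a `restrictedInstanceGroups`-style input on `sagemaker.Cluster` would presumably accept; the input name, instance type, and ARN are assumptions or placeholders.

// Hypothetical restricted instance group (RIG) with an FSx for Lustre environment.
const restrictedInstanceGroup = {
    instanceGroupName: "rig-training",
    instanceType: "ml.p5.48xlarge",          // placeholder instance type
    instanceCount: 2,
    executionRole: "arn:aws:iam::123456789012:role/hyperpod-execution-role",
    threadsPerCore: 1,                       // disable multithreading
    environmentConfig: {
        fSxLustreConfig: {
            sizeInGiB: 1200,                 // FSx for Lustre storage capacity
            perUnitStorageThroughput: 250,   // MB/s per TiB of storage
        },
    },
};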
90834
91293
  /**
90835
91294
  * Specifies an Amazon Virtual Private Cloud (VPC) that your SageMaker jobs, hosted models, and compute resources have access to. You can control access to and from your resources by configuring a VPC.
90836
91295
  */
@@ -94431,6 +94890,331 @@ export declare namespace sagemaker {
94431
94890
  */
94432
94891
  version?: string;
94433
94892
  }
94893
+ /**
94894
+ * Configures the processing job to run a specified Docker container image.
94895
+ */
94896
+ interface ProcessingJobAppSpecification {
94897
+ /**
94898
+ * The arguments for a container used to run a processing job.
94899
+ */
94900
+ containerArguments?: string[];
94901
+ /**
94902
+ * The entrypoint for a container used to run a processing job.
94903
+ */
94904
+ containerEntrypoint?: string[];
94905
+ /**
94906
+ * The container image to be run by the processing job.
94907
+ */
94908
+ imageUri: string;
94909
+ }
94910
+ /**
94911
+ * Configuration for Athena Dataset Definition input.
94912
+ */
94913
+ interface ProcessingJobAthenaDatasetDefinition {
94914
+ /**
94915
+ * The name of the data catalog used in Athena query execution.
94916
+ */
94917
+ catalog: string;
94918
+ /**
94919
+ * The name of the database used in the Athena query execution.
94920
+ */
94921
+ database: string;
94922
+ /**
94923
+ * The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt data generated from an Athena query execution.
94924
+ */
94925
+ kmsKeyId?: string;
94926
+ /**
94927
+ * The compression used for Athena query results.
94928
+ */
94929
+ outputCompression?: enums.sagemaker.ProcessingJobAthenaDatasetDefinitionOutputCompression;
94930
+ /**
94931
+ * The data storage format for Athena query results.
94932
+ */
94933
+ outputFormat: enums.sagemaker.ProcessingJobAthenaDatasetDefinitionOutputFormat;
94934
+ /**
94935
+ * The location in Amazon S3 where Athena query results are stored.
94936
+ */
94937
+ outputS3Uri: string;
94938
+ /**
94939
+ * The SQL query statements, to be executed.
94940
+ */
94941
+ queryString: string;
94942
+ /**
94943
+ * The name of the workgroup in which the Athena query is being started.
94944
+ */
94945
+ workGroup?: string;
94946
+ }
94947
+ /**
94948
+ * Configuration for the cluster used to run a processing job.
94949
+ */
94950
+ interface ProcessingJobClusterConfig {
94951
+ /**
94952
+ * The number of ML compute instances to use in the processing job. For distributed processing jobs, specify a value greater than 1. The default value is 1.
94953
+ */
94954
+ instanceCount: number;
94955
+ /**
94956
+ * The ML compute instance type for the processing job.
94957
+ */
94958
+ instanceType: enums.sagemaker.ProcessingJobClusterConfigInstanceType;
94959
+ /**
94960
+ * The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt data on the storage volume attached to the ML compute instance(s) that run the processing job.
94961
+ */
94962
+ volumeKmsKeyId?: string;
94963
+ /**
94964
+ * The size of the ML storage volume in gigabytes that you want to provision. You must specify sufficient ML storage for your scenario.
94965
+ */
94966
+ volumeSizeInGb: number;
94967
+ }
94968
+ /**
94969
+ * Configuration for Dataset Definition inputs. The Dataset Definition input must specify exactly one of either `AthenaDatasetDefinition` or `RedshiftDatasetDefinition` types.
94970
+ */
94971
+ interface ProcessingJobDatasetDefinition {
94972
+ /**
94973
+ * Configuration for Athena Dataset Definition input.
94974
+ */
94975
+ athenaDatasetDefinition?: outputs.sagemaker.ProcessingJobAthenaDatasetDefinition;
94976
+ /**
94977
+ * Whether the generated dataset is FullyReplicated or ShardedByS3Key (default).
94978
+ */
94979
+ dataDistributionType?: enums.sagemaker.ProcessingJobDatasetDefinitionDataDistributionType;
94980
+ /**
94981
+ * Whether to use File or Pipe input mode. In File (default) mode, Amazon SageMaker copies the data from the input source onto the local Amazon Elastic Block Store (Amazon EBS) volumes before starting your training algorithm. This is the most commonly used input mode. In Pipe mode, Amazon SageMaker streams input data from the source directly to your algorithm without using the EBS volume.
94982
+ */
94983
+ inputMode?: enums.sagemaker.ProcessingJobDatasetDefinitionInputMode;
94984
+ /**
94985
+ * The local path where you want Amazon SageMaker to download the Dataset Definition inputs to run a processing job. LocalPath is an absolute path to the input data. This is a required parameter when AppManaged is False (default).
94986
+ */
94987
+ localPath?: string;
94988
+ /**
94989
+ * Configuration for Redshift Dataset Definition input.
94990
+ */
94991
+ redshiftDatasetDefinition?: outputs.sagemaker.ProcessingJobRedshiftDatasetDefinition;
94992
+ }
94993
+ /**
94994
+ * Sets the environment variables in the Docker container
94995
+ */
94996
+ interface ProcessingJobEnvironment {
94997
+ }
94998
+ /**
94999
+ * Associates a SageMaker job as a trial component with an experiment and trial.
95000
+ */
95001
+ interface ProcessingJobExperimentConfig {
95002
+ /**
95003
+ * The name of an existing experiment to associate with the trial component.
95004
+ */
95005
+ experimentName?: string;
95006
+ /**
95007
+ * The name of the experiment run to associate with the trial component.
95008
+ */
95009
+ runName?: string;
95010
+ /**
95011
+ * The display name for the trial component. If this key isn't specified, the display name is the trial component name.
95012
+ */
95013
+ trialComponentDisplayName?: string;
95014
+ /**
95015
+ * The name of an existing trial to associate the trial component with. If not specified, a new trial is created.
95016
+ */
95017
+ trialName?: string;
95018
+ }
95019
+ /**
95020
+ * Configuration for processing job outputs in Amazon SageMaker Feature Store.
95021
+ */
95022
+ interface ProcessingJobFeatureStoreOutput {
95023
+ /**
95024
+ * The name of the Amazon SageMaker FeatureGroup to use as the destination for processing job output. Note that your processing script is responsible for putting records into your Feature Store.
95025
+ */
95026
+ featureGroupName: string;
95027
+ }
95028
+ /**
95029
+ * Networking options for a job, such as network traffic encryption between containers, whether to allow inbound and outbound network calls to and from containers, and the VPC subnets and security groups to use for VPC-enabled jobs.
95030
+ */
95031
+ interface ProcessingJobNetworkConfig {
95032
+ /**
95033
+ * Whether to encrypt all communications between distributed processing jobs. Choose True to encrypt communications. Encryption provides greater security for distributed processing jobs, but the processing might take longer.
95034
+ */
95035
+ enableInterContainerTrafficEncryption?: boolean;
95036
+ /**
95037
+ * Whether to allow inbound and outbound network calls to and from the containers used for the processing job.
95038
+ */
95039
+ enableNetworkIsolation?: boolean;
95040
+ /**
95041
+ * Specifies an Amazon Virtual Private Cloud (VPC) that your SageMaker jobs, hosted models, and compute resources have access to. You can control access to and from your resources by configuring a VPC. For more information, see [Give SageMaker Access to Resources in your Amazon VPC](https://docs.aws.amazon.com/sagemaker/latest/dg/infrastructure-give-access.html) .
95042
+ */
95043
+ vpcConfig?: outputs.sagemaker.ProcessingJobVpcConfig;
95044
+ }
95045
+ /**
95046
+ * The inputs for a processing job. The processing input must specify exactly one of either S3Input or DatasetDefinition types.
95047
+ */
95048
+ interface ProcessingJobProcessingInputsObject {
95049
+ /**
95050
+ * When True, input operations such as data download are managed natively by the processing job application. When False (default), input operations are managed by Amazon SageMaker.
95051
+ */
95052
+ appManaged?: boolean;
95053
+ /**
95054
+ * Configuration for Dataset Definition inputs. The Dataset Definition input must specify exactly one of either `AthenaDatasetDefinition` or `RedshiftDatasetDefinition` types.
95055
+ */
95056
+ datasetDefinition?: outputs.sagemaker.ProcessingJobDatasetDefinition;
95057
+ /**
95058
+ * The name for the processing job input.
95059
+ */
95060
+ inputName: string;
95061
+ /**
95062
+ * Configuration for downloading input data from Amazon S3 into the processing container.
95063
+ */
95064
+ s3Input?: outputs.sagemaker.ProcessingJobS3Input;
95065
+ }
95066
+ /**
95067
+ * Configuration for uploading output from the processing container.
95068
+ */
95069
+ interface ProcessingJobProcessingOutputConfig {
95070
+ /**
95071
+ * The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt the processing job output. KmsKeyId can be an ID of a KMS key, ARN of a KMS key, or alias of a KMS key. The KmsKeyId is applied to all outputs.
95072
+ */
95073
+ kmsKeyId?: string;
95074
+ /**
95075
+ * An array of outputs configuring the data to upload from the processing container.
95076
+ */
95077
+ outputs: outputs.sagemaker.ProcessingJobProcessingOutputsObject[];
95078
+ }
95079
+ /**
95080
+ * Describes the results of a processing job. The processing output must specify exactly one of either S3Output or FeatureStoreOutput types.
95081
+ */
95082
+ interface ProcessingJobProcessingOutputsObject {
95083
+ /**
95084
+ * When True, output operations such as data upload are managed natively by the processing job application. When False (default), output operations are managed by Amazon SageMaker.
95085
+ */
95086
+ appManaged?: boolean;
95087
+ /**
95088
+ * Configuration for processing job outputs in Amazon SageMaker Feature Store.
95089
+ */
95090
+ featureStoreOutput?: outputs.sagemaker.ProcessingJobFeatureStoreOutput;
95091
+ /**
95092
+ * The name for the processing job output.
95093
+ */
95094
+ outputName: string;
95095
+ /**
95096
+ * Configuration for uploading output data to Amazon S3 from the processing container.
95097
+ */
95098
+ s3Output?: outputs.sagemaker.ProcessingJobS3Output;
95099
+ }
95100
+ /**
95101
+ * Identifies the resources, ML compute instances, and ML storage volumes to deploy for a processing job. In distributed training, you specify more than one instance.
95102
+ */
95103
+ interface ProcessingJobProcessingResources {
95104
+ /**
95105
+ * The configuration for the resources in a cluster used to run the processing job.
95106
+ */
95107
+ clusterConfig: outputs.sagemaker.ProcessingJobClusterConfig;
95108
+ }
95109
+ /**
95110
+ * Configuration for Redshift Dataset Definition input.
95111
+ */
95112
+ interface ProcessingJobRedshiftDatasetDefinition {
95113
+ /**
95114
+ * The Redshift cluster Identifier.
95115
+ */
95116
+ clusterId: string;
95117
+ /**
95118
+ * The IAM role attached to your Redshift cluster that Amazon SageMaker uses to generate datasets.
95119
+ */
95120
+ clusterRoleArn: string;
95121
+ /**
95122
+ * The name of the Redshift database used in Redshift query execution.
95123
+ */
95124
+ database: string;
95125
+ /**
95126
+ * The database user name used in Redshift query execution.
95127
+ */
95128
+ dbUser: string;
95129
+ /**
95130
+ * The AWS Key Management Service (AWS KMS) key that Amazon SageMaker uses to encrypt data from a Redshift execution.
95131
+ */
95132
+ kmsKeyId?: string;
95133
+ /**
95134
+ * The compression used for Redshift query results.
95135
+ */
95136
+ outputCompression?: enums.sagemaker.ProcessingJobRedshiftDatasetDefinitionOutputCompression;
95137
+ /**
95138
+ * The data storage format for Redshift query results.
95139
+ */
95140
+ outputFormat: enums.sagemaker.ProcessingJobRedshiftDatasetDefinitionOutputFormat;
95141
+ /**
95142
+ * The location in Amazon S3 where the Redshift query results are stored.
95143
+ */
95144
+ outputS3Uri: string;
95145
+ /**
95146
+ * The SQL query statements to be executed.
95147
+ */
95148
+ queryString: string;
95149
+ }
95150
+ /**
95151
+ * Configuration for downloading input data from Amazon S3 into the processing container.
95152
+ */
95153
+ interface ProcessingJobS3Input {
95154
+ /**
95155
+ * The local path in your container where you want Amazon SageMaker to write input data to. `LocalPath` is an absolute path to the input data and must begin with `/opt/ml/processing/`. LocalPath is a required parameter when `AppManaged` is `False` (default).
95156
+ */
95157
+ localPath?: string;
95158
+ /**
95159
+ * Whether to GZIP-decompress the data in Amazon S3 as it is streamed into the processing container. `Gzip` can only be used when `Pipe` mode is specified as the `S3InputMode`. In `Pipe` mode, Amazon SageMaker streams input data from the source directly to your container without using the EBS volume.
95160
+ */
95161
+ s3CompressionType?: enums.sagemaker.ProcessingJobS3InputS3CompressionType;
95162
+ /**
95163
+ * Whether to distribute the data from Amazon S3 to all processing instances with `FullyReplicated`, or whether the data from Amazon S3 is shared by Amazon S3 key, downloading one shard of data to each processing instance.
95164
+ */
95165
+ s3DataDistributionType?: enums.sagemaker.ProcessingJobS3InputS3DataDistributionType;
95166
+ /**
95167
+ * Whether you use an S3Prefix or a ManifestFile for the data type. If you choose S3Prefix, S3Uri identifies a key name prefix. Amazon SageMaker uses all objects with the specified key name prefix for the processing job. If you choose ManifestFile, S3Uri identifies an object that is a manifest file containing a list of object keys that you want Amazon SageMaker to use for the processing job.
95168
+ */
95169
+ s3DataType: enums.sagemaker.ProcessingJobS3InputS3DataType;
95170
+ /**
95171
+ * Whether to use File or Pipe input mode. In File mode, Amazon SageMaker copies the data from the input source onto the local ML storage volume before starting your processing container. This is the most commonly used input mode. In Pipe mode, Amazon SageMaker streams input data from the source directly to your processing container into named pipes without using the ML storage volume.
95172
+ */
95173
+ s3InputMode?: enums.sagemaker.ProcessingJobS3InputS3InputMode;
95174
+ /**
95175
+ * The URI of the Amazon S3 prefix Amazon SageMaker downloads data required to run a processing job.
95176
+ */
95177
+ s3Uri: string;
95178
+ }
95179
+ /**
95180
+ * Configuration for uploading output data to Amazon S3 from the processing container.
95181
+ */
95182
+ interface ProcessingJobS3Output {
95183
+ /**
95184
+ * The local path of a directory where you want Amazon SageMaker to upload its contents to Amazon S3. LocalPath is an absolute path to a directory containing output files. This directory will be created by the platform and exist when your container's entrypoint is invoked.
95185
+ */
95186
+ localPath?: string;
95187
+ /**
95188
+ * Whether to upload the results of the processing job continuously or after the job completes.
95189
+ */
95190
+ s3UploadMode: enums.sagemaker.ProcessingJobS3OutputS3UploadMode;
95191
+ /**
95192
+ * A URI that identifies the Amazon S3 bucket where you want Amazon SageMaker to save the results of a processing job.
95193
+ */
95194
+ s3Uri: string;
95195
+ }
95196
+ /**
95197
+ * Configures conditions under which the processing job should be stopped, such as how long the processing job has been running. After the condition is met, the processing job is stopped.
95198
+ */
95199
+ interface ProcessingJobStoppingCondition {
95200
+ /**
95201
+ * Specifies the maximum runtime in seconds.
95202
+ */
95203
+ maxRuntimeInSeconds: number;
95204
+ }
95205
+ /**
95206
+ * Specifies an Amazon Virtual Private Cloud (VPC) that your SageMaker jobs, hosted models, and compute resources have access to. You can control access to and from your resources by configuring a VPC. For more information, see https://docs.aws.amazon.com/sagemaker/latest/dg/infrastructure-give-access.html
95207
+ */
95208
+ interface ProcessingJobVpcConfig {
95209
+ /**
95210
+ * The VPC security group IDs, in the form 'sg-xxxxxxxx'. Specify the security groups for the VPC that is specified in the 'Subnets' field.
95211
+ */
95212
+ securityGroupIds: string[];
95213
+ /**
95214
+ * The ID of the subnets in the VPC to which you want to connect your training job or model. For information about the availability of specific instance types, see https://docs.aws.amazon.com/sagemaker/latest/dg/regions-quotas.html
95215
+ */
95216
+ subnets: string[];
95217
+ }
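The block above introduces the full set of supporting types for SageMaker processing jobs. A hedged end-to-end sketch of how they might be wired together, assuming a `sagemaker.ProcessingJob` resource whose input names mirror these declarations; the name field, role, image, and S3 URIs are placeholders or assumptions.

import * as aws_native from "@pulumi/aws-native";

// Hypothetical processing job: one m5.xlarge instance reads raw data from S3,
// runs a container, and uploads features back to S3 at the end of the job.
const job = new aws_native.sagemaker.ProcessingJob("feature-prep", {
    processingJobName: "feature-prep",       // assumed name property
    roleArn: "arn:aws:iam::123456789012:role/sagemaker-processing-role",
    appSpecification: {
        imageUri: "123456789012.dkr.ecr.us-east-1.amazonaws.com/feature-prep:latest",
        containerEntrypoint: ["python3", "/opt/ml/processing/code/prep.py"],
    },
    processingResources: {
        clusterConfig: {
            instanceCount: 1,
            instanceType: "ml.m5.xlarge",
            volumeSizeInGb: 50,
        },
    },
    processingInputs: [{
        inputName: "raw",
        s3Input: {
            s3Uri: "s3://example-bucket/raw/",
            s3DataType: "S3Prefix",
            s3InputMode: "File",
            localPath: "/opt/ml/processing/input/raw",
        },
    }],
    processingOutputConfig: {
        outputs: [{
            outputName: "features",
            s3Output: {
                s3Uri: "s3://example-bucket/features/",
                s3UploadMode: "EndOfJob",
                localPath: "/opt/ml/processing/output/features",
            },
        }],
    },
    stoppingCondition: { maxRuntimeInSeconds: 3600 },
});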
94434
95218
  interface ProjectCfnStackParameter {
94435
95219
  /**
94436
95220
  * The key of the parameter.
@@ -99915,11 +100699,11 @@ export declare namespace verifiedpermissions {
99915
100699
  */
99916
100700
  mode: enums.verifiedpermissions.PolicyStoreDeletionMode;
99917
100701
  }
99918
- interface PolicyStoreSchemaDefinition {
99919
- /**
99920
- * A JSON string representation of the schema supported by applications that use this policy store. For more information, see [Policy store schema](https://docs.aws.amazon.com/verifiedpermissions/latest/userguide/schema.html) in the AVP User Guide.
99921
- */
99922
- cedarJson?: string;
100702
+ interface PolicyStoreSchemaDefinition0Properties {
100703
+ cedarJson: string;
100704
+ }
100705
+ interface PolicyStoreSchemaDefinition1Properties {
100706
+ cedarFormat: string;
99923
100707
  }
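The single `cedarJson` schema definition has been split into two alternative shapes, one for JSON schemas and one for Cedar-format schemas. A hedged sketch of each, with placeholder schema content; exactly one of them would be supplied as the policy store's schema definition.

// Hypothetical JSON-format schema definition.
const jsonSchemaDefinition = {
    cedarJson: JSON.stringify({
        PhotoApp: { entityTypes: {}, actions: {} }, // placeholder Cedar JSON schema
    }),
};

// Hypothetical Cedar-format schema definition.
const cedarSchemaDefinition = {
    cedarFormat: "namespace PhotoApp { entity User; }", // placeholder Cedar schema text
};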
99924
100708
  interface PolicyStoreValidationSettings {
99925
100709
  /**
@@ -104492,6 +105276,24 @@ export declare namespace workspacesweb {
104492
105276
  */
104493
105277
  ipRange: string;
104494
105278
  }
105279
+ interface SessionLoggerEventFilter0Properties {
105280
+ all: outputs.workspacesweb.SessionLoggerUnit;
105281
+ }
105282
+ interface SessionLoggerEventFilter1Properties {
105283
+ include: enums.workspacesweb.SessionLoggerEvent[];
105284
+ }
105285
+ interface SessionLoggerLogConfiguration {
105286
+ s3?: outputs.workspacesweb.SessionLoggerS3LogConfiguration;
105287
+ }
105288
+ interface SessionLoggerS3LogConfiguration {
105289
+ bucket: string;
105290
+ bucketOwner?: string;
105291
+ folderStructure: enums.workspacesweb.SessionLoggerFolderStructure;
105292
+ keyPrefix?: string;
105293
+ logFileFormat: enums.workspacesweb.SessionLoggerLogFileFormat;
105294
+ }
105295
+ interface SessionLoggerUnit {
105296
+ }
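A hedged sketch of the configuration shape these new `SessionLogger*` types describe, assuming they are supplied as inputs to a WorkSpaces Web session logger resource; the enum string values and the bucket name are assumptions or placeholders.

// Hypothetical session logger: capture all events and deliver them to S3.
const sessionLoggerConfig = {
    eventFilter: { all: {} },                // SessionLoggerEventFilter0Properties: log everything
    logConfiguration: {
        s3: {
            bucket: "example-session-logs",  // placeholder bucket
            keyPrefix: "web-portal/",
            folderStructure: "Flat",         // assumed SessionLoggerFolderStructure value
            logFileFormat: "Json",           // assumed SessionLoggerLogFileFormat value
        },
    },
};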
104495
105297
  interface UserSettingsCookieSpecification {
104496
105298
  /**
104497
105299
  * The domain of the cookie.