cdk-lambda-subminute 2.0.316 → 2.0.318

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (28)
  1. package/.jsii +31 -3
  2. package/lib/cdk-lambda-subminute.js +3 -3
  3. package/node_modules/aws-sdk/README.md +1 -1
  4. package/node_modules/aws-sdk/apis/athena-2017-05-18.min.json +110 -68
  5. package/node_modules/aws-sdk/apis/billingconductor-2021-07-30.min.json +9 -3
  6. package/node_modules/aws-sdk/apis/braket-2019-09-01.min.json +77 -42
  7. package/node_modules/aws-sdk/apis/cleanroomsml-2023-09-06.min.json +1 -1
  8. package/node_modules/aws-sdk/apis/cloud9-2017-09-23.min.json +2 -1
  9. package/node_modules/aws-sdk/apis/cloudformation-2010-05-15.waiters2.json +48 -0
  10. package/node_modules/aws-sdk/apis/medialive-2017-10-14.min.json +257 -225
  11. package/node_modules/aws-sdk/clients/athena.d.ts +80 -5
  12. package/node_modules/aws-sdk/clients/billingconductor.d.ts +16 -0
  13. package/node_modules/aws-sdk/clients/braket.d.ts +33 -2
  14. package/node_modules/aws-sdk/clients/cleanroomsml.d.ts +1 -1
  15. package/node_modules/aws-sdk/clients/cloud9.d.ts +2 -2
  16. package/node_modules/aws-sdk/clients/cloudformation.d.ts +3 -3
  17. package/node_modules/aws-sdk/clients/ec2.d.ts +6 -6
  18. package/node_modules/aws-sdk/clients/finspace.d.ts +11 -11
  19. package/node_modules/aws-sdk/clients/medialive.d.ts +26 -0
  20. package/node_modules/aws-sdk/clients/servicecatalogappregistry.d.ts +2 -2
  21. package/node_modules/aws-sdk/dist/aws-sdk-core-react-native.js +6 -1
  22. package/node_modules/aws-sdk/dist/aws-sdk-react-native.js +13 -8
  23. package/node_modules/aws-sdk/dist/aws-sdk.js +166 -71
  24. package/node_modules/aws-sdk/dist/aws-sdk.min.js +103 -103
  25. package/node_modules/aws-sdk/lib/core.js +1 -1
  26. package/node_modules/aws-sdk/lib/json/parser.js +5 -0
  27. package/node_modules/aws-sdk/package.json +1 -1
  28. package/package.json +3 -3
@@ -596,6 +596,7 @@ declare namespace Athena {
596
596
  ErrorMessage?: String;
597
597
  }
598
598
  export type AuthToken = string;
599
+ export type AuthenticationType = "DIRECTORY_IDENTITY"|string;
599
600
  export type AwsAccountId = string;
600
601
  export interface BatchGetNamedQueryInput {
601
602
  /**
@@ -847,7 +848,7 @@ declare namespace Athena {
847
848
  */
848
849
  Scale?: Integer;
849
850
  /**
850
- * Indicates the column's nullable status.
851
+ * Unsupported constraint. This value always shows as UNKNOWN.
851
852
  */
852
853
  Nullable?: ColumnNullable;
853
854
  /**
@@ -1325,6 +1326,10 @@ declare namespace Athena {
1325
1326
  * The name of the data catalog to return.
1326
1327
  */
1327
1328
  Name: CatalogNameString;
1329
+ /**
1330
+ * The name of the workgroup. Required if making an IAM Identity Center request.
1331
+ */
1332
+ WorkGroup?: WorkGroupName;
1328
1333
  }
1329
1334
  export interface GetDataCatalogOutput {
1330
1335
  /**
@@ -1341,6 +1346,10 @@ declare namespace Athena {
1341
1346
  * The name of the database to return.
1342
1347
  */
1343
1348
  DatabaseName: NameString;
1349
+ /**
1350
+ * The name of the workgroup for which the metadata is being fetched. Required if requesting an IAM Identity Center enabled Glue Data Catalog.
1351
+ */
1352
+ WorkGroup?: WorkGroupName;
1344
1353
  }
1345
1354
  export interface GetDatabaseOutput {
1346
1355
  /**
@@ -1513,6 +1522,10 @@ declare namespace Athena {
1513
1522
  * The name of the table for which metadata is returned.
1514
1523
  */
1515
1524
  TableName: NameString;
1525
+ /**
1526
+ * The name of the workgroup for which the metadata is being fetched. Required if requesting an IAM Identity Center enabled Glue Data Catalog.
1527
+ */
1528
+ WorkGroup?: WorkGroupName;
1516
1529
  }
1517
1530
  export interface GetTableMetadataOutput {
1518
1531
  /**
@@ -1533,6 +1546,18 @@ declare namespace Athena {
1533
1546
  WorkGroup?: WorkGroup;
1534
1547
  }
1535
1548
  export type IdempotencyToken = string;
1549
+ export type IdentityCenterApplicationArn = string;
1550
+ export interface IdentityCenterConfiguration {
1551
+ /**
1552
+ * Specifies whether the workgroup is IAM Identity Center supported.
1553
+ */
1554
+ EnableIdentityCenter?: BoxedBoolean;
1555
+ /**
1556
+ * The IAM Identity Center instance ARN that the workgroup associates to.
1557
+ */
1558
+ IdentityCenterInstanceArn?: IdentityCenterInstanceArn;
1559
+ }
1560
+ export type IdentityCenterInstanceArn = string;
1536
1561
  export interface ImportNotebookInput {
1537
1562
  /**
1538
1563
  * The name of the Spark enabled workgroup to import the notebook to.
@@ -1641,6 +1666,10 @@ declare namespace Athena {
1641
1666
  * Specifies the maximum number of data catalogs to return.
1642
1667
  */
1643
1668
  MaxResults?: MaxDataCatalogsCount;
1669
+ /**
1670
+ * The name of the workgroup. Required if making an IAM Identity Center request.
1671
+ */
1672
+ WorkGroup?: WorkGroupName;
1644
1673
  }
1645
1674
  export interface ListDataCatalogsOutput {
1646
1675
  /**
@@ -1665,6 +1694,10 @@ declare namespace Athena {
1665
1694
  * Specifies the maximum number of results to return.
1666
1695
  */
1667
1696
  MaxResults?: MaxDatabasesCount;
1697
+ /**
1698
+ * The name of the workgroup for which the metadata is being fetched. Required if requesting an IAM Identity Center enabled Glue Data Catalog.
1699
+ */
1700
+ WorkGroup?: WorkGroupName;
1668
1701
  }
1669
1702
  export interface ListDatabasesOutput {
1670
1703
  /**
@@ -1901,6 +1934,10 @@ declare namespace Athena {
1901
1934
  * Specifies the maximum number of results to return.
1902
1935
  */
1903
1936
  MaxResults?: MaxTableMetadataCount;
1937
+ /**
1938
+ * The name of the workgroup for which the metadata is being fetched. Required if requesting an IAM Identity Center enabled Glue Data Catalog.
1939
+ */
1940
+ WorkGroup?: WorkGroupName;
1904
1941
  }
1905
1942
  export interface ListTableMetadataOutput {
1906
1943
  /**
@@ -2145,6 +2182,10 @@ declare namespace Athena {
2145
2182
  * The kind of query statement that was run.
2146
2183
  */
2147
2184
  SubstatementType?: String;
2185
+ /**
2186
+ * Specifies whether Amazon S3 access grants are enabled for query results.
2187
+ */
2188
+ QueryResultsS3AccessGrantsConfiguration?: QueryResultsS3AccessGrantsConfiguration;
2148
2189
  }
2149
2190
  export interface QueryExecutionContext {
2150
2191
  /**
@@ -2220,6 +2261,20 @@ declare namespace Athena {
2220
2261
  */
2221
2262
  AthenaError?: AthenaError;
2222
2263
  }
2264
+ export interface QueryResultsS3AccessGrantsConfiguration {
2265
+ /**
2266
+ * Specifies whether Amazon S3 access grants are enabled for query results.
2267
+ */
2268
+ EnableS3AccessGrants: BoxedBoolean;
2269
+ /**
2270
+ * When enabled, appends the user ID as an Amazon S3 path prefix to the query result output location.
2271
+ */
2272
+ CreateUserLevelPrefix?: BoxedBoolean;
2273
+ /**
2274
+ * The authentication type used for Amazon S3 access grants. Currently, only DIRECTORY_IDENTITY is supported.
2275
+ */
2276
+ AuthenticationType: AuthenticationType;
2277
+ }
2223
2278
  export interface QueryRuntimeStatistics {
2224
2279
  Timeline?: QueryRuntimeStatisticsTimeline;
2225
2280
  Rows?: QueryRuntimeStatisticsRows;
@@ -2434,7 +2489,7 @@ declare namespace Athena {
2434
2489
  export type S3Uri = string;
2435
2490
  export interface SessionConfiguration {
2436
2491
  /**
2437
- * The ARN of the execution role used in a Spark session to access user resources. This property applies only to Spark-enabled workgroups.
2492
+ * The ARN of the execution role used to access user resources for Spark sessions and Identity Center enabled workgroups. This property applies only to Spark enabled workgroups and Identity Center enabled workgroups.
2438
2493
  */
2439
2494
  ExecutionRole?: RoleArn;
2440
2495
  /**
@@ -2544,7 +2599,7 @@ declare namespace Athena {
2544
2599
  */
2545
2600
  QueryString: QueryString;
2546
2601
  /**
2547
- * A unique case-sensitive string used to ensure the request to create the query is idempotent (executes only once). If another StartQueryExecution request is received, the same response is returned and another query is not created. If a parameter has changed, for example, the QueryString, an error is returned. This token is listed as not required because Amazon Web Services SDKs (for example the Amazon Web Services SDK for Java) auto-generate the token for users. If you are not using the Amazon Web Services SDK or the Amazon Web Services CLI, you must provide this token or the action will fail.
2602
+ * A unique case-sensitive string used to ensure the request to create the query is idempotent (executes only once). If another StartQueryExecution request is received, the same response is returned and another query is not created. An error is returned if a parameter, such as QueryString, has changed. A call to StartQueryExecution that uses a previous client request token returns the same QueryExecutionId even if the requester doesn't have permission on the tables specified in QueryString. This token is listed as not required because Amazon Web Services SDKs (for example the Amazon Web Services SDK for Java) auto-generate the token for users. If you are not using the Amazon Web Services SDK or the Amazon Web Services CLI, you must provide this token or the action will fail.
2548
2603
  */
2549
2604
  ClientRequestToken?: IdempotencyToken;
2550
2605
  /**
@@ -2919,6 +2974,10 @@ declare namespace Athena {
2919
2974
  * The date and time the workgroup was created.
2920
2975
  */
2921
2976
  CreationTime?: _Date;
2977
+ /**
2978
+ * The ARN of the IAM Identity Center enabled application associated with the workgroup.
2979
+ */
2980
+ IdentityCenterApplicationArn?: IdentityCenterApplicationArn;
2922
2981
  }
2923
2982
  export interface WorkGroupConfiguration {
2924
2983
  /**
@@ -2950,7 +3009,7 @@ declare namespace Athena {
2950
3009
  */
2951
3010
  AdditionalConfiguration?: NameString;
2952
3011
  /**
2953
- * Role used in a Spark session for accessing the user's resources. This property applies only to Spark-enabled workgroups.
3012
+ * The ARN of the execution role used to access user resources for Spark sessions and Identity Center enabled workgroups. This property applies only to Spark enabled workgroups and Identity Center enabled workgroups.
2954
3013
  */
2955
3014
  ExecutionRole?: RoleArn;
2956
3015
  /**
@@ -2961,6 +3020,14 @@ declare namespace Athena {
2961
3020
  * Enforces a minimal level of encryption for the workgroup for query and calculation results that are written to Amazon S3. When enabled, workgroup users can set encryption only to the minimum level set by the administrator or higher when they submit queries. The EnforceWorkGroupConfiguration setting takes precedence over the EnableMinimumEncryptionConfiguration flag. This means that if EnforceWorkGroupConfiguration is true, the EnableMinimumEncryptionConfiguration flag is ignored, and the workgroup configuration for encryption is used.
2962
3021
  */
2963
3022
  EnableMinimumEncryptionConfiguration?: BoxedBoolean;
3023
+ /**
3024
+ * Specifies whether the workgroup is IAM Identity Center supported.
3025
+ */
3026
+ IdentityCenterConfiguration?: IdentityCenterConfiguration;
3027
+ /**
3028
+ * Specifies whether Amazon S3 access grants are enabled for query results.
3029
+ */
3030
+ QueryResultsS3AccessGrantsConfiguration?: QueryResultsS3AccessGrantsConfiguration;
2964
3031
  }
2965
3032
  export interface WorkGroupConfigurationUpdates {
2966
3033
  /**
@@ -3000,7 +3067,7 @@ declare namespace Athena {
3000
3067
  */
3001
3068
  AdditionalConfiguration?: NameString;
3002
3069
  /**
3003
- * The ARN of the execution role used to access user resources. This property applies only to Spark-enabled workgroups.
3070
+ * The ARN of the execution role used to access user resources for Spark sessions and Identity Center enabled workgroups. This property applies only to Spark enabled workgroups and Identity Center enabled workgroups.
3004
3071
  */
3005
3072
  ExecutionRole?: RoleArn;
3006
3073
  CustomerContentEncryptionConfiguration?: CustomerContentEncryptionConfiguration;
@@ -3008,6 +3075,10 @@ declare namespace Athena {
3008
3075
  * Enforces a minimal level of encryption for the workgroup for query and calculation results that are written to Amazon S3. When enabled, workgroup users can set encryption only to the minimum level set by the administrator or higher when they submit queries. This setting does not apply to Spark-enabled workgroups. The EnforceWorkGroupConfiguration setting takes precedence over the EnableMinimumEncryptionConfiguration flag. This means that if EnforceWorkGroupConfiguration is true, the EnableMinimumEncryptionConfiguration flag is ignored, and the workgroup configuration for encryption is used.
3009
3076
  */
3010
3077
  EnableMinimumEncryptionConfiguration?: BoxedBoolean;
3078
+ /**
3079
+ * Specifies whether Amazon S3 access grants are enabled for query results.
3080
+ */
3081
+ QueryResultsS3AccessGrantsConfiguration?: QueryResultsS3AccessGrantsConfiguration;
3011
3082
  }
3012
3083
  export type WorkGroupDescriptionString = string;
3013
3084
  export type WorkGroupName = string;
@@ -3034,6 +3105,10 @@ declare namespace Athena {
3034
3105
  * The engine version setting for all queries on the workgroup. Queries on the AmazonAthenaPreviewFunctionality workgroup run on the preview engine regardless of this setting.
3035
3106
  */
3036
3107
  EngineVersion?: EngineVersion;
3108
+ /**
3109
+ * The ARN of the IAM Identity Center enabled application associated with the workgroup.
3110
+ */
3111
+ IdentityCenterApplicationArn?: IdentityCenterApplicationArn;
3037
3112
  }
3038
3113
  export type WorkGroupsList = WorkGroupSummary[];
3039
3114
  export type datumList = Datum[];
@@ -548,6 +548,10 @@ declare namespace Billingconductor {
548
548
  * A CustomLineItemChargeDetails that describes the charge details for a custom line item.
549
549
  */
550
550
  ChargeDetails: CustomLineItemChargeDetails;
551
+ /**
552
+ * The Amazon Web Services account in which this custom line item will be applied to.
553
+ */
554
+ AccountId?: AccountId;
551
555
  }
552
556
  export interface CreateCustomLineItemOutput {
553
557
  /**
@@ -737,6 +741,10 @@ declare namespace Billingconductor {
737
741
  * The number of resources that are associated to the custom line item.
738
742
  */
739
743
  AssociationSize?: NumberOfAssociations;
744
+ /**
745
+ * The Amazon Web Services account in which this custom line item will be applied to.
746
+ */
747
+ AccountId?: AccountId;
740
748
  }
741
749
  export type CustomLineItemName = string;
742
750
  export type CustomLineItemNameList = CustomLineItemName[];
@@ -805,6 +813,10 @@ declare namespace Billingconductor {
805
813
  * The inclusive start time.
806
814
  */
807
815
  StartTime?: Instant;
816
+ /**
817
+ * The Amazon Web Services account in which this custom line item will be applied to.
818
+ */
819
+ AccountId?: AccountId;
808
820
  }
809
821
  export interface DeleteBillingGroupInput {
810
822
  /**
@@ -1134,6 +1146,10 @@ declare namespace Billingconductor {
1134
1146
  * A list of custom line item ARNs to retrieve information.
1135
1147
  */
1136
1148
  Arns?: CustomLineItemArns;
1149
+ /**
1150
+ * The Amazon Web Services accounts in which this custom line item will be applied to.
1151
+ */
1152
+ AccountIds?: AccountIdList;
1137
1153
  }
1138
1154
  export interface ListCustomLineItemsInput {
1139
1155
  /**
@@ -127,6 +127,19 @@ declare namespace Braket {
127
127
  */
128
128
  scriptModeConfig?: ScriptModeConfig;
129
129
  }
130
+ export interface Association {
131
+ /**
132
+ * The Amazon Braket resource arn.
133
+ */
134
+ arn: BraketResourceArn;
135
+ /**
136
+ * The association type for the specified Amazon Braket resource arn.
137
+ */
138
+ type: AssociationType;
139
+ }
140
+ export type AssociationType = "RESERVATION_TIME_WINDOW_ARN"|string;
141
+ export type Associations = Association[];
142
+ export type BraketResourceArn = string;
130
143
  export interface CancelJobRequest {
131
144
  /**
132
145
  * The ARN of the Amazon Braket job to cancel.
@@ -176,6 +189,10 @@ declare namespace Braket {
176
189
  * Definition of the Amazon Braket job to be created. Specifies the container image the job uses and information about the Python scripts used for entry and training.
177
190
  */
178
191
  algorithmSpecification: AlgorithmSpecification;
192
+ /**
193
+ * The list of Amazon Braket resources associated with the hybrid job.
194
+ */
195
+ associations?: CreateJobRequestAssociationsList;
179
196
  /**
180
197
  * Information about the output locations for job checkpoint data.
181
198
  */
@@ -221,6 +238,7 @@ declare namespace Braket {
221
238
  */
222
239
  tags?: TagsMap;
223
240
  }
241
+ export type CreateJobRequestAssociationsList = Association[];
224
242
  export type CreateJobRequestInputDataConfigList = InputFileConfig[];
225
243
  export type CreateJobRequestJobNameString = string;
226
244
  export interface CreateJobResponse {
@@ -234,6 +252,10 @@ declare namespace Braket {
234
252
  * The action associated with the task.
235
253
  */
236
254
  action: JsonValue;
255
+ /**
256
+ * The list of Amazon Braket resources associated with the quantum task.
257
+ */
258
+ associations?: CreateQuantumTaskRequestAssociationsList;
237
259
  /**
238
260
  * The client token associated with the request.
239
261
  */
@@ -267,6 +289,7 @@ declare namespace Braket {
267
289
  */
268
290
  tags?: TagsMap;
269
291
  }
292
+ export type CreateQuantumTaskRequestAssociationsList = Association[];
270
293
  export type CreateQuantumTaskRequestDeviceParametersString = string;
271
294
  export type CreateQuantumTaskRequestOutputS3BucketString = string;
272
295
  export type CreateQuantumTaskRequestOutputS3KeyPrefixString = string;
@@ -381,6 +404,10 @@ declare namespace Braket {
381
404
  * Definition of the Amazon Braket job created. Specifies the container image the job uses, information about the Python scripts used for entry and training, and the user-defined metrics used to evaluation the job.
382
405
  */
383
406
  algorithmSpecification: AlgorithmSpecification;
407
+ /**
408
+ * The list of Amazon Braket resources associated with the hybrid job.
409
+ */
410
+ associations?: Associations;
384
411
  /**
385
412
  * The billable time the Amazon Braket job used to complete.
386
413
  */
@@ -465,11 +492,15 @@ declare namespace Braket {
465
492
  */
466
493
  additionalAttributeNames?: QuantumTaskAdditionalAttributeNamesList;
467
494
  /**
468
- * the ARN of the task to retrieve.
495
+ * The ARN of the task to retrieve.
469
496
  */
470
497
  quantumTaskArn: QuantumTaskArn;
471
498
  }
472
499
  export interface GetQuantumTaskResponse {
500
+ /**
501
+ * The list of Amazon Braket resources associated with the quantum task.
502
+ */
503
+ associations?: Associations;
473
504
  /**
474
505
  * The time at which the task was created.
475
506
  */
@@ -596,7 +627,7 @@ declare namespace Braket {
596
627
  */
597
628
  message?: JobEventDetailsMessageString;
598
629
  /**
599
- * TThe type of event that occurred related to the Amazon Braket job.
630
+ * The type of event that occurred related to the Amazon Braket job.
600
631
  */
601
632
  timeOfEvent?: SyntheticTimestamp_date_time;
602
633
  }
@@ -487,7 +487,7 @@ declare namespace CleanRoomsML {
487
487
  */
488
488
  audienceSizeConfig?: AudienceSizeConfig;
489
489
  /**
490
- * Configure how the service tags audience generation jobs created using this configured audience model. If you specify NONE, the tags from the StartAudienceGenerationJob request determine the tags of the audience generation job. If you specify FROM_PARENT_RESOURCE, the audience generation job inherits the tags from the configured audience model, by default. Tags in the StartAudienceGenerationJob will override the default.
490
+ * Configure how the service tags audience generation jobs created using this configured audience model. If you specify NONE, the tags from the StartAudienceGenerationJob request determine the tags of the audience generation job. If you specify FROM_PARENT_RESOURCE, the audience generation job inherits the tags from the configured audience model, by default. Tags in the StartAudienceGenerationJob will override the default. When the client is in a different account than the configured audience model, the tags from the client are never applied to a resource in the caller's account.
491
491
  */
492
492
  childResourceTagOnCreatePolicy?: TagOnCreatePolicy;
493
493
  /**
@@ -143,9 +143,9 @@ declare namespace Cloud9 {
143
143
  */
144
144
  subnetId?: SubnetId;
145
145
  /**
146
- * The identifier for the Amazon Machine Image (AMI) that's used to create the EC2 instance. To choose an AMI for the instance, you must specify a valid AMI alias or a valid Amazon EC2 Systems Manager (SSM) path. From December 04, 2023, you will be required to include the imageId parameter for the CreateEnvironmentEC2 action. This change will be reflected across all direct methods of communicating with the API, such as Amazon Web Services SDK, Amazon Web Services CLI and Amazon Web Services CloudFormation. This change will only affect direct API consumers, and not Cloud9 console users. From January 22, 2024, Amazon Linux (AL1) will be removed from the list of available image IDs for Cloud9. This is necessary as AL1 will reach the end of maintenance support in December 2023, and as a result will no longer receive security updates. We recommend using Amazon Linux 2 as the AMI to create your environment as it is fully supported. This change will only affect direct API consumers, and not Cloud9 console users. Since Ubuntu 18.04 has ended standard support as of May 31, 2023, we recommend you choose Ubuntu 22.04. AMI aliases Amazon Linux (default): amazonlinux-1-x86_64 Amazon Linux 2: amazonlinux-2-x86_64 Ubuntu 18.04: ubuntu-18.04-x86_64 Ubuntu 22.04: ubuntu-22.04-x86_64 SSM paths Amazon Linux (default): resolve:ssm:/aws/service/cloud9/amis/amazonlinux-1-x86_64 Amazon Linux 2: resolve:ssm:/aws/service/cloud9/amis/amazonlinux-2-x86_64 Ubuntu 18.04: resolve:ssm:/aws/service/cloud9/amis/ubuntu-18.04-x86_64 Ubuntu 22.04: resolve:ssm:/aws/service/cloud9/amis/ubuntu-22.04-x86_64
146
+ * The identifier for the Amazon Machine Image (AMI) that's used to create the EC2 instance. To choose an AMI for the instance, you must specify a valid AMI alias or a valid Amazon EC2 Systems Manager (SSM) path. From December 04, 2023, you will be required to include the imageId parameter for the CreateEnvironmentEC2 action. This change will be reflected across all direct methods of communicating with the API, such as Amazon Web Services SDK, Amazon Web Services CLI and Amazon Web Services CloudFormation. This change will only affect direct API consumers, and not Cloud9 console users. From January 22, 2024, Amazon Linux (AL1) will be removed from the list of available image IDs for Cloud9. This is necessary as AL1 will reach the end of maintenance support in December 2023, and as a result will no longer receive security updates. We recommend using Amazon Linux 2 as the AMI to create your environment as it is fully supported. This change will only affect direct API consumers, and not Cloud9 console users. Since Ubuntu 18.04 has ended standard support as of May 31, 2023, we recommend you choose Ubuntu 22.04. AMI aliases Amazon Linux: amazonlinux-1-x86_64 Amazon Linux 2: amazonlinux-2-x86_64 Ubuntu 18.04: ubuntu-18.04-x86_64 Ubuntu 22.04: ubuntu-22.04-x86_64 SSM paths Amazon Linux: resolve:ssm:/aws/service/cloud9/amis/amazonlinux-1-x86_64 Amazon Linux 2: resolve:ssm:/aws/service/cloud9/amis/amazonlinux-2-x86_64 Ubuntu 18.04: resolve:ssm:/aws/service/cloud9/amis/ubuntu-18.04-x86_64 Ubuntu 22.04: resolve:ssm:/aws/service/cloud9/amis/ubuntu-22.04-x86_64
147
147
  */
148
- imageId?: ImageId;
148
+ imageId: ImageId;
149
149
  /**
150
150
  * The number of minutes until the running instance is shut down after the environment has last been used.
151
151
  */
@@ -909,7 +909,7 @@ declare namespace CloudFormation {
909
909
  */
910
910
  RootChangeSetId?: ChangeSetId;
911
911
  /**
912
- * Indicates if the stack set imports resources that already exist.
912
+ * Indicates if the change set imports resources that already exist.
913
913
  */
914
914
  ImportExistingResources?: ImportExistingResources;
915
915
  }
@@ -1016,7 +1016,7 @@ declare namespace CloudFormation {
1016
1016
  */
1017
1017
  OnStackFailure?: OnStackFailure;
1018
1018
  /**
1019
- * Indicates if the stack set imports resources that already exist. This parameter can only import resources that have custom names in templates. For more information, see name type in the CloudFormation User Guide. To import resources that do not accept custom names, such as EC2 instances, use the resource import feature instead. For more information, see Bringing existing resources into CloudFormation management in the CloudFormation User Guide.
1019
+ * Indicates if the change set imports resources that already exist. This parameter can only import resources that have custom names in templates. For more information, see name type in the CloudFormation User Guide. To import resources that do not accept custom names, such as EC2 instances, use the resource import feature instead. For more information, see Bringing existing resources into CloudFormation management in the CloudFormation User Guide.
1020
1020
  */
1021
1021
  ImportExistingResources?: ImportExistingResources;
1022
1022
  }
@@ -1521,7 +1521,7 @@ declare namespace CloudFormation {
1521
1521
  */
1522
1522
  OnStackFailure?: OnStackFailure;
1523
1523
  /**
1524
- * Indicates if the stack set imports resources that already exist. This parameter can only import resources that have custom names in templates. To import resources that do not accept custom names, such as EC2 instances, use the resource import feature instead.
1524
+ * Indicates if the change set imports resources that already exist. This parameter can only import resources that have custom names in templates. To import resources that do not accept custom names, such as EC2 instances, use the resource import feature instead.
1525
1525
  */
1526
1526
  ImportExistingResources?: ImportExistingResources;
1527
1527
  }
@@ -5242,9 +5242,9 @@ declare namespace EC2 {
5242
5242
  */
5243
5243
  Max?: Integer;
5244
5244
  }
5245
- export type AcceleratorManufacturer = "amazon-web-services"|"amd"|"nvidia"|"xilinx"|string;
5245
+ export type AcceleratorManufacturer = "amazon-web-services"|"amd"|"nvidia"|"xilinx"|"habana"|string;
5246
5246
  export type AcceleratorManufacturerSet = AcceleratorManufacturer[];
5247
- export type AcceleratorName = "a100"|"inferentia"|"k520"|"k80"|"m60"|"radeon-pro-v520"|"t4"|"vu9p"|"v100"|string;
5247
+ export type AcceleratorName = "a100"|"inferentia"|"k520"|"k80"|"m60"|"radeon-pro-v520"|"t4"|"vu9p"|"v100"|"a10g"|"h100"|"t4g"|string;
5248
5248
  export type AcceleratorNameSet = AcceleratorName[];
5249
5249
  export interface AcceleratorTotalMemoryMiB {
5250
5250
  /**
@@ -24781,11 +24781,11 @@ declare namespace EC2 {
24781
24781
  */
24782
24782
  AcceleratorCount?: AcceleratorCount;
24783
24783
  /**
24784
- * Indicates whether instance types must have accelerators by specific manufacturers. For instance types with NVIDIA devices, specify nvidia. For instance types with AMD devices, specify amd. For instance types with Amazon Web Services devices, specify amazon-web-services. For instance types with Xilinx devices, specify xilinx. Default: Any manufacturer
24784
+ * Indicates whether instance types must have accelerators by specific manufacturers. For instance types with Amazon Web Services devices, specify amazon-web-services. For instance types with AMD devices, specify amd. For instance types with Habana devices, specify habana. For instance types with NVIDIA devices, specify nvidia. For instance types with Xilinx devices, specify xilinx. Default: Any manufacturer
24785
24785
  */
24786
24786
  AcceleratorManufacturers?: AcceleratorManufacturerSet;
24787
24787
  /**
24788
- * The accelerators that must be on the instance type. For instance types with NVIDIA A100 GPUs, specify a100. For instance types with NVIDIA V100 GPUs, specify v100. For instance types with NVIDIA K80 GPUs, specify k80. For instance types with NVIDIA T4 GPUs, specify t4. For instance types with NVIDIA M60 GPUs, specify m60. For instance types with AMD Radeon Pro V520 GPUs, specify radeon-pro-v520. For instance types with Xilinx VU9P FPGAs, specify vu9p. For instance types with Amazon Web Services Inferentia chips, specify inferentia. For instance types with NVIDIA GRID K520 GPUs, specify k520. Default: Any accelerator
24788
+ * The accelerators that must be on the instance type. For instance types with NVIDIA A10G GPUs, specify a10g. For instance types with NVIDIA A100 GPUs, specify a100. For instance types with NVIDIA H100 GPUs, specify h100. For instance types with Amazon Web Services Inferentia chips, specify inferentia. For instance types with NVIDIA GRID K520 GPUs, specify k520. For instance types with NVIDIA K80 GPUs, specify k80. For instance types with NVIDIA M60 GPUs, specify m60. For instance types with AMD Radeon Pro V520 GPUs, specify radeon-pro-v520. For instance types with NVIDIA T4 GPUs, specify t4. For instance types with NVIDIA T4G GPUs, specify t4g. For instance types with Xilinx VU9P FPGAs, specify vu9p. For instance types with NVIDIA V100 GPUs, specify v100. Default: Any accelerator
24789
24789
  */
24790
24790
  AcceleratorNames?: AcceleratorNameSet;
24791
24791
  /**
@@ -24875,11 +24875,11 @@ declare namespace EC2 {
24875
24875
  */
24876
24876
  AcceleratorCount?: AcceleratorCountRequest;
24877
24877
  /**
24878
- * Indicates whether instance types must have accelerators by specific manufacturers. For instance types with NVIDIA devices, specify nvidia. For instance types with AMD devices, specify amd. For instance types with Amazon Web Services devices, specify amazon-web-services. For instance types with Xilinx devices, specify xilinx. Default: Any manufacturer
24878
+ * Indicates whether instance types must have accelerators by specific manufacturers. For instance types with Amazon Web Services devices, specify amazon-web-services. For instance types with AMD devices, specify amd. For instance types with Habana devices, specify habana. For instance types with NVIDIA devices, specify nvidia. For instance types with Xilinx devices, specify xilinx. Default: Any manufacturer
24879
24879
  */
24880
24880
  AcceleratorManufacturers?: AcceleratorManufacturerSet;
24881
24881
  /**
24882
- * The accelerators that must be on the instance type. For instance types with NVIDIA A100 GPUs, specify a100. For instance types with NVIDIA V100 GPUs, specify v100. For instance types with NVIDIA K80 GPUs, specify k80. For instance types with NVIDIA T4 GPUs, specify t4. For instance types with NVIDIA M60 GPUs, specify m60. For instance types with AMD Radeon Pro V520 GPUs, specify radeon-pro-v520. For instance types with Xilinx VU9P FPGAs, specify vu9p. For instance types with Amazon Web Services Inferentia chips, specify inferentia. For instance types with NVIDIA GRID K520 GPUs, specify k520. Default: Any accelerator
24882
+ * The accelerators that must be on the instance type. For instance types with NVIDIA A10G GPUs, specify a10g. For instance types with NVIDIA A100 GPUs, specify a100. For instance types with NVIDIA H100 GPUs, specify h100. For instance types with Amazon Web Services Inferentia chips, specify inferentia. For instance types with NVIDIA GRID K520 GPUs, specify k520. For instance types with NVIDIA K80 GPUs, specify k80. For instance types with NVIDIA M60 GPUs, specify m60. For instance types with AMD Radeon Pro V520 GPUs, specify radeon-pro-v520. For instance types with NVIDIA T4 GPUs, specify t4. For instance types with NVIDIA T4G GPUs, specify t4g. For instance types with Xilinx VU9P FPGAs, specify vu9p. For instance types with NVIDIA V100 GPUs, specify v100. Default: Any accelerator
24883
24883
  */
24884
24884
  AcceleratorNames?: AcceleratorNameSet;
24885
24885
  /**
@@ -428,7 +428,7 @@ declare namespace Finspace {
428
428
  */
429
429
  databaseName: DatabaseName;
430
430
  /**
431
- * A list of change request objects that are run in order. A change request object consists of changeType , s3Path, and a dbPath. A changeType can has the following values: PUT – Adds or updates files in a database. DELETE – Deletes files in a database. All the change requests require a mandatory dbPath attribute that defines the path within the database directory. The s3Path attribute defines the s3 source file path and is required for a PUT change type. Here is an example of how you can use the change request object: [ { "changeType": "PUT", "s3Path":"s3://bucket/db/2020.01.02/", "dbPath":"/2020.01.02/"}, { "changeType": "PUT", "s3Path":"s3://bucket/db/sym", "dbPath":"/"}, { "changeType": "DELETE", "dbPath": "/2020.01.01/"} ] In this example, the first request with PUT change type allows you to add files in the given s3Path under the 2020.01.02 partition of the database. The second request with PUT change type allows you to add a single sym file at database root location. The last request with DELETE change type allows you to delete the files under the 2020.01.01 partition of the database.
431
+ * A list of change request objects that are run in order. A change request object consists of changeType , s3Path, and dbPath. A changeType can has the following values: PUT – Adds or updates files in a database. DELETE – Deletes files in a database. All the change requests require a mandatory dbPath attribute that defines the path within the database directory. All database paths must start with a leading / and end with a trailing /. The s3Path attribute defines the s3 source file path and is required for a PUT change type. The s3path must end with a trailing / if it is a directory and must end without a trailing / if it is a file. Here are few examples of how you can use the change request object: This request adds a single sym file at database root location. { "changeType": "PUT", "s3Path":"s3://bucket/db/sym", "dbPath":"/"} This request adds files in the given s3Path under the 2020.01.02 partition of the database. { "changeType": "PUT", "s3Path":"s3://bucket/db/2020.01.02/", "dbPath":"/2020.01.02/"} This request adds files in the given s3Path under the taq table partition of the database. [ { "changeType": "PUT", "s3Path":"s3://bucket/db/2020.01.02/taq/", "dbPath":"/2020.01.02/taq/"}] This request deletes the 2020.01.02 partition of the database. [{ "changeType": "DELETE", "dbPath": "/2020.01.02/"} ] The DELETE request allows you to delete the existing files under the 2020.01.02 partition of the database, and the PUT request adds a new taq table under it. [ {"changeType": "DELETE", "dbPath":"/2020.01.02/"}, {"changeType": "PUT", "s3Path":"s3://bucket/db/2020.01.02/taq/", "dbPath":"/2020.01.02/taq/"}]
432
432
  */
433
433
  changeRequests: ChangeRequests;
434
434
  /**
@@ -484,7 +484,7 @@ declare namespace Finspace {
484
484
  */
485
485
  clusterName: KxClusterName;
486
486
  /**
487
- * Specifies the type of KDB database that is being created. The following types are available: HDB – A Historical Database. The data is only accessible with read-only permissions from one of the FinSpace managed kdb databases mounted to the cluster. RDB – A Realtime Database. This type of database captures all the data from a ticker plant and stores it in memory until the end of day, after which it writes all of its data to a disk and reloads the HDB. This cluster type requires local storage for temporary storage of data during the savedown process. If you specify this field in your request, you must provide the savedownStorageConfiguration parameter. GATEWAY – A gateway cluster allows you to access data across processes in kdb systems. It allows you to create your own routing logic using the initialization scripts and custom code. This type of cluster does not require a writable local storage.
487
+ * Specifies the type of KDB database that is being created. The following types are available: HDB – A Historical Database. The data is only accessible with read-only permissions from one of the FinSpace managed kdb databases mounted to the cluster. RDB – A Realtime Database. This type of database captures all the data from a ticker plant and stores it in memory until the end of day, after which it writes all of its data to a disk and reloads the HDB. This cluster type requires local storage for temporary storage of data during the savedown process. If you specify this field in your request, you must provide the savedownStorageConfiguration parameter. GATEWAY – A gateway cluster allows you to access data across processes in kdb systems. It allows you to create your own routing logic using the initialization scripts and custom code. This type of cluster does not require a writable local storage. GP – A general purpose cluster allows you to quickly iterate on code during development by granting greater access to system commands and enabling a fast reload of custom code. This cluster type can optionally mount databases including cache and savedown storage. For this cluster type, the node count is fixed at 1. It does not support autoscaling and supports only SINGLE AZ mode.
488
488
  */
489
489
  clusterType: KxClusterType;
490
490
  /**
@@ -566,7 +566,7 @@ declare namespace Finspace {
566
566
  */
567
567
  clusterName?: KxClusterName;
568
568
  /**
569
- * Specifies the type of KDB database that is being created. The following types are available: HDB – A Historical Database. The data is only accessible with read-only permissions from one of the FinSpace managed kdb databases mounted to the cluster. RDB – A Realtime Database. This type of database captures all the data from a ticker plant and stores it in memory until the end of day, after which it writes all of its data to a disk and reloads the HDB. This cluster type requires local storage for temporary storage of data during the savedown process. If you specify this field in your request, you must provide the savedownStorageConfiguration parameter. GATEWAY – A gateway cluster allows you to access data across processes in kdb systems. It allows you to create your own routing logic using the initialization scripts and custom code. This type of cluster does not require a writable local storage.
569
+ * Specifies the type of KDB database that is being created. The following types are available: HDB – A Historical Database. The data is only accessible with read-only permissions from one of the FinSpace managed kdb databases mounted to the cluster. RDB – A Realtime Database. This type of database captures all the data from a ticker plant and stores it in memory until the end of day, after which it writes all of its data to a disk and reloads the HDB. This cluster type requires local storage for temporary storage of data during the savedown process. If you specify this field in your request, you must provide the savedownStorageConfiguration parameter. GATEWAY – A gateway cluster allows you to access data across processes in kdb systems. It allows you to create your own routing logic using the initialization scripts and custom code. This type of cluster does not require a writable local storage. GP – A general purpose cluster allows you to quickly iterate on code during development by granting greater access to system commands and enabling a fast reload of custom code. This cluster type can optionally mount databases including cache and savedown storage. For this cluster type, the node count is fixed at 1. It does not support autoscaling and supports only SINGLE AZ mode.
570
570
  */
571
571
  clusterType?: KxClusterType;
572
572
  /**
@@ -1041,7 +1041,7 @@ declare namespace Finspace {
1041
1041
  */
1042
1042
  clusterName?: KxClusterName;
1043
1043
  /**
1044
- * Specifies the type of KDB database that is being created. The following types are available: HDB – A Historical Database. The data is only accessible with read-only permissions from one of the FinSpace managed kdb databases mounted to the cluster. RDB – A Realtime Database. This type of database captures all the data from a ticker plant and stores it in memory until the end of day, after which it writes all of its data to a disk and reloads the HDB. This cluster type requires local storage for temporary storage of data during the savedown process. If you specify this field in your request, you must provide the savedownStorageConfiguration parameter. GATEWAY – A gateway cluster allows you to access data across processes in kdb systems. It allows you to create your own routing logic using the initialization scripts and custom code. This type of cluster does not require a writable local storage.
1044
+ * Specifies the type of KDB database that is being created. The following types are available: HDB – A Historical Database. The data is only accessible with read-only permissions from one of the FinSpace managed kdb databases mounted to the cluster. RDB – A Realtime Database. This type of database captures all the data from a ticker plant and stores it in memory until the end of day, after which it writes all of its data to a disk and reloads the HDB. This cluster type requires local storage for temporary storage of data during the savedown process. If you specify this field in your request, you must provide the savedownStorageConfiguration parameter. GATEWAY – A gateway cluster allows you to access data across processes in kdb systems. It allows you to create your own routing logic using the initialization scripts and custom code. This type of cluster does not require a writable local storage. GP – A general purpose cluster allows you to quickly iterate on code during development by granting greater access to system commands and enabling a fast reload of custom code. This cluster type can optionally mount databases including cache and savedown storage. For this cluster type, the node count is fixed at 1. It does not support autoscaling and supports only SINGLE AZ mode.
1045
1045
  */
1046
1046
  clusterType?: KxClusterType;
1047
1047
  /**
@@ -1349,7 +1349,7 @@ declare namespace Finspace {
1349
1349
  */
1350
1350
  clusterName?: KxClusterName;
1351
1351
  /**
1352
- * Specifies the type of KDB database that is being created. The following types are available: HDB – A Historical Database. The data is only accessible with read-only permissions from one of the FinSpace managed kdb databases mounted to the cluster. RDB – A Realtime Database. This type of database captures all the data from a ticker plant and stores it in memory until the end of day, after which it writes all of its data to a disk and reloads the HDB. This cluster type requires local storage for temporary storage of data during the savedown process. If you specify this field in your request, you must provide the savedownStorageConfiguration parameter. GATEWAY – A gateway cluster allows you to access data across processes in kdb systems. It allows you to create your own routing logic using the initialization scripts and custom code. This type of cluster does not require a writable local storage.
1352
+ * Specifies the type of KDB database that is being created. The following types are available: HDB – A Historical Database. The data is only accessible with read-only permissions from one of the FinSpace managed kdb databases mounted to the cluster. RDB – A Realtime Database. This type of database captures all the data from a ticker plant and stores it in memory until the end of day, after which it writes all of its data to a disk and reloads the HDB. This cluster type requires local storage for temporary storage of data during the savedown process. If you specify this field in your request, you must provide the savedownStorageConfiguration parameter. GATEWAY – A gateway cluster allows you to access data across processes in kdb systems. It allows you to create your own routing logic using the initialization scripts and custom code. This type of cluster does not require a writable local storage. GP – A general purpose cluster allows you to quickly iterate on code during development by granting greater access to system commands and enabling a fast reload of custom code. This cluster type can optionally mount databases including cache and savedown storage. For this cluster type, the node count is fixed at 1. It does not support autoscaling and supports only SINGLE AZ mode.
1353
1353
  */
1354
1354
  clusterType?: KxClusterType;
1355
1355
  /**
@@ -1387,17 +1387,17 @@ declare namespace Finspace {
1387
1387
  }
1388
1388
  export interface KxClusterCodeDeploymentConfiguration {
1389
1389
  /**
1390
- * The type of deployment that you want on a cluster. ROLLING – This options updates the cluster by stopping the exiting q process and starting a new q process with updated configuration. FORCE – This option updates the cluster by immediately stopping all the running processes before starting up new ones with the updated configuration.
1390
+ * The type of deployment that you want on a cluster. ROLLING – This options updates the cluster by stopping the exiting q process and starting a new q process with updated configuration. NO_RESTART – This option updates the cluster without stopping the running q process. It is only available for GP type cluster. This option is quicker as it reduces the turn around time to update configuration on a cluster. With this deployment mode, you cannot update the initializationScript and commandLineArguments parameters. FORCE – This option updates the cluster by immediately stopping all the running processes before starting up new ones with the updated configuration.
1391
1391
  */
1392
1392
  deploymentStrategy: KxClusterCodeDeploymentStrategy;
1393
1393
  }
1394
- export type KxClusterCodeDeploymentStrategy = "ROLLING"|"FORCE"|string;
1394
+ export type KxClusterCodeDeploymentStrategy = "NO_RESTART"|"ROLLING"|"FORCE"|string;
1395
1395
  export type KxClusterDescription = string;
1396
1396
  export type KxClusterName = string;
1397
1397
  export type KxClusterNodeIdString = string;
1398
1398
  export type KxClusterStatus = "PENDING"|"CREATING"|"CREATE_FAILED"|"RUNNING"|"UPDATING"|"DELETING"|"DELETED"|"DELETE_FAILED"|string;
1399
1399
  export type KxClusterStatusReason = string;
1400
- export type KxClusterType = "HDB"|"RDB"|"GATEWAY"|string;
1400
+ export type KxClusterType = "HDB"|"RDB"|"GATEWAY"|"GP"|string;
1401
1401
  export type KxClusters = KxCluster[];
1402
1402
  export interface KxCommandLineArgument {
1403
1403
  /**
@@ -1667,7 +1667,7 @@ declare namespace Finspace {
1667
1667
  */
1668
1668
  environmentId: KxEnvironmentId;
1669
1669
  /**
1670
- * Specifies the type of KDB database that is being created. The following types are available: HDB – A Historical Database. The data is only accessible with read-only permissions from one of the FinSpace managed kdb databases mounted to the cluster. RDB – A Realtime Database. This type of database captures all the data from a ticker plant and stores it in memory until the end of day, after which it writes all of its data to a disk and reloads the HDB. This cluster type requires local storage for temporary storage of data during the savedown process. If you specify this field in your request, you must provide the savedownStorageConfiguration parameter. GATEWAY – A gateway cluster allows you to access data across processes in kdb systems. It allows you to create your own routing logic using the initialization scripts and custom code. This type of cluster does not require a writable local storage.
1670
+ * Specifies the type of KDB database that is being created. The following types are available: HDB – A Historical Database. The data is only accessible with read-only permissions from one of the FinSpace managed kdb databases mounted to the cluster. RDB – A Realtime Database. This type of database captures all the data from a ticker plant and stores it in memory until the end of day, after which it writes all of its data to a disk and reloads the HDB. This cluster type requires local storage for temporary storage of data during the savedown process. If you specify this field in your request, you must provide the savedownStorageConfiguration parameter. GATEWAY – A gateway cluster allows you to access data across processes in kdb systems. It allows you to create your own routing logic using the initialization scripts and custom code. This type of cluster does not require a writable local storage. GP – A general purpose cluster allows you to quickly iterate on code during development by granting greater access to system commands and enabling a fast reload of custom code. This cluster type can optionally mount databases including cache and savedown storage. For this cluster type, the node count is fixed at 1. It does not support autoscaling and supports only SINGLE AZ mode.
1671
1671
  */
1672
1672
  clusterType?: KxClusterType;
1673
1673
  /**
@@ -1927,11 +1927,11 @@ declare namespace Finspace {
1927
1927
  clientToken?: ClientTokenString;
1928
1928
  code: CodeConfiguration;
1929
1929
  /**
1930
- * Specifies a Q program that will be run at launch of a cluster. It is a relative path within .zip file that contains the custom code, which will be loaded on the cluster. It must include the file name itself. For example, somedir/init.q.
1930
+ * Specifies a Q program that will be run at launch of a cluster. It is a relative path within .zip file that contains the custom code, which will be loaded on the cluster. It must include the file name itself. For example, somedir/init.q. You cannot update this parameter for a NO_RESTART deployment.
1931
1931
  */
1932
1932
  initializationScript?: InitializationScriptFilePath;
1933
1933
  /**
1934
- * Specifies the key-value pairs to make them available inside the cluster.
1934
+ * Specifies the key-value pairs to make them available inside the cluster. You cannot update this parameter for a NO_RESTART deployment.
1935
1935
  */
1936
1936
  commandLineArguments?: KxCommandLineArguments;
1937
1937
  /**
@@ -1533,6 +1533,27 @@ one destination per packager.
1533
1533
  }
1534
1534
  export interface ClaimDeviceResponse {
1535
1535
  }
1536
+ export interface ColorCorrection {
1537
+ /**
1538
+ * The color space of the input.
1539
+ */
1540
+ InputColorSpace: ColorSpace;
1541
+ /**
1542
+ * The color space of the output.
1543
+ */
1544
+ OutputColorSpace: ColorSpace;
1545
+ /**
1546
+ * The URI of the 3D LUT file. The protocol must be 's3:' or 's3ssl:':.
1547
+ */
1548
+ Uri: __string;
1549
+ }
1550
+ export interface ColorCorrectionSettings {
1551
+ /**
1552
+ * An array of colorCorrections that applies when you are using 3D LUT files to perform color conversion on video. Each colorCorrection contains one 3D LUT file (that defines the color mapping for converting an input color space to an output color space), and the input/output combination that this 3D LUT file applies to. MediaLive reads the color space in the input metadata, determines the color space that you have specified for the output, and finds and uses the LUT file that applies to this combination.
1553
+ */
1554
+ GlobalColorCorrections: __listOfColorCorrection;
1555
+ }
1556
+ export type ColorSpace = "HDR10"|"HLG_2020"|"REC_601"|"REC_709"|string;
1536
1557
  export interface ColorSpacePassthroughSettings {
1537
1558
  }
1538
1559
  export interface CreateChannelRequest {
@@ -2881,6 +2902,10 @@ You specify only the font family. All other style information (color, bold, posi
2881
2902
  * Settings for caption decriptions
2882
2903
  */
2883
2904
  CaptionDescriptions?: __listOfCaptionDescription;
2905
+ /**
2906
+ * Color correction settings
2907
+ */
2908
+ ColorCorrectionSettings?: ColorCorrectionSettings;
2884
2909
  /**
2885
2910
  * Feature Activations
2886
2911
  */
@@ -7522,6 +7547,7 @@ If STANDARD channel, subnet IDs must be mapped to two unique availability zones
7522
7547
  export type __listOfCaptionSelector = CaptionSelector[];
7523
7548
  export type __listOfChannelEgressEndpoint = ChannelEgressEndpoint[];
7524
7549
  export type __listOfChannelSummary = ChannelSummary[];
7550
+ export type __listOfColorCorrection = ColorCorrection[];
7525
7551
  export type __listOfFailoverCondition = FailoverCondition[];
7526
7552
  export type __listOfHlsAdMarkers = HlsAdMarkers[];
7527
7553
  export type __listOfInput = Input[];
@@ -20,11 +20,11 @@ declare class ServiceCatalogAppRegistry extends Service {
20
20
  */
21
21
  associateAttributeGroup(callback?: (err: AWSError, data: ServiceCatalogAppRegistry.Types.AssociateAttributeGroupResponse) => void): Request<ServiceCatalogAppRegistry.Types.AssociateAttributeGroupResponse, AWSError>;
22
22
  /**
23
- * Associates a resource with an application. The resource can be specified by its ARN or name. The application can be specified by ARN, ID, or name. Minimum permissions You must have the following permissions to associate a resource using the OPTIONS parameter set to APPLY_APPLICATION_TAG. tag:GetResources tag:TagResources You must also have these additional permissions if you don't use the AWSServiceCatalogAppRegistryFullAccess policy. For more information, see AWSServiceCatalogAppRegistryFullAccess in the AppRegistry Administrator Guide. resource-groups:DisassociateResource cloudformation:UpdateStack cloudformation:DescribeStacks In addition, you must have the tagging permission defined by the Amazon Web Services service that creates the resource. For more information, see TagResources in the Resource Groups Tagging API Reference.
23
+ * Associates a resource with an application. The resource can be specified by its ARN or name. The application can be specified by ARN, ID, or name. Minimum permissions You must have the following permissions to associate a resource using the OPTIONS parameter set to APPLY_APPLICATION_TAG. tag:GetResources tag:TagResources You must also have these additional permissions if you don't use the AWSServiceCatalogAppRegistryFullAccess policy. For more information, see AWSServiceCatalogAppRegistryFullAccess in the AppRegistry Administrator Guide. resource-groups:AssociateResource cloudformation:UpdateStack cloudformation:DescribeStacks In addition, you must have the tagging permission defined by the Amazon Web Services service that creates the resource. For more information, see TagResources in the Resource Groups Tagging API Reference.
24
24
  */
25
25
  associateResource(params: ServiceCatalogAppRegistry.Types.AssociateResourceRequest, callback?: (err: AWSError, data: ServiceCatalogAppRegistry.Types.AssociateResourceResponse) => void): Request<ServiceCatalogAppRegistry.Types.AssociateResourceResponse, AWSError>;
26
26
  /**
27
- * Associates a resource with an application. The resource can be specified by its ARN or name. The application can be specified by ARN, ID, or name. Minimum permissions You must have the following permissions to associate a resource using the OPTIONS parameter set to APPLY_APPLICATION_TAG. tag:GetResources tag:TagResources You must also have these additional permissions if you don't use the AWSServiceCatalogAppRegistryFullAccess policy. For more information, see AWSServiceCatalogAppRegistryFullAccess in the AppRegistry Administrator Guide. resource-groups:DisassociateResource cloudformation:UpdateStack cloudformation:DescribeStacks In addition, you must have the tagging permission defined by the Amazon Web Services service that creates the resource. For more information, see TagResources in the Resource Groups Tagging API Reference.
27
+ * Associates a resource with an application. The resource can be specified by its ARN or name. The application can be specified by ARN, ID, or name. Minimum permissions You must have the following permissions to associate a resource using the OPTIONS parameter set to APPLY_APPLICATION_TAG. tag:GetResources tag:TagResources You must also have these additional permissions if you don't use the AWSServiceCatalogAppRegistryFullAccess policy. For more information, see AWSServiceCatalogAppRegistryFullAccess in the AppRegistry Administrator Guide. resource-groups:AssociateResource cloudformation:UpdateStack cloudformation:DescribeStacks In addition, you must have the tagging permission defined by the Amazon Web Services service that creates the resource. For more information, see TagResources in the Resource Groups Tagging API Reference.
28
28
  */
29
29
  associateResource(callback?: (err: AWSError, data: ServiceCatalogAppRegistry.Types.AssociateResourceResponse) => void): Request<ServiceCatalogAppRegistry.Types.AssociateResourceResponse, AWSError>;
30
30
  /**