cdk-lambda-subminute 2.0.315 → 2.0.317

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -548,6 +548,10 @@ declare namespace Billingconductor {
  * A CustomLineItemChargeDetails that describes the charge details for a custom line item.
  */
  ChargeDetails: CustomLineItemChargeDetails;
+ /**
+ * The Amazon Web Services account in which this custom line item will be applied to.
+ */
+ AccountId?: AccountId;
  }
  export interface CreateCustomLineItemOutput {
  /**
@@ -737,6 +741,10 @@ declare namespace Billingconductor {
  * The number of resources that are associated to the custom line item.
  */
  AssociationSize?: NumberOfAssociations;
+ /**
+ * The Amazon Web Services account in which this custom line item will be applied to.
+ */
+ AccountId?: AccountId;
  }
  export type CustomLineItemName = string;
  export type CustomLineItemNameList = CustomLineItemName[];
@@ -805,6 +813,10 @@ declare namespace Billingconductor {
  * The inclusive start time.
  */
  StartTime?: Instant;
+ /**
+ * The Amazon Web Services account in which this custom line item will be applied to.
+ */
+ AccountId?: AccountId;
  }
  export interface DeleteBillingGroupInput {
  /**
@@ -1134,6 +1146,10 @@ declare namespace Billingconductor {
  * A list of custom line item ARNs to retrieve information.
  */
  Arns?: CustomLineItemArns;
+ /**
+ * The Amazon Web Services accounts in which this custom line item will be applied to.
+ */
+ AccountIds?: AccountIdList;
  }
  export interface ListCustomLineItemsInput {
  /**
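The hunks above add an optional AccountId to the custom line item request and list shapes, plus an AccountIds filter on ListCustomLineItems. A minimal sketch of how the new field might be passed with the v2 Billingconductor client; the ARN, account ID, and charge details are placeholder values, not taken from the package.

```typescript
import Billingconductor = require('aws-sdk/clients/billingconductor');

const billingconductor = new Billingconductor({ region: 'us-east-1' });

// Placeholder ARN, account ID, and charge details for illustration only.
billingconductor.createCustomLineItem({
  Name: 'support-fee',
  Description: 'Monthly support fee',
  BillingGroupArn: 'arn:aws:billingconductor::123456789012:billinggroup/example',
  ChargeDetails: { Type: 'FEE', Flat: { ChargeValue: 100 } },
  // New optional field in this SDK update: apply the line item to a specific linked account.
  AccountId: '111122223333',
}, (err, data) => {
  if (err) console.error(err);
  else console.log(data.Arn);
});
```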
@@ -127,6 +127,19 @@ declare namespace Braket {
  */
  scriptModeConfig?: ScriptModeConfig;
  }
+ export interface Association {
+ /**
+ * The Amazon Braket resource arn.
+ */
+ arn: BraketResourceArn;
+ /**
+ * The association type for the specified Amazon Braket resource arn.
+ */
+ type: AssociationType;
+ }
+ export type AssociationType = "RESERVATION_TIME_WINDOW_ARN"|string;
+ export type Associations = Association[];
+ export type BraketResourceArn = string;
  export interface CancelJobRequest {
  /**
  * The ARN of the Amazon Braket job to cancel.
@@ -176,6 +189,10 @@ declare namespace Braket {
  * Definition of the Amazon Braket job to be created. Specifies the container image the job uses and information about the Python scripts used for entry and training.
  */
  algorithmSpecification: AlgorithmSpecification;
+ /**
+ * The list of Amazon Braket resources associated with the hybrid job.
+ */
+ associations?: CreateJobRequestAssociationsList;
  /**
  * Information about the output locations for job checkpoint data.
  */
@@ -221,6 +238,7 @@ declare namespace Braket {
  */
  tags?: TagsMap;
  }
+ export type CreateJobRequestAssociationsList = Association[];
  export type CreateJobRequestInputDataConfigList = InputFileConfig[];
  export type CreateJobRequestJobNameString = string;
  export interface CreateJobResponse {
@@ -234,6 +252,10 @@ declare namespace Braket {
  * The action associated with the task.
  */
  action: JsonValue;
+ /**
+ * The list of Amazon Braket resources associated with the quantum task.
+ */
+ associations?: CreateQuantumTaskRequestAssociationsList;
  /**
  * The client token associated with the request.
  */
@@ -267,6 +289,7 @@ declare namespace Braket {
  */
  tags?: TagsMap;
  }
+ export type CreateQuantumTaskRequestAssociationsList = Association[];
  export type CreateQuantumTaskRequestDeviceParametersString = string;
  export type CreateQuantumTaskRequestOutputS3BucketString = string;
  export type CreateQuantumTaskRequestOutputS3KeyPrefixString = string;
@@ -381,6 +404,10 @@ declare namespace Braket {
  * Definition of the Amazon Braket job created. Specifies the container image the job uses, information about the Python scripts used for entry and training, and the user-defined metrics used to evaluation the job.
  */
  algorithmSpecification: AlgorithmSpecification;
+ /**
+ * The list of Amazon Braket resources associated with the hybrid job.
+ */
+ associations?: Associations;
  /**
  * The billable time the Amazon Braket job used to complete.
  */
@@ -465,11 +492,15 @@ declare namespace Braket {
  */
  additionalAttributeNames?: QuantumTaskAdditionalAttributeNamesList;
  /**
- * the ARN of the task to retrieve.
+ * The ARN of the task to retrieve.
  */
  quantumTaskArn: QuantumTaskArn;
  }
  export interface GetQuantumTaskResponse {
+ /**
+ * The list of Amazon Braket resources associated with the quantum task.
+ */
+ associations?: Associations;
  /**
  * The time at which the task was created.
  */
@@ -596,7 +627,7 @@ declare namespace Braket {
  */
  message?: JobEventDetailsMessageString;
  /**
- * TThe type of event that occurred related to the Amazon Braket job.
+ * The type of event that occurred related to the Amazon Braket job.
  */
  timeOfEvent?: SyntheticTimestamp_date_time;
  }
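The Braket hunks above introduce an Association shape and optional associations lists on the job and quantum task APIs. A minimal sketch of attaching the new list when creating a quantum task with the v2 Braket client; the program document, device ARN, S3 location, client token, and reservation ARN are placeholders.

```typescript
import Braket = require('aws-sdk/clients/braket');

const braket = new Braket({ region: 'us-east-1' });

// Placeholder program document, device ARN, S3 location, and reservation ARN.
braket.createQuantumTask({
  action: JSON.stringify({ braketSchemaHeader: { name: 'braket.ir.openqasm.program', version: '1' }, source: 'OPENQASM 3;' }),
  clientToken: 'example-client-token-0001',
  deviceArn: 'arn:aws:braket:::device/quantum-simulator/amazon/sv1',
  outputS3Bucket: 'amazon-braket-example-bucket',
  outputS3KeyPrefix: 'tasks',
  shots: 100,
  // New in this update: associate the task with a reservation time window.
  associations: [{
    arn: 'arn:aws:braket:us-east-1:123456789012:reservation/example',
    type: 'RESERVATION_TIME_WINDOW_ARN',
  }],
}, (err, data) => {
  if (err) console.error(err);
  else console.log(data.quantumTaskArn);
});
```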
@@ -143,9 +143,9 @@ declare namespace Cloud9 {
  */
  subnetId?: SubnetId;
  /**
- * The identifier for the Amazon Machine Image (AMI) that's used to create the EC2 instance. To choose an AMI for the instance, you must specify a valid AMI alias or a valid Amazon EC2 Systems Manager (SSM) path. From December 04, 2023, you will be required to include the imageId parameter for the CreateEnvironmentEC2 action. This change will be reflected across all direct methods of communicating with the API, such as Amazon Web Services SDK, Amazon Web Services CLI and Amazon Web Services CloudFormation. This change will only affect direct API consumers, and not Cloud9 console users. From January 22, 2024, Amazon Linux (AL1) will be removed from the list of available image IDs for Cloud9. This is necessary as AL1 will reach the end of maintenance support in December 2023, and as a result will no longer receive security updates. We recommend using Amazon Linux 2 as the AMI to create your environment as it is fully supported. This change will only affect direct API consumers, and not Cloud9 console users. Since Ubuntu 18.04 has ended standard support as of May 31, 2023, we recommend you choose Ubuntu 22.04. AMI aliases Amazon Linux (default): amazonlinux-1-x86_64 Amazon Linux 2: amazonlinux-2-x86_64 Ubuntu 18.04: ubuntu-18.04-x86_64 Ubuntu 22.04: ubuntu-22.04-x86_64 SSM paths Amazon Linux (default): resolve:ssm:/aws/service/cloud9/amis/amazonlinux-1-x86_64 Amazon Linux 2: resolve:ssm:/aws/service/cloud9/amis/amazonlinux-2-x86_64 Ubuntu 18.04: resolve:ssm:/aws/service/cloud9/amis/ubuntu-18.04-x86_64 Ubuntu 22.04: resolve:ssm:/aws/service/cloud9/amis/ubuntu-22.04-x86_64
+ * The identifier for the Amazon Machine Image (AMI) that's used to create the EC2 instance. To choose an AMI for the instance, you must specify a valid AMI alias or a valid Amazon EC2 Systems Manager (SSM) path. From December 04, 2023, you will be required to include the imageId parameter for the CreateEnvironmentEC2 action. This change will be reflected across all direct methods of communicating with the API, such as Amazon Web Services SDK, Amazon Web Services CLI and Amazon Web Services CloudFormation. This change will only affect direct API consumers, and not Cloud9 console users. From January 22, 2024, Amazon Linux (AL1) will be removed from the list of available image IDs for Cloud9. This is necessary as AL1 will reach the end of maintenance support in December 2023, and as a result will no longer receive security updates. We recommend using Amazon Linux 2 as the AMI to create your environment as it is fully supported. This change will only affect direct API consumers, and not Cloud9 console users. Since Ubuntu 18.04 has ended standard support as of May 31, 2023, we recommend you choose Ubuntu 22.04. AMI aliases Amazon Linux: amazonlinux-1-x86_64 Amazon Linux 2: amazonlinux-2-x86_64 Ubuntu 18.04: ubuntu-18.04-x86_64 Ubuntu 22.04: ubuntu-22.04-x86_64 SSM paths Amazon Linux: resolve:ssm:/aws/service/cloud9/amis/amazonlinux-1-x86_64 Amazon Linux 2: resolve:ssm:/aws/service/cloud9/amis/amazonlinux-2-x86_64 Ubuntu 18.04: resolve:ssm:/aws/service/cloud9/amis/ubuntu-18.04-x86_64 Ubuntu 22.04: resolve:ssm:/aws/service/cloud9/amis/ubuntu-22.04-x86_64
  */
- imageId?: ImageId;
+ imageId: ImageId;
  /**
  * The number of minutes until the running instance is shut down after the environment has last been used.
  */
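The Cloud9 hunk above makes imageId a required member of CreateEnvironmentEC2Request. A minimal sketch reflecting that change; the environment name and instance type are placeholders, and the AMI alias is one of those listed in the description above.

```typescript
import Cloud9 = require('aws-sdk/clients/cloud9');

const cloud9 = new Cloud9({ region: 'us-east-1' });

// imageId must now be supplied; amazonlinux-2-x86_64 is one of the documented AMI aliases.
cloud9.createEnvironmentEC2({
  name: 'example-environment',
  instanceType: 't3.small',
  imageId: 'amazonlinux-2-x86_64',
  automaticStopTimeMinutes: 30,
}, (err, data) => {
  if (err) console.error(err);
  else console.log(data.environmentId);
});
```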
@@ -428,7 +428,7 @@ declare namespace Finspace {
  */
  databaseName: DatabaseName;
  /**
- * A list of change request objects that are run in order. A change request object consists of changeType , s3Path, and a dbPath. A changeType can has the following values: PUT – Adds or updates files in a database. DELETE – Deletes files in a database. All the change requests require a mandatory dbPath attribute that defines the path within the database directory. The s3Path attribute defines the s3 source file path and is required for a PUT change type. Here is an example of how you can use the change request object: [ { "changeType": "PUT", "s3Path":"s3://bucket/db/2020.01.02/", "dbPath":"/2020.01.02/"}, { "changeType": "PUT", "s3Path":"s3://bucket/db/sym", "dbPath":"/"}, { "changeType": "DELETE", "dbPath": "/2020.01.01/"} ] In this example, the first request with PUT change type allows you to add files in the given s3Path under the 2020.01.02 partition of the database. The second request with PUT change type allows you to add a single sym file at database root location. The last request with DELETE change type allows you to delete the files under the 2020.01.01 partition of the database.
+ * A list of change request objects that are run in order. A change request object consists of changeType , s3Path, and dbPath. A changeType can has the following values: PUT – Adds or updates files in a database. DELETE – Deletes files in a database. All the change requests require a mandatory dbPath attribute that defines the path within the database directory. All database paths must start with a leading / and end with a trailing /. The s3Path attribute defines the s3 source file path and is required for a PUT change type. The s3path must end with a trailing / if it is a directory and must end without a trailing / if it is a file. Here are few examples of how you can use the change request object: This request adds a single sym file at database root location. { "changeType": "PUT", "s3Path":"s3://bucket/db/sym", "dbPath":"/"} This request adds files in the given s3Path under the 2020.01.02 partition of the database. { "changeType": "PUT", "s3Path":"s3://bucket/db/2020.01.02/", "dbPath":"/2020.01.02/"} This request adds files in the given s3Path under the taq table partition of the database. [ { "changeType": "PUT", "s3Path":"s3://bucket/db/2020.01.02/taq/", "dbPath":"/2020.01.02/taq/"}] This request deletes the 2020.01.02 partition of the database. [{ "changeType": "DELETE", "dbPath": "/2020.01.02/"} ] The DELETE request allows you to delete the existing files under the 2020.01.02 partition of the database, and the PUT request adds a new taq table under it. [ {"changeType": "DELETE", "dbPath":"/2020.01.02/"}, {"changeType": "PUT", "s3Path":"s3://bucket/db/2020.01.02/taq/", "dbPath":"/2020.01.02/taq/"}]
  */
  changeRequests: ChangeRequests;
  /**
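The rewritten changeRequests description above spells out the path rules (dbPath wrapped in slashes, directory s3Path ending in a slash). A minimal sketch of a createKxChangeset call that follows those rules; the environment ID, database name, bucket, and client token are placeholders.

```typescript
import Finspace = require('aws-sdk/clients/finspace');

const finspace = new Finspace({ region: 'us-east-1' });

// Delete the existing 2020.01.02 partition, then load a new taq table under it.
finspace.createKxChangeset({
  environmentId: 'exampleEnvironmentId',
  databaseName: 'exampledb',
  clientToken: 'example-client-token-0002',
  changeRequests: [
    { changeType: 'DELETE', dbPath: '/2020.01.02/' },
    { changeType: 'PUT', s3Path: 's3://example-bucket/db/2020.01.02/taq/', dbPath: '/2020.01.02/taq/' },
  ],
}, (err, data) => {
  if (err) console.error(err);
  else console.log(data.changesetId);
});
```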
@@ -484,7 +484,7 @@ declare namespace Finspace {
  */
  clusterName: KxClusterName;
  /**
- * Specifies the type of KDB database that is being created. The following types are available: HDB – A Historical Database. The data is only accessible with read-only permissions from one of the FinSpace managed kdb databases mounted to the cluster. RDB – A Realtime Database. This type of database captures all the data from a ticker plant and stores it in memory until the end of day, after which it writes all of its data to a disk and reloads the HDB. This cluster type requires local storage for temporary storage of data during the savedown process. If you specify this field in your request, you must provide the savedownStorageConfiguration parameter. GATEWAY – A gateway cluster allows you to access data across processes in kdb systems. It allows you to create your own routing logic using the initialization scripts and custom code. This type of cluster does not require a writable local storage.
+ * Specifies the type of KDB database that is being created. The following types are available: HDB – A Historical Database. The data is only accessible with read-only permissions from one of the FinSpace managed kdb databases mounted to the cluster. RDB – A Realtime Database. This type of database captures all the data from a ticker plant and stores it in memory until the end of day, after which it writes all of its data to a disk and reloads the HDB. This cluster type requires local storage for temporary storage of data during the savedown process. If you specify this field in your request, you must provide the savedownStorageConfiguration parameter. GATEWAY – A gateway cluster allows you to access data across processes in kdb systems. It allows you to create your own routing logic using the initialization scripts and custom code. This type of cluster does not require a writable local storage. GP – A general purpose cluster allows you to quickly iterate on code during development by granting greater access to system commands and enabling a fast reload of custom code. This cluster type can optionally mount databases including cache and savedown storage. For this cluster type, the node count is fixed at 1. It does not support autoscaling and supports only SINGLE AZ mode.
  */
  clusterType: KxClusterType;
  /**
@@ -566,7 +566,7 @@ declare namespace Finspace {
  */
  clusterName?: KxClusterName;
  /**
- * Specifies the type of KDB database that is being created. The following types are available: HDB – A Historical Database. The data is only accessible with read-only permissions from one of the FinSpace managed kdb databases mounted to the cluster. RDB – A Realtime Database. This type of database captures all the data from a ticker plant and stores it in memory until the end of day, after which it writes all of its data to a disk and reloads the HDB. This cluster type requires local storage for temporary storage of data during the savedown process. If you specify this field in your request, you must provide the savedownStorageConfiguration parameter. GATEWAY – A gateway cluster allows you to access data across processes in kdb systems. It allows you to create your own routing logic using the initialization scripts and custom code. This type of cluster does not require a writable local storage.
+ * Specifies the type of KDB database that is being created. The following types are available: HDB – A Historical Database. The data is only accessible with read-only permissions from one of the FinSpace managed kdb databases mounted to the cluster. RDB – A Realtime Database. This type of database captures all the data from a ticker plant and stores it in memory until the end of day, after which it writes all of its data to a disk and reloads the HDB. This cluster type requires local storage for temporary storage of data during the savedown process. If you specify this field in your request, you must provide the savedownStorageConfiguration parameter. GATEWAY – A gateway cluster allows you to access data across processes in kdb systems. It allows you to create your own routing logic using the initialization scripts and custom code. This type of cluster does not require a writable local storage. GP – A general purpose cluster allows you to quickly iterate on code during development by granting greater access to system commands and enabling a fast reload of custom code. This cluster type can optionally mount databases including cache and savedown storage. For this cluster type, the node count is fixed at 1. It does not support autoscaling and supports only SINGLE AZ mode.
  */
  clusterType?: KxClusterType;
  /**
@@ -1041,7 +1041,7 @@ declare namespace Finspace {
  */
  clusterName?: KxClusterName;
  /**
- * Specifies the type of KDB database that is being created. The following types are available: HDB – A Historical Database. The data is only accessible with read-only permissions from one of the FinSpace managed kdb databases mounted to the cluster. RDB – A Realtime Database. This type of database captures all the data from a ticker plant and stores it in memory until the end of day, after which it writes all of its data to a disk and reloads the HDB. This cluster type requires local storage for temporary storage of data during the savedown process. If you specify this field in your request, you must provide the savedownStorageConfiguration parameter. GATEWAY – A gateway cluster allows you to access data across processes in kdb systems. It allows you to create your own routing logic using the initialization scripts and custom code. This type of cluster does not require a writable local storage.
+ * Specifies the type of KDB database that is being created. The following types are available: HDB – A Historical Database. The data is only accessible with read-only permissions from one of the FinSpace managed kdb databases mounted to the cluster. RDB – A Realtime Database. This type of database captures all the data from a ticker plant and stores it in memory until the end of day, after which it writes all of its data to a disk and reloads the HDB. This cluster type requires local storage for temporary storage of data during the savedown process. If you specify this field in your request, you must provide the savedownStorageConfiguration parameter. GATEWAY – A gateway cluster allows you to access data across processes in kdb systems. It allows you to create your own routing logic using the initialization scripts and custom code. This type of cluster does not require a writable local storage. GP – A general purpose cluster allows you to quickly iterate on code during development by granting greater access to system commands and enabling a fast reload of custom code. This cluster type can optionally mount databases including cache and savedown storage. For this cluster type, the node count is fixed at 1. It does not support autoscaling and supports only SINGLE AZ mode.
  */
  clusterType?: KxClusterType;
  /**
@@ -1349,7 +1349,7 @@ declare namespace Finspace {
  */
  clusterName?: KxClusterName;
  /**
- * Specifies the type of KDB database that is being created. The following types are available: HDB – A Historical Database. The data is only accessible with read-only permissions from one of the FinSpace managed kdb databases mounted to the cluster. RDB – A Realtime Database. This type of database captures all the data from a ticker plant and stores it in memory until the end of day, after which it writes all of its data to a disk and reloads the HDB. This cluster type requires local storage for temporary storage of data during the savedown process. If you specify this field in your request, you must provide the savedownStorageConfiguration parameter. GATEWAY – A gateway cluster allows you to access data across processes in kdb systems. It allows you to create your own routing logic using the initialization scripts and custom code. This type of cluster does not require a writable local storage.
+ * Specifies the type of KDB database that is being created. The following types are available: HDB – A Historical Database. The data is only accessible with read-only permissions from one of the FinSpace managed kdb databases mounted to the cluster. RDB – A Realtime Database. This type of database captures all the data from a ticker plant and stores it in memory until the end of day, after which it writes all of its data to a disk and reloads the HDB. This cluster type requires local storage for temporary storage of data during the savedown process. If you specify this field in your request, you must provide the savedownStorageConfiguration parameter. GATEWAY – A gateway cluster allows you to access data across processes in kdb systems. It allows you to create your own routing logic using the initialization scripts and custom code. This type of cluster does not require a writable local storage. GP – A general purpose cluster allows you to quickly iterate on code during development by granting greater access to system commands and enabling a fast reload of custom code. This cluster type can optionally mount databases including cache and savedown storage. For this cluster type, the node count is fixed at 1. It does not support autoscaling and supports only SINGLE AZ mode.
  */
  clusterType?: KxClusterType;
  /**
@@ -1387,17 +1387,17 @@ declare namespace Finspace {
  }
  export interface KxClusterCodeDeploymentConfiguration {
  /**
- * The type of deployment that you want on a cluster. ROLLING – This options updates the cluster by stopping the exiting q process and starting a new q process with updated configuration. FORCE – This option updates the cluster by immediately stopping all the running processes before starting up new ones with the updated configuration.
+ * The type of deployment that you want on a cluster. ROLLING – This options updates the cluster by stopping the exiting q process and starting a new q process with updated configuration. NO_RESTART – This option updates the cluster without stopping the running q process. It is only available for GP type cluster. This option is quicker as it reduces the turn around time to update configuration on a cluster. With this deployment mode, you cannot update the initializationScript and commandLineArguments parameters. FORCE – This option updates the cluster by immediately stopping all the running processes before starting up new ones with the updated configuration.
  */
  deploymentStrategy: KxClusterCodeDeploymentStrategy;
  }
- export type KxClusterCodeDeploymentStrategy = "ROLLING"|"FORCE"|string;
+ export type KxClusterCodeDeploymentStrategy = "NO_RESTART"|"ROLLING"|"FORCE"|string;
  export type KxClusterDescription = string;
  export type KxClusterName = string;
  export type KxClusterNodeIdString = string;
  export type KxClusterStatus = "PENDING"|"CREATING"|"CREATE_FAILED"|"RUNNING"|"UPDATING"|"DELETING"|"DELETED"|"DELETE_FAILED"|string;
  export type KxClusterStatusReason = string;
- export type KxClusterType = "HDB"|"RDB"|"GATEWAY"|string;
+ export type KxClusterType = "HDB"|"RDB"|"GATEWAY"|"GP"|string;
  export type KxClusters = KxCluster[];
  export interface KxCommandLineArgument {
  /**
@@ -1667,7 +1667,7 @@ declare namespace Finspace {
  */
  environmentId: KxEnvironmentId;
  /**
- * Specifies the type of KDB database that is being created. The following types are available: HDB – A Historical Database. The data is only accessible with read-only permissions from one of the FinSpace managed kdb databases mounted to the cluster. RDB – A Realtime Database. This type of database captures all the data from a ticker plant and stores it in memory until the end of day, after which it writes all of its data to a disk and reloads the HDB. This cluster type requires local storage for temporary storage of data during the savedown process. If you specify this field in your request, you must provide the savedownStorageConfiguration parameter. GATEWAY – A gateway cluster allows you to access data across processes in kdb systems. It allows you to create your own routing logic using the initialization scripts and custom code. This type of cluster does not require a writable local storage.
+ * Specifies the type of KDB database that is being created. The following types are available: HDB – A Historical Database. The data is only accessible with read-only permissions from one of the FinSpace managed kdb databases mounted to the cluster. RDB – A Realtime Database. This type of database captures all the data from a ticker plant and stores it in memory until the end of day, after which it writes all of its data to a disk and reloads the HDB. This cluster type requires local storage for temporary storage of data during the savedown process. If you specify this field in your request, you must provide the savedownStorageConfiguration parameter. GATEWAY – A gateway cluster allows you to access data across processes in kdb systems. It allows you to create your own routing logic using the initialization scripts and custom code. This type of cluster does not require a writable local storage. GP – A general purpose cluster allows you to quickly iterate on code during development by granting greater access to system commands and enabling a fast reload of custom code. This cluster type can optionally mount databases including cache and savedown storage. For this cluster type, the node count is fixed at 1. It does not support autoscaling and supports only SINGLE AZ mode.
  */
  clusterType?: KxClusterType;
  /**
@@ -1927,11 +1927,11 @@ declare namespace Finspace {
  clientToken?: ClientTokenString;
  code: CodeConfiguration;
  /**
- * Specifies a Q program that will be run at launch of a cluster. It is a relative path within .zip file that contains the custom code, which will be loaded on the cluster. It must include the file name itself. For example, somedir/init.q.
+ * Specifies a Q program that will be run at launch of a cluster. It is a relative path within .zip file that contains the custom code, which will be loaded on the cluster. It must include the file name itself. For example, somedir/init.q. You cannot update this parameter for a NO_RESTART deployment.
  */
  initializationScript?: InitializationScriptFilePath;
  /**
- * Specifies the key-value pairs to make them available inside the cluster.
+ * Specifies the key-value pairs to make them available inside the cluster. You cannot update this parameter for a NO_RESTART deployment.
  */
  commandLineArguments?: KxCommandLineArguments;
  /**
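The FinSpace hunks above add the GP cluster type and a NO_RESTART code deployment strategy that reloads custom code without stopping the running q process. A minimal sketch of deploying updated code to a GP cluster with that strategy, assuming the request exposes the KxClusterCodeDeploymentConfiguration shown above as a deploymentConfiguration parameter; the environment, cluster, and S3 code location are placeholders, and initializationScript / commandLineArguments are omitted because they cannot be changed in this mode.

```typescript
import Finspace = require('aws-sdk/clients/finspace');

const finspace = new Finspace({ region: 'us-east-1' });

// NO_RESTART reloads custom code without restarting the q process (GP clusters only).
finspace.updateKxClusterCodeConfiguration({
  environmentId: 'exampleEnvironmentId',
  clusterName: 'example-gp-cluster',
  code: { s3Bucket: 'example-code-bucket', s3Key: 'code/custom-code.zip' },
  deploymentConfiguration: { deploymentStrategy: 'NO_RESTART' },
}, (err) => {
  if (err) console.error(err);
});
```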
@@ -1533,6 +1533,27 @@ one destination per packager.
  }
  export interface ClaimDeviceResponse {
  }
+ export interface ColorCorrection {
+ /**
+ * The color space of the input.
+ */
+ InputColorSpace: ColorSpace;
+ /**
+ * The color space of the output.
+ */
+ OutputColorSpace: ColorSpace;
+ /**
+ * The URI of the 3D LUT file. The protocol must be 's3:' or 's3ssl:':.
+ */
+ Uri: __string;
+ }
+ export interface ColorCorrectionSettings {
+ /**
+ * An array of colorCorrections that applies when you are using 3D LUT files to perform color conversion on video. Each colorCorrection contains one 3D LUT file (that defines the color mapping for converting an input color space to an output color space), and the input/output combination that this 3D LUT file applies to. MediaLive reads the color space in the input metadata, determines the color space that you have specified for the output, and finds and uses the LUT file that applies to this combination.
+ */
+ GlobalColorCorrections: __listOfColorCorrection;
+ }
+ export type ColorSpace = "HDR10"|"HLG_2020"|"REC_601"|"REC_709"|string;
  export interface ColorSpacePassthroughSettings {
  }
  export interface CreateChannelRequest {
@@ -2881,6 +2902,10 @@ You specify only the font family. All other style information (color, bold, posi
  * Settings for caption decriptions
  */
  CaptionDescriptions?: __listOfCaptionDescription;
+ /**
+ * Color correction settings
+ */
+ ColorCorrectionSettings?: ColorCorrectionSettings;
  /**
  * Feature Activations
  */
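The MediaLive hunks above define ColorCorrection / ColorCorrectionSettings and wire them into EncoderSettings. A minimal sketch of the new shape as it might be filled in; the bucket and LUT key are placeholders, and the input/output combination is illustrative.

```typescript
import MediaLive = require('aws-sdk/clients/medialive');

// One hypothetical 3D LUT mapping a Rec. 709 input to an HDR10 output.
const colorCorrectionSettings: MediaLive.ColorCorrectionSettings = {
  GlobalColorCorrections: [{
    InputColorSpace: 'REC_709',
    OutputColorSpace: 'HDR10',
    Uri: 's3ssl://example-bucket/luts/rec709-to-hdr10.cube',
  }],
};

// This object would be supplied under EncoderSettings.ColorCorrectionSettings
// in a CreateChannel or UpdateChannel request.
```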
@@ -7522,6 +7547,7 @@ If STANDARD channel, subnet IDs must be mapped to two unique availability zones
  export type __listOfCaptionSelector = CaptionSelector[];
  export type __listOfChannelEgressEndpoint = ChannelEgressEndpoint[];
  export type __listOfChannelSummary = ChannelSummary[];
+ export type __listOfColorCorrection = ColorCorrection[];
  export type __listOfFailoverCondition = FailoverCondition[];
  export type __listOfHlsAdMarkers = HlsAdMarkers[];
  export type __listOfInput = Input[];
@@ -20,11 +20,11 @@ declare class ServiceCatalogAppRegistry extends Service {
  */
  associateAttributeGroup(callback?: (err: AWSError, data: ServiceCatalogAppRegistry.Types.AssociateAttributeGroupResponse) => void): Request<ServiceCatalogAppRegistry.Types.AssociateAttributeGroupResponse, AWSError>;
  /**
- * Associates a resource with an application. The resource can be specified by its ARN or name. The application can be specified by ARN, ID, or name. Minimum permissions You must have the following permissions to associate a resource using the OPTIONS parameter set to APPLY_APPLICATION_TAG. tag:GetResources tag:TagResources You must also have these additional permissions if you don't use the AWSServiceCatalogAppRegistryFullAccess policy. For more information, see AWSServiceCatalogAppRegistryFullAccess in the AppRegistry Administrator Guide. resource-groups:DisassociateResource cloudformation:UpdateStack cloudformation:DescribeStacks In addition, you must have the tagging permission defined by the Amazon Web Services service that creates the resource. For more information, see TagResources in the Resource Groups Tagging API Reference.
+ * Associates a resource with an application. The resource can be specified by its ARN or name. The application can be specified by ARN, ID, or name. Minimum permissions You must have the following permissions to associate a resource using the OPTIONS parameter set to APPLY_APPLICATION_TAG. tag:GetResources tag:TagResources You must also have these additional permissions if you don't use the AWSServiceCatalogAppRegistryFullAccess policy. For more information, see AWSServiceCatalogAppRegistryFullAccess in the AppRegistry Administrator Guide. resource-groups:AssociateResource cloudformation:UpdateStack cloudformation:DescribeStacks In addition, you must have the tagging permission defined by the Amazon Web Services service that creates the resource. For more information, see TagResources in the Resource Groups Tagging API Reference.
  */
  associateResource(params: ServiceCatalogAppRegistry.Types.AssociateResourceRequest, callback?: (err: AWSError, data: ServiceCatalogAppRegistry.Types.AssociateResourceResponse) => void): Request<ServiceCatalogAppRegistry.Types.AssociateResourceResponse, AWSError>;
  /**
- * Associates a resource with an application. The resource can be specified by its ARN or name. The application can be specified by ARN, ID, or name. Minimum permissions You must have the following permissions to associate a resource using the OPTIONS parameter set to APPLY_APPLICATION_TAG. tag:GetResources tag:TagResources You must also have these additional permissions if you don't use the AWSServiceCatalogAppRegistryFullAccess policy. For more information, see AWSServiceCatalogAppRegistryFullAccess in the AppRegistry Administrator Guide. resource-groups:DisassociateResource cloudformation:UpdateStack cloudformation:DescribeStacks In addition, you must have the tagging permission defined by the Amazon Web Services service that creates the resource. For more information, see TagResources in the Resource Groups Tagging API Reference.
+ * Associates a resource with an application. The resource can be specified by its ARN or name. The application can be specified by ARN, ID, or name. Minimum permissions You must have the following permissions to associate a resource using the OPTIONS parameter set to APPLY_APPLICATION_TAG. tag:GetResources tag:TagResources You must also have these additional permissions if you don't use the AWSServiceCatalogAppRegistryFullAccess policy. For more information, see AWSServiceCatalogAppRegistryFullAccess in the AppRegistry Administrator Guide. resource-groups:AssociateResource cloudformation:UpdateStack cloudformation:DescribeStacks In addition, you must have the tagging permission defined by the Amazon Web Services service that creates the resource. For more information, see TagResources in the Resource Groups Tagging API Reference.
  */
  associateResource(callback?: (err: AWSError, data: ServiceCatalogAppRegistry.Types.AssociateResourceResponse) => void): Request<ServiceCatalogAppRegistry.Types.AssociateResourceResponse, AWSError>;
  /**
@@ -83,7 +83,7 @@ return /******/ (function(modules) { // webpackBootstrap
  /**
  * @constant
  */
- VERSION: '2.1510.0',
+ VERSION: '2.1511.0',
 
  /**
  * @api private
@@ -2640,12 +2640,17 @@ return /******/ (function(modules) { // webpackBootstrap
 
  var struct = {};
  var shapeMembers = shape.members;
+ var isAwsQueryCompatible = shape.api && shape.api.awsQueryCompatible;
  util.each(shapeMembers, function(name, memberShape) {
  var locationName = memberShape.isLocationName ? memberShape.name : name;
  if (Object.prototype.hasOwnProperty.call(structure, locationName)) {
  var value = structure[locationName];
  var result = translate(value, memberShape);
  if (result !== undefined) struct[name] = result;
+ } else if (isAwsQueryCompatible && memberShape.defaultValue) {
+ if (memberShape.type === 'list') {
+ struct[name] = typeof memberShape.defaultValue === 'function' ? memberShape.defaultValue() : memberShape.defaultValue;
+ }
  }
  });
  return struct;