cdk-lambda-subminute 2.0.283 → 2.0.285
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.jsii +3 -3
- package/lib/cdk-lambda-subminute.js +3 -3
- package/node_modules/aws-sdk/CHANGELOG.md +20 -1
- package/node_modules/aws-sdk/README.md +1 -1
- package/node_modules/aws-sdk/apis/amplify-2017-07-25.min.json +53 -38
- package/node_modules/aws-sdk/apis/amplify-2017-07-25.paginators.json +24 -0
- package/node_modules/aws-sdk/apis/application-insights-2018-11-25.min.json +35 -25
- package/node_modules/aws-sdk/apis/connect-2017-08-08.min.json +8 -5
- package/node_modules/aws-sdk/apis/dataexchange-2017-07-25.min.json +125 -0
- package/node_modules/aws-sdk/apis/datasync-2018-11-09.min.json +27 -15
- package/node_modules/aws-sdk/apis/ec2-2016-11-15.min.json +1297 -1181
- package/node_modules/aws-sdk/apis/ec2-2016-11-15.paginators.json +6 -0
- package/node_modules/aws-sdk/apis/finspace-2021-03-12.min.json +47 -0
- package/node_modules/aws-sdk/apis/m2-2021-04-28.min.json +36 -6
- package/node_modules/aws-sdk/apis/mediapackagev2-2022-12-25.min.json +37 -10
- package/node_modules/aws-sdk/apis/rds-2014-10-31.min.json +222 -107
- package/node_modules/aws-sdk/apis/rds-2014-10-31.paginators.json +6 -0
- package/node_modules/aws-sdk/apis/redshift-serverless-2021-04-21.min.json +205 -76
- package/node_modules/aws-sdk/apis/redshift-serverless-2021-04-21.paginators.json +6 -0
- package/node_modules/aws-sdk/apis/resiliencehub-2020-04-30.min.json +136 -106
- package/node_modules/aws-sdk/apis/s3outposts-2017-07-25.min.json +1 -0
- package/node_modules/aws-sdk/apis/translate-2017-07-01.min.json +23 -26
- package/node_modules/aws-sdk/clients/amplify.d.ts +239 -223
- package/node_modules/aws-sdk/clients/applicationinsights.d.ts +17 -0
- package/node_modules/aws-sdk/clients/connect.d.ts +46 -22
- package/node_modules/aws-sdk/clients/dataexchange.d.ts +153 -0
- package/node_modules/aws-sdk/clients/datasync.d.ts +45 -30
- package/node_modules/aws-sdk/clients/ec2.d.ts +138 -5
- package/node_modules/aws-sdk/clients/finspace.d.ts +46 -2
- package/node_modules/aws-sdk/clients/m2.d.ts +51 -6
- package/node_modules/aws-sdk/clients/mediapackagev2.d.ts +24 -0
- package/node_modules/aws-sdk/clients/neptunedata.d.ts +11 -11
- package/node_modules/aws-sdk/clients/rds.d.ts +148 -0
- package/node_modules/aws-sdk/clients/redshiftserverless.d.ts +212 -2
- package/node_modules/aws-sdk/clients/resiliencehub.d.ts +136 -103
- package/node_modules/aws-sdk/clients/s3outposts.d.ts +5 -0
- package/node_modules/aws-sdk/clients/translate.d.ts +17 -9
- package/node_modules/aws-sdk/clients/wisdom.d.ts +8 -8
- package/node_modules/aws-sdk/dist/aws-sdk-core-react-native.js +1 -1
- package/node_modules/aws-sdk/dist/aws-sdk-react-native.js +19 -19
- package/node_modules/aws-sdk/dist/aws-sdk.js +1565 -1322
- package/node_modules/aws-sdk/dist/aws-sdk.min.js +81 -80
- package/node_modules/aws-sdk/lib/core.js +1 -1
- package/node_modules/aws-sdk/package.json +1 -1
- package/package.json +3 -3
package/node_modules/aws-sdk/clients/m2.d.ts

@@ -431,6 +431,10 @@ declare namespace M2 {
      * Specifies a file associated with a specific batch job.
      */
     fileBatchJobIdentifier?: FileBatchJobIdentifier;
+    /**
+     * Specifies an Amazon S3 location that identifies the batch jobs that you want to run. Use this identifier to run ad hoc batch jobs.
+     */
+    s3BatchJobIdentifier?: S3BatchJobIdentifier;
     /**
      * A batch job identifier in which the batch job to run is identified by the script name.
      */
@@ -580,7 +584,7 @@ declare namespace M2 {
      */
     name: EntityName;
     /**
-     * Configures the maintenance window you want for the runtime environment. If you do not provide a value, a random system-generated value will be assigned.
+     * Configures the maintenance window that you want for the runtime environment. The maintenance window must have the format ddd:hh24:mi-ddd:hh24:mi and must be less than 24 hours. The following two examples are valid maintenance windows: sun:23:45-mon:00:15 or sat:01:00-sat:03:00. If you do not provide a value, a random system-generated value will be assigned.
      */
     preferredMaintenanceWindow?: String50;
     /**
@@ -680,6 +684,10 @@ declare namespace M2 {
      * The status of the data set import task.
      */
     status: DataSetTaskLifecycle;
+    /**
+     * If dataset import failed, the failure reason will show here.
+     */
+    statusReason?: String;
     /**
      * A summary of the data set import task.
      */
@@ -716,7 +724,7 @@ declare namespace M2 {
      */
     lastUpdatedTime?: Timestamp;
   }
-  export type DataSetTaskLifecycle = "Creating"|"Running"|"Completed"|string;
+  export type DataSetTaskLifecycle = "Creating"|"Running"|"Completed"|"Failed"|string;
   export type DataSetsSummaryList = DataSetSummary[];
   export interface DatasetDetailOrgAttributes {
     /**
@@ -806,7 +814,7 @@ declare namespace M2 {
      */
     statusReason?: String;
   }
-  export type DeploymentLifecycle = "Deploying"|"Succeeded"|"Failed"|string;
+  export type DeploymentLifecycle = "Deploying"|"Succeeded"|"Failed"|"Updating Deployment"|string;
   export type DeploymentList = DeploymentSummary[];
   export interface DeploymentSummary {
     /**
@@ -1171,6 +1179,10 @@ declare namespace M2 {
      * The type of data set. The only supported value is VSAM.
      */
     dataSetOrg?: DatasetDetailOrgAttributes;
+    /**
+     * File size of the dataset.
+     */
+    fileSize?: Long;
     /**
      * The last time the data set was referenced.
      */
@@ -1260,7 +1272,7 @@ declare namespace M2 {
   }
   export interface GetEnvironmentResponse {
     /**
-     * The number of instances included in the runtime environment. A standalone runtime environment has a
+     * The number of instances included in the runtime environment. A standalone runtime environment has a maximum of one instance. Currently, a high availability runtime environment has a maximum of two instances.
      */
     actualCapacity?: CapacityValue;
     /**
@@ -1312,7 +1324,7 @@ declare namespace M2 {
      */
     pendingMaintenance?: PendingMaintenance;
     /**
-     *
+     * The maintenance window for the runtime environment. If you don't provide a value for the maintenance window, the service assigns a random value.
      */
     preferredMaintenanceWindow?: String50;
     /**
@@ -1363,6 +1375,16 @@ declare namespace M2 {
   export type Identifier = string;
   export type IdentifierList = Identifier[];
   export type Integer = number;
+  export interface JobIdentifier {
+    /**
+     * The name of the file that contains the batch job definition.
+     */
+    fileName?: String;
+    /**
+     * The name of the script that contains the batch job definition.
+     */
+    scriptName?: String;
+  }
   export interface ListApplicationVersionsRequest {
     /**
      * The unique identifier of the application.
@@ -1520,6 +1542,10 @@ declare namespace M2 {
      * The maximum number of objects to return.
      */
     maxResults?: MaxResults;
+    /**
+     * Filter dataset name matching the specified pattern. Can use * and % as wild cards.
+     */
+    nameFilter?: String200;
     /**
      * A pagination token returned from a previous call to this operation. This specifies the next item to return. To return to the beginning of the list, exclude this parameter.
      */
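The new nameFilter parameter gives ListDataSets server-side name matching. A minimal sketch against the vendored aws-sdk v2 client; the application ID and the pattern are placeholders:

```ts
import AWS = require('aws-sdk');

const m2 = new AWS.M2({ region: 'us-east-1' });

// List only the data sets whose names match a pattern; per the new
// nameFilter doc, * and % act as wildcards. The IDs here are placeholders.
m2.listDataSets({
  applicationId: 'example-app-id',
  nameFilter: 'ACCT.*',
  maxResults: 50,
}, (err, data) => {
  if (err) console.error(err);
  else console.log(data.dataSets);
});
```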
@@ -1639,6 +1665,7 @@ declare namespace M2 {
      */
     logType: String20;
   }
+  export type Long = number;
   export interface MaintenanceSchedule {
     /**
      * The time the scheduled maintenance is to end.
@@ -1730,6 +1757,20 @@ declare namespace M2 {
      */
     min: Integer;
   }
+  export interface S3BatchJobIdentifier {
+    /**
+     * The Amazon S3 bucket that contains the batch job definitions.
+     */
+    bucket: String;
+    /**
+     * Identifies the batch job definition. This identifier can also point to any batch job definition that already exists in the application or to one of the batch job definitions within the directory that is specified in keyPrefix.
+     */
+    identifier: JobIdentifier;
+    /**
+     * The key prefix that specifies the path to the folder in the S3 bucket that has the batch job definitions.
+     */
+    keyPrefix?: String;
+  }
   export interface ScriptBatchJobDefinition {
     /**
      * The name of the script containing the batch job definition.
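Taken together with the JobIdentifier shape added earlier in this diff, S3BatchJobIdentifier lets StartBatchJob run an ad hoc job whose definition is stored in S3 rather than inside the application. A hedged sketch; the application ID, bucket, prefix, and file name are placeholders:

```ts
import AWS = require('aws-sdk');

const m2 = new AWS.M2({ region: 'us-east-1' });

// Run an ad hoc batch job whose definition lives in S3. The identifier can
// name either a file or a script under the optional keyPrefix folder.
m2.startBatchJob({
  applicationId: 'example-app-id',              // placeholder application ID
  batchJobIdentifier: {
    s3BatchJobIdentifier: {
      bucket: 'example-batch-definitions',      // placeholder bucket
      keyPrefix: 'jcl/adhoc',                   // optional folder prefix
      identifier: { fileName: 'POSTING.jcl' },  // placeholder definition file
    },
  },
}).promise()
  .then((res) => console.log('executionId:', res.executionId))
  .catch(console.error);
```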
@@ -1872,12 +1913,16 @@ declare namespace M2 {
      * The unique identifier of the runtime environment that you want to update.
      */
     environmentId: Identifier;
+    /**
+     * Forces the updates on the environment. This option is needed if the applications in the environment are not stopped or if there are ongoing application-related activities in the environment. If you use this option, be aware that it could lead to data corruption in the applications, and that you might need to perform repair and recovery procedures for the applications. This option is not needed if the attribute being updated is preferredMaintenanceWindow.
+     */
+    forceUpdate?: Boolean;
     /**
      * The instance type for the runtime environment to update.
      */
     instanceType?: String20;
     /**
-     * Configures the maintenance window you want for the runtime environment. If you do not provide a value, a random system-generated value will be assigned.
+     * Configures the maintenance window that you want for the runtime environment. The maintenance window must have the format ddd:hh24:mi-ddd:hh24:mi and must be less than 24 hours. The following two examples are valid maintenance windows: sun:23:45-mon:00:15 or sat:01:00-sat:03:00. If you do not provide a value, a random system-generated value will be assigned.
      */
     preferredMaintenanceWindow?: String;
   }
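The new forceUpdate doc notes that a change limited to preferredMaintenanceWindow does not need the force flag. A sketch of such an update, using the documented ddd:hh24:mi-ddd:hh24:mi window format; the environment ID is a placeholder:

```ts
import AWS = require('aws-sdk');

const m2 = new AWS.M2({ region: 'us-east-1' });

// Update only the maintenance window; forceUpdate is deliberately omitted
// because it is not needed when just this attribute changes.
m2.updateEnvironment({
  environmentId: 'example-env-id',                    // placeholder
  preferredMaintenanceWindow: 'sat:01:00-sat:03:00',  // < 24 hours
}).promise()
  .then((res) => console.log('updated:', res.environmentId))
  .catch(console.error);
```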
package/node_modules/aws-sdk/clients/mediapackagev2.d.ts

@@ -378,6 +378,7 @@ declare namespace MediaPackageV2 {
      * Inserts EXT-X-PROGRAM-DATE-TIME tags in the output manifest at the interval that you specify. If you don't enter an interval, EXT-X-PROGRAM-DATE-TIME tags aren't included in the manifest. The tags sync the stream to the wall clock so that viewers can seek to a specific time in the playback timeline on the player. ID3Timed metadata messages generate every 5 seconds whenever the content is ingested. Irrespective of this parameter, if any ID3Timed metadata is in the HLS input, it is passed through to the HLS output.
      */
     ProgramDateTimeIntervalSeconds?: CreateHlsManifestConfigurationProgramDateTimeIntervalSecondsInteger;
+    FilterConfiguration?: FilterConfiguration;
   }
   export type CreateHlsManifestConfigurationManifestWindowSecondsInteger = number;
   export type CreateHlsManifestConfigurationProgramDateTimeIntervalSecondsInteger = number;
@@ -400,6 +401,7 @@ declare namespace MediaPackageV2 {
      * Inserts EXT-X-PROGRAM-DATE-TIME tags in the output manifest at the interval that you specify. If you don't enter an interval, EXT-X-PROGRAM-DATE-TIME tags aren't included in the manifest. The tags sync the stream to the wall clock so that viewers can seek to a specific time in the playback timeline on the player. ID3Timed metadata messages generate every 5 seconds whenever the content is ingested. Irrespective of this parameter, if any ID3Timed metadata is in the HLS input, it is passed through to the HLS output.
      */
     ProgramDateTimeIntervalSeconds?: CreateLowLatencyHlsManifestConfigurationProgramDateTimeIntervalSecondsInteger;
+    FilterConfiguration?: FilterConfiguration;
   }
   export type CreateLowLatencyHlsManifestConfigurationManifestWindowSecondsInteger = number;
   export type CreateLowLatencyHlsManifestConfigurationProgramDateTimeIntervalSecondsInteger = number;
@@ -610,6 +612,26 @@ declare namespace MediaPackageV2 {
      */
     CmafEncryptionMethod?: CmafEncryptionMethod;
   }
+  export interface FilterConfiguration {
+    /**
+     * Optionally specify one or more manifest filters for all of your manifest egress requests. When you include a manifest filter, note that you cannot use an identical manifest filter query parameter for this manifest's endpoint URL.
+     */
+    ManifestFilter?: FilterConfigurationManifestFilterString;
+    /**
+     * Optionally specify the start time for all of your manifest egress requests. When you include start time, note that you cannot use start time query parameters for this manifest's endpoint URL.
+     */
+    Start?: Timestamp;
+    /**
+     * Optionally specify the end time for all of your manifest egress requests. When you include end time, note that you cannot use end time query parameters for this manifest's endpoint URL.
+     */
+    End?: Timestamp;
+    /**
+     * Optionally specify the time delay for all of your manifest egress requests. Enter a value that is smaller than your endpoint's startover window. When you include time delay, note that you cannot use time delay query parameters for this manifest's endpoint URL.
+     */
+    TimeDelaySeconds?: FilterConfigurationTimeDelaySecondsInteger;
+  }
+  export type FilterConfigurationManifestFilterString = string;
+  export type FilterConfigurationTimeDelaySecondsInteger = number;
   export interface GetChannelGroupRequest {
     /**
      * The name that describes the channel group. The name is the primary identifier for the channel group, and must be unique for your account in the AWS Region.
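FilterConfiguration moves manifest filtering, start/end clipping, and time delay from per-request query parameters into the endpoint definition itself; the matching query parameters then become disallowed for that manifest. A sketch of attaching it to an HLS manifest at creation time; all names are placeholders and the filter expression is only illustrative:

```ts
import AWS = require('aws-sdk');

const emp = new AWS.MediaPackageV2({ region: 'us-east-1' });

// Create an origin endpoint whose HLS manifest always applies a manifest
// filter and a 60-second time delay (the delay must stay under the
// endpoint's startover window).
emp.createOriginEndpoint({
  ChannelGroupName: 'example-group',        // placeholder
  ChannelName: 'example-channel',           // placeholder
  OriginEndpointName: 'filtered-endpoint',  // placeholder
  ContainerType: 'TS',
  HlsManifests: [{
    ManifestName: 'index',
    FilterConfiguration: {
      ManifestFilter: 'audio_language:en',  // illustrative filter expression
      TimeDelaySeconds: 60,
    },
  }],
}).promise().then(console.log).catch(console.error);
```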
@@ -733,6 +755,7 @@ declare namespace MediaPackageV2 {
      */
     ProgramDateTimeIntervalSeconds?: Integer;
     ScteHls?: ScteHls;
+    FilterConfiguration?: FilterConfiguration;
   }
   export type GetHlsManifests = GetHlsManifestConfiguration[];
   export interface GetLowLatencyHlsManifestConfiguration {
@@ -757,6 +780,7 @@ declare namespace MediaPackageV2 {
      */
     ProgramDateTimeIntervalSeconds?: Integer;
     ScteHls?: ScteHls;
+    FilterConfiguration?: FilterConfiguration;
   }
   export type GetLowLatencyHlsManifests = GetLowLatencyHlsManifestConfiguration[];
   export interface GetOriginEndpointPolicyRequest {
package/node_modules/aws-sdk/clients/neptunedata.d.ts

@@ -116,19 +116,19 @@ declare class Neptunedata extends Service {
    */
  executeGremlinQuery(callback?: (err: AWSError, data: Neptunedata.Types.ExecuteGremlinQueryOutput) => void): Request<Neptunedata.Types.ExecuteGremlinQueryOutput, AWSError>;
  /**
-   * Executes an openCypher explain request. See The openCypher explain feature for more information. When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:ReadDataViaQuery IAM action in that cluster. Note that the neptune-db:QueryLanguage:
+   * Executes an openCypher explain request. See The openCypher explain feature for more information. When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:ReadDataViaQuery IAM action in that cluster. Note that the neptune-db:QueryLanguage:OpenCypher IAM condition key can be used in the policy document to restrict the use of openCypher queries (see Condition keys available in Neptune IAM data-access policy statements).
    */
  executeOpenCypherExplainQuery(params: Neptunedata.Types.ExecuteOpenCypherExplainQueryInput, callback?: (err: AWSError, data: Neptunedata.Types.ExecuteOpenCypherExplainQueryOutput) => void): Request<Neptunedata.Types.ExecuteOpenCypherExplainQueryOutput, AWSError>;
  /**
-   * Executes an openCypher explain request. See The openCypher explain feature for more information. When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:ReadDataViaQuery IAM action in that cluster. Note that the neptune-db:QueryLanguage:
+   * Executes an openCypher explain request. See The openCypher explain feature for more information. When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:ReadDataViaQuery IAM action in that cluster. Note that the neptune-db:QueryLanguage:OpenCypher IAM condition key can be used in the policy document to restrict the use of openCypher queries (see Condition keys available in Neptune IAM data-access policy statements).
    */
  executeOpenCypherExplainQuery(callback?: (err: AWSError, data: Neptunedata.Types.ExecuteOpenCypherExplainQueryOutput) => void): Request<Neptunedata.Types.ExecuteOpenCypherExplainQueryOutput, AWSError>;
  /**
-   * Executes an openCypher query. See Accessing the Neptune Graph with openCypher for more information. Neptune supports building graph applications using openCypher, which is currently one of the most popular query languages among developers working with graph databases. Developers, business analysts, and data scientists like openCypher's declarative, SQL-inspired syntax because it provides a familiar structure in which to querying property graphs. The openCypher language was originally developed by Neo4j, then open-sourced in 2015 and contributed to the openCypher project under an Apache 2 open-source license. Note that when invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows one of the following IAM actions in that cluster, depending on the query: neptune-db:ReadDataViaQuery neptune-db:WriteDataViaQuery neptune-db:DeleteDataViaQuery Note also that the neptune-db:QueryLanguage:
+   * Executes an openCypher query. See Accessing the Neptune Graph with openCypher for more information. Neptune supports building graph applications using openCypher, which is currently one of the most popular query languages among developers working with graph databases. Developers, business analysts, and data scientists like openCypher's declarative, SQL-inspired syntax because it provides a familiar structure in which to querying property graphs. The openCypher language was originally developed by Neo4j, then open-sourced in 2015 and contributed to the openCypher project under an Apache 2 open-source license. Note that when invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows one of the following IAM actions in that cluster, depending on the query: neptune-db:ReadDataViaQuery neptune-db:WriteDataViaQuery neptune-db:DeleteDataViaQuery Note also that the neptune-db:QueryLanguage:OpenCypher IAM condition key can be used in the policy document to restrict the use of openCypher queries (see Condition keys available in Neptune IAM data-access policy statements).
    */
  executeOpenCypherQuery(params: Neptunedata.Types.ExecuteOpenCypherQueryInput, callback?: (err: AWSError, data: Neptunedata.Types.ExecuteOpenCypherQueryOutput) => void): Request<Neptunedata.Types.ExecuteOpenCypherQueryOutput, AWSError>;
  /**
-   * Executes an openCypher query. See Accessing the Neptune Graph with openCypher for more information. Neptune supports building graph applications using openCypher, which is currently one of the most popular query languages among developers working with graph databases. Developers, business analysts, and data scientists like openCypher's declarative, SQL-inspired syntax because it provides a familiar structure in which to querying property graphs. The openCypher language was originally developed by Neo4j, then open-sourced in 2015 and contributed to the openCypher project under an Apache 2 open-source license. Note that when invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows one of the following IAM actions in that cluster, depending on the query: neptune-db:ReadDataViaQuery neptune-db:WriteDataViaQuery neptune-db:DeleteDataViaQuery Note also that the neptune-db:QueryLanguage:
+   * Executes an openCypher query. See Accessing the Neptune Graph with openCypher for more information. Neptune supports building graph applications using openCypher, which is currently one of the most popular query languages among developers working with graph databases. Developers, business analysts, and data scientists like openCypher's declarative, SQL-inspired syntax because it provides a familiar structure in which to querying property graphs. The openCypher language was originally developed by Neo4j, then open-sourced in 2015 and contributed to the openCypher project under an Apache 2 open-source license. Note that when invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows one of the following IAM actions in that cluster, depending on the query: neptune-db:ReadDataViaQuery neptune-db:WriteDataViaQuery neptune-db:DeleteDataViaQuery Note also that the neptune-db:QueryLanguage:OpenCypher IAM condition key can be used in the policy document to restrict the use of openCypher queries (see Condition keys available in Neptune IAM data-access policy statements).
    */
  executeOpenCypherQuery(callback?: (err: AWSError, data: Neptunedata.Types.ExecuteOpenCypherQueryOutput) => void): Request<Neptunedata.Types.ExecuteOpenCypherQueryOutput, AWSError>;
  /**
@@ -184,11 +184,11 @@ declare class Neptunedata extends Service {
    */
  getMLModelTransformJob(callback?: (err: AWSError, data: Neptunedata.Types.GetMLModelTransformJobOutput) => void): Request<Neptunedata.Types.GetMLModelTransformJobOutput, AWSError>;
  /**
-   * Retrieves the status of a specified openCypher query. When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:GetQueryStatus IAM action in that cluster. Note that the neptune-db:QueryLanguage:
+   * Retrieves the status of a specified openCypher query. When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:GetQueryStatus IAM action in that cluster. Note that the neptune-db:QueryLanguage:OpenCypher IAM condition key can be used in the policy document to restrict the use of openCypher queries (see Condition keys available in Neptune IAM data-access policy statements).
    */
  getOpenCypherQueryStatus(params: Neptunedata.Types.GetOpenCypherQueryStatusInput, callback?: (err: AWSError, data: Neptunedata.Types.GetOpenCypherQueryStatusOutput) => void): Request<Neptunedata.Types.GetOpenCypherQueryStatusOutput, AWSError>;
  /**
-   * Retrieves the status of a specified openCypher query. When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:GetQueryStatus IAM action in that cluster. Note that the neptune-db:QueryLanguage:
+   * Retrieves the status of a specified openCypher query. When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:GetQueryStatus IAM action in that cluster. Note that the neptune-db:QueryLanguage:OpenCypher IAM condition key can be used in the policy document to restrict the use of openCypher queries (see Condition keys available in Neptune IAM data-access policy statements).
    */
  getOpenCypherQueryStatus(callback?: (err: AWSError, data: Neptunedata.Types.GetOpenCypherQueryStatusOutput) => void): Request<Neptunedata.Types.GetOpenCypherQueryStatusOutput, AWSError>;
  /**
@@ -196,11 +196,11 @@ declare class Neptunedata extends Service {
    */
  getPropertygraphStatistics(callback?: (err: AWSError, data: Neptunedata.Types.GetPropertygraphStatisticsOutput) => void): Request<Neptunedata.Types.GetPropertygraphStatisticsOutput, AWSError>;
  /**
-   * Gets a stream for a property graph. With the Neptune Streams feature, you can generate a complete sequence of change-log entries that record every change made to your graph data as it happens. GetPropertygraphStream lets you collect these change-log entries for a property graph. The Neptune streams feature needs to be enabled on your Neptune DBcluster. To enable streams, set the neptune_streams DB cluster parameter to 1. See Capturing graph changes in real time using Neptune streams. When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:GetStreamRecords IAM action in that cluster. When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that enables one of the following IAM actions, depending on the query: Note that you can restrict property-graph queries using the following IAM context keys: neptune-db:QueryLanguage:Gremlin neptune-db:QueryLanguage:
+   * Gets a stream for a property graph. With the Neptune Streams feature, you can generate a complete sequence of change-log entries that record every change made to your graph data as it happens. GetPropertygraphStream lets you collect these change-log entries for a property graph. The Neptune streams feature needs to be enabled on your Neptune DBcluster. To enable streams, set the neptune_streams DB cluster parameter to 1. See Capturing graph changes in real time using Neptune streams. When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:GetStreamRecords IAM action in that cluster. When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that enables one of the following IAM actions, depending on the query: Note that you can restrict property-graph queries using the following IAM context keys: neptune-db:QueryLanguage:Gremlin neptune-db:QueryLanguage:OpenCypher See Condition keys available in Neptune IAM data-access policy statements).
    */
  getPropertygraphStream(params: Neptunedata.Types.GetPropertygraphStreamInput, callback?: (err: AWSError, data: Neptunedata.Types.GetPropertygraphStreamOutput) => void): Request<Neptunedata.Types.GetPropertygraphStreamOutput, AWSError>;
  /**
-   * Gets a stream for a property graph. With the Neptune Streams feature, you can generate a complete sequence of change-log entries that record every change made to your graph data as it happens. GetPropertygraphStream lets you collect these change-log entries for a property graph. The Neptune streams feature needs to be enabled on your Neptune DBcluster. To enable streams, set the neptune_streams DB cluster parameter to 1. See Capturing graph changes in real time using Neptune streams. When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:GetStreamRecords IAM action in that cluster. When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that enables one of the following IAM actions, depending on the query: Note that you can restrict property-graph queries using the following IAM context keys: neptune-db:QueryLanguage:Gremlin neptune-db:QueryLanguage:
+   * Gets a stream for a property graph. With the Neptune Streams feature, you can generate a complete sequence of change-log entries that record every change made to your graph data as it happens. GetPropertygraphStream lets you collect these change-log entries for a property graph. The Neptune streams feature needs to be enabled on your Neptune DBcluster. To enable streams, set the neptune_streams DB cluster parameter to 1. See Capturing graph changes in real time using Neptune streams. When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:GetStreamRecords IAM action in that cluster. When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that enables one of the following IAM actions, depending on the query: Note that you can restrict property-graph queries using the following IAM context keys: neptune-db:QueryLanguage:Gremlin neptune-db:QueryLanguage:OpenCypher See Condition keys available in Neptune IAM data-access policy statements).
    */
  getPropertygraphStream(callback?: (err: AWSError, data: Neptunedata.Types.GetPropertygraphStreamOutput) => void): Request<Neptunedata.Types.GetPropertygraphStreamOutput, AWSError>;
  /**
@@ -280,11 +280,11 @@ declare class Neptunedata extends Service {
    */
  listMLModelTransformJobs(callback?: (err: AWSError, data: Neptunedata.Types.ListMLModelTransformJobsOutput) => void): Request<Neptunedata.Types.ListMLModelTransformJobsOutput, AWSError>;
  /**
-   * Lists active openCypher queries. See Neptune openCypher status endpoint for more information. When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:GetQueryStatus IAM action in that cluster. Note that the neptune-db:QueryLanguage:
+   * Lists active openCypher queries. See Neptune openCypher status endpoint for more information. When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:GetQueryStatus IAM action in that cluster. Note that the neptune-db:QueryLanguage:OpenCypher IAM condition key can be used in the policy document to restrict the use of openCypher queries (see Condition keys available in Neptune IAM data-access policy statements).
    */
  listOpenCypherQueries(params: Neptunedata.Types.ListOpenCypherQueriesInput, callback?: (err: AWSError, data: Neptunedata.Types.ListOpenCypherQueriesOutput) => void): Request<Neptunedata.Types.ListOpenCypherQueriesOutput, AWSError>;
  /**
-   * Lists active openCypher queries. See Neptune openCypher status endpoint for more information. When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:GetQueryStatus IAM action in that cluster. Note that the neptune-db:QueryLanguage:
+   * Lists active openCypher queries. See Neptune openCypher status endpoint for more information. When invoking this operation in a Neptune cluster that has IAM authentication enabled, the IAM user or role making the request must have a policy attached that allows the neptune-db:GetQueryStatus IAM action in that cluster. Note that the neptune-db:QueryLanguage:OpenCypher IAM condition key can be used in the policy document to restrict the use of openCypher queries (see Condition keys available in Neptune IAM data-access policy statements).
    */
  listOpenCypherQueries(callback?: (err: AWSError, data: Neptunedata.Types.ListOpenCypherQueriesOutput) => void): Request<Neptunedata.Types.ListOpenCypherQueriesOutput, AWSError>;
  /**
@@ -1643,7 +1643,7 @@ declare namespace Neptunedata {
      */
     updateSingleCardinalityProperties?: Boolean;
     /**
-     * This is an optional flag parameter that indicates whether the load request can be queued up or not. You don't have to wait for one load job to complete before issuing the next one, because Neptune can queue up as many as 64 jobs at a time, provided that their queueRequest parameters are all set to "TRUE". If the queueRequest parameter is omitted or set to "FALSE", the load request will fail if another load job is already running. Allowed values: "TRUE", "FALSE". Default value: "FALSE".
+     * This is an optional flag parameter that indicates whether the load request can be queued up or not. You don't have to wait for one load job to complete before issuing the next one, because Neptune can queue up as many as 64 jobs at a time, provided that their queueRequest parameters are all set to "TRUE". The queue order of the jobs will be first-in-first-out (FIFO). If the queueRequest parameter is omitted or set to "FALSE", the load request will fail if another load job is already running. Allowed values: "TRUE", "FALSE". Default value: "FALSE".
      */
     queueRequest?: Boolean;
     /**
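The doc change pins down that queued loader jobs run first-in-first-out. A sketch of queuing a bulk load through the vendored neptunedata client; the cluster endpoint, bucket, and role ARN are placeholders, and the exact set of required StartLoaderJob fields should be checked against the typings:

```ts
import AWS = require('aws-sdk');

// The Neptune data API is addressed via the cluster endpoint.
const neptune = new AWS.Neptunedata({
  endpoint: 'https://example-cluster.cluster-abc.us-east-1.neptune.amazonaws.com:8182', // placeholder
  region: 'us-east-1',
});

// Queue the load instead of failing when another job is running; Neptune
// holds up to 64 queued jobs and runs them FIFO.
neptune.startLoaderJob({
  source: 's3://example-bucket/graph-data/',                         // placeholder
  format: 'csv',
  iamRoleArn: 'arn:aws:iam::123456789012:role/NeptuneLoadFromS3',    // placeholder
  s3BucketRegion: 'us-east-1',
  queueRequest: true,
}).promise()
  .then((res) => console.log(res.payload))
  .catch(console.error);
```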
package/node_modules/aws-sdk/clients/rds.d.ts

@@ -245,6 +245,14 @@ declare class RDS extends Service {
    * Creates an Aurora global database spread across multiple Amazon Web Services Regions. The global database contains a single primary cluster with read-write capability, and a read-only secondary cluster that receives data from the primary cluster through high-speed replication performed by the Aurora storage subsystem. You can create a global database that is initially empty, and then create the primary and secondary DB clusters in the global database. Or you can specify an existing Aurora cluster during the create operation, and this cluster becomes the primary cluster of the global database. This operation applies only to Aurora DB clusters.
    */
  createGlobalCluster(callback?: (err: AWSError, data: RDS.Types.CreateGlobalClusterResult) => void): Request<RDS.Types.CreateGlobalClusterResult, AWSError>;
+  /**
+   * Creates a zero-ETL integration with Amazon Redshift. For more information, see Working with Amazon Aurora zero-ETL integrations with Amazon Redshift in the Amazon Aurora User Guide.
+   */
+  createIntegration(params: RDS.Types.CreateIntegrationMessage, callback?: (err: AWSError, data: RDS.Types.Integration) => void): Request<RDS.Types.Integration, AWSError>;
+  /**
+   * Creates a zero-ETL integration with Amazon Redshift. For more information, see Working with Amazon Aurora zero-ETL integrations with Amazon Redshift in the Amazon Aurora User Guide.
+   */
+  createIntegration(callback?: (err: AWSError, data: RDS.Types.Integration) => void): Request<RDS.Types.Integration, AWSError>;
  /**
   * Creates a new option group. You can create up to 20 option groups. This command doesn't apply to RDS Custom.
   */
@@ -389,6 +397,14 @@ declare class RDS extends Service {
    * Deletes a global database cluster. The primary and secondary clusters must already be detached or destroyed first. This action only applies to Aurora DB clusters.
    */
  deleteGlobalCluster(callback?: (err: AWSError, data: RDS.Types.DeleteGlobalClusterResult) => void): Request<RDS.Types.DeleteGlobalClusterResult, AWSError>;
+  /**
+   * Deletes a zero-ETL integration with Amazon Redshift. For more information, see Deleting Amazon Aurora zero-ETL integrations with Amazon Redshift in the Amazon Aurora User Guide
+   */
+  deleteIntegration(params: RDS.Types.DeleteIntegrationMessage, callback?: (err: AWSError, data: RDS.Types.Integration) => void): Request<RDS.Types.Integration, AWSError>;
+  /**
+   * Deletes a zero-ETL integration with Amazon Redshift. For more information, see Deleting Amazon Aurora zero-ETL integrations with Amazon Redshift in the Amazon Aurora User Guide
+   */
+  deleteIntegration(callback?: (err: AWSError, data: RDS.Types.Integration) => void): Request<RDS.Types.Integration, AWSError>;
  /**
   * Deletes an existing option group.
   */
@@ -661,6 +677,14 @@ declare class RDS extends Service {
    * Returns information about Aurora global database clusters. This API supports pagination. For more information on Amazon Aurora, see What is Amazon Aurora? in the Amazon Aurora User Guide. This action only applies to Aurora DB clusters.
    */
  describeGlobalClusters(callback?: (err: AWSError, data: RDS.Types.GlobalClustersMessage) => void): Request<RDS.Types.GlobalClustersMessage, AWSError>;
+  /**
+   * Describe one or more zero-ETL integration with Amazon Redshift. For more information, see Viewing and monitoring Amazon Aurora zero-ETL integrations with Amazon Redshift in the Amazon Aurora User Guide
+   */
+  describeIntegrations(params: RDS.Types.DescribeIntegrationsMessage, callback?: (err: AWSError, data: RDS.Types.DescribeIntegrationsResponse) => void): Request<RDS.Types.DescribeIntegrationsResponse, AWSError>;
+  /**
+   * Describe one or more zero-ETL integration with Amazon Redshift. For more information, see Viewing and monitoring Amazon Aurora zero-ETL integrations with Amazon Redshift in the Amazon Aurora User Guide
+   */
+  describeIntegrations(callback?: (err: AWSError, data: RDS.Types.DescribeIntegrationsResponse) => void): Request<RDS.Types.DescribeIntegrationsResponse, AWSError>;
  /**
   * Describes all available options.
   */
@@ -1328,6 +1352,7 @@ declare namespace RDS {
   export interface ApplyPendingMaintenanceActionResult {
     ResourcePendingMaintenanceActions?: ResourcePendingMaintenanceActions;
   }
+  export type Arn = string;
   export type AttributeValueList = String[];
   export type AuditPolicyState = "locked"|"unlocked"|string;
   export type AuthScheme = "SECRETS"|string;
@@ -2749,6 +2774,29 @@ declare namespace RDS {
   export interface CreateGlobalClusterResult {
     GlobalCluster?: GlobalCluster;
   }
+  export interface CreateIntegrationMessage {
+    /**
+     * The Amazon Resource Name (ARN) of the Aurora DB cluster to use as the source for replication.
+     */
+    SourceArn: SourceArn;
+    /**
+     * The ARN of the Redshift data warehouse to use as the target for replication.
+     */
+    TargetArn: Arn;
+    /**
+     * The name of the integration.
+     */
+    IntegrationName: IntegrationName;
+    /**
+     * The Amazon Web Services Key Management System (Amazon Web Services KMS) key identifier for the key to use to encrypt the integration. If you don't specify an encryption key, Aurora uses a default Amazon Web Services owned key.
+     */
+    KMSKeyId?: String;
+    /**
+     * An optional set of non-secret key–value pairs that contains additional contextual information about the data. For more information, see Encryption context in the Amazon Web Services Key Management Service Developer Guide. You can only include this parameter if you specify the KMSKeyId parameter.
+     */
+    AdditionalEncryptionContext?: EncryptionContextMap;
+    Tags?: TagList;
+  }
   export interface CreateOptionGroupMessage {
     /**
      * Specifies the name of the option group to be created. Constraints: Must be 1 to 255 letters, numbers, or hyphens First character must be a letter Can't end with a hyphen or contain two consecutive hyphens Example: myoptiongroup
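A sketch of the new zero-ETL create call using the required members of CreateIntegrationMessage; both ARNs and the integration name are placeholders, and KMSKeyId is omitted so Aurora falls back to its default AWS-owned key:

```ts
import AWS = require('aws-sdk');

const rds = new AWS.RDS({ region: 'us-east-1' });

// Create an Aurora -> Redshift zero-ETL integration. The response is the
// Integration shape itself, so Status typically starts out as "creating".
rds.createIntegration({
  SourceArn: 'arn:aws:rds:us-east-1:123456789012:cluster:example-aurora-cluster',          // placeholder
  TargetArn: 'arn:aws:redshift-serverless:us-east-1:123456789012:namespace/example-ns',    // placeholder
  IntegrationName: 'orders-to-redshift',                                                   // placeholder
}).promise()
  .then((integration) => console.log(integration.IntegrationArn, integration.Status))
  .catch(console.error);
```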
@@ -3654,6 +3702,10 @@ declare namespace RDS {
      * Indicates whether the DB engine version supports forwarding write operations from reader DB instances to the writer DB instance in the DB cluster. By default, write operations aren't allowed on reader DB instances. Valid for: Aurora DB clusters only
      */
     SupportsLocalWriteForwarding?: BooleanOptional;
+    /**
+     * Indicates whether the DB engine version supports Aurora zero-ETL integrations with Amazon Redshift.
+     */
+    SupportsIntegrations?: Boolean;
   }
   export type DBEngineVersionList = DBEngineVersion[];
   export interface DBEngineVersionMessage {
@@ -4868,6 +4920,12 @@ declare namespace RDS {
   export interface DeleteGlobalClusterResult {
     GlobalCluster?: GlobalCluster;
   }
+  export interface DeleteIntegrationMessage {
+    /**
+     * The unique identifier of the integration.
+     */
+    IntegrationIdentifier: IntegrationIdentifier;
+  }
   export interface DeleteOptionGroupMessage {
     /**
      * The name of the option group to be deleted. You can't delete default option groups.
@@ -5657,6 +5715,34 @@ declare namespace RDS {
      */
     Marker?: String;
   }
+  export interface DescribeIntegrationsMessage {
+    /**
+     * The unique identifier of the integration.
+     */
+    IntegrationIdentifier?: IntegrationIdentifier;
+    /**
+     * A filter that specifies one or more resources to return.
+     */
+    Filters?: FilterList;
+    /**
+     * The maximum number of records to include in the response. If more records exist than the specified MaxRecords value, a pagination token called a marker is included in the response so that you can retrieve the remaining results. Default: 100 Constraints: Minimum 20, maximum 100.
+     */
+    MaxRecords?: IntegerOptional;
+    /**
+     * An optional pagination token provided by a previous DescribeIntegrations request. If this parameter is specified, the response includes only records beyond the marker, up to the value specified by MaxRecords.
+     */
+    Marker?: Marker;
+  }
+  export interface DescribeIntegrationsResponse {
+    /**
+     * A pagination token that can be used in a later DescribeIntegrations request.
+     */
+    Marker?: Marker;
+    /**
+     * A list of integrations.
+     */
+    Integrations?: IntegrationList;
+  }
   export interface DescribeOptionGroupOptionsMessage {
     /**
      * A required parameter. Options available for the given engine name are described. Valid Values: mariadb mysql oracle-ee oracle-ee-cdb oracle-se2 oracle-se2-cdb postgres sqlserver-ee sqlserver-se sqlserver-ex sqlserver-web
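DescribeIntegrationsMessage follows the usual RDS Marker/MaxRecords pagination contract, so all integrations can be drained in a loop; the response Marker is absent once the last page has been returned. A sketch:

```ts
import AWS = require('aws-sdk');

// Page through every integration in the account/region.
async function listAllIntegrations(rds: AWS.RDS): Promise<AWS.RDS.Integration[]> {
  const all: AWS.RDS.Integration[] = [];
  let marker: string | undefined;
  do {
    const page = await rds
      .describeIntegrations({ Marker: marker, MaxRecords: 20 })
      .promise();
    all.push(...(page.Integrations ?? []));
    marker = page.Marker; // undefined on the last page
  } while (marker);
  return all;
}

listAllIntegrations(new AWS.RDS({ region: 'us-east-1' }))
  .then((list) => console.log(`${list.length} integrations`))
  .catch(console.error);
```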
@@ -5968,6 +6054,7 @@ declare namespace RDS {
     EC2SecurityGroupOwnerId?: String;
   }
   export type EC2SecurityGroupList = EC2SecurityGroup[];
+  export type EncryptionContextMap = {[key: string]: String};
   export interface Endpoint {
     /**
      * Specifies the DNS address of the DB instance.
@@ -6346,6 +6433,61 @@ declare namespace RDS {
   export type IPRangeList = IPRange[];
   export type Integer = number;
   export type IntegerOptional = number;
+  export interface Integration {
+    /**
+     * The Amazon Resource Name (ARN) of the Aurora DB cluster used as the source for replication.
+     */
+    SourceArn?: SourceArn;
+    /**
+     * The ARN of the Redshift data warehouse used as the target for replication.
+     */
+    TargetArn?: Arn;
+    /**
+     * The name of the integration.
+     */
+    IntegrationName?: IntegrationName;
+    /**
+     * The ARN of the integration.
+     */
+    IntegrationArn?: IntegrationArn;
+    /**
+     * The Amazon Web Services Key Management System (Amazon Web Services KMS) key identifier for the key used to to encrypt the integration.
+     */
+    KMSKeyId?: String;
+    /**
+     * The encryption context for the integration. For more information, see Encryption context in the Amazon Web Services Key Management Service Developer Guide.
+     */
+    AdditionalEncryptionContext?: EncryptionContextMap;
+    /**
+     * The current status of the integration.
+     */
+    Status?: IntegrationStatus;
+    Tags?: TagList;
+    /**
+     * The time when the integration was created, in Universal Coordinated Time (UTC).
+     */
+    CreateTime?: TStamp;
+    /**
+     * Any errors associated with the integration.
+     */
+    Errors?: IntegrationErrorList;
+  }
+  export type IntegrationArn = string;
+  export interface IntegrationError {
+    /**
+     * The error code associated with the integration.
+     */
+    ErrorCode: String;
+    /**
+     * A message explaining the error.
+     */
+    ErrorMessage?: String;
+  }
+  export type IntegrationErrorList = IntegrationError[];
+  export type IntegrationIdentifier = string;
+  export type IntegrationList = Integration[];
+  export type IntegrationName = string;
+  export type IntegrationStatus = "creating"|"active"|"modifying"|"failed"|"deleting"|"syncing"|"needs_attention"|string;
   export type KeyList = String[];
   export type KmsKeyIdOrArn = string;
   export interface ListTagsForResourceMessage {
@@ -6362,6 +6504,7 @@ declare namespace RDS {
   export type LogTypeList = String[];
   export type Long = number;
   export type LongOptional = number;
+  export type Marker = string;
   export interface MasterUserSecret {
     /**
      * The Amazon Resource Name (ARN) of the secret.
@@ -9179,6 +9322,7 @@ declare namespace RDS {
      */
     MaxCapacity?: DoubleOptional;
   }
+  export type SourceArn = string;
   export type SourceIdsList = String[];
   export interface SourceRegion {
     /**
@@ -9545,6 +9689,10 @@ declare namespace RDS {
      * Indicates whether the target engine version supports forwarding write operations from reader DB instances to the writer DB instance in the DB cluster. By default, write operations aren't allowed on reader DB instances. Valid for: Aurora DB clusters only
      */
     SupportsLocalWriteForwarding?: BooleanOptional;
+    /**
+     * Indicates whether the DB engine version supports Aurora zero-ETL integrations with Amazon Redshift.
+     */
+    SupportsIntegrations?: BooleanOptional;
   }
   export interface UserAuthConfig {
     /**