cdk-lambda-subminute 2.0.355 → 2.0.357
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.jsii +3 -3
- package/lib/cdk-lambda-subminute.js +3 -3
- package/node_modules/aws-sdk/README.md +1 -1
- package/node_modules/aws-sdk/apis/athena-2017-05-18.min.json +1 -1
- package/node_modules/aws-sdk/apis/b2bi-2022-06-23.examples.json +709 -0
- package/node_modules/aws-sdk/apis/cloudtrail-2013-11-01.min.json +53 -0
- package/node_modules/aws-sdk/apis/cloudtrail-2013-11-01.paginators.json +5 -0
- package/node_modules/aws-sdk/apis/codebuild-2016-10-06.min.json +335 -119
- package/node_modules/aws-sdk/apis/codebuild-2016-10-06.paginators.json +5 -0
- package/node_modules/aws-sdk/apis/dynamodb-2012-08-10.min.json +83 -36
- package/node_modules/aws-sdk/apis/firehose-2015-08-04.min.json +258 -30
- package/node_modules/aws-sdk/clients/athena.d.ts +9 -5
- package/node_modules/aws-sdk/clients/b2bi.d.ts +2 -2
- package/node_modules/aws-sdk/clients/cloudtrail.d.ts +101 -11
- package/node_modules/aws-sdk/clients/codebuild.d.ts +287 -2
- package/node_modules/aws-sdk/clients/connect.d.ts +5 -5
- package/node_modules/aws-sdk/clients/dynamodb.d.ts +68 -3
- package/node_modules/aws-sdk/clients/firehose.d.ts +231 -0
- package/node_modules/aws-sdk/clients/sagemakerfeaturestoreruntime.d.ts +2 -2
- package/node_modules/aws-sdk/dist/aws-sdk-core-react-native.js +1 -1
- package/node_modules/aws-sdk/dist/aws-sdk-react-native.js +8 -8
- package/node_modules/aws-sdk/dist/aws-sdk.js +743 -189
- package/node_modules/aws-sdk/dist/aws-sdk.min.js +104 -103
- package/node_modules/aws-sdk/lib/core.js +1 -1
- package/node_modules/aws-sdk/lib/dynamodb/document_client.d.ts +60 -3
- package/node_modules/aws-sdk/package.json +1 -1
- package/package.json +3 -3
package/node_modules/aws-sdk/clients/dynamodb.d.ts:

@@ -414,6 +414,14 @@ declare class DynamoDB extends DynamoDBCustomizations {
    * Edits an existing item's attributes, or adds a new item to the table if it does not already exist. You can put, delete, or add attribute values. You can also perform a conditional update on an existing item (insert a new attribute name-value pair if it doesn't exist, or replace an existing name-value pair if it has certain expected attribute values). You can also return the item's attribute values in the same UpdateItem operation using the ReturnValues parameter.
    */
   updateItem(callback?: (err: AWSError, data: DynamoDB.Types.UpdateItemOutput) => void): Request<DynamoDB.Types.UpdateItemOutput, AWSError>;
+  /**
+   * The command to update the Kinesis stream destination.
+   */
+  updateKinesisStreamingDestination(params: DynamoDB.Types.UpdateKinesisStreamingDestinationInput, callback?: (err: AWSError, data: DynamoDB.Types.UpdateKinesisStreamingDestinationOutput) => void): Request<DynamoDB.Types.UpdateKinesisStreamingDestinationOutput, AWSError>;
+  /**
+   * The command to update the Kinesis stream destination.
+   */
+  updateKinesisStreamingDestination(callback?: (err: AWSError, data: DynamoDB.Types.UpdateKinesisStreamingDestinationOutput) => void): Request<DynamoDB.Types.UpdateKinesisStreamingDestinationOutput, AWSError>;
   /**
    * Modifies the provisioned throughput settings, global secondary indexes, or DynamoDB Streams settings for a given table. This operation only applies to Version 2019.11.21 (Current) of global tables. You can only perform one of the following operations at once: Modify the provisioned throughput settings of the table. Remove a global secondary index from the table. Create a new global secondary index on the table. After the index begins backfilling, you can use UpdateTable to perform other operations. UpdateTable is an asynchronous operation; while it is executing, the table status changes from ACTIVE to UPDATING. While it is UPDATING, you cannot issue another UpdateTable request. When the table returns to the ACTIVE state, the UpdateTable operation is complete.
    */
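A minimal usage sketch of the new operation, assuming the runtime client in this aws-sdk build wires up updateKinesisStreamingDestination as declared above; the table name and stream ARN are placeholders, and the request/response shapes appear in a later hunk of this diff:

```ts
import * as AWS from 'aws-sdk';

const dynamodb = new AWS.DynamoDB({ region: 'us-east-1' });

// Switch an existing Kinesis streaming destination to microsecond timestamps.
dynamodb.updateKinesisStreamingDestination({
  TableName: 'my-table',
  StreamArn: 'arn:aws:kinesis:us-east-1:123456789012:stream/my-stream',
  UpdateKinesisStreamingConfiguration: {
    ApproximateCreationDateTimePrecision: 'MICROSECOND',
  },
}, (err, data) => {
  if (err) console.error(err);
  else console.log(data.DestinationStatus); // expected to report "UPDATING" while the change applies
});
```
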
@@ -460,6 +468,7 @@ declare namespace DynamoDB {
   export import Converter = converter;
 }
 declare namespace DynamoDB {
+  export type ApproximateCreationDateTimePrecision = "MILLISECOND"|"MICROSECOND"|string;
   export type ArchivalReason = string;
   export interface ArchivalSummary {
     /**
@@ -1463,8 +1472,14 @@ declare namespace DynamoDB {
      */
     TimeToLiveDescription?: TimeToLiveDescription;
   }
-  export type DestinationStatus = "ENABLING"|"ACTIVE"|"DISABLING"|"DISABLED"|"ENABLE_FAILED"|string;
+  export type DestinationStatus = "ENABLING"|"ACTIVE"|"DISABLING"|"DISABLED"|"ENABLE_FAILED"|"UPDATING"|string;
   export type DoubleObject = number;
+  export interface EnableKinesisStreamingConfiguration {
+    /**
+     * Toggle for the precision of Kinesis data stream timestamp. The values are either MILLISECOND or MICROSECOND.
+     */
+    ApproximateCreationDateTimePrecision?: ApproximateCreationDateTimePrecision;
+  }
   export interface Endpoint {
     /**
      * IP address of the endpoint.
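DestinationStatus gains an UPDATING value for destinations whose configuration is being changed. A sketch that polls the existing describeKinesisStreamingDestination call until the destination settles; table and stream identifiers are placeholders and the polling interval is arbitrary:

```ts
import * as AWS from 'aws-sdk';

const dynamodb = new AWS.DynamoDB({ region: 'us-east-1' });

// Wait until a table's Kinesis destination leaves the transitional states
// ("ENABLING", "DISABLING", and the newly added "UPDATING").
async function waitForKinesisDestination(tableName: string, streamArn: string): Promise<string | undefined> {
  for (;;) {
    const out = await dynamodb.describeKinesisStreamingDestination({ TableName: tableName }).promise();
    const dest = (out.KinesisDataStreamDestinations || []).find(d => d.StreamArn === streamArn);
    const status = dest && dest.DestinationStatus;
    if (status !== 'ENABLING' && status !== 'DISABLING' && status !== 'UPDATING') {
      return status;
    }
    await new Promise(resolve => setTimeout(resolve, 5000));
  }
}
```
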
@@ -2206,6 +2221,10 @@ declare namespace DynamoDB {
      * The human-readable string that corresponds to the replica status.
      */
     DestinationStatusDescription?: String;
+    /**
+     * The precision of the Kinesis data stream timestamp. The values are either MILLISECOND or MICROSECOND.
+     */
+    ApproximateCreationDateTimePrecision?: ApproximateCreationDateTimePrecision;
   }
   export type KinesisDataStreamDestinations = KinesisDataStreamDestination[];
   export interface KinesisStreamingDestinationInput {
@@ -2217,6 +2236,10 @@ declare namespace DynamoDB {
      * The ARN for a Kinesis data stream.
      */
     StreamArn: StreamArn;
+    /**
+     * The source for the Kinesis streaming information that is being enabled.
+     */
+    EnableKinesisStreamingConfiguration?: EnableKinesisStreamingConfiguration;
   }
   export interface KinesisStreamingDestinationOutput {
     /**
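KinesisStreamingDestinationInput is the request shape used by enableKinesisStreamingDestination, so the new field can be supplied when a destination is first enabled. A hedged sketch, with placeholder identifiers:

```ts
import * as AWS from 'aws-sdk';

const dynamodb = new AWS.DynamoDB({ region: 'us-east-1' });

// Enable streaming of item-level changes to Kinesis with microsecond
// ApproximateCreationDateTime precision.
dynamodb.enableKinesisStreamingDestination({
  TableName: 'my-table',
  StreamArn: 'arn:aws:kinesis:us-east-1:123456789012:stream/my-stream',
  EnableKinesisStreamingConfiguration: {
    ApproximateCreationDateTimePrecision: 'MICROSECOND',
  },
}).promise()
  .then(out => console.log(out.DestinationStatus)) // typically "ENABLING"
  .catch(console.error);
```
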
@@ -2231,6 +2254,10 @@ declare namespace DynamoDB {
      * The current status of the replication.
      */
     DestinationStatus?: DestinationStatus;
+    /**
+     * The destination for the Kinesis streaming information that is being enabled.
+     */
+    EnableKinesisStreamingConfiguration?: EnableKinesisStreamingConfiguration;
   }
   export type LastUpdateDateTime = Date;
   export type ListAttributeValue = AttributeValue[];
@@ -2479,7 +2506,7 @@ declare namespace DynamoDB {
   export type NumberSetAttributeValue = NumberAttributeValue[];
   export interface ParameterizedStatement {
     /**
-     * A PartiQL
+     * A PartiQL statement that uses parameters.
     */
     Statement: PartiQLStatement;
     /**
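ParameterizedStatement is the element type of TransactStatements in executeTransaction. A hedged PartiQL sketch, with table and attribute names invented for illustration:

```ts
import * as AWS from 'aws-sdk';

const dynamodb = new AWS.DynamoDB({ region: 'us-east-1' });

// One ParameterizedStatement: a PartiQL statement plus its positional parameters.
dynamodb.executeTransaction({
  TransactStatements: [
    {
      Statement: 'UPDATE "Products" SET Price = ? WHERE ProductId = ?',
      Parameters: [{ N: '19.99' }, { S: 'prod-123' }],
    },
  ],
}).promise()
  .then(() => console.log('transaction committed'))
  .catch(console.error);
```
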
@@ -3805,6 +3832,44 @@ declare namespace DynamoDB {
      */
     ItemCollectionMetrics?: ItemCollectionMetrics;
   }
+  export interface UpdateKinesisStreamingConfiguration {
+    /**
+     * Enables updating the precision of Kinesis data stream timestamp.
+     */
+    ApproximateCreationDateTimePrecision?: ApproximateCreationDateTimePrecision;
+  }
+  export interface UpdateKinesisStreamingDestinationInput {
+    /**
+     * The table name for the Kinesis streaming destination input.
+     */
+    TableName: TableName;
+    /**
+     * The ARN for the Kinesis stream input.
+     */
+    StreamArn: StreamArn;
+    /**
+     * The command to update the Kinesis stream configuration.
+     */
+    UpdateKinesisStreamingConfiguration?: UpdateKinesisStreamingConfiguration;
+  }
+  export interface UpdateKinesisStreamingDestinationOutput {
+    /**
+     * The table name for the Kinesis streaming destination output.
+     */
+    TableName?: TableName;
+    /**
+     * The ARN for the Kinesis stream input.
+     */
+    StreamArn?: StreamArn;
+    /**
+     * The status of the attempt to update the Kinesis streaming destination output.
+     */
+    DestinationStatus?: DestinationStatus;
+    /**
+     * The command to update the Kinesis streaming destination configuration.
+     */
+    UpdateKinesisStreamingConfiguration?: UpdateKinesisStreamingConfiguration;
+  }
   export interface UpdateReplicationGroupMemberAction {
     /**
      * The Region where the replica exists.
@@ -3849,7 +3914,7 @@ declare namespace DynamoDB {
      */
     GlobalSecondaryIndexUpdates?: GlobalSecondaryIndexUpdateList;
     /**
-     * Represents the DynamoDB Streams configuration for the table. You receive a
+     * Represents the DynamoDB Streams configuration for the table. You receive a ResourceInUseException if you try to enable a stream on a table that already has a stream, or if you try to disable a stream on a table that doesn't have a stream.
     */
     StreamSpecification?: StreamSpecification;
     /**
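The clarified text spells out when ResourceInUseException is raised. For context, a sketch of the corresponding updateTable call, with a placeholder table name:

```ts
import * as AWS from 'aws-sdk';

const dynamodb = new AWS.DynamoDB({ region: 'us-east-1' });

// Enabling a stream on a table that already has one (or disabling one on a
// table without a stream) is rejected with ResourceInUseException.
dynamodb.updateTable({
  TableName: 'my-table',
  StreamSpecification: {
    StreamEnabled: true,
    StreamViewType: 'NEW_AND_OLD_IMAGES',
  },
}).promise()
  .then(out => console.log(out.TableDescription && out.TableDescription.TableStatus)) // "UPDATING"
  .catch(err => {
    if (err.code === 'ResourceInUseException') {
      console.log('the table already has a stream enabled');
    } else {
      console.error(err);
    }
  });
```
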
package/node_modules/aws-sdk/clients/firehose.d.ts:

@@ -486,6 +486,10 @@ declare namespace Firehose {
      */
     AmazonOpenSearchServerlessDestinationConfiguration?: AmazonOpenSearchServerlessDestinationConfiguration;
     MSKSourceConfiguration?: MSKSourceConfiguration;
+    /**
+     * Configure Snowflake destination
+     */
+    SnowflakeDestinationConfiguration?: SnowflakeDestinationConfiguration;
   }
   export interface CreateDeliveryStreamOutput {
     /**
@@ -678,6 +682,10 @@ declare namespace Firehose {
      * Describes the specified HTTP endpoint destination.
      */
     HttpEndpointDestinationDescription?: HttpEndpointDestinationDescription;
+    /**
+     * Optional description for the destination
+     */
+    SnowflakeDestinationDescription?: SnowflakeDestinationDescription;
     /**
      * The destination in the Serverless offering for Amazon OpenSearch Service.
      */
@@ -1866,6 +1874,225 @@ declare namespace Firehose {
     OrcSerDe?: OrcSerDe;
   }
   export type SizeInMBs = number;
+  export type SnowflakeAccountUrl = string;
+  export type SnowflakeContentColumnName = string;
+  export type SnowflakeDataLoadingOption = "JSON_MAPPING"|"VARIANT_CONTENT_MAPPING"|"VARIANT_CONTENT_AND_METADATA_MAPPING"|string;
+  export type SnowflakeDatabase = string;
+  export interface SnowflakeDestinationConfiguration {
+    /**
+     * URL for accessing your Snowflake account. This URL must include your account identifier. Note that the protocol (https://) and port number are optional.
+     */
+    AccountUrl: SnowflakeAccountUrl;
+    /**
+     * The private key used to encrypt your Snowflake client. For information, see Using Key Pair Authentication & Key Rotation.
+     */
+    PrivateKey: SnowflakePrivateKey;
+    /**
+     * Passphrase to decrypt the private key when the key is encrypted. For information, see Using Key Pair Authentication & Key Rotation.
+     */
+    KeyPassphrase?: SnowflakeKeyPassphrase;
+    /**
+     * User login name for the Snowflake account.
+     */
+    User: SnowflakeUser;
+    /**
+     * All data in Snowflake is maintained in databases.
+     */
+    Database: SnowflakeDatabase;
+    /**
+     * Each database consists of one or more schemas, which are logical groupings of database objects, such as tables and views
+     */
+    Schema: SnowflakeSchema;
+    /**
+     * All data in Snowflake is stored in database tables, logically structured as collections of columns and rows.
+     */
+    Table: SnowflakeTable;
+    /**
+     * Optionally configure a Snowflake role. Otherwise the default user role will be used.
+     */
+    SnowflakeRoleConfiguration?: SnowflakeRoleConfiguration;
+    /**
+     * Choose to load JSON keys mapped to table column names or choose to split the JSON payload where content is mapped to a record content column and source metadata is mapped to a record metadata column.
+     */
+    DataLoadingOption?: SnowflakeDataLoadingOption;
+    /**
+     * The name of the record metadata column
+     */
+    MetaDataColumnName?: SnowflakeMetaDataColumnName;
+    /**
+     * The name of the record content column
+     */
+    ContentColumnName?: SnowflakeContentColumnName;
+    /**
+     * The VPCE ID for Firehose to privately connect with Snowflake. The ID format is com.amazonaws.vpce.[region].vpce-svc-<[id]>. For more information, see Amazon PrivateLink & Snowflake
+     */
+    SnowflakeVpcConfiguration?: SnowflakeVpcConfiguration;
+    CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
+    ProcessingConfiguration?: ProcessingConfiguration;
+    /**
+     * The Amazon Resource Name (ARN) of the Snowflake role
+     */
+    RoleARN: RoleARN;
+    /**
+     * The time period where Kinesis Data Firehose will retry sending data to the chosen HTTP endpoint.
+     */
+    RetryOptions?: SnowflakeRetryOptions;
+    /**
+     * Choose an S3 backup mode
+     */
+    S3BackupMode?: SnowflakeS3BackupMode;
+    S3Configuration: S3DestinationConfiguration;
+  }
+  export interface SnowflakeDestinationDescription {
+    /**
+     * URL for accessing your Snowflake account. This URL must include your account identifier. Note that the protocol (https://) and port number are optional.
+     */
+    AccountUrl?: SnowflakeAccountUrl;
+    /**
+     * User login name for the Snowflake account.
+     */
+    User?: SnowflakeUser;
+    /**
+     * All data in Snowflake is maintained in databases.
+     */
+    Database?: SnowflakeDatabase;
+    /**
+     * Each database consists of one or more schemas, which are logical groupings of database objects, such as tables and views
+     */
+    Schema?: SnowflakeSchema;
+    /**
+     * All data in Snowflake is stored in database tables, logically structured as collections of columns and rows.
+     */
+    Table?: SnowflakeTable;
+    /**
+     * Optionally configure a Snowflake role. Otherwise the default user role will be used.
+     */
+    SnowflakeRoleConfiguration?: SnowflakeRoleConfiguration;
+    /**
+     * Choose to load JSON keys mapped to table column names or choose to split the JSON payload where content is mapped to a record content column and source metadata is mapped to a record metadata column.
+     */
+    DataLoadingOption?: SnowflakeDataLoadingOption;
+    /**
+     * The name of the record metadata column
+     */
+    MetaDataColumnName?: SnowflakeMetaDataColumnName;
+    /**
+     * The name of the record content column
+     */
+    ContentColumnName?: SnowflakeContentColumnName;
+    /**
+     * The VPCE ID for Firehose to privately connect with Snowflake. The ID format is com.amazonaws.vpce.[region].vpce-svc-<[id]>. For more information, see Amazon PrivateLink & Snowflake
+     */
+    SnowflakeVpcConfiguration?: SnowflakeVpcConfiguration;
+    CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
+    ProcessingConfiguration?: ProcessingConfiguration;
+    /**
+     * The Amazon Resource Name (ARN) of the Snowflake role
+     */
+    RoleARN?: RoleARN;
+    /**
+     * The time period where Kinesis Data Firehose will retry sending data to the chosen HTTP endpoint.
+     */
+    RetryOptions?: SnowflakeRetryOptions;
+    /**
+     * Choose an S3 backup mode
+     */
+    S3BackupMode?: SnowflakeS3BackupMode;
+    S3DestinationDescription?: S3DestinationDescription;
+  }
+  export interface SnowflakeDestinationUpdate {
+    /**
+     * URL for accessing your Snowflake account. This URL must include your account identifier. Note that the protocol (https://) and port number are optional.
+     */
+    AccountUrl?: SnowflakeAccountUrl;
+    /**
+     * The private key used to encrypt your Snowflake client. For information, see Using Key Pair Authentication & Key Rotation.
+     */
+    PrivateKey?: SnowflakePrivateKey;
+    /**
+     * Passphrase to decrypt the private key when the key is encrypted. For information, see Using Key Pair Authentication & Key Rotation.
+     */
+    KeyPassphrase?: SnowflakeKeyPassphrase;
+    /**
+     * User login name for the Snowflake account.
+     */
+    User?: SnowflakeUser;
+    /**
+     * All data in Snowflake is maintained in databases.
+     */
+    Database?: SnowflakeDatabase;
+    /**
+     * Each database consists of one or more schemas, which are logical groupings of database objects, such as tables and views
+     */
+    Schema?: SnowflakeSchema;
+    /**
+     * All data in Snowflake is stored in database tables, logically structured as collections of columns and rows.
+     */
+    Table?: SnowflakeTable;
+    /**
+     * Optionally configure a Snowflake role. Otherwise the default user role will be used.
+     */
+    SnowflakeRoleConfiguration?: SnowflakeRoleConfiguration;
+    /**
+     * JSON keys mapped to table column names or choose to split the JSON payload where content is mapped to a record content column and source metadata is mapped to a record metadata column.
+     */
+    DataLoadingOption?: SnowflakeDataLoadingOption;
+    /**
+     * The name of the record metadata column
+     */
+    MetaDataColumnName?: SnowflakeMetaDataColumnName;
+    /**
+     * The name of the content metadata column
+     */
+    ContentColumnName?: SnowflakeContentColumnName;
+    CloudWatchLoggingOptions?: CloudWatchLoggingOptions;
+    ProcessingConfiguration?: ProcessingConfiguration;
+    /**
+     * The Amazon Resource Name (ARN) of the Snowflake role
+     */
+    RoleARN?: RoleARN;
+    /**
+     * Specify how long Kinesis Data Firehose retries sending data to the New Relic HTTP endpoint. After sending data, Kinesis Data Firehose first waits for an acknowledgment from the HTTP endpoint. If an error occurs or the acknowledgment doesn’t arrive within the acknowledgment timeout period, Kinesis Data Firehose starts the retry duration counter. It keeps retrying until the retry duration expires. After that, Kinesis Data Firehose considers it a data delivery failure and backs up the data to your Amazon S3 bucket. Every time that Kinesis Data Firehose sends data to the HTTP endpoint (either the initial attempt or a retry), it restarts the acknowledgement timeout counter and waits for an acknowledgement from the HTTP endpoint. Even if the retry duration expires, Kinesis Data Firehose still waits for the acknowledgment until it receives it or the acknowledgement timeout period is reached. If the acknowledgment times out, Kinesis Data Firehose determines whether there's time left in the retry counter. If there is time left, it retries again and repeats the logic until it receives an acknowledgment or determines that the retry time has expired. If you don't want Kinesis Data Firehose to retry sending data, set this value to 0.
+     */
+    RetryOptions?: SnowflakeRetryOptions;
+    /**
+     * Choose an S3 backup mode
+     */
+    S3BackupMode?: SnowflakeS3BackupMode;
+    S3Update?: S3DestinationUpdate;
+  }
+  export type SnowflakeKeyPassphrase = string;
+  export type SnowflakeMetaDataColumnName = string;
+  export type SnowflakePrivateKey = string;
+  export type SnowflakePrivateLinkVpceId = string;
+  export type SnowflakeRetryDurationInSeconds = number;
+  export interface SnowflakeRetryOptions {
+    /**
+     * the time period where Kinesis Data Firehose will retry sending data to the chosen HTTP endpoint.
+     */
+    DurationInSeconds?: SnowflakeRetryDurationInSeconds;
+  }
+  export type SnowflakeRole = string;
+  export interface SnowflakeRoleConfiguration {
+    /**
+     * Enable Snowflake role
+     */
+    Enabled?: BooleanObject;
+    /**
+     * The Snowflake role you wish to configure
+     */
+    SnowflakeRole?: SnowflakeRole;
+  }
+  export type SnowflakeS3BackupMode = "FailedDataOnly"|"AllData"|string;
+  export type SnowflakeSchema = string;
+  export type SnowflakeTable = string;
+  export type SnowflakeUser = string;
+  export interface SnowflakeVpcConfiguration {
+    /**
+     * The VPCE ID for Firehose to privately connect with Snowflake. The ID format is com.amazonaws.vpce.[region].vpce-svc-<[id]>. For more information, see Amazon PrivateLink & Snowflake
+     */
+    PrivateLinkVpceId: SnowflakePrivateLinkVpceId;
+  }
   export interface SourceDescription {
     /**
      * The KinesisStreamSourceDescription value for the source Kinesis data stream.
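Taken together, these shapes let a delivery stream target Snowflake. A hedged sketch of createDeliveryStream using the new SnowflakeDestinationConfiguration; the account URL, credentials, ARNs, and object names are all placeholders:

```ts
import * as AWS from 'aws-sdk';

const firehose = new AWS.Firehose({ region: 'us-east-1' });

firehose.createDeliveryStream({
  DeliveryStreamName: 'orders-to-snowflake',
  DeliveryStreamType: 'DirectPut',
  SnowflakeDestinationConfiguration: {
    AccountUrl: 'https://myaccount.us-east-1.snowflakecomputing.com',
    PrivateKey: '...PEM private key for key pair authentication...',
    User: 'FIREHOSE_USER',
    Database: 'ANALYTICS',
    Schema: 'PUBLIC',
    Table: 'ORDERS',
    DataLoadingOption: 'JSON_MAPPING',
    RoleARN: 'arn:aws:iam::123456789012:role/firehose-delivery-role',
    // Failed records fall back to S3, per SnowflakeS3BackupMode.
    S3BackupMode: 'FailedDataOnly',
    S3Configuration: {
      RoleARN: 'arn:aws:iam::123456789012:role/firehose-delivery-role',
      BucketARN: 'arn:aws:s3:::my-firehose-backup-bucket',
    },
  },
}).promise()
  .then(out => console.log(out.DeliveryStreamARN))
  .catch(console.error);
```
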
@@ -2128,6 +2355,10 @@ declare namespace Firehose {
      * Describes an update for a destination in the Serverless offering for Amazon OpenSearch Service.
      */
     AmazonOpenSearchServerlessDestinationUpdate?: AmazonOpenSearchServerlessDestinationUpdate;
+    /**
+     * Update to the Snowflake destination condiguration settings
+     */
+    SnowflakeDestinationUpdate?: SnowflakeDestinationUpdate;
   }
   export interface UpdateDestinationOutput {
   }
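A matching sketch for updating an existing stream's Snowflake destination, for example to lengthen the retry window; the version ID and destination ID must come from describeDeliveryStream, and the single-destination lookup below is an assumption for brevity:

```ts
import * as AWS from 'aws-sdk';

const firehose = new AWS.Firehose({ region: 'us-east-1' });

async function raiseSnowflakeRetryWindow(deliveryStreamName: string): Promise<void> {
  const desc = await firehose.describeDeliveryStream({ DeliveryStreamName: deliveryStreamName }).promise();
  const description = desc.DeliveryStreamDescription;

  await firehose.updateDestination({
    DeliveryStreamName: deliveryStreamName,
    CurrentDeliveryStreamVersionId: description.VersionId,
    DestinationId: description.Destinations[0].DestinationId,
    SnowflakeDestinationUpdate: {
      RetryOptions: { DurationInSeconds: 300 },
    },
  }).promise();
}
```
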
package/node_modules/aws-sdk/clients/sagemakerfeaturestoreruntime.d.ts:

@@ -20,11 +20,11 @@ declare class SageMakerFeatureStoreRuntime extends Service {
    */
   batchGetRecord(callback?: (err: AWSError, data: SageMakerFeatureStoreRuntime.Types.BatchGetRecordResponse) => void): Request<SageMakerFeatureStoreRuntime.Types.BatchGetRecordResponse, AWSError>;
   /**
-   * Deletes a Record from a FeatureGroup in the OnlineStore. Feature Store supports both SoftDelete and HardDelete. For SoftDelete (default), feature columns are set to null and the record is no longer retrievable by GetRecord or BatchGetRecord. For HardDelete, the complete Record is removed from the OnlineStore. In both cases, Feature Store appends the deleted record marker to the OfflineStore with
+   * Deletes a Record from a FeatureGroup in the OnlineStore. Feature Store supports both SoftDelete and HardDelete. For SoftDelete (default), feature columns are set to null and the record is no longer retrievable by GetRecord or BatchGetRecord. For HardDelete, the complete Record is removed from the OnlineStore. In both cases, Feature Store appends the deleted record marker to the OfflineStore. The deleted record marker is a record with the same RecordIdentifer as the original, but with is_deleted value set to True, EventTime set to the delete input EventTime, and other feature values set to null. Note that the EventTime specified in DeleteRecord should be set later than the EventTime of the existing record in the OnlineStore for that RecordIdentifer. If it is not, the deletion does not occur: For SoftDelete, the existing (not deleted) record remains in the OnlineStore, though the delete record marker is still written to the OfflineStore. HardDelete returns EventTime: 400 ValidationException to indicate that the delete operation failed. No delete record marker is written to the OfflineStore. When a record is deleted from the OnlineStore, the deleted record marker is appended to the OfflineStore. If you have the Iceberg table format enabled for your OfflineStore, you can remove all history of a record from the OfflineStore using Amazon Athena or Apache Spark. For information on how to hard delete a record from the OfflineStore with the Iceberg table format enabled, see Delete records from the offline store.
    */
   deleteRecord(params: SageMakerFeatureStoreRuntime.Types.DeleteRecordRequest, callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
   /**
-   * Deletes a Record from a FeatureGroup in the OnlineStore. Feature Store supports both SoftDelete and HardDelete. For SoftDelete (default), feature columns are set to null and the record is no longer retrievable by GetRecord or BatchGetRecord. For HardDelete, the complete Record is removed from the OnlineStore. In both cases, Feature Store appends the deleted record marker to the OfflineStore with
+   * Deletes a Record from a FeatureGroup in the OnlineStore. Feature Store supports both SoftDelete and HardDelete. For SoftDelete (default), feature columns are set to null and the record is no longer retrievable by GetRecord or BatchGetRecord. For HardDelete, the complete Record is removed from the OnlineStore. In both cases, Feature Store appends the deleted record marker to the OfflineStore. The deleted record marker is a record with the same RecordIdentifer as the original, but with is_deleted value set to True, EventTime set to the delete input EventTime, and other feature values set to null. Note that the EventTime specified in DeleteRecord should be set later than the EventTime of the existing record in the OnlineStore for that RecordIdentifer. If it is not, the deletion does not occur: For SoftDelete, the existing (not deleted) record remains in the OnlineStore, though the delete record marker is still written to the OfflineStore. HardDelete returns EventTime: 400 ValidationException to indicate that the delete operation failed. No delete record marker is written to the OfflineStore. When a record is deleted from the OnlineStore, the deleted record marker is appended to the OfflineStore. If you have the Iceberg table format enabled for your OfflineStore, you can remove all history of a record from the OfflineStore using Amazon Athena or Apache Spark. For information on how to hard delete a record from the OfflineStore with the Iceberg table format enabled, see Delete records from the offline store.
    */
   deleteRecord(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
   /**
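For reference, a minimal deleteRecord call matching the expanded documentation; the feature group and record identifier are placeholders, and the DeletionMode values shown are an assumption about this SDK version's DeleteRecordRequest shape:

```ts
import * as AWS from 'aws-sdk';

const featureStore = new AWS.SageMakerFeatureStoreRuntime({ region: 'us-east-1' });

// Soft delete (the default): EventTime must be later than the EventTime of the
// record currently in the OnlineStore, otherwise the delete is not applied.
featureStore.deleteRecord({
  FeatureGroupName: 'customers',
  RecordIdentifierValueAsString: 'customer-42',
  EventTime: new Date().toISOString(),
  DeletionMode: 'SoftDelete', // assumption: "SoftDelete" | "HardDelete" are accepted here
}, (err) => {
  if (err) console.error(err);
  else console.log('delete record marker written to the OfflineStore');
});
```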