cdk-docker-image-deployment 0.0.177 → 0.0.178

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. package/.jsii +3 -3
  2. package/lib/destination.js +1 -1
  3. package/lib/docker-image-deployment.js +1 -1
  4. package/lib/source.js +1 -1
  5. package/node_modules/aws-sdk/CHANGELOG.md +9 -1
  6. package/node_modules/aws-sdk/README.md +1 -1
  7. package/node_modules/aws-sdk/apis/devops-guru-2020-12-01.min.json +4 -2
  8. package/node_modules/aws-sdk/apis/drs-2020-02-26.min.json +40 -36
  9. package/node_modules/aws-sdk/apis/internetmonitor-2021-06-03.examples.json +5 -0
  10. package/node_modules/aws-sdk/apis/internetmonitor-2021-06-03.min.json +590 -0
  11. package/node_modules/aws-sdk/apis/internetmonitor-2021-06-03.paginators.json +16 -0
  12. package/node_modules/aws-sdk/apis/internetmonitor-2021-06-03.waiters2.json +5 -0
  13. package/node_modules/aws-sdk/apis/lambda-2015-03-31.min.json +134 -116
  14. package/node_modules/aws-sdk/apis/mediaconvert-2017-08-29.min.json +142 -127
  15. package/node_modules/aws-sdk/apis/metadata.json +3 -0
  16. package/node_modules/aws-sdk/apis/timestream-write-2018-11-01.min.json +332 -26
  17. package/node_modules/aws-sdk/apis/timestream-write-2018-11-01.paginators.json +5 -0
  18. package/node_modules/aws-sdk/clients/all.d.ts +1 -0
  19. package/node_modules/aws-sdk/clients/all.js +2 -1
  20. package/node_modules/aws-sdk/clients/devopsguru.d.ts +8 -0
  21. package/node_modules/aws-sdk/clients/drs.d.ts +18 -2
  22. package/node_modules/aws-sdk/clients/internetmonitor.d.ts +606 -0
  23. package/node_modules/aws-sdk/clients/internetmonitor.js +19 -0
  24. package/node_modules/aws-sdk/clients/lambda.d.ts +46 -13
  25. package/node_modules/aws-sdk/clients/mediaconvert.d.ts +30 -10
  26. package/node_modules/aws-sdk/clients/timestreamwrite.d.ts +435 -38
  27. package/node_modules/aws-sdk/dist/aws-sdk-core-react-native.js +3 -3
  28. package/node_modules/aws-sdk/dist/aws-sdk-react-native.js +56 -12
  29. package/node_modules/aws-sdk/dist/aws-sdk.js +142 -119
  30. package/node_modules/aws-sdk/dist/aws-sdk.min.js +42 -42
  31. package/node_modules/aws-sdk/lib/config_service_placeholders.d.ts +2 -0
  32. package/node_modules/aws-sdk/lib/core.js +1 -1
  33. package/node_modules/aws-sdk/lib/region_config_data.json +2 -0
  34. package/node_modules/aws-sdk/package.json +1 -1
  35. package/package.json +4 -4
@@ -12,37 +12,53 @@ declare class TimestreamWrite extends Service {
12
12
  constructor(options?: TimestreamWrite.Types.ClientConfiguration)
13
13
  config: Config & TimestreamWrite.Types.ClientConfiguration;
14
14
  /**
15
- * Creates a new Timestream database. If the KMS key is not specified, the database will be encrypted with a Timestream managed KMS key located in your account. Refer to Amazon Web Services managed KMS keys for more info. Service quotas apply. See code sample for details.
15
+ * Creates a new Timestream batch load task. A batch load task processes data from a CSV source in an S3 location and writes to a Timestream table. A mapping from source to target is defined in a batch load task. Errors and events are written to a report at an S3 location. For the report, if the KMS key is not specified, the batch load task will be encrypted with a Timestream managed KMS key located in your account. For more information, see Amazon Web Services managed keys. Service quotas apply. For details, see code sample.
16
+ */
17
+ createBatchLoadTask(params: TimestreamWrite.Types.CreateBatchLoadTaskRequest, callback?: (err: AWSError, data: TimestreamWrite.Types.CreateBatchLoadTaskResponse) => void): Request<TimestreamWrite.Types.CreateBatchLoadTaskResponse, AWSError>;
18
+ /**
19
+ * Creates a new Timestream batch load task. A batch load task processes data from a CSV source in an S3 location and writes to a Timestream table. A mapping from source to target is defined in a batch load task. Errors and events are written to a report at an S3 location. For the report, if the KMS key is not specified, the batch load task will be encrypted with a Timestream managed KMS key located in your account. For more information, see Amazon Web Services managed keys. Service quotas apply. For details, see code sample.
20
+ */
21
+ createBatchLoadTask(callback?: (err: AWSError, data: TimestreamWrite.Types.CreateBatchLoadTaskResponse) => void): Request<TimestreamWrite.Types.CreateBatchLoadTaskResponse, AWSError>;
22
+ /**
23
+ * Creates a new Timestream database. If the KMS key is not specified, the database will be encrypted with a Timestream managed KMS key located in your account. For more information, see Amazon Web Services managed keys. Service quotas apply. For details, see code sample.
16
24
  */
17
25
  createDatabase(params: TimestreamWrite.Types.CreateDatabaseRequest, callback?: (err: AWSError, data: TimestreamWrite.Types.CreateDatabaseResponse) => void): Request<TimestreamWrite.Types.CreateDatabaseResponse, AWSError>;
18
26
  /**
19
- * Creates a new Timestream database. If the KMS key is not specified, the database will be encrypted with a Timestream managed KMS key located in your account. Refer to Amazon Web Services managed KMS keys for more info. Service quotas apply. See code sample for details.
27
+ * Creates a new Timestream database. If the KMS key is not specified, the database will be encrypted with a Timestream managed KMS key located in your account. For more information, see Amazon Web Services managed keys. Service quotas apply. For details, see code sample.
20
28
  */
21
29
  createDatabase(callback?: (err: AWSError, data: TimestreamWrite.Types.CreateDatabaseResponse) => void): Request<TimestreamWrite.Types.CreateDatabaseResponse, AWSError>;
22
30
  /**
23
- * The CreateTable operation adds a new table to an existing database in your account. In an Amazon Web Services account, table names must be at least unique within each Region if they are in the same database. You may have identical table names in the same Region if the tables are in separate databases. While creating the table, you must specify the table name, database name, and the retention properties. Service quotas apply. See code sample for details.
31
+ * Adds a new table to an existing database in your account. In an Amazon Web Services account, table names must be at least unique within each Region if they are in the same database. You might have identical table names in the same Region if the tables are in separate databases. While creating the table, you must specify the table name, database name, and the retention properties. Service quotas apply. See code sample for details.
24
32
  */
25
33
  createTable(params: TimestreamWrite.Types.CreateTableRequest, callback?: (err: AWSError, data: TimestreamWrite.Types.CreateTableResponse) => void): Request<TimestreamWrite.Types.CreateTableResponse, AWSError>;
26
34
  /**
27
- * The CreateTable operation adds a new table to an existing database in your account. In an Amazon Web Services account, table names must be at least unique within each Region if they are in the same database. You may have identical table names in the same Region if the tables are in separate databases. While creating the table, you must specify the table name, database name, and the retention properties. Service quotas apply. See code sample for details.
35
+ * Adds a new table to an existing database in your account. In an Amazon Web Services account, table names must be at least unique within each Region if they are in the same database. You might have identical table names in the same Region if the tables are in separate databases. While creating the table, you must specify the table name, database name, and the retention properties. Service quotas apply. See code sample for details.
28
36
  */
29
37
  createTable(callback?: (err: AWSError, data: TimestreamWrite.Types.CreateTableResponse) => void): Request<TimestreamWrite.Types.CreateTableResponse, AWSError>;
30
38
  /**
31
- * Deletes a given Timestream database. This is an irreversible operation. After a database is deleted, the time series data from its tables cannot be recovered. All tables in the database must be deleted first, or a ValidationException error will be thrown. Due to the nature of distributed retries, the operation can return either success or a ResourceNotFoundException. Clients should consider them equivalent. See code sample for details.
39
+ * Deletes a given Timestream database. This is an irreversible operation. After a database is deleted, the time-series data from its tables cannot be recovered. All tables in the database must be deleted first, or a ValidationException error will be thrown. Due to the nature of distributed retries, the operation can return either success or a ResourceNotFoundException. Clients should consider them equivalent. See code sample for details.
32
40
  */
33
41
  deleteDatabase(params: TimestreamWrite.Types.DeleteDatabaseRequest, callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
34
42
  /**
35
- * Deletes a given Timestream database. This is an irreversible operation. After a database is deleted, the time series data from its tables cannot be recovered. All tables in the database must be deleted first, or a ValidationException error will be thrown. Due to the nature of distributed retries, the operation can return either success or a ResourceNotFoundException. Clients should consider them equivalent. See code sample for details.
43
+ * Deletes a given Timestream database. This is an irreversible operation. After a database is deleted, the time-series data from its tables cannot be recovered. All tables in the database must be deleted first, or a ValidationException error will be thrown. Due to the nature of distributed retries, the operation can return either success or a ResourceNotFoundException. Clients should consider them equivalent. See code sample for details.
36
44
  */
37
45
  deleteDatabase(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
38
46
  /**
39
- * Deletes a given Timestream table. This is an irreversible operation. After a Timestream database table is deleted, the time series data stored in the table cannot be recovered. Due to the nature of distributed retries, the operation can return either success or a ResourceNotFoundException. Clients should consider them equivalent. See code sample for details.
47
+ * Deletes a given Timestream table. This is an irreversible operation. After a Timestream database table is deleted, the time-series data stored in the table cannot be recovered. Due to the nature of distributed retries, the operation can return either success or a ResourceNotFoundException. Clients should consider them equivalent. See code sample for details.
40
48
  */
41
49
  deleteTable(params: TimestreamWrite.Types.DeleteTableRequest, callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
42
50
  /**
43
- * Deletes a given Timestream table. This is an irreversible operation. After a Timestream database table is deleted, the time series data stored in the table cannot be recovered. Due to the nature of distributed retries, the operation can return either success or a ResourceNotFoundException. Clients should consider them equivalent. See code sample for details.
51
+ * Deletes a given Timestream table. This is an irreversible operation. After a Timestream database table is deleted, the time-series data stored in the table cannot be recovered. Due to the nature of distributed retries, the operation can return either success or a ResourceNotFoundException. Clients should consider them equivalent. See code sample for details.
44
52
  */
45
53
  deleteTable(callback?: (err: AWSError, data: {}) => void): Request<{}, AWSError>;
54
+ /**
55
+ * Returns information about the batch load task, including configurations, mappings, progress, and other details. Service quotas apply. See code sample for details.
56
+ */
57
+ describeBatchLoadTask(params: TimestreamWrite.Types.DescribeBatchLoadTaskRequest, callback?: (err: AWSError, data: TimestreamWrite.Types.DescribeBatchLoadTaskResponse) => void): Request<TimestreamWrite.Types.DescribeBatchLoadTaskResponse, AWSError>;
58
+ /**
59
+ * Returns information about the batch load task, including configurations, mappings, progress, and other details. Service quotas apply. See code sample for details.
60
+ */
61
+ describeBatchLoadTask(callback?: (err: AWSError, data: TimestreamWrite.Types.DescribeBatchLoadTaskResponse) => void): Request<TimestreamWrite.Types.DescribeBatchLoadTaskResponse, AWSError>;
46
62
  /**
47
63
  * Returns information about the database, including the database name, time that the database was created, and the total number of tables found within the database. Service quotas apply. See code sample for details.
48
64
  */
@@ -52,11 +68,11 @@ declare class TimestreamWrite extends Service {
52
68
  */
53
69
  describeDatabase(callback?: (err: AWSError, data: TimestreamWrite.Types.DescribeDatabaseResponse) => void): Request<TimestreamWrite.Types.DescribeDatabaseResponse, AWSError>;
54
70
  /**
55
- * DescribeEndpoints returns a list of available endpoints to make Timestream API calls against. This API is available through both Write and Query. Because the Timestream SDKs are designed to transparently work with the service’s architecture, including the management and mapping of the service endpoints, it is not recommended that you use this API unless: You are using VPC endpoints (Amazon Web Services PrivateLink) with Timestream Your application uses a programming language that does not yet have SDK support You require better control over the client-side implementation For detailed information on how and when to use and implement DescribeEndpoints, see The Endpoint Discovery Pattern.
71
+ * Returns a list of available endpoints to make Timestream API calls against. This API operation is available through both the Write and Query APIs. Because the Timestream SDKs are designed to transparently work with the service’s architecture, including the management and mapping of the service endpoints, we don't recommend that you use this API operation unless: You are using VPC endpoints (Amazon Web Services PrivateLink) with Timestream Your application uses a programming language that does not yet have SDK support You require better control over the client-side implementation For detailed information on how and when to use and implement DescribeEndpoints, see The Endpoint Discovery Pattern.
56
72
  */
57
73
  describeEndpoints(params: TimestreamWrite.Types.DescribeEndpointsRequest, callback?: (err: AWSError, data: TimestreamWrite.Types.DescribeEndpointsResponse) => void): Request<TimestreamWrite.Types.DescribeEndpointsResponse, AWSError>;
58
74
  /**
59
- * DescribeEndpoints returns a list of available endpoints to make Timestream API calls against. This API is available through both Write and Query. Because the Timestream SDKs are designed to transparently work with the service’s architecture, including the management and mapping of the service endpoints, it is not recommended that you use this API unless: You are using VPC endpoints (Amazon Web Services PrivateLink) with Timestream Your application uses a programming language that does not yet have SDK support You require better control over the client-side implementation For detailed information on how and when to use and implement DescribeEndpoints, see The Endpoint Discovery Pattern.
75
+ * Returns a list of available endpoints to make Timestream API calls against. This API operation is available through both the Write and Query APIs. Because the Timestream SDKs are designed to transparently work with the service’s architecture, including the management and mapping of the service endpoints, we don't recommend that you use this API operation unless: You are using VPC endpoints (Amazon Web Services PrivateLink) with Timestream Your application uses a programming language that does not yet have SDK support You require better control over the client-side implementation For detailed information on how and when to use and implement DescribeEndpoints, see The Endpoint Discovery Pattern.
60
76
  */
61
77
  describeEndpoints(callback?: (err: AWSError, data: TimestreamWrite.Types.DescribeEndpointsResponse) => void): Request<TimestreamWrite.Types.DescribeEndpointsResponse, AWSError>;
62
78
  /**
@@ -67,6 +83,14 @@ declare class TimestreamWrite extends Service {
67
83
  * Returns information about the table, including the table name, database name, retention duration of the memory store and the magnetic store. Service quotas apply. See code sample for details.
68
84
  */
69
85
  describeTable(callback?: (err: AWSError, data: TimestreamWrite.Types.DescribeTableResponse) => void): Request<TimestreamWrite.Types.DescribeTableResponse, AWSError>;
86
+ /**
87
+ * Provides a list of batch load tasks, along with the name, status, when the task is resumable until, and other details. See code sample for details.
88
+ */
89
+ listBatchLoadTasks(params: TimestreamWrite.Types.ListBatchLoadTasksRequest, callback?: (err: AWSError, data: TimestreamWrite.Types.ListBatchLoadTasksResponse) => void): Request<TimestreamWrite.Types.ListBatchLoadTasksResponse, AWSError>;
90
+ /**
91
+ * Provides a list of batch load tasks, along with the name, status, when the task is resumable until, and other details. See code sample for details.
92
+ */
93
+ listBatchLoadTasks(callback?: (err: AWSError, data: TimestreamWrite.Types.ListBatchLoadTasksResponse) => void): Request<TimestreamWrite.Types.ListBatchLoadTasksResponse, AWSError>;
70
94
  /**
71
95
  * Returns a list of your Timestream databases. Service quotas apply. See code sample for details.
72
96
  */
@@ -76,27 +100,35 @@ declare class TimestreamWrite extends Service {
76
100
  */
77
101
  listDatabases(callback?: (err: AWSError, data: TimestreamWrite.Types.ListDatabasesResponse) => void): Request<TimestreamWrite.Types.ListDatabasesResponse, AWSError>;
78
102
  /**
79
- * A list of tables, along with the name, status and retention properties of each table. See code sample for details.
103
+ * Provides a list of tables, along with the name, status, and retention properties of each table. See code sample for details.
80
104
  */
81
105
  listTables(params: TimestreamWrite.Types.ListTablesRequest, callback?: (err: AWSError, data: TimestreamWrite.Types.ListTablesResponse) => void): Request<TimestreamWrite.Types.ListTablesResponse, AWSError>;
82
106
  /**
83
- * A list of tables, along with the name, status and retention properties of each table. See code sample for details.
107
+ * Provides a list of tables, along with the name, status, and retention properties of each table. See code sample for details.
84
108
  */
85
109
  listTables(callback?: (err: AWSError, data: TimestreamWrite.Types.ListTablesResponse) => void): Request<TimestreamWrite.Types.ListTablesResponse, AWSError>;
86
110
  /**
87
- * List all tags on a Timestream resource.
111
+ * Lists all tags on a Timestream resource.
88
112
  */
89
113
  listTagsForResource(params: TimestreamWrite.Types.ListTagsForResourceRequest, callback?: (err: AWSError, data: TimestreamWrite.Types.ListTagsForResourceResponse) => void): Request<TimestreamWrite.Types.ListTagsForResourceResponse, AWSError>;
90
114
  /**
91
- * List all tags on a Timestream resource.
115
+ * Lists all tags on a Timestream resource.
92
116
  */
93
117
  listTagsForResource(callback?: (err: AWSError, data: TimestreamWrite.Types.ListTagsForResourceResponse) => void): Request<TimestreamWrite.Types.ListTagsForResourceResponse, AWSError>;
94
118
  /**
95
- * Associate a set of tags with a Timestream resource. You can then activate these user-defined tags so that they appear on the Billing and Cost Management console for cost allocation tracking.
119
+ *
120
+ */
121
+ resumeBatchLoadTask(params: TimestreamWrite.Types.ResumeBatchLoadTaskRequest, callback?: (err: AWSError, data: TimestreamWrite.Types.ResumeBatchLoadTaskResponse) => void): Request<TimestreamWrite.Types.ResumeBatchLoadTaskResponse, AWSError>;
122
+ /**
123
+ *
124
+ */
125
+ resumeBatchLoadTask(callback?: (err: AWSError, data: TimestreamWrite.Types.ResumeBatchLoadTaskResponse) => void): Request<TimestreamWrite.Types.ResumeBatchLoadTaskResponse, AWSError>;
126
+ /**
127
+ * Associates a set of tags with a Timestream resource. You can then activate these user-defined tags so that they appear on the Billing and Cost Management console for cost allocation tracking.
96
128
  */
97
129
  tagResource(params: TimestreamWrite.Types.TagResourceRequest, callback?: (err: AWSError, data: TimestreamWrite.Types.TagResourceResponse) => void): Request<TimestreamWrite.Types.TagResourceResponse, AWSError>;
98
130
  /**
99
- * Associate a set of tags with a Timestream resource. You can then activate these user-defined tags so that they appear on the Billing and Cost Management console for cost allocation tracking.
131
+ * Associates a set of tags with a Timestream resource. You can then activate these user-defined tags so that they appear on the Billing and Cost Management console for cost allocation tracking.
100
132
  */
101
133
  tagResource(callback?: (err: AWSError, data: TimestreamWrite.Types.TagResourceResponse) => void): Request<TimestreamWrite.Types.TagResourceResponse, AWSError>;
102
134
  /**
@@ -124,24 +156,169 @@ declare class TimestreamWrite extends Service {
124
156
  */
125
157
  updateTable(callback?: (err: AWSError, data: TimestreamWrite.Types.UpdateTableResponse) => void): Request<TimestreamWrite.Types.UpdateTableResponse, AWSError>;
126
158
  /**
127
- * The WriteRecords operation enables you to write your time series data into Timestream. You can specify a single data point or a batch of data points to be inserted into the system. Timestream offers you with a flexible schema that auto detects the column names and data types for your Timestream tables based on the dimension names and data types of the data points you specify when invoking writes into the database. Timestream support eventual consistency read semantics. This means that when you query data immediately after writing a batch of data into Timestream, the query results might not reflect the results of a recently completed write operation. The results may also include some stale data. If you repeat the query request after a short time, the results should return the latest data. Service quotas apply. See code sample for details. Upserts You can use the Version parameter in a WriteRecords request to update data points. Timestream tracks a version number with each record. Version defaults to 1 when not specified for the record in the request. Timestream will update an existing record’s measure value along with its Version upon receiving a write request with a higher Version number for that record. Upon receiving an update request where the measure value is the same as that of the existing record, Timestream still updates Version, if it is greater than the existing value of Version. You can update a data point as many times as desired, as long as the value of Version continuously increases. For example, suppose you write a new record without indicating Version in the request. Timestream will store this record, and set Version to 1. Now, suppose you try to update this record with a WriteRecords request of the same record with a different measure value but, like before, do not provide Version. In this case, Timestream will reject this update with a RejectedRecordsException since the updated record’s version is not greater than the existing value of Version. 
However, if you were to resend the update request with Version set to 2, Timestream would then succeed in updating the record’s value, and the Version would be set to 2. Next, suppose you sent a WriteRecords request with this same record and an identical measure value, but with Version set to 3. In this case, Timestream would only update Version to 3. Any further updates would need to send a version number greater than 3, or the update requests would receive a RejectedRecordsException.
159
+ * Enables you to write your time-series data into Timestream. You can specify a single data point or a batch of data points to be inserted into the system. Timestream offers you a flexible schema that auto detects the column names and data types for your Timestream tables based on the dimension names and data types of the data points you specify when invoking writes into the database. Timestream supports eventual consistency read semantics. This means that when you query data immediately after writing a batch of data into Timestream, the query results might not reflect the results of a recently completed write operation. The results may also include some stale data. If you repeat the query request after a short time, the results should return the latest data. Service quotas apply. See code sample for details. Upserts You can use the Version parameter in a WriteRecords request to update data points. Timestream tracks a version number with each record. Version defaults to 1 when it's not specified for the record in the request. Timestream updates an existing record’s measure value along with its Version when it receives a write request with a higher Version number for that record. When it receives an update request where the measure value is the same as that of the existing record, Timestream still updates Version, if it is greater than the existing value of Version. You can update a data point as many times as desired, as long as the value of Version continuously increases. For example, suppose you write a new record without indicating Version in the request. Timestream stores this record, and set Version to 1. Now, suppose you try to update this record with a WriteRecords request of the same record with a different measure value but, like before, do not provide Version. In this case, Timestream will reject this update with a RejectedRecordsException since the updated record’s version is not greater than the existing value of Version. 
However, if you were to resend the update request with Version set to 2, Timestream would then succeed in updating the record’s value, and the Version would be set to 2. Next, suppose you sent a WriteRecords request with this same record and an identical measure value, but with Version set to 3. In this case, Timestream would only update Version to 3. Any further updates would need to send a version number greater than 3, or the update requests would receive a RejectedRecordsException.
128
160
  */
129
161
  writeRecords(params: TimestreamWrite.Types.WriteRecordsRequest, callback?: (err: AWSError, data: TimestreamWrite.Types.WriteRecordsResponse) => void): Request<TimestreamWrite.Types.WriteRecordsResponse, AWSError>;
130
162
  /**
131
- * The WriteRecords operation enables you to write your time series data into Timestream. You can specify a single data point or a batch of data points to be inserted into the system. Timestream offers you with a flexible schema that auto detects the column names and data types for your Timestream tables based on the dimension names and data types of the data points you specify when invoking writes into the database. Timestream support eventual consistency read semantics. This means that when you query data immediately after writing a batch of data into Timestream, the query results might not reflect the results of a recently completed write operation. The results may also include some stale data. If you repeat the query request after a short time, the results should return the latest data. Service quotas apply. See code sample for details. Upserts You can use the Version parameter in a WriteRecords request to update data points. Timestream tracks a version number with each record. Version defaults to 1 when not specified for the record in the request. Timestream will update an existing record’s measure value along with its Version upon receiving a write request with a higher Version number for that record. Upon receiving an update request where the measure value is the same as that of the existing record, Timestream still updates Version, if it is greater than the existing value of Version. You can update a data point as many times as desired, as long as the value of Version continuously increases. For example, suppose you write a new record without indicating Version in the request. Timestream will store this record, and set Version to 1. Now, suppose you try to update this record with a WriteRecords request of the same record with a different measure value but, like before, do not provide Version. In this case, Timestream will reject this update with a RejectedRecordsException since the updated record’s version is not greater than the existing value of Version. 
However, if you were to resend the update request with Version set to 2, Timestream would then succeed in updating the record’s value, and the Version would be set to 2. Next, suppose you sent a WriteRecords request with this same record and an identical measure value, but with Version set to 3. In this case, Timestream would only update Version to 3. Any further updates would need to send a version number greater than 3, or the update requests would receive a RejectedRecordsException.
163
+ * Enables you to write your time-series data into Timestream. You can specify a single data point or a batch of data points to be inserted into the system. Timestream offers you a flexible schema that auto detects the column names and data types for your Timestream tables based on the dimension names and data types of the data points you specify when invoking writes into the database. Timestream supports eventual consistency read semantics. This means that when you query data immediately after writing a batch of data into Timestream, the query results might not reflect the results of a recently completed write operation. The results may also include some stale data. If you repeat the query request after a short time, the results should return the latest data. Service quotas apply. See code sample for details. Upserts You can use the Version parameter in a WriteRecords request to update data points. Timestream tracks a version number with each record. Version defaults to 1 when it's not specified for the record in the request. Timestream updates an existing record’s measure value along with its Version when it receives a write request with a higher Version number for that record. When it receives an update request where the measure value is the same as that of the existing record, Timestream still updates Version, if it is greater than the existing value of Version. You can update a data point as many times as desired, as long as the value of Version continuously increases. For example, suppose you write a new record without indicating Version in the request. Timestream stores this record, and set Version to 1. Now, suppose you try to update this record with a WriteRecords request of the same record with a different measure value but, like before, do not provide Version. In this case, Timestream will reject this update with a RejectedRecordsException since the updated record’s version is not greater than the existing value of Version. 
However, if you were to resend the update request with Version set to 2, Timestream would then succeed in updating the record’s value, and the Version would be set to 2. Next, suppose you sent a WriteRecords request with this same record and an identical measure value, but with Version set to 3. In this case, Timestream would only update Version to 3. Any further updates would need to send a version number greater than 3, or the update requests would receive a RejectedRecordsException.
132
164
  */
133
165
  writeRecords(callback?: (err: AWSError, data: TimestreamWrite.Types.WriteRecordsResponse) => void): Request<TimestreamWrite.Types.WriteRecordsResponse, AWSError>;
134
166
  }
135
167
  declare namespace TimestreamWrite {
136
168
  export type AmazonResourceName = string;
169
+ export type BatchLoadDataFormat = "CSV"|string;
170
+ export interface BatchLoadProgressReport {
171
+ /**
172
+ *
173
+ */
174
+ RecordsProcessed?: Long;
175
+ /**
176
+ *
177
+ */
178
+ RecordsIngested?: Long;
179
+ /**
180
+ *
181
+ */
182
+ ParseFailures?: Long;
183
+ /**
184
+ *
185
+ */
186
+ RecordIngestionFailures?: Long;
187
+ /**
188
+ *
189
+ */
190
+ FileFailures?: Long;
191
+ /**
192
+ *
193
+ */
194
+ BytesMetered?: Long;
195
+ }
196
+ export type BatchLoadStatus = "CREATED"|"IN_PROGRESS"|"FAILED"|"SUCCEEDED"|"PROGRESS_STOPPED"|"PENDING_RESUME"|string;
197
+ export interface BatchLoadTask {
198
+ /**
199
+ * The ID of the batch load task.
200
+ */
201
+ TaskId?: BatchLoadTaskId;
202
+ /**
203
+ * Status of the batch load task.
204
+ */
205
+ TaskStatus?: BatchLoadStatus;
206
+ /**
207
+ * Database name for the database into which a batch load task loads data.
208
+ */
209
+ DatabaseName?: ResourceName;
210
+ /**
211
+ * Table name for the table into which a batch load task loads data.
212
+ */
213
+ TableName?: ResourceName;
214
+ /**
215
+ * The time when the Timestream batch load task was created.
216
+ */
217
+ CreationTime?: _Date;
218
+ /**
219
+ * The time when the Timestream batch load task was last updated.
220
+ */
221
+ LastUpdatedTime?: _Date;
222
+ /**
223
+ *
224
+ */
225
+ ResumableUntil?: _Date;
226
+ }
227
+ export interface BatchLoadTaskDescription {
228
+ /**
229
+ * The ID of the batch load task.
230
+ */
231
+ TaskId?: BatchLoadTaskId;
232
+ /**
233
+ *
234
+ */
235
+ ErrorMessage?: StringValue2048;
236
+ /**
237
+ * Configuration details about the data source for a batch load task.
238
+ */
239
+ DataSourceConfiguration?: DataSourceConfiguration;
240
+ /**
241
+ *
242
+ */
243
+ ProgressReport?: BatchLoadProgressReport;
244
+ /**
245
+ * Report configuration for a batch load task. This contains details about where error reports are stored.
246
+ */
247
+ ReportConfiguration?: ReportConfiguration;
248
+ /**
249
+ * Data model configuration for a batch load task. This contains details about where a data model for a batch load task is stored.
250
+ */
251
+ DataModelConfiguration?: DataModelConfiguration;
252
+ /**
253
+ *
254
+ */
255
+ TargetDatabaseName?: ResourceName;
256
+ /**
257
+ *
258
+ */
259
+ TargetTableName?: ResourceName;
260
+ /**
261
+ * Status of the batch load task.
262
+ */
263
+ TaskStatus?: BatchLoadStatus;
264
+ /**
265
+ *
266
+ */
267
+ RecordVersion?: RecordVersion;
268
+ /**
269
+ * The time when the Timestream batch load task was created.
270
+ */
271
+ CreationTime?: _Date;
272
+ /**
273
+ * The time when the Timestream batch load task was last updated.
274
+ */
275
+ LastUpdatedTime?: _Date;
276
+ /**
277
+ *
278
+ */
279
+ ResumableUntil?: _Date;
280
+ }
281
+ export type BatchLoadTaskId = string;
282
+ export type BatchLoadTaskList = BatchLoadTask[];
137
283
  export type Boolean = boolean;
284
+ export type ClientRequestToken = string;
285
+ export interface CreateBatchLoadTaskRequest {
286
+ /**
287
+ *
288
+ */
289
+ ClientToken?: ClientRequestToken;
290
+ DataModelConfiguration?: DataModelConfiguration;
291
+ /**
292
+ * Defines configuration details about the data source for a batch load task.
293
+ */
294
+ DataSourceConfiguration: DataSourceConfiguration;
295
+ ReportConfiguration: ReportConfiguration;
296
+ /**
297
+ * Target Timestream database for a batch load task.
298
+ */
299
+ TargetDatabaseName: ResourceCreateAPIName;
300
+ /**
301
+ * Target Timestream table for a batch load task.
302
+ */
303
+ TargetTableName: ResourceCreateAPIName;
304
+ /**
305
+ *
306
+ */
307
+ RecordVersion?: RecordVersion;
308
+ }
309
+ export interface CreateBatchLoadTaskResponse {
310
+ /**
311
+ * The ID of the batch load task.
312
+ */
313
+ TaskId: BatchLoadTaskId;
314
+ }
138
315
  export interface CreateDatabaseRequest {
139
316
  /**
140
317
  * The name of the Timestream database.
141
318
  */
142
319
  DatabaseName: ResourceCreateAPIName;
143
320
  /**
144
- * The KMS key for the database. If the KMS key is not specified, the database will be encrypted with a Timestream managed KMS key located in your account. Refer to Amazon Web Services managed KMS keys for more info.
321
+ * The KMS key for the database. If the KMS key is not specified, the database will be encrypted with a Timestream managed KMS key located in your account. For more information, see Amazon Web Services managed keys.
145
322
  */
146
323
  KmsKeyId?: StringValue2048;
147
324
  /**
@@ -165,7 +342,7 @@ declare namespace TimestreamWrite {
165
342
  */
166
343
  TableName: ResourceCreateAPIName;
167
344
  /**
168
- * The duration for which your time series data must be stored in the memory store and the magnetic store.
345
+ * The duration for which your time-series data must be stored in the memory store and the magnetic store.
169
346
  */
170
347
  RetentionProperties?: RetentionProperties;
171
348
  /**
@@ -183,6 +360,95 @@ declare namespace TimestreamWrite {
183
360
  */
184
361
  Table?: Table;
185
362
  }
363
+ export interface CsvConfiguration {
364
+ /**
365
+ * Column separator can be one of comma (','), pipe ('|), semicolon (';'), tab('/t'), or blank space (' ').
366
+ */
367
+ ColumnSeparator?: StringValue1;
368
+ /**
369
+ * Escape character can be one of
370
+ */
371
+ EscapeChar?: StringValue1;
372
+ /**
373
+ * Can be single quote (') or double quote (").
374
+ */
375
+ QuoteChar?: StringValue1;
376
+ /**
377
+ * Can be blank space (' ').
378
+ */
379
+ NullValue?: StringValue256;
380
+ /**
381
+ * Specifies to trim leading and trailing white space.
382
+ */
383
+ TrimWhiteSpace?: Boolean;
384
+ }
385
+ export interface DataModel {
386
+ /**
387
+ * Source column to be mapped to time.
388
+ */
389
+ TimeColumn?: StringValue256;
390
+ /**
391
+ * The granularity of the timestamp unit. It indicates if the time value is in seconds, milliseconds, nanoseconds, or other supported values. Default is MILLISECONDS.
392
+ */
393
+ TimeUnit?: TimeUnit;
394
+ /**
395
+ * Source to target mappings for dimensions.
396
+ */
397
+ DimensionMappings: DimensionMappings;
398
+ /**
399
+ * Source to target mappings for multi-measure records.
400
+ */
401
+ MultiMeasureMappings?: MultiMeasureMappings;
402
+ /**
403
+ * Source to target mappings for measures.
404
+ */
405
+ MixedMeasureMappings?: MixedMeasureMappingList;
406
+ /**
407
+ *
408
+ */
409
+ MeasureNameColumn?: StringValue256;
410
+ }
411
+ export interface DataModelConfiguration {
412
+ /**
413
+ *
414
+ */
415
+ DataModel?: DataModel;
416
+ /**
417
+ *
418
+ */
419
+ DataModelS3Configuration?: DataModelS3Configuration;
420
+ }
421
+ export interface DataModelS3Configuration {
422
+ /**
423
+ *
424
+ */
425
+ BucketName?: S3BucketName;
426
+ /**
427
+ *
428
+ */
429
+ ObjectKey?: S3ObjectKey;
430
+ }
431
+ export interface DataSourceConfiguration {
432
+ /**
433
+ * Configuration of an S3 location for a file which contains data to load.
434
+ */
435
+ DataSourceS3Configuration: DataSourceS3Configuration;
436
+ CsvConfiguration?: CsvConfiguration;
437
+ /**
438
+ * This is currently CSV.
439
+ */
440
+ DataFormat: BatchLoadDataFormat;
441
+ }
442
+ export interface DataSourceS3Configuration {
443
+ /**
444
+ * The bucket name of the customer S3 bucket.
445
+ */
446
+ BucketName: S3BucketName;
447
+ /**
448
+ *
449
+ */
450
+ ObjectKeyPrefix?: S3ObjectKey;
451
+ }
186
452
  export interface Database {
187
453
  /**
188
454
  * The Amazon Resource Name that uniquely identifies this database.
@@ -227,6 +493,18 @@ declare namespace TimestreamWrite {
227
493
  */
228
494
  TableName: ResourceName;
229
495
  }
496
+ export interface DescribeBatchLoadTaskRequest {
497
+ /**
498
+ * The ID of the batch load task.
499
+ */
500
+ TaskId: BatchLoadTaskId;
501
+ }
502
+ export interface DescribeBatchLoadTaskResponse {
503
+ /**
504
+ * Description of the batch load task.
505
+ */
506
+ BatchLoadTaskDescription: BatchLoadTaskDescription;
507
+ }
230
508
  export interface DescribeDatabaseRequest {
231
509
  /**
232
510
  * The name of the Timestream database.
@@ -265,7 +543,7 @@ declare namespace TimestreamWrite {
265
543
  }
266
544
  export interface Dimension {
267
545
  /**
268
- * Dimension represents the meta data attributes of the time series. For example, the name and availability zone of an EC2 instance or the name of the manufacturer of a wind turbine are dimensions. For constraints on Dimension names, see Naming Constraints.
546
+ * Dimension represents the metadata attributes of the time series. For example, the name and Availability Zone of an EC2 instance or the name of the manufacturer of a wind turbine are dimensions. For constraints on dimension names, see Naming Constraints.
269
547
  */
270
548
  Name: SchemaName;
271
549
  /**
@@ -273,10 +551,21 @@ declare namespace TimestreamWrite {
273
551
  */
274
552
  Value: SchemaValue;
275
553
  /**
276
- * The data type of the dimension for the time series data point.
554
+ * The data type of the dimension for the time-series data point.
277
555
  */
278
556
  DimensionValueType?: DimensionValueType;
279
557
  }
558
+ export interface DimensionMapping {
559
+ /**
560
+ *
561
+ */
562
+ SourceColumn?: SchemaName;
563
+ /**
564
+ *
565
+ */
566
+ DestinationColumn?: SchemaName;
567
+ }
568
+ export type DimensionMappings = DimensionMapping[];
280
569
  export type DimensionValueType = "VARCHAR"|string;
281
570
  export type Dimensions = Dimension[];
282
571
  export interface Endpoint {
@@ -291,6 +580,30 @@ declare namespace TimestreamWrite {
291
580
  }
292
581
  export type Endpoints = Endpoint[];
293
582
  export type Integer = number;
583
+ export interface ListBatchLoadTasksRequest {
584
+ /**
585
+ * A token to specify where to start paginating. This is the NextToken from a previously truncated response.
586
+ */
587
+ NextToken?: String;
588
+ /**
589
+ * The total number of items to return in the output. If the total number of items available is more than the value specified, a NextToken is provided in the output. To resume pagination, provide the NextToken value as argument of a subsequent API invocation.
590
+ */
591
+ MaxResults?: PageLimit;
592
+ /**
593
+ * Status of the batch load task.
594
+ */
595
+ TaskStatus?: BatchLoadStatus;
596
+ }
597
+ export interface ListBatchLoadTasksResponse {
598
+ /**
599
+ * A token to specify where to start paginating. Provide the next ListBatchLoadTasksRequest.
600
+ */
601
+ NextToken?: String;
602
+ /**
603
+ * A list of batch load task details.
604
+ */
605
+ BatchLoadTasks?: BatchLoadTaskList;
606
+ }
294
607
  export interface ListDatabasesRequest {
295
608
  /**
296
609
  * The pagination token. To resume pagination, provide the NextToken value as argument of a subsequent API invocation.
@@ -367,25 +680,74 @@ declare namespace TimestreamWrite {
367
680
  }
368
681
  export interface MeasureValue {
369
682
  /**
370
- * Name of the MeasureValue. For constraints on MeasureValue names, refer to Naming Constraints in the Timestream developer guide.
683
+ * The name of the MeasureValue. For constraints on MeasureValue names, see Naming Constraints in the Amazon Timestream Developer Guide.
371
684
  */
372
685
  Name: SchemaName;
373
686
  /**
374
- * Value for the MeasureValue.
687
+ * The value for the MeasureValue.
375
688
  */
376
689
  Value: StringValue2048;
377
690
  /**
378
- * Contains the data type of the MeasureValue for the time series data point.
691
+ * Contains the data type of the MeasureValue for the time-series data point.
379
692
  */
380
693
  Type: MeasureValueType;
381
694
  }
382
695
  export type MeasureValueType = "DOUBLE"|"BIGINT"|"VARCHAR"|"BOOLEAN"|"TIMESTAMP"|"MULTI"|string;
383
696
  export type MeasureValues = MeasureValue[];
384
697
  export type MemoryStoreRetentionPeriodInHours = number;
698
+ export interface MixedMeasureMapping {
699
+ /**
700
+ *
701
+ */
702
+ MeasureName?: SchemaName;
703
+ /**
704
+ *
705
+ */
706
+ SourceColumn?: SchemaName;
707
+ /**
708
+ *
709
+ */
710
+ TargetMeasureName?: SchemaName;
711
+ /**
712
+ *
713
+ */
714
+ MeasureValueType: MeasureValueType;
715
+ /**
716
+ *
717
+ */
718
+ MultiMeasureAttributeMappings?: MultiMeasureAttributeMappingList;
719
+ }
720
+ export type MixedMeasureMappingList = MixedMeasureMapping[];
721
+ export interface MultiMeasureAttributeMapping {
722
+ /**
723
+ *
724
+ */
725
+ SourceColumn: SchemaName;
726
+ /**
727
+ *
728
+ */
729
+ TargetMultiMeasureAttributeName?: SchemaName;
730
+ /**
731
+ *
732
+ */
733
+ MeasureValueType?: ScalarMeasureValueType;
734
+ }
735
+ export type MultiMeasureAttributeMappingList = MultiMeasureAttributeMapping[];
736
+ export interface MultiMeasureMappings {
737
+ /**
738
+ *
739
+ */
740
+ TargetMultiMeasureName?: SchemaName;
741
+ /**
742
+ *
743
+ */
744
+ MultiMeasureAttributeMappings: MultiMeasureAttributeMappingList;
745
+ }
746
+ export type PageLimit = number;
385
747
  export type PaginationLimit = number;
386
748
  export interface Record {
387
749
  /**
388
- * Contains the list of dimensions for time series data points.
750
+ * Contains the list of dimensions for time-series data points.
389
751
  */
390
752
  Dimensions?: Dimensions;
391
753
  /**
@@ -393,11 +755,11 @@ declare namespace TimestreamWrite {
393
755
  */
394
756
  MeasureName?: SchemaName;
395
757
  /**
396
- * Contains the measure value for the time series data point.
758
+ * Contains the measure value for the time-series data point.
397
759
  */
398
760
  MeasureValue?: StringValue2048;
399
761
  /**
400
- * Contains the data type of the measure value for the time series data point. Default type is DOUBLE.
762
+ * Contains the data type of the measure value for the time-series data point. Default type is DOUBLE.
401
763
  */
402
764
  MeasureValueType?: MeasureValueType;
403
765
  /**
@@ -405,15 +767,15 @@ declare namespace TimestreamWrite {
405
767
  */
406
768
  Time?: StringValue256;
407
769
  /**
408
- * The granularity of the timestamp unit. It indicates if the time value is in seconds, milliseconds, nanoseconds or other supported values. Default is MILLISECONDS.
770
+ * The granularity of the timestamp unit. It indicates if the time value is in seconds, milliseconds, nanoseconds, or other supported values. Default is MILLISECONDS.
409
771
  */
410
772
  TimeUnit?: TimeUnit;
411
773
  /**
412
- * 64-bit attribute used for record updates. Write requests for duplicate data with a higher version number will update the existing measure value and version. In cases where the measure value is the same, Version will still be updated . Default value is 1. Version must be 1 or greater, or you will receive a ValidationException error.
774
+ * 64-bit attribute used for record updates. Write requests for duplicate data with a higher version number will update the existing measure value and version. In cases where the measure value is the same, Version will still be updated. Default value is 1. Version must be 1 or greater, or you will receive a ValidationException error.
413
775
  */
414
776
  Version?: RecordVersion;
415
777
  /**
416
- * Contains the list of MeasureValue for time series data points. This is only allowed for type MULTI. For scalar values, use MeasureValue attribute of the Record directly.
778
+ * Contains the list of MeasureValue for time-series data points. This is only allowed for type MULTI. For scalar values, use MeasureValue attribute of the record directly.
417
779
  */
418
780
  MeasureValues?: MeasureValues;
419
781
  }
@@ -433,8 +795,40 @@ declare namespace TimestreamWrite {
433
795
  */
434
796
  MagneticStore?: Integer;
435
797
  }
798
+ export interface ReportConfiguration {
799
+ /**
800
+ * Configuration of an S3 location to write error reports and events for a batch load.
801
+ */
802
+ ReportS3Configuration?: ReportS3Configuration;
803
+ }
804
+ export interface ReportS3Configuration {
805
+ /**
806
+ *
807
+ */
808
+ BucketName: S3BucketName;
809
+ /**
810
+ *
811
+ */
812
+ ObjectKeyPrefix?: S3ObjectKeyPrefix;
813
+ /**
814
+ *
815
+ */
816
+ EncryptionOption?: S3EncryptionOption;
817
+ /**
818
+ *
819
+ */
820
+ KmsKeyId?: StringValue2048;
821
+ }
436
822
  export type ResourceCreateAPIName = string;
437
823
  export type ResourceName = string;
824
+ export interface ResumeBatchLoadTaskRequest {
825
+ /**
826
+ * The ID of the batch load task to resume.
827
+ */
828
+ TaskId: BatchLoadTaskId;
829
+ }
830
+ export interface ResumeBatchLoadTaskResponse {
831
+ }
438
832
  export interface RetentionProperties {
439
833
  /**
440
834
  * The duration for which data must be stored in the memory store.
@@ -448,27 +842,30 @@ declare namespace TimestreamWrite {
448
842
  export type S3BucketName = string;
449
843
  export interface S3Configuration {
450
844
  /**
451
- * &gt;Bucket name of the customer S3 bucket.
845
+ * The bucket name of the customer S3 bucket.
452
846
  */
453
847
  BucketName?: S3BucketName;
454
848
  /**
455
- * Object key preview for the customer S3 location.
849
+ * The object key preview for the customer S3 location.
456
850
  */
457
851
  ObjectKeyPrefix?: S3ObjectKeyPrefix;
458
852
  /**
459
- * Encryption option for the customer s3 location. Options are S3 server side encryption with an S3-managed key or KMS managed key.
853
+ * The encryption option for the customer S3 location. Options are S3 server-side encryption with an S3 managed key or Amazon Web Services managed key.
460
854
  */
461
855
  EncryptionOption?: S3EncryptionOption;
462
856
  /**
463
- * KMS key id for the customer s3 location when encrypting with a KMS managed key.
857
+ * The KMS key ID for the customer S3 location when encrypting with an Amazon Web Services managed key.
464
858
  */
465
859
  KmsKeyId?: StringValue2048;
466
860
  }
467
861
  export type S3EncryptionOption = "SSE_S3"|"SSE_KMS"|string;
862
+ export type S3ObjectKey = string;
468
863
  export type S3ObjectKeyPrefix = string;
864
+ export type ScalarMeasureValueType = "DOUBLE"|"BIGINT"|"BOOLEAN"|"VARCHAR"|"TIMESTAMP"|string;
469
865
  export type SchemaName = string;
470
866
  export type SchemaValue = string;
471
867
  export type String = string;
868
+ export type StringValue1 = string;
472
869
  export type StringValue2048 = string;
473
870
  export type StringValue256 = string;
474
871
  export interface Table {
@@ -506,7 +903,7 @@ declare namespace TimestreamWrite {
506
903
  MagneticStoreWriteProperties?: MagneticStoreWriteProperties;
507
904
  }
508
905
  export type TableList = Table[];
509
- export type TableStatus = "ACTIVE"|"DELETING"|string;
906
+ export type TableStatus = "ACTIVE"|"DELETING"|"RESTORING"|string;
510
907
  export interface Tag {
511
908
  /**
512
909
  * The key of the tag. Tag keys are case sensitive.
@@ -593,11 +990,11 @@ declare namespace TimestreamWrite {
593
990
  */
594
991
  TableName: ResourceName;
595
992
  /**
596
- * A record containing the common measure, dimension, time, and version attributes shared across all the records in the request. The measure and dimension attributes specified will be merged with the measure and dimension attributes in the records object when the data is written into Timestream. Dimensions may not overlap, or a ValidationException will be thrown. In other words, a record must contain dimensions with unique names.
993
+ * A record that contains the common measure, dimension, time, and version attributes shared across all the records in the request. The measure and dimension attributes specified will be merged with the measure and dimension attributes in the records object when the data is written into Timestream. Dimensions may not overlap, or a ValidationException will be thrown. In other words, a record must contain dimensions with unique names.
597
994
  */
598
995
  CommonAttributes?: Record;
599
996
  /**
600
- * An array of records containing the unique measure, dimension, time, and version attributes for each time series data point.
997
+ * An array of records that contain the unique measure, dimension, time, and version attributes for each time-series data point.
601
998
  */
602
999
  Records: Records;
603
1000
  }