aws-sdk-timestreamwrite 1.16.0 → 1.17.0

This diff compares the contents of two publicly released versions of the package, as published to a supported public registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in that registry.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 533d244536b4849c1f70f990ebcb131d14ffba6bcfb5be44a0ee4b6af4033c05
- data.tar.gz: d8a3a9565744e2fbb86629c5be97bf9a88f150cf57ef2c4ee3648c1b566d87d8
+ metadata.gz: 5c2c6cfd35217c1d3d2b03c5524452d8acf0f432fe322fde8002bf4037d4bd7f
+ data.tar.gz: 30b852d39cd4bc76b5c950fba1b0585768802d7167d45ce1a534398727bf7325
  SHA512:
- metadata.gz: 6565ada6da2f67162fac839376bde5d1588e93b4ab2919e3b126382be143a2b3b7b453b8fceda3a3209698befee5845c51ccb35feb0dd8c389ee95ae9522f310
- data.tar.gz: c3316d33266829587cdb5c0b8e5196f28003ca198902301fb30664ba62bddca37229e8cd6b2469395982c98bb5ed3fc952baa082656dd99b8ca2ddcd643f3e2d
+ metadata.gz: 75d2a9c987e54b7620cade87544d24a029311ed39695e1d31af1740bacc0970f3079bfa421c9eae26dd109ea5a89c8c3fe7e6a13eaa888e1a2da524574341484
+ data.tar.gz: 75bcefa689a228551a597f85fc4437346b1a7ae64ea6aadf8ecb1618e9603098a59452544617188671ded47fdcf8cf2ddf446e8e84b766b21ae7afa60853fb23
data/CHANGELOG.md CHANGED
@@ -1,6 +1,11 @@
  Unreleased Changes
  ------------------
 
+ 1.17.0 (2023-02-27)
+ ------------------
+
+ * Feature - This release adds the ability to ingest batched historical data or migrate data in bulk from S3 into Timestream using CSV files.
+
  1.16.0 (2023-01-18)
  ------------------
 
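For context on the feature entry above: in the client diff further down, this release surfaces as four new operations (create_batch_load_task, describe_batch_load_task, list_batch_load_tasks, and resume_batch_load_task). A minimal sketch of setting up a client to try them, assuming the gem is installed and that credentials and region resolve through the SDK's usual configuration chain:

    require "aws-sdk-timestreamwrite"

    # The region here is an assumption; credentials come from the standard
    # SDK sources (environment variables, shared config, instance profile).
    client = Aws::TimestreamWrite::Client.new(region: "us-east-1")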
data/VERSION CHANGED
@@ -1 +1 @@
- 1.16.0
+ 1.17.0
data/lib/aws-sdk-timestreamwrite/client.rb CHANGED
@@ -378,11 +378,139 @@ module Aws::TimestreamWrite
 
   # @!group API Operations
 
+ # Creates a new Timestream batch load task. A batch load task processes
+ # data from a CSV source in an S3 location and writes to a Timestream
+ # table. A mapping from source to target is defined in a batch load
+ # task. Errors and events are written to a report at an S3 location. For
+ # the report, if the KMS key is not specified, the batch load task will
+ # be encrypted with a Timestream managed KMS key located in your
+ # account. For more information, see [Amazon Web Services managed
+ # keys][1]. [Service quotas apply][2]. For details, see [code
+ # sample][3].
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk
+ # [2]: https://docs.aws.amazon.com/timestream/latest/developerguide/ts-limits.html
+ # [3]: https://docs.aws.amazon.com/timestream/latest/developerguide/code-samples.create-batch-load.html
+ #
+ # @option params [String] :client_token
+ # **A suitable default value is auto-generated.** You should normally
+ # not need to pass this option.**
+ #
+ # @option params [Types::DataModelConfiguration] :data_model_configuration
+ #
+ # @option params [required, Types::DataSourceConfiguration] :data_source_configuration
+ # Defines configuration details about the data source for a batch load
+ # task.
+ #
+ # @option params [required, Types::ReportConfiguration] :report_configuration
+ # Report configuration for a batch load task. This contains details
+ # about where error reports are stored.
+ #
+ # @option params [required, String] :target_database_name
+ # Target Timestream database for a batch load task.
+ #
+ # @option params [required, String] :target_table_name
+ # Target Timestream table for a batch load task.
+ #
+ # @option params [Integer] :record_version
+ #
+ # @return [Types::CreateBatchLoadTaskResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::CreateBatchLoadTaskResponse#task_id #task_id} => String
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.create_batch_load_task({
+ #   client_token: "ClientRequestToken",
+ #   data_model_configuration: {
+ #     data_model: {
+ #       time_column: "StringValue256",
+ #       time_unit: "MILLISECONDS", # accepts MILLISECONDS, SECONDS, MICROSECONDS, NANOSECONDS
+ #       dimension_mappings: [ # required
+ #         {
+ #           source_column: "SchemaName",
+ #           destination_column: "SchemaName",
+ #         },
+ #       ],
+ #       multi_measure_mappings: {
+ #         target_multi_measure_name: "SchemaName",
+ #         multi_measure_attribute_mappings: [ # required
+ #           {
+ #             source_column: "SchemaName", # required
+ #             target_multi_measure_attribute_name: "SchemaName",
+ #             measure_value_type: "DOUBLE", # accepts DOUBLE, BIGINT, BOOLEAN, VARCHAR, TIMESTAMP
+ #           },
+ #         ],
+ #       },
+ #       mixed_measure_mappings: [
+ #         {
+ #           measure_name: "SchemaName",
+ #           source_column: "SchemaName",
+ #           target_measure_name: "SchemaName",
+ #           measure_value_type: "DOUBLE", # required, accepts DOUBLE, BIGINT, VARCHAR, BOOLEAN, TIMESTAMP, MULTI
+ #           multi_measure_attribute_mappings: [
+ #             {
+ #               source_column: "SchemaName", # required
+ #               target_multi_measure_attribute_name: "SchemaName",
+ #               measure_value_type: "DOUBLE", # accepts DOUBLE, BIGINT, BOOLEAN, VARCHAR, TIMESTAMP
+ #             },
+ #           ],
+ #         },
+ #       ],
+ #       measure_name_column: "StringValue256",
+ #     },
+ #     data_model_s3_configuration: {
+ #       bucket_name: "S3BucketName",
+ #       object_key: "S3ObjectKey",
+ #     },
+ #   },
+ #   data_source_configuration: { # required
+ #     data_source_s3_configuration: { # required
+ #       bucket_name: "S3BucketName", # required
+ #       object_key_prefix: "S3ObjectKey",
+ #     },
+ #     csv_configuration: {
+ #       column_separator: "StringValue1",
+ #       escape_char: "StringValue1",
+ #       quote_char: "StringValue1",
+ #       null_value: "StringValue256",
+ #       trim_white_space: false,
+ #     },
+ #     data_format: "CSV", # required, accepts CSV
+ #   },
+ #   report_configuration: { # required
+ #     report_s3_configuration: {
+ #       bucket_name: "S3BucketName", # required
+ #       object_key_prefix: "S3ObjectKeyPrefix",
+ #       encryption_option: "SSE_S3", # accepts SSE_S3, SSE_KMS
+ #       kms_key_id: "StringValue2048",
+ #     },
+ #   },
+ #   target_database_name: "ResourceCreateAPIName", # required
+ #   target_table_name: "ResourceCreateAPIName", # required
+ #   record_version: 1,
+ # })
+ #
+ # @example Response structure
+ #
+ # resp.task_id #=> String
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/timestream-write-2018-11-01/CreateBatchLoadTask AWS API Documentation
+ #
+ # @overload create_batch_load_task(params = {})
+ # @param [Hash] params ({})
+ def create_batch_load_task(params = {}, options = {})
+   req = build_request(:create_batch_load_task, params)
+   req.send_request(options)
+ end
+
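The placeholder syntax above lists every available shape; only the data source, report location, and target are marked required. A hedged, minimal sketch of starting a CSV batch load (bucket, prefix, database, and table names are hypothetical, and the service may additionally expect a data model mapping for the CSV columns):

    # Map CSV files under s3://example-ingest/backfill/ into an existing
    # table, writing error reports under s3://example-ingest/reports/.
    # All names below are made up.
    resp = client.create_batch_load_task({
      data_source_configuration: {
        data_source_s3_configuration: {
          bucket_name: "example-ingest",
          object_key_prefix: "backfill/",
        },
        data_format: "CSV",
      },
      report_configuration: {
        report_s3_configuration: {
          bucket_name: "example-ingest",
          object_key_prefix: "reports/",
        },
      },
      target_database_name: "example_db",
      target_table_name: "example_table",
    })
    task_id = resp.task_id # handle for the describe/resume calls below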
   # Creates a new Timestream database. If the KMS key is not specified,
   # the database will be encrypted with a Timestream managed KMS key
- # located in your account. Refer to [Amazon Web Services managed KMS
- # keys][1] for more info. [Service quotas apply][2]. See [code
- # sample][3] for details.
+ # located in your account. For more information, see [Amazon Web
+ # Services managed keys][1]. [Service quotas apply][2]. For details, see
+ # [code sample][3].
   #
   #
   #
@@ -396,8 +524,8 @@ module Aws::TimestreamWrite
   # @option params [String] :kms_key_id
   # The KMS key for the database. If the KMS key is not specified, the
   # database will be encrypted with a Timestream managed KMS key located
- # in your account. Refer to [Amazon Web Services managed KMS keys][1]
- # for more info.
+ # in your account. For more information, see [Amazon Web Services
+ # managed keys][1].
   #
   #
   #
@@ -441,13 +569,13 @@ module Aws::TimestreamWrite
   req.send_request(options)
   end
 
- # The CreateTable operation adds a new table to an existing database in
- # your account. In an Amazon Web Services account, table names must be
- # at least unique within each Region if they are in the same database.
- # You may have identical table names in the same Region if the tables
- # are in separate databases. While creating the table, you must specify
- # the table name, database name, and the retention properties. [Service
- # quotas apply][1]. See [code sample][2] for details.
+ # Adds a new table to an existing database in your account. In an Amazon
+ # Web Services account, table names must be at least unique within each
+ # Region if they are in the same database. You might have identical
+ # table names in the same Region if the tables are in separate
+ # databases. While creating the table, you must specify the table name,
+ # database name, and the retention properties. [Service quotas
+ # apply][1]. See [code sample][2] for details.
   #
   #
   #
@@ -461,7 +589,7 @@ module Aws::TimestreamWrite
   # The name of the Timestream table.
   #
   # @option params [Types::RetentionProperties] :retention_properties
- # The duration for which your time series data must be stored in the
+ # The duration for which your time-series data must be stored in the
   # memory store and the magnetic store.
   #
   # @option params [Array<Types::Tag>] :tags
@@ -508,7 +636,7 @@ module Aws::TimestreamWrite
   # resp.table.arn #=> String
   # resp.table.table_name #=> String
   # resp.table.database_name #=> String
- # resp.table.table_status #=> String, one of "ACTIVE", "DELETING"
+ # resp.table.table_status #=> String, one of "ACTIVE", "DELETING", "RESTORING"
   # resp.table.retention_properties.memory_store_retention_period_in_hours #=> Integer
   # resp.table.retention_properties.magnetic_store_retention_period_in_days #=> Integer
   # resp.table.creation_time #=> Time
@@ -529,7 +657,7 @@ module Aws::TimestreamWrite
   end
 
   # Deletes a given Timestream database. *This is an irreversible
- # operation. After a database is deleted, the time series data from its
+ # operation. After a database is deleted, the time-series data from its
   # tables cannot be recovered.*
   #
   # <note markdown="1"> All tables in the database must be deleted first, or a
@@ -568,7 +696,7 @@ module Aws::TimestreamWrite
   end
 
   # Deletes a given Timestream table. This is an irreversible operation.
- # After a Timestream database table is deleted, the time series data
+ # After a Timestream database table is deleted, the time-series data
   # stored in the table cannot be recovered.
   #
   # <note markdown="1"> Due to the nature of distributed retries, the operation can return
@@ -608,6 +736,89 @@ module Aws::TimestreamWrite
   req.send_request(options)
   end
 
+ # Returns information about the batch load task, including
+ # configurations, mappings, progress, and other details. [Service quotas
+ # apply][1]. See [code sample][2] for details.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/timestream/latest/developerguide/ts-limits.html
+ # [2]: https://docs.aws.amazon.com/timestream/latest/developerguide/code-samples.describe-batch-load.html
+ #
+ # @option params [required, String] :task_id
+ # The ID of the batch load task.
+ #
+ # @return [Types::DescribeBatchLoadTaskResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::DescribeBatchLoadTaskResponse#batch_load_task_description #batch_load_task_description} => Types::BatchLoadTaskDescription
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.describe_batch_load_task({
+ #   task_id: "BatchLoadTaskId", # required
+ # })
+ #
+ # @example Response structure
+ #
+ # resp.batch_load_task_description.task_id #=> String
+ # resp.batch_load_task_description.error_message #=> String
+ # resp.batch_load_task_description.data_source_configuration.data_source_s3_configuration.bucket_name #=> String
+ # resp.batch_load_task_description.data_source_configuration.data_source_s3_configuration.object_key_prefix #=> String
+ # resp.batch_load_task_description.data_source_configuration.csv_configuration.column_separator #=> String
+ # resp.batch_load_task_description.data_source_configuration.csv_configuration.escape_char #=> String
+ # resp.batch_load_task_description.data_source_configuration.csv_configuration.quote_char #=> String
+ # resp.batch_load_task_description.data_source_configuration.csv_configuration.null_value #=> String
+ # resp.batch_load_task_description.data_source_configuration.csv_configuration.trim_white_space #=> Boolean
+ # resp.batch_load_task_description.data_source_configuration.data_format #=> String, one of "CSV"
+ # resp.batch_load_task_description.progress_report.records_processed #=> Integer
+ # resp.batch_load_task_description.progress_report.records_ingested #=> Integer
+ # resp.batch_load_task_description.progress_report.parse_failures #=> Integer
+ # resp.batch_load_task_description.progress_report.record_ingestion_failures #=> Integer
+ # resp.batch_load_task_description.progress_report.file_failures #=> Integer
+ # resp.batch_load_task_description.progress_report.bytes_metered #=> Integer
+ # resp.batch_load_task_description.report_configuration.report_s3_configuration.bucket_name #=> String
+ # resp.batch_load_task_description.report_configuration.report_s3_configuration.object_key_prefix #=> String
+ # resp.batch_load_task_description.report_configuration.report_s3_configuration.encryption_option #=> String, one of "SSE_S3", "SSE_KMS"
+ # resp.batch_load_task_description.report_configuration.report_s3_configuration.kms_key_id #=> String
+ # resp.batch_load_task_description.data_model_configuration.data_model.time_column #=> String
+ # resp.batch_load_task_description.data_model_configuration.data_model.time_unit #=> String, one of "MILLISECONDS", "SECONDS", "MICROSECONDS", "NANOSECONDS"
+ # resp.batch_load_task_description.data_model_configuration.data_model.dimension_mappings #=> Array
+ # resp.batch_load_task_description.data_model_configuration.data_model.dimension_mappings[0].source_column #=> String
+ # resp.batch_load_task_description.data_model_configuration.data_model.dimension_mappings[0].destination_column #=> String
+ # resp.batch_load_task_description.data_model_configuration.data_model.multi_measure_mappings.target_multi_measure_name #=> String
+ # resp.batch_load_task_description.data_model_configuration.data_model.multi_measure_mappings.multi_measure_attribute_mappings #=> Array
+ # resp.batch_load_task_description.data_model_configuration.data_model.multi_measure_mappings.multi_measure_attribute_mappings[0].source_column #=> String
+ # resp.batch_load_task_description.data_model_configuration.data_model.multi_measure_mappings.multi_measure_attribute_mappings[0].target_multi_measure_attribute_name #=> String
+ # resp.batch_load_task_description.data_model_configuration.data_model.multi_measure_mappings.multi_measure_attribute_mappings[0].measure_value_type #=> String, one of "DOUBLE", "BIGINT", "BOOLEAN", "VARCHAR", "TIMESTAMP"
+ # resp.batch_load_task_description.data_model_configuration.data_model.mixed_measure_mappings #=> Array
+ # resp.batch_load_task_description.data_model_configuration.data_model.mixed_measure_mappings[0].measure_name #=> String
+ # resp.batch_load_task_description.data_model_configuration.data_model.mixed_measure_mappings[0].source_column #=> String
+ # resp.batch_load_task_description.data_model_configuration.data_model.mixed_measure_mappings[0].target_measure_name #=> String
+ # resp.batch_load_task_description.data_model_configuration.data_model.mixed_measure_mappings[0].measure_value_type #=> String, one of "DOUBLE", "BIGINT", "VARCHAR", "BOOLEAN", "TIMESTAMP", "MULTI"
+ # resp.batch_load_task_description.data_model_configuration.data_model.mixed_measure_mappings[0].multi_measure_attribute_mappings #=> Array
+ # resp.batch_load_task_description.data_model_configuration.data_model.mixed_measure_mappings[0].multi_measure_attribute_mappings[0].source_column #=> String
+ # resp.batch_load_task_description.data_model_configuration.data_model.mixed_measure_mappings[0].multi_measure_attribute_mappings[0].target_multi_measure_attribute_name #=> String
+ # resp.batch_load_task_description.data_model_configuration.data_model.mixed_measure_mappings[0].multi_measure_attribute_mappings[0].measure_value_type #=> String, one of "DOUBLE", "BIGINT", "BOOLEAN", "VARCHAR", "TIMESTAMP"
+ # resp.batch_load_task_description.data_model_configuration.data_model.measure_name_column #=> String
+ # resp.batch_load_task_description.data_model_configuration.data_model_s3_configuration.bucket_name #=> String
+ # resp.batch_load_task_description.data_model_configuration.data_model_s3_configuration.object_key #=> String
+ # resp.batch_load_task_description.target_database_name #=> String
+ # resp.batch_load_task_description.target_table_name #=> String
+ # resp.batch_load_task_description.task_status #=> String, one of "CREATED", "IN_PROGRESS", "FAILED", "SUCCEEDED", "PROGRESS_STOPPED", "PENDING_RESUME"
+ # resp.batch_load_task_description.record_version #=> Integer
+ # resp.batch_load_task_description.creation_time #=> Time
+ # resp.batch_load_task_description.last_updated_time #=> Time
+ # resp.batch_load_task_description.resumable_until #=> Time
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/timestream-write-2018-11-01/DescribeBatchLoadTask AWS API Documentation
+ #
+ # @overload describe_batch_load_task(params = {})
+ # @param [Hash] params ({})
+ def describe_batch_load_task(params = {}, options = {})
+   req = build_request(:describe_batch_load_task, params)
+   req.send_request(options)
+ end
+
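Batch load tasks run asynchronously, so a natural use of this call is to poll until the task leaves a non-terminal status. A sketch under that assumption (the polling interval is arbitrary, and progress_report fields may be empty early in a task's life):

    desc = nil
    loop do
      desc = client.describe_batch_load_task(task_id: task_id)
                   .batch_load_task_description
      # CREATED and IN_PROGRESS are the non-terminal statuses in the enum above.
      break unless %w[CREATED IN_PROGRESS].include?(desc.task_status)
      sleep 30
    end
    puts "#{desc.task_status}: #{desc.progress_report.records_ingested} records ingested"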
   # Returns information about the database, including the database name,
   # time that the database was created, and the total number of tables
   # found within the database. [Service quotas apply][1]. See [code
@@ -649,14 +860,14 @@ module Aws::TimestreamWrite
   req.send_request(options)
   end
 
- # DescribeEndpoints returns a list of available endpoints to make
- # Timestream API calls against. This API is available through both Write
- # and Query.
+ # Returns a list of available endpoints to make Timestream API calls
+ # against. This API operation is available through both the Write and
+ # Query APIs.
   #
   # Because the Timestream SDKs are designed to transparently work with
   # the service’s architecture, including the management and mapping of
- # the service endpoints, *it is not recommended that you use this API
- # unless*\:
+ # the service endpoints, *we don't recommend that you use this API
+ # operation unless*\:
   #
   # * You are using [VPC endpoints (Amazon Web Services PrivateLink) with
   # Timestream][1]
@@ -724,7 +935,7 @@ module Aws::TimestreamWrite
   # resp.table.arn #=> String
   # resp.table.table_name #=> String
   # resp.table.database_name #=> String
- # resp.table.table_status #=> String, one of "ACTIVE", "DELETING"
+ # resp.table.table_status #=> String, one of "ACTIVE", "DELETING", "RESTORING"
   # resp.table.retention_properties.memory_store_retention_period_in_hours #=> Integer
   # resp.table.retention_properties.magnetic_store_retention_period_in_days #=> Integer
   # resp.table.creation_time #=> Time
@@ -744,6 +955,63 @@ module Aws::TimestreamWrite
   req.send_request(options)
   end
 
+ # Provides a list of batch load tasks, along with the name, status, when
+ # the task is resumable until, and other details. See [code sample][1]
+ # for details.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/timestream/latest/developerguide/code-samples.list-batch-load-tasks.html
+ #
+ # @option params [String] :next_token
+ # A token to specify where to start paginating. This is the NextToken
+ # from a previously truncated response.
+ #
+ # @option params [Integer] :max_results
+ # The total number of items to return in the output. If the total number
+ # of items available is more than the value specified, a NextToken is
+ # provided in the output. To resume pagination, provide the NextToken
+ # value as argument of a subsequent API invocation.
+ #
+ # @option params [String] :task_status
+ # Status of the batch load task.
+ #
+ # @return [Types::ListBatchLoadTasksResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::ListBatchLoadTasksResponse#next_token #next_token} => String
+ # * {Types::ListBatchLoadTasksResponse#batch_load_tasks #batch_load_tasks} => Array&lt;Types::BatchLoadTask&gt;
+ #
+ # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.list_batch_load_tasks({
+ #   next_token: "String",
+ #   max_results: 1,
+ #   task_status: "CREATED", # accepts CREATED, IN_PROGRESS, FAILED, SUCCEEDED, PROGRESS_STOPPED, PENDING_RESUME
+ # })
+ #
+ # @example Response structure
+ #
+ # resp.next_token #=> String
+ # resp.batch_load_tasks #=> Array
+ # resp.batch_load_tasks[0].task_id #=> String
+ # resp.batch_load_tasks[0].task_status #=> String, one of "CREATED", "IN_PROGRESS", "FAILED", "SUCCEEDED", "PROGRESS_STOPPED", "PENDING_RESUME"
+ # resp.batch_load_tasks[0].database_name #=> String
+ # resp.batch_load_tasks[0].table_name #=> String
+ # resp.batch_load_tasks[0].creation_time #=> Time
+ # resp.batch_load_tasks[0].last_updated_time #=> Time
+ # resp.batch_load_tasks[0].resumable_until #=> Time
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/timestream-write-2018-11-01/ListBatchLoadTasks AWS API Documentation
+ #
+ # @overload list_batch_load_tasks(params = {})
+ # @param [Hash] params ({})
+ def list_batch_load_tasks(params = {}, options = {})
+   req = build_request(:list_batch_load_tasks, params)
+   req.send_request(options)
+ end
+
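Because the response is pageable and Enumerable, you can let the SDK follow NextToken instead of threading it by hand. A small sketch:

    # Each iteration yields one page of results; the SDK requests the
    # next page automatically until the listing is exhausted.
    client.list_batch_load_tasks(max_results: 50).each do |page|
      page.batch_load_tasks.each do |task|
        puts "#{task.task_id} #{task.task_status} (resumable until: #{task.resumable_until})"
      end
    end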
   # Returns a list of your Timestream databases. [Service quotas
   # apply][1]. See [code sample][2] for details.
   #
@@ -796,8 +1064,8 @@ module Aws::TimestreamWrite
   req.send_request(options)
   end
 
- # A list of tables, along with the name, status and retention properties
- # of each table. See [code sample][1] for details.
+ # Provides a list of tables, along with the name, status, and retention
+ # properties of each table. See [code sample][1] for details.
   #
   #
   #
@@ -837,7 +1105,7 @@ module Aws::TimestreamWrite
   # resp.tables[0].arn #=> String
   # resp.tables[0].table_name #=> String
   # resp.tables[0].database_name #=> String
- # resp.tables[0].table_status #=> String, one of "ACTIVE", "DELETING"
+ # resp.tables[0].table_status #=> String, one of "ACTIVE", "DELETING", "RESTORING"
   # resp.tables[0].retention_properties.memory_store_retention_period_in_hours #=> Integer
   # resp.tables[0].retention_properties.magnetic_store_retention_period_in_days #=> Integer
   # resp.tables[0].creation_time #=> Time
@@ -858,7 +1126,7 @@ module Aws::TimestreamWrite
   req.send_request(options)
   end
 
- # List all tags on a Timestream resource.
+ # Lists all tags on a Timestream resource.
   #
   # @option params [required, String] :resource_arn
   # The Timestream resource with tags to be listed. This value is an
@@ -889,7 +1157,27 @@ module Aws::TimestreamWrite
   req.send_request(options)
   end
 
- # Associate a set of tags with a Timestream resource. You can then
+ # @option params [required, String] :task_id
+ # The ID of the batch load task to resume.
+ #
+ # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.resume_batch_load_task({
+ #   task_id: "BatchLoadTaskId", # required
+ # })
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/timestream-write-2018-11-01/ResumeBatchLoadTask AWS API Documentation
+ #
+ # @overload resume_batch_load_task(params = {})
+ # @param [Hash] params ({})
+ def resume_batch_load_task(params = {}, options = {})
+   req = build_request(:resume_batch_load_task, params)
+   req.send_request(options)
+ end
+
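A stopped task can only be resumed while its resumable_until timestamp is still in the future, so this call pairs naturally with list_batch_load_tasks. A hedged sketch that resumes whatever is still within its window:

    client.list_batch_load_tasks(task_status: "PROGRESS_STOPPED").each do |page|
      page.batch_load_tasks.each do |task|
        # Skip tasks whose resume window has already closed.
        next if task.resumable_until && task.resumable_until < Time.now
        client.resume_batch_load_task(task_id: task.task_id)
      end
    end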
+ # Associates a set of tags with a Timestream resource. You can then
   # activate these user-defined tags so that they appear on the Billing
   # and Cost Management console for cost allocation tracking.
   #
@@ -1070,7 +1358,7 @@ module Aws::TimestreamWrite
   # resp.table.arn #=> String
   # resp.table.table_name #=> String
   # resp.table.database_name #=> String
- # resp.table.table_status #=> String, one of "ACTIVE", "DELETING"
+ # resp.table.table_status #=> String, one of "ACTIVE", "DELETING", "RESTORING"
   # resp.table.retention_properties.memory_store_retention_period_in_hours #=> Integer
   # resp.table.retention_properties.magnetic_store_retention_period_in_days #=> Integer
   # resp.table.creation_time #=> Time
@@ -1090,13 +1378,14 @@ module Aws::TimestreamWrite
   req.send_request(options)
   end
 
- # The WriteRecords operation enables you to write your time series data
- # into Timestream. You can specify a single data point or a batch of
- # data points to be inserted into the system. Timestream offers you with
- # a flexible schema that auto detects the column names and data types
- # for your Timestream tables based on the dimension names and data types
- # of the data points you specify when invoking writes into the database.
- # Timestream support eventual consistency read semantics. This means
+ # Enables you to write your time-series data into Timestream. You can
+ # specify a single data point or a batch of data points to be inserted
+ # into the system. Timestream offers you a flexible schema that auto
+ # detects the column names and data types for your Timestream tables
+ # based on the dimension names and data types of the data points you
+ # specify when invoking writes into the database.
+ #
+ # Timestream supports eventual consistency read semantics. This means
   # that when you query data immediately after writing a batch of data
   # into Timestream, the query results might not reflect the results of a
   # recently completed write operation. The results may also include some
@@ -1109,30 +1398,32 @@ module Aws::TimestreamWrite
   #
   # You can use the `Version` parameter in a `WriteRecords` request to
   # update data points. Timestream tracks a version number with each
- # record. `Version` defaults to `1` when not specified for the record in
- # the request. Timestream will update an existing record’s measure value
- # along with its `Version` upon receiving a write request with a higher
- # `Version` number for that record. Upon receiving an update request
- # where the measure value is the same as that of the existing record,
- # Timestream still updates `Version`, if it is greater than the existing
- # value of `Version`. You can update a data point as many times as
- # desired, as long as the value of `Version` continuously increases.
+ # record. `Version` defaults to `1` when it's not specified for the
+ # record in the request. Timestream updates an existing record’s measure
+ # value along with its `Version` when it receives a write request with a
+ # higher `Version` number for that record. When it receives an update
+ # request where the measure value is the same as that of the existing
+ # record, Timestream still updates `Version`, if it is greater than the
+ # existing value of `Version`. You can update a data point as many times
+ # as desired, as long as the value of `Version` continuously increases.
   #
   # For example, suppose you write a new record without indicating
- # `Version` in the request. Timestream will store this record, and set
+ # `Version` in the request. Timestream stores this record, and set
   # `Version` to `1`. Now, suppose you try to update this record with a
   # `WriteRecords` request of the same record with a different measure
   # value but, like before, do not provide `Version`. In this case,
   # Timestream will reject this update with a `RejectedRecordsException`
   # since the updated record’s version is not greater than the existing
- # value of Version. However, if you were to resend the update request
- # with `Version` set to `2`, Timestream would then succeed in updating
- # the record’s value, and the `Version` would be set to `2`. Next,
- # suppose you sent a `WriteRecords` request with this same record and an
- # identical measure value, but with `Version` set to `3`. In this case,
- # Timestream would only update `Version` to `3`. Any further updates
- # would need to send a version number greater than `3`, or the update
- # requests would receive a `RejectedRecordsException`.
+ # value of Version.
+ #
+ # However, if you were to resend the update request with `Version` set
+ # to `2`, Timestream would then succeed in updating the record’s value,
+ # and the `Version` would be set to `2`. Next, suppose you sent a
+ # `WriteRecords` request with this same record and an identical measure
+ # value, but with `Version` set to `3`. In this case, Timestream would
+ # only update `Version` to `3`. Any further updates would need to send a
+ # version number greater than `3`, or the update requests would receive
+ # a `RejectedRecordsException`.
   #
   #
   #
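The three writes narrated above reduce to one rule: a write takes effect only when its `Version` is strictly greater than the stored one. A worked sketch of that sequence (database, table, and dimension names are hypothetical):

    record = {
      dimensions: [{ name: "host", value: "web01" }],
      measure_name: "cpu_utilization",
      measure_value: "71.5",
      measure_value_type: "DOUBLE",
      time: (Time.now.to_f * 1000).to_i.to_s, # record time is a string
      time_unit: "MILLISECONDS",
    }
    target = { database_name: "example_db", table_name: "example_table" }

    # First write: no Version given, so it is stored with Version = 1.
    client.write_records(records: [record], **target)

    # Same record, new measure value, still no Version: rejected, because
    # the implied Version (1) is not greater than the stored Version (1).
    begin
      client.write_records(records: [record.merge(measure_value: "80.0")], **target)
    rescue Aws::TimestreamWrite::Errors::RejectedRecordsException
      # expected: resend with a higher Version instead
    end

    # Resending with Version = 2 succeeds and bumps the stored Version.
    client.write_records(
      records: [record.merge(measure_value: "80.0", version: 2)], **target
    )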
@@ -1146,17 +1437,17 @@ module Aws::TimestreamWrite
   # The name of the Timestream table.
   #
   # @option params [Types::Record] :common_attributes
- # A record containing the common measure, dimension, time, and version
- # attributes shared across all the records in the request. The measure
- # and dimension attributes specified will be merged with the measure and
- # dimension attributes in the records object when the data is written
- # into Timestream. Dimensions may not overlap, or a
+ # A record that contains the common measure, dimension, time, and
+ # version attributes shared across all the records in the request. The
+ # measure and dimension attributes specified will be merged with the
+ # measure and dimension attributes in the records object when the data
+ # is written into Timestream. Dimensions may not overlap, or a
   # `ValidationException` will be thrown. In other words, a record must
   # contain dimensions with unique names.
   #
   # @option params [required, Array<Types::Record>] :records
- # An array of records containing the unique measure, dimension, time,
- # and version attributes for each time series data point.
+ # An array of records that contain the unique measure, dimension, time,
+ # and version attributes for each time-series data point.
   #
   # @return [Types::WriteRecordsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
   #
@@ -1243,7 +1534,7 @@ module Aws::TimestreamWrite
   params: params,
   config: config)
   context[:gem_name] = 'aws-sdk-timestreamwrite'
- context[:gem_version] = '1.16.0'
+ context[:gem_version] = '1.17.0'
   Seahorse::Client::Request.new(handlers, context)
   end