aws-sdk-timestreamwrite 1.16.0 → 1.18.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 533d244536b4849c1f70f990ebcb131d14ffba6bcfb5be44a0ee4b6af4033c05
- data.tar.gz: d8a3a9565744e2fbb86629c5be97bf9a88f150cf57ef2c4ee3648c1b566d87d8
+ metadata.gz: 9d57348275f669a453e5d38ebbb00d35a193ac6eeb16eef0e1b6a5767b48f6d3
+ data.tar.gz: 109d4eccd752b54904034877b103ac43d033a145711ecb9e34a56d1130bfc0ed
  SHA512:
- metadata.gz: 6565ada6da2f67162fac839376bde5d1588e93b4ab2919e3b126382be143a2b3b7b453b8fceda3a3209698befee5845c51ccb35feb0dd8c389ee95ae9522f310
- data.tar.gz: c3316d33266829587cdb5c0b8e5196f28003ca198902301fb30664ba62bddca37229e8cd6b2469395982c98bb5ed3fc952baa082656dd99b8ca2ddcd643f3e2d
+ metadata.gz: cfff79262cafdd35bdf2dcbbab23ddc82f06ce73f83689b1e4705879af39301ff841cc0c91b504dfedc858db44bc71ed125343812c03ceb51656782544d308e3
+ data.tar.gz: b04283f2e444ca0b9f3b86e5919365de31c01c1f2c86e5034030895e55beeb8caf6b67cd84cbcb742c0e851fdfba88dd09a1c9f69bafedb5a299cea8d0b87f67
data/CHANGELOG.md CHANGED
@@ -1,6 +1,16 @@
  Unreleased Changes
  ------------------

+ 1.18.0 (2023-05-31)
+ ------------------
+
+ * Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
+
+ 1.17.0 (2023-02-27)
+ ------------------
+
+ * Feature - This release adds the ability to ingest batched historical data or migrate data in bulk from S3 into Timestream using CSV files.
+
  1.16.0 (2023-01-18)
  ------------------

data/VERSION CHANGED
@@ -1 +1 @@
- 1.16.0
+ 1.18.0
data/lib/aws-sdk-timestreamwrite/client.rb CHANGED
@@ -275,6 +275,11 @@ module Aws::TimestreamWrite
  # in the future.
  #
  #
+ # @option options [String] :sdk_ua_app_id
+ #   A unique and opaque application ID that is appended to the
+ #   User-Agent header as app/<sdk_ua_app_id>. It should have a
+ #   maximum length of 50.
+ #
  # @option options [String] :secret_access_key
  #
  # @option options [String] :session_token
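
For orientation, the new `:sdk_ua_app_id` option is passed when the client is constructed; a minimal sketch (the region and app ID values are illustrative):

    require 'aws-sdk-timestreamwrite'

    # The app ID is appended to the User-Agent header as
    # app/my-ingest-service; it must be at most 50 characters.
    client = Aws::TimestreamWrite::Client.new(
      region: 'us-east-1',
      sdk_ua_app_id: 'my-ingest-service'
    )
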
@@ -378,11 +383,139 @@ module Aws::TimestreamWrite

  # @!group API Operations

+ # Creates a new Timestream batch load task. A batch load task processes
+ # data from a CSV source in an S3 location and writes to a Timestream
+ # table. A mapping from source to target is defined in a batch load
+ # task. Errors and events are written to a report at an S3 location. For
+ # the report, if the KMS key is not specified, the batch load task will
+ # be encrypted with a Timestream managed KMS key located in your
+ # account. For more information, see [Amazon Web Services managed
+ # keys][1]. [Service quotas apply][2]. For details, see [code
+ # sample][3].
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#aws-managed-cmk
+ # [2]: https://docs.aws.amazon.com/timestream/latest/developerguide/ts-limits.html
+ # [3]: https://docs.aws.amazon.com/timestream/latest/developerguide/code-samples.create-batch-load.html
+ #
+ # @option params [String] :client_token
+ #   **A suitable default value is auto-generated.** You should normally
+ #   not need to pass this option.
+ #
+ # @option params [Types::DataModelConfiguration] :data_model_configuration
+ #
+ # @option params [required, Types::DataSourceConfiguration] :data_source_configuration
+ #   Defines configuration details about the data source for a batch load
+ #   task.
+ #
+ # @option params [required, Types::ReportConfiguration] :report_configuration
+ #   Report configuration for a batch load task. This contains details
+ #   about where error reports are stored.
+ #
+ # @option params [required, String] :target_database_name
+ #   Target Timestream database for a batch load task.
+ #
+ # @option params [required, String] :target_table_name
+ #   Target Timestream table for a batch load task.
+ #
+ # @option params [Integer] :record_version
+ #
+ # @return [Types::CreateBatchLoadTaskResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::CreateBatchLoadTaskResponse#task_id #task_id} => String
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.create_batch_load_task({
+ #   client_token: "ClientRequestToken",
+ #   data_model_configuration: {
+ #     data_model: {
+ #       time_column: "StringValue256",
+ #       time_unit: "MILLISECONDS", # accepts MILLISECONDS, SECONDS, MICROSECONDS, NANOSECONDS
+ #       dimension_mappings: [ # required
+ #         {
+ #           source_column: "SchemaName",
+ #           destination_column: "SchemaName",
+ #         },
+ #       ],
+ #       multi_measure_mappings: {
+ #         target_multi_measure_name: "SchemaName",
+ #         multi_measure_attribute_mappings: [ # required
+ #           {
+ #             source_column: "SchemaName", # required
+ #             target_multi_measure_attribute_name: "SchemaName",
+ #             measure_value_type: "DOUBLE", # accepts DOUBLE, BIGINT, BOOLEAN, VARCHAR, TIMESTAMP
+ #           },
+ #         ],
+ #       },
+ #       mixed_measure_mappings: [
+ #         {
+ #           measure_name: "SchemaName",
+ #           source_column: "SchemaName",
+ #           target_measure_name: "SchemaName",
+ #           measure_value_type: "DOUBLE", # required, accepts DOUBLE, BIGINT, VARCHAR, BOOLEAN, TIMESTAMP, MULTI
+ #           multi_measure_attribute_mappings: [
+ #             {
+ #               source_column: "SchemaName", # required
+ #               target_multi_measure_attribute_name: "SchemaName",
+ #               measure_value_type: "DOUBLE", # accepts DOUBLE, BIGINT, BOOLEAN, VARCHAR, TIMESTAMP
+ #             },
+ #           ],
+ #         },
+ #       ],
+ #       measure_name_column: "StringValue256",
+ #     },
+ #     data_model_s3_configuration: {
+ #       bucket_name: "S3BucketName",
+ #       object_key: "S3ObjectKey",
+ #     },
+ #   },
+ #   data_source_configuration: { # required
+ #     data_source_s3_configuration: { # required
+ #       bucket_name: "S3BucketName", # required
+ #       object_key_prefix: "S3ObjectKey",
+ #     },
+ #     csv_configuration: {
+ #       column_separator: "StringValue1",
+ #       escape_char: "StringValue1",
+ #       quote_char: "StringValue1",
+ #       null_value: "StringValue256",
+ #       trim_white_space: false,
+ #     },
+ #     data_format: "CSV", # required, accepts CSV
+ #   },
+ #   report_configuration: { # required
+ #     report_s3_configuration: {
+ #       bucket_name: "S3BucketName", # required
+ #       object_key_prefix: "S3ObjectKeyPrefix",
+ #       encryption_option: "SSE_S3", # accepts SSE_S3, SSE_KMS
+ #       kms_key_id: "StringValue2048",
+ #     },
+ #   },
+ #   target_database_name: "ResourceCreateAPIName", # required
+ #   target_table_name: "ResourceCreateAPIName", # required
+ #   record_version: 1,
+ # })
+ #
+ # @example Response structure
+ #
+ # resp.task_id #=> String
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/timestream-write-2018-11-01/CreateBatchLoadTask AWS API Documentation
+ #
+ # @overload create_batch_load_task(params = {})
+ # @param [Hash] params ({})
+ def create_batch_load_task(params = {}, options = {})
+   req = build_request(:create_batch_load_task, params)
+   req.send_request(options)
+ end
+
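
To make the new operation concrete, here is a minimal sketch of starting a CSV batch load with only the required parameters; the bucket, database, and table names are illustrative placeholders:

    # Source CSVs are read from my-source-bucket; the error report is
    # written to my-report-bucket. The returned task_id identifies the task.
    resp = client.create_batch_load_task(
      data_source_configuration: {
        data_source_s3_configuration: { bucket_name: 'my-source-bucket' },
        data_format: 'CSV',
      },
      report_configuration: {
        report_s3_configuration: { bucket_name: 'my-report-bucket' },
      },
      target_database_name: 'sample_db',
      target_table_name: 'sample_table',
    )
    task_id = resp.task_id
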
  # Creates a new Timestream database. If the KMS key is not specified,
  # the database will be encrypted with a Timestream managed KMS key
- # located in your account. Refer to [Amazon Web Services managed KMS
- # keys][1] for more info. [Service quotas apply][2]. See [code
- # sample][3] for details.
+ # located in your account. For more information, see [Amazon Web
+ # Services managed keys][1]. [Service quotas apply][2]. For details, see
+ # [code sample][3].
  #
  #
  #
@@ -396,8 +529,8 @@ module Aws::TimestreamWrite
  # @option params [String] :kms_key_id
  # The KMS key for the database. If the KMS key is not specified, the
  # database will be encrypted with a Timestream managed KMS key located
- # in your account. Refer to [Amazon Web Services managed KMS keys][1]
- # for more info.
+ # in your account. For more information, see [Amazon Web Services
+ # managed keys][1].
  #
  #
  #
@@ -441,13 +574,13 @@ module Aws::TimestreamWrite
  req.send_request(options)
  end

- # The CreateTable operation adds a new table to an existing database in
- # your account. In an Amazon Web Services account, table names must be
- # at least unique within each Region if they are in the same database.
- # You may have identical table names in the same Region if the tables
- # are in separate databases. While creating the table, you must specify
- # the table name, database name, and the retention properties. [Service
- # quotas apply][1]. See [code sample][2] for details.
+ # Adds a new table to an existing database in your account. In an Amazon
+ # Web Services account, table names must be at least unique within each
+ # Region if they are in the same database. You might have identical
+ # table names in the same Region if the tables are in separate
+ # databases. While creating the table, you must specify the table name,
+ # database name, and the retention properties. [Service quotas
+ # apply][1]. See [code sample][2] for details.
  #
  #
  #
@@ -461,7 +594,7 @@ module Aws::TimestreamWrite
  # The name of the Timestream table.
  #
  # @option params [Types::RetentionProperties] :retention_properties
- # The duration for which your time series data must be stored in the
+ # The duration for which your time-series data must be stored in the
  # memory store and the magnetic store.
  #
  # @option params [Array<Types::Tag>] :tags
@@ -508,7 +641,7 @@ module Aws::TimestreamWrite
  # resp.table.arn #=> String
  # resp.table.table_name #=> String
  # resp.table.database_name #=> String
- # resp.table.table_status #=> String, one of "ACTIVE", "DELETING"
+ # resp.table.table_status #=> String, one of "ACTIVE", "DELETING", "RESTORING"
  # resp.table.retention_properties.memory_store_retention_period_in_hours #=> Integer
  # resp.table.retention_properties.magnetic_store_retention_period_in_days #=> Integer
  # resp.table.creation_time #=> Time
@@ -529,7 +662,7 @@ module Aws::TimestreamWrite
  end

  # Deletes a given Timestream database. *This is an irreversible
- # operation. After a database is deleted, the time series data from its
+ # operation. After a database is deleted, the time-series data from its
  # tables cannot be recovered.*
  #
  # <note markdown="1"> All tables in the database must be deleted first, or a
@@ -568,7 +701,7 @@ module Aws::TimestreamWrite
  end

  # Deletes a given Timestream table. This is an irreversible operation.
- # After a Timestream database table is deleted, the time series data
+ # After a Timestream database table is deleted, the time-series data
  # stored in the table cannot be recovered.
  #
  # <note markdown="1"> Due to the nature of distributed retries, the operation can return
@@ -608,6 +741,89 @@ module Aws::TimestreamWrite
  req.send_request(options)
  end

+ # Returns information about the batch load task, including
+ # configurations, mappings, progress, and other details. [Service quotas
+ # apply][1]. See [code sample][2] for details.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/timestream/latest/developerguide/ts-limits.html
+ # [2]: https://docs.aws.amazon.com/timestream/latest/developerguide/code-samples.describe-batch-load.html
+ #
+ # @option params [required, String] :task_id
+ #   The ID of the batch load task.
+ #
+ # @return [Types::DescribeBatchLoadTaskResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::DescribeBatchLoadTaskResponse#batch_load_task_description #batch_load_task_description} => Types::BatchLoadTaskDescription
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.describe_batch_load_task({
+ #   task_id: "BatchLoadTaskId", # required
+ # })
+ #
+ # @example Response structure
+ #
+ # resp.batch_load_task_description.task_id #=> String
+ # resp.batch_load_task_description.error_message #=> String
+ # resp.batch_load_task_description.data_source_configuration.data_source_s3_configuration.bucket_name #=> String
+ # resp.batch_load_task_description.data_source_configuration.data_source_s3_configuration.object_key_prefix #=> String
+ # resp.batch_load_task_description.data_source_configuration.csv_configuration.column_separator #=> String
+ # resp.batch_load_task_description.data_source_configuration.csv_configuration.escape_char #=> String
+ # resp.batch_load_task_description.data_source_configuration.csv_configuration.quote_char #=> String
+ # resp.batch_load_task_description.data_source_configuration.csv_configuration.null_value #=> String
+ # resp.batch_load_task_description.data_source_configuration.csv_configuration.trim_white_space #=> Boolean
+ # resp.batch_load_task_description.data_source_configuration.data_format #=> String, one of "CSV"
+ # resp.batch_load_task_description.progress_report.records_processed #=> Integer
+ # resp.batch_load_task_description.progress_report.records_ingested #=> Integer
+ # resp.batch_load_task_description.progress_report.parse_failures #=> Integer
+ # resp.batch_load_task_description.progress_report.record_ingestion_failures #=> Integer
+ # resp.batch_load_task_description.progress_report.file_failures #=> Integer
+ # resp.batch_load_task_description.progress_report.bytes_metered #=> Integer
+ # resp.batch_load_task_description.report_configuration.report_s3_configuration.bucket_name #=> String
+ # resp.batch_load_task_description.report_configuration.report_s3_configuration.object_key_prefix #=> String
+ # resp.batch_load_task_description.report_configuration.report_s3_configuration.encryption_option #=> String, one of "SSE_S3", "SSE_KMS"
+ # resp.batch_load_task_description.report_configuration.report_s3_configuration.kms_key_id #=> String
+ # resp.batch_load_task_description.data_model_configuration.data_model.time_column #=> String
+ # resp.batch_load_task_description.data_model_configuration.data_model.time_unit #=> String, one of "MILLISECONDS", "SECONDS", "MICROSECONDS", "NANOSECONDS"
+ # resp.batch_load_task_description.data_model_configuration.data_model.dimension_mappings #=> Array
+ # resp.batch_load_task_description.data_model_configuration.data_model.dimension_mappings[0].source_column #=> String
+ # resp.batch_load_task_description.data_model_configuration.data_model.dimension_mappings[0].destination_column #=> String
+ # resp.batch_load_task_description.data_model_configuration.data_model.multi_measure_mappings.target_multi_measure_name #=> String
+ # resp.batch_load_task_description.data_model_configuration.data_model.multi_measure_mappings.multi_measure_attribute_mappings #=> Array
+ # resp.batch_load_task_description.data_model_configuration.data_model.multi_measure_mappings.multi_measure_attribute_mappings[0].source_column #=> String
+ # resp.batch_load_task_description.data_model_configuration.data_model.multi_measure_mappings.multi_measure_attribute_mappings[0].target_multi_measure_attribute_name #=> String
+ # resp.batch_load_task_description.data_model_configuration.data_model.multi_measure_mappings.multi_measure_attribute_mappings[0].measure_value_type #=> String, one of "DOUBLE", "BIGINT", "BOOLEAN", "VARCHAR", "TIMESTAMP"
+ # resp.batch_load_task_description.data_model_configuration.data_model.mixed_measure_mappings #=> Array
+ # resp.batch_load_task_description.data_model_configuration.data_model.mixed_measure_mappings[0].measure_name #=> String
+ # resp.batch_load_task_description.data_model_configuration.data_model.mixed_measure_mappings[0].source_column #=> String
+ # resp.batch_load_task_description.data_model_configuration.data_model.mixed_measure_mappings[0].target_measure_name #=> String
+ # resp.batch_load_task_description.data_model_configuration.data_model.mixed_measure_mappings[0].measure_value_type #=> String, one of "DOUBLE", "BIGINT", "VARCHAR", "BOOLEAN", "TIMESTAMP", "MULTI"
+ # resp.batch_load_task_description.data_model_configuration.data_model.mixed_measure_mappings[0].multi_measure_attribute_mappings #=> Array
+ # resp.batch_load_task_description.data_model_configuration.data_model.mixed_measure_mappings[0].multi_measure_attribute_mappings[0].source_column #=> String
+ # resp.batch_load_task_description.data_model_configuration.data_model.mixed_measure_mappings[0].multi_measure_attribute_mappings[0].target_multi_measure_attribute_name #=> String
+ # resp.batch_load_task_description.data_model_configuration.data_model.mixed_measure_mappings[0].multi_measure_attribute_mappings[0].measure_value_type #=> String, one of "DOUBLE", "BIGINT", "BOOLEAN", "VARCHAR", "TIMESTAMP"
+ # resp.batch_load_task_description.data_model_configuration.data_model.measure_name_column #=> String
+ # resp.batch_load_task_description.data_model_configuration.data_model_s3_configuration.bucket_name #=> String
+ # resp.batch_load_task_description.data_model_configuration.data_model_s3_configuration.object_key #=> String
+ # resp.batch_load_task_description.target_database_name #=> String
+ # resp.batch_load_task_description.target_table_name #=> String
+ # resp.batch_load_task_description.task_status #=> String, one of "CREATED", "IN_PROGRESS", "FAILED", "SUCCEEDED", "PROGRESS_STOPPED", "PENDING_RESUME"
+ # resp.batch_load_task_description.record_version #=> Integer
+ # resp.batch_load_task_description.creation_time #=> Time
+ # resp.batch_load_task_description.last_updated_time #=> Time
+ # resp.batch_load_task_description.resumable_until #=> Time
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/timestream-write-2018-11-01/DescribeBatchLoadTask AWS API Documentation
+ #
+ # @overload describe_batch_load_task(params = {})
+ # @param [Hash] params ({})
+ def describe_batch_load_task(params = {}, options = {})
+   req = build_request(:describe_batch_load_task, params)
+   req.send_request(options)
+ end
+
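
A minimal sketch of checking on the task created earlier (the task ID comes from the prior create_batch_load_task call):

    desc = client.describe_batch_load_task(task_id: task_id).batch_load_task_description
    # task_status is one of CREATED, IN_PROGRESS, FAILED, SUCCEEDED,
    # PROGRESS_STOPPED, or PENDING_RESUME.
    puts "#{desc.task_status}: #{desc.progress_report.records_ingested} records ingested"
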
  # Returns information about the database, including the database name,
  # time that the database was created, and the total number of tables
  # found within the database. [Service quotas apply][1]. See [code
@@ -649,14 +865,14 @@ module Aws::TimestreamWrite
  req.send_request(options)
  end

- # DescribeEndpoints returns a list of available endpoints to make
- # Timestream API calls against. This API is available through both Write
- # and Query.
+ # Returns a list of available endpoints to make Timestream API calls
+ # against. This API operation is available through both the Write and
+ # Query APIs.
  #
  # Because the Timestream SDKs are designed to transparently work with
  # the service’s architecture, including the management and mapping of
- # the service endpoints, *it is not recommended that you use this API
- # unless*\:
+ # the service endpoints, *we don't recommend that you use this API
+ # operation unless*:
  #
  # * You are using [VPC endpoints (Amazon Web Services PrivateLink) with
  # Timestream][1]
@@ -724,7 +940,7 @@ module Aws::TimestreamWrite
  # resp.table.arn #=> String
  # resp.table.table_name #=> String
  # resp.table.database_name #=> String
- # resp.table.table_status #=> String, one of "ACTIVE", "DELETING"
+ # resp.table.table_status #=> String, one of "ACTIVE", "DELETING", "RESTORING"
  # resp.table.retention_properties.memory_store_retention_period_in_hours #=> Integer
  # resp.table.retention_properties.magnetic_store_retention_period_in_days #=> Integer
  # resp.table.creation_time #=> Time
@@ -744,6 +960,63 @@ module Aws::TimestreamWrite
  req.send_request(options)
  end

+ # Provides a list of batch load tasks, along with the name, status, when
+ # the task is resumable until, and other details. See [code sample][1]
+ # for details.
+ #
+ #
+ #
+ # [1]: https://docs.aws.amazon.com/timestream/latest/developerguide/code-samples.list-batch-load-tasks.html
+ #
+ # @option params [String] :next_token
+ #   A token to specify where to start paginating. This is the NextToken
+ #   from a previously truncated response.
+ #
+ # @option params [Integer] :max_results
+ #   The total number of items to return in the output. If the total number
+ #   of items available is more than the value specified, a NextToken is
+ #   provided in the output. To resume pagination, provide the NextToken
+ #   value as argument of a subsequent API invocation.
+ #
+ # @option params [String] :task_status
+ #   Status of the batch load task.
+ #
+ # @return [Types::ListBatchLoadTasksResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::ListBatchLoadTasksResponse#next_token #next_token} => String
+ # * {Types::ListBatchLoadTasksResponse#batch_load_tasks #batch_load_tasks} => Array<Types::BatchLoadTask>
+ #
+ # The returned {Seahorse::Client::Response response} is a pageable response and is Enumerable. For details on usage see {Aws::PageableResponse PageableResponse}.
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.list_batch_load_tasks({
+ #   next_token: "String",
+ #   max_results: 1,
+ #   task_status: "CREATED", # accepts CREATED, IN_PROGRESS, FAILED, SUCCEEDED, PROGRESS_STOPPED, PENDING_RESUME
+ # })
+ #
+ # @example Response structure
+ #
+ # resp.next_token #=> String
+ # resp.batch_load_tasks #=> Array
+ # resp.batch_load_tasks[0].task_id #=> String
+ # resp.batch_load_tasks[0].task_status #=> String, one of "CREATED", "IN_PROGRESS", "FAILED", "SUCCEEDED", "PROGRESS_STOPPED", "PENDING_RESUME"
+ # resp.batch_load_tasks[0].database_name #=> String
+ # resp.batch_load_tasks[0].table_name #=> String
+ # resp.batch_load_tasks[0].creation_time #=> Time
+ # resp.batch_load_tasks[0].last_updated_time #=> Time
+ # resp.batch_load_tasks[0].resumable_until #=> Time
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/timestream-write-2018-11-01/ListBatchLoadTasks AWS API Documentation
+ #
+ # @overload list_batch_load_tasks(params = {})
+ # @param [Hash] params ({})
+ def list_batch_load_tasks(params = {}, options = {})
+   req = build_request(:list_batch_load_tasks, params)
+   req.send_request(options)
+ end
+
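
Because the response is pageable, enumeration can be left to the SDK; a minimal sketch:

    # each_page follows next_token automatically across pages.
    client.list_batch_load_tasks(max_results: 10).each_page do |page|
      page.batch_load_tasks.each do |task|
        puts "#{task.task_id} #{task.task_status} resumable until #{task.resumable_until}"
      end
    end
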
  # Returns a list of your Timestream databases. [Service quotas
  # apply][1]. See [code sample][2] for details.
  #
@@ -796,8 +1069,8 @@ module Aws::TimestreamWrite
  req.send_request(options)
  end

- # A list of tables, along with the name, status and retention properties
- # of each table. See [code sample][1] for details.
+ # Provides a list of tables, along with the name, status, and retention
+ # properties of each table. See [code sample][1] for details.
  #
  #
  #
@@ -837,7 +1110,7 @@ module Aws::TimestreamWrite
  # resp.tables[0].arn #=> String
  # resp.tables[0].table_name #=> String
  # resp.tables[0].database_name #=> String
- # resp.tables[0].table_status #=> String, one of "ACTIVE", "DELETING"
+ # resp.tables[0].table_status #=> String, one of "ACTIVE", "DELETING", "RESTORING"
  # resp.tables[0].retention_properties.memory_store_retention_period_in_hours #=> Integer
  # resp.tables[0].retention_properties.magnetic_store_retention_period_in_days #=> Integer
  # resp.tables[0].creation_time #=> Time
@@ -858,7 +1131,7 @@ module Aws::TimestreamWrite
  req.send_request(options)
  end

- # List all tags on a Timestream resource.
+ # Lists all tags on a Timestream resource.
  #
  # @option params [required, String] :resource_arn
  # The Timestream resource with tags to be listed. This value is an
@@ -889,7 +1162,27 @@ module Aws::TimestreamWrite
  req.send_request(options)
  end

- # Associate a set of tags with a Timestream resource. You can then
+ # @option params [required, String] :task_id
+ #   The ID of the batch load task to resume.
+ #
+ # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.resume_batch_load_task({
+ #   task_id: "BatchLoadTaskId", # required
+ # })
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/timestream-write-2018-11-01/ResumeBatchLoadTask AWS API Documentation
+ #
+ # @overload resume_batch_load_task(params = {})
+ # @param [Hash] params ({})
+ def resume_batch_load_task(params = {}, options = {})
+   req = build_request(:resume_batch_load_task, params)
+   req.send_request(options)
+ end
+
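
A minimal sketch of resuming a stopped task; per the return documentation above, a successful call yields an empty structure (the task ID here is an illustrative placeholder):

    # Only a task in a resumable state, and before its resumable_until
    # timestamp, can be resumed.
    client.resume_batch_load_task(task_id: 'ExampleTaskId')
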
+ # Associates a set of tags with a Timestream resource. You can then
  # activate these user-defined tags so that they appear on the Billing
  # and Cost Management console for cost allocation tracking.
  #
@@ -1070,7 +1363,7 @@ module Aws::TimestreamWrite
  # resp.table.arn #=> String
  # resp.table.table_name #=> String
  # resp.table.database_name #=> String
- # resp.table.table_status #=> String, one of "ACTIVE", "DELETING"
+ # resp.table.table_status #=> String, one of "ACTIVE", "DELETING", "RESTORING"
  # resp.table.retention_properties.memory_store_retention_period_in_hours #=> Integer
  # resp.table.retention_properties.magnetic_store_retention_period_in_days #=> Integer
  # resp.table.creation_time #=> Time
@@ -1090,13 +1383,14 @@ module Aws::TimestreamWrite
  req.send_request(options)
  end

- # The WriteRecords operation enables you to write your time series data
- # into Timestream. You can specify a single data point or a batch of
- # data points to be inserted into the system. Timestream offers you with
- # a flexible schema that auto detects the column names and data types
- # for your Timestream tables based on the dimension names and data types
- # of the data points you specify when invoking writes into the database.
- # Timestream support eventual consistency read semantics. This means
+ # Enables you to write your time-series data into Timestream. You can
+ # specify a single data point or a batch of data points to be inserted
+ # into the system. Timestream offers you a flexible schema that auto
+ # detects the column names and data types for your Timestream tables
+ # based on the dimension names and data types of the data points you
+ # specify when invoking writes into the database.
+ #
+ # Timestream supports eventual consistency read semantics. This means
  # that when you query data immediately after writing a batch of data
  # into Timestream, the query results might not reflect the results of a
  # recently completed write operation. The results may also include some
@@ -1109,30 +1403,32 @@ module Aws::TimestreamWrite
  #
  # You can use the `Version` parameter in a `WriteRecords` request to
  # update data points. Timestream tracks a version number with each
- # record. `Version` defaults to `1` when not specified for the record in
- # the request. Timestream will update an existing record’s measure value
- # along with its `Version` upon receiving a write request with a higher
- # `Version` number for that record. Upon receiving an update request
- # where the measure value is the same as that of the existing record,
- # Timestream still updates `Version`, if it is greater than the existing
- # value of `Version`. You can update a data point as many times as
- # desired, as long as the value of `Version` continuously increases.
+ # record. `Version` defaults to `1` when it's not specified for the
+ # record in the request. Timestream updates an existing record’s measure
+ # value along with its `Version` when it receives a write request with a
+ # higher `Version` number for that record. When it receives an update
+ # request where the measure value is the same as that of the existing
+ # record, Timestream still updates `Version`, if it is greater than the
+ # existing value of `Version`. You can update a data point as many times
+ # as desired, as long as the value of `Version` continuously increases.
  #
  # For example, suppose you write a new record without indicating
- # `Version` in the request. Timestream will store this record, and set
+ # `Version` in the request. Timestream stores this record, and sets
  # `Version` to `1`. Now, suppose you try to update this record with a
  # `WriteRecords` request of the same record with a different measure
  # value but, like before, do not provide `Version`. In this case,
  # Timestream will reject this update with a `RejectedRecordsException`
  # since the updated record’s version is not greater than the existing
- # value of Version. However, if you were to resend the update request
- # with `Version` set to `2`, Timestream would then succeed in updating
- # the record’s value, and the `Version` would be set to `2`. Next,
- # suppose you sent a `WriteRecords` request with this same record and an
- # identical measure value, but with `Version` set to `3`. In this case,
- # Timestream would only update `Version` to `3`. Any further updates
- # would need to send a version number greater than `3`, or the update
- # requests would receive a `RejectedRecordsException`.
+ # value of Version.
+ #
+ # However, if you were to resend the update request with `Version` set
+ # to `2`, Timestream would then succeed in updating the record’s value,
+ # and the `Version` would be set to `2`. Next, suppose you sent a
+ # `WriteRecords` request with this same record and an identical measure
+ # value, but with `Version` set to `3`. In this case, Timestream would
+ # only update `Version` to `3`. Any further updates would need to send a
+ # version number greater than `3`, or the update requests would receive
+ # a `RejectedRecordsException`.
  #
  #
  #
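
To illustrate the versioning rules above, a minimal sketch of upserting a single data point; the database, table, dimension, and measure names are illustrative:

    client.write_records(
      database_name: 'sample_db',
      table_name: 'sample_table',
      common_attributes: {
        dimensions: [{ name: 'host', value: 'web01' }],
      },
      records: [{
        measure_name: 'cpu_utilization',
        measure_value: '73.5',
        measure_value_type: 'DOUBLE',
        time: (Time.now.to_f * 1000).to_i.to_s,
        time_unit: 'MILLISECONDS',
        version: 2, # for an update to apply, must exceed the stored Version
      }]
    )
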
@@ -1146,17 +1442,17 @@ module Aws::TimestreamWrite
  # The name of the Timestream table.
  #
  # @option params [Types::Record] :common_attributes
- # A record containing the common measure, dimension, time, and version
- # attributes shared across all the records in the request. The measure
- # and dimension attributes specified will be merged with the measure and
- # dimension attributes in the records object when the data is written
- # into Timestream. Dimensions may not overlap, or a
+ # A record that contains the common measure, dimension, time, and
+ # version attributes shared across all the records in the request. The
+ # measure and dimension attributes specified will be merged with the
+ # measure and dimension attributes in the records object when the data
+ # is written into Timestream. Dimensions may not overlap, or a
  # `ValidationException` will be thrown. In other words, a record must
  # contain dimensions with unique names.
  #
  # @option params [required, Array<Types::Record>] :records
- # An array of records containing the unique measure, dimension, time,
- # and version attributes for each time series data point.
+ # An array of records that contain the unique measure, dimension, time,
+ # and version attributes for each time-series data point.
  #
  # @return [Types::WriteRecordsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
  #
@@ -1243,7 +1539,7 @@ module Aws::TimestreamWrite
  params: params,
  config: config)
  context[:gem_name] = 'aws-sdk-timestreamwrite'
- context[:gem_version] = '1.16.0'
+ context[:gem_version] = '1.18.0'
  Seahorse::Client::Request.new(handlers, context)
  end