aws-sdk-datasync 1.37.0 → 1.41.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: 37157815b2abccc0c8f064fb766152e608469c1efb3c84455a09b25f411c44a6
- data.tar.gz: 303ea94eb62e43adaee3fa6052f6ebdb4cd0fc5ee2bd1dce536efd8fc8d3401d
+ metadata.gz: e5ba18e98b8f96b62f4279d3f4da8f6741b32866b3ff9c5f635e2ce49b1633c6
+ data.tar.gz: 315f1653749f803e8745878297b00686caa16faa1fac6bc6b2c616d5e29d98b6
  SHA512:
- metadata.gz: 8bcafa13e1c50f2ffc300cc14c1492488e1b30b3a2c1f82bd1a7ab79d0d5776f7a80251718cc1af230116b7ee63975e86df940e6199ac0dd6de6051584af4826
- data.tar.gz: 5cb5d65aeee6fdf84f90075a1659a4e79139fc5f8c422e3a3e4b2e945cc2722befb43b8c9548a579df605e8aeb0876d016a2c78d306a6153a60cb28a1f5de51a
+ metadata.gz: ba35b4a3c6b25bb7bb4cd8e8049ec6e2632a6841f1ed5a7004d4e606e1ffb9770890bdc926c40c480103a87bcba7ceb2bd2ee4c77b6e53502812e7a93a0c0d32
+ data.tar.gz: dd39e3b04791998f7f54df28353f9f79110f82b06bc742390dfabaca7225104eee6adcf6d6df3238ae2f8be16eddb73f60ecc5870f313c981c61eee83af3e0c9
data/CHANGELOG.md CHANGED
@@ -1,6 +1,26 @@
  Unreleased Changes
  ------------------

+ 1.41.0 (2021-12-20)
+ ------------------
+
+ * Feature - AWS DataSync now supports FSx Lustre Locations.
+
+ 1.40.0 (2021-11-30)
+ ------------------
+
+ * Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
+
+ 1.39.0 (2021-11-04)
+ ------------------
+
+ * Feature - Code Generated Changes, see `./build_tools` or `aws-sdk-core`'s CHANGELOG.md for details.
+
+ 1.38.0 (2021-11-03)
+ ------------------
+
+ * Feature - AWS DataSync now supports Hadoop Distributed File System (HDFS) Locations
+
  1.37.0 (2021-10-18)
  ------------------

data/VERSION CHANGED
@@ -1 +1 @@
- 1.37.0
+ 1.41.0
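To pick up the new FSx for Lustre and HDFS operations, a Gemfile pin along these lines works (a minimal sketch; the pessimistic constraint is a suggestion, not part of this diff):

    # Gemfile (hypothetical): track the 1.x series from this release onward
    gem 'aws-sdk-datasync', '~> 1.41'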
data/lib/aws-sdk-datasync/client.rb CHANGED
@@ -119,7 +119,9 @@ module Aws::DataSync
  # * EC2/ECS IMDS instance profile - When used by default, the timeouts
  # are very aggressive. Construct and pass an instance of
  # `Aws::InstanceProfileCredentails` or `Aws::ECSCredentials` to
- # enable retries and extended timeouts.
+ # enable retries and extended timeouts. Instance profile credential
+ # fetching can be disabled by setting ENV['AWS_EC2_METADATA_DISABLED']
+ # to true.
  #
  # @option options [required, String] :region
  # The AWS region to connect to. The configured `:region` is
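For reference, a minimal sketch of passing explicit instance profile credentials with retries enabled (the retry and timeout values are illustrative assumptions, not SDK defaults; the class is spelled Aws::InstanceProfileCredentials in aws-sdk-core, despite the typo in the doc string above):

    require 'aws-sdk-datasync'

    # Construct IMDS credentials explicitly to opt in to retries and
    # longer timeouts than the aggressive defaults described above.
    creds = Aws::InstanceProfileCredentials.new(retries: 3, http_open_timeout: 5)
    client = Aws::DataSync::Client.new(region: 'us-east-1', credentials: creds)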
@@ -285,6 +287,15 @@ module Aws::DataSync
  # ** Please note ** When response stubbing is enabled, no HTTP
  # requests are made, and retries are disabled.
  #
+ # @option options [Boolean] :use_dualstack_endpoint
+ # When set to `true`, dualstack enabled endpoints (with `.aws` TLD)
+ # will be used if available.
+ #
+ # @option options [Boolean] :use_fips_endpoint
+ # When set to `true`, fips compatible endpoints will be used if available.
+ # When a `fips` region is used, the region is normalized and this config
+ # is set to `true`.
+ #
  # @option options [Boolean] :validate_params (true)
  # When `true`, request parameters are validated before
  # sending the request.
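A short sketch of the two new endpoint options (the region is illustrative):

    client = Aws::DataSync::Client.new(
      region: 'us-west-2',
      use_dualstack_endpoint: true, # prefer dualstack (.aws TLD) endpoints when available
      use_fips_endpoint: true       # prefer FIPS-compatible endpoints when available
    )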
@@ -563,22 +574,73 @@ module Aws::DataSync
  req.send_request(options)
  end

+ # Creates an endpoint for an Amazon FSx for Lustre file system.
+ #
+ # @option params [required, String] :fsx_filesystem_arn
+ # The Amazon Resource Name (ARN) for the FSx for Lustre file system.
+ #
+ # @option params [required, Array<String>] :security_group_arns
+ # The Amazon Resource Names (ARNs) of the security groups that are used
+ # to configure the FSx for Lustre file system.
+ #
+ # @option params [String] :subdirectory
+ # A subdirectory in the location's path. This subdirectory in the FSx
+ # for Lustre file system is used to read data from the FSx for Lustre
+ # source location or write data to the FSx for Lustre destination.
+ #
+ # @option params [Array<Types::TagListEntry>] :tags
+ # The key-value pair that represents a tag that you want to add to the
+ # resource. The value can be an empty string. This value helps you
+ # manage, filter, and search for your resources. We recommend that you
+ # create a name tag for your location.
+ #
+ # @return [Types::CreateLocationFsxLustreResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::CreateLocationFsxLustreResponse#location_arn #location_arn} => String
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.create_location_fsx_lustre({
+ # fsx_filesystem_arn: "FsxFilesystemArn", # required
+ # security_group_arns: ["Ec2SecurityGroupArn"], # required
+ # subdirectory: "FsxLustreSubdirectory",
+ # tags: [
+ # {
+ # key: "TagKey", # required
+ # value: "TagValue",
+ # },
+ # ],
+ # })
+ #
+ # @example Response structure
+ #
+ # resp.location_arn #=> String
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/CreateLocationFsxLustre AWS API Documentation
+ #
+ # @overload create_location_fsx_lustre(params = {})
+ # @param [Hash] params ({})
+ def create_location_fsx_lustre(params = {}, options = {})
+ req = build_request(:create_location_fsx_lustre, params)
+ req.send_request(options)
+ end
+
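A usage sketch of the new operation with concrete-looking values (both ARNs are invented placeholders):

    resp = client.create_location_fsx_lustre(
      fsx_filesystem_arn: 'arn:aws:fsx:us-east-1:111122223333:file-system/fs-0123456789abcdef0',
      security_group_arns: ['arn:aws:ec2:us-east-1:111122223333:security-group/sg-0123456789abcdef0'],
      subdirectory: '/export',
      tags: [{ key: 'Name', value: 'lustre-source' }] # a name tag, as the docs recommend
    )
    resp.location_arn # ARN of the newly created location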
  # Creates an endpoint for an Amazon FSx for Windows File Server file
  # system.
  #
  # @option params [String] :subdirectory
- # A subdirectory in the locations path. This subdirectory in the Amazon
- # FSx for Windows File Server file system is used to read data from the
- # Amazon FSx for Windows File Server source location or write data to
- # the FSx for Windows File Server destination.
+ # A subdirectory in the location's path. This subdirectory in the
+ # Amazon FSx for Windows File Server file system is used to read data
+ # from the Amazon FSx for Windows File Server source location or write
+ # data to the FSx for Windows File Server destination.
  #
  # @option params [required, String] :fsx_filesystem_arn
  # The Amazon Resource Name (ARN) for the FSx for Windows File Server
  # file system.
  #
  # @option params [required, Array<String>] :security_group_arns
- # The Amazon Resource Names (ARNs) of the security groups that are to
- # use to configure the FSx for Windows File Server file system.
+ # The Amazon Resource Names (ARNs) of the security groups that are used
+ # to configure the FSx for Windows File Server file system.
  #
  # @option params [Array<Types::TagListEntry>] :tags
  # The key-value pair that represents a tag that you want to add to the
@@ -636,6 +698,141 @@ module Aws::DataSync
  req.send_request(options)
  end

+ # Creates an endpoint for a Hadoop Distributed File System (HDFS).
+ #
+ # @option params [String] :subdirectory
+ # A subdirectory in the HDFS cluster. This subdirectory is used to read
+ # data from or write data to the HDFS cluster. If the subdirectory
+ # isn't specified, it will default to `/`.
+ #
+ # @option params [required, Array<Types::HdfsNameNode>] :name_nodes
+ # The NameNode that manages the HDFS namespace. The NameNode performs
+ # operations such as opening, closing, and renaming files and
+ # directories. The NameNode contains the information to map blocks of
+ # data to the DataNodes. You can use only one NameNode.
+ #
+ # @option params [Integer] :block_size
+ # The size of data blocks to write into the HDFS cluster. The block size
+ # must be a multiple of 512 bytes. The default block size is 128
+ # mebibytes (MiB).
+ #
+ # @option params [Integer] :replication_factor
+ # The number of DataNodes to replicate the data to when writing to the
+ # HDFS cluster. By default, data is replicated to three DataNodes.
+ #
+ # @option params [String] :kms_key_provider_uri
+ # The URI of the HDFS cluster's Key Management Server (KMS).
+ #
+ # @option params [Types::QopConfiguration] :qop_configuration
+ # The Quality of Protection (QOP) configuration specifies the Remote
+ # Procedure Call (RPC) and data transfer protection settings configured
+ # on the Hadoop Distributed File System (HDFS) cluster. If
+ # `QopConfiguration` isn't specified, `RpcProtection` and
+ # `DataTransferProtection` default to `PRIVACY`. If you set
+ # `RpcProtection` or `DataTransferProtection`, the other parameter
+ # assumes the same value.
+ #
+ # @option params [required, String] :authentication_type
+ # The type of authentication used to determine the identity of the user.
+ #
+ # @option params [String] :simple_user
+ # The user name used to identify the client on the host operating
+ # system.
+ #
+ # <note markdown="1"> If `SIMPLE` is specified for `AuthenticationType`, this parameter is
+ # required.
+ #
+ # </note>
+ #
+ # @option params [String] :kerberos_principal
+ # The Kerberos principal with access to the files and folders on the
+ # HDFS cluster.
+ #
+ # <note markdown="1"> If `KERBEROS` is specified for `AuthenticationType`, this parameter is
+ # required.
+ #
+ # </note>
+ #
+ # @option params [String, StringIO, File] :kerberos_keytab
+ # The Kerberos key table (keytab) that contains mappings between the
+ # defined Kerberos principal and the encrypted keys. You can load the
+ # keytab from a file by providing the file's address. If you're using
+ # the CLI, it performs base64 encoding for you. Otherwise, provide the
+ # base64-encoded text.
+ #
+ # <note markdown="1"> If `KERBEROS` is specified for `AuthenticationType`, this parameter is
+ # required.
+ #
+ # </note>
+ #
+ # @option params [String, StringIO, File] :kerberos_krb_5_conf
+ # The `krb5.conf` file that contains the Kerberos configuration
+ # information. You can load the `krb5.conf` file by providing the
+ # file's address. If you're using the CLI, it performs the base64
+ # encoding for you. Otherwise, provide the base64-encoded text.
+ #
+ # <note markdown="1"> If `KERBEROS` is specified for `AuthenticationType`, this parameter is
+ # required.
+ #
+ # </note>
+ #
+ # @option params [required, Array<String>] :agent_arns
+ # The Amazon Resource Names (ARNs) of the agents that are used to
+ # connect to the HDFS cluster.
+ #
+ # @option params [Array<Types::TagListEntry>] :tags
+ # The key-value pair that represents the tag that you want to add to the
+ # location. The value can be an empty string. We recommend using tags to
+ # name your resources.
+ #
+ # @return [Types::CreateLocationHdfsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::CreateLocationHdfsResponse#location_arn #location_arn} => String
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.create_location_hdfs({
+ # subdirectory: "HdfsSubdirectory",
+ # name_nodes: [ # required
+ # {
+ # hostname: "HdfsServerHostname", # required
+ # port: 1, # required
+ # },
+ # ],
+ # block_size: 1,
+ # replication_factor: 1,
+ # kms_key_provider_uri: "KmsKeyProviderUri",
+ # qop_configuration: {
+ # rpc_protection: "DISABLED", # accepts DISABLED, AUTHENTICATION, INTEGRITY, PRIVACY
+ # data_transfer_protection: "DISABLED", # accepts DISABLED, AUTHENTICATION, INTEGRITY, PRIVACY
+ # },
+ # authentication_type: "SIMPLE", # required, accepts SIMPLE, KERBEROS
+ # simple_user: "HdfsUser",
+ # kerberos_principal: "KerberosPrincipal",
+ # kerberos_keytab: "data",
+ # kerberos_krb_5_conf: "data",
+ # agent_arns: ["AgentArn"], # required
+ # tags: [
+ # {
+ # key: "TagKey", # required
+ # value: "TagValue",
+ # },
+ # ],
+ # })
+ #
+ # @example Response structure
+ #
+ # resp.location_arn #=> String
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/CreateLocationHdfs AWS API Documentation
+ #
+ # @overload create_location_hdfs(params = {})
+ # @param [Hash] params ({})
+ def create_location_hdfs(params = {}, options = {})
+ req = build_request(:create_location_hdfs, params)
+ req.send_request(options)
+ end
+
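A usage sketch of creating an HDFS location with SIMPLE authentication (hostname, user, and agent ARN are invented placeholders):

    resp = client.create_location_hdfs(
      name_nodes: [{ hostname: 'namenode.example.com', port: 8020 }], # only one NameNode is allowed
      authentication_type: 'SIMPLE',
      simple_user: 'hdfs-user', # required when authentication_type is SIMPLE
      agent_arns: ['arn:aws:datasync:us-east-1:111122223333:agent/agent-0123456789abcdef0'],
      subdirectory: '/data'     # defaults to / when omitted
    )
    resp.location_arn # ARN of the new HDFS location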
  # Defines a file system on a Network File System (NFS) server that can
  # be read from or written to.
  #
@@ -1103,9 +1300,9 @@ module Aws::DataSync
  #
  # @option params [Array<Types::FilterRule>] :includes
  # A list of filter rules that determines which files to include when
- # running a task. The pattern should contain a single filter string that
+ # running a task. The pattern contains a single filter string that
  # consists of the patterns to include. The patterns are delimited by
- # "\|" (that is, a pipe). For example: `"/folder1|/folder2`"
+ # "\|" (that is, a pipe), for example, `"/folder1|/folder2"`.
  #
  # @return [Types::CreateTaskResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
  #
@@ -1327,6 +1524,43 @@ module Aws::DataSync
  req.send_request(options)
  end

+ # Returns metadata, such as the path information about an Amazon FSx for
+ # Lustre location.
+ #
+ # @option params [required, String] :location_arn
+ # The Amazon Resource Name (ARN) of the FSx for Lustre location to
+ # describe.
+ #
+ # @return [Types::DescribeLocationFsxLustreResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::DescribeLocationFsxLustreResponse#location_arn #location_arn} => String
+ # * {Types::DescribeLocationFsxLustreResponse#location_uri #location_uri} => String
+ # * {Types::DescribeLocationFsxLustreResponse#security_group_arns #security_group_arns} => Array&lt;String&gt;
+ # * {Types::DescribeLocationFsxLustreResponse#creation_time #creation_time} => Time
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.describe_location_fsx_lustre({
+ # location_arn: "LocationArn", # required
+ # })
+ #
+ # @example Response structure
+ #
+ # resp.location_arn #=> String
+ # resp.location_uri #=> String
+ # resp.security_group_arns #=> Array
+ # resp.security_group_arns[0] #=> String
+ # resp.creation_time #=> Time
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/DescribeLocationFsxLustre AWS API Documentation
+ #
+ # @overload describe_location_fsx_lustre(params = {})
+ # @param [Hash] params ({})
+ def describe_location_fsx_lustre(params = {}, options = {})
+ req = build_request(:describe_location_fsx_lustre, params)
+ req.send_request(options)
+ end
+
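Paired with the create sketch earlier, a one-line describe sketch (location_arn is whatever create_location_fsx_lustre returned):

    info = client.describe_location_fsx_lustre(location_arn: resp.location_arn)
    info.location_uri # the location's URI string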
  # Returns metadata, such as the path information about an Amazon FSx for
  # Windows File Server location.
  #
@@ -1368,6 +1602,62 @@ module Aws::DataSync
  req.send_request(options)
  end

+ # Returns metadata, such as the authentication information about the
+ # Hadoop Distributed File System (HDFS) location.
+ #
+ # @option params [required, String] :location_arn
+ # The Amazon Resource Name (ARN) of the HDFS cluster location to
+ # describe.
+ #
+ # @return [Types::DescribeLocationHdfsResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
+ #
+ # * {Types::DescribeLocationHdfsResponse#location_arn #location_arn} => String
+ # * {Types::DescribeLocationHdfsResponse#location_uri #location_uri} => String
+ # * {Types::DescribeLocationHdfsResponse#name_nodes #name_nodes} => Array&lt;Types::HdfsNameNode&gt;
+ # * {Types::DescribeLocationHdfsResponse#block_size #block_size} => Integer
+ # * {Types::DescribeLocationHdfsResponse#replication_factor #replication_factor} => Integer
+ # * {Types::DescribeLocationHdfsResponse#kms_key_provider_uri #kms_key_provider_uri} => String
+ # * {Types::DescribeLocationHdfsResponse#qop_configuration #qop_configuration} => Types::QopConfiguration
+ # * {Types::DescribeLocationHdfsResponse#authentication_type #authentication_type} => String
+ # * {Types::DescribeLocationHdfsResponse#simple_user #simple_user} => String
+ # * {Types::DescribeLocationHdfsResponse#kerberos_principal #kerberos_principal} => String
+ # * {Types::DescribeLocationHdfsResponse#agent_arns #agent_arns} => Array&lt;String&gt;
+ # * {Types::DescribeLocationHdfsResponse#creation_time #creation_time} => Time
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.describe_location_hdfs({
+ # location_arn: "LocationArn", # required
+ # })
+ #
+ # @example Response structure
+ #
+ # resp.location_arn #=> String
+ # resp.location_uri #=> String
+ # resp.name_nodes #=> Array
+ # resp.name_nodes[0].hostname #=> String
+ # resp.name_nodes[0].port #=> Integer
+ # resp.block_size #=> Integer
+ # resp.replication_factor #=> Integer
+ # resp.kms_key_provider_uri #=> String
+ # resp.qop_configuration.rpc_protection #=> String, one of "DISABLED", "AUTHENTICATION", "INTEGRITY", "PRIVACY"
+ # resp.qop_configuration.data_transfer_protection #=> String, one of "DISABLED", "AUTHENTICATION", "INTEGRITY", "PRIVACY"
+ # resp.authentication_type #=> String, one of "SIMPLE", "KERBEROS"
+ # resp.simple_user #=> String
+ # resp.kerberos_principal #=> String
+ # resp.agent_arns #=> Array
+ # resp.agent_arns[0] #=> String
+ # resp.creation_time #=> Time
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/DescribeLocationHdfs AWS API Documentation
+ #
+ # @overload describe_location_hdfs(params = {})
+ # @param [Hash] params ({})
+ def describe_location_hdfs(params = {}, options = {})
+ req = build_request(:describe_location_hdfs, params)
+ req.send_request(options)
+ end
+
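A short sketch of checking the effective protection settings on an HDFS location (the ARN is an invented placeholder):

    info = client.describe_location_hdfs(
      location_arn: 'arn:aws:datasync:us-east-1:111122223333:location/loc-0123456789abcdef0'
    )
    info.qop_configuration.rpc_protection           # e.g. "PRIVACY", the documented default
    info.qop_configuration.data_transfer_protection # e.g. "PRIVACY"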
  # Returns metadata, such as the path information, about an NFS location.
  #
  # @option params [required, String] :location_arn
@@ -1969,13 +2259,13 @@ module Aws::DataSync
  # A list of filter rules that determines which files to include when
  # running a task. The pattern should contain a single filter string that
  # consists of the patterns to include. The patterns are delimited by
- # "\|" (that is, a pipe). For example: `"/folder1|/folder2"`
+ # "\|" (that is, a pipe), for example, `"/folder1|/folder2"`.
  #
  # @option params [Array<Types::FilterRule>] :excludes
  # A list of filter rules that determines which files to exclude from a
- # task. The list should contain a single filter string that consists of
- # the patterns to exclude. The patterns are delimited by "\|" (that
- # is, a pipe), for example, `"/folder1|/folder2"`.
+ # task. The list contains a single filter string that consists of the
+ # patterns to exclude. The patterns are delimited by "\|" (that is, a
+ # pipe), for example, `"/folder1|/folder2"`.
  #
  # @return [Types::StartTaskExecutionResponse] Returns a {Seahorse::Client::Response response} object which responds to the following methods:
  #
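A sketch of the pipe-delimited filter syntax in practice (the task ARN is an invented placeholder; SIMPLE_PATTERN is the DataSync FilterType):

    resp = client.start_task_execution(
      task_arn: 'arn:aws:datasync:us-east-1:111122223333:task/task-0123456789abcdef0',
      includes: [{ filter_type: 'SIMPLE_PATTERN', value: '/folder1|/folder2' }]
    )
    resp.task_execution_arn # ARN of the running execution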
@@ -2111,6 +2401,101 @@ module Aws::DataSync
  req.send_request(options)
  end

+ # Updates some parameters of a previously created location for a Hadoop
+ # Distributed File System cluster.
+ #
+ # @option params [required, String] :location_arn
+ # The Amazon Resource Name (ARN) of the source HDFS cluster location.
+ #
+ # @option params [String] :subdirectory
+ # A subdirectory in the HDFS cluster. This subdirectory is used to read
+ # data from or write data to the HDFS cluster.
+ #
+ # @option params [Array<Types::HdfsNameNode>] :name_nodes
+ # The NameNode that manages the HDFS namespace. The NameNode performs
+ # operations such as opening, closing, and renaming files and
+ # directories. The NameNode contains the information to map blocks of
+ # data to the DataNodes. You can use only one NameNode.
+ #
+ # @option params [Integer] :block_size
+ # The size of the data blocks to write into the HDFS cluster.
+ #
+ # @option params [Integer] :replication_factor
+ # The number of DataNodes to replicate the data to when writing to the
+ # HDFS cluster.
+ #
+ # @option params [String] :kms_key_provider_uri
+ # The URI of the HDFS cluster's Key Management Server (KMS).
+ #
+ # @option params [Types::QopConfiguration] :qop_configuration
+ # The Quality of Protection (QOP) configuration specifies the Remote
+ # Procedure Call (RPC) and data transfer privacy settings configured on
+ # the Hadoop Distributed File System (HDFS) cluster.
+ #
+ # @option params [String] :authentication_type
+ # The type of authentication used to determine the identity of the user.
+ #
+ # @option params [String] :simple_user
+ # The user name used to identify the client on the host operating
+ # system.
+ #
+ # @option params [String] :kerberos_principal
+ # The Kerberos principal with access to the files and folders on the
+ # HDFS cluster.
+ #
+ # @option params [String, StringIO, File] :kerberos_keytab
+ # The Kerberos key table (keytab) that contains mappings between the
+ # defined Kerberos principal and the encrypted keys. You can load the
+ # keytab from a file by providing the file's address. If you use the
+ # AWS CLI, it performs base64 encoding for you. Otherwise, provide the
+ # base64-encoded text.
+ #
+ # @option params [String, StringIO, File] :kerberos_krb_5_conf
+ # The `krb5.conf` file that contains the Kerberos configuration
+ # information. You can load the `krb5.conf` file by providing the
+ # file's address. If you're using the AWS CLI, it performs the base64
+ # encoding for you. Otherwise, provide the base64-encoded text.
+ #
+ # @option params [Array<String>] :agent_arns
+ # The ARNs of the agents that are used to connect to the HDFS cluster.
+ #
+ # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
+ #
+ # @example Request syntax with placeholder values
+ #
+ # resp = client.update_location_hdfs({
+ # location_arn: "LocationArn", # required
+ # subdirectory: "HdfsSubdirectory",
+ # name_nodes: [
+ # {
+ # hostname: "HdfsServerHostname", # required
+ # port: 1, # required
+ # },
+ # ],
+ # block_size: 1,
+ # replication_factor: 1,
+ # kms_key_provider_uri: "KmsKeyProviderUri",
+ # qop_configuration: {
+ # rpc_protection: "DISABLED", # accepts DISABLED, AUTHENTICATION, INTEGRITY, PRIVACY
+ # data_transfer_protection: "DISABLED", # accepts DISABLED, AUTHENTICATION, INTEGRITY, PRIVACY
+ # },
+ # authentication_type: "SIMPLE", # accepts SIMPLE, KERBEROS
+ # simple_user: "HdfsUser",
+ # kerberos_principal: "KerberosPrincipal",
+ # kerberos_keytab: "data",
+ # kerberos_krb_5_conf: "data",
+ # agent_arns: ["AgentArn"],
+ # })
+ #
+ # @see http://docs.aws.amazon.com/goto/WebAPI/datasync-2018-11-09/UpdateLocationHdfs AWS API Documentation
+ #
+ # @overload update_location_hdfs(params = {})
+ # @param [Hash] params ({})
+ def update_location_hdfs(params = {}, options = {})
+ req = build_request(:update_location_hdfs, params)
+ req.send_request(options)
+ end
+
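A sketch of updating a single setting on an existing HDFS location (the ARN is an invented placeholder; the call returns an empty struct):

    client.update_location_hdfs(
      location_arn: 'arn:aws:datasync:us-east-1:111122223333:location/loc-0123456789abcdef0',
      replication_factor: 3 # write each block to three DataNodes
    )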
  # Updates some of the parameters of a previously created location for
  # Network File System (NFS) access. For information about creating an
  # NFS location, see [Creating a location for NFS][1].
@@ -2360,7 +2745,7 @@ module Aws::DataSync
  # A list of filter rules that determines which files to exclude from a
  # task. The list should contain a single filter string that consists of
  # the patterns to exclude. The patterns are delimited by "\|" (that
- # is, a pipe), for example: `"/folder1|/folder2"`
+ # is, a pipe), for example, `"/folder1|/folder2"`.
  #
  # @option params [Types::TaskSchedule] :schedule
  # Specifies a schedule used to periodically transfer files from a source
@@ -2378,14 +2763,14 @@ module Aws::DataSync
  # The name of the task to update.
  #
  # @option params [String] :cloud_watch_log_group_arn
- # The Amazon Resource Name (ARN) of the resource name of the CloudWatch
- # LogGroup.
+ # The Amazon Resource Name (ARN) of the resource name of the Amazon
+ # CloudWatch log group.
  #
  # @option params [Array<Types::FilterRule>] :includes
  # A list of filter rules that determines which files to include when
- # running a task. The pattern should contain a single filter string that
+ # running a task. The pattern contains a single filter string that
  # consists of the patterns to include. The patterns are delimited by
- # "\|" (that is, a pipe). For example: `"/folder1|/folder2`"
+ # "\|" (that is, a pipe), for example, `"/folder1|/folder2"`.
  #
  # @return [Struct] Returns an empty {Seahorse::Client::Response response}.
  #
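A sketch of pointing a task at a CloudWatch log group while tightening its include filter (both ARNs are invented placeholders):

    client.update_task(
      task_arn: 'arn:aws:datasync:us-east-1:111122223333:task/task-0123456789abcdef0',
      cloud_watch_log_group_arn: 'arn:aws:logs:us-east-1:111122223333:log-group:/datasync/tasks:*',
      includes: [{ filter_type: 'SIMPLE_PATTERN', value: '/folder1|/folder2' }]
    )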
@@ -2518,7 +2903,7 @@ module Aws::DataSync
  params: params,
  config: config)
  context[:gem_name] = 'aws-sdk-datasync'
- context[:gem_version] = '1.37.0'
+ context[:gem_version] = '1.41.0'
  Seahorse::Client::Request.new(handlers, context)
  end