awscli 1.38.7__py3-none-any.whl → 1.38.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of awscli might be problematic.
- awscli/__init__.py +1 -1
- awscli/examples/cloudformation/_package_description.rst +1 -1
- awscli/examples/emr/add-steps.rst +8 -8
- awscli/examples/emr/create-cluster-examples.rst +5 -5
- awscli/examples/rds/cancel-export-task.rst +22 -22
- awscli/examples/rds/describe-export-tasks.rst +40 -40
- awscli/examples/rds/restore-db-cluster-from-s3.rst +64 -64
- awscli/examples/rds/start-export-task.rst +26 -26
- awscli/examples/s3/_concepts.rst +2 -2
- awscli/examples/s3/cp.rst +30 -30
- awscli/examples/s3/ls.rst +7 -7
- awscli/examples/s3/mb.rst +6 -6
- awscli/examples/s3/mv.rst +21 -21
- awscli/examples/s3/rb.rst +8 -8
- awscli/examples/s3/rm.rst +12 -12
- awscli/examples/s3/sync.rst +27 -27
- awscli/examples/s3api/get-bucket-policy.rst +2 -2
- {awscli-1.38.7.dist-info → awscli-1.38.8.dist-info}/METADATA +2 -2
- {awscli-1.38.7.dist-info → awscli-1.38.8.dist-info}/RECORD +27 -27
- {awscli-1.38.7.data → awscli-1.38.8.data}/scripts/aws +0 -0
- {awscli-1.38.7.data → awscli-1.38.8.data}/scripts/aws.cmd +0 -0
- {awscli-1.38.7.data → awscli-1.38.8.data}/scripts/aws_bash_completer +0 -0
- {awscli-1.38.7.data → awscli-1.38.8.data}/scripts/aws_completer +0 -0
- {awscli-1.38.7.data → awscli-1.38.8.data}/scripts/aws_zsh_completer.sh +0 -0
- {awscli-1.38.7.dist-info → awscli-1.38.8.dist-info}/LICENSE.txt +0 -0
- {awscli-1.38.7.dist-info → awscli-1.38.8.dist-info}/WHEEL +0 -0
- {awscli-1.38.7.dist-info → awscli-1.38.8.dist-info}/top_level.txt +0 -0
awscli/__init__.py
CHANGED

awscli/examples/cloudformation/_package_description.rst
CHANGED

@@ -40,7 +40,7 @@ For example, if your AWS Lambda function source code is in the
 ``/home/user/code/lambdafunction/`` folder, specify
 ``CodeUri: /home/user/code/lambdafunction`` for the
 ``AWS::Serverless::Function`` resource. The command returns a template and replaces
-the local path with the S3 location: ``CodeUri: s3://
+the local path with the S3 location: ``CodeUri: s3://amzn-s3-demo-bucket/lambdafunction.zip``.
 
 If you specify a file, the command directly uploads it to the S3 bucket. If you
 specify a folder, the command zips the folder and then uploads the .zip file.
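
For orientation, the packaging behavior this description covers corresponds to an invocation roughly like the following; the template path, output path, and bucket name are illustrative placeholders, not values taken from this diff::

    # Hypothetical paths and bucket name; --s3-bucket must name a bucket you own.
    aws cloudformation package \
        --template-file template.yaml \
        --s3-bucket amzn-s3-demo-bucket \
        --output-template-file packaged-template.yaml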

awscli/examples/emr/add-steps.rst
CHANGED

@@ -2,7 +2,7 @@
 
 - Command::
 
-    aws emr add-steps --cluster-id j-XXXXXXXX --steps Type=CUSTOM_JAR,Name=CustomJAR,ActionOnFailure=CONTINUE,Jar=s3://
+    aws emr add-steps --cluster-id j-XXXXXXXX --steps Type=CUSTOM_JAR,Name=CustomJAR,ActionOnFailure=CONTINUE,Jar=s3://amzn-s3-demo-bucket/mytest.jar,Args=arg1,arg2,arg3 Type=CUSTOM_JAR,Name=CustomJAR,ActionOnFailure=CONTINUE,Jar=s3://amzn-s3-demo-bucket/mytest.jar,MainClass=mymainclass,Args=arg1,arg2,arg3
 
 - Required parameters::
 
@@ -25,7 +25,7 @@
 
 - Command::
 
-    aws emr add-steps --cluster-id j-XXXXXXXX --steps Type=STREAMING,Name='Streaming Program',ActionOnFailure=CONTINUE,Args=[-files,s3://elasticmapreduce/samples/wordcount/wordSplitter.py,-mapper,wordSplitter.py,-reducer,aggregate,-input,s3://elasticmapreduce/samples/wordcount/input,-output,s3://
+    aws emr add-steps --cluster-id j-XXXXXXXX --steps Type=STREAMING,Name='Streaming Program',ActionOnFailure=CONTINUE,Args=[-files,s3://elasticmapreduce/samples/wordcount/wordSplitter.py,-mapper,wordSplitter.py,-reducer,aggregate,-input,s3://elasticmapreduce/samples/wordcount/input,-output,s3://amzn-s3-demo-bucket/wordcount/output]
 
 - Required parameters::
 
@@ -40,7 +40,7 @@
     [
         {
             "Name": "JSON Streaming Step",
-            "Args": ["-files","s3://elasticmapreduce/samples/wordcount/wordSplitter.py","-mapper","wordSplitter.py","-reducer","aggregate","-input","s3://elasticmapreduce/samples/wordcount/input","-output","s3://
+            "Args": ["-files","s3://elasticmapreduce/samples/wordcount/wordSplitter.py","-mapper","wordSplitter.py","-reducer","aggregate","-input","s3://elasticmapreduce/samples/wordcount/input","-output","s3://amzn-s3-demo-bucket/wordcount/output"],
             "ActionOnFailure": "CONTINUE",
             "Type": "STREAMING"
         }
@@ -72,15 +72,15 @@ NOTE: JSON arguments must include options and values as their own items in the l
             "ActionOnFailure": "CONTINUE",
             "Args": [
                 "-files",
-                "s3://
+                "s3://amzn-s3-demo-bucket/mapper.py,s3://amzn-s3-demo-bucket/reducer.py",
                 "-mapper",
                 "mapper.py",
                 "-reducer",
                 "reducer.py",
                 "-input",
-                "s3://
+                "s3://amzn-s3-demo-bucket/input",
                 "-output",
-                "s3://
+                "s3://amzn-s3-demo-bucket/output"]
         }
     ]
 
@@ -109,7 +109,7 @@ NOTE: JSON arguments must include options and values as their own items in the l
 
 - Command::
 
-    aws emr add-steps --cluster-id j-XXXXXXXX --steps Type=HIVE,Name='Hive program',ActionOnFailure=CONTINUE,Args=[-f,s3://
+    aws emr add-steps --cluster-id j-XXXXXXXX --steps Type=HIVE,Name='Hive program',ActionOnFailure=CONTINUE,Args=[-f,s3://amzn-s3-demo-bucket/myhivescript.q,-d,INPUT=s3://amzn-s3-demo-bucket/myhiveinput,-d,OUTPUT=s3://amzn-s3-demo-bucket/myhiveoutput,arg1,arg2] Type=HIVE,Name='Hive steps',ActionOnFailure=TERMINATE_CLUSTER,Args=[-f,s3://elasticmapreduce/samples/hive-ads/libs/model-build.q,-d,INPUT=s3://elasticmapreduce/samples/hive-ads/tables,-d,OUTPUT=s3://amzn-s3-demo-bucket/hive-ads/output/2014-04-18/11-07-32,-d,LIBS=s3://elasticmapreduce/samples/hive-ads/libs]
 
 
 - Required parameters::
@@ -134,7 +134,7 @@ NOTE: JSON arguments must include options and values as their own items in the l
 
 - Command::
 
-    aws emr add-steps --cluster-id j-XXXXXXXX --steps Type=PIG,Name='Pig program',ActionOnFailure=CONTINUE,Args=[-f,s3://
+    aws emr add-steps --cluster-id j-XXXXXXXX --steps Type=PIG,Name='Pig program',ActionOnFailure=CONTINUE,Args=[-f,s3://amzn-s3-demo-bucket/mypigscript.pig,-p,INPUT=s3://amzn-s3-demo-bucket/mypiginput,-p,OUTPUT=s3://amzn-s3-demo-bucket/mypigoutput,arg1,arg2] Type=PIG,Name='Pig program',Args=[-f,s3://elasticmapreduce/samples/pig-apache/do-reports2.pig,-p,INPUT=s3://elasticmapreduce/samples/pig-apache/input,-p,OUTPUT=s3://amzn-s3-demo-bucket/pig-apache/output,arg1,arg2]
 
 
 - Required parameters::
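
For context, step definitions like the JSON shown in the hunks above are usually fed to the CLI from a file rather than typed inline; a minimal sketch, assuming the step list has been saved locally as ``steps.json`` and using the usual placeholder cluster ID::

    # steps.json is a hypothetical local file holding the JSON step list shown above.
    aws emr add-steps \
        --cluster-id j-XXXXXXXX \
        --steps file://./steps.json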

awscli/examples/emr/create-cluster-examples.rst
CHANGED

@@ -369,7 +369,7 @@ The following ``create-cluster`` examples add a streaming step to a cluster that
 The following example specifies the step inline. ::
 
     aws emr create-cluster \
-        --steps Type=STREAMING,Name='Streaming Program',ActionOnFailure=CONTINUE,Args=[-files,s3://elasticmapreduce/samples/wordcount/wordSplitter.py,-mapper,wordSplitter.py,-reducer,aggregate,-input,s3://elasticmapreduce/samples/wordcount/input,-output,s3://
+        --steps Type=STREAMING,Name='Streaming Program',ActionOnFailure=CONTINUE,Args=[-files,s3://elasticmapreduce/samples/wordcount/wordSplitter.py,-mapper,wordSplitter.py,-reducer,aggregate,-input,s3://elasticmapreduce/samples/wordcount/input,-output,s3://amzn-s3-demo-bucket/wordcount/output] \
         --release-label emr-5.3.1 \
         --instance-groups InstanceGroupType=MASTER,InstanceCount=1,InstanceType=m4.large InstanceGroupType=CORE,InstanceCount=2,InstanceType=m4.large \
         --auto-terminate
@@ -397,7 +397,7 @@ Contents of ``multiplefiles.json``::
                 "-input",
                 "s3://elasticmapreduce/samples/wordcount/input",
                 "-output",
-                "s3://
+                "s3://amzn-s3-demo-bucket/wordcount/output"
             ],
             "ActionOnFailure": "CONTINUE",
             "Type": "STREAMING"
@@ -409,7 +409,7 @@ Contents of ``multiplefiles.json``::
 The following example add Hive steps when creating a cluster. Hive steps require parameters ``Type`` and ``Args``. Hive steps optional parameters are ``Name`` and ``ActionOnFailure``. ::
 
     aws emr create-cluster \
-        --steps Type=HIVE,Name='Hive program',ActionOnFailure=CONTINUE,ActionOnFailure=TERMINATE_CLUSTER,Args=[-f,s3://elasticmapreduce/samples/hive-ads/libs/model-build.q,-d,INPUT=s3://elasticmapreduce/samples/hive-ads/tables,-d,OUTPUT=s3://
+        --steps Type=HIVE,Name='Hive program',ActionOnFailure=CONTINUE,ActionOnFailure=TERMINATE_CLUSTER,Args=[-f,s3://elasticmapreduce/samples/hive-ads/libs/model-build.q,-d,INPUT=s3://elasticmapreduce/samples/hive-ads/tables,-d,OUTPUT=s3://amzn-s3-demo-bucket/hive-ads/output/2014-04-18/11-07-32,-d,LIBS=s3://elasticmapreduce/samples/hive-ads/libs] \
         --applications Name=Hive \
         --release-label emr-5.3.1 \
         --instance-groups InstanceGroupType=MASTER,InstanceCount=1,InstanceType=m4.large InstanceGroupType=CORE,InstanceCount=2,InstanceType=m4.large
@@ -419,7 +419,7 @@ The following example add Hive steps when creating a cluster. Hive steps require
 The following example adds Pig steps when creating a cluster. Pig steps required parameters are ``Type`` and ``Args``. Pig steps optional parameters are ``Name`` and ``ActionOnFailure``. ::
 
     aws emr create-cluster \
-        --steps Type=PIG,Name='Pig program',ActionOnFailure=CONTINUE,Args=[-f,s3://elasticmapreduce/samples/pig-apache/do-reports2.pig,-p,INPUT=s3://elasticmapreduce/samples/pig-apache/input,-p,OUTPUT=s3://
+        --steps Type=PIG,Name='Pig program',ActionOnFailure=CONTINUE,Args=[-f,s3://elasticmapreduce/samples/pig-apache/do-reports2.pig,-p,INPUT=s3://elasticmapreduce/samples/pig-apache/input,-p,OUTPUT=s3://amzn-s3-demo-bucket/pig-apache/output] \
         --applications Name=Pig \
         --release-label emr-5.3.1 \
         --instance-groups InstanceGroupType=MASTER,InstanceCount=1,InstanceType=m4.large InstanceGroupType=CORE,InstanceCount=2,InstanceType=m4.large
@@ -429,7 +429,7 @@ The following example adds Pig steps when creating a cluster. Pig steps required
 The following ``create-cluster`` example runs two bootstrap actions defined as scripts that are stored in Amazon S3. ::
 
     aws emr create-cluster \
-        --bootstrap-actions Path=s3://
+        --bootstrap-actions Path=s3://amzn-s3-demo-bucket/myscript1,Name=BootstrapAction1,Args=[arg1,arg2] Path=s3://amzn-s3-demo-bucket/myscript2,Name=BootstrapAction2,Args=[arg1,arg2] \
         --release-label emr-5.3.1 \
         --instance-groups InstanceGroupType=MASTER,InstanceCount=1,InstanceType=m4.large InstanceGroupType=CORE,InstanceCount=2,InstanceType=m4.large \
         --auto-terminate

awscli/examples/rds/cancel-export-task.rst
CHANGED

@@ -1,23 +1,23 @@
-**To cancel a snapshot export to Amazon S3**
-
-The following ``cancel-export-task`` example cancels an export task in progress that is exporting a snapshot to Amazon S3. ::
-
-    aws rds cancel-export-task \
-        --export-task-identifier my-s3-export-1
-
-Output::
-
-    {
-        "ExportTaskIdentifier": "my-s3-export-1",
-        "SourceArn": "arn:aws:rds:us-east-1:123456789012:snapshot:publisher-final-snapshot",
-        "SnapshotTime": "2019-03-24T20:01:09.815Z",
-        "S3Bucket": "
-        "S3Prefix": "",
-        "IamRoleArn": "arn:aws:iam::123456789012:role/service-role/export-snap-S3-role",
-        "KmsKeyId": "arn:aws:kms:us-east-1:123456789012:key/abcd0000-7bfd-4594-af38-aabbccddeeff",
-        "Status": "CANCELING",
-        "PercentProgress": 0,
-        "TotalExtractedDataInGB": 0
-    }
-
+**To cancel a snapshot export to Amazon S3**
+
+The following ``cancel-export-task`` example cancels an export task in progress that is exporting a snapshot to Amazon S3. ::
+
+    aws rds cancel-export-task \
+        --export-task-identifier my-s3-export-1
+
+Output::
+
+    {
+        "ExportTaskIdentifier": "my-s3-export-1",
+        "SourceArn": "arn:aws:rds:us-east-1:123456789012:snapshot:publisher-final-snapshot",
+        "SnapshotTime": "2019-03-24T20:01:09.815Z",
+        "S3Bucket": "amzn-s3-demo-bucket",
+        "S3Prefix": "",
+        "IamRoleArn": "arn:aws:iam::123456789012:role/service-role/export-snap-S3-role",
+        "KmsKeyId": "arn:aws:kms:us-east-1:123456789012:key/abcd0000-7bfd-4594-af38-aabbccddeeff",
+        "Status": "CANCELING",
+        "PercentProgress": 0,
+        "TotalExtractedDataInGB": 0
+    }
+
 For more information, see `Canceling a snapshot export task <https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_ExportSnapshot.html#USER_ExportSnapshot.Canceling>`__ in the *Amazon RDS User Guide* or `Canceling a snapshot export task <https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/USER_ExportSnapshot.html#USER_ExportSnapshot.Canceling>`__ in the *Amazon Aurora User Guide*.

awscli/examples/rds/describe-export-tasks.rst
CHANGED

@@ -1,40 +1,40 @@
-**To describe snapshot export tasks**
-
-The following ``describe-export-tasks`` example returns information about snapshot exports to Amazon S3. ::
-
-    aws rds describe-export-tasks
-
-Output::
-
-    {
-        "ExportTasks": [
-            {
-                "ExportTaskIdentifier": "test-snapshot-export",
-                "SourceArn": "arn:aws:rds:us-west-2:123456789012:snapshot:test-snapshot",
-                "SnapshotTime": "2020-03-02T18:26:28.163Z",
-                "TaskStartTime": "2020-03-02T18:57:56.896Z",
-                "TaskEndTime": "2020-03-02T19:10:31.985Z",
-                "S3Bucket": "
-                "S3Prefix": "",
-                "IamRoleArn": "arn:aws:iam::123456789012:role/service-role/ExportRole",
-                "KmsKeyId": "arn:aws:kms:us-west-2:123456789012:key/abcd0000-7fca-4128-82f2-aabbccddeeff",
-                "Status": "COMPLETE",
-                "PercentProgress": 100,
-                "TotalExtractedDataInGB": 0
-            },
-            {
-                "ExportTaskIdentifier": "my-s3-export",
-                "SourceArn": "arn:aws:rds:us-west-2:123456789012:snapshot:db5-snapshot-test",
-                "SnapshotTime": "2020-03-27T20:48:42.023Z",
-                "S3Bucket": "
-                "S3Prefix": "",
-                "IamRoleArn": "arn:aws:iam::123456789012:role/service-role/ExportRole",
-                "KmsKeyId": "arn:aws:kms:us-west-2:123456789012:key/abcd0000-7fca-4128-82f2-aabbccddeeff",
-                "Status": "STARTING",
-                "PercentProgress": 0,
-                "TotalExtractedDataInGB": 0
-            }
-        ]
-    }
-
-For more information, see `Monitoring Snapshot Exports <https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_ExportSnapshot.html#USER_ExportSnapshot.Monitoring>`__ in the *Amazon RDS User Guide*.
+**To describe snapshot export tasks**
+
+The following ``describe-export-tasks`` example returns information about snapshot exports to Amazon S3. ::
+
+    aws rds describe-export-tasks
+
+Output::
+
+    {
+        "ExportTasks": [
+            {
+                "ExportTaskIdentifier": "test-snapshot-export",
+                "SourceArn": "arn:aws:rds:us-west-2:123456789012:snapshot:test-snapshot",
+                "SnapshotTime": "2020-03-02T18:26:28.163Z",
+                "TaskStartTime": "2020-03-02T18:57:56.896Z",
+                "TaskEndTime": "2020-03-02T19:10:31.985Z",
+                "S3Bucket": "amzn-s3-demo-bucket",
+                "S3Prefix": "",
+                "IamRoleArn": "arn:aws:iam::123456789012:role/service-role/ExportRole",
+                "KmsKeyId": "arn:aws:kms:us-west-2:123456789012:key/abcd0000-7fca-4128-82f2-aabbccddeeff",
+                "Status": "COMPLETE",
+                "PercentProgress": 100,
+                "TotalExtractedDataInGB": 0
+            },
+            {
+                "ExportTaskIdentifier": "my-s3-export",
+                "SourceArn": "arn:aws:rds:us-west-2:123456789012:snapshot:db5-snapshot-test",
+                "SnapshotTime": "2020-03-27T20:48:42.023Z",
+                "S3Bucket": "amzn-s3-demo-bucket",
+                "S3Prefix": "",
+                "IamRoleArn": "arn:aws:iam::123456789012:role/service-role/ExportRole",
+                "KmsKeyId": "arn:aws:kms:us-west-2:123456789012:key/abcd0000-7fca-4128-82f2-aabbccddeeff",
+                "Status": "STARTING",
+                "PercentProgress": 0,
+                "TotalExtractedDataInGB": 0
+            }
+        ]
+    }
+
+For more information, see `Monitoring Snapshot Exports <https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_ExportSnapshot.html#USER_ExportSnapshot.Monitoring>`__ in the *Amazon RDS User Guide*.

awscli/examples/rds/restore-db-cluster-from-s3.rst
CHANGED

@@ -1,64 +1,64 @@
-**To restore an Amazon Aurora DB cluster from Amazon S3**
-
-The following ``restore-db-cluster-from-s3`` example restores an Amazon Aurora MySQL version 5.7-compatible DB cluster from a MySQL 5.7 DB backup file in Amazon S3. ::
-
-    aws rds restore-db-cluster-from-s3 \
-        --db-cluster-identifier cluster-s3-restore \
-        --engine aurora-mysql \
-        --master-username admin \
-        --master-user-password mypassword \
-        --s3-bucket-name
-        --s3-prefix test-backup \
-        --s3-ingestion-role-arn arn:aws:iam::123456789012:role/service-role/TestBackup \
-        --source-engine mysql \
-        --source-engine-version 5.7.28
-
-Output::
-
-    {
-        "DBCluster": {
-            "AllocatedStorage": 1,
-            "AvailabilityZones": [
-                "us-west-2c",
-                "us-west-2a",
-                "us-west-2b"
-            ],
-            "BackupRetentionPeriod": 1,
-            "DBClusterIdentifier": "cluster-s3-restore",
-            "DBClusterParameterGroup": "default.aurora-mysql5.7",
-            "DBSubnetGroup": "default",
-            "Status": "creating",
-            "Endpoint": "cluster-s3-restore.cluster-co3xyzabc123.us-west-2.rds.amazonaws.com",
-            "ReaderEndpoint": "cluster-s3-restore.cluster-ro-co3xyzabc123.us-west-2.rds.amazonaws.com",
-            "MultiAZ": false,
-            "Engine": "aurora-mysql",
-            "EngineVersion": "5.7.12",
-            "Port": 3306,
-            "MasterUsername": "admin",
-            "PreferredBackupWindow": "11:15-11:45",
-            "PreferredMaintenanceWindow": "thu:12:19-thu:12:49",
-            "ReadReplicaIdentifiers": [],
-            "DBClusterMembers": [],
-            "VpcSecurityGroups": [
-                {
-                    "VpcSecurityGroupId": "sg-########",
-                    "Status": "active"
-                }
-            ],
-            "HostedZoneId": "Z1PVIF0EXAMPLE",
-            "StorageEncrypted": false,
-            "DbClusterResourceId": "cluster-SU5THYQQHOWCXZZDGXREXAMPLE",
-            "DBClusterArn": "arn:aws:rds:us-west-2:123456789012:cluster:cluster-s3-restore",
-            "AssociatedRoles": [],
-            "IAMDatabaseAuthenticationEnabled": false,
-            "ClusterCreateTime": "2020-07-27T14:22:08.095Z",
-            "EngineMode": "provisioned",
-            "DeletionProtection": false,
-            "HttpEndpointEnabled": false,
-            "CopyTagsToSnapshot": false,
-            "CrossAccountClone": false,
-            "DomainMemberships": []
-        }
-    }
-
-For more information, see `Migrating Data from MySQL by Using an Amazon S3 Bucket <https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraMySQL.Migrating.ExtMySQL.html#AuroraMySQL.Migrating.ExtMySQL.S3>`__ in the *Amazon Aurora User Guide*.
+**To restore an Amazon Aurora DB cluster from Amazon S3**
+
+The following ``restore-db-cluster-from-s3`` example restores an Amazon Aurora MySQL version 5.7-compatible DB cluster from a MySQL 5.7 DB backup file in Amazon S3. ::
+
+    aws rds restore-db-cluster-from-s3 \
+        --db-cluster-identifier cluster-s3-restore \
+        --engine aurora-mysql \
+        --master-username admin \
+        --master-user-password mypassword \
+        --s3-bucket-name amzn-s3-demo-bucket \
+        --s3-prefix test-backup \
+        --s3-ingestion-role-arn arn:aws:iam::123456789012:role/service-role/TestBackup \
+        --source-engine mysql \
+        --source-engine-version 5.7.28
+
+Output::
+
+    {
+        "DBCluster": {
+            "AllocatedStorage": 1,
+            "AvailabilityZones": [
+                "us-west-2c",
+                "us-west-2a",
+                "us-west-2b"
+            ],
+            "BackupRetentionPeriod": 1,
+            "DBClusterIdentifier": "cluster-s3-restore",
+            "DBClusterParameterGroup": "default.aurora-mysql5.7",
+            "DBSubnetGroup": "default",
+            "Status": "creating",
+            "Endpoint": "cluster-s3-restore.cluster-co3xyzabc123.us-west-2.rds.amazonaws.com",
+            "ReaderEndpoint": "cluster-s3-restore.cluster-ro-co3xyzabc123.us-west-2.rds.amazonaws.com",
+            "MultiAZ": false,
+            "Engine": "aurora-mysql",
+            "EngineVersion": "5.7.12",
+            "Port": 3306,
+            "MasterUsername": "admin",
+            "PreferredBackupWindow": "11:15-11:45",
+            "PreferredMaintenanceWindow": "thu:12:19-thu:12:49",
+            "ReadReplicaIdentifiers": [],
+            "DBClusterMembers": [],
+            "VpcSecurityGroups": [
+                {
+                    "VpcSecurityGroupId": "sg-########",
+                    "Status": "active"
+                }
+            ],
+            "HostedZoneId": "Z1PVIF0EXAMPLE",
+            "StorageEncrypted": false,
+            "DbClusterResourceId": "cluster-SU5THYQQHOWCXZZDGXREXAMPLE",
+            "DBClusterArn": "arn:aws:rds:us-west-2:123456789012:cluster:cluster-s3-restore",
+            "AssociatedRoles": [],
+            "IAMDatabaseAuthenticationEnabled": false,
+            "ClusterCreateTime": "2020-07-27T14:22:08.095Z",
+            "EngineMode": "provisioned",
+            "DeletionProtection": false,
+            "HttpEndpointEnabled": false,
+            "CopyTagsToSnapshot": false,
+            "CrossAccountClone": false,
+            "DomainMemberships": []
+        }
+    }
+
+For more information, see `Migrating Data from MySQL by Using an Amazon S3 Bucket <https://docs.aws.amazon.com/AmazonRDS/latest/AuroraUserGuide/AuroraMySQL.Migrating.ExtMySQL.html#AuroraMySQL.Migrating.ExtMySQL.S3>`__ in the *Amazon Aurora User Guide*.

awscli/examples/rds/start-export-task.rst
CHANGED

@@ -1,26 +1,26 @@
-**To export a snapshot to Amazon S3**
-
-The following ``start-export-task`` example exports a DB snapshot named ``db5-snapshot-test`` to the Amazon S3 bucket named ``
-
-    aws rds start-export-task \
-        --export-task-identifier my-s3-export \
-        --source-arn arn:aws:rds:us-west-2:123456789012:snapshot:db5-snapshot-test \
-        --s3-bucket-name
-        --iam-role-arn arn:aws:iam::123456789012:role/service-role/ExportRole \
-        --kms-key-id arn:aws:kms:us-west-2:123456789012:key/abcd0000-7fca-4128-82f2-aabbccddeeff
-
-Output::
-
-    {
-        "ExportTaskIdentifier": "my-s3-export",
-        "SourceArn": "arn:aws:rds:us-west-2:123456789012:snapshot:db5-snapshot-test",
-        "SnapshotTime": "2020-03-27T20:48:42.023Z",
-        "S3Bucket": "
-        "IamRoleArn": "arn:aws:iam::123456789012:role/service-role/ExportRole",
-        "KmsKeyId": "arn:aws:kms:us-west-2:123456789012:key/abcd0000-7fca-4128-82f2-aabbccddeeff",
-        "Status": "STARTING",
-        "PercentProgress": 0,
-        "TotalExtractedDataInGB": 0
-    }
-
-For more information, see `Exporting a Snapshot to an Amazon S3 Bucket <https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_ExportSnapshot.html#USER_ExportSnapshot.Exporting>`__ in the *Amazon RDS User Guide*.
+**To export a snapshot to Amazon S3**
+
+The following ``start-export-task`` example exports a DB snapshot named ``db5-snapshot-test`` to the Amazon S3 bucket named ``amzn-s3-demo-bucket``. ::
+
+    aws rds start-export-task \
+        --export-task-identifier my-s3-export \
+        --source-arn arn:aws:rds:us-west-2:123456789012:snapshot:db5-snapshot-test \
+        --s3-bucket-name amzn-s3-demo-bucket \
+        --iam-role-arn arn:aws:iam::123456789012:role/service-role/ExportRole \
+        --kms-key-id arn:aws:kms:us-west-2:123456789012:key/abcd0000-7fca-4128-82f2-aabbccddeeff
+
+Output::
+
+    {
+        "ExportTaskIdentifier": "my-s3-export",
+        "SourceArn": "arn:aws:rds:us-west-2:123456789012:snapshot:db5-snapshot-test",
+        "SnapshotTime": "2020-03-27T20:48:42.023Z",
+        "S3Bucket": "amzn-s3-demo-bucket",
+        "IamRoleArn": "arn:aws:iam::123456789012:role/service-role/ExportRole",
+        "KmsKeyId": "arn:aws:kms:us-west-2:123456789012:key/abcd0000-7fca-4128-82f2-aabbccddeeff",
+        "Status": "STARTING",
+        "PercentProgress": 0,
+        "TotalExtractedDataInGB": 0
+    }
+
+For more information, see `Exporting a Snapshot to an Amazon S3 Bucket <https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_ExportSnapshot.html#USER_ExportSnapshot.Exporting>`__ in the *Amazon RDS User Guide*.
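
A started export like the one above can then be polled until ``Status`` reaches ``COMPLETE``; a minimal sketch, reusing the task identifier from the example::

    # Poll a single export task by its identifier.
    aws rds describe-export-tasks \
        --export-task-identifier my-s3-export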
awscli/examples/s3/_concepts.rst
CHANGED

@@ -14,13 +14,13 @@ are two types of path arguments: ``LocalPath`` and ``S3Uri``.
 written as an absolute path or relative path.
 
 ``S3Uri``: represents the location of a S3 object, prefix, or bucket. This
-must be written in the form ``s3://
+must be written in the form ``s3://amzn-s3-demo-bucket/mykey`` where ``amzn-s3-demo-bucket`` is
 the specified S3 bucket, ``mykey`` is the specified S3 key. The path argument
 must begin with ``s3://`` in order to denote that the path argument refers to
 a S3 object. Note that prefixes are separated by forward slashes. For
 example, if the S3 object ``myobject`` had the prefix ``myprefix``, the
 S3 key would be ``myprefix/myobject``, and if the object was in the bucket
-``
+``amzn-s3-demo-bucket``, the ``S3Uri`` would be ``s3://amzn-s3-demo-bucket/myprefix/myobject``.
 
 ``S3Uri`` also supports S3 access points. To specify an access point, this
 value must be of the form ``s3://<access-point-arn>/<key>``. For example if
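
To make the ``S3Uri`` form described in this hunk concrete, a hypothetical copy of that object to the local working directory could look like the following; the bucket, prefix, and object name are the placeholders from the text above::

    # Download s3://amzn-s3-demo-bucket/myprefix/myobject to the current directory.
    aws s3 cp s3://amzn-s3-demo-bucket/myprefix/myobject ./myobject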