awspub-0.0.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,120 @@
+ import logging
+ import re
+ from typing import Any, Dict, List
+
+ import boto3
+ from mypy_boto3_marketplace_catalog import MarketplaceCatalogClient
+
+ from awspub.context import Context
+
+ logger = logging.getLogger(__name__)
+
+
+ class ImageMarketplace:
+     """
+     Handle AWS Marketplace API interaction
+     """
+
+     def __init__(self, context: Context, image_name: str):
+         self._ctx: Context = context
+         self._image_name: str = image_name
+         # the marketplace-catalog API is only available via us-east-1
+         self._mpclient: MarketplaceCatalogClient = boto3.client("marketplace-catalog", region_name="us-east-1")
+
+     @property
+     def conf(self) -> Dict[str, Any]:
+         """
+         The marketplace configuration for the current image (based on "image_name") from context
+         """
+         return self._ctx.conf["images"][self._image_name]["marketplace"]
+
+     def request_new_version(self, image_id: str) -> None:
+         """
+         Request a new Marketplace version for the given image Id
+
+         :param image_id: an image Id (in the format 'ami-123')
+         :type image_id: str
+         """
+         entity = self._mpclient.describe_entity(Catalog="AWSMarketplace", EntityId=self.conf["entity_id"])
+         # check if the version already exists
+         for version in entity["DetailsDocument"]["Versions"]:
+             if version["VersionTitle"] == self.conf["version_title"]:
+                 logger.info(f"Marketplace version '{self.conf['version_title']}' already exists. Doing nothing")
+                 return
+
+         # the version doesn't exist yet - create a new one
+         changeset = self._request_new_version_changeset(image_id)
+         changeset_name = ImageMarketplace.sanitize_changeset_name(
+             f"New version request for {self.conf['version_title']}"
+         )
+         resp = self._mpclient.start_change_set(
+             Catalog="AWSMarketplace", ChangeSet=changeset, ChangeSetTags=self._ctx.tags, ChangeSetName=changeset_name
+         )
+         logger.info(
+             f"new version '{self.conf['version_title']}' (image: {image_id}) for entity "
+             f"{self.conf['entity_id']} requested (changeset-id: {resp['ChangeSetId']})"
+         )
+
+     def _request_new_version_changeset(self, image_id: str) -> List[Dict[str, Any]]:
+         """
+         Create a changeset structure for a new AmiProduct version
+         See https://docs.aws.amazon.com/marketplace-catalog/latest/api-reference/ami-products.html#ami-add-version
+
+         :param image_id: an image Id (in the format 'ami-123')
+         :type image_id: str
+         :return: A changeset structure to request a new version
+         :rtype: List[Dict[str, Any]]
+         """
+         return [
+             {
+                 "ChangeType": "AddDeliveryOptions",
+                 "Entity": {
+                     "Identifier": self.conf["entity_id"],
+                     "Type": "AmiProduct@1.0",
+                 },
+                 "DetailsDocument": {
+                     "Version": {
+                         "VersionTitle": self.conf["version_title"],
+                         "ReleaseNotes": self.conf["release_notes"],
+                     },
+                     "DeliveryOptions": [
+                         {
+                             "Details": {
+                                 "AmiDeliveryOptionDetails": {
+                                     "AmiSource": {
+                                         "AmiId": image_id,
+                                         "AccessRoleArn": self.conf["access_role_arn"],
+                                         "UserName": self.conf["user_name"],
+                                         "OperatingSystemName": self.conf["os_name"],
+                                         "OperatingSystemVersion": self.conf["os_version"],
+                                     },
+                                     "UsageInstructions": self.conf["usage_instructions"],
+                                     "RecommendedInstanceType": self.conf["recommended_instance_type"],
+                                     "SecurityGroups": [
+                                         {
+                                             "IpProtocol": sg["ip_protocol"],
+                                             "IpRanges": list(sg["ip_ranges"]),
+                                             "FromPort": sg["from_port"],
+                                             "ToPort": sg["to_port"],
+                                         }
+                                         for sg in self.conf["security_groups"]
+                                     ],
+                                 }
+                             }
+                         }
+                     ],
+                 },
+             }
+         ]
+
+     @staticmethod
+     def sanitize_changeset_name(name: str) -> str:
+         # changeset names can only include alphanumeric characters, whitespace, and any combination of the following
+         # characters: _+=.:@- This regex pattern takes the list of allowed characters, does a negative match on the
+         # string and removes all matched (i.e. disallowed) characters. See [0] for reference.
+         # [0] https://docs.aws.amazon.com/marketplace-catalog/latest/api-reference/API_StartChangeSet.html#API_StartChangeSet_RequestSyntax # noqa
+         return re.sub(
+             "[^\\w\\s+=.:@-]",
+             "",
+             name,
+         )
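
A minimal sketch of the sanitizer above, assuming the module lands at awspub.image_marketplace (this hunk does not show the file name): parentheses, '#' and '!' are outside the allowed character set [\w\s+=.:@-] and get stripped:

    from awspub.image_marketplace import ImageMarketplace  # assumed module path

    name = "New version request for 1.2.3 (candidate #7)!"
    print(ImageMarketplace.sanitize_changeset_name(name))
    # -> "New version request for 1.2.3 candidate 7"
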
awspub/s3.py ADDED
@@ -0,0 +1,262 @@
+ import base64
+ import hashlib
+ import logging
+ import os
+ from typing import Dict
+
+ import boto3
+ from mypy_boto3_s3.type_defs import CompletedPartTypeDef
+
+ from awspub.context import Context
+ from awspub.exceptions import BucketDoesNotExistException
+
+ # the chunk size is required for calculating the checksums
+ MULTIPART_CHUNK_SIZE = 8 * 1024 * 1024
+
+
+ logger = logging.getLogger(__name__)
+
+
+ class S3:
+     """
+     Handle S3 API interaction
+     """
+
+     def __init__(self, context: Context):
+         """
+         :param context: the context object
+         :type context: awspub.context.Context
+         """
+         self._ctx: Context = context
+         self._s3client = boto3.client("s3")
+         self._bucket_region = None
+
+     @property
+     def bucket_region(self):
+         if not self._bucket_region:
+             if not self._bucket_exists():
+                 raise BucketDoesNotExistException(self.bucket_name)
+             self._bucket_region = self._s3client.head_bucket(Bucket=self.bucket_name)["BucketRegion"]
+
+         return self._bucket_region
+
+     @property
+     def bucket_name(self):
+         return self._ctx.conf["s3"]["bucket_name"]
+
+     def __repr__(self):
+         return (
+             f"<{self.__class__} bucket:'{self.bucket_name}' "
+             f"region:'{self.bucket_region}' key:'{self._ctx.source_sha256}'>"
+         )
+
+     def _multipart_sha256sum(self, file_path: str) -> str:
+         """
+         Calculate the sha256 checksum the way AWS does it for a multipart upload (per chunk)
+         See https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums
+
+         :param file_path: the path to the local file to upload
+         :type file_path: str
+         """
+         sha256_list = []
+         count = 0
+         with open(file_path, "rb") as f:
+             for chunk in iter(lambda: f.read(MULTIPART_CHUNK_SIZE), b""):
+                 sha256_list.append(hashlib.sha256(chunk))
+                 count += 1
+
+         sha256_list_digest_concatenated = b"".join([s.digest() for s in sha256_list])
+         sha256_b64 = base64.b64encode(hashlib.sha256(sha256_list_digest_concatenated).digest())
+         return f"{sha256_b64.decode('ascii')}-{count}"
+
+     def _bucket_exists(self) -> bool:
+         """
+         Check if the S3 bucket from context exists
+
+         :return: True if the bucket exists, otherwise False
+         :rtype: bool
+         """
+         resp = self._s3client.list_buckets()
+         return self.bucket_name in [b["Name"] for b in resp["Buckets"]]
+
+     def upload_file(self, source_path: str):
+         """
+         Upload a given file to the bucket from context. The key name will be the sha256sum hexdigest of the file.
+         If a file with that name already exists in the given bucket and the calculated sha256sum matches
+         the sha256sum from S3, nothing will be uploaded. Instead the existing file will be used.
+         This method uses a multipart upload internally, so an upload can be retriggered in case
+         of errors and the previously uploaded parts will be reused.
+         Note: be aware that failed multipart uploads are not deleted. So it's recommended to set up
+         a bucket lifecycle rule to delete incomplete multipart uploads.
+         See https://docs.aws.amazon.com/AmazonS3/latest/userguide//mpu-abort-incomplete-mpu-lifecycle-config.html
+
+         :param source_path: the path to the local file to upload (usually a .vmdk file)
+         :type source_path: str
+         """
+         # make sure the bucket exists
+         if not self._bucket_exists():
+             raise BucketDoesNotExistException(self.bucket_name)
+
+         s3_sha256sum = self._multipart_sha256sum(source_path)
+
+         try:
+             # check if the key already exists in the bucket and if so, if the multipart upload
+             # sha256sum does match
+             head = self._s3client.head_object(
+                 Bucket=self.bucket_name, Key=self._ctx.source_sha256, ChecksumMode="ENABLED"
+             )
+
+             if head["ChecksumSHA256"] == s3_sha256sum:
+                 logger.info(
+                     f"'{self._ctx.source_sha256}' in bucket '{self.bucket_name}' "
+                     "already exists and sha256sum matches. nothing to upload to S3"
+                 )
+                 return
+             else:
+                 logger.warning(
+                     f"'{self._ctx.source_sha256}' in bucket '{self.bucket_name}' "
+                     "already exists but sha256sum does not match. Will be overwritten ..."
+                 )
+         except Exception:
+             logger.debug(f"Can not find '{self._ctx.source_sha256}' in bucket '{self.bucket_name}'")
+
+         # do the real upload
+         self._upload_file_multipart(source_path, s3_sha256sum)
+
+     def _get_multipart_upload_id(self) -> str:
+         """
+         Get an existing multipart upload id or create a new one
+
+         :return: a multipart upload id
+         :rtype: str
+         """
+         resp = self._s3client.list_multipart_uploads(Bucket=self.bucket_name)
+         multipart_uploads = [
+             upload["UploadId"] for upload in resp.get("Uploads", []) if upload["Key"] == self._ctx.source_sha256
+         ]
+         if len(multipart_uploads) == 1:
+             logger.info(f"found existing multipart upload '{multipart_uploads[0]}' for key '{self._ctx.source_sha256}'")
+             return multipart_uploads[0]
+         elif len(multipart_uploads) == 0:
+             # create a new multipart upload
+             resp_create = self._s3client.create_multipart_upload(
+                 Bucket=self.bucket_name,
+                 Key=self._ctx.source_sha256,
+                 ChecksumAlgorithm="SHA256",
+                 ACL="private",
+             )
+             upload_id = resp_create["UploadId"]
+             logger.info(
+                 f"new multipart upload (upload id: '{upload_id}') started in bucket "
+                 f"{self.bucket_name} for key {self._ctx.source_sha256}"
+             )
+             # if there's an expire rule configured for that bucket, inform about it
+             if resp_create.get("AbortDate"):
+                 logger.info(
+                     f"multipart upload '{upload_id}' will expire at "
+                     f"{resp_create['AbortDate']} (rule: {resp_create.get('AbortRuleId')})"
+                 )
+             else:
+                 logger.warning("there is no matching expire/lifecycle rule configured for incomplete multipart uploads")
+             return upload_id
+         else:
+             # multiple multipart uploads for the same key available
+             logger.warning(
+                 f"there are multiple ({len(multipart_uploads)}) multipart uploads ongoing in "
+                 f"bucket {self.bucket_name} for key {self._ctx.source_sha256}"
+             )
+             logger.warning("using the first found multipart upload but you should delete pending multipart uploads")
+             return multipart_uploads[0]
+
+     def _upload_file_multipart(self, source_path: str, s3_sha256sum: str) -> None:
+         """
+         Upload a given file to the bucket from context. The key name will be the sha256sum hexdigest of the file
+
+         :param source_path: the path to the local file to upload (usually a .vmdk file)
+         :type source_path: str
+         :param s3_sha256sum: the sha256sum as S3 calculates it
+         :type s3_sha256sum: str
+         """
+         upload_id = self._get_multipart_upload_id()
+
+         logger.info(f"using upload id '{upload_id}' for multipart upload of '{source_path}' ...")
+         resp_list_parts = self._s3client.list_parts(
+             Bucket=self.bucket_name, Key=self._ctx.source_sha256, UploadId=upload_id
+         )
+
+         # sanity check for the used checksum algorithm
+         if resp_list_parts["ChecksumAlgorithm"] != "SHA256":
+             logger.error(f"available ongoing multipart upload '{upload_id}' does not use SHA256 as checksum algorithm")
+
+         # already available parts
+         parts_available = {p["PartNumber"]: p for p in resp_list_parts.get("Parts", [])}
+         # keep a dict of parts (either already available or created) required to complete the multipart upload
+         parts: Dict[int, CompletedPartTypeDef] = {}
+         parts_size_done: int = 0
+         source_path_size: int = os.path.getsize(source_path)
+         with open(source_path, "rb") as f:
+             # part numbers start at 1 (not 0)
+             for part_number, chunk in enumerate(iter(lambda: f.read(MULTIPART_CHUNK_SIZE), b""), start=1):
+                 # the sha256sum of the current part
+                 sha256_part = base64.b64encode(hashlib.sha256(chunk).digest()).decode("ascii")
+                 # do nothing if that part number already exists and the sha256sum matches
+                 if parts_available.get(part_number):
+                     if parts_available[part_number]["ChecksumSHA256"] == sha256_part:
+                         logger.info(f"part {part_number} already exists and sha256sum matches. continue")
+                         parts[part_number] = dict(
+                             PartNumber=part_number,
+                             ETag=parts_available[part_number]["ETag"],
+                             ChecksumSHA256=parts_available[part_number]["ChecksumSHA256"],
+                         )
+                         parts_size_done += len(chunk)
+                         continue
+                     else:
+                         logger.info(f"part {part_number} already exists but will be overwritten")
+
+                 # upload a new part
+                 resp_upload_part = self._s3client.upload_part(
+                     Body=chunk,
+                     Bucket=self.bucket_name,
+                     ContentLength=len(chunk),
+                     ChecksumAlgorithm="SHA256",
+                     ChecksumSHA256=sha256_part,
+                     Key=self._ctx.source_sha256,
+                     PartNumber=part_number,
+                     UploadId=upload_id,
+                 )
+                 parts_size_done += len(chunk)
+                 # add the new part to the dict of parts
+                 parts[part_number] = dict(
+                     PartNumber=part_number,
+                     ETag=resp_upload_part["ETag"],
+                     ChecksumSHA256=sha256_part,
+                 )
+                 logger.info(
+                     f"part {part_number} uploaded ({round(parts_size_done/source_path_size * 100, 2)}% "
+                     f"; {parts_size_done} / {source_path_size} bytes)"
+                 )
+
+         logger.info(
+             f"finishing the multipart upload for key '{self._ctx.source_sha256}' in bucket {self.bucket_name} now ..."
+         )
+         # finish the multipart upload
+         self._s3client.complete_multipart_upload(
+             Bucket=self.bucket_name,
+             Key=self._ctx.source_sha256,
+             UploadId=upload_id,
+             ChecksumSHA256=s3_sha256sum,
+             MultipartUpload={"Parts": list(parts.values())},
+         )
+         logger.info(
+             f"multipart upload finished and key '{self._ctx.source_sha256}' now "
+             f"available in bucket '{self.bucket_name}'"
+         )
+
+         # add tagging to the final s3 object
+         self._s3client.put_object_tagging(
+             Bucket=self.bucket_name,
+             Key=self._ctx.source_sha256,
+             Tagging={
+                 "TagSet": self._ctx.tags,
+             },
+         )
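
The _multipart_sha256sum logic mirrors S3's checksum-of-checksums scheme: the object-level ChecksumSHA256 is the base64-encoded SHA256 over the concatenated binary digests of all parts, suffixed with the part count. A minimal self-contained sketch (the payload below is made up):

    import base64
    import hashlib

    chunks = [b"a" * (8 * 1024 * 1024), b"b" * 1024]  # hypothetical 2-part payload
    concatenated = b"".join(hashlib.sha256(c).digest() for c in chunks)
    checksum = base64.b64encode(hashlib.sha256(concatenated).digest()).decode("ascii")
    # comparable to the ChecksumSHA256 returned by head_object(..., ChecksumMode="ENABLED")
    print(f"{checksum}-{len(chunks)}")
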
awspub/snapshot.py ADDED
@@ -0,0 +1,241 @@
+ import logging
+ from typing import Dict, List, Optional
+
+ import boto3
+ from mypy_boto3_ec2.client import EC2Client
+
+ from awspub import exceptions
+ from awspub.context import Context
+
+ logger = logging.getLogger(__name__)
+
+
+ class Snapshot:
+     """
+     Handle EC2 Snapshot API interaction
+     """
+
+     def __init__(self, context: Context):
+         self._ctx: Context = context
+
+     def _get(self, ec2client: EC2Client, snapshot_name: str) -> Optional[str]:
+         """
+         Get the snapshot id for the given name or None
+
+         :param ec2client: EC2 client for a specific region
+         :type ec2client: EC2Client
+         :param snapshot_name: the Snapshot name
+         :type snapshot_name: str
+         :return: Either None or a snapshot-id
+         :rtype: Optional[str]
+         """
+         resp = ec2client.describe_snapshots(
+             Filters=[
+                 {
+                     "Name": "tag:Name",
+                     "Values": [
+                         snapshot_name,
+                     ],
+                 },
+                 {
+                     "Name": "status",
+                     "Values": [
+                         "pending",
+                         "completed",
+                     ],
+                 },
+             ],
+             OwnerIds=["self"],
+         )
+         if len(resp.get("Snapshots", [])) == 1:
+             return resp["Snapshots"][0]["SnapshotId"]
+         elif len(resp.get("Snapshots", [])) == 0:
+             return None
+         else:
+             raise exceptions.MultipleSnapshotsException(
+                 f"Found {len(resp.get('Snapshots', []))} snapshots with "
+                 f"name '{snapshot_name}' in region {ec2client.meta.region_name}"
+             )
+
+     def _get_import_snapshot_task(self, ec2client: EC2Client, snapshot_name: str) -> Optional[str]:
+         """
+         Get an import snapshot task for the given name
+
+         :param ec2client: EC2 client for a specific region
+         :type ec2client: EC2Client
+         :param snapshot_name: the Snapshot name
+         :type snapshot_name: str
+         :return: Either None or an import-snapshot-task-id
+         :rtype: Optional[str]
+         """
+         resp = ec2client.describe_import_snapshot_tasks(
+             Filters=[
+                 {
+                     "Name": "tag:Name",
+                     "Values": [
+                         snapshot_name,
+                     ],
+                 }
+             ]
+         )
+         # the API doesn't support filtering by status, so filter here
+         tasks: List = resp.get("ImportSnapshotTasks", [])
+         # we already know here that the snapshot does not exist (checked in create() before calling this
+         # function), so ignore "deleted" or "completed" tasks here.
+         # it might happen (for whatever reason) that a task got completed but the snapshot got deleted
+         # afterwards. In that case a "completed" task for the given snapshot_name exists but
+         # that doesn't help, so ignore it
+         tasks = [t for t in tasks if t["SnapshotTaskDetail"]["Status"] not in ["deleted", "completed"]]
+         if len(tasks) == 1:
+             return tasks[0]["ImportTaskId"]
+         elif len(tasks) == 0:
+             return None
+         else:
+             raise exceptions.MultipleImportSnapshotTasksException(
+                 f"Found {len(tasks)} import snapshot tasks with "
+                 f"name '{snapshot_name}' in region {ec2client.meta.region_name}"
+             )
+
+     def create(self, ec2client: EC2Client, snapshot_name: str) -> str:
+         """
+         Create an EC2 snapshot with the given name.
+         If the snapshot already exists, just return the snapshot-id for the existing snapshot.
+
+         :param ec2client: EC2 client for a specific region
+         :type ec2client: EC2Client
+         :param snapshot_name: the Snapshot name
+         :type snapshot_name: str
+         :return: a snapshot-id
+         :rtype: str
+         """
+         # does a snapshot with the given name already exist?
+         snap_id: Optional[str] = self._get(ec2client, snapshot_name)
+         if snap_id:
+             logger.info(f"snapshot with name '{snapshot_name}' already exists in region {ec2client.meta.region_name}")
+             return snap_id
+
+         logger.info(
+             f"Create snapshot from bucket '{self._ctx.conf['s3']['bucket_name']}' "
+             f"for '{snapshot_name}' in region {ec2client.meta.region_name}"
+         )
+
+         # extend the tags (copy to avoid mutating the shared context tag list)
+         tags = self._ctx.tags + [{"Key": "Name", "Value": snapshot_name}]
+
+         # does an import snapshot task with the given name already exist?
+         import_snapshot_task_id: Optional[str] = self._get_import_snapshot_task(ec2client, snapshot_name)
+         if import_snapshot_task_id:
+             logger.info(
+                 f"import snapshot task ({import_snapshot_task_id}) with "
+                 f"name '{snapshot_name}' exists in region {ec2client.meta.region_name}"
+             )
+         else:
+             resp = ec2client.import_snapshot(
+                 Description="Import ",
+                 DiskContainer={
+                     "Description": "",
+                     "Format": "vmdk",
+                     "UserBucket": {
+                         "S3Bucket": self._ctx.conf["s3"]["bucket_name"],
+                         "S3Key": self._ctx.source_sha256,
+                     },
+                 },
+                 TagSpecifications=[
+                     {"ResourceType": "import-snapshot-task", "Tags": tags},
+                 ],
+             )
+             import_snapshot_task_id = resp["ImportTaskId"]
+
+         logger.info(
+             f"Waiting for snapshot import task (id: {import_snapshot_task_id}) "
+             f"in region {ec2client.meta.region_name} ..."
+         )
+
+         waiter_import = ec2client.get_waiter("snapshot_imported")
+         waiter_import.wait(ImportTaskIds=[import_snapshot_task_id], WaiterConfig={"Delay": 30, "MaxAttempts": 90})
+
+         task_details = ec2client.describe_import_snapshot_tasks(ImportTaskIds=[import_snapshot_task_id])
+         snapshot_id = task_details["ImportSnapshotTasks"][0]["SnapshotTaskDetail"]["SnapshotId"]
+
+         # create tags before waiting for completion so the tags are already there
+         ec2client.create_tags(Resources=[snapshot_id], Tags=tags)
+
+         waiter_completed = ec2client.get_waiter("snapshot_completed")
+         waiter_completed.wait(SnapshotIds=[snapshot_id], WaiterConfig={"Delay": 30, "MaxAttempts": 60})
+
+         logger.info(f"Snapshot import as '{snapshot_id}' in region {ec2client.meta.region_name} done")
+         return snapshot_id
+
+     def _copy(self, snapshot_name: str, source_region: str, destination_region: str) -> str:
+         """
+         Copy an EC2 snapshot for the given context to the destination region.
+         NOTE: we don't wait for the snapshot to complete here!
+
+         :param snapshot_name: the Snapshot name to copy
+         :type snapshot_name: str
+         :param source_region: the region to copy the snapshot from
+         :type source_region: str
+         :param destination_region: the region to copy the snapshot to
+         :type destination_region: str
+
+         :return: the existing or created snapshot-id
+         :rtype: str
+         """
+
+         # does a snapshot with that name already exist in the destination region?
+         ec2client_dest: EC2Client = boto3.client("ec2", region_name=destination_region)
+         snapshot_id: Optional[str] = self._get(ec2client_dest, snapshot_name)
+         if snapshot_id:
+             logger.info(
+                 f"snapshot with name '{snapshot_name}' already "
+                 f"exists ({snapshot_id}) in destination region {ec2client_dest.meta.region_name}"
+             )
+             return snapshot_id
+
+         ec2client_source: EC2Client = boto3.client("ec2", region_name=source_region)
+         source_snapshot_id: Optional[str] = self._get(ec2client_source, snapshot_name)
+         if not source_snapshot_id:
+             raise ValueError(
+                 f"Can not find source snapshot with name '{snapshot_name}' "
+                 f"in region {ec2client_source.meta.region_name}"
+             )
+
+         logger.info(f"Copy snapshot {source_snapshot_id} from {source_region} to {destination_region}")
+         # extend the tags (copy to avoid mutating the shared context tag list)
+         tags = self._ctx.tags + [{"Key": "Name", "Value": snapshot_name}]
+         resp = ec2client_dest.copy_snapshot(
+             SourceRegion=source_region,
+             SourceSnapshotId=source_snapshot_id,
+             TagSpecifications=[{"ResourceType": "snapshot", "Tags": tags}],
+         )
+
+         # note: we don't wait for the snapshot to complete here!
+         return resp["SnapshotId"]
+
+     def copy(self, snapshot_name: str, source_region: str, destination_regions: List[str]) -> Dict[str, str]:
+         """
+         Copy a snapshot to multiple regions
+
+         :param snapshot_name: the Snapshot name to copy
+         :type snapshot_name: str
+         :param source_region: the region to copy the snapshot from
+         :type source_region: str
+         :param destination_regions: a list of regions to copy the snapshot to
+         :type destination_regions: List[str]
+         :return: a dict with region/snapshot-id mapping for the newly copied snapshots
+         :rtype: Dict[str, str] where the key is a region name and the value a snapshot-id
+         """
+         snapshot_ids: Dict[str, str] = dict()
+         for destination_region in destination_regions:
+             snapshot_ids[destination_region] = self._copy(snapshot_name, source_region, destination_region)
+
+         logger.info(f"Waiting for {len(snapshot_ids)} snapshots to appear in the destination regions ...")
+         for destination_region, snapshot_id in snapshot_ids.items():
+             ec2client_dest = boto3.client("ec2", region_name=destination_region)
+             waiter = ec2client_dest.get_waiter("snapshot_completed")
+             logger.info(f"Waiting for {snapshot_id} in {ec2client_dest.meta.region_name} to complete ...")
+             waiter.wait(SnapshotIds=[snapshot_id], WaiterConfig={"Delay": 30, "MaxAttempts": 90})
+
+         return snapshot_ids
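
A hedged usage sketch tying the S3 upload and snapshot import together; the Context constructor signature and the snapshot name used here are assumptions, not shown in this diff:

    import boto3

    from awspub.context import Context
    from awspub.snapshot import Snapshot

    ctx = Context("config.yaml")  # hypothetical constructor
    snap = Snapshot(ctx)
    ec2 = boto3.client("ec2", region_name="us-east-1")
    # import the .vmdk (previously uploaded by S3.upload_file) as a snapshot,
    # then fan it out to other regions
    snapshot_id = snap.create(ec2, "my-snapshot-name")
    ids = snap.copy("my-snapshot-name", "us-east-1", ["eu-central-1", "ap-southeast-2"])
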
File without changes
@@ -0,0 +1,12 @@
+ awspub:
+   s3:
+     bucket_name: "bucket1"
+     invalid_field: "not allowed"  # this is an invalid field
+   source:
+     path: "config1.vmdk"
+     architecture: "x86_64"
+   images:
+     test-image:
+       description: "Test Image"
+       separate_snapshot: "False"
+       boot_mode: "uefi-preferred"
@@ -0,0 +1,12 @@
+ ---
+ awspub:
+   source:
+     path: "config1.vmdk"
+     architecture: "x86_64"
+
+   s3:
+     bucket_name: "bucket1"
+
+   images:
+     "my-custom-image":
+       boot_mode: "uefi-preferred"
@@ -0,0 +1,13 @@
+ awspub:
+   s3:
+     bucket_name: "bucket1"
+   source:
+     path: "config1.vmdk"
+     architecture: "x86_64"
+   images:
+     test-image:
+       description: "Test Image"
+       separate_snapshot: "False"
+       boot_mode: "uefi-preferred"
+ notawspub:  # to make sure config outside of the toplevel `awspub` dict is allowed
+   foo_bar: "irrelevant"
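
These fixtures suggest the config contract: everything awspub reads lives under the top-level `awspub` key, and sibling keys (like `notawspub` above) are tolerated and ignored. A minimal loading sketch with PyYAML; the package's real parsing and validation logic is not shown in this diff:

    import yaml

    with open("config.yaml") as f:
        doc = yaml.safe_load(f)

    conf = doc["awspub"]  # keys outside 'awspub' are ignored
    print(conf["s3"]["bucket_name"], conf["source"]["path"])
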
Binary file