cdk-ecr-deployment 3.3.1 → 4.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.jsii +26 -103
- package/.jsii.tabl.json +1 -1
- package/API.md +0 -59
- package/README.md +7 -17
- package/lib/index.d.ts +1 -29
- package/lib/index.js +9 -26
- package/package.json +1 -1
- package/lambda-src/.dockerignore +0 -9
- package/lambda-src/Dockerfile +0 -26
- package/lambda-src/Makefile +0 -33
- package/lambda-src/go.mod +0 -136
- package/lambda-src/go.sum +0 -499
- package/lambda-src/internal/iolimits/iolimits.go +0 -68
- package/lambda-src/internal/iolimits/iolimits_test.go +0 -39
- package/lambda-src/internal/tarfile/reader.go +0 -179
- package/lambda-src/internal/tarfile/reader_test.go +0 -31
- package/lambda-src/internal/tarfile/s3file.go +0 -363
- package/lambda-src/internal/tarfile/s3file_test.go +0 -135
- package/lambda-src/internal/tarfile/src.go +0 -328
- package/lambda-src/internal/tarfile/types.go +0 -31
- package/lambda-src/main.go +0 -176
- package/lambda-src/main_test.go +0 -64
- package/lambda-src/s3/src.go +0 -40
- package/lambda-src/s3/transport.go +0 -149
- package/lambda-src/s3/transport_test.go +0 -96
- package/lambda-src/utils.go +0 -206
- package/lambda-src/utils_test.go +0 -63
- package/lib/config.d.ts +0 -1
- package/lib/config.js +0 -12
|
@@ -1,328 +0,0 @@
|
|
|
1
|
-
// Taken from https://github.com/containers/image
|
|
2
|
-
// Modifications Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
|
3
|
-
|
|
4
|
-
package tarfile
|
|
5
|
-
|
|
6
|
-
import (
|
|
7
|
-
"archive/tar"
|
|
8
|
-
"bytes"
|
|
9
|
-
"cdk-ecr-deployment-handler/internal/iolimits"
|
|
10
|
-
"context"
|
|
11
|
-
"encoding/json"
|
|
12
|
-
"io"
|
|
13
|
-
"io/ioutil"
|
|
14
|
-
"path"
|
|
15
|
-
"sync"
|
|
16
|
-
|
|
17
|
-
"github.com/containers/image/v5/docker/reference"
|
|
18
|
-
"github.com/containers/image/v5/manifest"
|
|
19
|
-
"github.com/containers/image/v5/pkg/compression"
|
|
20
|
-
"github.com/containers/image/v5/types"
|
|
21
|
-
digest "github.com/opencontainers/go-digest"
|
|
22
|
-
"github.com/pkg/errors"
|
|
23
|
-
)
|
|
24
|
-
|
|
25
|
-
// S3FileSource is a partial implementation of types.ImageSource for reading from tarPath.
|
|
26
|
-
type S3FileSource struct {
|
|
27
|
-
s3fileReader *S3FileReader
|
|
28
|
-
closeArchive bool // .Close() the archive when the source is closed.
|
|
29
|
-
// If ref is nil and sourceIndex is -1, indicates the only image in the archive.
|
|
30
|
-
ref reference.NamedTagged // May be nil
|
|
31
|
-
sourceIndex int // May be -1
|
|
32
|
-
// The following data is only available after ensureCachedDataIsPresent() succeeds
|
|
33
|
-
tarManifest *ManifestItem // nil if not available yet.
|
|
34
|
-
configBytes []byte
|
|
35
|
-
configDigest digest.Digest
|
|
36
|
-
orderedDiffIDList []digest.Digest
|
|
37
|
-
knownLayers map[digest.Digest]*layerInfo
|
|
38
|
-
// Other state
|
|
39
|
-
generatedManifest []byte // Private cache for GetManifest(), nil if not set yet.
|
|
40
|
-
cacheDataLock sync.Once // Private state for ensureCachedDataIsPresent to make it concurrency-safe
|
|
41
|
-
cacheDataResult error // Private state for ensureCachedDataIsPresent
|
|
42
|
-
}
|
|
43
|
-
|
|
44
|
-
// layerInfo records where a layer tarball lives inside the archive and its
// uncompressed size (-1 until the size has been determined).
type layerInfo struct {
	path string
	size int64
}
|
|
48
|
-
|
|
49
|
-
// NewSource returns a tarfile.Source for an image in the specified archive matching ref
|
|
50
|
-
// and sourceIndex (or the only image if they are (nil, -1)).
|
|
51
|
-
// The archive will be closed if closeArchive
|
|
52
|
-
func NewSource(archive *S3FileReader, closeArchive bool, ref reference.NamedTagged, sourceIndex int) *S3FileSource {
|
|
53
|
-
return &S3FileSource{
|
|
54
|
-
s3fileReader: archive,
|
|
55
|
-
closeArchive: closeArchive,
|
|
56
|
-
ref: ref,
|
|
57
|
-
sourceIndex: sourceIndex,
|
|
58
|
-
}
|
|
59
|
-
}
|
|
60
|
-
|
|
61
|
-
// ensureCachedDataIsPresent loads data necessary for any of the public accessors.
|
|
62
|
-
// It is safe to call this from multi-threaded code.
|
|
63
|
-
func (s *S3FileSource) ensureCachedDataIsPresent() error {
|
|
64
|
-
s.cacheDataLock.Do(func() {
|
|
65
|
-
s.cacheDataResult = s.ensureCachedDataIsPresentPrivate()
|
|
66
|
-
})
|
|
67
|
-
return s.cacheDataResult
|
|
68
|
-
}
|
|
69
|
-
|
|
70
|
-
// ensureCachedDataIsPresentPrivate is a private implementation detail of ensureCachedDataIsPresent.
|
|
71
|
-
// Call ensureCachedDataIsPresent instead.
|
|
72
|
-
func (s *S3FileSource) ensureCachedDataIsPresentPrivate() error {
|
|
73
|
-
tarManifest, _, err := s.s3fileReader.ChooseManifestItem(s.ref, s.sourceIndex)
|
|
74
|
-
if err != nil {
|
|
75
|
-
return err
|
|
76
|
-
}
|
|
77
|
-
|
|
78
|
-
// Read and parse config.
|
|
79
|
-
configBytes, err := s.s3fileReader.readTarComponent(tarManifest.Config, iolimits.MaxConfigBodySize)
|
|
80
|
-
if err != nil {
|
|
81
|
-
return err
|
|
82
|
-
}
|
|
83
|
-
var parsedConfig manifest.Schema2Image // There's a lot of info there, but we only really care about layer DiffIDs.
|
|
84
|
-
if err := json.Unmarshal(configBytes, &parsedConfig); err != nil {
|
|
85
|
-
return errors.Wrapf(err, "Error decoding tar config %s", tarManifest.Config)
|
|
86
|
-
}
|
|
87
|
-
if parsedConfig.RootFS == nil {
|
|
88
|
-
return errors.Errorf("Invalid image config (rootFS is not set): %s", tarManifest.Config)
|
|
89
|
-
}
|
|
90
|
-
|
|
91
|
-
knownLayers, err := s.prepareLayerData(tarManifest, &parsedConfig)
|
|
92
|
-
if err != nil {
|
|
93
|
-
return err
|
|
94
|
-
}
|
|
95
|
-
|
|
96
|
-
// Success; commit.
|
|
97
|
-
s.tarManifest = tarManifest
|
|
98
|
-
s.configBytes = configBytes
|
|
99
|
-
s.configDigest = digest.FromBytes(configBytes)
|
|
100
|
-
s.orderedDiffIDList = parsedConfig.RootFS.DiffIDs
|
|
101
|
-
s.knownLayers = knownLayers
|
|
102
|
-
return nil
|
|
103
|
-
}
|
|
104
|
-
|
|
105
|
-
// Close removes resources associated with an initialized Source, if any.
|
|
106
|
-
func (s *S3FileSource) Close() error {
|
|
107
|
-
if s.closeArchive {
|
|
108
|
-
return s.s3fileReader.Close()
|
|
109
|
-
}
|
|
110
|
-
return nil
|
|
111
|
-
}
|
|
112
|
-
|
|
113
|
-
// TarManifest returns contents of manifest.json
|
|
114
|
-
func (s *S3FileSource) TarManifest() []ManifestItem {
|
|
115
|
-
return s.s3fileReader.Manifest
|
|
116
|
-
}
|
|
117
|
-
|
|
118
|
-
func (s *S3FileSource) prepareLayerData(tarManifest *ManifestItem, parsedConfig *manifest.Schema2Image) (map[digest.Digest]*layerInfo, error) {
|
|
119
|
-
// Collect layer data available in manifest and config.
|
|
120
|
-
if len(tarManifest.Layers) != len(parsedConfig.RootFS.DiffIDs) {
|
|
121
|
-
return nil, errors.Errorf("Inconsistent layer count: %d in manifest, %d in config", len(tarManifest.Layers), len(parsedConfig.RootFS.DiffIDs))
|
|
122
|
-
}
|
|
123
|
-
knownLayers := map[digest.Digest]*layerInfo{}
|
|
124
|
-
unknownLayerSizes := map[string]*layerInfo{} // Points into knownLayers, a "to do list" of items with unknown sizes.
|
|
125
|
-
for i, diffID := range parsedConfig.RootFS.DiffIDs {
|
|
126
|
-
if _, ok := knownLayers[diffID]; ok {
|
|
127
|
-
// Apparently it really can happen that a single image contains the same layer diff more than once.
|
|
128
|
-
// In that case, the diffID validation ensures that both layers truly are the same, and it should not matter
|
|
129
|
-
// which of the tarManifest.Layers paths is used; (docker save) actually makes the duplicates symlinks to the original.
|
|
130
|
-
continue
|
|
131
|
-
}
|
|
132
|
-
layerPath := path.Clean(tarManifest.Layers[i])
|
|
133
|
-
if _, ok := unknownLayerSizes[layerPath]; ok {
|
|
134
|
-
return nil, errors.Errorf("Layer tarfile %s used for two different DiffID values", layerPath)
|
|
135
|
-
}
|
|
136
|
-
li := &layerInfo{ // A new element in each iteration
|
|
137
|
-
path: layerPath,
|
|
138
|
-
size: -1,
|
|
139
|
-
}
|
|
140
|
-
knownLayers[diffID] = li
|
|
141
|
-
unknownLayerSizes[layerPath] = li
|
|
142
|
-
}
|
|
143
|
-
|
|
144
|
-
// Scan the tar file to collect layer sizes.
|
|
145
|
-
t := tar.NewReader(s.s3fileReader.s3file)
|
|
146
|
-
for {
|
|
147
|
-
h, err := t.Next()
|
|
148
|
-
if err == io.EOF {
|
|
149
|
-
break
|
|
150
|
-
}
|
|
151
|
-
if err != nil {
|
|
152
|
-
return nil, err
|
|
153
|
-
}
|
|
154
|
-
layerPath := path.Clean(h.Name)
|
|
155
|
-
// FIXME: Cache this data across images in Reader.
|
|
156
|
-
if li, ok := unknownLayerSizes[layerPath]; ok {
|
|
157
|
-
// Since GetBlob will decompress layers that are compressed we need
|
|
158
|
-
// to do the decompression here as well, otherwise we will
|
|
159
|
-
// incorrectly report the size. Pretty critical, since tools like
|
|
160
|
-
// umoci always compress layer blobs. Obviously we only bother with
|
|
161
|
-
// the slower method of checking if it's compressed.
|
|
162
|
-
uncompressedStream, isCompressed, err := compression.AutoDecompress(t)
|
|
163
|
-
if err != nil {
|
|
164
|
-
return nil, errors.Wrapf(err, "Error auto-decompressing %s to determine its size", layerPath)
|
|
165
|
-
}
|
|
166
|
-
defer uncompressedStream.Close()
|
|
167
|
-
|
|
168
|
-
uncompressedSize := h.Size
|
|
169
|
-
if isCompressed {
|
|
170
|
-
uncompressedSize, err = io.Copy(ioutil.Discard, uncompressedStream)
|
|
171
|
-
if err != nil {
|
|
172
|
-
return nil, errors.Wrapf(err, "Error reading %s to find its size", layerPath)
|
|
173
|
-
}
|
|
174
|
-
}
|
|
175
|
-
li.size = uncompressedSize
|
|
176
|
-
delete(unknownLayerSizes, layerPath)
|
|
177
|
-
}
|
|
178
|
-
}
|
|
179
|
-
if len(unknownLayerSizes) != 0 {
|
|
180
|
-
return nil, errors.Errorf("Some layer tarfiles are missing in the tarball") // This could do with a better error reporting, if this ever happened in practice.
|
|
181
|
-
}
|
|
182
|
-
|
|
183
|
-
return knownLayers, nil
|
|
184
|
-
}
|
|
185
|
-
|
|
186
|
-
// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
|
|
187
|
-
// It may use a remote (= slow) service.
|
|
188
|
-
// If instanceDigest is not nil, it contains a digest of the specific manifest instance to retrieve (when the primary manifest is a manifest list);
|
|
189
|
-
// this never happens if the primary manifest is not a manifest list (e.g. if the source never returns manifest lists).
|
|
190
|
-
// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
|
|
191
|
-
// as the primary manifest can not be a list, so there can be no secondary instances.
|
|
192
|
-
func (s *S3FileSource) GetManifest(ctx context.Context, instanceDigest *digest.Digest) ([]byte, string, error) {
|
|
193
|
-
if instanceDigest != nil {
|
|
194
|
-
// How did we even get here? GetManifest(ctx, nil) has returned a manifest.DockerV2Schema2MediaType.
|
|
195
|
-
return nil, "", errors.New(`Manifest lists are not supported by "docker-daemon:"`)
|
|
196
|
-
}
|
|
197
|
-
if s.generatedManifest == nil {
|
|
198
|
-
if err := s.ensureCachedDataIsPresent(); err != nil {
|
|
199
|
-
return nil, "", err
|
|
200
|
-
}
|
|
201
|
-
m := manifest.Schema2{
|
|
202
|
-
SchemaVersion: 2,
|
|
203
|
-
MediaType: manifest.DockerV2Schema2MediaType,
|
|
204
|
-
ConfigDescriptor: manifest.Schema2Descriptor{
|
|
205
|
-
MediaType: manifest.DockerV2Schema2ConfigMediaType,
|
|
206
|
-
Size: int64(len(s.configBytes)),
|
|
207
|
-
Digest: s.configDigest,
|
|
208
|
-
},
|
|
209
|
-
LayersDescriptors: []manifest.Schema2Descriptor{},
|
|
210
|
-
}
|
|
211
|
-
for _, diffID := range s.orderedDiffIDList {
|
|
212
|
-
li, ok := s.knownLayers[diffID]
|
|
213
|
-
if !ok {
|
|
214
|
-
return nil, "", errors.Errorf("Internal inconsistency: Information about layer %s missing", diffID)
|
|
215
|
-
}
|
|
216
|
-
m.LayersDescriptors = append(m.LayersDescriptors, manifest.Schema2Descriptor{
|
|
217
|
-
Digest: diffID, // diffID is a digest of the uncompressed tarball
|
|
218
|
-
MediaType: manifest.DockerV2Schema2LayerMediaType,
|
|
219
|
-
Size: li.size,
|
|
220
|
-
})
|
|
221
|
-
}
|
|
222
|
-
manifestBytes, err := json.Marshal(&m)
|
|
223
|
-
if err != nil {
|
|
224
|
-
return nil, "", err
|
|
225
|
-
}
|
|
226
|
-
s.generatedManifest = manifestBytes
|
|
227
|
-
}
|
|
228
|
-
return s.generatedManifest, manifest.DockerV2Schema2MediaType, nil
|
|
229
|
-
}
|
|
230
|
-
|
|
231
|
-
// uncompressedReadCloser is an io.ReadCloser that closes both the uncompressed
// stream and the underlying input.
type uncompressedReadCloser struct {
	io.Reader
	underlyingCloser   func() error
	uncompressedCloser func() error
}

// Close closes the uncompressed stream first and then the underlying input,
// returning the first error encountered, if any.
func (r uncompressedReadCloser) Close() error {
	var firstErr error
	for _, closeFn := range []func() error{r.uncompressedCloser, r.underlyingCloser} {
		if err := closeFn(); err != nil && firstErr == nil {
			firstErr = err
		}
	}
	return firstErr
}
|
|
248
|
-
|
|
249
|
-
// HasThreadSafeGetBlob indicates whether GetBlob can be executed concurrently.
|
|
250
|
-
func (s *S3FileSource) HasThreadSafeGetBlob() bool {
|
|
251
|
-
return false // Not supported yet
|
|
252
|
-
}
|
|
253
|
-
|
|
254
|
-
// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
|
|
255
|
-
// The Digest field in BlobInfo is guaranteed to be provided, Size may be -1 and MediaType may be optionally provided.
|
|
256
|
-
// May update BlobInfoCache, preferably after it knows for certain that a blob truly exists at a specific location.
|
|
257
|
-
func (s *S3FileSource) GetBlob(ctx context.Context, info types.BlobInfo, cache types.BlobInfoCache) (io.ReadCloser, int64, error) {
|
|
258
|
-
if err := s.ensureCachedDataIsPresent(); err != nil {
|
|
259
|
-
return nil, 0, err
|
|
260
|
-
}
|
|
261
|
-
|
|
262
|
-
if info.Digest == s.configDigest { // FIXME? Implement a more general algorithm matching instead of assuming sha256.
|
|
263
|
-
return ioutil.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil
|
|
264
|
-
}
|
|
265
|
-
|
|
266
|
-
if li, ok := s.knownLayers[info.Digest]; ok { // diffID is a digest of the uncompressed tarball,
|
|
267
|
-
underlyingStream, err := s.s3fileReader.openTarComponent(li.path)
|
|
268
|
-
if err != nil {
|
|
269
|
-
return nil, 0, err
|
|
270
|
-
}
|
|
271
|
-
closeUnderlyingStream := true
|
|
272
|
-
defer func() {
|
|
273
|
-
if closeUnderlyingStream {
|
|
274
|
-
underlyingStream.Close()
|
|
275
|
-
}
|
|
276
|
-
}()
|
|
277
|
-
|
|
278
|
-
// In order to handle the fact that digests != diffIDs (and thus that a
|
|
279
|
-
// caller which is trying to verify the blob will run into problems),
|
|
280
|
-
// we need to decompress blobs. This is a bit ugly, but it's a
|
|
281
|
-
// consequence of making everything addressable by their DiffID rather
|
|
282
|
-
// than by their digest...
|
|
283
|
-
//
|
|
284
|
-
// In particular, because the v2s2 manifest being generated uses
|
|
285
|
-
// DiffIDs, any caller of GetBlob is going to be asking for DiffIDs of
|
|
286
|
-
// layers not their _actual_ digest. The result is that copy/... will
|
|
287
|
-
// be verifying a "digest" which is not the actual layer's digest (but
|
|
288
|
-
// is instead the DiffID).
|
|
289
|
-
|
|
290
|
-
uncompressedStream, _, err := compression.AutoDecompress(underlyingStream)
|
|
291
|
-
if err != nil {
|
|
292
|
-
return nil, 0, errors.Wrapf(err, "Error auto-decompressing blob %s", info.Digest)
|
|
293
|
-
}
|
|
294
|
-
|
|
295
|
-
newStream := uncompressedReadCloser{
|
|
296
|
-
Reader: uncompressedStream,
|
|
297
|
-
underlyingCloser: underlyingStream.Close,
|
|
298
|
-
uncompressedCloser: uncompressedStream.Close,
|
|
299
|
-
}
|
|
300
|
-
closeUnderlyingStream = false
|
|
301
|
-
|
|
302
|
-
return newStream, li.size, nil
|
|
303
|
-
}
|
|
304
|
-
|
|
305
|
-
return nil, 0, errors.Errorf("Unknown blob %s", info.Digest)
|
|
306
|
-
}
|
|
307
|
-
|
|
308
|
-
// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
|
|
309
|
-
// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
|
|
310
|
-
// as there can be no secondary manifests.
|
|
311
|
-
func (s *S3FileSource) GetSignatures(ctx context.Context, instanceDigest *digest.Digest) ([][]byte, error) {
|
|
312
|
-
if instanceDigest != nil {
|
|
313
|
-
// How did we even get here? GetManifest(ctx, nil) has returned a manifest.DockerV2Schema2MediaType.
|
|
314
|
-
return nil, errors.Errorf(`Manifest lists are not supported by "docker-daemon:"`)
|
|
315
|
-
}
|
|
316
|
-
return [][]byte{}, nil
|
|
317
|
-
}
|
|
318
|
-
|
|
319
|
-
// LayerInfosForCopy returns either nil (meaning the values in the manifest are fine), or updated values for the layer
|
|
320
|
-
// blobsums that are listed in the image's manifest. If values are returned, they should be used when using GetBlob()
|
|
321
|
-
// to read the image's layers.
|
|
322
|
-
// This source implementation does not support manifest lists, so the passed-in instanceDigest should always be nil,
|
|
323
|
-
// as the primary manifest can not be a list, so there can be no secondary manifests.
|
|
324
|
-
// The Digest field is guaranteed to be provided; Size may be -1.
|
|
325
|
-
// WARNING: The list may contain duplicates, and they are semantically relevant.
|
|
326
|
-
func (s *S3FileSource) LayerInfosForCopy(context.Context, *digest.Digest) ([]types.BlobInfo, error) {
|
|
327
|
-
return nil, nil
|
|
328
|
-
}
|
|
@@ -1,31 +0,0 @@
|
|
|
1
|
-
// Taken from https://github.com/containers/image
|
|
2
|
-
// Modifications Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
|
3
|
-
|
|
4
|
-
package tarfile
|
|
5
|
-
|
|
6
|
-
import (
|
|
7
|
-
"github.com/containers/image/v5/manifest"
|
|
8
|
-
"github.com/opencontainers/go-digest"
|
|
9
|
-
)
|
|
10
|
-
|
|
11
|
-
// Various data structures.

// Well-known file names inside a docker-save tarball.
// Based on github.com/docker/docker/image/tarexport/tarexport.go
const (
	manifestFileName           = "manifest.json"
	legacyLayerFileName        = "layer.tar"
	legacyConfigFileName       = "json"
	legacyVersionFileName      = "VERSION"
	legacyRepositoriesFileName = "repositories"
)
|
|
21
|
-
|
|
22
|
-
// ManifestItem is an element of the array stored in the top-level manifest.json file.
|
|
23
|
-
type ManifestItem struct { // NOTE: This is visible as docker/tarfile.ManifestItem, and a part of the stable API.
|
|
24
|
-
Config string
|
|
25
|
-
RepoTags []string
|
|
26
|
-
Layers []string
|
|
27
|
-
Parent imageID `json:",omitempty"`
|
|
28
|
-
LayerSources map[digest.Digest]manifest.Schema2Descriptor `json:",omitempty"`
|
|
29
|
-
}
|
|
30
|
-
|
|
31
|
-
type imageID string
|
package/lambda-src/main.go
DELETED
|
@@ -1,176 +0,0 @@
|
|
|
1
|
-
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
|
2
|
-
// SPDX-License-Identifier: Apache-2.0
|
|
3
|
-
|
|
4
|
-
package main
|
|
5
|
-
|
|
6
|
-
import (
|
|
7
|
-
"context"
|
|
8
|
-
"encoding/json"
|
|
9
|
-
"fmt"
|
|
10
|
-
"log"
|
|
11
|
-
"os"
|
|
12
|
-
|
|
13
|
-
"github.com/containers/image/v5/copy"
|
|
14
|
-
"github.com/containers/image/v5/signature"
|
|
15
|
-
"github.com/containers/image/v5/transports/alltransports"
|
|
16
|
-
"github.com/sirupsen/logrus"
|
|
17
|
-
|
|
18
|
-
"github.com/aws/aws-lambda-go/cfn"
|
|
19
|
-
"github.com/aws/aws-lambda-go/lambda"
|
|
20
|
-
|
|
21
|
-
_ "cdk-ecr-deployment-handler/s3" // Install s3 transport plugin
|
|
22
|
-
)
|
|
23
|
-
|
|
24
|
-
const EnvLogLevel = "LOG_LEVEL"
|
|
25
|
-
|
|
26
|
-
func init() {
|
|
27
|
-
s, exists := os.LookupEnv(EnvLogLevel)
|
|
28
|
-
if !exists {
|
|
29
|
-
logrus.SetLevel(logrus.InfoLevel)
|
|
30
|
-
} else {
|
|
31
|
-
lvl, err := logrus.ParseLevel(s)
|
|
32
|
-
if err != nil {
|
|
33
|
-
logrus.Errorf("error parsing %s: %v", EnvLogLevel, err)
|
|
34
|
-
}
|
|
35
|
-
logrus.SetLevel(lvl)
|
|
36
|
-
}
|
|
37
|
-
}
|
|
38
|
-
|
|
39
|
-
func handler(ctx context.Context, event cfn.Event) (physicalResourceID string, data map[string]interface{}, err error) {
|
|
40
|
-
physicalResourceID = event.PhysicalResourceID
|
|
41
|
-
data = make(map[string]interface{})
|
|
42
|
-
|
|
43
|
-
log.Printf("Event: %s", Dumps(event))
|
|
44
|
-
|
|
45
|
-
if event.RequestType == cfn.RequestDelete {
|
|
46
|
-
return physicalResourceID, data, nil
|
|
47
|
-
}
|
|
48
|
-
if event.RequestType == cfn.RequestCreate || event.RequestType == cfn.RequestUpdate {
|
|
49
|
-
srcImage, err := getStrProps(event.ResourceProperties, SRC_IMAGE)
|
|
50
|
-
if err != nil {
|
|
51
|
-
return physicalResourceID, data, err
|
|
52
|
-
}
|
|
53
|
-
destImage, err := getStrProps(event.ResourceProperties, DEST_IMAGE)
|
|
54
|
-
if err != nil {
|
|
55
|
-
return physicalResourceID, data, err
|
|
56
|
-
}
|
|
57
|
-
imageArch, err := getStrPropsDefault(event.ResourceProperties, IMAGE_ARCH, "")
|
|
58
|
-
if err != nil {
|
|
59
|
-
return physicalResourceID, data, err
|
|
60
|
-
}
|
|
61
|
-
srcCreds, err := getStrPropsDefault(event.ResourceProperties, SRC_CREDS, "")
|
|
62
|
-
if err != nil {
|
|
63
|
-
return physicalResourceID, data, err
|
|
64
|
-
}
|
|
65
|
-
destCreds, err := getStrPropsDefault(event.ResourceProperties, DEST_CREDS, "")
|
|
66
|
-
if err != nil {
|
|
67
|
-
return physicalResourceID, data, err
|
|
68
|
-
}
|
|
69
|
-
|
|
70
|
-
srcCreds, err = parseCreds(srcCreds)
|
|
71
|
-
if err != nil {
|
|
72
|
-
return physicalResourceID, data, err
|
|
73
|
-
}
|
|
74
|
-
destCreds, err = parseCreds(destCreds)
|
|
75
|
-
if err != nil {
|
|
76
|
-
return physicalResourceID, data, err
|
|
77
|
-
}
|
|
78
|
-
|
|
79
|
-
log.Printf("SrcImage: %v DestImage: %v ImageArch: %v", srcImage, destImage, imageArch)
|
|
80
|
-
|
|
81
|
-
srcRef, err := alltransports.ParseImageName(srcImage)
|
|
82
|
-
if err != nil {
|
|
83
|
-
return physicalResourceID, data, err
|
|
84
|
-
}
|
|
85
|
-
destRef, err := alltransports.ParseImageName(destImage)
|
|
86
|
-
if err != nil {
|
|
87
|
-
return physicalResourceID, data, err
|
|
88
|
-
}
|
|
89
|
-
|
|
90
|
-
srcOpts := NewImageOpts(srcImage, imageArch)
|
|
91
|
-
srcOpts.SetCreds(srcCreds)
|
|
92
|
-
srcCtx, err := srcOpts.NewSystemContext()
|
|
93
|
-
if err != nil {
|
|
94
|
-
return physicalResourceID, data, err
|
|
95
|
-
}
|
|
96
|
-
destOpts := NewImageOpts(destImage, imageArch)
|
|
97
|
-
destOpts.SetCreds(destCreds)
|
|
98
|
-
destCtx, err := destOpts.NewSystemContext()
|
|
99
|
-
if err != nil {
|
|
100
|
-
return physicalResourceID, data, err
|
|
101
|
-
}
|
|
102
|
-
|
|
103
|
-
ctx, cancel := newTimeoutContext()
|
|
104
|
-
defer cancel()
|
|
105
|
-
policyContext, err := newPolicyContext()
|
|
106
|
-
if err != nil {
|
|
107
|
-
return physicalResourceID, data, err
|
|
108
|
-
}
|
|
109
|
-
defer policyContext.Destroy()
|
|
110
|
-
|
|
111
|
-
_, err = copy.Image(ctx, policyContext, destRef, srcRef, ©.Options{
|
|
112
|
-
ReportWriter: os.Stdout,
|
|
113
|
-
DestinationCtx: destCtx,
|
|
114
|
-
SourceCtx: srcCtx,
|
|
115
|
-
})
|
|
116
|
-
if err != nil {
|
|
117
|
-
// log.Printf("Copy image failed: %v", err.Error())
|
|
118
|
-
// return physicalResourceID, data, nil
|
|
119
|
-
return physicalResourceID, data, fmt.Errorf("copy image failed: %s", err.Error())
|
|
120
|
-
}
|
|
121
|
-
}
|
|
122
|
-
|
|
123
|
-
return physicalResourceID, data, nil
|
|
124
|
-
}
|
|
125
|
-
|
|
126
|
-
func main() {
|
|
127
|
-
lambda.Start(cfn.LambdaWrap(handler))
|
|
128
|
-
}
|
|
129
|
-
|
|
130
|
-
// newTimeoutContext returns the context used for the image copy together with a
// cancel function. No deadline is currently applied, so the returned cancel is a no-op.
func newTimeoutContext() (context.Context, context.CancelFunc) {
	noop := context.CancelFunc(func() {})
	return context.Background(), noop
}
|
|
135
|
-
|
|
136
|
-
func newPolicyContext() (*signature.PolicyContext, error) {
|
|
137
|
-
policy := &signature.Policy{Default: []signature.PolicyRequirement{signature.NewPRInsecureAcceptAnything()}}
|
|
138
|
-
return signature.NewPolicyContext(policy)
|
|
139
|
-
}
|
|
140
|
-
|
|
141
|
-
// getStrProps returns the string stored under k in m; it errors when the key
// is absent or the value is not a string.
func getStrProps(m map[string]interface{}, k string) (string, error) {
	if s, ok := m[k].(string); ok {
		return s, nil
	}
	return "", fmt.Errorf("can't get %v", k)
}
|
|
149
|
-
|
|
150
|
-
// getStrPropsDefault returns the string stored under k in m. A missing (nil)
// entry yields the default d; a present non-string value is an error.
func getStrPropsDefault(m map[string]interface{}, k string, d string) (string, error) {
	v, found := m[k], m[k] != nil
	if !found {
		return d, nil
	}
	if s, ok := v.(string); ok {
		return s, nil
	}
	return "", fmt.Errorf("can't get %v", k)
}
|
|
161
|
-
|
|
162
|
-
func parseCreds(creds string) (string, error) {
|
|
163
|
-
credsType := GetCredsType(creds)
|
|
164
|
-
if creds == "" {
|
|
165
|
-
return "", nil
|
|
166
|
-
} else if (credsType == SECRET_ARN) || (credsType == SECRET_NAME) {
|
|
167
|
-
secret, err := GetSecret(creds)
|
|
168
|
-
if err != nil && len(secret) > 0 && json.Valid([]byte(secret)) {
|
|
169
|
-
secret, err = ParseJsonSecret(secret)
|
|
170
|
-
}
|
|
171
|
-
return secret, err
|
|
172
|
-
} else if credsType == SECRET_TEXT {
|
|
173
|
-
return creds, nil
|
|
174
|
-
}
|
|
175
|
-
return "", fmt.Errorf("unkown creds type")
|
|
176
|
-
}
|
package/lambda-src/main_test.go
DELETED
|
@@ -1,64 +0,0 @@
|
|
|
1
|
-
// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
|
2
|
-
// SPDX-License-Identifier: Apache-2.0
|
|
3
|
-
|
|
4
|
-
package main
|
|
5
|
-
|
|
6
|
-
import (
|
|
7
|
-
"log"
|
|
8
|
-
"os"
|
|
9
|
-
"testing"
|
|
10
|
-
|
|
11
|
-
"github.com/containers/image/v5/copy"
|
|
12
|
-
"github.com/containers/image/v5/transports/alltransports"
|
|
13
|
-
"github.com/stretchr/testify/assert"
|
|
14
|
-
|
|
15
|
-
_ "cdk-ecr-deployment-handler/s3"
|
|
16
|
-
)
|
|
17
|
-
|
|
18
|
-
func TestMain(t *testing.T) {
|
|
19
|
-
t.Skip()
|
|
20
|
-
|
|
21
|
-
// reference format: s3://bucket/key[:docker-reference]
|
|
22
|
-
// valid examples:
|
|
23
|
-
// s3://bucket/key:nginx:latest
|
|
24
|
-
// s3://bucket/key:@0
|
|
25
|
-
|
|
26
|
-
srcImage := "s3://cdk-ecr-deployment/nginx.tar:nginx:latest"
|
|
27
|
-
destImage := "dir:/tmp/nginx.dir"
|
|
28
|
-
|
|
29
|
-
log.Printf("SrcImage: %v DestImage: %v", srcImage, destImage)
|
|
30
|
-
|
|
31
|
-
srcRef, err := alltransports.ParseImageName(srcImage)
|
|
32
|
-
assert.NoError(t, err)
|
|
33
|
-
destRef, err := alltransports.ParseImageName(destImage)
|
|
34
|
-
assert.NoError(t, err)
|
|
35
|
-
|
|
36
|
-
srcOpts := NewImageOpts(srcImage, "")
|
|
37
|
-
srcCtx, err := srcOpts.NewSystemContext()
|
|
38
|
-
assert.NoError(t, err)
|
|
39
|
-
destOpts := NewImageOpts(destImage, "")
|
|
40
|
-
destCtx, err := destOpts.NewSystemContext()
|
|
41
|
-
assert.NoError(t, err)
|
|
42
|
-
|
|
43
|
-
ctx, cancel := newTimeoutContext()
|
|
44
|
-
defer cancel()
|
|
45
|
-
policyContext, err := newPolicyContext()
|
|
46
|
-
assert.NoError(t, err)
|
|
47
|
-
defer policyContext.Destroy()
|
|
48
|
-
|
|
49
|
-
_, err = copy.Image(ctx, policyContext, destRef, srcRef, ©.Options{
|
|
50
|
-
ReportWriter: os.Stdout,
|
|
51
|
-
DestinationCtx: destCtx,
|
|
52
|
-
SourceCtx: srcCtx,
|
|
53
|
-
})
|
|
54
|
-
assert.NoError(t, err)
|
|
55
|
-
}
|
|
56
|
-
|
|
57
|
-
func TestNewImageOpts(t *testing.T) {
|
|
58
|
-
srcOpts := NewImageOpts("s3://cdk-ecr-deployment/nginx.tar:nginx:latest", "arm64")
|
|
59
|
-
_, err := srcOpts.NewSystemContext()
|
|
60
|
-
assert.NoError(t, err)
|
|
61
|
-
destOpts := NewImageOpts("dir:/tmp/nginx.dir", "arm64")
|
|
62
|
-
_, err = destOpts.NewSystemContext()
|
|
63
|
-
assert.NoError(t, err)
|
|
64
|
-
}
|
package/lambda-src/s3/src.go
DELETED
|
@@ -1,40 +0,0 @@
|
|
|
1
|
-
// Taken from https://github.com/containers/image
|
|
2
|
-
// Modifications Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
|
|
3
|
-
|
|
4
|
-
package s3
|
|
5
|
-
|
|
6
|
-
import (
|
|
7
|
-
"cdk-ecr-deployment-handler/internal/tarfile"
|
|
8
|
-
"context"
|
|
9
|
-
|
|
10
|
-
"github.com/aws/aws-sdk-go-v2/config"
|
|
11
|
-
"github.com/containers/image/v5/types"
|
|
12
|
-
)
|
|
13
|
-
|
|
14
|
-
type s3ArchiveImageSource struct {
|
|
15
|
-
*tarfile.S3FileSource
|
|
16
|
-
ref *s3ArchiveReference
|
|
17
|
-
}
|
|
18
|
-
|
|
19
|
-
func (s *s3ArchiveImageSource) Reference() types.ImageReference {
|
|
20
|
-
return s.ref
|
|
21
|
-
}
|
|
22
|
-
|
|
23
|
-
func newImageSource(ctx context.Context, sys *types.SystemContext, ref *s3ArchiveReference) (types.ImageSource, error) {
|
|
24
|
-
cfg, err := config.LoadDefaultConfig(context.TODO())
|
|
25
|
-
if err != nil {
|
|
26
|
-
return nil, err
|
|
27
|
-
}
|
|
28
|
-
f, err := tarfile.NewS3File(cfg, *ref.s3uri)
|
|
29
|
-
if err != nil {
|
|
30
|
-
return nil, err
|
|
31
|
-
}
|
|
32
|
-
reader, err := tarfile.NewS3FileReader(f)
|
|
33
|
-
if err != nil {
|
|
34
|
-
return nil, err
|
|
35
|
-
}
|
|
36
|
-
return &s3ArchiveImageSource{
|
|
37
|
-
S3FileSource: tarfile.NewSource(reader, false, ref.ref, ref.sourceIndex),
|
|
38
|
-
ref: ref,
|
|
39
|
-
}, nil
|
|
40
|
-
}
|