cdk-ecr-deployment 3.3.1 → 4.0.0
This diff represents the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
- package/.jsii +26 -103
- package/.jsii.tabl.json +1 -1
- package/API.md +0 -59
- package/README.md +7 -17
- package/lib/index.d.ts +1 -29
- package/lib/index.js +8 -26
- package/package.json +1 -1
- package/lambda-src/.dockerignore +0 -9
- package/lambda-src/Dockerfile +0 -26
- package/lambda-src/Makefile +0 -33
- package/lambda-src/go.mod +0 -136
- package/lambda-src/go.sum +0 -499
- package/lambda-src/internal/iolimits/iolimits.go +0 -68
- package/lambda-src/internal/iolimits/iolimits_test.go +0 -39
- package/lambda-src/internal/tarfile/reader.go +0 -179
- package/lambda-src/internal/tarfile/reader_test.go +0 -31
- package/lambda-src/internal/tarfile/s3file.go +0 -363
- package/lambda-src/internal/tarfile/s3file_test.go +0 -135
- package/lambda-src/internal/tarfile/src.go +0 -328
- package/lambda-src/internal/tarfile/types.go +0 -31
- package/lambda-src/main.go +0 -176
- package/lambda-src/main_test.go +0 -64
- package/lambda-src/s3/src.go +0 -40
- package/lambda-src/s3/transport.go +0 -149
- package/lambda-src/s3/transport_test.go +0 -96
- package/lambda-src/utils.go +0 -206
- package/lambda-src/utils_test.go +0 -63
- package/lib/config.d.ts +0 -1
- package/lib/config.js +0 -12
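
All of `package/lambda-src/` is deleted in 4.0.0. The hunks below reproduce four of the removed files: the `internal/tarfile` package, which gave the Lambda handler random access to `docker save`-formatted tars stored on S3.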

```diff
--- package/lambda-src/internal/tarfile/reader.go
+++ /dev/null
@@ -1,179 +0,0 @@
-// Taken from https://github.com/containers/image
-// Modifications Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
-
-package tarfile
-
-import (
-    "archive/tar"
-    "encoding/json"
-    "io"
-    "os"
-    "path"
-
-    "cdk-ecr-deployment-handler/internal/iolimits"
-
-    "github.com/containers/image/v5/docker/reference"
-    "github.com/pkg/errors"
-)
-
-// S3FileReader is a ((docker save)-formatted) tar archive that allows random access to any component.
-type S3FileReader struct {
-    // None of the fields below are modified after the archive is created, until .Close();
-    // this allows concurrent readers of the same archive.
-    s3file   *S3File
-    Manifest []ManifestItem // Guaranteed to exist after the archive is created.
-}
-
-// NewS3FileReader creates a Reader for the specified S3File.
-// The caller should call .Close() on the returned archive when done.
-func NewS3FileReader(s3file *S3File) (*S3FileReader, error) {
-    if s3file == nil {
-        return nil, errors.New("s3.tarfile.S3FileReader can't be nil")
-    }
-
-    // This is a valid enough archive, except Manifest is not yet filled.
-    r := &S3FileReader{s3file: s3file}
-
-    // We initialize Manifest immediately when constructing the Reader instead
-    // of later on-demand because every caller will need the data, and because doing it now
-    // removes the need to synchronize the access/creation of the data if the archive is later
-    // used from multiple goroutines to access different images.
-
-    // FIXME? Do we need to deal with the legacy format?
-    bytes, err := r.readTarComponent(manifestFileName, iolimits.MegaByte)
-    if err != nil {
-        return nil, err
-    }
-    if err := json.Unmarshal(bytes, &r.Manifest); err != nil {
-        return nil, errors.Wrap(err, "Error decoding tar manifest.json")
-    }
-
-    return r, nil
-}
-
-// Close removes resources associated with an initialized Reader, if any.
-func (r *S3FileReader) Close() error {
-    return r.s3file.Close()
-}
-
-// ChooseManifestItem selects a manifest item from r.Manifest matching (ref, sourceIndex), one or
-// both of which should be (nil, -1).
-// On success, it returns the manifest item and an index of the matching tag, if a tag was used
-// for matching; the index is -1 if a tag was not used.
-func (r *S3FileReader) ChooseManifestItem(ref reference.NamedTagged, sourceIndex int) (*ManifestItem, int, error) {
-    switch {
-    case ref != nil && sourceIndex != -1:
-        return nil, -1, errors.Errorf("Internal error: Cannot have both ref %s and source index @%d",
-            ref.String(), sourceIndex)
-
-    case ref != nil:
-        refString := ref.String()
-        for i := range r.Manifest {
-            for tagIndex, tag := range r.Manifest[i].RepoTags {
-                parsedTag, err := reference.ParseNormalizedNamed(tag)
-                if err != nil {
-                    return nil, -1, errors.Wrapf(err, "Invalid tag %#v in manifest.json item @%d", tag, i)
-                }
-                if parsedTag.String() == refString {
-                    return &r.Manifest[i], tagIndex, nil
-                }
-            }
-        }
-        return nil, -1, errors.Errorf("Tag %#v not found", refString)
-
-    case sourceIndex != -1:
-        if sourceIndex >= len(r.Manifest) {
-            return nil, -1, errors.Errorf("Invalid source index @%d, only %d manifest items available",
-                sourceIndex, len(r.Manifest))
-        }
-        return &r.Manifest[sourceIndex], -1, nil
-
-    default:
-        if len(r.Manifest) != 1 {
-            return nil, -1, errors.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(r.Manifest))
-        }
-        return &r.Manifest[0], -1, nil
-    }
-}
-
-// tarReadCloser is a way to close the backing file of a tar.Reader when the user no longer needs the tar component.
-type tarReadCloser struct {
-    *tar.Reader
-}
-
-func (t *tarReadCloser) Close() error {
-    return nil
-}
-
-// openTarComponent returns a ReadCloser for the specific file within the archive.
-// This is linear scan; we assume that the tar file will have a fairly small amount of files (~layers),
-// and that filesystem caching will make the repeated seeking over the (uncompressed) tarPath cheap enough.
-// It is safe to call this method from multiple goroutines simultaneously.
-// The caller should call .Close() on the returned stream.
-func (r *S3FileReader) openTarComponent(componentPath string) (io.ReadCloser, error) {
-    // We must clone here to make sure each tar reader reads from the beginning,
-    // while the internal rcache is shared.
-    f := r.s3file.Clone()
-    tarReader, header, err := findTarComponent(f, componentPath)
-    if err != nil {
-        return nil, err
-    }
-    if header == nil {
-        return nil, os.ErrNotExist
-    }
-    if header.FileInfo().Mode()&os.ModeType == os.ModeSymlink { // FIXME: untested
-        // We follow only one symlink; so no loops are possible.
-        if _, err := f.Seek(0, io.SeekStart); err != nil {
-            return nil, err
-        }
-        // The new path could easily point "outside" the archive, but we only compare it to existing tar headers without extracting the archive,
-        // so we don't care.
-        tarReader, header, err = findTarComponent(f, path.Join(path.Dir(componentPath), header.Linkname))
-        if err != nil {
-            return nil, err
-        }
-        if header == nil {
-            return nil, os.ErrNotExist
-        }
-    }
-
-    if !header.FileInfo().Mode().IsRegular() {
-        return nil, errors.Errorf("Error reading tar archive component %s: not a regular file", header.Name)
-    }
-    return &tarReadCloser{Reader: tarReader}, nil
-}
-
-// findTarComponent returns a header and a reader matching componentPath within inputFile,
-// or (nil, nil, nil) if not found.
-func findTarComponent(inputFile io.Reader, componentPath string) (*tar.Reader, *tar.Header, error) {
-    t := tar.NewReader(inputFile)
-    componentPath = path.Clean(componentPath)
-    for {
-        h, err := t.Next()
-        if err == io.EOF {
-            break
-        }
-        if err != nil {
-            return nil, nil, err
-        }
-        if path.Clean(h.Name) == componentPath {
-            return t, h, nil
-        }
-    }
-    return nil, nil, nil
-}
-
-// readTarComponent returns full contents of componentPath.
-// It is safe to call this method from multiple goroutines simultaneously.
-func (r *S3FileReader) readTarComponent(path string, limit int) ([]byte, error) {
-    file, err := r.openTarComponent(path)
-    if err != nil {
-        return nil, errors.Wrapf(err, "Error loading tar component %s", path)
-    }
-    defer file.Close()
-    bytes, err := iolimits.ReadAtMost(file, limit)
-    if err != nil {
-        return nil, err
-    }
-    return bytes, nil
-}
```
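
The removed reader locates a single entry such as `manifest.json` by linearly scanning tar headers rather than extracting the archive. Below is a minimal standalone sketch of that scan using only the Go standard library; the `image.tar` path is a placeholder, and `io.ReadAll` stands in for the removed `iolimits.ReadAtMost` size cap.

```go
package main

import (
    "archive/tar"
    "fmt"
    "io"
    "log"
    "os"
    "path"
)

// findTarComponent mirrors the removed helper: walk tar headers until the
// cleaned entry name matches componentPath, then return the positioned reader.
func findTarComponent(r io.Reader, componentPath string) (*tar.Reader, *tar.Header, error) {
    t := tar.NewReader(r)
    componentPath = path.Clean(componentPath)
    for {
        h, err := t.Next()
        if err == io.EOF {
            break
        }
        if err != nil {
            return nil, nil, err
        }
        if path.Clean(h.Name) == componentPath {
            return t, h, nil
        }
    }
    return nil, nil, nil
}

func main() {
    f, err := os.Open("image.tar") // placeholder: any `docker save` output
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()

    tr, hdr, err := findTarComponent(f, "manifest.json")
    if err != nil {
        log.Fatal(err)
    }
    if hdr == nil {
        log.Fatal("manifest.json not found in archive")
    }
    data, err := io.ReadAll(tr) // the removed code caps this via iolimits.ReadAtMost
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("manifest.json is %d bytes\n", len(data))
}
```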
```diff
--- package/lambda-src/internal/tarfile/reader_test.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Taken from https://github.com/containers/image
-// Modifications Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
-
-package tarfile
-
-import (
-    "context"
-    "log"
-    "testing"
-
-    "github.com/aws/aws-sdk-go-v2/config"
-    "github.com/stretchr/testify/assert"
-)
-
-func TestNewS3FileReader(t *testing.T) {
-    t.Skip()
-    cfg, err := config.LoadDefaultConfig(context.TODO())
-    assert.NoError(t, err)
-
-    s3uri, _ := ParseS3Uri("s3://cdk-ecr-deployment/nginx.tar")
-
-    f, err := NewS3File(cfg, *s3uri)
-    assert.NoError(t, err)
-
-    log.Printf("file size: %d", f.Size())
-
-    reader, err := NewS3FileReader(f)
-    assert.NoError(t, err)
-
-    log.Printf("%+v", reader.Manifest)
-}
```
```diff
--- package/lambda-src/internal/tarfile/s3file.go
+++ /dev/null
@@ -1,363 +0,0 @@
-// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
-// SPDX-License-Identifier: Apache-2.0
-
-package tarfile
-
-import (
-    "cdk-ecr-deployment-handler/internal/iolimits"
-    "context"
-    "fmt"
-    "io"
-    "strings"
-    "sync"
-
-    "github.com/pkg/errors"
-
-    "github.com/sirupsen/logrus"
-
-    "github.com/aws/aws-sdk-go-v2/aws"
-    "github.com/aws/aws-sdk-go-v2/service/s3"
-    "github.com/golang/groupcache/lru"
-)
-
-const S3Prefix = "s3://"
-
-type S3Uri struct {
-    Bucket string
-    Key    string
-}
-
-func ParseS3Uri(s string) (*S3Uri, error) {
-    if !strings.HasPrefix(s, S3Prefix) {
-        return nil, fmt.Errorf("s3 uri must begin with %v", S3Prefix)
-    }
-    s = strings.TrimPrefix(s, S3Prefix)
-    parts := strings.SplitN(s, "/", 2)
-    if len(parts) == 1 {
-        return &S3Uri{
-            Bucket: parts[0],
-            Key:    "",
-        }, nil
-    }
-    return &S3Uri{
-        Bucket: parts[0],
-        Key:    parts[1],
-    }, nil
-}
-
-type S3File struct {
-    s3uri  S3Uri
-    client *s3.Client
-    i      int64       // current reading index
-    size   int64       // the size of the s3 object
-    rcache *BlockCache // read cache
-}
-
-// Len returns the number of bytes of the unread portion of the s3 object
-func (f *S3File) Len() int64 {
-    if f.i >= f.size {
-        return 0
-    }
-    return f.size - f.i
-}
-
-// Size returns the original length of the s3 object
-func (f *S3File) Size() int64 {
-    return f.size
-}
-
-// func (f *S3File) Read(b []byte) (n int, err error) {
-//     logrus.Debugf("S3File: Read %d bytes", len(b))
-
-//     if f.i >= f.size {
-//         return 0, io.EOF
-//     }
-//     out, err := f.client.GetObject(context.TODO(), &s3.GetObjectInput{
-//         Bucket: &f.s3uri.Bucket,
-//         Key:    &f.s3uri.Key,
-//         Range:  aws.String(fmt.Sprintf("bytes=%d-%d", f.i, f.i+int64(len(b))-1)),
-//     })
-//     if err != nil {
-//         return 0, err
-//     }
-//     defer out.Body.Close()
-
-//     n, err = out.Body.Read(b)
-//     f.i += int64(n)
-//     if err == io.EOF {
-//         return n, nil // e is EOF, so return nil explicitly
-//     }
-//     return
-// }
-
-func (f *S3File) onCacheMiss(block *Block) (err error) {
-    if f.client == nil {
-        return errors.New("S3File: api client is nil, did you close the file?")
-    }
-    bid := block.Id
-    out, err := f.client.GetObject(context.TODO(), &s3.GetObjectInput{
-        Bucket: &f.s3uri.Bucket,
-        Key:    &f.s3uri.Key,
-        Range:  aws.String(fmt.Sprintf("bytes=%d-%d", bid*iolimits.BlockSize, (bid+1)*iolimits.BlockSize-1)),
-    })
-    if err != nil {
-        return err
-    }
-    defer out.Body.Close()
-
-    i, n := 0, 0
-    for i < iolimits.BlockSize {
-        n, err = out.Body.Read(block.Buf[i:iolimits.BlockSize])
-        i += n
-        if err != nil {
-            break
-        }
-    }
-    if err == io.EOF {
-        return nil
-    }
-    return err
-}
-
-// Read implements the io.Reader interface.
-func (f *S3File) Read(b []byte) (n int, err error) {
-    logrus.Debugf("S3File: Read %d bytes", len(b))
-
-    if f.i >= f.size {
-        return 0, io.EOF
-    }
-    if f.rcache == nil {
-        return 0, errors.New("S3File: rcache is nil, did you close the file?")
-    }
-    buf, err := f.rcache.Read(f.i, f.i+int64(len(b)), f.onCacheMiss)
-    if err != nil {
-        return 0, err
-    }
-    n = copy(b, buf)
-    f.i += int64(n)
-    return n, nil
-}
-
-// ReadAt implements the io.ReaderAt interface.
-func (f *S3File) ReadAt(b []byte, off int64) (n int, err error) {
-    logrus.Debugf("S3File: ReadAt %d bytes %d offset", len(b), off)
-
-    if off < 0 {
-        return 0, errors.New("S3File: negative offset")
-    }
-    if off >= f.size {
-        return 0, io.EOF
-    }
-    if f.rcache == nil {
-        return 0, errors.New("S3File: rcache is nil, did you close the file?")
-    }
-    buf, err := f.rcache.Read(off, off+int64(len(b)), f.onCacheMiss)
-    if err != nil {
-        return 0, err
-    }
-    return copy(b, buf), nil
-}
-
-// Seek implements the io.Seeker interface.
-func (f *S3File) Seek(offset int64, whence int) (int64, error) {
-    logrus.Debugf("S3File: Seek %d offset %d whence", offset, whence)
-
-    var abs int64
-    switch whence {
-    case io.SeekStart:
-        abs = offset
-    case io.SeekCurrent:
-        abs = f.i + offset
-    case io.SeekEnd:
-        abs = f.size + offset
-    default:
-        return 0, errors.New("S3File: invalid whence")
-    }
-    if abs < 0 {
-        return 0, errors.New("S3File: negative position")
-    }
-    f.i = abs
-    return abs, nil
-}
-
-func (f *S3File) Reset() {
-    f.i = 0
-}
-
-func (f *S3File) Close() error {
-    f.client = nil
-    f.rcache = nil
-    return nil
-}
-
-func (f *S3File) Clone() *S3File {
-    return &S3File{
-        s3uri:  f.s3uri,
-        client: f.client,
-        i:      0,
-        size:   f.size,
-        rcache: f.rcache,
-    }
-}
-
-// WriteTo implements the io.WriterTo interface.
-// func (f *S3File) WriteTo(w io.Writer) (n int64, err error) {
-//     logrus.Debugf("S3File: WriteTo")
-
-//     if f.i >= f.size {
-//         return 0, io.EOF
-//     }
-
-//     wa, ok := w.(io.WriterAt)
-//     if !ok {
-//         return 0, errors.New("S3File: writer must be io.WriterAt")
-//     }
-
-//     downloader := manager.NewDownloader(f.client)
-//     n, err = downloader.Download(context.TODO(), wa, &s3.GetObjectInput{
-//         Bucket: &f.s3uri.Bucket,
-//         Key:    &f.s3uri.Key,
-//         Range:  aws.String(fmt.Sprintf("bytes=%d-", f.i)),
-//     })
-//     f.i += n
-//     return
-// }
-
-func NewS3File(cfg aws.Config, s3uri S3Uri) (*S3File, error) {
-    client := s3.NewFromConfig(cfg)
-    output, err := client.HeadObject(context.TODO(), &s3.HeadObjectInput{
-        Bucket: &s3uri.Bucket,
-        Key:    &s3uri.Key,
-    })
-    if err != nil {
-        return nil, err
-    }
-
-    return &S3File{
-        s3uri:  s3uri,
-        client: client,
-        i:      0,
-        size:   *output.ContentLength,
-        // The total cache size is `iolimits.CacheBlockCount * iolimits.BlockSize`
-        rcache: NewBlockCache(iolimits.CacheBlockCount),
-    }, nil
-}
-
-type Block struct {
-    Id  int64
-    Buf []byte
-}
-
-func (b *Block) Size() int {
-    return len(b.Buf)
-}
-
-type LRUBlockPool struct {
-    pool  *sync.Pool
-    cache *lru.Cache
-    mutex sync.Mutex
-}
-
-func NewLRUBlockPool(capacity int) *LRUBlockPool {
-    pool := &sync.Pool{
-        New: func() interface{} {
-            return &Block{
-                Id:  -1,
-                Buf: make([]byte, iolimits.BlockSize),
-            }
-        },
-    }
-    cache := lru.New(capacity)
-    cache.OnEvicted = func(k lru.Key, v interface{}) {
-        pool.Put(v)
-    }
-    return &LRUBlockPool{
-        pool:  pool,
-        cache: cache,
-    }
-}
-
-func (p *LRUBlockPool) GetBlock(id int64, blockInitFn func(*Block) error) (block *Block, err error) {
-    p.mutex.Lock()
-    defer p.mutex.Unlock()
-    val, hit := p.cache.Get(id)
-    if hit {
-        if block, ok := val.(*Block); ok {
-            return block, nil
-        } else {
-            return nil, errors.New("get an invalid block from cache")
-        }
-    } else {
-        logrus.Debugf("LRUBlockPool: miss block#%d", id)
-        if (p.cache.MaxEntries != 0) && (p.cache.Len() >= p.cache.MaxEntries) {
-            p.cache.RemoveOldest()
-        }
-        blk := p.pool.Get()
-        if block, ok := blk.(*Block); ok {
-            block.Id = id
-            err = blockInitFn(block)
-            p.cache.Add(id, block)
-            return block, err
-        } else {
-            return nil, errors.New("get an invalid block from pool")
-        }
-    }
-}
-
-type CacheMissFn func(b *Block) error
-
-type BlockCache struct {
-    pool *LRUBlockPool
-}
-
-func NewBlockCache(capacity int) *BlockCache {
-    return &BlockCache{
-        pool: NewLRUBlockPool(capacity),
-    }
-}
-
-func (c *BlockCache) Read(begin, end int64, cacheMissFn CacheMissFn) (buf []byte, err error) {
-    if begin < 0 {
-        return nil, fmt.Errorf("LRUBlockCache: negative begin")
-    }
-    if end < 0 {
-        return nil, fmt.Errorf("LRUBlockCache: negative end")
-    }
-    if begin >= end {
-        return nil, fmt.Errorf("LRUBlockCache: byte end must greater than byte begin")
-    }
-    bidBegin := begin / iolimits.BlockSize
-    bidEnd := end / iolimits.BlockSize
-    buf = make([]byte, 0)
-
-    for bid := bidBegin; bid <= bidEnd; bid++ {
-        b, e := blockAddressTranslation(begin, end, bid)
-        block, err := c.pool.GetBlock(bid, cacheMissFn)
-        if err != nil || block == nil {
-            return nil, errors.Wrapf(err, "error when get block from pool")
-        }
-        buf = append(buf, block.Buf[b:e]...)
-    }
-    return buf, nil
-}
-
-// Returns the byte range of the block at the given begin and end address
-func blockAddressTranslation(begin, end, bid int64) (b, e int64) {
-    b = max(begin, bid*iolimits.BlockSize) - bid*iolimits.BlockSize
-    e = min(end, (bid+1)*iolimits.BlockSize) - bid*iolimits.BlockSize
-    return
-}
-
-func max(a, b int64) int64 {
-    if a > b {
-        return a
-    }
-    return b
-}
-
-func min(a, b int64) int64 {
-    if a < b {
-        return a
-    }
-    return b
-}
```
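
The removed `S3File` turns each cache miss into a single ranged `GetObject` call, so random access into the tar never downloads the whole object. Here is a minimal sketch of that ranged read against aws-sdk-go-v2; the bucket and key are placeholders, and the 1 MiB block size is an assumption standing in for the package's `iolimits.BlockSize`.

```go
package main

import (
    "context"
    "fmt"
    "io"
    "log"

    "github.com/aws/aws-sdk-go-v2/aws"
    "github.com/aws/aws-sdk-go-v2/config"
    "github.com/aws/aws-sdk-go-v2/service/s3"
)

const blockSize = 1 << 20 // assumed block size; the real value lives in iolimits

// fetchBlock mirrors onCacheMiss: read exactly one block via an HTTP Range request.
func fetchBlock(ctx context.Context, client *s3.Client, bucket, key string, bid int64) ([]byte, error) {
    out, err := client.GetObject(ctx, &s3.GetObjectInput{
        Bucket: aws.String(bucket),
        Key:    aws.String(key),
        Range:  aws.String(fmt.Sprintf("bytes=%d-%d", bid*blockSize, (bid+1)*blockSize-1)),
    })
    if err != nil {
        return nil, err
    }
    defer out.Body.Close()
    return io.ReadAll(out.Body) // the last block may come back shorter than blockSize
}

func main() {
    cfg, err := config.LoadDefaultConfig(context.TODO())
    if err != nil {
        log.Fatal(err)
    }
    client := s3.NewFromConfig(cfg)

    // Placeholder object; the removed tests used s3://cdk-ecr-deployment/nginx.tar.
    buf, err := fetchBlock(context.TODO(), client, "my-bucket", "nginx.tar", 0)
    if err != nil {
        log.Fatal(err)
    }
    log.Printf("block 0: %d bytes", len(buf))
}
```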
```diff
--- package/lambda-src/internal/tarfile/s3file_test.go
+++ /dev/null
@@ -1,135 +0,0 @@
-// Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
-// SPDX-License-Identifier: Apache-2.0
-
-package tarfile
-
-import (
-    "archive/tar"
-    "cdk-ecr-deployment-handler/internal/iolimits"
-    "context"
-    "fmt"
-    "io"
-    "log"
-    "testing"
-
-    "github.com/aws/aws-sdk-go-v2/config"
-    "github.com/stretchr/testify/assert"
-)
-
-func TestNewS3File(t *testing.T) {
-    t.Skip()
-    cfg, err := config.LoadDefaultConfig(context.TODO())
-    assert.NoError(t, err)
-
-    s3uri, _ := ParseS3Uri("s3://cdk-ecr-deployment/nginx.tar")
-
-    f, err := NewS3File(cfg, *s3uri)
-    assert.NoError(t, err)
-
-    log.Printf("file size: %d", f.Size())
-
-    tr := tar.NewReader(f)
-    for {
-        hdr, err := tr.Next()
-        if err == io.EOF {
-            break // End of archive
-        }
-        if err != nil {
-            log.Fatal(err)
-        }
-        fmt.Printf("%s\n", hdr.Name)
-    }
-}
-
-func TestBlockAddressTranslation(t *testing.T) {
-    begin := int64(iolimits.BlockSize - iolimits.MegaByte)
-    end := int64(3*iolimits.BlockSize - iolimits.MegaByte)
-
-    b, e := blockAddressTranslation(begin, end, 0)
-    assert.Equal(t, begin, b)
-    assert.Equal(t, int64(iolimits.BlockSize), e)
-
-    b, e = blockAddressTranslation(begin, end, 1)
-    assert.Equal(t, int64(0), b)
-    assert.Equal(t, int64(iolimits.BlockSize), e)
-
-    b, e = blockAddressTranslation(begin, end, 2)
-    assert.Equal(t, int64(0), b)
-    assert.Equal(t, int64(iolimits.BlockSize-iolimits.MegaByte), e)
-}
-
-func TestBlockCache(t *testing.T) {
-    n := 0
-    cache := NewBlockCache(1)
-    cacheMissFn := func(block *Block) error {
-        n++
-        copy(block.Buf, magic(block.Id))
-        return nil
-    }
-
-    // read 0-3 bytes of block0
-    buf, err := cache.Read(0, 3, cacheMissFn)
-    assert.NoError(t, err)
-    assert.Equal(t, 1, n)
-    assert.Equal(t, magic(0), buf)
-
-    // read 0-3 bytes of block0's cache
-    buf, err = cache.Read(0, 3, cacheMissFn)
-    assert.NoError(t, err)
-    assert.Equal(t, 1, n)
-    assert.Equal(t, magic(0), buf)
-
-    // read 0-3 bytes of block1
-    buf, err = cache.Read(iolimits.BlockSize, iolimits.BlockSize+3, cacheMissFn)
-    assert.NoError(t, err)
-    assert.Equal(t, 2, n)
-    assert.Equal(t, magic(1), buf)
-
-    // read whole block0 and 0-3 bytes of block1
-    buf, err = cache.Read(0, iolimits.BlockSize+3, cacheMissFn)
-    assert.NoError(t, err)
-    assert.Equal(t, 4, n)
-    assert.Equal(t, append(mkblk(magic(0)), magic(1)...), buf)
-}
-
-func TestLRUBlockPool(t *testing.T) {
-    n := 0
-    pool := NewLRUBlockPool(1)
-    blockInitFn := func(block *Block) error {
-        n++
-        return nil
-    }
-
-    block, err := pool.GetBlock(0, blockInitFn)
-    assert.NoError(t, err)
-    assert.Equal(t, 1, n)
-    assert.Equal(t, int64(0), block.Id)
-    assert.Equal(t, iolimits.BlockSize, block.Size())
-    block.Buf[0] = byte('A')
-
-    block, err = pool.GetBlock(1, blockInitFn)
-    assert.NoError(t, err)
-    assert.Equal(t, 2, n)
-    assert.Equal(t, int64(1), block.Id)
-    assert.Equal(t, iolimits.BlockSize, block.Size())
-    assert.Equal(t, byte('A'), block.Buf[0])
-    block.Buf[0] = byte('B')
-
-    block, err = pool.GetBlock(1, blockInitFn)
-    assert.NoError(t, err)
-    assert.Equal(t, 2, n)
-    assert.Equal(t, int64(1), block.Id)
-    assert.Equal(t, iolimits.BlockSize, block.Size())
-    assert.Equal(t, byte('B'), block.Buf[0])
-}
-
-// Create magic bytes based on seed: [seed-1, seed, seed+1]
-func magic(seed int64) []byte {
-    return []byte{byte(seed - 1), byte(seed), byte(seed + 1)}
-}
-
-func mkblk(init []byte) []byte {
-    block := make([]byte, iolimits.BlockSize)
-    copy(block[0:len(init)], init)
-    return block
-}
```
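
The assertions above pin down the block arithmetic: a byte range [begin, end) is clamped to each block it touches and rebased to in-block offsets. Below is a self-contained sketch of that mapping, using an illustrative 16-byte block size rather than the real `iolimits.BlockSize` (requires Go 1.21+ for the built-in `min`/`max`).

```go
package main

import "fmt"

const blockSize int64 = 16 // illustrative only; the removed code used iolimits.BlockSize

// blockAddressTranslation reproduces the removed helper: clamp [begin, end)
// to block bid and rebase it to offsets within that block.
func blockAddressTranslation(begin, end, bid int64) (b, e int64) {
    b = max(begin, bid*blockSize) - bid*blockSize
    e = min(end, (bid+1)*blockSize) - bid*blockSize
    return
}

func main() {
    begin, end := int64(10), int64(40) // spans blocks 0, 1, and 2
    for bid := begin / blockSize; bid <= end/blockSize; bid++ {
        b, e := blockAddressTranslation(begin, end, bid)
        fmt.Printf("block %d: bytes [%d, %d)\n", bid, b, e)
    }
    // Output:
    // block 0: bytes [10, 16)
    // block 1: bytes [0, 16)
    // block 2: bytes [0, 8)
}
```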