@appthreat/caxa 1.0.13 → 2.0.0

This diff shows the changes between publicly available versions of the package as published to a supported public registry. It is provided for informational purposes only.
package/stubs/stub.go CHANGED
@@ -3,7 +3,6 @@ package main
  import (
    "archive/tar"
    "bytes"
-   "compress/gzip"
    "context"
    "encoding/json"
    "errors"
@@ -15,284 +14,271 @@ import (
    "path"
    "path/filepath"
    "regexp"
+   "runtime"
    "strconv"
    "strings"
+   "sync"
    "time"
+
+   "github.com/klauspost/compress/gzip"
  )

+ type Config struct {
+   Identifier string `json:"identifier"`
+   Command []string `json:"command"`
+   UncompressionMessage string `json:"uncompressionMessage"`
+ }
+
+ type fileJob struct {
+   dest string
+   data []byte
+   mode int64
+ }
+
+ const maxBufferSize = 1 * 1024 * 1024
+
  func main() {
-   executableFile, err := os.Executable()
+   exePath, err := os.Executable()
    if err != nil {
-     log.Fatalf("caxa stub: Failed to find executable: %v", err)
+     log.Fatalf("caxa: failed to find executable: %v", err)
    }

-   executable, err := os.ReadFile(executableFile)
+   data, err := os.ReadFile(exePath)
    if err != nil {
-     log.Fatalf("caxa stub: Failed to read executable: %v", err)
+     log.Fatalf("caxa: failed to read executable: %v", err)
    }

-   footerSeparator := []byte("\n")
-   footerIndex := bytes.LastIndex(executable, footerSeparator)
-   if footerIndex == -1 {
-     log.Fatalf("caxa stub: Failed to find footer (did you append an archive and a footer to the stub?): %v", err)
-   }
-   footerString := executable[footerIndex+len(footerSeparator):]
-   var footer struct {
-     Identifier string `json:"identifier"`
-     Command []string `json:"command"`
-     UncompressionMessage string `json:"uncompressionMessage"`
+   config, payload, err := parseBinary(data)
+   if err != nil {
+     log.Fatalf("caxa: binary corrupted: %v", err)
    }
-   if err := json.Unmarshal(footerString, &footer); err != nil {
-     log.Fatalf("caxa stub: Failed to parse JSON in footer: %v", err)
+
+   appDir, err := prepareApplication(config, payload)
+   if err != nil {
+     log.Fatalf("caxa: failed to prepare application: %v", err)
    }

-   var applicationDirectory string
-   for extractionAttempt := 0; true; extractionAttempt++ {
-     lock := path.Join(os.TempDir(), "caxa", "locks", footer.Identifier, strconv.Itoa(extractionAttempt))
-     applicationDirectory = path.Join(os.TempDir(), "caxa", "applications", footer.Identifier, strconv.Itoa(extractionAttempt))
-     applicationDirectoryFileInfo, err := os.Stat(applicationDirectory)
-     if err != nil && !errors.Is(err, os.ErrNotExist) {
-       log.Fatalf("caxa stub: Failed to find information about the application directory: %v", err)
-     }
-     if err == nil && !applicationDirectoryFileInfo.IsDir() {
-       log.Fatalf("caxa stub: Path to application directory already exists and isn’t a directory: %v", err)
+   if err := run(config, appDir); err != nil {
+     var exitErr *exec.ExitError
+     if errors.As(err, &exitErr) {
+       os.Exit(exitErr.ExitCode())
      }
-     if err == nil && applicationDirectoryFileInfo.IsDir() {
-       lockFileInfo, err := os.Stat(lock)
-       if err != nil && !errors.Is(err, os.ErrNotExist) {
-         log.Fatalf("caxa stub: Failed to find information about the lock: %v", err)
-       }
-       if err == nil && !lockFileInfo.IsDir() {
-         log.Fatalf("caxa stub: Path to lock already exists and isn’t a directory: %v", err)
-       }
-       if err == nil && lockFileInfo.IsDir() {
-         // Application directory exists and lock exists as well, so a previous extraction wasn’t successful or an extraction is happening right now and hasn’t finished yet, in either case, start over with a fresh name.
-         continue
-       }
-       if err != nil && errors.Is(err, os.ErrNotExist) {
-         // Application directory exists and lock doesn’t exist, so a previous extraction was successful. Use the cached version of the application directory and don’t extract again.
-         break
-       }
-     }
-     if err != nil && errors.Is(err, os.ErrNotExist) {
-       ctx, cancelCtx := context.WithCancel(context.Background())
-       if footer.UncompressionMessage != "" {
-         fmt.Fprint(os.Stderr, footer.UncompressionMessage)
-         go func() {
-           ticker := time.NewTicker(time.Second * 5)
-           defer ticker.Stop()
-           for {
-             select {
-             case <-ticker.C:
-               fmt.Fprint(os.Stderr, ".")
-             case <-ctx.Done():
-               fmt.Fprintln(os.Stderr, "")
-               return
-             }
-           }
-         }()
-       }
+     log.Fatalf("caxa: execution failed: %v", err)
+   }
+ }

-       if err := os.MkdirAll(lock, 0755); err != nil {
-         log.Fatalf("caxa stub: Failed to create the lock directory: %v", err)
-       }
+ func parseBinary(data []byte) (*Config, []byte, error) {
+   footerSep := []byte("\n")
+   footerIdx := bytes.LastIndex(data, footerSep)
+   if footerIdx == -1 {
+     return nil, nil, errors.New("footer not found")
+   }

-       // The use of ‘Repeat’ below is to make it even more improbable that the separator will appear literally in the compiled stub.
-       archiveSeparator := []byte("\n" + strings.Repeat("CAXA", 3) + "\n")
-       archiveIndex := bytes.Index(executable, archiveSeparator)
-       if archiveIndex == -1 {
-         log.Fatalf("caxa stub: Failed to find archive (did you append the separator when building the stub?): %v", err)
-       }
-       archive := executable[archiveIndex+len(archiveSeparator) : footerIndex]
+   var config Config
+   if err := json.Unmarshal(data[footerIdx+1:], &config); err != nil {
+     return nil, nil, fmt.Errorf("invalid footer json: %w", err)
+   }
+
+   archiveSep := []byte("\nCAXACAXACAXA\n")
+   archiveIdx := bytes.Index(data, archiveSep)
+   if archiveIdx == -1 {
+     return nil, nil, errors.New("archive separator not found")
+   }
+
+   payload := data[archiveIdx+len(archiveSep) : footerIdx]
+   return &config, payload, nil
+ }

-       if err := Untar(bytes.NewReader(archive), applicationDirectory); err != nil {
-         log.Fatalf("caxa stub: Failed to uncompress archive: %v", err)
+ func prepareApplication(config *Config, payload []byte) (string, error) {
+   tempDir := os.Getenv("CAXA_TEMP_DIR")
+   if tempDir == "" {
+     tempDir = path.Join(os.TempDir(), "caxa")
+   }
+
+   for attempt := 0; ; attempt++ {
+     id := config.Identifier
+     sAttempt := strconv.Itoa(attempt)
+
+     appDir := path.Join(tempDir, "apps", id, sAttempt)
+     lockDir := path.Join(tempDir, "locks", id, sAttempt)
+
+     if info, err := os.Stat(appDir); err == nil && info.IsDir() {
+       if _, err := os.Stat(lockDir); os.IsNotExist(err) {
+         return appDir, nil
        }
+       continue
+     }

-       os.Remove(lock)
+     if err := os.MkdirAll(lockDir, 0755); err != nil {
+       return "", fmt.Errorf("failed to create lock: %w", err)
+     }

-       cancelCtx()
-       break
+     ctx, cancel := context.WithCancel(context.Background())
+     if config.UncompressionMessage != "" {
+       fmt.Fprint(os.Stderr, config.UncompressionMessage)
+       go func() {
+         t := time.NewTicker(2 * time.Second)
+         defer t.Stop()
+         for {
+           select {
+           case <-t.C:
+             fmt.Fprint(os.Stderr, ".")
+           case <-ctx.Done():
+             fmt.Fprintln(os.Stderr, "")
+             return
+           }
+         }
+       }()
      }
-   }

-   expandedCommand := make([]string, len(footer.Command))
-   applicationDirectoryPlaceholderRegexp := regexp.MustCompile(`\{\{\s*caxa\s*\}\}`)
-   for key, commandPart := range footer.Command {
-     expandedCommand[key] = applicationDirectoryPlaceholderRegexp.ReplaceAllLiteralString(commandPart, applicationDirectory)
-   }
+     if err := extract(payload, appDir); err != nil {
+       cancel()
+       os.RemoveAll(appDir)
+       os.RemoveAll(lockDir)
+       return "", err
+     }

-   command := exec.Command(expandedCommand[0], append(expandedCommand[1:], os.Args[1:]...)...)
-   command.Stdin = os.Stdin
-   command.Stdout = os.Stdout
-   command.Stderr = os.Stderr
-   err = command.Run()
-   var exitError *exec.ExitError
-   if errors.As(err, &exitError) {
-     os.Exit(exitError.ExitCode())
-   } else if err != nil {
-     log.Fatalf("caxa stub: Failed to run command: %v", err)
+     os.RemoveAll(lockDir)
+     cancel()
+     return appDir, nil
    }
  }

- // Adapted from https://github.com/golang/build/blob/db2c93053bcd6b944723c262828c90af91b0477a/internal/untar/untar.go and https://github.com/mholt/archiver/tree/v3.5.0
-
- // Copyright 2017 The Go Authors. All rights reserved.
- // Use of this source code is governed by a BSD-style
- // license that can be found in the LICENSE file.
-
- // Package untar untars a tarball to disk.
- // package untar
-
- // import (
- // "archive/tar"
- // "compress/gzip"
- // "fmt"
- // "io"
- // "log"
- // "os"
- // "path"
- // "path/filepath"
- // "strings"
- // "time"
- // )
-
- // TODO(bradfitz): this was copied from x/build/cmd/buildlet/buildlet.go
- // but there were some buildlet-specific bits in there, so the code is
- // forked for now. Unfork and add some opts arguments here, so the
- // buildlet can use this code somehow.
-
- // Untar reads the gzip-compressed tar file from r and writes it into dir.
- func Untar(r io.Reader, dir string) error {
-   return untar(r, dir)
- }
-
- func untar(r io.Reader, dir string) (err error) {
-   t0 := time.Now()
-   nFiles := 0
-   madeDir := map[string]bool{}
-   // defer func() {
-   // td := time.Since(t0)
-   // if err == nil {
-   // log.Printf("extracted tarball into %s: %d files, %d dirs (%v)", dir, nFiles, len(madeDir), td)
-   // } else {
-   // log.Printf("error extracting tarball into %s after %d files, %d dirs, %v: %v", dir, nFiles, len(madeDir), td, err)
-   // }
-   // }()
-   zr, err := gzip.NewReader(r)
+ func extract(payload []byte, dest string) error {
+   gr, err := gzip.NewReader(bytes.NewReader(payload))
    if err != nil {
-     return fmt.Errorf("requires gzip-compressed body: %v", err)
+     return err
+   }
+   defer gr.Close()
+
+   tr := tar.NewReader(gr)
+
+   numWorkers := runtime.NumCPU()
+   jobs := make(chan fileJob, numWorkers*2)
+   errChan := make(chan error, numWorkers)
+   var wg sync.WaitGroup
+
+   for i := 0; i < numWorkers; i++ {
+     wg.Add(1)
+     go func() {
+       defer wg.Done()
+       for job := range jobs {
+         if err := os.MkdirAll(filepath.Dir(job.dest), 0755); err != nil {
+           select {
+           case errChan <- err:
+           default:
+           }
+           return
+         }
+         if err := os.WriteFile(job.dest, job.data, os.FileMode(job.mode)); err != nil {
+           select {
+           case errChan <- err:
+           default:
+           }
+           return
+         }
+       }
+     }()
    }
-   tr := tar.NewReader(zr)
-   loggedChtimesError := false
+
    for {
-     f, err := tr.Next()
+     select {
+     case err := <-errChan:
+       close(jobs)
+       return err
+     default:
+     }
+
+     header, err := tr.Next()
      if err == io.EOF {
        break
      }
      if err != nil {
-       // log.Printf("tar reading error: %v", err)
-       return fmt.Errorf("tar error: %v", err)
+       close(jobs)
+       return err
      }
-     if !validRelPath(f.Name) {
-       return fmt.Errorf("tar contained invalid name error %q", f.Name)
+
+     target := filepath.Join(dest, filepath.FromSlash(header.Name))
+
+     if !strings.HasPrefix(target, filepath.Clean(dest)+string(os.PathSeparator)) {
+       close(jobs)
+       return fmt.Errorf("illegal file path: %s", header.Name)
      }
-     rel := filepath.FromSlash(f.Name)
-     abs := filepath.Join(dir, rel)
-
-     fi := f.FileInfo()
-     mode := fi.Mode()
-     switch {
-     case mode.IsRegular():
-       // Make the directory. This is redundant because it should
-       // already be made by a directory entry in the tar
-       // beforehand. Thus, don't check for errors; the next
-       // write will fail with the same error.
-       dir := filepath.Dir(abs)
-       if !madeDir[dir] {
-         if err := os.MkdirAll(filepath.Dir(abs), 0755); err != nil {
-           return err
-         }
-         madeDir[dir] = true
-       }
-       wf, err := os.OpenFile(abs, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode.Perm())
-       if err != nil {
+
+     switch header.Typeflag {
+     case tar.TypeDir:
+       if err := os.MkdirAll(target, 0755); err != nil {
+         close(jobs)
          return err
        }
-       n, err := io.Copy(wf, tr)
-       if closeErr := wf.Close(); closeErr != nil && err == nil {
-         err = closeErr
-       }
-       if err != nil {
-         return fmt.Errorf("error writing to %s: %v", abs, err)
-       }
-       if n != f.Size {
-         return fmt.Errorf("only wrote %d bytes to %s; expected %d", n, abs, f.Size)
-       }
-       modTime := f.ModTime
-       if modTime.After(t0) {
-         // Clamp modtimes at system time. See
-         // golang.org/issue/19062 when clock on
-         // buildlet was behind the gitmirror server
-         // doing the git-archive.
-         modTime = t0
-       }
-       if !modTime.IsZero() {
-         if err := os.Chtimes(abs, modTime, modTime); err != nil && !loggedChtimesError {
-           // benign error. Gerrit doesn't even set the
-           // modtime in these, and we don't end up relying
-           // on it anywhere (the gomote push command relies
-           // on digests only), so this is a little pointless
-           // for now.
-           // log.Printf("error changing modtime: %v (further Chtimes errors suppressed)", err)
-           loggedChtimesError = true // once is enough
+     case tar.TypeReg:
+       if header.Size < maxBufferSize {
+         buf := make([]byte, header.Size)
+         if _, err := io.ReadFull(tr, buf); err != nil {
+           close(jobs)
+           return err
          }
-       }
-       nFiles++
-     case mode.IsDir():
-       if err := os.MkdirAll(abs, 0755); err != nil {
-         return err
-       }
-       madeDir[abs] = true
-     case f.Typeflag == tar.TypeSymlink:
-       // leafac: Added by me to support symbolic links. Adapted from https://github.com/mholt/archiver/blob/v3.5.0/tar.go#L254-L276 and https://github.com/mholt/archiver/blob/v3.5.0/archiver.go#L313-L332
-       err := os.MkdirAll(filepath.Dir(abs), 0755)
-       if err != nil {
-         return fmt.Errorf("%s: making directory for file: %v", abs, err)
-       }
-       _, err = os.Lstat(abs)
-       if err == nil {
-         err = os.Remove(abs)
+         jobs <- fileJob{dest: target, data: buf, mode: header.Mode}
+       } else {
+         if err := os.MkdirAll(filepath.Dir(target), 0755); err != nil {
+           close(jobs)
+           return err
+         }
+         f, err := os.OpenFile(target, os.O_CREATE|os.O_RDWR, os.FileMode(header.Mode))
          if err != nil {
-           return fmt.Errorf("%s: failed to unlink: %+v", abs, err)
+           close(jobs)
+           return err
+         }
+         if _, err := io.Copy(f, tr); err != nil {
+           f.Close()
+           close(jobs)
+           return err
          }
+         f.Close()
        }
-
-       err = os.Symlink(f.Linkname, abs)
-       if err != nil {
-         return fmt.Errorf("%s: making symbolic link for: %v", abs, err)
+     case tar.TypeSymlink:
+       if err := os.MkdirAll(filepath.Dir(target), 0755); err != nil {
+         close(jobs)
+         return err
+       }
+       _ = os.Remove(target)
+       if err := os.Symlink(header.Linkname, target); err != nil {
+         close(jobs)
+         return err
       }
-     default:
-       return fmt.Errorf("tar file entry %s contained unsupported file type %v", f.Name, mode)
     }
   }
-   return nil
+
+   close(jobs)
+   wg.Wait()
+
+   select {
+   case err := <-errChan:
+     return err
+   default:
+     return nil
+   }
  }

- func validRelativeDir(dir string) bool {
-   if strings.Contains(dir, `\`) || path.IsAbs(dir) {
-     return false
+ func run(config *Config, appDir string) error {
+   args := make([]string, len(config.Command))
+   rx := regexp.MustCompile(`\{\{\s*caxa\s*\}\}`)
+
+   for i, part := range config.Command {
+     args[i] = rx.ReplaceAllLiteralString(part, appDir)
    }
-   dir = path.Clean(dir)
-   if strings.HasPrefix(dir, "../") || strings.HasSuffix(dir, "/..") || dir == ".." {
-     return false
+
+   if len(os.Args) > 1 {
+     args = append(args, os.Args[1:]...)
    }
-   return true
- }

- func validRelPath(p string) bool {
-   if p == "" || strings.Contains(p, `\`) || strings.HasPrefix(p, "/") || strings.Contains(p, "../") {
-     return false
+   if len(args) == 0 {
+     return errors.New("no command defined")
    }
-   return true
- }
+
+   cmd := exec.Command(args[0], args[1:]...)
+   cmd.Stdin = os.Stdin
+   cmd.Stdout = os.Stdout
+   cmd.Stderr = os.Stderr
+   return cmd.Run()
+ }
@@ -0,0 +1,144 @@
+ package main
+
+ import (
+   "archive/tar"
+   "bytes"
+   "compress/gzip"
+   "crypto/rand"
+   "encoding/json"
+   "os"
+   "path/filepath"
+   "strings"
+   "testing"
+ )
+
+ func createMockPayload(files map[string][]byte) ([]byte, error) {
+   var buf bytes.Buffer
+   gw := gzip.NewWriter(&buf)
+   tw := tar.NewWriter(gw)
+
+   for name, content := range files {
+     header := &tar.Header{
+       Name: name,
+       Mode: 0600,
+       Size: int64(len(content)),
+     }
+     if err := tw.WriteHeader(header); err != nil {
+       return nil, err
+     }
+     if _, err := tw.Write(content); err != nil {
+       return nil, err
+     }
+   }
+
+   if err := tw.Close(); err != nil {
+     return nil, err
+   }
+   if err := gw.Close(); err != nil {
+     return nil, err
+   }
+
+   return buf.Bytes(), nil
+ }
+
+ func TestParseBinary(t *testing.T) {
+   payloadData := []byte("mock-compressed-data")
+   separator := []byte("\nCAXACAXACAXA\n")
+
+   config := Config{
+     Identifier: "test-id",
+     Command: []string{"node", "index.js"},
+   }
+   configData, _ := json.Marshal(config)
+
+   var binaryBuilder bytes.Buffer
+   binaryBuilder.Write([]byte("some-binary-code-here"))
+   binaryBuilder.Write(separator)
+   binaryBuilder.Write(payloadData)
+   binaryBuilder.Write([]byte("\n"))
+   binaryBuilder.Write(configData)
+
+   parsedConfig, parsedPayload, err := parseBinary(binaryBuilder.Bytes())
+   if err != nil {
+     t.Fatalf("parseBinary failed: %v", err)
+   }
+
+   if parsedConfig.Identifier != "test-id" {
+     t.Errorf("Expected identifier 'test-id', got '%s'", parsedConfig.Identifier)
+   }
+   if !bytes.Equal(parsedPayload, payloadData) {
+     t.Errorf("Payload mismatch")
+   }
+ }
+
+ func TestExtract_Parallel_And_LargeFiles(t *testing.T) {
+   smallContent := []byte("small-file")
+
+   largeSize := 1024 * 1024 + 100
+   largeContent := make([]byte, largeSize)
+   rand.Read(largeContent)
+
+   files := map[string][]byte{
+     "small.txt": smallContent,
+     "subdir/test.txt": smallContent,
+     "large.bin": largeContent,
+   }
+
+   payload, err := createMockPayload(files)
+   if err != nil {
+     t.Fatalf("Failed to create mock payload: %v", err)
+   }
+
+   destDir, err := os.MkdirTemp("", "caxa-test-*")
+   if err != nil {
+     t.Fatalf("Failed to create temp dir: %v", err)
+   }
+   defer os.RemoveAll(destDir)
+
+   if err := extract(payload, destDir); err != nil {
+     t.Fatalf("extract failed: %v", err)
+   }
+
+   checkFile := func(path string, expected []byte) {
+     content, err := os.ReadFile(filepath.Join(destDir, path))
+     if err != nil {
+       t.Errorf("Failed to read extracted file %s: %v", path, err)
+       return
+     }
+     if !bytes.Equal(content, expected) {
+       t.Errorf("Content mismatch for %s", path)
+     }
+   }
+
+   checkFile("small.txt", smallContent)
+   checkFile("subdir/test.txt", smallContent)
+   checkFile("large.bin", largeContent)
+ }
+
+ func TestExtract_ZipSlip_Security(t *testing.T) {
+   var buf bytes.Buffer
+   gw := gzip.NewWriter(&buf)
+   tw := tar.NewWriter(gw)
+
+   header := &tar.Header{
+     Name: "../../../etc/passwd",
+     Mode: 0600,
+     Size: int64(4),
+   }
+   tw.WriteHeader(header)
+   tw.Write([]byte("root"))
+   tw.Close()
+   gw.Close()
+
+   destDir, _ := os.MkdirTemp("", "caxa-security-test-*")
+   defer os.RemoveAll(destDir)
+
+   err := extract(buf.Bytes(), destDir)
+   if err == nil {
+     t.Fatal("Expected extract to fail on ZipSlip attempt, but it succeeded")
+   }
+
+   if !strings.Contains(err.Error(), "illegal file path") {
+     t.Errorf("Expected 'illegal file path' error, got: %v", err)
+   }
+ }
package/tsconfig.json ADDED
@@ -0,0 +1,28 @@
+ {
+   "compilerOptions": {
+     "rootDir": "source",
+     "outDir": "build",
+
+     "target": "ES2022",
+     "module": "NodeNext",
+     "moduleResolution": "NodeNext",
+     "moduleDetection": "force",
+
+     "lib": ["ESNext"],
+     "esModuleInterop": true,
+     "skipLibCheck": true,
+
+     "strict": true,
+     "forceConsistentCasingInFileNames": true,
+     "noImplicitReturns": true,
+     "noFallthroughCasesInSwitch": true,
+     "verbatimModuleSyntax": true,
+
+     "declaration": true,
+     "declarationMap": true,
+     "sourceMap": true,
+     "removeComments": false
+   },
+   "include": ["source/**/*"],
+   "exclude": ["node_modules", "build", "stubs"]
+ }