@actions/artifact 0.4.2 → 0.6.0

This diff shows the content of publicly available package versions as they appear in their respective public registries, and is provided for informational purposes only.
Files changed (48)
  1. package/LICENSE.md +8 -8
  2. package/README.md +213 -213
  3. package/lib/artifact-client.d.ts +10 -10
  4. package/lib/artifact-client.js +10 -10
  5. package/lib/internal/artifact-client.d.ts +41 -41
  6. package/lib/internal/artifact-client.js +164 -148
  7. package/lib/internal/artifact-client.js.map +1 -1
  8. package/lib/internal/config-variables.d.ts +11 -11
  9. package/lib/internal/config-variables.js +70 -70
  10. package/lib/internal/contracts.d.ts +67 -57
  11. package/lib/internal/contracts.js +2 -2
  12. package/lib/internal/download-http-client.d.ts +39 -39
  13. package/lib/internal/download-http-client.js +271 -281
  14. package/lib/internal/download-http-client.js.map +1 -1
  15. package/lib/internal/download-options.d.ts +7 -7
  16. package/lib/internal/download-options.js +2 -2
  17. package/lib/internal/download-response.d.ts +10 -10
  18. package/lib/internal/download-response.js +2 -2
  19. package/lib/internal/download-specification.d.ts +19 -19
  20. package/lib/internal/download-specification.js +60 -60
  21. package/lib/internal/http-manager.d.ts +12 -12
  22. package/lib/internal/http-manager.js +30 -30
  23. package/lib/internal/path-and-artifact-name-validation.d.ts +8 -0
  24. package/lib/internal/path-and-artifact-name-validation.js +66 -0
  25. package/lib/internal/path-and-artifact-name-validation.js.map +1 -0
  26. package/lib/internal/requestUtils.d.ts +3 -0
  27. package/lib/internal/requestUtils.js +75 -0
  28. package/lib/internal/requestUtils.js.map +1 -0
  29. package/lib/internal/status-reporter.d.ts +21 -22
  30. package/lib/internal/status-reporter.js +50 -63
  31. package/lib/internal/status-reporter.js.map +1 -1
  32. package/lib/internal/upload-gzip.d.ts +14 -14
  33. package/lib/internal/upload-gzip.js +107 -88
  34. package/lib/internal/upload-gzip.js.map +1 -1
  35. package/lib/internal/upload-http-client.d.ts +48 -48
  36. package/lib/internal/upload-http-client.js +395 -376
  37. package/lib/internal/upload-http-client.js.map +1 -1
  38. package/lib/internal/upload-options.d.ts +34 -34
  39. package/lib/internal/upload-options.js +2 -2
  40. package/lib/internal/upload-response.d.ts +19 -19
  41. package/lib/internal/upload-response.js +2 -2
  42. package/lib/internal/upload-specification.d.ts +11 -11
  43. package/lib/internal/upload-specification.js +87 -87
  44. package/lib/internal/upload-specification.js.map +1 -1
  45. package/lib/internal/utils.d.ts +66 -73
  46. package/lib/internal/utils.js +262 -297
  47. package/lib/internal/utils.js.map +1 -1
  48. package/package.json +49 -49
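
The hunk below is for package/lib/internal/upload-http-client.js (item 36 above), the internal client behind the package's public upload API. For orientation, here is a minimal consumer-side sketch of that API; the file paths are hypothetical, and the create()/uploadArtifact() entry points come from the package's documented 0.x surface rather than from this diff:

    // Minimal usage sketch (v0.x API surface; paths are hypothetical)
    const artifact = require('@actions/artifact');

    async function run() {
      const client = artifact.create();
      // continueOnError and retentionDays flow into the options handled by
      // upload-http-client.js in the hunk below
      const response = await client.uploadArtifact(
        'my-artifact',       // artifact name (checked by the new path-and-artifact-name-validation module as of 0.6.0)
        ['dist/output.txt'], // files to upload
        'dist',              // root directory used to compute each file's upload path
        { continueOnError: false, retentionDays: 7 }
      );
      console.log(`uploaded ${response.artifactItems.length} item(s), failed: ${response.failedItems.length}`);
    }

    run();
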
@@ -1,377 +1,396 @@
- "use strict";
- var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
- function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
- return new (P || (P = Promise))(function (resolve, reject) {
- function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
- function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
- function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
- step((generator = generator.apply(thisArg, _arguments || [])).next());
- });
- };
- var __importStar = (this && this.__importStar) || function (mod) {
- if (mod && mod.__esModule) return mod;
- var result = {};
- if (mod != null) for (var k in mod) if (Object.hasOwnProperty.call(mod, k)) result[k] = mod[k];
- result["default"] = mod;
- return result;
- };
- Object.defineProperty(exports, "__esModule", { value: true });
- const fs = __importStar(require("fs"));
- const core = __importStar(require("@actions/core"));
- const tmp = __importStar(require("tmp-promise"));
- const stream = __importStar(require("stream"));
- const utils_1 = require("./utils");
- const config_variables_1 = require("./config-variables");
- const util_1 = require("util");
- const url_1 = require("url");
- const perf_hooks_1 = require("perf_hooks");
- const status_reporter_1 = require("./status-reporter");
- const http_manager_1 = require("./http-manager");
- const upload_gzip_1 = require("./upload-gzip");
- const stat = util_1.promisify(fs.stat);
- class UploadHttpClient {
- constructor() {
- this.uploadHttpManager = new http_manager_1.HttpManager(config_variables_1.getUploadFileConcurrency(), '@actions/artifact-upload');
- this.statusReporter = new status_reporter_1.StatusReporter(10000);
- }
- /**
- * Creates a file container for the new artifact in the remote blob storage/file service
- * @param {string} artifactName Name of the artifact being created
- * @returns The response from the Artifact Service if the file container was successfully created
- */
- createArtifactInFileContainer(artifactName, options) {
- return __awaiter(this, void 0, void 0, function* () {
- const parameters = {
- Type: 'actions_storage',
- Name: artifactName
- };
- // calculate retention period
- if (options && options.retentionDays) {
- const maxRetentionStr = config_variables_1.getRetentionDays();
- parameters.RetentionDays = utils_1.getProperRetention(options.retentionDays, maxRetentionStr);
- }
- const data = JSON.stringify(parameters, null, 2);
- const artifactUrl = utils_1.getArtifactUrl();
- // use the first client from the httpManager, `keep-alive` is not used so the connection will close immediately
- const client = this.uploadHttpManager.getClient(0);
- const headers = utils_1.getUploadHeaders('application/json', false);
- const rawResponse = yield client.post(artifactUrl, data, headers);
- const body = yield rawResponse.readBody();
- if (utils_1.isSuccessStatusCode(rawResponse.message.statusCode) && body) {
- return JSON.parse(body);
- }
- else if (utils_1.isForbiddenStatusCode(rawResponse.message.statusCode)) {
- // if a 403 is returned when trying to create a file container, the customer has exceeded
- // their storage quota so no new artifact containers can be created
- throw new Error(`Artifact storage quota has been hit. Unable to upload any new artifacts`);
- }
- else {
- utils_1.displayHttpDiagnostics(rawResponse);
- throw new Error(`Unable to create a container for the artifact ${artifactName} at ${artifactUrl}`);
- }
- });
- }
- /**
- * Concurrently upload all of the files in chunks
- * @param {string} uploadUrl Base Url for the artifact that was created
- * @param {SearchResult[]} filesToUpload A list of information about the files being uploaded
- * @returns The size of all the files uploaded in bytes
- */
- uploadArtifactToFileContainer(uploadUrl, filesToUpload, options) {
- return __awaiter(this, void 0, void 0, function* () {
- const FILE_CONCURRENCY = config_variables_1.getUploadFileConcurrency();
- const MAX_CHUNK_SIZE = config_variables_1.getUploadChunkSize();
- core.debug(`File Concurrency: ${FILE_CONCURRENCY}, and Chunk Size: ${MAX_CHUNK_SIZE}`);
- const parameters = [];
- // by default, file uploads will continue if there is an error unless specified differently in the options
- let continueOnError = true;
- if (options) {
- if (options.continueOnError === false) {
- continueOnError = false;
- }
- }
- // prepare the necessary parameters to upload all the files
- for (const file of filesToUpload) {
- const resourceUrl = new url_1.URL(uploadUrl);
- resourceUrl.searchParams.append('itemPath', file.uploadFilePath);
- parameters.push({
- file: file.absoluteFilePath,
- resourceUrl: resourceUrl.toString(),
- maxChunkSize: MAX_CHUNK_SIZE,
- continueOnError
- });
- }
- const parallelUploads = [...new Array(FILE_CONCURRENCY).keys()];
- const failedItemsToReport = [];
- let currentFile = 0;
- let completedFiles = 0;
- let uploadFileSize = 0;
- let totalFileSize = 0;
- let abortPendingFileUploads = false;
- this.statusReporter.setTotalNumberOfFilesToProcess(filesToUpload.length);
- this.statusReporter.start();
- // only allow a certain amount of files to be uploaded at once, this is done to reduce potential errors
- yield Promise.all(parallelUploads.map((index) => __awaiter(this, void 0, void 0, function* () {
- while (currentFile < filesToUpload.length) {
- const currentFileParameters = parameters[currentFile];
- currentFile += 1;
- if (abortPendingFileUploads) {
- failedItemsToReport.push(currentFileParameters.file);
- continue;
- }
- const startTime = perf_hooks_1.performance.now();
- const uploadFileResult = yield this.uploadFileAsync(index, currentFileParameters);
- if (core.isDebug()) {
- core.debug(`File: ${++completedFiles}/${filesToUpload.length}. ${currentFileParameters.file} took ${(perf_hooks_1.performance.now() - startTime).toFixed(3)} milliseconds to finish upload`);
- }
- uploadFileSize += uploadFileResult.successfulUploadSize;
- totalFileSize += uploadFileResult.totalSize;
- if (uploadFileResult.isSuccess === false) {
- failedItemsToReport.push(currentFileParameters.file);
- if (!continueOnError) {
- // fail fast
- core.error(`aborting artifact upload`);
- abortPendingFileUploads = true;
- }
- }
- this.statusReporter.incrementProcessedCount();
- }
- })));
- this.statusReporter.stop();
- // done uploading, safety dispose all connections
- this.uploadHttpManager.disposeAndReplaceAllClients();
- core.info(`Total size of all the files uploaded is ${uploadFileSize} bytes`);
- return {
- uploadSize: uploadFileSize,
- totalSize: totalFileSize,
- failedItems: failedItemsToReport
- };
- });
- }
- /**
- * Asynchronously uploads a file. The file is compressed and uploaded using GZip if it is determined to save space.
- * If the upload file is bigger than the max chunk size it will be uploaded via multiple calls
- * @param {number} httpClientIndex The index of the httpClient that is being used to make all of the calls
- * @param {UploadFileParameters} parameters Information about the file that needs to be uploaded
- * @returns The size of the file that was uploaded in bytes along with any failed uploads
- */
- uploadFileAsync(httpClientIndex, parameters) {
- return __awaiter(this, void 0, void 0, function* () {
- const totalFileSize = (yield stat(parameters.file)).size;
- let offset = 0;
- let isUploadSuccessful = true;
- let failedChunkSizes = 0;
- let uploadFileSize = 0;
- let isGzip = true;
- // the file that is being uploaded is less than 64k in size, to increase throughput and to minimize disk I/O
- // for creating a new GZip file, an in-memory buffer is used for compression
- if (totalFileSize < 65536) {
- const buffer = yield upload_gzip_1.createGZipFileInBuffer(parameters.file);
- //An open stream is needed in the event of a failure and we need to retry. If a NodeJS.ReadableStream is directly passed in,
- // it will not properly get reset to the start of the stream if a chunk upload needs to be retried
- let openUploadStream;
- if (totalFileSize < buffer.byteLength) {
- // compression did not help with reducing the size, use a readable stream from the original file for upload
- openUploadStream = () => fs.createReadStream(parameters.file);
- isGzip = false;
- uploadFileSize = totalFileSize;
- }
- else {
- // create a readable stream using a PassThrough stream that is both readable and writable
- openUploadStream = () => {
- const passThrough = new stream.PassThrough();
- passThrough.end(buffer);
- return passThrough;
- };
- uploadFileSize = buffer.byteLength;
- }
- const result = yield this.uploadChunk(httpClientIndex, parameters.resourceUrl, openUploadStream, 0, uploadFileSize - 1, uploadFileSize, isGzip, totalFileSize);
- if (!result) {
- // chunk failed to upload
- isUploadSuccessful = false;
- failedChunkSizes += uploadFileSize;
- core.warning(`Aborting upload for ${parameters.file} due to failure`);
- }
- return {
- isSuccess: isUploadSuccessful,
- successfulUploadSize: uploadFileSize - failedChunkSizes,
- totalSize: totalFileSize
- };
- }
- else {
- // the file that is being uploaded is greater than 64k in size, a temporary file gets created on disk using the
- // npm tmp-promise package and this file gets used to create a GZipped file
- const tempFile = yield tmp.file();
- // create a GZip file of the original file being uploaded, the original file should not be modified in any way
- uploadFileSize = yield upload_gzip_1.createGZipFileOnDisk(parameters.file, tempFile.path);
- let uploadFilePath = tempFile.path;
- // compression did not help with size reduction, use the original file for upload and delete the temp GZip file
- if (totalFileSize < uploadFileSize) {
- uploadFileSize = totalFileSize;
- uploadFilePath = parameters.file;
- isGzip = false;
- }
- let abortFileUpload = false;
- // upload only a single chunk at a time
- while (offset < uploadFileSize) {
- const chunkSize = Math.min(uploadFileSize - offset, parameters.maxChunkSize);
- // if an individual file is greater than 100MB (1024*1024*100) in size, display extra information about the upload status
- if (uploadFileSize > 104857600) {
- this.statusReporter.updateLargeFileStatus(parameters.file, offset, uploadFileSize);
- }
- const start = offset;
- const end = offset + chunkSize - 1;
- offset += parameters.maxChunkSize;
- if (abortFileUpload) {
- // if we don't want to continue in the event of an error, any pending upload chunks will be marked as failed
- failedChunkSizes += chunkSize;
- continue;
- }
- const result = yield this.uploadChunk(httpClientIndex, parameters.resourceUrl, () => fs.createReadStream(uploadFilePath, {
- start,
- end,
- autoClose: false
- }), start, end, uploadFileSize, isGzip, totalFileSize);
- if (!result) {
- // Chunk failed to upload, report as failed and do not continue uploading any more chunks for the file. It is possible that part of a chunk was
- // successfully uploaded so the server may report a different size for what was uploaded
- isUploadSuccessful = false;
- failedChunkSizes += chunkSize;
- core.warning(`Aborting upload for ${parameters.file} due to failure`);
- abortFileUpload = true;
- }
- }
- // Delete the temporary file that was created as part of the upload. If the temp file does not get manually deleted by
- // calling cleanup, it gets removed when the node process exits. For more info see: https://www.npmjs.com/package/tmp-promise#about
- yield tempFile.cleanup();
- return {
- isSuccess: isUploadSuccessful,
- successfulUploadSize: uploadFileSize - failedChunkSizes,
- totalSize: totalFileSize
- };
- }
- });
- }
- /**
- * Uploads a chunk of an individual file to the specified resourceUrl. If the upload fails and the status code
- * indicates a retryable status, we try to upload the chunk as well
- * @param {number} httpClientIndex The index of the httpClient being used to make all the necessary calls
- * @param {string} resourceUrl Url of the resource that the chunk will be uploaded to
- * @param {NodeJS.ReadableStream} openStream Stream of the file that will be uploaded
- * @param {number} start Starting byte index of file that the chunk belongs to
- * @param {number} end Ending byte index of file that the chunk belongs to
- * @param {number} uploadFileSize Total size of the file in bytes that is being uploaded
- * @param {boolean} isGzip Denotes if we are uploading a Gzip compressed stream
- * @param {number} totalFileSize Original total size of the file that is being uploaded
- * @returns if the chunk was successfully uploaded
- */
- uploadChunk(httpClientIndex, resourceUrl, openStream, start, end, uploadFileSize, isGzip, totalFileSize) {
- return __awaiter(this, void 0, void 0, function* () {
- // prepare all the necessary headers before making any http call
- const headers = utils_1.getUploadHeaders('application/octet-stream', true, isGzip, totalFileSize, end - start + 1, utils_1.getContentRange(start, end, uploadFileSize));
- const uploadChunkRequest = () => __awaiter(this, void 0, void 0, function* () {
- const client = this.uploadHttpManager.getClient(httpClientIndex);
- return yield client.sendStream('PUT', resourceUrl, openStream(), headers);
- });
- let retryCount = 0;
- const retryLimit = config_variables_1.getRetryLimit();
- // Increments the current retry count and then checks if the retry limit has been reached
- // If there have been too many retries, fail so the download stops
- const incrementAndCheckRetryLimit = (response) => {
- retryCount++;
- if (retryCount > retryLimit) {
- if (response) {
- utils_1.displayHttpDiagnostics(response);
- }
- core.info(`Retry limit has been reached for chunk at offset ${start} to ${resourceUrl}`);
- return true;
- }
- return false;
- };
- const backOff = (retryAfterValue) => __awaiter(this, void 0, void 0, function* () {
- this.uploadHttpManager.disposeAndReplaceClient(httpClientIndex);
- if (retryAfterValue) {
- core.info(`Backoff due to too many requests, retry #${retryCount}. Waiting for ${retryAfterValue} milliseconds before continuing the upload`);
- yield new Promise(resolve => setTimeout(resolve, retryAfterValue));
- }
- else {
- const backoffTime = utils_1.getExponentialRetryTimeInMilliseconds(retryCount);
- core.info(`Exponential backoff for retry #${retryCount}. Waiting for ${backoffTime} milliseconds before continuing the upload at offset ${start}`);
- yield new Promise(resolve => setTimeout(resolve, backoffTime));
- }
- core.info(`Finished backoff for retry #${retryCount}, continuing with upload`);
- return;
- });
- // allow for failed chunks to be retried multiple times
- while (retryCount <= retryLimit) {
- let response;
- try {
- response = yield uploadChunkRequest();
- }
- catch (error) {
- // if an error is caught, it is usually indicative of a timeout so retry the upload
- core.info(`An error has been caught http-client index ${httpClientIndex}, retrying the upload`);
- // eslint-disable-next-line no-console
- console.log(error);
- if (incrementAndCheckRetryLimit()) {
- return false;
- }
- yield backOff();
- continue;
- }
- // Always read the body of the response. There is potential for a resource leak if the body is not read which will
- // result in the connection remaining open along with unintended consequences when trying to dispose of the client
- yield response.readBody();
- if (utils_1.isSuccessStatusCode(response.message.statusCode)) {
- return true;
- }
- else if (utils_1.isRetryableStatusCode(response.message.statusCode)) {
- core.info(`A ${response.message.statusCode} status code has been received, will attempt to retry the upload`);
- if (incrementAndCheckRetryLimit(response)) {
- return false;
- }
- utils_1.isThrottledStatusCode(response.message.statusCode)
- ? yield backOff(utils_1.tryGetRetryAfterValueTimeInMilliseconds(response.message.headers))
- : yield backOff();
- }
- else {
- core.error(`Unexpected response. Unable to upload chunk to ${resourceUrl}`);
- utils_1.displayHttpDiagnostics(response);
- return false;
- }
- }
- return false;
- });
- }
- /**
- * Updates the size of the artifact from -1 which was initially set when the container was first created for the artifact.
- * Updating the size indicates that we are done uploading all the contents of the artifact
- */
- patchArtifactSize(size, artifactName) {
- return __awaiter(this, void 0, void 0, function* () {
- const headers = utils_1.getUploadHeaders('application/json', false);
- const resourceUrl = new url_1.URL(utils_1.getArtifactUrl());
- resourceUrl.searchParams.append('artifactName', artifactName);
- const parameters = { Size: size };
- const data = JSON.stringify(parameters, null, 2);
- core.debug(`URL is ${resourceUrl.toString()}`);
- // use the first client from the httpManager, `keep-alive` is not used so the connection will close immediately
- const client = this.uploadHttpManager.getClient(0);
- const response = yield client.patch(resourceUrl.toString(), data, headers);
- const body = yield response.readBody();
- if (utils_1.isSuccessStatusCode(response.message.statusCode)) {
- core.debug(`Artifact ${artifactName} has been successfully uploaded, total size in bytes: ${size}`);
- }
- else if (response.message.statusCode === 404) {
- throw new Error(`An Artifact with the name ${artifactName} was not found`);
- }
- else {
- utils_1.displayHttpDiagnostics(response);
- core.info(body);
- throw new Error(`Unable to finish uploading artifact ${artifactName} to ${resourceUrl}`);
- }
- });
- }
- }
- exports.UploadHttpClient = UploadHttpClient;
+ "use strict";
+ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
+ function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
+ return new (P || (P = Promise))(function (resolve, reject) {
+ function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
+ function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
+ function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
+ step((generator = generator.apply(thisArg, _arguments || [])).next());
+ });
+ };
+ var __importStar = (this && this.__importStar) || function (mod) {
+ if (mod && mod.__esModule) return mod;
+ var result = {};
+ if (mod != null) for (var k in mod) if (Object.hasOwnProperty.call(mod, k)) result[k] = mod[k];
+ result["default"] = mod;
+ return result;
+ };
+ Object.defineProperty(exports, "__esModule", { value: true });
+ const fs = __importStar(require("fs"));
+ const core = __importStar(require("@actions/core"));
+ const tmp = __importStar(require("tmp-promise"));
+ const stream = __importStar(require("stream"));
+ const utils_1 = require("./utils");
+ const config_variables_1 = require("./config-variables");
+ const util_1 = require("util");
+ const url_1 = require("url");
+ const perf_hooks_1 = require("perf_hooks");
+ const status_reporter_1 = require("./status-reporter");
+ const http_client_1 = require("@actions/http-client");
+ const http_manager_1 = require("./http-manager");
+ const upload_gzip_1 = require("./upload-gzip");
+ const requestUtils_1 = require("./requestUtils");
+ const stat = util_1.promisify(fs.stat);
+ class UploadHttpClient {
+ constructor() {
+ this.uploadHttpManager = new http_manager_1.HttpManager(config_variables_1.getUploadFileConcurrency(), '@actions/artifact-upload');
+ this.statusReporter = new status_reporter_1.StatusReporter(10000);
+ }
+ /**
+ * Creates a file container for the new artifact in the remote blob storage/file service
+ * @param {string} artifactName Name of the artifact being created
+ * @returns The response from the Artifact Service if the file container was successfully created
+ */
+ createArtifactInFileContainer(artifactName, options) {
+ return __awaiter(this, void 0, void 0, function* () {
+ const parameters = {
+ Type: 'actions_storage',
+ Name: artifactName
+ };
+ // calculate retention period
+ if (options && options.retentionDays) {
+ const maxRetentionStr = config_variables_1.getRetentionDays();
+ parameters.RetentionDays = utils_1.getProperRetention(options.retentionDays, maxRetentionStr);
+ }
+ const data = JSON.stringify(parameters, null, 2);
+ const artifactUrl = utils_1.getArtifactUrl();
+ // use the first client from the httpManager, `keep-alive` is not used so the connection will close immediately
+ const client = this.uploadHttpManager.getClient(0);
+ const headers = utils_1.getUploadHeaders('application/json', false);
+ // Extra information to display when a particular HTTP code is returned
+ // If a 403 is returned when trying to create a file container, the customer has exceeded
+ // their storage quota so no new artifact containers can be created
+ const customErrorMessages = new Map([
+ [
+ http_client_1.HttpCodes.Forbidden,
+ 'Artifact storage quota has been hit. Unable to upload any new artifacts'
+ ],
+ [
+ http_client_1.HttpCodes.BadRequest,
+ `The artifact name ${artifactName} is not valid. Request URL ${artifactUrl}`
+ ]
+ ]);
+ const response = yield requestUtils_1.retryHttpClientRequest('Create Artifact Container', () => __awaiter(this, void 0, void 0, function* () { return client.post(artifactUrl, data, headers); }), customErrorMessages);
+ const body = yield response.readBody();
+ return JSON.parse(body);
+ });
+ }
+ /**
+ * Concurrently upload all of the files in chunks
+ * @param {string} uploadUrl Base Url for the artifact that was created
+ * @param {SearchResult[]} filesToUpload A list of information about the files being uploaded
+ * @returns The size of all the files uploaded in bytes
+ */
+ uploadArtifactToFileContainer(uploadUrl, filesToUpload, options) {
+ return __awaiter(this, void 0, void 0, function* () {
+ const FILE_CONCURRENCY = config_variables_1.getUploadFileConcurrency();
+ const MAX_CHUNK_SIZE = config_variables_1.getUploadChunkSize();
+ core.debug(`File Concurrency: ${FILE_CONCURRENCY}, and Chunk Size: ${MAX_CHUNK_SIZE}`);
+ const parameters = [];
+ // by default, file uploads will continue if there is an error unless specified differently in the options
+ let continueOnError = true;
+ if (options) {
+ if (options.continueOnError === false) {
+ continueOnError = false;
+ }
+ }
+ // prepare the necessary parameters to upload all the files
+ for (const file of filesToUpload) {
+ const resourceUrl = new url_1.URL(uploadUrl);
+ resourceUrl.searchParams.append('itemPath', file.uploadFilePath);
+ parameters.push({
+ file: file.absoluteFilePath,
+ resourceUrl: resourceUrl.toString(),
+ maxChunkSize: MAX_CHUNK_SIZE,
+ continueOnError
+ });
+ }
+ const parallelUploads = [...new Array(FILE_CONCURRENCY).keys()];
+ const failedItemsToReport = [];
+ let currentFile = 0;
+ let completedFiles = 0;
+ let uploadFileSize = 0;
+ let totalFileSize = 0;
+ let abortPendingFileUploads = false;
+ this.statusReporter.setTotalNumberOfFilesToProcess(filesToUpload.length);
+ this.statusReporter.start();
+ // only allow a certain amount of files to be uploaded at once, this is done to reduce potential errors
+ yield Promise.all(parallelUploads.map((index) => __awaiter(this, void 0, void 0, function* () {
+ while (currentFile < filesToUpload.length) {
+ const currentFileParameters = parameters[currentFile];
+ currentFile += 1;
+ if (abortPendingFileUploads) {
+ failedItemsToReport.push(currentFileParameters.file);
+ continue;
+ }
+ const startTime = perf_hooks_1.performance.now();
+ const uploadFileResult = yield this.uploadFileAsync(index, currentFileParameters);
+ if (core.isDebug()) {
+ core.debug(`File: ${++completedFiles}/${filesToUpload.length}. ${currentFileParameters.file} took ${(perf_hooks_1.performance.now() - startTime).toFixed(3)} milliseconds to finish upload`);
+ }
+ uploadFileSize += uploadFileResult.successfulUploadSize;
+ totalFileSize += uploadFileResult.totalSize;
+ if (uploadFileResult.isSuccess === false) {
+ failedItemsToReport.push(currentFileParameters.file);
+ if (!continueOnError) {
+ // fail fast
+ core.error(`aborting artifact upload`);
+ abortPendingFileUploads = true;
+ }
+ }
+ this.statusReporter.incrementProcessedCount();
+ }
+ })));
+ this.statusReporter.stop();
+ // done uploading, safety dispose all connections
+ this.uploadHttpManager.disposeAndReplaceAllClients();
+ core.info(`Total size of all the files uploaded is ${uploadFileSize} bytes`);
+ return {
+ uploadSize: uploadFileSize,
+ totalSize: totalFileSize,
+ failedItems: failedItemsToReport
+ };
+ });
+ }
+ /**
+ * Asynchronously uploads a file. The file is compressed and uploaded using GZip if it is determined to save space.
+ * If the upload file is bigger than the max chunk size it will be uploaded via multiple calls
+ * @param {number} httpClientIndex The index of the httpClient that is being used to make all of the calls
+ * @param {UploadFileParameters} parameters Information about the file that needs to be uploaded
+ * @returns The size of the file that was uploaded in bytes along with any failed uploads
+ */
+ uploadFileAsync(httpClientIndex, parameters) {
+ return __awaiter(this, void 0, void 0, function* () {
+ const fileStat = yield stat(parameters.file);
+ const totalFileSize = fileStat.size;
+ // on Windows with mkfifo from MSYS2 stats.isFIFO returns false, so we check if running on Windows node and
+ // if the file has size of 0 to compensate
+ const isFIFO = fileStat.isFIFO() || (process.platform === 'win32' && totalFileSize === 0);
+ let offset = 0;
+ let isUploadSuccessful = true;
+ let failedChunkSizes = 0;
+ let uploadFileSize = 0;
+ let isGzip = true;
+ // the file that is being uploaded is less than 64k in size to increase throughput and to minimize disk I/O
+ // for creating a new GZip file, an in-memory buffer is used for compression
+ // with named pipes the file size is reported as zero in that case don't read the file in memory
+ if (!isFIFO && totalFileSize < 65536) {
+ core.debug(`${parameters.file} is less than 64k in size. Creating a gzip file in-memory to potentially reduce the upload size`);
+ const buffer = yield upload_gzip_1.createGZipFileInBuffer(parameters.file);
+ // An open stream is needed in the event of a failure and we need to retry. If a NodeJS.ReadableStream is directly passed in,
+ // it will not properly get reset to the start of the stream if a chunk upload needs to be retried
+ let openUploadStream;
+ if (totalFileSize < buffer.byteLength) {
+ // compression did not help with reducing the size, use a readable stream from the original file for upload
+ core.debug(`The gzip file created for ${parameters.file} did not help with reducing the size of the file. The original file will be uploaded as-is`);
+ openUploadStream = () => fs.createReadStream(parameters.file);
+ isGzip = false;
+ uploadFileSize = totalFileSize;
+ }
+ else {
+ // create a readable stream using a PassThrough stream that is both readable and writable
+ core.debug(`A gzip file created for ${parameters.file} helped with reducing the size of the original file. The file will be uploaded using gzip.`);
+ openUploadStream = () => {
+ const passThrough = new stream.PassThrough();
+ passThrough.end(buffer);
+ return passThrough;
+ };
+ uploadFileSize = buffer.byteLength;
+ }
+ const result = yield this.uploadChunk(httpClientIndex, parameters.resourceUrl, openUploadStream, 0, uploadFileSize - 1, uploadFileSize, isGzip, totalFileSize);
+ if (!result) {
+ // chunk failed to upload
+ isUploadSuccessful = false;
+ failedChunkSizes += uploadFileSize;
+ core.warning(`Aborting upload for ${parameters.file} due to failure`);
+ }
+ return {
+ isSuccess: isUploadSuccessful,
+ successfulUploadSize: uploadFileSize - failedChunkSizes,
+ totalSize: totalFileSize
+ };
+ }
+ else {
+ // the file that is being uploaded is greater than 64k in size, a temporary file gets created on disk using the
+ // npm tmp-promise package and this file gets used to create a GZipped file
+ const tempFile = yield tmp.file();
+ core.debug(`${parameters.file} is greater than 64k in size. Creating a gzip file on-disk ${tempFile.path} to potentially reduce the upload size`);
+ // create a GZip file of the original file being uploaded, the original file should not be modified in any way
+ uploadFileSize = yield upload_gzip_1.createGZipFileOnDisk(parameters.file, tempFile.path);
+ let uploadFilePath = tempFile.path;
+ // compression did not help with size reduction, use the original file for upload and delete the temp GZip file
+ // for named pipes totalFileSize is zero, this assumes compression did help
+ if (!isFIFO && totalFileSize < uploadFileSize) {
+ core.debug(`The gzip file created for ${parameters.file} did not help with reducing the size of the file. The original file will be uploaded as-is`);
+ uploadFileSize = totalFileSize;
+ uploadFilePath = parameters.file;
+ isGzip = false;
+ }
+ else {
+ core.debug(`The gzip file created for ${parameters.file} is smaller than the original file. The file will be uploaded using gzip.`);
+ }
+ let abortFileUpload = false;
+ // upload only a single chunk at a time
+ while (offset < uploadFileSize) {
+ const chunkSize = Math.min(uploadFileSize - offset, parameters.maxChunkSize);
+ const startChunkIndex = offset;
+ const endChunkIndex = offset + chunkSize - 1;
+ offset += parameters.maxChunkSize;
+ if (abortFileUpload) {
+ // if we don't want to continue in the event of an error, any pending upload chunks will be marked as failed
+ failedChunkSizes += chunkSize;
+ continue;
+ }
+ const result = yield this.uploadChunk(httpClientIndex, parameters.resourceUrl, () => fs.createReadStream(uploadFilePath, {
+ start: startChunkIndex,
+ end: endChunkIndex,
+ autoClose: false
+ }), startChunkIndex, endChunkIndex, uploadFileSize, isGzip, totalFileSize);
+ if (!result) {
+ // Chunk failed to upload, report as failed and do not continue uploading any more chunks for the file. It is possible that part of a chunk was
+ // successfully uploaded so the server may report a different size for what was uploaded
+ isUploadSuccessful = false;
+ failedChunkSizes += chunkSize;
+ core.warning(`Aborting upload for ${parameters.file} due to failure`);
+ abortFileUpload = true;
+ }
+ else {
+ // if an individual file is greater than 8MB (1024*1024*8) in size, display extra information about the upload status
+ if (uploadFileSize > 8388608) {
+ this.statusReporter.updateLargeFileStatus(parameters.file, startChunkIndex, endChunkIndex, uploadFileSize);
+ }
+ }
+ }
+ // Delete the temporary file that was created as part of the upload. If the temp file does not get manually deleted by
+ // calling cleanup, it gets removed when the node process exits. For more info see: https://www.npmjs.com/package/tmp-promise#about
+ core.debug(`deleting temporary gzip file ${tempFile.path}`);
+ yield tempFile.cleanup();
+ return {
+ isSuccess: isUploadSuccessful,
+ successfulUploadSize: uploadFileSize - failedChunkSizes,
+ totalSize: totalFileSize
+ };
+ }
+ });
+ }
+ /**
+ * Uploads a chunk of an individual file to the specified resourceUrl. If the upload fails and the status code
+ * indicates a retryable status, we try to upload the chunk as well
+ * @param {number} httpClientIndex The index of the httpClient being used to make all the necessary calls
+ * @param {string} resourceUrl Url of the resource that the chunk will be uploaded to
+ * @param {NodeJS.ReadableStream} openStream Stream of the file that will be uploaded
+ * @param {number} start Starting byte index of file that the chunk belongs to
+ * @param {number} end Ending byte index of file that the chunk belongs to
+ * @param {number} uploadFileSize Total size of the file in bytes that is being uploaded
+ * @param {boolean} isGzip Denotes if we are uploading a Gzip compressed stream
+ * @param {number} totalFileSize Original total size of the file that is being uploaded
+ * @returns if the chunk was successfully uploaded
+ */
+ uploadChunk(httpClientIndex, resourceUrl, openStream, start, end, uploadFileSize, isGzip, totalFileSize) {
+ return __awaiter(this, void 0, void 0, function* () {
+ // prepare all the necessary headers before making any http call
+ const headers = utils_1.getUploadHeaders('application/octet-stream', true, isGzip, totalFileSize, end - start + 1, utils_1.getContentRange(start, end, uploadFileSize));
+ const uploadChunkRequest = () => __awaiter(this, void 0, void 0, function* () {
+ const client = this.uploadHttpManager.getClient(httpClientIndex);
+ return yield client.sendStream('PUT', resourceUrl, openStream(), headers);
+ });
+ let retryCount = 0;
+ const retryLimit = config_variables_1.getRetryLimit();
+ // Increments the current retry count and then checks if the retry limit has been reached
+ // If there have been too many retries, fail so the download stops
+ const incrementAndCheckRetryLimit = (response) => {
+ retryCount++;
+ if (retryCount > retryLimit) {
+ if (response) {
+ utils_1.displayHttpDiagnostics(response);
+ }
+ core.info(`Retry limit has been reached for chunk at offset ${start} to ${resourceUrl}`);
+ return true;
+ }
+ return false;
+ };
+ const backOff = (retryAfterValue) => __awaiter(this, void 0, void 0, function* () {
+ this.uploadHttpManager.disposeAndReplaceClient(httpClientIndex);
+ if (retryAfterValue) {
+ core.info(`Backoff due to too many requests, retry #${retryCount}. Waiting for ${retryAfterValue} milliseconds before continuing the upload`);
+ yield utils_1.sleep(retryAfterValue);
+ }
+ else {
+ const backoffTime = utils_1.getExponentialRetryTimeInMilliseconds(retryCount);
+ core.info(`Exponential backoff for retry #${retryCount}. Waiting for ${backoffTime} milliseconds before continuing the upload at offset ${start}`);
+ yield utils_1.sleep(backoffTime);
+ }
+ core.info(`Finished backoff for retry #${retryCount}, continuing with upload`);
+ return;
+ });
+ // allow for failed chunks to be retried multiple times
+ while (retryCount <= retryLimit) {
+ let response;
+ try {
+ response = yield uploadChunkRequest();
+ }
+ catch (error) {
+ // if an error is caught, it is usually indicative of a timeout so retry the upload
+ core.info(`An error has been caught http-client index ${httpClientIndex}, retrying the upload`);
+ // eslint-disable-next-line no-console
+ console.log(error);
+ if (incrementAndCheckRetryLimit()) {
+ return false;
+ }
+ yield backOff();
+ continue;
+ }
+ // Always read the body of the response. There is potential for a resource leak if the body is not read which will
+ // result in the connection remaining open along with unintended consequences when trying to dispose of the client
+ yield response.readBody();
+ if (utils_1.isSuccessStatusCode(response.message.statusCode)) {
+ return true;
+ }
+ else if (utils_1.isRetryableStatusCode(response.message.statusCode)) {
+ core.info(`A ${response.message.statusCode} status code has been received, will attempt to retry the upload`);
+ if (incrementAndCheckRetryLimit(response)) {
+ return false;
+ }
+ utils_1.isThrottledStatusCode(response.message.statusCode)
+ ? yield backOff(utils_1.tryGetRetryAfterValueTimeInMilliseconds(response.message.headers))
+ : yield backOff();
+ }
+ else {
+ core.error(`Unexpected response. Unable to upload chunk to ${resourceUrl}`);
+ utils_1.displayHttpDiagnostics(response);
+ return false;
+ }
+ }
+ return false;
+ });
+ }
+ /**
+ * Updates the size of the artifact from -1 which was initially set when the container was first created for the artifact.
+ * Updating the size indicates that we are done uploading all the contents of the artifact
+ */
+ patchArtifactSize(size, artifactName) {
+ return __awaiter(this, void 0, void 0, function* () {
+ const resourceUrl = new url_1.URL(utils_1.getArtifactUrl());
+ resourceUrl.searchParams.append('artifactName', artifactName);
+ const parameters = { Size: size };
+ const data = JSON.stringify(parameters, null, 2);
+ core.debug(`URL is ${resourceUrl.toString()}`);
+ // use the first client from the httpManager, `keep-alive` is not used so the connection will close immediately
+ const client = this.uploadHttpManager.getClient(0);
+ const headers = utils_1.getUploadHeaders('application/json', false);
+ // Extra information to display when a particular HTTP code is returned
+ const customErrorMessages = new Map([
+ [
+ http_client_1.HttpCodes.NotFound,
+ `An Artifact with the name ${artifactName} was not found`
+ ]
+ ]);
+ // TODO retry for all possible response codes, the artifact upload is pretty much complete so it at all costs we should try to finish this
+ const response = yield requestUtils_1.retryHttpClientRequest('Finalize artifact upload', () => __awaiter(this, void 0, void 0, function* () { return client.patch(resourceUrl.toString(), data, headers); }), customErrorMessages);
+ yield response.readBody();
+ core.debug(`Artifact ${artifactName} has been successfully uploaded, total size in bytes: ${size}`);
+ });
+ }
+ }
+ exports.UploadHttpClient = UploadHttpClient;
  //# sourceMappingURL=upload-http-client.js.map
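
The requestUtils.js module that provides retryHttpClientRequest (item 27 above, +75 lines) is not part of this hunk. Judging only from the call sites above — a display name, a thunk that issues the request, and a Map from status code to an extra message — a compatible wrapper might look roughly like the sketch below. The helper names are borrowed from ./utils and ./config-variables as used elsewhere in this diff; the published implementation may differ in detail:

    // Illustrative sketch only; the shipped requestUtils.js may differ.
    const core = require('@actions/core');
    const utils_1 = require('./utils');
    const config_variables_1 = require('./config-variables');

    async function retryHttpClientRequest(name, operation, customErrorMessages = new Map()) {
      const retryLimit = config_variables_1.getRetryLimit();
      let attempt = 1;
      for (;;) {
        const response = await operation();
        const statusCode = response.message.statusCode;
        if (utils_1.isSuccessStatusCode(statusCode)) {
          return response; // the caller reads the body
        }
        // surface any extra context registered for this status code
        const customMessage = customErrorMessages.get(statusCode);
        if (!utils_1.isRetryableStatusCode(statusCode) || attempt >= retryLimit) {
          utils_1.displayHttpDiagnostics(response);
          throw new Error(customMessage || `${name} failed with status code ${statusCode}`);
        }
        // drain the body so the connection can be disposed of cleanly before retrying
        await response.readBody();
        const backoffTime = utils_1.getExponentialRetryTimeInMilliseconds(attempt);
        core.info(`Attempt ${attempt} of ${name} returned ${statusCode}; retrying in ${backoffTime} ms`);
        await utils_1.sleep(backoffTime);
        attempt++;
      }
    }

    exports.retryHttpClientRequest = retryHttpClientRequest;

This is the pattern by which createArtifactInFileContainer and patchArtifactSize now delegate error handling that 0.4.2 performed inline: a 403 (storage quota) or 400 (invalid artifact name) surfaces the mapped message instead of a hand-written throw at each call site.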