@actions/artifact 0.5.0 → 0.6.1

This diff shows the content of publicly available package versions as released to their public registry. It is provided for informational purposes only.
Files changed (47)
  1. package/LICENSE.md +8 -8
  2. package/README.md +213 -213
  3. package/lib/artifact-client.d.ts +10 -10
  4. package/lib/artifact-client.js +10 -10
  5. package/lib/internal/artifact-client.d.ts +41 -41
  6. package/lib/internal/artifact-client.js +164 -148
  7. package/lib/internal/artifact-client.js.map +1 -1
  8. package/lib/internal/config-variables.d.ts +11 -11
  9. package/lib/internal/config-variables.js +70 -70
  10. package/lib/internal/contracts.d.ts +67 -57
  11. package/lib/internal/contracts.js +2 -2
  12. package/lib/internal/download-http-client.d.ts +39 -39
  13. package/lib/internal/download-http-client.js +271 -274
  14. package/lib/internal/download-http-client.js.map +1 -1
  15. package/lib/internal/download-options.d.ts +7 -7
  16. package/lib/internal/download-options.js +2 -2
  17. package/lib/internal/download-response.d.ts +10 -10
  18. package/lib/internal/download-response.js +2 -2
  19. package/lib/internal/download-specification.d.ts +19 -19
  20. package/lib/internal/download-specification.js +60 -60
  21. package/lib/internal/http-manager.d.ts +12 -12
  22. package/lib/internal/http-manager.js +30 -30
  23. package/lib/internal/path-and-artifact-name-validation.d.ts +8 -0 (new file; see the sketch after this list)
  24. package/lib/internal/path-and-artifact-name-validation.js +66 -0
  25. package/lib/internal/path-and-artifact-name-validation.js.map +1 -0
  26. package/lib/internal/requestUtils.d.ts +3 -3
  27. package/lib/internal/requestUtils.js +74 -74
  28. package/lib/internal/status-reporter.d.ts +21 -22
  29. package/lib/internal/status-reporter.js +50 -63
  30. package/lib/internal/status-reporter.js.map +1 -1
  31. package/lib/internal/upload-gzip.d.ts +14 -14
  32. package/lib/internal/upload-gzip.js +107 -88
  33. package/lib/internal/upload-gzip.js.map +1 -1
  34. package/lib/internal/upload-http-client.d.ts +48 -48
  35. package/lib/internal/upload-http-client.js +393 -378
  36. package/lib/internal/upload-http-client.js.map +1 -1
  37. package/lib/internal/upload-options.d.ts +34 -34
  38. package/lib/internal/upload-options.js +2 -2
  39. package/lib/internal/upload-response.d.ts +19 -19
  40. package/lib/internal/upload-response.js +2 -2
  41. package/lib/internal/upload-specification.d.ts +11 -11
  42. package/lib/internal/upload-specification.js +87 -87
  43. package/lib/internal/upload-specification.js.map +1 -1
  44. package/lib/internal/utils.d.ts +66 -74
  45. package/lib/internal/utils.js +262 -303
  46. package/lib/internal/utils.js.map +1 -1
  47. package/package.json +49 -49
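
Of note in the list above: items 23–25 add a new internal module, path-and-artifact-name-validation, while utils.d.ts shrinks by a matching eight declaration lines, suggesting the artifact name and file path checks moved out of lib/internal/utils. Below is a minimal sketch of exercising that module; the exported names checkArtifactName and checkArtifactFilePath are an assumption inferred from the 0.5.0 utils exports, not something this diff confirms.

// Hedged sketch: the exported helpers are assumed to be checkArtifactName and
// checkArtifactFilePath; both throw on input containing characters that are
// invalid on common filesystems (e.g. '"', ':', '<', '>', '|', '*', '?').
const validation = require('@actions/artifact/lib/internal/path-and-artifact-name-validation');

validation.checkArtifactName('my-artifact');         // ok: no invalid characters
validation.checkArtifactFilePath('dist/output.txt'); // ok (hypothetical path)
try {
  validation.checkArtifactName('bad:name');          // expected to throw: ':' is rejected
} catch (e) {
  console.log(e.message);
}
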
package/lib/internal/upload-http-client.js
@@ -1,379 +1,394 @@
- "use strict";
- var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
- function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
- return new (P || (P = Promise))(function (resolve, reject) {
- function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
- function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
- function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
- step((generator = generator.apply(thisArg, _arguments || [])).next());
- });
- };
- var __importStar = (this && this.__importStar) || function (mod) {
- if (mod && mod.__esModule) return mod;
- var result = {};
- if (mod != null) for (var k in mod) if (Object.hasOwnProperty.call(mod, k)) result[k] = mod[k];
- result["default"] = mod;
- return result;
- };
- Object.defineProperty(exports, "__esModule", { value: true });
- const fs = __importStar(require("fs"));
- const core = __importStar(require("@actions/core"));
- const tmp = __importStar(require("tmp-promise"));
- const stream = __importStar(require("stream"));
- const utils_1 = require("./utils");
- const config_variables_1 = require("./config-variables");
- const util_1 = require("util");
- const url_1 = require("url");
- const perf_hooks_1 = require("perf_hooks");
- const status_reporter_1 = require("./status-reporter");
- const http_client_1 = require("@actions/http-client");
- const http_manager_1 = require("./http-manager");
- const upload_gzip_1 = require("./upload-gzip");
- const requestUtils_1 = require("./requestUtils");
- const stat = util_1.promisify(fs.stat);
- class UploadHttpClient {
- constructor() {
- this.uploadHttpManager = new http_manager_1.HttpManager(config_variables_1.getUploadFileConcurrency(), '@actions/artifact-upload');
- this.statusReporter = new status_reporter_1.StatusReporter(10000);
- }
- /**
- * Creates a file container for the new artifact in the remote blob storage/file service
- * @param {string} artifactName Name of the artifact being created
- * @returns The response from the Artifact Service if the file container was successfully created
- */
- createArtifactInFileContainer(artifactName, options) {
- return __awaiter(this, void 0, void 0, function* () {
- const parameters = {
- Type: 'actions_storage',
- Name: artifactName
- };
- // calculate retention period
- if (options && options.retentionDays) {
- const maxRetentionStr = config_variables_1.getRetentionDays();
- parameters.RetentionDays = utils_1.getProperRetention(options.retentionDays, maxRetentionStr);
- }
- const data = JSON.stringify(parameters, null, 2);
- const artifactUrl = utils_1.getArtifactUrl();
- // use the first client from the httpManager, `keep-alive` is not used so the connection will close immediately
- const client = this.uploadHttpManager.getClient(0);
- const headers = utils_1.getUploadHeaders('application/json', false);
- // Extra information to display when a particular HTTP code is returned
- // If a 403 is returned when trying to create a file container, the customer has exceeded
- // their storage quota so no new artifact containers can be created
- const customErrorMessages = new Map([
- [
- http_client_1.HttpCodes.Forbidden,
- 'Artifact storage quota has been hit. Unable to upload any new artifacts'
- ],
- [
- http_client_1.HttpCodes.BadRequest,
- `The artifact name ${artifactName} is not valid. Request URL ${artifactUrl}`
- ]
- ]);
- const response = yield requestUtils_1.retryHttpClientRequest('Create Artifact Container', () => __awaiter(this, void 0, void 0, function* () { return client.post(artifactUrl, data, headers); }), customErrorMessages);
- const body = yield response.readBody();
- return JSON.parse(body);
- });
- }
- /**
- * Concurrently upload all of the files in chunks
- * @param {string} uploadUrl Base Url for the artifact that was created
- * @param {SearchResult[]} filesToUpload A list of information about the files being uploaded
- * @returns The size of all the files uploaded in bytes
- */
- uploadArtifactToFileContainer(uploadUrl, filesToUpload, options) {
- return __awaiter(this, void 0, void 0, function* () {
- const FILE_CONCURRENCY = config_variables_1.getUploadFileConcurrency();
- const MAX_CHUNK_SIZE = config_variables_1.getUploadChunkSize();
- core.debug(`File Concurrency: ${FILE_CONCURRENCY}, and Chunk Size: ${MAX_CHUNK_SIZE}`);
- const parameters = [];
- // by default, file uploads will continue if there is an error unless specified differently in the options
- let continueOnError = true;
- if (options) {
- if (options.continueOnError === false) {
- continueOnError = false;
- }
- }
- // prepare the necessary parameters to upload all the files
- for (const file of filesToUpload) {
- const resourceUrl = new url_1.URL(uploadUrl);
- resourceUrl.searchParams.append('itemPath', file.uploadFilePath);
- parameters.push({
- file: file.absoluteFilePath,
- resourceUrl: resourceUrl.toString(),
- maxChunkSize: MAX_CHUNK_SIZE,
- continueOnError
- });
- }
- const parallelUploads = [...new Array(FILE_CONCURRENCY).keys()];
- const failedItemsToReport = [];
- let currentFile = 0;
- let completedFiles = 0;
- let uploadFileSize = 0;
- let totalFileSize = 0;
- let abortPendingFileUploads = false;
- this.statusReporter.setTotalNumberOfFilesToProcess(filesToUpload.length);
- this.statusReporter.start();
- // only allow a certain amount of files to be uploaded at once, this is done to reduce potential errors
- yield Promise.all(parallelUploads.map((index) => __awaiter(this, void 0, void 0, function* () {
- while (currentFile < filesToUpload.length) {
- const currentFileParameters = parameters[currentFile];
- currentFile += 1;
- if (abortPendingFileUploads) {
- failedItemsToReport.push(currentFileParameters.file);
- continue;
- }
- const startTime = perf_hooks_1.performance.now();
- const uploadFileResult = yield this.uploadFileAsync(index, currentFileParameters);
- if (core.isDebug()) {
- core.debug(`File: ${++completedFiles}/${filesToUpload.length}. ${currentFileParameters.file} took ${(perf_hooks_1.performance.now() - startTime).toFixed(3)} milliseconds to finish upload`);
- }
- uploadFileSize += uploadFileResult.successfulUploadSize;
- totalFileSize += uploadFileResult.totalSize;
- if (uploadFileResult.isSuccess === false) {
- failedItemsToReport.push(currentFileParameters.file);
- if (!continueOnError) {
- // fail fast
- core.error(`aborting artifact upload`);
- abortPendingFileUploads = true;
- }
- }
- this.statusReporter.incrementProcessedCount();
- }
- })));
- this.statusReporter.stop();
- // done uploading, safety dispose all connections
- this.uploadHttpManager.disposeAndReplaceAllClients();
- core.info(`Total size of all the files uploaded is ${uploadFileSize} bytes`);
- return {
- uploadSize: uploadFileSize,
- totalSize: totalFileSize,
- failedItems: failedItemsToReport
- };
- });
- }
- /**
- * Asynchronously uploads a file. The file is compressed and uploaded using GZip if it is determined to save space.
- * If the upload file is bigger than the max chunk size it will be uploaded via multiple calls
- * @param {number} httpClientIndex The index of the httpClient that is being used to make all of the calls
- * @param {UploadFileParameters} parameters Information about the file that needs to be uploaded
- * @returns The size of the file that was uploaded in bytes along with any failed uploads
- */
- uploadFileAsync(httpClientIndex, parameters) {
- return __awaiter(this, void 0, void 0, function* () {
- const totalFileSize = (yield stat(parameters.file)).size;
- let offset = 0;
- let isUploadSuccessful = true;
- let failedChunkSizes = 0;
- let uploadFileSize = 0;
- let isGzip = true;
- // the file that is being uploaded is less than 64k in size, to increase throughput and to minimize disk I/O
- // for creating a new GZip file, an in-memory buffer is used for compression
- if (totalFileSize < 65536) {
- const buffer = yield upload_gzip_1.createGZipFileInBuffer(parameters.file);
- //An open stream is needed in the event of a failure and we need to retry. If a NodeJS.ReadableStream is directly passed in,
- // it will not properly get reset to the start of the stream if a chunk upload needs to be retried
- let openUploadStream;
- if (totalFileSize < buffer.byteLength) {
- // compression did not help with reducing the size, use a readable stream from the original file for upload
- openUploadStream = () => fs.createReadStream(parameters.file);
- isGzip = false;
- uploadFileSize = totalFileSize;
- }
- else {
- // create a readable stream using a PassThrough stream that is both readable and writable
- openUploadStream = () => {
- const passThrough = new stream.PassThrough();
- passThrough.end(buffer);
- return passThrough;
- };
- uploadFileSize = buffer.byteLength;
- }
- const result = yield this.uploadChunk(httpClientIndex, parameters.resourceUrl, openUploadStream, 0, uploadFileSize - 1, uploadFileSize, isGzip, totalFileSize);
- if (!result) {
- // chunk failed to upload
- isUploadSuccessful = false;
- failedChunkSizes += uploadFileSize;
- core.warning(`Aborting upload for ${parameters.file} due to failure`);
- }
- return {
- isSuccess: isUploadSuccessful,
- successfulUploadSize: uploadFileSize - failedChunkSizes,
- totalSize: totalFileSize
- };
- }
- else {
- // the file that is being uploaded is greater than 64k in size, a temporary file gets created on disk using the
- // npm tmp-promise package and this file gets used to create a GZipped file
- const tempFile = yield tmp.file();
- // create a GZip file of the original file being uploaded, the original file should not be modified in any way
- uploadFileSize = yield upload_gzip_1.createGZipFileOnDisk(parameters.file, tempFile.path);
- let uploadFilePath = tempFile.path;
- // compression did not help with size reduction, use the original file for upload and delete the temp GZip file
- if (totalFileSize < uploadFileSize) {
- uploadFileSize = totalFileSize;
- uploadFilePath = parameters.file;
- isGzip = false;
- }
- let abortFileUpload = false;
- // upload only a single chunk at a time
- while (offset < uploadFileSize) {
- const chunkSize = Math.min(uploadFileSize - offset, parameters.maxChunkSize);
- // if an individual file is greater than 100MB (1024*1024*100) in size, display extra information about the upload status
- if (uploadFileSize > 104857600) {
- this.statusReporter.updateLargeFileStatus(parameters.file, offset, uploadFileSize);
- }
- const start = offset;
- const end = offset + chunkSize - 1;
- offset += parameters.maxChunkSize;
- if (abortFileUpload) {
- // if we don't want to continue in the event of an error, any pending upload chunks will be marked as failed
- failedChunkSizes += chunkSize;
- continue;
- }
- const result = yield this.uploadChunk(httpClientIndex, parameters.resourceUrl, () => fs.createReadStream(uploadFilePath, {
- start,
- end,
- autoClose: false
- }), start, end, uploadFileSize, isGzip, totalFileSize);
- if (!result) {
- // Chunk failed to upload, report as failed and do not continue uploading any more chunks for the file. It is possible that part of a chunk was
- // successfully uploaded so the server may report a different size for what was uploaded
- isUploadSuccessful = false;
- failedChunkSizes += chunkSize;
- core.warning(`Aborting upload for ${parameters.file} due to failure`);
- abortFileUpload = true;
- }
- }
- // Delete the temporary file that was created as part of the upload. If the temp file does not get manually deleted by
- // calling cleanup, it gets removed when the node process exits. For more info see: https://www.npmjs.com/package/tmp-promise#about
- yield tempFile.cleanup();
- return {
- isSuccess: isUploadSuccessful,
- successfulUploadSize: uploadFileSize - failedChunkSizes,
- totalSize: totalFileSize
- };
- }
- });
- }
- /**
- * Uploads a chunk of an individual file to the specified resourceUrl. If the upload fails and the status code
- * indicates a retryable status, we try to upload the chunk as well
- * @param {number} httpClientIndex The index of the httpClient being used to make all the necessary calls
- * @param {string} resourceUrl Url of the resource that the chunk will be uploaded to
- * @param {NodeJS.ReadableStream} openStream Stream of the file that will be uploaded
- * @param {number} start Starting byte index of file that the chunk belongs to
- * @param {number} end Ending byte index of file that the chunk belongs to
- * @param {number} uploadFileSize Total size of the file in bytes that is being uploaded
- * @param {boolean} isGzip Denotes if we are uploading a Gzip compressed stream
- * @param {number} totalFileSize Original total size of the file that is being uploaded
- * @returns if the chunk was successfully uploaded
- */
- uploadChunk(httpClientIndex, resourceUrl, openStream, start, end, uploadFileSize, isGzip, totalFileSize) {
- return __awaiter(this, void 0, void 0, function* () {
- // prepare all the necessary headers before making any http call
- const headers = utils_1.getUploadHeaders('application/octet-stream', true, isGzip, totalFileSize, end - start + 1, utils_1.getContentRange(start, end, uploadFileSize));
- const uploadChunkRequest = () => __awaiter(this, void 0, void 0, function* () {
- const client = this.uploadHttpManager.getClient(httpClientIndex);
- return yield client.sendStream('PUT', resourceUrl, openStream(), headers);
- });
- let retryCount = 0;
- const retryLimit = config_variables_1.getRetryLimit();
- // Increments the current retry count and then checks if the retry limit has been reached
- // If there have been too many retries, fail so the download stops
- const incrementAndCheckRetryLimit = (response) => {
- retryCount++;
- if (retryCount > retryLimit) {
- if (response) {
- utils_1.displayHttpDiagnostics(response);
- }
- core.info(`Retry limit has been reached for chunk at offset ${start} to ${resourceUrl}`);
- return true;
- }
- return false;
- };
- const backOff = (retryAfterValue) => __awaiter(this, void 0, void 0, function* () {
- this.uploadHttpManager.disposeAndReplaceClient(httpClientIndex);
- if (retryAfterValue) {
- core.info(`Backoff due to too many requests, retry #${retryCount}. Waiting for ${retryAfterValue} milliseconds before continuing the upload`);
- yield utils_1.sleep(retryAfterValue);
- }
- else {
- const backoffTime = utils_1.getExponentialRetryTimeInMilliseconds(retryCount);
- core.info(`Exponential backoff for retry #${retryCount}. Waiting for ${backoffTime} milliseconds before continuing the upload at offset ${start}`);
- yield utils_1.sleep(backoffTime);
- }
- core.info(`Finished backoff for retry #${retryCount}, continuing with upload`);
- return;
- });
- // allow for failed chunks to be retried multiple times
- while (retryCount <= retryLimit) {
- let response;
- try {
- response = yield uploadChunkRequest();
- }
- catch (error) {
- // if an error is caught, it is usually indicative of a timeout so retry the upload
- core.info(`An error has been caught http-client index ${httpClientIndex}, retrying the upload`);
- // eslint-disable-next-line no-console
- console.log(error);
- if (incrementAndCheckRetryLimit()) {
- return false;
- }
- yield backOff();
- continue;
- }
- // Always read the body of the response. There is potential for a resource leak if the body is not read which will
- // result in the connection remaining open along with unintended consequences when trying to dispose of the client
- yield response.readBody();
- if (utils_1.isSuccessStatusCode(response.message.statusCode)) {
- return true;
- }
- else if (utils_1.isRetryableStatusCode(response.message.statusCode)) {
- core.info(`A ${response.message.statusCode} status code has been received, will attempt to retry the upload`);
- if (incrementAndCheckRetryLimit(response)) {
- return false;
- }
- utils_1.isThrottledStatusCode(response.message.statusCode)
- ? yield backOff(utils_1.tryGetRetryAfterValueTimeInMilliseconds(response.message.headers))
- : yield backOff();
- }
- else {
- core.error(`Unexpected response. Unable to upload chunk to ${resourceUrl}`);
- utils_1.displayHttpDiagnostics(response);
- return false;
- }
- }
- return false;
- });
- }
- /**
- * Updates the size of the artifact from -1 which was initially set when the container was first created for the artifact.
- * Updating the size indicates that we are done uploading all the contents of the artifact
- */
- patchArtifactSize(size, artifactName) {
- return __awaiter(this, void 0, void 0, function* () {
- const resourceUrl = new url_1.URL(utils_1.getArtifactUrl());
- resourceUrl.searchParams.append('artifactName', artifactName);
- const parameters = { Size: size };
- const data = JSON.stringify(parameters, null, 2);
- core.debug(`URL is ${resourceUrl.toString()}`);
- // use the first client from the httpManager, `keep-alive` is not used so the connection will close immediately
- const client = this.uploadHttpManager.getClient(0);
- const headers = utils_1.getUploadHeaders('application/json', false);
- // Extra information to display when a particular HTTP code is returned
- const customErrorMessages = new Map([
- [
- http_client_1.HttpCodes.NotFound,
- `An Artifact with the name ${artifactName} was not found`
- ]
- ]);
- // TODO retry for all possible response codes, the artifact upload is pretty much complete so it at all costs we should try to finish this
- const response = yield requestUtils_1.retryHttpClientRequest('Finalize artifact upload', () => __awaiter(this, void 0, void 0, function* () { return client.patch(resourceUrl.toString(), data, headers); }), customErrorMessages);
- yield response.readBody();
- core.debug(`Artifact ${artifactName} has been successfully uploaded, total size in bytes: ${size}`);
- });
- }
- }
- exports.UploadHttpClient = UploadHttpClient;
+ "use strict";
+ var __awaiter = (this && this.__awaiter) || function (thisArg, _arguments, P, generator) {
+ function adopt(value) { return value instanceof P ? value : new P(function (resolve) { resolve(value); }); }
+ return new (P || (P = Promise))(function (resolve, reject) {
+ function fulfilled(value) { try { step(generator.next(value)); } catch (e) { reject(e); } }
+ function rejected(value) { try { step(generator["throw"](value)); } catch (e) { reject(e); } }
+ function step(result) { result.done ? resolve(result.value) : adopt(result.value).then(fulfilled, rejected); }
+ step((generator = generator.apply(thisArg, _arguments || [])).next());
+ });
+ };
+ var __importStar = (this && this.__importStar) || function (mod) {
+ if (mod && mod.__esModule) return mod;
+ var result = {};
+ if (mod != null) for (var k in mod) if (Object.hasOwnProperty.call(mod, k)) result[k] = mod[k];
+ result["default"] = mod;
+ return result;
+ };
+ Object.defineProperty(exports, "__esModule", { value: true });
+ const fs = __importStar(require("fs"));
+ const core = __importStar(require("@actions/core"));
+ const tmp = __importStar(require("tmp-promise"));
+ const stream = __importStar(require("stream"));
+ const utils_1 = require("./utils");
+ const config_variables_1 = require("./config-variables");
+ const util_1 = require("util");
+ const url_1 = require("url");
+ const perf_hooks_1 = require("perf_hooks");
+ const status_reporter_1 = require("./status-reporter");
+ const http_client_1 = require("@actions/http-client");
+ const http_manager_1 = require("./http-manager");
+ const upload_gzip_1 = require("./upload-gzip");
+ const requestUtils_1 = require("./requestUtils");
+ const stat = util_1.promisify(fs.stat);
+ class UploadHttpClient {
+ constructor() {
+ this.uploadHttpManager = new http_manager_1.HttpManager(config_variables_1.getUploadFileConcurrency(), '@actions/artifact-upload');
+ this.statusReporter = new status_reporter_1.StatusReporter(10000);
+ }
+ /**
+ * Creates a file container for the new artifact in the remote blob storage/file service
+ * @param {string} artifactName Name of the artifact being created
+ * @returns The response from the Artifact Service if the file container was successfully created
+ */
+ createArtifactInFileContainer(artifactName, options) {
+ return __awaiter(this, void 0, void 0, function* () {
+ const parameters = {
+ Type: 'actions_storage',
+ Name: artifactName
+ };
+ // calculate retention period
+ if (options && options.retentionDays) {
+ const maxRetentionStr = config_variables_1.getRetentionDays();
+ parameters.RetentionDays = utils_1.getProperRetention(options.retentionDays, maxRetentionStr);
+ }
+ const data = JSON.stringify(parameters, null, 2);
+ const artifactUrl = utils_1.getArtifactUrl();
+ // use the first client from the httpManager, `keep-alive` is not used so the connection will close immediately
+ const client = this.uploadHttpManager.getClient(0);
+ const headers = utils_1.getUploadHeaders('application/json', false);
+ // Extra information to display when a particular HTTP code is returned
+ // If a 403 is returned when trying to create a file container, the customer has exceeded
+ // their storage quota so no new artifact containers can be created
+ const customErrorMessages = new Map([
+ [
+ http_client_1.HttpCodes.Forbidden,
+ 'Artifact storage quota has been hit. Unable to upload any new artifacts'
+ ],
+ [
+ http_client_1.HttpCodes.BadRequest,
+ `The artifact name ${artifactName} is not valid. Request URL ${artifactUrl}`
+ ]
+ ]);
+ const response = yield requestUtils_1.retryHttpClientRequest('Create Artifact Container', () => __awaiter(this, void 0, void 0, function* () { return client.post(artifactUrl, data, headers); }), customErrorMessages);
+ const body = yield response.readBody();
+ return JSON.parse(body);
+ });
+ }
+ /**
+ * Concurrently upload all of the files in chunks
+ * @param {string} uploadUrl Base Url for the artifact that was created
+ * @param {SearchResult[]} filesToUpload A list of information about the files being uploaded
+ * @returns The size of all the files uploaded in bytes
+ */
+ uploadArtifactToFileContainer(uploadUrl, filesToUpload, options) {
+ return __awaiter(this, void 0, void 0, function* () {
+ const FILE_CONCURRENCY = config_variables_1.getUploadFileConcurrency();
+ const MAX_CHUNK_SIZE = config_variables_1.getUploadChunkSize();
+ core.debug(`File Concurrency: ${FILE_CONCURRENCY}, and Chunk Size: ${MAX_CHUNK_SIZE}`);
+ const parameters = [];
+ // by default, file uploads will continue if there is an error unless specified differently in the options
+ let continueOnError = true;
+ if (options) {
+ if (options.continueOnError === false) {
+ continueOnError = false;
+ }
+ }
+ // prepare the necessary parameters to upload all the files
+ for (const file of filesToUpload) {
+ const resourceUrl = new url_1.URL(uploadUrl);
+ resourceUrl.searchParams.append('itemPath', file.uploadFilePath);
+ parameters.push({
+ file: file.absoluteFilePath,
+ resourceUrl: resourceUrl.toString(),
+ maxChunkSize: MAX_CHUNK_SIZE,
+ continueOnError
+ });
+ }
+ const parallelUploads = [...new Array(FILE_CONCURRENCY).keys()];
+ const failedItemsToReport = [];
+ let currentFile = 0;
+ let completedFiles = 0;
+ let uploadFileSize = 0;
+ let totalFileSize = 0;
+ let abortPendingFileUploads = false;
+ this.statusReporter.setTotalNumberOfFilesToProcess(filesToUpload.length);
+ this.statusReporter.start();
+ // only allow a certain amount of files to be uploaded at once, this is done to reduce potential errors
+ yield Promise.all(parallelUploads.map((index) => __awaiter(this, void 0, void 0, function* () {
+ while (currentFile < filesToUpload.length) {
+ const currentFileParameters = parameters[currentFile];
+ currentFile += 1;
+ if (abortPendingFileUploads) {
+ failedItemsToReport.push(currentFileParameters.file);
+ continue;
+ }
+ const startTime = perf_hooks_1.performance.now();
+ const uploadFileResult = yield this.uploadFileAsync(index, currentFileParameters);
+ if (core.isDebug()) {
+ core.debug(`File: ${++completedFiles}/${filesToUpload.length}. ${currentFileParameters.file} took ${(perf_hooks_1.performance.now() - startTime).toFixed(3)} milliseconds to finish upload`);
+ }
+ uploadFileSize += uploadFileResult.successfulUploadSize;
+ totalFileSize += uploadFileResult.totalSize;
+ if (uploadFileResult.isSuccess === false) {
+ failedItemsToReport.push(currentFileParameters.file);
+ if (!continueOnError) {
+ // fail fast
+ core.error(`aborting artifact upload`);
+ abortPendingFileUploads = true;
+ }
+ }
+ this.statusReporter.incrementProcessedCount();
+ }
+ })));
+ this.statusReporter.stop();
+ // done uploading, safety dispose all connections
+ this.uploadHttpManager.disposeAndReplaceAllClients();
+ core.info(`Total size of all the files uploaded is ${uploadFileSize} bytes`);
+ return {
+ uploadSize: uploadFileSize,
+ totalSize: totalFileSize,
+ failedItems: failedItemsToReport
+ };
+ });
+ }
+ /**
+ * Asynchronously uploads a file. The file is compressed and uploaded using GZip if it is determined to save space.
+ * If the upload file is bigger than the max chunk size it will be uploaded via multiple calls
+ * @param {number} httpClientIndex The index of the httpClient that is being used to make all of the calls
+ * @param {UploadFileParameters} parameters Information about the file that needs to be uploaded
+ * @returns The size of the file that was uploaded in bytes along with any failed uploads
+ */
+ uploadFileAsync(httpClientIndex, parameters) {
+ return __awaiter(this, void 0, void 0, function* () {
+ const fileStat = yield stat(parameters.file);
+ const totalFileSize = fileStat.size;
+ const isFIFO = fileStat.isFIFO();
+ let offset = 0;
+ let isUploadSuccessful = true;
+ let failedChunkSizes = 0;
+ let uploadFileSize = 0;
+ let isGzip = true;
+ // the file that is being uploaded is less than 64k in size to increase throughput and to minimize disk I/O
+ // for creating a new GZip file, an in-memory buffer is used for compression
+ // with named pipes the file size is reported as zero in that case don't read the file in memory
+ if (!isFIFO && totalFileSize < 65536) {
+ core.debug(`${parameters.file} is less than 64k in size. Creating a gzip file in-memory to potentially reduce the upload size`);
+ const buffer = yield upload_gzip_1.createGZipFileInBuffer(parameters.file);
+ // An open stream is needed in the event of a failure and we need to retry. If a NodeJS.ReadableStream is directly passed in,
+ // it will not properly get reset to the start of the stream if a chunk upload needs to be retried
+ let openUploadStream;
+ if (totalFileSize < buffer.byteLength) {
+ // compression did not help with reducing the size, use a readable stream from the original file for upload
+ core.debug(`The gzip file created for ${parameters.file} did not help with reducing the size of the file. The original file will be uploaded as-is`);
+ openUploadStream = () => fs.createReadStream(parameters.file);
+ isGzip = false;
+ uploadFileSize = totalFileSize;
+ }
+ else {
+ // create a readable stream using a PassThrough stream that is both readable and writable
+ core.debug(`A gzip file created for ${parameters.file} helped with reducing the size of the original file. The file will be uploaded using gzip.`);
+ openUploadStream = () => {
+ const passThrough = new stream.PassThrough();
+ passThrough.end(buffer);
+ return passThrough;
+ };
+ uploadFileSize = buffer.byteLength;
+ }
+ const result = yield this.uploadChunk(httpClientIndex, parameters.resourceUrl, openUploadStream, 0, uploadFileSize - 1, uploadFileSize, isGzip, totalFileSize);
+ if (!result) {
+ // chunk failed to upload
+ isUploadSuccessful = false;
+ failedChunkSizes += uploadFileSize;
+ core.warning(`Aborting upload for ${parameters.file} due to failure`);
+ }
+ return {
+ isSuccess: isUploadSuccessful,
+ successfulUploadSize: uploadFileSize - failedChunkSizes,
+ totalSize: totalFileSize
+ };
+ }
+ else {
+ // the file that is being uploaded is greater than 64k in size, a temporary file gets created on disk using the
+ // npm tmp-promise package and this file gets used to create a GZipped file
+ const tempFile = yield tmp.file();
+ core.debug(`${parameters.file} is greater than 64k in size. Creating a gzip file on-disk ${tempFile.path} to potentially reduce the upload size`);
+ // create a GZip file of the original file being uploaded, the original file should not be modified in any way
+ uploadFileSize = yield upload_gzip_1.createGZipFileOnDisk(parameters.file, tempFile.path);
+ let uploadFilePath = tempFile.path;
+ // compression did not help with size reduction, use the original file for upload and delete the temp GZip file
+ // for named pipes totalFileSize is zero, this assumes compression did help
+ if (!isFIFO && totalFileSize < uploadFileSize) {
+ core.debug(`The gzip file created for ${parameters.file} did not help with reducing the size of the file. The original file will be uploaded as-is`);
+ uploadFileSize = totalFileSize;
+ uploadFilePath = parameters.file;
+ isGzip = false;
+ }
+ else {
+ core.debug(`The gzip file created for ${parameters.file} is smaller than the original file. The file will be uploaded using gzip.`);
+ }
+ let abortFileUpload = false;
+ // upload only a single chunk at a time
+ while (offset < uploadFileSize) {
+ const chunkSize = Math.min(uploadFileSize - offset, parameters.maxChunkSize);
+ const startChunkIndex = offset;
+ const endChunkIndex = offset + chunkSize - 1;
+ offset += parameters.maxChunkSize;
+ if (abortFileUpload) {
+ // if we don't want to continue in the event of an error, any pending upload chunks will be marked as failed
+ failedChunkSizes += chunkSize;
+ continue;
+ }
+ const result = yield this.uploadChunk(httpClientIndex, parameters.resourceUrl, () => fs.createReadStream(uploadFilePath, {
+ start: startChunkIndex,
+ end: endChunkIndex,
+ autoClose: false
+ }), startChunkIndex, endChunkIndex, uploadFileSize, isGzip, totalFileSize);
+ if (!result) {
+ // Chunk failed to upload, report as failed and do not continue uploading any more chunks for the file. It is possible that part of a chunk was
+ // successfully uploaded so the server may report a different size for what was uploaded
+ isUploadSuccessful = false;
+ failedChunkSizes += chunkSize;
+ core.warning(`Aborting upload for ${parameters.file} due to failure`);
+ abortFileUpload = true;
+ }
+ else {
+ // if an individual file is greater than 8MB (1024*1024*8) in size, display extra information about the upload status
+ if (uploadFileSize > 8388608) {
+ this.statusReporter.updateLargeFileStatus(parameters.file, startChunkIndex, endChunkIndex, uploadFileSize);
+ }
+ }
+ }
+ // Delete the temporary file that was created as part of the upload. If the temp file does not get manually deleted by
+ // calling cleanup, it gets removed when the node process exits. For more info see: https://www.npmjs.com/package/tmp-promise#about
+ core.debug(`deleting temporary gzip file ${tempFile.path}`);
+ yield tempFile.cleanup();
+ return {
+ isSuccess: isUploadSuccessful,
+ successfulUploadSize: uploadFileSize - failedChunkSizes,
+ totalSize: totalFileSize
+ };
+ }
+ });
+ }
+ /**
+ * Uploads a chunk of an individual file to the specified resourceUrl. If the upload fails and the status code
+ * indicates a retryable status, we try to upload the chunk as well
+ * @param {number} httpClientIndex The index of the httpClient being used to make all the necessary calls
+ * @param {string} resourceUrl Url of the resource that the chunk will be uploaded to
+ * @param {NodeJS.ReadableStream} openStream Stream of the file that will be uploaded
+ * @param {number} start Starting byte index of file that the chunk belongs to
+ * @param {number} end Ending byte index of file that the chunk belongs to
+ * @param {number} uploadFileSize Total size of the file in bytes that is being uploaded
+ * @param {boolean} isGzip Denotes if we are uploading a Gzip compressed stream
+ * @param {number} totalFileSize Original total size of the file that is being uploaded
+ * @returns if the chunk was successfully uploaded
+ */
+ uploadChunk(httpClientIndex, resourceUrl, openStream, start, end, uploadFileSize, isGzip, totalFileSize) {
+ return __awaiter(this, void 0, void 0, function* () {
+ // prepare all the necessary headers before making any http call
+ const headers = utils_1.getUploadHeaders('application/octet-stream', true, isGzip, totalFileSize, end - start + 1, utils_1.getContentRange(start, end, uploadFileSize));
+ const uploadChunkRequest = () => __awaiter(this, void 0, void 0, function* () {
+ const client = this.uploadHttpManager.getClient(httpClientIndex);
+ return yield client.sendStream('PUT', resourceUrl, openStream(), headers);
+ });
+ let retryCount = 0;
+ const retryLimit = config_variables_1.getRetryLimit();
+ // Increments the current retry count and then checks if the retry limit has been reached
+ // If there have been too many retries, fail so the download stops
+ const incrementAndCheckRetryLimit = (response) => {
+ retryCount++;
+ if (retryCount > retryLimit) {
+ if (response) {
+ utils_1.displayHttpDiagnostics(response);
+ }
+ core.info(`Retry limit has been reached for chunk at offset ${start} to ${resourceUrl}`);
+ return true;
+ }
+ return false;
+ };
+ const backOff = (retryAfterValue) => __awaiter(this, void 0, void 0, function* () {
+ this.uploadHttpManager.disposeAndReplaceClient(httpClientIndex);
+ if (retryAfterValue) {
+ core.info(`Backoff due to too many requests, retry #${retryCount}. Waiting for ${retryAfterValue} milliseconds before continuing the upload`);
+ yield utils_1.sleep(retryAfterValue);
+ }
+ else {
+ const backoffTime = utils_1.getExponentialRetryTimeInMilliseconds(retryCount);
+ core.info(`Exponential backoff for retry #${retryCount}. Waiting for ${backoffTime} milliseconds before continuing the upload at offset ${start}`);
+ yield utils_1.sleep(backoffTime);
+ }
+ core.info(`Finished backoff for retry #${retryCount}, continuing with upload`);
+ return;
+ });
+ // allow for failed chunks to be retried multiple times
+ while (retryCount <= retryLimit) {
+ let response;
+ try {
+ response = yield uploadChunkRequest();
+ }
+ catch (error) {
+ // if an error is caught, it is usually indicative of a timeout so retry the upload
+ core.info(`An error has been caught http-client index ${httpClientIndex}, retrying the upload`);
+ // eslint-disable-next-line no-console
+ console.log(error);
+ if (incrementAndCheckRetryLimit()) {
+ return false;
+ }
+ yield backOff();
+ continue;
+ }
+ // Always read the body of the response. There is potential for a resource leak if the body is not read which will
+ // result in the connection remaining open along with unintended consequences when trying to dispose of the client
+ yield response.readBody();
+ if (utils_1.isSuccessStatusCode(response.message.statusCode)) {
+ return true;
+ }
+ else if (utils_1.isRetryableStatusCode(response.message.statusCode)) {
+ core.info(`A ${response.message.statusCode} status code has been received, will attempt to retry the upload`);
+ if (incrementAndCheckRetryLimit(response)) {
+ return false;
+ }
+ utils_1.isThrottledStatusCode(response.message.statusCode)
+ ? yield backOff(utils_1.tryGetRetryAfterValueTimeInMilliseconds(response.message.headers))
+ : yield backOff();
+ }
+ else {
+ core.error(`Unexpected response. Unable to upload chunk to ${resourceUrl}`);
+ utils_1.displayHttpDiagnostics(response);
+ return false;
+ }
+ }
+ return false;
+ });
+ }
+ /**
+ * Updates the size of the artifact from -1 which was initially set when the container was first created for the artifact.
+ * Updating the size indicates that we are done uploading all the contents of the artifact
+ */
+ patchArtifactSize(size, artifactName) {
+ return __awaiter(this, void 0, void 0, function* () {
+ const resourceUrl = new url_1.URL(utils_1.getArtifactUrl());
+ resourceUrl.searchParams.append('artifactName', artifactName);
+ const parameters = { Size: size };
+ const data = JSON.stringify(parameters, null, 2);
+ core.debug(`URL is ${resourceUrl.toString()}`);
+ // use the first client from the httpManager, `keep-alive` is not used so the connection will close immediately
+ const client = this.uploadHttpManager.getClient(0);
+ const headers = utils_1.getUploadHeaders('application/json', false);
+ // Extra information to display when a particular HTTP code is returned
+ const customErrorMessages = new Map([
+ [
+ http_client_1.HttpCodes.NotFound,
+ `An Artifact with the name ${artifactName} was not found`
+ ]
+ ]);
+ // TODO retry for all possible response codes, the artifact upload is pretty much complete so it at all costs we should try to finish this
+ const response = yield requestUtils_1.retryHttpClientRequest('Finalize artifact upload', () => __awaiter(this, void 0, void 0, function* () { return client.patch(resourceUrl.toString(), data, headers); }), customErrorMessages);
+ yield response.readBody();
+ core.debug(`Artifact ${artifactName} has been successfully uploaded, total size in bytes: ${size}`);
+ });
+ }
+ }
+ exports.UploadHttpClient = UploadHttpClient;
  //# sourceMappingURL=upload-http-client.js.map
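
For orientation, here is a minimal sketch of how the internal UploadHttpClient shown above is reached through the package's public surface (artifact-client.d.ts in the file list). It assumes the 0.x API of create() and uploadArtifact(name, files, rootDirectory, options); the artifact name and file paths below are placeholders.

const artifact = require('@actions/artifact');

async function run() {
  const client = artifact.create();
  // continueOnError and retentionDays feed the options handling visible in
  // uploadArtifactToFileContainer and createArtifactInFileContainer above
  const response = await client.uploadArtifact(
    'my-artifact',                    // placeholder artifact name
    ['dist/output.txt'],              // placeholder file list
    'dist',                           // root directory used to derive each itemPath
    {continueOnError: false, retentionDays: 7}
  );
  if (response.failedItems.length > 0) {
    // files whose chunk uploads failed, as collected in failedItemsToReport
    throw new Error(`Failed to upload: ${response.failedItems.join(', ')}`);
  }
}

run();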