@backstage/plugin-techdocs-node 1.2.0-next.2 → 1.2.1-next.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +48 -0
- package/dist/index.cjs.js +634 -246
- package/dist/index.cjs.js.map +1 -1
- package/package.json +8 -8
package/dist/index.cjs.js
CHANGED
@@ -74,7 +74,9 @@ const lowerCaseEntityTripletInStoragePath = (originalPath) => {
     parts.shift();
   }
   if (parts.length <= 3) {
-    throw new Error(`Encountered file unmanaged by TechDocs ${originalPath}. Skipping.`);
+    throw new Error(
+      `Encountered file unmanaged by TechDocs ${originalPath}. Skipping.`
+    );
   }
   return lowerCaseEntityTriplet(parts.join(path__default["default"].posix.sep));
 };
@@ -156,7 +158,10 @@ const getRepoUrlFromLocationAnnotation = (parsedLocationAnnotation, scmIntegrati
       url: `./${docsFolder}`,
       base: target
     });
-    return { repo_url: target, edit_uri: integration.resolveEditUrl(sourceFolder) };
+    return {
+      repo_url: target,
+      edit_uri: integration.resolveEditUrl(sourceFolder)
+    };
   }
   }
   return {};
@@ -202,7 +207,10 @@ const getMkdocsYml = async (inputDir) => {
     mkdocsYmlPath = path__default["default"].join(inputDir, "mkdocs.yml");
     mkdocsYmlFileString = await fs__default["default"].readFile(mkdocsYmlPath, "utf8");
   } catch (error) {
-    throw new errors.ForwardedError("Could not read MkDocs YAML config file mkdocs.yml or mkdocs.yaml for validation", error);
+    throw new errors.ForwardedError(
+      "Could not read MkDocs YAML config file mkdocs.yml or mkdocs.yaml for validation",
+      error
+    );
   }
   }
   return {
@@ -219,8 +227,10 @@ const validateMkdocsYaml = async (inputDir, mkdocsYmlFileString) => {
   }
   const parsedMkdocsYml = mkdocsYml;
   if (parsedMkdocsYml.docs_dir && !backendCommon.isChildPath(inputDir, path.resolve(inputDir, parsedMkdocsYml.docs_dir))) {
-    throw new Error(`docs_dir configuration value in mkdocs can't be an absolute directory or start with ../ for security reasons.
-Use relative paths instead which are resolved relative to your mkdocs.yml file location.`);
+    throw new Error(
+      `docs_dir configuration value in mkdocs can't be an absolute directory or start with ../ for security reasons.
+Use relative paths instead which are resolved relative to your mkdocs.yml file location.`
+    );
   }
   return parsedMkdocsYml.docs_dir;
 };
@@ -250,10 +260,12 @@ const patchIndexPreBuild = async ({
       logger.warn(`${path__default["default"].relative(inputDir, filePath)} not found.`);
     }
   }
-  logger.warn(
-    `Could not find any techdocs' index file. Please make sure at least one of ${[
-      indexMdPath, ...fallbacks].join(" ")} exists.`
-  );
+  logger.warn(
+    `Could not find any techdocs' index file. Please make sure at least one of ${[
+      indexMdPath,
+      ...fallbacks
+    ].join(" ")} exists.`
+  );
 };
 const createOrUpdateMetadata = async (techdocsMetadataPath, logger) => {
   const techdocsMetadataDir = techdocsMetadataPath.split(path__default["default"].sep).slice(0, -1).join(path__default["default"].sep);
@@ -273,7 +285,9 @@ const createOrUpdateMetadata = async (techdocsMetadataPath, logger) => {
   }
   json.build_timestamp = Date.now();
   try {
-    json.files = (await getFileTreeRecursively(techdocsMetadataDir)).map((file) => file.replace(`${techdocsMetadataDir}${path__default["default"].sep}`, ""));
+    json.files = (await getFileTreeRecursively(techdocsMetadataDir)).map(
+      (file) => file.replace(`${techdocsMetadataDir}${path__default["default"].sep}`, "")
+    );
   } catch (err) {
     errors.assertError(err);
     json.files = [];
@@ -295,7 +309,9 @@ const patchMkdocsFile = async (mkdocsYmlPath, logger, updateAction) => {
     mkdocsYmlFileString = await fs__default["default"].readFile(mkdocsYmlPath, "utf8");
   } catch (error) {
     errors.assertError(error);
-    logger.warn(`Could not read MkDocs YAML config file ${mkdocsYmlPath} before running the generator: ${error.message}`);
+    logger.warn(
+      `Could not read MkDocs YAML config file ${mkdocsYmlPath} before running the generator: ${error.message}`
+    );
     return;
   }
   let mkdocsYml;
@@ -306,28 +322,44 @@ const patchMkdocsFile = async (mkdocsYmlPath, logger, updateAction) => {
     }
   } catch (error) {
     errors.assertError(error);
-    logger.warn(`Error in parsing YAML at ${mkdocsYmlPath} before running the generator. ${error.message}`);
+    logger.warn(
+      `Error in parsing YAML at ${mkdocsYmlPath} before running the generator. ${error.message}`
+    );
     return;
   }
   didEdit = updateAction(mkdocsYml);
   try {
     if (didEdit) {
-      await fs__default["default"].writeFile(mkdocsYmlPath, yaml__default["default"].dump(mkdocsYml, { schema: MKDOCS_SCHEMA }), "utf8");
+      await fs__default["default"].writeFile(
+        mkdocsYmlPath,
+        yaml__default["default"].dump(mkdocsYml, { schema: MKDOCS_SCHEMA }),
+        "utf8"
+      );
     }
   } catch (error) {
     errors.assertError(error);
-    logger.warn(`Could not write to ${mkdocsYmlPath} after updating it before running the generator. ${error.message}`);
+    logger.warn(
+      `Could not write to ${mkdocsYmlPath} after updating it before running the generator. ${error.message}`
+    );
     return;
   }
 };
 const patchMkdocsYmlPreBuild = async (mkdocsYmlPath, logger, parsedLocationAnnotation, scmIntegrations) => {
   await patchMkdocsFile(mkdocsYmlPath, logger, (mkdocsYml) => {
     if (!("repo_url" in mkdocsYml) && !("edit_uri" in mkdocsYml)) {
-      const result = getRepoUrlFromLocationAnnotation(parsedLocationAnnotation, scmIntegrations, mkdocsYml.docs_dir);
+      const result = getRepoUrlFromLocationAnnotation(
+        parsedLocationAnnotation,
+        scmIntegrations,
+        mkdocsYml.docs_dir
+      );
       if (result.repo_url || result.edit_uri) {
         mkdocsYml.repo_url = result.repo_url;
         mkdocsYml.edit_uri = result.edit_uri;
-        logger.info(`Set ${JSON.stringify(result)}. You can disable this feature by manually setting 'repo_url' or 'edit_uri' according to the MkDocs documentation at https://www.mkdocs.org/user-guide/configuration/#repo_url`);
+        logger.info(
+          `Set ${JSON.stringify(
+            result
+          )}. You can disable this feature by manually setting 'repo_url' or 'edit_uri' according to the MkDocs documentation at https://www.mkdocs.org/user-guide/configuration/#repo_url`
+        );
         return true;
       }
     }
@@ -378,7 +410,12 @@ const _TechdocsGenerator = class {
     const { path: mkdocsYmlPath, content } = await getMkdocsYml(inputDir);
     const docsDir = await validateMkdocsYaml(inputDir, content);
     if (parsedLocationAnnotation) {
-      await patchMkdocsYmlPreBuild(mkdocsYmlPath, childLogger, parsedLocationAnnotation, this.scmIntegrations);
+      await patchMkdocsYmlPreBuild(
+        mkdocsYmlPath,
+        childLogger,
+        parsedLocationAnnotation,
+        this.scmIntegrations
+      );
     }
     if (this.options.legacyCopyReadmeMdToIndexMd) {
       await patchIndexPreBuild({ inputDir, logger: childLogger, docsDir });
@@ -401,7 +438,9 @@ const _TechdocsGenerator = class {
           },
           logStream
         });
-        childLogger.info(`Successfully generated docs from ${inputDir} into ${outputDir} using local mkdocs`);
+        childLogger.info(
+          `Successfully generated docs from ${inputDir} into ${outputDir} using local mkdocs`
+        );
         break;
       case "docker":
         await this.containerRunner.runContainer({
@@ -413,18 +452,33 @@ const _TechdocsGenerator = class {
           envVars: { HOME: "/tmp" },
          pullImage: this.options.pullImage
        });
-        childLogger.info(`Successfully generated docs from ${inputDir} into ${outputDir} using techdocs-container`);
+        childLogger.info(
+          `Successfully generated docs from ${inputDir} into ${outputDir} using techdocs-container`
+        );
        break;
      default:
-        throw new Error(`Invalid config value "${this.options.runIn}" provided in 'techdocs.generators.techdocs'.`);
+        throw new Error(
+          `Invalid config value "${this.options.runIn}" provided in 'techdocs.generators.techdocs'.`
+        );
    }
  } catch (error) {
-    this.logger.debug(`Failed to generate docs from ${inputDir} into ${outputDir}`);
-    throw new errors.ForwardedError(`Failed to generate docs from ${inputDir} into ${outputDir}`, error);
-  }
-  await createOrUpdateMetadata(path__default["default"].join(outputDir, "techdocs_metadata.json"), childLogger);
+    this.logger.debug(
+      `Failed to generate docs from ${inputDir} into ${outputDir}`
+    );
+    throw new errors.ForwardedError(
+      `Failed to generate docs from ${inputDir} into ${outputDir}`,
+      error
+    );
+  }
+  await createOrUpdateMetadata(
+    path__default["default"].join(outputDir, "techdocs_metadata.json"),
+    childLogger
+  );
  if (etag) {
-    await storeEtagMetadata(path__default["default"].join(outputDir, "techdocs_metadata.json"), etag);
+    await storeEtagMetadata(
+      path__default["default"].join(outputDir, "techdocs_metadata.json"),
+      etag
+    );
  }
 }
};
@@ -432,16 +486,24 @@ let TechdocsGenerator = _TechdocsGenerator;
 TechdocsGenerator.defaultDockerImage = "spotify/techdocs:v1.0.3";
 function readGeneratorConfig(config, logger) {
   var _a;
-  const legacyGeneratorType = config.getOptionalString("techdocs.generators.techdocs");
+  const legacyGeneratorType = config.getOptionalString(
+    "techdocs.generators.techdocs"
+  );
   if (legacyGeneratorType) {
-    logger.warn(`The 'techdocs.generators.techdocs' configuration key is deprecated and will be removed in the future. Please use 'techdocs.generator' instead. See here https://backstage.io/docs/features/techdocs/configuration`);
+    logger.warn(
+      `The 'techdocs.generators.techdocs' configuration key is deprecated and will be removed in the future. Please use 'techdocs.generator' instead. See here https://backstage.io/docs/features/techdocs/configuration`
+    );
   }
   return {
     runIn: (_a = legacyGeneratorType != null ? legacyGeneratorType : config.getOptionalString("techdocs.generator.runIn")) != null ? _a : "docker",
     dockerImage: config.getOptionalString("techdocs.generator.dockerImage"),
     pullImage: config.getOptionalBoolean("techdocs.generator.pullImage"),
-    omitTechdocsCoreMkdocsPlugin: config.getOptionalBoolean("techdocs.generator.mkdocs.omitTechdocsCorePlugin"),
-    legacyCopyReadmeMdToIndexMd: config.getOptionalBoolean("techdocs.generator.mkdocs.legacyCopyReadmeMdToIndexMd")
+    omitTechdocsCoreMkdocsPlugin: config.getOptionalBoolean(
+      "techdocs.generator.mkdocs.omitTechdocsCorePlugin"
+    ),
+    legacyCopyReadmeMdToIndexMd: config.getOptionalBoolean(
+      "techdocs.generator.mkdocs.legacyCopyReadmeMdToIndexMd"
+    )
   };
 }
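
Note: `readGeneratorConfig` resolves `runIn` with the deprecated `techdocs.generators.techdocs` key taking precedence over `techdocs.generator.runIn`, and `"docker"` as the final fallback. De-sugared from the compiled ternary chain in the hunk above:

```js
// Equivalent of the compiled expression, written out with ?? :
//   (legacyGeneratorType ?? config.getOptionalString("techdocs.generator.runIn")) ?? "docker"
function resolveRunIn(config) {
  const legacy = config.getOptionalString("techdocs.generators.techdocs");
  const current = config.getOptionalString("techdocs.generator.runIn");
  return legacy ?? current ?? "docker";
}
```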
@@ -472,7 +534,9 @@ const parseReferenceAnnotation = (annotationName, entity) => {
   var _a;
   const annotation = (_a = entity.metadata.annotations) == null ? void 0 : _a[annotationName];
   if (!annotation) {
-    throw new errors.InputError(`No location annotation provided in entity: ${entity.metadata.name}`);
+    throw new errors.InputError(
+      `No location annotation provided in entity: ${entity.metadata.name}`
+    );
   }
   const { type, target } = catalogModel.parseLocationRef(annotation);
   return {
@@ -494,7 +558,10 @@ const transformDirLocation = (entity, dirAnnotation, scmIntegrations) => {
      };
    }
    case "file": {
-      const target = backendCommon.resolveSafeChildPath(path__default["default"].dirname(location.target), dirAnnotation.target);
+      const target = backendCommon.resolveSafeChildPath(
+        path__default["default"].dirname(location.target),
+        dirAnnotation.target
+      );
      return {
        type: "dir",
        target
@@ -505,7 +572,10 @@ const transformDirLocation = (entity, dirAnnotation, scmIntegrations) => {
   }
 };
 const getLocationForEntity = (entity, scmIntegration) => {
-  const annotation = parseReferenceAnnotation("backstage.io/techdocs-ref", entity);
+  const annotation = parseReferenceAnnotation(
+    "backstage.io/techdocs-ref",
+    entity
+  );
   switch (annotation.type) {
     case "url":
       return annotation;
@@ -517,7 +587,10 @@ const getLocationForEntity = (entity, scmIntegration) => {
 };
 const getDocFilesFromRepository = async (reader, entity, opts) => {
   var _a, _b;
-  const { target } = parseReferenceAnnotation("backstage.io/techdocs-ref", entity);
+  const { target } = parseReferenceAnnotation(
+    "backstage.io/techdocs-ref",
+    entity
+  );
   (_a = opts == null ? void 0 : opts.logger) == null ? void 0 : _a.debug(`Reading files from ${target}`);
   const readTreeResponse = await reader.readTree(target, { etag: opts == null ? void 0 : opts.etag });
   const preparedDir = await readTreeResponse.dir();
@@ -538,8 +611,15 @@ class DirectoryPreparer {
   }
   async prepare(entity, options) {
     var _a, _b;
-    const annotation = parseReferenceAnnotation("backstage.io/techdocs-ref", entity);
-    const { type, target } = transformDirLocation(entity, annotation, this.scmIntegrations);
+    const annotation = parseReferenceAnnotation(
+      "backstage.io/techdocs-ref",
+      entity
+    );
+    const { type, target } = transformDirLocation(
+      entity,
+      annotation,
+      this.scmIntegrations
+    );
     switch (type) {
       case "url": {
         (_a = options == null ? void 0 : options.logger) == null ? void 0 : _a.debug(`Reading files from ${target}`);
@@ -584,7 +664,9 @@ class UrlPreparer {
       if (error.name === "NotModifiedError") {
         this.logger.debug(`Cache is valid for etag ${options == null ? void 0 : options.etag}`);
       } else {
-        this.logger.debug(`Unable to fetch files for building docs ${error.message}`);
+        this.logger.debug(
+          `Unable to fetch files for building docs ${error.message}`
+        );
       }
       throw error;
     }
@@ -610,7 +692,10 @@ class Preparers {
     this.preparerMap.set(protocol, preparer);
   }
   get(entity) {
-    const { type } = parseReferenceAnnotation("backstage.io/techdocs-ref", entity);
+    const { type } = parseReferenceAnnotation(
+      "backstage.io/techdocs-ref",
+      entity
+    );
     const preparer = this.preparerMap.get(type);
     if (!preparer) {
       throw new Error(`No preparer registered for type: "${type}"`);
@@ -624,7 +709,10 @@ const streamToBuffer$1 = (stream) => {
     try {
       const chunks = [];
       stream.on("data", (chunk) => chunks.push(chunk));
-      stream.on("error", (e) => reject(new errors.ForwardedError("Unable to read stream", e)));
+      stream.on(
+        "error",
+        (e) => reject(new errors.ForwardedError("Unable to read stream", e))
+      );
       stream.on("end", () => resolve(Buffer.concat(chunks)));
     } catch (e) {
       throw new errors.ForwardedError("Unable to parse the response data", e);
@@ -645,22 +733,34 @@ class AwsS3Publish {
     try {
       bucketName = config.getString("techdocs.publisher.awsS3.bucketName");
     } catch (error) {
-      throw new Error("Since techdocs.publisher.type is set to 'awsS3' in your app config, techdocs.publisher.awsS3.bucketName is required.");
+      throw new Error(
+        "Since techdocs.publisher.type is set to 'awsS3' in your app config, techdocs.publisher.awsS3.bucketName is required."
+      );
     }
-    const bucketRootPath = normalizeExternalStorageRootPath(config.getOptionalString("techdocs.publisher.awsS3.bucketRootPath") || "");
+    const bucketRootPath = normalizeExternalStorageRootPath(
+      config.getOptionalString("techdocs.publisher.awsS3.bucketRootPath") || ""
+    );
     const sse = config.getOptionalString("techdocs.publisher.awsS3.sse");
-    const credentialsConfig = config.getOptionalConfig("techdocs.publisher.awsS3.credentials");
+    const credentialsConfig = config.getOptionalConfig(
+      "techdocs.publisher.awsS3.credentials"
+    );
     const credentials = AwsS3Publish.buildCredentials(credentialsConfig);
     const region = config.getOptionalString("techdocs.publisher.awsS3.region");
-    const endpoint = config.getOptionalString("techdocs.publisher.awsS3.endpoint");
-    const s3ForcePathStyle = config.getOptionalBoolean("techdocs.publisher.awsS3.s3ForcePathStyle");
+    const endpoint = config.getOptionalString(
+      "techdocs.publisher.awsS3.endpoint"
+    );
+    const s3ForcePathStyle = config.getOptionalBoolean(
+      "techdocs.publisher.awsS3.s3ForcePathStyle"
+    );
     const storageClient = new aws__default["default"].S3({
       credentials,
       ...region && { region },
       ...endpoint && { endpoint },
       ...s3ForcePathStyle && { s3ForcePathStyle }
     });
-    const legacyPathCasing = config.getOptionalBoolean("techdocs.legacyUseCaseSensitiveTripletPaths") || false;
+    const legacyPathCasing = config.getOptionalBoolean(
+      "techdocs.legacyUseCaseSensitiveTripletPaths"
+    ) || false;
     return new AwsS3Publish({
       storageClient,
       bucketName,
@@ -698,10 +798,14 @@ class AwsS3Publish {
   async getReadiness() {
     try {
       await this.storageClient.headBucket({ Bucket: this.bucketName }).promise();
-      this.logger.info(`Successfully connected to the AWS S3 bucket ${this.bucketName}.`);
+      this.logger.info(
+        `Successfully connected to the AWS S3 bucket ${this.bucketName}.`
+      );
       return { isAvailable: true };
     } catch (error) {
-      this.logger.error(`Could not retrieve metadata about the AWS S3 bucket ${this.bucketName}. Make sure the bucket exists. Also make sure that authentication is setup either by explicitly defining credentials and region in techdocs.publisher.awsS3 in app config or by using environment variables. Refer to https://backstage.io/docs/features/techdocs/using-cloud-storage`);
+      this.logger.error(
+        `Could not retrieve metadata about the AWS S3 bucket ${this.bucketName}. Make sure the bucket exists. Also make sure that authentication is setup either by explicitly defining credentials and region in techdocs.publisher.awsS3 in app config or by using environment variables. Refer to https://backstage.io/docs/features/techdocs/using-cloud-storage`
+      );
       this.logger.error(`from AWS client library`, error);
       return {
         isAvailable: false
@@ -718,45 +822,76 @@ class AwsS3Publish {
     const sse = this.sse;
     let existingFiles = [];
     try {
-      const remoteFolder = getCloudPathForLocalPath(entity, void 0, useLegacyPathCasing, bucketRootPath);
+      const remoteFolder = getCloudPathForLocalPath(
+        entity,
+        void 0,
+        useLegacyPathCasing,
+        bucketRootPath
+      );
       existingFiles = await this.getAllObjectsFromBucket({
         prefix: remoteFolder
       });
     } catch (e) {
       errors.assertError(e);
-      this.logger.error(`Unable to list files for Entity ${entity.metadata.name}: ${e.message}`);
+      this.logger.error(
+        `Unable to list files for Entity ${entity.metadata.name}: ${e.message}`
+      );
     }
     let absoluteFilesToUpload;
     try {
       absoluteFilesToUpload = await getFileTreeRecursively(directory);
-      await bulkStorageOperation(async (absoluteFilePath) => {
-        const relativeFilePath = path__default["default"].relative(directory, absoluteFilePath);
-        const fileStream = fs__default["default"].createReadStream(absoluteFilePath);
-        const params = {
-          Bucket: this.bucketName,
-          Key: getCloudPathForLocalPath(entity, relativeFilePath, useLegacyPathCasing, bucketRootPath),
-          Body: fileStream,
-          ...sse && { ServerSideEncryption: sse }
-        };
-        objects.push(params.Key);
-        return this.storageClient.upload(params).promise();
-      }, absoluteFilesToUpload, { concurrencyLimit: 10 });
-      this.logger.info(`Successfully uploaded all the generated files for Entity ${entity.metadata.name}. Total number of files: ${absoluteFilesToUpload.length}`);
+      await bulkStorageOperation(
+        async (absoluteFilePath) => {
+          const relativeFilePath = path__default["default"].relative(directory, absoluteFilePath);
+          const fileStream = fs__default["default"].createReadStream(absoluteFilePath);
+          const params = {
+            Bucket: this.bucketName,
+            Key: getCloudPathForLocalPath(
+              entity,
+              relativeFilePath,
+              useLegacyPathCasing,
+              bucketRootPath
+            ),
+            Body: fileStream,
+            ...sse && { ServerSideEncryption: sse }
+          };
+          objects.push(params.Key);
+          return this.storageClient.upload(params).promise();
+        },
+        absoluteFilesToUpload,
+        { concurrencyLimit: 10 }
+      );
+      this.logger.info(
+        `Successfully uploaded all the generated files for Entity ${entity.metadata.name}. Total number of files: ${absoluteFilesToUpload.length}`
+      );
     } catch (e) {
       const errorMessage = `Unable to upload file(s) to AWS S3. ${e}`;
       this.logger.error(errorMessage);
       throw new Error(errorMessage);
     }
     try {
-      const relativeFilesToUpload = absoluteFilesToUpload.map((absoluteFilePath) => getCloudPathForLocalPath(entity, path__default["default"].relative(directory, absoluteFilePath), useLegacyPathCasing, bucketRootPath));
+      const relativeFilesToUpload = absoluteFilesToUpload.map(
+        (absoluteFilePath) => getCloudPathForLocalPath(
+          entity,
+          path__default["default"].relative(directory, absoluteFilePath),
+          useLegacyPathCasing,
+          bucketRootPath
+        )
+      );
       const staleFiles = getStaleFiles(relativeFilesToUpload, existingFiles);
-      await bulkStorageOperation(async (relativeFilePath) => {
-        return await this.storageClient.deleteObject({
-          Bucket: this.bucketName,
-          Key: relativeFilePath
-        }).promise();
-      }, staleFiles, { concurrencyLimit: 10 });
-      this.logger.info(`Successfully deleted stale files for Entity ${entity.metadata.name}. Total number of files: ${staleFiles.length}`);
+      await bulkStorageOperation(
+        async (relativeFilePath) => {
+          return await this.storageClient.deleteObject({
+            Bucket: this.bucketName,
+            Key: relativeFilePath
+          }).promise();
+        },
+        staleFiles,
+        { concurrencyLimit: 10 }
+      );
+      this.logger.info(
+        `Successfully deleted stale files for Entity ${entity.metadata.name}. Total number of files: ${staleFiles.length}`
+      );
     } catch (error) {
       const errorMessage = `Unable to delete file(s) from AWS S3. ${error}`;
       this.logger.error(errorMessage);
@@ -776,9 +911,13 @@ class AwsS3Publish {
     try {
       const techdocsMetadataJson = await streamToBuffer$1(stream);
       if (!techdocsMetadataJson) {
-        throw new Error(`Unable to parse the techdocs metadata file ${entityRootDir}/techdocs_metadata.json.`);
+        throw new Error(
+          `Unable to parse the techdocs metadata file ${entityRootDir}/techdocs_metadata.json.`
+        );
       }
-      const techdocsMetadata = JSON5__default["default"].parse(techdocsMetadataJson.toString("utf-8"));
+      const techdocsMetadata = JSON5__default["default"].parse(
+        techdocsMetadataJson.toString("utf-8")
+      );
       resolve(techdocsMetadata);
     } catch (err) {
       errors.assertError(err);
@@ -800,13 +939,17 @@ class AwsS3Publish {
     const responseHeaders = getHeadersForFileExtension(fileExtension);
     const stream = this.storageClient.getObject({ Bucket: this.bucketName, Key: filePath }).createReadStream();
     try {
-      for (const [headerKey, headerValue] of Object.entries(responseHeaders)) {
+      for (const [headerKey, headerValue] of Object.entries(
+        responseHeaders
+      )) {
        res.setHeader(headerKey, headerValue);
      }
      res.send(await streamToBuffer$1(stream));
    } catch (err) {
      errors.assertError(err);
-      this.logger.warn(`TechDocs S3 router failed to serve static files from bucket ${this.bucketName} at key ${filePath}: ${err.message}`);
+      this.logger.warn(
+        `TechDocs S3 router failed to serve static files from bucket ${this.bucketName} at key ${filePath}: ${err.message}`
+      );
      res.status(404).send("File Not Found");
    }
  };
@@ -831,36 +974,40 @@ class AwsS3Publish {
   }) {
     const allObjects = await this.getAllObjectsFromBucket();
     const limiter = createLimiter__default["default"](concurrency);
-    await Promise.all(allObjects.map((f) => limiter(async (file) => {
-      let newPath;
-      try {
-        newPath = lowerCaseEntityTripletInStoragePath(file);
-      } catch (e) {
-        errors.assertError(e);
-        this.logger.warn(e.message);
-        return;
-      }
-      if (file === newPath) {
-        return;
-      }
-      try {
-        this.logger.verbose(`Migrating ${file}`);
-        await this.storageClient.copyObject({
-          Bucket: this.bucketName,
-          CopySource: [this.bucketName, file].join("/"),
-          Key: newPath
-        }).promise();
-        if (removeOriginal) {
-          await this.storageClient.deleteObject({
-            Bucket: this.bucketName,
-            Key: file
-          }).promise();
-        }
-      } catch (e) {
-        errors.assertError(e);
-        this.logger.warn(`Unable to migrate ${file}: ${e.message}`);
-      }
-    }, f)));
+    await Promise.all(
+      allObjects.map(
+        (f) => limiter(async (file) => {
+          let newPath;
+          try {
+            newPath = lowerCaseEntityTripletInStoragePath(file);
+          } catch (e) {
+            errors.assertError(e);
+            this.logger.warn(e.message);
+            return;
+          }
+          if (file === newPath) {
+            return;
+          }
+          try {
+            this.logger.verbose(`Migrating ${file}`);
+            await this.storageClient.copyObject({
+              Bucket: this.bucketName,
+              CopySource: [this.bucketName, file].join("/"),
+              Key: newPath
+            }).promise();
+            if (removeOriginal) {
+              await this.storageClient.deleteObject({
+                Bucket: this.bucketName,
+                Key: file
+              }).promise();
+            }
+          } catch (e) {
+            errors.assertError(e);
+            this.logger.warn(`Unable to migrate ${file}: ${e.message}`);
+          }
+        }, f)
+      )
+    );
   }
   async getAllObjectsFromBucket({ prefix } = { prefix: "" }) {
     const objects = [];
@@ -872,7 +1019,9 @@ class AwsS3Publish {
         ContinuationToken: nextContinuation,
         ...prefix ? { Prefix: prefix } : {}
       }).promise();
-      objects.push(...(allObjects.Contents || []).map((f) => f.Key || "").filter((f) => !!f));
+      objects.push(
+        ...(allObjects.Contents || []).map((f) => f.Key || "").filter((f) => !!f)
+      );
       nextContinuation = allObjects.NextContinuationToken;
     } while (nextContinuation);
     return objects;
@@ -890,25 +1039,40 @@ class AzureBlobStoragePublish {
   static fromConfig(config, logger) {
     let containerName = "";
     try {
-      containerName = config.getString("techdocs.publisher.azureBlobStorage.containerName");
+      containerName = config.getString(
+        "techdocs.publisher.azureBlobStorage.containerName"
+      );
     } catch (error) {
-      throw new Error("Since techdocs.publisher.type is set to 'azureBlobStorage' in your app config, techdocs.publisher.azureBlobStorage.containerName is required.");
+      throw new Error(
+        "Since techdocs.publisher.type is set to 'azureBlobStorage' in your app config, techdocs.publisher.azureBlobStorage.containerName is required."
+      );
     }
     let accountName = "";
     try {
-      accountName = config.getString("techdocs.publisher.azureBlobStorage.credentials.accountName");
+      accountName = config.getString(
+        "techdocs.publisher.azureBlobStorage.credentials.accountName"
+      );
     } catch (error) {
-      throw new Error("Since techdocs.publisher.type is set to 'azureBlobStorage' in your app config, techdocs.publisher.azureBlobStorage.credentials.accountName is required.");
+      throw new Error(
+        "Since techdocs.publisher.type is set to 'azureBlobStorage' in your app config, techdocs.publisher.azureBlobStorage.credentials.accountName is required."
+      );
     }
-    const accountKey = config.getOptionalString("techdocs.publisher.azureBlobStorage.credentials.accountKey");
+    const accountKey = config.getOptionalString(
+      "techdocs.publisher.azureBlobStorage.credentials.accountKey"
+    );
     let credential;
     if (accountKey) {
       credential = new storageBlob.StorageSharedKeyCredential(accountName, accountKey);
     } else {
       credential = new identity.DefaultAzureCredential();
     }
-    const storageClient = new storageBlob.BlobServiceClient(`https://${accountName}.blob.core.windows.net`, credential);
-    const legacyPathCasing = config.getOptionalBoolean("techdocs.legacyUseCaseSensitiveTripletPaths") || false;
+    const storageClient = new storageBlob.BlobServiceClient(
+      `https://${accountName}.blob.core.windows.net`,
+      credential
+    );
+    const legacyPathCasing = config.getOptionalBoolean(
+      "techdocs.legacyUseCaseSensitiveTripletPaths"
+    ) || false;
     return new AzureBlobStoragePublish({
       storageClient,
       containerName,
@@ -925,13 +1089,17 @@ class AzureBlobStoragePublish {
       };
     }
     if (response._response.status >= 400) {
-      this.logger.error(`Failed to retrieve metadata from ${response._response.request.url} with status code ${response._response.status}.`);
+      this.logger.error(
+        `Failed to retrieve metadata from ${response._response.request.url} with status code ${response._response.status}.`
+      );
     }
   } catch (e) {
     errors.assertError(e);
     this.logger.error(`from Azure Blob Storage client library: ${e.message}`);
   }
-  this.logger.error(`Could not retrieve metadata about the Azure Blob Storage container ${this.containerName}. Make sure that the Azure project and container exist and the access key is setup correctly techdocs.publisher.azureBlobStorage.credentials defined in app config has correct permissions. Refer to https://backstage.io/docs/features/techdocs/using-cloud-storage`);
+  this.logger.error(
+    `Could not retrieve metadata about the Azure Blob Storage container ${this.containerName}. Make sure that the Azure project and container exist and the access key is setup correctly techdocs.publisher.azureBlobStorage.credentials defined in app config has correct permissions. Refer to https://backstage.io/docs/features/techdocs/using-cloud-storage`
+  );
   return { isAvailable: false };
 }
 async publish({
@@ -940,7 +1108,11 @@ class AzureBlobStoragePublish {
   }) {
     const objects = [];
     const useLegacyPathCasing = this.legacyPathCasing;
-    const remoteFolder = getCloudPathForLocalPath(entity, void 0, useLegacyPathCasing);
+    const remoteFolder = getCloudPathForLocalPath(
+      entity,
+      void 0,
+      useLegacyPathCasing
+    );
     let existingFiles = [];
     try {
       existingFiles = await this.getAllBlobsFromContainer({
@@ -949,7 +1121,9 @@ class AzureBlobStoragePublish {
       });
     } catch (e) {
       errors.assertError(e);
-      this.logger.error(`Unable to list files for Entity ${entity.metadata.name}: ${e.message}`);
+      this.logger.error(
+        `Unable to list files for Entity ${entity.metadata.name}: ${e.message}`
+      );
     }
     let absoluteFilesToUpload;
     let container;
@@ -957,32 +1131,62 @@ class AzureBlobStoragePublish {
     absoluteFilesToUpload = await getFileTreeRecursively(directory);
     container = this.storageClient.getContainerClient(this.containerName);
     const failedOperations = [];
-    await bulkStorageOperation(async (absoluteFilePath) => {
-      const relativeFilePath = path__default["default"].normalize(path__default["default"].relative(directory, absoluteFilePath));
-      const remotePath = getCloudPathForLocalPath(entity, relativeFilePath, useLegacyPathCasing);
-      objects.push(remotePath);
-      const response = await container.getBlockBlobClient(remotePath).uploadFile(absoluteFilePath);
-      if (response._response.status >= 400) {
-        failedOperations.push(new Error(`Upload failed for ${absoluteFilePath} with status code ${response._response.status}`));
-      }
-      return response;
-    }, absoluteFilesToUpload, { concurrencyLimit: BATCH_CONCURRENCY });
+    await bulkStorageOperation(
+      async (absoluteFilePath) => {
+        const relativeFilePath = path__default["default"].normalize(
+          path__default["default"].relative(directory, absoluteFilePath)
+        );
+        const remotePath = getCloudPathForLocalPath(
+          entity,
+          relativeFilePath,
+          useLegacyPathCasing
+        );
+        objects.push(remotePath);
+        const response = await container.getBlockBlobClient(remotePath).uploadFile(absoluteFilePath);
+        if (response._response.status >= 400) {
+          failedOperations.push(
+            new Error(
+              `Upload failed for ${absoluteFilePath} with status code ${response._response.status}`
+            )
+          );
+        }
+        return response;
+      },
+      absoluteFilesToUpload,
+      { concurrencyLimit: BATCH_CONCURRENCY }
+    );
     if (failedOperations.length > 0) {
-      throw new Error(failedOperations.map((r) => r.message).filter(Boolean).join(" "));
+      throw new Error(
+        failedOperations.map((r) => r.message).filter(Boolean).join(" ")
+      );
     }
-    this.logger.info(`Successfully uploaded all the generated files for Entity ${entity.metadata.name}. Total number of files: ${absoluteFilesToUpload.length}`);
+    this.logger.info(
+      `Successfully uploaded all the generated files for Entity ${entity.metadata.name}. Total number of files: ${absoluteFilesToUpload.length}`
+    );
   } catch (e) {
     const errorMessage = `Unable to upload file(s) to Azure. ${e}`;
     this.logger.error(errorMessage);
     throw new Error(errorMessage);
   }
   try {
-    const relativeFilesToUpload = absoluteFilesToUpload.map((absoluteFilePath) => getCloudPathForLocalPath(entity, path__default["default"].relative(directory, absoluteFilePath), useLegacyPathCasing));
+    const relativeFilesToUpload = absoluteFilesToUpload.map(
+      (absoluteFilePath) => getCloudPathForLocalPath(
+        entity,
+        path__default["default"].relative(directory, absoluteFilePath),
+        useLegacyPathCasing
+      )
+    );
     const staleFiles = getStaleFiles(relativeFilesToUpload, existingFiles);
-    await bulkStorageOperation(async (relativeFilePath) => {
-      return await container.deleteBlob(relativeFilePath);
-    }, staleFiles, { concurrencyLimit: BATCH_CONCURRENCY });
-    this.logger.info(`Successfully deleted stale files for Entity ${entity.metadata.name}. Total number of files: ${staleFiles.length}`);
+    await bulkStorageOperation(
+      async (relativeFilePath) => {
+        return await container.deleteBlob(relativeFilePath);
+      },
+      staleFiles,
+      { concurrencyLimit: BATCH_CONCURRENCY }
+    );
+    this.logger.info(
+      `Successfully deleted stale files for Entity ${entity.metadata.name}. Total number of files: ${staleFiles.length}`
+    );
  } catch (error) {
    const errorMessage = `Unable to delete file(s) from Azure. ${error}`;
    this.logger.error(errorMessage);
@@ -1010,11 +1214,18 @@ class AzureBlobStoragePublish {
   const entityTriplet = `${entityName.namespace}/${entityName.kind}/${entityName.name}`;
   const entityRootDir = this.legacyPathCasing ? entityTriplet : lowerCaseEntityTriplet(entityTriplet);
   try {
-    const techdocsMetadataJson = await this.download(this.containerName, `${entityRootDir}/techdocs_metadata.json`);
+    const techdocsMetadataJson = await this.download(
+      this.containerName,
+      `${entityRootDir}/techdocs_metadata.json`
+    );
     if (!techdocsMetadataJson) {
-      throw new Error(`Unable to parse the techdocs metadata file ${entityRootDir}/techdocs_metadata.json.`);
+      throw new Error(
+        `Unable to parse the techdocs metadata file ${entityRootDir}/techdocs_metadata.json.`
+      );
     }
-    const techdocsMetadata = JSON5__default["default"].parse(techdocsMetadataJson.toString("utf-8"));
+    const techdocsMetadata = JSON5__default["default"].parse(
+      techdocsMetadataJson.toString("utf-8")
+    );
     return techdocsMetadata;
   } catch (e) {
     throw new errors.ForwardedError("TechDocs metadata fetch failed", e);
@@ -1027,12 +1238,16 @@ class AzureBlobStoragePublish {
   const fileExtension = path__default["default"].extname(filePath);
   const responseHeaders = getHeadersForFileExtension(fileExtension);
   this.download(this.containerName, filePath).then((fileContent) => {
-    for (const [headerKey, headerValue] of Object.entries(responseHeaders)) {
+    for (const [headerKey, headerValue] of Object.entries(
+      responseHeaders
+    )) {
      res.setHeader(headerKey, headerValue);
    }
    res.send(fileContent);
  }).catch((e) => {
-    this.logger.warn(`TechDocs Azure router failed to serve content from container ${this.containerName} at path ${filePath}: ${e.message}`);
+    this.logger.warn(
+      `TechDocs Azure router failed to serve content from container ${this.containerName} at path ${filePath}: ${e.message}`
+    );
    res.status(404).send("File Not Found");
  });
 };
@@ -1079,7 +1294,13 @@ class AzureBlobStoragePublish {
     const limiter = createLimiter__default["default"](concurrency);
     const container = this.storageClient.getContainerClient(this.containerName);
     for await (const blob of container.listBlobsFlat()) {
-      promises.push(limiter(this.renameBlobToLowerCase.bind(this), blob.name, removeOriginal));
+      promises.push(
+        limiter(
+          this.renameBlobToLowerCase.bind(this),
+          blob.name,
+          removeOriginal
+        )
+      );
     }
     await Promise.all(promises);
   }
@@ -1133,7 +1354,9 @@ class MigrateWriteStream extends stream.Writable {
     }
     const migrate = this.removeOriginal ? file.move.bind(file) : file.copy.bind(file);
     this.logger.verbose(`Migrating ${file.name}`);
-    migrate(newFile).catch((e) => this.logger.warn(`Unable to migrate ${file.name}: ${e.message}`)).finally(() => {
+    migrate(newFile).catch(
+      (e) => this.logger.warn(`Unable to migrate ${file.name}: ${e.message}`)
+    ).finally(() => {
       this.inFlight--;
       if (shouldCallNext) {
         next();
@@ -1155,16 +1378,24 @@ class GoogleGCSPublish {
     try {
       bucketName = config.getString("techdocs.publisher.googleGcs.bucketName");
     } catch (error) {
-      throw new Error("Since techdocs.publisher.type is set to 'googleGcs' in your app config, techdocs.publisher.googleGcs.bucketName is required.");
-    }
-    const bucketRootPath = normalizeExternalStorageRootPath(config.getOptionalString("techdocs.publisher.googleGcs.bucketRootPath") || "");
-    const credentials = config.getOptionalString("techdocs.publisher.googleGcs.credentials");
+      throw new Error(
+        "Since techdocs.publisher.type is set to 'googleGcs' in your app config, techdocs.publisher.googleGcs.bucketName is required."
+      );
+    }
+    const bucketRootPath = normalizeExternalStorageRootPath(
+      config.getOptionalString("techdocs.publisher.googleGcs.bucketRootPath") || ""
+    );
+    const credentials = config.getOptionalString(
+      "techdocs.publisher.googleGcs.credentials"
+    );
     let credentialsJson = {};
     if (credentials) {
       try {
         credentialsJson = JSON.parse(credentials);
       } catch (err) {
-        throw new Error("Error in parsing techdocs.publisher.googleGcs.credentials config to JSON.");
+        throw new Error(
+          "Error in parsing techdocs.publisher.googleGcs.credentials config to JSON."
+        );
       }
     }
     const storageClient = new storage.Storage({
@@ -1173,7 +1404,9 @@ class GoogleGCSPublish {
       credentials: credentialsJson
     }
   });
-  const legacyPathCasing = config.getOptionalBoolean("techdocs.legacyUseCaseSensitiveTripletPaths") || false;
+  const legacyPathCasing = config.getOptionalBoolean(
+    "techdocs.legacyUseCaseSensitiveTripletPaths"
+  ) || false;
   return new GoogleGCSPublish({
     storageClient,
     bucketName,
@@ -1185,13 +1418,17 @@ class GoogleGCSPublish {
   async getReadiness() {
     try {
       await this.storageClient.bucket(this.bucketName).getMetadata();
-      this.logger.info(`Successfully connected to the GCS bucket ${this.bucketName}.`);
+      this.logger.info(
+        `Successfully connected to the GCS bucket ${this.bucketName}.`
+      );
       return {
         isAvailable: true
       };
     } catch (err) {
       errors.assertError(err);
-      this.logger.error(`Could not retrieve metadata about the GCS bucket ${this.bucketName}. Make sure the bucket exists. Also make sure that authentication is setup either by explicitly defining techdocs.publisher.googleGcs.credentials in app config or by using environment variables. Refer to https://backstage.io/docs/features/techdocs/using-cloud-storage`);
+      this.logger.error(
+        `Could not retrieve metadata about the GCS bucket ${this.bucketName}. Make sure the bucket exists. Also make sure that authentication is setup either by explicitly defining techdocs.publisher.googleGcs.credentials in app config or by using environment variables. Refer to https://backstage.io/docs/features/techdocs/using-cloud-storage`
+      );
       this.logger.error(`from GCS client library: ${err.message}`);
       return { isAvailable: false };
     }
@@ -1206,34 +1443,65 @@ class GoogleGCSPublish {
     const bucketRootPath = this.bucketRootPath;
     let existingFiles = [];
     try {
-      const remoteFolder = getCloudPathForLocalPath(entity, void 0, useLegacyPathCasing, bucketRootPath);
+      const remoteFolder = getCloudPathForLocalPath(
+        entity,
+        void 0,
+        useLegacyPathCasing,
+        bucketRootPath
+      );
       existingFiles = await this.getFilesForFolder(remoteFolder);
     } catch (e) {
       errors.assertError(e);
-      this.logger.error(`Unable to list files for Entity ${entity.metadata.name}: ${e.message}`);
+      this.logger.error(
+        `Unable to list files for Entity ${entity.metadata.name}: ${e.message}`
+      );
     }
     let absoluteFilesToUpload;
     try {
       absoluteFilesToUpload = await getFileTreeRecursively(directory);
-      await bulkStorageOperation(async (absoluteFilePath) => {
-        const relativeFilePath = path__default["default"].relative(directory, absoluteFilePath);
-        const destination = getCloudPathForLocalPath(entity, relativeFilePath, useLegacyPathCasing, bucketRootPath);
-        objects.push(destination);
-        return await bucket.upload(absoluteFilePath, { destination });
-      }, absoluteFilesToUpload, { concurrencyLimit: 10 });
-      this.logger.info(`Successfully uploaded all the generated files for Entity ${entity.metadata.name}. Total number of files: ${absoluteFilesToUpload.length}`);
+      await bulkStorageOperation(
+        async (absoluteFilePath) => {
+          const relativeFilePath = path__default["default"].relative(directory, absoluteFilePath);
+          const destination = getCloudPathForLocalPath(
+            entity,
+            relativeFilePath,
+            useLegacyPathCasing,
+            bucketRootPath
+          );
+          objects.push(destination);
+          return await bucket.upload(absoluteFilePath, { destination });
+        },
+        absoluteFilesToUpload,
+        { concurrencyLimit: 10 }
+      );
+      this.logger.info(
+        `Successfully uploaded all the generated files for Entity ${entity.metadata.name}. Total number of files: ${absoluteFilesToUpload.length}`
+      );
     } catch (e) {
       const errorMessage = `Unable to upload file(s) to Google Cloud Storage. ${e}`;
       this.logger.error(errorMessage);
       throw new Error(errorMessage);
     }
     try {
-      const relativeFilesToUpload = absoluteFilesToUpload.map((absoluteFilePath) => getCloudPathForLocalPath(entity, path__default["default"].relative(directory, absoluteFilePath), useLegacyPathCasing, bucketRootPath));
+      const relativeFilesToUpload = absoluteFilesToUpload.map(
+        (absoluteFilePath) => getCloudPathForLocalPath(
+          entity,
+          path__default["default"].relative(directory, absoluteFilePath),
+          useLegacyPathCasing,
+          bucketRootPath
+        )
+      );
       const staleFiles = getStaleFiles(relativeFilesToUpload, existingFiles);
-      await bulkStorageOperation(async (relativeFilePath) => {
-        return await bucket.file(relativeFilePath).delete();
-      }, staleFiles, { concurrencyLimit: 10 });
-      this.logger.info(`Successfully deleted stale files for Entity ${entity.metadata.name}. Total number of files: ${staleFiles.length}`);
+      await bulkStorageOperation(
+        async (relativeFilePath) => {
+          return await bucket.file(relativeFilePath).delete();
+        },
+        staleFiles,
+        { concurrencyLimit: 10 }
+      );
+      this.logger.info(
+        `Successfully deleted stale files for Entity ${entity.metadata.name}. Total number of files: ${staleFiles.length}`
+      );
     } catch (error) {
       const errorMessage = `Unable to delete file(s) from Google Cloud Storage. ${error}`;
       this.logger.error(errorMessage);
@@ -1268,7 +1536,9 @@ class GoogleGCSPublish {
     this.storageClient.bucket(this.bucketName).file(filePath).createReadStream().on("pipe", () => {
       res.writeHead(200, responseHeaders);
     }).on("error", (err) => {
-      this.logger.warn(`TechDocs Google GCS router failed to serve content from bucket ${this.bucketName} at path ${filePath}: ${err.message}`);
+      this.logger.warn(
+        `TechDocs Google GCS router failed to serve content from bucket ${this.bucketName} at path ${filePath}: ${err.message}`
+      );
       if (!res.headersSent) {
         res.status(404).send("File Not Found");
       } else {
@@ -1292,7 +1562,11 @@ class GoogleGCSPublish {
   migrateDocsCase({ removeOriginal = false, concurrency = 25 }) {
     return new Promise((resolve, reject) => {
       const allFileMetadata = this.storageClient.bucket(this.bucketName).getFilesStream();
-      const migrateFiles = new MigrateWriteStream(this.logger, removeOriginal, concurrency);
+      const migrateFiles = new MigrateWriteStream(
+        this.logger,
+        removeOriginal,
+        concurrency
+      );
       migrateFiles.on("finish", resolve).on("error", reject);
       allFileMetadata.pipe(migrateFiles).on("error", (error) => {
         migrateFiles.destroy();
@@ -1325,11 +1599,18 @@ class LocalPublish {
     this.staticDocsDir = options.staticDocsDir;
   }
   static fromConfig(config, logger, discovery) {
-    const legacyPathCasing = config.getOptionalBoolean("techdocs.legacyUseCaseSensitiveTripletPaths") || false;
-    let staticDocsDir = config.getOptionalString("techdocs.publisher.local.publishDirectory");
+    const legacyPathCasing = config.getOptionalBoolean(
+      "techdocs.legacyUseCaseSensitiveTripletPaths"
+    ) || false;
+    let staticDocsDir = config.getOptionalString(
+      "techdocs.publisher.local.publishDirectory"
+    );
     if (!staticDocsDir) {
       try {
-        staticDocsDir = backendCommon.resolvePackagePath("@backstage/plugin-techdocs-backend", "static/docs");
+        staticDocsDir = backendCommon.resolvePackagePath(
+          "@backstage/plugin-techdocs-backend",
+          "static/docs"
+        );
       } catch (err) {
         staticDocsDir = os__default["default"].tmpdir();
       }
@@ -1354,9 +1635,18 @@ class LocalPublish {
     const entityNamespace = (_a = entity.metadata.namespace) != null ? _a : "default";
     let publishDir;
     try {
-      publishDir = this.staticEntityPathJoin(entityNamespace, entity.kind, entity.metadata.name);
+      publishDir = this.staticEntityPathJoin(
+        entityNamespace,
+        entity.kind,
+        entity.metadata.name
+      );
     } catch (error) {
-      throw new errors.ForwardedError(`Unable to publish TechDocs site for entity: ${catalogModel.stringifyEntityRef(entity)}`, error);
+      throw new errors.ForwardedError(
+        `Unable to publish TechDocs site for entity: ${catalogModel.stringifyEntityRef(
+          entity
+        )}`,
+        error
+      );
     }
     if (!fs__default["default"].existsSync(publishDir)) {
       this.logger.info(`Could not find ${publishDir}, creating the directory.`);
@@ -1366,29 +1656,48 @@ class LocalPublish {
       await fs__default["default"].copy(directory, publishDir);
       this.logger.info(`Published site stored at ${publishDir}`);
     } catch (error) {
-      this.logger.debug(`Failed to copy docs from ${directory} to ${publishDir}`);
+      this.logger.debug(
+        `Failed to copy docs from ${directory} to ${publishDir}`
+      );
      throw error;
    }
    const techdocsApiUrl = await this.discovery.getBaseUrl("techdocs");
-    const publishedFilePaths = (await getFileTreeRecursively(publishDir)).map((abs) => {
-      return abs.split(`${this.staticDocsDir}/`)[1];
-    });
+    const publishedFilePaths = (await getFileTreeRecursively(publishDir)).map(
+      (abs) => {
+        return abs.split(`${this.staticDocsDir}/`)[1];
+      }
+    );
    return {
-      remoteUrl: `${techdocsApiUrl}/static/docs/${encodeURIComponent(entity.metadata.name)}`,
+      remoteUrl: `${techdocsApiUrl}/static/docs/${encodeURIComponent(
+        entity.metadata.name
+      )}`,
      objects: publishedFilePaths
    };
  }
  async fetchTechDocsMetadata(entityName) {
    let metadataPath;
    try {
-      metadataPath = this.staticEntityPathJoin(entityName.namespace, entityName.kind, entityName.name, "techdocs_metadata.json");
+      metadataPath = this.staticEntityPathJoin(
+        entityName.namespace,
+        entityName.kind,
+        entityName.name,
+        "techdocs_metadata.json"
+      );
    } catch (err) {
-      throw new errors.ForwardedError(`Unexpected entity when fetching metadata: ${catalogModel.stringifyEntityRef(entityName)}`, err);
+      throw new errors.ForwardedError(
+        `Unexpected entity when fetching metadata: ${catalogModel.stringifyEntityRef(
+          entityName
+        )}`,
+        err
+      );
    }
    try {
      return await fs__default["default"].readJson(metadataPath);
    } catch (err) {
-      throw new errors.ForwardedError(`Unable to read techdocs_metadata.json at ${metadataPath}. Error: ${err}`, err);
+      throw new errors.ForwardedError(
+        `Unable to read techdocs_metadata.json at ${metadataPath}. Error: ${err}`,
+        err
+      );
    }
  }
  docsRouter() {
@@ -1413,27 +1722,38 @@ class LocalPublish {
     }
     return res.redirect(301, req.baseUrl + newPath);
   });
-  router.use(express__default["default"].static(this.staticDocsDir, {
-    setHeaders: (res, filePath) => {
-      const fileExtension = path__default["default"].extname(filePath);
-      const headers = getHeadersForFileExtension(fileExtension);
-      for (const [header, value] of Object.entries(headers)) {
-        res.setHeader(header, value);
+  router.use(
+    express__default["default"].static(this.staticDocsDir, {
+      setHeaders: (res, filePath) => {
+        const fileExtension = path__default["default"].extname(filePath);
+        const headers = getHeadersForFileExtension(fileExtension);
+        for (const [header, value] of Object.entries(headers)) {
+          res.setHeader(header, value);
+        }
       }
-    }
-  }));
+    })
+  );
   return router;
 }
 async hasDocsBeenGenerated(entity) {
   var _a;
   const namespace = (_a = entity.metadata.namespace) != null ? _a : "default";
   try {
-    const indexHtmlPath = this.staticEntityPathJoin(namespace, entity.kind, entity.metadata.name, "index.html");
+    const indexHtmlPath = this.staticEntityPathJoin(
+      namespace,
+      entity.kind,
+      entity.metadata.name,
+      "index.html"
+    );
     await fs__default["default"].access(indexHtmlPath, fs__default["default"].constants.F_OK);
     return true;
   } catch (err) {
     if (err.name === "NotAllowedError") {
-      this.logger.error(`Unexpected entity when checking if generated: ${catalogModel.stringifyEntityRef(entity)}`);
+      this.logger.error(
+        `Unexpected entity when checking if generated: ${catalogModel.stringifyEntityRef(
+          entity
+        )}`
+      );
     }
     return false;
   }
@@ -1444,29 +1764,41 @@ class LocalPublish {
   }) {
     const files = await getFileTreeRecursively(this.staticDocsDir);
     const limit = createLimiter__default["default"](concurrency);
-    await Promise.all(files.map((f) => limit(async (file) => {
-      const relativeFile = file.replace(`${this.staticDocsDir}${path__default["default"].sep}`, "");
-      const newFile = lowerCaseEntityTripletInStoragePath(relativeFile);
-      if (relativeFile === newFile) {
-        return;
-      }
-      await new Promise((resolve) => {
-        const migrate = removeOriginal ? fs__default["default"].move : fs__default["default"].copyFile;
-        this.logger.verbose(`Migrating ${relativeFile}`);
-        migrate(file, newFile, (err) => {
-          if (err) {
-            this.logger.warn(`Unable to migrate ${relativeFile}: ${err.message}`);
+    await Promise.all(
+      files.map(
+        (f) => limit(async (file) => {
+          const relativeFile = file.replace(
+            `${this.staticDocsDir}${path__default["default"].sep}`,
+            ""
+          );
+          const newFile = lowerCaseEntityTripletInStoragePath(relativeFile);
+          if (relativeFile === newFile) {
+            return;
           }
-          resolve();
-        });
-      });
-    }, f)));
+          await new Promise((resolve) => {
+            const migrate = removeOriginal ? fs__default["default"].move : fs__default["default"].copyFile;
+            this.logger.verbose(`Migrating ${relativeFile}`);
+            migrate(file, newFile, (err) => {
+              if (err) {
+                this.logger.warn(
+                  `Unable to migrate ${relativeFile}: ${err.message}`
+                );
+              }
+              resolve();
+            });
+          });
+        }, f)
+      )
+    );
   }
   staticEntityPathJoin(...allParts) {
     let staticEntityPath = this.staticDocsDir;
     allParts.map((part) => part.split(path__default["default"].sep)).flat().forEach((part, index) => {
       if (index < 3) {
-        staticEntityPath = backendCommon.resolveSafeChildPath(staticEntityPath, this.legacyPathCasing ? part : part.toLowerCase());
+        staticEntityPath = backendCommon.resolveSafeChildPath(
+          staticEntityPath,
+          this.legacyPathCasing ? part : part.toLowerCase()
+        );
         return;
       }
       staticEntityPath = backendCommon.resolveSafeChildPath(staticEntityPath, part);
@@ -1502,11 +1834,17 @@ class OpenStackSwiftPublish {
   static fromConfig(config, logger) {
     let containerName = "";
     try {
-      containerName = config.getString("techdocs.publisher.openStackSwift.containerName");
+      containerName = config.getString(
+        "techdocs.publisher.openStackSwift.containerName"
+      );
     } catch (error) {
-      throw new Error("Since techdocs.publisher.type is set to 'openStackSwift' in your app config, techdocs.publisher.openStackSwift.containerName is required.");
+      throw new Error(
+        "Since techdocs.publisher.type is set to 'openStackSwift' in your app config, techdocs.publisher.openStackSwift.containerName is required."
+      );
     }
-    const openStackSwiftConfig = config.getConfig("techdocs.publisher.openStackSwift");
+    const openStackSwiftConfig = config.getConfig(
+      "techdocs.publisher.openStackSwift"
+    );
     const storageClient = new openstackSwiftSdk.SwiftClient({
       authEndpoint: openStackSwiftConfig.getString("authUrl"),
       swiftEndpoint: openStackSwiftConfig.getString("swiftUrl"),
@@ -1517,14 +1855,20 @@ class OpenStackSwiftPublish {
   }
   async getReadiness() {
     try {
-      const container = await this.storageClient.getContainerMetadata(this.containerName);
+      const container = await this.storageClient.getContainerMetadata(
+        this.containerName
+      );
       if (!(container instanceof types.NotFound)) {
-        this.logger.info(`Successfully connected to the OpenStack Swift container ${this.containerName}.`);
+        this.logger.info(
+          `Successfully connected to the OpenStack Swift container ${this.containerName}.`
+        );
         return {
           isAvailable: true
         };
       }
-      this.logger.error(`Could not retrieve metadata about the OpenStack Swift container ${this.containerName}. Make sure the container exists. Also make sure that authentication is setup either by explicitly defining credentials and region in techdocs.publisher.openStackSwift in app config or by using environment variables. Refer to https://backstage.io/docs/features/techdocs/using-cloud-storage`);
+      this.logger.error(
+        `Could not retrieve metadata about the OpenStack Swift container ${this.containerName}. Make sure the container exists. Also make sure that authentication is setup either by explicitly defining credentials and region in techdocs.publisher.openStackSwift in app config or by using environment variables. Refer to https://backstage.io/docs/features/techdocs/using-cloud-storage`
+      );
       return {
         isAvailable: false
       };
@@ -1554,12 +1898,18 @@ class OpenStackSwiftPublish {
       const uploadFile = limiter(async () => {
         const fileBuffer = await fs__default["default"].readFile(filePath);
         const stream = bufferToStream(fileBuffer);
-        return this.storageClient.upload(this.containerName, destination, stream);
+        return this.storageClient.upload(
+          this.containerName,
+          destination,
+          stream
+        );
       });
       uploadPromises.push(uploadFile);
     }
     await Promise.all(uploadPromises);
-    this.logger.info(`Successfully uploaded all the generated files for Entity ${entity.metadata.name}. Total number of files: ${allFilesToUpload.length}`);
+    this.logger.info(
+      `Successfully uploaded all the generated files for Entity ${entity.metadata.name}. Total number of files: ${allFilesToUpload.length}`
+    );
     return { objects };
   } catch (e) {
     const errorMessage = `Unable to upload file(s) to OpenStack Swift. ${e}`;
@@ -1570,15 +1920,22 @@ class OpenStackSwiftPublish {
   async fetchTechDocsMetadata(entityName) {
     return await new Promise(async (resolve, reject) => {
       const entityRootDir = `${entityName.namespace}/${entityName.kind}/${entityName.name}`;
-      const downloadResponse = await this.storageClient.download(this.containerName, `${entityRootDir}/techdocs_metadata.json`);
+      const downloadResponse = await this.storageClient.download(
+        this.containerName,
+        `${entityRootDir}/techdocs_metadata.json`
+      );
       if (!(downloadResponse instanceof types.NotFound)) {
         const stream = downloadResponse.data;
         try {
           const techdocsMetadataJson = await streamToBuffer(stream);
           if (!techdocsMetadataJson) {
-            throw new Error(`Unable to parse the techdocs metadata file ${entityRootDir}/techdocs_metadata.json.`);
+            throw new Error(
+              `Unable to parse the techdocs metadata file ${entityRootDir}/techdocs_metadata.json.`
+            );
           }
-          const techdocsMetadata = JSON5__default["default"].parse(techdocsMetadataJson.toString("utf-8"));
+          const techdocsMetadata = JSON5__default["default"].parse(
+            techdocsMetadataJson.toString("utf-8")
+          );
           resolve(techdocsMetadata);
         } catch (err) {
           errors.assertError(err);
@@ -1597,21 +1954,30 @@ class OpenStackSwiftPublish {
     const filePath = decodeURI(req.path.replace(/^\//, ""));
     const fileExtension = path__default["default"].extname(filePath);
     const responseHeaders = getHeadersForFileExtension(fileExtension);
-    const downloadResponse = await this.storageClient.download(this.containerName, filePath);
+    const downloadResponse = await this.storageClient.download(
+      this.containerName,
+      filePath
+    );
     if (!(downloadResponse instanceof types.NotFound)) {
       const stream = downloadResponse.data;
       try {
-        for (const [headerKey, headerValue] of Object.entries(responseHeaders)) {
+        for (const [headerKey, headerValue] of Object.entries(
+          responseHeaders
+        )) {
           res.setHeader(headerKey, headerValue);
         }
         res.send(await streamToBuffer(stream));
       } catch (err) {
         errors.assertError(err);
-        this.logger.warn(`TechDocs OpenStack swift router failed to serve content from container ${this.containerName} at path ${filePath}: ${err.message}`);
+        this.logger.warn(
+          `TechDocs OpenStack swift router failed to serve content from container ${this.containerName} at path ${filePath}: ${err.message}`
+        );
         res.status(404).send("File Not Found");
       }
     } else {
-      this.logger.warn(`TechDocs OpenStack swift router failed to serve content from container ${this.containerName} at path ${filePath}: Not found`);
+      this.logger.warn(
+        `TechDocs OpenStack swift router failed to serve content from container ${this.containerName} at path ${filePath}: Not found`
+      );
       res.status(404).send("File Not Found");
     }
   };
@@ -1619,7 +1985,10 @@ class OpenStackSwiftPublish {
   async hasDocsBeenGenerated(entity) {
     const entityRootDir = `${entity.metadata.namespace}/${entity.kind}/${entity.metadata.name}`;
     try {
-      const fileResponse = await this.storageClient.getMetadata(this.containerName, `${entityRootDir}/index.html`);
+      const fileResponse = await this.storageClient.getMetadata(
+        this.containerName,
+        `${entityRootDir}/index.html`
+      );
       if (!(fileResponse instanceof types.NotFound)) {
         return true;
       }
@@ -1636,34 +2005,47 @@ class OpenStackSwiftPublish {
   }) {
     const allObjects = await this.getAllObjectsFromContainer();
     const limiter = createLimiter__default["default"](concurrency);
-    await Promise.all(allObjects.map((f) => limiter(async (file) => {
-      let newPath;
-      try {
-        newPath = lowerCaseEntityTripletInStoragePath(file);
-      } catch (e) {
-        errors.assertError(e);
-        this.logger.warn(e.message);
-        return;
-      }
-      if (file === newPath) {
-        return;
-      }
-      try {
-        this.logger.verbose(`Migrating ${file} to ${newPath}`);
-        await this.storageClient.copy(this.containerName, file, this.containerName, newPath);
-        if (removeOriginal) {
-          await this.storageClient.delete(this.containerName, file);
-        }
-      } catch (e) {
-        errors.assertError(e);
-        this.logger.warn(`Unable to migrate ${file}: ${e.message}`);
-      }
-    }, f)));
+    await Promise.all(
+      allObjects.map(
+        (f) => limiter(async (file) => {
+          let newPath;
+          try {
+            newPath = lowerCaseEntityTripletInStoragePath(file);
+          } catch (e) {
+            errors.assertError(e);
+            this.logger.warn(e.message);
+            return;
+          }
+          if (file === newPath) {
+            return;
+          }
+          try {
+            this.logger.verbose(`Migrating ${file} to ${newPath}`);
+            await this.storageClient.copy(
+              this.containerName,
+              file,
+              this.containerName,
+              newPath
+            );
+            if (removeOriginal) {
+              await this.storageClient.delete(this.containerName, file);
+            }
+          } catch (e) {
+            errors.assertError(e);
+            this.logger.warn(`Unable to migrate ${file}: ${e.message}`);
+          }
+        }, f)
+      )
+    );
   }
   async getAllObjectsFromContainer({ prefix } = { prefix: "" }) {
     let objects = [];
     const OSS_MAX_LIMIT = Math.pow(2, 31) - 1;
-    const allObjects = await this.storageClient.list(this.containerName, prefix, OSS_MAX_LIMIT);
+    const allObjects = await this.storageClient.list(
+      this.containerName,
+      prefix,
+      OSS_MAX_LIMIT
+    );
     objects = allObjects.map((object) => object.name);
     return objects;
   }
@@ -1672,7 +2054,9 @@ class Publisher {
 class Publisher {
   static async fromConfig(config, { logger, discovery }) {
     var _a;
-    const publisherType = (_a = config.getOptionalString("techdocs.publisher.type")) != null ? _a : "local";
+    const publisherType = (_a = config.getOptionalString(
+      "techdocs.publisher.type"
+    )) != null ? _a : "local";
     switch (publisherType) {
       case "googleGcs":
         logger.info("Creating Google Storage Bucket publisher for TechDocs");
@@ -1681,10 +2065,14 @@ class Publisher {
         logger.info("Creating AWS S3 Bucket publisher for TechDocs");
         return AwsS3Publish.fromConfig(config, logger);
       case "azureBlobStorage":
-        logger.info("Creating Azure Blob Storage Container publisher for TechDocs");
+        logger.info(
+          "Creating Azure Blob Storage Container publisher for TechDocs"
+        );
         return AzureBlobStoragePublish.fromConfig(config, logger);
       case "openStackSwift":
-        logger.info("Creating OpenStack Swift Container publisher for TechDocs");
+        logger.info(
+          "Creating OpenStack Swift Container publisher for TechDocs"
+        );
         return OpenStackSwiftPublish.fromConfig(config, logger);
       case "local":
         logger.info("Creating Local publisher for TechDocs");