@backstage/plugin-techdocs-node 1.4.3-next.0 → 1.4.3-next.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +18 -0
- package/dist/index.cjs.js +78 -48
- package/dist/index.cjs.js.map +1 -1
- package/package.json +14 -10
package/CHANGELOG.md
CHANGED
@@ -1,5 +1,23 @@
 # @backstage/plugin-techdocs-node
 
+## 1.4.3-next.1
+
+### Patch Changes
+
+- 0a61aab172: Bump dependency @azure/identity to next minor
+- 37931c33ce: Upgrade to AWS SDK for Javascript v3
+
+  Techdocs support for AWS S3 now requires defining the AWS region to connect to.
+  If `techdocs.publisher.awsS3.region` is missing from the config, the AWS environment variable `AWS_REGION` will be used.
+
+- Updated dependencies
+  - @backstage/backend-common@0.17.0-next.1
+  - @backstage/config@1.0.5-next.1
+  - @backstage/integration@1.4.1-next.1
+  - @backstage/catalog-model@1.1.4-next.1
+  - @backstage/errors@1.1.4-next.1
+  - @backstage/plugin-search-common@1.1.2-next.1
+
 ## 1.4.3-next.0
 
 ### Patch Changes
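As a minimal sketch of the configuration behavior described in the changelog entry above (not the plugin's exact code): the `techdocs.publisher.awsS3.region` key is read from the Backstage config and only passed to the AWS SDK v3 `S3Client` when it is set, so leaving it unset lets the SDK fall back to its own region resolution, for example the `AWS_REGION` environment variable. The helper name `createS3Client` is illustrative only.

```js
// Illustrative sketch, not the plugin's actual factory.
// `config` is assumed to be a Backstage Config object.
const { S3Client } = require('@aws-sdk/client-s3');

function createS3Client(config) {
  const region = config.getOptionalString('techdocs.publisher.awsS3.region');
  return new S3Client({
    // Pass `region` only when configured; otherwise the SDK resolves it on its
    // own, for example from the AWS_REGION environment variable.
    ...(region && { region }),
  });
}
```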
package/dist/index.cjs.js
CHANGED
@@ -15,7 +15,9 @@ var catalogModel = require('@backstage/catalog-model');
 var mime = require('mime-types');
 var createLimiter = require('p-limit');
 var recursiveReadDir = require('recursive-readdir');
-var aws = require('aws-sdk');
+var clientS3 = require('@aws-sdk/client-s3');
+var credentialProviders = require('@aws-sdk/credential-providers');
+var libStorage = require('@aws-sdk/lib-storage');
 var JSON5 = require('json5');
 var identity = require('@azure/identity');
 var storageBlob = require('@azure/storage-blob');
@@ -34,7 +36,6 @@ var yaml__default = /*#__PURE__*/_interopDefaultLegacy(yaml);
 var mime__default = /*#__PURE__*/_interopDefaultLegacy(mime);
 var createLimiter__default = /*#__PURE__*/_interopDefaultLegacy(createLimiter);
 var recursiveReadDir__default = /*#__PURE__*/_interopDefaultLegacy(recursiveReadDir);
-var aws__default = /*#__PURE__*/_interopDefaultLegacy(aws);
 var JSON5__default = /*#__PURE__*/_interopDefaultLegacy(JSON5);
 var express__default = /*#__PURE__*/_interopDefaultLegacy(express);
 var os__default = /*#__PURE__*/_interopDefaultLegacy(os);
@@ -746,19 +747,23 @@ class AwsS3Publish {
       config.getOptionalString("techdocs.publisher.awsS3.bucketRootPath") || ""
     );
     const sse = config.getOptionalString("techdocs.publisher.awsS3.sse");
+    const region = config.getOptionalString("techdocs.publisher.awsS3.region");
     const credentialsConfig = config.getOptionalConfig(
       "techdocs.publisher.awsS3.credentials"
     );
-    const credentials = AwsS3Publish.buildCredentials(
-
+    const credentials = AwsS3Publish.buildCredentials(
+      credentialsConfig,
+      region
+    );
     const endpoint = config.getOptionalString(
       "techdocs.publisher.awsS3.endpoint"
     );
     const s3ForcePathStyle = config.getOptionalBoolean(
       "techdocs.publisher.awsS3.s3ForcePathStyle"
     );
-    const storageClient = new
-
+    const storageClient = new clientS3.S3Client({
+      customUserAgent: "backstage-aws-techdocs-s3-publisher",
+      credentialDefaultProvider: () => credentials,
       ...region && { region },
       ...endpoint && { endpoint },
       ...s3ForcePathStyle && { s3ForcePathStyle }
@@ -775,34 +780,39 @@ class AwsS3Publish {
       sse
     });
   }
-  static
-
-      return
-    }
-    const accessKeyId = config.getOptionalString("accessKeyId");
-    const secretAccessKey = config.getOptionalString("secretAccessKey");
-    let explicitCredentials;
-    if (accessKeyId && secretAccessKey) {
-      explicitCredentials = new aws.Credentials({
+  static buildStaticCredentials(accessKeyId, secretAccessKey) {
+    return async () => {
+      return Promise.resolve({
         accessKeyId,
         secretAccessKey
       });
+    };
+  }
+  static buildCredentials(config, region) {
+    if (!config) {
+      return credentialProviders.fromNodeProviderChain();
     }
+    const accessKeyId = config.getOptionalString("accessKeyId");
+    const secretAccessKey = config.getOptionalString("secretAccessKey");
+    const explicitCredentials = accessKeyId && secretAccessKey ? AwsS3Publish.buildStaticCredentials(accessKeyId, secretAccessKey) : credentialProviders.fromNodeProviderChain();
     const roleArn = config.getOptionalString("roleArn");
     if (roleArn) {
-      return
+      return credentialProviders.fromTemporaryCredentials({
         masterCredentials: explicitCredentials,
         params: {
           RoleSessionName: "backstage-aws-techdocs-s3-publisher",
           RoleArn: roleArn
-        }
+        },
+        clientConfig: { region }
       });
     }
     return explicitCredentials;
   }
   async getReadiness() {
     try {
-      await this.storageClient.
+      await this.storageClient.send(
+        new clientS3.HeadBucketCommand({ Bucket: this.bucketName })
+      );
       this.logger.info(
         `Successfully connected to the AWS S3 bucket ${this.bucketName}.`
       );
@@ -861,7 +871,11 @@ class AwsS3Publish {
           ...sse && { ServerSideEncryption: sse }
         };
         objects.push(params.Key);
-
+        const upload = new libStorage.Upload({
+          client: this.storageClient,
+          params
+        });
+        return upload.done();
       },
       absoluteFilesToUpload,
       { concurrencyLimit: 10 }
@@ -886,10 +900,12 @@ class AwsS3Publish {
     const staleFiles = getStaleFiles(relativeFilesToUpload, existingFiles);
     await bulkStorageOperation(
       async (relativeFilePath) => {
-        return await this.storageClient.
-
-
-
+        return await this.storageClient.send(
+          new clientS3.DeleteObjectCommand({
+            Bucket: this.bucketName,
+            Key: relativeFilePath
+          })
+        );
       },
       staleFiles,
       { concurrencyLimit: 10 }
@@ -909,12 +925,16 @@ class AwsS3Publish {
     const entityTriplet = `${entityName.namespace}/${entityName.kind}/${entityName.name}`;
     const entityDir = this.legacyPathCasing ? entityTriplet : lowerCaseEntityTriplet(entityTriplet);
     const entityRootDir = path__default["default"].posix.join(this.bucketRootPath, entityDir);
-    const stream = this.storageClient.getObject({
-      Bucket: this.bucketName,
-      Key: `${entityRootDir}/techdocs_metadata.json`
-    }).createReadStream();
     try {
-      const
+      const resp = await this.storageClient.send(
+        new clientS3.GetObjectCommand({
+          Bucket: this.bucketName,
+          Key: `${entityRootDir}/techdocs_metadata.json`
+        })
+      );
+      const techdocsMetadataJson = await streamToBuffer$1(
+        resp.Body
+      );
       if (!techdocsMetadataJson) {
         throw new Error(
           `Unable to parse the techdocs metadata file ${entityRootDir}/techdocs_metadata.json.`
@@ -941,14 +961,16 @@ class AwsS3Publish {
     const filePath = path__default["default"].posix.join(this.bucketRootPath, filePathNoRoot);
     const fileExtension = path__default["default"].extname(filePath);
     const responseHeaders = getHeadersForFileExtension(fileExtension);
-    const stream = this.storageClient.getObject({ Bucket: this.bucketName, Key: filePath }).createReadStream();
     try {
+      const resp = await this.storageClient.send(
+        new clientS3.GetObjectCommand({ Bucket: this.bucketName, Key: filePath })
+      );
       for (const [headerKey, headerValue] of Object.entries(
         responseHeaders
       )) {
         res.setHeader(headerKey, headerValue);
       }
-      res.send(await streamToBuffer$1(
+      res.send(await streamToBuffer$1(resp.Body));
     } catch (err) {
       errors.assertError(err);
       this.logger.warn(
@@ -963,10 +985,12 @@ class AwsS3Publish {
     const entityTriplet = `${entity.metadata.namespace}/${entity.kind}/${entity.metadata.name}`;
     const entityDir = this.legacyPathCasing ? entityTriplet : lowerCaseEntityTriplet(entityTriplet);
     const entityRootDir = path__default["default"].posix.join(this.bucketRootPath, entityDir);
-      await this.storageClient.
-
-
-
+      await this.storageClient.send(
+        new clientS3.HeadObjectCommand({
+          Bucket: this.bucketName,
+          Key: `${entityRootDir}/index.html`
+        })
+      );
       return Promise.resolve(true);
     } catch (e) {
       return Promise.resolve(false);
@@ -994,16 +1018,20 @@ class AwsS3Publish {
     }
     try {
       this.logger.verbose(`Migrating ${file}`);
-      await this.storageClient.
-
-        CopySource: [this.bucketName, file].join("/"),
-        Key: newPath
-      }).promise();
-      if (removeOriginal) {
-        await this.storageClient.deleteObject({
+      await this.storageClient.send(
+        new clientS3.CopyObjectCommand({
           Bucket: this.bucketName,
-
-
+          CopySource: [this.bucketName, file].join("/"),
+          Key: newPath
+        })
+      );
+      if (removeOriginal) {
+        await this.storageClient.send(
+          new clientS3.DeleteObjectCommand({
+            Bucket: this.bucketName,
+            Key: file
+          })
+        );
       }
     } catch (e) {
       errors.assertError(e);
@@ -1018,11 +1046,13 @@ class AwsS3Publish {
     let nextContinuation;
     let allObjects;
     do {
-      allObjects = await this.storageClient.
-
-
-
-
+      allObjects = await this.storageClient.send(
+        new clientS3.ListObjectsV2Command({
+          Bucket: this.bucketName,
+          ContinuationToken: nextContinuation,
+          ...prefix ? { Prefix: prefix } : {}
+        })
+      );
       objects.push(
         ...(allObjects.Contents || []).map((f) => f.Key || "").filter((f) => !!f)
       );
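The hunks above all follow the same aws-sdk v2 to v3 migration: request objects with `.promise()` or `.createReadStream()` are replaced by command classes passed to `storageClient.send()`, and `GetObjectCommand` responses expose the object body as a stream on `resp.Body`. A minimal sketch of that pattern follows, with a hypothetical bucket and key; the `readObjectAsBuffer` helper is illustrative and not part of the package.

```js
// Sketch of the SDK v3 command/send pattern used throughout the diff above.
const { S3Client, GetObjectCommand } = require('@aws-sdk/client-s3');

async function readObjectAsBuffer(client, bucket, key) {
  const resp = await client.send(
    new GetObjectCommand({ Bucket: bucket, Key: key })
  );
  // In Node.js, resp.Body is a readable stream; collect it into a Buffer,
  // similar to what streamToBuffer$1 does in the published code.
  const chunks = [];
  for await (const chunk of resp.Body) {
    chunks.push(chunk);
  }
  return Buffer.concat(chunks);
}

// Example usage (values are hypothetical):
// const client = new S3Client({ region: 'us-east-1' });
// await readObjectAsBuffer(client, 'example-bucket', 'default/component/docs/index.html');
```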