keycloakify 6.9.0 → 6.9.1

This diff shows the changes between two publicly released versions of the package, as they appear in their public registry. It is provided for informational purposes only.
@@ -1,289 +1,80 @@
-import { dirname as pathDirname, basename as pathBasename, join as pathJoin } from "path";
-import { createReadStream, createWriteStream, unlinkSync } from "fs";
-import { stat, mkdir, unlink, readFile, writeFile } from "fs/promises";
+import { basename as pathBasename, join as pathJoin } from "path";
+import { execSync } from "child_process";
+import * as fs from "fs";
 import { transformCodebase } from "./transformCodebase";
-import { createHash } from "crypto";
-import http from "http";
-import https from "https";
-import { createInflateRaw } from "zlib";
+import * as crypto from "crypto";

-import type { Readable } from "stream";
+/** assert url ends with .zip */
+export function downloadAndUnzip(params: {
+    isSilent: boolean;
+    url: string;
+    destDirPath: string;
+    pathOfDirToExtractInArchive?: string;
+    cacheDirPath: string;
+}) {
+    const { url, destDirPath, pathOfDirToExtractInArchive, cacheDirPath } = params;

-function hash(s: string) {
-    return createHash("sha256").update(s).digest("hex");
-}
+    const extractDirPath = pathJoin(
+        cacheDirPath,
+        `_${crypto.createHash("sha256").update(JSON.stringify({ url, pathOfDirToExtractInArchive })).digest("hex").substring(0, 15)}`
+    );

-async function maybeReadFile(path: string) {
-    try {
-        return await readFile(path, "utf-8");
-    } catch (error) {
-        if ((error as Error & { code: string }).code === "ENOENT") return undefined;
-        throw error;
-    }
-}
+    fs.mkdirSync(cacheDirPath, { "recursive": true });

-async function maybeStat(path: string) {
-    try {
-        return await stat(path);
-    } catch (error) {
-        if ((error as Error & { code: string }).code === "ENOENT") return undefined;
-        throw error;
-    }
-}
+    const { readIsSuccessByExtractDirPath, writeIsSuccessByExtractDirPath } = (() => {
+        const filePath = pathJoin(cacheDirPath, "isSuccessByExtractDirPath.json");

-/**
- * Download a file from `url` to `dir`. Will try to avoid downloading existing
- * files by using an `{hash(url)}.etag` file. If this file exists, we add an
- * etag headear, so server can tell us if file changed and we should re-download
- * or if our file is up-to-date.
- *
- * Warning, this method assumes that the target filename can be extracted from
- * url, content-disposition headers are ignored.
- *
- * If the target directory does not exist, it will be created.
- *
- * If the target file exists and is out of date, it will be overwritten.
- * If the target file exists and there is no etag file, the target file will
- * be overwritten.
- *
- * @param url download url
- * @param dir target directory
- * @returns promise for the full path of the downloaded file
- */
-async function download(url: string, dir: string): Promise<string> {
-    await mkdir(dir, { recursive: true });
-    const filename = pathBasename(url);
-    const filepath = pathJoin(dir, filename);
-    // If downloaded file exists already and has an `.etag` companion file,
-    // read the etag from that file. This will avoid re-downloading the file
-    // if it is up to date.
-    const exists = await maybeStat(filepath);
-    const etagFilepath = pathJoin(dir, "_" + hash(url).substring(0, 15) + ".etag");
-    const etag = !exists ? undefined : await maybeReadFile(etagFilepath);
+        type IsSuccessByExtractDirPath = Record<string, boolean | undefined>;

-    return new Promise((resolve, reject) => {
-        // use inner method to allow following redirects
-        function request(url1: URL) {
-            const headers: Record<string, string> = {};
-            if (etag) headers["If-None-Match"] = etag;
-            (url1.protocol === "https:" ? https : http).get(url1, { headers }, response => {
-                if (response.statusCode === 301 || response.statusCode === 302) {
-                    // follow redirects
-                    request(new URL(response.headers.location!!));
-                } else if (response.statusCode === 304) {
-                    // up-to-date, resolve now
-                    resolve(filepath);
-                } else if (response.statusCode !== 200) {
-                    reject(new Error(`Request to ${url1} returned status ${response.statusCode}.`));
-                } else {
-                    const fp = createWriteStream(filepath, { autoClose: true });
-                    fp.on("err", e => {
-                        fp.close();
-                        unlinkSync(filepath);
-                        reject(e);
-                    });
-                    fp.on("finish", async () => {
-                        // when targetfile has been written, write etag file so that
-                        // next time around we don't need to re-download
-                        const responseEtag = response.headers.etag;
-                        if (responseEtag) await writeFile(etagFilepath, responseEtag, "utf-8");
-                        resolve(filepath);
-                    });
-                    response.pipe(fp);
-                }
-            });
-        }
-        request(new URL(url));
-    });
-}
+        function readIsSuccessByExtractDirPath(): IsSuccessByExtractDirPath {
+            if (!fs.existsSync(filePath)) {
+                return {};
+            }

-/**
- * @typedef
- * @type MultiError = Error & { cause: Error[] }
- */
+            return JSON.parse(fs.readFileSync(filePath).toString("utf8"));
+        }

-/**
- * Extract the archive `zipFile` into the directory `dir`. If `archiveDir` is given,
- * only that directory will be extracted, stripping the given path components.
- *
- * If dir does not exist, it will be created.
- *
- * If any archive file exists, it will be overwritten.
- *
- * Will unzip using all available nodejs worker threads.
- *
- * Will try to clean up extracted files on failure.
- *
- * If unpacking fails, will either throw an regular error, or
- * possibly an `MultiError`, which contains a `cause` field with
- * a number of root cause errors.
- *
- * Warning this method is not optimized for continuous reading of the zip
- * archive, but is a trade-off between simplicity and allowing extraction
- * of a single directory from the archive.
- *
- * @param zipFile the file to unzip
- * @param dir the target directory
- * @param archiveDir if given, unpack only files from this archive directory
- * @throws {MultiError} error
- * @returns Promise for a list of full file paths pointing to actually extracted files
- */
-async function unzip(zipFile: string, dir: string, archiveDir?: string): Promise<string[]> {
-    await mkdir(dir, { recursive: true });
-    const promises: Promise<string>[] = [];
+        function writeIsSuccessByExtractDirPath(isSuccessByExtractDirPath: IsSuccessByExtractDirPath): void {
+            fs.writeFileSync(filePath, Buffer.from(JSON.stringify(isSuccessByExtractDirPath, null, 2), "utf8"));
+        }

-    // Iterate over all files in the zip, skip files which are not in archiveDir,
-    // if given.
-    for await (const record of iterateZipArchive(zipFile)) {
-        const { path: recordPath, createReadStream: createRecordReadStream } = record;
-        const filePath = pathJoin(dir, recordPath);
-        const parent = pathDirname(filePath);
-        if (archiveDir && !recordPath.startsWith(archiveDir)) continue;
-        promises.push(
-            new Promise<string>(async (resolve, reject) => {
-                await mkdir(parent, { recursive: true });
-                // Pull the file out of the archive, write it to the target directory
-                const input = createRecordReadStream();
-                const output = createWriteStream(filePath);
-                output.on("error", e => reject(Object.assign(e, { filePath })));
-                output.on("finish", () => resolve(filePath));
-                input.pipe(output);
-            })
-        );
-    }
+        return { readIsSuccessByExtractDirPath, writeIsSuccessByExtractDirPath };
+    })();

-    // Wait until _all_ files are either extracted or failed
-    const results = await Promise.allSettled(promises);
-    const success = results.filter(r => r.status === "fulfilled").map(r => (r as PromiseFulfilledResult<string>).value);
-    const failure = results.filter(r => r.status === "rejected").map(r => (r as PromiseRejectedResult).reason);
+    downloadAndUnzip: {
+        const isSuccessByExtractDirPath = readIsSuccessByExtractDirPath();

-    // If any extraction failed, try to clean up, then throw a MultiError,
-    // which has a `cause` field, containing a list of root cause errors.
-    if (failure.length) {
-        await Promise.all(success.map(path => unlink(path)));
-        await Promise.all(failure.map(e => e && e.path && unlink(e.path as string)));
-        const e = new Error("Failed to extract: " + failure.map(e => e.message).join(";"));
-        (e as any).cause = failure;
-        throw e;
-    }
+        if (isSuccessByExtractDirPath[extractDirPath]) {
+            break downloadAndUnzip;
+        }

-    return success;
-}
+        writeIsSuccessByExtractDirPath({
+            ...isSuccessByExtractDirPath,
+            [extractDirPath]: false
+        });

-/**
- *
- * @param file file to read
- * @param start first byte to read
- * @param end last byte to read
- * @returns Promise of a buffer of read bytes
- */
-async function readFileChunk(file: string, start: number, end: number): Promise<Buffer> {
-    const chunks: Buffer[] = [];
-    return new Promise((resolve, reject) => {
-        const stream = createReadStream(file, { start, end });
-        stream.on("error", e => reject(e));
-        stream.on("end", () => resolve(Buffer.concat(chunks)));
-        stream.on("data", chunk => chunks.push(chunk as Buffer));
-    });
-}
+        fs.rmSync(extractDirPath, { "recursive": true, "force": true });

-type ZipRecord = {
-    path: string;
-    createReadStream: () => Readable;
-    compressionMethod: "deflate" | undefined;
-};
+        fs.mkdirSync(extractDirPath);

-type ZipRecordGenerator = AsyncGenerator<ZipRecord, void, unknown>;
+        const zipFileBasename = pathBasename(url);

-/**
- * Iterate over all records of a zipfile, and yield a ZipRecord.
- * Use `record.createReadStream()` to actually read the file.
- *
- * Warning this method will only work with single-disk zip files.
- * Warning this method may fail if the zip archive has an crazy amount
- * of files and the central directory is not fully contained within the
- * last 65k bytes of the zip file.
- *
- * @param zipFile
- * @returns AsyncGenerator which will yield ZipRecords
- */
-async function* iterateZipArchive(zipFile: string): ZipRecordGenerator {
-    // Need to know zip file size before we can do anything else
-    const { size } = await stat(zipFile);
-    const chunkSize = 65_535 + 22 + 1; // max comment size + end header size + wiggle
-    // Read last ~65k bytes. Zip files have an comment up to 65_535 bytes at the very end,
-    // before that comes the zip central directory end header.
-    let chunk = await readFileChunk(zipFile, size - chunkSize, size);
-    const unread = size - chunk.length;
-    let i = chunk.length - 4;
-    let found = false;
-    // Find central directory end header, reading backwards from the end
-    while (!found && i-- > 0) if (chunk[i] === 0x50 && chunk.readUInt32LE(i) === 0x06054b50) found = true;
-    if (!found) throw new Error("Not a zip file");
-    // This method will fail on a multi-disk zip, so bail early.
-    if (chunk.readUInt16LE(i + 4) !== 0) throw new Error("Multi-disk zip not supported");
-    let nFiles = chunk.readUint16LE(i + 10);
-    // Get the position of the central directory
-    const directorySize = chunk.readUint32LE(i + 12);
-    const directoryOffset = chunk.readUint32LE(i + 16);
-    if (directoryOffset === 0xffff_ffff) throw new Error("zip64 not supported");
-    if (directoryOffset > size) throw new Error(`Central directory offset ${directoryOffset} is outside file`);
-    i = directoryOffset - unread;
-    // If i < 0, it means that the central directory is not contained within `chunk`
-    if (i < 0) {
-        chunk = await readFileChunk(zipFile, directoryOffset, directoryOffset + directorySize);
-        i = 0;
-    }
-    // Now iterate the central directory records, yield an `ZipRecord` for every entry
-    while (nFiles-- > 0) {
-        // Check for marker bytes
-        if (chunk.readUInt32LE(i) !== 0x02014b50) throw new Error("No central directory record at position " + (unread + i));
-        const compressionMethod = ({ 8: "deflate" } as const)[chunk.readUint16LE(i + 10)];
-        const compressedFileSize = chunk.readUint32LE(i + 20);
-        const filenameLength = chunk.readUint16LE(i + 28);
-        const extraLength = chunk.readUint16LE(i + 30);
-        const commentLength = chunk.readUint16LE(i + 32);
-        // Start of thea actual content byte stream is after the 'local' record header,
-        // which is 30 bytes long plus filename and extra field
-        const start = chunk.readUint32LE(i + 42) + 30 + filenameLength + extraLength;
-        const end = start + compressedFileSize;
-        const filename = chunk.slice(i + 46, i + 46 + filenameLength).toString("utf-8");
-        const createRecordReadStream = () => {
-            const input = createReadStream(zipFile, { start, end });
-            if (compressionMethod === "deflate") {
-                const inflate = createInflateRaw();
-                input.pipe(inflate);
-                return inflate;
-            }
-            return input;
-        };
-        if (end > start) yield { path: filename, createReadStream: createRecordReadStream, compressionMethod };
-        // advance pointer to next central directory entry
-        i += 46 + filenameLength + extraLength + commentLength;
-    }
-}
+        execSync(`curl -L ${url} -o ${zipFileBasename} ${params.isSilent ? "-s" : ""}`, { "cwd": extractDirPath });

-export async function downloadAndUnzip({
-    url,
-    destDirPath,
-    pathOfDirToExtractInArchive,
-    cacheDirPath
-}: {
-    isSilent: boolean;
-    url: string;
-    destDirPath: string;
-    pathOfDirToExtractInArchive?: string;
-    cacheDirPath: string;
-}) {
-    const downloadHash = hash(JSON.stringify({ url, pathOfDirToExtractInArchive })).substring(0, 15);
-    const extractDirPath = pathJoin(cacheDirPath, `_${downloadHash}`);
+        execSync(`unzip -o ${zipFileBasename}${pathOfDirToExtractInArchive === undefined ? "" : ` "${pathOfDirToExtractInArchive}/**/*"`}`, {
+            "cwd": extractDirPath
+        });

-    const zipFilepath = await download(url, cacheDirPath);
-    const zipMtime = (await stat(zipFilepath)).mtimeMs;
-    const unzipMtime = (await maybeStat(extractDirPath))?.mtimeMs;
+        fs.rmSync(pathJoin(extractDirPath, zipFileBasename), { "recursive": true, "force": true });

-    if (!unzipMtime || zipMtime > unzipMtime) await unzip(zipFilepath, extractDirPath, pathOfDirToExtractInArchive);
+        writeIsSuccessByExtractDirPath({
+            ...isSuccessByExtractDirPath,
+            [extractDirPath]: true
+        });
+    }

-    const srcDirPath = pathOfDirToExtractInArchive === undefined ? extractDirPath : pathJoin(extractDirPath, pathOfDirToExtractInArchive);
-    transformCodebase({ srcDirPath, destDirPath });
+    transformCodebase({
+        "srcDirPath": pathOfDirToExtractInArchive === undefined ? extractDirPath : pathJoin(extractDirPath, pathOfDirToExtractInArchive),
+        destDirPath
+    });
 }
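
In short, 6.9.1 replaces the pure-Node pipeline (an etag-aware HTTP downloader plus a handwritten zip central-directory parser) with shell calls to curl and unzip, and replaces the mtime-based cache check with an isSuccessByExtractDirPath.json marker file, so an extraction that failed part-way is redone on the next run. Two consequences are visible in the diff: downloadAndUnzip is now synchronous (execSync), and it assumes curl and unzip are on the PATH, with url and zipFileBasename interpolated unquoted into the shell command. A minimal usage sketch of the new signature follows; the module path, URL, and directory paths are hypothetical, not taken from the package:

    import { join as pathJoin } from "path";
    // Module path assumed for illustration; the diff does not show file names.
    import { downloadAndUnzip } from "./downloadAndUnzip";

    // Hypothetical example: fetch a themes archive into a build directory.
    downloadAndUnzip({
        "isSilent": true,
        // Per the comment in the new code, the url must end with .zip.
        "url": "https://example.com/archives/themes.zip",
        "destDirPath": pathJoin("build_keycloak", "theme"),
        // Optional: extract only this directory of the archive.
        "pathOfDirToExtractInArchive": "theme",
        "cacheDirPath": pathJoin("node_modules", ".cache", "keycloakify")
    });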
@@ -1,17 +1,10 @@
 import { getProjectRoot } from "./getProjectRoot";
 import { join as pathJoin } from "path";
-import { constants } from "fs";
-import { chmod, stat } from "fs/promises";
+import * as child_process from "child_process";
+import * as fs from "fs";

-async () => {
-    var { bin } = await import(pathJoin(getProjectRoot(), "package.json"));
-
-    var promises = Object.values<string>(bin).map(async scriptPath => {
-        const fullPath = pathJoin(getProjectRoot(), scriptPath);
-        const oldMode = (await stat(fullPath)).mode;
-        const newMode = oldMode | constants.S_IXUSR | constants.S_IXGRP | constants.S_IXOTH;
-        await chmod(fullPath, newMode);
-    });
-
-    await Promise.all(promises);
-};
+Object.entries<string>(JSON.parse(fs.readFileSync(pathJoin(getProjectRoot(), "package.json")).toString("utf8"))["bin"]).forEach(([, scriptPath]) =>
+    child_process.execSync(`chmod +x ${scriptPath}`, {
+        "cwd": getProjectRoot()
+    })
+);
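
Beyond swapping fs.promises for execSync, this hunk appears to fix a real bug: the removed code wrapped its chmod logic in a bare "async () => { ... };" expression statement that was never invoked, so the execute bits were seemingly never set. The new code runs chmod +x for each entry of the bin map in package.json, which assumes a POSIX shell. For reference, a shell-free sketch of the same effect using only Node's fs API (illustrative, not what the package ships):

    import { join as pathJoin } from "path";
    import * as fs from "fs";
    import { getProjectRoot } from "./getProjectRoot";

    const packageJson = JSON.parse(fs.readFileSync(pathJoin(getProjectRoot(), "package.json")).toString("utf8"));

    // Add the execute bits (user, group, other) to every script listed under "bin".
    Object.values<string>(packageJson["bin"]).forEach(scriptPath => {
        const fullPath = pathJoin(getProjectRoot(), scriptPath);
        fs.chmodSync(fullPath, fs.statSync(fullPath).mode | 0o111);
    });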