@f5xc-salesdemos/pi-utils 14.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +60 -0
- package/src/abortable.ts +85 -0
- package/src/async.ts +50 -0
- package/src/cli.ts +432 -0
- package/src/color.ts +204 -0
- package/src/dirs.ts +425 -0
- package/src/env.ts +84 -0
- package/src/format.ts +106 -0
- package/src/frontmatter.ts +118 -0
- package/src/fs-error.ts +56 -0
- package/src/glob.ts +189 -0
- package/src/hook-fetch.ts +30 -0
- package/src/index.ts +47 -0
- package/src/json.ts +10 -0
- package/src/logger.ts +204 -0
- package/src/mermaid-ascii.ts +31 -0
- package/src/mime.ts +159 -0
- package/src/peek-file.ts +114 -0
- package/src/postmortem.ts +197 -0
- package/src/procmgr.ts +326 -0
- package/src/prompt.ts +401 -0
- package/src/ptree.ts +386 -0
- package/src/ring.ts +169 -0
- package/src/snowflake.ts +136 -0
- package/src/stream.ts +316 -0
- package/src/temp.ts +77 -0
- package/src/type-guards.ts +11 -0
- package/src/which.ts +230 -0
package/src/logger.ts
ADDED
|
@@ -0,0 +1,204 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Centralized file logger for xcsh.
|
|
3
|
+
*
|
|
4
|
+
* Logs to ~/.xcsh/logs/ with size-based rotation, supporting concurrent xcsh instances.
|
|
5
|
+
* Each log entry includes process.pid for traceability.
|
|
6
|
+
*/
|
|
7
|
+
import * as fs from "node:fs";
|
|
8
|
+
import winston from "winston";
|
|
9
|
+
import DailyRotateFile from "winston-daily-rotate-file";
|
|
10
|
+
import { getLogsDir } from "./dirs";
|
|
11
|
+
|
|
12
|
+
/** Ensure logs directory exists */
|
|
13
|
+
function ensureLogsDir(): string {
|
|
14
|
+
const logsDir = getLogsDir();
|
|
15
|
+
if (!fs.existsSync(logsDir)) {
|
|
16
|
+
fs.mkdirSync(logsDir, { recursive: true });
|
|
17
|
+
}
|
|
18
|
+
return logsDir;
|
|
19
|
+
}
|
|
20
|
+
|
|
21
|
+
/**
 * Custom log format: timestamp, level, pid, message, plus any caller-supplied
 * metadata flattened into top-level keys. Emits one JSON object per line.
 */
const logFormat = winston.format.combine(
  winston.format.timestamp({ format: "YYYY-MM-DDTHH:mm:ss.SSSZ" }),
  winston.format.printf(({ timestamp, level, message, ...meta }) => {
    const entry: Record<string, unknown> = {
      timestamp,
      level,
      // pid on every entry so concurrent xcsh instances sharing the log
      // directory can be told apart.
      pid: process.pid,
      message,
    };
    // Flatten metadata into entry; skip the reserved winston keys so they
    // cannot overwrite the fields assigned above.
    for (const [key, value] of Object.entries(meta)) {
      if (key !== "level" && key !== "timestamp" && key !== "message") {
        entry[key] = value;
      }
    }
    return JSON.stringify(entry);
  }),
);

/**
 * Size-based rotating file transport: date-stamped filenames, 10 MB per file,
 * at most 5 files kept, older archives gzipped.
 */
const fileTransport = new DailyRotateFile({
  dirname: ensureLogsDir(),
  filename: "xcsh.%DATE%.log",
  datePattern: "YYYY-MM-DD",
  maxSize: "10m",
  maxFiles: 5,
  zippedArchive: true,
});

/** The winston logger instance (debug level and up, file transport only). */
const winstonLogger = winston.createLogger({
  level: "debug",
  format: logFormat,
  transports: [fileTransport],
  // Don't exit on error - logging failures shouldn't crash the app
  exitOnError: false,
});
|
|
59
|
+
|
|
60
|
+
/**
|
|
61
|
+
* Log an error message.
|
|
62
|
+
* @param message - The message to log.
|
|
63
|
+
* @param context - The context to log.
|
|
64
|
+
*/
|
|
65
|
+
export function error(message: string, context?: Record<string, unknown>): void {
|
|
66
|
+
try {
|
|
67
|
+
winstonLogger.error(message, context);
|
|
68
|
+
} catch {
|
|
69
|
+
// Silently ignore logging failures
|
|
70
|
+
}
|
|
71
|
+
}
|
|
72
|
+
|
|
73
|
+
/**
|
|
74
|
+
* Log a warning message.
|
|
75
|
+
* @param message - The message to log.
|
|
76
|
+
* @param context - The context to log.
|
|
77
|
+
*/
|
|
78
|
+
export function warn(message: string, context?: Record<string, unknown>): void {
|
|
79
|
+
try {
|
|
80
|
+
winstonLogger.warn(message, context);
|
|
81
|
+
} catch {
|
|
82
|
+
// Silently ignore logging failures
|
|
83
|
+
}
|
|
84
|
+
}
|
|
85
|
+
|
|
86
|
+
/**
|
|
87
|
+
* Log a debug message.
|
|
88
|
+
* @param message - The message to log.
|
|
89
|
+
* @param context - The context to log.
|
|
90
|
+
*/
|
|
91
|
+
export function debug(message: string, context?: Record<string, unknown>): void {
|
|
92
|
+
try {
|
|
93
|
+
winstonLogger.debug(message, context);
|
|
94
|
+
} catch {
|
|
95
|
+
// Silently ignore logging failures
|
|
96
|
+
}
|
|
97
|
+
}
|
|
98
|
+
|
|
99
|
+
/** Segments shorter than this (ms) are omitted from the printed report. */
const LOGGED_TIMING_THRESHOLD_MS = 5;

/** Sequential wall-clock markers (next marker closes the previous segment). */
let gTimings: [op: string, ts: number][] = [];

/** Await-accurate durations (safe for parallel work; sums can overlap). */
let gAsyncSpans: [op: string, durationMs: number][] = [];

/** Whether to record timings. */
let gRecordTimings = false;
|
|
109
|
+
|
|
110
|
+
/**
|
|
111
|
+
* Print collected timings to stderr.
|
|
112
|
+
* Wall segments are gaps between consecutive {@link time} markers only; they are wrong when
|
|
113
|
+
* concurrent code also calls {@link time} (e.g. parallel capability loads). Use {@link timeAsync}
|
|
114
|
+
* for those awaits instead.
|
|
115
|
+
*/
|
|
116
|
+
export function printTimings(): void {
|
|
117
|
+
if (!gRecordTimings || gTimings.length === 0) {
|
|
118
|
+
console.error("\n--- Startup Timings ---\n(no markers)\n");
|
|
119
|
+
return;
|
|
120
|
+
}
|
|
121
|
+
|
|
122
|
+
const endTs = performance.now();
|
|
123
|
+
gTimings.push(["(end)", endTs]);
|
|
124
|
+
|
|
125
|
+
console.error("\n--- Startup timings (wall segments between time() markers) ---");
|
|
126
|
+
const firstTs = gTimings[0][1];
|
|
127
|
+
for (let i = 0; i < gTimings.length - 1; i++) {
|
|
128
|
+
const [op, ts] = gTimings[i];
|
|
129
|
+
const [, nextTs] = gTimings[i + 1];
|
|
130
|
+
const dur = nextTs - ts;
|
|
131
|
+
if (dur > LOGGED_TIMING_THRESHOLD_MS) {
|
|
132
|
+
console.error(` ${op}: ${dur}ms`);
|
|
133
|
+
}
|
|
134
|
+
}
|
|
135
|
+
console.error(` span (first marker → end): ${endTs - firstTs}ms`);
|
|
136
|
+
|
|
137
|
+
if (gAsyncSpans.length > 0) {
|
|
138
|
+
console.error("\n--- Async (await-accurate; parallel spans may overlap) ---");
|
|
139
|
+
for (const [op, dur] of gAsyncSpans) {
|
|
140
|
+
if (dur > LOGGED_TIMING_THRESHOLD_MS) {
|
|
141
|
+
console.error(` ${op}: ${dur}ms`);
|
|
142
|
+
}
|
|
143
|
+
}
|
|
144
|
+
}
|
|
145
|
+
|
|
146
|
+
console.error("------------------------\n");
|
|
147
|
+
|
|
148
|
+
gTimings.pop();
|
|
149
|
+
}
|
|
150
|
+
|
|
151
|
+
/**
|
|
152
|
+
* Begin recording startup timings. Seeds the timeline so the first segment is meaningful.
|
|
153
|
+
*/
|
|
154
|
+
export function startTiming(): void {
|
|
155
|
+
gTimings = [["(startup)", performance.now()]];
|
|
156
|
+
gAsyncSpans = [];
|
|
157
|
+
gRecordTimings = true;
|
|
158
|
+
}
|
|
159
|
+
|
|
160
|
+
/**
|
|
161
|
+
* End timing window and clear buffers.
|
|
162
|
+
*/
|
|
163
|
+
export function endTiming(): void {
|
|
164
|
+
gTimings = [];
|
|
165
|
+
gAsyncSpans = [];
|
|
166
|
+
gRecordTimings = false;
|
|
167
|
+
}
|
|
168
|
+
|
|
169
|
+
function recordAsyncSpan(op: string, start: number): void {
|
|
170
|
+
const dur = performance.now() - start;
|
|
171
|
+
if (dur > LOGGED_TIMING_THRESHOLD_MS) {
|
|
172
|
+
gAsyncSpans.push([op, dur]);
|
|
173
|
+
}
|
|
174
|
+
}
|
|
175
|
+
|
|
176
|
+
/**
|
|
177
|
+
* Wall-clock segment boundary: duration for this label runs until the next {@link time} call.
|
|
178
|
+
* Do not use across `await` when other tasks may call {@link time}; use {@link timeAsync} for the awaited work.
|
|
179
|
+
*/
|
|
180
|
+
export function time(op: string): void;
|
|
181
|
+
export function time<T, A extends unknown[]>(op: string, fn: (...args: A) => T, ...args: A): T;
|
|
182
|
+
export function time<T, A extends unknown[]>(op: string, fn?: (...args: A) => T, ...args: A): T | undefined {
|
|
183
|
+
if (fn === undefined) {
|
|
184
|
+
if (gRecordTimings) {
|
|
185
|
+
gTimings.push([op, performance.now()]);
|
|
186
|
+
}
|
|
187
|
+
return undefined as T;
|
|
188
|
+
} else if (gRecordTimings) {
|
|
189
|
+
const start = performance.now();
|
|
190
|
+
try {
|
|
191
|
+
const result = fn(...args);
|
|
192
|
+
if (result instanceof Promise) {
|
|
193
|
+
return result.finally(recordAsyncSpan.bind(null, op, start)) as T;
|
|
194
|
+
}
|
|
195
|
+
recordAsyncSpan(op, start);
|
|
196
|
+
return result;
|
|
197
|
+
} catch (error) {
|
|
198
|
+
recordAsyncSpan(op, start);
|
|
199
|
+
throw error;
|
|
200
|
+
}
|
|
201
|
+
} else {
|
|
202
|
+
return fn(...args);
|
|
203
|
+
}
|
|
204
|
+
}
|
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
import { type AsciiRenderOptions, renderMermaidASCII } from "beautiful-mermaid";
|
|
2
|
+
|
|
3
|
+
export type { AsciiRenderOptions as MermaidAsciiRenderOptions };
|
|
4
|
+
|
|
5
|
+
export function renderMermaidAscii(source: string, options?: AsciiRenderOptions): string {
|
|
6
|
+
return renderMermaidASCII(source, options);
|
|
7
|
+
}
|
|
8
|
+
|
|
9
|
+
export function renderMermaidAsciiSafe(source: string, options?: AsciiRenderOptions): string | null {
|
|
10
|
+
try {
|
|
11
|
+
return renderMermaidASCII(source, options);
|
|
12
|
+
} catch {
|
|
13
|
+
return null;
|
|
14
|
+
}
|
|
15
|
+
}
|
|
16
|
+
|
|
17
|
+
/**
|
|
18
|
+
* Extract mermaid code blocks from markdown text.
|
|
19
|
+
*/
|
|
20
|
+
export function extractMermaidBlocks(markdown: string): { source: string; hash: bigint }[] {
|
|
21
|
+
const blocks: { source: string; hash: bigint }[] = [];
|
|
22
|
+
const regex = /```mermaid\s*\n([\s\S]*?)```/g;
|
|
23
|
+
|
|
24
|
+
for (let match = regex.exec(markdown); match !== null; match = regex.exec(markdown)) {
|
|
25
|
+
const source = match[1].trim();
|
|
26
|
+
const hash = Bun.hash.xxHash64(source);
|
|
27
|
+
blocks.push({ source, hash });
|
|
28
|
+
}
|
|
29
|
+
|
|
30
|
+
return blocks;
|
|
31
|
+
}
|
package/src/mime.ts
ADDED
|
@@ -0,0 +1,159 @@
|
|
|
1
|
+
import { peekFile, peekFileSync } from "./peek-file";
|
|
2
|
+
|
|
3
|
+
/** Bytes to read when probing image headers (large enough for typical JPEG SOF scans). */
const DEFAULT_IMAGE_METADATA_HEADER_BYTES = 256 * 1024;

// Magic byte sequences used to sniff the container format.
const PNG_MAGIC = Buffer.from([0x89, 0x50, 0x4e, 0x47, 0x0d, 0x0a, 0x1a, 0x0a]);
const JPEG_MAGIC = Buffer.from([0xff, 0xd8, 0xff]);
const WEBP_RIFF_MAGIC = Buffer.from([0x52, 0x49, 0x46, 0x46]); // "RIFF"
const WEBP_MAGIC = Buffer.from([0x57, 0x45, 0x42, 0x50]); // "WEBP"
const PNG_IHDR = Buffer.from("IHDR");
const GIF87A = Buffer.from("GIF87a");
const GIF89A = Buffer.from("GIF89a");
// WebP chunk FourCCs: extended, lossless, and lossy bitstreams.
const WEBP_VP8X = Buffer.from("VP8X");
const WEBP_VP8L = Buffer.from("VP8L");
const WEBP_VP8 = Buffer.from("VP8 ");

/** MIME types the metadata probes in this module can identify. */
export const SUPPORTED_IMAGE_MIME_TYPES = new Set(["image/png", "image/jpeg", "image/gif", "image/webp"]);

/**
 * Parsed image header info. Dimension/channel fields are optional because a
 * truncated header may only allow identifying the format itself.
 */
export type ImageMetadata =
  | { mimeType: "image/png"; width?: number; height?: number; channels?: number; hasAlpha?: boolean }
  | { mimeType: "image/jpeg"; width?: number; height?: number; channels?: number; hasAlpha?: false }
  | { mimeType: "image/gif"; width?: number; height?: number; channels?: 3; hasAlpha?: never }
  | { mimeType: "image/webp"; width?: number; height?: number; channels?: number; hasAlpha?: boolean };
|
|
23
|
+
|
|
24
|
+
function magicEquals(header: Uint8Array, offset: number, magic: Buffer): boolean {
|
|
25
|
+
if (header.length < offset + magic.length) {
|
|
26
|
+
return false;
|
|
27
|
+
}
|
|
28
|
+
return magic.equals(header.subarray(offset, offset + magic.length));
|
|
29
|
+
}
|
|
30
|
+
|
|
31
|
+
function parsePngMetadata(header: Uint8Array): ImageMetadata | null {
|
|
32
|
+
if (!magicEquals(header, 0, PNG_MAGIC)) return null;
|
|
33
|
+
if (!magicEquals(header, 12, PNG_IHDR)) return { mimeType: "image/png" };
|
|
34
|
+
if (header.length < 26) return { mimeType: "image/png" };
|
|
35
|
+
|
|
36
|
+
const view = new DataView(header.buffer, header.byteOffset, header.byteLength);
|
|
37
|
+
const width = view.getUint32(16, false);
|
|
38
|
+
const height = view.getUint32(20, false);
|
|
39
|
+
const colorType = view.getUint8(25);
|
|
40
|
+
if (colorType === 0) return { mimeType: "image/png", width, height, channels: 1, hasAlpha: false };
|
|
41
|
+
if (colorType === 2) return { mimeType: "image/png", width, height, channels: 3, hasAlpha: false };
|
|
42
|
+
if (colorType === 3) return { mimeType: "image/png", width, height, channels: 3 };
|
|
43
|
+
if (colorType === 4) return { mimeType: "image/png", width, height, channels: 2, hasAlpha: true };
|
|
44
|
+
if (colorType === 6) return { mimeType: "image/png", width, height, channels: 4, hasAlpha: true };
|
|
45
|
+
return { mimeType: "image/png", width, height };
|
|
46
|
+
}
|
|
47
|
+
|
|
48
|
+
/**
 * Scan JPEG segment markers for a Start-Of-Frame segment to extract
 * dimensions and channel count. Returns a bare { mimeType } when no SOF
 * appears within the header slice.
 */
function parseJpegMetadata(header: Uint8Array): ImageMetadata | null {
  if (!magicEquals(header, 0, JPEG_MAGIC)) return null;
  if (header.length < 4) return { mimeType: "image/jpeg" };

  const view = new DataView(header.buffer, header.byteOffset, header.byteLength);
  let offset = 2;
  while (offset + 9 < header.length) {
    // Segments start with 0xFF; resynchronize byte-by-byte otherwise.
    if (header[offset] !== 0xff) {
      offset += 1;
      continue;
    }

    // Any number of 0xFF fill bytes may precede the actual marker byte.
    let markerOffset = offset + 1;
    while (markerOffset < header.length && header[markerOffset] === 0xff) {
      markerOffset += 1;
    }
    if (markerOffset >= header.length) break;

    const marker = header[markerOffset];
    const segmentOffset = markerOffset + 1;
    // SOI (0xD8), EOI (0xD9), TEM (0x01), and RSTn (0xD0-0xD7) carry no
    // length/payload; skip straight past them.
    if (marker === 0xd8 || marker === 0xd9 || marker === 0x01 || (marker >= 0xd0 && marker <= 0xd7)) {
      offset = segmentOffset;
      continue;
    }
    if (segmentOffset + 1 >= header.length) break;

    const segmentLength = view.getUint16(segmentOffset, false);
    if (segmentLength < 2) break; // length includes its own 2 bytes; smaller is corrupt

    // SOF0..SOF15 except DHT (0xC4), JPG (0xC8), and DAC (0xCC).
    const isStartOfFrame = marker >= 0xc0 && marker <= 0xcf && marker !== 0xc4 && marker !== 0xc8 && marker !== 0xcc;
    if (isStartOfFrame) {
      if (segmentOffset + 7 >= header.length) break;
      // SOF layout: length(2), precision(1), height(2), width(2), components(1).
      const height = view.getUint16(segmentOffset + 3, false);
      const width = view.getUint16(segmentOffset + 5, false);
      const channels = header[segmentOffset + 7];
      return {
        mimeType: "image/jpeg",
        width,
        height,
        channels: Number.isFinite(channels) ? channels : undefined,
        hasAlpha: false,
      };
    }

    // Jump over this segment's payload to the next marker.
    offset = segmentOffset + segmentLength;
  }

  return { mimeType: "image/jpeg" };
}
|
|
97
|
+
|
|
98
|
+
function parseGifMetadata(header: Uint8Array): ImageMetadata | null {
|
|
99
|
+
if (!magicEquals(header, 0, GIF87A) && !magicEquals(header, 0, GIF89A)) return null;
|
|
100
|
+
if (header.length < 10) return { mimeType: "image/gif" };
|
|
101
|
+
const view = new DataView(header.buffer, header.byteOffset, header.byteLength);
|
|
102
|
+
return {
|
|
103
|
+
mimeType: "image/gif",
|
|
104
|
+
width: view.getUint16(6, true),
|
|
105
|
+
height: view.getUint16(8, true),
|
|
106
|
+
channels: 3,
|
|
107
|
+
};
|
|
108
|
+
}
|
|
109
|
+
|
|
110
|
+
/**
 * Parse WebP (RIFF container) metadata from the VP8X, VP8L, or VP8 chunk.
 * Returns a bare { mimeType } when the chunk layout is unrecognized or the
 * header slice is too short to decode dimensions.
 */
function parseWebpMetadata(header: Uint8Array): ImageMetadata | null {
  if (!magicEquals(header, 0, WEBP_RIFF_MAGIC)) return null;
  if (!magicEquals(header, 8, WEBP_MAGIC)) return null;
  if (header.length < 30) return { mimeType: "image/webp" };

  if (magicEquals(header, 12, WEBP_VP8X)) {
    // Extended format: flags byte (alpha = bit 0x10), then 24-bit
    // little-endian width/height stored as value-minus-one.
    const hasAlpha = (header[20] & 0x10) !== 0;
    const width = (header[24] | (header[25] << 8) | (header[26] << 16)) + 1;
    const height = (header[27] | (header[28] << 8) | (header[29] << 16)) + 1;
    return { mimeType: "image/webp", width, height, channels: hasAlpha ? 4 : 3, hasAlpha };
  }

  const view = new DataView(header.buffer, header.byteOffset, header.byteLength);
  if (magicEquals(header, 12, WEBP_VP8L)) {
    // NOTE(review): this guard is unreachable — length >= 30 is ensured above.
    if (header.length < 25) return { mimeType: "image/webp" };
    // Lossless bitstream: 14-bit width, 14-bit height, 1-bit alpha packed
    // into the 32 bits following the signature byte.
    const bits = view.getUint32(21, true);
    const width = (bits & 0x3fff) + 1;
    const height = ((bits >> 14) & 0x3fff) + 1;
    const hasAlpha = ((bits >> 28) & 0x1) === 1;
    return { mimeType: "image/webp", width, height, channels: hasAlpha ? 4 : 3, hasAlpha };
  }

  if (magicEquals(header, 12, WEBP_VP8)) {
    // Lossy bitstream: 14-bit little-endian dimensions; no alpha channel.
    const width = view.getUint16(26, true) & 0x3fff;
    const height = view.getUint16(28, true) & 0x3fff;
    return { mimeType: "image/webp", width, height, channels: 3, hasAlpha: false };
  }

  return { mimeType: "image/webp" };
}
|
|
140
|
+
|
|
141
|
+
export function parseImageMetadata(header: Uint8Array): ImageMetadata | null {
|
|
142
|
+
return (
|
|
143
|
+
parsePngMetadata(header) ?? parseJpegMetadata(header) ?? parseGifMetadata(header) ?? parseWebpMetadata(header)
|
|
144
|
+
);
|
|
145
|
+
}
|
|
146
|
+
|
|
147
|
+
export function readImageMetadataSync(
|
|
148
|
+
filePath: string,
|
|
149
|
+
maxBytes = DEFAULT_IMAGE_METADATA_HEADER_BYTES,
|
|
150
|
+
): ImageMetadata | null {
|
|
151
|
+
return peekFileSync(filePath, maxBytes, parseImageMetadata);
|
|
152
|
+
}
|
|
153
|
+
|
|
154
|
+
export function readImageMetadata(
|
|
155
|
+
filePath: string,
|
|
156
|
+
maxBytes = DEFAULT_IMAGE_METADATA_HEADER_BYTES,
|
|
157
|
+
): Promise<ImageMetadata | null> {
|
|
158
|
+
return peekFile(filePath, maxBytes, parseImageMetadata);
|
|
159
|
+
}
|
package/src/peek-file.ts
ADDED
|
@@ -0,0 +1,114 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Read the first `maxBytes` of a file (offset 0) and pass that slice to `op`.
|
|
3
|
+
*
|
|
4
|
+
* Buffers are reused to avoid allocating on every peek: sync uses one growable
|
|
5
|
+
* `Uint8Array`; async uses a small fixed pool of `Buffer`s with a bounded wait
|
|
6
|
+
* queue, falling back to a fresh allocation when the pool and queue are saturated
|
|
7
|
+
* or when `maxBytes` exceeds the pool slot size.
|
|
8
|
+
*/
|
|
9
|
+
import * as fs from "node:fs";
|
|
10
|
+
|
|
11
|
+
/** Async pool slot size; larger peeks allocate ad hoc. */
const POOLED_BUFFER_SIZE = 512;
// Number of reusable buffers available to concurrent async peeks.
const ASYNC_POOL_SIZE = 10;
/** Cap waiter queue so heavy concurrency does not queue unbounded; overflow uses alloc. */
const MAX_ASYNC_WAITERS = 4;
// Starting size of the single growable sync buffer.
const INITIAL_SYNC_BUFFER_SIZE = 1024;
// Shared zero-length buffer returned for maxBytes <= 0 peeks.
const EMPTY_BUFFER = Buffer.alloc(0);

// Fixed pool of async buffers, the free-slot index stack, and the bounded
// FIFO of resolvers waiting for a slot to be released.
const asyncPool = Array.from({ length: ASYNC_POOL_SIZE }, () => Buffer.allocUnsafe(POOLED_BUFFER_SIZE));
const availableAsyncPoolIndexes = Array.from({ length: ASYNC_POOL_SIZE }, (_, index) => index);
const asyncPoolWaiters: Array<(index: number) => void> = [];
// Single growable buffer reused by all synchronous peeks.
let syncPool = new Uint8Array(INITIAL_SYNC_BUFFER_SIZE);
|
|
23
|
+
|
|
24
|
+
/** Returns a pool slot index, or `-1` when the caller should use a standalone buffer. */
|
|
25
|
+
function acquireAsyncPoolIndex(): Promise<number> | number {
|
|
26
|
+
const index = availableAsyncPoolIndexes.pop();
|
|
27
|
+
if (index !== undefined) {
|
|
28
|
+
return index;
|
|
29
|
+
}
|
|
30
|
+
if (asyncPoolWaiters.length >= MAX_ASYNC_WAITERS) {
|
|
31
|
+
return -1;
|
|
32
|
+
}
|
|
33
|
+
const { promise, resolve } = Promise.withResolvers<number>();
|
|
34
|
+
asyncPoolWaiters.push(resolve);
|
|
35
|
+
return promise;
|
|
36
|
+
}
|
|
37
|
+
|
|
38
|
+
function releaseAsyncPoolIndex(index: number): void {
|
|
39
|
+
if (index < 0) {
|
|
40
|
+
return;
|
|
41
|
+
}
|
|
42
|
+
const waiter = asyncPoolWaiters.shift();
|
|
43
|
+
if (waiter) {
|
|
44
|
+
waiter(index);
|
|
45
|
+
return;
|
|
46
|
+
}
|
|
47
|
+
availableAsyncPoolIndexes.push(index);
|
|
48
|
+
}
|
|
49
|
+
|
|
50
|
+
/**
 * Run `op` with a buffer of exactly `maxBytes` bytes, reusing a pooled buffer
 * when possible. The buffer may contain stale bytes (allocUnsafe / reuse) and
 * is recycled once `op` settles — `op` must not retain a reference to it.
 */
async function withAsyncPoolBuffer<T>(maxBytes: number, op: (buffer: Buffer) => Promise<T>): Promise<T> {
  if (maxBytes <= 0) {
    return op(EMPTY_BUFFER);
  }
  // Oversized requests bypass the pool entirely.
  if (maxBytes > POOLED_BUFFER_SIZE) {
    return op(Buffer.allocUnsafe(maxBytes));
  }

  // -1 means the pool and waiter queue were saturated; fall back to a fresh allocation.
  const poolIndex = await acquireAsyncPoolIndex();
  const buffer = poolIndex >= 0 ? asyncPool[poolIndex] : Buffer.allocUnsafe(maxBytes);
  try {
    return await op(buffer.subarray(0, maxBytes));
  } finally {
    releaseAsyncPoolIndex(poolIndex);
  }
}
|
|
66
|
+
|
|
67
|
+
function withSyncPoolBuffer<T>(maxBytes: number, op: (buffer: Uint8Array) => T): T {
|
|
68
|
+
if (maxBytes <= 0) {
|
|
69
|
+
return op(EMPTY_BUFFER);
|
|
70
|
+
}
|
|
71
|
+
if (maxBytes > syncPool.byteLength) {
|
|
72
|
+
syncPool = new Uint8Array(maxBytes + (maxBytes >> 1));
|
|
73
|
+
}
|
|
74
|
+
return op(syncPool.subarray(0, maxBytes));
|
|
75
|
+
}
|
|
76
|
+
|
|
77
|
+
/**
|
|
78
|
+
* Synchronously reads up to `maxBytes` from the start of `filePath` and returns `op(header)`.
|
|
79
|
+
* If the file is shorter, `header` is only the bytes actually read.
|
|
80
|
+
*/
|
|
81
|
+
export function peekFileSync<T>(filePath: string, maxBytes: number, op: (header: Uint8Array) => T): T {
|
|
82
|
+
if (maxBytes <= 0) {
|
|
83
|
+
return op(EMPTY_BUFFER);
|
|
84
|
+
}
|
|
85
|
+
|
|
86
|
+
const fileHandle = fs.openSync(filePath, "r");
|
|
87
|
+
try {
|
|
88
|
+
return withSyncPoolBuffer(maxBytes, buffer => {
|
|
89
|
+
const bytesRead = fs.readSync(fileHandle, buffer, 0, buffer.byteLength, 0);
|
|
90
|
+
return op(buffer.subarray(0, bytesRead));
|
|
91
|
+
});
|
|
92
|
+
} finally {
|
|
93
|
+
fs.closeSync(fileHandle);
|
|
94
|
+
}
|
|
95
|
+
}
|
|
96
|
+
|
|
97
|
+
/**
|
|
98
|
+
* Like {@link peekFileSync} but uses async I/O.
|
|
99
|
+
*/
|
|
100
|
+
export async function peekFile<T>(filePath: string, maxBytes: number, op: (header: Uint8Array) => T): Promise<T> {
|
|
101
|
+
if (maxBytes <= 0) {
|
|
102
|
+
return op(EMPTY_BUFFER);
|
|
103
|
+
}
|
|
104
|
+
|
|
105
|
+
const fileHandle = await fs.promises.open(filePath, "r");
|
|
106
|
+
try {
|
|
107
|
+
return await withAsyncPoolBuffer(maxBytes, async buffer => {
|
|
108
|
+
const { bytesRead } = await fileHandle.read(buffer, 0, buffer.byteLength, 0);
|
|
109
|
+
return op(buffer.subarray(0, bytesRead));
|
|
110
|
+
});
|
|
111
|
+
} finally {
|
|
112
|
+
await fileHandle.close();
|
|
113
|
+
}
|
|
114
|
+
}
|
|
@@ -0,0 +1,197 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Cleanup and postmortem handler utilities.
|
|
3
|
+
*
|
|
4
|
+
* This module provides a system for registering and running cleanup callbacks
|
|
5
|
+
* in response to process exit, signals, or fatal exceptions. It is intended to
|
|
6
|
+
* allow reliably releasing resources or shutting down subprocesses, files, sockets, etc.
|
|
7
|
+
*/
|
|
8
|
+
import inspector from "node:inspector";
|
|
9
|
+
import { isMainThread } from "node:worker_threads";
|
|
10
|
+
import { logger } from ".";
|
|
11
|
+
|
|
12
|
+
// Cleanup reasons, in order of priority/meaning. String-valued so the reason
// is readable when it appears in log entries.
export enum Reason {
  PRE_EXIT = "pre_exit", // Pre-exit phase (not used by default)
  EXIT = "exit", // Normal process exit
  SIGINT = "sigint", // Ctrl-C or SIGINT
  SIGTERM = "sigterm", // SIGTERM
  SIGHUP = "sighup", // SIGHUP
  UNCAUGHT_EXCEPTION = "uncaught_exception", // Fatal exception
  UNHANDLED_REJECTION = "unhandled_rejection", // Unhandled promise rejection
  MANUAL = "manual", // Manual cleanup (not triggered by process)
}
|
|
23
|
+
|
|
24
|
+
// Internal list of active cleanup callbacks (in registration order).
// runCleanup executes them in REVERSE registration order (LIFO).
const callbackList: ((reason: Reason) => Promise<void> | void)[] = [];
// Tracks cleanup run state (to prevent recursion/reentry issues)
let cleanupStage: "idle" | "running" | "complete" = "idle";
|
|
28
|
+
|
|
29
|
+
/**
 * Internal: runs all registered cleanup callbacks for the given reason.
 * Ensures each callback is invoked at most once. Handles errors and prevents reentrancy.
 *
 * Returns a Promise that settles after all cleanups complete or error out.
 */
function runCleanup(reason: Reason): Promise<void> {
  switch (cleanupStage) {
    case "idle":
      cleanupStage = "running";
      break;
    case "running":
      // Reentrant call (e.g. a cleanup callback triggered another signal).
      logger.error("Cleanup invoked recursively", { stack: new Error().stack });
      return Promise.resolve();
    case "complete":
      // Cleanup already finished; nothing left to do.
      return Promise.resolve();
  }

  // Run callbacks in reverse registration order (LIFO, like defer/atexit).
  // Promise.try normalizes sync returns and sync throws into promises; the
  // spread copy avoids mutating callbackList with reverse().
  const promises = [...callbackList].reverse().map(callback => {
    return Promise.try(() => callback(reason));
  });

  // allSettled: one failing callback must not prevent the others from running.
  return Promise.allSettled(promises).then(results => {
    for (const result of results) {
      if (result.status === "rejected") {
        const err = result.reason instanceof Error ? result.reason : new Error(String(result.reason));
        logger.error("Cleanup callback failed", { err, stack: err.stack });
      }
    }
    cleanupStage = "complete";
  });
}
|
|
64
|
+
|
|
65
|
+
// Register signal and error event handlers to trigger cleanup before exit.
// Main thread: full signal handling (SIGINT, SIGTERM, SIGHUP) + exceptions + exit
// Worker thread: exit only (workers use self.addEventListener for exceptions)
// Guards against opening the debug inspector twice on repeated SIGUSR1.
let inspectorOpened = false;
|
|
69
|
+
|
|
70
|
+
function formatFatalError(label: string, err: Error): string {
|
|
71
|
+
const name = err.name || "Error";
|
|
72
|
+
const message = err.message || "(no message)";
|
|
73
|
+
const stack = err.stack || "";
|
|
74
|
+
const stackLines = stack.split("\n").slice(1);
|
|
75
|
+
const formattedStack = stackLines.length > 0 ? `\n${stackLines.join("\n")}` : "";
|
|
76
|
+
return `\n[${label}] ${name}: ${message}${formattedStack}\n`;
|
|
77
|
+
}
|
|
78
|
+
|
|
79
|
+
if (isMainThread) {
  process
    .on("SIGINT", async () => {
      await runCleanup(Reason.SIGINT);
      process.exit(130); // 128 + SIGINT (2)
    })
    .on("SIGUSR1", () => {
      // Debug aid: the first SIGUSR1 opens the V8 inspector (default host/port).
      if (inspectorOpened) return;
      inspectorOpened = true;
      inspector.open(undefined, undefined, false);
      const url = inspector.url();
      process.stderr.write(`Inspector opened: ${url}\n`);
    })
    .on("uncaughtException", async err => {
      // Surface the error on stderr and in the log, clean up, then exit 1.
      process.stderr.write(formatFatalError("Uncaught Exception", err));
      logger.error("Uncaught exception", { err, stack: err.stack });
      await runCleanup(Reason.UNCAUGHT_EXCEPTION);
      process.exit(1);
    })
    .on("unhandledRejection", async reason => {
      // Rejection reasons are not necessarily Errors; normalize first.
      const err = reason instanceof Error ? reason : new Error(String(reason));
      process.stderr.write(formatFatalError("Unhandled Rejection", err));
      logger.error("Unhandled rejection", { err, stack: err.stack });
      await runCleanup(Reason.UNHANDLED_REJECTION);
      process.exit(1);
    })
    .on("exit", async () => {
      // "exit" handlers cannot await: only the synchronous part of the
      // cleanup callbacks actually runs here.
      void runCleanup(Reason.EXIT); // fire and forget (exit imminent)
    })
    .on("SIGTERM", async () => {
      await runCleanup(Reason.SIGTERM);
      process.exit(143); // 128 + SIGTERM (15)
    })
    .on("SIGHUP", async () => {
      await runCleanup(Reason.SIGHUP);
      process.exit(129); // 128 + SIGHUP (1)
    });
} else {
  // Worker thread: only register exit handler for cleanup.
  // DO NOT register uncaughtException/unhandledRejection handlers here -
  // they would swallow errors before the worker's own handlers (self.addEventListener)
  // can report failures back to the parent thread.
  process.on("exit", () => {
    void runCleanup(Reason.EXIT);
  });
}
|
|
125
|
+
|
|
126
|
+
/**
|
|
127
|
+
* Register a process cleanup callback, to be run on shutdown, signal, or fatal error.
|
|
128
|
+
*
|
|
129
|
+
* Returns a Callback instance that can be used to cancel (unregister) or manually clean up.
|
|
130
|
+
* If register is called after cleanup already began, invokes callback on a microtask.
|
|
131
|
+
*/
|
|
132
|
+
export function register(id: string, callback: (reason: Reason) => void | Promise<void>): () => void {
|
|
133
|
+
let done = false;
|
|
134
|
+
const exec = (reason: Reason) => {
|
|
135
|
+
if (done) return;
|
|
136
|
+
done = true;
|
|
137
|
+
try {
|
|
138
|
+
return callback(reason);
|
|
139
|
+
} catch (e) {
|
|
140
|
+
const err = e instanceof Error ? e : new Error(String(e));
|
|
141
|
+
logger.error("Cleanup callback failed", { err, id, stack: err.stack });
|
|
142
|
+
}
|
|
143
|
+
};
|
|
144
|
+
|
|
145
|
+
const cancel = () => {
|
|
146
|
+
const index = callbackList.indexOf(exec);
|
|
147
|
+
if (index >= 0) {
|
|
148
|
+
callbackList.splice(index, 1);
|
|
149
|
+
}
|
|
150
|
+
done = true;
|
|
151
|
+
};
|
|
152
|
+
|
|
153
|
+
if (cleanupStage !== "idle") {
|
|
154
|
+
// If cleanup is already running/completed, warn and run on microtask.
|
|
155
|
+
logger.warn("Cleanup invoked recursively", { id });
|
|
156
|
+
try {
|
|
157
|
+
callback(Reason.MANUAL);
|
|
158
|
+
} catch (e) {
|
|
159
|
+
const err = e instanceof Error ? e : new Error(String(e));
|
|
160
|
+
logger.error("Cleanup callback failed", { err, id, stack: err.stack });
|
|
161
|
+
}
|
|
162
|
+
return () => {};
|
|
163
|
+
}
|
|
164
|
+
|
|
165
|
+
// Register callback as "armed" (active).
|
|
166
|
+
callbackList.push(exec);
|
|
167
|
+
return cancel;
|
|
168
|
+
}
|
|
169
|
+
|
|
170
|
+
/**
|
|
171
|
+
* Runs all cleanup callbacks without exiting.
|
|
172
|
+
* Use this in workers or when you need to clean up but continue execution.
|
|
173
|
+
*/
|
|
174
|
+
export function cleanup(): Promise<void> {
|
|
175
|
+
return runCleanup(Reason.MANUAL);
|
|
176
|
+
}
|
|
177
|
+
|
|
178
|
+
/**
 * Runs all cleanup callbacks and exits.
 *
 * In main thread: waits for stdout drain, then calls process.exit().
 * In workers: runs cleanup only (process.exit would kill entire process).
 *
 * @param code - Process exit code (defaults to 0).
 */
export async function quit(code: number = 0): Promise<void> {
  await runCleanup(Reason.MANUAL);

  if (!isMainThread) {
    return; // Workers: cleanup done, let worker exit naturally
  }

  // Give buffered stdout a chance to flush before exiting, but never hang:
  // race the "drain" event against a 5-second timeout.
  if (process.stdout.writableLength > 0) {
    const { promise, resolve } = Promise.withResolvers<void>();
    process.stdout.once("drain", resolve);
    await Promise.race([promise, Bun.sleep(5000)]);
  }
  process.exit(code);
}
|