@logtape/file 1.0.0-dev.237 → 1.0.0-dev.241

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,336 @@
1
+ import { getStreamFileSink } from "./streamfilesink.ts";
2
+ import { suite } from "@alinea/suite";
3
+ import type { LogRecord, Sink } from "@logtape/logtape";
4
+ import { assert } from "@std/assert/assert";
5
+ import { assertEquals } from "@std/assert/equals";
6
+ import { delay } from "@std/async/delay";
7
+ import { join } from "@std/path/join";
8
+ import fs from "node:fs";
9
+ import { tmpdir } from "node:os";
10
+ import { debug, error, fatal, info, warning } from "../logtape/fixtures.ts";
11
+
12
+ const test = suite(import.meta);
13
+
14
+ function makeTempFileSync(): string {
15
+ return join(fs.mkdtempSync(join(tmpdir(), "logtape-")), "logtape.txt");
16
+ }
17
+
18
+ test("getStreamFileSink() basic functionality", async () => {
19
+ const path = makeTempFileSync();
20
+ const sink: Sink & Disposable = getStreamFileSink(path);
21
+
22
+ sink(debug);
23
+ sink(info);
24
+ sink(warning);
25
+ sink(error);
26
+ sink(fatal);
27
+
28
+ sink[Symbol.dispose]();
29
+
30
+ // Allow stream to fully flush
31
+ await delay(50);
32
+
33
+ const content = fs.readFileSync(path, { encoding: "utf-8" });
34
+ assertEquals(
35
+ content,
36
+ "2023-11-14 22:13:20.000 +00:00 [DBG] my-app·junk: Hello, 123 & 456!\n" +
37
+ "2023-11-14 22:13:20.000 +00:00 [INF] my-app·junk: Hello, 123 & 456!\n" +
38
+ "2023-11-14 22:13:20.000 +00:00 [WRN] my-app·junk: Hello, 123 & 456!\n" +
39
+ "2023-11-14 22:13:20.000 +00:00 [ERR] my-app·junk: Hello, 123 & 456!\n" +
40
+ "2023-11-14 22:13:20.000 +00:00 [FTL] my-app·junk: Hello, 123 & 456!\n",
41
+ );
42
+ });
43
+
44
+ test("getStreamFileSink() with custom highWaterMark", async () => {
45
+ const path = makeTempFileSync();
46
+ const sink = getStreamFileSink(path, { highWaterMark: 1024 });
47
+
48
+ sink(debug);
49
+ sink(info);
50
+ sink[Symbol.dispose]();
51
+
52
+ await delay(50);
53
+
54
+ const content = fs.readFileSync(path, { encoding: "utf-8" });
55
+ assertEquals(
56
+ content,
57
+ "2023-11-14 22:13:20.000 +00:00 [DBG] my-app·junk: Hello, 123 & 456!\n" +
58
+ "2023-11-14 22:13:20.000 +00:00 [INF] my-app·junk: Hello, 123 & 456!\n",
59
+ );
60
+ });
61
+
62
+ test("getStreamFileSink() with custom formatter", async () => {
63
+ const path = makeTempFileSync();
64
+ const customFormatter = (record: LogRecord) =>
65
+ `CUSTOM: ${record.message.join("")}\n`;
66
+ const sink = getStreamFileSink(path, { formatter: customFormatter });
67
+
68
+ sink(debug);
69
+ sink(info);
70
+ sink[Symbol.dispose]();
71
+
72
+ await delay(50);
73
+
74
+ const content = fs.readFileSync(path, { encoding: "utf-8" });
75
+ assertEquals(
76
+ content,
77
+ "CUSTOM: Hello, 123 & 456!\n" +
78
+ "CUSTOM: Hello, 123 & 456!\n",
79
+ );
80
+ });
81
+
82
+ test("getStreamFileSink() appends to existing file", async () => {
83
+ const path = makeTempFileSync();
84
+
85
+ // Write initial content
86
+ fs.writeFileSync(path, "Initial content\n");
87
+
88
+ const sink = getStreamFileSink(path);
89
+ sink(debug);
90
+ sink[Symbol.dispose]();
91
+
92
+ await delay(50);
93
+
94
+ const content = fs.readFileSync(path, { encoding: "utf-8" });
95
+ assert(content.startsWith("Initial content\n"));
96
+ assert(content.includes("Hello, 123 & 456!"));
97
+ });
98
+
99
+ test("getStreamFileSink() high-volume logging", async () => {
100
+ const path = makeTempFileSync();
101
+ const sink = getStreamFileSink(path, { highWaterMark: 1024 });
102
+
103
+ // Write many records quickly to test stream backpressure
104
+ for (let i = 0; i < 100; i++) {
105
+ const record: LogRecord = {
106
+ ...debug,
107
+ message: [`Log entry ${i}`],
108
+ };
109
+ sink(record);
110
+ }
111
+
112
+ sink[Symbol.dispose]();
113
+ await delay(100); // Allow streams to finish
114
+
115
+ const content = fs.readFileSync(path, { encoding: "utf-8" });
116
+ const lines = content.split("\n").filter((line) => line.length > 0);
117
+ assertEquals(lines.length, 100);
118
+
119
+ // Verify first and last entries
120
+ assert(lines[0].includes("Log entry 0"));
121
+ assert(lines[99].includes("Log entry 99"));
122
+ });
123
+
124
+ test("getStreamFileSink() disposal stops writing", async () => {
125
+ const path = makeTempFileSync();
126
+ const sink = getStreamFileSink(path);
127
+
128
+ sink(debug);
129
+ sink[Symbol.dispose]();
130
+
131
+ // Writing after disposal should be ignored
132
+ sink(info);
133
+ sink(warning);
134
+
135
+ await delay(50);
136
+
137
+ const content = fs.readFileSync(path, { encoding: "utf-8" });
138
+ const lines = content.split("\n").filter((line) => line.length > 0);
139
+ assertEquals(lines.length, 1); // Only debug record
140
+ assert(content.includes("[DBG]"));
141
+ assert(!content.includes("[INF]"));
142
+ assert(!content.includes("[WRN]"));
143
+ });
144
+
145
+ test("getStreamFileSink() double disposal", async () => {
146
+ const path = makeTempFileSync();
147
+ const sink = getStreamFileSink(path);
148
+
149
+ sink(debug);
150
+ sink[Symbol.dispose]();
151
+ sink[Symbol.dispose](); // Should not throw
152
+
153
+ await delay(50);
154
+
155
+ const content = fs.readFileSync(path, { encoding: "utf-8" });
156
+ const lines = content.split("\n").filter((line) => line.length > 0);
157
+ assertEquals(lines.length, 1);
158
+ });
159
+
160
+ test("getStreamFileSink() handles rapid disposal", async () => {
161
+ const path = makeTempFileSync();
162
+ const sink = getStreamFileSink(path);
163
+
164
+ sink(debug);
165
+ // Dispose immediately without waiting
166
+ sink[Symbol.dispose]();
167
+
168
+ await delay(50);
169
+
170
+ const content = fs.readFileSync(path, { encoding: "utf-8" });
171
+ assert(content.includes("Hello, 123 & 456!"));
172
+ });
173
+
174
+ test("getStreamFileSink() concurrent writes", async () => {
175
+ const path = makeTempFileSync();
176
+ const sink = getStreamFileSink(path);
177
+
178
+ // Simulate concurrent logging from different parts of application
179
+ const promises = [];
180
+ for (let i = 0; i < 10; i++) {
181
+ promises.push(
182
+ new Promise<void>((resolve) => {
183
+ setTimeout(() => {
184
+ const record: LogRecord = {
185
+ ...debug,
186
+ message: [`Concurrent log ${i}`],
187
+ };
188
+ sink(record);
189
+ resolve();
190
+ }, Math.random() * 10);
191
+ }),
192
+ );
193
+ }
194
+
195
+ await Promise.all(promises);
196
+ sink[Symbol.dispose]();
197
+ await delay(100);
198
+
199
+ const content = fs.readFileSync(path, { encoding: "utf-8" });
200
+ const lines = content.split("\n").filter((line) => line.length > 0);
201
+ assertEquals(lines.length, 10);
202
+
203
+ // All concurrent logs should be present
204
+ for (let i = 0; i < 10; i++) {
205
+ assert(content.includes(`Concurrent log ${i}`));
206
+ }
207
+ });
208
+
209
+ test("getStreamFileSink() with empty records", async () => {
210
+ const path = makeTempFileSync();
211
+ const sink = getStreamFileSink(path);
212
+
213
+ const emptyRecord: LogRecord = {
214
+ ...debug,
215
+ message: [""],
216
+ };
217
+
218
+ sink(emptyRecord);
219
+ sink[Symbol.dispose]();
220
+
221
+ await delay(50);
222
+
223
+ const content = fs.readFileSync(path, { encoding: "utf-8" });
224
+ assert(content.includes("[DBG]"));
225
+ // Should still write the timestamp and level even with empty message
226
+ assert(content.includes("2023-11-14 22:13:20.000 +00:00"));
227
+ });
228
+
229
+ test("getStreamFileSink() with large messages", async () => {
230
+ const path = makeTempFileSync();
231
+ const sink = getStreamFileSink(path);
232
+
233
+ const largeMessage = "x".repeat(10000);
234
+ const largeRecord: LogRecord = {
235
+ ...debug,
236
+ message: [largeMessage],
237
+ };
238
+
239
+ sink(largeRecord);
240
+ sink[Symbol.dispose]();
241
+
242
+ await delay(100); // Give more time for large write
243
+
244
+ const content = fs.readFileSync(path, { encoding: "utf-8" });
245
+ assert(content.includes(largeMessage));
246
+ assert(content.includes("[DBG]"));
247
+ });
248
+
249
+ test("getStreamFileSink() memory efficiency", async () => {
250
+ const path = makeTempFileSync();
251
+ const sink = getStreamFileSink(path);
252
+
253
+ // Create many small records to test memory usage
254
+ for (let i = 0; i < 1000; i++) {
255
+ const record: LogRecord = {
256
+ ...debug,
257
+ message: [`Memory test ${i}`],
258
+ };
259
+ sink(record);
260
+
261
+ // Occasionally allow event loop to process
262
+ if (i % 100 === 0) {
263
+ await delay(1);
264
+ }
265
+ }
266
+
267
+ sink[Symbol.dispose]();
268
+ await delay(200);
269
+
270
+ const content = fs.readFileSync(path, { encoding: "utf-8" });
271
+ const lines = content.split("\n").filter((line) => line.length > 0);
272
+ assertEquals(lines.length, 1000);
273
+
274
+ // Verify first and last entries
275
+ assert(lines[0].includes("Memory test 0"));
276
+ assert(lines[999].includes("Memory test 999"));
277
+ });
278
+
279
+ test("getStreamFileSink() creates new file when it doesn't exist", async () => {
280
+ // Use a file that doesn't exist yet
281
+ const tempDir = fs.mkdtempSync(join(tmpdir(), "logtape-"));
282
+ const path = join(tempDir, "new-file.log");
283
+
284
+ const sink = getStreamFileSink(path);
285
+ sink(debug);
286
+ sink[Symbol.dispose]();
287
+
288
+ await delay(50);
289
+
290
+ // File should have been created
291
+ assert(fs.existsSync(path));
292
+ const content = fs.readFileSync(path, { encoding: "utf-8" });
293
+ assert(content.includes("Hello, 123 & 456!"));
294
+ });
295
+
296
+ test("getStreamFileSink() multiple instances on same file", async () => {
297
+ const path = makeTempFileSync();
298
+
299
+ const sink1 = getStreamFileSink(path);
300
+ const sink2 = getStreamFileSink(path);
301
+
302
+ sink1(debug);
303
+ sink2(info);
304
+
305
+ sink1[Symbol.dispose]();
306
+ sink2[Symbol.dispose]();
307
+
308
+ await delay(100);
309
+
310
+ const content = fs.readFileSync(path, { encoding: "utf-8" });
311
+ assert(content.includes("[DBG]"));
312
+ assert(content.includes("[INF]"));
313
+ });
314
+
315
+ test("getStreamFileSink() stream error handling", async () => {
316
+ const path = makeTempFileSync();
317
+ const sink = getStreamFileSink(path);
318
+
319
+ sink(debug);
320
+ sink[Symbol.dispose]();
321
+ await delay(50);
322
+
323
+ // Delete the file after disposal
324
+ try {
325
+ fs.unlinkSync(path);
326
+ } catch {
327
+ // Ignore if file doesn't exist
328
+ }
329
+
330
+ // These writes after disposal should be ignored
331
+ sink(info);
332
+ sink(warning);
333
+
334
+ // Test should complete without throwing
335
+ assert(true);
336
+ });
@@ -0,0 +1,136 @@
1
+ import {
2
+ defaultTextFormatter,
3
+ type LogRecord,
4
+ type Sink,
5
+ type TextFormatter,
6
+ } from "@logtape/logtape";
7
+ import { createWriteStream } from "node:fs";
8
+ import { PassThrough } from "node:stream";
9
+
10
/**
 * Options for the {@link getStreamFileSink} function.
 *
 * This interface configures the high-performance stream-based file sink that
 * uses Node.js PassThrough streams for optimal I/O performance with automatic
 * backpressure management.
 *
 * @since 1.0.0
 */
export interface StreamFileSinkOptions {
  /**
   * High water mark for the PassThrough stream buffer in bytes.
   *
   * This controls the internal buffer size of the PassThrough stream.
   * Higher values can improve performance for high-volume logging but use
   * more memory.  Lower values reduce memory usage but may impact
   * performance under load.
   *
   * @default 16384 (16 KiB)
   * @since 1.0.0
   */
  readonly highWaterMark?: number;

  /**
   * A custom formatter that turns a log record into the text written to
   * the file.
   *
   * If not specified, the default text formatter will be used, which formats
   * records in the standard LogTape format with timestamp, level, category,
   * and message.
   *
   * @default defaultTextFormatter
   * @since 1.0.0
   */
  readonly formatter?: TextFormatter;
}
44
+
45
+ /**
46
+ * Create a high-performance stream-based file sink that writes log records to a file.
47
+ *
48
+ * This sink uses Node.js PassThrough streams piped to WriteStreams for optimal
49
+ * I/O performance. It leverages the Node.js stream infrastructure to provide
50
+ * automatic backpressure management, efficient buffering, and asynchronous writes
51
+ * without blocking the main thread.
52
+ *
53
+ * ## Performance Characteristics
54
+ *
55
+ * - **High Performance**: Optimized for high-volume logging scenarios
56
+ * - **Non-blocking**: Uses asynchronous I/O that doesn't block the main thread
57
+ * - **Memory Efficient**: Automatic backpressure prevents memory buildup
58
+ * - **Stream-based**: Leverages Node.js native stream optimizations
59
+ *
60
+ * ## When to Use
61
+ *
62
+ * Use this sink when you need:
63
+ * - High-performance file logging for production applications
64
+ * - Non-blocking I/O behavior for real-time applications
65
+ * - Automatic backpressure handling for high-volume scenarios
66
+ * - Simple file output without complex buffering configuration
67
+ *
68
+ * For more control over buffering behavior, consider using {@link getFileSink}
69
+ * instead, which provides options for buffer size, flush intervals, and
70
+ * non-blocking modes.
71
+ *
72
+ * ## Example
73
+ *
74
+ * ```typescript
75
+ * import { configure } from "@logtape/logtape";
76
+ * import { getStreamFileSink } from "@logtape/file";
77
+ *
78
+ * await configure({
79
+ * sinks: {
80
+ * file: getStreamFileSink("app.log", {
81
+ * highWaterMark: 32768 // 32KB buffer for high-volume logging
82
+ * })
83
+ * },
84
+ * loggers: [
85
+ * { category: ["myapp"], sinks: ["file"] }
86
+ * ]
87
+ * });
88
+ * ```
89
+ *
90
+ * @param path The path to the file to write logs to. The file will be created
91
+ * if it doesn't exist, or appended to if it does exist.
92
+ * @param options Configuration options for the stream-based sink.
93
+ * @returns A sink that writes formatted log records to the specified file.
94
+ * The returned sink implements `Disposable` for proper resource cleanup.
95
+ *
96
+ * @since 1.0.0
97
+ */
98
+ export function getStreamFileSink(
99
+ path: string,
100
+ options: StreamFileSinkOptions = {},
101
+ ): Sink & Disposable {
102
+ const highWaterMark = options.highWaterMark ?? 16384;
103
+ const formatter = options.formatter ?? defaultTextFormatter;
104
+
105
+ // Create PassThrough stream for optimal performance
106
+ const passThrough = new PassThrough({
107
+ highWaterMark,
108
+ objectMode: false,
109
+ });
110
+
111
+ // Create WriteStream immediately (not lazy)
112
+ const writeStream = createWriteStream(path, { flags: "a" });
113
+
114
+ // Pipe PassThrough to WriteStream for automatic backpressure handling
115
+ passThrough.pipe(writeStream);
116
+
117
+ let disposed = false;
118
+
119
+ // Stream-based sink function for high performance
120
+ const sink: Sink & Disposable = (record: LogRecord) => {
121
+ if (disposed) return;
122
+
123
+ // Direct write to PassThrough stream
124
+ passThrough.write(formatter(record));
125
+ };
126
+
127
+ // Minimal disposal
128
+ sink[Symbol.dispose] = () => {
129
+ if (disposed) return;
130
+ disposed = true;
131
+ passThrough.end();
132
+ writeStream.end();
133
+ };
134
+
135
+ return sink;
136
+ }
package/tsdown.config.ts CHANGED
@@ -6,13 +6,13 @@ export default defineConfig({
6
6
  sourcemap: true,
7
7
  },
8
8
  format: ["esm", "cjs"],
9
- platform: "neutral",
9
+ platform: "node",
10
10
  unbundle: true,
11
11
  inputOptions: {
12
12
  onLog(level, log, defaultHandler) {
13
13
  if (
14
14
  level === "warn" && log.code === "UNRESOLVED_IMPORT" &&
15
- ["node:fs", "node:util", "#filesink"].includes(log.exporter ?? "")
15
+ log.exporter === "#filesink"
16
16
  ) {
17
17
  return;
18
18
  }