@logtape/file 1.0.0-dev.237 → 1.0.0-dev.241

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/deno.json CHANGED
@@ -1,6 +1,6 @@
  {
    "name": "@logtape/file",
-   "version": "1.0.0-dev.237+0615301b",
+   "version": "1.0.0-dev.241+13810dcf",
    "license": "MIT",
    "exports": "./mod.ts",
    "imports": {
@@ -2,36 +2,233 @@ const require_rolldown_runtime = require('./_virtual/rolldown_runtime.cjs');
  const __logtape_logtape = require_rolldown_runtime.__toESM(require("@logtape/logtape"));

  //#region filesink.base.ts
+ /**
+  * Adaptive flush strategy that dynamically adjusts buffer thresholds
+  * based on recent flush patterns for optimal performance.
+  */
+ var AdaptiveFlushStrategy = class {
+   recentFlushSizes = [];
+   recentFlushTimes = [];
+   avgFlushSize;
+   avgFlushInterval;
+   maxHistorySize = 10;
+   baseThreshold;
+   constructor(baseThreshold, baseInterval) {
+     this.baseThreshold = baseThreshold;
+     this.avgFlushSize = baseThreshold;
+     this.avgFlushInterval = baseInterval;
+   }
+   /**
+    * Record a flush event for pattern analysis.
+    * @param size The size of data flushed in bytes.
+    * @param timeSinceLastFlush Time since last flush in milliseconds.
+    */
+   recordFlush(size, timeSinceLastFlush) {
+     this.recentFlushSizes.push(size);
+     this.recentFlushTimes.push(timeSinceLastFlush);
+     if (this.recentFlushSizes.length > this.maxHistorySize) {
+       this.recentFlushSizes.shift();
+       this.recentFlushTimes.shift();
+     }
+     this.updateAverages();
+   }
+   /**
+    * Determine if buffer should be flushed based on adaptive strategy.
+    * @param currentSize Current buffer size in bytes.
+    * @param timeSinceLastFlush Time since last flush in milliseconds.
+    * @returns True if buffer should be flushed.
+    */
+   shouldFlush(currentSize, timeSinceLastFlush) {
+     const adaptiveThreshold = this.calculateAdaptiveThreshold();
+     const adaptiveInterval = this.calculateAdaptiveInterval();
+     return currentSize >= adaptiveThreshold || adaptiveInterval > 0 && timeSinceLastFlush >= adaptiveInterval;
+   }
+   updateAverages() {
+     if (this.recentFlushSizes.length === 0) return;
+     this.avgFlushSize = this.recentFlushSizes.reduce((sum, size) => sum + size, 0) / this.recentFlushSizes.length;
+     this.avgFlushInterval = this.recentFlushTimes.reduce((sum, time) => sum + time, 0) / this.recentFlushTimes.length;
+   }
+   calculateAdaptiveThreshold() {
+     const adaptiveFactor = Math.min(2, Math.max(.5, this.avgFlushSize / this.baseThreshold));
+     return Math.max(Math.min(4096, this.baseThreshold / 2), Math.min(64 * 1024, this.baseThreshold * adaptiveFactor));
+   }
+   calculateAdaptiveInterval() {
+     if (this.avgFlushInterval <= 0) return 0;
+     if (this.recentFlushTimes.length < 3) return this.avgFlushInterval;
+     const variance = this.calculateVariance(this.recentFlushTimes);
+     const stabilityFactor = Math.min(2, Math.max(.5, 1e3 / variance));
+     return Math.max(1e3, Math.min(1e4, this.avgFlushInterval * stabilityFactor));
+   }
+   calculateVariance(values) {
+     if (values.length < 2) return 1e3;
+     const mean = values.reduce((sum, val) => sum + val, 0) / values.length;
+     const squaredDiffs = values.map((val) => Math.pow(val - mean, 2));
+     return squaredDiffs.reduce((sum, diff) => sum + diff, 0) / values.length;
+   }
+ };
+ /**
+  * Memory pool for reusing Uint8Array buffers to minimize GC pressure.
+  * Maintains a pool of pre-allocated buffers for efficient reuse.
+  */
+ var BufferPool = class {
+   pool = [];
+   maxPoolSize = 50;
+   maxBufferSize = 64 * 1024;
+   /**
+    * Acquire a buffer from the pool or create a new one.
+    * @param size The minimum size needed for the buffer.
+    * @returns A Uint8Array that can be used for encoding.
+    */
+   acquire(size) {
+     if (size > this.maxBufferSize) return new Uint8Array(size);
+     for (let i = this.pool.length - 1; i >= 0; i--) {
+       const buffer = this.pool[i];
+       if (buffer.length >= size) {
+         this.pool.splice(i, 1);
+         return buffer.subarray(0, size);
+       }
+     }
+     const actualSize = Math.max(size, 1024);
+     return new Uint8Array(actualSize);
+   }
+   /**
+    * Return a buffer to the pool for future reuse.
+    * @param buffer The buffer to return to the pool.
+    */
+   release(buffer) {
+     if (this.pool.length >= this.maxPoolSize || buffer.length > this.maxBufferSize) return;
+     if (buffer.length < 256) return;
+     this.pool.push(buffer);
+   }
+   /**
+    * Clear the pool to free memory. Useful for cleanup.
+    */
+   clear() {
+     this.pool.length = 0;
+   }
+   /**
+    * Get current pool statistics for monitoring.
+    * @returns Object with pool size and buffer count.
+    */
+   getStats() {
+     return {
+       poolSize: this.pool.reduce((sum, buf) => sum + buf.length, 0),
+       totalBuffers: this.pool.length
+     };
+   }
+ };
+ /**
+  * High-performance byte buffer for batching log records.
+  * Eliminates string concatenation overhead by storing pre-encoded bytes.
+  * Uses memory pooling to reduce GC pressure.
+  */
+ var ByteRingBuffer = class {
+   buffers = [];
+   totalSize = 0;
+   bufferPool;
+   constructor(bufferPool) {
+     this.bufferPool = bufferPool;
+   }
+   /**
+    * Append pre-encoded log record bytes to the buffer.
+    * @param data The encoded log record as bytes.
+    */
+   append(data) {
+     this.buffers.push(data);
+     this.totalSize += data.length;
+   }
+   /**
+    * Get the current total size of buffered data in bytes.
+    * @returns The total size in bytes.
+    */
+   size() {
+     return this.totalSize;
+   }
+   /**
+    * Get the number of buffered records.
+    * @returns The number of records in the buffer.
+    */
+   count() {
+     return this.buffers.length;
+   }
+   /**
+    * Flush all buffered data and return it as an array of byte arrays.
+    * This clears the internal buffer and returns used buffers to the pool.
+    * @returns Array of buffered byte arrays ready for writev() operations.
+    */
+   flush() {
+     const result = [...this.buffers];
+     this.clear();
+     return result;
+   }
+   /**
+    * Clear the buffer without returning data.
+    * Returns buffers to the pool for reuse.
+    */
+   clear() {
+     for (const buffer of this.buffers) this.bufferPool.release(buffer);
+     this.buffers.length = 0;
+     this.totalSize = 0;
+   }
+   /**
+    * Check if the buffer is empty.
+    * @returns True if the buffer contains no data.
+    */
+   isEmpty() {
+     return this.buffers.length === 0;
+   }
+ };
  function getBaseFileSink(path, options) {
    const formatter = options.formatter ?? __logtape_logtape.defaultTextFormatter;
    const encoder = options.encoder ?? new TextEncoder();
    const bufferSize = options.bufferSize ?? 1024 * 8;
    const flushInterval = options.flushInterval ?? 5e3;
    let fd = options.lazy ? null : options.openSync(path);
-   let buffer = "";
+   const bufferPool = new BufferPool();
+   const byteBuffer = new ByteRingBuffer(bufferPool);
+   const adaptiveStrategy = new AdaptiveFlushStrategy(bufferSize, flushInterval);
    let lastFlushTimestamp = Date.now();
    if (!options.nonBlocking) {
      function flushBuffer$1() {
-       if (fd == null) return;
-       if (buffer.length > 0) {
-         options.writeSync(fd, encoder.encode(buffer));
-         buffer = "";
-         options.flushSync(fd);
-         lastFlushTimestamp = Date.now();
-       }
+       if (fd == null || byteBuffer.isEmpty()) return;
+       const flushSize = byteBuffer.size();
+       const currentTime = Date.now();
+       const timeSinceLastFlush = currentTime - lastFlushTimestamp;
+       const chunks = byteBuffer.flush();
+       if (options.writeManySync && chunks.length > 1) options.writeManySync(fd, chunks);
+       else for (const chunk of chunks) options.writeSync(fd, chunk);
+       options.flushSync(fd);
+       adaptiveStrategy.recordFlush(flushSize, timeSinceLastFlush);
+       lastFlushTimestamp = currentTime;
      }
      const sink = (record) => {
        if (fd == null) fd = options.openSync(path);
-       buffer += formatter(record);
-       const shouldFlushBySize = buffer.length >= bufferSize;
-       const shouldFlushByTime = flushInterval > 0 && record.timestamp - lastFlushTimestamp >= flushInterval;
-       if (shouldFlushBySize || shouldFlushByTime) flushBuffer$1();
+       if (byteBuffer.isEmpty() && bufferSize === 8192) {
+         const formattedRecord$1 = formatter(record);
+         const encodedRecord$1 = encoder.encode(formattedRecord$1);
+         if (encodedRecord$1.length < 200) {
+           options.writeSync(fd, encodedRecord$1);
+           options.flushSync(fd);
+           lastFlushTimestamp = Date.now();
+           return;
+         }
+       }
+       const formattedRecord = formatter(record);
+       const encodedRecord = encoder.encode(formattedRecord);
+       byteBuffer.append(encodedRecord);
+       if (bufferSize <= 0) flushBuffer$1();
+       else {
+         const timeSinceLastFlush = record.timestamp - lastFlushTimestamp;
+         const shouldFlush = adaptiveStrategy.shouldFlush(byteBuffer.size(), timeSinceLastFlush);
+         if (shouldFlush) flushBuffer$1();
+       }
      };
      sink[Symbol.dispose] = () => {
        if (fd !== null) {
          flushBuffer$1();
          options.closeSync(fd);
        }
+       bufferPool.clear();
      };
      return sink;
    }
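The clamping in calculateAdaptiveThreshold above is easier to read outside the minified dist output. The following standalone TypeScript sketch restates the same formula; the helper name and the example inputs are illustrative and not part of the package:

// Illustrative sketch: the adaptive threshold from the hunk above, restated
// so the lower and upper bounds are easy to see.
function adaptiveThreshold(baseThreshold: number, avgFlushSize: number): number {
  const adaptiveFactor = Math.min(2, Math.max(0.5, avgFlushSize / baseThreshold));
  return Math.max(
    Math.min(4096, baseThreshold / 2),                    // floor: half the base, capped at 4 KiB
    Math.min(64 * 1024, baseThreshold * adaptiveFactor),  // ceiling: never above 64 KiB
  );
}

// With the default 8 KiB base threshold, large recent flushes double the
// threshold and small ones halve it:
adaptiveThreshold(8192, 32768); // 16384 (factor clamped to 2)
adaptiveThreshold(8192, 1024);  // 4096  (factor clamped to 0.5)

calculateAdaptiveInterval does the same on the time axis: a stable flush cadence (low variance) stretches the interval up to twice the running average, capped at 10 s, while a noisy cadence pulls it back toward the 1 s floor.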
@@ -40,13 +237,17 @@ function getBaseFileSink(path, options) {
    let activeFlush = null;
    let flushTimer = null;
    async function flushBuffer() {
-     if (fd == null || buffer.length === 0) return;
-     const data = buffer;
-     buffer = "";
+     if (fd == null || byteBuffer.isEmpty()) return;
+     const flushSize = byteBuffer.size();
+     const currentTime = Date.now();
+     const timeSinceLastFlush = currentTime - lastFlushTimestamp;
+     const chunks = byteBuffer.flush();
      try {
-       asyncOptions.writeSync(fd, encoder.encode(data));
+       if (asyncOptions.writeMany && chunks.length > 1) await asyncOptions.writeMany(fd, chunks);
+       else for (const chunk of chunks) asyncOptions.writeSync(fd, chunk);
        await asyncOptions.flush(fd);
-       lastFlushTimestamp = Date.now();
+       adaptiveStrategy.recordFlush(flushSize, timeSinceLastFlush);
+       lastFlushTimestamp = currentTime;
      } catch {}
    }
    function scheduleFlush() {
@@ -64,11 +265,26 @@ function getBaseFileSink(path, options) {
    const nonBlockingSink = (record) => {
      if (disposed) return;
      if (fd == null) fd = asyncOptions.openSync(path);
-     buffer += formatter(record);
-     const shouldFlushBySize = buffer.length >= bufferSize;
-     const shouldFlushByTime = flushInterval > 0 && record.timestamp - lastFlushTimestamp >= flushInterval;
-     if (shouldFlushBySize || shouldFlushByTime) scheduleFlush();
-     else if (flushTimer === null && flushInterval > 0) startFlushTimer();
+     if (byteBuffer.isEmpty() && !activeFlush && bufferSize === 8192) {
+       const formattedRecord$1 = formatter(record);
+       const encodedRecord$1 = encoder.encode(formattedRecord$1);
+       if (encodedRecord$1.length < 200) {
+         asyncOptions.writeSync(fd, encodedRecord$1);
+         scheduleFlush();
+         lastFlushTimestamp = Date.now();
+         return;
+       }
+     }
+     const formattedRecord = formatter(record);
+     const encodedRecord = encoder.encode(formattedRecord);
+     byteBuffer.append(encodedRecord);
+     if (bufferSize <= 0) scheduleFlush();
+     else {
+       const timeSinceLastFlush = record.timestamp - lastFlushTimestamp;
+       const shouldFlush = adaptiveStrategy.shouldFlush(byteBuffer.size(), timeSinceLastFlush);
+       if (shouldFlush) scheduleFlush();
+       else if (flushTimer === null && flushInterval > 0) startFlushTimer();
+     }
    };
    nonBlockingSink[Symbol.asyncDispose] = async () => {
      disposed = true;
@@ -80,6 +296,7 @@ function getBaseFileSink(path, options) {
      if (fd !== null) try {
        await asyncOptions.close(fd);
      } catch {}
+     bufferPool.clear();
    };
    return nonBlockingSink;
  }
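For context, these are the sink options that feed the new buffering path. The sketch below assumes the getFileSink and configure APIs as documented for @logtape/file and @logtape/logtape; it is a usage illustration, not part of this diff:

// Usage sketch (hedged): option names taken from the defaults read in
// getBaseFileSink above (bufferSize, flushInterval, nonBlocking, lazy).
import { configure, getLogger } from "@logtape/logtape";
import { getFileSink } from "@logtape/file";

await configure({
  sinks: {
    file: getFileSink("app.log", {
      bufferSize: 8192,     // base threshold the adaptive strategy starts from
      flushInterval: 5000,  // base interval; 0 disables time-based flushing
      nonBlocking: true,    // use the async flush path shown in the hunks above
    }),
  },
  loggers: [{ category: [], sinks: ["file"], lowestLevel: "info" }],
});

getLogger(["my-app"]).info("buffered through the byte ring buffer");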
@@ -52,6 +52,13 @@ interface FileSinkDriver<TFile> {
     * @param chunk The data to write.
     */
    writeSync(fd: TFile, chunk: Uint8Array): void;
+   /**
+    * Write multiple chunks of data to the file in a single operation.
+    * This is optional - if not implemented, falls back to multiple writeSync calls.
+    * @param fd The file descriptor.
+    * @param chunks Array of data chunks to write.
+    */
+   writeManySync?(fd: TFile, chunks: Uint8Array[]): void;
    /**
     * Flush the file to ensure that all data is written to the disk.
     * @param fd The file descriptor.
@@ -69,6 +76,13 @@ interface FileSinkDriver<TFile> {
   * @since 1.0.0
   */
  interface AsyncFileSinkDriver<TFile> extends FileSinkDriver<TFile> {
+   /**
+    * Asynchronously write multiple chunks of data to the file in a single operation.
+    * This is optional - if not implemented, falls back to multiple writeSync calls.
+    * @param fd The file descriptor.
+    * @param chunks Array of data chunks to write.
+    */
+   writeMany?(fd: TFile, chunks: Uint8Array[]): Promise<void>;
    /**
     * Asynchronously flush the file to ensure that all data is written to the disk.
     * @param fd The file descriptor.
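On Node.js, these optional members map naturally onto the gathered-write APIs in node:fs. A hedged sketch of a driver fragment (not the package's actual driver) could look like this:

// Hedged sketch: satisfying the new optional members with standard node:fs calls
// for a driver whose file handle is a numeric descriptor.
import fs from "node:fs";

const nodeWriteMany = {
  // Synchronous gathered write for the blocking sink path.
  writeManySync(fd: number, chunks: Uint8Array[]): void {
    fs.writevSync(fd, chunks);
  },
  // Asynchronous gathered write for the non-blocking sink path.
  writeMany(fd: number, chunks: Uint8Array[]): Promise<void> {
    return new Promise((resolve, reject) => {
      fs.writev(fd, chunks, (err) => (err ? reject(err) : resolve()));
    });
  },
};

Because both members are optional, a driver that omits them still works: the sink falls back to one writeSync call per buffered chunk, as the flush functions above show.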
@@ -1 +1 @@
- {"version":3,"file":"filesink.base.d.cts","names":[],"sources":["../filesink.base.ts"],"sourcesContent":[],"mappings":";;;;;;AAUA;AAuCiB,KAvCL,eAAA,GAAkB,iBAuCC,GAAA;EAAA;;;EAYV,IAAS,CAAA,EAAA,OAAA;EAAU;;AAYnB;AAQrB;;;;EAKiB,UAAG,CAAA,EAAA,MAAA;EAAO;;;AALuC;AA0JlE;;;EAAqE,aAApB,CAAA,EAAA,MAAA;EAAI;AAerD;;;;AAAqE;AAoBrE;;EAA4C,WACd,CAAA,EAAA,OAAA;CAAK;AAAN;;;;UA9NZ;;;;;0BAKS;;;;;;gBAOV,cAAc;;;;;gBAMd;;;;;gBAMA;;;;;;;UAQC,mCAAmC,eAAe;;;;;YAKvD,QAAQ;;;;;YAMR,QAAQ;;;;;;;;;;;;;;;;UA+IH,uBAAA,SAAgC,KAAK;;;;;;;;;;;;;UAerC,sCAAsC,eAAe;;;;;;;;;;;;;;;;;;;;UAoBrD,2CACP,oBAAoB"}
+ {"version":3,"file":"filesink.base.d.cts","names":[],"sources":["../filesink.base.ts"],"sourcesContent":[],"mappings":";;;;;;AAqQA;AAuCiB,KAvCL,eAAA,GAAkB,iBAuCC,GAAA;EAAA;;;EAYV,IAAS,CAAA,EAAA,OAAA;EAAU;;;;AAoBnB;AAQrB;;EAAoC,UAA+B,CAAA,EAAA,MAAA;EAAK;;;;;;;EAmB7C,aAnByB,CAAA,EAAA,MAAA;EAAc;AAgQlE;;;;AAAqD;AAerD;;EAAuC,WAA+B,CAAA,EAAA,OAAA;CAAK;AAAN;AAoBrE;;;AACU,UA5UO,cA4UP,CAAA,KAAA,CAAA,CAAA;EAAmB;;;;0BAvUH;;;;;;gBAOV,cAAc;;;;;;;qBAQT,eAAe;;;;;gBAMpB;;;;;gBAMA;;;;;;;UAQC,mCAAmC,eAAe;;;;;;;iBAOlD,eAAe,eAAe;;;;;YAMnC,QAAQ;;;;;YAMR,QAAQ;;;;;;;;;;;;;;;;UA6OH,uBAAA,SAAgC,KAAK;;;;;;;;;;;;;UAerC,sCAAsC,eAAe;;;;;;;;;;;;;;;;;;;;UAoBrD,2CACP,oBAAoB"}
@@ -52,6 +52,13 @@ interface FileSinkDriver<TFile> {
     * @param chunk The data to write.
     */
    writeSync(fd: TFile, chunk: Uint8Array): void;
+   /**
+    * Write multiple chunks of data to the file in a single operation.
+    * This is optional - if not implemented, falls back to multiple writeSync calls.
+    * @param fd The file descriptor.
+    * @param chunks Array of data chunks to write.
+    */
+   writeManySync?(fd: TFile, chunks: Uint8Array[]): void;
    /**
     * Flush the file to ensure that all data is written to the disk.
     * @param fd The file descriptor.
@@ -69,6 +76,13 @@ interface FileSinkDriver<TFile> {
   * @since 1.0.0
   */
  interface AsyncFileSinkDriver<TFile> extends FileSinkDriver<TFile> {
+   /**
+    * Asynchronously write multiple chunks of data to the file in a single operation.
+    * This is optional - if not implemented, falls back to multiple writeSync calls.
+    * @param fd The file descriptor.
+    * @param chunks Array of data chunks to write.
+    */
+   writeMany?(fd: TFile, chunks: Uint8Array[]): Promise<void>;
    /**
     * Asynchronously flush the file to ensure that all data is written to the disk.
     * @param fd The file descriptor.
@@ -1 +1 @@
- {"version":3,"file":"filesink.base.d.ts","names":[],"sources":["../filesink.base.ts"],"sourcesContent":[],"mappings":";;;;;;AAUA;AAuCiB,KAvCL,eAAA,GAAkB,iBAuCC,GAAA;EAAA;;;EAYV,IAAS,CAAA,EAAA,OAAA;EAAU;;AAYnB;AAQrB;;;;EAKiB,UAAG,CAAA,EAAA,MAAA;EAAO;;;AALuC;AA0JlE;;;EAAqE,aAApB,CAAA,EAAA,MAAA;EAAI;AAerD;;;;AAAqE;AAoBrE;;EAA4C,WACd,CAAA,EAAA,OAAA;CAAK;AAAN;;;;UA9NZ;;;;;0BAKS;;;;;;gBAOV,cAAc;;;;;gBAMd;;;;;gBAMA;;;;;;;UAQC,mCAAmC,eAAe;;;;;YAKvD,QAAQ;;;;;YAMR,QAAQ;;;;;;;;;;;;;;;;UA+IH,uBAAA,SAAgC,KAAK;;;;;;;;;;;;;UAerC,sCAAsC,eAAe;;;;;;;;;;;;;;;;;;;;UAoBrD,2CACP,oBAAoB"}
+ {"version":3,"file":"filesink.base.d.ts","names":[],"sources":["../filesink.base.ts"],"sourcesContent":[],"mappings":";;;;;;AAqQA;AAuCiB,KAvCL,eAAA,GAAkB,iBAuCC,GAAA;EAAA;;;EAYV,IAAS,CAAA,EAAA,OAAA;EAAU;;;;AAoBnB;AAQrB;;EAAoC,UAA+B,CAAA,EAAA,MAAA;EAAK;;;;;;;EAmB7C,aAnByB,CAAA,EAAA,MAAA;EAAc;AAgQlE;;;;AAAqD;AAerD;;EAAuC,WAA+B,CAAA,EAAA,OAAA;CAAK;AAAN;AAoBrE;;;AACU,UA5UO,cA4UP,CAAA,KAAA,CAAA,CAAA;EAAmB;;;;0BAvUH;;;;;;gBAOV,cAAc;;;;;;;qBAQT,eAAe;;;;;gBAMpB;;;;;gBAMA;;;;;;;UAQC,mCAAmC,eAAe;;;;;;;iBAOlD,eAAe,eAAe;;;;;YAMnC,QAAQ;;;;;YAMR,QAAQ;;;;;;;;;;;;;;;;UA6OH,uBAAA,SAAgC,KAAK;;;;;;;;;;;;;UAerC,sCAAsC,eAAe;;;;;;;;;;;;;;;;;;;;UAoBrD,2CACP,oBAAoB"}