@durable-streams/server 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,863 @@
+ /**
+  * File-backed stream storage implementation using LMDB for metadata
+  * and append-only log files for stream data.
+  */
+
+ import * as fs from "node:fs"
+ import * as path from "node:path"
+ import { randomBytes } from "node:crypto"
+ import { open as openLMDB } from "lmdb"
+ import { SieveCache } from "@neophi/sieve-cache"
+ import { StreamFileManager } from "./file-manager"
+ import { encodeStreamPath } from "./path-encoding"
+ import {
+   formatJsonResponse,
+   normalizeContentType,
+   processJsonAppend,
+ } from "./store"
+ import type { Database } from "lmdb"
+ import type { PendingLongPoll, Stream, StreamMessage } from "./types"
+
+ /**
+  * Stream metadata stored in LMDB.
+  */
+ interface StreamMetadata {
+   path: string
+   contentType?: string
+   currentOffset: string
+   lastSeq?: string
+   ttlSeconds?: number
+   expiresAt?: string
+   createdAt: number
+   segmentCount: number
+   totalBytes: number
+   /**
+    * Unique directory name for this stream instance.
+    * Format: {encoded_path}~{timestamp}~{random_hex}
+    * This allows safe async deletion and immediate reuse of stream paths.
+    */
+   directoryName: string
+ }
+
+ /**
+  * File handle pool with SIEVE cache eviction.
+  * Automatically closes least-recently-used handles when capacity is reached.
+  */
+ interface PooledHandle {
+   stream: fs.WriteStream
+ }
+
+ class FileHandlePool {
+   private cache: SieveCache<string, PooledHandle>
+
+   constructor(maxSize: number) {
+     this.cache = new SieveCache<string, PooledHandle>(maxSize, {
+       evictHook: (_key: string, handle: PooledHandle) => {
+         // Close the handle when evicted (the evict hook is synchronous, so fire and forget)
+         this.closeHandle(handle).catch((err: Error) => {
+           console.error(`[FileHandlePool] Error closing evicted handle:`, err)
+         })
+       },
+     })
+   }
+
+   getWriteStream(filePath: string): fs.WriteStream {
+     let handle = this.cache.get(filePath)
+
+     if (!handle) {
+       const stream = fs.createWriteStream(filePath, { flags: `a` })
+       handle = { stream }
+       this.cache.set(filePath, handle)
+     }
+
+     return handle.stream
+   }
+
+   /**
+    * Flush a specific file to disk immediately.
+    * This is called after each append to ensure durability.
+    */
+   async fsyncFile(filePath: string): Promise<void> {
+     const handle = this.cache.get(filePath)
+     if (!handle) return
+
+     return new Promise<void>((resolve, reject) => {
+       // Use fdatasync (faster than fsync, skips metadata)
+       // Cast to any to access fd property (exists at runtime but not in types)
+       const fd = (handle.stream as any).fd
+
+       // If fd is null, stream hasn't been opened yet - wait for open event
+       if (typeof fd !== `number`) {
+         const onOpen = (openedFd: number): void => {
+           handle.stream.off(`error`, onError)
+           fs.fdatasync(openedFd, (err) => {
+             if (err) reject(err)
+             else resolve()
+           })
+         }
+         const onError = (err: Error): void => {
+           handle.stream.off(`open`, onOpen)
+           reject(err)
+         }
+         handle.stream.once(`open`, onOpen)
+         handle.stream.once(`error`, onError)
+         return
+       }
+
+       fs.fdatasync(fd, (err) => {
+         if (err) reject(err)
+         else resolve()
+       })
+     })
+   }
+
+   async closeAll(): Promise<void> {
+     const promises: Array<Promise<void>> = []
+     for (const [_key, handle] of this.cache.entries()) {
+       promises.push(this.closeHandle(handle))
+     }
+
+     await Promise.all(promises)
+     this.cache.clear()
+   }
+
+   /**
+    * Close a specific file handle if it exists in the cache.
+    * Useful for cleanup before deleting files.
+    */
+   async closeFileHandle(filePath: string): Promise<void> {
+     const handle = this.cache.get(filePath)
+     if (handle) {
+       await this.closeHandle(handle)
+       this.cache.delete(filePath)
+     }
+   }
+
+   private async closeHandle(handle: PooledHandle): Promise<void> {
+     // Close the stream (data is already fsynced on each append)
+     return new Promise<void>((resolve) => {
+       handle.stream.end(() => resolve())
+     })
+   }
+ }
+
+ export interface FileBackedStreamStoreOptions {
+   dataDir: string
+   maxFileHandles?: number
+ }
+
+ /**
+  * Generate a unique directory name for a stream.
+  * Format: {encoded_path}~{timestamp}~{random_hex}
+  * This allows safe async deletion and immediate reuse of stream paths.
+  */
+ function generateUniqueDirectoryName(streamPath: string): string {
+   const encoded = encodeStreamPath(streamPath)
+   const timestamp = Date.now().toString(36) // Base36 for shorter strings
+   const random = randomBytes(4).toString(`hex`) // 8 chars hex
+   return `${encoded}~${timestamp}~${random}`
+ }
+
+ /**
+  * File-backed implementation of StreamStore.
+  * Maintains the same interface as the in-memory StreamStore for drop-in compatibility.
+  */
+ export class FileBackedStreamStore {
+   private db: Database
+   private fileManager: StreamFileManager
+   private fileHandlePool: FileHandlePool
+   private pendingLongPolls: Array<PendingLongPoll> = []
+   private dataDir: string
+
+   constructor(options: FileBackedStreamStoreOptions) {
+     this.dataDir = options.dataDir
+
+     // Initialize LMDB
+     this.db = openLMDB({
+       path: path.join(this.dataDir, `metadata.lmdb`),
+       compression: true,
+     })
+
+     // Initialize file manager
+     this.fileManager = new StreamFileManager(path.join(this.dataDir, `streams`))
+
+     // Initialize file handle pool with SIEVE cache
+     const maxFileHandles = options.maxFileHandles ?? 100
+     this.fileHandlePool = new FileHandlePool(maxFileHandles)
+
+     // Recover from disk
+     this.recover()
+   }
+
+   /**
+    * Recover streams from disk on startup.
+    * Validates that LMDB metadata matches actual file contents and reconciles any mismatches.
+    */
+   private recover(): void {
+     console.log(`[FileBackedStreamStore] Starting recovery...`)
+
+     let recovered = 0
+     let reconciled = 0
+     let errors = 0
+
+     // Scan LMDB for all streams
+     const range = this.db.getRange({
+       start: `stream:`,
+       end: `stream:\xFF`,
+     })
+
+     // Convert to array to avoid iterator issues
+     const entries = Array.from(range)
+
+     for (const { key, value } of entries) {
+       try {
+         // Key should be a string in our schema
+         if (typeof key !== `string`) continue
+
+         const streamMeta = value as StreamMetadata
+         const streamPath = key.replace(`stream:`, ``)
+
+         // Get segment file path
+         const segmentPath = path.join(
+           this.dataDir,
+           `streams`,
+           streamMeta.directoryName,
+           `segment_00000.log`
+         )
+
+         // Check if file exists
+         if (!fs.existsSync(segmentPath)) {
+           console.warn(
+             `[FileBackedStreamStore] Recovery: Stream file missing for ${streamPath}, removing from LMDB`
+           )
+           this.db.removeSync(key)
+           errors++
+           continue
+         }
+
+         // Scan file to compute true offset
+         const trueOffset = this.scanFileForTrueOffset(segmentPath)
+
+         // Check if offset matches
+         if (trueOffset !== streamMeta.currentOffset) {
+           console.warn(
+             `[FileBackedStreamStore] Recovery: Offset mismatch for ${streamPath}: ` +
+               `LMDB says ${streamMeta.currentOffset}, file says ${trueOffset}. Reconciling to file.`
+           )
+
+           // Update LMDB to match file (source of truth)
+           const reconciledMeta: StreamMetadata = {
+             ...streamMeta,
+             currentOffset: trueOffset,
+           }
+           this.db.putSync(key, reconciledMeta)
+           reconciled++
+         }
+
+         recovered++
+       } catch (err) {
+         console.error(`[FileBackedStreamStore] Error recovering stream:`, err)
+         errors++
+       }
+     }
+
+     console.log(
+       `[FileBackedStreamStore] Recovery complete: ${recovered} streams, ` +
+         `${reconciled} reconciled, ${errors} errors`
+     )
+   }
+
+   /**
+    * Scan a segment file to compute the true last offset.
+    * Handles partial/truncated messages at the end.
+    */
+   private scanFileForTrueOffset(segmentPath: string): string {
+     try {
+       const fileContent = fs.readFileSync(segmentPath)
+       let filePos = 0
+       let currentDataOffset = 0
+
+       while (filePos < fileContent.length) {
+         // Read message length (4 bytes)
+         if (filePos + 4 > fileContent.length) {
+           // Truncated length header - stop here
+           break
+         }
+
+         const messageLength = fileContent.readUInt32BE(filePos)
+         filePos += 4
+
+         // Check if we have the full message
+         if (filePos + messageLength > fileContent.length) {
+           // Truncated message data - stop here
+           break
+         }
+
+         filePos += messageLength
+
+         // Skip newline
+         if (filePos < fileContent.length) {
+           filePos += 1
+         }
+
+         // Update offset with this complete message
+         currentDataOffset += messageLength
+       }
+
+       // Return offset in format "readSeq_byteOffset" with zero-padding
+       return `0000000000000000_${String(currentDataOffset).padStart(16, `0`)}`
+     } catch (err) {
+       console.error(
+         `[FileBackedStreamStore] Error scanning file ${segmentPath}:`,
+         err
+       )
+       // Return empty offset on error
+       return `0000000000000000_0000000000000000`
+     }
+   }
+
+   /**
+    * Convert LMDB metadata to Stream object.
+    */
+   private streamMetaToStream(meta: StreamMetadata): Stream {
+     return {
+       path: meta.path,
+       contentType: meta.contentType,
+       messages: [], // Messages not stored in memory
+       currentOffset: meta.currentOffset,
+       lastSeq: meta.lastSeq,
+       ttlSeconds: meta.ttlSeconds,
+       expiresAt: meta.expiresAt,
+       createdAt: meta.createdAt,
+     }
+   }
+
+   /**
+    * Close the store, closing all file handles and database.
+    * All data is already fsynced on each append, so no final flush needed.
+    */
+   async close(): Promise<void> {
+     await this.fileHandlePool.closeAll()
+     await this.db.close()
+   }
+
+   // ============================================================================
+   // StreamStore interface methods
+   // ============================================================================
+
+   async create(
+     streamPath: string,
+     options: {
+       contentType?: string
+       ttlSeconds?: number
+       expiresAt?: string
+       initialData?: Uint8Array
+     } = {}
+   ): Promise<Stream> {
+     const key = `stream:${streamPath}`
+     const existing = this.db.get(key) as StreamMetadata | undefined
+
+     if (existing) {
+       // Check if config matches (idempotent create)
+       // MIME types are case-insensitive per RFC 2045
+       const normalizeMimeType = (ct: string | undefined) =>
+         (ct ?? `application/octet-stream`).toLowerCase()
+       const contentTypeMatches =
+         normalizeMimeType(options.contentType) ===
+         normalizeMimeType(existing.contentType)
+       const ttlMatches = options.ttlSeconds === existing.ttlSeconds
+       const expiresMatches = options.expiresAt === existing.expiresAt
+
+       if (contentTypeMatches && ttlMatches && expiresMatches) {
+         // Idempotent success - return existing stream
+         return this.streamMetaToStream(existing)
+       } else {
+         // Config mismatch - conflict
+         throw new Error(
+           `Stream already exists with different configuration: ${streamPath}`
+         )
+       }
+     }
+
+     // Initialize metadata
+     const streamMeta: StreamMetadata = {
+       path: streamPath,
+       contentType: options.contentType,
+       currentOffset: `0000000000000000_0000000000000000`,
+       lastSeq: undefined,
+       ttlSeconds: options.ttlSeconds,
+       expiresAt: options.expiresAt,
+       createdAt: Date.now(),
+       segmentCount: 1,
+       totalBytes: 0,
+       directoryName: generateUniqueDirectoryName(streamPath),
+     }
+
+     // Create stream directory and empty segment file immediately
+     // This ensures the stream is fully initialized and can be recovered
+     const streamDir = path.join(
+       this.dataDir,
+       `streams`,
+       streamMeta.directoryName
+     )
+     try {
+       fs.mkdirSync(streamDir, { recursive: true })
+       const segmentPath = path.join(streamDir, `segment_00000.log`)
+       fs.writeFileSync(segmentPath, ``)
+     } catch (err) {
+       console.error(
+         `[FileBackedStreamStore] Error creating stream directory:`,
+         err
+       )
+       throw err
+     }
+
+     // Save to LMDB
+     this.db.putSync(key, streamMeta)
+
+     // Append initial data if provided
+     if (options.initialData && options.initialData.length > 0) {
+       await this.append(streamPath, options.initialData, {
+         contentType: options.contentType,
+         isInitialCreate: true,
+       })
+       // Re-fetch updated metadata
+       const updated = this.db.get(key) as StreamMetadata
+       return this.streamMetaToStream(updated)
+     }
+
+     return this.streamMetaToStream(streamMeta)
+   }
+
+   get(streamPath: string): Stream | undefined {
+     const key = `stream:${streamPath}`
+     const meta = this.db.get(key) as StreamMetadata | undefined
+     return meta ? this.streamMetaToStream(meta) : undefined
+   }
+
+   has(streamPath: string): boolean {
+     const key = `stream:${streamPath}`
+     return this.db.get(key) !== undefined
+   }
+
+   delete(streamPath: string): boolean {
+     const key = `stream:${streamPath}`
+     const streamMeta = this.db.get(key) as StreamMetadata | undefined
+
+     if (!streamMeta) {
+       return false
+     }
+
+     // Cancel any pending long-polls for this stream
+     this.cancelLongPollsForStream(streamPath)
+
+     // Close any open file handle for this stream's segment file
+     // This is important especially on Windows where open handles block deletion
+     const segmentPath = path.join(
+       this.dataDir,
+       `streams`,
+       streamMeta.directoryName,
+       `segment_00000.log`
+     )
+     this.fileHandlePool.closeFileHandle(segmentPath).catch((err: Error) => {
+       console.error(`[FileBackedStreamStore] Error closing file handle:`, err)
+     })
+
+     // Delete from LMDB
+     this.db.removeSync(key)
+
+     // Delete files using unique directory name (async, but don't wait)
+     // Safe to reuse stream path immediately since new creation gets new directory
+     this.fileManager
+       .deleteDirectoryByName(streamMeta.directoryName)
+       .catch((err: Error) => {
+         console.error(
+           `[FileBackedStreamStore] Error deleting stream directory:`,
+           err
+         )
+       })
+
+     return true
+   }
+
+   async append(
+     streamPath: string,
+     data: Uint8Array,
+     options: {
+       seq?: string
+       contentType?: string
+       isInitialCreate?: boolean
+     } = {}
+   ): Promise<StreamMessage | null> {
+     const key = `stream:${streamPath}`
+     const streamMeta = this.db.get(key) as StreamMetadata | undefined
+
+     if (!streamMeta) {
+       throw new Error(`Stream not found: ${streamPath}`)
+     }
+
+     // Check content type match using normalization (handles charset parameters)
+     if (options.contentType && streamMeta.contentType) {
+       const providedType = normalizeContentType(options.contentType)
+       const streamType = normalizeContentType(streamMeta.contentType)
+       if (providedType !== streamType) {
+         throw new Error(
+           `Content-type mismatch: expected ${streamMeta.contentType}, got ${options.contentType}`
+         )
+       }
+     }
+
+     // Check sequence for writer coordination
+     if (options.seq !== undefined) {
+       if (
+         streamMeta.lastSeq !== undefined &&
+         options.seq <= streamMeta.lastSeq
+       ) {
+         throw new Error(
+           `Sequence conflict: ${options.seq} <= ${streamMeta.lastSeq}`
+         )
+       }
+     }
+
+     // Process JSON mode data (throws on invalid JSON or empty arrays for appends)
+     let processedData = data
+     if (normalizeContentType(streamMeta.contentType) === `application/json`) {
+       processedData = processJsonAppend(data, options.isInitialCreate ?? false)
+       // If empty array in create mode, return null (empty stream created successfully)
+       if (processedData.length === 0) {
+         return null
+       }
+     }
+
+     // Parse current offset
+     const parts = streamMeta.currentOffset.split(`_`).map(Number)
+     const readSeq = parts[0]!
+     const byteOffset = parts[1]!
+
+     // Calculate new offset with zero-padding for lexicographic sorting (only data bytes, not framing)
+     const newByteOffset = byteOffset + processedData.length
+     const newOffset = `${String(readSeq).padStart(16, `0`)}_${String(newByteOffset).padStart(16, `0`)}`
+
+     // Get segment file path (directory was created in create())
+     const streamDir = path.join(
+       this.dataDir,
+       `streams`,
+       streamMeta.directoryName
+     )
+     const segmentPath = path.join(streamDir, `segment_00000.log`)
+
+     // Get write stream from pool
+     const stream = this.fileHandlePool.getWriteStream(segmentPath)
+
+     // 1. Write message with framing: [4 bytes length][data][\n]
+     // Combine into single buffer for single syscall, and wait for write
+     // to be flushed to kernel before calling fsync
+     const lengthBuf = Buffer.allocUnsafe(4)
+     lengthBuf.writeUInt32BE(processedData.length, 0)
+     const frameBuf = Buffer.concat([
+       lengthBuf,
+       processedData,
+       Buffer.from(`\n`),
+     ])
+     await new Promise<void>((resolve, reject) => {
+       stream.write(frameBuf, (err) => {
+         if (err) reject(err)
+         else resolve()
+       })
+     })
+
+     // 2. Create message object for return value
+     const message: StreamMessage = {
+       data: processedData,
+       offset: newOffset,
+       timestamp: Date.now(),
+     }
+
+     // 3. Flush to disk (blocks here until durable)
+     await this.fileHandlePool.fsyncFile(segmentPath)
+
+     // 4. Update LMDB metadata (only after flush, so metadata reflects durability)
+     const updatedMeta: StreamMetadata = {
+       ...streamMeta,
+       currentOffset: newOffset,
+       lastSeq: options.seq ?? streamMeta.lastSeq,
+       totalBytes: streamMeta.totalBytes + processedData.length + 5, // +4 for length, +1 for newline
+     }
+     this.db.putSync(key, updatedMeta)
+
+     // 5. Notify long-polls (data is now readable from disk)
+     this.notifyLongPolls(streamPath)
+
+     // 6. Return (client knows data is durable)
+     return message
+   }
+
+   read(
+     streamPath: string,
+     offset?: string
+   ): { messages: Array<StreamMessage>; upToDate: boolean } {
+     const key = `stream:${streamPath}`
+     const streamMeta = this.db.get(key) as StreamMetadata | undefined
+
+     if (!streamMeta) {
+       throw new Error(`Stream not found: ${streamPath}`)
+     }
+
+     // Parse offsets
+     const startOffset = offset ?? `0000000000000000_0000000000000000`
+     const startParts = startOffset.split(`_`).map(Number)
+     const startByte = startParts[1] ?? 0
+     const currentParts = streamMeta.currentOffset.split(`_`).map(Number)
+     const currentSeq = currentParts[0] ?? 0
+     const currentByte = currentParts[1] ?? 0
+
+     // Early return if no data available
+     if (streamMeta.currentOffset === `0000000000000000_0000000000000000`) {
+       return { messages: [], upToDate: true }
+     }
+
+     // If start offset is at or past current offset, return empty
+     if (startByte >= currentByte) {
+       return { messages: [], upToDate: true }
+     }
+
+     // Get segment file path using unique directory name
+     const streamDir = path.join(
+       this.dataDir,
+       `streams`,
+       streamMeta.directoryName
+     )
+     const segmentPath = path.join(streamDir, `segment_00000.log`)
+
+     // Check if file exists
+     if (!fs.existsSync(segmentPath)) {
+       return { messages: [], upToDate: true }
+     }
+
+     // Read and parse messages from file
+     const messages: Array<StreamMessage> = []
+
+     try {
+       // Calculate file position from offset
+       // We need to read from the beginning and skip to the right position
+       // because the file has framing overhead
+       const fileContent = fs.readFileSync(segmentPath)
+       let filePos = 0
+       let currentDataOffset = 0
+
+       while (filePos < fileContent.length) {
+         // Read message length (4 bytes)
+         if (filePos + 4 > fileContent.length) break
+
+         const messageLength = fileContent.readUInt32BE(filePos)
+         filePos += 4
+
+         // Read message data
+         if (filePos + messageLength > fileContent.length) break
+
+         const messageData = fileContent.subarray(
+           filePos,
+           filePos + messageLength
+         )
+         filePos += messageLength
+
+         // Skip newline
+         filePos += 1
+
+         // Calculate this message's offset (end position)
+         const messageOffset = currentDataOffset + messageLength
+
+         // Only include messages after start offset
+         if (messageOffset > startByte) {
+           messages.push({
+             data: new Uint8Array(messageData),
+             offset: `${String(currentSeq).padStart(16, `0`)}_${String(messageOffset).padStart(16, `0`)}`,
+             timestamp: 0, // Not stored in MVP
+           })
+         }
+
+         currentDataOffset = messageOffset
+       }
+     } catch (err) {
+       console.error(`[FileBackedStreamStore] Error reading file:`, err)
+     }
+
+     return { messages, upToDate: true }
+   }
+
+   async waitForMessages(
+     streamPath: string,
+     offset: string,
+     timeoutMs: number
+   ): Promise<{ messages: Array<StreamMessage>; timedOut: boolean }> {
+     const key = `stream:${streamPath}`
+     const streamMeta = this.db.get(key) as StreamMetadata | undefined
+
+     if (!streamMeta) {
+       throw new Error(`Stream not found: ${streamPath}`)
+     }
+
+     // Check if there are already new messages
+     const { messages } = this.read(streamPath, offset)
+     if (messages.length > 0) {
+       return { messages, timedOut: false }
+     }
+
+     // Wait for new messages
+     return new Promise((resolve) => {
+       const timeoutId = setTimeout(() => {
+         // Remove from pending
+         this.removePendingLongPoll(pending)
+         resolve({ messages: [], timedOut: true })
+       }, timeoutMs)
+
+       const pending: PendingLongPoll = {
+         path: streamPath,
+         offset,
+         resolve: (msgs) => {
+           clearTimeout(timeoutId)
+           this.removePendingLongPoll(pending)
+           resolve({ messages: msgs, timedOut: false })
+         },
+         timeoutId,
+       }
+
+       this.pendingLongPolls.push(pending)
+     })
+   }
+
+   /**
+    * Format messages for response.
+    * For JSON mode, wraps concatenated data in array brackets.
+    */
+   formatResponse(
+     streamPath: string,
+     messages: Array<StreamMessage>
+   ): Uint8Array {
+     const key = `stream:${streamPath}`
+     const streamMeta = this.db.get(key) as StreamMetadata | undefined
+
+     if (!streamMeta) {
+       throw new Error(`Stream not found: ${streamPath}`)
+     }
+
+     // Concatenate all message data
+     const totalSize = messages.reduce((sum, m) => sum + m.data.length, 0)
+     const concatenated = new Uint8Array(totalSize)
+     let offset = 0
+     for (const msg of messages) {
+       concatenated.set(msg.data, offset)
+       offset += msg.data.length
+     }
+
+     // For JSON mode, wrap in array brackets
+     if (normalizeContentType(streamMeta.contentType) === `application/json`) {
+       return formatJsonResponse(concatenated)
+     }
+
+     return concatenated
+   }
+
+   getCurrentOffset(streamPath: string): string | undefined {
+     const key = `stream:${streamPath}`
+     const streamMeta = this.db.get(key) as StreamMetadata | undefined
+     return streamMeta?.currentOffset
+   }
+
+   clear(): void {
+     // Cancel all pending long-polls and resolve them with empty result
+     for (const pending of this.pendingLongPolls) {
+       clearTimeout(pending.timeoutId)
+       // Resolve with empty result to unblock waiting handlers
+       pending.resolve([])
+     }
+     this.pendingLongPolls = []
+
+     // Clear all streams from LMDB
+     const range = this.db.getRange({
+       start: `stream:`,
+       end: `stream:\xFF`,
+     })
+
+     // Convert to array to avoid iterator issues
+     const entries = Array.from(range)
+
+     for (const { key } of entries) {
+       this.db.removeSync(key)
+     }
+
+     // Clear file handle pool
+     this.fileHandlePool.closeAll().catch((err: Error) => {
+       console.error(`[FileBackedStreamStore] Error closing handles:`, err)
+     })
+
+     // Note: Files are not deleted in clear() with unique directory names
+     // New streams get fresh directories, so old files won't interfere
+   }
+
+   /**
+    * Cancel all pending long-polls (used during shutdown).
+    */
+   cancelAllWaits(): void {
+     for (const pending of this.pendingLongPolls) {
+       clearTimeout(pending.timeoutId)
+       // Resolve with empty result to unblock waiting handlers
+       pending.resolve([])
+     }
+     this.pendingLongPolls = []
+   }
+
+   list(): Array<string> {
+     const paths: Array<string> = []
+
+     const range = this.db.getRange({
+       start: `stream:`,
+       end: `stream:\xFF`,
+     })
+
+     // Convert to array to avoid iterator issues
+     const entries = Array.from(range)
+
+     for (const { key } of entries) {
+       // Key should be a string in our schema
+       if (typeof key === `string`) {
+         paths.push(key.replace(`stream:`, ``))
+       }
+     }
+
+     return paths
+   }
+
+   // ============================================================================
+   // Private helper methods for long-poll support
+   // ============================================================================
+
+   private notifyLongPolls(streamPath: string): void {
+     const toNotify = this.pendingLongPolls.filter((p) => p.path === streamPath)
+
+     for (const pending of toNotify) {
+       const { messages } = this.read(streamPath, pending.offset)
+       if (messages.length > 0) {
+         pending.resolve(messages)
+       }
+     }
+   }
+
+   private cancelLongPollsForStream(streamPath: string): void {
+     const toCancel = this.pendingLongPolls.filter((p) => p.path === streamPath)
+     for (const pending of toCancel) {
+       clearTimeout(pending.timeoutId)
+       pending.resolve([])
+     }
+     this.pendingLongPolls = this.pendingLongPolls.filter(
+       (p) => p.path !== streamPath
+     )
+   }
+
+   private removePendingLongPoll(pending: PendingLongPoll): void {
+     const index = this.pendingLongPolls.indexOf(pending)
+     if (index !== -1) {
+       this.pendingLongPolls.splice(index, 1)
+     }
+   }
+ }
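
For orientation, a minimal usage sketch of the store defined above. The import path and data directory are illustrative (the package's actual entry point may re-export the class differently), but the calls match the methods shown in the diff:

import { FileBackedStreamStore } from "./file-backed-store" // hypothetical module path

async function demo(): Promise<void> {
  // LMDB metadata and per-stream segment files are created under dataDir.
  const store = new FileBackedStreamStore({ dataDir: `./data`, maxFileHandles: 100 })

  // Create a plain-text stream and append one durable message.
  await store.create(`/logs/app`, { contentType: `text/plain` })
  await store.append(`/logs/app`, new TextEncoder().encode(`hello world`), {
    contentType: `text/plain`,
  })

  // Read from the beginning; offsets are zero-padded "{readSeq}_{byteOffset}" strings.
  const { messages } = store.read(`/logs/app`)
  console.log(new TextDecoder().decode(store.formatResponse(`/logs/app`, messages)))

  await store.close()
}

demo().catch(console.error)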