@durable-streams/server 0.2.3 → 0.3.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/src/file-store.ts CHANGED
@@ -45,6 +45,12 @@ interface StreamMetadata {
45
45
  ttlSeconds?: number
46
46
  expiresAt?: string
47
47
  createdAt: number
48
+ /**
49
+ * Timestamp of the last read or write (for TTL renewal).
50
+ * Optional for backward-compatible deserialization from LMDB (old records won't have it).
51
+ * Falls back to createdAt when missing.
52
+ */
53
+ lastAccessedAt?: number
48
54
  segmentCount: number
49
55
  totalBytes: number
50
56
  /**
@@ -73,6 +79,23 @@ interface StreamMetadata {
73
79
  epoch: number
74
80
  seq: number
75
81
  }
82
+ /**
83
+ * Source stream path (set when this stream is a fork).
84
+ */
85
+ forkedFrom?: string
86
+ /**
87
+ * Divergence offset from the source stream.
88
+ */
89
+ forkOffset?: string
90
+ /**
91
+ * Number of forks referencing this stream.
92
+ * Defaults to 0. Optional for backward-compatible deserialization from LMDB.
93
+ */
94
+ refCount?: number
95
+ /**
96
+ * Whether this stream is logically deleted but retained for fork readers.
97
+ */
98
+ softDeleted?: boolean
76
99
  }
77
100
 
78
101
  /**
@@ -277,7 +300,19 @@ export class FileBackedStreamStore {
277
300
  }
278
301
 
279
302
  // Scan file to compute true offset
280
- const trueOffset = this.scanFileForTrueOffset(segmentPath)
303
+ // For forks, physical file bytes need to be added to forkOffset base
304
+ const physicalOffset = this.scanFileForTrueOffset(segmentPath)
305
+ const physicalBytes = Number(physicalOffset.split(`_`)[1] ?? 0)
306
+
307
+ let trueOffset: string
308
+ if (streamMeta.forkOffset) {
309
+ // Fork: logical offset = forkOffset base + physical bytes in own file
310
+ const forkBaseByte = Number(streamMeta.forkOffset.split(`_`)[1] ?? 0)
311
+ const logicalBytes = forkBaseByte + physicalBytes
312
+ trueOffset = `${String(0).padStart(16, `0`)}_${String(logicalBytes).padStart(16, `0`)}`
313
+ } else {
314
+ trueOffset = physicalOffset
315
+ }
281
316
 
282
317
  // Check if offset matches
283
318
  if (trueOffset !== streamMeta.currentOffset) {
@@ -379,9 +414,14 @@ export class FileBackedStreamStore {
379
414
  ttlSeconds: meta.ttlSeconds,
380
415
  expiresAt: meta.expiresAt,
381
416
  createdAt: meta.createdAt,
417
+ lastAccessedAt: meta.lastAccessedAt ?? meta.createdAt,
382
418
  producers,
383
419
  closed: meta.closed,
384
420
  closedBy: meta.closedBy,
421
+ forkedFrom: meta.forkedFrom,
422
+ forkOffset: meta.forkOffset,
423
+ refCount: meta.refCount ?? 0,
424
+ softDeleted: meta.softDeleted,
385
425
  }
386
426
  }
387
427
 
@@ -507,6 +547,21 @@ export class FileBackedStreamStore {
507
547
  return meta.producers[producerId]?.epoch
508
548
  }
509
549
 
550
+ /**
551
+ * Update lastAccessedAt to now. Called on reads and appends (not HEAD).
552
+ */
553
+ touchAccess(streamPath: string): void {
554
+ const key = `stream:${streamPath}`
555
+ const meta = this.db.get(key) as StreamMetadata | undefined
556
+ if (meta) {
557
+ const updatedMeta: StreamMetadata = {
558
+ ...meta,
559
+ lastAccessedAt: Date.now(),
560
+ }
561
+ this.db.putSync(key, updatedMeta)
562
+ }
563
+ }
564
+
510
565
  /**
511
566
  * Check if a stream is expired based on TTL or Expires-At.
512
567
  */
@@ -522,9 +577,10 @@ export class FileBackedStreamStore {
522
577
  }
523
578
  }
524
579
 
525
- // Check TTL (relative to creation time)
580
+ // Check TTL (sliding window from last access)
526
581
  if (meta.ttlSeconds !== undefined) {
527
- const expiryTime = meta.createdAt + meta.ttlSeconds * 1000
582
+ const lastAccessed = meta.lastAccessedAt ?? meta.createdAt
583
+ const expiryTime = lastAccessed + meta.ttlSeconds * 1000
528
584
  if (now >= expiryTime) {
529
585
  return true
530
586
  }
@@ -535,7 +591,8 @@ export class FileBackedStreamStore {
535
591
 
536
592
  /**
537
593
  * Get stream metadata, deleting it if expired.
538
- * Returns undefined if stream doesn't exist or is expired.
594
+ * Returns undefined if stream doesn't exist or is expired (and has no refs).
595
+ * Expired streams with refCount > 0 are soft-deleted instead of fully deleted.
539
596
  */
540
597
  private getMetaIfNotExpired(streamPath: string): StreamMetadata | undefined {
541
598
  const key = `stream:${streamPath}`
@@ -544,6 +601,15 @@ export class FileBackedStreamStore {
544
601
  return undefined
545
602
  }
546
603
  if (this.isExpired(meta)) {
604
+ if ((meta.refCount ?? 0) > 0) {
605
+ // Expired with refs: soft-delete instead of full delete
606
+ if (!meta.softDeleted) {
607
+ const updatedMeta: StreamMetadata = { ...meta, softDeleted: true }
608
+ this.db.putSync(key, updatedMeta)
609
+ return updatedMeta
610
+ }
611
+ return meta
612
+ }
547
613
  // Delete expired stream
548
614
  this.delete(streamPath)
549
615
  return undefined
@@ -551,6 +617,36 @@ export class FileBackedStreamStore {
551
617
  return meta
552
618
  }
553
619
 
620
+ /**
621
+ * Resolve fork expiry per the decision table.
622
+ * Forks have independent lifetimes — no capping at source expiry.
623
+ */
624
+ private resolveForkExpiry(
625
+ opts: { ttlSeconds?: number; expiresAt?: string },
626
+ sourceMeta: StreamMetadata
627
+ ): { ttlSeconds?: number; expiresAt?: string } {
628
+ // Fork explicitly requests TTL — use it
629
+ if (opts.ttlSeconds !== undefined) {
630
+ return { ttlSeconds: opts.ttlSeconds }
631
+ }
632
+
633
+ // Fork explicitly requests Expires-At — use it
634
+ if (opts.expiresAt) {
635
+ return { expiresAt: opts.expiresAt }
636
+ }
637
+
638
+ // No expiry requested — inherit from source
639
+ if (sourceMeta.ttlSeconds !== undefined) {
640
+ return { ttlSeconds: sourceMeta.ttlSeconds }
641
+ }
642
+ if (sourceMeta.expiresAt) {
643
+ return { expiresAt: sourceMeta.expiresAt }
644
+ }
645
+
646
+ // Source has no expiry either
647
+ return {}
648
+ }
649
+
554
650
  /**
555
651
  * Close the store, closing all file handles and database.
556
652
  * All data is already fsynced on each append, so no final flush needed.
@@ -572,35 +668,130 @@ export class FileBackedStreamStore {
572
668
  expiresAt?: string
573
669
  initialData?: Uint8Array
574
670
  closed?: boolean
671
+ forkedFrom?: string
672
+ forkOffset?: string
575
673
  } = {}
576
674
  ): Promise<Stream> {
577
675
  // Use getMetaIfNotExpired to treat expired streams as non-existent
578
- const existing = this.getMetaIfNotExpired(streamPath)
579
-
580
- if (existing) {
581
- // Check if config matches (idempotent create)
582
- // MIME types are case-insensitive per RFC 2045
583
- const normalizeMimeType = (ct: string | undefined) =>
584
- (ct ?? `application/octet-stream`).toLowerCase()
585
- const contentTypeMatches =
586
- normalizeMimeType(options.contentType) ===
587
- normalizeMimeType(existing.contentType)
588
- const ttlMatches = options.ttlSeconds === existing.ttlSeconds
589
- const expiresMatches = options.expiresAt === existing.expiresAt
590
- const closedMatches =
591
- (options.closed ?? false) === (existing.closed ?? false)
592
-
593
- if (contentTypeMatches && ttlMatches && expiresMatches && closedMatches) {
594
- // Idempotent success - return existing stream
595
- return this.streamMetaToStream(existing)
596
- } else {
597
- // Config mismatch - conflict
676
+ const existingRaw = this.db.get(`stream:${streamPath}`) as
677
+ | StreamMetadata
678
+ | undefined
679
+
680
+ if (existingRaw) {
681
+ if (this.isExpired(existingRaw)) {
682
+ // Expired: delete and proceed with creation
683
+ this.delete(streamPath)
684
+ } else if (existingRaw.softDeleted) {
685
+ // Soft-deleted streams block new creation
598
686
  throw new Error(
599
- `Stream already exists with different configuration: ${streamPath}`
687
+ `Stream has active forks; path cannot be reused until all forks are removed: ${streamPath}`
600
688
  )
689
+ } else {
690
+ // Check if config matches (idempotent create)
691
+ // MIME types are case-insensitive per RFC 2045
692
+ const normalizeMimeType = (ct: string | undefined) =>
693
+ (ct ?? `application/octet-stream`).toLowerCase()
694
+ const contentTypeMatches =
695
+ normalizeMimeType(options.contentType) ===
696
+ normalizeMimeType(existingRaw.contentType)
697
+ const ttlMatches = options.ttlSeconds === existingRaw.ttlSeconds
698
+ const expiresMatches = options.expiresAt === existingRaw.expiresAt
699
+ const closedMatches =
700
+ (options.closed ?? false) === (existingRaw.closed ?? false)
701
+ const forkedFromMatches =
702
+ (options.forkedFrom ?? undefined) === existingRaw.forkedFrom
703
+ // Only compare forkOffset when explicitly provided; when omitted the
704
+ // server resolves a default at creation time, so a second PUT that
705
+ // also omits it should still be considered idempotent.
706
+ const forkOffsetMatches =
707
+ options.forkOffset === undefined ||
708
+ options.forkOffset === existingRaw.forkOffset
709
+
710
+ if (
711
+ contentTypeMatches &&
712
+ ttlMatches &&
713
+ expiresMatches &&
714
+ closedMatches &&
715
+ forkedFromMatches &&
716
+ forkOffsetMatches
717
+ ) {
718
+ // Idempotent success - return existing stream
719
+ return this.streamMetaToStream(existingRaw)
720
+ } else {
721
+ // Config mismatch - conflict
722
+ throw new Error(
723
+ `Stream already exists with different configuration: ${streamPath}`
724
+ )
725
+ }
601
726
  }
602
727
  }
603
728
 
729
+ // Fork creation: validate source stream and resolve fork parameters
730
+ const isFork = !!options.forkedFrom
731
+ let forkOffset = `0000000000000000_0000000000000000`
732
+ let sourceContentType: string | undefined
733
+ let sourceMeta: StreamMetadata | undefined
734
+
735
+ if (isFork) {
736
+ const sourceKey = `stream:${options.forkedFrom!}`
737
+ sourceMeta = this.db.get(sourceKey) as StreamMetadata | undefined
738
+ if (!sourceMeta) {
739
+ throw new Error(`Source stream not found: ${options.forkedFrom}`)
740
+ }
741
+ if (sourceMeta.softDeleted) {
742
+ throw new Error(`Source stream is soft-deleted: ${options.forkedFrom}`)
743
+ }
744
+ if (this.isExpired(sourceMeta)) {
745
+ throw new Error(`Source stream not found: ${options.forkedFrom}`)
746
+ }
747
+
748
+ sourceContentType = sourceMeta.contentType
749
+
750
+ // Resolve fork offset: use provided or source's currentOffset
751
+ if (options.forkOffset) {
752
+ forkOffset = options.forkOffset
753
+ } else {
754
+ forkOffset = sourceMeta.currentOffset
755
+ }
756
+
757
+ // Validate: zeroOffset <= forkOffset <= source.currentOffset
758
+ const zeroOffset = `0000000000000000_0000000000000000`
759
+ if (forkOffset < zeroOffset || sourceMeta.currentOffset < forkOffset) {
760
+ throw new Error(`Invalid fork offset: ${forkOffset}`)
761
+ }
762
+
763
+ // Atomically increment source refcount in LMDB
764
+ const freshSource = this.db.get(sourceKey) as StreamMetadata
765
+ const updatedSource: StreamMetadata = {
766
+ ...freshSource,
767
+ refCount: (freshSource.refCount ?? 0) + 1,
768
+ }
769
+ this.db.putSync(sourceKey, updatedSource)
770
+ }
771
+
772
+ // Determine content type: use options, or inherit from source if fork
773
+ let contentType = options.contentType
774
+ if (!contentType || contentType.trim() === ``) {
775
+ if (isFork) {
776
+ contentType = sourceContentType
777
+ }
778
+ } else if (
779
+ isFork &&
780
+ normalizeContentType(contentType) !==
781
+ normalizeContentType(sourceContentType)
782
+ ) {
783
+ throw new Error(`Content type mismatch with source stream`)
784
+ }
785
+
786
+ // Compute effective expiry for forks
787
+ let effectiveExpiresAt = options.expiresAt
788
+ let effectiveTtlSeconds = options.ttlSeconds
789
+ if (isFork) {
790
+ const resolved = this.resolveForkExpiry(options, sourceMeta!)
791
+ effectiveExpiresAt = resolved.expiresAt
792
+ effectiveTtlSeconds = resolved.ttlSeconds
793
+ }
794
+
604
795
  // Define key for LMDB operations
605
796
  const key = `stream:${streamPath}`
606
797
 
@@ -609,16 +800,20 @@ export class FileBackedStreamStore {
609
800
  // This prevents the closed check from rejecting the initial append
610
801
  const streamMeta: StreamMetadata = {
611
802
  path: streamPath,
612
- contentType: options.contentType,
613
- currentOffset: `0000000000000000_0000000000000000`,
803
+ contentType,
804
+ currentOffset: isFork ? forkOffset : `0000000000000000_0000000000000000`,
614
805
  lastSeq: undefined,
615
- ttlSeconds: options.ttlSeconds,
616
- expiresAt: options.expiresAt,
806
+ ttlSeconds: effectiveTtlSeconds,
807
+ expiresAt: effectiveExpiresAt,
617
808
  createdAt: Date.now(),
809
+ lastAccessedAt: Date.now(),
618
810
  segmentCount: 1,
619
811
  totalBytes: 0,
620
812
  directoryName: generateUniqueDirectoryName(streamPath),
621
813
  closed: false, // Set to false initially, will be updated after initial append if needed
814
+ forkedFrom: isFork ? options.forkedFrom : undefined,
815
+ forkOffset: isFork ? forkOffset : undefined,
816
+ refCount: 0,
622
817
  }
623
818
 
624
819
  // Create stream directory and empty segment file immediately
@@ -633,6 +828,18 @@ export class FileBackedStreamStore {
633
828
  const segmentPath = path.join(streamDir, `segment_00000.log`)
634
829
  fs.writeFileSync(segmentPath, ``)
635
830
  } catch (err) {
831
+ // Rollback source refcount on failure
832
+ if (isFork && sourceMeta) {
833
+ const sourceKey = `stream:${options.forkedFrom!}`
834
+ const freshSource = this.db.get(sourceKey) as StreamMetadata | undefined
835
+ if (freshSource) {
836
+ const updatedSource: StreamMetadata = {
837
+ ...freshSource,
838
+ refCount: Math.max(0, (freshSource.refCount ?? 0) - 1),
839
+ }
840
+ this.db.putSync(sourceKey, updatedSource)
841
+ }
842
+ }
636
843
  console.error(
637
844
  `[FileBackedStreamStore] Error creating stream directory:`,
638
845
  err
@@ -645,10 +852,28 @@ export class FileBackedStreamStore {
645
852
 
646
853
  // Append initial data if provided
647
854
  if (options.initialData && options.initialData.length > 0) {
648
- await this.append(streamPath, options.initialData, {
649
- contentType: options.contentType,
650
- isInitialCreate: true,
651
- })
855
+ try {
856
+ await this.append(streamPath, options.initialData, {
857
+ contentType: options.contentType,
858
+ isInitialCreate: true,
859
+ })
860
+ } catch (err) {
861
+ // Rollback source refcount on failure
862
+ if (isFork && sourceMeta) {
863
+ const sourceKey = `stream:${options.forkedFrom!}`
864
+ const freshSource = this.db.get(sourceKey) as
865
+ | StreamMetadata
866
+ | undefined
867
+ if (freshSource) {
868
+ const updatedSource: StreamMetadata = {
869
+ ...freshSource,
870
+ refCount: Math.max(0, (freshSource.refCount ?? 0) - 1),
871
+ }
872
+ this.db.putSync(sourceKey, updatedSource)
873
+ }
874
+ }
875
+ throw err
876
+ }
652
877
  }
653
878
 
654
879
  // Now set closed flag if requested (after initial append succeeded)
@@ -665,11 +890,15 @@ export class FileBackedStreamStore {
665
890
 
666
891
  get(streamPath: string): Stream | undefined {
667
892
  const meta = this.getMetaIfNotExpired(streamPath)
668
- return meta ? this.streamMetaToStream(meta) : undefined
893
+ if (!meta) return undefined
894
+ return this.streamMetaToStream(meta)
669
895
  }
670
896
 
671
897
  has(streamPath: string): boolean {
672
- return this.getMetaIfNotExpired(streamPath) !== undefined
898
+ const meta = this.getMetaIfNotExpired(streamPath)
899
+ if (!meta) return false
900
+ if (meta.softDeleted) return false
901
+ return true
673
902
  }
674
903
 
675
904
  delete(streamPath: string): boolean {
@@ -680,11 +909,39 @@ export class FileBackedStreamStore {
680
909
  return false
681
910
  }
682
911
 
912
+ // Already soft-deleted: idempotent success
913
+ if (streamMeta.softDeleted) {
914
+ return true
915
+ }
916
+
917
+ // If there are forks referencing this stream, soft-delete
918
+ if ((streamMeta.refCount ?? 0) > 0) {
919
+ const updatedMeta: StreamMetadata = { ...streamMeta, softDeleted: true }
920
+ this.db.putSync(key, updatedMeta)
921
+ this.cancelLongPollsForStream(streamPath)
922
+ return true
923
+ }
924
+
925
+ // RefCount == 0: full delete with cascade
926
+ this.deleteWithCascade(streamPath)
927
+ return true
928
+ }
929
+
930
+ /**
931
+ * Fully delete a stream and cascade to soft-deleted parents
932
+ * whose refcount drops to zero.
933
+ */
934
+ private deleteWithCascade(streamPath: string): void {
935
+ const key = `stream:${streamPath}`
936
+ const streamMeta = this.db.get(key) as StreamMetadata | undefined
937
+ if (!streamMeta) return
938
+
939
+ const forkedFrom = streamMeta.forkedFrom
940
+
683
941
  // Cancel any pending long-polls for this stream
684
942
  this.cancelLongPollsForStream(streamPath)
685
943
 
686
944
  // Close any open file handle for this stream's segment file
687
- // This is important especially on Windows where open handles block deletion
688
945
  const segmentPath = path.join(
689
946
  this.dataDir,
690
947
  `streams`,
@@ -699,7 +956,6 @@ export class FileBackedStreamStore {
699
956
  this.db.removeSync(key)
700
957
 
701
958
  // Delete files using unique directory name (async, but don't wait)
702
- // Safe to reuse stream path immediately since new creation gets new directory
703
959
  this.fileManager
704
960
  .deleteDirectoryByName(streamMeta.directoryName)
705
961
  .catch((err: Error) => {
@@ -709,7 +965,24 @@ export class FileBackedStreamStore {
709
965
  )
710
966
  })
711
967
 
712
- return true
968
+ // If this stream is a fork, decrement the source's refcount
969
+ if (forkedFrom) {
970
+ const parentKey = `stream:${forkedFrom}`
971
+ const parentMeta = this.db.get(parentKey) as StreamMetadata | undefined
972
+ if (parentMeta) {
973
+ const newRefCount = Math.max(0, (parentMeta.refCount ?? 0) - 1)
974
+ const updatedParent: StreamMetadata = {
975
+ ...parentMeta,
976
+ refCount: newRefCount,
977
+ }
978
+ this.db.putSync(parentKey, updatedParent)
979
+
980
+ // If parent refcount hit 0 and parent is soft-deleted, cascade
981
+ if (newRefCount === 0 && updatedParent.softDeleted) {
982
+ this.deleteWithCascade(forkedFrom)
983
+ }
984
+ }
985
+ }
713
986
  }
714
987
 
715
988
  async append(
@@ -723,6 +996,11 @@ export class FileBackedStreamStore {
723
996
  throw new Error(`Stream not found: ${streamPath}`)
724
997
  }
725
998
 
999
+ // Guard against soft-deleted streams
1000
+ if (streamMeta.softDeleted) {
1001
+ throw new Error(`Stream is soft-deleted: ${streamPath}`)
1002
+ }
1003
+
726
1004
  // Check if stream is closed
727
1005
  if (streamMeta.closed) {
728
1006
  // Check if this is a duplicate of the closing request (idempotent retry)
@@ -1075,57 +1353,30 @@ export class FileBackedStreamStore {
1075
1353
  }
1076
1354
  }
1077
1355
 
1078
- read(
1079
- streamPath: string,
1080
- offset?: string
1081
- ): { messages: Array<StreamMessage>; upToDate: boolean } {
1082
- const streamMeta = this.getMetaIfNotExpired(streamPath)
1083
-
1084
- if (!streamMeta) {
1085
- throw new Error(`Stream not found: ${streamPath}`)
1086
- }
1087
-
1088
- // Parse offsets
1089
- const startOffset = offset ?? `0000000000000000_0000000000000000`
1090
- const startParts = startOffset.split(`_`).map(Number)
1091
- const startByte = startParts[1] ?? 0
1092
- const currentParts = streamMeta.currentOffset.split(`_`).map(Number)
1093
- const currentSeq = currentParts[0] ?? 0
1094
- const currentByte = currentParts[1] ?? 0
1095
-
1096
- // Early return if no data available
1097
- if (streamMeta.currentOffset === `0000000000000000_0000000000000000`) {
1098
- return { messages: [], upToDate: true }
1099
- }
1100
-
1101
- // If start offset is at or past current offset, return empty
1102
- if (startByte >= currentByte) {
1103
- return { messages: [], upToDate: true }
1104
- }
1105
-
1106
- // Get segment file path using unique directory name
1107
- const streamDir = path.join(
1108
- this.dataDir,
1109
- `streams`,
1110
- streamMeta.directoryName
1111
- )
1112
- const segmentPath = path.join(streamDir, `segment_00000.log`)
1356
+ /**
1357
+ * Read messages from a specific segment file.
1358
+ * @param segmentPath - Path to the segment file
1359
+ * @param startByte - Start byte offset (skip messages at or before this offset)
1360
+ * @param baseByteOffset - Base byte offset to add to physical offsets (for fork stitching)
1361
+ * @param capByte - Optional cap: stop reading when logical offset exceeds this value
1362
+ * @returns Array of messages with properly computed offsets
1363
+ */
1364
+ private readMessagesFromSegmentFile(
1365
+ segmentPath: string,
1366
+ startByte: number,
1367
+ baseByteOffset: number,
1368
+ capByte?: number
1369
+ ): Array<StreamMessage> {
1370
+ const messages: Array<StreamMessage> = []
1113
1371
 
1114
- // Check if file exists
1115
1372
  if (!fs.existsSync(segmentPath)) {
1116
- return { messages: [], upToDate: true }
1373
+ return messages
1117
1374
  }
1118
1375
 
1119
- // Read and parse messages from file
1120
- const messages: Array<StreamMessage> = []
1121
-
1122
1376
  try {
1123
- // Calculate file position from offset
1124
- // We need to read from the beginning and skip to the right position
1125
- // because the file has framing overhead
1126
1377
  const fileContent = fs.readFileSync(segmentPath)
1127
1378
  let filePos = 0
1128
- let currentDataOffset = 0
1379
+ let physicalDataOffset = 0
1129
1380
 
1130
1381
  while (filePos < fileContent.length) {
1131
1382
  // Read message length (4 bytes)
@@ -1146,22 +1397,161 @@ export class FileBackedStreamStore {
1146
1397
  // Skip newline
1147
1398
  filePos += 1
1148
1399
 
1149
- // Calculate this message's offset (end position)
1150
- const messageOffset = currentDataOffset + messageLength
1400
+ // Calculate this message's logical offset (end position)
1401
+ physicalDataOffset += messageLength
1402
+ const logicalOffset = baseByteOffset + physicalDataOffset
1403
+
1404
+ // Stop if we've exceeded the cap
1405
+ if (capByte !== undefined && logicalOffset > capByte) {
1406
+ break
1407
+ }
1151
1408
 
1152
- // Only include messages after start offset
1153
- if (messageOffset > startByte) {
1409
+ // Only include messages after start byte
1410
+ if (logicalOffset > startByte) {
1154
1411
  messages.push({
1155
1412
  data: new Uint8Array(messageData),
1156
- offset: `${String(currentSeq).padStart(16, `0`)}_${String(messageOffset).padStart(16, `0`)}`,
1413
+ offset: `${String(0).padStart(16, `0`)}_${String(logicalOffset).padStart(16, `0`)}`,
1157
1414
  timestamp: 0, // Not stored in MVP
1158
1415
  })
1159
1416
  }
1160
-
1161
- currentDataOffset = messageOffset
1162
1417
  }
1163
1418
  } catch (err) {
1164
- console.error(`[FileBackedStreamStore] Error reading file:`, err)
1419
+ console.error(`[FileBackedStreamStore] Error reading segment file:`, err)
1420
+ }
1421
+
1422
+ return messages
1423
+ }
1424
+
1425
+ /**
1426
+ * Recursively read messages from a fork's source chain.
1427
+ * Reads from source (and its sources if also forked), capped at capByte.
1428
+ * Does NOT check softDeleted -- forks must read through soft-deleted sources.
1429
+ */
1430
+ private readForkedMessages(
1431
+ sourcePath: string,
1432
+ startByte: number,
1433
+ capByte: number
1434
+ ): Array<StreamMessage> {
1435
+ const sourceKey = `stream:${sourcePath}`
1436
+ const sourceMeta = this.db.get(sourceKey) as StreamMetadata | undefined
1437
+ if (!sourceMeta) {
1438
+ return []
1439
+ }
1440
+
1441
+ const messages: Array<StreamMessage> = []
1442
+
1443
+ // If source is also a fork and we need messages before source's forkOffset,
1444
+ // recursively read from source's source
1445
+ if (sourceMeta.forkedFrom && sourceMeta.forkOffset) {
1446
+ const sourceForkByte = Number(sourceMeta.forkOffset.split(`_`)[1] ?? 0)
1447
+
1448
+ if (startByte < sourceForkByte) {
1449
+ // Cap at the minimum of source's forkByte and our capByte
1450
+ const inheritedCap = Math.min(sourceForkByte, capByte)
1451
+ const inherited = this.readForkedMessages(
1452
+ sourceMeta.forkedFrom,
1453
+ startByte,
1454
+ inheritedCap
1455
+ )
1456
+ messages.push(...inherited)
1457
+ }
1458
+ }
1459
+
1460
+ // Read source's own segment file
1461
+ // For a fork source, its own data starts at physical byte 0 in its segment file,
1462
+ // but the logical offsets need to account for its own forkOffset base
1463
+ const segmentPath = path.join(
1464
+ this.dataDir,
1465
+ `streams`,
1466
+ sourceMeta.directoryName,
1467
+ `segment_00000.log`
1468
+ )
1469
+
1470
+ // The base offset for this source's own data is its forkOffset (if it's a fork) or 0
1471
+ const sourceBaseByte = sourceMeta.forkOffset
1472
+ ? Number(sourceMeta.forkOffset.split(`_`)[1] ?? 0)
1473
+ : 0
1474
+
1475
+ const ownMessages = this.readMessagesFromSegmentFile(
1476
+ segmentPath,
1477
+ startByte,
1478
+ sourceBaseByte,
1479
+ capByte
1480
+ )
1481
+ messages.push(...ownMessages)
1482
+
1483
+ return messages
1484
+ }
1485
+
1486
+ read(
1487
+ streamPath: string,
1488
+ offset?: string
1489
+ ): { messages: Array<StreamMessage>; upToDate: boolean } {
1490
+ const streamMeta = this.getMetaIfNotExpired(streamPath)
1491
+
1492
+ if (!streamMeta) {
1493
+ throw new Error(`Stream not found: ${streamPath}`)
1494
+ }
1495
+
1496
+ // Parse offsets
1497
+ const startOffset = offset ?? `0000000000000000_0000000000000000`
1498
+ const startByte = Number(startOffset.split(`_`)[1] ?? 0)
1499
+ const currentByte = Number(streamMeta.currentOffset.split(`_`)[1] ?? 0)
1500
+
1501
+ // Early return if no data available
1502
+ if (streamMeta.currentOffset === `0000000000000000_0000000000000000`) {
1503
+ return { messages: [], upToDate: true }
1504
+ }
1505
+
1506
+ // If start offset is at or past current offset, return empty
1507
+ if (startByte >= currentByte) {
1508
+ return { messages: [], upToDate: true }
1509
+ }
1510
+
1511
+ const messages: Array<StreamMessage> = []
1512
+
1513
+ // For forked streams, stitch inherited and own messages
1514
+ if (streamMeta.forkedFrom && streamMeta.forkOffset) {
1515
+ const forkByte = Number(streamMeta.forkOffset.split(`_`)[1] ?? 0)
1516
+
1517
+ // If offset is before the forkOffset, read from source chain
1518
+ if (startByte < forkByte) {
1519
+ const inherited = this.readForkedMessages(
1520
+ streamMeta.forkedFrom,
1521
+ startByte,
1522
+ forkByte
1523
+ )
1524
+ messages.push(...inherited)
1525
+ }
1526
+
1527
+ // Read fork's own segment file with offset translation
1528
+ // Physical bytes in file start at 0, but logical offsets start at forkOffset
1529
+ const segmentPath = path.join(
1530
+ this.dataDir,
1531
+ `streams`,
1532
+ streamMeta.directoryName,
1533
+ `segment_00000.log`
1534
+ )
1535
+ const ownMessages = this.readMessagesFromSegmentFile(
1536
+ segmentPath,
1537
+ startByte,
1538
+ forkByte
1539
+ )
1540
+ messages.push(...ownMessages)
1541
+ } else {
1542
+ // Non-forked stream: read from segment file directly
1543
+ const segmentPath = path.join(
1544
+ this.dataDir,
1545
+ `streams`,
1546
+ streamMeta.directoryName,
1547
+ `segment_00000.log`
1548
+ )
1549
+ const ownMessages = this.readMessagesFromSegmentFile(
1550
+ segmentPath,
1551
+ startByte,
1552
+ 0
1553
+ )
1554
+ messages.push(...ownMessages)
1165
1555
  }
1166
1556
 
1167
1557
  return { messages, upToDate: true }
@@ -1182,6 +1572,17 @@ export class FileBackedStreamStore {
1182
1572
  throw new Error(`Stream not found: ${streamPath}`)
1183
1573
  }
1184
1574
 
1575
+ // For forks: if offset is in the inherited range (< forkOffset),
1576
+ // read and return immediately instead of long-polling
1577
+ if (
1578
+ streamMeta.forkedFrom &&
1579
+ streamMeta.forkOffset &&
1580
+ offset < streamMeta.forkOffset
1581
+ ) {
1582
+ const { messages } = this.read(streamPath, offset)
1583
+ return { messages, timedOut: false }
1584
+ }
1585
+
1185
1586
  // If stream is closed and client is at tail, return immediately
1186
1587
  if (streamMeta.closed && offset === streamMeta.currentOffset) {
1187
1588
  return { messages: [], timedOut: false, streamClosed: true }