@durable-streams/client 0.1.2 → 0.1.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/src/idempotent-producer.ts ADDED
@@ -0,0 +1,642 @@
+ /**
+  * IdempotentProducer - Fire-and-forget producer with exactly-once write semantics.
+  *
+  * Implements Kafka-style idempotent producer pattern with:
+  * - Client-provided producer IDs (zero RTT overhead)
+  * - Client-declared epochs, server-validated fencing
+  * - Per-batch sequence numbers for deduplication
+  * - Automatic batching and pipelining for throughput
+  */
+
+ import fastq from "fastq"
+
+ import { DurableStreamError, FetchError } from "./error"
+ import {
+   PRODUCER_EPOCH_HEADER,
+   PRODUCER_EXPECTED_SEQ_HEADER,
+   PRODUCER_ID_HEADER,
+   PRODUCER_RECEIVED_SEQ_HEADER,
+   PRODUCER_SEQ_HEADER,
+   STREAM_OFFSET_HEADER,
+ } from "./constants"
+ import type { queueAsPromised } from "fastq"
+ import type { DurableStream } from "./stream"
+ import type { IdempotentProducerOptions, Offset } from "./types"
+
+ /**
+  * Error thrown when a producer's epoch is stale (zombie fencing).
+  */
+ export class StaleEpochError extends Error {
+   /**
+    * The current epoch on the server.
+    */
+   readonly currentEpoch: number
+
+   constructor(currentEpoch: number) {
+     super(
+       `Producer epoch is stale. Current server epoch: ${currentEpoch}. ` +
+         `Call restart() or create a new producer with a higher epoch.`
+     )
+     this.name = `StaleEpochError`
+     this.currentEpoch = currentEpoch
+   }
+ }
+
+ /**
+  * Error thrown when an unrecoverable sequence gap is detected.
+  *
+  * With maxInFlight > 1, HTTP requests can arrive out of order at the server,
+  * causing temporary 409 responses. The client automatically handles these
+  * by waiting for earlier sequences to complete, then retrying.
+  *
+  * This error is only thrown when the gap cannot be resolved (e.g., the
+  * expected sequence is >= our sequence, indicating a true protocol violation).
+  */
+ export class SequenceGapError extends Error {
+   readonly expectedSeq: number
+   readonly receivedSeq: number
+
+   constructor(expectedSeq: number, receivedSeq: number) {
+     super(
+       `Producer sequence gap: expected ${expectedSeq}, received ${receivedSeq}`
+     )
+     this.name = `SequenceGapError`
+     this.expectedSeq = expectedSeq
+     this.receivedSeq = receivedSeq
+   }
+ }
+
+ /**
+  * Normalize content-type by extracting the media type (before any semicolon).
+  */
+ function normalizeContentType(contentType: string | undefined): string {
+   if (!contentType) return ``
+   return contentType.split(`;`)[0]!.trim().toLowerCase()
+ }
+
+ /**
+  * Internal type for pending batch entries.
+  * Stores original data for proper JSON batching.
+  */
+ interface PendingEntry {
+   /** Original data - parsed for JSON mode batching */
+   data: unknown
+   /** Encoded bytes for byte-stream mode */
+   body: Uint8Array
+ }
+
+ /**
+  * Internal type for batch tasks submitted to the queue.
+  */
+ interface BatchTask {
+   batch: Array<PendingEntry>
+   seq: number
+ }
+
+ /**
+  * An idempotent producer for exactly-once writes to a durable stream.
+  *
+  * Features:
+  * - Fire-and-forget: append() returns immediately, batches in background
+  * - Exactly-once: server deduplicates using (producerId, epoch, seq)
+  * - Batching: multiple appends batched into single HTTP request
+  * - Pipelining: up to maxInFlight concurrent batches
+  * - Zombie fencing: stale producers rejected via epoch validation
+  *
+  * @example
+  * ```typescript
+  * const stream = new DurableStream({ url: "https://..." });
+  * const producer = new IdempotentProducer(stream, "order-service-1", {
+  *   epoch: 0,
+  *   autoClaim: true,
+  * });
+  *
+  * // Fire-and-forget writes (synchronous, returns immediately)
+  * producer.append("message 1");
+  * producer.append("message 2");
+  *
+  * // Ensure all messages are delivered before shutdown
+  * await producer.flush();
+  * await producer.close();
+  * ```
+  */
+ export class IdempotentProducer {
+   readonly #stream: DurableStream
+   readonly #producerId: string
+   #epoch: number
+   #nextSeq = 0
+   readonly #autoClaim: boolean
+   readonly #maxBatchBytes: number
+   readonly #lingerMs: number
+   readonly #fetchClient: typeof fetch
+   readonly #signal?: AbortSignal
+   readonly #onError?: (error: Error) => void
+
+   // Batching state
+   #pendingBatch: Array<PendingEntry> = []
+   #batchBytes = 0
+   #lingerTimeout: ReturnType<typeof setTimeout> | null = null
+
+   // Pipelining via fastq
+   readonly #queue: queueAsPromised<BatchTask>
+   readonly #maxInFlight: number
+   #closed = false
+
+   // When autoClaim is true, we must wait for the first batch to complete
+   // before allowing pipelining (to know what epoch was claimed)
+   #epochClaimed: boolean
+
+   // Track sequence completions for 409 retry coordination.
+   // When HTTP requests arrive out of order, we get 409 errors.
+   // Maps epoch -> (seq -> { resolved, error?, waiters })
+   #seqState: Map<
+     number,
+     Map<
+       number,
+       {
+         resolved: boolean
+         error?: Error
+         waiters: Array<(err?: Error) => void>
+       }
+     >
+   > = new Map()
+
+   /**
+    * Create an idempotent producer for a stream.
+    *
+    * @param stream - The DurableStream to write to
+    * @param producerId - Stable identifier for this producer (e.g., "order-service-1")
+    * @param opts - Producer options
+    */
+   constructor(
+     stream: DurableStream,
+     producerId: string,
+     opts?: IdempotentProducerOptions
+   ) {
+     this.#stream = stream
+     this.#producerId = producerId
+     this.#epoch = opts?.epoch ?? 0
+     this.#autoClaim = opts?.autoClaim ?? false
+     this.#maxBatchBytes = opts?.maxBatchBytes ?? 1024 * 1024 // 1MB
+     this.#lingerMs = opts?.lingerMs ?? 5
+     this.#signal = opts?.signal
+     this.#onError = opts?.onError
+     this.#fetchClient =
+       opts?.fetch ?? ((...args: Parameters<typeof fetch>) => fetch(...args))
+
+     this.#maxInFlight = opts?.maxInFlight ?? 5
+
+     // When autoClaim is true, epoch is not yet known until first batch completes.
+     // We block pipelining until then to avoid racing with the claim.
+     this.#epochClaimed = !this.#autoClaim
+
+     // Initialize fastq with maxInFlight concurrency
+     this.#queue = fastq.promise(this.#batchWorker.bind(this), this.#maxInFlight)
+
+     // Handle signal abort (use { once: true } to auto-cleanup)
+     if (this.#signal) {
+       this.#signal.addEventListener(
+         `abort`,
+         () => {
+           this.#rejectPendingBatch(
+             new DurableStreamError(
+               `Producer aborted`,
+               `ALREADY_CLOSED`,
+               undefined,
+               undefined
+             )
+           )
+         },
+         { once: true }
+       )
+     }
+   }
+
+   /**
+    * Append data to the stream.
+    *
+    * This is fire-and-forget: returns immediately after adding to the batch.
+    * The message is batched and sent when:
+    * - maxBatchBytes is reached
+    * - lingerMs elapses
+    * - flush() is called
+    *
+    * Errors are reported via onError callback if configured. Use flush() to
+    * wait for all pending messages to be sent.
+    *
+    * For JSON streams, pass native objects (which will be serialized internally).
+    * For byte streams, pass string or Uint8Array.
+    *
+    * @param body - Data to append (object for JSON streams, string or Uint8Array for byte streams)
+    */
+   append(body: Uint8Array | string | unknown): void {
+     if (this.#closed) {
+       throw new DurableStreamError(
+         `Producer is closed`,
+         `ALREADY_CLOSED`,
+         undefined,
+         undefined
+       )
+     }
+
+     const isJson =
+       normalizeContentType(this.#stream.contentType) === `application/json`
+
+     let bytes: Uint8Array
+     let data: unknown
+
+     if (isJson) {
+       // For JSON streams: accept native objects, serialize internally
+       const json = JSON.stringify(body)
+       bytes = new TextEncoder().encode(json)
+       data = body
+     } else {
+       // For byte streams, require string or Uint8Array
+       if (typeof body === `string`) {
+         bytes = new TextEncoder().encode(body)
+       } else if (body instanceof Uint8Array) {
+         bytes = body
+       } else {
+         throw new DurableStreamError(
+           `Non-JSON streams require string or Uint8Array`,
+           `BAD_REQUEST`,
+           400,
+           undefined
+         )
+       }
+       data = bytes
+     }
+
+     this.#pendingBatch.push({ data, body: bytes })
+     this.#batchBytes += bytes.length
+
+     // Check if batch should be sent immediately
+     if (this.#batchBytes >= this.#maxBatchBytes) {
+       this.#enqueuePendingBatch()
+     } else if (!this.#lingerTimeout) {
+       // Start linger timer
+       this.#lingerTimeout = setTimeout(() => {
+         this.#lingerTimeout = null
+         if (this.#pendingBatch.length > 0) {
+           this.#enqueuePendingBatch()
+         }
+       }, this.#lingerMs)
+     }
+   }
+
+   /**
+    * Send any pending batch immediately and wait for all in-flight batches.
+    *
+    * Call this before shutdown to ensure all messages are delivered.
+    */
+   async flush(): Promise<void> {
+     // Clear linger timeout
+     if (this.#lingerTimeout) {
+       clearTimeout(this.#lingerTimeout)
+       this.#lingerTimeout = null
+     }
+
+     // Enqueue any pending batch
+     if (this.#pendingBatch.length > 0) {
+       this.#enqueuePendingBatch()
+     }
+
+     // Wait for queue to drain
+     await this.#queue.drained()
+   }
+
+   /**
+    * Flush pending messages and close the producer.
+    *
+    * After calling close(), further append() calls will throw.
+    */
+   async close(): Promise<void> {
+     if (this.#closed) return
+
+     this.#closed = true
+
+     try {
+       await this.flush()
+     } catch {
+       // Ignore errors during close
+     }
+   }
+
+   /**
+    * Increment epoch and reset sequence.
+    *
+    * Call this when restarting the producer to establish a new session.
+    * Flushes any pending messages first.
+    */
+   async restart(): Promise<void> {
+     await this.flush()
+     this.#epoch++
+     this.#nextSeq = 0
+   }
+
+   /**
+    * Current epoch for this producer.
+    */
+   get epoch(): number {
+     return this.#epoch
+   }
+
+   /**
+    * Next sequence number to be assigned.
+    */
+   get nextSeq(): number {
+     return this.#nextSeq
+   }
+
+   /**
+    * Number of messages in the current pending batch.
+    */
+   get pendingCount(): number {
+     return this.#pendingBatch.length
+   }
+
+   /**
+    * Number of batches currently in flight.
+    */
+   get inFlightCount(): number {
+     return this.#queue.length()
+   }
+
+   // ============================================================================
+   // Private implementation
+   // ============================================================================
+
+   /**
+    * Enqueue the current pending batch for processing.
+    */
+   #enqueuePendingBatch(): void {
+     if (this.#pendingBatch.length === 0) return
+
+     // Take the current batch
+     const batch = this.#pendingBatch
+     const seq = this.#nextSeq
+
+     this.#pendingBatch = []
+     this.#batchBytes = 0
+     this.#nextSeq++
+
+     // When autoClaim is enabled and epoch hasn't been claimed yet,
+     // we must wait for any in-flight batch to complete before sending more.
+     // This ensures the first batch claims the epoch before pipelining begins.
+     if (this.#autoClaim && !this.#epochClaimed && this.#queue.length() > 0) {
+       // Wait for queue to drain, then push
+       this.#queue.drained().then(() => {
+         this.#queue.push({ batch, seq }).catch(() => {
+           // Error handling is done in #batchWorker
+         })
+       })
+     } else {
+       // Push to fastq - it handles concurrency automatically
+       this.#queue.push({ batch, seq }).catch(() => {
+         // Error handling is done in #batchWorker
+       })
+     }
+   }
+
+   /**
+    * Batch worker - processes batches via fastq.
+    */
+   async #batchWorker(task: BatchTask): Promise<void> {
+     const { batch, seq } = task
+     const epoch = this.#epoch
+
+     try {
+       await this.#doSendBatch(batch, seq, epoch)
+
+       // Mark epoch as claimed after first successful batch.
+       // This enables full pipelining for subsequent batches.
+       if (!this.#epochClaimed) {
+         this.#epochClaimed = true
+       }
+
+       // Signal success for this sequence (for 409 retry coordination)
+       this.#signalSeqComplete(epoch, seq, undefined)
+     } catch (error) {
+       // Signal failure so waiting batches can fail too
+       this.#signalSeqComplete(epoch, seq, error as Error)
+
+       // Call onError callback if configured
+       if (this.#onError) {
+         this.#onError(error as Error)
+       }
+       throw error
+     }
+   }
+
+   /**
+    * Signal that a sequence has completed (success or failure).
+    */
+   #signalSeqComplete(
+     epoch: number,
+     seq: number,
+     error: Error | undefined
+   ): void {
+     let epochMap = this.#seqState.get(epoch)
+     if (!epochMap) {
+       epochMap = new Map()
+       this.#seqState.set(epoch, epochMap)
+     }
+
+     const state = epochMap.get(seq)
+     if (state) {
+       // Mark resolved and notify all waiters
+       state.resolved = true
+       state.error = error
+       for (const waiter of state.waiters) {
+         waiter(error)
+       }
+       state.waiters = []
+     } else {
+       // No waiters yet, just mark as resolved
+       epochMap.set(seq, { resolved: true, error, waiters: [] })
+     }
+
+     // Clean up old entries to prevent unbounded memory growth.
+     // We keep entries for the last maxInFlight * 3 sequences to handle
+     // potential late 409 retries from pipelining.
+     const cleanupThreshold = seq - this.#maxInFlight * 3
+     if (cleanupThreshold > 0) {
+       for (const oldSeq of epochMap.keys()) {
+         if (oldSeq < cleanupThreshold) {
+           epochMap.delete(oldSeq)
+         }
+       }
+     }
+   }
+
+   /**
+    * Wait for a specific sequence to complete.
+    * Returns immediately if already completed.
+    * Throws if the sequence failed.
+    */
+   #waitForSeq(epoch: number, seq: number): Promise<void> {
+     let epochMap = this.#seqState.get(epoch)
+     if (!epochMap) {
+       epochMap = new Map()
+       this.#seqState.set(epoch, epochMap)
+     }
+
+     const state = epochMap.get(seq)
+     if (state?.resolved) {
+       // Already completed
+       if (state.error) {
+         return Promise.reject(state.error)
+       }
+       return Promise.resolve()
+     }
+
+     // Not yet completed, add a waiter
+     return new Promise((resolve, reject) => {
+       const waiter = (err?: Error) => {
+         if (err) reject(err)
+         else resolve()
+       }
+       if (state) {
+         state.waiters.push(waiter)
+       } else {
+         epochMap.set(seq, { resolved: false, waiters: [waiter] })
+       }
+     })
+   }
+
+   /**
+    * Actually send the batch to the server.
+    * Handles auto-claim retry on 403 (stale epoch) if autoClaim is enabled.
+    * Does NOT implement general retry/backoff for network errors or 5xx responses.
+    */
+   async #doSendBatch(
+     batch: Array<PendingEntry>,
+     seq: number,
+     epoch: number
+   ): Promise<{ offset: Offset; duplicate: boolean }> {
+     const contentType = this.#stream.contentType ?? `application/octet-stream`
+     const isJson = normalizeContentType(contentType) === `application/json`
+
+     // Build batch body based on content type
+     let batchedBody: BodyInit
+     if (isJson) {
+       // For JSON mode: always send as array (server flattens one level)
+       // Single append: [value] → server stores value
+       // Multiple appends: [val1, val2] → server stores val1, val2
+       const values = batch.map((e) => e.data)
+       batchedBody = JSON.stringify(values)
+     } else {
+       // For byte mode: concatenate all chunks
+       const totalSize = batch.reduce((sum, e) => sum + e.body.length, 0)
+       const concatenated = new Uint8Array(totalSize)
+       let offset = 0
+       for (const entry of batch) {
+         concatenated.set(entry.body, offset)
+         offset += entry.body.length
+       }
+       batchedBody = concatenated
+     }
+
+     // Build URL
+     const url = this.#stream.url
+
+     // Build headers
+     const headers: Record<string, string> = {
+       "content-type": contentType,
+       [PRODUCER_ID_HEADER]: this.#producerId,
+       [PRODUCER_EPOCH_HEADER]: epoch.toString(),
+       [PRODUCER_SEQ_HEADER]: seq.toString(),
+     }
+
+     // Send request
+     const response = await this.#fetchClient(url, {
+       method: `POST`,
+       headers,
+       body: batchedBody,
+       signal: this.#signal,
+     })
+
+     // Handle response
+     if (response.status === 204) {
+       // Duplicate - idempotent success
+       return { offset: ``, duplicate: true }
+     }
+
+     if (response.status === 200) {
+       // Success
+       const resultOffset = response.headers.get(STREAM_OFFSET_HEADER) ?? ``
+       return { offset: resultOffset, duplicate: false }
+     }
+
+     if (response.status === 403) {
+       // Stale epoch
+       const currentEpochStr = response.headers.get(PRODUCER_EPOCH_HEADER)
+       const currentEpoch = currentEpochStr
+         ? parseInt(currentEpochStr, 10)
+         : epoch
+
+       if (this.#autoClaim) {
+         // Auto-claim: retry with epoch+1
+         const newEpoch = currentEpoch + 1
+         this.#epoch = newEpoch
+         this.#nextSeq = 1 // This batch will use seq 0
+
+         // Retry with new epoch, starting at seq 0
+         return this.#doSendBatch(batch, 0, newEpoch)
+       }
+
+       throw new StaleEpochError(currentEpoch)
+     }
+
+     if (response.status === 409) {
+       // Sequence gap - our request arrived before an earlier sequence
+       const expectedSeqStr = response.headers.get(PRODUCER_EXPECTED_SEQ_HEADER)
+       const expectedSeq = expectedSeqStr ? parseInt(expectedSeqStr, 10) : 0
+
+       // If our seq is ahead of expectedSeq, wait for earlier sequences to complete, then retry.
+       // This handles HTTP request reordering with maxInFlight > 1.
+       if (expectedSeq < seq) {
+         // Wait for all sequences from expectedSeq to seq-1
+         const waitPromises: Array<Promise<void>> = []
+         for (let s = expectedSeq; s < seq; s++) {
+           waitPromises.push(this.#waitForSeq(epoch, s))
+         }
+         await Promise.all(waitPromises)
+         // Retry now that earlier sequences have completed
+         return this.#doSendBatch(batch, seq, epoch)
+       }
+
+       // If expectedSeq >= seq, something is wrong (shouldn't happen) - throw error
+       const receivedSeqStr = response.headers.get(PRODUCER_RECEIVED_SEQ_HEADER)
+       const receivedSeq = receivedSeqStr ? parseInt(receivedSeqStr, 10) : seq
+       throw new SequenceGapError(expectedSeq, receivedSeq)
+     }
+
+     if (response.status === 400) {
+       // Bad request (e.g., invalid epoch/seq)
+       const error = await DurableStreamError.fromResponse(response, url)
+       throw error
+     }
+
+     // Other errors - use FetchError for standard handling
+     const error = await FetchError.fromResponse(response, url)
+     throw error
+   }
+
+   /**
+    * Clear pending batch and report error.
+    */
+   #rejectPendingBatch(error: Error): void {
+     // Call onError callback if configured
+     if (this.#onError && this.#pendingBatch.length > 0) {
+       this.#onError(error)
+     }
+     this.#pendingBatch = []
+     this.#batchBytes = 0
+
+     if (this.#lingerTimeout) {
+       clearTimeout(this.#lingerTimeout)
+       this.#lingerTimeout = null
+     }
+   }
+ }
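
Editor's note: taken together, the fencing and retry surfaces above are consumed roughly as follows. This is a minimal sketch, not part of the diff: `DurableStream`, `IdempotentProducer`, `StaleEpochError`, and the option names come from the file above, while the stream URL, the JSON content type, and the handler bodies are illustrative assumptions.

```typescript
import {
  DurableStream,
  IdempotentProducer,
  StaleEpochError,
} from "@durable-streams/client"

// Assumed endpoint - substitute a real durable-streams URL.
const stream = new DurableStream({ url: "https://example.com/streams/orders" })

// Epoch 3 fences any zombie instance still writing with epoch <= 2.
const producer = new IdempotentProducer(stream, "order-service-1", {
  epoch: 3,
  maxInFlight: 5, // the default; batches pipeline up to this depth
  onError: (err) => {
    if (err instanceof StaleEpochError) {
      // Another instance claimed a higher epoch; stop writing or call restart().
      console.error(`fenced: server epoch is ${err.currentEpoch}`)
    } else {
      console.error(`append failed`, err)
    }
  },
})

// Assuming the stream's content type is application/json,
// native objects are serialized internally.
producer.append({ orderId: 42, status: "created" })
producer.append({ orderId: 42, status: "paid" })

await producer.flush() // drain pending and in-flight batches
await producer.close()
```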
package/src/index.ts CHANGED
@@ -20,6 +20,20 @@ export { stream } from "./stream-api"
  // DurableStream class for read/write operations
  export { DurableStream, type DurableStreamOptions } from "./stream"
 
+ // HTTP warning utility
+ export { warnIfUsingHttpInBrowser, _resetHttpWarningForTesting } from "./utils"
+
+ // ============================================================================
+ // Idempotent Producer
+ // ============================================================================
+
+ // IdempotentProducer for exactly-once writes
+ export {
+   IdempotentProducer,
+   StaleEpochError,
+   SequenceGapError,
+ } from "./idempotent-producer"
+
  // ============================================================================
  // Types
  // ============================================================================
@@ -51,6 +65,10 @@ export type {
    HeadResult,
    LegacyLiveMode,
 
+   // Idempotent producer types
+   IdempotentProducerOptions,
+   IdempotentAppendResult,
+
    // Error handling
    DurableStreamErrorCode,
    RetryOpts,
@@ -100,4 +118,10 @@ export {
    CURSOR_QUERY_PARAM,
    SSE_COMPATIBLE_CONTENT_TYPES,
    DURABLE_STREAM_PROTOCOL_QUERY_PARAMS,
+   // Idempotent producer headers
+   PRODUCER_ID_HEADER,
+   PRODUCER_EPOCH_HEADER,
+   PRODUCER_SEQ_HEADER,
+   PRODUCER_EXPECTED_SEQ_HEADER,
+   PRODUCER_RECEIVED_SEQ_HEADER,
  } from "./constants"
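
Editor's note: the newly exported header constants make the wire protocol scriptable without the producer class. A hypothetical hand-rolled append, shown only to illustrate the header semantics: the endpoint, header values, and body are placeholders, and the actual header strings live in `./constants` (not shown in this diff).

```typescript
import {
  PRODUCER_ID_HEADER,
  PRODUCER_EPOCH_HEADER,
  PRODUCER_SEQ_HEADER,
} from "@durable-streams/client"

const res = await fetch("https://example.com/streams/orders", {
  method: "POST",
  headers: {
    "content-type": "application/json",
    [PRODUCER_ID_HEADER]: "order-service-1",
    [PRODUCER_EPOCH_HEADER]: "3",
    [PRODUCER_SEQ_HEADER]: "0", // first batch of this epoch
  },
  body: JSON.stringify([{ orderId: 42 }]), // JSON batches are sent as arrays
})

// Status mapping mirrors #doSendBatch above:
// 200 appended, 204 duplicate (idempotent success),
// 403 stale epoch, 409 sequence gap.
console.log(res.status)
```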
package/src/sse.ts CHANGED
@@ -50,6 +50,9 @@ export async function* parseSSEStream(
 
      buffer += decoder.decode(value, { stream: true })
 
+     // Normalize line endings: CRLF → LF, lone CR → LF (per SSE spec)
+     buffer = buffer.replace(/\r\n/g, `\n`).replace(/\r/g, `\n`)
+
      // Process complete lines
      const lines = buffer.split(`\n`)
      // Keep the last incomplete line in the buffer
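
Editor's note: the added normalization runs on the buffer after each decoded chunk, before line splitting. A standalone sketch of the same transform (the helper name is illustrative, not a package export), with one inferred caveat:

```typescript
// Mirrors the normalization added above. Per the SSE spec, CRLF, CR,
// and LF are all valid line terminators, so they are unified to LF.
function normalizeNewlines(chunk: string): string {
  return chunk.replace(/\r\n/g, "\n").replace(/\r/g, "\n")
}

normalizeNewlines("data: a\r\n\r\n") // => "data: a\n\n"
normalizeNewlines("data: b\rdata: c") // => "data: b\ndata: c"

// Caveat (inferred from the diff, not confirmed by the package): a trailing
// "\r" is converted to "\n" immediately rather than held until the next read,
// so a CRLF pair split across two reads ("...\r" then "\n...") would produce
// two LFs - i.e., one spurious blank line at that chunk boundary.
```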