@s2-dev/streamstore 0.16.11 → 0.17.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (123)
  1. package/dist/cjs/basin.d.ts +9 -2
  2. package/dist/cjs/basin.d.ts.map +1 -1
  3. package/dist/cjs/basin.js +14 -5
  4. package/dist/cjs/basin.js.map +1 -1
  5. package/dist/cjs/batch-transform.d.ts +64 -0
  6. package/dist/cjs/batch-transform.d.ts.map +1 -0
  7. package/dist/cjs/batch-transform.js +144 -0
  8. package/dist/cjs/batch-transform.js.map +1 -0
  9. package/dist/cjs/generated/client/utils.gen.d.ts +1 -1
  10. package/dist/cjs/generated/client/utils.gen.d.ts.map +1 -1
  11. package/dist/cjs/generated/client/utils.gen.js +7 -6
  12. package/dist/cjs/generated/client/utils.gen.js.map +1 -1
  13. package/dist/cjs/generated/core/bodySerializer.gen.d.ts +12 -4
  14. package/dist/cjs/generated/core/bodySerializer.gen.d.ts.map +1 -1
  15. package/dist/cjs/generated/core/bodySerializer.gen.js.map +1 -1
  16. package/dist/cjs/generated/proto/s2.d.ts.map +1 -1
  17. package/dist/cjs/generated/proto/s2.js.map +1 -1
  18. package/dist/cjs/index.d.ts +4 -2
  19. package/dist/cjs/index.d.ts.map +1 -1
  20. package/dist/cjs/index.js +5 -1
  21. package/dist/cjs/index.js.map +1 -1
  22. package/dist/cjs/lib/stream/factory.d.ts +15 -0
  23. package/dist/cjs/lib/stream/factory.d.ts.map +1 -0
  24. package/dist/cjs/lib/stream/factory.js +36 -0
  25. package/dist/cjs/lib/stream/factory.js.map +1 -0
  26. package/dist/cjs/lib/stream/runtime.d.ts +13 -0
  27. package/dist/cjs/lib/stream/runtime.d.ts.map +1 -0
  28. package/dist/cjs/lib/stream/runtime.js +50 -0
  29. package/dist/cjs/lib/stream/runtime.js.map +1 -0
  30. package/dist/cjs/lib/stream/transport/fetch/index.d.ts +79 -0
  31. package/dist/cjs/lib/stream/transport/fetch/index.d.ts.map +1 -0
  32. package/dist/cjs/lib/stream/transport/fetch/index.js +382 -0
  33. package/dist/cjs/lib/stream/transport/fetch/index.js.map +1 -0
  34. package/dist/cjs/lib/stream/transport/fetch/shared.d.ts +7 -0
  35. package/dist/cjs/lib/stream/transport/fetch/shared.d.ts.map +1 -0
  36. package/dist/cjs/lib/stream/transport/fetch/shared.js +170 -0
  37. package/dist/cjs/lib/stream/transport/fetch/shared.js.map +1 -0
  38. package/dist/cjs/lib/stream/transport/s2s/framing.d.ts +47 -0
  39. package/dist/cjs/lib/stream/transport/s2s/framing.d.ts.map +1 -0
  40. package/dist/cjs/lib/stream/transport/s2s/framing.js +123 -0
  41. package/dist/cjs/lib/stream/transport/s2s/framing.js.map +1 -0
  42. package/dist/cjs/lib/stream/transport/s2s/index.d.ts +23 -0
  43. package/dist/cjs/lib/stream/transport/s2s/index.d.ts.map +1 -0
  44. package/dist/cjs/lib/stream/transport/s2s/index.js +785 -0
  45. package/dist/cjs/lib/stream/transport/s2s/index.js.map +1 -0
  46. package/dist/cjs/lib/stream/types.d.ts +53 -0
  47. package/dist/cjs/lib/stream/types.d.ts.map +1 -0
  48. package/dist/cjs/lib/stream/types.js +3 -0
  49. package/dist/cjs/lib/stream/types.js.map +1 -0
  50. package/dist/cjs/s2.d.ts +1 -0
  51. package/dist/cjs/s2.d.ts.map +1 -1
  52. package/dist/cjs/s2.js +3 -0
  53. package/dist/cjs/s2.js.map +1 -1
  54. package/dist/cjs/stream.d.ts +22 -116
  55. package/dist/cjs/stream.d.ts.map +1 -1
  56. package/dist/cjs/stream.js +34 -549
  57. package/dist/cjs/stream.js.map +1 -1
  58. package/dist/cjs/utils.d.ts +32 -6
  59. package/dist/cjs/utils.d.ts.map +1 -1
  60. package/dist/cjs/utils.js +129 -34
  61. package/dist/cjs/utils.js.map +1 -1
  62. package/dist/esm/basin.d.ts +9 -2
  63. package/dist/esm/basin.d.ts.map +1 -1
  64. package/dist/esm/basin.js +14 -5
  65. package/dist/esm/basin.js.map +1 -1
  66. package/dist/esm/batch-transform.d.ts +64 -0
  67. package/dist/esm/batch-transform.d.ts.map +1 -0
  68. package/dist/esm/batch-transform.js +140 -0
  69. package/dist/esm/batch-transform.js.map +1 -0
  70. package/dist/esm/generated/client/utils.gen.d.ts +1 -1
  71. package/dist/esm/generated/client/utils.gen.d.ts.map +1 -1
  72. package/dist/esm/generated/client/utils.gen.js +7 -6
  73. package/dist/esm/generated/client/utils.gen.js.map +1 -1
  74. package/dist/esm/generated/core/bodySerializer.gen.d.ts +12 -4
  75. package/dist/esm/generated/core/bodySerializer.gen.d.ts.map +1 -1
  76. package/dist/esm/generated/core/bodySerializer.gen.js.map +1 -1
  77. package/dist/esm/generated/proto/s2.d.ts.map +1 -1
  78. package/dist/esm/generated/proto/s2.js.map +1 -1
  79. package/dist/esm/index.d.ts +4 -2
  80. package/dist/esm/index.d.ts.map +1 -1
  81. package/dist/esm/index.js +2 -1
  82. package/dist/esm/index.js.map +1 -1
  83. package/dist/esm/lib/stream/factory.d.ts +15 -0
  84. package/dist/esm/lib/stream/factory.d.ts.map +1 -0
  85. package/dist/esm/lib/stream/factory.js +33 -0
  86. package/dist/esm/lib/stream/factory.js.map +1 -0
  87. package/dist/esm/lib/stream/runtime.d.ts +13 -0
  88. package/dist/esm/lib/stream/runtime.d.ts.map +1 -0
  89. package/dist/esm/lib/stream/runtime.js +46 -0
  90. package/dist/esm/lib/stream/runtime.js.map +1 -0
  91. package/dist/esm/lib/stream/transport/fetch/index.d.ts +79 -0
  92. package/dist/esm/lib/stream/transport/fetch/index.d.ts.map +1 -0
  93. package/dist/esm/lib/stream/transport/fetch/index.js +376 -0
  94. package/dist/esm/lib/stream/transport/fetch/index.js.map +1 -0
  95. package/dist/esm/lib/stream/transport/fetch/shared.d.ts +7 -0
  96. package/dist/esm/lib/stream/transport/fetch/shared.d.ts.map +1 -0
  97. package/dist/esm/lib/stream/transport/fetch/shared.js +166 -0
  98. package/dist/esm/lib/stream/transport/fetch/shared.js.map +1 -0
  99. package/dist/esm/lib/stream/transport/s2s/framing.d.ts +47 -0
  100. package/dist/esm/lib/stream/transport/s2s/framing.d.ts.map +1 -0
  101. package/dist/esm/lib/stream/transport/s2s/framing.js +118 -0
  102. package/dist/esm/lib/stream/transport/s2s/framing.js.map +1 -0
  103. package/dist/esm/lib/stream/transport/s2s/index.d.ts +23 -0
  104. package/dist/esm/lib/stream/transport/s2s/index.d.ts.map +1 -0
  105. package/dist/esm/lib/stream/transport/s2s/index.js +781 -0
  106. package/dist/esm/lib/stream/transport/s2s/index.js.map +1 -0
  107. package/dist/esm/lib/stream/types.d.ts +53 -0
  108. package/dist/esm/lib/stream/types.d.ts.map +1 -0
  109. package/dist/esm/lib/stream/types.js +2 -0
  110. package/dist/esm/lib/stream/types.js.map +1 -0
  111. package/dist/esm/s2.d.ts +1 -0
  112. package/dist/esm/s2.d.ts.map +1 -1
  113. package/dist/esm/s2.js +3 -0
  114. package/dist/esm/s2.js.map +1 -1
  115. package/dist/esm/stream.d.ts +22 -116
  116. package/dist/esm/stream.d.ts.map +1 -1
  117. package/dist/esm/stream.js +36 -551
  118. package/dist/esm/stream.js.map +1 -1
  119. package/dist/esm/utils.d.ts +32 -6
  120. package/dist/esm/utils.d.ts.map +1 -1
  121. package/dist/esm/utils.js +126 -34
  122. package/dist/esm/utils.js.map +1 -1
  123. package/package.json +2 -4
package/dist/esm/lib/stream/transport/s2s/index.js
@@ -0,0 +1,781 @@
+ /**
+  * S2S HTTP/2 transport for Node.js
+  * Uses the s2s binary protocol over HTTP/2 for efficient streaming
+  *
+  * This file should only be imported in Node.js environments
+  */
+ import * as http2 from "node:http2";
+ import { createClient, createConfig, } from "../../../../generated/client/index.js";
+ import { AppendAck as ProtoAppendAck, AppendInput as ProtoAppendInput, ReadBatch as ProtoReadBatch, } from "../../../../generated/proto/s2.js";
+ import { S2Error } from "../../../../index.js";
+ import { meteredSizeBytes } from "../../../../utils.js";
+ import * as Redacted from "../../../redacted.js";
+ import { frameMessage, S2SFrameParser } from "./framing.js";
+ export class S2STransport {
+   client;
+   transportConfig;
+   connection;
+   connectionPromise;
+   constructor(config) {
+     this.client = createClient(createConfig({
+       baseUrl: config.baseUrl,
+       auth: () => Redacted.value(config.accessToken),
+     }));
+     this.transportConfig = config;
+   }
+   async makeAppendSession(stream, sessionOptions, requestOptions) {
+     return S2SAppendSession.create(this.transportConfig.baseUrl, this.transportConfig.accessToken, stream, () => this.getConnection(), sessionOptions, requestOptions);
+   }
+   async makeReadSession(stream, args, options) {
+     return S2SReadSession.create(this.transportConfig.baseUrl, this.transportConfig.accessToken, stream, args, options, () => this.getConnection());
+   }
+   /**
+    * Get or create HTTP/2 connection (one per transport)
+    */
+   async getConnection() {
+     if (this.connection &&
+       !this.connection.closed &&
+       !this.connection.destroyed) {
+       return this.connection;
+     }
+     // If connection is in progress, wait for it
+     if (this.connectionPromise) {
+       return this.connectionPromise;
+     }
+     // Create new connection
+     this.connectionPromise = this.createConnection();
+     try {
+       this.connection = await this.connectionPromise;
+       return this.connection;
+     }
+     finally {
+       this.connectionPromise = undefined;
+     }
+   }
+   async createConnection() {
+     const url = new URL(this.transportConfig.baseUrl);
+     const client = http2.connect(url.origin, {
+       // Use HTTPS settings
+       ...(url.protocol === "https:"
+         ? {
+           // TLS options can go here if needed
+         }
+         : {}),
+     });
+     return new Promise((resolve, reject) => {
+       client.once("connect", () => {
+         resolve(client);
+       });
+       client.once("error", (err) => {
+         reject(err);
+       });
+       // Handle connection close
+       client.once("close", () => {
+         if (this.connection === client) {
+           this.connection = undefined;
+         }
+       });
+     });
+   }
+ }
+ class S2SReadSession extends ReadableStream {
+   streamName;
+   args;
+   authToken;
+   url;
+   options;
+   getConnection;
+   http2Stream;
+   _lastReadPosition;
+   parser = new S2SFrameParser();
+   static async create(baseUrl, bearerToken, streamName, args, options, getConnection) {
+     const url = new URL(baseUrl);
+     return new S2SReadSession(streamName, args, bearerToken, url, options, getConnection);
+   }
+   constructor(streamName, args, authToken, url, options, getConnection) {
+     // Initialize parser and textDecoder before super() call
+     const parser = new S2SFrameParser();
+     const textDecoder = new TextDecoder();
+     let http2Stream;
+     let lastReadPosition;
+     super({
+       start: async (controller) => {
+         let controllerClosed = false;
+         const safeClose = () => {
+           if (!controllerClosed) {
+             controllerClosed = true;
+             try {
+               controller.close();
+             }
+             catch {
+               // Controller may already be closed, ignore
+             }
+           }
+         };
+         const safeError = (err) => {
+           if (!controllerClosed) {
+             controllerClosed = true;
+             controller.error(err);
+           }
+         };
+         try {
+           const connection = await getConnection();
+           // Build query string
+           const queryParams = new URLSearchParams();
+           const { as, ...readParams } = args ?? {};
+           if (readParams.seq_num !== undefined)
+             queryParams.set("seq_num", readParams.seq_num.toString());
+           if (readParams.timestamp !== undefined)
+             queryParams.set("timestamp", readParams.timestamp.toString());
+           if (readParams.tail_offset !== undefined)
+             queryParams.set("tail_offset", readParams.tail_offset.toString());
+           if (readParams.count !== undefined)
+             queryParams.set("count", readParams.count.toString());
+           if (readParams.bytes !== undefined)
+             queryParams.set("bytes", readParams.bytes.toString());
+           if (readParams.wait !== undefined)
+             queryParams.set("wait", readParams.wait.toString());
+           if (typeof readParams.until === "number") {
+             queryParams.set("until", readParams.until.toString());
+           }
+           const queryString = queryParams.toString();
+           const path = `${url.pathname}/streams/${encodeURIComponent(streamName)}/records${queryString ? `?${queryString}` : ""}`;
+           const stream = connection.request({
+             ":method": "GET",
+             ":path": path,
+             ":scheme": url.protocol.slice(0, -1),
+             ":authority": url.host,
+             authorization: `Bearer ${Redacted.value(authToken)}`,
+             accept: "application/protobuf",
+             "content-type": "s2s/proto",
+           });
+           http2Stream = stream;
+           options?.signal?.addEventListener("abort", () => {
+             if (!stream.closed) {
+               stream.close();
+             }
+           });
+           stream.on("data", (chunk) => {
+             // Buffer already extends Uint8Array in Node.js, no need to convert
+             parser.push(chunk);
+             let frame = parser.parseFrame();
+             while (frame) {
+               if (frame.terminal) {
+                 if (frame.statusCode && frame.statusCode >= 400) {
+                   const errorText = textDecoder.decode(frame.body);
+                   try {
+                     const errorJson = JSON.parse(errorText);
+                     safeError(new S2Error({
+                       message: errorJson.message ?? "Unknown error",
+                       code: errorJson.code,
+                       status: frame.statusCode,
+                     }));
+                   }
+                   catch {
+                     safeError(new S2Error({
+                       message: errorText || "Unknown error",
+                       status: frame.statusCode,
+                     }));
+                   }
+                 }
+                 else {
+                   safeClose();
+                 }
+                 stream.close();
+               }
+               else {
+                 // Parse ReadBatch
+                 try {
+                   const protoBatch = ProtoReadBatch.fromBinary(frame.body);
+                   // Update position from tail
+                   if (protoBatch.tail) {
+                     lastReadPosition = convertStreamPosition(protoBatch.tail);
+                     // Assign to instance property
+                     this._lastReadPosition = lastReadPosition;
+                   }
+                   // Enqueue each record
+                   for (const record of protoBatch.records) {
+                     const converted = this.convertRecord(record, as ?? "string", textDecoder);
+                     controller.enqueue(converted);
+                   }
+                 }
+                 catch (err) {
+                   safeError(new S2Error({
+                     message: `Failed to parse ReadBatch: ${err}`,
+                   }));
+                 }
+               }
+               frame = parser.parseFrame();
+             }
+           });
+           stream.on("error", (err) => {
+             safeError(err);
+           });
+           stream.on("close", () => {
+             safeClose();
+           });
+         }
+         catch (err) {
+           safeError(err);
+         }
+       },
+       cancel: async () => {
+         if (http2Stream && !http2Stream.closed) {
+           http2Stream.close();
+         }
+       },
+     });
+     this.streamName = streamName;
+     this.args = args;
+     this.authToken = authToken;
+     this.url = url;
+     this.options = options;
+     this.getConnection = getConnection;
+     // Assign parser to instance property after super() completes
+     this.parser = parser;
+     this.http2Stream = http2Stream;
+   }
+   /**
+    * Convert a protobuf SequencedRecord to the requested format
+    */
+   convertRecord(record, format, textDecoder) {
+     if (format === "bytes") {
+       return {
+         seq_num: Number(record.seqNum),
+         timestamp: Number(record.timestamp),
+         headers: record.headers?.map((h) => [h.name ?? new Uint8Array(), h.value ?? new Uint8Array()]),
+         body: record.body,
+       };
+     }
+     else {
+       // Convert to string format
+       return {
+         seq_num: Number(record.seqNum),
+         timestamp: Number(record.timestamp),
+         headers: record.headers?.map((h) => [
+           h.name ? textDecoder.decode(h.name) : "",
+           h.value ? textDecoder.decode(h.value) : "",
+         ]),
+         body: record.body ? textDecoder.decode(record.body) : undefined,
+       };
+     }
+   }
+   async [Symbol.asyncDispose]() {
+     await this.cancel("disposed");
+   }
+   // Polyfill for older browsers / Node.js environments
+   [Symbol.asyncIterator]() {
+     const fn = ReadableStream.prototype[Symbol.asyncIterator];
+     if (typeof fn === "function")
+       return fn.call(this);
+     const reader = this.getReader();
+     return {
+       next: async () => {
+         const r = await reader.read();
+         if (r.done) {
+           reader.releaseLock();
+           return { done: true, value: undefined };
+         }
+         return { done: false, value: r.value };
+       },
+       throw: async (e) => {
+         await reader.cancel(e);
+         reader.releaseLock();
+         return { done: true, value: undefined };
+       },
+       return: async () => {
+         await reader.cancel("done");
+         reader.releaseLock();
+         return { done: true, value: undefined };
+       },
+       [Symbol.asyncIterator]() {
+         return this;
+       },
+     };
+   }
+   lastReadPosition() {
+     return this._lastReadPosition;
+   }
+ }
+ /**
+  * AcksStream for S2S append session
+  */
+ class S2SAcksStream extends ReadableStream {
+   constructor(setController) {
+     super({
+       start: (controller) => {
+         setController(controller);
+       },
+     });
+   }
+   async [Symbol.asyncDispose]() {
+     await this.cancel("disposed");
+   }
+   // Polyfill for older browsers
+   [Symbol.asyncIterator]() {
+     const fn = ReadableStream.prototype[Symbol.asyncIterator];
+     if (typeof fn === "function")
+       return fn.call(this);
+     const reader = this.getReader();
+     return {
+       next: async () => {
+         const r = await reader.read();
+         if (r.done) {
+           reader.releaseLock();
+           return { done: true, value: undefined };
+         }
+         return { done: false, value: r.value };
+       },
+       throw: async (e) => {
+         await reader.cancel(e);
+         reader.releaseLock();
+         return { done: true, value: undefined };
+       },
+       return: async () => {
+         await reader.cancel("done");
+         reader.releaseLock();
+         return { done: true, value: undefined };
+       },
+       [Symbol.asyncIterator]() {
+         return this;
+       },
+     };
+   }
+ }
+ /**
+  * S2S Append Session for pipelined writes
+  * Unlike fetch-based append, writes don't block on acks - only on submission
+  */
+ class S2SAppendSession {
+   baseUrl;
+   authToken;
+   streamName;
+   getConnection;
+   options;
+   http2Stream;
+   _lastAckedPosition;
+   parser = new S2SFrameParser();
+   acksController;
+   _readable;
+   _writable;
+   closed = false;
+   queuedBytes = 0;
+   maxQueuedBytes;
+   waitingForCapacity = [];
+   pendingAcks = [];
+   initPromise;
+   readable;
+   writable;
+   static async create(baseUrl, bearerToken, streamName, getConnection, sessionOptions, requestOptions) {
+     return new S2SAppendSession(baseUrl, bearerToken, streamName, getConnection, sessionOptions, requestOptions);
+   }
+   constructor(baseUrl, authToken, streamName, getConnection, sessionOptions, options) {
+     this.baseUrl = baseUrl;
+     this.authToken = authToken;
+     this.streamName = streamName;
+     this.getConnection = getConnection;
+     this.options = options;
+     this.maxQueuedBytes = sessionOptions?.maxQueuedBytes ?? 10 * 1024 * 1024; // 10 MiB default
+     // Create the readable stream for acks
+     this._readable = new S2SAcksStream((controller) => {
+       this.acksController = controller;
+     });
+     this.readable = this._readable;
+     // Create the writable stream
+     this._writable = new WritableStream({
+       start: async (controller) => {
+         this.initPromise = this.initializeStream();
+         await this.initPromise;
+       },
+       write: async (chunk) => {
+         if (this.closed) {
+           throw new S2Error({ message: "AppendSession is closed" });
+         }
+         const recordsArray = Array.isArray(chunk.records)
+           ? chunk.records
+           : [chunk.records];
+         // Validate batch size limits
+         if (recordsArray.length > 1000) {
+           throw new S2Error({
+             message: `Batch of ${recordsArray.length} exceeds maximum batch size of 1000 records`,
+           });
+         }
+         // Calculate metered size
+         let batchMeteredSize = 0;
+         for (const record of recordsArray) {
+           batchMeteredSize += meteredSizeBytes(record);
+         }
+         if (batchMeteredSize > 1024 * 1024) {
+           throw new S2Error({
+             message: `Batch size ${batchMeteredSize} bytes exceeds maximum of 1 MiB (1048576 bytes)`,
+           });
+         }
+         // Wait for capacity if needed (backpressure)
+         while (this.queuedBytes + batchMeteredSize > this.maxQueuedBytes &&
+           !this.closed) {
+           await new Promise((resolve) => {
+             this.waitingForCapacity.push(resolve);
+           });
+         }
+         if (this.closed) {
+           throw new S2Error({ message: "AppendSession is closed" });
+         }
+         // Send the batch immediately (pipelined)
+         // Returns when frame is sent, not when ack is received
+         await this.sendBatchNonBlocking(recordsArray, chunk, batchMeteredSize);
+       },
+       close: async () => {
+         this.closed = true;
+         await this.closeStream();
+       },
+       abort: async (reason) => {
+         this.closed = true;
+         this.queuedBytes = 0;
+         // Reject all pending acks
+         const error = new S2Error({
+           message: `AppendSession was aborted: ${reason}`,
+         });
+         for (const pending of this.pendingAcks) {
+           pending.reject(error);
+         }
+         this.pendingAcks = [];
+         // Wake up all waiting for capacity
+         for (const resolver of this.waitingForCapacity) {
+           resolver();
+         }
+         this.waitingForCapacity = [];
+         if (this.http2Stream && !this.http2Stream.closed) {
+           this.http2Stream.close();
+         }
+       },
+     });
+     this.writable = this._writable;
+   }
+   async initializeStream() {
+     const url = new URL(this.baseUrl);
+     const connection = await this.getConnection();
+     const path = `${url.pathname}/streams/${encodeURIComponent(this.streamName)}/records`;
+     const stream = connection.request({
+       ":method": "POST",
+       ":path": path,
+       ":scheme": url.protocol.slice(0, -1),
+       ":authority": url.host,
+       authorization: `Bearer ${Redacted.value(this.authToken)}`,
+       "content-type": "s2s/proto",
+       accept: "application/protobuf",
+     });
+     this.http2Stream = stream;
+     this.options?.signal?.addEventListener("abort", () => {
+       if (!stream.closed) {
+         stream.close();
+       }
+     });
+     const textDecoder = new TextDecoder();
+     let controllerClosed = false;
+     const safeClose = () => {
+       if (!controllerClosed && this.acksController) {
+         controllerClosed = true;
+         try {
+           this.acksController.close();
+         }
+         catch {
+           // Controller may already be closed, ignore
+         }
+       }
+     };
+     const safeError = (err) => {
+       if (!controllerClosed && this.acksController) {
+         controllerClosed = true;
+         this.acksController.error(err);
+       }
+       // Reject all pending acks
+       for (const pending of this.pendingAcks) {
+         pending.reject(err);
+       }
+       this.pendingAcks = [];
+     };
+     // Handle incoming data (acks)
+     stream.on("data", (chunk) => {
+       this.parser.push(chunk);
+       let frame = this.parser.parseFrame();
+       while (frame) {
+         if (frame.terminal) {
+           if (frame.statusCode && frame.statusCode >= 400) {
+             const errorText = textDecoder.decode(frame.body);
+             try {
+               const errorJson = JSON.parse(errorText);
+               safeError(new S2Error({
+                 message: errorJson.message ?? "Unknown error",
+                 code: errorJson.code,
+                 status: frame.statusCode,
+               }));
+             }
+             catch {
+               safeError(new S2Error({
+                 message: errorText || "Unknown error",
+                 status: frame.statusCode,
+               }));
+             }
+           }
+           else {
+             safeClose();
+           }
+           stream.close();
+         }
+         else {
+           // Parse AppendAck
+           try {
+             const protoAck = ProtoAppendAck.fromBinary(frame.body);
+             const ack = convertAppendAck(protoAck);
+             this._lastAckedPosition = ack;
+             // Enqueue to readable stream
+             if (this.acksController) {
+               this.acksController.enqueue(ack);
+             }
+             // Resolve the pending ack promise
+             const pending = this.pendingAcks.shift();
+             if (pending) {
+               pending.resolve(ack);
+               // Release capacity
+               this.queuedBytes -= pending.batchSize;
+               // Wake up one waiting writer
+               if (this.waitingForCapacity.length > 0) {
+                 const waiter = this.waitingForCapacity.shift();
+                 waiter();
+               }
+             }
+           }
+           catch (err) {
+             safeError(new S2Error({
+               message: `Failed to parse AppendAck: ${err}`,
+             }));
+           }
+         }
+         frame = this.parser.parseFrame();
+       }
+     });
+     stream.on("error", (err) => {
+       safeError(err);
+     });
+     stream.on("close", () => {
+       safeClose();
+     });
+   }
+   /**
+    * Send a batch non-blocking (returns when frame is sent, not when ack is received)
+    */
+   sendBatchNonBlocking(records, args, batchMeteredSize) {
+     if (!this.http2Stream || this.http2Stream.closed) {
+       return Promise.reject(new S2Error({ message: "HTTP/2 stream is not open" }));
+     }
+     // Convert to protobuf AppendInput
+     const textEncoder = new TextEncoder();
+     const protoInput = ProtoAppendInput.create({
+       records: records.map((record) => {
+         // Convert headers to array of tuples if it's a Record
+         let headersArray;
+         if (record.headers) {
+           if (Array.isArray(record.headers)) {
+             headersArray = record.headers;
+           }
+           else {
+             // Convert Record to array of tuples
+             headersArray = Object.entries(record.headers);
+           }
+         }
+         return {
+           headers: headersArray?.map((h) => ({
+             name: typeof h[0] === "string" ? textEncoder.encode(h[0]) : h[0],
+             value: typeof h[1] === "string" ? textEncoder.encode(h[1]) : h[1],
+           })),
+           body: typeof record.body === "string"
+             ? textEncoder.encode(record.body)
+             : record.body,
+         };
+       }),
+       fencingToken: args.fencing_token ?? undefined,
+       matchSeqNum: args.match_seq_num ? BigInt(args.match_seq_num) : undefined,
+     });
+     const bodyBytes = ProtoAppendInput.toBinary(protoInput);
+     // Frame the message
+     const frame = frameMessage({
+       terminal: false,
+       body: bodyBytes,
+     });
+     // This promise resolves when the frame is written (not when ack is received)
+     return new Promise((resolve, reject) => {
+       // Track pending ack - will be resolved when ack arrives
+       const ackPromise = {
+         resolve: () => { },
+         reject,
+         batchSize: batchMeteredSize,
+       };
+       this.pendingAcks.push(ackPromise);
+       this.queuedBytes += batchMeteredSize;
+       // Send the frame (pipelined)
+       this.http2Stream.write(frame, (err) => {
+         if (err) {
+           // Remove from pending acks on write error
+           const idx = this.pendingAcks.indexOf(ackPromise);
+           if (idx !== -1) {
+             this.pendingAcks.splice(idx, 1);
+             this.queuedBytes -= batchMeteredSize;
+           }
+           reject(err);
+         }
+         else {
+           // Frame written successfully - resolve immediately (pipelined)
+           resolve();
+         }
+       });
+     });
+   }
+   /**
+    * Send a batch and wait for ack (used by submit method)
+    */
+   sendBatch(records, args, batchMeteredSize) {
+     if (!this.http2Stream || this.http2Stream.closed) {
+       return Promise.reject(new S2Error({ message: "HTTP/2 stream is not open" }));
+     }
+     // Convert to protobuf AppendInput
+     const textEncoder = new TextEncoder();
+     const protoInput = ProtoAppendInput.create({
+       records: records.map((record) => {
+         // Convert headers to array of tuples if it's a Record
+         let headersArray;
+         if (record.headers) {
+           if (Array.isArray(record.headers)) {
+             headersArray = record.headers;
+           }
+           else {
+             // Convert Record to array of tuples
+             headersArray = Object.entries(record.headers);
+           }
+         }
+         return {
+           headers: headersArray?.map((h) => ({
+             name: typeof h[0] === "string" ? textEncoder.encode(h[0]) : h[0],
+             value: typeof h[1] === "string" ? textEncoder.encode(h[1]) : h[1],
+           })),
+           body: typeof record.body === "string"
+             ? textEncoder.encode(record.body)
+             : record.body,
+         };
+       }),
+       fencingToken: args.fencing_token ?? undefined,
+       matchSeqNum: args.match_seq_num ? BigInt(args.match_seq_num) : undefined,
+     });
+     const bodyBytes = ProtoAppendInput.toBinary(protoInput);
+     // Frame the message
+     const frame = frameMessage({
+       terminal: false,
+       body: bodyBytes,
+     });
+     // Track pending ack - this promise resolves when the ack is received
+     return new Promise((resolve, reject) => {
+       this.pendingAcks.push({
+         resolve,
+         reject,
+         batchSize: batchMeteredSize,
+       });
+       this.queuedBytes += batchMeteredSize;
+       // Send the frame (non-blocking - pipelined)
+       this.http2Stream.write(frame, (err) => {
+         if (err) {
+           // Remove from pending acks on write error
+           const idx = this.pendingAcks.findIndex((p) => p.reject === reject);
+           if (idx !== -1) {
+             this.pendingAcks.splice(idx, 1);
+             this.queuedBytes -= batchMeteredSize;
+           }
+           reject(err);
+         }
+         // Write completed, but promise resolves when ack is received
+       });
+     });
+   }
+   async closeStream() {
+     // Wait for all pending acks
+     while (this.pendingAcks.length > 0) {
+       await new Promise((resolve) => setTimeout(resolve, 10));
+     }
+     // Close the HTTP/2 stream (client doesn't send terminal frame for clean close)
+     if (this.http2Stream && !this.http2Stream.closed) {
+       this.http2Stream.end();
+     }
+   }
+   async [Symbol.asyncDispose]() {
+     await this.close();
+   }
+   /**
+    * Get a stream of acknowledgements for appends.
+    */
+   acks() {
+     return this._readable;
+   }
+   /**
+    * Close the append session.
+    * Waits for all pending appends to complete before resolving.
+    */
+   async close() {
+     await this.writable.close();
+   }
+   /**
+    * Submit an append request to the session.
+    * Returns a promise that resolves with the ack when received.
+    */
+   async submit(records, args) {
+     if (this.closed) {
+       return Promise.reject(new S2Error({ message: "AppendSession is closed" }));
+     }
+     // Wait for initialization
+     if (this.initPromise) {
+       await this.initPromise;
+     }
+     const recordsArray = Array.isArray(records) ? records : [records];
+     // Validate batch size limits
+     if (recordsArray.length > 1000) {
+       return Promise.reject(new S2Error({
+         message: `Batch of ${recordsArray.length} exceeds maximum batch size of 1000 records`,
+       }));
+     }
+     // Calculate metered size
+     let batchMeteredSize = 0;
+     for (const record of recordsArray) {
+       batchMeteredSize += meteredSizeBytes(record);
+     }
+     if (batchMeteredSize > 1024 * 1024) {
+       return Promise.reject(new S2Error({
+         message: `Batch size ${batchMeteredSize} bytes exceeds maximum of 1 MiB (1048576 bytes)`,
+       }));
+     }
+     return this.sendBatch(recordsArray, {
+       records: recordsArray,
+       fencing_token: args?.fencing_token,
+       match_seq_num: args?.match_seq_num,
+     }, batchMeteredSize);
+   }
+   lastAckedPosition() {
+     return this._lastAckedPosition;
+   }
+ }
+ /**
+  * Convert protobuf StreamPosition to OpenAPI StreamPosition
+  */
+ function convertStreamPosition(proto) {
+   return {
+     seq_num: Number(proto.seqNum),
+     timestamp: Number(proto.timestamp),
+   };
+ }
+ function convertAppendAck(proto) {
+   if (!proto.start || !proto.end || !proto.tail) {
+     throw new Error("Invariant violation: AppendAck is missing required fields");
+   }
+   return {
+     start: convertStreamPosition(proto.start),
+     end: convertStreamPosition(proto.end),
+     tail: convertStreamPosition(proto.tail),
+   };
+ }
+ //# sourceMappingURL=index.js.map
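
Note on the append path added above: S2SAppendSession pipelines writes over a single HTTP/2 stream. Each batch is framed and written immediately, a pending-ack entry recording its metered size is queued, and writers only wait when queuedBytes would exceed maxQueuedBytes; when an AppendAck frame arrives, the oldest entry is resolved, its bytes are released, and one waiting writer is woken. The following self-contained sketch shows that bookkeeping in isolation; it is illustrative only (plain JavaScript, no imports from this package), and PipelinedSender and its members are invented names, not SDK API.

// Illustrative sketch of the queuedBytes / pendingAcks / waitingForCapacity
// bookkeeping used by S2SAppendSession above. Names are invented; not SDK API.
class PipelinedSender {
  constructor(maxQueuedBytes = 10 * 1024 * 1024) {
    this.maxQueuedBytes = maxQueuedBytes;
    this.queuedBytes = 0;
    this.pendingAcks = [];        // FIFO of { resolve, size }, oldest batch first
    this.waitingForCapacity = []; // writers blocked by backpressure
  }
  // Waits for capacity, queues the batch, and returns a promise that resolves
  // only when the matching ack arrives (like submit() in the diff).
  async submit(batchSizeBytes) {
    while (this.queuedBytes + batchSizeBytes > this.maxQueuedBytes) {
      await new Promise((resolve) => this.waitingForCapacity.push(resolve));
    }
    this.queuedBytes += batchSizeBytes;
    // The real transport writes a framed AppendInput to the HTTP/2 stream here.
    return new Promise((resolve) => {
      this.pendingAcks.push({ resolve, size: batchSizeBytes });
    });
  }
  // Called for each incoming ack frame: acks arrive in order, so the oldest
  // pending batch is resolved, its capacity is released, and one writer wakes.
  onAck(ack) {
    const pending = this.pendingAcks.shift();
    if (!pending) return;
    this.queuedBytes -= pending.size;
    pending.resolve(ack);
    const waiter = this.waitingForCapacity.shift();
    if (waiter) waiter();
  }
}

The effect is the trade-off the file's header comment describes: a write resolves as soon as it is submitted to the transport, while acknowledgements are observed separately (via acks() or the promise returned by submit()), so many batches can be in flight on one connection at once.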