@s2-dev/streamstore 0.16.12 → 0.17.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (109)
  1. package/dist/cjs/basin.d.ts +6 -2
  2. package/dist/cjs/basin.d.ts.map +1 -1
  3. package/dist/cjs/basin.js +11 -5
  4. package/dist/cjs/basin.js.map +1 -1
  5. package/dist/cjs/batch-transform.d.ts +64 -0
  6. package/dist/cjs/batch-transform.d.ts.map +1 -0
  7. package/dist/cjs/batch-transform.js +144 -0
  8. package/dist/cjs/batch-transform.js.map +1 -0
  9. package/dist/cjs/generated/proto/s2.d.ts.map +1 -0
  10. package/dist/cjs/generated/proto/s2.js.map +1 -0
  11. package/dist/cjs/index.d.ts +4 -2
  12. package/dist/cjs/index.d.ts.map +1 -1
  13. package/dist/cjs/index.js +5 -1
  14. package/dist/cjs/index.js.map +1 -1
  15. package/dist/cjs/lib/stream/factory.d.ts +15 -0
  16. package/dist/cjs/lib/stream/factory.d.ts.map +1 -0
  17. package/dist/cjs/lib/stream/factory.js +36 -0
  18. package/dist/cjs/lib/stream/factory.js.map +1 -0
  19. package/dist/cjs/lib/stream/runtime.d.ts +13 -0
  20. package/dist/cjs/lib/stream/runtime.d.ts.map +1 -0
  21. package/dist/cjs/lib/stream/runtime.js +50 -0
  22. package/dist/cjs/lib/stream/runtime.js.map +1 -0
  23. package/dist/cjs/lib/stream/transport/fetch/index.d.ts +79 -0
  24. package/dist/cjs/lib/stream/transport/fetch/index.d.ts.map +1 -0
  25. package/dist/cjs/lib/stream/transport/fetch/index.js +382 -0
  26. package/dist/cjs/lib/stream/transport/fetch/index.js.map +1 -0
  27. package/dist/cjs/lib/stream/transport/fetch/shared.d.ts +7 -0
  28. package/dist/cjs/lib/stream/transport/fetch/shared.d.ts.map +1 -0
  29. package/dist/cjs/lib/stream/transport/fetch/shared.js +170 -0
  30. package/dist/cjs/lib/stream/transport/fetch/shared.js.map +1 -0
  31. package/dist/cjs/lib/stream/transport/s2s/framing.d.ts +47 -0
  32. package/dist/cjs/lib/stream/transport/s2s/framing.d.ts.map +1 -0
  33. package/dist/cjs/lib/stream/transport/s2s/framing.js +123 -0
  34. package/dist/cjs/lib/stream/transport/s2s/framing.js.map +1 -0
  35. package/dist/cjs/lib/stream/transport/s2s/index.d.ts +23 -0
  36. package/dist/cjs/lib/stream/transport/s2s/index.d.ts.map +1 -0
  37. package/dist/cjs/lib/stream/transport/s2s/index.js +789 -0
  38. package/dist/cjs/lib/stream/transport/s2s/index.js.map +1 -0
  39. package/dist/cjs/lib/stream/types.d.ts +53 -0
  40. package/dist/cjs/lib/stream/types.d.ts.map +1 -0
  41. package/dist/cjs/lib/stream/types.js +3 -0
  42. package/dist/cjs/lib/stream/types.js.map +1 -0
  43. package/dist/cjs/stream.d.ts +22 -116
  44. package/dist/cjs/stream.d.ts.map +1 -1
  45. package/dist/cjs/stream.js +35 -551
  46. package/dist/cjs/stream.js.map +1 -1
  47. package/dist/cjs/utils.d.ts +32 -6
  48. package/dist/cjs/utils.d.ts.map +1 -1
  49. package/dist/cjs/utils.js +129 -34
  50. package/dist/cjs/utils.js.map +1 -1
  51. package/dist/esm/basin.d.ts +6 -2
  52. package/dist/esm/basin.d.ts.map +1 -1
  53. package/dist/esm/basin.js +11 -5
  54. package/dist/esm/basin.js.map +1 -1
  55. package/dist/esm/batch-transform.d.ts +64 -0
  56. package/dist/esm/batch-transform.d.ts.map +1 -0
  57. package/dist/esm/batch-transform.js +140 -0
  58. package/dist/esm/batch-transform.js.map +1 -0
  59. package/dist/esm/generated/proto/s2.d.ts.map +1 -0
  60. package/dist/esm/generated/proto/s2.js.map +1 -0
  61. package/dist/esm/index.d.ts +4 -2
  62. package/dist/esm/index.d.ts.map +1 -1
  63. package/dist/esm/index.js +2 -1
  64. package/dist/esm/index.js.map +1 -1
  65. package/dist/esm/lib/stream/factory.d.ts +15 -0
  66. package/dist/esm/lib/stream/factory.d.ts.map +1 -0
  67. package/dist/esm/lib/stream/factory.js +33 -0
  68. package/dist/esm/lib/stream/factory.js.map +1 -0
  69. package/dist/esm/lib/stream/runtime.d.ts +13 -0
  70. package/dist/esm/lib/stream/runtime.d.ts.map +1 -0
  71. package/dist/esm/lib/stream/runtime.js +46 -0
  72. package/dist/esm/lib/stream/runtime.js.map +1 -0
  73. package/dist/esm/lib/stream/transport/fetch/index.d.ts +79 -0
  74. package/dist/esm/lib/stream/transport/fetch/index.d.ts.map +1 -0
  75. package/dist/esm/lib/stream/transport/fetch/index.js +376 -0
  76. package/dist/esm/lib/stream/transport/fetch/index.js.map +1 -0
  77. package/dist/esm/lib/stream/transport/fetch/shared.d.ts +7 -0
  78. package/dist/esm/lib/stream/transport/fetch/shared.d.ts.map +1 -0
  79. package/dist/esm/lib/stream/transport/fetch/shared.js +166 -0
  80. package/dist/esm/lib/stream/transport/fetch/shared.js.map +1 -0
  81. package/dist/esm/lib/stream/transport/s2s/framing.d.ts +47 -0
  82. package/dist/esm/lib/stream/transport/s2s/framing.d.ts.map +1 -0
  83. package/dist/esm/lib/stream/transport/s2s/framing.js +118 -0
  84. package/dist/esm/lib/stream/transport/s2s/framing.js.map +1 -0
  85. package/dist/esm/lib/stream/transport/s2s/index.d.ts +23 -0
  86. package/dist/esm/lib/stream/transport/s2s/index.d.ts.map +1 -0
  87. package/dist/esm/lib/stream/transport/s2s/index.js +785 -0
  88. package/dist/esm/lib/stream/transport/s2s/index.js.map +1 -0
  89. package/dist/esm/lib/stream/types.d.ts +53 -0
  90. package/dist/esm/lib/stream/types.d.ts.map +1 -0
  91. package/dist/esm/lib/stream/types.js +2 -0
  92. package/dist/esm/lib/stream/types.js.map +1 -0
  93. package/dist/esm/stream.d.ts +22 -116
  94. package/dist/esm/stream.d.ts.map +1 -1
  95. package/dist/esm/stream.js +36 -551
  96. package/dist/esm/stream.js.map +1 -1
  97. package/dist/esm/utils.d.ts +32 -6
  98. package/dist/esm/utils.d.ts.map +1 -1
  99. package/dist/esm/utils.js +126 -34
  100. package/dist/esm/utils.js.map +1 -1
  101. package/package.json +5 -2
  102. package/dist/cjs/generated/proto/s2/v1/s2.d.ts.map +0 -1
  103. package/dist/cjs/generated/proto/s2/v1/s2.js.map +0 -1
  104. package/dist/esm/generated/proto/s2/v1/s2.d.ts.map +0 -1
  105. package/dist/esm/generated/proto/s2/v1/s2.js.map +0 -1
  106. /package/dist/cjs/generated/proto/{s2/v1/s2.d.ts → s2.d.ts} +0 -0
  107. /package/dist/cjs/generated/proto/{s2/v1/s2.js → s2.js} +0 -0
  108. /package/dist/esm/generated/proto/{s2/v1/s2.d.ts → s2.d.ts} +0 -0
  109. /package/dist/esm/generated/proto/{s2/v1/s2.js → s2.js} +0 -0
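
Note on items 102–109: the generated protobuf module moves from generated/proto/s2/v1/s2.* up to generated/proto/s2.*. This only matters for code that deep-imports the generated module out of dist; a sketch of the path change, assuming such deep imports are reachable at all in your setup (the requires below are hypothetical and illustrative only — the package root export remains the supported entry point):

    // Hypothetical deep import, shown only to illustrate the rename in items 106-109.
    // 0.16.12:
    // const proto = require("@s2-dev/streamstore/dist/cjs/generated/proto/s2/v1/s2.js");
    // 0.17.1:
    const proto = require("@s2-dev/streamstore/dist/cjs/generated/proto/s2.js");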
package/dist/cjs/lib/stream/transport/s2s/index.js
@@ -0,0 +1,789 @@
+ "use strict";
+ /**
+  * S2S HTTP/2 transport for Node.js
+  * Uses the s2s binary protocol over HTTP/2 for efficient streaming
+  *
+  * This file should only be imported in Node.js environments
+  */
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.S2STransport = void 0;
+ const http2 = require("node:http2");
+ const index_js_1 = require("../../../../generated/client/index.js");
+ const s2_js_1 = require("../../../../generated/proto/s2.js");
+ const index_js_2 = require("../../../../index.js");
+ const utils_js_1 = require("../../../../utils.js");
+ const Redacted = require("../../../redacted.js");
+ const framing_js_1 = require("./framing.js");
+ class S2STransport {
+     client;
+     transportConfig;
+     connection;
+     connectionPromise;
+     constructor(config) {
+         this.client = (0, index_js_1.createClient)((0, index_js_1.createConfig)({
+             baseUrl: config.baseUrl,
+             auth: () => Redacted.value(config.accessToken),
+         }));
+         this.transportConfig = config;
+     }
+     async makeAppendSession(stream, sessionOptions, requestOptions) {
+         return S2SAppendSession.create(this.transportConfig.baseUrl, this.transportConfig.accessToken, stream, () => this.getConnection(), sessionOptions, requestOptions);
+     }
+     async makeReadSession(stream, args, options) {
+         return S2SReadSession.create(this.transportConfig.baseUrl, this.transportConfig.accessToken, stream, args, options, () => this.getConnection());
+     }
+     /**
+      * Get or create HTTP/2 connection (one per transport)
+      */
+     async getConnection() {
+         if (this.connection &&
+             !this.connection.closed &&
+             !this.connection.destroyed) {
+             return this.connection;
+         }
+         // If connection is in progress, wait for it
+         if (this.connectionPromise) {
+             return this.connectionPromise;
+         }
+         // Create new connection
+         this.connectionPromise = this.createConnection();
+         try {
+             this.connection = await this.connectionPromise;
+             return this.connection;
+         }
+         finally {
+             this.connectionPromise = undefined;
+         }
+     }
+     async createConnection() {
+         const url = new URL(this.transportConfig.baseUrl);
+         const client = http2.connect(url.origin, {
+             // Use HTTPS settings
+             ...(url.protocol === "https:"
+                 ? {
+                     // TLS options can go here if needed
+                 }
+                 : {}),
+             settings: {
+                 initialWindowSize: 10 * 1024 * 1024, // 10 MB
+             },
+         });
+         return new Promise((resolve, reject) => {
+             client.once("connect", () => {
+                 client.setLocalWindowSize(10 * 1024 * 1024);
+                 resolve(client);
+             });
+             client.once("error", (err) => {
+                 reject(err);
+             });
+             // Handle connection close
+             client.once("close", () => {
+                 if (this.connection === client) {
+                     this.connection = undefined;
+                 }
+             });
+         });
+     }
+ }
+ exports.S2STransport = S2STransport;
+ class S2SReadSession extends ReadableStream {
+     streamName;
+     args;
+     authToken;
+     url;
+     options;
+     getConnection;
+     http2Stream;
+     _lastReadPosition;
+     parser = new framing_js_1.S2SFrameParser();
+     static async create(baseUrl, bearerToken, streamName, args, options, getConnection) {
+         const url = new URL(baseUrl);
+         return new S2SReadSession(streamName, args, bearerToken, url, options, getConnection);
+     }
+     constructor(streamName, args, authToken, url, options, getConnection) {
+         // Initialize parser and textDecoder before super() call
+         const parser = new framing_js_1.S2SFrameParser();
+         const textDecoder = new TextDecoder();
+         let http2Stream;
+         let lastReadPosition;
+         super({
+             start: async (controller) => {
+                 let controllerClosed = false;
+                 const safeClose = () => {
+                     if (!controllerClosed) {
+                         controllerClosed = true;
+                         try {
+                             controller.close();
+                         }
+                         catch {
+                             // Controller may already be closed, ignore
+                         }
+                     }
+                 };
+                 const safeError = (err) => {
+                     if (!controllerClosed) {
+                         controllerClosed = true;
+                         controller.error(err);
+                     }
+                 };
+                 try {
+                     const connection = await getConnection();
+                     // Build query string
+                     const queryParams = new URLSearchParams();
+                     const { as, ...readParams } = args ?? {};
+                     if (readParams.seq_num !== undefined)
+                         queryParams.set("seq_num", readParams.seq_num.toString());
+                     if (readParams.timestamp !== undefined)
+                         queryParams.set("timestamp", readParams.timestamp.toString());
+                     if (readParams.tail_offset !== undefined)
+                         queryParams.set("tail_offset", readParams.tail_offset.toString());
+                     if (readParams.count !== undefined)
+                         queryParams.set("count", readParams.count.toString());
+                     if (readParams.bytes !== undefined)
+                         queryParams.set("bytes", readParams.bytes.toString());
+                     if (readParams.wait !== undefined)
+                         queryParams.set("wait", readParams.wait.toString());
+                     if (typeof readParams.until === "number") {
+                         queryParams.set("until", readParams.until.toString());
+                     }
+                     const queryString = queryParams.toString();
+                     const path = `${url.pathname}/streams/${encodeURIComponent(streamName)}/records${queryString ? `?${queryString}` : ""}`;
+                     const stream = connection.request({
+                         ":method": "GET",
+                         ":path": path,
+                         ":scheme": url.protocol.slice(0, -1),
+                         ":authority": url.host,
+                         authorization: `Bearer ${Redacted.value(authToken)}`,
+                         accept: "application/protobuf",
+                         "content-type": "s2s/proto",
+                     });
+                     http2Stream = stream;
+                     options?.signal?.addEventListener("abort", () => {
+                         if (!stream.closed) {
+                             stream.close();
+                         }
+                     });
+                     stream.on("data", (chunk) => {
+                         // Buffer already extends Uint8Array in Node.js, no need to convert
+                         parser.push(chunk);
+                         let frame = parser.parseFrame();
+                         while (frame) {
+                             if (frame.terminal) {
+                                 if (frame.statusCode && frame.statusCode >= 400) {
+                                     const errorText = textDecoder.decode(frame.body);
+                                     try {
+                                         const errorJson = JSON.parse(errorText);
+                                         safeError(new index_js_2.S2Error({
+                                             message: errorJson.message ?? "Unknown error",
+                                             code: errorJson.code,
+                                             status: frame.statusCode,
+                                         }));
+                                     }
+                                     catch {
+                                         safeError(new index_js_2.S2Error({
+                                             message: errorText || "Unknown error",
+                                             status: frame.statusCode,
+                                         }));
+                                     }
+                                 }
+                                 else {
+                                     safeClose();
+                                 }
+                                 stream.close();
+                             }
+                             else {
+                                 // Parse ReadBatch
+                                 try {
+                                     const protoBatch = s2_js_1.ReadBatch.fromBinary(frame.body);
+                                     // Update position from tail
+                                     if (protoBatch.tail) {
+                                         lastReadPosition = convertStreamPosition(protoBatch.tail);
+                                         // Assign to instance property
+                                         this._lastReadPosition = lastReadPosition;
+                                     }
+                                     // Enqueue each record
+                                     for (const record of protoBatch.records) {
+                                         const converted = this.convertRecord(record, as ?? "string", textDecoder);
+                                         controller.enqueue(converted);
+                                     }
+                                 }
+                                 catch (err) {
+                                     safeError(new index_js_2.S2Error({
+                                         message: `Failed to parse ReadBatch: ${err}`,
+                                     }));
+                                 }
+                             }
+                             frame = parser.parseFrame();
+                         }
+                     });
+                     stream.on("error", (err) => {
+                         safeError(err);
+                     });
+                     stream.on("close", () => {
+                         safeClose();
+                     });
+                 }
+                 catch (err) {
+                     safeError(err);
+                 }
+             },
+             cancel: async () => {
+                 if (http2Stream && !http2Stream.closed) {
+                     http2Stream.close();
+                 }
+             },
+         });
+         this.streamName = streamName;
+         this.args = args;
+         this.authToken = authToken;
+         this.url = url;
+         this.options = options;
+         this.getConnection = getConnection;
+         // Assign parser to instance property after super() completes
+         this.parser = parser;
+         this.http2Stream = http2Stream;
+     }
+     /**
+      * Convert a protobuf SequencedRecord to the requested format
+      */
+     convertRecord(record, format, textDecoder) {
+         if (format === "bytes") {
+             return {
+                 seq_num: Number(record.seqNum),
+                 timestamp: Number(record.timestamp),
+                 headers: record.headers?.map((h) => [h.name ?? new Uint8Array(), h.value ?? new Uint8Array()]),
+                 body: record.body,
+             };
+         }
+         else {
+             // Convert to string format
+             return {
+                 seq_num: Number(record.seqNum),
+                 timestamp: Number(record.timestamp),
+                 headers: record.headers?.map((h) => [
+                     h.name ? textDecoder.decode(h.name) : "",
+                     h.value ? textDecoder.decode(h.value) : "",
+                 ]),
+                 body: record.body ? textDecoder.decode(record.body) : undefined,
+             };
+         }
+     }
+     async [Symbol.asyncDispose]() {
+         await this.cancel("disposed");
+     }
+     // Polyfill for older browsers / Node.js environments
+     [Symbol.asyncIterator]() {
+         const fn = ReadableStream.prototype[Symbol.asyncIterator];
+         if (typeof fn === "function")
+             return fn.call(this);
+         const reader = this.getReader();
+         return {
+             next: async () => {
+                 const r = await reader.read();
+                 if (r.done) {
+                     reader.releaseLock();
+                     return { done: true, value: undefined };
+                 }
+                 return { done: false, value: r.value };
+             },
+             throw: async (e) => {
+                 await reader.cancel(e);
+                 reader.releaseLock();
+                 return { done: true, value: undefined };
+             },
+             return: async () => {
+                 await reader.cancel("done");
+                 reader.releaseLock();
+                 return { done: true, value: undefined };
+             },
+             [Symbol.asyncIterator]() {
+                 return this;
+             },
+         };
+     }
+     lastReadPosition() {
+         return this._lastReadPosition;
+     }
+ }
+ /**
+  * AcksStream for S2S append session
+  */
+ class S2SAcksStream extends ReadableStream {
+     constructor(setController) {
+         super({
+             start: (controller) => {
+                 setController(controller);
+             },
+         });
+     }
+     async [Symbol.asyncDispose]() {
+         await this.cancel("disposed");
+     }
+     // Polyfill for older browsers
+     [Symbol.asyncIterator]() {
+         const fn = ReadableStream.prototype[Symbol.asyncIterator];
+         if (typeof fn === "function")
+             return fn.call(this);
+         const reader = this.getReader();
+         return {
+             next: async () => {
+                 const r = await reader.read();
+                 if (r.done) {
+                     reader.releaseLock();
+                     return { done: true, value: undefined };
+                 }
+                 return { done: false, value: r.value };
+             },
+             throw: async (e) => {
+                 await reader.cancel(e);
+                 reader.releaseLock();
+                 return { done: true, value: undefined };
+             },
+             return: async () => {
+                 await reader.cancel("done");
+                 reader.releaseLock();
+                 return { done: true, value: undefined };
+             },
+             [Symbol.asyncIterator]() {
+                 return this;
+             },
+         };
+     }
+ }
+ /**
+  * S2S Append Session for pipelined writes
+  * Unlike fetch-based append, writes don't block on acks - only on submission
+  */
+ class S2SAppendSession {
+     baseUrl;
+     authToken;
+     streamName;
+     getConnection;
+     options;
+     http2Stream;
+     _lastAckedPosition;
+     parser = new framing_js_1.S2SFrameParser();
+     acksController;
+     _readable;
+     _writable;
+     closed = false;
+     queuedBytes = 0;
+     maxQueuedBytes;
+     waitingForCapacity = [];
+     pendingAcks = [];
+     initPromise;
+     readable;
+     writable;
+     static async create(baseUrl, bearerToken, streamName, getConnection, sessionOptions, requestOptions) {
+         return new S2SAppendSession(baseUrl, bearerToken, streamName, getConnection, sessionOptions, requestOptions);
+     }
+     constructor(baseUrl, authToken, streamName, getConnection, sessionOptions, options) {
+         this.baseUrl = baseUrl;
+         this.authToken = authToken;
+         this.streamName = streamName;
+         this.getConnection = getConnection;
+         this.options = options;
+         this.maxQueuedBytes = sessionOptions?.maxQueuedBytes ?? 10 * 1024 * 1024; // 10 MiB default
+         // Create the readable stream for acks
+         this._readable = new S2SAcksStream((controller) => {
+             this.acksController = controller;
+         });
+         this.readable = this._readable;
+         // Create the writable stream
+         this._writable = new WritableStream({
+             start: async (controller) => {
+                 this.initPromise = this.initializeStream();
+                 await this.initPromise;
+             },
+             write: async (chunk) => {
+                 if (this.closed) {
+                     throw new index_js_2.S2Error({ message: "AppendSession is closed" });
+                 }
+                 const recordsArray = Array.isArray(chunk.records)
+                     ? chunk.records
+                     : [chunk.records];
+                 // Validate batch size limits
+                 if (recordsArray.length > 1000) {
+                     throw new index_js_2.S2Error({
+                         message: `Batch of ${recordsArray.length} exceeds maximum batch size of 1000 records`,
+                     });
+                 }
+                 // Calculate metered size
+                 let batchMeteredSize = 0;
+                 for (const record of recordsArray) {
+                     batchMeteredSize += (0, utils_js_1.meteredSizeBytes)(record);
+                 }
+                 if (batchMeteredSize > 1024 * 1024) {
+                     throw new index_js_2.S2Error({
+                         message: `Batch size ${batchMeteredSize} bytes exceeds maximum of 1 MiB (1048576 bytes)`,
+                     });
+                 }
+                 // Wait for capacity if needed (backpressure)
+                 while (this.queuedBytes + batchMeteredSize > this.maxQueuedBytes &&
+                     !this.closed) {
+                     await new Promise((resolve) => {
+                         this.waitingForCapacity.push(resolve);
+                     });
+                 }
+                 if (this.closed) {
+                     throw new index_js_2.S2Error({ message: "AppendSession is closed" });
+                 }
+                 // Send the batch immediately (pipelined)
+                 // Returns when frame is sent, not when ack is received
+                 await this.sendBatchNonBlocking(recordsArray, chunk, batchMeteredSize);
+             },
+             close: async () => {
+                 this.closed = true;
+                 await this.closeStream();
+             },
+             abort: async (reason) => {
+                 this.closed = true;
+                 this.queuedBytes = 0;
+                 // Reject all pending acks
+                 const error = new index_js_2.S2Error({
+                     message: `AppendSession was aborted: ${reason}`,
+                 });
+                 for (const pending of this.pendingAcks) {
+                     pending.reject(error);
+                 }
+                 this.pendingAcks = [];
+                 // Wake up all waiting for capacity
+                 for (const resolver of this.waitingForCapacity) {
+                     resolver();
+                 }
+                 this.waitingForCapacity = [];
+                 if (this.http2Stream && !this.http2Stream.closed) {
+                     this.http2Stream.close();
+                 }
+             },
+         });
+         this.writable = this._writable;
+     }
+     async initializeStream() {
+         const url = new URL(this.baseUrl);
+         const connection = await this.getConnection();
+         const path = `${url.pathname}/streams/${encodeURIComponent(this.streamName)}/records`;
+         const stream = connection.request({
+             ":method": "POST",
+             ":path": path,
+             ":scheme": url.protocol.slice(0, -1),
+             ":authority": url.host,
+             authorization: `Bearer ${Redacted.value(this.authToken)}`,
+             "content-type": "s2s/proto",
+             accept: "application/protobuf",
+         });
+         this.http2Stream = stream;
+         this.options?.signal?.addEventListener("abort", () => {
+             if (!stream.closed) {
+                 stream.close();
+             }
+         });
+         const textDecoder = new TextDecoder();
+         let controllerClosed = false;
+         const safeClose = () => {
+             if (!controllerClosed && this.acksController) {
+                 controllerClosed = true;
+                 try {
+                     this.acksController.close();
+                 }
+                 catch {
+                     // Controller may already be closed, ignore
+                 }
+             }
+         };
+         const safeError = (err) => {
+             if (!controllerClosed && this.acksController) {
+                 controllerClosed = true;
+                 this.acksController.error(err);
+             }
+             // Reject all pending acks
+             for (const pending of this.pendingAcks) {
+                 pending.reject(err);
+             }
+             this.pendingAcks = [];
+         };
+         // Handle incoming data (acks)
+         stream.on("data", (chunk) => {
+             this.parser.push(chunk);
+             let frame = this.parser.parseFrame();
+             while (frame) {
+                 if (frame.terminal) {
+                     if (frame.statusCode && frame.statusCode >= 400) {
+                         const errorText = textDecoder.decode(frame.body);
+                         try {
+                             const errorJson = JSON.parse(errorText);
+                             safeError(new index_js_2.S2Error({
+                                 message: errorJson.message ?? "Unknown error",
+                                 code: errorJson.code,
+                                 status: frame.statusCode,
+                             }));
+                         }
+                         catch {
+                             safeError(new index_js_2.S2Error({
+                                 message: errorText || "Unknown error",
+                                 status: frame.statusCode,
+                             }));
+                         }
+                     }
+                     else {
+                         safeClose();
+                     }
+                     stream.close();
+                 }
+                 else {
+                     // Parse AppendAck
+                     try {
+                         const protoAck = s2_js_1.AppendAck.fromBinary(frame.body);
+                         const ack = convertAppendAck(protoAck);
+                         this._lastAckedPosition = ack;
+                         // Enqueue to readable stream
+                         if (this.acksController) {
+                             this.acksController.enqueue(ack);
+                         }
+                         // Resolve the pending ack promise
+                         const pending = this.pendingAcks.shift();
+                         if (pending) {
+                             pending.resolve(ack);
+                             // Release capacity
+                             this.queuedBytes -= pending.batchSize;
+                             // Wake up one waiting writer
+                             if (this.waitingForCapacity.length > 0) {
+                                 const waiter = this.waitingForCapacity.shift();
+                                 waiter();
+                             }
+                         }
+                     }
+                     catch (err) {
+                         safeError(new index_js_2.S2Error({
+                             message: `Failed to parse AppendAck: ${err}`,
+                         }));
+                     }
+                 }
+                 frame = this.parser.parseFrame();
+             }
+         });
+         stream.on("error", (err) => {
+             safeError(err);
+         });
+         stream.on("close", () => {
+             safeClose();
+         });
+     }
+     /**
+      * Send a batch non-blocking (returns when frame is sent, not when ack is received)
+      */
+     sendBatchNonBlocking(records, args, batchMeteredSize) {
+         if (!this.http2Stream || this.http2Stream.closed) {
+             return Promise.reject(new index_js_2.S2Error({ message: "HTTP/2 stream is not open" }));
+         }
+         // Convert to protobuf AppendInput
+         const textEncoder = new TextEncoder();
+         const protoInput = s2_js_1.AppendInput.create({
+             records: records.map((record) => {
+                 // Convert headers to array of tuples if it's a Record
+                 let headersArray;
+                 if (record.headers) {
+                     if (Array.isArray(record.headers)) {
+                         headersArray = record.headers;
+                     }
+                     else {
+                         // Convert Record to array of tuples
+                         headersArray = Object.entries(record.headers);
+                     }
+                 }
+                 return {
+                     headers: headersArray?.map((h) => ({
+                         name: typeof h[0] === "string" ? textEncoder.encode(h[0]) : h[0],
+                         value: typeof h[1] === "string" ? textEncoder.encode(h[1]) : h[1],
+                     })),
+                     body: typeof record.body === "string"
+                         ? textEncoder.encode(record.body)
+                         : record.body,
+                 };
+             }),
+             fencingToken: args.fencing_token ?? undefined,
+             matchSeqNum: args.match_seq_num ? BigInt(args.match_seq_num) : undefined,
+         });
+         const bodyBytes = s2_js_1.AppendInput.toBinary(protoInput);
+         // Frame the message
+         const frame = (0, framing_js_1.frameMessage)({
+             terminal: false,
+             body: bodyBytes,
+         });
+         // This promise resolves when the frame is written (not when ack is received)
+         return new Promise((resolve, reject) => {
+             // Track pending ack - will be resolved when ack arrives
+             const ackPromise = {
+                 resolve: () => { },
+                 reject,
+                 batchSize: batchMeteredSize,
+             };
+             this.pendingAcks.push(ackPromise);
+             this.queuedBytes += batchMeteredSize;
+             // Send the frame (pipelined)
+             this.http2Stream.write(frame, (err) => {
+                 if (err) {
+                     // Remove from pending acks on write error
+                     const idx = this.pendingAcks.indexOf(ackPromise);
+                     if (idx !== -1) {
+                         this.pendingAcks.splice(idx, 1);
+                         this.queuedBytes -= batchMeteredSize;
+                     }
+                     reject(err);
+                 }
+                 else {
+                     // Frame written successfully - resolve immediately (pipelined)
+                     resolve();
+                 }
+             });
+         });
+     }
+     /**
+      * Send a batch and wait for ack (used by submit method)
+      */
+     sendBatch(records, args, batchMeteredSize) {
+         if (!this.http2Stream || this.http2Stream.closed) {
+             return Promise.reject(new index_js_2.S2Error({ message: "HTTP/2 stream is not open" }));
+         }
+         // Convert to protobuf AppendInput
+         const textEncoder = new TextEncoder();
+         const protoInput = s2_js_1.AppendInput.create({
+             records: records.map((record) => {
+                 // Convert headers to array of tuples if it's a Record
+                 let headersArray;
+                 if (record.headers) {
+                     if (Array.isArray(record.headers)) {
+                         headersArray = record.headers;
+                     }
+                     else {
+                         // Convert Record to array of tuples
+                         headersArray = Object.entries(record.headers);
+                     }
+                 }
+                 return {
+                     headers: headersArray?.map((h) => ({
+                         name: typeof h[0] === "string" ? textEncoder.encode(h[0]) : h[0],
+                         value: typeof h[1] === "string" ? textEncoder.encode(h[1]) : h[1],
+                     })),
+                     body: typeof record.body === "string"
+                         ? textEncoder.encode(record.body)
+                         : record.body,
+                 };
+             }),
+             fencingToken: args.fencing_token ?? undefined,
+             matchSeqNum: args.match_seq_num ? BigInt(args.match_seq_num) : undefined,
+         });
+         const bodyBytes = s2_js_1.AppendInput.toBinary(protoInput);
+         // Frame the message
+         const frame = (0, framing_js_1.frameMessage)({
+             terminal: false,
+             body: bodyBytes,
+         });
+         // Track pending ack - this promise resolves when the ack is received
+         return new Promise((resolve, reject) => {
+             this.pendingAcks.push({
+                 resolve,
+                 reject,
+                 batchSize: batchMeteredSize,
+             });
+             this.queuedBytes += batchMeteredSize;
+             // Send the frame (non-blocking - pipelined)
+             this.http2Stream.write(frame, (err) => {
+                 if (err) {
+                     // Remove from pending acks on write error
+                     const idx = this.pendingAcks.findIndex((p) => p.reject === reject);
+                     if (idx !== -1) {
+                         this.pendingAcks.splice(idx, 1);
+                         this.queuedBytes -= batchMeteredSize;
+                     }
+                     reject(err);
+                 }
+                 // Write completed, but promise resolves when ack is received
+             });
+         });
+     }
+     async closeStream() {
+         // Wait for all pending acks
+         while (this.pendingAcks.length > 0) {
+             await new Promise((resolve) => setTimeout(resolve, 10));
+         }
+         // Close the HTTP/2 stream (client doesn't send terminal frame for clean close)
+         if (this.http2Stream && !this.http2Stream.closed) {
+             this.http2Stream.end();
+         }
+     }
+     async [Symbol.asyncDispose]() {
+         await this.close();
+     }
+     /**
+      * Get a stream of acknowledgements for appends.
+      */
+     acks() {
+         return this._readable;
+     }
+     /**
+      * Close the append session.
+      * Waits for all pending appends to complete before resolving.
+      */
+     async close() {
+         await this.writable.close();
+     }
+     /**
+      * Submit an append request to the session.
+      * Returns a promise that resolves with the ack when received.
+      */
+     async submit(records, args) {
+         if (this.closed) {
+             return Promise.reject(new index_js_2.S2Error({ message: "AppendSession is closed" }));
+         }
+         // Wait for initialization
+         if (this.initPromise) {
+             await this.initPromise;
+         }
+         const recordsArray = Array.isArray(records) ? records : [records];
+         // Validate batch size limits
+         if (recordsArray.length > 1000) {
+             return Promise.reject(new index_js_2.S2Error({
+                 message: `Batch of ${recordsArray.length} exceeds maximum batch size of 1000 records`,
+             }));
+         }
+         // Calculate metered size
+         let batchMeteredSize = 0;
+         for (const record of recordsArray) {
+             batchMeteredSize += (0, utils_js_1.meteredSizeBytes)(record);
+         }
+         if (batchMeteredSize > 1024 * 1024) {
+             return Promise.reject(new index_js_2.S2Error({
+                 message: `Batch size ${batchMeteredSize} bytes exceeds maximum of 1 MiB (1048576 bytes)`,
+             }));
+         }
+         return this.sendBatch(recordsArray, {
+             records: recordsArray,
+             fencing_token: args?.fencing_token,
+             match_seq_num: args?.match_seq_num,
+         }, batchMeteredSize);
+     }
+     lastAckedPosition() {
+         return this._lastAckedPosition;
+     }
+ }
+ /**
+  * Convert protobuf StreamPosition to OpenAPI StreamPosition
+  */
+ function convertStreamPosition(proto) {
+     return {
+         seq_num: Number(proto.seqNum),
+         timestamp: Number(proto.timestamp),
+     };
+ }
+ function convertAppendAck(proto) {
+     if (!proto.start || !proto.end || !proto.tail) {
+         throw new Error("Invariant violation: AppendAck is missing required fields");
+     }
+     return {
+         start: convertStreamPosition(proto.start),
+         end: convertStreamPosition(proto.end),
+         tail: convertStreamPosition(proto.tail),
+     };
+ }
+ //# sourceMappingURL=index.js.map
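
The append session above pipelines batches over a single HTTP/2 stream: the WritableStream path blocks for capacity once maxQueuedBytes (10 MiB by default) of unacknowledged data is in flight, while submit() writes its frame immediately and resolves only when the matching AppendAck arrives. A minimal sketch of driving that surface directly, assuming the internal dist path is importable and that config carries the { baseUrl, accessToken } shape the constructor reads (accessToken being the SDK's Redacted value); applications would normally reach appends through the public stream client rather than this transport:

    // Illustrative only - exercises the S2SAppendSession surface introduced in this diff.
    const { S2STransport } = require("@s2-dev/streamstore/dist/cjs/lib/stream/transport/s2s/index.js");

    async function appendPipelined(config, streamName, batches) {
        const transport = new S2STransport(config); // { baseUrl, accessToken } - assumed shape, accessToken is Redacted
        const session = await transport.makeAppendSession(streamName);
        // submit() sends each frame right away (pipelined) and resolves with that batch's ack;
        // every batch must stay within 1000 records and 1 MiB of metered size.
        const acks = await Promise.all(batches.map((records) => session.submit(records)));
        await session.close(); // drains outstanding acks, then half-closes the HTTP/2 stream
        return { acks, last: session.lastAckedPosition() };
    }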