@s2-dev/streamstore 0.16.12 → 0.17.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (109)
  1. package/dist/cjs/basin.d.ts +6 -2
  2. package/dist/cjs/basin.d.ts.map +1 -1
  3. package/dist/cjs/basin.js +11 -5
  4. package/dist/cjs/basin.js.map +1 -1
  5. package/dist/cjs/batch-transform.d.ts +64 -0
  6. package/dist/cjs/batch-transform.d.ts.map +1 -0
  7. package/dist/cjs/batch-transform.js +144 -0
  8. package/dist/cjs/batch-transform.js.map +1 -0
  9. package/dist/cjs/generated/proto/s2.d.ts.map +1 -0
  10. package/dist/cjs/generated/proto/s2.js.map +1 -0
  11. package/dist/cjs/index.d.ts +4 -2
  12. package/dist/cjs/index.d.ts.map +1 -1
  13. package/dist/cjs/index.js +5 -1
  14. package/dist/cjs/index.js.map +1 -1
  15. package/dist/cjs/lib/stream/factory.d.ts +15 -0
  16. package/dist/cjs/lib/stream/factory.d.ts.map +1 -0
  17. package/dist/cjs/lib/stream/factory.js +36 -0
  18. package/dist/cjs/lib/stream/factory.js.map +1 -0
  19. package/dist/cjs/lib/stream/runtime.d.ts +13 -0
  20. package/dist/cjs/lib/stream/runtime.d.ts.map +1 -0
  21. package/dist/cjs/lib/stream/runtime.js +50 -0
  22. package/dist/cjs/lib/stream/runtime.js.map +1 -0
  23. package/dist/cjs/lib/stream/transport/fetch/index.d.ts +79 -0
  24. package/dist/cjs/lib/stream/transport/fetch/index.d.ts.map +1 -0
  25. package/dist/cjs/lib/stream/transport/fetch/index.js +382 -0
  26. package/dist/cjs/lib/stream/transport/fetch/index.js.map +1 -0
  27. package/dist/cjs/lib/stream/transport/fetch/shared.d.ts +7 -0
  28. package/dist/cjs/lib/stream/transport/fetch/shared.d.ts.map +1 -0
  29. package/dist/cjs/lib/stream/transport/fetch/shared.js +170 -0
  30. package/dist/cjs/lib/stream/transport/fetch/shared.js.map +1 -0
  31. package/dist/cjs/lib/stream/transport/s2s/framing.d.ts +47 -0
  32. package/dist/cjs/lib/stream/transport/s2s/framing.d.ts.map +1 -0
  33. package/dist/cjs/lib/stream/transport/s2s/framing.js +123 -0
  34. package/dist/cjs/lib/stream/transport/s2s/framing.js.map +1 -0
  35. package/dist/cjs/lib/stream/transport/s2s/index.d.ts +23 -0
  36. package/dist/cjs/lib/stream/transport/s2s/index.d.ts.map +1 -0
  37. package/dist/cjs/lib/stream/transport/s2s/index.js +785 -0
  38. package/dist/cjs/lib/stream/transport/s2s/index.js.map +1 -0
  39. package/dist/cjs/lib/stream/types.d.ts +53 -0
  40. package/dist/cjs/lib/stream/types.d.ts.map +1 -0
  41. package/dist/cjs/lib/stream/types.js +3 -0
  42. package/dist/cjs/lib/stream/types.js.map +1 -0
  43. package/dist/cjs/stream.d.ts +22 -116
  44. package/dist/cjs/stream.d.ts.map +1 -1
  45. package/dist/cjs/stream.js +35 -551
  46. package/dist/cjs/stream.js.map +1 -1
  47. package/dist/cjs/utils.d.ts +32 -6
  48. package/dist/cjs/utils.d.ts.map +1 -1
  49. package/dist/cjs/utils.js +129 -34
  50. package/dist/cjs/utils.js.map +1 -1
  51. package/dist/esm/basin.d.ts +6 -2
  52. package/dist/esm/basin.d.ts.map +1 -1
  53. package/dist/esm/basin.js +11 -5
  54. package/dist/esm/basin.js.map +1 -1
  55. package/dist/esm/batch-transform.d.ts +64 -0
  56. package/dist/esm/batch-transform.d.ts.map +1 -0
  57. package/dist/esm/batch-transform.js +140 -0
  58. package/dist/esm/batch-transform.js.map +1 -0
  59. package/dist/esm/generated/proto/s2.d.ts.map +1 -0
  60. package/dist/esm/generated/proto/s2.js.map +1 -0
  61. package/dist/esm/index.d.ts +4 -2
  62. package/dist/esm/index.d.ts.map +1 -1
  63. package/dist/esm/index.js +2 -1
  64. package/dist/esm/index.js.map +1 -1
  65. package/dist/esm/lib/stream/factory.d.ts +15 -0
  66. package/dist/esm/lib/stream/factory.d.ts.map +1 -0
  67. package/dist/esm/lib/stream/factory.js +33 -0
  68. package/dist/esm/lib/stream/factory.js.map +1 -0
  69. package/dist/esm/lib/stream/runtime.d.ts +13 -0
  70. package/dist/esm/lib/stream/runtime.d.ts.map +1 -0
  71. package/dist/esm/lib/stream/runtime.js +46 -0
  72. package/dist/esm/lib/stream/runtime.js.map +1 -0
  73. package/dist/esm/lib/stream/transport/fetch/index.d.ts +79 -0
  74. package/dist/esm/lib/stream/transport/fetch/index.d.ts.map +1 -0
  75. package/dist/esm/lib/stream/transport/fetch/index.js +376 -0
  76. package/dist/esm/lib/stream/transport/fetch/index.js.map +1 -0
  77. package/dist/esm/lib/stream/transport/fetch/shared.d.ts +7 -0
  78. package/dist/esm/lib/stream/transport/fetch/shared.d.ts.map +1 -0
  79. package/dist/esm/lib/stream/transport/fetch/shared.js +166 -0
  80. package/dist/esm/lib/stream/transport/fetch/shared.js.map +1 -0
  81. package/dist/esm/lib/stream/transport/s2s/framing.d.ts +47 -0
  82. package/dist/esm/lib/stream/transport/s2s/framing.d.ts.map +1 -0
  83. package/dist/esm/lib/stream/transport/s2s/framing.js +118 -0
  84. package/dist/esm/lib/stream/transport/s2s/framing.js.map +1 -0
  85. package/dist/esm/lib/stream/transport/s2s/index.d.ts +23 -0
  86. package/dist/esm/lib/stream/transport/s2s/index.d.ts.map +1 -0
  87. package/dist/esm/lib/stream/transport/s2s/index.js +781 -0
  88. package/dist/esm/lib/stream/transport/s2s/index.js.map +1 -0
  89. package/dist/esm/lib/stream/types.d.ts +53 -0
  90. package/dist/esm/lib/stream/types.d.ts.map +1 -0
  91. package/dist/esm/lib/stream/types.js +2 -0
  92. package/dist/esm/lib/stream/types.js.map +1 -0
  93. package/dist/esm/stream.d.ts +22 -116
  94. package/dist/esm/stream.d.ts.map +1 -1
  95. package/dist/esm/stream.js +36 -551
  96. package/dist/esm/stream.js.map +1 -1
  97. package/dist/esm/utils.d.ts +32 -6
  98. package/dist/esm/utils.d.ts.map +1 -1
  99. package/dist/esm/utils.js +126 -34
  100. package/dist/esm/utils.js.map +1 -1
  101. package/package.json +2 -2
  102. package/dist/cjs/generated/proto/s2/v1/s2.d.ts.map +0 -1
  103. package/dist/cjs/generated/proto/s2/v1/s2.js.map +0 -1
  104. package/dist/esm/generated/proto/s2/v1/s2.d.ts.map +0 -1
  105. package/dist/esm/generated/proto/s2/v1/s2.js.map +0 -1
  106. /package/dist/cjs/generated/proto/{s2/v1/s2.d.ts → s2.d.ts} +0 -0
  107. /package/dist/cjs/generated/proto/{s2/v1/s2.js → s2.js} +0 -0
  108. /package/dist/esm/generated/proto/{s2/v1/s2.d.ts → s2.d.ts} +0 -0
  109. /package/dist/esm/generated/proto/{s2/v1/s2.js → s2.js} +0 -0
package/dist/cjs/lib/stream/transport/s2s/index.js
@@ -0,0 +1,785 @@
+ "use strict";
+ /**
+  * S2S HTTP/2 transport for Node.js
+  * Uses the s2s binary protocol over HTTP/2 for efficient streaming
+  *
+  * This file should only be imported in Node.js environments
+  */
+ Object.defineProperty(exports, "__esModule", { value: true });
+ exports.S2STransport = void 0;
+ const http2 = require("node:http2");
+ const index_js_1 = require("../../../../generated/client/index.js");
+ const s2_js_1 = require("../../../../generated/proto/s2.js");
+ const index_js_2 = require("../../../../index.js");
+ const utils_js_1 = require("../../../../utils.js");
+ const Redacted = require("../../../redacted.js");
+ const framing_js_1 = require("./framing.js");
+ class S2STransport {
+     client;
+     transportConfig;
+     connection;
+     connectionPromise;
+     constructor(config) {
+         this.client = (0, index_js_1.createClient)((0, index_js_1.createConfig)({
+             baseUrl: config.baseUrl,
+             auth: () => Redacted.value(config.accessToken),
+         }));
+         this.transportConfig = config;
+     }
+     async makeAppendSession(stream, sessionOptions, requestOptions) {
+         return S2SAppendSession.create(this.transportConfig.baseUrl, this.transportConfig.accessToken, stream, () => this.getConnection(), sessionOptions, requestOptions);
+     }
+     async makeReadSession(stream, args, options) {
+         return S2SReadSession.create(this.transportConfig.baseUrl, this.transportConfig.accessToken, stream, args, options, () => this.getConnection());
+     }
+     /**
+      * Get or create HTTP/2 connection (one per transport)
+      */
+     async getConnection() {
+         if (this.connection &&
+             !this.connection.closed &&
+             !this.connection.destroyed) {
+             return this.connection;
+         }
+         // If connection is in progress, wait for it
+         if (this.connectionPromise) {
+             return this.connectionPromise;
+         }
+         // Create new connection
+         this.connectionPromise = this.createConnection();
+         try {
+             this.connection = await this.connectionPromise;
+             return this.connection;
+         }
+         finally {
+             this.connectionPromise = undefined;
+         }
+     }
+     async createConnection() {
+         const url = new URL(this.transportConfig.baseUrl);
+         const client = http2.connect(url.origin, {
+             // Use HTTPS settings
+             ...(url.protocol === "https:"
+                 ? {
+                     // TLS options can go here if needed
+                 }
+                 : {}),
+         });
+         return new Promise((resolve, reject) => {
+             client.once("connect", () => {
+                 resolve(client);
+             });
+             client.once("error", (err) => {
+                 reject(err);
+             });
+             // Handle connection close
+             client.once("close", () => {
+                 if (this.connection === client) {
+                     this.connection = undefined;
+                 }
+             });
+         });
+     }
+ }
+ exports.S2STransport = S2STransport;
+ class S2SReadSession extends ReadableStream {
+     streamName;
+     args;
+     authToken;
+     url;
+     options;
+     getConnection;
+     http2Stream;
+     _lastReadPosition;
+     parser = new framing_js_1.S2SFrameParser();
+     static async create(baseUrl, bearerToken, streamName, args, options, getConnection) {
+         const url = new URL(baseUrl);
+         return new S2SReadSession(streamName, args, bearerToken, url, options, getConnection);
+     }
+     constructor(streamName, args, authToken, url, options, getConnection) {
+         // Initialize parser and textDecoder before super() call
+         const parser = new framing_js_1.S2SFrameParser();
+         const textDecoder = new TextDecoder();
+         let http2Stream;
+         let lastReadPosition;
+         super({
+             start: async (controller) => {
+                 let controllerClosed = false;
+                 const safeClose = () => {
+                     if (!controllerClosed) {
+                         controllerClosed = true;
+                         try {
+                             controller.close();
+                         }
+                         catch {
+                             // Controller may already be closed, ignore
+                         }
+                     }
+                 };
+                 const safeError = (err) => {
+                     if (!controllerClosed) {
+                         controllerClosed = true;
+                         controller.error(err);
+                     }
+                 };
+                 try {
+                     const connection = await getConnection();
+                     // Build query string
+                     const queryParams = new URLSearchParams();
+                     const { as, ...readParams } = args ?? {};
+                     if (readParams.seq_num !== undefined)
+                         queryParams.set("seq_num", readParams.seq_num.toString());
+                     if (readParams.timestamp !== undefined)
+                         queryParams.set("timestamp", readParams.timestamp.toString());
+                     if (readParams.tail_offset !== undefined)
+                         queryParams.set("tail_offset", readParams.tail_offset.toString());
+                     if (readParams.count !== undefined)
+                         queryParams.set("count", readParams.count.toString());
+                     if (readParams.bytes !== undefined)
+                         queryParams.set("bytes", readParams.bytes.toString());
+                     if (readParams.wait !== undefined)
+                         queryParams.set("wait", readParams.wait.toString());
+                     if (typeof readParams.until === "number") {
+                         queryParams.set("until", readParams.until.toString());
+                     }
+                     const queryString = queryParams.toString();
+                     const path = `${url.pathname}/streams/${encodeURIComponent(streamName)}/records${queryString ? `?${queryString}` : ""}`;
+                     const stream = connection.request({
+                         ":method": "GET",
+                         ":path": path,
+                         ":scheme": url.protocol.slice(0, -1),
+                         ":authority": url.host,
+                         authorization: `Bearer ${Redacted.value(authToken)}`,
+                         accept: "application/protobuf",
+                         "content-type": "s2s/proto",
+                     });
+                     http2Stream = stream;
+                     options?.signal?.addEventListener("abort", () => {
+                         if (!stream.closed) {
+                             stream.close();
+                         }
+                     });
+                     stream.on("data", (chunk) => {
+                         // Buffer already extends Uint8Array in Node.js, no need to convert
+                         parser.push(chunk);
+                         let frame = parser.parseFrame();
+                         while (frame) {
+                             if (frame.terminal) {
+                                 if (frame.statusCode && frame.statusCode >= 400) {
+                                     const errorText = textDecoder.decode(frame.body);
+                                     try {
+                                         const errorJson = JSON.parse(errorText);
+                                         safeError(new index_js_2.S2Error({
+                                             message: errorJson.message ?? "Unknown error",
+                                             code: errorJson.code,
+                                             status: frame.statusCode,
+                                         }));
+                                     }
+                                     catch {
+                                         safeError(new index_js_2.S2Error({
+                                             message: errorText || "Unknown error",
+                                             status: frame.statusCode,
+                                         }));
+                                     }
+                                 }
+                                 else {
+                                     safeClose();
+                                 }
+                                 stream.close();
+                             }
+                             else {
+                                 // Parse ReadBatch
+                                 try {
+                                     const protoBatch = s2_js_1.ReadBatch.fromBinary(frame.body);
+                                     // Update position from tail
+                                     if (protoBatch.tail) {
+                                         lastReadPosition = convertStreamPosition(protoBatch.tail);
+                                         // Assign to instance property
+                                         this._lastReadPosition = lastReadPosition;
+                                     }
+                                     // Enqueue each record
+                                     for (const record of protoBatch.records) {
+                                         const converted = this.convertRecord(record, as ?? "string", textDecoder);
+                                         controller.enqueue(converted);
+                                     }
+                                 }
+                                 catch (err) {
+                                     safeError(new index_js_2.S2Error({
+                                         message: `Failed to parse ReadBatch: ${err}`,
+                                     }));
+                                 }
+                             }
+                             frame = parser.parseFrame();
+                         }
+                     });
+                     stream.on("error", (err) => {
+                         safeError(err);
+                     });
+                     stream.on("close", () => {
+                         safeClose();
+                     });
+                 }
+                 catch (err) {
+                     safeError(err);
+                 }
+             },
+             cancel: async () => {
+                 if (http2Stream && !http2Stream.closed) {
+                     http2Stream.close();
+                 }
+             },
+         });
+         this.streamName = streamName;
+         this.args = args;
+         this.authToken = authToken;
+         this.url = url;
+         this.options = options;
+         this.getConnection = getConnection;
+         // Assign parser to instance property after super() completes
+         this.parser = parser;
+         this.http2Stream = http2Stream;
+     }
+     /**
+      * Convert a protobuf SequencedRecord to the requested format
+      */
+     convertRecord(record, format, textDecoder) {
+         if (format === "bytes") {
+             return {
+                 seq_num: Number(record.seqNum),
+                 timestamp: Number(record.timestamp),
+                 headers: record.headers?.map((h) => [h.name ?? new Uint8Array(), h.value ?? new Uint8Array()]),
+                 body: record.body,
+             };
+         }
+         else {
+             // Convert to string format
+             return {
+                 seq_num: Number(record.seqNum),
+                 timestamp: Number(record.timestamp),
+                 headers: record.headers?.map((h) => [
+                     h.name ? textDecoder.decode(h.name) : "",
+                     h.value ? textDecoder.decode(h.value) : "",
+                 ]),
+                 body: record.body ? textDecoder.decode(record.body) : undefined,
+             };
+         }
+     }
+     async [Symbol.asyncDispose]() {
+         await this.cancel("disposed");
+     }
+     // Polyfill for older browsers / Node.js environments
+     [Symbol.asyncIterator]() {
+         const fn = ReadableStream.prototype[Symbol.asyncIterator];
+         if (typeof fn === "function")
+             return fn.call(this);
+         const reader = this.getReader();
+         return {
+             next: async () => {
+                 const r = await reader.read();
+                 if (r.done) {
+                     reader.releaseLock();
+                     return { done: true, value: undefined };
+                 }
+                 return { done: false, value: r.value };
+             },
+             throw: async (e) => {
+                 await reader.cancel(e);
+                 reader.releaseLock();
+                 return { done: true, value: undefined };
+             },
+             return: async () => {
+                 await reader.cancel("done");
+                 reader.releaseLock();
+                 return { done: true, value: undefined };
+             },
+             [Symbol.asyncIterator]() {
+                 return this;
+             },
+         };
+     }
+     lastReadPosition() {
+         return this._lastReadPosition;
+     }
+ }
+ /**
+  * AcksStream for S2S append session
+  */
+ class S2SAcksStream extends ReadableStream {
+     constructor(setController) {
+         super({
+             start: (controller) => {
+                 setController(controller);
+             },
+         });
+     }
+     async [Symbol.asyncDispose]() {
+         await this.cancel("disposed");
+     }
+     // Polyfill for older browsers
+     [Symbol.asyncIterator]() {
+         const fn = ReadableStream.prototype[Symbol.asyncIterator];
+         if (typeof fn === "function")
+             return fn.call(this);
+         const reader = this.getReader();
+         return {
+             next: async () => {
+                 const r = await reader.read();
+                 if (r.done) {
+                     reader.releaseLock();
+                     return { done: true, value: undefined };
+                 }
+                 return { done: false, value: r.value };
+             },
+             throw: async (e) => {
+                 await reader.cancel(e);
+                 reader.releaseLock();
+                 return { done: true, value: undefined };
+             },
+             return: async () => {
+                 await reader.cancel("done");
+                 reader.releaseLock();
+                 return { done: true, value: undefined };
+             },
+             [Symbol.asyncIterator]() {
+                 return this;
+             },
+         };
+     }
+ }
+ /**
+  * S2S Append Session for pipelined writes
+  * Unlike fetch-based append, writes don't block on acks - only on submission
+  */
+ class S2SAppendSession {
+     baseUrl;
+     authToken;
+     streamName;
+     getConnection;
+     options;
+     http2Stream;
+     _lastAckedPosition;
+     parser = new framing_js_1.S2SFrameParser();
+     acksController;
+     _readable;
+     _writable;
+     closed = false;
+     queuedBytes = 0;
+     maxQueuedBytes;
+     waitingForCapacity = [];
+     pendingAcks = [];
+     initPromise;
+     readable;
+     writable;
+     static async create(baseUrl, bearerToken, streamName, getConnection, sessionOptions, requestOptions) {
+         return new S2SAppendSession(baseUrl, bearerToken, streamName, getConnection, sessionOptions, requestOptions);
+     }
+     constructor(baseUrl, authToken, streamName, getConnection, sessionOptions, options) {
+         this.baseUrl = baseUrl;
+         this.authToken = authToken;
+         this.streamName = streamName;
+         this.getConnection = getConnection;
+         this.options = options;
+         this.maxQueuedBytes = sessionOptions?.maxQueuedBytes ?? 10 * 1024 * 1024; // 10 MiB default
+         // Create the readable stream for acks
+         this._readable = new S2SAcksStream((controller) => {
+             this.acksController = controller;
+         });
+         this.readable = this._readable;
+         // Create the writable stream
+         this._writable = new WritableStream({
+             start: async (controller) => {
+                 this.initPromise = this.initializeStream();
+                 await this.initPromise;
+             },
+             write: async (chunk) => {
+                 if (this.closed) {
+                     throw new index_js_2.S2Error({ message: "AppendSession is closed" });
+                 }
+                 const recordsArray = Array.isArray(chunk.records)
+                     ? chunk.records
+                     : [chunk.records];
+                 // Validate batch size limits
+                 if (recordsArray.length > 1000) {
+                     throw new index_js_2.S2Error({
+                         message: `Batch of ${recordsArray.length} exceeds maximum batch size of 1000 records`,
+                     });
+                 }
+                 // Calculate metered size
+                 let batchMeteredSize = 0;
+                 for (const record of recordsArray) {
+                     batchMeteredSize += (0, utils_js_1.meteredSizeBytes)(record);
+                 }
+                 if (batchMeteredSize > 1024 * 1024) {
+                     throw new index_js_2.S2Error({
+                         message: `Batch size ${batchMeteredSize} bytes exceeds maximum of 1 MiB (1048576 bytes)`,
+                     });
+                 }
+                 // Wait for capacity if needed (backpressure)
+                 while (this.queuedBytes + batchMeteredSize > this.maxQueuedBytes &&
+                     !this.closed) {
+                     await new Promise((resolve) => {
+                         this.waitingForCapacity.push(resolve);
+                     });
+                 }
+                 if (this.closed) {
+                     throw new index_js_2.S2Error({ message: "AppendSession is closed" });
+                 }
+                 // Send the batch immediately (pipelined)
+                 // Returns when frame is sent, not when ack is received
+                 await this.sendBatchNonBlocking(recordsArray, chunk, batchMeteredSize);
+             },
+             close: async () => {
+                 this.closed = true;
+                 await this.closeStream();
+             },
+             abort: async (reason) => {
+                 this.closed = true;
+                 this.queuedBytes = 0;
+                 // Reject all pending acks
+                 const error = new index_js_2.S2Error({
+                     message: `AppendSession was aborted: ${reason}`,
+                 });
+                 for (const pending of this.pendingAcks) {
+                     pending.reject(error);
+                 }
+                 this.pendingAcks = [];
+                 // Wake up all waiting for capacity
+                 for (const resolver of this.waitingForCapacity) {
+                     resolver();
+                 }
+                 this.waitingForCapacity = [];
+                 if (this.http2Stream && !this.http2Stream.closed) {
+                     this.http2Stream.close();
+                 }
+             },
+         });
+         this.writable = this._writable;
+     }
+     async initializeStream() {
+         const url = new URL(this.baseUrl);
+         const connection = await this.getConnection();
+         const path = `${url.pathname}/streams/${encodeURIComponent(this.streamName)}/records`;
+         const stream = connection.request({
+             ":method": "POST",
+             ":path": path,
+             ":scheme": url.protocol.slice(0, -1),
+             ":authority": url.host,
+             authorization: `Bearer ${Redacted.value(this.authToken)}`,
+             "content-type": "s2s/proto",
+             accept: "application/protobuf",
+         });
+         this.http2Stream = stream;
+         this.options?.signal?.addEventListener("abort", () => {
+             if (!stream.closed) {
+                 stream.close();
+             }
+         });
+         const textDecoder = new TextDecoder();
+         let controllerClosed = false;
+         const safeClose = () => {
+             if (!controllerClosed && this.acksController) {
+                 controllerClosed = true;
+                 try {
+                     this.acksController.close();
+                 }
+                 catch {
+                     // Controller may already be closed, ignore
+                 }
+             }
+         };
+         const safeError = (err) => {
+             if (!controllerClosed && this.acksController) {
+                 controllerClosed = true;
+                 this.acksController.error(err);
+             }
+             // Reject all pending acks
+             for (const pending of this.pendingAcks) {
+                 pending.reject(err);
+             }
+             this.pendingAcks = [];
+         };
+         // Handle incoming data (acks)
+         stream.on("data", (chunk) => {
+             this.parser.push(chunk);
+             let frame = this.parser.parseFrame();
+             while (frame) {
+                 if (frame.terminal) {
+                     if (frame.statusCode && frame.statusCode >= 400) {
+                         const errorText = textDecoder.decode(frame.body);
+                         try {
+                             const errorJson = JSON.parse(errorText);
+                             safeError(new index_js_2.S2Error({
+                                 message: errorJson.message ?? "Unknown error",
+                                 code: errorJson.code,
+                                 status: frame.statusCode,
+                             }));
+                         }
+                         catch {
+                             safeError(new index_js_2.S2Error({
+                                 message: errorText || "Unknown error",
+                                 status: frame.statusCode,
+                             }));
+                         }
+                     }
+                     else {
+                         safeClose();
+                     }
+                     stream.close();
+                 }
+                 else {
+                     // Parse AppendAck
+                     try {
+                         const protoAck = s2_js_1.AppendAck.fromBinary(frame.body);
+                         const ack = convertAppendAck(protoAck);
+                         this._lastAckedPosition = ack;
+                         // Enqueue to readable stream
+                         if (this.acksController) {
+                             this.acksController.enqueue(ack);
+                         }
+                         // Resolve the pending ack promise
+                         const pending = this.pendingAcks.shift();
+                         if (pending) {
+                             pending.resolve(ack);
+                             // Release capacity
+                             this.queuedBytes -= pending.batchSize;
+                             // Wake up one waiting writer
+                             if (this.waitingForCapacity.length > 0) {
+                                 const waiter = this.waitingForCapacity.shift();
+                                 waiter();
+                             }
+                         }
+                     }
+                     catch (err) {
+                         safeError(new index_js_2.S2Error({
+                             message: `Failed to parse AppendAck: ${err}`,
+                         }));
+                     }
+                 }
+                 frame = this.parser.parseFrame();
+             }
+         });
+         stream.on("error", (err) => {
+             safeError(err);
+         });
+         stream.on("close", () => {
+             safeClose();
+         });
+     }
+     /**
+      * Send a batch non-blocking (returns when frame is sent, not when ack is received)
+      */
+     sendBatchNonBlocking(records, args, batchMeteredSize) {
+         if (!this.http2Stream || this.http2Stream.closed) {
+             return Promise.reject(new index_js_2.S2Error({ message: "HTTP/2 stream is not open" }));
+         }
+         // Convert to protobuf AppendInput
+         const textEncoder = new TextEncoder();
+         const protoInput = s2_js_1.AppendInput.create({
+             records: records.map((record) => {
+                 // Convert headers to array of tuples if it's a Record
+                 let headersArray;
+                 if (record.headers) {
+                     if (Array.isArray(record.headers)) {
+                         headersArray = record.headers;
+                     }
+                     else {
+                         // Convert Record to array of tuples
+                         headersArray = Object.entries(record.headers);
+                     }
+                 }
+                 return {
+                     headers: headersArray?.map((h) => ({
+                         name: typeof h[0] === "string" ? textEncoder.encode(h[0]) : h[0],
+                         value: typeof h[1] === "string" ? textEncoder.encode(h[1]) : h[1],
+                     })),
+                     body: typeof record.body === "string"
+                         ? textEncoder.encode(record.body)
+                         : record.body,
+                 };
+             }),
+             fencingToken: args.fencing_token ?? undefined,
+             matchSeqNum: args.match_seq_num ? BigInt(args.match_seq_num) : undefined,
+         });
+         const bodyBytes = s2_js_1.AppendInput.toBinary(protoInput);
+         // Frame the message
+         const frame = (0, framing_js_1.frameMessage)({
+             terminal: false,
+             body: bodyBytes,
+         });
+         // This promise resolves when the frame is written (not when ack is received)
+         return new Promise((resolve, reject) => {
+             // Track pending ack - will be resolved when ack arrives
+             const ackPromise = {
+                 resolve: () => { },
+                 reject,
+                 batchSize: batchMeteredSize,
+             };
+             this.pendingAcks.push(ackPromise);
+             this.queuedBytes += batchMeteredSize;
+             // Send the frame (pipelined)
+             this.http2Stream.write(frame, (err) => {
+                 if (err) {
+                     // Remove from pending acks on write error
+                     const idx = this.pendingAcks.indexOf(ackPromise);
+                     if (idx !== -1) {
+                         this.pendingAcks.splice(idx, 1);
+                         this.queuedBytes -= batchMeteredSize;
+                     }
+                     reject(err);
+                 }
+                 else {
+                     // Frame written successfully - resolve immediately (pipelined)
+                     resolve();
+                 }
+             });
+         });
+     }
+     /**
+      * Send a batch and wait for ack (used by submit method)
+      */
+     sendBatch(records, args, batchMeteredSize) {
+         if (!this.http2Stream || this.http2Stream.closed) {
+             return Promise.reject(new index_js_2.S2Error({ message: "HTTP/2 stream is not open" }));
+         }
+         // Convert to protobuf AppendInput
+         const textEncoder = new TextEncoder();
+         const protoInput = s2_js_1.AppendInput.create({
+             records: records.map((record) => {
+                 // Convert headers to array of tuples if it's a Record
+                 let headersArray;
+                 if (record.headers) {
+                     if (Array.isArray(record.headers)) {
+                         headersArray = record.headers;
+                     }
+                     else {
+                         // Convert Record to array of tuples
+                         headersArray = Object.entries(record.headers);
+                     }
+                 }
+                 return {
+                     headers: headersArray?.map((h) => ({
+                         name: typeof h[0] === "string" ? textEncoder.encode(h[0]) : h[0],
+                         value: typeof h[1] === "string" ? textEncoder.encode(h[1]) : h[1],
+                     })),
+                     body: typeof record.body === "string"
+                         ? textEncoder.encode(record.body)
+                         : record.body,
+                 };
+             }),
+             fencingToken: args.fencing_token ?? undefined,
+             matchSeqNum: args.match_seq_num ? BigInt(args.match_seq_num) : undefined,
+         });
+         const bodyBytes = s2_js_1.AppendInput.toBinary(protoInput);
+         // Frame the message
+         const frame = (0, framing_js_1.frameMessage)({
+             terminal: false,
+             body: bodyBytes,
+         });
+         // Track pending ack - this promise resolves when the ack is received
+         return new Promise((resolve, reject) => {
+             this.pendingAcks.push({
+                 resolve,
+                 reject,
+                 batchSize: batchMeteredSize,
+             });
+             this.queuedBytes += batchMeteredSize;
+             // Send the frame (non-blocking - pipelined)
+             this.http2Stream.write(frame, (err) => {
+                 if (err) {
+                     // Remove from pending acks on write error
+                     const idx = this.pendingAcks.findIndex((p) => p.reject === reject);
+                     if (idx !== -1) {
+                         this.pendingAcks.splice(idx, 1);
+                         this.queuedBytes -= batchMeteredSize;
+                     }
+                     reject(err);
+                 }
+                 // Write completed, but promise resolves when ack is received
+             });
+         });
+     }
+     async closeStream() {
+         // Wait for all pending acks
+         while (this.pendingAcks.length > 0) {
+             await new Promise((resolve) => setTimeout(resolve, 10));
+         }
+         // Close the HTTP/2 stream (client doesn't send terminal frame for clean close)
+         if (this.http2Stream && !this.http2Stream.closed) {
+             this.http2Stream.end();
+         }
+     }
+     async [Symbol.asyncDispose]() {
+         await this.close();
+     }
+     /**
+      * Get a stream of acknowledgements for appends.
+      */
+     acks() {
+         return this._readable;
+     }
+     /**
+      * Close the append session.
+      * Waits for all pending appends to complete before resolving.
+      */
+     async close() {
+         await this.writable.close();
+     }
+     /**
+      * Submit an append request to the session.
+      * Returns a promise that resolves with the ack when received.
+      */
+     async submit(records, args) {
+         if (this.closed) {
+             return Promise.reject(new index_js_2.S2Error({ message: "AppendSession is closed" }));
+         }
+         // Wait for initialization
+         if (this.initPromise) {
+             await this.initPromise;
+         }
+         const recordsArray = Array.isArray(records) ? records : [records];
+         // Validate batch size limits
+         if (recordsArray.length > 1000) {
+             return Promise.reject(new index_js_2.S2Error({
+                 message: `Batch of ${recordsArray.length} exceeds maximum batch size of 1000 records`,
+             }));
+         }
+         // Calculate metered size
+         let batchMeteredSize = 0;
+         for (const record of recordsArray) {
+             batchMeteredSize += (0, utils_js_1.meteredSizeBytes)(record);
+         }
+         if (batchMeteredSize > 1024 * 1024) {
+             return Promise.reject(new index_js_2.S2Error({
+                 message: `Batch size ${batchMeteredSize} bytes exceeds maximum of 1 MiB (1048576 bytes)`,
+             }));
+         }
+         return this.sendBatch(recordsArray, {
+             records: recordsArray,
+             fencing_token: args?.fencing_token,
+             match_seq_num: args?.match_seq_num,
+         }, batchMeteredSize);
+     }
+     lastAckedPosition() {
+         return this._lastAckedPosition;
+     }
+ }
+ /**
+  * Convert protobuf StreamPosition to OpenAPI StreamPosition
+  */
+ function convertStreamPosition(proto) {
+     return {
+         seq_num: Number(proto.seqNum),
+         timestamp: Number(proto.timestamp),
+     };
+ }
+ function convertAppendAck(proto) {
+     if (!proto.start || !proto.end || !proto.tail) {
+         throw new Error("Invariant violation: AppendAck is missing required fields");
+     }
+     return {
+         start: convertStreamPosition(proto.start),
+         end: convertStreamPosition(proto.end),
+         tail: convertStreamPosition(proto.tail),
+     };
+ }
+ //# sourceMappingURL=index.js.map
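
The new S2SAppendSession acknowledges writes out of band: each submit() resolves with the ack for its own batch, while frames are written to the single HTTP/2 stream as soon as capacity allows. Below is a minimal TypeScript sketch of that pipelined pattern. The AppendRecord/Ack/AppendSessionLike types are illustrative structural types, not the package's documented public API; assume the session was obtained via S2STransport.makeAppendSession() as introduced in this release.

// Hypothetical usage sketch (not part of the diff above).
type AppendRecord = { body: string; headers?: [string, string][] };
type Ack = { start: unknown; end: unknown; tail: unknown };

interface AppendSessionLike {
    // Resolves with the ack for this batch once the server acknowledges it.
    submit(records: AppendRecord[]): Promise<Ack>;
    // Waits for pending appends to be acknowledged before resolving.
    close(): Promise<void>;
}

async function appendPipelined(session: AppendSessionLike, batches: AppendRecord[][]): Promise<Ack[]> {
    // Fire all submits without awaiting in between: each call frames and writes
    // its batch immediately (subject to the 1000-record / 1 MiB batch limits and
    // the maxQueuedBytes backpressure enforced in the code above), so batches are
    // pipelined on one HTTP/2 stream instead of proceeding ack-by-ack.
    const pending = batches.map((records) => session.submit(records));
    const acks = await Promise.all(pending);
    await session.close();
    return acks;
}

This mirrors the "writes don't block on acks - only on submission" behavior described in the S2SAppendSession comment; the fetch-based transport, by contrast, awaits each append before sending the next.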