@s2-dev/streamstore 0.17.6 → 0.18.0
This diff reflects the changes between publicly released package versions as they appear in their public registry, and is provided for informational purposes only.
- package/README.md +69 -1
- package/dist/cjs/accessTokens.d.ts +3 -2
- package/dist/cjs/accessTokens.d.ts.map +1 -1
- package/dist/cjs/accessTokens.js +22 -37
- package/dist/cjs/accessTokens.js.map +1 -1
- package/dist/cjs/basin.d.ts +4 -3
- package/dist/cjs/basin.d.ts.map +1 -1
- package/dist/cjs/basin.js +7 -5
- package/dist/cjs/basin.js.map +1 -1
- package/dist/cjs/basins.d.ts +10 -10
- package/dist/cjs/basins.d.ts.map +1 -1
- package/dist/cjs/basins.js +36 -64
- package/dist/cjs/basins.js.map +1 -1
- package/dist/cjs/batch-transform.d.ts +1 -1
- package/dist/cjs/batch-transform.d.ts.map +1 -1
- package/dist/cjs/batch-transform.js +36 -5
- package/dist/cjs/batch-transform.js.map +1 -1
- package/dist/cjs/common.d.ts +42 -0
- package/dist/cjs/common.d.ts.map +1 -1
- package/dist/cjs/error.d.ts +40 -2
- package/dist/cjs/error.d.ts.map +1 -1
- package/dist/cjs/error.js +268 -2
- package/dist/cjs/error.js.map +1 -1
- package/dist/cjs/generated/client/types.gen.d.ts +7 -0
- package/dist/cjs/generated/client/types.gen.d.ts.map +1 -1
- package/dist/cjs/generated/client/utils.gen.d.ts +1 -0
- package/dist/cjs/generated/client/utils.gen.d.ts.map +1 -1
- package/dist/cjs/generated/client/utils.gen.js.map +1 -1
- package/dist/cjs/generated/core/types.gen.d.ts +2 -0
- package/dist/cjs/generated/core/types.gen.d.ts.map +1 -1
- package/dist/cjs/index.d.ts +46 -3
- package/dist/cjs/index.d.ts.map +1 -1
- package/dist/cjs/index.js +28 -2
- package/dist/cjs/index.js.map +1 -1
- package/dist/cjs/lib/result.d.ts +57 -0
- package/dist/cjs/lib/result.d.ts.map +1 -0
- package/dist/cjs/lib/result.js +43 -0
- package/dist/cjs/lib/result.js.map +1 -0
- package/dist/cjs/lib/retry.d.ts +151 -0
- package/dist/cjs/lib/retry.d.ts.map +1 -0
- package/dist/cjs/lib/retry.js +839 -0
- package/dist/cjs/lib/retry.js.map +1 -0
- package/dist/cjs/lib/stream/factory.d.ts +0 -1
- package/dist/cjs/lib/stream/factory.d.ts.map +1 -1
- package/dist/cjs/lib/stream/factory.js +0 -1
- package/dist/cjs/lib/stream/factory.js.map +1 -1
- package/dist/cjs/lib/stream/transport/fetch/index.d.ts +24 -32
- package/dist/cjs/lib/stream/transport/fetch/index.d.ts.map +1 -1
- package/dist/cjs/lib/stream/transport/fetch/index.js +247 -187
- package/dist/cjs/lib/stream/transport/fetch/index.js.map +1 -1
- package/dist/cjs/lib/stream/transport/fetch/shared.d.ts +1 -2
- package/dist/cjs/lib/stream/transport/fetch/shared.d.ts.map +1 -1
- package/dist/cjs/lib/stream/transport/fetch/shared.js +49 -72
- package/dist/cjs/lib/stream/transport/fetch/shared.js.map +1 -1
- package/dist/cjs/lib/stream/transport/s2s/index.d.ts +0 -1
- package/dist/cjs/lib/stream/transport/s2s/index.d.ts.map +1 -1
- package/dist/cjs/lib/stream/transport/s2s/index.js +309 -352
- package/dist/cjs/lib/stream/transport/s2s/index.js.map +1 -1
- package/dist/cjs/lib/stream/types.d.ts +102 -8
- package/dist/cjs/lib/stream/types.d.ts.map +1 -1
- package/dist/cjs/metrics.d.ts +3 -2
- package/dist/cjs/metrics.d.ts.map +1 -1
- package/dist/cjs/metrics.js +24 -39
- package/dist/cjs/metrics.js.map +1 -1
- package/dist/cjs/s2.d.ts +1 -0
- package/dist/cjs/s2.d.ts.map +1 -1
- package/dist/cjs/s2.js +14 -3
- package/dist/cjs/s2.js.map +1 -1
- package/dist/cjs/stream.d.ts +5 -3
- package/dist/cjs/stream.d.ts.map +1 -1
- package/dist/cjs/stream.js +29 -18
- package/dist/cjs/stream.js.map +1 -1
- package/dist/cjs/streams.d.ts +10 -10
- package/dist/cjs/streams.d.ts.map +1 -1
- package/dist/cjs/streams.js +36 -64
- package/dist/cjs/streams.js.map +1 -1
- package/dist/cjs/utils.d.ts +3 -3
- package/dist/cjs/utils.d.ts.map +1 -1
- package/dist/cjs/utils.js +3 -3
- package/dist/cjs/utils.js.map +1 -1
- package/dist/esm/accessTokens.d.ts +3 -2
- package/dist/esm/accessTokens.d.ts.map +1 -1
- package/dist/esm/accessTokens.js +23 -38
- package/dist/esm/accessTokens.js.map +1 -1
- package/dist/esm/basin.d.ts +4 -3
- package/dist/esm/basin.d.ts.map +1 -1
- package/dist/esm/basin.js +7 -5
- package/dist/esm/basin.js.map +1 -1
- package/dist/esm/basins.d.ts +10 -10
- package/dist/esm/basins.d.ts.map +1 -1
- package/dist/esm/basins.js +37 -65
- package/dist/esm/basins.js.map +1 -1
- package/dist/esm/batch-transform.d.ts +1 -1
- package/dist/esm/batch-transform.d.ts.map +1 -1
- package/dist/esm/batch-transform.js +37 -6
- package/dist/esm/batch-transform.js.map +1 -1
- package/dist/esm/common.d.ts +42 -0
- package/dist/esm/common.d.ts.map +1 -1
- package/dist/esm/error.d.ts +40 -2
- package/dist/esm/error.d.ts.map +1 -1
- package/dist/esm/error.js +260 -2
- package/dist/esm/error.js.map +1 -1
- package/dist/esm/generated/client/types.gen.d.ts +7 -0
- package/dist/esm/generated/client/types.gen.d.ts.map +1 -1
- package/dist/esm/generated/client/utils.gen.d.ts +1 -0
- package/dist/esm/generated/client/utils.gen.d.ts.map +1 -1
- package/dist/esm/generated/client/utils.gen.js.map +1 -1
- package/dist/esm/generated/core/types.gen.d.ts +2 -0
- package/dist/esm/generated/core/types.gen.d.ts.map +1 -1
- package/dist/esm/index.d.ts +46 -3
- package/dist/esm/index.d.ts.map +1 -1
- package/dist/esm/index.js +23 -1
- package/dist/esm/index.js.map +1 -1
- package/dist/esm/lib/result.d.ts +57 -0
- package/dist/esm/lib/result.d.ts.map +1 -0
- package/dist/esm/lib/result.js +37 -0
- package/dist/esm/lib/result.js.map +1 -0
- package/dist/esm/lib/retry.d.ts +151 -0
- package/dist/esm/lib/retry.d.ts.map +1 -0
- package/dist/esm/lib/retry.js +830 -0
- package/dist/esm/lib/retry.js.map +1 -0
- package/dist/esm/lib/stream/factory.d.ts +0 -1
- package/dist/esm/lib/stream/factory.d.ts.map +1 -1
- package/dist/esm/lib/stream/factory.js +0 -1
- package/dist/esm/lib/stream/factory.js.map +1 -1
- package/dist/esm/lib/stream/transport/fetch/index.d.ts +24 -32
- package/dist/esm/lib/stream/transport/fetch/index.d.ts.map +1 -1
- package/dist/esm/lib/stream/transport/fetch/index.js +247 -187
- package/dist/esm/lib/stream/transport/fetch/index.js.map +1 -1
- package/dist/esm/lib/stream/transport/fetch/shared.d.ts +1 -2
- package/dist/esm/lib/stream/transport/fetch/shared.d.ts.map +1 -1
- package/dist/esm/lib/stream/transport/fetch/shared.js +51 -74
- package/dist/esm/lib/stream/transport/fetch/shared.js.map +1 -1
- package/dist/esm/lib/stream/transport/s2s/index.d.ts +0 -1
- package/dist/esm/lib/stream/transport/s2s/index.d.ts.map +1 -1
- package/dist/esm/lib/stream/transport/s2s/index.js +310 -353
- package/dist/esm/lib/stream/transport/s2s/index.js.map +1 -1
- package/dist/esm/lib/stream/types.d.ts +102 -8
- package/dist/esm/lib/stream/types.d.ts.map +1 -1
- package/dist/esm/metrics.d.ts +3 -2
- package/dist/esm/metrics.d.ts.map +1 -1
- package/dist/esm/metrics.js +25 -40
- package/dist/esm/metrics.js.map +1 -1
- package/dist/esm/s2.d.ts +1 -0
- package/dist/esm/s2.d.ts.map +1 -1
- package/dist/esm/s2.js +14 -3
- package/dist/esm/s2.js.map +1 -1
- package/dist/esm/stream.d.ts +5 -3
- package/dist/esm/stream.d.ts.map +1 -1
- package/dist/esm/stream.js +30 -19
- package/dist/esm/stream.js.map +1 -1
- package/dist/esm/streams.d.ts +10 -10
- package/dist/esm/streams.d.ts.map +1 -1
- package/dist/esm/streams.js +37 -65
- package/dist/esm/streams.js.map +1 -1
- package/dist/esm/utils.d.ts +3 -3
- package/dist/esm/utils.d.ts.map +1 -1
- package/dist/esm/utils.js +2 -2
- package/dist/esm/utils.js.map +1 -1
- package/package.json +4 -2
package/dist/cjs/lib/stream/transport/s2s/index.js

@@ -9,12 +9,15 @@ Object.defineProperty(exports, "__esModule", { value: true });
 exports.S2STransport = void 0;
 exports.buildProtoAppendInput = buildProtoAppendInput;
 const http2 = require("node:http2");
-const
+const debug_1 = require("debug");
+const error_js_1 = require("../../../../error.js");
 const s2_js_1 = require("../../../../generated/proto/s2.js");
-const index_js_2 = require("../../../../index.js");
 const utils_js_1 = require("../../../../utils.js");
 const Redacted = require("../../../redacted.js");
+const result_js_1 = require("../../../result.js");
+const retry_js_1 = require("../../../retry.js");
 const framing_js_1 = require("./framing.js");
+const debug = (0, debug_1.default)("s2:s2s");
 function buildProtoAppendInput(records, args) {
     const textEncoder = new TextEncoder();
     return s2_js_1.AppendInput.create({
@@ -44,22 +47,21 @@ function buildProtoAppendInput(records, args) {
     });
 }
 class S2STransport {
-    client;
     transportConfig;
     connection;
     connectionPromise;
     constructor(config) {
-        this.client = (0, index_js_1.createClient)((0, index_js_1.createConfig)({
-            baseUrl: config.baseUrl,
-            auth: () => Redacted.value(config.accessToken),
-        }));
         this.transportConfig = config;
     }
     async makeAppendSession(stream, sessionOptions, requestOptions) {
-        return
+        return retry_js_1.RetryAppendSession.create((myOptions) => {
+            return S2SAppendSession.create(this.transportConfig.baseUrl, this.transportConfig.accessToken, stream, () => this.getConnection(), this.transportConfig.basinName, myOptions, requestOptions);
+        }, sessionOptions, this.transportConfig.retry);
     }
     async makeReadSession(stream, args, options) {
-        return
+        return retry_js_1.RetryReadSession.create((myArgs) => {
+            return S2SReadSession.create(this.transportConfig.baseUrl, this.transportConfig.accessToken, stream, myArgs, options, () => this.getConnection(), this.transportConfig.basinName);
+        }, args, this.transportConfig.retry);
     }
     /**
      * Get or create HTTP/2 connection (one per transport)
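Both session factories now hand construction off to wrappers from the new lib/retry module, passing a closure that can rebuild the underlying S2S session when an attempt fails. A minimal sketch of that wrapper pattern, with assumed generics and an assumed RetryConfig shape (only the RetryAppendSession.create / RetryReadSession.create call sites are visible in this diff):

```ts
// Sketch of the retry-wrapper pattern introduced above. The factory closure is
// the key piece: a retry can rebuild the inner transport session from scratch,
// with options adjusted between attempts (e.g. an advanced read position).
type SessionFactory<O, S> = (options: O) => Promise<S>;

interface RetryConfig {
  maxAttempts: number; // assumed shape; the SDK's retry config is not shown here
}

class RetrySession<O, S> {
  private constructor(
    private readonly factory: SessionFactory<O, S>,
    private options: O,
    private readonly retry: RetryConfig,
  ) {}

  static async create<O, S>(
    factory: SessionFactory<O, S>,
    options: O,
    retry: RetryConfig = { maxAttempts: 3 },
  ): Promise<RetrySession<O, S>> {
    return new RetrySession(factory, options, retry);
  }

  // Run an operation against a freshly built session, rebuilding on failure.
  async run<T>(op: (session: S) => Promise<T>): Promise<T> {
    let lastErr: unknown;
    for (let attempt = 0; attempt < this.retry.maxAttempts; attempt++) {
      const session = await this.factory(this.options);
      try {
        return await op(session);
      } catch (e) {
        // A real implementation would back off here and distinguish
        // retryable from terminal errors before looping.
        lastErr = e;
      }
    }
    throw lastErr;
  }
}
```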
@@ -122,25 +124,36 @@ class S2SReadSession extends ReadableStream {
     url;
     options;
     getConnection;
+    basinName;
     http2Stream;
     _lastReadPosition;
+    _nextReadPosition;
+    _lastObservedTail;
     parser = new framing_js_1.S2SFrameParser();
-    static async create(baseUrl, bearerToken, streamName, args, options, getConnection) {
+    static async create(baseUrl, bearerToken, streamName, args, options, getConnection, basinName) {
         const url = new URL(baseUrl);
-        return new S2SReadSession(streamName, args, bearerToken, url, options, getConnection);
+        return new S2SReadSession(streamName, args, bearerToken, url, options, getConnection, basinName);
     }
-    constructor(streamName, args, authToken, url, options, getConnection) {
+    constructor(streamName, args, authToken, url, options, getConnection, basinName) {
         // Initialize parser and textDecoder before super() call
         const parser = new framing_js_1.S2SFrameParser();
         const textDecoder = new TextDecoder();
         let http2Stream;
         let lastReadPosition;
+        // Track timeout for detecting when server stops sending data
+        const TAIL_TIMEOUT_MS = 20000; // 20 seconds
+        let timeoutTimer;
         super({
             start: async (controller) => {
                 let controllerClosed = false;
+                let responseCode;
                 const safeClose = () => {
                     if (!controllerClosed) {
                         controllerClosed = true;
+                        if (timeoutTimer) {
+                            clearTimeout(timeoutTimer);
+                            timeoutTimer = undefined;
+                        }
                         try {
                             controller.close();
                         }
@@ -152,10 +165,37 @@
                 const safeError = (err) => {
                     if (!controllerClosed) {
                         controllerClosed = true;
-
+                        if (timeoutTimer) {
+                            clearTimeout(timeoutTimer);
+                            timeoutTimer = undefined;
+                        }
+                        // Convert error to S2Error and enqueue as error result
+                        const s2Err = err instanceof error_js_1.S2Error
+                            ? err
+                            : new error_js_1.S2Error({ message: String(err), status: 500 });
+                        controller.enqueue({ ok: false, error: s2Err });
+                        controller.close();
                     }
                 };
+                // Helper to start/reset the timeout timer
+                // Resets on every tail received, fires only if no tail for 20s
+                const resetTimeoutTimer = () => {
+                    if (timeoutTimer) {
+                        clearTimeout(timeoutTimer);
+                    }
+                    timeoutTimer = setTimeout(() => {
+                        const timeoutError = new error_js_1.S2Error({
+                            message: `No tail received for ${TAIL_TIMEOUT_MS / 1000}s`,
+                            status: 408, // Request Timeout
+                            code: "TIMEOUT",
+                        });
+                        debug("tail timeout detected");
+                        safeError(timeoutError);
+                    }, TAIL_TIMEOUT_MS);
+                };
                 try {
+                    // Start the timeout timer - will fire in 20s if no tail received
+                    resetTimeoutTimer();
                     const connection = await getConnection();
                     // Build query string
                     const queryParams = new URLSearchParams();
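The read session now arms a 20-second deadline before the request goes out and re-arms it on every tail it receives, so a server that silently stops sending surfaces as a 408 S2Error instead of a hang. The same resettable-deadline pattern in isolation (a sketch; the helper name and usage are illustrative, not the SDK's API):

```ts
// Resettable idle deadline (sketch). The read session above re-arms this on
// every tail frame and clears it on close/error.
function makeIdleDeadline(ms: number, onTimeout: () => void) {
  let timer: ReturnType<typeof setTimeout> | undefined;
  return {
    reset() {
      if (timer) clearTimeout(timer);
      timer = setTimeout(onTimeout, ms);
    },
    clear() {
      if (timer) {
        clearTimeout(timer);
        timer = undefined;
      }
    },
  };
}

// Usage: arm before the request, re-arm on activity, clear on shutdown.
const deadline = makeIdleDeadline(20_000, () => {
  console.error("no tail received for 20s");
});
deadline.reset();
```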
@@ -185,6 +225,7 @@
                         authorization: `Bearer ${Redacted.value(authToken)}`,
                         accept: "application/protobuf",
                         "content-type": "s2s/proto",
+                        ...(basinName ? { "s2-basin": basinName } : {}),
                     });
                     http2Stream = stream;
                     options?.signal?.addEventListener("abort", () => {
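The new optional `s2-basin` header is attached with a conditional spread, so with no basin configured the key is omitted entirely rather than sent with an empty value. The idiom in isolation (a sketch; the env-var source is hypothetical):

```ts
// Conditional spread: an undefined basinName contributes no header at all.
const basinName: string | undefined = process.env.S2_BASIN; // hypothetical source
const headers = {
  accept: "application/protobuf",
  "content-type": "s2s/proto",
  ...(basinName ? { "s2-basin": basinName } : {}),
};
```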
@@ -192,64 +233,139 @@
                             stream.close();
                         }
                     });
+                    stream.on("response", (headers) => {
+                        // Cache the status.
+                        // This informs whether we should attempt to parse s2s frames in the "data" handler.
+                        responseCode = headers[":status"] ?? 500;
+                    });
+                    connection.on("goaway", (errorCode, lastStreamID, opaqueData) => {
+                        debug("received GOAWAY from server");
+                    });
+                    stream.on("error", (err) => {
+                        safeError(err);
+                    });
                     stream.on("data", (chunk) => {
-
-
-
-
-
-
-
-
-
-
-
-                                code: errorJson.code,
-                                status: frame.statusCode,
-                            }));
-                        }
-                        catch {
-                            safeError(new index_js_2.S2Error({
-                                message: errorText || "Unknown error",
-                                status: frame.statusCode,
-                            }));
-                        }
+                        try {
+                            if ((responseCode ?? 500) >= 400) {
+                                const errorText = textDecoder.decode(chunk);
+                                try {
+                                    const errorJson = JSON.parse(errorText);
+                                    safeError(new error_js_1.S2Error({
+                                        message: errorJson.message ?? "Unknown error",
+                                        code: errorJson.code,
+                                        status: responseCode,
+                                        origin: "server",
+                                    }));
                                 }
-
-
+                                catch {
+                                    safeError(new error_js_1.S2Error({
+                                        message: errorText || "Unknown error",
+                                        status: responseCode,
+                                        origin: "server",
+                                    }));
                                 }
-
+                                return;
                             }
-
-
-
-
-
-                        if (
-
-
-
+                            // Buffer already extends Uint8Array in Node.js, no need to convert
+                            parser.push(chunk);
+                            let frame = parser.parseFrame();
+                            while (frame) {
+                                if (frame.terminal) {
+                                    if (frame.statusCode && frame.statusCode >= 400) {
+                                        const errorText = textDecoder.decode(frame.body);
+                                        try {
+                                            const errorJson = JSON.parse(errorText);
+                                            const status = frame.statusCode ?? 500;
+                                            // Map known read errors
+                                            if (status === 416) {
+                                                safeError(new error_js_1.RangeNotSatisfiableError({ status }));
+                                            }
+                                            else {
+                                                safeError((0, error_js_1.makeServerError)({ status, statusText: undefined }, errorJson));
+                                            }
+                                        }
+                                        catch {
+                                            safeError((0, error_js_1.makeServerError)({
+                                                status: frame.statusCode ?? 500,
+                                                statusText: undefined,
+                                            }, errorText));
+                                        }
                                     }
-
-
-                                    const converted = this.convertRecord(record, as ?? "string", textDecoder);
-                                    controller.enqueue(converted);
+                                    else {
+                                        safeClose();
                                     }
+                                    stream.close();
                                 }
-
-
-
-
+                                else {
+                                    // Parse ReadBatch
+                                    try {
+                                        const protoBatch = s2_js_1.ReadBatch.fromBinary(frame.body);
+                                        resetTimeoutTimer();
+                                        // Update tail from batch
+                                        if (protoBatch.tail) {
+                                            const tail = convertStreamPosition(protoBatch.tail);
+                                            lastReadPosition = tail;
+                                            this._lastReadPosition = tail;
+                                            this._lastObservedTail = tail;
+                                            debug("received tail");
+                                        }
+                                        // Enqueue each record and track next position
+                                        for (const record of protoBatch.records) {
+                                            const converted = this.convertRecord(record, as ?? "string", textDecoder);
+                                            controller.enqueue({ ok: true, value: converted });
+                                            // Update next read position to after this record
+                                            if (record.seqNum !== undefined) {
+                                                this._nextReadPosition = {
+                                                    seq_num: Number(record.seqNum) + 1,
+                                                    timestamp: Number(record.timestamp ?? 0n),
+                                                };
+                                            }
+                                        }
+                                    }
+                                    catch (err) {
+                                        safeError(new error_js_1.S2Error({
+                                            message: `Failed to parse ReadBatch: ${err}`,
+                                            status: 500,
+                                            origin: "sdk",
+                                        }));
+                                    }
                                 }
+                                frame = parser.parseFrame();
                             }
-
+                        }
+                        catch (error) {
+                            safeError(error instanceof error_js_1.S2Error
+                                ? error
+                                : new error_js_1.S2Error({
+                                    message: `Failed to process read data: ${error}`,
+                                    status: 500,
+                                    origin: "sdk",
+                                }));
                        }
                     });
-                    stream.on("
-
+                    stream.on("end", () => {
+                        if (stream.rstCode != 0) {
+                            debug("stream reset code=%d", stream.rstCode);
+                            safeError(new error_js_1.S2Error({
+                                message: `Stream ended with error: ${stream.rstCode}`,
+                                status: 500,
+                                code: "stream reset",
+                                origin: "sdk",
+                            }));
+                        }
                     });
                     stream.on("close", () => {
-
+                        if (parser.hasData()) {
+                            safeError(new error_js_1.S2Error({
+                                message: "Stream closed with unparsed data remaining",
+                                status: 500,
+                                code: "STREAM_CLOSED_PREMATURELY",
+                                origin: "sdk",
+                            }));
+                        }
+                        else {
+                            safeClose();
+                        }
                     });
                 }
                 catch (err) {
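Note the shape change in this hunk: records are now enqueued as `{ ok: true, value }` and failures as `{ ok: false, error }` followed by a clean close, so the ReadableStream itself never errors. A consumer sketch under that assumption (iteration uses the standard async-iterable surface of ReadableStream in Node 18+; `handle` is a placeholder):

```ts
// Consuming the Result-typed read session (sketch). Both records and errors
// arrive as values; the stream closes itself after an error result.
type ReadResult<T> =
  | { ok: true; value: T }
  | { ok: false; error: Error };

async function consume<T>(
  session: ReadableStream<ReadResult<T>>,
  handle: (record: T) => void,
): Promise<void> {
  for await (const result of session) {
    if (result.ok) {
      handle(result.value);
    } else {
      // Terminal: no more results will follow this one.
      console.error("read failed:", result.error.message);
      return;
    }
  }
}
```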
@@ -268,6 +384,7 @@ class S2SReadSession extends ReadableStream {
         this.url = url;
         this.options = options;
         this.getConnection = getConnection;
+        this.basinName = basinName;
         // Assign parser to instance property after super() completes
         this.parser = parser;
         this.http2Stream = http2Stream;
@@ -330,163 +447,46 @@
         },
     };
 }
-
-        return this.
+    nextReadPosition() {
+        return this._nextReadPosition;
+    }
+    lastObservedTail() {
+        return this._lastObservedTail;
     }
 }
 /**
  * AcksStream for S2S append session
  */
-
-    constructor(setController) {
-        super({
-            start: (controller) => {
-                setController(controller);
-            },
-        });
-    }
-    async [Symbol.asyncDispose]() {
-        await this.cancel("disposed");
-    }
-    // Polyfill for older browsers
-    [Symbol.asyncIterator]() {
-        const fn = ReadableStream.prototype[Symbol.asyncIterator];
-        if (typeof fn === "function")
-            return fn.call(this);
-        const reader = this.getReader();
-        return {
-            next: async () => {
-                const r = await reader.read();
-                if (r.done) {
-                    reader.releaseLock();
-                    return { done: true, value: undefined };
-                }
-                return { done: false, value: r.value };
-            },
-            throw: async (e) => {
-                await reader.cancel(e);
-                reader.releaseLock();
-                return { done: true, value: undefined };
-            },
-            return: async () => {
-                await reader.cancel("done");
-                reader.releaseLock();
-                return { done: true, value: undefined };
-            },
-            [Symbol.asyncIterator]() {
-                return this;
-            },
-        };
-    }
-}
+// Removed S2SAcksStream - transport sessions no longer expose streams
 /**
- *
- *
+ * Fetch-based transport session for appending records via HTTP/2.
+ * Pipelined: multiple requests can be in-flight simultaneously.
+ * No backpressure, no retry logic, no streams - just submit/close with value-encoded errors.
  */
 class S2SAppendSession {
     baseUrl;
     authToken;
     streamName;
     getConnection;
+    basinName;
     options;
     http2Stream;
-    _lastAckedPosition;
     parser = new framing_js_1.S2SFrameParser();
-    acksController;
-    _readable;
-    _writable;
     closed = false;
-    queuedBytes = 0;
-    maxQueuedBytes;
-    waitingForCapacity = [];
     pendingAcks = [];
     initPromise;
-
-
-    static async create(baseUrl, bearerToken, streamName, getConnection, sessionOptions, requestOptions) {
-        return new S2SAppendSession(baseUrl, bearerToken, streamName, getConnection, sessionOptions, requestOptions);
+    static async create(baseUrl, bearerToken, streamName, getConnection, basinName, sessionOptions, requestOptions) {
+        return new S2SAppendSession(baseUrl, bearerToken, streamName, getConnection, basinName, sessionOptions, requestOptions);
     }
-    constructor(baseUrl, authToken, streamName, getConnection, sessionOptions, options) {
+    constructor(baseUrl, authToken, streamName, getConnection, basinName, sessionOptions, options) {
         this.baseUrl = baseUrl;
         this.authToken = authToken;
         this.streamName = streamName;
         this.getConnection = getConnection;
+        this.basinName = basinName;
         this.options = options;
-
-        //
-        this._readable = new S2SAcksStream((controller) => {
-            this.acksController = controller;
-        });
-        this.readable = this._readable;
-        // Create the writable stream
-        this._writable = new WritableStream({
-            start: async (controller) => {
-                this.initPromise = this.initializeStream();
-                await this.initPromise;
-            },
-            write: async (chunk) => {
-                if (this.closed) {
-                    throw new index_js_2.S2Error({ message: "AppendSession is closed" });
-                }
-                const recordsArray = Array.isArray(chunk.records)
-                    ? chunk.records
-                    : [chunk.records];
-                // Validate batch size limits
-                if (recordsArray.length > 1000) {
-                    throw new index_js_2.S2Error({
-                        message: `Batch of ${recordsArray.length} exceeds maximum batch size of 1000 records`,
-                    });
-                }
-                // Calculate metered size
-                let batchMeteredSize = 0;
-                for (const record of recordsArray) {
-                    batchMeteredSize += (0, utils_js_1.meteredSizeBytes)(record);
-                }
-                if (batchMeteredSize > 1024 * 1024) {
-                    throw new index_js_2.S2Error({
-                        message: `Batch size ${batchMeteredSize} bytes exceeds maximum of 1 MiB (1048576 bytes)`,
-                    });
-                }
-                // Wait for capacity if needed (backpressure)
-                while (this.queuedBytes + batchMeteredSize > this.maxQueuedBytes &&
-                    !this.closed) {
-                    await new Promise((resolve) => {
-                        this.waitingForCapacity.push(resolve);
-                    });
-                }
-                if (this.closed) {
-                    throw new index_js_2.S2Error({ message: "AppendSession is closed" });
-                }
-                // Send the batch immediately (pipelined)
-                // Returns when frame is sent, not when ack is received
-                await this.sendBatchNonBlocking(recordsArray, chunk, batchMeteredSize);
-            },
-            close: async () => {
-                this.closed = true;
-                await this.closeStream();
-            },
-            abort: async (reason) => {
-                this.closed = true;
-                this.queuedBytes = 0;
-                // Reject all pending acks
-                const error = new index_js_2.S2Error({
-                    message: `AppendSession was aborted: ${reason}`,
-                });
-                for (const pending of this.pendingAcks) {
-                    pending.reject(error);
-                }
-                this.pendingAcks = [];
-                // Wake up all waiting for capacity
-                for (const resolver of this.waitingForCapacity) {
-                    resolver();
-                }
-                this.waitingForCapacity = [];
-                if (this.http2Stream && !this.http2Stream.closed) {
-                    this.http2Stream.close();
-                }
-            },
-        });
-        this.writable = this._writable;
+        // No stream setup
+        // Initialization happens lazily on first submit
     }
     async initializeStream() {
         const url = new URL(this.baseUrl);
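The hunk above deletes the 0.17 surface, where the append session was a WritableStream paired with an acks ReadableStream (including byte-based backpressure via queuedBytes/maxQueuedBytes), in favor of the plain submit/close methods described in the new doc comment. A sketch of how the new surface is used, with shapes inferred from this diff rather than from the published typings:

```ts
// Inferred session surface after this change: submit() resolves with the ack
// (or an error) as a value, and close() drains pending acks before resolving.
interface AppendAck { /* acked position details, omitted here */ }
type Result<T, E = Error> = { ok: true; value: T } | { ok: false; error: E };

interface AppendSessionLike<R> {
  submit(records: R | R[]): Promise<Result<AppendAck>>;
  close(): Promise<Result<void>>;
}

async function writeAll<R>(session: AppendSessionLike<R>, batches: R[][]) {
  for (const batch of batches) {
    const res = await session.submit(batch); // resolves when the ack arrives
    if (!res.ok) {
      console.error("append failed:", res.error);
      break; // errors are values, nothing throws
    }
  }
  await session.close(); // also returns a Result rather than throwing
}
```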
@@ -500,6 +500,7 @@
             authorization: `Bearer ${Redacted.value(this.authToken)}`,
             "content-type": "s2s/proto",
             accept: "application/protobuf",
+            ...(this.basinName ? { "s2-basin": this.basinName } : {}),
         });
         this.http2Stream = stream;
         this.options?.signal?.addEventListener("abort", () => {
@@ -508,145 +509,87 @@
         }
     });
     const textDecoder = new TextDecoder();
-
-
-
-
-
-                this.acksController.close();
-            }
-            catch {
-                // Controller may already be closed, ignore
-            }
-        }
-    };
-    const safeError = (err) => {
-        if (!controllerClosed && this.acksController) {
-            controllerClosed = true;
-            this.acksController.error(err);
-        }
-        // Reject all pending acks
+    const safeError = (error) => {
+        const s2Err = error instanceof error_js_1.S2Error
+            ? error
+            : new error_js_1.S2Error({ message: String(error), status: 502 });
+        // Resolve all pending acks with error result
         for (const pending of this.pendingAcks) {
-            pending.
+            pending.resolve((0, result_js_1.err)(s2Err));
         }
         this.pendingAcks = [];
     };
     // Handle incoming data (acks)
     stream.on("data", (chunk) => {
-
-
-
-
-        if (frame.
-
-
-        const
-
-
-
-
-
-
-
-
-
-
+        try {
+            this.parser.push(chunk);
+            let frame = this.parser.parseFrame();
+            while (frame) {
+                if (frame.terminal) {
+                    if (frame.statusCode && frame.statusCode >= 400) {
+                        const errorText = textDecoder.decode(frame.body);
+                        const status = frame.statusCode ?? 500;
+                        try {
+                            const errorJson = JSON.parse(errorText);
+                            const err = status === 412
+                                ? (0, error_js_1.makeAppendPreconditionError)(status, errorJson)
+                                : (0, error_js_1.makeServerError)({ status, statusText: undefined }, errorJson);
+                            queueMicrotask(() => safeError(err));
+                        }
+                        catch {
+                            const err = (0, error_js_1.makeServerError)({ status, statusText: undefined }, errorText);
+                            queueMicrotask(() => safeError(err));
+                        }
                    }
+                    stream.close();
                }
                else {
-
-
-
-
-
-
-
-
-                    const ack = convertAppendAck(protoAck);
-                    this._lastAckedPosition = ack;
-                    // Enqueue to readable stream
-                    if (this.acksController) {
-                        this.acksController.enqueue(ack);
-                    }
-                    // Resolve the pending ack promise
-                    const pending = this.pendingAcks.shift();
-                    if (pending) {
-                        pending.resolve(ack);
-                        // Release capacity
-                        this.queuedBytes -= pending.batchSize;
-                        // Wake up one waiting writer
-                        if (this.waitingForCapacity.length > 0) {
-                            const waiter = this.waitingForCapacity.shift();
-                            waiter();
+                    // Parse AppendAck
+                    try {
+                        const protoAck = s2_js_1.AppendAck.fromBinary(frame.body);
+                        const ack = convertAppendAck(protoAck);
+                        // Resolve the pending ack promise (FIFO)
+                        const pending = this.pendingAcks.shift();
+                        if (pending) {
+                            pending.resolve((0, result_js_1.ok)(ack));
                        }
                    }
+                    catch (parseErr) {
+                        queueMicrotask(() => safeError(new error_js_1.S2Error({
+                            message: `Failed to parse AppendAck: ${parseErr}`,
+                            status: 500,
+                        })));
+                    }
                }
-
-                safeError(new index_js_2.S2Error({
-                    message: `Failed to parse AppendAck: ${err}`,
-                }));
-            }
+                frame = this.parser.parseFrame();
            }
-
+        }
+        catch (error) {
+            queueMicrotask(() => safeError(error));
        }
    });
-    stream.on("error", (
-        safeError(
+    stream.on("error", (streamErr) => {
+        queueMicrotask(() => safeError(streamErr));
    });
    stream.on("close", () => {
-
-
-
-
-
-
-
-
-
-        }
-        // Convert to protobuf AppendInput
-        const protoInput = buildProtoAppendInput(records, args);
-        const bodyBytes = s2_js_1.AppendInput.toBinary(protoInput);
-        // Frame the message
-        const frame = (0, framing_js_1.frameMessage)({
-            terminal: false,
-            body: bodyBytes,
-        });
-        // This promise resolves when the frame is written (not when ack is received)
-        return new Promise((resolve, reject) => {
-            // Track pending ack - will be resolved when ack arrives
-            const ackPromise = {
-                resolve: () => { },
-                reject,
-                batchSize: batchMeteredSize,
-            };
-            this.pendingAcks.push(ackPromise);
-            this.queuedBytes += batchMeteredSize;
-            // Send the frame (pipelined)
-            this.http2Stream.write(frame, (err) => {
-                if (err) {
-                    // Remove from pending acks on write error
-                    const idx = this.pendingAcks.indexOf(ackPromise);
-                    if (idx !== -1) {
-                        this.pendingAcks.splice(idx, 1);
-                        this.queuedBytes -= batchMeteredSize;
-                    }
-                    reject(err);
-                }
-                else {
-                    // Frame written successfully - resolve immediately (pipelined)
-                    resolve();
-                }
-            });
+        // Stream closed - resolve any remaining pending acks with error
+        // This can happen if the server closes the stream without sending all acks
+        if (this.pendingAcks.length > 0) {
+            queueMicrotask(() => safeError(new error_js_1.S2Error({
+                message: "Stream closed with pending acks",
+                status: 502,
+                code: "BAD_GATEWAY",
+            })));
+        }
    });
}
/**
- * Send a batch and wait for ack
+ * Send a batch and wait for ack. Returns AppendResult (never throws).
+ * Pipelined: multiple sends can be in-flight; acks resolve FIFO.
 */
sendBatch(records, args, batchMeteredSize) {
    if (!this.http2Stream || this.http2Stream.closed) {
-        return Promise.
+        return Promise.resolve((0, result_js_1.err)(new error_js_1.S2Error({ message: "HTTP/2 stream is not open", status: 502 })));
    }
    // Convert to protobuf AppendInput
    const protoInput = buildProtoAppendInput(records, args);
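The ack handling above relies on strict FIFO matching: every in-flight batch pushes a resolver onto pendingAcks, each AppendAck settles the oldest one, and any stream-level failure settles all of them, since ordering can no longer be trusted after an error. The pattern reduced to its core (an illustrative sketch, not the SDK's class):

```ts
// FIFO ack matching (sketch). One resolver per in-flight batch; acks settle
// the oldest resolver, errors settle every outstanding resolver at once.
type Result<T> = { ok: true; value: T } | { ok: false; error: Error };

class AckQueue<T> {
  private pending: Array<(r: Result<T>) => void> = [];

  waitForAck(): Promise<Result<T>> {
    return new Promise((resolve) => this.pending.push(resolve));
  }

  onAck(value: T): void {
    this.pending.shift()?.({ ok: true, value }); // oldest batch first
  }

  onError(error: Error): void {
    for (const resolve of this.pending.splice(0)) {
      resolve({ ok: false, error });
    }
  }
}
```

The queueMicrotask wrapping in the diff defers safeError until the current data-handler turn finishes, which presumably avoids mutating pendingAcks while the frame-parsing loop is still walking it.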
@@ -656,82 +599,99 @@
        terminal: false,
        body: bodyBytes,
    });
-    // Track pending ack - this promise resolves when the ack is received
-    return new Promise((resolve
+    // Track pending ack - this promise resolves when the ack is received (FIFO)
+    return new Promise((resolve) => {
        this.pendingAcks.push({
            resolve,
-            reject,
            batchSize: batchMeteredSize,
        });
-
-
-
-        if (err) {
+        // Send the frame (pipelined - non-blocking)
+        this.http2Stream.write(frame, (writeErr) => {
+            if (writeErr) {
                // Remove from pending acks on write error
-                const idx = this.pendingAcks.findIndex((p) => p.
+                const idx = this.pendingAcks.findIndex((p) => p.resolve === resolve);
                if (idx !== -1) {
                    this.pendingAcks.splice(idx, 1);
-                    this.queuedBytes -= batchMeteredSize;
                }
-
+                // Resolve with error result
+                const s2Err = writeErr instanceof error_js_1.S2Error
+                    ? writeErr
+                    : new error_js_1.S2Error({ message: String(writeErr), status: 502 });
+                resolve((0, result_js_1.err)(s2Err));
            }
-            // Write completed
+            // Write completed successfully - promise resolves later when ack is received
        });
    });
}
-async closeStream() {
-    // Wait for all pending acks
-    while (this.pendingAcks.length > 0) {
-        await new Promise((resolve) => setTimeout(resolve, 10));
-    }
-    // Close the HTTP/2 stream (client doesn't send terminal frame for clean close)
-    if (this.http2Stream && !this.http2Stream.closed) {
-        this.http2Stream.end();
-    }
-}
-async [Symbol.asyncDispose]() {
-    await this.close();
-}
-/**
- * Get a stream of acknowledgements for appends.
- */
-acks() {
-    return this._readable;
-}
/**
 * Close the append session.
 * Waits for all pending appends to complete before resolving.
+ * Never throws - returns CloseResult.
 */
async close() {
-
+    try {
+        this.closed = true;
+        // Wait for all pending acks to complete
+        while (this.pendingAcks.length > 0) {
+            await new Promise((resolve) => setTimeout(resolve, 10));
+        }
+        // Close the HTTP/2 stream (client doesn't send terminal frame for clean close)
+        if (this.http2Stream && !this.http2Stream.closed) {
+            this.http2Stream.end();
+        }
+        return (0, result_js_1.okClose)();
+    }
+    catch (error) {
+        const s2Err = error instanceof error_js_1.S2Error
+            ? error
+            : new error_js_1.S2Error({ message: String(error), status: 500 });
+        return (0, result_js_1.errClose)(s2Err);
+    }
}
/**
 * Submit an append request to the session.
- * Returns
+ * Returns AppendResult (never throws).
+ * Pipelined: multiple submits can be in-flight; acks resolve FIFO.
 */
async submit(records, args) {
+    // Validate closed state
    if (this.closed) {
-        return
+        return (0, result_js_1.err)(new error_js_1.S2Error({ message: "AppendSession is closed", status: 400 }));
+    }
+    // Lazy initialize HTTP/2 stream on first submit
+    if (!this.initPromise) {
+        this.initPromise = this.initializeStream();
    }
-
-    if (this.initPromise) {
+    try {
        await this.initPromise;
    }
+    catch (initErr) {
+        const s2Err = initErr instanceof error_js_1.S2Error
+            ? initErr
+            : new error_js_1.S2Error({ message: String(initErr), status: 502 });
+        return (0, result_js_1.err)(s2Err);
+    }
    const recordsArray = Array.isArray(records) ? records : [records];
-    // Validate batch size limits
+    // Validate batch size limits (non-retryable 400-level error)
    if (recordsArray.length > 1000) {
-        return
+        return (0, result_js_1.err)(new error_js_1.S2Error({
            message: `Batch of ${recordsArray.length} exceeds maximum batch size of 1000 records`,
+            status: 400,
+            code: "INVALID_ARGUMENT",
        }));
    }
-    // Calculate metered size
-    let batchMeteredSize = 0;
-
-
+    // Calculate metered size (use precalculated if provided)
+    let batchMeteredSize = args?.precalculatedSize ?? 0;
+    if (batchMeteredSize === 0) {
+        for (const record of recordsArray) {
+            batchMeteredSize += (0, utils_js_1.meteredBytes)(record);
+        }
    }
    if (batchMeteredSize > 1024 * 1024) {
-        return
+        return (0, result_js_1.err)(new error_js_1.S2Error({
            message: `Batch size ${batchMeteredSize} bytes exceeds maximum of 1 MiB (1048576 bytes)`,
+            status: 400,
+            code: "INVALID_ARGUMENT",
        }));
    }
    return this.sendBatch(recordsArray, {
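The ok/err and okClose/errClose helpers used throughout come from the new lib/result module (added in this release). Their definitions are not part of this hunk; a plausible shape inferred purely from the call sites above:

```ts
// Plausible shape of lib/result, inferred from usage in this diff
// (ok/err/okClose/errClose); the real module may differ.
export type Result<T, E> = { ok: true; value: T } | { ok: false; error: E };

export const ok = <T, E = never>(value: T): Result<T, E> => ({ ok: true, value });
export const err = <E, T = never>(error: E): Result<T, E> => ({ ok: false, error });

// Close results carry no payload on success.
export type CloseResult<E> = Result<void, E>;
export const okClose = <E = never>(): CloseResult<E> => ({ ok: true, value: undefined });
export const errClose = <E>(error: E): CloseResult<E> => ({ ok: false, error });
```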
@@ -740,9 +700,6 @@
        match_seq_num: args?.match_seq_num,
    }, batchMeteredSize);
}
-lastAckedPosition() {
-    return this._lastAckedPosition;
-}
}
/**
 * Convert protobuf StreamPosition to OpenAPI StreamPosition