@s2-dev/streamstore 0.17.6 → 0.18.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +69 -1
- package/dist/cjs/accessTokens.d.ts +3 -2
- package/dist/cjs/accessTokens.d.ts.map +1 -1
- package/dist/cjs/accessTokens.js +22 -37
- package/dist/cjs/accessTokens.js.map +1 -1
- package/dist/cjs/basin.d.ts +4 -3
- package/dist/cjs/basin.d.ts.map +1 -1
- package/dist/cjs/basin.js +7 -5
- package/dist/cjs/basin.js.map +1 -1
- package/dist/cjs/basins.d.ts +10 -10
- package/dist/cjs/basins.d.ts.map +1 -1
- package/dist/cjs/basins.js +36 -64
- package/dist/cjs/basins.js.map +1 -1
- package/dist/cjs/batch-transform.d.ts +1 -1
- package/dist/cjs/batch-transform.d.ts.map +1 -1
- package/dist/cjs/batch-transform.js +36 -5
- package/dist/cjs/batch-transform.js.map +1 -1
- package/dist/cjs/common.d.ts +42 -0
- package/dist/cjs/common.d.ts.map +1 -1
- package/dist/cjs/error.d.ts +40 -2
- package/dist/cjs/error.d.ts.map +1 -1
- package/dist/cjs/error.js +268 -2
- package/dist/cjs/error.js.map +1 -1
- package/dist/cjs/generated/client/types.gen.d.ts +7 -0
- package/dist/cjs/generated/client/types.gen.d.ts.map +1 -1
- package/dist/cjs/generated/client/utils.gen.d.ts +1 -0
- package/dist/cjs/generated/client/utils.gen.d.ts.map +1 -1
- package/dist/cjs/generated/client/utils.gen.js.map +1 -1
- package/dist/cjs/generated/core/types.gen.d.ts +2 -0
- package/dist/cjs/generated/core/types.gen.d.ts.map +1 -1
- package/dist/cjs/index.d.ts +46 -3
- package/dist/cjs/index.d.ts.map +1 -1
- package/dist/cjs/index.js +28 -2
- package/dist/cjs/index.js.map +1 -1
- package/dist/cjs/lib/result.d.ts +57 -0
- package/dist/cjs/lib/result.d.ts.map +1 -0
- package/dist/cjs/lib/result.js +43 -0
- package/dist/cjs/lib/result.js.map +1 -0
- package/dist/cjs/lib/retry.d.ts +151 -0
- package/dist/cjs/lib/retry.d.ts.map +1 -0
- package/dist/cjs/lib/retry.js +839 -0
- package/dist/cjs/lib/retry.js.map +1 -0
- package/dist/cjs/lib/stream/factory.d.ts +0 -1
- package/dist/cjs/lib/stream/factory.d.ts.map +1 -1
- package/dist/cjs/lib/stream/factory.js +0 -1
- package/dist/cjs/lib/stream/factory.js.map +1 -1
- package/dist/cjs/lib/stream/transport/fetch/index.d.ts +24 -32
- package/dist/cjs/lib/stream/transport/fetch/index.d.ts.map +1 -1
- package/dist/cjs/lib/stream/transport/fetch/index.js +247 -187
- package/dist/cjs/lib/stream/transport/fetch/index.js.map +1 -1
- package/dist/cjs/lib/stream/transport/fetch/shared.d.ts +1 -2
- package/dist/cjs/lib/stream/transport/fetch/shared.d.ts.map +1 -1
- package/dist/cjs/lib/stream/transport/fetch/shared.js +49 -72
- package/dist/cjs/lib/stream/transport/fetch/shared.js.map +1 -1
- package/dist/cjs/lib/stream/transport/s2s/index.d.ts +0 -1
- package/dist/cjs/lib/stream/transport/s2s/index.d.ts.map +1 -1
- package/dist/cjs/lib/stream/transport/s2s/index.js +309 -352
- package/dist/cjs/lib/stream/transport/s2s/index.js.map +1 -1
- package/dist/cjs/lib/stream/types.d.ts +102 -8
- package/dist/cjs/lib/stream/types.d.ts.map +1 -1
- package/dist/cjs/metrics.d.ts +3 -2
- package/dist/cjs/metrics.d.ts.map +1 -1
- package/dist/cjs/metrics.js +24 -39
- package/dist/cjs/metrics.js.map +1 -1
- package/dist/cjs/s2.d.ts +1 -0
- package/dist/cjs/s2.d.ts.map +1 -1
- package/dist/cjs/s2.js +14 -3
- package/dist/cjs/s2.js.map +1 -1
- package/dist/cjs/stream.d.ts +5 -3
- package/dist/cjs/stream.d.ts.map +1 -1
- package/dist/cjs/stream.js +29 -18
- package/dist/cjs/stream.js.map +1 -1
- package/dist/cjs/streams.d.ts +10 -10
- package/dist/cjs/streams.d.ts.map +1 -1
- package/dist/cjs/streams.js +36 -64
- package/dist/cjs/streams.js.map +1 -1
- package/dist/cjs/utils.d.ts +3 -3
- package/dist/cjs/utils.d.ts.map +1 -1
- package/dist/cjs/utils.js +3 -3
- package/dist/cjs/utils.js.map +1 -1
- package/dist/esm/accessTokens.d.ts +3 -2
- package/dist/esm/accessTokens.d.ts.map +1 -1
- package/dist/esm/accessTokens.js +23 -38
- package/dist/esm/accessTokens.js.map +1 -1
- package/dist/esm/basin.d.ts +4 -3
- package/dist/esm/basin.d.ts.map +1 -1
- package/dist/esm/basin.js +7 -5
- package/dist/esm/basin.js.map +1 -1
- package/dist/esm/basins.d.ts +10 -10
- package/dist/esm/basins.d.ts.map +1 -1
- package/dist/esm/basins.js +37 -65
- package/dist/esm/basins.js.map +1 -1
- package/dist/esm/batch-transform.d.ts +1 -1
- package/dist/esm/batch-transform.d.ts.map +1 -1
- package/dist/esm/batch-transform.js +37 -6
- package/dist/esm/batch-transform.js.map +1 -1
- package/dist/esm/common.d.ts +42 -0
- package/dist/esm/common.d.ts.map +1 -1
- package/dist/esm/error.d.ts +40 -2
- package/dist/esm/error.d.ts.map +1 -1
- package/dist/esm/error.js +260 -2
- package/dist/esm/error.js.map +1 -1
- package/dist/esm/generated/client/types.gen.d.ts +7 -0
- package/dist/esm/generated/client/types.gen.d.ts.map +1 -1
- package/dist/esm/generated/client/utils.gen.d.ts +1 -0
- package/dist/esm/generated/client/utils.gen.d.ts.map +1 -1
- package/dist/esm/generated/client/utils.gen.js.map +1 -1
- package/dist/esm/generated/core/types.gen.d.ts +2 -0
- package/dist/esm/generated/core/types.gen.d.ts.map +1 -1
- package/dist/esm/index.d.ts +46 -3
- package/dist/esm/index.d.ts.map +1 -1
- package/dist/esm/index.js +23 -1
- package/dist/esm/index.js.map +1 -1
- package/dist/esm/lib/result.d.ts +57 -0
- package/dist/esm/lib/result.d.ts.map +1 -0
- package/dist/esm/lib/result.js +37 -0
- package/dist/esm/lib/result.js.map +1 -0
- package/dist/esm/lib/retry.d.ts +151 -0
- package/dist/esm/lib/retry.d.ts.map +1 -0
- package/dist/esm/lib/retry.js +830 -0
- package/dist/esm/lib/retry.js.map +1 -0
- package/dist/esm/lib/stream/factory.d.ts +0 -1
- package/dist/esm/lib/stream/factory.d.ts.map +1 -1
- package/dist/esm/lib/stream/factory.js +0 -1
- package/dist/esm/lib/stream/factory.js.map +1 -1
- package/dist/esm/lib/stream/transport/fetch/index.d.ts +24 -32
- package/dist/esm/lib/stream/transport/fetch/index.d.ts.map +1 -1
- package/dist/esm/lib/stream/transport/fetch/index.js +247 -187
- package/dist/esm/lib/stream/transport/fetch/index.js.map +1 -1
- package/dist/esm/lib/stream/transport/fetch/shared.d.ts +1 -2
- package/dist/esm/lib/stream/transport/fetch/shared.d.ts.map +1 -1
- package/dist/esm/lib/stream/transport/fetch/shared.js +51 -74
- package/dist/esm/lib/stream/transport/fetch/shared.js.map +1 -1
- package/dist/esm/lib/stream/transport/s2s/index.d.ts +0 -1
- package/dist/esm/lib/stream/transport/s2s/index.d.ts.map +1 -1
- package/dist/esm/lib/stream/transport/s2s/index.js +310 -353
- package/dist/esm/lib/stream/transport/s2s/index.js.map +1 -1
- package/dist/esm/lib/stream/types.d.ts +102 -8
- package/dist/esm/lib/stream/types.d.ts.map +1 -1
- package/dist/esm/metrics.d.ts +3 -2
- package/dist/esm/metrics.d.ts.map +1 -1
- package/dist/esm/metrics.js +25 -40
- package/dist/esm/metrics.js.map +1 -1
- package/dist/esm/s2.d.ts +1 -0
- package/dist/esm/s2.d.ts.map +1 -1
- package/dist/esm/s2.js +14 -3
- package/dist/esm/s2.js.map +1 -1
- package/dist/esm/stream.d.ts +5 -3
- package/dist/esm/stream.d.ts.map +1 -1
- package/dist/esm/stream.js +30 -19
- package/dist/esm/stream.js.map +1 -1
- package/dist/esm/streams.d.ts +10 -10
- package/dist/esm/streams.d.ts.map +1 -1
- package/dist/esm/streams.js +37 -65
- package/dist/esm/streams.js.map +1 -1
- package/dist/esm/utils.d.ts +3 -3
- package/dist/esm/utils.d.ts.map +1 -1
- package/dist/esm/utils.js +2 -2
- package/dist/esm/utils.js.map +1 -1
- package/package.json +4 -2
The diff below covers the s2s transport, package/dist/esm/lib/stream/transport/s2s/index.js (+310 −353); the CJS build receives the equivalent changes.

```diff
@@ -5,12 +5,15 @@
  * This file should only be imported in Node.js environments
  */
 import * as http2 from "node:http2";
-import
+import createDebug from "debug";
+import { makeAppendPreconditionError, makeServerError, RangeNotSatisfiableError, S2Error, } from "../../../../error.js";
 import { AppendAck as ProtoAppendAck, AppendInput as ProtoAppendInput, ReadBatch as ProtoReadBatch, } from "../../../../generated/proto/s2.js";
-import {
-import { meteredSizeBytes } from "../../../../utils.js";
+import { meteredBytes } from "../../../../utils.js";
 import * as Redacted from "../../../redacted.js";
+import { err, errClose, ok, okClose } from "../../../result.js";
+import { RetryAppendSession as AppendSessionImpl, RetryReadSession as ReadSessionImpl, } from "../../../retry.js";
 import { frameMessage, S2SFrameParser } from "./framing.js";
+const debug = createDebug("s2:s2s");
 export function buildProtoAppendInput(records, args) {
     const textEncoder = new TextEncoder();
     return ProtoAppendInput.create({
```
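The 0.18.0 transport pivots onto value-encoded results: `ok`/`err` wrap per-operation outcomes and `okClose`/`errClose` wrap session shutdown, so errors flow as data instead of thrown exceptions. The new `lib/result` module's source is not shown in this diff; a minimal sketch of the shape its helpers presumably have, inferred from how they are used below:

```ts
// Hypothetical sketch of the lib/result helpers, inferred from usage in this
// diff; the shipped module (dist/esm/lib/result.js, +37 lines) may differ.
export type Ok<T> = { ok: true; value: T };
export type Err<E> = { ok: false; error: E };
export type Result<T, E> = Ok<T> | Err<E>;

export const ok = <T>(value: T): Ok<T> => ({ ok: true, value });
export const err = <E>(error: E): Err<E> => ({ ok: false, error });

// Close results carry no payload on success.
export const okClose = (): Ok<void> => ({ ok: true, value: undefined });
export const errClose = <E>(error: E): Err<E> => ({ ok: false, error });
```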
```diff
@@ -40,22 +43,21 @@ export function buildProtoAppendInput(records, args) {
     });
 }
 export class S2STransport {
-    client;
     transportConfig;
     connection;
     connectionPromise;
     constructor(config) {
-        this.client = createClient(createConfig({
-            baseUrl: config.baseUrl,
-            auth: () => Redacted.value(config.accessToken),
-        }));
         this.transportConfig = config;
     }
     async makeAppendSession(stream, sessionOptions, requestOptions) {
-        return
+        return AppendSessionImpl.create((myOptions) => {
+            return S2SAppendSession.create(this.transportConfig.baseUrl, this.transportConfig.accessToken, stream, () => this.getConnection(), this.transportConfig.basinName, myOptions, requestOptions);
+        }, sessionOptions, this.transportConfig.retry);
     }
     async makeReadSession(stream, args, options) {
-        return
+        return ReadSessionImpl.create((myArgs) => {
+            return S2SReadSession.create(this.transportConfig.baseUrl, this.transportConfig.accessToken, stream, myArgs, options, () => this.getConnection(), this.transportConfig.basinName);
+        }, args, this.transportConfig.retry);
     }
     /**
      * Get or create HTTP/2 connection (one per transport)
```
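Both session factories now return retry-wrapped sessions: the transport hands `RetryAppendSession`/`RetryReadSession` a callback that can build a fresh `S2SAppendSession`/`S2SReadSession` on demand, plus the caller's options and the configured retry policy. A rough sketch of that recreate-on-failure pattern (names and resume logic here are illustrative, not the actual `lib/retry.js` API):

```ts
// Illustrative only: the retry wrapper owns a factory so it can rebuild the
// transport-level session after a failure.
type ReadArgs = { seq_num?: number };

async function* retryingRead(
  makeInner: (args: ReadArgs) => Promise<AsyncIterable<string>>,
  args: ReadArgs,
  maxAttempts = 3,
): AsyncGenerator<string> {
  for (let attempt = 0; attempt < maxAttempts; attempt++) {
    const inner = await makeInner(args); // fresh transport session per attempt
    try {
      for await (const record of inner) {
        yield record;
      }
      return; // clean end of stream
    } catch {
      // Fall through: the next attempt recreates the session via the factory,
      // typically resuming from the last observed read position.
    }
  }
}
```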
```diff
@@ -117,25 +119,36 @@ class S2SReadSession extends ReadableStream {
     url;
     options;
     getConnection;
+    basinName;
     http2Stream;
     _lastReadPosition;
+    _nextReadPosition;
+    _lastObservedTail;
     parser = new S2SFrameParser();
-    static async create(baseUrl, bearerToken, streamName, args, options, getConnection) {
+    static async create(baseUrl, bearerToken, streamName, args, options, getConnection, basinName) {
         const url = new URL(baseUrl);
-        return new S2SReadSession(streamName, args, bearerToken, url, options, getConnection);
+        return new S2SReadSession(streamName, args, bearerToken, url, options, getConnection, basinName);
     }
-    constructor(streamName, args, authToken, url, options, getConnection) {
+    constructor(streamName, args, authToken, url, options, getConnection, basinName) {
         // Initialize parser and textDecoder before super() call
         const parser = new S2SFrameParser();
         const textDecoder = new TextDecoder();
         let http2Stream;
         let lastReadPosition;
+        // Track timeout for detecting when server stops sending data
+        const TAIL_TIMEOUT_MS = 20000; // 20 seconds
+        let timeoutTimer;
         super({
             start: async (controller) => {
                 let controllerClosed = false;
+                let responseCode;
                 const safeClose = () => {
                     if (!controllerClosed) {
                         controllerClosed = true;
+                        if (timeoutTimer) {
+                            clearTimeout(timeoutTimer);
+                            timeoutTimer = undefined;
+                        }
                         try {
                             controller.close();
                         }
```
```diff
@@ -147,10 +160,37 @@ class S2SReadSession extends ReadableStream {
                 const safeError = (err) => {
                     if (!controllerClosed) {
                         controllerClosed = true;
-
+                        if (timeoutTimer) {
+                            clearTimeout(timeoutTimer);
+                            timeoutTimer = undefined;
+                        }
+                        // Convert error to S2Error and enqueue as error result
+                        const s2Err = err instanceof S2Error
+                            ? err
+                            : new S2Error({ message: String(err), status: 500 });
+                        controller.enqueue({ ok: false, error: s2Err });
+                        controller.close();
+                    }
+                };
+                // Helper to start/reset the timeout timer
+                // Resets on every tail received, fires only if no tail for 20s
+                const resetTimeoutTimer = () => {
+                    if (timeoutTimer) {
+                        clearTimeout(timeoutTimer);
                     }
+                    timeoutTimer = setTimeout(() => {
+                        const timeoutError = new S2Error({
+                            message: `No tail received for ${TAIL_TIMEOUT_MS / 1000}s`,
+                            status: 408, // Request Timeout
+                            code: "TIMEOUT",
+                        });
+                        debug("tail timeout detected");
+                        safeError(timeoutError);
+                    }, TAIL_TIMEOUT_MS);
                 };
                 try {
+                    // Start the timeout timer - will fire in 20s if no tail received
+                    resetTimeoutTimer();
                     const connection = await getConnection();
                     // Build query string
                     const queryParams = new URLSearchParams();
```
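The session also gains a stall detector: a watchdog timer armed before the request and re-armed on every tail frame, which raises a 408 `TIMEOUT` through `safeError` if the server goes quiet for 20 seconds. The pattern in isolation:

```ts
// The reset-on-activity watchdog used above, shown standalone.
const TAIL_TIMEOUT_MS = 20_000;
let timer: ReturnType<typeof setTimeout> | undefined;

function armWatchdog(onTimeout: () => void): void {
  if (timer) clearTimeout(timer); // activity re-arms rather than stacking timers
  timer = setTimeout(onTimeout, TAIL_TIMEOUT_MS);
}

function disarmWatchdog(): void {
  if (timer) {
    clearTimeout(timer);
    timer = undefined; // always cleared on close/error so nothing keeps firing
  }
}
```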
```diff
@@ -180,6 +220,7 @@ class S2SReadSession extends ReadableStream {
                         authorization: `Bearer ${Redacted.value(authToken)}`,
                         accept: "application/protobuf",
                         "content-type": "s2s/proto",
+                        ...(basinName ? { "s2-basin": basinName } : {}),
                     });
                     http2Stream = stream;
                     options?.signal?.addEventListener("abort", () => {
```
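Read requests now advertise the owning basin via an `s2-basin` header whenever `basinName` is set (the append path below gains the same header); conditional spread keeps the header entirely absent otherwise:

```ts
// The conditional-spread idiom used above: "s2-basin" only appears when set.
function buildHeaders(basinName?: string): Record<string, string> {
  return {
    accept: "application/protobuf",
    "content-type": "s2s/proto",
    ...(basinName ? { "s2-basin": basinName } : {}),
  };
}
```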
```diff
@@ -187,64 +228,139 @@ class S2SReadSession extends ReadableStream {
                             stream.close();
                         }
                     });
+                    stream.on("response", (headers) => {
+                        // Cache the status.
+                        // This informs whether we should attempt to parse s2s frames in the "data" handler.
+                        responseCode = headers[":status"] ?? 500;
+                    });
+                    connection.on("goaway", (errorCode, lastStreamID, opaqueData) => {
+                        debug("received GOAWAY from server");
+                    });
+                    stream.on("error", (err) => {
+                        safeError(err);
+                    });
                     stream.on("data", (chunk) => {
-
-
-
-
-
-
-
-
-
-
-
-                                    code: errorJson.code,
-                                    status: frame.statusCode,
-                                }));
-                            }
-                            catch {
-                                safeError(new S2Error({
-                                    message: errorText || "Unknown error",
-                                    status: frame.statusCode,
-                                }));
-                            }
+                        try {
+                            if ((responseCode ?? 500) >= 400) {
+                                const errorText = textDecoder.decode(chunk);
+                                try {
+                                    const errorJson = JSON.parse(errorText);
+                                    safeError(new S2Error({
+                                        message: errorJson.message ?? "Unknown error",
+                                        code: errorJson.code,
+                                        status: responseCode,
+                                        origin: "server",
+                                    }));
                                 }
-
-
+                                catch {
+                                    safeError(new S2Error({
+                                        message: errorText || "Unknown error",
+                                        status: responseCode,
+                                        origin: "server",
+                                    }));
                                 }
-
+                                return;
                             }
-
-
-
-
-
-                            if (
-
-
-
+                            // Buffer already extends Uint8Array in Node.js, no need to convert
+                            parser.push(chunk);
+                            let frame = parser.parseFrame();
+                            while (frame) {
+                                if (frame.terminal) {
+                                    if (frame.statusCode && frame.statusCode >= 400) {
+                                        const errorText = textDecoder.decode(frame.body);
+                                        try {
+                                            const errorJson = JSON.parse(errorText);
+                                            const status = frame.statusCode ?? 500;
+                                            // Map known read errors
+                                            if (status === 416) {
+                                                safeError(new RangeNotSatisfiableError({ status }));
+                                            }
+                                            else {
+                                                safeError(makeServerError({ status, statusText: undefined }, errorJson));
+                                            }
+                                        }
+                                        catch {
+                                            safeError(makeServerError({
+                                                status: frame.statusCode ?? 500,
+                                                statusText: undefined,
+                                            }, errorText));
+                                        }
                                     }
-
-
-                                    const converted = this.convertRecord(record, as ?? "string", textDecoder);
-                                    controller.enqueue(converted);
+                                    else {
+                                        safeClose();
                                     }
+                                    stream.close();
                                 }
-
-
-
-
+                                else {
+                                    // Parse ReadBatch
+                                    try {
+                                        const protoBatch = ProtoReadBatch.fromBinary(frame.body);
+                                        resetTimeoutTimer();
+                                        // Update tail from batch
+                                        if (protoBatch.tail) {
+                                            const tail = convertStreamPosition(protoBatch.tail);
+                                            lastReadPosition = tail;
+                                            this._lastReadPosition = tail;
+                                            this._lastObservedTail = tail;
+                                            debug("received tail");
+                                        }
+                                        // Enqueue each record and track next position
+                                        for (const record of protoBatch.records) {
+                                            const converted = this.convertRecord(record, as ?? "string", textDecoder);
+                                            controller.enqueue({ ok: true, value: converted });
+                                            // Update next read position to after this record
+                                            if (record.seqNum !== undefined) {
+                                                this._nextReadPosition = {
+                                                    seq_num: Number(record.seqNum) + 1,
+                                                    timestamp: Number(record.timestamp ?? 0n),
+                                                };
+                                            }
+                                        }
+                                    }
+                                    catch (err) {
+                                        safeError(new S2Error({
+                                            message: `Failed to parse ReadBatch: ${err}`,
+                                            status: 500,
+                                            origin: "sdk",
+                                        }));
+                                    }
                                 }
+                                frame = parser.parseFrame();
                             }
-
+                        }
+                        catch (error) {
+                            safeError(error instanceof S2Error
+                                ? error
+                                : new S2Error({
+                                    message: `Failed to process read data: ${error}`,
+                                    status: 500,
+                                    origin: "sdk",
+                                }));
                         }
                     });
-                    stream.on("
-
+                    stream.on("end", () => {
+                        if (stream.rstCode != 0) {
+                            debug("stream reset code=%d", stream.rstCode);
+                            safeError(new S2Error({
+                                message: `Stream ended with error: ${stream.rstCode}`,
+                                status: 500,
+                                code: "stream reset",
+                                origin: "sdk",
+                            }));
+                        }
                     });
                     stream.on("close", () => {
-
+                        if (parser.hasData()) {
+                            safeError(new S2Error({
+                                message: "Stream closed with unparsed data remaining",
+                                status: 500,
+                                code: "STREAM_CLOSED_PREMATURELY",
+                                origin: "sdk",
+                            }));
+                        }
+                        else {
+                            safeClose();
+                        }
                     });
                 }
                 catch (err) {
```
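With this change the read stream never errors its controller: failures are converted to `S2Error` and enqueued as `{ ok: false, error }` results before a clean close, so consumers branch on each chunk instead of wrapping iteration in try/catch. A hedged usage sketch (the record and error types are stand-ins):

```ts
// Consuming a Result-valued ReadableStream: errors arrive as values,
// so the stream itself always ends cleanly.
type ReadResult = { ok: true; value: string } | { ok: false; error: Error };

async function drain(stream: ReadableStream<ReadResult>): Promise<void> {
  const reader = stream.getReader();
  for (;;) {
    const { done, value } = await reader.read();
    if (done) break;
    if (value.ok) {
      console.log("record:", value.value);
    } else {
      console.error("terminal error:", value.error.message);
      break; // an error result is followed by stream close
    }
  }
}
```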
```diff
@@ -263,6 +379,7 @@ class S2SReadSession extends ReadableStream {
         this.url = url;
         this.options = options;
         this.getConnection = getConnection;
+        this.basinName = basinName;
         // Assign parser to instance property after super() completes
         this.parser = parser;
         this.http2Stream = http2Stream;
```
```diff
@@ -325,163 +442,46 @@ class S2SReadSession extends ReadableStream {
             },
         };
     }
-
-        return this.
+    nextReadPosition() {
+        return this._nextReadPosition;
+    }
+    lastObservedTail() {
+        return this._lastObservedTail;
     }
 }
 /**
  * AcksStream for S2S append session
  */
-
-    constructor(setController) {
-        super({
-            start: (controller) => {
-                setController(controller);
-            },
-        });
-    }
-    async [Symbol.asyncDispose]() {
-        await this.cancel("disposed");
-    }
-    // Polyfill for older browsers
-    [Symbol.asyncIterator]() {
-        const fn = ReadableStream.prototype[Symbol.asyncIterator];
-        if (typeof fn === "function")
-            return fn.call(this);
-        const reader = this.getReader();
-        return {
-            next: async () => {
-                const r = await reader.read();
-                if (r.done) {
-                    reader.releaseLock();
-                    return { done: true, value: undefined };
-                }
-                return { done: false, value: r.value };
-            },
-            throw: async (e) => {
-                await reader.cancel(e);
-                reader.releaseLock();
-                return { done: true, value: undefined };
-            },
-            return: async () => {
-                await reader.cancel("done");
-                reader.releaseLock();
-                return { done: true, value: undefined };
-            },
-            [Symbol.asyncIterator]() {
-                return this;
-            },
-        };
-    }
-}
+// Removed S2SAcksStream - transport sessions no longer expose streams
 /**
- *
- *
+ * Fetch-based transport session for appending records via HTTP/2.
+ * Pipelined: multiple requests can be in-flight simultaneously.
+ * No backpressure, no retry logic, no streams - just submit/close with value-encoded errors.
  */
 class S2SAppendSession {
     baseUrl;
     authToken;
     streamName;
     getConnection;
+    basinName;
     options;
     http2Stream;
-    _lastAckedPosition;
     parser = new S2SFrameParser();
-    acksController;
-    _readable;
-    _writable;
     closed = false;
-    queuedBytes = 0;
-    maxQueuedBytes;
-    waitingForCapacity = [];
     pendingAcks = [];
     initPromise;
-
-
-    static async create(baseUrl, bearerToken, streamName, getConnection, sessionOptions, requestOptions) {
-        return new S2SAppendSession(baseUrl, bearerToken, streamName, getConnection, sessionOptions, requestOptions);
+    static async create(baseUrl, bearerToken, streamName, getConnection, basinName, sessionOptions, requestOptions) {
+        return new S2SAppendSession(baseUrl, bearerToken, streamName, getConnection, basinName, sessionOptions, requestOptions);
     }
-    constructor(baseUrl, authToken, streamName, getConnection, sessionOptions, options) {
+    constructor(baseUrl, authToken, streamName, getConnection, basinName, sessionOptions, options) {
         this.baseUrl = baseUrl;
         this.authToken = authToken;
         this.streamName = streamName;
         this.getConnection = getConnection;
+        this.basinName = basinName;
         this.options = options;
-
-        //
-        this._readable = new S2SAcksStream((controller) => {
-            this.acksController = controller;
-        });
-        this.readable = this._readable;
-        // Create the writable stream
-        this._writable = new WritableStream({
-            start: async (controller) => {
-                this.initPromise = this.initializeStream();
-                await this.initPromise;
-            },
-            write: async (chunk) => {
-                if (this.closed) {
-                    throw new S2Error({ message: "AppendSession is closed" });
-                }
-                const recordsArray = Array.isArray(chunk.records)
-                    ? chunk.records
-                    : [chunk.records];
-                // Validate batch size limits
-                if (recordsArray.length > 1000) {
-                    throw new S2Error({
-                        message: `Batch of ${recordsArray.length} exceeds maximum batch size of 1000 records`,
-                    });
-                }
-                // Calculate metered size
-                let batchMeteredSize = 0;
-                for (const record of recordsArray) {
-                    batchMeteredSize += meteredSizeBytes(record);
-                }
-                if (batchMeteredSize > 1024 * 1024) {
-                    throw new S2Error({
-                        message: `Batch size ${batchMeteredSize} bytes exceeds maximum of 1 MiB (1048576 bytes)`,
-                    });
-                }
-                // Wait for capacity if needed (backpressure)
-                while (this.queuedBytes + batchMeteredSize > this.maxQueuedBytes &&
-                    !this.closed) {
-                    await new Promise((resolve) => {
-                        this.waitingForCapacity.push(resolve);
-                    });
-                }
-                if (this.closed) {
-                    throw new S2Error({ message: "AppendSession is closed" });
-                }
-                // Send the batch immediately (pipelined)
-                // Returns when frame is sent, not when ack is received
-                await this.sendBatchNonBlocking(recordsArray, chunk, batchMeteredSize);
-            },
-            close: async () => {
-                this.closed = true;
-                await this.closeStream();
-            },
-            abort: async (reason) => {
-                this.closed = true;
-                this.queuedBytes = 0;
-                // Reject all pending acks
-                const error = new S2Error({
-                    message: `AppendSession was aborted: ${reason}`,
-                });
-                for (const pending of this.pendingAcks) {
-                    pending.reject(error);
-                }
-                this.pendingAcks = [];
-                // Wake up all waiting for capacity
-                for (const resolver of this.waitingForCapacity) {
-                    resolver();
-                }
-                this.waitingForCapacity = [];
-                if (this.http2Stream && !this.http2Stream.closed) {
-                    this.http2Stream.close();
-                }
-            },
-        });
-        this.writable = this._writable;
+        // No stream setup
+        // Initialization happens lazily on first submit
     }
     async initializeStream() {
         const url = new URL(this.baseUrl);
```
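The append session sheds its WritableStream/acks-stream surface entirely; in its place are `submit()` and `close()`, which resolve to results rather than throwing, with backpressure and retries now owned by the `RetryAppendSession` wrapper. Roughly, driving the new surface looks like this (the interfaces are illustrative):

```ts
// Sketch of the new submit/close flow; record and ack shapes are stand-ins.
type AppendAck = { seq_num: number };
type Result<T> = { ok: true; value: T } | { ok: false; error: Error };

interface AppendSessionLike {
  submit(records: { body: string }[]): Promise<Result<AppendAck>>;
  close(): Promise<Result<void>>;
}

async function writeAll(session: AppendSessionLike, bodies: string[]) {
  for (const body of bodies) {
    const res = await session.submit([{ body }]);
    if (!res.ok) return res.error; // value-encoded: no try/catch needed
  }
  const closed = await session.close();
  return closed.ok ? undefined : closed.error;
}
```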
```diff
@@ -495,6 +495,7 @@ class S2SAppendSession {
             authorization: `Bearer ${Redacted.value(this.authToken)}`,
             "content-type": "s2s/proto",
             accept: "application/protobuf",
+            ...(this.basinName ? { "s2-basin": this.basinName } : {}),
         });
         this.http2Stream = stream;
         this.options?.signal?.addEventListener("abort", () => {
```
```diff
@@ -503,145 +504,87 @@ class S2SAppendSession {
             }
         });
         const textDecoder = new TextDecoder();
-
-
-
-
-
-                    this.acksController.close();
-                }
-                catch {
-                    // Controller may already be closed, ignore
-                }
-            }
-        };
-        const safeError = (err) => {
-            if (!controllerClosed && this.acksController) {
-                controllerClosed = true;
-                this.acksController.error(err);
-            }
-            // Reject all pending acks
+        const safeError = (error) => {
+            const s2Err = error instanceof S2Error
+                ? error
+                : new S2Error({ message: String(error), status: 502 });
+            // Resolve all pending acks with error result
             for (const pending of this.pendingAcks) {
-                pending.
+                pending.resolve(err(s2Err));
             }
             this.pendingAcks = [];
         };
         // Handle incoming data (acks)
         stream.on("data", (chunk) => {
-
-
-
-
-            if (frame.
-
-
-            const
-
-
-
-
-
-
-
-
-
-
+            try {
+                this.parser.push(chunk);
+                let frame = this.parser.parseFrame();
+                while (frame) {
+                    if (frame.terminal) {
+                        if (frame.statusCode && frame.statusCode >= 400) {
+                            const errorText = textDecoder.decode(frame.body);
+                            const status = frame.statusCode ?? 500;
+                            try {
+                                const errorJson = JSON.parse(errorText);
+                                const err = status === 412
+                                    ? makeAppendPreconditionError(status, errorJson)
+                                    : makeServerError({ status, statusText: undefined }, errorJson);
+                                queueMicrotask(() => safeError(err));
+                            }
+                            catch {
+                                const err = makeServerError({ status, statusText: undefined }, errorText);
+                                queueMicrotask(() => safeError(err));
+                            }
                         }
+                        stream.close();
                     }
                     else {
-
-
-
-
-
-
-
-
-                        const ack = convertAppendAck(protoAck);
-                        this._lastAckedPosition = ack;
-                        // Enqueue to readable stream
-                        if (this.acksController) {
-                            this.acksController.enqueue(ack);
-                        }
-                        // Resolve the pending ack promise
-                        const pending = this.pendingAcks.shift();
-                        if (pending) {
-                            pending.resolve(ack);
-                            // Release capacity
-                            this.queuedBytes -= pending.batchSize;
-                            // Wake up one waiting writer
-                            if (this.waitingForCapacity.length > 0) {
-                                const waiter = this.waitingForCapacity.shift();
-                                waiter();
+                        // Parse AppendAck
+                        try {
+                            const protoAck = ProtoAppendAck.fromBinary(frame.body);
+                            const ack = convertAppendAck(protoAck);
+                            // Resolve the pending ack promise (FIFO)
+                            const pending = this.pendingAcks.shift();
+                            if (pending) {
+                                pending.resolve(ok(ack));
                             }
                         }
+                        catch (parseErr) {
+                            queueMicrotask(() => safeError(new S2Error({
+                                message: `Failed to parse AppendAck: ${parseErr}`,
+                                status: 500,
+                            })));
+                        }
                     }
-
-                    safeError(new S2Error({
-                        message: `Failed to parse AppendAck: ${err}`,
-                    }));
-                }
+                    frame = this.parser.parseFrame();
                 }
-
+            }
+            catch (error) {
+                queueMicrotask(() => safeError(error));
             }
         });
-        stream.on("error", (
-            safeError(
+        stream.on("error", (streamErr) => {
+            queueMicrotask(() => safeError(streamErr));
         });
         stream.on("close", () => {
-
-
-
-
-
-
-
-
-
-            }
-            // Convert to protobuf AppendInput
-            const protoInput = buildProtoAppendInput(records, args);
-            const bodyBytes = ProtoAppendInput.toBinary(protoInput);
-            // Frame the message
-            const frame = frameMessage({
-                terminal: false,
-                body: bodyBytes,
-            });
-            // This promise resolves when the frame is written (not when ack is received)
-            return new Promise((resolve, reject) => {
-                // Track pending ack - will be resolved when ack arrives
-                const ackPromise = {
-                    resolve: () => { },
-                    reject,
-                    batchSize: batchMeteredSize,
-                };
-                this.pendingAcks.push(ackPromise);
-                this.queuedBytes += batchMeteredSize;
-                // Send the frame (pipelined)
-                this.http2Stream.write(frame, (err) => {
-                    if (err) {
-                        // Remove from pending acks on write error
-                        const idx = this.pendingAcks.indexOf(ackPromise);
-                        if (idx !== -1) {
-                            this.pendingAcks.splice(idx, 1);
-                            this.queuedBytes -= batchMeteredSize;
-                        }
-                        reject(err);
-                    }
-                    else {
-                        // Frame written successfully - resolve immediately (pipelined)
-                        resolve();
-                    }
-                });
+            // Stream closed - resolve any remaining pending acks with error
+            // This can happen if the server closes the stream without sending all acks
+            if (this.pendingAcks.length > 0) {
+                queueMicrotask(() => safeError(new S2Error({
+                    message: "Stream closed with pending acks",
+                    status: 502,
+                    code: "BAD_GATEWAY",
+                })));
+            }
         });
     }
     /**
-     * Send a batch and wait for ack
+     * Send a batch and wait for ack. Returns AppendResult (never throws).
+     * Pipelined: multiple sends can be in-flight; acks resolve FIFO.
      */
     sendBatch(records, args, batchMeteredSize) {
         if (!this.http2Stream || this.http2Stream.closed) {
-            return Promise.
+            return Promise.resolve(err(new S2Error({ message: "HTTP/2 stream is not open", status: 502 })));
         }
         // Convert to protobuf AppendInput
         const protoInput = buildProtoAppendInput(records, args);
```
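Pipelining rests on the invariant that the server acknowledges appends in order, so a plain FIFO queue is enough to correlate them: each submit pushes a resolver into `pendingAcks`, and each incoming ack frame resolves the oldest entry. The core of that correlation:

```ts
// FIFO ack correlation: ordering, not request IDs, matches acks to submits.
type Ack = { seq_num: number };
const pending: Array<(ack: Ack) => void> = [];

function submitFrame(): Promise<Ack> {
  return new Promise((resolve) => pending.push(resolve)); // enqueue on send
}

function onAck(ack: Ack): void {
  const resolve = pending.shift(); // oldest in-flight submit wins
  resolve?.(ack);
}
```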
```diff
@@ -651,82 +594,99 @@ class S2SAppendSession {
             terminal: false,
             body: bodyBytes,
         });
-        // Track pending ack - this promise resolves when the ack is received
-        return new Promise((resolve
+        // Track pending ack - this promise resolves when the ack is received (FIFO)
+        return new Promise((resolve) => {
             this.pendingAcks.push({
                 resolve,
-                reject,
                 batchSize: batchMeteredSize,
             });
-
-
-
-                    if (err) {
+            // Send the frame (pipelined - non-blocking)
+            this.http2Stream.write(frame, (writeErr) => {
+                if (writeErr) {
                     // Remove from pending acks on write error
-                    const idx = this.pendingAcks.findIndex((p) => p.
+                    const idx = this.pendingAcks.findIndex((p) => p.resolve === resolve);
                     if (idx !== -1) {
                         this.pendingAcks.splice(idx, 1);
-                        this.queuedBytes -= batchMeteredSize;
                     }
-
+                    // Resolve with error result
+                    const s2Err = writeErr instanceof S2Error
+                        ? writeErr
+                        : new S2Error({ message: String(writeErr), status: 502 });
+                    resolve(err(s2Err));
                 }
-                // Write completed
+                // Write completed successfully - promise resolves later when ack is received
             });
         });
     }
-    async closeStream() {
-        // Wait for all pending acks
-        while (this.pendingAcks.length > 0) {
-            await new Promise((resolve) => setTimeout(resolve, 10));
-        }
-        // Close the HTTP/2 stream (client doesn't send terminal frame for clean close)
-        if (this.http2Stream && !this.http2Stream.closed) {
-            this.http2Stream.end();
-        }
-    }
-    async [Symbol.asyncDispose]() {
-        await this.close();
-    }
-    /**
-     * Get a stream of acknowledgements for appends.
-     */
-    acks() {
-        return this._readable;
-    }
     /**
      * Close the append session.
      * Waits for all pending appends to complete before resolving.
+     * Never throws - returns CloseResult.
      */
     async close() {
-
+        try {
+            this.closed = true;
+            // Wait for all pending acks to complete
+            while (this.pendingAcks.length > 0) {
+                await new Promise((resolve) => setTimeout(resolve, 10));
+            }
+            // Close the HTTP/2 stream (client doesn't send terminal frame for clean close)
+            if (this.http2Stream && !this.http2Stream.closed) {
+                this.http2Stream.end();
+            }
+            return okClose();
+        }
+        catch (error) {
+            const s2Err = error instanceof S2Error
+                ? error
+                : new S2Error({ message: String(error), status: 500 });
+            return errClose(s2Err);
+        }
     }
     /**
      * Submit an append request to the session.
-     * Returns
+     * Returns AppendResult (never throws).
+     * Pipelined: multiple submits can be in-flight; acks resolve FIFO.
      */
     async submit(records, args) {
+        // Validate closed state
         if (this.closed) {
-            return
+            return err(new S2Error({ message: "AppendSession is closed", status: 400 }));
         }
-        //
-        if (this.initPromise) {
+        // Lazy initialize HTTP/2 stream on first submit
+        if (!this.initPromise) {
+            this.initPromise = this.initializeStream();
+        }
+        try {
             await this.initPromise;
         }
+        catch (initErr) {
+            const s2Err = initErr instanceof S2Error
+                ? initErr
+                : new S2Error({ message: String(initErr), status: 502 });
+            return err(s2Err);
+        }
         const recordsArray = Array.isArray(records) ? records : [records];
-        // Validate batch size limits
+        // Validate batch size limits (non-retryable 400-level error)
         if (recordsArray.length > 1000) {
-            return
+            return err(new S2Error({
                 message: `Batch of ${recordsArray.length} exceeds maximum batch size of 1000 records`,
+                status: 400,
+                code: "INVALID_ARGUMENT",
             }));
         }
-        // Calculate metered size
-        let batchMeteredSize = 0;
-
-
+        // Calculate metered size (use precalculated if provided)
+        let batchMeteredSize = args?.precalculatedSize ?? 0;
+        if (batchMeteredSize === 0) {
+            for (const record of recordsArray) {
+                batchMeteredSize += meteredBytes(record);
+            }
         }
         if (batchMeteredSize > 1024 * 1024) {
-            return
+            return err(new S2Error({
                 message: `Batch size ${batchMeteredSize} bytes exceeds maximum of 1 MiB (1048576 bytes)`,
+                status: 400,
+                code: "INVALID_ARGUMENT",
             }));
         }
         return this.sendBatch(recordsArray, {
```
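`submit()` also fixes stream setup: rather than opening the HTTP/2 stream in a WritableStream `start` callback, the first submit lazily kicks off `initializeStream()`, later submits await the same memoized promise, and an initialization failure comes back as an `err(...)` result. The memoization pattern in isolation (class and member names here are illustrative):

```ts
// Lazy, memoized async init: the first caller starts initialization,
// later callers await the same promise, and failures surface as values.
class LazyInit {
  private initPromise?: Promise<void>;

  async ensureReady(): Promise<Error | undefined> {
    this.initPromise ??= this.init();
    try {
      await this.initPromise;
      return undefined;
    } catch (e) {
      return e instanceof Error ? e : new Error(String(e));
    }
  }

  private async init(): Promise<void> {
    // open the HTTP/2 stream, register handlers, etc.
  }
}
```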
```diff
@@ -735,9 +695,6 @@ class S2SAppendSession {
             match_seq_num: args?.match_seq_num,
         }, batchMeteredSize);
     }
-    lastAckedPosition() {
-        return this._lastAckedPosition;
-    }
 }
 /**
  * Convert protobuf StreamPosition to OpenAPI StreamPosition
```