@s2-dev/streamstore 0.17.6 → 0.18.1
- package/README.md +69 -1
- package/dist/cjs/accessTokens.d.ts +3 -2
- package/dist/cjs/accessTokens.d.ts.map +1 -1
- package/dist/cjs/accessTokens.js +22 -37
- package/dist/cjs/accessTokens.js.map +1 -1
- package/dist/cjs/basin.d.ts +4 -3
- package/dist/cjs/basin.d.ts.map +1 -1
- package/dist/cjs/basin.js +16 -6
- package/dist/cjs/basin.js.map +1 -1
- package/dist/cjs/basins.d.ts +10 -10
- package/dist/cjs/basins.d.ts.map +1 -1
- package/dist/cjs/basins.js +36 -64
- package/dist/cjs/basins.js.map +1 -1
- package/dist/cjs/batch-transform.d.ts +1 -1
- package/dist/cjs/batch-transform.d.ts.map +1 -1
- package/dist/cjs/batch-transform.js +36 -5
- package/dist/cjs/batch-transform.js.map +1 -1
- package/dist/cjs/common.d.ts +42 -0
- package/dist/cjs/common.d.ts.map +1 -1
- package/dist/cjs/error.d.ts +40 -2
- package/dist/cjs/error.d.ts.map +1 -1
- package/dist/cjs/error.js +268 -2
- package/dist/cjs/error.js.map +1 -1
- package/dist/cjs/generated/client/types.gen.d.ts +7 -0
- package/dist/cjs/generated/client/types.gen.d.ts.map +1 -1
- package/dist/cjs/generated/client/utils.gen.d.ts +1 -0
- package/dist/cjs/generated/client/utils.gen.d.ts.map +1 -1
- package/dist/cjs/generated/client/utils.gen.js.map +1 -1
- package/dist/cjs/generated/core/types.gen.d.ts +2 -0
- package/dist/cjs/generated/core/types.gen.d.ts.map +1 -1
- package/dist/cjs/index.d.ts +46 -3
- package/dist/cjs/index.d.ts.map +1 -1
- package/dist/cjs/index.js +28 -2
- package/dist/cjs/index.js.map +1 -1
- package/dist/cjs/lib/result.d.ts +57 -0
- package/dist/cjs/lib/result.d.ts.map +1 -0
- package/dist/cjs/lib/result.js +43 -0
- package/dist/cjs/lib/result.js.map +1 -0
- package/dist/cjs/lib/retry.d.ts +151 -0
- package/dist/cjs/lib/retry.d.ts.map +1 -0
- package/dist/cjs/lib/retry.js +839 -0
- package/dist/cjs/lib/retry.js.map +1 -0
- package/dist/cjs/lib/stream/factory.d.ts +0 -1
- package/dist/cjs/lib/stream/factory.d.ts.map +1 -1
- package/dist/cjs/lib/stream/factory.js +0 -1
- package/dist/cjs/lib/stream/factory.js.map +1 -1
- package/dist/cjs/lib/stream/runtime.d.ts +14 -0
- package/dist/cjs/lib/stream/runtime.d.ts.map +1 -1
- package/dist/cjs/lib/stream/runtime.js +18 -3
- package/dist/cjs/lib/stream/runtime.js.map +1 -1
- package/dist/cjs/lib/stream/transport/fetch/index.d.ts +24 -32
- package/dist/cjs/lib/stream/transport/fetch/index.d.ts.map +1 -1
- package/dist/cjs/lib/stream/transport/fetch/index.js +260 -187
- package/dist/cjs/lib/stream/transport/fetch/index.js.map +1 -1
- package/dist/cjs/lib/stream/transport/fetch/shared.d.ts +1 -2
- package/dist/cjs/lib/stream/transport/fetch/shared.d.ts.map +1 -1
- package/dist/cjs/lib/stream/transport/fetch/shared.js +49 -72
- package/dist/cjs/lib/stream/transport/fetch/shared.js.map +1 -1
- package/dist/cjs/lib/stream/transport/s2s/index.d.ts +0 -1
- package/dist/cjs/lib/stream/transport/s2s/index.d.ts.map +1 -1
- package/dist/cjs/lib/stream/transport/s2s/index.js +312 -352
- package/dist/cjs/lib/stream/transport/s2s/index.js.map +1 -1
- package/dist/cjs/lib/stream/types.d.ts +102 -8
- package/dist/cjs/lib/stream/types.d.ts.map +1 -1
- package/dist/cjs/metrics.d.ts +3 -2
- package/dist/cjs/metrics.d.ts.map +1 -1
- package/dist/cjs/metrics.js +24 -39
- package/dist/cjs/metrics.js.map +1 -1
- package/dist/cjs/s2.d.ts +1 -0
- package/dist/cjs/s2.d.ts.map +1 -1
- package/dist/cjs/s2.js +20 -3
- package/dist/cjs/s2.js.map +1 -1
- package/dist/cjs/stream.d.ts +5 -3
- package/dist/cjs/stream.d.ts.map +1 -1
- package/dist/cjs/stream.js +29 -18
- package/dist/cjs/stream.js.map +1 -1
- package/dist/cjs/streams.d.ts +10 -10
- package/dist/cjs/streams.d.ts.map +1 -1
- package/dist/cjs/streams.js +36 -64
- package/dist/cjs/streams.js.map +1 -1
- package/dist/cjs/utils.d.ts +3 -3
- package/dist/cjs/utils.d.ts.map +1 -1
- package/dist/cjs/utils.js +3 -3
- package/dist/cjs/utils.js.map +1 -1
- package/dist/cjs/version.d.ts +8 -0
- package/dist/cjs/version.d.ts.map +1 -0
- package/dist/cjs/version.js +11 -0
- package/dist/cjs/version.js.map +1 -0
- package/dist/esm/accessTokens.d.ts +3 -2
- package/dist/esm/accessTokens.d.ts.map +1 -1
- package/dist/esm/accessTokens.js +23 -38
- package/dist/esm/accessTokens.js.map +1 -1
- package/dist/esm/basin.d.ts +4 -3
- package/dist/esm/basin.d.ts.map +1 -1
- package/dist/esm/basin.js +16 -6
- package/dist/esm/basin.js.map +1 -1
- package/dist/esm/basins.d.ts +10 -10
- package/dist/esm/basins.d.ts.map +1 -1
- package/dist/esm/basins.js +37 -65
- package/dist/esm/basins.js.map +1 -1
- package/dist/esm/batch-transform.d.ts +1 -1
- package/dist/esm/batch-transform.d.ts.map +1 -1
- package/dist/esm/batch-transform.js +37 -6
- package/dist/esm/batch-transform.js.map +1 -1
- package/dist/esm/common.d.ts +42 -0
- package/dist/esm/common.d.ts.map +1 -1
- package/dist/esm/error.d.ts +40 -2
- package/dist/esm/error.d.ts.map +1 -1
- package/dist/esm/error.js +260 -2
- package/dist/esm/error.js.map +1 -1
- package/dist/esm/generated/client/types.gen.d.ts +7 -0
- package/dist/esm/generated/client/types.gen.d.ts.map +1 -1
- package/dist/esm/generated/client/utils.gen.d.ts +1 -0
- package/dist/esm/generated/client/utils.gen.d.ts.map +1 -1
- package/dist/esm/generated/client/utils.gen.js.map +1 -1
- package/dist/esm/generated/core/types.gen.d.ts +2 -0
- package/dist/esm/generated/core/types.gen.d.ts.map +1 -1
- package/dist/esm/index.d.ts +46 -3
- package/dist/esm/index.d.ts.map +1 -1
- package/dist/esm/index.js +23 -1
- package/dist/esm/index.js.map +1 -1
- package/dist/esm/lib/result.d.ts +57 -0
- package/dist/esm/lib/result.d.ts.map +1 -0
- package/dist/esm/lib/result.js +37 -0
- package/dist/esm/lib/result.js.map +1 -0
- package/dist/esm/lib/retry.d.ts +151 -0
- package/dist/esm/lib/retry.d.ts.map +1 -0
- package/dist/esm/lib/retry.js +830 -0
- package/dist/esm/lib/retry.js.map +1 -0
- package/dist/esm/lib/stream/factory.d.ts +0 -1
- package/dist/esm/lib/stream/factory.d.ts.map +1 -1
- package/dist/esm/lib/stream/factory.js +0 -1
- package/dist/esm/lib/stream/factory.js.map +1 -1
- package/dist/esm/lib/stream/runtime.d.ts +14 -0
- package/dist/esm/lib/stream/runtime.d.ts.map +1 -1
- package/dist/esm/lib/stream/runtime.js +23 -3
- package/dist/esm/lib/stream/runtime.js.map +1 -1
- package/dist/esm/lib/stream/transport/fetch/index.d.ts +24 -32
- package/dist/esm/lib/stream/transport/fetch/index.d.ts.map +1 -1
- package/dist/esm/lib/stream/transport/fetch/index.js +260 -187
- package/dist/esm/lib/stream/transport/fetch/index.js.map +1 -1
- package/dist/esm/lib/stream/transport/fetch/shared.d.ts +1 -2
- package/dist/esm/lib/stream/transport/fetch/shared.d.ts.map +1 -1
- package/dist/esm/lib/stream/transport/fetch/shared.js +51 -74
- package/dist/esm/lib/stream/transport/fetch/shared.js.map +1 -1
- package/dist/esm/lib/stream/transport/s2s/index.d.ts +0 -1
- package/dist/esm/lib/stream/transport/s2s/index.d.ts.map +1 -1
- package/dist/esm/lib/stream/transport/s2s/index.js +313 -353
- package/dist/esm/lib/stream/transport/s2s/index.js.map +1 -1
- package/dist/esm/lib/stream/types.d.ts +102 -8
- package/dist/esm/lib/stream/types.d.ts.map +1 -1
- package/dist/esm/metrics.d.ts +3 -2
- package/dist/esm/metrics.d.ts.map +1 -1
- package/dist/esm/metrics.js +25 -40
- package/dist/esm/metrics.js.map +1 -1
- package/dist/esm/s2.d.ts +1 -0
- package/dist/esm/s2.d.ts.map +1 -1
- package/dist/esm/s2.js +20 -3
- package/dist/esm/s2.js.map +1 -1
- package/dist/esm/stream.d.ts +5 -3
- package/dist/esm/stream.d.ts.map +1 -1
- package/dist/esm/stream.js +30 -19
- package/dist/esm/stream.js.map +1 -1
- package/dist/esm/streams.d.ts +10 -10
- package/dist/esm/streams.d.ts.map +1 -1
- package/dist/esm/streams.js +37 -65
- package/dist/esm/streams.js.map +1 -1
- package/dist/esm/utils.d.ts +3 -3
- package/dist/esm/utils.d.ts.map +1 -1
- package/dist/esm/utils.js +2 -2
- package/dist/esm/utils.js.map +1 -1
- package/dist/esm/version.d.ts +8 -0
- package/dist/esm/version.d.ts.map +1 -0
- package/dist/esm/version.js +8 -0
- package/dist/esm/version.js.map +1 -0
- package/package.json +7 -4
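The headline change in 0.18.x is visible in the new `lib/result` and `lib/retry` modules and in the rewritten s2s transport below: transport sessions now return value-encoded results instead of throwing. The exact exported types live in `dist/*/lib/result.d.ts`; the following is only a minimal sketch of the shape implied by the compiled output (`ok`, `err`, and `{ ok, value | error }` objects), not the package's actual definitions.

```ts
// Hypothetical reconstruction; the real types are in package/dist/*/lib/result.d.ts.
type Result<T, E> = { ok: true; value: T } | { ok: false; error: E };

const ok = <T, E>(value: T): Result<T, E> => ({ ok: true, value });
const err = <T, E>(error: E): Result<T, E> => ({ ok: false, error });
```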
package/dist/cjs/lib/stream/transport/s2s/index.js:

@@ -9,12 +9,16 @@ Object.defineProperty(exports, "__esModule", { value: true });
 exports.S2STransport = void 0;
 exports.buildProtoAppendInput = buildProtoAppendInput;
 const http2 = require("node:http2");
-const
+const debug_1 = require("debug");
+const error_js_1 = require("../../../../error.js");
 const s2_js_1 = require("../../../../generated/proto/s2.js");
-const index_js_2 = require("../../../../index.js");
 const utils_js_1 = require("../../../../utils.js");
 const Redacted = require("../../../redacted.js");
+const result_js_1 = require("../../../result.js");
+const retry_js_1 = require("../../../retry.js");
+const runtime_js_1 = require("../../runtime.js");
 const framing_js_1 = require("./framing.js");
+const debug = (0, debug_1.default)("s2:s2s");
 function buildProtoAppendInput(records, args) {
     const textEncoder = new TextEncoder();
     return s2_js_1.AppendInput.create({
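The transport now logs through the `debug` package under the `s2:s2s` namespace (GOAWAY frames, tail timeouts, stream resets). As usual for `debug`, output is off by default and is enabled via the `DEBUG` environment variable; a small sketch of the same pattern:

```ts
// Same pattern as the compiled output above; enable with e.g. DEBUG=s2:*
import createDebug from "debug";

const debug = createDebug("s2:s2s");
debug("received GOAWAY from server"); // printed only when the namespace is enabled
```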
@@ -44,22 +48,21 @@ function buildProtoAppendInput(records, args) {
     });
 }
 class S2STransport {
-    client;
     transportConfig;
     connection;
     connectionPromise;
     constructor(config) {
-        this.client = (0, index_js_1.createClient)((0, index_js_1.createConfig)({
-            baseUrl: config.baseUrl,
-            auth: () => Redacted.value(config.accessToken),
-        }));
         this.transportConfig = config;
     }
     async makeAppendSession(stream, sessionOptions, requestOptions) {
-        return
+        return retry_js_1.RetryAppendSession.create((myOptions) => {
+            return S2SAppendSession.create(this.transportConfig.baseUrl, this.transportConfig.accessToken, stream, () => this.getConnection(), this.transportConfig.basinName, myOptions, requestOptions);
+        }, sessionOptions, this.transportConfig.retry);
     }
     async makeReadSession(stream, args, options) {
-        return
+        return retry_js_1.RetryReadSession.create((myArgs) => {
+            return S2SReadSession.create(this.transportConfig.baseUrl, this.transportConfig.accessToken, stream, myArgs, options, () => this.getConnection(), this.transportConfig.basinName);
+        }, args, this.transportConfig.retry);
     }
     /**
      * Get or create HTTP/2 connection (one per transport)
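Both session factories are now wrapped by `RetryAppendSession` / `RetryReadSession` from the new `lib/retry` module: the transport hands the retry layer a factory closure so the inner session can be recreated (with adjusted options or a resume position) after a retryable failure. The real implementation is in `dist/*/lib/retry.js` (~800 lines); the following is only a stripped-down sketch of the factory-wrapper idea, with names and signatures of my own invention:

```ts
// Sketch: the retry layer owns a factory so it can rebuild the inner session per attempt.
async function withRetries<S>(
  makeSession: () => Promise<S>,
  run: (session: S) => Promise<void>,
  maxAttempts = 3,
): Promise<void> {
  for (let attempt = 1; ; attempt++) {
    const session = await makeSession(); // fresh inner session per attempt
    try {
      return await run(session);
    } catch (e) {
      if (attempt >= maxAttempts) throw e;
      // a real implementation would classify the error and back off here
    }
  }
}
```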
@@ -122,25 +125,36 @@ class S2SReadSession extends ReadableStream {
     url;
     options;
     getConnection;
+    basinName;
     http2Stream;
     _lastReadPosition;
+    _nextReadPosition;
+    _lastObservedTail;
     parser = new framing_js_1.S2SFrameParser();
-    static async create(baseUrl, bearerToken, streamName, args, options, getConnection) {
+    static async create(baseUrl, bearerToken, streamName, args, options, getConnection, basinName) {
         const url = new URL(baseUrl);
-        return new S2SReadSession(streamName, args, bearerToken, url, options, getConnection);
+        return new S2SReadSession(streamName, args, bearerToken, url, options, getConnection, basinName);
     }
-    constructor(streamName, args, authToken, url, options, getConnection) {
+    constructor(streamName, args, authToken, url, options, getConnection, basinName) {
         // Initialize parser and textDecoder before super() call
         const parser = new framing_js_1.S2SFrameParser();
         const textDecoder = new TextDecoder();
         let http2Stream;
         let lastReadPosition;
+        // Track timeout for detecting when server stops sending data
+        const TAIL_TIMEOUT_MS = 20000; // 20 seconds
+        let timeoutTimer;
         super({
             start: async (controller) => {
                 let controllerClosed = false;
+                let responseCode;
                 const safeClose = () => {
                     if (!controllerClosed) {
                         controllerClosed = true;
+                        if (timeoutTimer) {
+                            clearTimeout(timeoutTimer);
+                            timeoutTimer = undefined;
+                        }
                         try {
                             controller.close();
                         }
@@ -152,10 +166,37 @@
                 const safeError = (err) => {
                     if (!controllerClosed) {
                         controllerClosed = true;
-
+                        if (timeoutTimer) {
+                            clearTimeout(timeoutTimer);
+                            timeoutTimer = undefined;
+                        }
+                        // Convert error to S2Error and enqueue as error result
+                        const s2Err = err instanceof error_js_1.S2Error
+                            ? err
+                            : new error_js_1.S2Error({ message: String(err), status: 500 });
+                        controller.enqueue({ ok: false, error: s2Err });
+                        controller.close();
                     }
                 };
+                // Helper to start/reset the timeout timer
+                // Resets on every tail received, fires only if no tail for 20s
+                const resetTimeoutTimer = () => {
+                    if (timeoutTimer) {
+                        clearTimeout(timeoutTimer);
+                    }
+                    timeoutTimer = setTimeout(() => {
+                        const timeoutError = new error_js_1.S2Error({
+                            message: `No tail received for ${TAIL_TIMEOUT_MS / 1000}s`,
+                            status: 408, // Request Timeout
+                            code: "TIMEOUT",
+                        });
+                        debug("tail timeout detected");
+                        safeError(timeoutError);
+                    }, TAIL_TIMEOUT_MS);
+                };
                 try {
+                    // Start the timeout timer - will fire in 20s if no tail received
+                    resetTimeoutTimer();
                     const connection = await getConnection();
                     // Build query string
                     const queryParams = new URLSearchParams();
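The read session now arms a 20-second watchdog that is re-armed on every tail frame; if the server goes silent, the session fails with a 408 `TIMEOUT` error value, which the retry layer can then act on. The reset-on-activity pattern in isolation (a generic sketch, not the package's API):

```ts
// Generic reset-on-activity watchdog, as used for the tail timeout above.
function makeWatchdog(ms: number, onTimeout: () => void) {
  let timer: ReturnType<typeof setTimeout> | undefined;
  return {
    reset() {
      if (timer) clearTimeout(timer);
      timer = setTimeout(onTimeout, ms); // fires only if not reset within `ms`
    },
    stop() {
      if (timer) clearTimeout(timer);
      timer = undefined;
    },
  };
}
```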
@@ -182,9 +223,11 @@
                     ":path": path,
                     ":scheme": url.protocol.slice(0, -1),
                     ":authority": url.host,
+                    "user-agent": runtime_js_1.DEFAULT_USER_AGENT,
                     authorization: `Bearer ${Redacted.value(authToken)}`,
                     accept: "application/protobuf",
                     "content-type": "s2s/proto",
+                    ...(basinName ? { "s2-basin": basinName } : {}),
                 });
                 http2Stream = stream;
                 options?.signal?.addEventListener("abort", () => {
@@ -192,64 +235,139 @@
                         stream.close();
                     }
                 });
+                stream.on("response", (headers) => {
+                    // Cache the status.
+                    // This informs whether we should attempt to parse s2s frames in the "data" handler.
+                    responseCode = headers[":status"] ?? 500;
+                });
+                connection.on("goaway", (errorCode, lastStreamID, opaqueData) => {
+                    debug("received GOAWAY from server");
+                });
+                stream.on("error", (err) => {
+                    safeError(err);
+                });
                 stream.on("data", (chunk) => {
-
-
-
-
-
-
-
-
-
-
-
-                            code: errorJson.code,
-                            status: frame.statusCode,
-                        }));
-                    }
-                    catch {
-                        safeError(new index_js_2.S2Error({
-                            message: errorText || "Unknown error",
-                            status: frame.statusCode,
-                        }));
-                    }
+                    try {
+                        if ((responseCode ?? 500) >= 400) {
+                            const errorText = textDecoder.decode(chunk);
+                            try {
+                                const errorJson = JSON.parse(errorText);
+                                safeError(new error_js_1.S2Error({
+                                    message: errorJson.message ?? "Unknown error",
+                                    code: errorJson.code,
+                                    status: responseCode,
+                                    origin: "server",
+                                }));
                             }
-
-
+                            catch {
+                                safeError(new error_js_1.S2Error({
+                                    message: errorText || "Unknown error",
+                                    status: responseCode,
+                                    origin: "server",
+                                }));
                             }
-
+                            return;
                         }
-
-
-
-
-
-                        if (
-
-
-
+                        // Buffer already extends Uint8Array in Node.js, no need to convert
+                        parser.push(chunk);
+                        let frame = parser.parseFrame();
+                        while (frame) {
+                            if (frame.terminal) {
+                                if (frame.statusCode && frame.statusCode >= 400) {
+                                    const errorText = textDecoder.decode(frame.body);
+                                    try {
+                                        const errorJson = JSON.parse(errorText);
+                                        const status = frame.statusCode ?? 500;
+                                        // Map known read errors
+                                        if (status === 416) {
+                                            safeError(new error_js_1.RangeNotSatisfiableError({ status }));
+                                        }
+                                        else {
+                                            safeError((0, error_js_1.makeServerError)({ status, statusText: undefined }, errorJson));
+                                        }
+                                    }
+                                    catch {
+                                        safeError((0, error_js_1.makeServerError)({
+                                            status: frame.statusCode ?? 500,
+                                            statusText: undefined,
+                                        }, errorText));
+                                    }
                                 }
-
-
-                                const converted = this.convertRecord(record, as ?? "string", textDecoder);
-                                controller.enqueue(converted);
+                                else {
+                                    safeClose();
                                 }
+                                stream.close();
                             }
-
-
-
-
+                            else {
+                                // Parse ReadBatch
+                                try {
+                                    const protoBatch = s2_js_1.ReadBatch.fromBinary(frame.body);
+                                    resetTimeoutTimer();
+                                    // Update tail from batch
+                                    if (protoBatch.tail) {
+                                        const tail = convertStreamPosition(protoBatch.tail);
+                                        lastReadPosition = tail;
+                                        this._lastReadPosition = tail;
+                                        this._lastObservedTail = tail;
+                                        debug("received tail");
+                                    }
+                                    // Enqueue each record and track next position
+                                    for (const record of protoBatch.records) {
+                                        const converted = this.convertRecord(record, as ?? "string", textDecoder);
+                                        controller.enqueue({ ok: true, value: converted });
+                                        // Update next read position to after this record
+                                        if (record.seqNum !== undefined) {
+                                            this._nextReadPosition = {
+                                                seq_num: Number(record.seqNum) + 1,
+                                                timestamp: Number(record.timestamp ?? 0n),
+                                            };
+                                        }
+                                    }
+                                }
+                                catch (err) {
+                                    safeError(new error_js_1.S2Error({
+                                        message: `Failed to parse ReadBatch: ${err}`,
+                                        status: 500,
+                                        origin: "sdk",
+                                    }));
+                                }
                             }
+                            frame = parser.parseFrame();
                         }
-
+                    }
+                    catch (error) {
+                        safeError(error instanceof error_js_1.S2Error
+                            ? error
+                            : new error_js_1.S2Error({
+                                message: `Failed to process read data: ${error}`,
+                                status: 500,
+                                origin: "sdk",
+                            }));
                     }
                 });
-                stream.on("
-
+                stream.on("end", () => {
+                    if (stream.rstCode != 0) {
+                        debug("stream reset code=%d", stream.rstCode);
+                        safeError(new error_js_1.S2Error({
+                            message: `Stream ended with error: ${stream.rstCode}`,
+                            status: 500,
+                            code: "stream reset",
+                            origin: "sdk",
+                        }));
+                    }
                 });
                 stream.on("close", () => {
-
+                    if (parser.hasData()) {
+                        safeError(new error_js_1.S2Error({
+                            message: "Stream closed with unparsed data remaining",
+                            status: 500,
+                            code: "STREAM_CLOSED_PREMATURELY",
+                            origin: "sdk",
+                        }));
+                    }
+                    else {
+                        safeClose();
+                    }
                 });
             }
             catch (err) {
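Because read errors are now enqueued as `{ ok: false, error }` values rather than thrown through the stream, a consumer iterates results and branches on `ok`. A consumer-side sketch (the `ReadItem` type and handler are assumptions, reconstructed from the `controller.enqueue` calls above):

```ts
// Sketch of consuming a Result-valued read session.
type ReadItem<T> = { ok: true; value: T } | { ok: false; error: Error };

async function consume<T>(readSession: AsyncIterable<ReadItem<T>>) {
  for await (const item of readSession) {
    if (item.ok) {
      console.log("record", item.value);
    } else {
      console.error("read failed:", item.error);
      break; // the session closes itself after enqueueing the error
    }
  }
}
```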
@@ -268,6 +386,7 @@
         this.url = url;
         this.options = options;
         this.getConnection = getConnection;
+        this.basinName = basinName;
         // Assign parser to instance property after super() completes
         this.parser = parser;
         this.http2Stream = http2Stream;
@@ -330,163 +449,46 @@
             },
         };
     }
-
-        return this.
+    nextReadPosition() {
+        return this._nextReadPosition;
+    }
+    lastObservedTail() {
+        return this._lastObservedTail;
     }
 }
 /**
  * AcksStream for S2S append session
  */
-
-    constructor(setController) {
-        super({
-            start: (controller) => {
-                setController(controller);
-            },
-        });
-    }
-    async [Symbol.asyncDispose]() {
-        await this.cancel("disposed");
-    }
-    // Polyfill for older browsers
-    [Symbol.asyncIterator]() {
-        const fn = ReadableStream.prototype[Symbol.asyncIterator];
-        if (typeof fn === "function")
-            return fn.call(this);
-        const reader = this.getReader();
-        return {
-            next: async () => {
-                const r = await reader.read();
-                if (r.done) {
-                    reader.releaseLock();
-                    return { done: true, value: undefined };
-                }
-                return { done: false, value: r.value };
-            },
-            throw: async (e) => {
-                await reader.cancel(e);
-                reader.releaseLock();
-                return { done: true, value: undefined };
-            },
-            return: async () => {
-                await reader.cancel("done");
-                reader.releaseLock();
-                return { done: true, value: undefined };
-            },
-            [Symbol.asyncIterator]() {
-                return this;
-            },
-        };
-    }
-}
+// Removed S2SAcksStream - transport sessions no longer expose streams
 /**
- *
- *
+ * Fetch-based transport session for appending records via HTTP/2.
+ * Pipelined: multiple requests can be in-flight simultaneously.
+ * No backpressure, no retry logic, no streams - just submit/close with value-encoded errors.
 */
 class S2SAppendSession {
     baseUrl;
     authToken;
     streamName;
     getConnection;
+    basinName;
     options;
     http2Stream;
-    _lastAckedPosition;
     parser = new framing_js_1.S2SFrameParser();
-    acksController;
-    _readable;
-    _writable;
     closed = false;
-    queuedBytes = 0;
-    maxQueuedBytes;
-    waitingForCapacity = [];
     pendingAcks = [];
     initPromise;
-
-
-    static async create(baseUrl, bearerToken, streamName, getConnection, sessionOptions, requestOptions) {
-        return new S2SAppendSession(baseUrl, bearerToken, streamName, getConnection, sessionOptions, requestOptions);
+    static async create(baseUrl, bearerToken, streamName, getConnection, basinName, sessionOptions, requestOptions) {
+        return new S2SAppendSession(baseUrl, bearerToken, streamName, getConnection, basinName, sessionOptions, requestOptions);
     }
-    constructor(baseUrl, authToken, streamName, getConnection, sessionOptions, options) {
+    constructor(baseUrl, authToken, streamName, getConnection, basinName, sessionOptions, options) {
         this.baseUrl = baseUrl;
         this.authToken = authToken;
         this.streamName = streamName;
         this.getConnection = getConnection;
+        this.basinName = basinName;
         this.options = options;
-
-        //
-        this._readable = new S2SAcksStream((controller) => {
-            this.acksController = controller;
-        });
-        this.readable = this._readable;
-        // Create the writable stream
-        this._writable = new WritableStream({
-            start: async (controller) => {
-                this.initPromise = this.initializeStream();
-                await this.initPromise;
-            },
-            write: async (chunk) => {
-                if (this.closed) {
-                    throw new index_js_2.S2Error({ message: "AppendSession is closed" });
-                }
-                const recordsArray = Array.isArray(chunk.records)
-                    ? chunk.records
-                    : [chunk.records];
-                // Validate batch size limits
-                if (recordsArray.length > 1000) {
-                    throw new index_js_2.S2Error({
-                        message: `Batch of ${recordsArray.length} exceeds maximum batch size of 1000 records`,
-                    });
-                }
-                // Calculate metered size
-                let batchMeteredSize = 0;
-                for (const record of recordsArray) {
-                    batchMeteredSize += (0, utils_js_1.meteredSizeBytes)(record);
-                }
-                if (batchMeteredSize > 1024 * 1024) {
-                    throw new index_js_2.S2Error({
-                        message: `Batch size ${batchMeteredSize} bytes exceeds maximum of 1 MiB (1048576 bytes)`,
-                    });
-                }
-                // Wait for capacity if needed (backpressure)
-                while (this.queuedBytes + batchMeteredSize > this.maxQueuedBytes &&
-                    !this.closed) {
-                    await new Promise((resolve) => {
-                        this.waitingForCapacity.push(resolve);
-                    });
-                }
-                if (this.closed) {
-                    throw new index_js_2.S2Error({ message: "AppendSession is closed" });
-                }
-                // Send the batch immediately (pipelined)
-                // Returns when frame is sent, not when ack is received
-                await this.sendBatchNonBlocking(recordsArray, chunk, batchMeteredSize);
-            },
-            close: async () => {
-                this.closed = true;
-                await this.closeStream();
-            },
-            abort: async (reason) => {
-                this.closed = true;
-                this.queuedBytes = 0;
-                // Reject all pending acks
-                const error = new index_js_2.S2Error({
-                    message: `AppendSession was aborted: ${reason}`,
-                });
-                for (const pending of this.pendingAcks) {
-                    pending.reject(error);
-                }
-                this.pendingAcks = [];
-                // Wake up all waiting for capacity
-                for (const resolver of this.waitingForCapacity) {
-                    resolver();
-                }
-                this.waitingForCapacity = [];
-                if (this.http2Stream && !this.http2Stream.closed) {
-                    this.http2Stream.close();
-                }
-            },
-        });
-        this.writable = this._writable;
+        // No stream setup
+        // Initialization happens lazily on first submit
     }
     async initializeStream() {
         const url = new URL(this.baseUrl);
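The WritableStream/ReadableStream pair (`writable`, `acks()`) is gone; the append session is now a plain object with `submit()` and `close()`, both resolving to results. A consumer-side sketch (the `AppendSessionLike` interface and record shape here are assumptions, reconstructed from the compiled methods below):

```ts
type Result<T> = { ok: true; value: T } | { ok: false; error: Error };

interface AppendSessionLike {
  submit(records: Array<{ body: string }>): Promise<Result<unknown>>;
  close(): Promise<Result<void>>;
}

async function appendAndClose(session: AppendSessionLike) {
  const result = await session.submit([{ body: "hello" }]);
  if (!result.ok) console.error("append failed:", result.error);
  const closed = await session.close(); // waits for pending acks; never throws
  if (!closed.ok) console.error("close failed:", closed.error);
}
```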
@@ -497,9 +499,11 @@
             ":path": path,
             ":scheme": url.protocol.slice(0, -1),
             ":authority": url.host,
+            "user-agent": runtime_js_1.DEFAULT_USER_AGENT,
             authorization: `Bearer ${Redacted.value(this.authToken)}`,
             "content-type": "s2s/proto",
             accept: "application/protobuf",
+            ...(this.basinName ? { "s2-basin": this.basinName } : {}),
         });
         this.http2Stream = stream;
         this.options?.signal?.addEventListener("abort", () => {
@@ -508,145 +512,87 @@
             }
         });
         const textDecoder = new TextDecoder();
-
-
-
-
-
-                    this.acksController.close();
-                }
-                catch {
-                    // Controller may already be closed, ignore
-                }
-            }
-        };
-        const safeError = (err) => {
-            if (!controllerClosed && this.acksController) {
-                controllerClosed = true;
-                this.acksController.error(err);
-            }
-            // Reject all pending acks
+        const safeError = (error) => {
+            const s2Err = error instanceof error_js_1.S2Error
+                ? error
+                : new error_js_1.S2Error({ message: String(error), status: 502 });
+            // Resolve all pending acks with error result
             for (const pending of this.pendingAcks) {
-                pending.
+                pending.resolve((0, result_js_1.err)(s2Err));
             }
             this.pendingAcks = [];
         };
         // Handle incoming data (acks)
         stream.on("data", (chunk) => {
-
-
-
-
-            if (frame.
-
-
-            const
-
-
-
-
-
-
-
-
-
-
+            try {
+                this.parser.push(chunk);
+                let frame = this.parser.parseFrame();
+                while (frame) {
+                    if (frame.terminal) {
+                        if (frame.statusCode && frame.statusCode >= 400) {
+                            const errorText = textDecoder.decode(frame.body);
+                            const status = frame.statusCode ?? 500;
+                            try {
+                                const errorJson = JSON.parse(errorText);
+                                const err = status === 412
+                                    ? (0, error_js_1.makeAppendPreconditionError)(status, errorJson)
+                                    : (0, error_js_1.makeServerError)({ status, statusText: undefined }, errorJson);
+                                queueMicrotask(() => safeError(err));
+                            }
+                            catch {
+                                const err = (0, error_js_1.makeServerError)({ status, statusText: undefined }, errorText);
+                                queueMicrotask(() => safeError(err));
+                            }
                         }
+                        stream.close();
                     }
                     else {
-
-
-
-
-
-
-
-
-                        const ack = convertAppendAck(protoAck);
-                        this._lastAckedPosition = ack;
-                        // Enqueue to readable stream
-                        if (this.acksController) {
-                            this.acksController.enqueue(ack);
-                        }
-                        // Resolve the pending ack promise
-                        const pending = this.pendingAcks.shift();
-                        if (pending) {
-                            pending.resolve(ack);
-                            // Release capacity
-                            this.queuedBytes -= pending.batchSize;
-                            // Wake up one waiting writer
-                            if (this.waitingForCapacity.length > 0) {
-                                const waiter = this.waitingForCapacity.shift();
-                                waiter();
+                        // Parse AppendAck
+                        try {
+                            const protoAck = s2_js_1.AppendAck.fromBinary(frame.body);
+                            const ack = convertAppendAck(protoAck);
+                            // Resolve the pending ack promise (FIFO)
+                            const pending = this.pendingAcks.shift();
+                            if (pending) {
+                                pending.resolve((0, result_js_1.ok)(ack));
                             }
                         }
+                        catch (parseErr) {
+                            queueMicrotask(() => safeError(new error_js_1.S2Error({
+                                message: `Failed to parse AppendAck: ${parseErr}`,
+                                status: 500,
+                            })));
+                        }
                     }
-
-                    safeError(new index_js_2.S2Error({
-                        message: `Failed to parse AppendAck: ${err}`,
-                    }));
-                }
+                    frame = this.parser.parseFrame();
                 }
-
+            }
+            catch (error) {
+                queueMicrotask(() => safeError(error));
             }
         });
-        stream.on("error", (
-            safeError(
+        stream.on("error", (streamErr) => {
+            queueMicrotask(() => safeError(streamErr));
         });
         stream.on("close", () => {
-
-
-
-
-
-
-
-
-
-            }
-            // Convert to protobuf AppendInput
-            const protoInput = buildProtoAppendInput(records, args);
-            const bodyBytes = s2_js_1.AppendInput.toBinary(protoInput);
-            // Frame the message
-            const frame = (0, framing_js_1.frameMessage)({
-                terminal: false,
-                body: bodyBytes,
-            });
-            // This promise resolves when the frame is written (not when ack is received)
-            return new Promise((resolve, reject) => {
-                // Track pending ack - will be resolved when ack arrives
-                const ackPromise = {
-                    resolve: () => { },
-                    reject,
-                    batchSize: batchMeteredSize,
-                };
-                this.pendingAcks.push(ackPromise);
-                this.queuedBytes += batchMeteredSize;
-                // Send the frame (pipelined)
-                this.http2Stream.write(frame, (err) => {
-                    if (err) {
-                        // Remove from pending acks on write error
-                        const idx = this.pendingAcks.indexOf(ackPromise);
-                        if (idx !== -1) {
-                            this.pendingAcks.splice(idx, 1);
-                            this.queuedBytes -= batchMeteredSize;
-                        }
-                        reject(err);
-                    }
-                    else {
-                        // Frame written successfully - resolve immediately (pipelined)
-                        resolve();
-                    }
-                });
+            // Stream closed - resolve any remaining pending acks with error
+            // This can happen if the server closes the stream without sending all acks
+            if (this.pendingAcks.length > 0) {
+                queueMicrotask(() => safeError(new error_js_1.S2Error({
+                    message: "Stream closed with pending acks",
+                    status: 502,
+                    code: "BAD_GATEWAY",
+                })));
+            }
         });
     }
     /**
-     * Send a batch and wait for ack
+     * Send a batch and wait for ack. Returns AppendResult (never throws).
+     * Pipelined: multiple sends can be in-flight; acks resolve FIFO.
      */
     sendBatch(records, args, batchMeteredSize) {
         if (!this.http2Stream || this.http2Stream.closed) {
-            return Promise.
+            return Promise.resolve((0, result_js_1.err)(new error_js_1.S2Error({ message: "HTTP/2 stream is not open", status: 502 })));
         }
         // Convert to protobuf AppendInput
         const protoInput = buildProtoAppendInput(records, args);
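Acks are matched to in-flight submits purely by order: each send pushes a resolver onto `pendingAcks`, and each ack frame shifts and resolves the oldest one. A stripped-down sketch of that FIFO correlation (simplified names; no framing or error paths):

```ts
// Minimal FIFO ack matching, as used by pendingAcks above.
type Ack = { seqNum: number };
const pendingAcks: Array<(ack: Ack) => void> = [];

function submit(): Promise<Ack> {
  // ...write the frame here, pipelined and non-blocking...
  return new Promise((resolve) => pendingAcks.push(resolve));
}

function onAckFrame(ack: Ack) {
  pendingAcks.shift()?.(ack); // acks arrive in the same order as submits
}
```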
@@ -656,82 +602,99 @@
             terminal: false,
             body: bodyBytes,
         });
-        // Track pending ack - this promise resolves when the ack is received
-        return new Promise((resolve
+        // Track pending ack - this promise resolves when the ack is received (FIFO)
+        return new Promise((resolve) => {
             this.pendingAcks.push({
                 resolve,
-                reject,
                 batchSize: batchMeteredSize,
             });
-
-
-
-            if (err) {
+            // Send the frame (pipelined - non-blocking)
+            this.http2Stream.write(frame, (writeErr) => {
+                if (writeErr) {
                     // Remove from pending acks on write error
-                    const idx = this.pendingAcks.findIndex((p) => p.
+                    const idx = this.pendingAcks.findIndex((p) => p.resolve === resolve);
                     if (idx !== -1) {
                         this.pendingAcks.splice(idx, 1);
-                        this.queuedBytes -= batchMeteredSize;
                     }
-
+                    // Resolve with error result
+                    const s2Err = writeErr instanceof error_js_1.S2Error
+                        ? writeErr
+                        : new error_js_1.S2Error({ message: String(writeErr), status: 502 });
+                    resolve((0, result_js_1.err)(s2Err));
                 }
-                // Write completed
+                // Write completed successfully - promise resolves later when ack is received
             });
         });
     }
-    async closeStream() {
-        // Wait for all pending acks
-        while (this.pendingAcks.length > 0) {
-            await new Promise((resolve) => setTimeout(resolve, 10));
-        }
-        // Close the HTTP/2 stream (client doesn't send terminal frame for clean close)
-        if (this.http2Stream && !this.http2Stream.closed) {
-            this.http2Stream.end();
-        }
-    }
-    async [Symbol.asyncDispose]() {
-        await this.close();
-    }
-    /**
-     * Get a stream of acknowledgements for appends.
-     */
-    acks() {
-        return this._readable;
-    }
     /**
      * Close the append session.
      * Waits for all pending appends to complete before resolving.
+     * Never throws - returns CloseResult.
      */
     async close() {
-
+        try {
+            this.closed = true;
+            // Wait for all pending acks to complete
+            while (this.pendingAcks.length > 0) {
+                await new Promise((resolve) => setTimeout(resolve, 10));
+            }
+            // Close the HTTP/2 stream (client doesn't send terminal frame for clean close)
+            if (this.http2Stream && !this.http2Stream.closed) {
+                this.http2Stream.end();
+            }
+            return (0, result_js_1.okClose)();
+        }
+        catch (error) {
+            const s2Err = error instanceof error_js_1.S2Error
+                ? error
+                : new error_js_1.S2Error({ message: String(error), status: 500 });
+            return (0, result_js_1.errClose)(s2Err);
+        }
     }
     /**
      * Submit an append request to the session.
-     * Returns
+     * Returns AppendResult (never throws).
+     * Pipelined: multiple submits can be in-flight; acks resolve FIFO.
      */
     async submit(records, args) {
+        // Validate closed state
         if (this.closed) {
-            return
+            return (0, result_js_1.err)(new error_js_1.S2Error({ message: "AppendSession is closed", status: 400 }));
+        }
+        // Lazy initialize HTTP/2 stream on first submit
+        if (!this.initPromise) {
+            this.initPromise = this.initializeStream();
         }
-
-        if (this.initPromise) {
+        try {
             await this.initPromise;
         }
+        catch (initErr) {
+            const s2Err = initErr instanceof error_js_1.S2Error
+                ? initErr
+                : new error_js_1.S2Error({ message: String(initErr), status: 502 });
+            return (0, result_js_1.err)(s2Err);
+        }
         const recordsArray = Array.isArray(records) ? records : [records];
-        // Validate batch size limits
+        // Validate batch size limits (non-retryable 400-level error)
         if (recordsArray.length > 1000) {
-            return
+            return (0, result_js_1.err)(new error_js_1.S2Error({
                 message: `Batch of ${recordsArray.length} exceeds maximum batch size of 1000 records`,
+                status: 400,
+                code: "INVALID_ARGUMENT",
             }));
         }
-        // Calculate metered size
-        let batchMeteredSize = 0;
-
-
+        // Calculate metered size (use precalculated if provided)
+        let batchMeteredSize = args?.precalculatedSize ?? 0;
+        if (batchMeteredSize === 0) {
+            for (const record of recordsArray) {
+                batchMeteredSize += (0, utils_js_1.meteredBytes)(record);
+            }
         }
         if (batchMeteredSize > 1024 * 1024) {
-            return
+            return (0, result_js_1.err)(new error_js_1.S2Error({
                 message: `Batch size ${batchMeteredSize} bytes exceeds maximum of 1 MiB (1048576 bytes)`,
+                status: 400,
+                code: "INVALID_ARGUMENT",
             }));
         }
         return this.sendBatch(recordsArray, {
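Batch validation is unchanged in substance (at most 1000 records and 1 MiB of metered size per batch) but now reports violations as non-retryable 400 results instead of throwing. A caller can pre-check the same limits before submitting; a sketch, assuming a `meteredBytes` helper like the one the session itself uses:

```ts
// Sketch of the same limits the session enforces.
const MAX_BATCH_RECORDS = 1000;
const MAX_BATCH_BYTES = 1024 * 1024; // 1 MiB

function batchWithinLimits(records: unknown[], meteredBytes: (r: unknown) => number): boolean {
  if (records.length > MAX_BATCH_RECORDS) return false;
  let total = 0;
  for (const r of records) total += meteredBytes(r);
  return total <= MAX_BATCH_BYTES;
}
```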
@@ -740,9 +703,6 @@
             match_seq_num: args?.match_seq_num,
         }, batchMeteredSize);
     }
-    lastAckedPosition() {
-        return this._lastAckedPosition;
-    }
 }
 /**
  * Convert protobuf StreamPosition to OpenAPI StreamPosition