@s2-dev/streamstore 0.19.5 → 0.21.0
This diff reflects the content of publicly available package versions as published to their respective public registries, and is provided for informational purposes only.
- package/LICENSE +21 -201
- package/README.md +60 -10
- package/dist/cjs/accessTokens.d.ts +27 -14
- package/dist/cjs/accessTokens.d.ts.map +1 -1
- package/dist/cjs/accessTokens.js +72 -8
- package/dist/cjs/accessTokens.js.map +1 -1
- package/dist/cjs/basins.d.ts +29 -19
- package/dist/cjs/basins.d.ts.map +1 -1
- package/dist/cjs/basins.js +119 -9
- package/dist/cjs/basins.js.map +1 -1
- package/dist/cjs/batch-transform.d.ts +12 -16
- package/dist/cjs/batch-transform.d.ts.map +1 -1
- package/dist/cjs/batch-transform.js +17 -21
- package/dist/cjs/batch-transform.js.map +1 -1
- package/dist/cjs/common.d.ts +31 -24
- package/dist/cjs/common.d.ts.map +1 -1
- package/dist/cjs/common.js +22 -0
- package/dist/cjs/common.js.map +1 -1
- package/dist/cjs/endpoints.d.ts +63 -0
- package/dist/cjs/endpoints.d.ts.map +1 -0
- package/dist/cjs/endpoints.js +120 -0
- package/dist/cjs/endpoints.js.map +1 -0
- package/dist/cjs/error.d.ts.map +1 -1
- package/dist/cjs/error.js +11 -0
- package/dist/cjs/error.js.map +1 -1
- package/dist/cjs/generated/types.gen.d.ts +11 -20
- package/dist/cjs/generated/types.gen.d.ts.map +1 -1
- package/dist/cjs/index.d.ts +30 -46
- package/dist/cjs/index.d.ts.map +1 -1
- package/dist/cjs/index.js +50 -26
- package/dist/cjs/index.js.map +1 -1
- package/dist/cjs/internal/case-transform.d.ts +59 -0
- package/dist/cjs/internal/case-transform.d.ts.map +1 -0
- package/dist/cjs/internal/case-transform.js +80 -0
- package/dist/cjs/internal/case-transform.js.map +1 -0
- package/dist/cjs/internal/mappers.d.ts +51 -0
- package/dist/cjs/internal/mappers.d.ts.map +1 -0
- package/dist/cjs/internal/mappers.js +225 -0
- package/dist/cjs/internal/mappers.js.map +1 -0
- package/dist/cjs/internal/sdk-types.d.ts +127 -0
- package/dist/cjs/internal/sdk-types.d.ts.map +1 -0
- package/dist/cjs/internal/sdk-types.js +9 -0
- package/dist/cjs/internal/sdk-types.js.map +1 -0
- package/dist/cjs/lib/base64.d.ts +8 -0
- package/dist/cjs/lib/base64.d.ts.map +1 -1
- package/dist/cjs/lib/base64.js +32 -12
- package/dist/cjs/lib/base64.js.map +1 -1
- package/dist/cjs/lib/event-stream.d.ts.map +1 -1
- package/dist/cjs/lib/event-stream.js +2 -1
- package/dist/cjs/lib/event-stream.js.map +1 -1
- package/dist/cjs/lib/paginate.d.ts +57 -0
- package/dist/cjs/lib/paginate.d.ts.map +1 -0
- package/dist/cjs/lib/paginate.js +51 -0
- package/dist/cjs/lib/paginate.js.map +1 -0
- package/dist/cjs/lib/result.d.ts +1 -1
- package/dist/cjs/lib/result.d.ts.map +1 -1
- package/dist/cjs/lib/retry.d.ts +47 -31
- package/dist/cjs/lib/retry.d.ts.map +1 -1
- package/dist/cjs/lib/retry.js +302 -201
- package/dist/cjs/lib/retry.js.map +1 -1
- package/dist/cjs/lib/stream/runtime.d.ts +1 -1
- package/dist/cjs/lib/stream/transport/fetch/index.d.ts +7 -9
- package/dist/cjs/lib/stream/transport/fetch/index.d.ts.map +1 -1
- package/dist/cjs/lib/stream/transport/fetch/index.js +38 -39
- package/dist/cjs/lib/stream/transport/fetch/index.js.map +1 -1
- package/dist/cjs/lib/stream/transport/fetch/shared.d.ts +7 -2
- package/dist/cjs/lib/stream/transport/fetch/shared.d.ts.map +1 -1
- package/dist/cjs/lib/stream/transport/fetch/shared.js +56 -110
- package/dist/cjs/lib/stream/transport/fetch/shared.js.map +1 -1
- package/dist/cjs/lib/stream/transport/proto.d.ts +9 -0
- package/dist/cjs/lib/stream/transport/proto.d.ts.map +1 -0
- package/dist/cjs/lib/stream/transport/proto.js +118 -0
- package/dist/cjs/lib/stream/transport/proto.js.map +1 -0
- package/dist/cjs/lib/stream/transport/s2s/index.d.ts +3 -3
- package/dist/cjs/lib/stream/transport/s2s/index.d.ts.map +1 -1
- package/dist/cjs/lib/stream/transport/s2s/index.js +115 -82
- package/dist/cjs/lib/stream/transport/s2s/index.js.map +1 -1
- package/dist/cjs/lib/stream/types.d.ts +81 -36
- package/dist/cjs/lib/stream/types.d.ts.map +1 -1
- package/dist/cjs/lib/stream/types.js +18 -0
- package/dist/cjs/lib/stream/types.js.map +1 -1
- package/dist/cjs/metrics.d.ts +18 -17
- package/dist/cjs/metrics.d.ts.map +1 -1
- package/dist/cjs/metrics.js +67 -12
- package/dist/cjs/metrics.js.map +1 -1
- package/dist/cjs/producer.d.ts +82 -0
- package/dist/cjs/producer.d.ts.map +1 -0
- package/dist/cjs/producer.js +305 -0
- package/dist/cjs/producer.js.map +1 -0
- package/dist/cjs/s2.d.ts +1 -2
- package/dist/cjs/s2.d.ts.map +1 -1
- package/dist/cjs/s2.js +11 -15
- package/dist/cjs/s2.js.map +1 -1
- package/dist/cjs/stream.d.ts +26 -12
- package/dist/cjs/stream.d.ts.map +1 -1
- package/dist/cjs/stream.js +77 -13
- package/dist/cjs/stream.js.map +1 -1
- package/dist/cjs/streams.d.ts +29 -19
- package/dist/cjs/streams.d.ts.map +1 -1
- package/dist/cjs/streams.js +120 -9
- package/dist/cjs/streams.js.map +1 -1
- package/dist/cjs/types.d.ts +624 -0
- package/dist/cjs/types.d.ts.map +1 -0
- package/dist/cjs/types.js +129 -0
- package/dist/cjs/types.js.map +1 -0
- package/dist/cjs/utils.d.ts +1 -22
- package/dist/cjs/utils.d.ts.map +1 -1
- package/dist/cjs/utils.js +0 -42
- package/dist/cjs/utils.js.map +1 -1
- package/dist/cjs/version.d.ts +1 -1
- package/dist/cjs/version.js +1 -1
- package/dist/esm/accessTokens.d.ts +27 -14
- package/dist/esm/accessTokens.d.ts.map +1 -1
- package/dist/esm/accessTokens.js +73 -9
- package/dist/esm/accessTokens.js.map +1 -1
- package/dist/esm/basins.d.ts +29 -19
- package/dist/esm/basins.d.ts.map +1 -1
- package/dist/esm/basins.js +119 -9
- package/dist/esm/basins.js.map +1 -1
- package/dist/esm/batch-transform.d.ts +12 -16
- package/dist/esm/batch-transform.d.ts.map +1 -1
- package/dist/esm/batch-transform.js +18 -22
- package/dist/esm/batch-transform.js.map +1 -1
- package/dist/esm/common.d.ts +31 -24
- package/dist/esm/common.d.ts.map +1 -1
- package/dist/esm/common.js +20 -1
- package/dist/esm/common.js.map +1 -1
- package/dist/esm/endpoints.d.ts +63 -0
- package/dist/esm/endpoints.d.ts.map +1 -0
- package/dist/esm/endpoints.js +115 -0
- package/dist/esm/endpoints.js.map +1 -0
- package/dist/esm/error.d.ts.map +1 -1
- package/dist/esm/error.js +11 -0
- package/dist/esm/error.js.map +1 -1
- package/dist/esm/generated/types.gen.d.ts +11 -20
- package/dist/esm/generated/types.gen.d.ts.map +1 -1
- package/dist/esm/index.d.ts +30 -46
- package/dist/esm/index.d.ts.map +1 -1
- package/dist/esm/index.js +33 -19
- package/dist/esm/index.js.map +1 -1
- package/dist/esm/internal/case-transform.d.ts +59 -0
- package/dist/esm/internal/case-transform.d.ts.map +1 -0
- package/dist/esm/internal/case-transform.js +76 -0
- package/dist/esm/internal/case-transform.js.map +1 -0
- package/dist/esm/internal/mappers.d.ts +51 -0
- package/dist/esm/internal/mappers.d.ts.map +1 -0
- package/dist/esm/internal/mappers.js +218 -0
- package/dist/esm/internal/mappers.js.map +1 -0
- package/dist/esm/internal/sdk-types.d.ts +127 -0
- package/dist/esm/internal/sdk-types.d.ts.map +1 -0
- package/dist/esm/internal/sdk-types.js +8 -0
- package/dist/esm/internal/sdk-types.js.map +1 -0
- package/dist/esm/lib/base64.d.ts +8 -0
- package/dist/esm/lib/base64.d.ts.map +1 -1
- package/dist/esm/lib/base64.js +30 -11
- package/dist/esm/lib/base64.js.map +1 -1
- package/dist/esm/lib/event-stream.d.ts.map +1 -1
- package/dist/esm/lib/event-stream.js +2 -1
- package/dist/esm/lib/event-stream.js.map +1 -1
- package/dist/esm/lib/paginate.d.ts +57 -0
- package/dist/esm/lib/paginate.d.ts.map +1 -0
- package/dist/esm/lib/paginate.js +48 -0
- package/dist/esm/lib/paginate.js.map +1 -0
- package/dist/esm/lib/result.d.ts +1 -1
- package/dist/esm/lib/result.d.ts.map +1 -1
- package/dist/esm/lib/retry.d.ts +47 -31
- package/dist/esm/lib/retry.d.ts.map +1 -1
- package/dist/esm/lib/retry.js +303 -201
- package/dist/esm/lib/retry.js.map +1 -1
- package/dist/esm/lib/stream/runtime.d.ts +1 -1
- package/dist/esm/lib/stream/transport/fetch/index.d.ts +7 -9
- package/dist/esm/lib/stream/transport/fetch/index.d.ts.map +1 -1
- package/dist/esm/lib/stream/transport/fetch/index.js +40 -41
- package/dist/esm/lib/stream/transport/fetch/index.js.map +1 -1
- package/dist/esm/lib/stream/transport/fetch/shared.d.ts +7 -2
- package/dist/esm/lib/stream/transport/fetch/shared.d.ts.map +1 -1
- package/dist/esm/lib/stream/transport/fetch/shared.js +58 -112
- package/dist/esm/lib/stream/transport/fetch/shared.js.map +1 -1
- package/dist/esm/lib/stream/transport/proto.d.ts +9 -0
- package/dist/esm/lib/stream/transport/proto.d.ts.map +1 -0
- package/dist/esm/lib/stream/transport/proto.js +110 -0
- package/dist/esm/lib/stream/transport/proto.js.map +1 -0
- package/dist/esm/lib/stream/transport/s2s/index.d.ts +3 -3
- package/dist/esm/lib/stream/transport/s2s/index.d.ts.map +1 -1
- package/dist/esm/lib/stream/transport/s2s/index.js +116 -82
- package/dist/esm/lib/stream/transport/s2s/index.js.map +1 -1
- package/dist/esm/lib/stream/types.d.ts +81 -36
- package/dist/esm/lib/stream/types.d.ts.map +1 -1
- package/dist/esm/lib/stream/types.js +17 -1
- package/dist/esm/lib/stream/types.js.map +1 -1
- package/dist/esm/metrics.d.ts +18 -17
- package/dist/esm/metrics.d.ts.map +1 -1
- package/dist/esm/metrics.js +66 -12
- package/dist/esm/metrics.js.map +1 -1
- package/dist/esm/producer.d.ts +82 -0
- package/dist/esm/producer.d.ts.map +1 -0
- package/dist/esm/producer.js +300 -0
- package/dist/esm/producer.js.map +1 -0
- package/dist/esm/s2.d.ts +1 -2
- package/dist/esm/s2.d.ts.map +1 -1
- package/dist/esm/s2.js +12 -16
- package/dist/esm/s2.js.map +1 -1
- package/dist/esm/stream.d.ts +26 -12
- package/dist/esm/stream.d.ts.map +1 -1
- package/dist/esm/stream.js +79 -15
- package/dist/esm/stream.js.map +1 -1
- package/dist/esm/streams.d.ts +29 -19
- package/dist/esm/streams.d.ts.map +1 -1
- package/dist/esm/streams.js +120 -9
- package/dist/esm/streams.js.map +1 -1
- package/dist/esm/types.d.ts +624 -0
- package/dist/esm/types.d.ts.map +1 -0
- package/dist/esm/types.js +126 -0
- package/dist/esm/types.js.map +1 -0
- package/dist/esm/utils.d.ts +1 -22
- package/dist/esm/utils.d.ts.map +1 -1
- package/dist/esm/utils.js +0 -41
- package/dist/esm/utils.js.map +1 -1
- package/dist/esm/version.d.ts +1 -1
- package/dist/esm/version.js +1 -1
- package/package.json +4 -3
package/dist/esm/lib/retry.js
CHANGED
@@ -1,18 +1,65 @@
 import createDebug from "debug";
 import { abortedError, invariantViolation, S2Error, s2Error, withS2Error, } from "../error.js";
+import * as Types from "../types.js";
 import { meteredBytes } from "../utils.js";
 import { err, errClose, ok, okClose } from "./result.js";
+import { BatchSubmitTicket } from "./stream/types.js";
 const debugWith = createDebug("s2:retry:with");
 const debugRead = createDebug("s2:retry:read");
 const debugSession = createDebug("s2:retry:session");
+/** Type guard for errors with a code property (e.g., Node.js errors). */
+function hasErrorCode(err, code) {
+return (typeof err === "object" &&
+err !== null &&
+"code" in err &&
+err.code === code);
+}
+/**
+* Convert generated StreamPosition to SDK StreamPosition.
+*/
+function toSDKStreamPosition(pos) {
+return {
+seqNum: pos.seq_num,
+timestamp: new Date(pos.timestamp),
+};
+}
+/**
+* Convert internal ReadRecord (with headers as object for strings) to SDK ReadRecord (with headers as array).
+*/
+function toSDKReadRecord(record) {
+if (record.headers &&
+typeof record.headers === "object" &&
+!Array.isArray(record.headers)) {
+// String format: headers is an object, convert to array of tuples
+const result = {
+seqNum: record.seq_num,
+timestamp: new Date(record.timestamp),
+body: record.body ?? "",
+headers: Object.entries(record.headers),
+};
+return result;
+}
+else {
+// Bytes format: headers is already an array
+const result = {
+seqNum: record.seq_num,
+timestamp: new Date(record.timestamp),
+body: record.body ?? new Uint8Array(),
+headers: record.headers ?? [],
+};
+return result;
+}
+}
 /**
  * Default retry configuration.
  */
 export const DEFAULT_RETRY_CONFIG = {
 maxAttempts: 3,
-
+minDelayMillis: 100,
+maxDelayMillis: 1000,
 appendRetryPolicy: "all",
 requestTimeoutMillis: 5000, // 5 seconds
+connectionTimeoutMillis: 5000, // 5 seconds
 };
 const RETRYABLE_STATUS_CODES = new Set([
 408, // request_timeout
@@ -20,6 +67,7 @@ const RETRYABLE_STATUS_CODES = new Set([
 500, // internal_server_error
 502, // bad_gateway
 503, // service_unavailable
+504, // gateway_timeout
 ]);
 /**
  * Determines if an error should be retried based on its characteristics.
@@ -39,16 +87,25 @@ export function isRetryable(error) {
 return false;
 }
 /**
-* Calculates the delay before the next retry attempt using
-* with jitter.
-*
+* Calculates the delay before the next retry attempt using exponential backoff
+* with additive jitter.
+*
+* Formula:
+* baseDelay = min(minDelayMillis * 2^attempt, maxDelayMillis)
+* jitter = random(0, baseDelay)
+* delay = baseDelay + jitter
+*
+* @param attempt - Zero-based retry attempt number (0 = first retry)
+* @param minDelayMillis - Minimum delay for exponential backoff
+* @param maxDelayMillis - Maximum base delay (actual delay can be up to 2x with jitter)
 */
-export function calculateDelay(attempt,
-//
-const
-
-const
-
+export function calculateDelay(attempt, minDelayMillis, maxDelayMillis) {
+// Calculate exponential backoff: minDelay * 2^attempt, capped at maxDelay
+const baseDelay = Math.min(minDelayMillis * Math.pow(2, attempt), maxDelayMillis);
+// Add jitter: random value in [0, baseDelay)
+const jitter = Math.random() * baseDelay;
+// Total delay is base + jitter
+return Math.floor(baseDelay + jitter);
 }
 /**
  * Sleeps for the specified duration.
@@ -100,7 +157,7 @@ export async function withRetries(retryConfig, fn, isPolicyCompliant = () => tru
 throw error;
 }
 // Calculate delay and wait before retrying
-const delay = calculateDelay(attemptNo - 1, config.
+const delay = calculateDelay(attemptNo - 1, config.minDelayMillis, config.maxDelayMillis);
 debugWith("retryable error, backing off for %dms, status=%s", delay, error.status);
 await sleep(delay);
 }
@@ -129,16 +186,11 @@ export class RetryReadSession extends ReadableStream {
 return new RetryReadSession(args, generator, config, session);
 }
 catch (err) {
-const error = err
-? err
-: new S2Error({
-message: String(err),
-status: 502,
-});
+const error = s2Error(err);
 lastError = error;
 const effectiveMax = Math.max(1, retryConfig.maxAttempts);
 if (isRetryable(error) && attempt < effectiveMax - 1) {
-const delay = calculateDelay(attempt, retryConfig.
+const delay = calculateDelay(attempt, retryConfig.minDelayMillis, retryConfig.maxDelayMillis);
 debugRead("connection error in create, will retry after %dms, status=%s", delay, error.status);
 await sleep(delay);
 attempt++;
@@ -174,16 +226,11 @@ export class RetryReadSession extends ReadableStream {
 }
 catch (err) {
 // Convert to S2Error if needed
-const error = err
-? err
-: new S2Error({
-message: String(err),
-status: 502, // Bad Gateway - connection failure
-});
+const error = s2Error(err);
 // Check if we can retry connection errors
 const effectiveMax = Math.max(1, retryConfig.maxAttempts);
 if (isRetryable(error) && attempt < effectiveMax - 1) {
-const delay = calculateDelay(attempt, retryConfig.
+const delay = calculateDelay(attempt, retryConfig.minDelayMillis, retryConfig.maxDelayMillis);
 debugRead("connection error, will retry after %dms, status=%s", delay, error.status);
 await sleep(delay);
 attempt++;
@@ -224,7 +271,7 @@ export class RetryReadSession extends ReadableStream {
 delete nextArgs.tail_offset;
 }
 // Compute planned backoff delay now so we can subtract it from wait budget
-const delay = calculateDelay(attempt, retryConfig.
+const delay = calculateDelay(attempt, retryConfig.minDelayMillis, retryConfig.maxDelayMillis);
 // Recompute remaining budget from original request each time to avoid double-subtraction
 if (baselineCount !== undefined) {
 nextArgs.count = Math.max(0, baselineCount - this._recordsRead);
@@ -235,7 +282,7 @@ export class RetryReadSession extends ReadableStream {
 // Adjust wait from original budget based on total elapsed time since start
 if (baselineWait !== undefined) {
 const elapsedSeconds = (performance.now() - startTimeMs) / 1000;
-nextArgs.wait = Math.max(0, baselineWait - (elapsedSeconds + delay / 1000));
+nextArgs.wait = Math.max(0, Math.floor(baselineWait - (elapsedSeconds + delay / 1000)));
 }
 // Proactively cancel the current transport session before retrying
 try {
@@ -263,7 +310,7 @@ export class RetryReadSession extends ReadableStream {
 this._recordsRead++;
 this._bytesRead += meteredBytes(record);
 attempt = 0;
-controller.enqueue(record);
+controller.enqueue(toSDKReadRecord(record));
 }
 }
 },
@@ -273,7 +320,7 @@ export class RetryReadSession extends ReadableStream {
 }
 catch (err) {
 // Ignore ERR_INVALID_STATE - stream may already be closed/cancelled
-if (err
+if (!hasErrorCode(err, "ERR_INVALID_STATE")) {
 throw err;
 }
 }
@@ -285,7 +332,8 @@ export class RetryReadSession extends ReadableStream {
 }
 // Polyfill for older browsers / Node.js environments
 [Symbol.asyncIterator]() {
-const
+const proto = ReadableStream.prototype;
+const fn = proto[Symbol.asyncIterator];
 if (typeof fn === "function") {
 try {
 return fn.call(this);
@@ -310,7 +358,7 @@ export class RetryReadSession extends ReadableStream {
 await reader.cancel(e);
 }
 catch (err) {
-if (err
+if (!hasErrorCode(err, "ERR_INVALID_STATE"))
 throw err;
 }
 reader.releaseLock();
@@ -321,7 +369,7 @@ export class RetryReadSession extends ReadableStream {
 await reader.cancel("done");
 }
 catch (err) {
-if (err
+if (!hasErrorCode(err, "ERR_INVALID_STATE"))
 throw err;
 }
 reader.releaseLock();
@@ -333,13 +381,18 @@ export class RetryReadSession extends ReadableStream {
 };
 }
 lastObservedTail() {
-return this._lastObservedTail
+return this._lastObservedTail
+? toSDKStreamPosition(this._lastObservedTail)
+: undefined;
 }
 nextReadPosition() {
-return this._nextReadPosition
+return this._nextReadPosition
+? toSDKStreamPosition(this._nextReadPosition)
+: undefined;
 }
 }
-const
+const MIN_MAX_INFLIGHT_BYTES = 1 * 1024 * 1024; // 1 MiB minimum
+const DEFAULT_MAX_INFLIGHT_BYTES = 3 * 1024 * 1024; // 3 MiB default
 export class RetryAppendSession {
 generator;
 sessionOptions;
@@ -348,10 +401,11 @@ export class RetryAppendSession {
 maxInflightBatches;
 retryConfig;
 inflight = [];
-
+capacityWaiters = []; // Queue of waiters for capacity
 session;
 queuedBytes = 0;
 pendingBytes = 0;
+pendingBatches = 0;
 consecutiveFailures = 0;
 currentAttempt = 0;
 pumpPromise;
@@ -364,6 +418,7 @@ export class RetryAppendSession {
 acksController;
 readable;
 writable;
+streamName;
 /**
  * If the session has failed, returns the original fatal error that caused
  * the pump to stop. Returns undefined when the session has not failed.
@@ -371,17 +426,22 @@ export class RetryAppendSession {
 failureCause() {
 return this.fatalError;
 }
-constructor(generator, sessionOptions, config) {
+constructor(generator, sessionOptions, config, streamName) {
 this.generator = generator;
 this.sessionOptions = sessionOptions;
+this.streamName = streamName ?? "unknown";
 this.retryConfig = {
 ...DEFAULT_RETRY_CONFIG,
 ...config,
 };
 this.requestTimeoutMillis = this.retryConfig.requestTimeoutMillis;
-
-
-
+// Clamp maxInflightBytes to at least 1 MiB
+this.maxQueuedBytes = Math.max(MIN_MAX_INFLIGHT_BYTES, this.sessionOptions?.maxInflightBytes ?? DEFAULT_MAX_INFLIGHT_BYTES);
+// Clamp maxInflightBatches to at least 1 if set
+this.maxInflightBatches =
+this.sessionOptions?.maxInflightBatches !== undefined
+? Math.max(1, this.sessionOptions.maxInflightBatches)
+: undefined;
 this.readable = new ReadableStream({
 start: (controller) => {
 this.acksController = controller;
@@ -389,25 +449,16 @@ export class RetryAppendSession {
 });
 this.writable = new WritableStream({
 write: async (chunk) => {
-
-
-: [chunk.records];
-// Calculate metered size
-let batchMeteredSize = 0;
-for (const record of recordsArray) {
-batchMeteredSize += meteredBytes(record);
+if (this.closed || this.closing) {
+throw new S2Error({ message: "AppendSession is closed" });
 }
-//
-
-const
-
-
-
-
-// Submit without waiting for ack (writable doesn't need per-batch resolution)
-const promise = this.submitInternal(recordsArray, args, batchMeteredSize);
-promise.catch(() => {
-// Swallow to avoid unhandled rejection; pump surfaces errors via readable stream
+// chunk is already AppendInput with meteredBytes computed
+// Reuse submit() to leverage shared backpressure/pump logic.
+const ticket = await this.submit(chunk);
+// Writable stream API only needs enqueue semantics, so drop ack but
+// suppress rejection noise (pump surfaces fatal errors elsewhere).
+ticket.ack().catch(() => {
+// Intentionally ignored.
 });
 },
 close: async () => {
@@ -419,57 +470,104 @@ export class RetryAppendSession {
 },
 });
 }
-static async create(generator, sessionOptions, config) {
-return new RetryAppendSession(generator, sessionOptions, config);
+static async create(generator, sessionOptions, config, streamName) {
+return new RetryAppendSession(generator, sessionOptions, config, streamName);
 }
 /**
-*
-*
+* Wait for capacity to be available for the given batch size.
+* Call this before submit() to apply backpressure based on maxInflightBatches/maxInflightBytes.
+*
+* @param bytes - Size in bytes (use meteredBytes() to calculate)
+* @param numBatches - Number of batches (default: 1)
+* @returns Promise that resolves when capacity is available
 */
-async
-
-//
-
-
-
-
+async waitForCapacity(bytes, numBatches = 1) {
+debugSession("[%s] [CAPACITY] checking for %d bytes, %d batches: queuedBytes=%d, pendingBytes=%d, maxQueuedBytes=%d, inflight=%d, pendingBatches=%d, maxInflightBatches=%s", this.streamName, bytes, numBatches, this.queuedBytes, this.pendingBytes, this.maxQueuedBytes, this.inflight.length, this.pendingBatches, this.maxInflightBatches ?? "unlimited");
+// Check if we have capacity
+while (true) {
+// Check for fatal error before adding to pendingBytes
+if (this.fatalError) {
+debugSession("[%s] [CAPACITY] fatal error detected, rejecting: %s", this.streamName, this.fatalError.message);
+throw this.fatalError;
 }
+// Byte-based gating
+if (this.queuedBytes + this.pendingBytes + bytes <= this.maxQueuedBytes) {
+// Batch-based gating (if configured)
+if (this.maxInflightBatches === undefined ||
+this.inflight.length + this.pendingBatches + numBatches <=
+this.maxInflightBatches) {
+debugSession("[%s] [CAPACITY] capacity available, adding %d to pendingBytes and %d to pendingBatches", this.streamName, bytes, numBatches);
+this.pendingBytes += bytes;
+this.pendingBatches += numBatches;
+return;
+}
+}
+// No capacity - wait in queue
+debugSession("[%s] [CAPACITY] no capacity, waiting for release", this.streamName);
+await new Promise((resolve) => {
+this.capacityWaiters.push({
+resolve,
+bytes,
+batches: numBatches,
+});
+});
+debugSession("[%s] [CAPACITY] woke up, rechecking", this.streamName);
 }
-
-
-
-
-
-
-
-
+}
+/**
+* Submit an append request.
+* Returns a promise that resolves to a submit ticket once the batch is enqueued (has capacity).
+* The ticket's ack() can be awaited to get the AppendAck once the batch is durable.
+* This method applies backpressure and will block if capacity limits are reached.
+*/
+async submit(input) {
+if (this.closed || this.closing) {
+return Promise.reject(new S2Error({ message: "AppendSession is closed" }));
+}
+// Use cached metered size from AppendInput
+const batchMeteredSize = input.meteredBytes;
+// This needs to happen in the sync path.
+this.ensurePump();
+// Wait for capacity (this is where backpressure is applied - outer promise resolves when enqueued)
+await this.waitForCapacity(batchMeteredSize, 1);
+// Move reserved bytes and batches to queued accounting before submission
+this.pendingBytes = Math.max(0, this.pendingBytes - batchMeteredSize);
+this.pendingBatches = Math.max(0, this.pendingBatches - 1);
+// Create the inner promise that resolves when durable
+const innerPromise = this.submitInternal(input, batchMeteredSize).then((result) => {
+if (result.ok) {
+return result.value;
+}
+else {
+throw result.error;
+}
+});
+// Prevent early rejections from surfacing as unhandled when callers delay ack()
+innerPromise.catch(() => { });
+// Return ticket immediately (outer promise has resolved via waitForCapacity)
+return new BatchSubmitTicket(innerPromise, batchMeteredSize, input.records.length);
 }
 /**
  * Internal submit that returns discriminated union.
  * Creates inflight entry and starts pump if needed.
  */
-submitInternal(
-if (this.closed || this.closing) {
-return Promise.resolve(err(new S2Error({ message: "AppendSession is closed", status: 400 })));
-}
+submitInternal(input, batchMeteredSize) {
 // Check for fatal error (e.g., from abort())
 if (this.fatalError) {
-debugSession("[SUBMIT] rejecting due to fatal error: %s", this.fatalError.message);
+debugSession("[%s] [SUBMIT] rejecting due to fatal error: %s", this.streamName, this.fatalError.message);
 return Promise.resolve(err(this.fatalError));
 }
 // Create promise for submit() callers
 return new Promise((resolve) => {
 // Create inflight entry (innerPromise will be set when pump processes it)
 const entry = {
-
-
-expectedCount: records.length,
-meteredBytes: batchMeteredSize,
+input,
+expectedCount: input.records.length,
 innerPromise: new Promise(() => { }), // Never-resolving placeholder
 maybeResolve: resolve,
 needsSubmit: true, // Mark for pump to submit
 };
-debugSession("[SUBMIT] enqueueing %d records (%d bytes): inflight=%d->%d, queuedBytes=%d->%d", records.length, batchMeteredSize, this.inflight.length, this.inflight.length + 1, this.queuedBytes, this.queuedBytes + batchMeteredSize);
+debugSession("[%s] [SUBMIT] enqueueing %d records (%d bytes), match_seq_num=%s: inflight=%d->%d, queuedBytes=%d->%d", this.streamName, input.records.length, batchMeteredSize, input.matchSeqNum ?? "none", this.inflight.length, this.inflight.length + 1, this.queuedBytes, this.queuedBytes + batchMeteredSize);
 this.inflight.push(entry);
 this.queuedBytes += batchMeteredSize;
 // Wake pump if it's sleeping
@@ -480,50 +578,42 @@ export class RetryAppendSession {
 this.ensurePump();
 });
 }
-/**
-* Wait for capacity before allowing write to proceed (writable only).
-*/
-async waitForCapacity(bytes) {
-debugSession("[CAPACITY] checking for %d bytes: queuedBytes=%d, pendingBytes=%d, maxQueuedBytes=%d, inflight=%d", bytes, this.queuedBytes, this.pendingBytes, this.maxQueuedBytes, this.inflight.length);
-// Check if we have capacity
-while (true) {
-// Check for fatal error before adding to pendingBytes
-if (this.fatalError) {
-debugSession("[CAPACITY] fatal error detected, rejecting: %s", this.fatalError.message);
-throw this.fatalError;
-}
-// Byte-based gating
-if (this.queuedBytes + this.pendingBytes + bytes <= this.maxQueuedBytes) {
-// Batch-based gating (if configured)
-if (this.maxInflightBatches === undefined ||
-this.inflight.length < this.maxInflightBatches) {
-debugSession("[CAPACITY] capacity available, adding %d to pendingBytes", bytes);
-this.pendingBytes += bytes;
-return;
-}
-}
-// No capacity - wait
-// WritableStream enforces writer lock, so only one write can be blocked at a time
-debugSession("[CAPACITY] no capacity, waiting for release");
-await new Promise((resolve) => {
-this.capacityWaiter = resolve;
-});
-debugSession("[CAPACITY] woke up, rechecking");
-}
-}
 /**
  * Release capacity and wake waiter if present.
  */
 releaseCapacity(bytes) {
-debugSession("[CAPACITY] releasing %d bytes: queuedBytes=%d->%d, pendingBytes=%d->%d,
+debugSession("[%s] [CAPACITY] releasing %d bytes: queuedBytes=%d->%d, pendingBytes=%d->%d, pendingBatches=%d, numWaiters=%d", this.streamName, bytes, this.queuedBytes, this.queuedBytes - bytes, this.pendingBytes, Math.max(0, this.pendingBytes - bytes), this.pendingBatches, this.capacityWaiters.length);
 this.queuedBytes -= bytes;
 this.pendingBytes = Math.max(0, this.pendingBytes - bytes);
-
-
-
-
-
-
+this.wakeCapacityWaiters();
+}
+wakeCapacityWaiters() {
+if (this.capacityWaiters.length === 0) {
+return;
+}
+let availableBytes = Math.max(0, this.maxQueuedBytes - (this.queuedBytes + this.pendingBytes));
+let availableBatches = this.maxInflightBatches === undefined
+? Number.POSITIVE_INFINITY
+: Math.max(0, this.maxInflightBatches -
+(this.inflight.length + this.pendingBatches));
+while (this.capacityWaiters.length > 0) {
+const next = this.capacityWaiters[0];
+const needsBytes = next.bytes;
+const needsBatches = next.batches;
+const hasBatchCapacity = this.maxInflightBatches === undefined ||
+needsBatches <= availableBatches;
+if (needsBytes <= availableBytes && hasBatchCapacity) {
+this.capacityWaiters.shift();
+availableBytes -= needsBytes;
+if (this.maxInflightBatches !== undefined) {
+availableBatches -= needsBatches;
+}
+debugSession("[%s] [CAPACITY] waking waiter (bytes=%d, batches=%d)", this.streamName, needsBytes, needsBatches);
+next.resolve();
+continue;
+}
+// Not enough capacity for the next waiter yet - keep them queued.
+break;
 }
 }
 /**
@@ -534,7 +624,7 @@ export class RetryAppendSession {
 return;
 }
 this.pumpPromise = this.runPump().catch((e) => {
-debugSession("pump crashed unexpectedly: %s", e);
+debugSession("[%s] pump crashed unexpectedly: %s", this.streamName, e);
 // This should never happen - pump handles all errors internally
 });
 }
@@ -542,93 +632,99 @@ export class RetryAppendSession {
 * Main pump loop: processes inflight queue, handles acks, retries, and recovery.
 */
 async runPump() {
-debugSession("pump started");
+debugSession("[%s] pump started", this.streamName);
 while (true) {
-debugSession("[PUMP] loop: inflight=%d, queuedBytes=%d, pendingBytes=%d, closing=%s, pumpStopped=%s", this.inflight.length, this.queuedBytes, this.pendingBytes, this.closing, this.pumpStopped);
+debugSession("[%s] [PUMP] loop: inflight=%d, queuedBytes=%d, pendingBytes=%d, pendingBatches=%d, closing=%s, pumpStopped=%s", this.streamName, this.inflight.length, this.queuedBytes, this.pendingBytes, this.pendingBatches, this.closing, this.pumpStopped);
 // Check if we should stop
 if (this.pumpStopped) {
-debugSession("[PUMP] stopped by flag");
+debugSession("[%s] [PUMP] stopped by flag", this.streamName);
 return;
 }
 // If closing and queue is empty, stop
-if
-
+// BUT: if there are capacity waiters, they might add to inflight, so keep running
+if (this.closing &&
+this.inflight.length === 0 &&
+this.capacityWaiters.length === 0) {
+debugSession("[%s] [PUMP] closing and queue empty, stopping", this.streamName);
 this.pumpStopped = true;
 return;
 }
 // If no entries, sleep and continue
 if (this.inflight.length === 0) {
-debugSession("[PUMP] no entries,
-
-
-
-new Promise((resolve) => {
-this.pumpWakeup = resolve;
-}),
-]);
+debugSession("[%s] [PUMP] no entries, parking until wakeup", this.streamName);
+await new Promise((resolve) => {
+this.pumpWakeup = resolve;
+});
 this.pumpWakeup = undefined;
 continue;
 }
 // Get head entry (we know it exists because we checked length above)
 const head = this.inflight[0];
-debugSession("[PUMP] processing head: expectedCount=%d, meteredBytes=%d", head.expectedCount, head.meteredBytes);
+debugSession("[%s] [PUMP] processing head: expectedCount=%d, meteredBytes=%d, match_seq_num=%s", this.streamName, head.expectedCount, head.input.meteredBytes, head.input.matchSeqNum ?? "none");
 // Ensure session exists
-debugSession("[PUMP] ensuring session exists");
+debugSession("[%s] [PUMP] ensuring session exists", this.streamName);
 await this.ensureSession();
 if (!this.session) {
 // Session creation failed - will retry
-
-
+this.consecutiveFailures++;
+const delay = calculateDelay(this.consecutiveFailures - 1, this.retryConfig.minDelayMillis, this.retryConfig.maxDelayMillis);
+debugSession("[%s] [PUMP] session creation failed, backing off for %dms", this.streamName, delay);
+await sleep(delay);
 continue;
 }
 // Submit ALL entries that need submitting (enables HTTP/2 pipelining for S2S)
 for (const entry of this.inflight) {
 if (!entry.innerPromise || entry.needsSubmit) {
-debugSession("[PUMP] submitting entry to inner session (%d records, %d bytes)", entry.expectedCount, entry.meteredBytes);
-
-entry.
+debugSession("[%s] [PUMP] submitting entry to inner session (%d records, %d bytes, match_seq_num=%s)", this.streamName, entry.expectedCount, entry.input.meteredBytes, entry.input.matchSeqNum ?? "none");
+const attemptStarted = performance.now();
+entry.attemptStartedMonotonicMs = attemptStarted;
+entry.innerPromise = this.session.submit(entry.input);
 delete entry.needsSubmit;
 }
 }
 // Wait for head with timeout
-debugSession("[PUMP] waiting for head result");
+debugSession("[%s] [PUMP] waiting for head result", this.streamName);
 const result = await this.waitForHead(head);
-debugSession("[PUMP] got result: kind=%s", result.kind);
+debugSession("[%s] [PUMP] got result: kind=%s", this.streamName, result.kind);
+// Convert result to AppendResult (timeout becomes retryable error)
+let appendResult;
 if (result.kind === "timeout") {
-// Ack timeout -
+// Ack timeout - convert to retryable error that flows through retry logic
 const attemptElapsed = head.attemptStartedMonotonicMs != null
 ? Math.round(performance.now() - head.attemptStartedMonotonicMs)
 : undefined;
 const error = new S2Error({
-message: `Request timeout after ${attemptElapsed ?? "unknown"}ms (${head.expectedCount} records, ${head.meteredBytes} bytes)`,
+message: `Request timeout after ${attemptElapsed ?? "unknown"}ms (${head.expectedCount} records, ${head.input.meteredBytes} bytes)`,
 status: 408,
 code: "REQUEST_TIMEOUT",
+origin: "sdk",
 });
-debugSession("ack timeout for head entry: %s", error.message);
-
-
+debugSession("[%s] ack timeout for head entry: %s", this.streamName, error.message);
+appendResult = err(error);
+}
+else {
+// Promise settled
+appendResult = result.value;
 }
-// Promise settled
-const appendResult = result.value;
 if (appendResult.ok) {
 // Success!
 const ack = appendResult.value;
-debugSession("[PUMP] success, got ack",
+debugSession("[%s] [PUMP] success, got ack: seq_num=%d-%d", this.streamName, ack.start.seqNum, ack.end.seqNum);
 // Invariant check: ack count matches batch count
-const ackCount =
+const ackCount = ack.end.seqNum - ack.start.seqNum;
 if (ackCount !== head.expectedCount) {
 const error = invariantViolation(`Ack count mismatch: expected ${head.expectedCount}, got ${ackCount}`);
-debugSession("invariant violation: %s", error.message);
+debugSession("[%s] invariant violation: %s", this.streamName, error.message);
 await this.abort(error);
 return;
 }
 // Invariant check: sequence numbers must be strictly increasing
 if (this._lastAckedPosition) {
-const prevEnd =
-const currentEnd =
+const prevEnd = this._lastAckedPosition.end.seqNum;
+const currentEnd = ack.end.seqNum;
 if (currentEnd <= prevEnd) {
 const error = invariantViolation(`Sequence number not strictly increasing: previous=${prevEnd}, current=${currentEnd}`);
-debugSession("invariant violation: %s", error.message);
+debugSession("[%s] invariant violation: %s", this.streamName, error.message);
 await this.abort(error);
 return;
 }
@@ -644,12 +740,12 @@ export class RetryAppendSession {
 this.acksController?.enqueue(ack);
 }
 catch (e) {
-debugSession("failed to enqueue ack: %s", e);
+debugSession("[%s] failed to enqueue ack: %s", this.streamName, e);
 }
 // Remove from inflight and release capacity
-debugSession("[PUMP] removing head from inflight, releasing %d bytes", head.meteredBytes);
+debugSession("[%s] [PUMP] removing head from inflight, releasing %d bytes", this.streamName, head.input.meteredBytes);
 this.inflight.shift();
-this.releaseCapacity(head.meteredBytes);
+this.releaseCapacity(head.input.meteredBytes);
 // Reset consecutive failures on success
 this.consecutiveFailures = 0;
 this.currentAttempt = 0;
@@ -657,17 +753,17 @@ export class RetryAppendSession {
 else {
 // Error result
 const error = appendResult.error;
-debugSession("[PUMP] error: status=%s, message=%s", error.status, error.message);
+debugSession("[%s] [PUMP] error: status=%s, message=%s", this.streamName, error.status, error.message);
 // Check if retryable
 if (!isRetryable(error)) {
-debugSession("error not retryable, aborting");
+debugSession("[%s] error not retryable, aborting", this.streamName);
 await this.abort(error);
 return;
 }
 // Check policy compliance
 if (this.retryConfig.appendRetryPolicy === "noSideEffects" &&
 !this.isIdempotent(head)) {
-debugSession("error not policy-compliant (noSideEffects), aborting");
+debugSession("[%s] error not policy-compliant (noSideEffects), aborting", this.streamName);
 await this.abort(error);
 return;
 }
@@ -675,7 +771,7 @@ export class RetryAppendSession {
 const effectiveMax = Math.max(1, this.retryConfig.maxAttempts);
 const allowedRetries = effectiveMax - 1;
 if (this.currentAttempt >= allowedRetries) {
-debugSession("max attempts reached (%d), aborting", effectiveMax);
+debugSession("[%s] max attempts reached (%d), aborting", this.streamName, effectiveMax);
 const wrappedError = new S2Error({
 message: `Max attempts (${effectiveMax}) exhausted: ${error.message}`,
 status: error.status,
@@ -687,7 +783,7 @@ export class RetryAppendSession {
 // Perform recovery
 this.consecutiveFailures++;
 this.currentAttempt++;
-debugSession("performing recovery (retry %d/%d)", this.currentAttempt, allowedRetries);
+debugSession("[%s] performing recovery (retry %d/%d)", this.streamName, this.currentAttempt, allowedRetries);
 await this.recover();
 }
 }
@@ -697,15 +793,16 @@ export class RetryAppendSession {
 * Returns either the settled result or a timeout indicator.
 *
 * Per-attempt ack timeout semantics:
-* - The deadline is computed from the
-*
-*
+* - The deadline is computed from the current attempt's start time using a
+* monotonic clock (performance.now) to avoid issues with wall clock adjustments.
+* - Each retry gets a fresh timeout window (attemptStartedMonotonicMs is reset
+* during recovery).
 * - If attempt start is missing (for backward compatibility), we measure
 * from "now" with the full timeout window.
 */
 async waitForHead(head) {
-const
-const deadline =
+const attemptStart = head.attemptStartedMonotonicMs ?? performance.now();
+const deadline = attemptStart + this.requestTimeoutMillis;
 const remaining = Math.max(0, deadline - performance.now());
 let timer;
 const timeoutP = new Promise((resolve) => {
@@ -727,53 +824,52 @@ export class RetryAppendSession {
 * Recover from transient error: recreate session and resubmit all inflight entries.
 */
 async recover() {
-debugSession("starting recovery");
+debugSession("[%s] starting recovery", this.streamName);
 // Calculate backoff delay
-const delay = calculateDelay(this.consecutiveFailures - 1, this.retryConfig.
-debugSession("backing off for %dms", delay);
+const delay = calculateDelay(this.consecutiveFailures - 1, this.retryConfig.minDelayMillis, this.retryConfig.maxDelayMillis);
+debugSession("[%s] backing off for %dms", this.streamName, delay);
 await sleep(delay);
 // Teardown old session
 if (this.session) {
 try {
 const closeResult = await this.session.close();
 if (!closeResult.ok) {
-debugSession("error closing old session during recovery: %s", closeResult.error.message);
+debugSession("[%s] error closing old session during recovery: %s", this.streamName, closeResult.error.message);
 }
 }
 catch (e) {
-debugSession("exception closing old session: %s", e);
+debugSession("[%s] exception closing old session: %s", this.streamName, e);
 }
 this.session = undefined;
 }
 // Create new session
 await this.ensureSession();
 if (!this.session) {
-debugSession("failed to create new session during recovery");
+debugSession("[%s] failed to create new session during recovery", this.streamName);
 // Will retry on next pump iteration
 return;
 }
 // Store session in local variable to help TypeScript type narrowing
 const session = this.session;
 // Resubmit all inflight entries (replace their innerPromise and reset attempt start)
-debugSession("resubmitting %d inflight entries", this.inflight.length);
+debugSession("[%s] resubmitting %d inflight entries", this.streamName, this.inflight.length);
 for (const entry of this.inflight) {
 // Attach .catch to superseded promise to avoid unhandled rejection
 entry.innerPromise.catch(() => { });
 // Create new promise from new session
-
-entry.
+const attemptStarted = performance.now();
+entry.attemptStartedMonotonicMs = attemptStarted;
+entry.innerPromise = session.submit(entry.input);
+debugSession("[%s] resubmitted entry (%d records, %d bytes, match_seq_num=%s)", this.streamName, entry.expectedCount, entry.input.meteredBytes, entry.input.matchSeqNum ?? "none");
 }
-debugSession("recovery complete");
+debugSession("[%s] recovery complete", this.streamName);
 }
 /**
 * Check if append can be retried under noSideEffects policy.
 * For appends, idempotency requires match_seq_num.
 */
 isIdempotent(entry) {
-
-if (!args)
-return false;
-return args.match_seq_num !== undefined;
+return entry.input.matchSeqNum !== undefined;
 }
 /**
 * Ensure session exists, creating it if necessary.
@@ -783,11 +879,13 @@ export class RetryAppendSession {
 return;
 }
 try {
+debugSession("[%s] creating new transport session", this.streamName);
 this.session = await this.generator(this.sessionOptions);
+debugSession("[%s] transport session created", this.streamName);
 }
 catch (e) {
 const error = s2Error(e);
-debugSession("failed to create session: %s", error.message);
+debugSession("[%s] failed to create session: %s", this.streamName, error.message);
 // Don't set this.session - will retry later
 }
 }
@@ -798,10 +896,11 @@ export class RetryAppendSession {
 if (this.pumpStopped) {
 return; // Already aborted
 }
-debugSession("aborting session: %s", error.message);
+debugSession("[%s] aborting session: %s", this.streamName, error.message);
 this.fatalError = error;
 this.pumpStopped = true;
 // Resolve all inflight entries with error
+debugSession("[%s] rejecting %d inflight entries", this.streamName, this.inflight.length);
 for (const entry of this.inflight) {
 if (entry.maybeResolve) {
 entry.maybeResolve(err(error));
@@ -810,25 +909,27 @@ export class RetryAppendSession {
 this.inflight.length = 0;
 this.queuedBytes = 0;
 this.pendingBytes = 0;
+this.pendingBatches = 0;
 // Error the readable stream
 try {
 this.acksController?.error(error);
 }
 catch (e) {
-debugSession("failed to error acks controller: %s", e);
+debugSession("[%s] failed to error acks controller: %s", this.streamName, e);
 }
-// Wake capacity
-
-
-this.capacityWaiter = undefined;
+// Wake all capacity waiters to unblock any pending writers
+for (const waiter of this.capacityWaiters) {
+waiter.resolve();
 }
+this.capacityWaiters = [];
 // Close inner session
 if (this.session) {
+debugSession("[%s] closing inner session", this.streamName);
 try {
 await this.session.close();
 }
 catch (e) {
-debugSession("error closing session during abort: %s", e);
+debugSession("[%s] error closing session during abort: %s", this.streamName, e);
 }
 this.session = undefined;
 }
@@ -845,7 +946,7 @@ export class RetryAppendSession {
 }
 return;
 }
-debugSession("close requested");
+debugSession("[%s] close requested", this.streamName);
 this.closing = true;
 // Wake pump if it's sleeping so it can check closing flag
 if (this.pumpWakeup) {
@@ -853,6 +954,7 @@ export class RetryAppendSession {
 }
 // Wait for pump to stop (drains inflight queue, including through recovery)
 if (this.pumpPromise) {
+debugSession("[%s] [CLOSE] awaiting pump to drain inflight queue", this.streamName);
 await this.pumpPromise;
 }
 // Close inner session
@@ -860,11 +962,11 @@ export class RetryAppendSession {
 try {
 const result = await this.session.close();
 if (!result.ok) {
-debugSession("error closing inner session: %s", result.error.message);
+debugSession("[%s] error closing inner session: %s", this.streamName, result.error.message);
 }
 }
 catch (e) {
-debugSession("exception closing inner session: %s", e);
+debugSession("[%s] exception closing inner session: %s", this.streamName, e);
 }
 this.session = undefined;
 }
@@ -873,14 +975,14 @@ export class RetryAppendSession {
 this.acksController?.close();
 }
 catch (e) {
-debugSession("error closing acks controller: %s", e);
+debugSession("[%s] error closing acks controller: %s", this.streamName, e);
 }
 this.closed = true;
 // If fatal error occurred, throw it
 if (this.fatalError) {
 throw this.fatalError;
 }
-debugSession("close complete");
+debugSession("[%s] close complete", this.streamName);
 }
 async [Symbol.asyncDispose]() {
 await this.close();