@s2-dev/streamstore 0.17.6 → 0.18.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +69 -1
- package/dist/cjs/accessTokens.d.ts +3 -2
- package/dist/cjs/accessTokens.d.ts.map +1 -1
- package/dist/cjs/accessTokens.js +22 -37
- package/dist/cjs/accessTokens.js.map +1 -1
- package/dist/cjs/basin.d.ts +4 -3
- package/dist/cjs/basin.d.ts.map +1 -1
- package/dist/cjs/basin.js +16 -6
- package/dist/cjs/basin.js.map +1 -1
- package/dist/cjs/basins.d.ts +10 -10
- package/dist/cjs/basins.d.ts.map +1 -1
- package/dist/cjs/basins.js +36 -64
- package/dist/cjs/basins.js.map +1 -1
- package/dist/cjs/batch-transform.d.ts +1 -1
- package/dist/cjs/batch-transform.d.ts.map +1 -1
- package/dist/cjs/batch-transform.js +36 -5
- package/dist/cjs/batch-transform.js.map +1 -1
- package/dist/cjs/common.d.ts +42 -0
- package/dist/cjs/common.d.ts.map +1 -1
- package/dist/cjs/error.d.ts +40 -2
- package/dist/cjs/error.d.ts.map +1 -1
- package/dist/cjs/error.js +268 -2
- package/dist/cjs/error.js.map +1 -1
- package/dist/cjs/generated/client/types.gen.d.ts +7 -0
- package/dist/cjs/generated/client/types.gen.d.ts.map +1 -1
- package/dist/cjs/generated/client/utils.gen.d.ts +1 -0
- package/dist/cjs/generated/client/utils.gen.d.ts.map +1 -1
- package/dist/cjs/generated/client/utils.gen.js.map +1 -1
- package/dist/cjs/generated/core/types.gen.d.ts +2 -0
- package/dist/cjs/generated/core/types.gen.d.ts.map +1 -1
- package/dist/cjs/index.d.ts +46 -3
- package/dist/cjs/index.d.ts.map +1 -1
- package/dist/cjs/index.js +28 -2
- package/dist/cjs/index.js.map +1 -1
- package/dist/cjs/lib/result.d.ts +57 -0
- package/dist/cjs/lib/result.d.ts.map +1 -0
- package/dist/cjs/lib/result.js +43 -0
- package/dist/cjs/lib/result.js.map +1 -0
- package/dist/cjs/lib/retry.d.ts +151 -0
- package/dist/cjs/lib/retry.d.ts.map +1 -0
- package/dist/cjs/lib/retry.js +839 -0
- package/dist/cjs/lib/retry.js.map +1 -0
- package/dist/cjs/lib/stream/factory.d.ts +0 -1
- package/dist/cjs/lib/stream/factory.d.ts.map +1 -1
- package/dist/cjs/lib/stream/factory.js +0 -1
- package/dist/cjs/lib/stream/factory.js.map +1 -1
- package/dist/cjs/lib/stream/runtime.d.ts +14 -0
- package/dist/cjs/lib/stream/runtime.d.ts.map +1 -1
- package/dist/cjs/lib/stream/runtime.js +18 -3
- package/dist/cjs/lib/stream/runtime.js.map +1 -1
- package/dist/cjs/lib/stream/transport/fetch/index.d.ts +24 -32
- package/dist/cjs/lib/stream/transport/fetch/index.d.ts.map +1 -1
- package/dist/cjs/lib/stream/transport/fetch/index.js +260 -187
- package/dist/cjs/lib/stream/transport/fetch/index.js.map +1 -1
- package/dist/cjs/lib/stream/transport/fetch/shared.d.ts +1 -2
- package/dist/cjs/lib/stream/transport/fetch/shared.d.ts.map +1 -1
- package/dist/cjs/lib/stream/transport/fetch/shared.js +49 -72
- package/dist/cjs/lib/stream/transport/fetch/shared.js.map +1 -1
- package/dist/cjs/lib/stream/transport/s2s/index.d.ts +0 -1
- package/dist/cjs/lib/stream/transport/s2s/index.d.ts.map +1 -1
- package/dist/cjs/lib/stream/transport/s2s/index.js +312 -352
- package/dist/cjs/lib/stream/transport/s2s/index.js.map +1 -1
- package/dist/cjs/lib/stream/types.d.ts +102 -8
- package/dist/cjs/lib/stream/types.d.ts.map +1 -1
- package/dist/cjs/metrics.d.ts +3 -2
- package/dist/cjs/metrics.d.ts.map +1 -1
- package/dist/cjs/metrics.js +24 -39
- package/dist/cjs/metrics.js.map +1 -1
- package/dist/cjs/s2.d.ts +1 -0
- package/dist/cjs/s2.d.ts.map +1 -1
- package/dist/cjs/s2.js +20 -3
- package/dist/cjs/s2.js.map +1 -1
- package/dist/cjs/stream.d.ts +5 -3
- package/dist/cjs/stream.d.ts.map +1 -1
- package/dist/cjs/stream.js +29 -18
- package/dist/cjs/stream.js.map +1 -1
- package/dist/cjs/streams.d.ts +10 -10
- package/dist/cjs/streams.d.ts.map +1 -1
- package/dist/cjs/streams.js +36 -64
- package/dist/cjs/streams.js.map +1 -1
- package/dist/cjs/utils.d.ts +3 -3
- package/dist/cjs/utils.d.ts.map +1 -1
- package/dist/cjs/utils.js +3 -3
- package/dist/cjs/utils.js.map +1 -1
- package/dist/cjs/version.d.ts +8 -0
- package/dist/cjs/version.d.ts.map +1 -0
- package/dist/cjs/version.js +11 -0
- package/dist/cjs/version.js.map +1 -0
- package/dist/esm/accessTokens.d.ts +3 -2
- package/dist/esm/accessTokens.d.ts.map +1 -1
- package/dist/esm/accessTokens.js +23 -38
- package/dist/esm/accessTokens.js.map +1 -1
- package/dist/esm/basin.d.ts +4 -3
- package/dist/esm/basin.d.ts.map +1 -1
- package/dist/esm/basin.js +16 -6
- package/dist/esm/basin.js.map +1 -1
- package/dist/esm/basins.d.ts +10 -10
- package/dist/esm/basins.d.ts.map +1 -1
- package/dist/esm/basins.js +37 -65
- package/dist/esm/basins.js.map +1 -1
- package/dist/esm/batch-transform.d.ts +1 -1
- package/dist/esm/batch-transform.d.ts.map +1 -1
- package/dist/esm/batch-transform.js +37 -6
- package/dist/esm/batch-transform.js.map +1 -1
- package/dist/esm/common.d.ts +42 -0
- package/dist/esm/common.d.ts.map +1 -1
- package/dist/esm/error.d.ts +40 -2
- package/dist/esm/error.d.ts.map +1 -1
- package/dist/esm/error.js +260 -2
- package/dist/esm/error.js.map +1 -1
- package/dist/esm/generated/client/types.gen.d.ts +7 -0
- package/dist/esm/generated/client/types.gen.d.ts.map +1 -1
- package/dist/esm/generated/client/utils.gen.d.ts +1 -0
- package/dist/esm/generated/client/utils.gen.d.ts.map +1 -1
- package/dist/esm/generated/client/utils.gen.js.map +1 -1
- package/dist/esm/generated/core/types.gen.d.ts +2 -0
- package/dist/esm/generated/core/types.gen.d.ts.map +1 -1
- package/dist/esm/index.d.ts +46 -3
- package/dist/esm/index.d.ts.map +1 -1
- package/dist/esm/index.js +23 -1
- package/dist/esm/index.js.map +1 -1
- package/dist/esm/lib/result.d.ts +57 -0
- package/dist/esm/lib/result.d.ts.map +1 -0
- package/dist/esm/lib/result.js +37 -0
- package/dist/esm/lib/result.js.map +1 -0
- package/dist/esm/lib/retry.d.ts +151 -0
- package/dist/esm/lib/retry.d.ts.map +1 -0
- package/dist/esm/lib/retry.js +830 -0
- package/dist/esm/lib/retry.js.map +1 -0
- package/dist/esm/lib/stream/factory.d.ts +0 -1
- package/dist/esm/lib/stream/factory.d.ts.map +1 -1
- package/dist/esm/lib/stream/factory.js +0 -1
- package/dist/esm/lib/stream/factory.js.map +1 -1
- package/dist/esm/lib/stream/runtime.d.ts +14 -0
- package/dist/esm/lib/stream/runtime.d.ts.map +1 -1
- package/dist/esm/lib/stream/runtime.js +23 -3
- package/dist/esm/lib/stream/runtime.js.map +1 -1
- package/dist/esm/lib/stream/transport/fetch/index.d.ts +24 -32
- package/dist/esm/lib/stream/transport/fetch/index.d.ts.map +1 -1
- package/dist/esm/lib/stream/transport/fetch/index.js +260 -187
- package/dist/esm/lib/stream/transport/fetch/index.js.map +1 -1
- package/dist/esm/lib/stream/transport/fetch/shared.d.ts +1 -2
- package/dist/esm/lib/stream/transport/fetch/shared.d.ts.map +1 -1
- package/dist/esm/lib/stream/transport/fetch/shared.js +51 -74
- package/dist/esm/lib/stream/transport/fetch/shared.js.map +1 -1
- package/dist/esm/lib/stream/transport/s2s/index.d.ts +0 -1
- package/dist/esm/lib/stream/transport/s2s/index.d.ts.map +1 -1
- package/dist/esm/lib/stream/transport/s2s/index.js +313 -353
- package/dist/esm/lib/stream/transport/s2s/index.js.map +1 -1
- package/dist/esm/lib/stream/types.d.ts +102 -8
- package/dist/esm/lib/stream/types.d.ts.map +1 -1
- package/dist/esm/metrics.d.ts +3 -2
- package/dist/esm/metrics.d.ts.map +1 -1
- package/dist/esm/metrics.js +25 -40
- package/dist/esm/metrics.js.map +1 -1
- package/dist/esm/s2.d.ts +1 -0
- package/dist/esm/s2.d.ts.map +1 -1
- package/dist/esm/s2.js +20 -3
- package/dist/esm/s2.js.map +1 -1
- package/dist/esm/stream.d.ts +5 -3
- package/dist/esm/stream.d.ts.map +1 -1
- package/dist/esm/stream.js +30 -19
- package/dist/esm/stream.js.map +1 -1
- package/dist/esm/streams.d.ts +10 -10
- package/dist/esm/streams.d.ts.map +1 -1
- package/dist/esm/streams.js +37 -65
- package/dist/esm/streams.js.map +1 -1
- package/dist/esm/utils.d.ts +3 -3
- package/dist/esm/utils.d.ts.map +1 -1
- package/dist/esm/utils.js +2 -2
- package/dist/esm/utils.js.map +1 -1
- package/dist/esm/version.d.ts +8 -0
- package/dist/esm/version.d.ts.map +1 -0
- package/dist/esm/version.js +8 -0
- package/dist/esm/version.js.map +1 -0
- package/package.json +7 -4
|
@@ -0,0 +1,839 @@
|
|
|
1
|
+
"use strict";
// Compiled CommonJS module wiring: mark the module as an ES-module interop
// target and pre-declare the named exports before their definitions below.
Object.defineProperty(exports, "__esModule", { value: true });
exports.RetryAppendSession = exports.RetryReadSession = exports.DEFAULT_RETRY_CONFIG = void 0;
exports.isRetryable = isRetryable;
exports.calculateDelay = calculateDelay;
exports.sleep = sleep;
exports.withRetries = withRetries;
// External and sibling-module dependencies.
const debug_1 = require("debug");
const error_js_1 = require("../error.js");
const utils_js_1 = require("../utils.js");
const result_js_1 = require("./result.js");
// Namespaced debug loggers for the three retry surfaces in this module:
// generic withRetries, read sessions, and append sessions.
const debugWith = (0, debug_1.default)("s2:retry:with");
const debugRead = (0, debug_1.default)("s2:retry:read");
const debugSession = (0, debug_1.default)("s2:retry:session");
|
|
15
|
+
/**
 * Default retry configuration.
 *
 * Caller-supplied options are spread over this object (see `withRetries`,
 * `RetryReadSession`, and `RetryAppendSession`), so every field acts as a
 * fallback when the caller omits it.
 */
exports.DEFAULT_RETRY_CONFIG = {
    // Total attempts including the first one; 3 means up to 2 retries.
    maxAttempts: 3,
    // Base backoff between attempts; jitter is applied in calculateDelay.
    retryBackoffDurationMillis: 100,
    // NOTE(review): presumably selects which append failures are retried;
    // the enforcing code lives in the append session — confirm there.
    appendRetryPolicy: "all",
    requestTimeoutMillis: 5000, // 5 seconds
};
|
|
24
|
+
/**
 * Status codes considered transient and safe to retry. Codes not listed —
 * including other 4xx validation/client errors and unlisted 5xx codes such
 * as 501 and 504 — are treated as non-retryable.
 */
const RETRYABLE_STATUS_CODES = new Set([
    408, // request_timeout
    429, // too_many_requests
    500, // internal_server_error
    502, // bad_gateway
    503, // service_unavailable
]);
/**
 * Determines if an error should be retried based on its characteristics.
 * 400-level errors (except 408, 429) are non-retryable validation/client errors.
 *
 * @param error Error object; only its `status` field is consulted.
 * @returns true when the status is one of the explicitly retryable codes;
 *   false for any other status or when no status is present.
 */
function isRetryable(error) {
    // No status (or a falsy one) means we cannot classify the failure, so
    // never retry it.
    if (!error.status)
        return false;
    // Set membership fully decides retryability: the previous explicit
    // 4xx branch was dead code, as it was followed by an unconditional
    // `return false` covering every non-listed status anyway.
    return RETRYABLE_STATUS_CODES.has(error.status);
}
|
|
48
|
+
/**
 * Calculates the delay before the next retry attempt using fixed backoff
 * with jitter. The `attempt` parameter is currently ignored to keep a
 * constant base delay per attempt.
 *
 * @param attempt Zero-based retry attempt number (unused for now).
 * @param baseDelayMillis Base backoff duration in milliseconds.
 * @returns A whole-millisecond delay in [0.5 * base, 1.5 * base].
 */
function calculateDelay(attempt, baseDelayMillis) {
    // Draw a multiplier uniformly from [0.5, 1.5): ±50% jitter around the
    // base delay (algebraically the same as 1 + (rand*2 - 1) * 0.5).
    const jitterFactor = 0.5 + Math.random();
    const jittered = baseDelayMillis * jitterFactor;
    // Clamp at zero and truncate to whole milliseconds.
    return Math.floor(Math.max(0, jittered));
}
|
|
60
|
+
/**
 * Sleeps for the specified duration.
 *
 * @param ms Number of milliseconds to wait.
 * @returns A promise that resolves (with no value) after roughly `ms` ms.
 */
function sleep(ms) {
    return new Promise((resolve) => {
        setTimeout(resolve, ms);
    });
}
|
|
66
|
+
/**
 * Executes an async function with automatic retry logic for transient failures.
 *
 * Only `S2Error` instances participate in retry handling; any other thrown
 * value is rethrown immediately. Between attempts the loop backs off for a
 * jittered delay computed by `calculateDelay`.
 *
 * @param retryConfig Retry configuration (max attempts, backoff duration);
 *   merged over `DEFAULT_RETRY_CONFIG`.
 * @param fn The async function to execute
 * @param isPolicyCompliant Optional predicate consulted (with the merged
 *   config and the error) before each retry; defaults to always-true.
 * @returns The result of the function
 * @throws The last error if all retry attempts are exhausted
 */
async function withRetries(retryConfig, fn, isPolicyCompliant = () => true) {
    const effectiveConfig = {
        ...exports.DEFAULT_RETRY_CONFIG,
        ...retryConfig,
    };
    // A minimum of one attempt is always made (1 = no retries).
    if (effectiveConfig.maxAttempts < 1) {
        effectiveConfig.maxAttempts = 1;
    }
    let mostRecentError;
    // Attempts are numbered 1..maxAttempts.
    let attemptNo = 0;
    while (attemptNo < effectiveConfig.maxAttempts) {
        attemptNo += 1;
        try {
            const outcome = await fn();
            if (attemptNo > 1) {
                debugWith("succeeded after %d retries", attemptNo - 1);
            }
            return outcome;
        }
        catch (error) {
            // withRetry only handles S2Errors (withS2Error should be called first)
            if (!(error instanceof error_js_1.S2Error)) {
                debugWith("non-S2Error thrown, rethrowing immediately: %s", error);
                throw error;
            }
            mostRecentError = error;
            // Final attempt: fall through to throw the captured error below.
            if (attemptNo === effectiveConfig.maxAttempts) {
                debugWith("max attempts exhausted, throwing error");
                break;
            }
            // Policy predicate is evaluated first (short-circuit), matching
            // the order retryability has always been checked in.
            const shouldRetry = isPolicyCompliant(effectiveConfig, mostRecentError) &&
                isRetryable(mostRecentError);
            if (!shouldRetry) {
                debugWith("error not retryable, throwing immediately");
                throw error;
            }
            // Back off before the next attempt.
            const backoffMs = calculateDelay(attemptNo - 1, effectiveConfig.retryBackoffDurationMillis);
            debugWith("retryable error, backing off for %dms, status=%s", backoffMs, error.status);
            await sleep(backoffMs);
        }
    }
    throw mostRecentError;
}
|
|
117
|
+
/**
 * A ReadableStream of records that transparently recreates the underlying
 * transport read session on retryable errors, resuming from the position
 * just after the last record that was successfully delivered. Remaining
 * count/bytes/wait budgets are recomputed from the original request on each
 * retry so they are never double-subtracted.
 */
class RetryReadSession extends ReadableStream {
    // Position (seq_num/timestamp) to resume from on retry; set after every
    // successfully enqueued record.
    _nextReadPosition = undefined;
    // Most recent tail reported by the inner transport session, if any.
    _lastObservedTail = undefined;
    // Totals used to shrink the caller's count/bytes budgets across retries.
    _recordsRead = 0;
    _bytesRead = 0;
    /**
     * Factory for a retrying read session.
     *
     * @param generator Async factory producing a fresh inner transport
     *   session for a given set of read args.
     * @param args Initial read arguments (count/bytes/wait budgets, start
     *   position fields, ...).
     * @param config Optional retry configuration overrides.
     */
    static async create(generator, args = {}, config) {
        return new RetryReadSession(args, generator, config);
    }
    constructor(args, generator, config) {
        // Merge caller overrides over the defaults.
        const retryConfig = {
            ...exports.DEFAULT_RETRY_CONFIG,
            ...config,
        };
        // Current inner transport session; shared with the cancel handler below.
        let session = undefined;
        const startTimeMs = performance.now(); // Capture start time before super()
        super({
            start: async (controller) => {
                let nextArgs = { ...args };
                // Capture original request budget so retries compute from a stable baseline
                const baselineCount = args?.count;
                const baselineBytes = args?.bytes;
                const baselineWait = args?.wait;
                // Consecutive failed session attempts; reset on every record.
                let attempt = 0;
                // Outer loop: one iteration per inner session (re)creation.
                while (true) {
                    debugRead("starting read session with args: %o", nextArgs);
                    session = await generator(nextArgs);
                    const reader = session.getReader();
                    // Inner loop: drain the current session's records.
                    while (true) {
                        const { done, value: result } = await reader.read();
                        // Update last observed tail if transport exposes it
                        try {
                            const tail = session.lastObservedTail?.();
                            if (tail)
                                this._lastObservedTail = tail;
                        }
                        catch { }
                        // Inner session finished cleanly: close the outer stream.
                        if (done) {
                            reader.releaseLock();
                            controller.close();
                            return;
                        }
                        // Check if result is an error
                        if (!result.ok) {
                            reader.releaseLock();
                            const error = result.error;
                            // Check if we can retry (track session attempts, not record reads)
                            const effectiveMax = Math.max(1, retryConfig.maxAttempts);
                            if (isRetryable(error) && attempt < effectiveMax - 1) {
                                // Resume from just past the last delivered record.
                                if (this._nextReadPosition) {
                                    nextArgs.seq_num = this._nextReadPosition.seq_num;
                                    // Clear alternative start position fields to avoid conflicting params
                                    delete nextArgs.timestamp;
                                    delete nextArgs.tail_offset;
                                }
                                // Compute planned backoff delay now so we can subtract it from wait budget
                                const delay = calculateDelay(attempt, retryConfig.retryBackoffDurationMillis);
                                // Recompute remaining budget from original request each time to avoid double-subtraction
                                if (baselineCount !== undefined) {
                                    nextArgs.count = Math.max(0, baselineCount - this._recordsRead);
                                }
                                if (baselineBytes !== undefined) {
                                    nextArgs.bytes = Math.max(0, baselineBytes - this._bytesRead);
                                }
                                // Adjust wait from original budget based on total elapsed time since start
                                if (baselineWait !== undefined) {
                                    const elapsedSeconds = (performance.now() - startTimeMs) / 1000;
                                    nextArgs.wait = Math.max(0, baselineWait - (elapsedSeconds + delay / 1000));
                                }
                                // Proactively cancel the current transport session before retrying
                                try {
                                    await session.cancel?.("retry");
                                }
                                catch { }
                                debugRead("will retry after %dms, status=%s", delay, error.status);
                                await sleep(delay);
                                attempt++;
                                break; // Break inner loop to retry
                            }
                            // Error is not retryable or attempts exhausted
                            debugRead("error in retry loop: %s", error);
                            controller.error(error);
                            return;
                        }
                        // Success: enqueue the record and reset retry attempt counter
                        const record = result.value;
                        this._nextReadPosition = {
                            seq_num: record.seq_num + 1,
                            timestamp: record.timestamp,
                        };
                        this._recordsRead++;
                        this._bytesRead += (0, utils_js_1.meteredBytes)(record);
                        attempt = 0;
                        controller.enqueue(record);
                    }
                }
            },
            cancel: async (reason) => {
                // Propagate cancellation to the current inner session, if any.
                try {
                    await session?.cancel(reason);
                }
                catch (err) {
                    // Ignore ERR_INVALID_STATE - stream may already be closed/cancelled
                    if (err?.code !== "ERR_INVALID_STATE") {
                        throw err;
                    }
                }
            },
        });
    }
    // `await using` support: cancelling the stream on disposal.
    async [Symbol.asyncDispose]() {
        await this.cancel("disposed");
    }
    // Polyfill for older browsers / Node.js environments
    [Symbol.asyncIterator]() {
        // Prefer the platform's native async iteration when available.
        const fn = ReadableStream.prototype[Symbol.asyncIterator];
        if (typeof fn === "function")
            return fn.call(this);
        // Fallback: hand-rolled iterator over a locked reader; `throw` and
        // `return` both cancel the reader and release the lock.
        const reader = this.getReader();
        return {
            next: async () => {
                const r = await reader.read();
                if (r.done) {
                    reader.releaseLock();
                    return { done: true, value: undefined };
                }
                return { done: false, value: r.value };
            },
            throw: async (e) => {
                try {
                    await reader.cancel(e);
                }
                catch (err) {
                    if (err?.code !== "ERR_INVALID_STATE")
                        throw err;
                }
                reader.releaseLock();
                return { done: true, value: undefined };
            },
            return: async () => {
                try {
                    await reader.cancel("done");
                }
                catch (err) {
                    if (err?.code !== "ERR_INVALID_STATE")
                        throw err;
                }
                reader.releaseLock();
                return { done: true, value: undefined };
            },
            [Symbol.asyncIterator]() {
                return this;
            },
        };
    }
    /** Most recent tail reported by the transport, or undefined. */
    lastObservedTail() {
        return this._lastObservedTail;
    }
    /** Position the session would resume from, or undefined before any read. */
    nextReadPosition() {
        return this._nextReadPosition;
    }
}
exports.RetryReadSession = RetryReadSession;
|
|
279
|
+
// Cap on queued+pending append bytes when the caller does not provide
// `maxInflightBytes` in the session options.
const DEFAULT_MAX_INFLIGHT_BYTES = 10 * 1024 * 1024; // 10 MiB default
|
|
280
|
+
class RetryAppendSession {
|
|
281
|
+
generator;
|
|
282
|
+
sessionOptions;
|
|
283
|
+
requestTimeoutMillis;
|
|
284
|
+
maxQueuedBytes;
|
|
285
|
+
maxInflightBatches;
|
|
286
|
+
retryConfig;
|
|
287
|
+
inflight = [];
|
|
288
|
+
capacityWaiter; // Single waiter (WritableStream writer lock)
|
|
289
|
+
session;
|
|
290
|
+
queuedBytes = 0;
|
|
291
|
+
pendingBytes = 0;
|
|
292
|
+
consecutiveFailures = 0;
|
|
293
|
+
currentAttempt = 0;
|
|
294
|
+
pumpPromise;
|
|
295
|
+
pumpStopped = false;
|
|
296
|
+
closing = false;
|
|
297
|
+
pumpWakeup;
|
|
298
|
+
closed = false;
|
|
299
|
+
fatalError;
|
|
300
|
+
_lastAckedPosition;
|
|
301
|
+
acksController;
|
|
302
|
+
readable;
|
|
303
|
+
writable;
|
|
304
|
+
/**
|
|
305
|
+
* If the session has failed, returns the original fatal error that caused
|
|
306
|
+
* the pump to stop. Returns undefined when the session has not failed.
|
|
307
|
+
*/
|
|
308
|
+
failureCause() {
|
|
309
|
+
return this.fatalError;
|
|
310
|
+
}
|
|
311
|
+
constructor(generator, sessionOptions, config) {
|
|
312
|
+
this.generator = generator;
|
|
313
|
+
this.sessionOptions = sessionOptions;
|
|
314
|
+
this.retryConfig = {
|
|
315
|
+
...exports.DEFAULT_RETRY_CONFIG,
|
|
316
|
+
...config,
|
|
317
|
+
};
|
|
318
|
+
this.requestTimeoutMillis = this.retryConfig.requestTimeoutMillis;
|
|
319
|
+
this.maxQueuedBytes =
|
|
320
|
+
this.sessionOptions?.maxInflightBytes ?? DEFAULT_MAX_INFLIGHT_BYTES;
|
|
321
|
+
this.maxInflightBatches = this.sessionOptions?.maxInflightBatches;
|
|
322
|
+
this.readable = new ReadableStream({
|
|
323
|
+
start: (controller) => {
|
|
324
|
+
this.acksController = controller;
|
|
325
|
+
},
|
|
326
|
+
});
|
|
327
|
+
this.writable = new WritableStream({
|
|
328
|
+
write: async (chunk) => {
|
|
329
|
+
const recordsArray = Array.isArray(chunk.records)
|
|
330
|
+
? chunk.records
|
|
331
|
+
: [chunk.records];
|
|
332
|
+
// Calculate metered size
|
|
333
|
+
let batchMeteredSize = 0;
|
|
334
|
+
for (const record of recordsArray) {
|
|
335
|
+
batchMeteredSize += (0, utils_js_1.meteredBytes)(record);
|
|
336
|
+
}
|
|
337
|
+
// Wait for capacity (backpressure for writable only)
|
|
338
|
+
await this.waitForCapacity(batchMeteredSize);
|
|
339
|
+
const { records: _records, ...rest } = chunk;
|
|
340
|
+
const args = rest;
|
|
341
|
+
args.precalculatedSize = batchMeteredSize;
|
|
342
|
+
// Move reserved bytes to queued bytes accounting before submission
|
|
343
|
+
this.pendingBytes = Math.max(0, this.pendingBytes - batchMeteredSize);
|
|
344
|
+
// Submit without waiting for ack (writable doesn't need per-batch resolution)
|
|
345
|
+
const promise = this.submitInternal(recordsArray, args, batchMeteredSize);
|
|
346
|
+
promise.catch(() => {
|
|
347
|
+
// Swallow to avoid unhandled rejection; pump surfaces errors via readable stream
|
|
348
|
+
});
|
|
349
|
+
},
|
|
350
|
+
close: async () => {
|
|
351
|
+
await this.close();
|
|
352
|
+
},
|
|
353
|
+
abort: async (reason) => {
|
|
354
|
+
const error = (0, error_js_1.abortedError)(`AppendSession aborted: ${reason}`);
|
|
355
|
+
await this.abort(error);
|
|
356
|
+
},
|
|
357
|
+
});
|
|
358
|
+
}
|
|
359
|
+
static async create(generator, sessionOptions, config) {
|
|
360
|
+
return new RetryAppendSession(generator, sessionOptions, config);
|
|
361
|
+
}
|
|
362
|
+
/**
|
|
363
|
+
* Submit an append request. Returns a promise that resolves with the ack.
|
|
364
|
+
* This method does not block on capacity (only writable.write() does).
|
|
365
|
+
*/
|
|
366
|
+
async submit(records, args) {
|
|
367
|
+
const recordsArray = Array.isArray(records) ? records : [records];
|
|
368
|
+
// Calculate metered size if not provided
|
|
369
|
+
let batchMeteredSize = args?.precalculatedSize ?? 0;
|
|
370
|
+
if (batchMeteredSize === 0) {
|
|
371
|
+
for (const record of recordsArray) {
|
|
372
|
+
batchMeteredSize += (0, utils_js_1.meteredBytes)(record);
|
|
373
|
+
}
|
|
374
|
+
}
|
|
375
|
+
const result = await this.submitInternal(recordsArray, args, batchMeteredSize);
|
|
376
|
+
// Convert discriminated union back to throw pattern for public API
|
|
377
|
+
if (result.ok) {
|
|
378
|
+
return result.value;
|
|
379
|
+
}
|
|
380
|
+
else {
|
|
381
|
+
throw result.error;
|
|
382
|
+
}
|
|
383
|
+
}
|
|
384
|
+
/**
|
|
385
|
+
* Internal submit that returns discriminated union.
|
|
386
|
+
* Creates inflight entry and starts pump if needed.
|
|
387
|
+
*/
|
|
388
|
+
submitInternal(records, args, batchMeteredSize) {
|
|
389
|
+
if (this.closed || this.closing) {
|
|
390
|
+
return Promise.resolve((0, result_js_1.err)(new error_js_1.S2Error({ message: "AppendSession is closed", status: 400 })));
|
|
391
|
+
}
|
|
392
|
+
// Check for fatal error (e.g., from abort())
|
|
393
|
+
if (this.fatalError) {
|
|
394
|
+
debugSession("[SUBMIT] rejecting due to fatal error: %s", this.fatalError.message);
|
|
395
|
+
return Promise.resolve((0, result_js_1.err)(this.fatalError));
|
|
396
|
+
}
|
|
397
|
+
// Create promise for submit() callers
|
|
398
|
+
return new Promise((resolve) => {
|
|
399
|
+
// Create inflight entry (innerPromise will be set when pump processes it)
|
|
400
|
+
const entry = {
|
|
401
|
+
records,
|
|
402
|
+
args,
|
|
403
|
+
expectedCount: records.length,
|
|
404
|
+
meteredBytes: batchMeteredSize,
|
|
405
|
+
innerPromise: new Promise(() => { }), // Never-resolving placeholder
|
|
406
|
+
maybeResolve: resolve,
|
|
407
|
+
needsSubmit: true, // Mark for pump to submit
|
|
408
|
+
};
|
|
409
|
+
debugSession("[SUBMIT] enqueueing %d records (%d bytes): inflight=%d->%d, queuedBytes=%d->%d", records.length, batchMeteredSize, this.inflight.length, this.inflight.length + 1, this.queuedBytes, this.queuedBytes + batchMeteredSize);
|
|
410
|
+
this.inflight.push(entry);
|
|
411
|
+
this.queuedBytes += batchMeteredSize;
|
|
412
|
+
// Wake pump if it's sleeping
|
|
413
|
+
if (this.pumpWakeup) {
|
|
414
|
+
this.pumpWakeup();
|
|
415
|
+
}
|
|
416
|
+
// Start pump if not already running
|
|
417
|
+
this.ensurePump();
|
|
418
|
+
});
|
|
419
|
+
}
|
|
420
|
+
/**
|
|
421
|
+
* Wait for capacity before allowing write to proceed (writable only).
|
|
422
|
+
*/
|
|
423
|
+
async waitForCapacity(bytes) {
|
|
424
|
+
debugSession("[CAPACITY] checking for %d bytes: queuedBytes=%d, pendingBytes=%d, maxQueuedBytes=%d, inflight=%d", bytes, this.queuedBytes, this.pendingBytes, this.maxQueuedBytes, this.inflight.length);
|
|
425
|
+
// Check if we have capacity
|
|
426
|
+
while (true) {
|
|
427
|
+
// Check for fatal error before adding to pendingBytes
|
|
428
|
+
if (this.fatalError) {
|
|
429
|
+
debugSession("[CAPACITY] fatal error detected, rejecting: %s", this.fatalError.message);
|
|
430
|
+
throw this.fatalError;
|
|
431
|
+
}
|
|
432
|
+
// Byte-based gating
|
|
433
|
+
if (this.queuedBytes + this.pendingBytes + bytes <= this.maxQueuedBytes) {
|
|
434
|
+
// Batch-based gating (if configured)
|
|
435
|
+
if (this.maxInflightBatches === undefined ||
|
|
436
|
+
this.inflight.length < this.maxInflightBatches) {
|
|
437
|
+
debugSession("[CAPACITY] capacity available, adding %d to pendingBytes", bytes);
|
|
438
|
+
this.pendingBytes += bytes;
|
|
439
|
+
return;
|
|
440
|
+
}
|
|
441
|
+
}
|
|
442
|
+
// No capacity - wait
|
|
443
|
+
// WritableStream enforces writer lock, so only one write can be blocked at a time
|
|
444
|
+
debugSession("[CAPACITY] no capacity, waiting for release");
|
|
445
|
+
await new Promise((resolve) => {
|
|
446
|
+
this.capacityWaiter = resolve;
|
|
447
|
+
});
|
|
448
|
+
debugSession("[CAPACITY] woke up, rechecking");
|
|
449
|
+
}
|
|
450
|
+
}
|
|
451
|
+
/**
|
|
452
|
+
* Release capacity and wake waiter if present.
|
|
453
|
+
*/
|
|
454
|
+
releaseCapacity(bytes) {
|
|
455
|
+
debugSession("[CAPACITY] releasing %d bytes: queuedBytes=%d->%d, pendingBytes=%d->%d, hasWaiter=%s", bytes, this.queuedBytes, this.queuedBytes - bytes, this.pendingBytes, Math.max(0, this.pendingBytes - bytes), !!this.capacityWaiter);
|
|
456
|
+
this.queuedBytes -= bytes;
|
|
457
|
+
this.pendingBytes = Math.max(0, this.pendingBytes - bytes);
|
|
458
|
+
// Wake single waiter
|
|
459
|
+
const waiter = this.capacityWaiter;
|
|
460
|
+
if (waiter) {
|
|
461
|
+
debugSession("[CAPACITY] waking waiter");
|
|
462
|
+
this.capacityWaiter = undefined;
|
|
463
|
+
waiter();
|
|
464
|
+
}
|
|
465
|
+
}
|
|
466
|
+
/**
|
|
467
|
+
* Ensure pump loop is running.
|
|
468
|
+
*/
|
|
469
|
+
ensurePump() {
|
|
470
|
+
if (this.pumpPromise || this.pumpStopped) {
|
|
471
|
+
return;
|
|
472
|
+
}
|
|
473
|
+
this.pumpPromise = this.runPump().catch((e) => {
|
|
474
|
+
debugSession("pump crashed unexpectedly: %s", e);
|
|
475
|
+
// This should never happen - pump handles all errors internally
|
|
476
|
+
});
|
|
477
|
+
}
|
|
478
|
+
/**
|
|
479
|
+
* Main pump loop: processes inflight queue, handles acks, retries, and recovery.
|
|
480
|
+
*/
|
|
481
|
+
async runPump() {
|
|
482
|
+
debugSession("pump started");
|
|
483
|
+
while (true) {
|
|
484
|
+
debugSession("[PUMP] loop: inflight=%d, queuedBytes=%d, pendingBytes=%d, closing=%s, pumpStopped=%s", this.inflight.length, this.queuedBytes, this.pendingBytes, this.closing, this.pumpStopped);
|
|
485
|
+
// Check if we should stop
|
|
486
|
+
if (this.pumpStopped) {
|
|
487
|
+
debugSession("[PUMP] stopped by flag");
|
|
488
|
+
return;
|
|
489
|
+
}
|
|
490
|
+
// If closing and queue is empty, stop
|
|
491
|
+
if (this.closing && this.inflight.length === 0) {
|
|
492
|
+
debugSession("[PUMP] closing and queue empty, stopping");
|
|
493
|
+
this.pumpStopped = true;
|
|
494
|
+
return;
|
|
495
|
+
}
|
|
496
|
+
// If no entries, sleep and continue
|
|
497
|
+
if (this.inflight.length === 0) {
|
|
498
|
+
debugSession("[PUMP] no entries, sleeping 10ms");
|
|
499
|
+
// Use interruptible sleep - can be woken by new submissions
|
|
500
|
+
await Promise.race([
|
|
501
|
+
sleep(10),
|
|
502
|
+
new Promise((resolve) => {
|
|
503
|
+
this.pumpWakeup = resolve;
|
|
504
|
+
}),
|
|
505
|
+
]);
|
|
506
|
+
this.pumpWakeup = undefined;
|
|
507
|
+
continue;
|
|
508
|
+
}
|
|
509
|
+
// Get head entry (we know it exists because we checked length above)
|
|
510
|
+
const head = this.inflight[0];
|
|
511
|
+
debugSession("[PUMP] processing head: expectedCount=%d, meteredBytes=%d", head.expectedCount, head.meteredBytes);
|
|
512
|
+
// Ensure session exists
|
|
513
|
+
debugSession("[PUMP] ensuring session exists");
|
|
514
|
+
await this.ensureSession();
|
|
515
|
+
if (!this.session) {
|
|
516
|
+
// Session creation failed - will retry
|
|
517
|
+
debugSession("[PUMP] session creation failed, sleeping 100ms");
|
|
518
|
+
await sleep(100);
|
|
519
|
+
continue;
|
|
520
|
+
}
|
|
521
|
+
// Submit ALL entries that need submitting (enables HTTP/2 pipelining for S2S)
|
|
522
|
+
for (const entry of this.inflight) {
|
|
523
|
+
if (!entry.innerPromise || entry.needsSubmit) {
|
|
524
|
+
debugSession("[PUMP] submitting entry to inner session (%d records, %d bytes)", entry.expectedCount, entry.meteredBytes);
|
|
525
|
+
entry.attemptStartedMonotonicMs = performance.now();
|
|
526
|
+
entry.innerPromise = this.session.submit(entry.records, entry.args);
|
|
527
|
+
delete entry.needsSubmit;
|
|
528
|
+
}
|
|
529
|
+
}
|
|
530
|
+
// Wait for head with timeout
|
|
531
|
+
debugSession("[PUMP] waiting for head result");
|
|
532
|
+
const result = await this.waitForHead(head);
|
|
533
|
+
debugSession("[PUMP] got result: kind=%s", result.kind);
|
|
534
|
+
if (result.kind === "timeout") {
|
|
535
|
+
// Ack timeout - fatal (per-attempt)
|
|
536
|
+
const attemptElapsed = head.attemptStartedMonotonicMs != null
|
|
537
|
+
? Math.round(performance.now() - head.attemptStartedMonotonicMs)
|
|
538
|
+
: undefined;
|
|
539
|
+
const error = new error_js_1.S2Error({
|
|
540
|
+
message: `Request timeout after ${attemptElapsed ?? "unknown"}ms (${head.expectedCount} records, ${head.meteredBytes} bytes)`,
|
|
541
|
+
status: 408,
|
|
542
|
+
code: "REQUEST_TIMEOUT",
|
|
543
|
+
});
|
|
544
|
+
debugSession("ack timeout for head entry: %s", error.message);
|
|
545
|
+
await this.abort(error);
|
|
546
|
+
return;
|
|
547
|
+
}
|
|
548
|
+
// Promise settled
|
|
549
|
+
const appendResult = result.value;
|
|
550
|
+
if (appendResult.ok) {
|
|
551
|
+
// Success!
|
|
552
|
+
const ack = appendResult.value;
|
|
553
|
+
debugSession("[PUMP] success, got ack", { ack });
|
|
554
|
+
// Invariant check: ack count matches batch count
|
|
555
|
+
const ackCount = Number(ack.end.seq_num) - Number(ack.start.seq_num);
|
|
556
|
+
if (ackCount !== head.expectedCount) {
|
|
557
|
+
const error = (0, error_js_1.invariantViolation)(`Ack count mismatch: expected ${head.expectedCount}, got ${ackCount}`);
|
|
558
|
+
debugSession("invariant violation: %s", error.message);
|
|
559
|
+
await this.abort(error);
|
|
560
|
+
return;
|
|
561
|
+
}
|
|
562
|
+
// Invariant check: sequence numbers must be strictly increasing
|
|
563
|
+
if (this._lastAckedPosition) {
|
|
564
|
+
const prevEnd = BigInt(this._lastAckedPosition.end.seq_num);
|
|
565
|
+
const currentEnd = BigInt(ack.end.seq_num);
|
|
566
|
+
if (currentEnd <= prevEnd) {
|
|
567
|
+
const error = (0, error_js_1.invariantViolation)(`Sequence number not strictly increasing: previous=${prevEnd}, current=${currentEnd}`);
|
|
568
|
+
debugSession("invariant violation: %s", error.message);
|
|
569
|
+
await this.abort(error);
|
|
570
|
+
return;
|
|
571
|
+
}
|
|
572
|
+
}
|
|
573
|
+
// Update last acked position
|
|
574
|
+
this._lastAckedPosition = ack;
|
|
575
|
+
// Resolve submit() caller if present
|
|
576
|
+
if (head.maybeResolve) {
|
|
577
|
+
head.maybeResolve((0, result_js_1.ok)(ack));
|
|
578
|
+
}
|
|
579
|
+
// Emit to readable stream
|
|
580
|
+
try {
|
|
581
|
+
this.acksController?.enqueue(ack);
|
|
582
|
+
}
|
|
583
|
+
catch (e) {
|
|
584
|
+
debugSession("failed to enqueue ack: %s", e);
|
|
585
|
+
}
|
|
586
|
+
// Remove from inflight and release capacity
|
|
587
|
+
debugSession("[PUMP] removing head from inflight, releasing %d bytes", head.meteredBytes);
|
|
588
|
+
this.inflight.shift();
|
|
589
|
+
this.releaseCapacity(head.meteredBytes);
|
|
590
|
+
// Reset consecutive failures on success
|
|
591
|
+
this.consecutiveFailures = 0;
|
|
592
|
+
this.currentAttempt = 0;
|
|
593
|
+
}
|
|
594
|
+
else {
|
|
595
|
+
// Error result
|
|
596
|
+
const error = appendResult.error;
|
|
597
|
+
debugSession("[PUMP] error: status=%s, message=%s", error.status, error.message);
|
|
598
|
+
// Check if retryable
|
|
599
|
+
if (!isRetryable(error)) {
|
|
600
|
+
debugSession("error not retryable, aborting");
|
|
601
|
+
await this.abort(error);
|
|
602
|
+
return;
|
|
603
|
+
}
|
|
604
|
+
// Check policy compliance
|
|
605
|
+
if (this.retryConfig.appendRetryPolicy === "noSideEffects" &&
|
|
606
|
+
!this.isIdempotent(head)) {
|
|
607
|
+
debugSession("error not policy-compliant (noSideEffects), aborting");
|
|
608
|
+
await this.abort(error);
|
|
609
|
+
return;
|
|
610
|
+
}
|
|
611
|
+
// Check max attempts (total attempts include initial; retries = max - 1)
|
|
612
|
+
const effectiveMax = Math.max(1, this.retryConfig.maxAttempts);
|
|
613
|
+
const allowedRetries = effectiveMax - 1;
|
|
614
|
+
if (this.currentAttempt >= allowedRetries) {
|
|
615
|
+
debugSession("max attempts reached (%d), aborting", effectiveMax);
|
|
616
|
+
const wrappedError = new error_js_1.S2Error({
|
|
617
|
+
message: `Max attempts (${effectiveMax}) exhausted: ${error.message}`,
|
|
618
|
+
status: error.status,
|
|
619
|
+
code: error.code,
|
|
620
|
+
});
|
|
621
|
+
await this.abort(wrappedError);
|
|
622
|
+
return;
|
|
623
|
+
}
|
|
624
|
+
// Perform recovery
|
|
625
|
+
this.consecutiveFailures++;
|
|
626
|
+
this.currentAttempt++;
|
|
627
|
+
debugSession("performing recovery (retry %d/%d)", this.currentAttempt, allowedRetries);
|
|
628
|
+
await this.recover();
|
|
629
|
+
}
|
|
630
|
+
}
|
|
631
|
+
}
|
|
632
|
+
/**
|
|
633
|
+
* Wait for head entry's innerPromise with timeout.
|
|
634
|
+
* Returns either the settled result or a timeout indicator.
|
|
635
|
+
*
|
|
636
|
+
* Per-attempt ack timeout semantics:
|
|
637
|
+
* - The deadline is computed from the most recent (re)submit attempt using
|
|
638
|
+
* a monotonic clock (performance.now) to avoid issues with wall clock
|
|
639
|
+
* adjustments.
|
|
640
|
+
* - If attempt start is missing (for backward compatibility), we measure
|
|
641
|
+
* from "now" with the full timeout window.
|
|
642
|
+
*/
|
|
643
|
+
async waitForHead(head) {
|
|
644
|
+
const startMono = head.attemptStartedMonotonicMs ?? performance.now();
|
|
645
|
+
const deadline = startMono + this.requestTimeoutMillis;
|
|
646
|
+
const remaining = Math.max(0, deadline - performance.now());
|
|
647
|
+
let timer;
|
|
648
|
+
const timeoutP = new Promise((resolve) => {
|
|
649
|
+
timer = setTimeout(() => resolve({ kind: "timeout" }), remaining);
|
|
650
|
+
});
|
|
651
|
+
const settledP = head.innerPromise.then((result) => ({
|
|
652
|
+
kind: "settled",
|
|
653
|
+
value: result,
|
|
654
|
+
}));
|
|
655
|
+
try {
|
|
656
|
+
return await Promise.race([settledP, timeoutP]);
|
|
657
|
+
}
|
|
658
|
+
finally {
|
|
659
|
+
if (timer)
|
|
660
|
+
clearTimeout(timer);
|
|
661
|
+
}
|
|
662
|
+
}
|
|
663
|
+
/**
|
|
664
|
+
* Recover from transient error: recreate session and resubmit all inflight entries.
|
|
665
|
+
*/
|
|
666
|
+
async recover() {
|
|
667
|
+
debugSession("starting recovery");
|
|
668
|
+
// Calculate backoff delay
|
|
669
|
+
const delay = calculateDelay(this.consecutiveFailures - 1, this.retryConfig.retryBackoffDurationMillis);
|
|
670
|
+
debugSession("backing off for %dms", delay);
|
|
671
|
+
await sleep(delay);
|
|
672
|
+
// Teardown old session
|
|
673
|
+
if (this.session) {
|
|
674
|
+
try {
|
|
675
|
+
const closeResult = await this.session.close();
|
|
676
|
+
if (!closeResult.ok) {
|
|
677
|
+
debugSession("error closing old session during recovery: %s", closeResult.error.message);
|
|
678
|
+
}
|
|
679
|
+
}
|
|
680
|
+
catch (e) {
|
|
681
|
+
debugSession("exception closing old session: %s", e);
|
|
682
|
+
}
|
|
683
|
+
this.session = undefined;
|
|
684
|
+
}
|
|
685
|
+
// Create new session
|
|
686
|
+
await this.ensureSession();
|
|
687
|
+
if (!this.session) {
|
|
688
|
+
debugSession("failed to create new session during recovery");
|
|
689
|
+
// Will retry on next pump iteration
|
|
690
|
+
return;
|
|
691
|
+
}
|
|
692
|
+
// Store session in local variable to help TypeScript type narrowing
|
|
693
|
+
const session = this.session;
|
|
694
|
+
// Resubmit all inflight entries (replace their innerPromise and reset attempt start)
|
|
695
|
+
debugSession("resubmitting %d inflight entries", this.inflight.length);
|
|
696
|
+
for (const entry of this.inflight) {
|
|
697
|
+
// Attach .catch to superseded promise to avoid unhandled rejection
|
|
698
|
+
entry.innerPromise.catch(() => { });
|
|
699
|
+
// Create new promise from new session
|
|
700
|
+
entry.attemptStartedMonotonicMs = performance.now();
|
|
701
|
+
entry.innerPromise = session.submit(entry.records, entry.args);
|
|
702
|
+
}
|
|
703
|
+
debugSession("recovery complete");
|
|
704
|
+
}
|
|
705
|
+
/**
|
|
706
|
+
* Check if append can be retried under noSideEffects policy.
|
|
707
|
+
* For appends, idempotency requires match_seq_num.
|
|
708
|
+
*/
|
|
709
|
+
isIdempotent(entry) {
|
|
710
|
+
const args = entry.args;
|
|
711
|
+
if (!args)
|
|
712
|
+
return false;
|
|
713
|
+
return args.match_seq_num !== undefined;
|
|
714
|
+
}
|
|
715
|
+
/**
|
|
716
|
+
* Ensure session exists, creating it if necessary.
|
|
717
|
+
*/
|
|
718
|
+
async ensureSession() {
|
|
719
|
+
if (this.session) {
|
|
720
|
+
return;
|
|
721
|
+
}
|
|
722
|
+
try {
|
|
723
|
+
this.session = await this.generator(this.sessionOptions);
|
|
724
|
+
}
|
|
725
|
+
catch (e) {
|
|
726
|
+
const error = (0, error_js_1.s2Error)(e);
|
|
727
|
+
debugSession("failed to create session: %s", error.message);
|
|
728
|
+
// Don't set this.session - will retry later
|
|
729
|
+
}
|
|
730
|
+
}
|
|
731
|
+
/**
|
|
732
|
+
* Abort the session with a fatal error.
|
|
733
|
+
*/
|
|
734
|
+
async abort(error) {
|
|
735
|
+
if (this.pumpStopped) {
|
|
736
|
+
return; // Already aborted
|
|
737
|
+
}
|
|
738
|
+
debugSession("aborting session: %s", error.message);
|
|
739
|
+
this.fatalError = error;
|
|
740
|
+
this.pumpStopped = true;
|
|
741
|
+
// Resolve all inflight entries with error
|
|
742
|
+
for (const entry of this.inflight) {
|
|
743
|
+
if (entry.maybeResolve) {
|
|
744
|
+
entry.maybeResolve((0, result_js_1.err)(error));
|
|
745
|
+
}
|
|
746
|
+
}
|
|
747
|
+
this.inflight.length = 0;
|
|
748
|
+
this.queuedBytes = 0;
|
|
749
|
+
this.pendingBytes = 0;
|
|
750
|
+
// Error the readable stream
|
|
751
|
+
try {
|
|
752
|
+
this.acksController?.error(error);
|
|
753
|
+
}
|
|
754
|
+
catch (e) {
|
|
755
|
+
debugSession("failed to error acks controller: %s", e);
|
|
756
|
+
}
|
|
757
|
+
// Wake capacity waiter to unblock any pending writer
|
|
758
|
+
if (this.capacityWaiter) {
|
|
759
|
+
this.capacityWaiter();
|
|
760
|
+
this.capacityWaiter = undefined;
|
|
761
|
+
}
|
|
762
|
+
// Close inner session
|
|
763
|
+
if (this.session) {
|
|
764
|
+
try {
|
|
765
|
+
await this.session.close();
|
|
766
|
+
}
|
|
767
|
+
catch (e) {
|
|
768
|
+
debugSession("error closing session during abort: %s", e);
|
|
769
|
+
}
|
|
770
|
+
this.session = undefined;
|
|
771
|
+
}
|
|
772
|
+
}
|
|
773
|
+
/**
 * Close the append session.
 * Waits for all pending appends to complete before resolving.
 * Does not interrupt recovery - allows it to complete.
 * If a fatal error was recorded (by abort), it is re-thrown here so the
 * caller observes the failure; repeated calls re-throw the same error.
 */
async close() {
    if (this.closed) {
        // Re-entrant close: report the same outcome as the first call.
        if (this.fatalError) {
            throw this.fatalError;
        }
        return;
    }
    debugSession("close requested");
    this.closing = true;
    // Wake pump if it's sleeping so it can observe the closing flag promptly.
    if (this.pumpWakeup) {
        this.pumpWakeup();
    }
    // Wait for pump to stop (drains inflight queue, including through recovery).
    if (this.pumpPromise) {
        await this.pumpPromise;
    }
    // Close inner session; failures are logged, not thrown, so cleanup
    // below still runs.
    if (this.session) {
        try {
            const result = await this.session.close();
            if (!result.ok) {
                debugSession("error closing inner session: %s", result.error.message);
            }
        }
        catch (e) {
            debugSession("exception closing inner session: %s", e);
        }
        this.session = undefined;
    }
    // Close readable acks stream (may already be closed/errored; best-effort).
    try {
        this.acksController?.close();
    }
    catch (e) {
        debugSession("error closing acks controller: %s", e);
    }
    this.closed = true;
    // If the pump aborted with a fatal error, surface it to the closer.
    if (this.fatalError) {
        throw this.fatalError;
    }
    debugSession("close complete");
}
|
|
822
|
+
async [Symbol.asyncDispose]() {
|
|
823
|
+
await this.close();
|
|
824
|
+
}
|
|
825
|
+
/**
|
|
826
|
+
* Get a stream of acknowledgements for appends.
|
|
827
|
+
*/
|
|
828
|
+
acks() {
|
|
829
|
+
return this.readable;
|
|
830
|
+
}
|
|
831
|
+
/**
|
|
832
|
+
* Get the last acknowledged position.
|
|
833
|
+
*/
|
|
834
|
+
lastAckedPosition() {
|
|
835
|
+
return this._lastAckedPosition;
|
|
836
|
+
}
|
|
837
|
+
}
|
|
838
|
+
exports.RetryAppendSession = RetryAppendSession;
|
|
839
|
+
//# sourceMappingURL=retry.js.map
|