@nextn/outbound-guard 0.1.1 → 0.1.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -28,21 +28,28 @@ interface MicroCacheOptions {
  keyFn?: (req: ResilientRequest) => string;
  retry?: MicroCacheRetryOptions;
  }
+ interface HealthOptions {
+ enabled?: boolean;
+ }
  interface ResilientHttpClientOptions {
+ /**
+ * Applied per base URL (scheme + host + port).
+ */
  maxInFlight: number;
- maxQueue: number;
- enqueueTimeoutMs: number;
- requestTimeoutMs: number;
  /**
- * GET-only micro-cache + request coalescing.
+ * Per-attempt timeout.
  */
+ requestTimeoutMs: number;
+ health?: HealthOptions;
  microCache?: MicroCacheOptions;
  }

  declare class ResilientHttpClient extends EventEmitter {
  private readonly opts;
- private readonly limiter;
  private readonly requestTimeoutMs;
+ private readonly healthEnabled;
+ private readonly limiters;
+ private readonly health;
  private readonly microCache?;
  private cache?;
  private inFlight?;
@@ -50,28 +57,24 @@ declare class ResilientHttpClient extends EventEmitter {
  private readonly cleanupEveryNRequests;
  constructor(opts: ResilientHttpClientOptions);
  request(req: ResilientRequest): Promise<ResilientResponse>;
+ snapshot(): {
+ inFlight: number;
+ queueDepth: number;
+ };
+ private getLimiter;
+ private getHealth;
+ private closeHealth;
+ private halfOpenHealth;
+ private openHealth;
+ private recordOutcome;
+ private execute;
  private cloneResponse;
  private maybeCleanupExpired;
  private evictIfNeeded;
  private isRetryableStatus;
  private computeBackoffMs;
  private fetchWithLeaderRetry;
- /**
- * Window behavior:
- * - 0..ttlMs: return cache (fresh)
- * - ttlMs..maxStaleMs: leader refreshes; others get old value until replaced (stale-while-revalidate)
- * - >maxStaleMs: do not serve old; behave like no cache
- *
- * Follower controls (only when no cache is served):
- * - maxWaiters: cap concurrent followers joining the leader
- * - followerTimeoutMs: shared "join window" from first follower; after it expires, late followers fail fast until leader completes
- */
  private requestWithMicroCache;
- private existingPipeline;
- snapshot(): {
- inFlight: number;
- queueDepth: number;
- };
  }

  declare abstract class ResilientHttpError extends Error {
@@ -94,5 +97,14 @@ declare class UpstreamError extends ResilientHttpError {
  readonly status: number;
  constructor(status: number);
  }
+ declare class UpstreamUnhealthyError extends ResilientHttpError {
+ readonly baseUrl: string;
+ readonly state: string;
+ constructor(baseUrl: string, state: string);
+ }
+ declare class HalfOpenRejectedError extends ResilientHttpError {
+ readonly baseUrl: string;
+ constructor(baseUrl: string);
+ }

- export { type HttpMethod, type MicroCacheOptions, type MicroCacheRetryOptions, QueueFullError, QueueTimeoutError, RequestTimeoutError, ResilientHttpClient, type ResilientHttpClientOptions, ResilientHttpError, type ResilientRequest, type ResilientResponse, UpstreamError };
+ export { HalfOpenRejectedError, type HealthOptions, type HttpMethod, type MicroCacheOptions, type MicroCacheRetryOptions, QueueFullError, QueueTimeoutError, RequestTimeoutError, ResilientHttpClient, type ResilientHttpClientOptions, ResilientHttpError, type ResilientRequest, type ResilientResponse, UpstreamError, UpstreamUnhealthyError };
@@ -28,21 +28,28 @@ interface MicroCacheOptions {
  keyFn?: (req: ResilientRequest) => string;
  retry?: MicroCacheRetryOptions;
  }
+ interface HealthOptions {
+ enabled?: boolean;
+ }
  interface ResilientHttpClientOptions {
+ /**
+ * Applied per base URL (scheme + host + port).
+ */
  maxInFlight: number;
- maxQueue: number;
- enqueueTimeoutMs: number;
- requestTimeoutMs: number;
  /**
- * GET-only micro-cache + request coalescing.
+ * Per-attempt timeout.
  */
+ requestTimeoutMs: number;
+ health?: HealthOptions;
  microCache?: MicroCacheOptions;
  }

  declare class ResilientHttpClient extends EventEmitter {
  private readonly opts;
- private readonly limiter;
  private readonly requestTimeoutMs;
+ private readonly healthEnabled;
+ private readonly limiters;
+ private readonly health;
  private readonly microCache?;
  private cache?;
  private inFlight?;
@@ -50,28 +57,24 @@ declare class ResilientHttpClient extends EventEmitter {
  private readonly cleanupEveryNRequests;
  constructor(opts: ResilientHttpClientOptions);
  request(req: ResilientRequest): Promise<ResilientResponse>;
+ snapshot(): {
+ inFlight: number;
+ queueDepth: number;
+ };
+ private getLimiter;
+ private getHealth;
+ private closeHealth;
+ private halfOpenHealth;
+ private openHealth;
+ private recordOutcome;
+ private execute;
  private cloneResponse;
  private maybeCleanupExpired;
  private evictIfNeeded;
  private isRetryableStatus;
  private computeBackoffMs;
  private fetchWithLeaderRetry;
- /**
- * Window behavior:
- * - 0..ttlMs: return cache (fresh)
- * - ttlMs..maxStaleMs: leader refreshes; others get old value until replaced (stale-while-revalidate)
- * - >maxStaleMs: do not serve old; behave like no cache
- *
- * Follower controls (only when no cache is served):
- * - maxWaiters: cap concurrent followers joining the leader
- * - followerTimeoutMs: shared "join window" from first follower; after it expires, late followers fail fast until leader completes
- */
  private requestWithMicroCache;
- private existingPipeline;
- snapshot(): {
- inFlight: number;
- queueDepth: number;
- };
  }

  declare abstract class ResilientHttpError extends Error {
@@ -94,5 +97,14 @@ declare class UpstreamError extends ResilientHttpError {
  readonly status: number;
  constructor(status: number);
  }
+ declare class UpstreamUnhealthyError extends ResilientHttpError {
+ readonly baseUrl: string;
+ readonly state: string;
+ constructor(baseUrl: string, state: string);
+ }
+ declare class HalfOpenRejectedError extends ResilientHttpError {
+ readonly baseUrl: string;
+ constructor(baseUrl: string);
+ }

- export { type HttpMethod, type MicroCacheOptions, type MicroCacheRetryOptions, QueueFullError, QueueTimeoutError, RequestTimeoutError, ResilientHttpClient, type ResilientHttpClientOptions, ResilientHttpError, type ResilientRequest, type ResilientResponse, UpstreamError };
+ export { HalfOpenRejectedError, type HealthOptions, type HttpMethod, type MicroCacheOptions, type MicroCacheRetryOptions, QueueFullError, QueueTimeoutError, RequestTimeoutError, ResilientHttpClient, type ResilientHttpClientOptions, ResilientHttpError, type ResilientRequest, type ResilientResponse, UpstreamError, UpstreamUnhealthyError };
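The declarations above drop the global queue options (maxQueue, enqueueTimeoutMs) in favor of per-base-URL limiting plus an optional health block, and export two new error classes. The sketch below shows one way a consumer might use the 0.1.2 surface; the option values and the exact ResilientRequest shape (here just { method, url }) are illustrative assumptions, only the names come from the declarations in this diff.

```ts
import {
  ResilientHttpClient,
  UpstreamUnhealthyError,
  HalfOpenRejectedError,
} from "@nextn/outbound-guard";

// Illustrative values; option names are from the 0.1.2 declarations above.
const client = new ResilientHttpClient({
  maxInFlight: 8,          // applied per base URL (scheme + host + port)
  requestTimeoutMs: 2_000, // per-attempt timeout
  health: { enabled: true },
});

async function getUsers() {
  try {
    // Assumed request shape: the diff only shows `url`, `method`, and `body` being read.
    return await client.request({ method: "GET", url: "https://api.example.com/users" });
  } catch (err) {
    if (err instanceof UpstreamUnhealthyError) {
      // The base URL is CLOSED and still cooling down; back off instead of retrying.
      console.warn(`unhealthy upstream ${err.baseUrl} (state=${err.state})`);
      return undefined;
    }
    if (err instanceof HalfOpenRejectedError) {
      // HALF_OPEN admits a single probe; other callers are rejected fast.
      console.warn(`probe already in flight for ${err.baseUrl}`);
      return undefined;
    }
    throw err;
  }
}

// snapshot() now aggregates in-flight and queue depth across all per-base-URL limiters.
console.log(client.snapshot());
```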
@@ -33,12 +33,24 @@ var UpstreamError = class extends ResilientHttpError {
  this.status = status;
  }
  };
+ var UpstreamUnhealthyError = class extends ResilientHttpError {
+ constructor(baseUrl, state) {
+ super(`Upstream is unhealthy (state=${state}, baseUrl=${baseUrl}).`);
+ this.baseUrl = baseUrl;
+ this.state = state;
+ }
+ };
+ var HalfOpenRejectedError = class extends ResilientHttpError {
+ constructor(baseUrl) {
+ super(`Upstream is HALF_OPEN (probe only) for baseUrl=${baseUrl}.`);
+ this.baseUrl = baseUrl;
+ }
+ };

  // src/limiter.ts
  var ConcurrencyLimiter = class {
  maxInFlight;
  maxQueue;
- enqueueTimeoutMs;
  inFlight = 0;
  queue = [];
  constructor(opts) {
@@ -48,17 +60,9 @@ var ConcurrencyLimiter = class {
  if (!Number.isFinite(opts.maxQueue) || opts.maxQueue < 0) {
  throw new Error(`maxQueue must be >= 0 (got ${opts.maxQueue})`);
  }
- if (!Number.isFinite(opts.enqueueTimeoutMs) || opts.enqueueTimeoutMs <= 0) {
- throw new Error(`enqueueTimeoutMs must be > 0 (got ${opts.enqueueTimeoutMs})`);
- }
  this.maxInFlight = opts.maxInFlight;
  this.maxQueue = opts.maxQueue;
- this.enqueueTimeoutMs = opts.enqueueTimeoutMs;
  }
- /**
- * Acquire a permit. Resolves once you are allowed to proceed.
- * MUST be followed by `release()` exactly once.
- */
  acquire() {
  if (this.inFlight < this.maxInFlight) {
  this.inFlight += 1;
@@ -68,32 +72,36 @@ var ConcurrencyLimiter = class {
  return Promise.reject(new QueueFullError(this.maxQueue));
  }
  return new Promise((resolve, reject) => {
- const timer = setTimeout(() => {
- const idx = this.queue.findIndex((w) => w.resolve === resolve);
- if (idx >= 0) {
- const [w] = this.queue.splice(idx, 1);
- clearTimeout(w.timer);
- }
- reject(new QueueTimeoutError(this.enqueueTimeoutMs));
- }, this.enqueueTimeoutMs);
- this.queue.push({ resolve, reject, timer });
+ this.queue.push({ resolve, reject });
  });
  }
  /**
- * Release a permit. Always call this in a `finally` block.
+ * Acquire without queueing: either start now or fail.
+ * Used for HALF_OPEN probes so recovery never waits behind backlog.
  */
+ acquireNoQueue() {
+ if (this.inFlight < this.maxInFlight) {
+ this.inFlight += 1;
+ return Promise.resolve();
+ }
+ return Promise.reject(new QueueFullError(0));
+ }
  release() {
  if (this.inFlight <= 0) {
  throw new Error("release() called when inFlight is already 0");
  }
  const next = this.queue.shift();
  if (next) {
- clearTimeout(next.timer);
  next.resolve();
  return;
  }
  this.inFlight -= 1;
  }
+ flush(err) {
+ const q = this.queue;
+ this.queue = [];
+ for (const w of q) w.reject(err);
+ }
  snapshot() {
  return {
  inFlight: this.inFlight,
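For reference, the limiter contract after this hunk (ConcurrencyLimiter in src/limiter.ts is internal, not exported): acquire() now queues without a deadline since enqueueTimeoutMs is gone, acquireNoQueue() starts immediately or rejects, release() hands the permit to the next waiter, and flush(err) rejects everyone still queued. A conceptual sketch of that contract, not a public API:

```ts
// Conceptual sketch only: mirrors the limiter contract visible in the diff above.
interface LimiterLike {
  acquire(): Promise<void>;        // waits in the queue (no enqueue timeout in 0.1.2)
  acquireNoQueue(): Promise<void>; // start now or reject with QueueFullError(0); used for HALF_OPEN probes
  release(): void;                 // must follow every successful acquire exactly once
  flush(err: Error): void;         // reject all queued waiters, e.g. when a base URL is marked CLOSED
}

async function withPermit<T>(limiter: LimiterLike, fn: () => Promise<T>): Promise<T> {
  await limiter.acquire();
  try {
    return await fn();
  } finally {
    limiter.release(); // releasing in finally keeps the inFlight count correct
  }
}
```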
@@ -151,6 +159,14 @@ function normalizeUrlForKey(rawUrl) {
  if (isHttpDefault || isHttpsDefault) u.port = "";
  return u.toString();
  }
+ function baseUrlKey(rawUrl) {
+ const u = new URL(rawUrl);
+ u.hostname = u.hostname.toLowerCase();
+ const isHttpDefault = u.protocol === "http:" && u.port === "80";
+ const isHttpsDefault = u.protocol === "https:" && u.port === "443";
+ if (isHttpDefault || isHttpsDefault) u.port = "";
+ return `${u.protocol}//${u.host}`;
+ }
  function defaultMicroCacheKeyFn(req) {
  return `GET ${normalizeUrlForKey(req.url)}`;
  }
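The new baseUrlKey helper reduces a request URL to scheme + host + port, and that key selects both the per-base-URL limiter and the health record. Hypothetical inputs and the keys the logic above would produce (example URLs are made up):

```ts
// Illustrative only; these URLs are hypothetical.
// baseUrlKey("https://API.Example.com:443/v1/users?id=1") === "https://api.example.com"
//   (default port dropped, host lowercased, path and query ignored)
// baseUrlKey("http://svc.internal:8080/health") === "http://svc.internal:8080"
//   (non-default port kept, so each port gets its own limiter and health state)
```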
@@ -163,16 +179,46 @@ function sleep(ms) {
  function clamp(n, lo, hi) {
  return Math.max(lo, Math.min(hi, n));
  }
+ function jitterMs(ms) {
+ const mult = 0.8 + Math.random() * 0.4;
+ return Math.round(ms * mult);
+ }
+ var SOFT_FAIL_STATUSES = /* @__PURE__ */ new Set([429, 502, 503, 504]);
+ function classifyHttpStatus(status) {
+ if (status >= 200 && status < 300) return "SUCCESS";
+ if (SOFT_FAIL_STATUSES.has(status)) return "SOFT_FAIL";
+ return "SUCCESS";
+ }
+ function computeRates(window) {
+ const total = window.length;
+ let hard = 0;
+ let soft = 0;
+ for (const o of window) {
+ if (o === "HARD_FAIL") hard += 1;
+ else if (o === "SOFT_FAIL") soft += 1;
+ }
+ return {
+ total,
+ hard,
+ soft,
+ hardFailRate: total === 0 ? 0 : hard / total,
+ failRate: total === 0 ? 0 : (hard + soft) / total
+ };
+ }
+ function shouldCountAsHardFail(err) {
+ if (err instanceof UpstreamUnhealthyError) return false;
+ if (err instanceof HalfOpenRejectedError) return false;
+ if (err instanceof QueueFullError) return false;
+ if (err instanceof RequestTimeoutError) return true;
+ if (err instanceof ResilientHttpError) return false;
+ return true;
+ }
  var ResilientHttpClient = class extends EventEmitter {
  constructor(opts) {
  super();
  this.opts = opts;
- this.limiter = new ConcurrencyLimiter({
- maxInFlight: opts.maxInFlight,
- maxQueue: opts.maxQueue,
- enqueueTimeoutMs: opts.enqueueTimeoutMs
- });
  this.requestTimeoutMs = opts.requestTimeoutMs;
+ this.healthEnabled = opts.health?.enabled ?? true;
  const mc = opts.microCache;
  if (mc?.enabled) {
  const retry = mc.retry ? {
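These helpers feed the health thresholds applied further down: close after 3 consecutive hard failures, or, once at least 10 samples are in the 20-entry window, when the hard-fail rate reaches 30% or the combined fail rate reaches 50%. A worked example of that arithmetic with made-up outcome counts:

```ts
// Hypothetical window of the 20 most recent outcomes for one base URL:
// 13 SUCCESS, 4 SOFT_FAIL (429/502/503/504), 3 HARD_FAIL (timeouts / transport errors).
const total = 20;
const hard = 3;
const soft = 4;

const hardFailRate = hard / total;      // 0.15 -> below the 0.3 hard-fail threshold
const failRate = (hard + soft) / total; // 0.35 -> below the 0.5 combined threshold

// With one more hard failure in the window (4 hard, 4 soft): 0.20 and 0.40 -> still open.
// Three hard failures in a row close the breaker immediately, regardless of the rates.
console.log({ hardFailRate, failRate });
```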
@@ -195,8 +241,10 @@ var ResilientHttpClient = class extends EventEmitter {
  this.inFlight = /* @__PURE__ */ new Map();
  }
  }
- limiter;
  requestTimeoutMs;
+ healthEnabled;
+ limiters = /* @__PURE__ */ new Map();
+ health = /* @__PURE__ */ new Map();
  microCache;
  cache;
  inFlight;
@@ -206,14 +254,188 @@ var ResilientHttpClient = class extends EventEmitter {
  if (this.microCache?.enabled && req.method === "GET" && req.body == null) {
  return this.requestWithMicroCache(req);
  }
- return this.existingPipeline(req);
+ return this.execute(req, { allowProbe: false });
+ }
+ snapshot() {
+ let inFlight = 0;
+ let queueDepth = 0;
+ for (const l of this.limiters.values()) {
+ const s = l.snapshot();
+ inFlight += s.inFlight;
+ queueDepth += s.queueDepth;
+ }
+ return { inFlight, queueDepth };
+ }
+ /* ---------------- internals ---------------- */
+ getLimiter(baseKey) {
+ let l = this.limiters.get(baseKey);
+ if (!l) {
+ l = new ConcurrencyLimiter({
+ maxInFlight: this.opts.maxInFlight,
+ maxQueue: this.opts.maxInFlight * 10
+ // hidden factor
+ });
+ this.limiters.set(baseKey, l);
+ }
+ return l;
+ }
+ getHealth(baseKey) {
+ let h = this.health.get(baseKey);
+ if (!h) {
+ h = {
+ state: "OPEN",
+ window: [],
+ windowSize: 20,
+ minSamples: 10,
+ consecutiveHardFails: 0,
+ cooldownBaseMs: 1e3,
+ cooldownCapMs: 3e4,
+ cooldownMs: 1e3,
+ cooldownUntil: 0,
+ probeInFlight: false,
+ probeRemaining: 0,
+ stableNonHard: 0
+ };
+ this.health.set(baseKey, h);
+ }
+ return h;
  }
+ closeHealth(baseKey, reason) {
+ const h = this.getHealth(baseKey);
+ if (h.state === "CLOSED") return;
+ h.state = "CLOSED";
+ h.cooldownUntil = Date.now() + jitterMs(h.cooldownMs);
+ h.cooldownMs = Math.min(h.cooldownMs * 2, h.cooldownCapMs);
+ this.getLimiter(baseKey).flush(new UpstreamUnhealthyError(baseKey, "CLOSED"));
+ const rates = computeRates(h.window);
+ this.emit("health:closed", {
+ baseUrl: baseKey,
+ reason,
+ cooldownMs: h.cooldownUntil - Date.now(),
+ hardFailRate: rates.hardFailRate,
+ failRate: rates.failRate,
+ samples: rates.total
+ });
+ }
+ halfOpenHealth(baseKey) {
+ const h = this.getHealth(baseKey);
+ if (h.state !== "CLOSED") return;
+ h.state = "HALF_OPEN";
+ h.probeInFlight = false;
+ h.probeRemaining = 1;
+ this.emit("health:half_open", { baseUrl: baseKey });
+ }
+ openHealth(baseKey) {
+ const h = this.getHealth(baseKey);
+ h.state = "OPEN";
+ h.window = [];
+ h.consecutiveHardFails = 0;
+ h.probeInFlight = false;
+ h.probeRemaining = 0;
+ h.stableNonHard = 0;
+ this.emit("health:open", { baseUrl: baseKey });
+ }
+ recordOutcome(baseKey, outcome) {
+ const h = this.getHealth(baseKey);
+ h.window.push(outcome);
+ while (h.window.length > h.windowSize) h.window.shift();
+ if (outcome === "HARD_FAIL") h.consecutiveHardFails += 1;
+ else h.consecutiveHardFails = 0;
+ if (h.state === "OPEN") {
+ if (outcome !== "HARD_FAIL") {
+ h.stableNonHard += 1;
+ if (h.stableNonHard >= 5) {
+ h.cooldownMs = h.cooldownBaseMs;
+ }
+ } else {
+ h.stableNonHard = 0;
+ }
+ }
+ if (!this.healthEnabled) return;
+ if (h.consecutiveHardFails >= 3) {
+ this.closeHealth(baseKey, "3 consecutive hard failures");
+ return;
+ }
+ const rates = computeRates(h.window);
+ if (rates.total >= h.minSamples) {
+ if (rates.hardFailRate >= 0.3) {
+ this.closeHealth(baseKey, "hardFailRate >= 30%");
+ return;
+ }
+ if (rates.failRate >= 0.5) {
+ this.closeHealth(baseKey, "failRate >= 50%");
+ return;
+ }
+ }
+ }
+ async execute(req, opts) {
+ const baseKey = baseUrlKey(req.url);
+ const h = this.getHealth(baseKey);
+ const limiter = this.getLimiter(baseKey);
+ if (this.healthEnabled) {
+ if (h.state === "CLOSED") {
+ if (Date.now() >= h.cooldownUntil) {
+ this.halfOpenHealth(baseKey);
+ } else {
+ throw new UpstreamUnhealthyError(baseKey, "CLOSED");
+ }
+ }
+ if (h.state === "HALF_OPEN") {
+ if (!opts.allowProbe) throw new HalfOpenRejectedError(baseKey);
+ if (h.probeRemaining <= 0 || h.probeInFlight) throw new HalfOpenRejectedError(baseKey);
+ h.probeInFlight = true;
+ h.probeRemaining -= 1;
+ }
+ }
+ const requestId = genRequestId();
+ const start = Date.now();
+ try {
+ if (this.healthEnabled && h.state === "HALF_OPEN") {
+ await limiter.acquireNoQueue();
+ } else {
+ await limiter.acquire();
+ }
+ } catch (err) {
+ this.emit("request:rejected", { requestId, request: req, error: err });
+ throw err;
+ }
+ this.emit("request:start", { requestId, request: req });
+ try {
+ const res = await doHttpRequest(req, this.requestTimeoutMs);
+ const durationMs = Date.now() - start;
+ const outcome = classifyHttpStatus(res.status);
+ this.recordOutcome(baseKey, outcome);
+ if (this.healthEnabled && h.state === "HALF_OPEN") {
+ this.emit("health:probe", { baseUrl: baseKey, outcome, status: res.status });
+ if (res.status >= 200 && res.status < 300) {
+ this.openHealth(baseKey);
+ } else {
+ this.closeHealth(baseKey, `probe failed status=${res.status}`);
+ }
+ }
+ this.emit("request:success", { requestId, request: req, status: res.status, durationMs });
+ return res;
+ } catch (err) {
+ const durationMs = Date.now() - start;
+ if (shouldCountAsHardFail(err)) {
+ this.recordOutcome(baseKey, "HARD_FAIL");
+ }
+ if (this.healthEnabled && h.state === "HALF_OPEN") {
+ this.emit("health:probe", { baseUrl: baseKey, outcome: "HARD_FAIL", error: err });
+ this.closeHealth(baseKey, "probe hard failure");
+ }
+ this.emit("request:failure", { requestId, request: req, error: err, durationMs });
+ throw err;
+ } finally {
+ if (this.healthEnabled && h.state === "HALF_OPEN") {
+ h.probeInFlight = false;
+ }
+ limiter.release();
+ }
+ }
+ /* ---------------- microcache ---------------- */
  cloneResponse(res) {
- return {
- status: res.status,
- headers: { ...res.headers },
- body: new Uint8Array(res.body)
- };
+ return { status: res.status, headers: { ...res.headers }, body: new Uint8Array(res.body) };
  }
  maybeCleanupExpired(cache, maxStaleMs) {
  this.microCacheReqCount++;
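The new execute path emits the same request lifecycle events as the old pipeline plus a set of health events. A sketch of subscribing to them; the event names and payload fields are taken from the emit calls in the hunk above, while the logging is purely illustrative.

```ts
import { ResilientHttpClient } from "@nextn/outbound-guard";

declare const client: ResilientHttpClient; // e.g. the instance from the earlier sketch

client.on("health:closed", (e) => {
  console.warn(`[guard] ${e.baseUrl} closed: ${e.reason}`, {
    cooldownMs: e.cooldownMs,
    hardFailRate: e.hardFailRate,
    failRate: e.failRate,
    samples: e.samples,
  });
});
client.on("health:half_open", (e) => console.info(`[guard] ${e.baseUrl} half-open, probing`));
client.on("health:probe", (e) => console.info(`[guard] probe on ${e.baseUrl}:`, e.outcome, e.status ?? e.error));
client.on("health:open", (e) => console.info(`[guard] ${e.baseUrl} healthy again`));
client.on("request:rejected", (e) => console.warn("[guard] rejected before start", e.error));
```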
@@ -242,21 +464,15 @@ var ResilientHttpClient = class extends EventEmitter {
  async fetchWithLeaderRetry(req) {
  const mc = this.microCache;
  const retry = mc.retry;
- if (!retry) return this.existingPipeline(req);
+ if (!retry) return this.execute(req, { allowProbe: false });
  const { maxAttempts, baseDelayMs, maxDelayMs, retryOnStatus } = retry;
  let last;
  for (let attempt = 1; attempt <= maxAttempts; attempt++) {
- const res = await this.existingPipeline(req);
+ const res = await this.execute(req, { allowProbe: false });
  last = res;
  if (this.isRetryableStatus(res.status, retryOnStatus) && attempt < maxAttempts) {
  const delay = this.computeBackoffMs(attempt, baseDelayMs, maxDelayMs);
- this.emit("microcache:retry", {
- url: req.url,
- attempt,
- maxAttempts,
- reason: `status ${res.status}`,
- delayMs: delay
- });
+ this.emit("microcache:retry", { url: req.url, attempt, maxAttempts, reason: `status ${res.status}`, delayMs: delay });
  await sleep(delay);
  continue;
  }
@@ -264,16 +480,6 @@ var ResilientHttpClient = class extends EventEmitter {
  }
  return last;
  }
- /**
- * Window behavior:
- * - 0..ttlMs: return cache (fresh)
- * - ttlMs..maxStaleMs: leader refreshes; others get old value until replaced (stale-while-revalidate)
- * - >maxStaleMs: do not serve old; behave like no cache
- *
- * Follower controls (only when no cache is served):
- * - maxWaiters: cap concurrent followers joining the leader
- * - followerTimeoutMs: shared "join window" from first follower; after it expires, late followers fail fast until leader completes
- */
  async requestWithMicroCache(req) {
  const mc = this.microCache;
  const cache = this.cache;
@@ -282,20 +488,23 @@ var ResilientHttpClient = class extends EventEmitter {
  const key = mc.keyFn(req);
  const now = Date.now();
  const hit0 = cache.get(key);
- if (hit0 && now - hit0.createdAt > mc.maxStaleMs) {
- cache.delete(key);
- }
+ if (hit0 && now - hit0.createdAt > mc.maxStaleMs) cache.delete(key);
  const hit = cache.get(key);
- if (hit && now < hit.expiresAt) {
- return this.cloneResponse(hit.value);
+ if (hit && now < hit.expiresAt) return this.cloneResponse(hit.value);
+ if (this.healthEnabled) {
+ const baseKey = baseUrlKey(req.url);
+ const h = this.getHealth(baseKey);
+ if (h.state === "CLOSED") {
+ const staleAllowed = !!hit && now - hit.createdAt <= mc.maxStaleMs;
+ if (staleAllowed) return this.cloneResponse(hit.value);
+ throw new UpstreamUnhealthyError(baseKey, "CLOSED");
+ }
  }
  const group = inFlight.get(key);
  if (group) {
  const h = cache.get(key);
  const staleAllowed = !!h && now - h.createdAt <= mc.maxStaleMs;
- if (h && staleAllowed) {
- return this.cloneResponse(h.value);
- }
+ if (h && staleAllowed) return this.cloneResponse(h.value);
  const age = now - group.windowStartMs;
  if (age > mc.followerTimeoutMs) {
  const err = new Error(`Follower window closed for key=${key}`);
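Worth noting in the hunk above: when a base URL is CLOSED, the GET micro-cache path still serves an entry that is past ttlMs but within maxStaleMs instead of throwing UpstreamUnhealthyError. A hedged configuration sketch follows; the full MicroCacheOptions field list (and the array shape of retryOnStatus) is inferred from the fields this diff reads, not from documentation.

```ts
import { ResilientHttpClient } from "@nextn/outbound-guard";

// Field names inferred from the code in this diff; values are illustrative.
const cachedClient = new ResilientHttpClient({
  maxInFlight: 4,
  requestTimeoutMs: 1_500,
  microCache: {
    enabled: true,
    ttlMs: 1_000,           // fresh window: served straight from cache
    maxStaleMs: 30_000,     // stale window: leader revalidates, and stale is served while CLOSED
    maxEntries: 500,
    maxWaiters: 50,         // cap on followers joining an in-flight leader
    followerTimeoutMs: 2_000,
    retry: { maxAttempts: 3, baseDelayMs: 100, maxDelayMs: 1_000, retryOnStatus: [502, 503, 504] },
  },
});
```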
@@ -318,24 +527,18 @@ var ResilientHttpClient = class extends EventEmitter {
  const prev = cache.get(key);
  const prevStaleAllowed = !!prev && now - prev.createdAt <= mc.maxStaleMs;
  const promise = (async () => {
- const res = await this.fetchWithLeaderRetry(req);
+ const baseKey = baseUrlKey(req.url);
+ const h = this.getHealth(baseKey);
+ const allowProbe = this.healthEnabled && h.state === "HALF_OPEN";
+ const res = allowProbe ? await this.execute(req, { allowProbe: true }) : await this.fetchWithLeaderRetry(req);
  if (res.status >= 200 && res.status < 300) {
  this.evictIfNeeded(cache, mc.maxEntries);
  const t = Date.now();
- cache.set(key, {
- value: this.cloneResponse(res),
- createdAt: t,
- expiresAt: t + mc.ttlMs
- });
+ cache.set(key, { value: this.cloneResponse(res), createdAt: t, expiresAt: t + mc.ttlMs });
  }
  return res;
  })();
- const newGroup = {
- promise,
- windowStartMs: Date.now(),
- waiters: 0
- };
- inFlight.set(key, newGroup);
+ inFlight.set(key, { promise, windowStartMs: Date.now(), waiters: 0 });
  try {
  const res = await promise;
  if (!(res.status >= 200 && res.status < 300) && prev && prevStaleAllowed) {
@@ -352,40 +555,15 @@ var ResilientHttpClient = class extends EventEmitter {
  inFlight.delete(key);
  }
  }
- async existingPipeline(req) {
- const requestId = genRequestId();
- try {
- await this.limiter.acquire();
- } catch (err) {
- this.emit("request:rejected", { requestId, request: req, error: err });
- throw err;
- }
- const start = Date.now();
- this.emit("request:start", { requestId, request: req });
- try {
- const res = await doHttpRequest(req, this.requestTimeoutMs);
- const durationMs = Date.now() - start;
- this.emit("request:success", { requestId, request: req, status: res.status, durationMs });
- return res;
- } catch (err) {
- const durationMs = Date.now() - start;
- this.emit("request:failure", { requestId, request: req, error: err, durationMs });
- throw err;
- } finally {
- this.limiter.release();
- }
- }
- snapshot() {
- const s = this.limiter.snapshot();
- return { inFlight: s.inFlight, queueDepth: s.queueDepth };
- }
  };
  export {
+ HalfOpenRejectedError,
  QueueFullError,
  QueueTimeoutError,
  RequestTimeoutError,
  ResilientHttpClient,
  ResilientHttpError,
- UpstreamError
+ UpstreamError,
+ UpstreamUnhealthyError
  };
  //# sourceMappingURL=index.js.map