brass-runtime 1.13.8 → 1.15.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +6 -3
- package/dist/agent/cli/main.cjs +44 -43
- package/dist/agent/cli/main.js +5 -4
- package/dist/agent/cli/main.mjs +5 -4
- package/dist/agent/index.cjs +4 -3
- package/dist/agent/index.d.ts +1 -1
- package/dist/agent/index.js +3 -2
- package/dist/agent/index.mjs +3 -2
- package/dist/{chunk-3R7ZYRK2.mjs → chunk-3QMOKAS5.js} +9 -7
- package/dist/{chunk-ATHSSDUF.js → chunk-4NHES7VK.mjs} +113 -31
- package/dist/chunk-AR22SXML.js +1043 -0
- package/dist/chunk-BDF4AMWX.mjs +3773 -0
- package/dist/chunk-BDYEENHT.js +224 -0
- package/dist/chunk-BMH5AV44.js +3773 -0
- package/dist/chunk-ELOOF35R.mjs +131 -0
- package/dist/chunk-JFPU5GQI.mjs +1043 -0
- package/dist/{chunk-INZBKOHY.js → chunk-K6M7MDZ4.mjs} +9 -7
- package/dist/chunk-MS34J5LY.cjs +224 -0
- package/dist/{chunk-XNOTJSMZ.mjs → chunk-PPUXIH5R.js} +113 -31
- package/dist/chunk-R3R2FVLG.cjs +131 -0
- package/dist/{chunk-ZTDK2DLG.cjs → chunk-STVLQ3XD.cjs} +169 -87
- package/dist/chunk-TGIFUAK4.cjs +3773 -0
- package/dist/chunk-TO7IKXYT.js +131 -0
- package/dist/chunk-UMAZLXAB.mjs +224 -0
- package/dist/{chunk-XDINDYNA.cjs → chunk-VEZNF5GZ.cjs} +136 -134
- package/dist/chunk-XPZNXSVN.cjs +1043 -0
- package/dist/core/index.cjs +216 -0
- package/dist/core/index.d.ts +673 -0
- package/dist/core/index.js +216 -0
- package/dist/core/index.mjs +216 -0
- package/dist/{effect-ISvXPLgc.d.ts → effect-CMOQKX8y.d.ts} +202 -31
- package/dist/http/index.cjs +3177 -187
- package/dist/http/index.d.ts +1692 -9
- package/dist/http/index.js +3164 -174
- package/dist/http/index.mjs +3164 -174
- package/dist/index.cjs +936 -219
- package/dist/index.d.ts +313 -36
- package/dist/index.js +830 -113
- package/dist/index.mjs +830 -113
- package/dist/{stream-BvukHxCv.d.ts → stream-FQm9h4Mg.d.ts} +12 -4
- package/dist/tracing-DNT9jEbr.d.ts +106 -0
- package/package.json +11 -3
- package/wasm/pkg/brass_runtime_wasm_engine.d.ts +95 -16
- package/wasm/pkg/brass_runtime_wasm_engine.js +715 -15
- package/wasm/pkg/brass_runtime_wasm_engine_bg.wasm +0 -0
- package/wasm/pkg/brass_runtime_wasm_engine_bg.wasm.d.ts +78 -7
- package/dist/chunk-2P4PD6D7.cjs +0 -2557
- package/dist/chunk-7F2R7A2V.mjs +0 -2557
- package/dist/chunk-L6KKKM66.js +0 -2557
package/dist/http/index.d.ts
CHANGED
|
@@ -1,14 +1,148 @@
|
|
|
1
|
-
import { A as Async,
|
|
2
|
-
import { Z as ZStream } from '../stream-
|
|
1
|
+
import { A as Async, t as AsyncWithPromise } from '../effect-CMOQKX8y.js';
|
|
2
|
+
import { Z as ZStream } from '../stream-FQm9h4Mg.js';
|
|
3
|
+
import { a as CircuitBreakerConfig, T as Tracer } from '../tracing-DNT9jEbr.js';
|
|
3
4
|
|
|
5
|
+
/**
|
|
6
|
+
* Observable event emitted on each retry attempt via the `onRetry` callback.
|
|
7
|
+
*/
|
|
8
|
+
type RetryEvent = {
|
|
9
|
+
/** Zero-based attempt number (0 = first retry, not the initial request). */
|
|
10
|
+
attempt: number;
|
|
11
|
+
/** Computed delay in milliseconds before the next attempt. */
|
|
12
|
+
delayMs: number;
|
|
13
|
+
/** The error that triggered this retry, if the request failed with an HttpError. */
|
|
14
|
+
error?: HttpError;
|
|
15
|
+
/** The HTTP status code that triggered this retry, if the request returned a retryable status. */
|
|
16
|
+
status?: number;
|
|
17
|
+
/** The request URL. */
|
|
18
|
+
url: string;
|
|
19
|
+
/** The request HTTP method. */
|
|
20
|
+
method: HttpMethod;
|
|
21
|
+
/** Timestamp (ms since epoch) when the retry decision was made. */
|
|
22
|
+
timestamp: number;
|
|
23
|
+
};
|
|
24
|
+
/**
|
|
25
|
+
* Per-request retry override. Attached as `(req as any).retry`.
|
|
26
|
+
* - `false` disables retry entirely for this request.
|
|
27
|
+
* - A partial policy object merges with the middleware-level policy (per-request wins).
|
|
28
|
+
*/
|
|
29
|
+
type PerRequestRetryOverride = false | {
|
|
30
|
+
maxRetries?: number;
|
|
31
|
+
baseDelayMs?: number;
|
|
32
|
+
maxDelayMs?: number;
|
|
33
|
+
retryOnStatus?: (status: number) => boolean;
|
|
34
|
+
};
|
|
4
35
|
type RetryPolicy = {
|
|
5
36
|
maxRetries: number;
|
|
6
37
|
baseDelayMs: number;
|
|
7
38
|
maxDelayMs: number;
|
|
39
|
+
/** Optional total retry budget, including request attempts and sleeps. */
|
|
40
|
+
maxElapsedMs?: number;
|
|
41
|
+
/** Defaults to true. When true, Retry-After is honored but capped by maxDelayMs/budget. */
|
|
42
|
+
respectRetryAfter?: boolean;
|
|
8
43
|
retryOnMethods?: HttpMethod[];
|
|
9
44
|
retryOnStatus?: (status: number) => boolean;
|
|
10
45
|
retryOnError?: (e: HttpError) => boolean;
|
|
46
|
+
/** Strict engine selector for retry planning. Defaults to ts. */
|
|
47
|
+
engine?: "ts" | "wasm";
|
|
48
|
+
/** Back-compat knob: wasm=true maps to engine="wasm", wasm=false maps to engine="ts". */
|
|
49
|
+
wasm?: boolean;
|
|
50
|
+
/** Called synchronously before each retry delay begins. Zero overhead when omitted. */
|
|
51
|
+
onRetry?: (event: RetryEvent) => void;
|
|
52
|
+
};
|
|
53
|
+
declare const defaultRetryableMethods: HttpMethod[];
|
|
54
|
+
declare const defaultRetryOnStatus: (s: number) => s is 408 | 429 | 500 | 502 | 503 | 504;
|
|
55
|
+
declare const defaultRetryOnError: (e: HttpError) => e is {
|
|
56
|
+
_tag: "FetchError";
|
|
57
|
+
message: string;
|
|
58
|
+
} | {
|
|
59
|
+
_tag: "Timeout";
|
|
60
|
+
timeoutMs: number;
|
|
61
|
+
message: string;
|
|
62
|
+
phase?: "request" | "queue" | "retry";
|
|
63
|
+
} | {
|
|
64
|
+
_tag: "PoolTimeout";
|
|
65
|
+
key: string;
|
|
66
|
+
timeoutMs: number;
|
|
67
|
+
message: string;
|
|
11
68
|
};
|
|
69
|
+
declare const backoffDelayMs: (attempt: number, base: number, cap: number) => number;
|
|
70
|
+
declare const retryAfterMs: (headers: Record<string, string>) => number | undefined;
|
|
71
|
+
declare const normalizeRetryBudget: (ms: number | undefined) => number | undefined;
|
|
72
|
+
declare const withRetry: (p: RetryPolicy) => HttpMiddleware;
|
|
73
|
+
|
|
74
|
+
type HttpPoolKeyResolver = "global" | "origin" | "host" | ((req: HttpRequest, url: URL) => string);
|
|
75
|
+
type HttpPoolConfig = {
|
|
76
|
+
/** Max concurrent downstream calls per resolved key. */
|
|
77
|
+
readonly concurrency?: number;
|
|
78
|
+
/** Max queued waiters per key. `0` means fail fast when the pool is full. */
|
|
79
|
+
readonly maxQueue?: number;
|
|
80
|
+
/** Max time a request may wait for a pool slot before failing fast. */
|
|
81
|
+
readonly queueTimeoutMs?: number;
|
|
82
|
+
/** How to isolate pools. Default: `origin`; useful values: `global`, `host`, `origin`. */
|
|
83
|
+
readonly key?: HttpPoolKeyResolver;
|
|
84
|
+
/**
|
|
85
|
+
* Strict engine selector for permit governance. Defaults to ts.
|
|
86
|
+
* - ts: TypeScript permit pool.
|
|
87
|
+
* - wasm: require BrassWasmHttpPermitPool from wasm/pkg; never falls back.
|
|
88
|
+
*/
|
|
89
|
+
readonly engine?: "ts" | "wasm";
|
|
90
|
+
/** Back-compat knob: wasm=true maps to engine="wasm", wasm=false maps to engine="ts". */
|
|
91
|
+
readonly wasm?: boolean;
|
|
92
|
+
};
|
|
93
|
+
type HttpPoolKeyStats = {
|
|
94
|
+
readonly key: string;
|
|
95
|
+
readonly running: number;
|
|
96
|
+
readonly queued: number;
|
|
97
|
+
readonly concurrency: number;
|
|
98
|
+
readonly maxQueue: number;
|
|
99
|
+
readonly acquired: number;
|
|
100
|
+
readonly released: number;
|
|
101
|
+
readonly rejected: number;
|
|
102
|
+
readonly queueTimeouts: number;
|
|
103
|
+
readonly abortedWhileQueued: number;
|
|
104
|
+
};
|
|
105
|
+
type HttpPoolStats = {
|
|
106
|
+
readonly running: number;
|
|
107
|
+
readonly queued: number;
|
|
108
|
+
readonly acquired: number;
|
|
109
|
+
readonly released: number;
|
|
110
|
+
readonly rejected: number;
|
|
111
|
+
readonly queueTimeouts: number;
|
|
112
|
+
readonly abortedWhileQueued: number;
|
|
113
|
+
readonly wasm?: unknown;
|
|
114
|
+
readonly keys: HttpPoolKeyStats[];
|
|
115
|
+
};
|
|
116
|
+
type HttpPoolLease = {
|
|
117
|
+
readonly key: string;
|
|
118
|
+
release: () => void;
|
|
119
|
+
};
|
|
120
|
+
declare function resolveHttpPoolKey(resolver: HttpPoolKeyResolver | undefined, req: HttpRequest, url: URL): string;
|
|
121
|
+
declare class HttpConcurrencyPool {
|
|
122
|
+
private readonly states;
|
|
123
|
+
private readonly concurrency;
|
|
124
|
+
private readonly maxQueue;
|
|
125
|
+
private readonly queueTimeoutMs;
|
|
126
|
+
readonly keyResolver: HttpPoolKeyResolver | undefined;
|
|
127
|
+
private readonly wasm;
|
|
128
|
+
private readonly wasmWaiters;
|
|
129
|
+
private wasmTimer;
|
|
130
|
+
private nextSubjectId;
|
|
131
|
+
constructor(config?: HttpPoolConfig);
|
|
132
|
+
acquire(key: string, signal: AbortSignal): Promise<HttpPoolLease>;
|
|
133
|
+
stats(): HttpPoolStats;
|
|
134
|
+
private acquireJs;
|
|
135
|
+
private acquireWasm;
|
|
136
|
+
private getState;
|
|
137
|
+
private makeLease;
|
|
138
|
+
private drain;
|
|
139
|
+
private handleWasmGrants;
|
|
140
|
+
private handleWasmTimeouts;
|
|
141
|
+
private scheduleWasmTimeoutPump;
|
|
142
|
+
private cleanupWaiter;
|
|
143
|
+
private removeWaiter;
|
|
144
|
+
private allocateSubjectId;
|
|
145
|
+
}
|
|
12
146
|
|
|
13
147
|
type HttpError = {
|
|
14
148
|
_tag: "Abort";
|
|
@@ -18,15 +152,35 @@ type HttpError = {
|
|
|
18
152
|
} | {
|
|
19
153
|
_tag: "FetchError";
|
|
20
154
|
message: string;
|
|
155
|
+
} | {
|
|
156
|
+
_tag: "Timeout";
|
|
157
|
+
timeoutMs: number;
|
|
158
|
+
message: string;
|
|
159
|
+
phase?: "request" | "queue" | "retry";
|
|
160
|
+
} | {
|
|
161
|
+
_tag: "PoolRejected";
|
|
162
|
+
key: string;
|
|
163
|
+
limit: number;
|
|
164
|
+
message: string;
|
|
165
|
+
} | {
|
|
166
|
+
_tag: "PoolTimeout";
|
|
167
|
+
key: string;
|
|
168
|
+
timeoutMs: number;
|
|
169
|
+
message: string;
|
|
21
170
|
};
|
|
22
171
|
type HttpMethod = "GET" | "POST" | "PUT" | "PATCH" | "DELETE" | "HEAD" | "OPTIONS";
|
|
23
172
|
type HttpInit = Omit<RequestInit, "method" | "body" | "headers">;
|
|
173
|
+
type HttpBody = string | Uint8Array | ArrayBuffer;
|
|
24
174
|
type HttpRequest = {
|
|
25
175
|
method: HttpMethod;
|
|
26
176
|
url: string;
|
|
27
177
|
headers?: Record<string, string>;
|
|
28
|
-
body?:
|
|
178
|
+
body?: HttpBody;
|
|
29
179
|
init?: HttpInit;
|
|
180
|
+
/** Per-request override for `MakeHttpConfig.timeoutMs`. */
|
|
181
|
+
timeoutMs?: number;
|
|
182
|
+
/** Optional stable key for downstream isolation. When omitted, the pool uses origin/host/global config. */
|
|
183
|
+
poolKey?: string;
|
|
30
184
|
};
|
|
31
185
|
type HttpWireResponse = {
|
|
32
186
|
status: number;
|
|
@@ -35,9 +189,25 @@ type HttpWireResponse = {
|
|
|
35
189
|
bodyText: string;
|
|
36
190
|
ms: number;
|
|
37
191
|
};
|
|
192
|
+
type HttpClientStats = {
|
|
193
|
+
readonly inFlight: number;
|
|
194
|
+
readonly started: number;
|
|
195
|
+
readonly succeeded: number;
|
|
196
|
+
readonly failed: number;
|
|
197
|
+
readonly aborted: number;
|
|
198
|
+
readonly timedOut: number;
|
|
199
|
+
readonly poolRejected: number;
|
|
200
|
+
readonly poolTimeouts: number;
|
|
201
|
+
readonly lastDurationMs?: number;
|
|
202
|
+
readonly pool?: HttpPoolStats;
|
|
203
|
+
};
|
|
38
204
|
type MakeHttpConfig = {
|
|
39
205
|
baseUrl?: string;
|
|
40
206
|
headers?: Record<string, string>;
|
|
207
|
+
/** Request budget covering pool wait + fetch + body read. Disabled when omitted. */
|
|
208
|
+
timeoutMs?: number;
|
|
209
|
+
/** Downstream pool/concurrency limiter. Disabled by default to preserve existing behavior. */
|
|
210
|
+
pool?: false | HttpPoolConfig;
|
|
41
211
|
};
|
|
42
212
|
type HttpWireResponseStream = {
|
|
43
213
|
status: number;
|
|
@@ -46,20 +216,28 @@ type HttpWireResponseStream = {
|
|
|
46
216
|
body: ZStream<unknown, HttpError, Uint8Array>;
|
|
47
217
|
ms: number;
|
|
48
218
|
};
|
|
49
|
-
type
|
|
219
|
+
type HttpClientStreamFn = (req: HttpRequest) => Async<unknown, HttpError, HttpWireResponseStream>;
|
|
220
|
+
type HttpClientStream = HttpClientStreamFn & {
|
|
221
|
+
stats: () => HttpClientStats;
|
|
222
|
+
};
|
|
223
|
+
type HttpClientFn = (req: HttpRequest) => Async<unknown, HttpError, HttpWireResponse>;
|
|
224
|
+
type HttpMiddleware = (next: HttpClientFn) => HttpClientFn;
|
|
50
225
|
type HttpClient = HttpClientFn & {
|
|
51
226
|
with: (mw: HttpMiddleware) => HttpClient;
|
|
227
|
+
stats: () => HttpClientStats;
|
|
52
228
|
};
|
|
229
|
+
declare const decorate: (run: HttpClientFn, stats?: () => HttpClientStats) => HttpClient;
|
|
53
230
|
declare const withMiddleware: (mw: HttpMiddleware) => (c: HttpClient) => HttpClient;
|
|
54
|
-
declare const decorate: (run: HttpClientFn) => HttpClient;
|
|
55
|
-
type HttpClientFn = (req: HttpRequest) => Async<unknown, HttpError, HttpWireResponse>;
|
|
56
|
-
type HttpMiddleware = (next: HttpClientFn) => HttpClientFn;
|
|
57
231
|
declare const normalizeHeadersInit: (h: any) => Record<string, string> | undefined;
|
|
58
232
|
declare function makeHttpStream(cfg?: MakeHttpConfig): HttpClientStream;
|
|
59
233
|
declare function makeHttp(cfg?: MakeHttpConfig): HttpClient;
|
|
60
234
|
declare const withRetryStream: (p: RetryPolicy) => (next: HttpClientStream) => HttpClientStream;
|
|
61
235
|
|
|
62
|
-
type InitNoMethodBody = Omit<RequestInit, "method" | "body"
|
|
236
|
+
type InitNoMethodBody = Omit<RequestInit, "method" | "body"> & {
|
|
237
|
+
timeoutMs?: number;
|
|
238
|
+
poolKey?: string;
|
|
239
|
+
headers?: any;
|
|
240
|
+
};
|
|
63
241
|
type HttpMeta = {
|
|
64
242
|
request: HttpRequest;
|
|
65
243
|
urlFinal: string;
|
|
@@ -83,6 +261,8 @@ type HttpResponseWithMeta<A> = {
|
|
|
83
261
|
};
|
|
84
262
|
type AnyInitWithHeaders = {
|
|
85
263
|
headers?: any;
|
|
264
|
+
timeoutMs?: number;
|
|
265
|
+
poolKey?: string;
|
|
86
266
|
} & Record<string, any>;
|
|
87
267
|
type Dx = {
|
|
88
268
|
request: (req: HttpRequest) => AsyncWithPromise<unknown, HttpError, HttpWireResponse>;
|
|
@@ -94,6 +274,7 @@ type Dx = {
|
|
|
94
274
|
with: (mw: HttpMiddleware) => Dx;
|
|
95
275
|
withRetry: (p: RetryPolicy) => Dx;
|
|
96
276
|
wire: HttpClient;
|
|
277
|
+
stats: () => ReturnType<HttpClient["stats"]>;
|
|
97
278
|
};
|
|
98
279
|
declare function httpClient(cfg?: MakeHttpConfig): Dx;
|
|
99
280
|
declare function httpClientWithMeta(cfg?: MakeHttpConfig): {
|
|
@@ -149,6 +330,1508 @@ declare function httpClientStream(cfg?: MakeHttpConfig): {
|
|
|
149
330
|
with: (mw: (n: HttpClientStream) => HttpClientStream) => /*elided*/ any;
|
|
150
331
|
withRetry: (p: RetryPolicy) => /*elided*/ any;
|
|
151
332
|
wire: HttpClientStream;
|
|
333
|
+
stats: () => HttpClientStats;
|
|
334
|
+
};
|
|
335
|
+
|
|
336
|
+
type HttpCircuitBreakerConfig = CircuitBreakerConfig & {
|
|
337
|
+
/** Key resolver for per-origin circuit breakers. Default: per-origin. */
|
|
338
|
+
perOrigin?: boolean;
|
|
339
|
+
};
|
|
340
|
+
/**
|
|
341
|
+
* HTTP middleware that wraps requests in a circuit breaker.
|
|
342
|
+
* When the circuit opens, requests fail fast with CircuitBreakerOpen error.
|
|
343
|
+
*/
|
|
344
|
+
declare function withCircuitBreaker(config?: HttpCircuitBreakerConfig): HttpMiddleware;
|
|
345
|
+
|
|
346
|
+
/**
|
|
347
|
+
* HTTP middleware that creates a span for each request.
|
|
348
|
+
*/
|
|
349
|
+
declare function withTracing(tracer: Tracer): HttpMiddleware;
|
|
350
|
+
|
|
351
|
+
type ValidationError = {
|
|
352
|
+
_tag: "ValidationError";
|
|
353
|
+
message: string;
|
|
354
|
+
body: string;
|
|
355
|
+
schema?: string;
|
|
356
|
+
};
|
|
357
|
+
type JsonValidator<A> = (data: unknown) => {
|
|
358
|
+
success: true;
|
|
359
|
+
data: A;
|
|
360
|
+
} | {
|
|
361
|
+
success: false;
|
|
362
|
+
error: string;
|
|
363
|
+
};
|
|
364
|
+
/**
|
|
365
|
+
* Creates a validated JSON getter that checks the response body against a schema.
|
|
366
|
+
*
|
|
367
|
+
* Usage:
|
|
368
|
+
* ```ts
|
|
369
|
+
* const getUser = validatedJson<User>(client, (data) => {
|
|
370
|
+
* if (typeof data === "object" && data !== null && "id" in data) {
|
|
371
|
+
* return { success: true, data: data as User };
|
|
372
|
+
* }
|
|
373
|
+
* return { success: false, error: "Invalid user shape" };
|
|
374
|
+
* });
|
|
375
|
+
*
|
|
376
|
+
* const user = await run(getUser({ method: "GET", url: "/users/1" }));
|
|
377
|
+
* ```
|
|
378
|
+
*/
|
|
379
|
+
declare function validatedJson<A>(client: HttpClientFn, validator: JsonValidator<A>): (req: Parameters<HttpClientFn>[0]) => Async<unknown, HttpError | ValidationError, A>;
|
|
380
|
+
|
|
381
|
+
/**
|
|
382
|
+
* Configuration for the deduplication layer.
|
|
383
|
+
*
|
|
384
|
+
* When enabled, the dedup layer collapses concurrent identical requests into a single
|
|
385
|
+
* in-flight Async_Effect, sharing the response across all callers with the same Cache_Key.
|
|
386
|
+
*
|
|
387
|
+
* @property {function} [dedupKey] - Custom key function that computes a dedup key from an HttpRequest.
|
|
388
|
+
* When provided, overrides the default Cache_Key computation. Default: undefined (uses default key derivation).
|
|
389
|
+
*/
|
|
390
|
+
type DedupConfig$1 = {
|
|
391
|
+
/** Custom key function. When provided, overrides default Cache_Key computation. */
|
|
392
|
+
dedupKey?: (req: HttpRequest) => string;
|
|
393
|
+
/** Internal lifecycle observer. Public callers should prefer LifecycleClientConfig.onEvent. */
|
|
394
|
+
onEvent?: (event: {
|
|
395
|
+
type: "dedup-hit" | "dedup-miss" | "dedup-active";
|
|
396
|
+
cacheKey?: string;
|
|
397
|
+
active?: number;
|
|
398
|
+
}) => void;
|
|
399
|
+
};
|
|
400
|
+
/**
|
|
401
|
+
* Configuration for the response cache layer.
|
|
402
|
+
*
|
|
403
|
+
* Controls how responses are stored and retrieved from the in-memory LRU cache.
|
|
404
|
+
* Each cached entry is keyed by its deterministic Cache_Key.
|
|
405
|
+
*
|
|
406
|
+
* @property {number} [ttlSeconds] - Time-to-live in seconds for cached entries. Default: 60. Valid range: [1, 86400].
|
|
407
|
+
* @property {number} [maxEntries] - Maximum number of cached entries before LRU eviction. Default: 1024. Valid range: >= 1.
|
|
408
|
+
* @property {boolean} [staleWhileRevalidate] - When true, serves stale cache entries while revalidating in the background. Default: false.
|
|
409
|
+
* @property {function} [cachePolicy] - Custom cache policy function that determines cacheability and optional TTL override for a given request/response pair. Default: undefined (uses built-in policy).
|
|
410
|
+
* @property {string[]} [cacheRelevantHeaders] - Additional HTTP headers to include in Cache_Key computation beyond the defaults. Default: undefined (uses DEFAULT_CACHE_RELEVANT_HEADERS).
|
|
411
|
+
*/
|
|
412
|
+
type CacheConfig$1 = {
|
|
413
|
+
/** Time-to-live in seconds. Default: 60. Range: [1, 86400]. */
|
|
414
|
+
ttlSeconds?: number;
|
|
415
|
+
/** Maximum number of cached entries. Default: 1024. Minimum: 1. */
|
|
416
|
+
maxEntries?: number;
|
|
417
|
+
/** Enable stale-while-revalidate. Default: false. */
|
|
418
|
+
staleWhileRevalidate?: boolean;
|
|
419
|
+
/** Custom cache policy function. */
|
|
420
|
+
cachePolicy?: (req: HttpRequest, res: HttpWireResponse) => CachePolicyResult$1;
|
|
421
|
+
/** Additional headers to include in Cache_Key computation. */
|
|
422
|
+
cacheRelevantHeaders?: string[];
|
|
423
|
+
/** Cache-specific observer for stale revalidation failures. */
|
|
424
|
+
onEvent?: (event: {
|
|
425
|
+
type: string;
|
|
426
|
+
cacheKey?: string;
|
|
427
|
+
error?: any;
|
|
428
|
+
}) => void;
|
|
429
|
+
/** Internal lifecycle observer. Public callers should prefer LifecycleClientConfig.onEvent. */
|
|
430
|
+
onLifecycleEvent?: (event: {
|
|
431
|
+
type: "cache-hit" | "cache-miss" | "cache-eviction";
|
|
432
|
+
cacheKey?: string;
|
|
433
|
+
count?: number;
|
|
434
|
+
}) => void;
|
|
435
|
+
};
|
|
436
|
+
/**
|
|
437
|
+
* Result of a custom cache policy evaluation.
|
|
438
|
+
*
|
|
439
|
+
* Returned by the `cachePolicy` function in {@link CacheConfig} to control
|
|
440
|
+
* whether a response should be stored in the cache and for how long.
|
|
441
|
+
*
|
|
442
|
+
* @property {boolean} cacheable - Whether the response should be cached. Required.
|
|
443
|
+
* @property {number} [ttlSeconds] - Optional TTL override in seconds. When provided, takes precedence over the global CacheConfig ttlSeconds. Valid range: [1, 86400].
|
|
444
|
+
*/
|
|
445
|
+
type CachePolicyResult$1 = {
|
|
446
|
+
/** Whether the response should be cached. */
|
|
447
|
+
cacheable: boolean;
|
|
448
|
+
/** Optional TTL override in seconds. */
|
|
449
|
+
ttlSeconds?: number;
|
|
450
|
+
};
|
|
451
|
+
/**
|
|
452
|
+
* Configuration for the priority scheduler layer.
|
|
453
|
+
*
|
|
454
|
+
* The priority scheduler orders outgoing requests by priority level and limits
|
|
455
|
+
* concurrency to prevent overwhelming the downstream Wire_Client.
|
|
456
|
+
*
|
|
457
|
+
* @property {number} [concurrency] - Maximum concurrent requests dispatched by the priority scheduler. Default: 32. Valid range: >= 1.
|
|
458
|
+
* @property {number} [queueTimeoutMs] - Queue timeout in milliseconds for priority-queued requests. When a request waits longer than this value, it is rejected. Default: undefined (no timeout).
|
|
459
|
+
*/
|
|
460
|
+
type PriorityConfig$1 = {
|
|
461
|
+
/** Maximum concurrent requests dispatched by the priority scheduler. Default: 32. Valid range: >= 1. */
|
|
462
|
+
concurrency?: number;
|
|
463
|
+
/** Queue timeout in ms for priority-queued requests. Default: no timeout. */
|
|
464
|
+
queueTimeoutMs?: number;
|
|
465
|
+
/** Internal lifecycle observer. Public callers should prefer LifecycleClientConfig.onEvent. */
|
|
466
|
+
onEvent?: (event: {
|
|
467
|
+
type: "queue-enqueue" | "queue-dispatch";
|
|
468
|
+
priority: number;
|
|
469
|
+
}) => void;
|
|
470
|
+
};
|
|
471
|
+
/**
|
|
472
|
+
* Configuration for creating a lifecycle client.
|
|
473
|
+
*
|
|
474
|
+
* Extends MakeHttpConfig with optional lifecycle layer configurations.
|
|
475
|
+
* Each layer (dedup, cache, priority) can be configured with an options object
|
|
476
|
+
* or explicitly disabled by setting it to `false`. When omitted, the layer is disabled
|
|
477
|
+
* (zero-cost when disabled).
|
|
478
|
+
*
|
|
479
|
+
* @property {DedupConfig | false} [dedup] - Dedup layer configuration. Set to an object to enable with options, or `false` to explicitly disable. Default: undefined (disabled).
|
|
480
|
+
* @property {CacheConfig | false} [cache] - Cache layer configuration. Set to an object to enable with options, or `false` to explicitly disable. Default: undefined (disabled).
|
|
481
|
+
* @property {PriorityConfig | false} [priority] - Priority scheduler configuration. Set to an object to enable with options, or `false` to explicitly disable. Default: undefined (disabled).
|
|
482
|
+
* @property {function} [onEvent] - Optional event observer callback invoked for each {@link LifecycleEvent} during request processing. Default: undefined.
|
|
483
|
+
*/
|
|
484
|
+
type LifecycleClientConfig = MakeHttpConfig & {
|
|
485
|
+
/** Dedup layer config. Set to `false` to explicitly disable. Default: undefined (disabled). */
|
|
486
|
+
dedup?: DedupConfig$1 | false;
|
|
487
|
+
/** Cache layer config. Set to `false` to explicitly disable. Default: undefined (disabled). */
|
|
488
|
+
cache?: CacheConfig$1 | false;
|
|
489
|
+
/** Priority scheduler config. Set to `false` to explicitly disable. Default: undefined (disabled). */
|
|
490
|
+
priority?: PriorityConfig$1 | false;
|
|
491
|
+
/** Retry policy. Set to `false` to explicitly disable. Default: undefined (disabled). */
|
|
492
|
+
retry?: RetryPolicy | false;
|
|
493
|
+
/** Optional event observer for lifecycle events. */
|
|
494
|
+
onEvent?: (event: LifecycleEvent) => void;
|
|
495
|
+
};
|
|
496
|
+
/**
|
|
497
|
+
* The lifecycle client interface.
|
|
498
|
+
*
|
|
499
|
+
* A callable HTTP client function (Wire_Client wrapper) with additional lifecycle
|
|
500
|
+
* management methods. Supports middleware composition via `.with()`, statistics
|
|
501
|
+
* via `.stats()`, and bulk cancellation via `.cancelAll()`.
|
|
502
|
+
*
|
|
503
|
+
* @property {function} with - Apply middleware, returning a new LifecycleClient with the middleware applied.
|
|
504
|
+
* @property {function} stats - Return a frozen snapshot of {@link LifecycleStats}.
|
|
505
|
+
* @property {function} cancelAll - Cancel all in-flight and queued requests, returning an Async_Effect that resolves when cancellation is complete.
|
|
506
|
+
* @property {object} cache - Cache management methods for manual invalidation.
|
|
507
|
+
* @property {function} cache.invalidate - Invalidate a specific cache entry by its Cache_Key.
|
|
508
|
+
* @property {function} cache.clear - Clear all cache entries.
|
|
509
|
+
*/
|
|
510
|
+
type LifecycleClient = HttpClientFn & {
|
|
511
|
+
/** Apply middleware, returning a new LifecycleClient with the middleware applied. */
|
|
512
|
+
with: (mw: HttpMiddleware) => LifecycleClient;
|
|
513
|
+
/** Return a frozen snapshot of lifecycle statistics. */
|
|
514
|
+
stats: () => LifecycleStats;
|
|
515
|
+
/** Cancel all in-flight and queued requests. Returns an Async_Effect that resolves when complete. */
|
|
516
|
+
cancelAll: () => Async<unknown, never, void>;
|
|
517
|
+
/** Cache management methods. */
|
|
518
|
+
cache: {
|
|
519
|
+
/** Invalidate a specific cache entry by Cache_Key. */
|
|
520
|
+
invalidate: (key: string) => void;
|
|
521
|
+
/** Clear all cache entries. */
|
|
522
|
+
clear: () => void;
|
|
523
|
+
};
|
|
524
|
+
};
|
|
525
|
+
/**
|
|
526
|
+
* Lifecycle event types emitted during request processing.
|
|
527
|
+
*
|
|
528
|
+
* Each value represents a distinct point in the lifecycle pipeline:
|
|
529
|
+
* - `"request-start"` — Emitted when a request enters the lifecycle pipeline.
|
|
530
|
+
* - `"request-end"` — Emitted when a request completes (success or failure).
|
|
531
|
+
* - `"cache-hit"` — Emitted when a response is served from the cache.
|
|
532
|
+
* - `"cache-miss"` — Emitted when a request misses the cache and proceeds to the Wire_Client.
|
|
533
|
+
* - `"dedup-hit"` — Emitted when a request is collapsed into an existing in-flight Async_Effect.
|
|
534
|
+
* - `"dedup-miss"` — Emitted when a request initiates a new in-flight Async_Effect (no existing match).
|
|
535
|
+
* - `"queue-enqueue"` — Emitted when a request is enqueued in the priority scheduler.
|
|
536
|
+
* - `"queue-dispatch"` — Emitted when a queued request is dispatched to the Wire_Client.
|
|
537
|
+
* - `"retry"` — Emitted when the retry middleware schedules another attempt.
|
|
538
|
+
*/
|
|
539
|
+
type LifecycleEventType = "request-start" | "request-end" | "cache-hit" | "cache-miss" | "dedup-hit" | "dedup-miss" | "queue-enqueue" | "queue-dispatch" | "retry";
|
|
540
|
+
/**
|
|
541
|
+
* A lifecycle event emitted to the onEvent observer.
|
|
542
|
+
*
|
|
543
|
+
* Provides observability into the lifecycle pipeline by reporting events
|
|
544
|
+
* as they occur during request processing.
|
|
545
|
+
*
|
|
546
|
+
* @property {LifecycleEventType} type - The type of lifecycle event. Required.
|
|
547
|
+
* @property {number} timestamp - Timestamp in milliseconds (from `Date.now()`) when the event occurred. Required.
|
|
548
|
+
* @property {string} [cacheKey] - The Cache_Key associated with the event, if applicable (present for cache and dedup events).
|
|
549
|
+
* @property {number} [priority] - Priority level associated with the event, if applicable (present for queue events). Valid range: 0-9.
|
|
550
|
+
* @property {number} [attempt] - Zero-based retry attempt, if applicable.
|
|
551
|
+
* @property {number} [delayMs] - Retry delay in milliseconds, if applicable.
|
|
552
|
+
* @property {number} [status] - HTTP status that triggered retry, if applicable.
|
|
553
|
+
* @property {string} [errorTag] - HttpError tag that triggered retry, if applicable.
|
|
554
|
+
*/
|
|
555
|
+
type LifecycleEvent = {
|
|
556
|
+
/** The type of lifecycle event. */
|
|
557
|
+
type: LifecycleEventType;
|
|
558
|
+
/** Timestamp (ms) when the event occurred. */
|
|
559
|
+
timestamp: number;
|
|
560
|
+
/** Cache_Key associated with the event, if applicable. */
|
|
561
|
+
cacheKey?: string;
|
|
562
|
+
/** Priority level associated with the event, if applicable. Valid range: 0-9. */
|
|
563
|
+
priority?: number;
|
|
564
|
+
/** Zero-based retry attempt, if applicable. */
|
|
565
|
+
attempt?: number;
|
|
566
|
+
/** Retry delay in milliseconds, if applicable. */
|
|
567
|
+
delayMs?: number;
|
|
568
|
+
/** HTTP status that triggered retry, if applicable. */
|
|
569
|
+
status?: number;
|
|
570
|
+
/** HttpError tag that triggered retry, if applicable. */
|
|
571
|
+
errorTag?: string;
|
|
572
|
+
};
|
|
573
|
+
/**
|
|
574
|
+
* Lifecycle statistics snapshot.
|
|
575
|
+
*
|
|
576
|
+
* All counters start at zero and increase monotonically. Returned as a frozen
|
|
577
|
+
* object by {@link LifecycleClient.stats}.
|
|
578
|
+
*
|
|
579
|
+
* @property {number} cacheHits - Number of cache hits (responses served from cache). Default: 0.
|
|
580
|
+
* @property {number} cacheMisses - Number of cache misses (requests forwarded to Wire_Client). Default: 0.
|
|
581
|
+
* @property {number} cacheEvictions - Number of cache evictions triggered by LRU policy. Default: 0.
|
|
582
|
+
* @property {number} dedupHits - Number of dedup hits (requests collapsed into an existing in-flight Async_Effect). Default: 0.
|
|
583
|
+
* @property {number} dedupActive - Number of currently active dedup groups (in-flight unique Cache_Keys). Default: 0.
|
|
584
|
+
* @property {number} queueDepth - Current depth of the priority queue (requests waiting to be dispatched). Default: 0.
|
|
585
|
+
* @property {number} requestsStarted - Total number of requests that entered the lifecycle pipeline. Default: 0.
|
|
586
|
+
* @property {number} requestsCompleted - Total number of requests that completed successfully. Default: 0.
|
|
587
|
+
* @property {number} requestsFailed - Total number of requests that failed with an error. Default: 0.
|
|
588
|
+
* @property {number} retries - Total number of retry attempts scheduled. Default: 0.
|
|
589
|
+
* @property {HttpClientStats} wire - Underlying Wire_Client statistics snapshot.
|
|
590
|
+
*/
|
|
591
|
+
type LifecycleStats = {
|
|
592
|
+
/** Number of cache hits. */
|
|
593
|
+
cacheHits: number;
|
|
594
|
+
/** Number of cache misses. */
|
|
595
|
+
cacheMisses: number;
|
|
596
|
+
/** Number of cache evictions (LRU). */
|
|
597
|
+
cacheEvictions: number;
|
|
598
|
+
/** Number of dedup hits (requests collapsed into existing in-flight Async_Effect). */
|
|
599
|
+
dedupHits: number;
|
|
600
|
+
/** Number of currently active dedup groups. */
|
|
601
|
+
dedupActive: number;
|
|
602
|
+
/** Current depth of the priority queue. */
|
|
603
|
+
queueDepth: number;
|
|
604
|
+
/** Total number of requests started. */
|
|
605
|
+
requestsStarted: number;
|
|
606
|
+
/** Total number of requests completed successfully. */
|
|
607
|
+
requestsCompleted: number;
|
|
608
|
+
/** Total number of requests that failed. */
|
|
609
|
+
requestsFailed: number;
|
|
610
|
+
/** Total number of retry attempts scheduled. */
|
|
611
|
+
retries: number;
|
|
612
|
+
/** Underlying Wire_Client stats. */
|
|
613
|
+
wire: HttpClientStats;
|
|
614
|
+
};
|
|
615
|
+
/**
 * Per-request lifecycle options that can be passed alongside a request.
 *
 * Allows fine-grained control over lifecycle behavior on a per-request basis,
 * overriding the client-level configuration for individual requests.
 *
 * @property priority - Priority level for this request. Valid range: 0-9 (0 = highest priority). Default: 5.
 * @property dedupKey - Custom dedup key override for this request. When provided, overrides the computed Cache_Key for dedup purposes. Default: undefined.
 * @property noCache - When true, bypasses the cache layer for this request (neither reads from nor writes to cache). Default: false.
 * @property noDedup - When true, bypasses the dedup layer for this request (always creates a new in-flight Async_Effect). Default: false.
 */
type LifecycleRequestOptions = {
    /** Priority level 0-9 (0 = highest). Default: 5. Valid range: 0-9. */
    priority?: number;
    /** Custom dedup key override for this request. */
    dedupKey?: string;
    /** Skip cache for this request. Default: false. */
    noCache?: boolean;
    /** Skip dedup for this request. Default: false. */
    noDedup?: boolean;
};
|
|
636
|
+
|
|
637
|
+
/**
 * Creates a lifecycle-aware HTTP client that composes deduplication, caching,
 * and priority scheduling layers on top of the Wire_Client.
 *
 * When no layers are configured, the client delegates directly to the underlying
 * Wire_Client with zero additional overhead (zero-cost when disabled). Each layer
 * is independently optional and can be set to `false` to explicitly disable.
 *
 * Layer composition order (outermost to innermost):
 * - User middleware (applied via `.with()`)
 * - Dedup layer (if enabled)
 * - Cache layer (if enabled)
 * - Priority layer (if enabled)
 * - Wire_Client (`makeHttp`)
 *
 * @param config - Lifecycle client configuration extending `MakeHttpConfig` with optional layer settings.
 *   - `config.baseUrl` — Base URL prepended to relative request paths.
 *   - `config.headers` — Default headers merged into every request.
 *   - `config.timeoutMs` — Request budget in milliseconds covering pool wait + fetch + body read.
 *   - `config.dedup` — Deduplication layer config or `false` to disable.
 *   - `config.dedup.dedupKey` — Custom key function overriding default key computation.
 *   - `config.cache` — Response cache layer config or `false` to disable.
 *   - `config.cache.ttlSeconds` — Time-to-live in seconds; integer between 1 and 86400 (default: 60).
 *   - `config.cache.maxEntries` — Maximum cached entries; integer >= 1 (default: 1024).
 *   - `config.cache.staleWhileRevalidate` — Enable stale-while-revalidate (default: false).
 *   - `config.cache.cachePolicy` — Custom cache policy function.
 *   - `config.cache.cacheRelevantHeaders` — Additional headers included in Cache_Key computation.
 *   - `config.priority` — Priority scheduler layer config or `false` to disable.
 *   - `config.priority.concurrency` — Maximum concurrent dispatched requests; integer >= 1 (default: 32).
 *   - `config.priority.queueTimeoutMs` — Queue timeout in milliseconds for priority-queued requests.
 *   - `config.onEvent` — Optional observer callback invoked on each lifecycle event.
 *
 * @returns A {@link LifecycleClient} instance that is callable as an `HttpClientFn` and exposes
 *   `.with()` for middleware composition, `.stats()` for observability, `.cancelAll()` for
 *   bulk cancellation, and `.cache` for cache management.
 *
 * @example
 * ```typescript
 * import { makeLifecycleClient } from "./index";
 * import type { LifecycleClientConfig } from "./index";
 *
 * const config: LifecycleClientConfig = {
 *   baseUrl: "https://api.example.com",
 *   cache: { ttlSeconds: 120, maxEntries: 512 },
 *   priority: { concurrency: 8 },
 *   dedup: {},
 * };
 *
 * const client = makeLifecycleClient(config);
 *
 * // Execute a GET request through all lifecycle layers
 * const response = client({ method: "GET", url: "/users" });
 * ```
 */
declare function makeLifecycleClient(config?: LifecycleClientConfig): LifecycleClient;
|
|
692
|
+
/**
 * Canonical production HTTP client factory.
 *
 * Alias of {@link makeLifecycleClient}; kept as the recommended public name
 * for callers that want the stable wire -> priority -> retry -> cache -> dedup
 * lifecycle pipeline without importing lower-level building blocks.
 *
 * @param config - Lifecycle client configuration with optional wire, retry, cache, dedup, and priority settings.
 * @returns A lifecycle-aware HTTP client with stats, cache controls, middleware composition, and `cancelAll`.
 */
declare function makeHttpClient(config?: LifecycleClientConfig): LifecycleClient;
|
|
703
|
+
|
|
704
|
+
/**
 * Components of a parsed Cache_Key, representing the individual parts
 * that make up a deterministic cache key string.
 *
 * @property method - The HTTP method (uppercase), e.g. "GET", "POST"
 * @property resolvedUrl - The fully resolved URL including base URL resolution
 * @property headers - Cache-relevant headers as key-value pairs (lowercase keys)
 * @property body - The request body string, or empty string if no body was present
 */
type CacheKeyComponents = {
    method: string;
    resolvedUrl: string;
    headers: Record<string, string>;
    body: string;
};
|
|
719
|
+
/**
 * Null character (`\u0000`) used as a separator between Cache_Key components.
 *
 * This non-printable character is chosen because it cannot appear in valid HTTP
 * method names, URLs, or header values, ensuring unambiguous key parsing via
 * `parseCacheKey`.
 */
declare const SEPARATOR = "\0";
|
|
727
|
+
/**
 * Default set of HTTP headers included in Cache_Key computation.
 *
 * Value: `["accept", "authorization", "content-type"]`
 *
 * These headers are always factored into the cache key to ensure that requests
 * with different content negotiation, authentication, or body encoding are
 * cached separately. Additional headers can be included via the `extraHeaders`
 * parameter of `computeCacheKey` or the `cacheRelevantHeaders` option in `CacheConfig`.
 */
declare const DEFAULT_CACHE_RELEVANT_HEADERS: string[];
|
|
738
|
+
/**
 * Computes a deterministic Cache_Key string from an HTTP request.
 *
 * The key is composed of: method (uppercase), resolved URL, sorted filtered headers,
 * and body — concatenated with null character separators. The resulting string
 * uniquely identifies a cacheable request and can be round-tripped via `parseCacheKey`.
 *
 * @param req - The HTTP request to compute a Cache_Key for
 * @param baseUrl - Base URL for resolving relative request URLs
 * @param extraHeaders - Additional header names to include in the Cache_Key beyond
 *   the defaults in `DEFAULT_CACHE_RELEVANT_HEADERS`
 * @returns A deterministic Cache_Key string suitable for use as a cache lookup key
 *
 * @example
 * ```typescript
 * import { computeCacheKey } from "./cacheKey";
 *
 * const key = computeCacheKey(
 *   { method: "GET", url: "/users", headers: { accept: "application/json" } },
 *   "https://api.example.com"
 * );
 * // key is a deterministic string encoding method, URL, headers, and body
 * ```
 */
declare function computeCacheKey(req: HttpRequest, baseUrl: string, extraHeaders?: string[]): string;
|
|
763
|
+
/**
 * Parses a Cache_Key string back into its component parts.
 *
 * Splits on the null character separator and reconstructs the `CacheKeyComponents` object.
 * The body may contain separator characters, so all parts after the third separator
 * are joined back together as the body. This enables round-trip fidelity with
 * `computeCacheKey`.
 *
 * @param key - A Cache_Key string produced by `computeCacheKey`
 * @returns The parsed `CacheKeyComponents` with method, resolvedUrl, headers, and body
 *
 * @example
 * ```typescript
 * import { computeCacheKey, parseCacheKey } from "./cacheKey";
 *
 * const key = computeCacheKey(
 *   { method: "POST", url: "/data", headers: { "content-type": "application/json" }, body: '{"id":1}' },
 *   "https://api.example.com"
 * );
 * const parts = parseCacheKey(key);
 * // parts.method === "POST"
 * // parts.resolvedUrl === "https://api.example.com/data"
 * // parts.headers === { "content-type": "application/json" }
 * // parts.body === '{"id":1}'
 * ```
 */
declare function parseCacheKey(key: string): CacheKeyComponents;
|
|
790
|
+
|
|
791
|
+
/**
 * Event object passed to the `withLogging` middleware's logger callback on each
 * request lifecycle phase (request, response, or error).
 *
 * @property phase - The lifecycle phase that triggered this event: `"request"` before
 *   the request is sent, `"response"` on success, or `"error"` on failure.
 * @property req - The original HttpRequest being executed.
 * @property res - The HttpWireResponse received from the server. Present only when
 *   `phase` is `"response"`.
 * @property error - The HttpError that occurred. Present only when `phase` is `"error"`.
 * @property durationMs - Elapsed time in milliseconds since the request was initiated.
 *   Present only when `phase` is `"response"` or `"error"`.
 */
type LogEvent = {
    phase: "request" | "response" | "error";
    req: HttpRequest;
    res?: HttpWireResponse;
    error?: HttpError;
    durationMs?: number;
};
|
|
811
|
+
/**
 * Creates a middleware that injects a Bearer token into the Authorization header.
 * The token is obtained asynchronously via the provided `tokenProvider` Async_Effect.
 * If the token provider fails, the error propagates to the caller unchanged.
 *
 * @param tokenProvider - A function returning an Async_Effect that resolves to the
 *   Bearer token string. Called on every request to support token rotation.
 * @returns An HttpMiddleware that prepends `Authorization: Bearer <token>` to outgoing requests.
 *
 * @example
 * ```typescript
 * import { makeLifecycleClient, withAuth } from "./index";
 * import { asyncSucceed } from "../../core/types/asyncEffect";
 *
 * const client = makeLifecycleClient({ baseUrl: "https://api.example.com" })
 *   .with(withAuth(() => asyncSucceed("my-secret-token")));
 *
 * // All requests now include Authorization: Bearer my-secret-token
 * const result = client({ method: "GET", url: "/users" });
 * ```
 */
declare function withAuth(tokenProvider: () => Async<unknown, HttpError, string>): HttpMiddleware;
|
|
833
|
+
/**
 * Creates a middleware that logs request, response, and error events through a
 * user-supplied logger callback. The logger is invoked synchronously at each phase;
 * if it throws, the error is swallowed to avoid disrupting the request pipeline.
 *
 * @param logger - A synchronous callback invoked with a {@link LogEvent} for each
 *   lifecycle phase (`"request"`, `"response"`, `"error"`). Exceptions thrown by
 *   the logger are silently caught.
 * @returns An HttpMiddleware that instruments requests with logging side-effects.
 *
 * @example
 * ```typescript
 * import { makeLifecycleClient, withLogging } from "./index";
 * import type { LogEvent } from "./index";
 *
 * const client = makeLifecycleClient({ baseUrl: "https://api.example.com" })
 *   .with(withLogging((event: LogEvent) => {
 *     console.log(`[${event.phase}] ${event.req.method} ${event.req.url} ${event.durationMs ?? ""}ms`);
 *   }));
 *
 * const result = client({ method: "GET", url: "/health" });
 * ```
 */
declare function withLogging(logger: (event: LogEvent) => void): HttpMiddleware;
|
|
857
|
+
/**
 * Creates a middleware that transforms HTTP responses after retrieval. The
 * transformation is applied to both cached and network responses. Cached
 * responses are stored in their original (untransformed) form, so the transform
 * runs on every access.
 *
 * If the transform function throws, the error is propagated as a `FetchError`.
 *
 * @param fn - A synchronous function that receives the response and the original
 *   request, and returns a modified HttpWireResponse. Must not return `undefined`.
 * @returns An HttpMiddleware that applies the transform to every successful response.
 *
 * @example
 * ```typescript
 * import { makeLifecycleClient, withResponseTransform } from "./index";
 *
 * const client = makeLifecycleClient({ baseUrl: "https://api.example.com" })
 *   .with(withResponseTransform((res, req) => ({
 *     ...res,
 *     headers: { ...res.headers, "x-request-url": req.url },
 *   })));
 *
 * // Responses now include the x-request-url header
 * const result = client({ method: "GET", url: "/data" });
 * ```
 */
declare function withResponseTransform(fn: (res: HttpWireResponse, req: HttpRequest) => HttpWireResponse): HttpMiddleware;
|
|
884
|
+
|
|
885
|
+
/**
 * Configuration for the LRU cache.
 *
 * @property maxEntries - Maximum number of entries the cache can hold.
 *   Must be >= 1. Values less than 1 are clamped to 1. Fractional values are floored.
 *   Default: 1024.
 * @property onEvict - Optional callback invoked when entries are evicted from the cache.
 *   Receives the number of entries evicted in that operation (currently always 1).
 *
 * @example
 * ```typescript
 * import { LRUCache } from "./lruCache";
 *
 * const cache = new LRUCache<string>({ maxEntries: 100, onEvict: (n) => console.log(`Evicted ${n}`) });
 * ```
 */
type LRUCacheConfig = {
    /** Maximum number of entries. Must be >= 1. Default: 1024. */
    maxEntries?: number;
    /** Optional callback invoked with the number of entries evicted on each eviction. */
    onEvict?: (count: number) => void;
};
|
|
907
|
+
/**
 * A generic LRU (Least Recently Used) cache with per-entry TTL support.
 *
 * Uses a doubly-linked list combined with a Map for O(1) get, set, and eviction
 * operations. The head of the list is the most recently used entry; the tail is
 * the least recently used.
 *
 * When the cache exceeds `maxEntries`, the least recently used entry is evicted.
 * Expired entries are lazily removed on access (get).
 *
 * @example
 * ```typescript
 * import { LRUCache } from "./lruCache";
 *
 * const cache = new LRUCache<string>({ maxEntries: 256 });
 * cache.set("user:1", "Alice", 60_000); // TTL of 60 seconds
 * const value = cache.get("user:1"); // "Alice" (moves to head)
 * cache.delete("user:1"); // true
 * ```
 */
declare class LRUCache<V> {
    private readonly map;
    private head;
    private tail;
    private readonly maxEntries;
    private readonly onEvict;
    /**
     * Creates a new LRU cache instance.
     *
     * @param config - Cache configuration options.
     * @param config.maxEntries - Maximum number of entries. Must be >= 1. Default: 1024.
     * @param config.onEvict - Optional eviction callback.
     *
     * @example
     * ```typescript
     * import { LRUCache } from "./lruCache";
     *
     * const cache = new LRUCache<number>({ maxEntries: 50 });
     * ```
     */
    constructor(config?: LRUCacheConfig);
    /**
     * Returns the number of entries currently in the cache.
     *
     * @returns The current entry count.
     *
     * @example
     * ```typescript
     * import { LRUCache } from "./lruCache";
     *
     * const cache = new LRUCache<string>();
     * cache.set("a", "1", 10_000);
     * console.log(cache.size); // 1
     * ```
     */
    get size(): number;
    /**
     * Retrieves a value by key.
     *
     * Returns `undefined` if the key is not found or the entry has expired.
     * On a hit (non-expired), the entry is moved to the head (most recently used).
     * Expired entries are lazily removed on access.
     *
     * @param key - The cache key to look up.
     * @returns The cached value, or `undefined` if not found or expired.
     *
     * @example
     * ```typescript
     * import { LRUCache } from "./lruCache";
     *
     * const cache = new LRUCache<string>();
     * cache.set("greeting", "hello", 30_000);
     * const val = cache.get("greeting"); // "hello"
     * const miss = cache.get("unknown"); // undefined
     * ```
     */
    get(key: string): V | undefined;
    /**
     * Inserts or updates an entry in the cache.
     *
     * If the key already exists, the value and TTL are updated and the entry is
     * moved to the head. If inserting a new entry causes the cache to exceed
     * `maxEntries` (must be >= 1), the least recently used entry is evicted.
     *
     * @param key - The cache key.
     * @param value - The value to store.
     * @param ttlMs - Time-to-live in milliseconds. The entry expires after this duration.
     *
     * @example
     * ```typescript
     * import { LRUCache } from "./lruCache";
     *
     * const cache = new LRUCache<string>({ maxEntries: 2 });
     * cache.set("a", "alpha", 60_000);
     * cache.set("b", "beta", 60_000);
     * cache.set("c", "gamma", 60_000); // evicts "a" (LRU)
     * ```
     */
    set(key: string, value: V, ttlMs: number): void;
    /**
     * Removes an entry by key.
     *
     * @param key - The cache key to remove.
     * @returns `true` if the entry was found and removed, `false` otherwise.
     *
     * @example
     * ```typescript
     * import { LRUCache } from "./lruCache";
     *
     * const cache = new LRUCache<string>();
     * cache.set("x", "value", 10_000);
     * cache.delete("x"); // true
     * cache.delete("x"); // false (already removed)
     * ```
     */
    delete(key: string): boolean;
    /**
     * Removes all entries from the cache, resetting it to an empty state.
     *
     * @example
     * ```typescript
     * import { LRUCache } from "./lruCache";
     *
     * const cache = new LRUCache<string>();
     * cache.set("a", "1", 10_000);
     * cache.clear();
     * console.log(cache.size); // 0
     * ```
     */
    clear(): void;
    /** Adds a node to the head of the list (most recently used position). */
    private addToHead;
    /** Removes a node from its current position in the list. */
    private removeNode;
    /** Moves an existing node to the head of the list. */
    private moveToHead;
    /** Evicts the tail node (least recently used) and notifies via callback. */
    private evictTail;
}
|
|
1046
|
+
|
|
1047
|
+
/**
 * Clamps a priority value to the valid range [0, 9].
 *
 * - Truncates toward zero (removes fractional part)
 * - Clamps the result to the integer range 0 through 9
 * - Returns a default of 5 for `undefined`, `NaN`, or non-finite values
 *
 * @param value - The priority value to clamp. Must be an integer from 0 to 9.
 *   Values outside this range are clamped. Undefined or non-finite values default to 5.
 * @returns An integer in the range [0, 9] representing the clamped priority.
 *
 * @example
 * ```typescript
 * import { clampPriority } from "./priorityQueue";
 *
 * clampPriority(3); // 3
 * clampPriority(15); // 9 (clamped to max)
 * clampPriority(-2); // 0 (clamped to min)
 * clampPriority(undefined); // 5 (default)
 * clampPriority(2.7); // 2 (truncated)
 * ```
 */
declare function clampPriority(value: number | undefined): number;
|
|
1070
|
+
/**
 * An entry stored in the priority queue.
 *
 * @property priority - Priority level from 0 to 9, where 0 is the highest priority.
 *   Clamped on enqueue via `clampPriority`.
 * @property arrivalOrder - Monotonic counter used for FIFO tiebreak within the same
 *   priority level. Lower values are dispatched first.
 * @property value - The stored value associated with this entry.
 * @property cancelled - When `true`, the entry is logically removed (lazy deletion).
 *   Cancelled entries are skipped during dequeue and peek operations.
 */
type PriorityQueueEntry<T> = {
    /** Priority level 0-9 (0 = highest priority). Clamped on enqueue. */
    priority: number;
    /** Monotonic counter for FIFO tiebreak within the same priority level. */
    arrivalOrder: number;
    /** The stored value. */
    value: T;
    /** When true, the entry is logically removed (lazy deletion). */
    cancelled: boolean;
};
|
|
1091
|
+
/**
 * A generic binary min-heap priority queue.
 *
 * Entries are ordered by priority ascending (lower value = higher priority),
 * with FIFO tiebreak via a monotonic arrivalOrder counter. Priority values
 * are integers from 0 to 9, where 0 is the highest priority.
 *
 * Supports lazy removal: entries can be marked as cancelled and are
 * skipped during dequeue and peek operations.
 *
 * @example
 * ```typescript
 * import { PriorityQueue } from "./priorityQueue";
 *
 * const queue = new PriorityQueue<string>();
 * queue.enqueue("low", 9);
 * queue.enqueue("high", 0);
 * const entry = queue.dequeue(); // { value: "high", priority: 0, ... }
 * ```
 */
declare class PriorityQueue<T> {
    private heap;
    private counter;
    /**
     * Returns the number of entries in the queue (including cancelled entries).
     *
     * @returns The total number of entries in the internal heap.
     *
     * @example
     * ```typescript
     * import { PriorityQueue } from "./priorityQueue";
     *
     * const queue = new PriorityQueue<string>();
     * queue.enqueue("task", 5);
     * console.log(queue.size); // 1
     * ```
     */
    get size(): number;
    /** Returns the number of entries that have not been cancelled. */
    get activeSize(): number;
    /**
     * Adds a value to the queue with the given priority.
     *
     * Priority is clamped to the valid range [0, 9] via `clampPriority`.
     * Returns the created entry, which can be used for later cancellation
     * by setting `entry.cancelled = true`.
     *
     * @param value - The value to enqueue.
     * @param priority - Priority level, integer from 0 (highest) to 9 (lowest).
     *   Clamped to [0, 9]. Defaults to 5 if undefined.
     * @returns The created queue entry.
     *
     * @example
     * ```typescript
     * import { PriorityQueue } from "./priorityQueue";
     *
     * const queue = new PriorityQueue<string>();
     * const entry = queue.enqueue("urgent-task", 0);
     * entry.cancelled = true; // cancel later if needed
     * ```
     */
    enqueue(value: T, priority?: number): PriorityQueueEntry<T>;
    /**
     * Removes and returns the highest-priority non-cancelled entry.
     *
     * Skips (and discards) any cancelled entries at the top of the heap.
     * Returns `undefined` if the queue is empty or all entries are cancelled.
     *
     * @returns The highest-priority non-cancelled entry, or `undefined` if none available.
     *
     * @example
     * ```typescript
     * import { PriorityQueue } from "./priorityQueue";
     *
     * const queue = new PriorityQueue<string>();
     * queue.enqueue("first", 1);
     * queue.enqueue("second", 2);
     * const entry = queue.dequeue(); // { value: "first", priority: 1, ... }
     * ```
     */
    dequeue(): PriorityQueueEntry<T> | undefined;
    /**
     * Returns the highest-priority non-cancelled entry without removing it.
     *
     * Discards cancelled entries at the top of the heap as a side effect.
     * Returns `undefined` if the queue is empty or all entries are cancelled.
     *
     * @returns The highest-priority non-cancelled entry, or `undefined` if none available.
     *
     * @example
     * ```typescript
     * import { PriorityQueue } from "./priorityQueue";
     *
     * const queue = new PriorityQueue<string>();
     * queue.enqueue("task", 3);
     * const top = queue.peek(); // { value: "task", priority: 3, ... }
     * console.log(queue.size); // 1 (not removed)
     * ```
     */
    peek(): PriorityQueueEntry<T> | undefined;
    /**
     * Marks all entries matching the predicate as cancelled (lazy removal).
     *
     * Cancelled entries are skipped on subsequent dequeue/peek calls.
     * This does not immediately remove entries from the heap; they are
     * discarded lazily when encountered at the top during dequeue or peek.
     *
     * @param predicate - A function that returns `true` for entries to cancel.
     * @returns The number of entries marked as cancelled.
     *
     * @example
     * ```typescript
     * import { PriorityQueue } from "./priorityQueue";
     *
     * const queue = new PriorityQueue<string>();
     * queue.enqueue("a", 1);
     * queue.enqueue("b", 2);
     * const removed = queue.remove((e) => e.value === "a"); // 1
     * ```
     */
    remove(predicate: (entry: PriorityQueueEntry<T>) => boolean): number;
    /** Removes the top element from the heap and restores heap property. */
    private removeTop;
    /** Moves an element up the heap until the heap property is restored. */
    private bubbleUp;
    /** Moves an element down the heap until the heap property is restored. */
    private sinkDown;
}
|
|
1219
|
+
|
|
1220
|
+
/**
|
|
1221
|
+
* Tracks lifecycle statistics for the HTTP Lifecycle Client.
|
|
1222
|
+
*
|
|
1223
|
+
* All counters start at zero and increase monotonically. The tracker also
|
|
1224
|
+
* provides event emission for observability, wrapping the user-supplied
|
|
1225
|
+
* `onEvent` callback in a try-catch so that callback errors never disrupt
|
|
1226
|
+
* request processing.
|
|
1227
|
+
*
|
|
1228
|
+
* Use the `snapshot()` method to obtain a frozen point-in-time view of all
|
|
1229
|
+
* statistics, including wire-level stats from the underlying HTTP client.
|
|
1230
|
+
*
|
|
1231
|
+
* @example
|
|
1232
|
+
* ```typescript
|
|
1233
|
+
* import { LifecycleStatsTracker } from "./stats";
|
|
1234
|
+
*
|
|
1235
|
+
* const tracker = new LifecycleStatsTracker({
|
|
1236
|
+
* onEvent: (event) => console.log(event.type),
|
|
1237
|
+
* wireStats: () => ({ requestCount: 0, errorCount: 0 }),
|
|
1238
|
+
* });
|
|
1239
|
+
* tracker.cacheHit();
|
|
1240
|
+
* const stats = tracker.snapshot();
|
|
1241
|
+
* console.log(stats.cacheHits); // 1
|
|
1242
|
+
* ```
|
|
1243
|
+
*/
|
|
1244
|
+
declare class LifecycleStatsTracker {
|
|
1245
|
+
private _cacheHits;
|
|
1246
|
+
private _cacheMisses;
|
|
1247
|
+
private _cacheEvictions;
|
|
1248
|
+
private _dedupHits;
|
|
1249
|
+
private _dedupActive;
|
|
1250
|
+
private _queueDepth;
|
|
1251
|
+
private _requestsStarted;
|
|
1252
|
+
private _requestsCompleted;
|
|
1253
|
+
private _requestsFailed;
|
|
1254
|
+
private _retries;
|
|
1255
|
+
private readonly _onEvent;
|
|
1256
|
+
private readonly _wireStats;
|
|
1257
|
+
/**
|
|
1258
|
+
* Creates a new lifecycle stats tracker.
|
|
1259
|
+
*
|
|
1260
|
+
* @param opts - Configuration options for the tracker.
|
|
1261
|
+
* @param opts.onEvent - Optional callback invoked on each lifecycle event.
|
|
1262
|
+
* Errors thrown by this callback are silently discarded.
|
|
1263
|
+
* @param opts.wireStats - A function returning the current wire-level HTTP client stats.
|
|
1264
|
+
*
|
|
1265
|
+
* @example
|
|
1266
|
+
* ```typescript
|
|
1267
|
+
* import { LifecycleStatsTracker } from "./stats";
|
|
1268
|
+
*
|
|
1269
|
+
* const tracker = new LifecycleStatsTracker({
|
|
1270
|
+
* wireStats: () => ({ requestCount: 0, errorCount: 0 }),
|
|
1271
|
+
* });
|
|
1272
|
+
* ```
|
|
1273
|
+
*/
|
|
1274
|
+
constructor(opts: {
|
|
1275
|
+
onEvent?: (event: LifecycleEvent) => void;
|
|
1276
|
+
wireStats: () => HttpClientStats;
|
|
1277
|
+
});
|
|
1278
|
+
/**
|
|
1279
|
+
* Records a cache hit. Increments the cache hit counter by 1.
|
|
1280
|
+
*
|
|
1281
|
+
* @example
|
|
1282
|
+
* ```typescript
|
|
1283
|
+
* import { LifecycleStatsTracker } from "./stats";
|
|
1284
|
+
*
|
|
1285
|
+
* const tracker = new LifecycleStatsTracker({ wireStats: () => ({ requestCount: 0, errorCount: 0 }) });
|
|
1286
|
+
* tracker.cacheHit();
|
|
1287
|
+
* ```
|
|
1288
|
+
*/
|
|
1289
|
+
cacheHit(): void;
|
|
1290
|
+
/**
|
|
1291
|
+
* Records a cache miss. Increments the cache miss counter by 1.
|
|
1292
|
+
*
|
|
1293
|
+
* @example
|
|
1294
|
+
* ```typescript
|
|
1295
|
+
* import { LifecycleStatsTracker } from "./stats";
|
|
1296
|
+
*
|
|
1297
|
+
* const tracker = new LifecycleStatsTracker({ wireStats: () => ({ requestCount: 0, errorCount: 0 }) });
|
|
1298
|
+
* tracker.cacheMiss();
|
|
1299
|
+
* ```
|
|
1300
|
+
*/
|
|
1301
|
+
cacheMiss(): void;
|
|
1302
|
+
/**
|
|
1303
|
+
* Records a cache eviction. Increments the cache eviction counter by 1.
|
|
1304
|
+
*
|
|
1305
|
+
* @example
|
|
1306
|
+
* ```typescript
|
|
1307
|
+
* import { LifecycleStatsTracker } from "./stats";
|
|
1308
|
+
*
|
|
1309
|
+
* const tracker = new LifecycleStatsTracker({ wireStats: () => ({ requestCount: 0, errorCount: 0 }) });
|
|
1310
|
+
* tracker.cacheEviction();
|
|
1311
|
+
* ```
|
|
1312
|
+
*/
|
|
1313
|
+
cacheEviction(): void;
|
|
1314
|
+
/**
|
|
1315
|
+
* Records a dedup hit (a request that joined an in-flight duplicate).
|
|
1316
|
+
* Increments the dedup hit counter by 1.
|
|
1317
|
+
*
|
|
1318
|
+
* @example
|
|
1319
|
+
* ```typescript
|
|
1320
|
+
* import { LifecycleStatsTracker } from "./stats";
|
|
1321
|
+
*
|
|
1322
|
+
* const tracker = new LifecycleStatsTracker({ wireStats: () => ({ requestCount: 0, errorCount: 0 }) });
|
|
1323
|
+
* tracker.dedupHit();
|
|
1324
|
+
* ```
|
|
1325
|
+
*/
|
|
1326
|
+
dedupHit(): void;
|
|
1327
|
+
/**
|
|
1328
|
+
* Sets the current number of active dedup groups.
|
|
1329
|
+
*
|
|
1330
|
+
* @param n - The current count of active dedup groups. Must be >= 0.
|
|
1331
|
+
*
|
|
1332
|
+
* @example
|
|
1333
|
+
* ```typescript
|
|
1334
|
+
* import { LifecycleStatsTracker } from "./stats";
|
|
1335
|
+
*
|
|
1336
|
+
* const tracker = new LifecycleStatsTracker({ wireStats: () => ({ requestCount: 0, errorCount: 0 }) });
|
|
1337
|
+
* tracker.setDedupActive(3);
|
|
1338
|
+
* ```
|
|
1339
|
+
*/
|
|
1340
|
+
setDedupActive(n: number): void;
|
|
1341
|
+
/**
|
|
1342
|
+
* Sets the current priority queue depth.
|
|
1343
|
+
*
|
|
1344
|
+
* @param n - The current number of entries in the priority queue. Must be >= 0.
|
|
1345
|
+
*
|
|
1346
|
+
* @example
|
|
1347
|
+
* ```typescript
|
|
1348
|
+
* import { LifecycleStatsTracker } from "./stats";
|
|
1349
|
+
*
|
|
1350
|
+
* const tracker = new LifecycleStatsTracker({ wireStats: () => ({ requestCount: 0, errorCount: 0 }) });
|
|
1351
|
+
* tracker.setQueueDepth(5);
|
|
1352
|
+
* ```
|
|
1353
|
+
*/
|
|
1354
|
+
setQueueDepth(n: number): void;
|
|
1355
|
+
/**
|
|
1356
|
+
* Records that a request has started. Increments the requests started counter by 1.
|
|
1357
|
+
*
|
|
1358
|
+
* @example
|
|
1359
|
+
* ```typescript
|
|
1360
|
+
* import { LifecycleStatsTracker } from "./stats";
|
|
1361
|
+
*
|
|
1362
|
+
* const tracker = new LifecycleStatsTracker({ wireStats: () => ({ requestCount: 0, errorCount: 0 }) });
|
|
1363
|
+
* tracker.requestStarted();
|
|
1364
|
+
* ```
|
|
1365
|
+
*/
|
|
1366
|
+
requestStarted(): void;
|
|
1367
|
+
/**
|
|
1368
|
+
* Records that a request has completed successfully.
|
|
1369
|
+
* Increments the requests completed counter by 1.
|
|
1370
|
+
*
|
|
1371
|
+
* @example
|
|
1372
|
+
* ```typescript
|
|
1373
|
+
* import { LifecycleStatsTracker } from "./stats";
|
|
1374
|
+
*
|
|
1375
|
+
* const tracker = new LifecycleStatsTracker({ wireStats: () => ({ requestCount: 0, errorCount: 0 }) });
|
|
1376
|
+
* tracker.requestCompleted();
|
|
1377
|
+
* ```
|
|
1378
|
+
*/
|
|
1379
|
+
requestCompleted(): void;
|
|
1380
|
+
/**
|
|
1381
|
+
* Records that a request has failed.
|
|
1382
|
+
* Increments the requests failed counter by 1.
|
|
1383
|
+
*
|
|
1384
|
+
* @example
|
|
1385
|
+
* ```typescript
|
|
1386
|
+
* import { LifecycleStatsTracker } from "./stats";
|
|
1387
|
+
*
|
|
1388
|
+
* const tracker = new LifecycleStatsTracker({ wireStats: () => ({ requestCount: 0, errorCount: 0 }) });
|
|
1389
|
+
* tracker.requestFailed();
|
|
1390
|
+
* ```
|
|
1391
|
+
*/
|
|
1392
|
+
requestFailed(): void;
|
|
1393
|
+
retry(): void;
|
|
1394
|
+
/**
|
|
1395
|
+
* Emits a lifecycle event to the registered `onEvent` callback.
|
|
1396
|
+
*
|
|
1397
|
+
* The callback is wrapped in a try-catch so that any exception thrown by
|
|
1398
|
+
* the callback is silently discarded and request processing continues
|
|
1399
|
+
* unaffected. If no `onEvent` callback was provided, this is a no-op.
|
|
1400
|
+
*
|
|
1401
|
+
* @param type - The lifecycle event type to emit (e.g., `"cache-hit"`, `"request-start"`).
|
|
1402
|
+
* @param extra - Optional additional event data.
|
|
1403
|
+
*
|
|
1404
|
+
* @example
|
|
1405
|
+
* ```typescript
|
|
1406
|
+
* import { LifecycleStatsTracker } from "./stats";
|
|
1407
|
+
*
|
|
1408
|
+
* const tracker = new LifecycleStatsTracker({
|
|
1409
|
+
* onEvent: (event) => console.log(event.type, event.timestamp),
|
|
1410
|
+
* wireStats: () => ({ requestCount: 0, errorCount: 0 }),
|
|
1411
|
+
* });
|
|
1412
|
+
* tracker.emit("cache-hit", { cacheKey: "GET|/api/users" });
|
|
1413
|
+
* ```
|
|
1414
|
+
*/
|
|
1415
|
+
emit(type: LifecycleEventType, extra?: {
|
|
1416
|
+
cacheKey?: string;
|
|
1417
|
+
priority?: number;
|
|
1418
|
+
attempt?: number;
|
|
1419
|
+
delayMs?: number;
|
|
1420
|
+
status?: number;
|
|
1421
|
+
errorTag?: string;
|
|
1422
|
+
}): void;
|
|
1423
|
+
/**
|
|
1424
|
+
* Returns a frozen snapshot of all lifecycle statistics including wire stats.
|
|
1425
|
+
*
|
|
1426
|
+
* The returned object is frozen (immutable) and represents a point-in-time
|
|
1427
|
+
* view of all counters and gauges.
|
|
1428
|
+
*
|
|
1429
|
+
* @returns A frozen `LifecycleStats` object containing all current statistics.
|
|
1430
|
+
*
|
|
1431
|
+
* @example
|
|
1432
|
+
* ```typescript
|
|
1433
|
+
* import { LifecycleStatsTracker } from "./stats";
|
|
1434
|
+
*
|
|
1435
|
+
* const tracker = new LifecycleStatsTracker({
|
|
1436
|
+
* wireStats: () => ({ requestCount: 10, errorCount: 1 }),
|
|
1437
|
+
* });
|
|
1438
|
+
* tracker.cacheHit();
|
|
1439
|
+
* tracker.cacheHit();
|
|
1440
|
+
* const stats = tracker.snapshot();
|
|
1441
|
+
* console.log(stats.cacheHits); // 2
|
|
1442
|
+
* ```
|
|
1443
|
+
*/
|
|
1444
|
+
snapshot(): LifecycleStats;
|
|
1445
|
+
}
|
|
1446
|
+
|
|
1447
|
+
/**
|
|
1448
|
+
* Configuration for the deduplication middleware.
|
|
1449
|
+
*/
|
|
1450
|
+
type DedupConfig = {
|
|
1451
|
+
/** Custom key function. When provided, overrides default key computation. */
|
|
1452
|
+
dedupKey?: (req: HttpRequest) => string;
|
|
1453
|
+
/** Optional lifecycle observer for dedup hits/misses. */
|
|
1454
|
+
onEvent?: (event: {
|
|
1455
|
+
type: "dedup-hit" | "dedup-miss" | "dedup-active";
|
|
1456
|
+
cacheKey?: string;
|
|
1457
|
+
active?: number;
|
|
1458
|
+
}) => void;
|
|
1459
|
+
};
|
|
1460
|
+
/**
|
|
1461
|
+
* Creates a deduplication middleware that collapses identical in-flight requests
|
|
1462
|
+
* into a single network call.
|
|
1463
|
+
*
|
|
1464
|
+
* For safe HTTP methods (GET, HEAD, OPTIONS), concurrent requests with the same
|
|
1465
|
+
* dedup key share a single underlying network call. All callers receive the same
|
|
1466
|
+
* response or error.
|
|
1467
|
+
*
|
|
1468
|
+
* Non-safe methods (POST, PUT, PATCH, DELETE) pass through without deduplication.
|
|
1469
|
+
*
|
|
1470
|
+
* Supports ref-counted cancellation: when a caller cancels, the refCount is decremented.
|
|
1471
|
+
* When refCount reaches 0, the underlying request is aborted via AbortController.
|
|
1472
|
+
*
|
|
1473
|
+
* @param config - Optional dedup configuration. Provide a `dedupKey` function to override
|
|
1474
|
+
* the default Cache_Key computation. Return an empty string from `dedupKey` to bypass
|
|
1475
|
+
* deduplication for a specific request.
|
|
1476
|
+
* @returns An HttpMiddleware that wraps the next Wire_Client with deduplication logic.
|
|
1477
|
+
* Concurrent safe-method requests sharing the same key resolve to a single network call.
|
|
1478
|
+
*
|
|
1479
|
+
* @example
|
|
1480
|
+
* ```typescript
|
|
1481
|
+
* import { withDedup } from "./dedup";
|
|
1482
|
+
*
|
|
1483
|
+
* // Basic usage with default key computation
|
|
1484
|
+
* const dedupMiddleware = withDedup();
|
|
1485
|
+
*
|
|
1486
|
+
* // With custom key function
|
|
1487
|
+
* const customDedup = withDedup({
|
|
1488
|
+
* dedupKey: (req) => `${req.method}:${req.url}`,
|
|
1489
|
+
* });
|
|
1490
|
+
* ```
|
|
1491
|
+
*/
|
|
1492
|
+
declare function withDedup(config?: DedupConfig): HttpMiddleware;
|
|
1493
|
+
|
|
1494
|
+
/**
|
|
1495
|
+
* Result of a custom cache policy function.
|
|
1496
|
+
*/
|
|
1497
|
+
type CachePolicyResult = {
|
|
1498
|
+
cacheable: boolean;
|
|
1499
|
+
ttlSeconds?: number;
|
|
1500
|
+
};
|
|
1501
|
+
/**
|
|
1502
|
+
* Configuration for the response cache middleware.
|
|
1503
|
+
*/
|
|
1504
|
+
type CacheConfig = {
|
|
1505
|
+
/** Time-to-live in seconds. Default: 60. Range: [1, 86400]. */
|
|
1506
|
+
ttlSeconds?: number;
|
|
1507
|
+
/** Maximum number of cached entries. Default: 1024. Minimum: 1. */
|
|
1508
|
+
maxEntries?: number;
|
|
1509
|
+
/** Enable stale-while-revalidate. Default: false. */
|
|
1510
|
+
staleWhileRevalidate?: boolean;
|
|
1511
|
+
/** Custom cache policy function. */
|
|
1512
|
+
cachePolicy?: (req: HttpRequest, res: HttpWireResponse) => CachePolicyResult;
|
|
1513
|
+
/** Additional headers to include in cache key computation. */
|
|
1514
|
+
cacheRelevantHeaders?: string[];
|
|
1515
|
+
/** Base URL needed for cache key computation. */
|
|
1516
|
+
baseUrl?: string;
|
|
1517
|
+
/** Optional event callback for structured cache failure events. */
|
|
1518
|
+
onEvent?: (event: {
|
|
1519
|
+
type: string;
|
|
1520
|
+
cacheKey?: string;
|
|
1521
|
+
error?: any;
|
|
1522
|
+
}) => void;
|
|
1523
|
+
/** Optional internal lifecycle callback for hit/miss/eviction stats. */
|
|
1524
|
+
onLifecycleEvent?: (event: {
|
|
1525
|
+
type: "cache-hit" | "cache-miss" | "cache-eviction";
|
|
1526
|
+
cacheKey?: string;
|
|
1527
|
+
count?: number;
|
|
1528
|
+
}) => void;
|
|
1529
|
+
};
|
|
1530
|
+
/**
|
|
1531
|
+
* Creates a response cache middleware that stores and serves previously fetched
|
|
1532
|
+
* responses based on configurable cache policies.
|
|
1533
|
+
*
|
|
1534
|
+
* Features:
|
|
1535
|
+
* - LRU eviction when maxEntries is exceeded
|
|
1536
|
+
* - Per-entry TTL with configurable default
|
|
1537
|
+
* - Stale-while-revalidate support
|
|
1538
|
+
* - Custom cache policy function for cacheability and TTL override
|
|
1539
|
+
* - Only caches safe methods (GET, HEAD, OPTIONS) by default
|
|
1540
|
+
* - Exposes `invalidate(key)` and `clear()` for manual cache control
|
|
1541
|
+
*
|
|
1542
|
+
* @param config - Optional cache configuration object.
|
|
1543
|
+
* - `ttlSeconds`: Time-to-live per entry in seconds, clamped to [1, 86400]. Default: 60.
|
|
1544
|
+
* - `maxEntries`: Maximum cached entries, minimum 1. Default: 1024.
|
|
1545
|
+
* - `staleWhileRevalidate`: When true, serves stale entries while refreshing in background. Default: false.
|
|
1546
|
+
* - `cachePolicy`: Custom function to determine cacheability and per-entry TTL override.
|
|
1547
|
+
* - `cacheRelevantHeaders`: Additional headers included in Cache_Key computation.
|
|
1548
|
+
* - `baseUrl`: Base URL for Cache_Key computation.
|
|
1549
|
+
* - `onEvent`: Callback for structured cache events (e.g., revalidation failures).
|
|
1550
|
+
* @returns An object containing:
|
|
1551
|
+
* - `middleware`: An HttpMiddleware that wraps the next Wire_Client with caching logic.
|
|
1552
|
+
* - `invalidate(key)`: Removes a specific entry from the cache by its Cache_Key.
|
|
1553
|
+
* - `clear()`: Removes all entries from the cache.
|
|
1554
|
+
*
|
|
1555
|
+
* @example
|
|
1556
|
+
* ```typescript
|
|
1557
|
+
* import { withCache } from "./responseCache";
|
|
1558
|
+
*
|
|
1559
|
+
* // Basic usage with defaults (60s TTL, 1024 max entries)
|
|
1560
|
+
* const { middleware, invalidate, clear } = withCache();
|
|
1561
|
+
*
|
|
1562
|
+
* // Custom TTL and max entries
|
|
1563
|
+
* const cache = withCache({
|
|
1564
|
+
* ttlSeconds: 300,
|
|
1565
|
+
* maxEntries: 512,
|
|
1566
|
+
* staleWhileRevalidate: true,
|
|
1567
|
+
* });
|
|
1568
|
+
*
|
|
1569
|
+
* // Manually invalidate a cached entry
|
|
1570
|
+
* cache.invalidate("GET|https://api.example.com/users");
|
|
1571
|
+
* ```
|
|
1572
|
+
*/
|
|
1573
|
+
declare function withCache(config?: CacheConfig): {
|
|
1574
|
+
middleware: HttpMiddleware;
|
|
1575
|
+
invalidate: (key: string) => void;
|
|
1576
|
+
clear: () => void;
|
|
1577
|
+
};
|
|
1578
|
+
|
|
1579
|
+
/**
|
|
1580
|
+
* Configuration for the priority scheduler middleware.
|
|
1581
|
+
*/
|
|
1582
|
+
type PriorityConfig = {
|
|
1583
|
+
/** Maximum concurrent requests dispatched to the wire client. Default: 32. */
|
|
1584
|
+
concurrency?: number;
|
|
1585
|
+
/** Queue timeout in ms for priority-queued requests. Default: no timeout. */
|
|
1586
|
+
queueTimeoutMs?: number;
|
|
1587
|
+
/** Optional lifecycle observer for queue events. */
|
|
1588
|
+
onEvent?: (event: {
|
|
1589
|
+
type: "queue-enqueue" | "queue-dispatch";
|
|
1590
|
+
priority: number;
|
|
1591
|
+
}) => void;
|
|
1592
|
+
};
|
|
1593
|
+
/**
|
|
1594
|
+
* Creates a priority scheduler middleware that reorders queued requests
|
|
1595
|
+
* by priority before dispatching them to the downstream wire client.
|
|
1596
|
+
*
|
|
1597
|
+
* When the concurrency limit is not reached, requests are dispatched immediately.
|
|
1598
|
+
* When at capacity, requests are held in a priority queue (lower numeric priority = higher urgency)
|
|
1599
|
+
* and dispatched in priority order as slots become available.
|
|
1600
|
+
*
|
|
1601
|
+
* Supports:
|
|
1602
|
+
* - Priority extraction from request options (default 5, clamped to 0-9)
|
|
1603
|
+
* - Queue timeout via `queueTimeoutMs` config (produces PoolTimeout error)
|
|
1604
|
+
* - Cancellation: removes from queue on abort signal
|
|
1605
|
+
* - Stats tracking via `queueDepth` getter
|
|
1606
|
+
*
|
|
1607
|
+
* @param config - Optional priority scheduler configuration.
|
|
1608
|
+
* - `concurrency`: Maximum concurrent requests dispatched to the Wire_Client.
|
|
1609
|
+
* Must be a positive integer (>= 1). Default: 32.
|
|
1610
|
+
* - `queueTimeoutMs`: Maximum time in milliseconds a request may wait in the queue
|
|
1611
|
+
* before receiving a PoolTimeout error. Must be a positive integer (>= 1) or undefined
|
|
1612
|
+
* for no timeout. Default: undefined (no timeout).
|
|
1613
|
+
* @returns An HttpMiddleware (with an additional `queueDepth()` method) that wraps the
|
|
1614
|
+
* next Wire_Client with priority-based scheduling. Requests carry a priority level
|
|
1615
|
+
* (integer from 0 to 9, where 0 is highest urgency). Default priority is 5.
|
|
1616
|
+
*
|
|
1617
|
+
* @example
|
|
1618
|
+
* ```typescript
|
|
1619
|
+
* import { withPriority } from "./priorityScheduler";
|
|
1620
|
+
*
|
|
1621
|
+
* // Basic usage with default concurrency (32)
|
|
1622
|
+
* const priorityMiddleware = withPriority();
|
|
1623
|
+
*
|
|
1624
|
+
* // Limit concurrency and set queue timeout
|
|
1625
|
+
* const scheduler = withPriority({
|
|
1626
|
+
* concurrency: 4,
|
|
1627
|
+
* queueTimeoutMs: 5000,
|
|
1628
|
+
* });
|
|
1629
|
+
*
|
|
1630
|
+
* // Check current queue depth
|
|
1631
|
+
* const depth = scheduler.queueDepth();
|
|
1632
|
+
* ```
|
|
1633
|
+
*/
|
|
1634
|
+
declare function withPriority(config?: PriorityConfig): HttpMiddleware & {
|
|
1635
|
+
queueDepth: () => number;
|
|
1636
|
+
};
|
|
1637
|
+
|
|
1638
|
+
/**
|
|
1639
|
+
* Supported content encoding algorithms.
|
|
1640
|
+
*/
|
|
1641
|
+
type SupportedEncoding = "gzip" | "br" | "deflate";
|
|
1642
|
+
/**
|
|
1643
|
+
* All supported encodings in default preference order (Brotli first).
|
|
1644
|
+
*/
|
|
1645
|
+
declare const SUPPORTED_ENCODINGS: readonly SupportedEncoding[];
|
|
1646
|
+
/**
|
|
1647
|
+
* Configuration for the compression middleware.
|
|
1648
|
+
*/
|
|
1649
|
+
type CompressionConfig = {
|
|
1650
|
+
/**
|
|
1651
|
+
* Enabled encodings in preference order.
|
|
1652
|
+
* Default: ["br", "gzip", "deflate"]
|
|
1653
|
+
*/
|
|
1654
|
+
encodings?: SupportedEncoding[];
|
|
1655
|
+
};
|
|
1656
|
+
type RequestCompressionConfig = {
|
|
1657
|
+
/**
|
|
1658
|
+
* Encoding to apply to outbound request bodies.
|
|
1659
|
+
* Default: "gzip".
|
|
1660
|
+
*/
|
|
1661
|
+
encoding?: SupportedEncoding;
|
|
1662
|
+
/**
|
|
1663
|
+
* Minimum uncompressed body size in bytes before compression is attempted.
|
|
1664
|
+
* Default: 1024.
|
|
1665
|
+
*/
|
|
1666
|
+
minBytes?: number;
|
|
1667
|
+
/**
|
|
1668
|
+
* HTTP methods eligible for request compression.
|
|
1669
|
+
* Default: ["POST", "PUT", "PATCH"].
|
|
1670
|
+
*/
|
|
1671
|
+
methods?: readonly string[];
|
|
1672
|
+
};
|
|
1673
|
+
/**
|
|
1674
|
+
* Frozen snapshot of compression statistics.
|
|
1675
|
+
*/
|
|
1676
|
+
type CompressionStats = {
|
|
1677
|
+
/** Responses decompressed per encoding type */
|
|
1678
|
+
readonly decompressed: Readonly<Record<SupportedEncoding, number>>;
|
|
1679
|
+
/** Total compressed bytes received */
|
|
1680
|
+
readonly compressedBytes: number;
|
|
1681
|
+
/** Total decompressed bytes produced */
|
|
1682
|
+
readonly decompressedBytes: number;
|
|
1683
|
+
/** Responses that bypassed decompression */
|
|
1684
|
+
readonly passthroughCount: number;
|
|
1685
|
+
/** Decompression errors encountered */
|
|
1686
|
+
readonly errorCount: number;
|
|
1687
|
+
/** Unsupported encoding warnings */
|
|
1688
|
+
readonly unsupportedEncodingCount: number;
|
|
1689
|
+
};
|
|
1690
|
+
type RequestCompressionStats = {
|
|
1691
|
+
readonly compressedCount: number;
|
|
1692
|
+
readonly skippedCount: number;
|
|
1693
|
+
readonly errorCount: number;
|
|
1694
|
+
readonly originalBytes: number;
|
|
1695
|
+
readonly compressedBytes: number;
|
|
1696
|
+
};
|
|
1697
|
+
/**
|
|
1698
|
+
* Result of a decompression attempt.
|
|
1699
|
+
*/
|
|
1700
|
+
type DecompressResult = {
|
|
1701
|
+
ok: true;
|
|
1702
|
+
data: Buffer;
|
|
1703
|
+
} | {
|
|
1704
|
+
ok: false;
|
|
1705
|
+
error: string;
|
|
1706
|
+
};
|
|
1707
|
+
/**
|
|
1708
|
+
* Abstraction over zlib / noop decompression.
|
|
1709
|
+
*/
|
|
1710
|
+
interface Decompressor {
|
|
1711
|
+
readonly isPassthrough: boolean;
|
|
1712
|
+
decompress(data: Buffer | Uint8Array, encoding: SupportedEncoding): DecompressResult;
|
|
1713
|
+
}
|
|
1714
|
+
/**
|
|
1715
|
+
* Result of makeCompressionMiddleware — the middleware plus a stats accessor.
|
|
1716
|
+
*/
|
|
1717
|
+
type CompressionMiddlewareResult = {
|
|
1718
|
+
middleware: HttpMiddleware;
|
|
1719
|
+
stats: () => CompressionStats;
|
|
1720
|
+
};
|
|
1721
|
+
type RequestCompressionMiddlewareResult = {
|
|
1722
|
+
middleware: HttpMiddleware;
|
|
1723
|
+
stats: () => RequestCompressionStats;
|
|
1724
|
+
};
|
|
1725
|
+
|
|
1726
|
+
/**
|
|
1727
|
+
* Creates the compression middleware with optional configuration.
|
|
1728
|
+
*
|
|
1729
|
+
* The middleware:
|
|
1730
|
+
* 1. Injects Accept-Encoding header on outgoing requests (if missing)
|
|
1731
|
+
* 2. Decompresses response bodies based on Content-Encoding header
|
|
1732
|
+
* 3. Tracks compression statistics
|
|
1733
|
+
*/
|
|
1734
|
+
declare function makeCompressionMiddleware(config?: CompressionConfig): CompressionMiddlewareResult;
|
|
1735
|
+
declare const makeResponseCompressionMiddleware: typeof makeCompressionMiddleware;
|
|
1736
|
+
declare function makeRequestCompressionMiddleware(config?: RequestCompressionConfig): RequestCompressionMiddlewareResult;
|
|
1737
|
+
|
|
1738
|
+
type RequestBatchingEvent = {
|
|
1739
|
+
type: "batch-enqueue";
|
|
1740
|
+
key: string;
|
|
1741
|
+
size: number;
|
|
1742
|
+
request: HttpRequest;
|
|
1743
|
+
} | {
|
|
1744
|
+
type: "batch-flush";
|
|
1745
|
+
key: string;
|
|
1746
|
+
size: number;
|
|
1747
|
+
reason: "size" | "timer" | "manual";
|
|
1748
|
+
} | {
|
|
1749
|
+
type: "batch-cancel";
|
|
1750
|
+
key: string;
|
|
1751
|
+
remaining: number;
|
|
1752
|
+
} | {
|
|
1753
|
+
type: "batch-error";
|
|
1754
|
+
key: string;
|
|
1755
|
+
size: number;
|
|
1756
|
+
error: HttpError;
|
|
1757
|
+
};
|
|
1758
|
+
type RequestBatchingConfig = {
|
|
1759
|
+
/**
|
|
1760
|
+
* Groups requests into independent batches. Return undefined/null/empty string
|
|
1761
|
+
* to bypass batching for a request.
|
|
1762
|
+
*
|
|
1763
|
+
* Default: `${method}:${url}`.
|
|
1764
|
+
*/
|
|
1765
|
+
key?: (req: HttpRequest) => string | undefined | null;
|
|
1766
|
+
/** Extra predicate for per-request opt-in/out. Default: batch all keyed requests. */
|
|
1767
|
+
shouldBatch?: (req: HttpRequest) => boolean;
|
|
1768
|
+
/** Maximum requests per batch. Default: 16. */
|
|
1769
|
+
maxBatchSize?: number;
|
|
1770
|
+
/** Maximum time to wait before flushing a non-full batch. Default: 5ms. */
|
|
1771
|
+
maxWaitMs?: number;
|
|
1772
|
+
/** Builds the actual wire request sent to the batch endpoint. */
|
|
1773
|
+
encode: (requests: readonly HttpRequest[]) => HttpRequest;
|
|
1774
|
+
/**
|
|
1775
|
+
* Splits the batch endpoint response back into one response per original
|
|
1776
|
+
* request. The returned array must have the same length and order.
|
|
1777
|
+
*/
|
|
1778
|
+
decode: (response: HttpWireResponse, requests: readonly HttpRequest[]) => readonly HttpWireResponse[];
|
|
1779
|
+
/** Optional observability hook. Exceptions are swallowed. */
|
|
1780
|
+
onEvent?: (event: RequestBatchingEvent) => void;
|
|
1781
|
+
};
|
|
1782
|
+
declare function withRequestBatching(config: RequestBatchingConfig): HttpMiddleware;
|
|
1783
|
+
|
|
1784
|
+
type ConnectionPrewarmAttempt = {
|
|
1785
|
+
url: string;
|
|
1786
|
+
origin: string;
|
|
1787
|
+
ok: boolean;
|
|
1788
|
+
status?: number;
|
|
1789
|
+
ms: number;
|
|
1790
|
+
error?: HttpError;
|
|
1791
|
+
};
|
|
1792
|
+
type ConnectionPrewarmResult = {
|
|
1793
|
+
attempted: number;
|
|
1794
|
+
warmed: number;
|
|
1795
|
+
failed: number;
|
|
1796
|
+
skipped: number;
|
|
1797
|
+
attempts: readonly ConnectionPrewarmAttempt[];
|
|
1798
|
+
};
|
|
1799
|
+
type ConnectionPrewarmEvent = {
|
|
1800
|
+
type: "prewarm-start";
|
|
1801
|
+
url: string;
|
|
1802
|
+
origin: string;
|
|
1803
|
+
} | {
|
|
1804
|
+
type: "prewarm-success";
|
|
1805
|
+
url: string;
|
|
1806
|
+
origin: string;
|
|
1807
|
+
status: number;
|
|
1808
|
+
ms: number;
|
|
1809
|
+
} | {
|
|
1810
|
+
type: "prewarm-failure";
|
|
1811
|
+
url: string;
|
|
1812
|
+
origin: string;
|
|
1813
|
+
error: HttpError;
|
|
1814
|
+
ms: number;
|
|
1815
|
+
};
|
|
1816
|
+
type ConnectionPrewarmConfig = {
|
|
1817
|
+
baseUrl?: string;
|
|
1818
|
+
urls?: readonly string[];
|
|
1819
|
+
origins?: readonly string[];
|
|
1820
|
+
path?: string;
|
|
1821
|
+
method?: Extract<HttpMethod, "HEAD" | "GET" | "OPTIONS">;
|
|
1822
|
+
headers?: Record<string, string>;
|
|
1823
|
+
timeoutMs?: number;
|
|
1824
|
+
failFast?: boolean;
|
|
1825
|
+
fetchImpl?: typeof fetch;
|
|
1826
|
+
onEvent?: (event: ConnectionPrewarmEvent) => void;
|
|
1827
|
+
};
|
|
1828
|
+
type ConnectionPrewarmingMiddlewareConfig = ConnectionPrewarmConfig & {
|
|
1829
|
+
once?: boolean;
|
|
1830
|
+
shouldPrewarm?: (req: HttpRequest) => boolean;
|
|
1831
|
+
target?: (req: HttpRequest) => string | undefined | null;
|
|
152
1832
|
};
|
|
1833
|
+
declare function prewarmConnections(config?: ConnectionPrewarmConfig): Async<unknown, HttpError, ConnectionPrewarmResult>;
|
|
1834
|
+
declare const prewarmHttpConnections: typeof prewarmConnections;
|
|
1835
|
+
declare function withConnectionPrewarming(config?: ConnectionPrewarmingMiddlewareConfig): HttpMiddleware;
|
|
153
1836
|
|
|
154
|
-
export { type Dx, type HttpClient, type HttpClientFn, type HttpClientStream, type HttpError, type HttpInit, type HttpMeta, type HttpMethod, type HttpMiddleware, type HttpRequest, type HttpResponse, type HttpResponseWithMeta, type HttpWireResponse, type HttpWireResponseStream, type HttpWireWithMeta, type MakeHttpConfig, decorate, httpClient, httpClientStream, httpClientWithMeta, makeHttp, makeHttpStream, normalizeHeadersInit, withMiddleware, withRetryStream };
|
|
1837
|
+
export { type CacheConfig$1 as CacheConfig, type CacheKeyComponents, type CachePolicyResult$1 as CachePolicyResult, type CompressionConfig, type CompressionMiddlewareResult, type CompressionStats, type ConnectionPrewarmAttempt, type ConnectionPrewarmConfig, type ConnectionPrewarmEvent, type ConnectionPrewarmResult, type ConnectionPrewarmingMiddlewareConfig, DEFAULT_CACHE_RELEVANT_HEADERS, type DecompressResult, type Decompressor, type DedupConfig$1 as DedupConfig, type Dx, type HttpBody, type HttpCircuitBreakerConfig, type HttpClient, type HttpClientFn, type HttpClientStats, type HttpClientStream, type HttpClientStreamFn, HttpConcurrencyPool, type HttpError, type HttpInit, type HttpMeta, type HttpMethod, type HttpMiddleware, type HttpPoolConfig, type HttpPoolKeyResolver, type HttpPoolKeyStats, type HttpPoolLease, type HttpPoolStats, type HttpRequest, type HttpResponse, type HttpResponseWithMeta, type HttpWireResponse, type HttpWireResponseStream, type HttpWireWithMeta, type JsonValidator, LRUCache, type LRUCacheConfig, type LifecycleClient, type LifecycleClientConfig, type LifecycleEvent, type LifecycleEventType, type LifecycleRequestOptions, type LifecycleStats, LifecycleStatsTracker, type LogEvent, type MakeHttpConfig, type PerRequestRetryOverride, type PriorityConfig$1 as PriorityConfig, PriorityQueue, type PriorityQueueEntry, type RequestBatchingConfig, type RequestBatchingEvent, type RequestCompressionConfig, type RequestCompressionMiddlewareResult, type RequestCompressionStats, type RetryEvent, type RetryPolicy, SEPARATOR, SUPPORTED_ENCODINGS, type SupportedEncoding, type ValidationError, backoffDelayMs, clampPriority, computeCacheKey, decorate, defaultRetryOnError, defaultRetryOnStatus, defaultRetryableMethods, httpClient, httpClientStream, httpClientWithMeta, makeCompressionMiddleware, makeHttp, makeHttpClient, makeHttpStream, makeLifecycleClient, makeRequestCompressionMiddleware, makeResponseCompressionMiddleware, normalizeHeadersInit, 
normalizeRetryBudget, parseCacheKey, prewarmConnections, prewarmHttpConnections, resolveHttpPoolKey, retryAfterMs, validatedJson, withAuth, withCache, withCircuitBreaker, withConnectionPrewarming, withDedup, withLogging, withMiddleware, withPriority, withRequestBatching, withResponseTransform, withRetry, withRetryStream, withTracing };
|