@providerprotocol/ai 0.0.21 → 0.0.22
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/anthropic/index.d.ts +1 -1
- package/dist/anthropic/index.js +100 -29
- package/dist/anthropic/index.js.map +1 -1
- package/dist/{chunk-Y3GBJNA2.js → chunk-7WYBJPJJ.js} +2 -2
- package/dist/chunk-I2VHCGQE.js +49 -0
- package/dist/chunk-I2VHCGQE.js.map +1 -0
- package/dist/{chunk-SKY2JLA7.js → chunk-MKDLXV4O.js} +1 -1
- package/dist/chunk-MKDLXV4O.js.map +1 -0
- package/dist/{chunk-Z7RBRCRN.js → chunk-NWS5IKNR.js} +37 -11
- package/dist/chunk-NWS5IKNR.js.map +1 -0
- package/dist/{chunk-EDENPF3E.js → chunk-RFWLEFAB.js} +96 -42
- package/dist/chunk-RFWLEFAB.js.map +1 -0
- package/dist/{chunk-Z4ILICF5.js → chunk-RS7C25LS.js} +35 -10
- package/dist/chunk-RS7C25LS.js.map +1 -0
- package/dist/google/index.d.ts +20 -6
- package/dist/google/index.js +261 -65
- package/dist/google/index.js.map +1 -1
- package/dist/http/index.d.ts +3 -3
- package/dist/http/index.js +4 -4
- package/dist/index.d.ts +7 -5
- package/dist/index.js +286 -119
- package/dist/index.js.map +1 -1
- package/dist/ollama/index.d.ts +1 -1
- package/dist/ollama/index.js +66 -12
- package/dist/ollama/index.js.map +1 -1
- package/dist/openai/index.d.ts +1 -1
- package/dist/openai/index.js +183 -43
- package/dist/openai/index.js.map +1 -1
- package/dist/openrouter/index.d.ts +1 -1
- package/dist/openrouter/index.js +161 -31
- package/dist/openrouter/index.js.map +1 -1
- package/dist/{provider-DGQHYE6I.d.ts → provider-DWEAzeM5.d.ts} +11 -1
- package/dist/proxy/index.d.ts +2 -2
- package/dist/proxy/index.js +171 -12
- package/dist/proxy/index.js.map +1 -1
- package/dist/{retry-Pcs3hnbu.d.ts → retry-DmPmqZL6.d.ts} +11 -2
- package/dist/{stream-Di9acos2.d.ts → stream-DbkLOIbJ.d.ts} +15 -5
- package/dist/xai/index.d.ts +1 -1
- package/dist/xai/index.js +139 -30
- package/dist/xai/index.js.map +1 -1
- package/package.json +1 -1
- package/dist/chunk-EDENPF3E.js.map +0 -1
- package/dist/chunk-SKY2JLA7.js.map +0 -1
- package/dist/chunk-Z4ILICF5.js.map +0 -1
- package/dist/chunk-Z7RBRCRN.js.map +0 -1
- package/dist/{chunk-Y3GBJNA2.js.map → chunk-7WYBJPJJ.js.map} +0 -0
@@ -1,12 +1,25 @@
 // src/http/sse.ts
+var MAX_SSE_BUFFER_CHARS = 1024 * 1024;
 async function* parseSSEStream(body) {
   const reader = body.getReader();
   const decoder = new TextDecoder();
   let buffer = "";
+  const appendToBuffer = (chunk) => {
+    if (buffer.length + chunk.length > MAX_SSE_BUFFER_CHARS) {
+      throw new Error(
+        `SSE buffer exceeded maximum size (${MAX_SSE_BUFFER_CHARS} chars)`
+      );
+    }
+    buffer += chunk;
+  };
   try {
     while (true) {
       const { done, value } = await reader.read();
       if (done) {
+        const tail = decoder.decode();
+        if (tail) {
+          appendToBuffer(tail);
+        }
         if (buffer.trim()) {
           const event = parseSSEEvent(buffer);
           if (event !== null && event !== void 0) {
@@ -15,7 +28,10 @@ async function* parseSSEStream(body) {
         }
         break;
       }
-
+      const chunk = decoder.decode(value, { stream: true });
+      if (chunk) {
+        appendToBuffer(chunk);
+      }
       const events = buffer.split(/\r?\n\r?\n/);
       buffer = events.pop() ?? "";
       for (const eventText of events) {
@@ -38,16 +54,22 @@ function parseSSEEvent(eventText) {
   let data = "";
   let eventType = "";
   for (const line of lines) {
-    const
-    if (
-
-
-
-
-
+    const normalizedLine = line.endsWith("\r") ? line.slice(0, -1) : line;
+    if (normalizedLine.startsWith("event:")) {
+      let value = normalizedLine.slice(6);
+      if (value.startsWith(" ")) value = value.slice(1);
+      eventType = value;
+    } else if (normalizedLine.startsWith("data:")) {
+      let value = normalizedLine.slice(5);
+      if (value.startsWith(" ")) value = value.slice(1);
+      data += (data ? "\n" : "") + value;
+    } else if (normalizedLine.startsWith(":")) {
       continue;
-    } else
-
+    } else {
+      const trimmedStart = normalizedLine.trimStart();
+      if (trimmedStart.startsWith("{") || trimmedStart.startsWith("[")) {
+        data += (data ? "\n" : "") + trimmedStart;
+      }
     }
   }
   if (!data) {
@@ -78,6 +100,10 @@ async function* parseSimpleTextStream(body) {
         yield text;
       }
     }
+    const remaining = decoder.decode();
+    if (remaining) {
+      yield remaining;
+    }
   } finally {
     reader.releaseLock();
   }
@@ -87,4 +113,4 @@ export {
   parseSSEStream,
   parseSimpleTextStream
 };
-//# sourceMappingURL=chunk-
+//# sourceMappingURL=chunk-NWS5IKNR.js.map
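The SSE parser changes above cap the internal buffer at 1 MiB (`MAX_SSE_BUFFER_CHARS`), flush the `TextDecoder` tail when the stream ends, normalize CRLF line endings, and concatenate multi-line `data:` fields. A minimal consumption sketch follows; the `@providerprotocol/ai/http` import path is an assumption based on the `dist/http` entry in the file list above.

```typescript
// Sketch only: the import path is assumed from the dist/http entry point.
import { parseSSEStream } from "@providerprotocol/ai/http";

export async function collectEvents(body: ReadableStream<Uint8Array>): Promise<unknown[]> {
  const events: unknown[] = [];
  try {
    for await (const event of parseSSEStream(body)) {
      events.push(event); // parsed JSON of each SSE data block
    }
  } catch (err) {
    // New in 0.0.22: the parser throws once an unterminated event exceeds
    // MAX_SSE_BUFFER_CHARS (1024 * 1024) instead of buffering indefinitely.
    if (err instanceof Error && err.message.startsWith("SSE buffer exceeded")) {
      console.warn("oversized SSE event; aborting read");
    }
    throw err;
  }
  return events;
}
```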
@@ -0,0 +1 @@
+
{"version":3,"sources":["../src/http/sse.ts"],"sourcesContent":["/**\n * Server-Sent Events (SSE) stream parsing utilities.\n * @module http/sse\n */\n\nconst MAX_SSE_BUFFER_CHARS = 1024 * 1024;\n\n/**\n * Parses a Server-Sent Events stream into JSON objects.\n *\n * This async generator handles the standard SSE wire format:\n * - Lines prefixed with \"data:\" contain event data\n * - Lines prefixed with \"event:\" specify event types\n * - Lines prefixed with \":\" are comments (used for keep-alive)\n * - Events are separated by double newlines\n * - Stream terminates on \"[DONE]\" message (OpenAI convention)\n *\n * Also handles non-standard formats used by some providers:\n * - Raw JSON without \"data:\" prefix (Google)\n * - Multi-line data fields\n *\n * @param body - ReadableStream from fetch response body\n * @yields Parsed JSON objects from each SSE event\n *\n * @example\n * ```typescript\n * const response = await doStreamFetch(url, init, config, 'openai', 'llm');\n *\n * for await (const event of parseSSEStream(response.body!)) {\n * // event is parsed JSON from each SSE data field\n * const chunk = event as OpenAIStreamChunk;\n * const delta = chunk.choices[0]?.delta?.content;\n * if (delta) {\n * process.stdout.write(delta);\n * }\n * }\n * ```\n */\nexport async function* parseSSEStream(\n body: ReadableStream<Uint8Array>\n): AsyncGenerator<unknown, void, unknown> {\n const reader = body.getReader();\n const decoder = new TextDecoder();\n let buffer = '';\n const appendToBuffer = (chunk: string): void => {\n if (buffer.length + chunk.length > MAX_SSE_BUFFER_CHARS) {\n throw new Error(\n `SSE buffer exceeded maximum size (${MAX_SSE_BUFFER_CHARS} chars)`\n );\n }\n buffer += chunk;\n };\n\n try {\n while (true) {\n const { done, value } = await reader.read();\n\n if (done) {\n const tail = decoder.decode();\n if (tail) {\n appendToBuffer(tail);\n }\n // Process any remaining data in buffer\n if (buffer.trim()) {\n const event = parseSSEEvent(buffer);\n if (event !== null && event !== undefined) {\n yield event;\n }\n }\n break;\n }\n\n const chunk = decoder.decode(value, { stream: true });\n if (chunk) {\n appendToBuffer(chunk);\n }\n\n // Process complete events (separated by double newlines or \\r\\n\\r\\n)\n const events = buffer.split(/\\r?\\n\\r?\\n/);\n\n // Keep the last partial event in the buffer\n buffer = events.pop() ?? '';\n\n for (const eventText of events) {\n if (!eventText.trim()) continue;\n\n const event = parseSSEEvent(eventText);\n if (event === 'DONE') {\n return;\n }\n if (event !== null && event !== undefined) {\n yield event;\n }\n }\n }\n } finally {\n reader.releaseLock();\n }\n}\n\n/**\n * Parses a single SSE event block into a JSON object.\n *\n * Handles the following line prefixes:\n * - \"data:\" - Event data (multiple data lines are concatenated)\n * - \"event:\" - Event type (added to result as _eventType)\n * - \":\" - Comment (ignored, often used for keep-alive)\n * - Raw JSON starting with { or [ (provider-specific fallback)\n *\n * @param eventText - Raw text of a single SSE event block\n * @returns Parsed JSON object, 'DONE' for termination signal, or null for invalid/empty events\n */\nfunction parseSSEEvent(eventText: string): unknown | 'DONE' | null {\n const lines = eventText.split('\\n');\n let data = '';\n let eventType = '';\n\n for (const line of lines) {\n const normalizedLine = line.endsWith('\\r') ? 
line.slice(0, -1) : line;\n if (normalizedLine.startsWith('event:')) {\n let value = normalizedLine.slice(6);\n if (value.startsWith(' ')) value = value.slice(1);\n eventType = value;\n } else if (normalizedLine.startsWith('data:')) {\n let value = normalizedLine.slice(5);\n if (value.startsWith(' ')) value = value.slice(1);\n data += (data ? '\\n' : '') + value;\n } else if (normalizedLine.startsWith(':')) {\n continue;\n } else {\n const trimmedStart = normalizedLine.trimStart();\n if (trimmedStart.startsWith('{') || trimmedStart.startsWith('[')) {\n data += (data ? '\\n' : '') + trimmedStart;\n }\n }\n }\n\n if (!data) {\n return null;\n }\n\n if (data === '[DONE]') {\n return 'DONE';\n }\n\n try {\n const parsed = JSON.parse(data);\n\n if (eventType) {\n return { _eventType: eventType, ...parsed };\n }\n\n return parsed;\n } catch {\n return null;\n }\n}\n\n/**\n * Parses a simple text stream without SSE formatting.\n *\n * This is a simpler alternative to {@link parseSSEStream} for providers\n * that stream raw text deltas without SSE event wrappers. Each chunk\n * from the response body is decoded and yielded as-is.\n *\n * Use this for:\n * - Plain text streaming responses\n * - Providers with custom streaming formats\n * - Testing and debugging stream handling\n *\n * @param body - ReadableStream from fetch response body\n * @yields Decoded text strings from each stream chunk\n *\n * @example\n * ```typescript\n * const response = await doStreamFetch(url, init, config, 'custom', 'llm');\n *\n * for await (const text of parseSimpleTextStream(response.body!)) {\n * process.stdout.write(text);\n * }\n * ```\n */\nexport async function* parseSimpleTextStream(\n body: ReadableStream<Uint8Array>\n): AsyncGenerator<string, void, unknown> {\n const reader = body.getReader();\n const decoder = new TextDecoder();\n\n try {\n while (true) {\n const { done, value } = await reader.read();\n\n if (done) break;\n\n const text = decoder.decode(value, { stream: true });\n if (text) {\n yield text;\n }\n }\n const remaining = decoder.decode();\n if (remaining) {\n yield remaining;\n }\n } finally {\n reader.releaseLock();\n 
}\n}\n"],"mappings":";AAKA,IAAM,uBAAuB,OAAO;AAiCpC,gBAAuB,eACrB,MACwC;AACxC,QAAM,SAAS,KAAK,UAAU;AAC9B,QAAM,UAAU,IAAI,YAAY;AAChC,MAAI,SAAS;AACb,QAAM,iBAAiB,CAAC,UAAwB;AAC9C,QAAI,OAAO,SAAS,MAAM,SAAS,sBAAsB;AACvD,YAAM,IAAI;AAAA,QACR,qCAAqC,oBAAoB;AAAA,MAC3D;AAAA,IACF;AACA,cAAU;AAAA,EACZ;AAEA,MAAI;AACF,WAAO,MAAM;AACX,YAAM,EAAE,MAAM,MAAM,IAAI,MAAM,OAAO,KAAK;AAE1C,UAAI,MAAM;AACR,cAAM,OAAO,QAAQ,OAAO;AAC5B,YAAI,MAAM;AACR,yBAAe,IAAI;AAAA,QACrB;AAEA,YAAI,OAAO,KAAK,GAAG;AACjB,gBAAM,QAAQ,cAAc,MAAM;AAClC,cAAI,UAAU,QAAQ,UAAU,QAAW;AACzC,kBAAM;AAAA,UACR;AAAA,QACF;AACA;AAAA,MACF;AAEA,YAAM,QAAQ,QAAQ,OAAO,OAAO,EAAE,QAAQ,KAAK,CAAC;AACpD,UAAI,OAAO;AACT,uBAAe,KAAK;AAAA,MACtB;AAGA,YAAM,SAAS,OAAO,MAAM,YAAY;AAGxC,eAAS,OAAO,IAAI,KAAK;AAEzB,iBAAW,aAAa,QAAQ;AAC9B,YAAI,CAAC,UAAU,KAAK,EAAG;AAEvB,cAAM,QAAQ,cAAc,SAAS;AACrC,YAAI,UAAU,QAAQ;AACpB;AAAA,QACF;AACA,YAAI,UAAU,QAAQ,UAAU,QAAW;AACzC,gBAAM;AAAA,QACR;AAAA,MACF;AAAA,IACF;AAAA,EACF,UAAE;AACA,WAAO,YAAY;AAAA,EACrB;AACF;AAcA,SAAS,cAAc,WAA4C;AACjE,QAAM,QAAQ,UAAU,MAAM,IAAI;AAClC,MAAI,OAAO;AACX,MAAI,YAAY;AAEhB,aAAW,QAAQ,OAAO;AACxB,UAAM,iBAAiB,KAAK,SAAS,IAAI,IAAI,KAAK,MAAM,GAAG,EAAE,IAAI;AACjE,QAAI,eAAe,WAAW,QAAQ,GAAG;AACvC,UAAI,QAAQ,eAAe,MAAM,CAAC;AAClC,UAAI,MAAM,WAAW,GAAG,EAAG,SAAQ,MAAM,MAAM,CAAC;AAChD,kBAAY;AAAA,IACd,WAAW,eAAe,WAAW,OAAO,GAAG;AAC7C,UAAI,QAAQ,eAAe,MAAM,CAAC;AAClC,UAAI,MAAM,WAAW,GAAG,EAAG,SAAQ,MAAM,MAAM,CAAC;AAChD,eAAS,OAAO,OAAO,MAAM;AAAA,IAC/B,WAAW,eAAe,WAAW,GAAG,GAAG;AACzC;AAAA,IACF,OAAO;AACL,YAAM,eAAe,eAAe,UAAU;AAC9C,UAAI,aAAa,WAAW,GAAG,KAAK,aAAa,WAAW,GAAG,GAAG;AAChE,iBAAS,OAAO,OAAO,MAAM;AAAA,MAC/B;AAAA,IACF;AAAA,EACF;AAEA,MAAI,CAAC,MAAM;AACT,WAAO;AAAA,EACT;AAEA,MAAI,SAAS,UAAU;AACrB,WAAO;AAAA,EACT;AAEA,MAAI;AACF,UAAM,SAAS,KAAK,MAAM,IAAI;AAE9B,QAAI,WAAW;AACb,aAAO,EAAE,YAAY,WAAW,GAAG,OAAO;AAAA,IAC5C;AAEA,WAAO;AAAA,EACT,QAAQ;AACN,WAAO;AAAA,EACT;AACF;AA0BA,gBAAuB,sBACrB,MACuC;AACvC,QAAM,SAAS,KAAK,UAAU;AAC9B,QAAM,UAAU,IAAI,YAAY;AAEhC,MAAI;AACF,WAAO,MAAM;AACX,YAAM,EAAE,MAAM,MAAM,IAAI,MAAM,OAAO,KAAK;AAE1C,UAAI,KAAM;AAEV,YAAM,OAAO,QAAQ,OAAO,OAAO,EAAE,QAAQ,KAAK,CAAC;AACnD,UAAI,MAAM;AACR,cAAM;AAAA,MACR;AAAA,IACF;AACA,UAAM,YAAY,QAAQ,OAAO;AACjC,QAAI,WAAW;AACb,YAAM;AAAA,IACR;AAAA,EACF,UAAE;AACA,WAAO,YAAY;AAAA,EACrB;AACF;","names":[]}
@@ -65,11 +65,30 @@ var UPPError = class _UPPError extends Error {
   }
 };

+// src/utils/error.ts
+function toError(value) {
+  if (value instanceof Error) {
+    return value;
+  }
+  if (typeof value === "string") {
+    return new Error(value);
+  }
+  if (typeof value === "object" && value !== null && "message" in value) {
+    const message = value.message;
+    if (typeof message === "string") {
+      return new Error(message);
+    }
+  }
+  return new Error(String(value));
+}
+
 // src/http/errors.ts
 function statusToErrorCode(status) {
   switch (status) {
     case 400:
       return "INVALID_REQUEST";
+    case 402:
+      return "QUOTA_EXCEEDED";
     case 401:
     case 403:
       return "AUTHENTICATION_FAILED";
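The new `toError` helper (added in `src/utils/error.ts` and exported from this chunk) normalizes arbitrary thrown values into `Error` instances before they are wrapped or re-reported. A usage sketch; whether `toError` is re-exported from the package root is an assumption.

```typescript
import { toError } from "@providerprotocol/ai"; // root re-export assumed

try {
  JSON.parse("not json");
} catch (caught) {
  // `caught` is typed `unknown`; toError wraps strings, message-bearing
  // objects, and anything else into a real Error.
  const err = toError(caught);
  console.error(err.message);
}
```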
@@ -77,8 +96,14 @@ function statusToErrorCode(status) {
       return "MODEL_NOT_FOUND";
     case 408:
       return "TIMEOUT";
+    case 409:
+      return "INVALID_REQUEST";
+    case 422:
+      return "INVALID_REQUEST";
     case 413:
       return "CONTEXT_LENGTH_EXCEEDED";
+    case 451:
+      return "CONTENT_FILTERED";
     case 429:
       return "RATE_LIMITED";
     case 500:
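`statusToErrorCode` now also maps 402, 409, 422, and 451. Assuming it remains re-exported through the `http` entry point, the new mappings behave as follows.

```typescript
import { statusToErrorCode } from "@providerprotocol/ai/http"; // path assumed

statusToErrorCode(402); // "QUOTA_EXCEEDED"   (new)
statusToErrorCode(409); // "INVALID_REQUEST"  (new)
statusToErrorCode(422); // "INVALID_REQUEST"  (new)
statusToErrorCode(451); // "CONTENT_FILTERED" (new)
statusToErrorCode(429); // "RATE_LIMITED"     (unchanged)
```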
@@ -93,6 +118,7 @@ function statusToErrorCode(status) {
 async function normalizeHttpError(response, provider, modality) {
   const code = statusToErrorCode(response.status);
   let message = `HTTP ${response.status}: ${response.statusText}`;
+  let bodyReadError;
   try {
     const body = await response.text();
     if (body) {
@@ -108,9 +134,10 @@ async function normalizeHttpError(response, provider, modality) {
         }
       }
     }
-  } catch {
+  } catch (error) {
+    bodyReadError = toError(error);
   }
-  return new UPPError(message, code, provider, modality, response.status);
+  return new UPPError(message, code, provider, modality, response.status, bodyReadError);
 }
 function networkError(error, provider, modality) {
   return new UPPError(
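`normalizeHttpError` now keeps the error thrown while reading the response body and passes it as the `UPPError` constructor's `cause` argument instead of discarding it. A hedged sketch of what callers can observe; `llm.generate` is a hypothetical stand-in for any call routed through `doFetch`.

```typescript
import { UPPError } from "@providerprotocol/ai";

declare const llm: { generate(prompt: string): Promise<string> }; // hypothetical client

async function run() {
  try {
    await llm.generate("Hello");
  } catch (err) {
    if (err instanceof UPPError) {
      // If reading the failed response's body itself threw, that failure is
      // now surfaced on `cause` rather than being silently swallowed.
      console.error(err.code, err.statusCode, err.cause?.message);
    }
  }
}
```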
@@ -136,22 +163,27 @@ function cancelledError(provider, modality) {

 // src/http/fetch.ts
 var DEFAULT_TIMEOUT = 12e4;
+var MAX_RETRY_AFTER_SECONDS = 3600;
+function hasFork(strategy) {
+  return !!strategy && typeof strategy.fork === "function";
+}
 async function doFetch(url, init, config, provider, modality) {
   const fetchFn = config.fetch ?? fetch;
   const timeout = config.timeout ?? DEFAULT_TIMEOUT;
-  const
-
-    const delay = await strategy.beforeRequest();
-    if (delay > 0) {
-      await sleep(delay);
-    }
-  }
-  let lastError;
+  const baseStrategy = config.retryStrategy;
+  const strategy = hasFork(baseStrategy) ? baseStrategy.fork() : baseStrategy;
   let attempt = 0;
   while (true) {
     attempt++;
+    if (strategy?.beforeRequest) {
+      const delay = await strategy.beforeRequest();
+      if (delay > 0) {
+        await sleep(delay);
+      }
+    }
+    let response;
     try {
-
+      response = await fetchWithTimeout(
         fetchFn,
         url,
         init,
@@ -159,52 +191,49 @@ async function doFetch(url, init, config, provider, modality) {
         provider,
         modality
       );
-      if (!response.ok) {
-        const error = await normalizeHttpError(response, provider, modality);
-        const retryAfter = response.headers.get("Retry-After");
-        if (retryAfter && strategy) {
-          const seconds = parseInt(retryAfter, 10);
-          if (!isNaN(seconds) && "setRetryAfter" in strategy) {
-            strategy.setRetryAfter(
-              seconds
-            );
-          }
-        }
-        if (strategy) {
-          const delay = await strategy.onRetry(error, attempt);
-          if (delay !== null) {
-            await sleep(delay);
-            lastError = error;
-            continue;
-          }
-        }
-        throw error;
-      }
-      strategy?.reset?.();
-      return response;
     } catch (error) {
       if (error instanceof UPPError) {
         if (strategy) {
           const delay = await strategy.onRetry(error, attempt);
           if (delay !== null) {
             await sleep(delay);
-            lastError = error;
             continue;
           }
         }
         throw error;
       }
-      const uppError = networkError(error, provider, modality);
+      const uppError = networkError(toError(error), provider, modality);
       if (strategy) {
         const delay = await strategy.onRetry(uppError, attempt);
         if (delay !== null) {
           await sleep(delay);
-          lastError = uppError;
           continue;
         }
       }
       throw uppError;
     }
+    if (!response.ok) {
+      const error = await normalizeHttpError(response, provider, modality);
+      const retryAfterSeconds = parseRetryAfter(
+        response.headers.get("Retry-After"),
+        config.retryAfterMaxSeconds ?? MAX_RETRY_AFTER_SECONDS
+      );
+      if (retryAfterSeconds !== null && strategy && "setRetryAfter" in strategy) {
+        strategy.setRetryAfter(
+          retryAfterSeconds
+        );
+      }
+      if (strategy) {
+        const delay = await strategy.onRetry(error, attempt);
+        if (delay !== null) {
+          await sleep(delay);
+          continue;
+        }
+      }
+      throw error;
+    }
+    strategy?.reset?.();
+    return response;
   }
 }
 async function fetchWithTimeout(fetchFn, url, init, timeout, provider, modality) {
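`doFetch` is reorganized: the configured retry strategy is forked per request when it exposes `fork()`, the `!response.ok` handling moves out of the `try` block so HTTP errors are no longer at risk of being re-wrapped as network errors, and `Retry-After` values are parsed and clamped to `config.retryAfterMaxSeconds` (default 3600 seconds). A configuration sketch, assuming `createOpenAI` and `RetryAfterStrategy` are importable from the package root as in the package's own doc comments.

```typescript
import { createOpenAI, RetryAfterStrategy } from "@providerprotocol/ai"; // paths assumed

const provider = createOpenAI({
  // A single strategy instance can be shared: doFetch forks it per request,
  // so Retry-After state set during one request no longer affects another.
  retryStrategy: new RetryAfterStrategy({ maxAttempts: 5, fallbackDelay: 2000 }),
  // New knob read by doFetch in this version; caps oversized Retry-After headers.
  retryAfterMaxSeconds: 120,
});
```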
@@ -214,8 +243,9 @@ async function fetchWithTimeout(fetchFn, url, init, timeout, provider, modality)
   }
   const controller = new AbortController();
   const timeoutId = setTimeout(() => controller.abort(), timeout);
+  const onAbort = () => controller.abort();
   if (existingSignal) {
-    existingSignal.addEventListener("abort",
+    existingSignal.addEventListener("abort", onAbort, { once: true });
   }
   try {
     const response = await fetchFn(url, {
@@ -224,7 +254,7 @@ async function fetchWithTimeout(fetchFn, url, init, timeout, provider, modality)
     });
     return response;
   } catch (error) {
-    if (error.name === "AbortError") {
+    if (toError(error).name === "AbortError") {
       if (existingSignal?.aborted) {
         throw cancelledError(provider, modality);
       }
@@ -233,6 +263,9 @@ async function fetchWithTimeout(fetchFn, url, init, timeout, provider, modality)
     throw error;
   } finally {
     clearTimeout(timeoutId);
+    if (existingSignal) {
+      existingSignal.removeEventListener("abort", onAbort);
+    }
   }
 }
 function sleep(ms) {
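`fetchWithTimeout` now registers its abort listener as a named handler with `{ once: true }` and removes it in `finally`, so long-lived caller signals no longer accumulate listeners, and unknown throwables go through `toError` before the `AbortError` check. The caller-visible split between cancellation and timeout is unchanged; a sketch (import path assumed).

```typescript
import { doFetch, UPPError } from "@providerprotocol/ai/http"; // path assumed

async function fetchWithCallerCancellation(signal: AbortSignal) {
  try {
    return await doFetch(
      "https://api.example.com/v1/chat", // illustrative URL
      { method: "POST", signal },
      { timeout: 30_000 },
      "openai",
      "llm"
    );
  } catch (err) {
    if (err instanceof UPPError) {
      // "CANCELLED" when the caller's signal fired, "TIMEOUT" when the 30 s budget ran out.
      console.log(err.code);
    }
    throw err;
  }
}
```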
@@ -241,7 +274,8 @@ function sleep(ms) {
 async function doStreamFetch(url, init, config, provider, modality) {
   const fetchFn = config.fetch ?? fetch;
   const timeout = config.timeout ?? DEFAULT_TIMEOUT;
-  const
+  const baseStrategy = config.retryStrategy;
+  const strategy = hasFork(baseStrategy) ? baseStrategy.fork() : baseStrategy;
   if (strategy?.beforeRequest) {
     const delay = await strategy.beforeRequest();
     if (delay > 0) {
@@ -262,12 +296,32 @@ async function doStreamFetch(url, init, config, provider, modality) {
     if (error instanceof UPPError) {
       throw error;
     }
-    throw networkError(error, provider, modality);
+    throw networkError(toError(error), provider, modality);
+  }
+}
+function parseRetryAfter(headerValue, maxSeconds) {
+  if (!headerValue) {
+    return null;
+  }
+  const seconds = parseInt(headerValue, 10);
+  if (!Number.isNaN(seconds)) {
+    return Math.min(maxSeconds, Math.max(0, seconds));
+  }
+  const dateMillis = Date.parse(headerValue);
+  if (Number.isNaN(dateMillis)) {
+    return null;
+  }
+  const deltaMs = dateMillis - Date.now();
+  if (deltaMs <= 0) {
+    return 0;
   }
+  const deltaSeconds = Math.ceil(deltaMs / 1e3);
+  return Math.min(maxSeconds, Math.max(0, deltaSeconds));
 }

 export {
   UPPError,
+  toError,
   statusToErrorCode,
   normalizeHttpError,
   networkError,
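The new `parseRetryAfter` helper accepts both the delta-seconds and HTTP-date forms of `Retry-After` and clamps the result to `maxSeconds`. It is internal to the chunk (not exported); the sketch below mirrors the compiled code so the edge cases are visible.

```typescript
// Mirror of the internal helper, for illustration only.
function parseRetryAfter(headerValue: string | null, maxSeconds: number): number | null {
  if (!headerValue) return null;
  const seconds = parseInt(headerValue, 10);
  if (!Number.isNaN(seconds)) return Math.min(maxSeconds, Math.max(0, seconds)); // delta-seconds form
  const dateMillis = Date.parse(headerValue); // HTTP-date form
  if (Number.isNaN(dateMillis)) return null;
  const deltaMs = dateMillis - Date.now();
  if (deltaMs <= 0) return 0;
  return Math.min(maxSeconds, Math.ceil(deltaMs / 1000));
}

parseRetryAfter("120", 3600);                                        // 120
parseRetryAfter("7200", 3600);                                       // 3600, clamped
parseRetryAfter(new Date(Date.now() + 30_000).toUTCString(), 3600);  // ~30, rounded up
parseRetryAfter("garbage", 3600);                                    // null, strategy keeps its fallback
```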
@@ -276,4 +330,4 @@ export {
   doFetch,
   doStreamFetch
 };
-//# sourceMappingURL=chunk-
+//# sourceMappingURL=chunk-RFWLEFAB.js.map
@@ -0,0 +1 @@
+
{"version":3,"sources":["../src/types/errors.ts","../src/utils/error.ts","../src/http/errors.ts","../src/http/fetch.ts"],"sourcesContent":["/**\n * @fileoverview Error types for the Unified Provider Protocol.\n *\n * Provides normalized error codes and a unified error class for handling\n * errors across different AI providers in a consistent manner.\n *\n * @module types/errors\n */\n\n/**\n * Normalized error codes for cross-provider error handling.\n *\n * These codes provide a consistent way to identify and handle errors\n * regardless of which AI provider generated them.\n *\n * @example\n * ```typescript\n * try {\n * await llm.generate('Hello');\n * } catch (error) {\n * if (error instanceof UPPError) {\n * switch (error.code) {\n * case 'RATE_LIMITED':\n * await delay(error.retryAfter);\n * break;\n * case 'AUTHENTICATION_FAILED':\n * throw new Error('Invalid API key');\n * }\n * }\n * }\n * ```\n */\nexport type ErrorCode =\n /** API key is invalid or expired */\n | 'AUTHENTICATION_FAILED'\n /** Rate limit exceeded, retry after delay */\n | 'RATE_LIMITED'\n /** Input exceeds model's context window */\n | 'CONTEXT_LENGTH_EXCEEDED'\n /** Requested model does not exist */\n | 'MODEL_NOT_FOUND'\n /** Request parameters are malformed */\n | 'INVALID_REQUEST'\n /** Provider returned an unexpected response format */\n | 'INVALID_RESPONSE'\n /** Content was blocked by safety filters */\n | 'CONTENT_FILTERED'\n /** Account quota or credits exhausted */\n | 'QUOTA_EXCEEDED'\n /** Provider-specific error not covered by other codes */\n | 'PROVIDER_ERROR'\n /** Network connectivity issue */\n | 'NETWORK_ERROR'\n /** Request exceeded timeout limit */\n | 'TIMEOUT'\n /** Request was cancelled via AbortSignal */\n | 'CANCELLED';\n\n/**\n * Modality types supported by UPP.\n *\n * Each modality represents a different type of AI capability that\n * can be provided by a UPP-compatible provider.\n */\nexport type Modality =\n /** Large language model for text generation */\n | 'llm'\n /** Text/image embedding model */\n | 'embedding'\n /** Image generation model */\n | 'image'\n /** Audio processing/generation model */\n | 'audio'\n /** Video processing/generation model */\n | 'video';\n\n/**\n * Unified Provider Protocol Error.\n *\n * All provider-specific errors are normalized to this type, providing\n * a consistent interface for error handling across different AI providers.\n *\n * @example\n * ```typescript\n * throw new UPPError(\n * 'API key is invalid',\n * 'AUTHENTICATION_FAILED',\n * 'openai',\n * 'llm',\n * 401\n * );\n * ```\n *\n * @example\n * ```typescript\n * // Wrapping a provider error\n * try {\n * await openai.chat.completions.create({ ... 
});\n * } catch (err) {\n * throw new UPPError(\n * 'OpenAI request failed',\n * 'PROVIDER_ERROR',\n * 'openai',\n * 'llm',\n * err.status,\n * err\n * );\n * }\n * ```\n */\nexport class UPPError extends Error {\n /** Normalized error code for programmatic handling */\n readonly code: ErrorCode;\n\n /** Name of the provider that generated the error */\n readonly provider: string;\n\n /** The modality that was being used when the error occurred */\n readonly modality: Modality;\n\n /** HTTP status code from the provider's response, if available */\n readonly statusCode?: number;\n\n /** The original error that caused this UPPError, if wrapping another error */\n override readonly cause?: Error;\n\n /** Error class name, always 'UPPError' */\n override readonly name = 'UPPError';\n\n /**\n * Creates a new UPPError instance.\n *\n * @param message - Human-readable error description\n * @param code - Normalized error code for programmatic handling\n * @param provider - Name of the provider that generated the error\n * @param modality - The modality that was being used\n * @param statusCode - HTTP status code from the provider's response\n * @param cause - The original error being wrapped\n */\n constructor(\n message: string,\n code: ErrorCode,\n provider: string,\n modality: Modality,\n statusCode?: number,\n cause?: Error\n ) {\n super(message);\n this.code = code;\n this.provider = provider;\n this.modality = modality;\n this.statusCode = statusCode;\n this.cause = cause;\n\n if (Error.captureStackTrace) {\n Error.captureStackTrace(this, UPPError);\n }\n }\n\n /**\n * Creates a string representation of the error.\n *\n * @returns Formatted error string including code, message, provider, and modality\n */\n override toString(): string {\n let str = `UPPError [${this.code}]: ${this.message}`;\n str += ` (provider: ${this.provider}, modality: ${this.modality}`;\n if (this.statusCode) {\n str += `, status: ${this.statusCode}`;\n }\n str += ')';\n return str;\n }\n\n /**\n * Converts the error to a JSON-serializable object.\n *\n * @returns Plain object representation suitable for logging or transmission\n */\n toJSON(): Record<string, unknown> {\n return {\n name: this.name,\n message: this.message,\n code: this.code,\n provider: this.provider,\n modality: this.modality,\n statusCode: this.statusCode,\n cause: this.cause?.message,\n };\n }\n}\n","/**\n * @fileoverview Error normalization utilities.\n *\n * @module utils/error\n */\n\n/**\n * Converts an unknown thrown value into an Error instance.\n *\n * @param value - Unknown error value\n * @returns An Error instance\n */\nexport function toError(value: unknown): Error {\n if (value instanceof Error) {\n return value;\n }\n if (typeof value === 'string') {\n return new Error(value);\n }\n if (typeof value === 'object' && value !== null && 'message' in value) {\n const message = (value as { message?: unknown }).message;\n if (typeof message === 'string') {\n return new Error(message);\n }\n }\n return new Error(String(value));\n}\n","/**\n * HTTP error handling and normalization utilities.\n * @module http/errors\n */\n\nimport { UPPError, type ErrorCode, type Modality } from '../types/errors.ts';\nimport { toError } from '../utils/error.ts';\n\n/**\n * Maps HTTP status codes to standardized UPP error codes.\n *\n * This function provides consistent error categorization across all providers:\n * - 400 -> INVALID_REQUEST (bad request format or parameters)\n * - 401, 403 -> AUTHENTICATION_FAILED (invalid or missing credentials)\n * - 404 -> 
MODEL_NOT_FOUND (requested model does not exist)\n * - 408 -> TIMEOUT (request timed out)\n * - 413 -> CONTEXT_LENGTH_EXCEEDED (input too long)\n * - 429 -> RATE_LIMITED (too many requests)\n * - 5xx -> PROVIDER_ERROR (server-side issues)\n *\n * @param status - HTTP status code from the response\n * @returns The corresponding UPP ErrorCode\n *\n * @example\n * ```typescript\n * const errorCode = statusToErrorCode(429);\n * // Returns 'RATE_LIMITED'\n *\n * const serverError = statusToErrorCode(503);\n * // Returns 'PROVIDER_ERROR'\n * ```\n */\nexport function statusToErrorCode(status: number): ErrorCode {\n switch (status) {\n case 400:\n return 'INVALID_REQUEST';\n case 402:\n return 'QUOTA_EXCEEDED';\n case 401:\n case 403:\n return 'AUTHENTICATION_FAILED';\n case 404:\n return 'MODEL_NOT_FOUND';\n case 408:\n return 'TIMEOUT';\n case 409:\n return 'INVALID_REQUEST';\n case 422:\n return 'INVALID_REQUEST';\n case 413:\n return 'CONTEXT_LENGTH_EXCEEDED';\n case 451:\n return 'CONTENT_FILTERED';\n case 429:\n return 'RATE_LIMITED';\n case 500:\n case 502:\n case 503:\n case 504:\n return 'PROVIDER_ERROR';\n default:\n return 'PROVIDER_ERROR';\n }\n}\n\n/**\n * Normalizes HTTP error responses into standardized UPPError objects.\n *\n * This function performs several operations:\n * 1. Maps the HTTP status code to an appropriate ErrorCode\n * 2. Attempts to extract a meaningful error message from the response body\n * 3. Handles various provider-specific error response formats\n *\n * Supported error message formats:\n * - `{ error: { message: \"...\" } }` (OpenAI, Anthropic)\n * - `{ message: \"...\" }` (simple format)\n * - `{ error: { error: { message: \"...\" } } }` (nested format)\n * - `{ detail: \"...\" }` (FastAPI style)\n * - Plain text body (if under 200 characters)\n *\n * @param response - The HTTP Response object with non-2xx status\n * @param provider - Provider identifier for error context\n * @param modality - Request modality for error context\n * @returns A UPPError with normalized code and message\n *\n * @example\n * ```typescript\n * if (!response.ok) {\n * const error = await normalizeHttpError(response, 'openai', 'llm');\n * // error.code might be 'RATE_LIMITED' for 429\n * // error.message contains provider's error message\n * throw error;\n * }\n * ```\n */\nexport async function normalizeHttpError(\n response: Response,\n provider: string,\n modality: Modality\n): Promise<UPPError> {\n const code = statusToErrorCode(response.status);\n let message = `HTTP ${response.status}: ${response.statusText}`;\n let bodyReadError: Error | undefined;\n\n try {\n const body = await response.text();\n if (body) {\n try {\n const json = JSON.parse(body);\n const extractedMessage =\n json.error?.message ||\n json.message ||\n json.error?.error?.message ||\n json.detail;\n\n if (extractedMessage) {\n message = extractedMessage;\n }\n } catch {\n if (body.length < 200) {\n message = body;\n }\n }\n }\n } catch (error) {\n bodyReadError = toError(error);\n }\n\n return new UPPError(message, code, provider, modality, response.status, bodyReadError);\n}\n\n/**\n * Creates a UPPError for network failures (DNS, connection, etc.).\n *\n * Use this when the request fails before receiving any HTTP response,\n * such as DNS resolution failures, connection refused, or network unreachable.\n *\n * @param error - The underlying Error that caused the failure\n * @param provider - Provider identifier for error context\n * @param modality - Request modality for error context\n * @returns A 
UPPError with NETWORK_ERROR code and the original error attached\n */\nexport function networkError(\n error: Error,\n provider: string,\n modality: Modality\n): UPPError {\n return new UPPError(\n `Network error: ${error.message}`,\n 'NETWORK_ERROR',\n provider,\n modality,\n undefined,\n error\n );\n}\n\n/**\n * Creates a UPPError for request timeout.\n *\n * Use this when the request exceeds the configured timeout duration\n * and is aborted by the AbortController.\n *\n * @param timeout - The timeout duration in milliseconds that was exceeded\n * @param provider - Provider identifier for error context\n * @param modality - Request modality for error context\n * @returns A UPPError with TIMEOUT code\n */\nexport function timeoutError(\n timeout: number,\n provider: string,\n modality: Modality\n): UPPError {\n return new UPPError(\n `Request timed out after ${timeout}ms`,\n 'TIMEOUT',\n provider,\n modality\n );\n}\n\n/**\n * Creates a UPPError for user-initiated request cancellation.\n *\n * Use this when the request is aborted via a user-provided AbortSignal,\n * distinct from timeout-based cancellation.\n *\n * @param provider - Provider identifier for error context\n * @param modality - Request modality for error context\n * @returns A UPPError with CANCELLED code\n */\nexport function cancelledError(provider: string, modality: Modality): UPPError {\n return new UPPError('Request was cancelled', 'CANCELLED', provider, modality);\n}\n","/**\n * HTTP fetch utilities with retry, timeout, and error normalization.\n * @module http/fetch\n */\n\nimport type { ProviderConfig, RetryStrategy } from '../types/provider.ts';\nimport type { Modality } from '../types/errors.ts';\nimport { UPPError } from '../types/errors.ts';\nimport {\n normalizeHttpError,\n networkError,\n timeoutError,\n cancelledError,\n} from './errors.ts';\nimport { toError } from '../utils/error.ts';\n\n/** Default request timeout in milliseconds (2 minutes). 
*/\nconst DEFAULT_TIMEOUT = 120000;\nconst MAX_RETRY_AFTER_SECONDS = 3600;\n\ntype ForkableRetryStrategy = RetryStrategy & {\n fork: () => RetryStrategy | undefined;\n};\n\nfunction hasFork(strategy: RetryStrategy | undefined): strategy is ForkableRetryStrategy {\n return !!strategy && typeof (strategy as { fork?: unknown }).fork === 'function';\n}\n\n/**\n * Executes an HTTP fetch request with automatic retry, timeout handling, and error normalization.\n *\n * This function wraps the standard fetch API with additional capabilities:\n * - Configurable timeout with automatic request cancellation (per attempt)\n * - Retry strategy support (exponential backoff, linear, token bucket, etc.)\n * - Pre-request delay support for rate limiting strategies\n * - Automatic Retry-After header parsing and handling\n * - Error normalization to UPPError format\n *\n * @param url - The URL to fetch\n * @param init - Standard fetch RequestInit options (method, headers, body, etc.)\n * @param config - Provider configuration containing fetch customization, timeout, and retry strategy\n * @param provider - Provider identifier for error context (e.g., 'openai', 'anthropic')\n * @param modality - Request modality for error context (e.g., 'llm', 'embedding', 'image')\n * @returns The successful Response object\n *\n * @throws {UPPError} RATE_LIMITED - When rate limited and retries exhausted\n * @throws {UPPError} NETWORK_ERROR - When a network failure occurs\n * @throws {UPPError} TIMEOUT - When the request times out\n * @throws {UPPError} CANCELLED - When the request is aborted via signal\n * @throws {UPPError} Various codes based on HTTP status (see statusToErrorCode)\n *\n * @example\n * ```typescript\n * const response = await doFetch(\n * 'https://api.openai.com/v1/chat/completions',\n * {\n * method: 'POST',\n * headers: { 'Authorization': 'Bearer sk-...' },\n * body: JSON.stringify({ model: 'gpt-4', messages: [] })\n * },\n * { timeout: 30000, retryStrategy: new ExponentialBackoff() },\n * 'openai',\n * 'llm'\n * );\n * ```\n */\nexport async function doFetch(\n url: string,\n init: RequestInit,\n config: ProviderConfig,\n provider: string,\n modality: Modality\n): Promise<Response> {\n const fetchFn = config.fetch ?? fetch;\n const timeout = config.timeout ?? DEFAULT_TIMEOUT;\n const baseStrategy = config.retryStrategy;\n const strategy = hasFork(baseStrategy) ? baseStrategy.fork() : baseStrategy;\n\n let attempt = 0;\n\n while (true) {\n attempt++;\n\n if (strategy?.beforeRequest) {\n const delay = await strategy.beforeRequest();\n if (delay > 0) {\n await sleep(delay);\n }\n }\n\n let response: Response;\n try {\n response = await fetchWithTimeout(\n fetchFn,\n url,\n init,\n timeout,\n provider,\n modality\n );\n } catch (error) {\n if (error instanceof UPPError) {\n if (strategy) {\n const delay = await strategy.onRetry(error, attempt);\n if (delay !== null) {\n await sleep(delay);\n continue;\n }\n }\n throw error;\n }\n\n const uppError = networkError(toError(error), provider, modality);\n\n if (strategy) {\n const delay = await strategy.onRetry(uppError, attempt);\n if (delay !== null) {\n await sleep(delay);\n continue;\n }\n }\n\n throw uppError;\n }\n\n if (!response.ok) {\n const error = await normalizeHttpError(response, provider, modality);\n\n const retryAfterSeconds = parseRetryAfter(\n response.headers.get('Retry-After'),\n config.retryAfterMaxSeconds ?? 
MAX_RETRY_AFTER_SECONDS\n );\n if (retryAfterSeconds !== null && strategy && 'setRetryAfter' in strategy) {\n (strategy as { setRetryAfter: (s: number) => void }).setRetryAfter(\n retryAfterSeconds\n );\n }\n\n if (strategy) {\n const delay = await strategy.onRetry(error, attempt);\n if (delay !== null) {\n await sleep(delay);\n continue;\n }\n }\n\n throw error;\n }\n\n strategy?.reset?.();\n\n return response;\n }\n}\n\n/**\n * Executes a fetch request with configurable timeout.\n *\n * Creates an AbortController to cancel the request if it exceeds the timeout.\n * Properly handles both user-provided abort signals and timeout-based cancellation,\n * throwing appropriate error types for each case.\n *\n * @param fetchFn - The fetch function to use (allows custom implementations)\n * @param url - The URL to fetch\n * @param init - Standard fetch RequestInit options\n * @param timeout - Maximum time in milliseconds before aborting\n * @param provider - Provider identifier for error context\n * @param modality - Request modality for error context\n * @returns The Response from the fetch call\n *\n * @throws {UPPError} TIMEOUT - When the timeout is exceeded\n * @throws {UPPError} CANCELLED - When cancelled via user-provided signal\n * @throws {Error} Network errors are passed through unchanged\n */\nasync function fetchWithTimeout(\n fetchFn: typeof fetch,\n url: string,\n init: RequestInit,\n timeout: number,\n provider: string,\n modality: Modality\n): Promise<Response> {\n const existingSignal = init.signal;\n\n // Check if already aborted before starting\n if (existingSignal?.aborted) {\n throw cancelledError(provider, modality);\n }\n\n const controller = new AbortController();\n const timeoutId = setTimeout(() => controller.abort(), timeout);\n\n const onAbort = () => controller.abort();\n if (existingSignal) {\n existingSignal.addEventListener('abort', onAbort, { once: true });\n }\n\n try {\n const response = await fetchFn(url, {\n ...init,\n signal: controller.signal,\n });\n return response;\n } catch (error) {\n if (toError(error).name === 'AbortError') {\n if (existingSignal?.aborted) {\n throw cancelledError(provider, modality);\n }\n throw timeoutError(timeout, provider, modality);\n }\n throw error;\n } finally {\n clearTimeout(timeoutId);\n if (existingSignal) {\n existingSignal.removeEventListener('abort', onAbort);\n }\n }\n}\n\n/**\n * Delays execution for a specified duration.\n *\n * @param ms - Duration to sleep in milliseconds\n * @returns Promise that resolves after the specified delay\n */\nfunction sleep(ms: number): Promise<void> {\n return new Promise((resolve) => setTimeout(resolve, ms));\n}\n\n/**\n * Executes an HTTP fetch request for streaming responses.\n *\n * Unlike {@link doFetch}, this function returns the response immediately without\n * checking the HTTP status. 
This is necessary for Server-Sent Events (SSE) and\n * other streaming protocols where error information may be embedded in the stream.\n *\n * The caller is responsible for:\n * - Checking response.ok and handling HTTP errors\n * - Parsing the response stream (e.g., using parseSSEStream)\n * - Handling stream-specific error conditions\n *\n * Retries are not performed for streaming requests since partial data may have\n * already been consumed by the caller.\n *\n * @param url - The URL to fetch\n * @param init - Standard fetch RequestInit options\n * @param config - Provider configuration containing fetch customization and timeout\n * @param provider - Provider identifier for error context\n * @param modality - Request modality for error context\n * @returns The Response object (may have non-2xx status)\n *\n * @throws {UPPError} NETWORK_ERROR - When a network failure occurs\n * @throws {UPPError} TIMEOUT - When the request times out\n * @throws {UPPError} CANCELLED - When the request is aborted via signal\n *\n * @example\n * ```typescript\n * const response = await doStreamFetch(\n * 'https://api.openai.com/v1/chat/completions',\n * {\n * method: 'POST',\n * headers: { 'Authorization': 'Bearer sk-...' },\n * body: JSON.stringify({ model: 'gpt-4', messages: [], stream: true })\n * },\n * { timeout: 120000 },\n * 'openai',\n * 'llm'\n * );\n *\n * if (!response.ok) {\n * throw await normalizeHttpError(response, 'openai', 'llm');\n * }\n *\n * for await (const event of parseSSEStream(response.body!)) {\n * console.log(event);\n * }\n * ```\n */\nexport async function doStreamFetch(\n url: string,\n init: RequestInit,\n config: ProviderConfig,\n provider: string,\n modality: Modality\n): Promise<Response> {\n const fetchFn = config.fetch ?? fetch;\n const timeout = config.timeout ?? DEFAULT_TIMEOUT;\n const baseStrategy = config.retryStrategy;\n const strategy = hasFork(baseStrategy) ? 
baseStrategy.fork() : baseStrategy;\n\n if (strategy?.beforeRequest) {\n const delay = await strategy.beforeRequest();\n if (delay > 0) {\n await sleep(delay);\n }\n }\n\n try {\n const response = await fetchWithTimeout(\n fetchFn,\n url,\n init,\n timeout,\n provider,\n modality\n );\n return response;\n } catch (error) {\n if (error instanceof UPPError) {\n throw error;\n }\n throw networkError(toError(error), provider, modality);\n }\n}\n\n/**\n * Parses Retry-After header values into seconds.\n *\n * Supports both delta-seconds and HTTP-date formats.\n */\nfunction parseRetryAfter(headerValue: string | null, maxSeconds: number): number | null {\n if (!headerValue) {\n return null;\n }\n\n const seconds = parseInt(headerValue, 10);\n if (!Number.isNaN(seconds)) {\n return Math.min(maxSeconds, Math.max(0, seconds));\n }\n\n const dateMillis = Date.parse(headerValue);\n if (Number.isNaN(dateMillis)) {\n return null;\n }\n\n const deltaMs = dateMillis - Date.now();\n if (deltaMs <= 0) {\n return 0;\n }\n\n const deltaSeconds = Math.ceil(deltaMs / 1000);\n return Math.min(maxSeconds, Math.max(0, deltaSeconds));\n}\n"],"mappings":";AA8GO,IAAM,WAAN,MAAM,kBAAiB,MAAM;AAAA;AAAA,EAEzB;AAAA;AAAA,EAGA;AAAA;AAAA,EAGA;AAAA;AAAA,EAGA;AAAA;AAAA,EAGS;AAAA;AAAA,EAGA,OAAO;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAYzB,YACE,SACA,MACA,UACA,UACA,YACA,OACA;AACA,UAAM,OAAO;AACb,SAAK,OAAO;AACZ,SAAK,WAAW;AAChB,SAAK,WAAW;AAChB,SAAK,aAAa;AAClB,SAAK,QAAQ;AAEb,QAAI,MAAM,mBAAmB;AAC3B,YAAM,kBAAkB,MAAM,SAAQ;AAAA,IACxC;AAAA,EACF;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOS,WAAmB;AAC1B,QAAI,MAAM,aAAa,KAAK,IAAI,MAAM,KAAK,OAAO;AAClD,WAAO,eAAe,KAAK,QAAQ,eAAe,KAAK,QAAQ;AAC/D,QAAI,KAAK,YAAY;AACnB,aAAO,aAAa,KAAK,UAAU;AAAA,IACrC;AACA,WAAO;AACP,WAAO;AAAA,EACT;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,SAAkC;AAChC,WAAO;AAAA,MACL,MAAM,KAAK;AAAA,MACX,SAAS,KAAK;AAAA,MACd,MAAM,KAAK;AAAA,MACX,UAAU,KAAK;AAAA,MACf,UAAU,KAAK;AAAA,MACf,YAAY,KAAK;AAAA,MACjB,OAAO,KAAK,OAAO;AAAA,IACrB;AAAA,EACF;AACF;;;AClLO,SAAS,QAAQ,OAAuB;AAC7C,MAAI,iBAAiB,OAAO;AAC1B,WAAO;AAAA,EACT;AACA,MAAI,OAAO,UAAU,UAAU;AAC7B,WAAO,IAAI,MAAM,KAAK;AAAA,EACxB;AACA,MAAI,OAAO,UAAU,YAAY,UAAU,QAAQ,aAAa,OAAO;AACrE,UAAM,UAAW,MAAgC;AACjD,QAAI,OAAO,YAAY,UAAU;AAC/B,aAAO,IAAI,MAAM,OAAO;AAAA,IAC1B;AAAA,EACF;AACA,SAAO,IAAI,MAAM,OAAO,KAAK,CAAC;AAChC;;;ACMO,SAAS,kBAAkB,QAA2B;AAC3D,UAAQ,QAAQ;AAAA,IACd,KAAK;AACH,aAAO;AAAA,IACT,KAAK;AACH,aAAO;AAAA,IACT,KAAK;AAAA,IACL,KAAK;AACH,aAAO;AAAA,IACT,KAAK;AACH,aAAO;AAAA,IACT,KAAK;AACH,aAAO;AAAA,IACT,KAAK;AACH,aAAO;AAAA,IACT,KAAK;AACH,aAAO;AAAA,IACT,KAAK;AACH,aAAO;AAAA,IACT,KAAK;AACH,aAAO;AAAA,IACT,KAAK;AACH,aAAO;AAAA,IACT,KAAK;AAAA,IACL,KAAK;AAAA,IACL,KAAK;AAAA,IACL,KAAK;AACH,aAAO;AAAA,IACT;AACE,aAAO;AAAA,EACX;AACF;AAgCA,eAAsB,mBACpB,UACA,UACA,UACmB;AACnB,QAAM,OAAO,kBAAkB,SAAS,MAAM;AAC9C,MAAI,UAAU,QAAQ,SAAS,MAAM,KAAK,SAAS,UAAU;AAC7D,MAAI;AAEJ,MAAI;AACF,UAAM,OAAO,MAAM,SAAS,KAAK;AACjC,QAAI,MAAM;AACR,UAAI;AACF,cAAM,OAAO,KAAK,MAAM,IAAI;AAC5B,cAAM,mBACJ,KAAK,OAAO,WACZ,KAAK,WACL,KAAK,OAAO,OAAO,WACnB,KAAK;AAEP,YAAI,kBAAkB;AACpB,oBAAU;AAAA,QACZ;AAAA,MACF,QAAQ;AACN,YAAI,KAAK,SAAS,KAAK;AACrB,oBAAU;AAAA,QACZ;AAAA,MACF;AAAA,IACF;AAAA,EACF,SAAS,OAAO;AACd,oBAAgB,QAAQ,KAAK;AAAA,EAC/B;AAEA,SAAO,IAAI,SAAS,SAAS,MAAM,UAAU,UAAU,SAAS,QAAQ,aAAa;AACvF;AAaO,SAAS,aACd,OACA,UACA,UACU;AACV,SAAO,IAAI;AAAA,IACT,kBAAkB,MAAM,OAAO;AAAA,IAC/B;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,IACA;AAAA,EACF;AACF;AAaO,SAAS,aACd,SACA,UACA,UACU;AACV,SAAO,IAAI;AAAA,IACT,2BAA2B,OAAO;AAAA,IAClC;AAAA,IACA;AAAA,IACA;AAAA,EACF;AACF;AAYO,SAAS,eAAe,UAAkB,UAA8B;AAC7E,SAAO,IAAI,SAAS,yBAAyB,aAAa,UAAU,QAAQ;AAC9E;;;ACh
LA,IAAM,kBAAkB;AACxB,IAAM,0BAA0B;AAMhC,SAAS,QAAQ,UAAwE;AACvF,SAAO,CAAC,CAAC,YAAY,OAAQ,SAAgC,SAAS;AACxE;AAwCA,eAAsB,QACpB,KACA,MACA,QACA,UACA,UACmB;AACnB,QAAM,UAAU,OAAO,SAAS;AAChC,QAAM,UAAU,OAAO,WAAW;AAClC,QAAM,eAAe,OAAO;AAC5B,QAAM,WAAW,QAAQ,YAAY,IAAI,aAAa,KAAK,IAAI;AAE/D,MAAI,UAAU;AAEd,SAAO,MAAM;AACX;AAEA,QAAI,UAAU,eAAe;AAC3B,YAAM,QAAQ,MAAM,SAAS,cAAc;AAC3C,UAAI,QAAQ,GAAG;AACb,cAAM,MAAM,KAAK;AAAA,MACnB;AAAA,IACF;AAEA,QAAI;AACJ,QAAI;AACF,iBAAW,MAAM;AAAA,QACf;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,QACA;AAAA,MACF;AAAA,IACF,SAAS,OAAO;AACd,UAAI,iBAAiB,UAAU;AAC7B,YAAI,UAAU;AACZ,gBAAM,QAAQ,MAAM,SAAS,QAAQ,OAAO,OAAO;AACnD,cAAI,UAAU,MAAM;AAClB,kBAAM,MAAM,KAAK;AACjB;AAAA,UACF;AAAA,QACF;AACA,cAAM;AAAA,MACR;AAEA,YAAM,WAAW,aAAa,QAAQ,KAAK,GAAG,UAAU,QAAQ;AAEhE,UAAI,UAAU;AACZ,cAAM,QAAQ,MAAM,SAAS,QAAQ,UAAU,OAAO;AACtD,YAAI,UAAU,MAAM;AAClB,gBAAM,MAAM,KAAK;AACjB;AAAA,QACF;AAAA,MACF;AAEA,YAAM;AAAA,IACR;AAEA,QAAI,CAAC,SAAS,IAAI;AAChB,YAAM,QAAQ,MAAM,mBAAmB,UAAU,UAAU,QAAQ;AAEnE,YAAM,oBAAoB;AAAA,QACxB,SAAS,QAAQ,IAAI,aAAa;AAAA,QAClC,OAAO,wBAAwB;AAAA,MACjC;AACA,UAAI,sBAAsB,QAAQ,YAAY,mBAAmB,UAAU;AACzE,QAAC,SAAoD;AAAA,UACnD;AAAA,QACF;AAAA,MACF;AAEA,UAAI,UAAU;AACZ,cAAM,QAAQ,MAAM,SAAS,QAAQ,OAAO,OAAO;AACnD,YAAI,UAAU,MAAM;AAClB,gBAAM,MAAM,KAAK;AACjB;AAAA,QACF;AAAA,MACF;AAEA,YAAM;AAAA,IACR;AAEA,cAAU,QAAQ;AAElB,WAAO;AAAA,EACT;AACF;AAqBA,eAAe,iBACb,SACA,KACA,MACA,SACA,UACA,UACmB;AACnB,QAAM,iBAAiB,KAAK;AAG5B,MAAI,gBAAgB,SAAS;AAC3B,UAAM,eAAe,UAAU,QAAQ;AAAA,EACzC;AAEA,QAAM,aAAa,IAAI,gBAAgB;AACvC,QAAM,YAAY,WAAW,MAAM,WAAW,MAAM,GAAG,OAAO;AAE9D,QAAM,UAAU,MAAM,WAAW,MAAM;AACvC,MAAI,gBAAgB;AAClB,mBAAe,iBAAiB,SAAS,SAAS,EAAE,MAAM,KAAK,CAAC;AAAA,EAClE;AAEA,MAAI;AACF,UAAM,WAAW,MAAM,QAAQ,KAAK;AAAA,MAClC,GAAG;AAAA,MACH,QAAQ,WAAW;AAAA,IACrB,CAAC;AACD,WAAO;AAAA,EACT,SAAS,OAAO;AACd,QAAI,QAAQ,KAAK,EAAE,SAAS,cAAc;AACxC,UAAI,gBAAgB,SAAS;AAC3B,cAAM,eAAe,UAAU,QAAQ;AAAA,MACzC;AACA,YAAM,aAAa,SAAS,UAAU,QAAQ;AAAA,IAChD;AACA,UAAM;AAAA,EACR,UAAE;AACA,iBAAa,SAAS;AACtB,QAAI,gBAAgB;AAClB,qBAAe,oBAAoB,SAAS,OAAO;AAAA,IACrD;AAAA,EACF;AACF;AAQA,SAAS,MAAM,IAA2B;AACxC,SAAO,IAAI,QAAQ,CAAC,YAAY,WAAW,SAAS,EAAE,CAAC;AACzD;AAmDA,eAAsB,cACpB,KACA,MACA,QACA,UACA,UACmB;AACnB,QAAM,UAAU,OAAO,SAAS;AAChC,QAAM,UAAU,OAAO,WAAW;AAClC,QAAM,eAAe,OAAO;AAC5B,QAAM,WAAW,QAAQ,YAAY,IAAI,aAAa,KAAK,IAAI;AAE/D,MAAI,UAAU,eAAe;AAC3B,UAAM,QAAQ,MAAM,SAAS,cAAc;AAC3C,QAAI,QAAQ,GAAG;AACb,YAAM,MAAM,KAAK;AAAA,IACnB;AAAA,EACF;AAEA,MAAI;AACF,UAAM,WAAW,MAAM;AAAA,MACrB;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,MACA;AAAA,IACF;AACA,WAAO;AAAA,EACT,SAAS,OAAO;AACd,QAAI,iBAAiB,UAAU;AAC7B,YAAM;AAAA,IACR;AACA,UAAM,aAAa,QAAQ,KAAK,GAAG,UAAU,QAAQ;AAAA,EACvD;AACF;AAOA,SAAS,gBAAgB,aAA4B,YAAmC;AACtF,MAAI,CAAC,aAAa;AAChB,WAAO;AAAA,EACT;AAEA,QAAM,UAAU,SAAS,aAAa,EAAE;AACxC,MAAI,CAAC,OAAO,MAAM,OAAO,GAAG;AAC1B,WAAO,KAAK,IAAI,YAAY,KAAK,IAAI,GAAG,OAAO,CAAC;AAAA,EAClD;AAEA,QAAM,aAAa,KAAK,MAAM,WAAW;AACzC,MAAI,OAAO,MAAM,UAAU,GAAG;AAC5B,WAAO;AAAA,EACT;AAEA,QAAM,UAAU,aAAa,KAAK,IAAI;AACtC,MAAI,WAAW,GAAG;AAChB,WAAO;AAAA,EACT;AAEA,QAAM,eAAe,KAAK,KAAK,UAAU,GAAI;AAC7C,SAAO,KAAK,IAAI,YAAY,KAAK,IAAI,GAAG,YAAY,CAAC;AACvD;","names":[]}
@@ -106,6 +106,7 @@ var TokenBucket = class {
   refillRate;
   lastRefill;
   maxAttempts;
+  lock;
   /**
    * Creates a new TokenBucket instance.
    *
@@ -120,6 +121,7 @@ var TokenBucket = class {
     this.maxAttempts = options.maxAttempts ?? 3;
     this.tokens = this.maxTokens;
     this.lastRefill = Date.now();
+    this.lock = Promise.resolve();
   }
   /**
    * Called before each request to consume a token or calculate wait time.
@@ -128,16 +130,23 @@ var TokenBucket = class {
    * - Returns 0 if a token is available (consumed immediately)
    * - Returns the wait time in milliseconds until the next token
    *
+   * This method may allow tokens to go negative to reserve future capacity
+   * and avoid concurrent callers oversubscribing the same refill.
+   *
    * @returns Delay in milliseconds before the request can proceed
    */
   beforeRequest() {
-    this.
-
+    return this.withLock(() => {
+      this.refill();
+      if (this.tokens >= 1) {
+        this.tokens -= 1;
+        return 0;
+      }
+      const deficit = 1 - this.tokens;
+      const msPerToken = 1e3 / this.refillRate;
       this.tokens -= 1;
-    return
-    }
-    const msPerToken = 1e3 / this.refillRate;
-    return Math.ceil(msPerToken);
+      return Math.ceil(deficit * msPerToken);
+    });
   }
   /**
    * Handles retry logic for rate-limited requests.
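`TokenBucket.beforeRequest` now runs its accounting inside a lock and derives the wait from the token deficit, letting the balance go negative so concurrent callers reserve successive refill slots instead of all waiting the same single interval. A worked sketch (root import path assumed).

```typescript
import { TokenBucket } from "@providerprotocol/ai"; // path assumed

async function demo() {
  const bucket = new TokenBucket({ maxTokens: 2, refillRate: 4 }); // 4 tokens/s => 250 ms per token

  // Calls 1 and 2 consume the two available tokens and return 0 ms.
  // Call 3 sees tokens = 0, so deficit = 1 and it waits ceil(1 * 250) = 250 ms;
  // call 4 sees tokens = -1 (already reserved), deficit = 2, and waits 500 ms.
  const delays = await Promise.all([
    bucket.beforeRequest(),
    bucket.beforeRequest(),
    bucket.beforeRequest(),
    bucket.beforeRequest(),
  ]);
  console.log(delays); // approximately [0, 0, 250, 500]
}
```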
@@ -164,8 +173,10 @@ var TokenBucket = class {
    * Called automatically on successful requests to restore available tokens.
    */
   reset() {
-    this.
-
+    void this.withLock(() => {
+      this.tokens = this.maxTokens;
+      this.lastRefill = Date.now();
+    });
   }
   /**
    * Refills the bucket based on elapsed time since last refill.
@@ -177,8 +188,13 @@ var TokenBucket = class {
     this.tokens = Math.min(this.maxTokens, this.tokens + newTokens);
     this.lastRefill = now;
   }
+  async withLock(fn) {
+    const next = this.lock.then(fn, fn);
+    this.lock = next.then(() => void 0, () => void 0);
+    return next;
+  }
 };
-var RetryAfterStrategy = class {
+var RetryAfterStrategy = class _RetryAfterStrategy {
   maxAttempts;
   fallbackDelay;
   lastRetryAfter;
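The serialization above is a plain promise-chain lock: `withLock` appends each callback to a private promise and keeps the chain alive even when a callback throws. A generic standalone version of the same pattern, for illustration only.

```typescript
// Generic form of the promise-chain lock TokenBucket now uses internally.
class AsyncLock {
  private chain: Promise<void> = Promise.resolve();

  run<T>(fn: () => T | Promise<T>): Promise<T> {
    const next = this.chain.then(fn, fn);                       // run after queued work, even if it failed
    this.chain = next.then(() => undefined, () => undefined);   // swallow results so the chain never breaks
    return next;
  }
}

const lock = new AsyncLock();
void lock.run(() => console.log("first"));
void lock.run(() => console.log("second")); // always logs after "first"
```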
@@ -193,6 +209,15 @@ var RetryAfterStrategy = class {
     this.maxAttempts = options.maxAttempts ?? 3;
     this.fallbackDelay = options.fallbackDelay ?? 5e3;
   }
+  /**
+   * Creates a request-scoped copy of this strategy.
+   */
+  fork() {
+    return new _RetryAfterStrategy({
+      maxAttempts: this.maxAttempts,
+      fallbackDelay: this.fallbackDelay
+    });
+  }
   /**
    * Sets the retry delay from a Retry-After header value.
    *
@@ -231,4 +256,4 @@ export {
   TokenBucket,
   RetryAfterStrategy
 };
-//# sourceMappingURL=chunk-
+//# sourceMappingURL=chunk-RS7C25LS.js.map
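`RetryAfterStrategy` gains a `fork()` method that produces a request-scoped copy carrying only the configuration, which `doFetch`/`doStreamFetch` now call when present so a `Retry-After` value recorded during one request cannot leak into another. A small sketch (root import path assumed).

```typescript
import { RetryAfterStrategy } from "@providerprotocol/ai"; // path assumed

const shared = new RetryAfterStrategy({ maxAttempts: 3, fallbackDelay: 5000 });

// Inside doFetch: hasFork(shared) is true, so each request works on its own copy.
const perRequest = shared.fork();  // copies maxAttempts/fallbackDelay, not lastRetryAfter
perRequest.setRetryAfter(30);      // seconds from a Retry-After header; scoped to this request
```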
@@ -0,0 +1 @@
+
{"version":3,"sources":["../src/http/retry.ts"],"sourcesContent":["/**\n * Retry strategies for handling transient failures in HTTP requests.\n * @module http/retry\n */\n\nimport type { RetryStrategy } from '../types/provider.ts';\nimport type { UPPError } from '../types/errors.ts';\n\n/**\n * Implements exponential backoff with optional jitter for retry delays.\n *\n * The delay between retries doubles with each attempt, helping to:\n * - Avoid overwhelming servers during outages\n * - Reduce thundering herd effects when many clients retry simultaneously\n * - Give transient issues time to resolve\n *\n * Delay formula: min(baseDelay * 2^(attempt-1), maxDelay)\n * With jitter: delay * random(0.5, 1.0)\n *\n * Only retries on transient errors: RATE_LIMITED, NETWORK_ERROR, TIMEOUT, PROVIDER_ERROR\n *\n * @implements {RetryStrategy}\n *\n * @example\n * ```typescript\n * // Default configuration (3 retries, 1s base, 30s max, jitter enabled)\n * const retry = new ExponentialBackoff();\n *\n * // Custom configuration\n * const customRetry = new ExponentialBackoff({\n * maxAttempts: 5, // Up to 5 retry attempts\n * baseDelay: 500, // Start with 500ms delay\n * maxDelay: 60000, // Cap at 60 seconds\n * jitter: false // Disable random jitter\n * });\n *\n * // Use with provider\n * const provider = createOpenAI({\n * retryStrategy: customRetry\n * });\n * ```\n */\nexport class ExponentialBackoff implements RetryStrategy {\n private maxAttempts: number;\n private baseDelay: number;\n private maxDelay: number;\n private jitter: boolean;\n\n /**\n * Creates a new ExponentialBackoff instance.\n *\n * @param options - Configuration options\n * @param options.maxAttempts - Maximum number of retry attempts (default: 3)\n * @param options.baseDelay - Initial delay in milliseconds (default: 1000)\n * @param options.maxDelay - Maximum delay cap in milliseconds (default: 30000)\n * @param options.jitter - Whether to add random jitter to delays (default: true)\n */\n constructor(options: {\n maxAttempts?: number;\n baseDelay?: number;\n maxDelay?: number;\n jitter?: boolean;\n } = {}) {\n this.maxAttempts = options.maxAttempts ?? 3;\n this.baseDelay = options.baseDelay ?? 1000;\n this.maxDelay = options.maxDelay ?? 30000;\n this.jitter = options.jitter ?? 
true;\n }\n\n /**\n * Determines whether to retry and calculates the delay.\n *\n * @param error - The error that triggered the retry\n * @param attempt - Current attempt number (1-indexed)\n * @returns Delay in milliseconds before next retry, or null to stop retrying\n */\n onRetry(error: UPPError, attempt: number): number | null {\n if (attempt > this.maxAttempts) {\n return null;\n }\n\n if (!this.isRetryable(error)) {\n return null;\n }\n\n let delay = this.baseDelay * Math.pow(2, attempt - 1);\n delay = Math.min(delay, this.maxDelay);\n\n if (this.jitter) {\n delay = delay * (0.5 + Math.random());\n }\n\n return Math.floor(delay);\n }\n\n /**\n * Checks if an error is eligible for retry.\n *\n * @param error - The error to evaluate\n * @returns True if the error is transient and retryable\n */\n private isRetryable(error: UPPError): boolean {\n return (\n error.code === 'RATE_LIMITED' ||\n error.code === 'NETWORK_ERROR' ||\n error.code === 'TIMEOUT' ||\n error.code === 'PROVIDER_ERROR'\n );\n }\n}\n\n/**\n * Implements linear backoff where delays increase proportionally with each attempt.\n *\n * Unlike exponential backoff, linear backoff increases delays at a constant rate:\n * - Attempt 1: delay * 1 (e.g., 1000ms)\n * - Attempt 2: delay * 2 (e.g., 2000ms)\n * - Attempt 3: delay * 3 (e.g., 3000ms)\n *\n * This strategy is simpler and more predictable than exponential backoff,\n * suitable for scenarios where gradual delay increase is preferred over\n * aggressive backoff.\n *\n * Only retries on transient errors: RATE_LIMITED, NETWORK_ERROR, TIMEOUT, PROVIDER_ERROR\n *\n * @implements {RetryStrategy}\n *\n * @example\n * ```typescript\n * // Default configuration (3 retries, 1s delay increment)\n * const retry = new LinearBackoff();\n *\n * // Custom configuration\n * const customRetry = new LinearBackoff({\n * maxAttempts: 4, // Up to 4 retry attempts\n * delay: 2000 // 2s, 4s, 6s, 8s delays\n * });\n *\n * // Use with provider\n * const provider = createAnthropic({\n * retryStrategy: customRetry\n * });\n * ```\n */\nexport class LinearBackoff implements RetryStrategy {\n private maxAttempts: number;\n private delay: number;\n\n /**\n * Creates a new LinearBackoff instance.\n *\n * @param options - Configuration options\n * @param options.maxAttempts - Maximum number of retry attempts (default: 3)\n * @param options.delay - Base delay multiplier in milliseconds (default: 1000)\n */\n constructor(options: {\n maxAttempts?: number;\n delay?: number;\n } = {}) {\n this.maxAttempts = options.maxAttempts ?? 3;\n this.delay = options.delay ?? 
1000;\n }\n\n /**\n * Determines whether to retry and calculates the linear delay.\n *\n * @param error - The error that triggered the retry\n * @param attempt - Current attempt number (1-indexed)\n * @returns Delay in milliseconds (delay * attempt), or null to stop retrying\n */\n onRetry(error: UPPError, attempt: number): number | null {\n if (attempt > this.maxAttempts) {\n return null;\n }\n\n if (!this.isRetryable(error)) {\n return null;\n }\n\n return this.delay * attempt;\n }\n\n /**\n * Checks if an error is eligible for retry.\n *\n * @param error - The error to evaluate\n * @returns True if the error is transient and retryable\n */\n private isRetryable(error: UPPError): boolean {\n return (\n error.code === 'RATE_LIMITED' ||\n error.code === 'NETWORK_ERROR' ||\n error.code === 'TIMEOUT' ||\n error.code === 'PROVIDER_ERROR'\n );\n }\n}\n\n/**\n * Disables all retry behavior, failing immediately on any error.\n *\n * Use this strategy when:\n * - Retries are handled at a higher level in your application\n * - You want immediate failure feedback\n * - The operation is not idempotent\n * - Time sensitivity requires fast failure\n *\n * @implements {RetryStrategy}\n *\n * @example\n * ```typescript\n * // Disable retries for time-sensitive operations\n * const provider = createOpenAI({\n * retryStrategy: new NoRetry()\n * });\n * ```\n */\nexport class NoRetry implements RetryStrategy {\n /**\n * Always returns null to indicate no retry should be attempted.\n *\n * @returns Always returns null\n */\n onRetry(_error: UPPError, _attempt: number): null {\n return null;\n }\n}\n\n/**\n * Implements token bucket rate limiting with automatic refill.\n *\n * The token bucket algorithm provides smooth rate limiting by:\n * - Maintaining a bucket of tokens that replenish over time\n * - Consuming one token per request\n * - Delaying requests when the bucket is empty\n * - Allowing burst traffic up to the bucket capacity\n *\n * This is particularly useful for:\n * - Client-side rate limiting to avoid hitting API rate limits\n * - Smoothing request patterns to maintain consistent throughput\n * - Preventing accidental API abuse\n *\n * Unlike other retry strategies, TokenBucket implements {@link beforeRequest}\n * to proactively delay requests before they are made.\n *\n * @implements {RetryStrategy}\n *\n * @example\n * ```typescript\n * // Allow 10 requests burst, refill 1 token per second\n * const bucket = new TokenBucket({\n * maxTokens: 10, // Burst capacity\n * refillRate: 1, // Tokens per second\n * maxAttempts: 3 // Retry attempts on rate limit\n * });\n *\n * // Aggressive rate limiting: 5 req/s sustained\n * const strictBucket = new TokenBucket({\n * maxTokens: 5,\n * refillRate: 5\n * });\n *\n * // Use with provider\n * const provider = createOpenAI({\n * retryStrategy: bucket\n * });\n * ```\n */\nexport class TokenBucket implements RetryStrategy {\n private tokens: number;\n private maxTokens: number;\n private refillRate: number;\n private lastRefill: number;\n private maxAttempts: number;\n private lock: Promise<void>;\n\n /**\n * Creates a new TokenBucket instance.\n *\n * @param options - Configuration options\n * @param options.maxTokens - Maximum bucket capacity (default: 10)\n * @param options.refillRate - Tokens added per second (default: 1)\n * @param options.maxAttempts - Maximum retry attempts on rate limit (default: 3)\n */\n constructor(options: {\n maxTokens?: number;\n refillRate?: number;\n maxAttempts?: number;\n } = {}) {\n this.maxTokens = options.maxTokens 
?? 10;\n this.refillRate = options.refillRate ?? 1;\n this.maxAttempts = options.maxAttempts ?? 3;\n this.tokens = this.maxTokens;\n this.lastRefill = Date.now();\n this.lock = Promise.resolve();\n }\n\n /**\n * Called before each request to consume a token or calculate wait time.\n *\n * Refills the bucket based on elapsed time, then either:\n * - Returns 0 if a token is available (consumed immediately)\n * - Returns the wait time in milliseconds until the next token\n *\n * This method may allow tokens to go negative to reserve future capacity\n * and avoid concurrent callers oversubscribing the same refill.\n *\n * @returns Delay in milliseconds before the request can proceed\n */\n beforeRequest(): Promise<number> {\n return this.withLock(() => {\n this.refill();\n\n if (this.tokens >= 1) {\n this.tokens -= 1;\n return 0;\n }\n\n const deficit = 1 - this.tokens;\n const msPerToken = 1000 / this.refillRate;\n this.tokens -= 1;\n return Math.ceil(deficit * msPerToken);\n });\n }\n\n /**\n * Handles retry logic for rate-limited requests.\n *\n * Only retries on RATE_LIMITED errors, waiting for bucket refill.\n *\n * @param error - The error that triggered the retry\n * @param attempt - Current attempt number (1-indexed)\n * @returns Delay in milliseconds (time for 2 tokens), or null to stop\n */\n onRetry(error: UPPError, attempt: number): number | null {\n if (attempt > this.maxAttempts) {\n return null;\n }\n\n if (error.code !== 'RATE_LIMITED') {\n return null;\n }\n\n const msPerToken = 1000 / this.refillRate;\n return Math.ceil(msPerToken * 2);\n }\n\n /**\n * Resets the bucket to full capacity.\n *\n * Called automatically on successful requests to restore available tokens.\n */\n reset(): void {\n void this.withLock(() => {\n this.tokens = this.maxTokens;\n this.lastRefill = Date.now();\n });\n }\n\n /**\n * Refills the bucket based on elapsed time since last refill.\n */\n private refill(): void {\n const now = Date.now();\n const elapsed = (now - this.lastRefill) / 1000;\n const newTokens = elapsed * this.refillRate;\n\n this.tokens = Math.min(this.maxTokens, this.tokens + newTokens);\n this.lastRefill = now;\n }\n\n private async withLock<T>(fn: () => T | Promise<T>): Promise<T> {\n const next = this.lock.then(fn, fn);\n this.lock = next.then(() => undefined, () => undefined);\n return next;\n }\n}\n\n/**\n * Respects server-provided Retry-After headers for optimal retry timing.\n *\n * When servers return a 429 (Too Many Requests) response, they often include\n * a Retry-After header indicating when the client should retry. 
This strategy\n * uses that information for precise retry timing.\n *\n * Benefits over fixed backoff strategies:\n * - Follows server recommendations for optimal retry timing\n * - Avoids retrying too early and wasting requests\n * - Adapts to dynamic rate limit windows\n *\n * If no Retry-After header is provided, falls back to a configurable delay.\n * Only retries on RATE_LIMITED errors.\n *\n * @implements {RetryStrategy}\n *\n * @example\n * ```typescript\n * // Use server-recommended retry timing\n * const retryAfter = new RetryAfterStrategy({\n * maxAttempts: 5, // Retry up to 5 times\n * fallbackDelay: 10000 // 10s fallback if no header\n * });\n *\n * // The doFetch function automatically calls setRetryAfter\n * // when a Retry-After header is present in the response\n *\n * const provider = createOpenAI({\n * retryStrategy: retryAfter\n * });\n * ```\n */\nexport class RetryAfterStrategy implements RetryStrategy {\n private maxAttempts: number;\n private fallbackDelay: number;\n private lastRetryAfter?: number;\n\n /**\n * Creates a new RetryAfterStrategy instance.\n *\n * @param options - Configuration options\n * @param options.maxAttempts - Maximum number of retry attempts (default: 3)\n * @param options.fallbackDelay - Delay in ms when no Retry-After header (default: 5000)\n */\n constructor(options: {\n maxAttempts?: number;\n fallbackDelay?: number;\n } = {}) {\n this.maxAttempts = options.maxAttempts ?? 3;\n this.fallbackDelay = options.fallbackDelay ?? 5000;\n }\n\n /**\n * Creates a request-scoped copy of this strategy.\n */\n fork(): RetryAfterStrategy {\n return new RetryAfterStrategy({\n maxAttempts: this.maxAttempts,\n fallbackDelay: this.fallbackDelay,\n });\n }\n\n /**\n * Sets the retry delay from a Retry-After header value.\n *\n * Called by doFetch when a Retry-After header is present in the response.\n * The value is used for the next onRetry call and then cleared.\n *\n * @param seconds - The Retry-After value in seconds\n */\n setRetryAfter(seconds: number): void {\n this.lastRetryAfter = seconds * 1000;\n }\n\n /**\n * Determines retry delay using Retry-After header or fallback.\n *\n * @param error - The error that triggered the retry\n * @param attempt - Current attempt number (1-indexed)\n * @returns Delay from Retry-After header or fallback, null to stop\n */\n onRetry(error: UPPError, attempt: number): number | null {\n if (attempt > this.maxAttempts) {\n return null;\n }\n\n if (error.code !== 'RATE_LIMITED') {\n return null;\n }\n\n const delay = this.lastRetryAfter ?? 
this.fallbackDelay;\n this.lastRetryAfter = undefined;\n return delay;\n }\n}\n"],"mappings":";AA0CO,IAAM,qBAAN,MAAkD;AAAA,EAC/C;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWR,YAAY,UAKR,CAAC,GAAG;AACN,SAAK,cAAc,QAAQ,eAAe;AAC1C,SAAK,YAAY,QAAQ,aAAa;AACtC,SAAK,WAAW,QAAQ,YAAY;AACpC,SAAK,SAAS,QAAQ,UAAU;AAAA,EAClC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,QAAQ,OAAiB,SAAgC;AACvD,QAAI,UAAU,KAAK,aAAa;AAC9B,aAAO;AAAA,IACT;AAEA,QAAI,CAAC,KAAK,YAAY,KAAK,GAAG;AAC5B,aAAO;AAAA,IACT;AAEA,QAAI,QAAQ,KAAK,YAAY,KAAK,IAAI,GAAG,UAAU,CAAC;AACpD,YAAQ,KAAK,IAAI,OAAO,KAAK,QAAQ;AAErC,QAAI,KAAK,QAAQ;AACf,cAAQ,SAAS,MAAM,KAAK,OAAO;AAAA,IACrC;AAEA,WAAO,KAAK,MAAM,KAAK;AAAA,EACzB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQQ,YAAY,OAA0B;AAC5C,WACE,MAAM,SAAS,kBACf,MAAM,SAAS,mBACf,MAAM,SAAS,aACf,MAAM,SAAS;AAAA,EAEnB;AACF;AAmCO,IAAM,gBAAN,MAA6C;AAAA,EAC1C;AAAA,EACA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASR,YAAY,UAGR,CAAC,GAAG;AACN,SAAK,cAAc,QAAQ,eAAe;AAC1C,SAAK,QAAQ,QAAQ,SAAS;AAAA,EAChC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,QAAQ,OAAiB,SAAgC;AACvD,QAAI,UAAU,KAAK,aAAa;AAC9B,aAAO;AAAA,IACT;AAEA,QAAI,CAAC,KAAK,YAAY,KAAK,GAAG;AAC5B,aAAO;AAAA,IACT;AAEA,WAAO,KAAK,QAAQ;AAAA,EACtB;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAQQ,YAAY,OAA0B;AAC5C,WACE,MAAM,SAAS,kBACf,MAAM,SAAS,mBACf,MAAM,SAAS,aACf,MAAM,SAAS;AAAA,EAEnB;AACF;AAqBO,IAAM,UAAN,MAAuC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAM5C,QAAQ,QAAkB,UAAwB;AAChD,WAAO;AAAA,EACT;AACF;AA0CO,IAAM,cAAN,MAA2C;AAAA,EACxC;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUR,YAAY,UAIR,CAAC,GAAG;AACN,SAAK,YAAY,QAAQ,aAAa;AACtC,SAAK,aAAa,QAAQ,cAAc;AACxC,SAAK,cAAc,QAAQ,eAAe;AAC1C,SAAK,SAAS,KAAK;AACnB,SAAK,aAAa,KAAK,IAAI;AAC3B,SAAK,OAAO,QAAQ,QAAQ;AAAA,EAC9B;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAcA,gBAAiC;AAC/B,WAAO,KAAK,SAAS,MAAM;AACzB,WAAK,OAAO;AAEZ,UAAI,KAAK,UAAU,GAAG;AACpB,aAAK,UAAU;AACf,eAAO;AAAA,MACT;AAEA,YAAM,UAAU,IAAI,KAAK;AACzB,YAAM,aAAa,MAAO,KAAK;AAC/B,WAAK,UAAU;AACf,aAAO,KAAK,KAAK,UAAU,UAAU;AAAA,IACvC,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAWA,QAAQ,OAAiB,SAAgC;AACvD,QAAI,UAAU,KAAK,aAAa;AAC9B,aAAO;AAAA,IACT;AAEA,QAAI,MAAM,SAAS,gBAAgB;AACjC,aAAO;AAAA,IACT;AAEA,UAAM,aAAa,MAAO,KAAK;AAC/B,WAAO,KAAK,KAAK,aAAa,CAAC;AAAA,EACjC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAOA,QAAc;AACZ,SAAK,KAAK,SAAS,MAAM;AACvB,WAAK,SAAS,KAAK;AACnB,WAAK,aAAa,KAAK,IAAI;AAAA,IAC7B,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA,EAKQ,SAAe;AACrB,UAAM,MAAM,KAAK,IAAI;AACrB,UAAM,WAAW,MAAM,KAAK,cAAc;AAC1C,UAAM,YAAY,UAAU,KAAK;AAEjC,SAAK,SAAS,KAAK,IAAI,KAAK,WAAW,KAAK,SAAS,SAAS;AAC9D,SAAK,aAAa;AAAA,EACpB;AAAA,EAEA,MAAc,SAAY,IAAsC;AAC9D,UAAM,OAAO,KAAK,KAAK,KAAK,IAAI,EAAE;AAClC,SAAK,OAAO,KAAK,KAAK,MAAM,QAAW,MAAM,MAAS;AACtD,WAAO;AAAA,EACT;AACF;AAmCO,IAAM,qBAAN,MAAM,oBAA4C;AAAA,EAC/C;AAAA,EACA;AAAA,EACA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASR,YAAY,UAGR,CAAC,GAAG;AACN,SAAK,cAAc,QAAQ,eAAe;AAC1C,SAAK,gBAAgB,QAAQ,iBAAiB;AAAA,EAChD;AAAA;AAAA;AAAA;AAAA,EAKA,OAA2B;AACzB,WAAO,IAAI,oBAAmB;AAAA,MAC5B,aAAa,KAAK;AAAA,MAClB,eAAe,KAAK;AAAA,IACtB,CAAC;AAAA,EACH;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EAUA,cAAc,SAAuB;AACnC,SAAK,iBAAiB,UAAU;AAAA,EAClC;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA,EASA,QAAQ,OAAiB,SAAgC;AACvD,QAAI,UAAU,KAAK,aAAa;AAC9B,aAAO;AAAA,IACT;AAEA,QAAI,MAAM,SAAS,gBAAgB;AACjC,aAAO;AAAA,IACT;AAEA,UAAM,QAAQ,KAAK,kBAAkB,KAAK;AAC1C,SAAK,iBAAiB;AACtB,WAAO;AAAA,EACT;AACF;","names":[]}
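For orientation, the retry module inlined in the source map above documents its exponential backoff as min(baseDelay * 2^(attempt-1), maxDelay), with optional jitter applied on top. A minimal standalone sketch of that delay computation follows; the names here are local illustrations, not exports of @providerprotocol/ai.

// Sketch only: computeBackoffDelay mirrors the formula in the inlined
// src/http/retry.ts above; it is not part of the package's public API.
function computeBackoffDelay(
  attempt: number,        // 1-indexed retry attempt
  baseDelay = 1000,       // defaults mirror ExponentialBackoff's documented defaults
  maxDelay = 30000,
  jitter = true,
): number {
  // min(baseDelay * 2^(attempt-1), maxDelay)
  let delay = Math.min(baseDelay * Math.pow(2, attempt - 1), maxDelay);
  if (jitter) {
    // follows the inlined implementation, which scales the delay by (0.5 + Math.random())
    delay = delay * (0.5 + Math.random());
  }
  return Math.floor(delay);
}

// With jitter disabled, attempts 1..4 yield 1000 ms, 2000 ms, 4000 ms, 8000 ms.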
package/dist/google/index.d.ts
CHANGED
@@ -1,4 +1,4 @@
-import { g as Provider } from '../provider-
+import { a as ProviderConfig, g as Provider } from '../provider-DWEAzeM5.js';

 /**
 * Provider-specific parameters for Google Gemini API requests.
@@ -671,6 +671,10 @@ declare const tools: {
 interface CacheCreateOptions {
 /** API key for authentication */
 apiKey: string;
+/** Provider configuration (timeout, retry strategy, custom fetch) */
+config?: ProviderConfig;
+/** Abort signal for cancellation */
+signal?: AbortSignal;
 /** Model to associate with this cache (e.g., "gemini-3-flash-preview") */
 model: string;
 /** Optional display name for the cache (max 128 chars) */
@@ -692,6 +696,10 @@ interface CacheCreateOptions {
 interface CacheListOptions {
 /** API key for authentication */
 apiKey: string;
+/** Provider configuration (timeout, retry strategy, custom fetch) */
+config?: ProviderConfig;
+/** Abort signal for cancellation */
+signal?: AbortSignal;
 /** Maximum number of caches to return per page */
 pageSize?: number;
 /** Token for fetching the next page of results */
@@ -736,6 +744,8 @@ declare function create(options: CacheCreateOptions): Promise<GoogleCacheRespons
 *
 * @param name - The cache name (format: "cachedContents/{id}")
 * @param apiKey - API key for authentication
+* @param config - Provider configuration (timeout, retry strategy, custom fetch)
+* @param signal - Abort signal for cancellation
 * @returns The cache entry details
 *
 * @example
@@ -744,7 +754,7 @@ declare function create(options: CacheCreateOptions): Promise<GoogleCacheRespons
 * console.log(`Cache expires at: ${cache.expireTime}`);
 * ```
 */
-declare function get(name: string, apiKey: string): Promise<GoogleCacheResponse>;
+declare function get(name: string, apiKey: string, config?: ProviderConfig, signal?: AbortSignal): Promise<GoogleCacheResponse>;
 /**
 * Lists all cached content entries.
 *
@@ -771,8 +781,10 @@ declare function list(options: CacheListOptions): Promise<GoogleCacheListRespons
 * (contents, systemInstruction, tools) are immutable after creation.
 *
 * @param name - The cache name (format: "cachedContents/{id}")
-* @param update - The update to apply (ttl or expireTime)
+* @param update - The update to apply (exactly one of ttl or expireTime)
 * @param apiKey - API key for authentication
+* @param config - Provider configuration (timeout, retry strategy, custom fetch)
+* @param signal - Abort signal for cancellation
 * @returns The updated cache entry
 *
 * @example
@@ -785,19 +797,21 @@ declare function list(options: CacheListOptions): Promise<GoogleCacheListRespons
 * );
 * ```
 */
-declare function update(name: string, updateRequest: GoogleCacheUpdateRequest, apiKey: string): Promise<GoogleCacheResponse>;
+declare function update(name: string, updateRequest: GoogleCacheUpdateRequest, apiKey: string, config?: ProviderConfig, signal?: AbortSignal): Promise<GoogleCacheResponse>;
 /**
 * Deletes a cached content entry.
 *
 * @param name - The cache name (format: "cachedContents/{id}")
 * @param apiKey - API key for authentication
+* @param config - Provider configuration (timeout, retry strategy, custom fetch)
+* @param signal - Abort signal for cancellation
 *
 * @example
 * ```typescript
 * await google.cache.delete('cachedContents/abc123', apiKey);
 * ```
 */
-declare function deleteCache(name: string, apiKey: string): Promise<void>;
+declare function deleteCache(name: string, apiKey: string, config?: ProviderConfig, signal?: AbortSignal): Promise<void>;
 /**
 * Cache utilities namespace.
 *
@@ -930,7 +944,7 @@ declare const google: Provider<unknown> & {
 get: typeof get;
 list: typeof list;
 update: typeof update;
-delete: (name: string, apiKey: string) => Promise<void>;
+delete: (name: string, apiKey: string, config?: ProviderConfig, signal?: AbortSignal) => Promise<void>;
 };
 };

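For context on the signature changes above, a hedged sketch of how the new optional config and signal parameters might be passed to the cache helpers; the import path and the exact ProviderConfig shape are assumptions, since neither is spelled out in this diff.

// Assumed import path; the package's exports map is not shown in this diff.
import { google } from '@providerprotocol/ai/google';

const apiKey = process.env.GOOGLE_API_KEY ?? '';
const controller = new AbortController();

// get() now accepts two optional trailing parameters: a ProviderConfig and an
// AbortSignal. Passing undefined for config keeps the provider defaults.
const cache = await google.cache.get(
  'cachedContents/abc123',
  apiKey,
  undefined,
  controller.signal,
);
console.log(`Cache expires at: ${cache.expireTime}`);

// delete() takes the same optional config/signal pair.
await google.cache.delete('cachedContents/abc123', apiKey, undefined, controller.signal);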