@stravigor/saina 0.4.7 → 0.4.9
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +1 -1
- package/src/helpers.ts +17 -0
- package/src/providers/anthropic_provider.ts +18 -20
- package/src/providers/openai_provider.ts +194 -36
- package/src/types.ts +2 -0
- package/src/utils/retry.ts +100 -0
package/package.json
CHANGED
package/src/helpers.ts
CHANGED
|
@@ -216,6 +216,7 @@ export class AgentRunner<T extends Agent = Agent> {
|
|
|
216
216
|
private _context: Record<string, unknown> = {}
|
|
217
217
|
private _provider?: string
|
|
218
218
|
private _model?: string
|
|
219
|
+
private _tools?: ToolDefinition[]
|
|
219
220
|
|
|
220
221
|
constructor(private AgentClass: new () => T) {}
|
|
221
222
|
|
|
@@ -238,11 +239,22 @@ export class AgentRunner<T extends Agent = Agent> {
|
|
|
238
239
|
return this
|
|
239
240
|
}
|
|
240
241
|
|
|
242
|
+
  /**
   * Set or override the tools available to the agent for this run.
   *
   * Runner-level tools take precedence over tools declared on the agent
   * instance itself (applied inside `run()`). Returns `this` for chaining.
   */
  tools(tools: ToolDefinition[]): this {
    this._tools = tools
    return this
  }
|
|
247
|
+
|
|
241
248
|
/** Run the agent to completion. */
|
|
242
249
|
async run(): Promise<AgentResult> {
|
|
243
250
|
const agent = new this.AgentClass()
|
|
244
251
|
const config = SainaManager.config
|
|
245
252
|
|
|
253
|
+
// Runner-level tools override agent-level tools
|
|
254
|
+
if (this._tools) {
|
|
255
|
+
agent.tools = this._tools
|
|
256
|
+
}
|
|
257
|
+
|
|
246
258
|
const providerName = this._provider ?? agent.provider ?? config.default
|
|
247
259
|
const providerConfig = config.providers[providerName]
|
|
248
260
|
const model = this._model ?? agent.model ?? providerConfig?.model ?? ''
|
|
@@ -369,6 +381,11 @@ export class AgentRunner<T extends Agent = Agent> {
|
|
|
369
381
|
const agent = new this.AgentClass()
|
|
370
382
|
const config = SainaManager.config
|
|
371
383
|
|
|
384
|
+
// Runner-level tools override agent-level tools
|
|
385
|
+
if (this._tools) {
|
|
386
|
+
agent.tools = this._tools
|
|
387
|
+
}
|
|
388
|
+
|
|
372
389
|
const providerName = this._provider ?? agent.provider ?? config.default
|
|
373
390
|
const providerConfig = config.providers[providerName]
|
|
374
391
|
const model = this._model ?? agent.model ?? providerConfig?.model ?? ''
|
|
@@ -1,4 +1,5 @@
|
|
|
1
1
|
import { parseSSE } from '../utils/sse_parser.ts'
|
|
2
|
+
import { retryableFetch, type RetryOptions } from '../utils/retry.ts'
|
|
2
3
|
import { ExternalServiceError } from '@stravigor/core/exceptions/errors'
|
|
3
4
|
import type {
|
|
4
5
|
AIProvider,
|
|
@@ -23,6 +24,7 @@ export class AnthropicProvider implements AIProvider {
|
|
|
23
24
|
private baseUrl: string
|
|
24
25
|
private defaultModel: string
|
|
25
26
|
private defaultMaxTokens: number
|
|
27
|
+
private retryOptions: RetryOptions
|
|
26
28
|
|
|
27
29
|
constructor(config: ProviderConfig) {
|
|
28
30
|
this.name = 'anthropic'
|
|
@@ -30,21 +32,21 @@ export class AnthropicProvider implements AIProvider {
|
|
|
30
32
|
this.baseUrl = (config.baseUrl ?? 'https://api.anthropic.com').replace(/\/$/, '')
|
|
31
33
|
this.defaultModel = config.model
|
|
32
34
|
this.defaultMaxTokens = config.maxTokens ?? 4096
|
|
35
|
+
this.retryOptions = {
|
|
36
|
+
maxRetries: config.maxRetries ?? 3,
|
|
37
|
+
baseDelay: config.retryBaseDelay ?? 1000,
|
|
38
|
+
}
|
|
33
39
|
}
|
|
34
40
|
|
|
35
41
|
  /**
   * Send a non-streaming completion request to the Anthropic Messages API.
   *
   * Transient failures (retryable statuses and network errors) are handled
   * by `retryableFetch` using `this.retryOptions`; on a non-retryable status
   * or exhausted retries it throws `ExternalServiceError`. The JSON payload
   * is converted to a `CompletionResponse` by `parseResponse`.
   */
  async complete(request: CompletionRequest): Promise<CompletionResponse> {
    // `false` → request a single response rather than an SSE stream.
    const body = this.buildRequestBody(request, false)

    const response = await retryableFetch(
      'Anthropic',
      `${this.baseUrl}/v1/messages`,
      { method: 'POST', headers: this.buildHeaders(), body: JSON.stringify(body) },
      this.retryOptions
    )

    const data: any = await response.json()
    return this.parseResponse(data)
  }
|
|
@@ -53,16 +55,12 @@ export class AnthropicProvider implements AIProvider {
|
|
|
53
55
|
async *stream(request: CompletionRequest): AsyncIterable<StreamChunk> {
|
|
54
56
|
const body = this.buildRequestBody(request, true)
|
|
55
57
|
|
|
56
|
-
const response = await
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
body: JSON.stringify(body),
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
if (!response.ok) {
|
|
63
|
-
const text = await response.text()
|
|
64
|
-
throw new ExternalServiceError('Anthropic', response.status, text)
|
|
65
|
-
}
|
|
58
|
+
const response = await retryableFetch(
|
|
59
|
+
'Anthropic',
|
|
60
|
+
`${this.baseUrl}/v1/messages`,
|
|
61
|
+
{ method: 'POST', headers: this.buildHeaders(), body: JSON.stringify(body) },
|
|
62
|
+
this.retryOptions
|
|
63
|
+
)
|
|
66
64
|
|
|
67
65
|
if (!response.body) {
|
|
68
66
|
throw new ExternalServiceError('Anthropic', undefined, 'No stream body returned')
|
|
@@ -1,4 +1,5 @@
|
|
|
1
1
|
import { parseSSE } from '../utils/sse_parser.ts'
|
|
2
|
+
import { retryableFetch, type RetryOptions } from '../utils/retry.ts'
|
|
2
3
|
import { ExternalServiceError } from '@stravigor/core/exceptions/errors'
|
|
3
4
|
import type {
|
|
4
5
|
AIProvider,
|
|
@@ -24,6 +25,7 @@ export class OpenAIProvider implements AIProvider {
|
|
|
24
25
|
private baseUrl: string
|
|
25
26
|
private defaultModel: string
|
|
26
27
|
private defaultMaxTokens?: number
|
|
28
|
+
private retryOptions: RetryOptions
|
|
27
29
|
|
|
28
30
|
constructor(config: ProviderConfig, name?: string) {
|
|
29
31
|
this.name = name ?? 'openai'
|
|
@@ -31,6 +33,10 @@ export class OpenAIProvider implements AIProvider {
|
|
|
31
33
|
this.baseUrl = (config.baseUrl ?? 'https://api.openai.com').replace(/\/$/, '')
|
|
32
34
|
this.defaultModel = config.model
|
|
33
35
|
this.defaultMaxTokens = config.maxTokens
|
|
36
|
+
this.retryOptions = {
|
|
37
|
+
maxRetries: config.maxRetries ?? 3,
|
|
38
|
+
baseDelay: config.retryBaseDelay ?? 1000,
|
|
39
|
+
}
|
|
34
40
|
}
|
|
35
41
|
|
|
36
42
|
/** Whether this provider supports OpenAI's native json_schema response format. */
|
|
@@ -41,16 +47,12 @@ export class OpenAIProvider implements AIProvider {
|
|
|
41
47
|
  /**
   * Send a non-streaming completion request to the OpenAI chat completions API.
   *
   * Transient failures (retryable statuses and network errors) are handled
   * by `retryableFetch` using `this.retryOptions`; on a non-retryable status
   * or exhausted retries it throws `ExternalServiceError`. The JSON payload
   * is converted to a `CompletionResponse` by `parseResponse`.
   */
  async complete(request: CompletionRequest): Promise<CompletionResponse> {
    // `false` → request a single response rather than an SSE stream.
    const body = this.buildRequestBody(request, false)

    const response = await retryableFetch(
      'OpenAI',
      `${this.baseUrl}/v1/chat/completions`,
      { method: 'POST', headers: this.buildHeaders(), body: JSON.stringify(body) },
      this.retryOptions
    )

    const data: any = await response.json()
    return this.parseResponse(data)
  }
|
|
@@ -59,16 +61,12 @@ export class OpenAIProvider implements AIProvider {
|
|
|
59
61
|
async *stream(request: CompletionRequest): AsyncIterable<StreamChunk> {
|
|
60
62
|
const body = this.buildRequestBody(request, true)
|
|
61
63
|
|
|
62
|
-
const response = await
|
|
63
|
-
|
|
64
|
-
|
|
65
|
-
body: JSON.stringify(body),
|
|
66
|
-
|
|
67
|
-
|
|
68
|
-
if (!response.ok) {
|
|
69
|
-
const text = await response.text()
|
|
70
|
-
throw new ExternalServiceError('OpenAI', response.status, text)
|
|
71
|
-
}
|
|
64
|
+
const response = await retryableFetch(
|
|
65
|
+
'OpenAI',
|
|
66
|
+
`${this.baseUrl}/v1/chat/completions`,
|
|
67
|
+
{ method: 'POST', headers: this.buildHeaders(), body: JSON.stringify(body) },
|
|
68
|
+
this.retryOptions
|
|
69
|
+
)
|
|
72
70
|
|
|
73
71
|
if (!response.body) {
|
|
74
72
|
throw new ExternalServiceError('OpenAI', undefined, 'No stream body returned')
|
|
@@ -157,16 +155,12 @@ export class OpenAIProvider implements AIProvider {
|
|
|
157
155
|
model: model ?? 'text-embedding-3-small',
|
|
158
156
|
}
|
|
159
157
|
|
|
160
|
-
const response = await
|
|
161
|
-
|
|
162
|
-
|
|
163
|
-
body: JSON.stringify(body),
|
|
164
|
-
|
|
165
|
-
|
|
166
|
-
if (!response.ok) {
|
|
167
|
-
const text = await response.text()
|
|
168
|
-
throw new ExternalServiceError('OpenAI', response.status, text)
|
|
169
|
-
}
|
|
158
|
+
const response = await retryableFetch(
|
|
159
|
+
'OpenAI',
|
|
160
|
+
`${this.baseUrl}/v1/embeddings`,
|
|
161
|
+
{ method: 'POST', headers: this.buildHeaders(), body: JSON.stringify(body) },
|
|
162
|
+
this.retryOptions
|
|
163
|
+
)
|
|
170
164
|
|
|
171
165
|
const data: any = await response.json()
|
|
172
166
|
|
|
@@ -179,6 +173,14 @@ export class OpenAIProvider implements AIProvider {
|
|
|
179
173
|
|
|
180
174
|
// ── Private helpers ──────────────────────────────────────────────────────
|
|
181
175
|
|
|
176
|
+
private isReasoningModel(model: string): boolean {
|
|
177
|
+
return /^(o[1-9]|gpt-5)/.test(model)
|
|
178
|
+
}
|
|
179
|
+
|
|
180
|
+
private usesMaxCompletionTokens(model: string): boolean {
|
|
181
|
+
return this.isReasoningModel(model) || /^gpt-4\.1|gpt-4o-mini-2024/.test(model)
|
|
182
|
+
}
|
|
183
|
+
|
|
182
184
|
private buildHeaders(): Record<string, string> {
|
|
183
185
|
return {
|
|
184
186
|
'content-type': 'application/json',
|
|
@@ -194,9 +196,18 @@ export class OpenAIProvider implements AIProvider {
|
|
|
194
196
|
|
|
195
197
|
if (stream) body.stream = true
|
|
196
198
|
if (request.maxTokens ?? this.defaultMaxTokens) {
|
|
197
|
-
|
|
199
|
+
const tokens = request.maxTokens ?? this.defaultMaxTokens
|
|
200
|
+
const model = (body.model as string) ?? ''
|
|
201
|
+
|
|
202
|
+
if (this.usesMaxCompletionTokens(model)) {
|
|
203
|
+
body.max_completion_tokens = tokens
|
|
204
|
+
} else {
|
|
205
|
+
body.max_tokens = tokens
|
|
206
|
+
}
|
|
207
|
+
}
|
|
208
|
+
if (request.temperature !== undefined && !this.isReasoningModel((body.model as string) ?? '')) {
|
|
209
|
+
body.temperature = request.temperature
|
|
198
210
|
}
|
|
199
|
-
if (request.temperature !== undefined) body.temperature = request.temperature
|
|
200
211
|
if (request.stopSequences?.length) body.stop = request.stopSequences
|
|
201
212
|
|
|
202
213
|
// Tools
|
|
@@ -225,19 +236,20 @@ export class OpenAIProvider implements AIProvider {
|
|
|
225
236
|
|
|
226
237
|
// Structured output
|
|
227
238
|
if (request.schema) {
|
|
228
|
-
|
|
239
|
+
const useStrict = this.supportsJsonSchema && this.isStrictCompatible(request.schema)
|
|
240
|
+
|
|
241
|
+
if (useStrict) {
|
|
229
242
|
body.response_format = {
|
|
230
243
|
type: 'json_schema',
|
|
231
244
|
json_schema: {
|
|
232
245
|
name: 'response',
|
|
233
|
-
schema: request.schema,
|
|
246
|
+
schema: this.normalizeSchemaForOpenAI(request.schema),
|
|
234
247
|
strict: true,
|
|
235
248
|
},
|
|
236
249
|
}
|
|
237
250
|
} else {
|
|
238
|
-
// Fallback
|
|
251
|
+
// Fallback: json_object mode with schema injected into system prompt
|
|
239
252
|
body.response_format = { type: 'json_object' }
|
|
240
|
-
// Inject schema into system prompt so the model knows the expected format
|
|
241
253
|
const schemaHint = `\n\nYou MUST respond with valid JSON matching this schema:\n${JSON.stringify(request.schema, null, 2)}`
|
|
242
254
|
const messages = body.messages as any[]
|
|
243
255
|
if (messages[0]?.role === 'system') {
|
|
@@ -348,4 +360,150 @@ export class OpenAIProvider implements AIProvider {
|
|
|
348
360
|
raw: data,
|
|
349
361
|
}
|
|
350
362
|
}
|
|
363
|
+
|
|
364
|
+
/**
|
|
365
|
+
* OpenAI's strict structured output requires:
|
|
366
|
+
* - All properties listed in `required`
|
|
367
|
+
* - Optional properties use nullable types instead
|
|
368
|
+
* - `additionalProperties: false` on every object
|
|
369
|
+
*/
|
|
370
|
+
/**
|
|
371
|
+
* Check if a schema is compatible with OpenAI's strict structured output.
|
|
372
|
+
* Record types (object with additionalProperties != false) are not supported.
|
|
373
|
+
*/
|
|
374
|
+
private isStrictCompatible(schema: Record<string, unknown>): boolean {
|
|
375
|
+
if (schema == null || typeof schema !== 'object') return true
|
|
376
|
+
|
|
377
|
+
// Record type: object with additionalProperties that isn't false
|
|
378
|
+
if (
|
|
379
|
+
schema.type === 'object' &&
|
|
380
|
+
schema.additionalProperties !== undefined &&
|
|
381
|
+
schema.additionalProperties !== false
|
|
382
|
+
) {
|
|
383
|
+
return false
|
|
384
|
+
}
|
|
385
|
+
|
|
386
|
+
// Check nested properties
|
|
387
|
+
if (schema.properties) {
|
|
388
|
+
for (const prop of Object.values(schema.properties as Record<string, any>)) {
|
|
389
|
+
if (!this.isStrictCompatible(prop)) return false
|
|
390
|
+
}
|
|
391
|
+
}
|
|
392
|
+
|
|
393
|
+
// Check array items
|
|
394
|
+
if (schema.items && !this.isStrictCompatible(schema.items as Record<string, unknown>))
|
|
395
|
+
return false
|
|
396
|
+
|
|
397
|
+
// Check anyOf / oneOf
|
|
398
|
+
for (const key of ['anyOf', 'oneOf'] as const) {
|
|
399
|
+
if (Array.isArray(schema[key])) {
|
|
400
|
+
for (const s of schema[key] as any[]) {
|
|
401
|
+
if (!this.isStrictCompatible(s)) return false
|
|
402
|
+
}
|
|
403
|
+
}
|
|
404
|
+
}
|
|
405
|
+
|
|
406
|
+
return true
|
|
407
|
+
}
|
|
408
|
+
|
|
409
|
+
  /** Keywords OpenAI strict mode does NOT support. */
  private static UNSUPPORTED_KEYWORDS = new Set([
    // Object-shape constraints
    'propertyNames',
    'patternProperties',
    // Conditional / combinatorial schemas
    'if',
    'then',
    'else',
    'not',
    // Array constraints
    'contains',
    'minItems',
    'maxItems',
    // Object-size constraints
    'minProperties',
    'maxProperties',
    // String constraints
    'minLength',
    'maxLength',
    // Numeric constraints
    'minimum',
    'maximum',
    'exclusiveMinimum',
    'exclusiveMaximum',
    'multipleOf',
    // Format / content hints
    'pattern',
    'format',
    'contentEncoding',
    'contentMediaType',
    // Misc metadata
    'unevaluatedProperties',
    '$schema',
  ])
|
|
436
|
+
|
|
437
|
+
  /**
   * Recursively rewrite a JSON schema into the shape OpenAI strict mode
   * accepts: unsupported keywords stripped, every property listed in
   * `required` (optional properties made nullable via `makeNullable`), and
   * `additionalProperties: false` on objects with declared properties.
   *
   * Returns a new object; the input schema is not mutated.
   * NOTE(review): objects WITHOUT a `properties` map are left untouched
   * here — confirm `isStrictCompatible` filters those out beforehand.
   */
  private normalizeSchemaForOpenAI(schema: Record<string, unknown>): Record<string, unknown> {
    // Non-object nodes pass through unchanged (recursion base case).
    if (schema == null || typeof schema !== 'object') return schema

    // Strip unsupported keywords
    const result: Record<string, unknown> = {}
    for (const [k, v] of Object.entries(schema)) {
      if (!OpenAIProvider.UNSUPPORTED_KEYWORDS.has(k)) {
        result[k] = v
      }
    }

    // Handle object types with explicit properties
    if (result.type === 'object' && result.properties) {
      const props = result.properties as Record<string, any>
      // Snapshot the original `required` list BEFORE it is overwritten below.
      const currentRequired = new Set(
        Array.isArray(result.required) ? (result.required as string[]) : []
      )

      const normalizedProps: Record<string, any> = {}

      for (const [key, prop] of Object.entries(props)) {
        let normalizedProp = this.normalizeSchemaForOpenAI(prop)

        // If property is not required, make it nullable and add to required
        // (strict mode forbids truly-optional properties).
        if (!currentRequired.has(key)) {
          normalizedProp = this.makeNullable(normalizedProp)
        }

        normalizedProps[key] = normalizedProp
      }

      result.properties = normalizedProps
      // Strict mode: every property must be required, no extras allowed.
      result.required = Object.keys(normalizedProps)
      result.additionalProperties = false
    }

    // Handle arrays
    if (result.type === 'array' && result.items) {
      result.items = this.normalizeSchemaForOpenAI(result.items as Record<string, unknown>)
    }

    // Handle anyOf / oneOf
    for (const key of ['anyOf', 'oneOf'] as const) {
      if (Array.isArray(result[key])) {
        result[key] = (result[key] as any[]).map((s: any) => this.normalizeSchemaForOpenAI(s))
      }
    }

    return result
  }
|
|
487
|
+
|
|
488
|
+
private makeNullable(schema: Record<string, unknown>): Record<string, unknown> {
|
|
489
|
+
// Already nullable
|
|
490
|
+
if (Array.isArray(schema.type) && schema.type.includes('null')) return schema
|
|
491
|
+
|
|
492
|
+
// Has anyOf — add null variant
|
|
493
|
+
if (Array.isArray(schema.anyOf)) {
|
|
494
|
+
const hasNull = schema.anyOf.some((s: any) => s.type === 'null')
|
|
495
|
+
if (!hasNull) {
|
|
496
|
+
return { ...schema, anyOf: [...schema.anyOf, { type: 'null' }] }
|
|
497
|
+
}
|
|
498
|
+
return schema
|
|
499
|
+
}
|
|
500
|
+
|
|
501
|
+
// Simple type — wrap in anyOf with null
|
|
502
|
+
if (schema.type) {
|
|
503
|
+
const { type, ...rest } = schema
|
|
504
|
+
return { anyOf: [{ type, ...rest }, { type: 'null' }] }
|
|
505
|
+
}
|
|
506
|
+
|
|
507
|
+
return schema
|
|
508
|
+
}
|
|
351
509
|
}
|
package/src/utils/retry.ts
ADDED
|
@@ -0,0 +1,100 @@
|
|
|
1
|
+
import { ExternalServiceError } from '@stravigor/core/exceptions/errors'
|
|
2
|
+
|
|
3
|
+
/** Tuning knobs for `retryableFetch`. */
export interface RetryOptions {
  /** Maximum number of retries after the initial attempt (default 3). */
  maxRetries?: number
  /** Base backoff delay in milliseconds (default 1000). */
  baseDelay?: number
  /** Upper bound on any single backoff delay, in milliseconds (default 60 000). */
  maxDelay?: number
  /** HTTP statuses that trigger a retry (default: 429, 500, 502, 503, 529). */
  retryableStatuses?: number[]
}
|
|
9
|
+
|
|
10
|
+
// Statuses retried by default: rate limiting (429), transient server errors
// (500/502/503), and 529 — presumably the provider-specific "overloaded"
// status (not a standard HTTP code); confirm against provider docs.
const DEFAULT_RETRYABLE = [429, 500, 502, 503, 529]
|
|
11
|
+
|
|
12
|
+
/**
|
|
13
|
+
* Fetch with automatic retry and exponential backoff for transient errors.
|
|
14
|
+
*
|
|
15
|
+
* Retries on 429 (rate limit), 5xx, and network failures.
|
|
16
|
+
* Parses the `retry-after` header when available; otherwise uses
|
|
17
|
+
* exponential backoff with jitter.
|
|
18
|
+
*
|
|
19
|
+
* Returns the successful `Response`. On final failure, throws
|
|
20
|
+
* `ExternalServiceError` with the last status and body.
|
|
21
|
+
*/
|
|
22
|
+
export async function retryableFetch(
|
|
23
|
+
service: string,
|
|
24
|
+
url: string,
|
|
25
|
+
init: RequestInit,
|
|
26
|
+
options?: RetryOptions
|
|
27
|
+
): Promise<Response> {
|
|
28
|
+
const maxRetries = options?.maxRetries ?? 3
|
|
29
|
+
const baseDelay = options?.baseDelay ?? 1000
|
|
30
|
+
const maxDelay = options?.maxDelay ?? 60_000
|
|
31
|
+
const retryable = options?.retryableStatuses ?? DEFAULT_RETRYABLE
|
|
32
|
+
|
|
33
|
+
for (let attempt = 0; attempt <= maxRetries; attempt++) {
|
|
34
|
+
let response: Response
|
|
35
|
+
|
|
36
|
+
try {
|
|
37
|
+
response = await fetch(url, init)
|
|
38
|
+
} catch (err) {
|
|
39
|
+
// Network error (DNS, connection refused, etc.)
|
|
40
|
+
if (attempt === maxRetries) {
|
|
41
|
+
throw new ExternalServiceError(
|
|
42
|
+
service,
|
|
43
|
+
undefined,
|
|
44
|
+
err instanceof Error ? err.message : String(err)
|
|
45
|
+
)
|
|
46
|
+
}
|
|
47
|
+
await sleep(backoffDelay(attempt, baseDelay, maxDelay))
|
|
48
|
+
continue
|
|
49
|
+
}
|
|
50
|
+
|
|
51
|
+
if (response.ok) return response
|
|
52
|
+
|
|
53
|
+
// Non-retryable status — fail immediately
|
|
54
|
+
if (!retryable.includes(response.status)) {
|
|
55
|
+
const text = await response.text()
|
|
56
|
+
throw new ExternalServiceError(service, response.status, text)
|
|
57
|
+
}
|
|
58
|
+
|
|
59
|
+
// Retryable status — wait and retry (unless last attempt)
|
|
60
|
+
if (attempt === maxRetries) {
|
|
61
|
+
const text = await response.text()
|
|
62
|
+
throw new ExternalServiceError(service, response.status, text)
|
|
63
|
+
}
|
|
64
|
+
|
|
65
|
+
const delay = parseRetryAfter(response) ?? backoffDelay(attempt, baseDelay, maxDelay)
|
|
66
|
+
await sleep(delay)
|
|
67
|
+
}
|
|
68
|
+
|
|
69
|
+
// Unreachable, but satisfies TypeScript
|
|
70
|
+
throw new ExternalServiceError(service, undefined, 'Retry loop exited unexpectedly')
|
|
71
|
+
}
|
|
72
|
+
|
|
73
|
+
/**
|
|
74
|
+
* Parse the `retry-after` header into milliseconds.
|
|
75
|
+
* Supports both delta-seconds ("2") and HTTP-date formats.
|
|
76
|
+
*/
|
|
77
|
+
function parseRetryAfter(response: Response): number | null {
|
|
78
|
+
const header = response.headers.get('retry-after')
|
|
79
|
+
if (!header) return null
|
|
80
|
+
|
|
81
|
+
const seconds = Number(header)
|
|
82
|
+
if (!Number.isNaN(seconds)) return seconds * 1000
|
|
83
|
+
|
|
84
|
+
// HTTP-date format
|
|
85
|
+
const date = Date.parse(header)
|
|
86
|
+
if (!Number.isNaN(date)) return Math.max(0, date - Date.now())
|
|
87
|
+
|
|
88
|
+
return null
|
|
89
|
+
}
|
|
90
|
+
|
|
91
|
+
/** Exponential backoff with jitter: base * 2^attempt + random jitter, capped at maxDelay. */
|
|
92
|
+
function backoffDelay(attempt: number, baseDelay: number, maxDelay: number): number {
|
|
93
|
+
const exp = baseDelay * 2 ** attempt
|
|
94
|
+
const jitter = Math.random() * baseDelay
|
|
95
|
+
return Math.min(exp + jitter, maxDelay)
|
|
96
|
+
}
|
|
97
|
+
|
|
98
|
+
function sleep(ms: number): Promise<void> {
|
|
99
|
+
return new Promise(resolve => setTimeout(resolve, ms))
|
|
100
|
+
}
|