@pwshub/aisdk 0.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +297 -0
- package/package.json +54 -0
- package/src/coerce.js +52 -0
- package/src/config.js +209 -0
- package/src/errors.js +106 -0
- package/src/index.js +269 -0
- package/src/providers.js +249 -0
- package/src/registry.js +164 -0
- package/src/validation.js +113 -0
package/src/errors.js
ADDED
|
@@ -0,0 +1,106 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* @fileoverview Structured error types for the AI client.
|
|
3
|
+
*
|
|
4
|
+
* Distinguishes between two categories of failure:
|
|
5
|
+
*
|
|
6
|
+
* - `ProviderError` — transient or capacity issues on the provider side
|
|
7
|
+
* (5xx, 429). Safe to retry with same or fallback model.
|
|
8
|
+
*
|
|
9
|
+
* - `InputError` — request was rejected due to bad input or auth
|
|
10
|
+
* (400, 401, 403, 422). Retrying will not help;
|
|
11
|
+
* do NOT attempt fallback for these.
|
|
12
|
+
*
|
|
13
|
+
* Callers can use `instanceof` to decide retry/fallback strategy:
|
|
14
|
+
*
|
|
15
|
+
* @example
|
|
16
|
+
* try {
|
|
17
|
+
* const result = await ai.ask({ model: 'gpt-4o', prompt: '...' })
|
|
18
|
+
* } catch (err) {
|
|
19
|
+
* if (err instanceof ProviderError) {
|
|
20
|
+
* // safe to retry or fallback to another model
|
|
21
|
+
* } else if (err instanceof InputError) {
|
|
22
|
+
* // bad request — fix the input, do not retry
|
|
23
|
+
* }
|
|
24
|
+
* }
|
|
25
|
+
*/
|
|
26
|
+
|
|
27
|
+
/**
 * Thrown when the provider returns a transient or server-side error.
 * HTTP 429 (rate limit) and 5xx responses produce this error.
 * Safe to retry or fall back to another model.
 */
export class ProviderError extends Error {
  /**
   * @param {string} message - Human-readable error description.
   * @param {object} [meta] - Context about the failed call.
   * @param {number} [meta.status] - HTTP status code.
   * @param {string} [meta.provider] - Provider ID.
   * @param {string} [meta.model] - Model ID that was called.
   * @param {string} [meta.raw] - Raw response body from the provider.
   */
  constructor(message, meta = {}) {
    super(message)
    this.name = 'ProviderError'
    // Copy only the recognized meta fields; unknown keys are ignored.
    const { status, provider, model, raw } = meta
    Object.assign(this, { status, provider, model, raw })
  }
}
|
|
52
|
+
|
|
53
|
+
/**
 * Thrown when the provider rejects the request due to invalid input or auth.
 * HTTP 400, 401, 403, 422 responses produce this error.
 * Retrying or falling back will NOT resolve this — the input must be fixed.
 */
export class InputError extends Error {
  /**
   * @param {string} message - Human-readable error description.
   * @param {object} [meta] - Context about the rejected call.
   * @param {number} [meta.status] - HTTP status code.
   * @param {string} [meta.provider] - Provider ID.
   * @param {string} [meta.model] - Model ID that was called.
   * @param {string} [meta.raw] - Raw response body from the provider.
   */
  constructor(message, meta = {}) {
    super(message)
    this.name = 'InputError'
    // Copy only the recognized meta fields; unknown keys are ignored.
    const { status, provider, model, raw } = meta
    Object.assign(this, { status, provider, model, raw })
  }
}
|
|
78
|
+
|
|
79
|
+
/**
 * HTTP status codes that indicate a provider-side transient failure
 * (rate limiting or server errors). `throwHttpError` maps these to
 * `ProviderError`; any other non-OK status becomes `InputError`.
 * These are safe to retry or fall back on.
 * @type {Set<number>}
 */
export const PROVIDER_ERROR_STATUSES = new Set([429, 500, 502, 503, 504])
|
|
85
|
+
|
|
86
|
+
/**
 * Classifies an HTTP response into ProviderError or InputError and throws it.
 * The response body is consumed and attached to the error as `raw`.
 *
 * @param {Response} res - Non-OK fetch response.
 * @param {string} provider - Provider ID.
 * @param {string} model - Model ID.
 * @returns {Promise<never>} Always throws.
 * @throws {ProviderError} For statuses in PROVIDER_ERROR_STATUSES (429 / 5xx).
 * @throws {InputError} For every other status (400, 401, 403, 422, ...).
 */
export const throwHttpError = async (res, provider, model) => {
  const { status } = res
  const raw = await res.text()

  // Pick the error class by status, then throw with the same metadata either way.
  const ErrorClass = PROVIDER_ERROR_STATUSES.has(status) ? ProviderError : InputError
  throw new ErrorClass(`${provider}/${model} responded with HTTP ${status}`, {
    status, provider, model, raw,
  })
}
|
package/src/index.js
ADDED
|
@@ -0,0 +1,269 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* @fileoverview Thin AI client — single unified interface for text generation.
|
|
3
|
+
*
|
|
4
|
+
* @example Basic usage
|
|
5
|
+
* import { createAi } from '@pwshub/aisdk'
|
|
6
|
+
*
|
|
7
|
+
* const ai = createAi()
|
|
8
|
+
* const result = await ai.ask({
|
|
9
|
+
* model: 'claude-sonnet-4-20250514',
|
|
10
|
+
* apikey: 'your-api-key',
|
|
11
|
+
* prompt: 'What is the capital of Vietnam?',
|
|
12
|
+
* temperature: 0.5,
|
|
13
|
+
* })
|
|
14
|
+
* console.log(result.text)
|
|
15
|
+
* console.log(result.usage) // { inputTokens, outputTokens, cacheTokens, estimatedCost }
|
|
16
|
+
*
|
|
17
|
+
* @example With fallbacks
|
|
18
|
+
* const result = await ai.ask({
|
|
19
|
+
* model: 'gpt-4o',
|
|
20
|
+
* apikey: 'your-openai-key',
|
|
21
|
+
* prompt: '...',
|
|
22
|
+
* fallbacks: ['gpt-4o-mini', 'claude-haiku-4-5-20251001'],
|
|
23
|
+
* })
|
|
24
|
+
* if (result.model !== 'gpt-4o') {
|
|
25
|
+
* console.warn('Fell back to', result.model)
|
|
26
|
+
* }
|
|
27
|
+
*
|
|
28
|
+
* @example Google provider-specific options
|
|
29
|
+
* const result = await ai.ask({
|
|
30
|
+
* model: 'gemini-2.0-flash',
|
|
31
|
+
* apikey: 'your-google-key',
|
|
32
|
+
* prompt: '...',
|
|
33
|
+
* providerOptions: {
|
|
34
|
+
* safetySettings: [
|
|
35
|
+
* { category: 'HARM_CATEGORY_HARASSMENT', threshold: 'BLOCK_NONE' },
|
|
36
|
+
* ],
|
|
37
|
+
* thinkingConfig: { thinkingBudget: 1024 },
|
|
38
|
+
* },
|
|
39
|
+
* })
|
|
40
|
+
*
|
|
41
|
+
*/
|
|
42
|
+
|
|
43
|
+
import {
|
|
44
|
+
getModel, listModels, setModels,
|
|
45
|
+
} from './registry.js'
|
|
46
|
+
import { normalizeConfig } from './config.js'
|
|
47
|
+
import { coerceConfig } from './coerce.js'
|
|
48
|
+
import { getAdapter } from './providers.js'
|
|
49
|
+
import {
|
|
50
|
+
ProviderError, InputError, throwHttpError,
|
|
51
|
+
} from './errors.js'
|
|
52
|
+
import { validateAskOptions } from './validation.js'
|
|
53
|
+
|
|
54
|
+
export {
|
|
55
|
+
ProviderError, InputError,
|
|
56
|
+
}
|
|
57
|
+
|
|
58
|
+
/**
|
|
59
|
+
* @typedef {Object} AiOptions
|
|
60
|
+
* @property {string} [gatewayUrl] - Optional AI gateway URL override
|
|
61
|
+
*/
|
|
62
|
+
|
|
63
|
+
/**
|
|
64
|
+
* @typedef {Object} AskParams
|
|
65
|
+
* @property {string} model - Model ID (must be registered via setModels())
|
|
66
|
+
* @property {string} apikey - API key for the provider
|
|
67
|
+
* @property {string} prompt - The user message
|
|
68
|
+
* @property {string} [system] - Optional system prompt
|
|
69
|
+
* @property {string[]} [fallbacks] - Ordered list of fallback model IDs
|
|
70
|
+
* @property {Record<string, unknown>} [providerOptions] - Provider-specific options merged into body
|
|
71
|
+
* @property {number} [temperature]
|
|
72
|
+
* @property {number} [maxTokens]
|
|
73
|
+
* @property {number} [topP]
|
|
74
|
+
* @property {number} [topK]
|
|
75
|
+
* @property {number} [frequencyPenalty]
|
|
76
|
+
* @property {number} [presencePenalty]
|
|
77
|
+
*/
|
|
78
|
+
|
|
79
|
+
/**
|
|
80
|
+
* @typedef {Object} Usage
|
|
81
|
+
* @property {number} inputTokens
|
|
82
|
+
* @property {number} outputTokens
|
|
83
|
+
* @property {number} cacheTokens
|
|
84
|
+
* @property {number} estimatedCost - In USD, based on models.json pricing
|
|
85
|
+
*/
|
|
86
|
+
|
|
87
|
+
/**
|
|
88
|
+
* @typedef {Object} AskResult
|
|
89
|
+
* @property {string} text
|
|
90
|
+
* @property {string} model - The model that actually responded (may differ if fallback was used)
|
|
91
|
+
* @property {Usage} usage
|
|
92
|
+
*/
|
|
93
|
+
|
|
94
|
+
/**
 * Picks generation config keys from AskParams, dropping routing params
 * (model, apikey, prompt, fallbacks, ...). Keys whose value is
 * `undefined` are omitted entirely.
 *
 * @param {AskParams} params
 * @returns {import('./config.js').GenerationConfig}
 */
const extractGenConfig = (params) => {
  const genKeys = ['temperature', 'maxTokens', 'topP', 'topK', 'frequencyPenalty', 'presencePenalty']
  const config = {}
  for (const key of genKeys) {
    if (params[key] !== undefined) {
      config[key] = params[key]
    }
  }
  return config
}
|
|
105
|
+
|
|
106
|
+
/**
 * Calculates estimated cost in USD from token counts and model pricing.
 * Pricing fields on the model record are per million tokens.
 *
 * @param {import('./registry.js').RawUsage} usage
 * @param {import('./registry.js').ModelRecord} record
 * @returns {number} Cost in USD, rounded to 8 decimal places.
 */
const calcCost = (usage, record) => {
  // Prices are quoted per million tokens.
  const perMillion = (tokens, price) => (tokens / 1_000_000) * price
  const total =
    perMillion(usage.inputTokens, record.input_price) +
    perMillion(usage.outputTokens, record.output_price) +
    perMillion(usage.cacheTokens, record.cache_price)

  // Round to 8 decimal places to avoid floating point noise
  return Math.round(total * 1e8) / 1e8
}
|
|
122
|
+
|
|
123
|
+
/**
 * Sends a single request to a provider. No retry logic — throws structured
 * errors so the caller (ask) can decide how to handle them.
 *
 * @param {string} modelId - Registered model ID (resolved via getModel()).
 * @param {AskParams} params
 * @param {string} [gatewayUrl] - When set, replaces the provider's default endpoint.
 * @returns {Promise<AskResult>}
 * @throws {ProviderError} On 429 / 5xx or network failure — safe to retry or fallback
 * @throws {InputError} On other 4xx — do not retry, fix the input
 */
const callModel = async (modelId, params, gatewayUrl) => {
  const { record, supportedParams } = getModel(modelId)
  const { provider: providerId, name: modelName } = record
  const { apikey, prompt, system, providerOptions = {} } = params

  const adapter = getAdapter(providerId)

  // Clamp generation values to the provider's acceptable ranges (coerce,
  // don't throw), then translate them into the provider's wire format.
  const wireConfig = normalizeConfig(
    coerceConfig(extractGenConfig(params), providerId),
    providerId,
    supportedParams,
    modelId,
  )

  /** @type {import('./providers.js').Message[]} */
  const messages = []
  if (system) {
    messages.push({ role: 'system', content: system })
  }
  messages.push({ role: 'user', content: prompt })

  const url = gatewayUrl ?? adapter.url(modelName, apikey)
  const body = adapter.buildBody(modelName, messages, wireConfig, providerOptions)

  let res
  try {
    res = await fetch(url, {
      method: 'POST',
      headers: adapter.headers(apikey),
      body: JSON.stringify(body),
    })
  } catch (networkErr) {
    // Network-level failure (DNS, connection refused) — treat as provider error
    throw new ProviderError(
      `Network error calling ${providerId}/${modelId}: ${networkErr.message}`,
      { status: 0, provider: providerId, model: modelId },
    )
  }

  if (!res.ok) {
    await throwHttpError(res, providerId, modelId)
  }

  const data = await res.json()
  const rawUsage = adapter.extractUsage(data)

  return {
    text: adapter.extractText(data),
    model: modelId,
    usage: {
      ...rawUsage,
      estimatedCost: calcCost(rawUsage, record),
    },
  }
}
|
|
205
|
+
|
|
206
|
+
/**
 * Creates a thin AI client.
 *
 * No internal retry — the caller controls retry strategy and can track
 * attempt counts and errors externally. Fallbacks are provider-error-only:
 * input errors (bad request, auth) are thrown immediately without trying
 * fallback models.
 *
 * @param {AiOptions} [opts={}]
 * @returns {{ ask: (params: AskParams) => Promise<AskResult>, listModels: () => import('./registry.js').ModelRecord[] }}
 */
export const createAi = (opts = {}) => {
  const { gatewayUrl } = opts

  /**
   * Sends a text generation request, walking the model chain (primary model
   * first, then each fallback in order) until one succeeds.
   * Retrying is the caller's responsibility.
   *
   * @param {AskParams} params
   * @returns {Promise<AskResult>}
   * @throws {InputError} Immediately on invalid options or non-retryable 4xx — no fallbacks tried
   * @throws {ProviderError} When all models in the chain fail with provider errors
   */
  const ask = async (params) => {
    // Surface validation failures through the same error taxonomy callers
    // already handle — reported as a client-side 400.
    try {
      validateAskOptions(params)
    } catch (error) {
      throw new InputError('Invalid options', {
        status: 400,
        provider: 'client',
        model: params.model || 'unknown',
        raw: error.message,
      })
    }

    const chain = [params.model, ...(params.fallbacks ?? [])]
    let lastProviderError

    for (const modelId of chain) {
      try {
        return await callModel(modelId, params, gatewayUrl)
      } catch (err) {
        // Input errors cannot be fixed by switching models — rethrow at once.
        if (err instanceof InputError) {
          throw err
        }
        // Provider error — log and move on to the next model in the chain.
        const tail = modelId === chain.at(-1)
          ? 'No more fallbacks.'
          : 'Trying next fallback...'
        console.warn(`[ai-client] ${err.message}. ${tail}`)
        lastProviderError = err
      }
    }

    throw lastProviderError
  }

  return { ask, listModels }
}
|
|
268
|
+
|
|
269
|
+
export { setModels }
|
package/src/providers.js
ADDED
|
@@ -0,0 +1,249 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* @fileoverview Provider adapters — headers, URL, request body, response parsing.
|
|
3
|
+
*
|
|
4
|
+
* Each adapter also implements `extractUsage()` to pull token counts from the
|
|
5
|
+
* raw response. Field names differ per provider; we normalize to canonical names.
|
|
6
|
+
*
|
|
7
|
+
* `providerOptions` is an escape hatch for provider-specific features that
|
|
8
|
+
* cannot be generalized (e.g. Google's safetySettings, thinkingConfig).
|
|
9
|
+
* Its contents are merged directly into the request body.
|
|
10
|
+
*/
|
|
11
|
+
|
|
12
|
+
/**
|
|
13
|
+
* @typedef {'openai'|'anthropic'|'google'|'dashscope'|'deepseek'} ProviderId
|
|
14
|
+
*/
|
|
15
|
+
|
|
16
|
+
/**
|
|
17
|
+
* @typedef {Object} Message
|
|
18
|
+
* @property {'user'|'assistant'|'system'} role
|
|
19
|
+
* @property {string} content
|
|
20
|
+
*/
|
|
21
|
+
|
|
22
|
+
/**
|
|
23
|
+
* @typedef {Object} RawUsage
|
|
24
|
+
* @property {number} inputTokens
|
|
25
|
+
* @property {number} outputTokens
|
|
26
|
+
* @property {number} cacheTokens - 0 when not applicable
|
|
27
|
+
*/
|
|
28
|
+
|
|
29
|
+
/**
|
|
30
|
+
* @typedef {Object} ProviderAdapter
|
|
31
|
+
* @property {(apikey: string) => Record<string, string>} headers
|
|
32
|
+
* @property {(modelName: string, apikey: string) => string} url
|
|
33
|
+
* @property {(modelName: string, messages: Message[], config: Record<string, unknown>, providerOptions: Record<string, unknown>) => Record<string, unknown>} buildBody
|
|
34
|
+
* @property {(data: Record<string, unknown>) => string} extractText
|
|
35
|
+
* @property {(data: Record<string, unknown>) => RawUsage} extractUsage
|
|
36
|
+
*/
|
|
37
|
+
|
|
38
|
+
/** @type {ProviderAdapter} */
const openai = {
  headers: (apikey) => ({
    Authorization: `Bearer ${apikey}`,
    'Content-Type': 'application/json',
  }),
  url: () => 'https://api.openai.com/v1/chat/completions',
  buildBody: (modelName, messages, config, providerOptions) => ({
    model: modelName,
    messages,
    n: 1,
    ...config,
    ...providerOptions,
  }),
  extractText: (data) => {
    const choice = data.choices?.[0]
    if (!choice) {
      throw new Error(`OpenAI response missing choices. Full response: ${JSON.stringify(data)}`)
    }

    const { message } = choice
    if (!message) {
      throw new Error(`OpenAI response missing message. Full response: ${JSON.stringify(data)}`)
    }

    // Reasoning models (o1, o3, gpt-5) may surface their text differently:
    // prefer the standard `content` field, then `reasoning_content`.
    const direct = message.content || message.reasoning_content
    if (direct) {
      return direct
    }

    // Last resort: any string-valued field whose key mentions "content".
    const fallbackKey = Object.keys(message).find(
      (key) => key.includes('content') && typeof message[key] === 'string',
    )
    if (fallbackKey !== undefined) {
      return message[fallbackKey]
    }

    throw new Error(`OpenAI response missing content. Message: ${JSON.stringify(message)}`)
  },
  extractUsage: (data) => ({
    inputTokens: data.usage?.prompt_tokens ?? 0,
    outputTokens: data.usage?.completion_tokens ?? 0,
    cacheTokens: data.usage?.prompt_tokens_details?.cached_tokens ?? 0,
  }),
}
|
|
89
|
+
|
|
90
|
+
/** @type {ProviderAdapter} */
const anthropic = {
  headers: (apikey) => ({
    'x-api-key': apikey,
    'anthropic-version': '2023-06-01',
    'Content-Type': 'application/json',
  }),
  url: () => 'https://api.anthropic.com/v1/messages',
  buildBody: (modelName, messages, config, providerOptions) => {
    // Anthropic takes the system prompt as a top-level field, not a message.
    const systemText = messages.find((m) => m.role === 'system')?.content
    const conversation = messages.filter((m) => m.role !== 'system')

    const body = {
      model: modelName,
      messages: conversation,
    }
    if (systemText) {
      body.system = systemText
    }

    // max_tokens is mandatory for the Messages API; the default is
    // overridden by config when the caller supplied maxTokens.
    return {
      ...body,
      max_tokens: 4096,
      ...config,
      ...providerOptions,
    }
  },
  extractText: (data) => {
    // Anthropic can return multiple content blocks (text, tool_use, etc.);
    // join every text block into a single string.
    const textBlocks = data.content?.filter((c) => c.type === 'text') ?? []
    if (textBlocks.length === 0) {
      throw new Error('Anthropic response missing content')
    }
    return textBlocks.map((c) => c.text).join('')
  },
  extractUsage: (data) => ({
    inputTokens: data.usage?.input_tokens ?? 0,
    outputTokens: data.usage?.output_tokens ?? 0,
    cacheTokens: data.usage?.cache_read_input_tokens ?? 0,
  }),
}
|
|
125
|
+
|
|
126
|
+
/** @type {ProviderAdapter} */
const google = {
  // Auth travels in the URL query string, so no auth header is needed.
  headers: () => ({ 'Content-Type': 'application/json' }),
  url: (modelName, apikey) =>
    `https://generativelanguage.googleapis.com/v1/models/${modelName}:generateContent?key=${apikey}`,
  buildBody: (modelName, messages, config, providerOptions) => {
    const systemText = messages.find((m) => m.role === 'system')?.content

    // Google uses 'model' instead of 'assistant' and wraps text in parts.
    const toContent = (m) => ({
      role: m.role === 'assistant' ? 'model' : 'user',
      parts: [{ text: m.content }],
    })
    const contents = messages.filter((m) => m.role !== 'system').map(toContent)

    const body = { contents }
    if (systemText) {
      body.systemInstruction = { parts: [{ text: systemText }] }
    }
    return {
      ...body,
      ...config, // includes nested generationConfig
      ...providerOptions, // safetySettings, thinkingConfig, etc.
    }
  },
  extractText: (data) => {
    // Google may return empty candidates if blocked by safety filters
    const [candidate] = data.candidates ?? []
    if (!candidate) {
      throw new Error('Google response has no candidates (may be blocked by safety filters)')
    }

    if (candidate.finishReason === 'SAFETY') {
      throw new Error('Google response blocked by safety filters')
    }

    const text = candidate.content?.parts?.[0]?.text
    if (!text) {
      throw new Error('Google response missing content')
    }
    return text
  },
  extractUsage: (data) => ({
    inputTokens: data.usageMetadata?.promptTokenCount ?? 0,
    outputTokens: data.usageMetadata?.candidatesTokenCount ?? 0,
    cacheTokens: data.usageMetadata?.cachedContentTokenCount ?? 0,
  }),
}
|
|
170
|
+
|
|
171
|
+
/** @type {ProviderAdapter} */
const dashscope = {
  headers: (apikey) => ({
    Authorization: `Bearer ${apikey}`,
    'Content-Type': 'application/json',
  }),
  // International users should use dashscope-intl.aliyuncs.com;
  // China users can use dashscope.aliyuncs.com.
  url: () => 'https://dashscope-intl.aliyuncs.com/compatible-mode/v1/chat/completions',
  buildBody: (modelName, messages, config, providerOptions) => ({
    model: modelName,
    messages,
    ...config,
    ...providerOptions,
  }),
  extractText: (data) => {
    // Prefer the OpenAI-compatible shape; fall back to the `output`-wrapped shape.
    const compat = data.choices?.[0]?.message?.content
    const wrapped = data.output?.choices?.[0]?.message?.content
    const content = compat ?? wrapped
    if (!content) {
      throw new Error('DashScope response missing content')
    }
    return content
  },
  extractUsage: (data) => {
    // Usage may appear at the top level (OpenAI-compatible) or under `output`,
    // with either snake_case token-field naming variant.
    const usage = data.usage ?? data.output?.usage
    const inputTokens = usage?.input_tokens ?? usage?.prompt_tokens ?? 0
    const outputTokens = usage?.output_tokens ?? usage?.completion_tokens ?? 0
    // Cache token reporting is not handled for this provider — always 0.
    return { inputTokens, outputTokens, cacheTokens: 0 }
  },
}
|
|
204
|
+
|
|
205
|
+
/** @type {ProviderAdapter} */
const deepseek = {
  headers: (apikey) => ({
    Authorization: `Bearer ${apikey}`,
    'Content-Type': 'application/json',
  }),
  url: () => 'https://api.deepseek.com/chat/completions',
  buildBody: (modelName, messages, config, providerOptions) => ({
    model: modelName,
    messages,
    ...config,
    ...providerOptions,
  }),
  extractText: (data) => {
    // OpenAI-compatible response: text lives on the first choice's message.
    const [firstChoice] = data.choices ?? []
    const content = firstChoice?.message?.content
    if (!content) {
      throw new Error('DeepSeek response missing content')
    }
    return content
  },
  extractUsage: (data) => ({
    inputTokens: data.usage?.prompt_tokens ?? 0,
    outputTokens: data.usage?.completion_tokens ?? 0,
    cacheTokens: 0,
  }),
}
|
|
231
|
+
|
|
232
|
+
/**
 * Lookup table mapping provider IDs to their adapter implementations.
 * Keys correspond to the `provider` field on registered model records;
 * `getAdapter()` resolves through this table.
 * @type {Record<string, ProviderAdapter>}
 */
const ADAPTERS = {
  openai, anthropic, google, dashscope, deepseek,
}
|
|
236
|
+
|
|
237
|
+
/**
 * Resolves a provider ID to its adapter implementation.
 *
 * @param {string} providerId - One of the keys in ADAPTERS.
 * @returns {ProviderAdapter}
 * @throws {Error} When no adapter is registered under `providerId`.
 */
export function getAdapter(providerId) {
  const adapter = ADAPTERS[providerId]
  if (adapter) {
    return adapter
  }
  throw new Error(`No adapter found for provider: "${providerId}"`)
}
|