llmconfig 0.0.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/LICENSE.md ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2025-PRESENT masol <https://github.com/masol>
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
package/README.md ADDED
@@ -0,0 +1,42 @@
1
+ # pkg-placeholder
2
+
3
+ [![npm version][npm-version-src]][npm-version-href]
4
+ [![npm downloads][npm-downloads-src]][npm-downloads-href]
5
+ [![bundle][bundle-src]][bundle-href]
6
+ [![JSDocs][jsdocs-src]][jsdocs-href]
7
+ [![License][license-src]][license-href]
8
+
9
+ _description_
10
+
11
+ ## Note for Developers
12
+
13
+ This starter recommends using [npm Trusted Publisher](https://github.com/e18e/ecosystem-issues/issues/201), where the release is done on CI to ensure the security of the packages.
14
+
15
+ To do so, you need to run `pnpm publish` manually for the very first time to create the package on npm, and then go to `https://www.npmjs.com/package/<your-package-name>/access` to set the connection to your GitHub repo.
16
+
17
+ Then for the future releases, you can run `pnpm run release` to do the release and the GitHub Actions will take care of the release process.
18
+
19
+ ## Sponsors
20
+
21
+ <p align="center">
22
+ <a href="https://cdn.jsdelivr.net/gh/antfu/static/sponsors.svg">
23
+ <img src='https://cdn.jsdelivr.net/gh/antfu/static/sponsors.svg'/>
24
+ </a>
25
+ </p>
26
+
27
+ ## License
28
+
29
+ [MIT](./LICENSE) License © [Anthony Fu](https://github.com/antfu)
30
+
31
+ <!-- Badges -->
32
+
33
+ [npm-version-src]: https://img.shields.io/npm/v/pkg-placeholder?style=flat&colorA=080f12&colorB=1fa669
34
+ [npm-version-href]: https://npmjs.com/package/pkg-placeholder
35
+ [npm-downloads-src]: https://img.shields.io/npm/dm/pkg-placeholder?style=flat&colorA=080f12&colorB=1fa669
36
+ [npm-downloads-href]: https://npmjs.com/package/pkg-placeholder
37
+ [bundle-src]: https://img.shields.io/bundlephobia/minzip/pkg-placeholder?style=flat&colorA=080f12&colorB=1fa669&label=minzip
38
+ [bundle-href]: https://bundlephobia.com/result?p=pkg-placeholder
39
+ [license-src]: https://img.shields.io/github/license/antfu/pkg-placeholder.svg?style=flat&colorA=080f12&colorB=1fa669
40
+ [license-href]: https://github.com/antfu/pkg-placeholder/blob/main/LICENSE
41
+ [jsdocs-src]: https://img.shields.io/badge/jsdocs-reference-080f12?style=flat&colorA=080f12&colorB=1fa669
42
+ [jsdocs-href]: https://www.jsdocs.io/package/pkg-placeholder
@@ -0,0 +1,147 @@
1
+ import { LanguageModelUsage, ModelMessage } from "ai";
2
+ import { Table } from "dexie";
3
+ import z$1, { z } from "zod";
4
+
5
//#region src/utils/const.d.ts
// Dexie store definitions: primary key first, then indexed fields.
declare const TableSchema: {
  readonly provider: "id,name,createdAt";
  readonly llms: "id,pid,model,bility,createdAt";
};
// Every capability tag ("bility") a configured model may declare.
declare const LLMBilities: readonly ["text", "image", "image_modify", "video", "video_modify", "speech", "speech_modify", "music", "music_modify"];
//#endregion
12
//#region src/types/llmconfig.d.ts
/** One capability tag drawn from LLMBilities. */
type LLMBility = typeof LLMBilities[number];
/** A configured LLM API provider (endpoint + credentials). */
interface LLMProvider {
  id: string;
  name: string;
  apiKey: string;
  createdAt: number;
  baseUrl?: string;
}
/**
 * LLM configuration interface: one model offered by a provider (`pid`),
 * tagged with the single capability it is used for.
 */
interface LLMConfig {
  id: string;
  pid: string;
  model: string;
  bility: LLMBility;
  createdAt: number;
  enabled?: boolean;
  temperature?: number;
  maxTokens?: number;
}
//#endregion
35
//#region src/types/llmwrapper.d.ts
/**
 * Base call-result interface.
 */
interface CallResult {
  success: boolean;
  response?: string;
  error?: Error;
  usage?: LanguageModelUsage;
  responseTime?: number;
}
/**
 * JSON call-result interface.
 */
interface JSONCallResult extends CallResult {
  json?: unknown;
}
/**
 * Structured-object generation result.
 */
interface GenerateObjectResult<T> extends CallResult {
  object?: T;
  finishReason?: string;
}
/**
 * Options for structured-object generation.
 */
interface GenerateObjectOptions {
  mode?: 'auto' | 'json' | 'tool';
  schemaName?: string;
  schemaDescription?: string;
}
/**
 * Base LLMWrapper interface.
 */
interface ILLMWrapper {
  readonly callCount: number;
  /**
   * Invoke the LLM.
   * @param input input content (string, number, or message array)
   * @returns call result
   */
  call: (input: string | ModelMessage[]) => Promise<CallResult>;
  /**
   * Invoke the LLM with streaming output.
   * @param input input content
   * @param onChunk callback invoked for each streamed text chunk
   * @returns call result
   */
  callStream: (input: string | ModelMessage[], onChunk: (chunk: string) => void) => Promise<CallResult>;
  /**
   * Get the configuration.
   * @returns the LLM configuration
   */
  getConfig: () => LLMConfig;
}
/**
 * LLMWrapper interface with structured-output support.
 */
interface ITextLLMWrapper extends ILLMWrapper {
  /**
   * Invoke the LLM and parse the reply as JSON (text-based parsing).
   * @param input input content
   * @param maxRetries maximum retry count, defaults to 2
   * @returns JSON call result
   */
  callJSON: (input: string | ModelMessage[], maxRetries?: number) => Promise<JSONCallResult>;
  /**
   * Generate a structured object validated by a schema (preferred).
   * @param input input content
   * @param schema Zod schema
   * @param options generation options
   * @returns generation result
   */
  generateObject: <T extends z.ZodType>(input: string | ModelMessage[], schema: T, options?: GenerateObjectOptions) => Promise<GenerateObjectResult<z.infer<T>>>;
}
//#endregion
112
//#region src/manager.d.ts
/** Predicate used to filter/find wrappers. */
type WrapperPredicate = (wrapper: ILLMWrapper) => boolean;
/**
 * In-memory registry of LLM wrappers keyed by config id, with
 * least-used-first selection across wrappers of the same capability.
 */
declare class Manager {
  #private;
  get configs(): LLMConfig[];
  get providers(): LLMProvider[];
  get wrappers(): ILLMWrapper[];
  getWrapper(id: string): ILLMWrapper | undefined;
  filterWrapper(pred: WrapperPredicate): ILLMWrapper[];
  findWrapper(pred: WrapperPredicate): ILLMWrapper | undefined;
  reLoad(models: LLMConfig[], provides: LLMProvider[]): void;
  upsertWrapper(cfg: LLMConfig): boolean;
  delWrapper(id: string): void;
  private getMatchingWrapper;
  call(bility: LLMBility, input: string | ModelMessage[]): Promise<CallResult>;
  callJSON(input: string | ModelMessage[], maxRetries?: number): Promise<JSONCallResult>;
  generateObject<T extends z$1.ZodType>(input: string | ModelMessage[], schema: T, options?: GenerateObjectOptions): Promise<GenerateObjectResult<z$1.infer<T>>>;
}
//#endregion
131
//#region src/library.d.ts
/**
 * Dexie-backed Manager: persists configs/providers in tables and keeps the
 * in-memory wrapper registry in sync.
 */
declare class Library extends Manager {
  #private;
  get modelTable(): Table<LLMConfig>;
  get providerTable(): Table<LLMProvider>;
  init(models: Table<LLMConfig>, providers: Table<LLMProvider>): Promise<void>;
  upsertModel(cfg: LLMConfig): Promise<boolean>;
  delModel(id: string): Promise<void>;
  upsertProvider(provider: LLMProvider): Promise<boolean>;
  delProvider(pid: string): Promise<void>;
}
/** Shared singleton instance; also re-exported as the package default. */
declare const llmcfgInst: Library;
//#endregion
144
//#region src/wrapper/providers.d.ts
/** Fetch the model ids available from a provider's `/models` endpoint. */
declare function listModels(provider: LLMProvider): Promise<string[]>;
//#endregion
147
+ export { type CallResult, type GenerateObjectOptions, type GenerateObjectResult, type ILLMWrapper, type ITextLLMWrapper, type JSONCallResult, LLMBilities, type LLMBility, type LLMConfig, type LLMProvider, TableSchema, llmcfgInst as default, llmcfgInst, listModels };
package/dist/index.mjs ADDED
@@ -0,0 +1,617 @@
1
+ import { Output, generateText, streamText } from "ai";
2
+ import { decode } from "html-entities";
3
+ import JSON5 from "json5";
4
+ import { isEmpty } from "lodash";
5
+ import { marked } from "marked";
6
+ import { createAnthropic } from "@ai-sdk/anthropic";
7
+ import { createGoogleGenerativeAI } from "@ai-sdk/google";
8
+ import { createOpenAI } from "@ai-sdk/openai";
9
+ import { createXai } from "@ai-sdk/xai";
10
+
11
+ //#region src/utils/guard.ts
12
/**
 * Type guard: checks whether a wrapper implements ITextLLMWrapper,
 * i.e. supports structured output.
 * @param wrapper an ILLMWrapper instance
 * @returns true when both callJSON and generateObject are functions
 */
function isTextLLMWrapper(wrapper) {
	const hasMethod = (name) => name in wrapper && typeof wrapper[name] === "function";
	return hasMethod("callJSON") && hasMethod("generateObject");
}
20
+
21
+ //#endregion
22
+ //#region src/wrapper/json-parser.ts
23
/**
 * JSON parsing utility class.
 * All helpers are best-effort: they return null rather than throwing when
 * the input cannot be coerced into a non-empty JSON value.
 */
var JSONParser = class {
	/**
	 * Parse a JSON string (JSON5-tolerant).
	 * Tries the raw string first, then an HTML-entity-decoded copy.
	 * NOTE(review): results that are "empty" per lodash isEmpty ({}, [], "",
	 * and all primitives, numbers/booleans included) are rejected as null.
	 */
	static parseJSON(jsonString) {
		const parsingAttempts = [() => JSON5.parse(jsonString), () => JSON5.parse(decode(jsonString))];
		for (const attempt of parsingAttempts) try {
			const result = attempt();
			if (!isEmpty(result)) return result;
		} catch {
			continue;
		}
		return null;
	}
	/**
	 * Extract and parse the first JSON code block from Markdown.
	 * Accepts fenced blocks tagged "json" as well as untagged ones.
	 */
	static extractJsonBlock(markdown) {
		const jsonBlocks = marked.lexer(markdown).filter((t) => t.type === "code").filter((t) => !t.lang || t.lang.toLowerCase() === "json").map((t) => {
			try {
				return this.parseJSON(t.text.trim());
			} catch {
				return null;
			}
		}).filter(Boolean);
		if (jsonBlocks.length > 0) return jsonBlocks[0];
		return null;
	}
	/**
	 * Extract JSON: direct parsing first, then Markdown code-block scan.
	 * (async although no await is used today — kept for interface stability)
	 */
	static async extractJSON(jsonString) {
		const parsingAttempts = [() => this.parseJSON(jsonString), () => this.extractJsonBlock(jsonString)];
		for (const attempt of parsingAttempts) try {
			const result = attempt();
			if (result && !isEmpty(result)) return result;
		} catch {
			continue;
		}
		return null;
	}
	/**
	 * Build the prompt that asks the model to repair invalid JSON output.
	 */
	static createJSONFixPrompt(invalidResponse) {
		return `
以下内容应该是JSON格式,但因为语法错误,解析失败了:

${invalidResponse}

请修复并返回正确的JSON格式。只返回修复后的JSON,不要添加任何解释或其他内容。
`.trim();
	}
};
80
+
81
+ //#endregion
82
+ //#region src/wrapper/providers.ts
83
// Built-in provider endpoints. `baseURL` is the OpenAI-compatible API root;
// `apikeyURL` points at the console page where a user creates an API key.
const PROVIDER_CONFIG = {
	qianwen: {
		baseURL: "https://dashscope.aliyuncs.com/compatible-mode/v1",
		apikeyURL: "https://dashscope.console.aliyun.com/apiKey"
	},
	deepseek: {
		baseURL: "https://api.deepseek.com",
		apikeyURL: "https://platform.deepseek.com/api_keys"
	},
	qianfan: {
		baseURL: "https://qianfan.baidubce.com/v2",
		apikeyURL: "https://console.bce.baidu.com/iam/#/iam/apikey/list"
	},
	zhipu: {
		baseURL: "https://open.bigmodel.cn/api/paas/v4",
		apikeyURL: "https://bigmodel.cn/usercenter/proj-mgmt/apikeys"
	},
	hunyuan: {
		baseURL: "https://api.hunyuan.cloud.tencent.com/v1",
		apikeyURL: "https://console.cloud.tencent.com/hunyuan/api-key"
	},
	openrouter: {
		baseURL: "https://openrouter.ai/api/v1",
		apikeyURL: "https://openrouter.ai/settings/keys"
	},
	deepinfra: {
		baseURL: "https://api.deepinfra.com/v1/openai",
		apikeyURL: "https://deepinfra.com/dash/api_keys"
	},
	google: {
		baseURL: "https://generativelanguage.googleapis.com/v1beta/openai",
		apikeyURL: "https://aistudio.google.com/api-keys"
	},
	openai: {
		baseURL: "https://api.openai.com/v1",
		apikeyURL: "https://platform.openai.com/api-keys"
	},
	groq: {
		baseURL: "https://api.groq.com/openai/v1",
		apikeyURL: "https://console.groq.com/keys"
	},
	moonshot: {
		baseURL: "https://api.moonshot.cn/v1",
		apikeyURL: "https://platform.moonshot.cn/console/api-keys"
	},
	poe: {
		baseURL: "https://api.poe.com/v1",
		apikeyURL: "https://poe.com/api_key"
	}
};
// Names of all built-in providers, in declaration order.
const ProviderNames = Object.keys(PROVIDER_CONFIG);
134
/**
 * List the model ids offered by a provider.
 *
 * Issues a GET against the provider's OpenAI-compatible `/models` endpoint
 * and returns the `id` of every entry in the response's `data` array.
 *
 * @param provider LLMProvider whose `name` selects a PROVIDER_CONFIG entry
 *                 and whose `apiKey` authenticates the request.
 * @returns array of model-id strings
 * @throws Error when the provider is unknown, the HTTP request fails, or
 *         the response body does not have the expected shape.
 */
async function listModels(provider) {
	const providerConfig = PROVIDER_CONFIG[provider.name];
	if (!providerConfig) throw new Error(`不支持的提供商: ${provider.name}`);
	const baseURL = providerConfig.baseURL.trim();
	const response = await fetch(`${baseURL}/models`, { headers: {
		"Authorization": `Bearer ${provider.apiKey}`,
		"Content-Type": "application/json"
	} });
	if (!response.ok) throw new Error(`获取模型列表失败: ${response.statusText}`);
	const payload = await response.json();
	// Fix: previously `payload.data.map(...)` crashed with an opaque
	// TypeError whenever the endpoint returned an unexpected payload.
	if (!payload || !Array.isArray(payload.data)) throw new Error(`获取模型列表失败: 响应格式异常`);
	return payload.data.map((model) => model.id);
}
145
+
146
+ //#endregion
147
+ //#region src/wrapper/model-factory.ts
148
/**
 * Model factory - creates AI SDK model instances for the various providers.
 */
var ModelFactory = class {
	/**
	 * Create an AI SDK model instance for the given config/provider pair.
	 */
	static createModel(config, provider) {
		const constProvider = PROVIDER_CONFIG[provider.name];
		if (!constProvider) throw new Error(`不支持的提供商: ${config.pid}`);
		const modelName = config.model.trim();
		const apiKey = provider.apiKey;
		const baseURL = constProvider.baseURL?.trim();
		// NOTE(review): the key is spelled "protocal" (sic). No entry in
		// PROVIDER_CONFIG defines it, so every built-in provider currently
		// takes the default "openai" branch below — confirm intended.
		const protocol = constProvider.protocal || "openai";
		try {
			return this.createModelByProtocol(protocol, modelName, apiKey, baseURL);
		} catch (error) {
			console.error(`创建模型失败 (${config.pid}):`, error);
			throw new Error(`无法创建模型: ${config.pid} - ${modelName}`);
		}
	}
	/**
	 * Dispatch model creation on the protocol name; unknown protocols fall
	 * back to the OpenAI-compatible path in non-strict mode.
	 */
	static createModelByProtocol(protocol, modelName, apiKey, baseURL) {
		switch (protocol) {
			case "openai": return this.createOpenAIModel(modelName, apiKey, baseURL, true);
			case "anthropic": return this.createAnthropicModel(modelName, apiKey, baseURL);
			case "xai": return this.createXaiModel(modelName, apiKey, baseURL);
			case "google": return this.createGoogleModel(modelName, apiKey, baseURL);
			case "huggingface": return this.createHuggingFaceModel(modelName, apiKey, baseURL);
			case "perplexity": return this.createPerplexityModel(modelName, apiKey, baseURL);
			case "ollama": return this.createOllamaModel(modelName, baseURL);
			default: return this.createOpenAIModel(modelName, apiKey, baseURL, false);
		}
	}
	/**
	 * Create an OpenAI (or OpenAI-compatible) chat model.
	 * @param modelName - model name.
	 * @param strict - true for strict mode, false for compatible mode;
	 *                 compatible mode wraps fetch to log transport errors.
	 */
	static createOpenAIModel(modelName, apiKey, baseURL, strict = true) {
		return createOpenAI({
			apiKey,
			baseURL,
			fetch: strict ? void 0 : async (url, init) => {
				try {
					return await fetch(url, init);
				} catch (error) {
					console.warn("OpenAI compatible fetch error:", error);
					throw error;
				}
			}
		}).chat(modelName);
	}
	/**
	 * Create an Anthropic chat model.
	 */
	static createAnthropicModel(modelName, apiKey, baseURL) {
		return createAnthropic({
			apiKey,
			baseURL
		}).chat(modelName);
	}
	/**
	 * Create an xAI model.
	 */
	static createXaiModel(modelName, apiKey, baseURL) {
		return createXai({
			apiKey,
			baseURL
		})(modelName);
	}
	/**
	 * Create a Google Generative AI model.
	 */
	static createGoogleModel(modelName, apiKey, baseURL) {
		return createGoogleGenerativeAI({
			apiKey,
			baseURL
		})(modelName);
	}
	/**
	 * Create a HuggingFace model (via the OpenAI-compatible endpoint).
	 */
	static createHuggingFaceModel(modelName, apiKey, baseURL) {
		return createOpenAI({
			apiKey,
			baseURL: baseURL || "https://api-inference.huggingface.co/v1"
		})(modelName);
	}
	/**
	 * Create a Perplexity model (via the OpenAI-compatible endpoint).
	 */
	static createPerplexityModel(modelName, apiKey, baseURL) {
		return createOpenAI({
			apiKey,
			baseURL: baseURL || "https://api.perplexity.ai"
		})(modelName);
	}
	/**
	 * Create an Ollama model (local OpenAI-compatible endpoint; Ollama
	 * ignores the API key but the SDK requires a non-empty value).
	 */
	static createOllamaModel(modelName, baseURL) {
		return createOpenAI({
			apiKey: "ollama",
			baseURL: baseURL || "http://localhost:11434/v1"
		})(modelName);
	}
};
258
+
259
+ //#endregion
260
+ //#region src/wrapper/text.ts
261
/**
 * Text-capability wrapper around an AI SDK chat model.
 * Implements ITextLLMWrapper: plain calls, streaming, JSON extraction and
 * schema-validated object generation.
 */
var TextWrapper = class {
	model;
	// Number of model invocations made through this wrapper. Manager sorts
	// wrappers by `callCount` for least-used-first selection, so the counter
	// must grow on every invocation. Fix: it was previously never
	// incremented, which made that load balancing a no-op.
	#callCount = 0;
	get callCount() {
		return this.#callCount;
	}
	constructor(config, provider) {
		this.config = config;
		this.provider = provider;
		this.model = ModelFactory.createModel(config, provider);
	}
	/**
	 * Normalize input into the message array expected by the ai SDK.
	 * Strings and numbers become a single user message.
	 */
	prepareMessages(input) {
		if (Array.isArray(input)) return input;
		return [{
			role: "user",
			content: typeof input === "number" ? input.toString() : input
		}];
	}
	/**
	 * Invoke the LLM once and return the complete text response.
	 */
	async call(input) {
		const startTime = Date.now();
		this.#callCount += 1;
		try {
			const messages = this.prepareMessages(input);
			const { text } = await generateText({
				model: this.model,
				messages,
				temperature: this.config?.temperature
			});
			return {
				success: true,
				response: text,
				responseTime: Date.now() - startTime
			};
		} catch (error) {
			const responseTime = Date.now() - startTime;
			const err = error;
			console.error(`LLM调用失败 (${this.config.model}):`, error);
			return {
				success: false,
				error: err,
				responseTime
			};
		}
	}
	/**
	 * Invoke the LLM and parse the reply as JSON (text-based parsing, kept
	 * for backward compatibility). On parse failure the model is asked to
	 * repair its own output up to `maxRetries` times.
	 * (Counted through the underlying call() invocations.)
	 */
	async callJSON(input, maxRetries = 2) {
		const startTime = Date.now();
		const callResult = await this.call(input);
		if (!callResult.success) return {
			...callResult,
			responseTime: Date.now() - startTime
		};
		let response = callResult.response;
		let jsonResult = await JSONParser.extractJSON(response);
		if (jsonResult !== null) return {
			success: true,
			response,
			json: jsonResult,
			responseTime: Date.now() - startTime
		};
		let attempts = 0;
		while (attempts < maxRetries) {
			attempts++;
			try {
				const fixPrompt = JSONParser.createJSONFixPrompt(response);
				const fixResult = await this.call(fixPrompt);
				if (!fixResult.success) continue;
				response = fixResult.response;
				jsonResult = await JSONParser.extractJSON(response);
				if (jsonResult !== null) return {
					success: true,
					response,
					json: jsonResult,
					responseTime: Date.now() - startTime
				};
			} catch (error) {
				console.warn(`JSON修复第${attempts}次尝试失败:`, error);
			}
		}
		return {
			success: false,
			error: /* @__PURE__ */ new Error("无法解析为有效JSON格式"),
			response,
			responseTime: Date.now() - startTime
		};
	}
	/**
	 * Generate a structured object validated against a Zod schema (preferred
	 * over callJSON).
	 */
	async generateObject(input, schema, options) {
		const startTime = Date.now();
		this.#callCount += 1;
		try {
			const messages = this.prepareMessages(input);
			const result = await generateText({
				model: this.model,
				output: Output.object({
					schema,
					name: options?.schemaName,
					description: options?.schemaDescription
				}),
				messages,
				temperature: this.config?.temperature
			});
			const responseTime = Date.now() - startTime;
			return {
				success: true,
				object: result.output,
				finishReason: result.finishReason,
				usage: result.usage,
				responseTime
			};
		} catch (error) {
			const responseTime = Date.now() - startTime;
			const err = error;
			console.error(`生成对象失败 (${this.config.model}):`, error);
			return {
				success: false,
				error: err,
				responseTime
			};
		}
	}
	/**
	 * Invoke the LLM with streaming output; `onChunk` receives each text
	 * fragment and the concatenated response is returned at the end.
	 */
	async callStream(input, onChunk) {
		const startTime = Date.now();
		this.#callCount += 1;
		try {
			const messages = this.prepareMessages(input);
			const { textStream } = await streamText({
				model: this.model,
				messages,
				temperature: this.config?.temperature
			});
			let fullResponse = "";
			for await (const chunk of textStream) {
				fullResponse += chunk;
				onChunk(chunk);
			}
			const responseTime = Date.now() - startTime;
			return {
				success: true,
				response: fullResponse,
				responseTime
			};
		} catch (error) {
			const responseTime = Date.now() - startTime;
			const err = error;
			console.error(`LLM流式调用失败 (${this.config.model}):`, error);
			return {
				success: false,
				error: err,
				responseTime
			};
		}
	}
	/**
	 * Return a shallow copy of this wrapper's configuration.
	 */
	getConfig() {
		return { ...this.config };
	}
};
431
+
432
+ //#endregion
433
+ //#region src/wrapper/index.ts
434
/**
 * Wrapper factory: maps an LLMConfig onto a concrete wrapper implementation.
 */
var WrapperFactory = class {
	/**
	 * Create the wrapper matching the config's capability tag.
	 * Only "text" capabilities are implemented today; anything else throws.
	 */
	static createWrapper(config, provider) {
		const kind = this.getModelTypeFromTag(config.bility);
		if (kind === "text") return new TextWrapper(config, provider);
		throw new Error(`不支持的模型标签: ${kind}`);
	}
	/**
	 * Map a capability tag onto the broad model family it belongs to.
	 * @throws Error for tags outside LLMBilities.
	 */
	static getModelTypeFromTag(tag) {
		const families = new Map([
			["text", "text"],
			["image", "image"],
			["image_modify", "image"],
			["video", "video"],
			["video_modify", "video"],
			["speech", "audio"],
			["speech_modify", "audio"],
			["music", "audio"],
			["music_modify", "audio"]
		]);
		const family = families.get(tag);
		if (family === undefined) throw new Error(`未知的能力标签: ${tag}`);
		return family;
	}
};
460
+
461
+ //#endregion
462
+ //#region src/manager.ts
463
// Shared failure sentinel returned when no wrapper matched the request or
// every matching wrapper failed.
// NOTE(review): this is a single module-level object — all failed calls
// return the very same Error instance; callers must treat it as read-only.
const CallError = {
	success: false,
	error: /* @__PURE__ */ new Error("Not Found or All Error")
};
467
/**
 * In-memory registry of LLM wrappers keyed by config id, with simple
 * least-used load balancing across wrappers of the same capability.
 */
var Manager = class {
	#providers = [];
	#wrappers = /* @__PURE__ */ new Map();
	/** Configs of all currently instantiated wrappers (fresh copies). */
	get configs() {
		return Array.from(this.#wrappers.values()).map((wrapper) => wrapper.getConfig());
	}
	/** The provider list (the internal array itself, by reference). */
	get providers() {
		return this.#providers;
	}
	get wrappers() {
		return Array.from(this.#wrappers.values());
	}
	getWrapper(id) {
		return this.#wrappers.get(id);
	}
	filterWrapper(pred) {
		return Array.from(this.#wrappers.values()).filter(pred);
	}
	findWrapper(pred) {
		return Array.from(this.#wrappers.values()).find(pred);
	}
	/** Replace all providers and rebuild every wrapper from `models`. */
	reLoad(models, provides) {
		this.#providers = [...provides];
		this.#wrappers.clear();
		models.forEach((model) => {
			this.upsertWrapper(model);
		});
	}
	/**
	 * Insert or replace the wrapper for `cfg`.
	 * A disabled config only removes any existing wrapper and reports success.
	 * @returns false when the provider is unknown or wrapper creation fails.
	 */
	upsertWrapper(cfg) {
		if (this.#wrappers.has(cfg.id)) this.#wrappers.delete(cfg.id);
		if (!cfg.enabled) return true;
		const provider = this.#providers.find((p) => p.id === cfg.pid);
		if (!provider) return false;
		try {
			const wrapper = WrapperFactory.createWrapper(cfg, provider);
			this.#wrappers.set(cfg.id, wrapper);
			return true;
		} catch (error) {
			// Fix: WrapperFactory throws for capability tags it cannot build
			// (currently everything except "text"); previously that exception
			// escaped and could abort reLoad() half-way through. Report
			// failure through the boolean contract instead.
			console.error(`创建包装器失败 (${cfg.id}):`, error);
			return false;
		}
	}
	delWrapper(id) {
		if (this.#wrappers.has(id)) this.#wrappers.delete(id);
	}
	/** Enabled wrappers for `bility`, sorted least-used first. */
	getMatchingWrapper(bility) {
		return Array.from(this.#wrappers.values()).filter((value) => {
			const cfg = value.getConfig();
			return cfg.enabled && cfg.bility === bility;
		}).sort((a, b) => a.callCount - b.callCount);
	}
	/** Try matching wrappers in order until one call succeeds. */
	async call(bility, input) {
		const matchingWrappers = this.getMatchingWrapper(bility);
		for (const wrapper of matchingWrappers) {
			const result = await wrapper.call(input);
			if (result.success) return result;
		}
		return CallError;
	}
	/** JSON call across all structured-output-capable text wrappers. */
	async callJSON(input, maxRetries = 3) {
		const matchingWrappers = this.getMatchingWrapper("text");
		for (const wrapper of matchingWrappers) if (isTextLLMWrapper(wrapper)) {
			const result = await wrapper.callJSON(input, maxRetries);
			if (result.success) return result;
		}
		return CallError;
	}
	/** Schema-validated object generation across text wrappers. */
	async generateObject(input, schema, options) {
		const matchingWrappers = this.getMatchingWrapper("text");
		for (const wrapper of matchingWrappers) if (isTextLLMWrapper(wrapper)) {
			const result = await wrapper.generateObject(input, schema, options);
			if (result.success) return result;
		}
		return CallError;
	}
};
538
+
539
+ //#endregion
540
+ //#region src/library.ts
541
/**
 * Dexie-backed Manager: persists model configs and providers in tables and
 * keeps the in-memory wrapper registry in sync with them.
 */
var Library = class extends Manager {
	#modelTable;
	#providerTable;
	get modelTable() {
		return this.#modelTable;
	}
	get providerTable() {
		return this.#providerTable;
	}
	/** Bind the Dexie tables and load every persisted config/provider. */
	async init(models, providers) {
		this.#modelTable = models;
		this.#providerTable = providers;
		const allModels = await models.toArray();
		const allProviders = await providers.toArray();
		super.reLoad(allModels, allProviders);
	}
	/**
	 * Upsert a model config in memory first, then persist it.
	 * NOTE(review): Dexie's put() resolves to the record's primary key —
	 * presumably cfg.id given the table schema; verify.
	 */
	async upsertModel(cfg) {
		if (!super.upsertWrapper(cfg)) return false;
		return await this.#modelTable.put(cfg) === cfg.id;
	}
	/** Remove a model's wrapper and its persisted record. */
	async delModel(id) {
		super.delWrapper(id);
		await this.#modelTable.delete(id);
	}
	/**
	 * Upsert a provider and rebuild the wrappers of every model using it.
	 * Mutates the array exposed by the `providers` getter in place (the
	 * getter returns the internal array by reference).
	 */
	async upsertProvider(provider) {
		const idx = super.providers.findIndex((p) => p.id === provider.id);
		await this.#providerTable.put(provider);
		if (idx >= 0) {
			super.providers[idx] = { ...provider };
			super.configs.filter((cfg) => cfg.pid === provider.id).forEach((model) => {
				super.upsertWrapper(model);
			});
		} else super.providers.push({ ...provider });
		return true;
	}
	/**
	 * Delete a provider and cascade-delete every model config (and wrapper)
	 * that referenced it.
	 */
	async delProvider(pid) {
		const idx = super.providers.findIndex((p) => p.id === pid);
		if (idx !== -1) {
			await this.#providerTable.delete(pid);
			super.providers.splice(idx, 1);
			const affectModels = super.configs.filter((cfg) => cfg.pid === pid);
			if (affectModels.length > 0) {
				const ids = affectModels.map((model) => {
					super.delWrapper(model.id);
					return model.id;
				});
				await this.#modelTable.bulkDelete(ids);
			}
		}
	}
};
// Shared singleton; also re-exported as the package default export.
const llmcfgInst = new Library();
593
+
594
+ //#endregion
595
+ //#region src/utils/const.ts
596
// Dexie store definitions: primary key first, then indexed fields.
const TableSchema = {
	provider: "id,name,createdAt",
	llms: "id,pid,model,bility,createdAt"
};
// Every capability tag a model config may declare (see the LLMBility type).
const LLMBilities = [
	"text",
	"image",
	"image_modify",
	"video",
	"video_modify",
	"speech",
	"speech_modify",
	"music",
	"music_modify"
];
611
+
612
+ //#endregion
613
+ //#region src/index.ts
614
// Default export mirrors the named llmcfgInst singleton.
var src_default = llmcfgInst;
615
+
616
+ //#endregion
617
+ export { LLMBilities, TableSchema, src_default as default, listModels, llmcfgInst };
package/package.json ADDED
@@ -0,0 +1,85 @@
1
+ {
2
+ "name": "llmconfig",
3
+ "type": "module",
4
+ "version": "0.0.1",
5
+ "packageManager": "pnpm@10.25.0",
6
+ "description": "基于@ai-sdk,允许封装多个llm provider.使得配置与调用分离。",
7
+ "author": "lizhutang",
8
+ "license": "MIT",
9
+ "funding": "https://github.com/sponsors/masol",
10
+ "homepage": "https://github.com/masol/llmconfig#readme",
11
+ "repository": {
12
+ "type": "git",
13
+ "url": "git+https://github.com/masol/llmconfig.git"
14
+ },
15
+ "bugs": "https://github.com/masol/llmconfig/issues",
16
+ "keywords": [
17
+ "llm",
18
+ "config"
19
+ ],
20
+ "sideEffects": false,
21
+ "exports": {
22
+ ".": "./dist/index.mjs",
23
+ "./package.json": "./package.json"
24
+ },
25
+ "main": "./dist/index.mjs",
26
+ "module": "./dist/index.mjs",
27
+ "types": "./dist/index.d.mts",
28
+ "files": [
29
+ "dist"
30
+ ],
31
+ "scripts": {
32
+ "build": "tsdown",
33
+ "dev": "tsdown --watch",
34
+ "lint": "eslint",
35
+ "prepublishOnly": "nr build",
36
+ "release": "bumpp",
37
+ "start": "tsx src/index.ts",
38
+ "test": "vitest",
39
+ "typecheck": "tsc",
40
+ "prepare": "simple-git-hooks"
41
+ },
42
+ "dependencies": {
43
+ "@ai-sdk/anthropic": "catalog:",
44
+ "@ai-sdk/google": "catalog:",
45
+ "@ai-sdk/openai": "catalog:",
46
+ "@ai-sdk/xai": "catalog:",
47
+ "ai": "catalog:",
48
+ "html-entities": "catalog:",
49
+ "json5": "catalog:",
50
+ "lodash": "catalog:",
51
+ "marked": "catalog:",
52
+ "zod": "catalog:"
53
+ },
54
+ "devDependencies": {
55
+ "@antfu/eslint-config": "catalog:cli",
56
+ "@antfu/ni": "catalog:cli",
57
+ "@antfu/utils": "catalog:inlined",
58
+ "@types/lodash": "catalog:",
59
+ "@types/node": "catalog:types",
60
+ "bumpp": "catalog:cli",
61
+ "dexie": "catalog:",
62
+ "eslint": "catalog:cli",
63
+ "lint-staged": "catalog:cli",
64
+ "publint": "catalog:cli",
65
+ "simple-git-hooks": "catalog:cli",
66
+ "tinyexec": "catalog:testing",
67
+ "tsdown": "catalog:",
68
+ "tsx": "catalog:cli",
69
+ "typescript": "catalog:cli",
70
+ "vite": "catalog:",
71
+ "vitest": "catalog:testing",
72
+ "vitest-package-exports": "catalog:testing",
73
+ "yaml": "catalog:testing"
74
+ },
75
+ "simple-git-hooks": {
76
+ "pre-commit": "pnpm i --frozen-lockfile --ignore-scripts --offline && npx lint-staged"
77
+ },
78
+ "lint-staged": {
79
+ "*": "eslint --fix"
80
+ },
81
+ "publishConfig": {
82
+ "registry": "https://registry.npmjs.org",
83
+ "access": "public"
84
+ }
85
+ }