@weisiren000/oiiai 0.1.3 → 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.mjs CHANGED
@@ -1,85 +1,762 @@
1
- // src/providers/openrouter.ts
2
- import { OpenRouter } from "@openrouter/sdk";
3
-
4
- // src/providers/__base__.ts
5
- var BaseProvider = class {
6
- /**
7
- * 简单对话:单轮问答(默认实现)
8
- * 对于思考模型,如果 content 为空则返回 reasoning
9
- */
10
- async ask(model, question, options) {
11
- const result = await this.chat({
12
- model,
13
- messages: [{ role: "user", content: question }],
14
- ...options
15
- });
16
- return result.content || result.reasoning || "";
17
- }
18
- /**
19
- * 带系统提示的对话(默认实现)
20
- * 对于思考模型,如果 content 为空则返回 reasoning
21
- */
22
- async askWithSystem(model, systemPrompt, userMessage, options) {
23
- const result = await this.chat({
24
- model,
25
- messages: [
26
- { role: "system", content: systemPrompt },
27
- { role: "user", content: userMessage }
28
- ],
29
- ...options
30
- });
31
- return result.content || result.reasoning || "";
1
+ var __defProp = Object.defineProperty;
2
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
3
+ var __getOwnPropNames = Object.getOwnPropertyNames;
4
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
5
+ var __require = /* @__PURE__ */ ((x) => typeof require !== "undefined" ? require : typeof Proxy !== "undefined" ? new Proxy(x, {
6
+ get: (a, b) => (typeof require !== "undefined" ? require : a)[b]
7
+ }) : x)(function(x) {
8
+ if (typeof require !== "undefined") return require.apply(this, arguments);
9
+ throw Error('Dynamic require of "' + x + '" is not supported');
10
+ });
11
+ var __esm = (fn, res) => function __init() {
12
+ return fn && (res = (0, fn[__getOwnPropNames(fn)[0]])(fn = 0)), res;
13
+ };
14
+ var __export = (target, all) => {
15
+ for (var name in all)
16
+ __defProp(target, name, { get: all[name], enumerable: true });
17
+ };
18
+ var __copyProps = (to, from, except, desc) => {
19
+ if (from && typeof from === "object" || typeof from === "function") {
20
+ for (let key of __getOwnPropNames(from))
21
+ if (!__hasOwnProp.call(to, key) && key !== except)
22
+ __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
32
23
  }
24
+ return to;
33
25
  };
26
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
34
27
 
35
28
  // src/providers/__types__.ts
36
- var EFFORT_TOKEN_MAP = {
37
- off: 0,
38
- low: 1024,
39
- medium: 4096,
40
- high: 16384
41
- };
29
+ var EFFORT_TOKEN_MAP;
30
+ var init_types = __esm({
31
+ "src/providers/__types__.ts"() {
32
+ "use strict";
33
+ EFFORT_TOKEN_MAP = {
34
+ off: 0,
35
+ low: 1024,
36
+ medium: 4096,
37
+ high: 16384
38
+ };
39
+ }
40
+ });
42
41
 
43
- // src/providers/openrouter.ts
44
- function extractTextContent(content) {
45
- if (typeof content === "string") {
46
- return content;
42
+ // src/utils/request-builder.ts
43
+ var RequestBuilder;
44
+ var init_request_builder = __esm({
45
+ "src/utils/request-builder.ts"() {
46
+ "use strict";
47
+ init_types();
48
+ RequestBuilder = class {
49
+ /**
50
+ * 构建聊天请求的基础参数
51
+ * 生成标准化的 OpenAI 兼容格式请求体
52
+ *
53
+ * @param options - 聊天选项
54
+ * @param stream - 是否为流式请求
55
+ * @returns 请求体对象
56
+ *
57
+ * @example
58
+ * ```ts
59
+ * const body = RequestBuilder.buildChatBody({
60
+ * model: 'gpt-4',
61
+ * messages: [{ role: 'user', content: 'Hello' }],
62
+ * temperature: 0.7
63
+ * });
64
+ * ```
65
+ */
66
+ static buildChatBody(options, stream = false) {
67
+ const { model, messages, temperature = 0.7, maxTokens } = options;
68
+ const body = {
69
+ model,
70
+ messages,
71
+ temperature,
72
+ stream
73
+ };
74
+ if (maxTokens !== void 0) {
75
+ body.max_tokens = maxTokens;
76
+ }
77
+ return body;
78
+ }
79
+ /**
80
+ * 构建 OpenRouter 格式的 reasoning 参数
81
+ *
82
+ * @param config - 推理配置
83
+ * @returns OpenRouter 格式的 reasoning 参数,或 undefined
84
+ *
85
+ * @example
86
+ * ```ts
87
+ * const reasoning = RequestBuilder.buildOpenRouterReasoning({ effort: 'high' });
88
+ * // => { effort: 'high', max_tokens: 16384 }
89
+ * ```
90
+ */
91
+ static buildOpenRouterReasoning(config) {
92
+ if (!config) return void 0;
93
+ if (config.effort === "off") return void 0;
94
+ const param = {};
95
+ if (config.effort) {
96
+ param.effort = config.effort;
97
+ }
98
+ if (config.budgetTokens !== void 0) {
99
+ param.max_tokens = config.budgetTokens;
100
+ } else if (config.effort && EFFORT_TOKEN_MAP[config.effort]) {
101
+ param.max_tokens = EFFORT_TOKEN_MAP[config.effort];
102
+ }
103
+ if (config.exclude !== void 0) {
104
+ param.exclude = config.exclude;
105
+ }
106
+ return Object.keys(param).length > 0 ? param : void 0;
107
+ }
108
+ /**
109
+ * 构建 Gemini 格式的 reasoning 参数
110
+ * Gemini 2.5+ 模型使用 reasoning_effort 控制思考
111
+ *
112
+ * @param config - 推理配置
113
+ * @returns Gemini 格式的参数对象
114
+ *
115
+ * @example
116
+ * ```ts
117
+ * const params = RequestBuilder.buildGeminiReasoning({ effort: 'high' });
118
+ * // => { reasoning_effort: 'high' }
119
+ * ```
120
+ */
121
+ static buildGeminiReasoning(config) {
122
+ if (!config || !config.effort || config.effort === "off") {
123
+ return {};
124
+ }
125
+ return {
126
+ reasoning_effort: config.effort
127
+ };
128
+ }
129
+ /**
130
+ * 构建 Groq 格式的 reasoning 参数
131
+ * Groq 使用 reasoning_format 参数控制推理输出
132
+ *
133
+ * @param config - 推理配置
134
+ * @returns Groq 格式的参数对象
135
+ *
136
+ * @example
137
+ * ```ts
138
+ * const params = RequestBuilder.buildGroqReasoning({ effort: 'high' });
139
+ * // => { reasoning_format: 'parsed' }
140
+ * ```
141
+ */
142
+ static buildGroqReasoning(config) {
143
+ if (!config) {
144
+ return {};
145
+ }
146
+ if (config.effort === "off") {
147
+ return { include_reasoning: false };
148
+ }
149
+ if (config.effort) {
150
+ return { reasoning_format: "parsed" };
151
+ }
152
+ return {};
153
+ }
154
+ /**
155
+ * 构建 DeepSeek 格式的 reasoning 参数
156
+ * DeepSeek 使用 thinking 参数启用思考模式
157
+ *
158
+ * @param config - 推理配置
159
+ * @returns DeepSeek 格式的参数对象
160
+ */
161
+ static buildDeepSeekReasoning(config) {
162
+ if (!config || !config.effort || config.effort === "off") {
163
+ return {};
164
+ }
165
+ return {
166
+ thinking: { type: "enabled" }
167
+ };
168
+ }
169
+ /**
170
+ * 构建 Nova 格式的 reasoning 参数
171
+ * Nova 使用 reasoningConfig 控制 extended thinking
172
+ *
173
+ * @param config - 推理配置
174
+ * @returns Nova 格式的参数对象
175
+ */
176
+ static buildNovaReasoning(config) {
177
+ if (!config || !config.effort || config.effort === "off") {
178
+ return {};
179
+ }
180
+ return {
181
+ reasoningConfig: {
182
+ type: "enabled",
183
+ maxReasoningEffort: config.effort
184
+ }
185
+ };
186
+ }
187
+ /**
188
+ * 构建 HTTP 请求头
189
+ *
190
+ * @param apiKey - API 密钥
191
+ * @param additionalHeaders - 额外的请求头
192
+ * @returns 请求头对象
193
+ *
194
+ * @example
195
+ * ```ts
196
+ * const headers = RequestBuilder.buildHeaders('sk-xxx', {
197
+ * 'X-Custom-Header': 'value'
198
+ * });
199
+ * ```
200
+ */
201
+ static buildHeaders(apiKey, additionalHeaders) {
202
+ return {
203
+ "Content-Type": "application/json",
204
+ Authorization: `Bearer ${apiKey}`,
205
+ ...additionalHeaders
206
+ };
207
+ }
208
+ };
47
209
  }
48
- if (Array.isArray(content)) {
49
- return content.filter(
50
- (item) => typeof item === "object" && item !== null && item.type === "text" && typeof item.text === "string"
51
- ).map((item) => item.text).join("");
210
+ });
211
+
212
+ // src/client/types.ts
213
+ var ProviderError, APIError, NetworkError, TimeoutError;
214
+ var init_types2 = __esm({
215
+ "src/client/types.ts"() {
216
+ "use strict";
217
+ ProviderError = class extends Error {
218
+ constructor(message, code, provider, cause) {
219
+ super(message);
220
+ this.code = code;
221
+ this.provider = provider;
222
+ this.cause = cause;
223
+ this.name = "ProviderError";
224
+ }
225
+ };
226
+ APIError = class extends ProviderError {
227
+ constructor(message, provider, statusCode, responseBody) {
228
+ super(message, "API_ERROR", provider);
229
+ this.statusCode = statusCode;
230
+ this.responseBody = responseBody;
231
+ this.name = "APIError";
232
+ }
233
+ };
234
+ NetworkError = class extends ProviderError {
235
+ constructor(message, provider, cause) {
236
+ super(message, "NETWORK_ERROR", provider, cause);
237
+ this.name = "NetworkError";
238
+ }
239
+ };
240
+ TimeoutError = class extends ProviderError {
241
+ constructor(message, provider, timeoutMs) {
242
+ super(message, "TIMEOUT_ERROR", provider);
243
+ this.timeoutMs = timeoutMs;
244
+ this.name = "TimeoutError";
245
+ }
246
+ };
52
247
  }
53
- return "";
54
- }
55
- function buildReasoningParam(config) {
56
- if (!config) return void 0;
57
- if (config.effort === "off") return void 0;
58
- const param = {};
59
- if (config.effort) {
60
- param.effort = config.effort;
61
- }
62
- if (config.budgetTokens !== void 0) {
63
- param.max_tokens = config.budgetTokens;
64
- } else if (config.effort && EFFORT_TOKEN_MAP[config.effort]) {
65
- param.max_tokens = EFFORT_TOKEN_MAP[config.effort];
66
- }
67
- if (config.exclude !== void 0) {
68
- param.exclude = config.exclude;
69
- }
70
- return Object.keys(param).length > 0 ? param : void 0;
71
- }
72
- var OpenRouterProvider = class extends BaseProvider {
73
- name = "openrouter";
74
- client;
75
- constructor(apiKey) {
76
- super();
77
- this.client = new OpenRouter({ apiKey });
248
+ });
249
+
250
+ // src/client/http-provider-client.ts
251
+ var http_provider_client_exports = {};
252
+ __export(http_provider_client_exports, {
253
+ HttpProviderClient: () => HttpProviderClient
254
+ });
255
+ var DEFAULT_TIMEOUT, HttpProviderClient;
256
+ var init_http_provider_client = __esm({
257
+ "src/client/http-provider-client.ts"() {
258
+ "use strict";
259
+ init_request_builder();
260
+ init_types2();
261
+ DEFAULT_TIMEOUT = 3e4;
262
+ HttpProviderClient = class {
263
+ config;
264
+ /**
265
+ * 创建 HTTP Provider 客户端实例
266
+ *
267
+ * @param config - 客户端配置
268
+ *
269
+ * @example
270
+ * ```ts
271
+ * const client = new HttpProviderClient({
272
+ * apiKey: 'sk-xxx',
273
+ * baseUrl: 'https://api.openai.com/v1',
274
+ * timeout: 60000
275
+ * });
276
+ * ```
277
+ */
278
+ constructor(config) {
279
+ this.config = {
280
+ ...config,
281
+ timeout: config.timeout ?? DEFAULT_TIMEOUT
282
+ };
283
+ }
284
+ /**
285
+ * 获取 Provider 名称(从 baseUrl 推断)
286
+ */
287
+ getProviderName() {
288
+ try {
289
+ const url = new URL(this.config.baseUrl);
290
+ return url.hostname;
291
+ } catch {
292
+ return "unknown";
293
+ }
294
+ }
295
+ /**
296
+ * 构建完整的请求 URL
297
+ */
298
+ buildUrl(endpoint) {
299
+ const baseUrl = this.config.baseUrl.replace(/\/$/, "");
300
+ const path = endpoint.startsWith("/") ? endpoint : `/${endpoint}`;
301
+ return `${baseUrl}${path}`;
302
+ }
303
+ /**
304
+ * 构建请求头
305
+ */
306
+ buildHeaders() {
307
+ return RequestBuilder.buildHeaders(this.config.apiKey, this.config.headers);
308
+ }
309
+ /**
310
+ * 创建带超时的 AbortController
311
+ */
312
+ createAbortController() {
313
+ const controller = new AbortController();
314
+ const timeoutId = setTimeout(() => {
315
+ controller.abort();
316
+ }, this.config.timeout ?? DEFAULT_TIMEOUT);
317
+ return { controller, timeoutId };
318
+ }
319
+ /**
320
+ * 处理 HTTP 错误响应
321
+ */
322
+ async handleErrorResponse(response) {
323
+ const provider = this.getProviderName();
324
+ let responseBody;
325
+ try {
326
+ responseBody = await response.text();
327
+ } catch {
328
+ }
329
+ let message;
330
+ switch (response.status) {
331
+ case 400:
332
+ message = "\u8BF7\u6C42\u53C2\u6570\u9519\u8BEF";
333
+ break;
334
+ case 401:
335
+ message = "API \u5BC6\u94A5\u65E0\u6548\u6216\u5DF2\u8FC7\u671F";
336
+ break;
337
+ case 403:
338
+ message = "\u6CA1\u6709\u6743\u9650\u8BBF\u95EE\u6B64\u8D44\u6E90";
339
+ break;
340
+ case 404:
341
+ message = "\u8BF7\u6C42\u7684\u8D44\u6E90\u4E0D\u5B58\u5728";
342
+ break;
343
+ case 429:
344
+ message = "\u8BF7\u6C42\u8FC7\u4E8E\u9891\u7E41\uFF0C\u8BF7\u7A0D\u540E\u91CD\u8BD5";
345
+ break;
346
+ case 500:
347
+ message = "\u670D\u52A1\u5668\u5185\u90E8\u9519\u8BEF";
348
+ break;
349
+ case 502:
350
+ message = "\u7F51\u5173\u9519\u8BEF";
351
+ break;
352
+ case 503:
353
+ message = "\u670D\u52A1\u6682\u65F6\u4E0D\u53EF\u7528";
354
+ break;
355
+ default:
356
+ message = `HTTP \u9519\u8BEF: ${response.status} ${response.statusText}`;
357
+ }
358
+ throw new APIError(message, provider, response.status, responseBody);
359
+ }
360
+ /**
361
+ * 发送聊天请求(非流式)
362
+ *
363
+ * @param endpoint - API 端点路径
364
+ * @param body - 请求体
365
+ * @returns 响应数据
366
+ */
367
+ async chat(endpoint, body) {
368
+ const url = this.buildUrl(endpoint);
369
+ const headers = this.buildHeaders();
370
+ const { controller, timeoutId } = this.createAbortController();
371
+ const provider = this.getProviderName();
372
+ try {
373
+ const response = await fetch(url, {
374
+ method: "POST",
375
+ headers,
376
+ body: JSON.stringify(body),
377
+ signal: controller.signal
378
+ });
379
+ clearTimeout(timeoutId);
380
+ if (!response.ok) {
381
+ await this.handleErrorResponse(response);
382
+ }
383
+ const data = await response.json();
384
+ return data;
385
+ } catch (error) {
386
+ clearTimeout(timeoutId);
387
+ if (error instanceof APIError) {
388
+ throw error;
389
+ }
390
+ if (error instanceof Error && error.name === "AbortError") {
391
+ throw new TimeoutError(
392
+ `\u8BF7\u6C42\u8D85\u65F6\uFF08${this.config.timeout}ms\uFF09`,
393
+ provider,
394
+ this.config.timeout ?? DEFAULT_TIMEOUT
395
+ );
396
+ }
397
+ if (error instanceof TypeError) {
398
+ throw new NetworkError("\u7F51\u7EDC\u8FDE\u63A5\u5931\u8D25\uFF0C\u8BF7\u68C0\u67E5\u7F51\u7EDC\u8BBE\u7F6E", provider, error);
399
+ }
400
+ throw new NetworkError(
401
+ error instanceof Error ? error.message : "\u672A\u77E5\u9519\u8BEF",
402
+ provider,
403
+ error instanceof Error ? error : void 0
404
+ );
405
+ }
406
+ }
407
+ /**
408
+ * 发送流式聊天请求
409
+ *
410
+ * @param endpoint - API 端点路径
411
+ * @param body - 请求体
412
+ * @returns fetch Response 对象
413
+ */
414
+ async chatStream(endpoint, body) {
415
+ const url = this.buildUrl(endpoint);
416
+ const headers = this.buildHeaders();
417
+ const { controller, timeoutId } = this.createAbortController();
418
+ const provider = this.getProviderName();
419
+ try {
420
+ const response = await fetch(url, {
421
+ method: "POST",
422
+ headers,
423
+ body: JSON.stringify(body),
424
+ signal: controller.signal
425
+ });
426
+ clearTimeout(timeoutId);
427
+ if (!response.ok) {
428
+ await this.handleErrorResponse(response);
429
+ }
430
+ return response;
431
+ } catch (error) {
432
+ clearTimeout(timeoutId);
433
+ if (error instanceof APIError) {
434
+ throw error;
435
+ }
436
+ if (error instanceof Error && error.name === "AbortError") {
437
+ throw new TimeoutError(
438
+ `\u8BF7\u6C42\u8D85\u65F6\uFF08${this.config.timeout}ms\uFF09`,
439
+ provider,
440
+ this.config.timeout ?? DEFAULT_TIMEOUT
441
+ );
442
+ }
443
+ if (error instanceof TypeError) {
444
+ throw new NetworkError("\u7F51\u7EDC\u8FDE\u63A5\u5931\u8D25\uFF0C\u8BF7\u68C0\u67E5\u7F51\u7EDC\u8BBE\u7F6E", provider, error);
445
+ }
446
+ throw new NetworkError(
447
+ error instanceof Error ? error.message : "\u672A\u77E5\u9519\u8BEF",
448
+ provider,
449
+ error instanceof Error ? error : void 0
450
+ );
451
+ }
452
+ }
453
+ };
454
+ }
455
+ });
456
+
457
+ // src/utils/stream-processor.ts
458
+ var stream_processor_exports = {};
459
+ __export(stream_processor_exports, {
460
+ StreamProcessor: () => StreamProcessor
461
+ });
462
+ var StreamProcessor;
463
+ var init_stream_processor = __esm({
464
+ "src/utils/stream-processor.ts"() {
465
+ "use strict";
466
+ StreamProcessor = class _StreamProcessor {
467
+ /**
468
+ * 从响应内容中提取文本
469
+ * 支持字符串和数组格式的 content
470
+ *
471
+ * @param content - 响应内容,可以是字符串、数组或其他类型
472
+ * @returns 提取的文本内容
473
+ *
474
+ * @example
475
+ * ```ts
476
+ * // 字符串格式
477
+ * StreamProcessor.extractTextContent('Hello') // => 'Hello'
478
+ *
479
+ * // 数组格式
480
+ * StreamProcessor.extractTextContent([
481
+ * { type: 'text', text: 'Hello' },
482
+ * { type: 'text', text: ' World' }
483
+ * ]) // => 'Hello World'
484
+ * ```
485
+ */
486
+ static extractTextContent(content) {
487
+ if (typeof content === "string") {
488
+ return content;
489
+ }
490
+ if (Array.isArray(content)) {
491
+ return content.filter(
492
+ (item) => typeof item === "object" && item !== null && item.type === "text" && typeof item.text === "string"
493
+ ).map((item) => item.text).join("");
494
+ }
495
+ return "";
496
+ }
497
+ /**
498
+ * 解析 SSE 数据行
499
+ *
500
+ * @param line - SSE 数据行(如 "data: {...}")
501
+ * @returns 解析后的 JSON 对象,或 null(如果是 [DONE] 或无效数据)
502
+ *
503
+ * @example
504
+ * ```ts
505
+ * StreamProcessor.parseSSELine('data: {"content": "Hello"}')
506
+ * // => { content: 'Hello' }
507
+ *
508
+ * StreamProcessor.parseSSELine('data: [DONE]')
509
+ * // => null
510
+ * ```
511
+ */
512
+ static parseSSELine(line) {
513
+ const trimmed = line.trim();
514
+ if (!trimmed || trimmed === "data: [DONE]") {
515
+ return null;
516
+ }
517
+ if (!trimmed.startsWith("data: ")) {
518
+ return null;
519
+ }
520
+ try {
521
+ const jsonStr = trimmed.slice(6);
522
+ return JSON.parse(jsonStr);
523
+ } catch {
524
+ return null;
525
+ }
526
+ }
527
+ /**
528
+ * 创建流式响应处理器
529
+ * 处理 SSE 格式的流式响应,提取并生成 StreamChunk
530
+ *
531
+ * @param response - fetch Response 对象
532
+ * @param deltaExtractor - 从 delta 中提取 StreamChunk 的函数
533
+ * @returns AsyncGenerator<StreamChunk>
534
+ *
535
+ * @example
536
+ * ```ts
537
+ * const response = await fetch(url, { ... });
538
+ * const extractor = (delta) => {
539
+ * if (delta.content) {
540
+ * return { type: 'content', text: delta.content };
541
+ * }
542
+ * return null;
543
+ * };
544
+ *
545
+ * for await (const chunk of StreamProcessor.processStream(response, extractor)) {
546
+ * console.log(chunk.type, chunk.text);
547
+ * }
548
+ * ```
549
+ */
550
+ static async *processStream(response, deltaExtractor) {
551
+ const reader = response.body?.getReader();
552
+ if (!reader) {
553
+ throw new Error("No response body");
554
+ }
555
+ const decoder = new TextDecoder();
556
+ let buffer = "";
557
+ try {
558
+ while (true) {
559
+ const { done, value } = await reader.read();
560
+ if (done) break;
561
+ buffer += decoder.decode(value, { stream: true });
562
+ const lines = buffer.split("\n");
563
+ buffer = lines.pop() ?? "";
564
+ for (const line of lines) {
565
+ const data = _StreamProcessor.parseSSELine(line);
566
+ if (!data) continue;
567
+ const choices = data.choices;
568
+ const delta = choices?.[0]?.delta;
569
+ if (!delta) continue;
570
+ const chunk = deltaExtractor(delta);
571
+ if (chunk) {
572
+ yield chunk;
573
+ }
574
+ }
575
+ }
576
+ } finally {
577
+ reader.releaseLock();
578
+ }
579
+ }
580
+ /**
581
+ * 创建默认的 delta 提取器
582
+ * 支持 reasoning_content、reasoning、thoughts 和 content 字段
583
+ *
584
+ * @returns DeltaExtractor 函数
585
+ */
586
+ static createDefaultExtractor() {
587
+ return (delta) => {
588
+ const reasoningContent = delta.reasoning_content ?? delta.reasoning ?? delta.thoughts;
589
+ if (reasoningContent) {
590
+ return {
591
+ type: "reasoning",
592
+ text: _StreamProcessor.extractTextContent(reasoningContent)
593
+ };
594
+ }
595
+ if (delta.content) {
596
+ return {
597
+ type: "content",
598
+ text: _StreamProcessor.extractTextContent(delta.content)
599
+ };
600
+ }
601
+ return null;
602
+ };
603
+ }
604
+ };
605
+ }
606
+ });
607
+
608
+ // src/fluent/chat-session.ts
609
+ var chat_session_exports = {};
610
+ __export(chat_session_exports, {
611
+ ChatSessionImpl: () => ChatSessionImpl
612
+ });
613
+ var ChatSessionImpl;
614
+ var init_chat_session = __esm({
615
+ "src/fluent/chat-session.ts"() {
616
+ "use strict";
617
+ ChatSessionImpl = class {
618
+ /** 预设实例引用 */
619
+ preset;
620
+ /** 模型 ID */
621
+ model;
622
+ /** 会话配置 */
623
+ options;
624
+ /** 对话历史 */
625
+ history = [];
626
+ /**
627
+ * 创建对话会话
628
+ * @param preset - 预设实例
629
+ * @param model - 模型 ID
630
+ * @param options - 会话配置
631
+ */
632
+ constructor(preset, model, options) {
633
+ this.preset = preset;
634
+ this.model = model;
635
+ this.options = options ?? {};
636
+ if (this.options.system) {
637
+ this.history.push({
638
+ role: "system",
639
+ content: this.options.system
640
+ });
641
+ }
642
+ }
643
+ /**
644
+ * 发送消息并获取响应(非流式)
645
+ * @param message - 用户消息
646
+ * @returns 助手响应内容
647
+ */
648
+ async send(message) {
649
+ this.history.push({
650
+ role: "user",
651
+ content: message
652
+ });
653
+ try {
654
+ const response = await this.preset.ask(this.model, message, {
655
+ system: this.buildSystemContext(),
656
+ temperature: this.options.temperature,
657
+ maxTokens: this.options.maxTokens,
658
+ reasoning: this.options.reasoning
659
+ });
660
+ this.history.push({
661
+ role: "assistant",
662
+ content: response
663
+ });
664
+ return response;
665
+ } catch (error) {
666
+ this.history.pop();
667
+ throw error;
668
+ }
669
+ }
670
+ /**
671
+ * 发送消息并获取流式响应
672
+ * @param message - 用户消息
673
+ * @returns 流式数据块生成器
674
+ */
675
+ async *sendStream(message) {
676
+ this.history.push({
677
+ role: "user",
678
+ content: message
679
+ });
680
+ let responseContent = "";
681
+ try {
682
+ const stream = this.preset.stream(this.model, message, {
683
+ system: this.buildSystemContext(),
684
+ temperature: this.options.temperature,
685
+ maxTokens: this.options.maxTokens,
686
+ reasoning: this.options.reasoning
687
+ });
688
+ for await (const chunk of stream) {
689
+ if (chunk.type === "content") {
690
+ responseContent += chunk.text;
691
+ }
692
+ yield chunk;
693
+ }
694
+ this.history.push({
695
+ role: "assistant",
696
+ content: responseContent
697
+ });
698
+ } catch (error) {
699
+ this.history.pop();
700
+ throw error;
701
+ }
702
+ }
703
+ /**
704
+ * 获取对话历史
705
+ * @returns 按发送顺序排列的消息列表
706
+ */
707
+ getHistory() {
708
+ return [...this.history];
709
+ }
710
+ /**
711
+ * 清空对话历史
712
+ */
713
+ clearHistory() {
714
+ this.history = [];
715
+ if (this.options.system) {
716
+ this.history.push({
717
+ role: "system",
718
+ content: this.options.system
719
+ });
720
+ }
721
+ }
722
+ /**
723
+ * 构建系统上下文
724
+ * 将对话历史转换为系统提示词的一部分
725
+ * @returns 系统上下文字符串
726
+ */
727
+ buildSystemContext() {
728
+ const conversationHistory = this.history.filter(
729
+ (msg, index) => msg.role !== "system" && index < this.history.length - 1
730
+ );
731
+ if (conversationHistory.length === 0) {
732
+ return this.options.system;
733
+ }
734
+ const historyContext = conversationHistory.map((msg) => `${msg.role === "user" ? "\u7528\u6237" : "\u52A9\u624B"}: ${msg.content}`).join("\n");
735
+ const baseSystem = this.options.system ?? "";
736
+ return `${baseSystem}
737
+
738
+ \u4EE5\u4E0B\u662F\u4E4B\u524D\u7684\u5BF9\u8BDD\u5386\u53F2\uFF1A
739
+ ${historyContext}`.trim();
740
+ }
741
+ };
78
742
  }
743
+ });
744
+
745
+ // src/adapters/types.ts
746
+ var BaseAdapter = class {
79
747
  /**
80
- * 发送聊天请求(非流式)
748
+ * 创建 Provider 客户端
749
+ * 默认实现:需要在运行时导入 HttpProviderClient 以避免循环依赖
81
750
  */
82
- async chat(options) {
751
+ createClient(config) {
752
+ const { HttpProviderClient: HttpProviderClient2 } = (init_http_provider_client(), __toCommonJS(http_provider_client_exports));
753
+ return new HttpProviderClient2(config);
754
+ }
755
+ /**
756
+ * 构建聊天请求体
757
+ * 默认实现:构建 OpenAI 兼容格式的请求体
758
+ */
759
+ buildChatRequest(options, stream = false) {
83
760
  const {
84
761
  model,
85
762
  messages,
@@ -87,40 +764,93 @@ var OpenRouterProvider = class extends BaseProvider {
87
764
  maxTokens,
88
765
  reasoning
89
766
  } = options;
90
- const reasoningParam = buildReasoningParam(reasoning);
91
- const requestParams = {
767
+ const body = {
92
768
  model,
93
769
  messages,
94
770
  temperature,
95
- maxTokens,
96
- stream: false
771
+ stream
97
772
  };
98
- if (reasoningParam) {
99
- requestParams.reasoning = reasoningParam;
773
+ if (maxTokens !== void 0) {
774
+ body.max_tokens = maxTokens;
100
775
  }
101
- const result = await this.client.chat.send(requestParams);
102
- const choice = result.choices[0];
776
+ const reasoningParams = this.buildReasoningParams(reasoning);
777
+ Object.assign(body, reasoningParams);
778
+ return body;
779
+ }
780
+ /**
781
+ * 构建 reasoning 参数
782
+ * 默认实现:返回空对象,子类应覆盖此方法
783
+ */
784
+ buildReasoningParams(_config) {
785
+ return {};
786
+ }
787
+ /**
788
+ * 解析聊天响应
789
+ * 默认实现:解析 OpenAI 兼容格式的响应
790
+ */
791
+ parseChatResponse(response, model) {
792
+ const choices = response.choices;
793
+ const choice = choices?.[0];
103
794
  if (!choice) {
104
795
  throw new Error("No response from model");
105
796
  }
106
797
  const msg = choice.message;
107
- const reasoningContent = msg.reasoning_content ?? msg.reasoning ?? null;
798
+ const reasoningContent = msg?.reasoning_content ?? msg?.reasoning ?? null;
799
+ const { StreamProcessor: StreamProcessor2 } = (init_stream_processor(), __toCommonJS(stream_processor_exports));
800
+ const usage = response.usage;
108
801
  return {
109
- content: extractTextContent(msg.content),
110
- reasoning: reasoningContent ? extractTextContent(reasoningContent) : null,
111
- model: result.model,
802
+ content: StreamProcessor2.extractTextContent(msg?.content),
803
+ reasoning: reasoningContent ? StreamProcessor2.extractTextContent(reasoningContent) : null,
804
+ model: response.model ?? model,
112
805
  usage: {
113
- promptTokens: result.usage?.promptTokens ?? 0,
114
- completionTokens: result.usage?.completionTokens ?? 0,
115
- totalTokens: result.usage?.totalTokens ?? 0
806
+ promptTokens: usage?.prompt_tokens ?? usage?.promptTokens ?? 0,
807
+ completionTokens: usage?.completion_tokens ?? usage?.completionTokens ?? 0,
808
+ totalTokens: usage?.total_tokens ?? usage?.totalTokens ?? 0
116
809
  },
117
- finishReason: choice.finishReason
810
+ finishReason: choice.finish_reason ?? choice.finishReason ?? null
118
811
  };
119
812
  }
120
813
  /**
121
- * 发送流式聊天请求
814
+ * 从 delta 中提取 StreamChunk
815
+ * 默认实现:支持 reasoning_content、reasoning、thoughts 和 content 字段
122
816
  */
123
- async *chatStream(options) {
817
+ extractStreamChunk(delta) {
818
+ const { StreamProcessor: StreamProcessor2 } = (init_stream_processor(), __toCommonJS(stream_processor_exports));
819
+ const reasoningContent = delta.reasoning_content ?? delta.reasoning ?? delta.thoughts;
820
+ if (reasoningContent) {
821
+ return {
822
+ type: "reasoning",
823
+ text: StreamProcessor2.extractTextContent(reasoningContent)
824
+ };
825
+ }
826
+ if (delta.content) {
827
+ return {
828
+ type: "content",
829
+ text: StreamProcessor2.extractTextContent(delta.content)
830
+ };
831
+ }
832
+ return null;
833
+ }
834
+ /**
835
+ * 获取 API 端点 URL
836
+ * 默认实现:返回 /chat/completions 端点
837
+ */
838
+ getEndpointUrl(baseUrl) {
839
+ return `${baseUrl}/chat/completions`;
840
+ }
841
+ };
842
+
843
+ // src/adapters/openrouter-adapter.ts
844
+ init_request_builder();
845
+ var DEFAULT_BASE_URL = "https://openrouter.ai/api/v1";
846
+ var OpenRouterAdapter = class extends BaseAdapter {
847
+ name = "openrouter";
848
+ defaultBaseUrl = DEFAULT_BASE_URL;
849
+ /**
850
+ * 构建聊天请求体
851
+ * OpenRouter 使用 OpenAI 兼容格式,但有特殊的 reasoning 参数
852
+ */
853
+ buildChatRequest(options, stream = false) {
124
854
  const {
125
855
  model,
126
856
  messages,
@@ -128,93 +858,54 @@ var OpenRouterProvider = class extends BaseProvider {
128
858
  maxTokens,
129
859
  reasoning
130
860
  } = options;
131
- const reasoningParam = buildReasoningParam(reasoning);
132
- const requestParams = {
861
+ const body = {
133
862
  model,
134
863
  messages,
135
864
  temperature,
136
- maxTokens,
137
- stream: true
865
+ stream
138
866
  };
139
- if (reasoningParam) {
140
- requestParams.reasoning = reasoningParam;
867
+ if (maxTokens !== void 0) {
868
+ body.max_tokens = maxTokens;
141
869
  }
142
- const stream = await this.client.chat.send(
143
- requestParams
144
- );
145
- for await (const chunk of stream) {
146
- const delta = chunk.choices?.[0]?.delta;
147
- if (!delta) continue;
148
- const reasoningContent = delta.reasoning_content ?? delta.reasoning;
149
- if (reasoningContent) {
150
- yield { type: "reasoning", text: extractTextContent(reasoningContent) };
151
- }
152
- if (delta.content) {
153
- yield { type: "content", text: extractTextContent(delta.content) };
154
- }
870
+ const reasoningParam = this.buildReasoningParams(reasoning);
871
+ if (reasoningParam && Object.keys(reasoningParam).length > 0) {
872
+ body.reasoning = reasoningParam;
155
873
  }
874
+ return body;
156
875
  }
157
876
  /**
158
- * 获取可用模型列表
877
+ * 构建 OpenRouter 格式的 reasoning 参数
878
+ *
879
+ * OpenRouter reasoning 参数格式:
880
+ * {
881
+ * effort: 'low' | 'medium' | 'high',
882
+ * max_tokens: number,
883
+ * exclude: boolean
884
+ * }
159
885
  */
160
- async listModels() {
161
- const result = await this.client.models.list();
162
- return (result.data ?? []).map((m) => ({
163
- id: m.id,
164
- canonicalSlug: m.canonical_slug ?? m.id,
165
- name: m.name,
166
- description: m.description ?? "",
167
- created: m.created ?? 0,
168
- pricing: {
169
- prompt: m.pricing?.prompt ?? "0",
170
- completion: m.pricing?.completion ?? "0",
171
- request: m.pricing?.request ?? "0",
172
- image: m.pricing?.image ?? "0"
173
- },
174
- contextLength: m.context_length ?? 0,
175
- architecture: {
176
- modality: m.architecture?.modality ?? "",
177
- inputModalities: m.architecture?.input_modalities ?? [],
178
- outputModalities: m.architecture?.output_modalities ?? [],
179
- tokenizer: m.architecture?.tokenizer ?? "",
180
- instructType: m.architecture?.instruct_type ?? ""
181
- },
182
- supportedParameters: m.supported_parameters ?? []
183
- }));
886
+ buildReasoningParams(config) {
887
+ return RequestBuilder.buildOpenRouterReasoning(config) ?? {};
888
+ }
889
+ /**
890
+ * 获取 API 端点 URL
891
+ * OpenRouter 使用标准的 /chat/completions 端点
892
+ */
893
+ getEndpointUrl(baseUrl) {
894
+ return `${baseUrl}/chat/completions`;
184
895
  }
185
896
  };
186
897
 
187
- // src/providers/gemini.ts
188
- var BASE_URL = "https://generativelanguage.googleapis.com/v1beta/openai";
189
- function extractTextContent2(content) {
190
- if (typeof content === "string") {
191
- return content;
192
- }
193
- if (Array.isArray(content)) {
194
- return content.filter(
195
- (item) => typeof item === "object" && item !== null && item.type === "text" && typeof item.text === "string"
196
- ).map((item) => item.text).join("");
197
- }
198
- return "";
199
- }
200
- var GeminiProvider = class extends BaseProvider {
898
+ // src/adapters/gemini-adapter.ts
899
+ init_request_builder();
900
+ var DEFAULT_BASE_URL2 = "https://generativelanguage.googleapis.com/v1beta/openai";
901
+ var GeminiAdapter = class extends BaseAdapter {
201
902
  name = "gemini";
202
- apiKey;
203
- baseUrl;
204
- constructor(config) {
205
- super();
206
- if (typeof config === "string") {
207
- this.apiKey = config;
208
- this.baseUrl = BASE_URL;
209
- } else {
210
- this.apiKey = config.apiKey;
211
- this.baseUrl = config.baseUrl ?? BASE_URL;
212
- }
213
- }
903
+ defaultBaseUrl = DEFAULT_BASE_URL2;
214
904
  /**
215
- * 发送聊天请求(非流式)
905
+ * 构建聊天请求体
906
+ * Gemini 使用 OpenAI 兼容格式,reasoning_effort 直接放在请求体中
216
907
  */
217
- async chat(options) {
908
+ buildChatRequest(options, stream = false) {
218
909
  const {
219
910
  model,
220
911
  messages,
@@ -226,49 +917,132 @@ var GeminiProvider = class extends BaseProvider {
226
917
  model,
227
918
  messages,
228
919
  temperature,
229
- stream: false
920
+ stream
230
921
  };
231
- if (maxTokens) {
922
+ if (maxTokens !== void 0) {
232
923
  body.max_tokens = maxTokens;
233
924
  }
234
- if (reasoning?.effort && reasoning.effort !== "off") {
235
- body.reasoning_effort = reasoning.effort;
236
- }
237
- const response = await fetch(`${this.baseUrl}/chat/completions`, {
238
- method: "POST",
239
- headers: {
240
- "Content-Type": "application/json",
241
- Authorization: `Bearer ${this.apiKey}`
242
- },
243
- body: JSON.stringify(body)
244
- });
245
- if (!response.ok) {
246
- const error = await response.text();
247
- throw new Error(`Gemini API error: ${response.status} ${error}`);
925
+ const reasoningParams = this.buildReasoningParams(reasoning);
926
+ Object.assign(body, reasoningParams);
927
+ return body;
928
+ }
929
+ /**
930
+ * 构建 Gemini 格式的 reasoning 参数
931
+ *
932
+ * Gemini 2.5+ 模型使用 reasoning_effort 参数:
933
+ * - 'low': 快速思考
934
+ * - 'medium': 平衡模式
935
+ * - 'high': 深度思考
936
+ */
937
+ buildReasoningParams(config) {
938
+ return RequestBuilder.buildGeminiReasoning(config);
939
+ }
940
+ /**
941
+ * 从 delta 中提取 StreamChunk
942
+ * Gemini 可能使用 reasoning_content 或 thoughts 字段
943
+ */
944
+ extractStreamChunk(delta) {
945
+ const { StreamProcessor: StreamProcessor2 } = (init_stream_processor(), __toCommonJS(stream_processor_exports));
946
+ const reasoningContent = delta.reasoning_content ?? delta.thoughts;
947
+ if (reasoningContent) {
948
+ return {
949
+ type: "reasoning",
950
+ text: StreamProcessor2.extractTextContent(reasoningContent)
951
+ };
248
952
  }
249
- const result = await response.json();
250
- const choice = result.choices?.[0];
251
- if (!choice) {
252
- throw new Error("No response from model");
953
+ if (delta.content) {
954
+ return {
955
+ type: "content",
956
+ text: StreamProcessor2.extractTextContent(delta.content)
957
+ };
253
958
  }
254
- const msg = choice.message;
255
- const reasoningContent = msg?.reasoning_content ?? null;
256
- return {
257
- content: extractTextContent2(msg?.content),
258
- reasoning: reasoningContent ? extractTextContent2(reasoningContent) : null,
259
- model: result.model ?? model,
260
- usage: {
261
- promptTokens: result.usage?.prompt_tokens ?? 0,
262
- completionTokens: result.usage?.completion_tokens ?? 0,
263
- totalTokens: result.usage?.total_tokens ?? 0
264
- },
265
- finishReason: choice.finish_reason ?? null
959
+ return null;
960
+ }
961
+ /**
962
+ * 获取 API 端点 URL
963
+ */
964
+ getEndpointUrl(baseUrl) {
965
+ return `${baseUrl}/chat/completions`;
966
+ }
967
+ };
968
+
969
+ // src/adapters/groq-adapter.ts
970
+ init_request_builder();
971
+ var DEFAULT_BASE_URL3 = "https://api.groq.com/openai/v1";
972
+ var GroqAdapter = class extends BaseAdapter {
973
+ name = "groq";
974
+ defaultBaseUrl = DEFAULT_BASE_URL3;
975
+ /**
976
+ * 构建聊天请求体
977
+ * Groq 使用 OpenAI 兼容格式,但有一些特殊参数
978
+ */
979
+ buildChatRequest(options, stream = false) {
980
+ const { model, messages, temperature = 1, maxTokens, reasoning } = options;
981
+ const body = {
982
+ model,
983
+ messages,
984
+ temperature,
985
+ stream,
986
+ top_p: 1
266
987
  };
988
+ if (maxTokens !== void 0) {
989
+ body.max_completion_tokens = maxTokens;
990
+ }
991
+ const reasoningParams = this.buildReasoningParams(reasoning);
992
+ Object.assign(body, reasoningParams);
993
+ return body;
267
994
  }
268
995
  /**
269
- * 发送流式聊天请求
996
+ * 构建 Groq 格式的 reasoning 参数
997
+ *
998
+ * Groq 使用 reasoning_format 参数:
999
+ * - 'raw': 原始格式
1000
+ * - 'parsed': 解析格式(推荐)
1001
+ *
1002
+ * 注意:不能同时使用 include_reasoning 和 reasoning_format
270
1003
  */
271
- async *chatStream(options) {
1004
+ buildReasoningParams(config) {
1005
+ return RequestBuilder.buildGroqReasoning(config);
1006
+ }
1007
+ /**
1008
+ * 从 delta 中提取 StreamChunk
1009
+ * Groq 使用 reasoning_content 或 reasoning 字段
1010
+ */
1011
+ extractStreamChunk(delta) {
1012
+ const { StreamProcessor: StreamProcessor2 } = (init_stream_processor(), __toCommonJS(stream_processor_exports));
1013
+ const reasoningContent = delta.reasoning_content ?? delta.reasoning;
1014
+ if (reasoningContent) {
1015
+ return {
1016
+ type: "reasoning",
1017
+ text: StreamProcessor2.extractTextContent(reasoningContent)
1018
+ };
1019
+ }
1020
+ if (delta.content) {
1021
+ return {
1022
+ type: "content",
1023
+ text: StreamProcessor2.extractTextContent(delta.content)
1024
+ };
1025
+ }
1026
+ return null;
1027
+ }
1028
+ /**
1029
+ * 获取 API 端点 URL
1030
+ */
1031
+ getEndpointUrl(baseUrl) {
1032
+ return `${baseUrl}/chat/completions`;
1033
+ }
1034
+ };
1035
+
1036
+ // src/adapters/huggingface-adapter.ts
1037
+ var DEFAULT_BASE_URL4 = "https://router.huggingface.co/v1";
1038
+ var HuggingFaceAdapter = class extends BaseAdapter {
1039
+ name = "huggingface";
1040
+ defaultBaseUrl = DEFAULT_BASE_URL4;
1041
+ /**
1042
+ * 构建聊天请求体
1043
+ * HuggingFace 使用标准 OpenAI 兼容格式
1044
+ */
1045
+ buildChatRequest(options, stream = false) {
272
1046
  const {
273
1047
  model,
274
1048
  messages,
@@ -280,396 +1054,218 @@ var GeminiProvider = class extends BaseProvider {
280
1054
  model,
281
1055
  messages,
282
1056
  temperature,
283
- stream: true
1057
+ stream
284
1058
  };
285
- if (maxTokens) {
1059
+ if (maxTokens !== void 0) {
286
1060
  body.max_tokens = maxTokens;
287
1061
  }
288
- if (reasoning?.effort && reasoning.effort !== "off") {
289
- body.reasoning_effort = reasoning.effort;
1062
+ const reasoningParams = this.buildReasoningParams(reasoning);
1063
+ Object.assign(body, reasoningParams);
1064
+ return body;
1065
+ }
1066
+ /**
1067
+ * 构建 HuggingFace 格式的 reasoning 参数
1068
+ * HuggingFace 使用 reasoning_effort 参数(取决于具体模型是否支持)
1069
+ */
1070
+ buildReasoningParams(config) {
1071
+ if (!config || !config.effort || config.effort === "off") {
1072
+ return {};
290
1073
  }
291
- const response = await fetch(`${this.baseUrl}/chat/completions`, {
292
- method: "POST",
293
- headers: {
294
- "Content-Type": "application/json",
295
- Authorization: `Bearer ${this.apiKey}`
296
- },
297
- body: JSON.stringify(body)
298
- });
299
- if (!response.ok) {
300
- const error = await response.text();
301
- throw new Error(`Gemini API error: ${response.status} ${error}`);
302
- }
303
- const reader = response.body?.getReader();
304
- if (!reader) {
305
- throw new Error("No response body");
1074
+ return {
1075
+ reasoning_effort: config.effort
1076
+ };
1077
+ }
1078
+ /**
1079
+ * 从 delta 中提取 StreamChunk
1080
+ */
1081
+ extractStreamChunk(delta) {
1082
+ const { StreamProcessor: StreamProcessor2 } = (init_stream_processor(), __toCommonJS(stream_processor_exports));
1083
+ if (delta.reasoning_content) {
1084
+ return {
1085
+ type: "reasoning",
1086
+ text: StreamProcessor2.extractTextContent(delta.reasoning_content)
1087
+ };
306
1088
  }
307
- const decoder = new TextDecoder();
308
- let buffer = "";
309
- try {
310
- while (true) {
311
- const { done, value } = await reader.read();
312
- if (done) break;
313
- buffer += decoder.decode(value, { stream: true });
314
- const lines = buffer.split("\n");
315
- buffer = lines.pop() ?? "";
316
- for (const line of lines) {
317
- const trimmed = line.trim();
318
- if (!trimmed || trimmed === "data: [DONE]") continue;
319
- if (!trimmed.startsWith("data: ")) continue;
320
- try {
321
- const data = JSON.parse(trimmed.slice(6));
322
- const delta = data.choices?.[0]?.delta;
323
- if (!delta) continue;
324
- const thought = delta.reasoning_content ?? delta.thoughts;
325
- if (thought) {
326
- yield {
327
- type: "reasoning",
328
- text: extractTextContent2(thought)
329
- };
330
- }
331
- if (delta.content) {
332
- yield {
333
- type: "content",
334
- text: extractTextContent2(delta.content)
335
- };
336
- }
337
- } catch {
338
- }
339
- }
340
- }
341
- } finally {
342
- reader.releaseLock();
1089
+ if (delta.content) {
1090
+ return {
1091
+ type: "content",
1092
+ text: StreamProcessor2.extractTextContent(delta.content)
1093
+ };
343
1094
  }
1095
+ return null;
1096
+ }
1097
+ /**
1098
+ * 获取 API 端点 URL
1099
+ */
1100
+ getEndpointUrl(baseUrl) {
1101
+ return `${baseUrl}/chat/completions`;
344
1102
  }
345
1103
  };
346
1104
 
347
- // src/providers/groq.ts
348
- var BASE_URL2 = "https://api.groq.com/openai/v1";
349
- function extractTextContent3(content) {
350
- if (typeof content === "string") {
351
- return content;
352
- }
353
- if (Array.isArray(content)) {
354
- return content.filter(
355
- (item) => typeof item === "object" && item !== null && item.type === "text" && typeof item.text === "string"
356
- ).map((item) => item.text).join("");
357
- }
358
- return "";
359
- }
360
- var GroqProvider = class extends BaseProvider {
361
- name = "groq";
362
- apiKey;
363
- baseUrl;
364
- constructor(config) {
365
- super();
366
- if (typeof config === "string") {
367
- this.apiKey = config;
368
- this.baseUrl = BASE_URL2;
369
- } else {
370
- this.apiKey = config.apiKey;
371
- this.baseUrl = config.baseUrl ?? BASE_URL2;
372
- }
373
- }
1105
+ // src/adapters/modelscope-adapter.ts
1106
+ var DEFAULT_BASE_URL5 = "https://api-inference.modelscope.cn/v1";
1107
+ var ModelScopeAdapter = class extends BaseAdapter {
1108
+ name = "modelscope";
1109
+ defaultBaseUrl = DEFAULT_BASE_URL5;
374
1110
  /**
375
- * 发送聊天请求(非流式)
1111
+ * 构建聊天请求体
1112
+ * ModelScope 使用 OpenAI 兼容格式
376
1113
  */
377
- async chat(options) {
378
- const { model, messages, temperature = 1, maxTokens, reasoning } = options;
1114
+ buildChatRequest(options, stream = false) {
1115
+ const {
1116
+ model,
1117
+ messages,
1118
+ temperature = 0.7,
1119
+ maxTokens,
1120
+ reasoning
1121
+ } = options;
379
1122
  const body = {
380
1123
  model,
381
1124
  messages,
382
1125
  temperature,
383
- stream: false,
384
- top_p: 1
1126
+ stream
385
1127
  };
386
- if (maxTokens) {
387
- body.max_completion_tokens = maxTokens;
388
- }
389
- if (reasoning?.effort && reasoning.effort !== "off") {
390
- body.reasoning_format = "parsed";
391
- } else if (reasoning?.effort === "off") {
392
- body.include_reasoning = false;
393
- }
394
- const response = await fetch(`${this.baseUrl}/chat/completions`, {
395
- method: "POST",
396
- headers: {
397
- "Content-Type": "application/json",
398
- Authorization: `Bearer ${this.apiKey}`
399
- },
400
- body: JSON.stringify(body)
401
- });
402
- if (!response.ok) {
403
- const error = await response.text();
404
- throw new Error(`Groq API error: ${response.status} ${error}`);
405
- }
406
- const result = await response.json();
407
- const choice = result.choices?.[0];
408
- if (!choice) {
409
- throw new Error("No response from model");
1128
+ if (maxTokens !== void 0) {
1129
+ body.max_tokens = maxTokens;
410
1130
  }
411
- const msg = choice.message;
412
- const reasoningContent = msg?.reasoning_content ?? msg?.reasoning ?? null;
413
- return {
414
- content: extractTextContent3(msg?.content),
415
- reasoning: reasoningContent ? extractTextContent3(reasoningContent) : null,
416
- model: result.model ?? model,
417
- usage: {
418
- promptTokens: result.usage?.prompt_tokens ?? 0,
419
- completionTokens: result.usage?.completion_tokens ?? 0,
420
- totalTokens: result.usage?.total_tokens ?? 0
421
- },
422
- finishReason: choice.finish_reason ?? null
423
- };
1131
+ const reasoningParams = this.buildReasoningParams(reasoning);
1132
+ Object.assign(body, reasoningParams);
1133
+ return body;
424
1134
  }
425
1135
  /**
426
- * 发送流式聊天请求
1136
+ * 构建 ModelScope 格式的 reasoning 参数
1137
+ * ModelScope 使用 enable_thinking 参数控制思考模式
427
1138
  */
428
- async *chatStream(options) {
429
- const { model, messages, temperature = 1, maxTokens, reasoning } = options;
430
- const body = {
431
- model,
432
- messages,
433
- temperature,
434
- stream: true,
435
- top_p: 1
436
- };
437
- if (maxTokens) {
438
- body.max_completion_tokens = maxTokens;
1139
+ buildReasoningParams(config) {
1140
+ if (!config || !config.effort) {
1141
+ return {};
439
1142
  }
440
- if (reasoning?.effort && reasoning.effort !== "off") {
441
- body.reasoning_format = "parsed";
442
- } else if (reasoning?.effort === "off") {
443
- body.include_reasoning = false;
1143
+ if (config.effort === "off") {
1144
+ return { enable_thinking: false };
444
1145
  }
445
- const response = await fetch(`${this.baseUrl}/chat/completions`, {
446
- method: "POST",
447
- headers: {
448
- "Content-Type": "application/json",
449
- Authorization: `Bearer ${this.apiKey}`
450
- },
451
- body: JSON.stringify(body)
452
- });
453
- if (!response.ok) {
454
- const error = await response.text();
455
- throw new Error(`Groq API error: ${response.status} ${error}`);
456
- }
457
- const reader = response.body?.getReader();
458
- if (!reader) {
459
- throw new Error("No response body");
1146
+ return { enable_thinking: true };
1147
+ }
1148
+ /**
1149
+ * 从 delta 中提取 StreamChunk
1150
+ */
1151
+ extractStreamChunk(delta) {
1152
+ const { StreamProcessor: StreamProcessor2 } = (init_stream_processor(), __toCommonJS(stream_processor_exports));
1153
+ if (delta.reasoning_content) {
1154
+ return {
1155
+ type: "reasoning",
1156
+ text: StreamProcessor2.extractTextContent(delta.reasoning_content)
1157
+ };
460
1158
  }
461
- const decoder = new TextDecoder();
462
- let buffer = "";
463
- try {
464
- while (true) {
465
- const { done, value } = await reader.read();
466
- if (done) break;
467
- buffer += decoder.decode(value, { stream: true });
468
- const lines = buffer.split("\n");
469
- buffer = lines.pop() ?? "";
470
- for (const line of lines) {
471
- const trimmed = line.trim();
472
- if (!trimmed || trimmed === "data: [DONE]") continue;
473
- if (!trimmed.startsWith("data: ")) continue;
474
- try {
475
- const data = JSON.parse(trimmed.slice(6));
476
- const delta = data.choices?.[0]?.delta;
477
- if (!delta) continue;
478
- const reasoningContent = delta.reasoning_content ?? delta.reasoning;
479
- if (reasoningContent) {
480
- yield {
481
- type: "reasoning",
482
- text: extractTextContent3(reasoningContent)
483
- };
484
- }
485
- if (delta.content) {
486
- yield {
487
- type: "content",
488
- text: extractTextContent3(delta.content)
489
- };
490
- }
491
- } catch {
492
- }
493
- }
494
- }
495
- } finally {
496
- reader.releaseLock();
1159
+ if (delta.content) {
1160
+ return {
1161
+ type: "content",
1162
+ text: StreamProcessor2.extractTextContent(delta.content)
1163
+ };
497
1164
  }
1165
+ return null;
1166
+ }
1167
+ /**
1168
+ * 获取 API 端点 URL
1169
+ */
1170
+ getEndpointUrl(baseUrl) {
1171
+ return `${baseUrl}/chat/completions`;
498
1172
  }
499
1173
  };
500
1174
 
501
- // src/providers/huggingface.ts
502
- var BASE_URL3 = "https://router.huggingface.co/v1";
503
- function extractTextContent4(content) {
504
- if (typeof content === "string") {
505
- return content;
506
- }
507
- if (Array.isArray(content)) {
508
- return content.filter(
509
- (item) => typeof item === "object" && item !== null && item.type === "text" && typeof item.text === "string"
510
- ).map((item) => item.text).join("");
511
- }
512
- return "";
513
- }
514
- var HuggingFaceProvider = class extends BaseProvider {
515
- name = "huggingface";
516
- apiKey;
517
- baseUrl;
518
- constructor(config) {
519
- super();
520
- if (typeof config === "string") {
521
- this.apiKey = config;
522
- this.baseUrl = BASE_URL3;
523
- } else {
524
- this.apiKey = config.apiKey;
525
- this.baseUrl = config.baseUrl ?? BASE_URL3;
526
- }
527
- }
1175
+ // src/adapters/deepseek-adapter.ts
1176
+ init_request_builder();
1177
+ var DEFAULT_BASE_URL6 = "https://api.deepseek.com";
1178
+ var DeepSeekAdapter = class extends BaseAdapter {
1179
+ name = "deepseek";
1180
+ defaultBaseUrl = DEFAULT_BASE_URL6;
528
1181
  /**
529
- * 发送聊天请求(非流式)
1182
+ * 构建聊天请求体
1183
+ * DeepSeek 使用 OpenAI 兼容格式
530
1184
  */
531
- async chat(options) {
532
- const { model, messages, temperature = 0.7, maxTokens } = options;
1185
+ buildChatRequest(options, stream = false) {
1186
+ const {
1187
+ model,
1188
+ messages,
1189
+ temperature = 0.7,
1190
+ maxTokens,
1191
+ reasoning
1192
+ } = options;
533
1193
  const body = {
534
1194
  model,
535
1195
  messages,
536
1196
  temperature,
537
- stream: false
1197
+ stream
538
1198
  };
539
- if (maxTokens) {
1199
+ if (maxTokens !== void 0) {
540
1200
  body.max_tokens = maxTokens;
541
1201
  }
542
- const response = await fetch(`${this.baseUrl}/chat/completions`, {
543
- method: "POST",
544
- headers: {
545
- "Content-Type": "application/json",
546
- Authorization: `Bearer ${this.apiKey}`
547
- },
548
- body: JSON.stringify(body)
549
- });
550
- if (!response.ok) {
551
- const error = await response.text();
552
- throw new Error(`HuggingFace API error: ${response.status} ${error}`);
553
- }
554
- const result = await response.json();
555
- const choice = result.choices?.[0];
556
- if (!choice) {
557
- throw new Error("No response from model");
558
- }
559
- const msg = choice.message;
560
- const reasoningContent = msg?.reasoning_content ?? null;
561
- return {
562
- content: extractTextContent4(msg?.content),
563
- reasoning: reasoningContent ? extractTextContent4(reasoningContent) : null,
564
- model: result.model ?? model,
565
- usage: {
566
- promptTokens: result.usage?.prompt_tokens ?? 0,
567
- completionTokens: result.usage?.completion_tokens ?? 0,
568
- totalTokens: result.usage?.total_tokens ?? 0
569
- },
570
- finishReason: choice.finish_reason ?? null
571
- };
1202
+ const reasoningParams = this.buildReasoningParams(reasoning);
1203
+ Object.assign(body, reasoningParams);
1204
+ return body;
572
1205
  }
573
1206
  /**
574
- * 发送流式聊天请求
1207
+ * 构建 DeepSeek 格式的 reasoning 参数
1208
+ * DeepSeek 使用 thinking 参数启用思考模式
575
1209
  */
576
- async *chatStream(options) {
577
- const { model, messages, temperature = 0.7, maxTokens } = options;
578
- const body = {
579
- model,
580
- messages,
581
- temperature,
582
- stream: true
583
- };
584
- if (maxTokens) {
585
- body.max_tokens = maxTokens;
586
- }
587
- const response = await fetch(`${this.baseUrl}/chat/completions`, {
588
- method: "POST",
589
- headers: {
590
- "Content-Type": "application/json",
591
- Authorization: `Bearer ${this.apiKey}`
592
- },
593
- body: JSON.stringify(body)
594
- });
595
- if (!response.ok) {
596
- const error = await response.text();
597
- throw new Error(`HuggingFace API error: ${response.status} ${error}`);
598
- }
599
- const reader = response.body?.getReader();
600
- if (!reader) {
601
- throw new Error("No response body");
1210
+ buildReasoningParams(config) {
1211
+ return RequestBuilder.buildDeepSeekReasoning(config);
1212
+ }
1213
+ /**
1214
+ * 从 delta 中提取 StreamChunk
1215
+ * DeepSeek R1 使用 reasoning_content 返回思考过程
1216
+ */
1217
+ extractStreamChunk(delta) {
1218
+ const { StreamProcessor: StreamProcessor2 } = (init_stream_processor(), __toCommonJS(stream_processor_exports));
1219
+ if (delta.reasoning_content) {
1220
+ return {
1221
+ type: "reasoning",
1222
+ text: StreamProcessor2.extractTextContent(delta.reasoning_content)
1223
+ };
602
1224
  }
603
- const decoder = new TextDecoder();
604
- let buffer = "";
605
- try {
606
- while (true) {
607
- const { done, value } = await reader.read();
608
- if (done) break;
609
- buffer += decoder.decode(value, { stream: true });
610
- const lines = buffer.split("\n");
611
- buffer = lines.pop() ?? "";
612
- for (const line of lines) {
613
- const trimmed = line.trim();
614
- if (!trimmed || trimmed === "data: [DONE]") continue;
615
- if (!trimmed.startsWith("data: ")) continue;
616
- try {
617
- const data = JSON.parse(trimmed.slice(6));
618
- const delta = data.choices?.[0]?.delta;
619
- if (!delta) continue;
620
- if (delta.reasoning_content) {
621
- yield {
622
- type: "reasoning",
623
- text: extractTextContent4(delta.reasoning_content)
624
- };
625
- }
626
- if (delta.content) {
627
- yield {
628
- type: "content",
629
- text: extractTextContent4(delta.content)
630
- };
631
- }
632
- } catch {
633
- }
634
- }
635
- }
636
- } finally {
637
- reader.releaseLock();
1225
+ if (delta.content) {
1226
+ return {
1227
+ type: "content",
1228
+ text: StreamProcessor2.extractTextContent(delta.content)
1229
+ };
638
1230
  }
1231
+ return null;
1232
+ }
1233
+ /**
1234
+ * 获取 API 端点 URL
1235
+ */
1236
+ getEndpointUrl(baseUrl) {
1237
+ return `${baseUrl}/chat/completions`;
639
1238
  }
640
1239
  };
641
1240
 
642
- // src/providers/modelscope.ts
643
- var BASE_URL4 = "https://api-inference.modelscope.cn/v1";
644
- function extractTextContent5(content) {
645
- if (typeof content === "string") {
646
- return content;
1241
+ // src/adapters/poe-adapter.ts
1242
+ init_types();
1243
+ var DEFAULT_BASE_URL7 = "https://api.poe.com/v1";
1244
+ function extractThinkingFromContent(content) {
1245
+ const thinkMatch = content.match(/<think>([\s\S]*?)<\/think>/);
1246
+ if (thinkMatch) {
1247
+ const thinking = thinkMatch[1].trim();
1248
+ const cleanContent = content.replace(/<think>[\s\S]*?<\/think>/, "").trim();
1249
+ return { thinking, content: cleanContent };
647
1250
  }
648
- if (Array.isArray(content)) {
649
- return content.filter(
650
- (item) => typeof item === "object" && item !== null && item.type === "text" && typeof item.text === "string"
651
- ).map((item) => item.text).join("");
1251
+ const thinkingMatch = content.match(
1252
+ /^\*Thinking\.{0,3}\*\s*\n((?:>.*(?:\n|$))+)/
1253
+ );
1254
+ if (thinkingMatch) {
1255
+ const thinking = thinkingMatch[1].split("\n").map((line) => line.replace(/^>\s?/, "")).join("\n").trim();
1256
+ const cleanContent = content.replace(thinkingMatch[0], "").trim();
1257
+ return { thinking, content: cleanContent };
652
1258
  }
653
- return "";
1259
+ return { thinking: "", content };
654
1260
  }
655
- var ModelScopeProvider = class extends BaseProvider {
656
- name = "modelscope";
657
- apiKey;
658
- baseUrl;
659
- constructor(config) {
660
- super();
661
- if (typeof config === "string") {
662
- this.apiKey = config;
663
- this.baseUrl = BASE_URL4;
664
- } else {
665
- this.apiKey = config.apiKey;
666
- this.baseUrl = config.baseUrl ?? BASE_URL4;
667
- }
668
- }
1261
+ var PoeAdapter = class extends BaseAdapter {
1262
+ name = "poe";
1263
+ defaultBaseUrl = DEFAULT_BASE_URL7;
669
1264
  /**
670
- * 发送聊天请求(非流式)
1265
+ * 构建聊天请求体
1266
+ * Poe 使用 OpenAI 兼容格式,通过 extra_body 传递自定义参数
671
1267
  */
672
- async chat(options) {
1268
+ buildChatRequest(options, stream = false) {
673
1269
  const {
674
1270
  model,
675
1271
  messages,
@@ -681,53 +1277,107 @@ var ModelScopeProvider = class extends BaseProvider {
681
1277
  model,
682
1278
  messages,
683
1279
  temperature,
684
- stream: false
1280
+ stream
685
1281
  };
686
- if (maxTokens) {
1282
+ if (maxTokens !== void 0) {
687
1283
  body.max_tokens = maxTokens;
688
1284
  }
689
- if (reasoning?.effort) {
690
- if (reasoning.effort === "off") {
691
- body.enable_thinking = false;
692
- } else {
693
- body.enable_thinking = true;
694
- }
1285
+ const reasoningParams = this.buildReasoningParams(reasoning);
1286
+ Object.assign(body, reasoningParams);
1287
+ return body;
1288
+ }
1289
+ /**
1290
+ * 构建 Poe 格式的 reasoning 参数
1291
+ * Poe 通过 extra_body 传递 reasoning_effort 和 thinking_budget
1292
+ */
1293
+ buildReasoningParams(config) {
1294
+ if (!config || config.effort === "off") {
1295
+ return {};
695
1296
  }
696
- const response = await fetch(`${this.baseUrl}/chat/completions`, {
697
- method: "POST",
698
- headers: {
699
- "Content-Type": "application/json",
700
- Authorization: `Bearer ${this.apiKey}`
701
- },
702
- body: JSON.stringify(body)
703
- });
704
- if (!response.ok) {
705
- const error = await response.text();
706
- throw new Error(`ModelScope API error: ${response.status} ${error}`);
1297
+ const params = {};
1298
+ if (config.effort) {
1299
+ params.reasoning_effort = config.effort;
707
1300
  }
708
- const result = await response.json();
709
- const choice = result.choices?.[0];
1301
+ if (config.budgetTokens !== void 0) {
1302
+ params.thinking_budget = config.budgetTokens;
1303
+ } else if (config.effort && EFFORT_TOKEN_MAP[config.effort]) {
1304
+ params.thinking_budget = EFFORT_TOKEN_MAP[config.effort];
1305
+ }
1306
+ return params;
1307
+ }
1308
+ /**
1309
+ * 解析聊天响应
1310
+ * Poe 可能返回 reasoning_content,或者需要从 <think> 标签提取
1311
+ */
1312
+ parseChatResponse(response, model) {
1313
+ const { StreamProcessor: StreamProcessor2 } = (init_stream_processor(), __toCommonJS(stream_processor_exports));
1314
+ const choices = response.choices;
1315
+ const choice = choices?.[0];
710
1316
  if (!choice) {
711
1317
  throw new Error("No response from model");
712
1318
  }
713
1319
  const msg = choice.message;
714
- const reasoningContent = msg?.reasoning_content ?? null;
1320
+ let reasoningContent = msg?.reasoning_content ?? null;
1321
+ let contentText = StreamProcessor2.extractTextContent(msg?.content);
1322
+ if (!reasoningContent && contentText) {
1323
+ const extracted = extractThinkingFromContent(contentText);
1324
+ if (extracted.thinking) {
1325
+ reasoningContent = extracted.thinking;
1326
+ contentText = extracted.content;
1327
+ }
1328
+ }
1329
+ const usage = response.usage;
715
1330
  return {
716
- content: extractTextContent5(msg?.content),
717
- reasoning: reasoningContent ? extractTextContent5(reasoningContent) : null,
718
- model: result.model ?? model,
1331
+ content: contentText,
1332
+ reasoning: reasoningContent ? StreamProcessor2.extractTextContent(reasoningContent) : null,
1333
+ model: response.model ?? model,
719
1334
  usage: {
720
- promptTokens: result.usage?.prompt_tokens ?? 0,
721
- completionTokens: result.usage?.completion_tokens ?? 0,
722
- totalTokens: result.usage?.total_tokens ?? 0
1335
+ promptTokens: usage?.prompt_tokens ?? usage?.promptTokens ?? 0,
1336
+ completionTokens: usage?.completion_tokens ?? usage?.completionTokens ?? 0,
1337
+ totalTokens: usage?.total_tokens ?? usage?.totalTokens ?? 0
723
1338
  },
724
- finishReason: choice.finish_reason ?? null
1339
+ finishReason: choice.finish_reason ?? choice.finishReason ?? null
725
1340
  };
726
1341
  }
727
1342
  /**
728
- * 发送流式聊天请求
1343
+ * 从 delta 中提取 StreamChunk
1344
+ * Poe 的流式响应处理比较复杂,需要处理多种思考格式
729
1345
  */
730
- async *chatStream(options) {
1346
+ extractStreamChunk(delta) {
1347
+ const { StreamProcessor: StreamProcessor2 } = (init_stream_processor(), __toCommonJS(stream_processor_exports));
1348
+ if (delta.reasoning_content) {
1349
+ return {
1350
+ type: "reasoning",
1351
+ text: StreamProcessor2.extractTextContent(delta.reasoning_content)
1352
+ };
1353
+ }
1354
+ if (delta.content) {
1355
+ return {
1356
+ type: "content",
1357
+ text: StreamProcessor2.extractTextContent(delta.content)
1358
+ };
1359
+ }
1360
+ return null;
1361
+ }
1362
+ /**
1363
+ * 获取 API 端点 URL
1364
+ */
1365
+ getEndpointUrl(baseUrl) {
1366
+ return `${baseUrl}/chat/completions`;
1367
+ }
1368
+ };
1369
+
1370
+ // src/adapters/nova-adapter.ts
1371
+ init_request_builder();
1372
+ var DEFAULT_BASE_URL8 = "https://api.nova.amazon.com/v1";
1373
+ var NovaAdapter = class extends BaseAdapter {
1374
+ name = "nova";
1375
+ defaultBaseUrl = DEFAULT_BASE_URL8;
1376
+ /**
1377
+ * 构建聊天请求体
1378
+ * Nova 使用 OpenAI 兼容格式
1379
+ */
1380
+ buildChatRequest(options, stream = false) {
731
1381
  const {
732
1382
  model,
733
1383
  messages,
@@ -739,265 +1389,813 @@ var ModelScopeProvider = class extends BaseProvider {
739
1389
  model,
740
1390
  messages,
741
1391
  temperature,
742
- stream: true
1392
+ stream
743
1393
  };
744
- if (maxTokens) {
1394
+ if (maxTokens !== void 0) {
745
1395
  body.max_tokens = maxTokens;
746
1396
  }
747
- if (reasoning?.effort) {
748
- if (reasoning.effort === "off") {
749
- body.enable_thinking = false;
750
- } else {
751
- body.enable_thinking = true;
752
- }
753
- }
754
- const response = await fetch(`${this.baseUrl}/chat/completions`, {
755
- method: "POST",
756
- headers: {
757
- "Content-Type": "application/json",
758
- Authorization: `Bearer ${this.apiKey}`
759
- },
760
- body: JSON.stringify(body)
761
- });
762
- if (!response.ok) {
763
- const error = await response.text();
764
- throw new Error(`ModelScope API error: ${response.status} ${error}`);
765
- }
766
- const reader = response.body?.getReader();
767
- if (!reader) {
768
- throw new Error("No response body");
1397
+ const reasoningParams = this.buildReasoningParams(reasoning);
1398
+ Object.assign(body, reasoningParams);
1399
+ return body;
1400
+ }
1401
+ /**
1402
+ * 构建 Nova 格式的 reasoning 参数
1403
+ * Nova 使用 reasoningConfig 控制 extended thinking
1404
+ */
1405
+ buildReasoningParams(config) {
1406
+ return RequestBuilder.buildNovaReasoning(config);
1407
+ }
1408
+ /**
1409
+ * 从 delta 中提取 StreamChunk
1410
+ * Nova 返回 reasoning_content 作为思考过程
1411
+ */
1412
+ extractStreamChunk(delta) {
1413
+ const { StreamProcessor: StreamProcessor2 } = (init_stream_processor(), __toCommonJS(stream_processor_exports));
1414
+ if (delta.reasoning_content) {
1415
+ return {
1416
+ type: "reasoning",
1417
+ text: StreamProcessor2.extractTextContent(delta.reasoning_content)
1418
+ };
769
1419
  }
770
- const decoder = new TextDecoder();
771
- let buffer = "";
772
- try {
773
- while (true) {
774
- const { done, value } = await reader.read();
775
- if (done) break;
776
- buffer += decoder.decode(value, { stream: true });
777
- const lines = buffer.split("\n");
778
- buffer = lines.pop() ?? "";
779
- for (const line of lines) {
780
- const trimmed = line.trim();
781
- if (!trimmed || trimmed === "data: [DONE]") continue;
782
- if (!trimmed.startsWith("data: ")) continue;
783
- try {
784
- const data = JSON.parse(trimmed.slice(6));
785
- const delta = data.choices?.[0]?.delta;
786
- if (!delta) continue;
787
- if (delta.reasoning_content) {
788
- yield {
789
- type: "reasoning",
790
- text: extractTextContent5(delta.reasoning_content)
791
- };
792
- }
793
- if (delta.content) {
794
- yield {
795
- type: "content",
796
- text: extractTextContent5(delta.content)
797
- };
798
- }
799
- } catch {
800
- }
801
- }
802
- }
803
- } finally {
804
- reader.releaseLock();
1420
+ if (delta.content) {
1421
+ return {
1422
+ type: "content",
1423
+ text: StreamProcessor2.extractTextContent(delta.content)
1424
+ };
805
1425
  }
1426
+ return null;
1427
+ }
1428
+ /**
1429
+ * 获取 API 端点 URL
1430
+ */
1431
+ getEndpointUrl(baseUrl) {
1432
+ return `${baseUrl}/chat/completions`;
806
1433
  }
807
1434
  };
808
1435
 
809
- // src/providers/deepseek.ts
810
- var BASE_URL5 = "https://api.deepseek.com";
811
- function getReasoningMaxTokens(reasoning, userMaxTokens) {
812
- if (!reasoning || reasoning.effort === "off") {
813
- return userMaxTokens;
1436
+ // src/adapters/index.ts
1437
+ function createBuiltInAdapters() {
1438
+ const adapters = /* @__PURE__ */ new Map();
1439
+ adapters.set("openrouter", new OpenRouterAdapter());
1440
+ adapters.set("gemini", new GeminiAdapter());
1441
+ adapters.set("groq", new GroqAdapter());
1442
+ adapters.set("huggingface", new HuggingFaceAdapter());
1443
+ adapters.set("modelscope", new ModelScopeAdapter());
1444
+ adapters.set("deepseek", new DeepSeekAdapter());
1445
+ adapters.set("poe", new PoeAdapter());
1446
+ adapters.set("nova", new NovaAdapter());
1447
+ return adapters;
1448
+ }
1449
+
1450
+ // src/registry/provider-registry.ts
1451
+ var RegistryError = class extends Error {
1452
+ constructor(message, provider, code = "REGISTRY_ERROR") {
1453
+ super(message);
1454
+ this.provider = provider;
1455
+ this.code = code;
1456
+ this.name = "RegistryError";
814
1457
  }
815
- if (reasoning.budgetTokens !== void 0) {
816
- return reasoning.budgetTokens;
1458
+ };
1459
+ var ProviderRegistry = class {
1460
+ /** 适配器映射表 */
1461
+ static adapters = /* @__PURE__ */ new Map();
1462
+ /** 是否已初始化内置适配器 */
1463
+ static initialized = false;
1464
+ /**
1465
+ * 注册 Provider 适配器
1466
+ *
1467
+ * @param adapter - 要注册的适配器实例
1468
+ * @throws RegistryError 如果适配器无效
1469
+ *
1470
+ * @example
1471
+ * ```typescript
1472
+ * const myAdapter = new MyCustomAdapter();
1473
+ * ProviderRegistry.register(myAdapter);
1474
+ * ```
1475
+ */
1476
+ static register(adapter) {
1477
+ if (!adapter) {
1478
+ throw new RegistryError("\u9002\u914D\u5668\u4E0D\u80FD\u4E3A\u7A7A", void 0, "INVALID_ADAPTER");
1479
+ }
1480
+ if (!adapter.name) {
1481
+ throw new RegistryError(
1482
+ "\u9002\u914D\u5668\u5FC5\u987B\u6709 name \u5C5E\u6027",
1483
+ void 0,
1484
+ "INVALID_ADAPTER"
1485
+ );
1486
+ }
1487
+ this.adapters.set(adapter.name, adapter);
817
1488
  }
818
- if (reasoning.effort) {
819
- return EFFORT_TOKEN_MAP[reasoning.effort];
1489
+ /**
1490
+ * 获取 Provider 适配器
1491
+ *
1492
+ * @param type - Provider 类型
1493
+ * @returns 对应的适配器实例
1494
+ * @throws RegistryError 如果 Provider 未注册
1495
+ *
1496
+ * @example
1497
+ * ```typescript
1498
+ * const adapter = ProviderRegistry.getAdapter('openrouter');
1499
+ * const client = adapter.createClient(config);
1500
+ * ```
1501
+ */
1502
+ static getAdapter(type) {
1503
+ this.initializeBuiltIn();
1504
+ const adapter = this.adapters.get(type);
1505
+ if (!adapter) {
1506
+ const supported = this.listSupported();
1507
+ throw new RegistryError(
1508
+ `Provider "${type}" \u672A\u6CE8\u518C\u3002\u53EF\u7528\u7684 Provider: ${supported.join(", ")}`,
1509
+ type,
1510
+ "PROVIDER_NOT_FOUND"
1511
+ );
1512
+ }
1513
+ return adapter;
820
1514
  }
821
- return userMaxTokens;
822
- }
823
- function extractTextContent6(content) {
824
- if (typeof content === "string") {
825
- return content;
1515
+ /**
1516
+ * 检查 Provider 是否已注册
1517
+ *
1518
+ * @param type - Provider 类型
1519
+ * @returns 是否已注册
1520
+ *
1521
+ * @example
1522
+ * ```typescript
1523
+ * if (ProviderRegistry.hasAdapter('gemini')) {
1524
+ * console.log('Gemini 已注册');
1525
+ * }
1526
+ * ```
1527
+ */
1528
+ static hasAdapter(type) {
1529
+ this.initializeBuiltIn();
1530
+ return this.adapters.has(type);
826
1531
  }
827
- if (Array.isArray(content)) {
828
- return content.filter(
829
- (item) => typeof item === "object" && item !== null && item.type === "text" && typeof item.text === "string"
830
- ).map((item) => item.text).join("");
1532
+ /**
1533
+ * 获取所有已注册的 Provider 类型
1534
+ *
1535
+ * @returns Provider 类型数组
1536
+ *
1537
+ * @example
1538
+ * ```typescript
1539
+ * const providers = ProviderRegistry.listSupported();
1540
+ * console.log('支持的 Provider:', providers);
1541
+ * ```
1542
+ */
1543
+ static listSupported() {
1544
+ this.initializeBuiltIn();
1545
+ return Array.from(this.adapters.keys());
831
1546
  }
832
- return "";
833
- }
834
- var DeepSeekProvider = class extends BaseProvider {
835
- name = "deepseek";
836
- apiKey;
837
- baseUrl;
838
- constructor(config) {
839
- super();
840
- if (typeof config === "string") {
841
- this.apiKey = config;
842
- this.baseUrl = BASE_URL5;
843
- } else {
844
- this.apiKey = config.apiKey;
845
- this.baseUrl = config.baseUrl ?? BASE_URL5;
1547
+ /**
1548
+ * 从配置文件加载并注册 Provider
1549
+ *
1550
+ * @param config - 注册表配置
1551
+ * @throws RegistryError 如果配置无效或加载失败
1552
+ *
1553
+ * @example
1554
+ * ```typescript
1555
+ * const config: RegistryConfig = {
1556
+ * providers: {
1557
+ * 'custom-provider': {
1558
+ * adapter: './my-adapter',
1559
+ * config: {
1560
+ * apiKey: 'xxx',
1561
+ * baseUrl: 'https://api.example.com'
1562
+ * }
1563
+ * }
1564
+ * }
1565
+ * };
1566
+ * ProviderRegistry.loadFromConfig(config);
1567
+ * ```
1568
+ */
1569
+ static loadFromConfig(config) {
1570
+ if (!config || !config.providers) {
1571
+ throw new RegistryError(
1572
+ "\u914D\u7F6E\u65E0\u6548\uFF1A\u7F3A\u5C11 providers \u5B57\u6BB5",
1573
+ void 0,
1574
+ "INVALID_CONFIG"
1575
+ );
1576
+ }
1577
+ this.initializeBuiltIn();
1578
+ for (const [providerName, providerConfig] of Object.entries(
1579
+ config.providers
1580
+ )) {
1581
+ if (providerConfig.adapter) {
1582
+ try {
1583
+ const CustomAdapter = __require(providerConfig.adapter);
1584
+ const AdapterClass = CustomAdapter.default || CustomAdapter;
1585
+ const adapter = new AdapterClass();
1586
+ if (typeof adapter.name !== "string" || typeof adapter.createClient !== "function") {
1587
+ throw new RegistryError(
1588
+ `\u81EA\u5B9A\u4E49\u9002\u914D\u5668 "${providerConfig.adapter}" \u672A\u6B63\u786E\u5B9E\u73B0 ProviderAdapter \u63A5\u53E3`,
1589
+ providerName,
1590
+ "INVALID_ADAPTER"
1591
+ );
1592
+ }
1593
+ this.adapters.set(providerName, adapter);
1594
+ } catch (error) {
1595
+ if (error instanceof RegistryError) {
1596
+ throw error;
1597
+ }
1598
+ throw new RegistryError(
1599
+ `\u52A0\u8F7D\u81EA\u5B9A\u4E49\u9002\u914D\u5668\u5931\u8D25: ${providerConfig.adapter}`,
1600
+ providerName,
1601
+ "ADAPTER_LOAD_ERROR"
1602
+ );
1603
+ }
1604
+ } else if (!this.adapters.has(providerName)) {
1605
+ throw new RegistryError(
1606
+ `Provider "${providerName}" \u672A\u6CE8\u518C\u4E14\u672A\u6307\u5B9A\u81EA\u5B9A\u4E49\u9002\u914D\u5668`,
1607
+ providerName,
1608
+ "PROVIDER_NOT_FOUND"
1609
+ );
1610
+ }
846
1611
  }
847
1612
  }
848
1613
  /**
849
- * 发送聊天请求(非流式)
1614
+ * 初始化内置 Provider
1615
+ * 在首次使用时自动调用
1616
+ *
1617
+ * @example
1618
+ * ```typescript
1619
+ * // 通常不需要手动调用,会在首次使用时自动初始化
1620
+ * ProviderRegistry.initializeBuiltIn();
1621
+ * ```
850
1622
  */
851
- async chat(options) {
852
- const {
853
- model,
854
- messages,
855
- temperature = 0.7,
856
- maxTokens,
857
- reasoning
858
- } = options;
859
- const effectiveMaxTokens = getReasoningMaxTokens(reasoning, maxTokens);
860
- const body = {
861
- model,
862
- messages,
863
- temperature,
864
- stream: false
865
- };
866
- if (effectiveMaxTokens) {
867
- body.max_tokens = effectiveMaxTokens;
1623
+ static initializeBuiltIn() {
1624
+ if (this.initialized) {
1625
+ return;
868
1626
  }
869
- if (reasoning?.effort && reasoning.effort !== "off") {
870
- body.thinking = { type: "enabled" };
1627
+ const builtInAdapters = createBuiltInAdapters();
1628
+ for (const [type, adapter] of builtInAdapters) {
1629
+ if (!this.adapters.has(type)) {
1630
+ this.adapters.set(type, adapter);
1631
+ }
871
1632
  }
872
- const response = await fetch(`${this.baseUrl}/chat/completions`, {
873
- method: "POST",
874
- headers: {
875
- "Content-Type": "application/json",
876
- Authorization: `Bearer ${this.apiKey}`
877
- },
878
- body: JSON.stringify(body)
879
- });
880
- if (!response.ok) {
881
- const error = await response.text();
882
- throw new Error(`DeepSeek API error: ${response.status} ${error}`);
1633
+ this.initialized = true;
1634
+ }
1635
+ /**
1636
+ * 重置注册表(主要用于测试)
1637
+ * 清除所有已注册的适配器并重置初始化状态
1638
+ */
1639
+ static reset() {
1640
+ this.adapters.clear();
1641
+ this.initialized = false;
1642
+ }
1643
+ /**
1644
+ * 获取适配器数量(主要用于测试)
1645
+ *
1646
+ * @returns 已注册的适配器数量
1647
+ */
1648
+ static get size() {
1649
+ this.initializeBuiltIn();
1650
+ return this.adapters.size;
1651
+ }
1652
+ };
1653
+
1654
+ // src/config/types.ts
1655
+ var CONFIG_DEFAULTS = {
1656
+ /** 默认超时时间(毫秒) */
1657
+ timeout: 3e4,
1658
+ /** 默认重试次数 */
1659
+ retries: 3,
1660
+ /** 默认功能开关 */
1661
+ features: {
1662
+ streaming: true,
1663
+ reasoning: false
1664
+ }
1665
+ };
1666
+ var VALID_PROVIDERS = [
1667
+ "openrouter",
1668
+ "gemini",
1669
+ "groq",
1670
+ "huggingface",
1671
+ "modelscope",
1672
+ "deepseek",
1673
+ "poe",
1674
+ "nova"
1675
+ ];
1676
+
1677
+ // src/utils/config-validator.ts
1678
+ var VALID_PROVIDERS2 = [
1679
+ "openrouter",
1680
+ "gemini",
1681
+ "groq",
1682
+ "huggingface",
1683
+ "modelscope",
1684
+ "deepseek",
1685
+ "poe",
1686
+ "nova"
1687
+ ];
1688
+ var ConfigValidator = class _ConfigValidator {
1689
+ /**
1690
+ * 验证 Provider 配置
1691
+ *
1692
+ * @param config - 要验证的配置对象
1693
+ * @returns 验证结果
1694
+ *
1695
+ * @example
1696
+ * ```ts
1697
+ * const result = ConfigValidator.validate({
1698
+ * provider: 'openrouter',
1699
+ * credentials: { apiKey: 'sk-xxx' }
1700
+ * });
1701
+ *
1702
+ * if (!result.valid) {
1703
+ * console.error(result.errors);
1704
+ * }
1705
+ * ```
1706
+ */
1707
+ static validate(config) {
1708
+ const errors = [];
1709
+ if (!config || typeof config !== "object") {
1710
+ return {
1711
+ valid: false,
1712
+ errors: [
1713
+ {
1714
+ field: "",
1715
+ message: "\u914D\u7F6E\u5FC5\u987B\u662F\u4E00\u4E2A\u5BF9\u8C61",
1716
+ code: "INVALID_CONFIG_TYPE"
1717
+ }
1718
+ ]
1719
+ };
883
1720
  }
884
- const result = await response.json();
885
- const choice = result.choices?.[0];
886
- if (!choice) {
887
- throw new Error("No response from model");
1721
+ const cfg = config;
1722
+ if (!cfg.provider) {
1723
+ errors.push({
1724
+ field: "provider",
1725
+ message: "provider \u5B57\u6BB5\u662F\u5FC5\u586B\u7684",
1726
+ code: "MISSING_PROVIDER"
1727
+ });
1728
+ } else if (typeof cfg.provider !== "string") {
1729
+ errors.push({
1730
+ field: "provider",
1731
+ message: "provider \u5FC5\u987B\u662F\u5B57\u7B26\u4E32",
1732
+ code: "INVALID_PROVIDER_TYPE"
1733
+ });
1734
+ } else if (!VALID_PROVIDERS2.includes(cfg.provider)) {
1735
+ errors.push({
1736
+ field: "provider",
1737
+ message: `\u65E0\u6548\u7684 provider: ${cfg.provider}\uFF0C\u6709\u6548\u503C\u4E3A: ${VALID_PROVIDERS2.join(", ")}`,
1738
+ code: "INVALID_PROVIDER"
1739
+ });
1740
+ }
1741
+ if (!cfg.credentials) {
1742
+ errors.push({
1743
+ field: "credentials",
1744
+ message: "credentials \u5B57\u6BB5\u662F\u5FC5\u586B\u7684",
1745
+ code: "MISSING_CREDENTIALS"
1746
+ });
1747
+ } else if (typeof cfg.credentials !== "object") {
1748
+ errors.push({
1749
+ field: "credentials",
1750
+ message: "credentials \u5FC5\u987B\u662F\u4E00\u4E2A\u5BF9\u8C61",
1751
+ code: "INVALID_CREDENTIALS_TYPE"
1752
+ });
1753
+ } else {
1754
+ const creds = cfg.credentials;
1755
+ if (!creds.apiKey) {
1756
+ errors.push({
1757
+ field: "credentials.apiKey",
1758
+ message: "apiKey \u5B57\u6BB5\u662F\u5FC5\u586B\u7684",
1759
+ code: "MISSING_API_KEY"
1760
+ });
1761
+ } else if (typeof creds.apiKey !== "string") {
1762
+ errors.push({
1763
+ field: "credentials.apiKey",
1764
+ message: "apiKey \u5FC5\u987B\u662F\u5B57\u7B26\u4E32",
1765
+ code: "INVALID_API_KEY_TYPE"
1766
+ });
1767
+ } else if (creds.apiKey.trim() === "") {
1768
+ errors.push({
1769
+ field: "credentials.apiKey",
1770
+ message: "apiKey \u4E0D\u80FD\u4E3A\u7A7A",
1771
+ code: "EMPTY_API_KEY"
1772
+ });
1773
+ }
1774
+ if (creds.baseUrl !== void 0) {
1775
+ const urlResult = _ConfigValidator.validateUrl(creds.baseUrl);
1776
+ if (!urlResult.valid) {
1777
+ errors.push(
1778
+ ...urlResult.errors.map((e) => ({
1779
+ ...e,
1780
+ field: "credentials.baseUrl"
1781
+ }))
1782
+ );
1783
+ }
1784
+ }
1785
+ }
1786
+ if (cfg.options !== void 0) {
1787
+ if (typeof cfg.options !== "object") {
1788
+ errors.push({
1789
+ field: "options",
1790
+ message: "options \u5FC5\u987B\u662F\u4E00\u4E2A\u5BF9\u8C61",
1791
+ code: "INVALID_OPTIONS_TYPE"
1792
+ });
1793
+ } else {
1794
+ const opts = cfg.options;
1795
+ if (opts.timeout !== void 0) {
1796
+ if (typeof opts.timeout !== "number" || opts.timeout <= 0) {
1797
+ errors.push({
1798
+ field: "options.timeout",
1799
+ message: "timeout \u5FC5\u987B\u662F\u6B63\u6570",
1800
+ code: "INVALID_TIMEOUT"
1801
+ });
1802
+ }
1803
+ }
1804
+ if (opts.retries !== void 0) {
1805
+ if (typeof opts.retries !== "number" || opts.retries < 0 || !Number.isInteger(opts.retries)) {
1806
+ errors.push({
1807
+ field: "options.retries",
1808
+ message: "retries \u5FC5\u987B\u662F\u975E\u8D1F\u6574\u6570",
1809
+ code: "INVALID_RETRIES"
1810
+ });
1811
+ }
1812
+ }
1813
+ }
888
1814
  }
889
- const msg = choice.message;
890
- const reasoningContent = msg?.reasoning_content ?? null;
891
1815
  return {
892
- content: extractTextContent6(msg?.content),
893
- reasoning: reasoningContent ? extractTextContent6(reasoningContent) : null,
894
- model: result.model ?? model,
895
- usage: {
896
- promptTokens: result.usage?.prompt_tokens ?? 0,
897
- completionTokens: result.usage?.completion_tokens ?? 0,
898
- totalTokens: result.usage?.total_tokens ?? 0
899
- },
900
- finishReason: choice.finish_reason ?? null
1816
+ valid: errors.length === 0,
1817
+ errors
901
1818
  };
902
1819
  }
903
1820
  /**
904
- * 发送流式聊天请求
1821
+ * 验证 API Key 格式
1822
+ * 不同 Provider 可能有不同的 API Key 格式要求
1823
+ *
1824
+ * @param apiKey - API 密钥
1825
+ * @param provider - Provider 类型
1826
+ * @returns 验证结果
905
1827
  */
906
- async *chatStream(options) {
907
- const {
908
- model,
909
- messages,
910
- temperature = 0.7,
911
- maxTokens,
912
- reasoning
913
- } = options;
914
- const effectiveMaxTokens = getReasoningMaxTokens(reasoning, maxTokens);
915
- const body = {
916
- model,
917
- messages,
918
- temperature,
919
- stream: true
1828
+ static validateApiKey(apiKey, provider) {
1829
+ const errors = [];
1830
+ if (!apiKey || typeof apiKey !== "string") {
1831
+ errors.push({
1832
+ field: "apiKey",
1833
+ message: "apiKey \u5FC5\u987B\u662F\u975E\u7A7A\u5B57\u7B26\u4E32",
1834
+ code: "INVALID_API_KEY"
1835
+ });
1836
+ return { valid: false, errors };
1837
+ }
1838
+ const trimmed = apiKey.trim();
1839
+ if (trimmed === "") {
1840
+ errors.push({
1841
+ field: "apiKey",
1842
+ message: "apiKey \u4E0D\u80FD\u4E3A\u7A7A",
1843
+ code: "EMPTY_API_KEY"
1844
+ });
1845
+ return { valid: false, errors };
1846
+ }
1847
+ switch (provider) {
1848
+ case "openrouter":
1849
+ if (!trimmed.startsWith("sk-")) {
1850
+ errors.push({
1851
+ field: "apiKey",
1852
+ message: "OpenRouter API Key \u5E94\u4EE5 sk- \u5F00\u5934",
1853
+ code: "INVALID_API_KEY_FORMAT"
1854
+ });
1855
+ }
1856
+ break;
1857
+ case "gemini":
1858
+ if (!trimmed.startsWith("AI")) {
1859
+ errors.push({
1860
+ field: "apiKey",
1861
+ message: "Gemini API Key \u683C\u5F0F\u53EF\u80FD\u4E0D\u6B63\u786E",
1862
+ code: "INVALID_API_KEY_FORMAT"
1863
+ });
1864
+ }
1865
+ break;
1866
+ // 其他 Provider 暂不做特定格式验证
1867
+ default:
1868
+ break;
1869
+ }
1870
+ return {
1871
+ valid: errors.length === 0,
1872
+ errors
920
1873
  };
921
- if (effectiveMaxTokens) {
922
- body.max_tokens = effectiveMaxTokens;
1874
+ }
1875
+ /**
1876
+ * 验证 URL 格式
1877
+ *
1878
+ * @param url - 要验证的 URL
1879
+ * @returns 验证结果
1880
+ */
1881
+ static validateUrl(url) {
1882
+ const errors = [];
1883
+ if (typeof url !== "string") {
1884
+ errors.push({
1885
+ field: "url",
1886
+ message: "URL \u5FC5\u987B\u662F\u5B57\u7B26\u4E32",
1887
+ code: "INVALID_URL_TYPE"
1888
+ });
1889
+ return { valid: false, errors };
923
1890
  }
924
- if (reasoning?.effort && reasoning.effort !== "off") {
925
- body.thinking = { type: "enabled" };
1891
+ const trimmed = url.trim();
1892
+ if (trimmed === "") {
1893
+ errors.push({
1894
+ field: "url",
1895
+ message: "URL \u4E0D\u80FD\u4E3A\u7A7A",
1896
+ code: "EMPTY_URL"
1897
+ });
1898
+ return { valid: false, errors };
926
1899
  }
927
- const response = await fetch(`${this.baseUrl}/chat/completions`, {
928
- method: "POST",
929
- headers: {
930
- "Content-Type": "application/json",
931
- Authorization: `Bearer ${this.apiKey}`
1900
+ try {
1901
+ const parsed = new URL(trimmed);
1902
+ if (!["http:", "https:"].includes(parsed.protocol)) {
1903
+ errors.push({
1904
+ field: "url",
1905
+ message: "URL \u5FC5\u987B\u4F7F\u7528 http \u6216 https \u534F\u8BAE",
1906
+ code: "INVALID_URL_PROTOCOL"
1907
+ });
1908
+ }
1909
+ } catch {
1910
+ errors.push({
1911
+ field: "url",
1912
+ message: "URL \u683C\u5F0F\u65E0\u6548",
1913
+ code: "INVALID_URL_FORMAT"
1914
+ });
1915
+ }
1916
+ return {
1917
+ valid: errors.length === 0,
1918
+ errors
1919
+ };
1920
+ }
1921
+ };
1922
+
1923
+ // src/config/config-manager.ts
1924
+ var ConfigManager = class _ConfigManager {
1925
+ /**
1926
+ * 验证配置
1927
+ * 检查配置是否符合 UnifiedProviderConfig 格式要求
1928
+ *
1929
+ * @param config - 要验证的配置对象
1930
+ * @returns 验证结果
1931
+ *
1932
+ * @example
1933
+ * ```ts
1934
+ * const result = ConfigManager.validate({
1935
+ * provider: 'openrouter',
1936
+ * credentials: { apiKey: 'sk-xxx' }
1937
+ * });
1938
+ *
1939
+ * if (!result.valid) {
1940
+ * console.error(result.errors);
1941
+ * }
1942
+ * ```
1943
+ */
1944
+ static validate(config) {
1945
+ return ConfigValidator.validate(config);
1946
+ }
1947
+ /**
1948
+ * 应用默认值
1949
+ * 为缺失的可选字段填充默认值
1950
+ *
1951
+ * @param config - 部分配置对象
1952
+ * @returns 填充默认值后的完整配置
1953
+ *
1954
+ * @example
1955
+ * ```ts
1956
+ * const fullConfig = ConfigManager.applyDefaults({
1957
+ * provider: 'openrouter',
1958
+ * credentials: { apiKey: 'sk-xxx' }
1959
+ * });
1960
+ * // fullConfig.options.timeout === 30000
1961
+ * // fullConfig.options.retries === 3
1962
+ * ```
1963
+ */
1964
+ static applyDefaults(config) {
1965
+ if (!config.provider || !config.credentials?.apiKey) {
1966
+ throw new Error("\u914D\u7F6E\u7F3A\u5C11\u5FC5\u586B\u5B57\u6BB5: provider \u548C credentials.apiKey");
1967
+ }
1968
+ return {
1969
+ provider: config.provider,
1970
+ adapter: config.adapter,
1971
+ credentials: {
1972
+ apiKey: config.credentials.apiKey,
1973
+ baseUrl: config.credentials.baseUrl
932
1974
  },
933
- body: JSON.stringify(body)
1975
+ options: {
1976
+ timeout: config.options?.timeout ?? CONFIG_DEFAULTS.timeout,
1977
+ retries: config.options?.retries ?? CONFIG_DEFAULTS.retries,
1978
+ headers: config.options?.headers ?? {}
1979
+ },
1980
+ features: {
1981
+ streaming: config.features?.streaming ?? CONFIG_DEFAULTS.features.streaming,
1982
+ reasoning: config.features?.reasoning ?? CONFIG_DEFAULTS.features.reasoning
1983
+ }
1984
+ };
1985
+ }
1986
+ /**
1987
+ * 合并环境变量
1988
+ * 将 ${ENV_VAR} 格式的占位符替换为实际环境变量值
1989
+ *
1990
+ * @param config - 包含环境变量占位符的配置
1991
+ * @returns 替换后的配置
1992
+ *
1993
+ * @example
1994
+ * ```ts
1995
+ * // 假设 process.env.OPENROUTER_API_KEY = 'sk-xxx'
1996
+ * const config = ConfigManager.mergeWithEnv({
1997
+ * provider: 'openrouter',
1998
+ * credentials: { apiKey: '${OPENROUTER_API_KEY}' }
1999
+ * });
2000
+ * // config.credentials.apiKey === 'sk-xxx'
2001
+ * ```
2002
+ */
2003
+ static mergeWithEnv(config) {
2004
+ const result = JSON.parse(JSON.stringify(config));
2005
+ const replaceEnvVars = (obj) => {
2006
+ for (const key of Object.keys(obj)) {
2007
+ const value = obj[key];
2008
+ if (typeof value === "string") {
2009
+ obj[key] = _ConfigManager.replaceEnvPlaceholders(value);
2010
+ } else if (value && typeof value === "object" && !Array.isArray(value)) {
2011
+ replaceEnvVars(value);
2012
+ }
2013
+ }
2014
+ };
2015
+ replaceEnvVars(result);
2016
+ return result;
2017
+ }
2018
+ /**
2019
+ * 替换字符串中的环境变量占位符
2020
+ * 支持 ${ENV_VAR} 格式
2021
+ *
2022
+ * @param str - 包含占位符的字符串
2023
+ * @returns 替换后的字符串
2024
+ */
2025
+ static replaceEnvPlaceholders(str) {
2026
+ const envVarPattern = /\$\{([^}]+)\}/g;
2027
+ return str.replace(envVarPattern, (match, envVarName) => {
2028
+ const envValue = process.env[envVarName];
2029
+ return envValue !== void 0 ? envValue : match;
934
2030
  });
935
- if (!response.ok) {
936
- const error = await response.text();
937
- throw new Error(`DeepSeek API error: ${response.status} ${error}`);
2031
+ }
2032
+ /**
2033
+ * 从旧格式配置转换为新格式
2034
+ * 保持向后兼容性
2035
+ *
2036
+ * @param config - 旧格式的 Provider 配置
2037
+ * @returns 新格式的统一配置
2038
+ *
2039
+ * @example
2040
+ * ```ts
2041
+ * const newConfig = ConfigManager.fromLegacyConfig({
2042
+ * provider: 'openrouter',
2043
+ * apiKey: 'sk-xxx',
2044
+ * baseUrl: 'https://api.example.com'
2045
+ * });
2046
+ * // newConfig.credentials.apiKey === 'sk-xxx'
2047
+ * // newConfig.credentials.baseUrl === 'https://api.example.com'
2048
+ * ```
2049
+ */
2050
+ static fromLegacyConfig(config) {
2051
+ if (!config.provider) {
2052
+ throw new Error("\u65E7\u683C\u5F0F\u914D\u7F6E\u7F3A\u5C11 provider \u5B57\u6BB5");
938
2053
  }
939
- const reader = response.body?.getReader();
940
- if (!reader) {
941
- throw new Error("No response body");
2054
+ if (!config.apiKey) {
2055
+ throw new Error("\u65E7\u683C\u5F0F\u914D\u7F6E\u7F3A\u5C11 apiKey \u5B57\u6BB5");
942
2056
  }
943
- const decoder = new TextDecoder();
944
- let buffer = "";
945
- try {
946
- while (true) {
947
- const { done, value } = await reader.read();
948
- if (done) break;
949
- buffer += decoder.decode(value, { stream: true });
950
- const lines = buffer.split("\n");
951
- buffer = lines.pop() ?? "";
952
- for (const line of lines) {
953
- const trimmed = line.trim();
954
- if (!trimmed || trimmed === "data: [DONE]") continue;
955
- if (!trimmed.startsWith("data: ")) continue;
956
- try {
957
- const data = JSON.parse(trimmed.slice(6));
958
- const delta = data.choices?.[0]?.delta;
959
- if (!delta) continue;
960
- if (delta.reasoning_content) {
961
- yield {
962
- type: "reasoning",
963
- text: extractTextContent6(delta.reasoning_content)
964
- };
965
- }
966
- if (delta.content) {
967
- yield {
968
- type: "content",
969
- text: extractTextContent6(delta.content)
970
- };
971
- }
972
- } catch {
973
- }
974
- }
2057
+ if (!VALID_PROVIDERS.includes(config.provider)) {
2058
+ throw new Error(
2059
+ `\u65E0\u6548\u7684 provider: ${config.provider}\uFF0C\u6709\u6548\u503C\u4E3A: ${VALID_PROVIDERS.join(", ")}`
2060
+ );
2061
+ }
2062
+ return _ConfigManager.applyDefaults({
2063
+ provider: config.provider,
2064
+ credentials: {
2065
+ apiKey: config.apiKey,
2066
+ baseUrl: config.baseUrl
975
2067
  }
976
- } finally {
977
- reader.releaseLock();
2068
+ });
2069
+ }
2070
+ /**
2071
+ * 检查配置是否为旧格式
2072
+ *
2073
+ * @param config - 要检查的配置对象
2074
+ * @returns 是否为旧格式
2075
+ */
2076
+ static isLegacyConfig(config) {
2077
+ if (!config || typeof config !== "object") {
2078
+ return false;
2079
+ }
2080
+ const cfg = config;
2081
+ return typeof cfg.provider === "string" && typeof cfg.apiKey === "string" && cfg.credentials === void 0;
2082
+ }
2083
+ /**
2084
+ * 智能转换配置
2085
+ * 自动检测配置格式并转换为统一格式
2086
+ *
2087
+ * @param config - 任意格式的配置
2088
+ * @returns 统一格式的配置
2089
+ */
2090
+ static normalize(config) {
2091
+ if (_ConfigManager.isLegacyConfig(config)) {
2092
+ return _ConfigManager.fromLegacyConfig(config);
978
2093
  }
2094
+ return _ConfigManager.applyDefaults(
2095
+ config
2096
+ );
2097
+ }
2098
+ /**
2099
+ * 获取指定 Provider 的默认基础 URL
2100
+ *
2101
+ * @param provider - Provider 类型
2102
+ * @returns 默认基础 URL
2103
+ */
2104
+ static getDefaultBaseUrl(provider) {
2105
+ const defaultUrls = {
2106
+ openrouter: "https://openrouter.ai/api/v1",
2107
+ gemini: "https://generativelanguage.googleapis.com/v1beta",
2108
+ groq: "https://api.groq.com/openai/v1",
2109
+ huggingface: "https://api-inference.huggingface.co",
2110
+ modelscope: "https://dashscope.aliyuncs.com/compatible-mode/v1",
2111
+ deepseek: "https://api.deepseek.com/v1",
2112
+ poe: "https://api.poe.com/bot",
2113
+ nova: "https://bedrock-runtime.us-east-1.amazonaws.com"
2114
+ };
2115
+ return defaultUrls[provider];
979
2116
  }
980
2117
  };
981
2118
 
982
2119
  // src/providers/__factory__.ts
983
- function createProvider(config) {
984
- const { provider, apiKey, baseUrl } = config;
985
- switch (provider) {
986
- case "openrouter":
987
- return new OpenRouterProvider(apiKey);
988
- case "gemini":
989
- return new GeminiProvider(baseUrl ? { apiKey, baseUrl } : apiKey);
990
- case "groq":
991
- return new GroqProvider(baseUrl ? { apiKey, baseUrl } : apiKey);
992
- case "huggingface":
993
- return new HuggingFaceProvider(baseUrl ? { apiKey, baseUrl } : apiKey);
994
- case "modelscope":
995
- return new ModelScopeProvider(baseUrl ? { apiKey, baseUrl } : apiKey);
996
- case "deepseek":
997
- return new DeepSeekProvider(baseUrl ? { apiKey, baseUrl } : apiKey);
998
- default:
999
- throw new Error(`Unknown provider: ${provider}`);
2120
+ var AdapterBasedProvider = class {
2121
+ constructor(adapter, apiKey, baseUrl) {
2122
+ this.adapter = adapter;
2123
+ this.apiKey = apiKey;
2124
+ this.baseUrl = baseUrl;
2125
+ this.name = adapter.name;
2126
+ }
2127
+ name;
2128
+ /**
2129
+ * 获取客户端实例
2130
+ */
2131
+ getClient() {
2132
+ return this.adapter.createClient({
2133
+ apiKey: this.apiKey,
2134
+ baseUrl: this.baseUrl ?? this.adapter.defaultBaseUrl
2135
+ });
2136
+ }
2137
+ /**
2138
+ * 发送聊天请求(非流式)
2139
+ */
2140
+ async chat(options) {
2141
+ const client = this.getClient();
2142
+ const baseUrl = this.baseUrl ?? this.adapter.defaultBaseUrl;
2143
+ const endpoint = this.adapter.getEndpointUrl(baseUrl);
2144
+ const endpointPath = endpoint.replace(baseUrl, "");
2145
+ const body = this.adapter.buildChatRequest(options, false);
2146
+ const response = await client.chat(endpointPath, body);
2147
+ return this.adapter.parseChatResponse(response, options.model);
2148
+ }
2149
+ /**
2150
+ * 发送流式聊天请求
2151
+ */
2152
+ async *chatStream(options) {
2153
+ const client = this.getClient();
2154
+ const baseUrl = this.baseUrl ?? this.adapter.defaultBaseUrl;
2155
+ const endpoint = this.adapter.getEndpointUrl(baseUrl);
2156
+ const endpointPath = endpoint.replace(baseUrl, "");
2157
+ const body = this.adapter.buildChatRequest(options, true);
2158
+ const response = await client.chatStream(endpointPath, body);
2159
+ const { StreamProcessor: StreamProcessor2 } = (init_stream_processor(), __toCommonJS(stream_processor_exports));
2160
+ yield* StreamProcessor2.processStream(
2161
+ response,
2162
+ (delta) => this.adapter.extractStreamChunk(delta)
2163
+ );
2164
+ }
2165
+ /**
2166
+ * 简单对话:单轮问答
2167
+ */
2168
+ async ask(model, question, options) {
2169
+ const result = await this.chat({
2170
+ model,
2171
+ messages: [{ role: "user", content: question }],
2172
+ ...options
2173
+ });
2174
+ return result.content;
2175
+ }
2176
+ /**
2177
+ * 带系统提示的对话
2178
+ */
2179
+ async askWithSystem(model, systemPrompt, userMessage, options) {
2180
+ const result = await this.chat({
2181
+ model,
2182
+ messages: [
2183
+ { role: "system", content: systemPrompt },
2184
+ { role: "user", content: userMessage }
2185
+ ],
2186
+ ...options
2187
+ });
2188
+ return result.content;
1000
2189
  }
2190
+ };
2191
+ function createProvider(config) {
2192
+ const unifiedConfig = ConfigManager.fromLegacyConfig(config);
2193
+ const adapter = ProviderRegistry.getAdapter(unifiedConfig.provider);
2194
+ return new AdapterBasedProvider(
2195
+ adapter,
2196
+ unifiedConfig.credentials.apiKey,
2197
+ unifiedConfig.credentials.baseUrl
2198
+ );
1001
2199
  }
1002
2200
  var ai = {
1003
2201
  openrouter: (apiKey, baseUrl) => createProvider({ provider: "openrouter", apiKey, baseUrl }),
@@ -1005,17 +2203,1165 @@ var ai = {
1005
2203
  groq: (apiKey, baseUrl) => createProvider({ provider: "groq", apiKey, baseUrl }),
1006
2204
  huggingface: (apiKey, baseUrl) => createProvider({ provider: "huggingface", apiKey, baseUrl }),
1007
2205
  modelscope: (apiKey, baseUrl) => createProvider({ provider: "modelscope", apiKey, baseUrl }),
1008
- deepseek: (apiKey, baseUrl) => createProvider({ provider: "deepseek", apiKey, baseUrl })
2206
+ deepseek: (apiKey, baseUrl) => createProvider({ provider: "deepseek", apiKey, baseUrl }),
2207
+ poe: (apiKey, baseUrl) => createProvider({ provider: "poe", apiKey, baseUrl }),
2208
+ nova: (apiKey, baseUrl) => createProvider({ provider: "nova", apiKey, baseUrl })
2209
+ };
2210
+
2211
+ // src/providers/__model-detection__.ts
2212
/**
 * Regexes suggesting a model emits a reasoning/thinking phase
 * before (or instead of) its final answer. Ordered roughly from
 * explicit markers to a lowest-priority catch-all.
 */
var THINKING_MODEL_PATTERNS = [
  // Explicit thinking / reasoning markers in the model id.
  /[-_]think(?:ing)?(?:[-_:]|$)/i,
  /[-_]reason(?:ing)?(?:[-_:]|$)/i,
  /[-_]cot(?:[-_:]|$)/i, // chain-of-thought
  /[-_]reflect(?:ion)?(?:[-_:]|$)/i, // reflection models
  // Well-known reasoning model families.
  /\bo1[-_]?/i, // OpenAI o1
  /\bo3[-_]?/i, // OpenAI o3
  /\br1[-_]?/i, // DeepSeek R1 etc.
  /\bqwq\b/i, // Qwen QwQ
  /\bn1[-_]?/i, // nex-n1 etc.
  // Generic keyword, lowest priority.
  /think/i
];
2237
/**
 * Regexes suggesting a model answers directly, without a visible
 * thinking phase (chat / instruct / speed-optimized variants).
 */
var DIRECT_ANSWER_PATTERNS = [
  /[-_]chat$/i, // trailing "-chat"
  /[-_]instruct/i,
  /[-_]turbo/i,
  /[-_]flash/i, // e.g. gemini-flash fast models
  /[-_]lite[-_v]/i, // "lite" versions (not trailing "lite", avoids false hits)
  /[-_]fast/i
];
2251
/** Model id patterns for families known to need extra token headroom. */
var PROBLEMATIC_MODEL_PATTERNS = [
  /nova[-_]?\d*[-_]lite/i // Amazon Nova Lite family
];
/**
 * True when the model id belongs to a family with known quirks.
 * @param modelId - model identifier string
 */
function isProblematicModel(modelId) {
  for (const pattern of PROBLEMATIC_MODEL_PATTERNS) {
    if (pattern.test(modelId)) return true;
  }
  return false;
}
2258
/**
 * Heuristically classify a model purely from its id string.
 * Thinking markers take precedence over direct-answer markers.
 * @returns { behavior, supportsReasoningConfig, recommendedMinTokens, confidence, detectedBy }
 */
function detectByModelName(modelId) {
  const id = modelId.toLowerCase();
  const looksThinking = THINKING_MODEL_PATTERNS.some((p) => p.test(id));
  const looksDirect = DIRECT_ANSWER_PATTERNS.some((p) => p.test(id));
  const quirky = isProblematicModel(id);
  if (looksThinking) {
    return {
      behavior: "thinking-first",
      supportsReasoningConfig: true,
      recommendedMinTokens: 500,
      confidence: 0.7,
      detectedBy: "pattern"
    };
  }
  if (looksDirect) {
    return {
      behavior: "direct-answer",
      supportsReasoningConfig: false,
      // Quirky model families need more headroom even for direct answers.
      recommendedMinTokens: quirky ? 300 : 100,
      confidence: 0.6,
      detectedBy: "pattern"
    };
  }
  return {
    behavior: "unknown",
    supportsReasoningConfig: false,
    recommendedMinTokens: quirky ? 300 : 200,
    confidence: 0.3,
    detectedBy: "pattern"
  };
}
2294
/** Runtime-learned model characteristics keyed by model id. */
var modelCharacteristicsCache = /* @__PURE__ */ new Map();
/**
 * Classify a model from an actual chat result and cache the verdict.
 * Runtime observation outranks name-based heuristics (confidence 0.9).
 * @param modelId - model identifier
 * @param result - chat result with optional `reasoning` and `content`
 */
function detectByResponse(modelId, result) {
  const reasoningLength = result.reasoning?.length ?? 0;
  const contentLength = result.content?.length ?? 0;
  const hasReasoning = !!result.reasoning && result.reasoning.length > 0;
  const hasContent = !!result.content && result.content.trim().length > 0;
  let behavior;
  let supportsReasoningConfig = false;
  let recommendedMinTokens = 200;
  if (hasReasoning && !hasContent) {
    // Reasoning only: thinking-first model (or the answer was truncated).
    behavior = "thinking-first";
    supportsReasoningConfig = true;
    recommendedMinTokens = Math.max(500, reasoningLength + 200);
  } else if (hasReasoning && hasContent) {
    if (reasoningLength > contentLength * 2) {
      // Reasoning dominates the reply: still thinking-first.
      behavior = "thinking-first";
      supportsReasoningConfig = true;
      recommendedMinTokens = 500;
    } else {
      behavior = "hybrid";
      supportsReasoningConfig = true;
      recommendedMinTokens = 300;
    }
  } else if (hasContent && !hasReasoning) {
    behavior = "direct-answer";
    supportsReasoningConfig = false;
    recommendedMinTokens = 100;
  } else {
    // Neither field populated — can't tell anything useful.
    behavior = "unknown";
    recommendedMinTokens = 500;
  }
  const characteristics = {
    behavior,
    supportsReasoningConfig,
    recommendedMinTokens,
    confidence: 0.9,
    detectedBy: "runtime"
  };
  modelCharacteristicsCache.set(modelId, characteristics);
  return characteristics;
}
2335
/**
 * Preferred lookup: runtime-cached verdict first, name heuristics otherwise.
 * A cache hit is re-tagged so callers can tell where the data came from.
 */
function getModelCharacteristics(modelId) {
  const cached = modelCharacteristicsCache.get(modelId);
  return cached ? { ...cached, detectedBy: "cache" } : detectByModelName(modelId);
}
2342
/**
 * Default fallback behaviour when a reply carries reasoning but no content.
 * Auto-retry stays off by default to avoid surprise token spend.
 */
var DEFAULT_FALLBACK_CONFIG = {
  enabled: true,
  returnReasoningAsContent: true,
  extractConclusionFromReasoning: true,
  autoRetryWithMoreTokens: false,
  retryTokenIncrement: 300,
  maxRetries: 2
};
2351
/**
 * Try to pull a short final answer out of free-form reasoning text.
 * @param reasoning - raw reasoning string (may be empty/nullish)
 * @returns the extracted conclusion, or null when nothing is found
 */
function extractConclusionFromReasoning(reasoning) {
  if (!reasoning) return null;
  // Ordered from most explicit conclusion marker to least.
  const markers = [
    /(?:therefore|thus|so|hence|finally|in conclusion|the answer is|result is)[:\s]*(.+?)(?:\n|$)/i,
    /(?:答案是|结论是|因此|所以|最终)[::\s]*(.+?)(?:\n|$)/,
    /(?:\*\*answer\*\*|\*\*result\*\*)[:\s]*(.+?)(?:\n|$)/i,
    /=\s*(.+?)(?:\n|$)/ // right-hand side of a math equation
  ];
  for (const marker of markers) {
    const hit = reasoning.match(marker);
    if (hit && hit[1]) {
      return hit[1].trim();
    }
  }
  // Last resort: a reasonably short closing paragraph.
  const blocks = reasoning.split(/\n\n+/).filter((p) => p.trim());
  if (blocks.length > 0) {
    const tail = blocks[blocks.length - 1].trim();
    if (tail.length < 500) {
      return tail;
    }
  }
  return null;
}
2375
/**
 * Decide what to surface as `content` when a reply may be reasoning-only.
 * @param result - chat result with optional `content` / `reasoning`
 * @param config - partial override of DEFAULT_FALLBACK_CONFIG
 * @returns { content, didFallback, fallbackReason?, originalReasoning }
 */
function applyFallbackStrategy(result, config = {}) {
  const merged = { ...DEFAULT_FALLBACK_CONFIG, ...config };
  const originalReasoning = result.reasoning;
  // Non-empty content needs no fallback at all.
  if (result.content && result.content.trim().length > 0) {
    return { content: result.content, didFallback: false, originalReasoning };
  }
  if (!merged.enabled) {
    return { content: "", didFallback: false, originalReasoning };
  }
  // First choice: pull a short conclusion out of the reasoning text.
  if (merged.extractConclusionFromReasoning && result.reasoning) {
    const conclusion = extractConclusionFromReasoning(result.reasoning);
    if (conclusion) {
      return {
        content: conclusion,
        didFallback: true,
        fallbackReason: "extracted_conclusion_from_reasoning",
        originalReasoning
      };
    }
  }
  // Second choice: hand the raw reasoning back as the answer.
  if (merged.returnReasoningAsContent && result.reasoning) {
    return {
      content: result.reasoning,
      didFallback: true,
      fallbackReason: "returned_reasoning_as_content",
      originalReasoning
    };
  }
  return {
    content: "",
    didFallback: false,
    fallbackReason: "no_fallback_available",
    originalReasoning
  };
}
2417
/**
 * Raise maxTokens for thinking-first models so the final answer
 * is not truncated by the reasoning phase. Other models pass through.
 */
function adjustOptionsForModel(modelId, options) {
  const traits = getModelCharacteristics(modelId);
  if (traits.behavior !== "thinking-first") {
    return options;
  }
  const current = options.maxTokens;
  if (current && current >= traits.recommendedMinTokens) {
    return options;
  }
  return {
    ...options,
    maxTokens: Math.max(current ?? 0, traits.recommendedMinTokens)
  };
}
2430
/**
 * Scenario presets for thinking-first models; {} for everything else.
 * @param scenario - 'simple' | 'math' | 'reasoning' | 'fast'
 */
function getRecommendedConfig(modelId, scenario = "simple") {
  if (getModelCharacteristics(modelId).behavior !== "thinking-first") {
    return {};
  }
  const presets = {
    simple: { maxTokens: 300, reasoning: { effort: "low" } },
    math: { maxTokens: 600, reasoning: { effort: "high" } },
    reasoning: { maxTokens: 800, reasoning: { effort: "medium" } },
    fast: { maxTokens: 200, reasoning: { effort: "off" } }
  };
  // Unknown scenarios fall back to the simple preset.
  return presets[scenario] ?? presets.simple;
}
2455
/** Public facade bundling all model-detection helpers. */
var ModelDetection = {
  detectByModelName,
  detectByResponse,
  getModelCharacteristics,
  applyFallbackStrategy,
  adjustOptionsForModel,
  getRecommendedConfig,
  extractConclusionFromReasoning,
  isProblematicModel,
  /** Drop all runtime-learned characteristics. */
  clearCache() {
    modelCharacteristicsCache.clear();
  }
};
2466
+
2467
+ // src/providers/__base__.ts
2468
var BaseProvider = class {
  /** Fallback-strategy configuration (see DEFAULT_FALLBACK_CONFIG). */
  fallbackConfig = DEFAULT_FALLBACK_CONFIG;
  /** Whether maxTokens auto-adjustment is applied before requests. */
  autoAdjustEnabled = true;
  /**
   * Merge partial fallback settings into the current configuration.
   * @returns this (chainable)
   */
  configureFallback(config) {
    this.fallbackConfig = { ...this.fallbackConfig, ...config };
    return this;
  }
  /**
   * Toggle automatic parameter adjustment.
   * @returns this (chainable)
   */
  setAutoAdjust(enabled) {
    this.autoAdjustEnabled = enabled;
    return this;
  }
  /** Expose detection results for a given model id. */
  getModelCharacteristics(modelId) {
    return ModelDetection.getModelCharacteristics(modelId);
  }
  /**
   * chat() plus auto-adjustment of options and runtime learning of
   * the model's observed behaviour.
   */
  async chatSmart(options) {
    const prepared = this.autoAdjustEnabled ? ModelDetection.adjustOptionsForModel(options.model, options) : options;
    const result = await this.chat(prepared);
    // Feed the observed behaviour back into the detection cache.
    ModelDetection.detectByResponse(options.model, result);
    return result;
  }
  /**
   * Shared core of ask()/askWithSystem(): optional auto-adjust,
   * runtime detection, then the fallback strategy for empty content.
   */
  async #askCore(model, messages, options) {
    const {
      fallback,
      autoAdjust = this.autoAdjustEnabled,
      ...chatOptions
    } = options ?? {};
    let request = { model, messages, ...chatOptions };
    if (autoAdjust) {
      request = ModelDetection.adjustOptionsForModel(model, request);
    }
    const result = await this.chat(request);
    ModelDetection.detectByResponse(model, result);
    const fallbackResult = ModelDetection.applyFallbackStrategy(result, {
      ...this.fallbackConfig,
      ...fallback
    });
    return fallbackResult.content;
  }
  /**
   * Single-turn question → answer. For thinking models, content may be
   * recovered from reasoning via the fallback strategy.
   */
  async ask(model, question, options) {
    return this.#askCore(model, [{ role: "user", content: question }], options);
  }
  /**
   * Single-turn question with a system prompt; same smart handling as ask().
   */
  async askWithSystem(model, systemPrompt, userMessage, options) {
    return this.#askCore(
      model,
      [
        { role: "system", content: systemPrompt },
        { role: "user", content: userMessage }
      ],
      options
    );
  }
  /**
   * Scenario-tuned ask(): 'simple' (default), 'math', 'reasoning', 'fast'.
   * Recommended preset is applied first, explicit options win.
   */
  async askWithScenario(model, question, scenario = "simple", options) {
    const recommended = ModelDetection.getRecommendedConfig(model, scenario);
    return this.ask(model, question, { ...recommended, ...options });
  }
};
2587
+
2588
+ // src/providers/__index__.ts
2589
+ init_types();
2590
+
2591
+ // src/providers/openrouter.ts
2592
+ init_http_provider_client();
2593
+ init_stream_processor();
2594
var OpenRouterProvider = class extends BaseProvider {
  name = "openrouter";
  adapter;
  client;
  baseUrl;
  apiKey;
  /**
   * @param apiKey - OpenRouter API key
   * @param baseUrl - optional override for the API root
   */
  constructor(apiKey, baseUrl) {
    super();
    this.apiKey = apiKey;
    this.adapter = new OpenRouterAdapter();
    this.baseUrl = baseUrl ?? this.adapter.defaultBaseUrl;
    this.client = new HttpProviderClient({ apiKey, baseUrl: this.baseUrl });
  }
  /** Relative path of the chat endpoint under baseUrl. */
  #chatPath() {
    return this.adapter.getEndpointUrl(this.baseUrl).replace(this.baseUrl, "");
  }
  /** Non-streaming chat completion. */
  async chat(options) {
    const payload = this.adapter.buildChatRequest(options, false);
    const raw = await this.client.chat(this.#chatPath(), payload);
    return this.adapter.parseChatResponse(raw, options.model);
  }
  /** Streaming chat completion; yields parsed chunks. */
  async *chatStream(options) {
    const payload = this.adapter.buildChatRequest(options, true);
    const raw = await this.client.chatStream(this.#chatPath(), payload);
    yield* StreamProcessor.processStream(
      raw,
      (delta) => this.adapter.extractStreamChunk(delta)
    );
  }
  /**
   * List available models straight from the OpenRouter REST API.
   * Deliberately bypasses the adapter and talks to `${baseUrl}/models`.
   * @throws Error when the HTTP response is not ok
   */
  async listModels() {
    const response = await fetch(`${this.baseUrl}/models`, {
      headers: {
        Authorization: `Bearer ${this.apiKey}`,
        "Content-Type": "application/json"
      }
    });
    if (!response.ok) {
      throw new Error(
        `Failed to fetch models: ${response.status} ${response.statusText}`
      );
    }
    const result = await response.json();
    // Normalize snake_case API fields into the library's camelCase shape.
    return (result.data ?? []).map((m) => ({
      id: m.id,
      canonicalSlug: m.canonical_slug ?? m.id,
      name: m.name,
      description: m.description ?? "",
      created: m.created ?? 0,
      pricing: {
        prompt: m.pricing?.prompt ?? "0",
        completion: m.pricing?.completion ?? "0",
        request: m.pricing?.request ?? "0",
        image: m.pricing?.image ?? "0"
      },
      contextLength: m.context_length ?? 0,
      architecture: {
        modality: m.architecture?.modality ?? "",
        inputModalities: m.architecture?.input_modalities ?? [],
        outputModalities: m.architecture?.output_modalities ?? [],
        tokenizer: m.architecture?.tokenizer ?? "",
        instructType: m.architecture?.instruct_type ?? ""
      },
      supportedParameters: m.supported_parameters ?? []
    }));
  }
};
2674
+
2675
+ // src/providers/modelscope.ts
2676
+ init_http_provider_client();
2677
+ init_stream_processor();
2678
var ModelScopeProvider = class extends BaseProvider {
  name = "modelscope";
  adapter;
  client;
  baseUrl;
  /**
   * @param config - API key string, or { apiKey, baseUrl } object
   */
  constructor(config) {
    super();
    this.adapter = new ModelScopeAdapter();
    // Accept both the shorthand string form and the object form.
    const normalized = typeof config === "string" ? { apiKey: config } : config;
    this.baseUrl = normalized.baseUrl ?? this.adapter.defaultBaseUrl;
    this.client = new HttpProviderClient({
      apiKey: normalized.apiKey,
      baseUrl: this.baseUrl
    });
  }
  /** Relative path of the chat endpoint under baseUrl. */
  #chatPath() {
    return this.adapter.getEndpointUrl(this.baseUrl).replace(this.baseUrl, "");
  }
  /** Non-streaming chat completion. */
  async chat(options) {
    const payload = this.adapter.buildChatRequest(options, false);
    const raw = await this.client.chat(this.#chatPath(), payload);
    return this.adapter.parseChatResponse(raw, options.model);
  }
  /** Streaming chat completion; yields parsed chunks. */
  async *chatStream(options) {
    const payload = this.adapter.buildChatRequest(options, true);
    const raw = await this.client.chatStream(this.#chatPath(), payload);
    yield* StreamProcessor.processStream(
      raw,
      (delta) => this.adapter.extractStreamChunk(delta)
    );
  }
};
2724
+
2725
+ // src/providers/huggingface.ts
2726
+ init_http_provider_client();
2727
+ init_stream_processor();
2728
var HuggingFaceProvider = class extends BaseProvider {
  name = "huggingface";
  adapter;
  client;
  baseUrl;
  /**
   * @param config - API key string, or { apiKey, baseUrl } object
   */
  constructor(config) {
    super();
    this.adapter = new HuggingFaceAdapter();
    // Accept both the shorthand string form and the object form.
    const normalized = typeof config === "string" ? { apiKey: config } : config;
    this.baseUrl = normalized.baseUrl ?? this.adapter.defaultBaseUrl;
    this.client = new HttpProviderClient({
      apiKey: normalized.apiKey,
      baseUrl: this.baseUrl
    });
  }
  /** Relative path of the chat endpoint under baseUrl. */
  #chatPath() {
    return this.adapter.getEndpointUrl(this.baseUrl).replace(this.baseUrl, "");
  }
  /**
   * Non-streaming chat completion.
   *
   * Note on reasoning: HuggingFace aggregates many models, so thinking
   * support depends on the specific model; when supported, the response
   * carries reasoning content.
   */
  async chat(options) {
    const payload = this.adapter.buildChatRequest(options, false);
    const raw = await this.client.chat(this.#chatPath(), payload);
    return this.adapter.parseChatResponse(raw, options.model);
  }
  /** Streaming chat completion; yields parsed chunks. */
  async *chatStream(options) {
    const payload = this.adapter.buildChatRequest(options, true);
    const raw = await this.client.chatStream(this.#chatPath(), payload);
    yield* StreamProcessor.processStream(
      raw,
      (delta) => this.adapter.extractStreamChunk(delta)
    );
  }
};
2778
+
2779
+ // src/providers/groq.ts
2780
+ init_http_provider_client();
2781
+ init_stream_processor();
2782
var GroqProvider = class extends BaseProvider {
  name = "groq";
  adapter;
  client;
  baseUrl;
  /**
   * @param config - API key string, or { apiKey, baseUrl } object
   */
  constructor(config) {
    super();
    this.adapter = new GroqAdapter();
    // Accept both the shorthand string form and the object form.
    const normalized = typeof config === "string" ? { apiKey: config } : config;
    this.baseUrl = normalized.baseUrl ?? this.adapter.defaultBaseUrl;
    this.client = new HttpProviderClient({
      apiKey: normalized.apiKey,
      baseUrl: this.baseUrl
    });
  }
  /** Relative path of the chat endpoint under baseUrl. */
  #chatPath() {
    return this.adapter.getEndpointUrl(this.baseUrl).replace(this.baseUrl, "");
  }
  /** Non-streaming chat completion. */
  async chat(options) {
    const payload = this.adapter.buildChatRequest(options, false);
    const raw = await this.client.chat(this.#chatPath(), payload);
    return this.adapter.parseChatResponse(raw, options.model);
  }
  /** Streaming chat completion; yields parsed chunks. */
  async *chatStream(options) {
    const payload = this.adapter.buildChatRequest(options, true);
    const raw = await this.client.chatStream(this.#chatPath(), payload);
    yield* StreamProcessor.processStream(
      raw,
      (delta) => this.adapter.extractStreamChunk(delta)
    );
  }
};
2828
+
2829
+ // src/providers/gemini.ts
2830
+ init_http_provider_client();
2831
+ init_stream_processor();
2832
var GeminiProvider = class extends BaseProvider {
  name = "gemini";
  adapter;
  client;
  baseUrl;
  /**
   * @param config - API key string, or { apiKey, baseUrl } object
   */
  constructor(config) {
    super();
    this.adapter = new GeminiAdapter();
    // Accept both the shorthand string form and the object form.
    const normalized = typeof config === "string" ? { apiKey: config } : config;
    this.baseUrl = normalized.baseUrl ?? this.adapter.defaultBaseUrl;
    this.client = new HttpProviderClient({
      apiKey: normalized.apiKey,
      baseUrl: this.baseUrl
    });
  }
  /** Relative path of the chat endpoint under baseUrl. */
  #chatPath() {
    return this.adapter.getEndpointUrl(this.baseUrl).replace(this.baseUrl, "");
  }
  /** Non-streaming chat completion. */
  async chat(options) {
    const payload = this.adapter.buildChatRequest(options, false);
    const raw = await this.client.chat(this.#chatPath(), payload);
    return this.adapter.parseChatResponse(raw, options.model);
  }
  /** Streaming chat completion; yields parsed chunks. */
  async *chatStream(options) {
    const payload = this.adapter.buildChatRequest(options, true);
    const raw = await this.client.chatStream(this.#chatPath(), payload);
    yield* StreamProcessor.processStream(
      raw,
      (delta) => this.adapter.extractStreamChunk(delta)
    );
  }
};
2878
+
2879
+ // src/providers/deepseek.ts
2880
+ init_http_provider_client();
2881
+ init_stream_processor();
2882
+
2883
+ // src/providers/poe.ts
2884
+ init_http_provider_client();
2885
+ init_stream_processor();
2886
+
2887
+ // src/providers/nova.ts
2888
+ init_http_provider_client();
2889
+ init_stream_processor();
2890
+
2891
+ // src/utils/index.ts
2892
+ init_stream_processor();
2893
+ init_request_builder();
2894
+
2895
+ // src/client/index.ts
2896
+ init_types2();
2897
+ init_http_provider_client();
2898
+
2899
+ // src/fluent/errors.ts
2900
/** Thrown when the fluent API is used before it is fully configured. */
var ConfigurationError = class _ConfigurationError extends Error {
  constructor(message) {
    super(message);
    this.name = "ConfigurationError";
    // Keep `instanceof` working when targets downlevel class extension.
    Object.setPrototypeOf(this, _ConfigurationError.prototype);
  }
};
2907
/** Thrown when a fluent API argument fails validation. */
var ValidationError = class _ValidationError extends Error {
  constructor(message) {
    super(message);
    this.name = "ValidationError";
    // Keep `instanceof` working when targets downlevel class extension.
    Object.setPrototypeOf(this, _ValidationError.prototype);
  }
};
2914
+
2915
+ // src/fluent/builder.ts
2916
var OiiaiBuilderImpl = class _OiiaiBuilderImpl {
  /** Accumulated builder state. */
  config = {};
  /**
   * @param initialConfig - optional starting configuration (copied, not shared)
   */
  constructor(initialConfig) {
    if (initialConfig) {
      this.config = { ...initialConfig };
    }
  }
  /**
   * Select the service provider.
   * @throws ValidationError for unknown providers
   * @returns this (chainable)
   */
  use(provider) {
    if (!ProviderRegistry.hasAdapter(provider)) {
      const supported = ProviderRegistry.listSupported();
      throw new ValidationError(
        `\u4E0D\u652F\u6301\u7684 Provider: ${provider}\uFF0C\u652F\u6301\u7684 Provider: ${supported.join(", ")}`
      );
    }
    this.config.provider = provider;
    return this;
  }
  /** Set the model id. @returns this (chainable) */
  model(modelId) {
    this.config.model = modelId;
    return this;
  }
  /** Set the system prompt. @returns this (chainable) */
  system(prompt) {
    this.config.system = prompt;
    return this;
  }
  /**
   * Set sampling temperature.
   * @param value - must be a finite number in [0, 2]
   * @throws ValidationError when out of range or not a finite number
   * @returns this (chainable)
   */
  temperature(value) {
    // FIX: the previous check `value < 0 || value > 2` is false for NaN
    // (and for non-number inputs), so invalid values slipped through
    // validation silently. Require a finite in-range number instead.
    if (!(typeof value === "number" && Number.isFinite(value) && value >= 0 && value <= 2)) {
      throw new ValidationError("temperature \u5FC5\u987B\u5728 0-2 \u4E4B\u95F4");
    }
    this.config.temperature = value;
    return this;
  }
  /** Set the maximum number of output tokens. @returns this (chainable) */
  maxTokens(value) {
    this.config.maxTokens = value;
    return this;
  }
  /** Configure thinking/reasoning behaviour. @returns this (chainable) */
  reasoning(config) {
    this.config.reasoning = config;
    return this;
  }
  /** Set the API key. @returns this (chainable) */
  key(apiKey) {
    this.config.apiKey = apiKey;
    return this;
  }
  /** Override the API base URL. @returns this (chainable) */
  baseUrl(url) {
    this.config.baseUrl = url;
    return this;
  }
  /**
   * Switch ask() into streaming mode; after this, ask() returns
   * an AsyncGenerator of stream chunks.
   */
  stream() {
    this.config.isStream = true;
    return this;
  }
  /**
   * Execute the request described by the accumulated configuration.
   * @returns Promise<string> normally, AsyncGenerator after stream()
   * @throws ConfigurationError when provider/model/apiKey are missing
   */
  ask(question) {
    this.validateConfig();
    const adapter = ProviderRegistry.getAdapter(this.config.provider);
    const client = adapter.createClient({
      apiKey: this.config.apiKey,
      baseUrl: this.config.baseUrl ?? adapter.defaultBaseUrl
    });
    const messages = [];
    if (this.config.system) {
      messages.push({ role: "system", content: this.config.system });
    }
    messages.push({ role: "user", content: question });
    const chatOptions = {
      model: this.config.model,
      messages,
      temperature: this.config.temperature,
      maxTokens: this.config.maxTokens,
      reasoning: this.config.reasoning
    };
    if (this.config.isStream) {
      return this.executeStreamRequest(adapter, client, chatOptions);
    }
    return this.executeNonStreamRequest(adapter, client, chatOptions);
  }
  /**
   * Non-streaming execution path.
   * @returns the reply's content field
   */
  async executeNonStreamRequest(adapter, client, chatOptions) {
    const root = this.config.baseUrl ?? adapter.defaultBaseUrl;
    const path = adapter.getEndpointUrl(root).replace(root, "");
    const body = adapter.buildChatRequest(chatOptions, false);
    const response = await client.chat(path, body);
    const result = adapter.parseChatResponse(response, this.config.model);
    return result.content;
  }
  /**
   * Streaming execution path; yields parsed stream chunks.
   */
  async *executeStreamRequest(adapter, client, chatOptions) {
    const root = this.config.baseUrl ?? adapter.defaultBaseUrl;
    const path = adapter.getEndpointUrl(root).replace(root, "");
    const body = adapter.buildChatRequest(chatOptions, true);
    const response = await client.chatStream(path, body);
    // Bundler-generated lazy module init for the stream processor.
    const { StreamProcessor: StreamProcessor2 } = (init_stream_processor(), __toCommonJS(stream_processor_exports));
    yield* StreamProcessor2.processStream(
      response,
      (delta) => adapter.extractStreamChunk(delta)
    );
  }
  /**
   * Ensure provider, model and apiKey are all present.
   * @throws ConfigurationError when something is missing
   */
  validateConfig() {
    if (!this.config.provider) {
      throw new ConfigurationError("\u8BF7\u5148\u8C03\u7528 use(provider) \u9009\u62E9\u670D\u52A1\u63D0\u4F9B\u5546");
    }
    if (!this.config.model) {
      throw new ConfigurationError("\u8BF7\u5148\u8C03\u7528 model(modelId) \u6307\u5B9A\u6A21\u578B");
    }
    if (!this.config.apiKey) {
      throw new ConfigurationError(
        "\u8BF7\u5148\u914D\u7F6E API Key\uFF1A\u8C03\u7528 key(apiKey) \u6216\u901A\u8FC7\u9884\u8BBE\u5B9E\u4F8B\u914D\u7F6E"
      );
    }
  }
  /** Snapshot of the current configuration (used by tests). */
  getConfig() {
    return { ...this.config };
  }
  /** Shallow copy of this builder for branching configurations. */
  clone() {
    return new _OiiaiBuilderImpl({ ...this.config });
  }
};
3109
/** Factory for a fresh fluent builder, optionally pre-configured. */
function createBuilder(initialConfig) {
  return new OiiaiBuilderImpl(initialConfig);
}
3112
/**
 * Fluent entry point. Every property access starts a brand-new builder,
 * so chains like `oiiai.use(...)` never share state with each other.
 */
var oiiai = new Proxy({}, {
  get(_target, prop) {
    const builder = new OiiaiBuilderImpl();
    const member = builder[prop];
    // Bind methods so the chain keeps operating on this fresh builder.
    return typeof member === "function" ? member.bind(builder) : member;
  }
});
3122
+
3123
+ // src/fluent/preset-provider.ts
3124
+ var PresetProviderImpl = class _PresetProviderImpl {
3125
+ /** Provider 名称 */
3126
+ name;
3127
+ /** 内部配置状态 */
3128
+ config = {};
3129
+ /** 环境变量名称映射 */
3130
+ static ENV_KEY_MAP = {
3131
+ deepseek: "DEEPSEEK_API_KEY",
3132
+ openrouter: "OPENROUTER_API_KEY",
3133
+ gemini: "GEMINI_API_KEY",
3134
+ groq: "GROQ_API_KEY",
3135
+ huggingface: "HUGGINGFACE_API_KEY",
3136
+ modelscope: "MODELSCOPE_API_KEY",
3137
+ poe: "POE_API_KEY",
3138
+ nova: "NOVA_API_KEY"
3139
+ };
3140
+ /**
3141
+ * 创建预设实例
3142
+ * @param providerType - Provider 类型
3143
+ */
3144
+ constructor(providerType) {
3145
+ if (!ProviderRegistry.hasAdapter(providerType)) {
3146
+ const supported = ProviderRegistry.listSupported();
3147
+ throw new ValidationError(
3148
+ `\u4E0D\u652F\u6301\u7684 Provider: ${providerType}\uFF0C\u652F\u6301\u7684 Provider: ${supported.join(", ")}`
3149
+ );
3150
+ }
3151
+ this.name = providerType;
3152
+ }
3153
+ /**
3154
+ * 配置 API Key 和其他选项
3155
+ * @param options - 配置选项
3156
+ * @returns this 支持链式调用
3157
+ */
3158
+ configure(options) {
3159
+ this.config.apiKey = options.apiKey;
3160
+ if (options.baseUrl) {
3161
+ this.config.baseUrl = options.baseUrl;
3162
+ }
3163
+ return this;
3164
+ }
3165
+ /**
3166
+ * 从环境变量读取配置
3167
+ * 环境变量名格式: {PROVIDER}_API_KEY (如 DEEPSEEK_API_KEY)
3168
+ * @returns this 支持链式调用
3169
+ */
3170
+ fromEnv() {
3171
+ const envKey = _PresetProviderImpl.ENV_KEY_MAP[this.name];
3172
+ const apiKey = process.env[envKey];
3173
+ if (!apiKey) {
3174
+ throw new ConfigurationError(`\u73AF\u5883\u53D8\u91CF ${envKey} \u672A\u8BBE\u7F6E`);
3175
+ }
3176
+ this.config.apiKey = apiKey;
3177
+ return this;
3178
+ }
3179
+ /**
3180
+ * 简单问答(非流式)
3181
+ * @param model - 模型 ID
3182
+ * @param question - 问题
3183
+ * @param options - 可选配置
3184
+ * @returns 响应内容
3185
+ */
3186
+ async ask(model, question, options) {
3187
+ this.validateApiKey();
3188
+ const builder = this.createConfiguredBuilder(model, options);
3189
+ const result = builder.ask(question);
3190
+ return result;
3191
+ }
3192
+ /**
3193
+ * 流式问答
3194
+ * @param model - 模型 ID
3195
+ * @param question - 问题
3196
+ * @param options - 可选配置
3197
+ * @returns 流式数据块生成器
3198
+ */
3199
+ async *stream(model, question, options) {
3200
+ this.validateApiKey();
3201
+ const builder = this.createConfiguredBuilder(model, options);
3202
+ const streamBuilder = builder.stream();
3203
+ yield* streamBuilder.ask(question);
3204
+ }
3205
+ /**
3206
+ * 带回调的流式问答
3207
+ * @param model - 模型 ID
3208
+ * @param question - 问题
3209
+ * @param callbacks - 回调函数
3210
+ * @returns Promise,完成时 resolve
3211
+ */
3212
+ async streamWithCallbacks(model, question, callbacks) {
3213
+ this.validateApiKey();
3214
+ let reasoningContent = "";
3215
+ let contentText = "";
3216
+ const builder = this.createConfiguredBuilder(model);
3217
+ const streamBuilder = builder.stream();
3218
+ for await (const chunk of streamBuilder.ask(question)) {
3219
+ if (chunk.type === "reasoning") {
3220
+ reasoningContent += chunk.text;
3221
+ callbacks.onReasoning?.(chunk.text);
3222
+ } else if (chunk.type === "content") {
3223
+ contentText += chunk.text;
3224
+ callbacks.onContent?.(chunk.text);
3225
+ }
3226
+ }
3227
+ callbacks.onDone?.({
3228
+ reasoning: reasoningContent,
3229
+ content: contentText
3230
+ });
3231
+ }
3232
+ /**
3233
+ * 获取构建器(预配置 provider 和 model)
3234
+ * @param model - 模型 ID
3235
+ * @returns 预配置的构建器
3236
+ */
3237
+ builder(model) {
3238
+ this.validateApiKey();
3239
+ const adapter = ProviderRegistry.getAdapter(this.name);
3240
+ const builder = new OiiaiBuilderImpl({
3241
+ provider: this.name,
3242
+ model,
3243
+ apiKey: this.config.apiKey,
3244
+ baseUrl: this.config.baseUrl ?? adapter.defaultBaseUrl
3245
+ });
3246
+ return builder;
3247
+ }
3248
+ /**
3249
+ * 创建多轮对话会话
3250
+ * @param model - 模型 ID
3251
+ * @param options - 会话配置
3252
+ * @returns 对话会话实例
3253
+ */
3254
+ chat(model, options) {
3255
+ this.validateApiKey();
3256
+ const { ChatSessionImpl: ChatSessionImpl2 } = (init_chat_session(), __toCommonJS(chat_session_exports));
3257
+ return new ChatSessionImpl2(this, model, options);
3258
+ }
3259
+ /**
3260
+ * 验证 API Key 是否已配置
3261
+ * @throws ConfigurationError 如果未配置 API Key
3262
+ */
3263
+ validateApiKey() {
3264
+ if (!this.config.apiKey) {
3265
+ throw new ConfigurationError(
3266
+ `\u8BF7\u5148\u914D\u7F6E API Key\uFF1A\u8C03\u7528 configure({ apiKey: 'xxx' }) \u6216 fromEnv()`
3267
+ );
3268
+ }
3269
+ }
3270
+ /**
3271
+ * 创建已配置的构建器
3272
+ * @param model - 模型 ID
3273
+ * @param options - 可选配置
3274
+ * @returns 配置好的构建器
3275
+ */
3276
+ createConfiguredBuilder(model, options) {
3277
+ const adapter = ProviderRegistry.getAdapter(this.name);
3278
+ const builder = new OiiaiBuilderImpl({
3279
+ provider: this.name,
3280
+ model,
3281
+ apiKey: this.config.apiKey,
3282
+ baseUrl: this.config.baseUrl ?? adapter.defaultBaseUrl
3283
+ });
3284
+ if (options?.system) {
3285
+ builder.system(options.system);
3286
+ }
3287
+ if (options?.temperature !== void 0) {
3288
+ builder.temperature(options.temperature);
3289
+ }
3290
+ if (options?.maxTokens !== void 0) {
3291
+ builder.maxTokens(options.maxTokens);
3292
+ }
3293
+ if (options?.reasoning) {
3294
+ builder.reasoning(options.reasoning);
3295
+ }
3296
+ return builder;
3297
+ }
3298
+ /**
3299
+ * 获取当前配置(用于测试)
3300
+ * @returns 当前配置的副本
3301
+ */
3302
+ getConfig() {
3303
+ return { ...this.config };
3304
+ }
3305
+ };
3306
+
3307
+ // src/fluent/index.ts
3308
+ init_chat_session();
3309
+
3310
+ // src/fluent/preset-instances.ts
3311
+ var deepseek = new PresetProviderImpl("deepseek");
3312
+ var openrouter = new PresetProviderImpl("openrouter");
3313
+ var gemini = new PresetProviderImpl("gemini");
3314
+ var groq = new PresetProviderImpl("groq");
3315
+ var huggingface = new PresetProviderImpl(
3316
+ "huggingface"
3317
+ );
3318
+ var modelscope = new PresetProviderImpl("modelscope");
3319
+ var poe = new PresetProviderImpl("poe");
3320
+ var nova = new PresetProviderImpl("nova");
1010
3321
  export {
3322
+ APIError,
3323
+ BaseAdapter,
1011
3324
  BaseProvider,
3325
+ CONFIG_DEFAULTS,
3326
+ ConfigManager,
3327
+ ConfigValidator,
3328
+ ConfigurationError,
3329
+ DeepSeekAdapter,
1012
3330
  EFFORT_TOKEN_MAP,
3331
+ ValidationError as FluentValidationError,
3332
+ GeminiAdapter,
1013
3333
  GeminiProvider,
3334
+ GroqAdapter,
1014
3335
  GroqProvider,
3336
+ HttpProviderClient,
3337
+ HuggingFaceAdapter,
1015
3338
  HuggingFaceProvider,
3339
+ ModelScopeAdapter,
1016
3340
  ModelScopeProvider,
3341
+ NetworkError,
3342
+ NovaAdapter,
3343
+ OpenRouterAdapter,
1017
3344
  OpenRouterProvider,
3345
+ PoeAdapter,
3346
+ ProviderError,
3347
+ ProviderRegistry,
3348
+ RegistryError,
3349
+ RequestBuilder,
3350
+ StreamProcessor,
3351
+ TimeoutError,
3352
+ VALID_PROVIDERS,
1018
3353
  ai,
1019
- createProvider
3354
+ createBuilder,
3355
+ createBuiltInAdapters,
3356
+ createProvider,
3357
+ deepseek,
3358
+ gemini,
3359
+ groq,
3360
+ huggingface,
3361
+ modelscope,
3362
+ nova,
3363
+ oiiai,
3364
+ openrouter,
3365
+ poe
1020
3366
  };
1021
3367
  //# sourceMappingURL=index.mjs.map