@weisiren000/oiiai 0.1.2 → 0.1.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -21,10 +21,20 @@ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
 var index_exports = {};
 __export(index_exports, {
   BaseProvider: () => BaseProvider,
-  OpenRouterProvider: () => OpenRouterProvider
+  EFFORT_TOKEN_MAP: () => EFFORT_TOKEN_MAP,
+  GeminiProvider: () => GeminiProvider,
+  GroqProvider: () => GroqProvider,
+  HuggingFaceProvider: () => HuggingFaceProvider,
+  ModelScopeProvider: () => ModelScopeProvider,
+  OpenRouterProvider: () => OpenRouterProvider,
+  ai: () => ai,
+  createProvider: () => createProvider
 });
 module.exports = __toCommonJS(index_exports);
 
+// src/providers/openrouter.ts
+var import_sdk = require("@openrouter/sdk");
+
 // src/providers/__base__.ts
 var BaseProvider = class {
   /**
@@ -56,8 +66,15 @@ var BaseProvider = class {
   }
 };
 
+// src/providers/__types__.ts
+var EFFORT_TOKEN_MAP = {
+  off: 0,
+  low: 1024,
+  medium: 4096,
+  high: 16384
+};
+
 // src/providers/openrouter.ts
-var import_sdk = require("@openrouter/sdk");
 function extractTextContent(content) {
   if (typeof content === "string") {
     return content;
@@ -71,19 +88,19 @@ function extractTextContent(content) {
 }
 function buildReasoningParam(config) {
   if (!config) return void 0;
+  if (config.effort === "off") return void 0;
   const param = {};
-  if (config.effort !== void 0) {
+  if (config.effort) {
     param.effort = config.effort;
   }
-  if (config.maxTokens !== void 0) {
-    param.max_tokens = config.maxTokens;
+  if (config.budgetTokens !== void 0) {
+    param.max_tokens = config.budgetTokens;
+  } else if (config.effort && EFFORT_TOKEN_MAP[config.effort]) {
+    param.max_tokens = EFFORT_TOKEN_MAP[config.effort];
   }
   if (config.exclude !== void 0) {
     param.exclude = config.exclude;
   }
-  if (config.enabled !== void 0) {
-    param.enabled = config.enabled;
-  }
   return Object.keys(param).length > 0 ? param : void 0;
 }
 var OpenRouterProvider = class extends BaseProvider {
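The reworked `buildReasoningParam` above drops the old `enabled`/`maxTokens` fields in favor of `budgetTokens`, with `EFFORT_TOKEN_MAP` supplying a default budget per effort level. A minimal standalone sketch of the resulting mapping (the config and param types are inferred from this diff, not published oiiai typings):

// Sketch of the reasoning -> request-param mapping implemented above.
type Effort = "off" | "low" | "medium" | "high";
interface ReasoningConfig { effort?: Effort; budgetTokens?: number; exclude?: boolean; }
const EFFORT_TOKEN_MAP: Record<Effort, number> = { off: 0, low: 1024, medium: 4096, high: 16384 };

function buildReasoningParam(config?: ReasoningConfig) {
  if (!config || config.effort === "off") return undefined;
  const param: { effort?: Effort; max_tokens?: number; exclude?: boolean } = {};
  if (config.effort) param.effort = config.effort;
  if (config.budgetTokens !== undefined) param.max_tokens = config.budgetTokens;
  else if (config.effort && EFFORT_TOKEN_MAP[config.effort]) param.max_tokens = EFFORT_TOKEN_MAP[config.effort];
  if (config.exclude !== undefined) param.exclude = config.exclude;
  return Object.keys(param).length > 0 ? param : undefined;
}

console.log(buildReasoningParam({ effort: "medium" }));                   // { effort: "medium", max_tokens: 4096 }
console.log(buildReasoningParam({ effort: "high", budgetTokens: 2000 })); // { effort: "high", max_tokens: 2000 }
console.log(buildReasoningParam({ effort: "off" }));                      // undefined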
@@ -97,7 +114,13 @@ var OpenRouterProvider = class extends BaseProvider {
    * Send a chat request (non-streaming)
    */
   async chat(options) {
-    const { model, messages, temperature = 0.7, maxTokens, reasoning } = options;
+    const {
+      model,
+      messages,
+      temperature = 0.7,
+      maxTokens,
+      reasoning
+    } = options;
     const reasoningParam = buildReasoningParam(reasoning);
     const requestParams = {
       model,
@@ -132,7 +155,13 @@ var OpenRouterProvider = class extends BaseProvider {
    * Send a streaming chat request
    */
   async *chatStream(options) {
-    const { model, messages, temperature = 0.7, maxTokens, reasoning } = options;
+    const {
+      model,
+      messages,
+      temperature = 0.7,
+      maxTokens,
+      reasoning
+    } = options;
     const reasoningParam = buildReasoningParam(reasoning);
     const requestParams = {
       model,
@@ -144,7 +173,9 @@ var OpenRouterProvider = class extends BaseProvider {
     if (reasoningParam) {
       requestParams.reasoning = reasoningParam;
     }
-    const stream = await this.client.chat.send(requestParams);
+    const stream = await this.client.chat.send(
+      requestParams
+    );
     for await (const chunk of stream) {
       const delta = chunk.choices?.[0]?.delta;
       if (!delta) continue;
@@ -186,9 +217,840 @@ var OpenRouterProvider = class extends BaseProvider {
     }));
   }
 };
+
+// src/providers/gemini.ts
+var BASE_URL = "https://generativelanguage.googleapis.com/v1beta/openai";
+function extractTextContent2(content) {
+  if (typeof content === "string") {
+    return content;
+  }
+  if (Array.isArray(content)) {
+    return content.filter(
+      (item) => typeof item === "object" && item !== null && item.type === "text" && typeof item.text === "string"
+    ).map((item) => item.text).join("");
+  }
+  return "";
+}
+var GeminiProvider = class extends BaseProvider {
+  name = "gemini";
+  apiKey;
+  baseUrl;
+  constructor(config) {
+    super();
+    if (typeof config === "string") {
+      this.apiKey = config;
+      this.baseUrl = BASE_URL;
+    } else {
+      this.apiKey = config.apiKey;
+      this.baseUrl = config.baseUrl ?? BASE_URL;
+    }
+  }
+  /**
+   * Send a chat request (non-streaming)
+   */
+  async chat(options) {
+    const {
+      model,
+      messages,
+      temperature = 0.7,
+      maxTokens,
+      reasoning
+    } = options;
+    const body = {
+      model,
+      messages,
+      temperature,
+      stream: false
+    };
+    if (maxTokens) {
+      body.max_tokens = maxTokens;
+    }
+    if (reasoning?.effort && reasoning.effort !== "off") {
+      body.reasoning_effort = reasoning.effort;
+    }
+    const response = await fetch(`${this.baseUrl}/chat/completions`, {
+      method: "POST",
+      headers: {
+        "Content-Type": "application/json",
+        Authorization: `Bearer ${this.apiKey}`
+      },
+      body: JSON.stringify(body)
+    });
+    if (!response.ok) {
+      const error = await response.text();
+      throw new Error(`Gemini API error: ${response.status} ${error}`);
+    }
+    const result = await response.json();
+    const choice = result.choices?.[0];
+    if (!choice) {
+      throw new Error("No response from model");
+    }
+    const msg = choice.message;
+    const reasoningContent = msg?.reasoning_content ?? null;
+    return {
+      content: extractTextContent2(msg?.content),
+      reasoning: reasoningContent ? extractTextContent2(reasoningContent) : null,
+      model: result.model ?? model,
+      usage: {
+        promptTokens: result.usage?.prompt_tokens ?? 0,
+        completionTokens: result.usage?.completion_tokens ?? 0,
+        totalTokens: result.usage?.total_tokens ?? 0
+      },
+      finishReason: choice.finish_reason ?? null
+    };
+  }
+  /**
+   * Send a streaming chat request
+   */
+  async *chatStream(options) {
+    const {
+      model,
+      messages,
+      temperature = 0.7,
+      maxTokens,
+      reasoning
+    } = options;
+    const body = {
+      model,
+      messages,
+      temperature,
+      stream: true
+    };
+    if (maxTokens) {
+      body.max_tokens = maxTokens;
+    }
+    if (reasoning?.effort && reasoning.effort !== "off") {
+      body.reasoning_effort = reasoning.effort;
+    }
+    const response = await fetch(`${this.baseUrl}/chat/completions`, {
+      method: "POST",
+      headers: {
+        "Content-Type": "application/json",
+        Authorization: `Bearer ${this.apiKey}`
+      },
+      body: JSON.stringify(body)
+    });
+    if (!response.ok) {
+      const error = await response.text();
+      throw new Error(`Gemini API error: ${response.status} ${error}`);
+    }
+    const reader = response.body?.getReader();
+    if (!reader) {
+      throw new Error("No response body");
+    }
+    const decoder = new TextDecoder();
+    let buffer = "";
+    try {
+      while (true) {
+        const { done, value } = await reader.read();
+        if (done) break;
+        buffer += decoder.decode(value, { stream: true });
+        const lines = buffer.split("\n");
+        buffer = lines.pop() ?? "";
+        for (const line of lines) {
+          const trimmed = line.trim();
+          if (!trimmed || trimmed === "data: [DONE]") continue;
+          if (!trimmed.startsWith("data: ")) continue;
+          try {
+            const data = JSON.parse(trimmed.slice(6));
+            const delta = data.choices?.[0]?.delta;
+            if (!delta) continue;
+            const thought = delta.reasoning_content ?? delta.thoughts;
+            if (thought) {
+              yield {
+                type: "reasoning",
+                text: extractTextContent2(thought)
+              };
+            }
+            if (delta.content) {
+              yield {
+                type: "content",
+                text: extractTextContent2(delta.content)
+              };
+            }
+          } catch {
+          }
+        }
+      }
+    } finally {
+      reader.releaseLock();
+    }
+  }
+};
+
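Each of the fetch-based providers added below parses the OpenAI-style SSE stream by hand: buffer the bytes, split on newlines, strip the `data: ` prefix, and JSON-parse each event, yielding `{ type, text }` chunks. A hypothetical consumer of `GeminiProvider` (the model id and env-var name are illustrative assumptions; the chunk shape follows the yields in the diff above):

// Hypothetical usage sketch, not a documented oiiai example.
const gemini = new GeminiProvider(process.env.GEMINI_API_KEY ?? "");
for await (const chunk of gemini.chatStream({
  model: "gemini-2.0-flash",               // illustrative model id
  messages: [{ role: "user", content: "Say hi" }],
  reasoning: { effort: "low" }
})) {
  if (chunk.type === "reasoning") process.stderr.write(chunk.text);
  else process.stdout.write(chunk.text);
}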
+// src/providers/groq.ts
+var BASE_URL2 = "https://api.groq.com/openai/v1";
+function extractTextContent3(content) {
+  if (typeof content === "string") {
+    return content;
+  }
+  if (Array.isArray(content)) {
+    return content.filter(
+      (item) => typeof item === "object" && item !== null && item.type === "text" && typeof item.text === "string"
+    ).map((item) => item.text).join("");
+  }
+  return "";
+}
+var GroqProvider = class extends BaseProvider {
+  name = "groq";
+  apiKey;
+  baseUrl;
+  constructor(config) {
+    super();
+    if (typeof config === "string") {
+      this.apiKey = config;
+      this.baseUrl = BASE_URL2;
+    } else {
+      this.apiKey = config.apiKey;
+      this.baseUrl = config.baseUrl ?? BASE_URL2;
+    }
+  }
+  /**
+   * Send a chat request (non-streaming)
+   */
+  async chat(options) {
+    const { model, messages, temperature = 1, maxTokens, reasoning } = options;
+    const body = {
+      model,
+      messages,
+      temperature,
+      stream: false,
+      top_p: 1
+    };
+    if (maxTokens) {
+      body.max_completion_tokens = maxTokens;
+    }
+    if (reasoning?.effort && reasoning.effort !== "off") {
+      body.reasoning_format = "parsed";
+    } else if (reasoning?.effort === "off") {
+      body.include_reasoning = false;
+    }
+    const response = await fetch(`${this.baseUrl}/chat/completions`, {
+      method: "POST",
+      headers: {
+        "Content-Type": "application/json",
+        Authorization: `Bearer ${this.apiKey}`
+      },
+      body: JSON.stringify(body)
+    });
+    if (!response.ok) {
+      const error = await response.text();
+      throw new Error(`Groq API error: ${response.status} ${error}`);
+    }
+    const result = await response.json();
+    const choice = result.choices?.[0];
+    if (!choice) {
+      throw new Error("No response from model");
+    }
+    const msg = choice.message;
+    const reasoningContent = msg?.reasoning_content ?? msg?.reasoning ?? null;
+    return {
+      content: extractTextContent3(msg?.content),
+      reasoning: reasoningContent ? extractTextContent3(reasoningContent) : null,
+      model: result.model ?? model,
+      usage: {
+        promptTokens: result.usage?.prompt_tokens ?? 0,
+        completionTokens: result.usage?.completion_tokens ?? 0,
+        totalTokens: result.usage?.total_tokens ?? 0
+      },
+      finishReason: choice.finish_reason ?? null
+    };
+  }
+  /**
+   * Send a streaming chat request
+   */
+  async *chatStream(options) {
+    const { model, messages, temperature = 1, maxTokens, reasoning } = options;
+    const body = {
+      model,
+      messages,
+      temperature,
+      stream: true,
+      top_p: 1
+    };
+    if (maxTokens) {
+      body.max_completion_tokens = maxTokens;
+    }
+    if (reasoning?.effort && reasoning.effort !== "off") {
+      body.reasoning_format = "parsed";
+    } else if (reasoning?.effort === "off") {
+      body.include_reasoning = false;
+    }
+    const response = await fetch(`${this.baseUrl}/chat/completions`, {
+      method: "POST",
+      headers: {
+        "Content-Type": "application/json",
+        Authorization: `Bearer ${this.apiKey}`
+      },
+      body: JSON.stringify(body)
+    });
+    if (!response.ok) {
+      const error = await response.text();
+      throw new Error(`Groq API error: ${response.status} ${error}`);
+    }
+    const reader = response.body?.getReader();
+    if (!reader) {
+      throw new Error("No response body");
+    }
+    const decoder = new TextDecoder();
+    let buffer = "";
+    try {
+      while (true) {
+        const { done, value } = await reader.read();
+        if (done) break;
+        buffer += decoder.decode(value, { stream: true });
+        const lines = buffer.split("\n");
+        buffer = lines.pop() ?? "";
+        for (const line of lines) {
+          const trimmed = line.trim();
+          if (!trimmed || trimmed === "data: [DONE]") continue;
+          if (!trimmed.startsWith("data: ")) continue;
+          try {
+            const data = JSON.parse(trimmed.slice(6));
+            const delta = data.choices?.[0]?.delta;
+            if (!delta) continue;
+            const reasoningContent = delta.reasoning_content ?? delta.reasoning;
+            if (reasoningContent) {
+              yield {
+                type: "reasoning",
+                text: extractTextContent3(reasoningContent)
+              };
+            }
+            if (delta.content) {
+              yield {
+                type: "content",
+                text: extractTextContent3(delta.content)
+              };
+            }
+          } catch {
+          }
+        }
+      }
+    } finally {
+      reader.releaseLock();
+    }
+  }
+};
+
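Groq takes no effort level of its own, so the provider lowers the shared `reasoning` config onto two Groq-specific body fields. A standalone sketch of that branch logic (function name is illustrative):

// Restates the reasoning branches from GroqProvider above.
function groqReasoningFields(effort?: "off" | "low" | "medium" | "high") {
  if (effort && effort !== "off") return { reasoning_format: "parsed" };
  if (effort === "off") return { include_reasoning: false };
  return {};
}
console.log(groqReasoningFields("medium")); // { reasoning_format: "parsed" }
console.log(groqReasoningFields("off"));    // { include_reasoning: false }
console.log(groqReasoningFields());         // {}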
+// src/providers/huggingface.ts
+var BASE_URL3 = "https://router.huggingface.co/v1";
+function extractTextContent4(content) {
+  if (typeof content === "string") {
+    return content;
+  }
+  if (Array.isArray(content)) {
+    return content.filter(
+      (item) => typeof item === "object" && item !== null && item.type === "text" && typeof item.text === "string"
+    ).map((item) => item.text).join("");
+  }
+  return "";
+}
+var HuggingFaceProvider = class extends BaseProvider {
+  name = "huggingface";
+  apiKey;
+  baseUrl;
+  constructor(config) {
+    super();
+    if (typeof config === "string") {
+      this.apiKey = config;
+      this.baseUrl = BASE_URL3;
+    } else {
+      this.apiKey = config.apiKey;
+      this.baseUrl = config.baseUrl ?? BASE_URL3;
+    }
+  }
+  /**
+   * Send a chat request (non-streaming)
+   */
+  async chat(options) {
+    const { model, messages, temperature = 0.7, maxTokens } = options;
+    const body = {
+      model,
+      messages,
+      temperature,
+      stream: false
+    };
+    if (maxTokens) {
+      body.max_tokens = maxTokens;
+    }
+    const response = await fetch(`${this.baseUrl}/chat/completions`, {
+      method: "POST",
+      headers: {
+        "Content-Type": "application/json",
+        Authorization: `Bearer ${this.apiKey}`
+      },
+      body: JSON.stringify(body)
+    });
+    if (!response.ok) {
+      const error = await response.text();
+      throw new Error(`HuggingFace API error: ${response.status} ${error}`);
+    }
+    const result = await response.json();
+    const choice = result.choices?.[0];
+    if (!choice) {
+      throw new Error("No response from model");
+    }
+    const msg = choice.message;
+    const reasoningContent = msg?.reasoning_content ?? null;
+    return {
+      content: extractTextContent4(msg?.content),
+      reasoning: reasoningContent ? extractTextContent4(reasoningContent) : null,
+      model: result.model ?? model,
+      usage: {
+        promptTokens: result.usage?.prompt_tokens ?? 0,
+        completionTokens: result.usage?.completion_tokens ?? 0,
+        totalTokens: result.usage?.total_tokens ?? 0
+      },
+      finishReason: choice.finish_reason ?? null
+    };
+  }
+  /**
+   * Send a streaming chat request
+   */
+  async *chatStream(options) {
+    const { model, messages, temperature = 0.7, maxTokens } = options;
+    const body = {
+      model,
+      messages,
+      temperature,
+      stream: true
+    };
+    if (maxTokens) {
+      body.max_tokens = maxTokens;
+    }
+    const response = await fetch(`${this.baseUrl}/chat/completions`, {
+      method: "POST",
+      headers: {
+        "Content-Type": "application/json",
+        Authorization: `Bearer ${this.apiKey}`
+      },
+      body: JSON.stringify(body)
+    });
+    if (!response.ok) {
+      const error = await response.text();
+      throw new Error(`HuggingFace API error: ${response.status} ${error}`);
+    }
+    const reader = response.body?.getReader();
+    if (!reader) {
+      throw new Error("No response body");
+    }
+    const decoder = new TextDecoder();
+    let buffer = "";
+    try {
+      while (true) {
+        const { done, value } = await reader.read();
+        if (done) break;
+        buffer += decoder.decode(value, { stream: true });
+        const lines = buffer.split("\n");
+        buffer = lines.pop() ?? "";
+        for (const line of lines) {
+          const trimmed = line.trim();
+          if (!trimmed || trimmed === "data: [DONE]") continue;
+          if (!trimmed.startsWith("data: ")) continue;
+          try {
+            const data = JSON.parse(trimmed.slice(6));
+            const delta = data.choices?.[0]?.delta;
+            if (!delta) continue;
+            if (delta.reasoning_content) {
+              yield {
+                type: "reasoning",
+                text: extractTextContent4(delta.reasoning_content)
+              };
+            }
+            if (delta.content) {
+              yield {
+                type: "content",
+                text: extractTextContent4(delta.content)
+              };
+            }
+          } catch {
+          }
+        }
+      }
+    } finally {
+      reader.releaseLock();
+    }
+  }
+};
+
+// src/providers/modelscope.ts
+var BASE_URL4 = "https://api-inference.modelscope.cn/v1";
+function extractTextContent5(content) {
+  if (typeof content === "string") {
+    return content;
+  }
+  if (Array.isArray(content)) {
+    return content.filter(
+      (item) => typeof item === "object" && item !== null && item.type === "text" && typeof item.text === "string"
+    ).map((item) => item.text).join("");
+  }
+  return "";
+}
+var ModelScopeProvider = class extends BaseProvider {
+  name = "modelscope";
+  apiKey;
+  baseUrl;
+  constructor(config) {
+    super();
+    if (typeof config === "string") {
+      this.apiKey = config;
+      this.baseUrl = BASE_URL4;
+    } else {
+      this.apiKey = config.apiKey;
+      this.baseUrl = config.baseUrl ?? BASE_URL4;
+    }
+  }
+  /**
+   * Send a chat request (non-streaming)
+   */
+  async chat(options) {
+    const {
+      model,
+      messages,
+      temperature = 0.7,
+      maxTokens,
+      reasoning
+    } = options;
+    const body = {
+      model,
+      messages,
+      temperature,
+      stream: false
+    };
+    if (maxTokens) {
+      body.max_tokens = maxTokens;
+    }
+    if (reasoning?.effort) {
+      if (reasoning.effort === "off") {
+        body.enable_thinking = false;
+      } else {
+        body.enable_thinking = true;
+      }
+    }
+    const response = await fetch(`${this.baseUrl}/chat/completions`, {
+      method: "POST",
+      headers: {
+        "Content-Type": "application/json",
+        Authorization: `Bearer ${this.apiKey}`
+      },
+      body: JSON.stringify(body)
+    });
+    if (!response.ok) {
+      const error = await response.text();
+      throw new Error(`ModelScope API error: ${response.status} ${error}`);
+    }
+    const result = await response.json();
+    const choice = result.choices?.[0];
+    if (!choice) {
+      throw new Error("No response from model");
+    }
+    const msg = choice.message;
+    const reasoningContent = msg?.reasoning_content ?? null;
+    return {
+      content: extractTextContent5(msg?.content),
+      reasoning: reasoningContent ? extractTextContent5(reasoningContent) : null,
+      model: result.model ?? model,
+      usage: {
+        promptTokens: result.usage?.prompt_tokens ?? 0,
+        completionTokens: result.usage?.completion_tokens ?? 0,
+        totalTokens: result.usage?.total_tokens ?? 0
+      },
+      finishReason: choice.finish_reason ?? null
+    };
+  }
+  /**
+   * Send a streaming chat request
+   */
+  async *chatStream(options) {
+    const {
+      model,
+      messages,
+      temperature = 0.7,
+      maxTokens,
+      reasoning
+    } = options;
+    const body = {
+      model,
+      messages,
+      temperature,
+      stream: true
+    };
+    if (maxTokens) {
+      body.max_tokens = maxTokens;
+    }
+    if (reasoning?.effort) {
+      if (reasoning.effort === "off") {
+        body.enable_thinking = false;
+      } else {
+        body.enable_thinking = true;
+      }
+    }
+    const response = await fetch(`${this.baseUrl}/chat/completions`, {
+      method: "POST",
+      headers: {
+        "Content-Type": "application/json",
+        Authorization: `Bearer ${this.apiKey}`
+      },
+      body: JSON.stringify(body)
+    });
+    if (!response.ok) {
+      const error = await response.text();
+      throw new Error(`ModelScope API error: ${response.status} ${error}`);
+    }
+    const reader = response.body?.getReader();
+    if (!reader) {
+      throw new Error("No response body");
+    }
+    const decoder = new TextDecoder();
+    let buffer = "";
+    try {
+      while (true) {
+        const { done, value } = await reader.read();
+        if (done) break;
+        buffer += decoder.decode(value, { stream: true });
+        const lines = buffer.split("\n");
+        buffer = lines.pop() ?? "";
+        for (const line of lines) {
+          const trimmed = line.trim();
+          if (!trimmed || trimmed === "data: [DONE]") continue;
+          if (!trimmed.startsWith("data: ")) continue;
+          try {
+            const data = JSON.parse(trimmed.slice(6));
+            const delta = data.choices?.[0]?.delta;
+            if (!delta) continue;
+            if (delta.reasoning_content) {
+              yield {
+                type: "reasoning",
+                text: extractTextContent5(delta.reasoning_content)
+              };
+            }
+            if (delta.content) {
+              yield {
+                type: "content",
+                text: extractTextContent5(delta.content)
+              };
+            }
+          } catch {
+          }
+        }
+      }
+    } finally {
+      reader.releaseLock();
+    }
+  }
+};
+
+// src/providers/deepseek.ts
+var BASE_URL5 = "https://api.deepseek.com";
+function getReasoningMaxTokens(reasoning, userMaxTokens) {
+  if (!reasoning || reasoning.effort === "off") {
+    return userMaxTokens;
+  }
+  if (reasoning.budgetTokens !== void 0) {
+    return reasoning.budgetTokens;
+  }
+  if (reasoning.effort) {
+    return EFFORT_TOKEN_MAP[reasoning.effort];
+  }
+  return userMaxTokens;
+}
+function extractTextContent6(content) {
+  if (typeof content === "string") {
+    return content;
+  }
+  if (Array.isArray(content)) {
+    return content.filter(
+      (item) => typeof item === "object" && item !== null && item.type === "text" && typeof item.text === "string"
+    ).map((item) => item.text).join("");
+  }
+  return "";
+}
+var DeepSeekProvider = class extends BaseProvider {
+  name = "deepseek";
+  apiKey;
+  baseUrl;
+  constructor(config) {
+    super();
+    if (typeof config === "string") {
+      this.apiKey = config;
+      this.baseUrl = BASE_URL5;
+    } else {
+      this.apiKey = config.apiKey;
+      this.baseUrl = config.baseUrl ?? BASE_URL5;
+    }
+  }
+  /**
+   * Send a chat request (non-streaming)
+   */
+  async chat(options) {
+    const {
+      model,
+      messages,
+      temperature = 0.7,
+      maxTokens,
+      reasoning
+    } = options;
+    const effectiveMaxTokens = getReasoningMaxTokens(reasoning, maxTokens);
+    const body = {
+      model,
+      messages,
+      temperature,
+      stream: false
+    };
+    if (effectiveMaxTokens) {
+      body.max_tokens = effectiveMaxTokens;
+    }
+    if (reasoning?.effort && reasoning.effort !== "off") {
+      body.thinking = { type: "enabled" };
+    }
+    const response = await fetch(`${this.baseUrl}/chat/completions`, {
+      method: "POST",
+      headers: {
+        "Content-Type": "application/json",
+        Authorization: `Bearer ${this.apiKey}`
+      },
+      body: JSON.stringify(body)
+    });
+    if (!response.ok) {
+      const error = await response.text();
+      throw new Error(`DeepSeek API error: ${response.status} ${error}`);
+    }
+    const result = await response.json();
+    const choice = result.choices?.[0];
+    if (!choice) {
+      throw new Error("No response from model");
+    }
+    const msg = choice.message;
+    const reasoningContent = msg?.reasoning_content ?? null;
+    return {
+      content: extractTextContent6(msg?.content),
+      reasoning: reasoningContent ? extractTextContent6(reasoningContent) : null,
+      model: result.model ?? model,
+      usage: {
+        promptTokens: result.usage?.prompt_tokens ?? 0,
+        completionTokens: result.usage?.completion_tokens ?? 0,
+        totalTokens: result.usage?.total_tokens ?? 0
+      },
+      finishReason: choice.finish_reason ?? null
+    };
+  }
+  /**
+   * Send a streaming chat request
+   */
+  async *chatStream(options) {
+    const {
+      model,
+      messages,
+      temperature = 0.7,
+      maxTokens,
+      reasoning
+    } = options;
+    const effectiveMaxTokens = getReasoningMaxTokens(reasoning, maxTokens);
+    const body = {
+      model,
+      messages,
+      temperature,
+      stream: true
+    };
+    if (effectiveMaxTokens) {
+      body.max_tokens = effectiveMaxTokens;
+    }
+    if (reasoning?.effort && reasoning.effort !== "off") {
+      body.thinking = { type: "enabled" };
+    }
+    const response = await fetch(`${this.baseUrl}/chat/completions`, {
+      method: "POST",
+      headers: {
+        "Content-Type": "application/json",
+        Authorization: `Bearer ${this.apiKey}`
+      },
+      body: JSON.stringify(body)
+    });
+    if (!response.ok) {
+      const error = await response.text();
+      throw new Error(`DeepSeek API error: ${response.status} ${error}`);
+    }
+    const reader = response.body?.getReader();
+    if (!reader) {
+      throw new Error("No response body");
+    }
+    const decoder = new TextDecoder();
+    let buffer = "";
+    try {
+      while (true) {
+        const { done, value } = await reader.read();
+        if (done) break;
+        buffer += decoder.decode(value, { stream: true });
+        const lines = buffer.split("\n");
+        buffer = lines.pop() ?? "";
+        for (const line of lines) {
+          const trimmed = line.trim();
+          if (!trimmed || trimmed === "data: [DONE]") continue;
+          if (!trimmed.startsWith("data: ")) continue;
+          try {
+            const data = JSON.parse(trimmed.slice(6));
+            const delta = data.choices?.[0]?.delta;
+            if (!delta) continue;
+            if (delta.reasoning_content) {
+              yield {
+                type: "reasoning",
+                text: extractTextContent6(delta.reasoning_content)
+              };
+            }
+            if (delta.content) {
+              yield {
+                type: "content",
+                text: extractTextContent6(delta.content)
+              };
+            }
+          } catch {
+          }
+        }
+      }
+    } finally {
+      reader.releaseLock();
+    }
+  }
+};
+
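DeepSeek folds the reasoning budget into `max_tokens`: `getReasoningMaxTokens` prefers an explicit `budgetTokens`, then the `EFFORT_TOKEN_MAP` default, and otherwise falls back to the caller's own `maxTokens`. A runnable restatement with worked examples (types are inferred from this diff, not published):

// Restates getReasoningMaxTokens from DeepSeekProvider above.
type Effort = "off" | "low" | "medium" | "high";
const EFFORT_TOKEN_MAP: Record<Effort, number> = { off: 0, low: 1024, medium: 4096, high: 16384 };
function getReasoningMaxTokens(reasoning?: { effort?: Effort; budgetTokens?: number }, userMaxTokens?: number) {
  if (!reasoning || reasoning.effort === "off") return userMaxTokens;
  if (reasoning.budgetTokens !== undefined) return reasoning.budgetTokens;
  if (reasoning.effort) return EFFORT_TOKEN_MAP[reasoning.effort];
  return userMaxTokens;
}
console.log(getReasoningMaxTokens({ effort: "high" }, 500));                    // 16384
console.log(getReasoningMaxTokens({ effort: "low", budgetTokens: 2048 }, 500)); // 2048
console.log(getReasoningMaxTokens({ effort: "off" }, 500));                     // 500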
+// src/providers/__factory__.ts
+function createProvider(config) {
+  const { provider, apiKey, baseUrl } = config;
+  switch (provider) {
+    case "openrouter":
+      return new OpenRouterProvider(apiKey);
+    case "gemini":
+      return new GeminiProvider(baseUrl ? { apiKey, baseUrl } : apiKey);
+    case "groq":
+      return new GroqProvider(baseUrl ? { apiKey, baseUrl } : apiKey);
+    case "huggingface":
+      return new HuggingFaceProvider(baseUrl ? { apiKey, baseUrl } : apiKey);
+    case "modelscope":
+      return new ModelScopeProvider(baseUrl ? { apiKey, baseUrl } : apiKey);
+    case "deepseek":
+      return new DeepSeekProvider(baseUrl ? { apiKey, baseUrl } : apiKey);
+    default:
+      throw new Error(`Unknown provider: ${provider}`);
+  }
+}
+var ai = {
+  openrouter: (apiKey, baseUrl) => createProvider({ provider: "openrouter", apiKey, baseUrl }),
+  gemini: (apiKey, baseUrl) => createProvider({ provider: "gemini", apiKey, baseUrl }),
+  groq: (apiKey, baseUrl) => createProvider({ provider: "groq", apiKey, baseUrl }),
+  huggingface: (apiKey, baseUrl) => createProvider({ provider: "huggingface", apiKey, baseUrl }),
+  modelscope: (apiKey, baseUrl) => createProvider({ provider: "modelscope", apiKey, baseUrl }),
+  deepseek: (apiKey, baseUrl) => createProvider({ provider: "deepseek", apiKey, baseUrl })
+};
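The two entry points added here resolve to the same constructors: `ai.<provider>(apiKey, baseUrl)` is shorthand for `createProvider({ provider, apiKey, baseUrl })`. A hypothetical call (the model id is an illustrative assumption, not from the package):

// Hypothetical usage sketch of the new factory API.
const provider = createProvider({ provider: "deepseek", apiKey: "sk-..." });
// equivalently: const provider = ai.deepseek("sk-...");
const reply = await provider.chat({
  model: "deepseek-reasoner",       // illustrative model id
  messages: [{ role: "user", content: "2 + 2 = ?" }],
  reasoning: { effort: "medium" }   // sends thinking: { type: "enabled" }, max_tokens: 4096
});
console.log(reply.reasoning, reply.content);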
 // Annotate the CommonJS export names for ESM import in node:
 0 && (module.exports = {
   BaseProvider,
-  OpenRouterProvider
+  EFFORT_TOKEN_MAP,
+  GeminiProvider,
+  GroqProvider,
+  HuggingFaceProvider,
+  ModelScopeProvider,
+  OpenRouterProvider,
+  ai,
+  createProvider
 });
 //# sourceMappingURL=index.js.map