ai-world-sdk 1.0.3 → 1.0.6

This diff shows the contents of publicly available package versions as published to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registries.
@@ -62,8 +62,9 @@ describe("Langchain SDK Tests", () => {
  });
  test("ChatGoogleGenerativeAI - non-streaming invocation", async () => {
  const gemini = new index_1.ChatGoogleGenerativeAI({
- modelName: "gemini-2.0-flash-exp-image-generation",
+ modelName: "gemini-2.5-flash-image",
  temperature: 0.7,
+ provider: "gemini",
  });
  const response = await gemini.invoke([
  new index_1.HumanMessage("Hello, please introduce artificial intelligence in one sentence."),
@@ -78,6 +79,7 @@ describe("Langchain SDK Tests", () => {
  const gemini = new index_1.ChatGoogleGenerativeAI({
  modelName: "gemini-2.0-flash-exp-image-generation",
  temperature: 0.7,
+ provider: "gemini",
  });
  let fullText = "";
  for await (const chunk of gemini.stream([
@@ -95,6 +97,7 @@ describe("Langchain SDK Tests", () => {
  test("createChatModel factory function", async () => {
  const model = (0, index_1.createChatModel)("gemini-2.0-flash-exp-image-generation", {
  temperature: 0.7,
+ provider: "gemini",
  });
  const response = await model.invoke([
  new index_1.SystemMessage("You are a helpful assistant."),
@@ -110,6 +113,7 @@ describe("Langchain SDK Tests", () => {
  const gemini = new index_1.ChatGoogleGenerativeAI({
  modelName: "gemini-2.0-flash-exp-image-generation",
  temperature: 0.5,
+ provider: "gemini",
  });
  // Bind new parameters via the bind method
  const boundModel = gemini.bind({ temperature: 0.9 });
@@ -125,6 +129,7 @@ describe("Langchain SDK Tests", () => {
  const gemini = new index_1.ChatGoogleGenerativeAI({
  modelName: "gemini-2.0-flash-exp-image-generation",
  temperature: 0.7,
+ provider: "gemini",
  });
  // Batch invocation
  const responses = await gemini.batch([
@@ -142,13 +147,26 @@ describe("Langchain SDK Tests", () => {
  console.log("✅ batch method test passed");
  console.log(`Number of batch responses: ${responses.length}`);
  }, 30000);
+ // Add OpenAI model test
+ test("ChatOpenAI - basic test", async () => {
+ const openai = new index_1.ChatOpenAI({
+ modelName: "gpt-4o-mini",
+ temperature: 0.7,
+ provider: "aihubmix",
+ });
+ const response = await openai.invoke([
+ new index_1.HumanMessage("Hello, please introduce yourself"),
+ ]);
+ expect(response).toBeDefined();
+ expect(response.content).toBeDefined();
+ console.log("✅ ChatOpenAI basic test passed");
+ console.log("AI:", response.content);
+ }, 30000);
  test("ChatGoogleGenerativeAI - aihubmix.com no stream", async () => {
  const gemini = new index_1.ChatGoogleGenerativeAI({
- modelName: "gemini-2.0-flash-exp-image-generation",
+ modelName: "gemini-2.5-flash-image",
  temperature: 0.7,
- headers: {
- "X-Base-Url": "https://aihubmix.com",
- },
+ provider: "aihubmix",
  });
  const response = await gemini.invoke([
  new index_1.HumanMessage("What does aihubmix.com do?"),
@@ -161,11 +179,9 @@ describe("Langchain SDK Tests", () => {
  }, 30000);
  test("ChatGoogleGenerativeAI - aihubmix.com stream", async () => {
  const gemini = new index_1.ChatGoogleGenerativeAI({
- modelName: "gemini-2.0-flash-exp-image-generation",
+ modelName: "gemini-2.5-flash-image",
  temperature: 0.7,
- headers: {
- "X-Base-Url": "https://aihubmix.com",
- },
+ provider: "aihubmix",
  });
  let fullText = "";
  for await (const chunk of gemini.stream([
@@ -184,6 +200,7 @@ describe("Langchain SDK Tests", () => {
  const gemini = new index_1.ChatGoogleGenerativeAI({
  modelName: "gemini-3-pro-image-preview",
  temperature: 0.7,
+ provider: "gemini",
  });
  const response = await gemini.invoke([
  new index_1.HumanMessage("Please generate an image illustrating the concept of artificial intelligence"),
@@ -215,43 +232,7 @@ describe("Langchain SDK Tests", () => {
  const gemini = new index_1.ChatGoogleGenerativeAI({
  modelName: "gemini-3-pro-image-preview",
  temperature: 0.7,
- headers: {
- "X-Base-Url": "https://aihubmix.com",
- },
- });
- const response = await gemini.invoke([
- new index_1.HumanMessage("Please generate an image illustrating the concept of artificial intelligence"),
- ]);
- expect(response).toBeDefined();
- expect(response.content).toBeDefined();
- // The image generation response may contain both text and image data
- // Check the response content (may be a string, an array, or an object containing an image)
- const content = response.content;
- if (typeof content === "string") {
- console.log("✅ aihubmix.com image generation test passed (text response)");
- console.log("AI:", content);
- }
- else if (Array.isArray(content)) {
- // Multimodal response, may contain images
- const hasImage = content.some((item) => item?.type === "image_url" ||
- item?.image_url ||
- (typeof item === "string" && item.startsWith("data:image")));
- console.log("✅ aihubmix.com image generation test passed (multimodal response)");
- console.log("Response type: array, contains image:", hasImage);
- console.log("Response content:", JSON.stringify(content, null, 2));
- }
- else {
- console.log("✅ aihubmix.com image generation test passed (other format)");
- console.log("Response content:", content);
- }
- }, 60000);
- test("ChatGoogleGenerativeAI - seedream image generation", async () => {
- const gemini = new index_1.ChatGoogleGenerativeAI({
- modelName: "doubao-seedream-4-5-251128",
- temperature: 0.7,
- headers: {
- "X-Base-Url": "https://ark.cn-beijing.volces.com/api/v3/images/generations",
- },
+ provider: "aihubmix",
  });
  const response = await gemini.invoke([
  new index_1.HumanMessage("Please generate an image illustrating the concept of artificial intelligence"),
@@ -447,4 +428,462 @@ describe("Langchain SDK Tests", () => {
  console.log("✅ Task created with the content parameter");
  console.log("Task ID:", task.id);
  }, 30000);
+ // ========== New test cases ==========
+ test("ChatAnthropic - basic test", async () => {
+ const claude = new index_1.ChatAnthropic({
+ modelName: "claude-3-sonnet-20240229",
+ temperature: 0.7,
+ provider: "aihubmix",
+ });
+ const response = await claude.invoke([
+ new index_1.HumanMessage("Hello, please introduce yourself in one sentence"),
+ ]);
+ expect(response).toBeDefined();
+ expect(response.content).toBeDefined();
+ expect(typeof response.content).toBe("string");
+ console.log("✅ ChatAnthropic basic test passed");
+ console.log("AI:", response.content);
+ }, 30000);
+ test("ChatAnthropic - streaming invocation", async () => {
+ const claude = new index_1.ChatAnthropic({
+ modelName: "claude-3-sonnet-20240229",
+ temperature: 0.7,
+ provider: "aihubmix",
+ });
+ let fullText = "";
+ for await (const chunk of claude.stream([
+ new index_1.HumanMessage("Please introduce artificial intelligence in one sentence, then explain its importance."),
+ ])) {
+ fullText += extractTextFromChunk(chunk);
+ }
+ expect(fullText.length).toBeGreaterThan(0);
+ console.log("✅ ChatAnthropic streaming test passed");
+ console.log(`Full reply length: ${fullText.length} characters`);
+ }, 30000);
+ test("createChatModel - Claude model", async () => {
+ const model = (0, index_1.createChatModel)("claude-3-sonnet-20240229", {
+ temperature: 0.7,
+ provider: "aihubmix",
+ });
+ expect(model).toBeInstanceOf(index_1.ChatAnthropic);
+ const response = await model.invoke([
+ new index_1.HumanMessage("What is the capital of France?"),
+ ]);
+ expect(response).toBeDefined();
+ expect(response.content).toBeDefined();
+ console.log("✅ createChatModel Claude test passed");
+ console.log("AI:", response.content);
+ }, 30000);
+ test("createChatModel - Doubao model", async () => {
+ const model = (0, index_1.createChatModel)("doubao-seedream-4-5-251128", {
+ temperature: 0.7,
+ provider: "doubao",
+ });
+ expect(model).toBeInstanceOf(index_1.ChatOpenAI);
+ const response = await model.invoke([
+ new index_1.HumanMessage("Hello"),
+ ]);
+ expect(response).toBeDefined();
+ expect(response.content).toBeDefined();
+ console.log("✅ createChatModel Doubao test passed");
+ console.log("AI:", response.content);
+ }, 30000);
+ test("createChatModel - GPT 模型", async () => {
492
+ const model = (0, index_1.createChatModel)("gpt-4o-mini", {
493
+ temperature: 0.7,
494
+ provider: "aihubmix",
495
+ });
496
+ expect(model).toBeInstanceOf(index_1.ChatOpenAI);
497
+ const response = await model.invoke([
498
+ new index_1.HumanMessage("Hello"),
499
+ ]);
500
+ expect(response).toBeDefined();
501
+ expect(response.content).toBeDefined();
502
+ console.log("✅ createChatModel GPT 测试成功");
503
+ console.log("AI:", response.content);
504
+ }, 30000);
505
+ test("BaseChatModel - getModelName 方法", () => {
506
+ const gemini = new index_1.ChatGoogleGenerativeAI({
507
+ modelName: "gemini-2.0-flash-exp-image-generation",
508
+ temperature: 0.7,
509
+ provider: "gemini",
510
+ });
511
+ expect(gemini.getModelName()).toBe("gemini-2.0-flash-exp-image-generation");
512
+ console.log("✅ getModelName 方法测试成功");
513
+ });
514
+ test("BaseChatModel - bindTools 方法", async () => {
515
+ const gemini = new index_1.ChatGoogleGenerativeAI({
516
+ modelName: "gemini-2.0-flash-exp-image-generation",
517
+ temperature: 0.7,
518
+ provider: "gemini",
519
+ });
520
+ const tools = [
521
+ {
522
+ type: "function",
523
+ function: {
524
+ name: "get_weather",
525
+ description: "Get the current weather in a location",
526
+ parameters: {
527
+ type: "object",
528
+ properties: {
529
+ location: {
530
+ type: "string",
531
+ description: "The city and state, e.g. San Francisco, CA",
532
+ },
533
+ },
534
+ required: ["location"],
535
+ },
536
+ },
537
+ },
538
+ ];
539
+ const modelWithTools = gemini.bindTools(tools);
540
+ const response = await modelWithTools.invoke([
541
+ new index_1.HumanMessage("What is the weather in Paris?"),
542
+ ]);
543
+ expect(response).toBeDefined();
544
+ expect(response.content).toBeDefined();
545
+ console.log("✅ bindTools 方法测试成功");
546
+ console.log("AI:", response.content);
547
+ }, 30000);
548
+ test("BaseChatModel - 多模态输入(文本+图像)", async () => {
549
+ const gemini = new index_1.ChatGoogleGenerativeAI({
550
+ modelName: "gemini-2.0-flash-exp-image-generation",
551
+ temperature: 0.7,
552
+ provider: "gemini",
553
+ });
554
+ const response = await gemini.invoke([
555
+ new index_1.HumanMessage([
556
+ { type: "text", text: "Describe this image" },
557
+ {
558
+ type: "image_url",
559
+ image_url: "data:image/jpeg;base64,/9j/4AAQSkZJRg...",
560
+ },
561
+ ]),
562
+ ]);
563
+ expect(response).toBeDefined();
564
+ expect(response.content).toBeDefined();
565
+ console.log("✅ 多模态输入测试成功");
566
+ console.log("AI:", response.content);
567
+ }, 30000);
568
+ test("消息类型 - SystemMessage", () => {
569
+ const message = new index_1.SystemMessage("You are a helpful assistant.");
570
+ expect(message.role).toBe("system");
571
+ expect(message.content).toBe("You are a helpful assistant.");
572
+ console.log("✅ SystemMessage 测试成功");
573
+ });
574
+ test("消息类型 - AIMessage", () => {
575
+ const message = new index_1.AIMessage("Hello! How can I help you?");
576
+ expect(message.role).toBe("assistant");
577
+ expect(message.content).toBe("Hello! How can I help you?");
578
+ console.log("✅ AIMessage 测试成功");
579
+ });
580
+ test("消息类型 - HumanMessage with array content", () => {
581
+ const message = new index_1.HumanMessage([
582
+ { type: "text", text: "Hello" },
583
+ { type: "image_url", image_url: "https://example.com/image.jpg" },
584
+ ]);
585
+ expect(message.role).toBe("user");
586
+ expect(Array.isArray(message.content)).toBe(true);
587
+ console.log("✅ HumanMessage with array content 测试成功");
588
+ });
589
+ test("GeminiImageGenerationClient - 基础图像生成", async () => {
590
+ const imageClient = new index_1.GeminiImageGenerationClient({});
591
+ const result = await imageClient.generate({
592
+ prompt: "A beautiful sunset over the ocean with vibrant colors",
593
+ model: "gemini-2.0-flash-exp-image-generation",
594
+ number_of_images: 1,
595
+ aspect_ratio: "16:9",
596
+ });
597
+ expect(result).toBeDefined();
598
+ expect(result.created).toBeDefined();
599
+ expect(typeof result.created).toBe("number");
600
+ expect(result.data).toBeDefined();
601
+ expect(Array.isArray(result.data)).toBe(true);
602
+ expect(result.data.length).toBeGreaterThan(0);
603
+ result.data.forEach((item) => {
604
+ expect(item).toBeDefined();
605
+ expect(item.url || item.b64_json).toBeDefined();
606
+ });
607
+ console.log("✅ GeminiImageGenerationClient 基础测试成功");
608
+ console.log(`生成图像数量: ${result.data.length}`);
609
+ console.log("图像 URL:", result.data[0]?.url || "Base64 编码");
610
+ if (result.text) {
611
+ console.log("图像描述:", result.text);
612
+ }
613
+ }, 120000);
614
+ test("GeminiImageGenerationClient - 多图像生成", async () => {
615
+ const imageClient = new index_1.GeminiImageGenerationClient({});
616
+ const result = await imageClient.generate({
617
+ prompt: "A futuristic city skyline",
618
+ model: "gemini-2.0-flash-exp-image-generation",
619
+ number_of_images: 2,
620
+ aspect_ratio: "16:9",
621
+ });
622
+ expect(result).toBeDefined();
623
+ expect(result.data).toBeDefined();
624
+ expect(Array.isArray(result.data)).toBe(true);
625
+ expect(result.data.length).toBe(2);
626
+ console.log("✅ GeminiImageGenerationClient 多图像生成测试成功");
627
+ console.log(`生成图像数量: ${result.data.length}`);
628
+ }, 120000);
629
+ test("GeminiImageGenerationClient - 不同宽高比测试", async () => {
630
+ const imageClient = new index_1.GeminiImageGenerationClient({});
631
+ const aspectRatios = [
632
+ "1:1",
633
+ "16:9",
634
+ "9:16",
635
+ ];
636
+ for (const aspectRatio of aspectRatios) {
637
+ const result = await imageClient.generate({
638
+ prompt: "A beautiful landscape",
639
+ model: "gemini-2.0-flash-exp-image-generation",
640
+ aspect_ratio: aspectRatio,
641
+ number_of_images: 1,
642
+ });
643
+ expect(result).toBeDefined();
644
+ expect(result.data).toBeDefined();
645
+ expect(result.data.length).toBeGreaterThan(0);
646
+ console.log(`✅ Gemini 宽高比 ${aspectRatio} 测试成功`);
647
+ }
648
+ }, 180000);
649
+ test("DoubaoImageGenerationClient - quality 参数测试", async () => {
650
+ const imageClient = new index_1.DoubaoImageGenerationClient({});
651
+ const qualities = ["standard", "hd"];
652
+ for (const quality of qualities) {
653
+ const result = await imageClient.generate({
654
+ prompt: "A beautiful sunset",
655
+ model: "doubao-seedream-4-5-251128",
656
+ size: "2K",
657
+ quality: quality,
658
+ n: 1,
659
+ });
660
+ expect(result).toBeDefined();
661
+ expect(result.data).toBeDefined();
662
+ expect(result.data.length).toBeGreaterThan(0);
663
+ console.log(`✅ quality ${quality} 测试成功`);
664
+ }
665
+ }, 180000);
666
+ test("DoubaoImageGenerationClient - style 参数测试", async () => {
667
+ const imageClient = new index_1.DoubaoImageGenerationClient({});
668
+ const styles = ["vivid", "natural"];
669
+ for (const style of styles) {
670
+ const result = await imageClient.generate({
671
+ prompt: "A beautiful landscape",
672
+ model: "doubao-seedream-4-5-251128",
673
+ size: "2K",
674
+ style: style,
675
+ n: 1,
676
+ });
677
+ expect(result).toBeDefined();
678
+ expect(result.data).toBeDefined();
679
+ expect(result.data.length).toBeGreaterThan(0);
680
+ console.log(`✅ style ${style} 测试成功`);
681
+ }
682
+ }, 180000);
683
+ test("DoubaoImageGenerationClient - response_format 参数测试", async () => {
684
+ const imageClient = new index_1.DoubaoImageGenerationClient({});
685
+ // 测试 URL 格式
686
+ const urlResult = await imageClient.generate({
687
+ prompt: "A beautiful sunset",
688
+ model: "doubao-seedream-4-5-251128",
689
+ size: "2K",
690
+ response_format: "url",
691
+ n: 1,
692
+ });
693
+ expect(urlResult).toBeDefined();
694
+ expect(urlResult.data[0]?.url).toBeDefined();
695
+ console.log("✅ response_format url 测试成功");
696
+ // 测试 b64_json 格式
697
+ const b64Result = await imageClient.generate({
698
+ prompt: "A beautiful sunset",
699
+ model: "doubao-seedream-4-5-251128",
700
+ size: "2K",
701
+ response_format: "b64_json",
702
+ n: 1,
703
+ });
704
+ expect(b64Result).toBeDefined();
705
+ expect(b64Result.data[0]?.b64_json).toBeDefined();
706
+ console.log("✅ response_format b64_json 测试成功");
707
+ }, 180000);
708
+ test("DoubaoImageGenerationClient - watermark 参数测试", async () => {
709
+ const imageClient = new index_1.DoubaoImageGenerationClient({});
710
+ const result = await imageClient.generate({
711
+ prompt: "A beautiful landscape",
712
+ model: "doubao-seedream-4-5-251128",
713
+ size: "2K",
714
+ watermark: false,
715
+ n: 1,
716
+ });
717
+ expect(result).toBeDefined();
718
+ expect(result.data).toBeDefined();
719
+ expect(result.data.length).toBeGreaterThan(0);
720
+ console.log("✅ watermark 参数测试成功");
721
+ }, 120000);
722
+ test("sdkConfig - 全局配置测试", () => {
723
+ const originalBaseUrl = index_1.sdkConfig.getServerUrl();
724
+ const originalToken = index_1.sdkConfig.getToken();
725
+ const originalHeaders = index_1.sdkConfig.getHeaders();
726
+ const originalDebug = index_1.sdkConfig.getDebug();
727
+ // 测试 setBaseUrl 和 getServerUrl
728
+ index_1.sdkConfig.setBaseUrl("http://test.example.com");
729
+ expect(index_1.sdkConfig.getServerUrl()).toBe("http://test.example.com");
730
+ // 测试 setToken 和 getToken
731
+ index_1.sdkConfig.setToken("test-token-123");
732
+ expect(index_1.sdkConfig.getToken()).toBe("test-token-123");
733
+ // 测试 setHeaders 和 getHeaders
734
+ index_1.sdkConfig.setHeaders({ "X-Custom-Header": "test-value" });
735
+ const headers = index_1.sdkConfig.getHeaders();
736
+ expect(headers["X-Custom-Header"]).toBe("test-value");
737
+ // 测试 setDebug 和 getDebug
738
+ index_1.sdkConfig.setDebug(true);
739
+ expect(index_1.sdkConfig.getDebug()).toBe(true);
740
+ index_1.sdkConfig.setDebug(false);
741
+ expect(index_1.sdkConfig.getDebug()).toBe(false);
742
+ // 测试 reset
743
+ index_1.sdkConfig.reset();
744
+ expect(index_1.sdkConfig.getServerUrl()).toBeNull();
745
+ expect(index_1.sdkConfig.getToken()).toBeNull();
746
+ expect(Object.keys(index_1.sdkConfig.getHeaders()).length).toBe(0);
747
+ expect(index_1.sdkConfig.getDebug()).toBe(false);
748
+ // 恢复原始配置
749
+ if (originalBaseUrl)
750
+ index_1.sdkConfig.setBaseUrl(originalBaseUrl);
751
+ if (originalToken)
752
+ index_1.sdkConfig.setToken(originalToken);
753
+ index_1.sdkConfig.setHeaders(originalHeaders);
754
+ index_1.sdkConfig.setDebug(originalDebug);
755
+ console.log("✅ sdkConfig 全局配置测试成功");
756
+ });
757
+ test("BaseChatModel - bind 方法链式调用", async () => {
758
+ const gemini = new index_1.ChatGoogleGenerativeAI({
759
+ modelName: "gemini-2.0-flash-exp-image-generation",
760
+ temperature: 0.5,
761
+ provider: "gemini",
762
+ });
763
+ // 链式绑定多个参数
764
+ const boundModel = gemini
765
+ .bind({ temperature: 0.9 })
766
+ .bind({ maxTokens: 1000 });
767
+ const response = await boundModel.invoke([
768
+ new index_1.HumanMessage("用一句话介绍人工智能。"),
769
+ ]);
770
+ expect(response).toBeDefined();
771
+ expect(response.content).toBeDefined();
772
+ console.log("✅ bind 方法链式调用测试成功");
773
+ console.log("AI:", response.content);
774
+ }, 30000);
775
+ test("ChatOpenAI - 流式调用", async () => {
776
+ const openai = new index_1.ChatOpenAI({
777
+ modelName: "gpt-4",
778
+ temperature: 0.7,
779
+ provider: "aihubmix",
780
+ });
781
+ let fullText = "";
782
+ for await (const chunk of openai.stream([
783
+ new index_1.HumanMessage("请用一句话介绍人工智能。"),
784
+ ])) {
785
+ fullText += extractTextFromChunk(chunk);
786
+ }
787
+ expect(fullText.length).toBeGreaterThan(0);
788
+ console.log("✅ ChatOpenAI 流式测试成功");
789
+ console.log(`完整回复长度: ${fullText.length} 字符`);
790
+ }, 30000);
791
+ test("ChatOpenAI - batch 方法", async () => {
792
+ const openai = new index_1.ChatOpenAI({
793
+ modelName: "gpt-4",
794
+ temperature: 0.7,
795
+ provider: "aihubmix",
796
+ });
797
+ const responses = await openai.batch([
798
+ [new index_1.HumanMessage("什么是人工智能?")],
799
+ [new index_1.HumanMessage("什么是机器学习?")],
800
+ ]);
801
+ expect(responses).toBeDefined();
802
+ expect(Array.isArray(responses)).toBe(true);
803
+ expect(responses.length).toBe(2);
804
+ responses.forEach((response) => {
805
+ expect(response).toBeDefined();
806
+ expect(response.content).toBeDefined();
807
+ });
808
+ console.log("✅ ChatOpenAI batch 方法测试成功");
809
+ }, 30000);
810
+ test("VideoGenerationClient - 不同宽高比测试", async () => {
811
+ const videoClient = new index_1.VideoGenerationClient({});
812
+ const aspectRatios = ["16:9", "9:16", "1:1"];
813
+ for (const aspectRatio of aspectRatios) {
814
+ const task = await videoClient.create({
815
+ prompt: "A beautiful landscape",
816
+ model: "doubao-seedance-1-0-pro-fast-251015",
817
+ duration: 3,
818
+ aspect_ratio: aspectRatio,
819
+ });
820
+ expect(task).toBeDefined();
821
+ expect(task.id).toBeDefined();
822
+ console.log(`✅ 宽高比 ${aspectRatio} 测试成功,任务 ID: ${task.id}`);
823
+ }
824
+ }, 60000);
825
+ test("VideoGenerationClient - 不同时长测试", async () => {
826
+ const videoClient = new index_1.VideoGenerationClient({});
827
+ const durations = [1, 3, 5];
828
+ for (const duration of durations) {
829
+ const task = await videoClient.create({
830
+ prompt: "A short video clip",
831
+ model: "doubao-seedance-1-0-pro-fast-251015",
832
+ duration: duration,
833
+ });
834
+ expect(task).toBeDefined();
835
+ expect(task.id).toBeDefined();
836
+ console.log(`✅ 时长 ${duration} 秒测试成功,任务 ID: ${task.id}`);
837
+ }
838
+ }, 60000);
839
+ test("VideoGenerationClient - return_last_frame 参数测试", async () => {
840
+ const videoClient = new index_1.VideoGenerationClient({});
841
+ const task = await videoClient.create({
842
+ prompt: "A beautiful sunset",
843
+ model: "doubao-seedance-1-0-pro-fast-251015",
844
+ duration: 3,
845
+ return_last_frame: true,
846
+ });
847
+ expect(task).toBeDefined();
848
+ expect(task.id).toBeDefined();
849
+ // 查询任务,检查是否有 last_frame_url
850
+ const taskStatus = await videoClient.get(task.id);
851
+ if (taskStatus.status === "succeeded" && taskStatus.content?.last_frame_url) {
852
+ expect(taskStatus.content.last_frame_url).toBeDefined();
853
+ console.log("✅ return_last_frame 测试成功,最后一帧 URL:", taskStatus.content.last_frame_url);
854
+ }
855
+ else {
856
+ console.log("✅ return_last_frame 参数已设置,任务 ID:", task.id);
857
+ }
858
+ }, 30000);
859
+ test("AIMessageChunk - 构造函数测试", () => {
860
+ // 测试从字符串创建
861
+ const chunk1 = new index_1.AIMessageChunk("Hello");
862
+ expect(chunk1.content).toBe("Hello");
863
+ expect(chunk1.role).toBe("assistant");
864
+ // 测试从数组创建
865
+ const chunk2 = new index_1.AIMessageChunk([
866
+ { type: "text", text: "Hello" },
867
+ ]);
868
+ expect(Array.isArray(chunk2.content)).toBe(true);
869
+ // 测试从对象创建
870
+ const chunk3 = new index_1.AIMessageChunk({
871
+ content: "Hello",
872
+ id: "chunk-123",
873
+ response_metadata: { token_count: 5 },
874
+ });
875
+ expect(chunk3.content).toBe("Hello");
876
+ expect(chunk3.id).toBe("chunk-123");
877
+ expect(chunk3.response_metadata).toBeDefined();
878
+ console.log("✅ AIMessageChunk 构造函数测试成功");
879
+ });
880
+ test("createChatModel - 不支持的模型应该抛出错误", () => {
881
+ expect(() => {
882
+ (0, index_1.createChatModel)("unsupported-model", {
883
+ temperature: 0.7,
884
+ provider: "aihubmix",
885
+ });
886
+ }).toThrow("Unsupported model");
887
+ console.log("✅ createChatModel 错误处理测试成功");
888
+ });
450
889
  });
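Taken together, the new test cases above pin down how createChatModel behaves in 1.0.6: claude-* model names are expected to yield a ChatAnthropic instance, gpt-* and doubao-* names a ChatOpenAI instance, and unknown names throw "Unsupported model", with the new provider option selecting the gateway. The sketch below restates that usage outside the test harness; it assumes the index_1 exports used in the tests are also re-exported from the package root as "ai-world-sdk", which this diff does not itself show.

```ts
// Sketch only: the import path and top-level wiring are assumptions;
// the names and behaviours come from the test expectations above.
import { createChatModel, ChatAnthropic, ChatOpenAI, HumanMessage } from "ai-world-sdk";

async function main(): Promise<void> {
  // The model name picks the backing class; `provider` picks the gateway.
  const claude = createChatModel("claude-3-sonnet-20240229", { temperature: 0.7, provider: "aihubmix" });
  console.log(claude instanceof ChatAnthropic); // true, per the "createChatModel - Claude model" test

  const doubao = createChatModel("doubao-seedream-4-5-251128", { temperature: 0.7, provider: "doubao" });
  console.log(doubao instanceof ChatOpenAI); // true, per the "createChatModel - Doubao model" test

  const reply = await claude.invoke([new HumanMessage("What is the capital of France?")]);
  console.log(reply.content);

  // Unknown names throw, per the "unsupported model should throw" test:
  // createChatModel("unsupported-model", { temperature: 0.7, provider: "aihubmix" }); // throws "Unsupported model"
}

main().catch(console.error);
```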
package/dist/base.d.ts CHANGED
@@ -3,14 +3,15 @@
  * Similar to LangChain.js BaseChatModel
  */
  import { BaseMessage, AIMessage, AIMessageChunk } from "./messages";
+ export type AIModelProvider = "aihubmix" | "doubao" | "gemini";
  export interface BaseChatModelParams {
+ provider: AIModelProvider;
  baseUrl?: string;
  headers?: Record<string, string>;
  temperature?: number;
  maxTokens?: number;
  topP?: number;
  modelName?: string;
- provider?: string;
  apiKey?: string;
  }
  export interface ChatResult {
@@ -44,7 +45,7 @@ export declare abstract class BaseChatModel {
  protected topP?: number;
  protected modelName: string;
  protected boundOptions?: BindOptions;
- protected provider?: string;
+ protected provider: string;
  protected apiKey?: string;
  constructor(config: {
  baseUrl?: string;
@@ -53,7 +54,7 @@ export declare abstract class BaseChatModel {
  maxTokens?: number;
  topP?: number;
  modelName: string;
- provider?: string;
+ provider: AIModelProvider;
  apiKey?: string;
  });
  /**
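The base.d.ts hunks above carry the core API change of 1.0.6: provider moves from an optional string to a required AIModelProvider ("aihubmix" | "doubao" | "gemini") on both BaseChatModelParams and the BaseChatModel constructor, which is why every constructor call in the updated tests now passes it explicitly. A minimal caller-side sketch under that typing follows; the root import path, and whether AIModelProvider is re-exported from it, are assumptions rather than anything shown in this diff.

```ts
// Sketch only: option names are taken from the .d.ts excerpt above;
// the import path is an assumption.
import { ChatGoogleGenerativeAI, HumanMessage, type AIModelProvider } from "ai-world-sdk";

const provider: AIModelProvider = "gemini"; // must be "aihubmix" | "doubao" | "gemini"

const gemini = new ChatGoogleGenerativeAI({
  modelName: "gemini-2.5-flash-image",
  temperature: 0.7,
  provider, // omitting this field is a compile-time error as of 1.0.6
});

async function main(): Promise<void> {
  const reply = await gemini.invoke([new HumanMessage("Hello")]);
  console.log(reply.content);
}

main().catch(console.error);
```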
@@ -10,7 +10,7 @@ class ChatGoogleGenerativeAI extends base_1.BaseChatModel {
  constructor(config) {
  super({
  ...config,
- modelName: config.modelName || "gemini-1.5-pro",
+ modelName: config.modelName || "gemini-3-pro-image-preview",
  });
  }
  }
@@ -10,7 +10,7 @@ class ChatOpenAI extends base_1.BaseChatModel {
  constructor(config) {
  super({
  ...config,
- modelName: config.modelName || "gpt-3.5-turbo",
+ modelName: config.modelName || "gpt-4o-mini",
  });
  }
  }
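The final two hunks (the compiled chat-model constructors; their file paths are not shown in this diff) change the fallback models: ChatGoogleGenerativeAI now defaults to gemini-3-pro-image-preview rather than gemini-1.5-pro, and ChatOpenAI to gpt-4o-mini rather than gpt-3.5-turbo. A hedged sketch of the observable effect when modelName is omitted; getModelName() comes from the test diff above, and the import path is again an assumption.

```ts
import { ChatGoogleGenerativeAI, ChatOpenAI } from "ai-world-sdk";

// With modelName omitted, the 1.0.6 constructors fall back to the new defaults.
const gemini = new ChatGoogleGenerativeAI({ provider: "gemini" });
const openai = new ChatOpenAI({ provider: "aihubmix" });

console.log(gemini.getModelName()); // "gemini-3-pro-image-preview" (1.0.3 defaulted to "gemini-1.5-pro")
console.log(openai.getModelName()); // "gpt-4o-mini" (1.0.3 defaulted to "gpt-3.5-turbo")
```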