ai-world-sdk 1.0.16 → 1.0.17
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +101 -0
- package/dist/__tests__/example.test.js +160 -3
- package/dist/base.d.ts +6 -0
- package/dist/base.js +30 -1
- package/dist/gemini-image-generation.d.ts +4 -0
- package/dist/gemini-image-generation.js +13 -0
- package/dist/messages.d.ts +3 -3
- package/package.json +1 -1
package/README.md
CHANGED
@@ -47,6 +47,8 @@ const geminiModel = new ChatGoogleGenerativeAI({
   modelName: 'gemini-2.5-flash-image',
   temperature: 0.7,
   provider: 'gemini', // or 'aihubmix', 'api2img'
+  vertexai: false, // optional: use VertexAI (only takes effect when provider is 'gemini')
+  jsonSchema: undefined, // optional: JSON Schema for structured output
 });
 
 // GPT model (using the aihubmix provider)

@@ -54,6 +56,7 @@ const gptModel = new ChatOpenAI({
   modelName: 'gpt-4o-mini',
   temperature: 0.7,
   provider: 'aihubmix', // or 'api2img'
+  jsonSchema: undefined, // optional: JSON Schema for structured output
 });
 
 // Claude model (using the aihubmix provider)

@@ -224,6 +227,9 @@ const model = createChatModel('gemini-2.5-flash-image', {
 - `provider: 'gemini'` - calls the Google Gemini API directly
 - `provider: 'doubao'` - uses the Doubao service
 
+**Structured output parameter:**
+- `jsonSchema?: Record<string, any>` - JSON Schema definition used for structured output (via `with_structured_output`)
+
 ### Image Generation
 
 #### DoubaoImageGenerationClient

@@ -694,6 +700,101 @@ const response2 = await model.invoke([
 ]);
 ```
 
+### Structured Output (JSON Schema)
+
+The `jsonSchema` parameter makes the model return structured JSON data instead of free-form text.
+
+```typescript
+import { ChatOpenAI, ChatGoogleGenerativeAI, HumanMessage, createChatModel } from 'ai-world-sdk';
+
+// Structured output with ChatOpenAI
+const openaiModel = new ChatOpenAI({
+  modelName: 'gpt-4o-mini',
+  temperature: 0.7,
+  provider: 'aihubmix',
+  jsonSchema: {
+    type: 'object',
+    properties: {
+      name: { type: 'string', description: 'user name' },
+      age: { type: 'integer', description: 'user age' },
+      email: { type: 'string', description: 'user email' },
+    },
+    required: ['name', 'age'],
+  },
+});
+
+const response = await openaiModel.invoke([
+  new HumanMessage('Please extract the following information: Zhang San, 25 years old, email zhangsan@example.com'),
+]);
+
+// The response content will be a structured JSON object
+console.log(response.content); // { name: 'Zhang San', age: 25, email: 'zhangsan@example.com' }
+```
+
+**Structured output with a Gemini model:**
+
+```typescript
+const geminiModel = new ChatGoogleGenerativeAI({
+  modelName: 'gemini-2.5-flash',
+  temperature: 0.7,
+  provider: 'gemini',
+  jsonSchema: {
+    type: 'object',
+    properties: {
+      summary: { type: 'string', description: 'summary' },
+      keywords: {
+        type: 'array',
+        items: { type: 'string' },
+        description: 'list of keywords',
+      },
+      sentiment: {
+        type: 'string',
+        enum: ['positive', 'neutral', 'negative'],
+        description: 'sentiment',
+      },
+    },
+    required: ['summary', 'keywords'],
+  },
+});
+
+const response = await geminiModel.invoke([
+  new HumanMessage("Analyze the sentiment of this sentence: 'The weather is great today and I feel happy!'"),
+]);
+```
+
+**Using the createChatModel factory function:**
+
+```typescript
+const model = createChatModel('gpt-4o-mini', {
+  temperature: 0.7,
+  provider: 'aihubmix',
+  jsonSchema: {
+    type: 'object',
+    properties: {
+      title: { type: 'string', description: 'article title' },
+      content: { type: 'string', description: 'article content' },
+      tags: {
+        type: 'array',
+        items: { type: 'string' },
+        description: 'list of tags',
+      },
+    },
+    required: ['title', 'content'],
+  },
+});
+
+const response = await model.invoke([
+  new HumanMessage('Generate a short article about artificial intelligence, including a title, content, and tags'),
+]);
+```
+
+**Notes:**
+- `jsonSchema` must conform to the [JSON Schema](https://json-schema.org/) specification
+- For OpenAI-compatible models (such as GPT and Doubao), `title` and `description` are added to the JSON Schema automatically if missing
+- For Gemini and Anthropic models, the provided JSON Schema is used as-is
+- The structured-output response content may be either a JSON string or an object; parse it according to what you actually receive
+- Structured output uses LangChain's `with_structured_output` method, implemented under the hood with `method="json_schema"`
+
 ### Streaming Responses
 
 ```typescript
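The README notes above say the structured-output content may arrive either as a JSON string or as an already-parsed object. A minimal caller-side sketch that normalizes both cases; the helper name and generic signature are illustrative and not part of the SDK:

```typescript
// Illustrative helper (not part of ai-world-sdk): normalize structured output
// that may be a JSON string or an already-parsed object.
function asStructured<T = Record<string, unknown>>(content: unknown): T | null {
  if (typeof content === 'string') {
    try {
      return JSON.parse(content) as T; // JSON string payload
    } catch {
      return null; // plain text, not valid JSON
    }
  }
  if (content !== null && typeof content === 'object' && !Array.isArray(content)) {
    return content as T; // already an object
  }
  return null; // multi-part arrays and other shapes are not structured output
}

// Usage sketch, assuming a model configured with jsonSchema as shown above:
// const data = asStructured<{ name: string; age: number }>(response.content);
// if (data) console.log(data.name, data.age);
```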

package/dist/__tests__/example.test.js
CHANGED

@@ -43,6 +43,7 @@ const index_1 = require("../index");
 dotenv.config();
 index_1.sdkConfig.setBaseUrl("http://localhost:8000");
 index_1.sdkConfig.setToken(process.env.AUTH_TOKEN || process.env.TOKEN || "");
+// sdkConfig.setDebug(true);
 function extractTextFromChunk(chunk) {
     if (typeof chunk.content === "string") {
         return chunk.content;

@@ -648,10 +649,10 @@ describe("Langchain SDK Tests", () => {
         });
         const result = await imageClient.generate({
             prompt: 'A beautiful sunset over the ocean',
-            model: 'gemini-
+            model: 'gemini-2.5-flash-image',
             aspect_ratio: '16:9',
             image_size: '1K',
-            response_modalities: ['IMAGE'],
+            response_modalities: ['IMAGE', 'TEXT'],
         });
         expect(result).toBeDefined();
         expect(result.created).toBeDefined();

@@ -713,7 +714,7 @@ describe("Langchain SDK Tests", () => {
             model: 'gemini-3-pro-image-preview',
             aspect_ratio: '16:9',
             image_size: '1K',
-            response_modalities: ['IMAGE'], // images only
+            response_modalities: ['IMAGE', 'TEXT'], // images only
         });
         expect(result).toBeDefined();
         expect(result.created).toBeDefined();

@@ -1010,4 +1011,160 @@ describe("Langchain SDK Tests", () => {
         }).toThrow("Unsupported model");
         console.log("✅ createChatModel error-handling test passed");
     });
+    test("ChatOpenAI - structured output test", async () => {
+        const openai = new index_1.ChatOpenAI({
+            modelName: "gpt-5.1",
+            temperature: 0.7,
+            provider: "aihubmix",
+            jsonSchema: {
+                type: "object",
+                properties: {
+                    name: { type: "string", description: "user name" },
+                    age: { type: "integer", description: "user age" },
+                    email: { type: "string", description: "user email" },
+                },
+                required: ["name", "age"],
+            },
+        });
+        const response = await openai.invoke([
+            new index_1.HumanMessage("Please extract the following information: Zhang San, 25 years old, email zhangsan@example.com"),
+        ]);
+        expect(response).toBeDefined();
+        expect(response.content).toBeDefined();
+        // Structured output should return an object
+        const content = response.content;
+        if (typeof content === "string") {
+            // If it is a string, try to parse it as JSON
+            try {
+                const parsed = JSON.parse(content);
+                expect(parsed).toHaveProperty("name");
+                expect(parsed).toHaveProperty("age");
+                expect(typeof parsed.name).toBe("string");
+                expect(typeof parsed.age).toBe("number");
+                console.log("✅ ChatOpenAI structured output test passed (string format)");
+                console.log("Structured data:", parsed);
+            }
+            catch (e) {
+                // If it is not JSON, at least verify there is content
+                expect(content.length).toBeGreaterThan(0);
+                console.log("✅ ChatOpenAI structured output test passed (text format)");
+                console.log("Response content:", content);
+            }
+        }
+        else if (typeof content === "object") {
+            // If it is already an object
+            expect(content).toHaveProperty("name");
+            expect(content).toHaveProperty("age");
+            console.log("✅ ChatOpenAI structured output test passed (object format)");
+            console.log("Structured data:", content);
+        }
+        else {
+            console.log("✅ ChatOpenAI structured output test passed (other format)");
+            console.log("Response content:", content);
+        }
+    }, 30000);
+    test("ChatGoogleGenerativeAI - structured output test", async () => {
+        const gemini = new index_1.ChatGoogleGenerativeAI({
+            modelName: "gemini-2.5-flash",
+            temperature: 0.7,
+            provider: "gemini",
+            jsonSchema: {
+                type: "object",
+                properties: {
+                    summary: { type: "string", description: "summary" },
+                    keywords: {
+                        type: "array",
+                        items: { type: "string" },
+                        description: "list of keywords",
+                    },
+                    sentiment: {
+                        type: "string",
+                        enum: ["positive", "neutral", "negative"],
+                        description: "sentiment",
+                    },
+                },
+                required: ["summary", "keywords"],
+            },
+        });
+        const response = await gemini.invoke([
+            new index_1.HumanMessage("Analyze the sentiment of this sentence: 'The weather is great today and I feel happy!'"),
+        ]);
+        expect(response).toBeDefined();
+        expect(response.content).toBeDefined();
+        const content = response.content;
+        if (typeof content === "string") {
+            try {
+                const parsed = JSON.parse(content);
+                expect(parsed).toHaveProperty("summary");
+                expect(parsed).toHaveProperty("keywords");
+                expect(Array.isArray(parsed.keywords)).toBe(true);
+                console.log("✅ ChatGoogleGenerativeAI structured output test passed (string format)");
+                console.log("Structured data:", parsed);
+            }
+            catch (e) {
+                expect(content.length).toBeGreaterThan(0);
+                console.log("✅ ChatGoogleGenerativeAI structured output test passed (text format)");
+                console.log("Response content:", content);
+            }
+        }
+        else if (typeof content === "object") {
+            expect(content).toHaveProperty("summary");
+            expect(content).toHaveProperty("keywords");
+            console.log("✅ ChatGoogleGenerativeAI structured output test passed (object format)");
+            console.log("Structured data:", content);
+        }
+        else {
+            console.log("✅ ChatGoogleGenerativeAI structured output test passed (other format)");
+            console.log("Response content:", content);
+        }
+    }, 30000);
+    test("createChatModel - structured output test", async () => {
+        const model = (0, index_1.createChatModel)("gpt-5.1", {
+            temperature: 0.7,
+            provider: "aihubmix",
+            jsonSchema: {
+                type: "object",
+                properties: {
+                    title: { type: "string", description: "article title" },
+                    content: { type: "string", description: "article content" },
+                    tags: {
+                        type: "array",
+                        items: { type: "string" },
+                        description: "list of tags",
+                    },
+                },
+                required: ["title", "content"],
+            },
+        });
+        const response = await model.invoke([
+            new index_1.HumanMessage("Generate a short article about artificial intelligence, including a title, content, and tags"),
+        ]);
+        expect(response).toBeDefined();
+        expect(response.content).toBeDefined();
+        const content = response.content;
+        if (typeof content === "string") {
+            try {
+                const parsed = JSON.parse(content);
+                expect(parsed).toHaveProperty("title");
+                expect(parsed).toHaveProperty("content");
+                console.log("✅ createChatModel structured output test passed (string format)");
+                console.log("Structured data:", parsed);
+            }
+            catch (e) {
+                expect(content.length).toBeGreaterThan(0);
+                console.log("✅ createChatModel structured output test passed (text format)");
+                console.log("Response content:", content);
+            }
+        }
+        else if (typeof content === "object") {
+            expect(content).toHaveProperty("title");
+            expect(content).toHaveProperty("content");
+            console.log("✅ createChatModel structured output test passed (object format)");
+            console.log("Structured data:", content);
+        }
+        else {
+            console.log("✅ createChatModel structured output test passed (other format)");
+            console.log("Response content:", content);
+        }
+    }, 30000);
 });
package/dist/base.d.ts
CHANGED
@@ -13,6 +13,8 @@ export interface BaseChatModelParams {
     topP?: number;
     modelName?: string;
     apiKey?: string;
+    vertexai?: boolean;
+    jsonSchema?: Record<string, any>;
 }
 export interface ChatResult {
     content: string;

@@ -47,6 +49,8 @@ export declare abstract class BaseChatModel {
     protected boundOptions?: BindOptions;
     protected provider: string;
     protected apiKey?: string;
+    protected vertexai?: boolean;
+    protected jsonSchema?: Record<string, any>;
     constructor(config: {
         baseUrl?: string;
         headers?: Record<string, string>;

@@ -56,6 +60,8 @@ export declare abstract class BaseChatModel {
         modelName: string;
         provider: AIModelProvider;
         apiKey?: string;
+        vertexai?: boolean;
+        jsonSchema?: Record<string, any>;
     });
     /**
      * Invoke the model with messages (non-streaming)
package/dist/base.js
CHANGED
@@ -14,13 +14,15 @@ class BaseChatModel {
         const globalHeaders = config_1.sdkConfig.getHeaders();
         this.headers = {
             "Content-Type": "application/json",
-
+            Authorization: `Bearer ${config_1.sdkConfig.getToken()}`,
             "X-Base-Url": config.baseUrl || "",
             ...globalHeaders,
             ...config.headers,
         };
         this.provider = config.provider;
         this.apiKey = config.apiKey;
+        this.vertexai = config.vertexai;
+        this.jsonSchema = config.jsonSchema;
         this.temperature = config.temperature ?? 0.7;
         this.maxTokens = config.maxTokens;
         this.topP = config.topP;

@@ -45,6 +47,14 @@ class BaseChatModel {
             provider: this.provider,
             api_key_env: this.apiKey,
         };
+        // Add the vertexai parameter (only takes effect when the provider is gemini)
+        if (this.provider === "gemini" && this.vertexai !== undefined) {
+            requestBody.vertexai = this.vertexai;
+        }
+        // Add the response_schema parameter
+        if (this.jsonSchema) {
+            requestBody.response_schema = this.jsonSchema;
+        }
         const url = `${config_1.sdkConfig.getServerUrl()}/api/langchain-proxy/invoke`;
         (0, log_1.logRequest)("POST", url, this.headers, requestBody);
         const response = await fetch(url, {

@@ -60,6 +70,9 @@ class BaseChatModel {
         // Return the standard AIMessage format (serialized via message_to_dict)
         const data = (await response.json());
         (0, log_1.logResponse)(response.status, response.statusText, response.headers, data);
+        if (this.jsonSchema) {
+            return new messages_1.AIMessage(data);
+        }
         // Create an AIMessage object from the standard AIMessage format
         const content = data.content || "";
         return new messages_1.AIMessage(content);

@@ -83,6 +96,14 @@ class BaseChatModel {
             provider: this.provider,
             api_key_env: this.apiKey,
         };
+        // Add the vertexai parameter (only takes effect when the provider is gemini)
+        if (this.provider === "gemini" && this.vertexai !== undefined) {
+            requestBody.vertexai = this.vertexai;
+        }
+        // Add the response_schema parameter
+        if (this.jsonSchema) {
+            requestBody.response_schema = this.jsonSchema;
+        }
         const url = `${config_1.sdkConfig.getServerUrl()}/api/langchain-proxy/stream`;
         const streamHeaders = {
             ...this.headers,

@@ -143,6 +164,14 @@ class BaseChatModel {
             provider: this.provider,
             api_key_env: this.apiKey,
         };
+        // Add the vertexai parameter (only takes effect when the provider is gemini)
+        if (this.provider === "gemini" && this.vertexai !== undefined) {
+            requestBody.vertexai = this.vertexai;
+        }
+        // Add the response_schema parameter
+        if (this.jsonSchema) {
+            requestBody.response_schema = this.jsonSchema;
+        }
         const url = `${config_1.sdkConfig.getServerUrl()}/api/langchain-proxy/batch`;
         (0, log_1.logRequest)("POST", url, this.headers, requestBody);
         const response = await fetch(url, {
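Taken together, the base.js hunks mean every proxy call (invoke, stream, batch) now carries the two new fields when they are set. A rough sketch of the resulting payload shape, inferred only from the lines visible in this diff; all other request fields are omitted because they are not shown here:

```typescript
// Approximate shape of the langchain-proxy request body as extended above.
// Only fields visible in this diff are listed; everything else is assumed.
interface LangchainProxyRequestBody {
  provider: string;
  api_key_env?: string;
  vertexai?: boolean;                     // added only when provider === "gemini" and vertexai is set
  response_schema?: Record<string, any>;  // added only when a jsonSchema was configured
  [key: string]: unknown;                 // remaining fields (messages, model, ...) not shown in this hunk
}

// Example of what a gemini call with structured output would send:
const exampleBody: LangchainProxyRequestBody = {
  provider: 'gemini',
  vertexai: false,
  response_schema: {
    type: 'object',
    properties: { summary: { type: 'string' } },
    required: ['summary'],
  },
};
```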

package/dist/gemini-image-generation.d.ts
CHANGED

@@ -7,11 +7,13 @@ export interface GeminiImageGenerationConfig {
     provider?: GeminiImageGenerationProvider;
     baseUrl?: string;
     headers?: Record<string, string>;
+    vertexai?: boolean;
 }
 export interface GeminiImageGenerationRequest {
     prompt: string;
     model?: string;
     image?: string | string[];
+    vertexai?: boolean;
     aspect_ratio?: "1:1" | "2:3" | "3:2" | "3:4" | "4:3" | "4:5" | "5:4" | "9:16" | "16:9" | "21:9";
     image_size?: "1K" | "2K" | "4K";
     temperature?: number;

@@ -34,6 +36,7 @@ export interface GeminiImageChatRequest {
     chat_id?: string;
     model?: string;
     provider?: GeminiImageGenerationProvider;
+    vertexai?: boolean;
     aspect_ratio?: "1:1" | "2:3" | "3:2" | "3:4" | "4:3" | "4:5" | "5:4" | "9:16" | "16:9" | "21:9";
     image_size?: "1K" | "2K" | "4K";
     response_modalities?: ("TEXT" | "IMAGE")[];

@@ -48,6 +51,7 @@ export interface GeminiImageChatResponse {
 export declare class GeminiImageGenerationClient {
     private headers;
     private provider;
+    private vertexai?;
     constructor(config?: GeminiImageGenerationConfig);
     /**
      * Generate images

package/dist/gemini-image-generation.js
CHANGED

@@ -11,6 +11,7 @@ class GeminiImageGenerationClient {
     constructor(config) {
         // Determine the provider (defaults to gemini)
        this.provider = config?.provider || "gemini";
+        this.vertexai = config?.vertexai;
         // Merge global headers with the configured headers
         const globalHeaders = config_1.sdkConfig.getHeaders();
         this.headers = {

@@ -62,6 +63,12 @@ class GeminiImageGenerationClient {
         if (request.user) {
             requestBody.user = request.user;
         }
+        if (request.vertexai !== undefined && this.provider === "gemini") {
+            requestBody.vertexai = request.vertexai;
+        }
+        else if (this.vertexai !== undefined && this.provider === "gemini") {
+            requestBody.vertexai = this.vertexai;
+        }
         const url = `${config_1.sdkConfig.getServerUrl()}/api/gemini-image-proxy/generate`;
         (0, log_1.logRequest)("POST", url, this.headers, requestBody);
         const response = await fetch(url, {

@@ -129,6 +136,12 @@ class GeminiImageGenerationClient {
         if (request.provider) {
             requestBody.provider = request.provider;
         }
+        if (request.vertexai !== undefined && (request.provider === "gemini" || this.provider === "gemini")) {
+            requestBody.vertexai = request.vertexai;
+        }
+        else if (this.vertexai !== undefined && this.provider === "gemini") {
+            requestBody.vertexai = this.vertexai;
+        }
         const url = `${config_1.sdkConfig.getServerUrl()}/api/gemini-image-proxy/chat`;
         (0, log_1.logRequest)("POST", url, this.headers, requestBody);
         const response = await fetch(url, {
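The two hunks above give a request-level `vertexai` flag precedence over the client-level one, and only forward the flag when the provider resolves to gemini. A usage sketch, under the assumption (not confirmed by this diff) that `GeminiImageGenerationClient` is re-exported from the package entry point:

```typescript
// Sketch only; assumes GeminiImageGenerationClient is exported from 'ai-world-sdk'.
import { GeminiImageGenerationClient } from 'ai-world-sdk';

async function generateWithVertexAI(): Promise<void> {
  const client = new GeminiImageGenerationClient({
    provider: 'gemini',
    vertexai: true, // client-level default
  });

  // The request-level value wins: this call sends vertexai: false.
  const result = await client.generate({
    prompt: 'A beautiful sunset over the ocean',
    model: 'gemini-2.5-flash-image',
    vertexai: false,
    aspect_ratio: '16:9',
    image_size: '1K',
    response_modalities: ['IMAGE', 'TEXT'],
  });
  console.log(result.created);
}
```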
package/dist/messages.d.ts
CHANGED
@@ -8,7 +8,7 @@ export interface MessageContent {
     image_url?: string;
 }
 export interface BaseMessage {
-    content: string | MessageContent[]
+    content: string | MessageContent[] | Record<string, any>;
     role?: "user" | "assistant" | "system";
 }
 export declare class HumanMessage implements BaseMessage {

@@ -17,9 +17,9 @@ export declare class HumanMessage implements BaseMessage {
     constructor(content: string | MessageContent[]);
 }
 export declare class AIMessage implements BaseMessage {
-    content: string | MessageContent[]
+    content: string | MessageContent[] | Record<string, any>;
     role: "assistant";
-    constructor(content: string | MessageContent[]);
+    constructor(content: string | MessageContent[] | Record<string, any>);
 }
 export declare class SystemMessage implements BaseMessage {
     content: string | MessageContent[];
CHANGED