koishi-plugin-chatluna-google-gemini-adapter 1.1.1 → 1.1.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/index.cjs +51 -51
- package/lib/index.d.ts +1 -0
- package/lib/index.mjs +41 -51
- package/lib/types.d.ts +7 -1
- package/lib/utils.d.ts +1 -0
- package/package.json +2 -2
package/lib/index.cjs
CHANGED
@@ -1,6 +1,8 @@
+var __create = Object.create;
 var __defProp = Object.defineProperty;
 var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
 var __getOwnPropNames = Object.getOwnPropertyNames;
+var __getProtoOf = Object.getPrototypeOf;
 var __hasOwnProp = Object.prototype.hasOwnProperty;
 var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
 var __commonJS = (cb, mod) => function __require() {
@@ -18,19 +20,27 @@ var __copyProps = (to, from, except, desc) => {
   }
   return to;
 };
+var __toESM = (mod, isNodeMode, target) => (target = mod != null ? __create(__getProtoOf(mod)) : {}, __copyProps(
+  // If the importer is in node compatibility mode or this is not an ESM
+  // file that has been converted to a CommonJS file using a Babel-
+  // compatible transform (i.e. "__esModule" has not been set), then set
+  // "default" to the CommonJS "module.exports" for node compatibility.
+  isNodeMode || !mod || !mod.__esModule ? __defProp(target, "default", { value: mod, enumerable: true }) : target,
+  mod
+));
 var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
 
 // src/locales/zh-CN.schema.yml
 var require_zh_CN_schema = __commonJS({
   "src/locales/zh-CN.schema.yml"(exports2, module2) {
-    module2.exports = { $inner: [{}, { $desc: "请求选项", apiKeys: { $inner: ["Gemini 的 API Key", "Gemini API 的请求地址"], $desc: "Gemini 的 API Key 和请求地址列表。" } }, { $desc: "模型配置", maxTokens: "输入的最大上下文 Token(16~2097000,必须是 16 的倍数)。注意:仅当您使用的模型最大 Token 为 8000 及以上时,才建议设置超过 2000 token。", temperature: "回复的随机性程度,数值越高,回复越随机(范围:0~2)。", googleSearch: "为模型启用谷歌搜索。", groundingContentDisplay: "是否显示谷歌搜索结果。", searchThreshold: "搜索的置信度阈值,范围:0~1,设置的数值越低,则越倾向于使用谷歌搜索。" }] };
+    module2.exports = { $inner: [{}, { $desc: "请求选项", apiKeys: { $inner: ["Gemini 的 API Key", "Gemini API 的请求地址"], $desc: "Gemini 的 API Key 和请求地址列表。" } }, { $desc: "模型配置", maxTokens: "输入的最大上下文 Token(16~2097000,必须是 16 的倍数)。注意:仅当您使用的模型最大 Token 为 8000 及以上时,才建议设置超过 2000 token。", temperature: "回复的随机性程度,数值越高,回复越随机(范围:0~2)。", googleSearch: "为模型启用谷歌搜索。", groundingContentDisplay: "是否显示谷歌搜索结果。", imageGeneration: "为模型启用图像生成。目前仅支持 `gemini-2.0-flash-exp` 模型。", searchThreshold: "搜索的置信度阈值,范围:0~1,设置的数值越低,则越倾向于使用谷歌搜索。" }] };
   }
 });
 
 // src/locales/en-US.schema.yml
 var require_en_US_schema = __commonJS({
   "src/locales/en-US.schema.yml"(exports2, module2) {
-    module2.exports = { $inner: [{}, { $desc: "API Configuration", apiKeys: { $inner: ["Gemini API Key", "Gemini API Endpoint (optional)"], $desc: "Gemini API access credentials" } }, { $desc: "Model Parameters", maxTokens: "Max output tokens (16-2097000, multiple of 16). >2000 for 8k+ models", temperature: "Sampling temperature (0-2). Higher: more random, Lower: more deterministic", googleSearch: "Enable Google search", groundingContentDisplay: "Enable display of search results", searchThreshold: "Search confidence [threshold](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval) (0-1). Lower: more likely to use Google search" }] };
+    module2.exports = { $inner: [{}, { $desc: "API Configuration", apiKeys: { $inner: ["Gemini API Key", "Gemini API Endpoint (optional)"], $desc: "Gemini API access credentials" } }, { $desc: "Model Parameters", maxTokens: "Max output tokens (16-2097000, multiple of 16). >2000 for 8k+ models", temperature: "Sampling temperature (0-2). Higher: more random, Lower: more deterministic", googleSearch: "Enable Google search", groundingContentDisplay: "Enable display of search results", imageGeneration: "Enable image generation (only for `gemini-2.0-flash-exp` model)", searchThreshold: "Search confidence [threshold](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval) (0-1). Lower: more likely to use Google search" }] };
   }
 });
 
@@ -67,7 +77,7 @@ var import_zod_to_json_schema = require("zod-to-json-schema");
 async function langchainMessageToGeminiMessage(messages, model) {
   const mappedMessage = await Promise.all(
     messages.map(async (rawMessage) => {
-      const role = messageTypeToGeminiRole(rawMessage.
+      const role = messageTypeToGeminiRole(rawMessage.getType());
       if (role === "function" || rawMessage.additional_kwargs?.function_call != null) {
         return {
           role: "function",
@@ -133,14 +143,23 @@ async function langchainMessageToGeminiMessage(messages, model) {
       };
       if ((model.includes("vision") || model.includes("gemini")) && images != null && !model.includes("gemini-1.0")) {
         for (const image of images) {
+          const mineType = image.split(";")?.[0]?.split(":")?.[1];
+          const data = image.replace(/^data:image\/\w+;base64,/, "");
           result2.parts.push({
             inline_data: {
               // base64 image match type
-              data
-              mime_type: "image/jpeg"
+              data,
+              mime_type: mineType ?? "image/jpeg"
             }
           });
         }
+        result2.parts = result2.parts.filter((uncheckedPart) => {
+          const part = partAsTypeCheck(
+            uncheckedPart,
+            (part2) => part2["text"] != null
+          );
+          return part == null || part.text.length > 0;
+        });
       }
       return result2;
     })
@@ -187,48 +206,6 @@ async function langchainMessageToGeminiMessage(messages, model) {
       ]
     });
   }
-  if (model.includes("vision")) {
-    const textBuffer = [];
-    const last = result.pop();
-    for (let i = 0; i < result.length; i++) {
-      const message = result[i];
-      const text = message.parts[0].text;
-      textBuffer.push(`${message.role}: ${text}`);
-    }
-    const lastParts = last.parts;
-    let lastImagesParts = lastParts.filter(
-      (part) => part.inline_data?.mime_type === "image/jpeg"
-    );
-    if (lastImagesParts.length < 1) {
-      for (let i = result.length - 1; i >= 0; i--) {
-        const message = result[i];
-        const images = message.parts.filter(
-          (part) => part.inline_data?.mime_type === "image/jpeg"
-        );
-        if (images.length > 0) {
-          lastImagesParts = images;
-          break;
-        }
-      }
-    }
-    ;
-    lastParts.filter(
-      (part) => part.text !== void 0 && part.text !== null
-    ).forEach((part) => {
-      textBuffer.push(`${last.role}: ${part.text}`);
-    });
-    return [
-      {
-        role: "user",
-        parts: [
-          {
-            text: textBuffer.join("\n")
-          },
-          ...lastImagesParts
-        ]
-      }
-    ];
-  }
   return result;
 }
 __name(langchainMessageToGeminiMessage, "langchainMessageToGeminiMessage");
@@ -236,6 +213,10 @@ function partAsType(part) {
   return part;
 }
 __name(partAsType, "partAsType");
+function partAsTypeCheck(part, check) {
+  return check(part) ? part : void 0;
+}
+__name(partAsTypeCheck, "partAsTypeCheck");
 function formatToolsToGeminiAITools(tools, config, model) {
   if (tools.length < 1 && !config.googleSearch) {
     return void 0;
@@ -335,6 +316,7 @@ function messageTypeToGeminiRole(type) {
 __name(messageTypeToGeminiRole, "messageTypeToGeminiRole");
 
 // src/requester.ts
+var import_promises = __toESM(require("fs/promises"), 1);
 var GeminiRequester = class extends import_api.ModelRequester {
   constructor(_config, _plugin, _pluginConfig) {
     super();
@@ -380,7 +362,11 @@ var GeminiRequester = class extends import_api.ModelRequester {
         stopSequences: params.stop,
         temperature: params.temperature,
         maxOutputTokens: params.model.includes("vision") ? void 0 : params.maxTokens,
-        topP: params.topP
+        topP: params.topP,
+        responseModalities: params.model.includes(
+          // TODO: Wait for google release to all models
+          "gemini-2.0-flash-exp"
+        ) && this._pluginConfig.imageGeneration ? ["TEXT", "IMAGE"] : void 0
         // thinkingConfig: { includeThoughts: true }
       },
       tools: params.tools != null || this._pluginConfig.googleSearch ? formatToolsToGeminiAITools(
@@ -414,8 +400,10 @@ var GeminiRequester = class extends import_api.ModelRequester {
          }
          for (const candidate of transformValue.candidates) {
            const parts = candidate.content?.parts;
-           if (parts == null || parts.length < 1) {
+           if ((parts == null || parts.length < 1) && candidate.finishReason !== "STOP") {
              throw new Error(chunk);
+           } else if (candidate.finishReason === "STOP" && parts == null) {
+             continue;
            }
            for (const part of parts) {
              controller.enqueue(part);
@@ -440,14 +428,21 @@ var GeminiRequester = class extends import_api.ModelRequester {
    for await (const chunk of iterable) {
      const messagePart = partAsType(chunk);
      const chatFunctionCallingPart = partAsType(chunk);
+     const imagePart = partAsTypeCheck(
+       chunk,
+       (part) => part["inlineData"] != null
+     );
      if (messagePart.text) {
        if (messagePart.thought) {
          reasoningContent += messagePart.text;
          continue;
        }
        content = messagePart.text;
+     } else if (imagePart) {
+       messagePart.text = ``;
+       content = messagePart.text;
      }
-     const deltaFunctionCall = chatFunctionCallingPart
+     const deltaFunctionCall = chatFunctionCallingPart?.functionCall;
      if (deltaFunctionCall) {
        let args = deltaFunctionCall.args?.input ?? deltaFunctionCall.args;
        try {
@@ -472,7 +467,10 @@ var GeminiRequester = class extends import_api.ModelRequester {
          name: functionCall.name,
          arguments: functionCall.args,
          args: functionCall.arguments
-       } : void 0
+       } : void 0,
+       images: imagePart ? [
+         `data:${imagePart.inlineData.mimeType ?? "image/png"};base64,${imagePart.inlineData.data}`
+       ] : void 0
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
      };
      messageChunk.content = content;
@@ -595,6 +593,7 @@ ${groundingContent}`
      }
    }
    const body = JSON.stringify(data);
+   import_promises.default.writeFile("./request.json", body);
    return this._plugin.fetch(requestUrl, {
      body,
      headers: this._buildHeaders(),
@@ -766,6 +765,7 @@ var Config3 = import_koishi.Schema.intersect([
    maxTokens: import_koishi.Schema.number().min(16).max(2097e3).step(16).default(8064),
    temperature: import_koishi.Schema.percent().min(0).max(2).step(0.1).default(0.8),
    googleSearch: import_koishi.Schema.boolean().default(false),
+   imageGeneration: import_koishi.Schema.boolean().default(false),
    groundingContentDisplay: import_koishi.Schema.boolean().default(false),
    searchThreshold: import_koishi.Schema.number().min(0).max(1).step(0.1).default(0.5)
  })
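Taken together, the bundle changes wire the new `imageGeneration` option into both the request body and the streamed response. The sketch below restates that logic in TypeScript; the helper names and interfaces are hypothetical, but the gating condition and the data-URI format mirror what the diff shows in `src/requester.ts`.

```ts
// Hypothetical names; shapes and conditions mirror the diff of src/requester.ts.
interface GenerationParams {
    model: string
}

interface PluginConfig {
    imageGeneration: boolean
}

// Image output is only requested for gemini-2.0-flash-exp (per the TODO in the
// diff, other models are expected to gain support later) and only when the
// user enabled the new imageGeneration option.
function resolveResponseModalities(
    params: GenerationParams,
    pluginConfig: PluginConfig
): ['TEXT', 'IMAGE'] | undefined {
    return params.model.includes('gemini-2.0-flash-exp') &&
        pluginConfig.imageGeneration
        ? ['TEXT', 'IMAGE']
        : undefined
}

// A streamed inlineData part is surfaced to the caller as a data URI,
// defaulting to image/png when the API omits the mime type.
function inlineDataToDataUri(part: {
    inlineData: { mimeType?: string; data?: string }
}): string {
    return `data:${part.inlineData.mimeType ?? 'image/png'};base64,${part.inlineData.data}`
}

// Example
console.log(
    resolveResponseModalities(
        { model: 'gemini-2.0-flash-exp' },
        { imageGeneration: true }
    )
) // -> ["TEXT", "IMAGE"]
```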
package/lib/index.d.ts
CHANGED
@@ -9,6 +9,7 @@ export interface Config extends ChatLunaPlugin.Config {
     googleSearch: boolean;
     searchThreshold: number;
     groundingContentDisplay: boolean;
+    imageGeneration: boolean;
 }
 export declare const Config: Schema<Config>;
 export declare const inject: string[];
package/lib/index.mjs
CHANGED
@@ -8,14 +8,14 @@ var __commonJS = (cb, mod) => function __require() {
 // src/locales/zh-CN.schema.yml
 var require_zh_CN_schema = __commonJS({
   "src/locales/zh-CN.schema.yml"(exports, module) {
-    module.exports = { $inner: [{}, { $desc: "请求选项", apiKeys: { $inner: ["Gemini 的 API Key", "Gemini API 的请求地址"], $desc: "Gemini 的 API Key 和请求地址列表。" } }, { $desc: "模型配置", maxTokens: "输入的最大上下文 Token(16~2097000,必须是 16 的倍数)。注意:仅当您使用的模型最大 Token 为 8000 及以上时,才建议设置超过 2000 token。", temperature: "回复的随机性程度,数值越高,回复越随机(范围:0~2)。", googleSearch: "为模型启用谷歌搜索。", groundingContentDisplay: "是否显示谷歌搜索结果。", searchThreshold: "搜索的置信度阈值,范围:0~1,设置的数值越低,则越倾向于使用谷歌搜索。" }] };
+    module.exports = { $inner: [{}, { $desc: "请求选项", apiKeys: { $inner: ["Gemini 的 API Key", "Gemini API 的请求地址"], $desc: "Gemini 的 API Key 和请求地址列表。" } }, { $desc: "模型配置", maxTokens: "输入的最大上下文 Token(16~2097000,必须是 16 的倍数)。注意:仅当您使用的模型最大 Token 为 8000 及以上时,才建议设置超过 2000 token。", temperature: "回复的随机性程度,数值越高,回复越随机(范围:0~2)。", googleSearch: "为模型启用谷歌搜索。", groundingContentDisplay: "是否显示谷歌搜索结果。", imageGeneration: "为模型启用图像生成。目前仅支持 `gemini-2.0-flash-exp` 模型。", searchThreshold: "搜索的置信度阈值,范围:0~1,设置的数值越低,则越倾向于使用谷歌搜索。" }] };
   }
 });
 
 // src/locales/en-US.schema.yml
 var require_en_US_schema = __commonJS({
   "src/locales/en-US.schema.yml"(exports, module) {
-    module.exports = { $inner: [{}, { $desc: "API Configuration", apiKeys: { $inner: ["Gemini API Key", "Gemini API Endpoint (optional)"], $desc: "Gemini API access credentials" } }, { $desc: "Model Parameters", maxTokens: "Max output tokens (16-2097000, multiple of 16). >2000 for 8k+ models", temperature: "Sampling temperature (0-2). Higher: more random, Lower: more deterministic", googleSearch: "Enable Google search", groundingContentDisplay: "Enable display of search results", searchThreshold: "Search confidence [threshold](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval) (0-1). Lower: more likely to use Google search" }] };
+    module.exports = { $inner: [{}, { $desc: "API Configuration", apiKeys: { $inner: ["Gemini API Key", "Gemini API Endpoint (optional)"], $desc: "Gemini API access credentials" } }, { $desc: "Model Parameters", maxTokens: "Max output tokens (16-2097000, multiple of 16). >2000 for 8k+ models", temperature: "Sampling temperature (0-2). Higher: more random, Lower: more deterministic", googleSearch: "Enable Google search", groundingContentDisplay: "Enable display of search results", imageGeneration: "Enable image generation (only for `gemini-2.0-flash-exp` model)", searchThreshold: "Search confidence [threshold](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval) (0-1). Lower: more likely to use Google search" }] };
   }
 });
 
@@ -61,7 +61,7 @@ import { zodToJsonSchema } from "zod-to-json-schema";
 async function langchainMessageToGeminiMessage(messages, model) {
   const mappedMessage = await Promise.all(
     messages.map(async (rawMessage) => {
-      const role = messageTypeToGeminiRole(rawMessage.
+      const role = messageTypeToGeminiRole(rawMessage.getType());
       if (role === "function" || rawMessage.additional_kwargs?.function_call != null) {
         return {
           role: "function",
@@ -127,14 +127,23 @@ async function langchainMessageToGeminiMessage(messages, model) {
       };
       if ((model.includes("vision") || model.includes("gemini")) && images != null && !model.includes("gemini-1.0")) {
         for (const image of images) {
+          const mineType = image.split(";")?.[0]?.split(":")?.[1];
+          const data = image.replace(/^data:image\/\w+;base64,/, "");
           result2.parts.push({
             inline_data: {
               // base64 image match type
-              data
-              mime_type: "image/jpeg"
+              data,
+              mime_type: mineType ?? "image/jpeg"
             }
           });
         }
+        result2.parts = result2.parts.filter((uncheckedPart) => {
+          const part = partAsTypeCheck(
+            uncheckedPart,
+            (part2) => part2["text"] != null
+          );
+          return part == null || part.text.length > 0;
+        });
       }
       return result2;
     })
@@ -181,48 +190,6 @@ async function langchainMessageToGeminiMessage(messages, model) {
       ]
     });
   }
-  if (model.includes("vision")) {
-    const textBuffer = [];
-    const last = result.pop();
-    for (let i = 0; i < result.length; i++) {
-      const message = result[i];
-      const text = message.parts[0].text;
-      textBuffer.push(`${message.role}: ${text}`);
-    }
-    const lastParts = last.parts;
-    let lastImagesParts = lastParts.filter(
-      (part) => part.inline_data?.mime_type === "image/jpeg"
-    );
-    if (lastImagesParts.length < 1) {
-      for (let i = result.length - 1; i >= 0; i--) {
-        const message = result[i];
-        const images = message.parts.filter(
-          (part) => part.inline_data?.mime_type === "image/jpeg"
-        );
-        if (images.length > 0) {
-          lastImagesParts = images;
-          break;
-        }
-      }
-    }
-    ;
-    lastParts.filter(
-      (part) => part.text !== void 0 && part.text !== null
-    ).forEach((part) => {
-      textBuffer.push(`${last.role}: ${part.text}`);
-    });
-    return [
-      {
-        role: "user",
-        parts: [
-          {
-            text: textBuffer.join("\n")
-          },
-          ...lastImagesParts
-        ]
-      }
-    ];
-  }
   return result;
 }
 __name(langchainMessageToGeminiMessage, "langchainMessageToGeminiMessage");
@@ -230,6 +197,10 @@ function partAsType(part) {
   return part;
 }
 __name(partAsType, "partAsType");
+function partAsTypeCheck(part, check) {
+  return check(part) ? part : void 0;
+}
+__name(partAsTypeCheck, "partAsTypeCheck");
 function formatToolsToGeminiAITools(tools, config, model) {
   if (tools.length < 1 && !config.googleSearch) {
     return void 0;
@@ -329,6 +300,7 @@ function messageTypeToGeminiRole(type) {
 __name(messageTypeToGeminiRole, "messageTypeToGeminiRole");
 
 // src/requester.ts
+import fs from "fs/promises";
 var GeminiRequester = class extends ModelRequester {
   constructor(_config, _plugin, _pluginConfig) {
     super();
@@ -374,7 +346,11 @@ var GeminiRequester = class extends ModelRequester {
         stopSequences: params.stop,
         temperature: params.temperature,
         maxOutputTokens: params.model.includes("vision") ? void 0 : params.maxTokens,
-        topP: params.topP
+        topP: params.topP,
+        responseModalities: params.model.includes(
+          // TODO: Wait for google release to all models
+          "gemini-2.0-flash-exp"
+        ) && this._pluginConfig.imageGeneration ? ["TEXT", "IMAGE"] : void 0
        // thinkingConfig: { includeThoughts: true }
      },
      tools: params.tools != null || this._pluginConfig.googleSearch ? formatToolsToGeminiAITools(
@@ -408,8 +384,10 @@ var GeminiRequester = class extends ModelRequester {
          }
          for (const candidate of transformValue.candidates) {
            const parts = candidate.content?.parts;
-           if (parts == null || parts.length < 1) {
+           if ((parts == null || parts.length < 1) && candidate.finishReason !== "STOP") {
              throw new Error(chunk);
+           } else if (candidate.finishReason === "STOP" && parts == null) {
+             continue;
            }
            for (const part of parts) {
              controller.enqueue(part);
@@ -434,14 +412,21 @@ var GeminiRequester = class extends ModelRequester {
    for await (const chunk of iterable) {
      const messagePart = partAsType(chunk);
      const chatFunctionCallingPart = partAsType(chunk);
+     const imagePart = partAsTypeCheck(
+       chunk,
+       (part) => part["inlineData"] != null
+     );
      if (messagePart.text) {
        if (messagePart.thought) {
          reasoningContent += messagePart.text;
          continue;
        }
        content = messagePart.text;
+     } else if (imagePart) {
+       messagePart.text = ``;
+       content = messagePart.text;
      }
-     const deltaFunctionCall = chatFunctionCallingPart
+     const deltaFunctionCall = chatFunctionCallingPart?.functionCall;
      if (deltaFunctionCall) {
        let args = deltaFunctionCall.args?.input ?? deltaFunctionCall.args;
        try {
@@ -466,7 +451,10 @@ var GeminiRequester = class extends ModelRequester {
          name: functionCall.name,
          arguments: functionCall.args,
          args: functionCall.arguments
-       } : void 0
+       } : void 0,
+       images: imagePart ? [
+         `data:${imagePart.inlineData.mimeType ?? "image/png"};base64,${imagePart.inlineData.data}`
+       ] : void 0
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
      };
      messageChunk.content = content;
@@ -589,6 +577,7 @@ ${groundingContent}`
      }
    }
    const body = JSON.stringify(data);
+   fs.writeFile("./request.json", body);
    return this._plugin.fetch(requestUrl, {
      body,
      headers: this._buildHeaders(),
@@ -760,6 +749,7 @@ var Config3 = Schema.intersect([
    maxTokens: Schema.number().min(16).max(2097e3).step(16).default(8064),
    temperature: Schema.percent().min(0).max(2).step(0.1).default(0.8),
    googleSearch: Schema.boolean().default(false),
+   imageGeneration: Schema.boolean().default(false),
    groundingContentDisplay: Schema.boolean().default(false),
    searchThreshold: Schema.number().min(0).max(1).step(0.1).default(0.5)
  })
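The ESM build mirrors the CJS changes. One behavioural tweak worth noting is in the stream transform: a candidate that reports `finishReason === "STOP"` without any parts is now skipped rather than treated as an error. A minimal sketch of that control flow, assuming the simplified `Candidate` shape below (the real handling lives inside the adapter's response-stream transform):

```ts
// Simplified candidate shape for illustration only.
interface Candidate {
    finishReason?: string
    content?: { parts?: unknown[] }
}

function* usableParts(candidates: Candidate[], rawChunk: string): Generator<unknown> {
    for (const candidate of candidates) {
        const parts = candidate.content?.parts
        if ((parts == null || parts.length < 1) && candidate.finishReason !== 'STOP') {
            // Missing parts are still an error unless the model stopped normally.
            throw new Error(rawChunk)
        } else if (candidate.finishReason === 'STOP' && parts == null) {
            // 1.1.3 skips a bare STOP candidate instead of throwing.
            continue
        }
        // parts is non-null here; the fallback only satisfies the type checker.
        yield* parts ?? []
    }
}
```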
package/lib/types.d.ts
CHANGED
@@ -2,11 +2,17 @@ export interface ChatCompletionResponseMessage {
     role: string;
     parts?: ChatPart[];
 }
-export type ChatPart = ChatMessagePart |
+export type ChatPart = ChatMessagePart | ChatInlineDataPart | ChatFunctionCallingPart | ChatFunctionResponsePart | ChatUploadDataPart;
 export type ChatMessagePart = {
     text: string;
     thought?: boolean;
 };
+export type ChatInlineDataPart = {
+    inlineData: {
+        mimeType: string;
+        data?: string;
+    };
+};
 export type ChatUploadDataPart = {
     inline_data: {
         mime_type: string;
package/lib/utils.d.ts
CHANGED
@@ -4,6 +4,7 @@ import { ChatCompletionFunction, ChatCompletionResponseMessage, ChatCompletionRe
 import { Config } from '.';
 export declare function langchainMessageToGeminiMessage(messages: BaseMessage[], model?: string): Promise<ChatCompletionResponseMessage[]>;
 export declare function partAsType<T extends ChatPart>(part: ChatPart): T;
+export declare function partAsTypeCheck<T extends ChatPart>(part: ChatPart, check: (part: ChatPart & unknown) => boolean): T | undefined;
 export declare function formatToolsToGeminiAITools(tools: StructuredTool[], config: Config, model: string): Record<string, any>;
 export declare function formatToolToGeminiAITool(tool: StructuredTool): ChatCompletionFunction;
 export declare function messageTypeToGeminiRole(type: MessageType): ChatCompletionResponseMessageRoleEnum;
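The new `partAsTypeCheck` declaration pairs with the `ChatInlineDataPart` type added in `types.d.ts`. A self-contained sketch of how such a predicate-based narrowing helper can be used follows; the types and function body below are local mirrors written for illustration (the real union also includes function-call and upload parts), not imports from the package:

```ts
// Local mirrors of the declarations added in this release, for illustration only;
// the shipped definitions live in lib/types.d.ts and lib/utils.d.ts.
type ChatMessagePart = { text: string; thought?: boolean }
type ChatInlineDataPart = { inlineData: { mimeType: string; data?: string } }
type ChatPart = ChatMessagePart | ChatInlineDataPart

// Same contract as the declared partAsTypeCheck: return the part when the
// predicate holds, otherwise undefined.
function partAsTypeCheck<T extends ChatPart>(
    part: ChatPart,
    check: (part: ChatPart) => boolean
): T | undefined {
    return check(part) ? (part as T) : undefined
}

// Example: pull an inline image out of a streamed part, if present.
const chunk: ChatPart = {
    inlineData: { mimeType: 'image/png', data: 'iVBORw0KGgo=' }
}
const imagePart = partAsTypeCheck<ChatInlineDataPart>(
    chunk,
    (part) => (part as ChatInlineDataPart).inlineData != null
)
if (imagePart) {
    console.log(
        `data:${imagePart.inlineData.mimeType};base64,${imagePart.inlineData.data}`
    )
}
```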
package/package.json
CHANGED
@@ -1,7 +1,7 @@
 {
   "name": "koishi-plugin-chatluna-google-gemini-adapter",
   "description": "google-gemini adapter for chatluna",
-  "version": "1.1.1",
+  "version": "1.1.3",
   "main": "lib/index.cjs",
   "module": "lib/index.mjs",
   "typings": "lib/index.d.ts",
@@ -72,7 +72,7 @@
   },
   "peerDependencies": {
     "koishi": "^4.18.4",
-    "koishi-plugin-chatluna": "^1.1.
+    "koishi-plugin-chatluna": "^1.1.3"
   },
   "koishi": {
     "description": {