koishi-plugin-chatluna-google-gemini-adapter 1.1.0 → 1.1.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/index.cjs +27 -48
- package/lib/index.d.ts +1 -0
- package/lib/index.mjs +27 -48
- package/lib/types.d.ts +7 -1
- package/lib/utils.d.ts +1 -0
- package/package.json +2 -2
package/lib/index.cjs
CHANGED

@@ -23,14 +23,14 @@ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: tru
 // src/locales/zh-CN.schema.yml
 var require_zh_CN_schema = __commonJS({
 "src/locales/zh-CN.schema.yml"(exports2, module2) {
-module2.exports = { $inner: [{}, { $desc: "请求选项", apiKeys: { $inner: ["Gemini 的 API Key", "Gemini API 的请求地址"], $desc: "Gemini 的 API Key 和请求地址列表。" } }, { $desc: "模型配置", maxTokens: "输入的最大上下文 Token(16~2097000,必须是 16 的倍数)。注意:仅当您使用的模型最大 Token 为 8000 及以上时,才建议设置超过 2000 token。", temperature: "回复的随机性程度,数值越高,回复越随机(范围:0~2)。", googleSearch: "为模型启用谷歌搜索。", groundingContentDisplay: "是否显示谷歌搜索结果。", searchThreshold: "搜索的置信度阈值,范围:0~1,设置的数值越低,则越倾向于使用谷歌搜索。" }] };
+module2.exports = { $inner: [{}, { $desc: "请求选项", apiKeys: { $inner: ["Gemini 的 API Key", "Gemini API 的请求地址"], $desc: "Gemini 的 API Key 和请求地址列表。" } }, { $desc: "模型配置", maxTokens: "输入的最大上下文 Token(16~2097000,必须是 16 的倍数)。注意:仅当您使用的模型最大 Token 为 8000 及以上时,才建议设置超过 2000 token。", temperature: "回复的随机性程度,数值越高,回复越随机(范围:0~2)。", googleSearch: "为模型启用谷歌搜索。", groundingContentDisplay: "是否显示谷歌搜索结果。", imageGeneration: "为模型启用图像生成。目前仅支持 `gemini-2.0-flash-exp` 模型。", searchThreshold: "搜索的置信度阈值,范围:0~1,设置的数值越低,则越倾向于使用谷歌搜索。" }] };
 }
 });

 // src/locales/en-US.schema.yml
 var require_en_US_schema = __commonJS({
 "src/locales/en-US.schema.yml"(exports2, module2) {
-module2.exports = { $inner: [{}, { $desc: "API Configuration", apiKeys: { $inner: ["Gemini API Key", "Gemini API Endpoint (optional)"], $desc: "Gemini API access credentials" } }, { $desc: "Model Parameters", maxTokens: "Max output tokens (16-2097000, multiple of 16). >2000 for 8k+ models", temperature: "Sampling temperature (0-2). Higher: more random, Lower: more deterministic", googleSearch: "Enable Google search", groundingContentDisplay: "Enable display of search results", searchThreshold: "Search confidence [threshold](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval) (0-1). Lower: more likely to use Google search" }] };
+module2.exports = { $inner: [{}, { $desc: "API Configuration", apiKeys: { $inner: ["Gemini API Key", "Gemini API Endpoint (optional)"], $desc: "Gemini API access credentials" } }, { $desc: "Model Parameters", maxTokens: "Max output tokens (16-2097000, multiple of 16). >2000 for 8k+ models", temperature: "Sampling temperature (0-2). Higher: more random, Lower: more deterministic", googleSearch: "Enable Google search", groundingContentDisplay: "Enable display of search results", imageGeneration: "Enable image generation (only for `gemini-2.0-flash-exp` model)", searchThreshold: "Search confidence [threshold](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval) (0-1). Lower: more likely to use Google search" }] };
 }
 });

@@ -187,48 +187,6 @@ async function langchainMessageToGeminiMessage(messages, model) {
 ]
 });
 }
-if (model.includes("vision")) {
-const textBuffer = [];
-const last = result.pop();
-for (let i = 0; i < result.length; i++) {
-const message = result[i];
-const text = message.parts[0].text;
-textBuffer.push(`${message.role}: ${text}`);
-}
-const lastParts = last.parts;
-let lastImagesParts = lastParts.filter(
-(part) => part.inline_data?.mime_type === "image/jpeg"
-);
-if (lastImagesParts.length < 1) {
-for (let i = result.length - 1; i >= 0; i--) {
-const message = result[i];
-const images = message.parts.filter(
-(part) => part.inline_data?.mime_type === "image/jpeg"
-);
-if (images.length > 0) {
-lastImagesParts = images;
-break;
-}
-}
-}
-;
-lastParts.filter(
-(part) => part.text !== void 0 && part.text !== null
-).forEach((part) => {
-textBuffer.push(`${last.role}: ${part.text}`);
-});
-return [
-{
-role: "user",
-parts: [
-{
-text: textBuffer.join("\n")
-},
-...lastImagesParts
-]
-}
-];
-}
 return result;
 }
 __name(langchainMessageToGeminiMessage, "langchainMessageToGeminiMessage");

@@ -236,6 +194,10 @@ function partAsType(part) {
 return part;
 }
 __name(partAsType, "partAsType");
+function partAsTypeCheck(part, check) {
+return check(part) ? part : void 0;
+}
+__name(partAsTypeCheck, "partAsTypeCheck");
 function formatToolsToGeminiAITools(tools, config, model) {
 if (tools.length < 1 && !config.googleSearch) {
 return void 0;

@@ -380,7 +342,11 @@ var GeminiRequester = class extends import_api.ModelRequester {
 stopSequences: params.stop,
 temperature: params.temperature,
 maxOutputTokens: params.model.includes("vision") ? void 0 : params.maxTokens,
-topP: params.topP
+topP: params.topP,
+responseModalities: params.model.includes(
+// TODO: Wait for google release to all models
+"gemini-2.0-flash-exp"
+) && this._pluginConfig.imageGeneration ? ["TEXT", "IMAGE"] : void 0
 // thinkingConfig: { includeThoughts: true }
 },
 tools: params.tools != null || this._pluginConfig.googleSearch ? formatToolsToGeminiAITools(

@@ -414,8 +380,10 @@ var GeminiRequester = class extends import_api.ModelRequester {
 }
 for (const candidate of transformValue.candidates) {
 const parts = candidate.content?.parts;
-if (parts == null || parts.length < 1) {
+if ((parts == null || parts.length < 1) && candidate.finishReason !== "STOP") {
 throw new Error(chunk);
+} else if (candidate.finishReason === "STOP" && parts == null) {
+continue;
 }
 for (const part of parts) {
 controller.enqueue(part);

@@ -440,14 +408,21 @@ var GeminiRequester = class extends import_api.ModelRequester {
 for await (const chunk of iterable) {
 const messagePart = partAsType(chunk);
 const chatFunctionCallingPart = partAsType(chunk);
+const imagePart = partAsTypeCheck(
+chunk,
+(part) => part["inlineData"] != null
+);
 if (messagePart.text) {
 if (messagePart.thought) {
 reasoningContent += messagePart.text;
 continue;
 }
 content = messagePart.text;
+} else if (imagePart) {
+messagePart.text = ``;
+content = messagePart.text;
 }
-const deltaFunctionCall = chatFunctionCallingPart
+const deltaFunctionCall = chatFunctionCallingPart?.functionCall;
 if (deltaFunctionCall) {
 let args = deltaFunctionCall.args?.input ?? deltaFunctionCall.args;
 try {

@@ -472,7 +447,10 @@ var GeminiRequester = class extends import_api.ModelRequester {
 name: functionCall.name,
 arguments: functionCall.args,
 args: functionCall.arguments
-} : void 0
+} : void 0,
+images: imagePart ? [
+`data:${imagePart.inlineData.mime_type};base64,${imagePart.inlineData.data})`
+] : void 0
 // eslint-disable-next-line @typescript-eslint/no-explicit-any
 };
 messageChunk.content = content;

@@ -766,6 +744,7 @@ var Config3 = import_koishi.Schema.intersect([
 maxTokens: import_koishi.Schema.number().min(16).max(2097e3).step(16).default(8064),
 temperature: import_koishi.Schema.percent().min(0).max(2).step(0.1).default(0.8),
 googleSearch: import_koishi.Schema.boolean().default(false),
+imageGeneration: import_koishi.Schema.boolean().default(false),
 groundingContentDisplay: import_koishi.Schema.boolean().default(false),
 searchThreshold: import_koishi.Schema.number().min(0).max(1).step(0.1).default(0.5)
 })
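In practice, the generationConfig change above reduces to a single conditional: image output is only requested when the model name contains "gemini-2.0-flash-exp" and the new imageGeneration option is enabled. The sketch below restates that check as a standalone TypeScript helper; the GeminiPluginConfig interface and the helper name are illustrative only, since the published code performs the check inline inside GeminiRequester.

interface GeminiPluginConfig {
    imageGeneration: boolean
}

// Mirrors the ternary added to generationConfig in 1.1.2: request TEXT + IMAGE
// output only for gemini-2.0-flash-exp with imageGeneration switched on,
// otherwise leave responseModalities undefined so other models are unaffected.
function resolveResponseModalities(
    model: string,
    config: GeminiPluginConfig
): ['TEXT', 'IMAGE'] | undefined {
    return model.includes('gemini-2.0-flash-exp') && config.imageGeneration
        ? ['TEXT', 'IMAGE']
        : undefined
}

// resolveResponseModalities('gemini-2.0-flash-exp', { imageGeneration: true })
//   -> ['TEXT', 'IMAGE']
// resolveResponseModalities('gemini-1.5-pro', { imageGeneration: true })
//   -> undefined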
package/lib/index.d.ts
CHANGED

@@ -9,6 +9,7 @@ export interface Config extends ChatLunaPlugin.Config {
 googleSearch: boolean;
 searchThreshold: number;
 groundingContentDisplay: boolean;
+imageGeneration: boolean;
 }
 export declare const Config: Schema<Config>;
 export declare const inject: string[];
package/lib/index.mjs
CHANGED

@@ -8,14 +8,14 @@ var __commonJS = (cb, mod) => function __require() {
 // src/locales/zh-CN.schema.yml
 var require_zh_CN_schema = __commonJS({
 "src/locales/zh-CN.schema.yml"(exports, module) {
-module.exports = { $inner: [{}, { $desc: "请求选项", apiKeys: { $inner: ["Gemini 的 API Key", "Gemini API 的请求地址"], $desc: "Gemini 的 API Key 和请求地址列表。" } }, { $desc: "模型配置", maxTokens: "输入的最大上下文 Token(16~2097000,必须是 16 的倍数)。注意:仅当您使用的模型最大 Token 为 8000 及以上时,才建议设置超过 2000 token。", temperature: "回复的随机性程度,数值越高,回复越随机(范围:0~2)。", googleSearch: "为模型启用谷歌搜索。", groundingContentDisplay: "是否显示谷歌搜索结果。", searchThreshold: "搜索的置信度阈值,范围:0~1,设置的数值越低,则越倾向于使用谷歌搜索。" }] };
+module.exports = { $inner: [{}, { $desc: "请求选项", apiKeys: { $inner: ["Gemini 的 API Key", "Gemini API 的请求地址"], $desc: "Gemini 的 API Key 和请求地址列表。" } }, { $desc: "模型配置", maxTokens: "输入的最大上下文 Token(16~2097000,必须是 16 的倍数)。注意:仅当您使用的模型最大 Token 为 8000 及以上时,才建议设置超过 2000 token。", temperature: "回复的随机性程度,数值越高,回复越随机(范围:0~2)。", googleSearch: "为模型启用谷歌搜索。", groundingContentDisplay: "是否显示谷歌搜索结果。", imageGeneration: "为模型启用图像生成。目前仅支持 `gemini-2.0-flash-exp` 模型。", searchThreshold: "搜索的置信度阈值,范围:0~1,设置的数值越低,则越倾向于使用谷歌搜索。" }] };
 }
 });

 // src/locales/en-US.schema.yml
 var require_en_US_schema = __commonJS({
 "src/locales/en-US.schema.yml"(exports, module) {
-module.exports = { $inner: [{}, { $desc: "API Configuration", apiKeys: { $inner: ["Gemini API Key", "Gemini API Endpoint (optional)"], $desc: "Gemini API access credentials" } }, { $desc: "Model Parameters", maxTokens: "Max output tokens (16-2097000, multiple of 16). >2000 for 8k+ models", temperature: "Sampling temperature (0-2). Higher: more random, Lower: more deterministic", googleSearch: "Enable Google search", groundingContentDisplay: "Enable display of search results", searchThreshold: "Search confidence [threshold](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval) (0-1). Lower: more likely to use Google search" }] };
+module.exports = { $inner: [{}, { $desc: "API Configuration", apiKeys: { $inner: ["Gemini API Key", "Gemini API Endpoint (optional)"], $desc: "Gemini API access credentials" } }, { $desc: "Model Parameters", maxTokens: "Max output tokens (16-2097000, multiple of 16). >2000 for 8k+ models", temperature: "Sampling temperature (0-2). Higher: more random, Lower: more deterministic", googleSearch: "Enable Google search", groundingContentDisplay: "Enable display of search results", imageGeneration: "Enable image generation (only for `gemini-2.0-flash-exp` model)", searchThreshold: "Search confidence [threshold](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval) (0-1). Lower: more likely to use Google search" }] };
 }
 });

@@ -181,48 +181,6 @@ async function langchainMessageToGeminiMessage(messages, model) {
 ]
 });
 }
-if (model.includes("vision")) {
-const textBuffer = [];
-const last = result.pop();
-for (let i = 0; i < result.length; i++) {
-const message = result[i];
-const text = message.parts[0].text;
-textBuffer.push(`${message.role}: ${text}`);
-}
-const lastParts = last.parts;
-let lastImagesParts = lastParts.filter(
-(part) => part.inline_data?.mime_type === "image/jpeg"
-);
-if (lastImagesParts.length < 1) {
-for (let i = result.length - 1; i >= 0; i--) {
-const message = result[i];
-const images = message.parts.filter(
-(part) => part.inline_data?.mime_type === "image/jpeg"
-);
-if (images.length > 0) {
-lastImagesParts = images;
-break;
-}
-}
-}
-;
-lastParts.filter(
-(part) => part.text !== void 0 && part.text !== null
-).forEach((part) => {
-textBuffer.push(`${last.role}: ${part.text}`);
-});
-return [
-{
-role: "user",
-parts: [
-{
-text: textBuffer.join("\n")
-},
-...lastImagesParts
-]
-}
-];
-}
 return result;
 }
 __name(langchainMessageToGeminiMessage, "langchainMessageToGeminiMessage");

@@ -230,6 +188,10 @@ function partAsType(part) {
 return part;
 }
 __name(partAsType, "partAsType");
+function partAsTypeCheck(part, check) {
+return check(part) ? part : void 0;
+}
+__name(partAsTypeCheck, "partAsTypeCheck");
 function formatToolsToGeminiAITools(tools, config, model) {
 if (tools.length < 1 && !config.googleSearch) {
 return void 0;

@@ -374,7 +336,11 @@ var GeminiRequester = class extends ModelRequester {
 stopSequences: params.stop,
 temperature: params.temperature,
 maxOutputTokens: params.model.includes("vision") ? void 0 : params.maxTokens,
-topP: params.topP
+topP: params.topP,
+responseModalities: params.model.includes(
+// TODO: Wait for google release to all models
+"gemini-2.0-flash-exp"
+) && this._pluginConfig.imageGeneration ? ["TEXT", "IMAGE"] : void 0
 // thinkingConfig: { includeThoughts: true }
 },
 tools: params.tools != null || this._pluginConfig.googleSearch ? formatToolsToGeminiAITools(

@@ -408,8 +374,10 @@ var GeminiRequester = class extends ModelRequester {
 }
 for (const candidate of transformValue.candidates) {
 const parts = candidate.content?.parts;
-if (parts == null || parts.length < 1) {
+if ((parts == null || parts.length < 1) && candidate.finishReason !== "STOP") {
 throw new Error(chunk);
+} else if (candidate.finishReason === "STOP" && parts == null) {
+continue;
 }
 for (const part of parts) {
 controller.enqueue(part);

@@ -434,14 +402,21 @@ var GeminiRequester = class extends ModelRequester {
 for await (const chunk of iterable) {
 const messagePart = partAsType(chunk);
 const chatFunctionCallingPart = partAsType(chunk);
+const imagePart = partAsTypeCheck(
+chunk,
+(part) => part["inlineData"] != null
+);
 if (messagePart.text) {
 if (messagePart.thought) {
 reasoningContent += messagePart.text;
 continue;
 }
 content = messagePart.text;
+} else if (imagePart) {
+messagePart.text = ``;
+content = messagePart.text;
 }
-const deltaFunctionCall = chatFunctionCallingPart
+const deltaFunctionCall = chatFunctionCallingPart?.functionCall;
 if (deltaFunctionCall) {
 let args = deltaFunctionCall.args?.input ?? deltaFunctionCall.args;
 try {

@@ -466,7 +441,10 @@ var GeminiRequester = class extends ModelRequester {
 name: functionCall.name,
 arguments: functionCall.args,
 args: functionCall.arguments
-} : void 0
+} : void 0,
+images: imagePart ? [
+`data:${imagePart.inlineData.mime_type};base64,${imagePart.inlineData.data})`
+] : void 0
 // eslint-disable-next-line @typescript-eslint/no-explicit-any
 };
 messageChunk.content = content;

@@ -760,6 +738,7 @@ var Config3 = Schema.intersect([
 maxTokens: Schema.number().min(16).max(2097e3).step(16).default(8064),
 temperature: Schema.percent().min(0).max(2).step(0.1).default(0.8),
 googleSearch: Schema.boolean().default(false),
+imageGeneration: Schema.boolean().default(false),
 groundingContentDisplay: Schema.boolean().default(false),
 searchThreshold: Schema.number().min(0).max(1).step(0.1).default(0.5)
 })
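The other behavioral change in the requester, present in both index.cjs and index.mjs above, is the relaxed guard around empty candidates: a candidate whose finishReason is "STOP" may now arrive without parts and is skipped instead of being treated as a malformed chunk. Below is a minimal sketch of that logic, assuming a trimmed candidate shape; the shipped code operates on the streamed transformValue inside GeminiRequester and throws the raw chunk text.

interface StreamCandidate {
    finishReason?: string
    content?: { parts?: unknown[] }
}

// Returns true when the candidate should be skipped; throws for candidates that
// carry no parts and did not finish with "STOP", matching the diffed guard.
function shouldSkipCandidate(candidate: StreamCandidate, rawChunk: string): boolean {
    const parts = candidate.content?.parts
    if ((parts == null || parts.length < 1) && candidate.finishReason !== 'STOP') {
        throw new Error(rawChunk)
    }
    // New in 1.1.2: a final "STOP" candidate may legitimately have no parts,
    // so it is skipped rather than aborting the whole stream.
    return candidate.finishReason === 'STOP' && parts == null
}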
package/lib/types.d.ts
CHANGED

@@ -2,11 +2,17 @@ export interface ChatCompletionResponseMessage {
 role: string;
 parts?: ChatPart[];
 }
-export type ChatPart = ChatMessagePart |
+export type ChatPart = ChatMessagePart | ChatInlineDataPart | ChatFunctionCallingPart | ChatFunctionResponsePart | ChatUploadDataPart;
 export type ChatMessagePart = {
 text: string;
 thought?: boolean;
 };
+export type ChatInlineDataPart = {
+inlineData: {
+mime_type: string;
+data?: string;
+};
+};
 export type ChatUploadDataPart = {
 inline_data: {
 mime_type: string;
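The new ChatInlineDataPart models the camelCase inlineData parts that Gemini streams back when image generation is enabled, alongside the existing snake_case inline_data request type ChatUploadDataPart. The sketch below shows how such a part can be turned into a data: URL, assuming the field names declared above; toDataUrl is a hypothetical helper, not part of the package, which instead builds a similar data: URL inline while streaming.

type ChatInlineDataPart = {
    inlineData: {
        mime_type: string
        data?: string
    }
}

// Generated images arrive base64-encoded in inlineData.data; a data: URL is a
// convenient form to hand to downstream message renderers.
function toDataUrl(part: ChatInlineDataPart): string | undefined {
    if (part.inlineData.data == null) return undefined
    return `data:${part.inlineData.mime_type};base64,${part.inlineData.data}`
}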
package/lib/utils.d.ts
CHANGED

@@ -4,6 +4,7 @@ import { ChatCompletionFunction, ChatCompletionResponseMessage, ChatCompletionRe
 import { Config } from '.';
 export declare function langchainMessageToGeminiMessage(messages: BaseMessage[], model?: string): Promise<ChatCompletionResponseMessage[]>;
 export declare function partAsType<T extends ChatPart>(part: ChatPart): T;
+export declare function partAsTypeCheck<T extends ChatPart>(part: ChatPart, check: (part: ChatPart & unknown) => boolean): T | undefined;
 export declare function formatToolsToGeminiAITools(tools: StructuredTool[], config: Config, model: string): Record<string, any>;
 export declare function formatToolToGeminiAITool(tool: StructuredTool): ChatCompletionFunction;
 export declare function messageTypeToGeminiRole(type: MessageType): ChatCompletionResponseMessageRoleEnum;
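partAsTypeCheck complements the existing partAsType, which is an unchecked cast: the new helper only returns the part when the supplied predicate accepts it, and yields undefined otherwise. Here is a self-contained sketch of that pattern, with type aliases trimmed down from lib/types.d.ts; the package itself exports the function from lib/utils rather than defining it inline like this.

type ChatMessagePart = { text: string; thought?: boolean }
type ChatInlineDataPart = { inlineData: { mime_type: string; data?: string } }
type ChatPart = ChatMessagePart | ChatInlineDataPart

// Same shape as the declaration above: return the part as T only when the
// predicate holds, otherwise undefined.
function partAsTypeCheck<T extends ChatPart>(
    part: ChatPart,
    check: (part: ChatPart) => boolean
): T | undefined {
    return check(part) ? (part as T) : undefined
}

// Usage, mirroring how GeminiRequester now picks streamed image parts out of
// the chunk iterator:
declare const chunk: ChatPart
const imagePart = partAsTypeCheck<ChatInlineDataPart>(
    chunk,
    (part) => (part as ChatInlineDataPart).inlineData != null
)
if (imagePart?.inlineData.data) {
    // imagePart is typed as ChatInlineDataPart here, so inlineData is accessible.
}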
package/package.json
CHANGED

@@ -1,7 +1,7 @@
 {
 "name": "koishi-plugin-chatluna-google-gemini-adapter",
 "description": "google-gemini adapter for chatluna",
-"version": "1.1.0",
+"version": "1.1.2",
 "main": "lib/index.cjs",
 "module": "lib/index.mjs",
 "typings": "lib/index.d.ts",

@@ -72,7 +72,7 @@
 },
 "peerDependencies": {
 "koishi": "^4.18.4",
-"koishi-plugin-chatluna": "^1.1.
+"koishi-plugin-chatluna": "^1.1.2"
 },
 "koishi": {
 "description": {