koishi-plugin-chatluna-google-gemini-adapter 1.2.0 → 1.2.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/index.cjs +47 -36
- package/lib/index.d.ts +3 -0
- package/lib/index.mjs +47 -36
- package/lib/requester.d.ts +2 -1
- package/lib/types.d.ts +13 -0
- package/package.json +2 -2
package/lib/index.cjs
CHANGED
@@ -33,14 +33,14 @@ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: tru
 // src/locales/zh-CN.schema.yml
 var require_zh_CN_schema = __commonJS({
   "src/locales/zh-CN.schema.yml"(exports2, module2) {
-    module2.exports = { $inner: [{}, { $desc: "请求选项", platform: "适配器的平台名。(不懂请不要修改)", apiKeys: { $inner: ["Gemini 的 API Key", "Gemini API 的请求地址"], $desc: "Gemini 的 API Key 和请求地址列表。" } }, { $desc: "模型配置", maxTokens: "输入的最大上下文 Token(16~2097000,必须是 16 的倍数)。注意:仅当您使用的模型最大 Token 为 8000 及以上时,才建议设置超过 2000 token。", temperature: "回复的随机性程度,数值越高,回复越随机(范围:0~2)。", googleSearch: "为模型启用谷歌搜索。", thinkingBudget: "思考预算,范围:0~24576,设置的数值越大,思考时花费的 Token 越多。目前仅支持 gemini 2.5 系列模型。", groundingContentDisplay: "是否显示谷歌搜索结果。", imageGeneration: "为模型启用图像生成。目前仅支持 `gemini-2.0-flash-exp` 模型。", searchThreshold: "
+    module2.exports = { $inner: [{}, { $desc: "请求选项", platform: "适配器的平台名。(不懂请不要修改)", apiKeys: { $inner: ["Gemini 的 API Key", "Gemini API 的请求地址"], $desc: "Gemini 的 API Key 和请求地址列表。" } }, { $desc: "模型配置", maxTokens: "输入的最大上下文 Token(16~2097000,必须是 16 的倍数)。注意:仅当您使用的模型最大 Token 为 8000 及以上时,才建议设置超过 2000 token。", temperature: "回复的随机性程度,数值越高,回复越随机(范围:0~2)。", googleSearch: "为模型启用谷歌搜索。", thinkingBudget: "思考预算,范围:0~24576,设置的数值越大,思考时花费的 Token 越多。目前仅支持 gemini 2.5 系列模型。", groundingContentDisplay: "是否显示谷歌搜索结果。", imageGeneration: "为模型启用图像生成。目前仅支持 `gemini-2.0-flash-exp` 模型。", searchThreshold: "搜索的[置信度阈值](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval),范围:0~1,设置的数值越低,则越倾向于使用谷歌搜索。(仅支持 `gemini-1.5` 系列模型。gemini 2.0 模型起使用动态的工具调用)", includeThoughts: "是否获取模型的思考内容。", codeExecution: "为模型启用代码执行工具。", urlContent: "为模型启用 URL 内容获取工具。" }] };
   }
 });

 // src/locales/en-US.schema.yml
 var require_en_US_schema = __commonJS({
   "src/locales/en-US.schema.yml"(exports2, module2) {
-    module2.exports = { $inner: [{}, { $desc: "API Configuration", platform: "Adapter platform name. (Do not modify if you do not understand)", apiKeys: { $inner: ["Gemini API Key", "Gemini API Endpoint (optional)"], $desc: "Gemini API access credentials" } }, { $desc: "Model Parameters", maxTokens: "Max output tokens (16-2097000, multiple of 16). >2000 for 8k+ models", temperature: "Sampling temperature (0-2). Higher: more random, Lower: more deterministic", googleSearch: "Enable Google search", thinkingBudget: "Thinking budget (0-24576). Higher: more tokens spent on thinking. Currently only supports `gemini-2.5` series models.", groundingContentDisplay: "Enable display of search results", imageGeneration: "Enable image generation (only for `gemini-2.0-flash-exp` model)", searchThreshold: "Search confidence [threshold](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval) (0-1). Lower: more likely to use Google search" }] };
+    module2.exports = { $inner: [{}, { $desc: "API Configuration", platform: "Adapter platform name. (Do not modify if you do not understand)", apiKeys: { $inner: ["Gemini API Key", "Gemini API Endpoint (optional)"], $desc: "Gemini API access credentials" } }, { $desc: "Model Parameters", maxTokens: "Max output tokens (16-2097000, multiple of 16). >2000 for 8k+ models", temperature: "Sampling temperature (0-2). Higher: more random, Lower: more deterministic", googleSearch: "Enable Google search", thinkingBudget: "Thinking budget (0-24576). Higher: more tokens spent on thinking. Currently only supports `gemini-2.5` series models.", groundingContentDisplay: "Enable display of search results", imageGeneration: "Enable image generation (only for `gemini-2.0-flash-exp` model)", searchThreshold: "Search confidence [threshold](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval) (0-1). Lower: more likely to use Google search", includeThoughts: "Enable retrieval of model thoughts", codeExecution: "Enable code execution tool", urlContent: "Enable URL content retrieval tool" }] };
   }
 });

@@ -235,21 +235,26 @@ function formatToolsToGeminiAITools(tools, config, model) {
     "gemini-2.0-flash-exp-image-generation"
   ];
   let googleSearch = config.googleSearch;
-
+  let codeExecution = config.codeExecution;
+  let urlContent = config.urlContent;
+  const useCustomTools = config.googleSearch || config.codeExecution || config.urlContent;
+  if (functions.length > 0 && !useCustomTools) {
     result.push({
       functionDeclarations: functions
     });
-  } else if (functions.length > 0 &&
-    logger.warn("
+  } else if (functions.length > 0 && useCustomTools) {
+    logger.warn("Use custom tools instead of tool calls.");
   } else if ((unsupportedModels.some(
     (unsupportedModel) => model.includes(unsupportedModel)
   ) || imageGenerationModels.some(
     (unsupportedModels2) => model.includes(unsupportedModels2)
-  ) && config.imageGeneration) &&
+  ) && config.imageGeneration) && useCustomTools) {
     logger.warn(
       `The model ${model} does not support google search. google search will be disable.`
     );
     googleSearch = false;
+    codeExecution = false;
+    urlContent = false;
   }
   if (googleSearch) {
     if (model.includes("gemini-2")) {
@@ -267,6 +272,16 @@ function formatToolsToGeminiAITools(tools, config, model) {
       });
     }
   }
+  if (codeExecution) {
+    result.push({
+      code_execution: {}
+    });
+  }
+  if (urlContent) {
+    result.push({
+      url_content: {}
+    });
+  }
   return result;
 }
 __name(formatToolsToGeminiAITools, "formatToolsToGeminiAITools");
@@ -381,12 +396,12 @@ var GeminiRequester = class extends import_api.ModelRequester {
          // TODO: Wait for google release to all models
          "gemini-2.0-flash-exp"
        ) && this._pluginConfig.imageGeneration ? ["TEXT", "IMAGE"] : void 0,
-        thinkingConfig: enabledThinking != null ? {
-          thinkingBudget: enabledThinking ? this._pluginConfig.thinkingBudget ??
-
+        thinkingConfig: enabledThinking != null || this._pluginConfig.includeThoughts ? {
+          thinkingBudget: enabledThinking ? this._pluginConfig.thinkingBudget ?? -1 : 0,
+          includeThoughts: this._pluginConfig.includeThoughts
        } : void 0
      },
-      tools: params.tools != null || this._pluginConfig.googleSearch ? formatToolsToGeminiAITools(
+      tools: params.tools != null || this._pluginConfig.googleSearch || this._pluginConfig.codeExecution || this._pluginConfig.urlContent ? formatToolsToGeminiAITools(
        params.tools ?? [],
        this._pluginConfig,
        params.model
@@ -589,9 +604,14 @@ ${groundingContent}`
          "error when listing gemini models, Result:" + JSON.stringify(data)
        );
      }
-      return data.models.
-        (model) => model.includes("gemini") || model.includes("gemma") || model.includes("embedding")
-      )
+      return data.models.filter(
+        (model) => model.name.includes("gemini") || model.name.includes("gemma") || model.name.includes("embedding")
+      ).map((model) => {
+        return {
+          ...model,
+          name: model.name.replace("models/", "")
+        };
+      });
    } catch (e) {
      const error = new Error(
        "error when listing gemini models, Result: " + JSON.stringify(data)
@@ -672,39 +692,27 @@ var GeminiClient = class extends import_client.PlatformModelAndEmbeddingsClient
  }
  async refreshModels() {
    try {
-
+      const rawModels = await this._requester.getModels();
      if (!rawModels.length) {
        throw new import_error2.ChatLunaError(
          import_error2.ChatLunaErrorCode.MODEL_INIT_ERROR,
          new Error("No model found")
        );
      }
-      rawModels = rawModels.map((model) => model.replace("models/", ""));
      const models = [];
      for (const model of rawModels) {
        const info = {
-          name: model,
-          maxTokens:
-
-
-          }
-          if (model2.includes("gemini-1.5-flash") || model2.includes("gemini-2.0-pro") || model2.includes("gemini-2.5-pro")) {
-            return 2097152;
-          }
-          if (model2.includes("gemini-1.0-pro")) {
-            return 30720;
-          }
-          return 1048576;
-          })(model),
-          type: model.includes("embedding") ? import_types.ModelType.embeddings : import_types.ModelType.llm,
-          functionCall: !model.includes("vision"),
+          name: model.name,
+          maxTokens: model.inputTokenLimit,
+          type: model.name.includes("embedding") ? import_types.ModelType.embeddings : import_types.ModelType.llm,
+          functionCall: !model.name.includes("vision"),
          supportMode: ["all"]
        };
-        if (model.includes("gemini-2.5")) {
-          if (!model.includes("-thinking")) {
+        if (model.name.includes("gemini-2.5")) {
+          if (!model.name.includes("-thinking")) {
            models.push(
-              { ...info, name: model + "-no-thinking" },
-              { ...info, name: model + "-thinking" },
+              { ...info, name: model.name + "-no-thinking" },
+              { ...info, name: model.name + "-thinking" },
              info
            );
          } else {
@@ -720,7 +728,7 @@ var GeminiClient = class extends import_client.PlatformModelAndEmbeddingsClient
    }
  }
  async getModels() {
-    if (this._models) {
+    if (this._models && Object.keys(this._models).length > 0) {
      return Object.values(this._models);
    }
    const models = await this.refreshModels();
@@ -801,7 +809,10 @@ var Config3 = import_koishi.Schema.intersect([
    maxTokens: import_koishi.Schema.number().min(16).max(2097e3).step(16).default(8064),
    temperature: import_koishi.Schema.percent().min(0).max(2).step(0.1).default(0.8),
    googleSearch: import_koishi.Schema.boolean().default(false),
-
+    codeExecution: import_koishi.Schema.boolean().default(false),
+    urlContent: import_koishi.Schema.boolean().default(false),
+    thinkingBudget: import_koishi.Schema.number().min(-1).max(24576).step(16).default(-1),
+    includeThoughts: import_koishi.Schema.boolean().default(false),
    imageGeneration: import_koishi.Schema.boolean().default(false),
    groundingContentDisplay: import_koishi.Schema.boolean().default(false),
    searchThreshold: import_koishi.Schema.number().min(0).max(1).step(0.1).default(0.5)
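The index.cjs changes center on formatToolsToGeminiAITools: the adapter now treats Google search, code execution, and URL context as "custom tools" that take precedence over function declarations, and it appends bare `code_execution: {}` / `url_content: {}` entries to the request's tools array. The following is a minimal TypeScript sketch of that selection logic, not the plugin's actual export; `ToolConfig`, `GeminiTool`, and `buildTools` are simplified stand-in names, and the existing googleSearch branch (unchanged in this diff) is omitted.

```ts
// Sketch only: mirrors the tool-selection logic from the hunks above.
// ToolConfig and GeminiTool are simplified stand-ins, not the plugin's real types.
interface ToolConfig {
    googleSearch: boolean
    codeExecution: boolean
    urlContent: boolean
}

type GeminiTool = Record<string, unknown>

function buildTools(functions: object[], config: ToolConfig): GeminiTool[] {
    const result: GeminiTool[] = []
    const useCustomTools =
        config.googleSearch || config.codeExecution || config.urlContent

    if (functions.length > 0 && !useCustomTools) {
        // Plain function calling: pass the declared tools through unchanged.
        result.push({ functionDeclarations: functions })
    } else if (functions.length > 0 && useCustomTools) {
        // Built-in tools win over function declarations; the diff logs a warning here.
        console.warn('Use custom tools instead of tool calls.')
    }

    // (The googleSearch push lives in code outside the shown hunks and is omitted.)
    if (config.codeExecution) {
        result.push({ code_execution: {} })
    }
    if (config.urlContent) {
        result.push({ url_content: {} })
    }
    return result
}
```

With all three options off, the function degrades to plain functionDeclarations, matching the pre-1.2.2 function-calling behaviour.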
package/lib/index.d.ts
CHANGED
@@ -9,10 +9,13 @@ export interface Config extends ChatLunaPlugin.Config {
     platform: string;
     temperature: number;
     googleSearch: boolean;
+    codeExecution: boolean;
+    urlContent: boolean;
     searchThreshold: number;
     groundingContentDisplay: boolean;
     imageGeneration: boolean;
     thinkingBudget: number;
+    includeThoughts: boolean;
 }
 export declare const Config: Schema<Config>;
 export declare const inject: string[];
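For reference, here is a hypothetical configuration object using the fields added to the Config interface above. The values are illustrative only; the import path points at the package's published typings, and required base fields such as apiKeys are left out via Partial.

```ts
// Illustrative only: exercises the new Config fields from index.d.ts.
import type { Config } from 'koishi-plugin-chatluna-google-gemini-adapter'

const config: Partial<Config> = {
    googleSearch: true,
    codeExecution: true, // new: adds a code_execution entry to the request tools
    urlContent: false, // new: adds a url_content entry when true
    includeThoughts: true, // new: asks the API to return the model's thoughts
    thinkingBudget: -1, // schema default is now -1 (dynamic budget)
    imageGeneration: false,
    groundingContentDisplay: false,
    searchThreshold: 0.5
}
```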
package/lib/index.mjs
CHANGED
@@ -8,14 +8,14 @@ var __commonJS = (cb, mod) => function __require() {
 // src/locales/zh-CN.schema.yml
 var require_zh_CN_schema = __commonJS({
   "src/locales/zh-CN.schema.yml"(exports, module) {
-    module.exports = { $inner: [{}, { $desc: "请求选项", platform: "适配器的平台名。(不懂请不要修改)", apiKeys: { $inner: ["Gemini 的 API Key", "Gemini API 的请求地址"], $desc: "Gemini 的 API Key 和请求地址列表。" } }, { $desc: "模型配置", maxTokens: "输入的最大上下文 Token(16~2097000,必须是 16 的倍数)。注意:仅当您使用的模型最大 Token 为 8000 及以上时,才建议设置超过 2000 token。", temperature: "回复的随机性程度,数值越高,回复越随机(范围:0~2)。", googleSearch: "为模型启用谷歌搜索。", thinkingBudget: "思考预算,范围:0~24576,设置的数值越大,思考时花费的 Token 越多。目前仅支持 gemini 2.5 系列模型。", groundingContentDisplay: "是否显示谷歌搜索结果。", imageGeneration: "为模型启用图像生成。目前仅支持 `gemini-2.0-flash-exp` 模型。", searchThreshold: "
+    module.exports = { $inner: [{}, { $desc: "请求选项", platform: "适配器的平台名。(不懂请不要修改)", apiKeys: { $inner: ["Gemini 的 API Key", "Gemini API 的请求地址"], $desc: "Gemini 的 API Key 和请求地址列表。" } }, { $desc: "模型配置", maxTokens: "输入的最大上下文 Token(16~2097000,必须是 16 的倍数)。注意:仅当您使用的模型最大 Token 为 8000 及以上时,才建议设置超过 2000 token。", temperature: "回复的随机性程度,数值越高,回复越随机(范围:0~2)。", googleSearch: "为模型启用谷歌搜索。", thinkingBudget: "思考预算,范围:0~24576,设置的数值越大,思考时花费的 Token 越多。目前仅支持 gemini 2.5 系列模型。", groundingContentDisplay: "是否显示谷歌搜索结果。", imageGeneration: "为模型启用图像生成。目前仅支持 `gemini-2.0-flash-exp` 模型。", searchThreshold: "搜索的[置信度阈值](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval),范围:0~1,设置的数值越低,则越倾向于使用谷歌搜索。(仅支持 `gemini-1.5` 系列模型。gemini 2.0 模型起使用动态的工具调用)", includeThoughts: "是否获取模型的思考内容。", codeExecution: "为模型启用代码执行工具。", urlContent: "为模型启用 URL 内容获取工具。" }] };
   }
 });

 // src/locales/en-US.schema.yml
 var require_en_US_schema = __commonJS({
   "src/locales/en-US.schema.yml"(exports, module) {
-    module.exports = { $inner: [{}, { $desc: "API Configuration", platform: "Adapter platform name. (Do not modify if you do not understand)", apiKeys: { $inner: ["Gemini API Key", "Gemini API Endpoint (optional)"], $desc: "Gemini API access credentials" } }, { $desc: "Model Parameters", maxTokens: "Max output tokens (16-2097000, multiple of 16). >2000 for 8k+ models", temperature: "Sampling temperature (0-2). Higher: more random, Lower: more deterministic", googleSearch: "Enable Google search", thinkingBudget: "Thinking budget (0-24576). Higher: more tokens spent on thinking. Currently only supports `gemini-2.5` series models.", groundingContentDisplay: "Enable display of search results", imageGeneration: "Enable image generation (only for `gemini-2.0-flash-exp` model)", searchThreshold: "Search confidence [threshold](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval) (0-1). Lower: more likely to use Google search" }] };
+    module.exports = { $inner: [{}, { $desc: "API Configuration", platform: "Adapter platform name. (Do not modify if you do not understand)", apiKeys: { $inner: ["Gemini API Key", "Gemini API Endpoint (optional)"], $desc: "Gemini API access credentials" } }, { $desc: "Model Parameters", maxTokens: "Max output tokens (16-2097000, multiple of 16). >2000 for 8k+ models", temperature: "Sampling temperature (0-2). Higher: more random, Lower: more deterministic", googleSearch: "Enable Google search", thinkingBudget: "Thinking budget (0-24576). Higher: more tokens spent on thinking. Currently only supports `gemini-2.5` series models.", groundingContentDisplay: "Enable display of search results", imageGeneration: "Enable image generation (only for `gemini-2.0-flash-exp` model)", searchThreshold: "Search confidence [threshold](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval) (0-1). Lower: more likely to use Google search", includeThoughts: "Enable retrieval of model thoughts", codeExecution: "Enable code execution tool", urlContent: "Enable URL content retrieval tool" }] };
   }
 });

@@ -218,21 +218,26 @@ function formatToolsToGeminiAITools(tools, config, model) {
     "gemini-2.0-flash-exp-image-generation"
   ];
   let googleSearch = config.googleSearch;
-
+  let codeExecution = config.codeExecution;
+  let urlContent = config.urlContent;
+  const useCustomTools = config.googleSearch || config.codeExecution || config.urlContent;
+  if (functions.length > 0 && !useCustomTools) {
     result.push({
       functionDeclarations: functions
     });
-  } else if (functions.length > 0 &&
-    logger.warn("
+  } else if (functions.length > 0 && useCustomTools) {
+    logger.warn("Use custom tools instead of tool calls.");
   } else if ((unsupportedModels.some(
     (unsupportedModel) => model.includes(unsupportedModel)
   ) || imageGenerationModels.some(
     (unsupportedModels2) => model.includes(unsupportedModels2)
-  ) && config.imageGeneration) &&
+  ) && config.imageGeneration) && useCustomTools) {
     logger.warn(
       `The model ${model} does not support google search. google search will be disable.`
     );
     googleSearch = false;
+    codeExecution = false;
+    urlContent = false;
   }
   if (googleSearch) {
     if (model.includes("gemini-2")) {
@@ -250,6 +255,16 @@ function formatToolsToGeminiAITools(tools, config, model) {
       });
     }
   }
+  if (codeExecution) {
+    result.push({
+      code_execution: {}
+    });
+  }
+  if (urlContent) {
+    result.push({
+      url_content: {}
+    });
+  }
   return result;
 }
 __name(formatToolsToGeminiAITools, "formatToolsToGeminiAITools");
@@ -364,12 +379,12 @@ var GeminiRequester = class extends ModelRequester {
          // TODO: Wait for google release to all models
          "gemini-2.0-flash-exp"
        ) && this._pluginConfig.imageGeneration ? ["TEXT", "IMAGE"] : void 0,
-        thinkingConfig: enabledThinking != null ? {
-          thinkingBudget: enabledThinking ? this._pluginConfig.thinkingBudget ??
-
+        thinkingConfig: enabledThinking != null || this._pluginConfig.includeThoughts ? {
+          thinkingBudget: enabledThinking ? this._pluginConfig.thinkingBudget ?? -1 : 0,
+          includeThoughts: this._pluginConfig.includeThoughts
        } : void 0
      },
-      tools: params.tools != null || this._pluginConfig.googleSearch ? formatToolsToGeminiAITools(
+      tools: params.tools != null || this._pluginConfig.googleSearch || this._pluginConfig.codeExecution || this._pluginConfig.urlContent ? formatToolsToGeminiAITools(
        params.tools ?? [],
        this._pluginConfig,
        params.model
@@ -572,9 +587,14 @@ ${groundingContent}`
          "error when listing gemini models, Result:" + JSON.stringify(data)
        );
      }
-      return data.models.
-        (model) => model.includes("gemini") || model.includes("gemma") || model.includes("embedding")
-      )
+      return data.models.filter(
+        (model) => model.name.includes("gemini") || model.name.includes("gemma") || model.name.includes("embedding")
+      ).map((model) => {
+        return {
+          ...model,
+          name: model.name.replace("models/", "")
+        };
+      });
    } catch (e) {
      const error = new Error(
        "error when listing gemini models, Result: " + JSON.stringify(data)
@@ -655,39 +675,27 @@ var GeminiClient = class extends PlatformModelAndEmbeddingsClient {
  }
  async refreshModels() {
    try {
-
+      const rawModels = await this._requester.getModels();
      if (!rawModels.length) {
        throw new ChatLunaError2(
          ChatLunaErrorCode2.MODEL_INIT_ERROR,
          new Error("No model found")
        );
      }
-      rawModels = rawModels.map((model) => model.replace("models/", ""));
      const models = [];
      for (const model of rawModels) {
        const info = {
-          name: model,
-          maxTokens:
-
-
-          }
-          if (model2.includes("gemini-1.5-flash") || model2.includes("gemini-2.0-pro") || model2.includes("gemini-2.5-pro")) {
-            return 2097152;
-          }
-          if (model2.includes("gemini-1.0-pro")) {
-            return 30720;
-          }
-          return 1048576;
-          })(model),
-          type: model.includes("embedding") ? ModelType.embeddings : ModelType.llm,
-          functionCall: !model.includes("vision"),
+          name: model.name,
+          maxTokens: model.inputTokenLimit,
+          type: model.name.includes("embedding") ? ModelType.embeddings : ModelType.llm,
+          functionCall: !model.name.includes("vision"),
          supportMode: ["all"]
        };
-        if (model.includes("gemini-2.5")) {
-          if (!model.includes("-thinking")) {
+        if (model.name.includes("gemini-2.5")) {
+          if (!model.name.includes("-thinking")) {
            models.push(
-              { ...info, name: model + "-no-thinking" },
-              { ...info, name: model + "-thinking" },
+              { ...info, name: model.name + "-no-thinking" },
+              { ...info, name: model.name + "-thinking" },
              info
            );
          } else {
@@ -703,7 +711,7 @@ var GeminiClient = class extends PlatformModelAndEmbeddingsClient {
    }
  }
  async getModels() {
-    if (this._models) {
+    if (this._models && Object.keys(this._models).length > 0) {
      return Object.values(this._models);
    }
    const models = await this.refreshModels();
@@ -784,7 +792,10 @@ var Config3 = Schema.intersect([
    maxTokens: Schema.number().min(16).max(2097e3).step(16).default(8064),
    temperature: Schema.percent().min(0).max(2).step(0.1).default(0.8),
    googleSearch: Schema.boolean().default(false),
-
+    codeExecution: Schema.boolean().default(false),
+    urlContent: Schema.boolean().default(false),
+    thinkingBudget: Schema.number().min(-1).max(24576).step(16).default(-1),
+    includeThoughts: Schema.boolean().default(false),
    imageGeneration: Schema.boolean().default(false),
    groundingContentDisplay: Schema.boolean().default(false),
    searchThreshold: Schema.number().min(0).max(1).step(0.1).default(0.5)
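The index.mjs bundle mirrors the index.cjs changes; the requester hunks in both adjust how thinkingConfig is built: it is now emitted whenever thinking is toggled for the model or includeThoughts is set, the budget falls back to -1 (dynamic) instead of a fixed value, and a budget of 0 is sent when thinking is explicitly disabled. A condensed TypeScript sketch of that decision follows; `buildThinkingConfig` and `ThinkingOptions` are hypothetical names, and the meaning of a null `enabledThinking` is an assumption based on the surrounding code.

```ts
// Sketch of the thinkingConfig construction from the diff; not the plugin's actual code.
interface ThinkingOptions {
    enabledThinking: boolean | null // assumed: null when nothing forces thinking on or off
    includeThoughts: boolean
    thinkingBudget?: number
}

function buildThinkingConfig(opts: ThinkingOptions) {
    const { enabledThinking, includeThoughts, thinkingBudget } = opts
    if (enabledThinking == null && !includeThoughts) {
        // Nothing to send: generationConfig omits thinkingConfig entirely.
        return undefined
    }
    return {
        // -1 lets the API choose a budget; 0 turns thinking off for -no-thinking models.
        thinkingBudget: enabledThinking ? (thinkingBudget ?? -1) : 0,
        includeThoughts
    }
}

// e.g. a -no-thinking model with includeThoughts off:
// buildThinkingConfig({ enabledThinking: false, includeThoughts: false })
// -> { thinkingBudget: 0, includeThoughts: false }
```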
package/lib/requester.d.ts
CHANGED
@@ -2,6 +2,7 @@ import { ChatGenerationChunk } from '@langchain/core/outputs';
 import { EmbeddingsRequester, EmbeddingsRequestParams, ModelRequester, ModelRequestParams } from 'koishi-plugin-chatluna/llm-core/platform/api';
 import { ClientConfig } from 'koishi-plugin-chatluna/llm-core/platform/config';
 import { Config } from '.';
+import { GeminiModelInfo } from './types';
 import { ChatLunaPlugin } from 'koishi-plugin-chatluna/services/chat';
 export declare class GeminiRequester extends ModelRequester implements EmbeddingsRequester {
     private _config;
@@ -10,7 +11,7 @@ export declare class GeminiRequester extends ModelRequester implements Embedding
     constructor(_config: ClientConfig, _plugin: ChatLunaPlugin, _pluginConfig: Config);
     completionStream(params: ModelRequestParams): AsyncGenerator<ChatGenerationChunk>;
     embeddings(params: EmbeddingsRequestParams): Promise<number[] | number[][]>;
-    getModels(): Promise<
+    getModels(): Promise<GeminiModelInfo[]>;
     private _post;
     private _get;
     private _concatUrl;
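getModels() now resolves to full GeminiModelInfo records instead of bare name strings, and the requester strips the `models/` prefix itself (see the index.cjs/index.mjs hunks). Below is a hedged sketch of that post-processing over a raw list-models-style response; `ListModelsResponse` and `normaliseModels` are assumed names, and the interface here only declares the fields the sketch touches.

```ts
// Sketch: filter and normalise the raw model list the way the updated requester does.
// GeminiModelInfo is reduced to the fields used here; see types.d.ts for the full shape.
interface GeminiModelInfo {
    name: string
    inputTokenLimit: number
    // ...remaining fields from types.d.ts omitted for brevity
}

interface ListModelsResponse {
    models: GeminiModelInfo[]
}

function normaliseModels(data: ListModelsResponse): GeminiModelInfo[] {
    return data.models
        .filter(
            (model) =>
                model.name.includes('gemini') ||
                model.name.includes('gemma') ||
                model.name.includes('embedding')
        )
        .map((model) => ({
            ...model,
            // "models/gemini-2.0-flash" -> "gemini-2.0-flash"
            name: model.name.replace('models/', '')
        }))
}
```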
package/lib/types.d.ts
CHANGED
@@ -85,3 +85,16 @@ export interface CreateEmbeddingResponse {
     }[];
 }
 export type ChatCompletionResponseMessageRoleEnum = 'system' | 'model' | 'user' | 'function';
+export interface GeminiModelInfo {
+    name: string;
+    version: string;
+    displayName: string;
+    description: string;
+    inputTokenLimit: number;
+    outputTokenLimit: number;
+    supportedGenerationMethods: string[];
+    temperature: number;
+    topP: number;
+    topK: number;
+    maxTemperature: number;
+}
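refreshModels in both bundles now derives the registered model list directly from these records: maxTokens comes from inputTokenLimit rather than a hard-coded per-family table, and gemini-2.5 entries still fan out into -thinking / -no-thinking variants. A simplified sketch of that mapping; `ModelInfoLike` and `toModelInfos` are stand-in names, not ChatLuna's actual types, and the else branch is condensed compared to the diff.

```ts
// Sketch of the client-side mapping from GeminiModelInfo to registered models.
interface GeminiModelInfo {
    name: string
    inputTokenLimit: number
}

interface ModelInfoLike {
    name: string
    maxTokens: number
    type: 'llm' | 'embeddings'
    functionCall: boolean
    supportMode: string[]
}

function toModelInfos(rawModels: GeminiModelInfo[]): ModelInfoLike[] {
    const models: ModelInfoLike[] = []
    for (const model of rawModels) {
        const info: ModelInfoLike = {
            name: model.name,
            maxTokens: model.inputTokenLimit, // previously a hard-coded lookup by model family
            type: model.name.includes('embedding') ? 'embeddings' : 'llm',
            functionCall: !model.name.includes('vision'),
            supportMode: ['all']
        }
        if (model.name.includes('gemini-2.5') && !model.name.includes('-thinking')) {
            // Expose explicit thinking variants alongside the base entry.
            models.push(
                { ...info, name: model.name + '-no-thinking' },
                { ...info, name: model.name + '-thinking' },
                info
            )
        } else {
            models.push(info)
        }
    }
    return models
}
```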
package/package.json
CHANGED
@@ -1,7 +1,7 @@
 {
   "name": "koishi-plugin-chatluna-google-gemini-adapter",
   "description": "google-gemini adapter for chatluna",
-  "version": "1.2.0",
+  "version": "1.2.2",
   "main": "lib/index.cjs",
   "module": "lib/index.mjs",
   "typings": "lib/index.d.ts",
@@ -72,7 +72,7 @@
   },
   "peerDependencies": {
     "koishi": "^4.18.7",
-    "koishi-plugin-chatluna": "^1.
+    "koishi-plugin-chatluna": "^1.3.0-alpha.2"
   },
   "koishi": {
     "description": {