koishi-plugin-chatluna-google-gemini-adapter 1.3.4 → 1.3.6
This diff shows the contents of publicly released package versions as they appear in their respective public registries; it is provided for informational purposes only.
- package/lib/index.cjs +75 -45
- package/lib/index.d.ts +1 -0
- package/lib/index.mjs +55 -25
- package/lib/utils.d.ts +14 -9
- package/package.json +1 -1
package/lib/index.cjs
CHANGED
@@ -23,14 +23,14 @@ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: tru
 // src/locales/zh-CN.schema.yml
 var require_zh_CN_schema = __commonJS({
   "src/locales/zh-CN.schema.yml"(exports2, module2) {
-    module2.exports = { $inner: [{}, { $desc: "请求选项", platform: "适配器的平台名。(不懂请不要修改)", apiKeys: { $inner: ["Gemini API Key", "Gemini API 请求地址", "是否启用此配置"], $desc: "Gemini 的 API Key 和请求地址列表。" } }, { $desc: "模型配置", maxContextRatio: "最大上下文使用比例(0~1),控制可用的模型上下文窗口大小的最大百分比。例如 0.35 表示最多使用模型上下文的 35%。", temperature: "回复的随机性程度,数值越高,回复越随机(范围:0~2)。", googleSearch: "为模型启用谷歌搜索。", thinkingBudget: "思考预算,范围:(-1~24576),设置的数值越大,思考时花费的 Token 越多,-1 为动态思考。目前仅支持 gemini 2.5 系列模型。", groundingContentDisplay: "是否显示谷歌搜索结果。", imageGeneration: "为模型启用图像生成。目前仅支持 `gemini-2.0-flash-exp` 和 `gemini-2.5-flash-image-preview` 模型。", searchThreshold: "搜索的[置信度阈值](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval),范围:0~1,设置的数值越低,则越倾向于使用谷歌搜索。(仅支持 `gemini-1.5` 系列模型。gemini 2.0 模型起使用动态的工具调用)", includeThoughts: "是否获取模型的思考内容。", codeExecution: "为模型启用代码执行工具。", urlContext: "为模型启用 URL 内容获取工具。", nonStreaming: "强制不启用流式返回。开启后,将总是以非流式发起请求,即便配置了 stream 参数。" }] };
+    module2.exports = { $inner: [{}, { $desc: "请求选项", platform: "适配器的平台名。(不懂请不要修改)", apiKeys: { $inner: ["Gemini API Key", "Gemini API 请求地址", "是否启用此配置"], $desc: "Gemini 的 API Key 和请求地址列表。" } }, { $desc: "模型配置", maxContextRatio: "最大上下文使用比例(0~1),控制可用的模型上下文窗口大小的最大百分比。例如 0.35 表示最多使用模型上下文的 35%。", temperature: "回复的随机性程度,数值越高,回复越随机(范围:0~2)。", googleSearch: "为模型启用谷歌搜索。", thinkingBudget: "思考预算,范围:(-1~24576),设置的数值越大,思考时花费的 Token 越多,-1 为动态思考。目前仅支持 gemini 2.5 系列模型。", groundingContentDisplay: "是否显示谷歌搜索结果。", imageGeneration: "为模型启用图像生成。目前仅支持 `gemini-2.0-flash-exp` 和 `gemini-2.5-flash-image-preview` 模型。", searchThreshold: "搜索的[置信度阈值](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval),范围:0~1,设置的数值越低,则越倾向于使用谷歌搜索。(仅支持 `gemini-1.5` 系列模型。gemini 2.0 模型起使用动态的工具调用)", includeThoughts: "是否获取模型的思考内容。", codeExecution: "为模型启用代码执行工具。", urlContext: "为模型启用 URL 内容获取工具。", nonStreaming: "强制不启用流式返回。开启后,将总是以非流式发起请求,即便配置了 stream 参数。", useCamelCaseSystemInstruction: "使用大写的 systemInstruction 而不是小写的 system_instruction" }] };
   }
 });

 // src/locales/en-US.schema.yml
 var require_en_US_schema = __commonJS({
   "src/locales/en-US.schema.yml"(exports2, module2) {
-    module2.exports = { $inner: [{}, { $desc: "API Configuration", platform: "Adapter platform name. (Do not modify if you do not understand)", apiKeys: { $inner: ["Gemini API Key", "Gemini API Endpoint (optional)", "Enabled"], $desc: "Gemini API access credentials" } }, { $desc: "Model Parameters", maxContextRatio: "Maximum context usage ratio (0-1). Controls the maximum percentage of model context window available for use. For example, 0.35 means at most 35% of the model context can be used.", temperature: "Sampling temperature (0-2). Higher: more random, Lower: more deterministic", googleSearch: "Enable Google search", thinkingBudget: "Thinking budget (-1-24576). (0: dynamic thinking) Higher: more tokens spent on thinking. Currently only supports `gemini-2.5` series models.", groundingContentDisplay: "Enable display of search results", imageGeneration: "Enable image generation (only for `gemini-2.0-flash-exp` and `gemini-2.5-flash-image-preview` model)", searchThreshold: "Search confidence [threshold](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval) (0-1). Lower: more likely to use Google search", includeThoughts: "Enable retrieval of model thoughts", codeExecution: "Enable code execution tool", urlContext: "Enable URL context retrieval tool", nonStreaming: "Force disable streaming response. When enabled, requests will always be made in non-streaming mode, even if the stream parameter is configured." }] };
+    module2.exports = { $inner: [{}, { $desc: "API Configuration", platform: "Adapter platform name. (Do not modify if you do not understand)", apiKeys: { $inner: ["Gemini API Key", "Gemini API Endpoint (optional)", "Enabled"], $desc: "Gemini API access credentials" } }, { $desc: "Model Parameters", maxContextRatio: "Maximum context usage ratio (0-1). Controls the maximum percentage of model context window available for use. For example, 0.35 means at most 35% of the model context can be used.", temperature: "Sampling temperature (0-2). Higher: more random, Lower: more deterministic", googleSearch: "Enable Google search", thinkingBudget: "Thinking budget (-1-24576). (0: dynamic thinking) Higher: more tokens spent on thinking. Currently only supports `gemini-2.5` series models.", groundingContentDisplay: "Enable display of search results", imageGeneration: "Enable image generation (only for `gemini-2.0-flash-exp` and `gemini-2.5-flash-image-preview` model)", searchThreshold: "Search confidence [threshold](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval) (0-1). Lower: more likely to use Google search", includeThoughts: "Enable retrieval of model thoughts", codeExecution: "Enable code execution tool", urlContext: "Enable URL context retrieval tool", nonStreaming: "Force disable streaming response. When enabled, requests will always be made in non-streaming mode, even if the stream parameter is configured.", useCamelCaseSystemInstruction: "Use camelCase systemInstruction instead of snake_case system_instruction" }] };
   }
 });

@@ -46,7 +46,7 @@ __export(index_exports, {
 });
 module.exports = __toCommonJS(index_exports);
 var import_chat = require("koishi-plugin-chatluna/services/chat");
-var
+var import_koishi2 = require("koishi");

 // src/client.ts
 var import_client = require("koishi-plugin-chatluna/llm-core/platform/client");
@@ -67,6 +67,7 @@ var import_v1_shared_adapter = require("@chatluna/v1-shared-adapter");
 var import_string = require("koishi-plugin-chatluna/utils/string");
 var import_types = require("@langchain/core/utils/types");
 var import_zod_openapi = require("@anatine/zod-openapi");
+var import_koishi = require("koishi");
 async function langchainMessageToGeminiMessage(messages, plugin, model) {
   return Promise.all(
     messages.map(async (message) => {
@@ -264,11 +265,7 @@ function formatToolsToGeminiAITools(tools, config, model) {
     urlContext = false;
   }
   if (googleSearch) {
-    if (model.includes("gemini-
-      result.push({
-        google_search: {}
-      });
-    } else {
+    if (model.includes("gemini-1")) {
       result.push({
         google_search_retrieval: {
           dynamic_retrieval_config: {
@@ -277,6 +274,10 @@ function formatToolsToGeminiAITools(tools, config, model) {
           }
         }
       });
+    } else {
+      result.push({
+        google_search: {}
+      });
     }
   }
   if (codeExecution) {
@@ -322,6 +323,7 @@ __name(messageTypeToGeminiRole, "messageTypeToGeminiRole");
 function prepareModelConfig(params, pluginConfig) {
   let model = params.model;
   let enabledThinking = null;
+  let thinkingLevel = "THINKING_LEVEL_UNSPECIFIED";
   if (model.includes("-thinking") && model.includes("gemini-2.5")) {
     enabledThinking = !model.includes("-non-thinking");
     model = model.replace("-non-thinking", "").replace("-thinking", "");
@@ -332,35 +334,53 @@ function prepareModelConfig(params, pluginConfig) {
   } else if (thinkingBudget >= 0 && thinkingBudget < 128) {
     thinkingBudget = 128;
   }
+  if (model.includes("-thinking") && model.includes("gemini-3.0")) {
+    enabledThinking = true;
+    const match = model.match(/-(low|medium|high)-thinking/);
+    if (match) {
+      thinkingLevel = match[1];
+      model = model.replace(`-${match[1]}-thinking`, "");
+    } else {
+      thinkingLevel = "THINKING_LEVEL_UNSPECIFIED";
+      model = model.replace("-thinking", "");
+    }
+    thinkingBudget = void 0;
+  }
   let imageGeneration = pluginConfig.imageGeneration ?? false;
   if (imageGeneration) {
     imageGeneration = params.model.includes("gemini-2.0-flash-exp") || params.model.includes("gemini-2.5-flash-image");
   }
-  return {
+  return {
+    model,
+    enabledThinking,
+    thinkingBudget,
+    imageGeneration,
+    thinkingLevel
+  };
 }
 __name(prepareModelConfig, "prepareModelConfig");
 function createSafetySettings(model) {
-  const
+  const isNonGemini1 = !model.includes("gemini-1");
   return [
     {
       category: "HARM_CATEGORY_HARASSMENT",
-      threshold:
+      threshold: isNonGemini1 ? "OFF" : "BLOCK_NONE"
     },
     {
       category: "HARM_CATEGORY_HATE_SPEECH",
-      threshold:
+      threshold: isNonGemini1 ? "OFF" : "BLOCK_NONE"
     },
     {
       category: "HARM_CATEGORY_SEXUALLY_EXPLICIT",
-      threshold:
+      threshold: isNonGemini1 ? "OFF" : "BLOCK_NONE"
     },
     {
       category: "HARM_CATEGORY_DANGEROUS_CONTENT",
-      threshold:
+      threshold: isNonGemini1 ? "OFF" : "BLOCK_NONE"
     },
     {
       category: "HARM_CATEGORY_CIVIC_INTEGRITY",
-      threshold:
+      threshold: isNonGemini1 ? "OFF" : "BLOCK_NONE"
     }
   ];
 }
@@ -372,10 +392,14 @@ function createGenerationConfig(params, modelConfig, pluginConfig) {
     maxOutputTokens: params.model.includes("vision") ? void 0 : params.maxTokens,
     topP: params.topP,
     responseModalities: modelConfig.imageGeneration ? ["TEXT", "IMAGE"] : void 0,
-    thinkingConfig: modelConfig.enabledThinking != null || pluginConfig.includeThoughts ?
-
-
-
+    thinkingConfig: modelConfig.enabledThinking != null || pluginConfig.includeThoughts ? (0, import_koishi.filterKeys)(
+      {
+        thinkingBudget: modelConfig.thinkingBudget,
+        thinkingLevel: modelConfig.thinkingLevel,
+        includeThoughts: pluginConfig.includeThoughts
+      },
+      (k, v) => v != null
+    ) : void 0
   };
 }
 __name(createGenerationConfig, "createGenerationConfig");
@@ -386,6 +410,7 @@ async function createChatGenerationParams(params, plugin, modelConfig, pluginCon
     modelConfig.model
   );
   const [systemInstruction, modelMessages] = extractSystemMessages(geminiMessages);
+  const systemInstructionKey = pluginConfig.useCamelCaseSystemInstruction ? "systemInstruction" : "system_instruction";
   return {
     contents: modelMessages,
     safetySettings: createSafetySettings(params.model),
@@ -394,7 +419,7 @@ async function createChatGenerationParams(params, plugin, modelConfig, pluginCon
       modelConfig,
       pluginConfig
     ),
-
+    [systemInstructionKey]: systemInstruction != null ? systemInstruction : void 0,
     tools: params.tools != null || pluginConfig.googleSearch || pluginConfig.codeExecution || pluginConfig.urlContext ? formatToolsToGeminiAITools(
       params.tools ?? [],
       pluginConfig,
@@ -980,11 +1005,8 @@ var GeminiClient = class extends import_client.PlatformModelAndEmbeddingsClient
        import_types2.ModelCapabilities.ToolCall
      ]
    };
-    const thinkingModel = [
-
-      "gemini-2.5-flash",
-      "gemini-3.0-pro"
-    ];
+    const thinkingModel = ["gemini-2.5-pro", "gemini-2.5-flash"];
+    const thinkingLevelModel = ["gemini-3.0-pro"];
    if (thinkingModel.some(
      (name2) => model.name.toLowerCase().includes(name2) && !model.name.toLowerCase().includes("image")
    )) {
@@ -997,6 +1019,13 @@ var GeminiClient = class extends import_client.PlatformModelAndEmbeddingsClient
      } else {
        models.push(info);
      }
+    } else if (thinkingLevelModel.some(
+      (name2) => model.name.toLowerCase().includes(name2) && !model.name.toLowerCase().includes("image")
+    )) {
+      models.push(
+        { ...info, name: model.name + "-low-thinking" },
+        info
+      );
    } else {
      models.push(info);
    }
@@ -1065,32 +1094,33 @@ function apply(ctx, config) {
   });
 }
 __name(apply, "apply");
-var Config4 =
+var Config4 = import_koishi2.Schema.intersect([
   import_chat.ChatLunaPlugin.Config,
-
-    platform:
-    apiKeys:
-
-
-
+  import_koishi2.Schema.object({
+    platform: import_koishi2.Schema.string().default("gemini"),
+    apiKeys: import_koishi2.Schema.array(
+      import_koishi2.Schema.tuple([
+        import_koishi2.Schema.string().role("secret").default(""),
+        import_koishi2.Schema.string().default(
          "https://generativelanguage.googleapis.com/v1beta"
        ),
-
+        import_koishi2.Schema.boolean().default(true)
      ])
    ).default([[]]).role("table")
  }),
-
-    maxContextRatio:
-    temperature:
-    googleSearch:
-    codeExecution:
-    urlContext:
-    thinkingBudget:
-    includeThoughts:
-    nonStreaming:
-    imageGeneration:
-    groundingContentDisplay:
-    searchThreshold:
+  import_koishi2.Schema.object({
+    maxContextRatio: import_koishi2.Schema.number().min(0).max(1).step(1e-4).role("slider").default(0.35),
+    temperature: import_koishi2.Schema.percent().min(0).max(2).step(0.1).default(1),
+    googleSearch: import_koishi2.Schema.boolean().default(false),
+    codeExecution: import_koishi2.Schema.boolean().default(false),
+    urlContext: import_koishi2.Schema.boolean().default(false),
+    thinkingBudget: import_koishi2.Schema.number().min(-1).max(24576).default(-1),
+    includeThoughts: import_koishi2.Schema.boolean().default(false),
+    nonStreaming: import_koishi2.Schema.boolean().default(false),
+    imageGeneration: import_koishi2.Schema.boolean().default(false),
+    groundingContentDisplay: import_koishi2.Schema.boolean().default(false),
+    searchThreshold: import_koishi2.Schema.number().min(0).max(1).step(0.1).default(0.5),
+    useCamelCaseSystemInstruction: import_koishi2.Schema.boolean().default(false)
  })
 ]).i18n({
   "zh-CN": require_zh_CN_schema(),
package/lib/index.d.ts
CHANGED
@@ -17,6 +17,7 @@ export interface Config extends ChatLunaPlugin.Config {
     imageGeneration: boolean;
     thinkingBudget: number;
     includeThoughts: boolean;
+    useCamelCaseSystemInstruction: boolean;
 }
 export declare const Config: Schema<Config>;
 export declare const inject: {
package/lib/index.mjs
CHANGED
@@ -8,14 +8,14 @@ var __commonJS = (cb, mod) => function __require() {
 // src/locales/zh-CN.schema.yml
 var require_zh_CN_schema = __commonJS({
   "src/locales/zh-CN.schema.yml"(exports, module) {
-    module.exports = { $inner: [{}, { $desc: "请求选项", platform: "适配器的平台名。(不懂请不要修改)", apiKeys: { $inner: ["Gemini API Key", "Gemini API 请求地址", "是否启用此配置"], $desc: "Gemini 的 API Key 和请求地址列表。" } }, { $desc: "模型配置", maxContextRatio: "最大上下文使用比例(0~1),控制可用的模型上下文窗口大小的最大百分比。例如 0.35 表示最多使用模型上下文的 35%。", temperature: "回复的随机性程度,数值越高,回复越随机(范围:0~2)。", googleSearch: "为模型启用谷歌搜索。", thinkingBudget: "思考预算,范围:(-1~24576),设置的数值越大,思考时花费的 Token 越多,-1 为动态思考。目前仅支持 gemini 2.5 系列模型。", groundingContentDisplay: "是否显示谷歌搜索结果。", imageGeneration: "为模型启用图像生成。目前仅支持 `gemini-2.0-flash-exp` 和 `gemini-2.5-flash-image-preview` 模型。", searchThreshold: "搜索的[置信度阈值](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval),范围:0~1,设置的数值越低,则越倾向于使用谷歌搜索。(仅支持 `gemini-1.5` 系列模型。gemini 2.0 模型起使用动态的工具调用)", includeThoughts: "是否获取模型的思考内容。", codeExecution: "为模型启用代码执行工具。", urlContext: "为模型启用 URL 内容获取工具。", nonStreaming: "强制不启用流式返回。开启后,将总是以非流式发起请求,即便配置了 stream 参数。" }] };
+    module.exports = { $inner: [{}, { $desc: "请求选项", platform: "适配器的平台名。(不懂请不要修改)", apiKeys: { $inner: ["Gemini API Key", "Gemini API 请求地址", "是否启用此配置"], $desc: "Gemini 的 API Key 和请求地址列表。" } }, { $desc: "模型配置", maxContextRatio: "最大上下文使用比例(0~1),控制可用的模型上下文窗口大小的最大百分比。例如 0.35 表示最多使用模型上下文的 35%。", temperature: "回复的随机性程度,数值越高,回复越随机(范围:0~2)。", googleSearch: "为模型启用谷歌搜索。", thinkingBudget: "思考预算,范围:(-1~24576),设置的数值越大,思考时花费的 Token 越多,-1 为动态思考。目前仅支持 gemini 2.5 系列模型。", groundingContentDisplay: "是否显示谷歌搜索结果。", imageGeneration: "为模型启用图像生成。目前仅支持 `gemini-2.0-flash-exp` 和 `gemini-2.5-flash-image-preview` 模型。", searchThreshold: "搜索的[置信度阈值](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval),范围:0~1,设置的数值越低,则越倾向于使用谷歌搜索。(仅支持 `gemini-1.5` 系列模型。gemini 2.0 模型起使用动态的工具调用)", includeThoughts: "是否获取模型的思考内容。", codeExecution: "为模型启用代码执行工具。", urlContext: "为模型启用 URL 内容获取工具。", nonStreaming: "强制不启用流式返回。开启后,将总是以非流式发起请求,即便配置了 stream 参数。", useCamelCaseSystemInstruction: "使用大写的 systemInstruction 而不是小写的 system_instruction" }] };
   }
 });

 // src/locales/en-US.schema.yml
 var require_en_US_schema = __commonJS({
   "src/locales/en-US.schema.yml"(exports, module) {
-    module.exports = { $inner: [{}, { $desc: "API Configuration", platform: "Adapter platform name. (Do not modify if you do not understand)", apiKeys: { $inner: ["Gemini API Key", "Gemini API Endpoint (optional)", "Enabled"], $desc: "Gemini API access credentials" } }, { $desc: "Model Parameters", maxContextRatio: "Maximum context usage ratio (0-1). Controls the maximum percentage of model context window available for use. For example, 0.35 means at most 35% of the model context can be used.", temperature: "Sampling temperature (0-2). Higher: more random, Lower: more deterministic", googleSearch: "Enable Google search", thinkingBudget: "Thinking budget (-1-24576). (0: dynamic thinking) Higher: more tokens spent on thinking. Currently only supports `gemini-2.5` series models.", groundingContentDisplay: "Enable display of search results", imageGeneration: "Enable image generation (only for `gemini-2.0-flash-exp` and `gemini-2.5-flash-image-preview` model)", searchThreshold: "Search confidence [threshold](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval) (0-1). Lower: more likely to use Google search", includeThoughts: "Enable retrieval of model thoughts", codeExecution: "Enable code execution tool", urlContext: "Enable URL context retrieval tool", nonStreaming: "Force disable streaming response. When enabled, requests will always be made in non-streaming mode, even if the stream parameter is configured." }] };
+    module.exports = { $inner: [{}, { $desc: "API Configuration", platform: "Adapter platform name. (Do not modify if you do not understand)", apiKeys: { $inner: ["Gemini API Key", "Gemini API Endpoint (optional)", "Enabled"], $desc: "Gemini API access credentials" } }, { $desc: "Model Parameters", maxContextRatio: "Maximum context usage ratio (0-1). Controls the maximum percentage of model context window available for use. For example, 0.35 means at most 35% of the model context can be used.", temperature: "Sampling temperature (0-2). Higher: more random, Lower: more deterministic", googleSearch: "Enable Google search", thinkingBudget: "Thinking budget (-1-24576). (0: dynamic thinking) Higher: more tokens spent on thinking. Currently only supports `gemini-2.5` series models.", groundingContentDisplay: "Enable display of search results", imageGeneration: "Enable image generation (only for `gemini-2.0-flash-exp` and `gemini-2.5-flash-image-preview` model)", searchThreshold: "Search confidence [threshold](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval) (0-1). Lower: more likely to use Google search", includeThoughts: "Enable retrieval of model thoughts", codeExecution: "Enable code execution tool", urlContext: "Enable URL context retrieval tool", nonStreaming: "Force disable streaming response. When enabled, requests will always be made in non-streaming mode, even if the stream parameter is configured.", useCamelCaseSystemInstruction: "Use camelCase systemInstruction instead of snake_case system_instruction" }] };
   }
 });

@@ -64,6 +64,7 @@ import {
 } from "koishi-plugin-chatluna/utils/string";
 import { isZodSchemaV3 } from "@langchain/core/utils/types";
 import { generateSchema } from "@anatine/zod-openapi";
+import { filterKeys } from "koishi";
 async function langchainMessageToGeminiMessage(messages, plugin, model) {
   return Promise.all(
     messages.map(async (message) => {
@@ -261,11 +262,7 @@ function formatToolsToGeminiAITools(tools, config, model) {
     urlContext = false;
   }
   if (googleSearch) {
-    if (model.includes("gemini-
-      result.push({
-        google_search: {}
-      });
-    } else {
+    if (model.includes("gemini-1")) {
       result.push({
         google_search_retrieval: {
           dynamic_retrieval_config: {
@@ -274,6 +271,10 @@ function formatToolsToGeminiAITools(tools, config, model) {
           }
         }
       });
+    } else {
+      result.push({
+        google_search: {}
+      });
     }
   }
   if (codeExecution) {
@@ -319,6 +320,7 @@ __name(messageTypeToGeminiRole, "messageTypeToGeminiRole");
 function prepareModelConfig(params, pluginConfig) {
   let model = params.model;
   let enabledThinking = null;
+  let thinkingLevel = "THINKING_LEVEL_UNSPECIFIED";
   if (model.includes("-thinking") && model.includes("gemini-2.5")) {
     enabledThinking = !model.includes("-non-thinking");
     model = model.replace("-non-thinking", "").replace("-thinking", "");
@@ -329,35 +331,53 @@ function prepareModelConfig(params, pluginConfig) {
   } else if (thinkingBudget >= 0 && thinkingBudget < 128) {
     thinkingBudget = 128;
   }
+  if (model.includes("-thinking") && model.includes("gemini-3.0")) {
+    enabledThinking = true;
+    const match = model.match(/-(low|medium|high)-thinking/);
+    if (match) {
+      thinkingLevel = match[1];
+      model = model.replace(`-${match[1]}-thinking`, "");
+    } else {
+      thinkingLevel = "THINKING_LEVEL_UNSPECIFIED";
+      model = model.replace("-thinking", "");
+    }
+    thinkingBudget = void 0;
+  }
   let imageGeneration = pluginConfig.imageGeneration ?? false;
   if (imageGeneration) {
     imageGeneration = params.model.includes("gemini-2.0-flash-exp") || params.model.includes("gemini-2.5-flash-image");
   }
-  return {
+  return {
+    model,
+    enabledThinking,
+    thinkingBudget,
+    imageGeneration,
+    thinkingLevel
+  };
 }
 __name(prepareModelConfig, "prepareModelConfig");
 function createSafetySettings(model) {
-  const
+  const isNonGemini1 = !model.includes("gemini-1");
   return [
     {
       category: "HARM_CATEGORY_HARASSMENT",
-      threshold:
+      threshold: isNonGemini1 ? "OFF" : "BLOCK_NONE"
     },
     {
       category: "HARM_CATEGORY_HATE_SPEECH",
-      threshold:
+      threshold: isNonGemini1 ? "OFF" : "BLOCK_NONE"
     },
     {
       category: "HARM_CATEGORY_SEXUALLY_EXPLICIT",
-      threshold:
+      threshold: isNonGemini1 ? "OFF" : "BLOCK_NONE"
     },
     {
       category: "HARM_CATEGORY_DANGEROUS_CONTENT",
-      threshold:
+      threshold: isNonGemini1 ? "OFF" : "BLOCK_NONE"
     },
     {
       category: "HARM_CATEGORY_CIVIC_INTEGRITY",
-      threshold:
+      threshold: isNonGemini1 ? "OFF" : "BLOCK_NONE"
     }
   ];
 }
@@ -369,10 +389,14 @@ function createGenerationConfig(params, modelConfig, pluginConfig) {
     maxOutputTokens: params.model.includes("vision") ? void 0 : params.maxTokens,
     topP: params.topP,
     responseModalities: modelConfig.imageGeneration ? ["TEXT", "IMAGE"] : void 0,
-    thinkingConfig: modelConfig.enabledThinking != null || pluginConfig.includeThoughts ?
-
-
-
+    thinkingConfig: modelConfig.enabledThinking != null || pluginConfig.includeThoughts ? filterKeys(
+      {
+        thinkingBudget: modelConfig.thinkingBudget,
+        thinkingLevel: modelConfig.thinkingLevel,
+        includeThoughts: pluginConfig.includeThoughts
+      },
+      (k, v) => v != null
+    ) : void 0
   };
 }
 __name(createGenerationConfig, "createGenerationConfig");
@@ -383,6 +407,7 @@ async function createChatGenerationParams(params, plugin, modelConfig, pluginCon
     modelConfig.model
   );
   const [systemInstruction, modelMessages] = extractSystemMessages(geminiMessages);
+  const systemInstructionKey = pluginConfig.useCamelCaseSystemInstruction ? "systemInstruction" : "system_instruction";
   return {
     contents: modelMessages,
     safetySettings: createSafetySettings(params.model),
@@ -391,7 +416,7 @@ async function createChatGenerationParams(params, plugin, modelConfig, pluginCon
       modelConfig,
       pluginConfig
     ),
-
+    [systemInstructionKey]: systemInstruction != null ? systemInstruction : void 0,
     tools: params.tools != null || pluginConfig.googleSearch || pluginConfig.codeExecution || pluginConfig.urlContext ? formatToolsToGeminiAITools(
       params.tools ?? [],
       pluginConfig,
@@ -977,11 +1002,8 @@ var GeminiClient = class extends PlatformModelAndEmbeddingsClient {
        ModelCapabilities.ToolCall
      ]
    };
-    const thinkingModel = [
-
-      "gemini-2.5-flash",
-      "gemini-3.0-pro"
-    ];
+    const thinkingModel = ["gemini-2.5-pro", "gemini-2.5-flash"];
+    const thinkingLevelModel = ["gemini-3.0-pro"];
    if (thinkingModel.some(
      (name2) => model.name.toLowerCase().includes(name2) && !model.name.toLowerCase().includes("image")
    )) {
@@ -994,6 +1016,13 @@ var GeminiClient = class extends PlatformModelAndEmbeddingsClient {
      } else {
        models.push(info);
      }
+    } else if (thinkingLevelModel.some(
+      (name2) => model.name.toLowerCase().includes(name2) && !model.name.toLowerCase().includes("image")
+    )) {
+      models.push(
+        { ...info, name: model.name + "-low-thinking" },
+        info
+      );
    } else {
      models.push(info);
    }
@@ -1087,7 +1116,8 @@ var Config4 = Schema.intersect([
     nonStreaming: Schema.boolean().default(false),
     imageGeneration: Schema.boolean().default(false),
     groundingContentDisplay: Schema.boolean().default(false),
-    searchThreshold: Schema.number().min(0).max(1).step(0.1).default(0.5)
+    searchThreshold: Schema.number().min(0).max(1).step(0.1).default(0.5),
+    useCamelCaseSystemInstruction: Schema.boolean().default(false)
   })
 ]).i18n({
   "zh-CN": require_zh_CN_schema(),
package/lib/utils.d.ts
CHANGED
@@ -16,6 +16,7 @@ export declare function prepareModelConfig(params: ModelRequestParams, pluginCon
     enabledThinking: boolean;
     thinkingBudget: number;
     imageGeneration: boolean;
+    thinkingLevel: string;
 };
 export declare function createSafetySettings(model: string): {
     category: string;
@@ -27,12 +28,20 @@ export declare function createGenerationConfig(params: ModelRequestParams, model
     maxOutputTokens: number;
     topP: number;
     responseModalities: string[];
-    thinkingConfig:
-        thinkingBudget: number;
-        includeThoughts: boolean;
-    };
+    thinkingConfig: import("cosmokit").Dict<string | number | boolean, "thinkingBudget" | "thinkingLevel" | "includeThoughts">;
 };
 export declare function createChatGenerationParams(params: ModelRequestParams, plugin: ChatLunaPlugin, modelConfig: ReturnType<typeof prepareModelConfig>, pluginConfig: Config): Promise<{
+    [x: string]: {
+        stopSequences: string | string[];
+        temperature: number;
+        maxOutputTokens: number;
+        topP: number;
+        responseModalities: string[];
+        thinkingConfig: import("cosmokit").Dict<string | number | boolean, "thinkingBudget" | "thinkingLevel" | "includeThoughts">;
+    } | ChatCompletionResponseMessage | ChatCompletionResponseMessage[] | {
+        category: string;
+        threshold: string;
+    }[] | Record<string, any>;
     contents: ChatCompletionResponseMessage[];
     safetySettings: {
         category: string;
@@ -44,12 +53,8 @@ export declare function createChatGenerationParams(params: ModelRequestParams, p
         maxOutputTokens: number;
         topP: number;
         responseModalities: string[];
-        thinkingConfig:
-            thinkingBudget: number;
-            includeThoughts: boolean;
-        };
+        thinkingConfig: import("cosmokit").Dict<string | number | boolean, "thinkingBudget" | "thinkingLevel" | "includeThoughts">;
     };
-    system_instruction: ChatCompletionResponseMessage;
     tools: Record<string, any>;
 }>;
 export declare function isChatResponse(response: any): response is ChatResponse;
package/package.json
CHANGED