koishi-plugin-chatluna-google-gemini-adapter 1.3.5 → 1.3.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/index.cjs +8 -6
- package/lib/index.d.ts +1 -0
- package/lib/index.mjs +8 -6
- package/lib/utils.d.ts +11 -1
- package/package.json +1 -1
package/lib/index.cjs
CHANGED
```diff
@@ -23,14 +23,14 @@ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: tru
 // src/locales/zh-CN.schema.yml
 var require_zh_CN_schema = __commonJS({
 "src/locales/zh-CN.schema.yml"(exports2, module2) {
-module2.exports = { $inner: [{}, { $desc: "请求选项", platform: "适配器的平台名。(不懂请不要修改)", apiKeys: { $inner: ["Gemini API Key", "Gemini API 请求地址", "是否启用此配置"], $desc: "Gemini 的 API Key 和请求地址列表。" } }, { $desc: "模型配置", maxContextRatio: "最大上下文使用比例(0~1),控制可用的模型上下文窗口大小的最大百分比。例如 0.35 表示最多使用模型上下文的 35%。", temperature: "回复的随机性程度,数值越高,回复越随机(范围:0~2)。", googleSearch: "为模型启用谷歌搜索。", thinkingBudget: "思考预算,范围:(-1~24576),设置的数值越大,思考时花费的 Token 越多,-1 为动态思考。目前仅支持 gemini 2.5 系列模型。", groundingContentDisplay: "是否显示谷歌搜索结果。", imageGeneration: "为模型启用图像生成。目前仅支持 `gemini-2.0-flash-exp` 和 `gemini-2.5-flash-image-preview` 模型。", searchThreshold: "搜索的[置信度阈值](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval),范围:0~1,设置的数值越低,则越倾向于使用谷歌搜索。(仅支持 `gemini-1.5` 系列模型。gemini 2.0 模型起使用动态的工具调用)", includeThoughts: "是否获取模型的思考内容。", codeExecution: "为模型启用代码执行工具。", urlContext: "为模型启用 URL 内容获取工具。", nonStreaming: "强制不启用流式返回。开启后,将总是以非流式发起请求,即便配置了 stream 参数。" }] };
+module2.exports = { $inner: [{}, { $desc: "请求选项", platform: "适配器的平台名。(不懂请不要修改)", apiKeys: { $inner: ["Gemini API Key", "Gemini API 请求地址", "是否启用此配置"], $desc: "Gemini 的 API Key 和请求地址列表。" } }, { $desc: "模型配置", maxContextRatio: "最大上下文使用比例(0~1),控制可用的模型上下文窗口大小的最大百分比。例如 0.35 表示最多使用模型上下文的 35%。", temperature: "回复的随机性程度,数值越高,回复越随机(范围:0~2)。", googleSearch: "为模型启用谷歌搜索。", thinkingBudget: "思考预算,范围:(-1~24576),设置的数值越大,思考时花费的 Token 越多,-1 为动态思考。目前仅支持 gemini 2.5 系列模型。", groundingContentDisplay: "是否显示谷歌搜索结果。", imageGeneration: "为模型启用图像生成。目前仅支持 `gemini-2.0-flash-exp` 和 `gemini-2.5-flash-image-preview` 模型。", searchThreshold: "搜索的[置信度阈值](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval),范围:0~1,设置的数值越低,则越倾向于使用谷歌搜索。(仅支持 `gemini-1.5` 系列模型。gemini 2.0 模型起使用动态的工具调用)", includeThoughts: "是否获取模型的思考内容。", codeExecution: "为模型启用代码执行工具。", urlContext: "为模型启用 URL 内容获取工具。", nonStreaming: "强制不启用流式返回。开启后,将总是以非流式发起请求,即便配置了 stream 参数。", useCamelCaseSystemInstruction: "使用大写的 systemInstruction 而不是小写的 system_instruction" }] };
 }
 });

 // src/locales/en-US.schema.yml
 var require_en_US_schema = __commonJS({
 "src/locales/en-US.schema.yml"(exports2, module2) {
-module2.exports = { $inner: [{}, { $desc: "API Configuration", platform: "Adapter platform name. (Do not modify if you do not understand)", apiKeys: { $inner: ["Gemini API Key", "Gemini API Endpoint (optional)", "Enabled"], $desc: "Gemini API access credentials" } }, { $desc: "Model Parameters", maxContextRatio: "Maximum context usage ratio (0-1). Controls the maximum percentage of model context window available for use. For example, 0.35 means at most 35% of the model context can be used.", temperature: "Sampling temperature (0-2). Higher: more random, Lower: more deterministic", googleSearch: "Enable Google search", thinkingBudget: "Thinking budget (-1-24576). (0: dynamic thinking) Higher: more tokens spent on thinking. Currently only supports `gemini-2.5` series models.", groundingContentDisplay: "Enable display of search results", imageGeneration: "Enable image generation (only for `gemini-2.0-flash-exp` and `gemini-2.5-flash-image-preview` model)", searchThreshold: "Search confidence [threshold](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval) (0-1). Lower: more likely to use Google search", includeThoughts: "Enable retrieval of model thoughts", codeExecution: "Enable code execution tool", urlContext: "Enable URL context retrieval tool", nonStreaming: "Force disable streaming response. When enabled, requests will always be made in non-streaming mode, even if the stream parameter is configured." }] };
+module2.exports = { $inner: [{}, { $desc: "API Configuration", platform: "Adapter platform name. (Do not modify if you do not understand)", apiKeys: { $inner: ["Gemini API Key", "Gemini API Endpoint (optional)", "Enabled"], $desc: "Gemini API access credentials" } }, { $desc: "Model Parameters", maxContextRatio: "Maximum context usage ratio (0-1). Controls the maximum percentage of model context window available for use. For example, 0.35 means at most 35% of the model context can be used.", temperature: "Sampling temperature (0-2). Higher: more random, Lower: more deterministic", googleSearch: "Enable Google search", thinkingBudget: "Thinking budget (-1-24576). (0: dynamic thinking) Higher: more tokens spent on thinking. Currently only supports `gemini-2.5` series models.", groundingContentDisplay: "Enable display of search results", imageGeneration: "Enable image generation (only for `gemini-2.0-flash-exp` and `gemini-2.5-flash-image-preview` model)", searchThreshold: "Search confidence [threshold](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval) (0-1). Lower: more likely to use Google search", includeThoughts: "Enable retrieval of model thoughts", codeExecution: "Enable code execution tool", urlContext: "Enable URL context retrieval tool", nonStreaming: "Force disable streaming response. When enabled, requests will always be made in non-streaming mode, even if the stream parameter is configured.", useCamelCaseSystemInstruction: "Use camelCase systemInstruction instead of snake_case system_instruction" }] };
 }
 });

@@ -323,7 +323,7 @@ __name(messageTypeToGeminiRole, "messageTypeToGeminiRole");
 function prepareModelConfig(params, pluginConfig) {
 let model = params.model;
 let enabledThinking = null;
-let thinkingLevel = "
+let thinkingLevel = "THINKING_LEVEL_UNSPECIFIED";
 if (model.includes("-thinking") && model.includes("gemini-2.5")) {
 enabledThinking = !model.includes("-non-thinking");
 model = model.replace("-non-thinking", "").replace("-thinking", "");
@@ -341,7 +341,7 @@ function prepareModelConfig(params, pluginConfig) {
 thinkingLevel = match[1];
 model = model.replace(`-${match[1]}-thinking`, "");
 } else {
-thinkingLevel = "
+thinkingLevel = "THINKING_LEVEL_UNSPECIFIED";
 model = model.replace("-thinking", "");
 }
 thinkingBudget = void 0;
@@ -410,6 +410,7 @@ async function createChatGenerationParams(params, plugin, modelConfig, pluginCon
 modelConfig.model
 );
 const [systemInstruction, modelMessages] = extractSystemMessages(geminiMessages);
+const systemInstructionKey = pluginConfig.useCamelCaseSystemInstruction ? "systemInstruction" : "system_instruction";
 return {
 contents: modelMessages,
 safetySettings: createSafetySettings(params.model),
@@ -418,7 +419,7 @@ async function createChatGenerationParams(params, plugin, modelConfig, pluginCon
 modelConfig,
 pluginConfig
 ),
-
+[systemInstructionKey]: systemInstruction != null ? systemInstruction : void 0,
 tools: params.tools != null || pluginConfig.googleSearch || pluginConfig.codeExecution || pluginConfig.urlContext ? formatToolsToGeminiAITools(
 params.tools ?? [],
 pluginConfig,
@@ -1118,7 +1119,8 @@ var Config4 = import_koishi2.Schema.intersect([
 nonStreaming: import_koishi2.Schema.boolean().default(false),
 imageGeneration: import_koishi2.Schema.boolean().default(false),
 groundingContentDisplay: import_koishi2.Schema.boolean().default(false),
-searchThreshold: import_koishi2.Schema.number().min(0).max(1).step(0.1).default(0.5)
+searchThreshold: import_koishi2.Schema.number().min(0).max(1).step(0.1).default(0.5),
+useCamelCaseSystemInstruction: import_koishi2.Schema.boolean().default(false)
 })
 ]).i18n({
 "zh-CN": require_zh_CN_schema(),
```
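The behavioral core of this release is visible in the two `createChatGenerationParams` hunks above: the request-body key used for the system prompt is now chosen at runtime from the new `useCamelCaseSystemInstruction` flag. Below is a minimal sketch of that pattern under illustrative names (`GeminiRequest` and `buildRequest` are not adapter exports), not the adapter's exact code:

```ts
// Minimal sketch of the computed-key pattern added above; GeminiRequest and
// buildRequest are illustrative names, not part of the adapter's API.
interface GeminiRequest {
    contents: unknown[];
    // The system prompt lands under either "systemInstruction" or
    // "system_instruction", so it is modelled as an index signature here.
    [key: string]: unknown;
}

function buildRequest(
    contents: unknown[],
    systemInstruction: unknown,
    useCamelCaseSystemInstruction: boolean
): GeminiRequest {
    const systemInstructionKey = useCamelCaseSystemInstruction
        ? "systemInstruction"
        : "system_instruction";
    return {
        contents,
        // Mirrors the diff: the value is left undefined when no system
        // message was extracted from the conversation.
        [systemInstructionKey]: systemInstruction ?? undefined
    };
}
```

With the schema default of `false`, the payload keeps the snake_case `system_instruction` key, so existing setups are unaffected unless the flag is switched on.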
package/lib/index.d.ts
CHANGED
```diff
@@ -17,6 +17,7 @@ export interface Config extends ChatLunaPlugin.Config {
 imageGeneration: boolean;
 thinkingBudget: number;
 includeThoughts: boolean;
+useCamelCaseSystemInstruction: boolean;
 }
 export declare const Config: Schema<Config>;
 export declare const inject: {
```
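For consumers, the new field is just one more boolean on `Config`. A hypothetical configuration fragment follows; the import path assumes the package's default entry point, and the other fields shown already existed before 1.3.6:

```ts
import type { Config } from "koishi-plugin-chatluna-google-gemini-adapter";

// Hypothetical values; only useCamelCaseSystemInstruction is new in 1.3.6.
// Config extends ChatLunaPlugin.Config, so Partial<> keeps the fragment
// self-contained without listing every inherited field.
const geminiConfig: Partial<Config> = {
    thinkingBudget: -1,
    includeThoughts: false,
    imageGeneration: false,
    // true  -> request body uses the camelCase key "systemInstruction"
    // false -> request body uses the snake_case key "system_instruction" (default)
    useCamelCaseSystemInstruction: true
};
```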
package/lib/index.mjs
CHANGED
```diff
@@ -8,14 +8,14 @@ var __commonJS = (cb, mod) => function __require() {
 // src/locales/zh-CN.schema.yml
 var require_zh_CN_schema = __commonJS({
 "src/locales/zh-CN.schema.yml"(exports, module) {
-module.exports = { $inner: [{}, { $desc: "请求选项", platform: "适配器的平台名。(不懂请不要修改)", apiKeys: { $inner: ["Gemini API Key", "Gemini API 请求地址", "是否启用此配置"], $desc: "Gemini 的 API Key 和请求地址列表。" } }, { $desc: "模型配置", maxContextRatio: "最大上下文使用比例(0~1),控制可用的模型上下文窗口大小的最大百分比。例如 0.35 表示最多使用模型上下文的 35%。", temperature: "回复的随机性程度,数值越高,回复越随机(范围:0~2)。", googleSearch: "为模型启用谷歌搜索。", thinkingBudget: "思考预算,范围:(-1~24576),设置的数值越大,思考时花费的 Token 越多,-1 为动态思考。目前仅支持 gemini 2.5 系列模型。", groundingContentDisplay: "是否显示谷歌搜索结果。", imageGeneration: "为模型启用图像生成。目前仅支持 `gemini-2.0-flash-exp` 和 `gemini-2.5-flash-image-preview` 模型。", searchThreshold: "搜索的[置信度阈值](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval),范围:0~1,设置的数值越低,则越倾向于使用谷歌搜索。(仅支持 `gemini-1.5` 系列模型。gemini 2.0 模型起使用动态的工具调用)", includeThoughts: "是否获取模型的思考内容。", codeExecution: "为模型启用代码执行工具。", urlContext: "为模型启用 URL 内容获取工具。", nonStreaming: "强制不启用流式返回。开启后,将总是以非流式发起请求,即便配置了 stream 参数。" }] };
+module.exports = { $inner: [{}, { $desc: "请求选项", platform: "适配器的平台名。(不懂请不要修改)", apiKeys: { $inner: ["Gemini API Key", "Gemini API 请求地址", "是否启用此配置"], $desc: "Gemini 的 API Key 和请求地址列表。" } }, { $desc: "模型配置", maxContextRatio: "最大上下文使用比例(0~1),控制可用的模型上下文窗口大小的最大百分比。例如 0.35 表示最多使用模型上下文的 35%。", temperature: "回复的随机性程度,数值越高,回复越随机(范围:0~2)。", googleSearch: "为模型启用谷歌搜索。", thinkingBudget: "思考预算,范围:(-1~24576),设置的数值越大,思考时花费的 Token 越多,-1 为动态思考。目前仅支持 gemini 2.5 系列模型。", groundingContentDisplay: "是否显示谷歌搜索结果。", imageGeneration: "为模型启用图像生成。目前仅支持 `gemini-2.0-flash-exp` 和 `gemini-2.5-flash-image-preview` 模型。", searchThreshold: "搜索的[置信度阈值](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval),范围:0~1,设置的数值越低,则越倾向于使用谷歌搜索。(仅支持 `gemini-1.5` 系列模型。gemini 2.0 模型起使用动态的工具调用)", includeThoughts: "是否获取模型的思考内容。", codeExecution: "为模型启用代码执行工具。", urlContext: "为模型启用 URL 内容获取工具。", nonStreaming: "强制不启用流式返回。开启后,将总是以非流式发起请求,即便配置了 stream 参数。", useCamelCaseSystemInstruction: "使用大写的 systemInstruction 而不是小写的 system_instruction" }] };
 }
 });

 // src/locales/en-US.schema.yml
 var require_en_US_schema = __commonJS({
 "src/locales/en-US.schema.yml"(exports, module) {
-module.exports = { $inner: [{}, { $desc: "API Configuration", platform: "Adapter platform name. (Do not modify if you do not understand)", apiKeys: { $inner: ["Gemini API Key", "Gemini API Endpoint (optional)", "Enabled"], $desc: "Gemini API access credentials" } }, { $desc: "Model Parameters", maxContextRatio: "Maximum context usage ratio (0-1). Controls the maximum percentage of model context window available for use. For example, 0.35 means at most 35% of the model context can be used.", temperature: "Sampling temperature (0-2). Higher: more random, Lower: more deterministic", googleSearch: "Enable Google search", thinkingBudget: "Thinking budget (-1-24576). (0: dynamic thinking) Higher: more tokens spent on thinking. Currently only supports `gemini-2.5` series models.", groundingContentDisplay: "Enable display of search results", imageGeneration: "Enable image generation (only for `gemini-2.0-flash-exp` and `gemini-2.5-flash-image-preview` model)", searchThreshold: "Search confidence [threshold](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval) (0-1). Lower: more likely to use Google search", includeThoughts: "Enable retrieval of model thoughts", codeExecution: "Enable code execution tool", urlContext: "Enable URL context retrieval tool", nonStreaming: "Force disable streaming response. When enabled, requests will always be made in non-streaming mode, even if the stream parameter is configured." }] };
+module.exports = { $inner: [{}, { $desc: "API Configuration", platform: "Adapter platform name. (Do not modify if you do not understand)", apiKeys: { $inner: ["Gemini API Key", "Gemini API Endpoint (optional)", "Enabled"], $desc: "Gemini API access credentials" } }, { $desc: "Model Parameters", maxContextRatio: "Maximum context usage ratio (0-1). Controls the maximum percentage of model context window available for use. For example, 0.35 means at most 35% of the model context can be used.", temperature: "Sampling temperature (0-2). Higher: more random, Lower: more deterministic", googleSearch: "Enable Google search", thinkingBudget: "Thinking budget (-1-24576). (0: dynamic thinking) Higher: more tokens spent on thinking. Currently only supports `gemini-2.5` series models.", groundingContentDisplay: "Enable display of search results", imageGeneration: "Enable image generation (only for `gemini-2.0-flash-exp` and `gemini-2.5-flash-image-preview` model)", searchThreshold: "Search confidence [threshold](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval) (0-1). Lower: more likely to use Google search", includeThoughts: "Enable retrieval of model thoughts", codeExecution: "Enable code execution tool", urlContext: "Enable URL context retrieval tool", nonStreaming: "Force disable streaming response. When enabled, requests will always be made in non-streaming mode, even if the stream parameter is configured.", useCamelCaseSystemInstruction: "Use camelCase systemInstruction instead of snake_case system_instruction" }] };
 }
 });

@@ -320,7 +320,7 @@ __name(messageTypeToGeminiRole, "messageTypeToGeminiRole");
 function prepareModelConfig(params, pluginConfig) {
 let model = params.model;
 let enabledThinking = null;
-let thinkingLevel = "
+let thinkingLevel = "THINKING_LEVEL_UNSPECIFIED";
 if (model.includes("-thinking") && model.includes("gemini-2.5")) {
 enabledThinking = !model.includes("-non-thinking");
 model = model.replace("-non-thinking", "").replace("-thinking", "");
@@ -338,7 +338,7 @@ function prepareModelConfig(params, pluginConfig) {
 thinkingLevel = match[1];
 model = model.replace(`-${match[1]}-thinking`, "");
 } else {
-thinkingLevel = "
+thinkingLevel = "THINKING_LEVEL_UNSPECIFIED";
 model = model.replace("-thinking", "");
 }
 thinkingBudget = void 0;
@@ -407,6 +407,7 @@ async function createChatGenerationParams(params, plugin, modelConfig, pluginCon
 modelConfig.model
 );
 const [systemInstruction, modelMessages] = extractSystemMessages(geminiMessages);
+const systemInstructionKey = pluginConfig.useCamelCaseSystemInstruction ? "systemInstruction" : "system_instruction";
 return {
 contents: modelMessages,
 safetySettings: createSafetySettings(params.model),
@@ -415,7 +416,7 @@ async function createChatGenerationParams(params, plugin, modelConfig, pluginCon
 modelConfig,
 pluginConfig
 ),
-
+[systemInstructionKey]: systemInstruction != null ? systemInstruction : void 0,
 tools: params.tools != null || pluginConfig.googleSearch || pluginConfig.codeExecution || pluginConfig.urlContext ? formatToolsToGeminiAITools(
 params.tools ?? [],
 pluginConfig,
@@ -1115,7 +1116,8 @@ var Config4 = Schema.intersect([
 nonStreaming: Schema.boolean().default(false),
 imageGeneration: Schema.boolean().default(false),
 groundingContentDisplay: Schema.boolean().default(false),
-searchThreshold: Schema.number().min(0).max(1).step(0.1).default(0.5)
+searchThreshold: Schema.number().min(0).max(1).step(0.1).default(0.5),
+useCamelCaseSystemInstruction: Schema.boolean().default(false)
 })
 ]).i18n({
 "zh-CN": require_zh_CN_schema(),
```
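The other change repeated in both bundles is the fallback for `thinkingLevel`, which now defaults to the literal `"THINKING_LEVEL_UNSPECIFIED"` whenever a `-thinking` model name carries no explicit level suffix. Below is a rough, self-contained reconstruction of that flow; the level regex is an assumption, since only the fallback string, the `match[1]` assignment, and the `replace` calls are visible in this diff:

```ts
// Illustrative reconstruction; the adapter's real suffix regex is not shown
// in this diff, so the (low|medium|high) pattern below is an assumption.
function parseThinkingSuffix(rawModel: string): {
    model: string;
    thinkingLevel: string;
} {
    let model = rawModel;
    let thinkingLevel = "THINKING_LEVEL_UNSPECIFIED";
    const match = model.match(/-(low|medium|high)-thinking$/);
    if (match) {
        thinkingLevel = match[1];
        model = model.replace(`-${match[1]}-thinking`, "");
    } else {
        // No explicit level: keep the unspecified sentinel and just strip the suffix.
        model = model.replace("-thinking", "");
    }
    return { model, thinkingLevel };
}

// parseThinkingSuffix("gemini-2.5-pro-high-thinking")
//   -> { model: "gemini-2.5-pro", thinkingLevel: "high" }
// parseThinkingSuffix("gemini-2.5-flash-thinking")
//   -> { model: "gemini-2.5-flash", thinkingLevel: "THINKING_LEVEL_UNSPECIFIED" }
```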
package/lib/utils.d.ts
CHANGED
```diff
@@ -31,6 +31,17 @@ export declare function createGenerationConfig(params: ModelRequestParams, model
 thinkingConfig: import("cosmokit").Dict<string | number | boolean, "thinkingBudget" | "thinkingLevel" | "includeThoughts">;
 };
 export declare function createChatGenerationParams(params: ModelRequestParams, plugin: ChatLunaPlugin, modelConfig: ReturnType<typeof prepareModelConfig>, pluginConfig: Config): Promise<{
+[x: string]: {
+stopSequences: string | string[];
+temperature: number;
+maxOutputTokens: number;
+topP: number;
+responseModalities: string[];
+thinkingConfig: import("cosmokit").Dict<string | number | boolean, "thinkingBudget" | "thinkingLevel" | "includeThoughts">;
+} | ChatCompletionResponseMessage | ChatCompletionResponseMessage[] | {
+category: string;
+threshold: string;
+}[] | Record<string, any>;
 contents: ChatCompletionResponseMessage[];
 safetySettings: {
 category: string;
@@ -44,7 +55,6 @@ export declare function createChatGenerationParams(params: ModelRequestParams, p
 responseModalities: string[];
 thinkingConfig: import("cosmokit").Dict<string | number | boolean, "thinkingBudget" | "thinkingLevel" | "includeThoughts">;
 };
-system_instruction: ChatCompletionResponseMessage;
 tools: Record<string, any>;
 }>;
 export declare function isChatResponse(response: any): response is ChatResponse;
```
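This declaration change follows mechanically from the runtime change in index.cjs/index.mjs: once the payload key is computed rather than written literally, the compiler can no longer declare a fixed `system_instruction` property and instead emits the `[x: string]: ...` index signature seen in the new return type. A tiny illustration of the same effect, with illustrative declarations:

```ts
// Illustrative only: a computed property key makes the inferred object type
// carry an index signature instead of a fixed "system_instruction" member,
// consistent with the `[x: string]: ...` line added to the declaration above.
declare const useCamelCaseSystemInstruction: boolean;
declare const systemInstruction: { role: string; parts: unknown[] } | undefined;

const systemInstructionKey = useCamelCaseSystemInstruction
    ? "systemInstruction"
    : "system_instruction";

const payload = {
    contents: [] as unknown[],
    // Computed key: tsc can no longer pin down a single property name here.
    [systemInstructionKey]: systemInstruction
};
```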
package/package.json
CHANGED