koishi-plugin-chatluna-google-gemini-adapter 1.3.0-alpha.8 → 1.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +7 -7
- package/lib/client.d.ts +2 -1
- package/lib/index.cjs +92 -37
- package/lib/index.d.ts +2 -2
- package/lib/index.mjs +85 -30
- package/lib/requester.d.ts +2 -1
- package/lib/types.d.ts +8 -1
- package/package.json +11 -9
package/README.md
CHANGED

@@ -1,7 +1,7 @@
-## chatluna-google-gemini-adapter
-
-## [](https://www.npmjs.com/package/koishi-plugin-chatluna-google-gemini) [](https://www.npmjs.com/package//koishi-plugin-chatluna-google-gemini-adapter)
-
-> 为 ChatLuna 提供 Google Gemini 支持的适配器
-
-[Gemini 适配器文档](https://chatluna.chat/guide/configure-model-platform/google-gemini.html)
+## chatluna-google-gemini-adapter
+
+## [](https://www.npmjs.com/package/koishi-plugin-chatluna-google-gemini) [](https://www.npmjs.com/package//koishi-plugin-chatluna-google-gemini-adapter)
+
+> 为 ChatLuna 提供 Google Gemini 支持的适配器
+
+[Gemini 适配器文档](https://chatluna.chat/guide/configure-model-platform/google-gemini.html)
package/lib/client.d.ts
CHANGED

@@ -5,6 +5,7 @@ import { ChatLunaBaseEmbeddings, ChatLunaChatModel } from 'koishi-plugin-chatlun
 import { ModelInfo } from 'koishi-plugin-chatluna/llm-core/platform/types';
 import { Config } from '.';
 import { ChatLunaPlugin } from 'koishi-plugin-chatluna/services/chat';
+import { RunnableConfig } from '@langchain/core/runnables';
 export declare class GeminiClient extends PlatformModelAndEmbeddingsClient<ClientConfig> {
     private _config;
     plugin: ChatLunaPlugin;

@@ -12,6 +13,6 @@ export declare class GeminiClient extends PlatformModelAndEmbeddingsClient<Clien
     private _requester;
     get logger(): import("reggol");
     constructor(ctx: Context, _config: Config, plugin: ChatLunaPlugin);
-    refreshModels(): Promise<ModelInfo[]>;
+    refreshModels(config?: RunnableConfig): Promise<ModelInfo[]>;
     protected _createModel(model: string): ChatLunaChatModel | ChatLunaBaseEmbeddings;
 }
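The new optional `RunnableConfig` parameter (mirrored in `requester.d.ts` below) carries an abort signal down to the underlying `GET models` request, as the `getModels` change in `lib/index.cjs` shows. A minimal caller-side sketch, assuming an already-constructed `GeminiClient` named `client`:

```typescript
import { RunnableConfig } from '@langchain/core/runnables'

// Cancel a model-list refresh that takes longer than 10 seconds.
// The parameter is typed structurally so the sketch stays self-contained.
async function refreshWithTimeout(client: {
    refreshModels(config?: RunnableConfig): Promise<{ name: string }[]>
}) {
    const controller = new AbortController()
    const timer = setTimeout(() => controller.abort(), 10_000)
    try {
        // The signal rides along in the RunnableConfig and aborts the HTTP GET.
        const models = await client.refreshModels({ signal: controller.signal })
        return models.map((model) => model.name)
    } finally {
        clearTimeout(timer)
    }
}
```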
package/lib/index.cjs
CHANGED

@@ -23,14 +23,14 @@ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: tru
 // src/locales/zh-CN.schema.yml
 var require_zh_CN_schema = __commonJS({
   "src/locales/zh-CN.schema.yml"(exports2, module2) {
-    module2.exports = { $inner: [{}, { $desc: "请求选项", platform: "适配器的平台名。(不懂请不要修改)", apiKeys: { $inner: ["Gemini
+    module2.exports = { $inner: [{}, { $desc: "请求选项", platform: "适配器的平台名。(不懂请不要修改)", apiKeys: { $inner: ["Gemini API Key", "Gemini API 请求地址", "是否启用此配置"], $desc: "Gemini 的 API Key 和请求地址列表。" } }, { $desc: "模型配置", maxContextRatio: "最大上下文使用比例(0~1),控制可用的模型上下文窗口大小的最大百分比。例如 0.35 表示最多使用模型上下文的 35%。", temperature: "回复的随机性程度,数值越高,回复越随机(范围:0~2)。", googleSearch: "为模型启用谷歌搜索。", thinkingBudget: "思考预算,范围:(-1~24576),设置的数值越大,思考时花费的 Token 越多,-1 为动态思考。目前仅支持 gemini 2.5 系列模型。", groundingContentDisplay: "是否显示谷歌搜索结果。", imageGeneration: "为模型启用图像生成。目前仅支持 `gemini-2.0-flash-exp` 和 `gemini-2.5-flash-image-preview` 模型。", searchThreshold: "搜索的[置信度阈值](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval),范围:0~1,设置的数值越低,则越倾向于使用谷歌搜索。(仅支持 `gemini-1.5` 系列模型。gemini 2.0 模型起使用动态的工具调用)", includeThoughts: "是否获取模型的思考内容。", codeExecution: "为模型启用代码执行工具。", urlContext: "为模型启用 URL 内容获取工具。", nonStreaming: "强制不启用流式返回。开启后,将总是以非流式发起请求,即便配置了 stream 参数。" }] };
   }
 });

 // src/locales/en-US.schema.yml
 var require_en_US_schema = __commonJS({
   "src/locales/en-US.schema.yml"(exports2, module2) {
-    module2.exports = { $inner: [{}, { $desc: "API Configuration", platform: "Adapter platform name. (Do not modify if you do not understand)", apiKeys: { $inner: ["Gemini API Key", "Gemini API Endpoint (optional)"], $desc: "Gemini API access credentials" } }, { $desc: "Model Parameters",
+    module2.exports = { $inner: [{}, { $desc: "API Configuration", platform: "Adapter platform name. (Do not modify if you do not understand)", apiKeys: { $inner: ["Gemini API Key", "Gemini API Endpoint (optional)", "Enabled"], $desc: "Gemini API access credentials" } }, { $desc: "Model Parameters", maxContextRatio: "Maximum context usage ratio (0-1). Controls the maximum percentage of model context window available for use. For example, 0.35 means at most 35% of the model context can be used.", temperature: "Sampling temperature (0-2). Higher: more random, Lower: more deterministic", googleSearch: "Enable Google search", thinkingBudget: "Thinking budget (-1-24576). (0: dynamic thinking) Higher: more tokens spent on thinking. Currently only supports `gemini-2.5` series models.", groundingContentDisplay: "Enable display of search results", imageGeneration: "Enable image generation (only for `gemini-2.0-flash-exp` and `gemini-2.5-flash-image-preview` model)", searchThreshold: "Search confidence [threshold](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval) (0-1). Lower: more likely to use Google search", includeThoughts: "Enable retrieval of model thoughts", codeExecution: "Enable code execution tool", urlContext: "Enable URL context retrieval tool", nonStreaming: "Force disable streaming response. When enabled, requests will always be made in non-streaming mode, even if the stream parameter is configured." }] };
   }
 });

@@ -51,7 +51,7 @@ var import_koishi = require("koishi");
 // src/client.ts
 var import_client = require("koishi-plugin-chatluna/llm-core/platform/client");
 var import_model = require("koishi-plugin-chatluna/llm-core/platform/model");
-var
+var import_types2 = require("koishi-plugin-chatluna/llm-core/platform/types");
 var import_error2 = require("koishi-plugin-chatluna/utils/error");

 // src/requester.ts

@@ -63,10 +63,10 @@ var import_sse = require("koishi-plugin-chatluna/utils/sse");
 var import_stream = require("koishi-plugin-chatluna/utils/stream");

 // src/utils.ts
-var import_zod_to_json_schema = require("zod-to-json-schema");
 var import_v1_shared_adapter = require("@chatluna/v1-shared-adapter");
 var import_string = require("koishi-plugin-chatluna/utils/string");
-var
+var import_types = require("@langchain/core/utils/types");
+var import_zod_openapi = require("@anatine/zod-openapi");
 async function langchainMessageToGeminiMessage(messages, plugin, model) {
   return Promise.all(
     messages.map(async (message) => {

@@ -287,9 +287,7 @@ function formatToolsToGeminiAITools(tools, config, model) {
 __name(formatToolsToGeminiAITools, "formatToolsToGeminiAITools");
 function formatToolToGeminiAITool(tool) {
   const parameters = (0, import_v1_shared_adapter.removeAdditionalProperties)(
-    tool.schema
-      allowedAdditionalProperties: void 0
-    }) : tool.schema
+    (0, import_types.isZodSchemaV3)(tool.schema) ? (0, import_zod_openapi.generateSchema)(tool.schema, true, "3.0") : tool.schema
   );
   return {
     name: tool.name,
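The tool-schema path above replaces `zod-to-json-schema` with `@anatine/zod-openapi`: schemas detected as Zod v3 via `isZodSchemaV3` are converted to an OpenAPI 3.0 schema object before being handed to Gemini as function-call parameters. An illustrative, standalone sketch of that conversion (`searchTool` is a made-up schema, not from the package):

```typescript
import { z } from 'zod'
import { generateSchema } from '@anatine/zod-openapi'

// Hypothetical tool input schema, standing in for tool.schema.
const searchTool = z.object({
    query: z.string().describe('search keywords'),
    limit: z.number().int().optional()
})

// Mirrors the compiled call generateSchema(tool.schema, true, "3.0"):
// the second argument asks for the schema's output type, the third
// selects the OpenAPI 3.0 dialect.
const parameters = generateSchema(searchTool, true, '3.0')
console.log(JSON.stringify(parameters, null, 2))
```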
@@ -405,6 +403,7 @@ __name(isChatResponse, "isChatResponse");

 // src/requester.ts
 var import_string2 = require("koishi-plugin-chatluna/utils/string");
+var import_logger = require("koishi-plugin-chatluna/utils/logger");
 var GeminiRequester = class extends import_api.ModelRequester {
   constructor(ctx, _configPool, _pluginConfig, _plugin) {
     super(ctx, _configPool, _pluginConfig, _plugin);

@@ -448,6 +447,13 @@ var GeminiRequester = class extends import_api.ModelRequester {
       await (0, import_sse.checkResponse)(response);
       yield* this._processResponseStream(response);
     } catch (e) {
+      if (this.ctx.chatluna.config.isLog) {
+        await (0, import_logger.trackLogToLocal)(
+          "Request",
+          JSON.stringify(chatGenerationParams),
+          logger
+        );
+      }
       if (e instanceof import_error.ChatLunaError) {
         throw e;
       } else {

@@ -457,15 +463,16 @@
   }
   async completionInternal(params) {
     const modelConfig = prepareModelConfig(params, this._pluginConfig);
+    const chatGenerationParams = await createChatGenerationParams(
+      params,
+      this._plugin,
+      modelConfig,
+      this._pluginConfig
+    );
     try {
       const response = await this._post(
         `models/${modelConfig.model}:generateContent`,
-
-        params,
-        this._plugin,
-        modelConfig,
-        this._pluginConfig
-      ),
+        chatGenerationParams,
         {
           signal: params.signal
         }
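The `completionInternal` change above (and the streaming path's matching change) hoists the request body out of the `try` block, so the `catch` handler can still dump it with `trackLogToLocal` when ChatLuna's `isLog` debug switch is on. A minimal standalone sketch of the pattern; every helper name here is hypothetical:

```typescript
// Hypothetical stand-ins for createChatGenerationParams / this._post / trackLogToLocal.
async function postWithRequestLogging<T>(
    buildBody: () => Promise<object>,
    post: (body: object) => Promise<T>,
    logRequest: (body: string) => Promise<void>,
    debugEnabled: boolean
): Promise<T> {
    const body = await buildBody() // hoisted: still in scope inside catch
    try {
        return await post(body)
    } catch (e) {
        // On failure, the exact request body can be persisted for debugging.
        if (debugEnabled) await logRequest(JSON.stringify(body))
        throw e
    }
}
```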
@@ -473,6 +480,13 @@
       await (0, import_sse.checkResponse)(response);
       return await this._processResponse(response);
     } catch (e) {
+      if (this.ctx.chatluna.config.isLog) {
+        await (0, import_logger.trackLogToLocal)(
+          "Request",
+          JSON.stringify(chatGenerationParams),
+          logger
+        );
+      }
       if (e instanceof import_error.ChatLunaError) {
         throw e;
       } else {

@@ -522,12 +536,17 @@
         "error when calling gemini embeddings, Result: " + JSON.stringify(data)
       );
   }
-  async getModels() {
+  async getModels(config) {
     try {
-      const response = await this._get("models"
+      const response = await this._get("models", {
+        signal: config?.signal
+      });
       const data = await this._parseModelsResponse(response);
       return this._filterAndTransformModels(data.models);
     } catch (e) {
+      if (e instanceof import_error.ChatLunaError) {
+        throw e;
+      }
       const error = new Error(
         "error when listing gemini models, Error: " + e.message
       );

@@ -664,6 +683,18 @@
         return;
       }
       const transformValue = typeof chunk === "string" ? JSON.parse(chunk) : chunk;
+      if (transformValue.usageMetadata) {
+        const promptTokens = transformValue.usageMetadata.promptTokenCount;
+        const totalTokens = transformValue.usageMetadata.totalTokenCount;
+        const completionTokens = transformValue.usageMetadata.candidatesTokenCount ?? totalTokens - promptTokens;
+        controller.enqueue({
+          usage: {
+            promptTokens,
+            completionTokens,
+            totalTokens
+          }
+        });
+      }
       if (!transformValue?.candidates) {
         return;
       }
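The new `usageMetadata` branch turns Gemini's token accounting into a dedicated usage chunk. `candidatesTokenCount` can be absent on some chunks, in which case the completion count falls back to `totalTokenCount - promptTokenCount`. A sketch of the rule with made-up numbers:

```typescript
// Field names follow the Gemini API's usageMetadata block as used in the diff.
interface GeminiUsageMetadata {
    promptTokenCount: number
    totalTokenCount: number
    candidatesTokenCount?: number
}

function toTokenUsage(meta: GeminiUsageMetadata) {
    const promptTokens = meta.promptTokenCount
    const totalTokens = meta.totalTokenCount
    // Fallback: derive the completion count when candidatesTokenCount is missing.
    const completionTokens = meta.candidatesTokenCount ?? totalTokens - promptTokens
    return { promptTokens, completionTokens, totalTokens }
}

// e.g. { promptTokenCount: 120, totalTokenCount: 185 } -> completionTokens 65
console.log(toTokenUsage({ promptTokenCount: 120, totalTokenCount: 185 }))
```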
@@ -702,6 +733,22 @@
     let errorCount = 0;
     let functionIndex = 0;
     for await (const chunk of iterable) {
+      let parsedChunk;
+      if (parsedChunk = partAsTypeCheck(
+        chunk,
+        (chunk2) => chunk2["usage"] != null
+      )) {
+        const generationChunk = new import_outputs.ChatGenerationChunk({
+          message: new import_messages.AIMessageChunk({
+            content: "",
+            response_metadata: {
+              tokenUsage: parsedChunk.usage
+            }
+          }),
+          text: ""
+        });
+        yield { type: "generation", generation: generationChunk };
+      }
       try {
         const { updatedContent, updatedReasoning, updatedToolCalling } = await this._processChunk(
           chunk,

@@ -849,11 +896,12 @@ ${groundingContent}`
       ...params
     });
   }
-  _get(url) {
+  _get(url, params = {}) {
     const requestUrl = this._concatUrl(url);
     return this._plugin.fetch(requestUrl, {
       method: "GET",
-      headers: this._buildHeaders()
+      headers: this._buildHeaders(),
+      ...params
     });
   }
   _concatUrl(url) {

@@ -901,9 +949,9 @@ var GeminiClient = class extends import_client.PlatformModelAndEmbeddingsClient
   get logger() {
     return logger;
   }
-  async refreshModels() {
+  async refreshModels(config) {
     try {
-      const rawModels = await this._requester.getModels();
+      const rawModels = await this._requester.getModels(config);
       if (!rawModels.length) {
         throw new import_error2.ChatLunaError(
           import_error2.ChatLunaErrorCode.MODEL_INIT_ERROR,

@@ -915,10 +963,10 @@ var GeminiClient = class extends import_client.PlatformModelAndEmbeddingsClient
       const info = {
         name: model.name,
         maxTokens: model.inputTokenLimit,
-        type: model.name.includes("embedding") ?
+        type: model.name.includes("embedding") ? import_types2.ModelType.embeddings : import_types2.ModelType.llm,
         capabilities: [
-
-
+          import_types2.ModelCapabilities.ImageInput,
+          import_types2.ModelCapabilities.ToolCall
         ]
       };
       if (model.name.includes("gemini-2.5") && !model.name.includes("pro") && !model.name.includes("image")) {

@@ -937,6 +985,9 @@ var GeminiClient = class extends import_client.PlatformModelAndEmbeddingsClient
       }
       return models;
     } catch (e) {
+      if (e instanceof import_error2.ChatLunaError) {
+        throw e;
+      }
       throw new import_error2.ChatLunaError(import_error2.ChatLunaErrorCode.MODEL_INIT_ERROR, e);
     }
   }

@@ -945,13 +996,15 @@ var GeminiClient = class extends import_client.PlatformModelAndEmbeddingsClient
     if (info == null) {
       throw new import_error2.ChatLunaError(import_error2.ChatLunaErrorCode.MODEL_NOT_FOUND);
     }
-    if (info.type ===
+    if (info.type === import_types2.ModelType.llm) {
       return new import_model.ChatLunaChatModel({
         modelInfo: info,
         requester: this._requester,
         model,
         modelMaxContextSize: info.maxTokens,
-        maxTokenLimit:
+        maxTokenLimit: Math.floor(
+          (info.maxTokens || 1e5) * this._config.maxContextRatio
+        ),
         timeout: this._config.timeout,
         temperature: this._config.temperature,
         maxRetries: this._config.maxRetries,
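`maxTokenLimit` above is now the model's reported input limit (with a `1e5` fallback) scaled by the new `maxContextRatio` option, default 0.35. A worked example with illustrative token limits:

```typescript
// Same arithmetic as the compiled code: Math.floor((inputTokenLimit || 1e5) * ratio).
const maxContextRatio = 0.35

for (const inputTokenLimit of [1048576, 32768, undefined]) {
    const maxTokenLimit = Math.floor((inputTokenLimit || 1e5) * maxContextRatio)
    console.log(inputTokenLimit ?? 'unknown', '->', maxTokenLimit)
    // 1048576 -> 367001, 32768 -> 11468, unknown -> 35000
}
```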
@@ -967,16 +1020,17 @@
   };

 // src/index.ts
-var
+var import_logger2 = require("koishi-plugin-chatluna/utils/logger");
 var logger;
 var reusable = true;
 function apply(ctx, config) {
-
-  logger = (0, import_logger.createLogger)(ctx, "chatluna-gemini-adapter");
+  logger = (0, import_logger2.createLogger)(ctx, "chatluna-gemini-adapter");
   ctx.on("ready", async () => {
-    plugin.
-
-    return config2.apiKeys.
+    const plugin = new import_chat.ChatLunaPlugin(ctx, config, config.platform);
+    plugin.parseConfig((config2) => {
+      return config2.apiKeys.filter(([apiKey, _, enabled]) => {
+        return apiKey.length > 0 && enabled;
+      }).map(([apiKey, apiEndpoint]) => {
         return {
           apiKey,
           apiEndpoint,
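Client configs are now built only from rows whose API key is non-empty and whose new third `enabled` element is true; a disabled row stays in the config table but produces no client. A standalone sketch with placeholder keys:

```typescript
type ApiKeyEntry = [apiKey: string, apiEndpoint: string, enabled: boolean]

// Placeholder rows, not real credentials.
const apiKeys: ApiKeyEntry[] = [
    ['AIza-example-primary', 'https://generativelanguage.googleapis.com/v1beta', true],
    ['AIza-example-staging', 'https://generativelanguage.googleapis.com/v1beta', false], // disabled: skipped
    ['', 'https://generativelanguage.googleapis.com/v1beta', true] // empty key: skipped
]

// Same selection rule as the compiled parseConfig callback.
const active = apiKeys
    .filter(([apiKey, _, enabled]) => apiKey.length > 0 && enabled)
    .map(([apiKey, apiEndpoint]) => ({ apiKey, apiEndpoint }))

console.log(active.length) // 1
```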
@@ -988,8 +1042,8 @@ function apply(ctx, config) {
       };
     });
     });
-    plugin.registerClient((
-    await plugin.
+    plugin.registerClient(() => new GeminiClient(ctx, config, plugin));
+    await plugin.initClient();
   });
 }
 __name(apply, "apply");

@@ -999,15 +1053,16 @@ var Config4 = import_koishi.Schema.intersect([
     platform: import_koishi.Schema.string().default("gemini"),
     apiKeys: import_koishi.Schema.array(
       import_koishi.Schema.tuple([
-        import_koishi.Schema.string().role("secret"),
+        import_koishi.Schema.string().role("secret").default(""),
         import_koishi.Schema.string().default(
           "https://generativelanguage.googleapis.com/v1beta"
-        )
+        ),
+        import_koishi.Schema.boolean().default(true)
       ])
-    ).default([[
+    ).default([[]]).role("table")
   }),
   import_koishi.Schema.object({
-
+    maxContextRatio: import_koishi.Schema.number().min(0).max(1).step(1e-4).role("slider").default(0.35),
     temperature: import_koishi.Schema.percent().min(0).max(2).step(0.1).default(1),
     googleSearch: import_koishi.Schema.boolean().default(false),
     codeExecution: import_koishi.Schema.boolean().default(false),
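The `apiKeys` schema in isolation: each table row is now a `[key, endpoint, enabled]` tuple, and the enabled toggle defaults to `true`, so a previously saved two-element row should still parse as enabled. A sketch of the same shape using Koishi's Schema API (transcribed from the compiled output above, not tested standalone):

```typescript
import { Schema } from 'koishi'

// Row shape: [secret API key, endpoint URL, enabled flag].
const apiKeys = Schema.array(
    Schema.tuple([
        Schema.string().role('secret').default(''),
        Schema.string().default('https://generativelanguage.googleapis.com/v1beta'),
        Schema.boolean().default(true)
    ])
).default([[]]).role('table')
```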
package/lib/index.d.ts
CHANGED

@@ -4,8 +4,8 @@ export declare let logger: Logger;
 export declare const reusable = true;
 export declare function apply(ctx: Context, config: Config): void;
 export interface Config extends ChatLunaPlugin.Config {
-    apiKeys: [string, string][];
-
+    apiKeys: [string, string, boolean][];
+    maxContextRatio: number;
     platform: string;
     temperature: number;
     googleSearch: boolean;
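A hypothetical plugin configuration matching the updated `Config` interface; the key value is a placeholder:

```typescript
import type { Config } from 'koishi-plugin-chatluna-google-gemini-adapter'

const config: Partial<Config> = {
    platform: 'gemini',
    // Rows are now [apiKey, apiEndpoint, enabled].
    apiKeys: [
        ['AIza-example-key', 'https://generativelanguage.googleapis.com/v1beta', true]
    ],
    maxContextRatio: 0.35, // cap usable context at 35% of the model window
    temperature: 1
}

export default config
```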
package/lib/index.mjs
CHANGED

@@ -8,14 +8,14 @@ var __commonJS = (cb, mod) => function __require() {
 // src/locales/zh-CN.schema.yml
 var require_zh_CN_schema = __commonJS({
   "src/locales/zh-CN.schema.yml"(exports, module) {
-    module.exports = { $inner: [{}, { $desc: "请求选项", platform: "适配器的平台名。(不懂请不要修改)", apiKeys: { $inner: ["Gemini
+    module.exports = { $inner: [{}, { $desc: "请求选项", platform: "适配器的平台名。(不懂请不要修改)", apiKeys: { $inner: ["Gemini API Key", "Gemini API 请求地址", "是否启用此配置"], $desc: "Gemini 的 API Key 和请求地址列表。" } }, { $desc: "模型配置", maxContextRatio: "最大上下文使用比例(0~1),控制可用的模型上下文窗口大小的最大百分比。例如 0.35 表示最多使用模型上下文的 35%。", temperature: "回复的随机性程度,数值越高,回复越随机(范围:0~2)。", googleSearch: "为模型启用谷歌搜索。", thinkingBudget: "思考预算,范围:(-1~24576),设置的数值越大,思考时花费的 Token 越多,-1 为动态思考。目前仅支持 gemini 2.5 系列模型。", groundingContentDisplay: "是否显示谷歌搜索结果。", imageGeneration: "为模型启用图像生成。目前仅支持 `gemini-2.0-flash-exp` 和 `gemini-2.5-flash-image-preview` 模型。", searchThreshold: "搜索的[置信度阈值](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval),范围:0~1,设置的数值越低,则越倾向于使用谷歌搜索。(仅支持 `gemini-1.5` 系列模型。gemini 2.0 模型起使用动态的工具调用)", includeThoughts: "是否获取模型的思考内容。", codeExecution: "为模型启用代码执行工具。", urlContext: "为模型启用 URL 内容获取工具。", nonStreaming: "强制不启用流式返回。开启后,将总是以非流式发起请求,即便配置了 stream 参数。" }] };
   }
 });

 // src/locales/en-US.schema.yml
 var require_en_US_schema = __commonJS({
   "src/locales/en-US.schema.yml"(exports, module) {
-    module.exports = { $inner: [{}, { $desc: "API Configuration", platform: "Adapter platform name. (Do not modify if you do not understand)", apiKeys: { $inner: ["Gemini API Key", "Gemini API Endpoint (optional)"], $desc: "Gemini API access credentials" } }, { $desc: "Model Parameters",
+    module.exports = { $inner: [{}, { $desc: "API Configuration", platform: "Adapter platform name. (Do not modify if you do not understand)", apiKeys: { $inner: ["Gemini API Key", "Gemini API Endpoint (optional)", "Enabled"], $desc: "Gemini API access credentials" } }, { $desc: "Model Parameters", maxContextRatio: "Maximum context usage ratio (0-1). Controls the maximum percentage of model context window available for use. For example, 0.35 means at most 35% of the model context can be used.", temperature: "Sampling temperature (0-2). Higher: more random, Lower: more deterministic", googleSearch: "Enable Google search", thinkingBudget: "Thinking budget (-1-24576). (0: dynamic thinking) Higher: more tokens spent on thinking. Currently only supports `gemini-2.5` series models.", groundingContentDisplay: "Enable display of search results", imageGeneration: "Enable image generation (only for `gemini-2.0-flash-exp` and `gemini-2.5-flash-image-preview` model)", searchThreshold: "Search confidence [threshold](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval) (0-1). Lower: more likely to use Google search", includeThoughts: "Enable retrieval of model thoughts", codeExecution: "Enable code execution tool", urlContext: "Enable URL context retrieval tool", nonStreaming: "Force disable streaming response. When enabled, requests will always be made in non-streaming mode, even if the stream parameter is configured." }] };
   }
 });

@@ -54,7 +54,6 @@ import { checkResponse, sseIterable } from "koishi-plugin-chatluna/utils/sse";
 import { readableStreamToAsyncIterable } from "koishi-plugin-chatluna/utils/stream";

 // src/utils.ts
-import { zodToJsonSchema } from "zod-to-json-schema";
 import {
   fetchImageUrl,
   removeAdditionalProperties

@@ -63,7 +62,8 @@ import {
   isMessageContentImageUrl,
   isMessageContentText
 } from "koishi-plugin-chatluna/utils/string";
-import {
+import { isZodSchemaV3 } from "@langchain/core/utils/types";
+import { generateSchema } from "@anatine/zod-openapi";
 async function langchainMessageToGeminiMessage(messages, plugin, model) {
   return Promise.all(
     messages.map(async (message) => {

@@ -284,9 +284,7 @@ function formatToolsToGeminiAITools(tools, config, model) {
 __name(formatToolsToGeminiAITools, "formatToolsToGeminiAITools");
 function formatToolToGeminiAITool(tool) {
   const parameters = removeAdditionalProperties(
-    tool.schema
-      allowedAdditionalProperties: void 0
-    }) : tool.schema
+    isZodSchemaV3(tool.schema) ? generateSchema(tool.schema, true, "3.0") : tool.schema
   );
   return {
     name: tool.name,

@@ -402,6 +400,7 @@ __name(isChatResponse, "isChatResponse");

 // src/requester.ts
 import { getMessageContent } from "koishi-plugin-chatluna/utils/string";
+import { trackLogToLocal } from "koishi-plugin-chatluna/utils/logger";
 var GeminiRequester = class extends ModelRequester {
   constructor(ctx, _configPool, _pluginConfig, _plugin) {
     super(ctx, _configPool, _pluginConfig, _plugin);

@@ -445,6 +444,13 @@ var GeminiRequester = class extends ModelRequester {
       await checkResponse(response);
       yield* this._processResponseStream(response);
     } catch (e) {
+      if (this.ctx.chatluna.config.isLog) {
+        await trackLogToLocal(
+          "Request",
+          JSON.stringify(chatGenerationParams),
+          logger
+        );
+      }
       if (e instanceof ChatLunaError) {
         throw e;
       } else {

@@ -454,15 +460,16 @@
   }
   async completionInternal(params) {
     const modelConfig = prepareModelConfig(params, this._pluginConfig);
+    const chatGenerationParams = await createChatGenerationParams(
+      params,
+      this._plugin,
+      modelConfig,
+      this._pluginConfig
+    );
     try {
       const response = await this._post(
         `models/${modelConfig.model}:generateContent`,
-
-        params,
-        this._plugin,
-        modelConfig,
-        this._pluginConfig
-      ),
+        chatGenerationParams,
         {
           signal: params.signal
         }

@@ -470,6 +477,13 @@
       await checkResponse(response);
       return await this._processResponse(response);
     } catch (e) {
+      if (this.ctx.chatluna.config.isLog) {
+        await trackLogToLocal(
+          "Request",
+          JSON.stringify(chatGenerationParams),
+          logger
+        );
+      }
       if (e instanceof ChatLunaError) {
         throw e;
       } else {

@@ -519,12 +533,17 @@
         "error when calling gemini embeddings, Result: " + JSON.stringify(data)
       );
   }
-  async getModels() {
+  async getModels(config) {
     try {
-      const response = await this._get("models"
+      const response = await this._get("models", {
+        signal: config?.signal
+      });
       const data = await this._parseModelsResponse(response);
       return this._filterAndTransformModels(data.models);
     } catch (e) {
+      if (e instanceof ChatLunaError) {
+        throw e;
+      }
       const error = new Error(
         "error when listing gemini models, Error: " + e.message
       );

@@ -661,6 +680,18 @@
         return;
       }
       const transformValue = typeof chunk === "string" ? JSON.parse(chunk) : chunk;
+      if (transformValue.usageMetadata) {
+        const promptTokens = transformValue.usageMetadata.promptTokenCount;
+        const totalTokens = transformValue.usageMetadata.totalTokenCount;
+        const completionTokens = transformValue.usageMetadata.candidatesTokenCount ?? totalTokens - promptTokens;
+        controller.enqueue({
+          usage: {
+            promptTokens,
+            completionTokens,
+            totalTokens
+          }
+        });
+      }
       if (!transformValue?.candidates) {
         return;
       }

@@ -699,6 +730,22 @@
     let errorCount = 0;
     let functionIndex = 0;
     for await (const chunk of iterable) {
+      let parsedChunk;
+      if (parsedChunk = partAsTypeCheck(
+        chunk,
+        (chunk2) => chunk2["usage"] != null
+      )) {
+        const generationChunk = new ChatGenerationChunk({
+          message: new AIMessageChunk({
+            content: "",
+            response_metadata: {
+              tokenUsage: parsedChunk.usage
+            }
+          }),
+          text: ""
+        });
+        yield { type: "generation", generation: generationChunk };
+      }
       try {
         const { updatedContent, updatedReasoning, updatedToolCalling } = await this._processChunk(
           chunk,

@@ -846,11 +893,12 @@ ${groundingContent}`
       ...params
     });
   }
-  _get(url) {
+  _get(url, params = {}) {
     const requestUrl = this._concatUrl(url);
     return this._plugin.fetch(requestUrl, {
       method: "GET",
-      headers: this._buildHeaders()
+      headers: this._buildHeaders(),
+      ...params
     });
   }
   _concatUrl(url) {

@@ -898,9 +946,9 @@ var GeminiClient = class extends PlatformModelAndEmbeddingsClient {
   get logger() {
     return logger;
   }
-  async refreshModels() {
+  async refreshModels(config) {
     try {
-      const rawModels = await this._requester.getModels();
+      const rawModels = await this._requester.getModels(config);
       if (!rawModels.length) {
         throw new ChatLunaError2(
           ChatLunaErrorCode2.MODEL_INIT_ERROR,

@@ -934,6 +982,9 @@ var GeminiClient = class extends PlatformModelAndEmbeddingsClient {
       }
       return models;
     } catch (e) {
+      if (e instanceof ChatLunaError2) {
+        throw e;
+      }
       throw new ChatLunaError2(ChatLunaErrorCode2.MODEL_INIT_ERROR, e);
     }
   }

@@ -948,7 +999,9 @@ var GeminiClient = class extends PlatformModelAndEmbeddingsClient {
       requester: this._requester,
       model,
       modelMaxContextSize: info.maxTokens,
-      maxTokenLimit:
+      maxTokenLimit: Math.floor(
+        (info.maxTokens || 1e5) * this._config.maxContextRatio
+      ),
       timeout: this._config.timeout,
       temperature: this._config.temperature,
       maxRetries: this._config.maxRetries,

@@ -968,12 +1021,13 @@ import { createLogger } from "koishi-plugin-chatluna/utils/logger";
 var logger;
 var reusable = true;
 function apply(ctx, config) {
-  const plugin = new ChatLunaPlugin(ctx, config, config.platform);
   logger = createLogger(ctx, "chatluna-gemini-adapter");
   ctx.on("ready", async () => {
-    plugin.
-
-    return config2.apiKeys.
+    const plugin = new ChatLunaPlugin(ctx, config, config.platform);
+    plugin.parseConfig((config2) => {
+      return config2.apiKeys.filter(([apiKey, _, enabled]) => {
+        return apiKey.length > 0 && enabled;
+      }).map(([apiKey, apiEndpoint]) => {
         return {
           apiKey,
           apiEndpoint,

@@ -985,8 +1039,8 @@ function apply(ctx, config) {
       };
     });
     });
-    plugin.registerClient((
-    await plugin.
+    plugin.registerClient(() => new GeminiClient(ctx, config, plugin));
+    await plugin.initClient();
   });
 }
 __name(apply, "apply");

@@ -996,15 +1050,16 @@ var Config4 = Schema.intersect([
     platform: Schema.string().default("gemini"),
     apiKeys: Schema.array(
       Schema.tuple([
-        Schema.string().role("secret"),
+        Schema.string().role("secret").default(""),
         Schema.string().default(
           "https://generativelanguage.googleapis.com/v1beta"
-        )
+        ),
+        Schema.boolean().default(true)
       ])
-    ).default([[
+    ).default([[]]).role("table")
   }),
   Schema.object({
-
+    maxContextRatio: Schema.number().min(0).max(1).step(1e-4).role("slider").default(0.35),
     temperature: Schema.percent().min(0).max(2).step(0.1).default(1),
     googleSearch: Schema.boolean().default(false),
     codeExecution: Schema.boolean().default(false),
package/lib/requester.d.ts
CHANGED

@@ -5,6 +5,7 @@ import { Config } from '.';
 import { GeminiModelInfo } from './types';
 import { ChatLunaPlugin } from 'koishi-plugin-chatluna/services/chat';
 import { Context } from 'koishi';
+import { RunnableConfig } from '@langchain/core/runnables';
 export declare class GeminiRequester extends ModelRequester implements EmbeddingsRequester {
     _pluginConfig: Config;
     constructor(ctx: Context, _configPool: ClientConfigPool<ClientConfig>, _pluginConfig: Config, _plugin: ChatLunaPlugin);

@@ -15,7 +16,7 @@ export declare class GeminiRequester extends ModelRequester implements Embedding
     private _prepareEmbeddingsInput;
     private _createEmbeddingsRequest;
    private _processEmbeddingsResponse;
-    getModels(): Promise<GeminiModelInfo[]>;
+    getModels(config?: RunnableConfig): Promise<GeminiModelInfo[]>;
     private _parseModelsResponse;
     private _filterAndTransformModels;
     private _processResponse;
package/lib/types.d.ts
CHANGED

@@ -2,11 +2,18 @@ export interface ChatCompletionResponseMessage {
     role: string;
     parts?: ChatPart[];
 }
-export type ChatPart = ChatMessagePart | ChatInlineDataPart | ChatFunctionCallingPart | ChatFunctionResponsePart | ChatUploadDataPart;
+export type ChatPart = ChatMessagePart | ChatInlineDataPart | ChatFunctionCallingPart | ChatFunctionResponsePart | ChatUploadDataPart | ChatUsageMetadataPart;
 export type ChatMessagePart = {
     text: string;
     thought?: boolean;
 };
+export type ChatUsageMetadataPart = {
+    usage: {
+        promptTokens: number;
+        completionTokens: number;
+        totalTokens: number;
+    };
+};
 export type ChatInlineDataPart = {
     inlineData: {
         mimeType: string;
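Since `ChatPart` is now a union that includes `ChatUsageMetadataPart`, consumers can narrow on the new `usage` field. An illustrative guard (`isUsagePart` is hypothetical, and the union is abbreviated to two members):

```typescript
// Types copied from the diff above; other union members elided for brevity.
type ChatUsageMetadataPart = {
    usage: {
        promptTokens: number
        completionTokens: number
        totalTokens: number
    }
}
type ChatMessagePart = { text: string; thought?: boolean }
type ChatPart = ChatMessagePart | ChatUsageMetadataPart

function isUsagePart(part: ChatPart): part is ChatUsageMetadataPart {
    return (part as ChatUsageMetadataPart).usage != null
}

const parts: ChatPart[] = [
    { text: 'hi' },
    { usage: { promptTokens: 3, completionTokens: 5, totalTokens: 8 } }
]
for (const part of parts) {
    if (isUsagePart(part)) console.log('total tokens:', part.usage.totalTokens)
}
```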
package/package.json
CHANGED

@@ -1,7 +1,7 @@
 {
   "name": "koishi-plugin-chatluna-google-gemini-adapter",
   "description": "google-gemini adapter for chatluna",
-  "version": "1.3.0
+  "version": "1.3.0",
   "main": "lib/index.cjs",
   "module": "lib/index.mjs",
   "typings": "lib/index.d.ts",

@@ -22,13 +22,13 @@
   "repository": {
     "type": "git",
     "url": "https://github.com/ChatLunaLab/chatluna.git",
-    "directory": "packages/
+    "directory": "packages/adapter-gemini"
   },
   "license": "AGPL-3.0",
   "bugs": {
     "url": "https://github.com/ChatLunaLab/chatluna/issues"
   },
-  "homepage": "https://github.com/ChatLunaLab/chatluna/tree/v1-dev/packages/
+  "homepage": "https://github.com/ChatLunaLab/chatluna/tree/v1-dev/packages/adapter-gemini#readme",
   "scripts": {
     "build": "atsc -b"
   },

@@ -62,19 +62,21 @@
     "adapter"
   ],
   "dependencies": {
-    "@
+    "@anatine/zod-openapi": "^2.2.8",
+    "@chatluna/v1-shared-adapter": "^1.0.16",
     "@langchain/core": "0.3.62",
+    "openapi3-ts": "^4.5.0",
     "zod": "3.25.76",
-    "zod-to-json-schema": "^3.24.
+    "zod-to-json-schema": "^3.24.6"
   },
   "devDependencies": {
     "atsc": "^2.1.0",
-    "koishi": "^4.18.
+    "koishi": "^4.18.9"
   },
   "peerDependencies": {
-    "koishi": "^4.18.
-    "koishi-plugin-chatluna": "^1.3.0
-    "koishi-plugin-chatluna-storage-service": "^0.0.
+    "koishi": "^4.18.9",
+    "koishi-plugin-chatluna": "^1.3.0",
+    "koishi-plugin-chatluna-storage-service": "^0.0.11"
   },
   "peerDependenciesMeta": {
     "koishi-plugin-chatluna-storage-service": {