koishi-plugin-chatluna-google-gemini-adapter 1.3.9 → 1.3.11
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/index.cjs +29 -5
- package/lib/index.mjs +29 -5
- package/lib/requester.d.ts +1 -0
- package/lib/utils.d.ts +10 -10
- package/package.json +3 -3
package/lib/index.cjs
CHANGED
|
@@ -67,6 +67,7 @@ var import_v1_shared_adapter = require("@chatluna/v1-shared-adapter");
|
|
|
67
67
|
var import_string = require("koishi-plugin-chatluna/utils/string");
|
|
68
68
|
var import_types = require("@langchain/core/utils/types");
|
|
69
69
|
var import_zod_openapi = require("@anatine/zod-openapi");
|
|
70
|
+
var import_object = require("koishi-plugin-chatluna/utils/object");
|
|
70
71
|
async function langchainMessageToGeminiMessage(messages, plugin, model) {
|
|
71
72
|
return Promise.all(
|
|
72
73
|
messages.map(async (message) => {
|
|
@@ -75,6 +76,7 @@ async function langchainMessageToGeminiMessage(messages, plugin, model) {
|
|
|
75
76
|
if (role === "function" || hasFunctionCall) {
|
|
76
77
|
return processFunctionMessage(
|
|
77
78
|
message,
|
|
79
|
+
// 如果使用 new api,我们应该去掉 id,,,
|
|
78
80
|
plugin.config.useCamelCaseSystemInstruction
|
|
79
81
|
);
|
|
80
82
|
}
|
|
@@ -332,6 +334,7 @@ function prepareModelConfig(params, pluginConfig) {
|
|
|
332
334
|
let model = params.model;
|
|
333
335
|
let enabledThinking = null;
|
|
334
336
|
let thinkingLevel = "THINKING_LEVEL_UNSPECIFIED";
|
|
337
|
+
let imageSize;
|
|
335
338
|
if (model.includes("-thinking") && model.includes("gemini-2.5")) {
|
|
336
339
|
enabledThinking = !model.includes("-non-thinking");
|
|
337
340
|
model = model.replace("-non-thinking", "").replace("-thinking", "");
|
|
@@ -356,6 +359,11 @@ function prepareModelConfig(params, pluginConfig) {
|
|
|
356
359
|
} else {
|
|
357
360
|
thinkingLevel = void 0;
|
|
358
361
|
}
|
|
362
|
+
const imageSizeMatch = model.match(/-(2K|4K)$/);
|
|
363
|
+
if (imageSizeMatch) {
|
|
364
|
+
imageSize = imageSizeMatch[1];
|
|
365
|
+
model = model.replace(`-${imageSize}`, "");
|
|
366
|
+
}
|
|
359
367
|
let imageGeneration = pluginConfig.imageGeneration ?? false;
|
|
360
368
|
if (imageGeneration) {
|
|
361
369
|
imageGeneration = params.model.includes("gemini-2.0-flash-exp") || params.model.includes("image");
|
|
@@ -365,7 +373,8 @@ function prepareModelConfig(params, pluginConfig) {
|
|
|
365
373
|
enabledThinking,
|
|
366
374
|
thinkingBudget,
|
|
367
375
|
imageGeneration,
|
|
368
|
-
thinkingLevel
|
|
376
|
+
thinkingLevel,
|
|
377
|
+
imageSize
|
|
369
378
|
};
|
|
370
379
|
}
|
|
371
380
|
__name(prepareModelConfig, "prepareModelConfig");
|
|
@@ -396,12 +405,15 @@ function createSafetySettings(model) {
|
|
|
396
405
|
}
|
|
397
406
|
__name(createSafetySettings, "createSafetySettings");
|
|
398
407
|
function createGenerationConfig(params, modelConfig, pluginConfig) {
|
|
399
|
-
|
|
408
|
+
const base = {
|
|
400
409
|
stopSequences: params.stop,
|
|
401
410
|
temperature: params.temperature,
|
|
402
411
|
maxOutputTokens: params.model.includes("vision") ? void 0 : params.maxTokens,
|
|
403
412
|
topP: params.topP,
|
|
404
413
|
responseModalities: modelConfig.imageGeneration ? ["TEXT", "IMAGE"] : void 0,
|
|
414
|
+
imageConfig: modelConfig.imageSize ? {
|
|
415
|
+
imageSize: modelConfig.imageSize
|
|
416
|
+
} : void 0,
|
|
405
417
|
thinkingConfig: modelConfig.enabledThinking != null || pluginConfig.includeThoughts ? filterKeys(
|
|
406
418
|
{
|
|
407
419
|
thinkingBudget: modelConfig.thinkingBudget,
|
|
@@ -411,6 +423,7 @@ function createGenerationConfig(params, modelConfig, pluginConfig) {
|
|
|
411
423
|
notNullFn
|
|
412
424
|
) : void 0
|
|
413
425
|
};
|
|
426
|
+
return (0, import_object.deepAssign)({}, base, params.overrideRequestParams ?? {});
|
|
414
427
|
}
|
|
415
428
|
__name(createGenerationConfig, "createGenerationConfig");
|
|
416
429
|
async function createChatGenerationParams(params, plugin, modelConfig, pluginConfig) {
|
|
@@ -460,6 +473,7 @@ var GeminiRequester = class extends import_api.ModelRequester {
|
|
|
460
473
|
constructor(ctx, _configPool, _pluginConfig, _plugin) {
|
|
461
474
|
super(ctx, _configPool, _pluginConfig, _plugin);
|
|
462
475
|
this._pluginConfig = _pluginConfig;
|
|
476
|
+
this._plugin = _plugin;
|
|
463
477
|
}
|
|
464
478
|
static {
|
|
465
479
|
__name(this, "GeminiRequester");
|
|
@@ -1016,6 +1030,7 @@ var GeminiClient = class extends import_client.PlatformModelAndEmbeddingsClient
|
|
|
1016
1030
|
}
|
|
1017
1031
|
const models = [];
|
|
1018
1032
|
for (const model of rawModels) {
|
|
1033
|
+
const modelNameLower = model.name.toLowerCase();
|
|
1019
1034
|
const info = {
|
|
1020
1035
|
name: model.name,
|
|
1021
1036
|
maxTokens: model.inputTokenLimit,
|
|
@@ -1027,8 +1042,17 @@ var GeminiClient = class extends import_client.PlatformModelAndEmbeddingsClient
|
|
|
1027
1042
|
};
|
|
1028
1043
|
const thinkingModel = ["gemini-2.5-pro", "gemini-2.5-flash"];
|
|
1029
1044
|
const thinkingLevelModel = ["gemini-3.0-pro"];
|
|
1030
|
-
|
|
1031
|
-
|
|
1045
|
+
const imageResolutionModel = ["gemini-3.0-pro-image"];
|
|
1046
|
+
if (imageResolutionModel.some(
|
|
1047
|
+
(name2) => modelNameLower.includes(name2)
|
|
1048
|
+
)) {
|
|
1049
|
+
models.push(
|
|
1050
|
+
{ ...info, name: model.name + "-2K" },
|
|
1051
|
+
{ ...info, name: model.name + "-4K" },
|
|
1052
|
+
info
|
|
1053
|
+
);
|
|
1054
|
+
} else if (thinkingModel.some(
|
|
1055
|
+
(name2) => modelNameLower.includes(name2) && !modelNameLower.includes("image")
|
|
1032
1056
|
)) {
|
|
1033
1057
|
if (!model.name.includes("-thinking")) {
|
|
1034
1058
|
models.push(
|
|
@@ -1040,7 +1064,7 @@ var GeminiClient = class extends import_client.PlatformModelAndEmbeddingsClient
|
|
|
1040
1064
|
models.push(info);
|
|
1041
1065
|
}
|
|
1042
1066
|
} else if (thinkingLevelModel.some(
|
|
1043
|
-
(name2) =>
|
|
1067
|
+
(name2) => modelNameLower.includes(name2) && !modelNameLower.includes("image")
|
|
1044
1068
|
)) {
|
|
1045
1069
|
models.push(
|
|
1046
1070
|
{ ...info, name: model.name + "-low-thinking" },
|
package/lib/index.mjs
CHANGED
|
@@ -64,6 +64,7 @@ import {
|
|
|
64
64
|
} from "koishi-plugin-chatluna/utils/string";
|
|
65
65
|
import { isZodSchemaV3 } from "@langchain/core/utils/types";
|
|
66
66
|
import { generateSchema } from "@anatine/zod-openapi";
|
|
67
|
+
import { deepAssign } from "koishi-plugin-chatluna/utils/object";
|
|
67
68
|
async function langchainMessageToGeminiMessage(messages, plugin, model) {
|
|
68
69
|
return Promise.all(
|
|
69
70
|
messages.map(async (message) => {
|
|
@@ -72,6 +73,7 @@ async function langchainMessageToGeminiMessage(messages, plugin, model) {
|
|
|
72
73
|
if (role === "function" || hasFunctionCall) {
|
|
73
74
|
return processFunctionMessage(
|
|
74
75
|
message,
|
|
76
|
+
// 如果使用 new api,我们应该去掉 id,,,
|
|
75
77
|
plugin.config.useCamelCaseSystemInstruction
|
|
76
78
|
);
|
|
77
79
|
}
|
|
@@ -329,6 +331,7 @@ function prepareModelConfig(params, pluginConfig) {
|
|
|
329
331
|
let model = params.model;
|
|
330
332
|
let enabledThinking = null;
|
|
331
333
|
let thinkingLevel = "THINKING_LEVEL_UNSPECIFIED";
|
|
334
|
+
let imageSize;
|
|
332
335
|
if (model.includes("-thinking") && model.includes("gemini-2.5")) {
|
|
333
336
|
enabledThinking = !model.includes("-non-thinking");
|
|
334
337
|
model = model.replace("-non-thinking", "").replace("-thinking", "");
|
|
@@ -353,6 +356,11 @@ function prepareModelConfig(params, pluginConfig) {
|
|
|
353
356
|
} else {
|
|
354
357
|
thinkingLevel = void 0;
|
|
355
358
|
}
|
|
359
|
+
const imageSizeMatch = model.match(/-(2K|4K)$/);
|
|
360
|
+
if (imageSizeMatch) {
|
|
361
|
+
imageSize = imageSizeMatch[1];
|
|
362
|
+
model = model.replace(`-${imageSize}`, "");
|
|
363
|
+
}
|
|
356
364
|
let imageGeneration = pluginConfig.imageGeneration ?? false;
|
|
357
365
|
if (imageGeneration) {
|
|
358
366
|
imageGeneration = params.model.includes("gemini-2.0-flash-exp") || params.model.includes("image");
|
|
@@ -362,7 +370,8 @@ function prepareModelConfig(params, pluginConfig) {
|
|
|
362
370
|
enabledThinking,
|
|
363
371
|
thinkingBudget,
|
|
364
372
|
imageGeneration,
|
|
365
|
-
thinkingLevel
|
|
373
|
+
thinkingLevel,
|
|
374
|
+
imageSize
|
|
366
375
|
};
|
|
367
376
|
}
|
|
368
377
|
__name(prepareModelConfig, "prepareModelConfig");
|
|
@@ -393,12 +402,15 @@ function createSafetySettings(model) {
|
|
|
393
402
|
}
|
|
394
403
|
__name(createSafetySettings, "createSafetySettings");
|
|
395
404
|
function createGenerationConfig(params, modelConfig, pluginConfig) {
|
|
396
|
-
|
|
405
|
+
const base = {
|
|
397
406
|
stopSequences: params.stop,
|
|
398
407
|
temperature: params.temperature,
|
|
399
408
|
maxOutputTokens: params.model.includes("vision") ? void 0 : params.maxTokens,
|
|
400
409
|
topP: params.topP,
|
|
401
410
|
responseModalities: modelConfig.imageGeneration ? ["TEXT", "IMAGE"] : void 0,
|
|
411
|
+
imageConfig: modelConfig.imageSize ? {
|
|
412
|
+
imageSize: modelConfig.imageSize
|
|
413
|
+
} : void 0,
|
|
402
414
|
thinkingConfig: modelConfig.enabledThinking != null || pluginConfig.includeThoughts ? filterKeys(
|
|
403
415
|
{
|
|
404
416
|
thinkingBudget: modelConfig.thinkingBudget,
|
|
@@ -408,6 +420,7 @@ function createGenerationConfig(params, modelConfig, pluginConfig) {
|
|
|
408
420
|
notNullFn
|
|
409
421
|
) : void 0
|
|
410
422
|
};
|
|
423
|
+
return deepAssign({}, base, params.overrideRequestParams ?? {});
|
|
411
424
|
}
|
|
412
425
|
__name(createGenerationConfig, "createGenerationConfig");
|
|
413
426
|
async function createChatGenerationParams(params, plugin, modelConfig, pluginConfig) {
|
|
@@ -457,6 +470,7 @@ var GeminiRequester = class extends ModelRequester {
|
|
|
457
470
|
constructor(ctx, _configPool, _pluginConfig, _plugin) {
|
|
458
471
|
super(ctx, _configPool, _pluginConfig, _plugin);
|
|
459
472
|
this._pluginConfig = _pluginConfig;
|
|
473
|
+
this._plugin = _plugin;
|
|
460
474
|
}
|
|
461
475
|
static {
|
|
462
476
|
__name(this, "GeminiRequester");
|
|
@@ -1013,6 +1027,7 @@ var GeminiClient = class extends PlatformModelAndEmbeddingsClient {
|
|
|
1013
1027
|
}
|
|
1014
1028
|
const models = [];
|
|
1015
1029
|
for (const model of rawModels) {
|
|
1030
|
+
const modelNameLower = model.name.toLowerCase();
|
|
1016
1031
|
const info = {
|
|
1017
1032
|
name: model.name,
|
|
1018
1033
|
maxTokens: model.inputTokenLimit,
|
|
@@ -1024,8 +1039,17 @@ var GeminiClient = class extends PlatformModelAndEmbeddingsClient {
|
|
|
1024
1039
|
};
|
|
1025
1040
|
const thinkingModel = ["gemini-2.5-pro", "gemini-2.5-flash"];
|
|
1026
1041
|
const thinkingLevelModel = ["gemini-3.0-pro"];
|
|
1027
|
-
|
|
1028
|
-
|
|
1042
|
+
const imageResolutionModel = ["gemini-3.0-pro-image"];
|
|
1043
|
+
if (imageResolutionModel.some(
|
|
1044
|
+
(name2) => modelNameLower.includes(name2)
|
|
1045
|
+
)) {
|
|
1046
|
+
models.push(
|
|
1047
|
+
{ ...info, name: model.name + "-2K" },
|
|
1048
|
+
{ ...info, name: model.name + "-4K" },
|
|
1049
|
+
info
|
|
1050
|
+
);
|
|
1051
|
+
} else if (thinkingModel.some(
|
|
1052
|
+
(name2) => modelNameLower.includes(name2) && !modelNameLower.includes("image")
|
|
1029
1053
|
)) {
|
|
1030
1054
|
if (!model.name.includes("-thinking")) {
|
|
1031
1055
|
models.push(
|
|
@@ -1037,7 +1061,7 @@ var GeminiClient = class extends PlatformModelAndEmbeddingsClient {
|
|
|
1037
1061
|
models.push(info);
|
|
1038
1062
|
}
|
|
1039
1063
|
} else if (thinkingLevelModel.some(
|
|
1040
|
-
(name2) =>
|
|
1064
|
+
(name2) => modelNameLower.includes(name2) && !modelNameLower.includes("image")
|
|
1041
1065
|
)) {
|
|
1042
1066
|
models.push(
|
|
1043
1067
|
{ ...info, name: model.name + "-low-thinking" },
|
package/lib/requester.d.ts
CHANGED
|
@@ -8,6 +8,7 @@ import { Context } from 'koishi';
|
|
|
8
8
|
import { RunnableConfig } from '@langchain/core/runnables';
|
|
9
9
|
export declare class GeminiRequester extends ModelRequester<ClientConfig, Config> implements EmbeddingsRequester {
|
|
10
10
|
_pluginConfig: Config;
|
|
11
|
+
_plugin: ChatLunaPlugin<ClientConfig, Config>;
|
|
11
12
|
constructor(ctx: Context, _configPool: ClientConfigPool<ClientConfig>, _pluginConfig: Config, _plugin: ChatLunaPlugin<ClientConfig, Config>);
|
|
12
13
|
completion(params: ModelRequestParams): Promise<ChatGeneration>;
|
|
13
14
|
completionStreamInternal(params: ModelRequestParams): AsyncGenerator<ChatGenerationChunk>;
|
package/lib/utils.d.ts
CHANGED
|
@@ -18,6 +18,7 @@ export declare function prepareModelConfig(params: ModelRequestParams, pluginCon
|
|
|
18
18
|
thinkingBudget: number;
|
|
19
19
|
imageGeneration: boolean;
|
|
20
20
|
thinkingLevel: string;
|
|
21
|
+
imageSize: string;
|
|
21
22
|
};
|
|
22
23
|
export declare function createSafetySettings(model: string): {
|
|
23
24
|
category: string;
|
|
@@ -29,20 +30,16 @@ export declare function createGenerationConfig(params: ModelRequestParams, model
|
|
|
29
30
|
maxOutputTokens: number;
|
|
30
31
|
topP: number;
|
|
31
32
|
responseModalities: string[];
|
|
33
|
+
imageConfig: {
|
|
34
|
+
imageSize: string;
|
|
35
|
+
};
|
|
32
36
|
thinkingConfig: Record<"thinkingBudget" | "includeThoughts" | "thinkingLevel", string | number | boolean>;
|
|
33
|
-
}
|
|
37
|
+
} & Record<string, any>;
|
|
34
38
|
export declare function createChatGenerationParams(params: ModelRequestParams, plugin: ChatLunaPlugin<ClientConfig, Config>, modelConfig: ReturnType<typeof prepareModelConfig>, pluginConfig: Config): Promise<{
|
|
35
39
|
[x: string]: Record<string, any> | ChatCompletionResponseMessage | ChatCompletionResponseMessage[] | {
|
|
36
40
|
category: string;
|
|
37
41
|
threshold: string;
|
|
38
|
-
}[]
|
|
39
|
-
stopSequences: string | string[];
|
|
40
|
-
temperature: number;
|
|
41
|
-
maxOutputTokens: number;
|
|
42
|
-
topP: number;
|
|
43
|
-
responseModalities: string[];
|
|
44
|
-
thinkingConfig: Record<"thinkingBudget" | "includeThoughts" | "thinkingLevel", string | number | boolean>;
|
|
45
|
-
};
|
|
42
|
+
}[];
|
|
46
43
|
contents: ChatCompletionResponseMessage[];
|
|
47
44
|
safetySettings: {
|
|
48
45
|
category: string;
|
|
@@ -54,8 +51,11 @@ export declare function createChatGenerationParams(params: ModelRequestParams, p
|
|
|
54
51
|
maxOutputTokens: number;
|
|
55
52
|
topP: number;
|
|
56
53
|
responseModalities: string[];
|
|
54
|
+
imageConfig: {
|
|
55
|
+
imageSize: string;
|
|
56
|
+
};
|
|
57
57
|
thinkingConfig: Record<"thinkingBudget" | "includeThoughts" | "thinkingLevel", string | number | boolean>;
|
|
58
|
-
}
|
|
58
|
+
} & Record<string, any>;
|
|
59
59
|
tools: Record<string, any>;
|
|
60
60
|
}>;
|
|
61
61
|
export declare function isChatResponse(response: any): response is ChatResponse;
|
package/package.json
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "koishi-plugin-chatluna-google-gemini-adapter",
|
|
3
3
|
"description": "google-gemini adapter for chatluna",
|
|
4
|
-
"version": "1.3.9",
|
|
4
|
+
"version": "1.3.11",
|
|
5
5
|
"main": "lib/index.cjs",
|
|
6
6
|
"module": "lib/index.mjs",
|
|
7
7
|
"typings": "lib/index.d.ts",
|
|
@@ -63,7 +63,7 @@
|
|
|
63
63
|
],
|
|
64
64
|
"dependencies": {
|
|
65
65
|
"@anatine/zod-openapi": "^2.2.8",
|
|
66
|
-
"@chatluna/v1-shared-adapter": "^1.0.
|
|
66
|
+
"@chatluna/v1-shared-adapter": "^1.0.20",
|
|
67
67
|
"@langchain/core": "0.3.62",
|
|
68
68
|
"openapi3-ts": "^4.5.0",
|
|
69
69
|
"zod": "3.25.76",
|
|
@@ -75,7 +75,7 @@
|
|
|
75
75
|
},
|
|
76
76
|
"peerDependencies": {
|
|
77
77
|
"koishi": "^4.18.9",
|
|
78
|
-
"koishi-plugin-chatluna": "^1.3.
|
|
78
|
+
"koishi-plugin-chatluna": "^1.3.4",
|
|
79
79
|
"koishi-plugin-chatluna-storage-service": "^0.0.11"
|
|
80
80
|
},
|
|
81
81
|
"peerDependenciesMeta": {
|