koishi-plugin-chatluna-google-gemini-adapter 1.3.9 → 1.3.10
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/index.cjs +25 -2
- package/lib/index.mjs +25 -2
- package/lib/requester.d.ts +1 -0
- package/lib/utils.d.ts +12 -12
- package/package.json +3 -3
package/lib/index.cjs
CHANGED
|
@@ -67,6 +67,7 @@ var import_v1_shared_adapter = require("@chatluna/v1-shared-adapter");
|
|
|
67
67
|
var import_string = require("koishi-plugin-chatluna/utils/string");
|
|
68
68
|
var import_types = require("@langchain/core/utils/types");
|
|
69
69
|
var import_zod_openapi = require("@anatine/zod-openapi");
|
|
70
|
+
var import_object = require("koishi-plugin-chatluna/utils/object");
|
|
70
71
|
async function langchainMessageToGeminiMessage(messages, plugin, model) {
|
|
71
72
|
return Promise.all(
|
|
72
73
|
messages.map(async (message) => {
|
|
@@ -75,6 +76,7 @@ async function langchainMessageToGeminiMessage(messages, plugin, model) {
|
|
|
75
76
|
if (role === "function" || hasFunctionCall) {
|
|
76
77
|
return processFunctionMessage(
|
|
77
78
|
message,
|
|
79
|
+
// 如果使用 new api,我们应该去掉 id,,,
|
|
78
80
|
plugin.config.useCamelCaseSystemInstruction
|
|
79
81
|
);
|
|
80
82
|
}
|
|
@@ -332,6 +334,7 @@ function prepareModelConfig(params, pluginConfig) {
|
|
|
332
334
|
let model = params.model;
|
|
333
335
|
let enabledThinking = null;
|
|
334
336
|
let thinkingLevel = "THINKING_LEVEL_UNSPECIFIED";
|
|
337
|
+
let imageSize;
|
|
335
338
|
if (model.includes("-thinking") && model.includes("gemini-2.5")) {
|
|
336
339
|
enabledThinking = !model.includes("-non-thinking");
|
|
337
340
|
model = model.replace("-non-thinking", "").replace("-thinking", "");
|
|
@@ -356,6 +359,11 @@ function prepareModelConfig(params, pluginConfig) {
|
|
|
356
359
|
} else {
|
|
357
360
|
thinkingLevel = void 0;
|
|
358
361
|
}
|
|
362
|
+
const imageSizeMatch = model.match(/-(2K|4K)$/);
|
|
363
|
+
if (imageSizeMatch) {
|
|
364
|
+
imageSize = imageSizeMatch[1];
|
|
365
|
+
model = model.replace(`-${imageSize}`, "");
|
|
366
|
+
}
|
|
359
367
|
let imageGeneration = pluginConfig.imageGeneration ?? false;
|
|
360
368
|
if (imageGeneration) {
|
|
361
369
|
imageGeneration = params.model.includes("gemini-2.0-flash-exp") || params.model.includes("image");
|
|
@@ -365,7 +373,8 @@ function prepareModelConfig(params, pluginConfig) {
|
|
|
365
373
|
enabledThinking,
|
|
366
374
|
thinkingBudget,
|
|
367
375
|
imageGeneration,
|
|
368
|
-
thinkingLevel
|
|
376
|
+
thinkingLevel,
|
|
377
|
+
imageSize
|
|
369
378
|
};
|
|
370
379
|
}
|
|
371
380
|
__name(prepareModelConfig, "prepareModelConfig");
|
|
@@ -396,12 +405,15 @@ function createSafetySettings(model) {
|
|
|
396
405
|
}
|
|
397
406
|
__name(createSafetySettings, "createSafetySettings");
|
|
398
407
|
function createGenerationConfig(params, modelConfig, pluginConfig) {
|
|
399
|
-
|
|
408
|
+
const base = {
|
|
400
409
|
stopSequences: params.stop,
|
|
401
410
|
temperature: params.temperature,
|
|
402
411
|
maxOutputTokens: params.model.includes("vision") ? void 0 : params.maxTokens,
|
|
403
412
|
topP: params.topP,
|
|
404
413
|
responseModalities: modelConfig.imageGeneration ? ["TEXT", "IMAGE"] : void 0,
|
|
414
|
+
imageConfig: modelConfig.imageSize ? {
|
|
415
|
+
imageSize: modelConfig.imageSize
|
|
416
|
+
} : void 0,
|
|
405
417
|
thinkingConfig: modelConfig.enabledThinking != null || pluginConfig.includeThoughts ? filterKeys(
|
|
406
418
|
{
|
|
407
419
|
thinkingBudget: modelConfig.thinkingBudget,
|
|
@@ -411,6 +423,7 @@ function createGenerationConfig(params, modelConfig, pluginConfig) {
|
|
|
411
423
|
notNullFn
|
|
412
424
|
) : void 0
|
|
413
425
|
};
|
|
426
|
+
return (0, import_object.deepAssign)({}, base, params.overrideRequestParams ?? {});
|
|
414
427
|
}
|
|
415
428
|
__name(createGenerationConfig, "createGenerationConfig");
|
|
416
429
|
async function createChatGenerationParams(params, plugin, modelConfig, pluginConfig) {
|
|
@@ -460,6 +473,7 @@ var GeminiRequester = class extends import_api.ModelRequester {
|
|
|
460
473
|
constructor(ctx, _configPool, _pluginConfig, _plugin) {
|
|
461
474
|
super(ctx, _configPool, _pluginConfig, _plugin);
|
|
462
475
|
this._pluginConfig = _pluginConfig;
|
|
476
|
+
this._plugin = _plugin;
|
|
463
477
|
}
|
|
464
478
|
static {
|
|
465
479
|
__name(this, "GeminiRequester");
|
|
@@ -1027,6 +1041,7 @@ var GeminiClient = class extends import_client.PlatformModelAndEmbeddingsClient
|
|
|
1027
1041
|
};
|
|
1028
1042
|
const thinkingModel = ["gemini-2.5-pro", "gemini-2.5-flash"];
|
|
1029
1043
|
const thinkingLevelModel = ["gemini-3.0-pro"];
|
|
1044
|
+
const imageResolutionModel = ["gemini-3.0-pro-image"];
|
|
1030
1045
|
if (thinkingModel.some(
|
|
1031
1046
|
(name2) => model.name.toLowerCase().includes(name2) && !model.name.toLowerCase().includes("image")
|
|
1032
1047
|
)) {
|
|
@@ -1046,6 +1061,14 @@ var GeminiClient = class extends import_client.PlatformModelAndEmbeddingsClient
|
|
|
1046
1061
|
{ ...info, name: model.name + "-low-thinking" },
|
|
1047
1062
|
info
|
|
1048
1063
|
);
|
|
1064
|
+
} else if (imageResolutionModel.some(
|
|
1065
|
+
(name2) => model.name.toLowerCase().includes(name2)
|
|
1066
|
+
)) {
|
|
1067
|
+
models.push(
|
|
1068
|
+
{ ...info, name: model.name + "-2K" },
|
|
1069
|
+
{ ...info, name: model.name + "-4K" },
|
|
1070
|
+
info
|
|
1071
|
+
);
|
|
1049
1072
|
} else {
|
|
1050
1073
|
models.push(info);
|
|
1051
1074
|
}
|
package/lib/index.mjs
CHANGED
|
@@ -64,6 +64,7 @@ import {
|
|
|
64
64
|
} from "koishi-plugin-chatluna/utils/string";
|
|
65
65
|
import { isZodSchemaV3 } from "@langchain/core/utils/types";
|
|
66
66
|
import { generateSchema } from "@anatine/zod-openapi";
|
|
67
|
+
import { deepAssign } from "koishi-plugin-chatluna/utils/object";
|
|
67
68
|
async function langchainMessageToGeminiMessage(messages, plugin, model) {
|
|
68
69
|
return Promise.all(
|
|
69
70
|
messages.map(async (message) => {
|
|
@@ -72,6 +73,7 @@ async function langchainMessageToGeminiMessage(messages, plugin, model) {
|
|
|
72
73
|
if (role === "function" || hasFunctionCall) {
|
|
73
74
|
return processFunctionMessage(
|
|
74
75
|
message,
|
|
76
|
+
// 如果使用 new api,我们应该去掉 id,,,
|
|
75
77
|
plugin.config.useCamelCaseSystemInstruction
|
|
76
78
|
);
|
|
77
79
|
}
|
|
@@ -329,6 +331,7 @@ function prepareModelConfig(params, pluginConfig) {
|
|
|
329
331
|
let model = params.model;
|
|
330
332
|
let enabledThinking = null;
|
|
331
333
|
let thinkingLevel = "THINKING_LEVEL_UNSPECIFIED";
|
|
334
|
+
let imageSize;
|
|
332
335
|
if (model.includes("-thinking") && model.includes("gemini-2.5")) {
|
|
333
336
|
enabledThinking = !model.includes("-non-thinking");
|
|
334
337
|
model = model.replace("-non-thinking", "").replace("-thinking", "");
|
|
@@ -353,6 +356,11 @@ function prepareModelConfig(params, pluginConfig) {
|
|
|
353
356
|
} else {
|
|
354
357
|
thinkingLevel = void 0;
|
|
355
358
|
}
|
|
359
|
+
const imageSizeMatch = model.match(/-(2K|4K)$/);
|
|
360
|
+
if (imageSizeMatch) {
|
|
361
|
+
imageSize = imageSizeMatch[1];
|
|
362
|
+
model = model.replace(`-${imageSize}`, "");
|
|
363
|
+
}
|
|
356
364
|
let imageGeneration = pluginConfig.imageGeneration ?? false;
|
|
357
365
|
if (imageGeneration) {
|
|
358
366
|
imageGeneration = params.model.includes("gemini-2.0-flash-exp") || params.model.includes("image");
|
|
@@ -362,7 +370,8 @@ function prepareModelConfig(params, pluginConfig) {
|
|
|
362
370
|
enabledThinking,
|
|
363
371
|
thinkingBudget,
|
|
364
372
|
imageGeneration,
|
|
365
|
-
thinkingLevel
|
|
373
|
+
thinkingLevel,
|
|
374
|
+
imageSize
|
|
366
375
|
};
|
|
367
376
|
}
|
|
368
377
|
__name(prepareModelConfig, "prepareModelConfig");
|
|
@@ -393,12 +402,15 @@ function createSafetySettings(model) {
|
|
|
393
402
|
}
|
|
394
403
|
__name(createSafetySettings, "createSafetySettings");
|
|
395
404
|
function createGenerationConfig(params, modelConfig, pluginConfig) {
|
|
396
|
-
|
|
405
|
+
const base = {
|
|
397
406
|
stopSequences: params.stop,
|
|
398
407
|
temperature: params.temperature,
|
|
399
408
|
maxOutputTokens: params.model.includes("vision") ? void 0 : params.maxTokens,
|
|
400
409
|
topP: params.topP,
|
|
401
410
|
responseModalities: modelConfig.imageGeneration ? ["TEXT", "IMAGE"] : void 0,
|
|
411
|
+
imageConfig: modelConfig.imageSize ? {
|
|
412
|
+
imageSize: modelConfig.imageSize
|
|
413
|
+
} : void 0,
|
|
402
414
|
thinkingConfig: modelConfig.enabledThinking != null || pluginConfig.includeThoughts ? filterKeys(
|
|
403
415
|
{
|
|
404
416
|
thinkingBudget: modelConfig.thinkingBudget,
|
|
@@ -408,6 +420,7 @@ function createGenerationConfig(params, modelConfig, pluginConfig) {
|
|
|
408
420
|
notNullFn
|
|
409
421
|
) : void 0
|
|
410
422
|
};
|
|
423
|
+
return deepAssign({}, base, params.overrideRequestParams ?? {});
|
|
411
424
|
}
|
|
412
425
|
__name(createGenerationConfig, "createGenerationConfig");
|
|
413
426
|
async function createChatGenerationParams(params, plugin, modelConfig, pluginConfig) {
|
|
@@ -457,6 +470,7 @@ var GeminiRequester = class extends ModelRequester {
|
|
|
457
470
|
constructor(ctx, _configPool, _pluginConfig, _plugin) {
|
|
458
471
|
super(ctx, _configPool, _pluginConfig, _plugin);
|
|
459
472
|
this._pluginConfig = _pluginConfig;
|
|
473
|
+
this._plugin = _plugin;
|
|
460
474
|
}
|
|
461
475
|
static {
|
|
462
476
|
__name(this, "GeminiRequester");
|
|
@@ -1024,6 +1038,7 @@ var GeminiClient = class extends PlatformModelAndEmbeddingsClient {
|
|
|
1024
1038
|
};
|
|
1025
1039
|
const thinkingModel = ["gemini-2.5-pro", "gemini-2.5-flash"];
|
|
1026
1040
|
const thinkingLevelModel = ["gemini-3.0-pro"];
|
|
1041
|
+
const imageResolutionModel = ["gemini-3.0-pro-image"];
|
|
1027
1042
|
if (thinkingModel.some(
|
|
1028
1043
|
(name2) => model.name.toLowerCase().includes(name2) && !model.name.toLowerCase().includes("image")
|
|
1029
1044
|
)) {
|
|
@@ -1043,6 +1058,14 @@ var GeminiClient = class extends PlatformModelAndEmbeddingsClient {
|
|
|
1043
1058
|
{ ...info, name: model.name + "-low-thinking" },
|
|
1044
1059
|
info
|
|
1045
1060
|
);
|
|
1061
|
+
} else if (imageResolutionModel.some(
|
|
1062
|
+
(name2) => model.name.toLowerCase().includes(name2)
|
|
1063
|
+
)) {
|
|
1064
|
+
models.push(
|
|
1065
|
+
{ ...info, name: model.name + "-2K" },
|
|
1066
|
+
{ ...info, name: model.name + "-4K" },
|
|
1067
|
+
info
|
|
1068
|
+
);
|
|
1046
1069
|
} else {
|
|
1047
1070
|
models.push(info);
|
|
1048
1071
|
}
|
package/lib/requester.d.ts
CHANGED
|
@@ -8,6 +8,7 @@ import { Context } from 'koishi';
|
|
|
8
8
|
import { RunnableConfig } from '@langchain/core/runnables';
|
|
9
9
|
export declare class GeminiRequester extends ModelRequester<ClientConfig, Config> implements EmbeddingsRequester {
|
|
10
10
|
_pluginConfig: Config;
|
|
11
|
+
_plugin: ChatLunaPlugin<ClientConfig, Config>;
|
|
11
12
|
constructor(ctx: Context, _configPool: ClientConfigPool<ClientConfig>, _pluginConfig: Config, _plugin: ChatLunaPlugin<ClientConfig, Config>);
|
|
12
13
|
completion(params: ModelRequestParams): Promise<ChatGeneration>;
|
|
13
14
|
completionStreamInternal(params: ModelRequestParams): AsyncGenerator<ChatGenerationChunk>;
|
package/lib/utils.d.ts
CHANGED
|
@@ -18,6 +18,7 @@ export declare function prepareModelConfig(params: ModelRequestParams, pluginCon
|
|
|
18
18
|
thinkingBudget: number;
|
|
19
19
|
imageGeneration: boolean;
|
|
20
20
|
thinkingLevel: string;
|
|
21
|
+
imageSize: string;
|
|
21
22
|
};
|
|
22
23
|
export declare function createSafetySettings(model: string): {
|
|
23
24
|
category: string;
|
|
@@ -29,20 +30,16 @@ export declare function createGenerationConfig(params: ModelRequestParams, model
|
|
|
29
30
|
maxOutputTokens: number;
|
|
30
31
|
topP: number;
|
|
31
32
|
responseModalities: string[];
|
|
32
|
-
|
|
33
|
-
|
|
33
|
+
imageConfig: {
|
|
34
|
+
imageSize: string;
|
|
35
|
+
};
|
|
36
|
+
thinkingConfig: Record<"thinkingBudget" | "thinkingLevel" | "includeThoughts", string | number | boolean>;
|
|
37
|
+
} & Record<string, any>;
|
|
34
38
|
export declare function createChatGenerationParams(params: ModelRequestParams, plugin: ChatLunaPlugin<ClientConfig, Config>, modelConfig: ReturnType<typeof prepareModelConfig>, pluginConfig: Config): Promise<{
|
|
35
39
|
[x: string]: Record<string, any> | ChatCompletionResponseMessage | ChatCompletionResponseMessage[] | {
|
|
36
40
|
category: string;
|
|
37
41
|
threshold: string;
|
|
38
|
-
}[]
|
|
39
|
-
stopSequences: string | string[];
|
|
40
|
-
temperature: number;
|
|
41
|
-
maxOutputTokens: number;
|
|
42
|
-
topP: number;
|
|
43
|
-
responseModalities: string[];
|
|
44
|
-
thinkingConfig: Record<"thinkingBudget" | "includeThoughts" | "thinkingLevel", string | number | boolean>;
|
|
45
|
-
};
|
|
42
|
+
}[];
|
|
46
43
|
contents: ChatCompletionResponseMessage[];
|
|
47
44
|
safetySettings: {
|
|
48
45
|
category: string;
|
|
@@ -54,8 +51,11 @@ export declare function createChatGenerationParams(params: ModelRequestParams, p
|
|
|
54
51
|
maxOutputTokens: number;
|
|
55
52
|
topP: number;
|
|
56
53
|
responseModalities: string[];
|
|
57
|
-
|
|
58
|
-
|
|
54
|
+
imageConfig: {
|
|
55
|
+
imageSize: string;
|
|
56
|
+
};
|
|
57
|
+
thinkingConfig: Record<"thinkingBudget" | "thinkingLevel" | "includeThoughts", string | number | boolean>;
|
|
58
|
+
} & Record<string, any>;
|
|
59
59
|
tools: Record<string, any>;
|
|
60
60
|
}>;
|
|
61
61
|
export declare function isChatResponse(response: any): response is ChatResponse;
|
package/package.json
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "koishi-plugin-chatluna-google-gemini-adapter",
|
|
3
3
|
"description": "google-gemini adapter for chatluna",
|
|
4
|
-
"version": "1.3.9",
|
|
4
|
+
"version": "1.3.10",
|
|
5
5
|
"main": "lib/index.cjs",
|
|
6
6
|
"module": "lib/index.mjs",
|
|
7
7
|
"typings": "lib/index.d.ts",
|
|
@@ -63,7 +63,7 @@
|
|
|
63
63
|
],
|
|
64
64
|
"dependencies": {
|
|
65
65
|
"@anatine/zod-openapi": "^2.2.8",
|
|
66
|
-
"@chatluna/v1-shared-adapter": "^1.0.
|
|
66
|
+
"@chatluna/v1-shared-adapter": "^1.0.17",
|
|
67
67
|
"@langchain/core": "0.3.62",
|
|
68
68
|
"openapi3-ts": "^4.5.0",
|
|
69
69
|
"zod": "3.25.76",
|
|
@@ -75,7 +75,7 @@
|
|
|
75
75
|
},
|
|
76
76
|
"peerDependencies": {
|
|
77
77
|
"koishi": "^4.18.9",
|
|
78
|
-
"koishi-plugin-chatluna": "^1.3.
|
|
78
|
+
"koishi-plugin-chatluna": "^1.3.1",
|
|
79
79
|
"koishi-plugin-chatluna-storage-service": "^0.0.11"
|
|
80
80
|
},
|
|
81
81
|
"peerDependenciesMeta": {
|