koishi-plugin-chatluna-google-gemini-adapter 1.0.0-rc.1 → 1.0.0-rc.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -2,6 +2,6 @@
  
  ## [![npm](https://img.shields.io/npm/v/koishi-plugin-chatluna-google-gemini-adapter)](https://www.npmjs.com/package/koishi-plugin-chatluna-google-gemini) [![npm](https://img.shields.io/npm/dm/koishi-plugin-chatluna-google-gemini-adapter)](https://www.npmjs.com/package//koishi-plugin-chatluna-google-gemini-adapter)
  
- > 为 ChatLuna 提供 Google-gemini 支持的适配器
+ > 为 ChatLuna 提供 Google Gemini 支持的适配器
  
- [Google-gemini 适配器文档](https://chatluna.chat/guide/configure-model-platform/google-gemini.html)
+ [Gemini 适配器文档](https://chatluna.chat/guide/configure-model-platform/google-gemini.html)
package/lib/index.cjs CHANGED
@@ -23,21 +23,21 @@ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: tru
  // src/locales/zh-CN.schema.yml
  var require_zh_CN_schema = __commonJS({
  "src/locales/zh-CN.schema.yml"(exports2, module2) {
- module2.exports = { inner: [{}, { $desc: "请求选项", apiKeys: { $inner: ["Gemini 的 API Key", "Gemini API 的请求地址"], $desc: "Gemini 的 API Key 和请求地址列表。" } }, { $desc: "模型配置", maxTokens: "回复的最大 Token 数(16~2097000,必须是 16 的倍数)。注意:仅当您使用的模型最大 Token 为 8000 及以上时,才建议设置超过 2000 token。", temperature: "回复的随机性程度,数值越高,回复越随机(范围:0~2)。" }] };
+ module2.exports = { $inner: [{}, { $desc: "请求选项", apiKeys: { $inner: ["Gemini 的 API Key", "Gemini API 的请求地址"], $desc: "Gemini 的 API Key 和请求地址列表。" } }, { $desc: "模型配置", maxTokens: "回复的最大 Token 数(16~2097000,必须是 16 的倍数)。注意:仅当您使用的模型最大 Token 为 8000 及以上时,才建议设置超过 2000 token。", temperature: "回复的随机性程度,数值越高,回复越随机(范围:0~2)。", googleSearch: "为模型启用谷歌搜索。", searchThreshold: "搜索的置信度阈值,范围:0~1,设置的数值越低,则越倾向于使用谷歌搜索。" }] };
  }
  });
  
  // src/locales/en-US.schema.yml
  var require_en_US_schema = __commonJS({
  "src/locales/en-US.schema.yml"(exports2, module2) {
- module2.exports = { $inner: [{}, { $desc: "API Configuration", apiKeys: { $inner: ["Gemini API Key", "Gemini API Endpoint (optional)"], $desc: "Gemini API access credentials" } }, { $desc: "Model Parameters", maxTokens: "Max output tokens (16-2097000, multiple of 16). >2000 for 8k+ models", temperature: "Sampling temperature (0-2). Higher: more random, Lower: more deterministic" }] };
+ module2.exports = { $inner: [{}, { $desc: "API Configuration", apiKeys: { $inner: ["Gemini API Key", "Gemini API Endpoint (optional)"], $desc: "Gemini API access credentials" } }, { $desc: "Model Parameters", maxTokens: "Max output tokens (16-2097000, multiple of 16). >2000 for 8k+ models", temperature: "Sampling temperature (0-2). Higher: more random, Lower: more deterministic", googleSearch: "Enable Google search", searchThreshold: "Search confidence [threshold](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval) (0-1). Lower: more likely to use Google search" }] };
  }
  });
  
  // src/index.ts
  var src_exports = {};
  __export(src_exports, {
- Config: () => Config,
+ Config: () => Config3,
  apply: () => apply,
  inject: () => inject,
  logger: () => logger,
@@ -132,7 +132,7 @@ async function langchainMessageToGeminiMessage(messages, model) {
  }
  ]
  };
- if ((model.includes("vision") || model.includes("gemini-1.5")) && images != null) {
+ if ((model.includes("vision") || model.includes("gemini")) && images != null && !model.includes("gemini-1.0")) {
  for (const image of images) {
  result2.parts.push({
  inline_data: {
@@ -237,11 +237,36 @@ function partAsType(part) {
  return part;
  }
  __name(partAsType, "partAsType");
- function formatToolsToGeminiAITools(tools) {
- if (tools.length < 1) {
+ function formatToolsToGeminiAITools(tools, config, model) {
+ if (tools.length < 1 && !config.googleSearch) {
  return void 0;
  }
- return tools.map(formatToolToGeminiAITool);
+ const functions = tools.map(formatToolToGeminiAITool);
+ const result = [];
+ if (functions.length > 0 && !config.googleSearch) {
+ result.push({
+ functionDeclarations: functions
+ });
+ } else if (functions.length > 0 && config.googleSearch) {
+ logger.warn("Google search is enabled, function call will be disabled.");
+ }
+ if (config.googleSearch) {
+ if (model.includes("gemini-2")) {
+ result.push({
+ google_search: {}
+ });
+ } else {
+ result.push({
+ google_search_retrieval: {
+ dynamic_retrieval_config: {
+ mode: "MODE_DYNAMIC",
+ dynamic_threshold: config.searchThreshold
+ }
+ }
+ });
+ }
+ }
+ return result;
  }
  __name(formatToolsToGeminiAITools, "formatToolsToGeminiAITools");
  function formatToolToGeminiAITool(tool) {
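For readability, here is a standalone sketch of the tool-selection branching that the rewritten `formatToolsToGeminiAITools` performs in the bundled code above; `SearchConfig` and `buildGeminiTools` are illustrative names, not part of the package's API:

```typescript
// Illustrative restatement of the rc.2 logic; not the plugin's exported API.
interface SearchConfig {
  googleSearch: boolean   // new `googleSearch` option
  searchThreshold: number // new `searchThreshold` option
}

type GeminiTool = Record<string, unknown>

function buildGeminiTools(
  functionDeclarations: object[],
  config: SearchConfig,
  model: string
): GeminiTool[] | undefined {
  if (functionDeclarations.length < 1 && !config.googleSearch) {
    return undefined
  }
  const tools: GeminiTool[] = []
  if (functionDeclarations.length > 0 && !config.googleSearch) {
    // Function calling is only attached while Google search is disabled.
    tools.push({ functionDeclarations })
  } else if (functionDeclarations.length > 0 && config.googleSearch) {
    console.warn('Google search is enabled, function call will be disabled.')
  }
  if (config.googleSearch) {
    if (model.includes('gemini-2')) {
      // Gemini 2.x models take the built-in google_search tool.
      tools.push({ google_search: {} })
    } else {
      // Earlier models use dynamic retrieval gated by the confidence threshold.
      tools.push({
        google_search_retrieval: {
          dynamic_retrieval_config: {
            mode: 'MODE_DYNAMIC',
            dynamic_threshold: config.searchThreshold
          }
        }
      })
    }
  }
  return tools
}
```

As in the bundled code, enabling Google search drops any registered function declarations and only logs a warning, so the two tool types are never combined in one request.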
@@ -274,10 +299,11 @@ __name(messageTypeToGeminiRole, "messageTypeToGeminiRole");
  
  // src/requester.ts
  var GeminiRequester = class extends import_api.ModelRequester {
- constructor(_config, _plugin) {
+ constructor(_config, _plugin, _pluginConfig) {
  super();
  this._config = _config;
  this._plugin = _plugin;
+ this._pluginConfig = _pluginConfig;
  }
  static {
  __name(this, "GeminiRequester");
@@ -315,9 +341,11 @@ var GeminiRequester = class extends import_api.ModelRequester {
  maxOutputTokens: params.model.includes("vision") ? void 0 : params.maxTokens,
  topP: params.topP
  },
- tools: !params.model.includes("vision") && params.tools != null ? {
- functionDeclarations: formatToolsToGeminiAITools(params.tools)
- } : void 0
+ tools: params.tools != null || this._pluginConfig.googleSearch ? formatToolsToGeminiAITools(
+ params.tools ?? [],
+ this._pluginConfig,
+ params.model
+ ) : void 0
  },
  {
  signal: params.signal
@@ -543,7 +571,11 @@ var GeminiClient = class extends import_client.PlatformModelAndEmbeddingsClient
  constructor(ctx, _config, clientConfig, plugin) {
  super(ctx, clientConfig);
  this._config = _config;
- this._requester = new GeminiRequester(clientConfig, plugin);
+ this._requester = new GeminiRequester(
+ clientConfig,
+ plugin,
+ this._config
+ );
  }
  static {
  __name(this, "GeminiClient");
@@ -651,7 +683,7 @@ function apply(ctx, config) {
  });
  }
  __name(apply, "apply");
- var Config = import_koishi.Schema.intersect([
+ var Config3 = import_koishi.Schema.intersect([
  import_chat.ChatLunaPlugin.Config,
  import_koishi.Schema.object({
  apiKeys: import_koishi.Schema.array(
@@ -665,7 +697,9 @@ var Config = import_koishi.Schema.intersect([
  }),
  import_koishi.Schema.object({
  maxTokens: import_koishi.Schema.number().min(16).max(2097e3).step(16).default(8064),
- temperature: import_koishi.Schema.percent().min(0).max(2).step(0.1).default(0.8)
+ temperature: import_koishi.Schema.percent().min(0).max(2).step(0.1).default(0.8),
+ googleSearch: import_koishi.Schema.boolean().default(false),
+ searchThreshold: import_koishi.Schema.number().min(0).max(1).step(0.1).default(0.5)
  })
  ]).i18n({
  "zh-CN": require_zh_CN_schema(),
package/lib/index.d.ts CHANGED
@@ -14,6 +14,8 @@ export interface Config extends ChatLunaPlugin.Config {
  apiKeys: [string, string][];
  maxTokens: number;
  temperature: number;
+ googleSearch: boolean;
+ searchThreshold: number;
  }
  export const Config: Schema<Config>;
  export const inject: string[];
@@ -80,14 +82,15 @@ export interface CreateEmbeddingResponse {
  export type ChatCompletionResponseMessageRoleEnum = 'system' | 'model' | 'user' | 'function';
  export function langchainMessageToGeminiMessage(messages: BaseMessage[], model?: string): Promise<ChatCompletionResponseMessage[]>;
  export function partAsType<T extends ChatPart>(part: ChatPart): T;
- export function formatToolsToGeminiAITools(tools: StructuredTool[]): ChatCompletionFunction[];
+ export function formatToolsToGeminiAITools(tools: StructuredTool[], config: Config, model: string): Record<string, any>;
  export function formatToolToGeminiAITool(tool: StructuredTool): ChatCompletionFunction;
  export function messageTypeToGeminiRole(type: MessageType): ChatCompletionResponseMessageRoleEnum;
  export function convertDeltaToMessageChunk(delta: Record<string, any>, defaultRole?: ChatCompletionResponseMessageRoleEnum): HumanMessageChunk | AIMessageChunk | SystemMessageChunk | ChatMessageChunk;
  export class GeminiRequester extends ModelRequester implements EmbeddingsRequester {
  private _config;
  private _plugin;
- constructor(_config: ClientConfig, _plugin: ChatLunaPlugin);
+ private _pluginConfig;
+ constructor(_config: ClientConfig, _plugin: ChatLunaPlugin, _pluginConfig: Config);
  completionStream(params: ModelRequestParams): AsyncGenerator<ChatGenerationChunk>;
  embeddings(params: EmbeddingsRequestParams): Promise<number[] | number[][]>;
  getModels(): Promise<string[]>;
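These typings also record that `GeminiRequester` now receives the plugin config as a third constructor argument, mirroring the `GeminiClient` change in the bundles above; a type-level sketch (the placeholder values stand in for objects ChatLuna wires up at runtime, and no real import paths are shown):

```typescript
// rc.1: new GeminiRequester(clientConfig, plugin)
// rc.2: new GeminiRequester(clientConfig, plugin, pluginConfig)
declare const clientConfig: ClientConfig // per-client configuration
declare const plugin: ChatLunaPlugin     // the ChatLuna plugin instance
declare const pluginConfig: Config       // now carries googleSearch / searchThreshold

const requester = new GeminiRequester(clientConfig, plugin, pluginConfig)
```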
package/lib/index.mjs CHANGED
@@ -8,14 +8,14 @@ var __commonJS = (cb, mod) => function __require() {
  // src/locales/zh-CN.schema.yml
  var require_zh_CN_schema = __commonJS({
  "src/locales/zh-CN.schema.yml"(exports, module) {
- module.exports = { inner: [{}, { $desc: "请求选项", apiKeys: { $inner: ["Gemini 的 API Key", "Gemini API 的请求地址"], $desc: "Gemini 的 API Key 和请求地址列表。" } }, { $desc: "模型配置", maxTokens: "回复的最大 Token 数(16~2097000,必须是 16 的倍数)。注意:仅当您使用的模型最大 Token 为 8000 及以上时,才建议设置超过 2000 token。", temperature: "回复的随机性程度,数值越高,回复越随机(范围:0~2)。" }] };
+ module.exports = { $inner: [{}, { $desc: "请求选项", apiKeys: { $inner: ["Gemini 的 API Key", "Gemini API 的请求地址"], $desc: "Gemini 的 API Key 和请求地址列表。" } }, { $desc: "模型配置", maxTokens: "回复的最大 Token 数(16~2097000,必须是 16 的倍数)。注意:仅当您使用的模型最大 Token 为 8000 及以上时,才建议设置超过 2000 token。", temperature: "回复的随机性程度,数值越高,回复越随机(范围:0~2)。", googleSearch: "为模型启用谷歌搜索。", searchThreshold: "搜索的置信度阈值,范围:0~1,设置的数值越低,则越倾向于使用谷歌搜索。" }] };
  }
  });
  
  // src/locales/en-US.schema.yml
  var require_en_US_schema = __commonJS({
  "src/locales/en-US.schema.yml"(exports, module) {
- module.exports = { $inner: [{}, { $desc: "API Configuration", apiKeys: { $inner: ["Gemini API Key", "Gemini API Endpoint (optional)"], $desc: "Gemini API access credentials" } }, { $desc: "Model Parameters", maxTokens: "Max output tokens (16-2097000, multiple of 16). >2000 for 8k+ models", temperature: "Sampling temperature (0-2). Higher: more random, Lower: more deterministic" }] };
+ module.exports = { $inner: [{}, { $desc: "API Configuration", apiKeys: { $inner: ["Gemini API Key", "Gemini API Endpoint (optional)"], $desc: "Gemini API access credentials" } }, { $desc: "Model Parameters", maxTokens: "Max output tokens (16-2097000, multiple of 16). >2000 for 8k+ models", temperature: "Sampling temperature (0-2). Higher: more random, Lower: more deterministic", googleSearch: "Enable Google search", searchThreshold: "Search confidence [threshold](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval) (0-1). Lower: more likely to use Google search" }] };
  }
  });
  
@@ -126,7 +126,7 @@ async function langchainMessageToGeminiMessage(messages, model) {
  }
  ]
  };
- if ((model.includes("vision") || model.includes("gemini-1.5")) && images != null) {
+ if ((model.includes("vision") || model.includes("gemini")) && images != null && !model.includes("gemini-1.0")) {
  for (const image of images) {
  result2.parts.push({
  inline_data: {
@@ -231,11 +231,36 @@ function partAsType(part) {
  return part;
  }
  __name(partAsType, "partAsType");
- function formatToolsToGeminiAITools(tools) {
- if (tools.length < 1) {
+ function formatToolsToGeminiAITools(tools, config, model) {
+ if (tools.length < 1 && !config.googleSearch) {
  return void 0;
  }
- return tools.map(formatToolToGeminiAITool);
+ const functions = tools.map(formatToolToGeminiAITool);
+ const result = [];
+ if (functions.length > 0 && !config.googleSearch) {
+ result.push({
+ functionDeclarations: functions
+ });
+ } else if (functions.length > 0 && config.googleSearch) {
+ logger.warn("Google search is enabled, function call will be disabled.");
+ }
+ if (config.googleSearch) {
+ if (model.includes("gemini-2")) {
+ result.push({
+ google_search: {}
+ });
+ } else {
+ result.push({
+ google_search_retrieval: {
+ dynamic_retrieval_config: {
+ mode: "MODE_DYNAMIC",
+ dynamic_threshold: config.searchThreshold
+ }
+ }
+ });
+ }
+ }
+ return result;
  }
  __name(formatToolsToGeminiAITools, "formatToolsToGeminiAITools");
  function formatToolToGeminiAITool(tool) {
@@ -268,10 +293,11 @@ __name(messageTypeToGeminiRole, "messageTypeToGeminiRole");
  
  // src/requester.ts
  var GeminiRequester = class extends ModelRequester {
- constructor(_config, _plugin) {
+ constructor(_config, _plugin, _pluginConfig) {
  super();
  this._config = _config;
  this._plugin = _plugin;
+ this._pluginConfig = _pluginConfig;
  }
  static {
  __name(this, "GeminiRequester");
@@ -309,9 +335,11 @@ var GeminiRequester = class extends ModelRequester {
  maxOutputTokens: params.model.includes("vision") ? void 0 : params.maxTokens,
  topP: params.topP
  },
- tools: !params.model.includes("vision") && params.tools != null ? {
- functionDeclarations: formatToolsToGeminiAITools(params.tools)
- } : void 0
+ tools: params.tools != null || this._pluginConfig.googleSearch ? formatToolsToGeminiAITools(
+ params.tools ?? [],
+ this._pluginConfig,
+ params.model
+ ) : void 0
  },
  {
  signal: params.signal
@@ -537,7 +565,11 @@ var GeminiClient = class extends PlatformModelAndEmbeddingsClient {
  constructor(ctx, _config, clientConfig, plugin) {
  super(ctx, clientConfig);
  this._config = _config;
- this._requester = new GeminiRequester(clientConfig, plugin);
+ this._requester = new GeminiRequester(
+ clientConfig,
+ plugin,
+ this._config
+ );
  }
  static {
  __name(this, "GeminiClient");
@@ -645,7 +677,7 @@ function apply(ctx, config) {
  });
  }
  __name(apply, "apply");
- var Config = Schema.intersect([
+ var Config3 = Schema.intersect([
  ChatLunaPlugin.Config,
  Schema.object({
  apiKeys: Schema.array(
@@ -659,7 +691,9 @@ var Config = Schema.intersect([
  }),
  Schema.object({
  maxTokens: Schema.number().min(16).max(2097e3).step(16).default(8064),
- temperature: Schema.percent().min(0).max(2).step(0.1).default(0.8)
+ temperature: Schema.percent().min(0).max(2).step(0.1).default(0.8),
+ googleSearch: Schema.boolean().default(false),
+ searchThreshold: Schema.number().min(0).max(1).step(0.1).default(0.5)
  })
  ]).i18n({
  "zh-CN": require_zh_CN_schema(),
@@ -669,7 +703,7 @@ var Config = Schema.intersect([
  var inject = ["chatluna"];
  var name = "chatluna-google-gemini-adapter";
  export {
- Config,
+ Config3 as Config,
  apply,
  inject,
  logger,
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
  "name": "koishi-plugin-chatluna-google-gemini-adapter",
  "description": "google-gemini adapter for chatluna",
- "version": "1.0.0-rc.1",
+ "version": "1.0.0-rc.2",
  "main": "lib/index.cjs",
  "module": "lib/index.mjs",
  "typings": "lib/index.d.ts",
@@ -73,7 +73,7 @@
  },
  "peerDependencies": {
  "koishi": "^4.18.1",
- "koishi-plugin-chatluna": "^1.0.0-rc.1"
+ "koishi-plugin-chatluna": "^1.0.0-rc.2"
  },
  "koishi": {
  "description": {