koishi-plugin-chatluna-google-gemini-adapter 1.2.0-alpha.3 → 1.2.0-alpha.5

This diff shows the contents of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their respective public registries.
package/lib/index.cjs CHANGED
@@ -33,14 +33,14 @@ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: tru
33
33
  // src/locales/zh-CN.schema.yml
34
34
  var require_zh_CN_schema = __commonJS({
35
35
  "src/locales/zh-CN.schema.yml"(exports2, module2) {
36
- module2.exports = { $inner: [{}, { $desc: "请求选项", platform: "适配器的平台名。(不懂请不要修改)", apiKeys: { $inner: ["Gemini 的 API Key", "Gemini API 的请求地址"], $desc: "Gemini 的 API Key 和请求地址列表。" } }, { $desc: "模型配置", maxTokens: "输入的最大上下文 Token(16~2097000,必须是 16 的倍数)。注意:仅当您使用的模型最大 Token 为 8000 及以上时,才建议设置超过 2000 token。", temperature: "回复的随机性程度,数值越高,回复越随机(范围:0~2)。", googleSearch: "为模型启用谷歌搜索。", groundingContentDisplay: "是否显示谷歌搜索结果。", imageGeneration: "为模型启用图像生成。目前仅支持 `gemini-2.0-flash-exp` 模型。", searchThreshold: "搜索的置信度阈值,范围:0~1,设置的数值越低,则越倾向于使用谷歌搜索。" }] };
36
+ module2.exports = { $inner: [{}, { $desc: "请求选项", platform: "适配器的平台名。(不懂请不要修改)", apiKeys: { $inner: ["Gemini 的 API Key", "Gemini API 的请求地址"], $desc: "Gemini 的 API Key 和请求地址列表。" } }, { $desc: "模型配置", maxTokens: "输入的最大上下文 Token(16~2097000,必须是 16 的倍数)。注意:仅当您使用的模型最大 Token 为 8000 及以上时,才建议设置超过 2000 token。", temperature: "回复的随机性程度,数值越高,回复越随机(范围:0~2)。", googleSearch: "为模型启用谷歌搜索。", thinkingBudget: "思考预算,范围:0~24576,设置的数值越大,思考时花费的 Token 越多。目前仅支持 gemini 2.5 系列模型。", groundingContentDisplay: "是否显示谷歌搜索结果。", imageGeneration: "为模型启用图像生成。目前仅支持 `gemini-2.0-flash-exp` 模型。", searchThreshold: "搜索的置信度阈值,范围:0~1,设置的数值越低,则越倾向于使用谷歌搜索。" }] };
37
37
  }
38
38
  });
39
39
 
40
40
  // src/locales/en-US.schema.yml
41
41
  var require_en_US_schema = __commonJS({
42
42
  "src/locales/en-US.schema.yml"(exports2, module2) {
43
- module2.exports = { $inner: [{}, { $desc: "API Configuration", platform: "Adapter platform name. (Do not modify if you do not understand)", apiKeys: { $inner: ["Gemini API Key", "Gemini API Endpoint (optional)"], $desc: "Gemini API access credentials" } }, { $desc: "Model Parameters", maxTokens: "Max output tokens (16-2097000, multiple of 16). >2000 for 8k+ models", temperature: "Sampling temperature (0-2). Higher: more random, Lower: more deterministic", googleSearch: "Enable Google search", groundingContentDisplay: "Enable display of search results", imageGeneration: "Enable image generation (only for `gemini-2.0-flash-exp` model)", searchThreshold: "Search confidence [threshold](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval) (0-1). Lower: more likely to use Google search" }] };
43
+ module2.exports = { $inner: [{}, { $desc: "API Configuration", platform: "Adapter platform name. (Do not modify if you do not understand)", apiKeys: { $inner: ["Gemini API Key", "Gemini API Endpoint (optional)"], $desc: "Gemini API access credentials" } }, { $desc: "Model Parameters", maxTokens: "Max output tokens (16-2097000, multiple of 16). >2000 for 8k+ models", temperature: "Sampling temperature (0-2). Higher: more random, Lower: more deterministic", googleSearch: "Enable Google search", thinkingBudget: "Thinking budget (0-24576). Higher: more tokens spent on thinking. Currently only supports `gemini-2.5` series models.", groundingContentDisplay: "Enable display of search results", imageGeneration: "Enable image generation (only for `gemini-2.0-flash-exp` model)", searchThreshold: "Search confidence [threshold](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval) (0-1). Lower: more likely to use Google search" }] };
44
44
  }
45
45
  });
46
46
 
@@ -142,7 +142,7 @@ async function langchainMessageToGeminiMessage(messages, model) {
142
142
  }
143
143
  ]
144
144
  };
145
- if ((model.includes("vision") || model.includes("gemini")) && images != null && !model.includes("gemini-1.0")) {
145
+ if ((model.includes("vision") || model.includes("gemini") || model.includes("gemma")) && images != null && !model.includes("gemini-1.0")) {
146
146
  for (const image of images) {
147
147
  const mineType = image.split(";")?.[0]?.split(":")?.[1];
148
148
  const data = image.replace(/^data:image\/\w+;base64,/, "");
@@ -230,7 +230,7 @@ function formatToolsToGeminiAITools(tools, config, model) {
230
230
  "gemini-1.5-flash",
231
231
  "gemini-2.0-flash-exp"
232
232
  ];
233
- const imageInputModels = [
233
+ const imageGenerationModels = [
234
234
  "gemini-2.0-flash-exp",
235
235
  "gemini-2.0-flash-exp-image-generation"
236
236
  ];
@@ -243,7 +243,7 @@ function formatToolsToGeminiAITools(tools, config, model) {
243
243
  logger.warn("Google search is enabled, tool calling will be disable.");
244
244
  } else if ((unsupportedModels.some(
245
245
  (unsupportedModel) => model.includes(unsupportedModel)
246
- ) || imageInputModels.some(
246
+ ) || imageGenerationModels.some(
247
247
  (unsupportedModels2) => model.includes(unsupportedModels2)
248
248
  ) && config.imageGeneration) && googleSearch) {
249
249
  logger.warn(
@@ -286,29 +286,27 @@ function formatToolToGeminiAITool(tool) {
286
286
  }
287
287
  __name(formatToolToGeminiAITool, "formatToolToGeminiAITool");
288
288
  function removeAdditionalProperties(schema) {
289
- const updatedSchema = { ...schema };
290
- if (Object.hasOwn(updatedSchema, "additionalProperties")) {
291
- delete updatedSchema["additionalProperties"];
292
- }
293
- if (Object.hasOwn(updatedSchema, "$schema")) {
294
- delete updatedSchema["$schema"];
295
- }
296
- if (updatedSchema["properties"]) {
297
- const keys = Object.keys(updatedSchema["properties"]);
298
- removeProperties(updatedSchema["properties"], keys, 0);
289
+ if (!schema || typeof schema !== "object") return schema;
290
+ const stack = [[schema, null]];
291
+ while (stack.length > 0) {
292
+ const [current] = stack.pop();
293
+ if (typeof current !== "object" || current === null) continue;
294
+ if (Object.hasOwn(current, "additionalProperties")) {
295
+ delete current["additionalProperties"];
296
+ }
297
+ if (Object.hasOwn(current, "$schema")) {
298
+ delete current["$schema"];
299
+ }
300
+ for (const key of Object.keys(current)) {
301
+ const value = current[key];
302
+ if (value && typeof value === "object") {
303
+ stack.push([value, key]);
304
+ }
305
+ }
299
306
  }
300
- return updatedSchema;
307
+ return schema;
301
308
  }
302
309
  __name(removeAdditionalProperties, "removeAdditionalProperties");
303
- function removeProperties(properties, keys, index) {
304
- if (index >= keys.length) {
305
- return;
306
- }
307
- const key = keys[index];
308
- properties[key] = removeAdditionalProperties(properties[key]);
309
- removeProperties(properties, keys, index + 1);
310
- }
311
- __name(removeProperties, "removeProperties");
312
310
  function messageTypeToGeminiRole(type) {
313
311
  switch (type) {
314
312
  case "system":
@@ -339,29 +337,35 @@ var GeminiRequester = class extends import_api.ModelRequester {
339
337
  }
340
338
  async *completionStream(params) {
341
339
  try {
340
+ let model = params.model;
341
+ let enabledThinking = null;
342
+ if (model.includes("thinking")) {
343
+ enabledThinking = !model.includes("-no-thinking");
344
+ model = model.replace("-no-thinking", "").replace("-thinking", "");
345
+ }
342
346
  const response = await this._post(
343
- `models/${params.model}:streamGenerateContent?alt=sse`,
347
+ `models/${model}:streamGenerateContent?alt=sse`,
344
348
  {
345
349
  contents: await langchainMessageToGeminiMessage(
346
350
  params.input,
347
- params.model
351
+ model
348
352
  ),
349
353
  safetySettings: [
350
354
  {
351
355
  category: "HARM_CATEGORY_HARASSMENT",
352
- threshold: params.model.includes("gemini-2.0") ? "OFF" : "BLOCK_NONE"
356
+ threshold: params.model.includes("gemini-2") ? "OFF" : "BLOCK_NONE"
353
357
  },
354
358
  {
355
359
  category: "HARM_CATEGORY_HATE_SPEECH",
356
- threshold: params.model.includes("gemini-2.0") ? "OFF" : "BLOCK_NONE"
360
+ threshold: params.model.includes("gemini-2") ? "OFF" : "BLOCK_NONE"
357
361
  },
358
362
  {
359
363
  category: "HARM_CATEGORY_SEXUALLY_EXPLICIT",
360
- threshold: params.model.includes("gemini-2.0") ? "OFF" : "BLOCK_NONE"
364
+ threshold: params.model.includes("gemini-2") ? "OFF" : "BLOCK_NONE"
361
365
  },
362
366
  {
363
367
  category: "HARM_CATEGORY_DANGEROUS_CONTENT",
364
- threshold: params.model.includes("gemini-2.0") ? "OFF" : "BLOCK_NONE"
368
+ threshold: params.model.includes("gemini-2") ? "OFF" : "BLOCK_NONE"
365
369
  },
366
370
  {
367
371
  category: "HARM_CATEGORY_CIVIC_INTEGRITY",
@@ -376,8 +380,11 @@ var GeminiRequester = class extends import_api.ModelRequester {
376
380
  responseModalities: params.model.includes(
377
381
  // TODO: Wait for google release to all models
378
382
  "gemini-2.0-flash-exp"
379
- ) && this._pluginConfig.imageGeneration ? ["TEXT", "IMAGE"] : void 0
380
- // thinkingConfig: { includeThoughts: true }
383
+ ) && this._pluginConfig.imageGeneration ? ["TEXT", "IMAGE"] : void 0,
384
+ thinkingConfig: enabledThinking != null ? {
385
+ thinkingBudget: enabledThinking ? this._pluginConfig.thinkingBudget ?? 4096 : 0
386
+ // includeThoughts: true
387
+ } : void 0
381
388
  },
382
389
  tools: params.tools != null || this._pluginConfig.googleSearch ? formatToolsToGeminiAITools(
383
390
  params.tools ?? [],
@@ -583,7 +590,7 @@ ${groundingContent}`
583
590
  );
584
591
  }
585
592
  return data.models.map((model) => model.name).filter(
586
- (model) => model.includes("gemini") || model.includes("embedding")
593
+ (model) => model.includes("gemini") || model.includes("gemma") || model.includes("embedding")
587
594
  );
588
595
  } catch (e) {
589
596
  const error = new Error(
@@ -665,15 +672,17 @@ var GeminiClient = class extends import_client.PlatformModelAndEmbeddingsClient
665
672
  }
666
673
  async refreshModels() {
667
674
  try {
668
- const rawModels = await this._requester.getModels();
675
+ let rawModels = await this._requester.getModels();
669
676
  if (!rawModels.length) {
670
677
  throw new import_error2.ChatLunaError(
671
678
  import_error2.ChatLunaErrorCode.MODEL_INIT_ERROR,
672
679
  new Error("No model found")
673
680
  );
674
681
  }
675
- return rawModels.map((model) => model.replace("models/", "")).map((model) => {
676
- return {
682
+ rawModels = rawModels.map((model) => model.replace("models/", ""));
683
+ const models = [];
684
+ for (const model of rawModels) {
685
+ const info = {
677
686
  name: model,
678
687
  maxTokens: ((model2) => {
679
688
  if (model2.includes("gemini-1.5-pro")) {
@@ -691,7 +700,21 @@ var GeminiClient = class extends import_client.PlatformModelAndEmbeddingsClient
691
700
  functionCall: !model.includes("vision"),
692
701
  supportMode: ["all"]
693
702
  };
694
- });
703
+ if (model.includes("gemini-2.5")) {
704
+ if (!model.includes("-thinking")) {
705
+ models.push(
706
+ { ...info, name: model + "-no-thinking" },
707
+ { ...info, name: model + "-thinking" },
708
+ info
709
+ );
710
+ } else {
711
+ models.push(info);
712
+ }
713
+ } else {
714
+ models.push(info);
715
+ }
716
+ }
717
+ return models;
695
718
  } catch (e) {
696
719
  throw new import_error2.ChatLunaError(import_error2.ChatLunaErrorCode.MODEL_INIT_ERROR, e);
697
720
  }
@@ -778,6 +801,7 @@ var Config3 = import_koishi.Schema.intersect([
778
801
  maxTokens: import_koishi.Schema.number().min(16).max(2097e3).step(16).default(8064),
779
802
  temperature: import_koishi.Schema.percent().min(0).max(2).step(0.1).default(0.8),
780
803
  googleSearch: import_koishi.Schema.boolean().default(false),
804
+ thinkingBudget: import_koishi.Schema.number().min(0).max(24576).step(16).default(4096),
781
805
  imageGeneration: import_koishi.Schema.boolean().default(false),
782
806
  groundingContentDisplay: import_koishi.Schema.boolean().default(false),
783
807
  searchThreshold: import_koishi.Schema.number().min(0).max(1).step(0.1).default(0.5)
package/lib/index.d.ts CHANGED
@@ -12,6 +12,7 @@ export interface Config extends ChatLunaPlugin.Config {
12
12
  searchThreshold: number;
13
13
  groundingContentDisplay: boolean;
14
14
  imageGeneration: boolean;
15
+ thinkingBudget: number;
15
16
  }
16
17
  export declare const Config: Schema<Config>;
17
18
  export declare const inject: string[];
package/lib/index.mjs CHANGED
@@ -8,14 +8,14 @@ var __commonJS = (cb, mod) => function __require() {
8
8
  // src/locales/zh-CN.schema.yml
9
9
  var require_zh_CN_schema = __commonJS({
10
10
  "src/locales/zh-CN.schema.yml"(exports, module) {
11
- module.exports = { $inner: [{}, { $desc: "请求选项", platform: "适配器的平台名。(不懂请不要修改)", apiKeys: { $inner: ["Gemini 的 API Key", "Gemini API 的请求地址"], $desc: "Gemini 的 API Key 和请求地址列表。" } }, { $desc: "模型配置", maxTokens: "输入的最大上下文 Token(16~2097000,必须是 16 的倍数)。注意:仅当您使用的模型最大 Token 为 8000 及以上时,才建议设置超过 2000 token。", temperature: "回复的随机性程度,数值越高,回复越随机(范围:0~2)。", googleSearch: "为模型启用谷歌搜索。", groundingContentDisplay: "是否显示谷歌搜索结果。", imageGeneration: "为模型启用图像生成。目前仅支持 `gemini-2.0-flash-exp` 模型。", searchThreshold: "搜索的置信度阈值,范围:0~1,设置的数值越低,则越倾向于使用谷歌搜索。" }] };
11
+ module.exports = { $inner: [{}, { $desc: "请求选项", platform: "适配器的平台名。(不懂请不要修改)", apiKeys: { $inner: ["Gemini 的 API Key", "Gemini API 的请求地址"], $desc: "Gemini 的 API Key 和请求地址列表。" } }, { $desc: "模型配置", maxTokens: "输入的最大上下文 Token(16~2097000,必须是 16 的倍数)。注意:仅当您使用的模型最大 Token 为 8000 及以上时,才建议设置超过 2000 token。", temperature: "回复的随机性程度,数值越高,回复越随机(范围:0~2)。", googleSearch: "为模型启用谷歌搜索。", thinkingBudget: "思考预算,范围:0~24576,设置的数值越大,思考时花费的 Token 越多。目前仅支持 gemini 2.5 系列模型。", groundingContentDisplay: "是否显示谷歌搜索结果。", imageGeneration: "为模型启用图像生成。目前仅支持 `gemini-2.0-flash-exp` 模型。", searchThreshold: "搜索的置信度阈值,范围:0~1,设置的数值越低,则越倾向于使用谷歌搜索。" }] };
12
12
  }
13
13
  });
14
14
 
15
15
  // src/locales/en-US.schema.yml
16
16
  var require_en_US_schema = __commonJS({
17
17
  "src/locales/en-US.schema.yml"(exports, module) {
18
- module.exports = { $inner: [{}, { $desc: "API Configuration", platform: "Adapter platform name. (Do not modify if you do not understand)", apiKeys: { $inner: ["Gemini API Key", "Gemini API Endpoint (optional)"], $desc: "Gemini API access credentials" } }, { $desc: "Model Parameters", maxTokens: "Max output tokens (16-2097000, multiple of 16). >2000 for 8k+ models", temperature: "Sampling temperature (0-2). Higher: more random, Lower: more deterministic", googleSearch: "Enable Google search", groundingContentDisplay: "Enable display of search results", imageGeneration: "Enable image generation (only for `gemini-2.0-flash-exp` model)", searchThreshold: "Search confidence [threshold](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval) (0-1). Lower: more likely to use Google search" }] };
18
+ module.exports = { $inner: [{}, { $desc: "API Configuration", platform: "Adapter platform name. (Do not modify if you do not understand)", apiKeys: { $inner: ["Gemini API Key", "Gemini API Endpoint (optional)"], $desc: "Gemini API access credentials" } }, { $desc: "Model Parameters", maxTokens: "Max output tokens (16-2097000, multiple of 16). >2000 for 8k+ models", temperature: "Sampling temperature (0-2). Higher: more random, Lower: more deterministic", googleSearch: "Enable Google search", thinkingBudget: "Thinking budget (0-24576). Higher: more tokens spent on thinking. Currently only supports `gemini-2.5` series models.", groundingContentDisplay: "Enable display of search results", imageGeneration: "Enable image generation (only for `gemini-2.0-flash-exp` model)", searchThreshold: "Search confidence [threshold](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval) (0-1). Lower: more likely to use Google search" }] };
19
19
  }
20
20
  });
21
21
 
@@ -125,7 +125,7 @@ async function langchainMessageToGeminiMessage(messages, model) {
125
125
  }
126
126
  ]
127
127
  };
128
- if ((model.includes("vision") || model.includes("gemini")) && images != null && !model.includes("gemini-1.0")) {
128
+ if ((model.includes("vision") || model.includes("gemini") || model.includes("gemma")) && images != null && !model.includes("gemini-1.0")) {
129
129
  for (const image of images) {
130
130
  const mineType = image.split(";")?.[0]?.split(":")?.[1];
131
131
  const data = image.replace(/^data:image\/\w+;base64,/, "");
@@ -213,7 +213,7 @@ function formatToolsToGeminiAITools(tools, config, model) {
213
213
  "gemini-1.5-flash",
214
214
  "gemini-2.0-flash-exp"
215
215
  ];
216
- const imageInputModels = [
216
+ const imageGenerationModels = [
217
217
  "gemini-2.0-flash-exp",
218
218
  "gemini-2.0-flash-exp-image-generation"
219
219
  ];
@@ -226,7 +226,7 @@ function formatToolsToGeminiAITools(tools, config, model) {
226
226
  logger.warn("Google search is enabled, tool calling will be disable.");
227
227
  } else if ((unsupportedModels.some(
228
228
  (unsupportedModel) => model.includes(unsupportedModel)
229
- ) || imageInputModels.some(
229
+ ) || imageGenerationModels.some(
230
230
  (unsupportedModels2) => model.includes(unsupportedModels2)
231
231
  ) && config.imageGeneration) && googleSearch) {
232
232
  logger.warn(
@@ -269,29 +269,27 @@ function formatToolToGeminiAITool(tool) {
269
269
  }
270
270
  __name(formatToolToGeminiAITool, "formatToolToGeminiAITool");
271
271
  function removeAdditionalProperties(schema) {
272
- const updatedSchema = { ...schema };
273
- if (Object.hasOwn(updatedSchema, "additionalProperties")) {
274
- delete updatedSchema["additionalProperties"];
275
- }
276
- if (Object.hasOwn(updatedSchema, "$schema")) {
277
- delete updatedSchema["$schema"];
278
- }
279
- if (updatedSchema["properties"]) {
280
- const keys = Object.keys(updatedSchema["properties"]);
281
- removeProperties(updatedSchema["properties"], keys, 0);
272
+ if (!schema || typeof schema !== "object") return schema;
273
+ const stack = [[schema, null]];
274
+ while (stack.length > 0) {
275
+ const [current] = stack.pop();
276
+ if (typeof current !== "object" || current === null) continue;
277
+ if (Object.hasOwn(current, "additionalProperties")) {
278
+ delete current["additionalProperties"];
279
+ }
280
+ if (Object.hasOwn(current, "$schema")) {
281
+ delete current["$schema"];
282
+ }
283
+ for (const key of Object.keys(current)) {
284
+ const value = current[key];
285
+ if (value && typeof value === "object") {
286
+ stack.push([value, key]);
287
+ }
288
+ }
282
289
  }
283
- return updatedSchema;
290
+ return schema;
284
291
  }
285
292
  __name(removeAdditionalProperties, "removeAdditionalProperties");
286
- function removeProperties(properties, keys, index) {
287
- if (index >= keys.length) {
288
- return;
289
- }
290
- const key = keys[index];
291
- properties[key] = removeAdditionalProperties(properties[key]);
292
- removeProperties(properties, keys, index + 1);
293
- }
294
- __name(removeProperties, "removeProperties");
295
293
  function messageTypeToGeminiRole(type) {
296
294
  switch (type) {
297
295
  case "system":
@@ -322,29 +320,35 @@ var GeminiRequester = class extends ModelRequester {
322
320
  }
323
321
  async *completionStream(params) {
324
322
  try {
323
+ let model = params.model;
324
+ let enabledThinking = null;
325
+ if (model.includes("thinking")) {
326
+ enabledThinking = !model.includes("-no-thinking");
327
+ model = model.replace("-no-thinking", "").replace("-thinking", "");
328
+ }
325
329
  const response = await this._post(
326
- `models/${params.model}:streamGenerateContent?alt=sse`,
330
+ `models/${model}:streamGenerateContent?alt=sse`,
327
331
  {
328
332
  contents: await langchainMessageToGeminiMessage(
329
333
  params.input,
330
- params.model
334
+ model
331
335
  ),
332
336
  safetySettings: [
333
337
  {
334
338
  category: "HARM_CATEGORY_HARASSMENT",
335
- threshold: params.model.includes("gemini-2.0") ? "OFF" : "BLOCK_NONE"
339
+ threshold: params.model.includes("gemini-2") ? "OFF" : "BLOCK_NONE"
336
340
  },
337
341
  {
338
342
  category: "HARM_CATEGORY_HATE_SPEECH",
339
- threshold: params.model.includes("gemini-2.0") ? "OFF" : "BLOCK_NONE"
343
+ threshold: params.model.includes("gemini-2") ? "OFF" : "BLOCK_NONE"
340
344
  },
341
345
  {
342
346
  category: "HARM_CATEGORY_SEXUALLY_EXPLICIT",
343
- threshold: params.model.includes("gemini-2.0") ? "OFF" : "BLOCK_NONE"
347
+ threshold: params.model.includes("gemini-2") ? "OFF" : "BLOCK_NONE"
344
348
  },
345
349
  {
346
350
  category: "HARM_CATEGORY_DANGEROUS_CONTENT",
347
- threshold: params.model.includes("gemini-2.0") ? "OFF" : "BLOCK_NONE"
351
+ threshold: params.model.includes("gemini-2") ? "OFF" : "BLOCK_NONE"
348
352
  },
349
353
  {
350
354
  category: "HARM_CATEGORY_CIVIC_INTEGRITY",
@@ -359,8 +363,11 @@ var GeminiRequester = class extends ModelRequester {
359
363
  responseModalities: params.model.includes(
360
364
  // TODO: Wait for google release to all models
361
365
  "gemini-2.0-flash-exp"
362
- ) && this._pluginConfig.imageGeneration ? ["TEXT", "IMAGE"] : void 0
363
- // thinkingConfig: { includeThoughts: true }
366
+ ) && this._pluginConfig.imageGeneration ? ["TEXT", "IMAGE"] : void 0,
367
+ thinkingConfig: enabledThinking != null ? {
368
+ thinkingBudget: enabledThinking ? this._pluginConfig.thinkingBudget ?? 4096 : 0
369
+ // includeThoughts: true
370
+ } : void 0
364
371
  },
365
372
  tools: params.tools != null || this._pluginConfig.googleSearch ? formatToolsToGeminiAITools(
366
373
  params.tools ?? [],
@@ -566,7 +573,7 @@ ${groundingContent}`
566
573
  );
567
574
  }
568
575
  return data.models.map((model) => model.name).filter(
569
- (model) => model.includes("gemini") || model.includes("embedding")
576
+ (model) => model.includes("gemini") || model.includes("gemma") || model.includes("embedding")
570
577
  );
571
578
  } catch (e) {
572
579
  const error = new Error(
@@ -648,15 +655,17 @@ var GeminiClient = class extends PlatformModelAndEmbeddingsClient {
648
655
  }
649
656
  async refreshModels() {
650
657
  try {
651
- const rawModels = await this._requester.getModels();
658
+ let rawModels = await this._requester.getModels();
652
659
  if (!rawModels.length) {
653
660
  throw new ChatLunaError2(
654
661
  ChatLunaErrorCode2.MODEL_INIT_ERROR,
655
662
  new Error("No model found")
656
663
  );
657
664
  }
658
- return rawModels.map((model) => model.replace("models/", "")).map((model) => {
659
- return {
665
+ rawModels = rawModels.map((model) => model.replace("models/", ""));
666
+ const models = [];
667
+ for (const model of rawModels) {
668
+ const info = {
660
669
  name: model,
661
670
  maxTokens: ((model2) => {
662
671
  if (model2.includes("gemini-1.5-pro")) {
@@ -674,7 +683,21 @@ var GeminiClient = class extends PlatformModelAndEmbeddingsClient {
674
683
  functionCall: !model.includes("vision"),
675
684
  supportMode: ["all"]
676
685
  };
677
- });
686
+ if (model.includes("gemini-2.5")) {
687
+ if (!model.includes("-thinking")) {
688
+ models.push(
689
+ { ...info, name: model + "-no-thinking" },
690
+ { ...info, name: model + "-thinking" },
691
+ info
692
+ );
693
+ } else {
694
+ models.push(info);
695
+ }
696
+ } else {
697
+ models.push(info);
698
+ }
699
+ }
700
+ return models;
678
701
  } catch (e) {
679
702
  throw new ChatLunaError2(ChatLunaErrorCode2.MODEL_INIT_ERROR, e);
680
703
  }
@@ -761,6 +784,7 @@ var Config3 = Schema.intersect([
761
784
  maxTokens: Schema.number().min(16).max(2097e3).step(16).default(8064),
762
785
  temperature: Schema.percent().min(0).max(2).step(0.1).default(0.8),
763
786
  googleSearch: Schema.boolean().default(false),
787
+ thinkingBudget: Schema.number().min(0).max(24576).step(16).default(4096),
764
788
  imageGeneration: Schema.boolean().default(false),
765
789
  groundingContentDisplay: Schema.boolean().default(false),
766
790
  searchThreshold: Schema.number().min(0).max(1).step(0.1).default(0.5)
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "name": "koishi-plugin-chatluna-google-gemini-adapter",
3
3
  "description": "google-gemini adapter for chatluna",
4
- "version": "1.2.0-alpha.3",
4
+ "version": "1.2.0-alpha.5",
5
5
  "main": "lib/index.cjs",
6
6
  "module": "lib/index.mjs",
7
7
  "typings": "lib/index.d.ts",