koishi-plugin-chatluna-google-gemini-adapter 1.1.0-alpha.0 → 1.1.0-alpha.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
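
In summary, 1.1.0-alpha.2 adds a `groundingContentDisplay` config flag (default `false`) that appends footnote-style Google-search grounding citations to replies, adds a non-streaming `completion()` method to `GeminiRequester`, replaces the top-level-only deletion of `$schema`/`additionalProperties` in tool schemas with a recursive `removeAdditionalProperties` pass, and raises the fallback context-window size from 30720 to 1048576 while treating `gemini-2.0-pro` like `gemini-1.5-flash`. A minimal sketch of the model-parameter block with the new flag; field names and defaults come from the Schema diff below, while the surrounding config shape is an assumption, not code from the package:

    // Hypothetical ChatLuna plugin config fragment; only the field names and
    // default values are taken from the Schema definitions in the diff below.
    const modelConfig = {
        maxTokens: 8064, // Schema.number().min(16).max(2097000).step(16).default(8064)
        temperature: 0.8, // Schema.percent().min(0).max(2).step(0.1).default(0.8)
        googleSearch: true, // enable Gemini's Google-search grounding tool
        groundingContentDisplay: true, // new flag: append grounding citations to replies
        searchThreshold: 0.5 // dynamic-retrieval confidence threshold (0-1)
    }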
package/lib/index.cjs CHANGED
@@ -23,14 +23,14 @@ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: tru
  // src/locales/zh-CN.schema.yml
  var require_zh_CN_schema = __commonJS({
  "src/locales/zh-CN.schema.yml"(exports2, module2) {
- module2.exports = { $inner: [{}, { $desc: "请求选项", apiKeys: { $inner: ["Gemini 的 API Key", "Gemini API 的请求地址"], $desc: "Gemini 的 API Key 和请求地址列表。" } }, { $desc: "模型配置", maxTokens: "输入的最大上下文 Token(16~2097000,必须是 16 的倍数)。注意:仅当您使用的模型最大 Token 为 8000 及以上时,才建议设置超过 2000 token。", temperature: "回复的随机性程度,数值越高,回复越随机(范围:0~2)。", googleSearch: "为模型启用谷歌搜索。", searchThreshold: "搜索的置信度阈值,范围:0~1,设置的数值越低,则越倾向于使用谷歌搜索。" }] };
+ module2.exports = { $inner: [{}, { $desc: "请求选项", apiKeys: { $inner: ["Gemini 的 API Key", "Gemini API 的请求地址"], $desc: "Gemini 的 API Key 和请求地址列表。" } }, { $desc: "模型配置", maxTokens: "输入的最大上下文 Token(16~2097000,必须是 16 的倍数)。注意:仅当您使用的模型最大 Token 为 8000 及以上时,才建议设置超过 2000 token。", temperature: "回复的随机性程度,数值越高,回复越随机(范围:0~2)。", googleSearch: "为模型启用谷歌搜索。", groundingContentDisplay: "是否显示谷歌搜索结果。", searchThreshold: "搜索的置信度阈值,范围:0~1,设置的数值越低,则越倾向于使用谷歌搜索。" }] };
  }
  });

  // src/locales/en-US.schema.yml
  var require_en_US_schema = __commonJS({
  "src/locales/en-US.schema.yml"(exports2, module2) {
- module2.exports = { $inner: [{}, { $desc: "API Configuration", apiKeys: { $inner: ["Gemini API Key", "Gemini API Endpoint (optional)"], $desc: "Gemini API access credentials" } }, { $desc: "Model Parameters", maxTokens: "Max output tokens (16-2097000, multiple of 16). >2000 for 8k+ models", temperature: "Sampling temperature (0-2). Higher: more random, Lower: more deterministic", googleSearch: "Enable Google search", searchThreshold: "Search confidence [threshold](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval) (0-1). Lower: more likely to use Google search" }] };
+ module2.exports = { $inner: [{}, { $desc: "API Configuration", apiKeys: { $inner: ["Gemini API Key", "Gemini API Endpoint (optional)"], $desc: "Gemini API access credentials" } }, { $desc: "Model Parameters", maxTokens: "Max output tokens (16-2097000, multiple of 16). >2000 for 8k+ models", temperature: "Sampling temperature (0-2). Higher: more random, Lower: more deterministic", googleSearch: "Enable Google search", groundingContentDisplay: "Enable display of search results", searchThreshold: "Search confidence [threshold](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval) (0-1). Lower: more likely to use Google search" }] };
  }
  });

@@ -248,7 +248,7 @@ function formatToolsToGeminiAITools(tools, config, model) {
  functionDeclarations: functions
  });
  } else if (functions.length > 0 && config.googleSearch) {
- logger.warn("Google search is enabled, function call will be disabled.");
+ logger.warn("Google search is enabled, tool calling will be disable.");
  }
  if (config.googleSearch) {
  if (model.includes("gemini-2")) {
@@ -270,9 +270,10 @@ function formatToolsToGeminiAITools(tools, config, model) {
  }
  __name(formatToolsToGeminiAITools, "formatToolsToGeminiAITools");
  function formatToolToGeminiAITool(tool) {
- const parameters = (0, import_zod_to_json_schema.zodToJsonSchema)(tool.schema);
- delete parameters["$schema"];
- delete parameters["additionalProperties"];
+ const parameters = removeAdditionalProperties(
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ (0, import_zod_to_json_schema.zodToJsonSchema)(tool.schema)
+ );
  return {
  name: tool.name,
  description: tool.description,
@@ -281,6 +282,30 @@ function formatToolToGeminiAITool(tool) {
  };
  }
  __name(formatToolToGeminiAITool, "formatToolToGeminiAITool");
+ function removeAdditionalProperties(schema) {
+ const updatedSchema = { ...schema };
+ if (Object.hasOwn(updatedSchema, "additionalProperties")) {
+ delete updatedSchema["additionalProperties"];
+ }
+ if (Object.hasOwn(updatedSchema, "$schema")) {
+ delete updatedSchema["$schema"];
+ }
+ if (updatedSchema["properties"]) {
+ const keys = Object.keys(updatedSchema["properties"]);
+ removeProperties(updatedSchema["properties"], keys, 0);
+ }
+ return updatedSchema;
+ }
+ __name(removeAdditionalProperties, "removeAdditionalProperties");
+ function removeProperties(properties, keys, index) {
+ if (index >= keys.length) {
+ return;
+ }
+ const key = keys[index];
+ properties[key] = removeAdditionalProperties(properties[key]);
+ removeProperties(properties, keys, index + 1);
+ }
+ __name(removeProperties, "removeProperties");
  function messageTypeToGeminiRole(type) {
  switch (type) {
  case "system":
@@ -308,6 +333,136 @@ var GeminiRequester = class extends import_api.ModelRequester {
  static {
  __name(this, "GeminiRequester");
  }
+ async completion(params) {
+ try {
+ const response = await this._post(
+ `models/${params.model}`,
+ {
+ contents: await langchainMessageToGeminiMessage(
+ params.input,
+ params.model
+ ),
+ safetySettings: [
+ {
+ category: "HARM_CATEGORY_HARASSMENT",
+ threshold: "BLOCK_NONE"
+ },
+ {
+ category: "HARM_CATEGORY_HATE_SPEECH",
+ threshold: "BLOCK_NONE"
+ },
+ {
+ category: "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+ threshold: "BLOCK_NONE"
+ },
+ {
+ category: "HARM_CATEGORY_DANGEROUS_CONTENT",
+ threshold: "BLOCK_NONE"
+ },
+ {
+ category: "HARM_CATEGORY_CIVIC_INTEGRITY",
+ threshold: "BLOCK_NONE"
+ }
+ ],
+ generationConfig: {
+ stopSequences: params.stop,
+ temperature: params.temperature,
+ maxOutputTokens: params.model.includes("vision") ? void 0 : params.maxTokens,
+ topP: params.topP
+ // thinkingConfig: { includeThoughts: true }
+ },
+ tools: params.tools != null || this._pluginConfig.googleSearch ? formatToolsToGeminiAITools(
+ params.tools ?? [],
+ this._pluginConfig,
+ params.model
+ ) : void 0
+ },
+ {
+ signal: params.signal
+ }
+ );
+ const parsedResponse = await response.json();
+ let groundingContent = "";
+ let currentGroudingIndex = 0;
+ let reasoningContent = "";
+ let content = "";
+ const functionCall = {
+ name: "",
+ args: "",
+ arguments: ""
+ };
+ const part = parsedResponse.candidates[0].content;
+ const messagePart = partAsType(part.parts[0]);
+ const chatFunctionCallingPart = partAsType(
+ part.parts[0]
+ );
+ if (messagePart.text) {
+ if (messagePart.thought) {
+ reasoningContent = messagePart.text;
+ }
+ content = messagePart.text;
+ }
+ const deltaFunctionCall = chatFunctionCallingPart.functionCall;
+ if (deltaFunctionCall) {
+ let args = deltaFunctionCall.args?.input ?? deltaFunctionCall.args;
+ try {
+ let parsedArgs = JSON.parse(args);
+ if (typeof parsedArgs !== "string") {
+ args = parsedArgs;
+ }
+ parsedArgs = JSON.parse(args);
+ if (typeof parsedArgs !== "string") {
+ args = parsedArgs;
+ }
+ } catch (e) {
+ }
+ functionCall.args = JSON.stringify(args);
+ functionCall.name = deltaFunctionCall.name;
+ functionCall.arguments = deltaFunctionCall.args;
+ }
+ for (const source of parsedResponse.candidates[0].groundingMetadata?.groundingChunks ?? []) {
+ groundingContent += `[^${currentGroudingIndex++}]: [${source.web.title}](${source.web.uri})
+ `;
+ }
+ try {
+ const messageChunk = new import_messages2.AIMessageChunk(content);
+ messageChunk.additional_kwargs = {
+ function_call: functionCall.name.length > 0 ? {
+ name: functionCall.name,
+ arguments: functionCall.args,
+ args: functionCall.arguments
+ } : void 0
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ };
+ messageChunk.content = content;
+ if (groundingContent.length > 0) {
+ logger.debug(`grounding content: ${groundingContent}`);
+ if (this._pluginConfig.groundingContentDisplay) {
+ messageChunk.content += `
+
+ ${groundingContent}`;
+ }
+ }
+ if (reasoningContent.length > 0) {
+ logger.debug(`reasoning content: ${reasoningContent}`);
+ }
+ const generationChunk = new import_outputs.ChatGenerationChunk({
+ message: messageChunk,
+ text: messageChunk.content
+ });
+ return generationChunk;
+ } catch (e) {
+ logger.error("error with", parsedResponse);
+ throw new import_error.ChatLunaError(import_error.ChatLunaErrorCode.API_REQUEST_FAILED, e);
+ }
+ } catch (e) {
+ if (e instanceof import_error.ChatLunaError) {
+ throw e;
+ } else {
+ throw new import_error.ChatLunaError(import_error.ChatLunaErrorCode.API_REQUEST_FAILED, e);
+ }
+ }
+ }
  async *completionStream(params) {
  try {
  const response = await this._post(
@@ -363,6 +518,8 @@ var GeminiRequester = class extends import_api.ModelRequester {
  );
  const jsonParser = new import_json.JSONParser();
  const writable = stream.writable.getWriter();
+ let groundingContent = "";
+ let currentGroudingIndex = 0;
  jsonParser.onEnd = async () => {
  await writable.close();
  };
@@ -379,9 +536,13 @@ var GeminiRequester = class extends import_api.ModelRequester {
  for (const part of parts) {
  await writable.write(part);
  }
+ for (const source of candidate.groundingMetadata?.groundingChunks ?? []) {
+ groundingContent += `[^${currentGroudingIndex++}]: [${source.web.title}](${source.web.uri})
+ `;
+ }
  }
  };
- await (0, import_sse.sse)(
+ (0, import_sse.sse)(
  response,
  async (rawData) => {
  jsonParser.write(rawData);
@@ -389,9 +550,8 @@ var GeminiRequester = class extends import_api.ModelRequester {
  },
  0
  );
- let content = "";
  let reasoningContent = "";
- let isOldVisionModel = params.model.includes("vision");
+ let content = "";
  const functionCall = {
  name: "",
  args: "",
@@ -405,15 +565,7 @@ var GeminiRequester = class extends import_api.ModelRequester {
  reasoningContent += messagePart.text;
  continue;
  }
- if (params.tools != null) {
- content = messagePart.text;
- } else {
- content += messagePart.text;
- }
- if (isOldVisionModel && /\s*model:\s*/.test(content)) {
- isOldVisionModel = false;
- content = messagePart.text.replace(/\s*model:\s*/, "");
- }
+ content = messagePart.text;
  }
  const deltaFunctionCall = chatFunctionCallingPart.functionCall;
  if (deltaFunctionCall) {
@@ -466,6 +618,20 @@ var GeminiRequester = class extends import_api.ModelRequester {
  if (reasoningContent.length > 0) {
  logger.debug(`reasoning content: ${reasoningContent}`);
  }
+ if (groundingContent.length > 0) {
+ logger.debug(`grounding content: ${groundingContent}`);
+ if (this._pluginConfig.groundingContentDisplay) {
+ const groundingMessage = new import_messages2.AIMessageChunk(
+ `
+ ${groundingContent}`
+ );
+ const generationChunk = new import_outputs.ChatGenerationChunk({
+ message: groundingMessage,
+ text: "\n" + groundingContent
+ });
+ yield generationChunk;
+ }
+ }
  } catch (e) {
  if (e instanceof import_error.ChatLunaError) {
  throw e;
@@ -618,13 +784,13 @@ var GeminiClient = class extends import_client.PlatformModelAndEmbeddingsClient
  if (model2.includes("gemini-1.5-pro")) {
  return 1048576;
  }
- if (model2.includes("gemini-1.5-flash")) {
+ if (model2.includes("gemini-1.5-flash") || model2.includes("gemini-2.0-pro")) {
  return 2097152;
  }
  if (model2.includes("gemini-1.0-pro")) {
  return 30720;
  }
- return 30720;
+ return 1048576;
  })(model),
  type: model.includes("embedding") ? import_types.ModelType.embeddings : import_types.ModelType.llm,
  functionCall: !model.includes("vision"),
@@ -715,6 +881,7 @@ var Config3 = import_koishi.Schema.intersect([
  maxTokens: import_koishi.Schema.number().min(16).max(2097e3).step(16).default(8064),
  temperature: import_koishi.Schema.percent().min(0).max(2).step(0.1).default(0.8),
  googleSearch: import_koishi.Schema.boolean().default(false),
+ groundingContentDisplay: import_koishi.Schema.boolean().default(false),
  searchThreshold: import_koishi.Schema.number().min(0).max(1).step(0.1).default(0.5)
  })
  ]).i18n({
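
Context for the `removeAdditionalProperties` helper introduced above: `zodToJsonSchema` attaches a top-level `$schema` key and an `additionalProperties` key at every object level, which Gemini's `functionDeclarations` schema validator does not accept, and the old code deleted them only at the top level. A standalone sketch of the effect, using a hypothetical nested zod schema that is not from the package:

    // Assumes the package's existing zod-to-json-schema dependency plus zod;
    // the schema itself is invented for illustration.
    import { z } from 'zod'
    import { zodToJsonSchema } from 'zod-to-json-schema'

    const json = zodToJsonSchema(z.object({ user: z.object({ name: z.string() }) }))
    // json.$schema and json.additionalProperties sit at the top level, and
    // json.properties.user.additionalProperties sits one level down. Deleting
    // only the top-level keys (the old behavior) leaves the nested one in
    // place; removeAdditionalProperties recurses through `properties`, so all
    // of them are stripped before the schema is sent as a functionDeclaration.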
package/lib/index.d.ts CHANGED
@@ -2,7 +2,7 @@ import { ChatLunaPlugin } from 'koishi-plugin-chatluna/services/chat';
  import { Context, Logger, Schema } from 'koishi';
  import { AIMessageChunk, BaseMessage, ChatMessageChunk, HumanMessageChunk, MessageType, SystemMessageChunk } from '@langchain/core/messages';
  import { StructuredTool } from '@langchain/core/tools';
- import { ChatGenerationChunk } from '@langchain/core/outputs';
+ import { ChatGeneration, ChatGenerationChunk } from '@langchain/core/outputs';
  import { EmbeddingsRequester, EmbeddingsRequestParams, ModelRequester, ModelRequestParams } from 'koishi-plugin-chatluna/llm-core/platform/api';
  import { ClientConfig } from 'koishi-plugin-chatluna/llm-core/platform/config';
  import { PlatformModelAndEmbeddingsClient } from 'koishi-plugin-chatluna/llm-core/platform/client';
@@ -16,6 +16,7 @@ export interface Config extends ChatLunaPlugin.Config {
  temperature: number;
  googleSearch: boolean;
  searchThreshold: number;
+ groundingContentDisplay: boolean;
  }
  export const Config: Schema<Config>;
  export const inject: string[];
@@ -50,6 +51,26 @@ export type ChatFunctionResponsePart = {
  export interface ChatResponse {
  candidates: {
  content: ChatCompletionResponseMessage;
+ groundingMetadata: {
+ searchEntryPoint: {
+ renderedContent: string;
+ };
+ groundingChunks: {
+ web: {
+ uri: string;
+ title: string;
+ };
+ }[];
+ groundingSupports: {
+ segment: {
+ endIndex: number;
+ text: string;
+ };
+ groundingChunkIndices: number[];
+ confidenceScores: number[];
+ }[];
+ webSearchQueries: string[];
+ };
  finishReason: string;
  index: number;
  safetyRatings: {
@@ -92,6 +113,7 @@ export class GeminiRequester extends ModelRequester implements EmbeddingsRequest
  private _plugin;
  private _pluginConfig;
  constructor(_config: ClientConfig, _plugin: ChatLunaPlugin, _pluginConfig: Config);
+ completion(params: ModelRequestParams): Promise<ChatGeneration>;
  completionStream(params: ModelRequestParams): AsyncGenerator<ChatGenerationChunk>;
  embeddings(params: EmbeddingsRequestParams): Promise<number[] | number[][]>;
  getModels(): Promise<string[]>;
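
For a sense of how both new code paths consume this `groundingMetadata` shape: each `groundingChunks[i].web` entry becomes one footnote-style markdown line, collected into `groundingContent`, logged, and appended to the reply only when `groundingContentDisplay` is enabled. A standalone sketch with fabricated sample data:

    // The sample metadata object is invented for illustration; the loop
    // mirrors the one added in completion() and completionStream().
    const groundingMetadata = {
        groundingChunks: [
            { web: { uri: 'https://example.com/a', title: 'Example A' } },
            { web: { uri: 'https://example.com/b', title: 'Example B' } }
        ]
    }
    let groundingContent = ''
    let i = 0
    for (const source of groundingMetadata.groundingChunks ?? []) {
        groundingContent += `[^${i++}]: [${source.web.title}](${source.web.uri})` + '\n'
    }
    // => "[^0]: [Example A](https://example.com/a)\n[^1]: [Example B](https://example.com/b)\n"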
package/lib/index.mjs CHANGED
@@ -8,14 +8,14 @@ var __commonJS = (cb, mod) => function __require() {
  // src/locales/zh-CN.schema.yml
  var require_zh_CN_schema = __commonJS({
  "src/locales/zh-CN.schema.yml"(exports, module) {
- module.exports = { $inner: [{}, { $desc: "请求选项", apiKeys: { $inner: ["Gemini 的 API Key", "Gemini API 的请求地址"], $desc: "Gemini 的 API Key 和请求地址列表。" } }, { $desc: "模型配置", maxTokens: "输入的最大上下文 Token(16~2097000,必须是 16 的倍数)。注意:仅当您使用的模型最大 Token 为 8000 及以上时,才建议设置超过 2000 token。", temperature: "回复的随机性程度,数值越高,回复越随机(范围:0~2)。", googleSearch: "为模型启用谷歌搜索。", searchThreshold: "搜索的置信度阈值,范围:0~1,设置的数值越低,则越倾向于使用谷歌搜索。" }] };
+ module.exports = { $inner: [{}, { $desc: "请求选项", apiKeys: { $inner: ["Gemini 的 API Key", "Gemini API 的请求地址"], $desc: "Gemini 的 API Key 和请求地址列表。" } }, { $desc: "模型配置", maxTokens: "输入的最大上下文 Token(16~2097000,必须是 16 的倍数)。注意:仅当您使用的模型最大 Token 为 8000 及以上时,才建议设置超过 2000 token。", temperature: "回复的随机性程度,数值越高,回复越随机(范围:0~2)。", googleSearch: "为模型启用谷歌搜索。", groundingContentDisplay: "是否显示谷歌搜索结果。", searchThreshold: "搜索的置信度阈值,范围:0~1,设置的数值越低,则越倾向于使用谷歌搜索。" }] };
  }
  });

  // src/locales/en-US.schema.yml
  var require_en_US_schema = __commonJS({
  "src/locales/en-US.schema.yml"(exports, module) {
- module.exports = { $inner: [{}, { $desc: "API Configuration", apiKeys: { $inner: ["Gemini API Key", "Gemini API Endpoint (optional)"], $desc: "Gemini API access credentials" } }, { $desc: "Model Parameters", maxTokens: "Max output tokens (16-2097000, multiple of 16). >2000 for 8k+ models", temperature: "Sampling temperature (0-2). Higher: more random, Lower: more deterministic", googleSearch: "Enable Google search", searchThreshold: "Search confidence [threshold](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval) (0-1). Lower: more likely to use Google search" }] };
+ module.exports = { $inner: [{}, { $desc: "API Configuration", apiKeys: { $inner: ["Gemini API Key", "Gemini API Endpoint (optional)"], $desc: "Gemini API access credentials" } }, { $desc: "Model Parameters", maxTokens: "Max output tokens (16-2097000, multiple of 16). >2000 for 8k+ models", temperature: "Sampling temperature (0-2). Higher: more random, Lower: more deterministic", googleSearch: "Enable Google search", groundingContentDisplay: "Enable display of search results", searchThreshold: "Search confidence [threshold](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval) (0-1). Lower: more likely to use Google search" }] };
  }
  });

@@ -242,7 +242,7 @@ function formatToolsToGeminiAITools(tools, config, model) {
  functionDeclarations: functions
  });
  } else if (functions.length > 0 && config.googleSearch) {
- logger.warn("Google search is enabled, function call will be disabled.");
+ logger.warn("Google search is enabled, tool calling will be disable.");
  }
  if (config.googleSearch) {
  if (model.includes("gemini-2")) {
@@ -264,9 +264,10 @@ function formatToolsToGeminiAITools(tools, config, model) {
  }
  __name(formatToolsToGeminiAITools, "formatToolsToGeminiAITools");
  function formatToolToGeminiAITool(tool) {
- const parameters = zodToJsonSchema(tool.schema);
- delete parameters["$schema"];
- delete parameters["additionalProperties"];
+ const parameters = removeAdditionalProperties(
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ zodToJsonSchema(tool.schema)
+ );
  return {
  name: tool.name,
  description: tool.description,
@@ -275,6 +276,30 @@ function formatToolToGeminiAITool(tool) {
  };
  }
  __name(formatToolToGeminiAITool, "formatToolToGeminiAITool");
+ function removeAdditionalProperties(schema) {
+ const updatedSchema = { ...schema };
+ if (Object.hasOwn(updatedSchema, "additionalProperties")) {
+ delete updatedSchema["additionalProperties"];
+ }
+ if (Object.hasOwn(updatedSchema, "$schema")) {
+ delete updatedSchema["$schema"];
+ }
+ if (updatedSchema["properties"]) {
+ const keys = Object.keys(updatedSchema["properties"]);
+ removeProperties(updatedSchema["properties"], keys, 0);
+ }
+ return updatedSchema;
+ }
+ __name(removeAdditionalProperties, "removeAdditionalProperties");
+ function removeProperties(properties, keys, index) {
+ if (index >= keys.length) {
+ return;
+ }
+ const key = keys[index];
+ properties[key] = removeAdditionalProperties(properties[key]);
+ removeProperties(properties, keys, index + 1);
+ }
+ __name(removeProperties, "removeProperties");
  function messageTypeToGeminiRole(type) {
  switch (type) {
  case "system":
@@ -302,6 +327,136 @@ var GeminiRequester = class extends ModelRequester {
  static {
  __name(this, "GeminiRequester");
  }
+ async completion(params) {
+ try {
+ const response = await this._post(
+ `models/${params.model}`,
+ {
+ contents: await langchainMessageToGeminiMessage(
+ params.input,
+ params.model
+ ),
+ safetySettings: [
+ {
+ category: "HARM_CATEGORY_HARASSMENT",
+ threshold: "BLOCK_NONE"
+ },
+ {
+ category: "HARM_CATEGORY_HATE_SPEECH",
+ threshold: "BLOCK_NONE"
+ },
+ {
+ category: "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+ threshold: "BLOCK_NONE"
+ },
+ {
+ category: "HARM_CATEGORY_DANGEROUS_CONTENT",
+ threshold: "BLOCK_NONE"
+ },
+ {
+ category: "HARM_CATEGORY_CIVIC_INTEGRITY",
+ threshold: "BLOCK_NONE"
+ }
+ ],
+ generationConfig: {
+ stopSequences: params.stop,
+ temperature: params.temperature,
+ maxOutputTokens: params.model.includes("vision") ? void 0 : params.maxTokens,
+ topP: params.topP
+ // thinkingConfig: { includeThoughts: true }
+ },
+ tools: params.tools != null || this._pluginConfig.googleSearch ? formatToolsToGeminiAITools(
+ params.tools ?? [],
+ this._pluginConfig,
+ params.model
+ ) : void 0
+ },
+ {
+ signal: params.signal
+ }
+ );
+ const parsedResponse = await response.json();
+ let groundingContent = "";
+ let currentGroudingIndex = 0;
+ let reasoningContent = "";
+ let content = "";
+ const functionCall = {
+ name: "",
+ args: "",
+ arguments: ""
+ };
+ const part = parsedResponse.candidates[0].content;
+ const messagePart = partAsType(part.parts[0]);
+ const chatFunctionCallingPart = partAsType(
+ part.parts[0]
+ );
+ if (messagePart.text) {
+ if (messagePart.thought) {
+ reasoningContent = messagePart.text;
+ }
+ content = messagePart.text;
+ }
+ const deltaFunctionCall = chatFunctionCallingPart.functionCall;
+ if (deltaFunctionCall) {
+ let args = deltaFunctionCall.args?.input ?? deltaFunctionCall.args;
+ try {
+ let parsedArgs = JSON.parse(args);
+ if (typeof parsedArgs !== "string") {
+ args = parsedArgs;
+ }
+ parsedArgs = JSON.parse(args);
+ if (typeof parsedArgs !== "string") {
+ args = parsedArgs;
+ }
+ } catch (e) {
+ }
+ functionCall.args = JSON.stringify(args);
+ functionCall.name = deltaFunctionCall.name;
+ functionCall.arguments = deltaFunctionCall.args;
+ }
+ for (const source of parsedResponse.candidates[0].groundingMetadata?.groundingChunks ?? []) {
+ groundingContent += `[^${currentGroudingIndex++}]: [${source.web.title}](${source.web.uri})
+ `;
+ }
+ try {
+ const messageChunk = new AIMessageChunk2(content);
+ messageChunk.additional_kwargs = {
+ function_call: functionCall.name.length > 0 ? {
+ name: functionCall.name,
+ arguments: functionCall.args,
+ args: functionCall.arguments
+ } : void 0
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ };
+ messageChunk.content = content;
+ if (groundingContent.length > 0) {
+ logger.debug(`grounding content: ${groundingContent}`);
+ if (this._pluginConfig.groundingContentDisplay) {
+ messageChunk.content += `
+
+ ${groundingContent}`;
+ }
+ }
+ if (reasoningContent.length > 0) {
+ logger.debug(`reasoning content: ${reasoningContent}`);
+ }
+ const generationChunk = new ChatGenerationChunk({
+ message: messageChunk,
+ text: messageChunk.content
+ });
+ return generationChunk;
+ } catch (e) {
+ logger.error("error with", parsedResponse);
+ throw new ChatLunaError(ChatLunaErrorCode.API_REQUEST_FAILED, e);
+ }
+ } catch (e) {
+ if (e instanceof ChatLunaError) {
+ throw e;
+ } else {
+ throw new ChatLunaError(ChatLunaErrorCode.API_REQUEST_FAILED, e);
+ }
+ }
+ }
  async *completionStream(params) {
  try {
  const response = await this._post(
@@ -357,6 +512,8 @@ var GeminiRequester = class extends ModelRequester {
  );
  const jsonParser = new JSONParser();
  const writable = stream.writable.getWriter();
+ let groundingContent = "";
+ let currentGroudingIndex = 0;
  jsonParser.onEnd = async () => {
  await writable.close();
  };
@@ -373,9 +530,13 @@ var GeminiRequester = class extends ModelRequester {
  for (const part of parts) {
  await writable.write(part);
  }
+ for (const source of candidate.groundingMetadata?.groundingChunks ?? []) {
+ groundingContent += `[^${currentGroudingIndex++}]: [${source.web.title}](${source.web.uri})
+ `;
+ }
  }
  };
- await sse(
+ sse(
  response,
  async (rawData) => {
  jsonParser.write(rawData);
@@ -383,9 +544,8 @@ var GeminiRequester = class extends ModelRequester {
  },
  0
  );
- let content = "";
  let reasoningContent = "";
- let isOldVisionModel = params.model.includes("vision");
+ let content = "";
  const functionCall = {
  name: "",
  args: "",
@@ -399,15 +559,7 @@ var GeminiRequester = class extends ModelRequester {
  reasoningContent += messagePart.text;
  continue;
  }
- if (params.tools != null) {
- content = messagePart.text;
- } else {
- content += messagePart.text;
- }
- if (isOldVisionModel && /\s*model:\s*/.test(content)) {
- isOldVisionModel = false;
- content = messagePart.text.replace(/\s*model:\s*/, "");
- }
+ content = messagePart.text;
  }
  const deltaFunctionCall = chatFunctionCallingPart.functionCall;
  if (deltaFunctionCall) {
@@ -460,6 +612,20 @@ var GeminiRequester = class extends ModelRequester {
  if (reasoningContent.length > 0) {
  logger.debug(`reasoning content: ${reasoningContent}`);
  }
+ if (groundingContent.length > 0) {
+ logger.debug(`grounding content: ${groundingContent}`);
+ if (this._pluginConfig.groundingContentDisplay) {
+ const groundingMessage = new AIMessageChunk2(
+ `
+ ${groundingContent}`
+ );
+ const generationChunk = new ChatGenerationChunk({
+ message: groundingMessage,
+ text: "\n" + groundingContent
+ });
+ yield generationChunk;
+ }
+ }
  } catch (e) {
  if (e instanceof ChatLunaError) {
  throw e;
@@ -612,13 +778,13 @@ var GeminiClient = class extends PlatformModelAndEmbeddingsClient {
  if (model2.includes("gemini-1.5-pro")) {
  return 1048576;
  }
- if (model2.includes("gemini-1.5-flash")) {
+ if (model2.includes("gemini-1.5-flash") || model2.includes("gemini-2.0-pro")) {
  return 2097152;
  }
  if (model2.includes("gemini-1.0-pro")) {
  return 30720;
  }
- return 30720;
+ return 1048576;
  })(model),
  type: model.includes("embedding") ? ModelType.embeddings : ModelType.llm,
  functionCall: !model.includes("vision"),
@@ -709,6 +875,7 @@ var Config3 = Schema.intersect([
  maxTokens: Schema.number().min(16).max(2097e3).step(16).default(8064),
  temperature: Schema.percent().min(0).max(2).step(0.1).default(0.8),
  googleSearch: Schema.boolean().default(false),
+ groundingContentDisplay: Schema.boolean().default(false),
  searchThreshold: Schema.number().min(0).max(1).step(0.1).default(0.5)
  })
  ]).i18n({
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
  "name": "koishi-plugin-chatluna-google-gemini-adapter",
  "description": "google-gemini adapter for chatluna",
- "version": "1.1.0-alpha.0",
+ "version": "1.1.0-alpha.2",
  "main": "lib/index.cjs",
  "module": "lib/index.mjs",
  "typings": "lib/index.d.ts",
@@ -73,7 +73,7 @@
  },
  "peerDependencies": {
  "koishi": "^4.18.4",
- "koishi-plugin-chatluna": "^1.1.0-alpha.0"
+ "koishi-plugin-chatluna": "^1.1.0-alpha.2"
  },
  "koishi": {
  "description": {