koishi-plugin-chatluna-google-gemini-adapter 1.2.18 → 1.2.20

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/lib/index.mjs CHANGED
@@ -8,14 +8,14 @@ var __commonJS = (cb, mod) => function __require() {
  // src/locales/zh-CN.schema.yml
  var require_zh_CN_schema = __commonJS({
  "src/locales/zh-CN.schema.yml"(exports, module) {
- module.exports = { $inner: [{}, { $desc: "请求选项", platform: "适配器的平台名。(不懂请不要修改)", apiKeys: { $inner: ["Gemini 的 API Key", "Gemini API 的请求地址"], $desc: "Gemini 的 API Key 和请求地址列表。" } }, { $desc: "模型配置", maxTokens: "输入的最大上下文 Token(16~2097000,必须是 16 的倍数)。注意:仅当您使用的模型最大 Token 为 8000 及以上时,才建议设置超过 2000 token。", temperature: "回复的随机性程度,数值越高,回复越随机(范围:0~2)。", googleSearch: "为模型启用谷歌搜索。", thinkingBudget: "思考预算,范围:(-1~24576),设置的数值越大,思考时花费的 Token 越多,-1 为动态思考。目前仅支持 gemini 2.5 系列模型。", groundingContentDisplay: "是否显示谷歌搜索结果。", imageGeneration: "为模型启用图像生成。目前仅支持 `gemini-2.0-flash-exp` 模型。", searchThreshold: "搜索的[置信度阈值](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval),范围:0~1,设置的数值越低,则越倾向于使用谷歌搜索。(仅支持 `gemini-1.5` 系列模型。gemini 2.0 模型起使用动态的工具调用)", includeThoughts: "是否获取模型的思考内容。", codeExecution: "为模型启用代码执行工具。", urlContext: "为模型启用 URL 内容获取工具。" }] };
+ module.exports = { $inner: [{}, { $desc: "请求选项", platform: "适配器的平台名。(不懂请不要修改)", apiKeys: { $inner: ["Gemini 的 API Key", "Gemini API 的请求地址"], $desc: "Gemini 的 API Key 和请求地址列表。" } }, { $desc: "模型配置", maxTokens: "输入的最大上下文 Token(16~2097000,必须是 16 的倍数)。注意:仅当您使用的模型最大 Token 为 8000 及以上时,才建议设置超过 2000 token。", temperature: "回复的随机性程度,数值越高,回复越随机(范围:0~2)。", googleSearch: "为模型启用谷歌搜索。", thinkingBudget: "思考预算,范围:(-1~24576),设置的数值越大,思考时花费的 Token 越多,-1 为动态思考。目前仅支持 gemini 2.5 系列模型。", groundingContentDisplay: "是否显示谷歌搜索结果。", imageGeneration: "为模型启用图像生成。目前仅支持 `gemini-2.0-flash-exp` 和 `gemini-2.5-flash-image-preview` 模型。", searchThreshold: "搜索的[置信度阈值](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval),范围:0~1,设置的数值越低,则越倾向于使用谷歌搜索。(仅支持 `gemini-1.5` 系列模型。gemini 2.0 模型起使用动态的工具调用)", includeThoughts: "是否获取模型的思考内容。", codeExecution: "为模型启用代码执行工具。", urlContext: "为模型启用 URL 内容获取工具。", nonStreaming: "强制不启用流式返回。开启后,将总是以非流式发起请求,即便配置了 stream 参数。" }] };
  }
  });

  // src/locales/en-US.schema.yml
  var require_en_US_schema = __commonJS({
  "src/locales/en-US.schema.yml"(exports, module) {
- module.exports = { $inner: [{}, { $desc: "API Configuration", platform: "Adapter platform name. (Do not modify if you do not understand)", apiKeys: { $inner: ["Gemini API Key", "Gemini API Endpoint (optional)"], $desc: "Gemini API access credentials" } }, { $desc: "Model Parameters", maxTokens: "Max output tokens (16-2097000, multiple of 16). >2000 for 8k+ models", temperature: "Sampling temperature (0-2). Higher: more random, Lower: more deterministic", googleSearch: "Enable Google search", thinkingBudget: "Thinking budget (-1-24576). (0: dynamic thinking) Higher: more tokens spent on thinking. Currently only supports `gemini-2.5` series models.", groundingContentDisplay: "Enable display of search results", imageGeneration: "Enable image generation (only for `gemini-2.0-flash-exp` model)", searchThreshold: "Search confidence [threshold](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval) (0-1). Lower: more likely to use Google search", includeThoughts: "Enable retrieval of model thoughts", codeExecution: "Enable code execution tool", urlContext: "Enable URL context retrieval tool" }] };
+ module.exports = { $inner: [{}, { $desc: "API Configuration", platform: "Adapter platform name. (Do not modify if you do not understand)", apiKeys: { $inner: ["Gemini API Key", "Gemini API Endpoint (optional)"], $desc: "Gemini API access credentials" } }, { $desc: "Model Parameters", maxTokens: "Max output tokens (16-2097000, multiple of 16). >2000 for 8k+ models", temperature: "Sampling temperature (0-2). Higher: more random, Lower: more deterministic", googleSearch: "Enable Google search", thinkingBudget: "Thinking budget (-1-24576). (0: dynamic thinking) Higher: more tokens spent on thinking. Currently only supports `gemini-2.5` series models.", groundingContentDisplay: "Enable display of search results", imageGeneration: "Enable image generation (only for `gemini-2.0-flash-exp` and `gemini-2.5-flash-image-preview` model)", searchThreshold: "Search confidence [threshold](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval) (0-1). Lower: more likely to use Google search", includeThoughts: "Enable retrieval of model thoughts", codeExecution: "Enable code execution tool", urlContext: "Enable URL context retrieval tool", nonStreaming: "Force disable streaming response. When enabled, requests will always be made in non-streaming mode, even if the stream parameter is configured." }] };
  }
  });

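Note: the locale strings above describe the two user-facing additions in this release: imageGeneration now also covers gemini-2.5-flash-image-preview, and a new nonStreaming switch forces every request onto the non-streaming endpoint. A minimal sketch of enabling both when loading the adapter from code; the import style and the [key, endpoint] tuple shape for apiKeys are assumptions inferred from the schema text, not taken from this diff:

    import { Context } from 'koishi'
    import * as gemini from 'koishi-plugin-chatluna-google-gemini-adapter'

    const ctx = new Context()
    ctx.plugin(gemini, {
        // apiKeys: a list of [API key, endpoint] pairs per the schema above
        apiKeys: [['<your-gemini-api-key>', 'https://generativelanguage.googleapis.com/v1beta']],
        imageGeneration: true, // now also honored for gemini-2.5-flash-image-preview
        nonStreaming: true // always request generateContent instead of streamGenerateContent
    })
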
@@ -38,7 +38,9 @@ import {
  } from "koishi-plugin-chatluna/utils/error";

  // src/requester.ts
- import { AIMessageChunk as AIMessageChunk2 } from "@langchain/core/messages";
+ import {
+ AIMessageChunk as AIMessageChunk2
+ } from "@langchain/core/messages";
  import { ChatGenerationChunk } from "@langchain/core/outputs";
  import {
  ModelRequester
@@ -309,8 +311,96 @@ function messageTypeToGeminiRole(type) {
  }
  }
  __name(messageTypeToGeminiRole, "messageTypeToGeminiRole");
+ function prepareModelConfig(params, pluginConfig) {
+ let model = params.model;
+ let enabledThinking = null;
+ if (model.includes("-thinking") && model.includes("gemini-2.5")) {
+ enabledThinking = !model.includes("-non-thinking");
+ model = model.replace("-nom-thinking", "").replace("-thinking", "");
+ }
+ let thinkingBudget = pluginConfig.thinkingBudget ?? -1;
+ if (!enabledThinking && !model.includes("2.5-pro")) {
+ thinkingBudget = 0;
+ } else if (thinkingBudget >= 0 && thinkingBudget < 128) {
+ thinkingBudget = 128;
+ }
+ let imageGeneration = pluginConfig.imageGeneration ?? false;
+ if (imageGeneration) {
+ imageGeneration = params.model.includes("gemini-2.0-flash-exp") || params.model.includes("gemini-2.5-flash-image");
+ }
+ return { model, enabledThinking, thinkingBudget, imageGeneration };
+ }
+ __name(prepareModelConfig, "prepareModelConfig");
+ function createSafetySettings(model) {
+ const isGemini2 = model.includes("gemini-2");
+ return [
+ {
+ category: "HARM_CATEGORY_HARASSMENT",
+ threshold: isGemini2 ? "OFF" : "BLOCK_NONE"
+ },
+ {
+ category: "HARM_CATEGORY_HATE_SPEECH",
+ threshold: isGemini2 ? "OFF" : "BLOCK_NONE"
+ },
+ {
+ category: "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+ threshold: isGemini2 ? "OFF" : "BLOCK_NONE"
+ },
+ {
+ category: "HARM_CATEGORY_DANGEROUS_CONTENT",
+ threshold: isGemini2 ? "OFF" : "BLOCK_NONE"
+ },
+ {
+ category: "HARM_CATEGORY_CIVIC_INTEGRITY",
+ threshold: isGemini2 ? "OFF" : "BLOCK_NONE"
+ }
+ ];
+ }
+ __name(createSafetySettings, "createSafetySettings");
+ function createGenerationConfig(params, modelConfig, pluginConfig) {
+ return {
+ stopSequences: params.stop,
+ temperature: params.temperature,
+ maxOutputTokens: params.model.includes("vision") ? void 0 : params.maxTokens,
+ topP: params.topP,
+ responseModalities: modelConfig.imageGeneration ? ["TEXT", "IMAGE"] : void 0,
+ thinkingConfig: modelConfig.enabledThinking != null || pluginConfig.includeThoughts ? {
+ thinkingBudget: modelConfig.thinkingBudget,
+ includeThoughts: pluginConfig.includeThoughts
+ } : void 0
+ };
+ }
+ __name(createGenerationConfig, "createGenerationConfig");
+ async function createChatGenerationParams(params, modelConfig, pluginConfig) {
+ const geminiMessages = await langchainMessageToGeminiMessage(
+ params.input,
+ modelConfig.model
+ );
+ const [systemInstruction, modelMessages] = extractSystemMessages(geminiMessages);
+ return {
+ contents: modelMessages,
+ safetySettings: createSafetySettings(params.model),
+ generationConfig: createGenerationConfig(
+ params,
+ modelConfig,
+ pluginConfig
+ ),
+ system_instruction: systemInstruction != null ? systemInstruction : void 0,
+ tools: params.tools != null || pluginConfig.googleSearch || pluginConfig.codeExecution || pluginConfig.urlContext ? formatToolsToGeminiAITools(
+ params.tools ?? [],
+ pluginConfig,
+ params.model
+ ) : void 0
+ };
+ }
+ __name(createChatGenerationParams, "createChatGenerationParams");
+ function isChatResponse(response) {
+ return "candidates" in response;
+ }
+ __name(isChatResponse, "isChatResponse");

  // src/requester.ts
+ import { getMessageContent } from "koishi-plugin-chatluna/utils/string";
  var GeminiRequester = class extends ModelRequester {
  constructor(ctx, _configPool, _pluginConfig, _plugin) {
  super(ctx, _configPool, _pluginConfig, _plugin);
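
Note: prepareModelConfig centralizes the model-name conventions that the old inline code in completionStreamInternal handled. A sketch of its observable behavior on a hypothetical model name; the commentary follows the function body above, not external docs. Note also that the code strips "-nom-thinking" rather than "-non-thinking", so a "...-non-thinking" name keeps a "-non" remnant after stripping; this apparent typo is carried over unchanged from 1.2.18.

    // Hypothetical inputs; only the logic comes from prepareModelConfig above.
    const cfg = prepareModelConfig(
        { model: 'gemini-2.5-flash-thinking' }, // params
        { thinkingBudget: 64, imageGeneration: false } // pluginConfig
    )
    // cfg.model === 'gemini-2.5-flash'  ('-thinking' suffix stripped)
    // cfg.enabledThinking === true
    // cfg.thinkingBudget === 128        (budgets in [0, 128) are raised to the 128 floor)
    // cfg.imageGeneration === false
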
@@ -319,211 +409,38 @@ var GeminiRequester = class extends ModelRequester {
  static {
  __name(this, "GeminiRequester");
  }
+ async completion(params) {
+ if (!this._pluginConfig.nonStreaming) {
+ return super.completion(params);
+ }
+ return await this.completionInternal(params);
+ }
  async *completionStreamInternal(params) {
+ if (this._pluginConfig.nonStreaming) {
+ const generation = await this.completion(params);
+ yield new ChatGenerationChunk({
+ generationInfo: generation.generationInfo,
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ message: generation.message,
+ text: generation.text
+ });
+ return;
+ }
+ const modelConfig = prepareModelConfig(params, this._pluginConfig);
  try {
- let model = params.model;
- let enabledThinking = null;
- if (model.includes("-thinking") && model.includes("gemini-2.5")) {
- enabledThinking = !model.includes("-non-thinking");
- model = model.replace("-nom-thinking", "").replace("-thinking", "");
- }
- const geminiMessages = await langchainMessageToGeminiMessage(
- params.input,
- model
- );
- const [systemInstruction, modelMessages] = extractSystemMessages(geminiMessages);
- let thinkingBudget = this._pluginConfig.thinkingBudget ?? -1;
- if (!enabledThinking && !model.includes("2.5-pro")) {
- thinkingBudget = 0;
- } else if (thinkingBudget >= 0 && thinkingBudget < 128) {
- thinkingBudget = 128;
- }
- let imageGeneration = this._pluginConfig.imageGeneration ?? false;
- if (imageGeneration) {
- imageGeneration = params.model.includes("gemini-2.0-flash-exp") || params.model.includes("gemini-2.5-flash-image");
- }
  const response = await this._post(
- `models/${model}:streamGenerateContent?alt=sse`,
- {
- contents: modelMessages,
- safetySettings: [
- {
- category: "HARM_CATEGORY_HARASSMENT",
- threshold: params.model.includes("gemini-2") ? "OFF" : "BLOCK_NONE"
- },
- {
- category: "HARM_CATEGORY_HATE_SPEECH",
- threshold: params.model.includes("gemini-2") ? "OFF" : "BLOCK_NONE"
- },
- {
- category: "HARM_CATEGORY_SEXUALLY_EXPLICIT",
- threshold: params.model.includes("gemini-2") ? "OFF" : "BLOCK_NONE"
- },
- {
- category: "HARM_CATEGORY_DANGEROUS_CONTENT",
- threshold: params.model.includes("gemini-2") ? "OFF" : "BLOCK_NONE"
- },
- {
- category: "HARM_CATEGORY_CIVIC_INTEGRITY",
- threshold: params.model.includes("gemini-2.0") ? "OFF" : "BLOCK_NONE"
- }
- ],
- generationConfig: {
- stopSequences: params.stop,
- temperature: params.temperature,
- maxOutputTokens: params.model.includes("vision") ? void 0 : params.maxTokens,
- topP: params.topP,
- responseModalities: imageGeneration ? ["TEXT", "IMAGE"] : void 0,
- thinkingConfig: enabledThinking != null || this._pluginConfig.includeThoughts ? {
- thinkingBudget,
- includeThoughts: this._pluginConfig.includeThoughts
- } : void 0
- },
- system_instruction: systemInstruction != null ? systemInstruction : void 0,
- tools: params.tools != null || this._pluginConfig.googleSearch || this._pluginConfig.codeExecution || this._pluginConfig.urlContext ? formatToolsToGeminiAITools(
- params.tools ?? [],
- this._pluginConfig,
- params.model
- ) : void 0
- },
+ `models/${modelConfig.model}:streamGenerateContent?alt=sse`,
+ await createChatGenerationParams(
+ params,
+ modelConfig,
+ this._pluginConfig
+ ),
  {
  signal: params.signal
  }
  );
- let errorCount = 0;
- let groundingContent = "";
- let currentGroundingIndex = 0;
  await checkResponse(response);
- const readableStream = new ReadableStream({
- async start(controller) {
- for await (const chunk of sseIterable(response)) {
- controller.enqueue(chunk.data);
- }
- controller.close();
- }
- });
- const transformToChatPartStream = new TransformStream({
- async transform(chunk, controller) {
- if (chunk === "undefined") {
- return;
- }
- const parsedValue = JSON.parse(chunk);
- const transformValue = parsedValue;
- if (!transformValue.candidates) {
- return;
- }
- for (const candidate of transformValue.candidates) {
- const parts = candidate.content?.parts;
- if ((parts == null || parts.length < 1) && candidate.finishReason !== "STOP") {
- throw new Error(chunk);
- } else if (candidate.finishReason === "STOP" && parts == null) {
- continue;
- }
- for (const part of parts) {
- controller.enqueue(part);
- }
- for (const source of candidate.groundingMetadata?.groundingChunks ?? []) {
- groundingContent += `[^${currentGroundingIndex++}]: [${source.web.title}](${source.web.uri})
`;
- }
- }
- }
- });
- const iterable = readableStreamToAsyncIterable(
- readableStream.pipeThrough(transformToChatPartStream)
- );
- let reasoningContent = "";
- let content = "";
- const functionCall = {
- name: "",
- args: "",
- arguments: ""
- };
- for await (const chunk of iterable) {
- const messagePart = partAsType(chunk);
- const chatFunctionCallingPart = partAsType(chunk);
- const imagePart = partAsTypeCheck(
- chunk,
- (part) => part["inlineData"] != null
- );
- if (messagePart.text) {
- if (messagePart.thought) {
- reasoningContent += messagePart.text;
- continue;
- }
- content = messagePart.text;
- } else if (imagePart) {
- messagePart.text = `![image](data:${imagePart.inlineData.mimeType ?? "image/png"};base64,${imagePart.inlineData.data})`;
- content = messagePart.text;
- }
- const deltaFunctionCall = chatFunctionCallingPart?.functionCall;
- if (deltaFunctionCall) {
- let args = deltaFunctionCall.args;
- try {
- let parsedArgs = JSON.parse(args);
- if (typeof parsedArgs !== "string") {
- args = parsedArgs;
- }
- parsedArgs = JSON.parse(args);
- if (typeof parsedArgs !== "string") {
- args = parsedArgs;
- }
- } catch (e) {
- }
- functionCall.args = JSON.stringify(args);
- functionCall.name = deltaFunctionCall.name;
- functionCall.arguments = deltaFunctionCall.args;
- }
- try {
- const messageChunk = new AIMessageChunk2(content);
- messageChunk.additional_kwargs = {
- function_call: functionCall.name.length > 0 ? {
- name: functionCall.name,
- arguments: functionCall.args,
- args: functionCall.arguments
- } : void 0,
- images: imagePart ? [
- `data:${imagePart.inlineData.mimeType ?? "image/png"};base64,${imagePart.inlineData.data}`
- ] : void 0
- // eslint-disable-next-line @typescript-eslint/no-explicit-any
- };
- messageChunk.content = content;
- const generationChunk = new ChatGenerationChunk({
- message: messageChunk,
- text: messageChunk.content
- });
- yield generationChunk;
- content = messageChunk.content;
- } catch (e) {
- if (errorCount > 5) {
- logger.error("error with chunk", chunk);
- throw new ChatLunaError(
- ChatLunaErrorCode.API_REQUEST_FAILED,
- e
- );
- } else {
- errorCount++;
- continue;
- }
- }
- }
- if (reasoningContent.length > 0) {
- logger.debug(`reasoning content: ${reasoningContent}`);
- }
- if (groundingContent.length > 0) {
- logger.debug(`grounding content: ${groundingContent}`);
- if (this._pluginConfig.groundingContentDisplay) {
- const groundingMessage = new AIMessageChunk2(
- `
${groundingContent}`
- );
- const generationChunk = new ChatGenerationChunk({
- message: groundingMessage,
- text: "\n" + groundingContent
- });
- yield generationChunk;
- }
- }
+ yield* this._processResponseStream(response);
  } catch (e) {
  if (e instanceof ChatLunaError) {
  throw e;
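
Note: after this refactor the streaming and non-streaming paths share one request builder; only the endpoint differs. A condensed sketch of the two call shapes, with a generic post helper standing in for the class's _post (the URLs and parameter names come from the diff; the helper is an assumption):

    const body = await createChatGenerationParams(params, modelConfig, pluginConfig)
    const url = pluginConfig.nonStreaming
        ? `models/${modelConfig.model}:generateContent?alt=sse` // completionInternal
        : `models/${modelConfig.model}:streamGenerateContent?alt=sse` // completionStreamInternal
    const response = await post(url, body, { signal: params.signal })
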
@@ -532,42 +449,41 @@ ${groundingContent}`
  }
  }
  }
- async embeddings(params) {
- let data;
- if (typeof params.input === "string") {
- params.input = [params.input];
- }
+ async completionInternal(params) {
+ const modelConfig = prepareModelConfig(params, this._pluginConfig);
  try {
  const response = await this._post(
- `models/${params.model}:batchEmbedContents`,
+ `models/${modelConfig.model}:generateContent?alt=sse`,
+ await createChatGenerationParams(
+ params,
+ modelConfig,
+ this._pluginConfig
+ ),
  {
- requests: params.input.map((input) => {
- return {
- model: `models/${params.model}`,
- content: {
- parts: [
- {
- text: input
- }
- ]
- }
- };
- })
+ signal: params.signal
  }
  );
- data = await response.text();
- data = JSON.parse(data);
- if (data.embeddings && data.embeddings.length > 0) {
- return data.embeddings.map((embedding) => {
- return embedding.values;
- });
+ await checkResponse(response);
+ return await this._processResponse(response);
+ } catch (e) {
+ if (e instanceof ChatLunaError) {
+ throw e;
+ } else {
+ throw new ChatLunaError(ChatLunaErrorCode.API_REQUEST_FAILED, e);
  }
- throw new Error(
- "error when calling gemini embeddings, Result: " + JSON.stringify(data)
+ }
+ }
+ async embeddings(params) {
+ const input = this._prepareEmbeddingsInput(params.input);
+ try {
+ const response = await this._post(
+ `models/${params.model}:batchEmbedContents`,
+ this._createEmbeddingsRequest(params.model, input)
  );
+ return await this._processEmbeddingsResponse(response);
  } catch (e) {
  const error = new Error(
- "error when calling gemini embeddings, Result: " + JSON.stringify(data)
+ "error when calling gemini embeddings, Error: " + e.message
  );
  error.stack = e.stack;
  error.cause = e.cause;
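
Note: the embeddings path keeps its behavior and is only split into helpers. The request body that _createEmbeddingsRequest builds for models/<model>:batchEmbedContents looks like this for two inputs (a sketch derived from the code in this diff; the model name text-embedding-004 is illustrative, not taken from the package):

    {
        "requests": [
            { "model": "models/text-embedding-004", "content": { "parts": [{ "text": "hello" }] } },
            { "model": "models/text-embedding-004", "content": { "parts": [{ "text": "world" }] } }
        ]
    }
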
@@ -575,34 +491,333 @@ ${groundingContent}`
  throw new ChatLunaError(ChatLunaErrorCode.API_REQUEST_FAILED, error);
  }
  }
+ _prepareEmbeddingsInput(input) {
+ return typeof input === "string" ? [input] : input;
+ }
+ _createEmbeddingsRequest(model, input) {
+ return {
+ requests: input.map((text) => ({
+ model: `models/${model}`,
+ content: {
+ parts: [{ text }]
+ }
+ }))
+ };
+ }
+ async _processEmbeddingsResponse(response) {
+ const data = JSON.parse(
+ await response.text()
+ );
+ if (data.embeddings?.length > 0) {
+ return data.embeddings.map((embedding) => embedding.values);
+ }
+ throw new Error(
+ "error when calling gemini embeddings, Result: " + JSON.stringify(data)
+ );
+ }
  async getModels() {
- let data;
  try {
  const response = await this._get("models");
- data = await response.text();
- data = JSON.parse(data);
- if (!data.models || !data.models.length) {
- throw new Error(
- "error when listing gemini models, Result:" + JSON.stringify(data)
- );
- }
- return data.models.filter(
- (model) => model.name.includes("gemini") || model.name.includes("gemma") || model.name.includes("embedding")
- ).map((model) => {
- return {
- ...model,
- name: model.name.replace("models/", "")
- };
- });
+ const data = await this._parseModelsResponse(response);
+ return this._filterAndTransformModels(data.models);
  } catch (e) {
  const error = new Error(
- "error when listing gemini models, Result: " + JSON.stringify(data)
+ "error when listing gemini models, Error: " + e.message
  );
  error.stack = e.stack;
  error.cause = e.cause;
  throw error;
  }
  }
+ async _parseModelsResponse(response) {
+ const text = await response.text();
+ const data = JSON.parse(text);
+ if (!data.models?.length) {
+ throw new Error(
+ "error when listing gemini models, Result:" + JSON.stringify(data)
+ );
+ }
+ return data;
+ }
+ _filterAndTransformModels(models) {
+ return models.filter(
+ (model) => ["gemini", "gemma", "embedding"].some(
+ (keyword) => model.name.includes(keyword)
+ )
+ ).map((model) => ({
+ ...model,
+ name: model.name.replace("models/", "")
+ }));
+ }
+ async _processResponse(response) {
+ const { groundingContent, currentGroundingIndex } = this._createStreamContext();
+ const responseText = await response.text();
+ let parsedResponse;
+ try {
+ parsedResponse = JSON.parse(responseText);
+ if (!parsedResponse.candidates) {
+ throw new ChatLunaError(
+ ChatLunaErrorCode.API_REQUEST_FAILED,
+ new Error(
+ "error when calling gemini, Result: " + responseText
+ )
+ );
+ }
+ } catch (e) {
+ if (e instanceof ChatLunaError) {
+ throw e;
+ } else {
+ throw new ChatLunaError(
+ ChatLunaErrorCode.API_REQUEST_FAILED,
+ new Error(
+ "error when calling gemini, Result: " + responseText
+ )
+ );
+ }
+ }
+ const iterable = this._setupStreamTransform(
+ parsedResponse,
+ groundingContent,
+ currentGroundingIndex
+ );
+ let result;
+ let reasoningContent = "";
+ for await (const chunk of this._processChunks(iterable)) {
+ if (chunk.type === "reasoning") {
+ reasoningContent = chunk.content;
+ } else {
+ result = result != null ? result.concat(chunk.generation) : chunk.generation;
+ }
+ }
+ const finalChunk = this._handleFinalContent(
+ reasoningContent,
+ groundingContent.value
+ );
+ if (finalChunk != null) {
+ result = result.concat(finalChunk);
+ }
+ return result;
+ }
+ async *_processResponseStream(response) {
+ const { groundingContent, currentGroundingIndex } = this._createStreamContext();
+ const iterable = this._setupStreamTransform(
+ response,
+ groundingContent,
+ currentGroundingIndex
+ );
+ let reasoningContent = "";
+ for await (const chunk of this._processChunks(iterable)) {
+ if (chunk.type === "reasoning") {
+ reasoningContent = chunk.content;
+ } else {
+ yield chunk.generation;
+ }
+ }
+ const finalContent = this._handleFinalContent(reasoningContent, groundingContent.value);
+ if (finalContent != null) {
+ yield finalContent;
+ }
+ }
+ _createStreamContext() {
+ return {
+ groundingContent: { value: "" },
+ currentGroundingIndex: { value: 0 }
+ };
+ }
+ _setupStreamTransform(response, groundingContent, currentGroundingIndex) {
+ const transformToChatPartStream = this._createTransformStream(
+ groundingContent,
+ currentGroundingIndex
+ );
+ const readableStream = new ReadableStream({
+ async start(controller) {
+ if (isChatResponse(response)) {
+ controller.enqueue(response);
+ controller.close();
+ return;
+ }
+ for await (const chunk of sseIterable(response)) {
+ controller.enqueue(chunk.data);
+ }
+ controller.close();
+ }
+ });
+ return readableStreamToAsyncIterable(
+ readableStream.pipeThrough(transformToChatPartStream)
+ );
+ }
+ _createTransformStream(groundingContent, currentGroundingIndex) {
+ const that = this;
+ return new TransformStream({
+ async transform(chunk, controller) {
+ if (chunk === "undefined") {
+ return;
+ }
+ const transformValue = typeof chunk === "string" ? JSON.parse(chunk) : chunk;
+ if (!transformValue?.candidates) {
+ return;
+ }
+ for (const candidate of transformValue.candidates) {
+ that._processCandidateChunk(
+ candidate,
+ controller,
+ JSON.stringify(transformValue),
+ groundingContent,
+ currentGroundingIndex
+ );
+ }
+ }
+ });
+ }
+ _processCandidateChunk(candidate, controller, chunk, groundingContent, currentGroundingIndex) {
+ const parts = candidate.content?.parts;
+ if ((parts == null || parts.length < 1) && candidate.finishReason !== "STOP" && candidate.content === null) {
+ throw new Error(chunk);
+ } else if (candidate.finishReason === "STOP" && parts == null) {
+ return;
+ }
+ if (parts == null) {
+ return;
+ }
+ for (const part of parts) {
+ controller.enqueue(part);
+ }
+ for (const source of candidate.groundingMetadata?.groundingChunks ?? []) {
+ groundingContent.value += `[^${currentGroundingIndex.value++}]: [${source.web.title}](${source.web.uri})
`;
+ }
+ }
+ async *_processChunks(iterable) {
+ let reasoningContent = "";
+ let errorCount = 0;
+ const functionCall = {
+ name: "",
+ args: "",
+ arguments: ""
+ };
+ for await (const chunk of iterable) {
+ try {
+ const { updatedContent, updatedReasoning } = this._processChunk(
+ chunk,
+ reasoningContent,
+ functionCall
+ );
+ if (updatedReasoning !== reasoningContent) {
+ reasoningContent = updatedReasoning;
+ yield { type: "reasoning", content: reasoningContent };
+ continue;
+ }
+ if (updatedContent || functionCall.name) {
+ const messageChunk = this._createMessageChunk(
+ updatedContent,
+ functionCall,
+ partAsTypeCheck(
+ chunk,
+ (part) => part["inlineData"] != null
+ )
+ );
+ const generationChunk = new ChatGenerationChunk({
+ message: messageChunk,
+ text: getMessageContent(messageChunk.content) ?? ""
+ });
+ yield { type: "generation", generation: generationChunk };
+ }
+ } catch (e) {
+ if (errorCount > 5) {
+ logger.error("error with chunk", chunk);
+ throw new ChatLunaError(
+ ChatLunaErrorCode.API_REQUEST_FAILED,
+ e
+ );
+ } else {
+ errorCount++;
+ continue;
+ }
+ }
+ }
+ }
+ _processChunk(chunk, reasoningContent, functionCall) {
+ const messagePart = partAsType(chunk);
+ const chatFunctionCallingPart = partAsType(chunk);
+ const imagePart = partAsTypeCheck(
+ chunk,
+ (part) => part["inlineData"] != null
+ );
+ let messageContent;
+ if (messagePart.text) {
+ if (messagePart.thought) {
+ return {
+ updatedContent: messageContent,
+ updatedReasoning: reasoningContent + messagePart.text
+ };
+ }
+ messageContent = messagePart.text;
+ } else if (imagePart) {
+ messagePart.text = `![image](data:${imagePart.inlineData.mimeType ?? "image/png"};base64,${imagePart.inlineData.data})`;
+ messageContent = messagePart.text;
+ }
+ const deltaFunctionCall = chatFunctionCallingPart?.functionCall;
+ if (deltaFunctionCall) {
+ this._updateFunctionCall(functionCall, deltaFunctionCall);
+ }
+ return {
+ updatedContent: messageContent,
+ updatedReasoning: reasoningContent
+ };
+ }
+ _updateFunctionCall(functionCall, deltaFunctionCall) {
+ let args = deltaFunctionCall.args;
+ try {
+ let parsedArgs = JSON.parse(args);
+ if (typeof parsedArgs !== "string") {
+ args = parsedArgs;
+ }
+ parsedArgs = JSON.parse(args);
+ if (typeof parsedArgs !== "string") {
+ args = parsedArgs;
+ }
+ } catch (e) {
+ }
+ functionCall.args = JSON.stringify(args);
+ functionCall.name = deltaFunctionCall.name;
+ functionCall.arguments = deltaFunctionCall.args;
+ }
+ _handleFinalContent(reasoningContent, groundingContent) {
+ if (reasoningContent.length > 0) {
+ logger.debug(`reasoning content: ${reasoningContent}`);
+ }
+ if (groundingContent.length > 0) {
+ logger.debug(`grounding content: ${groundingContent}`);
+ if (this._pluginConfig.groundingContentDisplay) {
+ const groundingMessage = new AIMessageChunk2(
+ `
${groundingContent}`
+ );
+ const generationChunk = new ChatGenerationChunk({
+ message: groundingMessage,
+ text: "\n" + groundingContent
+ });
+ return generationChunk;
+ }
+ }
+ }
+ _createMessageChunk(content, functionCall, imagePart) {
+ const messageChunk = new AIMessageChunk2({
+ content
+ });
+ messageChunk.additional_kwargs = {
+ function_call: functionCall.name.length > 0 ? {
+ name: functionCall.name,
+ arguments: functionCall.args,
+ args: functionCall.arguments
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ } : void 0,
+ images: imagePart ? [
+ `data:${imagePart.inlineData.mimeType ?? "image/png"};base64,${imagePart.inlineData.data}`
+ ] : void 0
+ };
+ return messageChunk;
+ }
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  _post(url, data, params = {}) {
  const requestUrl = this._concatUrl(url);
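
Note: _createStreamContext boxes its accumulators as { value } objects because JavaScript strings and numbers are copied on assignment; the TransformStream callback built in _createTransformStream could not otherwise mutate state that the outer generator reads after the stream ends. A minimal sketch of the pattern in isolation (names and the example source are illustrative):

    const groundingContent = { value: '' }
    const currentGroundingIndex = { value: 0 }
    // Inside a stream callback: mutate the shared boxes, not local copies.
    const addSource = (title, uri) => {
        groundingContent.value += `[^${currentGroundingIndex.value++}]: [${title}](${uri})\n`
    }
    addSource('Example', 'https://example.com')
    console.log(groundingContent.value) // visible to the caller once streaming finishes
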
@@ -782,6 +997,7 @@ var Config4 = Schema.intersect([
  urlContext: Schema.boolean().default(false),
  thinkingBudget: Schema.number().min(-1).max(24576).default(-1),
  includeThoughts: Schema.boolean().default(false),
+ nonStreaming: Schema.boolean().default(false),
  imageGeneration: Schema.boolean().default(false),
  groundingContentDisplay: Schema.boolean().default(false),
  searchThreshold: Schema.number().min(0).max(1).step(0.1).default(0.5)
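
Note: this last hunk registers the new flag in the plugin's Config schema; GeminiRequester.completion earlier in the diff reads it as this._pluginConfig.nonStreaming. A standalone sketch of the flag's default behavior, assuming Koishi's Schema is the schemastery package (an assumption; the package's own Schema import is not shown in this diff):

    import Schema from 'schemastery'

    const Config = Schema.object({
        nonStreaming: Schema.boolean().default(false)
    })
    console.log(Config({})) // { nonStreaming: false }
    console.log(Config({ nonStreaming: true })) // { nonStreaming: true }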