koishi-plugin-chatluna-google-gemini-adapter 1.2.19 → 1.2.21
This diff shows the content of publicly available package versions as published to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their public registry.
- package/lib/index.cjs +464 -248
- package/lib/index.d.ts +1 -0
- package/lib/index.mjs +468 -249
- package/lib/requester.d.ts +19 -1
- package/lib/types.d.ts +8 -0
- package/lib/utils.d.ts +44 -1
- package/package.json +3 -3
package/lib/index.mjs
CHANGED
```diff
@@ -8,14 +8,14 @@ var __commonJS = (cb, mod) => function __require() {
 // src/locales/zh-CN.schema.yml
 var require_zh_CN_schema = __commonJS({
   "src/locales/zh-CN.schema.yml"(exports, module) {
-    module.exports = { $inner: [{}, { $desc: "请求选项", platform: "适配器的平台名。(不懂请不要修改)", apiKeys: { $inner: ["Gemini 的 API Key", "Gemini API 的请求地址"], $desc: "Gemini 的 API Key 和请求地址列表。" } }, { $desc: "模型配置", maxTokens: "输入的最大上下文 Token(16~2097000,必须是 16 的倍数)。注意:仅当您使用的模型最大 Token 为 8000 及以上时,才建议设置超过 2000 token。", temperature: "回复的随机性程度,数值越高,回复越随机(范围:0~2)。", googleSearch: "为模型启用谷歌搜索。", thinkingBudget: "思考预算,范围:(-1~24576),设置的数值越大,思考时花费的 Token 越多,-1 为动态思考。目前仅支持 gemini 2.5 系列模型。", groundingContentDisplay: "是否显示谷歌搜索结果。", imageGeneration: "为模型启用图像生成。目前仅支持 `gemini-2.0-flash-exp` 模型。", searchThreshold: "搜索的[置信度阈值](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval),范围:0~1,设置的数值越低,则越倾向于使用谷歌搜索。(仅支持 `gemini-1.5` 系列模型。gemini 2.0 模型起使用动态的工具调用)", includeThoughts: "是否获取模型的思考内容。", codeExecution: "为模型启用代码执行工具。", urlContext: "为模型启用 URL 内容获取工具。" }] };
+    module.exports = { $inner: [{}, { $desc: "请求选项", platform: "适配器的平台名。(不懂请不要修改)", apiKeys: { $inner: ["Gemini 的 API Key", "Gemini API 的请求地址"], $desc: "Gemini 的 API Key 和请求地址列表。" } }, { $desc: "模型配置", maxTokens: "输入的最大上下文 Token(16~2097000,必须是 16 的倍数)。注意:仅当您使用的模型最大 Token 为 8000 及以上时,才建议设置超过 2000 token。", temperature: "回复的随机性程度,数值越高,回复越随机(范围:0~2)。", googleSearch: "为模型启用谷歌搜索。", thinkingBudget: "思考预算,范围:(-1~24576),设置的数值越大,思考时花费的 Token 越多,-1 为动态思考。目前仅支持 gemini 2.5 系列模型。", groundingContentDisplay: "是否显示谷歌搜索结果。", imageGeneration: "为模型启用图像生成。目前仅支持 `gemini-2.0-flash-exp` 和 `gemini-2.5-flash-image-preview` 模型。", searchThreshold: "搜索的[置信度阈值](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval),范围:0~1,设置的数值越低,则越倾向于使用谷歌搜索。(仅支持 `gemini-1.5` 系列模型。gemini 2.0 模型起使用动态的工具调用)", includeThoughts: "是否获取模型的思考内容。", codeExecution: "为模型启用代码执行工具。", urlContext: "为模型启用 URL 内容获取工具。", nonStreaming: "强制不启用流式返回。开启后,将总是以非流式发起请求,即便配置了 stream 参数。" }] };
   }
 });

 // src/locales/en-US.schema.yml
 var require_en_US_schema = __commonJS({
   "src/locales/en-US.schema.yml"(exports, module) {
-    module.exports = { $inner: [{}, { $desc: "API Configuration", platform: "Adapter platform name. (Do not modify if you do not understand)", apiKeys: { $inner: ["Gemini API Key", "Gemini API Endpoint (optional)"], $desc: "Gemini API access credentials" } }, { $desc: "Model Parameters", maxTokens: "Max output tokens (16-2097000, multiple of 16). >2000 for 8k+ models", temperature: "Sampling temperature (0-2). Higher: more random, Lower: more deterministic", googleSearch: "Enable Google search", thinkingBudget: "Thinking budget (-1-24576). (0: dynamic thinking) Higher: more tokens spent on thinking. Currently only supports `gemini-2.5` series models.", groundingContentDisplay: "Enable display of search results", imageGeneration: "Enable image generation (only for `gemini-2.0-flash-exp` model)", searchThreshold: "Search confidence [threshold](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval) (0-1). Lower: more likely to use Google search", includeThoughts: "Enable retrieval of model thoughts", codeExecution: "Enable code execution tool", urlContext: "Enable URL context retrieval tool" }] };
+    module.exports = { $inner: [{}, { $desc: "API Configuration", platform: "Adapter platform name. (Do not modify if you do not understand)", apiKeys: { $inner: ["Gemini API Key", "Gemini API Endpoint (optional)"], $desc: "Gemini API access credentials" } }, { $desc: "Model Parameters", maxTokens: "Max output tokens (16-2097000, multiple of 16). >2000 for 8k+ models", temperature: "Sampling temperature (0-2). Higher: more random, Lower: more deterministic", googleSearch: "Enable Google search", thinkingBudget: "Thinking budget (-1-24576). (0: dynamic thinking) Higher: more tokens spent on thinking. Currently only supports `gemini-2.5` series models.", groundingContentDisplay: "Enable display of search results", imageGeneration: "Enable image generation (only for `gemini-2.0-flash-exp` and `gemini-2.5-flash-image-preview` model)", searchThreshold: "Search confidence [threshold](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval) (0-1). Lower: more likely to use Google search", includeThoughts: "Enable retrieval of model thoughts", codeExecution: "Enable code execution tool", urlContext: "Enable URL context retrieval tool", nonStreaming: "Force disable streaming response. When enabled, requests will always be made in non-streaming mode, even if the stream parameter is configured." }] };
   }
 });

```
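The locale strings above document the two user-visible changes in this release: image generation is now also advertised for `gemini-2.5-flash-image-preview`, and a new `nonStreaming` option forces every request into non-streaming mode. A minimal sketch of how the new flag would sit in the plugin's config object (the field names come from the `Schema` definition at the end of this diff; the key and endpoint values are placeholders):

```js
// Hypothetical config object for the adapter; only `nonStreaming` and the
// widened `imageGeneration` support are new in 1.2.21.
const config = {
  apiKeys: [["<gemini-api-key>", "https://generativelanguage.googleapis.com/v1beta"]],
  imageGeneration: true, // now honored for gemini-2.5-flash-image-preview as well
  nonStreaming: true // always call generateContent, never streamGenerateContent
};
```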
```diff
@@ -30,6 +30,7 @@ import {
   ChatLunaEmbeddings
 } from "koishi-plugin-chatluna/llm-core/platform/model";
 import {
+  ModelCapabilities,
   ModelType
 } from "koishi-plugin-chatluna/llm-core/platform/types";
 import {
@@ -38,7 +39,9 @@ import {
 } from "koishi-plugin-chatluna/utils/error";

 // src/requester.ts
-import {
+import {
+  AIMessageChunk as AIMessageChunk2
+} from "@langchain/core/messages";
 import { ChatGenerationChunk } from "@langchain/core/outputs";
 import {
   ModelRequester
@@ -309,8 +312,96 @@ function messageTypeToGeminiRole(type) {
   }
 }
 __name(messageTypeToGeminiRole, "messageTypeToGeminiRole");
+function prepareModelConfig(params, pluginConfig) {
+  let model = params.model;
+  let enabledThinking = null;
+  if (model.includes("-thinking") && model.includes("gemini-2.5")) {
+    enabledThinking = !model.includes("-non-thinking");
+    model = model.replace("-nom-thinking", "").replace("-thinking", "");
+  }
+  let thinkingBudget = pluginConfig.thinkingBudget ?? -1;
+  if (!enabledThinking && !model.includes("2.5-pro")) {
+    thinkingBudget = 0;
+  } else if (thinkingBudget >= 0 && thinkingBudget < 128) {
+    thinkingBudget = 128;
+  }
+  let imageGeneration = pluginConfig.imageGeneration ?? false;
+  if (imageGeneration) {
+    imageGeneration = params.model.includes("gemini-2.0-flash-exp") || params.model.includes("gemini-2.5-flash-image");
+  }
+  return { model, enabledThinking, thinkingBudget, imageGeneration };
+}
+__name(prepareModelConfig, "prepareModelConfig");
+function createSafetySettings(model) {
+  const isGemini2 = model.includes("gemini-2");
+  return [
+    {
+      category: "HARM_CATEGORY_HARASSMENT",
+      threshold: isGemini2 ? "OFF" : "BLOCK_NONE"
+    },
+    {
+      category: "HARM_CATEGORY_HATE_SPEECH",
+      threshold: isGemini2 ? "OFF" : "BLOCK_NONE"
+    },
+    {
+      category: "HARM_CATEGORY_SEXUALLY_EXPLICIT",
+      threshold: isGemini2 ? "OFF" : "BLOCK_NONE"
+    },
+    {
+      category: "HARM_CATEGORY_DANGEROUS_CONTENT",
+      threshold: isGemini2 ? "OFF" : "BLOCK_NONE"
+    },
+    {
+      category: "HARM_CATEGORY_CIVIC_INTEGRITY",
+      threshold: isGemini2 ? "OFF" : "BLOCK_NONE"
+    }
+  ];
+}
+__name(createSafetySettings, "createSafetySettings");
+function createGenerationConfig(params, modelConfig, pluginConfig) {
+  return {
+    stopSequences: params.stop,
+    temperature: params.temperature,
+    maxOutputTokens: params.model.includes("vision") ? void 0 : params.maxTokens,
+    topP: params.topP,
+    responseModalities: modelConfig.imageGeneration ? ["TEXT", "IMAGE"] : void 0,
+    thinkingConfig: modelConfig.enabledThinking != null || pluginConfig.includeThoughts ? {
+      thinkingBudget: modelConfig.thinkingBudget,
+      includeThoughts: pluginConfig.includeThoughts
+    } : void 0
+  };
+}
+__name(createGenerationConfig, "createGenerationConfig");
+async function createChatGenerationParams(params, modelConfig, pluginConfig) {
+  const geminiMessages = await langchainMessageToGeminiMessage(
+    params.input,
+    modelConfig.model
+  );
+  const [systemInstruction, modelMessages] = extractSystemMessages(geminiMessages);
+  return {
+    contents: modelMessages,
+    safetySettings: createSafetySettings(params.model),
+    generationConfig: createGenerationConfig(
+      params,
+      modelConfig,
+      pluginConfig
+    ),
+    system_instruction: systemInstruction != null ? systemInstruction : void 0,
+    tools: params.tools != null || pluginConfig.googleSearch || pluginConfig.codeExecution || pluginConfig.urlContext ? formatToolsToGeminiAITools(
+      params.tools ?? [],
+      pluginConfig,
+      params.model
+    ) : void 0
+  };
+}
+__name(createChatGenerationParams, "createChatGenerationParams");
+function isChatResponse(response) {
+  return "candidates" in response;
+}
+__name(isChatResponse, "isChatResponse");

 // src/requester.ts
+import { getMessageContent } from "koishi-plugin-chatluna/utils/string";
 var GeminiRequester = class extends ModelRequester {
   constructor(ctx, _configPool, _pluginConfig, _plugin) {
     super(ctx, _configPool, _pluginConfig, _plugin);
```
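The request-building logic that previously lived inline in `completionStreamInternal` is extracted here into pure helpers so that the streaming and non-streaming paths can share it. A worked example of `prepareModelConfig`, following the code in the hunk above (input values are illustrative):

```js
// The "-thinking" suffix is a ChatLuna-side pseudo model name, stripped
// before the real Gemini model name is sent to the API.
const cfg = prepareModelConfig(
  { model: "gemini-2.5-flash-thinking" },
  { thinkingBudget: 64, imageGeneration: false }
);
// cfg.model === "gemini-2.5-flash"  (suffix removed)
// cfg.enabledThinking === true
// cfg.thinkingBudget === 128        (budgets in 0..127 are clamped up to 128)
// For model names without the suffix (and not 2.5-pro), enabledThinking stays
// null and thinkingBudget is forced to 0, which disables thinking entirely.
```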
```diff
@@ -319,214 +410,38 @@ var GeminiRequester = class extends ModelRequester {
   static {
     __name(this, "GeminiRequester");
   }
+  async completion(params) {
+    if (!this._pluginConfig.nonStreaming) {
+      return super.completion(params);
+    }
+    return await this.completionInternal(params);
+  }
   async *completionStreamInternal(params) {
+    if (this._pluginConfig.nonStreaming) {
+      const generation = await this.completion(params);
+      yield new ChatGenerationChunk({
+        generationInfo: generation.generationInfo,
+        // eslint-disable-next-line @typescript-eslint/no-explicit-any
+        message: generation.message,
+        text: generation.text
+      });
+      return;
+    }
+    const modelConfig = prepareModelConfig(params, this._pluginConfig);
     try {
-      let model = params.model;
-      let enabledThinking = null;
-      if (model.includes("-thinking") && model.includes("gemini-2.5")) {
-        enabledThinking = !model.includes("-non-thinking");
-        model = model.replace("-nom-thinking", "").replace("-thinking", "");
-      }
-      const geminiMessages = await langchainMessageToGeminiMessage(
-        params.input,
-        model
-      );
-      const [systemInstruction, modelMessages] = extractSystemMessages(geminiMessages);
-      let thinkingBudget = this._pluginConfig.thinkingBudget ?? -1;
-      if (!enabledThinking && !model.includes("2.5-pro")) {
-        thinkingBudget = 0;
-      } else if (thinkingBudget >= 0 && thinkingBudget < 128) {
-        thinkingBudget = 128;
-      }
-      let imageGeneration = this._pluginConfig.imageGeneration ?? false;
-      if (imageGeneration) {
-        imageGeneration = params.model.includes("gemini-2.0-flash-exp") || params.model.includes("gemini-2.5-flash-image");
-      }
       const response = await this._post(
-        `models/${model}:streamGenerateContent?alt=sse`,
-
-
-
-
-
-            threshold: params.model.includes("gemini-2") ? "OFF" : "BLOCK_NONE"
-          },
-          {
-            category: "HARM_CATEGORY_HATE_SPEECH",
-            threshold: params.model.includes("gemini-2") ? "OFF" : "BLOCK_NONE"
-          },
-          {
-            category: "HARM_CATEGORY_SEXUALLY_EXPLICIT",
-            threshold: params.model.includes("gemini-2") ? "OFF" : "BLOCK_NONE"
-          },
-          {
-            category: "HARM_CATEGORY_DANGEROUS_CONTENT",
-            threshold: params.model.includes("gemini-2") ? "OFF" : "BLOCK_NONE"
-          },
-          {
-            category: "HARM_CATEGORY_CIVIC_INTEGRITY",
-            threshold: params.model.includes("gemini-2.0") ? "OFF" : "BLOCK_NONE"
-          }
-        ],
-        generationConfig: {
-          stopSequences: params.stop,
-          temperature: params.temperature,
-          maxOutputTokens: params.model.includes("vision") ? void 0 : params.maxTokens,
-          topP: params.topP,
-          responseModalities: imageGeneration ? ["TEXT", "IMAGE"] : void 0,
-          thinkingConfig: enabledThinking != null || this._pluginConfig.includeThoughts ? {
-            thinkingBudget,
-            includeThoughts: this._pluginConfig.includeThoughts
-          } : void 0
-        },
-        system_instruction: systemInstruction != null ? systemInstruction : void 0,
-        tools: params.tools != null || this._pluginConfig.googleSearch || this._pluginConfig.codeExecution || this._pluginConfig.urlContext ? formatToolsToGeminiAITools(
-          params.tools ?? [],
-          this._pluginConfig,
-          params.model
-        ) : void 0
-      },
+        `models/${modelConfig.model}:streamGenerateContent?alt=sse`,
+        await createChatGenerationParams(
+          params,
+          modelConfig,
+          this._pluginConfig
+        ),
        {
          signal: params.signal
        }
      );
-      let errorCount = 0;
-      let groundingContent = "";
-      let currentGroundingIndex = 0;
       await checkResponse(response);
-
-        async start(controller) {
-          for await (const chunk of sseIterable(response)) {
-            controller.enqueue(chunk.data);
-          }
-          controller.close();
-        }
-      });
-      const transformToChatPartStream = new TransformStream({
-        async transform(chunk, controller) {
-          if (chunk === "undefined") {
-            return;
-          }
-          const parsedValue = JSON.parse(chunk);
-          const transformValue = parsedValue;
-          if (!transformValue.candidates) {
-            return;
-          }
-          for (const candidate of transformValue.candidates) {
-            const parts = candidate.content?.parts;
-            if ((parts == null || parts.length < 1) && candidate.finishReason !== "STOP" && candidate.content === null) {
-              throw new Error(chunk);
-            } else if (candidate.finishReason === "STOP" && parts == null) {
-              continue;
-            }
-            if (parts == null) {
-              continue;
-            }
-            for (const part of parts) {
-              controller.enqueue(part);
-            }
-            for (const source of candidate.groundingMetadata?.groundingChunks ?? []) {
-              groundingContent += `[^${currentGroundingIndex++}]: [${source.web.title}](${source.web.uri})
-`;
-            }
-          }
-        }
-      });
-      const iterable = readableStreamToAsyncIterable(
-        readableStream.pipeThrough(transformToChatPartStream)
-      );
-      let reasoningContent = "";
-      let content = "";
-      const functionCall = {
-        name: "",
-        args: "",
-        arguments: ""
-      };
-      for await (const chunk of iterable) {
-        const messagePart = partAsType(chunk);
-        const chatFunctionCallingPart = partAsType(chunk);
-        const imagePart = partAsTypeCheck(
-          chunk,
-          (part) => part["inlineData"] != null
-        );
-        if (messagePart.text) {
-          if (messagePart.thought) {
-            reasoningContent += messagePart.text;
-            continue;
-          }
-          content = messagePart.text;
-        } else if (imagePart) {
-          messagePart.text = ``;
-          content = messagePart.text;
-        }
-        const deltaFunctionCall = chatFunctionCallingPart?.functionCall;
-        if (deltaFunctionCall) {
-          let args = deltaFunctionCall.args;
-          try {
-            let parsedArgs = JSON.parse(args);
-            if (typeof parsedArgs !== "string") {
-              args = parsedArgs;
-            }
-            parsedArgs = JSON.parse(args);
-            if (typeof parsedArgs !== "string") {
-              args = parsedArgs;
-            }
-          } catch (e) {
-          }
-          functionCall.args = JSON.stringify(args);
-          functionCall.name = deltaFunctionCall.name;
-          functionCall.arguments = deltaFunctionCall.args;
-        }
-        try {
-          const messageChunk = new AIMessageChunk2(content);
-          messageChunk.additional_kwargs = {
-            function_call: functionCall.name.length > 0 ? {
-              name: functionCall.name,
-              arguments: functionCall.args,
-              args: functionCall.arguments
-            } : void 0,
-            images: imagePart ? [
-              `data:${imagePart.inlineData.mimeType ?? "image/png"};base64,${imagePart.inlineData.data}`
-            ] : void 0
-            // eslint-disable-next-line @typescript-eslint/no-explicit-any
-          };
-          messageChunk.content = content;
-          const generationChunk = new ChatGenerationChunk({
-            message: messageChunk,
-            text: messageChunk.content
-          });
-          yield generationChunk;
-          content = messageChunk.content;
-        } catch (e) {
-          if (errorCount > 5) {
-            logger.error("error with chunk", chunk);
-            throw new ChatLunaError(
-              ChatLunaErrorCode.API_REQUEST_FAILED,
-              e
-            );
-          } else {
-            errorCount++;
-            continue;
-          }
-        }
-      }
-      if (reasoningContent.length > 0) {
-        logger.debug(`reasoning content: ${reasoningContent}`);
-      }
-      if (groundingContent.length > 0) {
-        logger.debug(`grounding content: ${groundingContent}`);
-        if (this._pluginConfig.groundingContentDisplay) {
-          const groundingMessage = new AIMessageChunk2(
-            `
-${groundingContent}`
-          );
-          const generationChunk = new ChatGenerationChunk({
-            message: groundingMessage,
-            text: "\n" + groundingContent
-          });
-          yield generationChunk;
-        }
-      }
+      yield* this._processResponseStream(response);
     } catch (e) {
       if (e instanceof ChatLunaError) {
         throw e;
```
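This hunk wires the new `nonStreaming` flag into both entry points: `completion` short-circuits to `completionInternal` instead of the streaming base implementation, and `completionStreamInternal` degenerates to a single synthetic chunk built from the non-streaming result. A simplified sketch of that fallback (the real logic lives on `GeminiRequester` above; `requester` and `params` stand in for the actual instances):

```js
import { ChatGenerationChunk } from "@langchain/core/outputs";

// With nonStreaming enabled, the "stream" is one chunk wrapping the
// complete generateContent result.
async function* streamViaNonStreaming(requester, params) {
  const generation = await requester.completion(params); // -> completionInternal
  yield new ChatGenerationChunk({
    generationInfo: generation.generationInfo,
    message: generation.message,
    text: generation.text
  });
}
```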
```diff
@@ -535,42 +450,41 @@ ${groundingContent}
       }
     }
   }
-  async
-
-    if (typeof params.input === "string") {
-      params.input = [params.input];
-    }
+  async completionInternal(params) {
+    const modelConfig = prepareModelConfig(params, this._pluginConfig);
     try {
       const response = await this._post(
-        `models/${
+        `models/${modelConfig.model}:generateContent?alt=sse`,
+        await createChatGenerationParams(
+          params,
+          modelConfig,
+          this._pluginConfig
+        ),
         {
-
-          return {
-            model: `models/${params.model}`,
-            content: {
-              parts: [
-                {
-                  text: input
-                }
-              ]
-            }
-          };
-        })
+          signal: params.signal
         }
       );
-
-
-
-
-
-
+      await checkResponse(response);
+      return await this._processResponse(response);
+    } catch (e) {
+      if (e instanceof ChatLunaError) {
+        throw e;
+      } else {
+        throw new ChatLunaError(ChatLunaErrorCode.API_REQUEST_FAILED, e);
       }
-
-
+    }
+  }
+  async embeddings(params) {
+    const input = this._prepareEmbeddingsInput(params.input);
+    try {
+      const response = await this._post(
+        `models/${params.model}:batchEmbedContents`,
+        this._createEmbeddingsRequest(params.model, input)
       );
+      return await this._processEmbeddingsResponse(response);
     } catch (e) {
       const error = new Error(
-        "error when calling gemini embeddings,
+        "error when calling gemini embeddings, Error: " + e.message
       );
       error.stack = e.stack;
       error.cause = e.cause;
```
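`completionInternal` is the new non-streaming path: it posts to `:generateContent` instead of `:streamGenerateContent` and hands the body to `_processResponse`. The embeddings call is likewise rebuilt around small helpers and now includes the underlying `e.message` in its error. For reference, the payload that `_createEmbeddingsRequest` (defined in the next hunk) builds for a two-string input looks like this (the model name is illustrative):

```js
// Request body for POST models/<model>:batchEmbedContents
const body = {
  requests: [
    { model: "models/text-embedding-004", content: { parts: [{ text: "hello" }] } },
    { model: "models/text-embedding-004", content: { parts: [{ text: "world" }] } }
  ]
};
// _processEmbeddingsResponse then returns data.embeddings.map((e) => e.values).
```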
```diff
@@ -578,34 +492,336 @@ ${groundingContent}
       throw new ChatLunaError(ChatLunaErrorCode.API_REQUEST_FAILED, error);
     }
   }
+  _prepareEmbeddingsInput(input) {
+    return typeof input === "string" ? [input] : input;
+  }
+  _createEmbeddingsRequest(model, input) {
+    return {
+      requests: input.map((text) => ({
+        model: `models/${model}`,
+        content: {
+          parts: [{ text }]
+        }
+      }))
+    };
+  }
+  async _processEmbeddingsResponse(response) {
+    const data = JSON.parse(
+      await response.text()
+    );
+    if (data.embeddings?.length > 0) {
+      return data.embeddings.map((embedding) => embedding.values);
+    }
+    throw new Error(
+      "error when calling gemini embeddings, Result: " + JSON.stringify(data)
+    );
+  }
   async getModels() {
-    let data;
     try {
       const response = await this._get("models");
-      data = await
-
-      if (!data.models || !data.models.length) {
-        throw new Error(
-          "error when listing gemini models, Result:" + JSON.stringify(data)
-        );
-      }
-      return data.models.filter(
-        (model) => model.name.includes("gemini") || model.name.includes("gemma") || model.name.includes("embedding")
-      ).map((model) => {
-        return {
-          ...model,
-          name: model.name.replace("models/", "")
-        };
-      });
+      const data = await this._parseModelsResponse(response);
+      return this._filterAndTransformModels(data.models);
     } catch (e) {
       const error = new Error(
-        "error when listing gemini models,
+        "error when listing gemini models, Error: " + e.message
       );
       error.stack = e.stack;
       error.cause = e.cause;
       throw error;
     }
   }
+  async _parseModelsResponse(response) {
+    const text = await response.text();
+    const data = JSON.parse(text);
+    if (!data.models?.length) {
+      throw new Error(
+        "error when listing gemini models, Result:" + JSON.stringify(data)
+      );
+    }
+    return data;
+  }
+  _filterAndTransformModels(models) {
+    return models.filter(
+      (model) => ["gemini", "gemma", "embedding"].some(
+        (keyword) => model.name.includes(keyword)
+      )
+    ).map((model) => ({
+      ...model,
+      name: model.name.replace("models/", "")
+    }));
+  }
+  async _processResponse(response) {
+    const { groundingContent, currentGroundingIndex } = this._createStreamContext();
+    const responseText = await response.text();
+    let parsedResponse;
+    try {
+      parsedResponse = JSON.parse(responseText);
+      if (!parsedResponse.candidates) {
+        throw new ChatLunaError(
+          ChatLunaErrorCode.API_REQUEST_FAILED,
+          new Error(
+            "error when calling gemini, Result: " + responseText
+          )
+        );
+      }
+    } catch (e) {
+      if (e instanceof ChatLunaError) {
+        throw e;
+      } else {
+        throw new ChatLunaError(
+          ChatLunaErrorCode.API_REQUEST_FAILED,
+          new Error(
+            "error when calling gemini, Result: " + responseText
+          )
+        );
+      }
+    }
+    const iterable = this._setupStreamTransform(
+      parsedResponse,
+      groundingContent,
+      currentGroundingIndex
+    );
+    let result;
+    let reasoningContent = "";
+    for await (const chunk of this._processChunks(iterable)) {
+      if (chunk.type === "reasoning") {
+        reasoningContent = chunk.content;
+      } else {
+        result = result != null ? result.concat(chunk.generation) : chunk.generation;
+      }
+    }
+    const finalChunk = this._handleFinalContent(
+      reasoningContent,
+      groundingContent.value
+    );
+    if (finalChunk != null) {
+      result = result.concat(finalChunk);
+    }
+    return result;
+  }
+  async *_processResponseStream(response) {
+    const { groundingContent, currentGroundingIndex } = this._createStreamContext();
+    const iterable = this._setupStreamTransform(
+      response,
+      groundingContent,
+      currentGroundingIndex
+    );
+    let reasoningContent = "";
+    for await (const chunk of this._processChunks(iterable)) {
+      if (chunk.type === "reasoning") {
+        reasoningContent = chunk.content;
+      } else {
+        yield chunk.generation;
+      }
+    }
+    const finalContent = this._handleFinalContent(
+      reasoningContent,
+      groundingContent.value
+    );
+    if (finalContent != null) {
+      yield finalContent;
+    }
+  }
+  _createStreamContext() {
+    return {
+      groundingContent: { value: "" },
+      currentGroundingIndex: { value: 0 }
+    };
+  }
+  _setupStreamTransform(response, groundingContent, currentGroundingIndex) {
+    const transformToChatPartStream = this._createTransformStream(
+      groundingContent,
+      currentGroundingIndex
+    );
+    const readableStream = new ReadableStream({
+      async start(controller) {
+        if (isChatResponse(response)) {
+          controller.enqueue(response);
+          controller.close();
+          return;
+        }
+        for await (const chunk of sseIterable(response)) {
+          controller.enqueue(chunk.data);
+        }
+        controller.close();
+      }
+    });
+    return readableStreamToAsyncIterable(
+      readableStream.pipeThrough(transformToChatPartStream)
+    );
+  }
+  _createTransformStream(groundingContent, currentGroundingIndex) {
+    const that = this;
+    return new TransformStream({
+      async transform(chunk, controller) {
+        if (chunk === "undefined") {
+          return;
+        }
+        const transformValue = typeof chunk === "string" ? JSON.parse(chunk) : chunk;
+        if (!transformValue?.candidates) {
+          return;
+        }
+        for (const candidate of transformValue.candidates) {
+          that._processCandidateChunk(
+            candidate,
+            controller,
+            JSON.stringify(transformValue),
+            groundingContent,
+            currentGroundingIndex
+          );
+        }
+      }
+    });
+  }
+  _processCandidateChunk(candidate, controller, chunk, groundingContent, currentGroundingIndex) {
+    const parts = candidate.content?.parts;
+    if ((parts == null || parts.length < 1) && candidate.finishReason !== "STOP" && candidate.content === null) {
+      throw new Error(chunk);
+    } else if (candidate.finishReason === "STOP" && parts == null) {
+      return;
+    }
+    if (parts == null) {
+      return;
+    }
+    for (const part of parts) {
+      controller.enqueue(part);
+    }
+    for (const source of candidate.groundingMetadata?.groundingChunks ?? []) {
+      groundingContent.value += `[^${currentGroundingIndex.value++}]: [${source.web.title}](${source.web.uri})
+`;
+    }
+  }
+  async *_processChunks(iterable) {
+    let reasoningContent = "";
+    let errorCount = 0;
+    const functionCall = {
+      name: "",
+      args: "",
+      arguments: ""
+    };
+    for await (const chunk of iterable) {
+      try {
+        const { updatedContent, updatedReasoning } = this._processChunk(
+          chunk,
+          reasoningContent,
+          functionCall
+        );
+        if (updatedReasoning !== reasoningContent) {
+          reasoningContent = updatedReasoning;
+          yield { type: "reasoning", content: reasoningContent };
+          continue;
+        }
+        if (updatedContent || functionCall.name) {
+          const messageChunk = this._createMessageChunk(
+            updatedContent,
+            functionCall,
+            partAsTypeCheck(
+              chunk,
+              (part) => part["inlineData"] != null
+            )
+          );
+          const generationChunk = new ChatGenerationChunk({
+            message: messageChunk,
+            text: getMessageContent(messageChunk.content) ?? ""
+          });
+          yield { type: "generation", generation: generationChunk };
+        }
+      } catch (e) {
+        if (errorCount > 5) {
+          logger.error("error with chunk", chunk);
+          throw new ChatLunaError(
+            ChatLunaErrorCode.API_REQUEST_FAILED,
+            e
+          );
+        } else {
+          errorCount++;
+          continue;
+        }
+      }
+    }
+  }
+  _processChunk(chunk, reasoningContent, functionCall) {
+    const messagePart = partAsType(chunk);
+    const chatFunctionCallingPart = partAsType(chunk);
+    const imagePart = partAsTypeCheck(
+      chunk,
+      (part) => part["inlineData"] != null
+    );
+    let messageContent;
+    if (messagePart.text) {
+      if (messagePart.thought) {
+        return {
+          updatedContent: messageContent,
+          updatedReasoning: reasoningContent + messagePart.text
+        };
+      }
+      messageContent = messagePart.text;
+    } else if (imagePart) {
+      messagePart.text = ``;
+      messageContent = messagePart.text;
+    }
+    const deltaFunctionCall = chatFunctionCallingPart?.functionCall;
+    if (deltaFunctionCall) {
+      this._updateFunctionCall(functionCall, deltaFunctionCall);
+    }
+    return {
+      updatedContent: messageContent,
+      updatedReasoning: reasoningContent
+    };
+  }
+  _updateFunctionCall(functionCall, deltaFunctionCall) {
+    let args = deltaFunctionCall.args;
+    try {
+      let parsedArgs = JSON.parse(args);
+      if (typeof parsedArgs !== "string") {
+        args = parsedArgs;
+      }
+      parsedArgs = JSON.parse(args);
+      if (typeof parsedArgs !== "string") {
+        args = parsedArgs;
+      }
+    } catch (e) {
+    }
+    functionCall.args = JSON.stringify(args);
+    functionCall.name = deltaFunctionCall.name;
+    functionCall.arguments = deltaFunctionCall.args;
+  }
+  _handleFinalContent(reasoningContent, groundingContent) {
+    if (reasoningContent.length > 0) {
+      logger.debug(`reasoning content: ${reasoningContent}`);
+    }
+    if (groundingContent.length > 0) {
+      logger.debug(`grounding content: ${groundingContent}`);
+      if (this._pluginConfig.groundingContentDisplay) {
+        const groundingMessage = new AIMessageChunk2(
+          `
+${groundingContent}`
+        );
+        const generationChunk = new ChatGenerationChunk({
+          message: groundingMessage,
+          text: "\n" + groundingContent
+        });
+        return generationChunk;
+      }
+    }
+  }
+  _createMessageChunk(content, functionCall, imagePart) {
+    const messageChunk = new AIMessageChunk2({
+      content
+    });
+    messageChunk.additional_kwargs = {
+      function_call: functionCall.name.length > 0 ? {
+        name: functionCall.name,
+        arguments: functionCall.args,
+        args: functionCall.arguments
+        // eslint-disable-next-line @typescript-eslint/no-explicit-any
+      } : void 0,
+      images: imagePart ? [
+        `data:${imagePart.inlineData.mimeType ?? "image/png"};base64,${imagePart.inlineData.data}`
+      ] : void 0
+    };
+    return messageChunk;
+  }
   // eslint-disable-next-line @typescript-eslint/no-explicit-any
   _post(url, data, params = {}) {
     const requestUrl = this._concatUrl(url);
```
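Most of the former inline streaming loop is now split into focused methods; notably, `_setupStreamTransform` accepts either an SSE response or an already-parsed response object (distinguished via `isChatResponse`), which is what lets the streaming and non-streaming paths share one pipeline. A small reconstruction of the grounding-footnote accumulation from `_processCandidateChunk` above (the source titles and URIs are illustrative):

```js
let groundingContent = "";
let index = 0;
const sources = [
  { web: { title: "Example title", uri: "https://example.com/a" } },
  { web: { title: "Another source", uri: "https://example.com/b" } }
];
// Each grounding source becomes one Markdown footnote line.
for (const source of sources) {
  groundingContent += `[^${index++}]: [${source.web.title}](${source.web.uri})\n`;
}
// groundingContent:
// [^0]: [Example title](https://example.com/a)
// [^1]: [Another source](https://example.com/b)
// With groundingContentDisplay enabled, _handleFinalContent yields this block
// as one extra ChatGenerationChunk after the model's answer.
```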
```diff
@@ -689,8 +905,10 @@ var GeminiClient = class extends PlatformModelAndEmbeddingsClient {
       name: model.name,
       maxTokens: model.inputTokenLimit,
       type: model.name.includes("embedding") ? ModelType.embeddings : ModelType.llm,
-
-
+      capabilities: [
+        ModelCapabilities.ImageInput,
+        ModelCapabilities.ToolCall
+      ]
     };
     if (model.name.includes("gemini-2.5") && !model.name.includes("pro") && !model.name.includes("image")) {
       if (!model.name.includes("-thinking")) {
```
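Model metadata returned by the client now declares explicit capabilities via the `ModelCapabilities` enum imported earlier in this diff. A sketch of the resulting model info object (the token limit is illustrative):

```js
import { ModelCapabilities, ModelType } from "koishi-plugin-chatluna/llm-core/platform/types";

const info = {
  name: "gemini-2.5-flash",
  maxTokens: 1048576, // taken from the API's inputTokenLimit field
  type: ModelType.llm,
  capabilities: [ModelCapabilities.ImageInput, ModelCapabilities.ToolCall]
};
```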
```diff
@@ -785,6 +1003,7 @@ var Config4 = Schema.intersect([
     urlContext: Schema.boolean().default(false),
     thinkingBudget: Schema.number().min(-1).max(24576).default(-1),
     includeThoughts: Schema.boolean().default(false),
+    nonStreaming: Schema.boolean().default(false),
     imageGeneration: Schema.boolean().default(false),
     groundingContentDisplay: Schema.boolean().default(false),
     searchThreshold: Schema.number().min(0).max(1).step(0.1).default(0.5)
```