koishi-plugin-chatluna-google-gemini-adapter 1.0.0-rc.2 → 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/index.cjs +22 -6
- package/lib/index.d.ts +1 -0
- package/lib/index.mjs +19 -3
- package/package.json +4 -4
package/lib/index.cjs
CHANGED
|
@@ -23,7 +23,7 @@ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: tru
|
|
|
23
23
|
// src/locales/zh-CN.schema.yml
|
|
24
24
|
var require_zh_CN_schema = __commonJS({
|
|
25
25
|
"src/locales/zh-CN.schema.yml"(exports2, module2) {
|
|
26
|
-
module2.exports = { $inner: [{}, { $desc: "请求选项", apiKeys: { $inner: ["Gemini 的 API Key", "Gemini API 的请求地址"], $desc: "Gemini 的 API Key 和请求地址列表。" } }, { $desc: "模型配置", maxTokens: "
|
|
26
|
+
module2.exports = { $inner: [{}, { $desc: "请求选项", apiKeys: { $inner: ["Gemini 的 API Key", "Gemini API 的请求地址"], $desc: "Gemini 的 API Key 和请求地址列表。" } }, { $desc: "模型配置", maxTokens: "输入的最大上下文 Token(16~2097000,必须是 16 的倍数)。注意:仅当您使用的模型最大 Token 为 8000 及以上时,才建议设置超过 2000 token。", temperature: "回复的随机性程度,数值越高,回复越随机(范围:0~2)。", googleSearch: "为模型启用谷歌搜索。", searchThreshold: "搜索的置信度阈值,范围:0~1,设置的数值越低,则越倾向于使用谷歌搜索。" }] };
|
|
27
27
|
}
|
|
28
28
|
});
|
|
29
29
|
|
|
@@ -35,15 +35,15 @@ var require_en_US_schema = __commonJS({
|
|
|
35
35
|
});
|
|
36
36
|
|
|
37
37
|
// src/index.ts
|
|
38
|
-
var
|
|
39
|
-
__export(
|
|
38
|
+
var index_exports = {};
|
|
39
|
+
__export(index_exports, {
|
|
40
40
|
Config: () => Config3,
|
|
41
41
|
apply: () => apply,
|
|
42
42
|
inject: () => inject,
|
|
43
43
|
logger: () => logger,
|
|
44
44
|
name: () => name
|
|
45
45
|
});
|
|
46
|
-
module.exports = __toCommonJS(
|
|
46
|
+
module.exports = __toCommonJS(index_exports);
|
|
47
47
|
var import_chat = require("koishi-plugin-chatluna/services/chat");
|
|
48
48
|
var import_koishi = require("koishi");
|
|
49
49
|
|
|
@@ -333,6 +333,10 @@ var GeminiRequester = class extends import_api.ModelRequester {
|
|
|
333
333
|
{
|
|
334
334
|
category: "HARM_CATEGORY_DANGEROUS_CONTENT",
|
|
335
335
|
threshold: "BLOCK_NONE"
|
|
336
|
+
},
|
|
337
|
+
{
|
|
338
|
+
category: "HARM_CATEGORY_CIVIC_INTEGRITY",
|
|
339
|
+
threshold: "BLOCK_NONE"
|
|
336
340
|
}
|
|
337
341
|
],
|
|
338
342
|
generationConfig: {
|
|
@@ -340,6 +344,7 @@ var GeminiRequester = class extends import_api.ModelRequester {
|
|
|
340
344
|
temperature: params.temperature,
|
|
341
345
|
maxOutputTokens: params.model.includes("vision") ? void 0 : params.maxTokens,
|
|
342
346
|
topP: params.topP
|
|
347
|
+
// thinkingConfig: { includeThoughts: true }
|
|
343
348
|
},
|
|
344
349
|
tools: params.tools != null || this._pluginConfig.googleSearch ? formatToolsToGeminiAITools(
|
|
345
350
|
params.tools ?? [],
|
|
@@ -363,8 +368,11 @@ var GeminiRequester = class extends import_api.ModelRequester {
|
|
|
363
368
|
};
|
|
364
369
|
jsonParser.onValue = async ({ value }) => {
|
|
365
370
|
const transformValue = value;
|
|
366
|
-
if (transformValue.candidates
|
|
367
|
-
|
|
371
|
+
if (!transformValue.candidates) {
|
|
372
|
+
return;
|
|
373
|
+
}
|
|
374
|
+
for (const candidate of transformValue.candidates) {
|
|
375
|
+
const parts = candidate.content?.parts;
|
|
368
376
|
if (parts == null || parts.length < 1) {
|
|
369
377
|
throw new Error(JSON.stringify(value));
|
|
370
378
|
}
|
|
@@ -382,6 +390,7 @@ var GeminiRequester = class extends import_api.ModelRequester {
|
|
|
382
390
|
0
|
|
383
391
|
);
|
|
384
392
|
let content = "";
|
|
393
|
+
let reasoningContent = "";
|
|
385
394
|
let isOldVisionModel = params.model.includes("vision");
|
|
386
395
|
const functionCall = {
|
|
387
396
|
name: "",
|
|
@@ -392,6 +401,10 @@ var GeminiRequester = class extends import_api.ModelRequester {
|
|
|
392
401
|
const messagePart = partAsType(chunk);
|
|
393
402
|
const chatFunctionCallingPart = partAsType(chunk);
|
|
394
403
|
if (messagePart.text) {
|
|
404
|
+
if (messagePart.thought) {
|
|
405
|
+
reasoningContent += messagePart.text;
|
|
406
|
+
continue;
|
|
407
|
+
}
|
|
395
408
|
if (params.tools != null) {
|
|
396
409
|
content = messagePart.text;
|
|
397
410
|
} else {
|
|
@@ -450,6 +463,9 @@ var GeminiRequester = class extends import_api.ModelRequester {
|
|
|
450
463
|
}
|
|
451
464
|
}
|
|
452
465
|
}
|
|
466
|
+
if (reasoningContent.length > 0) {
|
|
467
|
+
logger.debug(`reasoning content: ${reasoningContent}`);
|
|
468
|
+
}
|
|
453
469
|
} catch (e) {
|
|
454
470
|
if (e instanceof import_error.ChatLunaError) {
|
|
455
471
|
throw e;
|
package/lib/index.d.ts
CHANGED
|
@@ -27,6 +27,7 @@ export interface ChatCompletionResponseMessage {
|
|
|
27
27
|
export type ChatPart = ChatMessagePart | ChatUploadDataPart | ChatFunctionCallingPart | ChatFunctionResponsePart;
|
|
28
28
|
export type ChatMessagePart = {
|
|
29
29
|
text: string;
|
|
30
|
+
thought?: boolean;
|
|
30
31
|
};
|
|
31
32
|
export type ChatUploadDataPart = {
|
|
32
33
|
inline_data: {
|
package/lib/index.mjs
CHANGED
|
@@ -8,7 +8,7 @@ var __commonJS = (cb, mod) => function __require() {
|
|
|
8
8
|
// src/locales/zh-CN.schema.yml
|
|
9
9
|
var require_zh_CN_schema = __commonJS({
|
|
10
10
|
"src/locales/zh-CN.schema.yml"(exports, module) {
|
|
11
|
-
module.exports = { $inner: [{}, { $desc: "请求选项", apiKeys: { $inner: ["Gemini 的 API Key", "Gemini API 的请求地址"], $desc: "Gemini 的 API Key 和请求地址列表。" } }, { $desc: "模型配置", maxTokens: "
|
|
11
|
+
module.exports = { $inner: [{}, { $desc: "请求选项", apiKeys: { $inner: ["Gemini 的 API Key", "Gemini API 的请求地址"], $desc: "Gemini 的 API Key 和请求地址列表。" } }, { $desc: "模型配置", maxTokens: "输入的最大上下文 Token(16~2097000,必须是 16 的倍数)。注意:仅当您使用的模型最大 Token 为 8000 及以上时,才建议设置超过 2000 token。", temperature: "回复的随机性程度,数值越高,回复越随机(范围:0~2)。", googleSearch: "为模型启用谷歌搜索。", searchThreshold: "搜索的置信度阈值,范围:0~1,设置的数值越低,则越倾向于使用谷歌搜索。" }] };
|
|
12
12
|
}
|
|
13
13
|
});
|
|
14
14
|
|
|
@@ -327,6 +327,10 @@ var GeminiRequester = class extends ModelRequester {
|
|
|
327
327
|
{
|
|
328
328
|
category: "HARM_CATEGORY_DANGEROUS_CONTENT",
|
|
329
329
|
threshold: "BLOCK_NONE"
|
|
330
|
+
},
|
|
331
|
+
{
|
|
332
|
+
category: "HARM_CATEGORY_CIVIC_INTEGRITY",
|
|
333
|
+
threshold: "BLOCK_NONE"
|
|
330
334
|
}
|
|
331
335
|
],
|
|
332
336
|
generationConfig: {
|
|
@@ -334,6 +338,7 @@ var GeminiRequester = class extends ModelRequester {
|
|
|
334
338
|
temperature: params.temperature,
|
|
335
339
|
maxOutputTokens: params.model.includes("vision") ? void 0 : params.maxTokens,
|
|
336
340
|
topP: params.topP
|
|
341
|
+
// thinkingConfig: { includeThoughts: true }
|
|
337
342
|
},
|
|
338
343
|
tools: params.tools != null || this._pluginConfig.googleSearch ? formatToolsToGeminiAITools(
|
|
339
344
|
params.tools ?? [],
|
|
@@ -357,8 +362,11 @@ var GeminiRequester = class extends ModelRequester {
|
|
|
357
362
|
};
|
|
358
363
|
jsonParser.onValue = async ({ value }) => {
|
|
359
364
|
const transformValue = value;
|
|
360
|
-
if (transformValue.candidates
|
|
361
|
-
|
|
365
|
+
if (!transformValue.candidates) {
|
|
366
|
+
return;
|
|
367
|
+
}
|
|
368
|
+
for (const candidate of transformValue.candidates) {
|
|
369
|
+
const parts = candidate.content?.parts;
|
|
362
370
|
if (parts == null || parts.length < 1) {
|
|
363
371
|
throw new Error(JSON.stringify(value));
|
|
364
372
|
}
|
|
@@ -376,6 +384,7 @@ var GeminiRequester = class extends ModelRequester {
|
|
|
376
384
|
0
|
|
377
385
|
);
|
|
378
386
|
let content = "";
|
|
387
|
+
let reasoningContent = "";
|
|
379
388
|
let isOldVisionModel = params.model.includes("vision");
|
|
380
389
|
const functionCall = {
|
|
381
390
|
name: "",
|
|
@@ -386,6 +395,10 @@ var GeminiRequester = class extends ModelRequester {
|
|
|
386
395
|
const messagePart = partAsType(chunk);
|
|
387
396
|
const chatFunctionCallingPart = partAsType(chunk);
|
|
388
397
|
if (messagePart.text) {
|
|
398
|
+
if (messagePart.thought) {
|
|
399
|
+
reasoningContent += messagePart.text;
|
|
400
|
+
continue;
|
|
401
|
+
}
|
|
389
402
|
if (params.tools != null) {
|
|
390
403
|
content = messagePart.text;
|
|
391
404
|
} else {
|
|
@@ -444,6 +457,9 @@ var GeminiRequester = class extends ModelRequester {
|
|
|
444
457
|
}
|
|
445
458
|
}
|
|
446
459
|
}
|
|
460
|
+
if (reasoningContent.length > 0) {
|
|
461
|
+
logger.debug(`reasoning content: ${reasoningContent}`);
|
|
462
|
+
}
|
|
447
463
|
} catch (e) {
|
|
448
464
|
if (e instanceof ChatLunaError) {
|
|
449
465
|
throw e;
|
package/package.json
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "koishi-plugin-chatluna-google-gemini-adapter",
|
|
3
3
|
"description": "google-gemini adapter for chatluna",
|
|
4
|
-
"version": "1.0.0-rc.2",
|
|
4
|
+
"version": "1.0.0",
|
|
5
5
|
"main": "lib/index.cjs",
|
|
6
6
|
"module": "lib/index.mjs",
|
|
7
7
|
"typings": "lib/index.d.ts",
|
|
@@ -69,11 +69,11 @@
|
|
|
69
69
|
},
|
|
70
70
|
"devDependencies": {
|
|
71
71
|
"atsc": "^2.1.0",
|
|
72
|
-
"koishi": "^4.18.
|
|
72
|
+
"koishi": "^4.18.4"
|
|
73
73
|
},
|
|
74
74
|
"peerDependencies": {
|
|
75
|
-
"koishi": "^4.18.
|
|
76
|
-
"koishi-plugin-chatluna": "^1.0.0
|
|
75
|
+
"koishi": "^4.18.4",
|
|
76
|
+
"koishi-plugin-chatluna": "^1.0.0"
|
|
77
77
|
},
|
|
78
78
|
"koishi": {
|
|
79
79
|
"description": {
|