koishi-plugin-chatluna-google-gemini-adapter 1.3.0-alpha.12 → 1.3.0-alpha.14
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +7 -7
- package/lib/index.cjs +34 -15
- package/lib/index.d.ts +1 -1
- package/lib/index.mjs +32 -13
- package/package.json +8 -8
package/README.md
CHANGED
```diff
@@ -1,7 +1,7 @@
-## chatluna-google-gemini-adapter
-
-## [](https://www.npmjs.com/package/koishi-plugin-chatluna-google-gemini) [](https://www.npmjs.com/package//koishi-plugin-chatluna-google-gemini-adapter)
-
-> 为 ChatLuna 提供 Google Gemini 支持的适配器
-
-[Gemini 适配器文档](https://chatluna.chat/guide/configure-model-platform/google-gemini.html)
+## chatluna-google-gemini-adapter
+
+## [](https://www.npmjs.com/package/koishi-plugin-chatluna-google-gemini) [](https://www.npmjs.com/package//koishi-plugin-chatluna-google-gemini-adapter)
+
+> 为 ChatLuna 提供 Google Gemini 支持的适配器
+
+[Gemini 适配器文档](https://chatluna.chat/guide/configure-model-platform/google-gemini.html)
```
package/lib/index.cjs
CHANGED
```diff
@@ -23,14 +23,14 @@ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: tru
 // src/locales/zh-CN.schema.yml
 var require_zh_CN_schema = __commonJS({
   "src/locales/zh-CN.schema.yml"(exports2, module2) {
-    module2.exports = { $inner: [{}, { $desc: "请求选项", platform: "适配器的平台名。(不懂请不要修改)", apiKeys: { $inner: ["Gemini
+    module2.exports = { $inner: [{}, { $desc: "请求选项", platform: "适配器的平台名。(不懂请不要修改)", apiKeys: { $inner: ["Gemini API Key", "Gemini API 请求地址", "是否启用此配置"], $desc: "Gemini 的 API Key 和请求地址列表。" } }, { $desc: "模型配置", maxContextRatio: "最大上下文使用比例(0~1),控制可用的模型上下文窗口大小的最大百分比。例如 0.35 表示最多使用模型上下文的 35%。", temperature: "回复的随机性程度,数值越高,回复越随机(范围:0~2)。", googleSearch: "为模型启用谷歌搜索。", thinkingBudget: "思考预算,范围:(-1~24576),设置的数值越大,思考时花费的 Token 越多,-1 为动态思考。目前仅支持 gemini 2.5 系列模型。", groundingContentDisplay: "是否显示谷歌搜索结果。", imageGeneration: "为模型启用图像生成。目前仅支持 `gemini-2.0-flash-exp` 和 `gemini-2.5-flash-image-preview` 模型。", searchThreshold: "搜索的[置信度阈值](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval),范围:0~1,设置的数值越低,则越倾向于使用谷歌搜索。(仅支持 `gemini-1.5` 系列模型。gemini 2.0 模型起使用动态的工具调用)", includeThoughts: "是否获取模型的思考内容。", codeExecution: "为模型启用代码执行工具。", urlContext: "为模型启用 URL 内容获取工具。", nonStreaming: "强制不启用流式返回。开启后,将总是以非流式发起请求,即便配置了 stream 参数。" }] };
   }
 });
 
 // src/locales/en-US.schema.yml
 var require_en_US_schema = __commonJS({
   "src/locales/en-US.schema.yml"(exports2, module2) {
-    module2.exports = { $inner: [{}, { $desc: "API Configuration", platform: "Adapter platform name. (Do not modify if you do not understand)", apiKeys: { $inner: ["Gemini API Key", "Gemini API Endpoint (optional)"], $desc: "Gemini API access credentials" } }, { $desc: "Model Parameters", maxContextRatio: "Maximum context usage ratio (0-1). Controls the maximum percentage of model context window available for use. For example, 0.35 means at most 35% of the model context can be used.", temperature: "Sampling temperature (0-2). Higher: more random, Lower: more deterministic", googleSearch: "Enable Google search", thinkingBudget: "Thinking budget (-1-24576). (0: dynamic thinking) Higher: more tokens spent on thinking. Currently only supports `gemini-2.5` series models.", groundingContentDisplay: "Enable display of search results", imageGeneration: "Enable image generation (only for `gemini-2.0-flash-exp` and `gemini-2.5-flash-image-preview` model)", searchThreshold: "Search confidence [threshold](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval) (0-1). Lower: more likely to use Google search", includeThoughts: "Enable retrieval of model thoughts", codeExecution: "Enable code execution tool", urlContext: "Enable URL context retrieval tool", nonStreaming: "Force disable streaming response. When enabled, requests will always be made in non-streaming mode, even if the stream parameter is configured." }] };
+    module2.exports = { $inner: [{}, { $desc: "API Configuration", platform: "Adapter platform name. (Do not modify if you do not understand)", apiKeys: { $inner: ["Gemini API Key", "Gemini API Endpoint (optional)", "Enabled"], $desc: "Gemini API access credentials" } }, { $desc: "Model Parameters", maxContextRatio: "Maximum context usage ratio (0-1). Controls the maximum percentage of model context window available for use. For example, 0.35 means at most 35% of the model context can be used.", temperature: "Sampling temperature (0-2). Higher: more random, Lower: more deterministic", googleSearch: "Enable Google search", thinkingBudget: "Thinking budget (-1-24576). (0: dynamic thinking) Higher: more tokens spent on thinking. Currently only supports `gemini-2.5` series models.", groundingContentDisplay: "Enable display of search results", imageGeneration: "Enable image generation (only for `gemini-2.0-flash-exp` and `gemini-2.5-flash-image-preview` model)", searchThreshold: "Search confidence [threshold](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval) (0-1). Lower: more likely to use Google search", includeThoughts: "Enable retrieval of model thoughts", codeExecution: "Enable code execution tool", urlContext: "Enable URL context retrieval tool", nonStreaming: "Force disable streaming response. When enabled, requests will always be made in non-streaming mode, even if the stream parameter is configured." }] };
   }
 });
 
@@ -405,6 +405,7 @@ __name(isChatResponse, "isChatResponse");
 
 // src/requester.ts
 var import_string2 = require("koishi-plugin-chatluna/utils/string");
+var import_logger = require("koishi-plugin-chatluna/utils/logger");
 var GeminiRequester = class extends import_api.ModelRequester {
   constructor(ctx, _configPool, _pluginConfig, _plugin) {
     super(ctx, _configPool, _pluginConfig, _plugin);
@@ -448,6 +449,13 @@ var GeminiRequester = class extends import_api.ModelRequester {
       await (0, import_sse.checkResponse)(response);
       yield* this._processResponseStream(response);
     } catch (e) {
+      if (this.ctx.chatluna.config.isLog) {
+        await (0, import_logger.trackLogToLocal)(
+          "Request",
+          JSON.stringify(chatGenerationParams),
+          logger
+        );
+      }
       if (e instanceof import_error.ChatLunaError) {
         throw e;
       } else {
@@ -457,15 +465,16 @@ var GeminiRequester = class extends import_api.ModelRequester {
   }
   async completionInternal(params) {
     const modelConfig = prepareModelConfig(params, this._pluginConfig);
+    const chatGenerationParams = await createChatGenerationParams(
+      params,
+      this._plugin,
+      modelConfig,
+      this._pluginConfig
+    );
     try {
       const response = await this._post(
         `models/${modelConfig.model}:generateContent`,
-
-          params,
-          this._plugin,
-          modelConfig,
-          this._pluginConfig
-        ),
+        chatGenerationParams,
         {
           signal: params.signal
         }
@@ -473,6 +482,13 @@ var GeminiRequester = class extends import_api.ModelRequester {
       await (0, import_sse.checkResponse)(response);
       return await this._processResponse(response);
     } catch (e) {
+      if (this.ctx.chatluna.config.isLog) {
+        await (0, import_logger.trackLogToLocal)(
+          "Request",
+          JSON.stringify(chatGenerationParams),
+          logger
+        );
+      }
       if (e instanceof import_error.ChatLunaError) {
         throw e;
       } else {
@@ -978,15 +994,17 @@ var GeminiClient = class extends import_client.PlatformModelAndEmbeddingsClient
 };
 
 // src/index.ts
-var
+var import_logger2 = require("koishi-plugin-chatluna/utils/logger");
 var logger;
 var reusable = true;
 function apply(ctx, config) {
-
-  logger = (0, import_logger.createLogger)(ctx, "chatluna-gemini-adapter");
+  logger = (0, import_logger2.createLogger)(ctx, "chatluna-gemini-adapter");
   ctx.on("ready", async () => {
+    const plugin = new import_chat.ChatLunaPlugin(ctx, config, config.platform);
     plugin.parseConfig((config2) => {
-      return config2.apiKeys.
+      return config2.apiKeys.filter(([apiKey, _, enabled]) => {
+        return apiKey.length > 0 && enabled;
+      }).map(([apiKey, apiEndpoint]) => {
        return {
          apiKey,
          apiEndpoint,
@@ -1009,12 +1027,13 @@ var Config4 = import_koishi.Schema.intersect([
     platform: import_koishi.Schema.string().default("gemini"),
     apiKeys: import_koishi.Schema.array(
       import_koishi.Schema.tuple([
-        import_koishi.Schema.string().role("secret"),
+        import_koishi.Schema.string().role("secret").required(true),
         import_koishi.Schema.string().default(
           "https://generativelanguage.googleapis.com/v1beta"
-        )
+        ),
+        import_koishi.Schema.boolean().default(true)
       ])
-    ).default([[
+    ).default([[]]).role("table")
   }),
   import_koishi.Schema.object({
     maxContextRatio: import_koishi.Schema.number().min(0).max(1).step(1e-4).role("slider").default(0.35),
```
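The behavioral change in this bundle is easier to see outside the minified context: each `apiKeys` entry is now an `[apiKey, apiEndpoint, enabled]` tuple, and `parseConfig` drops entries whose key is empty or whose enable flag is off before creating client configs. A minimal sketch of that selection, assuming only the tuple shape shown in the diff; the standalone `selectClientConfigs` helper and the sample values are illustrative, not part of the package:

```ts
// Sketch of how the rewritten parseConfig callback treats apiKeys entries.
type ApiKeyEntry = [apiKey: string, apiEndpoint: string, enabled: boolean];

// Hypothetical helper mirroring the filter().map() chain from the diff:
// entries with an empty key or enabled === false never become clients.
function selectClientConfigs(apiKeys: ApiKeyEntry[]) {
    return apiKeys
        .filter(([apiKey, , enabled]) => apiKey.length > 0 && enabled)
        .map(([apiKey, apiEndpoint]) => ({ apiKey, apiEndpoint }));
}

// Example (values are made up):
const entries: ApiKeyEntry[] = [
    ['AIza...primary', 'https://generativelanguage.googleapis.com/v1beta', true],
    ['AIza...backup', 'https://generativelanguage.googleapis.com/v1beta', false], // disabled, dropped
    ['', 'https://generativelanguage.googleapis.com/v1beta', true] // empty key, dropped
];

console.log(selectClientConfigs(entries)); // only the first entry survives
```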
package/lib/index.d.ts
CHANGED
```diff
@@ -4,7 +4,7 @@ export declare let logger: Logger;
 export declare const reusable = true;
 export declare function apply(ctx: Context, config: Config): void;
 export interface Config extends ChatLunaPlugin.Config {
-    apiKeys: [string, string][];
+    apiKeys: [string, string, boolean][];
     maxContextRatio: number;
     platform: string;
     temperature: number;
```
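For consumers of the typings, the only change is the extra boolean in each `apiKeys` tuple. A short sketch of a value matching the updated field (the key and endpoint here are made-up examples); two-element entries written against 1.3.0-alpha.12 no longer satisfy this type, and the runtime schema defaults the flag to `true`:

```ts
// The third tuple element is the new per-key enable flag.
const apiKeys: [string, string, boolean][] = [
    // [API key, API endpoint, enabled]
    ['AIza...example', 'https://generativelanguage.googleapis.com/v1beta', true]
];
```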
package/lib/index.mjs
CHANGED
```diff
@@ -8,14 +8,14 @@ var __commonJS = (cb, mod) => function __require() {
 // src/locales/zh-CN.schema.yml
 var require_zh_CN_schema = __commonJS({
   "src/locales/zh-CN.schema.yml"(exports, module) {
-    module.exports = { $inner: [{}, { $desc: "请求选项", platform: "适配器的平台名。(不懂请不要修改)", apiKeys: { $inner: ["Gemini
+    module.exports = { $inner: [{}, { $desc: "请求选项", platform: "适配器的平台名。(不懂请不要修改)", apiKeys: { $inner: ["Gemini API Key", "Gemini API 请求地址", "是否启用此配置"], $desc: "Gemini 的 API Key 和请求地址列表。" } }, { $desc: "模型配置", maxContextRatio: "最大上下文使用比例(0~1),控制可用的模型上下文窗口大小的最大百分比。例如 0.35 表示最多使用模型上下文的 35%。", temperature: "回复的随机性程度,数值越高,回复越随机(范围:0~2)。", googleSearch: "为模型启用谷歌搜索。", thinkingBudget: "思考预算,范围:(-1~24576),设置的数值越大,思考时花费的 Token 越多,-1 为动态思考。目前仅支持 gemini 2.5 系列模型。", groundingContentDisplay: "是否显示谷歌搜索结果。", imageGeneration: "为模型启用图像生成。目前仅支持 `gemini-2.0-flash-exp` 和 `gemini-2.5-flash-image-preview` 模型。", searchThreshold: "搜索的[置信度阈值](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval),范围:0~1,设置的数值越低,则越倾向于使用谷歌搜索。(仅支持 `gemini-1.5` 系列模型。gemini 2.0 模型起使用动态的工具调用)", includeThoughts: "是否获取模型的思考内容。", codeExecution: "为模型启用代码执行工具。", urlContext: "为模型启用 URL 内容获取工具。", nonStreaming: "强制不启用流式返回。开启后,将总是以非流式发起请求,即便配置了 stream 参数。" }] };
   }
 });
 
 // src/locales/en-US.schema.yml
 var require_en_US_schema = __commonJS({
   "src/locales/en-US.schema.yml"(exports, module) {
-    module.exports = { $inner: [{}, { $desc: "API Configuration", platform: "Adapter platform name. (Do not modify if you do not understand)", apiKeys: { $inner: ["Gemini API Key", "Gemini API Endpoint (optional)"], $desc: "Gemini API access credentials" } }, { $desc: "Model Parameters", maxContextRatio: "Maximum context usage ratio (0-1). Controls the maximum percentage of model context window available for use. For example, 0.35 means at most 35% of the model context can be used.", temperature: "Sampling temperature (0-2). Higher: more random, Lower: more deterministic", googleSearch: "Enable Google search", thinkingBudget: "Thinking budget (-1-24576). (0: dynamic thinking) Higher: more tokens spent on thinking. Currently only supports `gemini-2.5` series models.", groundingContentDisplay: "Enable display of search results", imageGeneration: "Enable image generation (only for `gemini-2.0-flash-exp` and `gemini-2.5-flash-image-preview` model)", searchThreshold: "Search confidence [threshold](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval) (0-1). Lower: more likely to use Google search", includeThoughts: "Enable retrieval of model thoughts", codeExecution: "Enable code execution tool", urlContext: "Enable URL context retrieval tool", nonStreaming: "Force disable streaming response. When enabled, requests will always be made in non-streaming mode, even if the stream parameter is configured." }] };
+    module.exports = { $inner: [{}, { $desc: "API Configuration", platform: "Adapter platform name. (Do not modify if you do not understand)", apiKeys: { $inner: ["Gemini API Key", "Gemini API Endpoint (optional)", "Enabled"], $desc: "Gemini API access credentials" } }, { $desc: "Model Parameters", maxContextRatio: "Maximum context usage ratio (0-1). Controls the maximum percentage of model context window available for use. For example, 0.35 means at most 35% of the model context can be used.", temperature: "Sampling temperature (0-2). Higher: more random, Lower: more deterministic", googleSearch: "Enable Google search", thinkingBudget: "Thinking budget (-1-24576). (0: dynamic thinking) Higher: more tokens spent on thinking. Currently only supports `gemini-2.5` series models.", groundingContentDisplay: "Enable display of search results", imageGeneration: "Enable image generation (only for `gemini-2.0-flash-exp` and `gemini-2.5-flash-image-preview` model)", searchThreshold: "Search confidence [threshold](https://ai.google.dev/gemini-api/docs/grounding?lang=rest#dynamic-retrieval) (0-1). Lower: more likely to use Google search", includeThoughts: "Enable retrieval of model thoughts", codeExecution: "Enable code execution tool", urlContext: "Enable URL context retrieval tool", nonStreaming: "Force disable streaming response. When enabled, requests will always be made in non-streaming mode, even if the stream parameter is configured." }] };
   }
 });
 
@@ -402,6 +402,7 @@ __name(isChatResponse, "isChatResponse");
 
 // src/requester.ts
 import { getMessageContent } from "koishi-plugin-chatluna/utils/string";
+import { trackLogToLocal } from "koishi-plugin-chatluna/utils/logger";
 var GeminiRequester = class extends ModelRequester {
   constructor(ctx, _configPool, _pluginConfig, _plugin) {
     super(ctx, _configPool, _pluginConfig, _plugin);
@@ -445,6 +446,13 @@ var GeminiRequester = class extends ModelRequester {
       await checkResponse(response);
       yield* this._processResponseStream(response);
     } catch (e) {
+      if (this.ctx.chatluna.config.isLog) {
+        await trackLogToLocal(
+          "Request",
+          JSON.stringify(chatGenerationParams),
+          logger
+        );
+      }
       if (e instanceof ChatLunaError) {
         throw e;
       } else {
@@ -454,15 +462,16 @@ var GeminiRequester = class extends ModelRequester {
   }
   async completionInternal(params) {
     const modelConfig = prepareModelConfig(params, this._pluginConfig);
+    const chatGenerationParams = await createChatGenerationParams(
+      params,
+      this._plugin,
+      modelConfig,
+      this._pluginConfig
+    );
     try {
       const response = await this._post(
         `models/${modelConfig.model}:generateContent`,
-
-          params,
-          this._plugin,
-          modelConfig,
-          this._pluginConfig
-        ),
+        chatGenerationParams,
         {
           signal: params.signal
         }
@@ -470,6 +479,13 @@ var GeminiRequester = class extends ModelRequester {
       await checkResponse(response);
       return await this._processResponse(response);
     } catch (e) {
+      if (this.ctx.chatluna.config.isLog) {
+        await trackLogToLocal(
+          "Request",
+          JSON.stringify(chatGenerationParams),
+          logger
+        );
+      }
       if (e instanceof ChatLunaError) {
         throw e;
       } else {
@@ -979,11 +995,13 @@ import { createLogger } from "koishi-plugin-chatluna/utils/logger";
 var logger;
 var reusable = true;
 function apply(ctx, config) {
-  const plugin = new ChatLunaPlugin(ctx, config, config.platform);
   logger = createLogger(ctx, "chatluna-gemini-adapter");
   ctx.on("ready", async () => {
+    const plugin = new ChatLunaPlugin(ctx, config, config.platform);
     plugin.parseConfig((config2) => {
-      return config2.apiKeys.
+      return config2.apiKeys.filter(([apiKey, _, enabled]) => {
+        return apiKey.length > 0 && enabled;
+      }).map(([apiKey, apiEndpoint]) => {
        return {
          apiKey,
          apiEndpoint,
@@ -1006,12 +1024,13 @@ var Config4 = Schema.intersect([
     platform: Schema.string().default("gemini"),
     apiKeys: Schema.array(
       Schema.tuple([
-        Schema.string().role("secret"),
+        Schema.string().role("secret").required(true),
         Schema.string().default(
           "https://generativelanguage.googleapis.com/v1beta"
-        )
+        ),
+        Schema.boolean().default(true)
       ])
-    ).default([[
+    ).default([[]]).role("table")
   }),
   Schema.object({
     maxContextRatio: Schema.number().min(0).max(1).step(1e-4).role("slider").default(0.35),
```
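Besides the config changes, both bundles now dump the serialized request body to the local log when a request fails and ChatLuna's `isLog` option is enabled; building `chatGenerationParams` before the `try` block is what keeps it in scope for the `catch` handler. A minimal sketch of that pattern, assuming the `trackLogToLocal(label, payload, logger)` signature and the `koishi-plugin-chatluna/utils/logger` import seen in the bundle; `postWithRequestLogging` and its parameters are hypothetical stand-ins for the requester internals, and in the real code the flag comes from `ctx.chatluna.config.isLog`:

```ts
import type { Logger } from 'koishi';
import { trackLogToLocal } from 'koishi-plugin-chatluna/utils/logger';

// Sketch of the new failure-path logging: the request body is built once up
// front, so it can still be serialized when the request throws.
async function postWithRequestLogging(
    postGenerateContent: (body: unknown) => Promise<unknown>,
    chatGenerationParams: unknown,
    isLogEnabled: boolean,
    logger: Logger
) {
    try {
        return await postGenerateContent(chatGenerationParams);
    } catch (e) {
        if (isLogEnabled) {
            // Persist the serialized request so failed calls can be inspected locally.
            await trackLogToLocal('Request', JSON.stringify(chatGenerationParams), logger);
        }
        throw e;
    }
}
```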
package/package.json
CHANGED
```diff
@@ -1,7 +1,7 @@
 {
   "name": "koishi-plugin-chatluna-google-gemini-adapter",
   "description": "google-gemini adapter for chatluna",
-  "version": "1.3.0-alpha.
+  "version": "1.3.0-alpha.14",
   "main": "lib/index.cjs",
   "module": "lib/index.mjs",
   "typings": "lib/index.d.ts",
@@ -22,13 +22,13 @@
   "repository": {
     "type": "git",
     "url": "https://github.com/ChatLunaLab/chatluna.git",
-    "directory": "packages/
+    "directory": "packages/adapter-gemini"
   },
   "license": "AGPL-3.0",
   "bugs": {
     "url": "https://github.com/ChatLunaLab/chatluna/issues"
   },
-  "homepage": "https://github.com/ChatLunaLab/chatluna/tree/v1-dev/packages/
+  "homepage": "https://github.com/ChatLunaLab/chatluna/tree/v1-dev/packages/adapter-gemini#readme",
   "scripts": {
     "build": "atsc -b"
   },
@@ -62,18 +62,18 @@
     "adapter"
   ],
   "dependencies": {
-    "@chatluna/v1-shared-adapter": "^1.0.
+    "@chatluna/v1-shared-adapter": "^1.0.12",
     "@langchain/core": "0.3.62",
     "zod": "3.25.76",
-    "zod-to-json-schema": "^3.24.
+    "zod-to-json-schema": "^3.24.6"
   },
   "devDependencies": {
     "atsc": "^2.1.0",
-    "koishi": "^4.18.
+    "koishi": "^4.18.9"
   },
   "peerDependencies": {
-    "koishi": "^4.18.
-    "koishi-plugin-chatluna": "^1.3.0-alpha.
+    "koishi": "^4.18.9",
+    "koishi-plugin-chatluna": "^1.3.0-alpha.53",
     "koishi-plugin-chatluna-storage-service": "^0.0.9"
   },
   "peerDependenciesMeta": {
```