koishi-plugin-chatluna-google-gemini-adapter 1.0.0-beta.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md ADDED
@@ -0,0 +1,7 @@
1
+ ## chatluna-google-gemini-adapter
2
+
3
+ ## [![npm](https://img.shields.io/npm/v/koishi-plugin-chatluna-google-gemini-adapter/next)](https://www.npmjs.com/package/koishi-plugin-chatluna-google-gemini-adapter) [![npm](https://img.shields.io/npm/dm/koishi-plugin-chatluna-google-gemini-adapter)](https://www.npmjs.com/package/koishi-plugin-chatluna-google-gemini-adapter)
4
+
5
+ > 为 ChatHub 提供 Google-gemini 支持的适配器
6
+
7
+ [Google-gemini 适配器文档](https://chatluna.dingyi222666.top/guide/configure-model-platform/google-gemini.html)
@@ -0,0 +1,17 @@
1
+ import { PlatformModelAndEmbeddingsClient } from 'koishi-plugin-chatluna/lib/llm-core/platform/client';
2
+ import { ClientConfig } from 'koishi-plugin-chatluna/lib/llm-core/platform/config';
3
+ import { ChatHubBaseEmbeddings, ChatLunaChatModel } from 'koishi-plugin-chatluna/lib/llm-core/platform/model';
4
+ import { ModelInfo } from 'koishi-plugin-chatluna/lib/llm-core/platform/types';
5
+ import { Context } from 'koishi';
6
+ import { Config } from '.';
7
/**
 * ChatLuna platform client for Google Gemini.
 * Discovers the available Gemini models and creates chat / embeddings
 * model wrappers backed by a GeminiRequester.
 */
export declare class GeminiClient extends PlatformModelAndEmbeddingsClient<ClientConfig> {
    private _config;
    /** Platform identifier; set to 'gemini' in the implementation. */
    platform: string;
    private _requester;
    private _models;
    constructor(ctx: Context, _config: Config, clientConfig: ClientConfig);
    /** Warms up the client by fetching and caching the model list. */
    init(): Promise<void>;
    /** Re-fetches the model list from the API (bypasses the cache). */
    refreshModels(): Promise<ModelInfo[]>;
    /** Returns the cached model list, fetching it on first use. */
    getModels(): Promise<ModelInfo[]>;
    /** Instantiates a chat or embeddings model for a listed model name. */
    protected _createModel(model: string): ChatLunaChatModel | ChatHubBaseEmbeddings;
}
package/lib/client.js ADDED
@@ -0,0 +1,75 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.GeminiClient = void 0;
4
+ const client_1 = require("koishi-plugin-chatluna/lib/llm-core/platform/client");
5
+ const model_1 = require("koishi-plugin-chatluna/lib/llm-core/platform/model");
6
+ const types_1 = require("koishi-plugin-chatluna/lib/llm-core/platform/types");
7
+ const error_1 = require("koishi-plugin-chatluna/lib/utils/error");
8
+ const requester_1 = require("./requester");
9
/**
 * ChatLuna platform client for Google Gemini.
 * Lists the available Gemini models and instantiates chat / embeddings
 * model wrappers backed by a GeminiRequester.
 */
class GeminiClient extends client_1.PlatformModelAndEmbeddingsClient {
    _config;
    platform = 'gemini';
    _requester;
    // Cache of model name -> ModelInfo, filled lazily by getModels().
    _models;
    constructor(ctx, _config, clientConfig) {
        super(ctx, clientConfig);
        this._config = _config;
        this._requester = new requester_1.GeminiRequester(clientConfig);
    }
    async init() {
        await this.getModels();
    }
    /**
     * Fetches the raw model list from the API and maps it to ModelInfo.
     * Models whose name contains 'embedding' are exposed as embeddings,
     * everything else as LLMs.
     * Throws ChatLunaError(MODEL_INIT_ERROR) when the request fails.
     */
    async refreshModels() {
        try {
            const rawModels = await this._requester.getModels();
            return rawModels
                .map((model) => model.replace('models/', ''))
                .map((model) => {
                return {
                    name: model,
                    type: model.includes('embedding')
                        ? types_1.ModelType.embeddings
                        : types_1.ModelType.llm,
                    functionCall: true,
                    supportMode: ['all']
                };
            });
        }
        catch (e) {
            throw new error_1.ChatLunaError(error_1.ChatLunaErrorCode.MODEL_INIT_ERROR, e);
        }
    }
    async getModels() {
        // Serve from cache once populated.
        if (this._models) {
            return Object.values(this._models);
        }
        const models = await this.refreshModels();
        this._models = {};
        for (const model of models) {
            this._models[model.name] = model;
        }
        // Fix: the original fell through without a return here, so the first
        // (cache-miss) call resolved to undefined instead of ModelInfo[],
        // violating the declared Promise<ModelInfo[]> contract.
        return Object.values(this._models);
    }
    /**
     * Builds the concrete model wrapper for a previously listed model name.
     * Throws ChatLunaError(MODEL_NOT_FOUND) for unknown model names.
     */
    _createModel(model) {
        const info = this._models[model];
        if (info == null) {
            throw new error_1.ChatLunaError(error_1.ChatLunaErrorCode.MODEL_NOT_FOUND);
        }
        if (info.type === types_1.ModelType.llm) {
            return new model_1.ChatLunaChatModel({
                modelInfo: info,
                requester: this._requester,
                model,
                maxTokens: this._config.maxTokens,
                timeout: this._config.timeout,
                temperature: this._config.temperature,
                maxRetries: this._config.maxRetries,
                llmType: 'gemini'
            });
        }
        return new model_1.ChatLunaEmbeddings({
            client: this._requester,
            maxRetries: this._config.maxRetries
        });
    }
}
exports.GeminiClient = GeminiClient;
package/lib/index.d.ts ADDED
@@ -0,0 +1,12 @@
1
+ import { ChatLunaPlugin } from 'koishi-plugin-chatluna/lib/services/chat';
2
+ import { Context, Logger, Schema } from 'koishi';
3
/** Shared adapter logger; assigned inside `apply` once the plugin loads. */
export declare let logger: Logger;
/** Koishi plugin entry point: registers the Gemini platform with ChatLuna. */
export declare function apply(ctx: Context, config: Config): void;
/** Plugin configuration, extending the common ChatLuna plugin options. */
export interface Config extends ChatLunaPlugin.Config {
    /** List of [API key, API endpoint] pairs; one client is built per pair. */
    apiKeys: [string, string][];
    /** Maximum number of tokens in a completion. */
    maxTokens: number;
    /** Sampling temperature passed to the model. */
    temperature: number;
}
export declare const Config: Schema<Config>;
export declare const inject: string[];
/** NOTE(review): this is 'chatluna-gemini-adapter' while the package is named
 * 'koishi-plugin-chatluna-google-gemini-adapter' — presumably intentional; verify. */
export declare const name = "chatluna-gemini-adapter";
package/lib/index.js ADDED
@@ -0,0 +1,63 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.name = exports.inject = exports.Config = exports.apply = exports.logger = void 0;
4
+ const chat_1 = require("koishi-plugin-chatluna/lib/services/chat");
5
+ const koishi_1 = require("koishi");
6
+ const client_1 = require("./client");
7
+ const logger_1 = require("koishi-plugin-chatluna/lib/utils/logger");
8
// Plugin entry point: wires the Gemini adapter into the ChatLuna chat service.
function apply(ctx, config) {
    const plugin = new chat_1.ChatLunaPlugin(ctx, config, 'gemini');
    // Mutates the exported `logger` binding so other modules (e.g. the
    // requester) can log through it.
    exports.logger = (0, logger_1.createLogger)(ctx, 'chatluna-gemini-adapter');
    ctx.on('ready', async () => {
        // NOTE(review): the call order below (register -> parseConfig ->
        // registerClient -> initClients) mirrors the ChatLuna plugin
        // lifecycle; presumably it must be preserved — confirm before reordering.
        await plugin.registerToService();
        // Expand every [apiKey, apiEndpoint] pair into one client config.
        await plugin.parseConfig((config) => {
            return config.apiKeys.map(([apiKey, apiEndpoint]) => {
                return {
                    apiKey,
                    apiEndpoint,
                    platform: 'gemini',
                    chatLimit: config.chatTimeLimit,
                    timeout: config.timeout,
                    maxRetries: config.maxRetries,
                    concurrentMaxSize: config.chatConcurrentMaxSize
                };
            });
        });
        await plugin.registerClient((_, clientConfig) => new client_1.GeminiClient(ctx, config, clientConfig));
        await plugin.initClients();
    });
}
exports.apply = apply;
31
+ exports.Config = koishi_1.Schema.intersect([
32
+ chat_1.ChatLunaPlugin.Config,
33
+ koishi_1.Schema.object({
34
+ apiKeys: koishi_1.Schema.array(koishi_1.Schema.tuple([
35
+ koishi_1.Schema.string()
36
+ .role('secret')
37
+ .description('Gemini 的 API Key')
38
+ .required(),
39
+ koishi_1.Schema.string()
40
+ .description('请求 Gemini API 的地址')
41
+ .default('https://generativelanguage.googleapis.com/v1beta')
42
+ ]))
43
+ .description('Gemini 的 API Key 和请求地址列表')
44
+ .default([['', 'https://generativelanguage.googleapis.com/v1beta']])
45
+ }).description('请求设置'),
46
+ koishi_1.Schema.object({
47
+ maxTokens: koishi_1.Schema.number()
48
+ .description('回复的最大 Token 数(16~32800,必须是16的倍数)(注意如果你目前使用的模型的最大 Token 为 8000 及以上的话才建议设置超过 512 token)')
49
+ .min(16)
50
+ .max(128000)
51
+ .step(16)
52
+ .default(1024),
53
+ temperature: koishi_1.Schema.percent()
54
+ .description('回复温度,越高越随机')
55
+ .min(0)
56
+ .max(1)
57
+ .step(0.1)
58
+ .default(0.8)
59
+ }).description('模型设置')
60
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
61
+ ]);
62
+ exports.inject = ['chatluna'];
63
+ exports.name = 'chatluna-gemini-adapter';
@@ -0,0 +1,16 @@
1
+ import { EmbeddingsRequester, EmbeddingsRequestParams, ModelRequester, ModelRequestParams } from 'koishi-plugin-chatluna/lib/llm-core/platform/api';
2
+ import { ClientConfig } from 'koishi-plugin-chatluna/lib/llm-core/platform/config';
3
+ import { ChatGenerationChunk } from 'langchain/schema';
4
/**
 * Low-level HTTP requester for the Google Gemini REST API.
 * Provides streaming chat completions and text embeddings for the adapter.
 */
export declare class GeminiRequester extends ModelRequester implements EmbeddingsRequester {
    private _config;
    constructor(_config: ClientConfig);
    /** Streams completion chunks from `models/{model}:streamGenerateContent`. */
    completionStream(params: ModelRequestParams): AsyncGenerator<ChatGenerationChunk>;
    /** Embeds input text via `models/{model}:embedContent`. */
    embeddings(params: EmbeddingsRequestParams): Promise<number[] | number[][]>;
    /** Lists model names whose name contains 'gemini' or 'embedding'. */
    getModels(): Promise<string[]>;
    private _post;
    private _get;
    private _concatUrl;
    private _buildHeaders;
    /** No-op: the requester holds no connection state to set up. */
    init(): Promise<void>;
    /** No-op: nothing to release. */
    dispose(): Promise<void>;
}
@@ -0,0 +1,206 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.GeminiRequester = void 0;
4
+ const api_1 = require("koishi-plugin-chatluna/lib/llm-core/platform/api");
5
+ const schema_1 = require("langchain/schema");
6
+ const error_1 = require("koishi-plugin-chatluna/lib/utils/error");
7
+ const sse_1 = require("koishi-plugin-chatluna/lib/utils/sse");
8
+ const utils_1 = require("./utils");
9
+ const request_1 = require("koishi-plugin-chatluna/lib/utils/request");
10
+ const _1 = require(".");
11
+ const json_1 = require("@streamparser/json");
12
+ const stream_1 = require("koishi-plugin-chatluna/lib/utils/stream");
13
/**
 * Low-level HTTP requester for the Google Gemini REST API.
 * Authenticates by appending the API key as a `?key=` query parameter
 * (see _concatUrl) rather than an Authorization header.
 */
class GeminiRequester extends api_1.ModelRequester {
    _config;
    constructor(_config) {
        super();
        this._config = _config;
    }
    /**
     * Streams a chat completion from `models/{model}:streamGenerateContent`.
     *
     * Pipeline: the SSE payloads are fed into a streaming JSON parser; each
     * parsed candidate's text is written into a TransformStream, which is then
     * consumed as an async iterable and yielded as cumulative
     * ChatGenerationChunk values (each chunk carries the full text so far).
     * The statement order here is load-bearing: the parser callbacks must be
     * installed before sse() runs, and the iterable is drained only after
     * sse() resolves.
     */
    async *completionStream(params) {
        try {
            const response = await this._post(`models/${params.model}:streamGenerateContent`, {
                contents: (0, utils_1.langchainMessageToGeminiMessage)(params.input, params.model),
                // Relax all four safety categories to BLOCK_ONLY_HIGH.
                safetySettings: [
                    {
                        category: 'HARM_CATEGORY_HARASSMENT',
                        threshold: 'BLOCK_ONLY_HIGH'
                    },
                    {
                        category: 'HARM_CATEGORY_HATE_SPEECH',
                        threshold: 'BLOCK_ONLY_HIGH'
                    },
                    {
                        category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT',
                        threshold: 'BLOCK_ONLY_HIGH'
                    },
                    {
                        category: 'HARM_CATEGORY_DANGEROUS_CONTENT',
                        threshold: 'BLOCK_ONLY_HIGH'
                    }
                ],
                generationConfig: {
                    stopSequences: params.stop,
                    temperature: params.temperature,
                    // Vision models get no explicit output-token cap here.
                    maxOutputTokens: params.model.includes('vision')
                        ? undefined
                        : params.maxTokens,
                    topP: params.topP
                }
            }, {
                signal: params.signal
            });
            let errorCount = 0;
            const stream = new TransformStream();
            const iterable = (0, stream_1.readableStreamToAsyncIterable)(stream.readable);
            const jsonParser = new json_1.JSONParser();
            const writable = stream.writable.getWriter();
            // Sentinel written when the response JSON is fully parsed.
            jsonParser.onEnd = async () => {
                await writable.write('[DONE]');
            };
            // Forward the first part's text of each parsed candidate.
            jsonParser.onValue = async ({ value }) => {
                const transformValue = value;
                if (transformValue.candidates && transformValue.candidates[0]) {
                    const parts = transformValue.candidates[0].content
                        .parts;
                    if (parts.length < 1) {
                        throw new Error(JSON.stringify(value));
                    }
                    const text = parts[0].text;
                    if (text) {
                        await writable.write(text);
                    }
                }
            };
            // Pump every raw SSE payload into the JSON parser.
            await (0, sse_1.sse)(response, async (rawData) => {
                jsonParser.write(rawData);
                return true;
            });
            let content = '';
            for await (const chunk of iterable) {
                if (chunk === '[DONE]') {
                    return;
                }
                try {
                    const messageChunk = new schema_1.AIMessageChunk(chunk);
                    // Accumulate: each yielded chunk contains the full text so far.
                    messageChunk.content = content + messageChunk.content;
                    const generationChunk = new schema_1.ChatGenerationChunk({
                        message: messageChunk,
                        text: messageChunk.content
                    });
                    yield generationChunk;
                    content = messageChunk.content;
                }
                catch (e) {
                    // Tolerate up to 5 bad chunks before failing the stream.
                    if (errorCount > 5) {
                        _1.logger.error('error with chunk', chunk);
                        throw new error_1.ChatLunaError(error_1.ChatLunaErrorCode.API_REQUEST_FAILED, e);
                    }
                    else {
                        errorCount++;
                        continue;
                    }
                }
            }
        }
        catch (e) {
            // Re-throw ChatLunaError untouched; wrap everything else.
            if (e instanceof error_1.ChatLunaError) {
                throw e;
            }
            else {
                throw new error_1.ChatLunaError(error_1.ChatLunaErrorCode.API_REQUEST_FAILED, e);
            }
        }
    }
    /**
     * Embeds a single text via `models/{model}:embedContent` and returns the
     * embedding vector. Wraps any failure in ChatLunaError(API_REQUEST_FAILED).
     */
    async embeddings(params) {
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        let data;
        try {
            const response = await this._post(`models/${params.model}:embedContent`, {
                model: `models/${params.model}`,
                content: {
                    parts: [
                        {
                            text: params.input
                        }
                    ]
                }
            });
            data = await response.text();
            data = JSON.parse(data);
            if (data.embedding && data.embedding.values?.length > 0) {
                return data.embedding.values;
            }
            throw new Error('error when calling gemini embeddings, Result: ' +
                JSON.stringify(data));
        }
        catch (e) {
            const error = new Error('error when calling gemini embeddings, Result: ' +
                JSON.stringify(data));
            error.stack = e.stack;
            error.cause = e.cause;
            _1.logger.debug(e);
            throw new error_1.ChatLunaError(error_1.ChatLunaErrorCode.API_REQUEST_FAILED, error);
        }
    }
    /**
     * Lists model names from the `models` endpoint, keeping only names that
     * contain 'gemini' or 'embedding'.
     */
    async getModels() {
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        let data;
        try {
            const response = await this._get('models');
            data = await response.text();
            data = JSON.parse(data);
            // eslint-disable-next-line @typescript-eslint/no-explicit-any
            return data.models
                .map((model) => model.name)
                .filter((model) => model.includes('gemini') || model.includes('embedding'));
        }
        catch (e) {
            const error = new Error('error when listing gemini models, Result: ' +
                JSON.stringify(data));
            error.stack = e.stack;
            error.cause = e.cause;
            throw error;
        }
    }
    // POST helper: strips undefined top-level keys before serializing.
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    _post(url, data, params = {}) {
        const requestUrl = this._concatUrl(url);
        for (const key in data) {
            if (data[key] === undefined) {
                delete data[key];
            }
        }
        const body = JSON.stringify(data);
        // console.log('POST', requestUrl, body)
        return (0, request_1.chatLunaFetch)(requestUrl, {
            body,
            headers: this._buildHeaders(),
            method: 'POST',
            ...params
        });
    }
    // GET helper with the shared headers and key-bearing URL.
    _get(url) {
        const requestUrl = this._concatUrl(url);
        return (0, request_1.chatLunaFetch)(requestUrl, {
            method: 'GET',
            headers: this._buildHeaders()
        });
    }
    // Joins the configured endpoint with the path and appends ?key=<apiKey>.
    _concatUrl(url) {
        const apiEndPoint = this._config.apiEndpoint;
        // match the apiEndPoint ends with '/v1' or '/v1/' using regex
        if (apiEndPoint.endsWith('/')) {
            return apiEndPoint + url + `?key=${this._config.apiKey}`;
        }
        return apiEndPoint + '/' + url + `?key=${this._config.apiKey}`;
    }
    // Auth is carried in the URL query string, so no Authorization header.
    _buildHeaders() {
        return {
            /* Authorization: `Bearer ${this._config.apiKey}`, */
            'Content-Type': 'application/json'
        };
    }
    async init() { }
    async dispose() { }
}
exports.GeminiRequester = GeminiRequester;
package/lib/types.d.ts ADDED
@@ -0,0 +1,36 @@
1
/** One `contents` entry (a single turn) in a Gemini request or response. */
export interface ChatCompletionResponseMessage {
    role: string;
    parts?: (ChatMessagePart | ChatUploadDataPart)[];
}
/** Plain-text message part. */
export type ChatMessagePart = {
    text: string;
};
/** Inline binary message part (e.g. a base64-encoded image). */
export type ChatUploadDataPart = {
    inline_data: {
        mime_type: string;
        data?: string;
    };
};
/** Response shape of the Gemini generateContent endpoints. */
export interface ChatResponse {
    candidates: {
        content: ChatCompletionResponseMessage;
        finishReason: string;
        index: number;
        safetyRatings: {
            category: string;
            probability: string;
        }[];
    }[];
    promptFeedback: {
        safetyRatings: {
            category: string;
            probability: string;
        }[];
    };
}
/** Response shape of the Gemini embedContent endpoint. */
export interface CreateEmbeddingResponse {
    embedding: {
        values: number[];
    };
}
/** Message roles used by this adapter ('system' is rewritten before sending). */
export type ChatCompletionResponseMessageRoleEnum = 'system' | 'model' | 'user';
package/lib/types.js ADDED
@@ -0,0 +1,2 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
package/lib/utils.d.ts ADDED
@@ -0,0 +1,5 @@
1
+ import { AIMessageChunk, BaseMessage, ChatMessageChunk, HumanMessageChunk, MessageType, SystemMessageChunk } from 'langchain/schema';
2
+ import { ChatCompletionResponseMessage, ChatCompletionResponseMessageRoleEnum } from './types';
3
/** Converts langchain messages into Gemini `contents` entries, rewriting
 * system turns and attaching inline image data for vision models. */
export declare function langchainMessageToGeminiMessage(messages: BaseMessage[], model?: string): ChatCompletionResponseMessage[];
/** Maps a langchain message type ('system' | 'ai' | 'human') to a Gemini role. */
export declare function messageTypeToGeminiRole(type: MessageType): ChatCompletionResponseMessageRoleEnum;
/** Builds the appropriate langchain message chunk from a streaming delta. */
export declare function convertDeltaToMessageChunk(delta: Record<string, any>, defaultRole?: ChatCompletionResponseMessageRoleEnum): HumanMessageChunk | AIMessageChunk | SystemMessageChunk | ChatMessageChunk;
package/lib/utils.js ADDED
@@ -0,0 +1,112 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.convertDeltaToMessageChunk = exports.messageTypeToGeminiRole = exports.langchainMessageToGeminiMessage = void 0;
4
+ const schema_1 = require("langchain/schema");
5
/**
 * Converts langchain BaseMessage[] into Gemini `contents` entries.
 *
 * System turns are replayed as a user message followed (when the next turn is
 * not already a model turn) by a synthetic model acknowledgement, since the
 * Gemini API has no system role. For vision models, base64 images from
 * `additional_kwargs.images` are attached as inline_data parts.
 *
 * Fixes vs. original:
 * - `model` is optional per the .d.ts but was dereferenced unconditionally
 *   (`model.includes(...)`) — now guarded with optional chaining.
 * - The trailing check compared against 'assistant', a role that
 *   messageTypeToGeminiRole never produces (the AI role is 'model'), so the
 *   "continue" tail was dead code — it now fires when the last turn is 'model'.
 * - Guard against indexing an empty result array.
 */
function langchainMessageToGeminiMessage(messages, model) {
    const mappedMessage = messages.map((rawMessage) => {
        const role = messageTypeToGeminiRole(rawMessage._getType());
        const images = rawMessage.additional_kwargs.images;
        const result = {
            role,
            parts: [
                {
                    text: rawMessage.content
                }
            ]
        };
        if (model?.includes('vision') && images != null) {
            for (const image of images) {
                result.parts.push({
                    inline_data: {
                        // assumes base64 JPEG data — TODO: detect real mime type
                        data: image,
                        mime_type: 'image/jpeg'
                    }
                });
            }
        }
        return result;
    });
    const result = [];
    for (let i = 0; i < mappedMessage.length; i++) {
        const message = mappedMessage[i];
        if (message.role !== 'system') {
            result.push(message);
            continue;
        }
        // Replay the system prompt as a user turn.
        result.push({
            role: 'user',
            parts: message.parts
        });
        // Only inject the acknowledgement when the next turn isn't already
        // a model turn (Gemini expects alternating user/model turns).
        if (mappedMessage?.[i + 1]?.role === 'model') {
            continue;
        }
        result.push({
            role: 'model',
            parts: [{ text: 'Okay, what do I need to do?' }]
        });
    }
    // The conversation must end on a user turn before generation.
    if (result.length > 0 && result[result.length - 1].role === 'model') {
        result.push({
            role: 'user',
            parts: [
                {
                    text: 'Continue what I said to you last message. Follow these instructions.'
                }
            ]
        });
    }
    return result;
}
exports.langchainMessageToGeminiMessage = langchainMessageToGeminiMessage;
/**
 * Maps a langchain message type to the corresponding Gemini role.
 * Throws on unknown types ('function'/'generic' are not supported here).
 */
function messageTypeToGeminiRole(type) {
    switch (type) {
        case 'system':
            return 'system';
        case 'ai':
            return 'model';
        case 'human':
            return 'user';
        default:
            throw new Error(`Unknown message type: ${type}`);
    }
}
exports.messageTypeToGeminiRole = messageTypeToGeminiRole;
79
/**
 * Builds the appropriate langchain message chunk for a streaming delta.
 * The delta's own role wins over `defaultRole`; missing content becomes ''.
 * Function-call / tool-call payloads are forwarded via additional_kwargs
 * (only relevant for the assistant branch).
 */
function convertDeltaToMessageChunk(
// eslint-disable-next-line @typescript-eslint/no-explicit-any
delta, defaultRole) {
    const role = delta.role ?? defaultRole;
    const content = delta.content ?? '';
    // eslint-disable-next-line @typescript-eslint/no-explicit-any, @typescript-eslint/naming-convention
    const additional_kwargs = delta.function_call
        ? { function_call: delta.function_call }
        : delta.tool_calls
            ? { tool_calls: delta.tool_calls }
            : {};
    switch (role) {
        case 'user':
            return new schema_1.HumanMessageChunk({ content });
        case 'assistant':
            return new schema_1.AIMessageChunk({ content, additional_kwargs });
        case 'system':
            return new schema_1.SystemMessageChunk({ content });
        default:
            // Unknown roles are preserved on a generic chat chunk.
            return new schema_1.ChatMessageChunk({ content, role });
    }
}
exports.convertDeltaToMessageChunk = convertDeltaToMessageChunk;
package/package.json ADDED
@@ -0,0 +1,62 @@
1
+ {
2
+ "name": "koishi-plugin-chatluna-google-gemini-adapter",
3
+ "description": "google-gemini adapter for chatluna",
4
+ "version": "1.0.0-beta.1",
5
+ "main": "lib/index.js",
6
+ "typings": "lib/index.d.ts",
7
+ "files": [
8
+ "lib",
9
+ "dist"
10
+ ],
11
+ "author": "dingyi222666 <dingyi222666@foxmail.com>",
12
+ "repository": {
13
+ "type": "git",
14
+ "url": "https://github.com/ChatLunaLab/chatluna.git",
15
+ "directory": "packages/google-gemini-adapter"
16
+ },
17
+ "license": "AGPL-3.0",
18
+ "bugs": {
19
+ "url": "https://github.com/ChatLunaLab/chatluna/issues"
20
+ },
21
+ "homepage": "https://github.com/ChatLunaLab/chatluna/tree/v1-dev/packages/google-gemini-adapter#readme",
22
+ "scripts": {
23
+ "build": "atsc -b"
24
+ },
25
+ "engines": {
26
+ "node": ">=18.0.0"
27
+ },
28
+ "keywords": [
29
+ "chatbot",
30
+ "koishi",
31
+ "plugin",
32
+ "service",
33
+ "chatgpt",
34
+ "gpt",
35
+ "google",
36
+ "gemini",
37
+ "chatluna",
38
+ "adapter"
39
+ ],
40
+ "dependencies": {
41
+ "@streamparser/json": "^0.0.19",
42
+ "langchain": "^0.0.186"
43
+ },
44
+ "devDependencies": {
45
+ "atsc": "^1.2.2",
46
+ "koishi": "^4.16.1"
47
+ },
48
+ "peerDependencies": {
49
+ "koishi": "^4.16.0",
50
+ "koishi-plugin-chatluna": "^1.0.0-beta.28"
51
+ },
52
+ "koishi": {
53
+ "description": {
54
+ "zh": "ChatLuna 的 Google gemini 平台适配器"
55
+ },
56
+ "service": {
57
+ "required": [
58
+ "chatluna"
59
+ ]
60
+ }
61
+ }
62
+ }