koishi-plugin-chatluna-google-gemini-adapter 1.0.0-beta.1 → 1.0.0-beta.11
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +7 -7
- package/lib/client.d.ts +1 -1
- package/lib/client.js +5 -1
- package/lib/index.d.ts +1 -1
- package/lib/index.js +1 -1
- package/lib/requester.d.ts +1 -1
- package/lib/requester.js +76 -24
- package/lib/types.d.ts +26 -2
- package/lib/utils.d.ts +8 -4
- package/lib/utils.js +155 -16
- package/package.json +9 -7
package/README.md
CHANGED
@@ -1,7 +1,7 @@
-## chatluna-google-gemini-adapter
-
-## [](https://www.npmjs.com/package/koishi-plugin-chatluna-google-gemini) [](https://www.npmjs.com/package//koishi-plugin-chatluna-google-gemini-adapter)
-
-> 为 ChatHub 提供 Google-gemini 支持的适配器
-
-[Google-gemini 适配器文档](https://chatluna.dingyi222666.top/guide/configure-model-platform/google-gemini.html)
+## chatluna-google-gemini-adapter
+
+## [](https://www.npmjs.com/package/koishi-plugin-chatluna-google-gemini) [](https://www.npmjs.com/package//koishi-plugin-chatluna-google-gemini-adapter)
+
+> 为 ChatHub 提供 Google-gemini 支持的适配器
+
+[Google-gemini 适配器文档](https://chatluna.dingyi222666.top/guide/configure-model-platform/google-gemini.html)
package/lib/client.d.ts
CHANGED
@@ -4,7 +4,7 @@ import { ChatHubBaseEmbeddings, ChatLunaChatModel } from 'koishi-plugin-chatluna
 import { ModelInfo } from 'koishi-plugin-chatluna/lib/llm-core/platform/types';
 import { Context } from 'koishi';
 import { Config } from '.';
-export declare class GeminiClient extends PlatformModelAndEmbeddingsClient
+export declare class GeminiClient extends PlatformModelAndEmbeddingsClient {
     private _config;
     platform: string;
     private _requester;
package/lib/client.js
CHANGED
@@ -22,15 +22,19 @@ class GeminiClient extends client_1.PlatformModelAndEmbeddingsClient {
     async refreshModels() {
         try {
             const rawModels = await this._requester.getModels();
+            if (!rawModels.length) {
+                throw new error_1.ChatLunaError(error_1.ChatLunaErrorCode.MODEL_INIT_ERROR, new Error('No model found'));
+            }
             return rawModels
                 .map((model) => model.replace('models/', ''))
                 .map((model) => {
                 return {
                     name: model,
+                    maxTokens: model.includes('vision') ? 12288 : 30720,
                     type: model.includes('embedding')
                         ? types_1.ModelType.embeddings
                         : types_1.ModelType.llm,
-                    functionCall:
+                    functionCall: !model.includes('vision'),
                     supportMode: ['all']
                 };
             });
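
For context on the refreshModels change above: each raw Gemini model id is mapped to a ModelInfo entry with hard-coded token budgets and a function-calling flag keyed off the model name. A minimal TypeScript sketch of that mapping rule follows; the names SketchModelInfo and toModelInfo are illustrative and not part of the plugin's API.

    // Sketch of the mapping applied in refreshModels above (assumed helper names).
    type ModelKind = 'llm' | 'embeddings'

    interface SketchModelInfo {
        name: string
        maxTokens: number
        type: ModelKind
        functionCall: boolean
        supportMode: string[]
    }

    function toModelInfo(rawModel: string): SketchModelInfo {
        const name = rawModel.replace('models/', '')
        return {
            name,
            // vision models get the smaller context budget used in the diff
            maxTokens: name.includes('vision') ? 12288 : 30720,
            type: name.includes('embedding') ? 'embeddings' : 'llm',
            // vision models are treated as not supporting function calling
            functionCall: !name.includes('vision'),
            supportMode: ['all']
        }
    }

    // toModelInfo('models/gemini-pro')
    // -> { name: 'gemini-pro', maxTokens: 30720, type: 'llm', functionCall: true, supportMode: ['all'] }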
package/lib/index.d.ts
CHANGED
@@ -9,4 +9,4 @@ export interface Config extends ChatLunaPlugin.Config {
 }
 export declare const Config: Schema<Config>;
 export declare const inject: string[];
-export declare const name = "chatluna-gemini-adapter";
+export declare const name = "chatluna-google-gemini-adapter";
package/lib/index.js
CHANGED
package/lib/requester.d.ts
CHANGED
@@ -1,6 +1,6 @@
+import { ChatGenerationChunk } from '@langchain/core/outputs';
 import { EmbeddingsRequester, EmbeddingsRequestParams, ModelRequester, ModelRequestParams } from 'koishi-plugin-chatluna/lib/llm-core/platform/api';
 import { ClientConfig } from 'koishi-plugin-chatluna/lib/llm-core/platform/config';
-import { ChatGenerationChunk } from 'langchain/schema';
 export declare class GeminiRequester extends ModelRequester implements EmbeddingsRequester {
     private _config;
     constructor(_config: ClientConfig);
package/lib/requester.js
CHANGED
@@ -1,15 +1,16 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.GeminiRequester = void 0;
+const messages_1 = require("@langchain/core/messages");
+const outputs_1 = require("@langchain/core/outputs");
+const json_1 = require("@streamparser/json");
 const api_1 = require("koishi-plugin-chatluna/lib/llm-core/platform/api");
-const schema_1 = require("langchain/schema");
 const error_1 = require("koishi-plugin-chatluna/lib/utils/error");
-const sse_1 = require("koishi-plugin-chatluna/lib/utils/sse");
-const utils_1 = require("./utils");
 const request_1 = require("koishi-plugin-chatluna/lib/utils/request");
-const
-const json_1 = require("@streamparser/json");
+const sse_1 = require("koishi-plugin-chatluna/lib/utils/sse");
 const stream_1 = require("koishi-plugin-chatluna/lib/utils/stream");
+const _1 = require(".");
+const utils_1 = require("./utils");
 class GeminiRequester extends api_1.ModelRequester {
     _config;
     constructor(_config) {
@@ -19,23 +20,23 @@ class GeminiRequester extends api_1.ModelRequester {
     async *completionStream(params) {
         try {
             const response = await this._post(`models/${params.model}:streamGenerateContent`, {
-                contents: (0, utils_1.langchainMessageToGeminiMessage)(params.input, params.model),
+                contents: await (0, utils_1.langchainMessageToGeminiMessage)(params.input, params.model),
                 safetySettings: [
                     {
                         category: 'HARM_CATEGORY_HARASSMENT',
-                        threshold: '
+                        threshold: 'BLOCK_NONE'
                     },
                     {
                         category: 'HARM_CATEGORY_HATE_SPEECH',
-                        threshold: '
+                        threshold: 'BLOCK_NONE'
                     },
                     {
                         category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT',
-                        threshold: '
+                        threshold: 'BLOCK_NONE'
                     },
                     {
                         category: 'HARM_CATEGORY_DANGEROUS_CONTENT',
-                        threshold: '
+                        threshold: 'BLOCK_NONE'
                     }
                 ],
                 generationConfig: {
@@ -45,7 +46,12 @@
                         ? undefined
                         : params.maxTokens,
                     topP: params.topP
-                }
+                },
+                tools: !params.model.includes('vision') && params.tools != null
+                    ? {
+                        functionDeclarations: (0, utils_1.formatToolsToGeminiAITools)(params.tools)
+                    }
+                    : undefined
             }, {
                 signal: params.signal
             });
@@ -55,35 +61,78 @@
             const jsonParser = new json_1.JSONParser();
             const writable = stream.writable.getWriter();
             jsonParser.onEnd = async () => {
-                await writable.
+                await writable.close();
             };
             jsonParser.onValue = async ({ value }) => {
                 const transformValue = value;
                 if (transformValue.candidates && transformValue.candidates[0]) {
-                    const parts = transformValue.candidates[0].content
-                        .parts;
+                    const parts = transformValue.candidates[0].content.parts;
                     if (parts.length < 1) {
                         throw new Error(JSON.stringify(value));
                     }
-                    const
-
-                    await writable.write(text);
+                    for (const part of parts) {
+                        await writable.write(part);
                     }
                 }
             };
             await (0, sse_1.sse)(response, async (rawData) => {
                 jsonParser.write(rawData);
                 return true;
-            });
+            }, 10);
             let content = '';
+            let isVisionModel = params.model.includes('vision');
+            const functionCall = {
+                name: '',
+                args: '',
+                arguments: ''
+            };
             for await (const chunk of iterable) {
-
-
+                const messagePart = (0, utils_1.partAsType)(chunk);
+                const chatFunctionCallingPart = (0, utils_1.partAsType)(chunk);
+                if (messagePart.text) {
+                    content += messagePart.text;
+                    // match /w*model:
+                    if (isVisionModel && /\s*model:\s*/.test(content)) {
+                        isVisionModel = false;
+                        content = content.replace(/\s*model:\s*/, '');
+                    }
+                }
+                if (chatFunctionCallingPart.functionCall) {
+                    const deltaFunctionCall = chatFunctionCallingPart.functionCall;
+                    if (deltaFunctionCall) {
+                        // eslint-disable-next-line @typescript-eslint/no-explicit-any
+                        let args = deltaFunctionCall.args?.input ??
+                            deltaFunctionCall.args;
+                        try {
+                            let parsedArgs = JSON.parse(args);
+                            if (typeof parsedArgs !== 'string') {
+                                args = parsedArgs;
+                            }
+                            parsedArgs = JSON.parse(args);
+                            if (typeof parsedArgs !== 'string') {
+                                args = parsedArgs;
+                            }
+                        }
+                        catch (e) { }
+                        functionCall.args = JSON.stringify(args);
+                        functionCall.name = deltaFunctionCall.name;
+                        functionCall.arguments = deltaFunctionCall.args;
+                    }
+                }
             }
             try {
-                const messageChunk = new
-                messageChunk.
-
+                const messageChunk = new messages_1.AIMessageChunk(content);
+                messageChunk.additional_kwargs = {
+                    function_call: functionCall.name.length > 0
+                        ? {
+                            name: functionCall.name,
+                            arguments: functionCall.args,
+                            args: functionCall.arguments
+                        }
+                        : undefined
+                    // eslint-disable-next-line @typescript-eslint/no-explicit-any
+                };
+                messageChunk.content = content;
+                const generationChunk = new outputs_1.ChatGenerationChunk({
                     message: messageChunk,
                     text: messageChunk.content
                 });
@@ -149,6 +198,10 @@ class GeminiRequester extends api_1.ModelRequester {
         const response = await this._get('models');
         data = await response.text();
         data = JSON.parse(data);
+        if (!data.models || !data.models.length) {
+            throw new Error('error when listing gemini models, Result:' +
+                JSON.stringify(data));
+        }
         // eslint-disable-next-line @typescript-eslint/no-explicit-any
         return data.models
             .map((model) => model.name)
@@ -171,7 +224,6 @@ class GeminiRequester extends api_1.ModelRequester {
             }
         }
         const body = JSON.stringify(data);
-        // console.log('POST', requestUrl, body)
        return (0, request_1.chatLunaFetch)(requestUrl, {
             body,
             headers: this._buildHeaders(),
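
For context on the streaming changes above: the body of `models/<model>:streamGenerateContent` arrives as one large JSON array delivered in chunks, and the new code feeds those chunks into a `JSONParser` from `@streamparser/json`, then forwards each candidate part downstream. A hedged, standalone sketch of that parsing pattern follows; the `GeminiStreamValue` shape and the `collectText` helper are assumptions for illustration, not the plugin's API.

    import { JSONParser } from '@streamparser/json'

    // Assumed shape of one streamed element; only the fields used below.
    interface GeminiStreamValue {
        candidates?: {
            content: { parts: { text?: string; functionCall?: { name: string; args?: unknown } }[] }
        }[]
    }

    // Feed partial JSON chunks into the parser and concatenate the text parts,
    // roughly what completionStream does before wrapping the result in an AIMessageChunk.
    function collectText(chunks: string[]): string {
        const texts: string[] = []
        const parser = new JSONParser()
        parser.onValue = ({ value }) => {
            // onValue fires for every parsed value, so filter for candidate objects.
            const v = value as unknown as GeminiStreamValue
            for (const part of v?.candidates?.[0]?.content?.parts ?? []) {
                if (part.text) texts.push(part.text)
            }
        }
        // write() accepts partial JSON, so chunk boundaries can fall anywhere.
        for (const chunk of chunks) parser.write(chunk)
        return texts.join('')
    }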
package/lib/types.d.ts
CHANGED
@@ -1,7 +1,8 @@
 export interface ChatCompletionResponseMessage {
     role: string;
-    parts?:
+    parts?: ChatPart[];
 }
+export type ChatPart = ChatMessagePart | ChatUploadDataPart | ChatFunctionCallingPart | ChatFunctionResponsePart;
 export type ChatMessagePart = {
     text: string;
 };
@@ -11,6 +12,18 @@ export type ChatUploadDataPart = {
         data?: string;
     };
 };
+export type ChatFunctionCallingPart = {
+    functionCall: {
+        name: string;
+        args?: any;
+    };
+};
+export type ChatFunctionResponsePart = {
+    functionResponse: {
+        name: string;
+        response: any;
+    };
+};
 export interface ChatResponse {
     candidates: {
         content: ChatCompletionResponseMessage;
@@ -28,9 +41,20 @@ export interface ChatResponse {
         }[];
     };
 }
+export interface ChatCompletionFunction {
+    name: string;
+    description?: string;
+    parameters?: {
+        [key: string]: any;
+    };
+}
+export interface ChatCompletionMessageFunctionCall {
+    name: string;
+    args?: any;
+}
 export interface CreateEmbeddingResponse {
     embedding: {
         values: number[];
     };
 }
-export type ChatCompletionResponseMessageRoleEnum = 'system' | 'model' | 'user';
+export type ChatCompletionResponseMessageRoleEnum = 'system' | 'model' | 'user' | 'function';
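
A small usage sketch for the ChatPart union introduced above: the partAsType helper added in lib/utils.js is just an unchecked cast, so callers effectively discriminate on which key is present, as below. The describePart function and the relative import path are illustrative assumptions, not part of the package.

    // Assumes this file sits next to the package's lib/types.d.ts.
    import type { ChatPart } from './types'

    function describePart(part: ChatPart): string {
        if ('text' in part) return `text: ${part.text}`
        if ('functionCall' in part) return `function call: ${part.functionCall.name}`
        if ('functionResponse' in part) return `function response from: ${part.functionResponse.name}`
        return 'inline data part'
    }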
package/lib/utils.d.ts
CHANGED
@@ -1,5 +1,9 @@
-import { AIMessageChunk, BaseMessage, ChatMessageChunk, HumanMessageChunk, MessageType, SystemMessageChunk } from 'langchain/
-import { ChatCompletionResponseMessage, ChatCompletionResponseMessageRoleEnum } from './types';
-
+import { AIMessageChunk, BaseMessage, ChatMessageChunk, HumanMessageChunk, MessageType, SystemMessageChunk } from '@langchain/core/messages';
+import { ChatCompletionFunction, ChatCompletionResponseMessage, ChatCompletionResponseMessageRoleEnum, ChatPart } from './types';
+import { StructuredTool } from '@langchain/core/tools';
+export declare function langchainMessageToGeminiMessage(messages: BaseMessage[], model?: string): Promise<ChatCompletionResponseMessage[]>;
+export declare function partAsType<T extends ChatPart>(part: ChatPart): T;
+export declare function formatToolsToGeminiAITools(tools: StructuredTool[]): ChatCompletionFunction[];
+export declare function formatToolToGeminiAITool(tool: StructuredTool): ChatCompletionFunction;
 export declare function messageTypeToGeminiRole(type: MessageType): ChatCompletionResponseMessageRoleEnum;
-export declare function convertDeltaToMessageChunk(delta: Record<string, any>, defaultRole?: ChatCompletionResponseMessageRoleEnum):
+export declare function convertDeltaToMessageChunk(delta: Record<string, any>, defaultRole?: ChatCompletionResponseMessageRoleEnum): AIMessageChunk | HumanMessageChunk | SystemMessageChunk | ChatMessageChunk;
package/lib/utils.js
CHANGED
@@ -1,11 +1,81 @@
 "use strict";
 Object.defineProperty(exports, "__esModule", { value: true });
-exports.convertDeltaToMessageChunk = exports.messageTypeToGeminiRole = exports.langchainMessageToGeminiMessage = void 0;
-
-
-
-
+exports.convertDeltaToMessageChunk = exports.messageTypeToGeminiRole = exports.formatToolToGeminiAITool = exports.formatToolsToGeminiAITools = exports.partAsType = exports.langchainMessageToGeminiMessage = void 0;
+/* eslint-disable @typescript-eslint/no-explicit-any */
+const messages_1 = require("@langchain/core/messages");
+const zod_to_json_schema_1 = require("zod-to-json-schema");
+async function langchainMessageToGeminiMessage(messages, model) {
+    const mappedMessage = await Promise.all(messages.map(async (rawMessage) => {
         const role = messageTypeToGeminiRole(rawMessage._getType());
+        if (role === 'function' ||
+            rawMessage.additional_kwargs?.function_call != null) {
+            return {
+                role: 'function',
+                parts: [
+                    {
+                        functionResponse: rawMessage.additional_kwargs?.function_call !=
+                            null
+                            ? undefined
+                            : {
+                                name: rawMessage.name,
+                                response: {
+                                    name: rawMessage.name,
+                                    content: (() => {
+                                        try {
+                                            const result = JSON.parse(rawMessage.content);
+                                            if (typeof result ===
+                                                'string') {
+                                                return {
+                                                    response: result
+                                                };
+                                            }
+                                            else {
+                                                return result;
+                                            }
+                                        }
+                                        catch (e) {
+                                            return {
+                                                response: rawMessage.content
+                                            };
+                                        }
+                                    })()
+                                }
+                            },
+                        functionCall: rawMessage.additional_kwargs?.function_call !=
+                            null
+                            ? {
+                                name: rawMessage.additional_kwargs
+                                    .function_call.name,
+                                args: (() => {
+                                    try {
+                                        const result = JSON.parse(rawMessage
+                                            .additional_kwargs
+                                            .function_call
+                                            .arguments);
+                                        if (typeof result === 'string') {
+                                            return {
+                                                input: result
+                                            };
+                                        }
+                                        else {
+                                            return result;
+                                        }
+                                    }
+                                    catch (e) {
+                                        return {
+                                            input: rawMessage
+                                                .additional_kwargs
+                                                .function_call
+                                                .arguments
+                                        };
+                                    }
+                                })()
+                            }
+                            : undefined
+                    }
+                ]
+            };
+        }
         const images = rawMessage.additional_kwargs.images;
         const result = {
             role,
@@ -20,14 +90,14 @@ function langchainMessageToGeminiMessage(messages, model) {
                 result.parts.push({
                     inline_data: {
                         // base64 image match type
-                        data: image,
+                        data: image.replace(/^data:image\/\w+;base64,/, ''),
                         mime_type: 'image/jpeg'
                     }
                 });
             }
         }
         return result;
-    });
+    }));
     const result = [];
     for (let i = 0; i < mappedMessage.length; i++) {
         const message = mappedMessage[i];
@@ -45,12 +115,14 @@ function langchainMessageToGeminiMessage(messages, model) {
         if (mappedMessage?.[i + 1]?.role === 'model') {
             continue;
         }
-
-
-
-
+        if (mappedMessage?.[i + 1]?.role === 'user') {
+            result.push({
+                role: 'model',
+                parts: [{ text: 'Okay, what do I need to do?' }]
+            });
+        }
     }
-    if (result[result.length - 1].role === '
+    if (result[result.length - 1].role === 'model') {
         result.push({
             role: 'user',
             parts: [
@@ -60,9 +132,74 @@ function langchainMessageToGeminiMessage(messages, model) {
             ]
         });
     }
+    if (model.includes('vision')) {
+        // format prompts
+        const textBuffer = [];
+        const last = result.pop();
+        for (let i = 0; i < result.length; i++) {
+            const message = result[i];
+            const text = message.parts[0].text;
+            textBuffer.push(`${message.role}: ${text}`);
+        }
+        const lastParts = last.parts;
+        let lastImagesParts = lastParts.filter((part) => part.inline_data?.mime_type ===
+            'image/jpeg');
+        if (lastImagesParts.length < 1) {
+            for (let i = result.length - 1; i >= 0; i--) {
+                const message = result[i];
+                const images = message.parts.filter((part) => part.inline_data?.mime_type ===
+                    'image/jpeg');
+                if (images.length > 0) {
+                    lastImagesParts = images;
+                    break;
+                }
+            }
+        }
+        ;
+        lastParts.filter((part) => part.text !== undefined &&
+            part.text !== null).forEach((part) => {
+            textBuffer.push(`${last.role}: ${part.text}`);
+        });
+        return [
+            {
+                role: 'user',
+                parts: [
+                    {
+                        text: textBuffer.join('\n')
+                    },
+                    ...lastImagesParts
+                ]
+            }
+        ];
+    }
     return result;
 }
 exports.langchainMessageToGeminiMessage = langchainMessageToGeminiMessage;
+function partAsType(part) {
+    return part;
+}
+exports.partAsType = partAsType;
+function formatToolsToGeminiAITools(tools) {
+    if (tools.length < 1) {
+        return undefined;
+    }
+    return tools.map(formatToolToGeminiAITool);
+}
+exports.formatToolsToGeminiAITools = formatToolsToGeminiAITools;
+function formatToolToGeminiAITool(tool) {
+    // eslint-disable-next-line @typescript-eslint/no-explicit-any
+    const parameters = (0, zod_to_json_schema_1.zodToJsonSchema)(tool.schema);
+    // remove unsupported properties
+    delete parameters['$schema'];
+    delete parameters['additionalProperties'];
+    return {
+        name: tool.name,
+        description: tool.description,
+        // any?
+        parameters
+    };
+}
+exports.formatToolToGeminiAITool = formatToolToGeminiAITool;
 function messageTypeToGeminiRole(type) {
     switch (type) {
         case 'system':
@@ -71,6 +208,8 @@ function messageTypeToGeminiRole(type) {
             return 'model';
         case 'human':
             return 'user';
+        case 'function':
+            return 'function';
         default:
             throw new Error(`Unknown message type: ${type}`);
     }
@@ -97,16 +236,16 @@ delta, defaultRole) {
         additional_kwargs = {};
     }
     if (role === 'user') {
-        return new
+        return new messages_1.HumanMessageChunk({ content });
     }
     else if (role === 'assistant') {
-        return new
+        return new messages_1.AIMessageChunk({ content, additional_kwargs });
     }
     else if (role === 'system') {
-        return new
+        return new messages_1.SystemMessageChunk({ content });
     }
     else {
-        return new
+        return new messages_1.ChatMessageChunk({ content, role });
     }
 }
 exports.convertDeltaToMessageChunk = convertDeltaToMessageChunk;
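
The new formatToolToGeminiAITool above converts a LangChain tool's zod schema with zod-to-json-schema and then strips the $schema and additionalProperties keywords it marks as unsupported. A hedged, standalone sketch of the same conversion follows; the weatherSchema example and the toGeminiFunctionDeclaration name are illustrative, not part of the package.

    import { z } from 'zod'
    import { zodToJsonSchema } from 'zod-to-json-schema'

    // Example schema standing in for a StructuredTool's .schema property.
    const weatherSchema = z.object({
        city: z.string().describe('City to look up'),
        unit: z.enum(['celsius', 'fahrenheit']).optional()
    })

    function toGeminiFunctionDeclaration(name: string, description: string, schema: z.ZodTypeAny) {
        const parameters = zodToJsonSchema(schema) as Record<string, unknown>
        // Keywords the adapter removes as unsupported in Gemini function declarations.
        delete parameters['$schema']
        delete parameters['additionalProperties']
        return { name, description, parameters }
    }

    // toGeminiFunctionDeclaration('get_weather', 'Look up the current weather', weatherSchema)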
package/package.json
CHANGED
@@ -1,7 +1,7 @@
 {
   "name": "koishi-plugin-chatluna-google-gemini-adapter",
   "description": "google-gemini adapter for chatluna",
-  "version": "1.0.0-beta.1",
+  "version": "1.0.0-beta.11",
   "main": "lib/index.js",
   "typings": "lib/index.d.ts",
   "files": [
@@ -38,16 +38,18 @@
     "adapter"
   ],
   "dependencies": {
-    "@
-    "
+    "@langchain/core": "^0.2.5",
+    "@streamparser/json": "^0.0.21",
+    "zod": "^3.24.0-canary.20240523T174819",
+    "zod-to-json-schema": "^3.23.0"
   },
   "devDependencies": {
-    "atsc": "^
-    "koishi": "^4.
+    "atsc": "^2.0.1",
+    "koishi": "^4.17.7"
   },
   "peerDependencies": {
-    "koishi": "^4.
-    "koishi-plugin-chatluna": "^1.0.0-beta.
+    "koishi": "^4.17.0",
+    "koishi-plugin-chatluna": "^1.0.0-beta.49"
   },
   "koishi": {
     "description": {
|