koishi-plugin-chatluna-google-gemini-adapter 1.0.0-beta.2 → 1.0.0-beta.4
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/client.js +4 -0
- package/lib/requester.js +14 -2
- package/lib/utils.d.ts +1 -1
- package/lib/utils.js +44 -5
- package/package.json +3 -3
package/lib/client.js
CHANGED
@@ -22,11 +22,15 @@ class GeminiClient extends client_1.PlatformModelAndEmbeddingsClient {
     async refreshModels() {
         try {
             const rawModels = await this._requester.getModels();
+            if (!rawModels.length) {
+                throw new error_1.ChatLunaError(error_1.ChatLunaErrorCode.MODEL_INIT_ERROR, new Error('No model found'));
+            }
             return rawModels
                 .map((model) => model.replace('models/', ''))
                 .map((model) => {
                 return {
                     name: model,
+                    maxTokens: model.includes('vision') ? 12288 : 30720,
                     type: model.includes('embedding')
                         ? types_1.ModelType.embeddings
                         : types_1.ModelType.llm,
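For context, the new refreshModels() logic reads as: guard against an empty model list, strip the models/ prefix, and attach a per-model token limit based on whether the name contains "vision". A minimal TypeScript sketch of that mapping in isolation (the ModelInfo shape and plain Error below are simplifications, not package code; the real code uses ChatLuna's ModelType enum and ChatLunaError):

// Sketch only: the same mapping with simplified types.
interface ModelInfo {
    name: string
    maxTokens: number
    type: 'embeddings' | 'llm' // stand-in for chatluna's ModelType enum
}

function mapRawModels(rawModels: string[]): ModelInfo[] {
    if (!rawModels.length) {
        // real code: new ChatLunaError(ChatLunaErrorCode.MODEL_INIT_ERROR, ...)
        throw new Error('No model found')
    }
    return rawModels
        .map((model) => model.replace('models/', ''))
        .map((model): ModelInfo => ({
            name: model,
            // vision models get the smaller 12288-token limit, text models 30720
            maxTokens: model.includes('vision') ? 12288 : 30720,
            type: model.includes('embedding') ? 'embeddings' : 'llm'
        }))
}

// mapRawModels(['models/gemini-pro', 'models/gemini-pro-vision'])
// -> [{ name: 'gemini-pro', maxTokens: 30720, type: 'llm' },
//     { name: 'gemini-pro-vision', maxTokens: 12288, type: 'llm' }]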
package/lib/requester.js
CHANGED
@@ -19,7 +19,7 @@ class GeminiRequester extends api_1.ModelRequester {
     async *completionStream(params) {
         try {
             const response = await this._post(`models/${params.model}:streamGenerateContent`, {
-                contents: (0, utils_1.langchainMessageToGeminiMessage)(params.input, params.model),
+                contents: await (0, utils_1.langchainMessageToGeminiMessage)(params.input, params.model),
                 safetySettings: [
                     {
                         category: 'HARM_CATEGORY_HARASSMENT',
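The only functional change in this hunk is the added await: langchainMessageToGeminiMessage is now async (see the utils.d.ts change below), so calling it without await would place a pending Promise in the contents field, which serializes to an empty object. A small sketch of why the await matters (the declaration is a stand-in for the real import):

// Sketch, not package code.
declare function langchainMessageToGeminiMessage(
    messages: unknown[],
    model?: string
): Promise<Record<string, unknown>[]>

async function buildBody(messages: unknown[], model: string) {
    // Without `await`, contents would be a Promise and JSON.stringify(body)
    // would emit "contents":{} instead of the converted messages.
    const contents = await langchainMessageToGeminiMessage(messages, model)
    return { contents }
}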
@@ -66,20 +66,28 @@ class GeminiRequester extends api_1.ModelRequester {
                     throw new Error(JSON.stringify(value));
                 }
                 const text = parts[0].text;
+                _1.logger.debug('text', text);
                 if (text) {
                     await writable.write(text);
                 }
             }
         };
         await (0, sse_1.sse)(response, async (rawData) => {
+            _1.logger.debug('chunk', rawData);
             jsonParser.write(rawData);
             return true;
         });
         let content = '';
-        for await (const chunk of iterable) {
+        let isVisionModel = params.model.includes('vision');
+        for await (let chunk of iterable) {
             if (chunk === '[DONE]') {
                 return;
             }
+            // match /w*model:
+            if (isVisionModel && /\s*model:\s*/.test(chunk)) {
+                isVisionModel = false;
+                chunk = chunk.replace(/\s*model:\s*/, '');
+            }
             try {
                 const messageChunk = new schema_1.AIMessageChunk(chunk);
                 messageChunk.content = content + messageChunk.content;
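Besides the two debug logs, this hunk makes the streaming loop tolerant of vision-model output: the first chunk that carries a leading model: prefix has that prefix stripped once, and chunk is now declared with let so it can be reassigned. A standalone sketch of that normalization step (the chunk values are invented for illustration):

// Sketch: strip a leading "model:" prefix from the first matching chunk only.
function stripModelPrefix(chunks: string[], isVisionModel: boolean): string[] {
    let pending = isVisionModel
    return chunks.map((chunk) => {
        if (pending && /\s*model:\s*/.test(chunk)) {
            pending = false
            return chunk.replace(/\s*model:\s*/, '')
        }
        return chunk
    })
}

// stripModelPrefix([' model: Hello', ', world'], true) -> ['Hello', ', world']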
@@ -149,6 +157,10 @@ class GeminiRequester extends api_1.ModelRequester {
             const response = await this._get('models');
             data = await response.text();
             data = JSON.parse(data);
+            if (!data.models || !data.models.length) {
+                throw new Error('error when listing gemini models, Result:' +
+                    JSON.stringify(data));
+            }
             // eslint-disable-next-line @typescript-eslint/no-explicit-any
             return data.models
                 .map((model) => model.name)
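getModels() now fails fast when the models endpoint returns a payload without a models array, instead of letting the later .map throw on undefined. The guard in isolation (response shape simplified to the fields this diff itself relies on):

// Sketch: defensive check before reading the model names.
function extractModelNames(data: { models?: { name: string }[] }): string[] {
    if (!data.models || !data.models.length) {
        throw new Error('error when listing gemini models, Result:' + JSON.stringify(data))
    }
    return data.models.map((model) => model.name)
}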
package/lib/utils.d.ts
CHANGED
@@ -1,5 +1,5 @@
 import { AIMessageChunk, BaseMessage, ChatMessageChunk, HumanMessageChunk, MessageType, SystemMessageChunk } from 'langchain/schema';
 import { ChatCompletionResponseMessage, ChatCompletionResponseMessageRoleEnum } from './types';
-export declare function langchainMessageToGeminiMessage(messages: BaseMessage[], model?: string): ChatCompletionResponseMessage[];
+export declare function langchainMessageToGeminiMessage(messages: BaseMessage[], model?: string): Promise<ChatCompletionResponseMessage[]>;
 export declare function messageTypeToGeminiRole(type: MessageType): ChatCompletionResponseMessageRoleEnum;
 export declare function convertDeltaToMessageChunk(delta: Record<string, any>, defaultRole?: ChatCompletionResponseMessageRoleEnum): HumanMessageChunk | AIMessageChunk | SystemMessageChunk | ChatMessageChunk;
package/lib/utils.js
CHANGED
@@ -2,9 +2,8 @@
 Object.defineProperty(exports, "__esModule", { value: true });
 exports.convertDeltaToMessageChunk = exports.messageTypeToGeminiRole = exports.langchainMessageToGeminiMessage = void 0;
 const schema_1 = require("langchain/schema");
-function langchainMessageToGeminiMessage(messages, model) {
-
-    const mappedMessage = messages.map((rawMessage) => {
+async function langchainMessageToGeminiMessage(messages, model) {
+    const mappedMessage = await Promise.all(messages.map(async (rawMessage) => {
         const role = messageTypeToGeminiRole(rawMessage._getType());
         const images = rawMessage.additional_kwargs.images;
         const result = {
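With the converter now async, the per-message mapping runs through Promise.all, which keeps the converted messages in their original order even when individual callbacks await. The pattern in miniature (names are placeholders, not package identifiers):

// Sketch: Promise.all over an async map preserves input order.
async function convertAll<T, R>(messages: T[], convert: (m: T) => Promise<R>): Promise<R[]> {
    return Promise.all(messages.map(async (message) => convert(message)))
}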
@@ -20,14 +19,14 @@ function langchainMessageToGeminiMessage(messages, model) {
                 result.parts.push({
                     inline_data: {
                         // base64 image match type
-                        data: image,
+                        data: image.replace(/^data:image\/\w+;base64,/, ''),
                         mime_type: 'image/jpeg'
                     }
                 });
             }
         }
         return result;
-    });
+    }));
     const result = [];
     for (let i = 0; i < mappedMessage.length; i++) {
         const message = mappedMessage[i];
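The image payload is also normalized here: if a stored image is a full data URI, the data:image/...;base64, prefix is stripped so that inline_data.data carries only raw base64, which is what the Gemini API expects. The regex on a sample value:

// Sketch: the same prefix-stripping regex on a hypothetical input.
const image = 'data:image/png;base64,iVBORw0KGgo='
const data = image.replace(/^data:image\/\w+;base64,/, '')
// data === 'iVBORw0KGgo=' ; already-bare base64 strings pass through unchanged

Note that mime_type stays hard-coded to 'image/jpeg' regardless of the original prefix.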
@@ -60,6 +59,46 @@ function langchainMessageToGeminiMessage(messages, model) {
             ]
         });
     }
+    if (model.includes('vision')) {
+        // format prompts
+        const textBuffer = [];
+        const last = result.pop();
+        for (let i = 0; i < result.length; i++) {
+            const message = result[i];
+            const text = message.parts[0].text;
+            textBuffer.push(`${message.role}: ${text}`);
+        }
+        const lastParts = last.parts;
+        let lastImagesParts = lastParts.filter((part) => part.inline_data?.mime_type ===
+            'image/jpeg');
+        if (lastImagesParts.length < 1) {
+            for (let i = result.length - 1; i >= 0; i--) {
+                const message = result[i];
+                const images = message.parts.filter((part) => part.inline_data?.mime_type ===
+                    'image/jpeg');
+                if (images.length > 0) {
+                    lastImagesParts = images;
+                    break;
+                }
+            }
+        }
+        ;
+        lastParts.filter((part) => part.text !== undefined &&
+            part.text !== null).forEach((part) => {
+            textBuffer.push(`${last.role}: ${part.text}`);
+        });
+        return [
+            {
+                role: 'user',
+                parts: [
+                    {
+                        text: textBuffer.join('\n')
+                    },
+                    ...lastImagesParts
+                ]
+            }
+        ];
+    }
     return result;
 }
 exports.langchainMessageToGeminiMessage = langchainMessageToGeminiMessage;
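The new vision branch flattens the whole converted history into a single user turn: earlier messages are serialized into a role-prefixed text transcript, the images are taken from the last message (falling back to the most recent earlier message that has any), and one combined message is returned, presumably because gemini-pro-vision does not support multi-turn chat with images. A reduced sketch with simplified part/message shapes (types are illustrative, not the package's):

// Sketch only: same idea as the vision branch above, with simplified types.
interface Part { text?: string; inline_data?: { mime_type: string; data: string } }
interface Msg { role: string; parts: Part[] }

function flattenForVision(history: Msg[]): Msg[] {
    const textBuffer: string[] = []
    const last = history[history.length - 1]
    for (const message of history.slice(0, -1)) {
        textBuffer.push(`${message.role}: ${message.parts[0].text}`)
    }
    // prefer images attached to the last message, otherwise search backwards
    let images = last.parts.filter((p) => p.inline_data?.mime_type === 'image/jpeg')
    if (images.length < 1) {
        for (let i = history.length - 2; i >= 0; i--) {
            const found = history[i].parts.filter((p) => p.inline_data?.mime_type === 'image/jpeg')
            if (found.length > 0) {
                images = found
                break
            }
        }
    }
    last.parts
        .filter((p) => p.text !== undefined && p.text !== null)
        .forEach((p) => textBuffer.push(`${last.role}: ${p.text}`))
    return [{ role: 'user', parts: [{ text: textBuffer.join('\n') }, ...images] }]
}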
package/package.json
CHANGED
@@ -1,7 +1,7 @@
 {
     "name": "koishi-plugin-chatluna-google-gemini-adapter",
     "description": "google-gemini adapter for chatluna",
-    "version": "1.0.0-beta.2",
+    "version": "1.0.0-beta.4",
     "main": "lib/index.js",
     "typings": "lib/index.d.ts",
     "files": [
@@ -43,11 +43,11 @@
     },
     "devDependencies": {
         "atsc": "^1.2.2",
-        "koishi": "^4.16.
+        "koishi": "^4.16.3"
     },
     "peerDependencies": {
         "koishi": "^4.16.0",
-        "koishi-plugin-chatluna": "^1.0.0-beta.
+        "koishi-plugin-chatluna": "^1.0.0-beta.31"
     },
     "koishi": {
         "description": {