koishi-plugin-chatluna-google-gemini-adapter 1.0.0-beta.13 → 1.0.0-beta.14
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/client.d.ts +4 -4
- package/lib/index.cjs +632 -0
- package/lib/index.d.ts +104 -6
- package/lib/index.mjs +619 -0
- package/lib/requester.d.ts +2 -2
- package/lib/utils.d.ts +1 -1
- package/package.json +18 -8
- package/lib/client.js +0 -80
- package/lib/index.js +0 -63
- package/lib/requester.js +0 -261
- package/lib/types.js +0 -2
- package/lib/utils.js +0 -252
package/lib/client.js
DELETED
|
@@ -1,80 +0,0 @@
|
|
|
1
|
-
"use strict";
|
|
2
|
-
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
-
exports.GeminiClient = void 0;
|
|
4
|
-
const client_1 = require("koishi-plugin-chatluna/lib/llm-core/platform/client");
|
|
5
|
-
const model_1 = require("koishi-plugin-chatluna/lib/llm-core/platform/model");
|
|
6
|
-
const types_1 = require("koishi-plugin-chatluna/lib/llm-core/platform/types");
|
|
7
|
-
const error_1 = require("koishi-plugin-chatluna/lib/utils/error");
|
|
8
|
-
const requester_1 = require("./requester");
|
|
9
|
-
class GeminiClient extends client_1.PlatformModelAndEmbeddingsClient {
|
|
10
|
-
_config;
|
|
11
|
-
platform = 'gemini';
|
|
12
|
-
_requester;
|
|
13
|
-
_models;
|
|
14
|
-
constructor(ctx, _config, clientConfig) {
|
|
15
|
-
super(ctx, clientConfig);
|
|
16
|
-
this._config = _config;
|
|
17
|
-
this._requester = new requester_1.GeminiRequester(clientConfig);
|
|
18
|
-
}
|
|
19
|
-
async init() {
|
|
20
|
-
await this.getModels();
|
|
21
|
-
}
|
|
22
|
-
async refreshModels() {
|
|
23
|
-
try {
|
|
24
|
-
const rawModels = await this._requester.getModels();
|
|
25
|
-
if (!rawModels.length) {
|
|
26
|
-
throw new error_1.ChatLunaError(error_1.ChatLunaErrorCode.MODEL_INIT_ERROR, new Error('No model found'));
|
|
27
|
-
}
|
|
28
|
-
return rawModels
|
|
29
|
-
.map((model) => model.replace('models/', ''))
|
|
30
|
-
.map((model) => {
|
|
31
|
-
return {
|
|
32
|
-
name: model,
|
|
33
|
-
maxTokens: model.includes('vision') ? 12288 : 30720,
|
|
34
|
-
type: model.includes('embedding')
|
|
35
|
-
? types_1.ModelType.embeddings
|
|
36
|
-
: types_1.ModelType.llm,
|
|
37
|
-
functionCall: !model.includes('vision'),
|
|
38
|
-
supportMode: ['all']
|
|
39
|
-
};
|
|
40
|
-
});
|
|
41
|
-
}
|
|
42
|
-
catch (e) {
|
|
43
|
-
throw new error_1.ChatLunaError(error_1.ChatLunaErrorCode.MODEL_INIT_ERROR, e);
|
|
44
|
-
}
|
|
45
|
-
}
|
|
46
|
-
async getModels() {
|
|
47
|
-
if (this._models) {
|
|
48
|
-
return Object.values(this._models);
|
|
49
|
-
}
|
|
50
|
-
const models = await this.refreshModels();
|
|
51
|
-
this._models = {};
|
|
52
|
-
for (const model of models) {
|
|
53
|
-
this._models[model.name] = model;
|
|
54
|
-
}
|
|
55
|
-
}
|
|
56
|
-
_createModel(model) {
|
|
57
|
-
const info = this._models[model];
|
|
58
|
-
if (info == null) {
|
|
59
|
-
throw new error_1.ChatLunaError(error_1.ChatLunaErrorCode.MODEL_NOT_FOUND);
|
|
60
|
-
}
|
|
61
|
-
if (info.type === types_1.ModelType.llm) {
|
|
62
|
-
return new model_1.ChatLunaChatModel({
|
|
63
|
-
modelInfo: info,
|
|
64
|
-
requester: this._requester,
|
|
65
|
-
model,
|
|
66
|
-
maxTokens: this._config.maxTokens,
|
|
67
|
-
timeout: this._config.timeout,
|
|
68
|
-
temperature: this._config.temperature,
|
|
69
|
-
maxRetries: this._config.maxRetries,
|
|
70
|
-
llmType: 'gemini'
|
|
71
|
-
});
|
|
72
|
-
}
|
|
73
|
-
return new model_1.ChatLunaEmbeddings({
|
|
74
|
-
client: this._requester,
|
|
75
|
-
model,
|
|
76
|
-
maxRetries: this._config.maxRetries
|
|
77
|
-
});
|
|
78
|
-
}
|
|
79
|
-
}
|
|
80
|
-
exports.GeminiClient = GeminiClient;
|
package/lib/index.js
DELETED
|
@@ -1,63 +0,0 @@
|
|
|
1
|
-
"use strict";
|
|
2
|
-
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
-
exports.name = exports.inject = exports.Config = exports.apply = exports.logger = void 0;
|
|
4
|
-
const chat_1 = require("koishi-plugin-chatluna/lib/services/chat");
|
|
5
|
-
const koishi_1 = require("koishi");
|
|
6
|
-
const client_1 = require("./client");
|
|
7
|
-
const logger_1 = require("koishi-plugin-chatluna/lib/utils/logger");
|
|
8
|
-
function apply(ctx, config) {
|
|
9
|
-
const plugin = new chat_1.ChatLunaPlugin(ctx, config, 'gemini');
|
|
10
|
-
exports.logger = (0, logger_1.createLogger)(ctx, 'chatluna-gemini-adapter');
|
|
11
|
-
ctx.on('ready', async () => {
|
|
12
|
-
await plugin.registerToService();
|
|
13
|
-
await plugin.parseConfig((config) => {
|
|
14
|
-
return config.apiKeys.map(([apiKey, apiEndpoint]) => {
|
|
15
|
-
return {
|
|
16
|
-
apiKey,
|
|
17
|
-
apiEndpoint,
|
|
18
|
-
platform: 'gemini',
|
|
19
|
-
chatLimit: config.chatTimeLimit,
|
|
20
|
-
timeout: config.timeout,
|
|
21
|
-
maxRetries: config.maxRetries,
|
|
22
|
-
concurrentMaxSize: config.chatConcurrentMaxSize
|
|
23
|
-
};
|
|
24
|
-
});
|
|
25
|
-
});
|
|
26
|
-
await plugin.registerClient((_, clientConfig) => new client_1.GeminiClient(ctx, config, clientConfig));
|
|
27
|
-
await plugin.initClients();
|
|
28
|
-
});
|
|
29
|
-
}
|
|
30
|
-
exports.apply = apply;
|
|
31
|
-
exports.Config = koishi_1.Schema.intersect([
|
|
32
|
-
chat_1.ChatLunaPlugin.Config,
|
|
33
|
-
koishi_1.Schema.object({
|
|
34
|
-
apiKeys: koishi_1.Schema.array(koishi_1.Schema.tuple([
|
|
35
|
-
koishi_1.Schema.string()
|
|
36
|
-
.role('secret')
|
|
37
|
-
.description('Gemini 的 API Key')
|
|
38
|
-
.required(),
|
|
39
|
-
koishi_1.Schema.string()
|
|
40
|
-
.description('请求 Gemini API 的地址')
|
|
41
|
-
.default('https://generativelanguage.googleapis.com/v1beta')
|
|
42
|
-
]))
|
|
43
|
-
.description('Gemini 的 API Key 和请求地址列表')
|
|
44
|
-
.default([['', 'https://generativelanguage.googleapis.com/v1beta']])
|
|
45
|
-
}).description('请求设置'),
|
|
46
|
-
koishi_1.Schema.object({
|
|
47
|
-
maxTokens: koishi_1.Schema.number()
|
|
48
|
-
.description('回复的最大 Token 数(16~32800,必须是16的倍数)(注意如果你目前使用的模型的最大 Token 为 8000 及以上的话才建议设置超过 512 token)')
|
|
49
|
-
.min(16)
|
|
50
|
-
.max(128000)
|
|
51
|
-
.step(16)
|
|
52
|
-
.default(1024),
|
|
53
|
-
temperature: koishi_1.Schema.percent()
|
|
54
|
-
.description('回复温度,越高越随机')
|
|
55
|
-
.min(0)
|
|
56
|
-
.max(1)
|
|
57
|
-
.step(0.1)
|
|
58
|
-
.default(0.8)
|
|
59
|
-
}).description('模型设置')
|
|
60
|
-
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
|
61
|
-
]);
|
|
62
|
-
exports.inject = ['chatluna'];
|
|
63
|
-
exports.name = 'chatluna-google-gemini-adapter';
|
package/lib/requester.js
DELETED
|
@@ -1,261 +0,0 @@
|
|
|
1
|
-
"use strict";
|
|
2
|
-
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
-
exports.GeminiRequester = void 0;
|
|
4
|
-
const messages_1 = require("@langchain/core/messages");
|
|
5
|
-
const outputs_1 = require("@langchain/core/outputs");
|
|
6
|
-
const json_1 = require("@streamparser/json");
|
|
7
|
-
const api_1 = require("koishi-plugin-chatluna/lib/llm-core/platform/api");
|
|
8
|
-
const error_1 = require("koishi-plugin-chatluna/lib/utils/error");
|
|
9
|
-
const request_1 = require("koishi-plugin-chatluna/lib/utils/request");
|
|
10
|
-
const sse_1 = require("koishi-plugin-chatluna/lib/utils/sse");
|
|
11
|
-
const stream_1 = require("koishi-plugin-chatluna/lib/utils/stream");
|
|
12
|
-
const _1 = require(".");
|
|
13
|
-
const utils_1 = require("./utils");
|
|
14
|
-
class GeminiRequester extends api_1.ModelRequester {
|
|
15
|
-
_config;
|
|
16
|
-
constructor(_config) {
|
|
17
|
-
super();
|
|
18
|
-
this._config = _config;
|
|
19
|
-
}
|
|
20
|
-
async *completionStream(params) {
|
|
21
|
-
try {
|
|
22
|
-
const response = await this._post(`models/${params.model}:streamGenerateContent`, {
|
|
23
|
-
contents: await (0, utils_1.langchainMessageToGeminiMessage)(params.input, params.model),
|
|
24
|
-
safetySettings: [
|
|
25
|
-
{
|
|
26
|
-
category: 'HARM_CATEGORY_HARASSMENT',
|
|
27
|
-
threshold: 'BLOCK_NONE'
|
|
28
|
-
},
|
|
29
|
-
{
|
|
30
|
-
category: 'HARM_CATEGORY_HATE_SPEECH',
|
|
31
|
-
threshold: 'BLOCK_NONE'
|
|
32
|
-
},
|
|
33
|
-
{
|
|
34
|
-
category: 'HARM_CATEGORY_SEXUALLY_EXPLICIT',
|
|
35
|
-
threshold: 'BLOCK_NONE'
|
|
36
|
-
},
|
|
37
|
-
{
|
|
38
|
-
category: 'HARM_CATEGORY_DANGEROUS_CONTENT',
|
|
39
|
-
threshold: 'BLOCK_NONE'
|
|
40
|
-
}
|
|
41
|
-
],
|
|
42
|
-
generationConfig: {
|
|
43
|
-
stopSequences: params.stop,
|
|
44
|
-
temperature: params.temperature,
|
|
45
|
-
maxOutputTokens: params.model.includes('vision')
|
|
46
|
-
? undefined
|
|
47
|
-
: params.maxTokens,
|
|
48
|
-
topP: params.topP
|
|
49
|
-
},
|
|
50
|
-
tools: !params.model.includes('vision') && params.tools != null
|
|
51
|
-
? {
|
|
52
|
-
functionDeclarations: (0, utils_1.formatToolsToGeminiAITools)(params.tools)
|
|
53
|
-
}
|
|
54
|
-
: undefined
|
|
55
|
-
}, {
|
|
56
|
-
signal: params.signal
|
|
57
|
-
});
|
|
58
|
-
let errorCount = 0;
|
|
59
|
-
const stream = new TransformStream();
|
|
60
|
-
const iterable = (0, stream_1.readableStreamToAsyncIterable)(stream.readable);
|
|
61
|
-
const jsonParser = new json_1.JSONParser();
|
|
62
|
-
const writable = stream.writable.getWriter();
|
|
63
|
-
jsonParser.onEnd = async () => {
|
|
64
|
-
await writable.close();
|
|
65
|
-
};
|
|
66
|
-
jsonParser.onValue = async ({ value }) => {
|
|
67
|
-
const transformValue = value;
|
|
68
|
-
if (transformValue.candidates && transformValue.candidates[0]) {
|
|
69
|
-
const parts = transformValue.candidates[0]?.content?.parts;
|
|
70
|
-
if (parts == null || parts.length < 1) {
|
|
71
|
-
throw new Error(JSON.stringify(value));
|
|
72
|
-
}
|
|
73
|
-
for (const part of parts) {
|
|
74
|
-
await writable.write(part);
|
|
75
|
-
}
|
|
76
|
-
}
|
|
77
|
-
};
|
|
78
|
-
await (0, sse_1.sse)(response, async (rawData) => {
|
|
79
|
-
jsonParser.write(rawData);
|
|
80
|
-
return true;
|
|
81
|
-
}, 0);
|
|
82
|
-
let content = '';
|
|
83
|
-
let isOldVisionModel = params.model.includes('vision');
|
|
84
|
-
const functionCall = {
|
|
85
|
-
name: '',
|
|
86
|
-
args: '',
|
|
87
|
-
arguments: ''
|
|
88
|
-
};
|
|
89
|
-
for await (const chunk of iterable) {
|
|
90
|
-
const messagePart = (0, utils_1.partAsType)(chunk);
|
|
91
|
-
const chatFunctionCallingPart = (0, utils_1.partAsType)(chunk);
|
|
92
|
-
if (messagePart.text) {
|
|
93
|
-
if (params.tools != null) {
|
|
94
|
-
content = messagePart.text;
|
|
95
|
-
}
|
|
96
|
-
else {
|
|
97
|
-
content += messagePart.text;
|
|
98
|
-
}
|
|
99
|
-
// match /w*model:
|
|
100
|
-
if (isOldVisionModel && /\s*model:\s*/.test(content)) {
|
|
101
|
-
isOldVisionModel = false;
|
|
102
|
-
content = messagePart.text.replace(/\s*model:\s*/, '');
|
|
103
|
-
}
|
|
104
|
-
}
|
|
105
|
-
const deltaFunctionCall = chatFunctionCallingPart.functionCall;
|
|
106
|
-
if (deltaFunctionCall) {
|
|
107
|
-
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
|
108
|
-
let args = deltaFunctionCall.args?.input ?? deltaFunctionCall.args;
|
|
109
|
-
try {
|
|
110
|
-
let parsedArgs = JSON.parse(args);
|
|
111
|
-
if (typeof parsedArgs !== 'string') {
|
|
112
|
-
args = parsedArgs;
|
|
113
|
-
}
|
|
114
|
-
parsedArgs = JSON.parse(args);
|
|
115
|
-
if (typeof parsedArgs !== 'string') {
|
|
116
|
-
args = parsedArgs;
|
|
117
|
-
}
|
|
118
|
-
// eslint-disable-next-line @typescript-eslint/no-unused-vars
|
|
119
|
-
}
|
|
120
|
-
catch (e) { }
|
|
121
|
-
functionCall.args = JSON.stringify(args);
|
|
122
|
-
functionCall.name = deltaFunctionCall.name;
|
|
123
|
-
functionCall.arguments = deltaFunctionCall.args;
|
|
124
|
-
}
|
|
125
|
-
try {
|
|
126
|
-
const messageChunk = new messages_1.AIMessageChunk(content);
|
|
127
|
-
messageChunk.additional_kwargs = {
|
|
128
|
-
function_call: functionCall.name.length > 0
|
|
129
|
-
? {
|
|
130
|
-
name: functionCall.name,
|
|
131
|
-
arguments: functionCall.args,
|
|
132
|
-
args: functionCall.arguments
|
|
133
|
-
}
|
|
134
|
-
: undefined
|
|
135
|
-
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
|
136
|
-
};
|
|
137
|
-
messageChunk.content = content;
|
|
138
|
-
const generationChunk = new outputs_1.ChatGenerationChunk({
|
|
139
|
-
message: messageChunk,
|
|
140
|
-
text: messageChunk.content
|
|
141
|
-
});
|
|
142
|
-
yield generationChunk;
|
|
143
|
-
content = messageChunk.content;
|
|
144
|
-
}
|
|
145
|
-
catch (e) {
|
|
146
|
-
if (errorCount > 5) {
|
|
147
|
-
_1.logger.error('error with chunk', chunk);
|
|
148
|
-
throw new error_1.ChatLunaError(error_1.ChatLunaErrorCode.API_REQUEST_FAILED, e);
|
|
149
|
-
}
|
|
150
|
-
else {
|
|
151
|
-
errorCount++;
|
|
152
|
-
continue;
|
|
153
|
-
}
|
|
154
|
-
}
|
|
155
|
-
}
|
|
156
|
-
}
|
|
157
|
-
catch (e) {
|
|
158
|
-
if (e instanceof error_1.ChatLunaError) {
|
|
159
|
-
throw e;
|
|
160
|
-
}
|
|
161
|
-
else {
|
|
162
|
-
throw new error_1.ChatLunaError(error_1.ChatLunaErrorCode.API_REQUEST_FAILED, e);
|
|
163
|
-
}
|
|
164
|
-
}
|
|
165
|
-
}
|
|
166
|
-
async embeddings(params) {
|
|
167
|
-
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
|
168
|
-
let data;
|
|
169
|
-
try {
|
|
170
|
-
const response = await this._post(`models/${params.model}:embedContent`, {
|
|
171
|
-
model: `models/${params.model}`,
|
|
172
|
-
content: {
|
|
173
|
-
parts: [
|
|
174
|
-
{
|
|
175
|
-
text: params.input
|
|
176
|
-
}
|
|
177
|
-
]
|
|
178
|
-
}
|
|
179
|
-
});
|
|
180
|
-
data = await response.text();
|
|
181
|
-
data = JSON.parse(data);
|
|
182
|
-
if (data.embedding && data.embedding.values?.length > 0) {
|
|
183
|
-
return data.embedding.values;
|
|
184
|
-
}
|
|
185
|
-
throw new Error('error when calling gemini embeddings, Result: ' +
|
|
186
|
-
JSON.stringify(data));
|
|
187
|
-
}
|
|
188
|
-
catch (e) {
|
|
189
|
-
const error = new Error('error when calling gemini embeddings, Result: ' +
|
|
190
|
-
JSON.stringify(data));
|
|
191
|
-
error.stack = e.stack;
|
|
192
|
-
error.cause = e.cause;
|
|
193
|
-
_1.logger.debug(e);
|
|
194
|
-
throw new error_1.ChatLunaError(error_1.ChatLunaErrorCode.API_REQUEST_FAILED, error);
|
|
195
|
-
}
|
|
196
|
-
}
|
|
197
|
-
async getModels() {
|
|
198
|
-
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
|
199
|
-
let data;
|
|
200
|
-
try {
|
|
201
|
-
const response = await this._get('models');
|
|
202
|
-
data = await response.text();
|
|
203
|
-
data = JSON.parse(data);
|
|
204
|
-
if (!data.models || !data.models.length) {
|
|
205
|
-
throw new Error('error when listing gemini models, Result:' +
|
|
206
|
-
JSON.stringify(data));
|
|
207
|
-
}
|
|
208
|
-
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
|
209
|
-
return data.models
|
|
210
|
-
.map((model) => model.name)
|
|
211
|
-
.filter((model) => model.includes('gemini') || model.includes('embedding'));
|
|
212
|
-
}
|
|
213
|
-
catch (e) {
|
|
214
|
-
const error = new Error('error when listing gemini models, Result: ' +
|
|
215
|
-
JSON.stringify(data));
|
|
216
|
-
error.stack = e.stack;
|
|
217
|
-
error.cause = e.cause;
|
|
218
|
-
throw error;
|
|
219
|
-
}
|
|
220
|
-
}
|
|
221
|
-
// eslint-disable-next-line @typescript-eslint/no-explicit-any
|
|
222
|
-
_post(url, data, params = {}) {
|
|
223
|
-
const requestUrl = this._concatUrl(url);
|
|
224
|
-
for (const key in data) {
|
|
225
|
-
if (data[key] === undefined) {
|
|
226
|
-
delete data[key];
|
|
227
|
-
}
|
|
228
|
-
}
|
|
229
|
-
const body = JSON.stringify(data);
|
|
230
|
-
return (0, request_1.chatLunaFetch)(requestUrl, {
|
|
231
|
-
body,
|
|
232
|
-
headers: this._buildHeaders(),
|
|
233
|
-
method: 'POST',
|
|
234
|
-
...params
|
|
235
|
-
});
|
|
236
|
-
}
|
|
237
|
-
_get(url) {
|
|
238
|
-
const requestUrl = this._concatUrl(url);
|
|
239
|
-
return (0, request_1.chatLunaFetch)(requestUrl, {
|
|
240
|
-
method: 'GET',
|
|
241
|
-
headers: this._buildHeaders()
|
|
242
|
-
});
|
|
243
|
-
}
|
|
244
|
-
_concatUrl(url) {
|
|
245
|
-
const apiEndPoint = this._config.apiEndpoint;
|
|
246
|
-
// match the apiEndPoint ends with '/v1' or '/v1/' using regex
|
|
247
|
-
if (apiEndPoint.endsWith('/')) {
|
|
248
|
-
return apiEndPoint + url + `?key=${this._config.apiKey}`;
|
|
249
|
-
}
|
|
250
|
-
return apiEndPoint + '/' + url + `?key=${this._config.apiKey}`;
|
|
251
|
-
}
|
|
252
|
-
_buildHeaders() {
|
|
253
|
-
return {
|
|
254
|
-
/* Authorization: `Bearer ${this._config.apiKey}`, */
|
|
255
|
-
'Content-Type': 'application/json'
|
|
256
|
-
};
|
|
257
|
-
}
|
|
258
|
-
async init() { }
|
|
259
|
-
async dispose() { }
|
|
260
|
-
}
|
|
261
|
-
exports.GeminiRequester = GeminiRequester;
|
package/lib/types.js
DELETED
package/lib/utils.js
DELETED
|
@@ -1,252 +0,0 @@
|
|
|
1
|
-
"use strict";
|
|
2
|
-
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
-
exports.convertDeltaToMessageChunk = exports.messageTypeToGeminiRole = exports.formatToolToGeminiAITool = exports.formatToolsToGeminiAITools = exports.partAsType = exports.langchainMessageToGeminiMessage = void 0;
|
|
4
|
-
/* eslint-disable @typescript-eslint/no-explicit-any */
|
|
5
|
-
const messages_1 = require("@langchain/core/messages");
|
|
6
|
-
const zod_to_json_schema_1 = require("zod-to-json-schema");
|
|
7
|
-
/**
 * Converts LangChain messages into the Gemini `contents` array.
 *
 * Handles three concerns:
 *  1. function-role messages / messages carrying a function_call become
 *     Gemini `functionResponse` / `functionCall` parts;
 *  2. system messages are rewritten as user messages (Gemini has no system
 *     role here), with a synthetic model acknowledgement inserted so user
 *     turns never repeat back-to-back;
 *  3. for vision models, the whole conversation is flattened into a single
 *     user message (transcript text + the most recent image parts), since
 *     those models accept only one turn.
 *
 * @param messages LangChain BaseMessage list, oldest first.
 * @param model    Gemini model name; switches vision/image handling.
 * @returns array of { role, parts } entries in Gemini wire format.
 */
async function langchainMessageToGeminiMessage(messages, model) {
    const mappedMessage = await Promise.all(messages.map(async (rawMessage) => {
        const role = messageTypeToGeminiRole(rawMessage._getType());
        if (role === 'function' ||
            rawMessage.additional_kwargs?.function_call != null) {
            return {
                role: 'function',
                parts: [
                    {
                        // Exactly one of functionResponse / functionCall is set,
                        // depending on whether this message *makes* a call or
                        // *answers* one.
                        functionResponse: rawMessage.additional_kwargs?.function_call !=
                            null
                            ? undefined
                            : {
                                name: rawMessage.name,
                                response: {
                                    name: rawMessage.name,
                                    // Prefer structured JSON content; fall back to
                                    // wrapping the raw string.
                                    content: (() => {
                                        try {
                                            const result = JSON.parse(rawMessage.content);
                                            if (typeof result ===
                                                'string') {
                                                return {
                                                    response: result
                                                };
                                            }
                                            else {
                                                return result;
                                            }
                                        }
                                        catch (e) {
                                            return {
                                                response: rawMessage.content
                                            };
                                        }
                                    })()
                                }
                            },
                        functionCall: rawMessage.additional_kwargs?.function_call !=
                            null
                            ? {
                                name: rawMessage.additional_kwargs
                                    .function_call.name,
                                // Same parse-or-wrap fallback for call arguments.
                                args: (() => {
                                    try {
                                        const result = JSON.parse(rawMessage
                                            .additional_kwargs
                                            .function_call
                                            .arguments);
                                        if (typeof result === 'string') {
                                            return {
                                                input: result
                                            };
                                        }
                                        else {
                                            return result;
                                        }
                                    }
                                    catch (e) {
                                        return {
                                            input: rawMessage
                                                .additional_kwargs
                                                .function_call
                                                .arguments
                                        };
                                    }
                                })()
                            }
                            : undefined
                    }
                ]
            };
        }
        const images = rawMessage.additional_kwargs.images;
        const result = {
            role,
            parts: [
                {
                    text: rawMessage.content
                }
            ]
        };
        // Only image-capable models get inline image parts.
        if ((model.includes('vision') || model.includes('gemini-1.5')) &&
            images != null) {
            for (const image of images) {
                result.parts.push({
                    inline_data: {
                        // base64 image match type
                        data: image.replace(/^data:image\/\w+;base64,/, ''),
                        // NOTE(review): mime type is hard-coded to JPEG even if the
                        // data URI declared another image type — confirm upstream.
                        mime_type: 'image/jpeg'
                    }
                });
            }
        }
        return result;
    }));
    const result = [];
    for (let i = 0; i < mappedMessage.length; i++) {
        const message = mappedMessage[i];
        if (message.role !== 'system') {
            result.push(message);
            continue;
        }
        /* if (removeSystemMessage) {
            continue
        } */
        // Gemini has no system role: demote to user, then keep the strict
        // user/model alternation by inserting a stub model reply when the
        // next message is another user turn.
        result.push({
            role: 'user',
            parts: message.parts
        });
        if (mappedMessage?.[i + 1]?.role === 'model') {
            continue;
        }
        if (mappedMessage?.[i + 1]?.role === 'user') {
            result.push({
                role: 'model',
                parts: [{ text: 'Okay, what do I need to do?' }]
            });
        }
    }
    // Conversations must end on a user turn; append a continuation prompt.
    if (result[result.length - 1].role === 'model') {
        result.push({
            role: 'user',
            parts: [
                {
                    text: 'Continue what I said to you last message. Follow these instructions.'
                }
            ]
        });
    }
    if (model.includes('vision')) {
        // format prompts
        // Flatten the history into one "role: text" transcript plus the most
        // recent images (vision models take a single user message).
        const textBuffer = [];
        const last = result.pop();
        for (let i = 0; i < result.length; i++) {
            const message = result[i];
            const text = message.parts[0].text;
            textBuffer.push(`${message.role}: ${text}`);
        }
        const lastParts = last.parts;
        let lastImagesParts = lastParts.filter((part) => part.inline_data?.mime_type ===
            'image/jpeg');
        // No images on the final turn: search backwards for the newest ones.
        if (lastImagesParts.length < 1) {
            for (let i = result.length - 1; i >= 0; i--) {
                const message = result[i];
                const images = message.parts.filter((part) => part.inline_data?.mime_type ===
                    'image/jpeg');
                if (images.length > 0) {
                    lastImagesParts = images;
                    break;
                }
            }
        }
        ;
        lastParts.filter((part) => part.text !== undefined &&
            part.text !== null).forEach((part) => {
            textBuffer.push(`${last.role}: ${part.text}`);
        });
        return [
            {
                role: 'user',
                parts: [
                    {
                        text: textBuffer.join('\n')
                    },
                    ...lastImagesParts
                ]
            }
        ];
    }
    return result;
}
exports.langchainMessageToGeminiMessage = langchainMessageToGeminiMessage;
|
|
179
|
-
/**
 * Narrows a raw streamed part to a more specific part shape.
 * Pure identity at runtime — exists solely as a typed cast helper.
 */
function partAsType(part) { return part; }
|
|
182
|
-
exports.partAsType = partAsType;
|
|
183
|
-
/**
 * Converts a list of LangChain tools into Gemini function declarations.
 * Returns undefined for an empty list so the `tools` field can be omitted
 * from the request body entirely.
 */
function formatToolsToGeminiAITools(tools) {
    if (tools.length === 0) {
        return undefined;
    }
    return tools.map((tool) => formatToolToGeminiAITool(tool));
}
|
|
189
|
-
exports.formatToolsToGeminiAITools = formatToolsToGeminiAITools;
|
|
190
|
-
/**
 * Converts one LangChain structured tool into a Gemini function
 * declaration: name, description, and a JSON-schema parameter object
 * derived from the tool's zod schema.
 */
function formatToolToGeminiAITool(tool) {
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    const parameters = (0, zod_to_json_schema_1.zodToJsonSchema)(tool.schema);
    // Gemini's schema dialect rejects these JSON-Schema keywords.
    delete parameters['$schema'];
    delete parameters['additionalProperties'];
    return {
        name: tool.name,
        description: tool.description,
        parameters
    };
}
exports.formatToolToGeminiAITool = formatToolToGeminiAITool;
|
|
204
|
-
/**
 * Maps a LangChain message type to the role string used by the Gemini API.
 * @throws {Error} when the message type has no Gemini equivalent.
 */
function messageTypeToGeminiRole(type) {
    const roles = new Map([
        ['system', 'system'],
        ['ai', 'model'],
        ['human', 'user'],
        ['function', 'function']
    ]);
    const role = roles.get(type);
    if (role === undefined) {
        throw new Error(`Unknown message type: ${type}`);
    }
    return role;
}
|
|
218
|
-
exports.messageTypeToGeminiRole = messageTypeToGeminiRole;
|
|
219
|
-
/**
 * Converts a raw streamed delta into the matching LangChain message chunk.
 * Function/tool-call payloads travel through as additional_kwargs on
 * assistant chunks; unknown roles fall back to a generic ChatMessageChunk.
 */
function convertDeltaToMessageChunk(
// eslint-disable-next-line @typescript-eslint/no-explicit-any
delta, defaultRole) {
    const role = delta.role ?? defaultRole;
    const content = delta.content ?? '';
    // eslint-disable-next-line @typescript-eslint/no-explicit-any, @typescript-eslint/naming-convention
    const additional_kwargs = delta.function_call
        ? { function_call: delta.function_call }
        : delta.tool_calls
            ? { tool_calls: delta.tool_calls }
            : {};
    switch (role) {
        case 'user':
            return new messages_1.HumanMessageChunk({ content });
        case 'assistant':
            return new messages_1.AIMessageChunk({ content, additional_kwargs });
        case 'system':
            return new messages_1.SystemMessageChunk({ content });
        default:
            return new messages_1.ChatMessageChunk({ content, role });
    }
}
exports.convertDeltaToMessageChunk = convertDeltaToMessageChunk;
|