@chatluna/v1-shared-adapter 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md ADDED
@@ -0,0 +1,5 @@
+ ## @chatluna/v1-shared-adapter
+
+ ## [![npm](https://img.shields.io/npm/v/@chatluna/v1-shared-adapter)](https://www.npmjs.com/package/@chatluna/v1-shared-adapter) [![npm](https://img.shields.io/npm/dm/@chatluna/v1-shared-adapter)](https://www.npmjs.com/package/@chatluna/v1-shared-adapter)
+
+ > Shared code for ChatLuna's generic adapters
package/lib/client.d.ts ADDED
@@ -0,0 +1,4 @@
+ import { ModelInfo } from 'koishi-plugin-chatluna/llm-core/platform/types';
+ export declare function isEmbeddingModel(modelName: string): boolean;
+ export declare function isNonLLMModel(modelName: string): boolean;
+ export declare function getModelMaxContextSize(info: ModelInfo): number;
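Together, these helpers let an adapter reduce a raw model listing to chat-capable entries with a context-size estimate. A minimal sketch in TypeScript, assuming a `ModelInfo` that only needs `name` for this call (the real type from koishi-plugin-chatluna carries more fields):

```ts
import type { ModelInfo } from 'koishi-plugin-chatluna/llm-core/platform/types'
import {
    getModelMaxContextSize,
    isEmbeddingModel,
    isNonLLMModel
} from '@chatluna/v1-shared-adapter'

const rawModels = ['gpt-4o', 'text-embedding-3-small', 'whisper-1', 'deepseek-chat']

// Keep only chat-capable entries: drop embedding and non-LLM endpoints.
const chatModels = rawModels.filter(
    (name) => !isEmbeddingModel(name) && !isNonLLMModel(name)
)

for (const name of chatModels) {
    // Cast: the helper only reads `name` and `maxTokens`; a real ModelInfo
    // carries more fields. With maxTokens unset, the lookup falls back to
    // the name-keyed table (or to getModelContextSize for gpt/o1/o3/o4 names).
    const info = { name } as unknown as ModelInfo
    console.log(name, getModelMaxContextSize(info))
}
```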
package/lib/index.cjs ADDED
@@ -0,0 +1,513 @@
+ var __defProp = Object.defineProperty;
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
+ var __getOwnPropNames = Object.getOwnPropertyNames;
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
+ var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
+ var __export = (target, all) => {
+ for (var name in all)
+ __defProp(target, name, { get: all[name], enumerable: true });
+ };
+ var __copyProps = (to, from, except, desc) => {
+ if (from && typeof from === "object" || typeof from === "function") {
+ for (let key of __getOwnPropNames(from))
+ if (!__hasOwnProp.call(to, key) && key !== except)
+ __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
+ }
+ return to;
+ };
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
+
+ // src/index.ts
+ var index_exports = {};
+ __export(index_exports, {
+ buildChatCompletionParams: () => buildChatCompletionParams,
+ completionStream: () => completionStream,
+ convertDeltaToMessageChunk: () => convertDeltaToMessageChunk,
+ createEmbeddings: () => createEmbeddings,
+ createRequestContext: () => createRequestContext,
+ formatToolToOpenAITool: () => formatToolToOpenAITool,
+ formatToolsToOpenAITools: () => formatToolsToOpenAITools,
+ getModelMaxContextSize: () => getModelMaxContextSize,
+ getModels: () => getModels,
+ isEmbeddingModel: () => isEmbeddingModel,
+ isNonLLMModel: () => isNonLLMModel,
+ langchainMessageToOpenAIMessage: () => langchainMessageToOpenAIMessage,
+ messageTypeToOpenAIRole: () => messageTypeToOpenAIRole,
+ processReasoningContent: () => processReasoningContent,
+ processStreamResponse: () => processStreamResponse
+ });
+ module.exports = __toCommonJS(index_exports);
+
+ // src/client.ts
+ var import_count_tokens = require("koishi-plugin-chatluna/llm-core/utils/count_tokens");
+ function isEmbeddingModel(modelName) {
+ return modelName.includes("embed") || modelName.includes("bge") || modelName.includes("instructor-large") || modelName.includes("m3e");
+ }
+ __name(isEmbeddingModel, "isEmbeddingModel");
+ function isNonLLMModel(modelName) {
+ return ["whisper", "tts", "dall-e", "image", "rerank"].some(
+ (keyword) => modelName.includes(keyword)
+ );
+ }
+ __name(isNonLLMModel, "isNonLLMModel");
+ function getModelMaxContextSize(info) {
+ const maxTokens = info.maxTokens;
+ if (maxTokens != null) {
+ return maxTokens;
+ }
+ const modelName = info.name;
+ if (modelName.startsWith("gpt") || modelName.startsWith("o1") || modelName.startsWith("o3") || modelName.startsWith("o4")) {
+ return (0, import_count_tokens.getModelContextSize)(modelName);
+ }
+ const modelMaxContextSizeTable = {
+ claude: 2e5,
+ "gemini-1.5-pro": 2097152,
+ "gemini-1.5-flash": 1048576,
+ "gemini-1.0-pro": 30720,
+ "gemini-2.0-flash": 1048576,
+ "gemini-2.0-pro": 2097152,
+ "gemini-2.5-pro": 1048576,
+ "gemini-2.0": 2097152,
+ deepseek: 128e3,
+ "llama3.1": 128e3,
+ "command-r-plus": 128e3,
+ "moonshot-v1-8k": 8192,
+ "moonshot-v1-32k": 32e3,
+ "moonshot-v1-128k": 128e3,
+ qwen2: 32e3,
+ "qwen2.5": 128e3,
+ qwen3: 128e3
+ };
+ for (const key in modelMaxContextSizeTable) {
+ if (modelName.toLowerCase().includes(key)) {
+ return modelMaxContextSizeTable[key];
+ }
+ }
+ return (0, import_count_tokens.getModelContextSize)("o1-mini");
+ }
+ __name(getModelMaxContextSize, "getModelMaxContextSize");
+
+ // src/requester.ts
+ var import_outputs = require("@langchain/core/outputs");
+ var import_error = require("koishi-plugin-chatluna/utils/error");
+ var import_sse = require("koishi-plugin-chatluna/utils/sse");
+
+ // src/utils.ts
+ var import_messages = require("@langchain/core/messages");
+ var import_zod_to_json_schema = require("zod-to-json-schema");
+ function langchainMessageToOpenAIMessage(messages, model, supportImageInput, removeSystemMessage) {
+ const result = [];
+ for (const rawMessage of messages) {
+ const role = messageTypeToOpenAIRole(rawMessage.getType());
+ const msg = {
+ content: rawMessage.content || null,
+ name: role === "assistant" || role === "tool" ? rawMessage.name : void 0,
+ role,
+ // function_call: rawMessage.additional_kwargs.function_call,
+ tool_calls: rawMessage.additional_kwargs.tool_calls,
+ tool_call_id: rawMessage.tool_call_id
+ };
+ if (msg.tool_calls == null) {
+ delete msg.tool_calls;
+ }
+ if (msg.tool_call_id == null) {
+ delete msg.tool_call_id;
+ }
+ if (msg.tool_calls) {
+ for (const toolCall of msg.tool_calls) {
+ const tool = toolCall.function;
+ if (!tool.arguments) {
+ continue;
+ }
+ tool.arguments = JSON.stringify(JSON.parse(tool.arguments));
+ }
+ }
+ const images = rawMessage.additional_kwargs.images;
+ const lowerModel = model?.toLowerCase() ?? "";
+ if ((lowerModel?.includes("vision") || lowerModel?.includes("gpt-4o") || lowerModel?.includes("claude") || lowerModel?.includes("gemini") || lowerModel?.includes("qwen-vl") || lowerModel?.includes("omni") || lowerModel?.includes("qwen2.5-vl") || lowerModel?.includes("qwen2.5-omni") || lowerModel?.includes("qwen-omni") || lowerModel?.includes("qwen2-vl") || lowerModel?.includes("qvq") || supportImageInput) && images != null) {
+ msg.content = [
+ {
+ type: "text",
+ text: rawMessage.content
+ }
+ ];
+ for (const image of images) {
+ msg.content.push({
+ type: "image_url",
+ image_url: {
+ url: image,
+ detail: "low"
+ }
+ });
+ }
+ }
+ result.push(msg);
+ }
+ if (removeSystemMessage) {
+ const mappedMessage = [];
+ for (let i = 0; i < result.length; i++) {
+ const message = result[i];
+ if (message.role !== "system") {
+ mappedMessage.push(message);
+ continue;
+ }
+ if (removeSystemMessage) {
+ continue;
+ }
+ mappedMessage.push({
+ role: "user",
+ content: message.content
+ });
+ mappedMessage.push({
+ role: "assistant",
+ content: "Okay, what do I need to do?"
+ });
+ if (result[i + 1]?.role === "assistant") {
+ mappedMessage.push({
+ role: "user",
+ content: "Continue what I said to you last message. Follow these instructions."
+ });
+ }
+ }
+ if (mappedMessage.length > 0 && mappedMessage[mappedMessage.length - 1].role === "assistant") {
+ mappedMessage.push({
+ role: "user",
+ content: "Continue what I said to you last message. Follow these instructions."
+ });
+ }
+ if (mappedMessage.length > 0 && mappedMessage[0].role === "assistant") {
+ mappedMessage.unshift({
+ role: "user",
+ content: "Continue what I said to you last time. Follow these instructions."
+ });
+ }
+ return mappedMessage;
+ }
+ return result;
+ }
+ __name(langchainMessageToOpenAIMessage, "langchainMessageToOpenAIMessage");
+ function messageTypeToOpenAIRole(type) {
+ switch (type) {
+ case "system":
+ return "system";
+ case "ai":
+ return "assistant";
+ case "human":
+ return "user";
+ case "function":
+ return "function";
+ case "tool":
+ return "tool";
+ default:
+ throw new Error(`Unknown message type: ${type}`);
+ }
+ }
+ __name(messageTypeToOpenAIRole, "messageTypeToOpenAIRole");
+ function formatToolsToOpenAITools(tools, includeGoogleSearch) {
+ const result = tools.map(formatToolToOpenAITool);
+ if (includeGoogleSearch) {
+ result.push({
+ type: "function",
+ function: {
+ name: "googleSearch"
+ }
+ });
+ }
+ if (result.length < 1) {
+ return void 0;
+ }
+ return result;
+ }
+ __name(formatToolsToOpenAITools, "formatToolsToOpenAITools");
+ function formatToolToOpenAITool(tool) {
+ const parameters = removeAdditionalProperties(
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ (0, import_zod_to_json_schema.zodToJsonSchema)(tool.schema, {
+ allowedAdditionalProperties: void 0
+ })
+ );
+ return {
+ type: "function",
+ function: {
+ name: tool.name,
+ description: tool.description,
+ // any?
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ parameters
+ }
+ };
+ }
+ __name(formatToolToOpenAITool, "formatToolToOpenAITool");
+ function removeAdditionalProperties(schema) {
+ if (!schema || typeof schema !== "object") return schema;
+ const stack = [[schema, null]];
+ while (stack.length > 0) {
+ const [current] = stack.pop();
+ if (typeof current !== "object" || current === null) continue;
+ if (Object.hasOwn(current, "additionalProperties")) {
+ delete current["additionalProperties"];
+ }
+ if (Object.hasOwn(current, "$schema")) {
+ delete current["$schema"];
+ }
+ for (const key of Object.keys(current)) {
+ const value = current[key];
+ if (value && typeof value === "object") {
+ stack.push([value, key]);
+ }
+ }
+ }
+ return schema;
+ }
+ __name(removeAdditionalProperties, "removeAdditionalProperties");
+ function convertDeltaToMessageChunk(delta, defaultRole) {
+ const role = ((delta.role?.length ?? 0) > 0 ? delta.role : defaultRole).toLowerCase();
+ const content = delta.content ?? "";
+ const reasoningContent = delta.reasoning_content ?? "";
+ let additionalKwargs;
+ if (delta.function_call) {
+ additionalKwargs = {
+ function_call: delta.function_call
+ };
+ } else if (delta.tool_calls) {
+ additionalKwargs = {
+ tool_calls: delta.tool_calls
+ };
+ } else {
+ additionalKwargs = {};
+ }
+ if (reasoningContent.length > 0) {
+ additionalKwargs.reasoning_content = reasoningContent;
+ }
+ if (role === "user") {
+ return new import_messages.HumanMessageChunk({ content });
+ } else if (role === "assistant") {
+ const toolCallChunks = [];
+ if (Array.isArray(delta.tool_calls)) {
+ for (const rawToolCall of delta.tool_calls) {
+ toolCallChunks.push({
+ name: rawToolCall.function?.name,
+ args: rawToolCall.function?.arguments,
+ id: rawToolCall.id,
+ index: rawToolCall.index
+ });
+ }
+ }
+ return new import_messages.AIMessageChunk({
+ content,
+ tool_call_chunks: toolCallChunks,
+ additional_kwargs: additionalKwargs
+ });
+ } else if (role === "system") {
+ return new import_messages.SystemMessageChunk({ content });
+ } else if (role === "function") {
+ return new import_messages.FunctionMessageChunk({
+ content,
+ additional_kwargs: additionalKwargs,
+ name: delta.name
+ });
+ } else if (role === "tool") {
+ return new import_messages.ToolMessageChunk({
+ content,
+ additional_kwargs: additionalKwargs,
+ tool_call_id: delta.tool_call_id
+ });
+ } else {
+ return new import_messages.ChatMessageChunk({ content, role });
+ }
+ }
+ __name(convertDeltaToMessageChunk, "convertDeltaToMessageChunk");
+
+ // src/requester.ts
+ var import_messages2 = require("@langchain/core/messages");
+ function buildChatCompletionParams(params, enableGoogleSearch, supportImageInput) {
+ const base = {
+ model: params.model,
+ messages: langchainMessageToOpenAIMessage(
+ params.input,
+ params.model,
+ supportImageInput
+ ),
+ tools: enableGoogleSearch || params.tools != null ? formatToolsToOpenAITools(
+ params.tools ?? [],
+ enableGoogleSearch
+ ) : void 0,
+ stop: params.stop || void 0,
+ max_tokens: params.model.includes("vision") ? void 0 : params.maxTokens,
+ temperature: params.temperature === 0 ? void 0 : params.temperature,
+ presence_penalty: params.presencePenalty === 0 ? void 0 : params.presencePenalty,
+ frequency_penalty: params.frequencyPenalty === 0 ? void 0 : params.frequencyPenalty,
+ n: params.n,
+ top_p: params.topP,
+ prompt_cache_key: params.id,
+ stream: true,
+ logit_bias: params.logitBias,
+ stream_options: {
+ include_usage: true
+ }
+ };
+ if (params.model.includes("o1") || params.model.includes("o3") || params.model.includes("o4") || params.model.includes("gpt-5")) {
+ delete base.temperature;
+ delete base.presence_penalty;
+ delete base.frequency_penalty;
+ delete base.n;
+ delete base.top_p;
+ }
+ return base;
+ }
+ __name(buildChatCompletionParams, "buildChatCompletionParams");
+ function processReasoningContent(delta, reasoningState) {
+ if (delta.reasoning_content) {
+ reasoningState.content += delta.reasoning_content;
+ if (reasoningState.time === 0) {
+ reasoningState.time = Date.now();
+ }
+ }
+ if ((delta.reasoning_content == null || delta.reasoning_content === "") && delta.content && delta.content.length > 0 && reasoningState.time > 0 && !reasoningState.isSet) {
+ const reasoningTime = Date.now() - reasoningState.time;
+ reasoningState.time = reasoningTime;
+ reasoningState.isSet = true;
+ return reasoningTime;
+ }
+ }
+ __name(processReasoningContent, "processReasoningContent");
+ async function* processStreamResponse(requestContext, iterator) {
+ let defaultRole = "assistant";
+ let errorCount = 0;
+ const reasoningState = { content: "", time: 0, isSet: false };
+ for await (const event of iterator) {
+ const chunk = event.data;
+ if (chunk === "[DONE]") break;
+ if (chunk === "" || chunk == null || chunk === "undefined") continue;
+ try {
+ const data = JSON.parse(chunk);
+ if (data.error) {
+ throw new import_error.ChatLunaError(
+ import_error.ChatLunaErrorCode.API_REQUEST_FAILED,
+ new Error("Error when calling completion, Result: " + chunk)
+ );
+ }
+ if (data.usage) {
+ yield new import_outputs.ChatGenerationChunk({
+ message: new import_messages2.AIMessageChunk(""),
+ text: "",
+ generationInfo: {
+ tokenUsage: data.usage
+ }
+ });
+ continue;
+ }
+ const choice = data.choices?.[0];
+ if (!choice) continue;
+ const { delta } = choice;
+ const messageChunk = convertDeltaToMessageChunk(delta, defaultRole);
+ const reasoningTime = processReasoningContent(delta, reasoningState);
+ if (reasoningTime !== void 0) {
+ messageChunk.additional_kwargs.reasoning_time = reasoningTime;
+ }
+ defaultRole = (delta.role?.length ?? 0) > 0 ? delta.role : defaultRole;
+ yield new import_outputs.ChatGenerationChunk({
+ message: messageChunk,
+ text: messageChunk.content
+ });
+ } catch (e) {
+ if (errorCount > 5) {
+ requestContext.modelRequester.logger.error(
+ "error with chunk",
+ chunk
+ );
+ throw new import_error.ChatLunaError(import_error.ChatLunaErrorCode.API_REQUEST_FAILED, e);
+ }
+ errorCount++;
+ }
+ }
+ if (reasoningState.content.length > 0) {
+ requestContext.modelRequester.logger.debug(
+ `reasoning content: ${reasoningState.content}. Use time: ${reasoningState.time / 1e3}s`
+ );
+ }
+ }
+ __name(processStreamResponse, "processStreamResponse");
+ async function* completionStream(requestContext, params, completionUrl = "chat/completions", enableGoogleSearch, supportImageInput) {
+ const { modelRequester } = requestContext;
+ try {
+ const response = await modelRequester.post(
+ completionUrl,
+ buildChatCompletionParams(
+ params,
+ enableGoogleSearch ?? false,
+ supportImageInput ?? true
+ ),
+ {
+ signal: params.signal
+ }
+ );
+ const iterator = (0, import_sse.sseIterable)(response);
+ yield* processStreamResponse(requestContext, iterator);
+ } catch (e) {
+ if (e instanceof import_error.ChatLunaError) {
+ throw e;
+ } else {
+ throw new import_error.ChatLunaError(import_error.ChatLunaErrorCode.API_REQUEST_FAILED, e);
+ }
+ }
+ }
+ __name(completionStream, "completionStream");
+ async function createEmbeddings(requestContext, params, embeddingUrl = "embeddings") {
+ const { modelRequester } = requestContext;
+ let data;
+ try {
+ const response = await modelRequester.post(embeddingUrl, {
+ input: params.input,
+ model: params.model
+ });
+ data = await response.text();
+ data = JSON.parse(data);
+ if (data.data && data.data.length > 0) {
+ return data.data.map((item) => item.embedding);
+ }
+ throw new Error(`Call Embedding Error: ${JSON.stringify(data)}`);
+ } catch (e) {
+ requestContext.modelRequester.logger.debug(e);
+ throw new import_error.ChatLunaError(import_error.ChatLunaErrorCode.API_REQUEST_FAILED, e);
+ }
+ }
+ __name(createEmbeddings, "createEmbeddings");
+ async function getModels(requestContext) {
+ const { modelRequester } = requestContext;
+ let data;
+ try {
+ const response = await modelRequester.get("models");
+ data = await response.text();
+ data = JSON.parse(data);
+ return data.data.map((model) => model.id);
+ } catch (e) {
+ requestContext.modelRequester.logger.error(e);
+ throw new Error(
+ "error when listing openai models, Result: " + JSON.stringify(data)
+ );
+ }
+ }
+ __name(getModels, "getModels");
+ function createRequestContext(ctx, config, pluginConfig, plugin, modelRequester) {
+ return { ctx, config, pluginConfig, plugin, modelRequester };
+ }
+ __name(createRequestContext, "createRequestContext");
+ // Annotate the CommonJS export names for ESM import in node:
+ 0 && (module.exports = {
+ buildChatCompletionParams,
+ completionStream,
+ convertDeltaToMessageChunk,
+ createEmbeddings,
+ createRequestContext,
+ formatToolToOpenAITool,
+ formatToolsToOpenAITools,
+ getModelMaxContextSize,
+ getModels,
+ isEmbeddingModel,
+ isNonLLMModel,
+ langchainMessageToOpenAIMessage,
+ messageTypeToOpenAIRole,
+ processReasoningContent,
+ processStreamResponse
+ });
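The reasoning bookkeeping above is a pure state machine over a small mutable record, which makes its contract easy to check in isolation. A quick sketch (import path as published; the timing value is illustrative):

```ts
import { processReasoningContent } from '@chatluna/v1-shared-adapter'

const state = { content: '', time: 0, isSet: false }

// Reasoning deltas accumulate into state.content and start the timer.
processReasoningContent({ reasoning_content: 'Let me think.' }, state) // undefined
processReasoningContent({ reasoning_content: ' Harder.' }, state) // undefined

// The first ordinary content delta closes the window exactly once and
// returns the elapsed reasoning time in milliseconds; state.time is then
// repurposed to hold that duration.
const elapsed = processReasoningContent({ content: 'Answer:' }, state)
console.log(elapsed === state.time, state.isSet) // true true
```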
package/lib/index.d.ts ADDED
@@ -0,0 +1,4 @@
+ export * from './client';
+ export * from './requester';
+ export * from './utils';
+ export * from './types';
package/lib/index.mjs ADDED
@@ -0,0 +1,485 @@
+ var __defProp = Object.defineProperty;
+ var __name = (target, value) => __defProp(target, "name", { value, configurable: true });
+
+ // src/client.ts
+ import { getModelContextSize } from "koishi-plugin-chatluna/llm-core/utils/count_tokens";
+ function isEmbeddingModel(modelName) {
+ return modelName.includes("embed") || modelName.includes("bge") || modelName.includes("instructor-large") || modelName.includes("m3e");
+ }
+ __name(isEmbeddingModel, "isEmbeddingModel");
+ function isNonLLMModel(modelName) {
+ return ["whisper", "tts", "dall-e", "image", "rerank"].some(
+ (keyword) => modelName.includes(keyword)
+ );
+ }
+ __name(isNonLLMModel, "isNonLLMModel");
+ function getModelMaxContextSize(info) {
+ const maxTokens = info.maxTokens;
+ if (maxTokens != null) {
+ return maxTokens;
+ }
+ const modelName = info.name;
+ if (modelName.startsWith("gpt") || modelName.startsWith("o1") || modelName.startsWith("o3") || modelName.startsWith("o4")) {
+ return getModelContextSize(modelName);
+ }
+ const modelMaxContextSizeTable = {
+ claude: 2e5,
+ "gemini-1.5-pro": 2097152,
+ "gemini-1.5-flash": 1048576,
+ "gemini-1.0-pro": 30720,
+ "gemini-2.0-flash": 1048576,
+ "gemini-2.0-pro": 2097152,
+ "gemini-2.5-pro": 1048576,
+ "gemini-2.0": 2097152,
+ deepseek: 128e3,
+ "llama3.1": 128e3,
+ "command-r-plus": 128e3,
+ "moonshot-v1-8k": 8192,
+ "moonshot-v1-32k": 32e3,
+ "moonshot-v1-128k": 128e3,
+ qwen2: 32e3,
+ "qwen2.5": 128e3,
+ qwen3: 128e3
+ };
+ for (const key in modelMaxContextSizeTable) {
+ if (modelName.toLowerCase().includes(key)) {
+ return modelMaxContextSizeTable[key];
+ }
+ }
+ return getModelContextSize("o1-mini");
+ }
+ __name(getModelMaxContextSize, "getModelMaxContextSize");
+
+ // src/requester.ts
+ import { ChatGenerationChunk } from "@langchain/core/outputs";
+ import {
+ ChatLunaError,
+ ChatLunaErrorCode
+ } from "koishi-plugin-chatluna/utils/error";
+ import { sseIterable } from "koishi-plugin-chatluna/utils/sse";
+
+ // src/utils.ts
+ import {
+ AIMessageChunk,
+ ChatMessageChunk,
+ FunctionMessageChunk,
+ HumanMessageChunk,
+ SystemMessageChunk,
+ ToolMessageChunk
+ } from "@langchain/core/messages";
+ import { zodToJsonSchema } from "zod-to-json-schema";
+ function langchainMessageToOpenAIMessage(messages, model, supportImageInput, removeSystemMessage) {
+ const result = [];
+ for (const rawMessage of messages) {
+ const role = messageTypeToOpenAIRole(rawMessage.getType());
+ const msg = {
+ content: rawMessage.content || null,
+ name: role === "assistant" || role === "tool" ? rawMessage.name : void 0,
+ role,
+ // function_call: rawMessage.additional_kwargs.function_call,
+ tool_calls: rawMessage.additional_kwargs.tool_calls,
+ tool_call_id: rawMessage.tool_call_id
+ };
+ if (msg.tool_calls == null) {
+ delete msg.tool_calls;
+ }
+ if (msg.tool_call_id == null) {
+ delete msg.tool_call_id;
+ }
+ if (msg.tool_calls) {
+ for (const toolCall of msg.tool_calls) {
+ const tool = toolCall.function;
+ if (!tool.arguments) {
+ continue;
+ }
+ tool.arguments = JSON.stringify(JSON.parse(tool.arguments));
+ }
+ }
+ const images = rawMessage.additional_kwargs.images;
+ const lowerModel = model?.toLowerCase() ?? "";
+ if ((lowerModel?.includes("vision") || lowerModel?.includes("gpt-4o") || lowerModel?.includes("claude") || lowerModel?.includes("gemini") || lowerModel?.includes("qwen-vl") || lowerModel?.includes("omni") || lowerModel?.includes("qwen2.5-vl") || lowerModel?.includes("qwen2.5-omni") || lowerModel?.includes("qwen-omni") || lowerModel?.includes("qwen2-vl") || lowerModel?.includes("qvq") || supportImageInput) && images != null) {
+ msg.content = [
+ {
+ type: "text",
+ text: rawMessage.content
+ }
+ ];
+ for (const image of images) {
+ msg.content.push({
+ type: "image_url",
+ image_url: {
+ url: image,
+ detail: "low"
+ }
+ });
+ }
+ }
+ result.push(msg);
+ }
+ if (removeSystemMessage) {
+ const mappedMessage = [];
+ for (let i = 0; i < result.length; i++) {
+ const message = result[i];
+ if (message.role !== "system") {
+ mappedMessage.push(message);
+ continue;
+ }
+ if (removeSystemMessage) {
+ continue;
+ }
+ mappedMessage.push({
+ role: "user",
+ content: message.content
+ });
+ mappedMessage.push({
+ role: "assistant",
+ content: "Okay, what do I need to do?"
+ });
+ if (result[i + 1]?.role === "assistant") {
+ mappedMessage.push({
+ role: "user",
+ content: "Continue what I said to you last message. Follow these instructions."
+ });
+ }
+ }
+ if (mappedMessage.length > 0 && mappedMessage[mappedMessage.length - 1].role === "assistant") {
+ mappedMessage.push({
+ role: "user",
+ content: "Continue what I said to you last message. Follow these instructions."
+ });
+ }
+ if (mappedMessage.length > 0 && mappedMessage[0].role === "assistant") {
+ mappedMessage.unshift({
+ role: "user",
+ content: "Continue what I said to you last time. Follow these instructions."
+ });
+ }
+ return mappedMessage;
+ }
+ return result;
+ }
+ __name(langchainMessageToOpenAIMessage, "langchainMessageToOpenAIMessage");
+ function messageTypeToOpenAIRole(type) {
+ switch (type) {
+ case "system":
+ return "system";
+ case "ai":
+ return "assistant";
+ case "human":
+ return "user";
+ case "function":
+ return "function";
+ case "tool":
+ return "tool";
+ default:
+ throw new Error(`Unknown message type: ${type}`);
+ }
+ }
+ __name(messageTypeToOpenAIRole, "messageTypeToOpenAIRole");
+ function formatToolsToOpenAITools(tools, includeGoogleSearch) {
+ const result = tools.map(formatToolToOpenAITool);
+ if (includeGoogleSearch) {
+ result.push({
+ type: "function",
+ function: {
+ name: "googleSearch"
+ }
+ });
+ }
+ if (result.length < 1) {
+ return void 0;
+ }
+ return result;
+ }
+ __name(formatToolsToOpenAITools, "formatToolsToOpenAITools");
+ function formatToolToOpenAITool(tool) {
+ const parameters = removeAdditionalProperties(
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ zodToJsonSchema(tool.schema, {
+ allowedAdditionalProperties: void 0
+ })
+ );
+ return {
+ type: "function",
+ function: {
+ name: tool.name,
+ description: tool.description,
+ // any?
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
+ parameters
+ }
+ };
+ }
+ __name(formatToolToOpenAITool, "formatToolToOpenAITool");
+ function removeAdditionalProperties(schema) {
+ if (!schema || typeof schema !== "object") return schema;
+ const stack = [[schema, null]];
+ while (stack.length > 0) {
+ const [current] = stack.pop();
+ if (typeof current !== "object" || current === null) continue;
+ if (Object.hasOwn(current, "additionalProperties")) {
+ delete current["additionalProperties"];
+ }
+ if (Object.hasOwn(current, "$schema")) {
+ delete current["$schema"];
+ }
+ for (const key of Object.keys(current)) {
+ const value = current[key];
+ if (value && typeof value === "object") {
+ stack.push([value, key]);
+ }
+ }
+ }
+ return schema;
+ }
+ __name(removeAdditionalProperties, "removeAdditionalProperties");
+ function convertDeltaToMessageChunk(delta, defaultRole) {
+ const role = ((delta.role?.length ?? 0) > 0 ? delta.role : defaultRole).toLowerCase();
+ const content = delta.content ?? "";
+ const reasoningContent = delta.reasoning_content ?? "";
+ let additionalKwargs;
+ if (delta.function_call) {
+ additionalKwargs = {
+ function_call: delta.function_call
+ };
+ } else if (delta.tool_calls) {
+ additionalKwargs = {
+ tool_calls: delta.tool_calls
+ };
+ } else {
+ additionalKwargs = {};
+ }
+ if (reasoningContent.length > 0) {
+ additionalKwargs.reasoning_content = reasoningContent;
+ }
+ if (role === "user") {
+ return new HumanMessageChunk({ content });
+ } else if (role === "assistant") {
+ const toolCallChunks = [];
+ if (Array.isArray(delta.tool_calls)) {
+ for (const rawToolCall of delta.tool_calls) {
+ toolCallChunks.push({
+ name: rawToolCall.function?.name,
+ args: rawToolCall.function?.arguments,
+ id: rawToolCall.id,
+ index: rawToolCall.index
+ });
+ }
+ }
+ return new AIMessageChunk({
+ content,
+ tool_call_chunks: toolCallChunks,
+ additional_kwargs: additionalKwargs
+ });
+ } else if (role === "system") {
+ return new SystemMessageChunk({ content });
+ } else if (role === "function") {
+ return new FunctionMessageChunk({
+ content,
+ additional_kwargs: additionalKwargs,
+ name: delta.name
+ });
+ } else if (role === "tool") {
+ return new ToolMessageChunk({
+ content,
+ additional_kwargs: additionalKwargs,
+ tool_call_id: delta.tool_call_id
+ });
+ } else {
+ return new ChatMessageChunk({ content, role });
+ }
+ }
+ __name(convertDeltaToMessageChunk, "convertDeltaToMessageChunk");
+
+ // src/requester.ts
+ import { AIMessageChunk as AIMessageChunk2 } from "@langchain/core/messages";
+ function buildChatCompletionParams(params, enableGoogleSearch, supportImageInput) {
+ const base = {
+ model: params.model,
+ messages: langchainMessageToOpenAIMessage(
+ params.input,
+ params.model,
+ supportImageInput
+ ),
+ tools: enableGoogleSearch || params.tools != null ? formatToolsToOpenAITools(
+ params.tools ?? [],
+ enableGoogleSearch
+ ) : void 0,
+ stop: params.stop || void 0,
+ max_tokens: params.model.includes("vision") ? void 0 : params.maxTokens,
+ temperature: params.temperature === 0 ? void 0 : params.temperature,
+ presence_penalty: params.presencePenalty === 0 ? void 0 : params.presencePenalty,
+ frequency_penalty: params.frequencyPenalty === 0 ? void 0 : params.frequencyPenalty,
+ n: params.n,
+ top_p: params.topP,
+ prompt_cache_key: params.id,
+ stream: true,
+ logit_bias: params.logitBias,
+ stream_options: {
+ include_usage: true
+ }
+ };
+ if (params.model.includes("o1") || params.model.includes("o3") || params.model.includes("o4") || params.model.includes("gpt-5")) {
+ delete base.temperature;
+ delete base.presence_penalty;
+ delete base.frequency_penalty;
+ delete base.n;
+ delete base.top_p;
+ }
+ return base;
+ }
+ __name(buildChatCompletionParams, "buildChatCompletionParams");
+ function processReasoningContent(delta, reasoningState) {
+ if (delta.reasoning_content) {
+ reasoningState.content += delta.reasoning_content;
+ if (reasoningState.time === 0) {
+ reasoningState.time = Date.now();
+ }
+ }
+ if ((delta.reasoning_content == null || delta.reasoning_content === "") && delta.content && delta.content.length > 0 && reasoningState.time > 0 && !reasoningState.isSet) {
+ const reasoningTime = Date.now() - reasoningState.time;
+ reasoningState.time = reasoningTime;
+ reasoningState.isSet = true;
+ return reasoningTime;
+ }
+ }
+ __name(processReasoningContent, "processReasoningContent");
+ async function* processStreamResponse(requestContext, iterator) {
+ let defaultRole = "assistant";
+ let errorCount = 0;
+ const reasoningState = { content: "", time: 0, isSet: false };
+ for await (const event of iterator) {
+ const chunk = event.data;
+ if (chunk === "[DONE]") break;
+ if (chunk === "" || chunk == null || chunk === "undefined") continue;
+ try {
+ const data = JSON.parse(chunk);
+ if (data.error) {
+ throw new ChatLunaError(
+ ChatLunaErrorCode.API_REQUEST_FAILED,
+ new Error("Error when calling completion, Result: " + chunk)
+ );
+ }
+ if (data.usage) {
+ yield new ChatGenerationChunk({
+ message: new AIMessageChunk2(""),
+ text: "",
+ generationInfo: {
+ tokenUsage: data.usage
+ }
+ });
+ continue;
+ }
+ const choice = data.choices?.[0];
+ if (!choice) continue;
+ const { delta } = choice;
+ const messageChunk = convertDeltaToMessageChunk(delta, defaultRole);
+ const reasoningTime = processReasoningContent(delta, reasoningState);
+ if (reasoningTime !== void 0) {
+ messageChunk.additional_kwargs.reasoning_time = reasoningTime;
+ }
+ defaultRole = (delta.role?.length ?? 0) > 0 ? delta.role : defaultRole;
+ yield new ChatGenerationChunk({
+ message: messageChunk,
+ text: messageChunk.content
+ });
+ } catch (e) {
+ if (errorCount > 5) {
+ requestContext.modelRequester.logger.error(
+ "error with chunk",
+ chunk
+ );
+ throw new ChatLunaError(ChatLunaErrorCode.API_REQUEST_FAILED, e);
+ }
+ errorCount++;
+ }
+ }
+ if (reasoningState.content.length > 0) {
+ requestContext.modelRequester.logger.debug(
+ `reasoning content: ${reasoningState.content}. Use time: ${reasoningState.time / 1e3}s`
+ );
+ }
+ }
+ __name(processStreamResponse, "processStreamResponse");
+ async function* completionStream(requestContext, params, completionUrl = "chat/completions", enableGoogleSearch, supportImageInput) {
+ const { modelRequester } = requestContext;
+ try {
+ const response = await modelRequester.post(
+ completionUrl,
+ buildChatCompletionParams(
+ params,
+ enableGoogleSearch ?? false,
+ supportImageInput ?? true
+ ),
+ {
+ signal: params.signal
+ }
+ );
+ const iterator = sseIterable(response);
+ yield* processStreamResponse(requestContext, iterator);
+ } catch (e) {
+ if (e instanceof ChatLunaError) {
+ throw e;
+ } else {
+ throw new ChatLunaError(ChatLunaErrorCode.API_REQUEST_FAILED, e);
+ }
+ }
+ }
+ __name(completionStream, "completionStream");
+ async function createEmbeddings(requestContext, params, embeddingUrl = "embeddings") {
+ const { modelRequester } = requestContext;
+ let data;
+ try {
+ const response = await modelRequester.post(embeddingUrl, {
+ input: params.input,
+ model: params.model
+ });
+ data = await response.text();
+ data = JSON.parse(data);
+ if (data.data && data.data.length > 0) {
+ return data.data.map((item) => item.embedding);
+ }
+ throw new Error(`Call Embedding Error: ${JSON.stringify(data)}`);
+ } catch (e) {
+ requestContext.modelRequester.logger.debug(e);
+ throw new ChatLunaError(ChatLunaErrorCode.API_REQUEST_FAILED, e);
+ }
+ }
+ __name(createEmbeddings, "createEmbeddings");
+ async function getModels(requestContext) {
+ const { modelRequester } = requestContext;
+ let data;
+ try {
+ const response = await modelRequester.get("models");
+ data = await response.text();
+ data = JSON.parse(data);
+ return data.data.map((model) => model.id);
+ } catch (e) {
+ requestContext.modelRequester.logger.error(e);
+ throw new Error(
+ "error when listing openai models, Result: " + JSON.stringify(data)
+ );
+ }
+ }
+ __name(getModels, "getModels");
+ function createRequestContext(ctx, config, pluginConfig, plugin, modelRequester) {
+ return { ctx, config, pluginConfig, plugin, modelRequester };
+ }
+ __name(createRequestContext, "createRequestContext");
+ export {
+ buildChatCompletionParams,
+ completionStream,
+ convertDeltaToMessageChunk,
+ createEmbeddings,
+ createRequestContext,
+ formatToolToOpenAITool,
+ formatToolsToOpenAITools,
+ getModelMaxContextSize,
+ getModels,
+ isEmbeddingModel,
+ isNonLLMModel,
+ langchainMessageToOpenAIMessage,
+ messageTypeToOpenAIRole,
+ processReasoningContent,
+ processStreamResponse
+ };
package/lib/requester.d.ts ADDED
@@ -0,0 +1,45 @@
+ import { ChatGenerationChunk } from '@langchain/core/outputs';
+ import { EmbeddingsRequestParams, ModelRequester, ModelRequestParams } from 'koishi-plugin-chatluna/llm-core/platform/api';
+ import { ClientConfig } from 'koishi-plugin-chatluna/llm-core/platform/config';
+ import { SSEEvent } from 'koishi-plugin-chatluna/utils/sse';
+ import { ChatLunaPlugin } from 'koishi-plugin-chatluna/services/chat';
+ import { Context } from 'koishi';
+ interface RequestContext<T extends ClientConfig = ClientConfig, R extends ChatLunaPlugin.Config = ChatLunaPlugin.Config> {
+ ctx: Context;
+ config: T;
+ pluginConfig: R;
+ plugin: ChatLunaPlugin;
+ modelRequester: ModelRequester<T, R>;
+ }
+ export declare function buildChatCompletionParams(params: ModelRequestParams, enableGoogleSearch: boolean, supportImageInput?: boolean): {
+ model: string;
+ messages: import("./types").ChatCompletionResponseMessage[];
+ tools: import("./types").ChatCompletionTool[];
+ stop: string | string[];
+ max_tokens: number;
+ temperature: number;
+ presence_penalty: number;
+ frequency_penalty: number;
+ n: number;
+ top_p: number;
+ prompt_cache_key: string;
+ stream: boolean;
+ logit_bias: Record<string, number>;
+ stream_options: {
+ include_usage: boolean;
+ };
+ };
+ export declare function processReasoningContent(delta: {
+ reasoning_content?: string;
+ content?: string;
+ }, reasoningState: {
+ content: string;
+ time: number;
+ isSet: boolean;
+ }): number;
+ export declare function processStreamResponse<T extends ClientConfig, R extends ChatLunaPlugin.Config>(requestContext: RequestContext<T, R>, iterator: AsyncGenerator<SSEEvent, string, unknown>): AsyncGenerator<ChatGenerationChunk, void, unknown>;
+ export declare function completionStream<T extends ClientConfig, R extends ChatLunaPlugin.Config>(requestContext: RequestContext<T, R>, params: ModelRequestParams, completionUrl?: string, enableGoogleSearch?: boolean, supportImageInput?: boolean): AsyncGenerator<ChatGenerationChunk>;
+ export declare function createEmbeddings<T extends ClientConfig, R extends ChatLunaPlugin.Config>(requestContext: RequestContext<T, R>, params: EmbeddingsRequestParams, embeddingUrl?: string): Promise<number[] | number[][]>;
+ export declare function getModels<T extends ClientConfig, R extends ChatLunaPlugin.Config>(requestContext: RequestContext<T, R>): Promise<string[]>;
+ export declare function createRequestContext<T extends ClientConfig, R extends ChatLunaPlugin.Config>(ctx: Context, config: T, pluginConfig: R, plugin: ChatLunaPlugin, modelRequester: ModelRequester<T, R>): RequestContext<T, R>;
+ export {};
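A hedged sketch of how an adapter might wire these pieces together. Here `ctx`, `config`, `pluginConfig`, `plugin` and `requester` are assumed to come from a real koishi-plugin-chatluna adapter; only the calls into this package follow the declarations above.

```ts
import type { Context } from 'koishi'
import { completionStream, createRequestContext } from '@chatluna/v1-shared-adapter'

// Parameter types are loosened to `any` because ModelRequester and
// ModelRequestParams are supplied by the host plugin, not by this package.
async function streamCompletion(
    ctx: Context,
    config: any,
    pluginConfig: any,
    plugin: any,
    requester: any,
    params: any
) {
    const requestContext = createRequestContext(ctx, config, pluginConfig, plugin, requester)
    // Defaults: POST to "chat/completions", Google Search tool off, image input on.
    for await (const chunk of completionStream(requestContext, params)) {
        // Usage-only chunks carry empty text plus generationInfo.tokenUsage;
        // everything else is ordinary streamed content.
        process.stdout.write(chunk.text)
    }
}
```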
package/lib/types.d.ts ADDED
@@ -0,0 +1,137 @@
+ export interface ChatCompletionResponse {
+ choices: {
+ index: number;
+ finish_reason: string | null;
+ delta: {
+ content?: string;
+ role?: string;
+ reasoning_content?: string;
+ function_call?: ChatCompletionRequestMessageToolCall;
+ };
+ message: ChatCompletionResponseMessage;
+ }[];
+ id: string;
+ object: string;
+ created: number;
+ model: string;
+ usage: {
+ prompt_tokens: number;
+ completion_tokens: number;
+ total_tokens: number;
+ };
+ }
+ export interface ChatCompletionResponseMessage {
+ role: string;
+ content?: string | ({
+ type: 'text';
+ text: string;
+ } | {
+ type: 'image_url';
+ image_url: {
+ url: string;
+ detail?: 'low' | 'high';
+ };
+ })[];
+ name?: string;
+ tool_calls?: ChatCompletionRequestMessageToolCall[];
+ tool_call_id?: string;
+ }
+ export interface ChatCompletionFunction {
+ name: string;
+ description?: string;
+ parameters?: {
+ [key: string]: any;
+ };
+ }
+ export interface ChatCompletionTool {
+ type: string;
+ function: ChatCompletionFunction;
+ }
+ export interface ChatCompletionRequestMessageToolCall {
+ id: string;
+ type: 'function';
+ function: {
+ name: string;
+ arguments: string;
+ };
+ }
+ /**
+ *
+ * @export
+ * @interface CreateEmbeddingResponse
+ */
+ export interface CreateEmbeddingResponse {
+ /**
+ *
+ * @type {string}
+ * @memberof CreateEmbeddingResponse
+ */
+ object: string;
+ /**
+ *
+ * @type {string}
+ * @memberof CreateEmbeddingResponse
+ */
+ model: string;
+ /**
+ *
+ * @type {Array<CreateEmbeddingResponseDataInner>}
+ * @memberof CreateEmbeddingResponse
+ */
+ data: CreateEmbeddingResponseDataInner[];
+ /**
+ *
+ * @type {CreateEmbeddingResponseUsage}
+ * @memberof CreateEmbeddingResponse
+ */
+ usage: CreateEmbeddingResponseUsage;
+ }
+ export interface CreateEmbeddingRequest {
+ model: string;
+ input: string | string[];
+ }
+ /**
+ *
+ * @export
+ * @interface CreateEmbeddingResponseDataInner
+ */
+ export interface CreateEmbeddingResponseDataInner {
+ /**
+ *
+ * @type {number}
+ * @memberof CreateEmbeddingResponseDataInner
+ */
+ index: number;
+ /**
+ *
+ * @type {string}
+ * @memberof CreateEmbeddingResponseDataInner
+ */
+ object: string;
+ /**
+ *
+ * @type {Array<number>}
+ * @memberof CreateEmbeddingResponseDataInner
+ */
+ embedding: number[];
+ }
+ /**
+ *
+ * @export
+ * @interface CreateEmbeddingResponseUsage
+ */
+ export interface CreateEmbeddingResponseUsage {
+ /**
+ *
+ * @type {number}
+ * @memberof CreateEmbeddingResponseUsage
+ */
+ prompt_tokens: number;
+ /**
+ *
+ * @type {number}
+ * @memberof CreateEmbeddingResponseUsage
+ */
+ total_tokens: number;
+ }
+ export type ChatCompletionResponseMessageRoleEnum = 'system' | 'assistant' | 'user' | 'function' | 'tool';
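The `content` union in `ChatCompletionResponseMessage` is what `langchainMessageToOpenAIMessage` emits for vision-capable models: one text part followed by `image_url` parts. A small illustration (the base64 payload is a placeholder):

```ts
import type { ChatCompletionResponseMessage } from '@chatluna/v1-shared-adapter'

const message: ChatCompletionResponseMessage = {
    role: 'user',
    content: [
        { type: 'text', text: 'What is in this picture?' },
        {
            type: 'image_url',
            // The converter always requests low-detail image analysis.
            image_url: { url: 'data:image/png;base64,...', detail: 'low' }
        }
    ]
}
```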
package/lib/utils.d.ts ADDED
@@ -0,0 +1,8 @@
+ import { AIMessageChunk, BaseMessage, ChatMessageChunk, FunctionMessageChunk, HumanMessageChunk, MessageType, SystemMessageChunk, ToolMessageChunk } from '@langchain/core/messages';
+ import { StructuredTool } from '@langchain/core/tools';
+ import { ChatCompletionResponseMessage, ChatCompletionResponseMessageRoleEnum, ChatCompletionTool } from './types';
+ export declare function langchainMessageToOpenAIMessage(messages: BaseMessage[], model?: string, supportImageInput?: boolean, removeSystemMessage?: boolean): ChatCompletionResponseMessage[];
+ export declare function messageTypeToOpenAIRole(type: MessageType): ChatCompletionResponseMessageRoleEnum;
+ export declare function formatToolsToOpenAITools(tools: StructuredTool[], includeGoogleSearch: boolean): ChatCompletionTool[];
+ export declare function formatToolToOpenAITool(tool: StructuredTool): ChatCompletionTool;
+ export declare function convertDeltaToMessageChunk(delta: Record<string, any>, defaultRole?: ChatCompletionResponseMessageRoleEnum): HumanMessageChunk | AIMessageChunk | SystemMessageChunk | FunctionMessageChunk | ToolMessageChunk | ChatMessageChunk;
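For instance, an assistant delta carrying a streamed tool call becomes an `AIMessageChunk` whose `tool_call_chunks` mirror the raw fragments; partial `arguments` strings are kept verbatim so LangChain can concatenate them across deltas. A sketch:

```ts
import type { AIMessageChunk } from '@langchain/core/messages'
import { convertDeltaToMessageChunk } from '@chatluna/v1-shared-adapter'

const chunk = convertDeltaToMessageChunk(
    {
        role: 'assistant',
        content: '',
        tool_calls: [
            {
                index: 0,
                id: 'call_1',
                type: 'function',
                // Arguments arrive as an incomplete JSON fragment mid-stream.
                function: { name: 'googleSearch', arguments: '{"query":' }
            }
        ]
    },
    'assistant'
) as AIMessageChunk // narrow the declared union for this known-assistant delta

console.log(chunk.tool_call_chunks)
// [{ name: 'googleSearch', args: '{"query":', id: 'call_1', index: 0 }]
```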
package/package.json ADDED
@@ -0,0 +1,75 @@
+ {
+ "name": "@chatluna/v1-shared-adapter",
+ "description": "chatluna shared adapter",
+ "version": "1.0.0",
+ "main": "lib/index.cjs",
+ "module": "lib/index.mjs",
+ "typings": "lib/index.d.ts",
+ "files": [
+ "lib",
+ "dist"
+ ],
+ "exports": {
+ ".": {
+ "types": "./lib/index.d.ts",
+ "import": "./lib/index.mjs",
+ "require": "./lib/index.cjs"
+ },
+ "./package.json": "./package.json"
+ },
+ "type": "module",
+ "author": "dingyi222666 <dingyi222666@foxmail.com>",
+ "repository": {
+ "type": "git",
+ "url": "https://github.com/ChatLunaLab/chatluna.git",
+ "directory": "packages/openai-like-adapter"
+ },
+ "license": "AGPL-3.0",
+ "bugs": {
+ "url": "https://github.com/ChatLunaLab/chatluna/issues"
+ },
+ "homepage": "https://github.com/ChatLunaLab/chatluna/tree/v1-dev/packages/openai-like-adapter#readme",
+ "scripts": {
+ "build": "atsc -b"
+ },
+ "engines": {
+ "node": ">=18.0.0"
+ },
+ "resolutions": {
+ "@langchain/core": "0.3.43",
+ "js-tiktoken": "npm:@dingyi222666/js-tiktoken@^1.0.19"
+ },
+ "overrides": {
+ "@langchain/core": "0.3.43",
+ "js-tiktoken": "npm:@dingyi222666/js-tiktoken@^1.0.19"
+ },
+ "pnpm": {
+ "overrides": {
+ "@langchain/core": "0.3.43",
+ "js-tiktoken": "npm:@dingyi222666/js-tiktoken@^1.0.19"
+ }
+ },
+ "keywords": [
+ "chatbot",
+ "koishi",
+ "plugin",
+ "service",
+ "chatgpt",
+ "gpt",
+ "chatluna",
+ "adapter"
+ ],
+ "dependencies": {
+ "@langchain/core": "^0.3.43",
+ "zod": "^3.25.0-canary.20250211T214501",
+ "zod-to-json-schema": "^3.24.5"
+ },
+ "devDependencies": {
+ "atsc": "^2.1.0",
+ "koishi": "^4.18.7"
+ },
+ "peerDependencies": {
+ "koishi": "^4.18.7",
+ "koishi-plugin-chatluna": "^1.3.0-alpha.10"
+ }
+ }
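The `exports` map above serves the ESM and CJS bundles from one package. A sketch of both resolution paths from an ESM consumer; note the two bundles are separate module instances:

```ts
// Resolves via the "import" condition to lib/index.mjs.
import { isEmbeddingModel } from '@chatluna/v1-shared-adapter'

import { createRequire } from 'node:module'

// Resolves via the "require" condition to lib/index.cjs.
const require = createRequire(import.meta.url)
const shared = require('@chatluna/v1-shared-adapter')

// Same API, but distinct function objects: each bundle carries its own copy.
console.log(shared.isEmbeddingModel === isEmbeddingModel) // false
```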