@chatluna/v1-shared-adapter 1.0.2 → 1.0.4
- package/lib/client.d.ts +1 -0
- package/lib/index.cjs +182 -8
- package/lib/index.mjs +177 -7
- package/lib/requester.d.ts +3 -0
- package/lib/types.d.ts +2 -0
- package/lib/utils.d.ts +1 -0
- package/package.json +2 -2
package/lib/client.d.ts
CHANGED
@@ -2,3 +2,4 @@ import { ModelInfo } from 'koishi-plugin-chatluna/llm-core/platform/types';
 export declare function isEmbeddingModel(modelName: string): boolean;
 export declare function isNonLLMModel(modelName: string): boolean;
 export declare function getModelMaxContextSize(info: ModelInfo): number;
+export declare function supportImageInput(modelName: string): boolean;
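The new `supportImageInput` export is the public face of the matcher list added in the bundles below: plain patterns are substring tests against the lowercased model name, while patterns containing `*` become anchored globs. A quick sketch of the expected behavior (the import specifier assumes the package's standard entry point):

import { supportImageInput } from '@chatluna/v1-shared-adapter';

supportImageInput('GPT-4o-mini');  // true: lowercased name contains "gpt-4o"
supportImageInput('glm-4v');       // true: whole name matches the glob "glm-*v"
supportImageInput('deepseek-r1');  // false: no matcher applies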
package/lib/index.cjs
CHANGED
@@ -21,8 +21,10 @@ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: tru
 var index_exports = {};
 __export(index_exports, {
   buildChatCompletionParams: () => buildChatCompletionParams,
+  completion: () => completion,
   completionStream: () => completionStream,
   convertDeltaToMessageChunk: () => convertDeltaToMessageChunk,
+  convertMessageToMessageChunk: () => convertMessageToMessageChunk,
   createEmbeddings: () => createEmbeddings,
   createRequestContext: () => createRequestContext,
   formatToolToOpenAITool: () => formatToolToOpenAITool,
@@ -34,7 +36,9 @@ __export(index_exports, {
   langchainMessageToOpenAIMessage: () => langchainMessageToOpenAIMessage,
   messageTypeToOpenAIRole: () => messageTypeToOpenAIRole,
   processReasoningContent: () => processReasoningContent,
-  processStreamResponse: () => processStreamResponse
+  processResponse: () => processResponse,
+  processStreamResponse: () => processStreamResponse,
+  supportImageInput: () => supportImageInput
 });
 module.exports = __toCommonJS(index_exports);

@@ -45,6 +49,9 @@ function isEmbeddingModel(modelName) {
 }
 __name(isEmbeddingModel, "isEmbeddingModel");
 function isNonLLMModel(modelName) {
+  if (modelName.includes("gemini") && modelName.includes("image")) {
+    return false;
+  }
   return ["whisper", "tts", "dall-e", "image", "rerank"].some(
     (keyword) => modelName.includes(keyword)
   );
@@ -86,6 +93,39 @@ function getModelMaxContextSize(info) {
   return (0, import_count_tokens.getModelContextSize)("o1-mini");
 }
 __name(getModelMaxContextSize, "getModelMaxContextSize");
+function createGlobMatcher(pattern) {
+  if (!pattern.includes("*")) {
+    return (text) => text.includes(pattern);
+  }
+  const regex = new RegExp("^" + pattern.replace(/\*/g, ".*") + "$");
+  return (text) => regex.test(text);
+}
+__name(createGlobMatcher, "createGlobMatcher");
+var imageModelMatchers = [
+  "vision",
+  "vl",
+  "gpt-4o",
+  "claude",
+  "gemini",
+  "qwen-vl",
+  "omni",
+  "qwen2.5-omni",
+  "qwen-omni",
+  "qvq",
+  "o1",
+  "o3",
+  "o4",
+  "gpt-4.1",
+  "gpt-5",
+  "glm-*v",
+  "step3",
+  "grok-4"
+].map((pattern) => createGlobMatcher(pattern));
+function supportImageInput(modelName) {
+  const lowerModel = modelName.toLowerCase();
+  return imageModelMatchers.some((matcher) => matcher(lowerModel));
+}
+__name(supportImageInput, "supportImageInput");

 // src/requester.ts
 var import_outputs = require("@langchain/core/outputs");
@@ -95,7 +135,7 @@ var import_sse = require("koishi-plugin-chatluna/utils/sse");
 // src/utils.ts
 var import_messages = require("@langchain/core/messages");
 var import_zod_to_json_schema = require("zod-to-json-schema");
-function langchainMessageToOpenAIMessage(messages, model, supportImageInput, removeSystemMessage) {
+function langchainMessageToOpenAIMessage(messages, model, supportImageInput2, removeSystemMessage) {
   const result = [];
   for (const rawMessage of messages) {
     const role = messageTypeToOpenAIRole(rawMessage.getType());
@@ -124,7 +164,7 @@ function langchainMessageToOpenAIMessage(messages, model, supportImageInput, rem
     }
     const images = rawMessage.additional_kwargs.images;
     const lowerModel = model?.toLowerCase() ?? "";
-    if ((lowerModel?.includes("vision") || lowerModel?.includes("gpt-4o") || lowerModel?.includes("claude") || lowerModel?.includes("gemini") || lowerModel?.includes("qwen-vl") || lowerModel?.includes("omni") || lowerModel?.includes("qwen2.5-vl") || lowerModel?.includes("qwen2.5-omni") || lowerModel?.includes("qwen-omni") || lowerModel?.includes("qwen2-vl") || lowerModel?.includes("qvq") || model?.includes("o1") || model?.includes("o4") || model?.includes("o3") || model?.includes("gpt-4.1") || model?.includes("gpt-5") || supportImageInput) && images != null) {
+    if ((lowerModel?.includes("vision") || lowerModel?.includes("gpt-4o") || lowerModel?.includes("claude") || lowerModel?.includes("gemini") || lowerModel?.includes("qwen-vl") || lowerModel?.includes("omni") || lowerModel?.includes("qwen2.5-vl") || lowerModel?.includes("qwen2.5-omni") || lowerModel?.includes("qwen-omni") || lowerModel?.includes("qwen2-vl") || lowerModel?.includes("qvq") || model?.includes("o1") || model?.includes("o4") || model?.includes("o3") || model?.includes("gpt-4.1") || model?.includes("gpt-5") || supportImageInput2) && images != null) {
       msg.content = [
         {
           type: "text",
@@ -260,6 +300,58 @@ function removeAdditionalProperties(schema) {
   return schema;
 }
 __name(removeAdditionalProperties, "removeAdditionalProperties");
+function convertMessageToMessageChunk(message) {
+  const content = message.content ?? "";
+  const reasoningContent = message.reasoning_content ?? "";
+  const role = ((message.role?.length ?? 0) > 0 ? message.role : "assistant").toLowerCase();
+  let additionalKwargs;
+  if (message.tool_calls) {
+    additionalKwargs = {
+      tool_calls: message.tool_calls
+    };
+  } else {
+    additionalKwargs = {};
+  }
+  if (reasoningContent.length > 0) {
+    additionalKwargs.reasoning_content = reasoningContent;
+  }
+  if (role === "user") {
+    return new import_messages.HumanMessageChunk({ content });
+  } else if (role === "assistant") {
+    const toolCallChunks = [];
+    if (Array.isArray(message.tool_calls)) {
+      for (const rawToolCall of message.tool_calls) {
+        toolCallChunks.push({
+          name: rawToolCall.function?.name,
+          args: rawToolCall.function?.arguments,
+          id: rawToolCall.id
+        });
+      }
+    }
+    return new import_messages.AIMessageChunk({
+      content,
+      tool_call_chunks: toolCallChunks,
+      additional_kwargs: additionalKwargs
+    });
+  } else if (role === "system") {
+    return new import_messages.SystemMessageChunk({ content });
+  } else if (role === "function") {
+    return new import_messages.FunctionMessageChunk({
+      content,
+      additional_kwargs: additionalKwargs,
+      name: message.name
+    });
+  } else if (role === "tool") {
+    return new import_messages.ToolMessageChunk({
+      content,
+      additional_kwargs: additionalKwargs,
+      tool_call_id: message.tool_call_id
+    });
+  } else {
+    return new import_messages.ChatMessageChunk({ content, role });
+  }
+}
+__name(convertMessageToMessageChunk, "convertMessageToMessageChunk");
 function convertDeltaToMessageChunk(delta, defaultRole) {
   const role = ((delta.role?.length ?? 0) > 0 ? delta.role : defaultRole).toLowerCase();
   const content = delta.content ?? "";
@@ -320,13 +412,14 @@ __name(convertDeltaToMessageChunk, "convertDeltaToMessageChunk");

 // src/requester.ts
 var import_messages2 = require("@langchain/core/messages");
-
+var import_string = require("koishi-plugin-chatluna/utils/string");
+function buildChatCompletionParams(params, enableGoogleSearch, supportImageInput2) {
   const base = {
     model: params.model,
     messages: langchainMessageToOpenAIMessage(
       params.input,
       params.model,
-
+      supportImageInput2
     ),
     tools: enableGoogleSearch || params.tools != null ? formatToolsToOpenAITools(
       params.tools ?? [],
@@ -428,7 +521,58 @@ async function* processStreamResponse(requestContext, iterator) {
   }
 }
 __name(processStreamResponse, "processStreamResponse");
-async function* completionStream(requestContext, params, completionUrl = "chat/completions", enableGoogleSearch) {
+async function processResponse(requestContext, response) {
+  if (response.status !== 200) {
+    throw new import_error.ChatLunaError(
+      import_error.ChatLunaErrorCode.API_REQUEST_FAILED,
+      new Error(
+        "Error when calling completion, Status: " + response.status + " " + response.statusText + ", Response: " + await response.text()
+      )
+    );
+  }
+  const responseText = await response.text();
+  try {
+    const data = JSON.parse(responseText);
+    if (data.error) {
+      throw new import_error.ChatLunaError(
+        import_error.ChatLunaErrorCode.API_REQUEST_FAILED,
+        new Error(
+          "Error when calling completion, Result: " + responseText
+        )
+      );
+    }
+    const choice = data.choices?.[0];
+    if (!choice) {
+      throw new import_error.ChatLunaError(
+        import_error.ChatLunaErrorCode.API_REQUEST_FAILED,
+        new Error(
+          "Error when calling completion, Result: " + responseText
+        )
+      );
+    }
+    const messageChunk = convertMessageToMessageChunk(choice.message);
+    return new import_outputs.ChatGenerationChunk({
+      message: messageChunk,
+      text: (0, import_string.getMessageContent)(messageChunk.content),
+      generationInfo: {
+        tokenUsage: data.usage
+      }
+    });
+  } catch (e) {
+    if (e instanceof import_error.ChatLunaError) {
+      throw e;
+    } else {
+      throw new import_error.ChatLunaError(
+        import_error.ChatLunaErrorCode.API_REQUEST_FAILED,
+        new Error(
+          "Error when calling completion, Error: " + e + ", Response: " + responseText
+        )
+      );
+    }
+  }
+}
+__name(processResponse, "processResponse");
+async function* completionStream(requestContext, params, completionUrl = "chat/completions", enableGoogleSearch, supportImageInput2) {
   const { modelRequester } = requestContext;
   try {
     const response = await modelRequester.post(
@@ -436,7 +580,7 @@ async function* completionStream(requestContext, params, completionUrl = "chat/c
       buildChatCompletionParams(
         params,
         enableGoogleSearch ?? false,
-
+        supportImageInput2 ?? true
       ),
       {
         signal: params.signal
@@ -453,6 +597,32 @@ async function* completionStream(requestContext, params, completionUrl = "chat/c
   }
 }
 __name(completionStream, "completionStream");
+async function completion(requestContext, params, completionUrl = "chat/completions", enableGoogleSearch, supportImageInput2) {
+  const { modelRequester } = requestContext;
+  const chatCompletionParams = buildChatCompletionParams(
+    params,
+    enableGoogleSearch ?? false,
+    supportImageInput2 ?? true
+  );
+  delete chatCompletionParams.stream;
+  try {
+    const response = await modelRequester.post(
+      completionUrl,
+      chatCompletionParams,
+      {
+        signal: params.signal
+      }
+    );
+    return await processResponse(requestContext, response);
+  } catch (e) {
+    if (e instanceof import_error.ChatLunaError) {
+      throw e;
+    } else {
+      throw new import_error.ChatLunaError(import_error.ChatLunaErrorCode.API_REQUEST_FAILED, e);
+    }
+  }
+}
+__name(completion, "completion");
 async function createEmbeddings(requestContext, params, embeddingUrl = "embeddings") {
   const { modelRequester } = requestContext;
   let data;
@@ -496,8 +666,10 @@ __name(createRequestContext, "createRequestContext");
 // Annotate the CommonJS export names for ESM import in node:
 0 && (module.exports = {
   buildChatCompletionParams,
+  completion,
   completionStream,
   convertDeltaToMessageChunk,
+  convertMessageToMessageChunk,
   createEmbeddings,
   createRequestContext,
   formatToolToOpenAITool,
@@ -509,5 +681,7 @@ __name(createRequestContext, "createRequestContext");
   langchainMessageToOpenAIMessage,
   messageTypeToOpenAIRole,
   processReasoningContent,
-  processStreamResponse
+  processResponse,
+  processStreamResponse,
+  supportImageInput
 });
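Worth calling out in the new code above: `createGlobMatcher` is asymmetric. Patterns without `*` are unanchored substring tests, while starred patterns compile to a fully anchored regex, so `"glm-*v"` must match the entire model name. A standalone TypeScript sketch of that distinction (same logic, minus the esbuild `__name` wrappers):

function createGlobMatcher(pattern: string): (text: string) => boolean {
  if (!pattern.includes('*')) {
    // Plain pattern: matches anywhere in the model name.
    return (text) => text.includes(pattern);
  }
  // Starred pattern: "*" becomes ".*" and the regex is anchored,
  // so it must cover the whole model name.
  const regex = new RegExp('^' + pattern.replace(/\*/g, '.*') + '$');
  return (text) => regex.test(text);
}

createGlobMatcher('vl')('qwen-vl-max');    // true: substring match
createGlobMatcher('glm-*v')('glm-4v');     // true: whole name fits ^glm-.*v$
createGlobMatcher('glm-*v')('glm-4v-9b');  // false: anchored, the trailing "-9b" fails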
package/lib/index.mjs
CHANGED
@@ -8,6 +8,9 @@ function isEmbeddingModel(modelName) {
 }
 __name(isEmbeddingModel, "isEmbeddingModel");
 function isNonLLMModel(modelName) {
+  if (modelName.includes("gemini") && modelName.includes("image")) {
+    return false;
+  }
   return ["whisper", "tts", "dall-e", "image", "rerank"].some(
     (keyword) => modelName.includes(keyword)
   );
@@ -49,6 +52,39 @@ function getModelMaxContextSize(info) {
   return getModelContextSize("o1-mini");
 }
 __name(getModelMaxContextSize, "getModelMaxContextSize");
+function createGlobMatcher(pattern) {
+  if (!pattern.includes("*")) {
+    return (text) => text.includes(pattern);
+  }
+  const regex = new RegExp("^" + pattern.replace(/\*/g, ".*") + "$");
+  return (text) => regex.test(text);
+}
+__name(createGlobMatcher, "createGlobMatcher");
+var imageModelMatchers = [
+  "vision",
+  "vl",
+  "gpt-4o",
+  "claude",
+  "gemini",
+  "qwen-vl",
+  "omni",
+  "qwen2.5-omni",
+  "qwen-omni",
+  "qvq",
+  "o1",
+  "o3",
+  "o4",
+  "gpt-4.1",
+  "gpt-5",
+  "glm-*v",
+  "step3",
+  "grok-4"
+].map((pattern) => createGlobMatcher(pattern));
+function supportImageInput(modelName) {
+  const lowerModel = modelName.toLowerCase();
+  return imageModelMatchers.some((matcher) => matcher(lowerModel));
+}
+__name(supportImageInput, "supportImageInput");

 // src/requester.ts
 import { ChatGenerationChunk } from "@langchain/core/outputs";
@@ -68,7 +104,7 @@ import {
   ToolMessageChunk
 } from "@langchain/core/messages";
 import { zodToJsonSchema } from "zod-to-json-schema";
-function langchainMessageToOpenAIMessage(messages, model, supportImageInput, removeSystemMessage) {
+function langchainMessageToOpenAIMessage(messages, model, supportImageInput2, removeSystemMessage) {
   const result = [];
   for (const rawMessage of messages) {
     const role = messageTypeToOpenAIRole(rawMessage.getType());
@@ -97,7 +133,7 @@ function langchainMessageToOpenAIMessage(messages, model, supportImageInput, rem
     }
     const images = rawMessage.additional_kwargs.images;
     const lowerModel = model?.toLowerCase() ?? "";
-    if ((lowerModel?.includes("vision") || lowerModel?.includes("gpt-4o") || lowerModel?.includes("claude") || lowerModel?.includes("gemini") || lowerModel?.includes("qwen-vl") || lowerModel?.includes("omni") || lowerModel?.includes("qwen2.5-vl") || lowerModel?.includes("qwen2.5-omni") || lowerModel?.includes("qwen-omni") || lowerModel?.includes("qwen2-vl") || lowerModel?.includes("qvq") || model?.includes("o1") || model?.includes("o4") || model?.includes("o3") || model?.includes("gpt-4.1") || model?.includes("gpt-5") || supportImageInput) && images != null) {
+    if ((lowerModel?.includes("vision") || lowerModel?.includes("gpt-4o") || lowerModel?.includes("claude") || lowerModel?.includes("gemini") || lowerModel?.includes("qwen-vl") || lowerModel?.includes("omni") || lowerModel?.includes("qwen2.5-vl") || lowerModel?.includes("qwen2.5-omni") || lowerModel?.includes("qwen-omni") || lowerModel?.includes("qwen2-vl") || lowerModel?.includes("qvq") || model?.includes("o1") || model?.includes("o4") || model?.includes("o3") || model?.includes("gpt-4.1") || model?.includes("gpt-5") || supportImageInput2) && images != null) {
       msg.content = [
         {
           type: "text",
@@ -233,6 +269,58 @@ function removeAdditionalProperties(schema) {
   return schema;
 }
 __name(removeAdditionalProperties, "removeAdditionalProperties");
+function convertMessageToMessageChunk(message) {
+  const content = message.content ?? "";
+  const reasoningContent = message.reasoning_content ?? "";
+  const role = ((message.role?.length ?? 0) > 0 ? message.role : "assistant").toLowerCase();
+  let additionalKwargs;
+  if (message.tool_calls) {
+    additionalKwargs = {
+      tool_calls: message.tool_calls
+    };
+  } else {
+    additionalKwargs = {};
+  }
+  if (reasoningContent.length > 0) {
+    additionalKwargs.reasoning_content = reasoningContent;
+  }
+  if (role === "user") {
+    return new HumanMessageChunk({ content });
+  } else if (role === "assistant") {
+    const toolCallChunks = [];
+    if (Array.isArray(message.tool_calls)) {
+      for (const rawToolCall of message.tool_calls) {
+        toolCallChunks.push({
+          name: rawToolCall.function?.name,
+          args: rawToolCall.function?.arguments,
+          id: rawToolCall.id
+        });
+      }
+    }
+    return new AIMessageChunk({
+      content,
+      tool_call_chunks: toolCallChunks,
+      additional_kwargs: additionalKwargs
+    });
+  } else if (role === "system") {
+    return new SystemMessageChunk({ content });
+  } else if (role === "function") {
+    return new FunctionMessageChunk({
+      content,
+      additional_kwargs: additionalKwargs,
+      name: message.name
+    });
+  } else if (role === "tool") {
+    return new ToolMessageChunk({
+      content,
+      additional_kwargs: additionalKwargs,
+      tool_call_id: message.tool_call_id
+    });
+  } else {
+    return new ChatMessageChunk({ content, role });
+  }
+}
+__name(convertMessageToMessageChunk, "convertMessageToMessageChunk");
 function convertDeltaToMessageChunk(delta, defaultRole) {
   const role = ((delta.role?.length ?? 0) > 0 ? delta.role : defaultRole).toLowerCase();
   const content = delta.content ?? "";
@@ -293,13 +381,14 @@ __name(convertDeltaToMessageChunk, "convertDeltaToMessageChunk");

 // src/requester.ts
 import { AIMessageChunk as AIMessageChunk2 } from "@langchain/core/messages";
-
+import { getMessageContent } from "koishi-plugin-chatluna/utils/string";
+function buildChatCompletionParams(params, enableGoogleSearch, supportImageInput2) {
   const base = {
     model: params.model,
     messages: langchainMessageToOpenAIMessage(
       params.input,
       params.model,
-
+      supportImageInput2
     ),
     tools: enableGoogleSearch || params.tools != null ? formatToolsToOpenAITools(
       params.tools ?? [],
@@ -401,7 +490,58 @@ async function* processStreamResponse(requestContext, iterator) {
   }
 }
 __name(processStreamResponse, "processStreamResponse");
-async function* completionStream(requestContext, params, completionUrl = "chat/completions", enableGoogleSearch) {
+async function processResponse(requestContext, response) {
+  if (response.status !== 200) {
+    throw new ChatLunaError(
+      ChatLunaErrorCode.API_REQUEST_FAILED,
+      new Error(
+        "Error when calling completion, Status: " + response.status + " " + response.statusText + ", Response: " + await response.text()
+      )
+    );
+  }
+  const responseText = await response.text();
+  try {
+    const data = JSON.parse(responseText);
+    if (data.error) {
+      throw new ChatLunaError(
+        ChatLunaErrorCode.API_REQUEST_FAILED,
+        new Error(
+          "Error when calling completion, Result: " + responseText
+        )
+      );
+    }
+    const choice = data.choices?.[0];
+    if (!choice) {
+      throw new ChatLunaError(
+        ChatLunaErrorCode.API_REQUEST_FAILED,
+        new Error(
+          "Error when calling completion, Result: " + responseText
+        )
+      );
+    }
+    const messageChunk = convertMessageToMessageChunk(choice.message);
+    return new ChatGenerationChunk({
+      message: messageChunk,
+      text: getMessageContent(messageChunk.content),
+      generationInfo: {
+        tokenUsage: data.usage
+      }
+    });
+  } catch (e) {
+    if (e instanceof ChatLunaError) {
+      throw e;
+    } else {
+      throw new ChatLunaError(
+        ChatLunaErrorCode.API_REQUEST_FAILED,
+        new Error(
+          "Error when calling completion, Error: " + e + ", Response: " + responseText
+        )
+      );
+    }
+  }
+}
+__name(processResponse, "processResponse");
+async function* completionStream(requestContext, params, completionUrl = "chat/completions", enableGoogleSearch, supportImageInput2) {
   const { modelRequester } = requestContext;
   try {
     const response = await modelRequester.post(
@@ -409,7 +549,7 @@ async function* completionStream(requestContext, params, completionUrl = "chat/c
       buildChatCompletionParams(
         params,
         enableGoogleSearch ?? false,
-
+        supportImageInput2 ?? true
       ),
       {
         signal: params.signal
@@ -426,6 +566,32 @@ async function* completionStream(requestContext, params, completionUrl = "chat/c
   }
 }
 __name(completionStream, "completionStream");
+async function completion(requestContext, params, completionUrl = "chat/completions", enableGoogleSearch, supportImageInput2) {
+  const { modelRequester } = requestContext;
+  const chatCompletionParams = buildChatCompletionParams(
+    params,
+    enableGoogleSearch ?? false,
+    supportImageInput2 ?? true
+  );
+  delete chatCompletionParams.stream;
+  try {
+    const response = await modelRequester.post(
+      completionUrl,
+      chatCompletionParams,
+      {
+        signal: params.signal
+      }
+    );
+    return await processResponse(requestContext, response);
+  } catch (e) {
+    if (e instanceof ChatLunaError) {
+      throw e;
+    } else {
+      throw new ChatLunaError(ChatLunaErrorCode.API_REQUEST_FAILED, e);
+    }
+  }
+}
+__name(completion, "completion");
 async function createEmbeddings(requestContext, params, embeddingUrl = "embeddings") {
   const { modelRequester } = requestContext;
   let data;
@@ -468,8 +634,10 @@ function createRequestContext(ctx, config, pluginConfig, plugin, modelRequester)
 __name(createRequestContext, "createRequestContext");
 export {
   buildChatCompletionParams,
+  completion,
   completionStream,
   convertDeltaToMessageChunk,
+  convertMessageToMessageChunk,
   createEmbeddings,
   createRequestContext,
   formatToolToOpenAITool,
@@ -481,5 +649,7 @@ export {
   langchainMessageToOpenAIMessage,
   messageTypeToOpenAIRole,
   processReasoningContent,
-  processStreamResponse
+  processResponse,
+  processStreamResponse,
+  supportImageInput
 };
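The .mjs changes mirror the .cjs ones line for line; only the module syntax differs (bare `import`/`export` instead of `require` and the `__toCommonJS` wrapper), so both entry points expose the same new API. A sketch of pulling the additions from the ESM build (assuming standard resolution of the `module` field in package.json):

import {
  completion,
  completionStream,
  convertMessageToMessageChunk,
  processResponse,
  supportImageInput
} from '@chatluna/v1-shared-adapter';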
package/lib/requester.d.ts
CHANGED
@@ -4,6 +4,7 @@ import { ClientConfig } from 'koishi-plugin-chatluna/llm-core/platform/config';
 import { SSEEvent } from 'koishi-plugin-chatluna/utils/sse';
 import { ChatLunaPlugin } from 'koishi-plugin-chatluna/services/chat';
 import { Context } from 'koishi';
+import { Response } from 'undici/types/fetch';
 interface RequestContext<T extends ClientConfig = ClientConfig, R extends ChatLunaPlugin.Config = ChatLunaPlugin.Config> {
     ctx: Context;
     config: T;
@@ -38,7 +39,9 @@ export declare function processReasoningContent(delta: {
     isSet: boolean;
 }): number;
 export declare function processStreamResponse<T extends ClientConfig, R extends ChatLunaPlugin.Config>(requestContext: RequestContext<T, R>, iterator: AsyncGenerator<SSEEvent, string, unknown>): AsyncGenerator<ChatGenerationChunk, void, unknown>;
+export declare function processResponse<T extends ClientConfig, R extends ChatLunaPlugin.Config>(requestContext: RequestContext<T, R>, response: Response): Promise<ChatGenerationChunk>;
 export declare function completionStream<T extends ClientConfig, R extends ChatLunaPlugin.Config>(requestContext: RequestContext<T, R>, params: ModelRequestParams, completionUrl?: string, enableGoogleSearch?: boolean, supportImageInput?: boolean): AsyncGenerator<ChatGenerationChunk>;
+export declare function completion<T extends ClientConfig, R extends ChatLunaPlugin.Config>(requestContext: RequestContext<T, R>, params: ModelRequestParams, completionUrl?: string, enableGoogleSearch?: boolean, supportImageInput?: boolean): Promise<ChatGenerationChunk>;
 export declare function createEmbeddings<T extends ClientConfig, R extends ChatLunaPlugin.Config>(requestContext: RequestContext<T, R>, params: EmbeddingsRequestParams, embeddingUrl?: string): Promise<number[] | number[][]>;
 export declare function getModels<T extends ClientConfig, R extends ChatLunaPlugin.Config>(requestContext: RequestContext<T, R>): Promise<string[]>;
 export declare function createRequestContext<T extends ClientConfig, R extends ChatLunaPlugin.Config>(ctx: Context, config: T, pluginConfig: R, plugin: ChatLunaPlugin, modelRequester: ModelRequester<T, R>): RequestContext<T, R>;
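`completion` is declared as the non-streaming counterpart of `completionStream`: identical parameters, but it resolves to a single `ChatGenerationChunk` (produced by the new `processResponse`) instead of yielding chunks. A hedged usage sketch; `requestContext` is assumed to come from `createRequestContext` and `params` from the caller:

// Non-streaming: one chunk, with token usage attached by processResponse.
const result = await completion(requestContext, params);
console.log(result.text, result.generationInfo?.tokenUsage);

// Streaming: an async generator of chunks.
for await (const chunk of completionStream(requestContext, params)) {
  process.stdout.write(chunk.text);
}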
package/lib/types.d.ts
CHANGED
@@ -32,6 +32,7 @@ export interface ChatCompletionResponseMessage {
             detail?: 'low' | 'high';
         };
     })[];
+    reasoning_content?: string;
     name?: string;
     tool_calls?: ChatCompletionRequestMessageToolCall[];
     tool_call_id?: string;
@@ -50,6 +51,7 @@ export interface ChatCompletionTool {
 export interface ChatCompletionRequestMessageToolCall {
     id: string;
     type: 'function';
+    index?: number;
     function: {
         name: string;
         arguments: string;
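Both additions are optional fields, so existing callers keep compiling: `reasoning_content` carries a model's separate reasoning text alongside `content` (the bundles copy it into `additional_kwargs.reasoning_content`), and `index` matches the per-call position field that OpenAI-compatible APIs include on streamed tool calls. An illustrative value (the field contents are made up):

const message: ChatCompletionResponseMessage = {
  role: 'assistant',
  content: 'The answer is 4.',
  reasoning_content: 'Add 2 and 2.',
  tool_calls: [{
    id: 'call_0',
    type: 'function',
    index: 0, // position of this call in the streamed sequence
    function: { name: 'calculator', arguments: '{"expr":"2+2"}' }
  }]
};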
package/lib/utils.d.ts
CHANGED
@@ -5,4 +5,5 @@ export declare function langchainMessageToOpenAIMessage(messages: BaseMessage[],
 export declare function messageTypeToOpenAIRole(type: MessageType): ChatCompletionResponseMessageRoleEnum;
 export declare function formatToolsToOpenAITools(tools: StructuredTool[], includeGoogleSearch: boolean): ChatCompletionTool[];
 export declare function formatToolToOpenAITool(tool: StructuredTool): ChatCompletionTool;
+export declare function convertMessageToMessageChunk(message: ChatCompletionResponseMessage): HumanMessageChunk | AIMessageChunk | SystemMessageChunk | FunctionMessageChunk | ToolMessageChunk | ChatMessageChunk;
 export declare function convertDeltaToMessageChunk(delta: Record<string, any>, defaultRole?: ChatCompletionResponseMessageRoleEnum): HumanMessageChunk | AIMessageChunk | SystemMessageChunk | FunctionMessageChunk | ToolMessageChunk | ChatMessageChunk;
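`convertMessageToMessageChunk` is the non-streaming sibling of `convertDeltaToMessageChunk`: given a complete response message, it picks the LangChain chunk class by role (user, assistant, system, function, tool, with `ChatMessageChunk` as the fallback and an empty role defaulting to assistant). Per the implementation in the bundles above, something like:

import { AIMessageChunk } from '@langchain/core/messages';

const chunk = convertMessageToMessageChunk({
  role: 'assistant',
  content: 'hello',
  reasoning_content: 'greeting the user'
});
chunk instanceof AIMessageChunk;            // true
chunk.additional_kwargs.reasoning_content;  // 'greeting the user'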
package/package.json
CHANGED
@@ -1,7 +1,7 @@
 {
     "name": "@chatluna/v1-shared-adapter",
     "description": "chatluna shared adapter",
-    "version": "1.0.2",
+    "version": "1.0.4",
     "main": "lib/index.cjs",
     "module": "lib/index.mjs",
     "typings": "lib/index.d.ts",
@@ -70,6 +70,6 @@
     },
     "peerDependencies": {
         "koishi": "^4.18.7",
-        "koishi-plugin-chatluna": "^1.3.0-alpha.
+        "koishi-plugin-chatluna": "^1.3.0-alpha.16"
     }
 }