@chatluna/v1-shared-adapter 1.0.0 → 1.0.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/index.cjs +140 -1
- package/lib/index.mjs +137 -1
- package/lib/requester.d.ts +3 -0
- package/lib/types.d.ts +2 -0
- package/lib/utils.d.ts +2 -1
- package/package.json +2 -2
package/lib/index.cjs
CHANGED
@@ -21,8 +21,10 @@ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: tru
 var index_exports = {};
 __export(index_exports, {
   buildChatCompletionParams: () => buildChatCompletionParams,
+  completion: () => completion,
   completionStream: () => completionStream,
   convertDeltaToMessageChunk: () => convertDeltaToMessageChunk,
+  convertMessageToMessageChunk: () => convertMessageToMessageChunk,
   createEmbeddings: () => createEmbeddings,
   createRequestContext: () => createRequestContext,
   formatToolToOpenAITool: () => formatToolToOpenAITool,
@@ -34,6 +36,7 @@ __export(index_exports, {
   langchainMessageToOpenAIMessage: () => langchainMessageToOpenAIMessage,
   messageTypeToOpenAIRole: () => messageTypeToOpenAIRole,
   processReasoningContent: () => processReasoningContent,
+  processResponse: () => processResponse,
   processStreamResponse: () => processStreamResponse
 });
 module.exports = __toCommonJS(index_exports);
@@ -45,6 +48,9 @@ function isEmbeddingModel(modelName) {
 }
 __name(isEmbeddingModel, "isEmbeddingModel");
 function isNonLLMModel(modelName) {
+  if (modelName.includes("gemini") && modelName.includes("image")) {
+    return false;
+  }
   return ["whisper", "tts", "dall-e", "image", "rerank"].some(
     (keyword) => modelName.includes(keyword)
   );
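
The practical effect of the new guard: a model ID containing both "gemini" and "image" is no longer excluded by the generic "image" keyword, so Gemini image-generation chat models stay on the LLM path. A minimal TypeScript sketch of the resulting behavior (the model IDs are illustrative):

    // Restatement of the classifier after this change.
    function isNonLLMModel(modelName: string): boolean {
        // New exception: Gemini image-generation models still speak the chat API.
        if (modelName.includes("gemini") && modelName.includes("image")) {
            return false;
        }
        return ["whisper", "tts", "dall-e", "image", "rerank"].some(
            (keyword) => modelName.includes(keyword)
        );
    }

    isNonLLMModel("gemini-2.0-flash-preview-image-generation"); // false: treated as a chat model
    isNonLLMModel("dall-e-3");                                  // true: still filtered out
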
@@ -124,7 +130,7 @@ function langchainMessageToOpenAIMessage(messages, model, supportImageInput, rem
     }
     const images = rawMessage.additional_kwargs.images;
     const lowerModel = model?.toLowerCase() ?? "";
-    if ((lowerModel?.includes("vision") || lowerModel?.includes("gpt-4o") || lowerModel?.includes("claude") || lowerModel?.includes("gemini") || lowerModel?.includes("qwen-vl") || lowerModel?.includes("omni") || lowerModel?.includes("qwen2.5-vl") || lowerModel?.includes("qwen2.5-omni") || lowerModel?.includes("qwen-omni") || lowerModel?.includes("qwen2-vl") || lowerModel?.includes("qvq") || supportImageInput) && images != null) {
+    if ((lowerModel?.includes("vision") || lowerModel?.includes("gpt-4o") || lowerModel?.includes("claude") || lowerModel?.includes("gemini") || lowerModel?.includes("qwen-vl") || lowerModel?.includes("omni") || lowerModel?.includes("qwen2.5-vl") || lowerModel?.includes("qwen2.5-omni") || lowerModel?.includes("qwen-omni") || lowerModel?.includes("qwen2-vl") || lowerModel?.includes("qvq") || model?.includes("o1") || model?.includes("o4") || model?.includes("o3") || model?.includes("gpt-4.1") || model?.includes("gpt-5") || supportImageInput) && images != null) {
       msg.content = [
         {
           type: "text",
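
One detail worth flagging: the five new checks (o1, o4, o3, gpt-4.1, gpt-5) test `model` rather than `lowerModel`, so unlike the pre-existing keywords they match case-sensitively, and a short substring like "o1" will also hit unrelated IDs that happen to contain it. A condensed sketch of the predicate (the helper name is hypothetical):

    // Hypothetical helper name; condenses the inline condition from the diff above.
    function acceptsImageContent(model: string | undefined, supportImageInput: boolean): boolean {
        const lowerModel = model?.toLowerCase() ?? "";
        // Pre-existing keywords, matched against the lower-cased ID ("qwen2.5-omni"
        // and "qwen-omni" from the original list are subsumed by "omni").
        const caseInsensitive = ["vision", "gpt-4o", "claude", "gemini", "qwen-vl", "omni", "qwen2.5-vl", "qwen2-vl", "qvq"];
        // New in 1.0.3; these test the raw `model`, so they are case-sensitive.
        const caseSensitive = ["o1", "o4", "o3", "gpt-4.1", "gpt-5"];
        return (
            caseInsensitive.some((k) => lowerModel.includes(k)) ||
            caseSensitive.some((k) => model?.includes(k) ?? false) ||
            supportImageInput
        );
    }
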
@@ -260,6 +266,58 @@ function removeAdditionalProperties(schema) {
   return schema;
 }
 __name(removeAdditionalProperties, "removeAdditionalProperties");
+function convertMessageToMessageChunk(message) {
+  const content = message.content ?? "";
+  const reasoningContent = message.reasoning_content ?? "";
+  const role = ((message.role?.length ?? 0) > 0 ? message.role : "assistant").toLowerCase();
+  let additionalKwargs;
+  if (message.tool_calls) {
+    additionalKwargs = {
+      tool_calls: message.tool_calls
+    };
+  } else {
+    additionalKwargs = {};
+  }
+  if (reasoningContent.length > 0) {
+    additionalKwargs.reasoning_content = reasoningContent;
+  }
+  if (role === "user") {
+    return new import_messages.HumanMessageChunk({ content });
+  } else if (role === "assistant") {
+    const toolCallChunks = [];
+    if (Array.isArray(message.tool_calls)) {
+      for (const rawToolCall of message.tool_calls) {
+        toolCallChunks.push({
+          name: rawToolCall.function?.name,
+          args: rawToolCall.function?.arguments,
+          id: rawToolCall.id
+        });
+      }
+    }
+    return new import_messages.AIMessageChunk({
+      content,
+      tool_call_chunks: toolCallChunks,
+      additional_kwargs: additionalKwargs
+    });
+  } else if (role === "system") {
+    return new import_messages.SystemMessageChunk({ content });
+  } else if (role === "function") {
+    return new import_messages.FunctionMessageChunk({
+      content,
+      additional_kwargs: additionalKwargs,
+      name: message.name
+    });
+  } else if (role === "tool") {
+    return new import_messages.ToolMessageChunk({
+      content,
+      additional_kwargs: additionalKwargs,
+      tool_call_id: message.tool_call_id
+    });
+  } else {
+    return new import_messages.ChatMessageChunk({ content, role });
+  }
+}
+__name(convertMessageToMessageChunk, "convertMessageToMessageChunk");
 function convertDeltaToMessageChunk(delta, defaultRole) {
   const role = ((delta.role?.length ?? 0) > 0 ? delta.role : defaultRole).toLowerCase();
   const content = delta.content ?? "";
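
convertMessageToMessageChunk is the non-streaming counterpart of convertDeltaToMessageChunk: it maps a complete `choice.message` onto the matching LangChain chunk class, copies tool calls into tool_call_chunks, and stashes any reasoning_content into additional_kwargs. A sketch with an invented payload:

    import { convertMessageToMessageChunk } from "@chatluna/v1-shared-adapter";
    import { AIMessageChunk } from "@langchain/core/messages";

    // Invented OpenAI-style non-streaming message, for illustration only.
    const chunk = convertMessageToMessageChunk({
        role: "assistant",
        content: "4",
        reasoning_content: "2 + 2 = 4"
    });

    console.log(chunk instanceof AIMessageChunk);            // true: "assistant" maps to AIMessageChunk
    console.log(chunk.additional_kwargs.reasoning_content);  // "2 + 2 = 4"
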
@@ -320,6 +378,7 @@ __name(convertDeltaToMessageChunk, "convertDeltaToMessageChunk");

 // src/requester.ts
 var import_messages2 = require("@langchain/core/messages");
+var import_string = require("koishi-plugin-chatluna/utils/string");
 function buildChatCompletionParams(params, enableGoogleSearch, supportImageInput) {
   const base = {
     model: params.model,
@@ -428,6 +487,57 @@ async function* processStreamResponse(requestContext, iterator) {
   }
 }
 __name(processStreamResponse, "processStreamResponse");
+async function processResponse(requestContext, response) {
+  if (response.status !== 200) {
+    throw new import_error.ChatLunaError(
+      import_error.ChatLunaErrorCode.API_REQUEST_FAILED,
+      new Error(
+        "Error when calling completion, Status: " + response.status + " " + response.statusText + ", Response: " + await response.text()
+      )
+    );
+  }
+  const responseText = await response.text();
+  try {
+    const data = JSON.parse(responseText);
+    if (data.error) {
+      throw new import_error.ChatLunaError(
+        import_error.ChatLunaErrorCode.API_REQUEST_FAILED,
+        new Error(
+          "Error when calling completion, Result: " + responseText
+        )
+      );
+    }
+    const choice = data.choices?.[0];
+    if (!choice) {
+      throw new import_error.ChatLunaError(
+        import_error.ChatLunaErrorCode.API_REQUEST_FAILED,
+        new Error(
+          "Error when calling completion, Result: " + responseText
+        )
+      );
+    }
+    const messageChunk = convertMessageToMessageChunk(choice.message);
+    return new import_outputs.ChatGenerationChunk({
+      message: messageChunk,
+      text: (0, import_string.getMessageContent)(messageChunk.content),
+      generationInfo: {
+        tokenUsage: data.usage
+      }
+    });
+  } catch (e) {
+    if (e instanceof import_error.ChatLunaError) {
+      throw e;
+    } else {
+      throw new import_error.ChatLunaError(
+        import_error.ChatLunaErrorCode.API_REQUEST_FAILED,
+        new Error(
+          "Error when calling completion, Error: " + e + ", Response: " + responseText
+        )
+      );
+    }
+  }
+}
+__name(processResponse, "processResponse");
 async function* completionStream(requestContext, params, completionUrl = "chat/completions", enableGoogleSearch, supportImageInput) {
   const { modelRequester } = requestContext;
   try {
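
processResponse funnels every failure into a ChatLunaError with API_REQUEST_FAILED: a non-200 status (reported with status text and body), a body carrying an `error` field, a missing `choices[0]`, or a body that fails JSON.parse. On success it returns a single ChatGenerationChunk. The minimal response body it accepts looks like this (values invented for illustration):

    // Smallest OpenAI-style body that processResponse maps to a ChatGenerationChunk.
    const body = {
        choices: [
            { message: { role: "assistant", content: "Hello!" } }
        ],
        usage: { prompt_tokens: 9, completion_tokens: 3, total_tokens: 12 }
    };
    // The resulting chunk carries:
    //   chunk.message                    - built by convertMessageToMessageChunk
    //   chunk.text                       - "Hello!" via getMessageContent
    //   chunk.generationInfo.tokenUsage  - the usage object, passed through verbatim
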
@@ -453,6 +563,32 @@ async function* completionStream(requestContext, params, completionUrl = "chat/c
   }
 }
 __name(completionStream, "completionStream");
+async function completion(requestContext, params, completionUrl = "chat/completions", enableGoogleSearch, supportImageInput) {
+  const { modelRequester } = requestContext;
+  const chatCompletionParams = buildChatCompletionParams(
+    params,
+    enableGoogleSearch ?? false,
+    supportImageInput ?? true
+  );
+  delete chatCompletionParams.stream;
+  try {
+    const response = await modelRequester.post(
+      completionUrl,
+      chatCompletionParams,
+      {
+        signal: params.signal
+      }
+    );
+    return await processResponse(requestContext, response);
+  } catch (e) {
+    if (e instanceof import_error.ChatLunaError) {
+      throw e;
+    } else {
+      throw new import_error.ChatLunaError(import_error.ChatLunaErrorCode.API_REQUEST_FAILED, e);
+    }
+  }
+}
+__name(completion, "completion");
 async function createEmbeddings(requestContext, params, embeddingUrl = "embeddings") {
   const { modelRequester } = requestContext;
   let data;
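
The new completion helper mirrors completionStream but strips `stream` from the built request and resolves with one chunk instead of yielding deltas. A usage sketch (the context and params values are assumed to exist):

    import { completion } from "@chatluna/v1-shared-adapter";

    // Assumed inputs: a context from createRequestContext and a ModelRequestParams value.
    declare const requestContext: Parameters<typeof completion>[0];
    declare const params: Parameters<typeof completion>[1];

    const chunk = await completion(
        requestContext,
        params,
        "chat/completions", // completionUrl, the default
        false,              // enableGoogleSearch, defaults to false
        true                // supportImageInput, defaults to true
    );
    console.log(chunk.text, chunk.generationInfo?.tokenUsage);
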
@@ -496,8 +632,10 @@ __name(createRequestContext, "createRequestContext");
 // Annotate the CommonJS export names for ESM import in node:
 0 && (module.exports = {
   buildChatCompletionParams,
+  completion,
   completionStream,
   convertDeltaToMessageChunk,
+  convertMessageToMessageChunk,
   createEmbeddings,
   createRequestContext,
   formatToolToOpenAITool,
@@ -509,5 +647,6 @@ __name(createRequestContext, "createRequestContext");
   langchainMessageToOpenAIMessage,
   messageTypeToOpenAIRole,
   processReasoningContent,
+  processResponse,
   processStreamResponse
 });

package/lib/index.mjs
CHANGED
@@ -8,6 +8,9 @@ function isEmbeddingModel(modelName) {
 }
 __name(isEmbeddingModel, "isEmbeddingModel");
 function isNonLLMModel(modelName) {
+  if (modelName.includes("gemini") && modelName.includes("image")) {
+    return false;
+  }
   return ["whisper", "tts", "dall-e", "image", "rerank"].some(
     (keyword) => modelName.includes(keyword)
   );
@@ -97,7 +100,7 @@ function langchainMessageToOpenAIMessage(messages, model, supportImageInput, rem
     }
     const images = rawMessage.additional_kwargs.images;
    const lowerModel = model?.toLowerCase() ?? "";
-    if ((lowerModel?.includes("vision") || lowerModel?.includes("gpt-4o") || lowerModel?.includes("claude") || lowerModel?.includes("gemini") || lowerModel?.includes("qwen-vl") || lowerModel?.includes("omni") || lowerModel?.includes("qwen2.5-vl") || lowerModel?.includes("qwen2.5-omni") || lowerModel?.includes("qwen-omni") || lowerModel?.includes("qwen2-vl") || lowerModel?.includes("qvq") || supportImageInput) && images != null) {
+    if ((lowerModel?.includes("vision") || lowerModel?.includes("gpt-4o") || lowerModel?.includes("claude") || lowerModel?.includes("gemini") || lowerModel?.includes("qwen-vl") || lowerModel?.includes("omni") || lowerModel?.includes("qwen2.5-vl") || lowerModel?.includes("qwen2.5-omni") || lowerModel?.includes("qwen-omni") || lowerModel?.includes("qwen2-vl") || lowerModel?.includes("qvq") || model?.includes("o1") || model?.includes("o4") || model?.includes("o3") || model?.includes("gpt-4.1") || model?.includes("gpt-5") || supportImageInput) && images != null) {
       msg.content = [
         {
           type: "text",
@@ -233,6 +236,58 @@ function removeAdditionalProperties(schema) {
   return schema;
 }
 __name(removeAdditionalProperties, "removeAdditionalProperties");
+function convertMessageToMessageChunk(message) {
+  const content = message.content ?? "";
+  const reasoningContent = message.reasoning_content ?? "";
+  const role = ((message.role?.length ?? 0) > 0 ? message.role : "assistant").toLowerCase();
+  let additionalKwargs;
+  if (message.tool_calls) {
+    additionalKwargs = {
+      tool_calls: message.tool_calls
+    };
+  } else {
+    additionalKwargs = {};
+  }
+  if (reasoningContent.length > 0) {
+    additionalKwargs.reasoning_content = reasoningContent;
+  }
+  if (role === "user") {
+    return new HumanMessageChunk({ content });
+  } else if (role === "assistant") {
+    const toolCallChunks = [];
+    if (Array.isArray(message.tool_calls)) {
+      for (const rawToolCall of message.tool_calls) {
+        toolCallChunks.push({
+          name: rawToolCall.function?.name,
+          args: rawToolCall.function?.arguments,
+          id: rawToolCall.id
+        });
+      }
+    }
+    return new AIMessageChunk({
+      content,
+      tool_call_chunks: toolCallChunks,
+      additional_kwargs: additionalKwargs
+    });
+  } else if (role === "system") {
+    return new SystemMessageChunk({ content });
+  } else if (role === "function") {
+    return new FunctionMessageChunk({
+      content,
+      additional_kwargs: additionalKwargs,
+      name: message.name
+    });
+  } else if (role === "tool") {
+    return new ToolMessageChunk({
+      content,
+      additional_kwargs: additionalKwargs,
+      tool_call_id: message.tool_call_id
+    });
+  } else {
+    return new ChatMessageChunk({ content, role });
+  }
+}
+__name(convertMessageToMessageChunk, "convertMessageToMessageChunk");
 function convertDeltaToMessageChunk(delta, defaultRole) {
   const role = ((delta.role?.length ?? 0) > 0 ? delta.role : defaultRole).toLowerCase();
   const content = delta.content ?? "";
@@ -293,6 +348,7 @@ __name(convertDeltaToMessageChunk, "convertDeltaToMessageChunk");

 // src/requester.ts
 import { AIMessageChunk as AIMessageChunk2 } from "@langchain/core/messages";
+import { getMessageContent } from "koishi-plugin-chatluna/utils/string";
 function buildChatCompletionParams(params, enableGoogleSearch, supportImageInput) {
   const base = {
     model: params.model,
@@ -401,6 +457,57 @@ async function* processStreamResponse(requestContext, iterator) {
   }
 }
 __name(processStreamResponse, "processStreamResponse");
+async function processResponse(requestContext, response) {
+  if (response.status !== 200) {
+    throw new ChatLunaError(
+      ChatLunaErrorCode.API_REQUEST_FAILED,
+      new Error(
+        "Error when calling completion, Status: " + response.status + " " + response.statusText + ", Response: " + await response.text()
+      )
+    );
+  }
+  const responseText = await response.text();
+  try {
+    const data = JSON.parse(responseText);
+    if (data.error) {
+      throw new ChatLunaError(
+        ChatLunaErrorCode.API_REQUEST_FAILED,
+        new Error(
+          "Error when calling completion, Result: " + responseText
+        )
+      );
+    }
+    const choice = data.choices?.[0];
+    if (!choice) {
+      throw new ChatLunaError(
+        ChatLunaErrorCode.API_REQUEST_FAILED,
+        new Error(
+          "Error when calling completion, Result: " + responseText
+        )
+      );
+    }
+    const messageChunk = convertMessageToMessageChunk(choice.message);
+    return new ChatGenerationChunk({
+      message: messageChunk,
+      text: getMessageContent(messageChunk.content),
+      generationInfo: {
+        tokenUsage: data.usage
+      }
+    });
+  } catch (e) {
+    if (e instanceof ChatLunaError) {
+      throw e;
+    } else {
+      throw new ChatLunaError(
+        ChatLunaErrorCode.API_REQUEST_FAILED,
+        new Error(
+          "Error when calling completion, Error: " + e + ", Response: " + responseText
+        )
+      );
+    }
+  }
+}
+__name(processResponse, "processResponse");
 async function* completionStream(requestContext, params, completionUrl = "chat/completions", enableGoogleSearch, supportImageInput) {
   const { modelRequester } = requestContext;
   try {
@@ -426,6 +533,32 @@ async function* completionStream(requestContext, params, completionUrl = "chat/c
   }
 }
 __name(completionStream, "completionStream");
+async function completion(requestContext, params, completionUrl = "chat/completions", enableGoogleSearch, supportImageInput) {
+  const { modelRequester } = requestContext;
+  const chatCompletionParams = buildChatCompletionParams(
+    params,
+    enableGoogleSearch ?? false,
+    supportImageInput ?? true
+  );
+  delete chatCompletionParams.stream;
+  try {
+    const response = await modelRequester.post(
+      completionUrl,
+      chatCompletionParams,
+      {
+        signal: params.signal
+      }
+    );
+    return await processResponse(requestContext, response);
+  } catch (e) {
+    if (e instanceof ChatLunaError) {
+      throw e;
+    } else {
+      throw new ChatLunaError(ChatLunaErrorCode.API_REQUEST_FAILED, e);
+    }
+  }
+}
+__name(completion, "completion");
 async function createEmbeddings(requestContext, params, embeddingUrl = "embeddings") {
   const { modelRequester } = requestContext;
   let data;
@@ -468,8 +601,10 @@ function createRequestContext(ctx, config, pluginConfig, plugin, modelRequester)
 __name(createRequestContext, "createRequestContext");
 export {
   buildChatCompletionParams,
+  completion,
   completionStream,
   convertDeltaToMessageChunk,
+  convertMessageToMessageChunk,
   createEmbeddings,
   createRequestContext,
   formatToolToOpenAITool,
@@ -481,5 +616,6 @@ export {
   langchainMessageToOpenAIMessage,
   messageTypeToOpenAIRole,
   processReasoningContent,
+  processResponse,
   processStreamResponse
 };
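
With this release the ESM entry exports a symmetrical pair of request paths; a sketch contrasting them (inputs assumed as before):

    import { completion, completionStream } from "@chatluna/v1-shared-adapter";

    declare const requestContext: Parameters<typeof completion>[0];
    declare const params: Parameters<typeof completion>[1];

    // Streaming: iterate incremental ChatGenerationChunk deltas.
    for await (const delta of completionStream(requestContext, params)) {
        process.stdout.write(delta.text);
    }

    // Non-streaming, new in 1.0.3: one awaited chunk, with token usage attached.
    const finalChunk = await completion(requestContext, params);
    console.log(finalChunk.generationInfo?.tokenUsage);
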
package/lib/requester.d.ts
CHANGED
@@ -4,6 +4,7 @@ import { ClientConfig } from 'koishi-plugin-chatluna/llm-core/platform/config';
 import { SSEEvent } from 'koishi-plugin-chatluna/utils/sse';
 import { ChatLunaPlugin } from 'koishi-plugin-chatluna/services/chat';
 import { Context } from 'koishi';
+import { Response } from 'undici/types/fetch';
 interface RequestContext<T extends ClientConfig = ClientConfig, R extends ChatLunaPlugin.Config = ChatLunaPlugin.Config> {
     ctx: Context;
     config: T;
@@ -38,7 +39,9 @@ export declare function processReasoningContent(delta: {
     isSet: boolean;
 }): number;
 export declare function processStreamResponse<T extends ClientConfig, R extends ChatLunaPlugin.Config>(requestContext: RequestContext<T, R>, iterator: AsyncGenerator<SSEEvent, string, unknown>): AsyncGenerator<ChatGenerationChunk, void, unknown>;
+export declare function processResponse<T extends ClientConfig, R extends ChatLunaPlugin.Config>(requestContext: RequestContext<T, R>, response: Response): Promise<ChatGenerationChunk>;
 export declare function completionStream<T extends ClientConfig, R extends ChatLunaPlugin.Config>(requestContext: RequestContext<T, R>, params: ModelRequestParams, completionUrl?: string, enableGoogleSearch?: boolean, supportImageInput?: boolean): AsyncGenerator<ChatGenerationChunk>;
+export declare function completion<T extends ClientConfig, R extends ChatLunaPlugin.Config>(requestContext: RequestContext<T, R>, params: ModelRequestParams, completionUrl?: string, enableGoogleSearch?: boolean, supportImageInput?: boolean): Promise<ChatGenerationChunk>;
 export declare function createEmbeddings<T extends ClientConfig, R extends ChatLunaPlugin.Config>(requestContext: RequestContext<T, R>, params: EmbeddingsRequestParams, embeddingUrl?: string): Promise<number[] | number[][]>;
 export declare function getModels<T extends ClientConfig, R extends ChatLunaPlugin.Config>(requestContext: RequestContext<T, R>): Promise<string[]>;
 export declare function createRequestContext<T extends ClientConfig, R extends ChatLunaPlugin.Config>(ctx: Context, config: T, pluginConfig: R, plugin: ChatLunaPlugin, modelRequester: ModelRequester<T, R>): RequestContext<T, R>;
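
Because processResponse is typed against the fetch-style Response from undici (which also backs Node's global fetch), it can be exercised directly with a hand-built Response. A test-style sketch with an invented body, assuming the global Response is structurally compatible with the declared type:

    import { processResponse } from "@chatluna/v1-shared-adapter";

    declare const requestContext: Parameters<typeof processResponse>[0]; // assumed: from createRequestContext

    // Node's global Response is undici's implementation; build one by hand.
    const response = new Response(
        JSON.stringify({
            choices: [{ message: { role: "assistant", content: "pong" } }],
            usage: { total_tokens: 5 }
        }),
        { status: 200, statusText: "OK" }
    );

    const chunk = await processResponse(requestContext, response);
    console.log(chunk.text); // "pong"
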
package/lib/types.d.ts
CHANGED
@@ -32,6 +32,7 @@ export interface ChatCompletionResponseMessage {
             detail?: 'low' | 'high';
         };
     })[];
+    reasoning_content?: string;
     name?: string;
     tool_calls?: ChatCompletionRequestMessageToolCall[];
     tool_call_id?: string;
@@ -50,6 +51,7 @@ export interface ChatCompletionTool {
 export interface ChatCompletionRequestMessageToolCall {
     id: string;
     type: 'function';
+    index?: number;
     function: {
         name: string;
         arguments: string;
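
The two type additions track what upstream backends actually send: reasoning_content is the field emitted by DeepSeek-style reasoning models, and index appears on tool calls in streamed OpenAI-style deltas. An illustrative value (the subpath import is assumed from the file layout shown above, not a documented entry point):

    // Assumed import path based on the package layout.
    import type { ChatCompletionResponseMessage } from "@chatluna/v1-shared-adapter/lib/types";

    const message: ChatCompletionResponseMessage = {
        role: "assistant",
        content: "It is 42.",
        reasoning_content: "Recognized the reference; one lookup was enough.",
        tool_calls: [
            {
                id: "call_abc123",
                type: "function",
                index: 0, // position of this call in a streamed delta sequence
                function: { name: "lookup", arguments: "{\"q\":\"answer\"}" }
            }
        ]
    };
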
package/lib/utils.d.ts
CHANGED
@@ -5,4 +5,5 @@ export declare function langchainMessageToOpenAIMessage(messages: BaseMessage[],
 export declare function messageTypeToOpenAIRole(type: MessageType): ChatCompletionResponseMessageRoleEnum;
 export declare function formatToolsToOpenAITools(tools: StructuredTool[], includeGoogleSearch: boolean): ChatCompletionTool[];
 export declare function formatToolToOpenAITool(tool: StructuredTool): ChatCompletionTool;
-export declare function
+export declare function convertMessageToMessageChunk(message: ChatCompletionResponseMessage): AIMessageChunk | HumanMessageChunk | SystemMessageChunk | FunctionMessageChunk | ToolMessageChunk | ChatMessageChunk;
+export declare function convertDeltaToMessageChunk(delta: Record<string, any>, defaultRole?: ChatCompletionResponseMessageRoleEnum): AIMessageChunk | HumanMessageChunk | SystemMessageChunk | FunctionMessageChunk | ToolMessageChunk | ChatMessageChunk;

package/package.json
CHANGED
@@ -1,7 +1,7 @@
 {
   "name": "@chatluna/v1-shared-adapter",
   "description": "chatluna shared adapter",
-  "version": "1.0.0",
+  "version": "1.0.3",
   "main": "lib/index.cjs",
   "module": "lib/index.mjs",
   "typings": "lib/index.d.ts",
@@ -70,6 +70,6 @@
   },
   "peerDependencies": {
     "koishi": "^4.18.7",
-    "koishi-plugin-chatluna": "^1.3.0-alpha.
+    "koishi-plugin-chatluna": "^1.3.0-alpha.15"
   }
 }