@chatluna/v1-shared-adapter 1.0.21 → 1.0.23
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/lib/client.d.ts +2 -0
- package/lib/index.cjs +47 -19
- package/lib/index.mjs +45 -19
- package/package.json +2 -2
package/lib/client.d.ts
CHANGED
@@ -1,5 +1,7 @@
 import { ModelInfo } from 'koishi-plugin-chatluna/llm-core/platform/types';
 export type OpenAIReasoningEffort = 'none' | 'minimal' | 'low' | 'medium' | 'high' | 'xhigh';
+export declare const reasoningEffortModelSuffixes: readonly ["non-thinking", "minimal-thinking", "low-thinking", "medium-thinking", "high-thinking", "xhigh-thinking", "thinking"];
+export declare function expandReasoningEffortModelVariants(model: string): string[];
 export declare function parseOpenAIModelNameWithReasoningEffort(modelName: string): {
     model: string;
     reasoningEffort?: OpenAIReasoningEffort;
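The two added declarations make the "-thinking" suffix scheme round-trippable: expandReasoningEffortModelVariants fans a base model name out to every suffixed variant, and the existing parseOpenAIModelNameWithReasoningEffort maps a variant back to its base name and effort. A minimal usage sketch; the parse result shown is an assumption inferred from the suffix list and the OpenAIReasoningEffort union, since the parser body is not part of this diff:

    import {
        expandReasoningEffortModelVariants,
        parseOpenAIModelNameWithReasoningEffort
    } from '@chatluna/v1-shared-adapter'

    const variants = expandReasoningEffortModelVariants('gpt-5')
    // ['gpt-5-non-thinking', 'gpt-5-minimal-thinking', ..., 'gpt-5-thinking']

    // Presumed inverse mapping:
    const parsed = parseOpenAIModelNameWithReasoningEffort('gpt-5-high-thinking')
    // => { model: 'gpt-5', reasoningEffort: 'high' }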
package/lib/index.cjs
CHANGED
@@ -27,6 +27,7 @@ __export(index_exports, {
   convertMessageToMessageChunk: () => convertMessageToMessageChunk,
   createEmbeddings: () => createEmbeddings,
   createRequestContext: () => createRequestContext,
+  expandReasoningEffortModelVariants: () => expandReasoningEffortModelVariants,
   fetchImageUrl: () => fetchImageUrl,
   formatToolToOpenAITool: () => formatToolToOpenAITool,
   formatToolsToOpenAITools: () => formatToolsToOpenAITools,
@@ -42,6 +43,7 @@ __export(index_exports, {
   processReasoningContent: () => processReasoningContent,
   processResponse: () => processResponse,
   processStreamResponse: () => processStreamResponse,
+  reasoningEffortModelSuffixes: () => reasoningEffortModelSuffixes,
   removeAdditionalProperties: () => removeAdditionalProperties,
   supportImageInput: () => supportImageInput,
   transformSystemMessages: () => transformSystemMessages
@@ -50,6 +52,19 @@ module.exports = __toCommonJS(index_exports);
 
 // src/client.ts
 var import_count_tokens = require("koishi-plugin-chatluna/llm-core/utils/count_tokens");
+var reasoningEffortModelSuffixes = [
+  "non-thinking",
+  "minimal-thinking",
+  "low-thinking",
+  "medium-thinking",
+  "high-thinking",
+  "xhigh-thinking",
+  "thinking"
+];
+function expandReasoningEffortModelVariants(model) {
+  return reasoningEffortModelSuffixes.map((suffix) => `${model}-${suffix}`);
+}
+__name(expandReasoningEffortModelVariants, "expandReasoningEffortModelVariants");
 function parseOpenAIModelNameWithReasoningEffort(modelName) {
   let model = modelName;
   let reasoningEffort;
@@ -155,6 +170,7 @@ var imageModelMatchers = [
   "gpt-4.1",
   "gpt-5",
   "glm-*v",
+  "kimi-k2.5",
   "step3",
   "grok-4"
 ].map((pattern) => createGlobMatcher(pattern));
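On the imageModelMatchers change: "kimi-k2.5" joins the glob patterns that mark models as image-capable. createGlobMatcher itself is not shown in this diff; the following is a hypothetical sketch of how such a matcher is commonly implemented, assuming "*" is the only wildcard and every other character (including the dot in "kimi-k2.5") matches literally:

    // Hypothetical stand-in for createGlobMatcher, not the package's code.
    function createGlobMatcher(pattern: string): (model: string) => boolean {
        const regex = new RegExp(
            '^' +
                pattern
                    .replace(/[.+?^${}()|[\]\\]/g, '\\$&') // escape regex literals
                    .replace(/\*/g, '.*') + // widen the glob wildcard
                '$'
        )
        return (model) => regex.test(model)
    }

    createGlobMatcher('glm-*v')('glm-4.1v') // true
    createGlobMatcher('kimi-k2.5')('kimi-k2x5') // false: the dot is literal

Under those assumptions the new entry matches exactly the name "kimi-k2.5".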
@@ -215,17 +231,30 @@ async function langchainMessageToOpenAIMessage(messages, plugin, model, supportI
             text: rawMessage.content
           }
         ];
-
-
-
-
-
-
+        const imageContents = await Promise.all(
+          images.map(async (image) => {
+            try {
+              const url = await fetchImageUrl(plugin, {
+                type: "image_url",
+                image_url: { url: image }
+              });
+              return {
+                type: "image_url",
+                image_url: {
+                  url,
+                  detail: "low"
+                }
+              };
+            } catch {
+              return null;
             }
-        })
-
+          })
+        );
+        msg.content.push(
+          ...imageContents.filter((content) => content != null)
+        );
       } else if (Array.isArray(msg.content) && msg.content.length > 0) {
-
+        const mappedContent = await Promise.all(
           msg.content.map(async (content) => {
             if (!(0, import_string.isMessageContentImageUrl)(content)) return content;
             try {
@@ -238,10 +267,11 @@ async function langchainMessageToOpenAIMessage(messages, plugin, model, supportI
               }
             };
           } catch {
-            return
+            return null;
           }
         })
       );
+      msg.content = mappedContent.filter((content) => content != null);
     }
     result.push(msg);
   }
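Both branches of langchainMessageToOpenAIMessage now use the same fan-out pattern: image fetches run concurrently under Promise.all, a failing fetch resolves to null instead of rejecting the whole batch (the old visible code returned undefined and applied no filter), and the nulls are stripped afterwards. A self-contained sketch of the pattern, with illustrative names rather than code from the package:

    // Worker failures are swallowed per item rather than failing the batch.
    async function mapDroppingFailures<T, R>(
        items: T[],
        worker: (item: T) => Promise<R>
    ): Promise<R[]> {
        const settled: (R | null)[] = await Promise.all(
            items.map(async (item) => {
                try {
                    return await worker(item)
                } catch {
                    return null // one rejection no longer poisons Promise.all
                }
            })
        )
        return settled.filter((r): r is R => r != null)
    }

Promise.allSettled would give the same resilience; mapping failures to null keeps the element type simple and the filtering step explicit.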
@@ -695,7 +725,7 @@ async function* completionStream(requestContext, params, completionUrl = "chat/c
     const iterator = (0, import_sse.sseIterable)(response);
     yield* processStreamResponse(requestContext, iterator);
   } catch (e) {
-    if (requestContext.ctx.chatluna.
+    if (requestContext.ctx.chatluna.currentConfig.isLog) {
       await (0, import_logger.trackLogToLocal)(
         "Request",
         JSON.stringify(chatCompletionParams),
@@ -729,7 +759,7 @@ async function completion(requestContext, params, completionUrl = "chat/completi
     );
     return await processResponse(requestContext, response);
   } catch (e) {
-    if (requestContext.ctx.chatluna.
+    if (requestContext.ctx.chatluna.currentConfig.isLog) {
       await (0, import_logger.trackLogToLocal)(
         "Request",
         JSON.stringify(chatCompletionParams),
@@ -799,13 +829,9 @@ async function getModels(requestContext, config) {
       push(model);
       if (!isOpenAIReasoningModel(model)) continue;
       if (hasThinkingTag(model)) continue;
-
-
-
-      push(`${model}-low-thinking`);
-      push(`${model}-medium-thinking`);
-      push(`${model}-high-thinking`);
-      push(`${model}-xhigh-thinking`);
+      for (const variant of expandReasoningEffortModelVariants(model)) {
+        push(variant);
+      }
     }
     return expanded;
   } catch (e) {
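getModels now derives the thinking variants from the shared suffix list instead of hard-coded push calls, so the listing stays in lockstep with reasoningEffortModelSuffixes. This also widens behaviour: the loop now emits the non-thinking, minimal-thinking, and bare thinking variants that the four surviving push calls in 1.0.21 did not cover. An illustrative comparison, assuming a hypothetical model name "o3" that passes isOpenAIReasoningModel and carries no thinking tag:

    import { expandReasoningEffortModelVariants } from '@chatluna/v1-shared-adapter'

    const before = ['low', 'medium', 'high', 'xhigh'].map(
        (effort) => `o3-${effort}-thinking`
    ) // the four variants the removed push calls produced
    const after = expandReasoningEffortModelVariants('o3')
    // additionally contains 'o3-non-thinking', 'o3-minimal-thinking'
    // and 'o3-thinking'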
@@ -831,6 +857,7 @@ __name(createRequestContext, "createRequestContext");
   convertMessageToMessageChunk,
   createEmbeddings,
   createRequestContext,
+  expandReasoningEffortModelVariants,
   fetchImageUrl,
   formatToolToOpenAITool,
   formatToolsToOpenAITools,
@@ -846,6 +873,7 @@ __name(createRequestContext, "createRequestContext");
   processReasoningContent,
   processResponse,
   processStreamResponse,
+  reasoningEffortModelSuffixes,
   removeAdditionalProperties,
   supportImageInput,
   transformSystemMessages
package/lib/index.mjs
CHANGED
@@ -3,6 +3,19 @@ var __name = (target, value) => __defProp(target, "name", { value, configurable:
 
 // src/client.ts
 import { getModelContextSize } from "koishi-plugin-chatluna/llm-core/utils/count_tokens";
+var reasoningEffortModelSuffixes = [
+  "non-thinking",
+  "minimal-thinking",
+  "low-thinking",
+  "medium-thinking",
+  "high-thinking",
+  "xhigh-thinking",
+  "thinking"
+];
+function expandReasoningEffortModelVariants(model) {
+  return reasoningEffortModelSuffixes.map((suffix) => `${model}-${suffix}`);
+}
+__name(expandReasoningEffortModelVariants, "expandReasoningEffortModelVariants");
 function parseOpenAIModelNameWithReasoningEffort(modelName) {
   let model = modelName;
   let reasoningEffort;
@@ -108,6 +121,7 @@ var imageModelMatchers = [
   "gpt-4.1",
   "gpt-5",
   "glm-*v",
+  "kimi-k2.5",
   "step3",
   "grok-4"
 ].map((pattern) => createGlobMatcher(pattern));
@@ -181,17 +195,30 @@ async function langchainMessageToOpenAIMessage(messages, plugin, model, supportI
             text: rawMessage.content
           }
         ];
-
-
-
-
-
-
+        const imageContents = await Promise.all(
+          images.map(async (image) => {
+            try {
+              const url = await fetchImageUrl(plugin, {
+                type: "image_url",
+                image_url: { url: image }
+              });
+              return {
+                type: "image_url",
+                image_url: {
+                  url,
+                  detail: "low"
+                }
+              };
+            } catch {
+              return null;
             }
-        })
-
+          })
+        );
+        msg.content.push(
+          ...imageContents.filter((content) => content != null)
+        );
       } else if (Array.isArray(msg.content) && msg.content.length > 0) {
-
+        const mappedContent = await Promise.all(
           msg.content.map(async (content) => {
             if (!isMessageContentImageUrl(content)) return content;
             try {
@@ -204,10 +231,11 @@ async function langchainMessageToOpenAIMessage(messages, plugin, model, supportI
               }
             };
           } catch {
-            return
+            return null;
           }
         })
       );
+      msg.content = mappedContent.filter((content) => content != null);
     }
     result.push(msg);
   }
@@ -661,7 +689,7 @@ async function* completionStream(requestContext, params, completionUrl = "chat/c
     const iterator = sseIterable(response);
     yield* processStreamResponse(requestContext, iterator);
   } catch (e) {
-    if (requestContext.ctx.chatluna.
+    if (requestContext.ctx.chatluna.currentConfig.isLog) {
       await trackLogToLocal(
         "Request",
         JSON.stringify(chatCompletionParams),
@@ -695,7 +723,7 @@ async function completion(requestContext, params, completionUrl = "chat/completi
     );
     return await processResponse(requestContext, response);
   } catch (e) {
-    if (requestContext.ctx.chatluna.
+    if (requestContext.ctx.chatluna.currentConfig.isLog) {
       await trackLogToLocal(
         "Request",
         JSON.stringify(chatCompletionParams),
@@ -765,13 +793,9 @@ async function getModels(requestContext, config) {
       push(model);
       if (!isOpenAIReasoningModel(model)) continue;
       if (hasThinkingTag(model)) continue;
-
-
-
-      push(`${model}-low-thinking`);
-      push(`${model}-medium-thinking`);
-      push(`${model}-high-thinking`);
-      push(`${model}-xhigh-thinking`);
+      for (const variant of expandReasoningEffortModelVariants(model)) {
+        push(variant);
+      }
     }
     return expanded;
   } catch (e) {
@@ -796,6 +820,7 @@ export {
   convertMessageToMessageChunk,
   createEmbeddings,
   createRequestContext,
+  expandReasoningEffortModelVariants,
   fetchImageUrl,
   formatToolToOpenAITool,
   formatToolsToOpenAITools,
@@ -811,6 +836,7 @@ export {
   processReasoningContent,
   processResponse,
   processStreamResponse,
+  reasoningEffortModelSuffixes,
   removeAdditionalProperties,
   supportImageInput,
   transformSystemMessages
package/package.json
CHANGED
@@ -1,7 +1,7 @@
 {
   "name": "@chatluna/v1-shared-adapter",
   "description": "chatluna shared adapter",
-  "version": "1.0.21",
+  "version": "1.0.23",
   "main": "lib/index.cjs",
   "module": "lib/index.mjs",
   "typings": "lib/index.d.ts",
@@ -70,6 +70,6 @@
   },
   "peerDependencies": {
     "koishi": "^4.18.9",
-    "koishi-plugin-chatluna": "^1.3.
+    "koishi-plugin-chatluna": "^1.3.16"
   }
 }