@chatluna/v1-shared-adapter 1.0.24 → 1.0.26

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/lib/client.d.ts CHANGED
@@ -1,7 +1,7 @@
 import { ModelInfo } from 'koishi-plugin-chatluna/llm-core/platform/types';
 export type OpenAIReasoningEffort = 'none' | 'minimal' | 'low' | 'medium' | 'high' | 'xhigh';
 export declare const reasoningEffortModelSuffixes: readonly ["non-thinking", "minimal-thinking", "low-thinking", "medium-thinking", "high-thinking", "xhigh-thinking", "thinking"];
-export declare function expandReasoningEffortModelVariants(model: string): string[];
+export declare function expandReasoningEffortModelVariants(model: string, suffixes?: readonly string[]): string[];
 export declare function parseOpenAIModelNameWithReasoningEffort(modelName: string): {
     model: string;
     reasoningEffort?: OpenAIReasoningEffort;
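
The only interface change in the typings is the new optional `suffixes` parameter. A minimal usage sketch, assuming the package root re-exports this helper (the custom suffix list below is an invented example):

    import { expandReasoningEffortModelVariants } from '@chatluna/v1-shared-adapter';

    // Default call, unchanged from 1.0.24: expands over every built-in suffix.
    expandReasoningEffortModelVariants('gpt-5');
    // ['gpt-5-non-thinking', 'gpt-5-minimal-thinking', ..., 'gpt-5-thinking']

    // New in 1.0.26: restrict expansion to a caller-supplied subset.
    expandReasoningEffortModelVariants('gpt-5', ['low-thinking', 'high-thinking']);
    // ['gpt-5-low-thinking', 'gpt-5-high-thinking']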
package/lib/index.cjs CHANGED
@@ -61,8 +61,8 @@ var reasoningEffortModelSuffixes = [
   "xhigh-thinking",
   "thinking"
 ];
-function expandReasoningEffortModelVariants(model) {
-  return reasoningEffortModelSuffixes.map((suffix) => `${model}-${suffix}`);
+function expandReasoningEffortModelVariants(model, suffixes = reasoningEffortModelSuffixes) {
+  return suffixes.map((suffix) => `${model}-${suffix}`);
 }
 __name(expandReasoningEffortModelVariants, "expandReasoningEffortModelVariants");
 function parseOpenAIModelNameWithReasoningEffort(modelName) {
@@ -194,14 +194,15 @@ async function langchainMessageToOpenAIMessage(messages, plugin, model, supportI
   const result = [];
   const normalizedModel = model ? normalizeOpenAIModelName(model) : model;
   const isDeepseekThinkModel = normalizedModel?.includes("deepseek-reasoner");
+  console.log(messages);
   for (const rawMessage of messages) {
     const role = messageTypeToOpenAIRole(rawMessage.getType());
     const msg = {
-      content: rawMessage.content,
+      content: rawMessage.content === "" ? null : rawMessage.content,
       name: role === "assistant" || role === "tool" ? rawMessage.name : void 0,
       role,
       // function_call: rawMessage.additional_kwargs.function_call,
-      tool_call_id: rawMessage.tool_call_id
+      tool_call_id: rawMessage.tool_call_id || void 0
     };
     if (msg.tool_calls == null) {
       delete msg.tool_calls;
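
Two small normalizations in the message mapping: an empty-string `content` is now sent as `null`, and a falsy `tool_call_id` becomes `undefined` so the key disappears when the request body is JSON-stringified. A standalone sketch of the same logic, with the message shape simplified for illustration:

    interface WireMessage {
        role: string;
        content: string | null;
        tool_call_id?: string;
    }

    function normalize(role: string, content: string, toolCallId?: string): WireMessage {
        return {
            role,
            // Empty content goes out as null rather than "".
            content: content === '' ? null : content,
            // undefined-valued keys are omitted by JSON.stringify.
            tool_call_id: toolCallId || undefined
        };
    }

    JSON.stringify(normalize('assistant', ''));
    // '{"role":"assistant","content":null}'
    JSON.stringify(normalize('tool', 'ok', 'call_0'));
    // '{"role":"tool","content":"ok","tool_call_id":"call_0"}'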
@@ -224,7 +225,7 @@ async function langchainMessageToOpenAIMessage(messages, plugin, model, supportI
     }
     const images = rawMessage.additional_kwargs.images;
     const lowerModel = normalizedModel?.toLowerCase() ?? "";
-    if ((lowerModel?.includes("vision") || lowerModel?.includes("gpt-4o") || lowerModel?.includes("claude") || lowerModel?.includes("gemini") || lowerModel?.includes("qwen-vl") || lowerModel?.includes("omni") || lowerModel?.includes("qwen2.5-vl") || lowerModel?.includes("qwen2.5-omni") || lowerModel?.includes("qwen-omni") || lowerModel?.includes("qwen2-vl") || lowerModel?.includes("qvq") || normalizedModel?.includes("o1") || normalizedModel?.includes("o4") || normalizedModel?.includes("o3") || normalizedModel?.includes("gpt-4.1") || normalizedModel?.includes("gpt-5") || supportImageInput2) && images != null) {
+    if ((lowerModel?.includes("vision") || lowerModel?.includes("gpt-4o") || lowerModel?.includes("claude") || lowerModel?.includes("gemini") || lowerModel?.includes("qwen-vl") || lowerModel?.includes("omni") || lowerModel?.includes("qwen2.5-vl") || lowerModel?.includes("qwen2.5-omni") || lowerModel?.includes("qwen-omni") || lowerModel?.includes("qwen2-vl") || lowerModel?.includes("qwen3.5") || lowerModel?.includes("qvq") || normalizedModel?.includes("o1") || normalizedModel?.includes("o4") || normalizedModel?.includes("o3") || normalizedModel?.includes("gpt-4.1") || normalizedModel?.includes("gpt-5") || supportImageInput2) && images != null) {
       msg.content = [
         {
           type: "text",
@@ -275,6 +276,36 @@ async function langchainMessageToOpenAIMessage(messages, plugin, model, supportI
     }
     result.push(msg);
   }
+  for (let i = 0; i < result.length; i++) {
+    if (result[i].role !== "assistant") continue;
+    const assistantMsg = result[i];
+    const toolMessages = [];
+    for (let j = i + 1; j < result.length && result[j].role === "tool"; j++) {
+      toolMessages.push(result[j]);
+    }
+    if (toolMessages.length === 0) continue;
+    if (!assistantMsg.tool_calls) {
+      assistantMsg.tool_calls = [];
+    }
+    for (let k = 0; k < toolMessages.length; k++) {
+      if (!assistantMsg.tool_calls[k]) {
+        assistantMsg.tool_calls[k] = {
+          id: `call_${k}`,
+          type: "function",
+          function: {
+            name: toolMessages[k].name || "unknown",
+            arguments: "{}"
+          }
+        };
+      }
+      if (!assistantMsg.tool_calls[k].id) {
+        assistantMsg.tool_calls[k].id = `call_${k}`;
+      }
+      if (!toolMessages[k].tool_call_id) {
+        toolMessages[k].tool_call_id = assistantMsg.tool_calls[k].id;
+      }
+    }
+  }
   if (removeSystemMessage) {
     return transformSystemMessages(result);
   }
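
The new pass walks the finished message list and pairs orphaned tool results with the assistant turn that precedes them: it synthesizes a `tool_calls` entry (ids `call_0`, `call_1`, ...) when one is missing and backfills the tool message's `tool_call_id` to match, which strict OpenAI-compatible endpoints require. A before/after illustration with invented message values:

    // Before: a tool message follows an assistant turn, but neither side carries a call id.
    // [ { role: 'assistant', content: null },
    //   { role: 'tool', name: 'search', content: '{"hits":3}' } ]

    // After the backfill pass, the pair is linked through a synthesized id:
    // [ { role: 'assistant', content: null,
    //     tool_calls: [{ id: 'call_0', type: 'function',
    //                    function: { name: 'search', arguments: '{}' } }] },
    //   { role: 'tool', name: 'search', content: '{"hits":3}', tool_call_id: 'call_0' } ]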
@@ -346,7 +377,12 @@ async function fetchImageUrl(plugin, content) {
   }
   const ext = url.match(/\.([^.?#]+)(?:[?#]|$)/)?.[1]?.toLowerCase();
   const imageType = (0, import_string.getImageMimeType)(ext);
-  const buffer = await plugin.fetch(url).then((res) => res.arrayBuffer()).then(Buffer.from);
+  const buffer = await plugin.fetch(url).then((res) => {
+    if (!res.ok) {
+      throw new Error(`Failed to fetch image: ${res.status}`);
+    }
+    return res.arrayBuffer();
+  }).then(Buffer.from);
   return `data:${imageType};base64,${buffer.toString("base64")}`;
 }
 __name(fetchImageUrl, "fetchImageUrl");
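
Previously a non-2xx response was base64-encoded as if it were image bytes, wrapping an error page in the data URI; the fetch now fails fast instead. The same pattern in isolation, using the standard fetch API with a placeholder URL:

    const res = await fetch('https://example.com/image.png');
    if (!res.ok) {
        throw new Error(`Failed to fetch image: ${res.status}`);
    }
    const buffer = Buffer.from(await res.arrayBuffer());
    const dataUri = `data:image/png;base64,${buffer.toString('base64')}`;

The same five changes appear in the ESM build below.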
package/lib/index.mjs CHANGED
@@ -12,8 +12,8 @@ var reasoningEffortModelSuffixes = [
   "xhigh-thinking",
   "thinking"
 ];
-function expandReasoningEffortModelVariants(model) {
-  return reasoningEffortModelSuffixes.map((suffix) => `${model}-${suffix}`);
+function expandReasoningEffortModelVariants(model, suffixes = reasoningEffortModelSuffixes) {
+  return suffixes.map((suffix) => `${model}-${suffix}`);
 }
 __name(expandReasoningEffortModelVariants, "expandReasoningEffortModelVariants");
 function parseOpenAIModelNameWithReasoningEffort(modelName) {
@@ -158,14 +158,15 @@ async function langchainMessageToOpenAIMessage(messages, plugin, model, supportI
   const result = [];
   const normalizedModel = model ? normalizeOpenAIModelName(model) : model;
   const isDeepseekThinkModel = normalizedModel?.includes("deepseek-reasoner");
+  console.log(messages);
   for (const rawMessage of messages) {
     const role = messageTypeToOpenAIRole(rawMessage.getType());
     const msg = {
-      content: rawMessage.content,
+      content: rawMessage.content === "" ? null : rawMessage.content,
       name: role === "assistant" || role === "tool" ? rawMessage.name : void 0,
       role,
       // function_call: rawMessage.additional_kwargs.function_call,
-      tool_call_id: rawMessage.tool_call_id
+      tool_call_id: rawMessage.tool_call_id || void 0
     };
     if (msg.tool_calls == null) {
       delete msg.tool_calls;
@@ -188,7 +189,7 @@ async function langchainMessageToOpenAIMessage(messages, plugin, model, supportI
     }
     const images = rawMessage.additional_kwargs.images;
     const lowerModel = normalizedModel?.toLowerCase() ?? "";
-    if ((lowerModel?.includes("vision") || lowerModel?.includes("gpt-4o") || lowerModel?.includes("claude") || lowerModel?.includes("gemini") || lowerModel?.includes("qwen-vl") || lowerModel?.includes("omni") || lowerModel?.includes("qwen2.5-vl") || lowerModel?.includes("qwen2.5-omni") || lowerModel?.includes("qwen-omni") || lowerModel?.includes("qwen2-vl") || lowerModel?.includes("qvq") || normalizedModel?.includes("o1") || normalizedModel?.includes("o4") || normalizedModel?.includes("o3") || normalizedModel?.includes("gpt-4.1") || normalizedModel?.includes("gpt-5") || supportImageInput2) && images != null) {
+    if ((lowerModel?.includes("vision") || lowerModel?.includes("gpt-4o") || lowerModel?.includes("claude") || lowerModel?.includes("gemini") || lowerModel?.includes("qwen-vl") || lowerModel?.includes("omni") || lowerModel?.includes("qwen2.5-vl") || lowerModel?.includes("qwen2.5-omni") || lowerModel?.includes("qwen-omni") || lowerModel?.includes("qwen2-vl") || lowerModel?.includes("qwen3.5") || lowerModel?.includes("qvq") || normalizedModel?.includes("o1") || normalizedModel?.includes("o4") || normalizedModel?.includes("o3") || normalizedModel?.includes("gpt-4.1") || normalizedModel?.includes("gpt-5") || supportImageInput2) && images != null) {
       msg.content = [
         {
           type: "text",
@@ -239,6 +240,36 @@ async function langchainMessageToOpenAIMessage(messages, plugin, model, supportI
     }
     result.push(msg);
   }
+  for (let i = 0; i < result.length; i++) {
+    if (result[i].role !== "assistant") continue;
+    const assistantMsg = result[i];
+    const toolMessages = [];
+    for (let j = i + 1; j < result.length && result[j].role === "tool"; j++) {
+      toolMessages.push(result[j]);
+    }
+    if (toolMessages.length === 0) continue;
+    if (!assistantMsg.tool_calls) {
+      assistantMsg.tool_calls = [];
+    }
+    for (let k = 0; k < toolMessages.length; k++) {
+      if (!assistantMsg.tool_calls[k]) {
+        assistantMsg.tool_calls[k] = {
+          id: `call_${k}`,
+          type: "function",
+          function: {
+            name: toolMessages[k].name || "unknown",
+            arguments: "{}"
+          }
+        };
+      }
+      if (!assistantMsg.tool_calls[k].id) {
+        assistantMsg.tool_calls[k].id = `call_${k}`;
+      }
+      if (!toolMessages[k].tool_call_id) {
+        toolMessages[k].tool_call_id = assistantMsg.tool_calls[k].id;
+      }
+    }
+  }
   if (removeSystemMessage) {
     return transformSystemMessages(result);
   }
@@ -310,7 +341,12 @@ async function fetchImageUrl(plugin, content) {
   }
   const ext = url.match(/\.([^.?#]+)(?:[?#]|$)/)?.[1]?.toLowerCase();
   const imageType = getImageMimeType(ext);
-  const buffer = await plugin.fetch(url).then((res) => res.arrayBuffer()).then(Buffer.from);
+  const buffer = await plugin.fetch(url).then((res) => {
+    if (!res.ok) {
+      throw new Error(`Failed to fetch image: ${res.status}`);
+    }
+    return res.arrayBuffer();
+  }).then(Buffer.from);
   return `data:${imageType};base64,${buffer.toString("base64")}`;
 }
 __name(fetchImageUrl, "fetchImageUrl");
package/package.json CHANGED
@@ -1,7 +1,7 @@
 {
   "name": "@chatluna/v1-shared-adapter",
   "description": "chatluna shared adapter",
-  "version": "1.0.24",
+  "version": "1.0.26",
   "main": "lib/index.cjs",
   "module": "lib/index.mjs",
   "typings": "lib/index.d.ts",
@@ -70,6 +70,6 @@
   },
   "peerDependencies": {
     "koishi": "^4.18.9",
-    "koishi-plugin-chatluna": "^1.3.20"
+    "koishi-plugin-chatluna": "^1.3.21"
   }
 }