@chatluna/v1-shared-adapter 1.0.3 → 1.0.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/lib/client.d.ts CHANGED
@@ -2,3 +2,4 @@ import { ModelInfo } from 'koishi-plugin-chatluna/llm-core/platform/types';
  export declare function isEmbeddingModel(modelName: string): boolean;
  export declare function isNonLLMModel(modelName: string): boolean;
  export declare function getModelMaxContextSize(info: ModelInfo): number;
+ export declare function supportImageInput(modelName: string): boolean;
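For downstream adapter code, the new export can be checked before attaching image content to a message. A minimal usage sketch in TypeScript, assuming the package root re-exports this declaration and using an illustrative model name:

import { supportImageInput } from '@chatluna/v1-shared-adapter'

// Illustrative model name; the check is a plain substring/glob match on the lowercased name.
const model = 'qwen2.5-vl-72b-instruct'
if (supportImageInput(model)) {
    // safe to include image parts in the outgoing message content
}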
package/lib/index.cjs CHANGED
@@ -37,7 +37,8 @@ __export(index_exports, {
  messageTypeToOpenAIRole: () => messageTypeToOpenAIRole,
  processReasoningContent: () => processReasoningContent,
  processResponse: () => processResponse,
- processStreamResponse: () => processStreamResponse
+ processStreamResponse: () => processStreamResponse,
+ supportImageInput: () => supportImageInput
  });
  module.exports = __toCommonJS(index_exports);
 
@@ -92,6 +93,39 @@ function getModelMaxContextSize(info) {
  return (0, import_count_tokens.getModelContextSize)("o1-mini");
  }
  __name(getModelMaxContextSize, "getModelMaxContextSize");
+ function createGlobMatcher(pattern) {
+ if (!pattern.includes("*")) {
+ return (text) => text.includes(pattern);
+ }
+ const regex = new RegExp("^" + pattern.replace(/\*/g, ".*") + "$");
+ return (text) => regex.test(text);
+ }
+ __name(createGlobMatcher, "createGlobMatcher");
+ var imageModelMatchers = [
+ "vision",
+ "vl",
+ "gpt-4o",
+ "claude",
+ "gemini",
+ "qwen-vl",
+ "omni",
+ "qwen2.5-omni",
+ "qwen-omni",
+ "qvq",
+ "o1",
+ "o3",
+ "o4",
+ "gpt-4.1",
+ "gpt-5",
+ "glm-*v",
+ "step3",
+ "grok-4"
+ ].map((pattern) => createGlobMatcher(pattern));
+ function supportImageInput(modelName) {
+ const lowerModel = modelName.toLowerCase();
+ return imageModelMatchers.some((matcher) => matcher(lowerModel));
+ }
+ __name(supportImageInput, "supportImageInput");
 
  // src/requester.ts
  var import_outputs = require("@langchain/core/outputs");
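The matcher added above treats patterns without a "*" as substring checks on the lowercased model name and patterns containing "*" as anchored regexes. A small TypeScript sketch of that behavior, mirroring the shipped createGlobMatcher logic, with illustrative model names:

// Mirrors the createGlobMatcher logic from the hunk above.
function createGlobMatcher(pattern: string): (text: string) => boolean {
    if (!pattern.includes('*')) {
        return (text) => text.includes(pattern)
    }
    const regex = new RegExp('^' + pattern.replace(/\*/g, '.*') + '$')
    return (text) => regex.test(text)
}

// Illustrative checks (model names are examples, not taken from the package):
const matchVl = createGlobMatcher('vl')        // substring match
const matchGlmV = createGlobMatcher('glm-*v')  // anchored glob match
matchVl('qwen2.5-vl-72b-instruct')  // true  ("vl" appears in the name)
matchGlmV('glm-4v')                 // true  (matches ^glm-.*v$)
matchGlmV('glm-4v-plus')            // false (the glob is anchored with "$")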
@@ -101,7 +135,7 @@ var import_sse = require("koishi-plugin-chatluna/utils/sse");
  // src/utils.ts
  var import_messages = require("@langchain/core/messages");
  var import_zod_to_json_schema = require("zod-to-json-schema");
- function langchainMessageToOpenAIMessage(messages, model, supportImageInput, removeSystemMessage) {
+ function langchainMessageToOpenAIMessage(messages, model, supportImageInput2, removeSystemMessage) {
  const result = [];
  for (const rawMessage of messages) {
  const role = messageTypeToOpenAIRole(rawMessage.getType());
@@ -130,7 +164,7 @@ function langchainMessageToOpenAIMessage(messages, model, supportImageInput, rem
  }
  const images = rawMessage.additional_kwargs.images;
  const lowerModel = model?.toLowerCase() ?? "";
- if ((lowerModel?.includes("vision") || lowerModel?.includes("gpt-4o") || lowerModel?.includes("claude") || lowerModel?.includes("gemini") || lowerModel?.includes("qwen-vl") || lowerModel?.includes("omni") || lowerModel?.includes("qwen2.5-vl") || lowerModel?.includes("qwen2.5-omni") || lowerModel?.includes("qwen-omni") || lowerModel?.includes("qwen2-vl") || lowerModel?.includes("qvq") || model?.includes("o1") || model?.includes("o4") || model?.includes("o3") || model?.includes("gpt-4.1") || model?.includes("gpt-5") || supportImageInput) && images != null) {
+ if ((lowerModel?.includes("vision") || lowerModel?.includes("gpt-4o") || lowerModel?.includes("claude") || lowerModel?.includes("gemini") || lowerModel?.includes("qwen-vl") || lowerModel?.includes("omni") || lowerModel?.includes("qwen2.5-vl") || lowerModel?.includes("qwen2.5-omni") || lowerModel?.includes("qwen-omni") || lowerModel?.includes("qwen2-vl") || lowerModel?.includes("qvq") || model?.includes("o1") || model?.includes("o4") || model?.includes("o3") || model?.includes("gpt-4.1") || model?.includes("gpt-5") || supportImageInput2) && images != null) {
  msg.content = [
  {
  type: "text",
@@ -379,13 +413,13 @@ __name(convertDeltaToMessageChunk, "convertDeltaToMessageChunk");
  // src/requester.ts
  var import_messages2 = require("@langchain/core/messages");
  var import_string = require("koishi-plugin-chatluna/utils/string");
- function buildChatCompletionParams(params, enableGoogleSearch, supportImageInput) {
+ function buildChatCompletionParams(params, enableGoogleSearch, supportImageInput2) {
  const base = {
  model: params.model,
  messages: langchainMessageToOpenAIMessage(
  params.input,
  params.model,
- supportImageInput
+ supportImageInput2
  ),
  tools: enableGoogleSearch || params.tools != null ? formatToolsToOpenAITools(
  params.tools ?? [],
@@ -538,7 +572,7 @@ async function processResponse(requestContext, response) {
  }
  }
  __name(processResponse, "processResponse");
- async function* completionStream(requestContext, params, completionUrl = "chat/completions", enableGoogleSearch, supportImageInput) {
+ async function* completionStream(requestContext, params, completionUrl = "chat/completions", enableGoogleSearch, supportImageInput2) {
  const { modelRequester } = requestContext;
  try {
  const response = await modelRequester.post(
@@ -546,7 +580,7 @@ async function* completionStream(requestContext, params, completionUrl = "chat/c
  buildChatCompletionParams(
  params,
  enableGoogleSearch ?? false,
- supportImageInput ?? true
+ supportImageInput2 ?? true
  ),
  {
  signal: params.signal
@@ -563,12 +597,12 @@ async function* completionStream(requestContext, params, completionUrl = "chat/c
  }
  }
  __name(completionStream, "completionStream");
- async function completion(requestContext, params, completionUrl = "chat/completions", enableGoogleSearch, supportImageInput) {
+ async function completion(requestContext, params, completionUrl = "chat/completions", enableGoogleSearch, supportImageInput2) {
  const { modelRequester } = requestContext;
  const chatCompletionParams = buildChatCompletionParams(
  params,
  enableGoogleSearch ?? false,
- supportImageInput ?? true
+ supportImageInput2 ?? true
  );
  delete chatCompletionParams.stream;
  try {
@@ -648,5 +682,6 @@ __name(createRequestContext, "createRequestContext");
  messageTypeToOpenAIRole,
  processReasoningContent,
  processResponse,
- processStreamResponse
+ processStreamResponse,
+ supportImageInput
  });
package/lib/index.mjs CHANGED
@@ -52,6 +52,39 @@ function getModelMaxContextSize(info) {
  return getModelContextSize("o1-mini");
  }
  __name(getModelMaxContextSize, "getModelMaxContextSize");
+ function createGlobMatcher(pattern) {
+ if (!pattern.includes("*")) {
+ return (text) => text.includes(pattern);
+ }
+ const regex = new RegExp("^" + pattern.replace(/\*/g, ".*") + "$");
+ return (text) => regex.test(text);
+ }
+ __name(createGlobMatcher, "createGlobMatcher");
+ var imageModelMatchers = [
+ "vision",
+ "vl",
+ "gpt-4o",
+ "claude",
+ "gemini",
+ "qwen-vl",
+ "omni",
+ "qwen2.5-omni",
+ "qwen-omni",
+ "qvq",
+ "o1",
+ "o3",
+ "o4",
+ "gpt-4.1",
+ "gpt-5",
+ "glm-*v",
+ "step3",
+ "grok-4"
+ ].map((pattern) => createGlobMatcher(pattern));
+ function supportImageInput(modelName) {
+ const lowerModel = modelName.toLowerCase();
+ return imageModelMatchers.some((matcher) => matcher(lowerModel));
+ }
+ __name(supportImageInput, "supportImageInput");
 
  // src/requester.ts
  import { ChatGenerationChunk } from "@langchain/core/outputs";
@@ -71,7 +104,7 @@ import {
  ToolMessageChunk
  } from "@langchain/core/messages";
  import { zodToJsonSchema } from "zod-to-json-schema";
- function langchainMessageToOpenAIMessage(messages, model, supportImageInput, removeSystemMessage) {
+ function langchainMessageToOpenAIMessage(messages, model, supportImageInput2, removeSystemMessage) {
  const result = [];
  for (const rawMessage of messages) {
  const role = messageTypeToOpenAIRole(rawMessage.getType());
@@ -100,7 +133,7 @@ function langchainMessageToOpenAIMessage(messages, model, supportImageInput, rem
  }
  const images = rawMessage.additional_kwargs.images;
  const lowerModel = model?.toLowerCase() ?? "";
- if ((lowerModel?.includes("vision") || lowerModel?.includes("gpt-4o") || lowerModel?.includes("claude") || lowerModel?.includes("gemini") || lowerModel?.includes("qwen-vl") || lowerModel?.includes("omni") || lowerModel?.includes("qwen2.5-vl") || lowerModel?.includes("qwen2.5-omni") || lowerModel?.includes("qwen-omni") || lowerModel?.includes("qwen2-vl") || lowerModel?.includes("qvq") || model?.includes("o1") || model?.includes("o4") || model?.includes("o3") || model?.includes("gpt-4.1") || model?.includes("gpt-5") || supportImageInput) && images != null) {
+ if ((lowerModel?.includes("vision") || lowerModel?.includes("gpt-4o") || lowerModel?.includes("claude") || lowerModel?.includes("gemini") || lowerModel?.includes("qwen-vl") || lowerModel?.includes("omni") || lowerModel?.includes("qwen2.5-vl") || lowerModel?.includes("qwen2.5-omni") || lowerModel?.includes("qwen-omni") || lowerModel?.includes("qwen2-vl") || lowerModel?.includes("qvq") || model?.includes("o1") || model?.includes("o4") || model?.includes("o3") || model?.includes("gpt-4.1") || model?.includes("gpt-5") || supportImageInput2) && images != null) {
  msg.content = [
  {
  type: "text",
@@ -349,13 +382,13 @@ __name(convertDeltaToMessageChunk, "convertDeltaToMessageChunk");
  // src/requester.ts
  import { AIMessageChunk as AIMessageChunk2 } from "@langchain/core/messages";
  import { getMessageContent } from "koishi-plugin-chatluna/utils/string";
- function buildChatCompletionParams(params, enableGoogleSearch, supportImageInput) {
+ function buildChatCompletionParams(params, enableGoogleSearch, supportImageInput2) {
  const base = {
  model: params.model,
  messages: langchainMessageToOpenAIMessage(
  params.input,
  params.model,
- supportImageInput
+ supportImageInput2
  ),
  tools: enableGoogleSearch || params.tools != null ? formatToolsToOpenAITools(
  params.tools ?? [],
@@ -508,7 +541,7 @@ async function processResponse(requestContext, response) {
  }
  }
  __name(processResponse, "processResponse");
- async function* completionStream(requestContext, params, completionUrl = "chat/completions", enableGoogleSearch, supportImageInput) {
+ async function* completionStream(requestContext, params, completionUrl = "chat/completions", enableGoogleSearch, supportImageInput2) {
  const { modelRequester } = requestContext;
  try {
  const response = await modelRequester.post(
@@ -516,7 +549,7 @@ async function* completionStream(requestContext, params, completionUrl = "chat/c
  buildChatCompletionParams(
  params,
  enableGoogleSearch ?? false,
- supportImageInput ?? true
+ supportImageInput2 ?? true
  ),
  {
  signal: params.signal
@@ -533,12 +566,12 @@ async function* completionStream(requestContext, params, completionUrl = "chat/c
  }
  }
  __name(completionStream, "completionStream");
- async function completion(requestContext, params, completionUrl = "chat/completions", enableGoogleSearch, supportImageInput) {
+ async function completion(requestContext, params, completionUrl = "chat/completions", enableGoogleSearch, supportImageInput2) {
  const { modelRequester } = requestContext;
  const chatCompletionParams = buildChatCompletionParams(
  params,
  enableGoogleSearch ?? false,
- supportImageInput ?? true
+ supportImageInput2 ?? true
  );
  delete chatCompletionParams.stream;
  try {
@@ -617,5 +650,6 @@ export {
  messageTypeToOpenAIRole,
  processReasoningContent,
  processResponse,
- processStreamResponse
+ processStreamResponse,
+ supportImageInput
  };
package/lib/utils.d.ts CHANGED
@@ -5,5 +5,5 @@ export declare function langchainMessageToOpenAIMessage(messages: BaseMessage[],
  export declare function messageTypeToOpenAIRole(type: MessageType): ChatCompletionResponseMessageRoleEnum;
  export declare function formatToolsToOpenAITools(tools: StructuredTool[], includeGoogleSearch: boolean): ChatCompletionTool[];
  export declare function formatToolToOpenAITool(tool: StructuredTool): ChatCompletionTool;
- export declare function convertMessageToMessageChunk(message: ChatCompletionResponseMessage): AIMessageChunk | HumanMessageChunk | SystemMessageChunk | FunctionMessageChunk | ToolMessageChunk | ChatMessageChunk;
- export declare function convertDeltaToMessageChunk(delta: Record<string, any>, defaultRole?: ChatCompletionResponseMessageRoleEnum): AIMessageChunk | HumanMessageChunk | SystemMessageChunk | FunctionMessageChunk | ToolMessageChunk | ChatMessageChunk;
+ export declare function convertMessageToMessageChunk(message: ChatCompletionResponseMessage): HumanMessageChunk | AIMessageChunk | SystemMessageChunk | FunctionMessageChunk | ToolMessageChunk | ChatMessageChunk;
+ export declare function convertDeltaToMessageChunk(delta: Record<string, any>, defaultRole?: ChatCompletionResponseMessageRoleEnum): HumanMessageChunk | AIMessageChunk | SystemMessageChunk | FunctionMessageChunk | ToolMessageChunk | ChatMessageChunk;
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
  "name": "@chatluna/v1-shared-adapter",
  "description": "chatluna shared adapter",
- "version": "1.0.3",
+ "version": "1.0.4",
  "main": "lib/index.cjs",
  "module": "lib/index.mjs",
  "typings": "lib/index.d.ts",
@@ -70,6 +70,6 @@
  },
  "peerDependencies": {
  "koishi": "^4.18.7",
- "koishi-plugin-chatluna": "^1.3.0-alpha.15"
+ "koishi-plugin-chatluna": "^1.3.0-alpha.16"
  }
  }