@chatluna/v1-shared-adapter 1.0.3 → 1.0.5

package/lib/client.d.ts CHANGED
@@ -2,3 +2,4 @@ import { ModelInfo } from 'koishi-plugin-chatluna/llm-core/platform/types';
 export declare function isEmbeddingModel(modelName: string): boolean;
 export declare function isNonLLMModel(modelName: string): boolean;
 export declare function getModelMaxContextSize(info: ModelInfo): number;
+export declare function supportImageInput(modelName: string): boolean;
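
Note: the new `supportImageInput` export gives adapters a single name-based check for vision capability. A minimal usage sketch (the model names below are illustrative, not taken from this package):

```ts
import { supportImageInput } from '@chatluna/v1-shared-adapter'

supportImageInput('gpt-4o-mini')   // true: "gpt-4o" substring matcher
supportImageInput('qwen-vl-plus')  // true: "vl" substring matcher
supportImageInput('deepseek-chat') // false: no matcher applies
```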
package/lib/index.cjs CHANGED
@@ -27,6 +27,7 @@ __export(index_exports, {
   convertMessageToMessageChunk: () => convertMessageToMessageChunk,
   createEmbeddings: () => createEmbeddings,
   createRequestContext: () => createRequestContext,
+  fetchImageUrl: () => fetchImageUrl,
   formatToolToOpenAITool: () => formatToolToOpenAITool,
   formatToolsToOpenAITools: () => formatToolsToOpenAITools,
   getModelMaxContextSize: () => getModelMaxContextSize,
@@ -37,7 +38,9 @@ __export(index_exports, {
   messageTypeToOpenAIRole: () => messageTypeToOpenAIRole,
   processReasoningContent: () => processReasoningContent,
   processResponse: () => processResponse,
-  processStreamResponse: () => processStreamResponse
+  processStreamResponse: () => processStreamResponse,
+  removeAdditionalProperties: () => removeAdditionalProperties,
+  supportImageInput: () => supportImageInput
 });
 module.exports = __toCommonJS(index_exports);
 
@@ -92,6 +95,39 @@ function getModelMaxContextSize(info) {
   return (0, import_count_tokens.getModelContextSize)("o1-mini");
 }
 __name(getModelMaxContextSize, "getModelMaxContextSize");
+function createGlobMatcher(pattern) {
+  if (!pattern.includes("*")) {
+    return (text) => text.includes(pattern);
+  }
+  const regex = new RegExp("^" + pattern.replace(/\*/g, ".*") + "$");
+  return (text) => regex.test(text);
+}
+__name(createGlobMatcher, "createGlobMatcher");
+var imageModelMatchers = [
+  "vision",
+  "vl",
+  "gpt-4o",
+  "claude",
+  "gemini",
+  "qwen-vl",
+  "omni",
+  "qwen2.5-omni",
+  "qwen-omni",
+  "qvq",
+  "o1",
+  "o3",
+  "o4",
+  "gpt-4.1",
+  "gpt-5",
+  "glm-*v",
+  "step3",
+  "grok-4"
+].map((pattern) => createGlobMatcher(pattern));
+function supportImageInput(modelName) {
+  const lowerModel = modelName.toLowerCase();
+  return imageModelMatchers.some((matcher) => matcher(lowerModel));
+}
+__name(supportImageInput, "supportImageInput");
 
 // src/requester.ts
 var import_outputs = require("@langchain/core/outputs");
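
Note on matcher semantics: patterns without `*` are plain substring tests, while patterns containing `*` compile to an anchored regex, so `glm-*v` must match the entire (lowercased) model name. A standalone restatement of the same logic from the hunk above:

```ts
// Reimplementation of createGlobMatcher, for illustration only.
function createGlobMatcher(pattern: string): (text: string) => boolean {
  if (!pattern.includes('*')) {
    // No wildcard: substring containment, e.g. "vl" matches "qwen-vl-max".
    return (text) => text.includes(pattern)
  }
  // Wildcard: anchored regex, so the whole name must match.
  const regex = new RegExp('^' + pattern.replace(/\*/g, '.*') + '$')
  return (text) => regex.test(text)
}

createGlobMatcher('glm-*v')('glm-4v')      // true  (full match of ^glm-.*v$)
createGlobMatcher('glm-*v')('glm-4v-plus') // false (trailing "-plus" breaks the anchor)
createGlobMatcher('vl')('qwen-vl-max')     // true  (substring)
```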
@@ -101,12 +137,13 @@ var import_sse = require("koishi-plugin-chatluna/utils/sse");
 // src/utils.ts
 var import_messages = require("@langchain/core/messages");
 var import_zod_to_json_schema = require("zod-to-json-schema");
-function langchainMessageToOpenAIMessage(messages, model, supportImageInput, removeSystemMessage) {
+var import_string = require("koishi-plugin-chatluna/utils/string");
+async function langchainMessageToOpenAIMessage(messages, plugin, model, supportImageInput2, removeSystemMessage) {
   const result = [];
   for (const rawMessage of messages) {
     const role = messageTypeToOpenAIRole(rawMessage.getType());
     const msg = {
-      content: rawMessage.content || null,
+      content: rawMessage.content,
       name: role === "assistant" || role === "tool" ? rawMessage.name : void 0,
       role,
       // function_call: rawMessage.additional_kwargs.function_call,
@@ -130,7 +167,7 @@ function langchainMessageToOpenAIMessage(messages, model, supportImageInput, rem
     }
     const images = rawMessage.additional_kwargs.images;
     const lowerModel = model?.toLowerCase() ?? "";
-    if ((lowerModel?.includes("vision") || lowerModel?.includes("gpt-4o") || lowerModel?.includes("claude") || lowerModel?.includes("gemini") || lowerModel?.includes("qwen-vl") || lowerModel?.includes("omni") || lowerModel?.includes("qwen2.5-vl") || lowerModel?.includes("qwen2.5-omni") || lowerModel?.includes("qwen-omni") || lowerModel?.includes("qwen2-vl") || lowerModel?.includes("qvq") || model?.includes("o1") || model?.includes("o4") || model?.includes("o3") || model?.includes("gpt-4.1") || model?.includes("gpt-5") || supportImageInput) && images != null) {
+    if ((lowerModel?.includes("vision") || lowerModel?.includes("gpt-4o") || lowerModel?.includes("claude") || lowerModel?.includes("gemini") || lowerModel?.includes("qwen-vl") || lowerModel?.includes("omni") || lowerModel?.includes("qwen2.5-vl") || lowerModel?.includes("qwen2.5-omni") || lowerModel?.includes("qwen-omni") || lowerModel?.includes("qwen2-vl") || lowerModel?.includes("qvq") || model?.includes("o1") || model?.includes("o4") || model?.includes("o3") || model?.includes("gpt-4.1") || model?.includes("gpt-5") || supportImageInput2) && images != null) {
       msg.content = [
         {
           type: "text",
@@ -146,6 +183,24 @@ function langchainMessageToOpenAIMessage(messages, model, supportImageInput, rem
           }
         });
       }
+    } else if (Array.isArray(msg.content) && msg.content.length > 0) {
+      msg.content = await Promise.all(
+        msg.content.map(async (content) => {
+          if (!(0, import_string.isMessageContentImageUrl)(content)) return content;
+          try {
+            const url = await fetchImageUrl(plugin, content);
+            return {
+              type: "image_url",
+              image_url: {
+                url,
+                detail: "low"
+              }
+            };
+          } catch {
+            return content;
+          }
+        })
+      );
     }
     result.push(msg);
   }
@@ -192,6 +247,17 @@ function langchainMessageToOpenAIMessage(messages, model, supportImageInput, rem
   return result;
 }
 __name(langchainMessageToOpenAIMessage, "langchainMessageToOpenAIMessage");
+async function fetchImageUrl(plugin, content) {
+  const url = typeof content.image_url === "string" ? content.image_url : content.image_url.url;
+  if (url.includes("data:image") && url.includes("base64")) {
+    return url;
+  }
+  const ext = url.match(/\.([^.?#]+)(?:[?#]|$)/)?.[1]?.toLowerCase();
+  const imageType = (0, import_string.getImageMimeType)(ext);
+  const buffer = await plugin.fetch(url).then((res) => res.arrayBuffer()).then(Buffer.from);
+  return `data:${imageType};base64,${buffer.toString("base64")}`;
+}
+__name(fetchImageUrl, "fetchImageUrl");
 function messageTypeToOpenAIRole(type) {
   switch (type) {
     case "system":
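
Note: `fetchImageUrl` inlines remote images as base64 data URIs before the request body is built. Already-inlined data URIs pass through unchanged, the MIME type is guessed from the URL's file extension, and the bytes are fetched through the plugin's HTTP client; in the message-conversion hunk above, a failed conversion falls back to the original content part, and converted images are always sent with `detail: "low"`. A standalone sketch of the same conversion, where global `fetch` stands in for `plugin.fetch` and an inline guess stands in for `getImageMimeType`:

```ts
// Illustrative sketch; assumes Node 18+ (global fetch and Buffer).
async function toDataUri(url: string): Promise<string> {
  if (url.includes('data:image') && url.includes('base64')) return url // already inline
  const ext = url.match(/\.([^.?#]+)(?:[?#]|$)/)?.[1]?.toLowerCase()   // "png" from ".../a.png?w=1"
  const mime = ext === 'jpg' ? 'image/jpeg' : `image/${ext ?? 'png'}`  // stand-in for getImageMimeType
  const buffer = Buffer.from(await (await fetch(url)).arrayBuffer())
  return `data:${mime};base64,${buffer.toString('base64')}`
}
```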
@@ -378,14 +444,15 @@ __name(convertDeltaToMessageChunk, "convertDeltaToMessageChunk");
 
 // src/requester.ts
 var import_messages2 = require("@langchain/core/messages");
-var import_string = require("koishi-plugin-chatluna/utils/string");
-function buildChatCompletionParams(params, enableGoogleSearch, supportImageInput) {
+var import_string2 = require("koishi-plugin-chatluna/utils/string");
+async function buildChatCompletionParams(params, plugin, enableGoogleSearch, supportImageInput2) {
   const base = {
     model: params.model,
-    messages: langchainMessageToOpenAIMessage(
+    messages: await langchainMessageToOpenAIMessage(
       params.input,
+      plugin,
       params.model,
-      supportImageInput
+      supportImageInput2
     ),
     tools: enableGoogleSearch || params.tools != null ? formatToolsToOpenAITools(
       params.tools ?? [],
@@ -519,7 +586,7 @@ async function processResponse(requestContext, response) {
   const messageChunk = convertMessageToMessageChunk(choice.message);
   return new import_outputs.ChatGenerationChunk({
     message: messageChunk,
-    text: (0, import_string.getMessageContent)(messageChunk.content),
+    text: (0, import_string2.getMessageContent)(messageChunk.content),
     generationInfo: {
       tokenUsage: data.usage
     }
@@ -538,15 +605,16 @@
   }
 }
 __name(processResponse, "processResponse");
-async function* completionStream(requestContext, params, completionUrl = "chat/completions", enableGoogleSearch, supportImageInput) {
+async function* completionStream(requestContext, params, completionUrl = "chat/completions", enableGoogleSearch, supportImageInput2) {
   const { modelRequester } = requestContext;
   try {
     const response = await modelRequester.post(
       completionUrl,
-      buildChatCompletionParams(
+      await buildChatCompletionParams(
         params,
+        requestContext.plugin,
         enableGoogleSearch ?? false,
-        supportImageInput ?? true
+        supportImageInput2 ?? true
       ),
       {
         signal: params.signal
@@ -563,12 +631,13 @@ async function* completionStream(requestContext, params, completionUrl = "chat/c
   }
 }
 __name(completionStream, "completionStream");
-async function completion(requestContext, params, completionUrl = "chat/completions", enableGoogleSearch, supportImageInput) {
+async function completion(requestContext, params, completionUrl = "chat/completions", enableGoogleSearch, supportImageInput2) {
   const { modelRequester } = requestContext;
-  const chatCompletionParams = buildChatCompletionParams(
+  const chatCompletionParams = await buildChatCompletionParams(
     params,
+    requestContext.plugin,
     enableGoogleSearch ?? false,
-    supportImageInput ?? true
+    supportImageInput2 ?? true
   );
   delete chatCompletionParams.stream;
   try {
@@ -638,6 +707,7 @@ __name(createRequestContext, "createRequestContext");
   convertMessageToMessageChunk,
   createEmbeddings,
   createRequestContext,
+  fetchImageUrl,
   formatToolToOpenAITool,
   formatToolsToOpenAITools,
   getModelMaxContextSize,
@@ -648,5 +718,7 @@ __name(createRequestContext, "createRequestContext");
   messageTypeToOpenAIRole,
   processReasoningContent,
   processResponse,
-  processStreamResponse
+  processStreamResponse,
+  removeAdditionalProperties,
+  supportImageInput
 });
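
Net effect for downstream adapters: `buildChatCompletionParams`, `completion`, and `completionStream` now thread a `ChatLunaPlugin` through to message conversion, and `buildChatCompletionParams`/`langchainMessageToOpenAIMessage` return Promises. Callers of the previously synchronous form need an `await` and the extra argument. A sketch of an updated call site, assuming the function is reachable from the package root and `params`/`requestContext` are values the adapter already has:

```ts
import { buildChatCompletionParams } from '@chatluna/v1-shared-adapter'

// 1.0.3: buildChatCompletionParams(params, enableGoogleSearch, supportImageInput)
// 1.0.5: the plugin is passed in and the result must be awaited.
const body = await buildChatCompletionParams(
  params,                // ModelRequestParams the adapter already built
  requestContext.plugin, // ChatLunaPlugin, used to fetch image URLs
  false,                 // enableGoogleSearch
  true                   // supportImageInput
)
```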
package/lib/index.mjs CHANGED
@@ -52,6 +52,39 @@ function getModelMaxContextSize(info) {
   return getModelContextSize("o1-mini");
 }
 __name(getModelMaxContextSize, "getModelMaxContextSize");
+function createGlobMatcher(pattern) {
+  if (!pattern.includes("*")) {
+    return (text) => text.includes(pattern);
+  }
+  const regex = new RegExp("^" + pattern.replace(/\*/g, ".*") + "$");
+  return (text) => regex.test(text);
+}
+__name(createGlobMatcher, "createGlobMatcher");
+var imageModelMatchers = [
+  "vision",
+  "vl",
+  "gpt-4o",
+  "claude",
+  "gemini",
+  "qwen-vl",
+  "omni",
+  "qwen2.5-omni",
+  "qwen-omni",
+  "qvq",
+  "o1",
+  "o3",
+  "o4",
+  "gpt-4.1",
+  "gpt-5",
+  "glm-*v",
+  "step3",
+  "grok-4"
+].map((pattern) => createGlobMatcher(pattern));
+function supportImageInput(modelName) {
+  const lowerModel = modelName.toLowerCase();
+  return imageModelMatchers.some((matcher) => matcher(lowerModel));
+}
+__name(supportImageInput, "supportImageInput");
 
 // src/requester.ts
 import { ChatGenerationChunk } from "@langchain/core/outputs";
@@ -71,12 +104,16 @@ import {
   ToolMessageChunk
 } from "@langchain/core/messages";
 import { zodToJsonSchema } from "zod-to-json-schema";
-function langchainMessageToOpenAIMessage(messages, model, supportImageInput, removeSystemMessage) {
+import {
+  getImageMimeType,
+  isMessageContentImageUrl
+} from "koishi-plugin-chatluna/utils/string";
+async function langchainMessageToOpenAIMessage(messages, plugin, model, supportImageInput2, removeSystemMessage) {
   const result = [];
   for (const rawMessage of messages) {
     const role = messageTypeToOpenAIRole(rawMessage.getType());
     const msg = {
-      content: rawMessage.content || null,
+      content: rawMessage.content,
       name: role === "assistant" || role === "tool" ? rawMessage.name : void 0,
       role,
       // function_call: rawMessage.additional_kwargs.function_call,
@@ -100,7 +137,7 @@ function langchainMessageToOpenAIMessage(messages, model, supportImageInput, rem
     }
     const images = rawMessage.additional_kwargs.images;
     const lowerModel = model?.toLowerCase() ?? "";
-    if ((lowerModel?.includes("vision") || lowerModel?.includes("gpt-4o") || lowerModel?.includes("claude") || lowerModel?.includes("gemini") || lowerModel?.includes("qwen-vl") || lowerModel?.includes("omni") || lowerModel?.includes("qwen2.5-vl") || lowerModel?.includes("qwen2.5-omni") || lowerModel?.includes("qwen-omni") || lowerModel?.includes("qwen2-vl") || lowerModel?.includes("qvq") || model?.includes("o1") || model?.includes("o4") || model?.includes("o3") || model?.includes("gpt-4.1") || model?.includes("gpt-5") || supportImageInput) && images != null) {
+    if ((lowerModel?.includes("vision") || lowerModel?.includes("gpt-4o") || lowerModel?.includes("claude") || lowerModel?.includes("gemini") || lowerModel?.includes("qwen-vl") || lowerModel?.includes("omni") || lowerModel?.includes("qwen2.5-vl") || lowerModel?.includes("qwen2.5-omni") || lowerModel?.includes("qwen-omni") || lowerModel?.includes("qwen2-vl") || lowerModel?.includes("qvq") || model?.includes("o1") || model?.includes("o4") || model?.includes("o3") || model?.includes("gpt-4.1") || model?.includes("gpt-5") || supportImageInput2) && images != null) {
       msg.content = [
         {
           type: "text",
@@ -116,6 +153,24 @@ function langchainMessageToOpenAIMessage(messages, model, supportImageInput, rem
           }
         });
       }
+    } else if (Array.isArray(msg.content) && msg.content.length > 0) {
+      msg.content = await Promise.all(
+        msg.content.map(async (content) => {
+          if (!isMessageContentImageUrl(content)) return content;
+          try {
+            const url = await fetchImageUrl(plugin, content);
+            return {
+              type: "image_url",
+              image_url: {
+                url,
+                detail: "low"
+              }
+            };
+          } catch {
+            return content;
+          }
+        })
+      );
     }
     result.push(msg);
   }
@@ -162,6 +217,17 @@ function langchainMessageToOpenAIMessage(messages, model, supportImageInput, rem
   return result;
 }
 __name(langchainMessageToOpenAIMessage, "langchainMessageToOpenAIMessage");
+async function fetchImageUrl(plugin, content) {
+  const url = typeof content.image_url === "string" ? content.image_url : content.image_url.url;
+  if (url.includes("data:image") && url.includes("base64")) {
+    return url;
+  }
+  const ext = url.match(/\.([^.?#]+)(?:[?#]|$)/)?.[1]?.toLowerCase();
+  const imageType = getImageMimeType(ext);
+  const buffer = await plugin.fetch(url).then((res) => res.arrayBuffer()).then(Buffer.from);
+  return `data:${imageType};base64,${buffer.toString("base64")}`;
+}
+__name(fetchImageUrl, "fetchImageUrl");
 function messageTypeToOpenAIRole(type) {
   switch (type) {
     case "system":
@@ -349,13 +415,14 @@ __name(convertDeltaToMessageChunk, "convertDeltaToMessageChunk");
 // src/requester.ts
 import { AIMessageChunk as AIMessageChunk2 } from "@langchain/core/messages";
 import { getMessageContent } from "koishi-plugin-chatluna/utils/string";
-function buildChatCompletionParams(params, enableGoogleSearch, supportImageInput) {
+async function buildChatCompletionParams(params, plugin, enableGoogleSearch, supportImageInput2) {
   const base = {
     model: params.model,
-    messages: langchainMessageToOpenAIMessage(
+    messages: await langchainMessageToOpenAIMessage(
       params.input,
+      plugin,
       params.model,
-      supportImageInput
+      supportImageInput2
     ),
     tools: enableGoogleSearch || params.tools != null ? formatToolsToOpenAITools(
       params.tools ?? [],
@@ -508,15 +575,16 @@ async function processResponse(requestContext, response) {
   }
 }
 __name(processResponse, "processResponse");
-async function* completionStream(requestContext, params, completionUrl = "chat/completions", enableGoogleSearch, supportImageInput) {
+async function* completionStream(requestContext, params, completionUrl = "chat/completions", enableGoogleSearch, supportImageInput2) {
   const { modelRequester } = requestContext;
   try {
     const response = await modelRequester.post(
       completionUrl,
-      buildChatCompletionParams(
+      await buildChatCompletionParams(
         params,
+        requestContext.plugin,
         enableGoogleSearch ?? false,
-        supportImageInput ?? true
+        supportImageInput2 ?? true
       ),
       {
         signal: params.signal
533
601
  }
534
602
  }
535
603
  __name(completionStream, "completionStream");
536
- async function completion(requestContext, params, completionUrl = "chat/completions", enableGoogleSearch, supportImageInput) {
604
+ async function completion(requestContext, params, completionUrl = "chat/completions", enableGoogleSearch, supportImageInput2) {
537
605
  const { modelRequester } = requestContext;
538
- const chatCompletionParams = buildChatCompletionParams(
606
+ const chatCompletionParams = await buildChatCompletionParams(
539
607
  params,
608
+ requestContext.plugin,
540
609
  enableGoogleSearch ?? false,
541
- supportImageInput ?? true
610
+ supportImageInput2 ?? true
542
611
  );
543
612
  delete chatCompletionParams.stream;
544
613
  try {
@@ -607,6 +676,7 @@ export {
   convertMessageToMessageChunk,
   createEmbeddings,
   createRequestContext,
+  fetchImageUrl,
   formatToolToOpenAITool,
   formatToolsToOpenAITools,
   getModelMaxContextSize,
@@ -617,5 +687,7 @@ export {
   messageTypeToOpenAIRole,
   processReasoningContent,
   processResponse,
-  processStreamResponse
+  processStreamResponse,
+  removeAdditionalProperties,
+  supportImageInput
 };
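
The ESM build mirrors the CJS changes exactly; both entry points expose the same new names. A trivial ESM consumer sketch:

```ts
// lib/index.mjs exposes the same surface as lib/index.cjs.
import {
  supportImageInput,
  fetchImageUrl,
  removeAdditionalProperties
} from '@chatluna/v1-shared-adapter'
```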
package/lib/requester.d.ts CHANGED
@@ -12,7 +12,7 @@ interface RequestContext<T extends ClientConfig = ClientConfig, R extends ChatLu
   plugin: ChatLunaPlugin;
   modelRequester: ModelRequester<T, R>;
 }
-export declare function buildChatCompletionParams(params: ModelRequestParams, enableGoogleSearch: boolean, supportImageInput?: boolean): {
+export declare function buildChatCompletionParams(params: ModelRequestParams, plugin: ChatLunaPlugin, enableGoogleSearch: boolean, supportImageInput?: boolean): Promise<{
   model: string;
   messages: import("./types").ChatCompletionResponseMessage[];
   tools: import("./types").ChatCompletionTool[];
@@ -29,7 +29,7 @@ export declare function buildChatCompletionParams(params: ModelRequestParams, en
   stream_options: {
     include_usage: boolean;
   };
-};
+}>;
 export declare function processReasoningContent(delta: {
   reasoning_content?: string;
   content?: string;
package/lib/types.d.ts CHANGED
@@ -20,18 +20,23 @@ export interface ChatCompletionResponse {
     total_tokens: number;
   };
 }
+export interface ChatCompletionTextPart {
+  type: 'text';
+  text: string;
+}
+export interface ChatCompletionImagePart {
+  type: 'image_url';
+  image_url: string | {
+    url: string;
+    detail?: 'low' | 'high';
+  };
+}
+export type ChatCompletionParts = ChatCompletionTextPart | ChatCompletionImagePart | (Record<string, unknown> & {
+  type: string;
+});
 export interface ChatCompletionResponseMessage {
   role: string;
-  content?: string | ({
-    type: 'text';
-    text: string;
-  } | {
-    type: 'image_url';
-    image_url: {
-      url: string;
-      detail?: 'low' | 'high';
-    };
-  })[];
+  content?: string | ChatCompletionParts[];
   reasoning_content?: string;
   name?: string;
   tool_calls?: ChatCompletionRequestMessageToolCall[];
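
The inline content union is now extracted into named part types, and `ChatCompletionParts` gains a catch-all arm (`Record<string, unknown> & { type: string }`) that tolerates part shapes this package does not model. A usage sketch, assuming the types are reachable from the package root re-exports (the values are illustrative):

```ts
import type {
  ChatCompletionParts,
  ChatCompletionResponseMessage
} from '@chatluna/v1-shared-adapter'

const parts: ChatCompletionParts[] = [
  { type: 'text', text: 'What is in this picture?' },
  {
    type: 'image_url',
    image_url: { url: 'data:image/png;base64,...', detail: 'low' }
  },
  // Unknown part shapes type-check via the catch-all union arm.
  { type: 'input_audio', format: 'wav' }
]

const message: ChatCompletionResponseMessage = { role: 'user', content: parts }
```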
package/lib/utils.d.ts CHANGED
@@ -1,9 +1,13 @@
-import { AIMessageChunk, BaseMessage, ChatMessageChunk, FunctionMessageChunk, HumanMessageChunk, MessageType, SystemMessageChunk, ToolMessageChunk } from '@langchain/core/messages';
+import { AIMessageChunk, BaseMessage, ChatMessageChunk, FunctionMessageChunk, HumanMessageChunk, MessageContentImageUrl, MessageType, SystemMessageChunk, ToolMessageChunk } from '@langchain/core/messages';
 import { StructuredTool } from '@langchain/core/tools';
+import { JsonSchema7Type } from 'zod-to-json-schema';
 import { ChatCompletionResponseMessage, ChatCompletionResponseMessageRoleEnum, ChatCompletionTool } from './types';
-export declare function langchainMessageToOpenAIMessage(messages: BaseMessage[], model?: string, supportImageInput?: boolean, removeSystemMessage?: boolean): ChatCompletionResponseMessage[];
+import { ChatLunaPlugin } from 'koishi-plugin-chatluna/services/chat';
+export declare function langchainMessageToOpenAIMessage(messages: BaseMessage[], plugin: ChatLunaPlugin, model?: string, supportImageInput?: boolean, removeSystemMessage?: boolean): Promise<ChatCompletionResponseMessage[]>;
+export declare function fetchImageUrl(plugin: ChatLunaPlugin, content: MessageContentImageUrl): Promise<string>;
 export declare function messageTypeToOpenAIRole(type: MessageType): ChatCompletionResponseMessageRoleEnum;
 export declare function formatToolsToOpenAITools(tools: StructuredTool[], includeGoogleSearch: boolean): ChatCompletionTool[];
 export declare function formatToolToOpenAITool(tool: StructuredTool): ChatCompletionTool;
-export declare function convertMessageToMessageChunk(message: ChatCompletionResponseMessage): AIMessageChunk | HumanMessageChunk | SystemMessageChunk | FunctionMessageChunk | ToolMessageChunk | ChatMessageChunk;
-export declare function convertDeltaToMessageChunk(delta: Record<string, any>, defaultRole?: ChatCompletionResponseMessageRoleEnum): AIMessageChunk | HumanMessageChunk | SystemMessageChunk | FunctionMessageChunk | ToolMessageChunk | ChatMessageChunk;
+export declare function removeAdditionalProperties(schema: JsonSchema7Type): JsonSchema7Type;
+export declare function convertMessageToMessageChunk(message: ChatCompletionResponseMessage): HumanMessageChunk | AIMessageChunk | SystemMessageChunk | FunctionMessageChunk | ToolMessageChunk | ChatMessageChunk;
+export declare function convertDeltaToMessageChunk(delta: Record<string, any>, defaultRole?: ChatCompletionResponseMessageRoleEnum): HumanMessageChunk | AIMessageChunk | SystemMessageChunk | FunctionMessageChunk | ToolMessageChunk | ChatMessageChunk;
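
`removeAdditionalProperties` is now part of the public surface. Only its signature (`JsonSchema7Type` in, `JsonSchema7Type` out) is confirmed by this diff; judging from the name, it strips `additionalProperties` from a zod-to-json-schema output, which some OpenAI-compatible endpoints reject inside tool parameter schemas. A hedged sketch of how an adapter might use it, assuming `zod` is available:

```ts
import { z } from 'zod'
import { zodToJsonSchema } from 'zod-to-json-schema'
import { removeAdditionalProperties } from '@chatluna/v1-shared-adapter'

const schema = zodToJsonSchema(
  z.object({ city: z.string(), unit: z.enum(['c', 'f']).optional() })
)
// Assumption based on the function name: clean the schema of
// `additionalProperties` before sending it in a tool definition.
const cleaned = removeAdditionalProperties(schema)
```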
package/package.json CHANGED
@@ -1,7 +1,7 @@
 {
   "name": "@chatluna/v1-shared-adapter",
   "description": "chatluna shared adapter",
-  "version": "1.0.3",
+  "version": "1.0.5",
   "main": "lib/index.cjs",
   "module": "lib/index.mjs",
   "typings": "lib/index.d.ts",
@@ -70,6 +70,6 @@
   },
   "peerDependencies": {
     "koishi": "^4.18.7",
-    "koishi-plugin-chatluna": "^1.3.0-alpha.15"
+    "koishi-plugin-chatluna": "^1.3.0-alpha.17"
   }
 }