@chatluna/v1-shared-adapter 1.0.32 → 1.0.33

This diff shows the publicly available contents of the two package versions as published to their registry; it is provided for informational purposes only.
package/lib/client.d.ts CHANGED
@@ -1,5 +1,5 @@
1
1
  import { ModelInfo } from 'koishi-plugin-chatluna/llm-core/platform/types';
2
- export type OpenAIReasoningEffort = 'none' | 'minimal' | 'low' | 'medium' | 'high' | 'xhigh';
2
+ export type OpenAIReasoningEffort = 'none' | 'minimal' | 'low' | 'medium' | 'high' | 'max' | 'xhigh';
3
3
  export declare const reasoningEffortModelSuffixes: readonly ["non-thinking", "minimal-thinking", "low-thinking", "medium-thinking", "high-thinking", "xhigh-thinking", "thinking"];
4
4
  export declare function expandReasoningEffortModelVariants(model: string, suffixes?: readonly string[]): string[];
5
5
  export declare function parseOpenAIModelNameWithReasoningEffort(modelName: string): {
@@ -9,5 +9,6 @@ export declare function parseOpenAIModelNameWithReasoningEffort(modelName: strin
9
9
  export declare function normalizeOpenAIModelName(modelName: string): string;
10
10
  export declare function isEmbeddingModel(modelName: string): boolean;
11
11
  export declare function isNonLLMModel(modelName: string): boolean;
12
+ export declare function isImageGenerationModel(modelName: string): boolean;
12
13
  export declare function getModelMaxContextSize(info: ModelInfo): number;
13
14
  export declare function supportImageInput(modelName: string): boolean;
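
For context, a minimal sketch of how the widened effort parsing and the new isImageGenerationModel helper might be used. Both functions are re-exported from the package root (see the export lists below); the model/reasoningEffort fields on the parse result are inferred from the implementation further down, and whether 'gpt-image-1' counts as a non-LLM model depends on isNonLLMModel, whose body is not shown in this diff.

import {
    parseOpenAIModelNameWithReasoningEffort,
    isImageGenerationModel
} from '@chatluna/v1-shared-adapter'

// 'max' is now accepted as an explicit reasoning-effort suffix alongside 'xhigh'.
const parsed = parseOpenAIModelNameWithReasoningEffort('gpt-5-max-thinking')
// Expected (inferred): parsed.model === 'gpt-5', parsed.reasoningEffort === 'max'

// Image-generation models are the subset of non-LLM models whose name
// contains 'dall-e' or 'image'.
if (isImageGenerationModel('gpt-image-1')) {
    // route this model to an image-generation code path instead of chat completions
}
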
package/lib/index.cjs CHANGED
@@ -21,6 +21,7 @@ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: tru
21
21
  var index_exports = {};
22
22
  __export(index_exports, {
23
23
  buildChatCompletionParams: () => buildChatCompletionParams,
24
+ buildResponseParams: () => buildResponseParams,
24
25
  completion: () => completion,
25
26
  completionStream: () => completionStream,
26
27
  convertDeltaToMessageChunk: () => convertDeltaToMessageChunk,
@@ -33,20 +34,33 @@ __export(index_exports, {
33
34
  fetchImageUrl: () => fetchImageUrl,
34
35
  formatToolToOpenAITool: () => formatToolToOpenAITool,
35
36
  formatToolsToOpenAITools: () => formatToolsToOpenAITools,
37
+ formatToolsToResponseTools: () => formatToolsToResponseTools,
36
38
  getModelMaxContextSize: () => getModelMaxContextSize,
37
39
  getModels: () => getModels,
38
40
  isEmbeddingModel: () => isEmbeddingModel,
41
+ isImageGenerationModel: () => isImageGenerationModel,
39
42
  isNonLLMModel: () => isNonLLMModel,
40
43
  langchainMessageToOpenAIMessage: () => langchainMessageToOpenAIMessage,
44
+ langchainMessageToResponseInput: () => langchainMessageToResponseInput,
41
45
  messageTypeToOpenAIRole: () => messageTypeToOpenAIRole,
42
46
  normalizeOpenAIModelName: () => normalizeOpenAIModelName,
47
+ openAIResponseUsageToUsageMetadata: () => openAIResponseUsageToUsageMetadata,
43
48
  openAIUsageToUsageMetadata: () => openAIUsageToUsageMetadata,
44
49
  parseOpenAIModelNameWithReasoningEffort: () => parseOpenAIModelNameWithReasoningEffort,
45
50
  processInterleavedThinkMessages: () => processInterleavedThinkMessages,
46
51
  processResponse: () => processResponse,
52
+ processResponseApiResponse: () => processResponseApiResponse,
53
+ processResponseApiStream: () => processResponseApiStream,
47
54
  processStreamResponse: () => processStreamResponse,
48
55
  reasoningEffortModelSuffixes: () => reasoningEffortModelSuffixes,
49
56
  removeAdditionalProperties: () => removeAdditionalProperties,
57
+ responseApiCompletion: () => responseApiCompletion,
58
+ responseApiCompletionStream: () => responseApiCompletionStream,
59
+ responseInputContent: () => responseInputContent,
60
+ responseOutputImageItems: () => responseOutputImageItems,
61
+ responseOutputText: () => responseOutputText,
62
+ responseOutputToolCalls: () => responseOutputToolCalls,
63
+ responseToChatGeneration: () => responseToChatGeneration,
50
64
  supportImageInput: () => supportImageInput,
51
65
  transformSystemMessages: () => transformSystemMessages
52
66
  });
@@ -71,7 +85,7 @@ function parseOpenAIModelNameWithReasoningEffort(modelName) {
71
85
  let model = modelName;
72
86
  let reasoningEffort;
73
87
  const explicitMatch = model.match(
74
- /-(none|minimal|low|medium|high|xhigh|tiny)-thinking$/
88
+ /-(none|minimal|low|medium|high|max|xhigh|tiny)-thinking$/
75
89
  );
76
90
  if (explicitMatch?.[1]) {
77
91
  const level = explicitMatch[1];
@@ -109,6 +123,10 @@ function isNonLLMModel(modelName) {
109
123
  );
110
124
  }
111
125
  __name(isNonLLMModel, "isNonLLMModel");
126
+ function isImageGenerationModel(modelName) {
127
+ return isNonLLMModel(modelName) && ["dall-e", "image"].some((keyword) => modelName.includes(keyword));
128
+ }
129
+ __name(isImageGenerationModel, "isImageGenerationModel");
112
130
  function getModelMaxContextSize(info) {
113
131
  const maxTokens = info.maxTokens;
114
132
  if (maxTokens != null) {
@@ -178,7 +196,8 @@ var imageModelMatchers = [
178
196
  "glm-*v",
179
197
  "kimi-k2.5",
180
198
  "step3",
181
- "grok-4"
199
+ "grok-4",
200
+ "mimo-v2.5*"
182
201
  ].map((pattern) => createGlobMatcher(pattern));
183
202
  function supportImageInput(modelName) {
184
203
  const lowerModel = normalizeOpenAIModelName(modelName).toLowerCase();
@@ -229,6 +248,144 @@ function openAIUsageToUsageMetadata(usage) {
229
248
  });
230
249
  }
231
250
  __name(openAIUsageToUsageMetadata, "openAIUsageToUsageMetadata");
251
+ function openAIResponseUsageToUsageMetadata(usage) {
252
+ return createUsageMetadata({
253
+ inputTokens: usage.input_tokens,
254
+ outputTokens: usage.output_tokens,
255
+ totalTokens: usage.total_tokens,
256
+ cacheReadTokens: usage.input_tokens_details?.cached_tokens,
257
+ reasoningTokens: usage.output_tokens_details?.reasoning_tokens
258
+ });
259
+ }
260
+ __name(openAIResponseUsageToUsageMetadata, "openAIResponseUsageToUsageMetadata");
261
+ async function langchainMessageToResponseInput(messages, plugin, model, supportImageInputType) {
262
+ const chatMessages = await langchainMessageToOpenAIMessage(
263
+ messages,
264
+ plugin,
265
+ model,
266
+ supportImageInputType
267
+ );
268
+ const result = [];
269
+ for (const msg of chatMessages) {
270
+ if (msg.role === "tool") {
271
+ result.push({
272
+ type: "function_call_output",
273
+ call_id: msg.tool_call_id,
274
+ output: responseInputContent(msg.content)
275
+ });
276
+ continue;
277
+ }
278
+ if (msg.role === "function") {
279
+ result.push({
280
+ type: "message",
281
+ role: "user",
282
+ content: responseInputContent(msg.content)
283
+ });
284
+ continue;
285
+ }
286
+ if (msg.content != null && msg.content !== "") {
287
+ result.push({
288
+ type: "message",
289
+ role: msg.role === "system" || msg.role === "assistant" || msg.role === "user" ? msg.role : "user",
290
+ content: responseInputContent(msg.content)
291
+ });
292
+ }
293
+ if (msg.role !== "assistant" || !Array.isArray(msg.tool_calls)) {
294
+ continue;
295
+ }
296
+ result.push(
297
+ ...msg.tool_calls.map((toolCall) => ({
298
+ type: "function_call",
299
+ call_id: toolCall.id,
300
+ name: toolCall.function.name,
301
+ arguments: toolCall.function.arguments,
302
+ status: "completed"
303
+ }))
304
+ );
305
+ }
306
+ return result;
307
+ }
308
+ __name(langchainMessageToResponseInput, "langchainMessageToResponseInput");
309
+ function responseInputContent(content) {
310
+ if (typeof content === "string") return content;
311
+ if (!Array.isArray(content)) return "";
312
+ return content.map((part) => {
313
+ if (part.type === "text") {
314
+ const text = part.text;
315
+ return {
316
+ type: "input_text",
317
+ text
318
+ };
319
+ }
320
+ if (part.type === "image_url") {
321
+ const raw = part.image_url;
322
+ const imageUrl = typeof raw === "string" ? raw : raw.url;
323
+ const detail = typeof raw === "string" ? void 0 : raw.detail;
324
+ return {
325
+ type: "input_image",
326
+ image_url: imageUrl,
327
+ detail: detail ?? "auto"
328
+ };
329
+ }
330
+ if (part.type === "file_url") {
331
+ const raw = part["file_url"];
332
+ return {
333
+ type: "input_file",
334
+ file_url: typeof raw === "string" ? raw : raw.url,
335
+ filename: typeof raw === "string" ? void 0 : raw.filename
336
+ };
337
+ }
338
+ return void 0;
339
+ }).filter((part) => part != null);
340
+ }
341
+ __name(responseInputContent, "responseInputContent");
342
+ function formatToolsToResponseTools(tools, includeGoogleSearch, builtinTools = []) {
343
+ const result = (formatToolsToOpenAITools(tools, includeGoogleSearch) ?? []).map((tool) => {
344
+ if (tool.function.name === "googleSearch") {
345
+ return {
346
+ type: "web_search"
347
+ };
348
+ }
349
+ return {
350
+ type: "function",
351
+ name: tool.function.name,
352
+ description: tool.function.description,
353
+ parameters: tool.function.parameters
354
+ };
355
+ });
356
+ for (const tool of builtinTools) {
357
+ if (result.some((item) => item.type === tool.type)) continue;
358
+ result.push(tool);
359
+ }
360
+ return result.length ? result : void 0;
361
+ }
362
+ __name(formatToolsToResponseTools, "formatToolsToResponseTools");
363
+ function responseOutputText(response) {
364
+ if ((response.output_text?.length ?? 0) > 0) return response.output_text;
365
+ return (response.output ?? []).flatMap((item) => {
366
+ if (item.type !== "message") return [];
367
+ return (item.content ?? []).map(
368
+ (part) => {
369
+ if (part.type === "output_text") return part.text;
370
+ if (part.type === "refusal") return part.refusal;
371
+ return "";
372
+ }
373
+ );
374
+ }).join("");
375
+ }
376
+ __name(responseOutputText, "responseOutputText");
377
+ function responseOutputToolCalls(response) {
378
+ return (response.output ?? []).filter(
379
+ (item) => item.type === "function_call"
380
+ );
381
+ }
382
+ __name(responseOutputToolCalls, "responseOutputToolCalls");
383
+ function responseOutputImageItems(response) {
384
+ return (response.output ?? []).filter((item) => item.type === "image_generation_call" && item.result).map(
385
+ (item) => item
386
+ );
387
+ }
388
+ __name(responseOutputImageItems, "responseOutputImageItems");
232
389
  async function langchainMessageToOpenAIMessage(messages, plugin, model, supportImageInputType, removeSystemMessage) {
233
390
  const result = [];
234
391
  const normalizedModel = model ? normalizeOpenAIModelName(model) : model;
@@ -716,6 +873,37 @@ async function buildChatCompletionParams(params, plugin, enableGoogleSearch, sup
716
873
  return (0, import_object.deepAssign)({}, base, params.overrideRequestParams ?? {});
717
874
  }
718
875
  __name(buildChatCompletionParams, "buildChatCompletionParams");
876
+ async function buildResponseParams(params, plugin, opts = {}, supportImageInput2) {
877
+ const parsedModel = parseOpenAIModelNameWithReasoningEffort(params.model);
878
+ const normalizedModel = parsedModel.model;
879
+ const base = {
880
+ model: normalizedModel,
881
+ input: await langchainMessageToResponseInput(
882
+ params.input,
883
+ plugin,
884
+ normalizedModel,
885
+ supportImageInput2
886
+ ),
887
+ tools: opts.googleSearch || (opts.builtinTools?.length ?? 0) > 0 || params.tools != null ? formatToolsToResponseTools(
888
+ params.tools ?? [],
889
+ opts.googleSearch ?? false,
890
+ opts.builtinTools
891
+ ) : void 0,
892
+ max_output_tokens: normalizedModel.includes("vision") ? void 0 : params.maxTokens,
893
+ temperature: params.temperature === 0 ? void 0 : params.temperature,
894
+ top_p: params.topP,
895
+ prompt_cache_key: params.id,
896
+ reasoning: parsedModel.reasoningEffort == null || parsedModel.reasoningEffort === "none" ? void 0 : { effort: parsedModel.reasoningEffort },
897
+ stream: true,
898
+ stream_options: {
899
+ include_obfuscation: false
900
+ },
901
+ store: false,
902
+ parallel_tool_calls: true
903
+ };
904
+ return (0, import_object.deepAssign)({}, base, params.overrideRequestParams ?? {});
905
+ }
906
+ __name(buildResponseParams, "buildResponseParams");
719
907
  async function* processStreamResponse(requestContext, iterator) {
720
908
  let defaultRole = "assistant";
721
909
  let errorCount = 0;
@@ -885,6 +1073,190 @@ async function processResponse(requestContext, response) {
885
1073
  }
886
1074
  }
887
1075
  __name(processResponse, "processResponse");
1076
+ async function responseToChatGeneration(response, imageProvider) {
1077
+ if (response.error) {
1078
+ throw new import_error.ChatLunaError(
1079
+ import_error.ChatLunaErrorCode.API_REQUEST_FAILED,
1080
+ new Error(response.error.message ?? JSON.stringify(response.error))
1081
+ );
1082
+ }
1083
+ const text = responseOutputText(response);
1084
+ const toolCalls = responseOutputToolCalls(response);
1085
+ const images = imageProvider ? await Promise.all(
1086
+ responseOutputImageItems(response).map(
1087
+ (item) => imageProvider(item)
1088
+ )
1089
+ ) : [];
1090
+ const usageMetadata = response.usage ? openAIResponseUsageToUsageMetadata(response.usage) : void 0;
1091
+ const message = new import_messages2.AIMessageChunk({
1092
+ content: images.length > 0 ? [
1093
+ ...text.length > 0 ? [{ type: "text", text }] : [],
1094
+ ...images.map((image) => ({
1095
+ type: "image_url",
1096
+ image_url: image
1097
+ }))
1098
+ ] : text,
1099
+ tool_call_chunks: toolCalls.map((call, index) => ({
1100
+ name: call.name,
1101
+ args: call.arguments,
1102
+ id: call.call_id,
1103
+ index
1104
+ })),
1105
+ usage_metadata: usageMetadata,
1106
+ additional_kwargs: {
1107
+ conversation: response.conversation
1108
+ }
1109
+ });
1110
+ return new import_outputs.ChatGenerationChunk({
1111
+ generationInfo: usageMetadata == null ? void 0 : {
1112
+ usage_metadata: usageMetadata
1113
+ },
1114
+ message,
1115
+ text
1116
+ });
1117
+ }
1118
+ __name(responseToChatGeneration, "responseToChatGeneration");
1119
+ async function processResponseApiResponse(response, imageProvider) {
1120
+ if (response.status !== 200) {
1121
+ throw new import_error.ChatLunaError(
1122
+ import_error.ChatLunaErrorCode.API_REQUEST_FAILED,
1123
+ new Error(
1124
+ "Error when calling responses, Status: " + response.status + " " + response.statusText + ", Response: " + await response.text()
1125
+ )
1126
+ );
1127
+ }
1128
+ const responseText = await response.text();
1129
+ try {
1130
+ return await responseToChatGeneration(
1131
+ JSON.parse(responseText),
1132
+ imageProvider
1133
+ );
1134
+ } catch (e) {
1135
+ if (e instanceof import_error.ChatLunaError) throw e;
1136
+ throw new import_error.ChatLunaError(
1137
+ import_error.ChatLunaErrorCode.API_REQUEST_FAILED,
1138
+ new Error(
1139
+ "Error when calling responses, Error: " + e + ", Response: " + responseText
1140
+ )
1141
+ );
1142
+ }
1143
+ }
1144
+ __name(processResponseApiResponse, "processResponseApiResponse");
1145
+ async function* processResponseApiStream(requestContext, iterator, imageProvider) {
1146
+ const args = /* @__PURE__ */ new Map();
1147
+ const calls = /* @__PURE__ */ new Map();
1148
+ let errorCount = 0;
1149
+ let sentConversation = false;
1150
+ for await (const event of iterator) {
1151
+ const chunk = event.data;
1152
+ if (chunk === "[DONE]") break;
1153
+ if (chunk === "" || chunk == null || chunk === "undefined") continue;
1154
+ try {
1155
+ const data = JSON.parse(chunk);
1156
+ if (data.type === "response.output_text.delta" && data.delta) {
1157
+ yield new import_outputs.ChatGenerationChunk({
1158
+ message: new import_messages2.AIMessageChunk(data.delta),
1159
+ text: data.delta
1160
+ });
1161
+ continue;
1162
+ }
1163
+ if (data.type === "response.output_item.added" && data.item?.type === "function_call") {
1164
+ const item = data.item;
1165
+ calls.set(data.output_index ?? calls.size, {
1166
+ name: item.name,
1167
+ callId: item.call_id,
1168
+ itemId: item.id
1169
+ });
1170
+ continue;
1171
+ }
1172
+ if (data.type === "response.function_call_arguments.delta") {
1173
+ const index = data.output_index ?? 0;
1174
+ args.set(index, (args.get(index) ?? "") + (data.delta ?? ""));
1175
+ continue;
1176
+ }
1177
+ if (data.type === "response.function_call_arguments.done") {
1178
+ const index = data.output_index ?? 0;
1179
+ const call = calls.get(index);
1180
+ yield new import_outputs.ChatGenerationChunk({
1181
+ message: new import_messages2.AIMessageChunk({
1182
+ content: "",
1183
+ tool_call_chunks: [
1184
+ {
1185
+ name: data.name ?? call?.name,
1186
+ args: data.arguments ?? args.get(index) ?? "",
1187
+ id: call?.callId ?? data.item_id,
1188
+ index
1189
+ }
1190
+ ]
1191
+ }),
1192
+ text: ""
1193
+ });
1194
+ continue;
1195
+ }
1196
+ if (data.type === "response.completed" && data.response) {
1197
+ const usageMetadata = data.response.usage ? openAIResponseUsageToUsageMetadata(data.response.usage) : void 0;
1198
+ const images = imageProvider ? await Promise.all(
1199
+ responseOutputImageItems(data.response).map(
1200
+ (item) => imageProvider(item)
1201
+ )
1202
+ ) : [];
1203
+ if (images.length > 0) {
1204
+ yield new import_outputs.ChatGenerationChunk({
1205
+ message: new import_messages2.AIMessageChunk({
1206
+ content: images.map((image) => ({
1207
+ type: "image_url",
1208
+ image_url: image
1209
+ }))
1210
+ }),
1211
+ text: ""
1212
+ });
1213
+ }
1214
+ if (!sentConversation) {
1215
+ sentConversation = true;
1216
+ yield new import_outputs.ChatGenerationChunk({
1217
+ message: new import_messages2.AIMessageChunk({
1218
+ content: "",
1219
+ additional_kwargs: {
1220
+ conversation: data.response.conversation
1221
+ }
1222
+ }),
1223
+ text: ""
1224
+ });
1225
+ }
1226
+ if (usageMetadata) {
1227
+ yield new import_outputs.ChatGenerationChunk({
1228
+ generationInfo: {
1229
+ usage_metadata: usageMetadata
1230
+ },
1231
+ message: new import_messages2.AIMessageChunk({
1232
+ content: "",
1233
+ usage_metadata: usageMetadata
1234
+ }),
1235
+ text: ""
1236
+ });
1237
+ }
1238
+ continue;
1239
+ }
1240
+ if (data.type === "response.failed" || data.type === "response.incomplete" || data.type === "response.error") {
1241
+ throw new import_error.ChatLunaError(
1242
+ import_error.ChatLunaErrorCode.API_REQUEST_FAILED,
1243
+ new Error(chunk)
1244
+ );
1245
+ }
1246
+ } catch (e) {
1247
+ if (e instanceof import_error.ChatLunaError) throw e;
1248
+ if (errorCount > 5) {
1249
+ requestContext.modelRequester.logger.error(
1250
+ "error with responses chunk",
1251
+ chunk
1252
+ );
1253
+ throw new import_error.ChatLunaError(import_error.ChatLunaErrorCode.API_REQUEST_FAILED, e);
1254
+ }
1255
+ errorCount++;
1256
+ }
1257
+ }
1258
+ }
1259
+ __name(processResponseApiStream, "processResponseApiStream");
888
1260
  async function* completionStream(requestContext, params, completionUrl = "chat/completions", enableGoogleSearch, supportImageInput2) {
889
1261
  const { modelRequester } = requestContext;
890
1262
  const chatCompletionParams = await buildChatCompletionParams(
@@ -953,6 +1325,64 @@ async function completion(requestContext, params, completionUrl = "chat/completi
953
1325
  }
954
1326
  }
955
1327
  __name(completion, "completion");
1328
+ async function* responseApiCompletionStream(requestContext, params, opts = {}, supportImageInput2, imageProvider) {
1329
+ const { modelRequester } = requestContext;
1330
+ const request = await buildResponseParams(
1331
+ params,
1332
+ requestContext.plugin,
1333
+ opts,
1334
+ supportImageInput2 ?? true
1335
+ );
1336
+ try {
1337
+ const response = await modelRequester.post("responses", request, {
1338
+ signal: params.signal
1339
+ });
1340
+ yield* processResponseApiStream(
1341
+ requestContext,
1342
+ (0, import_sse.sseIterable)(response),
1343
+ imageProvider
1344
+ );
1345
+ } catch (e) {
1346
+ if (requestContext.ctx.chatluna.currentConfig.isLog) {
1347
+ await (0, import_logger.trackLogToLocal)(
1348
+ "Request",
1349
+ JSON.stringify(request),
1350
+ requestContext.ctx.logger("")
1351
+ );
1352
+ }
1353
+ if (e instanceof import_error.ChatLunaError) throw e;
1354
+ throw new import_error.ChatLunaError(import_error.ChatLunaErrorCode.API_REQUEST_FAILED, e);
1355
+ }
1356
+ }
1357
+ __name(responseApiCompletionStream, "responseApiCompletionStream");
1358
+ async function responseApiCompletion(requestContext, params, opts = {}, supportImageInput2, imageProvider) {
1359
+ const { modelRequester } = requestContext;
1360
+ const request = await buildResponseParams(
1361
+ params,
1362
+ requestContext.plugin,
1363
+ opts,
1364
+ supportImageInput2 ?? true
1365
+ );
1366
+ delete request.stream;
1367
+ delete request.stream_options;
1368
+ try {
1369
+ const response = await modelRequester.post("responses", request, {
1370
+ signal: params.signal
1371
+ });
1372
+ return await processResponseApiResponse(response, imageProvider);
1373
+ } catch (e) {
1374
+ if (requestContext.ctx.chatluna.currentConfig.isLog) {
1375
+ await (0, import_logger.trackLogToLocal)(
1376
+ "Request",
1377
+ JSON.stringify(request),
1378
+ requestContext.ctx.logger("")
1379
+ );
1380
+ }
1381
+ if (e instanceof import_error.ChatLunaError) throw e;
1382
+ throw new import_error.ChatLunaError(import_error.ChatLunaErrorCode.API_REQUEST_FAILED, e);
1383
+ }
1384
+ }
1385
+ __name(responseApiCompletion, "responseApiCompletion");
956
1386
  async function createEmbeddings(requestContext, params, embeddingUrl = "embeddings") {
957
1387
  const { modelRequester } = requestContext;
958
1388
  let data;
@@ -1030,6 +1460,7 @@ __name(createRequestContext, "createRequestContext");
1030
1460
  // Annotate the CommonJS export names for ESM import in node:
1031
1461
  0 && (module.exports = {
1032
1462
  buildChatCompletionParams,
1463
+ buildResponseParams,
1033
1464
  completion,
1034
1465
  completionStream,
1035
1466
  convertDeltaToMessageChunk,
@@ -1042,20 +1473,33 @@ __name(createRequestContext, "createRequestContext");
1042
1473
  fetchImageUrl,
1043
1474
  formatToolToOpenAITool,
1044
1475
  formatToolsToOpenAITools,
1476
+ formatToolsToResponseTools,
1045
1477
  getModelMaxContextSize,
1046
1478
  getModels,
1047
1479
  isEmbeddingModel,
1480
+ isImageGenerationModel,
1048
1481
  isNonLLMModel,
1049
1482
  langchainMessageToOpenAIMessage,
1483
+ langchainMessageToResponseInput,
1050
1484
  messageTypeToOpenAIRole,
1051
1485
  normalizeOpenAIModelName,
1486
+ openAIResponseUsageToUsageMetadata,
1052
1487
  openAIUsageToUsageMetadata,
1053
1488
  parseOpenAIModelNameWithReasoningEffort,
1054
1489
  processInterleavedThinkMessages,
1055
1490
  processResponse,
1491
+ processResponseApiResponse,
1492
+ processResponseApiStream,
1056
1493
  processStreamResponse,
1057
1494
  reasoningEffortModelSuffixes,
1058
1495
  removeAdditionalProperties,
1496
+ responseApiCompletion,
1497
+ responseApiCompletionStream,
1498
+ responseInputContent,
1499
+ responseOutputImageItems,
1500
+ responseOutputText,
1501
+ responseOutputToolCalls,
1502
+ responseToChatGeneration,
1059
1503
  supportImageInput,
1060
1504
  transformSystemMessages
1061
1505
  });
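
For orientation, this is roughly the Responses API input shape that langchainMessageToResponseInput produces for a tool-call round trip, written out by hand from the mapping above. The literal is illustrative rather than the output of running the function, and the deep-import path for the ResponseInputItem type is an assumption; it may instead need to come from the package root.

import type { ResponseInputItem } from '@chatluna/v1-shared-adapter/lib/types'

// The assistant's tool_calls become 'function_call' items and the matching
// tool message becomes a 'function_call_output' item with the same call_id.
const input: ResponseInputItem[] = [
    { type: 'message', role: 'user', content: 'What is the weather in Tokyo?' },
    {
        type: 'function_call',
        call_id: 'call_123',
        name: 'get_weather',
        arguments: '{"city":"Tokyo"}',
        status: 'completed'
    },
    {
        type: 'function_call_output',
        call_id: 'call_123',
        output: '{"temperature":21,"unit":"celsius"}'
    }
]
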
package/lib/index.mjs CHANGED
@@ -20,7 +20,7 @@ function parseOpenAIModelNameWithReasoningEffort(modelName) {
20
20
  let model = modelName;
21
21
  let reasoningEffort;
22
22
  const explicitMatch = model.match(
23
- /-(none|minimal|low|medium|high|xhigh|tiny)-thinking$/
23
+ /-(none|minimal|low|medium|high|max|xhigh|tiny)-thinking$/
24
24
  );
25
25
  if (explicitMatch?.[1]) {
26
26
  const level = explicitMatch[1];
@@ -58,6 +58,10 @@ function isNonLLMModel(modelName) {
58
58
  );
59
59
  }
60
60
  __name(isNonLLMModel, "isNonLLMModel");
61
+ function isImageGenerationModel(modelName) {
62
+ return isNonLLMModel(modelName) && ["dall-e", "image"].some((keyword) => modelName.includes(keyword));
63
+ }
64
+ __name(isImageGenerationModel, "isImageGenerationModel");
61
65
  function getModelMaxContextSize(info) {
62
66
  const maxTokens = info.maxTokens;
63
67
  if (maxTokens != null) {
@@ -127,7 +131,8 @@ var imageModelMatchers = [
127
131
  "glm-*v",
128
132
  "kimi-k2.5",
129
133
  "step3",
130
- "grok-4"
134
+ "grok-4",
135
+ "mimo-v2.5*"
131
136
  ].map((pattern) => createGlobMatcher(pattern));
132
137
  function supportImageInput(modelName) {
133
138
  const lowerModel = normalizeOpenAIModelName(modelName).toLowerCase();
@@ -192,6 +197,144 @@ function openAIUsageToUsageMetadata(usage) {
192
197
  });
193
198
  }
194
199
  __name(openAIUsageToUsageMetadata, "openAIUsageToUsageMetadata");
200
+ function openAIResponseUsageToUsageMetadata(usage) {
201
+ return createUsageMetadata({
202
+ inputTokens: usage.input_tokens,
203
+ outputTokens: usage.output_tokens,
204
+ totalTokens: usage.total_tokens,
205
+ cacheReadTokens: usage.input_tokens_details?.cached_tokens,
206
+ reasoningTokens: usage.output_tokens_details?.reasoning_tokens
207
+ });
208
+ }
209
+ __name(openAIResponseUsageToUsageMetadata, "openAIResponseUsageToUsageMetadata");
210
+ async function langchainMessageToResponseInput(messages, plugin, model, supportImageInputType) {
211
+ const chatMessages = await langchainMessageToOpenAIMessage(
212
+ messages,
213
+ plugin,
214
+ model,
215
+ supportImageInputType
216
+ );
217
+ const result = [];
218
+ for (const msg of chatMessages) {
219
+ if (msg.role === "tool") {
220
+ result.push({
221
+ type: "function_call_output",
222
+ call_id: msg.tool_call_id,
223
+ output: responseInputContent(msg.content)
224
+ });
225
+ continue;
226
+ }
227
+ if (msg.role === "function") {
228
+ result.push({
229
+ type: "message",
230
+ role: "user",
231
+ content: responseInputContent(msg.content)
232
+ });
233
+ continue;
234
+ }
235
+ if (msg.content != null && msg.content !== "") {
236
+ result.push({
237
+ type: "message",
238
+ role: msg.role === "system" || msg.role === "assistant" || msg.role === "user" ? msg.role : "user",
239
+ content: responseInputContent(msg.content)
240
+ });
241
+ }
242
+ if (msg.role !== "assistant" || !Array.isArray(msg.tool_calls)) {
243
+ continue;
244
+ }
245
+ result.push(
246
+ ...msg.tool_calls.map((toolCall) => ({
247
+ type: "function_call",
248
+ call_id: toolCall.id,
249
+ name: toolCall.function.name,
250
+ arguments: toolCall.function.arguments,
251
+ status: "completed"
252
+ }))
253
+ );
254
+ }
255
+ return result;
256
+ }
257
+ __name(langchainMessageToResponseInput, "langchainMessageToResponseInput");
258
+ function responseInputContent(content) {
259
+ if (typeof content === "string") return content;
260
+ if (!Array.isArray(content)) return "";
261
+ return content.map((part) => {
262
+ if (part.type === "text") {
263
+ const text = part.text;
264
+ return {
265
+ type: "input_text",
266
+ text
267
+ };
268
+ }
269
+ if (part.type === "image_url") {
270
+ const raw = part.image_url;
271
+ const imageUrl = typeof raw === "string" ? raw : raw.url;
272
+ const detail = typeof raw === "string" ? void 0 : raw.detail;
273
+ return {
274
+ type: "input_image",
275
+ image_url: imageUrl,
276
+ detail: detail ?? "auto"
277
+ };
278
+ }
279
+ if (part.type === "file_url") {
280
+ const raw = part["file_url"];
281
+ return {
282
+ type: "input_file",
283
+ file_url: typeof raw === "string" ? raw : raw.url,
284
+ filename: typeof raw === "string" ? void 0 : raw.filename
285
+ };
286
+ }
287
+ return void 0;
288
+ }).filter((part) => part != null);
289
+ }
290
+ __name(responseInputContent, "responseInputContent");
291
+ function formatToolsToResponseTools(tools, includeGoogleSearch, builtinTools = []) {
292
+ const result = (formatToolsToOpenAITools(tools, includeGoogleSearch) ?? []).map((tool) => {
293
+ if (tool.function.name === "googleSearch") {
294
+ return {
295
+ type: "web_search"
296
+ };
297
+ }
298
+ return {
299
+ type: "function",
300
+ name: tool.function.name,
301
+ description: tool.function.description,
302
+ parameters: tool.function.parameters
303
+ };
304
+ });
305
+ for (const tool of builtinTools) {
306
+ if (result.some((item) => item.type === tool.type)) continue;
307
+ result.push(tool);
308
+ }
309
+ return result.length ? result : void 0;
310
+ }
311
+ __name(formatToolsToResponseTools, "formatToolsToResponseTools");
312
+ function responseOutputText(response) {
313
+ if ((response.output_text?.length ?? 0) > 0) return response.output_text;
314
+ return (response.output ?? []).flatMap((item) => {
315
+ if (item.type !== "message") return [];
316
+ return (item.content ?? []).map(
317
+ (part) => {
318
+ if (part.type === "output_text") return part.text;
319
+ if (part.type === "refusal") return part.refusal;
320
+ return "";
321
+ }
322
+ );
323
+ }).join("");
324
+ }
325
+ __name(responseOutputText, "responseOutputText");
326
+ function responseOutputToolCalls(response) {
327
+ return (response.output ?? []).filter(
328
+ (item) => item.type === "function_call"
329
+ );
330
+ }
331
+ __name(responseOutputToolCalls, "responseOutputToolCalls");
332
+ function responseOutputImageItems(response) {
333
+ return (response.output ?? []).filter((item) => item.type === "image_generation_call" && item.result).map(
334
+ (item) => item
335
+ );
336
+ }
337
+ __name(responseOutputImageItems, "responseOutputImageItems");
195
338
  async function langchainMessageToOpenAIMessage(messages, plugin, model, supportImageInputType, removeSystemMessage) {
196
339
  const result = [];
197
340
  const normalizedModel = model ? normalizeOpenAIModelName(model) : model;
@@ -679,6 +822,37 @@ async function buildChatCompletionParams(params, plugin, enableGoogleSearch, sup
679
822
  return deepAssign({}, base, params.overrideRequestParams ?? {});
680
823
  }
681
824
  __name(buildChatCompletionParams, "buildChatCompletionParams");
825
+ async function buildResponseParams(params, plugin, opts = {}, supportImageInput2) {
826
+ const parsedModel = parseOpenAIModelNameWithReasoningEffort(params.model);
827
+ const normalizedModel = parsedModel.model;
828
+ const base = {
829
+ model: normalizedModel,
830
+ input: await langchainMessageToResponseInput(
831
+ params.input,
832
+ plugin,
833
+ normalizedModel,
834
+ supportImageInput2
835
+ ),
836
+ tools: opts.googleSearch || (opts.builtinTools?.length ?? 0) > 0 || params.tools != null ? formatToolsToResponseTools(
837
+ params.tools ?? [],
838
+ opts.googleSearch ?? false,
839
+ opts.builtinTools
840
+ ) : void 0,
841
+ max_output_tokens: normalizedModel.includes("vision") ? void 0 : params.maxTokens,
842
+ temperature: params.temperature === 0 ? void 0 : params.temperature,
843
+ top_p: params.topP,
844
+ prompt_cache_key: params.id,
845
+ reasoning: parsedModel.reasoningEffort == null || parsedModel.reasoningEffort === "none" ? void 0 : { effort: parsedModel.reasoningEffort },
846
+ stream: true,
847
+ stream_options: {
848
+ include_obfuscation: false
849
+ },
850
+ store: false,
851
+ parallel_tool_calls: true
852
+ };
853
+ return deepAssign({}, base, params.overrideRequestParams ?? {});
854
+ }
855
+ __name(buildResponseParams, "buildResponseParams");
682
856
  async function* processStreamResponse(requestContext, iterator) {
683
857
  let defaultRole = "assistant";
684
858
  let errorCount = 0;
@@ -848,6 +1022,190 @@ async function processResponse(requestContext, response) {
848
1022
  }
849
1023
  }
850
1024
  __name(processResponse, "processResponse");
1025
+ async function responseToChatGeneration(response, imageProvider) {
1026
+ if (response.error) {
1027
+ throw new ChatLunaError(
1028
+ ChatLunaErrorCode.API_REQUEST_FAILED,
1029
+ new Error(response.error.message ?? JSON.stringify(response.error))
1030
+ );
1031
+ }
1032
+ const text = responseOutputText(response);
1033
+ const toolCalls = responseOutputToolCalls(response);
1034
+ const images = imageProvider ? await Promise.all(
1035
+ responseOutputImageItems(response).map(
1036
+ (item) => imageProvider(item)
1037
+ )
1038
+ ) : [];
1039
+ const usageMetadata = response.usage ? openAIResponseUsageToUsageMetadata(response.usage) : void 0;
1040
+ const message = new AIMessageChunk2({
1041
+ content: images.length > 0 ? [
1042
+ ...text.length > 0 ? [{ type: "text", text }] : [],
1043
+ ...images.map((image) => ({
1044
+ type: "image_url",
1045
+ image_url: image
1046
+ }))
1047
+ ] : text,
1048
+ tool_call_chunks: toolCalls.map((call, index) => ({
1049
+ name: call.name,
1050
+ args: call.arguments,
1051
+ id: call.call_id,
1052
+ index
1053
+ })),
1054
+ usage_metadata: usageMetadata,
1055
+ additional_kwargs: {
1056
+ conversation: response.conversation
1057
+ }
1058
+ });
1059
+ return new ChatGenerationChunk({
1060
+ generationInfo: usageMetadata == null ? void 0 : {
1061
+ usage_metadata: usageMetadata
1062
+ },
1063
+ message,
1064
+ text
1065
+ });
1066
+ }
1067
+ __name(responseToChatGeneration, "responseToChatGeneration");
1068
+ async function processResponseApiResponse(response, imageProvider) {
1069
+ if (response.status !== 200) {
1070
+ throw new ChatLunaError(
1071
+ ChatLunaErrorCode.API_REQUEST_FAILED,
1072
+ new Error(
1073
+ "Error when calling responses, Status: " + response.status + " " + response.statusText + ", Response: " + await response.text()
1074
+ )
1075
+ );
1076
+ }
1077
+ const responseText = await response.text();
1078
+ try {
1079
+ return await responseToChatGeneration(
1080
+ JSON.parse(responseText),
1081
+ imageProvider
1082
+ );
1083
+ } catch (e) {
1084
+ if (e instanceof ChatLunaError) throw e;
1085
+ throw new ChatLunaError(
1086
+ ChatLunaErrorCode.API_REQUEST_FAILED,
1087
+ new Error(
1088
+ "Error when calling responses, Error: " + e + ", Response: " + responseText
1089
+ )
1090
+ );
1091
+ }
1092
+ }
1093
+ __name(processResponseApiResponse, "processResponseApiResponse");
1094
+ async function* processResponseApiStream(requestContext, iterator, imageProvider) {
1095
+ const args = /* @__PURE__ */ new Map();
1096
+ const calls = /* @__PURE__ */ new Map();
1097
+ let errorCount = 0;
1098
+ let sentConversation = false;
1099
+ for await (const event of iterator) {
1100
+ const chunk = event.data;
1101
+ if (chunk === "[DONE]") break;
1102
+ if (chunk === "" || chunk == null || chunk === "undefined") continue;
1103
+ try {
1104
+ const data = JSON.parse(chunk);
1105
+ if (data.type === "response.output_text.delta" && data.delta) {
1106
+ yield new ChatGenerationChunk({
1107
+ message: new AIMessageChunk2(data.delta),
1108
+ text: data.delta
1109
+ });
1110
+ continue;
1111
+ }
1112
+ if (data.type === "response.output_item.added" && data.item?.type === "function_call") {
1113
+ const item = data.item;
1114
+ calls.set(data.output_index ?? calls.size, {
1115
+ name: item.name,
1116
+ callId: item.call_id,
1117
+ itemId: item.id
1118
+ });
1119
+ continue;
1120
+ }
1121
+ if (data.type === "response.function_call_arguments.delta") {
1122
+ const index = data.output_index ?? 0;
1123
+ args.set(index, (args.get(index) ?? "") + (data.delta ?? ""));
1124
+ continue;
1125
+ }
1126
+ if (data.type === "response.function_call_arguments.done") {
1127
+ const index = data.output_index ?? 0;
1128
+ const call = calls.get(index);
1129
+ yield new ChatGenerationChunk({
1130
+ message: new AIMessageChunk2({
1131
+ content: "",
1132
+ tool_call_chunks: [
1133
+ {
1134
+ name: data.name ?? call?.name,
1135
+ args: data.arguments ?? args.get(index) ?? "",
1136
+ id: call?.callId ?? data.item_id,
1137
+ index
1138
+ }
1139
+ ]
1140
+ }),
1141
+ text: ""
1142
+ });
1143
+ continue;
1144
+ }
1145
+ if (data.type === "response.completed" && data.response) {
1146
+ const usageMetadata = data.response.usage ? openAIResponseUsageToUsageMetadata(data.response.usage) : void 0;
1147
+ const images = imageProvider ? await Promise.all(
1148
+ responseOutputImageItems(data.response).map(
1149
+ (item) => imageProvider(item)
1150
+ )
1151
+ ) : [];
1152
+ if (images.length > 0) {
1153
+ yield new ChatGenerationChunk({
1154
+ message: new AIMessageChunk2({
1155
+ content: images.map((image) => ({
1156
+ type: "image_url",
1157
+ image_url: image
1158
+ }))
1159
+ }),
1160
+ text: ""
1161
+ });
1162
+ }
1163
+ if (!sentConversation) {
1164
+ sentConversation = true;
1165
+ yield new ChatGenerationChunk({
1166
+ message: new AIMessageChunk2({
1167
+ content: "",
1168
+ additional_kwargs: {
1169
+ conversation: data.response.conversation
1170
+ }
1171
+ }),
1172
+ text: ""
1173
+ });
1174
+ }
1175
+ if (usageMetadata) {
1176
+ yield new ChatGenerationChunk({
1177
+ generationInfo: {
1178
+ usage_metadata: usageMetadata
1179
+ },
1180
+ message: new AIMessageChunk2({
1181
+ content: "",
1182
+ usage_metadata: usageMetadata
1183
+ }),
1184
+ text: ""
1185
+ });
1186
+ }
1187
+ continue;
1188
+ }
1189
+ if (data.type === "response.failed" || data.type === "response.incomplete" || data.type === "response.error") {
1190
+ throw new ChatLunaError(
1191
+ ChatLunaErrorCode.API_REQUEST_FAILED,
1192
+ new Error(chunk)
1193
+ );
1194
+ }
1195
+ } catch (e) {
1196
+ if (e instanceof ChatLunaError) throw e;
1197
+ if (errorCount > 5) {
1198
+ requestContext.modelRequester.logger.error(
1199
+ "error with responses chunk",
1200
+ chunk
1201
+ );
1202
+ throw new ChatLunaError(ChatLunaErrorCode.API_REQUEST_FAILED, e);
1203
+ }
1204
+ errorCount++;
1205
+ }
1206
+ }
1207
+ }
1208
+ __name(processResponseApiStream, "processResponseApiStream");
851
1209
  async function* completionStream(requestContext, params, completionUrl = "chat/completions", enableGoogleSearch, supportImageInput2) {
852
1210
  const { modelRequester } = requestContext;
853
1211
  const chatCompletionParams = await buildChatCompletionParams(
@@ -916,6 +1274,64 @@ async function completion(requestContext, params, completionUrl = "chat/completi
916
1274
  }
917
1275
  }
918
1276
  __name(completion, "completion");
1277
+ async function* responseApiCompletionStream(requestContext, params, opts = {}, supportImageInput2, imageProvider) {
1278
+ const { modelRequester } = requestContext;
1279
+ const request = await buildResponseParams(
1280
+ params,
1281
+ requestContext.plugin,
1282
+ opts,
1283
+ supportImageInput2 ?? true
1284
+ );
1285
+ try {
1286
+ const response = await modelRequester.post("responses", request, {
1287
+ signal: params.signal
1288
+ });
1289
+ yield* processResponseApiStream(
1290
+ requestContext,
1291
+ sseIterable(response),
1292
+ imageProvider
1293
+ );
1294
+ } catch (e) {
1295
+ if (requestContext.ctx.chatluna.currentConfig.isLog) {
1296
+ await trackLogToLocal(
1297
+ "Request",
1298
+ JSON.stringify(request),
1299
+ requestContext.ctx.logger("")
1300
+ );
1301
+ }
1302
+ if (e instanceof ChatLunaError) throw e;
1303
+ throw new ChatLunaError(ChatLunaErrorCode.API_REQUEST_FAILED, e);
1304
+ }
1305
+ }
1306
+ __name(responseApiCompletionStream, "responseApiCompletionStream");
1307
+ async function responseApiCompletion(requestContext, params, opts = {}, supportImageInput2, imageProvider) {
1308
+ const { modelRequester } = requestContext;
1309
+ const request = await buildResponseParams(
1310
+ params,
1311
+ requestContext.plugin,
1312
+ opts,
1313
+ supportImageInput2 ?? true
1314
+ );
1315
+ delete request.stream;
1316
+ delete request.stream_options;
1317
+ try {
1318
+ const response = await modelRequester.post("responses", request, {
1319
+ signal: params.signal
1320
+ });
1321
+ return await processResponseApiResponse(response, imageProvider);
1322
+ } catch (e) {
1323
+ if (requestContext.ctx.chatluna.currentConfig.isLog) {
1324
+ await trackLogToLocal(
1325
+ "Request",
1326
+ JSON.stringify(request),
1327
+ requestContext.ctx.logger("")
1328
+ );
1329
+ }
1330
+ if (e instanceof ChatLunaError) throw e;
1331
+ throw new ChatLunaError(ChatLunaErrorCode.API_REQUEST_FAILED, e);
1332
+ }
1333
+ }
1334
+ __name(responseApiCompletion, "responseApiCompletion");
919
1335
  async function createEmbeddings(requestContext, params, embeddingUrl = "embeddings") {
920
1336
  const { modelRequester } = requestContext;
921
1337
  let data;
@@ -992,6 +1408,7 @@ function createRequestContext(ctx, config, pluginConfig, plugin, modelRequester)
992
1408
  __name(createRequestContext, "createRequestContext");
993
1409
  export {
994
1410
  buildChatCompletionParams,
1411
+ buildResponseParams,
995
1412
  completion,
996
1413
  completionStream,
997
1414
  convertDeltaToMessageChunk,
@@ -1004,20 +1421,33 @@ export {
1004
1421
  fetchImageUrl,
1005
1422
  formatToolToOpenAITool,
1006
1423
  formatToolsToOpenAITools,
1424
+ formatToolsToResponseTools,
1007
1425
  getModelMaxContextSize,
1008
1426
  getModels,
1009
1427
  isEmbeddingModel,
1428
+ isImageGenerationModel,
1010
1429
  isNonLLMModel,
1011
1430
  langchainMessageToOpenAIMessage,
1431
+ langchainMessageToResponseInput,
1012
1432
  messageTypeToOpenAIRole,
1013
1433
  normalizeOpenAIModelName,
1434
+ openAIResponseUsageToUsageMetadata,
1014
1435
  openAIUsageToUsageMetadata,
1015
1436
  parseOpenAIModelNameWithReasoningEffort,
1016
1437
  processInterleavedThinkMessages,
1017
1438
  processResponse,
1439
+ processResponseApiResponse,
1440
+ processResponseApiStream,
1018
1441
  processStreamResponse,
1019
1442
  reasoningEffortModelSuffixes,
1020
1443
  removeAdditionalProperties,
1444
+ responseApiCompletion,
1445
+ responseApiCompletionStream,
1446
+ responseInputContent,
1447
+ responseOutputImageItems,
1448
+ responseOutputText,
1449
+ responseOutputToolCalls,
1450
+ responseToChatGeneration,
1021
1451
  supportImageInput,
1022
1452
  transformSystemMessages
1023
1453
  };
@@ -2,6 +2,7 @@ import { ChatGenerationChunk } from '@langchain/core/outputs';
2
2
  import { EmbeddingsRequestParams, ModelRequester, ModelRequestParams } from 'koishi-plugin-chatluna/llm-core/platform/api';
3
3
  import { ClientConfig } from 'koishi-plugin-chatluna/llm-core/platform/config';
4
4
  import { SSEEvent } from 'koishi-plugin-chatluna/utils/sse';
5
+ import { type ResponseBuiltinTool, ResponseObject, ResponseOutputItem } from './types';
5
6
  import { ChatLunaPlugin } from 'koishi-plugin-chatluna/services/chat';
6
7
  import { Context } from 'koishi';
7
8
  import { Response } from 'undici/types/fetch';
@@ -13,6 +14,13 @@ interface RequestContext<T extends ClientConfig = ClientConfig, R extends ChatLu
13
14
  plugin: ChatLunaPlugin;
14
15
  modelRequester: ModelRequester<T, R>;
15
16
  }
17
+ export type ResponseImageProvider = (item: Extract<ResponseOutputItem, {
18
+ type: 'image_generation_call';
19
+ }>) => Promise<string>;
20
+ export interface ResponseToolOptions {
21
+ googleSearch?: boolean;
22
+ builtinTools?: ResponseBuiltinTool[];
23
+ }
16
24
  export declare function buildChatCompletionParams(params: ModelRequestParams, plugin: ChatLunaPlugin, enableGoogleSearch: boolean, supportImageInput?: boolean): Promise<{
17
25
  model: string;
18
26
  messages: import("./types").ChatCompletionResponseMessage[];
@@ -37,10 +45,33 @@ export declare function buildChatCompletionParams(params: ModelRequestParams, pl
37
45
  include_usage: boolean;
38
46
  };
39
47
  } & Record<string, any>>;
48
+ export declare function buildResponseParams(params: ModelRequestParams, plugin: ChatLunaPlugin, opts?: ResponseToolOptions, supportImageInput?: boolean): Promise<{
49
+ model: string;
50
+ input: import("./types").ResponseInputItem[];
51
+ tools: import("./types").ResponseTool[];
52
+ max_output_tokens: number;
53
+ temperature: number;
54
+ top_p: number;
55
+ prompt_cache_key: string;
56
+ reasoning: {
57
+ effort: "minimal" | "low" | "medium" | "high" | "max" | "xhigh";
58
+ };
59
+ stream: boolean;
60
+ stream_options: {
61
+ include_obfuscation: boolean;
62
+ };
63
+ store: boolean;
64
+ parallel_tool_calls: boolean;
65
+ } & Record<string, any>>;
40
66
  export declare function processStreamResponse<T extends ClientConfig, R extends ChatLunaPlugin.Config>(requestContext: RequestContext<T, R>, iterator: AsyncGenerator<SSEEvent, string, unknown>): AsyncGenerator<ChatGenerationChunk, void, unknown>;
41
67
  export declare function processResponse<T extends ClientConfig, R extends ChatLunaPlugin.Config>(requestContext: RequestContext<T, R>, response: Response): Promise<ChatGenerationChunk>;
68
+ export declare function responseToChatGeneration(response: ResponseObject, imageProvider?: ResponseImageProvider): Promise<ChatGenerationChunk>;
69
+ export declare function processResponseApiResponse(response: Response, imageProvider?: ResponseImageProvider): Promise<ChatGenerationChunk>;
70
+ export declare function processResponseApiStream<T extends ClientConfig, R extends ChatLunaPlugin.Config>(requestContext: RequestContext<T, R>, iterator: AsyncGenerator<SSEEvent, string, unknown>, imageProvider?: ResponseImageProvider): AsyncGenerator<ChatGenerationChunk, void, unknown>;
42
71
  export declare function completionStream<T extends ClientConfig, R extends ChatLunaPlugin.Config>(requestContext: RequestContext<T, R>, params: ModelRequestParams, completionUrl?: string, enableGoogleSearch?: boolean, supportImageInput?: boolean): AsyncGenerator<ChatGenerationChunk>;
43
72
  export declare function completion<T extends ClientConfig, R extends ChatLunaPlugin.Config>(requestContext: RequestContext<T, R>, params: ModelRequestParams, completionUrl?: string, enableGoogleSearch?: boolean, supportImageInput?: boolean): Promise<ChatGenerationChunk>;
73
+ export declare function responseApiCompletionStream<T extends ClientConfig, R extends ChatLunaPlugin.Config>(requestContext: RequestContext<T, R>, params: ModelRequestParams, opts?: ResponseToolOptions, supportImageInput?: boolean, imageProvider?: ResponseImageProvider): AsyncGenerator<ChatGenerationChunk>;
74
+ export declare function responseApiCompletion<T extends ClientConfig, R extends ChatLunaPlugin.Config>(requestContext: RequestContext<T, R>, params: ModelRequestParams, opts?: ResponseToolOptions, supportImageInput?: boolean, imageProvider?: ResponseImageProvider): Promise<ChatGenerationChunk>;
44
75
  export declare function createEmbeddings<T extends ClientConfig, R extends ChatLunaPlugin.Config>(requestContext: RequestContext<T, R>, params: EmbeddingsRequestParams, embeddingUrl?: string): Promise<number[] | number[][]>;
45
76
  export declare function getModels<T extends ClientConfig, R extends ChatLunaPlugin.Config>(requestContext: RequestContext<T, R>, config?: RunnableConfig): Promise<string[]>;
46
77
  export declare function createRequestContext<T extends ClientConfig, R extends ChatLunaPlugin.Config>(ctx: Context, config: T, pluginConfig: R, plugin: ChatLunaPlugin, modelRequester: ModelRequester<T, R>): RequestContext<T, R>;
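
A hedged usage sketch of the new Responses API entry point, assuming a RequestContext has already been built elsewhere with createRequestContext and that params carries the usual model/messages/signal fields; the imageProvider shown simply passes through the base64 result and is only a placeholder.

import { responseApiCompletionStream } from '@chatluna/v1-shared-adapter'
import type { ModelRequestParams } from 'koishi-plugin-chatluna/llm-core/platform/api'

async function streamWithResponsesApi(
    requestContext: any, // assumed: built via createRequestContext(ctx, config, pluginConfig, plugin, modelRequester)
    params: ModelRequestParams
) {
    const stream = responseApiCompletionStream(
        requestContext,
        params,
        { googleSearch: false, builtinTools: [{ type: 'image_generation' }] },
        true, // supportImageInput
        async (item) => item.result ?? '' // placeholder imageProvider
    )
    for await (const chunk of stream) {
        process.stdout.write(chunk.text)
    }
}
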
package/lib/types.d.ts CHANGED
@@ -34,6 +34,175 @@ export interface ChatCompletionUsage {
34
34
  prompt_tokens_details?: ChatCompletionPromptTokensDetails;
35
35
  completion_tokens_details?: ChatCompletionCompletionTokensDetails;
36
36
  }
37
+ export interface ResponseUsage {
38
+ input_tokens: number;
39
+ output_tokens: number;
40
+ total_tokens: number;
41
+ input_tokens_details?: {
42
+ cached_tokens?: number;
43
+ };
44
+ output_tokens_details?: {
45
+ reasoning_tokens?: number;
46
+ };
47
+ }
48
+ export type ResponseInputContent = {
49
+ type: 'input_text';
50
+ text: string;
51
+ } | {
52
+ type: 'input_image';
53
+ image_url: string;
54
+ detail?: 'low' | 'high' | 'auto' | 'original';
55
+ } | {
56
+ type: 'input_file';
57
+ file_url?: string;
58
+ file_data?: string;
59
+ file_id?: string;
60
+ filename?: string;
61
+ };
62
+ export type ResponseInputItem = {
63
+ type: 'message';
64
+ role: 'system' | 'developer' | 'user' | 'assistant';
65
+ content: string | ResponseInputContent[];
66
+ } | {
67
+ type: 'function_call';
68
+ call_id: string;
69
+ name: string;
70
+ arguments: string;
71
+ status?: 'completed' | 'in_progress' | 'incomplete';
72
+ } | {
73
+ type: 'function_call_output';
74
+ call_id: string;
75
+ output: string | ResponseInputContent[];
76
+ };
77
+ export type ResponseBuiltinToolName = 'web_search' | 'web_search_preview' | 'image_generation' | 'code_interpreter' | 'file_search';
78
+ export type ResponseBuiltinTool = {
79
+ type: 'web_search' | 'web_search_2025_08_26';
80
+ filters?: {
81
+ allowed_domains?: string[] | null;
82
+ } | null;
83
+ search_context_size?: 'low' | 'medium' | 'high';
84
+ user_location?: {
85
+ city?: string | null;
86
+ country?: string | null;
87
+ region?: string | null;
88
+ timezone?: string | null;
89
+ type?: 'approximate';
90
+ } | null;
91
+ } | {
92
+ type: 'web_search_preview' | 'web_search_preview_2025_03_11';
93
+ search_content_types?: ('text' | 'image')[];
94
+ search_context_size?: 'low' | 'medium' | 'high';
95
+ user_location?: {
96
+ type: 'approximate';
97
+ city?: string | null;
98
+ country?: string | null;
99
+ region?: string | null;
100
+ timezone?: string | null;
101
+ } | null;
102
+ } | {
103
+ type: 'image_generation';
104
+ action?: 'generate' | 'edit' | 'auto';
105
+ background?: 'transparent' | 'opaque' | 'auto';
106
+ input_fidelity?: 'high' | 'low' | null;
107
+ input_image_mask?: {
108
+ file_id?: string;
109
+ image_url?: string;
110
+ };
111
+ model?: string;
112
+ moderation?: 'auto' | 'low';
113
+ output_compression?: number;
114
+ output_format?: 'png' | 'webp' | 'jpeg';
115
+ partial_images?: number;
116
+ quality?: 'low' | 'medium' | 'high' | 'auto';
117
+ size?: '1024x1024' | '1024x1536' | '1536x1024' | 'auto';
118
+ } | {
119
+ type: 'code_interpreter';
120
+ container: string | {
121
+ type: 'auto';
122
+ file_ids?: string[];
123
+ memory_limit?: '1g' | '4g' | '16g' | '64g' | null;
124
+ };
125
+ } | {
126
+ type: 'file_search';
127
+ vector_store_ids: string[];
128
+ filters?: Record<string, unknown> | null;
129
+ max_num_results?: number;
130
+ ranking_options?: {
131
+ ranker?: 'auto' | 'default-2024-11-15';
132
+ score_threshold?: number;
133
+ hybrid_search?: {
134
+ embedding_weight: number;
135
+ text_weight: number;
136
+ };
137
+ };
138
+ };
139
+ export type ResponseTool = {
140
+ type: 'function';
141
+ name: string;
142
+ description?: string;
143
+ parameters?: {
144
+ [key: string]: any;
145
+ };
146
+ strict?: boolean;
147
+ } | ResponseBuiltinTool;
148
+ export interface ResponseObject {
149
+ id: string;
150
+ object: 'response';
151
+ output_text?: string;
152
+ output?: ResponseOutputItem[];
153
+ usage?: ResponseUsage;
154
+ error?: {
155
+ message?: string;
156
+ } | null;
157
+ conversation?: {
158
+ id: string;
159
+ } | null;
160
+ }
161
+ export type ResponseOutputItem = {
162
+ type: 'message';
163
+ role?: string;
164
+ content?: ResponseOutputContent[];
165
+ } | {
166
+ type: 'function_call';
167
+ id?: string;
168
+ call_id: string;
169
+ name: string;
170
+ arguments: string;
171
+ status?: string;
172
+ } | {
173
+ type: 'image_generation_call';
174
+ id?: string;
175
+ result?: string | null;
176
+ output_format?: 'png' | 'jpeg' | 'webp';
177
+ status?: string;
178
+ } | {
179
+ type: string;
180
+ [key: string]: unknown;
181
+ };
182
+ export type ResponseOutputContent = {
183
+ type: 'output_text';
184
+ text: string;
185
+ } | {
186
+ type: 'refusal';
187
+ refusal: string;
188
+ } | {
189
+ type: string;
190
+ [key: string]: unknown;
191
+ };
192
+ export interface ResponseStreamEvent {
193
+ type: string;
194
+ sequence_number?: number;
195
+ item_id?: string;
196
+ output_index?: number;
197
+ content_index?: number;
198
+ delta?: string;
199
+ text?: string;
200
+ name?: string;
201
+ arguments?: string;
202
+ item?: ResponseOutputItem;
203
+ response?: ResponseObject;
204
+ partial_image_b64?: string;
205
+ }
37
206
  export interface ChatCompletionTextPart {
38
207
  type: 'text';
39
208
  text: string;
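
A small illustration typed directly against the new builtin-tool union above; only fields present in these declarations are used, and the deep-import path for the type is an assumption.

import type { ResponseBuiltinTool } from '@chatluna/v1-shared-adapter/lib/types'

const builtinTools: ResponseBuiltinTool[] = [
    {
        type: 'web_search',
        search_context_size: 'medium',
        user_location: { type: 'approximate', country: 'JP', city: 'Tokyo' }
    },
    {
        type: 'file_search',
        vector_store_ids: ['vs_abc123'],
        max_num_results: 5,
        ranking_options: { ranker: 'auto', score_threshold: 0.5 }
    },
    {
        type: 'code_interpreter',
        container: { type: 'auto', memory_limit: '1g' }
    }
]
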
package/lib/utils.d.ts CHANGED
@@ -1,7 +1,7 @@
1
1
  import { AIMessageChunk, BaseMessage, ChatMessageChunk, FunctionMessageChunk, HumanMessageChunk, MessageContentComplex, MessageContentImageUrl, MessageType, SystemMessageChunk, ToolMessageChunk, type UsageMetadata } from '@langchain/core/messages';
2
2
  import { StructuredTool } from '@langchain/core/tools';
3
3
  import { JsonSchema7Type } from 'zod-to-json-schema';
4
- import { ChatCompletionResponseMessage, ChatCompletionResponseMessageRoleEnum, ChatCompletionTool, ChatCompletionUsage } from './types';
4
+ import { ChatCompletionResponseMessage, ChatCompletionResponseMessageRoleEnum, ChatCompletionTool, ChatCompletionUsage, type ResponseBuiltinTool, ResponseInputContent, ResponseInputItem, ResponseObject, ResponseTool, ResponseUsage } from './types';
5
5
  import { ChatLunaPlugin } from 'koishi-plugin-chatluna/services/chat';
6
6
  export declare function createUsageMetadata(data: {
7
7
  inputTokens: number;
@@ -16,6 +16,26 @@ export declare function createUsageMetadata(data: {
16
16
  reasoningTokens?: number;
17
17
  }): UsageMetadata;
18
18
  export declare function openAIUsageToUsageMetadata(usage: ChatCompletionUsage): UsageMetadata;
19
+ export declare function openAIResponseUsageToUsageMetadata(usage: ResponseUsage): UsageMetadata;
20
+ export declare function langchainMessageToResponseInput(messages: BaseMessage[], plugin: ChatLunaPlugin, model?: string, supportImageInputType?: boolean): Promise<ResponseInputItem[]>;
21
+ export declare function responseInputContent(content: ChatCompletionResponseMessage['content']): string | ResponseInputContent[];
22
+ export declare function formatToolsToResponseTools(tools: StructuredTool[], includeGoogleSearch: boolean, builtinTools?: ResponseBuiltinTool[]): ResponseTool[] | undefined;
23
+ export declare function responseOutputText(response: ResponseObject): string;
24
+ export declare function responseOutputToolCalls(response: ResponseObject): {
25
+ type: "function_call";
26
+ id?: string;
27
+ call_id: string;
28
+ name: string;
29
+ arguments: string;
30
+ status?: string;
31
+ }[];
32
+ export declare function responseOutputImageItems(response: ResponseObject): {
33
+ type: "image_generation_call";
34
+ id?: string;
35
+ result?: string | null;
36
+ output_format?: "png" | "jpeg" | "webp";
37
+ status?: string;
38
+ }[];
19
39
  export declare function langchainMessageToOpenAIMessage(messages: BaseMessage[], plugin: ChatLunaPlugin, model?: string, supportImageInputType?: boolean, removeSystemMessage?: boolean): Promise<ChatCompletionResponseMessage[]>;
20
40
  export declare function processInterleavedThinkMessages(convertedMessages: ChatCompletionResponseMessage[], originalMessages: BaseMessage[]): ChatCompletionResponseMessage[];
21
41
  export declare function transformSystemMessages(messages: ChatCompletionResponseMessage[]): ChatCompletionResponseMessage[];
@@ -51,6 +71,6 @@ export declare function messageTypeToOpenAIRole(type: MessageType): ChatCompleti
51
71
  export declare function formatToolsToOpenAITools(tools: StructuredTool[], includeGoogleSearch: boolean): ChatCompletionTool[];
52
72
  export declare function formatToolToOpenAITool(tool: StructuredTool): ChatCompletionTool;
53
73
  export declare function removeAdditionalProperties(schema: JsonSchema7Type): JsonSchema7Type;
54
- export declare function convertMessageToMessageChunk(message: ChatCompletionResponseMessage): AIMessageChunk | HumanMessageChunk | SystemMessageChunk | FunctionMessageChunk | ToolMessageChunk | ChatMessageChunk;
55
- export declare function convertDeltaToMessageChunk(delta: Record<string, any>, defaultRole?: ChatCompletionResponseMessageRoleEnum): AIMessageChunk | HumanMessageChunk | SystemMessageChunk | FunctionMessageChunk | ToolMessageChunk | ChatMessageChunk;
74
+ export declare function convertMessageToMessageChunk(message: ChatCompletionResponseMessage): HumanMessageChunk | AIMessageChunk | SystemMessageChunk | FunctionMessageChunk | ToolMessageChunk | ChatMessageChunk;
75
+ export declare function convertDeltaToMessageChunk(delta: Record<string, any>, defaultRole?: ChatCompletionResponseMessageRoleEnum): HumanMessageChunk | AIMessageChunk | SystemMessageChunk | FunctionMessageChunk | ToolMessageChunk | ChatMessageChunk;
56
76
  export {};
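
A quick sketch of pulling text and tool calls back out of a Responses API payload with the new helpers; the response literal is hand-written to match ResponseObject, and output_text, when present, takes precedence over walking output.

import { responseOutputText, responseOutputToolCalls } from '@chatluna/v1-shared-adapter'
import type { ResponseObject } from '@chatluna/v1-shared-adapter/lib/types'

const response: ResponseObject = {
    id: 'resp_1',
    object: 'response',
    output: [
        {
            type: 'message',
            role: 'assistant',
            content: [{ type: 'output_text', text: 'Checking the weather...' }]
        },
        {
            type: 'function_call',
            call_id: 'call_123',
            name: 'get_weather',
            arguments: '{"city":"Tokyo"}'
        }
    ]
}

console.log(responseOutputText(response))      // 'Checking the weather...'
console.log(responseOutputToolCalls(response)) // [{ type: 'function_call', call_id: 'call_123', ... }]
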
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "name": "@chatluna/v1-shared-adapter",
3
3
  "description": "chatluna shared adapter",
4
- "version": "1.0.32",
4
+ "version": "1.0.33",
5
5
  "main": "lib/index.cjs",
6
6
  "module": "lib/index.mjs",
7
7
  "typings": "lib/index.d.ts",
@@ -70,6 +70,6 @@
70
70
  },
71
71
  "peerDependencies": {
72
72
  "koishi": "^4.18.9",
73
- "koishi-plugin-chatluna": "^1.4.0-alpha.0"
73
+ "koishi-plugin-chatluna": "^1.4.0-alpha.8"
74
74
  }
75
75
  }