@chatluna/v1-shared-adapter 1.0.31 → 1.0.33

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/lib/index.mjs CHANGED
@@ -20,7 +20,7 @@ function parseOpenAIModelNameWithReasoningEffort(modelName) {
    let model = modelName;
    let reasoningEffort;
    const explicitMatch = model.match(
-     /-(none|minimal|low|medium|high|xhigh|tiny)-thinking$/
+     /-(none|minimal|low|medium|high|max|xhigh|tiny)-thinking$/
    );
    if (explicitMatch?.[1]) {
      const level = explicitMatch[1];
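Version 1.0.33 adds "max" to the reasoning-effort suffixes the model-name parser recognizes. A minimal sketch of the convention, using the exact regex from this hunk; the model name "o3-max-thinking" and the suffix-stripping step are illustrative assumptions, since the rest of the parser is not shown in this diff:

// TypeScript sketch: how a "-<effort>-thinking" suffix is detected (regex
// copied from the 1.0.33 line above) and presumably stripped by the parser.
const suffix = /-(none|minimal|low|medium|high|max|xhigh|tiny)-thinking$/;

const name = "o3-max-thinking"; // hypothetical model name
const match = name.match(suffix);
if (match?.[1]) {
  const reasoningEffort = match[1];         // "max" -- newly accepted in 1.0.33
  const model = name.slice(0, match.index); // "o3" (stripping is assumed)
  console.log(model, reasoningEffort);
}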
@@ -58,6 +58,10 @@ function isNonLLMModel(modelName) {
    );
  }
  __name(isNonLLMModel, "isNonLLMModel");
+ function isImageGenerationModel(modelName) {
+   return isNonLLMModel(modelName) && ["dall-e", "image"].some((keyword) => modelName.includes(keyword));
+ }
+ __name(isImageGenerationModel, "isImageGenerationModel");
  function getModelMaxContextSize(info) {
    const maxTokens = info.maxTokens;
    if (maxTokens != null) {
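The new exported helper narrows isNonLLMModel down to image generators. A self-contained sketch; the real isNonLLMModel lives earlier in this file and its keyword list is not part of this diff, so the stub below is a hypothetical stand-in:

// Hypothetical stub -- NOT the package's actual isNonLLMModel logic.
const isNonLLMModel = (m: string): boolean =>
  ["dall-e", "image", "embedding", "whisper", "tts"].some((k) => m.includes(k));

// Body copied verbatim from the hunk above: an image-generation model is a
// non-LLM model whose name mentions "dall-e" or "image".
function isImageGenerationModel(modelName: string): boolean {
  return isNonLLMModel(modelName) && ["dall-e", "image"].some((keyword) => modelName.includes(keyword));
}

console.log(isImageGenerationModel("dall-e-3")); // true
console.log(isImageGenerationModel("gpt-4o"));   // false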
@@ -76,6 +80,7 @@ function getModelMaxContextSize(info) {
      "gemini-2.0-pro": 2097152,
      "gemini-2.5": 2097152,
      "gemini-3.0-pro": 1097152,
+     "gemini-3.1-pro": 1097152,
      "gemini-2.0": 2097152,
      deepseek: 128e3,
      "llama3.1": 128e3,
@@ -92,7 +97,7 @@ function getModelMaxContextSize(info) {
        return modelMaxContextSizeTable[key];
      }
    }
-   return getModelContextSize("o1-mini");
+   return 2e5;
  }
  __name(getModelMaxContextSize, "getModelMaxContextSize");
  function createGlobMatcher(pattern) {
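The fallback for models absent from the table changes from langchain's getModelContextSize("o1-mini") lookup to a flat 2e5 (200,000 tokens). A sketch of the resolution order inferred from this function's context lines; the table is abbreviated, and substring key-matching is an assumption since the exact matching rule is not shown:

// Assumed resolution order: explicit maxTokens wins, then a table match on
// the model name, then the new flat 200k default.
const modelMaxContextSizeTable: Record<string, number> = {
  "gemini-3.1-pro": 1097152, // added in 1.0.33
  deepseek: 128e3
};

function getModelMaxContextSizeSketch(info: { maxTokens?: number; model: string }): number {
  if (info.maxTokens != null) return info.maxTokens;
  for (const key of Object.keys(modelMaxContextSizeTable)) {
    if (info.model.includes(key)) return modelMaxContextSizeTable[key]; // matching rule assumed
  }
  return 2e5; // 1.0.33: hard-coded 200k replaces getModelContextSize("o1-mini")
}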
@@ -111,6 +116,7 @@ var imageModelMatchers = [
    "gemini",
    "qwen-vl",
    "omni",
+   "gemma",
    "qwen*-omni",
    "qwen-omni",
    "qwen*-vl",
@@ -125,7 +131,8 @@ var imageModelMatchers = [
    "glm-*v",
    "kimi-k2.5",
    "step3",
-   "grok-4"
+   "grok-4",
+   "mimo-v2.5*"
  ].map((pattern) => createGlobMatcher(pattern));
  function supportImageInput(modelName) {
    const lowerModel = normalizeOpenAIModelName(modelName).toLowerCase();
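createGlobMatcher is defined earlier in the file and not shown here. This sketch assumes a conventional translation of "*" to ".*" so the difference between a literal entry ("grok-4") and the new wildcard entry ("mimo-v2.5*") is visible; the real anchoring and substring rules may differ:

// Assumed glob semantics -- not the package's actual createGlobMatcher.
function globToRegExp(pattern: string): RegExp {
  const escaped = pattern
    .split("*")
    .map((s) => s.replace(/[.+?^${}()|[\]\\]/g, "\\$&"))
    .join(".*");
  return new RegExp(escaped); // unanchored: matches anywhere in the name
}

console.log(globToRegExp("mimo-v2.5*").test("mimo-v2.5-preview")); // true
console.log(globToRegExp("grok-4").test("grok-4"));                // true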
@@ -160,11 +167,13 @@ import { isZodSchemaV3 } from "@langchain/core/utils/types";
  function createUsageMetadata(data) {
    const inputTokenDetails = {
      ...data.inputAudioTokens != null ? { audio: data.inputAudioTokens } : {},
+     ...data.inputImageTokens != null ? { image: data.inputImageTokens } : {},
      ...data.cacheReadTokens != null ? { cache_read: data.cacheReadTokens } : {},
      ...data.cacheCreationTokens != null ? { cache_creation: data.cacheCreationTokens } : {}
    };
    const outputTokenDetails = {
      ...data.outputAudioTokens != null ? { audio: data.outputAudioTokens } : {},
+     ...data.outputImageTokens != null ? { image: data.outputImageTokens } : {},
      ...data.reasoningTokens != null ? { reasoning: data.reasoningTokens } : {}
    };
    return {
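1.0.33 threads image token counts into the usage details on both sides. A self-contained trace of the detail-building logic above, with the two new image spreads included; the function's final return shape is truncated in this hunk, so only the details objects are reproduced:

// Input field names match the spreads above exactly.
const data = {
  inputAudioTokens: undefined as number | undefined,
  inputImageTokens: 850, // new in 1.0.33
  cacheReadTokens: 200,
  outputImageTokens: 64, // new in 1.0.33
  reasoningTokens: 128
};

const inputTokenDetails = {
  ...(data.inputAudioTokens != null ? { audio: data.inputAudioTokens } : {}),
  ...(data.inputImageTokens != null ? { image: data.inputImageTokens } : {}),
  ...(data.cacheReadTokens != null ? { cache_read: data.cacheReadTokens } : {})
};
const outputTokenDetails = {
  ...(data.outputImageTokens != null ? { image: data.outputImageTokens } : {}),
  ...(data.reasoningTokens != null ? { reasoning: data.reasoningTokens } : {})
};

console.log(inputTokenDetails);  // { image: 850, cache_read: 200 }
console.log(outputTokenDetails); // { image: 64, reasoning: 128 }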
@@ -188,6 +197,144 @@ function openAIUsageToUsageMetadata(usage) {
    });
  }
  __name(openAIUsageToUsageMetadata, "openAIUsageToUsageMetadata");
+ function openAIResponseUsageToUsageMetadata(usage) {
+   return createUsageMetadata({
+     inputTokens: usage.input_tokens,
+     outputTokens: usage.output_tokens,
+     totalTokens: usage.total_tokens,
+     cacheReadTokens: usage.input_tokens_details?.cached_tokens,
+     reasoningTokens: usage.output_tokens_details?.reasoning_tokens
+   });
+ }
+ __name(openAIResponseUsageToUsageMetadata, "openAIResponseUsageToUsageMetadata");
+ async function langchainMessageToResponseInput(messages, plugin, model, supportImageInputType) {
+   const chatMessages = await langchainMessageToOpenAIMessage(
+     messages,
+     plugin,
+     model,
+     supportImageInputType
+   );
+   const result = [];
+   for (const msg of chatMessages) {
+     if (msg.role === "tool") {
+       result.push({
+         type: "function_call_output",
+         call_id: msg.tool_call_id,
+         output: responseInputContent(msg.content)
+       });
+       continue;
+     }
+     if (msg.role === "function") {
+       result.push({
+         type: "message",
+         role: "user",
+         content: responseInputContent(msg.content)
+       });
+       continue;
+     }
+     if (msg.content != null && msg.content !== "") {
+       result.push({
+         type: "message",
+         role: msg.role === "system" || msg.role === "assistant" || msg.role === "user" ? msg.role : "user",
+         content: responseInputContent(msg.content)
+       });
+     }
+     if (msg.role !== "assistant" || !Array.isArray(msg.tool_calls)) {
+       continue;
+     }
+     result.push(
+       ...msg.tool_calls.map((toolCall) => ({
+         type: "function_call",
+         call_id: toolCall.id,
+         name: toolCall.function.name,
+         arguments: toolCall.function.arguments,
+         status: "completed"
+       }))
+     );
+   }
+   return result;
+ }
+ __name(langchainMessageToResponseInput, "langchainMessageToResponseInput");
+ function responseInputContent(content) {
+   if (typeof content === "string") return content;
+   if (!Array.isArray(content)) return "";
+   return content.map((part) => {
+     if (part.type === "text") {
+       const text = part.text;
+       return {
+         type: "input_text",
+         text
+       };
+     }
+     if (part.type === "image_url") {
+       const raw = part.image_url;
+       const imageUrl = typeof raw === "string" ? raw : raw.url;
+       const detail = typeof raw === "string" ? void 0 : raw.detail;
+       return {
+         type: "input_image",
+         image_url: imageUrl,
+         detail: detail ?? "auto"
+       };
+     }
+     if (part.type === "file_url") {
+       const raw = part["file_url"];
+       return {
+         type: "input_file",
+         file_url: typeof raw === "string" ? raw : raw.url,
+         filename: typeof raw === "string" ? void 0 : raw.filename
+       };
+     }
+     return void 0;
+   }).filter((part) => part != null);
+ }
+ __name(responseInputContent, "responseInputContent");
+ function formatToolsToResponseTools(tools, includeGoogleSearch, builtinTools = []) {
+   const result = (formatToolsToOpenAITools(tools, includeGoogleSearch) ?? []).map((tool) => {
+     if (tool.function.name === "googleSearch") {
+       return {
+         type: "web_search"
+       };
+     }
+     return {
+       type: "function",
+       name: tool.function.name,
+       description: tool.function.description,
+       parameters: tool.function.parameters
+     };
+   });
+   for (const tool of builtinTools) {
+     if (result.some((item) => item.type === tool.type)) continue;
+     result.push(tool);
+   }
+   return result.length ? result : void 0;
+ }
+ __name(formatToolsToResponseTools, "formatToolsToResponseTools");
+ function responseOutputText(response) {
+   if ((response.output_text?.length ?? 0) > 0) return response.output_text;
+   return (response.output ?? []).flatMap((item) => {
+     if (item.type !== "message") return [];
+     return (item.content ?? []).map(
+       (part) => {
+         if (part.type === "output_text") return part.text;
+         if (part.type === "refusal") return part.refusal;
+         return "";
+       }
+     );
+   }).join("");
+ }
+ __name(responseOutputText, "responseOutputText");
+ function responseOutputToolCalls(response) {
+   return (response.output ?? []).filter(
+     (item) => item.type === "function_call"
+   );
+ }
+ __name(responseOutputToolCalls, "responseOutputToolCalls");
+ function responseOutputImageItems(response) {
+   return (response.output ?? []).filter((item) => item.type === "image_generation_call" && item.result).map(
+     (item) => item
+   );
+ }
+ __name(responseOutputImageItems, "responseOutputImageItems");
  async function langchainMessageToOpenAIMessage(messages, plugin, model, supportImageInputType, removeSystemMessage) {
    const result = [];
    const normalizedModel = model ? normalizeOpenAIModelName(model) : model;
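The converter added in this hunk maps Chat Completions-style messages onto Responses API input items. A hand-derived trace (not executed) of what it produces for a typical tool-call round trip, following the branches above:

// A "tool" message becomes a function_call_output item; assistant tool_calls
// become function_call items; plain messages become message items. The
// assistant turn below has empty content, so it yields no message item.
const chatMessages = [
  { role: "user", content: "What's the weather in Tokyo?" },
  {
    role: "assistant",
    content: "",
    tool_calls: [{ id: "call_1", function: { name: "get_weather", arguments: '{"city":"Tokyo"}' } }]
  },
  { role: "tool", tool_call_id: "call_1", content: "22C, clear" }
];
// Expected output items, in input order:
// [
//   { type: "message", role: "user", content: "What's the weather in Tokyo?" },
//   { type: "function_call", call_id: "call_1", name: "get_weather",
//     arguments: '{"city":"Tokyo"}', status: "completed" },
//   { type: "function_call_output", call_id: "call_1", output: "22C, clear" }
// ]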
@@ -675,6 +822,37 @@ async function buildChatCompletionParams(params, plugin, enableGoogleSearch, sup
    return deepAssign({}, base, params.overrideRequestParams ?? {});
  }
  __name(buildChatCompletionParams, "buildChatCompletionParams");
+ async function buildResponseParams(params, plugin, opts = {}, supportImageInput2) {
+   const parsedModel = parseOpenAIModelNameWithReasoningEffort(params.model);
+   const normalizedModel = parsedModel.model;
+   const base = {
+     model: normalizedModel,
+     input: await langchainMessageToResponseInput(
+       params.input,
+       plugin,
+       normalizedModel,
+       supportImageInput2
+     ),
+     tools: opts.googleSearch || (opts.builtinTools?.length ?? 0) > 0 || params.tools != null ? formatToolsToResponseTools(
+       params.tools ?? [],
+       opts.googleSearch ?? false,
+       opts.builtinTools
+     ) : void 0,
+     max_output_tokens: normalizedModel.includes("vision") ? void 0 : params.maxTokens,
+     temperature: params.temperature === 0 ? void 0 : params.temperature,
+     top_p: params.topP,
+     prompt_cache_key: params.id,
+     reasoning: parsedModel.reasoningEffort == null || parsedModel.reasoningEffort === "none" ? void 0 : { effort: parsedModel.reasoningEffort },
+     stream: true,
+     stream_options: {
+       include_obfuscation: false
+     },
+     store: false,
+     parallel_tool_calls: true
+   };
+   return deepAssign({}, base, params.overrideRequestParams ?? {});
+ }
+ __name(buildResponseParams, "buildResponseParams");
  async function* processStreamResponse(requestContext, iterator) {
    let defaultRole = "assistant";
    let errorCount = 0;
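A hand-traced sketch of the request body buildResponseParams assembles. "gpt-5-high-thinking" is a made-up name using the "-<effort>-thinking" suffix convention from the top of this diff, and the suffix stripping is assumed from the parser's name; field values follow the base object above:

// For params = { model: "gpt-5-high-thinking", maxTokens: 4096,
//                temperature: 0.7, topP: 1, id: "conv-123" }, no tools:
const expectedRequest = {
  model: "gpt-5",                   // suffix stripped by the parser (assumed)
  input: [],                        // built by langchainMessageToResponseInput
  tools: undefined,                 // no tools, googleSearch, or builtinTools
  max_output_tokens: 4096,          // "vision" models get undefined instead
  temperature: 0.7,                 // a temperature of exactly 0 is dropped
  top_p: 1,
  prompt_cache_key: "conv-123",     // params.id
  reasoning: { effort: "high" },    // from the "-high-thinking" suffix
  stream: true,
  stream_options: { include_obfuscation: false },
  store: false,
  parallel_tool_calls: true
};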
@@ -844,6 +1022,190 @@ async function processResponse(requestContext, response) {
    }
  }
  __name(processResponse, "processResponse");
+ async function responseToChatGeneration(response, imageProvider) {
+   if (response.error) {
+     throw new ChatLunaError(
+       ChatLunaErrorCode.API_REQUEST_FAILED,
+       new Error(response.error.message ?? JSON.stringify(response.error))
+     );
+   }
+   const text = responseOutputText(response);
+   const toolCalls = responseOutputToolCalls(response);
+   const images = imageProvider ? await Promise.all(
+     responseOutputImageItems(response).map(
+       (item) => imageProvider(item)
+     )
+   ) : [];
+   const usageMetadata = response.usage ? openAIResponseUsageToUsageMetadata(response.usage) : void 0;
+   const message = new AIMessageChunk2({
+     content: images.length > 0 ? [
+       ...text.length > 0 ? [{ type: "text", text }] : [],
+       ...images.map((image) => ({
+         type: "image_url",
+         image_url: image
+       }))
+     ] : text,
+     tool_call_chunks: toolCalls.map((call, index) => ({
+       name: call.name,
+       args: call.arguments,
+       id: call.call_id,
+       index
+     })),
+     usage_metadata: usageMetadata,
+     additional_kwargs: {
+       conversation: response.conversation
+     }
+   });
+   return new ChatGenerationChunk({
+     generationInfo: usageMetadata == null ? void 0 : {
+       usage_metadata: usageMetadata
+     },
+     message,
+     text
+   });
+ }
+ __name(responseToChatGeneration, "responseToChatGeneration");
+ async function processResponseApiResponse(response, imageProvider) {
+   if (response.status !== 200) {
+     throw new ChatLunaError(
+       ChatLunaErrorCode.API_REQUEST_FAILED,
+       new Error(
+         "Error when calling responses, Status: " + response.status + " " + response.statusText + ", Response: " + await response.text()
+       )
+     );
+   }
+   const responseText = await response.text();
+   try {
+     return await responseToChatGeneration(
+       JSON.parse(responseText),
+       imageProvider
+     );
+   } catch (e) {
+     if (e instanceof ChatLunaError) throw e;
+     throw new ChatLunaError(
+       ChatLunaErrorCode.API_REQUEST_FAILED,
+       new Error(
+         "Error when calling responses, Error: " + e + ", Response: " + responseText
+       )
+     );
+   }
+ }
+ __name(processResponseApiResponse, "processResponseApiResponse");
+ async function* processResponseApiStream(requestContext, iterator, imageProvider) {
+   const args = /* @__PURE__ */ new Map();
+   const calls = /* @__PURE__ */ new Map();
+   let errorCount = 0;
+   let sentConversation = false;
+   for await (const event of iterator) {
+     const chunk = event.data;
+     if (chunk === "[DONE]") break;
+     if (chunk === "" || chunk == null || chunk === "undefined") continue;
+     try {
+       const data = JSON.parse(chunk);
+       if (data.type === "response.output_text.delta" && data.delta) {
+         yield new ChatGenerationChunk({
+           message: new AIMessageChunk2(data.delta),
+           text: data.delta
+         });
+         continue;
+       }
+       if (data.type === "response.output_item.added" && data.item?.type === "function_call") {
+         const item = data.item;
+         calls.set(data.output_index ?? calls.size, {
+           name: item.name,
+           callId: item.call_id,
+           itemId: item.id
+         });
+         continue;
+       }
+       if (data.type === "response.function_call_arguments.delta") {
+         const index = data.output_index ?? 0;
+         args.set(index, (args.get(index) ?? "") + (data.delta ?? ""));
+         continue;
+       }
+       if (data.type === "response.function_call_arguments.done") {
+         const index = data.output_index ?? 0;
+         const call = calls.get(index);
+         yield new ChatGenerationChunk({
+           message: new AIMessageChunk2({
+             content: "",
+             tool_call_chunks: [
+               {
+                 name: data.name ?? call?.name,
+                 args: data.arguments ?? args.get(index) ?? "",
+                 id: call?.callId ?? data.item_id,
+                 index
+               }
+             ]
+           }),
+           text: ""
+         });
+         continue;
+       }
+       if (data.type === "response.completed" && data.response) {
+         const usageMetadata = data.response.usage ? openAIResponseUsageToUsageMetadata(data.response.usage) : void 0;
+         const images = imageProvider ? await Promise.all(
+           responseOutputImageItems(data.response).map(
+             (item) => imageProvider(item)
+           )
+         ) : [];
+         if (images.length > 0) {
+           yield new ChatGenerationChunk({
+             message: new AIMessageChunk2({
+               content: images.map((image) => ({
+                 type: "image_url",
+                 image_url: image
+               }))
+             }),
+             text: ""
+           });
+         }
+         if (!sentConversation) {
+           sentConversation = true;
+           yield new ChatGenerationChunk({
+             message: new AIMessageChunk2({
+               content: "",
+               additional_kwargs: {
+                 conversation: data.response.conversation
+               }
+             }),
+             text: ""
+           });
+         }
+         if (usageMetadata) {
+           yield new ChatGenerationChunk({
+             generationInfo: {
+               usage_metadata: usageMetadata
+             },
+             message: new AIMessageChunk2({
+               content: "",
+               usage_metadata: usageMetadata
+             }),
+             text: ""
+           });
+         }
+         continue;
+       }
+       if (data.type === "response.failed" || data.type === "response.incomplete" || data.type === "response.error") {
+         throw new ChatLunaError(
+           ChatLunaErrorCode.API_REQUEST_FAILED,
+           new Error(chunk)
+         );
+       }
+     } catch (e) {
+       if (e instanceof ChatLunaError) throw e;
+       if (errorCount > 5) {
+         requestContext.modelRequester.logger.error(
+           "error with responses chunk",
+           chunk
+         );
+         throw new ChatLunaError(ChatLunaErrorCode.API_REQUEST_FAILED, e);
+       }
+       errorCount++;
+     }
+   }
+ }
+ __name(processResponseApiStream, "processResponseApiStream");
  async function* completionStream(requestContext, params, completionUrl = "chat/completions", enableGoogleSearch, supportImageInput2) {
    const { modelRequester } = requestContext;
    const chatCompletionParams = await buildChatCompletionParams(
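A consumer sketch for the stream processor added above. Per the generator's branches, text deltas carry chunk.text, completed tool calls surface as tool_call_chunks, and usage metadata arrives via generationInfo once the response.completed event lands; the total_tokens field follows LangChain's usage-metadata convention, and the loose `any` types stand in for the package's generics:

import { processResponseApiStream } from "@chatluna/v1-shared-adapter";

async function drainStream(
  requestContext: any,                          // a RequestContext value
  events: AsyncGenerator<any, string, unknown>  // e.g. sseIterable(response)
): Promise<void> {
  for await (const chunk of processResponseApiStream(requestContext, events)) {
    if (chunk.text) process.stdout.write(chunk.text);
    const usage = chunk.generationInfo?.usage_metadata;
    if (usage) console.log("\ntotal tokens:", usage.total_tokens);
  }
}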
@@ -912,6 +1274,64 @@ async function completion(requestContext, params, completionUrl = "chat/completi
    }
  }
  __name(completion, "completion");
+ async function* responseApiCompletionStream(requestContext, params, opts = {}, supportImageInput2, imageProvider) {
+   const { modelRequester } = requestContext;
+   const request = await buildResponseParams(
+     params,
+     requestContext.plugin,
+     opts,
+     supportImageInput2 ?? true
+   );
+   try {
+     const response = await modelRequester.post("responses", request, {
+       signal: params.signal
+     });
+     yield* processResponseApiStream(
+       requestContext,
+       sseIterable(response),
+       imageProvider
+     );
+   } catch (e) {
+     if (requestContext.ctx.chatluna.currentConfig.isLog) {
+       await trackLogToLocal(
+         "Request",
+         JSON.stringify(request),
+         requestContext.ctx.logger("")
+       );
+     }
+     if (e instanceof ChatLunaError) throw e;
+     throw new ChatLunaError(ChatLunaErrorCode.API_REQUEST_FAILED, e);
+   }
+ }
+ __name(responseApiCompletionStream, "responseApiCompletionStream");
+ async function responseApiCompletion(requestContext, params, opts = {}, supportImageInput2, imageProvider) {
+   const { modelRequester } = requestContext;
+   const request = await buildResponseParams(
+     params,
+     requestContext.plugin,
+     opts,
+     supportImageInput2 ?? true
+   );
+   delete request.stream;
+   delete request.stream_options;
+   try {
+     const response = await modelRequester.post("responses", request, {
+       signal: params.signal
+     });
+     return await processResponseApiResponse(response, imageProvider);
+   } catch (e) {
+     if (requestContext.ctx.chatluna.currentConfig.isLog) {
+       await trackLogToLocal(
+         "Request",
+         JSON.stringify(request),
+         requestContext.ctx.logger("")
+       );
+     }
+     if (e instanceof ChatLunaError) throw e;
+     throw new ChatLunaError(ChatLunaErrorCode.API_REQUEST_FAILED, e);
+   }
+ }
+ __name(responseApiCompletion, "responseApiCompletion");
  async function createEmbeddings(requestContext, params, embeddingUrl = "embeddings") {
    const { modelRequester } = requestContext;
    let data;
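An end-to-end sketch of the new entry points. The image provider receives an image_generation_call output item; mapping item.result (assumed to be base64 image data, as in OpenAI's Responses API) to a data URI is one plausible implementation, not something this diff prescribes. requestContext and params are assumed to be in scope:

import { responseApiCompletionStream } from "@chatluna/v1-shared-adapter";

// Assumption: item.result holds base64-encoded image bytes.
const imageProvider = async (item: { result?: string }) =>
  `data:image/png;base64,${item.result}`;

async function run(requestContext: any, params: any): Promise<void> {
  // Streaming variant; responseApiCompletion takes the same arguments but
  // deletes stream/stream_options before posting to "responses".
  for await (const chunk of responseApiCompletionStream(
    requestContext,
    params,
    { googleSearch: false }, // ResponseToolOptions
    true,                    // supportImageInput
    imageProvider
  )) {
    if (chunk.text) process.stdout.write(chunk.text);
  }
}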
@@ -988,6 +1408,7 @@ function createRequestContext(ctx, config, pluginConfig, plugin, modelRequester)
  __name(createRequestContext, "createRequestContext");
  export {
    buildChatCompletionParams,
+   buildResponseParams,
    completion,
    completionStream,
    convertDeltaToMessageChunk,
@@ -1000,20 +1421,33 @@ export {
    fetchImageUrl,
    formatToolToOpenAITool,
    formatToolsToOpenAITools,
+   formatToolsToResponseTools,
    getModelMaxContextSize,
    getModels,
    isEmbeddingModel,
+   isImageGenerationModel,
    isNonLLMModel,
    langchainMessageToOpenAIMessage,
+   langchainMessageToResponseInput,
    messageTypeToOpenAIRole,
    normalizeOpenAIModelName,
+   openAIResponseUsageToUsageMetadata,
    openAIUsageToUsageMetadata,
    parseOpenAIModelNameWithReasoningEffort,
    processInterleavedThinkMessages,
    processResponse,
+   processResponseApiResponse,
+   processResponseApiStream,
    processStreamResponse,
    reasoningEffortModelSuffixes,
    removeAdditionalProperties,
+   responseApiCompletion,
+   responseApiCompletionStream,
+   responseInputContent,
+   responseOutputImageItems,
+   responseOutputText,
+   responseOutputToolCalls,
+   responseToChatGeneration,
    supportImageInput,
    transformSystemMessages
  };
package/lib/index.d.ts CHANGED
@@ -2,6 +2,7 @@ import { ChatGenerationChunk } from '@langchain/core/outputs';
  import { EmbeddingsRequestParams, ModelRequester, ModelRequestParams } from 'koishi-plugin-chatluna/llm-core/platform/api';
  import { ClientConfig } from 'koishi-plugin-chatluna/llm-core/platform/config';
  import { SSEEvent } from 'koishi-plugin-chatluna/utils/sse';
+ import { type ResponseBuiltinTool, ResponseObject, ResponseOutputItem } from './types';
  import { ChatLunaPlugin } from 'koishi-plugin-chatluna/services/chat';
  import { Context } from 'koishi';
  import { Response } from 'undici/types/fetch';
@@ -13,6 +14,13 @@ interface RequestContext<T extends ClientConfig = ClientConfig, R extends ChatLu
    plugin: ChatLunaPlugin;
    modelRequester: ModelRequester<T, R>;
  }
+ export type ResponseImageProvider = (item: Extract<ResponseOutputItem, {
+   type: 'image_generation_call';
+ }>) => Promise<string>;
+ export interface ResponseToolOptions {
+   googleSearch?: boolean;
+   builtinTools?: ResponseBuiltinTool[];
+ }
  export declare function buildChatCompletionParams(params: ModelRequestParams, plugin: ChatLunaPlugin, enableGoogleSearch: boolean, supportImageInput?: boolean): Promise<{
    model: string;
    messages: import("./types").ChatCompletionResponseMessage[];
@@ -37,10 +45,33 @@ export declare function buildChatCompletionParams(params: ModelRequestParams, pl
      include_usage: boolean;
    };
  } & Record<string, any>>;
+ export declare function buildResponseParams(params: ModelRequestParams, plugin: ChatLunaPlugin, opts?: ResponseToolOptions, supportImageInput?: boolean): Promise<{
+   model: string;
+   input: import("./types").ResponseInputItem[];
+   tools: import("./types").ResponseTool[];
+   max_output_tokens: number;
+   temperature: number;
+   top_p: number;
+   prompt_cache_key: string;
+   reasoning: {
+     effort: "minimal" | "low" | "medium" | "high" | "max" | "xhigh";
+   };
+   stream: boolean;
+   stream_options: {
+     include_obfuscation: boolean;
+   };
+   store: boolean;
+   parallel_tool_calls: boolean;
+ } & Record<string, any>>;
  export declare function processStreamResponse<T extends ClientConfig, R extends ChatLunaPlugin.Config>(requestContext: RequestContext<T, R>, iterator: AsyncGenerator<SSEEvent, string, unknown>): AsyncGenerator<ChatGenerationChunk, void, unknown>;
  export declare function processResponse<T extends ClientConfig, R extends ChatLunaPlugin.Config>(requestContext: RequestContext<T, R>, response: Response): Promise<ChatGenerationChunk>;
+ export declare function responseToChatGeneration(response: ResponseObject, imageProvider?: ResponseImageProvider): Promise<ChatGenerationChunk>;
+ export declare function processResponseApiResponse(response: Response, imageProvider?: ResponseImageProvider): Promise<ChatGenerationChunk>;
+ export declare function processResponseApiStream<T extends ClientConfig, R extends ChatLunaPlugin.Config>(requestContext: RequestContext<T, R>, iterator: AsyncGenerator<SSEEvent, string, unknown>, imageProvider?: ResponseImageProvider): AsyncGenerator<ChatGenerationChunk, void, unknown>;
  export declare function completionStream<T extends ClientConfig, R extends ChatLunaPlugin.Config>(requestContext: RequestContext<T, R>, params: ModelRequestParams, completionUrl?: string, enableGoogleSearch?: boolean, supportImageInput?: boolean): AsyncGenerator<ChatGenerationChunk>;
  export declare function completion<T extends ClientConfig, R extends ChatLunaPlugin.Config>(requestContext: RequestContext<T, R>, params: ModelRequestParams, completionUrl?: string, enableGoogleSearch?: boolean, supportImageInput?: boolean): Promise<ChatGenerationChunk>;
+ export declare function responseApiCompletionStream<T extends ClientConfig, R extends ChatLunaPlugin.Config>(requestContext: RequestContext<T, R>, params: ModelRequestParams, opts?: ResponseToolOptions, supportImageInput?: boolean, imageProvider?: ResponseImageProvider): AsyncGenerator<ChatGenerationChunk>;
+ export declare function responseApiCompletion<T extends ClientConfig, R extends ChatLunaPlugin.Config>(requestContext: RequestContext<T, R>, params: ModelRequestParams, opts?: ResponseToolOptions, supportImageInput?: boolean, imageProvider?: ResponseImageProvider): Promise<ChatGenerationChunk>;
  export declare function createEmbeddings<T extends ClientConfig, R extends ChatLunaPlugin.Config>(requestContext: RequestContext<T, R>, params: EmbeddingsRequestParams, embeddingUrl?: string): Promise<number[] | number[][]>;
  export declare function getModels<T extends ClientConfig, R extends ChatLunaPlugin.Config>(requestContext: RequestContext<T, R>, config?: RunnableConfig): Promise<string[]>;
  export declare function createRequestContext<T extends ClientConfig, R extends ChatLunaPlugin.Config>(ctx: Context, config: T, pluginConfig: R, plugin: ChatLunaPlugin, modelRequester: ModelRequester<T, R>): RequestContext<T, R>;