@saltcorn/large-language-model 0.7.10 → 0.8.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3)
  1. package/generate.js +122 -14
  2. package/index.js +9 -0
  3. package/package.json +1 -1
package/generate.js CHANGED
@@ -83,9 +83,12 @@ const getCompletion = async (config, opts) => {
     case "OpenAI":
       return await getCompletionOpenAICompatible(
         {
-          chatCompleteEndpoint: "https://api.openai.com/v1/chat/completions",
+          chatCompleteEndpoint: config.responses_api
+            ? "https://api.openai.com/v1/responses"
+            : "https://api.openai.com/v1/chat/completions",
           bearer: opts?.api_key || opts?.bearer || config.api_key,
           model: opts?.model || config.model,
+          responses_api: config.responses_api,
         },
         opts
       );
@@ -144,7 +147,7 @@ const getCompletion = async (config, opts) => {
 };
 
 const getCompletionOpenAICompatible = async (
-  { chatCompleteEndpoint, bearer, apiKey, model },
+  { chatCompleteEndpoint, bearer, apiKey, model, responses_api },
   {
     systemPrompt,
     prompt,
@@ -165,17 +168,87 @@ const getCompletionOpenAICompatible = async (
   const body = {
     //prompt: "How are you?",
     model: rest.model || model,
-    messages: [
+    temperature: temperature || 0.7,
+    ...rest,
+  };
+  if (responses_api) {
+    for (const tool of body.tools || []) {
+      if (tool.type !== "function") continue;
+      tool.name = tool.function.name;
+      tool.description = tool.function.description;
+      tool.parameters = tool.function.parameters;
+      if (tool.function.required) tool.required = tool.function.required;
+      delete tool.function;
+    }
+    const newChat = [];
+    (chat || []).forEach((c) => {
+      if (c.tool_calls) {
+        c.tool_calls.forEach((tc) => {
+          newChat.push({
+            id: tc.id,
+            type: "function_call",
+            call_id: tc.call_id,
+            name: tc.name,
+            arguments: tc.arguments,
+          });
+        });
+      } else if (c.content?.image_calls) {
+        c.content.image_calls.forEach((ic) => {
+          newChat.push({
+            ...ic,
+            result: undefined,
+            filename: undefined,
+          });
+        });
+      } else if (c.content?.mcp_calls) {
+        c.content.mcp_calls.forEach((ic) => {
+          newChat.push({
+            ...ic,
+          });
+        });
+      } else if (c.role === "tool") {
+        newChat.push({
+          type: "function_call_output",
+          call_id: c.call_id,
+          output: c.content,
+        });
+      } else {
+        const fcontent = (c) => {
+          if (c.type === "image_url")
+            return {
+              type: "input_image",
+              image_url: c.image_url.url,
+            };
+          else return c;
+        };
+        newChat.push({
+          ...c,
+          content: Array.isArray(c.content)
+            ? c.content.map(fcontent)
+            : c.content,
+        });
+      }
+    });
+    body.input = [
+      {
+        role: "system",
+        content: systemPrompt || "You are a helpful assistant.",
+      },
+      ...newChat,
+      ...(prompt ? [{ role: "user", content: prompt }] : []),
+    ];
+  } else {
+    // not response api
+    body.tools = body.tools.filter((t) => t.function);
+    body.messages = [
       {
         role: "system",
         content: systemPrompt || "You are a helpful assistant.",
       },
       ...chat,
       ...(prompt ? [{ role: "user", content: prompt }] : []),
-    ],
-    temperature: temperature || 0.7,
-    ...rest,
-  };
+    ];
+  }
   if (debugResult)
     console.log(
       "OpenAI request",
@@ -198,19 +271,54 @@ const getCompletionOpenAICompatible = async (
     body: JSON.stringify(body),
   });
   const results = await rawResponse.json();
+  //console.log("results", results);
   if (debugResult)
     console.log("OpenAI response", JSON.stringify(results, null, 2));
   else getState().log(6, `OpenAI response ${JSON.stringify(results)}`);
   if (results.error) throw new Error(`OpenAI error: ${results.error.message}`);
-
-  return results?.choices?.[0]?.message?.tool_calls
-    ? {
-        tool_calls: results?.choices?.[0]?.message?.tool_calls,
-        content: results?.choices?.[0]?.message?.content || null,
-      }
-    : results?.choices?.[0]?.message?.content || null;
+  if (responses_api) {
+    const textOutput = results.output
+      .filter((o) => o.type === "message")
+      .map((o) => o.content.map((c) => c.text).join(""))
+      .join("");
+    return results.output.some(
+      (o) =>
+        o.type === "function_call" ||
+        o.type === "image_generation_call" ||
+        o.type === "mcp_list_tools" ||
+        o.type === "mcp_call"
+    )
+      ? {
+          tool_calls: emptyToUndefined(
+            results.output
+              .filter((o) => o.type === "function_call")
+              .map((o) => ({
+                function: { name: o.name, arguments: o.arguments },
+                ...o,
+              }))
+          ),
+          image_calls: emptyToUndefined(
+            results.output.filter((o) => o.type === "image_generation_call")
+          ),
+          mcp_calls: emptyToUndefined(
+            results.output.filter(
+              (o) => o.type === "mcp_call" || o.type === "mcp_list_tools"
+            )
+          ),
+          content: textOutput || null,
+        }
+      : textOutput || null;
+  } else
+    return results?.choices?.[0]?.message?.tool_calls
+      ? {
+          tool_calls: results?.choices?.[0]?.message?.tool_calls,
+          content: results?.choices?.[0]?.message?.content || null,
+        }
+      : results?.choices?.[0]?.message?.content || null;
 };
 
+const emptyToUndefined = (xs) => (xs.length ? xs : undefined);
+
 const getEmbeddingOpenAICompatible = async (
   config,
   { prompt, model, debugResult }
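
Note: the new responses_api branch above walks results.output, joining message items into text and collecting function_call, image_generation_call and mcp_call items. A minimal sketch of that reduction against an invented sample output array (the sample data below is illustrative, not actual API output):

// Sketch only: reduce a Responses API `output` array to the
// { tool_calls, content } shape returned above. Sample items are invented.
const emptyToUndefined = (xs) => (xs.length ? xs : undefined);

const sampleOutput = [
  {
    type: "message",
    content: [{ type: "output_text", text: "Let me check the weather." }],
  },
  {
    type: "function_call",
    call_id: "call_abc123",
    name: "get_weather",
    arguments: '{"city":"Oslo"}',
  },
];

const content = sampleOutput
  .filter((o) => o.type === "message")
  .map((o) => o.content.map((c) => c.text).join(""))
  .join("");

const tool_calls = emptyToUndefined(
  sampleOutput
    .filter((o) => o.type === "function_call")
    .map((o) => ({ function: { name: o.name, arguments: o.arguments }, ...o }))
);

console.log({ tool_calls, content: content || null });
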
package/index.js CHANGED
@@ -172,8 +172,17 @@ ${domReady(`
       sublabel: "From your OpenAI account",
       type: "String",
       required: true,
+      fieldview: "password",
       showIf: { backend: "OpenAI" },
     },
+    {
+      name: "responses_api",
+      label: "Response API", //gpt-3.5-turbo
+      type: "Bool",
+      sublabel: "Use the newer Responses API",
+      showIf: { backend: "OpenAI" },
+
+    },
     {
       name: "llama_dir",
       label: "llama.cpp directory",
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@saltcorn/large-language-model",
-  "version": "0.7.10",
+  "version": "0.8.1",
   "description": "Large language models and functionality for Saltcorn",
   "main": "index.js",
   "dependencies": {