@saltcorn/large-language-model 0.9.12 → 1.0.0

package/constants.js CHANGED
@@ -7,9 +7,7 @@ const OPENAI_MODELS = [
  "gpt-5",
  "gpt-5-mini",
  "gpt-5-nano",
- "gpt-5.1",
- "gpt-5.1-mini",
- "gpt-5.1-nano",
+ "gpt-5.1",
  "gpt-5.2",
  "gpt-5.2-pro",
  "o3",
package/generate.js CHANGED
@@ -38,7 +38,7 @@ const getEmbedding = async (config, opts) => {
  apiKey: config.api_key,
  embed_model: opts?.embed_model || config.embed_model || config.model,
  },
- opts
+ opts,
  );
  case "OpenAI":
  return await getEmbeddingOpenAICompatible(
@@ -47,7 +47,7 @@ const getEmbedding = async (config, opts) => {
  bearer: opts?.api_key || config.api_key,
  embed_model: opts?.model || config.embed_model,
  },
- opts
+ opts,
  );
  case "OpenAI-compatible API":
  return await getEmbeddingOpenAICompatible(
@@ -61,7 +61,7 @@ const getEmbedding = async (config, opts) => {
  config.embed_model ||
  config.model,
  },
- opts
+ opts,
  );
  case "Local Ollama":
  if (config.embed_endpoint) {
@@ -74,14 +74,14 @@ const getEmbedding = async (config, opts) => {
  config.embed_model ||
  config.model,
  },
- opts
+ opts,
  );
  } else {
  if (!ollamaMod) throw new Error("Not implemented for this backend");

  const { Ollama } = ollamaMod;
  const ollama = new Ollama(
- config.ollama_host ? { host: config.ollama_host } : undefined
+ config.ollama_host ? { host: config.ollama_host } : undefined,
  );
  const olres = await ollama.embeddings({
  model: opts?.model || config.embed_model || config.model,
@@ -112,7 +112,7 @@ const getImageGeneration = async (config, opts) => {
  model: opts?.model || config.model,
  responses_api: config.responses_api,
  },
- opts
+ opts,
  );
  default:
  throw new Error("Image generation not implemented for this backend");
@@ -121,7 +121,7 @@ const getImageGeneration = async (config, opts) => {

  const getAudioTranscription = async (
  { backend, apiKey, api_key, provider, ai_sdk_provider },
- opts
+ opts,
  ) => {
  switch (opts.backend || backend) {
  case "ElevenLabs":
@@ -134,7 +134,7 @@ const getAudioTranscription = async (
  languageCode: opts.languageCode || "eng", // Language of the audio file. If set to null, the model will detect the language automatically.
  numSpeakers: opts.numSpeakers || null, // Language of the audio file. If set to null, the model will detect the language automatically.
  diarize: !!opts.diarize, // Whether to annotate who is speaking
- diarizationThreshold: opts.diarizationThreshold || null
+ diarizationThreshold: opts.diarizationThreshold || null,
  });
  return transcription;
  case "OpenAI":
@@ -144,10 +144,10 @@ const getAudioTranscription = async (
  const fp = opts.file.location
  ? opts.file.location
  : typeof opts.file === "string"
- ? await (
- await File.findOne(opts.file)
- ).location
- : null;
+ ? await (
+ await File.findOne(opts.file)
+ ).location
+ : null;
  const model = opts?.model || "whisper-1";
  const diarize = model === "gpt-4o-transcribe-diarize";
  const transcript1 = await client.audio.transcriptions.create({
@@ -171,8 +171,8 @@ const getAudioTranscription = async (
  (Buffer.isBuffer(opts.file)
  ? opts.file
  : typeof opts.file === "string"
- ? await (await File.findOne(opts.file)).get_contents()
- : await opts.file.get_contents());
+ ? await (await File.findOne(opts.file)).get_contents()
+ : await opts.file.get_contents());
  const extra = {};
  if (opts.prompt)
  extra.providerOptions = {
@@ -193,6 +193,104 @@ const getAudioTranscription = async (
  }
  };

+ const last = (xs) => xs[xs.length - 1];
+
+ const toolResponse = async (
+ { backend, apiKey, api_key, provider, ai_sdk_provider, responses_api },
+ opts,
+ ) => {
+ let chat = opts.chat;
+ let result = opts.prompt;
+ //console.log("chat", JSON.stringify(chat, null, 2));
+ switch (opts.backend || backend) {
+ case "OpenAI":
+ {
+ let tool_call_chat, tool_call;
+ if (!((opts.tool_call_id && opts.tool_name) || opts.tool_call)) {
+ if (opts.tool_call) tool_call_chat = opts.tool_call;
+ else
+ tool_call_chat = last(
+ chat.filter((c) => c.tool_calls || c.type === "function_call"),
+ );
+
+ tool_call = tool_call_chat.tool_calls
+ ? tool_call_chat.tool_calls[0] //original api
+ : tool_call_chat; //responses api
+ }
+ const content =
+ result && typeof result !== "string"
+ ? JSON.stringify(result)
+ : result || "Action run";
+ const new_chat_item = responses_api
+ ? {
+ type: "function_call_output",
+ call_id:
+ opts.tool_call?.tool_call_id ||
+ opts.tool_call_id ||
+ tool_call.call_id,
+ output: content,
+ }
+ : {
+ role: "tool",
+ tool_call_id:
+ opts.tool_call?.tool_call_id ||
+ opts.tool_call_id ||
+ tool_call.toolCallId ||
+ tool_call.id,
+ content,
+ };
+
+ chat.push(new_chat_item);
+ }
+ break;
+ case "AI SDK":
+ {
+ let tool_call, tc;
+ if (!((opts.tool_call_id && opts.tool_name) || opts.tool_call)) {
+ if (opts.tool_call) tool_call = opts.tool_call;
+ else
+ tool_call = last(
+ chat.filter(
+ (c) =>
+ c.role === "assistant" &&
+ Array.isArray(c.content) &&
+ c.content.some((cc) => cc.type === "tool-call"),
+ ),
+ );
+
+ tc = tool_call.content[0];
+ }
+
+ chat.push({
+ role: "tool",
+ content: [
+ {
+ type: "tool-result",
+ toolCallId:
+ opts.tool_call?.tool_call_id ||
+ opts.tool_call_id ||
+ tc.toolCallId,
+ toolName:
+ opts.tool_call?.tool_name || opts.tool_name || tc.toolName,
+ output:
+ !result || typeof result === "string"
+ ? {
+ type: "text",
+ value: result || "Action run",
+ }
+ : {
+ type: "json",
+ value: JSON.parse(JSON.stringify(result)),
+ },
+ },
+ ],
+ });
+ }
+ break;
+ default:
+ }
+ };
+
  const getCompletion = async (config, opts) => {
  switch (config.backend) {
  case "AI SDK":
@@ -202,7 +300,7 @@ const getCompletion = async (config, opts) => {
  apiKey: config.api_key,
  model: opts?.model || config.model,
  },
- opts
+ opts,
  );
  case "OpenAI":
  return await getCompletionOpenAICompatible(
@@ -214,7 +312,7 @@ const getCompletion = async (config, opts) => {
  model: opts?.model || config.model,
  responses_api: config.responses_api,
  },
- opts
+ opts,
  );
  case "OpenAI-compatible API":
  return await getCompletionOpenAICompatible(
@@ -228,7 +326,7 @@ const getCompletion = async (config, opts) => {
  apiKey: opts?.api_key || config.api_key,
  model: opts?.model || config.model,
  },
- opts
+ opts,
  );
  case "Local Ollama":
  return await getCompletionOpenAICompatible(
@@ -238,14 +336,14 @@ const getCompletion = async (config, opts) => {
  : "http://localhost:11434/v1/chat/completions",
  model: opts?.model || config.model,
  },
- opts
+ opts,
  );
  case "Local llama.cpp":
  //TODO only check if unsafe plugins not allowed
  const isRoot = db.getTenantSchema() === db.connectObj.default_schema;
  if (!isRoot)
  throw new Error(
- "llama.cpp inference is not permitted on subdomain tenants"
+ "llama.cpp inference is not permitted on subdomain tenants",
  );
  let hyperStr = "";
  if (opts.temperature) hyperStr += ` --temp ${opts.temperature}`;
@@ -255,7 +353,7 @@ const getCompletion = async (config, opts) => {

  const { stdout, stderr } = await exec(
  `./main -m ${config.model_path} -p "${opts.prompt}" ${nstr}${hyperStr}`,
- { cwd: config.llama_dir }
+ { cwd: config.llama_dir },
  );
  return stdout;
  case "Google Vertex AI":
@@ -286,12 +384,13 @@ const getCompletionAISDK = async (
  systemPrompt,
  prompt,
  debugResult,
+ appendToChat,
  debugCollector,
  chat = [],
  api_key,
  endpoint,
  ...rest
- }
+ },
  ) => {
  const use_model_name = rest.model || model;
  let model_obj = getAiSdkModel({
@@ -313,7 +412,7 @@ const getCompletionAISDK = async (
  ...(Array.isArray(chat.content) ? { content: chat.content.map(f) } : {}),
  };
  };
- const newChat = chat.map(modifyChat);
+ const newChat = appendToChat ? chat : chat.map(modifyChat);

  const body = {
  ...rest,
@@ -327,6 +426,9 @@ const getCompletionAISDK = async (
  ...(prompt ? [{ role: "user", content: prompt }] : []),
  ],
  };
+ if (appendToChat && chat && prompt) {
+ chat.push({ role: "user", content: prompt });
+ }
  if (rest.temperature || temperature) {
  const str_or_num = rest.temperature || temperature;
  body.temperature = +str_or_num;
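Note: when appendToChat is set, the caller's chat array is mutated in place: the user prompt is pushed before the request, and the model's reply messages are pushed after it (see the chat.push(...results.response.messages) further down), so consecutive calls can share one history without the caller reassembling it. A sketch mirroring the "appends to chat history" test added at the end of this diff:

    const chat = [];
    const llm_generate = getState().functions.llm_generate;
    await llm_generate.run("What is the Capital of France?", { chat, appendToChat: true });
    await llm_generate.run("What is the name of the river running through this city?", {
      chat,
      appendToChat: true,
    });
    // chat now holds four entries: two user prompts and two assistant replies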
@@ -342,6 +444,9 @@ const getCompletionAISDK = async (
  "gpt-5",
  "gpt-5-nano",
  "gpt-5-mini",
+ "gpt-5.1",
+ "gpt-5.1-codex",
+ "gpt-5.2",
  ].includes(use_model_name)
  )
  body.temperature = 0.7;
@@ -350,9 +455,9 @@ const getCompletionAISDK = async (
  const prevTools = [...body.tools];
  body.tools = {};
  prevTools.forEach((t) => {
- body.tools[t.function.name] = tool({
- description: t.function.description,
- inputSchema: jsonSchema(t.function.parameters),
+ body.tools[t.name || t.function.name] = tool({
+ description: t.description || t.function.description,
+ inputSchema: jsonSchema(t.parameters || t.function.parameters),
  });
  });
  }
@@ -367,11 +472,21 @@ const getCompletionAISDK = async (
  let results;
  if (rest.streamCallback) {
  delete body.streamCallback;
- results = await streamText(body);
- for await (const textPart of results.textStream) {
+ const results1 = await streamText(body);
+ for await (const textPart of results1.textStream) {
  rest.streamCallback(textPart);
  }
+ results = {
+ response: await results1.response,
+ text: await results1.text,
+ steps: await results1.steps,
+ };
  } else results = await generateText(body);
+
+ if (appendToChat && chat) {
+ chat.push(...results.response.messages);
+ }
+
  if (debugResult)
  console.log("AI SDK response", JSON.stringify(results, null, 2));
  else getState().log(6, `AI SDK response ${JSON.stringify(results)}`);
@@ -387,6 +502,14 @@ const getCompletionAISDK = async (
  content: await results.text,
  messages: (await results.response).messages,
  ai_sdk: true,
+ hasToolCalls: allToolCalls.length,
+ getToolCalls() {
+ return allToolCalls.map((tc) => ({
+ tool_name: tc.toolName,
+ input: tc.input,
+ tool_call_id: tc.toolCallId,
+ }));
+ },
  };
  } else return results.text;
  };
@@ -399,10 +522,11 @@ const getCompletionOpenAICompatible = async (
  debugResult,
  debugCollector,
  chat = [],
+ appendToChat,
  api_key,
  endpoint,
  ...rest
- }
+ },
  ) => {
  const headers = {
  "Content-Type": "application/json",
@@ -440,63 +564,67 @@ const getCompletionOpenAICompatible = async (
  delete body.streamCallback;
  }
  if (responses_api) {
+ delete body.tool_choice;
  for (const tool of body.tools || []) {
- if (tool.type !== "function") continue;
+ if (tool.type !== "function" || !tool.function) continue;
  tool.name = tool.function.name;
  tool.description = tool.function.description;
  tool.parameters = tool.function.parameters;
  if (tool.function.required) tool.required = tool.function.required;
  delete tool.function;
  }
- const newChat = [];
- (chat || []).forEach((c) => {
- if (c.tool_calls) {
- c.tool_calls.forEach((tc) => {
- newChat.push({
- id: tc.id,
- type: "function_call",
- call_id: tc.call_id,
- name: tc.name,
- arguments: tc.arguments,
+ let newChat;
+ if (!appendToChat) {
+ newChat = [];
+ (chat || []).forEach((c) => {
+ if (c.tool_calls) {
+ c.tool_calls.forEach((tc) => {
+ newChat.push({
+ id: tc.id,
+ type: "function_call",
+ call_id: tc.call_id,
+ name: tc.name,
+ arguments: tc.arguments,
+ });
  });
- });
- } else if (c.content?.image_calls) {
- c.content.image_calls.forEach((ic) => {
+ } else if (c.content?.image_calls) {
+ c.content.image_calls.forEach((ic) => {
+ newChat.push({
+ ...ic,
+ result: undefined,
+ filename: undefined,
+ });
+ });
+ } else if (c.content?.mcp_calls) {
+ c.content.mcp_calls.forEach((ic) => {
+ newChat.push({
+ ...ic,
+ });
+ });
+ } else if (c.role === "tool") {
  newChat.push({
- ...ic,
- result: undefined,
- filename: undefined,
+ type: "function_call_output",
+ call_id: c.call_id,
+ output: c.content,
  });
- });
- } else if (c.content?.mcp_calls) {
- c.content.mcp_calls.forEach((ic) => {
+ } else {
+ const fcontent = (c) => {
+ if (c.type === "image_url")
+ return {
+ type: "input_image",
+ image_url: c.image_url.url,
+ };
+ else return c;
+ };
  newChat.push({
- ...ic,
+ ...c,
+ content: Array.isArray(c.content)
+ ? c.content.map(fcontent)
+ : c.content,
  });
- });
- } else if (c.role === "tool") {
- newChat.push({
- type: "function_call_output",
- call_id: c.call_id,
- output: c.content,
- });
- } else {
- const fcontent = (c) => {
- if (c.type === "image_url")
- return {
- type: "input_image",
- image_url: c.image_url.url,
- };
- else return c;
- };
- newChat.push({
- ...c,
- content: Array.isArray(c.content)
- ? c.content.map(fcontent)
- : c.content,
- });
- }
- });
+ }
+ });
+ } else newChat = chat;
  body.input = [
  {
  role: "system",
@@ -517,6 +645,9 @@ const getCompletionOpenAICompatible = async (
  ...(prompt ? [{ role: "user", content: prompt }] : []),
  ];
  }
+ if (appendToChat && chat && prompt) {
+ chat.push({ role: "user", content: prompt });
+ }
  if (debugResult)
  console.log(
  "OpenAI request",
@@ -524,14 +655,14 @@ const getCompletionOpenAICompatible = async (
  "to",
  chatCompleteEndpoint,
  "headers",
- JSON.stringify(headers)
+ JSON.stringify(headers),
  );
  else
  getState().log(
  6,
  `OpenAI request ${JSON.stringify(
- body
- )} to ${chatCompleteEndpoint} headers ${JSON.stringify(headers)}`
+ body,
+ )} to ${chatCompleteEndpoint} headers ${JSON.stringify(headers)}`,
  );
  if (debugCollector) debugCollector.request = body;
  const reqTimeStart = Date.now();
@@ -623,7 +754,7 @@ const getCompletionOpenAICompatible = async (
  : streamParts.join("");
  }
  const results = await rawResponse.json();
- //console.log("results", results);
+
  if (debugResult)
  console.log("OpenAI response", JSON.stringify(results, null, 2));
  else getState().log(6, `OpenAI response ${JSON.stringify(results)}`);
@@ -633,36 +764,49 @@ const getCompletionOpenAICompatible = async (
  }

  if (results.error) throw new Error(`OpenAI error: ${results.error.message}`);
+ if (appendToChat && chat) {
+ if (responses_api) chat.push(...results.output);
+ else chat.push(results.choices[0].message);
+ }
  if (responses_api) {
  const textOutput = results.output
  .filter((o) => o.type === "message")
  .map((o) => o.content.map((c) => c.text).join(""))
  .join("");
+ const tool_calls = emptyToUndefined(
+ results.output
+ .filter((o) => o.type === "function_call")
+ .map((o) => ({
+ function: { name: o.name, arguments: o.arguments },
+ ...o,
+ })),
+ );
  return results.output.some(
  (o) =>
  o.type === "function_call" ||
  o.type === "image_generation_call" ||
  o.type === "mcp_list_tools" ||
- o.type === "mcp_call"
+ o.type === "mcp_call",
  )
  ? {
- tool_calls: emptyToUndefined(
- results.output
- .filter((o) => o.type === "function_call")
- .map((o) => ({
- function: { name: o.name, arguments: o.arguments },
- ...o,
- }))
- ),
+ tool_calls,
  image_calls: emptyToUndefined(
- results.output.filter((o) => o.type === "image_generation_call")
+ results.output.filter((o) => o.type === "image_generation_call"),
  ),
  mcp_calls: emptyToUndefined(
  results.output.filter(
- (o) => o.type === "mcp_call" || o.type === "mcp_list_tools"
- )
+ (o) => o.type === "mcp_call" || o.type === "mcp_list_tools",
+ ),
  ),
  content: textOutput || null,
+ hasToolCalls: tool_calls?.length,
+ getToolCalls() {
+ return tool_calls.map((tc) => ({
+ tool_name: tc.function.name,
+ input: JSON.parse(tc.function.arguments),
+ tool_call_id: tc.call_id,
+ }));
+ },
  }
  : textOutput || null;
  } else
@@ -670,6 +814,14 @@ const getCompletionOpenAICompatible = async (
  ? {
  tool_calls: results?.choices?.[0]?.message?.tool_calls,
  content: results?.choices?.[0]?.message?.content || null,
+ hasToolCalls: results?.choices?.[0]?.message?.tool_calls.length,
+ getToolCalls() {
+ return results?.choices?.[0]?.message?.tool_calls.map((tc) => ({
+ tool_name: tc.function.name,
+ input: JSON.parse(tc.function.arguments),
+ tool_call_id: tc.id,
+ }));
+ },
  }
  : results?.choices?.[0]?.message?.content || null;
  };
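Note: all three completion paths (AI SDK, Responses API, Chat Completions) now expose the same convenience surface on tool-call results: hasToolCalls and a getToolCalls() accessor that normalises each call to { tool_name, input, tool_call_id }, with input already parsed into an object. A consuming sketch (cities_tool as defined in the test suite at the end of this diff):

    const answer = await getState().functions.llm_generate.run(prompt, {
      chat,
      appendToChat: true,
      ...cities_tool,
    });
    if (answer.hasToolCalls)
      for (const tc of answer.getToolCalls()) {
        // tc.tool_name: the function name
        // tc.input: the parsed arguments object
        // tc.tool_call_id: the id to echo back in the tool response
      }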
@@ -688,7 +840,7 @@ const getImageGenOpenAICompatible = async (
  n,
  output_format,
  response_format,
- }
+ },
  ) => {
  const { imageEndpoint, bearer, apiKey, image_model } = config;
  const headers = {
@@ -725,7 +877,7 @@ const getImageGenOpenAICompatible = async (

  const getEmbeddingOpenAICompatible = async (
  config,
- { prompt, model, debugResult }
+ { prompt, model, debugResult },
  ) => {
  const { embeddingsEndpoint, bearer, apiKey, embed_model } = config;
  const headers = {
@@ -762,7 +914,7 @@ const getEmbeddingAISDK = async (config, { prompt, model, debugResult }) => {
  case "OpenAI":
  const openai = createOpenAI({ apiKey: apiKey });
  model_obj = openai.textEmbeddingModel(
- model_name || "text-embedding-3-small"
+ model_name || "text-embedding-3-small",
  );
  //providerOptions.openai = {};
  break;
@@ -815,7 +967,7 @@ const initOAuth2Client = async (config) => {
  const oauth2Client = new google.auth.OAuth2(
  client_id,
  client_secret,
- redirect_uri
+ redirect_uri,
  );
  oauth2Client.setCredentials(pluginCfg.tokens);
  return oauth2Client;
@@ -883,7 +1035,7 @@ const getCompletionGoogleVertex = async (config, opts, oauth2Client) => {
  chatParams.tools = [
  {
  functionDeclarations: opts.tools.map((t) =>
- prepFuncArgsForChat(t.function)
+ prepFuncArgsForChat(t.function),
  ),
  },
  ];
@@ -925,7 +1077,7 @@ const getEmbeddingGoogleVertex = async (config, opts, oauth2Client) => {
  helpers.toValue({
  content: p,
  task_type: config.task_type || "RETRIEVAL_QUERY",
- })
+ }),
  );
  } else {
  instances = [
@@ -957,4 +1109,5 @@ module.exports = {
  getEmbedding,
  getImageGeneration,
  getAudioTranscription,
+ toolResponse,
  };
package/index.js CHANGED
@@ -11,6 +11,7 @@ const {
  getEmbedding,
  getImageGeneration,
  getAudioTranscription,
+ toolResponse
  } = require("./generate");
  const { OPENAI_MODELS } = require("./constants.js");
  const { eval_expression } = require("@saltcorn/data/models/expression");
@@ -381,7 +382,10 @@ const functions = (config) => {
  },
  isAsync: true,
  description: "Generate text with GPT",
- arguments: [{ name: "prompt", type: "String" }],
+ arguments: [
+ { name: "prompt", type: "String", required: true },
+ { name: "options", type: "JSON", tstype: "any" },
+ ],
  },
  llm_image_generate: {
  run: async (prompt, opts) => {
@@ -390,7 +394,10 @@ const functions = (config) => {
  },
  isAsync: true,
  description: "Generate image",
- arguments: [{ name: "prompt", type: "String" }],
+ arguments: [
+ { name: "prompt", type: "String", required: true },
+ { name: "options", type: "JSON", tstype: "any" },
+ ],
  },
  llm_embedding: {
  run: async (prompt, opts) => {
@@ -399,7 +406,10 @@ const functions = (config) => {
  },
  isAsync: true,
  description: "Get vector embedding",
- arguments: [{ name: "prompt", type: "String" }],
+ arguments: [
+ { name: "prompt", type: "String", required: true },
+ { name: "options", type: "JSON", tstype: "any" },
+ ],
  },
  llm_transcribe: {
  run: async (opts) => {
@@ -408,7 +418,21 @@ const functions = (config) => {
  },
  isAsync: true,
  description: "Get vector embedding",
- arguments: [{ name: "prompt", type: "String" }],
+ arguments: [
+ { name: "options", type: "JSON", tstype: "any", required: true },
+ ],
+ },
+ llm_add_tool_response: {
+ run: async (prompt, opts) => {
+ const result = await toolResponse(config, { prompt, ...opts });
+ return result;
+ },
+ isAsync: true,
+ description: "Insert the response to a tool call into a chat",
+ arguments: [
+ { name: "prompt", type: "String", required: true },
+ { name: "options", type: "JSON", tstype: "any" },
+ ],
  },
  };
  };
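Note: llm_add_tool_response pairs with the new getToolCalls() accessor for multi-turn tool use: run a completion, execute the tool yourself, insert the result into the chat, then continue the conversation. Condensed from the "tool use sequence" test added at the end of this diff:

    const chat = [];
    const answer = await getState().functions.llm_generate.run(
      "Generate a list of EU capitals in a structured format using the provided tool",
      { chat, appendToChat: true, ...cities_tool },
    );
    const tc = answer.getToolCalls()[0];
    await getState().functions.llm_add_tool_response.run("List received", {
      chat,
      tool_call: tc,
    });
    // chat now contains the tool result; follow-up llm_generate calls can build on it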
@@ -432,7 +456,7 @@ const routes = (config) => {
  const oauth2Client = new google.auth.OAuth2(
  client_id,
  client_secret,
- redirect_uri
+ redirect_uri,
  );
  const authUrl = oauth2Client.generateAuthUrl({
  access_type: "offline",
@@ -459,7 +483,7 @@ const routes = (config) => {
  const oauth2Client = new google.auth.OAuth2(
  client_id,
  client_secret,
- redirect_uri
+ redirect_uri,
  );
  let plugin = await Plugin.findOne({ name: "large-language-model" });
  if (!plugin) {
@@ -475,8 +499,8 @@ const routes = (config) => {
  req.flash(
  "warning",
  req.__(
- "No refresh token received. Please revoke the plugin's access and try again."
- )
+ "No refresh token received. Please revoke the plugin's access and try again.",
+ ),
  );
  } else {
  const newConfig = { ...(plugin.configuration || {}), tokens };
@@ -488,7 +512,7 @@ const routes = (config) => {
  });
  req.flash(
  "success",
- req.__("Authentication successful! You can now use Vertex AI.")
+ req.__("Authentication successful! You can now use Vertex AI."),
  );
  }
  } catch (error) {
@@ -615,13 +639,13 @@ module.exports = {
  prompt_formula,
  row,
  user,
- "llm_generate prompt formula"
+ "llm_generate prompt formula",
  );
  else prompt = row[prompt_field];
  const opts = {};
  if (override_config) {
  const altcfg = config.altconfigs.find(
- (c) => c.name === override_config
+ (c) => c.name === override_config,
  );
  opts.endpoint = altcfg.endpoint;
  opts.model = altcfg.model;
@@ -679,7 +703,8 @@ module.exports = {
  {
  name: "answer_field",
  label: "Response variable",
- sublabel: "Set the generated response object to this context variable. The subfield <code>text</code> holds the string transcription",
+ sublabel:
+ "Set the generated response object to this context variable. The subfield <code>text</code> holds the string transcription",
  type: "String",
  required: true,
  },
@@ -766,7 +791,7 @@ module.exports = {
  else
  await table.updateRow(
  { [answer_field]: ans.text },
- row[table.pk_name]
+ row[table.pk_name],
  );
  },
  },
@@ -879,7 +904,7 @@ module.exports = {
  prompt_formula,
  row,
  user,
- "llm_generate prompt formula"
+ "llm_generate prompt formula",
  );
  else prompt = row[prompt_field];

@@ -906,7 +931,7 @@ module.exports = {
  "image/png",
  imgContents,
  user?.id,
- min_role || 1
+ min_role || 1,
  );
  upd[answer_field] = file.path_to_serve;
  }
@@ -998,7 +1023,7 @@ module.exports = {
  sublabel:
  "Use this context variable to store the chat history for subsequent prompts",
  type: "String",
- }
+ },
  );
  } else if (table) {
  const jsonFields = table.fields
@@ -1022,7 +1047,7 @@ module.exports = {
  type: "String",
  required: true,
  attributes: { options: jsonFields },
- }
+ },
  );
  }

@@ -1051,7 +1076,7 @@ module.exports = {
  input_type: "section_header",
  label: "JSON fields to generate",
  },
- fieldsField
+ fieldsField,
  );
  return cfgFields;
  },
@@ -1077,7 +1102,7 @@ module.exports = {
  if (model) opts.model = model;
  if (override_config) {
  const altcfg = config.altconfigs.find(
- (c) => c.name === override_config
+ (c) => c.name === override_config,
  );
  opts.endpoint = altcfg.endpoint;
  opts.model = altcfg.model;
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@saltcorn/large-language-model",
- "version": "0.9.12",
+ "version": "1.0.0",
  "description": "Large language models and functionality for Saltcorn",
  "main": "index.js",
  "dependencies": {
@@ -16,17 +16,24 @@
  "openai": "6.16.0",
  "@elevenlabs/elevenlabs-js": "2.31.0"
  },
+ "devDependencies": {
+ "jest": "^29.7.0"
+ },
+ "scripts": {
+ "test": "jest tests --runInBand"
+ },
  "author": "Tom Nielsen",
  "license": "MIT",
  "repository": "github:saltcorn/large-language-model",
  "eslintConfig": {
  "extends": "eslint:recommended",
  "parserOptions": {
- "ecmaVersion": 2020
+ "ecmaVersion": 2024
  },
  "env": {
  "node": true,
- "es6": true
+ "es6": true,
+ "jest/globals": true
  },
  "rules": {
  "no-unused-vars": "off",
@@ -0,0 +1,34 @@
+ module.exports = [
+ {
+ name: "OpenAI completions",
+ model: "gpt-5.1",
+ api_key: process.env.OPENAI_API_KEY,
+ backend: "OpenAI",
+ embed_model: "text-embedding-3-small",
+ image_model: "gpt-image-1",
+ temperature: 0.7,
+ responses_api: false,
+ ai_sdk_provider: "OpenAI",
+ },
+ {
+ name: "OpenAI responses",
+ model: "gpt-5.1",
+ api_key: process.env.OPENAI_API_KEY,
+ backend: "OpenAI",
+ embed_model: "text-embedding-3-small",
+ image_model: "gpt-image-1",
+ temperature: 0.7,
+ responses_api: true,
+ ai_sdk_provider: "OpenAI",
+ },
+ {
+ name: "AI SDK OpenAI",
+ model: "gpt-5.1",
+ api_key: process.env.OPENAI_API_KEY,
+ backend: "AI SDK",
+ embed_model: "text-embedding-3-small",
+ image_model: "gpt-image-1",
+ temperature: 0.7,
+ ai_sdk_provider: "OpenAI",
+ },
+ ];
@@ -0,0 +1,200 @@
+ const { getState } = require("@saltcorn/data/db/state");
+ const View = require("@saltcorn/data/models/view");
+ const Table = require("@saltcorn/data/models/table");
+ const Plugin = require("@saltcorn/data/models/plugin");
+
+ const { mockReqRes } = require("@saltcorn/data/tests/mocks");
+ const { afterAll, beforeAll, describe, it, expect } = require("@jest/globals");
+
+ afterAll(require("@saltcorn/data/db").close);
+ beforeAll(async () => {
+ await require("@saltcorn/data/db/reset_schema")();
+ await require("@saltcorn/data/db/fixtures")();
+
+ getState().registerPlugin("base", require("@saltcorn/data/base-plugin"));
+ });
+
+ // run with:
+ // saltcorn dev:plugin-test -d ~/large-language-model/
+
+ jest.setTimeout(30000);
+
+ for (const nameconfig of require("./configs")) {
+ const { name, ...config } = nameconfig;
+ describe("llm_generate function with " + name, () => {
+ beforeAll(async () => {
+ getState().registerPlugin(
+ "@saltcorn/large-language-model",
+ require(".."),
+ config,
+ );
+ });
+
+ it("generates text", async () => {
+ const answer = await getState().functions.llm_generate.run(
+ "What is the Capital of France?",
+ );
+ //console.log({ answer });
+
+ expect(typeof answer).toBe("string");
+ expect(answer).toContain("Paris");
+ });
+ it("generates text with system prompt", async () => {
+ const answer = await getState().functions.llm_generate.run(
+ "What is the name of the last week day in a normal work week?",
+ {
+ systemPrompt: "Answer in German, even when questions are in English",
+ },
+ );
+ //console.log({ answer });
+
+ expect(typeof answer).toBe("string");
+ expect(answer).toContain("Freitag");
+ });
+ it("generates text with chat history", async () => {
+ const chat = [
+ {
+ role: "user",
+ content: "What is the capital of France?",
+ },
+ {
+ role: "assistant",
+ content: "Paris.",
+ },
+ ];
+ const answer = await getState().functions.llm_generate.run(
+ "What is the name of the river running through this city?",
+ {
+ chat,
+ },
+ );
+ //console.log({ answer });
+
+ expect(typeof answer).toBe("string");
+ expect(answer).toContain("Seine");
+ expect(chat.length).toBe(2);
+ });
+ it("generates text with chat history and no prompt", async () => {
+ const answer = await getState().functions.llm_generate.run("", {
+ chat: [
+ {
+ role: "user",
+ content: "What is the capital of France?",
+ },
+ {
+ role: "assistant",
+ content: "Paris.",
+ },
+ {
+ role: "user",
+ content: "What is the name of the river running through this city?",
+ },
+ ],
+ });
+ //console.log({ answer });
+
+ expect(typeof answer).toBe("string");
+ expect(answer).toContain("Seine");
+ });
+ it("uses tools", async () => {
+ const answer = await getState().functions.llm_generate.run(
+ "Generate a list of EU capitals in a structured format using the provided tool",
+ cities_tool,
+ );
+ expect(typeof answer).toBe("object");
+ const cities = answer.ai_sdk
+ ? answer.tool_calls[0].input?.cities
+ : JSON.parse(answer.tool_calls[0].function.arguments).cities;
+ expect(cities.length).toBe(27);
+ });
+ it("appends to chat history", async () => {
+ const chat = [];
+ const answer1 = await getState().functions.llm_generate.run(
+ "What is the Capital of France?",
+ {
+ chat,
+ appendToChat: true,
+ },
+ );
+ const answer2 = await getState().functions.llm_generate.run(
+ "What is the name of the river running through this city?",
+ {
+ chat,
+ appendToChat: true,
+ },
+ );
+ //console.log({ answer });
+
+ expect(typeof answer2).toBe("string");
+ expect(answer2).toContain("Seine");
+ expect(chat.length).toBe(4);
+ });
+ it("tool use sequence", async () => {
+ const chat = [];
+ const answer = await getState().functions.llm_generate.run(
+ "Generate a list of EU capitals in a structured format using the provided tool",
+ { chat, appendToChat: true, ...cities_tool, streamCallback() {} },
+ );
+ expect(typeof answer).toBe("object");
+
+ const tc = answer.getToolCalls()[0];
+
+ const cities = tc.input.cities;
+ expect(cities.length).toBe(27);
+
+ await getState().functions.llm_add_tool_response.run("List received", {
+ chat,
+ tool_call: tc,
+ });
+
+ const answer1 = await getState().functions.llm_generate.run(
+ "Make the same list in a structured format using the provided tool but for the original 12 member countries of the EU",
+ { chat, appendToChat: true, ...cities_tool },
+ );
+
+ const cities1 = answer1.getToolCalls()[0].input?.cities;
+
+ expect(cities1.length).toBe(12);
+ });
+ });
+ }
+
+ const cities_tool = {
+ tools: [
+ {
+ type: "function",
+ function: {
+ name: "cities",
+ description: "Provide a list of cities by country and city name",
+ parameters: {
+ type: "object",
+ properties: {
+ cities: {
+ type: "array",
+ items: {
+ type: "object",
+ properties: {
+ country_name: {
+ type: "string",
+ description: "Country name",
+ },
+ city_name: {
+ type: "string",
+ description: "City name",
+ },
+ },
+ required: ["country_name", "city_name"],
+ },
+ },
+ },
+ },
+ },
+ },
+ ],
+ tool_choice: {
+ type: "function",
+ function: {
+ name: "cities",
+ },
+ },
+ };