@saltcorn/large-language-model 1.0.9 → 1.0.11

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3)
  1. package/generate.js +64 -7
  2. package/index.js +3 -0
  3. package/package.json +1 -1
package/generate.js CHANGED
@@ -198,6 +198,31 @@ const getAudioTranscription = async (
198
198
 
199
199
  const last = (xs) => xs[xs.length - 1];
200
200
 
201
+ const genericResponse = async (
202
+ { backend, apiKey, api_key, provider, ai_sdk_provider, responses_api },
203
+ role,
204
+ prompt,
205
+ opts,
206
+ ) => {
207
+ let chat = opts.chat;
208
+ let result = opts.prompt;
209
+ //console.log("chat", JSON.stringify(chat, null, 2));
210
+ switch (opts.backend || backend) {
211
+ case "AI SDK":
212
+ if (role === "assistant" && typeof prompt === "string")
213
+ opts.chat.push({
214
+ role,
215
+ content: [{
216
+ type: "text",
217
+ text: prompt,
218
+ }],
219
+ });
220
+ break;
221
+ default:
222
+ opts.chat.push({ role, content: prompt });
223
+ }
224
+ };
225
+
201
226
  const toolResponse = async (
202
227
  { backend, apiKey, api_key, provider, ai_sdk_provider, responses_api },
203
228
  opts,
@@ -553,7 +578,12 @@ const getCompletionAISDK = async (
553
578
  const debugRequest = { ...body, model: use_model_name };
554
579
  if (debugResult)
555
580
  console.log("AI SDK request", JSON.stringify(debugRequest, null, 2));
556
- getState().log(6, `AI SDK request ${JSON.stringify(debugRequest)} `);
581
+ getState().log(
582
+ 6,
583
+ `AI SDK request`,
584
+ { tools: Object.keys(debugRequest.tools || {}), model: debugRequest.model },
585
+ JSON.stringify(debugRequest.messages, null, 2),
586
+ );
557
587
  if (debugCollector) debugCollector.request = debugRequest;
558
588
  const reqTimeStart = Date.now();
559
589
 
@@ -575,15 +605,18 @@ const getCompletionAISDK = async (
575
605
  chat.push(...results.response.messages);
576
606
  }
577
607
 
578
- if (debugResult)
579
- console.log("AI SDK response", JSON.stringify(results, null, 2));
580
- else getState().log(6, `AI SDK response ${JSON.stringify(results)}`);
581
608
  if (debugCollector) {
582
609
  debugCollector.response = results;
583
610
  debugCollector.response_time_ms = Date.now() - reqTimeStart;
584
611
  }
585
612
  const allToolCalls = (await results.steps).flatMap((step) => step.toolCalls);
586
-
613
+ if (debugResult)
614
+ console.log("AI SDK response", JSON.stringify(results, null, 2));
615
+ else
616
+ getState().log(6, `AI SDK response`, {
617
+ text: results.text,
618
+ tool_calls: allToolCalls,
619
+ });
587
620
  if (allToolCalls.length) {
588
621
  return {
589
622
  tool_calls: allToolCalls,
@@ -838,10 +871,29 @@ const getCompletionOpenAICompatible = async (
838
871
  if (streamToolCalls) debugCollector.response = streamToolCalls;
839
872
  debugCollector.response_time_ms = Date.now() - reqTimeStart;
840
873
  }
874
+ if (appendToChat && chat && streamToolCalls) {
875
+ chat.push({
876
+ role: "assistant",
877
+ content: streamParts.join("") || null,
878
+ tool_calls: streamToolCalls.tool_calls,
879
+ });
880
+ } else if (appendToChat && chat && streamParts.length > 0) {
881
+ chat.push({ role: "assistant", content: streamParts.join("") });
882
+ }
841
883
  return streamToolCalls
842
884
  ? {
843
885
  content: streamParts.join(""),
844
886
  tool_calls: streamToolCalls.tool_calls,
887
+ hasToolCalls: streamToolCalls.tool_calls?.length,
888
+ getToolCalls() {
889
+ return streamToolCalls.tool_calls.map((tc) => ({
890
+ tool_name: tc.function.name,
891
+ input: tc.function.arguments
892
+ ? JSON.parse(tc.function.arguments)
893
+ : {},
894
+ tool_call_id: tc.id,
895
+ }));
896
+ },
845
897
  }
846
898
  : streamParts.join("");
847
899
  }
@@ -895,7 +947,9 @@ const getCompletionOpenAICompatible = async (
895
947
  getToolCalls() {
896
948
  return tool_calls.map((tc) => ({
897
949
  tool_name: tc.function.name,
898
- input: JSON.parse(tc.function.arguments),
950
+ input: tc.function.arguments
951
+ ? JSON.parse(tc.function.arguments)
952
+ : {},
899
953
  tool_call_id: tc.call_id,
900
954
  }));
901
955
  },
@@ -910,7 +964,9 @@ const getCompletionOpenAICompatible = async (
910
964
  getToolCalls() {
911
965
  return results?.choices?.[0]?.message?.tool_calls.map((tc) => ({
912
966
  tool_name: tc.function.name,
913
- input: JSON.parse(tc.function.arguments),
967
+ input: tc.function.arguments
968
+ ? JSON.parse(tc.function.arguments)
969
+ : {},
914
970
  tool_call_id: tc.id,
915
971
  }));
916
972
  },
@@ -1293,5 +1349,6 @@ module.exports = {
1293
1349
  getImageGeneration,
1294
1350
  getAudioTranscription,
1295
1351
  toolResponse,
1352
+ genericResponse,
1296
1353
  addImageMesssage,
1297
1354
  };
package/index.js CHANGED
@@ -13,6 +13,7 @@ const {
13
13
  getAudioTranscription,
14
14
  toolResponse,
15
15
  addImageMesssage,
16
+ genericResponse,
16
17
  } = require("./generate");
17
18
  const { OPENAI_MODELS } = require("./constants.js");
18
19
  const { eval_expression } = require("@saltcorn/data/models/expression");
@@ -525,6 +526,7 @@ const functions = (config) => {
525
526
  case "image":
526
527
  return await addImageMesssage(config, { prompt, ...opts });
527
528
  default:
529
+ return genericResponse(config, what, prompt, opts);
528
530
  break;
529
531
  }
530
532
  },
@@ -658,6 +660,7 @@ module.exports = {
658
660
  functions,
659
661
  modelpatterns: require("./model.js"),
660
662
  routes,
663
+ ready_for_mobile: false,
661
664
  actions: (config) => ({
662
665
  llm_function_call: require("./function-insert-action.js")(config),
663
666
  llm_generate: {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@saltcorn/large-language-model",
3
- "version": "1.0.9",
3
+ "version": "1.0.11",
4
4
  "description": "Large language models and functionality for Saltcorn",
5
5
  "main": "index.js",
6
6
  "dependencies": {