@saltcorn/large-language-model 1.0.5 → 1.0.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3)
  1. package/generate.js +33 -21
  2. package/index.js +96 -1
  3. package/package.json +1 -1
package/generate.js CHANGED
@@ -424,13 +424,24 @@ const getCompletion = async (config, opts) => {
424
424
  }
425
425
  };
426
426
 
427
- const getAiSdkModel = (
428
- { provider, api_key, model_name, anthropic_api_key },
429
- isEmbedding,
430
- ) => {
431
- switch (provider) {
427
+ const getAiSdkModel = ({ config, alt_config, userCfg }, isEmbedding) => {
428
+ const use_config = alt_config
429
+ ? config.alt_aisdk_configs?.find?.((acfg) => acfg.name === alt_config) ||
430
+ config
431
+ : config;
432
+ const use_provider = use_config.provider;
433
+ const model_name = isEmbedding
434
+ ? userCfg.embed_model ||
435
+ userCfg.model ||
436
+ config.embed_model ||
437
+ "text-embedding-3-small"
438
+ : userCfg.model || use_config.model;
439
+
440
+ switch (use_provider) {
432
441
  case "OpenAI":
433
- const openai = createOpenAI({ apiKey: api_key });
442
+ const use_api_key =
443
+ userCfg.api_key || userCfg.apiKey || use_config.api_key;
444
+ const openai = createOpenAI({ apiKey: use_api_key });
434
445
  return isEmbedding
435
446
  ? openai.textEmbeddingModel(model_name)
436
447
  : openai(model_name);
@@ -439,7 +450,8 @@ const getAiSdkModel = (
439
450
  if (isEmbedding)
440
451
  throw new Error("Anthropic does not provide embedding models");
441
452
  const anthropic = createAnthropic({
442
- apiKey: anthropic_api_key,
453
+ apiKey:
454
+ userCfg.api_key || userCfg.apiKey || use_config.anthropic_api_key,
443
455
  });
444
456
  return anthropic(model_name);
445
457
  default:
@@ -448,7 +460,7 @@ const getAiSdkModel = (
448
460
  };
449
461
 
450
462
  const getCompletionAISDK = async (
451
- { apiKey, model, provider, temperature, anthropic_api_key },
463
+ config,
452
464
  {
453
465
  systemPrompt,
454
466
  prompt,
@@ -461,12 +473,12 @@ const getCompletionAISDK = async (
461
473
  ...rest
462
474
  },
463
475
  ) => {
476
+ const { apiKey, model, provider, temperature } = config;
464
477
  const use_model_name = rest.model || model;
465
478
  let model_obj = getAiSdkModel({
466
- model_name: use_model_name,
467
- api_key: api_key || apiKey,
468
- provider,
469
- anthropic_api_key,
479
+ config,
480
+ alt_config: rest.alt_config,
481
+ userCfg: rest,
470
482
  });
471
483
  const modifyChat = (chat) => {
472
484
  const f = (c) => {
@@ -621,7 +633,7 @@ const getCompletionOpenAICompatible = async (
621
633
  if (responses_api) {
622
634
  delete body.tool_choice;
623
635
  if (body.tools) {
624
- const newtools = JSON.parse(JSON.stringify(body.tools))
636
+ const newtools = JSON.parse(JSON.stringify(body.tools));
625
637
  for (const tool of newtools) {
626
638
  if (tool.type !== "function" || !tool.function) continue;
627
639
  tool.name = tool.function.name;
@@ -630,7 +642,7 @@ const getCompletionOpenAICompatible = async (
630
642
  if (tool.function.required) tool.required = tool.function.required;
631
643
  delete tool.function;
632
644
  }
633
- body.tools = newtools
645
+ body.tools = newtools;
634
646
  }
635
647
  if (body.response_format?.type === "json_schema" && !body.text) {
636
648
  body.text = {
@@ -974,16 +986,16 @@ const getEmbeddingOpenAICompatible = async (
974
986
  return results?.data?.[0]?.embedding;
975
987
  };
976
988
 
977
- const getEmbeddingAISDK = async (config, { prompt, model, debugResult }) => {
978
- const { provider, apiKey, embed_model } = config;
989
+ const getEmbeddingAISDK = async (
990
+ config,
991
+ { prompt, model, debugResult, alt_config },
992
+ ) => {
979
993
  let providerOptions = {};
980
- const model_name = model || embed_model || "text-embedding-3-small";
981
994
  let model_obj = getAiSdkModel(
982
995
  {
983
- ...config,
984
- model_name,
985
- api_key: apiKey,
986
- provider,
996
+ config,
997
+ userCfg: { model },
998
+ alt_config,
987
999
  },
988
1000
  true,
989
1001
  );
package/index.js CHANGED
@@ -65,12 +65,12 @@ ${domReady(`
65
65
  required: true,
66
66
  attributes: {
67
67
  options: [
68
+ "AI SDK",
68
69
  "OpenAI",
69
70
  "OpenAI-compatible API",
70
71
  "Local Ollama",
71
72
  ...(isRoot ? ["Local llama.cpp"] : []),
72
73
  "Google Vertex AI",
73
- "AI SDK",
74
74
  ],
75
75
  onChange: "backendChange(this)",
76
76
  },
@@ -357,6 +357,11 @@ ${domReady(`
357
357
  label: "Alternative configurations",
358
358
  showIf: { backend: "OpenAI-compatible API" },
359
359
  },
360
+ {
361
+ input_type: "section_header",
362
+ label: "Alternative configurations",
363
+ showIf: { backend: ["OpenAI-compatible API", "AI SDK"] },
364
+ },
360
365
  new FieldRepeat({
361
366
  name: "altconfigs",
362
367
  label: "Alternative configurations",
@@ -385,6 +390,77 @@ ${domReady(`
385
390
  },
386
391
  ],
387
392
  }),
393
+ new FieldRepeat({
394
+ name: "alt_aisdk_configs",
395
+ label: "Alternative configurations",
396
+ showIf: { backend: "AI SDK" },
397
+ fields: [
398
+ { name: "name", label: "Configuration name", type: "String" },
399
+ {
400
+ name: "alt_provider",
401
+ label: "Provider", //gpt-3.5-turbo
402
+ type: "String",
403
+ required: true,
404
+ attributes: {
405
+ options: ["OpenAI", "Anthropic"],
406
+ },
407
+ },
408
+ {
409
+ name: "api_key",
410
+ label: "API key",
411
+ type: "String",
412
+ required: true,
413
+ fieldview: "password",
414
+ showIf: { alt_provider: "OpenAI" },
415
+ },
416
+ {
417
+ name: "anthropic_api_key",
418
+ label: "API key",
419
+ type: "String",
420
+ required: true,
421
+ fieldview: "password",
422
+ showIf: { alt_provider: "Anthropic" },
423
+ },
424
+ {
425
+ name: "model",
426
+ label: "Model", //gpt-3.5-turbo
427
+ type: "String",
428
+ required: true,
429
+ attributes: {
430
+ calcOptions: [
431
+ "alt_provider",
432
+ {
433
+ OpenAI: OPENAI_MODELS,
434
+ Anthropic: [
435
+ "claude-opus-4-6",
436
+ "claude-sonnet-4-6",
437
+ "claude-haiku-4-5",
438
+ ],
439
+ },
440
+ ],
441
+ },
442
+ },
443
+ {
444
+ name: "embed_model",
445
+ label: "Embedding model", //gpt-3.5-turbo
446
+ type: "String",
447
+ required: true,
448
+ showIf: { alt_provider: ["OpenAI"] },
449
+ attributes: {
450
+ calcOptions: [
451
+ "alt_provider",
452
+ {
453
+ OpenAI: [
454
+ "text-embedding-3-small",
455
+ "text-embedding-3-large",
456
+ "text-embedding-ada-002",
457
+ ],
458
+ },
459
+ ],
460
+ },
461
+ },
462
+ ],
463
+ }),
388
464
  ],
389
465
  });
390
466
  },
@@ -465,6 +541,25 @@ const functions = (config) => {
465
541
  { name: "options", type: "JSON", tstype: "any" },
466
542
  ],
467
543
  },
544
+ llm_get_configuration: {
545
+ run() {
546
+ return {
547
+ ...config,
548
+ alt_config_names:
549
+ config?.backend === "AI SDK"
550
+ ? config?.alt_aisdk_configs
551
+ ?.map?.((cfg) => cfg.name)
552
+ .filter(Boolean)
553
+ : config?.backend === "OpenAI-compatible API"
554
+ ? config?.altconfigs?.map?.((cfg) => cfg.name).filter(Boolean)
555
+ : [],
556
+ };
557
+ },
558
+ isAsync: false,
559
+ description: "Get the LLM configuration details",
560
+ tsreturns: "{alt_config_names: string[], backend: string}",
561
+ arguments: [],
562
+ },
468
563
  };
469
564
  };
470
565
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@saltcorn/large-language-model",
3
- "version": "1.0.5",
3
+ "version": "1.0.6",
4
4
  "description": "Large language models and functionality for Saltcorn",
5
5
  "main": "index.js",
6
6
  "dependencies": {