@saltcorn/large-language-model 1.0.1 → 1.0.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/constants.js +17 -2
- package/generate.js +36 -44
- package/index.js +21 -3
- package/package.json +2 -1
- package/tests/configs.js +9 -0
- package/tests/llm.test.js +9 -0
package/constants.js
CHANGED
|
@@ -7,9 +7,10 @@ const OPENAI_MODELS = [
|
|
|
7
7
|
"gpt-5",
|
|
8
8
|
"gpt-5-mini",
|
|
9
9
|
"gpt-5-nano",
|
|
10
|
-
"gpt-5.1",
|
|
10
|
+
"gpt-5.1",
|
|
11
11
|
"gpt-5.2",
|
|
12
12
|
"gpt-5.2-pro",
|
|
13
|
+
"gpt-5.4",
|
|
13
14
|
"o3",
|
|
14
15
|
"o3-mini",
|
|
15
16
|
"o3-pro",
|
|
@@ -21,6 +22,20 @@ const OPENAI_MODELS = [
|
|
|
21
22
|
"gpt-5.1-codex-max",
|
|
22
23
|
];
|
|
23
24
|
|
|
25
|
+
const NO_TEMP_MODELS = [
|
|
26
|
+
"o1",
|
|
27
|
+
"o3",
|
|
28
|
+
"o3-mini",
|
|
29
|
+
"o4-mini",
|
|
30
|
+
"gpt-5",
|
|
31
|
+
"gpt-5-nano",
|
|
32
|
+
"gpt-5-mini",
|
|
33
|
+
"gpt-5.1",
|
|
34
|
+
"gpt-5.1-codex",
|
|
35
|
+
"gpt-5.2",
|
|
36
|
+
"gpt-5.4",
|
|
37
|
+
];
|
|
38
|
+
|
|
24
39
|
// https://github.com/ollama/ollama/blob/main/docs/faq.md#where-are-models-stored
|
|
25
40
|
const OLLAMA_MODELS_PATH = {
|
|
26
41
|
Darwin: `${process.env.HOME}/.ollama/models`,
|
|
@@ -28,4 +43,4 @@ const OLLAMA_MODELS_PATH = {
|
|
|
28
43
|
Windows_NT: "C:\\Users\\%username%\\.ollama\\models.",
|
|
29
44
|
};
|
|
30
45
|
|
|
31
|
-
module.exports = { OPENAI_MODELS, OLLAMA_MODELS_PATH };
|
|
46
|
+
module.exports = { OPENAI_MODELS, OLLAMA_MODELS_PATH, NO_TEMP_MODELS };
|
package/generate.js
CHANGED
|
@@ -23,8 +23,10 @@ const {
|
|
|
23
23
|
experimental_transcribe,
|
|
24
24
|
} = require("ai");
|
|
25
25
|
const { createOpenAI } = require("@ai-sdk/openai");
|
|
26
|
+
const { createAnthropic } = require("@ai-sdk/anthropic");
|
|
26
27
|
const OpenAI = require("openai");
|
|
27
28
|
const { ElevenLabsClient } = require("@elevenlabs/elevenlabs-js");
|
|
29
|
+
const { NO_TEMP_MODELS } = require("./constants");
|
|
28
30
|
|
|
29
31
|
let ollamaMod;
|
|
30
32
|
if (features.esm_plugins) ollamaMod = require("ollama");
|
|
@@ -348,6 +350,7 @@ const getCompletion = async (config, opts) => {
|
|
|
348
350
|
case "AI SDK":
|
|
349
351
|
return await getCompletionAISDK(
|
|
350
352
|
{
|
|
353
|
+
...config,
|
|
351
354
|
provider: config.ai_sdk_provider,
|
|
352
355
|
apiKey: config.api_key,
|
|
353
356
|
model: opts?.model || config.model,
|
|
@@ -420,18 +423,31 @@ const getCompletion = async (config, opts) => {
|
|
|
420
423
|
}
|
|
421
424
|
};
|
|
422
425
|
|
|
423
|
-
const getAiSdkModel = (
|
|
426
|
+
const getAiSdkModel = (
|
|
427
|
+
{ provider, api_key, model_name, anthropic_api_key },
|
|
428
|
+
isEmbedding,
|
|
429
|
+
) => {
|
|
424
430
|
switch (provider) {
|
|
425
431
|
case "OpenAI":
|
|
426
432
|
const openai = createOpenAI({ apiKey: api_key });
|
|
427
|
-
return openai(model_name);
|
|
433
|
+
return isEmbedding
|
|
434
|
+
? openai.textEmbeddingModel(model_name)
|
|
435
|
+
: openai(model_name);
|
|
436
|
+
|
|
437
|
+
case "Anthropic":
|
|
438
|
+
if (isEmbedding)
|
|
439
|
+
throw new Error("Anthropic does not provide embedding models");
|
|
440
|
+
const anthropic = createAnthropic({
|
|
441
|
+
apiKey: anthropic_api_key,
|
|
442
|
+
});
|
|
443
|
+
return anthropic(model_name);
|
|
428
444
|
default:
|
|
429
445
|
throw new Error("Provider not found: " + provider);
|
|
430
446
|
}
|
|
431
447
|
};
|
|
432
448
|
|
|
433
449
|
const getCompletionAISDK = async (
|
|
434
|
-
{ apiKey, model, provider, temperature },
|
|
450
|
+
{ apiKey, model, provider, temperature, anthropic_api_key },
|
|
435
451
|
{
|
|
436
452
|
systemPrompt,
|
|
437
453
|
prompt,
|
|
@@ -449,6 +465,7 @@ const getCompletionAISDK = async (
|
|
|
449
465
|
model_name: use_model_name,
|
|
450
466
|
api_key: api_key || apiKey,
|
|
451
467
|
provider,
|
|
468
|
+
anthropic_api_key,
|
|
452
469
|
});
|
|
453
470
|
const modifyChat = (chat) => {
|
|
454
471
|
const f = (c) => {
|
|
@@ -481,27 +498,15 @@ const getCompletionAISDK = async (
|
|
|
481
498
|
if (appendToChat && chat && prompt) {
|
|
482
499
|
chat.push({ role: "user", content: prompt });
|
|
483
500
|
}
|
|
484
|
-
if (
|
|
501
|
+
if (NO_TEMP_MODELS.includes(use_model_name)) {
|
|
502
|
+
delete body.temperature;
|
|
503
|
+
} else if (rest.temperature || temperature) {
|
|
485
504
|
const str_or_num = rest.temperature || temperature;
|
|
486
505
|
body.temperature = +str_or_num;
|
|
487
506
|
} else if (rest.temperature === null) {
|
|
488
507
|
delete body.temperature;
|
|
489
508
|
} else if (typeof temperature === "undefined") {
|
|
490
|
-
if (
|
|
491
|
-
![
|
|
492
|
-
"o1",
|
|
493
|
-
"o3",
|
|
494
|
-
"o3-mini",
|
|
495
|
-
"o4-mini",
|
|
496
|
-
"gpt-5",
|
|
497
|
-
"gpt-5-nano",
|
|
498
|
-
"gpt-5-mini",
|
|
499
|
-
"gpt-5.1",
|
|
500
|
-
"gpt-5.1-codex",
|
|
501
|
-
"gpt-5.2",
|
|
502
|
-
].includes(use_model_name)
|
|
503
|
-
)
|
|
504
|
-
body.temperature = 0.7;
|
|
509
|
+
if (!NO_TEMP_MODELS.includes(use_model_name)) body.temperature = 0.7;
|
|
505
510
|
}
|
|
506
511
|
if (body.tools) {
|
|
507
512
|
const prevTools = [...body.tools];
|
|
@@ -598,18 +603,7 @@ const getCompletionOpenAICompatible = async (
|
|
|
598
603
|
} else if (rest.temperature === null) {
|
|
599
604
|
delete body.temperature;
|
|
600
605
|
} else if (typeof temperature === "undefined") {
|
|
601
|
-
if (
|
|
602
|
-
![
|
|
603
|
-
"o1",
|
|
604
|
-
"o3",
|
|
605
|
-
"o3-mini",
|
|
606
|
-
"o4-mini",
|
|
607
|
-
"gpt-5",
|
|
608
|
-
"gpt-5-nano",
|
|
609
|
-
"gpt-5-mini",
|
|
610
|
-
].includes(use_model)
|
|
611
|
-
)
|
|
612
|
-
body.temperature = 0.7;
|
|
606
|
+
if (!NO_TEMP_MODELS.includes(use_model)) body.temperature = 0.7;
|
|
613
607
|
}
|
|
614
608
|
if (rest.streamCallback && global.fetch) {
|
|
615
609
|
body.stream = true;
|
|
@@ -958,19 +952,17 @@ const getEmbeddingOpenAICompatible = async (
|
|
|
958
952
|
|
|
959
953
|
const getEmbeddingAISDK = async (config, { prompt, model, debugResult }) => {
|
|
960
954
|
const { provider, apiKey, embed_model } = config;
|
|
961
|
-
let
|
|
962
|
-
|
|
963
|
-
|
|
964
|
-
|
|
965
|
-
|
|
966
|
-
|
|
967
|
-
|
|
968
|
-
|
|
969
|
-
|
|
970
|
-
|
|
971
|
-
|
|
972
|
-
break;
|
|
973
|
-
}
|
|
955
|
+
let providerOptions = {};
|
|
956
|
+
const model_name = model || embed_model || "text-embedding-3-small";
|
|
957
|
+
let model_obj = getAiSdkModel(
|
|
958
|
+
{
|
|
959
|
+
...config,
|
|
960
|
+
model_name,
|
|
961
|
+
api_key: apiKey,
|
|
962
|
+
provider,
|
|
963
|
+
},
|
|
964
|
+
true,
|
|
965
|
+
);
|
|
974
966
|
const body = {
|
|
975
967
|
model: model_obj,
|
|
976
968
|
providerOptions,
|
package/index.js
CHANGED
|
@@ -82,7 +82,7 @@ ${domReady(`
|
|
|
82
82
|
required: true,
|
|
83
83
|
showIf: { backend: "AI SDK" },
|
|
84
84
|
attributes: {
|
|
85
|
-
options: ["OpenAI"],
|
|
85
|
+
options: ["OpenAI", "Anthropic"],
|
|
86
86
|
},
|
|
87
87
|
},
|
|
88
88
|
{
|
|
@@ -93,6 +93,14 @@ ${domReady(`
|
|
|
93
93
|
fieldview: "password",
|
|
94
94
|
showIf: { backend: "AI SDK", ai_sdk_provider: "OpenAI" },
|
|
95
95
|
},
|
|
96
|
+
{
|
|
97
|
+
name: "anthropic_api_key",
|
|
98
|
+
label: "API key",
|
|
99
|
+
type: "String",
|
|
100
|
+
required: true,
|
|
101
|
+
fieldview: "password",
|
|
102
|
+
showIf: { backend: "AI SDK", ai_sdk_provider: "Anthropic" },
|
|
103
|
+
},
|
|
96
104
|
{
|
|
97
105
|
name: "model",
|
|
98
106
|
label: "Model", //gpt-3.5-turbo
|
|
@@ -100,7 +108,17 @@ ${domReady(`
|
|
|
100
108
|
required: true,
|
|
101
109
|
showIf: { backend: "AI SDK" },
|
|
102
110
|
attributes: {
|
|
103
|
-
calcOptions: [
|
|
111
|
+
calcOptions: [
|
|
112
|
+
"ai_sdk_provider",
|
|
113
|
+
{
|
|
114
|
+
OpenAI: OPENAI_MODELS,
|
|
115
|
+
Anthropic: [
|
|
116
|
+
"claude-opus-4-6",
|
|
117
|
+
"claude-sonnet-4-6",
|
|
118
|
+
"claude-haiku-4-5",
|
|
119
|
+
],
|
|
120
|
+
},
|
|
121
|
+
],
|
|
104
122
|
},
|
|
105
123
|
},
|
|
106
124
|
{
|
|
@@ -108,7 +126,7 @@ ${domReady(`
|
|
|
108
126
|
label: "Embedding model", //gpt-3.5-turbo
|
|
109
127
|
type: "String",
|
|
110
128
|
required: true,
|
|
111
|
-
showIf: { backend: "AI SDK" },
|
|
129
|
+
showIf: { backend: "AI SDK", ai_sdk_provider: ["OpenAI"] },
|
|
112
130
|
attributes: {
|
|
113
131
|
calcOptions: [
|
|
114
132
|
"ai_sdk_provider",
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@saltcorn/large-language-model",
|
|
3
|
-
"version": "1.0.
|
|
3
|
+
"version": "1.0.3",
|
|
4
4
|
"description": "Large language models and functionality for Saltcorn",
|
|
5
5
|
"main": "index.js",
|
|
6
6
|
"dependencies": {
|
|
@@ -13,6 +13,7 @@
|
|
|
13
13
|
"googleapis": "^144.0.0",
|
|
14
14
|
"ai": "5.0.44",
|
|
15
15
|
"@ai-sdk/openai": "2.0.30",
|
|
16
|
+
"@ai-sdk/anthropic": "2.0.70",
|
|
16
17
|
"openai": "6.16.0",
|
|
17
18
|
"@elevenlabs/elevenlabs-js": "2.31.0"
|
|
18
19
|
},
|
package/tests/configs.js
CHANGED
|
@@ -31,4 +31,13 @@ module.exports = [
|
|
|
31
31
|
temperature: 0.7,
|
|
32
32
|
ai_sdk_provider: "OpenAI",
|
|
33
33
|
},
|
|
34
|
+
{
|
|
35
|
+
name: "AI SDK Anthropic",
|
|
36
|
+
model: "claude-sonnet-4-6",
|
|
37
|
+
api_key: process.env.ANTHROPIC_API_KEY,
|
|
38
|
+
backend: "AI SDK",
|
|
39
|
+
image_model: "gpt-image-1",
|
|
40
|
+
temperature: 0.7,
|
|
41
|
+
ai_sdk_provider: "Anthropic",
|
|
42
|
+
},
|
|
34
43
|
];
|
package/tests/llm.test.js
CHANGED
|
@@ -165,6 +165,15 @@ for (const nameconfig of require("./configs")) {
|
|
|
165
165
|
|
|
166
166
|
expect(cities1.length).toBe(12);
|
|
167
167
|
});
|
|
168
|
+
if (name !== "AI SDK Anthropic")
|
|
169
|
+
it("gets embedding", async () => {
|
|
170
|
+
const v = await getState().functions.llm_embedding.run(
|
|
171
|
+
"The quick brown fox jumps over the lazy dog",
|
|
172
|
+
);
|
|
173
|
+
expect(Array.isArray(v)).toBe(true);
|
|
174
|
+
expect(v.length).toBeGreaterThan(50);
|
|
175
|
+
expect(typeof v[0]).toBe("number");
|
|
176
|
+
});
|
|
168
177
|
});
|
|
169
178
|
}
|
|
170
179
|
|