@saltcorn/large-language-model 0.9.4 → 0.9.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/generate.js +155 -1
- package/index.js +65 -3
- package/package.json +4 -2
package/generate.js
CHANGED
|
@@ -11,11 +11,29 @@ const { google } = require("googleapis");
|
|
|
11
11
|
const Plugin = require("@saltcorn/data/models/plugin");
|
|
12
12
|
const path = require("path");
|
|
13
13
|
const { features, getState } = require("@saltcorn/data/db/state");
|
|
14
|
+
const {
|
|
15
|
+
generateText,
|
|
16
|
+
streamText,
|
|
17
|
+
tool,
|
|
18
|
+
jsonSchema,
|
|
19
|
+
embed,
|
|
20
|
+
embedMany,
|
|
21
|
+
} = require("ai");
|
|
22
|
+
const { createOpenAI } = require("@ai-sdk/openai");
|
|
14
23
|
let ollamaMod;
|
|
15
24
|
if (features.esm_plugins) ollamaMod = require("ollama");
|
|
16
25
|
|
|
17
26
|
const getEmbedding = async (config, opts) => {
|
|
18
27
|
switch (config.backend) {
|
|
28
|
+
case "AI SDK":
|
|
29
|
+
return await getEmbeddingAISDK(
|
|
30
|
+
{
|
|
31
|
+
provider: config.ai_sdk_provider,
|
|
32
|
+
apiKey: config.api_key,
|
|
33
|
+
embed_model: opts?.embed_model || config.embed_model || config.model,
|
|
34
|
+
},
|
|
35
|
+
opts
|
|
36
|
+
);
|
|
19
37
|
case "OpenAI":
|
|
20
38
|
return await getEmbeddingOpenAICompatible(
|
|
21
39
|
{
|
|
@@ -97,6 +115,15 @@ const getImageGeneration = async (config, opts) => {
|
|
|
97
115
|
|
|
98
116
|
const getCompletion = async (config, opts) => {
|
|
99
117
|
switch (config.backend) {
|
|
118
|
+
case "AI SDK":
|
|
119
|
+
return await getCompletionAISDK(
|
|
120
|
+
{
|
|
121
|
+
provider: config.ai_sdk_provider,
|
|
122
|
+
apiKey: config.api_key,
|
|
123
|
+
model: opts?.model || config.model,
|
|
124
|
+
},
|
|
125
|
+
opts
|
|
126
|
+
);
|
|
100
127
|
case "OpenAI":
|
|
101
128
|
return await getCompletionOpenAICompatible(
|
|
102
129
|
{
|
|
@@ -163,6 +190,104 @@ const getCompletion = async (config, opts) => {
|
|
|
163
190
|
}
|
|
164
191
|
};
|
|
165
192
|
|
|
193
|
+
/**
 * Run a chat completion through the Vercel AI SDK.
 *
 * @param {object} config - plugin configuration
 * @param {string} [config.apiKey] - provider API key from plugin config
 * @param {string} [config.model] - default model name from plugin config
 * @param {string} config.provider - AI SDK provider name (currently only "OpenAI")
 * @param {number|string} [config.temperature] - default sampling temperature
 * @param {object} opts - per-call options
 * @param {string} [opts.systemPrompt] - system message (defaults to a generic assistant prompt)
 * @param {string} [opts.prompt] - user message appended after `chat`
 * @param {boolean} [opts.debugResult] - log full request/response to the console
 * @param {object} [opts.debugCollector] - mutated with `request`, `response`, `response_time_ms`
 * @param {Array} [opts.chat] - prior messages inserted between system and user prompt
 * @param {string} [opts.api_key] - per-call API key override
 * @returns {Promise<string|object>} the completion text, or — when the model made
 *   tool calls — `{ tool_calls, content, messages, ai_sdk: true }`
 * @throws {Error} when the configured provider is not supported
 */
const getCompletionAISDK = async (
  { apiKey, model, provider, temperature },
  {
    systemPrompt,
    prompt,
    debugResult,
    debugCollector,
    chat = [],
    api_key,
    endpoint, // destructured only to keep it out of `rest` (not an AI SDK option)
    ...rest
  }
) => {
  const use_model_name = rest.model || model;
  let model_obj;
  switch (provider) {
    case "OpenAI": {
      // braces scope the lexical declaration to this case (no-case-declarations)
      const openai = createOpenAI({ apiKey: api_key || apiKey });
      model_obj = openai(use_model_name);
      break;
    }
    default:
      // fail fast instead of passing an undefined model into generateText
      throw new Error(`AI SDK provider not supported: ${provider}`);
  }

  const body = {
    ...rest,
    model: model_obj,
    messages: [
      {
        role: "system",
        content: systemPrompt || "You are a helpful assistant.",
      },
      ...chat,
      ...(prompt ? [{ role: "user", content: prompt }] : []),
    ],
  };
  // Temperature precedence: per-call value, then config default. Use nullish
  // checks so an explicit temperature of 0 is honored (|| would drop it).
  const rawTemp = rest.temperature ?? temperature;
  if (rawTemp !== undefined && rawTemp !== null) {
    body.temperature = +rawTemp; // may arrive as a string from config
  } else if (rest.temperature === null) {
    // explicit null means "send no temperature at all"
    delete body.temperature;
  } else if (typeof temperature === "undefined") {
    // Reasoning models reject a temperature parameter; default 0.7 elsewhere.
    if (
      ![
        "o1",
        "o3",
        "o3-mini",
        "o4-mini",
        "gpt-5",
        "gpt-5-nano",
        "gpt-5-mini",
      ].includes(use_model_name)
    )
      body.temperature = 0.7;
  }
  // Convert OpenAI-style tool descriptors into the AI SDK's keyed tool map.
  if (body.tools) {
    const prevTools = [...body.tools];
    body.tools = {};
    prevTools.forEach((t) => {
      body.tools[t.function.name] = tool({
        description: t.function.description,
        inputSchema: jsonSchema(t.function.parameters),
      });
    });
  }

  // Log the request with the plain model name (model_obj is not serializable).
  const debugRequest = { ...body, model: use_model_name };
  if (debugResult)
    console.log("AI SDK request", JSON.stringify(debugRequest, null, 2));
  getState().log(6, `AI SDK request ${JSON.stringify(debugRequest)} `);
  if (debugCollector) debugCollector.request = debugRequest;
  const reqTimeStart = Date.now();

  let results;
  if (rest.streamCallback) {
    // streamCallback is our own option, not an AI SDK one — strip it first
    delete body.streamCallback;
    results = await streamText(body);
    for await (const textPart of results.textStream) {
      rest.streamCallback(textPart);
    }
  } else results = await generateText(body);
  if (debugResult)
    console.log("AI SDK response", JSON.stringify(results, null, 2));
  else getState().log(6, `AI SDK response ${JSON.stringify(results)}`);
  if (debugCollector) {
    debugCollector.response = results;
    debugCollector.response_time_ms = Date.now() - reqTimeStart;
  }
  // streamText exposes steps/text/response as promises; generateText resolves
  // them eagerly — awaiting works uniformly for both.
  const allToolCalls = (await results.steps).flatMap((step) => step.toolCalls);

  if (allToolCalls.length) {
    return {
      tool_calls: allToolCalls,
      content: await results.text,
      messages: (await results.response).messages,
      ai_sdk: true,
    };
  } else return results.text;
};
|
|
290
|
+
|
|
166
291
|
const getCompletionOpenAICompatible = async (
|
|
167
292
|
{ chatCompleteEndpoint, bearer, apiKey, model, responses_api, temperature },
|
|
168
293
|
{
|
|
@@ -507,7 +632,6 @@ const getEmbeddingOpenAICompatible = async (
|
|
|
507
632
|
if (bearer) headers.Authorization = "Bearer " + bearer;
|
|
508
633
|
if (apiKey) headers["api-key"] = apiKey;
|
|
509
634
|
const body = {
|
|
510
|
-
//prompt: "How are you?",
|
|
511
635
|
model: model || embed_model || "text-embedding-3-small",
|
|
512
636
|
input: prompt,
|
|
513
637
|
};
|
|
@@ -525,6 +649,36 @@ const getEmbeddingOpenAICompatible = async (
|
|
|
525
649
|
return results?.data?.[0]?.embedding;
|
|
526
650
|
};
|
|
527
651
|
|
|
652
|
+
/**
 * Compute embeddings through the Vercel AI SDK.
 *
 * @param {object} config - plugin configuration (`provider`, `apiKey`, `embed_model`)
 * @param {object} opts - per-call options
 * @param {string|string[]} opts.prompt - single input or array of inputs to embed
 * @param {string} [opts.model] - per-call embedding model override
 * @returns {Promise<number[]|number[][]>} one embedding vector, or an array of
 *   vectors when `prompt` is an array
 */
const getEmbeddingAISDK = async (config, { prompt, model, debugResult }) => {
  const { provider, apiKey, embed_model } = config;
  const providerOptions = {};
  const chosenModel = model || embed_model;

  let modelHandle;
  if (provider === "OpenAI") {
    const openaiProvider = createOpenAI({ apiKey });
    modelHandle = openaiProvider.textEmbeddingModel(
      chosenModel || "text-embedding-3-small"
    );
  }

  const request = { model: modelHandle, providerOptions };
  if (Array.isArray(prompt)) {
    // batch of inputs -> embedMany returns one vector per input
    const { embeddings } = await embedMany({ ...request, values: prompt });
    return embeddings;
  }
  // single input -> embed returns a single vector
  const { embedding } = await embed({ ...request, value: prompt });
  return embedding;
};
|
|
681
|
+
|
|
528
682
|
const updatePluginTokenCfg = async (credentials) => {
|
|
529
683
|
let plugin = await Plugin.findOne({ name: "large-language-model" });
|
|
530
684
|
if (!plugin) {
|
package/index.js
CHANGED
|
@@ -67,10 +67,58 @@ ${domReady(`
|
|
|
67
67
|
"Local Ollama",
|
|
68
68
|
...(isRoot ? ["Local llama.cpp"] : []),
|
|
69
69
|
"Google Vertex AI",
|
|
70
|
+
"AI SDK",
|
|
70
71
|
],
|
|
71
72
|
onChange: "backendChange(this)",
|
|
72
73
|
},
|
|
73
74
|
},
|
|
75
|
+
{
|
|
76
|
+
name: "ai_sdk_provider",
|
|
77
|
+
label: "Provider", //gpt-3.5-turbo
|
|
78
|
+
type: "String",
|
|
79
|
+
required: true,
|
|
80
|
+
showIf: { backend: "AI SDK" },
|
|
81
|
+
attributes: {
|
|
82
|
+
options: ["OpenAI"],
|
|
83
|
+
},
|
|
84
|
+
},
|
|
85
|
+
{
|
|
86
|
+
name: "api_key",
|
|
87
|
+
label: "API key",
|
|
88
|
+
type: "String",
|
|
89
|
+
required: true,
|
|
90
|
+
fieldview: "password",
|
|
91
|
+
showIf: { backend: "AI SDK", ai_sdk_provider: "OpenAI" },
|
|
92
|
+
},
|
|
93
|
+
{
|
|
94
|
+
name: "model",
|
|
95
|
+
label: "Model", //gpt-3.5-turbo
|
|
96
|
+
type: "String",
|
|
97
|
+
required: true,
|
|
98
|
+
showIf: { backend: "AI SDK" },
|
|
99
|
+
attributes: {
|
|
100
|
+
calcOptions: ["ai_sdk_provider", { OpenAI: OPENAI_MODELS }],
|
|
101
|
+
},
|
|
102
|
+
},
|
|
103
|
+
{
|
|
104
|
+
name: "embed_model",
|
|
105
|
+
label: "Embedding model", //gpt-3.5-turbo
|
|
106
|
+
type: "String",
|
|
107
|
+
required: true,
|
|
108
|
+
showIf: { backend: "AI SDK" },
|
|
109
|
+
attributes: {
|
|
110
|
+
calcOptions: [
|
|
111
|
+
"ai_sdk_provider",
|
|
112
|
+
{
|
|
113
|
+
OpenAI: [
|
|
114
|
+
"text-embedding-3-small",
|
|
115
|
+
"text-embedding-3-large",
|
|
116
|
+
"text-embedding-ada-002",
|
|
117
|
+
],
|
|
118
|
+
},
|
|
119
|
+
],
|
|
120
|
+
},
|
|
121
|
+
},
|
|
74
122
|
{
|
|
75
123
|
name: "ollama_host",
|
|
76
124
|
label: "Host",
|
|
@@ -761,6 +809,7 @@ module.exports = {
|
|
|
761
809
|
{
|
|
762
810
|
name: "name",
|
|
763
811
|
label: "Name",
|
|
812
|
+
class: "validate-identifier",
|
|
764
813
|
sublabel: "The field name, as a valid JavaScript identifier",
|
|
765
814
|
type: "String",
|
|
766
815
|
required: true,
|
|
@@ -780,6 +829,13 @@ module.exports = {
|
|
|
780
829
|
options: ["string", "integer", "number", "boolean"],
|
|
781
830
|
},
|
|
782
831
|
},
|
|
832
|
+
{
|
|
833
|
+
name: "options",
|
|
834
|
+
label: "Options",
|
|
835
|
+
type: "String",
|
|
836
|
+
sublabel: "Optional. Comma-separated list of values",
|
|
837
|
+
showIf: { type: "string" },
|
|
838
|
+
},
|
|
783
839
|
],
|
|
784
840
|
});
|
|
785
841
|
|
|
@@ -895,6 +951,10 @@ module.exports = {
|
|
|
895
951
|
type: field.type,
|
|
896
952
|
description: field.description,
|
|
897
953
|
};
|
|
954
|
+
if (field.type === "string" && field.options)
|
|
955
|
+
fieldArgs[field.name].enum = field.options
|
|
956
|
+
.split(",")
|
|
957
|
+
.map((s) => s.trim());
|
|
898
958
|
});
|
|
899
959
|
const argObj = { type: "object", properties: fieldArgs };
|
|
900
960
|
const args = {
|
|
@@ -921,9 +981,11 @@ module.exports = {
|
|
|
921
981
|
...opts,
|
|
922
982
|
...toolargs,
|
|
923
983
|
});
|
|
924
|
-
|
|
925
|
-
|
|
926
|
-
]
|
|
984
|
+
console.log(JSON.stringify(compl, null, 2));
|
|
985
|
+
|
|
986
|
+
const ans = compl.tool_calls[0].input
|
|
987
|
+
? compl.tool_calls[0].input[answer_field]
|
|
988
|
+
: JSON.parse(compl.tool_calls[0].function.arguments)[answer_field];
|
|
927
989
|
const upd = { [answer_field]: ans };
|
|
928
990
|
if (chat_history_field) {
|
|
929
991
|
upd[chat_history_field] = [
|
package/package.json
CHANGED
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@saltcorn/large-language-model",
|
|
3
|
-
"version": "0.9.4",
|
|
3
|
+
"version": "0.9.6",
|
|
4
4
|
"description": "Large language models and functionality for Saltcorn",
|
|
5
5
|
"main": "index.js",
|
|
6
6
|
"dependencies": {
|
|
@@ -10,7 +10,9 @@
|
|
|
10
10
|
"ollama": "0.5.15",
|
|
11
11
|
"@google-cloud/vertexai": "^1.9.3",
|
|
12
12
|
"@google-cloud/aiplatform": "^3.34.0",
|
|
13
|
-
"googleapis": "^144.0.0"
|
|
13
|
+
"googleapis": "^144.0.0",
|
|
14
|
+
"ai": "5.0.44",
|
|
15
|
+
"@ai-sdk/openai": "2.0.30"
|
|
14
16
|
},
|
|
15
17
|
"author": "Tom Nielsen",
|
|
16
18
|
"license": "MIT",
|