@saltcorn/large-language-model 0.7.9 → 0.8.0
This diff shows the changes between publicly released versions of this package, as they appear in their respective public registries. It is provided for informational purposes only.
- package/function-insert-action.js +16 -5
- package/generate.js +105 -14
- package/index.js +19 -2
- package/package.json +1 -1
package/function-insert-action.js CHANGED

@@ -11,7 +11,7 @@ const { eval_expression } = require("@saltcorn/data/models/expression");
 const noSpaces = (s) => s.replaceAll(" ", "");
 module.exports = (config) => ({
   description: "Use LLM function call to insert rows in tables",
-  requireRow: true,
+  //requireRow: true,
   disableInList: true,
   disableInBuilder: true,
   configFields: async ({ table }) => {
@@ -22,7 +22,9 @@ module.exports = (config) => ({
         label: "Prompt",
         type: "String",
         fieldview: "textarea",
-        sublabel: `Use interpolations {{ }} to access variables in ${table.name} table.`,
+        sublabel: table
+          ? `Use interpolations {{ }} to access variables in ${table.name} table.`
+          : undefined,
       },
       {
         name: "function_name",
@@ -55,6 +57,7 @@ module.exports = (config) => ({
         name: "cardinality",
         label: "Cardinality",
         type: "String",
+        sublabel: "How many rows to generate",
         required: true,
         attributes: {
           options: ["One", /*"Zero or one",*/ "Zero to many"],
@@ -78,6 +81,9 @@ module.exports = (config) => ({
     const prompt = interpolate(prompt_template, row, user);
     let args = {};
     const json_type = (ty) => {
+      if (ty?.name === "Date") return "string";
+      //console.log("getting type of ", ty);
+
       if (ty?.js_type) return ty?.js_type;
     };
 
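Note on the json_type helper extended in the hunk above: Saltcorn Date types carry no js_type that maps onto a JSON-schema type, so the new guard declares them as "string". A minimal sketch of the resulting mapping (the sample type objects are hypothetical, not from the package):

const json_type = (ty) => {
  if (ty?.name === "Date") return "string"; // dates are serialized as strings in the schema
  if (ty?.js_type) return ty?.js_type; // e.g. "string", "number", "boolean"
};

json_type({ name: "Date" }); // "string"
json_type({ name: "Integer", js_type: "number" }); // "number" (hypothetical type object)
json_type(undefined); // undefined, so the field gets no declared type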
@@ -95,7 +101,7 @@ module.exports = (config) => ({
         if (typeof fixed[field.name] !== "undefined") continue;
         tableArgs[field.name] = {
           type: json_type(field.type),
-          description: field.description,
+          description: field.description || field.label,
         };
       }
       const argObj = { type: "object", properties: tableArgs };
@@ -116,6 +122,7 @@ module.exports = (config) => ({
         },
       },
     };
+
     const toolargs = {
       tools: [expert_function],
       tool_choice: { type: "function", function: { name: function_name } },
@@ -125,10 +132,11 @@ module.exports = (config) => ({
     getState().log(6, `llm_function_call completion: ${JSON.stringify(compl)}`);
     const response = JSON.parse(compl.tool_calls[0].function.arguments);
     //console.log("response: ", JSON.stringify(response, null, 2));
+    const retval = {};
     for (const col of columns) {
       const target_table = Table.findOne({ name: col.target_table });
       const fixed = eval_expression(
-        col.fixed_values,
+        col.fixed_values || {},
         row,
         user,
         "llm_function_call fixed values"
@@ -139,14 +147,17 @@ module.exports = (config) => ({
           ...(response[noSpaces(target_table.name)] || {}),
           ...fixed,
         };
+        retval[noSpaces(target_table.name)] = row;
         await target_table.insertRow(row, user);
       } else {
+        retval[noSpaces(target_table.name)] = [];
         for (const resp of response[noSpaces(target_table.name)] || []) {
           const row = { ...resp, ...fixed };
+          retval[noSpaces(target_table.name)].push(row);
           await target_table.insertRow(row, user);
         }
       }
     }
-    return
+    return retval;
   },
 });
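Note: the run handler previously ended with a bare return; it now accumulates every inserted row in retval, keyed by the target table name with spaces stripped (noSpaces). A sketch of the shape a caller can expect, using hypothetical tables "Customer" (cardinality "One") and "Order Items" (cardinality "Zero to many"):

const retval = {
  // cardinality "One": the single inserted row
  Customer: { name: "Alice", city: "Oslo" },
  // cardinality "Zero to many": an array of inserted rows
  OrderItems: [
    { sku: "A-1", qty: 2 },
    { sku: "B-7", qty: 1 },
  ],
};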
package/generate.js CHANGED

@@ -83,9 +83,12 @@ const getCompletion = async (config, opts) => {
     case "OpenAI":
       return await getCompletionOpenAICompatible(
         {
-          chatCompleteEndpoint: "https://api.openai.com/v1/chat/completions",
+          chatCompleteEndpoint: config.responses_api
+            ? "https://api.openai.com/v1/responses"
+            : "https://api.openai.com/v1/chat/completions",
           bearer: opts?.api_key || opts?.bearer || config.api_key,
           model: opts?.model || config.model,
+          responses_api: config.responses_api,
         },
         opts
       );
@@ -144,7 +147,7 @@ const getCompletion = async (config, opts) => {
 };
 
 const getCompletionOpenAICompatible = async (
-  { chatCompleteEndpoint, bearer, apiKey, model },
+  { chatCompleteEndpoint, bearer, apiKey, model, responses_api },
   {
     systemPrompt,
     prompt,
@@ -165,17 +168,79 @@ const getCompletionOpenAICompatible = async (
   const body = {
     //prompt: "How are you?",
     model: rest.model || model,
-    messages: [
+    temperature: temperature || 0.7,
+    ...rest,
+  };
+  if (responses_api) {
+    for (const tool of body.tools || []) {
+      if (tool.type !== "function") continue;
+      tool.name = tool.function.name;
+      tool.description = tool.function.description;
+      tool.parameters = tool.function.parameters;
+      if (tool.function.required) tool.required = tool.function.required;
+      delete tool.function;
+    }
+    const newChat = [];
+    (chat || []).forEach((c) => {
+      if (c.tool_calls) {
+        c.tool_calls.forEach((tc) => {
+          newChat.push({
+            id: tc.id,
+            type: "function_call",
+            call_id: tc.call_id,
+            name: tc.name,
+            arguments: tc.arguments,
+          });
+        });
+      } else if (c.content?.image_calls) {
+        c.content.image_calls.forEach((ic) => {
+          newChat.push({
+            ...ic,
+            result: undefined,
+            filename: undefined,
+          });
+        });
+      } else if (c.role === "tool") {
+        newChat.push({
+          type: "function_call_output",
+          call_id: c.call_id,
+          output: c.content,
+        });
+      } else {
+        const fcontent = (c) => {
+          if (c.type === "image_url")
+            return {
+              type: "input_image",
+              image_url: c.image_url.url,
+            };
+          else return c;
+        };
+        newChat.push({
+          ...c,
+          content: Array.isArray(c.content)
+            ? c.content.map(fcontent)
+            : c.content,
+        });
+      }
+    });
+    body.input = [
+      {
+        role: "system",
+        content: systemPrompt || "You are a helpful assistant.",
+      },
+      ...newChat,
+      ...(prompt ? [{ role: "user", content: prompt }] : []),
+    ];
+  } else {
+    body.messages = [
       {
         role: "system",
         content: systemPrompt || "You are a helpful assistant.",
       },
       ...chat,
       ...(prompt ? [{ role: "user", content: prompt }] : []),
-    ],
-    temperature: temperature || 0.7,
-    ...rest,
-  };
+    ];
+  }
   if (debugResult)
     console.log(
       "OpenAI request",
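Note: the hunk above is the request-building half of the new Responses API support. When config.responses_api is set, tool definitions are flattened out of their Chat Completions "function" wrapper, prior chat messages are rewritten as Responses input items (assistant tool calls become function_call items, role "tool" messages become function_call_output items, image_url content parts become input_image), and the conversation is sent as body.input instead of body.messages. A sketch of the tool flattening, with a hypothetical insert_order tool:

// Chat Completions shape: the spec is nested under "function"
const chatTool = {
  type: "function",
  function: {
    name: "insert_order",
    description: "Insert an order row",
    parameters: { type: "object", properties: { sku: { type: "string" } } },
  },
};

// Responses API shape, as produced by the loop above: same fields, hoisted to the top level
const responsesTool = {
  type: "function",
  name: "insert_order",
  description: "Insert an order row",
  parameters: { type: "object", properties: { sku: { type: "string" } } },
};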
@@ -198,19 +263,45 @@ const getCompletionOpenAICompatible = async (
     body: JSON.stringify(body),
   });
   const results = await rawResponse.json();
+  //console.log("results", results);
   if (debugResult)
     console.log("OpenAI response", JSON.stringify(results, null, 2));
   else getState().log(6, `OpenAI response ${JSON.stringify(results)}`);
   if (results.error) throw new Error(`OpenAI error: ${results.error.message}`);
-
-  return results?.choices?.[0]?.message?.tool_calls
-    ? {
-        tool_calls: results?.choices?.[0]?.message?.tool_calls,
-        content: results?.choices?.[0]?.message?.content || null,
-      }
-    : results?.choices?.[0]?.message?.content || null;
+  if (responses_api) {
+    const textOutput = results.output
+      .filter((o) => o.type === "message")
+      .map((o) => o.content.map((c) => c.text).join(""))
+      .join("");
+    return results.output.some(
+      (o) => o.type === "function_call" || o.type === "image_generation_call"
+    )
+      ? {
+          tool_calls: emptyToUndefined(
+            results.output
+              .filter((o) => o.type === "function_call")
+              .map((o) => ({
+                function: { name: o.name, arguments: o.arguments },
+                ...o,
+              }))
+          ),
+          image_calls: emptyToUndefined(
+            results.output.filter((o) => o.type === "image_generation_call")
+          ),
+          content: textOutput || null,
+        }
+      : textOutput || null;
+  } else
+    return results?.choices?.[0]?.message?.tool_calls
+      ? {
+          tool_calls: results?.choices?.[0]?.message?.tool_calls,
+          content: results?.choices?.[0]?.message?.content || null,
+        }
+      : results?.choices?.[0]?.message?.content || null;
 };
 
+const emptyToUndefined = (xs) => (xs.length ? xs : undefined);
+
 const getEmbeddingOpenAICompatible = async (
   config,
   { prompt, model, debugResult }
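Note: this hunk is the response-parsing half. The Responses API returns a flat output array rather than choices[0].message, so the new branch folds message, function_call and image_generation_call items back into the { tool_calls, image_calls, content } shape the rest of the plugin already consumes; emptyToUndefined keeps the no-tool-call case looking identical on both code paths. A sketch with a hypothetical output array:

const results = {
  output: [
    { type: "message", content: [{ type: "output_text", text: "Done." }] },
    {
      type: "function_call",
      call_id: "c1",
      name: "insert_order",
      arguments: '{"sku":"A-1"}',
    },
  ],
};
// The branch above would return roughly:
// {
//   tool_calls: [
//     {
//       function: { name: "insert_order", arguments: '{"sku":"A-1"}' },
//       type: "function_call",
//       call_id: "c1",
//       name: "insert_order",
//       arguments: '{"sku":"A-1"}',
//     },
//   ],
//   image_calls: undefined,
//   content: "Done.",
// }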
package/index.js CHANGED

@@ -69,9 +69,9 @@ ${domReady(`
       name: "ollama_host",
       label: "Host",
       sublabel: "Optional, for remote ollama server",
-      type: "String",
+      type: "String",
       showIf: { backend: "Local Ollama" },
-    },
+    },
     {
       name: "client_id",
       label: "Client ID",
@@ -172,7 +172,16 @@ ${domReady(`
       sublabel: "From your OpenAI account",
       type: "String",
       required: true,
+      fieldview: "password",
+      showIf: { backend: "OpenAI" },
+    },
+    {
+      name: "responses_api",
+      label: "Response API", //gpt-3.5-turbo
+      type: "Bool",
+      sublabel: "Use the newer Responses API",
       showIf: { backend: "OpenAI" },
+
     },
     {
       name: "llama_dir",
@@ -462,6 +471,12 @@ module.exports = {
         type: "String",
       },
       ...override_fields,
+      {
+        name: "model",
+        label: "Model",
+        sublabel: "Override default model name",
+        type: "String",
+      },
     ];
   } else if (table) {
     const textFields = table.fields
@@ -507,6 +522,7 @@ module.exports = {
         answer_field,
         override_config,
         chat_history_field,
+        model,
       },
     }) => {
       let prompt;
@@ -530,6 +546,7 @@ module.exports = {
         opts.api_key = altcfg.api_key;
         opts.bearer = altcfg.bearer;
       }
+      if (model) opts.model = model;
       let history = [];
       if (chat_history_field && row[chat_history_field]) {
         history = row[chat_history_field];
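Note: the new per-action "Model" config field added above overrides the plugin-wide default at call time. It is copied onto opts just before the completion call, and generate.js resolves opts?.model || config.model, so the field wins when set. A sketch of the precedence, with illustrative values:

const config = { backend: "OpenAI", model: "gpt-4o-mini" }; // plugin-wide default
const opts = { prompt: "Summarize this ticket" };
const model = "gpt-4o"; // the action's optional Model override, may be undefined
if (model) opts.model = model;
// generate.js then picks: opts?.model || config.model -> "gpt-4o"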