@saltcorn/large-language-model 0.8.6 → 0.8.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/generate.js +17 -1
- package/index.js +13 -1
- package/package.json +1 -1
package/generate.js
CHANGED
@@ -165,7 +165,16 @@ const getCompletion = async (config, opts) => {
 
 const getCompletionOpenAICompatible = async (
   { chatCompleteEndpoint, bearer, apiKey, model, responses_api, temperature },
-  {
+  {
+    systemPrompt,
+    prompt,
+    debugResult,
+    debugCollector,
+    chat = [],
+    api_key,
+    endpoint,
+    ...rest
+  }
 ) => {
   const headers = {
     "Content-Type": "application/json",
@@ -281,6 +290,8 @@ const getCompletionOpenAICompatible = async (
       body
     )} to ${chatCompleteEndpoint} headers ${JSON.stringify(headers)}`
   );
+  if (debugCollector) debugCollector.request = body;
+
   const rawResponse = await fetch(chatCompleteEndpoint, {
     method: "POST",
     headers,
@@ -291,6 +302,8 @@ const getCompletionOpenAICompatible = async (
   if (debugResult)
     console.log("OpenAI response", JSON.stringify(results, null, 2));
   else getState().log(6, `OpenAI response ${JSON.stringify(results)}`);
+  if (debugCollector) debugCollector.response = results;
+
   if (results.error) throw new Error(`OpenAI error: ${results.error.message}`);
   if (responses_api) {
     const textOutput = results.output
@@ -341,6 +354,7 @@ const getImageGenOpenAICompatible = async (
     prompt,
     model,
     debugResult,
+    debugCollector,
     size,
     quality,
     n,
@@ -367,6 +381,7 @@ const getImageGenOpenAICompatible = async (
   if (response_format) body.response_format = response_format;
   if (n) body.n = n;
   if (debugResult) console.log("OpenAI image request", imageEndpoint, body);
+  if (debugCollector) debugCollector.request = body;
 
   const rawResponse = await fetch(imageEndpoint, {
     method: "POST",
@@ -374,6 +389,7 @@ const getImageGenOpenAICompatible = async (
     body: JSON.stringify(body),
   });
   const results = await rawResponse.json();
+  if (debugCollector) debugCollector.response = results;
   if (debugResult) console.log("OpenAI image response", results);
   if (results.error) throw new Error(`OpenAI error: ${results.error.message}`);
   return results?.data?.[0];
package/index.js
CHANGED
@@ -601,6 +601,13 @@ module.exports = {
       label: r.role,
     }));
     const commonFields = [
+      {
+        name: "filename",
+        label: "File name",
+        type: "String",
+        sublabel:
+          "Name of the generated file. Interpolations <code>{{ }}</code> available",
+      },
       {
         label: "Minimum role to access",
         name: "min_role",
@@ -683,6 +690,7 @@ module.exports = {
         answer_field,
         min_role,
         model,
+        filename,
       },
     }) => {
       let prompt;
@@ -696,6 +704,10 @@ module.exports = {
           "llm_generate prompt formula"
         );
       else prompt = row[prompt_field];
+
+      const use_filename = filename
+        ? interpolate(filename, row, user, "llm_generate_image file name")
+        : "generated.png";
       const opts = { debugResult: true }; // response_format: "b64_json" };
 
       if (model) opts.model = model;
@@ -712,7 +724,7 @@ module.exports = {
       } else if (ans.b64_json) {
         const imgContents = Buffer.from(ans.b64_json, "base64");
         const file = await File.from_contents(
-          "generated.png",
+          use_filename,
           "image/png",
           imgContents,
           user?.id,