@saltcorn/large-language-model 0.8.7 → 0.8.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/generate.js +17 -1
- package/package.json +1 -1
package/generate.js
CHANGED
@@ -165,7 +165,16 @@ const getCompletion = async (config, opts) => {
 
 const getCompletionOpenAICompatible = async (
   { chatCompleteEndpoint, bearer, apiKey, model, responses_api, temperature },
-  {
+  {
+    systemPrompt,
+    prompt,
+    debugResult,
+    debugCollector,
+    chat = [],
+    api_key,
+    endpoint,
+    ...rest
+  }
 ) => {
   const headers = {
     "Content-Type": "application/json",
@@ -281,6 +290,8 @@ const getCompletionOpenAICompatible = async (
       body
     )} to ${chatCompleteEndpoint} headers ${JSON.stringify(headers)}`
   );
+  if (debugCollector) debugCollector.request = body;
+
   const rawResponse = await fetch(chatCompleteEndpoint, {
     method: "POST",
     headers,
@@ -291,6 +302,8 @@ const getCompletionOpenAICompatible = async (
   if (debugResult)
     console.log("OpenAI response", JSON.stringify(results, null, 2));
   else getState().log(6, `OpenAI response ${JSON.stringify(results)}`);
+  if (debugCollector) debugCollector.response = results;
+
   if (results.error) throw new Error(`OpenAI error: ${results.error.message}`);
   if (responses_api) {
     const textOutput = results.output
@@ -341,6 +354,7 @@ const getImageGenOpenAICompatible = async (
     prompt,
     model,
     debugResult,
+    debugCollector,
     size,
     quality,
     n,
@@ -367,6 +381,7 @@ const getImageGenOpenAICompatible = async (
   if (response_format) body.response_format = response_format;
   if (n) body.n = n;
   if (debugResult) console.log("OpenAI image request", imageEndpoint, body);
+  if (debugCollector) debugCollector.request = body;
 
   const rawResponse = await fetch(imageEndpoint, {
     method: "POST",
@@ -374,6 +389,7 @@ const getImageGenOpenAICompatible = async (
     body: JSON.stringify(body),
   });
   const results = await rawResponse.json();
+  if (debugCollector) debugCollector.response = results;
   if (debugResult) console.log("OpenAI image response", results);
   if (results.error) throw new Error(`OpenAI error: ${results.error.message}`);
   return results?.data?.[0];