@saltcorn/large-language-model 0.7.7 → 0.7.9
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/generate.js +18 -14
- package/package.json +2 -2
package/generate.js
CHANGED
|
@@ -9,7 +9,7 @@ const {
|
|
|
9
9
|
} = require("@google-cloud/aiplatform");
|
|
10
10
|
const { google } = require("googleapis");
|
|
11
11
|
const Plugin = require("@saltcorn/data/models/plugin");
|
|
12
|
-
|
|
12
|
+
const path = require("path");
|
|
13
13
|
const { features, getState } = require("@saltcorn/data/db/state");
|
|
14
14
|
let ollamaMod;
|
|
15
15
|
if (features.esm_plugins) ollamaMod = require("ollama");
|
|
@@ -104,19 +104,15 @@ const getCompletion = async (config, opts) => {
|
|
|
104
104
|
opts
|
|
105
105
|
);
|
|
106
106
|
case "Local Ollama":
|
|
107
|
-
|
|
108
|
-
|
|
109
|
-
|
|
110
|
-
|
|
111
|
-
|
|
112
|
-
|
|
107
|
+
return await getCompletionOpenAICompatible(
|
|
108
|
+
{
|
|
109
|
+
chatCompleteEndpoint: config.ollama_host
|
|
110
|
+
? path.join(config.ollama_host, "v1/chat/completions")
|
|
111
|
+
: "http://localhost:11434/v1/chat/completions",
|
|
112
|
+
model: opts?.model || config.model,
|
|
113
|
+
},
|
|
114
|
+
opts
|
|
113
115
|
);
|
|
114
|
-
const olres = await ollama.generate({
|
|
115
|
-
model: opts?.model || config.model,
|
|
116
|
-
...opts,
|
|
117
|
-
});
|
|
118
|
-
//console.log("the response ", olres);
|
|
119
|
-
return olres.response;
|
|
120
116
|
case "Local llama.cpp":
|
|
121
117
|
//TODO only check if unsafe plugins not allowed
|
|
122
118
|
const isRoot = db.getTenantSchema() === db.connectObj.default_schema;
|
|
@@ -175,7 +171,7 @@ const getCompletionOpenAICompatible = async (
|
|
|
175
171
|
content: systemPrompt || "You are a helpful assistant.",
|
|
176
172
|
},
|
|
177
173
|
...chat,
|
|
178
|
-
{ role: "user", content: prompt },
|
|
174
|
+
...(prompt ? [{ role: "user", content: prompt }] : []),
|
|
179
175
|
],
|
|
180
176
|
temperature: temperature || 0.7,
|
|
181
177
|
...rest,
|
|
@@ -189,6 +185,13 @@ const getCompletionOpenAICompatible = async (
|
|
|
189
185
|
"headers",
|
|
190
186
|
JSON.stringify(headers)
|
|
191
187
|
);
|
|
188
|
+
else
|
|
189
|
+
getState().log(
|
|
190
|
+
6,
|
|
191
|
+
`OpenAI request ${JSON.stringify(
|
|
192
|
+
body
|
|
193
|
+
)} to ${chatCompleteEndpoint} headers ${JSON.stringify(headers)}`
|
|
194
|
+
);
|
|
192
195
|
const rawResponse = await fetch(chatCompleteEndpoint, {
|
|
193
196
|
method: "POST",
|
|
194
197
|
headers,
|
|
@@ -197,6 +200,7 @@ const getCompletionOpenAICompatible = async (
|
|
|
197
200
|
const results = await rawResponse.json();
|
|
198
201
|
if (debugResult)
|
|
199
202
|
console.log("OpenAI response", JSON.stringify(results, null, 2));
|
|
203
|
+
else getState().log(6, `OpenAI response ${JSON.stringify(results)}`);
|
|
200
204
|
if (results.error) throw new Error(`OpenAI error: ${results.error.message}`);
|
|
201
205
|
|
|
202
206
|
return results?.choices?.[0]?.message?.tool_calls
|
package/package.json
CHANGED
|
@@ -1,13 +1,13 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@saltcorn/large-language-model",
|
|
3
|
-
"version": "0.7.7",
|
|
3
|
+
"version": "0.7.9",
|
|
4
4
|
"description": "Large language models and functionality for Saltcorn",
|
|
5
5
|
"main": "index.js",
|
|
6
6
|
"dependencies": {
|
|
7
7
|
"@saltcorn/data": "^0.9.0",
|
|
8
8
|
"node-fetch": "2.6.9",
|
|
9
9
|
"underscore": "1.13.6",
|
|
10
|
-
"ollama": "0.5.
|
|
10
|
+
"ollama": "0.5.15",
|
|
11
11
|
"@google-cloud/vertexai": "^1.9.3",
|
|
12
12
|
"@google-cloud/aiplatform": "^3.34.0",
|
|
13
13
|
"googleapis": "^144.0.0"
|