@saltcorn/large-language-model 0.2.1 → 0.3.0
- package/generate.js +76 -1
- package/index.js +32 -2
- package/package.json +3 -2
package/generate.js
CHANGED
@@ -3,6 +3,46 @@ const util = require("util");
 const exec = util.promisify(require("child_process").exec);
 const db = require("@saltcorn/data/db");

+const { features, getState } = require("@saltcorn/data/db/state");
+let ollamaMod;
+if (features.esm_plugins) ollamaMod = require("ollama");
+
+const getEmbedding = async (config, opts) => {
+  switch (config.backend) {
+    case "OpenAI":
+      return await getEmbeddingOpenAICompatible(
+        {
+          embeddingsEndpoint: "https://api.openai.com/v1/embeddings",
+          bearer: config.api_key,
+          embed_model: config.embed_model,
+        },
+        opts
+      );
+    case "OpenAI-compatible API":
+      return await getEmbeddingOpenAICompatible(
+        {
+          embeddingsEndpoint: config.embed_endpoint,
+          bearer: config.api_key,
+          embed_model: config.model,
+        },
+        opts
+      );
+    case "Local Ollama":
+      if (!ollamaMod) throw new Error("Not implemented for this backend");
+
+      const { Ollama } = ollamaMod;
+      const ollama = new Ollama();
+      const olres = await ollama.embeddings({
+        model: opts?.model || config.model,
+        prompt: opts.prompt,
+      });
+      //console.log("embedding response ", olres);
+      return olres.embedding;
+    default:
+      throw new Error("Not implemented for this backend");
+  }
+};
+
 const getCompletion = async (config, opts) => {
   switch (config.backend) {
     case "OpenAI":
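The new getEmbedding dispatcher mirrors getCompletion: it switches on config.backend and either posts to an OpenAI-style embeddings endpoint or calls the ollama client. A minimal usage sketch, assuming an OpenAI key in the environment and the default embedding model; the config values here are illustrative placeholders, not part of the diff:

const { getEmbedding } = require("./generate");

(async () => {
  // Illustrative config; in the plugin this object comes from the
  // configuration workflow defined in index.js.
  const config = {
    backend: "OpenAI",
    api_key: process.env.OPENAI_API_KEY,
    embed_model: "text-embedding-3-small",
  };
  const vector = await getEmbedding(config, { prompt: "Hello world" });
  console.log(Array.isArray(vector), vector.length); // a numeric array, e.g. 1536 dimensions for text-embedding-3-small
})();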
@@ -23,6 +63,18 @@ const getCompletion = async (config, opts) => {
         },
         opts
       );
+    case "Local Ollama":
+      if (!ollamaMod) throw new Error("Not implemented for this backend");
+
+      const { Ollama } = ollamaMod;
+
+      const ollama = new Ollama();
+      const olres = await ollama.generate({
+        model: config.model,
+        prompt: opts.prompt,
+      });
+      //console.log("the response ", olres);
+      return olres.response;
     case "Local llama.cpp":
       //TODO only check if unsafe plugins not allowed
       const isRoot = db.getTenantSchema() === db.connectObj.default_schema;
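For the new "Local Ollama" case, getCompletion delegates to the ollama client's generate() call and returns its response field. A rough sketch of how this path might be exercised, assuming the plugin environment loads the ollama module, a local Ollama server is running, and the model has already been pulled; the model name is a placeholder:

const { getCompletion } = require("./generate");

(async () => {
  const config = { backend: "Local Ollama", model: "llama3" }; // placeholder model name
  const text = await getCompletion(config, { prompt: "Summarise Saltcorn in one sentence." });
  console.log(text); // the olres.response string from ollama.generate()
})();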
@@ -78,4 +130,27 @@ const getCompletionOpenAICompatible = async (
   return results?.choices?.[0]?.message?.content;
 };

-module.exports = { getCompletion };
+const getEmbeddingOpenAICompatible = async (config, { prompt, model }) => {
+  const { embeddingsEndpoint, bearer, embed_model } = config;
+  const headers = {
+    "Content-Type": "application/json",
+    Accept: "application/json",
+  };
+  if (bearer) headers.Authorization = "Bearer " + bearer;
+  const body = {
+    //prompt: "How are you?",
+    model: model || embed_model || "text-embedding-3-small",
+    input: prompt,
+  };
+  console.log({ body, config });
+  const rawResponse = await fetch(embeddingsEndpoint, {
+    method: "POST",
+    headers,
+    body: JSON.stringify(body),
+  });
+  const results = await rawResponse.json();
+  console.log(results);
+
+  return results?.data?.[0]?.embedding;
+};
+module.exports = { getCompletion, getEmbedding };
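getEmbeddingOpenAICompatible sends the standard OpenAI embeddings request body ({ model, input }) and reads the vector back from data[0].embedding. The same request can be reproduced by hand to check an embed_endpoint value before saving the configuration; a hedged sketch, using the localhost example URL from index.js and placeholder model/prompt values:

(async () => {
  // Uses global fetch (Node 18+); the plugin itself also depends on node-fetch.
  const rawResponse = await fetch("http://localhost:8080/v1/embeddings", {
    method: "POST",
    headers: { "Content-Type": "application/json", Accept: "application/json" },
    body: JSON.stringify({ model: "text-embedding-3-small", input: "Hello world" }),
  });
  const results = await rawResponse.json();
  // Same path the plugin reads: the embedding vector for the single input.
  console.log(results?.data?.[0]?.embedding?.length);
})();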
package/index.js
CHANGED
@@ -1,6 +1,6 @@
 const Workflow = require("@saltcorn/data/models/workflow");
 const Form = require("@saltcorn/data/models/form");
-const { getCompletion } = require("./generate");
+const { getCompletion, getEmbedding } = require("./generate");
 const db = require("@saltcorn/data/db");

 const configuration_workflow = () =>
@@ -21,6 +21,7 @@ const configuration_workflow = () =>
           options: [
             "OpenAI",
             "OpenAI-compatible API",
+            "Local Ollama",
             ...(isRoot ? ["Local llama.cpp"] : []),
           ],
         },
@@ -62,6 +63,20 @@ const configuration_workflow = () =>
           ],
         },
       },
+      {
+        name: "embed_model",
+        label: "Embedding model", //gpt-3.5-turbo
+        type: "String",
+        required: true,
+        showIf: { backend: "OpenAI" },
+        attributes: {
+          options: [
+            "text-embedding-3-small",
+            "text-embedding-3-large",
+            "text-embedding-ada-002",
+          ],
+        },
+      },
       {
         name: "bearer_auth",
         label: "Bearer Auth",
@@ -73,7 +88,7 @@ const configuration_workflow = () =>
         name: "model",
         label: "Model",
         type: "String",
-        showIf: { backend: "OpenAI-compatible API" },
+        showIf: { backend: ["OpenAI-compatible API", "Local Ollama"] },
       },
       {
         name: "endpoint",
@@ -82,6 +97,13 @@ const configuration_workflow = () =>
         sublabel: "Example: http://localhost:8080/v1/chat/completions",
         showIf: { backend: "OpenAI-compatible API" },
       },
+      {
+        name: "embed_endpoint",
+        label: "Embedding endpoint",
+        type: "String",
+        sublabel: "Example: http://localhost:8080/v1/embeddings",
+        showIf: { backend: "OpenAI-compatible API" },
+      },
     ],
   });
 },
@@ -97,6 +119,14 @@ const functions = (config) => ({
     description: "Generate text with GPT",
     arguments: [{ name: "prompt", type: "String" }],
   },
+  llm_embedding: {
+    run: async (prompt, opts) => {
+      return await getEmbedding(config, { prompt, ...opts });
+    },
+    isAsync: true,
+    description: "Get vector embedding",
+    arguments: [{ name: "prompt", type: "String" }],
+  },
 });

 module.exports = {
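index.js now registers an llm_embedding function alongside the existing text-generation function, so the embedding call is available wherever Saltcorn exposes plugin functions, for example in run-JavaScript-code actions. A hedged sketch of an action body, assuming the function is in scope under that name and that the row has a description field (both assumptions, not shown in the diff):

// Inside a Saltcorn "run JavaScript code" action (assumed scope; field name is illustrative):
const vector = await llm_embedding(row.description);
// vector is the raw embedding array returned by the configured backend.
console.log("embedding dimensions:", vector.length);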
package/package.json
CHANGED
@@ -1,12 +1,13 @@
 {
   "name": "@saltcorn/large-language-model",
-  "version": "0.2.1",
+  "version": "0.3.0",
   "description": "Large language models and functionality for Saltcorn",
   "main": "index.js",
   "dependencies": {
     "@saltcorn/data": "^0.9.0",
     "node-fetch": "2.6.9",
-    "underscore": "1.13.6"
+    "underscore": "1.13.6",
+    "ollama": "0.5.0"
   },
   "author": "Tom Nielsen",
   "license": "MIT",