@saltcorn/large-language-model 0.4.3 → 0.4.5

This diff shows the changes between publicly released versions of this package, as they appear in their public registry. It is provided for informational purposes only.
Files changed (3)
  1. package/generate.js +27 -13
  2. package/index.js +11 -3
  3. package/package.json +1 -1
package/generate.js CHANGED
@@ -23,21 +23,31 @@ const getEmbedding = async (config, opts) => {
         {
           embeddingsEndpoint: config.embed_endpoint,
           bearer: config.api_key,
-          embed_model: config.model,
+          embed_model: config.embed_model || config.model,
         },
         opts
       );
     case "Local Ollama":
-      if (!ollamaMod) throw new Error("Not implemented for this backend");
+      if (config.embed_endpoint) {
+        return await getEmbeddingOpenAICompatible(
+          {
+            embeddingsEndpoint: config.embed_endpoint,
+            embed_model: config.embed_model || config.model,
+          },
+          opts
+        );
+      } else {
+        if (!ollamaMod) throw new Error("Not implemented for this backend");
 
-      const { Ollama } = ollamaMod;
-      const ollama = new Ollama();
-      const olres = await ollama.embeddings({
-        model: opts?.model || config.embed_model || config.model,
-        prompt: opts.prompt,
-      });
-      //console.log("embedding response ", olres);
-      return olres.embedding;
+        const { Ollama } = ollamaMod;
+        const ollama = new Ollama();
+        const olres = await ollama.embeddings({
+          model: opts?.model || config.embed_model || config.model,
+          prompt: opts.prompt,
+        });
+        //console.log("embedding response ", olres);
+        return olres.embedding;
+      }
     default:
       throw new Error("Not implemented for this backend");
   }
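
What this hunk enables: when an embed_endpoint is configured, the Local Ollama backend now delegates embeddings to an OpenAI-compatible server instead of requiring the ollama module, and both paths prefer embed_model over model. A minimal sketch of a call exercising the new path; the require path and model names are assumptions, not taken from this diff:

    // Sketch only: assumes generate.js is requirable from the package root.
    const { getEmbedding } = require("@saltcorn/large-language-model/generate");

    const config = {
      backend: "Local Ollama",
      model: "llama3", // completion model (assumed name)
      embed_model: "nomic-embed-text", // preferred for embeddings as of 0.4.5
      // When set, embeddings bypass the ollama client and go here instead:
      embed_endpoint: "http://127.0.0.1:8080/v1/embeddings",
    };

    (async () => {
      const vector = await getEmbedding(config, { prompt: "hello world" });
      console.log(vector.length); // embedding dimensionality
    })();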
@@ -71,7 +81,7 @@ const getCompletion = async (config, opts) => {
       const ollama = new Ollama();
       const olres = await ollama.generate({
         model: config.model,
-        prompt: opts.prompt,
+        ...opts,
       });
       //console.log("the response ", olres);
       return olres.response;
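
Because ...opts is spread after model: config.model, every caller-supplied option now reaches ollama.generate, where previously only prompt survived (an opts.model would now also override the configured model). A hedged sketch; system and options are standard parameters of the ollama npm client, not part of this diff:

    const { getCompletion } = require("@saltcorn/large-language-model/generate");

    (async () => {
      const answer = await getCompletion(
        { backend: "Local Ollama", model: "llama3" },
        {
          prompt: "Summarize Saltcorn in one sentence.",
          system: "Answer tersely.", // forwarded as of 0.4.5
          options: { temperature: 0.2 }, // forwarded as of 0.4.5
        }
      );
      console.log(answer);
    })();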
@@ -140,8 +150,11 @@ const getCompletionOpenAICompatible = async (
   );
 };
 
-const getEmbeddingOpenAICompatible = async (config, { prompt, model }) => {
-  const { embeddingsEndpoint, bearer, debugResult, embed_model } = config;
+const getEmbeddingOpenAICompatible = async (
+  config,
+  { prompt, model, debugResult }
+) => {
+  const { embeddingsEndpoint, bearer, embed_model } = config;
   const headers = {
     "Content-Type": "application/json",
     Accept: "application/json",
@@ -162,6 +175,7 @@ const getEmbeddingOpenAICompatible = async (config, { prompt, model }) => {
   if (debugResult)
     console.log("OpenAI response", JSON.stringify(results, null, 2));
   if (results.error) throw new Error(`OpenAI error: ${results.error.message}`);
+  if (Array.isArray(prompt)) return results?.data?.map?.((d) => d?.embedding);
   return results?.data?.[0]?.embedding;
 };
 module.exports = { getCompletion, getEmbedding };
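
Two caller-visible changes in these hunks: debugResult moves from the config object to the per-call options, and an array prompt now returns one embedding per input, mirroring the batch behavior of the OpenAI embeddings API. A sketch under the same assumptions as the earlier ones:

    (async () => {
      // config and getEmbedding as in the first sketch above.
      const vectors = await getEmbedding(config, {
        prompt: ["first document", "second document"], // batch input
        debugResult: true, // per-call option as of 0.4.5, no longer read from config
      });
      console.log(vectors.length); // 2: one vector per input string
    })();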
package/index.js CHANGED
@@ -100,22 +100,30 @@ const configuration_workflow = () =>
         name: "embed_model",
         label: "Embedding model",
         type: "String",
-        showIf: { backend: "Local Ollama" },
+        showIf: { backend: ["OpenAI-compatible API", "Local Ollama"] },
       },
       {
         name: "endpoint",
         label: "Chat completions endpoint",
         type: "String",
-        sublabel: "Example: http://localhost:8080/v1/chat/completions",
+        sublabel: "Example: http://127.0.0.1:8080/v1/chat/completions",
         showIf: { backend: "OpenAI-compatible API" },
       },
       {
         name: "embed_endpoint",
         label: "Embedding endpoint",
         type: "String",
-        sublabel: "Example: http://localhost:8080/v1/embeddings",
+        sublabel: "Example: http://127.0.0.1:8080/v1/embeddings",
         showIf: { backend: "OpenAI-compatible API" },
       },
+      {
+        name: "embed_endpoint",
+        label: "Embedding endpoint",
+        type: "String",
+        sublabel:
+          "Optional. Use an alternative OpenAI-compatible API for embeddings. Example: http://127.0.0.1:8080/v1/embeddings",
+        showIf: { backend: "Local Ollama" },
+      },
     ],
   });
 },
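
Taken together, the form changes surface the new plumbing: embed_model is now shown for the OpenAI-compatible backend too, the example URLs use 127.0.0.1, and a second embed_endpoint field (shown only for Local Ollama) lets that backend send embeddings to an alternative OpenAI-compatible API. A sketch of a configuration the updated form could produce for the OpenAI-compatible backend; the model names and key are placeholders:

    const pluginConfig = {
      backend: "OpenAI-compatible API",
      endpoint: "http://127.0.0.1:8080/v1/chat/completions",
      embed_endpoint: "http://127.0.0.1:8080/v1/embeddings",
      model: "mistral-7b-instruct", // assumed name
      // Newly shown for this backend; generate.js prefers it over model
      // when building embedding requests:
      embed_model: "nomic-embed-text", // assumed name
      api_key: "sk-local-placeholder", // sent as bearer token if required
    };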
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@saltcorn/large-language-model",
-  "version": "0.4.3",
+  "version": "0.4.5",
   "description": "Large language models and functionality for Saltcorn",
   "main": "index.js",
   "dependencies": {