@saltcorn/large-language-model 0.4.4 → 0.4.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3)
  1. package/generate.js +21 -11
  2. package/index.js +12 -3
  3. package/package.json +1 -1
package/generate.js CHANGED
@@ -23,21 +23,31 @@ const getEmbedding = async (config, opts) => {
         {
           embeddingsEndpoint: config.embed_endpoint,
           bearer: config.api_key,
-          embed_model: config.model,
+          embed_model: config.embed_model || config.model,
         },
         opts
       );
     case "Local Ollama":
-      if (!ollamaMod) throw new Error("Not implemented for this backend");
+      if (config.embed_endpoint) {
+        return await getEmbeddingOpenAICompatible(
+          {
+            embeddingsEndpoint: config.embed_endpoint,
+            embed_model: config.embed_model || config.model,
+          },
+          opts
+        );
+      } else {
+        if (!ollamaMod) throw new Error("Not implemented for this backend");
 
-      const { Ollama } = ollamaMod;
-      const ollama = new Ollama();
-      const olres = await ollama.embeddings({
-        model: opts?.model || config.embed_model || config.model,
-        prompt: opts.prompt,
-      });
-      //console.log("embedding response ", olres);
-      return olres.embedding;
+        const { Ollama } = ollamaMod;
+        const ollama = new Ollama();
+        const olres = await ollama.embeddings({
+          model: opts?.model || config.embed_model || config.model,
+          prompt: opts.prompt,
+        });
+        //console.log("embedding response ", olres);
+        return olres.embedding;
+      }
     default:
       throw new Error("Not implemented for this backend");
   }
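
With this change, a "Local Ollama" configuration that sets embed_endpoint delegates embeddings to any OpenAI-compatible server, and only falls back to the ollama client library when the endpoint is unset. A minimal sketch of exercising the new path, assuming generate.js exports getEmbedding; the config values are illustrative, not from the package:

// A minimal sketch, assuming generate.js exports getEmbedding.
const { getEmbedding } = require("./generate");

async function embed() {
  // Hypothetical config: Ollama serves chat, while embeddings are
  // delegated to an OpenAI-compatible server via embed_endpoint.
  const config = {
    backend: "Local Ollama",
    model: "llama3",                 // chat model (illustrative)
    embed_model: "nomic-embed-text", // embedding model (illustrative)
    embed_endpoint: "http://127.0.0.1:8080/v1/embeddings",
  };
  // embed_endpoint is set, so the new branch calls
  // getEmbeddingOpenAICompatible; unset, it would use the ollama client.
  return await getEmbedding(config, { prompt: "hello world" });
}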
@@ -71,7 +81,7 @@ const getCompletion = async (config, opts) => {
       const ollama = new Ollama();
       const olres = await ollama.generate({
         model: config.model,
-        prompt: opts.prompt,
+        ...opts,
       });
       //console.log("the response ", olres);
       return olres.response;
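
Replacing the fixed prompt: opts.prompt with ...opts forwards whatever the caller passes straight into ollama.generate, so fields from Ollama's generate API such as system or options can now reach the model. Note that because ...opts is spread after model, an opts.model value would override config.model. A hedged sketch of a call relying on the pass-through; getCompletion's export and the option values are assumptions:

// Sketch only: extra opts now flow through to ollama.generate.
// (Inside an async function.)
const text = await getCompletion(
  { backend: "Local Ollama", model: "llama3" }, // illustrative config
  {
    prompt: "Summarize this ticket in one line",
    system: "You are a terse assistant",        // forwarded via ...opts
    options: { temperature: 0.2 },              // Ollama sampling options
  }
);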
package/index.js CHANGED
@@ -66,6 +66,7 @@ const configuration_workflow = () =>
             "gpt-4-1106-preview",
             "gpt-4-0125-preview",
             "gpt-4-turbo",
+            "gpt-4o",
           ],
         },
       },
@@ -100,22 +101,30 @@
          name: "embed_model",
          label: "Embedding model",
          type: "String",
-         showIf: { backend: "Local Ollama" },
+         showIf: { backend: ["OpenAI-compatible API", "Local Ollama"] },
        },
        {
          name: "endpoint",
          label: "Chat completions endpoint",
          type: "String",
-         sublabel: "Example: http://localhost:8080/v1/chat/completions",
+         sublabel: "Example: http://127.0.0.1:8080/v1/chat/completions",
          showIf: { backend: "OpenAI-compatible API" },
        },
        {
          name: "embed_endpoint",
          label: "Embedding endpoint",
          type: "String",
-         sublabel: "Example: http://localhost:8080/v1/embeddings",
+         sublabel: "Example: http://127.0.0.1:8080/v1/embeddings",
          showIf: { backend: "OpenAI-compatible API" },
        },
+       {
+         name: "embed_endpoint",
+         label: "Embedding endpoint",
+         type: "String",
+         sublabel:
+           "Optional. Use an alternative OpenAI-compatible API for embeddings. Example: http://127.0.0.1:8080/v1/embeddings",
+         showIf: { backend: "Local Ollama" },
+       },
      ],
    });
  },
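
Two details in this hunk are easy to miss: showIf accepts an array, so the "Embedding model" field now renders for either backend, and a second field named embed_endpoint is declared with a disjoint showIf, so exactly one of the two appears for any given backend and each carries its own sublabel. A sketch of a resulting "OpenAI-compatible API" configuration, which the getEmbedding change in generate.js consumes; all values are illustrative:

// Hypothetical saved configuration for the OpenAI-compatible backend:
const config = {
  backend: "OpenAI-compatible API",
  endpoint: "http://127.0.0.1:8080/v1/chat/completions",
  embed_endpoint: "http://127.0.0.1:8080/v1/embeddings",
  embed_model: "nomic-embed-text", // now preferred over model for embeddings
  api_key: "not-a-real-key",       // sent as the bearer token (placeholder)
};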
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@saltcorn/large-language-model",
-  "version": "0.4.4",
+  "version": "0.4.6",
   "description": "Large language models and functionality for Saltcorn",
   "main": "index.js",
   "dependencies": {