@saltcorn/large-language-model 0.3.1 → 0.3.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2):
  1. package/generate.js +4 -5
  2. package/package.json +1 -1
package/generate.js CHANGED
@@ -86,7 +86,7 @@ const getCompletion = async (config, opts) => {
86
86
  if (opts.temperature) hyperStr += ` --temp ${opts.temperature}`;
87
87
  let nstr = "";
88
88
  if (opts.ntokens) nstr = `-n ${opts.ntokens}`;
89
- console.log("running llama with prompt: ", opts.prompt);
89
+ //console.log("running llama with prompt: ", opts.prompt);
90
90
 
91
91
  const { stdout, stderr } = await exec(
92
92
  `./main -m ${config.model_path} -p "${opts.prompt}" ${nstr}${hyperStr}`,
@@ -100,7 +100,7 @@ const getCompletion = async (config, opts) => {
100
100
 
101
101
  const getCompletionOpenAICompatible = async (
102
102
  { chatCompleteEndpoint, bearer, model },
103
- { systemPrompt, prompt, temperature }
103
+ { systemPrompt, prompt, temperature, chat = [] }
104
104
  ) => {
105
105
  const headers = {
106
106
  "Content-Type": "application/json",
@@ -115,6 +115,7 @@ const getCompletionOpenAICompatible = async (
115
115
  role: "system",
116
116
  content: systemPrompt || "You are a helpful assistant.",
117
117
  },
118
+ ...chat,
118
119
  { role: "user", content: prompt },
119
120
  ],
120
121
  temperature: temperature || 0.7,
@@ -125,7 +126,6 @@ const getCompletionOpenAICompatible = async (
125
126
  body: JSON.stringify(body),
126
127
  });
127
128
  const results = await rawResponse.json();
128
- console.log(results);
129
129
 
130
130
  return results?.choices?.[0]?.message?.content;
131
131
  };
@@ -142,14 +142,13 @@ const getEmbeddingOpenAICompatible = async (config, { prompt, model }) => {
142
142
  model: model || embed_model || "text-embedding-3-small",
143
143
  input: prompt,
144
144
  };
145
- console.log({ body, config });
145
+
146
146
  const rawResponse = await fetch(embeddingsEndpoint, {
147
147
  method: "POST",
148
148
  headers,
149
149
  body: JSON.stringify(body),
150
150
  });
151
151
  const results = await rawResponse.json();
152
- console.log(results);
153
152
 
154
153
  return results?.data?.[0]?.embedding;
155
154
  };
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@saltcorn/large-language-model",
3
- "version": "0.3.1",
3
+ "version": "0.3.2",
4
4
  "description": "Large language models and functionality for Saltcorn",
5
5
  "main": "index.js",
6
6
  "dependencies": {