@saltcorn/large-language-model 0.3.1 → 0.3.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/generate.js +5 -5
  2. package/package.json +1 -1
package/generate.js CHANGED
@@ -86,7 +86,7 @@ const getCompletion = async (config, opts) => {
86
86
  if (opts.temperature) hyperStr += ` --temp ${opts.temperature}`;
87
87
  let nstr = "";
88
88
  if (opts.ntokens) nstr = `-n ${opts.ntokens}`;
89
- console.log("running llama with prompt: ", opts.prompt);
89
+ //console.log("running llama with prompt: ", opts.prompt);
90
90
 
91
91
  const { stdout, stderr } = await exec(
92
92
  `./main -m ${config.model_path} -p "${opts.prompt}" ${nstr}${hyperStr}`,
@@ -100,7 +100,7 @@ const getCompletion = async (config, opts) => {
100
100
 
101
101
  const getCompletionOpenAICompatible = async (
102
102
  { chatCompleteEndpoint, bearer, model },
103
- { systemPrompt, prompt, temperature }
103
+ { systemPrompt, prompt, temperature, chat = [], ...rest }
104
104
  ) => {
105
105
  const headers = {
106
106
  "Content-Type": "application/json",
@@ -115,9 +115,11 @@ const getCompletionOpenAICompatible = async (
115
115
  role: "system",
116
116
  content: systemPrompt || "You are a helpful assistant.",
117
117
  },
118
+ ...chat,
118
119
  { role: "user", content: prompt },
119
120
  ],
120
121
  temperature: temperature || 0.7,
122
+ ...rest,
121
123
  };
122
124
  const rawResponse = await fetch(chatCompleteEndpoint, {
123
125
  method: "POST",
@@ -125,7 +127,6 @@ const getCompletionOpenAICompatible = async (
125
127
  body: JSON.stringify(body),
126
128
  });
127
129
  const results = await rawResponse.json();
128
- console.log(results);
129
130
 
130
131
  return results?.choices?.[0]?.message?.content;
131
132
  };
@@ -142,14 +143,13 @@ const getEmbeddingOpenAICompatible = async (config, { prompt, model }) => {
142
143
  model: model || embed_model || "text-embedding-3-small",
143
144
  input: prompt,
144
145
  };
145
- console.log({ body, config });
146
+
146
147
  const rawResponse = await fetch(embeddingsEndpoint, {
147
148
  method: "POST",
148
149
  headers,
149
150
  body: JSON.stringify(body),
150
151
  });
151
152
  const results = await rawResponse.json();
152
- console.log(results);
153
153
 
154
154
  return results?.data?.[0]?.embedding;
155
155
  };
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@saltcorn/large-language-model",
3
- "version": "0.3.1",
3
+ "version": "0.3.3",
4
4
  "description": "Large language models and functionality for Saltcorn",
5
5
  "main": "index.js",
6
6
  "dependencies": {