@saltcorn/large-language-model 0.8.2 → 0.8.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3)
  1. package/generate.js +8 -12
  2. package/index.js +56 -57
  3. package/package.json +1 -1
package/generate.js CHANGED
@@ -147,17 +147,8 @@ const getCompletion = async (config, opts) => {
 };
 
 const getCompletionOpenAICompatible = async (
-  { chatCompleteEndpoint, bearer, apiKey, model, responses_api },
-  {
-    systemPrompt,
-    prompt,
-    temperature,
-    debugResult,
-    chat = [],
-    api_key,
-    endpoint,
-    ...rest
-  }
+  { chatCompleteEndpoint, bearer, apiKey, model, responses_api, temperature },
+  { systemPrompt, prompt, debugResult, chat = [], api_key, endpoint, ...rest }
 ) => {
   const headers = {
     "Content-Type": "application/json",
@@ -168,9 +159,14 @@ const getCompletionOpenAICompatible = async (
   const body = {
     //prompt: "How are you?",
     model: rest.model || model,
-    temperature: temperature || 0.7,
     ...rest,
   };
+  if (rest.temperature || temperature) {
+    const str_or_num = rest.temperature || temperature;
+    body.temperature = +str_or_num;
+  } else if (typeof temperature==="undefined") {
+    body.temperature = 0.7
+  }
   if (responses_api) {
     for (const tool of body.tools || []) {
       if (tool.type !== "function") continue;
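The hunk above changes how temperature reaches the request body: a per-call rest.temperature takes precedence over the configured value, whichever is set is coerced from string to number, and the 0.7 default now applies only when no temperature setting exists at all, so a blank setting omits the field for models that reject it. A minimal sketch of that precedence, not part of the package (resolveTemperature is a hypothetical helper name):

    // Mirrors the added logic: request value wins, strings are coerced to numbers,
    // and a blank configured temperature means no temperature is sent at all.
    const resolveTemperature = (configTemperature, requestTemperature) => {
      if (requestTemperature || configTemperature)
        return +(requestTemperature || configTemperature);
      if (typeof configTemperature === "undefined") return 0.7;
      return undefined; // left blank: caller omits temperature from the body
    };

    // resolveTemperature("0.2", undefined)       === 0.2       (configured string, coerced)
    // resolveTemperature(undefined, undefined)   === 0.7       (no setting: old default)
    // resolveTemperature("", undefined)          === undefined (blank: omitted from request)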
package/index.js CHANGED
@@ -73,6 +73,60 @@ ${domReady(`
     showIf: { backend: "Local Ollama" },
   },
   {
+    name: "api_key",
+    label: "API key",
+    sublabel: "From your OpenAI account",
+    type: "String",
+    required: true,
+    fieldview: "password",
+    showIf: { backend: "OpenAI" },
+  },
+  {
+    name: "responses_api",
+    label: "Response API", //gpt-3.5-turbo
+    type: "Bool",
+    sublabel: "Use the newer Responses API",
+    showIf: { backend: "OpenAI" },
+  },
+  {
+    name: "llama_dir",
+    label: "llama.cpp directory",
+    type: "String",
+    required: true,
+    showIf: { backend: "Local llama.cpp" },
+  },
+  {
+    name: "model_path",
+    label: "Model path",
+    type: "String",
+    required: true,
+    showIf: { backend: "Local llama.cpp" },
+  },
+  {
+    name: "model",
+    label: "Model", //gpt-3.5-turbo
+    type: "String",
+    required: true,
+    showIf: { backend: "OpenAI" },
+    attributes: {
+      options: OPENAI_MODELS,
+    },
+  },
+  {
+    name: "embed_model",
+    label: "Embedding model", //gpt-3.5-turbo
+    type: "String",
+    required: true,
+    showIf: { backend: "OpenAI" },
+    attributes: {
+      options: [
+        "text-embedding-3-small",
+        "text-embedding-3-large",
+        "text-embedding-ada-002",
+      ],
+    },
+  },
+  {
     name: "client_id",
     label: "Client ID",
     sublabel: "OAuth2 client ID from your Google Cloud account",
@@ -115,8 +169,7 @@ ${domReady(`
     label: "Temperature",
     type: "Float",
     sublabel:
-      "Controls the randomness of predictions. Higher values make the output more random.",
-    showIf: { backend: "Google Vertex AI" },
+      "Controls the randomness of predictions. Higher values make the output more random. Leave blank for models that do not support temperature",
     default: 0.7,
     attributes: {
       min: 0,
@@ -166,61 +219,7 @@ ${domReady(`
     showIf: { backend: "Google Vertex AI" },
     default: "us-central1",
   },
-  {
-    name: "api_key",
-    label: "API key",
-    sublabel: "From your OpenAI account",
-    type: "String",
-    required: true,
-    fieldview: "password",
-    showIf: { backend: "OpenAI" },
-  },
-  {
-    name: "responses_api",
-    label: "Response API", //gpt-3.5-turbo
-    type: "Bool",
-    sublabel: "Use the newer Responses API",
-    showIf: { backend: "OpenAI" },
-
-  },
-  {
-    name: "llama_dir",
-    label: "llama.cpp directory",
-    type: "String",
-    required: true,
-    showIf: { backend: "Local llama.cpp" },
-  },
-  {
-    name: "model_path",
-    label: "Model path",
-    type: "String",
-    required: true,
-    showIf: { backend: "Local llama.cpp" },
-  },
-  {
-    name: "model",
-    label: "Model", //gpt-3.5-turbo
-    type: "String",
-    required: true,
-    showIf: { backend: "OpenAI" },
-    attributes: {
-      options: OPENAI_MODELS,
-    },
-  },
-  {
-    name: "embed_model",
-    label: "Embedding model", //gpt-3.5-turbo
-    type: "String",
-    required: true,
-    showIf: { backend: "OpenAI" },
-    attributes: {
-      options: [
-        "text-embedding-3-small",
-        "text-embedding-3-large",
-        "text-embedding-ada-002",
-      ],
-    },
-  },
+
   {
     name: "bearer_auth",
     label: "Bearer Auth",
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@saltcorn/large-language-model",
-  "version": "0.8.2",
+  "version": "0.8.3",
   "description": "Large language models and functionality for Saltcorn",
   "main": "index.js",
   "dependencies": {