garriga 1.0.12 → 1.0.14
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/index.js +68 -24
- package/package.json +1 -1
package/index.js
CHANGED
@@ -1,16 +1,55 @@
 const axios = require("axios");

+function normalizeGenerateResponseOptions(optionsOrUserToken, legacySource) {
+  if (
+    optionsOrUserToken &&
+    typeof optionsOrUserToken === "object" &&
+    !Array.isArray(optionsOrUserToken)
+  ) {
+    return {
+      userToken: optionsOrUserToken.userToken ?? null,
+      source: optionsOrUserToken.source ?? null,
+      modelOverride: optionsOrUserToken.modelOverride ?? null,
+    };
+  }
+
+  return {
+    userToken: optionsOrUserToken ?? null,
+    source: legacySource ?? null,
+    modelOverride: null,
+  };
+}
+
+function normalizeAdapterConfig(modelOrConfig) {
+  if (
+    modelOrConfig &&
+    typeof modelOrConfig === "object" &&
+    !Array.isArray(modelOrConfig)
+  ) {
+    return {
+      model: modelOrConfig.model ?? null,
+      baseUrl: modelOrConfig.baseUrl ?? null,
+    };
+  }
+
+  return {
+    model: modelOrConfig ?? null,
+    baseUrl: null,
+  };
+}
+
 class LLMAdapter {
-  constructor(provider = "chatgpt") {
+  constructor(provider = "chatgpt", modelOrConfig) {
+    const adapterConfig = normalizeAdapterConfig(modelOrConfig);
     switch (provider) {
       case "chatgpt":
-        this.adapter = new ChatGptAPI();
+        this.adapter = new ChatGptAPI(adapterConfig);
         break;
       case "localLLM":
-        this.adapter = new localLLM();
+        this.adapter = new localLLM(adapterConfig);
         break;
       case "gemini":
-        this.adapter = new GeminiAPI();
+        this.adapter = new GeminiAPI(adapterConfig);
         break;

       default:
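The two helpers above let call sites accept either the legacy scalar arguments or a single options object, and the LLMAdapter constructor now threads a normalized { model, baseUrl } config into whichever provider class it instantiates. A minimal usage sketch, assuming the package exports LLMAdapter directly (the export statement is not part of this diff):

const LLMAdapter = require("garriga"); // assumed export shape

// Legacy form, unchanged: per-provider defaults apply.
const defaultAdapter = new LLMAdapter("chatgpt");

// New form: an object selects the model and/or endpoint.
const configured = new LLMAdapter("chatgpt", {
  model: "gpt-3.5-turbo",
  baseUrl: "https://api.openai.com/v1/chat/completions",
});

// A bare non-object value is treated as the model name; baseUrl stays null.
const modelOnly = new LLMAdapter("localLLM", "GrupoImpuLLMtec");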
@@ -18,8 +57,8 @@ class LLMAdapter {
     }
   }

-  async generateResponse(prompt,
-    return this.adapter.generateResponse(prompt,
+  async generateResponse(prompt, optionsOrUserToken, source) {
+    return this.adapter.generateResponse(prompt, optionsOrUserToken, source);
   }
   async generateRateResponse(responseId, ratePoints, rateText, userToken) {
     return this.adapter.generateRateResponse(
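generateResponse keeps the same dual shape: a plain value in the second position is still read as the user token, while an object is unpacked by normalizeGenerateResponseOptions. As far as this diff shows, only the localLLM adapter reads modelOverride (see its generateResponse hunk further down). A sketch of both call shapes, with hypothetical token and source values:

// Legacy positional call: (prompt, userToken, source)
adapter.generateResponse("hola", "someUserToken", "web");

// Options-object call; modelOverride falls through to the localLLM adapter.
adapter.generateResponse("hola", {
  userToken: "someUserToken",
  source: "web",
  modelOverride: "mistral7b-genei:latest",
});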
@@ -38,10 +77,11 @@ class LLMAdapter {
 }

 class ChatGptAPI {
-  constructor() {
+  constructor(config) {
     this.apiKey = process.env.CHATGPTAPIKEY;
-    this.apiUrl =
-
+    this.apiUrl =
+      config?.baseUrl || "https://api.openai.com/v1/chat/completions";
+    this.model = config?.model || "gpt-3.5-turbo";
   }
   async generateResponse(prompt) {
     try {
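ChatGptAPI resolves both endpoint and model from the injected config with a plain short-circuit, so an empty options object behaves exactly like no config at all. A quick equivalence sketch:

// Both resolve to the OpenAI chat-completions URL and "gpt-3.5-turbo":
new LLMAdapter("chatgpt");
new LLMAdapter("chatgpt", {}); // normalizeAdapterConfig yields { model: null, baseUrl: null }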
@@ -103,10 +143,12 @@ class ChatGptAPI {
 }

 class GeminiAPI {
-  constructor() {
+  constructor(config) {
     this.apiKey = process.env.GEMINIAPIKEY;
+    const modelName = config?.model || "gemini-2.0-flash";
     this.baseURL =
-
+      config?.baseUrl ||
+      `https://generativelanguage.googleapis.com/v1beta/models/${modelName}:generateContent`;
   }

   get URL() {
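For Gemini the model name is interpolated into the default endpoint, so passing only a model is enough to change the URL, while an explicit baseUrl bypasses the template entirely. A sketch (the non-default model name and URL are hypothetical):

// Default: .../models/gemini-2.0-flash:generateContent
new LLMAdapter("gemini");

// Model only: .../models/gemini-1.5-pro:generateContent
new LLMAdapter("gemini", { model: "gemini-1.5-pro" });

// Explicit baseUrl wins outright.
new LLMAdapter("gemini", { baseUrl: "https://example.internal/gemini" });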
@@ -172,14 +214,14 @@ class GeminiAPI {
 }

 class localLLM {
-  constructor() {
+  constructor(config) {
     this.apiUrlGetToken = "http://iaService:29000/token";
     this.apiUrlSendQuery = "http://iaService:29000/query";
     this.apiUrlRateResponse = "http://iaService:29000/rate-response";
     this.apiUrlHistorial = "http://iaService:29000/historial";
     this.apiUrlQueryTypes = "http://iaService:29000/query-types";
-    this.apiUrlOllama = "http://10.20.150.102:11434/api/generate";
-    this.model = "GrupoImpuLLMtec";
+    this.apiUrlOllama = config?.baseUrl || "http://10.20.150.102:11434/api/generate";
+    this.model = config?.model || "GrupoImpuLLMtec";
   }
   async getToken({ username, password }) {
     try {
@@ -204,20 +246,21 @@ class localLLM {
     }
   }

-  async generateResponse(
-
-
-
-
+  async generateResponse(prompt, optionsOrUserToken, legacySource) {
+    const { userToken, source, modelOverride } =
+      normalizeGenerateResponseOptions(optionsOrUserToken, legacySource);
+    const effectiveModel = modelOverride || this.model || "mistral7b-genei:latest";
+    const effectiveSource = source ?? this.model ?? "GrupoImpuLLMtec";
+
     console.log(
-      `garriga, llamando al LLm local, modelo ${
+      `garriga, llamando al LLm local, modelo ${effectiveModel} ya que process.env.LOCALMODEL es ${process.env.LOCALMODEL}`,
     );

     if (userToken === "modeloLocalOllama") {
       try {
         const ollamaPayload = {
           prompt,
-          model:
+          model: effectiveModel,
           stream: false,
           //format: "json",

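Inside the localLLM adapter the precedence is: per-call modelOverride, then the constructor's config.model, then the hard-coded fallback, with the source defaulting to the resolved model name. A sketch restating the chain from the hunk above:

// effectiveModel:  modelOverride || this.model || "mistral7b-genei:latest"
// effectiveSource: source ?? this.model ?? "GrupoImpuLLMtec"
const llm = new LLMAdapter("localLLM", { model: "GrupoImpuLLMtec" });

// Routed to the Ollama branch by the sentinel token; model comes from the override.
llm.generateResponse("hola", {
  userToken: "modeloLocalOllama",
  modelOverride: "mistral7b-genei:latest",
});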
@@ -226,14 +269,15 @@ class localLLM {
             num_ctx: +process.env.MAX_CONTEXT_FRAUD || 65536,
             temperature: +process.env.TEMPERATURE_FRAUD || 0.1,
             num_predict: +process.env.MAX_TOKENS_FRAUD || 2500,
-            top_k: 40,
-            top_p: 0.9,
+            top_k: +process.env.TOP_K_FRAUD || 40,
+            top_p: +process.env.TOP_P_FRAUD || 0.9,
           },
         };
         const response = await axios.post(this.apiUrlOllama, ollamaPayload, {
           headers: {
             "Content-Type": "application/json",
           },
+          timeout: +process.env.TIMEOUT_OLLAMA_FRAUD || 300000,
         });

         return response?.data.response;
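The Ollama branch now reads its sampling knobs and request timeout from the environment, coercing each string with unary plus and falling back when the variable is unset (note that an explicit 0 also falls back, a quirk of ||). The variable names are from the diff; the values below just restate the defaults:

// Read at request time by the Ollama branch:
process.env.TOP_K_FRAUD = "40";
process.env.TOP_P_FRAUD = "0.9";
process.env.TIMEOUT_OLLAMA_FRAUD = "300000"; // axios timeout, in milliseconds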
@@ -254,7 +298,7 @@ class localLLM {
         {
           question: prompt,
           userToken,
-          source,
+          source: effectiveSource,
         },
         {
           headers: {