@tiledesk/tiledesk-tybot-connector 2.0.45 → 2.0.47

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@tiledesk/tiledesk-tybot-connector",
- "version": "2.0.45",
+ "version": "2.0.47",
  "description": "Tiledesk Tybot connector",
  "main": "index.js",
  "scripts": {
@@ -32,6 +32,7 @@
  "mqtt": "^5.10.4",
  "multer": "^1.4.5-lts.1",
  "nanoid": "^3.1.25",
+ "path": "^0.12.7",
  "redis": "^4.7.0",
  "string-argv": "^0.3.2",
  "uuid": "^3.3.3",
@@ -73,9 +73,7 @@ class KBService {
  })
  }

- async addUnansweredQuestion(id_project, namespace, question, token) {
-
- const json = { namespace, question };
+ async addUnansweredQuestion(id_project, data, token) {

  return new Promise((resolve, reject) => {
  const http_request = {
@@ -85,7 +83,7 @@ class KBService {
  'Authorization': 'JWT ' + token
  },
  method: "POST",
- json: json
+ json: data
  }
  winston.debug("Kb HttpRequest", http_request);

@@ -98,6 +96,28 @@ class KBService {
  });
  });
  }
+
+ async addAnsweredQuestion(id_project, data, token) {
+ return new Promise((resolve, reject) => {
+ const http_request = {
+ url: API_ENDPOINT + "/" + id_project + "/kb/answered/",
+ headers: {
+ 'Content-Type': 'application/json',
+ 'Authorization': 'JWT ' + token
+ },
+ json: data,
+ method: "POST"
+ }
+ winston.debug("Kb HttpRequest", http_request);
+ httpUtils.request(http_request, (err, response) => {
+ if (err) {
+ reject(err);
+ } else {
+ resolve(response);
+ }
+ });
+ });
+ }
  }

  const kbService = new KBService();
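Note: addUnansweredQuestion() now takes a single data object in place of separate namespace/question arguments, and the new addAnsweredQuestion() POSTs the same style of payload to /{id_project}/kb/answered/. A minimal call sketch; the values are placeholders and the field names mirror the payloads built in DirAskGPTV2 later in this diff:

    // Placeholder projectId/requestId/token; payload fields taken from the DirAskGPTV2 changes below.
    await kbService.addUnansweredQuestion(projectId, {
      namespace: "my-namespace",
      question: "What are your opening hours?",
      request_id: requestId
    }, token);

    await kbService.addAnsweredQuestion(projectId, {
      namespace: "my-namespace",
      question: "What are your opening hours?",
      answer: "We are open Monday to Friday, 9:00-18:00.",
      request_id: requestId,
      tokens: 1234
    }, token);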
@@ -2,6 +2,7 @@ const axios = require("axios").default;
  const { TiledeskChatbot } = require('../../engine/TiledeskChatbot');
  const { Filler } = require('../Filler');
  let https = require("https");
+ var path = require('path');
  const { DirIntent } = require("./DirIntent");
  const { TiledeskChatbotConst } = require("../../engine/TiledeskChatbotConst");
  const { TiledeskChatbotUtil } = require("../../utils/TiledeskChatbotUtil");
@@ -17,6 +18,40 @@ const aiController = require("../../services/AIController");
  const default_engine = require('../../config/kb/engine');
  const default_engine_hybrid = require('../../config/kb/engine.hybrid');
  const default_embedding = require("../../config/kb/embedding");
+ const PromptManager = require('../../config/kb/prompt/rag/PromptManager');
+ const { MODELS_MULTIPLIER } = require("../../utils/aiUtils");
+
+ //const ragPromptManager = new PromptManager(path.join(__dirname, '../../config/kb/prompt/rag'));
+ const ragPromptManager = new PromptManager(path.join(__dirname, '../../config/kb/prompt/rag'));
+
+ const RAG_CONTEXT_ENV_OVERRIDES = {
+ "gpt-3.5-turbo": process.env.GPT_3_5_CONTEXT,
+ "gpt-4": process.env.GPT_4_CONTEXT,
+ "gpt-4-turbo-preview": process.env.GPT_4T_CONTEXT,
+ "gpt-4o": process.env.GPT_4O_CONTEXT,
+ "gpt-4o-mini": process.env.GPT_4O_MINI_CONTEXT,
+ "gpt-4.1": process.env.GPT_4_1_CONTEXT,
+ "gpt-4.1-mini": process.env.GPT_4_1_MINI_CONTEXT,
+ "gpt-4.1-nano": process.env.GPT_4_1_NANO_CONTEXT,
+ "gpt-5": process.env.GPT_5_CONTEXT,
+ "gpt-5-mini": process.env.GPT_5_MINI_CONTEXT,
+ "gpt-5-nano": process.env.GPT_5_NANO_CONTEXT,
+ "general": process.env.GENERAL_CONTEXT
+ };
+
+ /** RAG system prompt per model: files in config/kb/prompt/rag, overridable via env (as before). */
+ function getRagContextTemplate(modelName) {
+ const envOverride = RAG_CONTEXT_ENV_OVERRIDES[modelName];
+ if (envOverride) {
+ return envOverride;
+ }
+ if (!PromptManager.modelMap[modelName] && process.env.GENERAL_CONTEXT) {
+ return process.env.GENERAL_CONTEXT;
+ }
+ return ragPromptManager.getPrompt(modelName);
+ }
+
+ const PINECONE_RERANKING = process.env.PINECONE_RERANKING === true || process.env.PINECONE_RERANKING === "true";

  class DirAskGPTV2 {

@@ -77,9 +112,9 @@ class DirAskGPTV2 {
  let namespace = this.context.projectId;
  let llm = "openai";
  let model;
- let temperature;
- let max_tokens;
- let top_k;
+ let temperature = 0.7;
+ let max_tokens = 256;
+ let top_k = 4;
  let alpha;
  let transcript;
  let citations = false;
@@ -87,24 +122,10 @@ class DirAskGPTV2 {
  let engine;
  let embedding;
  let reranking;
+ let reranking_multiplier;
  let skip_unanswered = false;
-
- let contexts = {
- "gpt-3.5-turbo": process.env.GPT_3_5_CONTEXT || "You are an helpful assistant for question-answering tasks.\nUse ONLY the pieces of retrieved context delimited by #### and the chat history to answer the question.\nIf you don't know the answer, just say: \"I don't know<NOANS>\"\n\n####{context}####",
- "gpt-4": process.env.GPT_4_CONTEXT || "You are an helpful assistant for question-answering tasks.\nUse ONLY the pieces of retrieved context delimited by #### and the chat history to answer the question.\nIf you don't know the answer, just say that you don't know.\nIf and only if none of the retrieved context is useful for your task, add this word to the end <NOANS>\n\n####{context}####",
- "gpt-4-turbo-preview": process.env.GPT_4T_CONTEXT || "You are an helpful assistant for question-answering tasks.\nUse ONLY the pieces of retrieved context delimited by #### and the chat history to answer the question.\nIf you don't know the answer, just say that you don't know.\nIf and only if none of the retrieved context is useful for your task, add this word to the end <NOANS>\n\n####{context}####",
- "gpt-4o": process.env.GPT_4O_CONTEXT || "You are an helpful assistant for question-answering tasks. Follow these steps carefully:\n1. Answer in the same language of the user question, regardless of the retrieved context language\n2. Use ONLY the pieces of the retrieved context and the chat history to answer the question.\n3. If the retrieved context does not contain sufficient information to generate an accurate and informative answer, return <NOANS>\n\n==Retrieved context start==\n{context}\n==Retrieved context end==",
- "gpt-4o-mini": process.env.GPT_4O_MINI_CONTEXT || "You are an helpful assistant for question-answering tasks. Follow these steps carefully:\n1. Answer in the same language of the user question, regardless of the retrieved context language\n2. Use ONLY the pieces of the retrieved context and the chat history to answer the question.\n3. If the retrieved context does not contain sufficient information to generate an accurate and informative answer, return <NOANS>\n\n==Retrieved context start==\n{context}\n==Retrieved context end==",
- "gpt-4.1": process.env.GPT_4_1_CONTEXT || "You are an helpful assistant for question-answering tasks. Follow these steps carefully:\n1. Answer in the same language of the user question, regardless of the retrieved context language\n2. Use ONLY the pieces of the retrieved context and the chat history to answer the question.\n3. If the retrieved context does not contain sufficient information to generate an accurate and informative answer, append <NOANS> at the end of the answer\n\n==Retrieved context start==\n{context}\n==Retrieved context end==",
- "gpt-4.1-mini": process.env.GPT_4_1_MINI_CONTEXT || "You are an helpful assistant for question-answering tasks. Follow these steps carefully:\n1. Answer in the same language of the user question, regardless of the retrieved context language\n2. Use ONLY the pieces of the retrieved context and the chat history to answer the question.\n3. If the retrieved context does not contain sufficient information to generate an accurate and informative answer, append <NOANS> at the end of the answer\n\n==Retrieved context start==\n{context}\n==Retrieved context end==",
- "gpt-4.1-nano": process.env.GPT_4_1_NANO_CONTEXT || "You are an helpful assistant for question-answering tasks. Follow these steps carefully:\n1. Answer in the same language of the user question, regardless of the retrieved context language\n2. Use ONLY the pieces of the retrieved context and the chat history to answer the question.\n3. If the retrieved context does not contain sufficient information to generate an accurate and informative answer, append <NOANS> at the end of the answer\n\n==Retrieved context start==\n{context}\n==Retrieved context end==",
- "gpt-5": process.env.GPT_5_CONTEXT || "You are an helpful assistant for question-answering tasks. Follow these steps carefully:\n1. Answer in the same language of the user question, regardless of the retrieved context language\n2. Use ONLY the pieces of the retrieved context and the chat history to answer the question.\n3. If the retrieved context does not contain sufficient information to generate an accurate and informative answer, append <NOANS> at the end of the answer\n\n==Retrieved context start==\n{context}\n==Retrieved context end==",
- "gpt-5-mini": process.env.GPT_5_MINI_CONTEXT || "You are an helpful assistant for question-answering tasks. Follow these steps carefully:\n1. Answer in the same language of the user question, regardless of the retrieved context language\n2. Use ONLY the pieces of the retrieved context and the chat history to answer the question.\n3. If the retrieved context does not contain sufficient information to generate an accurate and informative answer, append <NOANS> at the end of the answer\n\n==Retrieved context start==\n{context}\n==Retrieved context end==",
- "gpt-5-nano": process.env.GPT_5_NANO_CONTEXT || "You are an helpful assistant for question-answering tasks. Follow these steps carefully:\n1. Answer in the same language of the user question, regardless of the retrieved context language\n2. Use ONLY the pieces of the retrieved context and the chat history to answer the question.\n3. If the retrieved context does not contain sufficient information to generate an accurate and informative answer, append <NOANS> at the end of the answer\n\n==Retrieved context start==\n{context}\n==Retrieved context end==",
- "general": process.env.GENERAL_CONTEXT || "You are an helpful assistant for question-answering tasks. Follow these steps carefully:\n1. Answer in the same language of the user question, regardless of the retrieved context language\n2. Use ONLY the pieces of the retrieved context and the chat history to answer the question.\n3. If the retrieved context does not contain sufficient information to generate an accurate and informative answer, append <NOANS> at the end of the answer\n\n==Retrieved context start==\n{context}\n==Retrieved context end=="
- }
-
  let source = null;
+
  if (!action.llm) {
  action.llm = "openai";
  }
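Note: the inline contexts map removed above is replaced by getRagContextTemplate(), added near the top of the file. Resolution order: a per-model env override from RAG_CONTEXT_ENV_OVERRIDES wins first; a model unknown to PromptManager.modelMap falls back to GENERAL_CONTEXT when set; otherwise the prompt file under config/kb/prompt/rag is loaded via ragPromptManager.getPrompt(). A sketch of that order; PromptManager's file-loading behavior is not part of this diff, so it is assumed here:

    // Assumes PromptManager.getPrompt() reads the per-model prompt file shipped in config/kb/prompt/rag.
    process.env.GPT_4O_CONTEXT = "Use ONLY the retrieved context.\n####{context}####";
    getRagContextTemplate("gpt-4o");         // env override wins
    delete process.env.GPT_4O_CONTEXT;
    getRagContextTemplate("gpt-4o");         // file-based prompt via ragPromptManager.getPrompt()
    getRagContextTemplate("some-other-llm"); // GENERAL_CONTEXT if set, else ragPromptManager.getPrompt() fallback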
@@ -152,6 +173,9 @@ class DirAskGPTV2 {
  if (action.reranking) {
  reranking = action.reranking;
  }
+ if (action.reranking_multiplier) {
+ reranking_multiplier = action.reranking_multiplier;
+ }
  if (action.skip_unanswered) {
  skip_unanswered = action.skip_unanswered;
  }
@@ -321,13 +345,43 @@ class DirAskGPTV2 {

  if (reranking === true) {
  json.reranking = true;
- json.reranking_multiplier = 3;
+ json.reranking_multiplier = reranking_multiplier || 3;
  json.reranker_model = "cross-encoder/ms-marco-MiniLM-L-6-v2";
+
+ if ((top_k * reranking_multiplier) > 100) {
+ // Find the largest integer reranking_multiplier so that top_k * reranking_multiplier <= 100
+ let calculatedRerankingMultiplier = Math.floor(100 / top_k);
+ // At least 1 is required
+ if (calculatedRerankingMultiplier < 1) {
+ calculatedRerankingMultiplier = 1;
+ }
+ json.reranking_multiplier = calculatedRerankingMultiplier;
+ }
+ }
+ }
+
+ if (!ns.hybrid && reranking === true && PINECONE_RERANKING) {
+ json.reranking = {
+ "provider": "pinecone",
+ "api_key": process.env.PINECONE_API_KEY,
+ "model": process.env.PINECONE_RERANKING_MODEL || process.env.RERANKING_MODEL || "bge-reranker-v2-m3"
+ }
+
+ json.reranking_multiplier = reranking_multiplier || 3;
+ if ((top_k * reranking_multiplier) > 100) {
+ // Find the largest integer reranking_multiplier so that top_k * reranking_multiplier <= 100
+ let calculatedRerankingMultiplier = Math.floor(100 / top_k);
+ // At least 1 is required
+ if (calculatedRerankingMultiplier < 1) {
+ calculatedRerankingMultiplier = 1;
+ }
+ json.reranking_multiplier = calculatedRerankingMultiplier;
  }
  }

  if (!action.advancedPrompt) {
- const contextTemplate = contexts[model.name] || contexts["general"];
+ const contextTemplate = getRagContextTemplate(model.name);
+
  if (filled_context) {
  json.system_context = filled_context + "\n" + contextTemplate;
  } else {
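Note: both reranking branches now cap the candidate pool sent to the reranker so that top_k * reranking_multiplier stays at or below 100; as written, the cap check reads the raw action value, so it only fires when reranking_multiplier was set explicitly on the action. A standalone sketch of the effective multiplier (hypothetical helper name, not part of the package):

    // Mirrors the cap logic above: default multiplier 3, capped so top_k * multiplier <= 100, never below 1.
    function effectiveRerankingMultiplier(top_k, reranking_multiplier) {
      let multiplier = reranking_multiplier || 3;
      if ((top_k * reranking_multiplier) > 100) {
        multiplier = Math.max(1, Math.floor(100 / top_k));
      }
      return multiplier;
    }

    effectiveRerankingMultiplier(4, undefined); // 3  (no explicit multiplier, cap check does not fire)
    effectiveRerankingMultiplier(40, 5);        // 2  (40 * 5 > 100 -> floor(100 / 40))
    effectiveRerankingMultiplier(150, 3);       // 1  (floor(100 / 150) would be 0, clamped to 1)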
@@ -398,15 +452,31 @@ class DirAskGPTV2 {

  } else {
  await this.#assignAttributes(action, resbody.answer, resbody.source, resbody.content_chunks);
+ let tokens = resbody.prompt_token_size;
  if (publicKey === true && !chunks_only) {
+
  let tokens_usage = {
  tokens: resbody.prompt_token_size,
  model: json.model
  }
+
+ let multiplier = MODELS_MULTIPLIER[json.model.name] ?? 1;
+ tokens = tokens * multiplier;
  quotasService.updateQuote(this.projectId, this.token, tokens_usage).catch((err) => {
  winston.error("Error updating quota: ", err);
  })
  }
+
+ const data = {
+ namespace: json.namespace,
+ question: json.question,
+ answer: resbody.answer,
+ request_id: this.requestId,
+ tokens: tokens
+ }
+ kbService.addAnsweredQuestion(this.projectId, data, this.token).catch((err) => {
+ winston.error("Error adding answered question: ", err);
+ })

  if (trueIntent) {
  await this.#executeCondition(true, trueIntent, trueIntentAttributes, falseIntent, falseIntentAttributes);
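Note: the prompt token count returned by the KB endpoint (resbody.prompt_token_size) is now scaled by a per-model multiplier from MODELS_MULTIPLIER (built from the AI_MODELS env var in the new aiUtils module at the end of this diff) before being sent as data.tokens to addAnsweredQuestion(); quotasService.updateQuote() still receives the raw count. A small sketch of the scaling, with example multiplier values that are not package defaults:

    // Example values only; the real map comes from process.env.AI_MODELS via the new aiUtils module.
    const MODELS_MULTIPLIER = { "gpt-4o": 12, "gpt-4o-mini": 0.6 };
    let tokens = 1000;                                   // resbody.prompt_token_size
    const multiplier = MODELS_MULTIPLIER["gpt-4o"] ?? 1; // unknown model names fall back to 1
    tokens = tokens * multiplier;                        // 12000, stored with the answered question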
@@ -417,9 +487,16 @@ class DirAskGPTV2 {
  return;
  }
  } else {
+ winston.info("DirAskGPTV2 resbody else case: ", resbody);
  await this.#assignAttributes(action, answer, source);
  if (!skip_unanswered) {
- kbService.addUnansweredQuestion(this.projectId, json.namespace, json.question, this.token).catch((err) => {
+ // console.log("this.context", JSON.stringify(this.context, null, 2));
+ const data = {
+ namespace: json.namespace,
+ question: json.question,
+ request_id: this.requestId
+ }
+ kbService.addUnansweredQuestion(this.projectId, data, this.token).catch((err) => {
  winston.error("DirAskGPTV2 - Error adding unanswered question: ", {
  status: err.response?.status,
  statusText: err.response?.statusText,
@@ -22,7 +22,6 @@ class DirIteration {

  execute(directive, callback) {
  winston.verbose("Execute Iteration directive");
- console.log("directive: ", directive);
  let action;
  if (directive.action) {
  action = directive.action;
@@ -83,7 +82,6 @@ class DirIteration {
  */
  async #initializeIteration(action, actionId, callback) {
  winston.debug("[Iteration] Initializing iteration state");
- console.log("[Iteration] Initializing iteration state");

  const iterable = action.iterable;
  const goToIntent = action.goToIntent;
@@ -96,7 +94,6 @@ class DirIteration {

  if (!iterableValue) {
  winston.verbose("[Iteration] Iterable object is undefined");
- console.log("[Iteration] Iterable object is undefined");
  this.logger.warn("[Iteration] Iterable object is undefined");
  callback(true);
  return;
@@ -107,7 +104,6 @@ class DirIteration {

  if (!iterableArray) {
  winston.verbose("[Iteration] Could not convert iterable to array");
- console.log("[Iteration] Could not convert iterable to array");
  this.logger.error(`[Iteration] Could not convert iterable '${iterable}' to array | type: ${typeof iterableValue}`);
  callback(true);
  return;
@@ -115,7 +111,6 @@ class DirIteration {

  if (iterableArray.length === 0) {
  winston.verbose("[Iteration] Iterable array is empty. Exit...")
- console.log("[Iteration] Iterable array is empty. Exit...")
  this.logger.warn("[Iteration] Iterable array is empty. Exit...")
  callback(true);
  return;
@@ -133,7 +128,6 @@ class DirIteration {

  await this.#saveIterationState(actionId, iterationState);
  this.logger.native(`[Iteration] Initialized iteration with ${iterableArray.length} items`);
- console.log(`[Iteration] Initialized iteration with ${iterableArray.length} items`);

  // Process first item
  await this.#processCurrentItem(iterationState, actionId, goToIntent, output, callback);
@@ -141,7 +135,6 @@ class DirIteration {

  async #continueIteration(iterationState, actionId, callback) {
  winston.debug(`[Iteration] Continuing iteration from index ${iterationState.currentIndex}`);
- console.log(`[Iteration] Continuing iteration from index ${iterationState.currentIndex}`);

  // Increment index
  iterationState.currentIndex += 1;
@@ -150,7 +143,6 @@ class DirIteration {
  if (iterationState.currentIndex >= iterationState.totalItems) {
  this.logger.native("[Iteration] Iteration completed");
  await this.#clearIterationState(actionId);
- console.log("[Iteration] Iteration completed");
  callback(false);
  return;
  }
@@ -0,0 +1,43 @@
+ const winston = require('./winston');
+
+ // MODELS_MULTIPLIER = {
+ // "gpt-3.5-turbo": 0.6,
+ // "gpt-4": 25,
+ // "gpt-4-turbo-preview": 12
+ // }
+
+ loadMultiplier();
+ function loadMultiplier() {
+
+
+ let models_string = process.env.AI_MODELS;
+ winston.debug("(loadMultiplier) models_string: ", models_string)
+ let models = {};
+
+ if (!models_string) {
+ winston.info("AI_MODELS not defined");
+ winston.info("AI Models: ", models)
+ return models;
+ }
+
+ let models_string_trimmed = models_string.replace(/ /g,'');
+ winston.debug("(loadMultiplier) models_string_trimmed: ", models_string_trimmed)
+
+ let splitted_string = models_string_trimmed.split(";");
+ winston.debug("splitted_string: ", splitted_string)
+
+ splitted_string.forEach(m => {
+ m_split = m.split(":");
+ if (!m_split[1]) {
+ multiplier = null;
+ } else {
+ multiplier = Number(m_split[1]);;
+ }
+ models[m_split[0]] = multiplier;
+ })
+
+ winston.info("AI Models: ", models)
+ return models;
+ }
+
+ module.exports = { MODELS_MULTIPLIER: loadMultiplier() }
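Note: this new module (required earlier as ../../utils/aiUtils, so presumably utils/aiUtils.js) builds MODELS_MULTIPLIER from the AI_MODELS env var: semicolon-separated name:multiplier pairs, with spaces stripped and a missing multiplier stored as null. A usage sketch with an illustrative env value, not shipped with the package:

    // Illustrative AI_MODELS value; set it before the module is first required, since the map is built at load time.
    process.env.AI_MODELS = "gpt-4o:12; gpt-4o-mini:0.6; my-local-model";
    const { MODELS_MULTIPLIER } = require('./utils/aiUtils');
    // MODELS_MULTIPLIER -> { "gpt-4o": 12, "gpt-4o-mini": 0.6, "my-local-model": null }
    const multiplier = MODELS_MULTIPLIER["gpt-4o"] ?? 1; // 12; names without an entry fall back to 1 at the call site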