@tiledesk/tiledesk-server 2.13.27 → 2.13.30
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +7 -0
- package/package.json +3 -3
- package/routes/kb.js +7 -5
- package/services/requestService.js +4 -1
- package/services/webhookService.js +1 -1
package/CHANGELOG.md
CHANGED
|
@@ -5,6 +5,13 @@
|
|
|
5
5
|
🚀 IN PRODUCTION 🚀
|
|
6
6
|
(https://www.npmjs.com/package/@tiledesk/tiledesk-server/v/2.3.77)
|
|
7
7
|
|
|
8
|
+
# 2.13.31
|
|
9
|
+
- Added default context for general LLM
|
|
10
|
+
- Updated tybot-connector to 2.0.35
|
|
11
|
+
|
|
12
|
+
# 2.13.29
|
|
13
|
+
- Minor improvements
|
|
14
|
+
|
|
8
15
|
# 2.13.27
|
|
9
16
|
- Added rate manager for webhook call
|
|
10
17
|
- Increased json body limit for /webhook endpoint
|
package/package.json
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
{
|
|
2
2
|
"name": "@tiledesk/tiledesk-server",
|
|
3
3
|
"description": "The Tiledesk server module",
|
|
4
|
-
"version": "2.13.27",
|
|
4
|
+
"version": "2.13.30",
|
|
5
5
|
"scripts": {
|
|
6
6
|
"start": "node ./bin/www",
|
|
7
7
|
"pretest": "mongodb-runner start",
|
|
@@ -49,9 +49,9 @@
|
|
|
49
49
|
"@tiledesk/tiledesk-rasa-connector": "^1.0.10",
|
|
50
50
|
"@tiledesk/tiledesk-sms-connector": "^0.1.11",
|
|
51
51
|
"@tiledesk/tiledesk-telegram-connector": "^0.1.14",
|
|
52
|
-
"@tiledesk/tiledesk-tybot-connector": "^2.0.
|
|
52
|
+
"@tiledesk/tiledesk-tybot-connector": "^2.0.35",
|
|
53
53
|
"@tiledesk/tiledesk-voice-twilio-connector": "^0.1.22",
|
|
54
|
-
"@tiledesk/tiledesk-vxml-connector": "^0.1.
|
|
54
|
+
"@tiledesk/tiledesk-vxml-connector": "^0.1.87",
|
|
55
55
|
"@tiledesk/tiledesk-whatsapp-connector": "1.0.11",
|
|
56
56
|
"@tiledesk/tiledesk-whatsapp-jobworker": "^0.0.13",
|
|
57
57
|
"amqplib": "^0.5.5",
|
package/routes/kb.js
CHANGED
|
@@ -103,7 +103,8 @@ let contexts = {
|
|
|
103
103
|
"gpt-4o-mini": "You are an helpful assistant for question-answering tasks. Follow these steps carefully:\n1. Answer in the same language of the user question, regardless of the retrieved context language\n2. Use ONLY the pieces of the retrieved context to answer the question.\n3. If the retrieved context does not contain sufficient information to generate an accurate and informative answer, return <NOANS>\n\n==Retrieved context start==\n{context}\n==Retrieved context end==",
|
|
104
104
|
"gpt-4.1": "You are an helpful assistant for question-answering tasks. Follow these steps carefully:\n1. Answer in the same language of the user question, regardless of the retrieved context language\n2. Use ONLY the pieces of the retrieved context to answer the question.\n3. If the retrieved context does not contain sufficient information to generate an accurate and informative answer, append <NOANS> at the end of the answer\n\n==Retrieved context start==\n{context}\n==Retrieved context end==",
|
|
105
105
|
"gpt-4.1-mini": "You are an helpful assistant for question-answering tasks. Follow these steps carefully:\n1. Answer in the same language of the user question, regardless of the retrieved context language\n2. Use ONLY the pieces of the retrieved context to answer the question.\n3. If the retrieved context does not contain sufficient information to generate an accurate and informative answer, append <NOANS> at the end of the answer\n\n==Retrieved context start==\n{context}\n==Retrieved context end==",
|
|
106
|
-
"gpt-4.1-nano": "You are an helpful assistant for question-answering tasks. Follow these steps carefully:\n1. Answer in the same language of the user question, regardless of the retrieved context language\n2. Use ONLY the pieces of the retrieved context to answer the question.\n3. If the retrieved context does not contain sufficient information to generate an accurate and informative answer, append <NOANS> at the end of the answer\n\n==Retrieved context start==\n{context}\n==Retrieved context end=="
|
|
106
|
+
"gpt-4.1-nano": "You are an helpful assistant for question-answering tasks. Follow these steps carefully:\n1. Answer in the same language of the user question, regardless of the retrieved context language\n2. Use ONLY the pieces of the retrieved context to answer the question.\n3. If the retrieved context does not contain sufficient information to generate an accurate and informative answer, append <NOANS> at the end of the answer\n\n==Retrieved context start==\n{context}\n==Retrieved context end==",
|
|
107
|
+
"general": "You are an helpful assistant for question-answering tasks. Follow these steps carefully:\n1. Answer in the same language of the user question, regardless of the retrieved context language\n2. Use ONLY the pieces of the retrieved context to answer the question.\n3. If the retrieved context does not contain sufficient information to generate an accurate and informative answer, append <NOANS> at the end of the answer\n\n==Retrieved context start==\n{context}\n==Retrieved context end=="
|
|
107
108
|
}
|
|
108
109
|
|
|
109
110
|
/**
|
|
@@ -348,10 +349,11 @@ router.post('/qa', async (req, res) => {
|
|
|
348
349
|
|
|
349
350
|
// Check if "Advanced Mode" is active. In such case the default_context must be not appended
|
|
350
351
|
if (!data.advancedPrompt) {
|
|
352
|
+
const contextTemplate = contexts[data.model] || contexts["general"];
|
|
351
353
|
if (data.system_context) {
|
|
352
|
-
data.system_context = data.system_context + " \n" +
|
|
354
|
+
data.system_context = data.system_context + " \n" + contextTemplate;
|
|
353
355
|
} else {
|
|
354
|
-
data.system_context =
|
|
356
|
+
data.system_context = contextTemplate;
|
|
355
357
|
}
|
|
356
358
|
}
|
|
357
359
|
|
|
@@ -376,12 +378,12 @@ router.post('/qa', async (req, res) => {
|
|
|
376
378
|
|
|
377
379
|
if (data.llm === 'vllm') {
|
|
378
380
|
if (!vllm_integration.value.url) {
|
|
379
|
-
return res.status(422).send({ success: false, error: "Server url for
|
|
381
|
+
return res.status(422).send({ success: false, error: "Server url for vllm is empty or invalid"})
|
|
380
382
|
}
|
|
381
383
|
data.model = {
|
|
382
384
|
name: data.model,
|
|
383
385
|
url: vllm_integration.value.url,
|
|
384
|
-
provider: '
|
|
386
|
+
provider: 'vllm'
|
|
385
387
|
}
|
|
386
388
|
data.stream = false;
|
|
387
389
|
}
|
|
@@ -2855,7 +2855,10 @@ class RequestService {
|
|
|
2855
2855
|
winston.debug("[RequestService] response: ", response);
|
|
2856
2856
|
resolve(response.data);
|
|
2857
2857
|
}).catch((err) => {
|
|
2858
|
-
winston.error("get request parameter error:
|
|
2858
|
+
winston.error("get request parameter error:", {
|
|
2859
|
+
message: err.message,
|
|
2860
|
+
data: err.response?.data
|
|
2861
|
+
});
|
|
2859
2862
|
reject(err);
|
|
2860
2863
|
})
|
|
2861
2864
|
})
|
|
@@ -66,7 +66,7 @@ class WebhookService {
|
|
|
66
66
|
await httpUtil.post(url, payload).then((response) => {
|
|
67
67
|
resolve(response.data);
|
|
68
68
|
}).catch((err) => {
|
|
69
|
-
winston.error("Error calling webhook on post. Status " + err?.status + " " + err?.statusText + JSON.stringify(err?.response?.data));
|
|
69
|
+
winston.error("Error calling webhook on post. Status " + err?.status + " StatusText " + err?.statusText + " Data " + JSON.stringify(err?.response?.data));
|
|
70
70
|
reject(err);
|
|
71
71
|
})
|
|
72
72
|
|