@tiledesk/tiledesk-server 2.10.57 → 2.10.58

package/CHANGELOG.md CHANGED
@@ -5,12 +5,13 @@
 🚀 IN PRODUCTION 🚀
 (https://www.npmjs.com/package/@tiledesk/tiledesk-server/v/2.3.77)
 
-# 2.10.57
+# 2.10.58
 - updated tybot-connector to 0.2.150
 - fix issue on reconnect to rabbit queue (kb indexing)
 - updated multi-worker to 0.1.19
 - fix issue on TILEBOT_ENDPOINT undefined
 - added endpoint for llm preview
+- updated default contexts for gpt-4o and gpt-4o-mini
 
 # 2.10.56
 - bug fix: wrong tilebot_endpoint declaration
package/package.json CHANGED
@@ -1,7 +1,7 @@
 {
   "name": "@tiledesk/tiledesk-server",
   "description": "The Tiledesk server module",
-  "version": "2.10.57",
+  "version": "2.10.58",
   "scripts": {
     "start": "node ./bin/www",
     "pretest": "mongodb-runner start",
package/routes/kb.js CHANGED
@@ -73,8 +73,8 @@ let contexts = {
   "gpt-3.5-turbo": "You are an helpful assistant for question-answering tasks.\nUse ONLY the pieces of retrieved context delimited by #### to answer the question.\nIf you don't know the answer, just say: \"I don't know<NOANS>\"\n\n####{context}####",
   "gpt-4": "You are an helpful assistant for question-answering tasks.\nUse ONLY the pieces of retrieved context delimited by #### to answer the question.\nIf you don't know the answer, just say that you don't know.\nIf and only if none of the retrieved context is useful for your task, add this word to the end <NOANS>\n\n####{context}####",
   "gpt-4-turbo-preview": "You are an helpful assistant for question-answering tasks.\nUse ONLY the pieces of retrieved context delimited by #### to answer the question.\nIf you don't know the answer, just say that you don't know.\nIf and only if none of the retrieved context is useful for your task, add this word to the end <NOANS>\n\n####{context}####",
-  "gpt-4o": "You are an helpful assistant for question-answering tasks.\nUse ONLY the pieces of retrieved context delimited by #### to answer the question.\nIf you don't know the answer, just say that you don't know.\nIf the context does not contain sufficient information to generate an accurate and informative answer, return <NOANS>\n\n####{context}####",
-  "gpt-4o-mini": "You are an helpful assistant for question-answering tasks.\nUse ONLY the pieces of retrieved context delimited by #### to answer the question.\nIf you don't know the answer, just say that you don't know.\nIf the context does not contain sufficient information to generate an accurate and informative answer, return <NOANS>\n\n####{context}####"
+  "gpt-4o": "You are an helpful assistant for question-answering tasks. Follow these steps carefully:\n1. Answer in the same language of the user question, regardless of the retrieved context language\n2. Use ONLY the pieces of the retrieved context to answer the question.\n3. If the retrieved context does not contain sufficient information to generate an accurate and informative answer, return <NOANS>\n\n==Retrieved context start==\n{{context}}\n==Retrieved context end==",
+  "gpt-4o-mini": "You are an helpful assistant for question-answering tasks. Follow these steps carefully:\n1. Answer in the same language of the user question, regardless of the retrieved context language\n2. Use ONLY the pieces of the retrieved context to answer the question.\n3. If the retrieved context does not contain sufficient information to generate an accurate and informative answer, return <NOANS>\n\n==Retrieved context start==\n{{context}}\n==Retrieved context end==",
 }
 
 /**
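
The new gpt-4o and gpt-4o-mini defaults replace the `####{context}####` delimiters with explicit `==Retrieved context start==` / `==Retrieved context end==` markers around a double-braced `{{context}}` placeholder, and add an instruction to answer in the user's language. Below is a minimal sketch of how such a template could be filled with retrieved chunks before being sent to the model; the `buildSystemPrompt` helper and the chunk-joining strategy are illustrative assumptions, not the substitution code actually used by the KB pipeline.

```js
// Hypothetical illustration only: fill the {{context}} placeholder of one of
// the default context templates defined above in kb.js.
function buildSystemPrompt(template, retrievedChunks) {
  // Join the retrieved chunks into a single block of context text.
  const context = retrievedChunks.join("\n\n");
  return template.replace("{{context}}", context);
}

// Example usage with the gpt-4o template from the `contexts` map above
// (the chunks shown here are made up for the example).
const systemPrompt = buildSystemPrompt(contexts["gpt-4o"], [
  "Tiledesk is an open-source live chat platform.",
  "The server module is published on npm as @tiledesk/tiledesk-server."
]);
```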
package/routes/llm.js CHANGED
@@ -51,8 +51,10 @@ router.post('/preview', async (req, res) => {
       res.status(400).send({ success: false, error: err.response.data.detail[0]?.msg, detail: err.response.data.detail });
     } else if (err.response?.data?.detail?.answer) {
       res.status(400).send({ success: false, error: err.response.data.detail.answer, detail: err.response.data.detail });
-    } else {
+    } else if (err.response?.data) {
       res.status(500).send({ success: false, error: err.response.data });
+    } else {
+      res.status(500).send({ success: false, error: err });
     }
   })
 
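With this change the `/preview` catch block distinguishes three failure modes instead of two: backend validation errors and answer-level errors still map to 400, any other backend response body maps to 500 with that body, and errors that carry no response at all (for example a connection failure before the LLM backend is reached) now return 500 with the raw error instead of an undefined payload. A standalone sketch of the same cascade is shown below; the function name and the shape of the first condition are assumptions made for illustration.

```js
// Illustrative restatement of the branching in the /preview error handler.
// `err` is assumed to be an axios-style error object.
function previewErrorResponse(err) {
  if (err.response?.data?.detail?.[0]?.msg) {
    // Array-style detail (e.g. request validation errors from the backend)
    return { status: 400, body: { success: false, error: err.response.data.detail[0].msg, detail: err.response.data.detail } };
  }
  if (err.response?.data?.detail?.answer) {
    // The backend replied but flagged the answer itself as an error
    return { status: 400, body: { success: false, error: err.response.data.detail.answer, detail: err.response.data.detail } };
  }
  if (err.response?.data) {
    // Any other backend response body
    return { status: 500, body: { success: false, error: err.response.data } };
  }
  // No response at all (network error, timeout, ...): the new fallback branch
  return { status: 500, body: { success: false, error: err } };
}
```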
@@ -38,12 +38,12 @@ class AiService {
 
   // LLM
   askllm(data) {
-    winston.debug("[OPENAI SERVICE] llm endpoint: " + kb_endpoint);
+    winston.debug("[OPENAI SERVICE] llm endpoint: " + kb_endpoint_qa);
 
     return new Promise((resolve, reject) => {
 
       axios({
-        url: kb_endpoint + "/ask",
+        url: kb_endpoint_qa + "/ask",
         headers: {
           'Content-Type': 'application/json'
         },
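
The `askllm` fix points both the debug log and the axios request at `kb_endpoint_qa`, so the `/ask` call reaches the QA backend instead of using the wrong `kb_endpoint` variable. A usage sketch follows; the require path and the payload fields are assumptions made for illustration and may differ from the actual callers in the package.

```js
// Hypothetical caller of AiService.askllm() after the endpoint fix.
// Require path and payload shape are assumed, not taken from the package.
const aiService = require('../services/aiService');

async function previewAnswer(question, systemContext) {
  // askllm() sends the payload to kb_endpoint_qa + "/ask" and resolves
  // with the backend response (see the hunk above).
  return aiService.askllm({
    question: question,
    system_context: systemContext,
    model: "gpt-4o-mini"
  });
}
```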