@tiledesk/tiledesk-server 2.10.58 → 2.10.60

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -5,6 +5,15 @@
5
5
  🚀 IN PRODUCTION 🚀
6
6
  (https://www.npmjs.com/package/@tiledesk/tiledesk-server/v/2.3.77)
7
7
 
8
+ # 2.10.60
9
+ - updated tybot-connector to 0.3.1
10
+ - updated whatsapp-connector to 0.1.78
11
+ - minor fix
12
+
13
+ # 2.10.59
14
+ - updated tybot-connector to 0.2.152
15
+ - restored old default system contexts
16
+
8
17
  # 2.10.58
9
18
  - updated tybot-connector to 0.2.150
10
19
  - fix issue on reconnect to rabbit queue (kb indexing)
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "name": "@tiledesk/tiledesk-server",
3
3
  "description": "The Tiledesk server module",
4
- "version": "2.10.58",
4
+ "version": "2.10.60",
5
5
  "scripts": {
6
6
  "start": "node ./bin/www",
7
7
  "pretest": "mongodb-runner start",
@@ -47,8 +47,8 @@
47
47
  "@tiledesk/tiledesk-messenger-connector": "^0.1.23",
48
48
  "@tiledesk/tiledesk-rasa-connector": "^1.0.10",
49
49
  "@tiledesk/tiledesk-telegram-connector": "^0.1.14",
50
- "@tiledesk/tiledesk-tybot-connector": "^0.2.150",
51
- "@tiledesk/tiledesk-whatsapp-connector": "^0.1.77",
50
+ "@tiledesk/tiledesk-tybot-connector": "^0.3.1",
51
+ "@tiledesk/tiledesk-whatsapp-connector": "^0.1.78",
52
52
  "@tiledesk/tiledesk-whatsapp-jobworker": "^0.0.11",
53
53
  "@tiledesk/tiledesk-sms-connector": "^0.1.11",
54
54
  "@tiledesk/tiledesk-vxml-connector": "^0.1.67",
package/routes/kb.js CHANGED
@@ -73,8 +73,8 @@ let contexts = {
73
73
  "gpt-3.5-turbo": "You are an helpful assistant for question-answering tasks.\nUse ONLY the pieces of retrieved context delimited by #### to answer the question.\nIf you don't know the answer, just say: \"I don't know<NOANS>\"\n\n####{context}####",
74
74
  "gpt-4": "You are an helpful assistant for question-answering tasks.\nUse ONLY the pieces of retrieved context delimited by #### to answer the question.\nIf you don't know the answer, just say that you don't know.\nIf and only if none of the retrieved context is useful for your task, add this word to the end <NOANS>\n\n####{context}####",
75
75
  "gpt-4-turbo-preview": "You are an helpful assistant for question-answering tasks.\nUse ONLY the pieces of retrieved context delimited by #### to answer the question.\nIf you don't know the answer, just say that you don't know.\nIf and only if none of the retrieved context is useful for your task, add this word to the end <NOANS>\n\n####{context}####",
76
- "gpt-4o": "You are an helpful assistant for question-answering tasks. Follow these steps carefully:\n1. Answer in the same language of the user question, regardless of the retrieved context language\n2. Use ONLY the pieces of the retrieved context to answer the question.\n3. If the retrieved context does not contain sufficient information to generate an accurate and informative answer, return <NOANS>\n\n==Retrieved context start==\n{{context}}\n==Retrieved context end==",
77
- "gpt-4o-mini": "You are an helpful assistant for question-answering tasks. Follow these steps carefully:\n1. Answer in the same language of the user question, regardless of the retrieved context language\n2. Use ONLY the pieces of the retrieved context to answer the question.\n3. If the retrieved context does not contain sufficient information to generate an accurate and informative answer, return <NOANS>\n\n==Retrieved context start==\n{{context}}\n==Retrieved context end==",
76
+ "gpt-4o": "You are an helpful assistant for question-answering tasks. Follow these steps carefully:\n1. Answer in the same language of the user question, regardless of the retrieved context language\n2. Use ONLY the pieces of the retrieved context to answer the question.\n3. If the retrieved context does not contain sufficient information to generate an accurate and informative answer, return <NOANS>\n\n==Retrieved context start==\n{context}\n==Retrieved context end==",
77
+ "gpt-4o-mini": "You are an helpful assistant for question-answering tasks. Follow these steps carefully:\n1. Answer in the same language of the user question, regardless of the retrieved context language\n2. Use ONLY the pieces of the retrieved context to answer the question.\n3. If the retrieved context does not contain sufficient information to generate an accurate and informative answer, return <NOANS>\n\n==Retrieved context start==\n{context}\n==Retrieved context end=="
78
78
  }
79
79
 
80
80
  /**
package/routes/llm.js CHANGED
@@ -3,6 +3,19 @@ var router = express.Router();
3
3
  var winston = require('../config/winston');
4
4
  let Integration = require('../models/integrations');
5
5
  const aiService = require('../services/aiService');
6
+ const multer = require('multer');
7
+ const fileUtils = require('../utils/fileUtils');
8
+
9
+ let MAX_UPLOAD_FILE_SIZE = process.env.MAX_UPLOAD_FILE_SIZE;
10
+ let uploadlimits = undefined;
11
+
12
+ if (MAX_UPLOAD_FILE_SIZE) {
13
+ uploadlimits = {fileSize: parseInt(MAX_UPLOAD_FILE_SIZE)} ;
14
+ winston.debug("Max upload file size is : " + MAX_UPLOAD_FILE_SIZE);
15
+ } else {
16
+ winston.debug("Max upload file size is infinity");
17
+ }
18
+ var upload = multer({limits: uploadlimits});
6
19
 
7
20
  router.post('/preview', async (req, res) => {
8
21
 
@@ -60,4 +73,44 @@ router.post('/preview', async (req, res) => {
60
73
 
61
74
  })
62
75
 
76
+ router.post('/transcription', upload.single('uploadFile'), async (req, res) => {
77
+
78
+ let id_project = req.projectid;
79
+
80
+ let file;
81
+ if (req.body.url) {
82
+ file = await fileUtils.downloadFromUrl(req.body.url);
83
+ } else if (req.file) {
84
+ file = req.file.buffer;
85
+ } else {
86
+ return res.status(400).send({ success: false, error: "No audio file or URL provided"})
87
+ }
88
+
89
+ let key;
90
+
91
+ let integration = await Integration.findOne({ id_project: id_project, name: 'openai' }).catch((err) => {
92
+ winston.error("Error finding integration for openai");
93
+ return res.status(500).send({ success: false, error: "Error finding integration for openai"});
94
+ })
95
+ if (!integration) {
96
+ winston.verbose("Integration for openai not found.")
97
+ return res.status(404).send({ success: false, error: "Integration for openai not found."})
98
+ }
99
+ if (!integration?.value?.apikey) {
100
+ return res.status(422).send({ success: false, error: "The key provided for openai is not valid or undefined." })
101
+ }
102
+
103
+ key = integration.value.apikey;
104
+
105
+ aiService.transcription(file, key).then((response) => {
106
+ winston.verbose("Transcript response: ", response.data);
107
+ res.status(200).send({ text: response.data.text});
108
+ }).catch((err) => {
109
+ winston.error("err: ", err.response?.data)
110
+ res.status(500).send({ success: false, error: err });
111
+ })
112
+
113
+ })
114
+
115
+
63
116
  module.exports = router;
@@ -2,6 +2,7 @@ var winston = require('../config/winston');
2
2
  const axios = require("axios").default;
3
3
  require('dotenv').config();
4
4
  const jwt = require("jsonwebtoken")
5
+ const fs = require("fs");
5
6
 
6
7
  let openai_endpoint = process.env.OPENAI_ENDPOINT;
7
8
  let kb_endpoint = process.env.KB_ENDPOINT;
@@ -36,6 +37,33 @@ class AiService {
36
37
 
37
38
  }
38
39
 
40
+ transcription(buffer, gptkey) {
41
+
42
+ winston.debug("[OPENAI SERVICE] openai endpoint: " + openai_endpoint);
43
+
44
+ return new Promise((resolve, reject) => {
45
+
46
+ const formData = new FormData();
47
+ formData.append('file', buffer, { filename: 'audiofile', contentType: 'audio/mpeg' });
48
+ formData.append('model', 'whisper-1');
49
+
50
+ axios({
51
+ url: openai_endpoint + "/audio/transcriptions",
52
+ headers: {
53
+ ...formData.getHeaders(),
54
+ 'Authorization': "Bearer " + gptkey
55
+ },
56
+ data: formData,
57
+ method: 'POST'
58
+ }).then((resbody) => {
59
+ resolve(resbody);
60
+ }).catch((err) => {
61
+ reject(err);
62
+ })
63
+
64
+ })
65
+ }
66
+
39
67
  // LLM
40
68
  askllm(data) {
41
69
  winston.debug("[OPENAI SERVICE] llm endpoint: " + kb_endpoint_qa);
@@ -0,0 +1,26 @@
1
+ const axios = require("axios").default;
2
+
3
+ class FileUtils {
4
+
5
+ async downloadFromUrl(url) {
6
+
7
+ return new Promise((resolve, reject) => {
8
+
9
+ axios({
10
+ url: url,
11
+ responseType: 'arraybuffer',
12
+ method: 'GET'
13
+ }).then((resbody) => {
14
+ resolve(resbody.data);
15
+ }).catch((err) => {
16
+ reject(err);
17
+ })
18
+
19
+ })
20
+
21
+ }
22
+ }
23
+
24
+ var fileUtils = new FileUtils();
25
+
26
+ module.exports = fileUtils;