@tiledesk/tiledesk-server 2.15.8 → 2.17.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/CHANGELOG.md +21 -0
- package/config/kb/prompt/rag/PromptManager.js +57 -0
- package/config/kb/prompt/rag/general.txt +9 -0
- package/config/kb/prompt/rag/gpt-3.5.txt +9 -0
- package/config/kb/prompt/rag/gpt-4.1.txt +9 -0
- package/config/kb/prompt/rag/gpt-4.txt +11 -0
- package/config/kb/prompt/rag/gpt-4o.txt +9 -0
- package/config/kb/prompt/rag/gpt-5.txt +32 -0
- package/config/kb/prompt/rag/gpt-5.x.txt +32 -0
- package/config/kb/situatedContext.js +6 -0
- package/event/messageEvent.js +24 -0
- package/middleware/file-type.js +109 -36
- package/models/request.js +1 -0
- package/package.json +1 -1
- package/pubmodules/queue/reconnect.js +16 -0
- package/pubmodules/routing-queue/listenerQueued.js +22 -3
- package/pubmodules/scheduler/tasks/closeAgentUnresponsiveRequestTask.js +2 -1
- package/pubmodules/scheduler/tasks/closeBotUnresponsiveRequestTask.js +2 -1
- package/routes/filesp.js +4 -3
- package/routes/kb.js +348 -40
- package/routes/quotes.js +9 -2
- package/routes/request.js +174 -92
- package/routes/webhook.js +5 -0
- package/services/aiManager.js +93 -1
- package/services/aiService.js +33 -8
- package/services/fileGridFsService.js +1 -2
package/CHANGELOG.md
CHANGED
|
@@ -5,6 +5,27 @@
|
|
|
5
5
|
🚀 IN PRODUCTION 🚀
|
|
6
6
|
(https://www.npmjs.com/package/@tiledesk/tiledesk-server/v/2.3.77)
|
|
7
7
|
|
|
8
|
+
# 2.17.2
|
|
9
|
+
- Added support for situated context in kb route
|
|
10
|
+
- Added RAG context management to KB routes
|
|
11
|
+
- Added support for scrape type 0 (alias: trafilatura)
|
|
12
|
+
|
|
13
|
+
# 2.16.2
|
|
14
|
+
- Improved multiplier retrieval for model types in quotes route
|
|
15
|
+
|
|
16
|
+
# 2.16.1
|
|
17
|
+
- Added stream option support to the KB /qa endpoint for real-time responses
|
|
18
|
+
- Enhanced file upload route to correctly handle .webm files
|
|
19
|
+
- Optimized token consumption and management in knowledge base operations
|
|
20
|
+
|
|
21
|
+
# 2.16.0-hf
|
|
22
|
+
- Fixed bug: issue on audio sent from widget
|
|
23
|
+
|
|
24
|
+
# 2.16.0
|
|
25
|
+
- Added possibility to update Knowledge Base content
|
|
26
|
+
- Added rated only filter in Conversations History
|
|
27
|
+
- Improved pending requests management
|
|
28
|
+
|
|
8
29
|
# 2.15.8
|
|
9
30
|
- Updated tybot-connector to 2.0.45
|
|
10
31
|
- Added support for tags management in knowledge base routes
|
|
@@ -0,0 +1,57 @@
|
|
|
1
|
+
const fs = require('fs');
const path = require('path');

// Maps a model identifier to the prompt-template file that suits it.
// Any model not listed here falls back to the "general" template.
const modelMap = {
    "gpt-3.5-turbo": "gpt-3.5.txt",
    "gpt-4": "gpt-4.txt",
    "gpt-4-turbo-preview": "gpt-4.txt",
    "gpt-4o": "gpt-4o.txt",
    "gpt-4o-mini": "gpt-4o.txt",
    "gpt-4.1": "gpt-4.1.txt",
    "gpt-4.1-mini": "gpt-4.1.txt",
    "gpt-4.1-nano": "gpt-4.1.txt",
    "gpt-5": "gpt-5.txt",
    "gpt-5-mini": "gpt-5.txt",
    "gpt-5-nano": "gpt-5.txt",
    "gpt-5.1": "gpt-5.x.txt",
    "gpt-5.2": "gpt-5.x.txt",
    "gpt-5.3-chat-latest": "gpt-5.x.txt",
    "gpt-5.4": "gpt-5.x.txt",
    "gpt-5.4-mini": "gpt-5.x.txt",
    "gpt-5.4-nano": "gpt-5.x.txt",
    "general": "general.txt"
}


/**
 * Loads RAG prompt templates from disk, keyed by model name, with an
 * in-memory cache so each template file is read at most once per name.
 */
class PromptManager {

    /**
     * @param {string} basePath - Directory containing the prompt .txt files.
     */
    constructor(basePath) {
        this.basePath = basePath;
        this.cache = new Map();
    }

    /**
     * Returns the prompt template text for the given model name.
     * Unknown names resolve to the "general" template; if the model-specific
     * file is missing, the "general" template is used as a fallback.
     *
     * @param {string} name - Model identifier (e.g. "gpt-4o").
     * @returns {string} The template file content (utf-8).
     * @throws {Error} If neither the requested template nor the general
     *                 fallback can be read.
     */
    getPrompt(name) {
        if (this.cache.has(name)) {
            return this.cache.get(name);
        }

        const fileName = modelMap[name] || modelMap["general"];
        const generalFileName = modelMap["general"];

        let content;
        try {
            content = fs.readFileSync(path.join(this.basePath, fileName), 'utf-8');
        } catch (err) {
            // Don't retry the same file when the requested template IS the
            // general one — there is no further fallback in that case.
            if (fileName === generalFileName) {
                throw new Error(
                    `Prompt template not found: ${fileName} (basePath: ${this.basePath}): ${err.message}`
                );
            }
            try {
                content = fs.readFileSync(path.join(this.basePath, generalFileName), 'utf-8');
            } catch (fallbackErr) {
                // Surface both the requested file and the failed fallback
                // instead of leaking a bare ENOENT for the wrong path.
                throw new Error(
                    `Prompt template not found: ${fileName}; general fallback ${generalFileName} also failed (basePath: ${this.basePath}): ${fallbackErr.message}`
                );
            }
        }

        this.cache.set(name, content);
        return content;
    }
}

PromptManager.modelMap = modelMap;
module.exports = PromptManager;
|
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
You are a helpful assistant for question-answering tasks. Follow these steps carefully:
|
|
2
|
+
|
|
3
|
+
1. Answer in the same language as the user question, regardless of the retrieved context language
|
|
4
|
+
2. Use ONLY the pieces of the retrieved context and the chat history to answer the question.
|
|
5
|
+
3. If the retrieved context does not contain sufficient information to generate an accurate and informative answer, append <NOANS> at the end of the answer.
|
|
6
|
+
|
|
7
|
+
==Retrieved context start==
|
|
8
|
+
{context}
|
|
9
|
+
==Retrieved context end==
|
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
You are a helpful assistant for question-answering tasks. Follow these steps carefully:
|
|
2
|
+
|
|
3
|
+
1. Answer in the same language as the user question, regardless of the retrieved context language
|
|
4
|
+
2. Use ONLY the pieces of the retrieved context and the chat history to answer the question.
|
|
5
|
+
3. If the retrieved context does not contain sufficient information to generate an accurate and informative answer, append <NOANS> at the end of the answer
|
|
6
|
+
|
|
7
|
+
==Retrieved context start==
|
|
8
|
+
{context}
|
|
9
|
+
==Retrieved context end==
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
You are a helpful assistant for question-answering tasks.
|
|
2
|
+
|
|
3
|
+
Use ONLY the pieces of retrieved context delimited by #### and the chat history to answer the question.
|
|
4
|
+
|
|
5
|
+
If you don't know the answer, just say that you don't know.
|
|
6
|
+
|
|
7
|
+
If and only if none of the retrieved context is useful for your task, add this word to the end <NOANS>
|
|
8
|
+
|
|
9
|
+
####
|
|
10
|
+
{context}
|
|
11
|
+
####
|
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
You are a helpful assistant for question-answering tasks. Follow these steps carefully:
|
|
2
|
+
|
|
3
|
+
1. Answer in the same language as the user question, regardless of the retrieved context language
|
|
4
|
+
2. Use ONLY the pieces of the retrieved context and the chat history to answer the question.
|
|
5
|
+
3. If the retrieved context does not contain sufficient information to generate an accurate and informative answer, return <NOANS>.
|
|
6
|
+
|
|
7
|
+
==Retrieved context start==
|
|
8
|
+
{context}
|
|
9
|
+
==Retrieved context end==
|
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
# ROLE
|
|
2
|
+
You are an AI assistant that answers the user's question using only the information contained in the provided context.
|
|
3
|
+
|
|
4
|
+
# LANGUAGE
|
|
5
|
+
Answer in the same language as the user's question.
|
|
6
|
+
|
|
7
|
+
# CONTEXT
|
|
8
|
+
You will receive a context delimited by ######:
|
|
9
|
+
######
|
|
10
|
+
{context}
|
|
11
|
+
######
|
|
12
|
+
|
|
13
|
+
# INSTRUCTIONS
|
|
14
|
+
- Use only the information explicitly contained in the context.
|
|
15
|
+
- Answer the user's question directly, as a human assistant would.
|
|
16
|
+
- Do not mention the context, the document, the source, or the fact that information was provided.
|
|
17
|
+
- Do not say phrases such as:
|
|
18
|
+
- "according to the context"
|
|
19
|
+
- "in the provided context"
|
|
20
|
+
- "the document says"
|
|
21
|
+
- "based on the information provided"
|
|
22
|
+
- Do not explain your reasoning.
|
|
23
|
+
- Do not repeat the question.
|
|
24
|
+
- Keep the answer concise, clear, and natural.
|
|
25
|
+
- Do not add assumptions, external knowledge, or details not supported by the context.
|
|
26
|
+
|
|
27
|
+
# FALLBACK
|
|
28
|
+
If the context does not contain enough information to answer the question, reply with exactly:
|
|
29
|
+
<NOANS>
|
|
30
|
+
|
|
31
|
+
# OUTPUT
|
|
32
|
+
Return only the final answer, with no preamble and no meta-commentary.
|
|
@@ -0,0 +1,32 @@
|
|
|
1
|
+
# ROLE
|
|
2
|
+
You are an AI assistant that answers the user's question using only the information contained in the provided context.
|
|
3
|
+
|
|
4
|
+
# LANGUAGE
|
|
5
|
+
Answer in the same language as the user's question.
|
|
6
|
+
|
|
7
|
+
# CONTEXT
|
|
8
|
+
You will receive a context delimited by ######:
|
|
9
|
+
######
|
|
10
|
+
{context}
|
|
11
|
+
######
|
|
12
|
+
|
|
13
|
+
# INSTRUCTIONS
|
|
14
|
+
- Use only the information explicitly contained in the context.
|
|
15
|
+
- Answer the user's question directly, as a human assistant would.
|
|
16
|
+
- Do not mention the context, the document, the source, or the fact that information was provided.
|
|
17
|
+
- Do not say phrases such as:
|
|
18
|
+
- "according to the context"
|
|
19
|
+
- "in the provided context"
|
|
20
|
+
- "the document says"
|
|
21
|
+
- "based on the information provided"
|
|
22
|
+
- Do not explain your reasoning.
|
|
23
|
+
- Do not repeat the question.
|
|
24
|
+
- Keep the answer concise, clear, and natural.
|
|
25
|
+
- Do not add assumptions, external knowledge, or details not supported by the context.
|
|
26
|
+
|
|
27
|
+
# FALLBACK
|
|
28
|
+
If the context does not contain enough information to answer the question, reply with exactly:
|
|
29
|
+
<NOANS>
|
|
30
|
+
|
|
31
|
+
# OUTPUT
|
|
32
|
+
Return only the final answer, with no preamble and no meta-commentary.
|
package/event/messageEvent.js
CHANGED
|
@@ -5,6 +5,7 @@ var Message = require("../models/message");
|
|
|
5
5
|
var Faq_kb = require("../models/faq_kb");
|
|
6
6
|
var MessageConstants = require("../models/messageConstants");
|
|
7
7
|
var message2Event = require("../event/message2Event");
|
|
8
|
+
var requestEvent = require("../event/requestEvent");
|
|
8
9
|
|
|
9
10
|
var cacheUtil = require('../utils/cacheUtil');
|
|
10
11
|
var cacheEnabler = require("../services/cacheEnabler");
|
|
@@ -178,6 +179,29 @@ function populateMessageWithRequest(message, eventPrefix) {
|
|
|
178
179
|
messageEvent.on('message.create.simple', populateMessageCreate);
|
|
179
180
|
messageEvent.on('message.update.simple', populateMessageUpdate);
|
|
180
181
|
|
|
182
|
+
// Reopen a pending conversation as soon as the requester (lead) writes again
messageEvent.on('message.create.from.requester', function (messageJson) {
    var req = messageJson.request;
    if (!req) return;
    if (req.workingStatus !== 'pending') return;

    var request_id = req.request_id;
    var id_project = req.id_project;

    var filter = { request_id: request_id, id_project: id_project };
    var update = { $set: { workingStatus: 'open' } };

    Request.findOneAndUpdate(filter, update, { new: true }, function (err, updatedRequest) {
        if (err) {
            winston.error("Error updating request workingStatus from pending to open", err);
            return;
        }
        if (!updatedRequest) return;
        winston.debug("Request workingStatus set to open (was pending)", { request_id, id_project });
        requestEvent.emit('request.workingStatus.update', { request: updatedRequest });
        requestEvent.emit('request.update', updatedRequest);
    });
});
|
|
204
|
+
|
|
181
205
|
|
|
182
206
|
|
|
183
207
|
// // riattiva commentato per performance
|
package/middleware/file-type.js
CHANGED
|
@@ -31,9 +31,10 @@ function areMimeTypesEquivalent(mimeType1, mimeType2) {
|
|
|
31
31
|
'audio/wave': ['audio/wav', 'audio/x-wav', 'audio/vnd.wave'],
|
|
32
32
|
'audio/x-wav': ['audio/wav', 'audio/wave', 'audio/vnd.wave'],
|
|
33
33
|
'audio/vnd.wave': ['audio/wav', 'audio/wave', 'audio/x-wav'],
|
|
34
|
-
'audio/mpeg': ['audio/opus', 'audio/mp3'],
|
|
35
|
-
'audio/mp3': ['audio/mpeg', 'audio/opus'],
|
|
36
|
-
'audio/opus': ['audio/mpeg', 'audio/mp3'],
|
|
34
|
+
'audio/mpeg': ['audio/opus', 'audio/mp3', 'audio/webm'],
|
|
35
|
+
'audio/mp3': ['audio/mpeg', 'audio/opus', 'audio/webm'],
|
|
36
|
+
'audio/opus': ['audio/mpeg', 'audio/mp3', 'audio/webm'],
|
|
37
|
+
'audio/webm': ['audio/mpeg', 'audio/mp3', 'audio/opus'],
|
|
37
38
|
'image/jpeg': ['image/jpg'],
|
|
38
39
|
'image/jpg': ['image/jpeg'],
|
|
39
40
|
'application/x-zip-compressed': ['application/zip'],
|
|
@@ -47,54 +48,126 @@ function areMimeTypesEquivalent(mimeType1, mimeType2) {
|
|
|
47
48
|
return false;
|
|
48
49
|
}
|
|
49
50
|
|
|
51
|
+
// Magic bytes for fallback when file-type throws (e.g. strtok3/token-types Uint8Array vs Buffer)
const MAGIC_SIGNATURES = {
  'video/webm': [[0x1A, 0x45, 0xDF, 0xA3]], // EBML
  'audio/webm': [[0x1A, 0x45, 0xDF, 0xA3]],
  'audio/mpeg': [[0xFF, 0xFB], [0xFF, 0xFA], [0xFF, 0xF3], [0xFF, 0xF2], [0x49, 0x44, 0x33]], // ID3 or MP3 frame
  'audio/mp3': [[0xFF, 0xFB], [0xFF, 0xFA], [0xFF, 0xF3], [0xFF, 0xF2], [0x49, 0x44, 0x33]],
  'image/png': [[0x89, 0x50, 0x4E, 0x47, 0x0D, 0x0A, 0x1A, 0x0A]],
  'image/jpeg': [[0xFF, 0xD8, 0xFF]],
  'image/gif': [[0x47, 0x49, 0x46, 0x38, 0x37, 0x61], [0x47, 0x49, 0x46, 0x38, 0x39, 0x61]],
  'application/pdf': [[0x25, 0x50, 0x44, 0x46]],
};

/**
 * Checks whether the leading bytes of `buf` match any known magic
 * signature for the given (case-insensitive) mimetype.
 * Returns false for mimetypes with no registered signature.
 */
function magicMatches(buf, mimetype) {
  const key = mimetype && mimetype.toLowerCase();
  const candidates = MAGIC_SIGNATURES[key];
  if (!candidates) return false;
  return candidates.some((sig) => {
    if (buf.length < sig.length) return false;
    for (let i = 0; i < sig.length; i++) {
      // Out-of-bounds / sparse entries are treated as a mismatch (-1)
      const actual = buf[i] !== undefined ? (buf[i] & 0xFF) : -1;
      if (actual !== sig[i]) return false;
    }
    return true;
  });
}
|
|
77
|
+
|
|
78
|
+
const BASE64_REGEX = /^[A-Za-z0-9+/]+=*$/;

/**
 * Normalizes arbitrary buffer-like input into a freshly-allocated Node.js
 * Buffer. file-type (and token-types/strtok3) require a real Buffer with
 * methods like readUInt8; upstream sources (GridFS, BSON Binary, base64
 * form bodies) may hand us Uint8Array, ArrayBuffer, or plain strings.
 * Always copying into a new Buffer guarantees file-type never receives a
 * buffer-like that loses readUInt8 when sliced (e.g. by strtok3).
 */
function ensureBuffer(buffer) {
  if (!buffer) return buffer;

  if (typeof buffer === 'string' && buffer.length > 0) {
    // Client may send the body as base64; otherwise treat it as utf-8 text
    const compact = buffer.replace(/\s/g, '');
    return BASE64_REGEX.test(compact)
      ? Buffer.from(compact, 'base64')
      : Buffer.from(buffer, 'utf8');
  }

  // Obtain a Uint8Array view over the incoming bytes, then copy it so the
  // returned Buffer owns its own memory. Note: Buffer is a Uint8Array
  // subclass, so Buffers take the first branch and are copied too.
  let view;
  if (buffer instanceof Uint8Array) {
    view = buffer;
  } else if (buffer instanceof ArrayBuffer) {
    view = new Uint8Array(buffer);
  } else if (buffer && typeof buffer.buffer === 'object' && buffer.buffer instanceof ArrayBuffer) {
    view = new Uint8Array(buffer.buffer, buffer.byteOffset, buffer.byteLength);
  } else {
    view = new Uint8Array(Buffer.from(buffer));
  }
  return Buffer.from(view);
}
|
|
114
|
+
|
|
50
115
|
async function verifyFileContent(buffer, mimetype) {
|
|
51
116
|
if (!buffer) throw new Error("No file provided");
|
|
52
117
|
|
|
118
|
+
const buf = ensureBuffer(buffer);
|
|
119
|
+
|
|
120
|
+
let fileType;
|
|
53
121
|
try {
|
|
54
|
-
|
|
122
|
+
fileType = await FileType.fromBuffer(buf);
|
|
123
|
+
} catch (err) {
|
|
124
|
+
// strtok3 uses Uint8Array for numBuffer but token-types expects Buffer.readUInt8 (known compat bug in deps)
|
|
125
|
+
if (err && typeof err.message === 'string' && err.message.includes('readUInt8')) {
|
|
126
|
+
if (mimetype && magicMatches(buf, mimetype)) return true;
|
|
127
|
+
const err2 = new Error(`File content could not be verified. Declared mimetype: ${mimetype}`);
|
|
128
|
+
err2.source = "FileContentVerification";
|
|
129
|
+
throw err2;
|
|
130
|
+
}
|
|
131
|
+
throw err;
|
|
132
|
+
}
|
|
55
133
|
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
|
|
65
|
-
const err = new Error(`File content is not valid text for mimetype: ${mimetype}`);
|
|
66
|
-
err.source = "FileContentVerification";
|
|
67
|
-
throw err;
|
|
68
|
-
}
|
|
69
|
-
} else if (mimetype && mimetype.startsWith('image/svg')) {
|
|
70
|
-
// Handle SVG files (can be image/svg+xml or variants)
|
|
71
|
-
try {
|
|
72
|
-
buffer.toString('utf8');
|
|
73
|
-
return true;
|
|
74
|
-
} catch (e) {
|
|
75
|
-
const err = new Error(`File content is not valid text for mimetype: ${mimetype}`);
|
|
76
|
-
err.source = "FileContentVerification";
|
|
77
|
-
throw err;
|
|
78
|
-
}
|
|
79
|
-
} else {
|
|
80
|
-
// For non-text files, FileType should be able to detect them
|
|
81
|
-
const err = new Error(`File content does not match mimetype. Detected: unknown, provided: ${mimetype}`);
|
|
134
|
+
// If FileType couldn't detect the file type (returns null/undefined)
|
|
135
|
+
if (!fileType) {
|
|
136
|
+
// For text-based MIME types, accept the declared mimetype since FileType can't detect them
|
|
137
|
+
if (mimetype && TEXT_MIME_TYPES.includes(mimetype)) {
|
|
138
|
+
try {
|
|
139
|
+
buf.toString('utf8');
|
|
140
|
+
return true;
|
|
141
|
+
} catch (e) {
|
|
142
|
+
const err = new Error(`File content is not valid text for mimetype: ${mimetype}`);
|
|
82
143
|
err.source = "FileContentVerification";
|
|
83
144
|
throw err;
|
|
84
145
|
}
|
|
85
146
|
}
|
|
86
|
-
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
|
|
147
|
+
if (mimetype && mimetype.startsWith('image/svg')) {
|
|
148
|
+
try {
|
|
149
|
+
buf.toString('utf8');
|
|
150
|
+
return true;
|
|
151
|
+
} catch (e) {
|
|
152
|
+
const err = new Error(`File content is not valid text for mimetype: ${mimetype}`);
|
|
90
153
|
err.source = "FileContentVerification";
|
|
91
154
|
throw err;
|
|
155
|
+
}
|
|
92
156
|
}
|
|
157
|
+
if (mimetype && magicMatches(buf, mimetype)) return true;
|
|
158
|
+
const err = new Error(`File content does not match mimetype. Detected: unknown, provided: ${mimetype}`);
|
|
159
|
+
err.source = "FileContentVerification";
|
|
160
|
+
throw err;
|
|
161
|
+
}
|
|
93
162
|
|
|
94
|
-
|
|
95
|
-
|
|
163
|
+
// If FileType detected a type, it must match the declared mimetype (or be equivalent)
|
|
164
|
+
if (mimetype && !areMimeTypesEquivalent(fileType.mime, mimetype)) {
|
|
165
|
+
const err = new Error(`File content does not match mimetype. Detected: ${fileType.mime}, provided: ${mimetype}`);
|
|
166
|
+
err.source = "FileContentVerification";
|
|
96
167
|
throw err;
|
|
97
168
|
}
|
|
169
|
+
|
|
170
|
+
return true;
|
|
98
171
|
}
|
|
99
172
|
|
|
100
173
|
module.exports = verifyFileContent;
|
package/models/request.js
CHANGED
|
@@ -507,6 +507,7 @@ RequestSchema.index({ id_project: 1, createdAt: -1, status: 1 })
|
|
|
507
507
|
RequestSchema.index({ id_project: 1, preflight: 1, smartAssignment: 1, "snapshot.department.routing": 1, createdAt: 1, status: 1 })
|
|
508
508
|
|
|
509
509
|
RequestSchema.index({ status: 1, hasBot: 1, updatedAt: 1 }) // For closing unresponsive requests
|
|
510
|
+
RequestSchema.index({ status: 1, hasBot: 1, workingStatus: 1, updatedAt: 1 }) // For closing unresponsive requests
|
|
510
511
|
|
|
511
512
|
// Contact search by phone / email
|
|
512
513
|
RequestSchema.index({ id_project: 1, 'contact.phone': 1 });
|
package/package.json
CHANGED
|
@@ -175,6 +175,11 @@ function startWorker() {
|
|
|
175
175
|
winston.info("Data queue", oka)
|
|
176
176
|
});
|
|
177
177
|
|
|
178
|
+
ch.bindQueue(_ok.queue, exchange, "request_workingStatus_update", {}, function(err3, oka) {
|
|
179
|
+
winston.info("Queue bind: "+_ok.queue+ " err: "+err3+ " key: request_workingStatus_update");
|
|
180
|
+
winston.info("Data queue", oka)
|
|
181
|
+
});
|
|
182
|
+
|
|
178
183
|
ch.bindQueue(_ok.queue, exchange, "message_create", {}, function(err3, oka) {
|
|
179
184
|
winston.info("Queue bind: "+_ok.queue+ " err: "+err3+ " key: message_create");
|
|
180
185
|
winston.info("Data queue", oka)
|
|
@@ -286,6 +291,11 @@ function work(msg, cb) {
|
|
|
286
291
|
requestEvent.emit('request.close.extended.queue', JSON.parse(message_string));
|
|
287
292
|
}
|
|
288
293
|
|
|
294
|
+
if (topic === 'request_workingStatus_update') {
|
|
295
|
+
winston.debug("reconnect here topic:" + topic);
|
|
296
|
+
requestEvent.emit('request.workingStatus.update.queue', JSON.parse(message_string));
|
|
297
|
+
}
|
|
298
|
+
|
|
289
299
|
if (topic === 'message_create') {
|
|
290
300
|
winston.debug("reconnect here topic:" + topic);
|
|
291
301
|
// requestEvent.emit('request.create.queue', msg.content);
|
|
@@ -410,6 +420,12 @@ function listen() {
|
|
|
410
420
|
});
|
|
411
421
|
});
|
|
412
422
|
|
|
423
|
+
requestEvent.on('request.workingStatus.update', function(request) {
|
|
424
|
+
setImmediate(() => {
|
|
425
|
+
publish(exchange, "request_workingStatus_update", Buffer.from(JSON.stringify(request)));
|
|
426
|
+
});
|
|
427
|
+
});
|
|
428
|
+
|
|
413
429
|
requestEvent.on('request.snapshot.update', function(data) {
|
|
414
430
|
setImmediate(() => {
|
|
415
431
|
winston.debug("reconnect request.snapshot.update")
|
|
@@ -50,7 +50,7 @@ class Listener {
|
|
|
50
50
|
return winston.warn("Chatbot is not a project_user. Skip update.")
|
|
51
51
|
}
|
|
52
52
|
|
|
53
|
-
return Request.countDocuments({ id_project: id_project, participantsAgents: id_user, status: { $lt: 1000 }, draft: { $in: [null, false] } }, (err, requestsCount) => {
|
|
53
|
+
return Request.countDocuments({ id_project: id_project, participantsAgents: id_user, status: { $lt: 1000 }, draft: { $in: [null, false] }, workingStatus: { $ne: 'pending' } }, (err, requestsCount) => {
|
|
54
54
|
winston.verbose("requestsCount for id_user: ", id_user, "and project: ", id_project, "-->", requestsCount);
|
|
55
55
|
if (err) {
|
|
56
56
|
return winston.error(err);
|
|
@@ -236,8 +236,27 @@ class Listener {
|
|
|
236
236
|
});
|
|
237
237
|
});
|
|
238
238
|
|
|
239
|
-
|
|
240
|
-
|
|
239
|
+
var requestWorkingStatusUpdateKey = 'request.workingStatus.update';
|
|
240
|
+
if (requestEvent.queueEnabled) {
|
|
241
|
+
requestWorkingStatusUpdateKey = 'request.workingStatus.update.queue';
|
|
242
|
+
}
|
|
243
|
+
winston.debug('Route queue requestWorkingStatusUpdateKey: ' + requestWorkingStatusUpdateKey);
|
|
244
|
+
|
|
245
|
+
requestEvent.on(requestWorkingStatusUpdateKey, async (data) => {
|
|
246
|
+
winston.debug('Route queue WorkingStatus Update');
|
|
247
|
+
|
|
248
|
+
var request = data.request;
|
|
249
|
+
var participantIds = (request.participantsAgents && request.participantsAgents.length)
|
|
250
|
+
? request.participantsAgents
|
|
251
|
+
: (request.participatingAgents || []).map(u => u._id || u.id);
|
|
252
|
+
setImmediate(() => {
|
|
253
|
+
participantIds.forEach(id_user => {
|
|
254
|
+
if (id_user && !String(id_user).startsWith('bot_')) {
|
|
255
|
+
this.updateProjectUser(id_user, request.id_project, 0);
|
|
256
|
+
}
|
|
257
|
+
});
|
|
258
|
+
});
|
|
259
|
+
});
|
|
241
260
|
}
|
|
242
261
|
|
|
243
262
|
}
|
package/routes/filesp.js
CHANGED
|
@@ -124,9 +124,10 @@ function areMimeTypesEquivalent(mimeType1, mimeType2) {
|
|
|
124
124
|
'audio/wave': ['audio/wav', 'audio/x-wav', 'audio/vnd.wave'],
|
|
125
125
|
'audio/x-wav': ['audio/wav', 'audio/wave', 'audio/vnd.wave'],
|
|
126
126
|
'audio/vnd.wave': ['audio/wav', 'audio/wave', 'audio/x-wav'],
|
|
127
|
-
'audio/mpeg': ['audio/opus', 'audio/mp3'],
|
|
128
|
-
'audio/mp3': ['audio/mpeg', 'audio/opus'],
|
|
129
|
-
'audio/opus': ['audio/mpeg', 'audio/mp3'],
|
|
127
|
+
'audio/mpeg': ['audio/opus', 'audio/mp3', 'audio/webm'],
|
|
128
|
+
'audio/mp3': ['audio/mpeg', 'audio/opus', 'audio/webm'],
|
|
129
|
+
'audio/opus': ['audio/mpeg', 'audio/mp3', 'audio/webm'],
|
|
130
|
+
'audio/webm': ['audio/mpeg', 'audio/mp3', 'audio/opus'],
|
|
130
131
|
'image/jpeg': ['image/jpg'],
|
|
131
132
|
'image/jpg': ['image/jpeg'],
|
|
132
133
|
'application/x-zip-compressed': ['application/zip'],
|