@tiledesk/tiledesk-server 2.16.0 → 2.17.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -5,6 +5,22 @@
5
5
  🚀 IN PRODUCTION 🚀
6
6
  (https://www.npmjs.com/package/@tiledesk/tiledesk-server/v/2.3.77)
7
7
 
8
+ # 2.17.2
9
+ - Added support for situated context in kb route
10
+ - Added RAG context management to KB routes
11
+ - Added support for scrape type 0 (alias: trafilatura)
12
+
13
+ # 2.16.2
14
+ - Improved multiplier retrieval for model types in quotes route
15
+
16
+ # 2.16.1
17
+ - Added stream option support to the KB /qa endpoint for real-time responses
18
+ - Enhanced file upload route to correctly handle .webm files
19
+ - Optimized token consumption and management in knowledge base operations
20
+
21
+ # 2.16.0-hf
22
+ - Fixed bug: issue on audio sent from widget
23
+
8
24
  # 2.16.0
9
25
  - Added possibility to update Knowledge Base content
10
26
  - Added rated only filter in Conversations History
@@ -0,0 +1,57 @@
1
const fs = require('fs');
const path = require('path');

// Maps a model name to its prompt-template file inside `basePath`.
// Unknown models fall back to "general".
const modelMap = {
    "gpt-3.5-turbo": "gpt-3.5.txt",
    "gpt-4": "gpt-4.txt",
    "gpt-4-turbo-preview": "gpt-4.txt",
    "gpt-4o": "gpt-4o.txt",
    "gpt-4o-mini": "gpt-4o.txt",
    "gpt-4.1": "gpt-4.1.txt",
    "gpt-4.1-mini": "gpt-4.1.txt",
    "gpt-4.1-nano": "gpt-4.1.txt",
    "gpt-5": "gpt-5.txt",
    "gpt-5-mini": "gpt-5.txt",
    "gpt-5-nano": "gpt-5.txt",
    "gpt-5.1": "gpt-5.x.txt",
    "gpt-5.2": "gpt-5.x.txt",
    "gpt-5.3-chat-latest": "gpt-5.x.txt",
    "gpt-5.4": "gpt-5.x.txt",
    "gpt-5.4-mini": "gpt-5.x.txt",
    "gpt-5.4-nano": "gpt-5.x.txt",
    "general": "general.txt"
}

/**
 * Loads RAG system-prompt templates from disk, one file per model family,
 * with an in-memory cache keyed by model name.
 *
 * Lookup order: modelMap[name] -> modelMap["general"] (for unknown models);
 * if reading the model's file fails, the "general" file is read instead.
 */
class PromptManager {

    /**
     * @param {string} basePath - directory containing the prompt .txt files
     */
    constructor(basePath) {
        this.basePath = basePath;
        this.cache = new Map();
    }

    /**
     * Returns the prompt template for `name`, reading it from disk on first
     * access and caching it afterwards.
     *
     * @param {string} name - model name (a modelMap key, or anything for the general fallback)
     * @returns {string} the prompt file content (utf-8)
     * @throws when neither the model's file nor the general fallback can be read
     */
    getPrompt(name) {
        if (this.cache.has(name)) {
            return this.cache.get(name);
        }

        const fileName = modelMap[name] || modelMap["general"];
        const filePath = path.join(this.basePath, fileName);

        let content;
        try {
            content = fs.readFileSync(filePath, 'utf-8');
        } catch (err) {
            // Already at the fallback file: re-reading the same path would
            // just fail again, so surface the original error.
            if (fileName === modelMap["general"]) {
                throw err;
            }
            content = fs.readFileSync(
                path.join(this.basePath, modelMap["general"]),
                'utf-8'
            );
        }

        this.cache.set(name, content);
        return content;
    }
}

// Expose the map so callers can check whether a model has a dedicated file
// (used by the env-override logic in routes/kb.js).
PromptManager.modelMap = modelMap;
module.exports = PromptManager;
@@ -0,0 +1,9 @@
1
+ You are an helpful assistant for question-answering tasks. Follow these steps carefully:
2
+
3
+ 1. Answer in the same language of the user question, regardless of the retrieved context language
4
+ 2. Use ONLY the pieces of the retrieved context and the chat history to answer the question.
5
+ 3. If the retrieved context does not contain sufficient information to generate an accurate and informative answer, append <NOANS> at the end of the answer.
6
+
7
+ ==Retrieved context start==
8
+ {context}
9
+ ==Retrieved context end==
@@ -0,0 +1,9 @@
1
+ You are an helpful assistant for question-answering tasks.
2
+
3
+ Use ONLY the pieces of retrieved context delimited by #### and the chat history to answer the question.
4
+
5
+ If you don't know the answer, just say: "I don't know<NOANS>"
6
+
7
+ ####
8
+ {context}
9
+ ####
@@ -0,0 +1,9 @@
1
+ You are an helpful assistant for question-answering tasks. Follow these steps carefully:
2
+
3
+ 1. Answer in the same language of the user question, regardless of the retrieved context language
4
+ 2. Use ONLY the pieces of the retrieved context and the chat history to answer the question.
5
+ 3. If the retrieved context does not contain sufficient information to generate an accurate and informative answer, append <NOANS> at the end of the answer
6
+
7
+ ==Retrieved context start==
8
+ {context}
9
+ ==Retrieved context end==
@@ -0,0 +1,11 @@
1
+ You are an helpful assistant for question-answering tasks.
2
+
3
+ Use ONLY the pieces of retrieved context delimited by #### and the chat history to answer the question.
4
+
5
+ If you don't know the answer, just say that you don't know.
6
+
7
+ If and only if none of the retrieved context is useful for your task, add this word to the end <NOANS>
8
+
9
+ ####
10
+ {context}
11
+ ####
@@ -0,0 +1,9 @@
1
+ You are an helpful assistant for question-answering tasks. Follow these steps carefully:
2
+
3
+ 1. Answer in the same language of the user question, regardless of the retrieved context language
4
+ 2. Use ONLY the pieces of the retrieved context and the chat history to answer the question.
5
+ 3. If the retrieved context does not contain sufficient information to generate an accurate and informative answer, return <NOANS>.
6
+
7
+ ==Retrieved context start==
8
+ {context}
9
+ ==Retrieved context end==
@@ -0,0 +1,32 @@
1
+ # ROLE
2
+ You are an AI assistant that answers the user's question using only the information contained in the provided context.
3
+
4
+ # LANGUAGE
5
+ Answer in the same language as the user's question.
6
+
7
+ # CONTEXT
8
+ You will receive a context delimited by ######:
9
+ ######
10
+ {context}
11
+ ######
12
+
13
+ # INSTRUCTIONS
14
+ - Use only the information explicitly contained in the context.
15
+ - Answer the user's question directly, as a human assistant would.
16
+ - Do not mention the context, the document, the source, or the fact that information was provided.
17
+ - Do not say phrases such as:
18
+ - "according to the context"
19
+ - "in the provided context"
20
+ - "the document says"
21
+ - "based on the information provided"
22
+ - Do not explain your reasoning.
23
+ - Do not repeat the question.
24
+ - Keep the answer concise, clear, and natural.
25
+ - Do not add assumptions, external knowledge, or details not supported by the context.
26
+
27
+ # FALLBACK
28
+ If the context does not contain enough information to answer the question, reply with exactly:
29
+ <NOANS>
30
+
31
+ # OUTPUT
32
+ Return only the final answer, with no preamble and no meta-commentary.
@@ -0,0 +1,32 @@
1
+ # ROLE
2
+ You are an AI assistant that answers the user's question using only the information contained in the provided context.
3
+
4
+ # LANGUAGE
5
+ Answer in the same language as the user's question.
6
+
7
+ # CONTEXT
8
+ You will receive a context delimited by ######:
9
+ ######
10
+ {context}
11
+ ######
12
+
13
+ # INSTRUCTIONS
14
+ - Use only the information explicitly contained in the context.
15
+ - Answer the user's question directly, as a human assistant would.
16
+ - Do not mention the context, the document, the source, or the fact that information was provided.
17
+ - Do not say phrases such as:
18
+ - "according to the context"
19
+ - "in the provided context"
20
+ - "the document says"
21
+ - "based on the information provided"
22
+ - Do not explain your reasoning.
23
+ - Do not repeat the question.
24
+ - Keep the answer concise, clear, and natural.
25
+ - Do not add assumptions, external knowledge, or details not supported by the context.
26
+
27
+ # FALLBACK
28
+ If the context does not contain enough information to answer the question, reply with exactly:
29
+ <NOANS>
30
+
31
+ # OUTPUT
32
+ Return only the final answer, with no preamble and no meta-commentary.
@@ -0,0 +1,6 @@
1
// Situated-context (contextual retrieval) settings, driven entirely by env vars.
module.exports = {
    // Feature flag: only the exact string "true" enables it.
    enable: process.env.SITUATED_CONTEXT_ENABLE === "true",
    provider: process.env.SITUATED_CONTEXT_PROVIDER || "openai",
    model: process.env.SITUATED_CONTEXT_MODEL || "gpt-4o",
    // Intentionally empty placeholder: filled at request time by
    // normalizeSituatedContext() in routes/kb.js (SITUATED_CONTEXT_API_KEY or GPTKEY).
    api_key: ""
}
@@ -31,9 +31,10 @@ function areMimeTypesEquivalent(mimeType1, mimeType2) {
31
31
  'audio/wave': ['audio/wav', 'audio/x-wav', 'audio/vnd.wave'],
32
32
  'audio/x-wav': ['audio/wav', 'audio/wave', 'audio/vnd.wave'],
33
33
  'audio/vnd.wave': ['audio/wav', 'audio/wave', 'audio/x-wav'],
34
- 'audio/mpeg': ['audio/opus', 'audio/mp3'],
35
- 'audio/mp3': ['audio/mpeg', 'audio/opus'],
36
- 'audio/opus': ['audio/mpeg', 'audio/mp3'],
34
+ 'audio/mpeg': ['audio/opus', 'audio/mp3', 'audio/webm'],
35
+ 'audio/mp3': ['audio/mpeg', 'audio/opus', 'audio/webm'],
36
+ 'audio/opus': ['audio/mpeg', 'audio/mp3', 'audio/webm'],
37
+ 'audio/webm': ['audio/mpeg', 'audio/mp3', 'audio/opus'],
37
38
  'image/jpeg': ['image/jpg'],
38
39
  'image/jpg': ['image/jpeg'],
39
40
  'application/x-zip-compressed': ['application/zip'],
@@ -47,54 +48,126 @@ function areMimeTypesEquivalent(mimeType1, mimeType2) {
47
48
  return false;
48
49
  }
49
50
 
51
// Minimal magic-byte signatures, used as a fallback when the file-type
// library throws (e.g. strtok3/token-types Uint8Array vs Buffer issue).
const MAGIC_SIGNATURES = {
  'video/webm': [[0x1A, 0x45, 0xDF, 0xA3]], // EBML
  'audio/webm': [[0x1A, 0x45, 0xDF, 0xA3]],
  'audio/mpeg': [[0xFF, 0xFB], [0xFF, 0xFA], [0xFF, 0xF3], [0xFF, 0xF2], [0x49, 0x44, 0x33]], // ID3 or MP3 frame
  'audio/mp3': [[0xFF, 0xFB], [0xFF, 0xFA], [0xFF, 0xF3], [0xFF, 0xF2], [0x49, 0x44, 0x33]],
  'image/png': [[0x89, 0x50, 0x4E, 0x47, 0x0D, 0x0A, 0x1A, 0x0A]],
  'image/jpeg': [[0xFF, 0xD8, 0xFF]],
  'image/gif': [[0x47, 0x49, 0x46, 0x38, 0x37, 0x61], [0x47, 0x49, 0x46, 0x38, 0x39, 0x61]], // GIF87a / GIF89a
  'application/pdf': [[0x25, 0x50, 0x44, 0x46]],
};

/**
 * Returns true when `buf` starts with one of the known magic-byte
 * signatures registered for the declared `mimetype` (case-insensitive).
 * Mimetypes without a registered signature always yield false.
 */
function magicMatches(buf, mimetype) {
  const key = mimetype && mimetype.toLowerCase();
  const signatures = MAGIC_SIGNATURES[key];
  if (!signatures) return false;
  return signatures.some((sig) =>
    buf.length >= sig.length &&
    sig.every((expected, i) => buf[i] !== undefined && (buf[i] & 0xFF) === expected)
  );
}
77
+
78
// Conservative base64 shape check: alphabet characters plus optional '=' padding.
const BASE64_REGEX = /^[A-Za-z0-9+/]+=*$/;

/**
 * Ensures the input is a genuine Node.js Buffer copy.
 *
 * file-type (and its token-types/strtok3 dependencies) require a Buffer with
 * methods like readUInt8; GridFS or other sources may return a Uint8Array,
 * ArrayBuffer, buffer-backed view (e.g. BSON Binary), or — when the client
 * sends base64 — a plain string. We always allocate a NEW Buffer and copy the
 * bytes so file-type never receives a buffer-like that loses readUInt8 when
 * sliced (e.g. by strtok3).
 *
 * NOTE: any non-empty string consisting only of base64-alphabet characters is
 * decoded as base64 (heuristic); all other strings are treated as UTF-8 text.
 *
 * @param {Buffer|Uint8Array|ArrayBuffer|string|*} buffer raw file payload
 * @returns {Buffer|*} a fresh Buffer copy, or the input unchanged when falsy
 */
function ensureBuffer(buffer) {
  if (!buffer) return buffer;

  // Base64 string (e.g. client sends form body as base64): decode to binary.
  // (The falsy guard above already handled the empty string.)
  if (typeof buffer === 'string') {
    const compact = buffer.replace(/\s/g, '');
    if (BASE64_REGEX.test(compact)) {
      return Buffer.from(compact, 'base64');
    }
    return Buffer.from(buffer, 'utf8');
  }

  // Normalize every binary-ish input to a Uint8Array view, then copy it into
  // a brand-new Buffer so file-type's internal slices are always real Buffers.
  // Node Buffers ARE Uint8Arrays, so they take the first branch; the original
  // code's separate Buffer.isBuffer branch was unreachable and is removed.
  let view;
  if (buffer instanceof Uint8Array) {
    view = buffer;
  } else if (buffer instanceof ArrayBuffer) {
    view = new Uint8Array(buffer);
  } else if (typeof buffer.buffer === 'object' && buffer.buffer instanceof ArrayBuffer) {
    view = new Uint8Array(buffer.buffer, buffer.byteOffset, buffer.byteLength);
  } else {
    view = new Uint8Array(Buffer.from(buffer));
  }
  return Buffer.from(view);
}
114
+
50
115
/**
 * Verifies that a file's actual binary content is consistent with its
 * declared mimetype.
 *
 * Detection strategy, in order:
 *  1. Normalize the payload to a real Buffer (ensureBuffer) and ask the
 *     file-type library to sniff it.
 *  2. If file-type throws its known readUInt8 compat error, fall back to
 *     the local magic-byte table (magicMatches).
 *  3. If file-type cannot detect a type, accept declared text mimetypes
 *     (TEXT_MIME_TYPES) and SVG after a utf8 decode, then try magic bytes.
 *  4. If file-type did detect a type, require it to be equal/equivalent to
 *     the declared mimetype (areMimeTypesEquivalent).
 *
 * NOTE(review): ensureBuffer, magicMatches, FileType, TEXT_MIME_TYPES and
 * areMimeTypesEquivalent are defined elsewhere in this file/module.
 *
 * @param {Buffer|Uint8Array|ArrayBuffer|string} buffer - raw file content
 * @param {string} mimetype - mimetype declared by the uploader
 * @returns {Promise<true>} resolves true when content and mimetype agree
 * @throws {Error} with err.source === "FileContentVerification" on mismatch
 */
async function verifyFileContent(buffer, mimetype) {
    if (!buffer) throw new Error("No file provided");

    // Normalize to a fresh Buffer copy: file-type needs Buffer.readUInt8.
    const buf = ensureBuffer(buffer);

    let fileType;
    try {
        fileType = await FileType.fromBuffer(buf);
    } catch (err) {
        // strtok3 uses Uint8Array for numBuffer but token-types expects
        // Buffer.readUInt8 (known compat bug in deps): for exactly that
        // error, fall back to our own magic-byte check.
        if (err && typeof err.message === 'string' && err.message.includes('readUInt8')) {
            if (mimetype && magicMatches(buf, mimetype)) return true;
            const err2 = new Error(`File content could not be verified. Declared mimetype: ${mimetype}`);
            err2.source = "FileContentVerification";
            throw err2;
        }
        throw err;
    }

    // If FileType couldn't detect the file type (returns null/undefined)
    if (!fileType) {
        // For text-based MIME types, accept the declared mimetype since
        // FileType can't detect them; verify the content decodes as utf8.
        if (mimetype && TEXT_MIME_TYPES.includes(mimetype)) {
            try {
                buf.toString('utf8');
                return true;
            } catch (e) {
                const err = new Error(`File content is not valid text for mimetype: ${mimetype}`);
                err.source = "FileContentVerification";
                throw err;
            }
        }
        // SVG is text-based too (image/svg+xml and variants).
        if (mimetype && mimetype.startsWith('image/svg')) {
            try {
                buf.toString('utf8');
                return true;
            } catch (e) {
                const err = new Error(`File content is not valid text for mimetype: ${mimetype}`);
                err.source = "FileContentVerification";
                throw err;
            }
        }
        // Last resort: magic-byte signature check for the declared mimetype.
        if (mimetype && magicMatches(buf, mimetype)) return true;
        const err = new Error(`File content does not match mimetype. Detected: unknown, provided: ${mimetype}`);
        err.source = "FileContentVerification";
        throw err;
    }

    // If FileType detected a type, it must match the declared mimetype (or be equivalent)
    if (mimetype && !areMimeTypesEquivalent(fileType.mime, mimetype)) {
        const err = new Error(`File content does not match mimetype. Detected: ${fileType.mime}, provided: ${mimetype}`);
        err.source = "FileContentVerification";
        throw err;
    }

    return true;
}

module.exports = verifyFileContent;
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "name": "@tiledesk/tiledesk-server",
3
3
  "description": "The Tiledesk server module",
4
- "version": "2.16.0",
4
+ "version": "2.17.2",
5
5
  "scripts": {
6
6
  "start": "node ./bin/www",
7
7
  "pretest": "mongodb-runner start",
package/routes/filesp.js CHANGED
@@ -124,9 +124,10 @@ function areMimeTypesEquivalent(mimeType1, mimeType2) {
124
124
  'audio/wave': ['audio/wav', 'audio/x-wav', 'audio/vnd.wave'],
125
125
  'audio/x-wav': ['audio/wav', 'audio/wave', 'audio/vnd.wave'],
126
126
  'audio/vnd.wave': ['audio/wav', 'audio/wave', 'audio/x-wav'],
127
- 'audio/mpeg': ['audio/opus', 'audio/mp3'],
128
- 'audio/mp3': ['audio/mpeg', 'audio/opus'],
129
- 'audio/opus': ['audio/mpeg', 'audio/mp3'],
127
+ 'audio/mpeg': ['audio/opus', 'audio/mp3', 'audio/webm'],
128
+ 'audio/mp3': ['audio/mpeg', 'audio/opus', 'audio/webm'],
129
+ 'audio/opus': ['audio/mpeg', 'audio/mp3', 'audio/webm'],
130
+ 'audio/webm': ['audio/mpeg', 'audio/mp3', 'audio/opus'],
130
131
  'image/jpeg': ['image/jpg'],
131
132
  'image/jpg': ['image/jpeg'],
132
133
  'application/x-zip-compressed': ['application/zip'],
package/routes/kb.js CHANGED
@@ -80,6 +80,37 @@ let default_preview_settings = {
80
80
  const default_engine = require('../config/kb/engine');
81
81
  const default_engine_hybrid = require('../config/kb/engine.hybrid');
82
82
  const default_embedding = require('../config/kb/embedding');
83
+ const PromptManager = require('../config/kb/prompt/rag/PromptManager');
84
+ const situatedContext = require('../config/kb/situatedContext');
85
+
86
+ const ragPromptManager = new PromptManager(path.join(__dirname, '../config/kb/prompt/rag'));
87
+
88
// Per-model env-var overrides for the RAG system prompt. An empty/unset env
// var is ignored (falsy), falling through to the on-disk template.
const RAG_CONTEXT_ENV_OVERRIDES = {
    "gpt-3.5-turbo": process.env.GPT_3_5_CONTEXT,
    "gpt-4": process.env.GPT_4_CONTEXT,
    "gpt-4-turbo-preview": process.env.GPT_4T_CONTEXT,
    "gpt-4o": process.env.GPT_4O_CONTEXT,
    "gpt-4o-mini": process.env.GPT_4O_MINI_CONTEXT,
    "gpt-4.1": process.env.GPT_4_1_CONTEXT,
    "gpt-4.1-mini": process.env.GPT_4_1_MINI_CONTEXT,
    "gpt-4.1-nano": process.env.GPT_4_1_NANO_CONTEXT,
    "gpt-5": process.env.GPT_5_CONTEXT,
    "gpt-5-mini": process.env.GPT_5_MINI_CONTEXT,
    "gpt-5-nano": process.env.GPT_5_NANO_CONTEXT,
    "general": process.env.GENERAL_CONTEXT
};

/**
 * Resolves the RAG system-prompt template for a model. Per-model templates
 * live as files in config/kb/prompt/rag (loaded via PromptManager) and can
 * be overridden via env vars, as before this refactor.
 *
 * Resolution order:
 *  1. model-specific env override (RAG_CONTEXT_ENV_OVERRIDES)
 *  2. GENERAL_CONTEXT env var, for models with no dedicated template file
 *  3. on-disk template via ragPromptManager (falls back to general.txt)
 *
 * @param {string} modelName - model name, e.g. "gpt-4o"
 * @returns {string} the prompt template (contains a {context} placeholder)
 */
function getRagContextTemplate(modelName) {
    const envOverride = RAG_CONTEXT_ENV_OVERRIDES[modelName];
    if (envOverride) {
        return envOverride;
    }
    // Unknown model: honor the generic env override before reading from disk.
    if (!PromptManager.modelMap[modelName] && process.env.GENERAL_CONTEXT) {
        return process.env.GENERAL_CONTEXT;
    }
    return ragPromptManager.getPrompt(modelName);
}
83
114
 
84
115
  function normalizeEmbedding(embedding) {
85
116
  const normalizedEmbedding = (embedding && typeof embedding.toObject === 'function')
@@ -88,19 +119,13 @@ function normalizeEmbedding(embedding) {
88
119
  return { ...normalizedEmbedding };
89
120
  }
90
121
 
91
- let contexts = {
92
- "gpt-3.5-turbo": process.env.GPT_3_5_CONTEXT || "You are an helpful assistant for question-answering tasks.\nUse ONLY the pieces of retrieved context delimited by #### and the chat history to answer the question.\nIf you don't know the answer, just say: \"I don't know<NOANS>\"\n\n####{context}####",
93
- "gpt-4": process.env.GPT_4_CONTEXT || "You are an helpful assistant for question-answering tasks.\nUse ONLY the pieces of retrieved context delimited by #### and the chat history to answer the question.\nIf you don't know the answer, just say that you don't know.\nIf and only if none of the retrieved context is useful for your task, add this word to the end <NOANS>\n\n####{context}####",
94
- "gpt-4-turbo-preview": process.env.GPT_4T_CONTEXT || "You are an helpful assistant for question-answering tasks.\nUse ONLY the pieces of retrieved context delimited by #### and the chat history to answer the question.\nIf you don't know the answer, just say that you don't know.\nIf and only if none of the retrieved context is useful for your task, add this word to the end <NOANS>\n\n####{context}####",
95
- "gpt-4o": process.env.GPT_4O_CONTEXT || "You are an helpful assistant for question-answering tasks. Follow these steps carefully:\n1. Answer in the same language of the user question, regardless of the retrieved context language\n2. Use ONLY the pieces of the retrieved context and the chat history to answer the question.\n3. If the retrieved context does not contain sufficient information to generate an accurate and informative answer, return <NOANS>\n\n==Retrieved context start==\n{context}\n==Retrieved context end==",
96
- "gpt-4o-mini": process.env.GPT_4O_MINI_CONTEXT || "You are an helpful assistant for question-answering tasks. Follow these steps carefully:\n1. Answer in the same language of the user question, regardless of the retrieved context language\n2. Use ONLY the pieces of the retrieved context and the chat history to answer the question.\n3. If the retrieved context does not contain sufficient information to generate an accurate and informative answer, return <NOANS>\n\n==Retrieved context start==\n{context}\n==Retrieved context end==",
97
- "gpt-4.1": process.env.GPT_4_1_CONTEXT || "You are an helpful assistant for question-answering tasks. Follow these steps carefully:\n1. Answer in the same language of the user question, regardless of the retrieved context language\n2. Use ONLY the pieces of the retrieved context and the chat history to answer the question.\n3. If the retrieved context does not contain sufficient information to generate an accurate and informative answer, append <NOANS> at the end of the answer\n\n==Retrieved context start==\n{context}\n==Retrieved context end==",
98
- "gpt-4.1-mini": process.env.GPT_4_1_MINI_CONTEXT || "You are an helpful assistant for question-answering tasks. Follow these steps carefully:\n1. Answer in the same language of the user question, regardless of the retrieved context language\n2. Use ONLY the pieces of the retrieved context and the chat history to answer the question.\n3. If the retrieved context does not contain sufficient information to generate an accurate and informative answer, append <NOANS> at the end of the answer\n\n==Retrieved context start==\n{context}\n==Retrieved context end==",
99
- "gpt-4.1-nano": process.env.GPT_4_1_NANO_CONTEXT || "You are an helpful assistant for question-answering tasks. Follow these steps carefully:\n1. Answer in the same language of the user question, regardless of the retrieved context language\n2. Use ONLY the pieces of the retrieved context and the chat history to answer the question.\n3. If the retrieved context does not contain sufficient information to generate an accurate and informative answer, append <NOANS> at the end of the answer\n\n==Retrieved context start==\n{context}\n==Retrieved context end==",
100
- "gpt-5": process.env.GPT_5_CONTEXT || "You are an helpful assistant for question-answering tasks. Follow these steps carefully:\n1. Answer in the same language of the user question, regardless of the retrieved context language\n2. Use ONLY the pieces of the retrieved context and the chat history to answer the question.\n3. If the retrieved context does not contain sufficient information to generate an accurate and informative answer, append <NOANS> at the end of the answer\n\n==Retrieved context start==\n{context}\n==Retrieved context end==",
101
- "gpt-5-mini": process.env.GPT_5_MINI_CONTEXT || "You are an helpful assistant for question-answering tasks. Follow these steps carefully:\n1. Answer in the same language of the user question, regardless of the retrieved context language\n2. Use ONLY the pieces of the retrieved context and the chat history to answer the question.\n3. If the retrieved context does not contain sufficient information to generate an accurate and informative answer, append <NOANS> at the end of the answer\n\n==Retrieved context start==\n{context}\n==Retrieved context end==",
102
- "gpt-5-nano": process.env.GPT_5_NANO_CONTEXT || "You are an helpful assistant for question-answering tasks. Follow these steps carefully:\n1. Answer in the same language of the user question, regardless of the retrieved context language\n2. Use ONLY the pieces of the retrieved context and the chat history to answer the question.\n3. If the retrieved context does not contain sufficient information to generate an accurate and informative answer, append <NOANS> at the end of the answer\n\n==Retrieved context start==\n{context}\n==Retrieved context end==",
103
- "general": process.env.GENERAL_CONTEXT || "You are an helpful assistant for question-answering tasks. Follow these steps carefully:\n1. Answer in the same language of the user question, regardless of the retrieved context language\n2. Use ONLY the pieces of the retrieved context and the chat history to answer the question.\n3. If the retrieved context does not contain sufficient information to generate an accurate and informative answer, append <NOANS> at the end of the answer\n\n==Retrieved context start==\n{context}\n==Retrieved context end=="
122
/**
 * Builds the situated-context payload forwarded to the AI worker, or
 * undefined when the feature is disabled in config/kb/situatedContext.
 * The api_key is resolved at call time: SITUATED_CONTEXT_API_KEY wins,
 * otherwise the global GPTKEY is reused.
 */
function normalizeSituatedContext() {
    if (!situatedContext.enable) {
        return undefined;
    }
    const api_key = process.env.SITUATED_CONTEXT_API_KEY || process.env.GPTKEY;
    return { ...situatedContext, api_key };
}
105
130
 
106
131
  /**
@@ -236,6 +261,11 @@ router.post('/scrape/single', async (req, res) => {
236
261
  json.hybrid = true;
237
262
  }
238
263
 
264
+ const situated_context = normalizeSituatedContext();
265
+ if (situated_context) {
266
+ json.situated_context = situated_context;
267
+ }
268
+
239
269
  winston.verbose("/scrape/single json: ", json);
240
270
 
241
271
  if (process.env.NODE_ENV === "test") {
@@ -361,7 +391,7 @@ router.post('/qa', async (req, res) => {
361
391
 
362
392
  // Check if "Advanced Mode" is active. In such case the default_context must be not appended
363
393
  if (!data.advancedPrompt) {
364
- const contextTemplate = contexts[data.model.name] || contexts["general"];
394
+ const contextTemplate = getRagContextTemplate(data.model.name);
365
395
  if (data.system_context) {
366
396
  data.system_context = data.system_context + " \n" + contextTemplate;
367
397
  } else {
@@ -393,7 +423,7 @@ router.post('/qa', async (req, res) => {
393
423
  }
394
424
  }
395
425
 
396
- data.stream = false;
426
+ data.stream = data.stream === true;
397
427
  data.debug = true;
398
428
  delete data.advancedPrompt;
399
429
  winston.verbose("ask data: ", data);
@@ -402,16 +432,163 @@ router.post('/qa', async (req, res) => {
402
432
  return res.status(200).send({ success: true, message: "Question skipped in test environment", data: data });
403
433
  }
404
434
 
435
+ if (data.stream === true) {
436
+ // Streaming SSE: use askStream and forward only content as JSON SSE events
437
+ res.status(200);
438
+ res.setHeader('Content-Type', 'text/event-stream');
439
+ res.setHeader('Cache-Control', 'no-cache');
440
+ res.setHeader('Connection', 'keep-alive');
441
+ res.setHeader('Access-Control-Allow-Origin', '*');
442
+
443
+ const sendError = (message) => {
444
+ try {
445
+ res.write('data: ' + JSON.stringify({ error: message }) + '\n\n');
446
+ } catch (_) {}
447
+ res.end();
448
+ };
449
+
450
+ function extractContent(obj) {
451
+ if (obj.content != null) return obj.content;
452
+ if (obj.choices && obj.choices[0]) {
453
+ const c = obj.choices[0];
454
+ if (c.delta && c.delta.content != null) return c.delta.content;
455
+ if (c.message && c.message.content != null) return c.message.content;
456
+ }
457
+ return null;
458
+ }
459
+
460
+ /** Same JSON shape as non-stream /qa: stream may wrap it in model_used */
461
+ function normalizeKbQaPayload(obj) {
462
+ if (obj && typeof obj === 'object' && obj.model_used != null && typeof obj.model_used === 'object') {
463
+ return obj.model_used;
464
+ }
465
+ return obj;
466
+ }
467
+
468
+ /** Flat final payload like non-stream /qa (answer, prompt_token_size, …) */
469
+ function isMetadataPayload(obj, streamedContent) {
470
+ if (obj == null || typeof obj !== 'object') return false;
471
+ if (streamedContent != null && streamedContent !== '') return false;
472
+ if (typeof obj.prompt_token_size === 'number') return true;
473
+ if (obj.answer != null) return true;
474
+ if (obj.sources != null) return true;
475
+ if (obj.chunks != null) return true;
476
+ if (obj.content_chunks != null) return true;
477
+ return false;
478
+ }
479
+
480
+ /** KB stream summary: full_response + model_used (same info as non-stream body, plus envelope) */
481
+ function isKbStreamCompletedSummary(obj) {
482
+ if (obj == null || typeof obj !== 'object') return false;
483
+ if (obj.status === 'completed') return true;
484
+ if (obj.full_response != null && obj.model_used != null && typeof obj.model_used === 'object') return true;
485
+ return false;
486
+ }
487
+
488
+ function forwardSsePayload(payload) {
489
+ if (payload === '[DONE]') return;
490
+ let obj;
491
+ try {
492
+ obj = JSON.parse(payload);
493
+ } catch (_) {
494
+ return;
495
+ }
496
+
497
+ if (obj.status === 'started') {
498
+ return;
499
+ }
500
+ if (isKbStreamCompletedSummary(obj)) {
501
+ res.write('data: ' + JSON.stringify(normalizeKbQaPayload(obj)) + '\n\n');
502
+ return;
503
+ }
504
+
505
+ if (obj.type === 'metadata' || obj.event === 'metadata') {
506
+ res.write('data: ' + JSON.stringify(normalizeKbQaPayload(obj)) + '\n\n');
507
+ return;
508
+ }
509
+ const content = extractContent(obj);
510
+ if (content != null && content !== '') {
511
+ res.write('data: ' + JSON.stringify({ content }) + '\n\n');
512
+ return;
513
+ }
514
+ const normalized = normalizeKbQaPayload(obj);
515
+ if (isMetadataPayload(normalized, content)) {
516
+ res.write('data: ' + JSON.stringify(normalized) + '\n\n');
517
+ }
518
+ }
519
+
520
+ aiService.askStream(data).then((resp) => {
521
+ const stream = resp.data;
522
+ let buffer = '';
523
+
524
+ stream.on('data', (chunk) => {
525
+ buffer += chunk.toString();
526
+ const lines = buffer.split('\n');
527
+ buffer = lines.pop() || '';
528
+
529
+ for (const line of lines) {
530
+ const trimmed = line.trim();
531
+ if (!trimmed.startsWith('data: ')) continue;
532
+ const payload = trimmed.slice(6);
533
+ forwardSsePayload(payload);
534
+ }
535
+ });
536
+
537
+ stream.on('end', () => {
538
+ const tail = buffer.trim();
539
+ if (tail) {
540
+ for (const line of tail.split('\n')) {
541
+ const trimmed = line.trim();
542
+ if (!trimmed.startsWith('data: ')) continue;
543
+ forwardSsePayload(trimmed.slice(6));
544
+ }
545
+ }
546
+ res.write('data: [DONE]\n\n');
547
+ res.end();
548
+ });
549
+
550
+ stream.on('error', (err) => {
551
+ winston.error('qa stream err: ', err);
552
+ sendError(err.message || 'Stream error');
553
+ });
554
+
555
+ res.on('close', () => {
556
+ if (!res.writableEnded) {
557
+ stream.destroy();
558
+ }
559
+ });
560
+ }).catch((err) => {
561
+ winston.error('qa err: ', err);
562
+ winston.error('qa err.response: ', err.response);
563
+ const message = (err.response && err.response.data && typeof err.response.data.pipe !== 'function' && err.response.data.detail)
564
+ ? err.response.data.detail
565
+ : (err.response && err.response.statusText) || err.message || String(err);
566
+ if (!res.headersSent) {
567
+ res.status(err.response && err.response.status ? err.response.status : 500);
568
+ }
569
+ sendError(message);
570
+ });
571
+ return;
572
+ }
573
+
405
574
  aiService.askNamespace(data).then((resp) => {
406
575
  winston.debug("qa resp: ", resp.data);
407
576
  let answer = resp.data;
408
577
 
409
578
  if (publicKey === true) {
410
- let multiplier = MODELS_MULTIPLIER[data.model];
579
+ let modelKey;
580
+ if (typeof data.model === 'string') {
581
+ modelKey = data.model;
582
+ } else if (data.model && typeof data.model.name === 'string') {
583
+ modelKey = data.model.name;
584
+ }
585
+
586
+ let multiplier = MODELS_MULTIPLIER[modelKey];
411
587
  if (!multiplier) {
412
588
  multiplier = 1;
413
- winston.info("No multiplier found for AI model (qa) " + data.model);
589
+ winston.info("No multiplier found for AI model (qa) " + modelKey);
414
590
  }
591
+
415
592
  obj.multiplier = multiplier;
416
593
  obj.tokens = answer.prompt_token_size;
417
594
 
@@ -1039,6 +1216,7 @@ router.post('/namespace/import/:id', upload.single('uploadFile'), async (req, re
1039
1216
  let embedding = normalizeEmbedding(ns.embedding);
1040
1217
  embedding.api_key = process.env.EMBEDDING_API_KEY || process.env.GPTKEY;
1041
1218
  let hybrid = ns.hybrid;
1219
+ const situated_context = normalizeSituatedContext();
1042
1220
 
1043
1221
 
1044
1222
  if (process.env.NODE_ENV !== "test") {
@@ -1076,7 +1254,13 @@ router.post('/namespace/import/:id', upload.single('uploadFile'), async (req, re
1076
1254
 
1077
1255
  let resources = new_contents.map(({ name, status, __v, createdAt, updatedAt, id_project, ...keepAttrs }) => keepAttrs)
1078
1256
  resources = resources.map(({ _id, scrape_options, ...rest }) => {
1079
- return { id: _id, parameters_scrape_type_4: scrape_options, embedding: embedding, engine: engine, ...rest}
1257
+ return {
1258
+ id: _id,
1259
+ parameters_scrape_type_4: scrape_options,
1260
+ embedding: embedding,
1261
+ engine: engine,
1262
+ ...(situated_context && { situated_context: situated_context }),
1263
+ ...rest}
1080
1264
  });
1081
1265
 
1082
1266
  winston.verbose("resources to be sent to worker: ", resources);
@@ -1420,13 +1604,14 @@ router.post('/', async (req, res) => {
1420
1604
  }
1421
1605
  if (type === 'url') {
1422
1606
  new_kb.refresh_rate = refresh_rate || 'never';
1423
- if (!scrape_type || scrape_type === 2) {
1424
- new_kb.scrape_type = 2;
1425
- new_kb.scrape_options = aiManager.setDefaultScrapeOptions();
1426
- } else {
1607
+ if (scrape_type === 0 || scrape_type === 4) {
1427
1608
  new_kb.scrape_type = scrape_type;
1428
1609
  new_kb.scrape_options = scrape_options;
1429
1610
  }
1611
+ else {
1612
+ new_kb.scrape_type = 2;
1613
+ new_kb.scrape_options = aiManager.setDefaultScrapeOptions();
1614
+ }
1430
1615
  }
1431
1616
 
1432
1617
  if (tags && Array.isArray(tags) && tags.every(tag => typeof tag === "string")) {
@@ -1451,6 +1636,8 @@ router.post('/', async (req, res) => {
1451
1636
  const embedding = normalizeEmbedding(namespace.embedding);
1452
1637
  embedding.api_key = process.env.EMBEDDING_API_KEY || process.env.GPTKEY;
1453
1638
 
1639
+ const situated_context = normalizeSituatedContext();
1640
+
1454
1641
  const json = {
1455
1642
  id: saved_kb._id,
1456
1643
  type: saved_kb.type,
@@ -1461,6 +1648,7 @@ router.post('/', async (req, res) => {
1461
1648
  hybrid: namespace.hybrid,
1462
1649
  engine: namespace.engine || default_engine,
1463
1650
  embedding: embedding,
1651
+ ...(situated_context && { situated_context: situated_context }),
1464
1652
  ...(saved_kb.scrape_type && { scrape_type: saved_kb.scrape_type }),
1465
1653
  ...(saved_kb.scrape_options && { parameters_scrape_type_4: saved_kb.scrape_options }),
1466
1654
  ...(saved_kb.tags && { tags: saved_kb.tags }),
@@ -1617,10 +1805,18 @@ router.post('/csv', upload.single('uploadFile'), async (req, res) => {
1617
1805
  let embedding = normalizeEmbedding(namespace.embedding);
1618
1806
  embedding.api_key = process.env.EMBEDDING_API_KEY || process.env.GPTKEY;
1619
1807
  let hybrid = namespace.hybrid;
1808
+ const situated_context = normalizeSituatedContext();
1620
1809
 
1621
1810
  let resources = result.map(({ name, status, __v, createdAt, updatedAt, id_project, ...keepAttrs }) => keepAttrs)
1622
1811
  resources = resources.map(({ _id, ...rest}) => {
1623
- return { id: _id, webhook: webhook, embedding: embedding, engine: engine, ...rest };
1812
+ return {
1813
+ id: _id,
1814
+ webhook: webhook,
1815
+ embedding: embedding,
1816
+ engine: engine,
1817
+ ...(situated_context && { situated_context: situated_context }),
1818
+ ...rest
1819
+ };
1624
1820
  })
1625
1821
  winston.verbose("resources to be sent to worker: ", resources);
1626
1822
 
@@ -1856,13 +2052,14 @@ router.put('/:kb_id', async (req, res) => {
1856
2052
 
1857
2053
  if (new_content.type === 'url') {
1858
2054
  new_content.refresh_rate = refresh_rate || 'never';
1859
- if (!scrape_type || scrape_type === 2) {
1860
- new_content.scrape_type = 2;
1861
- new_content.scrape_options = aiManager.setDefaultScrapeOptions();
1862
- } else {
2055
+ if (scrape_type === 0 || scrape_type === 4) {
1863
2056
  new_content.scrape_type = scrape_type;
1864
2057
  new_content.scrape_options = scrape_options;
1865
2058
  }
2059
+ else {
2060
+ new_content.scrape_type = 2;
2061
+ new_content.scrape_options = aiManager.setDefaultScrapeOptions();
2062
+ }
1866
2063
  }
1867
2064
 
1868
2065
  if (kb.sitemap_origin_id) {
@@ -1887,6 +2084,7 @@ router.put('/:kb_id', async (req, res) => {
1887
2084
  const embedding = normalizeEmbedding(namespace.embedding);
1888
2085
  embedding.api_key = process.env.EMBEDDING_API_KEY || process.env.GPTKEY;
1889
2086
  let webhook = apiUrl + '/webhook/kb/status?token=' + KB_WEBHOOK_TOKEN;
2087
+ const situated_context = normalizeSituatedContext();
1890
2088
 
1891
2089
  const json = {
1892
2090
  id: updated_content._id,
@@ -1898,6 +2096,7 @@ router.put('/:kb_id', async (req, res) => {
1898
2096
  hybrid: namespace.hybrid,
1899
2097
  engine: namespace.engine || default_engine,
1900
2098
  embedding: embedding,
2099
+ ...(situated_context && { situated_context: situated_context }),
1901
2100
  ...(updated_content.scrape_type && { scrape_type: updated_content.scrape_type }),
1902
2101
  ...(updated_content.scrape_options && { parameters_scrape_type_4: updated_content.scrape_options }),
1903
2102
  ...(updated_content.tags && { tags: updated_content.tags }),
@@ -1914,40 +2113,6 @@ router.put('/:kb_id', async (req, res) => {
1914
2113
 
1915
2114
  })
1916
2115
 
1917
- // router.put('/:kb_id', async (req, res) => {
1918
-
1919
- // let kb_id = req.params.kb_id;
1920
- // winston.verbose("update kb_id " + kb_id);
1921
-
1922
- // let update = {};
1923
-
1924
- // if (req.body.name != undefined) {
1925
- // update.name = req.body.name;
1926
- // }
1927
-
1928
- // if (req.body.status != undefined) {
1929
- // update.status = req.body.status;
1930
- // }
1931
-
1932
- // winston.debug("kb update: ", update);
1933
-
1934
- // KB.findByIdAndUpdate(kb_id, update, { new: true }, (err, savedKb) => {
1935
-
1936
- // if (err) {
1937
- // winston.error("KB findByIdAndUpdate error: ", err);
1938
- // return res.status(500).send({ success: false, error: err });
1939
- // }
1940
-
1941
- // if (!savedKb) {
1942
- // winston.debug("Try to updating a non-existing kb");
1943
- // return res.status(400).send({ success: false, message: "Content not found" })
1944
- // }
1945
-
1946
- // res.status(200).send(savedKb)
1947
- // })
1948
-
1949
- // })
1950
-
1951
2116
  router.delete('/:kb_id', async (req, res) => {
1952
2117
 
1953
2118
  let project_id = req.projectid;
package/routes/quotes.js CHANGED
@@ -44,10 +44,17 @@ router.post('/incr/:type', async (req, res) => {
44
44
 
45
45
  let quoteManager = req.app.get('quote_manager');
46
46
 
47
- let multiplier = MODELS_MULTIPLIER[data.model];
47
+ let modelKey;
48
+ if (typeof data.model === 'string') {
49
+ modelKey = data.model;
50
+ } else if (data.model && typeof data.model.name === 'string') {
51
+ modelKey = data.model.name;
52
+ }
53
+
54
+ let multiplier = MODELS_MULTIPLIER[modelKey];
48
55
  if (!multiplier) {
49
56
  multiplier = 1;
50
- winston.info("No multiplier found for AI model (incr) " + data.model)
57
+ winston.info("No multiplier found for AI model (incr) " + modelKey)
51
58
  }
52
59
  data.multiplier = multiplier;
53
60
  data.createdAt = new Date();
package/routes/webhook.js CHANGED
@@ -193,6 +193,11 @@ router.post('/kb/reindex', async (req, res) => {
193
193
  embedding.api_key = process.env.EMBEDDING_API_KEY || process.env.GPTKEY;
194
194
  json.embedding = embedding;
195
195
 
196
+ const situated_context = aiManager.normalizeSituatedContext();
197
+ if (situated_context) {
198
+ json.situated_context = situated_context;
199
+ }
200
+
196
201
  let resources = [];
197
202
  resources.push(json);
198
203
 
@@ -20,6 +20,7 @@ const default_engine = require('../config/kb/engine');
20
20
  const default_engine_hybrid = require('../config/kb/engine.hybrid');
21
21
  const default_embedding = require('../config/kb/embedding');
22
22
  const integrationService = require('./integrationService');
23
+ const situatedContext = require('../config/kb/situatedContext');
23
24
 
24
25
  // Job managers
25
26
  let jobManager = new JobManager(AMQP_MANAGER_URL, {
@@ -93,11 +94,22 @@ class AiManager {
93
94
  let engine = namespace.engine || default_engine;
94
95
  let embedding = namespace.embedding || default_embedding;
95
96
  embedding.api_key = process.env.EMBEDDING_API_KEY || process.env.GPTKEY;
97
+
98
+ let situated_context = this.normalizeSituatedContext();
99
+
96
100
  let webhook = apiUrl + '/webhook/kb/status?token=' + KB_WEBHOOK_TOKEN;
97
101
 
98
102
  let resources = result.map(({ name, status, __v, createdAt, updatedAt, id_project, ...keepAttrs }) => keepAttrs)
99
103
  resources = resources.map(({ _id, scrape_options, ...rest }) => {
100
- return { id: _id, webhook: webhook, parameters_scrape_type_4: scrape_options, embedding: embedding, engine: engine, hybrid: hybrid, ...rest}
104
+ return {
105
+ id: _id,
106
+ webhook: webhook,
107
+ parameters_scrape_type_4: scrape_options,
108
+ embedding: embedding,
109
+ engine: engine,
110
+ hybrid: hybrid,
111
+ ...(situated_context && { situated_context }),
112
+ ...rest}
101
113
  });
102
114
 
103
115
  winston.verbose("resources to be sent to worker: ", resources);
@@ -120,6 +132,8 @@ class AiManager {
120
132
  async scheduleSitemap(namespace, sitemap_content, options) {
121
133
  return new Promise((resolve, reject) => {
122
134
 
135
+ const situated_context = this.normalizeSituatedContext();
136
+
123
137
  let kb = {
124
138
  id: sitemap_content._id,
125
139
  source: sitemap_content.source,
@@ -130,6 +144,7 @@ class AiManager {
130
144
  engine: namespace.engine,
131
145
  embedding: namespace.embedding,
132
146
  hybrid: namespace.hybrid,
147
+ ...(situated_context && { situated_context }),
133
148
  }
134
149
 
135
150
  if (process.env.NODE_ENV === 'test') {
@@ -552,6 +567,15 @@ class AiManager {
552
567
  })
553
568
  }
554
569
 
570
+ normalizeSituatedContext() {
571
+ return situatedContext.enable
572
+ ? {
573
+ ...situatedContext,
574
+ api_key: process.env.SITUATED_CONTEXT_API_KEY || process.env.GPTKEY
575
+ }
576
+ : undefined;
577
+ }
578
+
555
579
  }
556
580
 
557
581
  const aiManager = new AiManager();
@@ -206,16 +206,18 @@ class AiService {
206
206
  }
207
207
  winston.debug("[OPENAI SERVICE] kb endpoint: " + base_url);
208
208
 
209
+ const config = {
210
+ url: base_url + "/qa",
211
+ headers: {
212
+ 'Content-Type': 'application/json'
213
+ },
214
+ data: data,
215
+ method: 'POST'
216
+ };
217
+
209
218
  return new Promise((resolve, reject) => {
210
219
 
211
- axios({
212
- url: base_url + "/qa",
213
- headers: {
214
- 'Content-Type': 'application/json'
215
- },
216
- data: data,
217
- method: 'POST'
218
- }).then((resbody) => {
220
+ axios(config).then((resbody) => {
219
221
  resolve(resbody);
220
222
  }).catch((err) => {
221
223
  reject(err);
@@ -224,6 +226,29 @@ class AiService {
224
226
  })
225
227
  }
226
228
 
229
+ /**
230
+ * Stream /qa from KB service. Uses Axios with responseType: 'stream'.
231
+ * Returns the raw Axios response (resp.data is the Node.js Readable stream).
232
+ */
233
+ askStream(data) {
234
+ winston.debug("askStream data: ", data);
235
+ let base_url = kb_endpoint_qa;
236
+ if (data.hybrid || data.search_type === 'hybrid') {
237
+ base_url = kb_endpoint_qa_gpu;
238
+ }
239
+ winston.debug("[OPENAI SERVICE] kb stream endpoint: " + base_url);
240
+
241
+ return axios({
242
+ url: base_url + "/qa",
243
+ headers: {
244
+ 'Content-Type': 'application/json'
245
+ },
246
+ data: data,
247
+ method: 'POST',
248
+ responseType: 'stream'
249
+ });
250
+ }
251
+
227
252
  getContentChunks(namespace_id, content_id, engine, hybrid) {
228
253
  let base_url = kb_endpoint_train;
229
254
  winston.debug("[OPENAI SERVICE] kb endpoint: " + base_url);
@@ -177,10 +177,9 @@ class FileGridFsService extends FileService {
177
177
  return reject(e);
178
178
  })
179
179
  stream.on('data', (data) => {
180
- bufs.push(data);
180
+ bufs.push(Buffer.isBuffer(data) ? data : Buffer.from(data));
181
181
  });
182
182
  stream.on('end', () => {
183
-
184
183
  var buffer = Buffer.concat(bufs);
185
184
  return resolve(buffer);
186
185
  });