nodebb-plugin-search-agent 0.0.936 → 0.0.938

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -255,12 +255,25 @@ async function reRankWithAI(queryText, candidates, topicMap, apiKey, model, maxR
   const scores = JSON.parse(match[0]);
   const candidateByTid = Object.fromEntries(candidates.map(c => [String(c.tid), c]));
 
-  return Object.entries(scores)
-    .filter(([, score]) => Number(score) >= 7)
-    .sort(([, a], [, b]) => Number(b) - Number(a))
-    .slice(0, maxResults)
-    .map(([tid]) => candidateByTid[tid])
-    .filter(Boolean);
+  let filtered = Object.entries(scores)
+    .filter(([, score]) => Number(score) >= 7)
+    .sort(([, a], [, b]) => Number(b) - Number(a))
+    .slice(0, maxResults)
+    .map(([tid]) => candidateByTid[tid])
+    .filter(Boolean);
+
+  // If nothing passed the threshold, return the top scoring candidate (if any)
+  if (filtered.length === 0 && candidates.length > 0) {
+    // Find the tid with the highest score
+    const sortedAll = Object.entries(scores)
+      .sort(([, a], [, b]) => Number(b) - Number(a));
+    if (sortedAll.length > 0) {
+      const [topTid] = sortedAll[0];
+      const topCandidate = candidateByTid[topTid];
+      if (topCandidate) filtered = [topCandidate];
+    }
+  }
+  return filtered;
 }
 
 // ─── Public API ───────────────────────────────────────────────────────────────
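Editor's note: the behavioral change in this hunk is the fallback when the AI re-ranker scores every candidate below 7. A standalone sketch with invented scores and tids (not code from the package) shows the before/after:

// Hypothetical re-rank output: every score sits below the 7-point cutoff.
const scores = { '101': 4, '202': 6, '303': 2 };
const candidates = [{ tid: 101 }, { tid: 202 }, { tid: 303 }];
const candidateByTid = Object.fromEntries(candidates.map(c => [String(c.tid), c]));

let filtered = Object.entries(scores)
  .filter(([, score]) => Number(score) >= 7)
  .map(([tid]) => candidateByTid[tid])
  .filter(Boolean); // [] in 0.0.936, so the user saw no results at all

// 0.0.938 falls back to the single best-scoring candidate instead.
if (filtered.length === 0 && candidates.length > 0) {
  const [topTid] = Object.entries(scores)
    .sort(([, a], [, b]) => Number(b) - Number(a))[0];
  if (candidateByTid[topTid]) filtered = [candidateByTid[topTid]];
}

console.log(filtered); // [{ tid: 202 }]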
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "nodebb-plugin-search-agent",
-  "version": "0.0.936",
+  "version": "0.0.938",
   "description": "NodeBB plugin that adds a floating chat assistant to help users find relevant forum topics using TF-IDF text similarity",
   "main": "library.js",
   "author": "Racheli Bayfus",
@@ -1,28 +1,9 @@
1
- // Remove images, files, and non-text content from input
2
- function extractPureText(text) {
3
- if (typeof text !== 'string') return '';
4
- // Remove Markdown images: ![alt](url)
5
- let cleaned = text.replace(/!\[[^\]]*\]\([^)]*\)/g, '');
6
- // Remove HTML <img> tags
7
- cleaned = cleaned.replace(/<img\b[^>]*>/gi, '');
8
- // Remove links to files/images (common extensions)
9
- cleaned = cleaned.replace(/https?:\/\/(\S+\.(jpg|jpeg|png|gif|bmp|svg|webp|pdf|docx?|xlsx?|pptx?|zip|rar|7z|tar|gz|mp3|mp4|avi|mov|wmv|flv|mkv|ogg|wav|exe|bin|apk|ipa|dmg|iso|csv|json|xml|yml|yaml|psd|ai|eps|ttf|otf|woff2?))(\?\S*)?/gi, '');
10
- // Remove Markdown file links: [desc](url.ext)
11
- cleaned = cleaned.replace(/\[[^\]]*\]\([^)]*\.(jpg|jpeg|png|gif|bmp|svg|webp|pdf|docx?|xlsx?|pptx?|zip|rar|7z|tar|gz|mp3|mp4|avi|mov|wmv|flv|mkv|ogg|wav|exe|bin|apk|ipa|dmg|iso|csv|json|xml|yml|yaml|psd|ai|eps|ttf|otf|woff2?)\)/gi, '');
12
- // Remove any remaining <a href=...> tags to files
13
- cleaned = cleaned.replace(/<a\b[^>]*href=["']?[^"'>]+\.(jpg|jpeg|png|gif|bmp|svg|webp|pdf|docx?|xlsx?|pptx?|zip|rar|7z|tar|gz|mp3|mp4|avi|mov|wmv|flv|mkv|ogg|wav|exe|bin|apk|ipa|dmg|iso|csv|json|xml|yml|yaml|psd|ai|eps|ttf|otf|woff2?)[^>]*>.*?<\/a>/gi, '');
14
- // Remove any remaining HTML tags
15
- cleaned = cleaned.replace(/<[^>]+>/g, ' ');
16
- // Remove extra whitespace
17
- cleaned = cleaned.replace(/[ \t]+/g, ' ').replace(/\n{2,}/g, '\n').trim();
18
- return cleaned;
19
- }
20
1
  'use strict';
21
2
 
22
3
  const https = require('https');
23
4
 
24
5
  function winston() {
25
- return require.main.require('winston');
6
+ return require.main.require('winston');
26
7
  }
27
8
 
28
9
  const OPENAI_EMBEDDINGS_HOSTNAME = 'api.openai.com';
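Editor's note: extractPureText is moved below the module constants but its behavior is unchanged (see the relocated copy in the next hunk). A usage sketch with an invented post body:

const raw = 'Quarterly recap ![chart](https://example.com/chart.png)\n' +
  '<img src="logo.png">\n' +
  'Full report: https://example.com/q3-report.pdf\n\n' +
  'Profit rose 12% year over year.';

// Markdown images, <img> tags, and file URLs are stripped; prose survives
// and blank lines collapse to single newlines (some stray spaces remain).
console.log(extractPureText(raw));
// ~> 'Quarterly recap \nFull report: \nProfit rose 12% year over year.'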
@@ -30,48 +11,236 @@ const OPENAI_EMBEDDINGS_PATH = '/v1/embeddings';
 const EMBEDDING_MODEL = 'text-embedding-3-small';
 const MAX_RETRIES = 3;
 const RETRY_DELAY_MS = 500;
-// text-embedding-3-small supports 8 192 tokens.
-// Hebrew/non-ASCII text tokenizes at ~1.5–2 chars/token (UTF-8 multibyte).
-// Using 1.5 chars/token worst-case: 8000 tokens × 1.5 = 12 000 chars — gives a safe margin.
-
-const MAX_CHARS = 12000;
-const CHUNK_OVERLAP = 200; // chars to overlap between chunks for context
-
-// Split a long string into chunks of maxLen, with optional overlap
-function splitIntoChunks(text, maxLen = MAX_CHARS, overlap = CHUNK_OVERLAP) {
-	if (text.length <= maxLen) return [text];
-	const chunks = [];
-	let i = 0;
-	while (i < text.length) {
-		const chunk = text.slice(i, i + maxLen);
-		chunks.push(chunk);
-		if (i + maxLen >= text.length) break;
-		i += maxLen - overlap;
-	}
-	return chunks;
+
+// text-embedding-3-small supports 8,192 tokens.
+// Conservative char limits help avoid token overflows, especially for non-ASCII text.
+const MAX_CHARS = 10000;
+const CHUNK_OVERLAP = 300;
+const MIN_CHUNK_CHARS = 500;
+const TARGET_CHUNK_CHARS = 2200;
+
+// ─── Text cleanup ─────────────────────────────────────────────────────────────
+
+function extractPureText(text) {
+  if (typeof text !== 'string') return '';
+
+  // Remove Markdown images: ![alt](url)
+  let cleaned = text.replace(/!\[[^\]]*\]\([^)]*\)/g, '');
+
+  // Remove HTML <img> tags
+  cleaned = cleaned.replace(/<img\b[^>]*>/gi, '');
+
+  // Remove links to files/images (common extensions)
+  cleaned = cleaned.replace(
+    /https?:\/\/(\S+\.(jpg|jpeg|png|gif|bmp|svg|webp|pdf|docx?|xlsx?|pptx?|zip|rar|7z|tar|gz|mp3|mp4|avi|mov|wmv|flv|mkv|ogg|wav|exe|bin|apk|ipa|dmg|iso|csv|json|xml|yml|yaml|psd|ai|eps|ttf|otf|woff2?))(\?\S*)?/gi,
+    ''
+  );
+
+  // Remove Markdown file links: [desc](url.ext)
+  cleaned = cleaned.replace(
+    /\[[^\]]*\]\([^)]*\.(jpg|jpeg|png|gif|bmp|svg|webp|pdf|docx?|xlsx?|pptx?|zip|rar|7z|tar|gz|mp3|mp4|avi|mov|wmv|flv|mkv|ogg|wav|exe|bin|apk|ipa|dmg|iso|csv|json|xml|yml|yaml|psd|ai|eps|ttf|otf|woff2?)\)/gi,
+    ''
+  );
+
+  // Remove any remaining <a href=...> tags to files
+  cleaned = cleaned.replace(
+    /<a\b[^>]*href=["']?[^"'>]+\.(jpg|jpeg|png|gif|bmp|svg|webp|pdf|docx?|xlsx?|pptx?|zip|rar|7z|tar|gz|mp3|mp4|avi|mov|wmv|flv|mkv|ogg|wav|exe|bin|apk|ipa|dmg|iso|csv|json|xml|yml|yaml|psd|ai|eps|ttf|otf|woff2?)[^>]*>.*?<\/a>/gi,
+    ''
+  );
+
+  // Remove any remaining HTML tags
+  cleaned = cleaned.replace(/<[^>]+>/g, ' ');
+
+  // Remove extra whitespace
+  cleaned = cleaned
+    .replace(/[ \t]+/g, ' ')
+    .replace(/\n{2,}/g, '\n')
+    .trim();
+
+  return cleaned;
+}
+
+function normalizeWhitespace(text) {
+  return text
+    .replace(/\r\n/g, '\n')
+    .replace(/[ \t]+/g, ' ')
+    .replace(/\n{3,}/g, '\n\n')
+    .trim();
+}
+
+function splitIntoBlocks(text) {
+  const normalized = normalizeWhitespace(text);
+
+  const rawBlocks = normalized
+    .split(/\n{2,}|(?=^#{1,6}\s)|(?=^\s*[-*+]\s)|(?=^\s*\d+\.\s)|(?=^\s*>\s)|(?=^```)/gm)
+    .map(block => block.trim())
+    .filter(Boolean);
+
+  return rawBlocks;
+}
+
+function splitLargeBlock(block, maxLen = MAX_CHARS) {
+  if (block.length <= maxLen) return [block];
+
+  const sentences = block.match(/[^.!?\n]+[.!?\n]+|[^.!?\n]+$/g) || [block];
+  const chunks = [];
+  let current = '';
+
+  for (const sentence of sentences) {
+    const s = sentence.trim();
+    if (!s) continue;
+
+    if ((current + ' ' + s).trim().length <= maxLen) {
+      current = current ? `${current} ${s}` : s;
+      continue;
+    }
+
+    if (current) {
+      chunks.push(current);
+      current = '';
+    }
+
+    // Fallback for very long sentence
+    if (s.length > maxLen) {
+      let i = 0;
+      while (i < s.length) {
+        chunks.push(s.slice(i, i + maxLen).trim());
+        i += maxLen;
+      }
+    } else {
+      current = s;
+    }
+  }
+
+  if (current) chunks.push(current);
+
+  return chunks;
+}
+
+function buildOverlapPrefix(prevChunk, overlapChars = CHUNK_OVERLAP) {
+  if (!prevChunk) return '';
+  return prevChunk.slice(Math.max(0, prevChunk.length - overlapChars)).trim();
+}
+
+function splitIntoSemanticChunks(
+  text,
+  {
+    maxLen = MAX_CHARS,
+    targetLen = TARGET_CHUNK_CHARS,
+    minLen = MIN_CHUNK_CHARS,
+    overlap = CHUNK_OVERLAP,
+  } = {}
+) {
+  if (!text) return [];
+  if (text.length <= maxLen) return [text];
+
+  const blocks = splitIntoBlocks(text).flatMap(block => splitLargeBlock(block, maxLen));
+
+  const chunks = [];
+  let current = '';
+
+  for (const block of blocks) {
+    const next = current ? `${current}\n\n${block}` : block;
+
+    if (next.length <= targetLen || current.length < minLen) {
+      if (next.length <= maxLen) {
+        current = next;
+        continue;
+      }
+    }
+
+    if (current) {
+      chunks.push(current.trim());
+    }
+
+    current = block;
+  }
+
+  if (current) {
+    chunks.push(current.trim());
+  }
+
+  const withOverlap = chunks.map((chunk, i) => {
+    if (i === 0) return chunk;
+
+    const prefix = buildOverlapPrefix(chunks[i - 1], overlap);
+    const merged = prefix ? `${prefix}\n\n${chunk}` : chunk;
+
+    return merged.length <= maxLen ? merged : merged.slice(merged.length - maxLen);
+  });
+
+  const finalChunks = [];
+  for (const chunk of withOverlap) {
+    if (
+      finalChunks.length > 0 &&
+      chunk.length < minLen &&
+      finalChunks[finalChunks.length - 1].length + 2 + chunk.length <= maxLen
+    ) {
+      finalChunks[finalChunks.length - 1] += `\n\n${chunk}`;
+    } else {
+      finalChunks.push(chunk);
+    }
+  }
+
+  return finalChunks;
 }
 
-// Average a list of vectors (arrays of numbers)
+// ─── Vector helpers ───────────────────────────────────────────────────────────
+
 function averageVectors(vectors) {
-	if (!vectors.length) return [];
-	const len = vectors[0].length;
-	const sum = new Array(len).fill(0);
-	for (const v of vectors) {
-		for (let i = 0; i < len; i++) sum[i] += v[i];
-	}
-	return sum.map(x => x / vectors.length);
+  if (!vectors.length) return [];
+  const len = vectors[0].length;
+  const sum = new Array(len).fill(0);
+
+  for (const v of vectors) {
+    for (let i = 0; i < len; i++) {
+      sum[i] += v[i];
+    }
+  }
+
+  return sum.map(x => x / vectors.length);
+}
+
+function truncate(text) {
+  return text.length > MAX_CHARS ? text.slice(0, MAX_CHARS) : text;
+}
+
+function estimateTokens(str) {
+  const ascii = /^[\x00-\x7F]*$/.test(str);
+  return ascii ? Math.ceil(str.length / 4) : Math.ceil(str.length / 1.5);
 }
 
 // ─── Embedding cache ──────────────────────────────────────────────────────────
-// Avoids calling the embeddings API for the same text within a session.
-// HyDE output varies, so the biggest wins come from repeated identical queries.
+
 const _embedCache = new Map();
 const EMBED_CACHE_MAX = 500;
 
-function truncate(text) {
-	return text.length > MAX_CHARS ? text.slice(0, MAX_CHARS) : text;
+function getCachedEmbedding(key) {
+  if (!_embedCache.has(key)) return null;
+
+  const value = _embedCache.get(key);
+
+  // Refresh LRU-ish order
+  _embedCache.delete(key);
+  _embedCache.set(key, value);
+
+  return value;
 }
 
+function setCachedEmbedding(key, value) {
+  if (_embedCache.has(key)) {
+    _embedCache.delete(key);
+  }
+
+  while (_embedCache.size >= EMBED_CACHE_MAX) {
+    _embedCache.delete(_embedCache.keys().next().value);
+  }
+
+  _embedCache.set(key, value);
+}
+
+// ─── OpenAI request helpers ───────────────────────────────────────────────────
+
 /**
  * Performs an HTTPS POST request to the OpenAI embeddings endpoint.
  * @param {string} apiKey
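Editor's note: this hunk replaces fixed-size slicing (splitIntoChunks) with block-aware chunking plus tail overlap. A sketch with deliberately tiny, invented limits so the mechanics are visible; the shipped constants are maxLen 10000, targetLen 2200, minLen 500, overlap 300:

// Three paragraphs; the small maxLen/targetLen force one chunk per paragraph.
const text = [
  'Paragraph one about index funds. '.repeat(4),
  'Paragraph two about bond ladders. '.repeat(4),
  'Paragraph three about tax lots. '.repeat(4),
].join('\n\n');

const chunks = splitIntoSemanticChunks(text, {
  maxLen: 200, targetLen: 150, minLen: 40, overlap: 30,
});

// Each chunk after the first is prefixed with the ~30-char tail of its
// predecessor, so context carries across chunk boundaries.
chunks.forEach((c, i) => console.log(i, c.length, JSON.stringify(c.slice(0, 40))));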
@@ -79,203 +248,246 @@ function truncate(text) {
  * @returns {Promise<object>} Parsed JSON response body
  */
 function requestEmbeddings(apiKey, input) {
-	return new Promise((resolve, reject) => {
-		const body = JSON.stringify({ model: EMBEDDING_MODEL, input });
-		const options = {
-			hostname: OPENAI_EMBEDDINGS_HOSTNAME,
-			path: OPENAI_EMBEDDINGS_PATH,
-			method: 'POST',
-			headers: {
-				'Content-Type': 'application/json',
-				'Authorization': `Bearer ${apiKey}`,
-				'Content-Length': Buffer.byteLength(body),
-			},
-		};
-
-		const req = https.request(options, (res) => {
-			const chunks = [];
-			res.on('data', chunk => chunks.push(chunk));
-			res.on('end', () => {
-				let parsed;
-				try {
-					parsed = JSON.parse(Buffer.concat(chunks).toString('utf8'));
-				} catch (e) {
-					return reject(new Error(`Failed to parse OpenAI response: ${e.message}`));
-				}
-
-				if (res.statusCode >= 400) {
-					const message = (parsed.error && parsed.error.message) || `HTTP ${res.statusCode}`;
-					return reject(new Error(`OpenAI API error: ${message}`));
-				}
-
-				resolve(parsed);
-			});
-		});
-
-		req.on('error', err => reject(new Error(`Network error calling OpenAI: ${err.message}`)));
-		req.write(body);
-		req.end();
-	});
+  return new Promise((resolve, reject) => {
+    const body = JSON.stringify({ model: EMBEDDING_MODEL, input });
+
+    const options = {
+      hostname: OPENAI_EMBEDDINGS_HOSTNAME,
+      path: OPENAI_EMBEDDINGS_PATH,
+      method: 'POST',
+      headers: {
+        'Content-Type': 'application/json',
+        'Authorization': `Bearer ${apiKey}`,
+        'Content-Length': Buffer.byteLength(body),
+      },
+    };
+
+    const req = https.request(options, res => {
+      const chunks = [];
+
+      res.on('data', chunk => chunks.push(chunk));
+
+      res.on('end', () => {
+        let parsed;
+        try {
+          parsed = JSON.parse(Buffer.concat(chunks).toString('utf8'));
+        } catch (e) {
+          return reject(new Error(`Failed to parse OpenAI response: ${e.message}`));
+        }
+
+        if (res.statusCode >= 400) {
+          const message = (parsed.error && parsed.error.message) || `HTTP ${res.statusCode}`;
+          return reject(new Error(`OpenAI API error: ${message}`));
+        }
+
+        resolve(parsed);
+      });
+    });
+
+    req.on('error', err => reject(new Error(`Network error calling OpenAI: ${err.message}`)));
+    req.write(body);
+    req.end();
+  });
 }
 
 /**
  * Retries an async operation up to maxRetries times with exponential back-off.
- * @param {Function} fn - Async function to retry
+ * @param {Function} fn
  * @param {number} retries
  * @returns {Promise<*>}
  */
 async function withRetry(fn, retries = MAX_RETRIES) {
-	let lastError;
-	for (let attempt = 1; attempt <= retries; attempt++) {
-		try {
-			return await fn();
-		} catch (err) {
-			lastError = err;
-			if (attempt < retries) {
-				winston().warn(`[search-agent] embeddingService: attempt ${attempt} failed (${err.message}), retrying in ${RETRY_DELAY_MS * attempt} ms…`);
-				await new Promise(resolve => setTimeout(resolve, RETRY_DELAY_MS * attempt));
-			}
-		}
-	}
-	throw lastError;
+  let lastError;
+
+  for (let attempt = 1; attempt <= retries; attempt++) {
+    try {
+      return await fn();
+    } catch (err) {
+      lastError = err;
+
+      if (attempt < retries) {
+        winston().warn(
+          `[search-agent] embeddingService: attempt ${attempt} failed (${err.message}), retrying in ${RETRY_DELAY_MS * attempt} ms...`
+        );
+        await new Promise(resolve => setTimeout(resolve, RETRY_DELAY_MS * attempt));
+      }
+    }
+  }
+
+  throw lastError;
 }
 
+// ─── Public API ───────────────────────────────────────────────────────────────
+
 /**
  * Converts a single text string into an embedding vector.
  * @param {string} text
 * @returns {Promise<number[]>}
  */
 async function embed(text) {
-	if (typeof text !== 'string' || text.trim() === '') {
-		throw new Error('embed() requires a non-empty string');
-	}
-
-	const apiKey = process.env.OPENAI_API_KEY;
-	if (!apiKey) {
-		throw new Error('OPENAI_API_KEY environment variable is not set');
-	}
-
-	// Remove non-text content
-	const pureText = extractPureText(text);
-	if (!pureText) {
-		throw new Error('embed() received no usable text after filtering');
-	}
-	// Split into chunks if too long
-	const chunks = splitIntoChunks(pureText, MAX_CHARS, CHUNK_OVERLAP);
-	// Estimate tokens (roughly 1.5 chars/token for non-ASCII, 4 chars/token for ASCII)
-	const estimateTokens = (str) => {
-		// If mostly ASCII, use 4 chars/token, else 1.5
-		const ascii = /^[\x00-\x7F]*$/.test(str);
-		return ascii ? Math.ceil(str.length / 4) : Math.ceil(str.length / 1.5);
-	};
-	if (chunks.length === 1) {
-		const safe = truncate(text);
-		if (_embedCache.has(safe)) {
-			winston().verbose('[search-agent] embeddingService: embedding cache hit');
-			return _embedCache.get(safe);
-		}
-		const tokenCount = estimateTokens(safe);
-		winston().info(`[search-agent] embeddingService: generating embedding for text (${safe.length} chars, ~${tokenCount} tokens)`);
-		const response = await withRetry(() => requestEmbeddings(apiKey, safe));
-		winston().verbose('[search-agent] embeddingService: embedding generated successfully');
-		const embedding = response.data[0].embedding;
-		if (_embedCache.size >= EMBED_CACHE_MAX) {
-			_embedCache.delete(_embedCache.keys().next().value);
-		}
-		_embedCache.set(safe, embedding);
-		return embedding;
-	} else {
-		// For multi-chunk, embed all and average
-		winston().info(`[search-agent] embeddingService: splitting long text into ${chunks.length} chunks for embedding`);
-		chunks.forEach((chunk, i) => {
-			const tokenCount = estimateTokens(chunk);
-			winston().info(`[search-agent] embeddingService: chunk ${i+1}/${chunks.length} — ${chunk.length} chars, ~${tokenCount} tokens`);
-		});
-		const vectors = await embedBatch(chunks);
-		const avg = averageVectors(vectors);
-		// Optionally cache the average for the full text
-		const safe = truncate(text);
-		if (_embedCache.size >= EMBED_CACHE_MAX) {
-			_embedCache.delete(_embedCache.keys().next().value);
-		}
-		_embedCache.set(safe, avg);
-		return avg;
-	}
+  if (typeof text !== 'string' || text.trim() === '') {
+    throw new Error('embed() requires a non-empty string');
+  }
+
+  const apiKey = process.env.OPENAI_API_KEY;
+  if (!apiKey) {
+    throw new Error('OPENAI_API_KEY environment variable is not set');
+  }
+
+  const pureText = extractPureText(text);
+  if (!pureText) {
+    throw new Error('embed() received no usable text after filtering');
+  }
+
+  const chunks = splitIntoSemanticChunks(pureText, {
+    maxLen: MAX_CHARS,
+    targetLen: TARGET_CHUNK_CHARS,
+    minLen: MIN_CHUNK_CHARS,
+    overlap: CHUNK_OVERLAP,
+  });
+
+  if (chunks.length === 1) {
+    const safe = truncate(pureText);
+    const cached = getCachedEmbedding(safe);
+    if (cached) {
+      winston().verbose('[search-agent] embeddingService: embedding cache hit');
+      return cached;
+    }
+
+    const tokenCount = estimateTokens(safe);
+    winston().info(
+      `[search-agent] embeddingService: generating embedding for text (${safe.length} chars, ~${tokenCount} tokens)`
+    );
+
+    const response = await withRetry(() => requestEmbeddings(apiKey, safe));
+    const embedding = response.data[0].embedding;
+
+    winston().verbose('[search-agent] embeddingService: embedding generated successfully');
+    setCachedEmbedding(safe, embedding);
+
+    return embedding;
+  }
+
+  winston().info(
+    `[search-agent] embeddingService: splitting long text into ${chunks.length} semantic chunks for embedding`
+  );
+
+  chunks.forEach((chunk, i) => {
+    const tokenCount = estimateTokens(chunk);
+    winston().info(
+      `[search-agent] embeddingService: chunk ${i + 1}/${chunks.length} - ${chunk.length} chars, ~${tokenCount} tokens`
+    );
+  });
+
+  const vectors = await embedBatch(chunks);
+  const avg = averageVectors(vectors);
+  const safe = truncate(pureText);
+
+  setCachedEmbedding(safe, avg);
+
+  return avg;
 }
 
 /**
  * Converts an array of text strings into an array of embedding vectors.
- * Texts are sent in a single batched API request.
+ * Texts are sent in batched API requests after semantic chunking.
  * @param {string[]} texts
 * @returns {Promise<number[][]>}
  */
 async function embedBatch(texts) {
-	if (!Array.isArray(texts) || texts.length === 0) {
-		throw new Error('embedBatch() requires a non-empty array of strings');
-	}
-
-	const invalid = texts.findIndex(t => typeof t !== 'string' || t.trim() === '');
-	if (invalid !== -1) {
-		throw new Error(`embedBatch() received an empty or non-string value at index ${invalid}`);
-	}
-
-	const apiKey = process.env.OPENAI_API_KEY;
-	if (!apiKey) {
-		throw new Error('OPENAI_API_KEY environment variable is not set');
-	}
-
-	// For each text, filter to pure text, then split and average embeddings
-	const allChunks = [];
-	const chunkMap = [];
-	// Estimate tokens (roughly 1.5 chars/token for non-ASCII, 4 chars/token for ASCII)
-	const estimateTokens = (str) => {
-		const ascii = /^[\x00-\x7F]*$/.test(str);
-		return ascii ? Math.ceil(str.length / 4) : Math.ceil(str.length / 1.5);
-	};
-	for (const [textIdx, text] of texts.entries()) {
-		const pureText = extractPureText(text);
-		if (!pureText) {
-			chunkMap.push({ count: 0 });
-			continue;
-		}
-		const chunks = splitIntoChunks(pureText, MAX_CHARS, CHUNK_OVERLAP);
-		chunkMap.push({ count: chunks.length });
-		allChunks.push(...chunks);
-		if (chunks.length === 1) {
-			const tokenCount = estimateTokens(chunks[0]);
-			winston().info(`[search-agent] embeddingService: batch input ${textIdx+1}/${texts.length} — 1 chunk, ${chunks[0].length} chars, ~${tokenCount} tokens`);
-		} else {
-			winston().info(`[search-agent] embeddingService: batch input ${textIdx+1}/${texts.length} — ${chunks.length} chunks`);
-			chunks.forEach((chunk, i) => {
-				const tokenCount = estimateTokens(chunk);
-				winston().info(`[search-agent] embeddingService: chunk ${i+1}/${chunks.length} ${chunk.length} chars, ~${tokenCount} tokens`);
-			});
-		}
-	}
-	winston().verbose(`[search-agent] embeddingService: batch embedding ${allChunks.length} chunk(s) from ${texts.length} input(s)`);
-	if (allChunks.length === 0) {
-		// All texts were filtered out
-		return chunkMap.map(({ count }) => count === 0 ? [] : null);
-	}
-	const safeChunks = allChunks.map(truncate);
-	const response = await withRetry(() => requestEmbeddings(apiKey, safeChunks));
-	winston().verbose(`[search-agent] embeddingService: batch embeddings generated successfully (${safeChunks.length} vector(s))`);
-	const vectors = response.data.sort((a, b) => a.index - b.index).map(item => item.embedding);
-	// Recombine chunk embeddings for each original text
-	const result = [];
-	let idx = 0;
-	for (const { count } of chunkMap) {
-		if (count === 0) {
-			result.push([]); // No usable text
-		} else if (count === 1) {
-			result.push(vectors[idx]);
-			idx += 1;
-		} else {
-			result.push(averageVectors(vectors.slice(idx, idx + count)));
-			idx += count;
-		}
-	}
-	return result;
+  if (!Array.isArray(texts) || texts.length === 0) {
+    throw new Error('embedBatch() requires a non-empty array of strings');
+  }
+
+  const invalid = texts.findIndex(t => typeof t !== 'string' || t.trim() === '');
+  if (invalid !== -1) {
+    throw new Error(`embedBatch() received an empty or non-string value at index ${invalid}`);
+  }
+
+  const apiKey = process.env.OPENAI_API_KEY;
+  if (!apiKey) {
+    throw new Error('OPENAI_API_KEY environment variable is not set');
+  }
+
+  const allChunks = [];
+  const chunkMap = [];
+
+  for (const [textIdx, text] of texts.entries()) {
+    const pureText = extractPureText(text);
+
+    if (!pureText) {
+      chunkMap.push({ count: 0 });
+      continue;
+    }
+
+    const chunks = splitIntoSemanticChunks(pureText, {
+      maxLen: MAX_CHARS,
+      targetLen: TARGET_CHUNK_CHARS,
+      minLen: MIN_CHUNK_CHARS,
+      overlap: CHUNK_OVERLAP,
+    });
+
+    chunkMap.push({ count: chunks.length });
+    allChunks.push(...chunks);
+
+    if (chunks.length === 1) {
+      const tokenCount = estimateTokens(chunks[0]);
+      winston().info(
+        `[search-agent] embeddingService: batch input ${textIdx + 1}/${texts.length} - 1 chunk, ${chunks[0].length} chars, ~${tokenCount} tokens`
+      );
+    } else {
+      winston().info(
+        `[search-agent] embeddingService: batch input ${textIdx + 1}/${texts.length} - ${chunks.length} chunks`
+      );
+      chunks.forEach((chunk, i) => {
+        const tokenCount = estimateTokens(chunk);
+        winston().info(
+          `[search-agent] embeddingService: chunk ${i + 1}/${chunks.length} - ${chunk.length} chars, ~${tokenCount} tokens`
+        );
+      });
+    }
+  }
+
+  winston().verbose(
+    `[search-agent] embeddingService: batch embedding ${allChunks.length} chunk(s) from ${texts.length} input(s)`
+  );
+
+  if (allChunks.length === 0) {
+    return chunkMap.map(({ count }) => (count === 0 ? [] : null));
+  }
+
+  const safeChunks = allChunks.map(chunk => truncate(chunk));
+  const response = await withRetry(() => requestEmbeddings(apiKey, safeChunks));
+
+  winston().verbose(
+    `[search-agent] embeddingService: batch embeddings generated successfully (${safeChunks.length} vector(s))`
+  );
+
+  const vectors = response.data
+    .sort((a, b) => a.index - b.index)
+    .map(item => item.embedding);
+
+  const result = [];
+  let idx = 0;
+
+  for (const { count } of chunkMap) {
+    if (count === 0) {
+      result.push([]);
+    } else if (count === 1) {
+      result.push(vectors[idx]);
+      idx += 1;
+    } else {
+      result.push(averageVectors(vectors.slice(idx, idx + count)));
+      idx += count;
+    }
+  }
+
+  return result;
 }
 
-module.exports = { embed, embedBatch };
+module.exports = {
+  embed,
+  embedBatch,
+  extractPureText,
+  splitIntoSemanticChunks,
+};
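Editor's note: a consumer-side sketch of the embedding module's exported surface (OPENAI_API_KEY must be set; the relative path matches the require shown in the vector search service below):

const { embed, embedBatch } = require('./embeddingService');

(async () => {
  // Single text: cleaned, chunked if long, chunk vectors averaged into one.
  const vec = await embed('How do I offset capital losses against gains?');
  console.log(vec.length); // 1536 for text-embedding-3-small

  // Batch: inputs that clean down to nothing come back as empty arrays,
  // and the output stays aligned with the input order.
  const vecs = await embedBatch(['First post body', 'Second post body']);
  console.log(vecs.length); // 2
})();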
@@ -5,123 +5,409 @@ const { embed } = require('./embeddingService');
 const { getAllEmbeddings } = require('./vectorStore');
 
 function winston() {
-	return require.main.require('winston');
+  return require.main.require('winston');
 }
 
 // Fetch this many candidates from Orama — cast a wide net so the AI has enough to choose from
 const TOP_K = 50;
-// Absolute minimum cosine similarity — only filters pure noise (near-zero similarity).
-// Do NOT raise this: the relevant result often scores lower than irrelevant ones.
-// The AI re-ranker (which reads content) is the precision gate, not this floor.
+
+// Absolute minimum similarity only filters pure noise.
+// Keep this low; the later ranking layers should handle precision.
 const MIN_SCORE = 0.15;
-// Rebuild the Orama index after this interval (mirrors TF-IDF cache TTL)
+
+// Rebuild the Orama index after this interval
 const INDEX_TTL_MS = 5 * 60 * 1000;
 
+// Hybrid search configuration
+const VECTOR_SIMILARITY = 0.1;
+const SEARCH_PROPERTIES = ['title', 'category', 'tags', 'content', 'parent_content'];
+const FIELD_BOOSTS = {
+  title: 3.5,
+  tags: 2.8,
+  category: 2.2,
+  content: 1.0,
+  parent_content: 0.8,
+};
+
 let _db = null;
 let _dbTs = 0;
 let _buildPromise = null;
 
+// Finance-heavy Hebrew forum query expansion.
+// These are intentionally conservative: good recall lift without flooding the query.
+const QUERY_EXPANSIONS = {
+  // General finance
+  'מניה': ['מניות', 'נייר ערך', 'שוק ההון', 'בורסה'],
+  'מניות': ['מניה', 'ניירות ערך', 'שוק ההון', 'בורסה'],
+  'אגח': ['אג"ח', 'איגרת חוב', 'איגרות חוב', 'חוב'],
+  'אג"ח': ['אגח', 'איגרת חוב', 'איגרות חוב', 'חוב'],
+  'קרן': ['קרנות', 'קרן נאמנות', 'קרן סל'],
+  'קרנות': ['קרן', 'קרן נאמנות', 'קרן סל'],
+  'קרן סל': ['etf', 'תעודת סל', 'קרן מחקה'],
+  'תעודת סל': ['etf', 'קרן סל', 'קרן מחקה'],
+  'etf': ['קרן סל', 'תעודת סל', 'קרן מחקה'],
+  'מדד': ['מדדים', 'מדד מניות', 'תשואת מדד'],
+  'מדדים': ['מדד', 'מדד מניות', 'תשואת מדד'],
+  'תיק': ['תיק השקעות', 'פיזור', 'החזקות'],
+  'השקעה': ['השקעות', 'להשקיע', 'תיק השקעות'],
+  'השקעות': ['השקעה', 'להשקיע', 'תיק השקעות'],
+  'להשקיע': ['השקעה', 'השקעות', 'תיק השקעות'],
+  'תשואה': ['רווח', 'תשואות', 'רווחיות'],
+  'רווח': ['רווחים', 'תשואה', 'רווחיות'],
+  'הפסד': ['הפסדים', 'ירידה', 'מינוס'],
+  'דיבידנד': ['דיבידנדים', 'חלוקת רווחים'],
+  'מכפיל': ['מכפיל רווח', 'pe', 'p/e'],
+  'pe': ['מכפיל', 'מכפיל רווח', 'p/e'],
+  'p/e': ['מכפיל', 'מכפיל רווח', 'pe'],
+  'מינוף': ['ממונף', 'הלוואה', 'מרגין', 'margin'],
+  'מרגין': ['margin', 'מינוף'],
+  'margin': ['מרגין', 'מינוף'],
+  'סיכון': ['סיכונים', 'תנודתיות', 'חשיפה'],
+  'נזילות': ['נזיל', 'מזומן', 'סחירות'],
+  'סחירות': ['נזילות', 'נזיל'],
+
+  // Tax / regulation
+  'מס': ['מיסוי', 'מסים', 'רשות המסים'],
+  'מיסוי': ['מס', 'מסים', 'רשות המסים'],
+  'מסים': ['מס', 'מיסוי', 'רשות המסים'],
+  'קיזוז': ['קיזוז הפסדים', 'מגן מס'],
+  'דוח': ['דו"ח', 'דיווח', 'טופס'],
+  'דו"ח': ['דוח', 'דיווח', 'טופס'],
+
+  // Savings / pension
+  'פנסיה': ['קרן פנסיה', 'חיסכון פנסיוני', 'קצבה'],
+  'גמל': ['קופת גמל', 'קופ"ג'],
+  'קופג': ['קופת גמל', 'קופ"ג', 'גמל'],
+  'קופ"ג': ['קופת גמל', 'קופג', 'גמל'],
+  'השתלמות': ['קרן השתלמות'],
+  'משכנתא': ['משכנתאות', 'ריבית', 'הלוואת דיור'],
+  'הלוואה': ['הלוואות', 'אשראי', 'מימון'],
+  'אשראי': ['הלוואה', 'הלוואות', 'מימון'],
+
+  // Trading / technical
+  'מסחר': ['טריידינג', 'קניה', 'מכירה', 'פקודה'],
+  'טריידינג': ['מסחר', 'מסחר יומי', 'קניה', 'מכירה'],
+  'שורט': ['short', 'מכירה בחסר'],
+  'short': ['שורט', 'מכירה בחסר'],
+  'לונג': ['long', 'החזקה'],
+  'long': ['לונג', 'החזקה'],
+  'פקודה': ['פקודות', 'לימיט', 'מרקט'],
+  'לימיט': ['limit', 'פקודת לימיט'],
+  'limit': ['לימיט', 'פקודת לימיט'],
+  'מרקט': ['market', 'פקודת שוק'],
+  'market': ['מרקט', 'פקודת שוק'],
+
+  // Crypto
+  'ביטקוין': ['btc', 'קריפטו', 'מטבע דיגיטלי'],
+  'btc': ['ביטקוין', 'קריפטו', 'מטבע דיגיטלי'],
+  'אתריום': ['eth', 'קריפטו', 'מטבע דיגיטלי'],
+  'eth': ['אתריום', 'קריפטו', 'מטבע דיגיטלי'],
+  'קריפטו': ['מטבע דיגיטלי', 'ביטקוין', 'אתריום', 'בלוקציין'],
+  'בלוקציין': ['קריפטו', 'מטבע דיגיטלי'],
+
+  // Hebrew forum / advice intent
+  'מומלץ': ['כדאי', 'המלצה', 'עדיף'],
+  'כדאי': ['מומלץ', 'המלצה', 'עדיף'],
+  'המלצה': ['מומלץ', 'כדאי', 'עדיף'],
+  'בעיה': ['תקלה', 'קושי', 'לא עובד'],
+  'תקלה': ['בעיה', 'לא עובד', 'שגיאה'],
+  'שגיאה': ['תקלה', 'בעיה', 'לא עובד'],
+};
+
+// Common phrase-level expansions that are better handled before token expansion.
+const PHRASE_EXPANSIONS = [
+  {
+    pattern: /\b(?:קרן\s+סל|תעודת\s+סל|קרן\s+מחקה)\b/gi,
+    terms: ['etf', 'קרן סל', 'תעודת סל', 'קרן מחקה'],
+  },
+  {
+    pattern: /\b(?:איגרת\s+חוב|איגרות\s+חוב|אג["״׳]?\s?ח)\b/gi,
+    terms: ['אגח', 'אג"ח', 'איגרת חוב', 'איגרות חוב'],
+  },
+  {
+    pattern: /\b(?:קופת\s+גמל|קופ["״׳]?\s?ג)\b/gi,
+    terms: ['קופת גמל', 'קופג', 'קופ"ג', 'גמל'],
+  },
+  {
+    pattern: /\b(?:מכפיל\s+רווח|p\/e|pe)\b/gi,
+    terms: ['מכפיל', 'מכפיל רווח', 'pe', 'p/e'],
+  },
+  {
+    pattern: /\b(?:מכירה\s+בחסר|short)\b/gi,
+    terms: ['שורט', 'short', 'מכירה בחסר'],
+  },
+];
+
+// Generic filler words to ignore for lexical expansion
+const STOP_WORDS = new Set([
+  'של', 'על', 'עם', 'בלי', 'גם', 'או', 'אם', 'אבל', 'כי', 'זה', 'זאת', 'זו',
+  'יש', 'אין', 'אני', 'אתה', 'את', 'הוא', 'היא', 'הם', 'הן', 'אנחנו', 'מה',
+  'איך', 'למה', 'מתי', 'איפה', 'האם', 'כל', 'עוד', 'כמו', 'רק', 'מאוד', 'פחות',
+  'יותר', 'אחרי', 'לפני', 'תוך', 'דרך', 'לגבי', 'בנוגע', 'בשביל', 'מול',
+]);
+
+function normalizeHebrew(text) {
+  if (!text) return text;
+
+  // Remove common prefixes
+  const prefixes = ['ה', 'ו', 'ב', 'ל', 'מ', 'ש', 'כ'];
+  for (const prefix of prefixes) {
+    if (text.startsWith(prefix) && text.length > 3) {
+      text = text.slice(1);
+      break;
+    }
+  }
+
+  // Remove common plural suffixes
+  const pluralSuffixes = ['ים', 'ות'];
+  for (const suffix of pluralSuffixes) {
+    if (text.endsWith(suffix) && text.length > 3) {
+      text = text.slice(0, -suffix.length);
+      break;
+    }
+  }
+
+  return String(text || '')
+    // remove niqqud / cantillation
+    .replace(/[\u0591-\u05C7]/g, '')
+    // normalize Hebrew punctuation variants
+    .replace(/[׳']/g, '\'')
+    .replace(/[״"]/g, '"')
+    // collapse whitespace
+    .replace(/\s+/g, ' ')
+    .trim();
+}
+
+function normalizeToken(token) {
+  return normalizeHebrew(token)
+    .toLowerCase()
+    .replace(/^[^\p{L}\p{N}]+|[^\p{L}\p{N}]+$/gu, '');
+}
+
+function uniqueTerms(terms, maxTerms = 24) {
+  const seen = new Set();
+  const out = [];
+
+  for (const raw of terms) {
+    const term = normalizeHebrew(raw).trim();
+    if (!term) continue;
+
+    const key = term.toLowerCase();
+    if (seen.has(key)) continue;
+
+    seen.add(key);
+    out.push(term);
+
+    if (out.length >= maxTerms) break;
+  }
+
+  return out;
+}
+
+function expandQuery(query) {
+  const normalized = normalizeHebrew(query);
+  const expanded = [normalized];
+
+  for (const phraseRule of PHRASE_EXPANSIONS) {
+    if (phraseRule.pattern.test(normalized)) {
+      expanded.push(...phraseRule.terms);
+    }
+    phraseRule.pattern.lastIndex = 0;
+  }
+
+  const tokens = normalized
+    .split(/[\s,/|()[\]{}:;!?]+/)
+    .map(normalizeToken)
+    .filter(Boolean)
+    .filter(token => !STOP_WORDS.has(token));
+
+  for (const token of tokens) {
+    expanded.push(token);
+
+    const synonyms = QUERY_EXPANSIONS[token];
+    if (synonyms) {
+      expanded.push(...synonyms);
+    }
+
+    // A little morphology help for Hebrew singular/plural and abbreviations
+    if (token.endsWith('ים') && token.length > 3) {
+      expanded.push(token.slice(0, -2));
+    }
+    if (token.endsWith('ות') && token.length > 3) {
+      expanded.push(token.slice(0, -2));
+    }
+    if (token.endsWith('ה') && token.length > 2) {
+      expanded.push(token.slice(0, -1));
+    }
+  }
+
+  const terms = uniqueTerms(expanded, 24);
+
+  return {
+    original: query,
+    normalized,
+    terms,
+    // Orama lexical search receives one expanded term string
+    term: terms.join(' '),
+  };
+}
+
+function coerceString(value) {
+  if (value == null) return '';
+
+  if (Array.isArray(value)) {
+    return value
+      .map(v => coerceString(v))
+      .filter(Boolean)
+      .join(', ');
+  }
+
+  return String(value).trim();
+}
+
+function buildDocument(row) {
+  return {
+    id: String(row.post_id),
+    post_id: row.post_id,
+    topic_id: row.topic_id,
+    title: coerceString(row.title),
+    category: coerceString(row.category),
+    tags: coerceString(row.tags),
+    parent_content: coerceString(row.parent_content),
+    content: coerceString(row.content),
+    embedding: row.embedding,
+  };
+}
+
 async function buildIndex() {
-	const storedEmbeddings = await getAllEmbeddings();
-
-	// Detect dimension from data; fall back to 1536 (text-embedding-3-small default)
-	const dimensions = storedEmbeddings.length > 0
-		? storedEmbeddings[0].embedding.length
-		: 1536;
-
-	const db = await create({
-		schema: {
-			post_id: 'number',
-			topic_id: 'number',
-			content: 'string',
-			embedding: `vector[${dimensions}]`,
-		},
-	});
-
-	if (storedEmbeddings.length > 0) {
-		await insertMultiple(db, storedEmbeddings.map(e => ({
-			id: String(e.post_id),
-			post_id: e.post_id,
-			topic_id: e.topic_id,
-			content: e.content,
-			embedding: e.embedding,
-		})));
-	}
-
-	winston().info(`[search-agent] vectorSearchService: Orama index built with ${storedEmbeddings.length} document(s)`);
-	return db;
+  const storedEmbeddings = await getAllEmbeddings();
+
+  // Detect dimension from data; fall back to 1536 (text-embedding-3-small default)
+  const dimensions = storedEmbeddings.length > 0
+    ? storedEmbeddings[0].embedding.length
+    : 1536;
+
+  const db = await create({
+    schema: {
+      post_id: 'number',
+      topic_id: 'number',
+      title: 'string',
+      category: 'string',
+      tags: 'string',
+      parent_content: 'string',
+      content: 'string',
+      embedding: `vector[${dimensions}]`,
+    },
+  });
+
+  if (storedEmbeddings.length > 0) {
+    await insertMultiple(db, storedEmbeddings.map(buildDocument));
+  }
+
+  winston().info(
+    `[search-agent] vectorSearchService: Orama index built with ${storedEmbeddings.length} document(s)`
+  );
+
+  return db;
 }
 
 async function getDb() {
-	const now = Date.now();
-	if (_db && (now - _dbTs) < INDEX_TTL_MS) {
-		return _db;
-	}
-
-	if (_buildPromise) {
-		return _buildPromise;
-	}
-
-	_buildPromise = buildIndex().then((db) => {
-		_db = db;
-		_dbTs = Date.now();
-		_buildPromise = null;
-		return db;
-	}).catch((err) => {
-		_buildPromise = null;
-		throw err;
-	});
-
-	return _buildPromise;
+  const now = Date.now();
+  if (_db && (now - _dbTs) < INDEX_TTL_MS) {
+    return _db;
+  }
+
+  if (_buildPromise) {
+    return _buildPromise;
+  }
+
+  _buildPromise = buildIndex()
+    .then((db) => {
+      _db = db;
+      _dbTs = Date.now();
+      _buildPromise = null;
+      return db;
+    })
+    .catch((err) => {
+      _buildPromise = null;
+      throw err;
+    });
+
+  return _buildPromise;
 }
 
 /** Invalidate the in-memory Orama index (e.g. after new embeddings are saved). */
 function invalidateIndex() {
-	_db = null;
-	_dbTs = 0;
-	winston().info('[search-agent] vectorSearchService: Orama index invalidated');
+  _db = null;
+  _dbTs = 0;
+  winston().info('[search-agent] vectorSearchService: Orama index invalidated');
 }
 
 /**
- * Performs semantic search against stored post embeddings using Orama vector search.
+ * Performs hybrid search against stored post embeddings using:
+ *   1. vector similarity on the original query embedding
+ *   2. lexical search on an expanded Hebrew query
+ *   3. field boosts to favor title/tags/category matches
 *
 * @param {string} query - The search query string.
- * @returns {Promise<Array<{ topic_id: number, post_id: number, content: string, score: number }>>}
- *          Top results sorted by cosine similarity descending.
+ * @param {number} limit - Max results to return.
+ * @returns {Promise<Array<{ topic_id: number, post_id: number, title: string, category: string, tags: string, content: string, score: number }>>}
 */
 async function search(query, limit = TOP_K) {
-	if (typeof query !== 'string' || query.trim() === '') {
-		throw new Error('search() requires a non-empty query string');
-	}
-
-	winston().verbose(`[search-agent] vectorSearchService: running Orama vector search for "${query.trim()}"`);
-
-	const [queryEmbedding, db] = await Promise.all([
-		embed(query),
-		getDb(),
-	]);
-
-	const results = await oramaSearch(db, {
-		mode: 'vector',
-		vector: { value: queryEmbedding, property: 'embedding' },
-		limit,
-		similarity: 0.1,
-		includeVectors: false,
-	});
-
-	winston().verbose(`[search-agent] vectorSearchService: Orama returned ${results.hits.length} hit(s)`);
-
-	const filtered = results.hits.filter(hit => hit.score >= MIN_SCORE);
-	winston().verbose(
-		`[search-agent] vectorSearchService: ${filtered.length}/${results.hits.length} hit(s) passed noise floor (MIN_SCORE=${MIN_SCORE})`
-	);
-
-	return filtered.map(hit => ({
-		topic_id: hit.document.topic_id,
-		post_id: hit.document.post_id,
-		content: hit.document.content,
-		score: hit.score,
-	}));
+  if (typeof query !== 'string' || query.trim() === '') {
+    throw new Error('search() requires a non-empty query string');
+  }
+
+  const trimmed = query.trim();
+  const expanded = expandQuery(trimmed);
+
+  winston().verbose(
+    `[search-agent] vectorSearchService: running Orama hybrid search for "${trimmed}" (expanded="${expanded.term}")`
+  );
+
+  const [queryEmbedding, db] = await Promise.all([
+    // Keep the embedding on the original query only.
+    // Expansion is mainly for lexical recall, especially in Hebrew forum language.
+    embed(trimmed),
+    getDb(),
+  ]);
+
+  const results = await oramaSearch(db, {
+    mode: 'hybrid',
+    term: expanded.term,
+    properties: SEARCH_PROPERTIES,
+    boost: FIELD_BOOSTS,
+    vector: {
+      value: queryEmbedding,
+      property: 'embedding',
+    },
+    limit,
+    similarity: VECTOR_SIMILARITY,
+    includeVectors: false,
+  });
+
+  const hits = Array.isArray(results && results.hits) ? results.hits : [];
+
+  winston().verbose(`[search-agent] vectorSearchService: Orama returned ${hits.length} hit(s)`);
+
+  const filtered = hits.filter(hit => typeof hit.score === 'number' && hit.score >= MIN_SCORE);
+
+  winston().verbose(
+    `[search-agent] vectorSearchService: ${filtered.length}/${hits.length} hit(s) passed noise floor (MIN_SCORE=${MIN_SCORE})`
+  );
+
+  return filtered.map(hit => ({
+    topic_id: hit.document.topic_id,
+    post_id: hit.document.post_id,
+    title: hit.document.title || '',
+    category: hit.document.category || '',
+    tags: hit.document.tags || '',
+    content: hit.document.content,
+    score: hit.score,
+  }));
 }
 
-module.exports = { search, invalidateIndex };
+module.exports = {
+  search,
+  invalidateIndex,
+};
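Editor's note: the expansion tables, Hebrew normalization, and field boosts above are all module-private; the exported surface is still just search and invalidateIndex. A consumer-side usage sketch (the service's file path is assumed, not shown in the diff):

const { search, invalidateIndex } = require('./vectorSearchService'); // path assumed

async function findTopics(query) {
  // Embeds the raw query, expands it lexically for Orama's hybrid mode,
  // and returns hits that clear the MIN_SCORE noise floor.
  const hits = await search(query, 10);
  return hits.map(h => ({ topic: h.topic_id, title: h.title, score: h.score }));
}

// After new embeddings are saved, drop the cached index so the next search()
// rebuilds it immediately instead of waiting out the 5-minute TTL.
invalidateIndex();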