n8n-nodes-ollama-reranker 1.1.0 → 1.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -2,6 +2,7 @@
  Object.defineProperty(exports, "__esModule", { value: true });
  exports.OllamaReranker = void 0;
  const n8n_workflow_1 = require("n8n-workflow");
+ const reranker_logic_1 = require("../shared/reranker-logic");
  /**
  * Ollama Reranker Provider
  *
@@ -98,6 +99,25 @@ class OllamaReranker {
  },
  },
  },
+ {
+ displayName: 'API Type',
+ name: 'apiType',
+ type: 'options',
+ options: [
+ {
+ name: 'Ollama Generate API',
+ value: 'ollama',
+ description: 'Standard Ollama /api/generate endpoint (for BGE, Qwen prompt-based rerankers)',
+ },
+ {
+ name: 'Custom Rerank API',
+ value: 'custom',
+ description: 'Custom /api/rerank endpoint (for deposium-embeddings-turbov2, etc.)',
+ },
+ ],
+ default: 'ollama',
+ description: 'Which API endpoint to use for reranking',
+ },
  {
  displayName: 'Top K',
  name: 'topK',
@@ -187,6 +207,7 @@ class OllamaReranker {
  throw new n8n_workflow_1.NodeOperationError(this.getNode(), 'Custom model name is required when "Custom Model" is selected');
  }
  }
+ const apiType = this.getNodeParameter('apiType', 0, 'ollama');
  const instruction = this.getNodeParameter('instruction', 0);
  const additionalOptions = this.getNodeParameter('additionalOptions', 0, {});
  const timeout = (_a = additionalOptions.timeout) !== null && _a !== void 0 ? _a : 30000;
@@ -261,8 +282,8 @@
  });
  self.logger.debug(`Reranking ${processedDocs.length} documents with model: ${model}`);
  try {
- // Rerank documents using Ollama
- const rerankedDocs = await rerankDocuments(self, {
+ // Rerank documents using Ollama or Custom API
+ const rerankedDocs = await (0, reranker_logic_1.rerankDocuments)(self, {
  ollamaHost,
  model,
  query,
@@ -273,6 +294,7 @@
  batchSize,
  timeout,
  includeOriginalScores,
+ apiType,
  });
  self.logger.debug(`Reranking complete: ${rerankedDocs.length} documents returned`);
  // Log output for n8n execution tracking
@@ -310,277 +332,3 @@ class OllamaReranker {
  }
  }
  exports.OllamaReranker = OllamaReranker;
- /**
- * Rerank documents using Ollama reranker model
- */
- async function rerankDocuments(context, config) {
- const { ollamaHost, model, query, documents, instruction, topK, threshold, batchSize, timeout, includeOriginalScores } = config;
- const results = [];
- // Process all documents concurrently with controlled concurrency
- const promises = [];
- for (let i = 0; i < documents.length; i++) {
- const doc = documents[i];
- const promise = scoreDocument(context, ollamaHost, model, query, doc.pageContent, instruction, timeout).then(score => ({
- index: i,
- score,
- }));
- promises.push(promise);
- // Process in batches to avoid overwhelming the API
- if (promises.length >= batchSize || i === documents.length - 1) {
- const batchResults = await Promise.all(promises);
- results.push(...batchResults);
- promises.length = 0; // Clear the array
- }
- }
- // Filter by threshold and sort by score (descending)
- const filteredResults = results
- .filter(r => r.score >= threshold)
- .sort((a, b) => b.score - a.score)
- .slice(0, topK);
- // Map back to original documents with scores
- return filteredResults.map(result => {
- const originalDoc = documents[result.index];
- const rerankedDoc = {
- ...originalDoc,
- _rerankScore: result.score,
- _originalIndex: result.index,
- };
- if (includeOriginalScores && originalDoc._originalScore !== undefined) {
- rerankedDoc._originalScore = originalDoc._originalScore;
- }
- return rerankedDoc;
- });
- }
- /**
- * Score a single document against the query using Ollama reranker model with retry logic
- */
- async function scoreDocument(context, ollamaHost, model, query, documentContent, instruction, timeout) {
- var _a, _b, _c, _d;
- // Format prompt based on model type
- const prompt = formatRerankerPrompt(model, query, documentContent, instruction);
- const maxRetries = 3;
- let lastError;
- for (let attempt = 0; attempt < maxRetries; attempt++) {
- try {
- // Use Ollama /api/generate endpoint for reranker models
- const response = await context.helpers.httpRequest({
- method: 'POST',
- url: `${ollamaHost}/api/generate`,
- headers: {
- 'Content-Type': 'application/json',
- Accept: 'application/json',
- },
- body: {
- model,
- prompt,
- stream: false,
- options: {
- temperature: 0.0, // Deterministic scoring
- },
- },
- json: true,
- timeout,
- });
- // Parse the response to extract relevance score
- const score = parseRerankerResponse(model, response);
- return score;
- }
- catch (error) {
- lastError = error;
- // Don't retry on permanent errors
- if (((_a = error === null || error === void 0 ? void 0 : error.response) === null || _a === void 0 ? void 0 : _a.statusCode) === 404 || ((_b = error === null || error === void 0 ? void 0 : error.response) === null || _b === void 0 ? void 0 : _b.statusCode) === 400) {
- break;
- }
- // Retry on transient errors (timeout, 5xx, network issues)
- if (attempt < maxRetries - 1) {
- const isTransient = (error === null || error === void 0 ? void 0 : error.name) === 'AbortError' ||
- (error === null || error === void 0 ? void 0 : error.code) === 'ETIMEDOUT' ||
- ((_c = error === null || error === void 0 ? void 0 : error.response) === null || _c === void 0 ? void 0 : _c.statusCode) >= 500;
- if (isTransient) {
- // Exponential backoff: 100ms, 200ms, 400ms
- await new Promise(resolve => setTimeout(resolve, 100 * Math.pow(2, attempt)));
- continue;
- }
- }
- break;
- }
- }
- // Handle final error after retries
- const error = lastError;
- if ((error === null || error === void 0 ? void 0 : error.name) === 'AbortError' || (error === null || error === void 0 ? void 0 : error.code) === 'ETIMEDOUT') {
- throw new n8n_workflow_1.NodeApiError(context.getNode(), error, {
- message: `Request timeout after ${timeout}ms (tried ${maxRetries} times)`,
- description: `Model: ${model}\nEndpoint: ${ollamaHost}/api/generate`,
- });
- }
- if ((_d = error === null || error === void 0 ? void 0 : error.response) === null || _d === void 0 ? void 0 : _d.body) {
- throw new n8n_workflow_1.NodeApiError(context.getNode(), error, {
- message: `Ollama API Error (${error.response.statusCode})`,
- description: `Endpoint: ${ollamaHost}/api/generate\nModel: ${model}\nResponse: ${JSON.stringify(error.response.body, null, 2)}`,
- });
- }
- throw new n8n_workflow_1.NodeApiError(context.getNode(), error, {
- message: 'Ollama reranking request failed',
- description: `Endpoint: ${ollamaHost}/api/generate\nModel: ${model}\nError: ${error.message}`,
- });
- }
- /**
- * Format prompt based on reranker model type
- *
- * Different models expect different prompt formats:
- * - BGE Reranker: Simple query + document format
- * - Qwen3-Reranker: Structured chat format with system/user/assistant tags
- */
- function formatRerankerPrompt(model, query, documentContent, instruction) {
- // Detect model type
- const isBGE = model.toLowerCase().includes('bge');
- const isQwen = model.toLowerCase().includes('qwen');
- if (isBGE) {
- // BGE Reranker uses a simple format
- // See: https://huggingface.co/BAAI/bge-reranker-v2-m3
- return `Instruction: ${instruction}
-
- Query: ${query}
-
- Document: ${documentContent}
-
- Relevance:`;
- }
- else if (isQwen) {
- // Qwen3-Reranker uses structured chat format
- // See: https://huggingface.co/dengcao/Qwen3-Reranker-4B
- return `<|im_start|>system
- Judge whether the Document meets the requirements based on the Query and the Instruct provided. Note that the answer can only be "yes" or "no".<|im_end|>
- <|im_start|>user
- <Instruct>: ${instruction}
- <Query>: ${query}
- <Document>: ${documentContent}<|im_end|>
- <|im_start|>assistant
- <think>`;
- }
- // Default format for unknown models (similar to BGE)
- return `Task: ${instruction}
-
- Query: ${query}
-
- Document: ${documentContent}
-
- Score:`;
- }
- /**
- * Parse BGE model response to extract relevance score
- */
- function parseBGEScore(output, outputLower) {
- // Try to extract floating point number
- const scoreRegex = /(\d*\.?\d+)/;
- const scoreMatch = scoreRegex.exec(output);
- if (scoreMatch) {
- const score = parseFloat(scoreMatch[1]);
- // BGE returns scores in various ranges, normalize to 0-1
- if (score > 1 && score <= 10) {
- return score / 10;
- }
- else if (score > 10) {
- return score / 100;
- }
- return Math.min(Math.max(score, 0), 1); // Clamp to 0-1
- }
- // Fallback: check for keywords
- if (outputLower.includes('high') || outputLower.includes('relevant')) {
- return 0.8;
- }
- if (outputLower.includes('low') || outputLower.includes('irrelevant')) {
- return 0.2;
- }
- return null;
- }
- /**
- * Parse Qwen model response to extract relevance score
- */
- function parseQwenScore(output, outputLower) {
- // Look for explicit yes/no in the response
- const yesRegex = /\b(yes|relevant|positive|match)\b/;
- const noRegex = /\b(no|irrelevant|negative|not\s+relevant)\b/;
- const yesMatch = yesRegex.exec(outputLower);
- const noMatch = noRegex.exec(outputLower);
- if (yesMatch && !noMatch) {
- // Higher confidence for detailed explanations
- const hasReasoning = output.length > 100;
- const hasMultiplePositives = (output.match(/relevant|yes|match/gi) || []).length > 1;
- if (hasReasoning && hasMultiplePositives)
- return 0.95;
- if (hasReasoning)
- return 0.85;
- return 0.75;
- }
- if (noMatch && !yesMatch) {
- // Low scores for negative responses
- const hasStrongNegative = outputLower.includes('completely') ||
- outputLower.includes('totally') ||
- outputLower.includes('not at all');
- return hasStrongNegative ? 0.05 : 0.15;
- }
- // Mixed signals - check which appears first
- if (yesMatch && noMatch) {
- const yesIndex = output.toLowerCase().indexOf(yesMatch[0]);
- const noIndex = output.toLowerCase().indexOf(noMatch[0]);
- return yesIndex < noIndex ? 0.6 : 0.4;
- }
- return null;
- }
- /**
- * Parse generic model response with fallback logic
- */
- function parseGenericScore(output, outputLower) {
- // Try numeric extraction first
- const numericRegex = /(\d*\.?\d+)/;
- const numericMatch = numericRegex.exec(output);
- if (numericMatch) {
- const score = parseFloat(numericMatch[1]);
- if (score >= 0 && score <= 1)
- return score;
- if (score > 1 && score <= 10)
- return score / 10;
- if (score > 10 && score <= 100)
- return score / 100;
- }
- // Keyword-based scoring
- const positiveKeywords = ['relevant', 'yes', 'high', 'strong', 'good', 'match', 'related'];
- const negativeKeywords = ['irrelevant', 'no', 'low', 'weak', 'poor', 'unrelated', 'different'];
- const positiveCount = positiveKeywords.filter(kw => outputLower.includes(kw)).length;
- const negativeCount = negativeKeywords.filter(kw => outputLower.includes(kw)).length;
- if (positiveCount > negativeCount) {
- return 0.5 + (positiveCount * 0.1);
- }
- else if (negativeCount > positiveCount) {
- return 0.5 - (negativeCount * 0.1);
- }
- // Default to neutral if completely ambiguous
- return 0.5;
- }
- /**
- * Parse Ollama reranker response to extract relevance score
- * Uses model-specific parsing logic for better accuracy
- */
- function parseRerankerResponse(model, response) {
- if (!(response === null || response === void 0 ? void 0 : response.response)) {
- return 0.0;
- }
- const output = response.response;
- const outputLower = output.toLowerCase();
- const isBGE = model.toLowerCase().includes('bge');
- const isQwen = model.toLowerCase().includes('qwen');
- // Try model-specific parsers
- if (isBGE) {
- const score = parseBGEScore(output, outputLower);
- if (score !== null)
- return score;
- }
- if (isQwen) {
- const score = parseQwenScore(output, outputLower);
- if (score !== null)
- return score;
- }
- // Fallback to generic parsing
- return parseGenericScore(output, outputLower);
- }
@@ -101,6 +101,26 @@ class OllamaRerankerWorkflow {
  },
  },
  },
+ // API Type selection
+ {
+ displayName: 'API Type',
+ name: 'apiType',
+ type: 'options',
+ options: [
+ {
+ name: 'Ollama Generate API',
+ value: 'ollama',
+ description: 'Standard Ollama /api/generate endpoint (for BGE, Qwen prompt-based rerankers)',
+ },
+ {
+ name: 'Custom Rerank API',
+ value: 'custom',
+ description: 'Custom /api/rerank endpoint (for deposium-embeddings-turbov2, etc.)',
+ },
+ ],
+ default: 'ollama',
+ description: 'Which API endpoint to use for reranking',
+ },
  // Query input (flexible like n8n nodes)
  {
  displayName: 'Query',
@@ -294,6 +314,8 @@ class OllamaRerankerWorkflow {
  throw new n8n_workflow_1.NodeOperationError(this.getNode(), 'Custom model name is required');
  }
  }
+ // Get API type
+ const apiType = this.getNodeParameter('apiType', 0, 'ollama');
  // Get common parameters
  const instruction = this.getNodeParameter('instruction', 0);
  const topK = this.getNodeParameter('topK', 0);
@@ -380,6 +402,7 @@ class OllamaRerankerWorkflow {
  batchSize,
  timeout,
  includeOriginalScores,
+ apiType,
  });
  // Format output
  let output;
@@ -11,9 +11,10 @@ export interface RerankConfig {
  batchSize: number;
  timeout: number;
  includeOriginalScores: boolean;
+ apiType?: 'ollama' | 'custom';
  }
  /**
- * Rerank documents using Ollama reranker model
+ * Rerank documents using Ollama reranker model or Custom Rerank API
  */
  export declare function rerankDocuments(context: RerankerContext, config: RerankConfig): Promise<any[]>;
  export {};
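
For orientation, here is a minimal sketch of calling the shared helper with the new optional field. The execution context `ctx`, the require path, the host URL, the model name, and the sample document are illustrative assumptions rather than anything defined by the package; only the field names and the 'ollama'/'custom' values come from the code below.

    const { rerankDocuments } = require('./shared/reranker-logic'); // path assumed for illustration

    // `ctx` stands in for an n8n execution context satisfying RerankerContext.
    const reranked = await rerankDocuments(ctx, {
        ollamaHost: 'http://localhost:11434',   // assumed example host
        model: 'deposium-embeddings-turbov2',   // example model taken from the option description
        query: 'What is the refund policy?',
        documents: [{ pageContent: 'Refunds are issued within 30 days.' }],
        instruction: 'Judge whether the document answers the query.',
        topK: 5,
        threshold: 0.0,
        batchSize: 5,
        timeout: 30000,
        includeOriginalScores: false,
        apiType: 'custom',                      // omit or set to 'ollama' for the original /api/generate flow
    });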
@@ -3,10 +3,15 @@ Object.defineProperty(exports, "__esModule", { value: true });
  exports.rerankDocuments = rerankDocuments;
  const n8n_workflow_1 = require("n8n-workflow");
  /**
- * Rerank documents using Ollama reranker model
+ * Rerank documents using Ollama reranker model or Custom Rerank API
  */
  async function rerankDocuments(context, config) {
- const { ollamaHost, model, query, documents, instruction, topK, threshold, batchSize, timeout, includeOriginalScores } = config;
+ const { ollamaHost, model, query, documents, instruction, topK, threshold, batchSize, timeout, includeOriginalScores, apiType = 'ollama' } = config;
+ // Use Custom Rerank API if specified
+ if (apiType === 'custom') {
+ return await rerankWithCustomAPI(context, config);
+ }
+ // Otherwise use Ollama Generate API (original logic)
  const results = [];
  // Process all documents concurrently with controlled concurrency
  const promises = [];
@@ -43,6 +48,77 @@ async function rerankDocuments(context, config) {
  return rerankedDoc;
  });
  }
+ /**
+ * Rerank documents using Custom Rerank API (/api/rerank endpoint)
+ * This is for services like deposium-embeddings-turbov2 that implement
+ * a custom /api/rerank endpoint with direct cosine similarity scoring
+ */
+ async function rerankWithCustomAPI(context, config) {
+ var _a, _b;
+ const { ollamaHost, model, query, documents, topK, threshold, timeout, includeOriginalScores } = config;
+ try {
+ // Extract document content as strings
+ const documentStrings = documents.map(doc => doc.pageContent || JSON.stringify(doc));
+ // Call /api/rerank endpoint
+ const response = await context.helpers.httpRequest({
+ method: 'POST',
+ url: `${ollamaHost}/api/rerank`,
+ headers: {
+ 'Content-Type': 'application/json',
+ Accept: 'application/json',
+ },
+ body: {
+ model,
+ query,
+ documents: documentStrings,
+ top_k: topK, // Custom API handles top_k filtering
+ },
+ json: true,
+ timeout,
+ });
+ // Parse response: { model: "...", results: [{index, document, relevance_score}] }
+ if (!(response === null || response === void 0 ? void 0 : response.results) || !Array.isArray(response.results)) {
+ throw new n8n_workflow_1.NodeApiError(context.getNode(), response, {
+ message: 'Invalid response from Custom Rerank API',
+ description: `Expected {results: [...]} but got: ${JSON.stringify(response)}`,
+ });
+ }
+ // Filter by threshold and map to our format
+ const filteredResults = response.results
+ .filter((r) => r.relevance_score >= threshold)
+ .map((result) => {
+ const originalDoc = documents[result.index];
+ const rerankedDoc = {
+ ...originalDoc,
+ _rerankScore: result.relevance_score,
+ _originalIndex: result.index,
+ };
+ if (includeOriginalScores && originalDoc._originalScore !== undefined) {
+ rerankedDoc._originalScore = originalDoc._originalScore;
+ }
+ return rerankedDoc;
+ });
+ return filteredResults;
+ }
+ catch (error) {
+ if (((_a = error === null || error === void 0 ? void 0 : error.response) === null || _a === void 0 ? void 0 : _a.statusCode) === 404) {
+ throw new n8n_workflow_1.NodeApiError(context.getNode(), error, {
+ message: 'Custom Rerank API endpoint not found',
+ description: `The /api/rerank endpoint was not found at ${ollamaHost}.\nMake sure you're using a service that supports this endpoint (like deposium-embeddings-turbov2).`,
+ });
+ }
+ if ((_b = error === null || error === void 0 ? void 0 : error.response) === null || _b === void 0 ? void 0 : _b.body) {
+ throw new n8n_workflow_1.NodeApiError(context.getNode(), error, {
+ message: `Custom Rerank API Error (${error.response.statusCode})`,
+ description: `Endpoint: ${ollamaHost}/api/rerank\nModel: ${model}\nResponse: ${JSON.stringify(error.response.body, null, 2)}`,
+ });
+ }
+ throw new n8n_workflow_1.NodeApiError(context.getNode(), error, {
+ message: 'Custom Rerank API request failed',
+ description: `Endpoint: ${ollamaHost}/api/rerank\nModel: ${model}\nError: ${error.message}`,
+ });
+ }
+ }
  /**
  * Score a single document against the query using Ollama reranker model with retry logic
  */
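
To make the contract of the custom endpoint concrete, here is the request/response exchange that rerankWithCustomAPI assumes, reconstructed from the body it sends and the fields it reads. The host, query, documents, and scores are illustrative values only; the /api/rerank route is specific to services such as deposium-embeddings-turbov2, not standard Ollama, and since the code filters by threshold without re-sorting, results are assumed to come back already ordered by relevance_score.

    // POST ${ollamaHost}/api/rerank   (e.g. http://localhost:11434/api/rerank, host assumed)
    // Request body sent by rerankWithCustomAPI:
    // {
    //   "model": "deposium-embeddings-turbov2",
    //   "query": "What is the refund policy?",
    //   "documents": ["Refunds are issued within 30 days.", "Shipping takes 3-5 days."],
    //   "top_k": 5
    // }
    //
    // Response shape the parser expects:
    // {
    //   "model": "deposium-embeddings-turbov2",
    //   "results": [
    //     { "index": 0, "document": "Refunds are issued within 30 days.", "relevance_score": 0.92 },
    //     { "index": 1, "document": "Shipping takes 3-5 days.", "relevance_score": 0.31 }
    //   ]
    // }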
package/package.json CHANGED
@@ -1,7 +1,7 @@
  {
  "name": "n8n-nodes-ollama-reranker",
- "version": "1.1.0",
- "description": "Ollama Reranker for n8n - Vector Store provider + chainable workflow node with AI Agent tool support",
+ "version": "1.2.0",
+ "description": "Ollama Reranker for n8n - Supports Ollama Generate API + Custom Rerank API (Vector Store provider + chainable workflow node)",
  "main": "index.js",
  "author": "Gabriel BRUMENT",
  "license": "MIT",