fmea-api-mcp-server 1.1.0 → 1.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -6,6 +6,7 @@ import * as fs from "fs/promises";
  import * as fsSync from "fs";
  import * as path from "path";
  import { fileURLToPath } from "url";
+ import { getSynonyms } from "./synonyms.js";
  const __filename = fileURLToPath(import.meta.url);
  const __dirname = path.dirname(__filename);
  // Directory where endpoint definitions are stored.
@@ -210,13 +211,11 @@ class ApiDocsServer {
  }
  return results;
  }
- // Smart search helper with scoring, filtering, limits, and pagination
+ // Smart search helper with BM25 scoring, Synonyms, and AND logic
  async searchInFiles(query, filterMethod, filterVersion, page = 1) {
  const files = await this.getAllFiles(ENDPOINTS_DIR);
- let allMatches = [];
- const isWildcard = query.trim() === "*" || query.trim() === "";
- // Tokenize query: split by space, filter empty
- const tokens = query.toLowerCase().split(/\s+/).filter(t => t.length > 0);
+ let documents = [];
+ // 1. Prepare Documents (Corpus)
  for (const filePath of files) {
  try {
  const content = await fs.readFile(filePath, "utf-8");
@@ -235,49 +234,21 @@ class ApiDocsServer {
  if (filterMethod && endpoint.method.toUpperCase() !== filterMethod) {
  continue;
  }
- // Scoring Logic
- let score = 0;
- if (isWildcard) {
- score = 1;
- }
- else {
- const summary = (endpoint.summary || "").toLowerCase();
- const description = (endpoint.description || "").toLowerCase();
- const apiPath = (endpoint.path || "").toLowerCase();
- const operationId = (endpoint.operationId || "").toLowerCase();
- // Calculate score for each token
- for (const token of tokens) {
- let tokenScore = 0;
- // Summary: Highest weight (Exact > Partial)
- if (summary === token)
- tokenScore += 20;
- else if (summary.includes(token))
- tokenScore += 10;
- // OperationID: High weight
- if (operationId === token)
- tokenScore += 15;
- else if (operationId.includes(token))
- tokenScore += 8;
- // Description: Medium weight
- if (description.includes(token))
- tokenScore += 5;
- // Path: Low weight
- if (apiPath.includes(token))
- tokenScore += 3;
- score += tokenScore;
- }
- }
- if (score > 0) {
- allMatches.push({
- score,
- file: fileName,
- method: endpoint.method,
- path: endpoint.path,
- summary: endpoint.summary,
- description: endpoint.description,
- operationId: endpoint.operationId
- });
- }
+ // Create searchable text blob
+ // Weighting: Summary (3x), OperationID (2x), Description (1x), Path (1x)
+ const searchableText = [
+ (endpoint.summary || "").toLowerCase().repeat(3),
+ (endpoint.operationId || "").toLowerCase().repeat(2),
+ (endpoint.description || "").toLowerCase(),
+ (endpoint.path || "").toLowerCase()
+ ].join(" ");
+ const tokens = searchableText.split(/\s+/).filter(t => t.length > 0);
+ documents.push({
+ file: fileName,
+ ...endpoint,
+ tokens, // For BM25 calculation
+ docLength: tokens.length
+ });
  }
  }
  }
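The hunk above drops the hand-tuned per-field scoring loop and instead builds one weighted text blob per endpoint: the summary is concatenated three times and the operationId twice before tokenizing, so terms from those fields carry a proportionally higher term frequency in the BM25 stage. One detail worth noting is that String.prototype.repeat() concatenates copies with no separator, so the last and first tokens of adjacent copies can merge ("get userget user"). The sketch below is not part of the package; it only illustrates the same 3x/2x/1x weighting with a hypothetical helper (buildWeightedTokens) that joins copies with a space so boundary tokens stay intact.

    // Sketch only, not shipped in the package: the same 3x/2x/1x field weighting,
    // but joining repeated copies with a space so tokens at copy boundaries
    // never merge (String.prototype.repeat() concatenates without a separator).
    function buildWeightedTokens(endpoint) {
      const weighted = (text, times) =>
        Array(times).fill((text || "").toLowerCase()).join(" ");
      const blob = [
        weighted(endpoint.summary, 3),      // summary terms count 3x
        weighted(endpoint.operationId, 2),  // operationId terms count 2x
        weighted(endpoint.description, 1),
        weighted(endpoint.path, 1)
      ].join(" ");
      return blob.split(/\s+/).filter(t => t.length > 0);
    }

    // Example: every summary term now appears 3 times in the token list,
    // which raises its BM25 term frequency for that document.
    // buildWeightedTokens({ summary: "get user", operationId: "getUser", path: "/v1/users/{id}" })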
@@ -285,32 +256,99 @@ class ApiDocsServer {
  // Ignore parse errors
  }
  }
- const totalFound = allMatches.length;
+ const totalFound = documents.length;
  if (totalFound === 0) {
  return {
  results: [],
- message: `No results found for '${query}'. Try using '*' to list all endpoints, or check your version/method filters.`
+ message: `No results found for '${query}'.`
  };
  }
- // Sort by score descending (only meaningful if not wildcard)
- if (!isWildcard) {
- allMatches.sort((a, b) => b.score - a.score);
+ // 2. Query Processing (Synonyms + AND Logic)
+ const rawQueryTokens = query.toLowerCase().split(/\s+/).filter(t => t.length > 0);
+ // Check for Wildcard
+ if (rawQueryTokens.length === 0 || (rawQueryTokens.length === 1 && rawQueryTokens[0] === "*")) {
+ // Slice for pagination
+ const LIMIT = 10;
+ const totalPages = Math.ceil(totalFound / LIMIT);
+ const currentPage = Math.max(1, page);
+ const start = (currentPage - 1) * LIMIT;
+ const slice = documents.slice(start, start + LIMIT);
+ const finalResults = slice.map(({ tokens, docLength, ...rest }) => rest);
+ return {
+ results: finalResults,
+ meta: { total: totalFound, page: currentPage, totalPages: totalPages }
+ };
  }
+ // Filter Documents: AND Logic with Synonym Expansion
+ // Every query token (or one of its synonyms) MUST be present in the document
+ const filteredDocs = documents.filter(doc => {
+ return rawQueryTokens.every(qToken => {
+ const synonyms = getSynonyms(qToken);
+ return synonyms.some((syn) => doc.tokens.includes(syn));
+ });
+ });
+ if (filteredDocs.length === 0) {
+ return {
+ results: [],
+ message: `No results found for '${query}'. Try fewer keywords or check spelling.`
+ };
+ }
+ // 3. BM25 Calculation on Filtered Docs
+ // Context: We calculate stats based on the *filtered* corpus or *full* corpus?
+ // Standard BM25 usages often use full corpus stats for IDF. We will use full corpus stats.
+ const k1 = 1.2;
+ const b = 0.75;
+ const avgdl = documents.reduce((acc, doc) => acc + doc.docLength, 0) / totalFound;
+ // Calculate IDF (using full corpus) for *expanded* tokens?
+ // Complexity: simple approach -> Calculate IDF for the specific matching token in the doc for scoring.
+ // If multiple synonyms match, take the max score or sum? Sum is risky (double count).
+ // We will iterate query tokens, find the *best matching synonym* in the doc, and score that.
+ // Pre-calculate IDF for all potential terms in query (raw + synonyms)
+ const allQueryTerms = new Set();
+ rawQueryTokens.forEach(t => getSynonyms(t).forEach((s) => allQueryTerms.add(s)));
+ const idf = {};
+ for (const term of allQueryTerms) {
+ let n_q = 0;
+ for (const doc of documents) {
+ if (doc.tokens.includes(term))
+ n_q++;
+ }
+ idf[term] = Math.log((totalFound - n_q + 0.5) / (n_q + 0.5) + 1);
+ }
+ // Score Filtered Documents
+ let scoredDocs = filteredDocs.map(doc => {
+ let score = 0;
+ for (const qToken of rawQueryTokens) {
+ // Find which synonyms of qToken are present in this doc
+ const synonyms = getSynonyms(qToken);
+ const presentSynonyms = synonyms.filter((syn) => doc.tokens.includes(syn));
+ // If multiple synonyms match (e.g. 'find' and 'get' both in doc), we should probably
+ // just take the best one or sum them with saturation.
+ // Simplified: Sum them up (assuming they add more relevance).
+ for (const term of presentSynonyms) {
+ const f_q = doc.tokens.filter((t) => t === term).length;
+ const numerator = idf[term] * f_q * (k1 + 1);
+ const denominator = f_q + k1 * (1 - b + b * (doc.docLength / avgdl));
+ score += numerator / denominator;
+ }
+ }
+ return { ...doc, score };
+ });
+ // Sort by score descending
+ scoredDocs.sort((a, b) => b.score - a.score);
  // Pagination
  const LIMIT = 10;
- const totalPages = Math.ceil(totalFound / LIMIT);
+ const totalHits = scoredDocs.length;
+ const totalPages = Math.ceil(totalHits / LIMIT);
  const currentPage = Math.max(1, page);
  const start = (currentPage - 1) * LIMIT;
- const end = start + LIMIT;
- // Get the page slice
- const slice = allMatches.slice(start, end);
- // Post-processing: Add warnings for V1 endpoints if V2 exists
+ // Slice
+ const slice = scoredDocs.slice(start, start + LIMIT);
+ // Post-processing: Add warnings for V1 endpoints
  const finalResults = await Promise.all(slice.map(async (item) => {
- // Remove score before returning to user
- const { score, ...rest } = item;
+ const { score, tokens, docLength, ...rest } = item; // Remove internal props
  if (rest.path && rest.path.includes("/v1/")) {
  const v2Path = rest.path.replace("/v1/", "/v2/");
- // We check if this v2 path exists using our internal lookup logic
  const v2Exists = await this.findEndpointInFiles(files, v2Path, rest.method);
  if (v2Exists) {
  rest.warning = "DEPRECATED: Version v1 is deprecated. Please use v2 endpoint: " + v2Path;
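The new ranking path in this hunk first applies an AND filter (every query token, or one of its synonyms, must appear in a document) and then scores the survivors with the standard Okapi BM25 per-term formula, using the constants from the diff (k1 = 1.2, b = 0.75): score(t, d) = idf(t) * f(t, d) * (k1 + 1) / (f(t, d) + k1 * (1 - b + b * |d| / avgdl)). The snippet below is not from the package; it plugs made-up corpus numbers into that same formula to show roughly what a single matching term contributes.

    // Sketch only: one term's BM25 contribution with the constants from the diff
    // (k1 = 1.2, b = 0.75). All corpus numbers below are illustrative.
    const k1 = 1.2, b = 0.75;
    const N = 40;           // documents in the corpus (totalFound)
    const n_q = 4;          // documents containing the term
    const f_q = 2;          // occurrences of the term in this document
    const docLength = 30;   // tokens in this document
    const avgdl = 25;       // average document length

    const idf = Math.log((N - n_q + 0.5) / (n_q + 0.5) + 1);                       // ≈ 2.21
    const tf = (f_q * (k1 + 1)) / (f_q + k1 * (1 - b + b * (docLength / avgdl)));  // ≈ 1.30
    console.log((idf * tf).toFixed(2));  // "2.88": this term's share of the document's score

Because the scoring loop sums a contribution for every synonym present in a document (the simplification the inline comments call out), a document containing both "find" and "get" is scored for both terms rather than only the best match.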
@@ -320,15 +358,12 @@ class ApiDocsServer {
  }));
  let warning = undefined;
  if (totalPages > 1) {
- warning = `Found ${totalFound} results. Showing page ${currentPage} of ${totalPages}.`;
- if (currentPage < totalPages) {
- warning += ` Use 'page: ${currentPage + 1}' to see next results.`;
- }
+ warning = `Found ${totalHits} results. Showing page ${currentPage} of ${totalPages}.`;
  }
  return {
  results: finalResults,
  meta: {
- total: totalFound,
+ total: totalHits,
  page: currentPage,
  totalPages: totalPages
  },
package/dist/synonyms.js ADDED
@@ -0,0 +1,38 @@
+ export const SYNONYM_GROUPS = {
+ // Read / Retrieve
+ "get": ["fetch", "retrieve", "read", "load", "find", "search", "query", "list"],
+ "find": ["get", "search", "retrieve", "lookup"],
+ "search": ["find", "get", "query", "lookup"],
+ "list": ["get", "all", "collection"],
+ // Create
+ "create": ["add", "insert", "make", "new", "post", "generate"],
+ "add": ["create", "insert", "append", "attach"],
+ "post": ["create", "add", "submit"],
+ // Update
+ "update": ["modify", "edit", "change", "save", "put", "patch", "set"],
+ "modify": ["update", "edit", "change", "adjust"],
+ "save": ["update", "store", "persist", "write"],
+ // Delete
+ "delete": ["remove", "destroy", "clear", "erase", "drop"],
+ "remove": ["delete", "detach", "discard"]
+ };
+ /**
+ * Expands a single token into a list of synonyms including itself.
+ */
+ export function getSynonyms(token) {
+ const lowerToken = token.toLowerCase();
+ // Direct lookup
+ if (SYNONYM_GROUPS[lowerToken]) {
+ return [lowerToken, ...SYNONYM_GROUPS[lowerToken]];
+ }
+ // Reverse lookup (inefficient but thorough for a small map)
+ const synonyms = new Set();
+ synonyms.add(lowerToken);
+ for (const [key, details] of Object.entries(SYNONYM_GROUPS)) {
+ if (details.includes(lowerToken)) {
+ synonyms.add(key);
+ details.forEach(d => synonyms.add(d));
+ }
+ }
+ return Array.from(synonyms);
+ }
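The new helper is a plain lookup table: a direct hit on a key returns the key plus its group, and any other token falls through to a reverse scan that pulls in every group containing it, while unknown tokens expand to themselves. Tracing the code above gives the behaviour below (a usage sketch, not shipped with the package):

    // Usage sketch based on the synonyms.js shown above (not part of the package).
    import { getSynonyms } from "./synonyms.js";

    getSynonyms("get");
    // -> ["get", "fetch", "retrieve", "read", "load", "find", "search", "query", "list"]
    // direct lookup: the key itself plus its group

    getSynonyms("fetch");
    // -> ["fetch", "get", "retrieve", "read", "load", "find", "search", "query", "list"]
    // reverse lookup: "fetch" appears only in the "get" group, so that whole group is pulled in

    getSynonyms("users");
    // -> ["users"]
    // unknown tokens expand to themselves, so ordinary terms still need an exact token match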
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "fmea-api-mcp-server",
- "version": "1.1.0",
+ "version": "1.1.1",
  "description": "MCP server for serving API documentation from endpoints directory",
  "type": "module",
  "main": "dist/index.js",