# fmea-api-mcp-server 1.0.7 → 1.1.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +5 -3
- package/dist/index.js +146 -47
- package/dist/synonyms.js +38 -0
- package/package.json +1 -1
## package/README.md (CHANGED)

```diff
@@ -98,10 +98,12 @@ When the package is published to NPM:
 
 ## Features
 - **Resources**: Can read JSON files in the `endpoints` folder.
-- **Tools**:
 - **Tools**:
   - `search_apis`:
-    - Smart search
+    - **Smart Search**: Supports multi-keyword matching (e.g., "project search"). Results are ranked by relevance (Summary > OperationID > Description > Path).
+    - **Deprecation Warnings**: Automatically detects if a V1 endpoint has a V2 equivalent and includes a warning in the results.
    - Supports filters: `query` (use `*` for all), `method`, `version`, `page` (default 1).
    - Results limited to 10 per page. Returns meta info (total, totalPages) and guidance.
-  - `get_api_details`:
+  - `get_api_details`:
+    - Get full details (schema, parameters) for a specific endpoint.
+    - Includes **Deprecation Warnings** if a newer version of the API exists.
```
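For context, the filters listed in the README translate into a tool-arguments object along these lines (a hypothetical call; all values are invented, field names come from the README above):

```js
// Hypothetical arguments for the search_apis tool.
const args = {
    query: "project search", // multi-keyword: every word (or a synonym of it) must match
    method: "GET",           // optional HTTP-method filter
    version: "v2",           // optional version filter
    page: 1                  // results come back 10 per page (default 1)
};
```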
## package/dist/index.js (CHANGED)

```diff
@@ -6,6 +6,7 @@ import * as fs from "fs/promises";
 import * as fsSync from "fs";
 import * as path from "path";
 import { fileURLToPath } from "url";
+import { getSynonyms } from "./synonyms.js";
 const __filename = fileURLToPath(import.meta.url);
 const __dirname = path.dirname(__filename);
 // Directory where endpoint definitions are stored.
```
```diff
@@ -210,11 +211,11 @@ class ApiDocsServer {
         }
         return results;
     }
-    // Smart search helper with scoring,
+    // Smart search helper with BM25 scoring, Synonyms, and AND logic
     async searchInFiles(query, filterMethod, filterVersion, page = 1) {
         const files = await this.getAllFiles(ENDPOINTS_DIR);
-        let
-
+        let documents = [];
+        // 1. Prepare Documents (Corpus)
         for (const filePath of files) {
             try {
                 const content = await fs.readFile(filePath, "utf-8");
@@ -233,34 +234,21 @@ class ApiDocsServer {
                         if (filterMethod && endpoint.method.toUpperCase() !== filterMethod) {
                             continue;
                         }
-                        //
-
-
-
-
-
-
-
-
-
-
-
-
-
-                        if (apiPath.includes(query))
-                            score += 3;
-                        }
-                        if (score > 0) {
-                            allMatches.push({
-                                score,
-                                file: fileName,
-                                method: endpoint.method,
-                                path: endpoint.path,
-                                summary: endpoint.summary,
-                                description: endpoint.description
-                            });
-                        }
+                        // Create searchable text blob
+                        // Weighting: Summary (3x), OperationID (2x), Description (1x), Path (1x)
+                        const searchableText = [
+                            (endpoint.summary || "").toLowerCase().repeat(3),
+                            (endpoint.operationId || "").toLowerCase().repeat(2),
+                            (endpoint.description || "").toLowerCase(),
+                            (endpoint.path || "").toLowerCase()
+                        ].join(" ");
+                        const tokens = searchableText.split(/\s+/).filter(t => t.length > 0);
+                        documents.push({
+                            file: fileName,
+                            ...endpoint,
+                            tokens, // For BM25 calculation
+                            docLength: tokens.length
+                        });
                     }
                 }
             }
```
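To see what this corpus-preparation step actually indexes, here is the blob it builds for a hypothetical endpoint (the endpoint object is invented; the rest is the logic from the diff above). One caveat worth noting: `String.prototype.repeat` concatenates without a separator, so the last word of one repetition fuses with the first word of the next, and only interior words receive the full weight:

```js
// Hypothetical endpoint run through the searchableText logic above.
const endpoint = {
    summary: "Search projects",
    operationId: "searchProjects",
    description: "Full-text search over projects",
    path: "/v2/projects/search"
};
const searchableText = [
    (endpoint.summary || "").toLowerCase().repeat(3),
    (endpoint.operationId || "").toLowerCase().repeat(2),
    (endpoint.description || "").toLowerCase(),
    (endpoint.path || "").toLowerCase()
].join(" ");
const tokens = searchableText.split(/\s+/).filter(t => t.length > 0);
console.log(tokens);
// [ 'search', 'projectssearch', 'projectssearch', 'projects',
//   'searchprojectssearchprojects', 'full-text', 'search', 'over',
//   'projects', '/v2/projects/search' ]
```

Note also that the path survives as a single token, so a path only matches when a query token equals the whole path string.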
```diff
@@ -268,35 +256,114 @@ class ApiDocsServer {
                 // Ignore parse errors
             }
         }
-        const totalFound =
+        const totalFound = documents.length;
         if (totalFound === 0) {
             return {
                 results: [],
-                message: `No results found for '${query}'
+                message: `No results found for '${query}'.`
+            };
+        }
+        // 2. Query Processing (Synonyms + AND Logic)
+        const rawQueryTokens = query.toLowerCase().split(/\s+/).filter(t => t.length > 0);
+        // Check for Wildcard
+        if (rawQueryTokens.length === 0 || (rawQueryTokens.length === 1 && rawQueryTokens[0] === "*")) {
+            // Slice for pagination
+            const LIMIT = 10;
+            const totalPages = Math.ceil(totalFound / LIMIT);
+            const currentPage = Math.max(1, page);
+            const start = (currentPage - 1) * LIMIT;
+            const slice = documents.slice(start, start + LIMIT);
+            const finalResults = slice.map(({ tokens, docLength, ...rest }) => rest);
+            return {
+                results: finalResults,
+                meta: { total: totalFound, page: currentPage, totalPages: totalPages }
+            };
+        }
+        // Filter Documents: AND Logic with Synonym Expansion
+        // Every query token (or one of its synonyms) MUST be present in the document
+        const filteredDocs = documents.filter(doc => {
+            return rawQueryTokens.every(qToken => {
+                const synonyms = getSynonyms(qToken);
+                return synonyms.some((syn) => doc.tokens.includes(syn));
+            });
+        });
+        if (filteredDocs.length === 0) {
+            return {
+                results: [],
+                message: `No results found for '${query}'. Try fewer keywords or check spelling.`
             };
         }
-        //
-
-
+        // 3. BM25 Calculation on Filtered Docs
+        // Context: We calculate stats based on the *filtered* corpus or *full* corpus?
+        // Standard BM25 usages often use full corpus stats for IDF. We will use full corpus stats.
+        const k1 = 1.2;
+        const b = 0.75;
+        const avgdl = documents.reduce((acc, doc) => acc + doc.docLength, 0) / totalFound;
+        // Calculate IDF (using full corpus) for *expanded* tokens?
+        // Complexity: simple approach -> Calculate IDF for the specific matching token in the doc for scoring.
+        // If multiple synonyms match, take the max score or sum? Sum is risky (double count).
+        // We will iterate query tokens, find the *best matching synonym* in the doc, and score that.
+        // Pre-calculate IDF for all potential terms in query (raw + synonyms)
+        const allQueryTerms = new Set();
+        rawQueryTokens.forEach(t => getSynonyms(t).forEach((s) => allQueryTerms.add(s)));
+        const idf = {};
+        for (const term of allQueryTerms) {
+            let n_q = 0;
+            for (const doc of documents) {
+                if (doc.tokens.includes(term))
+                    n_q++;
+            }
+            idf[term] = Math.log((totalFound - n_q + 0.5) / (n_q + 0.5) + 1);
         }
+        // Score Filtered Documents
+        let scoredDocs = filteredDocs.map(doc => {
+            let score = 0;
+            for (const qToken of rawQueryTokens) {
+                // Find which synonyms of qToken are present in this doc
+                const synonyms = getSynonyms(qToken);
+                const presentSynonyms = synonyms.filter((syn) => doc.tokens.includes(syn));
+                // If multiple synonyms match (e.g. 'find' and 'get' both in doc), we should probably
+                // just take the best one or sum them with saturation.
+                // Simplified: Sum them up (assuming they add more relevance).
+                for (const term of presentSynonyms) {
+                    const f_q = doc.tokens.filter((t) => t === term).length;
+                    const numerator = idf[term] * f_q * (k1 + 1);
+                    const denominator = f_q + k1 * (1 - b + b * (doc.docLength / avgdl));
+                    score += numerator / denominator;
+                }
+            }
+            return { ...doc, score };
+        });
+        // Sort by score descending
+        scoredDocs.sort((a, b) => b.score - a.score);
         // Pagination
         const LIMIT = 10;
-        const
-        const
+        const totalHits = scoredDocs.length;
+        const totalPages = Math.ceil(totalHits / LIMIT);
+        const currentPage = Math.max(1, page);
         const start = (currentPage - 1) * LIMIT;
-
-        const
+        // Slice
+        const slice = scoredDocs.slice(start, start + LIMIT);
+        // Post-processing: Add warnings for V1 endpoints
+        const finalResults = await Promise.all(slice.map(async (item) => {
+            const { score, tokens, docLength, ...rest } = item; // Remove internal props
+            if (rest.path && rest.path.includes("/v1/")) {
+                const v2Path = rest.path.replace("/v1/", "/v2/");
+                const v2Exists = await this.findEndpointInFiles(files, v2Path, rest.method);
+                if (v2Exists) {
+                    rest.warning = "DEPRECATED: Version v1 is deprecated. Please use v2 endpoint: " + v2Path;
+                }
+            }
+            return rest;
+        }));
         let warning = undefined;
         if (totalPages > 1) {
-            warning = `Found ${
-            if (currentPage < totalPages) {
-                warning += ` Use 'page: ${currentPage + 1}' to see next results.`;
-            }
+            warning = `Found ${totalHits} results. Showing page ${currentPage} of ${totalPages}.`;
         }
         return {
-            results:
+            results: finalResults,
             meta: {
-                total:
+                total: totalHits,
                 page: currentPage,
                 totalPages: totalPages
             },
```
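Written out, the `idf` and `score` lines in this hunk are the standard BM25 weighting. Mapping symbols to the code: `N` is `totalFound` (full corpus size), `n_q` is the number of documents containing term `q`, `f_q` is the term frequency in document `D`, `|D|` is `docLength`, and `k1 = 1.2`, `b = 0.75`:

```latex
\mathrm{IDF}(q) = \ln\!\left(\frac{N - n_q + 0.5}{n_q + 0.5} + 1\right),
\qquad
\mathrm{score}(D, Q) = \sum_{q \in Q'} \mathrm{IDF}(q)\,
\frac{f_q\,(k_1 + 1)}{f_q + k_1\!\left(1 - b + b\,\frac{|D|}{\mathrm{avgdl}}\right)}
```

Here `Q'` is the expanded query: every synonym of every raw query token that actually occurs in `D`. Multiple matching synonyms therefore contribute additively, which is exactly the "sum them up" trade-off the comments in the diff acknowledge.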
```diff
@@ -316,10 +383,20 @@ class ApiDocsServer {
                         if (method && endpoint.method.toUpperCase() !== method) {
                             continue;
                         }
-
+                        const result = {
                             sourceFile: path.relative(ENDPOINTS_DIR, filePath),
                             ...endpoint
                         };
+                        // Check for V1 Deprecation
+                        if (apiPath.includes("/v1/")) {
+                            const v2Path = apiPath.replace("/v1/", "/v2/");
+                            const v2Exists = await this.findEndpointInFiles(files, v2Path, method);
+                            if (v2Exists) {
+                                // Inject a top-level deprecation warning in the details
+                                result.deprecation_warning = `NOTICE: This v1 endpoint is deprecated. A newer version (v2) exists at ${v2Path}`;
+                            }
+                        }
+                        return result;
                     }
                 }
             }
```
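For a v1 endpoint that has a v2 counterpart, the object returned by this branch would look roughly like the following (a sketch; the endpoint values are hypothetical, the field names come from the diff above):

```js
// Hypothetical shape of a get_api_details result for a deprecated v1 endpoint.
const details = {
    sourceFile: "projects.json",   // relative to the endpoints folder
    method: "GET",
    path: "/v1/projects/search",
    summary: "Search projects",
    // ...other fields from the endpoint definition (parameters, schema)...
    deprecation_warning: "NOTICE: This v1 endpoint is deprecated. A newer version (v2) exists at /v2/projects/search"
};
```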
```diff
@@ -330,6 +407,28 @@ class ApiDocsServer {
         }
         return null;
     }
+    // Efficiently check if an endpoint exists without reading files if content is not needed
+    // Note: Since we don't cache file contents in memory for this simple server,
+    // we re-read files. For a production server with many files, we would cache the map.
+    async findEndpointInFiles(files, apiPath, method) {
+        for (const filePath of files) {
+            try {
+                const content = await fs.readFile(filePath, "utf-8");
+                const json = JSON.parse(content);
+                if (json.endpoints && Array.isArray(json.endpoints)) {
+                    for (const ep of json.endpoints) {
+                        if (ep.path === apiPath) {
+                            if (method && ep.method.toUpperCase() !== method)
+                                continue;
+                            return true;
+                        }
+                    }
+                }
+            }
+            catch (e) { }
+        }
+        return false;
+    }
     async run() {
         const transport = new StdioServerTransport();
         await this.server.connect(transport);
```
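The comment in `findEndpointInFiles` notes that a production server would cache the map instead of re-reading every file per lookup. A minimal sketch of that caching (not in the package; names `endpointCache`, `buildEndpointCache`, and `endpointExists` are invented):

```js
import * as fs from "fs/promises";

const endpointCache = new Map(); // "METHOD /path" -> true

// Read every endpoint file once and remember which method/path pairs exist.
async function buildEndpointCache(files) {
    for (const filePath of files) {
        try {
            const json = JSON.parse(await fs.readFile(filePath, "utf-8"));
            const endpoints = Array.isArray(json.endpoints) ? json.endpoints : [];
            for (const ep of endpoints) {
                endpointCache.set(`${ep.method.toUpperCase()} ${ep.path}`, true);
            }
        }
        catch (e) { /* ignore parse errors, as the server does */ }
    }
}

// Answer existence checks from the cache instead of re-reading files.
function endpointExists(apiPath, method) {
    if (method) {
        return endpointCache.has(`${method.toUpperCase()} ${apiPath}`);
    }
    // No method filter: scan keys for any method with this path.
    for (const key of endpointCache.keys()) {
        if (key.endsWith(` ${apiPath}`)) {
            return true;
        }
    }
    return false;
}
```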
## package/dist/synonyms.js (ADDED)

```diff
@@ -0,0 +1,38 @@
+export const SYNONYM_GROUPS = {
+    // Read / Retrieve
+    "get": ["fetch", "retrieve", "read", "load", "find", "search", "query", "list"],
+    "find": ["get", "search", "retrieve", "lookup"],
+    "search": ["find", "get", "query", "lookup"],
+    "list": ["get", "all", "collection"],
+    // Create
+    "create": ["add", "insert", "make", "new", "post", "generate"],
+    "add": ["create", "insert", "append", "attach"],
+    "post": ["create", "add", "submit"],
+    // Update
+    "update": ["modify", "edit", "change", "save", "put", "patch", "set"],
+    "modify": ["update", "edit", "change", "adjust"],
+    "save": ["update", "store", "persist", "write"],
+    // Delete
+    "delete": ["remove", "destroy", "clear", "erase", "drop"],
+    "remove": ["delete", "detach", "discard"]
+};
+/**
+ * Expands a single token into a list of synonyms including itself.
+ */
+export function getSynonyms(token) {
+    const lowerToken = token.toLowerCase();
+    // Direct lookup
+    if (SYNONYM_GROUPS[lowerToken]) {
+        return [lowerToken, ...SYNONYM_GROUPS[lowerToken]];
+    }
+    // Reverse lookup (inefficient but thorough for a small map)
+    const synonyms = new Set();
+    synonyms.add(lowerToken);
+    for (const [key, details] of Object.entries(SYNONYM_GROUPS)) {
+        if (details.includes(lowerToken)) {
+            synonyms.add(key);
+            details.forEach(d => synonyms.add(d));
+        }
+    }
+    return Array.from(synonyms);
+}
```
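Derived directly from the table and function above, a few concrete expansions:

```js
import { getSynonyms } from "./synonyms.js";

// Direct lookup: "get" is a key in SYNONYM_GROUPS.
console.log(getSynonyms("get"));
// -> ["get", "fetch", "retrieve", "read", "load", "find", "search", "query", "list"]

// Reverse lookup: "lookup" is not a key, but occurs in the "find" and "search"
// groups, so both groups are merged (order follows Set insertion).
console.log(getSynonyms("lookup"));
// -> ["lookup", "find", "get", "search", "retrieve", "query"]

// Unknown tokens expand to themselves only.
console.log(getSynonyms("widget"));
// -> ["widget"]
```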