@aiwerk/mcp-bridge 1.6.1 → 1.7.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/bin/mcp-bridge.js +20 -8
- package/dist/src/embeddings.d.ts +13 -1
- package/dist/src/embeddings.js +83 -51
- package/dist/src/intent-router.js +5 -0
- package/dist/src/protocol.js +16 -7
- package/dist/src/smart-filter.d.ts +4 -40
- package/dist/src/smart-filter.js +57 -389
- package/dist/src/standalone-server.d.ts +2 -1
- package/dist/src/standalone-server.js +61 -6
- package/dist/src/transport-base.d.ts +3 -3
- package/dist/src/transport-base.js +12 -11
- package/dist/src/types.d.ts +1 -0
- package/dist/src/update-checker.d.ts +1 -0
- package/dist/src/update-checker.js +7 -8
- package/package.json +1 -1
- package/scripts/install-server.sh +15 -11
package/dist/bin/mcp-bridge.js
CHANGED
|
@@ -2,6 +2,7 @@
|
|
|
2
2
|
import { readFileSync, existsSync } from "fs";
|
|
3
3
|
import { join, dirname, resolve } from "path";
|
|
4
4
|
import { fileURLToPath } from "url";
|
|
5
|
+
import { platform } from "os";
|
|
5
6
|
import { execFileSync } from "child_process";
|
|
6
7
|
import { loadConfig, initConfigDir } from "../src/config.js";
|
|
7
8
|
import { StandaloneServer } from "../src/standalone-server.js";
|
|
@@ -212,19 +213,30 @@ function cmdSearch(query, logger) {
|
|
|
212
213
|
return;
|
|
213
214
|
}
|
|
214
215
|
process.stdout.write(`\nSearch results for "${query}":\n\n`);
|
|
215
|
-
|
|
216
|
+
matches.forEach(([name, info], i) => {
|
|
216
217
|
process.stdout.write(` ${i + 1} ${name.padEnd(16)}${info.description || ""}\n`);
|
|
217
|
-
}
|
|
218
|
+
});
|
|
218
219
|
process.stdout.write("\n");
|
|
219
220
|
}
|
|
220
221
|
function cmdInstall(serverName, logger) {
|
|
221
|
-
const
|
|
222
|
-
if (!existsSync(scriptPath)) {
|
|
223
|
-
logger.error("Install script not found");
|
|
224
|
-
process.exit(1);
|
|
225
|
-
}
|
|
222
|
+
const scriptDir = join(PACKAGE_ROOT, "scripts");
|
|
226
223
|
try {
|
|
227
|
-
|
|
224
|
+
if (platform() === "win32") {
|
|
225
|
+
const psScript = join(scriptDir, "install-server.ps1");
|
|
226
|
+
if (!existsSync(psScript)) {
|
|
227
|
+
logger.error("Install script not found (install-server.ps1)");
|
|
228
|
+
process.exit(1);
|
|
229
|
+
}
|
|
230
|
+
execFileSync("powershell", ["-ExecutionPolicy", "Bypass", "-File", psScript, serverName], { stdio: "inherit" });
|
|
231
|
+
}
|
|
232
|
+
else {
|
|
233
|
+
const scriptPath = join(scriptDir, "install-server.sh");
|
|
234
|
+
if (!existsSync(scriptPath)) {
|
|
235
|
+
logger.error("Install script not found (install-server.sh)");
|
|
236
|
+
process.exit(1);
|
|
237
|
+
}
|
|
238
|
+
execFileSync("bash", [scriptPath, serverName], { stdio: "inherit" });
|
|
239
|
+
}
|
|
228
240
|
}
|
|
229
241
|
catch (err) {
|
|
230
242
|
process.exit(1);
|
package/dist/src/embeddings.d.ts
CHANGED
|
@@ -5,8 +5,13 @@ export interface EmbeddingProvider {
|
|
|
5
5
|
}
|
|
6
6
|
export declare class GeminiEmbedding implements EmbeddingProvider {
|
|
7
7
|
private readonly apiKey;
|
|
8
|
+
private readonly model;
|
|
9
|
+
private readonly baseUrl;
|
|
10
|
+
private static readonly BATCH_LIMIT;
|
|
8
11
|
constructor(apiKey: string);
|
|
9
12
|
embed(texts: string[]): Promise<number[][]>;
|
|
13
|
+
private batchEmbed;
|
|
14
|
+
private singleEmbed;
|
|
10
15
|
dimensions(): number;
|
|
11
16
|
}
|
|
12
17
|
export declare class OpenAIEmbedding implements EmbeddingProvider {
|
|
@@ -25,7 +30,14 @@ export declare class OllamaEmbedding implements EmbeddingProvider {
|
|
|
25
30
|
}
|
|
26
31
|
export declare class KeywordEmbedding implements EmbeddingProvider {
|
|
27
32
|
private vocabulary;
|
|
28
|
-
private
|
|
33
|
+
private frozen;
|
|
34
|
+
/**
|
|
35
|
+
* Add texts to the vocabulary (call during indexing phase).
|
|
36
|
+
* After freeze(), new words are silently ignored.
|
|
37
|
+
*/
|
|
38
|
+
buildVocabulary(texts: string[]): void;
|
|
39
|
+
/** Freeze vocabulary — no new words added after this. */
|
|
40
|
+
freeze(): void;
|
|
29
41
|
embed(texts: string[]): Promise<number[][]>;
|
|
30
42
|
dimensions(): number;
|
|
31
43
|
private tokenize;
|
package/dist/src/embeddings.js
CHANGED
|
@@ -1,26 +1,60 @@
|
|
|
1
1
|
export class GeminiEmbedding {
|
|
2
2
|
apiKey;
|
|
3
|
+
model = "gemini-embedding-001";
|
|
4
|
+
baseUrl = "https://generativelanguage.googleapis.com/v1beta";
|
|
5
|
+
static BATCH_LIMIT = 100;
|
|
3
6
|
constructor(apiKey) {
|
|
4
7
|
this.apiKey = apiKey;
|
|
5
8
|
}
|
|
6
9
|
async embed(texts) {
|
|
7
10
|
const results = [];
|
|
8
|
-
|
|
9
|
-
|
|
10
|
-
|
|
11
|
-
|
|
12
|
-
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
|
|
11
|
+
// Process in chunks of BATCH_LIMIT
|
|
12
|
+
for (let i = 0; i < texts.length; i += GeminiEmbedding.BATCH_LIMIT) {
|
|
13
|
+
const chunk = texts.slice(i, i + GeminiEmbedding.BATCH_LIMIT);
|
|
14
|
+
try {
|
|
15
|
+
const chunkResults = await this.batchEmbed(chunk);
|
|
16
|
+
results.push(...chunkResults);
|
|
17
|
+
}
|
|
18
|
+
catch {
|
|
19
|
+
// Fallback: sequential embedding if batch fails
|
|
20
|
+
for (const text of chunk) {
|
|
21
|
+
results.push(await this.singleEmbed(text));
|
|
22
|
+
}
|
|
18
23
|
}
|
|
19
|
-
const data = await response.json();
|
|
20
|
-
results.push(data.embedding.values);
|
|
21
24
|
}
|
|
22
25
|
return results;
|
|
23
26
|
}
|
|
27
|
+
async batchEmbed(texts) {
|
|
28
|
+
const response = await fetch(`${this.baseUrl}/models/${this.model}:batchEmbedContents?key=${this.apiKey}`, {
|
|
29
|
+
method: "POST",
|
|
30
|
+
headers: { "Content-Type": "application/json" },
|
|
31
|
+
body: JSON.stringify({
|
|
32
|
+
requests: texts.map(text => ({
|
|
33
|
+
model: `models/${this.model}`,
|
|
34
|
+
content: { parts: [{ text }] },
|
|
35
|
+
})),
|
|
36
|
+
}),
|
|
37
|
+
});
|
|
38
|
+
if (!response.ok) {
|
|
39
|
+
throw new Error(`Gemini API error: ${response.status} ${response.statusText}`);
|
|
40
|
+
}
|
|
41
|
+
const data = await response.json();
|
|
42
|
+
return data.embeddings.map((e) => e.values);
|
|
43
|
+
}
|
|
44
|
+
async singleEmbed(text) {
|
|
45
|
+
const response = await fetch(`${this.baseUrl}/models/${this.model}:embedContent?key=${this.apiKey}`, {
|
|
46
|
+
method: "POST",
|
|
47
|
+
headers: { "Content-Type": "application/json" },
|
|
48
|
+
body: JSON.stringify({
|
|
49
|
+
content: { parts: [{ text }] }
|
|
50
|
+
})
|
|
51
|
+
});
|
|
52
|
+
if (!response.ok) {
|
|
53
|
+
throw new Error(`Gemini API error: ${response.status} ${response.statusText}`);
|
|
54
|
+
}
|
|
55
|
+
const data = await response.json();
|
|
56
|
+
return data.embedding.values;
|
|
57
|
+
}
|
|
24
58
|
dimensions() {
|
|
25
59
|
return 768;
|
|
26
60
|
}
|
|
@@ -78,57 +112,55 @@ export class OllamaEmbedding {
|
|
|
78
112
|
}
|
|
79
113
|
export class KeywordEmbedding {
|
|
80
114
|
vocabulary = new Map();
|
|
81
|
-
|
|
82
|
-
|
|
83
|
-
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
|
|
90
|
-
|
|
115
|
+
frozen = false;
|
|
116
|
+
/**
|
|
117
|
+
* Add texts to the vocabulary (call during indexing phase).
|
|
118
|
+
* After freeze(), new words are silently ignored.
|
|
119
|
+
*/
|
|
120
|
+
buildVocabulary(texts) {
|
|
121
|
+
if (this.frozen)
|
|
122
|
+
return;
|
|
123
|
+
for (const text of texts) {
|
|
124
|
+
for (const word of this.tokenize(text)) {
|
|
125
|
+
if (!this.vocabulary.has(word)) {
|
|
126
|
+
this.vocabulary.set(word, this.vocabulary.size);
|
|
91
127
|
}
|
|
92
128
|
}
|
|
93
129
|
}
|
|
94
|
-
|
|
130
|
+
}
|
|
131
|
+
/** Freeze vocabulary — no new words added after this. */
|
|
132
|
+
freeze() {
|
|
133
|
+
this.frozen = true;
|
|
134
|
+
}
|
|
135
|
+
async embed(texts) {
|
|
136
|
+
// If not frozen yet, add these texts to vocabulary first
|
|
137
|
+
if (!this.frozen) {
|
|
138
|
+
this.buildVocabulary(texts);
|
|
139
|
+
}
|
|
140
|
+
const vocabSize = this.vocabulary.size;
|
|
141
|
+
if (vocabSize === 0) {
|
|
95
142
|
return texts.map(() => [0]);
|
|
96
143
|
}
|
|
97
|
-
// Create TF vectors
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
const
|
|
101
|
-
for (const
|
|
102
|
-
const idx = this.vocabulary.get(
|
|
103
|
-
|
|
144
|
+
// Create TF vectors using the FIXED vocabulary
|
|
145
|
+
return texts.map((text) => {
|
|
146
|
+
const vector = new Array(vocabSize).fill(0);
|
|
147
|
+
const words = this.tokenize(text);
|
|
148
|
+
for (const word of words) {
|
|
149
|
+
const idx = this.vocabulary.get(word);
|
|
150
|
+
if (idx !== undefined) {
|
|
151
|
+
vector[idx] += 1;
|
|
152
|
+
}
|
|
104
153
|
}
|
|
105
154
|
// Normalize by document length
|
|
106
|
-
const len =
|
|
155
|
+
const len = words.length || 1;
|
|
107
156
|
for (let i = 0; i < vector.length; i++) {
|
|
108
157
|
vector[i] /= len;
|
|
109
158
|
}
|
|
110
|
-
|
|
111
|
-
}
|
|
112
|
-
// Apply IDF weighting
|
|
113
|
-
const docCount = texts.length;
|
|
114
|
-
const idf = new Array(this.vocabSize).fill(0);
|
|
115
|
-
for (let i = 0; i < this.vocabSize; i++) {
|
|
116
|
-
let df = 0;
|
|
117
|
-
for (const vec of vectors) {
|
|
118
|
-
if (vec[i] > 0)
|
|
119
|
-
df++;
|
|
120
|
-
}
|
|
121
|
-
idf[i] = Math.log((docCount + 1) / (df + 1)) + 1;
|
|
122
|
-
}
|
|
123
|
-
for (const vec of vectors) {
|
|
124
|
-
for (let i = 0; i < vec.length; i++) {
|
|
125
|
-
vec[i] *= idf[i];
|
|
126
|
-
}
|
|
127
|
-
}
|
|
128
|
-
return vectors;
|
|
159
|
+
return vector;
|
|
160
|
+
});
|
|
129
161
|
}
|
|
130
162
|
dimensions() {
|
|
131
|
-
return this.
|
|
163
|
+
return this.vocabulary.size || 1;
|
|
132
164
|
}
|
|
133
165
|
tokenize(text) {
|
|
134
166
|
return text
|
|
@@ -1,3 +1,4 @@
|
|
|
1
|
+
import { KeywordEmbedding } from "./embeddings.js";
|
|
1
2
|
import { VectorStore } from "./vector-store.js";
|
|
2
3
|
const DEFAULT_MIN_SCORE = 0.3;
|
|
3
4
|
const DEFAULT_TOP_K = 4;
|
|
@@ -40,6 +41,10 @@ export class IntentRouter {
|
|
|
40
41
|
description: entries[i].text
|
|
41
42
|
});
|
|
42
43
|
}
|
|
44
|
+
// Freeze keyword vocabulary so query vectors use the same dimensions
|
|
45
|
+
if (this.embeddingProvider instanceof KeywordEmbedding) {
|
|
46
|
+
this.embeddingProvider.freeze();
|
|
47
|
+
}
|
|
43
48
|
this.indexed = true;
|
|
44
49
|
this.logger.debug(`[mcp-bridge] Intent router indexed ${entries.length} tools`);
|
|
45
50
|
}
|
package/dist/src/protocol.js
CHANGED
|
@@ -3,14 +3,23 @@ import { join, dirname } from "path";
|
|
|
3
3
|
import { fileURLToPath } from "url";
|
|
4
4
|
const __filename = fileURLToPath(import.meta.url);
|
|
5
5
|
const __dirname = dirname(__filename);
|
|
6
|
-
|
|
7
|
-
|
|
8
|
-
|
|
9
|
-
|
|
10
|
-
|
|
11
|
-
|
|
6
|
+
function loadPackageVersion() {
|
|
7
|
+
const candidates = [
|
|
8
|
+
join(__dirname, "..", "package.json"),
|
|
9
|
+
join(__dirname, "..", "..", "package.json"),
|
|
10
|
+
join(__dirname, "..", "..", "..", "package.json"),
|
|
11
|
+
];
|
|
12
|
+
for (const p of candidates) {
|
|
13
|
+
try {
|
|
14
|
+
const pkg = JSON.parse(readFileSync(p, "utf-8"));
|
|
15
|
+
if (pkg.version)
|
|
16
|
+
return pkg.version;
|
|
17
|
+
}
|
|
18
|
+
catch { /* try next candidate */ }
|
|
12
19
|
}
|
|
13
|
-
|
|
20
|
+
return "0.0.0";
|
|
21
|
+
}
|
|
22
|
+
export const PACKAGE_VERSION = loadPackageVersion();
|
|
14
23
|
export async function initializeProtocol(transport, version) {
|
|
15
24
|
const initRequest = {
|
|
16
25
|
jsonrpc: "2.0",
|
|
@@ -47,46 +47,10 @@ export interface UserTurn {
|
|
|
47
47
|
content: string;
|
|
48
48
|
timestamp: number;
|
|
49
49
|
}
|
|
50
|
-
/**
|
|
51
|
-
|
|
52
|
-
*/
|
|
53
|
-
export declare
|
|
54
|
-
private config;
|
|
55
|
-
private logger;
|
|
56
|
-
constructor(config: SmartFilterConfig, logger: OpenClawLogger);
|
|
57
|
-
/**
|
|
58
|
-
* Main filter entry point
|
|
59
|
-
*/
|
|
60
|
-
filter(servers: Record<string, PluginServerConfig>, allTools: Map<string, McpTool[]>, userTurns: UserTurn[]): Promise<FilterResult>;
|
|
61
|
-
private performFilter;
|
|
62
|
-
/**
|
|
63
|
-
* Extract meaningful intent from last 1-3 user turns
|
|
64
|
-
*/
|
|
65
|
-
static synthesizeQuery(userTurns: UserTurn[]): string;
|
|
66
|
-
private static extractMeaningfulContent;
|
|
67
|
-
private prepareFilterableServers;
|
|
68
|
-
private normalizeKeywords;
|
|
69
|
-
/**
|
|
70
|
-
* Score servers using weighted overlap scoring
|
|
71
|
-
*/
|
|
72
|
-
private scoreServers;
|
|
73
|
-
static tokenize(text: string): string[];
|
|
74
|
-
private calculateServerScore;
|
|
75
|
-
private getSemanticScore;
|
|
76
|
-
private countOverlap;
|
|
77
|
-
/**
|
|
78
|
-
* Select servers using dynamic topServers with confidence-based expansion
|
|
79
|
-
*/
|
|
80
|
-
private selectServers;
|
|
81
|
-
/**
|
|
82
|
-
* Filter tools within selected servers
|
|
83
|
-
*/
|
|
84
|
-
private filterTools;
|
|
85
|
-
private calculateToolScore;
|
|
86
|
-
private calculateConfidenceScore;
|
|
87
|
-
private createUnfilteredResult;
|
|
88
|
-
private logTelemetry;
|
|
89
|
-
}
|
|
50
|
+
/** Tokenize text into lowercase words, stripping punctuation. */
|
|
51
|
+
export declare function tokenize(text: string): string[];
|
|
52
|
+
/** Extract meaningful intent from last 1-3 user turns. */
|
|
53
|
+
export declare function synthesizeQuery(userTurns: UserTurn[]): string;
|
|
90
54
|
export declare const DEFAULTS: Required<SmartFilterConfig>;
|
|
91
55
|
/** Normalize keywords: lowercase, trim, dedup, strip empties, cap at MAX_KEYWORDS. */
|
|
92
56
|
export declare function validateKeywords(raw: string[]): string[];
|
package/dist/src/smart-filter.js
CHANGED
|
@@ -2,390 +2,58 @@
|
|
|
2
2
|
* Smart Filter v2 - Phase 1: Keyword-based filtering
|
|
3
3
|
* Zero external dependencies, graceful degradation
|
|
4
4
|
*/
|
|
5
|
-
|
|
6
|
-
|
|
7
|
-
|
|
8
|
-
|
|
9
|
-
|
|
10
|
-
|
|
11
|
-
|
|
12
|
-
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
|
|
18
|
-
|
|
19
|
-
|
|
20
|
-
|
|
21
|
-
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
|
|
27
|
-
|
|
28
|
-
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
if (this.config.telemetry) {
|
|
58
|
-
this.logTelemetry(result, duration);
|
|
59
|
-
}
|
|
60
|
-
return result;
|
|
61
|
-
}
|
|
62
|
-
catch (error) {
|
|
63
|
-
this.logger.warn(`[smart-filter] Filter failed: ${error instanceof Error ? error.message : String(error)}, falling back to show all`);
|
|
64
|
-
const result = this.createUnfilteredResult(servers, allTools, "keyword");
|
|
65
|
-
result.metadata.timeoutOccurred = timeoutOccurred;
|
|
66
|
-
return result;
|
|
67
|
-
}
|
|
68
|
-
}
|
|
69
|
-
async performFilter(servers, allTools, userTurns) {
|
|
70
|
-
// Step 1: Query synthesis
|
|
71
|
-
const query = SmartFilter.synthesizeQuery(userTurns);
|
|
72
|
-
if (!query) {
|
|
73
|
-
this.logger.debug("[smart-filter] No meaningful query found, showing all servers");
|
|
74
|
-
return this.createUnfilteredResult(servers, allTools, "keyword", "");
|
|
75
|
-
}
|
|
76
|
-
// Step 2: Prepare filterable servers
|
|
77
|
-
const filterableServers = this.prepareFilterableServers(servers, allTools);
|
|
78
|
-
// Step 3: Level 1 - Server filtering
|
|
79
|
-
const serverScores = this.scoreServers(query, filterableServers);
|
|
80
|
-
const selectedServers = this.selectServers(serverScores, filterableServers);
|
|
81
|
-
// Step 4: Level 2 - Tool filtering
|
|
82
|
-
const toolResults = this.filterTools(query, selectedServers);
|
|
83
|
-
return {
|
|
84
|
-
servers: selectedServers.map(s => s.server),
|
|
85
|
-
tools: toolResults,
|
|
86
|
-
metadata: {
|
|
87
|
-
queryUsed: query,
|
|
88
|
-
totalServersBeforeFilter: Object.keys(servers).length,
|
|
89
|
-
totalToolsBeforeFilter: Array.from(allTools.values()).flat().length,
|
|
90
|
-
filterMode: "keyword",
|
|
91
|
-
timeoutOccurred: false,
|
|
92
|
-
confidenceScore: this.calculateConfidenceScore(serverScores),
|
|
93
|
-
},
|
|
94
|
-
};
|
|
95
|
-
}
|
|
96
|
-
/**
|
|
97
|
-
* Extract meaningful intent from last 1-3 user turns
|
|
98
|
-
*/
|
|
99
|
-
static synthesizeQuery(userTurns) {
|
|
100
|
-
if (!userTurns || userTurns.length === 0) {
|
|
101
|
-
return "";
|
|
102
|
-
}
|
|
103
|
-
// Take last 1-3 turns, newest first
|
|
104
|
-
const recentTurns = userTurns
|
|
105
|
-
.slice(-3)
|
|
106
|
-
.reverse()
|
|
107
|
-
.map(turn => turn.content.trim());
|
|
108
|
-
for (const content of recentTurns) {
|
|
109
|
-
const cleanedQuery = SmartFilter.extractMeaningfulContent(content);
|
|
110
|
-
if (cleanedQuery.length >= 3) {
|
|
111
|
-
return cleanedQuery;
|
|
112
|
-
}
|
|
113
|
-
}
|
|
114
|
-
// If all recent turns are too short, try combining them
|
|
115
|
-
const combined = recentTurns
|
|
116
|
-
.map(content => SmartFilter.extractMeaningfulContent(content))
|
|
117
|
-
.filter(content => content.length > 0)
|
|
118
|
-
.join(" ")
|
|
119
|
-
.trim();
|
|
120
|
-
return combined.length >= 3 ? combined : "";
|
|
121
|
-
}
|
|
122
|
-
static extractMeaningfulContent(content) {
|
|
123
|
-
// Remove metadata patterns
|
|
124
|
-
const cleaned = content
|
|
125
|
-
.replace(/\[.*?\]/g, "") // [timestamps], [commands]
|
|
126
|
-
.replace(/^\s*[>]*\s*/gm, "") // quote markers
|
|
127
|
-
.replace(/^\s*[-*•]\s*/gm, "") // list markers
|
|
128
|
-
.trim();
|
|
129
|
-
// Filter out noise words/confirmations
|
|
130
|
-
const noisePatterns = [
|
|
131
|
-
/^(yes|no|ok|okay|sure|thanks?|thank you)\.?$/i,
|
|
132
|
-
/^(do it|go ahead|proceed)\.?$/i,
|
|
133
|
-
/^(yes,?\s+(do it|go ahead|proceed))\.?$/i,
|
|
134
|
-
/^\?+$/,
|
|
135
|
-
/^\.+$/,
|
|
136
|
-
/^!+$/,
|
|
137
|
-
];
|
|
138
|
-
if (noisePatterns.some(pattern => pattern.test(cleaned))) {
|
|
139
|
-
return "";
|
|
140
|
-
}
|
|
141
|
-
// Remove trailing "please" and other politeness words
|
|
142
|
-
const withoutPoliteness = cleaned
|
|
143
|
-
.replace(/\s+please\.?$/i, "")
|
|
144
|
-
.replace(/\s+thanks?\.?$/i, "")
|
|
145
|
-
.trim();
|
|
146
|
-
return withoutPoliteness;
|
|
147
|
-
}
|
|
148
|
-
prepareFilterableServers(servers, allTools) {
|
|
149
|
-
return Object.entries(servers).map(([name, config]) => ({
|
|
150
|
-
name,
|
|
151
|
-
description: config.description || "",
|
|
152
|
-
keywords: this.normalizeKeywords(config.keywords || []),
|
|
153
|
-
tools: allTools.get(name) || [],
|
|
154
|
-
}));
|
|
155
|
-
}
|
|
156
|
-
normalizeKeywords(keywords) {
|
|
157
|
-
return keywords
|
|
158
|
-
.slice(0, 30) // Max 30 keywords
|
|
159
|
-
.map(kw => kw.toLowerCase().trim())
|
|
160
|
-
.filter(kw => kw.length > 0)
|
|
161
|
-
.filter((kw, index, arr) => arr.indexOf(kw) === index); // Deduplicate
|
|
162
|
-
}
|
|
163
|
-
/**
|
|
164
|
-
* Score servers using weighted overlap scoring
|
|
165
|
-
*/
|
|
166
|
-
scoreServers(query, servers) {
|
|
167
|
-
const queryWords = SmartFilter.tokenize(query.toLowerCase());
|
|
168
|
-
return servers.map(server => ({
|
|
169
|
-
server,
|
|
170
|
-
score: this.calculateServerScore(queryWords, server),
|
|
171
|
-
}));
|
|
172
|
-
}
|
|
173
|
-
static tokenize(text) {
|
|
174
|
-
return text
|
|
175
|
-
.toLowerCase()
|
|
176
|
-
.replace(/[^\w\s]/g, " ")
|
|
177
|
-
.split(/\s+/)
|
|
178
|
-
.filter(word => word.length > 0);
|
|
179
|
-
}
|
|
180
|
-
calculateServerScore(queryWords, server) {
|
|
181
|
-
if (queryWords.length === 0)
|
|
182
|
-
return 0;
|
|
183
|
-
const descriptionWords = SmartFilter.tokenize(server.description);
|
|
184
|
-
const keywordWords = server.keywords;
|
|
185
|
-
const allServerWords = [...descriptionWords, ...keywordWords];
|
|
186
|
-
// Calculate overlaps
|
|
187
|
-
const descMatches = this.countOverlap(queryWords, descriptionWords);
|
|
188
|
-
// Count keyword matches that are NOT already counted in description
|
|
189
|
-
const keywordOnlyWords = keywordWords.filter(kw => !descriptionWords.includes(kw));
|
|
190
|
-
const keywordOnlyMatches = this.countOverlap(queryWords, keywordOnlyWords);
|
|
191
|
-
// Add basic synonym matching for common terms
|
|
192
|
-
let semanticMatches = 0;
|
|
193
|
-
for (const queryWord of queryWords) {
|
|
194
|
-
semanticMatches += this.getSemanticScore(queryWord, allServerWords);
|
|
195
|
-
}
|
|
196
|
-
// Also check for partial/substring matches for better recall
|
|
197
|
-
let partialMatches = 0;
|
|
198
|
-
for (const queryWord of queryWords) {
|
|
199
|
-
for (const serverWord of allServerWords) {
|
|
200
|
-
if (queryWord.length > 3 && serverWord.includes(queryWord)) {
|
|
201
|
-
partialMatches += 0.3; // Partial match gets partial credit
|
|
202
|
-
}
|
|
203
|
-
}
|
|
204
|
-
}
|
|
205
|
-
// Weighted scoring: description 1.0x, keywords 0.7x, semantic 0.5x, partial matches 0.3x
|
|
206
|
-
const score = (descMatches * 1.0 + keywordOnlyMatches * 0.7 + semanticMatches * 0.5 + partialMatches) / queryWords.length;
|
|
207
|
-
return score;
|
|
208
|
-
}
|
|
209
|
-
getSemanticScore(queryWord, serverWords) {
|
|
210
|
-
// Comprehensive synonym/semantic matching
|
|
211
|
-
const synonymMap = {
|
|
212
|
-
// Finance/payment terms
|
|
213
|
-
money: ["payment", "transfer", "currency", "invoice", "billing", "charge", "account", "balance"],
|
|
214
|
-
payment: ["money", "transfer", "invoice", "billing", "charge", "process"],
|
|
215
|
-
send: ["transfer", "payment", "international"],
|
|
216
|
-
transfer: ["send", "payment", "money", "international"],
|
|
217
|
-
invoice: ["bill", "charge", "payment", "billing", "customer"],
|
|
218
|
-
account: ["balance", "money", "payment"],
|
|
219
|
-
balance: ["account", "money"],
|
|
220
|
-
international: ["transfer", "money", "payment"],
|
|
221
|
-
// Task/productivity terms
|
|
222
|
-
task: ["todo", "reminder", "project", "management", "productivity"],
|
|
223
|
-
todo: ["task", "reminder", "management"],
|
|
224
|
-
create: ["add", "new", "task", "issue"],
|
|
225
|
-
project: ["task", "management", "board", "productivity"],
|
|
226
|
-
manage: ["task", "project", "productivity"],
|
|
227
|
-
schedule: ["meeting", "calendar", "appointment"],
|
|
228
|
-
meeting: ["schedule", "calendar"],
|
|
229
|
-
// Development terms
|
|
230
|
-
code: ["repo", "repository", "commit", "branch", "github"],
|
|
231
|
-
issue: ["bug", "ticket", "github", "repository"],
|
|
232
|
-
bug: ["issue", "github"],
|
|
233
|
-
repository: ["repo", "code", "github"],
|
|
234
|
-
commit: ["code", "repository", "github"],
|
|
235
|
-
// Location/maps terms
|
|
236
|
-
location: ["map", "address", "directions", "geocode", "places"],
|
|
237
|
-
directions: ["map", "route", "location"],
|
|
238
|
-
address: ["location", "geocode"],
|
|
239
|
-
geocode: ["address", "location"],
|
|
240
|
-
restaurant: ["location", "places", "map"],
|
|
241
|
-
nearby: ["location", "map"],
|
|
242
|
-
// Storage/document terms
|
|
243
|
-
upload: ["store", "save", "file", "document"],
|
|
244
|
-
document: ["file", "note", "upload", "storage"],
|
|
245
|
-
store: ["save", "upload", "note"],
|
|
246
|
-
notes: ["document", "store"],
|
|
247
|
-
// Infrastructure terms
|
|
248
|
-
deploy: ["infrastructure", "cloud", "server"],
|
|
249
|
-
cloud: ["infrastructure", "deploy"],
|
|
250
|
-
server: ["infrastructure", "monitoring"],
|
|
251
|
-
infrastructure: ["cloud", "server", "deploy"],
|
|
252
|
-
monitoring: ["server", "infrastructure"],
|
|
253
|
-
// Collaboration terms
|
|
254
|
-
whiteboard: ["collaboration", "brainstorming"],
|
|
255
|
-
brainstorming: ["whiteboard", "collaboration"],
|
|
256
|
-
collaboration: ["whiteboard", "design"],
|
|
257
|
-
// Search terms
|
|
258
|
-
search: ["find", "information", "papers"],
|
|
259
|
-
find: ["search", "information"],
|
|
260
|
-
information: ["search", "find"],
|
|
261
|
-
// Web scraping terms
|
|
262
|
-
analyze: ["data", "extract", "website"],
|
|
263
|
-
extract: ["data", "scraping", "website"],
|
|
264
|
-
website: ["scraping", "analyze", "extract"],
|
|
265
|
-
data: ["extract", "analyze", "scraping"],
|
|
266
|
-
traffic: ["website", "analyze"],
|
|
267
|
-
};
|
|
268
|
-
const synonyms = synonymMap[queryWord.toLowerCase()] || [];
|
|
269
|
-
let matches = 0;
|
|
270
|
-
for (const synonym of synonyms) {
|
|
271
|
-
if (serverWords.includes(synonym)) {
|
|
272
|
-
matches += 1;
|
|
273
|
-
}
|
|
274
|
-
}
|
|
275
|
-
return matches;
|
|
276
|
-
}
|
|
277
|
-
countOverlap(words1, words2) {
|
|
278
|
-
const set2 = new Set(words2);
|
|
279
|
-
return words1.filter(word => set2.has(word)).length;
|
|
280
|
-
}
|
|
281
|
-
/**
|
|
282
|
-
* Select servers using dynamic topServers with confidence-based expansion
|
|
283
|
-
*/
|
|
284
|
-
selectServers(serverScores, allServers) {
|
|
285
|
-
// Include always-included servers first
|
|
286
|
-
const alwaysIncluded = allServers
|
|
287
|
-
.filter(s => this.config.alwaysInclude.includes(s.name))
|
|
288
|
-
.map(server => ({ server, score: 1.0 }));
|
|
289
|
-
// Sort all servers by score
|
|
290
|
-
const allScoredServers = serverScores
|
|
291
|
-
.filter(({ server }) => !this.config.alwaysInclude.includes(server.name))
|
|
292
|
-
.sort((a, b) => b.score - a.score);
|
|
293
|
-
// Primary filter: servers that meet threshold
|
|
294
|
-
const thresholdServers = allScoredServers.filter(({ score }) => score >= this.config.serverThreshold);
|
|
295
|
-
// Fallback: if too few servers pass threshold, include more based on ranking
|
|
296
|
-
let scoredServers = thresholdServers;
|
|
297
|
-
if (thresholdServers.length < 2) {
|
|
298
|
-
// Take at least top 3 servers regardless of threshold for better recall
|
|
299
|
-
scoredServers = allScoredServers.slice(0, Math.max(3, this.config.topServers));
|
|
300
|
-
this.logger.debug(`[smart-filter] Only ${thresholdServers.length} servers met threshold, expanding to top ${scoredServers.length}`);
|
|
301
|
-
}
|
|
302
|
-
// Dynamic topServers based on confidence
|
|
303
|
-
let numServers = this.config.topServers;
|
|
304
|
-
if (scoredServers.length >= 2) {
|
|
305
|
-
const topScore = scoredServers[0].score;
|
|
306
|
-
const cutoffScore = scoredServers[Math.min(this.config.topServers - 1, scoredServers.length - 1)].score;
|
|
307
|
-
const gap = topScore - cutoffScore;
|
|
308
|
-
// If gap is small (uncertain), expand toward hard cap
|
|
309
|
-
if (gap < 0.1 && scoredServers.length > numServers) {
|
|
310
|
-
numServers = Math.min(this.config.hardCap, scoredServers.length);
|
|
311
|
-
this.logger.debug(`[smart-filter] Low confidence (gap: ${gap.toFixed(3)}), expanding to ${numServers} servers`);
|
|
312
|
-
}
|
|
313
|
-
}
|
|
314
|
-
const selectedScored = scoredServers.slice(0, numServers);
|
|
315
|
-
return [...alwaysIncluded, ...selectedScored];
|
|
316
|
-
}
|
|
317
|
-
/**
|
|
318
|
-
* Filter tools within selected servers
|
|
319
|
-
*/
|
|
320
|
-
filterTools(query, selectedServers) {
|
|
321
|
-
const queryWords = SmartFilter.tokenize(query);
|
|
322
|
-
const allTools = [];
|
|
323
|
-
for (const { server } of selectedServers) {
|
|
324
|
-
for (const tool of server.tools) {
|
|
325
|
-
const score = this.calculateToolScore(queryWords, tool);
|
|
326
|
-
if (score >= this.config.toolThreshold) {
|
|
327
|
-
allTools.push({ serverId: server.name, tool, score });
|
|
328
|
-
}
|
|
329
|
-
}
|
|
330
|
-
}
|
|
331
|
-
// Sort by score and take top N
|
|
332
|
-
return allTools
|
|
333
|
-
.sort((a, b) => b.score - a.score)
|
|
334
|
-
.slice(0, this.config.topTools)
|
|
335
|
-
.map(({ serverId, tool }) => ({ serverId, tool }));
|
|
336
|
-
}
|
|
337
|
-
calculateToolScore(queryWords, tool) {
|
|
338
|
-
if (queryWords.length === 0)
|
|
339
|
-
return 0;
|
|
340
|
-
const nameWords = SmartFilter.tokenize(tool.name);
|
|
341
|
-
const descWords = SmartFilter.tokenize(tool.description || "");
|
|
342
|
-
const nameMatches = this.countOverlap(queryWords, nameWords);
|
|
343
|
-
const descMatches = this.countOverlap(queryWords, descWords) - this.countOverlap(queryWords, nameWords);
|
|
344
|
-
// Weighted: description 1.0x, name 0.5x (name is less descriptive usually)
|
|
345
|
-
const score = (descMatches * 1.0 + nameMatches * 0.5) / queryWords.length;
|
|
346
|
-
return score;
|
|
347
|
-
}
|
|
348
|
-
calculateConfidenceScore(serverScores) {
|
|
349
|
-
if (serverScores.length < 2)
|
|
350
|
-
return 1.0;
|
|
351
|
-
const scores = serverScores.map(s => s.score).sort((a, b) => b - a);
|
|
352
|
-
const topScore = scores[0];
|
|
353
|
-
const secondScore = scores[1];
|
|
354
|
-
// Confidence based on gap between top scores
|
|
355
|
-
if (topScore === 0)
|
|
356
|
-
return 0;
|
|
357
|
-
return Math.min(1.0, (topScore - secondScore) / topScore);
|
|
358
|
-
}
|
|
359
|
-
createUnfilteredResult(servers, allTools, filterMode, queryUsed = "") {
|
|
360
|
-
const filterableServers = this.prepareFilterableServers(servers, allTools);
|
|
361
|
-
const tools = Array.from(allTools.entries()).flatMap(([serverId, tools]) => tools.map(tool => ({ serverId, tool })));
|
|
362
|
-
return {
|
|
363
|
-
servers: filterableServers,
|
|
364
|
-
tools,
|
|
365
|
-
metadata: {
|
|
366
|
-
queryUsed,
|
|
367
|
-
totalServersBeforeFilter: Object.keys(servers).length,
|
|
368
|
-
totalToolsBeforeFilter: tools.length,
|
|
369
|
-
filterMode,
|
|
370
|
-
timeoutOccurred: false,
|
|
371
|
-
},
|
|
372
|
-
};
|
|
373
|
-
}
|
|
374
|
-
logTelemetry(result, durationMs) {
|
|
375
|
-
const telemetry = {
|
|
376
|
-
timestamp: new Date().toISOString(),
|
|
377
|
-
query: result.metadata.queryUsed,
|
|
378
|
-
serversReturned: result.servers.length,
|
|
379
|
-
toolsReturned: result.tools.length,
|
|
380
|
-
totalServersBefore: result.metadata.totalServersBeforeFilter,
|
|
381
|
-
totalToolsBefore: result.metadata.totalToolsBeforeFilter,
|
|
382
|
-
filterMode: result.metadata.filterMode,
|
|
383
|
-
durationMs,
|
|
384
|
-
confidenceScore: result.metadata.confidenceScore,
|
|
385
|
-
timeoutOccurred: result.metadata.timeoutOccurred,
|
|
386
|
-
};
|
|
387
|
-
this.logger.debug("[smart-filter] Telemetry:", JSON.stringify(telemetry));
|
|
388
|
-
}
|
|
5
|
+
// ── Shared helpers (used by both standalone functions and legacy class) ───────
|
|
6
|
+
/** Tokenize text into lowercase words, stripping punctuation. */
|
|
7
|
+
export function tokenize(text) {
|
|
8
|
+
return text
|
|
9
|
+
.toLowerCase()
|
|
10
|
+
.replace(/[^\w\s]/g, " ")
|
|
11
|
+
.split(/\s+/)
|
|
12
|
+
.filter(word => word.length > 0);
|
|
13
|
+
}
|
|
14
|
+
function extractMeaningfulContent(content) {
|
|
15
|
+
const cleaned = content
|
|
16
|
+
.replace(/\[.*?\]/g, "")
|
|
17
|
+
.replace(/^\s*[>]*\s*/gm, "")
|
|
18
|
+
.replace(/^\s*[-*•]\s*/gm, "")
|
|
19
|
+
.trim();
|
|
20
|
+
const noisePatterns = [
|
|
21
|
+
/^(yes|no|ok|okay|sure|thanks?|thank you)\.?$/i,
|
|
22
|
+
/^(do it|go ahead|proceed)\.?$/i,
|
|
23
|
+
/^(yes,?\s+(do it|go ahead|proceed))\.?$/i,
|
|
24
|
+
/^\?+$/,
|
|
25
|
+
/^\.+$/,
|
|
26
|
+
/^!+$/,
|
|
27
|
+
];
|
|
28
|
+
if (noisePatterns.some(pattern => pattern.test(cleaned))) {
|
|
29
|
+
return "";
|
|
30
|
+
}
|
|
31
|
+
return cleaned
|
|
32
|
+
.replace(/\s+please\.?$/i, "")
|
|
33
|
+
.replace(/\s+thanks?\.?$/i, "")
|
|
34
|
+
.trim();
|
|
35
|
+
}
|
|
36
|
+
/**
 * Extract meaningful intent from last 1-3 user turns.
 *
 * Walks the most recent turns (newest first) and returns the first one that
 * yields a cleaned query of at least 3 characters. If no single turn
 * qualifies, the cleaned non-empty turns are concatenated (still newest
 * first) and returned when the combined text reaches 3 characters.
 *
 * @param userTurns - Array of { content: string } turns, oldest first; may be
 *   null/undefined or empty, in which case "" is returned.
 * @returns The synthesized query string, or "" when no intent is found.
 */
export function synthesizeQuery(userTurns) {
    if (!userTurns || userTurns.length === 0) {
        return "";
    }
    // Clean each recent turn exactly once; the original implementation
    // re-ran extractMeaningfulContent over every turn on the fallback path.
    const cleanedTurns = userTurns
        .slice(-3)
        .reverse()
        .map(turn => extractMeaningfulContent(turn.content.trim()));
    for (const cleaned of cleanedTurns) {
        if (cleaned.length >= 3) {
            return cleaned;
        }
    }
    // Fallback: stitch together whatever fragments survived cleaning.
    const combined = cleanedTurns
        .filter(cleaned => cleaned.length > 0)
        .join(" ")
        .trim();
    return combined.length >= 3 ? combined : "";
}
|
|
390
58
|
// ── Standalone utility exports (for testing and external use) ────────────────
|
|
391
59
|
const MAX_KEYWORDS = 30;
|
|
@@ -424,10 +92,10 @@ export function validateKeywords(raw) {
|
|
|
424
92
|
export function scoreServer(queryTokens, serverName, description, keywords) {
|
|
425
93
|
if (queryTokens.length === 0)
|
|
426
94
|
return 0;
|
|
427
|
-
const descTokens = new Set(
|
|
428
|
-
for (const t of
|
|
95
|
+
const descTokens = new Set(tokenize(description));
|
|
96
|
+
for (const t of tokenize(serverName))
|
|
429
97
|
descTokens.add(t);
|
|
430
|
-
const kwTokens = new Set(validateKeywords(keywords).flatMap(kw =>
|
|
98
|
+
const kwTokens = new Set(validateKeywords(keywords).flatMap(kw => tokenize(kw)));
|
|
431
99
|
let descMatches = 0;
|
|
432
100
|
let kwOnlyMatches = 0;
|
|
433
101
|
for (const qt of queryTokens) {
|
|
@@ -503,14 +171,14 @@ export function filterServers(servers, userTurns, config, logger) {
|
|
|
503
171
|
const merged = { ...DEFAULTS, ...config };
|
|
504
172
|
const startTime = Date.now();
|
|
505
173
|
const userTurnObjects = userTurns.map(content => ({ content, timestamp: Date.now() }));
|
|
506
|
-
const query =
|
|
174
|
+
const query = synthesizeQuery(userTurnObjects) || null;
|
|
507
175
|
if (!query)
|
|
508
176
|
return showAll("no-query");
|
|
509
177
|
if (Date.now() - startTime > merged.timeoutMs) {
|
|
510
178
|
logger?.warn("[smart-filter] Timeout during query synthesis");
|
|
511
179
|
return showAll("timeout", query);
|
|
512
180
|
}
|
|
513
|
-
const queryTokens =
|
|
181
|
+
const queryTokens = tokenize(query);
|
|
514
182
|
if (queryTokens.length === 0)
|
|
515
183
|
return showAll("no-query");
|
|
516
184
|
const scores = scoreAllServers(queryTokens, servers);
|
|
@@ -13,7 +13,8 @@ export declare class StandaloneServer {
|
|
|
13
13
|
private directConnections;
|
|
14
14
|
constructor(config: BridgeConfig, logger: Logger);
|
|
15
15
|
private isRouterMode;
|
|
16
|
-
/** Start stdio mode: read JSON-RPC from stdin, write responses to stdout.
|
|
16
|
+
/** Start stdio mode: read JSON-RPC from stdin, write responses to stdout.
|
|
17
|
+
* Supports both newline-delimited JSON and LSP Content-Length framing. */
|
|
17
18
|
startStdio(): Promise<void>;
|
|
18
19
|
private processLine;
|
|
19
20
|
private writeResponse;
|
|
@@ -27,19 +27,73 @@ export class StandaloneServer {
|
|
|
27
27
|
isRouterMode() {
|
|
28
28
|
return (this.config.mode ?? "router") === "router";
|
|
29
29
|
}
|
|
30
|
-
/** Start stdio mode: read JSON-RPC from stdin, write responses to stdout.
|
|
30
|
+
/** Start stdio mode: read JSON-RPC from stdin, write responses to stdout.
|
|
31
|
+
* Supports both newline-delimited JSON and LSP Content-Length framing. */
|
|
31
32
|
async startStdio() {
|
|
32
33
|
const stdin = process.stdin;
|
|
33
34
|
const stdout = process.stdout;
|
|
34
35
|
stdin.setEncoding("utf8");
|
|
35
36
|
let buffer = "";
|
|
37
|
+
// LSP framing state
|
|
38
|
+
let lspContentLength = -1; // -1 means not in LSP mode for current message
|
|
39
|
+
let lspHeadersDone = false;
|
|
36
40
|
stdin.on("data", (chunk) => {
|
|
37
41
|
buffer += chunk;
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
|
|
42
|
+
// Process buffer in a loop — it may contain multiple messages
|
|
43
|
+
let progress = true;
|
|
44
|
+
while (progress) {
|
|
45
|
+
progress = false;
|
|
46
|
+
// If we're reading an LSP body, check if we have enough bytes
|
|
47
|
+
if (lspContentLength >= 0 && lspHeadersDone) {
|
|
48
|
+
if (buffer.length >= lspContentLength) {
|
|
49
|
+
const body = buffer.slice(0, lspContentLength);
|
|
50
|
+
buffer = buffer.slice(lspContentLength);
|
|
51
|
+
lspContentLength = -1;
|
|
52
|
+
lspHeadersDone = false;
|
|
53
|
+
const trimmed = body.trim();
|
|
54
|
+
if (trimmed) {
|
|
55
|
+
this.processLine(trimmed, stdout);
|
|
56
|
+
}
|
|
57
|
+
progress = true;
|
|
58
|
+
continue;
|
|
59
|
+
}
|
|
60
|
+
// Not enough data yet — wait for more
|
|
61
|
+
break;
|
|
62
|
+
}
|
|
63
|
+
// Look for complete lines to detect framing
|
|
64
|
+
const newlineIdx = buffer.indexOf("\n");
|
|
65
|
+
if (newlineIdx === -1)
|
|
66
|
+
break;
|
|
67
|
+
const line = buffer.slice(0, newlineIdx);
|
|
41
68
|
const trimmed = line.trim();
|
|
42
|
-
|
|
69
|
+
// LSP header detection
|
|
70
|
+
if (lspContentLength >= 0 && !lspHeadersDone) {
|
|
71
|
+
// We're reading LSP headers — consume until empty line
|
|
72
|
+
buffer = buffer.slice(newlineIdx + 1);
|
|
73
|
+
progress = true;
|
|
74
|
+
if (trimmed === "") {
|
|
75
|
+
// End of headers — next read the body
|
|
76
|
+
lspHeadersDone = true;
|
|
77
|
+
}
|
|
78
|
+
// Ignore other headers (Content-Type, etc.)
|
|
79
|
+
continue;
|
|
80
|
+
}
|
|
81
|
+
if (trimmed.startsWith("Content-Length:")) {
|
|
82
|
+
// Start of LSP-framed message
|
|
83
|
+
const lengthStr = trimmed.slice("Content-Length:".length).trim();
|
|
84
|
+
const length = parseInt(lengthStr, 10);
|
|
85
|
+
if (!isNaN(length) && length > 0) {
|
|
86
|
+
lspContentLength = length;
|
|
87
|
+
lspHeadersDone = false;
|
|
88
|
+
buffer = buffer.slice(newlineIdx + 1);
|
|
89
|
+
progress = true;
|
|
90
|
+
continue;
|
|
91
|
+
}
|
|
92
|
+
}
|
|
93
|
+
// Newline-delimited JSON: consume the line
|
|
94
|
+
buffer = buffer.slice(newlineIdx + 1);
|
|
95
|
+
progress = true;
|
|
96
|
+
if (!trimmed || !trimmed.startsWith("{"))
|
|
43
97
|
continue;
|
|
44
98
|
this.processLine(trimmed, stdout);
|
|
45
99
|
}
|
|
@@ -185,7 +239,8 @@ export class StandaloneServer {
|
|
|
185
239
|
jsonrpc: "2.0",
|
|
186
240
|
id,
|
|
187
241
|
result: {
|
|
188
|
-
content: [{ type: "text", text: JSON.stringify(result) }]
|
|
242
|
+
content: [{ type: "text", text: JSON.stringify(result) }],
|
|
243
|
+
isError: true
|
|
189
244
|
}
|
|
190
245
|
};
|
|
191
246
|
}
|
|
@@ -54,7 +54,7 @@ export declare abstract class BaseTransport implements McpTransport {
|
|
|
54
54
|
* @param contextDescription - Human-readable context for error messages (e.g. 'header "Authorization"')
|
|
55
55
|
* @param extraEnv - Additional env vars to check before process.env (e.g. merged child process env)
|
|
56
56
|
*/
|
|
57
|
-
export declare function resolveEnvVars(value: string, contextDescription: string, extraEnv?: Record<string, string | undefined>): string;
|
|
57
|
+
export declare function resolveEnvVars(value: string, contextDescription: string, extraEnv?: Record<string, string | undefined>, envFallback?: () => Record<string, string>): string;
|
|
58
58
|
/**
|
|
59
59
|
* Resolve ${VAR} placeholders in all values of a Record<string, string>.
|
|
60
60
|
*
|
|
@@ -62,14 +62,14 @@ export declare function resolveEnvVars(value: string, contextDescription: string
|
|
|
62
62
|
* @param contextPrefix - Prefix for error context (e.g. "header", "env key")
|
|
63
63
|
* @param extraEnv - Additional env vars to check before process.env
|
|
64
64
|
*/
|
|
65
|
-
export declare function resolveEnvRecord(record: Record<string, string>, contextPrefix: string, extraEnv?: Record<string, string | undefined>): Record<string, string>;
|
|
65
|
+
export declare function resolveEnvRecord(record: Record<string, string>, contextPrefix: string, extraEnv?: Record<string, string | undefined>, envFallback?: () => Record<string, string>): Record<string, string>;
|
|
66
66
|
/**
|
|
67
67
|
* Resolve ${VAR} placeholders in an array of command arguments.
|
|
68
68
|
*
|
|
69
69
|
* @param args - Array of argument strings with potential ${VAR} placeholders
|
|
70
70
|
* @param extraEnv - Additional env vars to check before process.env
|
|
71
71
|
*/
|
|
72
|
-
export declare function resolveArgs(args: string[], extraEnv?: Record<string, string | undefined>): string[];
|
|
72
|
+
export declare function resolveArgs(args: string[], extraEnv?: Record<string, string | undefined>, envFallback?: () => Record<string, string>): string[];
|
|
73
73
|
/**
|
|
74
74
|
* Warn if a URL uses non-TLS HTTP to a remote (non-localhost) host.
|
|
75
75
|
*/
|
|
@@ -117,16 +117,17 @@ export class BaseTransport {
|
|
|
117
117
|
* @param contextDescription - Human-readable context for error messages (e.g. 'header "Authorization"')
|
|
118
118
|
* @param extraEnv - Additional env vars to check before process.env (e.g. merged child process env)
|
|
119
119
|
*/
|
|
120
|
-
export function resolveEnvVars(value, contextDescription, extraEnv) {
|
|
120
|
+
export function resolveEnvVars(value, contextDescription, extraEnv, envFallback) {
|
|
121
121
|
return value.replace(/\$\{(\w+)\}/g, (_, varName) => {
|
|
122
122
|
const resolved = extraEnv?.[varName] ?? process.env[varName];
|
|
123
|
-
// If resolved is undefined or empty string, try the
|
|
124
|
-
//
|
|
125
|
-
// pre-existing empty env var
|
|
123
|
+
// If resolved is undefined or empty string, try the env fallback.
|
|
124
|
+
// Default fallback is loadOpenClawDotEnvFallback (handles the case where
|
|
125
|
+
// dotenv(override:false) didn't overwrite a pre-existing empty env var).
|
|
126
126
|
if (resolved === undefined || resolved === "") {
|
|
127
|
-
const
|
|
128
|
-
|
|
129
|
-
|
|
127
|
+
const fallbackFn = envFallback ?? loadOpenClawDotEnvFallback;
|
|
128
|
+
const fallbackVal = fallbackFn()[varName];
|
|
129
|
+
if (fallbackVal !== undefined && fallbackVal !== "") {
|
|
130
|
+
return fallbackVal;
|
|
130
131
|
}
|
|
131
132
|
}
|
|
132
133
|
if (resolved === undefined) {
|
|
@@ -142,10 +143,10 @@ export function resolveEnvVars(value, contextDescription, extraEnv) {
|
|
|
142
143
|
* @param contextPrefix - Prefix for error context (e.g. "header", "env key")
|
|
143
144
|
* @param extraEnv - Additional env vars to check before process.env
|
|
144
145
|
*/
|
|
145
|
-
/**
 * Resolve ${VAR} placeholders in all values of a Record<string, string>.
 *
 * @param record - Key/value map whose values may contain ${VAR} placeholders
 * @param contextPrefix - Prefix for error context (e.g. "header", "env key")
 * @param extraEnv - Additional env vars to check before process.env
 * @param envFallback - Optional supplier of fallback env values used when a
 *   variable is unset or empty
 */
export function resolveEnvRecord(record, contextPrefix, extraEnv, envFallback) {
    const entries = Object.entries(record).map(([key, value]) => [
        key,
        resolveEnvVars(value, `${contextPrefix} "${key}"`, extraEnv, envFallback),
    ]);
    return Object.fromEntries(entries);
}
|
|
@@ -155,8 +156,8 @@ export function resolveEnvRecord(record, contextPrefix, extraEnv) {
|
|
|
155
156
|
* @param args - Array of argument strings with potential ${VAR} placeholders
|
|
156
157
|
* @param extraEnv - Additional env vars to check before process.env
|
|
157
158
|
*/
|
|
158
|
-
/**
 * Resolve ${VAR} placeholders in an array of command arguments.
 *
 * @param args - Array of argument strings with potential ${VAR} placeholders
 * @param extraEnv - Additional env vars to check before process.env
 * @param envFallback - Optional supplier of fallback env values
 */
export function resolveArgs(args, extraEnv, envFallback) {
    const resolved = [];
    for (const arg of args) {
        resolved.push(resolveEnvVars(arg, `arg "${arg}"`, extraEnv, envFallback));
    }
    return resolved;
}
|
|
161
162
|
/**
|
|
162
163
|
* Warn if a URL uses non-TLS HTTP to a remote (non-localhost) host.
|
package/dist/src/types.d.ts
CHANGED
|
@@ -11,7 +11,8 @@ export async function checkForUpdate(logger) {
|
|
|
11
11
|
if (cachedUpdateInfo)
|
|
12
12
|
return cachedUpdateInfo;
|
|
13
13
|
const current = PACKAGE_VERSION;
|
|
14
|
-
const
|
|
14
|
+
const updateCmdParts = ["npm", "update", "-g", PACKAGE_NAME];
|
|
15
|
+
const updateCmd = updateCmdParts.join(" ");
|
|
15
16
|
try {
|
|
16
17
|
const latest = await npmViewVersion(logger);
|
|
17
18
|
const updateAvailable = latest !== current && isNewer(latest, current);
|
|
@@ -20,6 +21,7 @@ export async function checkForUpdate(logger) {
|
|
|
20
21
|
latestVersion: latest,
|
|
21
22
|
updateAvailable,
|
|
22
23
|
updateCommand: updateCmd,
|
|
24
|
+
updateCommandParts: updateCmdParts,
|
|
23
25
|
};
|
|
24
26
|
if (updateAvailable) {
|
|
25
27
|
logger.info(`[mcp-bridge] Update available: ${current} → ${latest}`);
|
|
@@ -35,6 +37,7 @@ export async function checkForUpdate(logger) {
|
|
|
35
37
|
latestVersion: current,
|
|
36
38
|
updateAvailable: false,
|
|
37
39
|
updateCommand: updateCmd,
|
|
40
|
+
updateCommandParts: updateCmdParts,
|
|
38
41
|
};
|
|
39
42
|
}
|
|
40
43
|
return cachedUpdateInfo;
|
|
@@ -66,8 +69,8 @@ export async function runUpdate(logger) {
|
|
|
66
69
|
}
|
|
67
70
|
logger.info(`[mcp-bridge] Running update: ${info.updateCommand}`);
|
|
68
71
|
try {
|
|
69
|
-
const
|
|
70
|
-
const output = await execFileAsync(
|
|
72
|
+
const [cmd, ...args] = info.updateCommandParts;
|
|
73
|
+
const output = await execFileAsync(cmd, args, 60_000);
|
|
71
74
|
// Invalidate cache so next check re-fetches
|
|
72
75
|
cachedUpdateInfo = null;
|
|
73
76
|
noticeDelivered = false;
|
|
@@ -87,9 +90,7 @@ export async function runUpdate(logger) {
|
|
|
87
90
|
// --- helpers ---
|
|
88
91
|
function npmViewVersion(_logger) {
|
|
89
92
|
return new Promise((resolve, reject) => {
|
|
90
|
-
|
|
91
|
-
execFile("npm", ["view", PACKAGE_NAME, "version"], { encoding: "utf-8" }, (err, stdout) => {
|
|
92
|
-
clearTimeout(timeout);
|
|
93
|
+
execFile("npm", ["view", PACKAGE_NAME, "version"], { encoding: "utf-8", timeout: 10_000 }, (err, stdout) => {
|
|
93
94
|
if (err)
|
|
94
95
|
return reject(err);
|
|
95
96
|
const ver = (stdout ?? "").trim();
|
|
@@ -109,9 +110,7 @@ function npmViewVersionSync(_logger) {
|
|
|
109
110
|
}
|
|
110
111
|
function execFileAsync(file, args, timeoutMs) {
|
|
111
112
|
return new Promise((resolve, reject) => {
|
|
112
|
-
const timeout = setTimeout(() => reject(new Error(`Command timed out after ${timeoutMs}ms`)), timeoutMs);
|
|
113
113
|
execFile(file, args, { encoding: "utf-8", timeout: timeoutMs }, (err, stdout, stderr) => {
|
|
114
|
-
clearTimeout(timeout);
|
|
115
114
|
if (err)
|
|
116
115
|
return reject(new Error(`${err.message}\n${stderr ?? ""}`));
|
|
117
116
|
resolve(stdout ?? "");
|
package/package.json
CHANGED
|
@@ -135,12 +135,14 @@ if [[ "$REMOVE" == "true" ]]; then
|
|
|
135
135
|
|
|
136
136
|
# Check if server exists in config
|
|
137
137
|
HAS_SERVER=$(python3 -c "
|
|
138
|
-
import json
|
|
139
|
-
|
|
138
|
+
import json, sys
|
|
139
|
+
server_name = sys.argv[1]
|
|
140
|
+
config_path = sys.argv[2]
|
|
141
|
+
with open(config_path) as f:
|
|
140
142
|
cfg = json.load(f)
|
|
141
143
|
servers = cfg.get('servers',{})
|
|
142
|
-
print('yes' if
|
|
143
|
-
" 2>/dev/null)
|
|
144
|
+
print('yes' if server_name in servers else 'no')
|
|
145
|
+
" "$SERVER_NAME" "$MCP_BRIDGE_JSON" 2>/dev/null)
|
|
144
146
|
|
|
145
147
|
if [[ "$HAS_SERVER" != "yes" ]]; then
|
|
146
148
|
echo "ℹ️ Server '$SERVER_NAME' not found in config. Nothing to remove."
|
|
@@ -154,17 +156,19 @@ print('yes' if '$SERVER_NAME' in servers else 'no')
|
|
|
154
156
|
|
|
155
157
|
# Remove server entry from config (keep servers/<name>/ directory)
|
|
156
158
|
python3 -c "
|
|
157
|
-
import json
|
|
158
|
-
|
|
159
|
+
import json, sys
|
|
160
|
+
server_name = sys.argv[1]
|
|
161
|
+
config_path = sys.argv[2]
|
|
162
|
+
with open(config_path) as f:
|
|
159
163
|
cfg = json.load(f)
|
|
160
164
|
servers = cfg.get('servers', {})
|
|
161
|
-
del servers[
|
|
162
|
-
with open(
|
|
165
|
+
del servers[server_name]
|
|
166
|
+
with open(config_path, 'w') as f:
|
|
163
167
|
json.dump(cfg, f, indent=2)
|
|
164
168
|
f.write('\n')
|
|
165
|
-
print('✅ Removed
|
|
166
|
-
print('ℹ️ Server recipe kept in servers
|
|
167
|
-
" 2>/dev/null
|
|
169
|
+
print(f'✅ Removed {server_name} from config')
|
|
170
|
+
print(f'ℹ️ Server recipe kept in servers/{server_name}/ (reinstall anytime)')
|
|
171
|
+
" "$SERVER_NAME" "$MCP_BRIDGE_JSON" 2>/dev/null
|
|
168
172
|
|
|
169
173
|
# Remove env var from .env if exists
|
|
170
174
|
if [[ -f "$ENV_VARS_FILE" ]] && [[ -s "$ENV_VARS_FILE" ]] && [[ -f "$ENV_FILE" ]]; then
|