fast-cxt-mcp 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +222 -0
- package/package.json +29 -0
- package/src/core.mjs +1206 -0
- package/src/executor.mjs +553 -0
- package/src/extract-key.mjs +235 -0
- package/src/protobuf.mjs +235 -0
- package/src/server.mjs +209 -0
package/src/core.mjs
ADDED
|
@@ -0,0 +1,1206 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Windsurf Fast Context — core protocol implementation (Node.js).
|
|
3
|
+
*
|
|
4
|
+
* Reverse-engineered Windsurf SWE-grep Connect-RPC/Protobuf protocol
|
|
5
|
+
* for standalone AI-driven semantic code search.
|
|
6
|
+
*
|
|
7
|
+
* Flow:
|
|
8
|
+
* query + tree → Windsurf Devstral API
|
|
9
|
+
* → Devstral returns tool_calls (rg/readfile/tree/ls/glob, up to 8 parallel)
|
|
10
|
+
* → execute locally → send results back → repeat for N rounds
|
|
11
|
+
* → ANSWER: file paths + line ranges + suggested rg patterns
|
|
12
|
+
*/
|
|
13
|
+
|
|
14
|
+
import { readdirSync, existsSync, statSync } from "node:fs";
|
|
15
|
+
import { resolve, join, relative, sep } from "node:path";
|
|
16
|
+
import { gzipSync } from "node:zlib";
|
|
17
|
+
import { randomUUID } from "node:crypto";
|
|
18
|
+
import { platform, arch, release, version as osVersion, hostname, cpus, totalmem } from "node:os";
|
|
19
|
+
import treeNodeCli from "tree-node-cli";
|
|
20
|
+
|
|
21
|
+
import {
|
|
22
|
+
ProtobufEncoder,
|
|
23
|
+
extractStrings,
|
|
24
|
+
connectFrameEncode,
|
|
25
|
+
connectFrameDecode,
|
|
26
|
+
} from "./protobuf.mjs";
|
|
27
|
+
import { ToolExecutor } from "./executor.mjs";
|
|
28
|
+
import { extractKey } from "./extract-key.mjs";
|
|
29
|
+
|
|
30
|
+
// ─── Error Classification ──────────────────────────────────
|
|
31
|
+
|
|
32
|
+
/**
 * Error thrown by the fetch layer, carrying a stable machine-readable code
 * so callers can branch on failure class instead of parsing messages.
 */
class FastContextError extends Error {
  /**
   * @param {string} message - human-readable description of the failure
   * @param {string} code - TIMEOUT | PAYLOAD_TOO_LARGE | RATE_LIMITED | AUTH_ERROR | SERVER_ERROR | NETWORK_ERROR
   * @param {Object} [details] - extra structured context (e.g. HTTP status)
   */
  constructor(message, code, details = {}) {
    super(message);
    Object.assign(this, { name: "FastContextError", code, details });
  }
}
|
|
48
|
+
|
|
49
|
+
/**
 * Map a raw fetch/HTTP failure onto a FastContextError with a stable code.
 * Already-classified errors pass through unchanged.
 * @param {Error} err
 * @returns {FastContextError}
 */
function _classifyError(err) {
  if (err instanceof FastContextError) return err;

  const status = err.status;
  if (status) {
    // HTTP status → structured code; anything unrecognized is a server-side failure.
    let code = "SERVER_ERROR";
    if (status === 413) code = "PAYLOAD_TOO_LARGE";
    else if (status === 429) code = "RATE_LIMITED";
    else if (status === 401 || status === 403) code = "AUTH_ERROR";
    return new FastContextError(err.message, code, { status });
  }

  // AbortSignal.timeout() surfaces as AbortError or TimeoutError depending on runtime.
  const isTimeout =
    err.name === "AbortError" || err.name === "TimeoutError" || /timeout/i.test(err.message);
  if (isTimeout) {
    return new FastContextError(err.message, "TIMEOUT");
  }

  // No HTTP status and not a timeout: treat as a transport-level problem.
  return new FastContextError(err.message, "NETWORK_ERROR");
}
|
|
74
|
+
|
|
75
|
+
// ─── Protocol Constants ────────────────────────────────────
|
|
76
|
+
|
|
77
|
+
// Windsurf self-serve Connect-RPC endpoints (reverse-engineered).
const API_BASE = "https://server.self-serve.windsurf.com/exa.api_server_pb.ApiServerService";
const AUTH_BASE = "https://server.self-serve.windsurf.com/exa.auth_pb.AuthService";
// Client identity reported to the server; versions are env-overridable to
// track upstream Windsurf releases without a code change.
const WS_APP = "windsurf";
const WS_APP_VER = process.env.WS_APP_VER || "1.48.2";
const WS_LS_VER = process.env.WS_LS_VER || "1.9544.35";
const WS_MODEL = process.env.WS_MODEL || "MODEL_SWE_1_6_FAST";
|
|
83
|
+
|
|
84
|
+
// ─── System Prompt Template ────────────────────────────────
|
|
85
|
+
|
|
86
|
+
const SYSTEM_PROMPT_TEMPLATE = `You are an expert software engineer, responsible for providing context \
|
|
87
|
+
to another engineer to solve a code issue in the current codebase. \
|
|
88
|
+
The user will present you with a description of the issue, and it is \
|
|
89
|
+
your job to provide a series of file paths with associated line ranges \
|
|
90
|
+
that contain ALL the information relevant to understand and correctly \
|
|
91
|
+
address the issue.
|
|
92
|
+
|
|
93
|
+
# IMPORTANT:
|
|
94
|
+
- A relevant file does not mean only the files that must be modified to \
|
|
95
|
+
solve the task. It means any file that contains information relevant to \
|
|
96
|
+
planning and implementing the fix, such as the definitions of classes \
|
|
97
|
+
and functions that are relevant to the pieces of code that will have to \
|
|
98
|
+
be modified.
|
|
99
|
+
- You should include enough context around the relevant lines to allow \
|
|
100
|
+
the engineer to understand the task correctly. You must include ENTIRE \
|
|
101
|
+
semantic blocks (functions, classes, definitions, etc). For example:
|
|
102
|
+
If addressing the issue requires modifying a method within a class, then \
|
|
103
|
+
you should include the entire class definition, not just the lines around \
|
|
104
|
+
the method we want to modify.
|
|
105
|
+
- NEVER truncate these blocks unless they are very large (hundreds of \
|
|
106
|
+
lines or more, in which case providing only a relevant portion of the \
|
|
107
|
+
block is acceptable).
|
|
108
|
+
- Your job is to essentially alleviate the job of the other engineer by \
|
|
109
|
+
giving them a clean starting context from which to start working. More \
|
|
110
|
+
precisely, you should minimize the number of files the engineer has to \
|
|
111
|
+
read to understand and solve the task correctly (while not providing \
|
|
112
|
+
irrelevant code snippets).
|
|
113
|
+
|
|
114
|
+
# ENVIRONMENT
|
|
115
|
+
- Working directory: /codebase. Make sure to run commands in this \
|
|
116
|
+
directory, not \`.
|
|
117
|
+
- Tool access: use the restricted_exec tool ONLY
|
|
118
|
+
- Allowed sub-commands (schema-enforced):
|
|
119
|
+
- rg: Search for patterns in files using ripgrep
|
|
120
|
+
- Required: pattern (string), path (string)
|
|
121
|
+
- Optional: include (array of globs), exclude (array of globs)
|
|
122
|
+
- readfile: Read contents of a file with optional line range
|
|
123
|
+
- Required: file (string)
|
|
124
|
+
- Optional: start_line (int), end_line (int) — 1-indexed, inclusive
|
|
125
|
+
- tree: Display directory structure as a tree
|
|
126
|
+
- Required: path (string)
|
|
127
|
+
- Optional: levels (int)
|
|
128
|
+
|
|
129
|
+
# THINKING RULES
|
|
130
|
+
- Think step-by-step. Plan, reason, and reflect before each tool call.
|
|
131
|
+
- Use tool calls liberally and purposefully to ground every conclusion \
|
|
132
|
+
in real code, not assumptions.
|
|
133
|
+
- If a command fails, rethink and try something different; do not \
|
|
134
|
+
complain to the user.
|
|
135
|
+
|
|
136
|
+
# FAST-SEARCH DEFAULTS (optimize rg/tree on large repos)
|
|
137
|
+
- Start NARROW, then widen only if needed. Prefer searching likely code \
|
|
138
|
+
roots first (e.g., \`src/\`, \`lib/\`, \`app/\`, \`packages/\`, \`services/\`) \
|
|
139
|
+
instead of \`/codebase\`.
|
|
140
|
+
- Prefer fixed-string search for literals: escape patterns or keep regex \
|
|
141
|
+
simple. Use smart case; avoid case-insensitive unless necessary.
|
|
142
|
+
- Prefer file-type filters and globs (in include) over full-repo scans.
|
|
143
|
+
- Default EXCLUDES for speed (apply via the exclude array): \
|
|
144
|
+
node_modules, .git, dist, build, coverage, .venv, venv, target, out, \
|
|
145
|
+
.cache, __pycache__, vendor, deps, third_party, logs, data, *.min.*
|
|
146
|
+
- Skip huge files where possible; when opening files, prefer reading \
|
|
147
|
+
only relevant ranges with readfile.
|
|
148
|
+
- Limit directory traversal with tree levels to quickly orient before \
|
|
149
|
+
deeper inspection.
|
|
150
|
+
|
|
151
|
+
# SOME EXAMPLES OF WORKFLOWS
|
|
152
|
+
- MAP – Use \`tree\` with small levels; \`rg\` on likely roots to grasp \
|
|
153
|
+
structure and hotspots.
|
|
154
|
+
- ANCHOR – \`rg\` for problem keywords and anchor symbols; restrict by \
|
|
155
|
+
language globs via include.
|
|
156
|
+
- TRACE – Follow imports with targeted \`rg\` in narrowed roots; open \
|
|
157
|
+
files with \`readfile\` scoped to entire semantic blocks.
|
|
158
|
+
- VERIFY – Confirm each candidate path exists by reading or additional \
|
|
159
|
+
searches; drop false positives (tests, vendored, generated) unless they \
|
|
160
|
+
must change.
|
|
161
|
+
|
|
162
|
+
# TOOL USE GUIDELINES
|
|
163
|
+
- You must use a SINGLE restricted_exec call in your answer, that lets \
|
|
164
|
+
you execute at most {max_commands} commands in a single turn. Each command must be \
|
|
165
|
+
an object with a \`type\` field of \`rg\`, \`readfile\`, or \`tree\` and the appropriate fields for that type.
|
|
166
|
+
- Example restricted_exec usage:
|
|
167
|
+
[TOOL_CALLS]restricted_exec[ARGS]{{
|
|
168
|
+
"command1": {{
|
|
169
|
+
"type": "rg",
|
|
170
|
+
"pattern": "Controller",
|
|
171
|
+
"path": "/codebase/slime",
|
|
172
|
+
"include": ["**/*.py"],
|
|
173
|
+
"exclude": ["**/node_modules/**", "**/.git/**", "**/dist/**", \
|
|
174
|
+
"**/build/**", "**/.venv/**", "**/__pycache__/**"]
|
|
175
|
+
}},
|
|
176
|
+
"command2": {{
|
|
177
|
+
"type": "readfile",
|
|
178
|
+
"file": "/codebase/slime/train.py",
|
|
179
|
+
"start_line": 1,
|
|
180
|
+
"end_line": 200
|
|
181
|
+
}},
|
|
182
|
+
"command3": {{
|
|
183
|
+
"type": "tree",
|
|
184
|
+
"path": "/codebase/slime/",
|
|
185
|
+
"levels": 2
|
|
186
|
+
}}
|
|
187
|
+
}}
|
|
188
|
+
- You have at most {max_turns} turns to interact with the environment by calling \
|
|
189
|
+
tools, so issuing multiple commands at once is necessary and encouraged \
|
|
190
|
+
to speed up your research.
|
|
191
|
+
- Each command result may be truncated to 50 lines; prefer multiple \
|
|
192
|
+
targeted reads/searches to build complete context.
|
|
193
|
+
- DO NOT EVER USE MORE THAN {max_commands} commands in a single turn, or you will \
|
|
194
|
+
be penalized.
|
|
195
|
+
|
|
196
|
+
# ANSWER FORMAT (strict format, including tags)
|
|
197
|
+
- You will output an XML structure with a root element "ANSWER" \
|
|
198
|
+
containing "file" elements. Each "file" element will have a "path" \
|
|
199
|
+
attribute and contain "range" elements.
|
|
200
|
+
- You will output this as your final response.
|
|
201
|
+
- The line ranges must be inclusive.
|
|
202
|
+
|
|
203
|
+
Output example inside the "answer" tool argument:
|
|
204
|
+
<ANSWER>
|
|
205
|
+
<file path="/codebase/info_theory/formulas/entropy.py">
|
|
206
|
+
<range>10-60</range>
|
|
207
|
+
<range>150-210</range>
|
|
208
|
+
</file>
|
|
209
|
+
<file path="/codebase/info_theory/data_structures/bits.py">
|
|
210
|
+
<range>1-40</range>
|
|
211
|
+
<range>110-170</range>
|
|
212
|
+
</file>
|
|
213
|
+
</ANSWER>
|
|
214
|
+
|
|
215
|
+
|
|
216
|
+
Remember: Prefer narrow, fixed-string, and type-filtered searches with \
|
|
217
|
+
aggressive excludes and size/depth limits. Widen scope only as needed. \
|
|
218
|
+
Use the restricted tools available to you, and output your answer in \
|
|
219
|
+
exactly the specified format.
|
|
220
|
+
|
|
221
|
+
# NO RESULTS POLICY
|
|
222
|
+
If after thorough searching you are confident that NO relevant files exist \
|
|
223
|
+
for the given query (e.g., the function/class/concept does not exist in the \
|
|
224
|
+
codebase), you MUST return an empty ANSWER:
|
|
225
|
+
<ANSWER></ANSWER>
|
|
226
|
+
Do NOT return irrelevant files (such as entry points or config files) just \
|
|
227
|
+
to provide some output. An empty answer is always better than a misleading one.
|
|
228
|
+
|
|
229
|
+
# RESULT COUNT
|
|
230
|
+
Aim to return at most {max_results} files in your answer. Focus on the most \
|
|
231
|
+
relevant files first. If fewer files are relevant, return fewer.
|
|
232
|
+
`;
|
|
233
|
+
|
|
234
|
+
const FINAL_FORCE_ANSWER =
|
|
235
|
+
"You have no turns left. Now you MUST provide your final ANSWER, even if it's not complete.";
|
|
236
|
+
|
|
237
|
+
/**
 * Shrink an over-long conversation before a retry, in place.
 * Keeps the system prompt and user query (first two entries) plus the two
 * most recent messages, inserting a bridge note so the model knows earlier
 * rounds were dropped.
 * @param {Array} messages - mutated in place
 * @returns {boolean} true if anything was actually dropped
 */
function _trimMessages(messages) {
  if (messages.length <= 4) return false;
  const bridge = {
    role: 1,
    content: "[Prior search rounds omitted to reduce payload. Provide your best answer based on available context.]",
  };
  const kept = [...messages.slice(0, 2), bridge, ...messages.slice(-2)];
  messages.splice(0, messages.length, ...kept);
  return true;
}
|
|
256
|
+
|
|
257
|
+
/**
 * Render the system prompt with this run's turn/command/result budgets.
 * @param {number} maxTurns
 * @param {number} maxCommands
 * @param {number} maxResults
 * @returns {string}
 */
function buildSystemPrompt(maxTurns = 3, maxCommands = 8, maxResults = 10) {
  const substitutions = {
    "{max_turns}": maxTurns,
    "{max_commands}": maxCommands,
    "{max_results}": maxResults,
  };
  let prompt = SYSTEM_PROMPT_TEMPLATE;
  for (const [placeholder, value] of Object.entries(substitutions)) {
    prompt = prompt.replaceAll(placeholder, String(value));
  }
  return prompt;
}
|
|
269
|
+
|
|
270
|
+
// ─── Tool Schema ───────────────────────────────────────────
|
|
271
|
+
|
|
272
|
+
// JSON Schema for one slot of the restricted_exec tool call; the model must
// match exactly one of the five allowed command shapes.
function _buildCommandSchema(n) {
  const rgVariant = {
    properties: {
      type: { type: "string", const: "rg", description: "Search for patterns in files using ripgrep." },
      pattern: { type: "string", description: "The regex pattern to search for." },
      path: { type: "string", description: "The path to search in." },
      include: { type: "array", items: { type: "string" }, description: "File patterns to include." },
      exclude: { type: "array", items: { type: "string" }, description: "File patterns to exclude." },
    },
    required: ["type", "pattern", "path"],
  };
  const readfileVariant = {
    properties: {
      type: { type: "string", const: "readfile", description: "Read contents of a file with optional line range." },
      file: { type: "string", description: "Path to the file to read." },
      start_line: { type: "integer", description: "Starting line number (1-indexed)." },
      end_line: { type: "integer", description: "Ending line number (1-indexed)." },
    },
    required: ["type", "file"],
  };
  const treeVariant = {
    properties: {
      type: { type: "string", const: "tree", description: "Display directory structure as a tree." },
      path: { type: "string", description: "Path to the directory." },
      levels: { type: "integer", description: "Number of directory levels." },
    },
    required: ["type", "path"],
  };
  const lsVariant = {
    properties: {
      type: { type: "string", const: "ls", description: "List files in a directory." },
      path: { type: "string", description: "Path to the directory." },
      long_format: { type: "boolean" },
      all: { type: "boolean" },
    },
    required: ["type", "path"],
  };
  const globVariant = {
    properties: {
      type: { type: "string", const: "glob", description: "Find files matching a glob pattern." },
      pattern: { type: "string" },
      path: { type: "string" },
      type_filter: { type: "string", enum: ["file", "directory", "all"] },
    },
    required: ["type", "pattern", "path"],
  };
  return {
    type: "object",
    description: `Command ${n} to execute. Must be one of: rg, readfile, or tree.`,
    oneOf: [rgVariant, readfileVariant, treeVariant, lsVariant, globVariant],
  };
}
|
|
325
|
+
|
|
326
|
+
/**
 * Build the JSON tool-definition string sent with each request:
 * restricted_exec (with `maxCommands` parallel command slots) plus answer.
 * @param {number} maxCommands
 * @returns {string} JSON-encoded array of tool definitions
 */
function getToolDefinitions(maxCommands = 8) {
  // commandN slots, in order, each carrying the oneOf command schema.
  const commandSlots = Object.fromEntries(
    Array.from({ length: maxCommands }, (_, i) => [`command${i + 1}`, _buildCommandSchema(i + 1)])
  );
  const restrictedExec = {
    type: "function",
    function: {
      name: "restricted_exec",
      description: "Execute restricted commands (rg, readfile, tree, ls, glob) in parallel.",
      parameters: { type: "object", properties: commandSlots, required: ["command1"] },
    },
  };
  const answerTool = {
    type: "function",
    function: {
      name: "answer",
      description: "Final answer with relevant files and line ranges.",
      parameters: {
        type: "object",
        properties: { answer: { type: "string", description: "The final answer in XML format." } },
        required: ["answer"],
      },
    },
  };
  return JSON.stringify([restrictedExec, answerTool]);
}
|
|
359
|
+
|
|
360
|
+
// ─── Credentials ───────────────────────────────────────────
|
|
361
|
+
|
|
362
|
+
/**
 * Try to pull a Windsurf API key out of a local Windsurf installation.
 * @returns {string|null} the key, or null if none could be extracted
 */
function autoDiscoverApiKey() {
  try {
    const { api_key: discovered } = extractKey();
    if (discovered && discovered.startsWith("sk-")) {
      return discovered;
    }
  } catch {
    // extractKey probes local installs; any failure simply means "not found".
  }
  return null;
}
|
|
377
|
+
|
|
378
|
+
/**
 * Resolve the Windsurf API key: env var first, then local auto-discovery.
 * @returns {string}
 * @throws {Error} when no key can be found by either method
 */
function getApiKey() {
  const fromEnv = process.env.WINDSURF_API_KEY;
  if (fromEnv) return fromEnv;

  const fromInstall = autoDiscoverApiKey();
  if (fromInstall) return fromInstall;

  throw new Error(
    "Windsurf API Key not found. Set WINDSURF_API_KEY env var or ensure Windsurf is logged in. " +
      "Run extract-key.mjs to see extraction methods."
  );
}
|
|
392
|
+
|
|
393
|
+
// ─── JWT Cache ──────────────────────────────────────────────
|
|
394
|
+
|
|
395
|
+
/** @type {Map<string, { token: string, expiresAt: number }>} */
|
|
396
|
+
const _jwtCache = new Map();
|
|
397
|
+
|
|
398
|
+
/**
 * Extract the `exp` claim from a JWT without verifying its signature.
 * @param {string} jwt
 * @returns {number} expiration timestamp in seconds, or 0 when absent/unparseable
 */
function _getJwtExp(jwt) {
  try {
    const segments = jwt.split(".");
    if (segments.length < 2) return 0;
    const decodedPayload = Buffer.from(segments[1], "base64url").toString("utf-8");
    const claims = JSON.parse(decodedPayload);
    return claims.exp || 0;
  } catch {
    // Malformed token or non-JSON payload — treat as "no expiry known".
    return 0;
  }
}
|
|
413
|
+
|
|
414
|
+
/**
 * Return a cached JWT for this API key, fetching a fresh one when the cache
 * is empty or the token is within 60 seconds of expiring.
 * @param {string} apiKey
 * @returns {Promise<string>}
 */
async function getCachedJwt(apiKey) {
  const nowSec = Math.floor(Date.now() / 1000);
  const entry = _jwtCache.get(apiKey);
  if (entry !== undefined && entry.expiresAt > nowSec + 60) {
    return entry.token;
  }
  const token = await fetchJwt(apiKey);
  // Tokens without a readable exp claim get a 1-hour assumed lifetime.
  const exp = _getJwtExp(token);
  _jwtCache.set(apiKey, { token, expiresAt: exp || nowSec + 3600 });
  return token;
}
|
|
429
|
+
|
|
430
|
+
// ─── TLS Fallback ──────────────────────────────────────────
|
|
431
|
+
// Match Python's SSL fallback: if NODE_TLS_REJECT_UNAUTHORIZED is not set
|
|
432
|
+
// and the first fetch fails with a TLS error, disable cert verification.
|
|
433
|
+
let _tlsFallbackApplied = false;
|
|
434
|
+
|
|
435
|
+
// Disable TLS cert verification process-wide after a connection failure,
// once, and only when the user has not already set the env var themselves.
function _applyTlsFallback() {
  if (_tlsFallbackApplied || process.env.NODE_TLS_REJECT_UNAUTHORIZED) return;
  process.env.NODE_TLS_REJECT_UNAUTHORIZED = "0";
  _tlsFallbackApplied = true;
  process.stderr.write(
    "[fast-context] WARNING: TLS certificate verification disabled due to connection failure. " +
      "Set NODE_TLS_REJECT_UNAUTHORIZED=0 explicitly to suppress this warning.\n"
  );
}
|
|
445
|
+
|
|
446
|
+
// ─── Network Layer ─────────────────────────────────────────
|
|
447
|
+
|
|
448
|
+
/**
 * Unary Connect-RPC POST with application/proto content type.
 * On a first-fetch failure the request is retried once with TLS certificate
 * verification disabled (see _applyTlsFallback).
 * @param {string} url
 * @param {Buffer} protoBytes
 * @param {boolean} [compress=true] - gzip the request body
 * @returns {Promise<Buffer>} raw response body
 * @throws {FastContextError} classified network/HTTP failure
 */
async function _unaryRequest(url, protoBytes, compress = true) {
  const headers = {
    "Content-Type": "application/proto",
    "Connect-Protocol-Version": "1",
    "User-Agent": "connect-go/1.18.1 (go1.25.5)",
    "Accept-Encoding": "gzip",
  };
  const body = compress ? gzipSync(protoBytes) : protoBytes;
  if (compress) {
    headers["Content-Encoding"] = "gzip";
  }

  const doFetch = () =>
    fetch(url, { method: "POST", headers, body, signal: AbortSignal.timeout(30000) });

  let resp;
  try {
    resp = await doFetch();
  } catch {
    // Possibly a TLS handshake problem — retry once with verification off.
    _applyTlsFallback();
    try {
      resp = await doFetch();
    } catch (retryErr) {
      throw _classifyError(retryErr);
    }
  }

  if (!resp.ok) {
    const httpErr = new Error(`HTTP ${resp.status}`);
    httpErr.status = resp.status;
    throw _classifyError(httpErr);
  }

  return Buffer.from(await resp.arrayBuffer());
}
|
|
500
|
+
|
|
501
|
+
/**
 * Connect-RPC streaming POST to GetDevstralStream with bounded retry.
 * Retries up to `maxRetries` times with linear backoff (1s, 2s, ...);
 * 4xx responses other than 429 are never retried. On the first attempt a
 * fetch-level failure triggers the TLS fallback and one immediate re-fetch.
 * @param {Buffer} protoBytes - already-encoded request message (framed here)
 * @param {number} [timeoutMs=30000] - server-side Connect timeout
 * @param {number} [maxRetries=2]
 * @returns {Promise<Buffer>} raw framed response body
 * @throws {FastContextError} classified failure after retries are exhausted
 */
async function _streamingRequest(protoBytes, timeoutMs = 30000, maxRetries = 2) {
  const frame = connectFrameEncode(protoBytes);
  const url = `${API_BASE}/GetDevstralStream`;
  // Sentry-style trace identifiers: 32-hex trace id, 16-hex span id.
  const traceId = randomUUID().replace(/-/g, "");
  const spanId = randomUUID().replace(/-/g, "").slice(0, 16);
  const baseTimeoutMs = Number.isFinite(timeoutMs) ? timeoutMs : 30000;
  // Local abort fires 5s after the server-side timeout so the server's
  // own timeout response can arrive first.
  const abortMs = baseTimeoutMs + 5000;

  const headers = {
    "Content-Type": "application/connect+proto",
    "Connect-Protocol-Version": "1",
    "Connect-Accept-Encoding": "gzip",
    "Connect-Content-Encoding": "gzip",
    "Connect-Timeout-Ms": String(baseTimeoutMs),
    "User-Agent": "connect-go/1.18.1 (go1.25.5)",
    "Accept-Encoding": "identity",
    "Baggage": `sentry-release=language-server-windsurf@${WS_LS_VER},` +
      `sentry-environment=stable,sentry-sampled=false,` +
      `sentry-trace_id=${traceId},` +
      `sentry-public_key=b813f73488da69eedec534dba1029111`,
    "Sentry-Trace": `${traceId}-${spanId}-0`,
  };

  const doFetch = () => fetch(url, {
    method: "POST",
    headers,
    body: frame,
    signal: AbortSignal.timeout(abortMs),
  });

  let lastErr;
  for (let attempt = 0; attempt <= maxRetries; attempt++) {
    try {
      let resp;
      try {
        resp = await doFetch();
      } catch (e) {
        if (attempt === 0) {
          // First attempt only: assume a possible TLS issue and re-fetch
          // with verification disabled; a second failure falls through to
          // the outer catch and the normal retry/backoff path.
          _applyTlsFallback();
          resp = await doFetch();
        } else {
          throw e;
        }
      }

      if (!resp.ok) {
        const err = new Error(`HTTP ${resp.status}`);
        err.status = resp.status;
        // Don't retry on 4xx client errors (except 429)
        if (resp.status >= 400 && resp.status < 500 && resp.status !== 429) {
          throw err;
        }
        lastErr = err;
        if (attempt < maxRetries) {
          await new Promise((r) => setTimeout(r, 1000 * (attempt + 1)));
          continue;
        }
        throw err;
      }

      const arrayBuf = await resp.arrayBuffer();
      return Buffer.from(arrayBuf);
    } catch (e) {
      lastErr = e;
      // Don't retry on 4xx client errors (except 429)
      if (e.status && e.status >= 400 && e.status < 500 && e.status !== 429) {
        throw _classifyError(e);
      }
      if (attempt < maxRetries) {
        await new Promise((r) => setTimeout(r, 1000 * (attempt + 1)));
        continue;
      }
    }
  }
  throw _classifyError(lastErr);
}
|
|
584
|
+
|
|
585
|
+
/**
 * Exchange the API key for a JWT via AuthService/GetUserJwt.
 * @param {string} apiKey
 * @returns {Promise<string>} the JWT
 * @throws {Error} when the response contains no recognizable JWT
 */
async function fetchJwt(apiKey) {
  // Minimal client metadata — same field layout used elsewhere, without
  // the system/CPU info fields.
  const meta = new ProtobufEncoder();
  meta.writeString(1, WS_APP);
  meta.writeString(2, WS_APP_VER);
  meta.writeString(3, apiKey);
  meta.writeString(4, "zh-cn");
  meta.writeString(7, WS_LS_VER);
  meta.writeString(12, WS_APP);
  meta.writeBytes(30, Buffer.from([0x00, 0x01]));

  const request = new ProtobufEncoder();
  request.writeMessage(1, meta);

  const resp = await _unaryRequest(`${AUTH_BASE}/GetUserJwt`, request.toBuffer(), false);
  // A JWT header base64-encodes '{"' as "eyJ" and uses dot separators.
  for (const candidate of extractStrings(resp)) {
    if (candidate.startsWith("eyJ") && candidate.includes(".")) {
      return candidate;
    }
  }
  throw new Error("Failed to extract JWT from GetUserJwt response");
}
|
|
611
|
+
|
|
612
|
+
/**
 * Check the per-user message rate limit. Fails open: only a definite
 * rate-limit response blocks the caller; network problems do not.
 * @param {string} apiKey
 * @param {string} jwt
 * @returns {Promise<boolean>} true if OK to proceed, false if rate-limited
 */
async function checkRateLimit(apiKey, jwt) {
  const req = new ProtobufEncoder();
  req.writeMessage(1, _buildMetadata(apiKey, jwt));
  req.writeString(3, WS_MODEL);

  try {
    await _unaryRequest(`${API_BASE}/CheckUserMessageRateLimit`, req.toBuffer(), true);
    return true;
  } catch (e) {
    // BUG FIX: _unaryRequest throws FastContextError, which carries the HTTP
    // status in `code`/`details.status` rather than a top-level `status`, so
    // the previous `e.status === 429` check could never fire. Accept any of
    // the shapes so a real 429 is actually detected.
    if (e.code === "RATE_LIMITED" || e.status === 429 || e.details?.status === 429) {
      return false;
    }
    return true; // Don't block on network issues
  }
}
|
|
631
|
+
|
|
632
|
+
// ─── Request Building ──────────────────────────────────────
|
|
633
|
+
|
|
634
|
+
/**
 * Build the protobuf client-metadata message (app identity, OS/CPU info as
 * JSON blobs, API key, JWT). Writes are kept in ascending field-number order.
 * @param {string} apiKey
 * @param {string} jwt
 * @returns {ProtobufEncoder} encoder holding the metadata message
 */
function _buildMetadata(apiKey, jwt) {
  const meta = new ProtobufEncoder();
  meta.writeString(1, WS_APP);       // field 1: app name
  meta.writeString(2, WS_APP_VER);   // field 2: app version
  meta.writeString(3, apiKey);       // field 3: API key
  meta.writeString(4, "zh-cn");      // field 4: locale (fixed value)

  const plat = platform();
  // uname-style OS fingerprint, serialized as JSON into field 5.
  const sysInfo = {
    Os: plat,
    Arch: arch(),
    Release: release(),
    Version: osVersion(),
    Machine: arch(),
    Nodename: hostname(),
    Sysname: plat === "darwin" ? "Darwin" : plat === "win32" ? "Windows_NT" : "Linux",
    ProductVersion: "",
  };
  meta.writeString(5, JSON.stringify(sysInfo));
  meta.writeString(7, WS_LS_VER);    // field 7: language-server version

  const cpuList = cpus();
  // cpus() can return [] in some environments; assume 4 cores then.
  const ncpu = cpuList.length || 4;
  const mem = totalmem();
  // CPU/memory fingerprint, serialized as JSON into field 8.
  const cpuInfo = {
    NumSockets: 1,
    NumCores: ncpu,
    NumThreads: ncpu,
    VendorID: "",
    Family: "0",
    Model: "0",
    ModelName: cpuList[0]?.model || "Unknown",
    Memory: mem,
  };
  meta.writeString(8, JSON.stringify(cpuInfo));
  meta.writeString(12, WS_APP);      // field 12: app name again (mirrors field 1)
  meta.writeString(21, jwt);         // field 21: auth JWT
  meta.writeBytes(30, Buffer.from([0x00, 0x01])); // field 30: opaque flag bytes
  return meta;
}
|
|
680
|
+
|
|
681
|
+
/**
 * Encode one chat message as a protobuf submessage.
 * @param {number} role - 1=user, 2=assistant, 4=tool_result, 5=system
 * @param {string} content - message text
 * @param {Object} [opts]
 * @param {string} [opts.toolCallId] - id of an assistant-issued tool call
 * @param {string} [opts.toolName]
 * @param {string} [opts.toolArgsJson]
 * @param {string} [opts.refCallId] - id of the call a tool_result answers
 * @returns {ProtobufEncoder}
 */
function _buildChatMessage(role, content, opts = {}) {
  const { toolCallId, toolName, toolArgsJson, refCallId } = opts;
  const msg = new ProtobufEncoder();
  msg.writeVarint(2, role);
  msg.writeString(3, content);

  // Attach an assistant tool call only when all three parts are present.
  if (toolCallId && toolName && toolArgsJson) {
    const call = new ProtobufEncoder();
    call.writeString(1, toolCallId);
    call.writeString(2, toolName);
    call.writeString(3, toolArgsJson);
    msg.writeMessage(6, call);
  }

  if (refCallId) {
    msg.writeString(7, refCallId);
  }

  return msg;
}
|
|
711
|
+
|
|
712
|
+
/**
 * Assemble a complete request: metadata (field 1), each chat message
 * (repeated field 2), and the JSON tool definitions (field 3).
 * @param {string} apiKey
 * @param {string} jwt
 * @param {Array} messages
 * @param {string} toolDefs - JSON string from getToolDefinitions
 * @returns {Buffer} serialized request
 */
function _buildRequest(apiKey, jwt, messages, toolDefs) {
  const req = new ProtobufEncoder();
  req.writeMessage(1, _buildMetadata(apiKey, jwt));

  for (const { role, content, tool_call_id, tool_name, tool_args_json, ref_call_id } of messages) {
    const encoded = _buildChatMessage(role, content, {
      toolCallId: tool_call_id,
      toolName: tool_name,
      toolArgsJson: tool_args_json,
      refCallId: ref_call_id,
    });
    req.writeMessage(2, encoded);
  }

  req.writeString(3, toolDefs);
  return req.toBuffer();
}
|
|
737
|
+
|
|
738
|
+
// ─── Response Parsing ──────────────────────────────────────
|
|
739
|
+
|
|
740
|
+
/**
|
|
741
|
+
* Strip invalid UTF-8 bytes from a Buffer → clean string.
|
|
742
|
+
* Matches Python's bytes.decode("utf-8", errors="ignore").
|
|
743
|
+
* @param {Buffer} buf
|
|
744
|
+
* @returns {string}
|
|
745
|
+
*/
|
|
746
|
+
function stripInvalidUtf8(buf) {
  // Node's decoder substitutes U+FFFD for each malformed byte sequence;
  // removing those characters approximates Python's errors="ignore".
  const decoded = buf.toString("utf-8");
  return decoded.split("\ufffd").join("");
}
|
|
749
|
+
|
|
750
|
+
/**
|
|
751
|
+
* Parse tool call from [TOOL_CALLS]name[ARGS]{json} format.
|
|
752
|
+
* @param {string} text
|
|
753
|
+
* @returns {[string, string, Object]|null} [thinking, name, args] or null
|
|
754
|
+
*/
|
|
755
|
+
function _parseToolCall(text) {
  // Drop end-of-sequence markers the model sometimes emits.
  text = text.replace(/<\/s>/g, "");
  const m = text.match(/\[TOOL_CALLS\](\w+)\[ARGS\](\{.+)/s);
  if (!m) return null;

  const name = m[1];
  const raw = m[2].trim();

  // Find the matching closing brace of the top-level JSON object.
  // FIX: the scan is string-aware — braces inside JSON string values
  // (e.g. an rg pattern containing a literal "}") no longer terminate
  // the object early, which previously made JSON.parse fail and the
  // whole tool call be dropped.
  let depth = 0;
  let end = 0;
  let inString = false;
  let escaped = false;
  for (let i = 0; i < raw.length; i++) {
    const c = raw[i];
    if (escaped) {
      // Character after a backslash inside a string — always literal.
      escaped = false;
      continue;
    }
    if (inString) {
      if (c === "\\") escaped = true;
      else if (c === '"') inString = false;
      continue;
    }
    if (c === '"') {
      inString = true;
    } else if (c === "{") {
      depth++;
    } else if (c === "}") {
      depth--;
      if (depth === 0) {
        end = i + 1;
        break;
      }
    }
  }
  // Unterminated object: fall back to parsing everything (will fail below
  // if truly malformed, matching the original behavior).
  if (end === 0) end = raw.length;

  let args;
  try {
    args = JSON.parse(raw.slice(0, end));
  } catch {
    return null;
  }

  // Everything before the [TOOL_CALLS] marker is the model's reasoning text.
  const thinking = text.slice(0, m.index).trim();
  return [thinking, name, args];
}
|
|
788
|
+
|
|
789
|
+
/**
|
|
790
|
+
* Parse streaming response: decode frames, extract text, parse tool calls.
|
|
791
|
+
* @param {Buffer} data
|
|
792
|
+
* @returns {[string, [string, Object]|null]} [text, toolInfo]
|
|
793
|
+
*/
|
|
794
|
+
function _parseResponse(data) {
  let collected = "";

  for (const frame of connectFrameDecode(data)) {
    // A server error arrives as a JSON object frame: {"error":{code,message}}.
    try {
      const candidate = frame.toString("utf-8");
      if (candidate.startsWith("{")) {
        const maybeErr = JSON.parse(candidate);
        if (maybeErr.error) {
          const code = maybeErr.error.code || "unknown";
          const msg = maybeErr.error.message || "";
          return [`[Error] ${code}: ${msg}`, null];
        }
      }
    } catch {
      // Not a JSON error frame — fall through to text extraction.
    }

    // Strip invalid UTF-8 (matches Python errors="ignore"); a frame carrying
    // the tool-call marker supersedes everything gathered so far.
    const cleaned = stripInvalidUtf8(frame);
    if (cleaned.includes("[TOOL_CALLS]")) {
      collected = cleaned;
      break;
    }

    // Otherwise accumulate embedded strings long enough to be real text.
    for (const piece of extractStrings(frame)) {
      if (piece.length > 10) {
        collected += piece;
      }
    }
  }

  const call = _parseToolCall(collected);
  if (call !== null) {
    const [thinking, name, args] = call;
    return [thinking, [name, args]];
  }
  return [collected, null];
}
|
|
835
|
+
|
|
836
|
+
// ─── Core Search ───────────────────────────────────────────
|
|
837
|
+
|
|
838
|
+
// Max safe tree size in bytes (server payload limit ~346KB, fixed overhead ~26KB,
// leave room for conversation accumulation across rounds).
// getRepoMap() uses this threshold to decide when to fall back to a shallower depth.
const MAX_TREE_BYTES = 250 * 1024;
|
|
841
|
+
|
|
842
|
+
/**
|
|
843
|
+
* Convert an exclude pattern (directory/file name or simple glob) to RegExp
|
|
844
|
+
* for tree-node-cli's exclude option.
|
|
845
|
+
* @param {string} pattern - e.g. "node_modules", "dist", "*.min.*"
|
|
846
|
+
* @returns {RegExp}
|
|
847
|
+
*/
|
|
848
|
+
function _excludePatternToRegex(pattern) {
  // No glob metacharacters: anchor an exact, fully-escaped literal match.
  if (!/[*?]/.test(pattern)) {
    const literal = pattern.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
    return new RegExp(`^${literal}$`);
  }

  // Glob: escape every regex special EXCEPT * and ?, then translate the
  // glob metacharacters (* → .*, ? → .).
  const body = pattern
    .replace(/[.+^${}()|[\]\\]/g, "\\$&")
    .replace(/\*/g, ".*")
    .replace(/\?/g, ".");
  return new RegExp(`^${body}$`);
}
|
|
864
|
+
|
|
865
|
+
/**
|
|
866
|
+
* Get a directory tree of the project with adaptive depth fallback.
|
|
867
|
+
*
|
|
868
|
+
* Tries the requested depth first. If the tree output exceeds MAX_TREE_BYTES,
|
|
869
|
+
* automatically falls back to lower depths until it fits.
|
|
870
|
+
*
|
|
871
|
+
* @param {string} projectRoot
|
|
872
|
+
* @param {number} [targetDepth=3] - Desired tree depth (1-6)
|
|
873
|
+
* @param {string[]} [excludePaths=[]] - Patterns to exclude from tree
|
|
874
|
+
* @returns {{ tree: string, depth: number, sizeBytes: number, fellBack: boolean }}
|
|
875
|
+
*/
|
|
876
|
+
function getRepoMap(projectRoot, targetDepth = 3, excludePaths = []) {
  // Regex matching the absolute project root anywhere in the tree output.
  const rootPattern = new RegExp(projectRoot.replace(/[.*+?^${}()|[\]\\]/g, "\\$&"), "g");
  // FIX: split on either separator. The old `split("/").pop() || split("\\").pop()`
  // never reached the backslash branch on Windows, because split("/") on a
  // backslash path returns the whole (truthy) path — leaving dirName wrong and
  // the basename root-line replacement below silently broken there.
  const dirName = projectRoot.split(/[\\/]/).pop() || projectRoot;
  const excludeRegexes = excludePaths.length ? excludePaths.map(_excludePatternToRegex) : [];

  // Try the requested depth first, then progressively shallower depths
  // until the rendered tree fits under MAX_TREE_BYTES.
  for (let L = targetDepth; L >= 1; L--) {
    try {
      const opts = { maxDepth: L };
      if (excludeRegexes.length) opts.exclude = excludeRegexes;
      const stdout = treeNodeCli(projectRoot, opts);
      // tree-node-cli outputs basename as root line; replace with /codebase
      let treeStr = stdout.replace(rootPattern, "/codebase");
      // Also replace the basename root line (first line) if full path wasn't matched
      const lines = treeStr.split("\n");
      if (lines[0] === dirName) {
        lines[0] = "/codebase";
        treeStr = lines.join("\n");
      }
      const sizeBytes = Buffer.byteLength(treeStr, "utf-8");

      if (sizeBytes <= MAX_TREE_BYTES) {
        return { tree: treeStr, depth: L, sizeBytes, fellBack: L < targetDepth };
      }
      // Too large, try lower depth
    } catch {
      // tree failed at this level, try lower
    }
  }

  // Ultimate fallback: simple ls (also respects excludePaths)
  try {
    let entries = readdirSync(projectRoot).sort();
    if (excludeRegexes.length) {
      entries = entries.filter((e) => !excludeRegexes.some((rx) => rx.test(e)));
    }
    const treeStr = ["/codebase", ...entries.map((e) => `├── ${e}`)].join("\n");
    return { tree: treeStr, depth: 0, sizeBytes: Buffer.byteLength(treeStr, "utf-8"), fellBack: true };
  } catch {
    const treeStr = "/codebase\n(empty or inaccessible)";
    // Use Buffer.byteLength for consistency with the other return paths
    // (identical value here, since the string is pure ASCII).
    return { tree: treeStr, depth: 0, sizeBytes: Buffer.byteLength(treeStr, "utf-8"), fellBack: true };
  }
}
|
|
918
|
+
|
|
919
|
+
/**
|
|
920
|
+
* Parse answer XML into structured file + range data.
|
|
921
|
+
* @param {string} xmlText
|
|
922
|
+
* @param {string} projectRoot
|
|
923
|
+
* @returns {{ files: Array }}
|
|
924
|
+
*/
|
|
925
|
+
function _parseAnswer(xmlText, projectRoot) {
  const resolvedRoot = resolve(projectRoot);
  const files = [];

  // Walk every <file path="...">...</file> element in the answer XML.
  for (const fileMatch of xmlText.matchAll(/<file\s+path="([^"]+)">([\s\S]*?)<\/file>/g)) {
    // Virtual paths come back as /codebase/<rel>; strip the prefix.
    const rel = fileMatch[1].replace(/^\/codebase\/?/, "");

    // Path safety: reject traversal attempts (../) and paths outside project root.
    const fullPath = resolve(projectRoot, rel);
    const insideRoot = fullPath === resolvedRoot || fullPath.startsWith(resolvedRoot + sep);
    if (!insideRoot) continue;

    // Collect every <range>start-end</range> inside this file element.
    const ranges = [...fileMatch[2].matchAll(/<range>(\d+)-(\d+)<\/range>/g)].map(
      ([, lo, hi]) => [Number.parseInt(lo, 10), Number.parseInt(hi, 10)]
    );

    files.push({ path: rel, full_path: fullPath, ranges });
  }

  return { files };
}
|
|
951
|
+
|
|
952
|
+
/**
|
|
953
|
+
* Execute Fast Context search.
|
|
954
|
+
*
|
|
955
|
+
* @param {Object} opts
|
|
956
|
+
* @param {string} opts.query - Natural language search query
|
|
957
|
+
* @param {string} opts.projectRoot - Project root directory
|
|
958
|
+
* @param {string} [opts.apiKey] - Windsurf API key (auto-discovered if not set)
|
|
959
|
+
* @param {string} [opts.jwt] - JWT token (auto-fetched if not set)
|
|
960
|
+
* @param {number} [opts.maxTurns=3] - Search rounds
|
|
961
|
+
* @param {number} [opts.maxCommands=8] - Max commands per round
|
|
962
|
+
* @param {number} [opts.maxResults=10] - Max number of files to return
|
|
963
|
+
* @param {number} [opts.treeDepth=3] - Directory tree depth for repo map (1-6, auto fallback)
|
|
964
|
+
* @param {number} [opts.timeoutMs=30000] - Connect-Timeout-Ms for streaming requests
|
|
965
|
+
* @param {string[]} [opts.excludePaths=[]] - Patterns to exclude from tree
|
|
966
|
+
* @param {function} [opts.onProgress] - Progress callback
|
|
967
|
+
* @returns {Promise<Object>}
|
|
968
|
+
*/
|
|
969
|
+
export async function search({
  query,
  projectRoot,
  apiKey = null,
  jwt = null,
  maxTurns = 3,
  maxCommands = 8,
  maxResults = 10,
  treeDepth = 3,
  timeoutMs = 30000,
  excludePaths = [],
  onProgress = null,
}) {
  // Progress logging is a no-op unless the caller supplied onProgress.
  const log = (msg) => onProgress?.(msg);
  projectRoot = resolve(projectRoot);

  // Get credentials
  // NOTE(review): getApiKey/getCachedJwt are defined elsewhere in this file;
  // presumably getCachedJwt caches the token per API key — confirm there.
  if (!apiKey) {
    apiKey = getApiKey();
  }
  if (!jwt) {
    log("Fetching JWT...");
    jwt = await getCachedJwt(apiKey);
  }

  // Check rate limit
  log("Checking rate limit...");
  if (!(await checkRateLimit(apiKey, jwt))) {
    return { files: [], error: "Rate limited, please try again later" };
  }

  // Local tool runner rooted at the project; it also accumulates rg patterns
  // (executor.collectedRgPatterns) surfaced in the final result.
  const executor = new ToolExecutor(projectRoot);
  const toolDefs = getToolDefinitions(maxCommands);
  const systemPrompt = buildSystemPrompt(maxTurns, maxCommands, maxResults);

  // Build the repo map; getRepoMap auto-reduces depth when the tree is too big.
  const { tree: repoMap, depth: actualDepth, sizeBytes: treeSizeBytes, fellBack } = getRepoMap(projectRoot, treeDepth, excludePaths);
  log(`Repo map: tree -L ${actualDepth} (${(treeSizeBytes / 1024).toFixed(1)}KB)${fellBack ? ` [fell back from L=${treeDepth}]` : ""}`);
  const userContent = `Problem Statement: ${query}\n\nRepo Map (tree -L ${actualDepth} /codebase):\n\`\`\`text\n${repoMap}\n\`\`\``;

  // Role codes: 1=user, 2=assistant, 4=tool_result, 5=system (see _buildChatMessage).
  const messages = [
    { role: 5, content: systemPrompt },
    { role: 1, content: userContent },
  ];

  // Total API calls = maxTurns + 1 (last round for answer)
  const totalApiCalls = maxTurns + 1;

  for (let turn = 0; turn < totalApiCalls; turn++) {
    log(`Turn ${turn + 1}/${totalApiCalls}`);

    const proto = _buildRequest(apiKey, jwt, messages, toolDefs);
    let respData;
    try {
      respData = await _streamingRequest(proto, timeoutMs);
    } catch (e) {
      const errCode = e.code || "UNKNOWN";
      const baseMeta = { treeDepth: actualDepth, treeSizeKB: +(treeSizeBytes / 1024).toFixed(1), fellBack, projectRoot, errorCode: errCode };

      // Auto-retry with trimmed context on payload/timeout errors
      // — only when there is conversation history to trim (> 4 messages).
      if ((errCode === "PAYLOAD_TOO_LARGE" || errCode === "TIMEOUT") && messages.length > 4) {
        log(`${errCode} on turn ${turn + 1}: trimming context and retrying...`);
        _trimMessages(messages);
        const retryProto = _buildRequest(apiKey, jwt, messages, toolDefs);
        try {
          respData = await _streamingRequest(retryProto, timeoutMs);
        } catch (retryErr) {
          const retryCode = retryErr.code || errCode;
          return {
            files: [],
            error: `${retryCode}: ${retryErr.message} (retry after context trim also failed)`,
            _meta: { ...baseMeta, errorCode: retryCode, contextTrimmed: true },
          };
        }
      } else {
        // Non-retryable (or nothing left to trim): surface the error with metadata.
        return {
          files: [],
          error: `${errCode}: ${e.message}`,
          _meta: baseMeta,
        };
      }
    }

    const [thinking, toolInfo] = _parseResponse(respData);

    // No tool call parsed: either a server-reported error or plain free text.
    if (toolInfo === null) {
      if (thinking.startsWith("[Error]")) {
        return { files: [], error: thinking };
      }
      return { files: [], raw_response: thinking };
    }

    const [toolName, toolArgs] = toolInfo;

    if (toolName === "answer") {
      const answerXml = toolArgs.answer || "";
      log("Received final answer");
      const result = _parseAnswer(answerXml, projectRoot);
      // Deduplicate rg patterns collected across all executed commands.
      result.rg_patterns = [...new Set(executor.collectedRgPatterns)];
      result._meta = { treeDepth: actualDepth, treeSizeKB: +(treeSizeBytes / 1024).toFixed(1), fellBack };
      return result;
    }

    if (toolName === "restricted_exec") {
      const callId = randomUUID();
      const argsJson = JSON.stringify(toolArgs);

      // Args look like { command1: ..., command2: ... }; count them for the log.
      const cmds = Object.keys(toolArgs).filter((k) => k.startsWith("command"));
      log(`Executing ${cmds.length} local commands`);

      const results = await executor.execToolCallAsync(toolArgs);

      // Record the assistant's tool call and its result, linked via callId.
      messages.push({
        role: 2,
        content: thinking,
        tool_call_id: callId,
        tool_name: "restricted_exec",
        tool_args_json: argsJson,
      });
      messages.push({ role: 4, content: results, ref_call_id: callId });

      // Inject force-answer after last search round
      if (turn >= maxTurns - 1) {
        messages.push({ role: 1, content: FINAL_FORCE_ANSWER });
        log("Injected force-answer prompt");
      }
    }
    // Any other tool name: silently proceed to the next turn.
  }

  return {
    files: [],
    error: "Max turns reached without getting an answer",
    rg_patterns: [...new Set(executor.collectedRgPatterns)],
    _meta: { treeDepth: actualDepth, treeSizeKB: +(treeSizeBytes / 1024).toFixed(1), fellBack, projectRoot },
  };
}
|
|
1104
|
+
|
|
1105
|
+
/**
|
|
1106
|
+
* Search and return formatted result suitable for MCP tool response.
|
|
1107
|
+
*
|
|
1108
|
+
* @param {Object} opts
|
|
1109
|
+
* @param {string} opts.query
|
|
1110
|
+
* @param {string} opts.projectRoot
|
|
1111
|
+
* @param {string} [opts.apiKey]
|
|
1112
|
+
* @param {number} [opts.maxTurns=3]
|
|
1113
|
+
* @param {number} [opts.maxCommands=8]
|
|
1114
|
+
* @param {number} [opts.maxResults=10]
|
|
1115
|
+
* @param {number} [opts.treeDepth=3]
|
|
1116
|
+
* @param {number} [opts.timeoutMs=30000]
|
|
1117
|
+
* @param {string[]} [opts.excludePaths=[]]
|
|
1118
|
+
* @returns {Promise<string>}
|
|
1119
|
+
*/
|
|
1120
|
+
export async function searchWithContent({
  query,
  projectRoot,
  apiKey = null,
  maxTurns = 3,
  maxCommands = 8,
  maxResults = 10,
  treeDepth = 3,
  timeoutMs = 30000,
  excludePaths = [],
}) {
  const result = await search({ query, projectRoot, apiKey, maxTurns, maxCommands, maxResults, treeDepth, timeoutMs, excludePaths });

  // Error path: render the error plus diagnostics/config and a targeted hint.
  if (result.error) {
    const meta = result._meta;
    let errMsg = `Error: ${result.error}`;
    if (meta) {
      errMsg += `\n\n[diagnostic] error_type=${meta.errorCode || "unknown"}, tree_depth_used=${meta.treeDepth}, tree_size=${meta.treeSizeKB}KB`;
      if (meta.fellBack) errMsg += ` (auto fell back from requested depth)`;
      if (meta.contextTrimmed) errMsg += `, context_trimmed=true`;
      if (meta.projectRoot) errMsg += `\n[diagnostic] project_path=${meta.projectRoot}`;
      errMsg += `\n[config] max_turns=${maxTurns}, max_results=${maxResults}, max_commands=${maxCommands}, timeout_ms=${timeoutMs}`;
      if (excludePaths.length) errMsg += `, exclude_paths=[${excludePaths.join(", ")}]`;
      // Targeted hints based on error type
      if (meta.errorCode === "PAYLOAD_TOO_LARGE" || meta.errorCode === "TIMEOUT") {
        errMsg += `\n[hint] Payload/timeout error. Try: reduce tree_depth, reduce max_turns, add exclude_paths, or narrow project_path to a subdirectory.`;
      } else if (meta.errorCode === "AUTH_ERROR") {
        errMsg += `\n[hint] Authentication error. The API key may be expired or revoked. Try re-extracting with extract_windsurf_key, or set a fresh WINDSURF_API_KEY.`;
      } else if (meta.errorCode === "RATE_LIMITED") {
        errMsg += `\n[hint] Rate limited. Wait a moment and retry.`;
      } else {
        errMsg += `\n[hint] If the error is payload-related, try a lower tree_depth value or add exclude_paths.`;
      }
    }
    return errMsg;
  }

  const files = result.files || [];
  const rgPatterns = result.rg_patterns || [];
  // Deduplicate + filter short patterns
  const uniquePatterns = [...new Set(rgPatterns)].filter((p) => p.length >= 3);

  // Nothing at all to report: include the raw model response when available.
  if (!files.length && !uniquePatterns.length) {
    const raw = result.raw_response || "";
    return raw ? `No relevant files found.\n\nRaw response:\n${raw}` : "No relevant files found.";
  }

  const parts = [];
  const n = files.length;

  // One numbered line per file: absolute path plus its suggested line ranges.
  if (files.length) {
    parts.push(`Found ${n} relevant files.`);
    parts.push("");
    for (let i = 0; i < files.length; i++) {
      const entry = files[i];
      const rangesStr = entry.ranges.map(([s, e]) => `L${s}-${e}`).join(", ");
      parts.push(`  [${i + 1}/${n}] ${entry.full_path} (${rangesStr})`);
    }
  } else {
    parts.push("No files found.");
  }

  if (uniquePatterns.length) {
    parts.push("");
    parts.push(`grep keywords: ${uniquePatterns.join(", ")}`);
  }

  // Append diagnostic metadata so the calling AI knows what happened
  const meta = result._meta;
  if (meta) {
    const fbNote = meta.fellBack ? ` (fell back from requested depth)` : "";
    parts.push("");
    let configLine = `[config] tree_depth=${meta.treeDepth}${fbNote}, tree_size=${meta.treeSizeKB}KB, max_turns=${maxTurns}, max_results=${maxResults}, timeout_ms=${timeoutMs}`;
    if (excludePaths.length) configLine += `, exclude_paths=[${excludePaths.join(", ")}]`;
    parts.push(configLine);
  }

  return parts.join("\n");
}
|
|
1199
|
+
|
|
1200
|
+
/**
 * Extract Windsurf API Key info (for MCP tool use).
 * Thin wrapper around extractKey() from ./extract-key.mjs so callers of this
 * module don't need to import that module directly.
 * @returns {Object} whatever extractKey() returns — see extract-key.mjs.
 */
export function extractKeyInfo() {
  return extractKey();
}
|