docdex 0.2.25 → 0.2.26
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +17 -2
- package/assets/agents.md +141 -7
- package/bin/docdex-mcp-stdio.js +19 -0
- package/lib/mcp_stdio_bridge.js +236 -0
- package/package.json +3 -2
package/README.md
CHANGED
@@ -3,7 +3,6 @@
 
 
 
-[](https://lobehub.com/mcp/bekirdag-docdex)
 
 <a href="https://glama.ai/mcp/servers/@bekirdag/docdex">
 <img width="380" height="200" src="https://glama.ai/mcp/servers/@bekirdag/docdex/badge" />
@@ -108,7 +107,23 @@ flowchart LR
 
 ```
 
-Use the daemon HTTP/SSE endpoint.
+Use the daemon HTTP/SSE endpoint. For sandboxed clients, Docdex can also serve MCP over local IPC
+(Unix socket or Windows named pipe), while HTTP/SSE remains the default for most MCP clients.
+
+### Stdio MCP (Smithery/stdio-only clients)
+
+If your client requires a stdio MCP server (e.g., Smithery), use the bundled stdio bridge:
+
+```bash
+npx docdex docdex-mcp-stdio
+```
+
+The bridge reads newline-delimited JSON-RPC from stdin and forwards to Docdex. You can force IPC or
+override endpoints with:
+
+```bash
+DOCDEX_MCP_TRANSPORT=ipc DOCDEX_MCP_SOCKET_PATH=/path/to/mcp.sock npx docdex docdex-mcp-stdio
+```
 
 ### Manual Configuration
 
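As context for the stdio bridge documented in the README addition above, the sketch below drives it from Node over newline-delimited JSON-RPC. It is illustrative only and not part of the package: it assumes the docdex package is installed, that a Docdex daemon is already reachable at the default base URL, and the `tools/list` method name comes from the MCP specification rather than from this diff.

```js
// Illustrative only (not in the package): drive the stdio bridge from Node.
// Assumes docdex is installed and a Docdex daemon is reachable at the
// default base URL (http://127.0.0.1:28491).
const { spawn } = require("node:child_process");
const readline = require("node:readline");

const bridge = spawn("npx", ["docdex", "docdex-mcp-stdio"], {
  stdio: ["pipe", "pipe", "inherit"]
});

// One JSON-RPC request per line in, one JSON response line out.
// "tools/list" is a standard MCP method name assumed here, not taken from this diff.
const request = { jsonrpc: "2.0", id: 1, method: "tools/list", params: {} };
bridge.stdin.write(JSON.stringify(request) + "\n");

const rl = readline.createInterface({ input: bridge.stdout });
rl.on("line", (line) => {
  console.log("bridge reply:", line);
  bridge.kill();
});
```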
package/assets/agents.md
CHANGED
@@ -1,14 +1,16 @@
----- START OF DOCDEX INFO V0.2.
+---- START OF DOCDEX INFO V0.2.26 ----
 Docdex URL: http://127.0.0.1:28491
 Use this base URL for Docdex HTTP endpoints.
+Health check endpoint: `GET /healthz` (not `/v1/health`).
 
 ## Priority Directives (Mandatory)
 
 - Always use both memory lobes early: docdex_get_profile + docdex_memory_recall; save new facts via docdex_memory_save and new preferences via docdex_save_preference.
 - Maintain agent-specific profile memory: use your agent_id with docdex_get_profile/docdex_save_preference; store generic self-knowledge (role, tooling, preferences).
-- Use impact analysis for every code change:
+- Use impact analysis for every code change: prefer MCP tools `docdex_impact_graph` / `docdex_dag_export` (IPC/HTTP). If shell networking is blocked, do not use curl; use MCP/IPC instead. If unavailable, state it and proceed cautiously.
 - Apply DAG reasoning for planning: prefer dependency graph facts (impact results and /v1/dag/export) to choose the right change order and scope.
 - Use Docdex tools intentionally: docdex_search/symbols/ast for repo truth; docdex_stats/files/repo_inspect/index for index health.
+- For folder structure, use docdex_tree instead of raw `rg --files`/`find` to avoid noisy folders.
 - When you do not know something, run docdex_web_research (force_web=true). Web research is encouraged by default for non-repo facts and external APIs.
 
 # Docdex Agent Usage Instructions
@@ -69,8 +71,8 @@ Precision tools for structural analysis. Do not rely on text search for definiti
 | docdex_symbols | Get exact definitions/signatures for a file. |
 | docdex_ast | Specific AST nodes (e.g., "Find all class definitions"). |
 | docdex_impact_diagnostics | Check for broken/dynamic imports. |
-|
-|
+| docdex_impact_graph | Impact Analysis: "What breaks if I change this?" Returns inbound/outbound dependencies. |
+| docdex_dag_export | Export the dependency DAG for change ordering and scope. |
 
 ### C. Memory Operations
 
@@ -104,6 +106,7 @@ Use these to verify index coverage, repo binding, and to read precise file slice
 | docdex_files | Indexed file coverage; confirm a file is in the index. |
 | docdex_index | Reindex full repo or ingest specific files when stale/missing. |
 | docdex_open | Read exact file slices after you identify targets. |
+| docdex_tree | Render a repo folder tree with standard excludes (avoid noisy folders). |
 
 ## Quick Tool Map (Often Missed)
 
@@ -114,13 +117,144 @@ Use these to verify index coverage, repo binding, and to read precise file slice
 - docdex_search diff: Limit search to working tree, staged, or ref ranges; filter by paths.
 - docdex_web_research knobs: force_web, skip_local_search, repo_only, no_cache, web_limit, llm_filter_local_results, llm_model.
 - docdex_open: Read narrow file slices after targets are identified.
+- docdex_tree: Render a filtered folder tree (prefer this over `rg --files` / `find`).
 - docdex_impact_diagnostics: Scan dynamic imports when imports are unclear or failing.
 - docdex_local_completion: Delegate low-complexity codegen tasks (tests, docstrings, boilerplate, simple refactors).
 - docdex_ast: Use AST queries for precise structure (class/function definitions, call sites, imports).
 - docdex_symbols: Use symbols to confirm exact signatures/locations before edits.
--
--
-- HTTP /v1/initialize:
+- docdex_impact_graph: Mandatory before code changes to review inbound/outbound deps (use MCP/IPC if shell networking is blocked).
+- docdex_dag_export: Export dependency graph to plan change order.
+- HTTP /v1/initialize: Mount/bind a repo for HTTP daemon mode. Request JSON uses rootUri/root_uri (NOT repo_root).
+
+## CLI Fallbacks (when MCP/IPC is unavailable)
+Use these only when MCP tools cannot be called (e.g., blocked sandbox networking). Prefer MCP/IPC otherwise.
+
+- `docdexd repo init --repo <path>`: initialize repo in daemon and return repo_id JSON.
+- `docdexd repo id --repo <path>`: compute repo fingerprint locally.
+- `docdexd repo status --repo <path>` / `docdexd repo dirty --exit-code`: git working tree status.
+- `docdexd impact-graph --repo <path> --file <rel>`: impact graph (HTTP/local).
+- `docdexd dag export --repo <path> <session_id>`: DAG export alias.
+- `docdexd search --repo <path> --query "<q>"`: /search equivalent (HTTP/local).
+- `docdexd open --repo <path> --file <rel>`: safe file slice read (head/start/end/clamp).
+- `docdexd file ensure-newline|write --repo <path> --file <rel>`: minimal file edits.
+- `docdexd test run-node --repo <path> --file <rel> --args "..."`: run Node scripts.
+
+## Docdex Usage Cookbook (Mandatory, Exact Schemas)
+
+This section is the authoritative source for how to call Docdex. Do not guess field names or payloads.
+
+### 0) Base URL + daemon modes
+
+- Default HTTP base URL: http://127.0.0.1:28491 (override with DOCDEX_HTTP_BASE_URL).
+- Single-repo HTTP daemon: `docdexd serve --repo /abs/path`. /v1/initialize is NOT used. repo_id is optional, but must match the serving repo if provided.
+- Multi-repo HTTP daemon: `docdexd daemon`. You MUST call /v1/initialize before repo-scoped HTTP endpoints. When multiple repos are mounted, repo_id is required on every repo-scoped request.
+
+### 1) Initialize (HTTP) - exact request payload
+
+POST /v1/initialize
+
+Request JSON (exact field names):
+
+```json
+{ "rootUri": "file:///abs/path/to/repo" }
+```
+
+Alias accepted:
+
+```json
+{ "root_uri": "/abs/path/to/repo" }
+```
+
+Rules:
+- Do NOT send `repo_root` in the request. `repo_root` is a response field.
+- Use file:// URIs when possible; plain absolute paths are also accepted.
+- Response returns `repo_id`, `status`, and `repo_root`. Use that repo_id for subsequent HTTP calls.
+
+### 2) Repo scoping (HTTP)
+
+- Send repo_id via header `x-docdex-repo-id: <repo_id>` or query param `repo_id=<repo_id>`.
+- If the daemon is single-repo, do not send a repo_id for a different repo (you will get `unknown_repo`).
+- If the daemon is multi-repo and more than one repo is mounted, repo_id is required.
+
+### 3) Search (HTTP)
+
+`GET /search`
+
+Required:
+- `q` (query string).
+
+Common params:
+- `limit`, `snippets`, `max_tokens`, `include_libs`, `force_web`, `skip_local_search`, `no_cache`,
+  `max_web_results`, `llm_filter_local_results`, `diff_mode`, `diff_base`, `diff_head`, `diff_path`,
+  `dag_session_id`, `repo_id`.
+
+Notes:
+- `skip_local_search=true` effectively forces web discovery (Tier 2).
+- If DOCDEX_WEB_ENABLED=1, web discovery can be slow; plan timeouts accordingly.
+
+### 4) Snippet (HTTP)
+
+`GET /snippet/:doc_id`
+
+Common params:
+- `window`, `q`, `text_only`, `max_tokens`, `repo_id`.
+
+### 5) Impact graph (HTTP)
+
+`GET /v1/graph/impact?file=<repo-relative-path>`
+
+Rules:
+- `file` must be a path relative to the repo root (not an absolute path).
+- Include repo_id header/query when required by daemon mode.
+
+### 6) DAG export (HTTP)
+
+`GET /v1/dag/export?session_id=<id>`
+
+Query params:
+- `session_id` (required)
+- `format` (optional: json/text/dot; default json)
+- `max_nodes` (optional)
+- `repo_id` (required when multiple repos are mounted)
+
+### 7) MCP over HTTP/SSE
+
+- SSE: `/v1/mcp/sse` + `/v1/mcp/message`. When multiple repos are mounted, initialize with `rootUri` first.
+- HTTP: `/v1/mcp` accepts repo context in the payload or via prior initialize.
+- If HTTP/SSE is unreachable (sandboxed clients), fall back to local IPC: configure `transport = "ipc"` with `socket_path` (Unix) or `pipe_name` (Windows) and send MCP JSON-RPC to `/v1/mcp` over IPC.
+- For stdio-only clients (e.g., Smithery), use the `docdex-mcp-stdio` entrypoint to bridge stdio JSON-RPC to Docdex MCP.
+- For impact/DAG in sandboxed shells, prefer MCP/IPC tools over `curl` to `/v1/graph/impact` or `/v1/dag/export`.
+- MCP tools: `docdex_impact_graph` (impact traversal) and `docdex_dag_export` (DAG export).
+
+### 8) MCP tools (local) - required fields
+
+Do not guess fields; use these canonical shapes.
+
+- `docdex_search`: `{ project_root, query, limit?, diff?, repo_only?, force_web? }`
+- `docdex_open`: `{ project_root, path, start_line?, end_line?, head?, clamp? }` (range must be valid unless clamp/head used)
+- `docdex_files`: `{ project_root, limit?, offset? }`
+- `docdex_stats`: `{ project_root }`
+- `docdex_repo_inspect`: `{ project_root }`
+- `docdex_index`: `{ project_root, paths? }` (paths empty => full reindex)
+- `docdex_symbols`: `{ project_root, path }`
+- `docdex_ast`: `{ project_root, path, max_nodes? }`
+- `docdex_impact_diagnostics`: `{ project_root, file? }`
+- `docdex_impact_graph`: `{ project_root, file, max_edges?, max_depth?, edge_types? }`
+- `docdex_dag_export`: `{ project_root, session_id, format?, max_nodes? }`
+- `docdex_memory_save`: `{ project_root, text }`
+- `docdex_memory_recall`: `{ project_root, query, top_k? }`
+- `docdex_get_profile`: `{ agent_id }`
+- `docdex_save_preference`: `{ agent_id, category, content }`
+- `docdex_local_completion`: `{ task_type, instruction, context, max_tokens?, timeout_ms?, mode?, max_context_chars?, agent? }`
+- `docdex_web_research`: `{ project_root, query, force_web, skip_local_search?, web_limit?, no_cache? }`
+
+### 9) Common error fixes (do not guess)
+
+- `unknown_repo`: You are talking to a daemon that does not know that repo. Fix by:
+  - Starting a single-repo server for that repo (`docdexd serve --repo /abs/path`), OR
+  - Calling `/v1/initialize` on the multi-repo daemon with `rootUri`, then using the returned repo_id.
+- `missing_repo`: Supply repo_id (HTTP) or project_root (MCP), or call /v1/initialize.
+- `invalid_range` (docdex_open): Adjust start/end line to fit total_lines.
 
 ## Interaction Patterns
 
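The cookbook added above documents the exact HTTP shapes. As a hedged illustration, not part of the package, the Node sketch below follows those shapes for the multi-repo flow: POST /v1/initialize with rootUri, then a repo-scoped impact-graph call with the x-docdex-repo-id header. The repository URI and the file path are placeholders, and Node 18+ is assumed for global fetch.

```js
// Sketch only (not in the package): the multi-repo HTTP flow from the cookbook.
// Assumes `docdexd daemon` is running on the default base URL and Node 18+.
const BASE = process.env.DOCDEX_HTTP_BASE_URL || "http://127.0.0.1:28491";

async function impactExample() {
  // 1) Initialize: the request uses rootUri (repo_root is only a response field).
  const init = await fetch(`${BASE}/v1/initialize`, {
    method: "POST",
    headers: { "content-type": "application/json" },
    body: JSON.stringify({ rootUri: "file:///abs/path/to/repo" })
  });
  const { repo_id } = await init.json();

  // 2) Repo-scoped call: repo_id via the x-docdex-repo-id header and a
  //    repo-relative file path ("src/index.js" is a placeholder).
  const impact = await fetch(`${BASE}/v1/graph/impact?file=src/index.js`, {
    headers: { "x-docdex-repo-id": repo_id }
  });
  console.log(await impact.json());
}

impactExample().catch((err) => {
  console.error(err);
  process.exit(1);
});
```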
package/bin/docdex-mcp-stdio.js
ADDED
@@ -0,0 +1,19 @@
+#!/usr/bin/env node
+"use strict";
+
+const { runBridge } = require("../lib/mcp_stdio_bridge");
+
+async function main() {
+  try {
+    await runBridge({
+      stdin: process.stdin,
+      stdout: process.stdout,
+      stderr: process.stderr
+    });
+  } catch (err) {
+    process.stderr.write(`[docdex-mcp-stdio] fatal: ${err}\n`);
+    process.exit(1);
+  }
+}
+
+main();
package/lib/mcp_stdio_bridge.js
ADDED
@@ -0,0 +1,236 @@
+"use strict";
+
+const http = require("node:http");
+const https = require("node:https");
+const readline = require("node:readline");
+const { URL } = require("node:url");
+
+const DEFAULT_HTTP_BASE = "http://127.0.0.1:28491";
+
+function trimEnv(value) {
+  if (!value) return "";
+  const trimmed = String(value).trim();
+  return trimmed;
+}
+
+function jsonRpcError(message, id, code = -32000, data) {
+  const payload = {
+    jsonrpc: "2.0",
+    id: typeof id === "undefined" ? null : id,
+    error: {
+      code,
+      message
+    }
+  };
+  if (data !== undefined) {
+    payload.error.data = data;
+  }
+  return payload;
+}
+
+function extractIds(payload) {
+  if (Array.isArray(payload)) {
+    return payload
+      .map((item) => (item && Object.prototype.hasOwnProperty.call(item, "id") ? item.id : undefined))
+      .filter((value) => value !== undefined);
+  }
+  if (payload && Object.prototype.hasOwnProperty.call(payload, "id")) {
+    return [payload.id];
+  }
+  return [];
+}
+
+function normalizePipeName(name) {
+  if (!name) return "";
+  if (name.startsWith("\\\\.\\pipe\\")) return name;
+  return `\\\\.\\pipe\\${name}`;
+}
+
+function defaultUnixSocketPath() {
+  const runtime = trimEnv(process.env.XDG_RUNTIME_DIR);
+  if (runtime) {
+    return `${runtime.replace(/\/$/, "")}/docdex/mcp.sock`;
+  }
+  const home = trimEnv(process.env.HOME);
+  if (!home) return "";
+  return `${home.replace(/\/$/, "")}/.docdex/run/mcp.sock`;
+}
+
+function readTransportEnv() {
+  const transport = trimEnv(process.env.DOCDEX_MCP_TRANSPORT).toLowerCase();
+  if (transport && transport !== "http" && transport !== "ipc") {
+    throw new Error(`invalid DOCDEX_MCP_TRANSPORT: ${transport}`);
+  }
+  return transport;
+}
+
+function resolveIpcConfig(transport) {
+  const socketPathEnv = trimEnv(process.env.DOCDEX_MCP_SOCKET_PATH);
+  const pipeNameEnv = trimEnv(process.env.DOCDEX_MCP_PIPE_NAME);
+  const explicitIpc = transport === "ipc" || socketPathEnv || pipeNameEnv;
+  if (!explicitIpc) return null;
+
+  if (process.platform === "win32") {
+    const pipeName = normalizePipeName(pipeNameEnv || "docdex-mcp");
+    return { type: "pipe", pipeName };
+  }
+
+  const socketPath = socketPathEnv || defaultUnixSocketPath();
+  if (!socketPath) {
+    throw new Error("DOCDEX_MCP_SOCKET_PATH not set and HOME/XDG_RUNTIME_DIR unavailable");
+  }
+  return { type: "unix", socketPath };
+}
+
+function resolveHttpBaseUrl() {
+  const base = trimEnv(process.env.DOCDEX_HTTP_BASE_URL) || DEFAULT_HTTP_BASE;
+  return base;
+}
+
+function buildHttpEndpoint(baseUrl) {
+  const parsed = new URL(baseUrl);
+  const endpoint = new URL("/v1/mcp", parsed);
+  return endpoint;
+}
+
+function requestJson({ url, payload, socketPath }) {
+  const data = JSON.stringify(payload);
+  const isHttps = url.protocol === "https:";
+  const requestFn = isHttps ? https.request : http.request;
+  const options = {
+    method: "POST",
+    headers: {
+      "content-type": "application/json",
+      "content-length": Buffer.byteLength(data)
+    }
+  };
+  if (socketPath) {
+    options.socketPath = socketPath;
+    options.path = url.pathname;
+  } else {
+    options.hostname = url.hostname;
+    options.port = url.port || (isHttps ? 443 : 80);
+    options.path = url.pathname;
+  }
+
+  return new Promise((resolve, reject) => {
+    const req = requestFn(options, (res) => {
+      let body = "";
+      res.setEncoding("utf8");
+      res.on("data", (chunk) => {
+        body += chunk;
+      });
+      res.on("end", () => {
+        resolve({
+          status: res.statusCode || 0,
+          body
+        });
+      });
+    });
+    req.on("error", reject);
+    req.write(data);
+    req.end();
+  });
+}
+
+async function forwardRequest(payload, stderr) {
+  const transport = readTransportEnv();
+  const httpBase = resolveHttpBaseUrl();
+  const endpoint = buildHttpEndpoint(httpBase);
+  const ipcConfig = resolveIpcConfig(transport);
+
+  const tryHttp = async () => requestJson({ url: endpoint, payload });
+  const tryIpc = async () => {
+    if (!ipcConfig) {
+      throw new Error("IPC transport not configured");
+    }
+    const ipcUrl = new URL("http://localhost/v1/mcp");
+    const socketPath = ipcConfig.type === "unix" ? ipcConfig.socketPath : ipcConfig.pipeName;
+    return requestJson({ url: ipcUrl, payload, socketPath });
+  };
+
+  if (transport === "ipc") {
+    return await tryIpc();
+  }
+  if (transport === "http") {
+    return await tryHttp();
+  }
+
+  try {
+    return await tryHttp();
+  } catch (err) {
+    if (ipcConfig) {
+      stderr.write(`[docdex-mcp-stdio] HTTP failed, falling back to IPC: ${err}\n`);
+      return await tryIpc();
+    }
+    throw err;
+  }
+}
+
+async function writeLine(stdout, line) {
+  if (!line.endsWith("\n")) {
+    line += "\n";
+  }
+  if (!stdout.write(line)) {
+    await new Promise((resolve) => stdout.once("drain", resolve));
+  }
+}
+
+async function handlePayload(payload, stdout, stderr) {
+  const ids = extractIds(payload);
+  let response;
+  try {
+    const result = await forwardRequest(payload, stderr);
+    if (result.body) {
+      try {
+        response = JSON.parse(result.body);
+      } catch (err) {
+        response = jsonRpcError("invalid json response from docdex", ids[0], -32001, {
+          status: result.status,
+          body: result.body
+        });
+      }
+    } else if (ids.length === 1) {
+      response = { jsonrpc: "2.0", id: ids[0], result: null };
+    } else if (ids.length > 1) {
+      response = ids.map((id) => ({ jsonrpc: "2.0", id, result: null }));
+    } else {
+      response = null;
+    }
+  } catch (err) {
+    response = jsonRpcError("transport error", ids[0], -32002, {
+      error: String(err)
+    });
+  }
+  if (response !== null) {
+    await writeLine(stdout, JSON.stringify(response));
+  }
+}
+
+async function runBridge({ stdin, stdout, stderr }) {
+  readTransportEnv();
+  const rl = readline.createInterface({
+    input: stdin,
+    crlfDelay: Infinity
+  });
+
+  for await (const line of rl) {
+    const trimmed = line.trim();
+    if (!trimmed) {
+      continue;
+    }
+    let payload;
+    try {
+      payload = JSON.parse(trimmed);
+    } catch (err) {
+      const response = jsonRpcError("parse error", null, -32700, { error: String(err) });
+      await writeLine(stdout, JSON.stringify(response));
+      continue;
+    }
+    await handlePayload(payload, stdout, stderr);
+  }
+}
+
+module.exports = {
+  runBridge
+};
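The new lib/mcp_stdio_bridge.js above exports runBridge, which accepts arbitrary stdin/stdout/stderr streams. The sketch below is an assumption-laden illustration of embedding it with in-memory streams rather than process stdio: the deep require path presumes lib/ is published and not restricted by an exports map, and the "ping" method is a placeholder (the bridge forwards any JSON-RPC payload unchanged and replies with either the daemon's response or a transport-error object).

```js
// Sketch (not from the package): embed runBridge() in another Node process
// using in-memory streams. Require path and "ping" method are assumptions.
const { PassThrough } = require("node:stream");
const { runBridge } = require("docdex/lib/mcp_stdio_bridge");

const input = new PassThrough();   // lines written here are forwarded to Docdex
const output = new PassThrough();  // one JSON-RPC response line per request

output.on("data", (chunk) => process.stdout.write(`reply: ${chunk}`));

// runBridge resolves once the input stream ends.
runBridge({ stdin: input, stdout: output, stderr: process.stderr })
  .catch((err) => console.error("bridge error:", err));

input.write(JSON.stringify({ jsonrpc: "2.0", id: 1, method: "ping" }) + "\n");
input.end();
```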
package/package.json
CHANGED
@@ -1,11 +1,12 @@
 {
   "name": "docdex",
-  "version": "0.2.
+  "version": "0.2.26",
   "mcpName": "io.github.bekirdag/docdex",
   "description": "Local-first documentation and code indexer with HTTP/MCP search, AST, and agent memory.",
   "bin": {
     "docdex": "bin/docdex.js",
-    "docdexd": "bin/docdex.js"
+    "docdexd": "bin/docdex.js",
+    "docdex-mcp-stdio": "bin/docdex-mcp-stdio.js"
   },
   "files": [
     "bin",