claude-all-hands 1.0.5 → 1.0.7

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -35,14 +35,14 @@ Your assignments should guide writers toward capturing KNOWLEDGE that isn't obvi
  - `mode`: "init"
  - `scope_paths`: optional paths to scope (default: entire codebase)
  - `user_request`: optional user-specified context
- - `feature_branch`: branch name for worktree naming
+ - `feature_branch`: branch name (used for context only)

  **OUTPUTS** (to main agent):
- - `{ success: true, structure_committed: true, assignments: [...] }` - ready for writers
+ - `{ success: true, structure_created: true, assignments: [...] }` - ready for writers

  **STEPS:**

- 1. **Analyze codebase AND existing docs** - Run envoy commands directly (no chaining):
+ 1. **Analyze codebase AND existing docs** - Run as parallel tool calls or join with `;` (not `&&`, want all outputs):
  ```bash
  # Understand codebase structure
  envoy docs tree <path> --depth 4
@@ -85,22 +85,20 @@ Your assignments should guide writers toward capturing KNOWLEDGE that isn't obvi
  - Subdomains should represent distinct subsystems, not directories
  - One writer can handle parent + children if simple enough

- 4. **Create and commit directory structure:**
+ 4. **Create directory structure:**
  ```bash
  mkdir -p docs/<product>/<subdomain>
- git add docs/
- git commit -m "docs: create documentation structure for <products>"
  ```

  This happens BEFORE delegation - writers receive existing directories.
+ Note: Do NOT create .gitkeep files. Writers will add content shortly - empty directories are fine temporarily.

  5. **Assign writers to directories:**
  ```yaml
- structure_committed: true
+ structure_created: true
  assignments:
  - directory: "docs/<product>/"
  files: ["<source-glob-patterns>"]
- worktree_branch: "<feature_branch>/docs-<product>"
  responsibilities:
  - "Key design decisions and rationale"
  - "Patterns observers should know"
@@ -110,7 +108,6 @@ Your assignments should guide writers toward capturing KNOWLEDGE that isn't obvi

  - directory: "docs/<product>/<subdomain>/"
  files: ["<source-glob-patterns>"]
- worktree_branch: "<feature_branch>/docs-<product>-<subdomain>"
  responsibilities:
  - "Implementation rationale for subsystem"
  - "Key patterns with reference examples"
@@ -133,13 +130,23 @@ Your assignments should guide writers toward capturing KNOWLEDGE that isn't obvi
  - `use_diff`: boolean - if true, get changed files from git
  - `scope_paths`: optional list of paths to scope
  - `user_request`: optional user-specified context
- - `feature_branch`: branch name for worktree naming
+ - `feature_branch`: branch name (used for context only)
+ - `walkthroughs`: optional array from `envoy plan get-all-walkthroughs` containing:
+ - `prompt_num`, `variant`, `id`: prompt identifiers
+ - `description`: what the prompt implemented
+ - `walkthrough`: array of implementation iterations with decisions/rationale
+ - `relevant_files`: files affected by this prompt

  **OUTPUTS** (to main agent):
- - `{ success: true, structure_committed: true, assignments: [...] }` - targeted updates
+ - `{ success: true, structure_created: true, assignments: [...] }` - targeted updates

  **STEPS:**
- 1. **Discover what needs documenting:**
+ 1. **Analyze walkthroughs for rationale** (if provided):
+ - Extract design decisions, patterns chosen, and rationale from walkthrough entries
+ - Map prompts to affected files via `relevant_files`
+ - This context informs what knowledge to capture (WHY decisions were made)
+
+ 2. **Discover what needs documenting:**
  ```bash
  # If use_diff is true, get changed files from git
  envoy git diff-base --name-only
@@ -153,25 +160,24 @@ Your assignments should guide writers toward capturing KNOWLEDGE that isn't obvi

  # Check if changed concepts are already documented
  envoy knowledge search "<changed-feature>" --metadata-only
- envoy knowledge search "<affected-product>" --metadata-only
  ```

- 2. Identify affected products/features from the changes
- 3. Check existing doc structure - which directories need updates vs new sections
- 4. Create any new directories needed (and commit)
- 5. Assign writers to affected directories with update responsibilities
+ 3. Identify affected products/features from changes + walkthrough context
+ 4. Check existing doc structure - which directories need updates vs new sections
+ 5. Create any new directories needed
+ 6. Assign writers with walkthrough rationale included in notes:

  ```yaml
- structure_committed: true
+ structure_created: true
  assignments:
  - directory: "docs/<product>/"
  files: ["<changed-source-patterns>"]
- worktree_branch: "<feature_branch>/docs-<product>"
  responsibilities:
  - "update README.md for new features"
  - "add documentation for new commands"
  action: "update"
- notes: "<what changed and needs documenting>"
+ notes: "<what changed, plus rationale from walkthroughs>"
+ walkthrough_context: "<relevant decisions/rationale from prompt walkthroughs>"
  ```
  </adjust_workflow>

@@ -230,9 +236,9 @@ assignments:
  - MUST run `envoy docs tree docs/` to see existing documentation hierarchies before planning
  - MUST use `envoy knowledge search` to check if concepts are already documented
  - MUST use product/feature names, not directory names
- - MUST create and commit directory structure BEFORE returning assignments
+ - MUST create directory structure BEFORE returning assignments
  - MUST assign writers to existing directories with clear responsibilities
- - MUST run envoy commands directly - no chaining
+ - MUST run envoy commands via parallel tool calls or `;` joins (avoid `&&` - want all outputs)
  - MUST use --metadata-only for knowledge searches
  - NEVER mirror source directory structure in domain names
  - NEVER over-distribute - prefer fewer writers handling more
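As an aside on the changed chaining constraint above (a sketch, not part of the published diff; the paths are placeholders, the subcommands come from the hunks above): `&&` stops at the first failing command, so later outputs are lost, while `;` runs every command and keeps every output.

```bash
# Illustrative only - not part of the package diff
envoy docs tree src/ --depth 4 && envoy docs tree docs/   # if the first call fails, the second never runs
envoy docs tree src/ --depth 4 ; envoy docs tree docs/    # both run; all outputs are available either way
```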
@@ -244,12 +250,12 @@ assignments:
  **Init workflow complete when:**
  - Products/features identified (not directories)
  - Meaningful domain names chosen
- - Directory structure created and committed
+ - Directory structure created
  - Writer assignments defined with responsibilities
  - Each assignment has directory, files, responsibilities, depth

  **Adjust workflow complete when:**
  - Affected products identified
- - New directories created if needed (and committed)
+ - New directories created if needed
  - Writer assignments target specific update responsibilities
  </success_criteria>
@@ -2,7 +2,7 @@
  name: documentation-writer
  description: |
  Documentation writer specialist. Writes knowledge-base documentation using file references. Triggers: "write docs", "document domain".
- tools: Read, Glob, Grep, Bash, Write, Edit
+ tools: Read, Glob, Grep, Bash, Write, Edit, LSP
  model: inherit
  color: yellow
  ---
@@ -121,7 +121,7 @@ Authentication uses JWT for stateless sessions. The signing implementation [ref:
  - `notes`: guidance from taxonomist

  **OUTPUTS** (to main agent):
- - `{ success: true }` - documentation written and committed
+ - `{ success: true }` - documentation written (main agent commits after all writers complete)

  **STEPS:**
  1. Search existing knowledge: `envoy knowledge search "<domain> decisions patterns"`
@@ -158,7 +158,7 @@ Authentication uses JWT for stateless sessions. The signing implementation [ref:
  - `detailed`: + rationale, tradeoffs, edge cases
  - `comprehensive`: + all major patterns, troubleshooting

- 6. Validate before commit:
+ 6. Validate before returning:

  a. Run: `envoy docs validate --path docs/<domain>/`
  b. Check response:
@@ -171,18 +171,14 @@ Authentication uses JWT for stateless sessions. The signing implementation [ref:
  d. If any check fails:
  - Fix the issue
  - Re-validate
- - Do NOT commit until all checks pass

- 7. Commit changes:
- - `git add docs/`
- - `git commit -m "docs(<domain>): <summary>"`
- - Commit hook validates references
+ 7. Return `{ success: true }`

- 8. Return `{ success: true }`
+ **IMPORTANT:** Do NOT commit. Main agent commits all writer changes together after parallel execution completes.

  **On failure:**
  - If AST symbol not found: use file-only ref `[ref:file::hash]`
- - If commit validation fails: fix references, retry commit
+ - If validation fails: fix references, re-validate
  </write_workflow>

  <fix_workflow>
@@ -207,9 +203,7 @@ Authentication uses JWT for stateless sessions. The signing implementation [ref:
  - If file moved: update file path
  - If file deleted: remove ref, update knowledge

- 3. Commit fixes: `git commit -m "docs: update stale references"`
-
- 4. Return fix summary
+ 3. Return fix summary (main agent commits)
  </fix_workflow>

  <audit_fix_workflow>
@@ -291,9 +285,7 @@ changes:

  5. Validate changes: `envoy docs validate --path <doc_file>`

- 6. Commit: `git commit -m "docs: fix stale/invalid refs in <doc_file>"`
-
- 7. Return changes summary
+ 6. Return changes summary (main agent commits)
  </audit_fix_workflow>

  <documentation_format>
@@ -304,6 +296,15 @@ description: 1-2 sentence summary enabling semantic search discovery
  ---
  ```

+ **Description quality guidelines:**
+ - GOOD: "Authentication system using JWT tokens with refresh rotation and Redis session storage"
+ - GOOD: "CLI argument parsing with subcommand routing and help generation"
+ - BAD: "Documentation for auth" (too vague)
+ - BAD: "Code documentation" (useless for search)
+ - BAD: "This file documents the system" (describes the doc, not the code)
+
+ The description should answer: "What would someone search for to find this?"
+
  **Structure (REQUIRED sections marked with *):**

  ```markdown
@@ -348,7 +349,8 @@ Adjust structure based on domain. The structure serves knowledge transfer, not c
  - MUST include `description` in front-matter
  - MUST include Overview, Key Decisions, and Use Cases sections
  - MUST focus on decisions, rationale, patterns - NOT capabilities
- - MUST validate with `envoy docs validate` before committing
+ - MUST validate with `envoy docs validate` before returning
+ - MUST NOT commit - main agent commits after all writers complete
  - NEVER write inline code blocks (zero fenced blocks allowed)
  - NEVER document what's obvious from reading code
  - NEVER create capability tables (Command/Purpose, Option/Description)
@@ -129,14 +129,20 @@ changes:
  <step name="verify_and_report">
  After all agents complete:

- 1. Run validation again:
+ 1. Commit all documentation fixes:
+ ```bash
+ git add docs/
+ git commit -m "docs: fix stale/invalid references"
+ ```
+
+ 2. Run validation again:
  ```bash
  envoy docs validate [--path <docs_path>]
  ```

- 2. If issues remain, report which failed and why
+ 3. If issues remain, report which failed and why

- 3. Report completion:
+ 4. Report completion:
  ```markdown
  ## Audit Complete

@@ -42,17 +42,8 @@ For each prompt from next:
  3. **Parallel execution**: If multiple prompts/variants returned, delegate all in parallel
  </step>

- <step name="extract_documentation">
- After each specialist returns (prompt merged):
-
- Call `/docs adjust --diff` to update documentation based on changes.
- * Uses taxonomy-based approach to identify changed areas
- * Writers update relevant documentation with symbol references
- * Returns: `{ success: true }`
- </step>
-
  <step name="loop">
- Repeat steps 1-3 until:
+ Repeat steps 1-2 until:
  - No more prompts returned from next
  - No prompts in_progress status
  </step>
@@ -80,8 +71,38 @@ If verdict = "failed" OR suggested_fixes exist:
  4. Rerun full review (repeat until passes)
  </step>

+ <step name="extract_documentation">
+ After full review passes, delegate to **documentation-taxonomist** agent with adjust mode:
+
+ 1. Get prompt walkthroughs for rationale context:
+ ```bash
+ envoy plan get-all-walkthroughs
+ ```
+
+ 2. Delegate to taxonomist with inputs:
+ ```yaml
+ mode: "adjust"
+ use_diff: true
+ feature_branch: "<current_branch>"
+ walkthroughs: <output from get-all-walkthroughs>
+ ```
+
+ 3. Taxonomist identifies affected domains, delegates to writers (writers do NOT commit)
+
+ 4. After ALL writers complete, commit documentation changes:
+ ```bash
+ git add docs/
+ git commit -m "docs: update documentation for feature"
+ ```
+
+ 5. Mark prompts documented:
+ ```bash
+ envoy plan mark-all-documented
+ ```
+ </step>
+
  <step name="mandatory_doc_audit">
- Call `/docs audit` to validate all documentation symbol references.
+ Call `/audit-docs` to validate all documentation symbol references.
  * Checks for stale (hash changed) and invalid (symbol deleted) references
  * If issues found: fix automatically or present to user
  * Returns: `{ success: true }`
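Pulled together, the new extract_documentation step above amounts to roughly this sequence (a sketch, not part of the diff; the command names, ordering, and commit message mirror the hunk, while the delegation itself happens through the agent framework rather than a shell command):

```bash
# Sketch of the new extract_documentation flow (illustrative, not part of the diff)
envoy plan get-all-walkthroughs                            # 1. gather rationale context for the taxonomist
# 2. delegate to documentation-taxonomist (mode: adjust, use_diff: true, walkthroughs passed along)
# 3. writers update files under docs/ but do NOT commit
git add docs/                                              # 4. one commit after ALL writers complete
git commit -m "docs: update documentation for feature"
envoy plan mark-all-documented                             # 5. mark prompts documented
```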
@@ -104,7 +125,7 @@ Call /whats-next command
  <success_criteria>
  - All prompts implemented via specialist delegation
  - Variants executed in parallel
- - Documentation extracted for each prompt
+ - Documentation updated from all changes (once, after review passes)
  - Full review passes
  - Documentation audit completed
  - Plan marked complete with PR created
@@ -114,7 +135,7 @@ Call /whats-next command
  <constraints>
  - MUST respect prompt dependencies (use envoy next)
  - MUST run all variants in parallel
- - MUST extract documentation after each prompt
+ - MUST extract documentation once after full review passes
  - MUST loop until no prompts remain
  - MUST pass full review before completing
  - MUST run doc audit before completion
@@ -120,7 +120,7 @@ notes: "<segment.notes>"
  success: true
  ```

- Writers work directly on the branch. Taxonomist ensures non-overlapping output directories, so no conflicts occur.
+ Writers work directly on the branch without committing. Taxonomist ensures non-overlapping output directories, so no conflicts occur. Main agent commits all changes after writers complete.
  </step>

  <step name="validate_docs">
@@ -132,7 +132,7 @@ If stale/invalid refs found:
  </step>

  <step name="commit_documentation">
- Commit any uncommitted documentation changes (e.g., validation fixes):
+ Commit ALL documentation changes from parallel writers:

  1. Check for uncommitted changes in docs/:
  ```bash
@@ -168,7 +168,7 @@ Update semantic search index with new documentation:

  2. Call reindex:
  ```bash
- envoy knowledge reindex-from-changes docs --files '<json_array>'
+ envoy knowledge reindex-from-changes --files '<json_array>'
  ```

  3. If reindex reports missing references:
@@ -21,6 +21,15 @@ envoy <group> --help
  | `tavily extract` | Extract full content from URLs |
  | `xai search` | X/Twitter search for community opinions, alternatives, discussions |

+ ### Context7 (External Documentation)
+
+ | Tool | Use Case |
+ |------|----------|
+ | `context7 search` | Find library by name, returns IDs for context command |
+ | `context7 context` | Get documentation for known library (use search first) |
+
+ *Flow: search → get library ID → context with query*
+
  ### Vertex (Gemini)

  | Tool | Use Case |
@@ -44,6 +53,7 @@ envoy <group> --help
  - Pre-synthesized findings → `perplexity research`
  - Raw sources for processing → `tavily search` → `tavily extract`
  - Community opinions/alternatives → `xai search` (can build on previous findings with `--context`)
+ - Library documentation → `context7 search <lib>` → `context7 context <id> <query>`

  **Vertex:**
  - Arbitrary Gemini query → `vertex ask`
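To make the new Context7 flow concrete, an invocation might look like this (a sketch, not part of the diff; the library and query are example values taken from the hints in the new context7 command code):

```bash
# Illustrative Context7 flow (example values, not part of the diff)
envoy context7 search react "How to use react"            # returns candidate libraries with their IDs
envoy context7 context "/facebook/react" "hooks usage"    # fetch docs for the ID picked from the search results
```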
@@ -63,6 +73,7 @@ These tools read files directly and pass to external LLMs. Claude only receives
  | `TAVILY_API_KEY` | tavily | Tavily API key |
  | `VERTEX_API_KEY` | vertex | Google AI API key (Vertex Express) |
  | `X_AI_API_KEY` | xai | xAI Grok API key |
+ | `CONTEXT7_API_KEY` | context7 | Context7 API key (upstash.com) |
  | `ENVOY_TIMEOUT_MS` | optional | Global timeout (default: 120000) |

  ## Discovery
@@ -9,6 +9,7 @@
  "version": "0.1.0",
  "dependencies": {
  "@google/genai": "^0.14.0",
+ "@upstash/context7-sdk": "^0.3.0",
  "@visheratin/tokenizers-node": "0.1.5",
  "@visheratin/web-ai-node": "^1.4.5",
  "chokidar": "^5.0.0",
@@ -34,6 +35,9 @@
  "@types/pino": "^7.0.4",
  "tsx": "^4.7.0",
  "typescript": "^5.3.0"
+ },
+ "engines": {
+ "node": ">=20.0.0"
  }
  },
  "node_modules/@esbuild/aix-ppc64": {
@@ -519,6 +523,12 @@
  "pino": "*"
  }
  },
+ "node_modules/@upstash/context7-sdk": {
+ "version": "0.3.0",
+ "resolved": "https://registry.npmjs.org/@upstash/context7-sdk/-/context7-sdk-0.3.0.tgz",
+ "integrity": "sha512-kW5UV49mG9hh30sWP7nLq0mF7YHbTtfWrnm1VsT0UFW8mR6ovlYp7anobUh5qOaewSzraq9o2QyY77KVpI1twg==",
+ "license": "MIT"
+ },
  "node_modules/@visheratin/tokenizers-node": {
  "version": "0.1.5",
  "resolved": "https://registry.npmjs.org/@visheratin/tokenizers-node/-/tokenizers-node-0.1.5.tgz",
@@ -3,12 +3,16 @@
  "version": "0.1.0",
  "private": true,
  "type": "module",
+ "engines": {
+ "node": ">=20.0.0"
+ },
  "scripts": {
  "start": "tsx src/cli.ts",
  "typecheck": "tsc --noEmit"
  },
  "dependencies": {
  "@google/genai": "^0.14.0",
+ "@upstash/context7-sdk": "^0.3.0",
  "@visheratin/tokenizers-node": "0.1.5",
  "@visheratin/web-ai-node": "^1.4.5",
  "chokidar": "^5.0.0",
@@ -30,6 +30,7 @@ async function getInfo(
  TAVILY_API_KEY: process.env.TAVILY_API_KEY ? "set" : "missing",
  VERTEX_API_KEY: process.env.VERTEX_API_KEY ? "set" : "missing",
  X_AI_API_KEY: process.env.X_AI_API_KEY ? "set" : "missing",
+ CONTEXT7_API_KEY: process.env.CONTEXT7_API_KEY ? "set" : "missing",
  },
  timeout_ms: process.env.ENVOY_TIMEOUT_MS ?? "120000",
  },
@@ -0,0 +1,176 @@
+ /**
+ * Context7 API commands - library documentation search and context retrieval.
+ *
+ * Flow: search (find library) → context (get docs for known library)
+ */
+
+ import { Command } from "commander";
+ import { BaseCommand, type CommandResult } from "./base.js";
+ import { Context7, Context7Error, type Library } from "@upstash/context7-sdk";
+
+ /** Shared base for Context7 commands - DRY for auth + error handling */
+ abstract class Context7BaseCommand extends BaseCommand {
+ protected requireApiKey(): CommandResult | Context7 {
+ const apiKey = process.env.CONTEXT7_API_KEY;
+ if (!apiKey) {
+ return this.error("auth_error", "CONTEXT7_API_KEY not set");
+ }
+ return new Context7({ apiKey });
+ }
+
+ protected async withTimeout<T>(fn: () => Promise<T>): Promise<[T, number]> {
+ const start = performance.now();
+ const timeout = new Promise<never>((_, reject) =>
+ setTimeout(() => reject(new Error("timeout")), this.timeoutMs)
+ );
+ const result = await Promise.race([fn(), timeout]);
+ return [result, Math.round(performance.now() - start)];
+ }
+
+ protected handleError(e: unknown, extraHint?: string): CommandResult {
+ if (e instanceof Context7Error) {
+ return this.error("api_error", e.message, extraHint);
+ }
+ if (e instanceof Error && e.message.includes("timeout")) {
+ return this.error("timeout", `Request timed out after ${this.timeoutMs}ms`);
+ }
+ return this.error("api_error", e instanceof Error ? e.message : String(e));
+ }
+ }
+
+ class Context7SearchCommand extends Context7BaseCommand {
+ readonly name = "search";
+ readonly description = "Search for libraries by name, returns IDs for context command";
+
+ defineArguments(cmd: Command): void {
+ cmd
+ .argument("<library>", "Library name to search (e.g., react, fastify)")
+ .argument("[query]", "Optional query for relevance ranking")
+ .option("--limit <n>", "Max results (default: 5)", parseInt);
+ }
+
+ async execute(args: Record<string, unknown>): Promise<CommandResult> {
+ const clientOrError = this.requireApiKey();
+ if ("status" in clientOrError) return clientOrError;
+
+ const library = args.library as string;
+ const query = (args.query as string) ?? `How to use ${library}`;
+ const limit = (args.limit as number) ?? 5;
+
+ try {
+ const [libraries, durationMs] = await this.withTimeout(() =>
+ clientOrError.searchLibrary(query, library)
+ );
+
+ if (!Array.isArray(libraries)) {
+ return this.error("api_error", "Unexpected response format from Context7");
+ }
+
+ const results = libraries.slice(0, limit).map((lib: Library) => ({
+ id: lib.id, // Required for context command
+ name: lib.name,
+ description: lib.description,
+ snippets: lib.totalSnippets,
+ trust: lib.trustScore,
+ }));
+
+ return this.success(
+ {
+ query: library,
+ results,
+ usage: results.length > 0
+ ? `Use: envoy context7 context "${results[0].id}" "your question"`
+ : undefined,
+ ...(results.length === 0 && {
+ suggestion: "Library not found. Try different search term or library may not be indexed.",
+ }),
+ },
+ {
+ result_count: results.length,
+ command: "context7 search",
+ duration_ms: durationMs,
+ }
+ );
+ } catch (e) {
+ return this.handleError(e);
+ }
+ }
+ }
+
+ class Context7ContextCommand extends Context7BaseCommand {
+ readonly name = "context";
+ readonly description = "Get documentation context for a known library (use search first)";
+
+ defineArguments(cmd: Command): void {
+ cmd
+ .argument("<libraryId>", "Library ID from search (e.g., /facebook/react)")
+ .argument("<query>", "What you need docs for (e.g., 'hooks usage')")
+ .option("--text", "Return plain text instead of JSON (better for direct LLM use)");
+ }
+
+ async execute(args: Record<string, unknown>): Promise<CommandResult> {
+ const clientOrError = this.requireApiKey();
+ if ("status" in clientOrError) return clientOrError;
+
+ const libraryId = args.libraryId as string;
+ const query = args.query as string;
+ const useText = args.text as boolean;
+
+ try {
+ if (useText) {
+ // Plain text mode - directly usable in LLM prompts
+ const [content, durationMs] = await this.withTimeout(() =>
+ clientOrError.getContext(query, libraryId, { type: "txt" })
+ );
+
+ return this.success(
+ {
+ library: libraryId,
+ query,
+ content,
+ },
+ {
+ format: "text",
+ command: "context7 context",
+ duration_ms: durationMs,
+ }
+ );
+ }
+
+ // JSON mode - structured docs
+ const [docs, durationMs] = await this.withTimeout(() =>
+ clientOrError.getContext(query, libraryId, { type: "json" })
+ );
+
+ if (!Array.isArray(docs)) {
+ return this.error("api_error", "Unexpected response format from Context7");
+ }
+
+ const documentation = docs.map((doc) => ({
+ title: doc.title,
+ content: doc.content,
+ source: doc.source,
+ }));
+
+ return this.success(
+ {
+ library: libraryId,
+ query,
+ docs: documentation,
+ },
+ {
+ doc_count: documentation.length,
+ command: "context7 context",
+ duration_ms: durationMs,
+ }
+ );
+ } catch (e) {
+ return this.handleError(e, "Ensure libraryId is valid (from search results)");
+ }
+ }
+ }
+
+ export const COMMANDS = {
+ search: Context7SearchCommand,
+ context: Context7ContextCommand,
+ };
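Based on the argument and option definitions in the new file above, usage would look roughly like this (a sketch, not part of the diff; the library ID and query are example values, and the output shapes are abbreviated from the `success(...)` payloads in the code):

```bash
# Illustrative usage of the new commands (example values, abbreviated output)
export CONTEXT7_API_KEY=...                                      # without it, an auth_error result is returned
envoy context7 search fastify --limit 3                          # -> { query, results: [{ id, name, description, snippets, trust }], usage }
envoy context7 context "/facebook/react" "hooks usage" --text    # -> { library, query, content } with plain-text docs
```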
@@ -16,7 +16,6 @@ import matter from "gray-matter";
  import { BaseCommand, CommandResult } from "./base.js";
  import {
  findSymbol,
- symbolExists,
  getFileComplexity,
  } from "../lib/tree-sitter-utils.js";
  import { getSupportedExtensions } from "../lib/ast-queries.js";
@@ -514,9 +513,9 @@ const codeBlockPattern = /^```[a-z0-9_+-]*$/gm;
  continue;
  }

- // Check if symbol exists
- const symbolFound = await symbolExists(absoluteRefFile, ref.refSymbol!);
- if (!symbolFound) {
+ // Check if symbol exists and get its location
+ const symbol = await findSymbol(absoluteRefFile, ref.refSymbol!);
+ if (!symbol) {
  const reason = "Symbol not found";
  invalid.push({
  doc_file: ref.file,
@@ -530,12 +529,6 @@ const codeBlockPattern = /^```[a-z0-9_+-]*$/gm;
  continue;
  }

- // Get current hash for symbol
- const symbol = await findSymbol(absoluteRefFile, ref.refSymbol!);
- if (!symbol) {
- continue; // Already validated above
- }
-
  const { hash: mostRecentHash, success } = getMostRecentHashForRange(
  absoluteRefFile,
  symbol.startLine,