@comfanion/usethis_search 4.1.0-dev.3 → 4.2.0-dev.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,27 +1,29 @@
1
1
  /**
2
2
  * Workspace Context Injection Hook
3
3
  *
4
- * Uses "experimental.chat.messages.transform" to inject workspace files
5
- * into the conversation context. The AI sees attached files as part of
4
+ * Uses "experimental.chat.messages.transform" to inject workspace chunks
5
+ * into the conversation context. The AI sees attached chunks as part of
6
6
  * the message stream — no read() needed.
7
7
  *
8
8
  * Architecture:
9
- * search("auth") → workspaceCache.attach(files)
10
- * [this hook] → inject cached files into messages
11
- * AI sees: full file content in context
12
- * Chat history: search outputs auto-pruned (files already in workspace)
9
+ * search("auth") → workspaceCache.attach(chunks)
10
+ * [this hook] → inject cached chunks into messages (grouped by file)
11
+ * AI sees: chunk content organized by file
12
+ * Chat history: search outputs auto-pruned (chunks already in workspace)
13
13
  *
14
14
  * Two responsibilities:
15
15
  * 1. INJECT: synthetic <workspace_context> message before last user message
16
16
  * 2. PRUNE: replace old search tool outputs with compact summaries
17
- * (the full content is already in workspace injection — no need to keep
17
+ * (the chunk content is already in workspace injection — no need to keep
18
18
  * the big search output in chat history)
19
19
  *
20
20
  * Injection strategy:
21
21
  * - Injects a synthetic user message with <workspace_context> BEFORE
22
- * the last user message (so AI sees files as "already known" context)
22
+ * the last user message (so AI sees chunks as "already known" context)
23
23
  * - Uses cache_control: ephemeral for Anthropic prompt caching (90% savings)
24
- * - Groups files: search-main first, then search-graph, then manual
24
+ * - Groups chunks by file: search-main first, then search-graph, then manual
25
+ * - Within each file: chunks sorted by chunkIndex (order in file)
26
+ * - Shows chunk metadata: function name, heading, line numbers
25
27
  */
26
28
 
27
29
  import type { SessionState } from "./types.ts"
@@ -77,41 +79,51 @@ export function createWorkspaceInjectionHandler(state: SessionState) {
77
79
  if (entries.length === 0) return
78
80
  }
79
81
 
80
- // ── Build workspace context block ─────────────────────────────────────
81
- const totalTokens = workspaceCache.totalTokens
82
- const fileCount = workspaceCache.size
82
+ // ── Build workspace context block ─────────────────────────────────────
83
+ const totalTokens = workspaceCache.totalTokens
84
+ const chunkCount = entries.length
83
85
 
84
- let workspace = `<workspace_context files="${fileCount}" tokens="${totalTokens}">\n`
86
+ // Group chunks by file path
87
+ const byFile = new Map<string, typeof entries>()
88
+ for (const entry of entries) {
89
+ if (!byFile.has(entry.path)) {
90
+ byFile.set(entry.path, [])
91
+ }
92
+ byFile.get(entry.path)!.push(entry)
93
+ }
85
94
 
86
- // Group by role for clear structure
87
- const mainFiles = entries.filter(e => e.role === "search-main")
88
- const graphFiles = entries.filter(e => e.role === "search-graph")
89
- const manualFiles = entries.filter(e => e.role === "manual")
95
+ // Sort chunks within each file by chunkIndex
96
+ for (const chunks of byFile.values()) {
97
+ chunks.sort((a, b) => a.chunkIndex - b.chunkIndex)
98
+ }
90
99
 
91
- // Main search results
92
- if (mainFiles.length > 0) {
93
- for (const entry of mainFiles) {
94
- workspace += formatFileEntry(entry)
95
- }
96
- }
100
+ const fileCount = byFile.size
97
101
 
98
- // Graph relations (imports, extends, used_by)
99
- if (graphFiles.length > 0) {
100
- workspace += `\n<!-- Graph relations -->\n`
101
- for (const entry of graphFiles) {
102
- workspace += formatFileEntry(entry)
103
- }
104
- }
102
+ let workspace = `<workspace_context chunks="${chunkCount}" files="${fileCount}" tokens="${totalTokens}">\n`
105
103
 
106
- // Manually attached files
107
- if (manualFiles.length > 0) {
108
- workspace += `\n<!-- Manually attached -->\n`
109
- for (const entry of manualFiles) {
110
- workspace += formatFileEntry(entry)
111
- }
112
- }
104
+ // Group by role for clear structure
105
+ const mainFiles = entries.filter(e => e.role === "search-main")
106
+ const graphFiles = entries.filter(e => e.role === "search-graph")
107
+ const manualFiles = entries.filter(e => e.role === "manual")
108
+
109
+ // Main search results
110
+ if (mainFiles.length > 0) {
111
+ workspace += formatChunksByFile(mainFiles, byFile)
112
+ }
113
113
 
114
- workspace += `</workspace_context>`
114
+ // Graph relations (imports, extends, used_by)
115
+ if (graphFiles.length > 0) {
116
+ workspace += `\n<!-- Search graph relations -->\n`
117
+ workspace += formatChunksByFile(graphFiles, byFile)
118
+ }
119
+
120
+ // Manually attached chunks
121
+ if (manualFiles.length > 0) {
122
+ workspace += `\n<!-- Manually attached -->\n`
123
+ workspace += formatChunksByFile(manualFiles, byFile)
124
+ }
125
+
126
+ workspace += `</workspace_context>`
115
127
 
116
128
  // ── Inject into messages ──────────────────────────────────────────────
117
129
  // Find the last real user message and inject workspace BEFORE it
@@ -146,17 +158,71 @@ export function createWorkspaceInjectionHandler(state: SessionState) {
146
158
 
147
159
  // ── Helpers ─────────────────────────────────────────────────────────────────
148
160
 
149
- function formatFileEntry(entry: ReturnType<typeof workspaceCache.getAll>[0]): string {
150
- let block = `\n## ${entry.path}\n`
161
+ /**
162
+ * Format chunks grouped by file path.
163
+ * Groups chunks from the same file together, sorted by chunkIndex.
164
+ */
165
+ function formatChunksByFile(
166
+ entries: ReturnType<typeof workspaceCache.getAll>,
167
+ byFile: Map<string, ReturnType<typeof workspaceCache.getAll>>
168
+ ): string {
169
+ let output = ""
170
+ const processedFiles = new Set<string>()
171
+
172
+ for (const entry of entries) {
173
+ // Skip if we already processed this file
174
+ if (processedFiles.has(entry.path)) continue
175
+ processedFiles.add(entry.path)
176
+
177
+ const chunks = byFile.get(entry.path) || []
178
+ output += formatFileWithChunks(entry.path, chunks)
179
+ }
180
+
181
+ return output
182
+ }
151
183
 
152
- // Metadata line
184
+ /**
185
+ * Format a single file with all its chunks.
186
+ */
187
+ function formatFileWithChunks(
188
+ filePath: string,
189
+ chunks: ReturnType<typeof workspaceCache.getAll>
190
+ ): string {
191
+ let block = `\n## ${filePath}\n`
192
+
193
+ // Chunk list comment: "Chunks: 2, 5 (partial file)"
194
+ const chunkIndices = chunks.map(c => c.chunkIndex).join(", ")
195
+ const isPartial = chunks.length > 0 ? " (partial file)" : ""
196
+ block += `<!-- Chunks: ${chunkIndices}${isPartial} -->\n`
197
+
198
+ // Format each chunk
199
+ for (const chunk of chunks) {
200
+ block += formatChunk(chunk)
201
+ }
202
+
203
+ return block
204
+ }
205
+
206
+ /**
207
+ * Format a single chunk with metadata.
208
+ */
209
+ function formatChunk(entry: ReturnType<typeof workspaceCache.getAll>[0]): string {
210
+ let block = ""
211
+
212
+ // Chunk subheader: "### Chunk N: description"
213
+ const description = entry.metadata?.function_name || entry.metadata?.heading_context || "code"
214
+ block += `\n### Chunk ${entry.chunkIndex}: ${description}\n`
215
+
216
+ // Chunk metadata line
153
217
  const meta: string[] = []
154
218
  if (entry.score !== undefined) meta.push(`score: ${entry.score.toFixed(3)}`)
155
219
  if (entry.metadata?.language) meta.push(entry.metadata.language)
156
- if (entry.metadata?.function_name) meta.push(`fn: ${entry.metadata.function_name}`)
157
220
  if (entry.metadata?.class_name) meta.push(`class: ${entry.metadata.class_name}`)
221
+ if (entry.metadata?.startLine !== undefined && entry.metadata?.endLine !== undefined) {
222
+ meta.push(`lines: ${entry.metadata.startLine}-${entry.metadata.endLine}`)
223
+ }
158
224
  if (entry.metadata?.relation) {
159
- const mainBase = entry.metadata.mainFile?.split("/").pop() || "?"
225
+ const mainBase = entry.metadata.mainChunkId?.split(":").pop() || "?"
160
226
  meta.push(`${entry.metadata.relation} from ${mainBase}`)
161
227
  }
162
228
 
@@ -164,7 +230,7 @@ function formatFileEntry(entry: ReturnType<typeof workspaceCache.getAll>[0]): st
164
230
  block += `<!-- ${meta.join(" | ")} -->\n`
165
231
  }
166
232
 
167
- // File content
233
+ // Chunk content
168
234
  const lang = entry.metadata?.language || ""
169
235
  block += `\`\`\`${lang}\n`
170
236
  block += entry.content
@@ -53,8 +53,12 @@ export function createToolSubstitutionHandler(state: SessionState, cache?: Works
53
53
  // Mark files as dirty so read() substitution is bypassed until freshen()
54
54
  if (input.tool === "edit" || input.tool === "write" || input.tool === "Edit" || input.tool === "Write") {
55
55
  const filePath = output.metadata?.filePath || output.metadata?.path || extractFilePathFromTitle(output.title)
56
- if (filePath && wsCache.has(filePath)) {
57
- wsCache.markDirty(filePath)
56
+ if (filePath) {
57
+ // Mark dirty if file has any chunks in workspace
58
+ const chunks = wsCache.getChunksByPath(filePath)
59
+ if (chunks.length > 0) {
60
+ wsCache.markDirty(filePath)
61
+ }
58
62
  }
59
63
  return // edit/write don't need output substitution
60
64
  }
@@ -80,13 +84,13 @@ export function createToolSubstitutionHandler(state: SessionState, cache?: Works
80
84
  }
81
85
 
82
86
  /**
83
- * Substitute read() output if file is in workspace.
87
+ * Substitute read() output if file has chunks in workspace.
84
88
  *
85
89
  * Input: { filePath: "src/auth.ts", offset?: 0, limit?: 100 }
86
90
  * Output: "export function login(...)\n..."
87
91
  *
88
- * If file in workspace AND no offset/limit (full read):
89
- * Replace with: "[File "src/auth.ts" is in workspace context — see <workspace_context> for full content.]"
92
+ * If file has chunks in workspace AND no offset/limit (full read):
93
+ * Replace with: "[File "src/auth.ts" has N chunks in workspace (chunks: 2, 5, 7) — see <workspace_context>]"
90
94
  *
91
95
  * If offset/limit present (partial read):
92
96
  * Keep original (partial reads are not in workspace injection)
@@ -101,14 +105,18 @@ function substituteReadOutput(output: { title: string; output: string; metadata:
101
105
  const isPartialRead = output.metadata?.offset !== undefined || output.metadata?.limit !== undefined
102
106
  if (isPartialRead) return
103
107
 
104
- // Check if file is in workspace
105
- if (!cache.has(filePath)) return
106
-
107
108
  // Don't substitute if file was modified (dirty) — workspace has stale content
108
109
  if (cache.isDirty(filePath)) return
109
110
 
110
- // Replace output with compact message
111
- output.output = `[File "${filePath}" is in workspace context — see <workspace_context> for full content.]`
111
+ // Check if file has chunks in workspace
112
+ const chunks = cache.getChunksByPath(filePath)
113
+ if (chunks.length === 0) return
114
+
115
+ // Build chunk ID list for message
116
+ const chunkIds = chunks.map(c => c.chunkIndex).join(", ")
117
+
118
+ // Replace output with compact message showing which chunks are available
119
+ output.output = `[File "${filePath}" has ${chunks.length} chunks in workspace (chunks: ${chunkIds}) — see <workspace_context>]`
112
120
  } catch {
113
121
  // Silently fail — don't break tool execution
114
122
  }
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "name": "@comfanion/usethis_search",
3
- "version": "4.1.0-dev.3",
4
- "description": "OpenCode plugin: semantic search with workspace injection + tool output substitution (v4.1-dev: read() substitution, dirty file tracking)",
3
+ "version": "4.2.0-dev.1",
4
+ "description": "OpenCode plugin: semantic search with chunk-based workspace injection (v4.2-dev: chunk-level context, granular detach, improved token efficiency)",
5
5
  "type": "module",
6
6
  "main": "./index.ts",
7
7
  "exports": {
package/tools/search.ts CHANGED
@@ -1,10 +1,10 @@
1
1
  /**
2
- * Semantic Code Search Tool (v4 — workspace injection)
2
+ * Semantic Code Search Tool (v5 — chunk-based workspace injection)
3
3
  *
4
4
  * Uses local embeddings + LanceDB vector store via bundled vectorizer.
5
- * v4: Top results + graph relations attached to workspace with full content.
5
+ * v5: Top N chunks + graph relations attached to workspace (chunk content only).
6
6
  * Rest returned as summary only.
7
- * AI sees full files via message.before injection — no read() needed.
7
+ * AI sees chunks via message.before injection — no read() needed.
8
8
  *
9
9
  * Index data is stored in `.opencode/vectors/<index>/`.
10
10
  */
@@ -245,109 +245,89 @@ Examples:
245
245
  }
246
246
  allResults.sort((a: any, b: any) => (b._finalScore ?? 0) - (a._finalScore ?? 0))
247
247
 
248
- // ── Group by file — best chunk per file, with chunk count ─────────────
249
- const fileGroups = new Map<string, { best: any, chunks: any[] }>()
250
- for (const r of allResults) {
251
- const key = r.file
252
- if (!fileGroups.has(key)) {
253
- fileGroups.set(key, { best: r, chunks: [r] })
254
- } else {
255
- const group = fileGroups.get(key)!
256
- group.chunks.push(r)
257
- if ((r._finalScore ?? 0) > (group.best._finalScore ?? 0)) {
258
- group.best = r
259
- }
260
- }
261
- }
262
-
263
- const sortedGroups = [...fileGroups.values()]
264
- .sort((a, b) => (b.best._finalScore ?? 0) - (a.best._finalScore ?? 0))
265
- .slice(0, limit)
266
-
267
- if (sortedGroups.length === 0) {
268
- const scope = args.searchAll ? "any index" : `index "${indexName}"`
269
- const filterNote = args.filter ? ` with filter "${args.filter}"` : ""
270
- return `No results found in ${scope}${filterNote} for: "${args.query}" (min score: ${minScore})\n\nTry:\n- Different keywords or phrasing\n- Remove or broaden the filter\n- search({ query: "...", searchAll: true })`
271
- }
272
-
273
- // ══════════════════════════════════════════════════════════════════════
274
- // WORKSPACE ATTACH: Top N main files + graph relations (FULL CONTENT)
275
- // ══════════════════════════════════════════════════════════════════════
276
-
277
- const topGroups = sortedGroups.slice(0, wsConfig.attachTopN)
278
- const restGroups = sortedGroups.slice(wsConfig.attachTopN)
279
-
280
- const attachedMain: string[] = []
281
- const attachedGraph: string[] = []
282
- const alreadyAttached = new Set<string>()
283
-
284
- for (const { best: r } of topGroups) {
285
- // Skip if score too low
286
- if ((r._finalScore ?? 0) < wsConfig.minScoreMain) continue
287
-
288
- // Read full file and attach
289
- try {
290
- const fullPath = path.join(projectRoot, r.file)
291
- const content = await fs.readFile(fullPath, "utf-8")
292
-
293
- workspaceCache.attach({
294
- path: r.file,
295
- content,
296
- role: "search-main",
297
- attachedAt: Date.now(),
298
- attachedBy: args.query,
299
- score: r._finalScore,
300
- metadata: {
301
- language: r.language,
302
- function_name: r.function_name,
303
- class_name: r.class_name,
304
- heading_context: r.heading_context,
305
- },
306
- })
307
-
308
- attachedMain.push(r.file)
309
- alreadyAttached.add(r.file)
310
- } catch {
311
- // File read failed — skip
312
- continue
313
- }
314
-
315
- // Attach graph relations (imports, extends, used_by)
316
- if (r.relatedContext && r.relatedContext.length > 0) {
317
- const topRelated = r.relatedContext
318
- .filter((rel: any) => rel.score >= wsConfig.minScoreRelated)
319
- .sort((a: any, b: any) => b.score - a.score)
320
- .slice(0, wsConfig.attachRelatedPerFile)
321
-
322
- for (const rel of topRelated) {
323
- if (alreadyAttached.has(rel.file)) continue
324
-
325
- try {
326
- const relFullPath = path.join(projectRoot, rel.file)
327
- const relContent = await fs.readFile(relFullPath, "utf-8")
328
-
329
- workspaceCache.attach({
330
- path: rel.file,
331
- content: relContent,
332
- role: "search-graph",
333
- attachedAt: Date.now(),
334
- attachedBy: `${args.query} (${rel.relation} from ${r.file})`,
335
- score: rel.score,
336
- metadata: {
337
- language: rel.language,
338
- relation: rel.relation,
339
- mainFile: r.file,
340
- },
341
- })
342
-
343
- attachedGraph.push(rel.file)
344
- alreadyAttached.add(rel.file)
345
- } catch {
346
- // Related file read failed — skip
347
- }
348
- }
349
- }
350
- }
248
+ // ── Sort by final score (chunks, not files) ──────────────────────────
249
+ const topChunks = allResults.slice(0, limit)
250
+
251
+ if (topChunks.length === 0) {
252
+ const scope = args.searchAll ? "any index" : `index "${indexName}"`
253
+ const filterNote = args.filter ? ` with filter "${args.filter}"` : ""
254
+ return `No results found in ${scope}${filterNote} for: "${args.query}" (min score: ${minScore})\n\nTry:\n- Different keywords or phrasing\n- Remove or broaden the filter\n- search({ query: "...", searchAll: true })`
255
+ }
256
+
257
+ // ══════════════════════════════════════════════════════════════════════
258
+ // WORKSPACE ATTACH: Top N chunks + graph relations (CHUNK CONTENT ONLY)
259
+ // ══════════════════════════════════════════════════════════════════════
260
+
261
+ const mainChunks = topChunks.slice(0, wsConfig.attachTopN)
262
+ const restChunks = topChunks.slice(wsConfig.attachTopN)
263
+
264
+ const attachedMain: Array<{ chunkId: string; path: string }> = []
265
+ const attachedGraph: Array<{ chunkId: string; path: string }> = []
266
+ const alreadyAttached = new Set<string>()
267
+
268
+ for (const chunk of mainChunks) {
269
+ // Skip if score too low
270
+ if ((chunk._finalScore ?? 0) < wsConfig.minScoreMain) continue
271
+
272
+ // Attach chunk directly (no file read needed — chunk.content already has it)
273
+ const chunkId = chunk.chunkId || `${chunk.file}:chunk-${chunk.index ?? 0}`
274
+
275
+ workspaceCache.attach({
276
+ chunkId,
277
+ path: chunk.file,
278
+ content: chunk.content,
279
+ chunkIndex: chunk.index ?? 0,
280
+ role: "search-main",
281
+ attachedAt: Date.now(),
282
+ attachedBy: args.query,
283
+ score: chunk._finalScore,
284
+ metadata: {
285
+ language: chunk.language,
286
+ function_name: chunk.function_name,
287
+ class_name: chunk.class_name,
288
+ heading_context: chunk.heading_context,
289
+ startLine: chunk.startLine,
290
+ endLine: chunk.endLine,
291
+ },
292
+ })
293
+
294
+ attachedMain.push({ chunkId, path: chunk.file })
295
+ alreadyAttached.add(chunkId)
296
+
297
+ // Attach graph relations (imports, extends, used_by)
298
+ if (chunk.relatedContext && chunk.relatedContext.length > 0) {
299
+ const topRelated = chunk.relatedContext
300
+ .filter((rel: any) => rel.score >= wsConfig.minScoreRelated)
301
+ .sort((a: any, b: any) => b.score - a.score)
302
+ .slice(0, wsConfig.attachRelatedPerChunk)
303
+
304
+ for (const rel of topRelated) {
305
+ const relChunkId = rel.chunkId || `${rel.file}:chunk-${rel.index ?? 0}`
306
+ if (alreadyAttached.has(relChunkId)) continue
307
+
308
+ workspaceCache.attach({
309
+ chunkId: relChunkId,
310
+ path: rel.file,
311
+ content: rel.content,
312
+ chunkIndex: rel.index ?? 0,
313
+ role: "search-graph",
314
+ attachedAt: Date.now(),
315
+ attachedBy: `${args.query} (${rel.relation} from ${chunkId})`,
316
+ score: rel.score,
317
+ metadata: {
318
+ language: rel.language,
319
+ relation: rel.relation,
320
+ mainChunkId: chunkId,
321
+ startLine: rel.startLine,
322
+ endLine: rel.endLine,
323
+ },
324
+ })
325
+
326
+ attachedGraph.push({ chunkId: relChunkId, path: rel.file })
327
+ alreadyAttached.add(relChunkId)
328
+ }
329
+ }
330
+ }
351
331
 
352
332
  // ── Flush workspace to disk immediately (don't rely on debounce) ─────
353
333
  if (attachedMain.length > 0 || attachedGraph.length > 0) {
@@ -358,7 +338,7 @@ Examples:
358
338
  // BUILD OUTPUT: Attached (summary) + Rest (summary only)
359
339
  // ══════════════════════════════════════════════════════════════════════
360
340
 
361
- const topScore = sortedGroups[0].best._finalScore ?? 0
341
+ const topScore = topChunks[0]?._finalScore ?? 0
362
342
  const hasBM25Only = allResults.some((r: any) => r._bm25Only)
363
343
  const scope = args.searchAll ? "all indexes" : `index "${indexName}"`
364
344
  const filterLabel = args.filter ? ` filter:"${args.filter}"` : ""
@@ -372,67 +352,65 @@ Examples:
372
352
  output += `> **Low confidence.** Best score: ${topScore.toFixed(3)}. Try more specific keywords.\n\n`
373
353
  }
374
354
 
375
- // ── Attached files (summary — full content in workspace injection) ─────
376
- if (attachedMain.length > 0) {
377
- const totalAttached = attachedMain.length + attachedGraph.length
378
- output += `### Attached to workspace (${totalAttached} files)\n\n`
379
-
380
- for (let i = 0; i < attachedMain.length; i++) {
381
- const group = topGroups.find(g => g.best.file === attachedMain[i])
382
- if (!group) continue
383
- const r = group.best
384
- const score = (r._finalScore ?? 0).toFixed(3)
385
- const chunkNote = group.chunks.length > 1 ? ` (${group.chunks.length} sections)` : ""
386
-
387
- const metaParts: string[] = []
388
- if (r.language && r.language !== "unknown") metaParts.push(r.language)
389
- if (r.function_name) metaParts.push(`fn: ${r.function_name}`)
390
- if (r.class_name) metaParts.push(`class: ${r.class_name}`)
391
- const metaLine = metaParts.length > 0 ? ` — ${metaParts.join(", ")}` : ""
392
-
393
- output += `${i + 1}. **${r.file}** score: ${score}${chunkNote}${metaLine}\n`
394
- }
395
-
396
- if (attachedGraph.length > 0) {
397
- output += `\n**Graph relations:**\n`
398
- for (const graphFile of attachedGraph) {
399
- const entry = workspaceCache.get(graphFile)
400
- const relation = entry?.metadata?.relation || "related"
401
- const mainFile = entry?.metadata?.mainFile
402
- const mainBasename = mainFile ? path.basename(mainFile) : "?"
403
- output += `- ${graphFile} (${relation} from ${mainBasename})\n`
404
- }
405
- }
406
- output += `\n`
407
- }
408
-
409
- // ── Rest files (summary only — not attached) ──────────────────────────
410
- if (restGroups.length > 0) {
411
- output += `### Additional results (summary only)\n\n`
412
- for (let i = 0; i < restGroups.length; i++) {
413
- const { best: r, chunks } = restGroups[i]
414
- const score = (r._finalScore ?? 0).toFixed(3)
415
- const chunkNote = chunks.length > 1 ? ` (${chunks.length} sections)` : ""
416
- const indexLabel = args.searchAll ? ` [${r._index}]` : ""
417
-
418
- const metaParts: string[] = []
419
- if (r.language && r.language !== "unknown") metaParts.push(r.language)
420
- if (r.function_name) metaParts.push(`fn: ${r.function_name}`)
421
- if (r.class_name) metaParts.push(`class: ${r.class_name}`)
422
- const metaLine = metaParts.length > 0 ? ` — ${metaParts.join(", ")}` : ""
423
-
424
- output += `${attachedMain.length + i + 1}. ${r.file}${indexLabel} score: ${score}${chunkNote}${metaLine}\n`
425
- }
426
- output += `\nUse \`workspace.attach("path")\` to attach additional files.\n`
427
- }
428
-
429
- // ── Footer ────────────────────────────────────────────────────────────
430
- const totalChunks = allResults.length
431
- const uniqueFiles = sortedGroups.length
432
- output += `\n---\n`
433
- output += `*${uniqueFiles} files (${totalChunks} chunks) | `
434
- output += `Workspace: ${workspaceCache.size} files, ${workspaceCache.totalTokens.toLocaleString()} tokens*\n`
435
- output += `*Attached files are in workspace context — reference them directly without read().*`
355
+ // ── Attached chunks (summary — full content in workspace injection) ─────
356
+ if (attachedMain.length > 0) {
357
+ const totalAttached = attachedMain.length + attachedGraph.length
358
+ output += `### Attached to workspace (${totalAttached} chunks)\n\n`
359
+
360
+ for (let i = 0; i < attachedMain.length; i++) {
361
+ const { chunkId, path: filePath } = attachedMain[i]
362
+ const chunk = mainChunks[i]
363
+ if (!chunk) continue
364
+
365
+ const score = (chunk._finalScore ?? 0).toFixed(3)
366
+ const metaParts: string[] = []
367
+ if (chunk.language && chunk.language !== "unknown") metaParts.push(chunk.language)
368
+ if (chunk.function_name) metaParts.push(`fn: ${chunk.function_name}`)
369
+ if (chunk.class_name) metaParts.push(`class: ${chunk.class_name}`)
370
+ const metaLine = metaParts.length > 0 ? ` — ${metaParts.join(", ")}` : ""
371
+
372
+ output += `${i + 1}. **${chunkId}** (${filePath}) score: ${score}${metaLine}\n`
373
+ }
374
+
375
+ if (attachedGraph.length > 0) {
376
+ output += `\n**Graph relations:**\n`
377
+ for (const { chunkId, path: filePath } of attachedGraph) {
378
+ const entry = workspaceCache.get(chunkId)
379
+ const relation = entry?.metadata?.relation || "related"
380
+ const mainChunkId = entry?.metadata?.mainChunkId
381
+ const mainBasename = mainChunkId ? mainChunkId.split(":")[0] : "?"
382
+ output += `- ${chunkId} (${relation} from ${mainBasename})\n`
383
+ }
384
+ }
385
+ output += `\n`
386
+ }
387
+
388
+ // ── Rest chunks (summary only — not attached) ──────────────────────────
389
+ if (restChunks.length > 0) {
390
+ output += `### Additional results (summary only)\n\n`
391
+ for (let i = 0; i < restChunks.length; i++) {
392
+ const chunk = restChunks[i]
393
+ const chunkId = chunk.chunkId || `${chunk.file}:chunk-${chunk.index ?? 0}`
394
+ const score = (chunk._finalScore ?? 0).toFixed(3)
395
+ const indexLabel = args.searchAll ? ` [${chunk._index}]` : ""
396
+
397
+ const metaParts: string[] = []
398
+ if (chunk.language && chunk.language !== "unknown") metaParts.push(chunk.language)
399
+ if (chunk.function_name) metaParts.push(`fn: ${chunk.function_name}`)
400
+ if (chunk.class_name) metaParts.push(`class: ${chunk.class_name}`)
401
+ const metaLine = metaParts.length > 0 ? ` — ${metaParts.join(", ")}` : ""
402
+
403
+ output += `${attachedMain.length + i + 1}. ${chunkId}${indexLabel} score: ${score}${metaLine}\n`
404
+ }
405
+ output += `\nUse \`workspace.attach(chunkId)\` to attach additional chunks.\n`
406
+ }
407
+
408
+ // ── Footer ────────────────────────────────────────────────────────────
409
+ const totalChunks = allResults.length
410
+ output += `\n---\n`
411
+ output += `*${totalChunks} chunks found | `
412
+ output += `Workspace: ${workspaceCache.size} chunks, ${workspaceCache.totalTokens.toLocaleString()} tokens*\n`
413
+ output += `*Attached chunks are in workspace context — reference them directly without read().*`
436
414
 
437
415
  return output
438
416
  } catch (error: any) {