gsd-pi 2.3.8 → 2.3.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. package/README.md +5 -2
  2. package/dist/cli.js +32 -2
  3. package/dist/logo.d.ts +16 -0
  4. package/dist/logo.js +25 -0
  5. package/dist/onboarding.d.ts +43 -0
  6. package/dist/onboarding.js +425 -0
  7. package/dist/wizard.js +8 -0
  8. package/package.json +1 -1
  9. package/scripts/postinstall.js +38 -9
  10. package/src/resources/GSD-WORKFLOW.md +2 -2
  11. package/src/resources/extensions/google-search/index.ts +1 -1
  12. package/src/resources/extensions/gsd/auto.ts +353 -144
  13. package/src/resources/extensions/gsd/files.ts +9 -7
  14. package/src/resources/extensions/gsd/index.ts +3 -1
  15. package/src/resources/extensions/gsd/metrics.ts +7 -5
  16. package/src/resources/extensions/gsd/migrate/command.ts +4 -1
  17. package/src/resources/extensions/gsd/migrate/validator.ts +5 -3
  18. package/src/resources/extensions/gsd/prompts/system.md +1 -1
  19. package/src/resources/extensions/gsd/tests/migrate-parser.test.ts +5 -5
  20. package/src/resources/extensions/gsd/tests/migrate-validator-parsers.test.ts +3 -3
  21. package/src/resources/extensions/gsd/tests/parsers.test.ts +94 -0
  22. package/src/resources/extensions/gsd/tests/resolve-ts-hooks.mjs +23 -6
  23. package/src/resources/extensions/gsd/tests/worktree-integration.test.ts +253 -0
  24. package/src/resources/extensions/gsd/tests/worktree.test.ts +116 -1
  25. package/src/resources/extensions/gsd/unit-runtime.ts +22 -1
  26. package/src/resources/extensions/gsd/workspace-index.ts +2 -2
  27. package/src/resources/extensions/gsd/worktree-command.ts +147 -41
  28. package/src/resources/extensions/gsd/worktree.ts +105 -8
  29. package/src/resources/extensions/mcporter/index.ts +21 -2
  30. package/src/resources/extensions/search-the-web/command-search-provider.ts +95 -0
  31. package/src/resources/extensions/search-the-web/http.ts +1 -1
  32. package/src/resources/extensions/search-the-web/index.ts +9 -3
  33. package/src/resources/extensions/search-the-web/provider.ts +118 -0
  34. package/src/resources/extensions/search-the-web/tavily.ts +116 -0
  35. package/src/resources/extensions/search-the-web/tool-llm-context.ts +265 -108
  36. package/src/resources/extensions/search-the-web/tool-search.ts +161 -88
  37. package/src/resources/extensions/subagent/index.ts +1 -1
@@ -1,10 +1,16 @@
1
1
  /**
2
- * search_and_read tool — Brave LLM Context API.
2
+ * search_and_read tool — web search + content extraction for AI agents.
3
3
  *
4
4
  * Single-call web search + page content extraction optimized for AI agents.
5
5
  * Unlike search-the-web → fetch_page (two steps), this returns pre-extracted,
6
6
  * relevance-scored page content in one API call.
7
7
  *
8
+ * Supports two backends:
9
+ * - Tavily: POST-based, client-side token budgeting via budgetContent()
10
+ * - Brave: GET-based LLM Context API with server-side budgeting
11
+ *
12
+ * Provider is selected by resolveSearchProvider() — same as tool-search.ts.
13
+ *
8
14
  * Best for: "I need to know about X" — when you want content, not just links.
9
15
  * Use search-the-web when you want links/URLs to browse selectively.
10
16
  */
@@ -19,6 +25,9 @@ import { LRUTTLCache } from "./cache";
19
25
  import { fetchWithRetryTimed, HttpError, classifyError, type RateLimitInfo } from "./http";
20
26
  import { normalizeQuery, extractDomain } from "./url-utils";
21
27
  import { formatLLMContext, type LLMContextSnippet, type LLMContextSource } from "./format";
28
+ import type { TavilyResult, TavilySearchResponse } from "./tavily";
29
+ import { publishedDateToAge } from "./tavily";
30
+ import { getTavilyApiKey, resolveSearchProvider } from "./provider";
22
31
 
23
32
  // =============================================================================
24
33
  // Types
@@ -70,6 +79,7 @@ interface LLMContextDetails {
70
79
  errorKind?: string;
71
80
  error?: string;
72
81
  retryAfterMs?: number;
82
+ provider?: 'tavily' | 'brave';
73
83
  }
74
84
 
75
85
  // =============================================================================
@@ -101,6 +111,125 @@ function estimateTokens(text: string): number {
101
111
  return Math.ceil(text.length / 4);
102
112
  }
103
113
 
114
+ /**
115
+ * Distribute a token budget across Tavily results to build LLM context.
116
+ *
117
+ * Client-side equivalent of Brave's server-side LLM Context API budgeting.
118
+ * Filters by score threshold, sorts by relevance, and truncates content to fit
119
+ * within the token budget. Uses `raw_content` when available (richer text from
120
+ * Tavily's "advanced" search depth), falling back to `content`.
121
+ *
122
+ * @param results — Raw Tavily search results
123
+ * @param maxTokens — Caller-requested token limit
124
+ * @param threshold — Minimum score (0–1) for inclusion
125
+ * @returns Grounding snippets, source metadata, and estimated token usage
126
+ */
127
+ export function budgetContent(
128
+ results: TavilyResult[],
129
+ maxTokens: number,
130
+ threshold: number,
131
+ ): { grounding: LLMContextSnippet[]; sources: Record<string, LLMContextSource>; estimatedTokens: number } {
132
+ // Filter by score threshold and sort by score descending (highest relevance first)
133
+ const filtered = results
134
+ .filter(r => r.score >= threshold)
135
+ .sort((a, b) => b.score - a.score);
136
+
137
+ if (filtered.length === 0) {
138
+ return { grounding: [], sources: {}, estimatedTokens: 0 };
139
+ }
140
+
141
+ // Use 80% of maxTokens as effective budget (conservative to avoid overshoot)
142
+ const effectiveBudget = Math.floor(maxTokens * 0.8);
143
+ const perResultBudget = Math.max(1, Math.floor(effectiveBudget / filtered.length));
144
+
145
+ const grounding: LLMContextSnippet[] = [];
146
+ const sources: Record<string, LLMContextSource> = {};
147
+ let totalTokens = 0;
148
+
149
+ for (const result of filtered) {
150
+ if (totalTokens >= effectiveBudget) break;
151
+
152
+ const remainingBudget = effectiveBudget - totalTokens;
153
+ const budget = Math.min(perResultBudget, remainingBudget);
154
+
155
+ // Use raw_content if available, fall back to content
156
+ let text = result.raw_content ?? result.content;
157
+
158
+ // Truncate to per-result budget (tokens → chars at ~4 chars/token)
159
+ const maxChars = budget * 4;
160
+ if (text.length > maxChars) {
161
+ text = text.slice(0, maxChars);
162
+ }
163
+
164
+ const tokens = estimateTokens(text);
165
+ totalTokens += tokens;
166
+
167
+ grounding.push({
168
+ url: result.url,
169
+ title: result.title || "(untitled)",
170
+ snippets: [text],
171
+ });
172
+
173
+ // Build source with age in [null, null, ageString] format for formatLLMContext compatibility.
174
+ // formatLLMContext reads source.age?.[2] for the human-readable age display.
175
+ const ageString = result.published_date ? publishedDateToAge(result.published_date) : undefined;
176
+ sources[result.url] = {
177
+ title: result.title || "(untitled)",
178
+ hostname: extractDomain(result.url),
179
+ age: ageString ? [null as unknown as string, null as unknown as string, ageString] : null,
180
+ };
181
+ }
182
+
183
+ return { grounding, sources, estimatedTokens: totalTokens };
184
+ }
185
+
186
+ // =============================================================================
187
+ // Tavily LLM Context Execution
188
+ // =============================================================================
189
+
190
+ /** Map threshold names to Tavily score cutoffs. */
191
+ const THRESHOLD_TO_SCORE: Record<string, number> = {
192
+ strict: 0.7,
193
+ balanced: 0.5,
194
+ lenient: 0.3,
195
+ };
196
+
197
+ /**
198
+ * Execute a search_and_read query against the Tavily API.
199
+ *
200
+ * Uses POST with advanced search depth + raw_content to get full page text,
201
+ * then feeds results through budgetContent() for client-side token budgeting.
202
+ */
203
+ async function executeTavilyLLMContext(
204
+ params: { query: string; maxTokens: number; maxUrls: number; threshold: string; count: number },
205
+ signal?: AbortSignal,
206
+ ): Promise<{ cached: CachedLLMContext; latencyMs: number; rateLimit?: RateLimitInfo }> {
207
+ const scoreThreshold = THRESHOLD_TO_SCORE[params.threshold] ?? 0.5;
208
+
209
+ const requestBody: Record<string, unknown> = {
210
+ query: params.query,
211
+ max_results: params.count,
212
+ search_depth: "advanced",
213
+ include_raw_content: true,
214
+ include_answer: true,
215
+ };
216
+
217
+ const timed = await fetchWithRetryTimed("https://api.tavily.com/search", {
218
+ method: "POST",
219
+ headers: {
220
+ "Content-Type": "application/json",
221
+ "Authorization": `Bearer ${getTavilyApiKey()}`,
222
+ },
223
+ body: JSON.stringify(requestBody),
224
+ signal,
225
+ }, 2);
226
+
227
+ const data: TavilySearchResponse = await timed.response.json();
228
+ const cached = budgetContent(data.results, params.maxTokens, scoreThreshold);
229
+
230
+ return { cached, latencyMs: timed.latencyMs, rateLimit: timed.rateLimit };
231
+ }
232
+
104
233
  // =============================================================================
105
234
  // Tool Registration
106
235
  // =============================================================================
@@ -112,7 +241,7 @@ export function registerLLMContextTool(pi: ExtensionAPI) {
112
241
  description:
113
242
  "Search the web AND read page content in a single call. Returns pre-extracted, " +
114
243
  "relevance-scored text from multiple pages — no separate fetch_page needed. " +
115
- "Powered by Brave's LLM Context API. Best when you need content, not just links. " +
244
+ "Best when you need content, not just links. " +
116
245
  "For selective URL browsing, use search-the-web + fetch_page instead.",
117
246
  promptSnippet: "Search and read web page content in one step",
118
247
  promptGuidelines: [
@@ -160,12 +289,15 @@ export function registerLLMContextTool(pi: ExtensionAPI) {
160
289
  return { content: [{ type: "text", text: "Search cancelled." }] };
161
290
  }
162
291
 
163
- const apiKey = getBraveApiKey();
164
- if (!apiKey) {
292
+ // ------------------------------------------------------------------
293
+ // Resolve search provider
294
+ // ------------------------------------------------------------------
295
+ const provider = resolveSearchProvider();
296
+ if (!provider) {
165
297
  return {
166
- content: [{ type: "text", text: "Search unavailable: BRAVE_API_KEY is not set. Use secure_env_collect to set BRAVE_API_KEY." }],
298
+ content: [{ type: "text", text: "search_and_read unavailable: No search API key is set. Use secure_env_collect to set TAVILY_API_KEY or BRAVE_API_KEY." }],
167
299
  isError: true,
168
- details: { errorKind: "auth_error", error: "BRAVE_API_KEY not set" } satisfies Partial<LLMContextDetails>,
300
+ details: { errorKind: "auth_error", error: "No search API key set" } satisfies Partial<LLMContextDetails>,
169
301
  };
170
302
  }
171
303
 
@@ -175,9 +307,9 @@ export function registerLLMContextTool(pi: ExtensionAPI) {
175
307
  const count = params.count ?? 20;
176
308
 
177
309
  // ------------------------------------------------------------------
178
- // Cache lookup
310
+ // Cache lookup (provider-prefixed key)
179
311
  // ------------------------------------------------------------------
180
- const cacheKey = normalizeQuery(params.query) + `|t:${maxTokens}|u:${maxUrls}|th:${threshold}|c:${count}`;
312
+ const cacheKey = normalizeQuery(params.query) + `|t:${maxTokens}|u:${maxUrls}|th:${threshold}|c:${count}|p:${provider}`;
181
313
  const cached = contextCache.get(cacheKey);
182
314
 
183
315
  if (cached) {
@@ -202,6 +334,7 @@ export function registerLLMContextTool(pi: ExtensionAPI) {
202
334
  cached: true,
203
335
  threshold,
204
336
  maxTokens,
337
+ provider,
205
338
  };
206
339
 
207
340
  return { content: [{ type: "text", text: content }], details };
@@ -211,118 +344,139 @@ export function registerLLMContextTool(pi: ExtensionAPI) {
211
344
 
212
345
  try {
213
346
  // ------------------------------------------------------------------
214
- // Build LLM Context API request
347
+ // Provider-specific fetch
215
348
  // ------------------------------------------------------------------
216
- const url = new URL("https://api.search.brave.com/res/v1/llm/context");
217
- url.searchParams.append("q", params.query);
218
- url.searchParams.append("count", String(count));
219
- url.searchParams.append("maximum_number_of_tokens", String(maxTokens));
220
- url.searchParams.append("maximum_number_of_urls", String(maxUrls));
221
- url.searchParams.append("context_threshold_mode", threshold);
222
-
223
- // Use a custom fetch flow to read error bodies from the Brave API
224
- let timed;
225
- try {
226
- timed = await fetchWithRetryTimed(url.toString(), {
227
- method: "GET",
228
- headers: braveHeaders(),
349
+ let result: CachedLLMContext;
350
+ let latencyMs: number | undefined;
351
+ let rateLimit: RateLimitInfo | undefined;
352
+
353
+ if (provider === "tavily") {
354
+ const tavilyResult = await executeTavilyLLMContext(
355
+ { query: params.query, maxTokens, maxUrls, threshold, count },
229
356
  signal,
230
- }, 2);
231
- } catch (fetchErr) {
232
- // Try to extract Brave's structured error detail from the response body.
233
- // This is especially useful for plan/subscription errors (OPTION_NOT_IN_PLAN).
234
- let errorMessage: string | undefined;
235
- let errorKindOverride: string | undefined;
236
- if (fetchErr instanceof HttpError && fetchErr.response) {
237
- try {
238
- const body = await fetchErr.response.clone().json().catch(() => null);
239
- if (body?.error?.detail) {
240
- errorMessage = body.error.detail;
241
- if (body.error.code === "OPTION_NOT_IN_PLAN") {
242
- errorKindOverride = "plan_error";
243
- errorMessage = `LLM Context API not available on your current Brave plan. ${body.error.detail} Upgrade at https://api-dashboard.search.brave.com/app/subscriptions — or use search-the-web + fetch_page as an alternative.`;
357
+ );
358
+ result = tavilyResult.cached;
359
+ latencyMs = tavilyResult.latencyMs;
360
+ rateLimit = tavilyResult.rateLimit;
361
+ } else {
362
+ // ================================================================
363
+ // BRAVE PATH (unchanged API logic)
364
+ // ================================================================
365
+ const url = new URL("https://api.search.brave.com/res/v1/llm/context");
366
+ url.searchParams.append("q", params.query);
367
+ url.searchParams.append("count", String(count));
368
+ url.searchParams.append("maximum_number_of_tokens", String(maxTokens));
369
+ url.searchParams.append("maximum_number_of_urls", String(maxUrls));
370
+ url.searchParams.append("context_threshold_mode", threshold);
371
+
372
+ // Use a custom fetch flow to read error bodies from the Brave API
373
+ let timed;
374
+ try {
375
+ timed = await fetchWithRetryTimed(url.toString(), {
376
+ method: "GET",
377
+ headers: braveHeaders(),
378
+ signal,
379
+ }, 2);
380
+ } catch (fetchErr) {
381
+ // Try to extract Brave's structured error detail from the response body.
382
+ // This is especially useful for plan/subscription errors (OPTION_NOT_IN_PLAN).
383
+ let errorMessage: string | undefined;
384
+ let errorKindOverride: string | undefined;
385
+ if (fetchErr instanceof HttpError && fetchErr.response) {
386
+ try {
387
+ const body = await fetchErr.response.clone().json().catch(() => null);
388
+ if (body?.error?.detail) {
389
+ errorMessage = body.error.detail;
390
+ if (body.error.code === "OPTION_NOT_IN_PLAN") {
391
+ errorKindOverride = "plan_error";
392
+ errorMessage = `LLM Context API not available on your current Brave plan. ${body.error.detail} Upgrade at https://api-dashboard.search.brave.com/app/subscriptions — or use search-the-web + fetch_page as an alternative.`;
393
+ }
244
394
  }
245
- }
246
- } catch { /* body already consumed or parse error — use generic message */ }
395
+ } catch { /* body already consumed or parse error — use generic message */ }
396
+ }
397
+ const classified = classifyError(fetchErr);
398
+ const message = errorMessage || classified.message;
399
+ return {
400
+ content: [{ type: "text", text: `search_and_read unavailable: ${message}` }],
401
+ details: {
402
+ errorKind: errorKindOverride || classified.kind,
403
+ error: message,
404
+ retryAfterMs: classified.retryAfterMs,
405
+ query: params.query,
406
+ provider,
407
+ } satisfies Partial<LLMContextDetails>,
408
+ isError: true,
409
+ };
247
410
  }
248
- const classified = classifyError(fetchErr);
249
- const message = errorMessage || classified.message;
250
- return {
251
- content: [{ type: "text", text: `search_and_read unavailable: ${message}` }],
252
- details: {
253
- errorKind: errorKindOverride || classified.kind,
254
- error: message,
255
- retryAfterMs: classified.retryAfterMs,
256
- query: params.query,
257
- } satisfies Partial<LLMContextDetails>,
258
- isError: true,
259
- };
260
- }
261
-
262
- const data: BraveLLMContextResponse = await timed.response.json();
263
411
 
264
- // ------------------------------------------------------------------
265
- // Normalize response
266
- // ------------------------------------------------------------------
267
- const grounding: LLMContextSnippet[] = [];
268
-
269
- if (data.grounding?.generic) {
270
- for (const item of data.grounding.generic) {
271
- if (item.snippets && item.snippets.length > 0) {
272
- grounding.push({
273
- url: item.url,
274
- title: item.title,
275
- snippets: item.snippets,
276
- });
412
+ const data: BraveLLMContextResponse = await timed.response.json();
413
+
414
+ // ------------------------------------------------------------------
415
+ // Normalize Brave response
416
+ // ------------------------------------------------------------------
417
+ const grounding: LLMContextSnippet[] = [];
418
+
419
+ if (data.grounding?.generic) {
420
+ for (const item of data.grounding.generic) {
421
+ if (item.snippets && item.snippets.length > 0) {
422
+ grounding.push({
423
+ url: item.url,
424
+ title: item.title,
425
+ snippets: item.snippets,
426
+ });
427
+ }
277
428
  }
278
429
  }
279
- }
280
430
 
281
- // Include POI data if present
282
- if (data.grounding?.poi && data.grounding.poi.snippets?.length) {
283
- grounding.push({
284
- url: data.grounding.poi.url,
285
- title: data.grounding.poi.title || data.grounding.poi.name,
286
- snippets: data.grounding.poi.snippets,
287
- });
288
- }
431
+ // Include POI data if present
432
+ if (data.grounding?.poi && data.grounding.poi.snippets?.length) {
433
+ grounding.push({
434
+ url: data.grounding.poi.url,
435
+ title: data.grounding.poi.title || data.grounding.poi.name,
436
+ snippets: data.grounding.poi.snippets,
437
+ });
438
+ }
289
439
 
290
- // Include map data if present
291
- if (data.grounding?.map) {
292
- for (const item of data.grounding.map) {
293
- if (item.snippets?.length) {
294
- grounding.push({
295
- url: item.url,
296
- title: item.title || item.name,
297
- snippets: item.snippets,
298
- });
440
+ // Include map data if present
441
+ if (data.grounding?.map) {
442
+ for (const item of data.grounding.map) {
443
+ if (item.snippets?.length) {
444
+ grounding.push({
445
+ url: item.url,
446
+ title: item.title || item.name,
447
+ snippets: item.snippets,
448
+ });
449
+ }
299
450
  }
300
451
  }
301
- }
302
452
 
303
- const sources: Record<string, LLMContextSource> = {};
304
- if (data.sources) {
305
- for (const [sourceUrl, sourceInfo] of Object.entries(data.sources)) {
306
- sources[sourceUrl] = {
307
- title: sourceInfo.title,
308
- hostname: sourceInfo.hostname,
309
- age: sourceInfo.age,
310
- };
453
+ const sources: Record<string, LLMContextSource> = {};
454
+ if (data.sources) {
455
+ for (const [sourceUrl, sourceInfo] of Object.entries(data.sources)) {
456
+ sources[sourceUrl] = {
457
+ title: sourceInfo.title,
458
+ hostname: sourceInfo.hostname,
459
+ age: sourceInfo.age,
460
+ };
461
+ }
311
462
  }
312
- }
313
463
 
314
- // Estimate total token count from all snippets
315
- const allText = grounding.map(g => g.snippets.join(" ")).join(" ");
316
- const estimatedTokens = estimateTokens(allText);
464
+ // Estimate total token count from all snippets
465
+ const allText = grounding.map(g => g.snippets.join(" ")).join(" ");
466
+ const estimatedTokens = estimateTokens(allText);
317
467
 
318
- // Cache the results
319
- contextCache.set(cacheKey, { grounding, sources, estimatedTokens });
468
+ result = { grounding, sources, estimatedTokens };
469
+ latencyMs = timed.latencyMs;
470
+ rateLimit = timed.rateLimit;
471
+ }
320
472
 
321
473
  // ------------------------------------------------------------------
322
- // Format output
474
+ // Shared post-fetch: cache, format, truncate, return
323
475
  // ------------------------------------------------------------------
324
- const output = formatLLMContext(params.query, grounding, sources, {
325
- tokenCount: estimatedTokens,
476
+ contextCache.set(cacheKey, result);
477
+
478
+ const output = formatLLMContext(params.query, result.grounding, result.sources, {
479
+ tokenCount: result.estimatedTokens,
326
480
  });
327
481
 
328
482
  const truncation = truncateHead(output, { maxLines: DEFAULT_MAX_LINES, maxBytes: DEFAULT_MAX_BYTES });
@@ -333,17 +487,18 @@ export function registerLLMContextTool(pi: ExtensionAPI) {
333
487
  content += `\n\n[Truncated. Full content: ${tempFile}]`;
334
488
  }
335
489
 
336
- const totalSnippets = grounding.reduce((sum, g) => sum + g.snippets.length, 0);
490
+ const totalSnippets = result.grounding.reduce((sum, g) => sum + g.snippets.length, 0);
337
491
  const details: LLMContextDetails = {
338
492
  query: params.query,
339
- sourceCount: grounding.length,
493
+ sourceCount: result.grounding.length,
340
494
  snippetCount: totalSnippets,
341
- estimatedTokens,
495
+ estimatedTokens: result.estimatedTokens,
342
496
  cached: false,
343
- latencyMs: timed.latencyMs,
344
- rateLimit: timed.rateLimit,
497
+ latencyMs,
498
+ rateLimit,
345
499
  threshold,
346
500
  maxTokens,
501
+ provider,
347
502
  };
348
503
 
349
504
  return { content: [{ type: "text", text: content }], details };
@@ -355,6 +510,7 @@ export function registerLLMContextTool(pi: ExtensionAPI) {
355
510
  errorKind: classified.kind,
356
511
  error: classified.message,
357
512
  query: params.query,
513
+ provider,
358
514
  } satisfies Partial<LLMContextDetails>,
359
515
  isError: true,
360
516
  };
@@ -383,6 +539,7 @@ export function registerLLMContextTool(pi: ExtensionAPI) {
383
539
  return new Text(theme.fg("error", `✗ ${details.error ?? "Search failed"}`) + kindTag, 0, 0);
384
540
  }
385
541
 
542
+ const providerTag = details?.provider ? theme.fg("dim", ` [${details.provider}]`) : "";
386
543
  const cacheTag = details?.cached ? theme.fg("dim", " [cached]") : "";
387
544
  const latencyTag = details?.latencyMs ? theme.fg("dim", ` ${details.latencyMs}ms`) : "";
388
545
  const tokenTag = details?.estimatedTokens
@@ -391,7 +548,7 @@ export function registerLLMContextTool(pi: ExtensionAPI) {
391
548
 
392
549
  let text = theme.fg("success",
393
550
  `✓ ${details?.sourceCount ?? 0} sources, ${details?.snippetCount ?? 0} snippets for "${details?.query}"`) +
394
- tokenTag + cacheTag + latencyTag;
551
+ providerTag + tokenTag + cacheTag + latencyTag;
395
552
 
396
553
  if (expanded && result.content[0]?.type === "text") {
397
554
  const preview = result.content[0].text.split("\n").slice(0, 10).join("\n");