membot 0.5.2 → 0.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. package/.claude/skills/membot.md +25 -10
  2. package/.cursor/rules/membot.mdc +25 -10
  3. package/README.md +35 -4
  4. package/package.json +8 -5
  5. package/scripts/apply-patches.sh +0 -11
  6. package/src/cli.ts +2 -2
  7. package/src/commands/login-page.mustache +50 -0
  8. package/src/commands/login.ts +83 -0
  9. package/src/config/schemas.ts +17 -5
  10. package/src/constants.ts +13 -1
  11. package/src/context.ts +1 -24
  12. package/src/db/files.ts +21 -25
  13. package/src/db/migrations/003-downloader-columns.ts +58 -0
  14. package/src/db/migrations.ts +2 -1
  15. package/src/ingest/converter/index.ts +9 -0
  16. package/src/ingest/converter/xlsx.ts +111 -0
  17. package/src/ingest/downloaders/browser.ts +180 -0
  18. package/src/ingest/downloaders/generic-web.ts +81 -0
  19. package/src/ingest/downloaders/github.ts +178 -0
  20. package/src/ingest/downloaders/google-docs.ts +56 -0
  21. package/src/ingest/downloaders/google-shared.ts +86 -0
  22. package/src/ingest/downloaders/google-sheets.ts +58 -0
  23. package/src/ingest/downloaders/google-slides.ts +53 -0
  24. package/src/ingest/downloaders/index.ts +182 -0
  25. package/src/ingest/downloaders/linear.ts +291 -0
  26. package/src/ingest/fetcher.ts +104 -129
  27. package/src/ingest/ingest.ts +43 -70
  28. package/src/mcp/instructions.ts +4 -2
  29. package/src/operations/add.ts +6 -4
  30. package/src/operations/info.ts +4 -6
  31. package/src/operations/move.ts +2 -3
  32. package/src/operations/refresh.ts +2 -4
  33. package/src/operations/remove.ts +23 -2
  34. package/src/operations/tree.ts +1 -1
  35. package/src/operations/types.ts +1 -1
  36. package/src/refresh/runner.ts +59 -114
  37. package/src/types/text-modules.d.ts +5 -0
  38. package/patches/@evantahler%2Fmcpx@0.21.4.patch +0 -51
  39. package/src/commands/mcpx.ts +0 -112
  40. package/src/ingest/agent-fetcher.ts +0 -639
package/src/ingest/ingest.ts
@@ -20,7 +20,7 @@ export interface IngestInput {
  exclude?: string;
  follow_symlinks?: boolean;
  refresh_frequency?: string;
- fetcher_hint?: string;
+ downloader?: string;
  change_note?: string;
  force?: boolean;
  }
@@ -161,13 +161,12 @@ async function ingestInline(
  bytes: null,
  markdown: text,
  fetcher: "inline",
- fetcherServer: null,
- fetcherTool: null,
- fetcherArgs: null,
+ downloader: null,
+ downloaderArgs: null,
  refreshSec,
  changeNote: input.change_note ?? null,
  },
- (done, total) => callbacks?.onEntryProgress?.(logicalPath, `embedding ${done}/${total}`),
+ (sublabel) => callbacks?.onEntryProgress?.(logicalPath, sublabel),
  );
  result.version_id = versionId;
  } catch (err) {
@@ -187,38 +186,6 @@ async function ingestUrl(
  force: boolean,
  callbacks?: IngestCallbacks,
  ): Promise<IngestResult> {
- const mcpxAdapter = ctx.mcpx
- ? {
- async search(query: string, options?: { keywordOnly?: boolean; semanticOnly?: boolean }) {
- try {
- const results = await ctx.mcpx!.search(query, options);
- return results.map((r) => ({
- server: r.server,
- tool: r.tool,
- description: r.description ?? undefined,
- score: r.score,
- matchType: r.matchType ?? undefined,
- }));
- } catch (err) {
- logger.debug(`mcpx.search(${query}) failed: ${err instanceof Error ? err.message : String(err)}`);
- return [];
- }
- },
- async listTools(server?: string) {
- const tools = await ctx.mcpx!.listTools(server);
- return tools.map((t) => ({ server: t.server, tool: { name: t.tool.name, description: t.tool.description } }));
- },
- async info(server: string, tool: string) {
- const t = await ctx.mcpx!.info(server, tool);
- if (!t) return undefined;
- return { name: t.name, description: t.description, inputSchema: t.inputSchema };
- },
- async exec(server: string, tool: string, args?: Record<string, unknown>) {
- return ctx.mcpx!.exec(server, tool, args ?? {});
- },
- }
- : null;
-
  const logicalPath = input.logical_path ?? defaultLogicalForUrl(url);
  callbacks?.onEntryStart?.(url);
  const result: IngestEntryResult = {
@@ -228,20 +195,24 @@ async function ingestUrl(
  status: "ok",
  mime_type: null,
  size_bytes: 0,
- fetcher: "http",
+ fetcher: "downloader",
  source_sha256: "",
  };

  try {
- const fetched = await fetchRemote(url, {
- hint: input.fetcher_hint,
- mcpx: mcpxAdapter,
- llm: ctx.config.llm,
- onProgress: (sublabel) => callbacks?.onEntryProgress?.(url, sublabel),
- });
+ callbacks?.onEntryProgress?.(url, "fetching");
+ const fetched = await fetchRemote(
+ url,
+ ctx.config,
+ {
+ downloaderName: input.downloader,
+ onProgress: (sublabel) => callbacks?.onEntryProgress?.(url, sublabel),
+ },
+ ctx.dataDir,
+ );
  result.mime_type = fetched.mimeType;
  result.size_bytes = fetched.bytes.byteLength;
- result.fetcher = fetched.fetcher;
+ result.fetcher = "downloader";
  result.source_sha256 = fetched.sha256;

  if (!force) {
@@ -265,14 +236,13 @@
  sourcePath: url,
  sourceMtimeMs: null,
  sourceSha: fetched.sha256,
- fetcher: fetched.fetcher,
- fetcherServer: fetched.fetcherServer,
- fetcherTool: fetched.fetcherTool,
- fetcherArgs: fetched.fetcherArgs,
+ fetcher: "downloader",
+ downloader: fetched.downloader,
+ downloaderArgs: fetched.downloaderArgs,
  refreshSec,
  changeNote: input.change_note ?? null,
  },
- (done, total) => callbacks?.onEntryProgress?.(url, `embedding ${done}/${total}`),
+ (sublabel) => callbacks?.onEntryProgress?.(url, sublabel),
  );
  result.version_id = versionId;
  } catch (err) {
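The rewritten ingestUrl threads ctx.config and ctx.dataDir into fetchRemote and pins result.fetcher to "downloader". A minimal sketch of the contract these call sites imply; only the field and parameter names visible in the hunks come from the diff, while the type names themselves (FetchedRemote, FetchRemoteOptions) are assumptions, since src/ingest/fetcher.ts is not shown here:

```ts
// Sketch of the fetchRemote contract implied by the ingestUrl call sites above.
// Interface names are hypothetical; the real declarations may differ.
export interface FetchedRemote {
  bytes: Uint8Array;                               // raw source bytes, kept in the blobs table
  mimeType: string;
  sha256: string;                                  // becomes source_sha256 for change detection
  downloader: string | null;                       // e.g. "google-docs"; persisted for refresh
  downloaderArgs: Record<string, unknown> | null;  // replay arguments, also persisted
}

export interface FetchRemoteOptions {
  downloaderName?: string;                         // forced choice from the `downloader` input
  onProgress?: (sublabel: string) => void;         // forwarded to onEntryProgress
}

export declare function fetchRemote(
  url: string,
  config: unknown,  // ctx.config in the diff; the concrete type lives in src/config/schemas.ts
  opts: FetchRemoteOptions,
  dataDir: string,  // ctx.dataDir; presumably where state from `membot login` is kept
): Promise<FetchedRemote>;
```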
@@ -352,13 +322,12 @@ async function ingestLocalFiles(
  sourceMtimeMs: local.mtimeMs,
  sourceSha: local.sha256,
  fetcher: "local",
- fetcherServer: null,
- fetcherTool: null,
- fetcherArgs: null,
+ downloader: null,
+ downloaderArgs: null,
  refreshSec,
  changeNote: input.change_note ?? null,
  },
- (done, total) => callbacks?.onEntryProgress?.(entry.relPathFromBase, `embedding ${done}/${total}`),
+ (sublabel) => callbacks?.onEntryProgress?.(entry.relPathFromBase, sublabel),
  );
  result.version_id = versionId;
  } catch (err) {
@@ -387,9 +356,8 @@ interface PipelineParams {
  sourceMtimeMs: number | null;
  sourceSha: string;
  fetcher: FetcherKind;
- fetcherServer: string | null;
- fetcherTool: string | null;
- fetcherArgs: Record<string, unknown> | null;
+ downloader: string | null;
+ downloaderArgs: Record<string, unknown> | null;
  refreshSec: number | null;
  changeNote: string | null;
  }
@@ -404,8 +372,9 @@ interface PipelineParams {
  async function pipelineForBytes(
  ctx: AppContext,
  p: PipelineParams,
- onEmbedProgress?: (done: number, total: number) => void,
+ onPhase?: (sublabel: string) => void,
  ): Promise<string> {
+ onPhase?.("storing blob");
  await upsertBlob(ctx.db, {
  sha256: p.sourceSha,
  mime_type: p.mime,
@@ -413,6 +382,7 @@
  bytes: p.bytes,
  });

+ onPhase?.("converting");
  const conversion = await convert(p.bytes, p.mime, p.source, ctx.config.llm);
  const markdown = conversion.markdown;
  const contentSha = sha256Hex(new TextEncoder().encode(markdown));
@@ -431,13 +401,12 @@
  markdown,
  contentSha,
  fetcher: p.fetcher,
- fetcherServer: p.fetcherServer,
- fetcherTool: p.fetcherTool,
- fetcherArgs: p.fetcherArgs,
+ downloader: p.downloader,
+ downloaderArgs: p.downloaderArgs,
  refreshSec: p.refreshSec,
  changeNote: p.changeNote,
  },
- onEmbedProgress,
+ onPhase,
  );
  }

@@ -453,9 +422,8 @@ interface PersistParams {
  markdown: string;
  contentSha?: string;
  fetcher: FetcherKind;
- fetcherServer: string | null;
- fetcherTool: string | null;
- fetcherArgs: Record<string, unknown> | null;
+ downloader: string | null;
+ downloaderArgs: Record<string, unknown> | null;
  refreshSec: number | null;
  changeNote: string | null;
  }
@@ -469,14 +437,18 @@ interface PersistParams {
  async function persistVersion(
  ctx: AppContext,
  p: PersistParams,
- onEmbedProgress?: (done: number, total: number) => void,
+ onPhase?: (sublabel: string) => void,
  ): Promise<string> {
+ onPhase?.("describing");
  const description = await describe(p.logicalPath, p.mime, p.markdown, ctx.config.llm);
+ onPhase?.("chunking");
  const chunks = chunkDeterministic(p.markdown, ctx.config.chunker);
  const searchTexts = chunks.map((c) => buildSearchText(p.logicalPath, description, c.content));
  let embeddings: number[][];
  try {
- embeddings = await embed(searchTexts, ctx.config.embedding_model, { onProgress: onEmbedProgress });
+ embeddings = await embed(searchTexts, ctx.config.embedding_model, {
+ onProgress: (done, total) => onPhase?.(`embedding ${done}/${total}`),
+ });
  } catch (err) {
  throw asHelpful(
  err,
@@ -485,6 +457,7 @@
  );
  }

+ onPhase?.("persisting");
  const versionId = millisIso(Date.now());
  const contentSha = p.contentSha ?? sha256Hex(new TextEncoder().encode(p.markdown));
  await insertVersion(ctx.db, {
@@ -501,9 +474,8 @@
  mime_type: p.mime,
  size_bytes: p.bytes?.byteLength ?? new TextEncoder().encode(p.markdown).byteLength,
  fetcher: p.fetcher,
- fetcher_server: p.fetcherServer,
- fetcher_tool: p.fetcherTool,
- fetcher_args: p.fetcherArgs,
+ downloader: p.downloader,
+ downloader_args: p.downloaderArgs,
  refresh_frequency_sec: p.refreshSec,
  refreshed_at: new Date().toISOString(),
  last_refresh_status: "ok",
@@ -521,6 +493,7 @@
  embedding: embeddings[i] ?? new Array(embeddings[0]?.length ?? 0).fill(0),
  })),
  );
+ onPhase?.("indexing");
  await rebuildFts(ctx.db);
  return versionId;
  }
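The embedding-only progress counter is gone: pipelineForBytes and persistVersion now narrate each stage through a single onPhase callback. A sketch of a consumer, assuming only the labels emitted in the hunks above:

```ts
// The 0.6.0 pipeline emits, in order: "storing blob", "converting", "describing",
// "chunking", "embedding <done>/<total>", "persisting", "indexing"; ingestUrl
// additionally emits "fetching" before the pipeline starts.
type OnPhase = (sublabel: string) => void;

const logPhase: OnPhase = (sublabel) => {
  // hypothetical consumer; membot itself routes this into onEntryProgress / ctx.progress
  console.error(`[ingest] ${sublabel}`);
};

// await pipelineForBytes(ctx, params, logPhase);
```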
package/src/mcp/instructions.ts
@@ -11,8 +11,10 @@ indexed with BM25 — so prefer membot_search to membot_read+grep for discovery.
  Workflow:
  1. membot_tree or membot_search to find what already exists before adding new content.
  2. membot_add to ingest a local file, a URL, or a remote document. URLs are
- fetched via mcpx (the chosen invocation is stored so refresh is fast and
- deterministic).
+ fetched via per-service downloaders (Google Docs, Sheets, Slides, GitHub,
+ Linear, with a generic browser print-to-PDF fallback). Authentication
+ comes from the user's logged-in browser cookies (saved via \`membot login\`).
+ Each row stores which downloader was used so refresh is deterministic.
  3. membot_read or membot_search hits to consume content.
  4. membot_write to record agent-authored notes (source_type='inline').

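The downloaders referenced here are the new modules under src/ingest/downloaders/ in the file list above (google-docs.ts, google-sheets.ts, google-slides.ts, github.ts, linear.ts, generic-web.ts, browser.ts, index.ts). Their shared interface is not shown in this diff; a plausible sketch, with every name below an assumption:

```ts
// Hypothetical shape of a per-service downloader; the real contract lives in the
// new src/ingest/downloaders/index.ts, which this diff lists but does not show.
interface Downloader {
  name: string;                // the value persisted in the `downloader` column
  matches(url: URL): boolean;  // URL-based selection, e.g. hostname === "docs.google.com"
  download(
    url: URL,
    opts: { dataDir: string; onProgress?: (sublabel: string) => void },
  ): Promise<{
    bytes: Uint8Array;
    mimeType: string;
    downloaderArgs: Record<string, unknown> | null; // enough to replay the fetch on refresh
  }>;
}
```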
package/src/operations/add.ts
@@ -10,7 +10,7 @@ import { type ResolvedSource, resolveSource } from "../ingest/source-resolver.ts
  import { colors } from "../output/formatter.ts";
  import { defineOperation } from "./types.ts";

- const FetcherKindEnum = z.enum(["http", "mcpx", "local", "inline"]);
+ const FetcherKindEnum = z.enum(["downloader", "local", "inline"]);

  export const addOperation = defineOperation({
  name: "membot_add",
@@ -19,7 +19,7 @@ export const addOperation = defineOperation({
  - a local file path
  - a local directory (recursive walk, symlinks followed)
  - a glob pattern (e.g. "docs/**/*.md")
- - a URL (fetched via mcpx if configured, otherwise plain HTTP)
+ - a URL (fetched via the per-service downloader registry — Google Docs/Sheets/Slides via export endpoints, GitHub + Linear as rendered HTML, anything else through a generic browser print-to-PDF fallback. All fetches authenticate via the user's logged-in browser session — run \`membot login\` once to sign in.)
  - "inline:<text>" literal
  Pass any number of args; each is resolved independently and the matched entries are concatenated into one response. PDF, DOCX, HTML, images, and other binaries are converted to markdown — native libraries first, vision/OCR for images, LLM fallback for messy or scanned input. Original bytes are kept in the blobs table; \`membot_read bytes=true\` returns them. Setting \`refresh_frequency\` enables automatic refresh from the daemon. By default, re-ingesting an unchanged source (same source_sha256 as the current version) is a no-op and reports \`status: "unchanged"\`; pass \`force=true\` to always create a new version. Each newly-ingested file becomes a new version under its own logical_path; existing versions stay queryable via membot_versions. Directory/glob ingests stream one file at a time — partial failures do not abort the rest; the response lists per-entry status.

@@ -54,10 +54,12 @@ Pass \`logical_path\` to override. For a multi-source / directory / glob walk it
  .default(true)
  .describe("Follow symlinks during directory walks (cycles broken via realpath)"),
  refresh_frequency: z.string().optional().describe("Auto-refresh cadence: 5m | 1h | 24h | 7d. Omit to disable."),
- fetcher_hint: z
+ downloader: z
  .string()
  .optional()
- .describe("Free-form hint passed to mcpx tool search (e.g. 'firecrawl', 'github', 'google docs', 'http')"),
+ .describe(
+ "Force a specific downloader by name (e.g. 'google-docs', 'github', 'generic-web'). Skips URL-based matching.",
+ ),
  change_note: z.string().optional().describe("Free-text note attached to the new version"),
  force: z
  .boolean()
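Per the new schema text, a named `downloader` skips URL-based matching entirely. Selection might look like the following sketch, reusing the hypothetical Downloader shape from above (the registry array and function name are also assumptions):

```ts
// Hypothetical selection logic for the `downloader` override described above.
function pickDownloader(registry: Downloader[], url: URL, forcedName?: string): Downloader {
  if (forcedName) {
    const forced = registry.find((d) => d.name === forcedName);
    if (!forced) throw new Error(`unknown downloader: ${forcedName}`);
    return forced; // forced by name: URL-based matching is skipped
  }
  // first per-service match wins; generic-web is the documented fallback
  const match = registry.find((d) => d.matches(url));
  return match ?? registry.find((d) => d.name === "generic-web")!;
}
```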
package/src/operations/info.ts
@@ -25,9 +25,8 @@ export const infoOperation = defineOperation({
  size_bytes: z.number().nullable(),
  description: z.string().nullable(),
  fetcher: z.string().nullable(),
- fetcher_server: z.string().nullable(),
- fetcher_tool: z.string().nullable(),
- fetcher_args: z.record(z.string(), z.unknown()).nullable(),
+ downloader: z.string().nullable(),
+ downloader_args: z.record(z.string(), z.unknown()).nullable(),
  refresh_frequency_sec: z.number().nullable(),
  refreshed_at: z.string().nullable(),
  last_refresh_status: z.string().nullable(),
@@ -53,9 +52,8 @@ export const infoOperation = defineOperation({
  lines.push(fmt("blob_sha256", orDash(result.blob_sha256)));
  lines.push(fmt("source_sha256", orDash(result.source_sha256)));
  if (result.fetcher) lines.push(fmt("fetcher", result.fetcher));
- if (result.fetcher_server) lines.push(fmt("fetcher_server", result.fetcher_server));
- if (result.fetcher_tool) lines.push(fmt("fetcher_tool", result.fetcher_tool));
- if (result.fetcher_args) lines.push(fmt("fetcher_args", JSON.stringify(result.fetcher_args)));
+ if (result.downloader) lines.push(fmt("downloader", result.downloader));
+ if (result.downloader_args) lines.push(fmt("downloader_args", JSON.stringify(result.downloader_args)));
  lines.push(
  fmt(
  "refresh_frequency",
package/src/operations/move.ts
@@ -54,9 +54,8 @@ export const moveOperation = defineOperation({
  mime_type: cur.mime_type,
  size_bytes: cur.size_bytes,
  fetcher: cur.fetcher,
- fetcher_server: cur.fetcher_server,
- fetcher_tool: cur.fetcher_tool,
- fetcher_args: cur.fetcher_args,
+ downloader: cur.downloader,
+ downloader_args: cur.downloader_args,
  refresh_frequency_sec: cur.refresh_frequency_sec,
  refreshed_at: cur.refreshed_at,
  last_refresh_status: cur.last_refresh_status,
package/src/operations/refresh.ts
@@ -7,7 +7,7 @@ import { defineOperation } from "./types.ts";
  export const refreshOperation = defineOperation({
  name: "membot_refresh",
  cliName: "refresh",
- description: `Re-read a file's source and create a new version only if the source bytes changed. Pass \`logical_path\` to refresh one file, or omit it to refresh every file whose refresh_frequency_sec has elapsed. Local files are detected via mtime+sha; remote files are re-fetched via the same mcpx invocation that was originally used. On auth or network failure the prior version stays current — check \`last_refresh_status\`.`,
+ description: `Re-read a file's source and create a new version only if the source bytes changed. Pass \`logical_path\` to refresh one file, or omit it to refresh every file whose refresh_frequency_sec has elapsed. Local files are detected via mtime+sha; remote files are re-fetched via the same downloader (Google Docs, GitHub, etc.) that was originally chosen. On auth or network failure the prior version stays current — check \`last_refresh_status\`. If the failure mentions a login redirect, re-run \`membot login\` and try again.`,
  inputSchema: z.object({
  logical_path: z.string().optional().describe("Single path to refresh; omit for all-due"),
  force: z.boolean().default(false).describe("Re-embed even if source sha is unchanged"),
@@ -60,9 +60,7 @@ export const refreshOperation = defineOperation({
  for (const path of targets) {
  ctx.progress.tick(path);
  try {
- const r = await refreshOne(ctx, path, input.force, (done, total) =>
- ctx.progress.update(`embedding ${done}/${total}`),
- );
+ const r = await refreshOne(ctx, path, input.force, (sublabel) => ctx.progress.update(sublabel));
  out.push(r);
  } catch (err) {
  out.push({ logical_path: path, status: "failed", error: err instanceof Error ? err.message : String(err) });
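refreshOne's contract follows from the descriptions above: replay the stored downloader, compare the fresh source sha against the current version, and persist only on change (or when forced). A sketch under those assumptions, reusing the hypothetical fetchRemote and OnPhase from earlier; the accessor and column names are assumptions:

```ts
// Hypothetical outline of refreshOne for remote sources. The stored `downloader`
// column makes the re-fetch deterministic; getCurrentVersion is an assumed accessor.
async function refreshOneSketch(ctx: AppContext, path: string, force: boolean, onPhase: OnPhase) {
  const cur = await getCurrentVersion(ctx.db, path);
  const fetched = await fetchRemote(
    cur.source_path,
    ctx.config,
    { downloaderName: cur.downloader ?? undefined, onProgress: onPhase },
    ctx.dataDir,
  );
  if (!force && fetched.sha256 === cur.source_sha256) {
    return { logical_path: path, status: "unchanged" }; // prior version stays current
  }
  // changed (or forced): run the same pipelineForBytes(...) path that membot_add uses
}
```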
package/src/operations/remove.ts
@@ -10,7 +10,7 @@ export const removeOperation = defineOperation({
  name: "membot_delete",
  cliName: "rm",
  bashEquivalent: "rm",
- description: `Tombstone one or more logical_paths so they no longer appear in membot_list / membot_tree / membot_search. Each \`paths\` arg is independently treated as either a literal logical_path or a glob pattern (e.g. "docs/**/*.md"); globs are matched against current logical_paths in the DB, not the filesystem. The union of matches is deduplicated, then tombstoned one at a time — partial failures are reported per-entry without aborting the rest. An input arg that matches zero current files is an error (the response includes which arg). Old versions remain queryable via membot_versions and membot_read with an explicit version. Use membot_prune to permanently drop history.`,
+ description: `Tombstone one or more logical_paths so they no longer appear in membot_list / membot_tree / membot_search. Each \`paths\` arg is independently treated as either a literal logical_path or a glob pattern (e.g. "docs/**/*.md"); globs are matched against current logical_paths in the DB, not the filesystem. A literal arg that matches no exact file but is a prefix of existing paths (a "directory") is rejected unless \`recursive\` is true, in which case every path beneath it is tombstoned. The union of matches is deduplicated, then tombstoned one at a time — partial failures are reported per-entry without aborting the rest. An input arg that matches zero current files is an error (the response includes which arg). Old versions remain queryable via membot_versions and membot_read with an explicit version. Use membot_prune to permanently drop history.`,
  inputSchema: z.object({
  paths: z
  .array(z.string())
@@ -18,6 +18,12 @@ export const removeOperation = defineOperation({
  .describe(
  'One or more logical_paths or glob patterns (e.g. "docs/**/*.md"). Each arg is matched independently against current logical_paths in the DB.',
  ),
+ recursive: z
+ .boolean()
+ .default(false)
+ .describe(
+ "If a literal path arg matches no file but is a prefix of existing paths, treat it as a directory and remove everything beneath it. Mirrors `rm -r`. Ignored for glob args.",
+ ),
  change_note: z.string().optional().describe("Why this is being deleted"),
  }),
  outputSchema: z.object({
@@ -33,7 +39,7 @@ export const removeOperation = defineOperation({
  ok: z.number(),
  failed: z.number(),
  }),
- cli: { positional: ["paths"], aliases: { change_note: "-m" } },
+ cli: { positional: ["paths"], aliases: { change_note: "-m", recursive: "-r" } },
  console_formatter: (result) => {
  const lines = result.removed.map((e) =>
  e.status === "ok"
@@ -59,6 +65,21 @@ export const removeOperation = defineOperation({
  }
  } else if (currentSet.has(arg)) {
  matches.push(arg);
+ } else {
+ const normalized = arg.endsWith("/") ? arg.slice(0, -1) : arg;
+ const dirPrefix = `${normalized}/`;
+ const dirMatches = currentPaths.filter((p) => p.startsWith(dirPrefix));
+ if (dirMatches.length > 0) {
+ if (input.recursive) {
+ matches.push(...dirMatches);
+ } else {
+ throw new HelpfulError({
+ kind: "not_found",
+ message: `\`${arg}\` is a directory (${dirMatches.length} files); pass --recursive to remove its contents`,
+ hint: `Re-run with \`-r\` / \`--recursive\` to tombstone every path under \`${normalized}/\`.`,
+ });
+ }
+ }
  }
  if (matches.length === 0) {
  throw new HelpfulError({
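A worked example of the new directory semantics, using hypothetical paths:

```ts
// Given these current logical_paths (hypothetical data):
const currentPaths = ["docs/a.md", "docs/b/c.md", "notes.md"];

// `membot rm docs`: "docs" is no exact file, but it prefixes two paths:
const dirMatches = currentPaths.filter((p) => p.startsWith("docs/"));
// dirMatches = ["docs/a.md", "docs/b/c.md"]
// recursive=false -> HelpfulError: `docs` is a directory (2 files); pass --recursive ...
// `membot rm -r docs` -> both docs/ paths are tombstoned; "notes.md" is untouched.
```

Glob args such as "docs/**" are unaffected by the flag, as the schema note says.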
package/src/operations/tree.ts
@@ -18,7 +18,7 @@ export const treeOperation = defineOperation({
  description: `Render the logical-path tree of the current store. Tree is synthesised from "/" segments in logical_path — there are no real directories. Tombstoned and historical versions are hidden. Use this before membot_add to pick a sensible logical path.`,
  inputSchema: z.object({
  prefix: z.string().optional().describe("Only show paths starting with this prefix"),
- max_depth: z.number().default(4).describe("How many path segments deep to render"),
+ max_depth: z.number().default(6).describe("How many path segments deep to render"),
  max_items: z
  .number()
  .default(20)
package/src/operations/types.ts
@@ -39,7 +39,7 @@ export interface Operation<I extends z.ZodObject = z.ZodObject, O extends z.ZodT
  * falls back to pretty-printed JSON.
  */
  console_formatter?: (result: z.infer<O>) => string;
- /** The work itself. AppContext gives access to db, embedder, mcpx, logger, config. */
+ /** The work itself. AppContext gives access to db, embedder, logger, config. */
  handler: (input: z.infer<I>, ctx: AppContext) => Promise<z.infer<O>>;
  }