kotadb 2.1.0-next.20260204040224 → 2.1.0

This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between these versions as they appear in their public registry.
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "kotadb",
-  "version": "2.1.0-next.20260204040224",
+  "version": "2.1.0",
   "description": "Local-only code intelligence tool for CLI agents. SQLite-backed repository indexing and code search via MCP.",
   "type": "module",
   "module": "src/index.ts",
@@ -54,8 +54,8 @@
     "@asteasolutions/zod-to-openapi": "^8.4.0",
     "@modelcontextprotocol/sdk": "^1.25.0",
     "@sentry/node": "^10.25.0",
-    "@typescript-eslint/parser": "8.54.0",
-    "@typescript-eslint/types": "8.54.0",
+    "@typescript-eslint/parser": "^8.0.0",
+    "@typescript-eslint/types": "^8.54.0",
     "bcryptjs": "^2.4.3",
     "chokidar": "^5.0.0",
     "cors": "^2.8.6",
@@ -75,8 +75,5 @@
     "lint-staged": "^16.2.4",
     "supertest": "^7.1.4",
     "typescript": "^5.9.3"
-  },
-  "overrides": {
-    "@typescript-eslint/types": "8.54.0"
   }
 }
package/src/cli.ts CHANGED
@@ -9,7 +9,6 @@
  * kotadb Start the MCP server (default port 3000)
  * kotadb --stdio Start in stdio mode (for Claude Code integration)
  * kotadb --port 4000 Start on custom port
- * kotadb --toolset full Select tool tier (default, core, memory, full)
  * kotadb --version Show version
  * kotadb --help Show help
  * kotadb deps Query dependency information for a file
@@ -26,23 +25,11 @@ import { fileURLToPath } from "node:url";
 
 const __dirname = dirname(fileURLToPath(import.meta.url));
 
-/**
- * Valid toolset tiers for MCP tool selection
- * - default: 8 tools (core + sync)
- * - core: 6 tools
- * - memory: 14 tools (core + sync + memory)
- * - full: 20 tools (all)
- */
-export type ToolsetTier = "default" | "core" | "memory" | "full";
-
-const VALID_TOOLSET_TIERS: ToolsetTier[] = ["default", "core", "memory", "full"];
-
 interface CliOptions {
   port: number;
   help: boolean;
   version: boolean;
   stdio: boolean;
-  toolset: ToolsetTier;
 }
 
 function getVersion(): string {
@@ -83,12 +70,6 @@ COMMANDS:
 OPTIONS:
   --stdio Use stdio transport (for Claude Code integration)
   --port <number> Port to listen on (default: 3000, env: PORT)
-  --toolset <tier> Select tool tier (default: default)
-      Tiers:
-        default 8 tools (core + sync)
-        core 6 tools (search, index, deps, impact)
-        memory 14 tools (core + sync + memory layer)
-        full 20 tools (all available tools)
   --version, -v Show version number
   --help, -h Show this help message
 
@@ -100,8 +81,6 @@ ENVIRONMENT VARIABLES:
 
 EXAMPLES:
   kotadb --stdio Start in stdio mode (for Claude Code)
-  kotadb --stdio --toolset full Start with all tools enabled
-  kotadb --stdio --toolset core Start with minimal core tools
   kotadb Start HTTP server on port 3000
   kotadb --port 4000 Start HTTP server on port 4000
   kotadb deps --file src/db/client.ts Query deps for a file (text)
@@ -120,17 +99,6 @@ MCP CONFIGURATION (stdio mode - RECOMMENDED):
     }
   }
 
-  With toolset selection:
-
-  {
-    "mcpServers": {
-      "kotadb": {
-        "command": "bunx",
-        "args": ["kotadb@next", "--stdio", "--toolset", "full"]
-      }
-    }
-  }
-
 MCP CONFIGURATION (HTTP mode - legacy):
   Add to your .mcp.json or Claude Code settings:
 
@@ -158,17 +126,12 @@ function printVersion(): void {
   process.stdout.write(`kotadb v${version}\n`);
 }
 
-function isValidToolsetTier(value: string): value is ToolsetTier {
-  return VALID_TOOLSET_TIERS.includes(value as ToolsetTier);
-}
-
 function parseArgs(args: string[]): CliOptions {
   const options: CliOptions = {
     port: Number(process.env.PORT ?? 3000),
     help: false,
     version: false,
     stdio: false,
-    toolset: "default",
   };
 
   for (let i = 0; i < args.length; i++) {
@@ -195,32 +158,6 @@ function parseArgs(args: string[]): CliOptions {
         process.exit(1);
       }
       options.port = Number(portStr);
-    } else if (arg === "--toolset") {
-      const tierStr = args[++i];
-      if (!tierStr) {
-        process.stderr.write("Error: --toolset requires a tier value\n");
-        process.stderr.write("Valid tiers: default, core, memory, full\n");
-        process.exit(1);
-      }
-      if (!isValidToolsetTier(tierStr)) {
-        process.stderr.write(`Error: Invalid toolset tier '${tierStr}'\n`);
-        process.stderr.write("Valid tiers: default, core, memory, full\n");
-        process.exit(1);
-      }
-      options.toolset = tierStr;
-    } else if (arg.startsWith("--toolset=")) {
-      const tierStr = arg.split("=")[1];
-      if (tierStr === undefined || tierStr === "") {
-        process.stderr.write("Error: --toolset requires a tier value\n");
-        process.stderr.write("Valid tiers: default, core, memory, full\n");
-        process.exit(1);
-      }
-      if (!isValidToolsetTier(tierStr)) {
-        process.stderr.write(`Error: Invalid toolset tier '${tierStr}'\n`);
-        process.stderr.write("Valid tiers: default, core, memory, full\n");
-        process.exit(1);
-      }
-      options.toolset = tierStr;
     } else if (arg.startsWith("-") && arg !== "-") {
       process.stderr.write(`Unknown option: ${arg}\n`);
       process.stderr.write("Use --help for usage information\n");
@@ -231,7 +168,7 @@ function parseArgs(args: string[]): CliOptions {
   return options;
 }
 
-async function runStdioMode(toolset: ToolsetTier): Promise<void> {
+async function runStdioMode(): Promise<void> {
   // Redirect logger to stderr in stdio mode
   // This is CRITICAL - stdout is reserved for JSON-RPC protocol
   const logger = createLogger({
@@ -241,13 +178,11 @@ async function runStdioMode(toolset: ToolsetTier): Promise<void> {
 
   logger.info("KotaDB MCP server starting in stdio mode", {
     version: getVersion(),
-    toolset,
   });
 
-  // Create MCP server with local-only context and toolset
+  // Create MCP server with local-only context
   const context: McpServerContext = {
     userId: "local", // Local-only mode uses fixed user ID
-    toolset,
   };
   const server = createMcpServer(context);
 
@@ -257,7 +192,7 @@ async function runStdioMode(toolset: ToolsetTier): Promise<void> {
   // Connect server to transport
   await server.connect(transport);
 
-  logger.info("KotaDB MCP server connected via stdio", { toolset });
+  logger.info("KotaDB MCP server connected via stdio");
 
   // Server lifecycle is managed by the transport
   // Process will stay alive until stdin closes (when Claude Code terminates it)
@@ -299,7 +234,7 @@ async function main(): Promise<void> {
 
   // Handle stdio mode
   if (options.stdio) {
-    await runStdioMode(options.toolset);
+    await runStdioMode();
     return; // runStdioMode() keeps process alive
   }
 
@@ -312,7 +247,6 @@ async function main(): Promise<void> {
     mode: envConfig.mode,
     port: options.port,
     localDbPath: envConfig.localDbPath,
-    toolset: options.toolset,
   });
 
   const app = createExpressApp();
@@ -322,7 +256,6 @@ async function main(): Promise<void> {
     port: options.port,
     mcp_endpoint: `http://localhost:${options.port}/mcp`,
    health_endpoint: `http://localhost:${options.port}/health`,
-    toolset: options.toolset,
   });
 
   // Print user-friendly startup message
@@ -332,7 +265,6 @@ async function main(): Promise<void> {
   process.stdout.write(` MCP Endpoint: http://localhost:${options.port}/mcp\n`);
   process.stdout.write(` Health Check: http://localhost:${options.port}/health\n`);
   process.stdout.write(` Database: ${envConfig.localDbPath}\n`);
-  process.stdout.write(` Toolset: ${options.toolset}\n`);
   process.stdout.write(`\n`);
   process.stdout.write(`Press Ctrl+C to stop\n`);
   process.stdout.write(`\n`);
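The net effect of the src/cli.ts changes above is that the stdio entry point no longer takes a tier argument. A minimal sketch of the equivalent wiring is shown below; it assumes the StdioServerTransport export of @modelcontextprotocol/sdk and the createMcpServer/McpServerContext exports referenced in the hunks above, and the relative import path is illustrative only.

import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import { createMcpServer, type McpServerContext } from "./mcp/server";

// In 2.1.0 no toolset tier is passed; the server registers its full tool list.
const context: McpServerContext = { userId: "local" };
const server = createMcpServer(context);

// stdout stays reserved for JSON-RPC, so any logging must go to stderr.
await server.connect(new StdioServerTransport());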
@@ -42,24 +42,6 @@ export interface ParseError {
   column?: number;
 }
 
-/**
- * Type guard to validate that an AST node is a valid Program.
- * Provides runtime type validation to prevent version compatibility issues.
- *
- * @param ast - Unknown AST node to validate
- * @returns true if ast is a valid Program node
- */
-function isValidProgram(ast: unknown): ast is TSESTree.Program {
-  return (
-    ast !== null &&
-    typeof ast === "object" &&
-    "type" in ast &&
-    ast.type === "Program" &&
-    "body" in ast &&
-    Array.isArray(ast.body)
-  );
-}
-
 /**
  * Result of parsing a file, including partial AST recovery information.
  */
@@ -121,7 +103,7 @@ function createParseError(error: unknown): ParseError {
 
 /**
  * Parse a file with error-tolerant options enabled.
- * Uses allowInvalidAST to attempt partial recovery and validates result.
+ * Uses allowInvalidAST to attempt partial recovery.
  */
 function parseWithRecoveryOptions(filePath: string, content: string): TSESTree.Program | null {
   try {
@@ -137,18 +119,6 @@ function parseWithRecoveryOptions(filePath: string, content: string): TSESTree.P
       allowInvalidAST: true,
       errorOnUnknownASTType: false,
     });
-
-    // Validate the returned AST is actually a Program node
-    if (!isValidProgram(ast)) {
-      const astType = typeof ast === "object" && ast !== null && "type" in ast ? (ast as any).type : typeof ast;
-      logger.warn(`Parser returned invalid Program type for ${filePath}`, {
-        file_path: filePath,
-        ast_type: astType,
-        recovery: "failed_validation",
-      });
-      return null;
-    }
-
     return ast;
   } catch {
     // Even with recovery options, parsing can still fail
@@ -189,18 +159,6 @@ export function parseFileWithRecovery(filePath: string, content: string): ParseR
       tokens: true,
       filePath,
     });
-
-    // Validate the returned AST is actually a Program node
-    if (!isValidProgram(ast)) {
-      const astType = typeof ast === "object" && ast !== null && "type" in ast ? (ast as any).type : typeof ast;
-      logger.warn(`Parser returned invalid Program type for ${filePath}`, {
-        file_path: filePath,
-        ast_type: astType,
-        recovery: "failed_validation",
-      });
-      throw new Error(`Invalid AST type returned: expected Program, got ${astType}`);
-    }
-
     return {
       ast,
      errors: [],
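With the isValidProgram guard removed, the recovery path simply returns whatever the parser produces under allowInvalidAST. For context, a sketch of the remaining call pattern, assuming the parse() entry point of @typescript-eslint/typescript-estree (the exact parser import used by this file is not visible in the diff):

import { parse, TSESTree } from "@typescript-eslint/typescript-estree";

function tryParse(filePath: string, content: string): TSESTree.Program | null {
  try {
    // As of 2.1.0 the result is returned as-is; no structural Program check follows.
    return parse(content, {
      filePath,
      tokens: true,
      allowInvalidAST: true,
      errorOnUnknownASTType: false,
    });
  } catch {
    // Even with recovery options, parsing can still fail.
    return null;
  }
}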
package/src/mcp/server.ts CHANGED
@@ -18,14 +18,17 @@ import {
   GENERATE_TASK_CONTEXT_TOOL,
   INDEX_REPOSITORY_TOOL,
   LIST_RECENT_FILES_TOOL,
-  SEARCH_TOOL,
+  SEARCH_CODE_TOOL,
   SEARCH_DEPENDENCIES_TOOL,
   SYNC_EXPORT_TOOL,
   SYNC_IMPORT_TOOL,
   VALIDATE_IMPLEMENTATION_SPEC_TOOL,
   // Memory Layer tools
+  SEARCH_DECISIONS_TOOL,
   RECORD_DECISION_TOOL,
+  SEARCH_FAILURES_TOOL,
   RECORD_FAILURE_TOOL,
+  SEARCH_PATTERNS_TOOL,
   RECORD_INSIGHT_TOOL,
   // Dynamic Expertise tools
   GET_DOMAIN_KEY_FILES_TOOL,
@@ -37,35 +40,27 @@ import {
   executeGenerateTaskContext,
   executeIndexRepository,
   executeListRecentFiles,
-  executeSearch,
+  executeSearchCode,
   executeSearchDependencies,
   executeSyncExport,
   executeSyncImport,
   executeValidateImplementationSpec,
   // Memory Layer execute functions
+  executeSearchDecisions,
   executeRecordDecision,
+  executeSearchFailures,
   executeRecordFailure,
+  executeSearchPatterns,
   executeRecordInsight,
   // Dynamic Expertise execute functions
   executeGetDomainKeyFiles,
   executeValidateExpertise,
   executeSyncExpertise,
   executeGetRecentPatterns,
-  // Tool filtering
-  filterToolsByTier,
 } from "./tools";
 
 const logger = createLogger({ module: "mcp-server" });
 
-/**
- * Valid toolset tiers for MCP tool selection
- * - default: 8 tools (core + sync)
- * - core: 6 tools
- * - memory: 14 tools (core + sync + memory)
- * - full: 20 tools (all)
- */
-export type ToolsetTier = "default" | "core" | "memory" | "full";
-
 /**
  * MCP Server context passed to tool handlers via closure
  *
@@ -73,7 +68,6 @@ export type ToolsetTier = "default" | "core" | "memory" | "full";
  */
 export interface McpServerContext {
   userId: string;
-  toolset?: ToolsetTier;
 }
 
 /**
@@ -113,20 +107,35 @@ export function createMcpServer(context: McpServerContext): Server {
     },
   );
 
-  // Register tools/list handler - filter by toolset tier
+  // Register tools/list handler - local-only tools
   server.setRequestHandler(ListToolsRequestSchema, async () => {
-    const tier = context.toolset || "default";
-    const filteredTools = filterToolsByTier(tier);
-
-    logger.debug("Listing MCP tools", {
-      tier,
-      tool_count: filteredTools.length
-    });
-
     return {
-      tools: filteredTools,
+      tools: [
+        SEARCH_CODE_TOOL,
+        INDEX_REPOSITORY_TOOL,
+        LIST_RECENT_FILES_TOOL,
+        SEARCH_DEPENDENCIES_TOOL,
+        ANALYZE_CHANGE_IMPACT_TOOL,
+        VALIDATE_IMPLEMENTATION_SPEC_TOOL,
+        SYNC_EXPORT_TOOL,
+        SYNC_IMPORT_TOOL,
+        GENERATE_TASK_CONTEXT_TOOL,
+        // Memory Layer tools
+        SEARCH_DECISIONS_TOOL,
+        RECORD_DECISION_TOOL,
+        SEARCH_FAILURES_TOOL,
+        RECORD_FAILURE_TOOL,
+        SEARCH_PATTERNS_TOOL,
+        RECORD_INSIGHT_TOOL,
+        // Dynamic Expertise tools
+        GET_DOMAIN_KEY_FILES_TOOL,
+        VALIDATE_EXPERTISE_TOOL,
+        SYNC_EXPERTISE_TOOL,
+        GET_RECENT_PATTERNS_TOOL,
+      ],
     };
   });
+
   // Register tools/call handler
   server.setRequestHandler(CallToolRequestSchema, async (request) => {
     const { name, arguments: toolArgs } = request.params;
@@ -137,8 +146,8 @@ export function createMcpServer(context: McpServerContext): Server {
 
     try {
       switch (name) {
-        case "search":
-          result = await executeSearch(
+        case "search_code":
+          result = await executeSearchCode(
             toolArgs,
             "", // requestId not used
             context.userId,
@@ -193,6 +202,13 @@
           );
           break;
         // Memory Layer tools
+        case "search_decisions":
+          result = await executeSearchDecisions(
+            toolArgs,
+            "", // requestId not used
+            context.userId,
+          );
+          break;
         case "record_decision":
           result = await executeRecordDecision(
             toolArgs,
@@ -200,6 +216,13 @@
             context.userId,
           );
           break;
+        case "search_failures":
+          result = await executeSearchFailures(
+            toolArgs,
+            "", // requestId not used
+            context.userId,
+          );
+          break;
         case "record_failure":
           result = await executeRecordFailure(
             toolArgs,
@@ -207,6 +230,13 @@
             context.userId,
           );
           break;
+        case "search_patterns":
+          result = await executeSearchPatterns(
+            toolArgs,
+            "", // requestId not used
+            context.userId,
+          );
+          break;
         case "record_insight":
           result = await executeRecordInsight(
             toolArgs,
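From a client's point of view, tools/list now returns the full, unfiltered tool set and code search is invoked as search_code with a term argument. A sketch of a stdio client session, assuming the Client and StdioClientTransport exports of @modelcontextprotocol/sdk ^1.x; the search_code argument shape comes from the schema in src/mcp/tools.ts below.

import { Client } from "@modelcontextprotocol/sdk/client/index.js";
import { StdioClientTransport } from "@modelcontextprotocol/sdk/client/stdio.js";

const client = new Client({ name: "kotadb-smoke-test", version: "0.0.0" });
await client.connect(
  new StdioClientTransport({ command: "bunx", args: ["kotadb", "--stdio"] }),
);

// No --toolset tier exists anymore; every registered tool is listed.
const { tools } = await client.listTools();
console.log(tools.map((t) => t.name));

// The unified "search" tool is gone; code search is "search_code" with a `term` argument.
const result = await client.callTool({
  name: "search_code",
  arguments: { term: "createMcpServer", limit: 5 },
});
console.log(result);

await client.close();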
package/src/mcp/tools.ts CHANGED
@@ -34,14 +34,8 @@ const logger = createLogger({ module: "mcp-tools" });
 /**
  * MCP Tool Definition
  */
-/**
- * Tool tier for categorizing tools by feature set
- */
-export type ToolTier = "core" | "sync" | "memory" | "expertise";
-
 export interface ToolDefinition {
   name: string;
-  tier: ToolTier;
   description: string;
   inputSchema: {
     type: "object";
@@ -51,165 +45,38 @@ export interface ToolDefinition {
 }
 
 /**
- * Toolset tier for CLI selection (maps to tool tiers)
- */
-export type ToolsetTier = "default" | "core" | "memory" | "full";
-
-/**
- * Filter tools by the requested toolset tier
- *
- * Tier mapping:
- * - core: 6 tools (core tier only)
- * - default: 8 tools (core + sync tiers)
- * - memory: 14 tools (core + sync + memory tiers)
- * - full: all tools
- *
- * @param tier - The toolset tier to filter by
- * @param tools - Optional array of tools (defaults to all tool definitions)
- */
-export function filterToolsByTier(tier: ToolsetTier, tools?: ToolDefinition[]): ToolDefinition[] {
-  const allTools = tools ?? getToolDefinitions();
-  switch (tier) {
-    case "core":
-      return allTools.filter((t) => t.tier === "core");
-    case "default":
-      return allTools.filter((t) => t.tier === "core" || t.tier === "sync");
-    case "memory":
-      return allTools.filter((t) => t.tier === "core" || t.tier === "sync" || t.tier === "memory");
-    case "full":
-      return allTools;
-    default:
-      // Default to "default" tier if unknown
-      return allTools.filter((t) => t.tier === "core" || t.tier === "sync");
-  }
-}
 
 /**
- * Alias for filterToolsByTier - get tool definitions filtered by toolset
- *
- * @param toolset - The toolset tier to filter by
+ * Tool: search_code
  */
-export function getToolsByTier(toolset: ToolsetTier): ToolDefinition[] {
-  return filterToolsByTier(toolset);
-}
-
-/**
- * Validate if a string is a valid toolset tier
- */
-export function isValidToolset(value: string): value is ToolsetTier {
-  return value === "default" || value === "core" || value === "memory" || value === "full";
-}
-
-// ============================================================================
-// UNIFIED SEARCH TOOL - Replaces search_code, search_symbols, search_decisions, search_patterns, search_failures
-// Issue: #143
-// ============================================================================
-
-/**
- * Tool: search (unified)
- */
-export const SEARCH_TOOL: ToolDefinition = {
-  tier: "core",
-  name: "search",
+export const SEARCH_CODE_TOOL: ToolDefinition = {
+  name: "search_code",
   description:
-    "Search indexed code, symbols, decisions, patterns, and failures. Supports multiple search scopes simultaneously with scope-specific filters and output formats.",
+    "Search indexed code files for a specific term. Returns matching files with context snippets.",
   inputSchema: {
     type: "object",
     properties: {
-      query: {
+      term: {
         type: "string",
-        description: "Search query term or phrase",
+        description: "The search term to find in code files",
       },
-      scope: {
-        type: "array",
-        items: {
-          type: "string",
-          enum: ["code", "symbols", "decisions", "patterns", "failures"],
-        },
-        description: "Search scopes to query (default: ['code'])",
-      },
-      filters: {
-        type: "object",
-        description: "Scope-specific filters (invalid filters ignored)",
-        properties: {
-          // Code scope filters
-          glob: {
-            type: "string",
-            description: "File path glob pattern (code scope only)",
-          },
-          exclude: {
-            type: "array",
-            items: { type: "string" },
-            description: "Exclude patterns (code scope only)",
-          },
-          language: {
-            type: "string",
-            description: "Programming language filter (code scope only)",
-          },
-          // Symbol scope filters
-          symbol_kind: {
-            type: "array",
-            items: {
-              type: "string",
-              enum: [
-                "function",
-                "class",
-                "interface",
-                "type",
-                "variable",
-                "constant",
-                "method",
-                "property",
-                "module",
-                "namespace",
-                "enum",
-                "enum_member",
-              ],
-            },
-            description: "Symbol kinds to include (symbols scope only)",
-          },
-          exported_only: {
-            type: "boolean",
-            description: "Only exported symbols (symbols scope only)",
-          },
-          // Decision scope filters
-          decision_scope: {
-            type: "string",
-            enum: ["architecture", "pattern", "convention", "workaround"],
-            description: "Decision category (decisions scope only)",
-          },
-          // Pattern scope filters
-          pattern_type: {
-            type: "string",
-            description: "Pattern type filter (patterns scope only)",
-          },
-          // Common filters
-          repository: {
-            type: "string",
-            description: "Repository ID or full_name filter (all scopes)",
-          },
-        },
+      repository: {
+        type: "string",
+        description: "Optional: Filter results to a specific repository ID",
       },
       limit: {
         type: "number",
-        description: "Max results per scope (default: 20, max: 100)",
-      },
-      output: {
-        type: "string",
-        enum: ["full", "paths", "compact"],
-        description: "Output format (default: 'full')",
+        description: "Optional: Maximum number of results (default: 20, max: 100)",
       },
     },
-    required: ["query"],
+    required: ["term"],
   },
 };
 
-
 /**
  * Tool: index_repository
  */
 export const INDEX_REPOSITORY_TOOL: ToolDefinition = {
-  tier: "core",
   name: "index_repository",
   description:
     "Index a git repository by cloning/updating it and extracting code files. Performs synchronous indexing and returns immediately with status 'completed' and full indexing stats.",
@@ -237,7 +104,6 @@
  * Tool: list_recent_files
  */
 export const LIST_RECENT_FILES_TOOL: ToolDefinition = {
-  tier: "core",
   name: "list_recent_files",
   description:
     "List recently indexed files, ordered by indexing timestamp. Useful for seeing what code is available.",
@@ -260,7 +126,6 @@
  * Tool: search_dependencies
  */
 export const SEARCH_DEPENDENCIES_TOOL: ToolDefinition = {
-  tier: "core",
   name: "search_dependencies",
   description:
     "Search the dependency graph to find files that depend on (dependents) or are depended on by (dependencies) a target file. Useful for impact analysis before refactoring, test scope discovery, and circular dependency detection.",
@@ -309,7 +174,6 @@
  * Tool: analyze_change_impact
  */
 export const ANALYZE_CHANGE_IMPACT_TOOL: ToolDefinition = {
-  tier: "core",
   name: "analyze_change_impact",
   description:
     "Analyze the impact of proposed code changes by examining dependency graphs, test scope, and potential conflicts. Returns comprehensive analysis including affected files, test recommendations, architectural warnings, and risk assessment. Useful for planning implementations and avoiding breaking changes.",
@@ -357,7 +221,6 @@
  * Tool: validate_implementation_spec
  */
 export const VALIDATE_IMPLEMENTATION_SPEC_TOOL: ToolDefinition = {
-  tier: "expertise",
   name: "validate_implementation_spec",
   description:
     "Validate an implementation specification against KotaDB conventions and repository state. Checks for file conflicts, naming conventions, path alias usage, test coverage, and dependency compatibility. Returns validation errors, warnings, and approval conditions checklist.",
@@ -440,7 +303,6 @@
  * Tool: kota_sync_export
 */
 export const SYNC_EXPORT_TOOL: ToolDefinition = {
-  tier: "sync",
   name: "kota_sync_export",
   description:
     "Export local SQLite database to JSONL files for git sync. Uses hash-based change detection to skip unchanged tables. Exports to .kotadb/export/ by default.",
@@ -463,7 +325,6 @@
  * Tool: kota_sync_import
 */
 export const SYNC_IMPORT_TOOL: ToolDefinition = {
-  tier: "sync",
   name: "kota_sync_import",
   description:
     "Import JSONL files into local SQLite database. Applies deletion manifest first, then imports all tables transactionally. Typically run after git pull to sync remote changes.",
@@ -486,7 +347,6 @@
  * Target: <100ms response time
 */
 export const GENERATE_TASK_CONTEXT_TOOL: ToolDefinition = {
-  tier: "core",
   name: "generate_task_context",
   description:
     "Generate structured context for a set of files including dependency counts, impacted files, test files, and recent changes. Designed for hook-based context injection with <100ms performance target.",
@@ -523,11 +383,42 @@
 // Memory Layer Tool Definitions
 // ============================================================================
 
+/**
+ * Tool: search_decisions
+ */
+export const SEARCH_DECISIONS_TOOL: ToolDefinition = {
+  name: "search_decisions",
+  description:
+    "Search past architectural decisions using FTS5. Returns decisions with relevance scores.",
+  inputSchema: {
+    type: "object",
+    properties: {
+      query: {
+        type: "string",
+        description: "Search query for decisions",
+      },
+      scope: {
+        type: "string",
+        enum: ["architecture", "pattern", "convention", "workaround"],
+        description: "Optional: Filter by decision scope",
+      },
+      repository: {
+        type: "string",
+        description: "Optional: Filter to a specific repository ID or full_name",
+      },
+      limit: {
+        type: "number",
+        description: "Optional: Max results (default: 20)",
+      },
+    },
+    required: ["query"],
+  },
+};
+
 /**
  * Tool: record_decision
 */
 export const RECORD_DECISION_TOOL: ToolDefinition = {
-  tier: "memory",
   name: "record_decision",
   description:
     "Record a new architectural decision for future reference. Decisions are searchable via search_decisions.",
@@ -574,11 +465,37 @@
   },
 };
 
+/**
+ * Tool: search_failures
+ */
+export const SEARCH_FAILURES_TOOL: ToolDefinition = {
+  name: "search_failures",
+  description:
+    "Search failed approaches to avoid repeating mistakes. Returns failures with relevance scores.",
+  inputSchema: {
+    type: "object",
+    properties: {
+      query: {
+        type: "string",
+        description: "Search query for failures",
+      },
+      repository: {
+        type: "string",
+        description: "Optional: Filter to a specific repository ID or full_name",
+      },
+      limit: {
+        type: "number",
+        description: "Optional: Max results (default: 20)",
+      },
+    },
+    required: ["query"],
+  },
+};
+
 /**
  * Tool: record_failure
 */
 export const RECORD_FAILURE_TOOL: ToolDefinition = {
-  tier: "memory",
   name: "record_failure",
   description:
     "Record a failed approach for future reference. Helps agents avoid repeating mistakes.",
@@ -615,11 +532,44 @@
   },
 };
 
+/**
+ * Tool: search_patterns
+ */
+export const SEARCH_PATTERNS_TOOL: ToolDefinition = {
+  name: "search_patterns",
+  description:
+    "Find codebase patterns by type or file. Returns discovered patterns for consistency.",
+  inputSchema: {
+    type: "object",
+    properties: {
+      query: {
+        type: "string",
+        description: "Optional: Search query for pattern name/description",
+      },
+      pattern_type: {
+        type: "string",
+        description: "Optional: Filter by pattern type (e.g., error-handling, api-call)",
+      },
+      file: {
+        type: "string",
+        description: "Optional: Filter by file path",
+      },
+      repository: {
+        type: "string",
+        description: "Optional: Filter to a specific repository ID or full_name",
+      },
+      limit: {
+        type: "number",
+        description: "Optional: Max results (default: 20)",
+      },
+    },
+  },
+};
+
 /**
  * Tool: record_insight
 */
 export const RECORD_INSIGHT_TOOL: ToolDefinition = {
-  tier: "memory",
   name: "record_insight",
   description:
     "Store a session insight for future agents. Insights are discoveries, failures, or workarounds.",
@@ -661,7 +611,6 @@
  * Tool: get_domain_key_files
 */
 export const GET_DOMAIN_KEY_FILES_TOOL: ToolDefinition = {
-  tier: "expertise",
   name: "get_domain_key_files",
   description:
     "Get the most-depended-on files for a domain. Key files are core infrastructure that many other files depend on.",
@@ -689,7 +638,6 @@
  * Tool: validate_expertise
 */
 export const VALIDATE_EXPERTISE_TOOL: ToolDefinition = {
-  tier: "expertise",
   name: "validate_expertise",
   description:
     "Validate that key_files defined in expertise.yaml exist in the indexed codebase. Checks for stale or missing file references.",
@@ -709,7 +657,6 @@
  * Tool: sync_expertise
 */
 export const SYNC_EXPERTISE_TOOL: ToolDefinition = {
-  tier: "expertise",
   name: "sync_expertise",
   description:
     "Sync patterns from expertise.yaml files to the patterns table. Extracts pattern definitions and stores them for future reference.",
@@ -732,7 +679,6 @@
  * Tool: get_recent_patterns
 */
 export const GET_RECENT_PATTERNS_TOOL: ToolDefinition = {
-  tier: "expertise",
   name: "get_recent_patterns",
   description:
     "Get recently observed patterns from the patterns table. Useful for understanding codebase conventions.",
@@ -765,7 +711,7 @@
 */
 export function getToolDefinitions(): ToolDefinition[] {
   return [
-    SEARCH_TOOL,
+    SEARCH_CODE_TOOL,
     INDEX_REPOSITORY_TOOL,
     LIST_RECENT_FILES_TOOL,
     SEARCH_DEPENDENCIES_TOOL,
@@ -775,8 +721,11 @@
     SYNC_IMPORT_TOOL,
     GENERATE_TASK_CONTEXT_TOOL,
     // Memory Layer tools
+    SEARCH_DECISIONS_TOOL,
     RECORD_DECISION_TOOL,
+    SEARCH_FAILURES_TOOL,
     RECORD_FAILURE_TOOL,
+    SEARCH_PATTERNS_TOOL,
     RECORD_INSIGHT_TOOL,
     // Dynamic Expertise tools
     GET_DOMAIN_KEY_FILES_TOOL,
@@ -785,6 +734,7 @@
     GET_RECENT_PATTERNS_TOOL,
   ];
 }
+
 /**
 
 /**
@@ -799,361 +749,7 @@ function isListRecentParams(params: unknown): params is { limit?: number; reposi
   return true;
 }
 
-// ============================================================================
-// UNIFIED SEARCH - Helper Functions and Types
-// ============================================================================
-
-interface NormalizedFilters {
-  // Common
-  repositoryId?: string;
-  // Code
-  glob?: string;
-  exclude?: string[];
-  language?: string;
-  // Symbols
-  symbol_kind?: string[];
-  exported_only?: boolean;
-  // Decisions
-  decision_scope?: string;
-  // Patterns
-  pattern_type?: string;
-}
-
-function normalizeFilters(filters: unknown): NormalizedFilters {
-  if (!filters || typeof filters !== "object") {
-    return {};
-  }
-
-  const f = filters as Record<string, unknown>;
-  const normalized: NormalizedFilters = {};
-
-  // Resolve repository (UUID or full_name)
-  if (f.repository && typeof f.repository === "string") {
-    const resolved = resolveRepositoryIdentifierWithError(f.repository);
-    if (!("error" in resolved)) {
-      normalized.repositoryId = resolved.id;
-    }
-  }
-
-  // Extract typed filters (silently ignore invalid)
-  if (f.glob && typeof f.glob === "string") {
-    normalized.glob = f.glob;
-  }
-
-  if (Array.isArray(f.exclude)) {
-    normalized.exclude = f.exclude.filter(e => typeof e === "string");
-  }
-
-  if (f.language && typeof f.language === "string") {
-    normalized.language = f.language;
-  }
-
-  if (Array.isArray(f.symbol_kind)) {
-    normalized.symbol_kind = f.symbol_kind.filter(k => typeof k === "string");
-  }
-
-  if (typeof f.exported_only === "boolean") {
-    normalized.exported_only = f.exported_only;
-  }
-
-  if (f.decision_scope && typeof f.decision_scope === "string") {
-    normalized.decision_scope = f.decision_scope;
-  }
-
-  if (f.pattern_type && typeof f.pattern_type === "string") {
-    normalized.pattern_type = f.pattern_type;
-  }
-
-  return normalized;
-}
-
-interface SymbolResult {
-  id: string;
-  name: string;
-  kind: string;
-  signature: string | null;
-  documentation: string | null;
-  location: {
-    file: string;
-    line_start: number;
-    line_end: number;
-  };
-  repository_id: string;
-  is_exported: boolean;
-}
-
-async function searchSymbols(
-  query: string,
-  filters: NormalizedFilters,
-  limit: number
-): Promise<SymbolResult[]> {
-  const db = getGlobalDatabase();
-
-  let sql = `
-    SELECT
-      s.id,
-      s.name,
-      s.kind,
-      s.signature,
-      s.documentation,
-      s.line_start,
-      s.line_end,
-      s.metadata,
-      f.path as file_path,
-      s.repository_id
-    FROM indexed_symbols s
-    JOIN indexed_files f ON s.file_id = f.id
-    WHERE s.name LIKE ?
-  `;
-
-  const params: (string | number)[] = [`%${query}%`];
-
-  // Apply symbol_kind filter
-  if (filters.symbol_kind && filters.symbol_kind.length > 0) {
-    const placeholders = filters.symbol_kind.map(() => "?").join(", ");
-    sql += ` AND s.kind IN (${placeholders})`;
-    params.push(...filters.symbol_kind);
-  }
-
-  // Apply exported_only filter
-  if (filters.exported_only) {
-    sql += ` AND json_extract(s.metadata, '$.is_exported') = 1`;
-  }
-
-  // Apply repository filter
-  if (filters.repositoryId) {
-    sql += ` AND s.repository_id = ?`;
-    params.push(filters.repositoryId);
-  }
-
-  sql += ` ORDER BY s.name LIMIT ?`;
-  params.push(limit);
-
-  const rows = db.query<{
-    id: string;
-    name: string;
-    kind: string;
-    signature: string | null;
-    documentation: string | null;
-    line_start: number;
-    line_end: number;
-    metadata: string;
-    file_path: string;
-    repository_id: string;
-  }>(sql, params);
-
-  return rows.map(row => ({
-    id: row.id,
-    name: row.name,
-    kind: row.kind,
-    signature: row.signature,
-    documentation: row.documentation,
-    location: {
-      file: row.file_path,
-      line_start: row.line_start,
-      line_end: row.line_end,
-    },
-    repository_id: row.repository_id,
-    is_exported: JSON.parse(row.metadata || '{}').is_exported || false,
-  }));
-}
-
-function formatSearchResults(
-  query: string,
-  scopes: string[],
-  scopeResults: Record<string, unknown[]>,
-  format: string
-): Record<string, unknown> {
-  const response: Record<string, unknown> = {
-    query,
-    scopes,
-    results: {} as Record<string, unknown>,
-    counts: { total: 0 } as Record<string, unknown>,
-  };
-
-  for (const scope of scopes) {
-    const items = scopeResults[scope] || [];
-
-    if (format === "paths") {
-      // Extract file paths only
-      (response.results as Record<string, unknown>)[scope] = items.map((item: any) => {
-        if (item.path) return item.path;
-        if (item.file_path) return item.file_path;
-        if (item.location?.file) return item.location.file;
-        return "unknown";
-      });
-    } else if (format === "compact") {
-      // Summary info only
-      (response.results as Record<string, unknown>)[scope] = items.map((item: any) => {
-        if (scope === "code") {
-          return { path: item.path, match_count: 1 };
-        } else if (scope === "symbols") {
-          return { name: item.name, kind: item.kind, file: item.location.file };
-        } else if (scope === "decisions") {
-          return { title: item.title, scope: item.scope };
-        } else if (scope === "patterns") {
-          return { pattern_type: item.pattern_type, file_path: item.file_path };
-        } else if (scope === "failures") {
-          return { title: item.title, problem: item.problem };
-        }
-        return item;
-      });
-    } else {
-      // Full details
-      (response.results as Record<string, unknown>)[scope] = items;
-    }
-
-    (response.counts as Record<string, unknown>)[scope] = items.length;
-    (response.counts as Record<string, unknown>).total = ((response.counts as Record<string, unknown>).total as number) + items.length;
-  }
-
-  return response;
-}
-
-// ============================================================================
-// UNIFIED SEARCH - Execute Function
-// ============================================================================
-
 /**
- * Execute search tool (unified search across multiple scopes)
- */
-export async function executeSearch(
-  params: unknown,
-  requestId: string | number,
-  userId: string,
-): Promise<unknown> {
-  // Validate params structure
-  if (typeof params !== "object" || params === null) {
-    throw new Error("Parameters must be an object");
-  }
-
-  const p = params as Record<string, unknown>;
-
-  // Check required parameter: query
-  if (p.query === undefined) {
-    throw new Error("Missing required parameter: query");
-  }
-  if (typeof p.query !== "string") {
-    throw new Error("Parameter 'query' must be a string");
-  }
-
-  // Validate optional parameters
-  let scopes: string[] = ["code"]; // Default scope
-  if (p.scope !== undefined) {
-    if (!Array.isArray(p.scope)) {
-      throw new Error("Parameter 'scope' must be an array");
-    }
-    const validScopes = ["code", "symbols", "decisions", "patterns", "failures"];
-    for (const s of p.scope) {
-      if (typeof s !== "string" || !validScopes.includes(s)) {
-        throw new Error(`Invalid scope: ${s}. Must be one of: ${validScopes.join(", ")}`);
-      }
-    }
-    scopes = p.scope as string[];
-  }
-
-  if (p.limit !== undefined && typeof p.limit !== "number") {
-    throw new Error("Parameter 'limit' must be a number");
-  }
-
-  if (p.output !== undefined) {
-    if (typeof p.output !== "string" || !["full", "paths", "compact"].includes(p.output)) {
-      throw new Error("Parameter 'output' must be one of: full, paths, compact");
-    }
-  }
-
-  const limit = Math.min(Math.max((p.limit as number) || 20, 1), 100);
-  const output = (p.output as string) || "full";
-  const filters = normalizeFilters(p.filters);
-
-  // Route to scope handlers in parallel
-  const results: Record<string, unknown[]> = {};
-  const searchPromises: Promise<void>[] = [];
-
-  if (scopes.includes("code")) {
-    searchPromises.push(
-      (async () => {
-        // Reuse existing searchFiles logic
-        const codeResults = searchFiles(p.query as string, {
-          repositoryId: filters.repositoryId,
-          limit,
-        });
-        results.code = codeResults;
-      })()
-    );
-  }
-
-  if (scopes.includes("symbols")) {
-    searchPromises.push(
-      (async () => {
-        const symbolResults = await searchSymbols(p.query as string, filters, limit);
-        results.symbols = symbolResults;
-      })()
-    );
-  }
-
-  if (scopes.includes("decisions")) {
-    searchPromises.push(
-      (async () => {
-        // Reuse existing executeSearchDecisions logic
-        const decisionParams = {
-          query: p.query,
-          scope: filters.decision_scope,
-          repository: filters.repositoryId,
-          limit,
-        };
-        const decisionResults = await executeSearchDecisions(decisionParams, requestId, userId);
-        results.decisions = (decisionResults as { results: unknown[] }).results;
-      })()
-    );
-  }
-
-  if (scopes.includes("patterns")) {
-    searchPromises.push(
-      (async () => {
-        // Reuse existing executeSearchPatterns logic
-        const patternParams = {
-          query: p.query,
-          pattern_type: filters.pattern_type,
-          repository: filters.repositoryId,
-          limit,
-        };
-        const patternResults = await executeSearchPatterns(patternParams, requestId, userId);
-        results.patterns = (patternResults as { results: unknown[] }).results;
-      })()
-    );
-  }
-
-  if (scopes.includes("failures")) {
-    searchPromises.push(
-      (async () => {
-        // Reuse existing executeSearchFailures logic
-        const failureParams = {
-          query: p.query,
-          repository: filters.repositoryId,
-          limit,
-        };
-        const failureResults = await executeSearchFailures(failureParams, requestId, userId);
-        results.failures = (failureResults as { results: unknown[] }).results;
-      })()
-    );
-  }
-
-  await Promise.all(searchPromises);
-
-  // Format output
-  const response = formatSearchResults(p.query as string, scopes, results, output);
-
-  logger.info("Unified search completed", {
-    query: p.query,
-    scopes,
-    total_results: (response.counts as Record<string, unknown>).total,
-    user_id: userId,
-  });
-
-  return response;
-}
-
 /**
  * Execute search_code tool
  *
@@ -1347,18 +943,8 @@ export async function executeListRecentFiles(
     ? (params.repository as string | undefined)
     : undefined;
 
-  // Resolve repository ID (supports UUID or full_name)
-  let repositoryId = repository;
-  if (repositoryId) {
-    const repoResult = resolveRepositoryIdentifierWithError(repositoryId);
-    if ("error" in repoResult) {
-      return { results: [], message: repoResult.error };
-    }
-    repositoryId = repoResult.id;
-  }
-
   // Use SQLite via listRecentFiles with optional repository filter
-  const files = listRecentFiles(limit, repositoryId);
+  const files = listRecentFiles(limit, repository);
 
   return {
     results: files.map((file) => ({
@@ -1370,6 +956,8 @@
   };
 }
 
+/**
+
 /**
  * Execute search_dependencies tool
 */
@@ -2970,8 +2558,8 @@ export async function handleToolCall(
   userId: string,
 ): Promise<unknown> {
   switch (toolName) {
-    case "search":
-      return await executeSearch(params, requestId, userId);
+    case "search_code":
+      return await executeSearchCode(params, requestId, userId);
     case "index_repository":
       return await executeIndexRepository(params, requestId, userId);
     case "list_recent_files":
@@ -2989,10 +2577,16 @@
     case "generate_task_context":
      return await executeGenerateTaskContext(params, requestId, userId);
     // Memory Layer tools
+    case "search_decisions":
+      return await executeSearchDecisions(params, requestId, userId);
     case "record_decision":
      return await executeRecordDecision(params, requestId, userId);
+    case "search_failures":
+      return await executeSearchFailures(params, requestId, userId);
     case "record_failure":
      return await executeRecordFailure(params, requestId, userId);
+    case "search_patterns":
+      return await executeSearchPatterns(params, requestId, userId);
     case "record_insight":
      return await executeRecordInsight(params, requestId, userId);
     // Expertise Layer tools
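The caller-facing migration is an argument-shape change: one unified "search" call with scopes becomes one call per scoped tool. A sketch against handleToolCall as shown above, assuming the (toolName, params, requestId, userId) parameter order implied by the executor calls; tool names and argument keys come from the schemas earlier in this file.

import { handleToolCall } from "./tools";

// 2.1.0-next (removed): unified search across scopes
// await handleToolCall("search", { query: "retry", scope: ["code", "failures"] }, "", "local");

// 2.1.0: one scoped call per result set
const code = await handleToolCall("search_code", { term: "retry", limit: 20 }, "", "local");
const failures = await handleToolCall("search_failures", { query: "retry" }, "", "local");
const decisions = await handleToolCall("search_decisions", { query: "retry", scope: "architecture" }, "", "local");
console.log(code, failures, decisions);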
package/src/cli/args.ts DELETED
@@ -1,105 +0,0 @@
-/**
- * CLI argument parsing utilities
- *
- * Extracted for testability. Used by main CLI entry point.
- *
- * @module cli/args
- */
-
-/**
- * Valid toolset tiers for MCP tool selection
- * - default: 8 tools (core + sync)
- * - core: 6 tools
- * - memory: 14 tools (core + sync + memory)
- * - full: 20 tools (all)
- */
-export type ToolsetTier = "default" | "core" | "memory" | "full";
-
-const VALID_TOOLSET_TIERS: ToolsetTier[] = ["default", "core", "memory", "full"];
-
-export interface CliOptions {
-  port: number;
-  help: boolean;
-  version: boolean;
-  stdio: boolean;
-  toolset: ToolsetTier;
-}
-
-/**
- * Type guard for valid toolset tier
- */
-export function isValidToolsetTier(value: string): value is ToolsetTier {
-  return VALID_TOOLSET_TIERS.includes(value as ToolsetTier);
-}
-
-/**
- * Parse CLI arguments into options object
- */
-export function parseArgs(args: string[]): CliOptions {
-  const options: CliOptions = {
-    port: Number(process.env.PORT ?? 3000),
-    help: false,
-    version: false,
-    stdio: false,
-    toolset: "default",
-  };
-
-  for (let i = 0; i < args.length; i++) {
-    const arg = args[i];
-    if (arg === undefined) continue;
-
-    if (arg === "--help" || arg === "-h") {
-      options.help = true;
-    } else if (arg === "--version" || arg === "-v") {
-      options.version = true;
-    } else if (arg === "--stdio") {
-      options.stdio = true;
-    } else if (arg === "--port") {
-      const portStr = args[++i];
-      if (!portStr || Number.isNaN(Number(portStr))) {
-        process.stderr.write("Error: --port requires a valid number\n");
-        process.exit(1);
-      }
-      options.port = Number(portStr);
-    } else if (arg.startsWith("--port=")) {
-      const portStr = arg.split("=")[1];
-      if (portStr === undefined || Number.isNaN(Number(portStr))) {
-        process.stderr.write("Error: --port requires a valid number\n");
-        process.exit(1);
-      }
-      options.port = Number(portStr);
-    } else if (arg === "--toolset") {
-      const tierStr = args[++i];
-      if (!tierStr) {
-        process.stderr.write("Error: --toolset requires a tier value\n");
-        process.stderr.write("Valid tiers: default, core, memory, full\n");
-        process.exit(1);
-      }
-      if (!isValidToolsetTier(tierStr)) {
-        process.stderr.write(`Error: Invalid toolset tier '${tierStr}'\n`);
-        process.stderr.write("Valid tiers: default, core, memory, full\n");
-        process.exit(1);
-      }
-      options.toolset = tierStr;
-    } else if (arg.startsWith("--toolset=")) {
-      const tierStr = arg.split("=")[1];
-      if (tierStr === undefined || tierStr === "") {
-        process.stderr.write("Error: --toolset requires a tier value\n");
-        process.stderr.write("Valid tiers: default, core, memory, full\n");
-        process.exit(1);
-      }
-      if (!isValidToolsetTier(tierStr)) {
-        process.stderr.write(`Error: Invalid toolset tier '${tierStr}'\n`);
-        process.stderr.write("Valid tiers: default, core, memory, full\n");
-        process.exit(1);
-      }
-      options.toolset = tierStr;
-    } else if (arg.startsWith("-") && arg !== "-") {
-      process.stderr.write(`Unknown option: ${arg}\n`);
-      process.stderr.write("Use --help for usage information\n");
-      process.exit(1);
-    }
-  }
-
-  return options;
-}