@polka-codes/cli 0.10.23 → 0.10.25
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/bin.cjs +143854 -0
- package/dist/bin.d.ts +2 -0
- package/dist/bin.d.ts.map +1 -0
- package/dist/index.d.ts +1 -1
- package/dist/index.d.ts.map +1 -1
- package/dist/index.js +9201 -9195
- package/dist/program.d.ts +3 -0
- package/dist/program.d.ts.map +1 -0
- package/package.json +6 -7
- package/cli.mjs +0 -2
- package/dist/ApiProviderConfig.js +0 -57
- package/dist/ApiProviderConfig.js.map +0 -1
- package/dist/ApiProviderConfig.test.js +0 -278
- package/dist/ApiProviderConfig.test.js.map +0 -1
- package/dist/agent/advanced-discovery.js +0 -604
- package/dist/agent/advanced-discovery.js.map +0 -1
- package/dist/agent/config.js +0 -139
- package/dist/agent/config.js.map +0 -1
- package/dist/agent/config.test.js +0 -140
- package/dist/agent/config.test.js.map +0 -1
- package/dist/agent/constants.js +0 -172
- package/dist/agent/constants.js.map +0 -1
- package/dist/agent/constants.test.js +0 -132
- package/dist/agent/constants.test.js.map +0 -1
- package/dist/agent/debug-logger.js +0 -281
- package/dist/agent/debug-logger.js.map +0 -1
- package/dist/agent/debug-logger.test.js +0 -294
- package/dist/agent/debug-logger.test.js.map +0 -1
- package/dist/agent/error-handling.js +0 -114
- package/dist/agent/error-handling.js.map +0 -1
- package/dist/agent/error-handling.test.js +0 -191
- package/dist/agent/error-handling.test.js.map +0 -1
- package/dist/agent/errors.js +0 -301
- package/dist/agent/errors.js.map +0 -1
- package/dist/agent/executor.js +0 -206
- package/dist/agent/executor.js.map +0 -1
- package/dist/agent/executor.test.js +0 -228
- package/dist/agent/executor.test.js.map +0 -1
- package/dist/agent/goal-decomposer.js +0 -281
- package/dist/agent/goal-decomposer.js.map +0 -1
- package/dist/agent/goal-decomposer.test.js +0 -225
- package/dist/agent/goal-decomposer.test.js.map +0 -1
- package/dist/agent/health-monitor.js +0 -89
- package/dist/agent/health-monitor.js.map +0 -1
- package/dist/agent/improvement-loop.js +0 -177
- package/dist/agent/improvement-loop.js.map +0 -1
- package/dist/agent/index.js +0 -38
- package/dist/agent/index.js.map +0 -1
- package/dist/agent/metrics.js +0 -158
- package/dist/agent/metrics.js.map +0 -1
- package/dist/agent/metrics.test.js +0 -311
- package/dist/agent/metrics.test.js.map +0 -1
- package/dist/agent/orchestrator.js +0 -438
- package/dist/agent/orchestrator.js.map +0 -1
- package/dist/agent/planner.js +0 -199
- package/dist/agent/planner.js.map +0 -1
- package/dist/agent/planner.test.js +0 -135
- package/dist/agent/planner.test.js.map +0 -1
- package/dist/agent/progress.js +0 -298
- package/dist/agent/progress.js.map +0 -1
- package/dist/agent/progress.test.js +0 -255
- package/dist/agent/progress.test.js.map +0 -1
- package/dist/agent/resource-monitor.js +0 -114
- package/dist/agent/resource-monitor.js.map +0 -1
- package/dist/agent/safety/approval.js +0 -178
- package/dist/agent/safety/approval.js.map +0 -1
- package/dist/agent/safety/approval.test.js +0 -142
- package/dist/agent/safety/approval.test.js.map +0 -1
- package/dist/agent/safety/checks.js +0 -152
- package/dist/agent/safety/checks.js.map +0 -1
- package/dist/agent/safety/checks.test.js +0 -89
- package/dist/agent/safety/checks.test.js.map +0 -1
- package/dist/agent/safety/interrupt.js +0 -92
- package/dist/agent/safety/interrupt.js.map +0 -1
- package/dist/agent/safety/interrupt.test.js +0 -53
- package/dist/agent/safety/interrupt.test.js.map +0 -1
- package/dist/agent/session.js +0 -117
- package/dist/agent/session.js.map +0 -1
- package/dist/agent/session.test.js +0 -213
- package/dist/agent/session.test.js.map +0 -1
- package/dist/agent/state-manager.js +0 -287
- package/dist/agent/state-manager.js.map +0 -1
- package/dist/agent/task-discovery.js +0 -433
- package/dist/agent/task-discovery.js.map +0 -1
- package/dist/agent/task-discovery.test.js +0 -40
- package/dist/agent/task-discovery.test.js.map +0 -1
- package/dist/agent/task-history.js +0 -128
- package/dist/agent/task-history.js.map +0 -1
- package/dist/agent/task-prioritizer.js +0 -286
- package/dist/agent/task-prioritizer.js.map +0 -1
- package/dist/agent/test-fixtures.js +0 -112
- package/dist/agent/test-fixtures.js.map +0 -1
- package/dist/agent/types.js +0 -15
- package/dist/agent/types.js.map +0 -1
- package/dist/agent/workflow-adapter.js +0 -219
- package/dist/agent/workflow-adapter.js.map +0 -1
- package/dist/agent/workflow-adapter.test.js +0 -11
- package/dist/agent/workflow-adapter.test.js.map +0 -1
- package/dist/agent/working-dir-discovery.js +0 -54
- package/dist/agent/working-dir-discovery.js.map +0 -1
- package/dist/agent/working-space.js +0 -484
- package/dist/agent/working-space.js.map +0 -1
- package/dist/agent/working-space.test.js +0 -436
- package/dist/agent/working-space.test.js.map +0 -1
- package/dist/api.js +0 -279
- package/dist/api.js.map +0 -1
- package/dist/builtin-commands.js +0 -20
- package/dist/builtin-commands.js.map +0 -1
- package/dist/commandSummarizer.js +0 -96
- package/dist/commandSummarizer.js.map +0 -1
- package/dist/commands/__tests__/command-detection.integration.test.js +0 -238
- package/dist/commands/__tests__/command-detection.integration.test.js.map +0 -1
- package/dist/commands/__tests__/script-generator.integration.test.js +0 -271
- package/dist/commands/__tests__/script-generator.integration.test.js.map +0 -1
- package/dist/commands/agent.js +0 -218
- package/dist/commands/agent.js.map +0 -1
- package/dist/commands/code.js +0 -102
- package/dist/commands/code.js.map +0 -1
- package/dist/commands/command.constants.js +0 -13
- package/dist/commands/command.constants.js.map +0 -1
- package/dist/commands/commit.js +0 -17
- package/dist/commands/commit.js.map +0 -1
- package/dist/commands/fix.js +0 -17
- package/dist/commands/fix.js.map +0 -1
- package/dist/commands/init.js +0 -501
- package/dist/commands/init.js.map +0 -1
- package/dist/commands/mcp-server.js +0 -122
- package/dist/commands/mcp-server.js.map +0 -1
- package/dist/commands/memory.js +0 -410
- package/dist/commands/memory.js.map +0 -1
- package/dist/commands/memory.test.js +0 -453
- package/dist/commands/memory.test.js.map +0 -1
- package/dist/commands/meta.js +0 -142
- package/dist/commands/meta.js.map +0 -1
- package/dist/commands/plan.js +0 -39
- package/dist/commands/plan.js.map +0 -1
- package/dist/commands/pr.js +0 -14
- package/dist/commands/pr.js.map +0 -1
- package/dist/commands/review-json.test.js +0 -33
- package/dist/commands/review-json.test.js.map +0 -1
- package/dist/commands/review.js +0 -137
- package/dist/commands/review.js.map +0 -1
- package/dist/commands/review.usage.test.js +0 -41
- package/dist/commands/review.usage.test.js.map +0 -1
- package/dist/commands/run.js +0 -107
- package/dist/commands/run.js.map +0 -1
- package/dist/commands/skills.js +0 -175
- package/dist/commands/skills.js.map +0 -1
- package/dist/commands/task.js +0 -34
- package/dist/commands/task.js.map +0 -1
- package/dist/commands/workflow.js +0 -117
- package/dist/commands/workflow.js.map +0 -1
- package/dist/configPrompt.js +0 -39
- package/dist/configPrompt.js.map +0 -1
- package/dist/env.js +0 -22
- package/dist/env.js.map +0 -1
- package/dist/errors.js +0 -132
- package/dist/errors.js.map +0 -1
- package/dist/errors.test.js +0 -21
- package/dist/errors.test.js.map +0 -1
- package/dist/file-attachments.js +0 -77
- package/dist/file-attachments.js.map +0 -1
- package/dist/file-attachments.test.js +0 -214
- package/dist/file-attachments.test.js.map +0 -1
- package/dist/getModel.js +0 -229
- package/dist/getModel.js.map +0 -1
- package/dist/getModel.test.js +0 -67
- package/dist/getModel.test.js.map +0 -1
- package/dist/getProviderOptions.js +0 -58
- package/dist/getProviderOptions.js.map +0 -1
- package/dist/git-operations.js +0 -363
- package/dist/git-operations.js.map +0 -1
- package/dist/git-operations.test.js +0 -81
- package/dist/git-operations.test.js.map +0 -1
- package/dist/index.js.map +0 -1
- package/dist/logger.js +0 -41
- package/dist/logger.js.map +0 -1
- package/dist/mcp/client.js +0 -11
- package/dist/mcp/client.js.map +0 -1
- package/dist/mcp/error-scenarios.test.js +0 -206
- package/dist/mcp/error-scenarios.test.js.map +0 -1
- package/dist/mcp/errors.js +0 -63
- package/dist/mcp/errors.js.map +0 -1
- package/dist/mcp/index.js +0 -9
- package/dist/mcp/index.js.map +0 -1
- package/dist/mcp/manager.js +0 -231
- package/dist/mcp/manager.js.map +0 -1
- package/dist/mcp/manager.test.js +0 -40
- package/dist/mcp/manager.test.js.map +0 -1
- package/dist/mcp/sdk-client.js +0 -166
- package/dist/mcp/sdk-client.js.map +0 -1
- package/dist/mcp/shared-types.js +0 -3
- package/dist/mcp/shared-types.js.map +0 -1
- package/dist/mcp/tools-integration.test.js +0 -198
- package/dist/mcp/tools-integration.test.js.map +0 -1
- package/dist/mcp/tools.js +0 -60
- package/dist/mcp/tools.js.map +0 -1
- package/dist/mcp/transport.js +0 -257
- package/dist/mcp/transport.js.map +0 -1
- package/dist/mcp/types.js +0 -3
- package/dist/mcp/types.js.map +0 -1
- package/dist/mcp-server/index.js +0 -5
- package/dist/mcp-server/index.js.map +0 -1
- package/dist/mcp-server/plan-formatting.test.js +0 -74
- package/dist/mcp-server/plan-formatting.test.js.map +0 -1
- package/dist/mcp-server/sdk-server.js +0 -102
- package/dist/mcp-server/sdk-server.js.map +0 -1
- package/dist/mcp-server/sdk-server.test.js +0 -363
- package/dist/mcp-server/sdk-server.test.js.map +0 -1
- package/dist/mcp-server/tools.js +0 -785
- package/dist/mcp-server/tools.js.map +0 -1
- package/dist/mcp-server/types.js +0 -3
- package/dist/mcp-server/types.js.map +0 -1
- package/dist/options.js +0 -85
- package/dist/options.js.map +0 -1
- package/dist/options.test.js +0 -177
- package/dist/options.test.js.map +0 -1
- package/dist/prices.js +0 -61
- package/dist/prices.js.map +0 -1
- package/dist/prices.test.js +0 -148
- package/dist/prices.test.js.map +0 -1
- package/dist/runWorkflow.js +0 -313
- package/dist/runWorkflow.js.map +0 -1
- package/dist/script/__tests__/execution.integration.test.js +0 -274
- package/dist/script/__tests__/execution.integration.test.js.map +0 -1
- package/dist/script/__tests__/runner.test.js +0 -176
- package/dist/script/__tests__/runner.test.js.map +0 -1
- package/dist/script/__tests__/validator.test.js +0 -180
- package/dist/script/__tests__/validator.test.js.map +0 -1
- package/dist/script/executor.js +0 -127
- package/dist/script/executor.js.map +0 -1
- package/dist/script/index.js +0 -4
- package/dist/script/index.js.map +0 -1
- package/dist/script/runner.js +0 -258
- package/dist/script/runner.js.map +0 -1
- package/dist/skillIntegration.js +0 -46
- package/dist/skillIntegration.js.map +0 -1
- package/dist/skillIntegration.test.js +0 -128
- package/dist/skillIntegration.test.js.map +0 -1
- package/dist/test/utils.js +0 -200
- package/dist/test/utils.js.map +0 -1
- package/dist/test/workflow-fixtures.js +0 -120
- package/dist/test/workflow-fixtures.js.map +0 -1
- package/dist/tool-implementations.js +0 -521
- package/dist/tool-implementations.js.map +0 -1
- package/dist/tool-implementations.skill-tools.test.js +0 -106
- package/dist/tool-implementations.skill-tools.test.js.map +0 -1
- package/dist/tools/getTodoItem.js +0 -33
- package/dist/tools/getTodoItem.js.map +0 -1
- package/dist/tools/gitDiff.js +0 -108
- package/dist/tools/gitDiff.js.map +0 -1
- package/dist/tools/index.js +0 -8
- package/dist/tools/index.js.map +0 -1
- package/dist/tools/listMemoryTopics.js +0 -24
- package/dist/tools/listMemoryTopics.js.map +0 -1
- package/dist/tools/listTodoItems.js +0 -35
- package/dist/tools/listTodoItems.js.map +0 -1
- package/dist/tools/listTodoItems.test.js +0 -89
- package/dist/tools/listTodoItems.test.js.map +0 -1
- package/dist/tools/readMemory.js +0 -33
- package/dist/tools/readMemory.js.map +0 -1
- package/dist/tools/updateMemory.js +0 -62
- package/dist/tools/updateMemory.js.map +0 -1
- package/dist/tools/updateMemory.test.js +0 -109
- package/dist/tools/updateMemory.test.js.map +0 -1
- package/dist/tools/updateTodoItem.js +0 -31
- package/dist/tools/updateTodoItem.js.map +0 -1
- package/dist/tools/utils/diffLineNumbers.js +0 -178
- package/dist/tools/utils/diffLineNumbers.js.map +0 -1
- package/dist/utils/cacheControl.js +0 -59
- package/dist/utils/cacheControl.js.map +0 -1
- package/dist/utils/cacheControl.test.js +0 -128
- package/dist/utils/cacheControl.test.js.map +0 -1
- package/dist/utils/command.js +0 -50
- package/dist/utils/command.js.map +0 -1
- package/dist/utils/shell.js +0 -56
- package/dist/utils/shell.js.map +0 -1
- package/dist/utils/userInput.js +0 -47
- package/dist/utils/userInput.js.map +0 -1
- package/dist/workflow-tools.js +0 -21
- package/dist/workflow-tools.js.map +0 -1
- package/dist/workflows/agent-builder.js +0 -90
- package/dist/workflows/agent-builder.js.map +0 -1
- package/dist/workflows/agent-builder.test.js +0 -115
- package/dist/workflows/agent-builder.test.js.map +0 -1
- package/dist/workflows/code.workflow.js +0 -145
- package/dist/workflows/code.workflow.js.map +0 -1
- package/dist/workflows/commit.workflow.js +0 -111
- package/dist/workflows/commit.workflow.js.map +0 -1
- package/dist/workflows/commit.workflow.test.js +0 -141
- package/dist/workflows/commit.workflow.test.js.map +0 -1
- package/dist/workflows/fix.workflow.js +0 -172
- package/dist/workflows/fix.workflow.js.map +0 -1
- package/dist/workflows/fix.workflow.test.js +0 -137
- package/dist/workflows/fix.workflow.test.js.map +0 -1
- package/dist/workflows/git-file-tools.js +0 -408
- package/dist/workflows/git-file-tools.js.map +0 -1
- package/dist/workflows/index.js +0 -12
- package/dist/workflows/index.js.map +0 -1
- package/dist/workflows/init-interactive.workflow.js +0 -198
- package/dist/workflows/init-interactive.workflow.js.map +0 -1
- package/dist/workflows/init.workflow.js +0 -41
- package/dist/workflows/init.workflow.js.map +0 -1
- package/dist/workflows/meta.workflow.js +0 -107
- package/dist/workflows/meta.workflow.js.map +0 -1
- package/dist/workflows/plan.workflow.js +0 -275
- package/dist/workflows/plan.workflow.js.map +0 -1
- package/dist/workflows/plan.workflow.test.js +0 -419
- package/dist/workflows/plan.workflow.test.js.map +0 -1
- package/dist/workflows/pr.workflow.js +0 -54
- package/dist/workflows/pr.workflow.js.map +0 -1
- package/dist/workflows/pr.workflow.test.js +0 -98
- package/dist/workflows/pr.workflow.test.js.map +0 -1
- package/dist/workflows/prompts/coder.js +0 -85
- package/dist/workflows/prompts/coder.js.map +0 -1
- package/dist/workflows/prompts/commit.js +0 -16
- package/dist/workflows/prompts/commit.js.map +0 -1
- package/dist/workflows/prompts/fix.js +0 -44
- package/dist/workflows/prompts/fix.js.map +0 -1
- package/dist/workflows/prompts/index.js +0 -10
- package/dist/workflows/prompts/index.js.map +0 -1
- package/dist/workflows/prompts/init.js +0 -48
- package/dist/workflows/prompts/init.js.map +0 -1
- package/dist/workflows/prompts/meta.js +0 -17
- package/dist/workflows/prompts/meta.js.map +0 -1
- package/dist/workflows/prompts/plan.js +0 -212
- package/dist/workflows/prompts/plan.js.map +0 -1
- package/dist/workflows/prompts/pr.js +0 -15
- package/dist/workflows/prompts/pr.js.map +0 -1
- package/dist/workflows/prompts/review.js +0 -145
- package/dist/workflows/prompts/review.js.map +0 -1
- package/dist/workflows/prompts/shared.js +0 -93
- package/dist/workflows/prompts/shared.js.map +0 -1
- package/dist/workflows/review.workflow.js +0 -357
- package/dist/workflows/review.workflow.js.map +0 -1
- package/dist/workflows/task.workflow.js +0 -47
- package/dist/workflows/task.workflow.js.map +0 -1
- package/dist/workflows/testing/helper.js +0 -41
- package/dist/workflows/testing/helper.js.map +0 -1
- package/dist/workflows/workflow.utils.js +0 -351
- package/dist/workflows/workflow.utils.js.map +0 -1
- package/dist/workflows/workflow.utils.test.js +0 -45
- package/dist/workflows/workflow.utils.test.js.map +0 -1
package/dist/mcp-server/tools.js
DELETED
|
@@ -1,785 +0,0 @@
|
|
|
1
|
-
// generated by polka.codes
|
|
2
|
-
import * as path from 'node:path';
|
|
3
|
-
// Memory imports
|
|
4
|
-
import { getGlobalConfigPath, loadConfigAtPath, MemoryManager, SQLiteMemoryStore } from '@polka-codes/cli-shared';
|
|
5
|
-
import { DEFAULT_MEMORY_CONFIG, resolveHomePath } from '@polka-codes/core';
|
|
6
|
-
import { z } from 'zod';
|
|
7
|
-
import { commit } from '../api';
|
|
8
|
-
import { runWorkflow } from '../runWorkflow';
|
|
9
|
-
import { codeWorkflow } from '../workflows/code.workflow';
|
|
10
|
-
import { fixWorkflow } from '../workflows/fix.workflow';
|
|
11
|
-
import { planWorkflow } from '../workflows/plan.workflow';
|
|
12
|
-
import { reviewWorkflow } from '../workflows/review.workflow';
|
|
13
|
-
/**
 * Get memory store instance for MCP server tools.
 * Lazy-loads one store per project path; concurrent callers for the same
 * project share a single in-flight initialization instead of each opening
 * their own SQLite store.
 *
 * @param logger - Logger used to report initialization failures
 * @param projectPath - Project directory the memory scope is keyed on
 * @returns A `{ store, close }` handle, or `null` when memory is disabled,
 *          configured as in-memory only, or initialization fails
 */
// Cache memory stores per project path. Values are promises so that two
// concurrent calls for the same normalized path never both construct a store.
const memoryStoreCache = new Map();
async function getMemoryStore(logger, projectPath) {
    // Normalize the project path first - this ensures cache key consistency
    // regardless of input format (relative, absolute, trailing slashes, etc.)
    const normalizedPath = path.resolve(projectPath).split(path.sep).join('/');
    // Check cache for an existing (possibly still initializing) store
    const cached = memoryStoreCache.get(normalizedPath);
    if (cached) {
        return cached;
    }
    // Register the in-flight initialization BEFORE awaiting anything, so a
    // second caller arriving mid-initialization reuses the same promise.
    const pending = (async () => {
        try {
            const globalConfigPath = getGlobalConfigPath();
            const config = await loadConfigAtPath(globalConfigPath);
            const memoryConfig = config?.memory || DEFAULT_MEMORY_CONFIG;
            if (!memoryConfig.enabled || memoryConfig.type === 'memory') {
                return null;
            }
            // Create scope using normalized path
            const scope = `project:${normalizedPath}`;
            const dbPath = memoryConfig.path || DEFAULT_MEMORY_CONFIG.path;
            // Resolve home directory and make path absolute
            const resolvedDbPath = path.resolve(resolveHomePath(dbPath));
            const sqliteStore = new SQLiteMemoryStore({ enabled: true, type: 'sqlite', path: resolvedDbPath }, scope);
            const memoryManager = new MemoryManager(sqliteStore);
            return {
                store: memoryManager,
                close: () => {
                    sqliteStore.close();
                    memoryStoreCache.delete(normalizedPath);
                },
            };
        }
        catch (error) {
            // If memory store fails to initialize, log and return null
            logger.error(`Failed to initialize memory store: ${error instanceof Error ? error.message : String(error)}`);
            return null;
        }
    })();
    memoryStoreCache.set(normalizedPath, pending);
    const store = await pending;
    if (store === null) {
        // Do not cache disabled/failed results, so a later call can retry
        // after configuration changes — matches the original retry behavior.
        memoryStoreCache.delete(normalizedPath);
    }
    return store;
}
|
|
58
|
-
/**
 * Schema for provider override options.
 * Can be spread into any tool input schema to allow per-call
 * provider/model overrides.
 */
const PROVIDER_OVERRIDE_DESC = 'Override the AI provider for this call (e.g., "anthropic", "deepseek", "openai")';
const MODEL_OVERRIDE_DESC = 'Override the model for this call (e.g., "claude-sonnet-4-5", "deepseek-chat")';
const PARAMETERS_OVERRIDE_DESC = 'Override model parameters for this call';
const providerOverrideSchema = z.object({
    provider: z.string().optional().describe(PROVIDER_OVERRIDE_DESC),
    model: z.string().optional().describe(MODEL_OVERRIDE_DESC),
    parameters: z.record(z.string(), z.unknown()).optional().describe(PARAMETERS_OVERRIDE_DESC),
});
|
|
67
|
-
/**
 * Extract provider override from tool arguments.
 *
 * @param args - Raw tool arguments, possibly carrying override fields
 * @returns `{ provider, model, parameters }` when at least one override
 *          field is truthy, otherwise `undefined`
 */
function extractProviderOverride(args) {
    const { provider, model, parameters } = args;
    // Any single truthy field means the caller requested an override.
    const hasAnyOverride = Boolean(provider) || Boolean(model) || Boolean(parameters);
    return hasAnyOverride ? { provider, model, parameters } : undefined;
}
|
|
77
|
-
/**
 * Create a minimal execution context for running workflows from MCP server.
 *
 * NOTE: The context must include provider configuration options for workflows
 * to work. The runWorkflow function will load the actual provider config from
 * config files/env, but it needs these context fields to be present.
 *
 * @param _logger - Unused; kept for interface compatibility
 * @returns A context object suitable for runWorkflow
 */
function createExecutionContext(_logger) {
    // Non-interactive defaults for MCP: run from the current directory,
    // auto-confirm, no extra verbosity, no input file.
    const executionFlags = {
        cwd: process.cwd(),
        yes: true,
        verbose: 0,
        file: undefined,
    };
    // Provider configuration fields are deliberately left undefined —
    // runWorkflow populates them from config files / environment.
    const providerFields = {
        model: undefined,
        apiProvider: undefined,
        apiKey: undefined,
    };
    return { ...executionFlags, ...providerFields };
}
|
|
101
|
-
/**
 * Format a truthy workflow result into a plain string for the MCP response.
 * Strings pass through unchanged; objects carrying a `success` flag yield
 * their summary (or error reason); other objects are pretty-printed JSON.
 */
function formatWorkflowResult(result) {
    if (typeof result === 'string') {
        return result;
    }
    if (result && typeof result === 'object') {
        // Check if it's a workflow result object
        if ('success' in result) {
            if (result.success === true) {
                // Include summary, summaries, or output if available
                return result.summary || result.summaries?.join('\n') || result.output || 'Workflow completed successfully';
            }
            // Workflow failed
            return `Error: ${result.reason || result.error || 'Workflow failed'}`;
        }
        // Generic object - stringify it
        return JSON.stringify(result, null, 2);
    }
    // Truthy non-string, non-object result (e.g. a number) — generic message.
    return 'Workflow completed';
}
/**
 * Helper to run a workflow and format the result for MCP response.
 *
 * @param workflow - The workflow function to execute
 * @param input - Input parameters for the workflow
 * @param commandName - Name of the command (for config lookup)
 * @param logger - Logger instance
 * @param providerOverride - Optional provider/model override for this call
 * @param defaultProvider - Default provider config from server startup
 * @returns A human-readable result string; failures are reported as
 *          'Error: ...' strings rather than thrown
 */
async function executeWorkflow(workflow, input, commandName, logger, providerOverride, defaultProvider) {
    try {
        const context = createExecutionContext(logger);
        // Apply provider overrides in priority order:
        // 1. Per-call override (highest priority)
        // 2. Server default
        // 3. Leave undefined to use config file/env defaults
        const finalProvider = providerOverride?.provider || defaultProvider?.provider;
        const finalModel = providerOverride?.model || defaultProvider?.model;
        const finalParameters = providerOverride?.parameters || defaultProvider?.parameters;
        const finalApiKey = defaultProvider?.apiKey; // Only use API key from server defaults (not per-call for security)
        // Update context with overrides if provided
        if (finalProvider) {
            context.apiProvider = finalProvider;
        }
        if (finalModel) {
            context.model = finalModel;
        }
        if (finalApiKey) {
            context.apiKey = finalApiKey;
        }
        // Add default values for BaseWorkflowInput properties
        const workflowInput = {
            ...input,
            interactive: false,
            additionalTools: {},
        };
        const result = await runWorkflow(workflow, workflowInput, {
            commandName,
            context,
            logger,
            interactive: false,
            // Pass provider/model overrides to runWorkflow
            providerOverride: {
                provider: finalProvider,
                model: finalModel,
                parameters: finalParameters,
            },
        });
        if (!result) {
            // Result is undefined - this could indicate an internal error in
            // runWorkflow that was caught and not re-thrown, or a workflow that
            // completed with no output. Return an error to avoid masking failures.
            return 'Error: Workflow returned no result (possible internal error or workflow produced no output)';
        }
        return formatWorkflowResult(result);
    }
    catch (error) {
        logger.error(`Error executing ${commandName} workflow:`, error);
        return `Error: ${error instanceof Error ? error.message : String(error)}`;
    }
}
|
|
184
|
-
/**
|
|
185
|
-
* Create high-level polka-codes workflow tools for MCP server
|
|
186
|
-
*
|
|
187
|
-
* These tools integrate with the actual polka-codes workflows to enable
|
|
188
|
-
* AI assistants (via MCP) to execute code tasks, reviews, planning, etc.
|
|
189
|
-
*
|
|
190
|
-
* Each tool now supports optional provider/model overrides via:
|
|
191
|
-
* - provider: Override the AI provider
|
|
192
|
-
* - model: Override the model
|
|
193
|
-
* - parameters: Override model parameters
|
|
194
|
-
*/
|
|
195
|
-
export function createPolkaCodesServerTools(logger) {
|
|
196
|
-
/**
|
|
197
|
-
* Escape special regex characters in a pattern for safe regex construction
|
|
198
|
-
*/
|
|
199
|
-
function escapeRegexPattern(pattern) {
|
|
200
|
-
return pattern.replace(/[.+*?^${}()|[\]\\]/g, '\\$&');
|
|
201
|
-
}
|
|
202
|
-
/**
|
|
203
|
-
* Create a regex from a wildcard pattern, supporting * and ? wildcards
|
|
204
|
-
* All special regex characters except * and ? are escaped
|
|
205
|
-
*/
|
|
206
|
-
function createWildcardRegex(pattern) {
|
|
207
|
-
// First replace wildcards with placeholders, escape everything else, then restore wildcards
|
|
208
|
-
const withWildcardsPlaceholders = pattern.replace(/\*/g, '\0STAR\0').replace(/\?/g, '\0QUEST\0');
|
|
209
|
-
const escaped = escapeRegexPattern(withWildcardsPlaceholders);
|
|
210
|
-
const withWildcards = escaped.replace(/\0STAR\0/g, '.*').replace(/\0QUEST\0/g, '.');
|
|
211
|
-
return new RegExp(`^${withWildcards}$`);
|
|
212
|
-
}
|
|
213
|
-
return [
|
|
214
|
-
{
|
|
215
|
-
name: 'code',
|
|
216
|
-
description: `Execute a coding task using AI with comprehensive codebase analysis and modification capabilities.
|
|
217
|
-
|
|
218
|
-
The workflow will:
|
|
219
|
-
- Analyze the current codebase structure, patterns, and dependencies
|
|
220
|
-
- Understand existing conventions and architectural decisions
|
|
221
|
-
- Make targeted code changes to accomplish the specified task
|
|
222
|
-
- Ensure all changes compile and pass type checking
|
|
223
|
-
- Run and fix any failing tests
|
|
224
|
-
- Handle complex multi-file changes and refactoring
|
|
225
|
-
- Provide a detailed summary of changes made
|
|
226
|
-
|
|
227
|
-
Best used for implementing new features, refactoring existing code, fixing bugs across multiple files, adding tests, or code modernization.
|
|
228
|
-
|
|
229
|
-
Parameters:
|
|
230
|
-
- task (required): Detailed description of what needs to be implemented or changed
|
|
231
|
-
- provider (optional): Override the AI provider for this call
|
|
232
|
-
- model (optional): Override the model for this call
|
|
233
|
-
- parameters (optional): Override model parameters for this call`,
|
|
234
|
-
inputSchema: z.object({
|
|
235
|
-
task: z.string().describe('The coding task to execute - be specific about what needs to be done'),
|
|
236
|
-
...providerOverrideSchema.shape,
|
|
237
|
-
}),
|
|
238
|
-
handler: async (args, toolContext) => {
|
|
239
|
-
const { task } = args;
|
|
240
|
-
const providerOverride = extractProviderOverride(args);
|
|
241
|
-
logger.info(`MCP: Executing code workflow - task: "${task}"${providerOverride?.provider ? ` with provider: ${providerOverride.provider}` : ''}`);
|
|
242
|
-
return await executeWorkflow(codeWorkflow, { task }, 'code', logger, providerOverride, toolContext.defaultProvider);
|
|
243
|
-
},
|
|
244
|
-
},
|
|
245
|
-
{
|
|
246
|
-
name: 'review',
|
|
247
|
-
description: `Perform comprehensive code review with actionable, structured feedback.
|
|
248
|
-
|
|
249
|
-
This workflow can review:
|
|
250
|
-
- Uncommitted local changes (staged and/or unstaged files) - DEFAULT BEHAVIOR when no parameters provided
|
|
251
|
-
- Branch comparisons (e.g., feature branch vs main)
|
|
252
|
-
- Specific git ranges (e.g., HEAD~3..HEAD, origin/main..HEAD)
|
|
253
|
-
- Pull requests from GitHub/GitLab by number
|
|
254
|
-
|
|
255
|
-
The review provides:
|
|
256
|
-
- Code quality and style analysis
|
|
257
|
-
- Bug identification and potential issues
|
|
258
|
-
- Security and performance concerns
|
|
259
|
-
- Improvement suggestions with examples
|
|
260
|
-
- Best practices compliance feedback
|
|
261
|
-
- Documentation review
|
|
262
|
-
|
|
263
|
-
Output is structured with:
|
|
264
|
-
- Categorized feedback (bugs, style, performance, etc.)
|
|
265
|
-
- Specific file/line references
|
|
266
|
-
- Severity levels (critical, major, minor, nitpick)
|
|
267
|
-
- Actionable recommendations
|
|
268
|
-
|
|
269
|
-
Parameters:
|
|
270
|
-
- pr (optional): Pull request number to review
|
|
271
|
-
- range (optional): Git range to review (e.g., HEAD~3..HEAD, origin/main..HEAD). When omitted, reviews staged and unstaged local changes
|
|
272
|
-
- files (optional): Specific files to review
|
|
273
|
-
- context (optional): Additional context about the changes (purpose, constraints, technical background)
|
|
274
|
-
- provider (optional): Override the AI provider for this call
|
|
275
|
-
- model (optional): Override the model for this call
|
|
276
|
-
- parameters (optional): Override model parameters for this call`,
|
|
277
|
-
inputSchema: z.object({
|
|
278
|
-
pr: z.number().optional().describe('Pull request number to review (optional)'),
|
|
279
|
-
range: z.string().optional().describe('Git range to review (e.g., HEAD~3..HEAD, origin/main..HEAD) (optional)'),
|
|
280
|
-
files: z.array(z.string()).optional().describe('Specific files to review (optional)'),
|
|
281
|
-
context: z
|
|
282
|
-
.string()
|
|
283
|
-
.optional()
|
|
284
|
-
.describe('Additional context for the review - explains the purpose of changes, constraints, or areas of focus (optional)'),
|
|
285
|
-
...providerOverrideSchema.shape,
|
|
286
|
-
}),
|
|
287
|
-
handler: async (args, toolContext) => {
|
|
288
|
-
const { pr, range, files, context } = args;
|
|
289
|
-
const providerOverride = extractProviderOverride(args);
|
|
290
|
-
logger.info(`MCP: Executing review workflow${pr ? ` - PR: ${pr}` : ''}${range ? ` - range: ${range}` : ''}${providerOverride?.provider ? ` with provider: ${providerOverride.provider}` : ''}`);
|
|
291
|
-
return await executeWorkflow(reviewWorkflow, { pr, range, files, context }, 'review', logger, providerOverride, toolContext.defaultProvider);
|
|
292
|
-
},
|
|
293
|
-
},
|
|
294
|
-
{
|
|
295
|
-
name: 'plan',
|
|
296
|
-
description: `Create a detailed, actionable implementation plan for features or problems.
|
|
297
|
-
|
|
298
|
-
The workflow will:
|
|
299
|
-
- Analyze the current codebase to understand existing architecture
|
|
300
|
-
- Identify key files, dependencies, and potential challenges
|
|
301
|
-
- Create a step-by-step implementation roadmap
|
|
302
|
-
- Consider edge cases, error handling, and validation
|
|
303
|
-
- Suggest testing strategies and test cases
|
|
304
|
-
- Identify potential risks and mitigation approaches
|
|
305
|
-
- Recommend refactoring or preparatory work if needed
|
|
306
|
-
|
|
307
|
-
The plan includes:
|
|
308
|
-
- Overview with recommended approach
|
|
309
|
-
- Ordered list of implementation steps
|
|
310
|
-
- Files to create or modify (file paths only, no content)
|
|
311
|
-
- Dependencies and prerequisites
|
|
312
|
-
- Testing strategy
|
|
313
|
-
- Risk assessment and mitigations
|
|
314
|
-
- Rollback strategy if applicable
|
|
315
|
-
|
|
316
|
-
Best used for complex features, architecture changes, large refactorings, or migration strategies.
|
|
317
|
-
|
|
318
|
-
Parameters:
|
|
319
|
-
- task (required): Detailed description of what needs to be planned
|
|
320
|
-
- provider (optional): Override the AI provider for this call
|
|
321
|
-
- model (optional): Override the model for this call
|
|
322
|
-
- parameters (optional): Override model parameters for this call`,
|
|
323
|
-
inputSchema: z.object({
|
|
324
|
-
task: z.string().describe('The task or feature to plan - provide details about requirements, constraints, and goals'),
|
|
325
|
-
...providerOverrideSchema.shape,
|
|
326
|
-
}),
|
|
327
|
-
handler: async (args, toolContext) => {
|
|
328
|
-
const { task } = args;
|
|
329
|
-
const providerOverride = extractProviderOverride(args);
|
|
330
|
-
logger.info(`MCP: Executing plan workflow - task: "${task}"${providerOverride?.provider ? ` with provider: ${providerOverride.provider}` : ''}`);
|
|
331
|
-
try {
|
|
332
|
-
const context = createExecutionContext(logger);
|
|
333
|
-
// Apply provider overrides
|
|
334
|
-
const finalProvider = providerOverride?.provider || toolContext.defaultProvider?.provider;
|
|
335
|
-
const finalModel = providerOverride?.model || toolContext.defaultProvider?.model;
|
|
336
|
-
const finalParameters = providerOverride?.parameters || toolContext.defaultProvider?.parameters;
|
|
337
|
-
const finalApiKey = toolContext.defaultProvider?.apiKey;
|
|
338
|
-
if (finalProvider) {
|
|
339
|
-
context.apiProvider = finalProvider;
|
|
340
|
-
}
|
|
341
|
-
if (finalModel) {
|
|
342
|
-
context.model = finalModel;
|
|
343
|
-
}
|
|
344
|
-
if (finalApiKey) {
|
|
345
|
-
context.apiKey = finalApiKey;
|
|
346
|
-
}
|
|
347
|
-
const result = await runWorkflow(planWorkflow, { task, interactive: false }, {
|
|
348
|
-
commandName: 'plan',
|
|
349
|
-
context,
|
|
350
|
-
logger,
|
|
351
|
-
interactive: false,
|
|
352
|
-
providerOverride: {
|
|
353
|
-
provider: finalProvider,
|
|
354
|
-
model: finalModel,
|
|
355
|
-
parameters: finalParameters,
|
|
356
|
-
},
|
|
357
|
-
});
|
|
358
|
-
// Format plan result for MCP response
|
|
359
|
-
if (result && typeof result === 'object') {
|
|
360
|
-
const planResult = result;
|
|
361
|
-
// If there's a question, return it
|
|
362
|
-
if (planResult.question) {
|
|
363
|
-
return JSON.stringify({ question: planResult.question }, null, 2);
|
|
364
|
-
}
|
|
365
|
-
// If there's a reason (no plan needed), return it
|
|
366
|
-
if (planResult.reason) {
|
|
367
|
-
return `No plan needed: ${planResult.reason}`;
|
|
368
|
-
}
|
|
369
|
-
// Format the plan result with file paths only (no content)
|
|
370
|
-
let output = '';
|
|
371
|
-
if (planResult.plan) {
|
|
372
|
-
output += planResult.plan;
|
|
373
|
-
}
|
|
374
|
-
if (planResult.files && planResult.files.length > 0) {
|
|
375
|
-
output += '\n\nFiles to modify:\n';
|
|
376
|
-
// Extract only the file paths, not the content
|
|
377
|
-
output += planResult.files.map((f) => ` - ${f.path}`).join('\n');
|
|
378
|
-
}
|
|
379
|
-
return output || 'Plan created successfully';
|
|
380
|
-
}
|
|
381
|
-
return 'Plan created successfully';
|
|
382
|
-
}
|
|
383
|
-
catch (error) {
|
|
384
|
-
logger.error(`Error executing plan workflow:`, error);
|
|
385
|
-
return `Error: ${error instanceof Error ? error.message : String(error)}`;
|
|
386
|
-
}
|
|
387
|
-
},
|
|
388
|
-
},
|
|
389
|
-
{
|
|
390
|
-
name: 'fix',
|
|
391
|
-
description: `Diagnose and resolve issues, bugs, or test failures systematically.
|
|
392
|
-
|
|
393
|
-
The workflow will:
|
|
394
|
-
- Analyze error messages, stack traces, and failing tests
|
|
395
|
-
- Identify root causes through code examination
|
|
396
|
-
- Review relevant code and dependencies
|
|
397
|
-
- Implement targeted fixes with proper error handling
|
|
398
|
-
- Run tests to verify the fix
|
|
399
|
-
- Check for regressions in related functionality
|
|
400
|
-
- Iterate if the issue persists
|
|
401
|
-
|
|
402
|
-
Can handle:
|
|
403
|
-
- Test failures (unit, integration, e2e)
|
|
404
|
-
- Build and compilation errors
|
|
405
|
-
- Type checking errors
|
|
406
|
-
- Runtime errors and exceptions
|
|
407
|
-
- Logic bugs
|
|
408
|
-
- Performance issues
|
|
409
|
-
- Security vulnerabilities
|
|
410
|
-
|
|
411
|
-
Process:
|
|
412
|
-
1. Thoroughly analyze the failure or error
|
|
413
|
-
2. Identify and understand the root cause
|
|
414
|
-
3. Implement a minimal, targeted fix
|
|
415
|
-
4. Test to verify the fix works
|
|
416
|
-
5. Check for regressions
|
|
417
|
-
6. Iterate if needed until resolved
|
|
418
|
-
|
|
419
|
-
Parameters:
|
|
420
|
-
- task (required): Description of the issue - include error messages, stack traces, or describe what's not working
|
|
421
|
-
- provider (optional): Override the AI provider for this call
|
|
422
|
-
- model (optional): Override the model for this call
|
|
423
|
-
- parameters (optional): Override model parameters for this call`,
|
|
424
|
-
inputSchema: z.object({
|
|
425
|
-
task: z.string().describe("Description of the issue to fix - include error messages, stack traces, or describe what's not working"),
|
|
426
|
-
...providerOverrideSchema.shape,
|
|
427
|
-
}),
|
|
428
|
-
handler: async (args, toolContext) => {
|
|
429
|
-
const { task } = args;
|
|
430
|
-
const providerOverride = extractProviderOverride(args);
|
|
431
|
-
logger.info(`MCP: Executing fix workflow - task: "${task}"${providerOverride?.provider ? ` with provider: ${providerOverride.provider}` : ''}`);
|
|
432
|
-
return await executeWorkflow(fixWorkflow, { task }, 'fix', logger, providerOverride, toolContext.defaultProvider);
|
|
433
|
-
},
|
|
434
|
-
},
|
|
435
|
-
{
|
|
436
|
-
name: 'commit',
|
|
437
|
-
description: `Create a git commit with an AI-generated, well-formatted commit message.
|
|
438
|
-
|
|
439
|
-
The workflow will:
|
|
440
|
-
- Stage specified files (all files or specific files)
|
|
441
|
-
- Analyze the diff to understand what changed
|
|
442
|
-
- Generate a clear, descriptive commit message following best practices
|
|
443
|
-
- Create the commit with the generated message
|
|
444
|
-
|
|
445
|
-
Commit message format:
|
|
446
|
-
- Clear subject line (50 chars or less)
|
|
447
|
-
- Detailed body explaining what changed and why
|
|
448
|
-
- References to issues/PRs if applicable
|
|
449
|
-
- Conventional commits format when appropriate
|
|
450
|
-
|
|
451
|
-
Best practices followed:
|
|
452
|
-
- Separate subject from body with blank line
|
|
453
|
-
- Use imperative mood in subject line (e.g., "Add feature" not "Added feature")
|
|
454
|
-
- Explain what and why, not how
|
|
455
|
-
- Wrap body lines at 72 characters
|
|
456
|
-
|
|
457
|
-
Parameters:
|
|
458
|
-
- message (optional): Custom commit message. If not provided, AI analyzes changes and generates an appropriate message following best practices
|
|
459
|
-
- stageFiles (optional): Files to stage before committing. Use "all" to stage all files, or provide an array of specific file paths to stage
|
|
460
|
-
- provider (optional): Override the AI provider for this call
|
|
461
|
-
- model (optional): Override the model for this call
|
|
462
|
-
- parameters (optional): Override model parameters for this call`,
|
|
463
|
-
inputSchema: z.object({
|
|
464
|
-
message: z
|
|
465
|
-
.string()
|
|
466
|
-
.optional()
|
|
467
|
-
.describe('Optional commit message - if not provided, AI will analyze changes and generate an appropriate message'),
|
|
468
|
-
stageFiles: z
|
|
469
|
-
.union([z.literal('all'), z.array(z.string())])
|
|
470
|
-
.optional()
|
|
471
|
-
.describe('Files to stage: "all" for all files, or array of specific file paths'),
|
|
472
|
-
...providerOverrideSchema.shape,
|
|
473
|
-
}),
|
|
474
|
-
handler: async (args, toolContext) => {
|
|
475
|
-
const { message, stageFiles } = args;
|
|
476
|
-
const providerOverride = extractProviderOverride(args);
|
|
477
|
-
logger.info(`MCP: Executing commit workflow${message ? ` - message: "${message}"` : ''}${stageFiles ? ` - stageFiles: "${JSON.stringify(stageFiles)}"` : ''}${providerOverride?.provider ? ` with provider: ${providerOverride.provider}` : ''}`);
|
|
478
|
-
try {
|
|
479
|
-
// Apply provider overrides for commit
|
|
480
|
-
const context = createExecutionContext(logger);
|
|
481
|
-
const finalProvider = providerOverride?.provider || toolContext.defaultProvider?.provider;
|
|
482
|
-
const finalModel = providerOverride?.model || toolContext.defaultProvider?.model;
|
|
483
|
-
const finalApiKey = toolContext.defaultProvider?.apiKey;
|
|
484
|
-
if (finalProvider) {
|
|
485
|
-
context.apiProvider = finalProvider;
|
|
486
|
-
}
|
|
487
|
-
if (finalModel) {
|
|
488
|
-
context.model = finalModel;
|
|
489
|
-
}
|
|
490
|
-
if (finalApiKey) {
|
|
491
|
-
context.apiKey = finalApiKey;
|
|
492
|
-
}
|
|
493
|
-
const commitMessage = await commit({
|
|
494
|
-
...context,
|
|
495
|
-
context: message,
|
|
496
|
-
all: stageFiles === 'all',
|
|
497
|
-
files: Array.isArray(stageFiles) ? stageFiles : undefined,
|
|
498
|
-
interactive: false,
|
|
499
|
-
});
|
|
500
|
-
return commitMessage || 'Commit created successfully';
|
|
501
|
-
}
|
|
502
|
-
catch (error) {
|
|
503
|
-
return `Error: ${error instanceof Error ? error.message : String(error)}`;
|
|
504
|
-
}
|
|
505
|
-
},
|
|
506
|
-
},
|
|
507
|
-
{
|
|
508
|
-
name: 'memory_read',
|
|
509
|
-
description: `Read content from a memory topic.
|
|
510
|
-
|
|
511
|
-
Use this to retrieve information stored in previous workflow steps.
|
|
512
|
-
Memory persists across tool calls, allowing you to maintain context
|
|
513
|
-
between different operations.
|
|
514
|
-
|
|
515
|
-
Parameters:
|
|
516
|
-
- project (required): Absolute path to the project directory. This isolates memory to a specific project.
|
|
517
|
-
- topic (optional): The memory topic to read from. Defaults to ":default:" which stores general conversation context.
|
|
518
|
-
|
|
519
|
-
Returns the content stored in the specified topic, or a message indicating the topic is empty.`,
|
|
520
|
-
inputSchema: z.object({
|
|
521
|
-
project: z.string().describe('Absolute path to the project directory (e.g., "/home/user/my-project")'),
|
|
522
|
-
topic: z.string().optional().describe('The memory topic to read from (defaults to ":default:")'),
|
|
523
|
-
}),
|
|
524
|
-
handler: async (args, toolContext) => {
|
|
525
|
-
const { project, topic = ':default:' } = args;
|
|
526
|
-
toolContext.logger.info(`MCP: Reading from memory topic "${topic}" for project "${project}"`);
|
|
527
|
-
const memoryStore = await getMemoryStore(toolContext.logger, project);
|
|
528
|
-
if (!memoryStore) {
|
|
529
|
-
return 'Error: Memory store is not enabled. Configure it in your .polkacodes.yml with memory.enabled: true';
|
|
530
|
-
}
|
|
531
|
-
try {
|
|
532
|
-
const content = await memoryStore.store.readMemory(topic);
|
|
533
|
-
if (content) {
|
|
534
|
-
return content;
|
|
535
|
-
}
|
|
536
|
-
return `Memory topic "${topic}" is empty.`;
|
|
537
|
-
}
|
|
538
|
-
catch (error) {
|
|
539
|
-
return `Error: ${error instanceof Error ? error.message : String(error)}`;
|
|
540
|
-
}
|
|
541
|
-
},
|
|
542
|
-
},
|
|
543
|
-
{
|
|
544
|
-
name: 'memory_update',
|
|
545
|
-
description: `Update content in a memory topic.
|
|
546
|
-
|
|
547
|
-
Use this to store information for later retrieval in subsequent tool calls.
|
|
548
|
-
Memory persists across tool calls, allowing you to maintain context
|
|
549
|
-
between different operations.
|
|
550
|
-
|
|
551
|
-
Parameters:
|
|
552
|
-
- project (required): Absolute path to the project directory. This isolates memory to a specific project.
|
|
553
|
-
- operation (required): The operation to perform. Use "append" to add content, "replace" to overwrite all content, or "remove" to delete the topic.
|
|
554
|
-
- topics (optional): Array of topic names for batch operations. Content can be an array (one per topic) or a single string (broadcast to all topics).
|
|
555
|
-
- topic (optional): Single memory topic to update. Defaults to ":default:".
|
|
556
|
-
- content (optional): The content to store (required for "append" and "replace" operations). For batch operations with topics, provide an array of the same length or a single string to broadcast.
|
|
557
|
-
|
|
558
|
-
Supports wildcards in topic name for remove operation:
|
|
559
|
-
- Use "*" to remove all topics (e.g., topic ":plan:*")
|
|
560
|
-
- Use pattern matching like ":plan:*" to remove all topics starting with ":plan:"
|
|
561
|
-
|
|
562
|
-
Returns a message confirming the operation performed.`,
|
|
563
|
-
inputSchema: z
|
|
564
|
-
.object({
|
|
565
|
-
project: z.string().describe('Absolute path to the project directory (e.g., "/home/user/my-project")'),
|
|
566
|
-
operation: z
|
|
567
|
-
.enum(['append', 'replace', 'remove'])
|
|
568
|
-
.describe('The operation: append (add content), replace (overwrite), or remove (delete topic(s))'),
|
|
569
|
-
topic: z.string().optional().describe('Single memory topic to update (defaults to ":default:")'),
|
|
570
|
-
topics: z.array(z.string()).min(1).optional().describe('Array of topics for batch operations'),
|
|
571
|
-
content: z
|
|
572
|
-
.union([z.string(), z.array(z.string())])
|
|
573
|
-
.optional()
|
|
574
|
-
.describe('Content to store (string or array for batch). Required for append/replace, omitted for remove'),
|
|
575
|
-
})
|
|
576
|
-
.refine((data) => {
|
|
577
|
-
// If topics array is provided with content, validate the combination
|
|
578
|
-
if (data.topics && data.content) {
|
|
579
|
-
// Array content must match topics length
|
|
580
|
-
if (Array.isArray(data.content)) {
|
|
581
|
-
return data.content.length === data.topics.length;
|
|
582
|
-
}
|
|
583
|
-
// String content can be broadcast to any number of topics
|
|
584
|
-
return true;
|
|
585
|
-
}
|
|
586
|
-
// If topics is not provided (single topic mode), content must be a string or undefined
|
|
587
|
-
if (!data.topics && data.content !== undefined && Array.isArray(data.content)) {
|
|
588
|
-
return false;
|
|
589
|
-
}
|
|
590
|
-
return true;
|
|
591
|
-
}, {
|
|
592
|
-
message: 'For single topic mode, content must be a string. For batch mode with topics array, content can be an array (same length) or a string (broadcast to all topics)',
|
|
593
|
-
}),
|
|
594
|
-
handler: async (args, toolContext) => {
|
|
595
|
-
const { project, operation, topic: singleTopic, topics, content: contentInput, } = args;
|
|
596
|
-
toolContext.logger.info(`MCP: Memory operation "${operation}" on ${topics ? `${topics.length} topics` : `topic "${singleTopic}"`} for project "${project}"`);
|
|
597
|
-
const memoryStore = await getMemoryStore(toolContext.logger, project);
|
|
598
|
-
if (!memoryStore) {
|
|
599
|
-
return 'Error: Memory store is not enabled. Configure it in your .polkacodes.yml with memory.enabled: true';
|
|
600
|
-
}
|
|
601
|
-
try {
|
|
602
|
-
// Handle batch operations
|
|
603
|
-
if (topics) {
|
|
604
|
-
// Validate content requirement for batch
|
|
605
|
-
if (operation === 'remove' && contentInput !== undefined) {
|
|
606
|
-
return 'Error: Content must not be provided for "remove" operation';
|
|
607
|
-
}
|
|
608
|
-
if ((operation === 'append' || operation === 'replace') && contentInput === undefined) {
|
|
609
|
-
return 'Error: Content is required for "append" and "replace" operations';
|
|
610
|
-
}
|
|
611
|
-
const contents = Array.isArray(contentInput) ? contentInput : topics.map(() => contentInput);
|
|
612
|
-
// Build batch operations
|
|
613
|
-
const operations = topics.map((topic, index) => ({
|
|
614
|
-
operation,
|
|
615
|
-
name: topic,
|
|
616
|
-
content: operation === 'remove' ? undefined : contents[index],
|
|
617
|
-
}));
|
|
618
|
-
await memoryStore.store.batchUpdateMemory(operations);
|
|
619
|
-
return `Batch operation "${operation}" completed on ${topics.length} topics:\n${topics.join('\n')}`;
|
|
620
|
-
}
|
|
621
|
-
// Handle wildcard removal
|
|
622
|
-
const topic = singleTopic || ':default:';
|
|
623
|
-
if (operation === 'remove' && (topic.includes('*') || topic.includes('?'))) {
|
|
624
|
-
// Query all entries and filter by wildcard pattern
|
|
625
|
-
const allEntries = await memoryStore.store.queryMemory({ scope: 'auto' }, { operation: 'select' });
|
|
626
|
-
if (!Array.isArray(allEntries)) {
|
|
627
|
-
return 'Error: Unable to query memory entries';
|
|
628
|
-
}
|
|
629
|
-
const regex = createWildcardRegex(topic);
|
|
630
|
-
const matchingTopics = allEntries.filter((e) => regex.test(e.name)).map((e) => e.name);
|
|
631
|
-
if (matchingTopics.length === 0) {
|
|
632
|
-
return `No topics found matching pattern "${topic}"`;
|
|
633
|
-
}
|
|
634
|
-
// Batch remove all matching topics
|
|
635
|
-
const operations = matchingTopics.map((matchingTopic) => ({
|
|
636
|
-
operation: 'remove',
|
|
637
|
-
name: matchingTopic,
|
|
638
|
-
}));
|
|
639
|
-
await memoryStore.store.batchUpdateMemory(operations);
|
|
640
|
-
return `Removed ${matchingTopics.length} topic(s) matching pattern "${topic}":\n${matchingTopics.join('\n')}`;
|
|
641
|
-
}
|
|
642
|
-
// Handle single topic operation
|
|
643
|
-
if ((operation === 'append' || operation === 'replace') && contentInput === undefined) {
|
|
644
|
-
return 'Error: Content is required for "append" and "replace" operations';
|
|
645
|
-
}
|
|
646
|
-
if (operation === 'remove' && contentInput !== undefined) {
|
|
647
|
-
return 'Error: Content must not be provided for "remove" operation';
|
|
648
|
-
}
|
|
649
|
-
const content = typeof contentInput === 'string' ? contentInput : undefined;
|
|
650
|
-
await memoryStore.store.updateMemory(operation, topic, content);
|
|
651
|
-
const messages = {
|
|
652
|
-
append: `Content appended to memory topic "${topic}"`,
|
|
653
|
-
replace: `Memory topic "${topic}" replaced`,
|
|
654
|
-
remove: `Memory topic "${topic}" removed`,
|
|
655
|
-
};
|
|
656
|
-
return messages[operation];
|
|
657
|
-
}
|
|
658
|
-
catch (error) {
|
|
659
|
-
return `Error: ${error instanceof Error ? error.message : String(error)}`;
|
|
660
|
-
}
|
|
661
|
-
},
|
|
662
|
-
},
|
|
663
|
-
{
|
|
664
|
-
name: 'memory_list',
|
|
665
|
-
description: `List all available memory topics.
|
|
666
|
-
|
|
667
|
-
Use this to see what information has been stored and which topics are
|
|
668
|
-
available to read from. Returns a list of topic names that have content.
|
|
669
|
-
|
|
670
|
-
Parameters:
|
|
671
|
-
- project (required): Absolute path to the project directory. This isolates memory to a specific project.
|
|
672
|
-
- pattern (optional): Filter topics by wildcard pattern (e.g., ":plan:*" for all plan topics)`,
|
|
673
|
-
inputSchema: z.object({
|
|
674
|
-
project: z.string().describe('Absolute path to the project directory (e.g., "/home/user/my-project")'),
|
|
675
|
-
pattern: z.string().optional().describe('Filter topics by wildcard pattern (e.g., ":plan:*")'),
|
|
676
|
-
}),
|
|
677
|
-
handler: async (args, toolContext) => {
|
|
678
|
-
const { project, pattern } = args;
|
|
679
|
-
toolContext.logger.info(`MCP: Listing memory topics for project "${project}"${pattern ? ` with pattern "${pattern}"` : ''}`);
|
|
680
|
-
const memoryStore = await getMemoryStore(toolContext.logger, project);
|
|
681
|
-
if (!memoryStore) {
|
|
682
|
-
return 'Error: Memory store is not enabled. Configure it in your .polkacodes.yml with memory.enabled: true';
|
|
683
|
-
}
|
|
684
|
-
try {
|
|
685
|
-
// Query all memory entries for this project (scope is automatically set by getMemoryStore)
|
|
686
|
-
const entries = await memoryStore.store.queryMemory({}, { operation: 'select' });
|
|
687
|
-
if (!entries || !Array.isArray(entries) || entries.length === 0) {
|
|
688
|
-
return 'No memory topics found.';
|
|
689
|
-
}
|
|
690
|
-
// Extract topic names and filter by pattern if provided
|
|
691
|
-
let topics = [...new Set(entries.map((e) => e.name))];
|
|
692
|
-
if (pattern) {
|
|
693
|
-
const regex = createWildcardRegex(pattern);
|
|
694
|
-
topics = topics.filter((t) => regex.test(t));
|
|
695
|
-
}
|
|
696
|
-
if (topics.length === 0) {
|
|
697
|
-
return pattern ? `No memory topics found matching pattern "${pattern}"` : 'No memory topics found.';
|
|
698
|
-
}
|
|
699
|
-
return `Memory topics (${topics.length}):\n${topics.join('\n')}`;
|
|
700
|
-
}
|
|
701
|
-
catch (error) {
|
|
702
|
-
return `Error: ${error instanceof Error ? error.message : String(error)}`;
|
|
703
|
-
}
|
|
704
|
-
},
|
|
705
|
-
},
|
|
706
|
-
{
|
|
707
|
-
name: 'memory_query',
|
|
708
|
-
description: `Query memory with advanced filters.
|
|
709
|
-
|
|
710
|
-
Use this to search memory entries by content, metadata, or other criteria.
|
|
711
|
-
Returns detailed entry information with metadata.
|
|
712
|
-
|
|
713
|
-
Parameters:
|
|
714
|
-
- project (required): Absolute path to the project directory. This isolates memory to a specific project.
|
|
715
|
-
- search (optional): Search text to find in content
|
|
716
|
-
- type (optional): Filter by entry type (note, todo, plan, etc.)
|
|
717
|
-
- status (optional): Filter by status (open, completed, closed, etc.)
|
|
718
|
-
- priority (optional): Filter by priority (null, low, medium, high)
|
|
719
|
-
- tags (optional): Filter by tags
|
|
720
|
-
- operation (optional): Query operation - "select" returns entries, "count" returns count
|
|
721
|
-
|
|
722
|
-
Returns matching entries with full metadata.`,
|
|
723
|
-
inputSchema: z.object({
|
|
724
|
-
project: z.string().describe('Absolute path to the project directory (e.g., "/home/user/my-project")'),
|
|
725
|
-
search: z.string().optional().describe('Search text to find in content'),
|
|
726
|
-
type: z.string().optional().describe('Filter by entry type (note, todo, plan, etc.)'),
|
|
727
|
-
status: z.string().optional().describe('Filter by status (open, completed, closed, etc.)'),
|
|
728
|
-
priority: z.string().optional().describe('Filter by priority (null, low, medium, high)'),
|
|
729
|
-
tags: z.string().optional().describe('Filter by tags'),
|
|
730
|
-
operation: z.enum(['select', 'count']).optional().describe('Query operation (defaults to "select")'),
|
|
731
|
-
}),
|
|
732
|
-
handler: async (args, toolContext) => {
|
|
733
|
-
const { project, search, type, status, priority, tags, operation = 'select', } = args;
|
|
734
|
-
toolContext.logger.info(`MCP: Querying memory for project "${project}" - operation: "${operation}"`);
|
|
735
|
-
const memoryStore = await getMemoryStore(toolContext.logger, project);
|
|
736
|
-
if (!memoryStore) {
|
|
737
|
-
return 'Error: Memory store is not enabled. Configure it in your .polkacodes.yml with memory.enabled: true';
|
|
738
|
-
}
|
|
739
|
-
try {
|
|
740
|
-
// Build query object (scope is automatically set by getMemoryStore)
|
|
741
|
-
const memoryQuery = {};
|
|
742
|
-
if (search)
|
|
743
|
-
memoryQuery.search = search;
|
|
744
|
-
if (type)
|
|
745
|
-
memoryQuery.type = type;
|
|
746
|
-
if (status)
|
|
747
|
-
memoryQuery.status = status;
|
|
748
|
-
if (priority)
|
|
749
|
-
memoryQuery.priority = priority;
|
|
750
|
-
if (tags)
|
|
751
|
-
memoryQuery.tags = tags;
|
|
752
|
-
const result = await memoryStore.store.queryMemory(memoryQuery, { operation });
|
|
753
|
-
if (operation === 'count') {
|
|
754
|
-
return `Found ${typeof result === 'number' ? result : 0} matching entries`;
|
|
755
|
-
}
|
|
756
|
-
// Format entries for display
|
|
757
|
-
if (!Array.isArray(result) || result.length === 0) {
|
|
758
|
-
return 'No matching entries found.';
|
|
759
|
-
}
|
|
760
|
-
const formatted = result.map((entry) => {
|
|
761
|
-
const lines = [];
|
|
762
|
-
lines.push(`Topic: ${entry.name}`);
|
|
763
|
-
if (entry.entry_type)
|
|
764
|
-
lines.push(` Type: ${entry.entry_type}`);
|
|
765
|
-
if (entry.status)
|
|
766
|
-
lines.push(` Status: ${entry.status}`);
|
|
767
|
-
if (entry.priority)
|
|
768
|
-
lines.push(` Priority: ${entry.priority}`);
|
|
769
|
-
if (entry.tags)
|
|
770
|
-
lines.push(` Tags: ${entry.tags}`);
|
|
771
|
-
if (entry.created_at)
|
|
772
|
-
lines.push(` Created: ${new Date(entry.created_at).toISOString()}`);
|
|
773
|
-
lines.push(` Content: ${entry.content?.substring(0, 100)}${entry.content && entry.content.length > 100 ? '...' : ''}`);
|
|
774
|
-
return lines.join('\n');
|
|
775
|
-
});
|
|
776
|
-
return `Found ${result.length} entries:\n\n${formatted.join('\n\n')}`;
|
|
777
|
-
}
|
|
778
|
-
catch (error) {
|
|
779
|
-
return `Error: ${error instanceof Error ? error.message : String(error)}`;
|
|
780
|
-
}
|
|
781
|
-
},
|
|
782
|
-
},
|
|
783
|
-
];
|
|
784
|
-
}
|
|
785
|
-
//# sourceMappingURL=tools.js.map
|