@claudetools/tools 0.8.4 → 0.8.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/cli.js CHANGED
@@ -12,6 +12,7 @@ import { startServer } from './index.js';
  import { startWatcher, stopWatcher, watcherStatus } from './watcher.js';
  import { generateCodebaseMap, generateCodebaseMapLocal } from './helpers/codebase-mapper.js';
  import { contextStatus, contextEvict, contextSummarise, contextReset, } from './context-cli.js';
+ import { runTests } from './test-runner.js';
  // Get version from package.json
  const __filename = fileURLToPath(import.meta.url);
  const __dirname = dirname(__filename);
@@ -61,6 +62,9 @@ Commands:
  context evict       Manually trigger eviction cycle
  context summarise   Summarise and compress exchanges
  context reset       Clear session state
+ test                Run integration test suite
+ test --verbose      Run tests with detailed output
+ test --json         Output results in JSON format

  Running without options starts the MCP server.

@@ -206,6 +210,16 @@ else if (positionals[0] === 'context') {
          process.exit(1);
      }
  }
+ else if (positionals[0] === 'test') {
+     // Handle test command
+     const testArgs = process.argv.slice(3); // Get args after 'test'
+     const verbose = testArgs.includes('--verbose');
+     const json = testArgs.includes('--json');
+     runTests({ verbose, json }).catch((error) => {
+         console.error('Test run failed:', error);
+         process.exit(1);
+     });
+ }
  else {
      // Start MCP server
      startServer();
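As a point of reference, here is a minimal sketch of driving the new test runner programmatically. The `runTests` import path and its `{ verbose, json }` options are taken from the handler above; the function's return value is not shown in this diff, so nothing is assumed about it.

```ts
// Sketch only: mirrors what the new `test --verbose` subcommand does.
import { runTests } from './test-runner.js';

async function main(): Promise<void> {
    await runTests({ verbose: true, json: false });
}

main().catch((error) => {
    console.error('Test run failed:', error);
    process.exit(1);
});
```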
@@ -11,8 +11,8 @@ import { recordToolCall, getToolCallWarnings } from '../helpers/session-validati
  import { queryDependencies, analyzeImpact } from '../helpers/dependencies.js';
  import { checkPatterns } from '../helpers/patterns.js';
  import { formatContextForClaude } from '../helpers/formatter.js';
- import { compactTaskList, compactTaskCreated, compactTaskStart, compactTaskComplete, compactTaskClaim, compactTaskRelease, compactStatusUpdate, compactContextAdded, compactHeartbeat, compactSummary, } from '../helpers/compact-formatter.js';
- import { createTask, listTasks, getTask, claimTask, releaseTask, updateTaskStatus, addTaskContext, getTaskSummary, heartbeatTask, parseJsonArray, getDispatchableTasks, getExecutionContext, resolveTaskDependencies, getEpicStatus, getActiveTaskCount, } from '../helpers/tasks.js';
+ import { shortId, compactTaskList, compactTaskCreated, compactTaskStart, compactTaskComplete, compactTaskClaim, compactTaskRelease, compactStatusUpdate, compactContextAdded, compactHeartbeat, compactTaskHandoff, compactSummary, } from '../helpers/compact-formatter.js';
+ import { createTask, listTasks, getTask, claimTask, releaseTask, updateTaskStatus, addTaskContext, getTaskSummary, heartbeatTask, handoffTask, parseJsonArray, getDispatchableTasks, getExecutionContext, resolveTaskDependencies, getEpicStatus, getEpicAggregate, getActiveTaskCount, reviseEpic, } from '../helpers/tasks.js';
  import { detectTimedOutTasks, retryTask, failTask, autoRetryTimedOutTasks, } from '../helpers/tasks-retry.js';
  import { detectLibrariesFromPlan } from '../helpers/library-detection.js';
  import { handleGenerateApi, handleGenerateFrontend, handleGenerateComponent, handleListGenerators, handleValidateSpec, handleListPatterns, handleGetPattern, handleDetectPatterns, handleInitProject, } from './codedna-handlers.js';
@@ -133,9 +133,8 @@ export function registerToolHandlers(server) {
      const entity2 = args?.entity2;
      const context = args?.context;
      const is_critical = args?.is_critical;
-     // Commercial-grade storage with blocking verification
+     // Storage with read-after-write verification (DO-level)
      const MAX_RETRIES = 3;
-     const VERIFY_DELAY_MS = 200;
      let lastError = null;
      let storedFactId = null;
      let storedIsCritical = false;
@@ -144,32 +143,24 @@ export function registerToolHandlers(server) {
      for (let attempt = 1; attempt <= MAX_RETRIES && !verified; attempt++) {
          attempts = attempt;
          try {
-             // Step 1: Store the fact
              mcpLogger.info('STORE', `Attempt ${attempt}/${MAX_RETRIES}: Storing "${entity1} ${relationship} ${entity2}"${is_critical ? ' [CRITICAL]' : ''}`);
              const result = await storeFact(projectId, entity1, relationship, entity2, context, { is_critical });
              storedFactId = result.fact_id;
              storedIsCritical = result.is_critical;
              mcpLogger.info('STORE', `Storage response: ${JSON.stringify(result)}`);
              if (!result.success || !result.fact_id) {
-                 lastError = new Error(`Storage returned unsuccessful: ${JSON.stringify(result)}`);
+                 lastError = new Error(`Storage returned unsuccessful: ${result.error || 'Unknown error'}`);
                  mcpLogger.warn('STORE', `Attempt ${attempt} failed: ${lastError.message}`);
                  continue;
              }
-             // Step 2: Wait briefly for eventual consistency
-             await new Promise(resolve => setTimeout(resolve, VERIFY_DELAY_MS));
-             // Step 3: Verify the fact is retrievable by searching for it
-             mcpLogger.info('STORE', `Verifying fact ${storedFactId} is retrievable...`);
-             const searchQuery = `${entity1} ${relationship} ${entity2}`;
-             const searchResult = await searchMemory(projectId, searchQuery, 5);
-             // Check if our fact appears in results
-             const factFound = searchResult.relevant_facts?.some(f => f.fact?.includes(entity1) && f.fact?.includes(entity2)) || false;
-             if (factFound) {
+             // Check DO-level verification (read-after-write in Durable Object)
+             if (result.verified) {
                  verified = true;
-                 mcpLogger.info('STORE', `✓ Fact verified as retrievable`);
+                 mcpLogger.info('STORE', `✓ Fact verified at storage layer (ID: ${storedFactId})`);
              }
              else {
-                 lastError = new Error(`Fact stored but not found in search results`);
-                 mcpLogger.warn('STORE', `Attempt ${attempt}: Stored but not retrievable. Search returned ${searchResult.relevant_facts?.length || 0} facts.`);
+                 lastError = new Error(`Storage verification failed at DO level`);
+                 mcpLogger.warn('STORE', `Attempt ${attempt}: Storage layer verification failed`);
              }
          }
          catch (err) {
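The change above drops the client-side `setTimeout` + `searchMemory` verification pass in favour of a `verified` flag computed by read-after-write inside the Durable Object. A condensed sketch of the resulting loop, with the handler's logging elided and `storeFact` injected as a parameter so the snippet stands alone:

```ts
// Result shape follows the updated storeFact declaration in this diff:
// { success, fact_id, is_critical, verified?, error? }.
type StoreResult = { success: boolean; fact_id: string; verified?: boolean; error?: string };

async function storeWithVerification(
    storeFact: (projectId: string, e1: string, rel: string, e2: string) => Promise<StoreResult>,
    projectId: string, entity1: string, relationship: string, entity2: string,
): Promise<boolean> {
    const MAX_RETRIES = 3;
    for (let attempt = 1; attempt <= MAX_RETRIES; attempt++) {
        const result = await storeFact(projectId, entity1, relationship, entity2);
        // The storage layer already re-read the fact; no delay-and-search here.
        if (result.success && result.fact_id && result.verified) {
            return true;
        }
    }
    return false;
}
```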
@@ -827,6 +818,73 @@ export function registerToolHandlers(server) {
          content: [{ type: 'text', text: output }],
      };
  }
+ case 'task_plan_revise': {
+     const epicId = args?.epic_id;
+     const addTasks = args?.add_tasks;
+     const removeTaskIds = args?.remove_task_ids;
+     const updateTasks = args?.update_tasks;
+     try {
+         const result = await reviseEpic(DEFAULT_USER_ID, projectId, epicId, {
+             add_tasks: addTasks,
+             remove_task_ids: removeTaskIds,
+             update_tasks: updateTasks,
+         });
+         mcpLogger.toolResult(name, true, timer(), `Epic ${shortId(epicId)} revised: +${result.added.length} tasks, -${result.removed.length} tasks, ~${result.updated.length} tasks`);
+         let output = `# Epic Revised: ${result.epic.title}\n\n`;
+         output += `**Epic ID:** \`${result.epic.id}\`\n`;
+         output += `**Status:** ${result.epic.status}\n\n`;
+         if (result.added.length > 0) {
+             output += `## Added Tasks (${result.added.length})\n\n`;
+             result.added.forEach((task, i) => {
+                 output += `${i + 1}. **${task.title}** (\`${shortId(task.id)}\`)`;
+                 if (task.estimated_effort)
+                     output += ` - ${task.estimated_effort}`;
+                 output += `\n`;
+                 if (task.description)
+                     output += ` ${task.description}\n`;
+             });
+             output += '\n';
+         }
+         if (result.removed.length > 0) {
+             output += `## Cancelled Tasks (${result.removed.length})\n\n`;
+             result.removed.forEach((taskId, i) => {
+                 output += `${i + 1}. \`${shortId(taskId)}\` (cancelled)\n`;
+             });
+             output += '\n';
+         }
+         if (result.updated.length > 0) {
+             output += `## Updated Tasks (${result.updated.length})\n\n`;
+             result.updated.forEach((task, i) => {
+                 output += `${i + 1}. **${task.title}** (\`${shortId(task.id)}\`) - Context added with update details\n`;
+             });
+             output += '\n';
+         }
+         // Get updated epic status
+         const epicStatus = await getEpicStatus(DEFAULT_USER_ID, projectId, epicId);
+         output += `## Current Status\n\n`;
+         output += `**Total Tasks:** ${epicStatus.totalTasks}\n`;
+         output += `**Progress:** ${epicStatus.percentComplete}% complete\n`;
+         output += `**By Status:**\n`;
+         Object.entries(epicStatus.byStatus).forEach(([status, count]) => {
+             if (count > 0) {
+                 output += `- ${status}: ${count}\n`;
+             }
+         });
+         return {
+             content: [{ type: 'text', text: output }],
+         };
+     }
+     catch (error) {
+         mcpLogger.toolResult(name, false, timer(), error instanceof Error ? error.message : 'Unknown error');
+         return {
+             content: [{
+                     type: 'text',
+                     text: `Error revising epic: ${error instanceof Error ? error.message : 'Unknown error'}`,
+                 }],
+             isError: true,
+         };
+     }
+ }
  case 'task_start': {
      const taskId = args?.task_id;
      const agentId = args?.agent_id || 'claude-code';
@@ -1048,6 +1106,19 @@ export function registerToolHandlers(server) {
          content: [{ type: 'text', text: output }],
      };
  }
+ case 'task_handoff': {
+     const taskId = args?.task_id;
+     const newWorkerType = args?.new_worker_type;
+     const reason = args?.reason;
+     const agentId = args?.agent_id || 'claude-code';
+     const result = await handoffTask(DEFAULT_USER_ID, projectId, taskId, agentId, newWorkerType, reason);
+     mcpLogger.toolResult(name, true, timer());
+     // Compact output
+     const output = compactTaskHandoff(result.data.task, result.data.handed_off, result.data.new_worker_type);
+     return {
+         content: [{ type: 'text', text: output }],
+     };
+ }
  // =========================================================================
  // ORCHESTRATION HANDLERS
  // =========================================================================
@@ -1100,7 +1171,12 @@ export function registerToolHandlers(server) {
  output += `**Task:** ${context.task.title}\n`;
  output += `**Task ID:** \`${context.task.id}\`\n`;
  output += `**Worker Type:** ${context.worker.name} (\`${context.worker.id}\`)\n`;
- output += `**Status:** ${context.task.status}\n\n`;
+ output += `**Status:** ${context.task.status}\n`;
+ // Display lock expiry warning if present
+ if (context.lockWarning?.warning && context.lockWarning.message) {
+     output += `\n${context.lockWarning.message}\n`;
+ }
+ output += `\n`;
  output += `## System Prompt for Worker\n\n`;
  output += `\`\`\`\n${context.systemPrompt}\`\`\`\n\n`;
  if (context.parentTask) {
@@ -1224,6 +1300,47 @@ export function registerToolHandlers(server) {
          content: [{ type: 'text', text: output }],
      };
  }
+ case 'task_aggregate': {
+     const epicId = args?.epic_id;
+     const includePending = args?.include_pending || false;
+     const aggregate = await getEpicAggregate(DEFAULT_USER_ID, projectId, epicId, includePending);
+     mcpLogger.toolResult(name, true, timer());
+     let output = `# Epic Aggregate: ${aggregate.epic.title}\n\n`;
+     output += `**Epic ID:** \`${epicId}\`\n`;
+     output += `**Epic Status:** ${aggregate.epic.status}\n`;
+     output += `**Description:** ${aggregate.epic.description}\n\n`;
+     output += `## Summary Statistics\n\n`;
+     output += `- **Total Tasks:** ${aggregate.summary_stats.total}\n`;
+     output += `- **Completed:** ${aggregate.summary_stats.completed}\n`;
+     output += `- **In Progress:** ${aggregate.summary_stats.in_progress}\n`;
+     output += `- **Pending:** ${aggregate.summary_stats.pending}\n\n`;
+     output += `## Task Work Logs\n\n`;
+     if (aggregate.tasks.length === 0) {
+         output += `No tasks ${includePending ? '' : 'with work logs '}found for this epic.\n`;
+     }
+     else {
+         for (const task of aggregate.tasks) {
+             const statusEmoji = task.status === 'done' ? '✅' : task.status === 'in_progress' ? '🔄' : '📋';
+             output += `### ${statusEmoji} ${task.title}\n`;
+             output += `- **Task ID:** \`${task.id}\`\n`;
+             output += `- **Status:** ${task.status}\n`;
+             if (task.completed_at) {
+                 const completedDate = new Date(task.completed_at).toLocaleString();
+                 output += `- **Completed:** ${completedDate}\n`;
+             }
+             if (task.work_log) {
+                 output += `- **Work Log:**\n\n`;
+                 output += ` ${task.work_log}\n\n`;
+             }
+             else {
+                 output += `- **Work Log:** None\n\n`;
+             }
+         }
+     }
+     return {
+         content: [{ type: 'text', text: output }],
+     };
+ }
  case 'task_detect_timeouts': {
      const timedOut = await detectTimedOutTasks(DEFAULT_USER_ID, projectId);
      mcpLogger.toolResult(name, true, timer());
@@ -1278,6 +1395,10 @@ export function registerToolHandlers(server) {
      if (result.error?.includes('Retry limit exceeded')) {
          output += `The task has been marked as **failed** permanently.\n`;
      }
+     else if (result.retryAfter) {
+         output += `**Retry After:** ${result.retryAfter}\n`;
+         output += `The task is in exponential backoff. Please wait before retrying.\n`;
+     }
  }
  return {
      content: [{ type: 'text', text: output }],
@@ -33,6 +33,8 @@ export declare function storeFact(projectId: string, entity1: string, relationsh
      success: boolean;
      fact_id: string;
      is_critical: boolean;
+     verified?: boolean;
+     error?: string;
  }>;
  export declare function getContext(projectId: string, query?: string, userId?: string): Promise<MemoryContext>;
  export declare function getSummary(projectId: string, userId?: string): Promise<string>;
@@ -64,7 +64,18 @@ export async function injectContext(projectId, query, userId = DEFAULT_USER_ID)
      const response = await apiRequest(`/api/v1/memory/${userId}/${projectId}/inject`, 'POST', {
          query,
      });
-     return response.data;
+     // Transform API response to match expected interface
+     // Handle cases where context or metadata might be missing
+     return {
+         augmentedSystemPrompt: response.data.context || '',
+         metadata: response.data.metadata || {
+             memoryNeeded: false,
+             retrievalTimeMs: 0,
+             factsScored: 0,
+             factsIncluded: 0,
+             avgRelevanceScore: 0,
+         },
+     };
  }
  /**
   * List all cached documentation libraries (global cache)
@@ -0,0 +1,28 @@
+ export interface CircuitState {
+     failures: number;
+     lastFailure: number;
+     openedAt: number | null;
+ }
+ /**
+  * Record a worker failure
+  * Increments failure count and potentially opens the circuit
+  */
+ export declare function recordWorkerFailure(workerType: string): void;
+ /**
+  * Check if circuit is open for a worker type
+  * Auto-closes circuit if cooldown period has elapsed
+  */
+ export declare function isCircuitOpen(workerType: string): boolean;
+ /**
+  * Get circuit status for all worker types
+  * Useful for monitoring and debugging
+  */
+ export declare function getCircuitStatus(): Map<string, CircuitState>;
+ /**
+  * Manually reset a circuit (for testing or admin override)
+  */
+ export declare function resetCircuit(workerType: string): void;
+ /**
+  * Reset all circuits (for testing)
+  */
+ export declare function resetAllCircuits(): void;
@@ -0,0 +1,97 @@
+ // =============================================================================
+ // Circuit Breaker for Worker Failure Tracking
+ // =============================================================================
+ //
+ // Implements the circuit breaker pattern to prevent dispatching tasks to
+ // failing workers. Tracks failures per worker type and opens circuit after
+ // threshold is reached. Circuit auto-closes after cooldown period.
+ //
+ // Circuit States:
+ // - CLOSED: Normal operation, tasks dispatched
+ // - OPEN: Too many failures, tasks NOT dispatched to this worker type
+ // - HALF_OPEN: Cooldown complete, attempting recovery (not implemented yet)
+ //
+ // Configuration
+ const FAILURE_THRESHOLD = 3; // Open circuit after N failures
+ const FAILURE_WINDOW_MS = 5 * 60 * 1000; // 5 minutes window for counting failures
+ const COOLDOWN_MS = 5 * 60 * 1000; // 5 minutes cooldown before auto-close
+ // In-memory circuit state (per worker type)
+ const circuits = new Map();
+ /**
+  * Record a worker failure
+  * Increments failure count and potentially opens the circuit
+  */
+ export function recordWorkerFailure(workerType) {
+     const now = Date.now();
+     const state = circuits.get(workerType) || {
+         failures: 0,
+         lastFailure: 0,
+         openedAt: null,
+     };
+     // If circuit is already open, don't count new failures
+     if (state.openedAt !== null) {
+         return;
+     }
+     // Check if last failure was outside the window
+     if (now - state.lastFailure > FAILURE_WINDOW_MS) {
+         // Reset failure count (new window)
+         state.failures = 1;
+     }
+     else {
+         // Within window, increment
+         state.failures++;
+     }
+     state.lastFailure = now;
+     // Check if we should open the circuit
+     if (state.failures >= FAILURE_THRESHOLD) {
+         state.openedAt = now;
+         console.warn(`[Circuit Breaker] OPENED for worker type: ${workerType} (${state.failures} failures)`);
+     }
+     circuits.set(workerType, state);
+ }
+ /**
+  * Check if circuit is open for a worker type
+  * Auto-closes circuit if cooldown period has elapsed
+  */
+ export function isCircuitOpen(workerType) {
+     const state = circuits.get(workerType);
+     // No failures recorded = closed
+     if (!state || state.openedAt === null) {
+         return false;
+     }
+     const now = Date.now();
+     const timeSinceOpened = now - state.openedAt;
+     // Check if cooldown has elapsed
+     if (timeSinceOpened >= COOLDOWN_MS) {
+         // Auto-close circuit
+         state.openedAt = null;
+         state.failures = 0;
+         circuits.set(workerType, state);
+         console.info(`[Circuit Breaker] AUTO-CLOSED for worker type: ${workerType} (cooldown elapsed)`);
+         return false;
+     }
+     // Still in cooldown
+     return true;
+ }
+ /**
+  * Get circuit status for all worker types
+  * Useful for monitoring and debugging
+  */
+ export function getCircuitStatus() {
+     // Return a copy of the current state
+     return new Map(circuits);
+ }
+ /**
+  * Manually reset a circuit (for testing or admin override)
+  */
+ export function resetCircuit(workerType) {
+     circuits.delete(workerType);
+     console.info(`[Circuit Breaker] RESET for worker type: ${workerType}`);
+ }
+ /**
+  * Reset all circuits (for testing)
+  */
+ export function resetAllCircuits() {
+     circuits.clear();
+     console.info('[Circuit Breaker] RESET ALL circuits');
+ }
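A short usage sketch for the new module. The imported functions and the 3-failure / 5-minute thresholds are exactly as defined above; the import path and the `dispatchToWorker` callback are assumptions for illustration.

```ts
import { isCircuitOpen, recordWorkerFailure, getCircuitStatus } from './circuit-breaker.js';

// Hypothetical dispatch guard: skip worker types whose circuit is open and
// feed failures back into the breaker.
async function guardedDispatch(workerType: string, dispatchToWorker: () => Promise<void>): Promise<boolean> {
    if (isCircuitOpen(workerType)) {
        // Opened after 3 failures within 5 minutes; auto-closes after the
        // 5-minute cooldown.
        return false;
    }
    try {
        await dispatchToWorker();
        return true;
    }
    catch (err) {
        recordWorkerFailure(workerType);
        throw err;
    }
}

// Monitoring: getCircuitStatus() returns a copy of the per-worker-type state.
for (const [workerType, state] of getCircuitStatus()) {
    console.log(workerType, state.failures, state.openedAt !== null ? 'OPEN' : 'CLOSED');
}
```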
@@ -49,3 +49,5 @@ export declare function compactMemoryIndex(entries: {
      relevance: number;
      is_critical?: boolean;
  }[]): string;
+ /** Compact task handoff */
+ export declare function compactTaskHandoff(task: Task, handedOff: boolean, newWorkerType: string): string;
@@ -128,3 +128,9 @@ export function compactMemoryIndex(entries) {
          return `${i + 1}. ${critical}${e.summary.slice(0, 60)} (${rel}%) [${shortId(e.id)}]`;
      }).join('\n');
  }
+ /** Compact task handoff */
+ export function compactTaskHandoff(task, handedOff, newWorkerType) {
+     if (!handedOff)
+         return `❌ Failed to hand off: ${task.title}`;
+     return `🔄 Handed off: ${task.title} → ${newWorkerType} (ready for re-dispatch)`;
+ }
@@ -7,12 +7,19 @@ export interface TimedOutTask {
  export interface RetryMetadata {
      retryCount: number;
      lastFailedAt: string;
+     lastRetryAt?: string;
      lastError?: string;
      failureHistory: Array<{
          timestamp: string;
          error?: string;
      }>;
  }
+ /**
+  * Calculate exponential backoff delay in milliseconds
+  * Formula: delay_ms = min(1000 * 2^retry_count, 300000)
+  * Max delay is 5 minutes (300000ms)
+  */
+ export declare function calculateBackoffDelay(retryCount: number): number;
  /**
   * Detect tasks that have timed out (lock expired while in_progress)
   * These are likely abandoned or failed tasks
@@ -28,8 +35,10 @@ export declare function retryTask(userId: string, projectId: string, taskId: str
          task: Task;
          retryCount: number;
          retriesRemaining: number;
+         retryAfter?: string;
      };
      error?: string;
+     retryAfter?: string;
  }>;
  /**
   * Mark a task as failed with context
@@ -11,10 +11,22 @@ function getRetryMetadata(task) {
      return {
          retryCount: metadata.retryCount || 0,
          lastFailedAt: metadata.lastFailedAt || '',
+         lastRetryAt: metadata.lastRetryAt,
          lastError: metadata.lastError,
          failureHistory: metadata.failureHistory || [],
      };
  }
+ /**
+  * Calculate exponential backoff delay in milliseconds
+  * Formula: delay_ms = min(1000 * 2^retry_count, 300000)
+  * Max delay is 5 minutes (300000ms)
+  */
+ export function calculateBackoffDelay(retryCount) {
+     const baseDelay = 1000; // 1 second
+     const maxDelay = 300000; // 5 minutes
+     const delay = baseDelay * Math.pow(2, retryCount);
+     return Math.min(delay, maxDelay);
+ }
  /**
   * Update retry metadata
   */
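Worked values for the schedule above; `calculateBackoffDelay` is the function just defined (the import path assumes a sibling module):

```ts
import { calculateBackoffDelay } from './tasks-retry.js';

calculateBackoffDelay(0); //   1000 ms (1 s)
calculateBackoffDelay(1); //   2000 ms
calculateBackoffDelay(3); //   8000 ms
calculateBackoffDelay(5); //  32000 ms
calculateBackoffDelay(8); // 256000 ms
calculateBackoffDelay(9); // 300000 ms (2^9 * 1000 = 512000, capped at 5 minutes)
```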
@@ -27,6 +39,7 @@ function updateRetryMetadata(metadata, error) {
      ...metadata,
      retryCount,
      lastFailedAt: timestamp,
+     lastRetryAt: timestamp,
      lastError: error,
      failureHistory: [
          ...failureHistory,
@@ -90,6 +103,23 @@ export async function retryTask(userId, projectId, taskId, maxRetries = 3, error
          error: `Retry limit exceeded (${maxRetries} retries)`,
      };
  }
+ // Check exponential backoff window
+ if (retryMetadata.lastRetryAt && retryMetadata.retryCount > 0) {
+     const lastRetryTime = new Date(retryMetadata.lastRetryAt);
+     const now = new Date();
+     const timeSinceLastRetry = now.getTime() - lastRetryTime.getTime();
+     const backoffDelay = calculateBackoffDelay(retryMetadata.retryCount);
+     if (timeSinceLastRetry < backoffDelay) {
+         const remainingBackoff = backoffDelay - timeSinceLastRetry;
+         const retryAfterTime = new Date(now.getTime() + remainingBackoff);
+         const retryAfter = retryAfterTime.toISOString();
+         return {
+             success: false,
+             error: `Task is in backoff window. Please wait ${Math.ceil(remainingBackoff / 1000)} seconds before retrying.`,
+             retryAfter,
+         };
+     }
+ }
  // Update metadata with failure info
  const updatedMetadata = updateRetryMetadata(task.metadata || {}, errorContext);
  // Update task with retry metadata and reset status
@@ -127,15 +127,30 @@ export declare function heartbeatTask(userId: string, projectId: string, taskId:
          new_expires_at: string;
      };
  }>;
- export declare const DEFAULT_MAX_PARALLEL = 5;
+ export declare const EFFORT_WEIGHTS: {
+     readonly xs: 1;
+     readonly s: 2;
+     readonly m: 4;
+     readonly l: 8;
+     readonly xl: 16;
+ };
+ export declare const LOCK_DURATION_BY_EFFORT: {
+     readonly xs: 15;
+     readonly s: 30;
+     readonly m: 60;
+     readonly l: 120;
+     readonly xl: 240;
+ };
+ export declare const DEFAULT_CAPACITY_BUDGET = 20;
  /**
-  * Get count of currently active tasks
-  * Returns both in_progress and claimed (locked) task counts
+  * Get effort-weighted load of currently active tasks
+  * Returns both in_progress and claimed (locked) task counts, plus effort-weighted load
   */
  export declare function getActiveTaskCount(userId: string, projectId: string, epicId?: string): Promise<{
      inProgress: number;
      claimed: number;
      total: number;
+     effortLoad: number;
  }>;
  /**
   * Get all tasks ready for parallel dispatch
@@ -143,9 +158,17 @@ export declare function getActiveTaskCount(userId: string, projectId: string, ep
   * - Excludes already claimed tasks
   * - Resolves dependencies (only returns unblocked tasks)
   * - Matches each to appropriate expert worker
-  * - Respects max parallel limit by considering currently active tasks
+  * - Respects effort-weighted capacity budget
   */
- export declare function getDispatchableTasks(userId: string, projectId: string, epicId?: string, maxParallel?: number): Promise<DispatchableTask[]>;
+ export declare function getDispatchableTasks(userId: string, projectId: string, epicId?: string, capacityBudget?: number): Promise<DispatchableTask[]>;
+ /**
+  * Check if task lock is about to expire and return warning
+  */
+ export declare function getLockExpiryWarning(lockExpiresAt: string | null): {
+     warning: boolean;
+     minutes_remaining: number;
+     message?: string;
+ };
  /**
   * Get full execution context for a worker agent
   */
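To make the effort-weighted budget concrete: the weights and `DEFAULT_CAPACITY_BUDGET = 20` are copied from the declarations above (replacing the flat `DEFAULT_MAX_PARALLEL = 5`); the active task mix is hypothetical.

```ts
// Weights from EFFORT_WEIGHTS above: xs=1, s=2, m=4, l=8, xl=16.
const EFFORT_WEIGHTS = { xs: 1, s: 2, m: 4, l: 8, xl: 16 } as const;
const DEFAULT_CAPACITY_BUDGET = 20;

type Effort = keyof typeof EFFORT_WEIGHTS;

// Hypothetical active mix: one large, two medium, one small task.
const active: Effort[] = ['l', 'm', 'm', 's'];
const effortLoad = active.reduce((sum, e) => sum + EFFORT_WEIGHTS[e], 0); // 8 + 4 + 4 + 2 = 18

// 2 points remain: room to dispatch one more xs or s task, but not an m.
const remaining = DEFAULT_CAPACITY_BUDGET - effortLoad;
console.log({ effortLoad, remaining });
```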
@@ -156,11 +179,41 @@ export declare function getExecutionContext(userId: string, projectId: string, t
      context: TaskContext[];
      parentTask?: Task;
      siblingTasks?: Task[];
+     lockWarning?: {
+         warning: boolean;
+         minutes_remaining: number;
+         message?: string;
+     };
  }>;
  /**
   * Find newly unblocked tasks after a completion and update their status to ready
   */
  export declare function resolveTaskDependencies(userId: string, projectId: string, completedTaskId: string, epicId?: string): Promise<Task[]>;
+ /**
+  * Aggregate work_log context from all tasks in an epic
+  * Used by orchestrator to synthesize final results
+  */
+ export declare function getEpicAggregate(userId: string, projectId: string, epicId: string, includePending?: boolean): Promise<{
+     epic: {
+         id: string;
+         title: string;
+         description: string;
+         status: string;
+     };
+     tasks: Array<{
+         id: string;
+         title: string;
+         status: string;
+         work_log: string | null;
+         completed_at: string | null;
+     }>;
+     summary_stats: {
+         total: number;
+         completed: number;
+         in_progress: number;
+         pending: number;
+     };
+ }>;
  /**
   * Get epic status with progress tracking
   * Auto-completes epic if all child tasks are done
@@ -173,3 +226,36 @@ export declare function getEpicStatus(userId: string, projectId: string, epicId:
      allComplete: boolean;
      autoCompleted: boolean;
  }>;
+ export declare function handoffTask(userId: string, projectId: string, taskId: string, agentId: string, newWorkerType: string, reason: string): Promise<{
+     success: boolean;
+     data: {
+         task: Task;
+         handed_off: boolean;
+         new_worker_type: string;
+     };
+ }>;
+ /**
+  * Revise an existing epic by adding/removing/updating tasks
+  * Enables iterative planning without recreating the entire epic
+  */
+ export declare function reviseEpic(userId: string, projectId: string, epicId: string, changes: {
+     add_tasks?: {
+         title: string;
+         description?: string;
+         effort?: string;
+         domain?: string;
+         blocked_by?: string[];
+     }[];
+     remove_task_ids?: string[];
+     update_tasks?: {
+         task_id: string;
+         title?: string;
+         description?: string;
+         effort?: string;
+     }[];
+ }): Promise<{
+     epic: Task;
+     added: Task[];
+     removed: string[];
+     updated: Task[];
+ }>;
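Finally, a call sketch shaped by the `reviseEpic` declaration above. The import path mirrors the `../helpers/tasks.js` path used by the tool handlers; the user, project, epic, and task IDs and the task payloads are placeholders.

```ts
import { reviseEpic } from '../helpers/tasks.js';

async function example(): Promise<void> {
    const result = await reviseEpic('default-user', 'my-project', 'epic_abc123', {
        add_tasks: [
            { title: 'Add rate limiting', description: 'Protect the API edge', effort: 's' },
        ],
        remove_task_ids: ['task_def456'],
        update_tasks: [
            { task_id: 'task_ghi789', title: 'Harden retry path', effort: 'm' },
        ],
    });
    // result.added / result.removed / result.updated mirror the applied
    // changes, matching the sections rendered by the task_plan_revise handler.
    console.log(result.added.length, result.removed.length, result.updated.length);
}
```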