internaltool-mcp 1.6.31 → 1.6.35
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/index.js +3181 -19
- package/package.json +1 -1
package/index.js
CHANGED
|
@@ -619,6 +619,8 @@ Roles: BUILDER agents MUST call this before editing. Coordinators call this as p
|
|
|
619
619
|
files: z.array(z.string()).min(1).describe('List of file paths this task will exclusively edit (e.g. ["server/routes/tasks.js", "client/src/App.jsx"])'),
|
|
620
620
|
},
|
|
621
621
|
async ({ taskId, files }) => {
|
|
622
|
+
const denied = await guardRoleTool(taskId, 'claim_files')
|
|
623
|
+
if (denied) return denied
|
|
622
624
|
trackTaskActivity(taskId, 'claim_files')
|
|
623
625
|
const res = await api.post(`/api/tasks/${taskId}/files/claim`, { files })
|
|
624
626
|
if (!res?.success) {
|
|
@@ -663,6 +665,47 @@ Files are also released automatically when the task is parked.`,
|
|
|
663
665
|
}
|
|
664
666
|
)
|
|
665
667
|
|
|
668
|
+
// ── set_task_type ─────────────────────────────────────────────────────────────
|
|
669
|
+
server.tool(
|
|
670
|
+
'set_task_type',
|
|
671
|
+
`Override the auto-detected task type before confirming kickoff.
|
|
672
|
+
|
|
673
|
+
Call this during the confirmed=false preview step when the detected type looks wrong.
|
|
674
|
+
Once set, kickoff_task will use this type directly and skip auto-detection entirely.
|
|
675
|
+
Set to null to clear the override and let detection run again.
|
|
676
|
+
|
|
677
|
+
Valid types:
|
|
678
|
+
- migration → DB schema changes, always coordinator + sequential
|
|
679
|
+
- integration → third-party API/webhook, single builder, no hardcoded keys
|
|
680
|
+
- bugfix → targeted fix, scout first, failing test before fix
|
|
681
|
+
- ui → frontend/components, parallel if multiple components
|
|
682
|
+
- backend → routes/controllers/services, auto complexity routing
|
|
683
|
+
- security → auth/permissions, always scout first + reviewer required
|
|
684
|
+
- refactor → restructuring, scout first, no behaviour changes
|
|
685
|
+
- feature → general new feature, auto complexity routing (default fallback)`,
|
|
686
|
+
{
|
|
687
|
+
taskId: z.string().describe("Task's MongoDB ObjectId"),
|
|
688
|
+
taskType: z.enum(['migration', 'integration', 'bugfix', 'ui', 'backend', 'security', 'refactor', 'feature']).nullable()
|
|
689
|
+
.describe('The correct task type, or null to clear and re-enable auto-detection'),
|
|
690
|
+
},
|
|
691
|
+
async ({ taskId, taskType }) => {
|
|
692
|
+
const res = await api.patch(`/api/tasks/${taskId}`, { taskType })
|
|
693
|
+
if (!res?.success) return errorText(res?.message || 'Could not set task type')
|
|
694
|
+
const cfg = taskType ? TASK_TYPES[taskType] : null
|
|
695
|
+
return text({
|
|
696
|
+
set: true,
|
|
697
|
+
taskType,
|
|
698
|
+
label: cfg?.label || null,
|
|
699
|
+
riskLevel: cfg?.riskLevel || null,
|
|
700
|
+
flow: cfg?.flow || null,
|
|
701
|
+
scoutFirst: cfg?.scoutFirst || false,
|
|
702
|
+
message: taskType
|
|
703
|
+
? `Task type locked to "${cfg?.label || taskType}". Call kickoff_task confirmed=false again to see the updated routing before confirming.`
|
|
704
|
+
: `Task type cleared — auto-detection will run again on next kickoff_task call.`,
|
|
705
|
+
})
|
|
706
|
+
}
|
|
707
|
+
)
|
|
708
|
+
|
|
666
709
|
// ── decompose_task ────────────────────────────────────────────────────────────
|
|
667
710
|
server.tool(
|
|
668
711
|
'decompose_task',
|
|
@@ -700,6 +743,8 @@ Call confirmed=false to preview the decomposition, confirmed=true to save it.`,
|
|
|
700
743
|
confirmed: z.boolean().optional().default(false).describe('Set true to save the decomposition and create subtasks on the board'),
|
|
701
744
|
},
|
|
702
745
|
async ({ taskId, subtaskPlan, confirmed = false }) => {
|
|
746
|
+
const denied = await guardRoleTool(taskId, 'decompose_task')
|
|
747
|
+
if (denied) return denied
|
|
703
748
|
trackTaskActivity(taskId, 'decompose_task')
|
|
704
749
|
const taskRes = await apiWithRetry(() => api.get(`/api/tasks/${taskId}`))
|
|
705
750
|
if (!taskRes?.success) return errorText('Task not found')
|
|
@@ -840,6 +885,8 @@ Scouts MUST NOT modify any source code files or create branches.`,
|
|
|
840
885
|
report: z.string().optional().default('').describe('Your structured scout findings in markdown (required when confirmed=true)'),
|
|
841
886
|
},
|
|
842
887
|
async ({ taskId, confirmed = false, report = '' }) => {
|
|
888
|
+
const denied = await guardRoleTool(taskId, 'scout_task')
|
|
889
|
+
if (denied) return denied
|
|
843
890
|
trackTaskActivity(taskId, 'scout_task')
|
|
844
891
|
const taskRes = await apiWithRetry(() => api.get(`/api/tasks/${taskId}`))
|
|
845
892
|
if (!taskRes?.success) return errorText('Task not found')
|
|
@@ -893,6 +940,271 @@ Scouts MUST NOT modify any source code files or create branches.`,
|
|
|
893
940
|
}
|
|
894
941
|
)
|
|
895
942
|
|
|
943
|
+
// ── check_merge_conflicts ─────────────────────────────────────────────────────
|
|
944
|
+
server.tool(
|
|
945
|
+
'check_merge_conflicts',
|
|
946
|
+
`Check whether this task's PR has merge conflicts and get coordination instructions.
|
|
947
|
+
|
|
948
|
+
Call this when:
|
|
949
|
+
- You are about to push commits and want to verify the branch is clean
|
|
950
|
+
- You see a conflict warning in kickoff_task output (rebaseWarning)
|
|
951
|
+
- A teammate tells you there is a conflict on this task
|
|
952
|
+
- Before opening a PR to catch conflicts early
|
|
953
|
+
|
|
954
|
+
Returns:
|
|
955
|
+
- hasConflict: whether GitHub reports the PR as unmergeable
|
|
956
|
+
- behindBy: how many commits behind the base branch this branch is
|
|
957
|
+
- conflictingFiles: which files are in conflict
|
|
958
|
+
- conflictingTasks: other active tasks in this project that claimed the same files
|
|
959
|
+
- resolution: exact git commands to run, and who to coordinate with`,
|
|
960
|
+
{ taskId: z.string().describe("Task's MongoDB ObjectId") },
|
|
961
|
+
async ({ taskId }) => {
|
|
962
|
+
trackTaskActivity(taskId, 'check_merge_conflicts')
|
|
963
|
+
const res = await api.get(`/api/tasks/${taskId}/conflict-status`)
|
|
964
|
+
if (!res?.success) return errorText('Could not fetch conflict status')
|
|
965
|
+
|
|
966
|
+
const d = res.data
|
|
967
|
+
|
|
968
|
+
if (!d.hasPR) return text({
|
|
969
|
+
status: 'no_pr',
|
|
970
|
+
message: 'No PR linked to this task yet. Create a branch and open a PR first.',
|
|
971
|
+
})
|
|
972
|
+
if (!d.linked) return text({
|
|
973
|
+
status: 'no_github',
|
|
974
|
+
message: 'Project does not have a GitHub repo linked. Configure it in project settings.',
|
|
975
|
+
})
|
|
976
|
+
if (d.error) return text({
|
|
977
|
+
status: 'github_unavailable',
|
|
978
|
+
message: d.error,
|
|
979
|
+
})
|
|
980
|
+
if (d.mergeableState === 'unknown') return text({
|
|
981
|
+
status: 'computing',
|
|
982
|
+
message: 'GitHub is still computing mergeability. Wait a few seconds and call check_merge_conflicts again.',
|
|
983
|
+
})
|
|
984
|
+
|
|
985
|
+
if (!d.hasConflict && d.behindBy === 0) return text({
|
|
986
|
+
status: 'clean',
|
|
987
|
+
message: `PR is clean — no conflicts, not behind ${d.base}. Safe to merge.`,
|
|
988
|
+
})
|
|
989
|
+
|
|
990
|
+
return text({
|
|
991
|
+
status: d.hasConflict ? 'conflict' : 'behind',
|
|
992
|
+
hasConflict: d.hasConflict,
|
|
993
|
+
behindBy: d.behindBy,
|
|
994
|
+
base: d.base,
|
|
995
|
+
headBranch: d.headBranch,
|
|
996
|
+
conflictingFiles: d.conflictingFiles,
|
|
997
|
+
conflictingTasks: d.conflictingTasks,
|
|
998
|
+
coordinationNote: d.coordinationNote,
|
|
999
|
+
resolution: {
|
|
1000
|
+
steps: d.resolution,
|
|
1001
|
+
note: d.hasConflict && d.conflictingTasks?.length > 0
|
|
1002
|
+
? `⚠️ COORDINATE FIRST: Contact the assignees of the conflicting tasks before resolving. Resolving without coordination may overwrite their work.`
|
|
1003
|
+
: `Rebase your branch on ${d.base} to resolve.`,
|
|
1004
|
+
},
|
|
1005
|
+
})
|
|
1006
|
+
}
|
|
1007
|
+
)
|
|
1008
|
+
|
|
1009
|
+
// ── resolve_conflict ──────────────────────────────────────────────────────────
|
|
1010
|
+
server.tool(
|
|
1011
|
+
'resolve_conflict',
|
|
1012
|
+
`Fetch full context from BOTH tasks involved in a merge conflict so Claude Code can make an informed semantic merge decision.
|
|
1013
|
+
|
|
1014
|
+
Call this for each conflicting file BEFORE editing it. It returns:
|
|
1015
|
+
- thisTask: your task's goal, README, scout report
|
|
1016
|
+
- otherTask: the conflicting task's goal, README, scout report, assignees
|
|
1017
|
+
- the exact conflict markers to look for in the file
|
|
1018
|
+
- the resolution strategy based on both tasks' intents
|
|
1019
|
+
|
|
1020
|
+
This is the context layer. After calling this, use the Read tool to open the file,
|
|
1021
|
+
the Edit tool to resolve the conflict markers, then continue the rebase with Bash.`,
|
|
1022
|
+
{
|
|
1023
|
+
taskId: z.string().describe("Your task's MongoDB ObjectId"),
|
|
1024
|
+
filePath: z.string().describe('The conflicting file path, e.g. server/middleware/auth.js'),
|
|
1025
|
+
},
|
|
1026
|
+
async ({ taskId, filePath }) => {
|
|
1027
|
+
trackTaskActivity(taskId, 'resolve_conflict')
|
|
1028
|
+
|
|
1029
|
+
// Get this task + conflict status in parallel
|
|
1030
|
+
const [taskRes, conflictRes] = await Promise.all([
|
|
1031
|
+
apiWithRetry(() => api.get(`/api/tasks/${taskId}`)),
|
|
1032
|
+
api.get(`/api/tasks/${taskId}/conflict-status`).catch(() => null),
|
|
1033
|
+
])
|
|
1034
|
+
if (!taskRes?.success) return errorText('Task not found')
|
|
1035
|
+
const task = taskRes.data.task
|
|
1036
|
+
|
|
1037
|
+
// Find the specific conflicting task that touched this file
|
|
1038
|
+
const conflictData = conflictRes?.data || {}
|
|
1039
|
+
const conflictingTask = (conflictData.conflictingTasks || [])
|
|
1040
|
+
.find(t => (t.claimedFiles || []).includes(filePath))
|
|
1041
|
+
|
|
1042
|
+
// Fetch other task's full context if found
|
|
1043
|
+
let otherTask = null
|
|
1044
|
+
if (conflictingTask?.taskId) {
|
|
1045
|
+
try {
|
|
1046
|
+
const otherRes = await api.get(`/api/tasks/${conflictingTask.taskId}`)
|
|
1047
|
+
if (otherRes?.success) {
|
|
1048
|
+
const o = otherRes.data.task
|
|
1049
|
+
otherTask = {
|
|
1050
|
+
key: o.key,
|
|
1051
|
+
title: o.title,
|
|
1052
|
+
goal: o.readmeMarkdown || o.description || '(no README on that task)',
|
|
1053
|
+
scoutReport: o.scoutReport || null,
|
|
1054
|
+
assignees: (o.assignees || []).map(a => a.name || a.email).join(', ') || 'unassigned',
|
|
1055
|
+
prUrl: o.github?.prUrl || null,
|
|
1056
|
+
}
|
|
1057
|
+
}
|
|
1058
|
+
} catch { /* non-fatal */ }
|
|
1059
|
+
}
|
|
1060
|
+
|
|
1061
|
+
const base = conflictData.base || 'main'
|
|
1062
|
+
|
|
1063
|
+
return text({
|
|
1064
|
+
filePath,
|
|
1065
|
+
thisTask: {
|
|
1066
|
+
key: task.key,
|
|
1067
|
+
title: task.title,
|
|
1068
|
+
goal: task.readmeMarkdown || task.description || '(no README on this task)',
|
|
1069
|
+
scoutReport: task.scoutReport || null,
|
|
1070
|
+
},
|
|
1071
|
+
otherTask: otherTask || {
|
|
1072
|
+
note: 'Could not identify the specific task that caused this conflict. The conflict was likely introduced by a recent merge to ' + base + '. Check: git log origin/' + base + ' -- ' + filePath,
|
|
1073
|
+
},
|
|
1074
|
+
conflictMarkers: {
|
|
1075
|
+
ours: '<<<<<<< HEAD ← YOUR changes (this task)',
|
|
1076
|
+
theirs: '>>>>>>> ... ← THEIR changes (merged into ' + base + ')',
|
|
1077
|
+
middle: '======= ← separator',
|
|
1078
|
+
},
|
|
1079
|
+
resolutionProtocol: [
|
|
1080
|
+
'1. Read ' + filePath + ' using the Read tool — find all <<<<<<< blocks',
|
|
1081
|
+
'2. For each block: understand thisTask.goal vs otherTask.goal',
|
|
1082
|
+
'3. Decide: combine both / keep better / adapt one to fit the other',
|
|
1083
|
+
'4. Use the Edit tool to rewrite the block — NO conflict markers in final file',
|
|
1084
|
+
'5. Bash: git add ' + filePath,
|
|
1085
|
+
'6. Bash: git rebase --continue (or git rebase --skip if this commit is already covered)',
|
|
1086
|
+
'7. Repeat for each conflicting file',
|
|
1087
|
+
'8. Bash: git push --force-with-lease',
|
|
1088
|
+
'9. Call check_merge_conflicts to verify clean state',
|
|
1089
|
+
'10. Call log_session_event with summary of what you merged and WHY',
|
|
1090
|
+
],
|
|
1091
|
+
warning: otherTask
|
|
1092
|
+
? `⚠️ Read otherTask.goal before editing. "${otherTask.key}: ${otherTask.title}" made this change intentionally. Understand it before deciding what to keep.`
|
|
1093
|
+
: `⚠️ Unknown conflict source. Be conservative — do not delete any logic you don't understand.`,
|
|
1094
|
+
})
|
|
1095
|
+
}
|
|
1096
|
+
)
|
|
1097
|
+
|
|
1098
|
+
// ── request_human_input ───────────────────────────────────────────────────────
|
|
1099
|
+
server.tool(
|
|
1100
|
+
'request_human_input',
|
|
1101
|
+
`Ask the developer a question and PAUSE — they will answer directly in this Cursor chat.
|
|
1102
|
+
|
|
1103
|
+
WHEN TO CALL THIS:
|
|
1104
|
+
- Conflict resolution is ambiguous: both sides implement the same feature differently and you can't safely pick one
|
|
1105
|
+
- You need explicit approval before a destructive operation (git push --force-with-lease on a shared branch)
|
|
1106
|
+
- The task README is missing and you cannot determine intended behavior
|
|
1107
|
+
- Two tasks have incompatible goals that cannot be automatically merged
|
|
1108
|
+
|
|
1109
|
+
DO NOT call this for decisions you can make autonomously (trivial formatting conflicts, variable names, import ordering).
|
|
1110
|
+
|
|
1111
|
+
This tool posts a comment on the task (for visibility in the app) and notifies assignees. It then returns the question here in Cursor. The developer reads it and replies directly in this chat — you use their reply as the answer. No polling needed.`,
|
|
1112
|
+
{
|
|
1113
|
+
taskId: z.string().describe("Your task's MongoDB ObjectId"),
|
|
1114
|
+
question: z.string().describe('The specific question. Be precise — include file names, conflict lines, and what decision you need.'),
|
|
1115
|
+
context: z.string().optional().describe('Extra context: what both conflict sides do, what you tried, why you are stuck.'),
|
|
1116
|
+
type: z.enum(['question', 'approval', 'ambiguity']).optional()
|
|
1117
|
+
.describe('question = need info; approval = need yes/no before destructive action; ambiguity = two valid interpretations, pick one'),
|
|
1118
|
+
},
|
|
1119
|
+
async ({ taskId, question, context = '', type = 'question' }) => {
|
|
1120
|
+
trackTaskActivity(taskId, 'request_human_input')
|
|
1121
|
+
await api.post(`/api/tasks/${taskId}/ask-human`, { question, context, type }).catch(() => null)
|
|
1122
|
+
|
|
1123
|
+
const typeLabel = type === 'approval' ? '🔐 APPROVAL NEEDED' : type === 'ambiguity' ? '🤔 AMBIGUITY — YOUR DECISION' : '❓ QUESTION FOR YOU'
|
|
1124
|
+
return text({
|
|
1125
|
+
[typeLabel]: question,
|
|
1126
|
+
...(context ? { context } : {}),
|
|
1127
|
+
instruction: 'Please reply in this Cursor chat. I will use your answer to continue. Do NOT proceed until you reply.',
|
|
1128
|
+
})
|
|
1129
|
+
}
|
|
1130
|
+
)
|
|
1131
|
+
|
|
1132
|
+
// ── get_project_git_tree ──────────────────────────────────────────────────────
|
|
1133
|
+
server.tool(
|
|
1134
|
+
'get_project_git_tree',
|
|
1135
|
+
`Get a snapshot of all active branches in the project: who is working on what, which branches share files (conflict risk), and the suggested merge order.
|
|
1136
|
+
|
|
1137
|
+
Call this at the START of a session (or before raising a PR) to understand the team landscape.
|
|
1138
|
+
Use the mergeOrder to know when it is safe to merge your branch.
|
|
1139
|
+
Use conflictPairs to coordinate with teammates before touching shared files.`,
|
|
1140
|
+
{
|
|
1141
|
+
projectId: z.string().describe("Project's MongoDB ObjectId (find it in get_task response as task.project)"),
|
|
1142
|
+
},
|
|
1143
|
+
async ({ projectId }) => {
|
|
1144
|
+
const treeRes = await api.get(`/api/projects/${projectId}/github/git-tree`).catch(() => null)
|
|
1145
|
+
if (!treeRes?.success) return errorText('Could not fetch git tree — GitHub may not be linked to this project')
|
|
1146
|
+
const tree = treeRes.data
|
|
1147
|
+
|
|
1148
|
+
// Compute conflict pairs from claimedFiles overlap
|
|
1149
|
+
const branches = tree.branches || []
|
|
1150
|
+
const conflictPairs = []
|
|
1151
|
+
for (let i = 0; i < branches.length; i++) {
|
|
1152
|
+
for (let j = i + 1; j < branches.length; j++) {
|
|
1153
|
+
const shared = (branches[i].claimedFiles || []).filter(f => (branches[j].claimedFiles || []).includes(f))
|
|
1154
|
+
if (shared.length) conflictPairs.push({ a: branches[i].taskKey, b: branches[j].taskKey, sharedFiles: shared })
|
|
1155
|
+
}
|
|
1156
|
+
}
|
|
1157
|
+
|
|
1158
|
+
// Merge order: fewest conflicts → least behind → most ahead
|
|
1159
|
+
const conflictCount = {}
|
|
1160
|
+
branches.forEach(b => { conflictCount[b.taskKey] = 0 })
|
|
1161
|
+
conflictPairs.forEach(({ a, b }) => { conflictCount[a]++; conflictCount[b]++ })
|
|
1162
|
+
const mergeOrder = [...branches]
|
|
1163
|
+
.filter(b => !b.branchError)
|
|
1164
|
+
.sort((a, b) => {
|
|
1165
|
+
const cDiff = (conflictCount[a.taskKey] || 0) - (conflictCount[b.taskKey] || 0)
|
|
1166
|
+
if (cDiff !== 0) return cDiff
|
|
1167
|
+
const behindDiff = (a.compare?.behindBy ?? 99) - (b.compare?.behindBy ?? 99)
|
|
1168
|
+
if (behindDiff !== 0) return behindDiff
|
|
1169
|
+
return (b.compare?.aheadBy ?? 0) - (a.compare?.aheadBy ?? 0)
|
|
1170
|
+
})
|
|
1171
|
+
.map((b, i) => ({
|
|
1172
|
+
rank: i + 1,
|
|
1173
|
+
taskKey: b.taskKey,
|
|
1174
|
+
taskTitle: b.taskTitle,
|
|
1175
|
+
headBranch: b.headBranch,
|
|
1176
|
+
status: b.statusColor,
|
|
1177
|
+
behindBy: b.compare?.behindBy ?? null,
|
|
1178
|
+
aheadBy: b.compare?.aheadBy ?? null,
|
|
1179
|
+
assignees: b.assignees.map(a => a.name).join(', ') || 'unassigned',
|
|
1180
|
+
conflicts: conflictPairs.filter(p => p.a === b.taskKey || p.b === b.taskKey).map(p => p.a === b.taskKey ? p.b : p.a),
|
|
1181
|
+
readyToMerge: b.compare?.behindBy === 0 && b.statusColor !== 'red',
|
|
1182
|
+
}))
|
|
1183
|
+
|
|
1184
|
+
const staleBranches = branches
|
|
1185
|
+
.filter(b => {
|
|
1186
|
+
if (!b.tipCommit?.date) return false
|
|
1187
|
+
const hrs = (Date.now() - new Date(b.tipCommit.date).getTime()) / 3600000
|
|
1188
|
+
return hrs >= 48
|
|
1189
|
+
})
|
|
1190
|
+
.map(b => ({ taskKey: b.taskKey, lastCommitAge: Math.round((Date.now() - new Date(b.tipCommit.date).getTime()) / 3600000) + 'h ago' }))
|
|
1191
|
+
|
|
1192
|
+
return text({
|
|
1193
|
+
repo: tree.repo,
|
|
1194
|
+
defaultBranch: tree.defaultBranch,
|
|
1195
|
+
summary: `${branches.length} active branch(es). ${branches.filter(b => b.statusColor === 'red').length} diverged. ${conflictPairs.length} conflict pair(s).`,
|
|
1196
|
+
conflictPairs,
|
|
1197
|
+
mergeOrder,
|
|
1198
|
+
staleBranches,
|
|
1199
|
+
advice: conflictPairs.length > 0
|
|
1200
|
+
? `⚠️ File conflicts exist. Coordinate with the listed teammates BEFORE editing shared files. Check mergeOrder to know when your branch should merge.`
|
|
1201
|
+
: mergeOrder.some(b => b.behindBy > 0)
|
|
1202
|
+
? `Some branches are behind. Rebase before merging. Follow the mergeOrder sequence.`
|
|
1203
|
+
: `✅ No file conflicts detected. Follow mergeOrder for the safest merge sequence.`,
|
|
1204
|
+
})
|
|
1205
|
+
}
|
|
1206
|
+
)
|
|
1207
|
+
|
|
896
1208
|
// ── get_agent_context ─────────────────────────────────────────────────────────
|
|
897
1209
|
server.tool(
|
|
898
1210
|
'get_agent_context',
|
|
@@ -940,7 +1252,13 @@ Returns systemPrompt ready to use as a Claude system prompt.`,
|
|
|
940
1252
|
}
|
|
941
1253
|
|
|
942
1254
|
// Suggest relevant skills based on role and available project skills
|
|
943
|
-
|
|
1255
|
+
// Respects project-level enabled + task-level agentKitOverrides
|
|
1256
|
+
const allProjectSkills = ctx.project?.agentConfig?.skills || []
|
|
1257
|
+
const taskSkillOverrides = ctx.task?.agentKitOverrides?.skills || {}
|
|
1258
|
+
const availableSkills = allProjectSkills.filter(s => {
|
|
1259
|
+
const taskOverride = taskSkillOverrides[s.name]
|
|
1260
|
+
return taskOverride !== undefined ? taskOverride : s.enabled !== false
|
|
1261
|
+
})
|
|
944
1262
|
const suggestedSkills = availableSkills.length > 0
|
|
945
1263
|
? availableSkills.map(s => ({
|
|
946
1264
|
name: s.name,
|
|
@@ -2345,8 +2663,80 @@ Use this when a developer says "start task", "brief me on", or "what do I need t
|
|
|
2345
2663
|
? `${devSlug}/feature/${task.key?.toLowerCase()}-${titleSlug}`
|
|
2346
2664
|
: `feature/${task.key?.toLowerCase()}-${titleSlug}`
|
|
2347
2665
|
|
|
2348
|
-
const hasReadme
|
|
2666
|
+
const hasReadme = typeof task.readmeMarkdown === 'string' && task.readmeMarkdown.trim().length > 0
|
|
2349
2667
|
const hasCursorRules = typeof task.cursorRules === 'string' && task.cursorRules.trim().length > 0
|
|
2668
|
+
|
|
2669
|
+
// ── Task flow auto-routing ────────────────────────────────────────────────
|
|
2670
|
+
// Fetch project agentFlow setting early (needed for preview too)
|
|
2671
|
+
let projectAgentFlowEarly = 'direct'
|
|
2672
|
+
try {
|
|
2673
|
+
const projEarly = await apiWithRetry(() => api.get(`/api/projects/${task.project}`))
|
|
2674
|
+
projectAgentFlowEarly = projEarly?.data?.project?.agentConfig?.agentFlow || 'direct'
|
|
2675
|
+
} catch { /* non-fatal */ }
|
|
2676
|
+
|
|
2677
|
+
// Only auto-route when no role is already set and task hasn't started yet
|
|
2678
|
+
const notCoordinatorOrScout = agentRole !== 'coordinator' && agentRole !== 'scout'
|
|
2679
|
+
const noDecomposition = !task.decomposition?.trim()
|
|
2680
|
+
const noFileClaims = !(task.claimedFiles?.length > 0)
|
|
2681
|
+
const shouldAutoRoute = notCoordinatorOrScout && noDecomposition && noFileClaims
|
|
2682
|
+
|
|
2683
|
+
// ── Detect task type ───────────────────────────────────────────────────────
|
|
2684
|
+
// Explicit type set by user or agent via set_task_type → skip detection.
|
|
2685
|
+
const explicitType = task.taskType || null
|
|
2686
|
+
const detection = explicitType ? null : detectTaskType(task)
|
|
2687
|
+
const detectedType = explicitType || detection.type
|
|
2688
|
+
const typeIsExplicit = !!explicitType
|
|
2689
|
+
const typeConfidence = explicitType ? 'explicit' : detection.confidence
|
|
2690
|
+
const typeAmbiguous = !explicitType && detection.ambiguous
|
|
2691
|
+
const taskTypeCfg = TASK_TYPES[detectedType]
|
|
2692
|
+
|
|
2693
|
+
// ── Score complexity from README signals ──────────────────────────────────
|
|
2694
|
+
let complexityScore = 0
|
|
2695
|
+
const complexitySignals = []
|
|
2696
|
+
|
|
2697
|
+
if (hasReadme) {
|
|
2698
|
+
const readme = task.readmeMarkdown.trim()
|
|
2699
|
+
const readmeLen = readme.length
|
|
2700
|
+
const sections = (readme.match(/^#{1,3}\s/gm) || []).length
|
|
2701
|
+
const fileMentions = (readme.match(/`[^`]+\.[a-z]{2,6}`/g) || []).length
|
|
2702
|
+
const hasParallelLang = /parallel|simultaneously|independent(ly)?|at the same time/i.test(readme)
|
|
2703
|
+
const hasSequentialLang = /step\s*\d|first[\s,].*then|depends\s+on|before.*after/i.test(readme)
|
|
2704
|
+
|
|
2705
|
+
if (readmeLen > 800) { complexityScore += 2; complexitySignals.push(`long plan (${readmeLen} chars)`) }
|
|
2706
|
+
else if (readmeLen > 300) { complexityScore += 1; complexitySignals.push(`medium plan (${readmeLen} chars)`) }
|
|
2707
|
+
if (sections > 3) { complexityScore += 2; complexitySignals.push(`${sections} README sections`) }
|
|
2708
|
+
else if (sections > 1) { complexityScore += 1; complexitySignals.push(`${sections} README sections`) }
|
|
2709
|
+
if (fileMentions > 4) { complexityScore += 2; complexitySignals.push(`${fileMentions} files mentioned`) }
|
|
2710
|
+
else if (fileMentions > 2) { complexityScore += 1; complexitySignals.push(`${fileMentions} files mentioned`) }
|
|
2711
|
+
if (hasParallelLang) { complexityScore += 1; complexitySignals.push('parallel workstreams mentioned') }
|
|
2712
|
+
if (hasSequentialLang) { complexityScore += 1; complexitySignals.push('sequential steps mentioned') }
|
|
2713
|
+
}
|
|
2714
|
+
|
|
2715
|
+
const titleLower = task.title.toLowerCase()
|
|
2716
|
+
const complexTitleWords = ['implement', 'build', 'add', 'create', 'integrate', 'refactor', 'migrate', 'redesign']
|
|
2717
|
+
const simpleTitleWords = ['fix', 'patch', 'typo', 'bump', 'revert']
|
|
2718
|
+
if (complexTitleWords.some(w => titleLower.includes(w))) complexityScore += 1
|
|
2719
|
+
if (simpleTitleWords.some(w => titleLower.includes(w))) complexityScore -= 1
|
|
2720
|
+
complexityScore = Math.max(0, complexityScore)
|
|
2721
|
+
|
|
2722
|
+
// ── Determine recommended flow ────────────────────────────────────────────
|
|
2723
|
+
// Priority: project override > type-based > complexity score
|
|
2724
|
+
let recommendedFlow
|
|
2725
|
+
if (projectAgentFlowEarly === 'coordinator_first') {
|
|
2726
|
+
recommendedFlow = 'coordinator'
|
|
2727
|
+
} else if (taskTypeCfg.flow === 'coordinator') {
|
|
2728
|
+
recommendedFlow = 'coordinator' // type always requires coordinator (e.g. migration)
|
|
2729
|
+
} else if (taskTypeCfg.flow === 'direct') {
|
|
2730
|
+
recommendedFlow = 'direct' // type always direct (e.g. bugfix)
|
|
2731
|
+
} else {
|
|
2732
|
+
// 'auto' or 'single_builder' — use complexity score
|
|
2733
|
+
if (!hasReadme || complexityScore <= 1) recommendedFlow = 'direct'
|
|
2734
|
+
else if (complexityScore <= 3) recommendedFlow = 'single_builder'
|
|
2735
|
+
else recommendedFlow = 'coordinator'
|
|
2736
|
+
}
|
|
2737
|
+
|
|
2738
|
+
// Backward-compat alias used in confirmed response
|
|
2739
|
+
const requiresCoordinatorFlow = shouldAutoRoute && recommendedFlow === 'coordinator'
|
|
2350
2740
|
const subtasks = (task.subtasks || []).map(s => ({
|
|
2351
2741
|
title: s.title,
|
|
2352
2742
|
done: s.done,
|
|
@@ -2485,6 +2875,56 @@ Use this when a developer says "start task", "brief me on", or "what do I need t
|
|
|
2485
2875
|
rules: task.cursorRules,
|
|
2486
2876
|
}
|
|
2487
2877
|
: { ACTIVE: false },
|
|
2878
|
+
TASK_ANALYSIS: shouldAutoRoute ? {
|
|
2879
|
+
detectedType: taskTypeCfg.label,
|
|
2880
|
+
typeKey: detectedType,
|
|
2881
|
+
riskLevel: taskTypeCfg.riskLevel,
|
|
2882
|
+
recommendedFlow,
|
|
2883
|
+
complexityScore,
|
|
2884
|
+
complexitySignals,
|
|
2885
|
+
scoutFirst: taskTypeCfg.scoutFirst,
|
|
2886
|
+
suggestedSkills: taskTypeCfg.suggestedSkills,
|
|
2887
|
+
decompositionHint: taskTypeCfg.decompositionHint,
|
|
2888
|
+
projectOverride: projectAgentFlowEarly === 'coordinator_first' ? 'Project enforces coordinator_first — overrides auto-detection.' : null,
|
|
2889
|
+
typeRules: taskTypeCfg.extraRules,
|
|
2890
|
+
|
|
2891
|
+
// ── Type verification block — agent must read this before confirming ──
|
|
2892
|
+
TYPE_VERIFICATION: typeIsExplicit
|
|
2893
|
+
? `✅ Type locked: ${taskTypeCfg.label} (set explicitly — detection skipped)`
|
|
2894
|
+
: typeAmbiguous
|
|
2895
|
+
? [
|
|
2896
|
+
`⚠️ AMBIGUOUS TYPE — auto-detection was not confident.`,
|
|
2897
|
+
`Best guess: ${taskTypeCfg.label} (score: ${detection?.score ?? 0}, confidence: LOW)`,
|
|
2898
|
+
`Other candidates: ${Object.entries(detection?.allScores || {}).filter(([k]) => k !== detectedType).map(([k, s]) => `${k}(${s})`).join(', ') || 'none'}`,
|
|
2899
|
+
``,
|
|
2900
|
+
`BEFORE confirming: verify this is the right type.`,
|
|
2901
|
+
`If wrong, call set_task_type with taskId="${taskId}" and the correct type, then re-run kickoff_task confirmed=false to see updated routing.`,
|
|
2902
|
+
`Valid types: migration | integration | bugfix | ui | backend | security | refactor | feature`,
|
|
2903
|
+
].join('\n')
|
|
2904
|
+
: [
|
|
2905
|
+
`🔍 Detected: ${taskTypeCfg.label} (score: ${detection?.score ?? 0}, confidence: ${typeConfidence.toUpperCase()})`,
|
|
2906
|
+
`If this looks wrong, call set_task_type with taskId="${taskId}" and the correct type before confirming.`,
|
|
2907
|
+
`Valid types: migration | integration | bugfix | ui | backend | security | refactor | feature`,
|
|
2908
|
+
].join('\n'),
|
|
2909
|
+
|
|
2910
|
+
instruction: recommendedFlow === 'direct'
|
|
2911
|
+
? [
|
|
2912
|
+
`✅ ${taskTypeCfg.label.toUpperCase()} — DIRECT FLOW (risk: ${taskTypeCfg.riskLevel}, score: ${complexityScore})`,
|
|
2913
|
+
taskTypeCfg.scoutFirst ? `Recommended: Call kickoff_task with agentRole "scout" first to understand the codebase/bug, then builder.` : null,
|
|
2914
|
+
`Next: Call kickoff_task with taskId="${taskId}", agentRole "builder", confirmed true → claim_files → implement → commit → raise_pr`,
|
|
2915
|
+
].filter(Boolean).join('\n')
|
|
2916
|
+
: recommendedFlow === 'single_builder'
|
|
2917
|
+
? `🔧 ${taskTypeCfg.label.toUpperCase()} — SINGLE BUILDER (risk: ${taskTypeCfg.riskLevel}, score: ${complexityScore})\nNext: Call kickoff_task with taskId="${taskId}", agentRole "builder", confirmed true → claim_files → implement → commit → raise_pr`
|
|
2918
|
+
: [
|
|
2919
|
+
`⚡ ${taskTypeCfg.label.toUpperCase()} — COORDINATOR FLOW (risk: ${taskTypeCfg.riskLevel}, score: ${complexityScore})`,
|
|
2920
|
+
taskTypeCfg.scoutFirst ? `Step 0: Call kickoff_task with agentRole "scout" first — mapping schema/codebase is REQUIRED for this task type` : null,
|
|
2921
|
+
`Step 1: Call kickoff_task with taskId="${taskId}", agentRole "coordinator", confirmed true`,
|
|
2922
|
+
`Step 2: Call decompose_task — use decompositionHint above as your guide`,
|
|
2923
|
+
`Step 3: Call get_parallel_kickoffs — writes builder agent files`,
|
|
2924
|
+
`Step 4: Open Background Agents panel (⌘⇧J) → start each builder`,
|
|
2925
|
+
`Step 5: Call wait_for_group — auto-proceeds between groups`,
|
|
2926
|
+
].filter(Boolean).join('\n'),
|
|
2927
|
+
} : null,
|
|
2488
2928
|
brief: {
|
|
2489
2929
|
key: task.key,
|
|
2490
2930
|
title: task.title,
|
|
@@ -2565,15 +3005,92 @@ Use this when a developer says "start task", "brief me on", or "what do I need t
|
|
|
2565
3005
|
} catch { /* GitHub may not be linked */ }
|
|
2566
3006
|
|
|
2567
3007
|
// Fetch project agentConfig for custom role generation
|
|
3008
|
+
let kickoffProject = null
|
|
2568
3009
|
try {
|
|
2569
3010
|
const projRes = await api.get(`/api/projects/${task.project}`)
|
|
2570
|
-
if (projRes?.success)
|
|
3011
|
+
if (projRes?.success) {
|
|
3012
|
+
kickoffProject = projRes.data.project || null
|
|
3013
|
+
projectAgentConfig = kickoffProject?.agentConfig || null
|
|
3014
|
+
}
|
|
3015
|
+
} catch { /* non-fatal */ }
|
|
3016
|
+
|
|
3017
|
+
// ── Team context: file conflict warnings + merge position ──────────────────
|
|
3018
|
+
let teamContext = null
|
|
3019
|
+
try {
|
|
3020
|
+
const treeRes = await api.get(`/api/projects/${task.project}/github/git-tree`).catch(() => null)
|
|
3021
|
+
if (treeRes?.success) {
|
|
3022
|
+
const treeBranches = treeRes.data?.branches || []
|
|
3023
|
+
const myFiles = task.claimedFiles || []
|
|
3024
|
+
|
|
3025
|
+
// Other active branches sharing claimed files with this task
|
|
3026
|
+
const fileConflicts = treeBranches
|
|
3027
|
+
.filter(b => b.taskId !== String(task._id))
|
|
3028
|
+
.map(b => {
|
|
3029
|
+
const shared = (b.claimedFiles || []).filter(f => myFiles.includes(f))
|
|
3030
|
+
return shared.length ? { taskKey: b.taskKey, assignees: b.assignees.map(a => a.name).join(', ') || 'unassigned', sharedFiles: shared } : null
|
|
3031
|
+
})
|
|
3032
|
+
.filter(Boolean)
|
|
3033
|
+
|
|
3034
|
+
// Merge order position for this task
|
|
3035
|
+
const conflictCount = {}
|
|
3036
|
+
const allPairs = []
|
|
3037
|
+
for (let i = 0; i < treeBranches.length; i++) {
|
|
3038
|
+
for (let j = i + 1; j < treeBranches.length; j++) {
|
|
3039
|
+
const shared = (treeBranches[i].claimedFiles || []).filter(f => (treeBranches[j].claimedFiles || []).includes(f))
|
|
3040
|
+
if (shared.length) allPairs.push({ a: treeBranches[i].taskKey, b: treeBranches[j].taskKey })
|
|
3041
|
+
}
|
|
3042
|
+
}
|
|
3043
|
+
treeBranches.forEach(b => { conflictCount[b.taskKey] = 0 })
|
|
3044
|
+
allPairs.forEach(({ a, b }) => { conflictCount[a]++; conflictCount[b]++ })
|
|
3045
|
+
|
|
3046
|
+
const ordered = [...treeBranches]
|
|
3047
|
+
.filter(b => !b.branchError)
|
|
3048
|
+
.sort((a, b) => {
|
|
3049
|
+
const c = (conflictCount[a.taskKey] || 0) - (conflictCount[b.taskKey] || 0)
|
|
3050
|
+
if (c !== 0) return c
|
|
3051
|
+
return (a.compare?.behindBy ?? 99) - (b.compare?.behindBy ?? 99)
|
|
3052
|
+
})
|
|
3053
|
+
const myRank = ordered.findIndex(b => b.taskId === String(task._id)) + 1
|
|
3054
|
+
|
|
3055
|
+
if (fileConflicts.length > 0 || myRank > 1) {
|
|
3056
|
+
teamContext = {
|
|
3057
|
+
mergePosition: myRank || null,
|
|
3058
|
+
totalActiveBranches: treeBranches.length,
|
|
3059
|
+
fileConflicts,
|
|
3060
|
+
mergeOrder: ordered.map((b, i) => ({
|
|
3061
|
+
rank: i + 1, taskKey: b.taskKey, behindBy: b.compare?.behindBy ?? null, readyToMerge: b.compare?.behindBy === 0,
|
|
3062
|
+
})),
|
|
3063
|
+
warning: fileConflicts.length > 0
|
|
3064
|
+
? `⚠️ Your claimedFiles overlap with ${fileConflicts.map(c => c.taskKey).join(', ')}. Coordinate before pushing.`
|
|
3065
|
+
: myRank > 1
|
|
3066
|
+
? `ℹ️ You are #${myRank} in the suggested merge order. ${ordered[0]?.taskKey} should merge before you.`
|
|
3067
|
+
: null,
|
|
3068
|
+
}
|
|
3069
|
+
}
|
|
3070
|
+
}
|
|
2571
3071
|
} catch { /* non-fatal */ }
|
|
2572
3072
|
|
|
2573
3073
|
// If the task already has a branch linked, it's safe to move to in_progress now.
|
|
2574
3074
|
// If not, create_branch will do the move once the branch is created.
|
|
2575
3075
|
const alreadyHasBranch = !!task.github?.headBranch
|
|
2576
3076
|
let moved = false
|
|
3077
|
+
|
|
3078
|
+
// Rebase check: warn if the task branch is behind the default branch
|
|
3079
|
+
let rebaseWarning = null
|
|
3080
|
+
if (alreadyHasBranch) {
|
|
3081
|
+
try {
|
|
3082
|
+
const branchStatus = await api.get(`/api/tasks/${taskId}/branch-status`)
|
|
3083
|
+
if (branchStatus?.success && branchStatus.data?.behindBy > 0) {
|
|
3084
|
+
rebaseWarning = {
|
|
3085
|
+
behindBy: branchStatus.data.behindBy,
|
|
3086
|
+
base: branchStatus.data.base,
|
|
3087
|
+
status: branchStatus.data.status,
|
|
3088
|
+
message: `⚠️ Branch '${task.github.headBranch}' is ${branchStatus.data.behindBy} commit(s) behind '${branchStatus.data.base}'. ` +
|
|
3089
|
+
`Rebase before editing files to avoid merge conflicts: git fetch origin && git rebase origin/${branchStatus.data.base}`,
|
|
3090
|
+
}
|
|
3091
|
+
}
|
|
3092
|
+
} catch { /* non-fatal — never block kickoff for a GitHub API failure */ }
|
|
3093
|
+
}
|
|
2577
3094
|
if (alreadyHasBranch) {
|
|
2578
3095
|
try {
|
|
2579
3096
|
const moveRes = await api.post(`/api/tasks/${taskId}/move`, { column: 'in_progress', toIndex: 0 })
|
|
@@ -2581,13 +3098,13 @@ Use this when a developer says "start task", "brief me on", or "what do I need t
|
|
|
2581
3098
|
} catch { /* might already be in_progress */ }
|
|
2582
3099
|
}
|
|
2583
3100
|
|
|
2584
|
-
// Write cursor rules file
|
|
3101
|
+
// Write cursor rules file — merge task rules + task-type rules
|
|
2585
3102
|
let cursorRulesFile = null
|
|
2586
|
-
|
|
2587
|
-
|
|
2588
|
-
|
|
2589
|
-
|
|
2590
|
-
cursorRulesFile = writeCursorRulesFile(task.key,
|
|
3103
|
+
const typeExtraRules = shouldAutoRoute && taskTypeCfg.extraRules ? taskTypeCfg.extraRules : ''
|
|
3104
|
+
const baseRules = hasCursorRules ? task.cursorRules : ''
|
|
3105
|
+
const mergedRules = [baseRules, typeExtraRules].filter(Boolean).join('\n\n') || '(Follow role constraints above.)'
|
|
3106
|
+
if (hasCursorRules || agentRole || typeExtraRules) {
|
|
3107
|
+
cursorRulesFile = writeCursorRulesFile(task.key, mergedRules, repoPath, agentRole || null)
|
|
2591
3108
|
}
|
|
2592
3109
|
|
|
2593
3110
|
// Dynamically generate .cursor/agents, .cursor/skills, .cursor/commands
|
|
@@ -2597,6 +3114,46 @@ Use this when a developer says "start task", "brief me on", or "what do I need t
|
|
|
2597
3114
|
const taskForWorkspace = agentRole ? { ...task, agentRole } : task
|
|
2598
3115
|
const workspaceResult = writeCursorWorkspace(taskForWorkspace, projectAgentConfig, repoPath || process.cwd())
|
|
2599
3116
|
|
|
3117
|
+
// If branch is behind or conflicted, write a conflict-resolver rule into .cursor/rules/
|
|
3118
|
+
// so the agent is constrained by it automatically on every prompt until resolved.
|
|
3119
|
+
if (rebaseWarning) {
|
|
3120
|
+
try {
|
|
3121
|
+
const repoRootForRules = workspaceResult?.repoRoot || findRepoRoot(repoPath || process.cwd())
|
|
3122
|
+
if (repoRootForRules) {
|
|
3123
|
+
const rulesDir = join(repoRootForRules, '.cursor', 'rules')
|
|
3124
|
+
mkdirSync(rulesDir, { recursive: true })
|
|
3125
|
+
const teamWarn = teamContext?.warning ? `\n${teamContext.warning}\n` : ''
|
|
3126
|
+
const mergePos = teamContext?.mergePosition ? `\nYou are **#${teamContext.mergePosition}** in the suggested merge order. Do not rush to merge — wait for tasks ranked above you.\n` : ''
|
|
3127
|
+
const conflictRule = `---
|
|
3128
|
+
description: Conflict resolution constraints — active because this branch has conflicts
|
|
3129
|
+
alwaysApply: true
|
|
3130
|
+
---
|
|
3131
|
+
|
|
3132
|
+
## ⚠️ BRANCH CONFLICT ACTIVE
|
|
3133
|
+
|
|
3134
|
+
This branch is **${rebaseWarning.behindBy} commit(s) behind** \`${rebaseWarning.base}\`.
|
|
3135
|
+
${teamWarn}${mergePos}
|
|
3136
|
+
### Before editing ANY file:
|
|
3137
|
+
1. \`get_project_git_tree\` — see the full team landscape
|
|
3138
|
+
2. \`check_merge_conflicts\` — get the conflict file list
|
|
3139
|
+
3. For each conflicting file: \`resolve_conflict\` → read both tasks' goals → resolve
|
|
3140
|
+
|
|
3141
|
+
### Hard rules:
|
|
3142
|
+
- \`git rebase\` only — never \`git merge\`
|
|
3143
|
+
- \`git push --force-with-lease\` only — never \`--force\`
|
|
3144
|
+
- Do NOT mark subtasks done until \`check_merge_conflicts\` returns \`status: 'clean'\`
|
|
3145
|
+
|
|
3146
|
+
### Ask the human when:
|
|
3147
|
+
- Another active branch shares your files AND you cannot determine whose change wins → \`request_human_input(type="ambiguity")\`
|
|
3148
|
+
- Before force-pushing if another developer's branch overlaps → \`request_human_input(type="approval")\`
|
|
3149
|
+
|
|
3150
|
+
After \`request_human_input\`: STOP, show the question in chat, wait for reply, then continue.
|
|
3151
|
+
`
|
|
3152
|
+
writeFileSync(join(rulesDir, 'conflict-resolver.mdc'), conflictRule, 'utf8')
|
|
3153
|
+
}
|
|
3154
|
+
} catch { /* non-fatal */ }
|
|
3155
|
+
}
|
|
3156
|
+
|
|
2600
3157
|
// Persist agentRole + workspace status to server
|
|
2601
3158
|
const workspacePatch = {
|
|
2602
3159
|
...(agentRole ? { agentRole } : {}),
|
|
@@ -2609,6 +3166,11 @@ Use this when a developer says "start task", "brief me on", or "what do I need t
|
|
|
2609
3166
|
}
|
|
2610
3167
|
api.patch(`/api/tasks/${taskId}`, workspacePatch).catch(() => {/* non-fatal */})
|
|
2611
3168
|
|
|
3169
|
+
// Auto-scan workspace so the Workspace tab in the UI is fresh immediately after kickoff.
|
|
3170
|
+
if (kickoffProject) {
|
|
3171
|
+
runWorkspaceScan(taskId, task, kickoffProject, repoPath).catch(() => {/* non-fatal */})
|
|
3172
|
+
}
|
|
3173
|
+
|
|
2612
3174
|
return text({
|
|
2613
3175
|
started: {
|
|
2614
3176
|
key: task.key,
|
|
@@ -2644,18 +3206,34 @@ Use this when a developer says "start task", "brief me on", or "what do I need t
|
|
|
2644
3206
|
date: c.commit?.author?.date,
|
|
2645
3207
|
})),
|
|
2646
3208
|
movedToInProgress: moved,
|
|
3209
|
+
rebaseWarning,
|
|
3210
|
+
teamContext,
|
|
2647
3211
|
suggestedBranch: alreadyHasBranch ? null : suggestedBranch,
|
|
3212
|
+
taskAnalysis: shouldAutoRoute ? {
|
|
3213
|
+
detectedType: taskTypeCfg.label,
|
|
3214
|
+
riskLevel: taskTypeCfg.riskLevel,
|
|
3215
|
+
recommendedFlow,
|
|
3216
|
+
complexityScore,
|
|
3217
|
+
suggestedSkills: taskTypeCfg.suggestedSkills,
|
|
3218
|
+
decompositionHint: taskTypeCfg.decompositionHint,
|
|
3219
|
+
scoutFirst: taskTypeCfg.scoutFirst,
|
|
3220
|
+
typeRulesInjected: !!typeExtraRules,
|
|
3221
|
+
} : null,
|
|
2648
3222
|
nextStep: agentRole === 'scout'
|
|
2649
|
-
? `
|
|
3223
|
+
? `SCOUT mode. Read the codebase, then save findings with update_task(scoutReport=...). Do NOT modify files.`
|
|
2650
3224
|
: agentRole === 'coordinator'
|
|
2651
|
-
? `
|
|
3225
|
+
? `COORDINATOR mode. Read the README, then call decompose_task to split work into parallel/sequential subtasks with file ownership.`
|
|
2652
3226
|
: agentRole === 'builder'
|
|
2653
|
-
? `
|
|
3227
|
+
? `BUILDER mode. Call claim_files first, then start coding on "${alreadyHasBranch ? task.github.headBranch : suggestedBranch}".`
|
|
2654
3228
|
: agentRole === 'reviewer'
|
|
2655
|
-
? `
|
|
3229
|
+
? `REVIEWER mode. Call review_pr to get the diff, then post_pr_review with your analysis.`
|
|
3230
|
+
: recommendedFlow === 'coordinator' && shouldAutoRoute
|
|
3231
|
+
? `⚡ COMPLEX TASK (score ${complexityScore}): Call kickoff_task with agentRole "coordinator" confirmed true → decompose_task → get_parallel_kickoffs → start builders. DO NOT code directly.`
|
|
3232
|
+
: recommendedFlow === 'single_builder' && shouldAutoRoute
|
|
3233
|
+
? `🔧 MEDIUM TASK (score ${complexityScore}): Call kickoff_task with agentRole "builder" confirmed true → claim_files → implement → commit → raise_pr.`
|
|
2656
3234
|
: alreadyHasBranch
|
|
2657
|
-
? `Branch "${task.github.headBranch}"
|
|
2658
|
-
: `Call create_branch to create "${suggestedBranch}"
|
|
3235
|
+
? `SIMPLE TASK (score ${complexityScore}). Branch "${task.github.headBranch}" exists — call claim_files and start coding.`
|
|
3236
|
+
: `SIMPLE TASK (score ${complexityScore}). Call create_branch to create "${suggestedBranch}", then claim_files and code.`,
|
|
2659
3237
|
})
|
|
2660
3238
|
}
|
|
2661
3239
|
)
|
|
@@ -3166,6 +3744,269 @@ You are a COORDINATOR agent. Your behavioral constraints for this session:
|
|
|
3166
3744
|
- NEVER implement code yourself — your job ends after get_parallel_kickoffs writes the agent files`,
|
|
3167
3745
|
}
|
|
3168
3746
|
|
|
3747
|
+
// ── Task type detection + per-type config ──────────────────────────────────────
|
|
3748
|
+
// Each type carries: detection signals, recommended flow, extra rules, suggested skills,
|
|
3749
|
+
// decomposition hints, and risk level.
|
|
3750
|
+
|
|
3751
|
+
const TASK_TYPES = {
|
|
3752
|
+
migration: {
|
|
3753
|
+
label: 'DB Migration',
|
|
3754
|
+
riskLevel: 'high',
|
|
3755
|
+
titleSignals: ['migrat', 'schema', 'db ', 'database', 'column ', 'table ', 'index ', 'seed', 'sequelize', 'mongoose.*schema', 'alter '],
|
|
3756
|
+
readmeSignals: ['migration', 'schema', 'rollback', 'up()', 'down()', 'alter table', 'add column', 'drop column', 'mongoose.Schema'],
|
|
3757
|
+
fileSignals: ['migrations/', 'seeds/', 'migrate', '.migration.'],
|
|
3758
|
+
flow: 'coordinator', // always coordinator — sequential groups, high risk
|
|
3759
|
+
parallel: false, // schema changes are sequential by nature
|
|
3760
|
+
scoutFirst: true, // must understand current schema before touching it
|
|
3761
|
+
extraRules: `## Task Type: DB MIGRATION — HIGH RISK
|
|
3762
|
+
|
|
3763
|
+
**MANDATORY before writing any code:**
|
|
3764
|
+
1. Scout the existing schema first — call kickoff_task with agentRole "scout"
|
|
3765
|
+
2. Map every model/schema file that will be affected
|
|
3766
|
+
3. Decompose into sequential groups: schema change → model update → seed/test data → tests
|
|
3767
|
+
|
|
3768
|
+
**MIGRATION RULES (non-negotiable):**
|
|
3769
|
+
- Every migration file MUST have a rollback (down() function)
|
|
3770
|
+
- NEVER modify an existing migration file that has already run
|
|
3771
|
+
- NEVER drop columns/tables without confirming data is backed up
|
|
3772
|
+
- ALWAYS test the rollback before marking done
|
|
3773
|
+
- Run migrations on a test database copy first
|
|
3774
|
+
- Commit migration files separately from model changes
|
|
3775
|
+
|
|
3776
|
+
**DECOMPOSITION PATTERN for migrations:**
|
|
3777
|
+
Group 1 (sequential): Write migration file with up() + down()
|
|
3778
|
+
Group 2 (sequential): Update affected models/schemas
|
|
3779
|
+
Group 3 (sequential): Update seed files and test fixtures
|
|
3780
|
+
Group 4 (sequential): Write integration tests that run migration + rollback`,
|
|
3781
|
+
suggestedSkills: ['run-migration', 'test-rollback', 'scout-codebase'],
|
|
3782
|
+
decompositionHint: 'Split into sequential groups: migration file → model update → seeds → tests. Each group depends on the previous. No parallel execution — order matters for schema integrity.',
|
|
3783
|
+
},
|
|
3784
|
+
|
|
3785
|
+
integration: {
|
|
3786
|
+
label: 'Third-party Integration',
|
|
3787
|
+
riskLevel: 'medium-high',
|
|
3788
|
+
titleSignals: ['integrat', 'webhook', 'oauth', 'payment', 'stripe', 'twilio', 'sendgrid', 'aws', 's3', 'firebase', 'api key', 'third.party', 'external'],
|
|
3789
|
+
readmeSignals: ['api key', 'webhook', 'oauth', 'access token', 'client secret', 'base url', 'sdk', 'third-party', 'external service'],
|
|
3790
|
+
fileSignals: ['services/', 'integrations/', 'webhooks/', '.env'],
|
|
3791
|
+
flow: 'single_builder', // usually one workstream — one integration = one unit
|
|
3792
|
+
parallel: false,
|
|
3793
|
+
scoutFirst: false,
|
|
3794
|
+
extraRules: `## Task Type: THIRD-PARTY INTEGRATION — MEDIUM-HIGH RISK
|
|
3795
|
+
|
|
3796
|
+
**SECURITY RULES (non-negotiable):**
|
|
3797
|
+
- NEVER hardcode API keys, secrets, or tokens in source code
|
|
3798
|
+
- ALL credentials go in environment variables — reference as process.env.KEY_NAME
|
|
3799
|
+
- NEVER commit .env files with real credentials
|
|
3800
|
+
- Add .env to .gitignore if not already present
|
|
3801
|
+
|
|
3802
|
+
**INTEGRATION RULES:**
|
|
3803
|
+
- Always wrap external calls in try/catch with meaningful error messages
|
|
3804
|
+
- All external API calls must be mocked in tests (no real API calls in test suite)
|
|
3805
|
+
- Add timeout handling — external services can be slow or unavailable
|
|
3806
|
+
- Log errors with enough context to debug (request ID, status code, endpoint)
|
|
3807
|
+
- Test both success AND failure paths — what happens when the API is down?
|
|
3808
|
+
|
|
3809
|
+
**DECOMPOSITION PATTERN for integrations:**
|
|
3810
|
+
1. Service/client file (wraps the SDK/API)
|
|
3811
|
+
2. Business logic that calls the service
|
|
3812
|
+
3. Tests with mocked external calls
|
|
3813
|
+
4. Environment variable documentation`,
|
|
3814
|
+
suggestedSkills: ['mock-external-calls', 'check-env-vars', 'run-tests'],
|
|
3815
|
+
decompositionHint: 'Split into: service wrapper → business logic → mocked tests. Keep external SDK calls isolated in one service file so tests can mock it cleanly.',
|
|
3816
|
+
},
|
|
3817
|
+
|
|
3818
|
+
bugfix: {
|
|
3819
|
+
label: 'Bug Fix',
|
|
3820
|
+
riskLevel: 'low-medium',
|
|
3821
|
+
titleSignals: ['fix', 'bug', 'issue', 'error', 'broken', 'failing', 'crash', 'regression', 'typo', 'incorrect', 'wrong', 'not working'],
|
|
3822
|
+
readmeSignals: ['bug', 'reproduce', 'root cause', 'stack trace', 'error message', 'expected behavior', 'actual behavior'],
|
|
3823
|
+
fileSignals: [],
|
|
3824
|
+
flow: 'direct', // targeted fix — scout to understand, then fix directly
|
|
3825
|
+
parallel: false,
|
|
3826
|
+
scoutFirst: true, // understand the bug before touching code
|
|
3827
|
+
extraRules: `## Task Type: BUG FIX
|
|
3828
|
+
|
|
3829
|
+
**REQUIRED WORKFLOW:**
|
|
3830
|
+
1. Scout first: reproduce the bug, read the stack trace, understand root cause
|
|
3831
|
+
2. Write a failing test that reproduces the bug BEFORE writing the fix
|
|
3832
|
+
3. Fix the root cause — not the symptom
|
|
3833
|
+
4. Verify the test now passes
|
|
3834
|
+
5. Run the full test suite to check for regressions
|
|
3835
|
+
|
|
3836
|
+
**BUG FIX RULES:**
|
|
3837
|
+
- Fix the root cause, not the symptom (no defensive null checks to hide errors)
|
|
3838
|
+
- The fix should be minimal and targeted — no unrelated refactors
|
|
3839
|
+
- Every bug fix MUST include a regression test
|
|
3840
|
+
- If the bug reveals a broader design issue, file a separate task — don't fix it here`,
|
|
3841
|
+
suggestedSkills: ['reproduce-bug', 'run-tests'],
|
|
3842
|
+
decompositionHint: 'Bug fixes rarely need decomposition. Use direct flow: scout → reproduce → fix → regression test → commit.',
|
|
3843
|
+
},
|
|
3844
|
+
|
|
3845
|
+
ui: {
|
|
3846
|
+
label: 'UI / Frontend',
|
|
3847
|
+
riskLevel: 'low',
|
|
3848
|
+
titleSignals: ['ui', 'component', 'page', 'screen', 'layout', 'design', 'style', 'form', 'modal', 'button', 'dashboard', 'frontend', 'view'],
|
|
3849
|
+
readmeSignals: ['component', 'jsx', 'tsx', 'css', 'tailwind', 'responsive', 'mobile', 'desktop', 'user interface', 'render'],
|
|
3850
|
+
fileSignals: ['components/', 'pages/', 'views/', 'screens/', '.jsx', '.tsx', '.css', '.scss'],
|
|
3851
|
+
flow: 'auto', // parallel if multiple independent components, single if one flow
|
|
3852
|
+
parallel: true, // UI components are often independent
|
|
3853
|
+
scoutFirst: false,
|
|
3854
|
+
extraRules: `## Task Type: UI / FRONTEND
|
|
3855
|
+
|
|
3856
|
+
**UI RULES:**
|
|
3857
|
+
- No business logic inside components — components render data, they don't fetch/transform it
|
|
3858
|
+
- Keep components small and focused (single responsibility)
|
|
3859
|
+
- Every interactive element must handle loading, error, and empty states
|
|
3860
|
+
- Test responsive behavior — check mobile + desktop breakpoints
|
|
3861
|
+
- No inline styles — use CSS classes / Tailwind utilities consistently with the design system
|
|
3862
|
+
- Accessibility: interactive elements need aria labels, buttons need type="button"
|
|
3863
|
+
|
|
3864
|
+
**DECOMPOSITION PATTERN for multi-component features:**
|
|
3865
|
+
Each independent component can run in parallel as a separate builder subtask.
|
|
3866
|
+
Shared utilities/hooks should be built first (Group 1), then components that use them (Group 2).`,
|
|
3867
|
+
suggestedSkills: ['run-tests', 'check-responsive'],
|
|
3868
|
+
decompositionHint: 'UI tasks can often run in parallel: one builder per independent component. Group shared hooks/utils in Group 1, then components in Group 2 (parallel).',
|
|
3869
|
+
},
|
|
3870
|
+
|
|
3871
|
+
backend: {
|
|
3872
|
+
label: 'Backend / API',
|
|
3873
|
+
riskLevel: 'medium',
|
|
3874
|
+
titleSignals: ['endpoint', 'route', 'api', 'controller', 'service', 'middleware', 'backend', 'server', 'handler'],
|
|
3875
|
+
readmeSignals: ['route', 'endpoint', 'request', 'response', 'middleware', 'controller', 'service', 'http', 'rest', 'graphql'],
|
|
3876
|
+
fileSignals: ['routes/', 'controllers/', 'services/', 'middleware/', 'api/'],
|
|
3877
|
+
flow: 'auto', // single builder for one endpoint, coordinator for multi-endpoint features
|
|
3878
|
+
parallel: false, // route → model → middleware → tests often sequential
|
|
3879
|
+
scoutFirst: false,
|
|
3880
|
+
extraRules: `## Task Type: BACKEND / API
|
|
3881
|
+
|
|
3882
|
+
**API RULES:**
|
|
3883
|
+
- All routes must have corresponding tests (minimum: success, 401, 404)
|
|
3884
|
+
- Validate all request inputs at the route level — never trust user input
|
|
3885
|
+
- Return consistent response shape: { success: true/false, data: {...}, message: '...' }
|
|
3886
|
+
- Authentication middleware must be applied to all protected routes
|
|
3887
|
+
- No raw database queries in route handlers — use model methods or service layer
|
|
3888
|
+
- HTTP status codes must be semantically correct (200/201/400/401/403/404/500)
|
|
3889
|
+
|
|
3890
|
+
**DECOMPOSITION PATTERN for multi-endpoint features:**
|
|
3891
|
+
Each independent endpoint can be a parallel builder subtask (separate route + tests).
|
|
3892
|
+
Shared middleware or model changes should be in Group 1 (sequential first).`,
|
|
3893
|
+
suggestedSkills: ['run-tests', 'add-express-route'],
|
|
3894
|
+
decompositionHint: 'Group shared model/middleware changes first (Group 1, sequential). Then independent endpoint implementations in Group 2 (parallel if no shared files).',
|
|
3895
|
+
},
|
|
3896
|
+
|
|
3897
|
+
security: {
|
|
3898
|
+
label: 'Security / Auth',
|
|
3899
|
+
riskLevel: 'high',
|
|
3900
|
+
titleSignals: ['security', 'auth', 'permission', 'access control', 'jwt', 'token', 'xss', 'csrf', 'injection', 'vulnerability', 'pentest', 'audit', 'rbac', 'oauth', 'rate limit', 'sanitiz'],
|
|
3901
|
+
readmeSignals: ['security', 'authentication', 'authorization', 'privilege', 'exploit', 'attack surface', 'hardening', 'role-based', 'guard', 'middleware auth'],
|
|
3902
|
+
fileSignals: ['middleware/auth', 'auth/', 'guards/', 'policies/', 'permissions/'],
|
|
3903
|
+
flow: 'direct', // security fixes are targeted and must not be parallelised
|
|
3904
|
+
parallel: false,
|
|
3905
|
+
scoutFirst: true, // always map attack surface before touching auth code
|
|
3906
|
+
extraRules: `## Task Type: SECURITY / AUTH — HIGH RISK
|
|
3907
|
+
|
|
3908
|
+
**MANDATORY before writing any code:**
|
|
3909
|
+
1. Scout the full auth flow: middleware chain, token validation, role checks
|
|
3910
|
+
2. Map all endpoints that would be affected by the change
|
|
3911
|
+
3. Check for privilege escalation vectors before and after the change
|
|
3912
|
+
|
|
3913
|
+
**SECURITY RULES (non-negotiable):**
|
|
3914
|
+
- NEVER weaken an existing security check — only strengthen
|
|
3915
|
+
- NEVER log passwords, tokens, secrets, or PII
|
|
3916
|
+
- ALL credentials go in environment variables only
|
|
3917
|
+
- Every auth change must be reviewed by a second agent (reviewer role) before merge
|
|
3918
|
+
- Test BOTH the happy path AND the attack path (what happens if you bypass the check?)
|
|
3919
|
+
- No hardcoded secrets, API keys, or signing secrets anywhere in source
|
|
3920
|
+
|
|
3921
|
+
**REVIEW REQUIREMENT:**
|
|
3922
|
+
After implementing, call kickoff_task with agentRole "reviewer" to do a security review before raising the PR.`,
|
|
3923
|
+
suggestedSkills: ['bridge-guard-security-audit', 'run-tests'],
|
|
3924
|
+
decompositionHint: 'Security changes should NOT be parallelised — one reviewer must be able to trace the full change. Scout → fix → security review → tests → PR.',
|
|
3925
|
+
},
|
|
3926
|
+
|
|
3927
|
+
refactor: {
|
|
3928
|
+
label: 'Refactor',
|
|
3929
|
+
riskLevel: 'medium',
|
|
3930
|
+
titleSignals: ['refactor', 'restructure', 'reorganize', 'rewrite', 'cleanup', 'clean up', 'extract', 'rename', 'dedup', 'deduplicate', 'simplify', 'modernize'],
|
|
3931
|
+
readmeSignals: ['refactor', 'restructure', 'extract', 'duplicate', 'technical debt', 'code smell', 'cohesion', 'coupling'],
|
|
3932
|
+
fileSignals: [],
|
|
3933
|
+
flow: 'auto',
|
|
3934
|
+
parallel: false, // refactors touch shared files — parallelism causes conflicts
|
|
3935
|
+
scoutFirst: true, // must understand what currently exists before restructuring
|
|
3936
|
+
extraRules: `## Task Type: REFACTOR
|
|
3937
|
+
|
|
3938
|
+
**REFACTOR RULES:**
|
|
3939
|
+
- No behaviour changes — the public API and observable behaviour must be identical before and after
|
|
3940
|
+
- Run the full test suite before AND after every meaningful change — tests are your safety net
|
|
3941
|
+
- Do not add new features during a refactor — create a separate task
|
|
3942
|
+
- Rename in small atomic commits so git blame stays useful
|
|
3943
|
+
- If you discover a bug while refactoring, file a separate task — don't fix it here
|
|
3944
|
+
|
|
3945
|
+
**DECOMPOSITION PATTERN for large refactors:**
|
|
3946
|
+
Group 1: Scout + map all call sites of the code being changed
|
|
3947
|
+
Group 2: Change the internals without touching the public interface
|
|
3948
|
+
Group 3: Update all call sites
|
|
3949
|
+
Group 4: Delete dead code + run full test suite`,
|
|
3950
|
+
suggestedSkills: ['run-tests', 'bridge-recon'],
|
|
3951
|
+
decompositionHint: 'Refactors must be sequential — parallel builders will conflict on shared files. Scout first to map all usages, then change in layers: internals → call sites → cleanup.',
|
|
3952
|
+
},
|
|
3953
|
+
|
|
3954
|
+
feature: {
|
|
3955
|
+
label: 'New Feature',
|
|
3956
|
+
riskLevel: 'medium',
|
|
3957
|
+
titleSignals: ['add', 'implement', 'build', 'create', 'new', 'introduce', 'develop'],
|
|
3958
|
+
readmeSignals: [],
|
|
3959
|
+
fileSignals: [],
|
|
3960
|
+
flow: 'auto', // complexity score decides
|
|
3961
|
+
parallel: true,
|
|
3962
|
+
scoutFirst: false,
|
|
3963
|
+
extraRules: `## Task Type: NEW FEATURE
|
|
3964
|
+
|
|
3965
|
+
**FEATURE RULES:**
|
|
3966
|
+
- Read and follow the implementation plan exactly — no scope creep
|
|
3967
|
+
- Every new function/endpoint needs tests
|
|
3968
|
+
- New files must follow the existing naming and folder conventions
|
|
3969
|
+
- No breaking changes to existing public APIs without explicit approval`,
|
|
3970
|
+
suggestedSkills: ['run-tests'],
|
|
3971
|
+
decompositionHint: 'Split by layer (model / route / tests) or by independent sub-features. Use parallel execution where files don\'t overlap.',
|
|
3972
|
+
},
|
|
3973
|
+
}
|
|
3974
|
+
|
|
3975
|
+
/**
|
|
3976
|
+
* Detect task type from title + README + file patterns.
|
|
3977
|
+
* Returns the best matching type key, or 'feature' as fallback.
|
|
3978
|
+
*/
|
|
3979
|
+
function detectTaskType(task) {
|
|
3980
|
+
const text = [task.title || '', task.readmeMarkdown || '', task.description || ''].join(' ').toLowerCase()
|
|
3981
|
+
|
|
3982
|
+
const scores = {}
|
|
3983
|
+
for (const [typeKey, cfg] of Object.entries(TASK_TYPES)) {
|
|
3984
|
+
let score = 0
|
|
3985
|
+
for (const sig of cfg.titleSignals) { if (new RegExp(sig, 'i').test(task.title || '')) score += 3 }
|
|
3986
|
+
for (const sig of cfg.readmeSignals) { if (new RegExp(sig, 'i').test(task.readmeMarkdown || '')) score += 2 }
|
|
3987
|
+
for (const sig of cfg.fileSignals) { if (new RegExp(sig, 'i').test(text)) score += 1 }
|
|
3988
|
+
scores[typeKey] = score
|
|
3989
|
+
}
|
|
3990
|
+
|
|
3991
|
+
const sorted = Object.entries(scores).sort((a, b) => b[1] - a[1])
|
|
3992
|
+
const [first, second] = sorted
|
|
3993
|
+
|
|
3994
|
+
// Require min score of 1 AND a gap of at least 2 over the second-best type.
|
|
3995
|
+
// If two types are within 2 points of each other the title is ambiguous — fall
|
|
3996
|
+
// back to 'feature' so the agent is asked to verify rather than silently mis-routing.
|
|
3997
|
+
const topScore = first?.[1] ?? 0
|
|
3998
|
+
const secondScore = second?.[1] ?? 0
|
|
3999
|
+
const isAmbiguous = topScore < 1 || (topScore - secondScore) < 2
|
|
4000
|
+
|
|
4001
|
+
return {
|
|
4002
|
+
type: isAmbiguous ? 'feature' : first[0],
|
|
4003
|
+
score: topScore,
|
|
4004
|
+
confidence: isAmbiguous ? 'low' : topScore >= 5 ? 'high' : 'medium',
|
|
4005
|
+
ambiguous: isAmbiguous,
|
|
4006
|
+
allScores: Object.fromEntries(sorted.filter(([, s]) => s > 0)),
|
|
4007
|
+
}
|
|
4008
|
+
}
|
|
4009
|
+
|
|
3169
4010
|
/** Write task-specific cursor rules to .cursor/rules/<taskKey>.mdc in the local repo root.
|
|
3170
4011
|
* When role is provided, role-specific behavioral constraints are prepended. */
|
|
3171
4012
|
function writeCursorRulesFile(taskKey, rulesMarkdown, startPath, role = null) {
|
|
@@ -3236,9 +4077,14 @@ function writeCursorWorkspace(task, projectAgentConfig, startPath) {
|
|
|
3236
4077
|
|
|
3237
4078
|
// ── 2. Subagents from DB → .cursor/agents/<name>.md ───────────────────────
|
|
3238
4079
|
// If DB has subagents defined, write them verbatim (admin-authored markdown)
|
|
4080
|
+
// Respects project-level enabled flag + task-level agentKitOverrides.
|
|
3239
4081
|
const dbSubagents = cfg.subagents || []
|
|
4082
|
+
const kitSubagentOverrides = task.agentKitOverrides?.subagents || {}
|
|
3240
4083
|
for (const s of dbSubagents) {
|
|
3241
4084
|
if (!s.name || !s.body) continue
|
|
4085
|
+
const taskOverride = kitSubagentOverrides[s.name]
|
|
4086
|
+
const isEnabled = taskOverride !== undefined ? taskOverride : s.enabled !== false
|
|
4087
|
+
if (!isEnabled) continue
|
|
3242
4088
|
const header = `---\nname: ${s.name}\ndescription: ${s.description || s.name}\n---\n\n`
|
|
3243
4089
|
const f = join(agentsDir, `${s.name}.md`)
|
|
3244
4090
|
writeFileSync(f, header + s.body, 'utf8')
|
|
@@ -3291,16 +4137,22 @@ function writeCursorWorkspace(task, projectAgentConfig, startPath) {
|
|
|
3291
4137
|
|
|
3292
4138
|
// ── 5. Skills from DB → .cursor/skills/<name>.md ──────────────────────────
|
|
3293
4139
|
// If DB is empty, write built-in defaults so Cursor always has something useful.
|
|
4140
|
+
// Respects project-level enabled flag + task-level agentKitOverrides.
|
|
3294
4141
|
const dbSkills = cfg.skills || []
|
|
3295
|
-
|
|
3296
|
-
|
|
3297
|
-
|
|
4142
|
+
const kitSkillOverrides = task.agentKitOverrides?.skills || {}
|
|
4143
|
+
const enabledSkills = dbSkills.filter(s => {
|
|
4144
|
+
if (!s.name || !s.body) return false
|
|
4145
|
+
const taskOverride = kitSkillOverrides[s.name]
|
|
4146
|
+
return taskOverride !== undefined ? taskOverride : s.enabled !== false
|
|
4147
|
+
})
|
|
4148
|
+
if (enabledSkills.length > 0) {
|
|
4149
|
+
for (const s of enabledSkills) {
|
|
3298
4150
|
const header = `---\nname: ${s.name}\ndescription: ${s.description || s.name}\n---\n\n`
|
|
3299
4151
|
const f = join(skillsDir, `${s.name}.md`)
|
|
3300
4152
|
writeFileSync(f, header + s.body, 'utf8')
|
|
3301
4153
|
written.push(f)
|
|
3302
4154
|
}
|
|
3303
|
-
} else {
|
|
4155
|
+
} else if (dbSkills.length === 0) {
|
|
3304
4156
|
// Built-in default skills
|
|
3305
4157
|
const defaults = [
|
|
3306
4158
|
{
|
|
@@ -3322,6 +4174,147 @@ function writeCursorWorkspace(task, projectAgentConfig, startPath) {
|
|
|
3322
4174
|
}
|
|
3323
4175
|
}
|
|
3324
4176
|
|
|
4177
|
+
// ── 5b. System skill: team-awareness — always written ─────────────────────────
|
|
4178
|
+
const teamAwarenessSkill = `---
|
|
4179
|
+
name: team-awareness
|
|
4180
|
+
description: Understand the team's branch landscape before coding or merging. Prevents conflicts and wrong merge order.
|
|
4181
|
+
---
|
|
4182
|
+
|
|
4183
|
+
## When to use
|
|
4184
|
+
Call this skill at the start of every builder/coordinator session, and before raising a PR.
|
|
4185
|
+
|
|
4186
|
+
## Step 1 — Check the team landscape
|
|
4187
|
+
\`\`\`
|
|
4188
|
+
get_project_git_tree(projectId="${task.project?._id || task.project || '<projectId>'}")
|
|
4189
|
+
\`\`\`
|
|
4190
|
+
Read: \`conflictPairs\`, \`mergeOrder\`, \`staleBranches\`.
|
|
4191
|
+
|
|
4192
|
+
## Step 2 — Act on what you see
|
|
4193
|
+
|
|
4194
|
+
**If \`conflictPairs\` includes your task:**
|
|
4195
|
+
- Contact the other task's assignee BEFORE touching shared files
|
|
4196
|
+
- Use \`request_human_input\` to coordinate if the overlap is significant
|
|
4197
|
+
- Consider claiming different files or splitting the work
|
|
4198
|
+
|
|
4199
|
+
**If you are NOT rank 1 in \`mergeOrder\`:**
|
|
4200
|
+
- Note which tasks should merge before you
|
|
4201
|
+
- Do not raise your PR as "ready to merge" until the tasks ranked above you are merged
|
|
4202
|
+
- You CAN keep coding — just don't block the pipeline
|
|
4203
|
+
|
|
4204
|
+
**If your branch is behind (\`behindBy > 0\`):**
|
|
4205
|
+
- Run: \`git fetch origin && git rebase origin/${base || 'main'}\`
|
|
4206
|
+
- Then re-check with \`check_merge_conflicts\`
|
|
4207
|
+
|
|
4208
|
+
**If \`staleBranches\` includes your task:**
|
|
4209
|
+
- Your last commit was 48h+ ago
|
|
4210
|
+
- Check if work is blocked, notify the team via \`add_task_comment\`
|
|
4211
|
+
`
|
|
4212
|
+
writeFileSync(join(skillsDir, 'team-awareness.md'), teamAwarenessSkill, 'utf8')
|
|
4213
|
+
written.push(join(skillsDir, 'team-awareness.md'))
|
|
4214
|
+
|
|
4215
|
+
// ── 5c. System skill: resolve-conflict — always written, regardless of DB skills ──
|
|
4216
|
+
// Every agent needs this available. It is not configurable — it is infrastructure.
|
|
4217
|
+
const resolveConflictSkill = `---
|
|
4218
|
+
name: resolve-conflict
|
|
4219
|
+
description: Autonomously resolve git merge conflicts using context from both tasks. Asks human only when genuinely ambiguous or before destructive operations.
|
|
4220
|
+
---
|
|
4221
|
+
|
|
4222
|
+
## When to use
|
|
4223
|
+
Call this skill when \`kickoff_task\` returns a \`rebaseWarning\`, or when \`check_merge_conflicts\` returns \`status: 'conflict'\` or \`status: 'behind'\`.
|
|
4224
|
+
|
|
4225
|
+
## Decision tree — autonomous vs. ask human
|
|
4226
|
+
|
|
4227
|
+
Resolve autonomously when:
|
|
4228
|
+
- One side adds new functionality the other doesn't touch → keep both
|
|
4229
|
+
- One side fixes a bug in shared code → prefer the fix, adapt for your task's additions
|
|
4230
|
+
- Pure formatting / import ordering conflict → pick the cleaner version
|
|
4231
|
+
- One side deletes something the other modifies → keep the modification (deletion was likely superseded)
|
|
4232
|
+
|
|
4233
|
+
Call \`request_human_input(type="ambiguity")\` when:
|
|
4234
|
+
- Both sides implement the SAME feature but differently (two valid architectures)
|
|
4235
|
+
- You cannot determine which task's intent takes priority
|
|
4236
|
+
- The task README is missing and the code intent is unclear
|
|
4237
|
+
- Two tasks have genuinely incompatible API contracts (different signatures for same function)
|
|
4238
|
+
|
|
4239
|
+
Call \`request_human_input(type="approval")\` before:
|
|
4240
|
+
- \`git push --force-with-lease\` on a branch shared with another active developer
|
|
4241
|
+
- Any operation that could permanently overwrite a teammate's committed work
|
|
4242
|
+
|
|
4243
|
+
## Protocol
|
|
4244
|
+
|
|
4245
|
+
### Step 1 — Get the full picture
|
|
4246
|
+
\`\`\`
|
|
4247
|
+
check_merge_conflicts(taskId="${taskId}")
|
|
4248
|
+
\`\`\`
|
|
4249
|
+
Read: \`conflictingFiles\`, \`conflictingTasks\`, \`behindBy\`.
|
|
4250
|
+
|
|
4251
|
+
### Step 2 — For each conflicting file, fetch both contexts
|
|
4252
|
+
\`\`\`
|
|
4253
|
+
resolve_conflict(taskId="${taskId}", filePath="<file>")
|
|
4254
|
+
\`\`\`
|
|
4255
|
+
This returns \`thisTask.goal\` and \`otherTask.goal\` — read both before touching the file.
|
|
4256
|
+
|
|
4257
|
+
### Step 3 — Decide: autonomous or ask?
|
|
4258
|
+
Apply the decision tree above. If autonomous, continue to Step 4.
|
|
4259
|
+
If ambiguous:
|
|
4260
|
+
\`\`\`
|
|
4261
|
+
request_human_input(
|
|
4262
|
+
taskId="${taskId}",
|
|
4263
|
+
question="I found two conflicting implementations of <X> in <file>. Your version does <A>. The other task (KEY) does <B>. Which approach should I keep, or should I combine them?",
|
|
4264
|
+
context="thisTask goal: <goal>. otherTask goal: <other goal>. Conflict block: <paste the <<<<<<< block>",
|
|
4265
|
+
type="ambiguity"
|
|
4266
|
+
)
|
|
4267
|
+
\`\`\`
|
|
4268
|
+
The question appears in Cursor chat. STOP and wait for the developer to reply here. Use their reply as the answer and continue from Step 4.
|
|
4269
|
+
|
|
4270
|
+
### Step 4 — Start the rebase
|
|
4271
|
+
\`\`\`bash
|
|
4272
|
+
git fetch origin
|
|
4273
|
+
git rebase origin/${base || 'main'}
|
|
4274
|
+
\`\`\`
|
|
4275
|
+
Git will stop at each conflict. Do NOT abort.
|
|
4276
|
+
|
|
4277
|
+
### Step 5 — Resolve each conflict block
|
|
4278
|
+
Open the file with the Read tool. Find \`<<<<<<< HEAD\` blocks.
|
|
4279
|
+
|
|
4280
|
+
Rules:
|
|
4281
|
+
- \`<<<<<<< HEAD\` = YOUR code (this task)
|
|
4282
|
+
- \`>>>>>>> ...\` = THEIR code (merged into ${base || 'main'})
|
|
4283
|
+
- NEVER blindly pick one side — read both task goals first
|
|
4284
|
+
- If both changes are needed, combine them
|
|
4285
|
+
- If they solve the same problem differently, keep the better implementation
|
|
4286
|
+
|
|
4287
|
+
Use the Edit tool to resolve. The final file must have ZERO conflict markers.
|
|
4288
|
+
|
|
4289
|
+
### Step 6 — Request approval before force-push (if shared branch)
|
|
4290
|
+
If \`conflictingTasks\` is non-empty (another developer's branch overlaps):
|
|
4291
|
+
\`\`\`
|
|
4292
|
+
request_human_input(
|
|
4293
|
+
taskId="${taskId}",
|
|
4294
|
+
question="I've resolved all conflict markers. Ready to run: git push --force-with-lease. This will overwrite the remote branch. Approve?",
|
|
4295
|
+
context="Conflicts were with: <other task keys>. Files changed: <list>",
|
|
4296
|
+
type="approval"
|
|
4297
|
+
)
|
|
4298
|
+
\`\`\`
|
|
4299
|
+
The question appears in Cursor chat. STOP and wait for the developer to reply "yes"/"approved" before pushing.
|
|
4300
|
+
If no overlapping active tasks, push directly (low risk — only your branch).
|
|
4301
|
+
|
|
4302
|
+
### Step 7 — Push and verify
|
|
4303
|
+
\`\`\`bash
|
|
4304
|
+
git add <resolved-files>
|
|
4305
|
+
git rebase --continue
|
|
4306
|
+
# Repeat Step 5 for each conflict
|
|
4307
|
+
git push --force-with-lease
|
|
4308
|
+
\`\`\`
|
|
4309
|
+
\`\`\`
|
|
4310
|
+
check_merge_conflicts(taskId="${taskId}") → must return status: 'clean'
|
|
4311
|
+
run-tests skill → must pass
|
|
4312
|
+
log_session_event(type="info", name="conflict-resolved", summary="<what you merged and why>")
|
|
4313
|
+
\`\`\`
|
|
4314
|
+
`
|
|
4315
|
+
writeFileSync(join(skillsDir, 'resolve-conflict.md'), resolveConflictSkill, 'utf8')
|
|
4316
|
+
written.push(join(skillsDir, 'resolve-conflict.md'))
|
|
4317
|
+
|
|
3325
4318
|
// ── 6. Command — start-task pre-filled with this task ─────────────────────
|
|
3326
4319
|
const startCmd = `---\nname: start-task\ndescription: Start a task from InternalTool — brief, role rules, moves to In Progress.\n---\n\nDefault task: \`${taskId}\` (${taskKey} — ${taskTitle})\n\n1. Ask the user to confirm the task ID or use the default above\n2. Call \`kickoff_task\` with confirmed=false to show the brief\n3. Present: role, description, claimedFiles, cursorRules\n4. Ask "Ready to confirm and move to In Progress?"\n5. If yes, call \`kickoff_task\` again with confirmed=true\n`
|
|
3327
4320
|
writeFileSync(join(commandsDir, 'start-task.md'), startCmd, 'utf8')
|
|
@@ -3382,6 +4375,40 @@ function trackTaskActivity(taskId, toolName, opts = {}) {
|
|
|
3382
4375
|
}).catch(() => {})
|
|
3383
4376
|
}
|
|
3384
4377
|
|
|
4378
|
+
/**
|
|
4379
|
+
* Role-based tool access guard.
|
|
4380
|
+
* Checks the task's active agentRole against the project's agentConfig.roles[].allowedTools.
|
|
4381
|
+
* Returns an errorText response if the tool is blocked, null if it is allowed.
|
|
4382
|
+
* - No agentRole set on the task → allow (unrestricted session)
|
|
4383
|
+
* - Role found but allowedTools is empty → allow (empty = no restrictions)
|
|
4384
|
+
* - Tool name not in allowedTools → block with clear message
|
|
4385
|
+
*/
|
|
4386
|
+
async function guardRoleTool(taskId, toolName) {
|
|
4387
|
+
if (!taskId) return null
|
|
4388
|
+
try {
|
|
4389
|
+
const taskRes = await api.get(`/api/tasks/${taskId}`)
|
|
4390
|
+
if (!taskRes?.success) return null
|
|
4391
|
+
const task = taskRes.data.task
|
|
4392
|
+
if (!task.agentRole) return null
|
|
4393
|
+
|
|
4394
|
+
const projRes = await api.get(`/api/projects/${task.project}`)
|
|
4395
|
+
if (!projRes?.success) return null
|
|
4396
|
+
const roleConfig = (projRes.data.project?.agentConfig?.roles || [])
|
|
4397
|
+
.find(r => r.name === task.agentRole)
|
|
4398
|
+
if (!roleConfig || !roleConfig.allowedTools?.length) return null
|
|
4399
|
+
|
|
4400
|
+
if (!roleConfig.allowedTools.includes(toolName)) {
|
|
4401
|
+
return errorText(
|
|
4402
|
+
`Permission denied — role '${task.agentRole}' is not allowed to call '${toolName}'.\n` +
|
|
4403
|
+
`Allowed tools for this role: ${roleConfig.allowedTools.join(', ')}`
|
|
4404
|
+
)
|
|
4405
|
+
}
|
|
4406
|
+
} catch {
|
|
4407
|
+
// guard failure is non-blocking — never crash the tool
|
|
4408
|
+
}
|
|
4409
|
+
return null
|
|
4410
|
+
}
|
|
4411
|
+
|
|
3385
4412
|
/** Delete the task-specific cursor rules file when work is complete. */
|
|
3386
4413
|
function deleteCursorRulesFile(taskKey, startPath) {
|
|
3387
4414
|
try {
|
|
@@ -4979,6 +6006,2134 @@ Use this when the developer says "clean up branches", "delete merged branches",
|
|
|
4979
6006
|
)
|
|
4980
6007
|
}
|
|
4981
6008
|
|
|
6009
|
+
// ── Merge Pipeline AI Tools ───────────────────────────────────────────────────
|
|
6010
|
+
// Tools that give AI agents a full understanding of the git merge pipeline
|
|
6011
|
+
// and guide developers + reviewers through shipping safely.
|
|
6012
|
+
function registerMergePipelineTools(server, { scopedProjectId } = {}) {
|
|
6013
|
+
|
|
6014
|
+
// ── get_branch_pipeline_status ────────────────────────────────────────────
|
|
6015
|
+
server.tool(
|
|
6016
|
+
'get_branch_pipeline_status',
|
|
6017
|
+
`Get the full merge pipeline status for a project so an AI agent can understand every developer's branch state.
|
|
6018
|
+
|
|
6019
|
+
Returns the suggested merge order, each branch's behind/ahead commit counts, conflict pairs (tasks touching the same files),
|
|
6020
|
+
and a plain-English summary of what each developer needs to do before they can merge.
|
|
6021
|
+
|
|
6022
|
+
Use this tool FIRST when a developer asks "what do I need to do to merge?" or when a reviewer asks "which PRs are safe to merge?"
|
|
6023
|
+
Always call this before guide_my_merge or assess_review_readiness to orient yourself.`,
|
|
6024
|
+
{
|
|
6025
|
+
projectId: z.string().describe("Project's MongoDB ObjectId"),
|
|
6026
|
+
},
|
|
6027
|
+
async ({ projectId }) => {
|
|
6028
|
+
if (scopedProjectId && projectId !== scopedProjectId) {
|
|
6029
|
+
return errorText(`Access denied: session is scoped to project ${scopedProjectId}`)
|
|
6030
|
+
}
|
|
6031
|
+
try {
|
|
6032
|
+
const res = await api.get(`/api/projects/${projectId}/github/git-tree`)
|
|
6033
|
+
if (!res?.success) return errorText('Could not fetch git tree data')
|
|
6034
|
+
|
|
6035
|
+
const { branches = [], defaultBranch = 'main', baseBranchCommits = [], fetchedAt } = res.data
|
|
6036
|
+
|
|
6037
|
+
// Compute conflict pairs
|
|
6038
|
+
const conflictPairs = []
|
|
6039
|
+
for (let i = 0; i < branches.length; i++) {
|
|
6040
|
+
for (let j = i + 1; j < branches.length; j++) {
|
|
6041
|
+
const shared = (branches[i].claimedFiles || []).filter(f => (branches[j].claimedFiles || []).includes(f))
|
|
6042
|
+
if (shared.length) conflictPairs.push({ a: branches[i], b: branches[j], sharedFiles: shared })
|
|
6043
|
+
}
|
|
6044
|
+
}
|
|
6045
|
+
|
|
6046
|
+
// Compute conflict counts
|
|
6047
|
+
const conflictCount = {}
|
|
6048
|
+
branches.forEach(b => { conflictCount[b.taskId] = 0 })
|
|
6049
|
+
conflictPairs.forEach(({ a, b }) => { conflictCount[a.taskId]++; conflictCount[b.taskId]++ })
|
|
6050
|
+
|
|
6051
|
+
// Merge order: fewest conflicts → least behind → most ahead
|
|
6052
|
+
const ordered = [...branches]
|
|
6053
|
+
.filter(b => !b.branchError)
|
|
6054
|
+
.sort((a, b) => {
|
|
6055
|
+
const c = (conflictCount[a.taskId] || 0) - (conflictCount[b.taskId] || 0)
|
|
6056
|
+
if (c) return c
|
|
6057
|
+
const bd = (a.compare?.behindBy ?? 99) - (b.compare?.behindBy ?? 99)
|
|
6058
|
+
if (bd) return bd
|
|
6059
|
+
return (b.compare?.aheadBy ?? 0) - (a.compare?.aheadBy ?? 0)
|
|
6060
|
+
})
|
|
6061
|
+
|
|
6062
|
+
// Build per-branch guidance summary
|
|
6063
|
+
const pipeline = ordered.map((b, idx) => {
|
|
6064
|
+
const rank = idx + 1
|
|
6065
|
+
const behind = b.compare?.behindBy || 0
|
|
6066
|
+
const ahead = b.compare?.aheadBy || 0
|
|
6067
|
+
const myConflicts = conflictPairs
|
|
6068
|
+
.filter(p => p.a.taskId === b.taskId || p.b.taskId === b.taskId)
|
|
6069
|
+
.map(p => ({
|
|
6070
|
+
partnerKey: p.a.taskId === b.taskId ? p.b.taskKey : p.a.taskKey,
|
|
6071
|
+
sharedFiles: p.sharedFiles,
|
|
6072
|
+
}))
|
|
6073
|
+
const blockedBy = myConflicts
|
|
6074
|
+
.map(c => c.partnerKey)
|
|
6075
|
+
.filter(key => ordered.slice(0, idx).some(ob => ob.taskKey === key))
|
|
6076
|
+
|
|
6077
|
+
let situation = ''
|
|
6078
|
+
let nextAction = ''
|
|
6079
|
+
|
|
6080
|
+
if (blockedBy.length > 0) {
|
|
6081
|
+
situation = `BLOCKED — conflicts with ${blockedBy.join(', ')} which rank higher in merge order.`
|
|
6082
|
+
nextAction = `Wait for ${blockedBy.join(' and ')} to merge first. Coordinate via ping to agree on who refactors the shared files.`
|
|
6083
|
+
} else if (behind > 0 && myConflicts.length > 0) {
|
|
6084
|
+
situation = `NEEDS REBASE + has ${myConflicts.length} file conflict(s).`
|
|
6085
|
+
nextAction = `Rebase onto ${defaultBranch} first (git fetch origin && git rebase origin/${defaultBranch}), resolve conflicts in shared files, then push.`
|
|
6086
|
+
} else if (behind > 0) {
|
|
6087
|
+
situation = `BEHIND by ${behind} commit${behind !== 1 ? 's' : ''} — ${defaultBranch} has moved since this branch was created.`
|
|
6088
|
+
nextAction = `Run: git fetch origin && git rebase origin/${defaultBranch}. Fix any conflicts, then git push --force-with-lease.`
|
|
6089
|
+
} else if (!b.prNumber) {
|
|
6090
|
+
situation = `UP TO DATE — ${ahead} commit${ahead !== 1 ? 's' : ''} ready to ship. No PR open yet.`
|
|
6091
|
+
nextAction = `Open a pull request targeting ${defaultBranch} to start the review process.`
|
|
6092
|
+
} else if (rank === 1) {
|
|
6093
|
+
situation = `READY TO MERGE — ranked #1, no blockers, PR #${b.prNumber} open.`
|
|
6094
|
+
nextAction = `Get PR approved and merge when reviews are complete.`
|
|
6095
|
+
} else {
|
|
6096
|
+
situation = `WAITING — rank #${rank}. PR open but waiting for higher-ranked tasks to merge first.`
|
|
6097
|
+
nextAction = `Monitor the pipeline. After tasks ranked #1–${rank - 1} merge, rebase if needed and get review.`
|
|
6098
|
+
}
|
|
6099
|
+
|
|
6100
|
+
return {
|
|
6101
|
+
rank,
|
|
6102
|
+
taskKey: b.taskKey,
|
|
6103
|
+
taskTitle: b.taskTitle,
|
|
6104
|
+
taskId: b.taskId,
|
|
6105
|
+
branch: b.headBranch,
|
|
6106
|
+
assignees: b.assignees.map(a => a.name).join(', ') || 'Unassigned',
|
|
6107
|
+
status: b.statusColor,
|
|
6108
|
+
behind,
|
|
6109
|
+
ahead,
|
|
6110
|
+
prNumber: b.prNumber || null,
|
|
6111
|
+
prUrl: b.prUrl || null,
|
|
6112
|
+
conflicts: myConflicts,
|
|
6113
|
+
blockedBy,
|
|
6114
|
+
situation,
|
|
6115
|
+
nextAction,
|
|
6116
|
+
}
|
|
6117
|
+
})
|
|
6118
|
+
|
|
6119
|
+
const readySummary = pipeline.filter(b => b.blockedBy.length === 0 && b.behind === 0 && b.prNumber)
|
|
6120
|
+
const behindSummary = pipeline.filter(b => b.behind > 0)
|
|
6121
|
+
const blockedSummary = pipeline.filter(b => b.blockedBy.length > 0)
|
|
6122
|
+
|
|
6123
|
+
return text({
|
|
6124
|
+
defaultBranch,
|
|
6125
|
+
fetchedAt,
|
|
6126
|
+
totalBranches: branches.length,
|
|
6127
|
+
mergeOrder: pipeline,
|
|
6128
|
+
summary: {
|
|
6129
|
+
readyToMerge: readySummary.map(b => `${b.taskKey} (#${b.rank})`).join(', ') || 'None',
|
|
6130
|
+
needsRebase: behindSummary.map(b => `${b.taskKey} (${b.behind} behind)`).join(', ') || 'None',
|
|
6131
|
+
blocked: blockedSummary.map(b => `${b.taskKey} blocked by ${b.blockedBy.join(', ')}`).join('; ') || 'None',
|
|
6132
|
+
conflictPairs: conflictPairs.map(p => `${p.a.taskKey} ↔ ${p.b.taskKey}: ${p.sharedFiles.join(', ')}`),
|
|
6133
|
+
},
|
|
6134
|
+
agentInstruction: 'To guide a specific developer, call guide_my_merge with their taskId. To review a PR, call assess_review_readiness with the taskId.',
|
|
6135
|
+
})
|
|
6136
|
+
} catch (e) {
|
|
6137
|
+
return errorText(e.message)
|
|
6138
|
+
}
|
|
6139
|
+
}
|
|
6140
|
+
)
|
|
6141
|
+
|
|
6142
|
+
// ── guide_my_merge ────────────────────────────────────────────────────────
|
|
6143
|
+
server.tool(
|
|
6144
|
+
'guide_my_merge',
|
|
6145
|
+
`Generate a complete, personalized step-by-step merge guide for a specific developer's branch.
|
|
6146
|
+
|
|
6147
|
+
Call get_branch_pipeline_status first to find the taskId.
|
|
6148
|
+
Then call this tool to get the exact git commands the developer needs to run — in order — to rebase, resolve conflicts, and ship their work without losing any changes.
|
|
6149
|
+
|
|
6150
|
+
The guide is safe to follow verbatim:
|
|
6151
|
+
- It checks local state before suggesting stash/commit
|
|
6152
|
+
- It never suggests force-push without --force-with-lease
|
|
6153
|
+
- It accounts for conflict files shared with other tasks
|
|
6154
|
+
- It ends with the exact next human action (open PR / request review / wait)
|
|
6155
|
+
|
|
6156
|
+
Use this when a developer asks: "what do I do?", "how do I rebase?", "I have conflicts", "how do I ship this?", "I'm stuck"`,
|
|
6157
|
+
{
|
|
6158
|
+
projectId: z.string().describe("Project's MongoDB ObjectId"),
|
|
6159
|
+
taskId: z.string().describe("Task's MongoDB ObjectId (get from get_branch_pipeline_status)"),
|
|
6160
|
+
repoPath: z.string().optional().describe('Absolute path to the developer\'s local git repo'),
|
|
6161
|
+
},
|
|
6162
|
+
async ({ projectId, taskId, repoPath }) => {
|
|
6163
|
+
if (scopedProjectId && projectId !== scopedProjectId) {
|
|
6164
|
+
return errorText(`Access denied: session is scoped to project ${scopedProjectId}`)
|
|
6165
|
+
}
|
|
6166
|
+
try {
|
|
6167
|
+
// Fetch pipeline data + task detail in parallel
|
|
6168
|
+
const [treeRes, taskRes] = await Promise.all([
|
|
6169
|
+
api.get(`/api/projects/${projectId}/github/git-tree`),
|
|
6170
|
+
api.get(`/api/tasks/${taskId}`),
|
|
6171
|
+
])
|
|
6172
|
+
if (!treeRes?.success) return errorText('Could not fetch git tree')
|
|
6173
|
+
|
|
6174
|
+
const { branches = [], defaultBranch = 'main' } = treeRes.data
|
|
6175
|
+
const branch = branches.find(b => b.taskId === taskId)
|
|
6176
|
+
if (!branch) return errorText(`Task ${taskId} not found in active branches. Make sure it is In Progress or In Review with a branch set.`)
|
|
6177
|
+
|
|
6178
|
+
const task = taskRes?.data?.task || {}
|
|
6179
|
+
const behind = branch.compare?.behindBy || 0
|
|
6180
|
+
const ahead = branch.compare?.aheadBy || 0
|
|
6181
|
+
|
|
6182
|
+
// Conflict pairs involving this branch
|
|
6183
|
+
const conflictPairs = []
|
|
6184
|
+
for (const ob of branches) {
|
|
6185
|
+
if (ob.taskId === taskId) continue
|
|
6186
|
+
const shared = (branch.claimedFiles || []).filter(f => (ob.claimedFiles || []).includes(f))
|
|
6187
|
+
if (shared.length) conflictPairs.push({ partnerKey: ob.taskKey, partnerBranch: ob.headBranch, sharedFiles: shared })
|
|
6188
|
+
}
|
|
6189
|
+
|
|
6190
|
+
// Check local git state if repoPath provided
|
|
6191
|
+
let localState = null
|
|
6192
|
+
const cwd = repoPath || process.cwd()
|
|
6193
|
+
const repoRoot = findRepoRoot(cwd)
|
|
6194
|
+
if (repoRoot) {
|
|
6195
|
+
try {
|
|
6196
|
+
const porcelain = runGit('status --porcelain=v1', repoRoot)
|
|
6197
|
+
const currentBranch = runGit('branch --show-current', repoRoot)
|
|
6198
|
+
const { localState: ls, modified, staged } = parseGitStatus(porcelain)
|
|
6199
|
+
localState = { state: ls, currentBranch, modifiedCount: modified.length, stagedCount: staged.length }
|
|
6200
|
+
} catch { /* git unavailable */ }
|
|
6201
|
+
}
|
|
6202
|
+
|
|
6203
|
+
// Build the step-by-step guide
|
|
6204
|
+
const steps = []
|
|
6205
|
+
let stepNum = 1
|
|
6206
|
+
|
|
6207
|
+
// Step 0: Save work if needed
|
|
6208
|
+
if (localState && localState.state !== 'clean') {
|
|
6209
|
+
if (localState.state === 'modified' || localState.state === 'staged') {
|
|
6210
|
+
steps.push({
|
|
6211
|
+
step: stepNum++,
|
|
6212
|
+
title: 'Save your current work',
|
|
6213
|
+
why: `You have ${localState.modifiedCount + localState.stagedCount} unsaved change(s). You must stash or commit before rebasing, or git will refuse (or worse, silently mix changes).`,
|
|
6214
|
+
commands: [
|
|
6215
|
+
{ label: 'Option A — stash (temporary, reversible)', cmd: `git stash push -m "wip: ${branch.taskKey} before rebase"` },
|
|
6216
|
+
{ label: 'Option B — commit (permanent, survives crashes)', cmd: `git add -A && git commit -m "chore: wip checkpoint before rebase [${branch.taskKey}]"` },
|
|
6217
|
+
],
|
|
6218
|
+
note: 'If you stash, you will pop it back in step 4. Commit is safer if your machine might restart.',
|
|
6219
|
+
})
|
|
6220
|
+
}
|
|
6221
|
+
}
|
|
6222
|
+
|
|
6223
|
+
// Step 1: Switch to task branch (if not already on it)
|
|
6224
|
+
const targetBranch = branch.headBranch
|
|
6225
|
+
steps.push({
|
|
6226
|
+
step: stepNum++,
|
|
6227
|
+
title: `Switch to your task branch`,
|
|
6228
|
+
why: `All changes must be on branch "${targetBranch}" — not on ${defaultBranch} or another branch.`,
|
|
6229
|
+
commands: [
|
|
6230
|
+
{ label: 'Check out your branch', cmd: `git checkout ${targetBranch}` },
|
|
6231
|
+
],
|
|
6232
|
+
})
|
|
6233
|
+
|
|
6234
|
+
// Step 2: Fetch latest
|
|
6235
|
+
steps.push({
|
|
6236
|
+
step: stepNum++,
|
|
6237
|
+
title: `Fetch the latest from GitHub`,
|
|
6238
|
+
why: `Your local copy of origin/${defaultBranch} might be stale. This downloads new commits without changing your files yet.`,
|
|
6239
|
+
commands: [
|
|
6240
|
+
{ label: 'Fetch all remotes', cmd: `git fetch origin` },
|
|
6241
|
+
],
|
|
6242
|
+
})
|
|
6243
|
+
|
|
6244
|
+
if (behind > 0) {
|
|
6245
|
+
// Step 3: Rebase
|
|
6246
|
+
steps.push({
|
|
6247
|
+
step: stepNum++,
|
|
6248
|
+
title: `Rebase onto ${defaultBranch} (${behind} new commit${behind !== 1 ? 's' : ''} to include)`,
|
|
6249
|
+
why: `${defaultBranch} has ${behind} commit${behind !== 1 ? 's' : ''} your branch does not have. Rebase replays your commits on top of the latest ${defaultBranch}, keeping history clean. This is safer than a merge for feature branches.`,
|
|
6250
|
+
commands: [
|
|
6251
|
+
{ label: 'Rebase onto latest main', cmd: `git rebase origin/${defaultBranch}` },
|
|
6252
|
+
],
|
|
6253
|
+
note: conflictPairs.length > 0
|
|
6254
|
+
? `⚠️ You share files with ${conflictPairs.map(c => c.partnerKey).join(', ')}. If rebase stops with conflicts, follow the next step.`
|
|
6255
|
+
: 'If rebase completes with no conflicts, skip straight to step ' + (stepNum + 1) + '.',
|
|
6256
|
+
})
|
|
6257
|
+
|
|
6258
|
+
// Step 4: Conflict resolution (if any)
|
|
6259
|
+
if (conflictPairs.length > 0) {
|
|
6260
|
+
const allConflictFiles = [...new Set(conflictPairs.flatMap(c => c.sharedFiles))]
|
|
6261
|
+
steps.push({
|
|
6262
|
+
step: stepNum++,
|
|
6263
|
+
title: 'Resolve conflicts (if rebase stops)',
|
|
6264
|
+
why: `Your branch touches the same files as ${conflictPairs.map(c => c.partnerKey).join(' and ')}. If those files were changed in ${defaultBranch}, git will pause and ask you to decide which changes to keep.`,
|
|
6265
|
+
commands: [
|
|
6266
|
+
{ label: 'Open conflict file(s) and fix the markers', cmd: `# Edit these files and remove <<<<<<, =======, >>>>>>> markers:\n${allConflictFiles.map(f => `# ${f}`).join('\n')}` },
|
|
6267
|
+
{ label: 'Stage each resolved file', cmd: `git add ${allConflictFiles.join(' ')}` },
|
|
6268
|
+
{ label: 'Continue the rebase', cmd: `git rebase --continue` },
|
|
6269
|
+
{ label: 'If things go wrong, abort and start over', cmd: `git rebase --abort` },
|
|
6270
|
+
],
|
|
6271
|
+
note: 'Keep YOUR changes for files you own. Merge carefully for shared files — check what your partner changed before discarding.',
|
|
6272
|
+
})
|
|
6273
|
+
}
|
|
6274
|
+
}
|
|
6275
|
+
|
|
6276
|
+
// Step: Pop stash if stashed
|
|
6277
|
+
if (localState && localState.state !== 'clean') {
|
|
6278
|
+
steps.push({
|
|
6279
|
+
step: stepNum++,
|
|
6280
|
+
title: 'Restore your work in progress (if you stashed)',
|
|
6281
|
+
why: 'Pop your stash to get your unsaved changes back on top of the rebased branch.',
|
|
6282
|
+
commands: [
|
|
6283
|
+
{ label: 'Pop the stash', cmd: `git stash pop` },
|
|
6284
|
+
],
|
|
6285
|
+
note: 'If stash pop causes conflicts (your wip vs the rebased code), resolve them the same way as above.',
|
|
6286
|
+
})
|
|
6287
|
+
}
|
|
6288
|
+
|
|
6289
|
+
// Step: Push
|
|
6290
|
+
steps.push({
|
|
6291
|
+
step: stepNum++,
|
|
6292
|
+
title: 'Push to GitHub',
|
|
6293
|
+
why: 'After a rebase, your local commit history has been rewritten. You must force-push — but use --force-with-lease which refuses to overwrite if someone else pushed in the meantime (safety net).',
|
|
6294
|
+
commands: [
|
|
6295
|
+
{ label: 'Safe force push', cmd: `git push origin ${targetBranch} --force-with-lease` },
|
|
6296
|
+
],
|
|
6297
|
+
note: 'NEVER use --force without --with-lease. --force-with-lease is safe because it aborts if another push happened.',
|
|
6298
|
+
})
|
|
6299
|
+
|
|
6300
|
+
// Step: PR
|
|
6301
|
+
if (!branch.prNumber) {
|
|
6302
|
+
steps.push({
|
|
6303
|
+
step: stepNum++,
|
|
6304
|
+
title: `Open a pull request`,
|
|
6305
|
+
why: `You have ${ahead} commit${ahead !== 1 ? 's' : ''} ready to ship but no PR open. A PR is how your code gets reviewed and merged into ${defaultBranch}.`,
|
|
6306
|
+
commands: [
|
|
6307
|
+
{ label: 'Open a PR via GitHub CLI', cmd: `gh pr create --base ${defaultBranch} --head ${targetBranch} --title "${task.title || branch.taskKey}" --body "Closes #${branch.taskKey}"` },
|
|
6308
|
+
],
|
|
6309
|
+
note: 'Or go to GitHub and click "Compare & pull request" — GitHub shows this banner automatically after a push.',
|
|
6310
|
+
})
|
|
6311
|
+
} else {
|
|
6312
|
+
steps.push({
|
|
6313
|
+
step: stepNum++,
|
|
6314
|
+
title: `Request review on PR #${branch.prNumber}`,
|
|
6315
|
+
why: `Your PR is already open. After pushing the rebase, request reviewers so they can approve and merge.`,
|
|
6316
|
+
commands: [
|
|
6317
|
+
{ label: 'Request review via GitHub CLI', cmd: `gh pr review ${branch.prNumber} --request-changes` },
|
|
6318
|
+
],
|
|
6319
|
+
note: `PR URL: ${branch.prUrl || 'see GitHub'}`,
|
|
6320
|
+
})
|
|
6321
|
+
}
|
|
6322
|
+
|
|
6323
|
+
// Compile quick-copy block
|
|
6324
|
+
const quickCommands = steps
|
|
6325
|
+
.flatMap(s => s.commands.map(c => c.cmd))
|
|
6326
|
+
.filter(cmd => !cmd.startsWith('#'))
|
|
6327
|
+
|
|
6328
|
+
return text({
|
|
6329
|
+
taskKey: branch.taskKey,
|
|
6330
|
+
taskTitle: branch.taskTitle,
|
|
6331
|
+
branch: targetBranch,
|
|
6332
|
+
assignees: branch.assignees.map(a => a.name).join(', '),
|
|
6333
|
+
defaultBranch,
|
|
6334
|
+
situation: {
|
|
6335
|
+
behind,
|
|
6336
|
+
ahead,
|
|
6337
|
+
hasConflicts: conflictPairs.length > 0,
|
|
6338
|
+
conflictsWith: conflictPairs.map(c => c.partnerKey),
|
|
6339
|
+
prNumber: branch.prNumber || null,
|
|
6340
|
+
localState: localState?.state || 'unknown',
|
|
6341
|
+
},
|
|
6342
|
+
guide: steps,
|
|
6343
|
+
quickCopy: {
|
|
6344
|
+
description: 'Run these commands in order (skip lines starting with #)',
|
|
6345
|
+
commands: quickCommands,
|
|
6346
|
+
asOneLiner: quickCommands.join(' && '),
|
|
6347
|
+
},
|
|
6348
|
+
agentInstruction: `Walk the developer through each step in order. Paste the command for each step, explain what it does, and wait for them to confirm before moving to the next step. If they hit an error, diagnose it before continuing.`,
|
|
6349
|
+
})
|
|
6350
|
+
} catch (e) {
|
|
6351
|
+
return errorText(e.message)
|
|
6352
|
+
}
|
|
6353
|
+
}
|
|
6354
|
+
)
|
|
6355
|
+
|
|
6356
|
+
// ── assess_review_readiness ───────────────────────────────────────────────
|
|
6357
|
+
server.tool(
|
|
6358
|
+
'assess_review_readiness',
|
|
6359
|
+
`Assess whether a PR is ready for code review and safe to merge — for AI code reviewers and human reviewers.
|
|
6360
|
+
|
|
6361
|
+
Returns a structured reviewer checklist covering:
|
|
6362
|
+
- Merge safety: is the branch up to date with the default branch? Will it cause conflicts?
|
|
6363
|
+
- Pipeline position: is this the right branch to merge next?
|
|
6364
|
+
- Code change context: what files changed, what the task was trying to accomplish
|
|
6365
|
+
- Risk indicators: stale branch, shared files with other tasks, no rebase done
|
|
6366
|
+
|
|
6367
|
+
Call get_branch_pipeline_status first to find the taskId and check merge order.
|
|
6368
|
+
Then call get_review_bundle for the actual code diff.
|
|
6369
|
+
Use this tool between those two to understand the merge context.`,
|
|
6370
|
+
{
|
|
6371
|
+
projectId: z.string().describe("Project's MongoDB ObjectId"),
|
|
6372
|
+
taskId: z.string().describe("Task's MongoDB ObjectId"),
|
|
6373
|
+
},
|
|
6374
|
+
async ({ projectId, taskId }) => {
|
|
6375
|
+
if (scopedProjectId && projectId !== scopedProjectId) {
|
|
6376
|
+
return errorText(`Access denied: session is scoped to project ${scopedProjectId}`)
|
|
6377
|
+
}
|
|
6378
|
+
try {
|
|
6379
|
+
const [treeRes, taskRes] = await Promise.all([
|
|
6380
|
+
api.get(`/api/projects/${projectId}/github/git-tree`),
|
|
6381
|
+
api.get(`/api/tasks/${taskId}`),
|
|
6382
|
+
])
|
|
6383
|
+
if (!treeRes?.success) return errorText('Could not fetch git tree')
|
|
6384
|
+
|
|
6385
|
+
const { branches = [], defaultBranch = 'main' } = treeRes.data
|
|
6386
|
+
const branch = branches.find(b => b.taskId === taskId)
|
|
6387
|
+
if (!branch) return errorText(`Task ${taskId} not found in active branches`)
|
|
6388
|
+
|
|
6389
|
+
const task = taskRes?.data?.task || {}
|
|
6390
|
+
const behind = branch.compare?.behindBy || 0
|
|
6391
|
+
const ahead = branch.compare?.aheadBy || 0
|
|
6392
|
+
|
|
6393
|
+
// Find merge order rank
|
|
6394
|
+
const conflictCount = {}
|
|
6395
|
+
branches.forEach(b => { conflictCount[b.taskId] = 0 })
|
|
6396
|
+
const allConflictPairs = []
|
|
6397
|
+
for (let i = 0; i < branches.length; i++) {
|
|
6398
|
+
for (let j = i + 1; j < branches.length; j++) {
|
|
6399
|
+
const shared = (branches[i].claimedFiles || []).filter(f => (branches[j].claimedFiles || []).includes(f))
|
|
6400
|
+
if (shared.length) {
|
|
6401
|
+
allConflictPairs.push({ a: branches[i], b: branches[j], sharedFiles: shared })
|
|
6402
|
+
conflictCount[branches[i].taskId]++
|
|
6403
|
+
conflictCount[branches[j].taskId]++
|
|
6404
|
+
}
|
|
6405
|
+
}
|
|
6406
|
+
}
|
|
6407
|
+
const ordered = [...branches]
|
|
6408
|
+
.filter(b => !b.branchError)
|
|
6409
|
+
.sort((a, b) => {
|
|
6410
|
+
const c = (conflictCount[a.taskId] || 0) - (conflictCount[b.taskId] || 0)
|
|
6411
|
+
if (c) return c
|
|
6412
|
+
return (a.compare?.behindBy ?? 99) - (b.compare?.behindBy ?? 99)
|
|
6413
|
+
})
|
|
6414
|
+
const rank = ordered.findIndex(b => b.taskId === taskId) + 1
|
|
6415
|
+
|
|
6416
|
+
const myConflicts = allConflictPairs
|
|
6417
|
+
.filter(p => p.a.taskId === taskId || p.b.taskId === taskId)
|
|
6418
|
+
.map(p => ({
|
|
6419
|
+
partnerKey: p.a.taskId === taskId ? p.b.taskKey : p.a.taskKey,
|
|
6420
|
+
sharedFiles: p.sharedFiles,
|
|
6421
|
+
}))
|
|
6422
|
+
|
|
6423
|
+
const blockedBy = myConflicts
|
|
6424
|
+
.map(c => c.partnerKey)
|
|
6425
|
+
.filter(key => ordered.slice(0, rank - 1).some(ob => ob.taskKey === key))
|
|
6426
|
+
|
|
6427
|
+
// Staleness check
|
|
6428
|
+
const lastCommitDate = branch.tipCommit?.date ? new Date(branch.tipCommit.date) : null
|
|
6429
|
+
const hoursSinceCommit = lastCommitDate
|
|
6430
|
+
? Math.round((Date.now() - lastCommitDate.getTime()) / 3600000)
|
|
6431
|
+
: null
|
|
6432
|
+
const isStale = hoursSinceCommit !== null && hoursSinceCommit >= 48
|
|
6433
|
+
|
|
6434
|
+
// Build checklist
|
|
6435
|
+
const checklist = [
|
|
6436
|
+
{
|
|
6437
|
+
item: 'Branch is up to date with ' + defaultBranch,
|
|
6438
|
+
passed: behind === 0,
|
|
6439
|
+
detail: behind === 0
|
|
6440
|
+
? 'Branch is current — no rebase needed before merge'
|
|
6441
|
+
: `Branch is ${behind} commit${behind !== 1 ? 's' : ''} behind ${defaultBranch}. Developer must rebase before this can be safely merged.`,
|
|
6442
|
+
blocking: behind > 0,
|
|
6443
|
+
},
|
|
6444
|
+
{
|
|
6445
|
+
item: 'No file conflicts with other active branches',
|
|
6446
|
+
passed: myConflicts.length === 0,
|
|
6447
|
+
detail: myConflicts.length === 0
|
|
6448
|
+
? 'No other branches touch the same files'
|
|
6449
|
+
: `Shares files with: ${myConflicts.map(c => `${c.partnerKey} (${c.sharedFiles.join(', ')})`).join('; ')}`,
|
|
6450
|
+
blocking: blockedBy.length > 0,
|
|
6451
|
+
},
|
|
6452
|
+
{
|
|
6453
|
+
item: 'Correct merge order position',
|
|
6454
|
+
passed: rank === 1 || blockedBy.length === 0,
|
|
6455
|
+
detail: rank === 1
|
|
6456
|
+
? 'This is the #1 ranked branch — first in line to merge'
|
|
6457
|
+
: blockedBy.length > 0
|
|
6458
|
+
? `This branch is BLOCKED — must wait for ${blockedBy.join(', ')} to merge first (rank #${rank})`
|
|
6459
|
+
: `Ranked #${rank} — no blockers currently, but ${rank - 1} higher-ranked task${rank > 2 ? 's' : ''} should merge first`,
|
|
6460
|
+
blocking: blockedBy.length > 0,
|
|
6461
|
+
},
|
|
6462
|
+
{
|
|
6463
|
+
item: 'Branch has commits to merge',
|
|
6464
|
+
passed: ahead > 0,
|
|
6465
|
+
detail: ahead > 0
|
|
6466
|
+
? `${ahead} commit${ahead !== 1 ? 's' : ''} ready to merge into ${defaultBranch}`
|
|
6467
|
+
: 'No commits ahead of main — nothing to merge',
|
|
6468
|
+
blocking: ahead === 0,
|
|
6469
|
+
},
|
|
6470
|
+
{
|
|
6471
|
+
item: 'Pull request is open',
|
|
6472
|
+
passed: !!branch.prNumber,
|
|
6473
|
+
detail: branch.prNumber
|
|
6474
|
+
? `PR #${branch.prNumber} is open: ${branch.prUrl}`
|
|
6475
|
+
: 'No PR open — developer needs to create one before review can proceed',
|
|
6476
|
+
blocking: !branch.prNumber,
|
|
6477
|
+
},
|
|
6478
|
+
{
|
|
6479
|
+
item: 'Branch not stale',
|
|
6480
|
+
passed: !isStale,
|
|
6481
|
+
detail: isStale
|
|
6482
|
+
? `Last commit was ${hoursSinceCommit} hours ago — branch may be abandoned or developer is stuck. Check in with them.`
|
|
6483
|
+
: lastCommitDate
|
|
6484
|
+
? `Last commit ${hoursSinceCommit} hours ago — active`
|
|
6485
|
+
: 'No commit data available',
|
|
6486
|
+
blocking: false,
|
|
6487
|
+
},
|
|
6488
|
+
]
|
|
6489
|
+
|
|
6490
|
+
const blockingIssues = checklist.filter(c => !c.passed && c.blocking)
|
|
6491
|
+
const warnings = checklist.filter(c => !c.passed && !c.blocking)
|
|
6492
|
+
const passed = checklist.filter(c => c.passed)
|
|
6493
|
+
|
|
6494
|
+
const overallReady = blockingIssues.length === 0
|
|
6495
|
+
|
|
6496
|
+
return text({
|
|
6497
|
+
taskKey: branch.taskKey,
|
|
6498
|
+
taskTitle: branch.taskTitle,
|
|
6499
|
+
branch: branch.headBranch,
|
|
6500
|
+
assignees: branch.assignees.map(a => a.name).join(', '),
|
|
6501
|
+
prNumber: branch.prNumber || null,
|
|
6502
|
+
prUrl: branch.prUrl || null,
|
|
6503
|
+
defaultBranch,
|
|
6504
|
+
mergeRank: rank,
|
|
6505
|
+
overallReady,
|
|
6506
|
+
verdict: overallReady
|
|
6507
|
+
? `✅ READY — ${branch.taskKey} can proceed to merge review. Check the code diff next with get_review_bundle.`
|
|
6508
|
+
: `❌ NOT READY — ${blockingIssues.length} blocking issue${blockingIssues.length !== 1 ? 's' : ''} must be resolved first.`,
|
|
6509
|
+
checklist: { blocking: blockingIssues, warnings, passed },
|
|
6510
|
+
stats: { behind, ahead, conflictCount: myConflicts.length, rank, isStale },
|
|
6511
|
+
agentInstruction: overallReady
|
|
6512
|
+
? 'Call get_review_bundle next to get the full code diff and plan for review. Then call submit_ai_review with your findings.'
|
|
6513
|
+
: `Tell the developer the blocking issues and call guide_my_merge to walk them through resolving them step by step.`,
|
|
6514
|
+
})
|
|
6515
|
+
} catch (e) {
|
|
6516
|
+
return errorText(e.message)
|
|
6517
|
+
}
|
|
6518
|
+
}
|
|
6519
|
+
)
|
|
6520
|
+
|
|
6521
|
+
// ── get_pipeline_report ───────────────────────────────────────────────────
|
|
6522
|
+
server.tool(
|
|
6523
|
+
'get_pipeline_report',
|
|
6524
|
+
`Generate a concise team pipeline status report for a standup, sprint review, or engineering lead briefing.
|
|
6525
|
+
|
|
6526
|
+
Returns a plain-English summary: how many branches are active, which are ready to merge, which need rebase, which are blocked, and a per-developer one-liner with their current status.
|
|
6527
|
+
|
|
6528
|
+
Use this when asked: "what is the current state of the pipeline?", "who is blocked?", "is anything ready to merge today?", "give me the standup report".
|
|
6529
|
+
|
|
6530
|
+
Call this instead of get_branch_pipeline_status when you need a narrative summary rather than raw structured data.`,
|
|
6531
|
+
{
|
|
6532
|
+
projectId: z.string().describe("Project's MongoDB ObjectId"),
|
|
6533
|
+
},
|
|
6534
|
+
async ({ projectId }) => {
|
|
6535
|
+
if (scopedProjectId && projectId !== scopedProjectId) {
|
|
6536
|
+
return errorText(`Access denied: session is scoped to project ${scopedProjectId}`)
|
|
6537
|
+
}
|
|
6538
|
+
try {
|
|
6539
|
+
const res = await api.get(`/api/projects/${projectId}/github/git-tree`)
|
|
6540
|
+
if (!res?.success) return errorText('Could not fetch git tree data')
|
|
6541
|
+
|
|
6542
|
+
const { branches = [], defaultBranch = 'main', fetchedAt } = res.data
|
|
6543
|
+
|
|
6544
|
+
const conflictPairs = []
|
|
6545
|
+
for (let i = 0; i < branches.length; i++) {
|
|
6546
|
+
for (let j = i + 1; j < branches.length; j++) {
|
|
6547
|
+
const shared = (branches[i].claimedFiles || []).filter(f => (branches[j].claimedFiles || []).includes(f))
|
|
6548
|
+
if (shared.length) conflictPairs.push({ a: branches[i], b: branches[j], sharedFiles: shared })
|
|
6549
|
+
}
|
|
6550
|
+
}
|
|
6551
|
+
const conflictCount = {}
|
|
6552
|
+
branches.forEach(b => { conflictCount[b.taskId] = 0 })
|
|
6553
|
+
conflictPairs.forEach(({ a, b }) => { conflictCount[a.taskId]++; conflictCount[b.taskId]++ })
|
|
6554
|
+
|
|
6555
|
+
const ordered = [...branches].filter(b => !b.branchError).sort((a, b) => {
|
|
6556
|
+
const c = (conflictCount[a.taskId] || 0) - (conflictCount[b.taskId] || 0)
|
|
6557
|
+
if (c) return c
|
|
6558
|
+
const bd = (a.compare?.behindBy ?? 99) - (b.compare?.behindBy ?? 99)
|
|
6559
|
+
if (bd) return bd
|
|
6560
|
+
return (b.compare?.aheadBy ?? 0) - (a.compare?.aheadBy ?? 0)
|
|
6561
|
+
})
|
|
6562
|
+
|
|
6563
|
+
const readyToMerge = ordered.filter(b => (b.compare?.behindBy || 0) === 0 && b.prNumber && conflictCount[b.taskId] === 0)
|
|
6564
|
+
const needsRebase = ordered.filter(b => (b.compare?.behindBy || 0) > 0)
|
|
6565
|
+
const hasConflicts = ordered.filter(b => conflictCount[b.taskId] > 0)
|
|
6566
|
+
const noPR = ordered.filter(b => !b.prNumber && (b.compare?.aheadBy || 0) > 0 && (b.compare?.behindBy || 0) === 0)
|
|
6567
|
+
const noData = branches.filter(b => b.branchError)
|
|
6568
|
+
|
|
6569
|
+
const devLines = ordered.map((b, idx) => {
|
|
6570
|
+
const behind = b.compare?.behindBy || 0
|
|
6571
|
+
const ahead = b.compare?.aheadBy || 0
|
|
6572
|
+
const dev = b.assignees.map(a => a.name).join('/') || 'Unassigned'
|
|
6573
|
+
const cc = conflictCount[b.taskId] || 0
|
|
6574
|
+
let s = ''
|
|
6575
|
+
if (behind > 0 && cc > 0) s = `⛔ Needs rebase (${behind} behind) + ${cc} conflict(s)`
|
|
6576
|
+
else if (behind > 0) s = `🟡 Needs rebase — ${behind} commit${behind !== 1 ? 's' : ''} behind ${defaultBranch}`
|
|
6577
|
+
else if (cc > 0) s = `🔴 File conflict with ${conflictPairs.filter(p => p.a.taskId === b.taskId || p.b.taskId === b.taskId).map(p => p.a.taskId === b.taskId ? p.b.taskKey : p.a.taskKey).join(', ')}`
|
|
6578
|
+
else if (!b.prNumber) s = `🔵 Up to date, ${ahead} commit${ahead !== 1 ? 's' : ''} — needs PR`
|
|
6579
|
+
else s = `✅ Ready — PR #${b.prNumber} (rank #${idx + 1})`
|
|
6580
|
+
return ` ${b.taskKey} [${dev}]: ${s}`
|
|
6581
|
+
})
|
|
6582
|
+
|
|
6583
|
+
const lines = [
|
|
6584
|
+
`# Pipeline Report — ${new Date(fetchedAt || Date.now()).toUTCString()}`,
|
|
6585
|
+
`Default branch: ${defaultBranch} | Active branches: ${ordered.length}${noData.length ? ` (${noData.length} unavailable)` : ''}`,
|
|
6586
|
+
'',
|
|
6587
|
+
`## Summary`,
|
|
6588
|
+
`- ✅ Ready to merge: ${readyToMerge.length} — ${readyToMerge.map(b => b.taskKey).join(', ') || 'none'}`,
|
|
6589
|
+
`- 🟡 Needs rebase: ${needsRebase.length} — ${needsRebase.map(b => `${b.taskKey}(↓${b.compare?.behindBy})`).join(', ') || 'none'}`,
|
|
6590
|
+
`- 🔴 Has conflicts: ${hasConflicts.length} — ${hasConflicts.map(b => b.taskKey).join(', ') || 'none'}`,
|
|
6591
|
+
`- 🔵 Needs PR: ${noPR.length} — ${noPR.map(b => b.taskKey).join(', ') || 'none'}`,
|
|
6592
|
+
'',
|
|
6593
|
+
`## Conflict pairs`,
|
|
6594
|
+
...(conflictPairs.length === 0
|
|
6595
|
+
? [' None']
|
|
6596
|
+
: conflictPairs.map(p => ` ${p.a.taskKey} ↔ ${p.b.taskKey}: ${p.sharedFiles.join(', ')}`)),
|
|
6597
|
+
'',
|
|
6598
|
+
`## Per-developer status (merge order)`,
|
|
6599
|
+
...devLines,
|
|
6600
|
+
'',
|
|
6601
|
+
`## Suggested next actions`,
|
|
6602
|
+
...[
|
|
6603
|
+
readyToMerge.length > 0 ? `- Merge ${readyToMerge[0].taskKey} first (PR #${readyToMerge[0].prNumber}) — call assess_review_readiness then get_review_bundle` : '',
|
|
6604
|
+
needsRebase.length > 0 ? `- Ask ${[...new Set(needsRebase.flatMap(b => b.assignees.map(a => a.name)))].filter(Boolean).join(', ')} to rebase — call guide_my_merge for each` : '',
|
|
6605
|
+
hasConflicts.length > 0 ? `- Coordinate conflicts: ${conflictPairs.map(p => `${p.a.taskKey} ↔ ${p.b.taskKey}`).join('; ')} — call coordinate_conflict` : '',
|
|
6606
|
+
].filter(Boolean),
|
|
6607
|
+
].join('\n')
|
|
6608
|
+
|
|
6609
|
+
return text({
|
|
6610
|
+
report: lines,
|
|
6611
|
+
stats: { total: ordered.length, readyToMerge: readyToMerge.length, needsRebase: needsRebase.length, hasConflicts: hasConflicts.length, needsPR: noPR.length, unavailable: noData.length },
|
|
6612
|
+
mergeOrder: ordered.map(b => b.taskKey),
|
|
6613
|
+
agentInstruction: readyToMerge.length > 0
|
|
6614
|
+
? `Start with ${readyToMerge[0].taskKey} — call assess_review_readiness then get_review_bundle.`
|
|
6615
|
+
: needsRebase.length > 0
|
|
6616
|
+
? `No branches ready yet. Call guide_my_merge for ${needsRebase[0].taskKey} to unblock first.`
|
|
6617
|
+
: 'Pipeline is clear — monitor for new pushes.',
|
|
6618
|
+
})
|
|
6619
|
+
} catch (e) {
|
|
6620
|
+
return errorText(e.message)
|
|
6621
|
+
}
|
|
6622
|
+
}
|
|
6623
|
+
)
|
|
6624
|
+
|
|
6625
|
+
// ── coordinate_conflict ───────────────────────────────────────────────────
|
|
6626
|
+
server.tool(
|
|
6627
|
+
'coordinate_conflict',
|
|
6628
|
+
`Send a structured conflict resolution coordination message between two developers whose branches share files.
|
|
6629
|
+
|
|
6630
|
+
The sender declares who will refactor the shared files — either "I will handle it" (resolver="me") or "Please handle it" (resolver="them").
|
|
6631
|
+
|
|
6632
|
+
This posts a comment on the target task and sends a push notification with a clear, actionable message:
|
|
6633
|
+
- resolver="me": posts on the partner's task: "Dev A will refactor the shared files — wait for their merge"
|
|
6634
|
+
- resolver="them": posts on the partner's task: "Please refactor the shared files before merging"
|
|
6635
|
+
|
|
6636
|
+
Call get_branch_pipeline_status first to identify the conflicting taskIds and sharedFiles.
|
|
6637
|
+
Use this when two branches have file conflicts and you need to establish who will resolve them before either can merge.`,
|
|
6638
|
+
{
|
|
6639
|
+
projectId: z.string().describe("Project's MongoDB ObjectId"),
|
|
6640
|
+
fromTaskId: z.string().describe('The task ID of the developer initiating the coordination'),
|
|
6641
|
+
toTaskId: z.string().describe('The task ID of the developer being coordinated with'),
|
|
6642
|
+
resolver: z.enum(['me', 'them']).describe('"me" = I will refactor the shared files; "them" = asking them to refactor'),
|
|
6643
|
+
sharedFiles: z.array(z.string()).optional().describe('List of files both branches touch'),
|
|
6644
|
+
note: z.string().optional().describe('Optional message to include in the coordination comment'),
|
|
6645
|
+
},
|
|
6646
|
+
async ({ projectId, fromTaskId, toTaskId, resolver, sharedFiles = [], note = '' }) => {
|
|
6647
|
+
if (scopedProjectId && projectId !== scopedProjectId) {
|
|
6648
|
+
return errorText(`Access denied: session is scoped to project ${scopedProjectId}`)
|
|
6649
|
+
}
|
|
6650
|
+
try {
|
|
6651
|
+
const res = await api.post(`/api/projects/${projectId}/git-tree/coordinate-conflict`, {
|
|
6652
|
+
fromTaskId, toTaskId, resolver, sharedFiles, note,
|
|
6653
|
+
})
|
|
6654
|
+
if (!res?.success) return errorText(res?.message || 'Coordination failed')
|
|
6655
|
+
const { fromTaskKey, toTaskKey } = res.data
|
|
6656
|
+
return text({
|
|
6657
|
+
coordinated: true,
|
|
6658
|
+
resolver,
|
|
6659
|
+
summary: resolver === 'me'
|
|
6660
|
+
? `${fromTaskKey} claimed responsibility — will refactor shared files. ${toTaskKey} should wait and rebase after ${fromTaskKey} merges.`
|
|
6661
|
+
: `${toTaskKey} has been asked to refactor the shared files. They need to confirm and handle it before either branch merges.`,
|
|
6662
|
+
nextStep: resolver === 'me'
|
|
6663
|
+
? `${fromTaskKey} should now refactor the shared files, push, and merge. ${toTaskKey} then rebases.`
|
|
6664
|
+
: `Wait for ${toTaskKey} to confirm they will handle it. Follow up if no response within a day.`,
|
|
6665
|
+
})
|
|
6666
|
+
} catch (e) {
|
|
6667
|
+
return errorText(e.message)
|
|
6668
|
+
}
|
|
6669
|
+
}
|
|
6670
|
+
)
|
|
6671
|
+
}
|
|
6672
|
+
|
|
6673
|
+
// ── Memory Tools ──────────────────────────────────────────────────────────────
|
|
6674
|
+
function registerMemoryTools(server, ctx) {
|
|
6675
|
+
const { scopedProjectId } = ctx
|
|
6676
|
+
|
|
6677
|
+
// ── remember ──────────────────────────────────────────────────────────────
|
|
6678
|
+
server.tool(
|
|
6679
|
+
'remember',
|
|
6680
|
+
`Store a persistent key-value memory entry on a task. This memory survives across Cursor sessions and is recalled automatically at the start of every get_workspace_briefing call.
|
|
6681
|
+
|
|
6682
|
+
Use this to remember decisions, constraints, context, and findings that should persist between working sessions:
|
|
6683
|
+
- Architecture decisions: remember(taskId, "arch_decision", "Using optimistic locking for inventory")
|
|
6684
|
+
- Constraints: remember(taskId, "constraint_auth", "Must not modify auth middleware — it's frozen")
|
|
6685
|
+
- Discoveries: remember(taskId, "discovered", "The cache invalidation happens in utils/cache.js:87")
|
|
6686
|
+
- Status: remember(taskId, "status", "Blocked on API design — waiting for review")
|
|
6687
|
+
|
|
6688
|
+
Call recall(taskId) to retrieve everything at any time.`,
|
|
6689
|
+
{
|
|
6690
|
+
taskId: z.string().describe("Task's MongoDB ObjectId"),
|
|
6691
|
+
key: z.string().describe('Memory key — short, descriptive, no spaces (e.g. "arch_decision", "constraint_auth")'),
|
|
6692
|
+
value: z.string().describe('Value to store — plain text, any length'),
|
|
6693
|
+
},
|
|
6694
|
+
async ({ taskId, key, value }) => {
|
|
6695
|
+
try {
|
|
6696
|
+
const res = await api.put(`/api/tasks/${taskId}/agent-memory`, { key, value })
|
|
6697
|
+
if (!res?.success) return errorText(res?.message || 'Failed to save memory')
|
|
6698
|
+
return text({ remembered: true, key, value, hint: 'Saved. Call recall(taskId) to verify.' })
|
|
6699
|
+
} catch (e) {
|
|
6700
|
+
return errorText(e.message)
|
|
6701
|
+
}
|
|
6702
|
+
}
|
|
6703
|
+
)
|
|
6704
|
+
|
|
6705
|
+
// ── recall ────────────────────────────────────────────────────────────────
|
|
6706
|
+
server.tool(
|
|
6707
|
+
'recall',
|
|
6708
|
+
`Retrieve all persistent memory entries stored on a task via remember().
|
|
6709
|
+
|
|
6710
|
+
Returns a key-value map of everything previously remembered. Call this at the start of a session to restore context — or use get_workspace_briefing which calls this automatically.`,
|
|
6711
|
+
{
|
|
6712
|
+
taskId: z.string().describe("Task's MongoDB ObjectId"),
|
|
6713
|
+
},
|
|
6714
|
+
async ({ taskId }) => {
|
|
6715
|
+
try {
|
|
6716
|
+
const res = await api.get(`/api/tasks/${taskId}/agent-memory`)
|
|
6717
|
+
if (!res?.success) return errorText(res?.message || 'Failed to retrieve memory')
|
|
6718
|
+
const { memory } = res.data
|
|
6719
|
+
const keys = Object.keys(memory)
|
|
6720
|
+
if (keys.length === 0) return text({ memory: {}, hint: 'No memory stored yet. Use remember() to save context.' })
|
|
6721
|
+
const formatted = keys.map(k => ` ${k}: ${memory[k]}`).join('\n')
|
|
6722
|
+
return text({ memory, formatted: `Recalled ${keys.length} entr${keys.length === 1 ? 'y' : 'ies'}:\n${formatted}` })
|
|
6723
|
+
} catch (e) {
|
|
6724
|
+
return errorText(e.message)
|
|
6725
|
+
}
|
|
6726
|
+
}
|
|
6727
|
+
)
|
|
6728
|
+
|
|
6729
|
+
// ── get_workspace_briefing ────────────────────────────────────────────────
|
|
6730
|
+
server.tool(
|
|
6731
|
+
'get_workspace_briefing',
|
|
6732
|
+
`Comprehensive session-start briefing — the single tool to call at the beginning of every Cursor session on a task.
|
|
6733
|
+
|
|
6734
|
+
Returns everything an agent needs to resume without re-reading the full codebase:
|
|
6735
|
+
1. Task details: title, description, readme plan, column, assignees
|
|
6736
|
+
2. Persistent memory recalled from previous sessions
|
|
6737
|
+
3. Active sub-agent sessions and their findings
|
|
6738
|
+
4. Files currently claimed by this task
|
|
6739
|
+
5. Branch status: behind/ahead counts, PR status
|
|
6740
|
+
6. Recent session log entries (last 10 tool calls)
|
|
6741
|
+
7. Teammate activity: which tasks share your files or are nearby in the pipeline
|
|
6742
|
+
|
|
6743
|
+
Always call this first — it collapses context from the last 3 steps into one call.`,
|
|
6744
|
+
{
|
|
6745
|
+
taskId: z.string().describe("Task's MongoDB ObjectId"),
|
|
6746
|
+
repoPath: z.string().optional().describe('Absolute path to the local git repo — used to detect local branch state'),
|
|
6747
|
+
},
|
|
6748
|
+
async ({ taskId, repoPath }) => {
|
|
6749
|
+
try {
|
|
6750
|
+
// Task + project
|
|
6751
|
+
const taskRes = await api.get(`/api/tasks/${taskId}`)
|
|
6752
|
+
if (!taskRes?.success) return errorText(`Task not found: ${taskId}`)
|
|
6753
|
+
const task = taskRes.data.task
|
|
6754
|
+
const projRes = await api.get(`/api/projects/${task.project}`)
|
|
6755
|
+
const proj = projRes?.data?.project || {}
|
|
6756
|
+
|
|
6757
|
+
// Memory
|
|
6758
|
+
const memRes = await api.get(`/api/tasks/${taskId}/agent-memory`)
|
|
6759
|
+
const memory = memRes?.data?.memory || {}
|
|
6760
|
+
|
|
6761
|
+
// Sessions
|
|
6762
|
+
const sessRes = await api.get(`/api/tasks/${taskId}/agent-sessions`)
|
|
6763
|
+
const sessions = (sessRes?.data?.sessions || []).slice(-5)
|
|
6764
|
+
|
|
6765
|
+
// Local branch state
|
|
6766
|
+
let localState = null
|
|
6767
|
+
if (repoPath && task.github?.headBranch) {
|
|
6768
|
+
try {
|
|
6769
|
+
const branch = task.github.headBranch
|
|
6770
|
+
const status = execSync(`git -C "${repoPath}" status --short 2>/dev/null`, { encoding: 'utf8' }).trim()
|
|
6771
|
+
const logLine = execSync(`git -C "${repoPath}" log --oneline -1 2>/dev/null`, { encoding: 'utf8' }).trim()
|
|
6772
|
+
localState = { branch, dirty: status.length > 0, lastCommit: logLine }
|
|
6773
|
+
} catch { localState = { error: 'Could not read local git state' } }
|
|
6774
|
+
}
|
|
6775
|
+
|
|
6776
|
+
const memKeys = Object.keys(memory)
|
|
6777
|
+
const memLines = memKeys.length > 0
|
|
6778
|
+
? memKeys.map(k => ` ${k}: ${memory[k]}`).join('\n')
|
|
6779
|
+
: ' (none)'
|
|
6780
|
+
|
|
6781
|
+
const sessionLines = sessions.length > 0
|
|
6782
|
+
? sessions.map(s => ` [${s.status.toUpperCase()}] ${s.role}: ${s.mission}${s.summary ? ` → ${s.summary}` : ''}`).join('\n')
|
|
6783
|
+
: ' (none)'
|
|
6784
|
+
|
|
6785
|
+
const recentLog = (task.agentSessionLog || []).slice(-10)
|
|
6786
|
+
.map(e => ` ${new Date(e.calledAt).toISOString().slice(11, 19)} ${e.type}:${e.name}${e.summary ? ` — ${e.summary}` : ''}`)
|
|
6787
|
+
.join('\n') || ' (none)'
|
|
6788
|
+
|
|
6789
|
+
const briefing = [
|
|
6790
|
+
`# Workspace Briefing — ${task.key}: ${task.title}`,
|
|
6791
|
+
`Project: ${proj.name || task.project} | Column: ${task.column} | Priority: ${task.priority}`,
|
|
6792
|
+
`Branch: ${task.github?.headBranch || '(none)'} | PR: ${task.github?.prNumber ? `#${task.github.prNumber}` : '(none)'}`,
|
|
6793
|
+
'',
|
|
6794
|
+
'## Task',
|
|
6795
|
+
`${task.description || '(no description)'}`,
|
|
6796
|
+
'',
|
|
6797
|
+
'## Persistent Memory',
|
|
6798
|
+
memLines,
|
|
6799
|
+
'',
|
|
6800
|
+
'## Sub-agent Sessions',
|
|
6801
|
+
sessionLines,
|
|
6802
|
+
'',
|
|
6803
|
+
'## Claimed Files',
|
|
6804
|
+
(task.claimedFiles || []).length > 0
|
|
6805
|
+
? (task.claimedFiles || []).map(f => ` ${f}`).join('\n')
|
|
6806
|
+
: ' (none)',
|
|
6807
|
+
'',
|
|
6808
|
+
'## Recent Session Log',
|
|
6809
|
+
recentLog,
|
|
6810
|
+
...(localState ? ['', '## Local Git State', JSON.stringify(localState, null, 2)] : []),
|
|
6811
|
+
'',
|
|
6812
|
+
'## Next Steps',
|
|
6813
|
+
`Call get_agent_context("${taskId}") for full task context, or start working directly.`,
|
|
6814
|
+
`Use remember("${taskId}", key, value) to persist discoveries.`,
|
|
6815
|
+
`Use spawn_agent_session("${taskId}", role, mission) to delegate to sub-agents.`,
|
|
6816
|
+
`Use generate_cursor_workspace("${proj._id || task.project}", "${taskId}", repoPath) to write .mdc rule files.`,
|
|
6817
|
+
].join('\n')
|
|
6818
|
+
|
|
6819
|
+
return text({
|
|
6820
|
+
briefing,
|
|
6821
|
+
task: { key: task.key, title: task.title, column: task.column, priority: task.priority },
|
|
6822
|
+
memory,
|
|
6823
|
+
sessions,
|
|
6824
|
+
localState,
|
|
6825
|
+
})
|
|
6826
|
+
} catch (e) {
|
|
6827
|
+
return errorText(e.message)
|
|
6828
|
+
}
|
|
6829
|
+
}
|
|
6830
|
+
)
|
|
6831
|
+
}
|
|
6832
|
+
|
|
6833
|
+
// ── Cursor Workspace Tools ────────────────────────────────────────────────────
|
|
6834
|
+
function registerCursorTools(server, ctx) {
|
|
6835
|
+
const { scopedProjectId } = ctx
|
|
6836
|
+
|
|
6837
|
+
// ── generate_cursor_workspace ─────────────────────────────────────────────
|
|
6838
|
+
server.tool(
|
|
6839
|
+
'generate_cursor_workspace',
|
|
6840
|
+
`Write 4 context-rich .mdc rule files to .cursor/rules/ in the local repository.
|
|
6841
|
+
|
|
6842
|
+
These files inject live task context directly into every Cursor AI response — no copy-paste needed:
|
|
6843
|
+
- devos-task.mdc — task title, description, readme plan, constraints from memory
|
|
6844
|
+
- devos-pipeline.mdc — branch status, merge rank, behind/ahead, conflicts
|
|
6845
|
+
- devos-teammates.mdc — who is working on what, files to avoid touching
|
|
6846
|
+
- devos-memory.mdc — recalled persistent memory + recent session summaries
|
|
6847
|
+
|
|
6848
|
+
Run this once at the start of a session (or after remember() to refresh devos-memory.mdc).
|
|
6849
|
+
All files use alwaysApply: true so Cursor includes them in every conversation automatically.`,
|
|
6850
|
+
{
|
|
6851
|
+
projectId: z.string().describe("Project's MongoDB ObjectId"),
|
|
6852
|
+
taskId: z.string().describe("Task's MongoDB ObjectId"),
|
|
6853
|
+
repoPath: z.string().describe('Absolute path to the local git repo root (where .cursor/ folder lives or should be created)'),
|
|
6854
|
+
},
|
|
6855
|
+
async ({ projectId, taskId, repoPath }) => {
|
|
6856
|
+
if (scopedProjectId && projectId !== scopedProjectId) {
|
|
6857
|
+
return errorText(`Access denied: session is scoped to project ${scopedProjectId}`)
|
|
6858
|
+
}
|
|
6859
|
+
try {
|
|
6860
|
+
// Fetch data
|
|
6861
|
+
const [taskRes, projRes, memRes, sessRes] = await Promise.all([
|
|
6862
|
+
api.get(`/api/tasks/${taskId}`),
|
|
6863
|
+
api.get(`/api/projects/${projectId}`),
|
|
6864
|
+
api.get(`/api/tasks/${taskId}/agent-memory`),
|
|
6865
|
+
api.get(`/api/tasks/${taskId}/agent-sessions`),
|
|
6866
|
+
])
|
|
6867
|
+
|
|
6868
|
+
if (!taskRes?.success) return errorText(`Task not found: ${taskId}`)
|
|
6869
|
+
const task = taskRes.data.task
|
|
6870
|
+
const proj = projRes?.data?.project || {}
|
|
6871
|
+
const tasks = projRes?.data?.tasks || []
|
|
6872
|
+
const memory = memRes?.data?.memory || {}
|
|
6873
|
+
const sessions = (sessRes?.data?.sessions || []).slice(-5)
|
|
6874
|
+
|
|
6875
|
+
const rulesDir = join(repoPath, '.cursor', 'rules')
|
|
6876
|
+
mkdirSync(rulesDir, { recursive: true })
|
|
6877
|
+
|
|
6878
|
+
// ── devos-task.mdc ────────────────────────────────────────────────
|
|
6879
|
+
const assigneeNames = (task.assignees || []).map(a => a.name || a.email || 'unknown').join(', ')
|
|
6880
|
+
const subtaskLines = (task.subtasks || []).map(s => ` - [${s.done ? 'x' : ' '}] ${s.title}`).join('\n') || ' (none)'
|
|
6881
|
+
const taskMdc = [
|
|
6882
|
+
'---',
|
|
6883
|
+
`description: DevOS Task Context — ${task.key}: ${task.title}`,
|
|
6884
|
+
'alwaysApply: true',
|
|
6885
|
+
'---',
|
|
6886
|
+
'',
|
|
6887
|
+
`# Active Task: ${task.key} — ${task.title}`,
|
|
6888
|
+
'',
|
|
6889
|
+
`**Project:** ${proj.name || projectId}`,
|
|
6890
|
+
`**Column:** ${task.column} | **Priority:** ${task.priority}`,
|
|
6891
|
+
`**Assigned to:** ${assigneeNames || '(unassigned)'}`,
|
|
6892
|
+
`**Branch:** ${task.github?.headBranch || '(not set)'} | **PR:** ${task.github?.prNumber ? `#${task.github.prNumber} — ${task.github.prUrl}` : 'none'}`,
|
|
6893
|
+
'',
|
|
6894
|
+
'## Description',
|
|
6895
|
+
task.description || '(no description)',
|
|
6896
|
+
'',
|
|
6897
|
+
'## Plan / Readme',
|
|
6898
|
+
task.readmeMarkdown || '(no plan written)',
|
|
6899
|
+
'',
|
|
6900
|
+
'## Subtasks',
|
|
6901
|
+
subtaskLines,
|
|
6902
|
+
'',
|
|
6903
|
+
'## Constraints (from Cursor Rules)',
|
|
6904
|
+
task.cursorRules || '(no task-specific constraints)',
|
|
6905
|
+
'',
|
|
6906
|
+
`## Task IDs`,
|
|
6907
|
+
`- Task ID: ${task._id}`,
|
|
6908
|
+
`- Project ID: ${projectId}`,
|
|
6909
|
+
`- MCP reminder: always call get_workspace_briefing("${task._id}") at session start`,
|
|
6910
|
+
].join('\n')
|
|
6911
|
+
|
|
6912
|
+
// ── devos-pipeline.mdc ────────────────────────────────────────────
|
|
6913
|
+
const activeTasks = tasks.filter(t => ['in_progress', 'in_review'].includes(t.column) && t.github?.headBranch)
|
|
6914
|
+
const pipelineLines = activeTasks.map(t => {
|
|
6915
|
+
const isMine = String(t._id) === String(task._id)
|
|
6916
|
+
return ` ${isMine ? '▶' : ' '} ${t.key} [${t.column}] — branch: ${t.github?.headBranch || '?'} ${isMine ? '← YOU' : ''}`
|
|
6917
|
+
}).join('\n') || ' (no active branches)'
|
|
6918
|
+
|
|
6919
|
+
const pipelineMdc = [
|
|
6920
|
+
'---',
|
|
6921
|
+
`description: DevOS Pipeline Context — ${proj.name || projectId}`,
|
|
6922
|
+
'alwaysApply: true',
|
|
6923
|
+
'---',
|
|
6924
|
+
'',
|
|
6925
|
+
`# Merge Pipeline — ${proj.name || projectId}`,
|
|
6926
|
+
'',
|
|
6927
|
+
`**Default branch:** ${proj.githubDefaultBranch || 'main'}`,
|
|
6928
|
+
`**Your branch:** ${task.github?.headBranch || '(not set)'}`,
|
|
6929
|
+
`**Your PR:** ${task.github?.prNumber ? `#${task.github.prNumber}` : 'none yet'}`,
|
|
6930
|
+
'',
|
|
6931
|
+
'## Active Branches (all developers)',
|
|
6932
|
+
pipelineLines,
|
|
6933
|
+
'',
|
|
6934
|
+
'## What to do',
|
|
6935
|
+
'- Run `get_branch_pipeline_status` to see your current merge rank and behind/ahead counts',
|
|
6936
|
+
'- Run `guide_my_merge` to get step-by-step rebase/merge commands for your branch',
|
|
6937
|
+
'- Run `assess_review_readiness` before requesting a code review',
|
|
6938
|
+
'- Run `coordinate_conflict` if you share files with another branch',
|
|
6939
|
+
'',
|
|
6940
|
+
'## Merge Safety Rules',
|
|
6941
|
+
'- Always `git fetch origin && git rebase origin/main` before pushing',
|
|
6942
|
+
'- Never force-push to a shared branch — use `--force-with-lease` only',
|
|
6943
|
+
'- Confirm no conflicts with teammates before opening your PR',
|
|
6944
|
+
'- Run `get_pipeline_report` to see the full team status at any time',
|
|
6945
|
+
].join('\n')
|
|
6946
|
+
|
|
6947
|
+
// ── devos-teammates.mdc ──────────────────────────────────────────
|
|
6948
|
+
const otherTasks = activeTasks.filter(t => String(t._id) !== String(task._id))
|
|
6949
|
+
const teammateLines = otherTasks.map(t => {
|
|
6950
|
+
const devs = (t.assignees || []).map(a => a.name || 'unknown').join(', ')
|
|
6951
|
+
const files = (t.claimedFiles || [])
|
|
6952
|
+
return [
|
|
6953
|
+
` **${t.key}** [${devs || 'unassigned'}] — branch: ${t.github?.headBranch || '?'}`,
|
|
6954
|
+
files.length > 0 ? ` Files owned: ${files.join(', ')}` : '',
|
|
6955
|
+
].filter(Boolean).join('\n')
|
|
6956
|
+
}).join('\n\n') || ' (you are the only active developer)'
|
|
6957
|
+
|
|
6958
|
+
const myFiles = (task.claimedFiles || [])
|
|
6959
|
+
const teammateMdc = [
|
|
6960
|
+
'---',
|
|
6961
|
+
`description: DevOS Teammate Context — who is working on what`,
|
|
6962
|
+
'alwaysApply: true',
|
|
6963
|
+
'---',
|
|
6964
|
+
'',
|
|
6965
|
+
'# Teammate Context',
|
|
6966
|
+
'',
|
|
6967
|
+
'## Your claimed files (do not touch in other branches)',
|
|
6968
|
+
myFiles.length > 0 ? myFiles.map(f => ` - ${f}`).join('\n') : ' (none claimed yet — call claim_files to lock your scope)',
|
|
6969
|
+
'',
|
|
6970
|
+
'## Other active developers',
|
|
6971
|
+
teammateLines,
|
|
6972
|
+
'',
|
|
6973
|
+
'## Coordination rules',
|
|
6974
|
+
'- Do NOT modify files listed under another developer\'s "Files owned" without coordinating first',
|
|
6975
|
+
'- Call coordinate_conflict(projectId, yourTaskId, theirTaskId, "me"|"them", sharedFiles) to establish who refactors shared files',
|
|
6976
|
+
'- When in doubt, ask in the task comment before modifying a shared module',
|
|
6977
|
+
].join('\n')
|
|
6978
|
+
|
|
6979
|
+
// ── devos-memory.mdc ──────────────────────────────────────────────
|
|
6980
|
+
const memKeys = Object.keys(memory)
|
|
6981
|
+
const memLines = memKeys.length > 0
|
|
6982
|
+
? memKeys.map(k => ` **${k}:** ${memory[k]}`).join('\n')
|
|
6983
|
+
: ' (no memory stored yet)'
|
|
6984
|
+
|
|
6985
|
+
const sessionLines = sessions.length > 0
|
|
6986
|
+
? sessions.map(s => ` [${s.status.toUpperCase()}] **${s.role}** — ${s.mission}${s.summary ? `\n Result: ${s.summary}` : ''}`).join('\n\n')
|
|
6987
|
+
: ' (no sub-agent sessions yet)'
|
|
6988
|
+
|
|
6989
|
+
const memoryMdc = [
|
|
6990
|
+
'---',
|
|
6991
|
+
`description: DevOS Agent Memory — ${task.key} persistent knowledge`,
|
|
6992
|
+
'alwaysApply: true',
|
|
6993
|
+
'---',
|
|
6994
|
+
'',
|
|
6995
|
+
`# Agent Memory — ${task.key}`,
|
|
6996
|
+
'',
|
|
6997
|
+
'## Persistent Memory (from previous sessions)',
|
|
6998
|
+
memLines,
|
|
6999
|
+
'',
|
|
7000
|
+
'## Recent Sub-agent Sessions',
|
|
7001
|
+
sessionLines,
|
|
7002
|
+
'',
|
|
7003
|
+
'## How to update memory',
|
|
7004
|
+
`Call remember("${task._id}", "key", "value") to store a new fact.`,
|
|
7005
|
+
`Call recall("${task._id}") to see all stored facts.`,
|
|
7006
|
+
'Memory is permanent — it survives across Cursor restarts and session clears.',
|
|
7007
|
+
'',
|
|
7008
|
+
'## How to spawn sub-agents',
|
|
7009
|
+
`Call spawn_agent_session("${task._id}", "role", "mission") to create a named session.`,
|
|
7010
|
+
`Call report_agent_findings(sessionId, "${task._id}", findings) to store results.`,
|
|
7011
|
+
`Call complete_agent_session(sessionId, "${task._id}", summary) when done.`,
|
|
7012
|
+
`Call get_agent_sessions("${task._id}") to see all sessions and their status.`,
|
|
7013
|
+
].join('\n')
|
|
7014
|
+
|
|
7015
|
+
// Write all 4 files
|
|
7016
|
+
const files = [
|
|
7017
|
+
{ name: 'devos-task.mdc', content: taskMdc },
|
|
7018
|
+
{ name: 'devos-pipeline.mdc', content: pipelineMdc },
|
|
7019
|
+
{ name: 'devos-teammates.mdc', content: teammateMdc },
|
|
7020
|
+
{ name: 'devos-memory.mdc', content: memoryMdc },
|
|
7021
|
+
]
|
|
7022
|
+
for (const f of files) {
|
|
7023
|
+
writeFileSync(join(rulesDir, f.name), f.content, 'utf8')
|
|
7024
|
+
}
|
|
7025
|
+
|
|
7026
|
+
// Auto-rescan workspace so the UI reflects the newly written files immediately.
|
|
7027
|
+
runWorkspaceScan(taskId, task, proj, repoPath).catch(() => {/* non-fatal */})
|
|
7028
|
+
|
|
7029
|
+
return text({
|
|
7030
|
+
written: files.map(f => f.name),
|
|
7031
|
+
rulesDir,
|
|
7032
|
+
totalBytes: files.reduce((n, f) => n + f.content.length, 0),
|
|
7033
|
+
hint: 'All 4 .mdc files written. Cursor will pick them up automatically on next message. Re-run after calling remember() to refresh devos-memory.mdc.',
|
|
7034
|
+
taskKey: task.key,
|
|
7035
|
+
})
|
|
7036
|
+
} catch (e) {
|
|
7037
|
+
return errorText(e.message)
|
|
7038
|
+
}
|
|
7039
|
+
}
|
|
7040
|
+
)
|
|
7041
|
+
|
|
7042
|
+
// ── install_zopkit_skills ─────────────────────────────────────────────────
// Registers the tool that materialises the ZopKit skill + agent persona
// rule set on disk. The template literal below is the tool description
// shown to the model — it is runtime data, not a comment.
server.tool(
  'install_zopkit_skills',
  `Write the complete ZopKit agent skills and methodology as Cursor rules (.mdc files) to .cursor/rules/.

This installs all ZopKit skills and agent definitions so every Cursor session has them available automatically:
- zopkit-merge-skill.mdc — 8-phase Merge & Ship methodology with exact git commands
- zopkit-review-skill.mdc — 7-step Pipeline Code Review methodology (merge safety first)
- zopkit-recon-skill.mdc — 5-phase Codebase Recon delivering architecture intelligence report
- zopkit-blueprint-skill.mdc — 5-step Feature Blueprint (data model → API → frontend → wiring)
- zopkit-security-skill.mdc — 5-phase Security Audit skill
- zopkit-agents.mdc — All 6 agent personas (ZopShipper, ZopScout, ZopMerger, ZopReviewer, ZopGuard, ZopOrchestrator)

How to use after installing:
- Skills: reference with @zopkit-merge-skill or @zopkit-review-skill in Cursor chat
- Agents: activate with "You are ZopMerger. [mission]" in Cursor chat
- Prompts: use generate_cursor_workspace to also write task-specific context files

Run once per repository. Re-run to update all files after a ZopKit version bump.`,
  {
    // Only input: where the repo lives. No project/task context is needed
    // because the installed content is repo-agnostic.
    repoPath: z.string().describe('Absolute path to the local git repo root'),
  },
  async ({ repoPath }) => {
    try {
      // Ensure the rules directory exists before the writeFileSync calls below.
      const rulesDir = join(repoPath, '.cursor', 'rules')
      mkdirSync(rulesDir, { recursive: true })
|
|
7068
|
+
|
|
7069
|
+
// ── zopkit-merge-skill.mdc ────────────────────────────────────────
// Merge & Ship skill content, one line per array element, joined with '\n'.
// Fix: the activation reference said "@zop-merge-skill", but the file is
// written as zopkit-merge-skill.mdc and both the tool description and the
// success hint tell users to reference "@zopkit-merge-skill" — the old
// name pointed at a rule that does not exist.
const mergeSkillMdc = [
  '---',
  'description: ZopKit Merge & Ship Skill — 8-phase methodology for safely merging feature branches',
  'alwaysApply: false',
  '---',
  '',
  '# ZopKit Merge & Ship Skill',
  '',
  'Use this skill when a developer asks to merge, ship, rebase, or resolve conflicts.',
  'Activate with: "@zopkit-merge-skill guide me to merge my branch"',
  '',
  '## Phase 1 — Assess current status',
  '```',
  'git fetch origin',
  'git status',
  'git log origin/main..HEAD --oneline # your commits',
  'git log HEAD..origin/main --oneline # commits you are missing',
  '```',
  '',
  '## Phase 2 — Stash any local changes',
  '```',
  'git stash push -m "pre-rebase stash $(date +%s)"',
  '```',
  '',
  '## Phase 3 — Rebase onto the latest main',
  '```',
  'git rebase origin/main',
  '# If conflicts:',
  '# git status → see conflicting files',
  '# git diff <file> → inspect the conflict',
  '# # edit the file to resolve',
  '# git add <file>',
  '# git rebase --continue',
  '```',
  '',
  '## Phase 4 — Verify the rebase result',
  '```',
  'git log --oneline -5',
  'git diff origin/main --stat',
  '```',
  '',
  '## Phase 5 — Pop stash (if any)',
  '```',
  'git stash pop',
  '```',
  '',
  '## Phase 6 — Push (safely)',
  '```',
  'git push --force-with-lease origin <your-branch>',
  '# NEVER use --force on a shared branch',
  '```',
  '',
  '## Phase 7 — Check CI / PR status',
  'Call assess_review_readiness(projectId, taskId) to get a structured readiness checklist.',
  '',
  '## Phase 8 — Coordinate if needed',
  'If you share files with another branch, call coordinate_conflict() before merging.',
  'Call get_pipeline_report(projectId) to see the full team status.',
  '',
  '## Safety rules',
  '- Always fetch before rebase',
  '- Always use --force-with-lease not --force',
  '- Never rebase commits that have already been reviewed on a PR without notifying the reviewer',
  '- Confirm the pipeline report shows you as #1 or #2 before merging',
].join('\n')
|
|
7135
|
+
|
|
7136
|
+
// ── zopkit-review-skill.mdc ───────────────────────────────────────
// Pipeline Code Review skill content. Fix: the activation reference said
// "@zop-review-skill", but the file is written as zopkit-review-skill.mdc
// and the tool description/hint tell users to reference
// "@zopkit-review-skill" — the old name pointed at a nonexistent rule.
const reviewSkillMdc = [
  '---',
  'description: ZopKit Pipeline Code Review Skill — 7-step methodology for reviewing PRs in the merge pipeline',
  'alwaysApply: false',
  '---',
  '',
  '# ZopKit Pipeline Code Review Skill',
  '',
  'Use this skill when asked to review a PR, assess readiness, or do a code review.',
  'Activate with: "@zopkit-review-skill review this PR"',
  '',
  '## Step 1 — Fetch the diff',
  'Call assess_review_readiness(projectId, taskId) for the structured 6-check readiness report.',
  'Call get_review_bundle(taskId) for the raw diff + PR details.',
  '',
  '## Step 2 — Pipeline check',
  'Confirm branch is not behind main. If behind → ask developer to rebase first.',
  'Confirm no shared files with other active branches (call get_branch_pipeline_status).',
  '',
  '## Step 3 — Correctness review',
  '- Does the code do what the task description says?',
  '- Are there obvious bugs, null pointer risks, or off-by-one errors?',
  '- Are all new code paths covered by existing tests or new ones?',
  '',
  '## Step 4 — Safety review',
  '- No secrets, credentials, or API keys in the diff',
  '- No SQL injection, XSS, or SSRF vectors introduced',
  '- Input validation at system boundaries',
  '',
  '## Step 5 — Standards review',
  '- Follows existing naming and file structure conventions',
  '- No dead code or commented-out blocks left behind',
  '- No unnecessary dependencies added',
  '',
  '## Step 6 — Verdict',
  '- APPROVE: all checks pass, no blocking issues',
  '- NEEDS WORK: one or more checks fail — list specific changes required',
  '- REJECT: fundamental design issue — requires rework, not just fixes',
  '',
  '## Step 7 — Post findings',
  'Post a comment on the task with your verdict and specific line-level feedback.',
  'Call remember(taskId, "review_verdict", "APPROVE/NEEDS_WORK/REJECT — reason") to persist.',
].join('\n')
|
|
7180
|
+
|
|
7181
|
+
// ── zopkit-agents.mdc ─────────────────────────────────────────────
// Base agent-persona content (ZopMerger, ZopReviewer, ZopOrchestrator).
// IMPORTANT: the '## ZopMerger — Merge & Ship Guide' heading below is used
// verbatim as a .replace() anchor later in this handler (agentsMdcUpdated)
// to splice the ZopShipper/ZopScout sections in front of it — do not
// reword that heading without updating the replace call.
const agentsMdc = [
  '---',
  'description: ZopKit Agent Definitions — ZopMerger and ZopReviewer agent personas',
  'alwaysApply: false',
  '---',
  '',
  '# ZopKit Agent Definitions',
  '',
  '## ZopMerger — Merge & Ship Guide',
  '',
  'Activate: "You are ZopMerger. Guide me to merge my branch safely."',
  '',
  'ZopMerger is a senior DevOps engineer specialising in git merge pipelines.',
  'Personality: direct, methodical, safety-conscious. Never rushes a merge.',
  '',
  'Responsibilities:',
  '- Runs the 8-phase Merge & Ship skill',
  '- Calls get_branch_pipeline_status to assess current position',
  '- Calls guide_my_merge for exact step-by-step commands',
  '- Coordinates conflicts via coordinate_conflict',
  '- Celebrates successful merges with confetti 🎉',
  '',
  'Trigger phrases:',
  '- "help me merge", "guide my merge", "how do I ship this", "rebase help"',
  '- "I have conflicts", "my branch is behind", "how do I push"',
  '',
  '## ZopReviewer — Pipeline Code Reviewer',
  '',
  'Activate: "You are ZopReviewer. Review this PR."',
  '',
  'ZopReviewer is a senior engineer and security-aware code reviewer.',
  'Personality: thorough, precise, constructive. Always explains the "why".',
  '',
  'Responsibilities:',
  '- Runs the 7-step Pipeline Code Review skill',
  '- Calls assess_review_readiness for structured checklist',
  '- Posts verdict as a task comment',
  '- Remembers verdict via remember(taskId, "review_verdict", verdict)',
  '',
  'Trigger phrases:',
  '- "review this PR", "is this ready to merge", "check my code", "review readiness"',
  '- "am I ready", "can I merge", "what do you think of this diff"',
  '',
  '## ZopOrchestrator — Task Decomposer',
  '',
  'Activate: "You are ZopOrchestrator. Decompose this task."',
  '',
  'ZopOrchestrator is a technical lead who breaks complex tasks into parallel workstreams.',
  'Personality: systems thinker, delegation-first, dependency-aware.',
  '',
  'Responsibilities:',
  '- Calls decompose_task to create subtask groups',
  '- Spawns named sub-agent sessions via spawn_agent_session',
  '- Tracks session progress via get_agent_sessions',
  '- Aggregates findings via report_agent_findings + complete_agent_session',
  '- Generates the full workspace context via generate_cursor_workspace',
].join('\n')
|
|
7239
|
+
|
|
7240
|
+
// ── zopkit-recon-skill.mdc ────────────────────────────────────────
// Codebase Recon skill content, one line per array element joined with
// '\n'. Frontmatter uses alwaysApply:false, so Cursor only loads it when
// explicitly @-referenced.
const reconSkillMdc = [
  '---',
  'description: ZopKit Codebase Recon Skill — 5-phase architecture intelligence sweep',
  'alwaysApply: false',
  '---',
  '',
  '# ZopKit Codebase Recon Skill',
  '',
  'Reference with @zopkit-recon-skill, or paste as context before exploring a codebase.',
  'Use at the start of any task on an unfamiliar module.',
  '',
  '## Phase 1: Surface Scan (< 2 minutes)',
  '- Read package.json / go.mod — identify framework and key deps',
  '- Read the entry point — understand bootstrap',
  '- Check for CLAUDE.md, README, .cursorrules — existing docs',
  '',
  '## Phase 2: Architecture Map (< 5 minutes)',
  '- Map directory structure to architectural boundaries',
  '- Trace request lifecycle: entry → middleware → handler → response',
  '- Map database layer: ORM, schema location, migration system',
  '',
  '## Phase 3: Data Flow Tracing',
  'Pick a feature. Trace: user action → frontend → API call → backend → DB → response → UI update.',
  '',
  '## Phase 4: Risk Assessment',
  '- Complexity concentration? God files, circular deps?',
  '- Test coverage? Tech debt? TODO comments?',
  '',
  '## Phase 5: Intelligence Report',
  // The fenced block below is the report template the agent fills in.
  '```',
  'ARCHITECTURE: [monolith|modular|microservices|monorepo]',
  'FRAMEWORK: [name + version]',
  'DATABASE: [type + ORM]',
  'AUTH: [strategy]',
  'KEY MODULES: [list with one-line descriptions]',
  'RISK AREAS: [list with severity]',
  'ENTRY POINTS: [5 files to read first, in order]',
  '```',
].join('\n')
|
|
7280
|
+
|
|
7281
|
+
// ── zopkit-blueprint-skill.mdc ────────────────────────────────────
// Feature Blueprint skill content — identical text to before, expressed
// as a single template literal rather than an array joined with '\n'.
// (Content lines are flush-left on purpose: template-literal indentation
// would leak into the written file.)
const blueprintSkillMdc = `---
description: ZopKit Feature Blueprint Skill — 5-step plan from intent to wired implementation
alwaysApply: false
---

# ZopKit Feature Blueprint Skill

Reference with @zopkit-blueprint-skill before planning or building a new feature.

## Step 1: Parse Intent
- What are we building? (feature, fix, refactor, integration)
- Who uses it? What are the constraints?

## Step 2: Design Data Model
- Entities, relationships, field types, indexes

## Step 3: Plan API Layer
- Endpoints: method, path, request/response shape, auth requirements
- Error cases and status codes

## Step 4: Build Frontend
- Components, state management, data fetching strategy
- UI states: loading, empty, error, success

## Step 5: Wire It Together
- Module structure, route/guard registration, error boundaries

## Output Format
For each file: [CREATE/MODIFY] path/to/file — Purpose — Dependencies`
|
|
7313
|
+
|
|
7314
|
+
// ── zopkit-security-skill.mdc ─────────────────────────────────────
// Security Audit skill content, one line per array element joined with
// '\n'. Loaded only when @-referenced (alwaysApply: false).
const securitySkillMdc = [
  '---',
  'description: ZopKit Security Audit Skill — 5-phase review: auth, injection, data protection, infrastructure',
  'alwaysApply: false',
  '---',
  '',
  '# ZopKit Security Audit Skill',
  '',
  'Reference with @zopkit-security-skill before any PR that touches auth, APIs, or user data.',
  '',
  '## Phase 1: Attack Surface',
  '- All API endpoints and their auth requirements',
  '- User input entry points (forms, URL params, headers)',
  '',
  '## Phase 2: Auth & Authorization',
  '- JWT: expiry, refresh, httpOnly cookies',
  '- Every endpoint has appropriate guards',
  '- Privilege escalation: can a user access another user\'s data?',
  '',
  '## Phase 3: Input Validation & Injection',
  '- Input validated before use (no raw req.body in queries)',
  '- SQL injection in raw queries? XSS in rendered content?',
  '- Command injection in shell-executing code?',
  '',
  '## Phase 4: Data Protection',
  '- Sensitive data never logged (passwords, tokens, PII)',
  '- API responses don\'t leak internal IDs or stack traces',
  '',
  '## Phase 5: Infrastructure',
  '- CORS: specific origins, not wildcard',
  '- Rate limiting on auth endpoints',
  '- Dependency vulnerabilities (npm audit)',
  '',
  '## Severity: CRITICAL → HIGH → MEDIUM → LOW → INFO',
].join('\n')
|
|
7350
|
+
|
|
7351
|
+
// Splice the ZopShipper and ZopScout persona sections in front of the
// ZopMerger heading so the written zopkit-agents.mdc covers all 6 agents.
// Same output as before; the injected text is broken out into named
// constants instead of one inline mega-string.
const mergerHeading = '## ZopMerger — Merge & Ship Guide'
const extraPersonas = [
  '## ZopShipper — Builder Agent',
  '',
  'Activate: "You are ZopShipper. Ship this feature: [description]"',
  '',
  'Builds features end-to-end. Scaffolds types, services, controllers, components, tests.',
  'Use for: implementing tasks, writing code from scratch, extending existing features.',
  '',
  '## ZopScout — Reconnaissance Agent',
  '',
  'Activate: "You are ZopScout. Map this codebase: [target]"',
  '',
  'Maps architecture, traces data flows, documents undocumented code.',
  'Use for: unfamiliar codebases, tracing bugs, understanding module boundaries.',
].join('\n')
const agentsMdcUpdated = agentsMdc.replace(mergerHeading, `${extraPersonas}\n\n${mergerHeading}`)
|
|
7354
|
+
|
|
7355
|
+
const files = [
|
|
7356
|
+
{ name: 'zopkit-merge-skill.mdc', content: mergeSkillMdc },
|
|
7357
|
+
{ name: 'zopkit-review-skill.mdc', content: reviewSkillMdc },
|
|
7358
|
+
{ name: 'zopkit-recon-skill.mdc', content: reconSkillMdc },
|
|
7359
|
+
{ name: 'zopkit-blueprint-skill.mdc', content: blueprintSkillMdc},
|
|
7360
|
+
{ name: 'zopkit-security-skill.mdc', content: securitySkillMdc },
|
|
7361
|
+
{ name: 'zopkit-agents.mdc', content: agentsMdcUpdated },
|
|
7362
|
+
]
|
|
7363
|
+
for (const f of files) {
|
|
7364
|
+
writeFileSync(join(rulesDir, f.name), f.content, 'utf8')
|
|
7365
|
+
}
|
|
7366
|
+
|
|
7367
|
+
return text({
|
|
7368
|
+
written: files.map(f => f.name),
|
|
7369
|
+
rulesDir,
|
|
7370
|
+
totalBytes: files.reduce((n, f) => n + f.content.length, 0),
|
|
7371
|
+
hint: [
|
|
7372
|
+
'6 skill files + agent definitions written.',
|
|
7373
|
+
'Skills — reference in Cursor with: @zopkit-merge-skill | @zopkit-review-skill | @zopkit-recon-skill | @zopkit-blueprint-skill | @zopkit-security-skill',
|
|
7374
|
+
'Agents — activate in Cursor with: "You are ZopShipper/ZopScout/ZopMerger/ZopReviewer/ZopGuard/ZopOrchestrator."',
|
|
7375
|
+
'Next: call generate_cursor_workspace(projectId, taskId, repoPath) to write task-specific context files for this session.',
|
|
7376
|
+
].join('\n'),
|
|
7377
|
+
})
|
|
7378
|
+
} catch (e) {
|
|
7379
|
+
return errorText(e.message)
|
|
7380
|
+
}
|
|
7381
|
+
}
|
|
7382
|
+
)
|
|
7383
|
+
}
|
|
7384
|
+
|
|
7385
|
+
// ── Orchestration Tools ───────────────────────────────────────────────────────
// Registers the sub-agent session lifecycle tools: spawn → report findings →
// complete → list. All four are thin wrappers over the per-task
// /agent-sessions REST endpoints. `ctx` is accepted for signature parity
// with the other register* functions but is not read here.
function registerOrchestrationTools(server, ctx) {

  // ── spawn_agent_session ───────────────────────────────────────────────────
  // Creates a named session record on the task via POST.
  server.tool(
    'spawn_agent_session',
    `Register a named sub-agent session on a task. Use this when delegating work to a specialist sub-agent (Scout, Reviewer, Builder, Coordinator).

Each session has:
- sessionId: unique ID you assign (e.g. "scout-auth-module", "review-pass-1")
- role: what this session does (scout, reviewer, builder, coordinator, custom)
- mission: what the session is trying to accomplish

After spawning, the sub-agent should call report_agent_findings() as it discovers things,
and complete_agent_session() when done. The orchestrator calls get_agent_sessions() to monitor progress.

Example flow:
1. spawn_agent_session(taskId, "scout-auth", "scout", "Map all auth middleware files and find injection points")
2. Sub-agent runs and calls report_agent_findings("scout-auth", taskId, "Found 3 middleware files...")
3. Sub-agent calls complete_agent_session("scout-auth", taskId, "Auth middleware mapped — see findings")
4. Orchestrator calls get_agent_sessions(taskId) to see the result`,
    {
      taskId: z.string().describe("Task's MongoDB ObjectId"),
      sessionId: z.string().describe('Unique session identifier — use short kebab-case (e.g. "scout-auth", "review-pass-1")'),
      role: z.string().describe('Session role — scout | reviewer | builder | coordinator | custom'),
      mission: z.string().describe('One-sentence description of what this session will accomplish'),
    },
    async ({ taskId, sessionId, role, mission }) => {
      try {
        const res = await api.post(`/api/tasks/${taskId}/agent-sessions`, { sessionId, role, mission })
        if (!res?.success) return errorText(res?.message || 'Failed to spawn session')
        // Echo the created session plus copy-pasteable follow-up calls.
        return text({
          spawned: true,
          session: res.data.session,
          nextSteps: [
            `Sub-agent: call report_agent_findings("${sessionId}", "${taskId}", yourFindings) as you discover things`,
            `Sub-agent: call complete_agent_session("${sessionId}", "${taskId}", summary) when done`,
            `Orchestrator: call get_agent_sessions("${taskId}") to monitor all sessions`,
          ],
        })
      } catch (e) {
        return errorText(e.message)
      }
    }
  )

  // ── report_agent_findings ─────────────────────────────────────────────────
  // PATCHes intermediate findings onto an existing session record.
  server.tool(
    'report_agent_findings',
    `Store intermediate findings from a running sub-agent session. Call this as the sub-agent discovers information — before calling complete_agent_session.

Findings are appended (replacing previous findings for this session) and visible to the orchestrator via get_agent_sessions.
Use this to stream progress back so the orchestrator can act on partial results.`,
    {
      sessionId: z.string().describe('The session ID from spawn_agent_session'),
      taskId: z.string().describe("Task's MongoDB ObjectId"),
      findings: z.string().describe('Findings discovered so far — plain text or markdown, any length'),
    },
    async ({ sessionId, taskId, findings }) => {
      try {
        const res = await api.patch(`/api/tasks/${taskId}/agent-sessions/${sessionId}`, { findings })
        if (!res?.success) return errorText(res?.message || 'Failed to report findings')
        return text({ reported: true, sessionId, hint: `Findings stored. Call complete_agent_session("${sessionId}", "${taskId}", summary) when done.` })
      } catch (e) {
        return errorText(e.message)
      }
    }
  )

  // ── complete_agent_session ────────────────────────────────────────────────
  // PATCHes the final summary and terminal status onto the session record.
  server.tool(
    'complete_agent_session',
    `Mark a sub-agent session as complete and store the final summary.

Call this when the sub-agent has finished its mission. Sets status to "done" and records endedAt.
The orchestrator can then read all completed sessions via get_agent_sessions to aggregate results.`,
    {
      sessionId: z.string().describe('The session ID from spawn_agent_session'),
      taskId: z.string().describe("Task's MongoDB ObjectId"),
      summary: z.string().describe('One-paragraph summary of what was accomplished and key findings'),
      // Optional terminal status; defaults to 'done' in the handler signature.
      status: z.enum(['done', 'failed']).optional().describe('Final status — defaults to "done"'),
    },
    async ({ sessionId, taskId, summary, status = 'done' }) => {
      try {
        const res = await api.patch(`/api/tasks/${taskId}/agent-sessions/${sessionId}`, { summary, status })
        if (!res?.success) return errorText(res?.message || 'Failed to complete session')
        return text({
          completed: true,
          sessionId,
          status,
          summary,
          hint: `Session marked ${status}. Orchestrator: call get_agent_sessions("${taskId}") to see all results. Call remember("${taskId}", "session_${sessionId}", summary) to persist the finding permanently.`,
        })
      } catch (e) {
        return errorText(e.message)
      }
    }
  )

  // ── get_agent_sessions ────────────────────────────────────────────────────
  // GETs every session for the task and renders a human-readable digest
  // alongside the raw session objects.
  server.tool(
    'get_agent_sessions',
    `Retrieve all named sub-agent sessions for a task, with their status, mission, and findings.

Use this as the orchestrator to monitor parallel sub-agents and aggregate their results.
Sessions are returned ordered by startedAt. Status is active | done | failed.`,
    {
      taskId: z.string().describe("Task's MongoDB ObjectId"),
    },
    async ({ taskId }) => {
      try {
        const res = await api.get(`/api/tasks/${taskId}/agent-sessions`)
        if (!res?.success) return errorText(res?.message || 'Failed to retrieve sessions')
        const { sessions } = res.data
        if (sessions.length === 0) {
          return text({ sessions: [], hint: `No sessions yet. Call spawn_agent_session("${taskId}", sessionId, role, mission) to create one.` })
        }
        // One digest entry per session: status, id, role, age, and — when
        // present — summary and a findings preview truncated to 200 chars.
        const summary = sessions.map(s => {
          const age = s.startedAt ? Math.round((Date.now() - new Date(s.startedAt).getTime()) / 60000) + 'm ago' : '?'
          const end = s.endedAt ? ` → ended ${Math.round((Date.now() - new Date(s.endedAt).getTime()) / 60000)}m ago` : ''
          return ` [${s.status.toUpperCase().padEnd(6)}] ${s.sessionId} (${s.role}) — started ${age}${end}\n Mission: ${s.mission}${s.summary ? `\n Result: ${s.summary}` : ''}${s.findings ? `\n Findings: ${s.findings.slice(0, 200)}${s.findings.length > 200 ? '…' : ''}` : ''}`
        }).join('\n\n')

        // Status tallies for the one-line header of the digest.
        const active = sessions.filter(s => s.status === 'active').length
        const done = sessions.filter(s => s.status === 'done').length
        const failed = sessions.filter(s => s.status === 'failed').length

        return text({ sessions, summary: `${sessions.length} sessions — ${active} active, ${done} done, ${failed} failed\n\n${summary}` })
      } catch (e) {
        return errorText(e.message)
      }
    }
  )
}
|
|
7519
|
+
|
|
7520
|
+
// ── Monitoring Tools ──────────────────────────────────────────────────────────
// Registers lightweight pipeline-polling tools. `ctx.scopedProjectId`,
// when set, pins the whole MCP session to a single project.
function registerMonitoringTools(server, ctx) {
  const { scopedProjectId } = ctx

  // ── poll_pipeline_events ──────────────────────────────────────────────────
  // Delta-poll: returns only tasks updated after `since`, so monitoring
  // loops avoid refetching the full pipeline on every tick.
  server.tool(
    'poll_pipeline_events',
    `Poll for pipeline changes since a given timestamp — returns only tasks that changed, not the full pipeline.

Use this for lightweight monitoring loops:
1. Store the current timestamp: since = new Date().toISOString()
2. Wait (sleep or user action)
3. Call poll_pipeline_events(projectId, since) to see what changed
4. Update since to fetchedAt from the response for the next poll

Returns: changed tasks with their new column, branch info, and agent role.
Much faster than calling get_branch_pipeline_status on every tick.`,
    {
      projectId: z.string().describe("Project's MongoDB ObjectId"),
      since: z.string().describe('ISO 8601 timestamp — only tasks updated after this time are returned. Use fetchedAt from the previous response.'),
    },
    async ({ projectId, since }) => {
      // Enforce session project scoping before touching the API.
      if (scopedProjectId && projectId !== scopedProjectId) {
        return errorText(`Access denied: session is scoped to project ${scopedProjectId}`)
      }
      try {
        // `since` is caller-supplied, so URL-encode it into the query string.
        const res = await api.get(`/api/projects/${projectId}/pipeline-events?since=${encodeURIComponent(since)}`)
        if (!res?.success) return errorText(res?.message || 'Failed to poll pipeline events')
        const { events, fetchedAt } = res.data
        if (events.length === 0) {
          // Quiet tick: hand back fetchedAt so the caller can advance its cursor.
          return text({ events: [], fetchedAt, hint: `No changes since ${since}. Use fetchedAt (${fetchedAt}) as the next "since" value.` })
        }
        // One human-readable line per changed task for the summary field.
        const lines = events.map(e => {
          const branch = e.github?.headBranch || '—'
          const pr = e.github?.prNumber ? `PR #${e.github.prNumber}` : 'no PR'
          return ` ${e.taskKey} [${e.column}] — branch: ${branch} | ${pr} | role: ${e.agentRole || 'none'}`
        }).join('\n')
        return text({
          events,
          fetchedAt,
          summary: `${events.length} task${events.length !== 1 ? 's' : ''} changed since ${since}:\n${lines}`,
          hint: `Use fetchedAt (${fetchedAt}) as the next "since" value.`,
        })
      } catch (e) {
        return errorText(e.message)
      }
    }
  )
}
|
|
7569
|
+
|
|
7570
|
+
// ── Shared workspace scan helper ─────────────────────────────────────────────
|
|
7571
|
+
// Called by scan_workspace tool AND automatically by kickoff_task /
|
|
7572
|
+
// generate_cursor_workspace so results are always fresh without the developer
|
|
7573
|
+
// needing to run a separate command.
|
|
7574
|
+
// Scan the detected repo's .cursor/ directory and compare what exists on disk
// against the project's agentConfig (rules / subagents / skills).
//
// Returns { scanned, detectedRoot, repoVerification, cursorDirExists, sections, summary, allItems }
// or { error, message } on failure.
async function runWorkspaceScan(taskId, task, project, overrideRepoPath) {
  const cfg = project.agentConfig || {}

  // ── 1. Detect repo root ─────────────────────────────────────────────────────
  // Cursor starts the MCP with cwd = the folder the developer has open.
  // Walk up to find the .git root so we always scan the full repo, not a subfolder.
  // Returns null if no .git found (avoids false positives from home-dir .git repos).
  function findGitRoot(startDir) {
    let dir = startDir
    // Bounded walk: at most 10 levels up, stopping at the filesystem root.
    for (let i = 0; i < 10; i++) {
      if (existsSync(join(dir, '.git'))) return dir
      const parent = join(dir, '..')
      if (parent === dir) break // join(root, '..') === root → reached the top
      dir = parent
    }
    return null // no .git found — caller must handle
  }
  const detectedRoot = findGitRoot(overrideRepoPath || process.cwd())

  if (!detectedRoot) {
    const startedFrom = overrideRepoPath || process.cwd()
    return {
      error: true,
      message: `No .git directory found in or above "${startedFrom}". Make sure Cursor is open inside your project folder, or pass repoPath explicitly.`,
      hint: `scan_workspace repoPath="/absolute/path/to/your/repo"`,
    }
  }

  // ── 2. Verify it's the right repo via git remote ────────────────────────────
  // Compares local "origin" remote to project.githubRepoFullName.
  // Case-insensitive — GitHub repo names are not case-sensitive.
  // Prevents scanning the wrong repo when multiple projects are on the same machine.
  const norm = url => url.replace(/^(https?:\/\/github\.com\/|git@github\.com:)/, '').replace(/\.git$/, '').trim().toLowerCase()
  let repoVerification = { checked: false, matched: null, localRemote: null, projectRepo: project.githubRepoFullName || null }
  try {
    const remoteUrl = runGit('remote get-url origin', detectedRoot).trim()
    const localRepo = norm(remoteUrl)
    const projectRepo = project.githubRepoFullName ? norm(project.githubRepoFullName) : null

    if (projectRepo) {
      // Project has a GitHub link — verify and block on mismatch
      repoVerification = { checked: true, matched: localRepo === projectRepo, localRemote: localRepo, projectRepo }
      if (!repoVerification.matched) {
        return {
          error: true,
          message: `Wrong repo — Cursor is open in "${localRepo}" but this task belongs to "${projectRepo}". Open Cursor in the correct repo or pass repoPath explicitly.`,
          fix: [
            `Option 1: Run scan_workspace repoPath="/path/to/${projectRepo.split('/')[1]}"`,
            `Option 2: Make sure git remote origin is set to the correct repo`,
          ],
        }
      }
    } else {
      // No GitHub link — record the remote as a hint so user can tell if it looks right
      repoVerification = { checked: false, matched: null, localRemote: localRepo, projectRepo: null, warning: `Project has no GitHub repo linked — could not verify identity. Detected remote: "${localRepo}". If this is the wrong repo, pass repoPath explicitly.` }
    }
  } catch {
    // git unavailable or no remote — surface the detected root as a hint
    repoVerification = { checked: false, matched: null, localRemote: null, projectRepo: project.githubRepoFullName || null, warning: `Could not read git remote — make sure git is installed and origin is configured. Scanned: "${detectedRoot}".` }
  }

  // ── 3. Read .cursor/ subdirectories ─────────────────────────────────────────
  const cursorDir = join(detectedRoot, '.cursor')
  const rulesDir = join(cursorDir, 'rules')
  const agentsDir = join(cursorDir, 'agents')
  const skillsDir = join(cursorDir, 'skills')
  const cursorDirExists = existsSync(cursorDir)

  // List the plain files in a directory as Map<filename, { filename, size }>.
  // Returns an empty Map on any error — a missing/unreadable dir is not fatal.
  function listFilesSync(dir) {
    if (!existsSync(dir)) return new Map()
    try {
      const map = new Map()
      for (const filename of readdirSync(dir)) {
        try {
          const fp = join(dir, filename)
          const st = statSync(fp) // stat once per entry (original stat'd twice)
          if (st.isFile()) map.set(filename, { filename, size: st.size })
        } catch { /* skip unreadable entries */ }
      }
      return map
    } catch { return new Map() }
  }

  const diskRules = listFilesSync(rulesDir)
  const diskAgents = listFilesSync(agentsDir)
  const diskSkills = listFilesSync(skillsDir)

  // ── 4. Compare config vs disk ────────────────────────────────────────────────
  // For each configured item, classify its on-disk state; then flag any disk
  // files that no configured item accounts for as 'orphaned'.
  function compareSection(configItems, diskMap, ext) {
    const results = []
    const seenOnDisk = new Set()
    for (const item of (configItems || [])) {
      if (!item.name) continue
      const filename = `${item.name}${ext}`
      const disk = diskMap.get(filename)
      seenOnDisk.add(filename)
      const isEnabled = item.enabled !== false
      const diskStatus = !isEnabled
        ? (disk ? 'present_but_disabled' : 'absent_disabled')
        : (disk ? 'present' : 'missing')
      results.push({ name: item.name, description: item.description || '', enabled: isEnabled, filename, diskStatus, size: disk?.size ?? null })
    }
    for (const [filename, info] of diskMap) {
      if (!seenOnDisk.has(filename)) {
        results.push({ name: filename.replace(new RegExp(`\\${ext}$`), ''), description: '', enabled: null, filename, diskStatus: 'orphaned', size: info.size })
      }
    }
    return results
  }

  const sections = {
    rules: compareSection(cfg.rules, diskRules, '.mdc'),
    agents: compareSection(cfg.subagents, diskAgents, '.md'),
    skills: compareSection(cfg.skills, diskSkills, '.md'),
  }

  const allItems = [...sections.rules, ...sections.agents, ...sections.skills]
  const summary = {
    present: allItems.filter(i => i.diskStatus === 'present').length,
    missing: allItems.filter(i => i.diskStatus === 'missing').length,
    orphaned: allItems.filter(i => i.diskStatus === 'orphaned').length,
    disabled: allItems.filter(i => ['absent_disabled', 'present_but_disabled'].includes(i.diskStatus)).length,
    total: allItems.length,
  }

  // ── 5. Persist to task so the UI can display without server filesystem access
  await api.patch(`/api/tasks/${taskId}`, {
    lastWorkspaceScan: { repoPath: detectedRoot, cursorDirExists, sections, summary },
  }).catch(() => {/* non-fatal — scan result returned regardless */})

  return { scanned: true, detectedRoot, repoVerification, cursorDirExists, sections, summary, allItems }
}
|
|
7708
|
+
|
|
7709
|
+
// ── Workspace Scan Tool ───────────────────────────────────────────────────────
// Registers the `scan_workspace` MCP tool. Resolves a task (explicit or
// auto-detected from the caller's in-progress work), loads its project, then
// delegates the actual filesystem comparison to runWorkspaceScan.
function registerWorkspaceScanTool(server) {
  server.tool(
    'scan_workspace',
    `Scan this repo's .cursor/ directory and compare it against the project config.

ZERO ARGS NEEDED in most cases — the MCP already runs inside your Cursor workspace:
  scan_workspace ← auto-detects task (your in-progress work) + repo root
  scan_workspace taskId="..." ← explicit task, auto-detects repo root
  scan_workspace repoPath="..."← explicit path override (e.g. Cursor opened in parent folder)

HOW AUTO-DETECTION WORKS:
- Task: looks up your current in-progress task for this project
- Repo root: walks up from process.cwd() (= the folder Cursor has open) to find .git/
- Verified: reads git remote "origin" and checks it matches the project's linked GitHub repo

WHAT IT CHECKS:
  .cursor/rules/ — .mdc rule files vs project agentConfig.rules
  .cursor/agents/ — .md agent files vs project agentConfig.subagents
  .cursor/skills/ — .md skill files vs project agentConfig.skills

Results are saved to the task and shown in InternalTool → Agent → Workspace tab.
Also runs automatically inside kickoff_task and generate_cursor_workspace.`,
    {
      taskId: z.string().optional().describe('Task ObjectId. Omit to auto-detect your current in-progress task for this project.'),
      repoPath: z.string().optional().describe('Repo root override. Omit to use process.cwd() (where Cursor is open).'),
    },
    async ({ taskId, repoPath } = {}) => {
      // ── Resolve task ─────────────────────────────────────────────────────────
      let task, resolvedTaskId
      if (taskId) {
        // Explicit task id — fetch it directly.
        const res = await api.get(`/api/tasks/${taskId}`)
        if (!res?.success) return errorText('Task not found')
        task = res.data.task
        resolvedTaskId = taskId
      } else {
        // Auto-detect: find the developer's current in-progress task
        const myTasksRes = await api.get('/api/users/me/tasks?column=in_progress')
        const myTasks = myTasksRes?.data?.tasks || []

        // If scoped to a project, filter to that project
        // (cliProject is the project id the MCP was launched with, if any).
        const candidates = cliProject
          ? myTasks.filter(t => String(t.project?._id || t.project) === cliProject)
          : myTasks

        // Zero or multiple candidates: refuse to guess — ask the caller to be explicit.
        if (candidates.length === 0) {
          return text({
            error: true,
            message: 'No in-progress task found for your account. Start a task first (kickoff_task), then re-run scan_workspace.',
            hint: 'Or pass taskId explicitly: scan_workspace taskId="<id>"',
          })
        }
        if (candidates.length > 1) {
          return text({
            error: true,
            message: `${candidates.length} in-progress tasks found. Pass taskId to specify which one to scan.`,
            tasks: candidates.map(t => ({ id: t._id, key: t.key, title: t.title })),
          })
        }
        task = candidates[0]
        resolvedTaskId = String(task._id)
      }

      // ── Resolve project ───────────────────────────────────────────────────────
      // task.project may be populated (object with _id) or a bare id string.
      const projectId = task.project?._id || task.project
      const projRes = await api.get(`/api/projects/${projectId}`)
      if (!projRes?.success) return errorText('Project not found')
      const project = projRes?.data?.project || {}

      // ── Run the scan ──────────────────────────────────────────────────────────
      const result = await runWorkspaceScan(resolvedTaskId, task, project, repoPath)
      if (result.error) return text(result)

      const { detectedRoot, repoVerification, cursorDirExists, summary, sections, allItems } = result
      const missingItems = allItems.filter(i => i.diskStatus === 'missing')
      const orphanedItems = allItems.filter(i => i.diskStatus === 'orphaned')

      // Shape the response: scan data plus human-readable action hints.
      return text({
        scanned: true,
        task: { key: task.key, title: task.title },
        detectedRepoRoot: detectedRoot,
        repoVerification,
        cursorDirExists,
        summary,
        sections,
        uiHint: 'Results saved → InternalTool task page → Agent → Workspace tab (auto-refreshes).',
        actions: [
          missingItems.length > 0
            ? `⚠️ ${missingItems.length} item(s) missing from disk. Run: install_zopkit_skills repoPath="${detectedRoot}"`
            : `✅ All configured items are present on disk.`,
          orphanedItems.length > 0
            ? `ℹ️ ${orphanedItems.length} orphaned file(s) not in project config: ${orphanedItems.map(i => i.filename).join(', ')}`
            : null,
        ].filter(Boolean),
      })
    }
  )
}
|
|
7807
|
+
|
|
7808
|
+
// ── Kit Tools ─────────────────────────────────────────────────────────────────
|
|
7809
|
+
// ── Kit Tools ─────────────────────────────────────────────────────────────────
// Registers the ZopKit catalog/recommendation tools (inspect_kit_for_task,
// apply_kit_to_task). The catalog and keyword signals below are static data.
function registerKitTools(server) {

  // ── Full catalog definition (mirrors client/src/lib/zopkit.js) ───────────────
  // Every available agent with a short description and a "when to use" hint.
  const CATALOG_AGENTS = [
    { name: 'zop-shipper', description: 'Feature builder agent — implements code end-to-end', when: 'builder tasks, new features, bug fixes' },
    { name: 'zop-scout', description: 'Codebase recon agent — read-only analysis and architecture map', when: 'exploring unfamiliar code, tracing flows' },
    { name: 'zop-guard', description: 'Security audit agent — auth, vulnerabilities, compliance', when: 'security review, auth hardening' },
    { name: 'zop-merger', description: 'Merge & ship agent — rebase, conflict resolution, push to main', when: 'merging branches, resolving conflicts' },
    { name: 'zop-reviewer', description: 'Code review agent — PR review, diff quality, approval gate', when: 'reviewing pull requests' },
    { name: 'zop-orchestrator', description: 'Multi-agent coordinator — decomposes and orchestrates large tasks', when: 'large complex tasks needing parallel builders' },
  ]
  // Skill definitions (reusable multi-phase methodologies).
  const CATALOG_SKILLS = [
    { name: 'zop-blueprint', description: 'Feature Blueprint — data model → API → frontend → wiring', when: 'planning and building a new feature' },
    { name: 'zop-recon', description: 'Codebase Recon — 5-phase architecture intelligence sweep', when: 'exploring a codebase or module' },
    { name: 'zop-guard-skill', description: 'Security Audit — 5-phase OWASP + auth + compliance sweep', when: 'security review' },
    { name: 'zop-merge-skill', description: 'Merge & Ship — 8-phase safe merge + rebase methodology', when: 'merging or rebasing branches' },
    { name: 'zop-review-skill', description: 'Pipeline Code Review — 7-step review methodology', when: 'reviewing PRs' },
    { name: 'session-start', description: 'Session Start — orients the agent at the start of every task', when: 'always include — activates on every session' },
  ]
  // Prompt templates (single-shot workflow prompts).
  const CATALOG_PROMPTS = [
    { name: 'ship-feature', description: 'Ship Feature — blueprint + build + test + PR workflow', when: 'building a feature end-to-end' },
    { name: 'codebase-recon', description: 'Codebase Recon — systematic exploration prompt', when: 'exploring an unfamiliar codebase' },
    { name: 'security-sweep', description: 'Security Sweep — full OWASP security audit prompt', when: 'security review or audit' },
    { name: 'merge-guide', description: 'Merge Guide — step-by-step merge + rebase instructions', when: 'merging a branch safely' },
    { name: 'review-pr', description: 'Review PR — thorough code review prompt', when: 'reviewing a pull request' },
  ]

  // ── Keyword signals per category (same as recommendKit.js) ───────────────────
  // Each category maps lowercase keywords found in the task text to the
  // agent/skill/prompt kit that should be activated for that kind of work.
  const KIT_SIGNALS = {
    builder: {
      title: 'Build / Implement', color: 'blue', role: 'builder',
      keywords: ['build', 'implement', 'create', 'add feature', 'feature', 'endpoint', 'api', 'scaffold', 'fix bug', 'bugfix', 'refactor', 'write', 'function', 'component', 'page', 'route', 'module', 'service', 'integrate', 'develop', 'crud', 'migration', 'schema', 'model', 'frontend', 'backend', 'ui', 'form', 'test', 'unit test'],
      agents: ['zop-shipper'], skills: ['zop-blueprint', 'session-start'], prompts: ['ship-feature'],
    },
    scout: {
      title: 'Explore / Research', color: 'amber', role: 'scout',
      keywords: ['explore', 'understand', 'map', 'architecture', 'trace', 'document', 'research', 'investigate', 'analyze', 'codebase', 'unfamiliar', 'how does', 'what is', 'find', 'locate', 'data flow', 'dependency', 'diagram', 'audit trail', 'recon', 'tech debt', 'dead code', 'legacy'],
      agents: ['zop-scout'], skills: ['zop-recon', 'session-start'], prompts: ['codebase-recon'],
    },
    security: {
      title: 'Security / Audit', color: 'red', role: 'builder',
      keywords: ['security', 'auth', 'authentication', 'authorization', 'vulnerability', 'audit', 'sql injection', 'xss', 'csrf', 'token', 'jwt', 'password', 'encrypt', 'hash', 'permission', 'access control', 'rbac', 'pentest', 'exploit', 'secure', 'leak', 'breach', 'compliance', 'gdpr', 'cors', 'rate limit', 'sanitize', 'owasp'],
      agents: ['zop-guard'], skills: ['zop-guard-skill', 'session-start'], prompts: ['security-sweep'],
    },
    merge: {
      title: 'Merge / Ship', color: 'cyan', role: 'builder',
      keywords: ['merge', 'rebase', 'conflict', 'behind main', 'push', 'ship', 'land', 'pr ready', 'close pr', 'squash', 'force push', 'origin'],
      agents: ['zop-merger'], skills: ['zop-merge-skill', 'session-start'], prompts: ['merge-guide'],
    },
    review: {
      title: 'Code Review', color: 'emerald', role: 'reviewer',
      keywords: ['review', 'code review', 'pr review', 'approve', 'diff', 'feedback', 'check pr', 'pull request review', 'lgtm', 'request changes', 'reviewer', 'comment on pr'],
      agents: ['zop-reviewer'], skills: ['zop-review-skill', 'session-start'], prompts: ['review-pr'],
    },
    coordinator: {
      title: 'Coordination / Multi-agent', color: 'violet', role: 'coordinator',
      keywords: ['large', 'complex', 'multiple', 'parallel', 'coordinate', 'orchestrate', 'decompose', 'breakdown', 'sub-task', 'subtask', 'multi-step', 'multi-agent'],
      agents: ['zop-orchestrator'], skills: ['zop-blueprint', 'zop-recon', 'session-start'], prompts: ['ship-feature'],
    },
  }
|
|
7869
|
+
|
|
7870
|
+
function scoreCorpus(corpus) {
|
|
7871
|
+
const results = []
|
|
7872
|
+
for (const [cat, sig] of Object.entries(KIT_SIGNALS)) {
|
|
7873
|
+
const matched = sig.keywords.filter(kw => corpus.includes(kw))
|
|
7874
|
+
if (matched.length > 0) results.push({ cat, sig, matched, score: matched.length })
|
|
7875
|
+
}
|
|
7876
|
+
results.sort((a, b) => b.score - a.score)
|
|
7877
|
+
return results
|
|
7878
|
+
}
|
|
7879
|
+
|
|
7880
|
+
// ── inspect_kit_for_task ──────────────────────────────────────────────────────
|
|
7881
|
+
  server.tool(
    'inspect_kit_for_task',
    `Observe all available ZopKit agents, skills, and prompts for a task and get an optimal recommendation.

Call this at the start of a session or when you need to decide which agents/skills/prompts to activate.

Returns:
- catalog: every available agent, skill, and prompt with status relative to the project config
- status: "active" (in project + enabled for task) | "in_project" (in project, not task-overridden) |
  "project_disabled" (in project but disabled) | "catalog_only" (not yet added to project)
- recommendation: auto-detected task type + optimal agent/skill/prompt set with matched keywords
- current_overrides: what's currently set on this task
- suggested_overrides: ready-to-apply override map — pass directly to apply_kit_to_task

After reviewing, call apply_kit_to_task(taskId, confirmed=true) to apply the recommendation,
or apply_kit_to_task(taskId, customOverrides={...}, confirmed=true) for a custom selection.`,
    {
      taskId: z.string().describe("Task's MongoDB ObjectId"),
    },
    async ({ taskId }) => {
      // ── Load the task and its project config ────────────────────────────────
      const taskRes = await api.get(`/api/tasks/${taskId}`)
      if (!taskRes?.success) return errorText('Task not found')
      const task = taskRes.data.task

      // task.project may be populated (object) or a bare id.
      const projectId = task.project?._id || task.project
      const projRes = await api.get(`/api/projects/${projectId}`)
      const proj = projRes?.data?.project || {}
      const cfg = proj.agentConfig || {}

      const projAgents = cfg.subagents || []
      const projSkills = cfg.skills || []
      const projPrompts = cfg.prompts || []
      // Task-level overrides: { subagents: {name: bool}, skills: {...}, prompts: {...} }
      const overrides = task.agentKitOverrides || {}

      // Build project lookup maps
      const projAgentMap = new Map(projAgents.map(i => [i.name, i]))
      const projSkillMap = new Map(projSkills.map(i => [i.name, i]))
      const projPromptMap = new Map(projPrompts.map(i => [i.name, i]))

      // Classify one catalog item relative to project config + task overrides.
      function itemStatus(type, name) {
        const map = type === 'agent' ? projAgentMap : type === 'skill' ? projSkillMap : projPromptMap
        const sec = type === 'agent' ? 'subagents' : type === 'skill' ? 'skills' : 'prompts'
        const projItem = map.get(name)
        if (!projItem) return 'catalog_only'
        const projectEnabled = projItem.enabled !== false
        const taskVal = overrides[sec]?.[name]
        if (!projectEnabled) return 'project_disabled'
        if (taskVal === false) return 'task_disabled'
        // NOTE(review): projectEnabled is always true at this point (the
        // !projectEnabled case returned above), so this condition always
        // holds and the 'in_project' branch below is unreachable. Confirm
        // whether 'active' was meant to require taskVal === true.
        if (taskVal === true || projectEnabled) return 'active'
        return 'in_project'
      }

      // Build full catalog with status
      const catalogView = {
        agents: CATALOG_AGENTS.map(a => ({ ...a, status: itemStatus('agent', a.name) })),
        skills: CATALOG_SKILLS.map(s => ({ ...s, status: itemStatus('skill', s.name) })),
        prompts: CATALOG_PROMPTS.map(p => ({ ...p, status: itemStatus('prompt', p.name) })),
      }

      // Recommendation via keyword scoring
      // Corpus = all task text, lowercased to match the lowercase keywords.
      const corpus = [task.title || '', task.description || '', task.readmeMarkdown || ''].join(' ').toLowerCase()
      const scored = scoreCorpus(corpus)

      // Fallback chain: top keyword match → task's assigned role → builder default.
      let primary = scored[0] || null
      if (!primary && task.agentRole) {
        const fallbackCat = { builder: 'builder', scout: 'scout', reviewer: 'review', coordinator: 'coordinator' }[task.agentRole]
        if (fallbackCat && KIT_SIGNALS[fallbackCat]) {
          primary = { cat: fallbackCat, sig: KIT_SIGNALS[fallbackCat], matched: [], score: 0, fromRole: true }
        }
      }
      if (!primary) primary = { cat: 'builder', sig: KIT_SIGNALS.builder, matched: [], score: 0, fromRole: false }

      // Confidence buckets by raw keyword-hit count.
      const confidence = primary.score === 0 ? 'default' : primary.score >= 3 ? 'strong' : primary.score >= 2 ? 'good' : 'possible'

      // Resolve suggested items against project config (only items in project can be task-overridden)
      function resolveItems(type, names) {
        const map = type === 'agent' ? projAgentMap : type === 'skill' ? projSkillMap : projPromptMap
        return names.filter(n => map.has(n))
      }

      const rec = primary.sig
      const suggestedAgents = resolveItems('agent', rec.agents)
      const suggestedSkills = resolveItems('skill', rec.skills)
      const suggestedPrompts = resolveItems('prompt', rec.prompts)

      // Build the override map to pass to apply_kit_to_task
      // (true for recommended items, false for everything else in the project).
      const suggestedOverrides = {
        subagents: Object.fromEntries(projAgents.map(a => [a.name, rec.agents.includes(a.name)])),
        skills: Object.fromEntries(projSkills.map(s => [s.name, rec.skills.includes(s.name)])),
        prompts: Object.fromEntries(projPrompts.map(p => [p.name, rec.prompts.includes(p.name)])),
      }

      // Items in recommendation but NOT in project config (hint to add them)
      const missingFromProject = {
        agents: rec.agents.filter(n => !projAgentMap.has(n)),
        skills: rec.skills.filter(n => !projSkillMap.has(n)),
        prompts: rec.prompts.filter(n => !projPromptMap.has(n)),
      }

      return text({
        task: { key: task.key, title: task.title, agentRole: task.agentRole || null },

        // ── Full catalog with status ──
        catalog: catalogView,

        // ── Recommendation ──
        recommendation: {
          detected: primary.cat,
          title: rec.title,
          confidence,
          role: rec.role,
          matchedKeywords: primary.matched,
          agents: rec.agents,
          skills: rec.skills,
          prompts: rec.prompts,
          note: primary.fromRole
            ? `No strong signals — defaulting to ${primary.cat} based on task role.`
            : primary.score === 0
              ? 'No signals detected — using builder defaults.'
              : `Detected "${rec.title}" from ${primary.matched.length} keyword match(es).`,
        },

        // ── What's active right now ──
        current: {
          activeAgents: catalogView.agents.filter(a => a.status === 'active').map(a => a.name),
          activeSkills: catalogView.skills.filter(s => s.status === 'active').map(s => s.name),
          activePrompts: catalogView.prompts.filter(p => p.status === 'active').map(p => p.name),
          overrides: overrides,
        },

        // ── What will change if you apply ──
        suggested: {
          agents: suggestedAgents,
          skills: suggestedSkills,
          prompts: suggestedPrompts,
          role: rec.role,
        },

        missingFromProject: (missingFromProject.agents.length + missingFromProject.skills.length + missingFromProject.prompts.length) > 0
          ? { ...missingFromProject, hint: 'These items are in the ZopKit catalog but not in your project config. Add them via Agent Config (⚙ icon) to enable task-level toggling.' }
          : null,

        // ── Ready-to-use override payload ──
        suggestedOverrides,

        nextStep: `Review the recommendation above. Call apply_kit_to_task with taskId="${taskId}" and confirmed=true to apply the suggested kit, or pass customOverrides to set a custom selection.`,
      })
    }
  )
|
|
8030
|
+
|
|
8031
|
+
// ── apply_kit_to_task ─────────────────────────────────────────────────────────
|
|
8032
|
+
  server.tool(
    'apply_kit_to_task',
    `Apply an agent/skill/prompt kit assignment to a task.

Use after inspect_kit_for_task to assign the recommended or customised kit.

Two modes:
1. Apply the recommended kit: call with taskId + confirmed=true (no customOverrides)
2. Apply custom selections: call with taskId + customOverrides + confirmed=true

customOverrides format (all keys optional):
{ subagents: { "zop-shipper": true, "zop-scout": false },
  skills: { "zop-blueprint": true, "session-start": true },
  prompts: { "ship-feature": true } }

After applying, the kit is visible in the InternalTool UI under the task's Kit tab.
The next generate_cursor_workspace call will include these active items in the .mdc workspace files.`,
    {
      taskId: z.string().describe("Task's MongoDB ObjectId"),
      confirmed: z.boolean().optional().default(false).describe('Set true to apply. Set false (default) to preview what will change.'),
      customOverrides: z.object({
        subagents: z.record(z.boolean()).optional(),
        skills: z.record(z.boolean()).optional(),
        prompts: z.record(z.boolean()).optional(),
      }).optional().describe('Custom override map. Omit to use the recommendation from inspect_kit_for_task.'),
    },
    async ({ taskId, confirmed = false, customOverrides }) => {
      // ── Load task and project config ────────────────────────────────────────
      const taskRes = await api.get(`/api/tasks/${taskId}`)
      if (!taskRes?.success) return errorText('Task not found')
      const task = taskRes.data.task

      const projectId = task.project?._id || task.project
      const projRes = await api.get(`/api/projects/${projectId}`)
      const proj = projRes?.data?.project || {}
      const cfg = proj.agentConfig || {}

      const projAgents = cfg.subagents || []
      const projSkills = cfg.skills || []
      const projPrompts = cfg.prompts || []

      let overridesToApply = customOverrides

      // If no custom overrides, run the recommendation engine to get the suggested kit
      // (same keyword-scoring + role-fallback chain as inspect_kit_for_task).
      if (!overridesToApply) {
        const corpus = [task.title || '', task.description || '', task.readmeMarkdown || ''].join(' ').toLowerCase()
        const scored = scoreCorpus(corpus)
        let primary = scored[0]
        if (!primary && task.agentRole) {
          const fallback = { builder: 'builder', scout: 'scout', reviewer: 'review', coordinator: 'coordinator' }[task.agentRole]
          if (fallback && KIT_SIGNALS[fallback]) primary = { cat: fallback, sig: KIT_SIGNALS[fallback], matched: [], score: 0 }
        }
        if (!primary) primary = { cat: 'builder', sig: KIT_SIGNALS.builder, matched: [], score: 0 }

        const rec = primary.sig
        overridesToApply = {
          subagents: Object.fromEntries(projAgents.map(a => [a.name, rec.agents.includes(a.name)])),
          skills: Object.fromEntries(projSkills.map(s => [s.name, rec.skills.includes(s.name)])),
          prompts: Object.fromEntries(projPrompts.map(p => [p.name, rec.prompts.includes(p.name)])),
        }
      }

      // Build a human-readable diff
      // (only entries whose value actually differs from the current override).
      const existing = task.agentKitOverrides || {}
      const changes = []
      for (const [sec, map] of Object.entries(overridesToApply)) {
        for (const [name, val] of Object.entries(map || {})) {
          const was = existing[sec]?.[name]
          if (was !== val) changes.push({ section: sec, name, from: was !== undefined ? was : '(project default)', to: val })
        }
      }

      // ── Preview mode (confirmed=false): show the diff, change nothing ───────
      // "activeAfter" = item not explicitly disabled AND (explicitly enabled OR
      // enabled at the project level).
      if (!confirmed) {
        return text({
          preview: {
            changes: changes.length > 0 ? changes : ['No changes — current overrides already match this selection.'],
            activeAfter: {
              subagents: projAgents.filter(a => overridesToApply.subagents?.[a.name] !== false && (overridesToApply.subagents?.[a.name] === true || a.enabled !== false)).map(a => a.name),
              skills: projSkills.filter(s => overridesToApply.skills?.[s.name] !== false && (overridesToApply.skills?.[s.name] === true || s.enabled !== false)).map(s => s.name),
              prompts: projPrompts.filter(p => overridesToApply.prompts?.[p.name] !== false && (overridesToApply.prompts?.[p.name] === true || p.enabled !== false)).map(p => p.name),
            },
          },
          requiresConfirmation: true,
          message: `Call apply_kit_to_task again with confirmed=true to apply ${changes.length} change(s).`,
        })
      }

      // Apply
      const res = await api.patch(`/api/tasks/${taskId}`, { agentKitOverrides: overridesToApply })
      if (!res?.success) return errorText(res?.message || 'Failed to apply kit overrides')

      // Recompute the post-apply active sets using the same rule as the preview.
      const activeAgents = projAgents.filter(a => overridesToApply.subagents?.[a.name] !== false && (overridesToApply.subagents?.[a.name] === true || a.enabled !== false)).map(a => a.name)
      const activeSkills = projSkills.filter(s => overridesToApply.skills?.[s.name] !== false && (overridesToApply.skills?.[s.name] === true || s.enabled !== false)).map(s => s.name)
      const activePrompts = projPrompts.filter(p => overridesToApply.prompts?.[p.name] !== false && (overridesToApply.prompts?.[p.name] === true || p.enabled !== false)).map(p => p.name)

      return text({
        applied: true,
        changes: changes.length,
        activeKit: { agents: activeAgents, skills: activeSkills, prompts: activePrompts },
        message: `✅ Kit applied to task ${task.key}. ${activeAgents.length} agent(s), ${activeSkills.length} skill(s), ${activePrompts.length} prompt(s) now active.`,
        nextStep: `Call generate_cursor_workspace(projectId="${projectId}", taskId="${taskId}", repoPath="...") to write the active kit items to .cursor/ files so Cursor injects them automatically.`,
      })
    }
  )
}
|
|
8136
|
+
|
|
4982
8137
|
function registerAdminTools(server) {
|
|
4983
8138
|
server.tool(
|
|
4984
8139
|
'admin_list_users',
|
|
@@ -5032,6 +8187,13 @@ async function main() {
|
|
|
5032
8187
|
registerNotificationTools(server)
|
|
5033
8188
|
registerGithubTools(server, ctx)
|
|
5034
8189
|
registerGitWorkflowTools(server, ctx)
|
|
8190
|
+
registerMergePipelineTools(server, ctx)
|
|
8191
|
+
registerMemoryTools(server, ctx)
|
|
8192
|
+
registerCursorTools(server, ctx)
|
|
8193
|
+
registerOrchestrationTools(server, ctx)
|
|
8194
|
+
registerMonitoringTools(server, ctx)
|
|
8195
|
+
registerWorkspaceScanTool(server)
|
|
8196
|
+
registerKitTools(server)
|
|
5035
8197
|
|
|
5036
8198
|
if (isAdmin) {
|
|
5037
8199
|
registerAdminTools(server)
|