@danalexilewis/taskgraph 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +47 -0
- package/dist/cli/block.js +114 -0
- package/dist/cli/context.js +139 -0
- package/dist/cli/done.js +98 -0
- package/dist/cli/edge.js +99 -0
- package/dist/cli/export.js +97 -0
- package/dist/cli/import.js +123 -0
- package/dist/cli/index.js +78 -0
- package/dist/cli/init.js +106 -0
- package/dist/cli/next.js +97 -0
- package/dist/cli/note.js +72 -0
- package/dist/cli/plan.js +108 -0
- package/dist/cli/portfolio.js +159 -0
- package/dist/cli/setup.js +142 -0
- package/dist/cli/show.js +142 -0
- package/dist/cli/split.js +191 -0
- package/dist/cli/start.js +94 -0
- package/dist/cli/status.js +149 -0
- package/dist/cli/task.js +92 -0
- package/dist/cli/utils.js +74 -0
- package/dist/db/commit.js +18 -0
- package/dist/db/connection.js +22 -0
- package/dist/db/escape.js +6 -0
- package/dist/db/migrate.js +159 -0
- package/dist/db/query.js +102 -0
- package/dist/domain/errors.js +33 -0
- package/dist/domain/invariants.js +103 -0
- package/dist/domain/types.js +120 -0
- package/dist/export/dot.js +21 -0
- package/dist/export/graph-data.js +41 -0
- package/dist/export/markdown.js +108 -0
- package/dist/export/mermaid.js +27 -0
- package/dist/plan-import/importer.js +155 -0
- package/dist/plan-import/parser.js +213 -0
- package/dist/template/.cursor/memory.md +14 -0
- package/dist/template/.cursor/rules/memory.mdc +11 -0
- package/dist/template/.cursor/rules/plan-authoring.mdc +42 -0
- package/dist/template/.cursor/rules/session-start.mdc +18 -0
- package/dist/template/.cursor/rules/taskgraph-workflow.mdc +35 -0
- package/dist/template/AGENT.md +73 -0
- package/dist/template/docs/backend.md +33 -0
- package/dist/template/docs/frontend.md +31 -0
- package/dist/template/docs/infra.md +26 -0
- package/dist/template/docs/skills/README.md +14 -0
- package/dist/template/docs/skills/plan-authoring.md +38 -0
- package/dist/template/docs/skills/refactoring-safely.md +21 -0
- package/dist/template/docs/skills/taskgraph-lifecycle-execution.md +23 -0
- package/package.json +47 -0
|
@@ -0,0 +1,155 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
3
|
+
exports.upsertTasksAndEdges = upsertTasksAndEdges;
|
|
4
|
+
const uuid_1 = require("uuid");
|
|
5
|
+
const commit_1 = require("../db/commit");
|
|
6
|
+
const neverthrow_1 = require("neverthrow");
|
|
7
|
+
const query_1 = require("../db/query");
|
|
8
|
+
const escape_1 = require("../db/escape");
|
|
9
|
+
/**
 * Upserts the tasks parsed from a plan file into the `task` table (matching on
 * `external_key` within the plan), re-syncs their domain/skill junction rows,
 * and creates any missing "blocks" edges between them.
 *
 * Runs in two passes so that a task may be blocked by a task declared LATER in
 * the plan file: pass 1 upserts every task and records its stable-key ->
 * task_id mapping; pass 2 resolves every `blockedBy` key against the complete
 * mapping. (A single-pass implementation would silently skip forward
 * references, because the blocker's key is not registered yet when the edge is
 * attempted.)
 *
 * @param {string} planId - Plan the tasks belong to.
 * @param {Array} parsedTasks - Parsed tasks (stableKey, title, blockedBy, ...).
 * @param {string} repoPath - Path to the Dolt repository to operate on.
 * @param {boolean} [noCommit=false] - When true, skip the final Dolt commit.
 * @returns ResultAsync resolving to { importedTasksCount, createdPlansCount }.
 */
function upsertTasksAndEdges(planId, parsedTasks, repoPath, noCommit = false) {
    const currentTimestamp = (0, query_1.now)();
    const q = (0, query_1.query)(repoPath);
    return q
        .select("task", {
        columns: ["task_id", "external_key"],
        where: { plan_id: planId },
    })
        .andThen((existingTasksResult) => {
        return neverthrow_1.ResultAsync.fromPromise((async () => {
            // Stable external key -> task_id, seeded from tasks already in the DB.
            const externalKeyToTaskId = new Map();
            for (const task of existingTasksResult) {
                if (task.external_key) {
                    externalKeyToTaskId.set(task.external_key, task.task_id);
                }
            }
            let importedTasksCount = 0;
            // --- Pass 1: upsert every task (and its domain/skill junction rows)
            // and register its stable key for edge resolution in pass 2.
            for (const parsedTask of parsedTasks) {
                // Fields written identically by both the UPDATE and INSERT paths.
                const commonFields = {
                    title: parsedTask.title,
                    feature_key: parsedTask.feature ?? null,
                    area: parsedTask.area ?? null,
                    change_type: parsedTask.changeType ?? null,
                    intent: parsedTask.intent ?? null,
                    suggested_changes: parsedTask.suggestedChanges ?? null,
                    acceptance: parsedTask.acceptance.length > 0
                        ? (0, query_1.jsonObj)({ val: JSON.stringify(parsedTask.acceptance) })
                        : null,
                };
                let taskId = externalKeyToTaskId.get(parsedTask.stableKey);
                if (taskId) {
                    // Update existing task; only overwrite status when the plan
                    // explicitly provides one.
                    const updateResult = await q.update("task", {
                        ...commonFields,
                        ...(parsedTask.status !== undefined && {
                            status: parsedTask.status,
                        }),
                        updated_at: currentTimestamp,
                    }, { task_id: taskId });
                    if (updateResult.isErr()) {
                        console.error("Error updating task:", updateResult.error);
                        throw updateResult.error;
                    }
                }
                else {
                    // Insert new task (status defaults to "todo") plus a
                    // "created" audit event.
                    taskId = (0, uuid_1.v4)();
                    importedTasksCount++;
                    const insertResult = await q.insert("task", {
                        task_id: taskId,
                        plan_id: planId,
                        external_key: parsedTask.stableKey,
                        ...commonFields,
                        status: parsedTask.status ?? "todo",
                        created_at: currentTimestamp,
                        updated_at: currentTimestamp,
                    });
                    if (insertResult.isErr()) {
                        console.error("Error inserting new task:", insertResult.error);
                        throw insertResult.error;
                    }
                    const insertEventResult = await q.insert("event", {
                        event_id: (0, uuid_1.v4)(),
                        task_id: taskId,
                        kind: "created",
                        body: (0, query_1.jsonObj)({
                            title: parsedTask.title,
                            externalKey: parsedTask.stableKey,
                        }),
                        created_at: currentTimestamp,
                    });
                    if (insertEventResult.isErr()) {
                        console.error("Error inserting new task event:", insertEventResult.error);
                        throw insertEventResult.error;
                    }
                }
                externalKeyToTaskId.set(parsedTask.stableKey, taskId);
                // Re-sync task_domain / task_skill: delete-then-insert keeps the
                // junction rows an exact mirror of the parsed plan.
                const delDomainResult = await q.raw(`DELETE FROM \`task_domain\` WHERE task_id = '${(0, escape_1.sqlEscape)(taskId)}'`);
                if (delDomainResult.isErr())
                    throw delDomainResult.error;
                const delSkillResult = await q.raw(`DELETE FROM \`task_skill\` WHERE task_id = '${(0, escape_1.sqlEscape)(taskId)}'`);
                if (delSkillResult.isErr())
                    throw delSkillResult.error;
                for (const domain of parsedTask.domains ?? []) {
                    const ins = await q.insert("task_domain", {
                        task_id: taskId,
                        domain,
                    });
                    if (ins.isErr())
                        throw ins.error;
                }
                for (const skill of parsedTask.skills ?? []) {
                    const ins = await q.insert("task_skill", {
                        task_id: taskId,
                        skill,
                    });
                    if (ins.isErr())
                        throw ins.error;
                }
            }
            // --- Pass 2: resolve blockedBy keys now that every task in the plan
            // (including ones declared later in the file) is registered, and
            // insert any "blocks" edges not already present.
            for (const parsedTask of parsedTasks) {
                const taskId = externalKeyToTaskId.get(parsedTask.stableKey);
                if (!taskId)
                    continue; // defensive: pass 1 registers every stable key
                for (const blockerKey of parsedTask.blockedBy) {
                    const blockerTaskId = externalKeyToTaskId.get(blockerKey);
                    if (!blockerTaskId) {
                        console.warn(`Blocker task with stable key '${blockerKey}' not found. Skipping edge creation for task '${parsedTask.stableKey}'.`);
                        continue;
                    }
                    const edgeExistsResult = await q.count("edge", {
                        from_task_id: blockerTaskId,
                        to_task_id: taskId,
                        type: "blocks",
                    });
                    if (edgeExistsResult.isErr())
                        throw edgeExistsResult.error;
                    if (edgeExistsResult.value === 0) {
                        const insertEdgeResult = await q.insert("edge", {
                            from_task_id: blockerTaskId,
                            to_task_id: taskId,
                            type: "blocks",
                            reason: "Blocked by plan import",
                        });
                        if (insertEdgeResult.isErr()) {
                            console.error("Error inserting new edge:", insertEdgeResult.error);
                            throw insertEdgeResult.error;
                        }
                    }
                }
            }
            const commitResult = await (0, commit_1.doltCommit)("plan-import: upsert tasks and edges", repoPath, noCommit);
            if (commitResult.isErr())
                throw commitResult.error;
            return {
                importedTasksCount,
                createdPlansCount: 0, // Plan creation is handled in cli/import.ts, not here.
            };
        })(), (e) => e);
    });
}
|
|
@@ -0,0 +1,213 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __importDefault = (this && this.__importDefault) || function (mod) {
|
|
3
|
+
return (mod && mod.__esModule) ? mod : { "default": mod };
|
|
4
|
+
};
|
|
5
|
+
Object.defineProperty(exports, "__esModule", { value: true });
|
|
6
|
+
exports.parsePlanMarkdown = parsePlanMarkdown;
|
|
7
|
+
exports.parseCursorPlan = parseCursorPlan;
|
|
8
|
+
const fs_1 = require("fs");
|
|
9
|
+
const js_yaml_1 = __importDefault(require("js-yaml"));
|
|
10
|
+
const neverthrow_1 = require("neverthrow");
|
|
11
|
+
const errors_1 = require("../domain/errors");
|
|
12
|
+
/** Valid change-type values a plan task may declare. */
const CHANGE_TYPES = [
    "create",
    "modify",
    "refactor",
    "fix",
    "investigate",
    "test",
    "document",
];
// Set view of CHANGE_TYPES for O(1) membership checks.
const CHANGE_TYPE_SET = new Set(CHANGE_TYPES);
/**
 * Type guard: true only when `value` is a string naming a known change type.
 */
function isChangeType(value) {
    return typeof value === "string" && CHANGE_TYPE_SET.has(value);
}
|
|
25
|
+
/**
 * Parses a plan in the line-oriented markdown format (TASK:/TITLE:/... lines)
 * into { planTitle, planIntent, tasks }.
 *
 * Line-by-line state machine: `currentTask` is the task being accumulated and
 * `inAcceptanceBlock` tracks whether subsequent "- " bullets belong to the
 * current task's ACCEPTANCE list. Note the asymmetry: "# " and "INTENT:" must
 * appear at the very start of the raw line (plan-level fields), while task
 * fields match on the trimmed line, so they may be indented.
 *
 * @param filePath - Path of the markdown file to read.
 * @returns ok({ planTitle, planIntent, tasks }) on success;
 *          err(FILE_READ_FAILED) if reading/parsing throws.
 */
function parsePlanMarkdown(filePath) {
    try {
        const content = (0, fs_1.readFileSync)(filePath, "utf-8");
        const lines = content.split("\n");
        let planTitle = null;
        let planIntent = null;
        const tasks = [];
        let currentTask = null;
        let inAcceptanceBlock = false;
        for (const line of lines) {
            const trimmedLine = line.trim();
            if (line.startsWith("# ")) {
                // Top-level heading; if the file has several, the last one wins.
                planTitle = line.substring(2).trim();
            }
            else if (line.startsWith("INTENT:")) {
                planIntent = line.substring("INTENT:".length).trim();
            }
            else if (trimmedLine.startsWith("TASK:")) {
                // A new TASK line flushes the previous task (only if it got a
                // non-empty stable key) and starts a fresh accumulator.
                if (currentTask && currentTask.stableKey) {
                    tasks.push(currentTask);
                }
                currentTask = {
                    stableKey: trimmedLine.substring("TASK:".length).trim(),
                    blockedBy: [],
                    acceptance: [],
                };
                inAcceptanceBlock = false;
            }
            else if (currentTask && trimmedLine.startsWith("TITLE:")) {
                currentTask.title = trimmedLine.substring("TITLE:".length).trim();
                inAcceptanceBlock = false;
            }
            else if (currentTask && trimmedLine.startsWith("FEATURE:")) {
                currentTask.feature = trimmedLine.substring("FEATURE:".length).trim();
                inAcceptanceBlock = false;
            }
            else if (currentTask && trimmedLine.startsWith("AREA:")) {
                currentTask.area = trimmedLine.substring("AREA:".length).trim();
                inAcceptanceBlock = false;
            }
            else if (currentTask && trimmedLine.startsWith("DOMAIN:")) {
                // Comma-separated list; repeated DOMAIN: lines accumulate.
                const parts = trimmedLine
                    .substring("DOMAIN:".length)
                    .split(",")
                    .map((s) => s.trim())
                    .filter(Boolean);
                currentTask.domains = [...(currentTask.domains || []), ...parts];
                inAcceptanceBlock = false;
            }
            else if (currentTask && trimmedLine.startsWith("SKILL:")) {
                // Comma-separated list; repeated SKILL: lines accumulate.
                const parts = trimmedLine
                    .substring("SKILL:".length)
                    .split(",")
                    .map((s) => s.trim())
                    .filter(Boolean);
                currentTask.skills = [...(currentTask.skills || []), ...parts];
                inAcceptanceBlock = false;
            }
            else if (currentTask && trimmedLine.startsWith("CHANGE_TYPE:")) {
                // Unrecognized change types are silently dropped.
                const val = trimmedLine.substring("CHANGE_TYPE:".length).trim();
                if (isChangeType(val))
                    currentTask.changeType = val;
                inAcceptanceBlock = false;
            }
            else if (currentTask && trimmedLine.startsWith("BLOCKED_BY:")) {
                const blockers = trimmedLine
                    .substring("BLOCKED_BY:".length)
                    .split(",")
                    .map((s) => s.trim())
                    .filter(Boolean);
                currentTask.blockedBy = [...(currentTask.blockedBy || []), ...blockers];
                inAcceptanceBlock = false;
            }
            else if (currentTask && trimmedLine.startsWith("ACCEPTANCE:")) {
                // Following "-" bullet lines are acceptance criteria until any
                // other recognized field (or a non-bullet line) ends the block.
                inAcceptanceBlock = true;
            }
            else if (currentTask &&
                inAcceptanceBlock &&
                trimmedLine.startsWith("-")) {
                currentTask.acceptance = [
                    ...(currentTask.acceptance || []),
                    trimmedLine.substring(1).trim(),
                ];
            }
            else {
                // Any unrecognized line (incl. blank lines) closes an open
                // acceptance block.
                inAcceptanceBlock = false;
            }
        }
        // Flush the final task, which has no trailing TASK: line to flush it.
        if (currentTask && currentTask.stableKey) {
            tasks.push(currentTask);
        }
        return (0, neverthrow_1.ok)({ planTitle, planIntent, tasks });
    }
    catch (e) {
        return (0, neverthrow_1.err)((0, errors_1.buildError)(errors_1.ErrorCode.FILE_READ_FAILED, `Failed to read or parse markdown file at ${filePath}`, e));
    }
}
|
|
122
|
+
/** Normalize risks from frontmatter to { description, severity, mitigation }[]. */
function normalizeRisks(raw) {
    // Anything that is not an array (including null/undefined) normalizes to null.
    if (!Array.isArray(raw)) {
        return null;
    }
    const normalized = [];
    for (const entry of raw) {
        // Keep only entries carrying all three required string fields.
        const isWellFormed = entry != null &&
            typeof entry === "object" &&
            typeof entry.description === "string" &&
            typeof entry.severity === "string" &&
            typeof entry.mitigation === "string";
        if (isWellFormed) {
            // Re-project to exactly the three known fields, dropping extras.
            normalized.push({
                description: entry.description,
                severity: entry.severity,
                mitigation: entry.mitigation,
            });
        }
    }
    return normalized;
}
|
|
138
|
+
/**
 * Parses a Cursor Plan file (YAML frontmatter with todos).
 *
 * Extracts the frontmatter between the leading `---` fences, loads it as
 * YAML, and maps each well-formed todo (string `id` + string `content`) to a
 * parsed task. The remaining markdown after the frontmatter becomes `body`.
 *
 * @param filePath - Path of the plan file to read.
 * @returns ok({ planTitle, planIntent, tasks, fileTree?, risks?, tests?, body? })
 *          on success; err(FILE_READ_FAILED) on any read/shape failure.
 */
function parseCursorPlan(filePath) {
    // Coerce a frontmatter value into a string array: arrays keep only their
    // string members, a lone string becomes a one-element array, and anything
    // else (including undefined) yields undefined.
    const asStringList = (value) => {
        if (value === undefined)
            return undefined;
        if (Array.isArray(value))
            return value.filter((x) => typeof x === "string");
        return typeof value === "string" ? [value] : undefined;
    };
    // A todo entry is usable only when it is an object with string id/content.
    const isUsableTodo = (t) => t != null &&
        typeof t === "object" &&
        typeof t.id === "string" &&
        typeof t.content === "string";
    try {
        const content = (0, fs_1.readFileSync)(filePath, "utf-8");
        const frontmatterMatch = content.match(/^---\s*\n([\s\S]*?)\n---/);
        if (!frontmatterMatch) {
            return (0, neverthrow_1.err)((0, errors_1.buildError)(errors_1.ErrorCode.FILE_READ_FAILED, `File ${filePath} does not have YAML frontmatter (--- ... ---)`));
        }
        // Markdown after the closing fence; empty string collapses to null.
        const body = content.slice(frontmatterMatch[0].length).trim() || null;
        const parsed = js_yaml_1.default.load(frontmatterMatch[1]);
        if (!parsed || typeof parsed !== "object") {
            return (0, neverthrow_1.err)((0, errors_1.buildError)(errors_1.ErrorCode.FILE_READ_FAILED, `Invalid YAML frontmatter in ${filePath}`));
        }
        const todos = parsed.todos ?? [];
        if (!Array.isArray(todos)) {
            return (0, neverthrow_1.err)((0, errors_1.buildError)(errors_1.ErrorCode.FILE_READ_FAILED, `Expected 'todos' to be an array in ${filePath}`));
        }
        const tasks = todos.filter(isUsableTodo).map((todo) => {
            // Only recognized change types are kept; others become undefined.
            let changeType;
            if (todo.changeType != null && isChangeType(todo.changeType)) {
                changeType = todo.changeType;
            }
            const domains = asStringList(todo.domain);
            const skills = asStringList(todo.skill);
            return {
                stableKey: todo.id,
                title: todo.content,
                blockedBy: Array.isArray(todo.blockedBy) ? todo.blockedBy : [],
                acceptance: [],
                // Cursor marks finished todos "completed"; map to taskgraph "done".
                status: todo.status === "completed" ? "done" : "todo",
                // Empty coerced lists are normalized to undefined.
                domains: domains?.length ? domains : undefined,
                skills: skills?.length ? skills : undefined,
                changeType,
                intent: typeof todo.intent === "string" ? todo.intent : undefined,
                suggestedChanges: typeof todo.suggestedChanges === "string"
                    ? todo.suggestedChanges
                    : undefined,
            };
        });
        const fileTree = typeof parsed.fileTree === "string" ? parsed.fileTree : null;
        const risks = normalizeRisks(parsed.risks);
        // `tests` is accepted only as an all-string array.
        let tests = null;
        if (Array.isArray(parsed.tests) &&
            parsed.tests.every((x) => typeof x === "string")) {
            tests = parsed.tests;
        }
        return (0, neverthrow_1.ok)({
            planTitle: parsed.name ?? null,
            planIntent: parsed.overview ?? null,
            tasks,
            fileTree: fileTree ?? undefined,
            risks: risks ?? undefined,
            tests: tests ?? undefined,
            body: body ?? undefined,
        });
    }
    catch (e) {
        return (0, neverthrow_1.err)((0, errors_1.buildError)(errors_1.ErrorCode.FILE_READ_FAILED, `Failed to read or parse Cursor plan at ${filePath}`, e));
    }
}
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
# Persistent Memory
|
|
2
|
+
|
|
3
|
+
## Preferences
|
|
4
|
+
|
|
5
|
+
- (Add repo-specific conventions here: tooling, naming, testing expectations.)
|
|
6
|
+
|
|
7
|
+
## Patterns
|
|
8
|
+
|
|
9
|
+
- (Add implementation patterns that worked well in this repo.)
|
|
10
|
+
|
|
11
|
+
## Corrections
|
|
12
|
+
|
|
13
|
+
- (Add agent mistakes / environment quirks so they aren’t repeated.)
|
|
14
|
+
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
---
|
|
2
|
+
description: Maintain repo-specific persistent memory at .cursor/memory.md
|
|
3
|
+
alwaysApply: true
|
|
4
|
+
---
|
|
5
|
+
|
|
6
|
+
# Persistent Memory
|
|
7
|
+
|
|
8
|
+
Every request should start by reading `.cursor/memory.md` to learn repo-specific conventions and prior decisions.
|
|
9
|
+
|
|
10
|
+
When you discover a tooling quirk, fix a recurring mistake, or establish a useful pattern, update `.cursor/memory.md` at the end of the session.
|
|
11
|
+
|
|
@@ -0,0 +1,42 @@
|
|
|
1
|
+
---
|
|
2
|
+
description: Cursor plan format for tg import (stable ids, dependencies, domain/skill)
|
|
3
|
+
globs: plans/**/*.md
|
|
4
|
+
alwaysApply: false
|
|
5
|
+
---
|
|
6
|
+
|
|
7
|
+
# Plan Authoring (Cursor Format)
|
|
8
|
+
|
|
9
|
+
Plans in `plans/` can be imported into TaskGraph with:
|
|
10
|
+
|
|
11
|
+
```bash
|
|
12
|
+
tg import plans/<file> --plan "<Plan Name>" --format cursor
|
|
13
|
+
```
|
|
14
|
+
|
|
15
|
+
## Minimal YAML frontmatter
|
|
16
|
+
|
|
17
|
+
```yaml
|
|
18
|
+
---
|
|
19
|
+
name: Plan Name
|
|
20
|
+
overview: "Brief description of the plan."
|
|
21
|
+
todos:
|
|
22
|
+
- id: stable-key
|
|
23
|
+
content: "Small task"
|
|
24
|
+
status: pending
|
|
25
|
+
- id: another-task
|
|
26
|
+
content: "Depends on first"
|
|
27
|
+
blockedBy: [stable-key]
|
|
28
|
+
---
|
|
29
|
+
```
|
|
30
|
+
|
|
31
|
+
## Useful fields
|
|
32
|
+
|
|
33
|
+
- `domain`: string or array → `docs/<domain>.md`
|
|
34
|
+
- `skill`: string or array → `docs/skills/<skill>.md`
|
|
35
|
+
- `changeType`: `create` | `modify` | `refactor` | `fix` | `investigate` | `test` | `document`
|
|
36
|
+
|
|
37
|
+
## Best practices
|
|
38
|
+
|
|
39
|
+
- Keep `id` stable over time (kebab-case).
|
|
40
|
+
- Keep tasks scoped (roughly ≤ 90 minutes); split big ones.
|
|
41
|
+
- Use `blockedBy` for true dependencies, not mere sequencing preferences.
|
|
42
|
+
|
|
@@ -0,0 +1,18 @@
|
|
|
1
|
+
---
|
|
2
|
+
description: Run tg status at session start for orientation
|
|
3
|
+
alwaysApply: true
|
|
4
|
+
---
|
|
5
|
+
|
|
6
|
+
# Session Start
|
|
7
|
+
|
|
8
|
+
At the start of every session, run:
|
|
9
|
+
|
|
10
|
+
```bash
|
|
11
|
+
tg status
|
|
12
|
+
```
|
|
13
|
+
|
|
14
|
+
This surfaces:
|
|
15
|
+
- Plans in progress
|
|
16
|
+
- Tasks stuck in `doing` (may need cleanup)
|
|
17
|
+
- Next runnable tasks
|
|
18
|
+
|
|
@@ -0,0 +1,35 @@
|
|
|
1
|
+
---
|
|
2
|
+
description: TaskGraph plan creation/review + execution loop (per task)
|
|
3
|
+
alwaysApply: true
|
|
4
|
+
---
|
|
5
|
+
|
|
6
|
+
# TaskGraph Workflow
|
|
7
|
+
|
|
8
|
+
## Orientation
|
|
9
|
+
|
|
10
|
+
Before executing work, run `tg status` to see what's outstanding. If there are stale `doing` tasks or forgotten `todo` tasks for completed work, clean them up first.
|
|
11
|
+
|
|
12
|
+
## Plan creation and review
|
|
13
|
+
|
|
14
|
+
- Plans live in `plans/<name>.md` (Cursor YAML frontmatter format).
|
|
15
|
+
- After creating a plan: summarize it, then pause for human review. Don’t import or execute until the user says to proceed.
|
|
16
|
+
|
|
17
|
+
## Execution loop (per task — mandatory)
|
|
18
|
+
|
|
19
|
+
For EACH task, follow all steps in order.
|
|
20
|
+
|
|
21
|
+
1. `tg start <taskId>` — must run before doing any work
|
|
22
|
+
2. `tg context <taskId>` — read the printed domain doc paths (`docs/<domain>.md`) and skill guide paths (`docs/skills/<skill>.md`)
|
|
23
|
+
3. Do the work (stay within scope)
|
|
24
|
+
4. `tg done <taskId> --evidence "tests run, commands, commit hashes"` — must run immediately after finishing
|
|
25
|
+
|
|
26
|
+
## Recovery (out-of-sync tasks)
|
|
27
|
+
|
|
28
|
+
- Task is `todo` but already done: `tg done <taskId> --force --evidence "completed previously"`
|
|
29
|
+
- Task is `doing` but already done: `tg done <taskId> --evidence "completed previously"`
|
|
30
|
+
|
|
31
|
+
## When blocked
|
|
32
|
+
|
|
33
|
+
- `tg block <taskId> --on <blockerId> --reason "..."`
|
|
34
|
+
- If blocker does not exist: create a new task (owner=human), then block on it.
|
|
35
|
+
|
|
@@ -0,0 +1,73 @@
|
|
|
1
|
+
Plan creation and review
|
|
2
|
+
|
|
3
|
+
- When the user asks for a plan: create `plans/<name>.md` in Cursor format (YAML frontmatter with `name`, `overview`, `todos`). Summarize the plan, then pause and ask for review.
|
|
4
|
+
- Interpret user response using this table:
|
|
5
|
+
|
|
6
|
+
| User says | Meaning | Agent action |
|
|
7
|
+
|-----------|---------|--------------|
|
|
8
|
+
| proceed, go ahead, execute, run it, let's do it | Approve and execute | Run `tg import plans/<file> --plan "<Plan Name>" --format cursor`, then enter the execution loop below |
|
|
9
|
+
| just add the tasks, add to taskgraph only, import but don't run | Add to graph only | Run `tg import plans/<file> --plan "<Plan Name>" --format cursor`. Do not execute tasks |
|
|
10
|
+
| thanks, that's good, looks good, ok, don't do anything | Acknowledgement only | Do nothing. No import, no execution |
|
|
11
|
+
|
|
12
|
+
Agent operating loop
|
|
13
|
+
|
|
14
|
+
- Always begin with: tg status to orient — surface stale tasks, plan state, and other agents' active work (if any).
|
|
15
|
+
- Then: tg next --limit 5 and choose the top runnable task.
|
|
16
|
+
- Before coding: tg show <taskId> and restate:
|
|
17
|
+
- intent
|
|
18
|
+
- scope in/out
|
|
19
|
+
- acceptance checks
|
|
20
|
+
- Then: tg start <taskId> [--agent <name>] — MUST run before any work. When multiple agents may be active, pass --agent so others see who is working.
|
|
21
|
+
- Execute exactly within scope.
|
|
22
|
+
- When done: tg done <taskId> --evidence "..." — MUST run immediately after work is complete.
|
|
23
|
+
Include in evidence:
|
|
24
|
+
- tests run
|
|
25
|
+
- commands output summary
|
|
26
|
+
- git commit hash(es)
|
|
27
|
+
|
|
28
|
+
Per-task discipline
|
|
29
|
+
|
|
30
|
+
- Complete start→work→done for EACH task individually.
|
|
31
|
+
- Never batch-skip transitions (e.g., doing all work then marking all done).
|
|
32
|
+
|
|
33
|
+
Recovery (out-of-sync tasks)
|
|
34
|
+
|
|
35
|
+
- Task is `todo` but work is already done: `tg done <taskId> --force --evidence "completed previously"`
|
|
36
|
+
- Task is `doing` but work is already done: `tg done <taskId> --evidence "completed previously"`
|
|
37
|
+
- Run `tg status` after cleanup to verify.
|
|
38
|
+
- Use `--force` only for legitimate out-of-band completion, never to bypass workflow.
|
|
39
|
+
|
|
40
|
+
Plan completion
|
|
41
|
+
|
|
42
|
+
After marking the last task in a plan as done, run:
|
|
43
|
+
tg export markdown --plan <planId> --out plans/<file>
|
|
44
|
+
This updates the plan file with final statuses.
|
|
45
|
+
|
|
46
|
+
When blocked
|
|
47
|
+
|
|
48
|
+
- If blocked by missing prerequisite, run:
|
|
49
|
+
- tg block <taskId> --on <blockerTaskId> --reason "..."
|
|
50
|
+
- If blocker does not exist:
|
|
51
|
+
- create a new task with owner=human and status todo, then block on it.
|
|
52
|
+
|
|
53
|
+
Decisions
|
|
54
|
+
|
|
55
|
+
- If a decision is required to proceed:
|
|
56
|
+
- create a task: "Decide: …" with owner=human
|
|
57
|
+
- add a decision_needed event with options + recommendation
|
|
58
|
+
- stop and ask for approval
|
|
59
|
+
|
|
60
|
+
Safe graph edits the agent may do without asking
|
|
61
|
+
|
|
62
|
+
- status transitions (todo→doing→done, blocked when real blocker exists)
|
|
63
|
+
- add a dependency when it's objectively required ("API endpoint must exist before UI integration")
|
|
64
|
+
- split a task when it exceeds ~90 minutes, keeping scope and acceptance intact
|
|
65
|
+
|
|
66
|
+
Everything else is proposal-only.
|
|
67
|
+
|
|
68
|
+
Multi-agent awareness (when 2–3 agents work alongside the human)
|
|
69
|
+
|
|
70
|
+
- Always pass --agent <session-name> on tg start so other agents see who claimed each task.
|
|
71
|
+
- Read "Active work" from tg status before picking a task; avoid overlapping on the same files/area.
|
|
72
|
+
- Use tg note <taskId> --msg "..." to leave breadcrumbs when changing shared interfaces (types, schema, parser).
|
|
73
|
+
- Do not pick a task in the same area as another agent's doing task without human approval.
|
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
# Backend
|
|
2
|
+
|
|
3
|
+
This doc is a **domain guide**. Tasks with `domain: backend` should point here (`docs/backend.md`).
|
|
4
|
+
|
|
5
|
+
## Purpose
|
|
6
|
+
|
|
7
|
+
- What this domain owns
|
|
8
|
+
- What it explicitly does *not* own
|
|
9
|
+
|
|
10
|
+
## Key entrypoints
|
|
11
|
+
|
|
12
|
+
- `src/...`
|
|
13
|
+
- `api/...`
|
|
14
|
+
|
|
15
|
+
## Data + invariants
|
|
16
|
+
|
|
17
|
+
- Storage model, important tables/collections
|
|
18
|
+
- Cross-service contracts
|
|
19
|
+
|
|
20
|
+
## Local dev
|
|
21
|
+
|
|
22
|
+
- How to run the backend locally
|
|
23
|
+
- Required env vars / secrets handling
|
|
24
|
+
|
|
25
|
+
## Testing
|
|
26
|
+
|
|
27
|
+
- Unit/integration/e2e strategy
|
|
28
|
+
- Where tests live and how to run them
|
|
29
|
+
|
|
30
|
+
## Decisions / gotchas
|
|
31
|
+
|
|
32
|
+
- Important constraints and historical context
|
|
33
|
+
|
|
@@ -0,0 +1,31 @@
|
|
|
1
|
+
# Frontend
|
|
2
|
+
|
|
3
|
+
This doc is a **domain guide**. Tasks with `domain: frontend` should point here (`docs/frontend.md`).
|
|
4
|
+
|
|
5
|
+
## Purpose
|
|
6
|
+
|
|
7
|
+
- What user journeys and surfaces this domain owns
|
|
8
|
+
|
|
9
|
+
## Key entrypoints
|
|
10
|
+
|
|
11
|
+
- `src/app/...`
|
|
12
|
+
- `src/components/...`
|
|
13
|
+
|
|
14
|
+
## State + data fetching
|
|
15
|
+
|
|
16
|
+
- Where state lives (local, global, server state)
|
|
17
|
+
- Caching/invalidation patterns
|
|
18
|
+
|
|
19
|
+
## Styling + UI conventions
|
|
20
|
+
|
|
21
|
+
- Design system / component library conventions
|
|
22
|
+
- Accessibility requirements
|
|
23
|
+
|
|
24
|
+
## Testing
|
|
25
|
+
|
|
26
|
+
- Component tests and e2e expectations
|
|
27
|
+
|
|
28
|
+
## Decisions / gotchas
|
|
29
|
+
|
|
30
|
+
- Performance footguns, routing rules, etc.
|
|
31
|
+
|
|
@@ -0,0 +1,26 @@
|
|
|
1
|
+
# Infra
|
|
2
|
+
|
|
3
|
+
This doc is a **domain guide**. Tasks with `domain: infra` should point here (`docs/infra.md`).
|
|
4
|
+
|
|
5
|
+
## Purpose
|
|
6
|
+
|
|
7
|
+
- Deployments, CI/CD, cloud resources, networking, secrets, observability
|
|
8
|
+
|
|
9
|
+
## Environments
|
|
10
|
+
|
|
11
|
+
- dev/staging/prod differences
|
|
12
|
+
- Release process
|
|
13
|
+
|
|
14
|
+
## CI/CD
|
|
15
|
+
|
|
16
|
+
- How builds/tests run
|
|
17
|
+
- Required checks
|
|
18
|
+
|
|
19
|
+
## Runbooks
|
|
20
|
+
|
|
21
|
+
- Common incidents and how to respond
|
|
22
|
+
|
|
23
|
+
## Decisions / gotchas
|
|
24
|
+
|
|
25
|
+
- Cost constraints, security constraints, and hard-won lessons
|
|
26
|
+
|
|
@@ -0,0 +1,14 @@
|
|
|
1
|
+
# Skill guides
|
|
2
|
+
|
|
3
|
+
Tasks can set a `skill` that maps to a guide here. The agent reads `docs/skills/<skill>.md` before starting work.
|
|
4
|
+
|
|
5
|
+
## Example skills
|
|
6
|
+
|
|
7
|
+
| Skill | Purpose |
|
|
8
|
+
|-------|---------|
|
|
9
|
+
| [taskgraph-lifecycle-execution](taskgraph-lifecycle-execution.md) | Execute tasks with correct `start → context → done` transitions |
|
|
10
|
+
| [plan-authoring](plan-authoring.md) | Write Cursor-format plans that import cleanly into TaskGraph |
|
|
11
|
+
| [refactoring-safely](refactoring-safely.md) | Behavior-preserving changes: small steps, test before/after |
|
|
12
|
+
|
|
13
|
+
Use the slug (e.g. `taskgraph-lifecycle-execution`) as the task's `skill` in plan YAML.
|
|
14
|
+
|