@punks/cli 1.0.4 → 1.0.6
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +1 -1
- package/dist/data/hooks/format-edited-file.mjs +52 -0
- package/dist/data/hooks/require-tests-for-pr.mjs +64 -2
- package/dist/data/hooks.test.ts +87 -0
- package/dist/index.js +156 -95
- package/dist/skills/agnostic/docs/docs-maintenance/SKILL.md +1 -1
- package/dist/skills/agnostic/planning/create-plan/REFERENCE.md +1 -1
- package/dist/skills/agnostic/planning/create-plan/SKILL.md +6 -5
- package/dist/skills/agnostic/planning/create-plan/references/grill-phase.md +2 -1
- package/dist/skills/agnostic/planning/create-plan/references/plan-schema.md +4 -0
- package/dist/skills/agnostic/planning/create-plan/references/planner-phase.md +5 -2
- package/dist/skills/agnostic/planning/create-spec/SKILL.md +17 -11
- package/dist/skills/agnostic/planning/create-spec/assets/SPEC-TEMPLATE.md +1 -1
- package/dist/skills/agnostic/planning/create-spec/references/backlog-sync.md +46 -0
- package/dist/skills/agnostic/planning/create-spec/references/grill-phase.md +47 -0
- package/dist/skills/agnostic/planning/create-spec/references/questioning.md +12 -2
- package/dist/skills/agnostic/planning/implement-spec/SKILL.md +7 -5
- package/dist/skills/agnostic/planning/implement-spec/references/parallel-orchestration.md +1 -0
- package/dist/skills/agnostic/planning/implement-spec/references/parallel-reasoning.md +19 -0
- package/dist/skills/agnostic/planning/implement-spec/references/parallel-worker-brief.md +3 -0
- package/dist/skills/agnostic/planning/implement-spec/references/parallel.md +7 -5
- package/docs/README.md +2 -3
- package/docs/runbooks/dp-cli-scaffolding.md +4 -2
- package/package.json +1 -1
package/README.md
CHANGED

````diff
@@ -67,7 +67,7 @@ Current scaffold-managed global tools:
 - `portless`
 - `skills`
 
-On startup, `punks`
+On startup, `punks` may start a detached best-effort worker for a newer CLI version and the presence of the `dp-cli` operator skill. The requested command renders immediately; startup checks are advisory, run at most once per 12 hours by default, and never install/update packages or skills while another CLI command is starting. Set `DP_NO_UPDATE_CHECK=1` or `DP_NO_SKILL_UPDATE_CHECK=1` to skip those checks, or `DP_STARTUP_CHECK_INTERVAL_MS=0` to force the worker during local testing.
 
 ## Publishing
 
````
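A minimal sketch of exercising those documented toggles (assuming `punks` is on `PATH`; the invocation itself is illustrative, only the environment variable names come from the README text above):

```js
// Sketch: drive the documented startup-check toggles from Node.
import { spawnSync } from "node:child_process";

// Skip both advisory startup checks for this invocation.
spawnSync("punks", [], {
  stdio: "inherit",
  env: { ...process.env, DP_NO_UPDATE_CHECK: "1", DP_NO_SKILL_UPDATE_CHECK: "1" },
});

// Force the detached worker on a normal run by disabling the 12-hour interval.
spawnSync("punks", [], {
  stdio: "inherit",
  env: { ...process.env, DP_STARTUP_CHECK_INTERVAL_MS: "0" },
});
```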
package/dist/data/hooks/format-edited-file.mjs
CHANGED

````diff
@@ -11,6 +11,7 @@ const formattableSuffixes = new Set([".ts", ".tsx", ".js", ".jsx", ".mjs", ".cjs
 const lintableSuffixes = new Set([".ts", ".tsx", ".js", ".jsx", ".mjs", ".cjs"]);
 const ignoredParts = new Set([".git", "node_modules", ".next", "dist"]);
 const productRoots = new Set(["apps", "packages"]);
+const packageManagers = new Set(["bun", "pnpm", "npm", "yarn"]);
 
 function readStdinJson() {
   try {
@@ -93,7 +94,54 @@ function commandExists(command) {
   );
 }
 
+function packageManagerFromPackageJson(root) {
+  const packageManager = loadJson(path.join(root, "package.json"))?.packageManager;
+
+  if (typeof packageManager !== "string") {
+    return null;
+  }
+
+  const name = packageManager.split("@")[0];
+  return packageManagers.has(name) ? name : null;
+}
+
+function packageManagerFromLockfile(root) {
+  if (existsSync(path.join(root, "bun.lock")) || existsSync(path.join(root, "bun.lockb"))) {
+    return "bun";
+  }
+
+  if (
+    existsSync(path.join(root, "pnpm-lock.yaml")) ||
+    existsSync(path.join(root, "pnpm-workspace.yaml"))
+  ) {
+    return "pnpm";
+  }
+
+  if (existsSync(path.join(root, "yarn.lock"))) {
+    return "yarn";
+  }
+
+  if (existsSync(path.join(root, "package-lock.json"))) {
+    return "npm";
+  }
+
+  return null;
+}
+
 function toolCommand(root, tool, args) {
+  switch (packageManagerFromPackageJson(root) ?? packageManagerFromLockfile(root)) {
+    case "bun":
+      return ["bunx", tool, ...args];
+    case "pnpm":
+      return ["pnpm", "exec", tool, ...args];
+    case "yarn":
+      return ["yarn", "exec", tool, ...args];
+    case "npm":
+      return ["npm", "exec", "--", tool, ...args];
+    default:
+      break;
+  }
+
   if (commandExists("bunx")) {
     return ["bunx", tool, ...args];
   }
@@ -524,6 +572,10 @@ export const FormatAndLintPlugin = async ({ client, worktree }) => {
   };
 };
 
+export const testHooks = {
+  toolCommand,
+};
+
 function main() {
   const mode = process.argv[2] ?? "";
 
````
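A usage sketch (not part of the package; the repo path and file arguments are made up, while the `testHooks` export and resolution order come from the diff above): the hook now resolves the formatter command from the target repo, `packageManager` field first, then lockfiles, then the existing `bunx`/`npx` probing.

```js
// Sketch: how the scaffolded hook picks a runner for `oxfmt` in a target repo.
import { testHooks } from "./format-edited-file.mjs";

// package.json with { "packageManager": "pnpm@10.0.0" }
//   -> ["pnpm", "exec", "oxfmt", "--write", "src/index.ts"]
// no packageManager field but bun.lock present
//   -> ["bunx", "oxfmt", "--write", "src/index.ts"]
// package-lock.json present
//   -> ["npm", "exec", "--", "oxfmt", "--write", "src/index.ts"]
// nothing detected -> falls through to the existing bunx/npx probing
console.log(testHooks.toolCommand("/path/to/repo", "oxfmt", ["--write", "src/index.ts"]));
```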
package/dist/data/hooks/require-tests-for-pr.mjs
CHANGED

````diff
@@ -1,13 +1,14 @@
 #!/usr/bin/env node
 
 import { spawnSync } from "node:child_process";
-import { readFileSync } from "node:fs";
+import { existsSync, readFileSync } from "node:fs";
 import path from "node:path";
 import { fileURLToPath } from "node:url";
 
 const denyMessage = "Tests are failing. Fix all test failures before creating a PR.";
 const prCommandPattern = /\bgh\s+pr\s+create\b/i;
 const prToolPattern = /(^|[._-])create[_-]?pull[_-]?request([._-]|$)/i;
+const packageManagers = new Set(["bun", "pnpm", "npm", "yarn"]);
 
 function readStdinJson() {
   try {
@@ -63,8 +64,65 @@ function matchesPrAction(...values) {
   return strings.some((value) => prCommandPattern.test(value) || prToolPattern.test(value));
 }
 
+function readPackageJson(root) {
+  try {
+    return JSON.parse(readFileSync(path.join(root, "package.json"), "utf8"));
+  } catch {
+    return {};
+  }
+}
+
+function packageManagerFromPackageJson(root) {
+  const packageManager = readPackageJson(root)?.packageManager;
+
+  if (typeof packageManager !== "string") {
+    return null;
+  }
+
+  const name = packageManager.split("@")[0];
+  return packageManagers.has(name) ? name : null;
+}
+
+function packageManagerFromLockfile(root) {
+  if (existsSync(path.join(root, "bun.lock")) || existsSync(path.join(root, "bun.lockb"))) {
+    return "bun";
+  }
+
+  if (
+    existsSync(path.join(root, "pnpm-lock.yaml")) ||
+    existsSync(path.join(root, "pnpm-workspace.yaml"))
+  ) {
+    return "pnpm";
+  }
+
+  if (existsSync(path.join(root, "yarn.lock"))) {
+    return "yarn";
+  }
+
+  if (existsSync(path.join(root, "package-lock.json"))) {
+    return "npm";
+  }
+
+  return null;
+}
+
+function testCommand(root) {
+  switch (packageManagerFromPackageJson(root) ?? packageManagerFromLockfile(root) ?? "npm") {
+    case "bun":
+      return ["bun", "run", "test"];
+    case "pnpm":
+      return ["pnpm", "--silent", "test"];
+    case "yarn":
+      return ["yarn", "--silent", "test"];
+    case "npm":
+    default:
+      return ["npm", "--silent", "test"];
+  }
+}
+
 function runTests(root) {
-  const
+  const command = testCommand(root);
+  const result = spawnSync(command[0], command.slice(1), {
     cwd: root,
     stdio: "ignore",
   });
@@ -128,6 +186,10 @@ export const RequireTestsForPrPlugin = async ({ worktree }) => {
   };
 };
 
+export const testHooks = {
+  testCommand,
+};
+
 function main() {
   const mode = process.argv[2] ?? "";
 
````
package/dist/hooks.test.ts
ADDED

````diff
@@ -0,0 +1,87 @@
+import { mkdtempSync, writeFileSync } from "node:fs";
+import { tmpdir } from "node:os";
+import path from "node:path";
+
+import { describe, expect, it } from "@effect/vitest";
+
+type HookModule = {
+  readonly testHooks: {
+    readonly testCommand?: (root: string) => ReadonlyArray<string>;
+    readonly toolCommand?: (
+      root: string,
+      tool: string,
+      args: ReadonlyArray<string>,
+    ) => ReadonlyArray<string>;
+  };
+};
+
+const repoRoot = path.resolve(import.meta.dirname, "../..");
+
+const loadHookModule = (relativePath: string) =>
+  import(path.join(repoRoot, "src/data/hooks", relativePath)) as Promise<HookModule>;
+
+const tempRepo = (files: Record<string, string>) => {
+  const root = mkdtempSync(path.join(tmpdir(), "dp-hook-test-"));
+
+  for (const [filePath, content] of Object.entries(files)) {
+    writeFileSync(path.join(root, filePath), content);
+  }
+
+  return root;
+};
+
+describe("scaffolded hooks", () => {
+  it("runs the PR test gate with the repo package manager", async () => {
+    const { testHooks } = await loadHookModule("require-tests-for-pr.mjs");
+
+    expect(
+      testHooks.testCommand?.(
+        tempRepo({
+          "package.json": JSON.stringify({ packageManager: "bun@1.3.5" }),
+        }),
+      ),
+    ).toEqual(["bun", "run", "test"]);
+
+    expect(
+      testHooks.testCommand?.(
+        tempRepo({
+          "package.json": JSON.stringify({ packageManager: "pnpm@10.0.0" }),
+        }),
+      ),
+    ).toEqual(["pnpm", "--silent", "test"]);
+
+    expect(
+      testHooks.testCommand?.(
+        tempRepo({
+          "package.json": "{}",
+          "package-lock.json": "",
+        }),
+      ),
+    ).toEqual(["npm", "--silent", "test"]);
+  });
+
+  it("executes formatter tools with the repo package manager", async () => {
+    const { testHooks } = await loadHookModule("format-edited-file.mjs");
+
+    expect(
+      testHooks.toolCommand?.(
+        tempRepo({
+          "package.json": JSON.stringify({ packageManager: "bun@1.3.5" }),
+        }),
+        "oxfmt",
+        ["--write", "src/index.ts"],
+      ),
+    ).toEqual(["bunx", "oxfmt", "--write", "src/index.ts"]);
+
+    expect(
+      testHooks.toolCommand?.(
+        tempRepo({
+          "package.json": "{}",
+          "pnpm-lock.yaml": "",
+        }),
+        "oxlint",
+        ["-c", ".oxlintrc.json", "src/index.ts"],
+      ),
+    ).toEqual(["pnpm", "exec", "oxlint", "-c", ".oxlintrc.json", "src/index.ts"]);
+  });
+});
````
package/dist/index.js
CHANGED

````diff
@@ -10883,6 +10883,17 @@ var require_public_api = __commonJS((exports) => {
   exports.stringify = stringify;
 });
 
+// src/index.ts
+import { spawn as spawn2 } from "child_process";
+import {
+  existsSync as existsSync7,
+  mkdirSync as mkdirSync6,
+  readFileSync as readFileSync7,
+  writeFileSync as writeFileSync5
+} from "fs";
+import { homedir as homedir2 } from "os";
+import path14 from "path";
+
 // node_modules/.pnpm/effect@3.19.19/node_modules/effect/dist/esm/Array.js
 var exports_Array = {};
 __export(exports_Array, {
@@ -42283,16 +42294,15 @@ var toolCatalog = [
   }
 ];
 
-//
-var
-var
-
+// package.json
+var name = "@punks/cli";
+var version = "1.0.6";
 // src/baseline/bundled.ts
 var bundledBaseline = {
   summary: {
     source: "bundled",
     channel: "bundled",
-    version
+    version,
     tag: null,
     commit: null,
     sha256: null
@@ -42778,8 +42788,8 @@ var collectPackageNames = (pkg) => {
     if (!record2) {
       return;
     }
-    for (const
-      names.add(
+    for (const name2 of Object.keys(record2)) {
+      names.add(name2);
     }
   };
   addEntries(pkg.dependencies);
@@ -43570,12 +43580,13 @@ var renderHint = (value5) => paint(brandPalette.muted, value5);
 var renderSelfUpdateNotice = ({
   currentVersion,
   latestVersion,
-  packageName
+  packageName,
+  command
 }) => renderPanel("CLI update available", [
   `${packageName} ${currentVersion} is behind ${latestVersion}.`,
   "",
   "usage:",
-  ` \u2022 run
+  ` \u2022 run \`${command?.join(" ") ?? `npm install -g ${packageName}@latest`}\` to update this CLI`
 ].join(`
 `));
 var renderQuestion = (value5) => [
@@ -44075,8 +44086,8 @@ var packsForWorkspace = ({
   const dependencyNames = manifest?.dependencies ?? [];
   const dependencies = new Set(dependencyNames);
   const selectedPacks = new Set(finalPacks);
-  const hasDependency = (
-  const hasDependencyPrefix = (prefix) => dependencyNames.some((
+  const hasDependency = (name2) => dependencies.has(name2);
+  const hasDependencyPrefix = (prefix) => dependencyNames.some((name2) => name2.startsWith(prefix));
   const hasBackendDependency = () => hasDependency("better-auth") || hasDependency("drizzle-orm") || hasDependency("drizzle-kit") || hasDependency("elysia") || hasDependencyPrefix("@trpc/");
   const hasFrontendDependency = () => hasDependency("next") || hasDependency("react") || hasDependency("react-dom") || hasDependency("@tanstack/react-query");
   const hasFrontendWorkspaceName = () => relativeWorkspacePath.split(path8.sep).some((segment) => segment === "frontend" || segment === "ui" || segment === "web") || (manifest?.packageName?.replace(/^@[^/]+\//u, "").split(/[^a-zA-Z0-9]+/u).filter((segment) => segment.length > 0).map((segment) => segment.toLowerCase()).some((segment) => segment === "frontend" || segment === "ui" || segment === "web") ?? false);
@@ -44682,7 +44693,7 @@ var writeScaffoldOutput = ({
     lintAssets,
     subagentManifestSpec,
     baseline: baseline.summary,
-
+    version,
     managedFiles,
     generatedFiles: manifestGeneratedFiles
   })
@@ -46418,7 +46429,7 @@ var managedFileSchema = Struct({
 });
 var scaffoldManifestSchema = parseJson(Struct({
   repoShapeMode: optional(String$),
-
+  version: optional(String$),
   finalPacks: Array$(String$),
   baseline: optional(Unknown),
   generatedFiles: optional(Array$(String$)),
@@ -46835,7 +46846,7 @@ var checkSelfUpdate = async ({
     updateAvailable: isNewerVersion(latestVersion, currentVersion)
   };
 };
-var commandExists2 = (command) => spawnSync("which", [command], { stdio: "ignore" }).status === 0;
+var commandExists2 = (command) => spawnSync("which", [command], { stdio: "ignore", timeout: 750 }).status === 0;
 var detectCliInstallPackageManager = ({
   entrypoint = process.argv[1] ?? "",
   userAgent = process.env.npm_config_user_agent
@@ -46907,39 +46918,47 @@ var autoUpdateCliIfStale = async ({
     packageName,
     tag: tag4
   });
-  const result = spawnSync(command[0], command.slice(1), {
-    stdio: "inherit",
-    shell: false
-  });
-  if (result.status !== 0) {
-    return {
-      status: "failed",
-      check: check2,
-      packageManager,
-      command
-    };
-  }
   return {
-    status: "
+    status: "available",
     check: check2,
     packageManager,
     command
   };
 };
 var parseSkillList = (value5) => {
-
-
-
+  try {
+    const parsed = JSON.parse(value5);
+    if (!Array.isArray(parsed)) {
+      return [];
+    }
+    return parsed.flatMap((entry) => {
+      if (typeof entry === "object" && entry !== null && "name" in entry && "path" in entry && "scope" in entry && typeof entry.name === "string" && typeof entry.path === "string" && typeof entry.scope === "string") {
+        return [{
+          name: entry.name,
+          path: entry.path,
+          scope: entry.scope
+        }];
+      }
+      return [];
+    });
+  } catch {
+    return parseTextSkillList(value5);
   }
-
-
-
-
-
-
-
+};
+var stripAnsi = (value5) => value5.replace(/\x1B\[[0-?]*[ -/]*[@-~]/gu, "");
+var parseTextSkillList = (value5) => {
+  const scope5 = value5.includes("Global Skills") ? "global" : "project";
+  return stripAnsi(value5).split(`
+`).flatMap((line4) => {
+    const match18 = line4.match(/^([^\s]+)\s+(.+)$/u);
+    if (match18 === null || match18[1] === "Project" || match18[1] === "Global") {
+      return [];
     }
-    return [
+    return [{
+      name: match18[1],
+      path: match18[2].trim(),
+      scope: scope5
+    }];
   });
 };
 var listSkills = (scope5) => {
@@ -46948,98 +46967,140 @@ var listSkills = (scope5) => {
     encoding: "utf8",
     shell: false,
     stdio: ["ignore", "pipe", "ignore"],
-    timeout:
+    timeout: 1500
   });
   if (result.status !== 0) {
     return [];
   }
-
-  return parseSkillList(result.stdout);
-  } catch {
-    return [];
-  }
+  return parseSkillList(result.stdout);
 };
-var
-
-
-
-
-
-
-
-
-var
-  const result = spawnSync("skills", [
-    "add",
-    "wearedevpunks/skills",
-    "--global",
-    "--skill",
-    "dp-cli",
-    "--yes"
-  ], {
-    stdio: "ignore",
-    shell: false,
-    timeout: 30000
-  });
-  return result.status === 0;
-};
-var ensureDpCliSkillPresent = () => {
+var skillInstallCommand = () => [
+  "skills",
+  "add",
+  "wearedevpunks/skills",
+  "--global",
+  "--skill",
+  "dp-cli",
+  "--yes"
+];
+var checkDpCliSkillPresence = () => {
   if (!commandExists2("skills")) {
     return {
       skillsCliAvailable: false,
       detected: false,
       project: false,
-      global: false
-      installedGlobal: false,
-      updatedProject: false,
-      updatedGlobal: false
+      global: false
     };
   }
   const projectBeforeInstall = listSkills("project").some((skill) => skill.name === "dp-cli");
   const globalBeforeInstall = listSkills("global").some((skill) => skill.name === "dp-cli");
-  const installedGlobal = projectBeforeInstall || globalBeforeInstall ? false : installDpCliSkillGlobal();
   const project3 = projectBeforeInstall;
-  const global = globalBeforeInstall
+  const global = globalBeforeInstall;
   return {
     skillsCliAvailable: true,
     detected: project3 || global,
     project: project3,
-    global
-    installedGlobal,
-    updatedProject: project3 ? updateDpCliSkill("project") : false,
-    updatedGlobal: global ? updateDpCliSkill("global") : false
+    global
   };
 };
 
 // src/index.ts
+var startupCheckWorkerEnv = "DP_STARTUP_CHECK_WORKER";
+var startupCheckCliEnv = "DP_STARTUP_CHECK_CLI";
+var startupCheckSkillEnv = "DP_STARTUP_CHECK_SKILL";
+var startupCheckIntervalMs = Number.parseInt(process.env.DP_STARTUP_CHECK_INTERVAL_MS ?? String(12 * 60 * 60 * 1000), 10);
+var startupCheckCacheFile = path14.join(homedir2(), ".cache", "punks", "startup-check.json");
 var app = exports_Command.make("punks", {}, () => exports_Effect.sync(() => console.log(renderCommandGuide()))).pipe(exports_Command.withDescription("Devpunks AI scaffolding CLI."), exports_Command.withSubcommands([scaffoldCommand, updateCommand]));
 var cli = exports_Command.run(app, {
   name: "punks",
-  version
+  version
 });
 var shouldCheckSelfUpdate = () => process.env.DP_NO_UPDATE_CHECK !== "1" && process.env.CI === undefined && process.argv.includes("--help") === false && process.argv.includes("-h") === false && process.argv.includes("--version") === false && process.argv.includes("-v") === false;
-var
-
-  return;
-}
+var shouldCheckDpCliSkillUpdate = () => process.env.DP_NO_SKILL_UPDATE_CHECK !== "1" && process.env.CI === undefined && process.argv.includes("--help") === false && process.argv.includes("-h") === false && process.argv.includes("--version") === false && process.argv.includes("-v") === false;
+var runCliUpdateCheck = async () => {
   const result = await autoUpdateCliIfStale({
-    currentVersion:
-    packageName:
+    currentVersion: version,
+    packageName: name,
     tag: process.env.DP_UPDATE_TAG ?? "latest"
   });
-  if (result.status === "manager-not-detected" || result.status === "failed") {
+  if (result.status === "available" || result.status === "manager-not-detected" || result.status === "failed") {
     console.error(renderSelfUpdateNotice({
       currentVersion: result.check.currentVersion,
       latestVersion: result.check.latestVersion,
-      packageName: result.check.packageName
+      packageName: result.check.packageName,
+      command: result.command
     }));
   }
-}
-var
-
-  if (!
+};
+var runDpCliSkillCheck = () => {
+  const skillResult = checkDpCliSkillPresence();
+  if (!skillResult.skillsCliAvailable) {
     return;
   }
-
-  });
-
+  if (!skillResult.detected) {
+    console.error(`punks startup checks: \`dp-cli\` skill not found. Install it with \`${skillInstallCommand().join(" ")}\`.`);
+  }
+};
+var runStartupChecks = async ({
+  checkCli,
+  checkSkill
+}) => {
+  if (checkCli) {
+    await runCliUpdateCheck();
+  }
+  if (checkSkill) {
+    runDpCliSkillCheck();
+  }
+};
+var hasFreshStartupCheck = () => {
+  if (!Number.isFinite(startupCheckIntervalMs) || startupCheckIntervalMs <= 0) {
+    return false;
+  }
+  try {
+    const parsed = JSON.parse(readFileSync7(startupCheckCacheFile, "utf8"));
+    return typeof parsed.checkedAt === "number" && Date.now() - parsed.checkedAt < startupCheckIntervalMs;
+  } catch {
+    return false;
+  }
+};
+var markStartupCheckStarted = () => {
+  mkdirSync6(path14.dirname(startupCheckCacheFile), { recursive: true });
+  writeFileSync5(startupCheckCacheFile, JSON.stringify({ checkedAt: Date.now() }, null, 2));
+};
+var startBackgroundStartupChecks = () => {
+  const checkCli = shouldCheckSelfUpdate();
+  const checkSkill = shouldCheckDpCliSkillUpdate();
+  if (!checkCli && !checkSkill || hasFreshStartupCheck()) {
+    return;
+  }
+  const entrypoint = process.argv[1];
+  if (entrypoint === undefined || !existsSync7(entrypoint)) {
+    return;
+  }
+  markStartupCheckStarted();
+  const worker = spawn2(process.execPath, [entrypoint], {
+    cwd: process.cwd(),
+    detached: true,
+    env: {
+      ...process.env,
+      [startupCheckWorkerEnv]: "1",
+      [startupCheckCliEnv]: checkCli ? "1" : "0",
+      [startupCheckSkillEnv]: checkSkill ? "1" : "0"
+    },
+    stdio: ["ignore", "ignore", "inherit"]
+  });
+  worker.unref();
+};
+if (process.env[startupCheckWorkerEnv] === "1") {
+  runStartupChecks({
+    checkCli: process.env[startupCheckCliEnv] === "1",
+    checkSkill: process.env[startupCheckSkillEnv] === "1"
+  }).finally(() => {
+    process.exit(0);
+  });
+} else {
+  try {
+    startBackgroundStartupChecks();
+  } catch {}
+  cli(process.argv).pipe(exports_Effect.provide(exports_Layer.mergeAll(exports_BunContext.layer)), exports_BunRuntime.runMain);
+}
````
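An operator-side sketch (assumption, not shipped code; the cache path and `checkedAt` shape are taken from the bundle above): clearing the marker makes the next normal `punks` invocation spawn the detached worker again.

```js
// Sketch: reset the startup-check cache marker written by markStartupCheckStarted().
import { rmSync } from "node:fs";
import { homedir } from "node:os";
import path from "node:path";

// The CLI writes { "checkedAt": <epoch ms> } here and skips the worker while the
// marker is younger than DP_STARTUP_CHECK_INTERVAL_MS (12 hours by default).
const marker = path.join(homedir(), ".cache", "punks", "startup-check.json");
rmSync(marker, { force: true });
```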
package/dist/skills/agnostic/docs/docs-maintenance/SKILL.md
CHANGED

````diff
@@ -1,6 +1,6 @@
 ---
 name: docs-maintenance
-description: Ingest a spec folder into the wiki domain layer by extracting and writing flow pages first, then concept pages, then syncing ingest metadata. Secondary: update docs/ when code changes alter architecture, setup, contracts, or operator workflow. Use when a spec is ready to be captured as domain knowledge after review or implementation, or when a code task changes non-obvious behavior that docs/ should reflect.
+description: "Ingest a spec folder into the wiki domain layer by extracting and writing flow pages first, then concept pages, then syncing ingest metadata. Secondary: update docs/ when code changes alter architecture, setup, contracts, or operator workflow. Use when a spec is ready to be captured as domain knowledge after review or implementation, or when a code task changes non-obvious behavior that docs/ should reflect."
 ---
 
 # Docs Maintenance
````
package/dist/skills/agnostic/planning/create-plan/REFERENCE.md
CHANGED

````diff
@@ -33,5 +33,5 @@
 - Derive repo ownership and hosting from git state instead of hardcoding assumptions.
 - If a required tool is unavailable, stop clearly and report the missing dependency.
 - Keep the canonical backlog model aligned with [../write-backlog/assets/concepts/backlog-model.md](../write-backlog/assets/concepts/backlog-model.md).
-- For every planned task, locate the relevant scoped `AGENTS.md` chain and assign the existing skills that the executor must load before editing.
+- For every planned task, locate the relevant scoped `AGENTS.md` chain, load the relevant skill guidance during planning, and assign the existing skills that the executor must load again before editing.
 - Never start implementation from this skill.
````
package/dist/skills/agnostic/planning/create-plan/SKILL.md
CHANGED

````diff
@@ -31,7 +31,7 @@ Create a plan first. Never implement code in this skill.
 4. Update a running decision ledger after every answer so the user never has to reconstruct state from memory.
 5. Insert a synthesis checkpoint before the thread gets noisy, then continue only if more ambiguity reduction is still needed.
 6. Research with `opensrc path <package>` or `opensrc path <owner>/<repo>` plus primary-source web docs when current behavior matters.
-7. Locate scoped `AGENTS.md` files for every planned task path
+7. Locate scoped `AGENTS.md` files for every planned task path, extract `Primary skills here` lists, and load the relevant skill guidance before finalizing task design.
 8. Read `references/planner-phase.md` and run `$swarm-planner` as an explicit inner phase.
 9. Read `references/tdd-phase.md` and run `$tdd` as an explicit inner phase.
 10. Read `references/backlog-sync.md` and sync backlog at epic/story level, not one item per plan task.
@@ -46,10 +46,11 @@ Create a plan first. Never implement code in this skill.
 3. Every plan-shaping question must use the exact block: `Decision`, `Recommendation`, `Question`, `Why it matters`.
 4. Keep `$grill-me`, `$swarm-planner`, and `$tdd` as visible required inner phases of one planning run.
 5. Resolve each task's scoped guidance from root `AGENTS.md` down to the nearest `AGENTS.md` for its `location`.
-6.
-7.
-8.
-9.
+6. Use each task's required skill guidance while shaping its scope, dependencies, validation, RED target, and review mode; do not merely list skills after the plan is written.
+7. Assign each task the exact existing skills required by those scoped `Primary skills here` lists, merging all scopes for cross-directory tasks.
+8. Normalize every task with stable ids, `depends_on`, `location`, `description`, `validation`, `status`, `log`, `files edited/created`, owning-story backlog references, `assigned_skills`, `tdd_target`, and `review_mode`.
+9. Keep the saved plan standalone: include situation, issue, solution shape, assumptions, findings, research, dependency graph, testing strategy, skill-routing notes, risks, validation gates, unresolved questions, and a resolved decision ledger.
+10. Stop after plan creation and backlog sync. Do not implement code or spawn implementation workers.
 
 ### Review modes
 
````
package/dist/skills/agnostic/planning/create-plan/references/grill-phase.md
CHANGED

````diff
@@ -57,7 +57,8 @@ Preserve `$grill-me` behavior:
 - provide a recommended answer with each question
 - if a question can be answered from the codebase, answer it by inspecting instead
 - keep grilling until every plan-shaping branch is resolved enough to plan safely
--
+- before recording an unresolved question in the plan, ask the user whether to resolve it now or defer it
+- when the user defers a branch, record the assumption or unresolved question explicitly in the plan with its planning impact
 
 ## Decision ledger
 
````
package/dist/skills/agnostic/planning/create-plan/references/plan-schema.md
CHANGED

````diff
@@ -22,6 +22,8 @@ Include:
 - validation gates per phase when phases exist
 - unresolved questions
 
+`unresolved questions` is not a hiding place for skipped planning. Before saving a plan with unresolved questions, prompt the user to resolve each plan-shaping question that they can reasonably answer now. Keep only deferred, externally blocked, or non-blocking questions, and state why each remains open.
+
 ## Task contract
 
 Every task must include:
@@ -47,6 +49,8 @@ Multiple tasks may point to the same story when one story needs several executio
 
 Do not create a new backlog item only because a task boundary exists in the plan.
 
+`assigned_skills` must list the skills used to shape the task during planning, not only skills expected during implementation. Skill guidance should be reflected in the task's boundary, validation, `tdd_target`, and `review_mode`.
+
 ```md
 ### T3: Example task
 
````
package/dist/skills/agnostic/planning/create-plan/references/planner-phase.md
CHANGED

````diff
@@ -18,10 +18,13 @@ Before finalizing task boundaries:
 2. For each location, inspect the `AGENTS.md` chain from repo root to the nearest scoped file.
 3. Extract every `Primary skills here` entry from applicable scoped files.
 4. Verify each named skill exists in `.agents/skills/` or an installed skill source visible to the agent.
-5.
+5. Load the relevant skill instructions before finalizing the task's boundary, validation, RED target, and review mode.
+6. Add the merged, deduplicated list to the task as `assigned_skills`.
 
 If a task spans multiple scopes, include all required skills from all touched scopes. If a scope names a missing skill, keep the task planned but record the missing skill in risks and unresolved questions.
 
+`assigned_skills` is both planning input and executor handoff. Do not design the task first and attach skills afterward. Use the skill guidance to decide what a correct task slice, dependency, validation, and test target should look like.
+
 ## Planner behavior
 
 Produce exactly one named `PLAN.md` in the target spec folder.
@@ -33,7 +36,7 @@ Preserve `$swarm-planner` behavior:
 - validations per task
 - parallel execution waves
 - risks and mitigations
-- explicit `assigned_skills` per task from scoped `AGENTS.md
+- explicit `assigned_skills` per task from scoped `AGENTS.md`, with task design shaped by those skills
 - a final subagent review for missing deps, ordering issues, edge cases, and holes before yielding
 
 Do not stop between the grill and planner phases unless a true blocking ambiguity remains.
````
package/dist/skills/agnostic/planning/create-spec/SKILL.md
CHANGED

````diff
@@ -10,7 +10,7 @@ description: Create a SPEC.md file for a new feature, product, or system using t
 - **Role:** higher-order spec authoring skill
 - **Entrypoint type:** public entrypoint
 - **Upstream:** new idea, feature request, epic/capability issue, or problem statement
-- **Delegates to:**
+- **Delegates to:** `$requirements-grill` when discovery leaves meaningful spec-affecting unknowns; `$write-backlog` when grill outcomes change epic/story scope
 - **Downstream:** reviewed `SPEC.md`, then usually `create-plan` or `implement-spec`
 - **Entry conditions:** wiki domain can be resolved, or the user creates one first with `create-wiki-domain`
 - **Stop conditions:** `SPEC.md`, wiki index, and wiki log are updated, then wait for user review
@@ -27,12 +27,14 @@ The output lives at `apps/wiki/specs/<domain>/<folder-name>/SPEC.md`.
 2. Read `references/discovery.md` and orient yourself in the right wiki domain before asking questions.
 3. If backlog context exists, read the parent epic and every child story before asking questions.
 4. If the user did not provide a concrete request, ask for a rough description first.
-5. Read `references/questioning.md` and ask only the clarifying questions needed to
-6.
-7.
-8. Read `references/
-9. Read `
-10. Read `references/
+5. Read `references/questioning.md` and ask only the lightweight clarifying questions needed to identify whether a grill phase is required.
+6. If draft `Open Questions` would affect spec trust, read `references/grill-phase.md` and run a bounded `$requirements-grill` phase before writing.
+7. If the grill changes accepted scope, child stories, deferred scope, or story order, read `references/backlog-sync.md` and run `$write-backlog` to sync the backlog automatically.
+8. Read `references/folder-naming.md` to resolve the domain and spec folder path.
+9. Read `assets/SPEC-TEMPLATE.md` and write the spec.
+10. Read `references/spec-quality-bar.md` before saving.
+11. Read `references/wiki-bookkeeping.md` to update `index.md`, `<domain>-specs.md`, and `log.md`.
+12. Read `references/handoff.md` to choose the next-step recommendation and stop after user review.
 
 ## Workflow
 
@@ -41,15 +43,19 @@ The output lives at `apps/wiki/specs/<domain>/<folder-name>/SPEC.md`.
 1. Build orientation first; do not jump straight into writing.
 2. Ask only enough to make the spec crisp, testable, and bounded.
 3. When an epic has child stories, harvest and preserve each story's requirements before drafting.
-4.
-5.
-6.
-7.
+4. Use `$requirements-grill` for meaningful spec-affecting unknowns; do not replace that phase with ad hoc `Open Questions` prompts.
+5. After a grill phase, use `$write-backlog` automatically when accepted decisions imply backlog changes.
+6. Keep the spec free of implementation detail.
+7. Use the template structure exactly, then remove all template scaffolding.
+8. Update wiki bookkeeping in the same run.
+9. Stop after presenting the spec and the recommended next step.
 
 ## Advanced features
 
 - Discovery and repo orientation: see [references/discovery.md](references/discovery.md)
 - Clarifying-question strategy: see [references/questioning.md](references/questioning.md)
+- Requirements grill phase: see [references/grill-phase.md](references/grill-phase.md)
+- Backlog sync after grilling: see [references/backlog-sync.md](references/backlog-sync.md)
 - Domain and folder naming rules: see [references/folder-naming.md](references/folder-naming.md)
 - Acceptance-criteria and quality bar: see [references/spec-quality-bar.md](references/spec-quality-bar.md)
 - Wiki index and log updates: see [references/wiki-bookkeeping.md](references/wiki-bookkeeping.md)
````
package/dist/skills/agnostic/planning/create-spec/assets/SPEC-TEMPLATE.md
CHANGED

````diff
@@ -74,7 +74,7 @@ _Optional. Technical discoveries, known system constraints, or early implementat
 
 ## Open Questions
 
-_Unresolved questions that could affect implementation._
+_Unresolved questions that could affect implementation. Before saving, route meaningful spec-affecting unknowns through the requirements-grill phase. Only keep questions here when the user explicitly defers them, the answer requires external validation, or the issue is non-blocking for spec review._
 
 | # | Question | Affects | Owner | Status |
 |---|----------|---------|-------|--------|
````
package/dist/skills/agnostic/planning/create-spec/references/backlog-sync.md
ADDED

````diff
@@ -0,0 +1,46 @@
+# Backlog Sync
+
+Use after `$requirements-grill` and before final spec drafting.
+
+## Trigger
+
+Run `$write-backlog` automatically when grill outcomes change:
+
+- the epic/capability boundary
+- child-story acceptance signals, scope, or canonical terms
+- missing, parked, moved, or future-scope stories
+- story ordering, blockers, or parent/child relationships
+
+Skip only for wording clarifications that do not change backlog scope, story meaning, or ordering.
+
+## Load
+
+Follow:
+
+- `../../../requirements/write-backlog/SKILL.md`
+- `../../../requirements/write-backlog/REFERENCE.md`
+- `../../../requirements/write-backlog/assets/concepts/backlog-model.md`
+
+If grill artifacts exist, read:
+
+1. `docs/<topic>-grill-status.md`
+2. `docs/<topic>-grill-log.md`
+
+## Rules
+
+- The parent epic remains the spec anchor.
+- Child stories remain product-facing slices beneath that epic.
+- Derive backlog changes only from accepted decisions and locked direction.
+- Preserve parked branches as deferred scope, follow-up epic/story candidates, or backlog notes.
+- Keep unresolved still-open items out of committed story scope unless explicitly marked.
+- Use native parent/child and `blockedBy` / `blocks` relations when the provider supports them.
+- Do not add implementation details, file paths, TDD targets, validation commands, or worker handoffs to backlog bodies.
+
+## Handoff
+
+- reread the updated epic and child stories before drafting
+- incorporate all child-story requirements into the spec
+- add backlog item ids/URLs to spec links when available
+- mention deferred backlog items only as non-goals, future scope, or `Open Questions`
+
+Do not finalize a backlog-backed spec from stale pre-grill story text.
````
package/dist/skills/agnostic/planning/create-spec/references/grill-phase.md
ADDED

````diff
@@ -0,0 +1,47 @@
+# Grill Phase
+
+Use after discovery/questioning when unresolved spec questions would affect review trust.
+
+## Trigger
+
+Run bounded `$requirements-grill` when:
+
+- multiple child stories conflict or leave cross-story behavior unclear
+- the target actor, outcome, boundary, non-goal, or acceptance signal is ambiguous
+- candidate `Open Questions` would materially change scope, trust, or acceptance criteria
+
+Use lightweight direct clarification instead when only one small naming or wording detail is missing.
+
+## Load
+
+Follow:
+
+- `../../../requirements/requirements-grill/references/grilling-flow.md`
+- `../../../requirements/requirements-grill/references/artifact-output.md` for serious grilling sessions
+
+## Rules
+
+- Ask one question at a time by default.
+- Include a recommended answer and why it is preferred.
+- Inspect repo/docs/backlog first when the answer can be found locally.
+- Force precise choices when multiple interpretations exist.
+- Close, park, or explicitly defer each branch.
+
+## Artifacts
+
+For serious sessions, create or reuse:
+
+- `docs/<topic>-grill-log.md`
+- `docs/<topic>-grill-status.md`
+
+Tiny clarification-only runs do not need durable grill artifacts.
+
+## Handoff
+
+- accepted answers become requirements, constraints, non-goals, acceptance criteria, or decisions
+- parked branches outside the epic become non-goals or future scope
+- explicitly deferred branches become `Open Questions`
+- external-validation branches become `Open Questions` with owner/status
+- accepted scope changes must flow through `backlog-sync.md` before final spec drafting
+
+Do not write the spec until each discovered branch is closed, parked, or explicitly deferred.
````
package/dist/skills/agnostic/planning/create-spec/references/questioning.md
CHANGED

````diff
@@ -4,7 +4,9 @@ Use this reference after discovery and before writing.
 
 ## Goal
 
-Ask only enough to
+Ask only enough to decide whether the spec is ready to draft or needs a bounded `$requirements-grill` phase. A vague spec creates false confidence.
+
+Before writing `Open Questions`, route meaningful spec-affecting unknowns through `grill-phase.md`. Do not silently invent an open-question table as a substitute for requirements work.
 
 ## Priority topics
 
@@ -27,6 +29,8 @@ Surface these as needed:
 - If a fact is ambiguous and matters to the spec, ask directly.
 - Prefer concrete examples over abstract wording.
 - When backlog context exists, ask about cross-story interactions only after reading all child stories first.
+- If multiple or material unknowns remain, stop lightweight questioning and run the grill phase.
+- If a tiny unknown would become an `Open Question`, ask whether the user can resolve it now or wants to defer it.
 
 ## Stop asking when
 
@@ -38,4 +42,10 @@ Surface these as needed:
 - major functional requirements are identifiable
 - key non-goals are explicit
 
-
+Only leave `Open Questions` in the spec when one of these is true:
+
+- the user explicitly chose to defer the question
+- the answer requires external validation outside the current spec session
+- the question is non-blocking and the spec can still be reviewed honestly
+
+If a branch remains open after the grill phase or lightweight prompt, capture it as an open question or assumption inside the spec rather than pretending certainty. Include the prompt/grill outcome so reviewers know why it remains unresolved.
````
package/dist/skills/agnostic/planning/implement-spec/SKILL.md
CHANGED

````diff
@@ -41,11 +41,12 @@ That means `implement-spec` itself owns all of the following in parallel mode:
 3. Choose the execution mode explicitly:
    - Read `references/sequential.md` for one-thread execution.
    - Read `references/parallel.md` for wave-based worker execution.
-4.
-5.
-6.
-7.
-8.
+4. In Codex parallel mode, read `references/parallel-reasoning.md` before spawning workers and apply the Codex-only worker reasoning policy.
+5. Record the chosen mode under **Execution mode** in `IMPLEMENTATION-NOTES.md` before coding.
+6. Execute only the chosen mode. Do not mix modes inside one run.
+7. After each completed task or wave, update `PLAN.md`, `IMPLEMENTATION-NOTES.md`, and spec-linked tech debt before advancing.
+8. If backlog sync is in scope, keep epic/story bodies product-facing and use native metadata or comments instead of execution handoff rewrites.
+9. Finish with the shared acceptance audit and spec finalization contract.
 
 ## Mode selection
 
@@ -70,3 +71,4 @@ If the user already chose a mode, honor it. If not, make the smallest safe choic
 - Parallel execution specifics: see [references/parallel.md](references/parallel.md)
 - Parallel plan parsing and wave construction: see [references/parallel-orchestration.md](references/parallel-orchestration.md)
 - Parallel worker brief contract: see [references/parallel-worker-brief.md](references/parallel-worker-brief.md)
+- Codex parallel worker reasoning policy: see [references/parallel-reasoning.md](references/parallel-reasoning.md)
````
package/dist/skills/agnostic/planning/implement-spec/references/parallel-orchestration.md
CHANGED

````diff
@@ -56,6 +56,7 @@ Launch all unblocked tasks in parallel.
 For each unblocked task:
 
 - choose the worker template from `.agents/subagents/manifest.mjs`
+- in Codex, apply the worker `reasoning_effort` policy from [parallel-reasoning.md](parallel-reasoning.md)
 - use the worker-brief contract from [parallel-worker-brief.md](parallel-worker-brief.md)
 - keep the task scope narrow
 - ensure the worker owns only the assigned task and its required validation
````
package/dist/skills/agnostic/planning/implement-spec/references/parallel-reasoning.md
ADDED

````diff
@@ -0,0 +1,19 @@
+# Codex Parallel Reasoning
+
+Use this reference only when `implement-spec` runs in Codex parallel mode and calls Codex `spawn_agent`.
+
+Set worker `reasoning_effort` lower than the parent orchestrator:
+
+| Orchestrator reasoning | Worker reasoning |
+|------------------------|------------------|
+| `xhigh` | `high` |
+| `high` | `medium` |
+| `medium` | `low` |
+| `low` | `low` |
+
+Rules:
+
+- This policy is Codex-only. Do not apply it to other models or hosts.
+- Pass the mapped value as `reasoning_effort` in each Codex `spawn_agent` call.
+- Keep orchestration, retries, dependency decisions, and acceptance audit in the parent.
+- If a worker task needs equal or higher reasoning, keep that task in the parent instead of spawning it.
````
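A minimal sketch of that mapping (illustrative only; the table in the new reference is the contract, this lookup is not part of the shipped skill assets):

```js
// Codex-only orchestrator -> worker reasoning_effort mapping from the table above.
const workerReasoningEffort = {
  xhigh: "high",
  high: "medium",
  medium: "low",
  low: "low",
};

// An orchestrator running at "high" would pass reasoning_effort: "medium"
// in each Codex spawn_agent call.
console.log(workerReasoningEffort.high); // "medium"
```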
package/dist/skills/agnostic/planning/implement-spec/references/parallel-worker-brief.md
CHANGED

````diff
@@ -34,6 +34,8 @@ Each worker brief should require:
 9. running the exact task validation evidence before returning, plus extra plan validation when feasible
 10. updating the plan entry with status, log, touched files, and gotchas before handoff closes
 
+When spawning Codex workers, apply [parallel-reasoning.md](parallel-reasoning.md): `xhigh -> high`, `high -> medium`, `medium -> low`, `low -> low`.
+
 ## Worker output contract
 
 Require the worker to return:
@@ -63,3 +65,4 @@ The parent `implement-spec` run owns:
 - review of worker outputs
 - retry and escalation decisions
 - deciding when the wave is complete
+- the final acceptance audit
````
package/dist/skills/agnostic/planning/implement-spec/references/parallel.md
CHANGED

````diff
@@ -31,16 +31,18 @@ Do not treat worker spawning as the whole job. The orchestration loop is part of
 1. Load the shared lifecycle from `lifecycle.md`.
 2. Record `parallel` under **Execution mode** in `IMPLEMENTATION-NOTES.md`.
 3. Read `.agents/subagents/manifest.mjs` before the first spawn and choose explicit worker templates per task.
-4.
-5.
-6.
-7.
-8.
+4. In Codex, read [parallel-reasoning.md](parallel-reasoning.md) and set each worker `reasoning_effort` lower than the parent orchestrator.
+5. Read [parallel-orchestration.md](parallel-orchestration.md) and parse `PLAN.md` into a task graph.
+6. Build the current wave from the unblocked tasks only.
+7. Read [parallel-worker-brief.md](parallel-worker-brief.md) and use that contract when spawning workers.
+8. Validate each wave before moving on. Fix failures before the next wave.
+9. Update the plan, notes, and tech debt after every wave.
 
 ## Required evidence
 
 - plan-derived wave selection
 - explicit worker briefs per task
+- explicit Codex worker `reasoning_effort` when running in Codex
 - post-wave review of worker outputs
 - acceptance-criteria coverage plus RED -> GREEN evidence, or explicit non-testable verification
 - task completion only after validation and plan/log updates
````
|
package/docs/README.md
CHANGED
|
@@ -11,10 +11,9 @@ Implementation notes:
|
|
|
11
11
|
- distributed skill assets live in `skills/`
|
|
12
12
|
- runtime projection/writing logic lives in `src/scaffold/`
|
|
13
13
|
- `punks update` refreshes scaffold-managed assets from `.devpunks/scaffold-manifest.json`
|
|
14
|
-
- CLI startup checks npm's `latest` dist-tag
|
|
15
|
-
- CLI startup also checks for the `dp-cli` skill through the `skills` CLI, installing it globally when absent and updating it when present. Set `DP_NO_SKILL_UPDATE_CHECK=1` to skip this best-effort pass.
|
|
14
|
+
- CLI startup checks run in a detached advisory worker at most once per 12 hours by default. They check npm's `latest` dist-tag and whether the named `dp-cli` skill is present, but startup never installs or updates packages/skills while another CLI command is starting. Set `DP_NO_UPDATE_CHECK=1` or `DP_NO_SKILL_UPDATE_CHECK=1` to skip those checks, `DP_UPDATE_TAG=next` to compare against another dist-tag, and `DP_STARTUP_CHECK_INTERVAL_MS=0` to force the worker during local testing.
|
|
16
15
|
- baseline releases use `baseline/stable/*` GitHub release tags, separate from npm executable tags such as `v1.0.1`
|
|
17
|
-
- shared neutral hook and sync assets live in `src/data/hooks/` and `src/data/scripts
|
|
16
|
+
- shared neutral hook and sync assets live in `src/data/hooks/` and `src/data/scripts/`; hook commands infer the target repo package manager from `packageManager` and lockfiles
|
|
18
17
|
- scaffolded required tools always include `portless` and `skills` so generated guidance can standardize local dev origins and keep skill entrypoints up to date
|
|
19
18
|
- `punks scaffold setup` checks the base required tools (`portless`, `skills`) before repo detection and checks selected-pack tools after pack confirmation.
|
|
20
19
|
- Oxlint specs/starter config are scaffolded only when scanned manifests already declare `oxlint`; the auto format/lint hook is scaffolded only when manifests declare `oxfmt`. Other lint/format stacks are intentionally left untouched for now.
|
|
@@ -95,6 +95,7 @@ Current scope:
|
|
|
95
95
|
- check the base required tools (`portless`, `skills`) at the start of setup before repo detection, then check selected-pack tools after pack confirmation
|
|
96
96
|
- include `debug-agent` through the default debug pack and install/verify the `debug-agent` CLI without running `debug-agent init`, because the CLI already scaffolds the project-local skill
|
|
97
97
|
- scaffold Oxlint specs/starter config only when scanned package manifests declare `oxlint`, and scaffold the `format-edited-file` Oxfmt/Oxlint hook only when manifests declare `oxfmt`; repos without those tools keep their existing lint/format setup untouched
|
|
98
|
+
- scaffolded hooks infer the target repo package manager from `packageManager` first, then lockfiles, so PR test gates and Oxfmt/Oxlint execution do not hardcode the CLI repo's package manager
|
|
98
99
|
- select language packs separately from framework packs; TypeScript is selected from a `typescript` package dependency or nested `.ts` / `.tsx` files, and Python is selected from nested `.py` files, while ignoring root config files plus vendor, virtualenv, scaffold, docs, examples, scripts, `opensrc`, cache, and build output
|
|
99
100
|
- seed Python subagent templates that combine the Python language skills into `python-app`, `python-async`, and `python-testing` specialists
|
|
100
101
|
- seed a read-only `code-review` subagent template that uses `simplify` for changed-code cleanup review and `improve-codebase-architecture` for grounded architecture-friction findings
|
|
@@ -217,14 +218,15 @@ The npm account must have publish access to `@punks/cli`; otherwise npm may repo
|
|
|
217
218
|
|
|
218
219
|
## CLI Self-Update Detection
|
|
219
220
|
|
|
220
|
-
The CLI
|
|
221
|
+
The CLI may start best-effort startup checks on normal command startup. The requested command must render immediately; checks run in a detached worker and are rate-limited by a local cache marker to at most once per 12 hours by default. The worker checks the npm package version for `@punks/cli`, and it checks the `dp-cli` skill through the `skills` CLI. It only prints advisory install/update commands. Startup must never install or update packages/skills while another CLI command is starting, and it must never run or suggest plain root `skills update`. These checks are separate from `punks update`, which updates scaffold-managed repo assets.
|
|
221
222
|
|
|
222
223
|
- checks `https://registry.npmjs.org/%40punks%2Fcli/latest`
|
|
223
224
|
- compares that dist-tag version with the bundled CLI version
|
|
224
|
-
- prints
|
|
225
|
+
- prints the inferred package-manager update command only when the registry version is newer
|
|
225
226
|
- silently skips the notice when npm is unreachable, the registry response is invalid, or the command is `--help` / `--version`
|
|
226
227
|
- skips in CI and when `DP_NO_UPDATE_CHECK=1`
|
|
227
228
|
- supports `DP_UPDATE_TAG=next` for canary/operator testing against another dist-tag
|
|
229
|
+
- supports `DP_STARTUP_CHECK_INTERVAL_MS=0` to force the detached worker during local testing
|
|
228
230
|
|
|
229
231
|
To test the built command locally without preparing another repo first, use the committed fixtures:
|
|
230
232
|
|