tmux-agent 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.codex/skills/speckit/SKILL.md +173 -0
- package/.codex/skills/speckit/assets/templates/checklist-template.md +49 -0
- package/.codex/skills/speckit/assets/templates/notes-entrypoints-template.md +11 -0
- package/.codex/skills/speckit/assets/templates/notes-questions-template.md +7 -0
- package/.codex/skills/speckit/assets/templates/notes-readme-template.md +36 -0
- package/.codex/skills/speckit/assets/templates/notes-session-template.md +21 -0
- package/.codex/skills/speckit/assets/templates/plan-template.md +126 -0
- package/.codex/skills/speckit/assets/templates/spec-template.md +135 -0
- package/.codex/skills/speckit/assets/templates/tasks-template.md +269 -0
- package/.codex/skills/speckit/references/acceptance.md +183 -0
- package/.codex/skills/speckit/references/analyze.md +186 -0
- package/.codex/skills/speckit/references/checklist.md +302 -0
- package/.codex/skills/speckit/references/clarify-auto.md +69 -0
- package/.codex/skills/speckit/references/clarify-detailed.md +78 -0
- package/.codex/skills/speckit/references/clarify.md +189 -0
- package/.codex/skills/speckit/references/constitution.md +90 -0
- package/.codex/skills/speckit/references/group.md +89 -0
- package/.codex/skills/speckit/references/implement-task.md +115 -0
- package/.codex/skills/speckit/references/implement.md +129 -0
- package/.codex/skills/speckit/references/notes.md +82 -0
- package/.codex/skills/speckit/references/plan-deep.md +87 -0
- package/.codex/skills/speckit/references/plan-from-questions.md +115 -0
- package/.codex/skills/speckit/references/plan-from-review.md +89 -0
- package/.codex/skills/speckit/references/plan.md +97 -0
- package/.codex/skills/speckit/references/review-plan.md +156 -0
- package/.codex/skills/speckit/references/specify.md +246 -0
- package/.codex/skills/speckit/references/tasks.md +155 -0
- package/.codex/skills/speckit/references/taskstoissues.md +33 -0
- package/.codex/skills/speckit/scripts/bash/check-prerequisites.sh +206 -0
- package/.codex/skills/speckit/scripts/bash/common.sh +191 -0
- package/.codex/skills/speckit/scripts/bash/create-new-feature.sh +259 -0
- package/.codex/skills/speckit/scripts/bash/extract-coded-points.sh +322 -0
- package/.codex/skills/speckit/scripts/bash/extract-spec-ids.sh +238 -0
- package/.codex/skills/speckit/scripts/bash/extract-tasks.sh +295 -0
- package/.codex/skills/speckit/scripts/bash/extract-user-stories.sh +312 -0
- package/.codex/skills/speckit/scripts/bash/setup-notes.sh +182 -0
- package/.codex/skills/speckit/scripts/bash/setup-plan.sh +110 -0
- package/.codex/skills/speckit/scripts/bash/show-todo-tasks.sh +257 -0
- package/.codex/skills/speckit/scripts/bash/spec-group-checklist.sh +402 -0
- package/.codex/skills/speckit/scripts/bash/spec-group-members.sh +215 -0
- package/.codex/skills/speckit/scripts/bash/spec-registry-graph.sh +399 -0
- package/.specify/memory/constitution.md +67 -0
- package/.specify/templates/agent-file-template.md +28 -0
- package/.specify/templates/checklist-template.md +49 -0
- package/.specify/templates/plan-template.md +126 -0
- package/.specify/templates/spec-template.md +135 -0
- package/.specify/templates/tasks-template.md +269 -0
- package/README.md +128 -0
- package/README.zh-CN.md +127 -0
- package/bun.lock +269 -0
- package/dist/cli/commands/codex/forkHome.js +88 -0
- package/dist/cli/commands/codex/send.js +55 -0
- package/dist/cli/commands/codex/sessionInfo.js +42 -0
- package/dist/cli/commands/codex/spawn.js +68 -0
- package/dist/cli/commands/find.js +26 -0
- package/dist/cli/commands/paneKill.js +33 -0
- package/dist/cli/commands/paneSpawn.js +40 -0
- package/dist/cli/commands/paneTitle.js +33 -0
- package/dist/cli/commands/read.js +34 -0
- package/dist/cli/commands/send.js +51 -0
- package/dist/cli/commands/snapshot.js +19 -0
- package/dist/cli/commands/ui/select.js +41 -0
- package/dist/cli/commands/windowKill.js +25 -0
- package/dist/cli/commands/windowLs.js +15 -0
- package/dist/cli/commands/windowNew.js +28 -0
- package/dist/cli/commands/windowRename.js +25 -0
- package/dist/cli/index.js +365 -0
- package/dist/cli/parse.js +39 -0
- package/dist/lib/codex/forkHome.js +101 -0
- package/dist/lib/codex/isCodexPane.js +55 -0
- package/dist/lib/codex/send.js +58 -0
- package/dist/lib/codex/sessionInfo.js +449 -0
- package/dist/lib/codex/spawn.js +246 -0
- package/dist/lib/contracts/types.js +2 -0
- package/dist/lib/fs/safeRm.js +32 -0
- package/dist/lib/io/readStdin.js +14 -0
- package/dist/lib/os/process.js +55 -0
- package/dist/lib/output/format.js +95 -0
- package/dist/lib/proc/lsof.js +42 -0
- package/dist/lib/proc/ps.js +60 -0
- package/dist/lib/targeting/errors.js +13 -0
- package/dist/lib/targeting/resolvePaneTarget.js +91 -0
- package/dist/lib/targeting/resolveWindowTarget.js +40 -0
- package/dist/lib/targeting/scope.js +58 -0
- package/dist/lib/tmux/capturePane.js +20 -0
- package/dist/lib/tmux/exec.js +66 -0
- package/dist/lib/tmux/paneOps.js +29 -0
- package/dist/lib/tmux/paste.js +23 -0
- package/dist/lib/tmux/sendKeys.js +47 -0
- package/dist/lib/tmux/session.js +29 -0
- package/dist/lib/tmux/snapshotPanes.js +46 -0
- package/dist/lib/tmux/snapshotWindows.js +24 -0
- package/dist/lib/tmux/windowOps.js +32 -0
- package/dist/lib/ui/popupSelect.js +432 -0
- package/dist/lib/ui/popupSupport.js +76 -0
- package/package.json +23 -0
- package/src/cli/commands/codex/forkHome.ts +141 -0
- package/src/cli/commands/codex/send.ts +83 -0
- package/src/cli/commands/codex/sessionInfo.ts +59 -0
- package/src/cli/commands/codex/spawn.ts +90 -0
- package/src/cli/commands/find.ts +40 -0
- package/src/cli/commands/paneKill.ts +49 -0
- package/src/cli/commands/paneSpawn.ts +53 -0
- package/src/cli/commands/paneTitle.ts +50 -0
- package/src/cli/commands/read.ts +48 -0
- package/src/cli/commands/send.ts +71 -0
- package/src/cli/commands/snapshot.ts +28 -0
- package/src/cli/commands/ui/select.ts +49 -0
- package/src/cli/commands/windowKill.ts +35 -0
- package/src/cli/commands/windowLs.ts +20 -0
- package/src/cli/commands/windowNew.ts +40 -0
- package/src/cli/commands/windowRename.ts +36 -0
- package/src/cli/index.ts +430 -0
- package/src/lib/codex/forkHome.ts +148 -0
- package/src/lib/codex/isCodexPane.ts +56 -0
- package/src/lib/codex/send.ts +84 -0
- package/src/lib/codex/sessionInfo.ts +521 -0
- package/src/lib/codex/spawn.ts +305 -0
- package/src/lib/contracts/types.ts +30 -0
- package/src/lib/fs/safeRm.ts +32 -0
- package/src/lib/io/readStdin.ts +11 -0
- package/src/lib/output/format.ts +105 -0
- package/src/lib/proc/lsof.ts +44 -0
- package/src/lib/proc/ps.ts +70 -0
- package/src/lib/targeting/errors.ts +25 -0
- package/src/lib/targeting/resolvePaneTarget.ts +106 -0
- package/src/lib/targeting/resolveWindowTarget.ts +45 -0
- package/src/lib/targeting/scope.ts +76 -0
- package/src/lib/tmux/capturePane.ts +21 -0
- package/src/lib/tmux/exec.ts +90 -0
- package/src/lib/tmux/paneOps.ts +35 -0
- package/src/lib/tmux/paste.ts +20 -0
- package/src/lib/tmux/sendKeys.ts +72 -0
- package/src/lib/tmux/session.ts +27 -0
- package/src/lib/tmux/snapshotPanes.ts +52 -0
- package/src/lib/tmux/snapshotWindows.ts +23 -0
- package/src/lib/tmux/windowOps.ts +43 -0
- package/src/lib/ui/popupSelect.ts +561 -0
- package/src/lib/ui/popupSupport.ts +84 -0
- package/tests/e2e/codexForkHome.test.ts +146 -0
- package/tests/e2e/codexSessionInfo.test.ts +112 -0
- package/tests/e2e/codexTuiSend.test.ts +68 -0
- package/tests/integration/codexSpawn.test.ts +113 -0
- package/tests/integration/paneOps.test.ts +60 -0
- package/tests/integration/sendRead.test.ts +52 -0
- package/tests/integration/snapshot.test.ts +39 -0
- package/tests/integration/tmuxHarness.ts +39 -0
- package/tests/integration/windowOps.test.ts +60 -0
- package/tests/unit/codexSend.test.ts +105 -0
- package/tests/unit/codexSessionInfo.test.ts +88 -0
- package/tests/unit/codexSpawn.test.ts +34 -0
- package/tests/unit/keys.test.ts +30 -0
- package/tests/unit/outputFormat.test.ts +52 -0
- package/tests/unit/popupSelect.test.ts +77 -0
- package/tests/unit/popupSupport.test.ts +109 -0
- package/tests/unit/resolvePaneTarget.test.ts +43 -0
- package/tests/unit/resolveWindowTarget.test.ts +36 -0
- package/tests/unit/safeRm.test.ts +41 -0
- package/tests/unit/scope.test.ts +57 -0
- package/tsconfig.json +14 -0
- package/vitest.config.ts +16 -0
|
@@ -0,0 +1,295 @@
|
|
|
1
|
+
#!/usr/bin/env bash

# Extract tasks (checkbox + id + status) from one or more tasks.md files.
#
# This script is intentionally read-only and deterministic: it emits an inventory
# of tasks with file+line evidence so acceptance review does not rely on ad-hoc grep.
#
# Usage:
#   ./extract-tasks.sh [--json] [--feature <id>]... [<id>...]
#
# Examples:
#   ./extract-tasks.sh --json --feature 024 --feature 025
#   ./extract-tasks.sh 024 025
#
# Notes:
# - <id> can be "024" (numeric prefix) or "024-some-feature" (full directory).
# - Feature resolution reuses check-prerequisites.sh to find absolute paths.

set -euo pipefail

JSON_MODE=false
FEATURES=()

while [[ $# -gt 0 ]]; do
  case "$1" in
    --json)
      JSON_MODE=true
      shift
      ;;
    --feature)
      # Reject both a missing value and a following option accidentally
      # consumed as the value (e.g. `--feature --json`).
      if [[ $# -lt 2 || "${2:-}" == --* ]]; then
        echo "ERROR: --feature requires a value." >&2
        exit 1
      fi
      FEATURES+=("$2")
      shift 2
      ;;
    --feature=*)
      FEATURES+=("${1#--feature=}")
      shift
      ;;
    --help|-h)
      cat << 'EOF'
Usage: extract-tasks.sh [--json] [--feature <id>]... [<id>...]

Extract checkbox tasks from one or more tasks.md files and emit an inventory
with file+line evidence (useful for speckit.acceptance).

Options:
  --json            Output JSON (default: text)
  --feature <id>    Target a specific spec (repeatable)
  --help, -h        Show this help message

Examples:
  ./extract-tasks.sh --json --feature 024 --feature 025
  ./extract-tasks.sh 024 025
EOF
      exit 0
      ;;
    --*)
      echo "ERROR: Unknown option '$1'. Use --help for usage information." >&2
      exit 1
      ;;
    *)
      # Bare positional arguments are accepted when they look like feature ids.
      if [[ "$1" =~ ^[0-9]{3}(-.+)?$ ]]; then
        FEATURES+=("$1")
        shift
      else
        echo "ERROR: Unknown argument '$1'. Expected feature id like 024 or 024-some-feature." >&2
        exit 1
      fi
      ;;
  esac
done

SCRIPT_DIR="$(CDPATH="" cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
CHECK_PREREQ="$SCRIPT_DIR/check-prerequisites.sh"

# ${FEATURES[@]+...} guards the expansion: under `set -u`, expanding an empty
# array via "${FEATURES[@]}" is an "unbound variable" error on bash < 4.4
# (notably the stock macOS bash 3.2), which would abort the no-argument case.
python3 - "$CHECK_PREREQ" "$JSON_MODE" ${FEATURES[@]+"${FEATURES[@]}"} << 'PY'
|
|
80
|
+
from __future__ import annotations
|
|
81
|
+
|
|
82
|
+
import json
|
|
83
|
+
import re
|
|
84
|
+
import subprocess
|
|
85
|
+
import sys
|
|
86
|
+
from pathlib import Path
|
|
87
|
+
from typing import Any
|
|
88
|
+
|
|
89
|
+
|
|
90
|
+
CHECK = sys.argv[1]
|
|
91
|
+
JSON_MODE = sys.argv[2].lower() == "true"
|
|
92
|
+
INPUT_FEATURES = sys.argv[3:]
|
|
93
|
+
|
|
94
|
+
|
|
95
|
+
# A task line: "- [ ] T001 ..." / "- [x] T001 ...". The id is one uppercase
# letter followed by at least three digits.
TASK_PATTERN = re.compile(r"^\s*-\s*\[(?P<state>[ xX])\]\s+(?P<id>[A-Z]\d{3,})\b(?P<rest>.*)$")
# Markdown ATX heading of any level 1-6.
HEADING_PATTERN = re.compile(r"^(#{1,6})\s+(.+?)\s*$")
# "[US3]" story tag embedded in a task line.
STORY_PATTERN = re.compile(r"\[(US\d+)\]")
# "[P]" marks a task that may run in parallel.
PARALLEL_PATTERN = re.compile(r"\[P\]")
# Requirement codes referenced by tasks: FR-001 / NFR-001 / SC-001.
CODE_PATTERN = re.compile(r"\b(?:FR|NFR|SC)-\d{3}\b")
# Trailing "Refs: ...)" marker. NOTE(review): the character class contains ")"
# twice — presumably one was meant to be a fullwidth "）"; confirm upstream.
REFS_MARKER_PATTERN = re.compile(r"\bRefs:\s*(?P<body>.+?)\s*[))]\s*$")


def run_check(feature: str | None) -> dict[str, Any]:
    """Invoke check-prerequisites.sh in --paths-only mode and decode its JSON.

    When *feature* is None the script resolves the current feature itself.
    Raises subprocess.CalledProcessError on a non-zero exit and
    json.JSONDecodeError on malformed output.
    """
    args = [CHECK, "--json", "--paths-only"]
    if feature:
        args.extend(["--feature", feature])
    return json.loads(subprocess.check_output(args, text=True))


def parse_tasks(tasks_path: Path) -> tuple[list[dict[str, Any]], list[dict[str, Any]]]:
    """Parse *tasks_path* and return (tasks, non_task_checkboxes).

    Each task dict carries id/done/line/phase/section/parallel/story/refs/raw,
    where phase is the nearest preceding "##" heading and section the nearest
    "###" heading. Checkboxes without a task id are not tasks; they are
    returned separately for diagnostics. Raises FileNotFoundError when the
    file does not exist.
    """
    if not tasks_path.exists():
        raise FileNotFoundError(str(tasks_path))

    current_phase: str | None = None
    current_section: str | None = None
    parsed: list[dict[str, Any]] = []
    stray_boxes: list[dict[str, Any]] = []

    source = tasks_path.read_text(encoding="utf-8", errors="replace")
    for idx, raw_line in enumerate(source.splitlines(), start=1):
        heading = HEADING_PATTERN.match(raw_line)
        if heading is not None:
            depth = len(heading.group(1))
            label = heading.group(2).strip() or None
            if depth == 2:
                # A new phase resets the section context.
                current_phase, current_section = label, None
            elif depth == 3:
                current_section = label

        task = TASK_PATTERN.match(raw_line)
        if task is None:
            # Keep compatibility: checkboxes lacking an id are intentionally
            # ignored as tasks but recorded so reviewers can spot them.
            if re.match(r"^\s*-\s*\[[ xX]\]\s+", raw_line):
                stray_boxes.append({"line": idx, "raw": raw_line})
            continue

        tail = task.group("rest").strip()
        story_hit = STORY_PATTERN.search(tail)

        refs: list[str] = []
        marker = REFS_MARKER_PATTERN.search(tail)
        if marker is not None:
            # De-duplicate and sort referenced requirement codes.
            refs = sorted(set(CODE_PATTERN.findall(marker.group("body"))))

        parsed.append(
            {
                "id": task.group("id"),
                "done": task.group("state").lower() == "x",
                "line": idx,
                "phase": current_phase,
                "section": current_section,
                "parallel": PARALLEL_PATTERN.search(tail) is not None,
                "story": story_hit.group(1) if story_hit else None,
                "refs": refs,
                "raw": raw_line.rstrip("\n"),
            }
        )

    return parsed, stray_boxes
|
|
171
|
+
|
|
172
|
+
|
|
173
|
+
def summarize_duplicates(tasks: list[dict[str, Any]]) -> list[dict[str, Any]]:
    """Report every task id that appears more than once.

    Returns a list (sorted by id) of {"id", "occurrences"} dicts, where each
    occurrence records the line and raw text of one definition.
    """
    grouped: dict[str, list[dict[str, Any]]] = {}
    for task in tasks:
        grouped.setdefault(task["id"], []).append(task)

    dup_entries = [
        {
            "id": task_id,
            "occurrences": [{"line": o["line"], "raw": o["raw"]} for o in members],
        }
        for task_id, members in grouped.items()
        if len(members) > 1
    ]
    dup_entries.sort(key=lambda entry: entry["id"])
    return dup_entries
|
|
187
|
+
|
|
188
|
+
|
|
189
|
+
def summarize_by_key(tasks: list[dict[str, Any]], key: str) -> dict[str, Any]:
    """Tally total/done/todo counts of *tasks*, bucketed by ``task[key]``.

    A falsy or missing value buckets under "(none)". Buckets appear in first
    encounter order.
    """
    summary: dict[str, Any] = {}
    for task in tasks:
        label = task.get(key) or "(none)"
        counts = summary.setdefault(label, {"total": 0, "done": 0, "todo": 0})
        counts["total"] += 1
        counts["done" if task["done"] else "todo"] += 1
    return summary
|
|
200
|
+
|
|
201
|
+
|
|
202
|
+
def main() -> int:
    """Build a task inventory per requested feature and print it.

    Output goes to stdout as JSON (--json) or as a text summary. Returns the
    process exit code: 1 when any feature failed to resolve/parse or contains
    duplicate task ids, otherwise 0.
    """
    features = list(INPUT_FEATURES)
    if not features:
        # No explicit feature: let check-prerequisites.sh resolve the current one.
        features = [None]  # type: ignore[list-item]

    targets: list[dict[str, Any]] = []
    had_error = False

    for feature in features:
        try:
            paths = run_check(feature)
            tasks_path = Path(paths.get("TASKS", ""))
            tasks, non_task_checkboxes = parse_tasks(tasks_path)
        except Exception as e:
            # Keep going: report the failure as a structured per-feature entry
            # instead of aborting the whole run.
            had_error = True
            targets.append(
                {
                    "input": feature,
                    "error": {"message": str(e), "type": type(e).__name__},
                }
            )
            continue

        total = len(tasks)
        done = sum(1 for t in tasks if t["done"])
        todo = total - done

        duplicates = summarize_duplicates(tasks)
        if duplicates:
            # Duplicate ids make the inventory ambiguous; flag the run as failed.
            had_error = True

        targets.append(
            {
                "input": feature,
                "feature": paths.get("BRANCH"),
                "repoRoot": paths.get("REPO_ROOT"),
                "featureDir": paths.get("FEATURE_DIR"),
                # Report the file that was actually parsed (the TASKS path from
                # check-prerequisites.sh). Re-deriving FEATURE_DIR/tasks.md here
                # could disagree with it and point reviewers at the wrong file.
                "tasksFile": str(tasks_path),
                "counts": {
                    "total": total,
                    "done": done,
                    "todo": todo,
                    "duplicates": len(duplicates),
                    "nonTaskCheckboxes": len(non_task_checkboxes),
                },
                "byPhase": summarize_by_key(tasks, "phase"),
                "byStory": summarize_by_key(tasks, "story"),
                "duplicates": duplicates,
                "nonTaskCheckboxes": non_task_checkboxes,
                "tasks": tasks,
            }
        )

    if JSON_MODE:
        print(json.dumps({"targets": targets}, ensure_ascii=False, indent=2))
        return 1 if had_error else 0

    # Text mode: one header + counts line per feature, then one line per task.
    for t in targets:
        if "error" in t:
            print(f"[ERROR] {t.get('input')}: {t['error']['type']}: {t['error']['message']}")
            continue

        counts = t.get("counts", {})
        print(f"{t.get('feature')} ({t.get('tasksFile')})")
        print(
            "  "
            + " ".join(
                [
                    f"total={counts.get('total')}",
                    f"done={counts.get('done')}",
                    f"todo={counts.get('todo')}",
                    f"dup={counts.get('duplicates')}",
                    f"nonTaskCheckboxes={counts.get('nonTaskCheckboxes')}",
                ]
            )
        )
        for task in t.get("tasks", []):
            mark = "x" if task["done"] else " "
            phase = task.get("phase") or ""
            story = task.get("story") or ""
            refs = task.get("refs") or []
            refs_part = f" refs={','.join(refs)}" if refs else ""
            context = " / ".join([p for p in [phase, story] if p]) if (phase or story) else ""
            context_part = f" [{context}]" if context else ""
            print(f"  - [{mark}] {task['id']}:{task['line']}{context_part}{refs_part} {task['raw']}")
        print()

    return 1 if had_error else 0


if __name__ == "__main__":
    raise SystemExit(main())
|
|
294
|
+
PY
|
|
295
|
+
|
|
@@ -0,0 +1,312 @@
|
|
|
1
|
+
#!/usr/bin/env bash

# Extract user stories (US) from one or more spec.md files.
#
# This script is intentionally read-only and deterministic: it emits an inventory
# of User Story definitions with file+line evidence.
#
# Usage:
#   ./extract-user-stories.sh [--json] [--feature <id>]... [<id>...]
#
# Examples:
#   ./extract-user-stories.sh --json --feature 061
#   ./extract-user-stories.sh 060 061
#
# Notes:
# - <id> can be "024" (numeric prefix) or "024-some-feature" (full directory).
# - Feature resolution reuses check-prerequisites.sh to find absolute paths.

set -euo pipefail

JSON_MODE=false
FEATURES=()

while [[ $# -gt 0 ]]; do
  case "$1" in
    --json)
      JSON_MODE=true
      shift
      ;;
    --feature)
      # Reject both a missing value and a following option accidentally
      # consumed as the value (e.g. `--feature --json`).
      if [[ $# -lt 2 || "${2:-}" == --* ]]; then
        echo "ERROR: --feature requires a value." >&2
        exit 1
      fi
      FEATURES+=("$2")
      shift 2
      ;;
    --feature=*)
      FEATURES+=("${1#--feature=}")
      shift
      ;;
    --help|-h)
      cat << 'EOF'
Usage: extract-user-stories.sh [--json] [--feature <id>]... [<id>...]

Extract User Story (US) definitions from one or more spec files and emit an inventory
with file+line evidence.

Options:
  --json            Output JSON (default: text)
  --feature <id>    Target a specific spec (repeatable)
  --help, -h        Show this help message

Examples:
  ./extract-user-stories.sh --json --feature 061
  ./extract-user-stories.sh 060 061
EOF
      exit 0
      ;;
    --*)
      echo "ERROR: Unknown option '$1'. Use --help for usage information." >&2
      exit 1
      ;;
    *)
      # Bare positional arguments are accepted when they look like feature ids.
      if [[ "$1" =~ ^[0-9]{3}(-.+)?$ ]]; then
        FEATURES+=("$1")
        shift
      else
        echo "ERROR: Unknown argument '$1'. Expected feature id like 024 or 024-some-feature." >&2
        exit 1
      fi
      ;;
  esac
done

SCRIPT_DIR="$(CDPATH="" cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
CHECK_PREREQ="$SCRIPT_DIR/check-prerequisites.sh"

# ${FEATURES[@]+...} guards the expansion: under `set -u`, expanding an empty
# array via "${FEATURES[@]}" is an "unbound variable" error on bash < 4.4
# (notably the stock macOS bash 3.2), which would abort the no-argument case.
python3 - "$CHECK_PREREQ" "$JSON_MODE" ${FEATURES[@]+"${FEATURES[@]}"} << 'PY'
|
|
80
|
+
from __future__ import annotations
|
|
81
|
+
|
|
82
|
+
import json
|
|
83
|
+
import re
|
|
84
|
+
import subprocess
|
|
85
|
+
import sys
|
|
86
|
+
from pathlib import Path
|
|
87
|
+
from typing import Any
|
|
88
|
+
|
|
89
|
+
|
|
90
|
+
CHECK = sys.argv[1]
|
|
91
|
+
JSON_MODE = sys.argv[2].lower() == "true"
|
|
92
|
+
INPUT_FEATURES = sys.argv[3:]
|
|
93
|
+
|
|
94
|
+
|
|
95
|
+
# Markdown ATX heading of any level 1-6.
HEADING_PATTERN = re.compile(r"^(#{1,6})\s+(.+?)\s*$")
# Heading text beginning "User Story <n>"; the remainder holds title/priority.
USER_STORY_PATTERN = re.compile(r"^User\s+Story\s+(?P<num>\d+)\b(?P<rest>.*)$", re.IGNORECASE)
# Trailing "(Priority: Pn)" suffix on a story heading.
PRIORITY_PATTERN = re.compile(r"\(\s*Priority\s*:\s*(?P<priority>P\d+)\s*\)\s*$", re.IGNORECASE)

# "USn" codes mentioned anywhere in the spec body.
US_CODE_PATTERN = re.compile(r"\bUS(?P<num>\d+)\b")


def run_check(feature: str | None) -> dict[str, Any]:
    """Invoke check-prerequisites.sh in --paths-only mode and decode its JSON.

    When *feature* is None the script resolves the current feature itself.
    Raises subprocess.CalledProcessError on a non-zero exit and
    json.JSONDecodeError on malformed output.
    """
    args = [CHECK, "--json", "--paths-only"]
    if feature:
        args.extend(["--feature", feature])
    return json.loads(subprocess.check_output(args, text=True))


def normalize_title(rest: str) -> tuple[str, str | None]:
    """Split a story-heading remainder into (title, priority).

    Strips a trailing "(Priority: Pn)" marker, then a leading dash (ASCII,
    en or em) and/or colon separator. Priority is None when absent.
    """
    text = rest.strip()
    priority: str | None = None

    hit = PRIORITY_PATTERN.search(text)
    if hit is not None:
        priority = hit.group("priority")
        text = text[: hit.start()].rstrip()

    text = text.lstrip()
    if text[:1] in ("-", "–", "—"):
        text = text[1:].lstrip()
    if text[:1] == ":":
        text = text[1:].lstrip()

    return text.strip(), priority


def parse_user_stories(spec_path: Path) -> tuple[list[dict[str, Any]], list[dict[str, Any]], list[dict[str, Any]]]:
    """Parse *spec_path* and return (stories, duplicate_definitions, orphan_references).

    Stories are "User Story <n>" headings, ordered by story number; each gets
    a "references" list of every USn mention in the file. Codes mentioned but
    never defined as headings are returned as orphan references. A missing
    file yields three empty lists.
    """
    if not spec_path.exists():
        return [], [], []

    content_lines = spec_path.read_text(encoding="utf-8", errors="replace").splitlines()

    # Pass 1: collect every story-heading definition, grouped by code.
    found: list[dict[str, Any]] = []
    grouped: dict[str, list[dict[str, Any]]] = {}

    for idx, text in enumerate(content_lines, start=1):
        heading = HEADING_PATTERN.match(text)
        if heading is None:
            continue
        us = USER_STORY_PATTERN.match(heading.group(2).strip())
        if us is None:
            continue

        number = int(us.group("num"))
        title, priority = normalize_title(us.group("rest"))
        entry = {
            "code": f"US{number}",
            "index": number,
            "line": idx,
            "title": title,
            "priority": priority,
            "raw": text.rstrip("\n"),
        }
        found.append(entry)
        grouped.setdefault(entry["code"], []).append(entry)

    # The first definition of each code is canonical; extras are reported.
    stories: list[dict[str, Any]] = []
    duplicates: list[dict[str, Any]] = []

    for code, entries in sorted(grouped.items(), key=lambda kv: int(kv[0][2:])):
        stories.append({**entries[0], "references": []})
        if len(entries) > 1:
            duplicates.append(
                {
                    "code": code,
                    "definitions": [{"line": e["line"], "raw": e["raw"]} for e in entries],
                }
            )

    lookup: dict[str, dict[str, Any]] = {s["code"]: s for s in stories}

    # Pass 2: attach every USn mention to its story, or record it as orphaned.
    unmatched: dict[str, list[dict[str, Any]]] = {}

    for idx, text in enumerate(content_lines, start=1):
        for hit in US_CODE_PATTERN.finditer(text):
            code = f"US{hit.group('num')}"
            occurrence = {"line": idx, "col": hit.start() + 1, "raw": text.rstrip("\n")}
            target = lookup.get(code)
            if target is None:
                unmatched.setdefault(code, []).append(occurrence)
            else:
                target["references"].append(occurrence)

    orphans = [
        {"code": code, "references": occ}
        for code, occ in sorted(unmatched.items(), key=lambda kv: int(kv[0][2:]))
    ]

    return stories, duplicates, orphans
|
|
205
|
+
|
|
206
|
+
|
|
207
|
+
def summarize_by_priority(stories: list[dict[str, Any]]) -> dict[str, int]:
    """Count stories per priority label, returned with keys sorted.

    Stories without a priority bucket under "(none)".
    """
    tally: dict[str, int] = {}
    for story in stories:
        label = story.get("priority") or "(none)"
        tally[label] = tally.get(label, 0) + 1
    return dict(sorted(tally.items()))
|
|
213
|
+
|
|
214
|
+
|
|
215
|
+
def main() -> int:
    """Build a user-story inventory per requested feature and print it.

    Output goes to stdout as JSON (--json) or as a text summary. Returns the
    process exit code: 1 when any feature failed to resolve or its spec has
    duplicate User Story definitions, otherwise 0.
    """
    features = list(INPUT_FEATURES)
    if not features:
        # No explicit feature: let check-prerequisites.sh resolve the current one.
        features = [None]  # type: ignore[list-item]

    targets: list[dict[str, Any]] = []
    had_error = False

    for feature in features:
        try:
            paths = run_check(feature)
        except subprocess.CalledProcessError as e:
            had_error = True
            targets.append(
                {
                    "input": feature,
                    "error": {
                        "message": "check-prerequisites failed",
                        "exitCode": e.returncode,
                        # check_output does not capture stderr by default, so
                        # these may be empty; keep the keys for a stable shape.
                        "stdout": (e.stdout or "").strip(),
                        "stderr": (e.stderr or "").strip(),
                    },
                }
            )
            continue
        except Exception as e:
            # Also trap non-CalledProcessError failures (e.g. malformed JSON
            # from check-prerequisites.sh, or the script being missing) so one
            # bad feature does not crash the whole run with a traceback; this
            # mirrors extract-tasks.sh's per-feature error reporting.
            had_error = True
            targets.append(
                {
                    "input": feature,
                    "error": {"message": str(e), "type": type(e).__name__},
                }
            )
            continue

        spec_path = Path(paths.get("FEATURE_SPEC", ""))
        stories, duplicate_definitions, orphan_references = parse_user_stories(spec_path)

        if duplicate_definitions:
            # Duplicate story definitions make the inventory ambiguous.
            had_error = True

        counts = {
            "definitions": len(stories),
            "byPriority": summarize_by_priority(stories),
            "references": sum(len(s.get("references") or []) for s in stories),
            "duplicateDefinitions": len(duplicate_definitions),
            "orphanReferences": len(orphan_references),
        }

        targets.append(
            {
                "input": feature,
                "feature": paths.get("BRANCH"),
                "repoRoot": paths.get("REPO_ROOT"),
                "featureDir": paths.get("FEATURE_DIR"),
                "specFile": str(spec_path),
                "planFile": paths.get("IMPL_PLAN"),
                "tasksFile": paths.get("TASKS"),
                "counts": counts,
                "duplicateDefinitions": duplicate_definitions,
                "orphanReferences": orphan_references,
                "stories": stories,
            }
        )

    if JSON_MODE:
        print(json.dumps({"targets": targets}, ensure_ascii=False, indent=2))
        return 1 if had_error else 0

    # Text mode: one header + counts line per feature, then one line per story.
    for t in targets:
        if "error" in t:
            err = t.get("error") or {}
            print(f"[ERROR] {t.get('input')}: {err.get('message')}")
            continue

        print(f"{t.get('feature')} ({t.get('specFile')})")
        counts = t.get("counts") or {}
        by_pri = counts.get("byPriority") or {}
        print(
            "  "
            + " ".join(
                [
                    f"definitions={counts.get('definitions')}",
                    f"byPriority={by_pri}",
                    f"refs={counts.get('references')}",
                    f"dupDefs={counts.get('duplicateDefinitions')}",
                    f"orphanRefs={counts.get('orphanReferences')}",
                ]
            )
        )

        for s in t.get("stories") or []:
            pri = s.get("priority")
            pri_part = f" pri={pri}" if pri else ""
            title = s.get("title") or ""
            refs = s.get("references") or []
            refs_part = f" refs={len(refs)}" if refs else ""
            print(f"  - {s['code']}:{s['line']}{pri_part}{refs_part} {title}".rstrip())
        print()

    return 1 if had_error else 0


if __name__ == "__main__":
    raise SystemExit(main())
|
|
311
|
+
PY
|
|
312
|
+
|