@mindfoldhq/trellis 0.5.0-beta.11 → 0.5.0-beta.13

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (34) hide show
  1. package/README.md +3 -0
  2. package/dist/cli/index.js +6 -0
  3. package/dist/cli/index.js.map +1 -1
  4. package/dist/commands/init.d.ts +2 -0
  5. package/dist/commands/init.d.ts.map +1 -1
  6. package/dist/commands/init.js +68 -66
  7. package/dist/commands/init.js.map +1 -1
  8. package/dist/commands/update.d.ts.map +1 -1
  9. package/dist/commands/update.js +35 -12
  10. package/dist/commands/update.js.map +1 -1
  11. package/dist/configurators/shared.d.ts +5 -0
  12. package/dist/configurators/shared.d.ts.map +1 -1
  13. package/dist/configurators/shared.js +6 -3
  14. package/dist/configurators/shared.js.map +1 -1
  15. package/dist/migrations/manifests/0.5.0-beta.10.json +9 -0
  16. package/dist/migrations/manifests/0.5.0-beta.12.json +9 -0
  17. package/dist/migrations/manifests/0.5.0-beta.13.json +9 -0
  18. package/dist/templates/codex/agents/trellis-check.toml +16 -0
  19. package/dist/templates/codex/agents/trellis-implement.toml +16 -0
  20. package/dist/templates/codex/hooks/session-start.py +26 -3
  21. package/dist/templates/codex/skills/start/SKILL.md +11 -8
  22. package/dist/templates/common/commands/continue.md +1 -1
  23. package/dist/templates/copilot/hooks/session-start.py +26 -3
  24. package/dist/templates/copilot/prompts/start.prompt.md +11 -8
  25. package/dist/templates/opencode/agents/trellis-implement.md +6 -3
  26. package/dist/templates/opencode/plugins/session-start.js +32 -12
  27. package/dist/templates/shared-hooks/inject-subagent-context.py +27 -1
  28. package/dist/templates/shared-hooks/session-start.py +41 -4
  29. package/dist/templates/trellis/scripts/common/task_context.py +27 -209
  30. package/dist/templates/trellis/scripts/common/task_store.py +76 -2
  31. package/dist/templates/trellis/scripts/task.py +41 -25
  32. package/dist/templates/trellis/workflow.md +100 -22
  33. package/dist/types/ai-tools.d.ts +2 -2
  34. package/package.json +7 -7
@@ -24,6 +24,29 @@ def should_skip_injection() -> bool:
24
24
  return os.environ.get("COPILOT_NON_INTERACTIVE") == "1"
25
25
 
26
26
 
27
+ def _has_curated_jsonl_entry(jsonl_path: Path) -> bool:
28
+ """Return True iff jsonl has at least one row with a ``file`` field.
29
+
30
+ A freshly seeded jsonl only contains a ``{"_example": ...}`` row (no
31
+ ``file`` key) — that is NOT "ready". Readiness requires at least one
32
+ curated entry. Matches the contract used by ``inject-subagent-context.py``.
33
+ """
34
+ try:
35
+ for line in jsonl_path.read_text(encoding="utf-8").splitlines():
36
+ line = line.strip()
37
+ if not line:
38
+ continue
39
+ try:
40
+ row = json.loads(line)
41
+ except json.JSONDecodeError:
42
+ continue
43
+ if isinstance(row, dict) and row.get("file"):
44
+ return True
45
+ except (OSError, UnicodeDecodeError):
46
+ return False
47
+ return False
48
+
49
+
27
50
  def read_file(path: Path, fallback: str = "") -> str:
28
51
  try:
29
52
  return path.read_text(encoding="utf-8")
@@ -110,17 +133,17 @@ def _get_task_status(trellis_dir: Path) -> str:
110
133
  has_context = False
111
134
  for jsonl_name in ("implement.jsonl", "check.jsonl", "spec.jsonl"):
112
135
  jsonl_path = task_dir / jsonl_name
113
- if jsonl_path.is_file() and jsonl_path.stat().st_size > 0:
136
+ if jsonl_path.is_file() and _has_curated_jsonl_entry(jsonl_path):
114
137
  has_context = True
115
138
  break
116
139
 
117
140
  has_prd = (task_dir / "prd.md").is_file()
118
141
 
119
142
  if not has_prd:
120
- return f"Status: NOT READY\nTask: {task_title}\nMissing: prd.md not created\nNext: Write PRD, then research init-context start"
143
+ return f"Status: NOT READY\nTask: {task_title}\nMissing: prd.md not created\nNext: Write PRD (see workflow.md Phase 1.1) then curate implement.jsonl per Phase 1.3"
121
144
 
122
145
  if not has_context:
123
- return f"Status: NOT READY\nTask: {task_title}\nMissing: Context not configured (no jsonl files)\nNext: Complete Phase 2 (research init-context start) before implementing"
146
+ return f"Status: NOT READY\nTask: {task_title}\nMissing: implement.jsonl / check.jsonl missing or empty\nNext: Curate entries per workflow.md Phase 1.3 (spec + research files only), then `task.py start`"
124
147
 
125
148
  return f"Status: READY\nTask: {task_title}\nNext: Continue with implement or check"
126
149
 
@@ -273,17 +273,21 @@ Task(
273
273
 
274
274
  **Step 6: Configure Context** `[AI]`
275
275
 
276
- Initialize default context:
276
+ `implement.jsonl` and `check.jsonl` were seeded on `task.py create` with a single self-describing `_example` line. Curate real entries now (see workflow.md Phase 1.3 for the full rule):
277
+
278
+ - Put **spec files** (`.trellis/spec/<package>/<layer>/*.md`) and **research files** (`{TASK_DIR}/research/*.md`) only.
279
+ - Do NOT put code files — those are read during implementation, not pre-registered here.
280
+ - Split: `implement.jsonl` = specs the implement sub-agent needs; `check.jsonl` = specs the check sub-agent needs.
281
+
282
+ Discover available specs:
277
283
 
278
284
  ```bash
279
- python3 ./.trellis/scripts/task.py init-context "$TASK_DIR" <type> --platform {{CLI_FLAG}}
280
- # type: backend | frontend | fullstack
285
+ python3 ./.trellis/scripts/get_context.py --mode packages
281
286
  ```
282
287
 
283
- Add code-spec files found by Research Agent:
288
+ Append entries (either edit the jsonl file directly, or):
284
289
 
285
290
  ```bash
286
- # For each relevant code-spec and code pattern:
287
291
  python3 ./.trellis/scripts/task.py add-context "$TASK_DIR" implement "<path>" "<reason>"
288
292
  python3 ./.trellis/scripts/task.py add-context "$TASK_DIR" check "<path>" "<reason>"
289
293
  ```
@@ -368,9 +372,8 @@ If yes, resume from the appropriate step (usually Step 7 or 8).
368
372
  | Script | Purpose |
369
373
  |--------|---------|
370
374
  | `python3 ./.trellis/scripts/get_context.py` | Get session context |
371
- | `python3 ./.trellis/scripts/task.py create` | Create task directory |
372
- | `python3 ./.trellis/scripts/task.py init-context` | Initialize jsonl files |
373
- | `python3 ./.trellis/scripts/task.py add-context` | Add code-spec/context file to jsonl |
375
+ | `python3 ./.trellis/scripts/task.py create` | Create task directory (seeds jsonl on sub-agent platforms) |
376
+ | `python3 ./.trellis/scripts/task.py add-context` | Append code-spec/research entry to jsonl |
374
377
  | `python3 ./.trellis/scripts/task.py start` | Set current task |
375
378
  | `python3 ./.trellis/scripts/task.py finish` | Clear current task |
376
379
  | `python3 ./.trellis/scripts/task.py archive` | Archive completed task |
@@ -23,12 +23,15 @@ Otherwise, load context yourself:
23
23
 
24
24
  1. Read `.trellis/.current-task` → get task directory (e.g., `.trellis/tasks/xxx`)
25
25
  2. Read `{task_dir}/implement.jsonl`
26
- 3. For each entry in JSONL:
27
 - - If `path` is a file → Read it
28
- - If `path` is a directory → Read all `.md` files in it
26
+ 3. For each entry in JSONL (JSON object per line):
27
+ - Skip rows without a `"file"` field (e.g. `{"_example": "..."}` seed rows)
28
+ - If `file` points at a file → Read it
29
+ - If `file` ends with `/` (directory) → Read all `.md` files in it
29
30
  4. Read `{task_dir}/prd.md` for requirements
30
31
  5. Read `{task_dir}/info.md` for technical design (if exists)
31
32
 
33
+ **If `implement.jsonl` has no curated entries (only a seed row, or the file is missing)**: read `prd.md` to understand the task domain, then decide which specs apply based on `.trellis/spec/` layout. You can list available specs with `python3 ./.trellis/scripts/get_context.py --mode packages`. Do not block on the missing jsonl — proceed with prd-only context plus your own spec judgment.
34
+
32
35
  Then proceed with the workflow below using the loaded context.
33
36
 
34
37
  ---
@@ -15,6 +15,33 @@ import { TrellisContext, contextCollector, debugLog } from "../lib/trellis-conte
15
15
  const PYTHON_CMD = platform() === "win32" ? "python" : "python3"
16
16
 
17
17
 
18
+ /**
19
+ * Return true iff jsonl has at least one row with a `file` field.
20
+ * A freshly seeded jsonl only contains a `{"_example": ...}` row (no `file`
21
+ * key) — that is NOT "ready". Readiness requires at least one curated entry.
22
+ * Matches the contract used by `shared-hooks/inject-subagent-context.py`.
23
+ */
24
+ function hasCuratedJsonlEntry(jsonlPath) {
25
+ try {
26
+ const content = readFileSync(jsonlPath, "utf-8")
27
+ for (const rawLine of content.split(/\r?\n/)) {
28
+ const line = rawLine.trim()
29
+ if (!line) continue
30
+ try {
31
+ const row = JSON.parse(line)
32
+ if (row && typeof row === "object" && typeof row.file === "string" && row.file) {
33
+ return true
34
+ }
35
+ } catch {
36
+ // Ignore malformed line — move on.
37
+ }
38
+ }
39
+ } catch {
40
+ return false
41
+ }
42
+ return false
43
+ }
44
+
18
45
  /**
19
46
  * Check current task status and return structured status string.
20
47
  * JavaScript equivalent of _get_task_status in Claude's session-start.py.
@@ -52,27 +79,20 @@ function getTaskStatus(ctx) {
52
79
  let hasContext = false
53
80
  for (const jsonlName of ["implement.jsonl", "check.jsonl"]) {
54
81
  const jsonlPath = join(taskDir, jsonlName)
55
- if (existsSync(jsonlPath)) {
56
- try {
57
- const st = statSync(jsonlPath)
58
- if (st.size > 0) {
59
- hasContext = true
60
- break
61
- }
62
- } catch {
63
- // Ignore stat errors
64
- }
82
+ if (existsSync(jsonlPath) && hasCuratedJsonlEntry(jsonlPath)) {
83
+ hasContext = true
84
+ break
65
85
  }
66
86
  }
67
87
 
68
88
  const hasPrd = existsSync(join(taskDir, "prd.md"))
69
89
 
70
90
  if (!hasPrd) {
71
- return `Status: NOT READY\nTask: ${taskTitle}\nMissing: prd.md not created\nNext: Write PRD, then research init-context start`
91
+ return `Status: NOT READY\nTask: ${taskTitle}\nMissing: prd.md not created\nNext: Write PRD (see workflow.md Phase 1.1) then curate implement.jsonl per Phase 1.3`
72
92
  }
73
93
 
74
94
  if (!hasContext) {
75
- return `Status: NOT READY\nTask: ${taskTitle}\nMissing: Context not configured (no jsonl files)\nNext: Complete Phase 2 (research init-context start) before implementing`
95
+ return `Status: NOT READY\nTask: ${taskTitle}\nMissing: implement.jsonl / check.jsonl missing or empty\nNext: Curate entries per workflow.md Phase 1.3 (spec + research files only), then \`task.py start\``
76
96
  }
77
97
 
78
98
  return `Status: READY\nTask: ${taskTitle}\nNext: Continue with implement or check`
@@ -32,7 +32,7 @@ from pathlib import Path
32
32
 
33
33
  # IMPORTANT: Force stdout to use UTF-8 on Windows
34
34
  # This fixes UnicodeEncodeError when outputting non-ASCII characters
35
- if sys.platform == "win32":
35
+ if sys.platform.startswith("win"):
36
36
  import io as _io
37
37
  if hasattr(sys.stdout, "reconfigure"):
38
38
  sys.stdout.reconfigure(encoding="utf-8", errors="replace") # type: ignore[union-attr]
@@ -168,15 +168,27 @@ def read_jsonl_entries(base_path: str, jsonl_path: str) -> list[tuple[str, str]]
168
168
  Schema:
169
169
  {"file": "path/to/file.md", "reason": "..."}
170
170
  {"file": "path/to/dir/", "type": "directory", "reason": "..."}
171
+ {"_example": "..."} # seed row — skipped (no `file` field)
172
+
173
+ Rows without a ``file`` field (e.g. the self-describing seed line written
174
+ by ``task.py create`` before the agent has curated entries) are skipped
175
+ silently. If the resulting entry list is empty, a stderr warning is
176
+ emitted so the operator can debug missing context.
171
177
 
172
178
  Returns:
173
179
  [(path, content), ...]
174
180
  """
175
181
  full_path = os.path.join(base_path, jsonl_path)
176
182
  if not os.path.exists(full_path):
183
+ print(
184
+ f"[inject-subagent-context] WARN: {jsonl_path} not found — "
185
+ f"sub-agent will receive only prd.md",
186
+ file=sys.stderr,
187
+ )
177
188
  return []
178
189
 
179
190
  results = []
191
+ saw_real_entry = False
180
192
  try:
181
193
  with open(full_path, "r", encoding="utf-8") as f:
182
194
  for line in f:
@@ -189,8 +201,10 @@ def read_jsonl_entries(base_path: str, jsonl_path: str) -> list[tuple[str, str]]
189
201
  entry_type = item.get("type", "file")
190
202
 
191
203
  if not file_path:
204
+ # Seed / comment row — skip silently
192
205
  continue
193
206
 
207
+ saw_real_entry = True
194
208
  if entry_type == "directory":
195
209
  # Read all .md files in directory
196
210
  dir_contents = read_directory_contents(base_path, file_path)
@@ -205,6 +219,14 @@ def read_jsonl_entries(base_path: str, jsonl_path: str) -> list[tuple[str, str]]
205
219
  except Exception:
206
220
  pass
207
221
 
222
+ if not saw_real_entry:
223
+ print(
224
+ f"[inject-subagent-context] WARN: {jsonl_path} has no curated "
225
+ f"entries (only seed / empty) — sub-agent will receive only "
226
+ f"prd.md. See workflow.md Phase 1.3 for curation guidance.",
227
+ file=sys.stderr,
228
+ )
229
+
208
230
  return results
209
231
 
210
232
 
@@ -392,7 +414,11 @@ Finish checklist and requirements:
392
414
  def get_research_context(repo_root: str, task_dir: str | None) -> str:
393
415
  """
394
416
  Context for Research Agent — project structure overview for spec directories.
417
+
418
+ `task_dir` kept for signature parity with get_implement_context / get_check_context
419
+ so the dispatcher can call them uniformly.
395
420
  """
421
+ _ = task_dir
396
422
  context_parts = []
397
423
 
398
424
  # 1. Project structure overview (dynamically discover spec directories)
@@ -18,7 +18,7 @@ from pathlib import Path
18
18
 
19
19
  # IMPORTANT: Force stdout to use UTF-8 on Windows
20
20
  # This fixes UnicodeEncodeError when outputting non-ASCII characters
21
- if sys.platform == "win32":
21
+ if sys.platform.startswith("win"):
22
22
  import io as _io
23
23
  if hasattr(sys.stdout, "reconfigure"):
24
24
  sys.stdout.reconfigure(encoding="utf-8", errors="replace") # type: ignore[union-attr]
@@ -27,6 +27,29 @@ if sys.platform == "win32":
27
27
 
28
28
 
29
29
 
30
+ def _has_curated_jsonl_entry(jsonl_path: Path) -> bool:
31
+ """Return True iff jsonl has at least one row with a ``file`` field.
32
+
33
+ A freshly seeded jsonl only contains a ``{"_example": ...}`` row (no
34
+ ``file`` key) — that is NOT "ready". Readiness requires at least one
35
+ curated entry. Matches the contract used by ``inject-subagent-context.py``.
36
+ """
37
+ try:
38
+ for line in jsonl_path.read_text(encoding="utf-8").splitlines():
39
+ line = line.strip()
40
+ if not line:
41
+ continue
42
+ try:
43
+ row = json.loads(line)
44
+ except json.JSONDecodeError:
45
+ continue
46
+ if isinstance(row, dict) and row.get("file"):
47
+ return True
48
+ except (OSError, UnicodeDecodeError):
49
+ return False
50
+ return False
51
+
52
+
30
53
  def should_skip_injection() -> bool:
31
54
  """Check if any platform's non-interactive flag is set."""
32
55
  non_interactive_vars = [
@@ -170,11 +193,25 @@ def _get_task_status(trellis_dir: Path) -> str:
170
193
  "inline in the main session. Findings go to `{task_dir}/research/*.md`; PRD only links to them."
171
194
  )
172
195
 
173
 - # Case 5: PRD ready → enter Execute phase
196
 + # Case 4b: PRD exists but implement.jsonl has only seed (no curated entries) — Phase 1.3 gate
197
+ implement_jsonl = task_dir / "implement.jsonl"
198
+ if implement_jsonl.is_file() and not _has_curated_jsonl_entry(implement_jsonl):
199
+ return (
200
+ f"Status: PLANNING (Phase 1.3)\nTask: {task_title}\n"
201
+ "Next-Action: Curate `implement.jsonl` and `check.jsonl` with the spec + research files "
202
+ "the Phase 2 sub-agents will need. Only spec paths (`.trellis/spec/**/*.md`) and research "
203
+ "files (`{TASK_DIR}/research/*.md`) — no code paths. Run "
204
+ "`python3 ./.trellis/scripts/get_context.py --mode packages` to list available specs, "
205
+ "then edit the jsonl files or use `python3 ./.trellis/scripts/task.py add-context`. "
206
+ "See `.trellis/workflow.md` Phase 1.3 for details."
207
+ )
208
+
209
+ # Case 5: PRD + curated jsonl (or agent-less platform with no jsonl) — enter Execute phase
174
210
  return (
175
211
  f"Status: READY\nTask: {task_title}\n"
176
- "Next-Action: Load skill `trellis-before-dev` to read relevant specs, "
177
- "then spawn `trellis-implement` sub-agent via the Task tool. "
212
+ "Next-Action: Follow Phase 2.1 for your platform. For agent-capable platforms, "
213
+ "spawn `trellis-implement` via the Task tool; if you stay in the main session, "
214
+ "load `trellis-before-dev` before writing code. "
178
215
  "After implementation, spawn `trellis-check` sub-agent for quality verification.\n"
179
216
  "Sub-agent roster: `trellis-implement` (writes code), `trellis-check` (verifies + self-fixes), "
180
217
  "`trellis-research` (persists findings to `research/*.md` — use when you'd otherwise do "
@@ -3,225 +3,29 @@
3
3
  Task JSONL context management.
4
4
 
5
5
  Provides:
6
- cmd_init_context - Initialize JSONL context files for a task
7
6
  cmd_add_context - Add entry to JSONL context file
8
7
  cmd_validate - Validate JSONL context files
9
8
  cmd_list_context - List JSONL context entries
9
+
10
+ Note:
11
+ ``cmd_init_context`` was removed in v0.5.0-beta.12. JSONL context files
12
+ are now seeded at ``task.py create`` time with a self-describing
13
+ ``_example`` line; the AI agent curates real entries during Phase 1.3 of
14
+ the workflow. See ``.trellis/workflow.md`` Phase 1.3 for the current
15
+ instructions.
10
16
  """
11
17
 
12
18
  from __future__ import annotations
13
19
 
14
20
  import argparse
15
21
  import json
16
- import sys
17
22
  from pathlib import Path
18
23
 
19
- from .cli_adapter import get_cli_adapter, get_cli_adapter_auto
20
- from .config import (
21
- get_packages,
22
- is_monorepo,
23
- resolve_package,
24
- validate_package,
25
- )
26
- from .io import read_json, write_json
27
24
  from .log import Colors, colored
28
- from .paths import (
29
- DIR_SPEC,
30
- DIR_WORKFLOW,
31
- FILE_TASK_JSON,
32
- get_repo_root,
33
- )
25
+ from .paths import get_repo_root
34
26
  from .task_utils import resolve_task_dir
35
27
 
36
28
 
37
- # =============================================================================
38
- # JSONL Default Content Generators
39
- # =============================================================================
40
-
41
- def get_implement_base() -> list[dict]:
42
- """Get base implement context entries."""
43
- return [
44
- {"file": f"{DIR_WORKFLOW}/workflow.md", "reason": "Project workflow and conventions"},
45
- ]
46
-
47
-
48
- def get_implement_backend(package: str | None = None) -> list[dict]:
49
- """Get backend implement context entries."""
50
- spec_base = f"{DIR_SPEC}/{package}" if package else DIR_SPEC
51
- return [
52
- {"file": f"{DIR_WORKFLOW}/{spec_base}/backend/index.md", "reason": "Backend development guide"},
53
- ]
54
-
55
-
56
- def get_implement_frontend(package: str | None = None) -> list[dict]:
57
- """Get frontend implement context entries."""
58
- spec_base = f"{DIR_SPEC}/{package}" if package else DIR_SPEC
59
- return [
60
- {"file": f"{DIR_WORKFLOW}/{spec_base}/frontend/index.md", "reason": "Frontend development guide"},
61
- ]
62
-
63
-
64
- def get_check_context(repo_root: Path, platform: str | None = None) -> list[dict]:
65
- """Get check context entries.
66
-
67
- Args:
68
- repo_root: Project root directory.
69
- platform: Explicit platform override (e.g. "codex", "kiro"). When
70
- provided, skip filesystem auto-detection and use this platform
71
- directly. Caller (e.g. `cmd_init_context`) passes the
72
- `--platform` CLI argument, which itself is substituted from
73
- `{{CLI_FLAG}}` in skill/command templates rendered per platform.
74
- Falls back to `get_cli_adapter_auto` when omitted (CLI-direct
75
- users).
76
- """
77
- if platform:
78
- adapter = get_cli_adapter(platform)
79
- else:
80
- adapter = get_cli_adapter_auto(repo_root)
81
-
82
- return [
83
- {"file": adapter.get_trellis_command_path("finish-work"), "reason": "Finish work checklist"},
84
- {"file": adapter.get_trellis_command_path("check"), "reason": "Code quality check spec"},
85
- ]
86
-
87
-
88
- def _write_jsonl(path: Path, entries: list[dict]) -> None:
89
- """Write entries to JSONL file."""
90
- lines = [json.dumps(entry, ensure_ascii=False) for entry in entries]
91
- path.write_text("\n".join(lines) + "\n", encoding="utf-8")
92
-
93
-
94
- # =============================================================================
95
- # Command: init-context
96
- # =============================================================================
97
-
98
- def cmd_init_context(args: argparse.Namespace) -> int:
99
- """Initialize JSONL context files for a task."""
100
- repo_root = get_repo_root()
101
- target_dir = resolve_task_dir(args.dir, repo_root)
102
- dev_type = args.type
103
-
104
- if not dev_type:
105
- print(colored("Error: Missing arguments", Colors.RED))
106
- print("Usage: python3 task.py init-context <task-dir> <dev_type>")
107
- print(" dev_type: backend | frontend | fullstack | test | docs")
108
- return 1
109
-
110
- if not target_dir.is_dir():
111
- print(colored(f"Error: Directory not found: {target_dir}", Colors.RED))
112
- return 1
113
-
114
- # Resolve package: --package CLI → task.json.package → default_package
115
- cli_package: str | None = getattr(args, "package", None)
116
- package: str | None = None
117
- if not is_monorepo(repo_root):
118
- # Single-repo: ignore --package, no package prefix
119
- if cli_package:
120
- print(colored("Warning: --package ignored in single-repo project", Colors.YELLOW), file=sys.stderr)
121
- elif cli_package:
122
- if not validate_package(cli_package, repo_root):
123
- packages = get_packages(repo_root)
124
- available = ", ".join(sorted(packages.keys())) if packages else "(none)"
125
- print(colored(f"Error: unknown package '{cli_package}'. Available: {available}", Colors.RED), file=sys.stderr)
126
- return 1
127
- package = cli_package
128
- else:
129
- # Read task.json.package as inferred source
130
- task_json_path = target_dir / FILE_TASK_JSON
131
- task_pkg_value = None
132
- if task_json_path.is_file():
133
- task_data = read_json(task_json_path)
134
- if isinstance(task_data, dict):
135
- task_pkg_value = task_data.get("package")
136
- # Only pass string values to resolve_package (guard against malformed JSON)
137
- task_package = task_pkg_value if isinstance(task_pkg_value, str) else None
138
- package = resolve_package(task_package=task_package, repo_root=repo_root)
139
-
140
- # Monorepo fallback prohibition
141
- if package is None:
142
- packages = get_packages(repo_root)
143
- available = ", ".join(sorted(packages.keys())) if packages else "(none)"
144
- print(colored(
145
- f"Error: monorepo project requires --package (or set default_package in config.yaml). Available: {available}",
146
- Colors.RED,
147
- ), file=sys.stderr)
148
- return 1
149
-
150
- print(colored("=== Initializing Agent Context Files ===", Colors.BLUE))
151
- print(f"Target dir: {target_dir}")
152
- print(f"Dev type: {dev_type}")
153
- if package:
154
- print(f"Package: {package}")
155
- print()
156
-
157
- # implement.jsonl
158
- print(colored("Creating implement.jsonl...", Colors.CYAN))
159
- implement_entries = get_implement_base()
160
- if dev_type in ("backend", "test"):
161
- implement_entries.extend(get_implement_backend(package))
162
- elif dev_type == "frontend":
163
- implement_entries.extend(get_implement_frontend(package))
164
- elif dev_type == "fullstack":
165
- implement_entries.extend(get_implement_backend(package))
166
- implement_entries.extend(get_implement_frontend(package))
167
-
168
- implement_file = target_dir / "implement.jsonl"
169
- _write_jsonl(implement_file, implement_entries)
170
- print(f" {colored('✓', Colors.GREEN)} {len(implement_entries)} entries")
171
-
172
- # check.jsonl
173
- platform: str | None = getattr(args, "platform", None)
174
- print(colored("Creating check.jsonl...", Colors.CYAN))
175
- check_entries = get_check_context(repo_root, platform=platform)
176
- check_file = target_dir / "check.jsonl"
177
- _write_jsonl(check_file, check_entries)
178
- print(f" {colored('✓', Colors.GREEN)} {len(check_entries)} entries")
179
-
180
- # Update task.json dev_type and package
181
- task_json_path = target_dir / FILE_TASK_JSON
182
- if task_json_path.is_file():
183
- task_data = read_json(task_json_path)
184
- if isinstance(task_data, dict):
185
- task_data["dev_type"] = dev_type
186
- task_data["package"] = package # Always sync to match resolved value
187
- write_json(task_json_path, task_data)
188
-
189
- print()
190
- print(colored("✓ All context files created", Colors.GREEN))
191
- print()
192
-
193
- # Show what was auto-injected
194
- all_injected = [e["file"] for e in implement_entries]
195
- print(colored("Auto-injected (defaults only):", Colors.YELLOW))
196
- for f in all_injected:
197
- print(f" - {f}")
198
- print()
199
-
200
- # Scan spec directory for available spec files the AI should consider
201
- spec_base = repo_root / DIR_WORKFLOW / DIR_SPEC
202
- if package:
203
- spec_base = spec_base / package
204
- available_specs: list[str] = []
205
- if spec_base.is_dir():
206
- for md_file in sorted(spec_base.rglob("*.md")):
207
- rel = str(md_file.relative_to(repo_root))
208
- if rel not in all_injected:
209
- available_specs.append(rel)
210
-
211
- if available_specs:
212
- print(colored("Available spec files (not yet injected):", Colors.BLUE))
213
- for spec in available_specs:
214
- print(f" - {spec}")
215
- print()
216
-
217
- print(colored("Next steps:", Colors.BLUE))
218
- print(" 1. Review the spec files above and add relevant ones for your task:")
219
- print(f" python3 task.py add-context <dir> implement <spec-path> \"<reason>\"")
220
- print(" 2. Set as current: python3 task.py start <dir>")
221
-
222
- return 0
223
-
224
-
225
29
  # =============================================================================
226
30
  # Command: add-context
227
31
  # =============================================================================
@@ -309,7 +113,11 @@ def cmd_validate(args: argparse.Namespace) -> int:
309
113
 
310
114
 
311
115
  def _validate_jsonl(jsonl_file: Path, repo_root: Path) -> int:
312
- """Validate a single JSONL file."""
116
+ """Validate a single JSONL file.
117
+
118
+ Seed rows (no ``file`` field — typically ``{"_example": "..."}``) are
119
+ skipped silently; they are self-describing comments, not real entries.
120
+ """
313
121
  file_name = jsonl_file.name
314
122
  errors = 0
315
123
 
@@ -318,6 +126,7 @@ def _validate_jsonl(jsonl_file: Path, repo_root: Path) -> int:
318
126
  return 0
319
127
 
320
128
  line_num = 0
129
+ real_entries = 0
321
130
  for line in jsonl_file.read_text(encoding="utf-8").splitlines():
322
131
  line_num += 1
323
132
  if not line.strip():
@@ -334,10 +143,10 @@ def _validate_jsonl(jsonl_file: Path, repo_root: Path) -> int:
334
143
  entry_type = data.get("type", "file")
335
144
 
336
145
  if not file_path:
337
- print(f" {colored(f'{file_name}:{line_num}: Missing file field', Colors.RED)}")
338
- errors += 1
146
+ # Seed / comment row — skip silently
339
147
  continue
340
148
 
149
+ real_entries += 1
341
150
  full_path = repo_root / file_path
342
151
  if entry_type == "directory":
343
152
  if not full_path.is_dir():
@@ -349,7 +158,7 @@ def _validate_jsonl(jsonl_file: Path, repo_root: Path) -> int:
349
158
  errors += 1
350
159
 
351
160
  if errors == 0:
352
- print(f" {colored(f'{file_name}: ✓ ({line_num} entries)', Colors.GREEN)}")
161
+ print(f" {colored(f'{file_name}: ✓ ({real_entries} entries)', Colors.GREEN)}")
353
162
  else:
354
163
  print(f" {colored(f'{file_name}: ✗ ({errors} errors)', Colors.RED)}")
355
164
 
@@ -380,6 +189,7 @@ def cmd_list_context(args: argparse.Namespace) -> int:
380
189
  print(colored(f"[{jsonl_name}]", Colors.CYAN))
381
190
 
382
191
  count = 0
192
+ seed_only = True
383
193
  for line in jsonl_file.read_text(encoding="utf-8").splitlines():
384
194
  if not line.strip():
385
195
  continue
@@ -389,8 +199,13 @@ def cmd_list_context(args: argparse.Namespace) -> int:
389
199
  except json.JSONDecodeError:
390
200
  continue
391
201
 
202
+ file_path = data.get("file")
203
+ if not file_path:
204
+ # Seed / comment row — don't count as a real entry
205
+ continue
206
+ seed_only = False
207
+
392
208
  count += 1
393
- file_path = data.get("file", "?")
394
209
  entry_type = data.get("type", "file")
395
210
  reason = data.get("reason", "-")
396
211
 
@@ -400,6 +215,9 @@ def cmd_list_context(args: argparse.Namespace) -> int:
400
215
  print(f" {colored(f'{count}.', Colors.GREEN)} {file_path}")
401
216
  print(f" {colored('→', Colors.YELLOW)} {reason}")
402
217
 
218
+ if seed_only:
219
+ print(f" {colored('(no curated entries yet — only seed row)', Colors.YELLOW)}")
220
+
403
221
  print()
404
222
 
405
223
  return 0