compass-st 1.1.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (118):
  1. package/README.md +105 -0
  2. package/VERSION +1 -0
  3. package/bin/install +174 -0
  4. package/bootstrap.sh +95 -0
  5. package/cli/Cargo.lock +270 -0
  6. package/cli/Cargo.toml +24 -0
  7. package/cli/src/cmd/context.rs +59 -0
  8. package/cli/src/cmd/dag.rs +133 -0
  9. package/cli/src/cmd/git.rs +148 -0
  10. package/cli/src/cmd/hook.rs +51 -0
  11. package/cli/src/cmd/index.rs +363 -0
  12. package/cli/src/cmd/manifest.rs +34 -0
  13. package/cli/src/cmd/memory.rs +680 -0
  14. package/cli/src/cmd/migrate.rs +790 -0
  15. package/cli/src/cmd/mod.rs +14 -0
  16. package/cli/src/cmd/progress.rs +107 -0
  17. package/cli/src/cmd/project.rs +1700 -0
  18. package/cli/src/cmd/session.rs +64 -0
  19. package/cli/src/cmd/state.rs +317 -0
  20. package/cli/src/cmd/validate/mod.rs +506 -0
  21. package/cli/src/cmd/validate/prd.rs +472 -0
  22. package/cli/src/cmd/version.rs +89 -0
  23. package/cli/src/helpers.rs +40 -0
  24. package/cli/src/main.rs +75 -0
  25. package/cli/tests/fixtures/plan_empty_pointers.json +60 -0
  26. package/cli/tests/fixtures/plan_missing_pointers.json +59 -0
  27. package/cli/tests/fixtures/plan_too_many_pointers.json +92 -0
  28. package/cli/tests/fixtures/plan_v1_valid.json +64 -0
  29. package/cli/tests/fixtures/prd_bad_flow_bullet.md +37 -0
  30. package/cli/tests/fixtures/prd_bad_flow_prose.md +33 -0
  31. package/cli/tests/fixtures/prd_good_flow.md +41 -0
  32. package/cli/tests/fixtures/prd_xref_dangling.md +38 -0
  33. package/cli/tests/fixtures/prd_xref_valid.md +53 -0
  34. package/cli/tests/fixtures/projects/proj_a/.compass/.state/config.json +12 -0
  35. package/cli/tests/fixtures/projects/proj_b/.compass/.state/config.json +12 -0
  36. package/cli/tests/fixtures/projects/proj_c/.compass/.state/config.json +12 -0
  37. package/cli/tests/fixtures/registry/all_dead.json +18 -0
  38. package/cli/tests/fixtures/registry/corrupt.json +1 -0
  39. package/cli/tests/fixtures/registry/empty.json +1 -0
  40. package/cli/tests/fixtures/registry/last_active_dead.json +24 -0
  41. package/cli/tests/fixtures/registry/multi_alive.json +24 -0
  42. package/cli/tests/fixtures/registry/one_alive.json +12 -0
  43. package/cli/tests/fixtures/v0_project/.compass/.state/config.json +5 -0
  44. package/cli/tests/fixtures/v0_project/.compass/.state/sessions/onboarding-redesign/plan.json +29 -0
  45. package/cli/tests/fixtures/v0_project/.compass/.state/sessions/sample-feature/context.json +11 -0
  46. package/cli/tests/fixtures/v0_project/.compass/.state/sessions/sample-feature/plan.json +49 -0
  47. package/core/colleagues/base-rules.md +112 -0
  48. package/core/colleagues/manifest.json +85 -0
  49. package/core/colleagues/market-analyst.md +50 -0
  50. package/core/colleagues/prioritizer.md +53 -0
  51. package/core/colleagues/researcher.md +54 -0
  52. package/core/colleagues/reviewer.md +55 -0
  53. package/core/colleagues/stakeholder-comm.md +59 -0
  54. package/core/colleagues/story-breaker.md +57 -0
  55. package/core/colleagues/ux-reviewer.md +54 -0
  56. package/core/colleagues/writer.md +55 -0
  57. package/core/commands/compass/brief.md +28 -0
  58. package/core/commands/compass/check.md +27 -0
  59. package/core/commands/compass/epic.md +32 -0
  60. package/core/commands/compass/feedback.md +32 -0
  61. package/core/commands/compass/help.md +24 -0
  62. package/core/commands/compass/ideate.md +32 -0
  63. package/core/commands/compass/init.md +30 -0
  64. package/core/commands/compass/plan.md +27 -0
  65. package/core/commands/compass/prd.md +39 -0
  66. package/core/commands/compass/prioritize.md +36 -0
  67. package/core/commands/compass/prototype.md +28 -0
  68. package/core/commands/compass/release.md +32 -0
  69. package/core/commands/compass/research.md +31 -0
  70. package/core/commands/compass/roadmap.md +32 -0
  71. package/core/commands/compass/run.md +28 -0
  72. package/core/commands/compass/setup.md +32 -0
  73. package/core/commands/compass/sprint.md +32 -0
  74. package/core/commands/compass/status.md +32 -0
  75. package/core/commands/compass/story.md +37 -0
  76. package/core/commands/compass/undo.md +33 -0
  77. package/core/commands/compass/update.md +29 -0
  78. package/core/hooks/context-monitor.sh +5 -0
  79. package/core/hooks/manifest-tracker.sh +62 -0
  80. package/core/hooks/statusline.sh +12 -0
  81. package/core/hooks/update-checker.sh +24 -0
  82. package/core/integrations/confluence.md +267 -0
  83. package/core/integrations/figma.md +277 -0
  84. package/core/integrations/jira.md +436 -0
  85. package/core/integrations/vercel.md +170 -0
  86. package/core/manifest.json +172 -0
  87. package/core/shared/SCHEMAS-v1.md +404 -0
  88. package/core/shared/progress.md +145 -0
  89. package/core/shared/project-scan.md +293 -0
  90. package/core/shared/resolve-project.md +136 -0
  91. package/core/shared/ux-rules.md +52 -0
  92. package/core/shared/version-backup.md +38 -0
  93. package/core/templates/prd-template.md +145 -0
  94. package/core/templates/story-template.md +99 -0
  95. package/core/workflows/brief.md +184 -0
  96. package/core/workflows/check.md +436 -0
  97. package/core/workflows/epic.md +177 -0
  98. package/core/workflows/feedback.md +164 -0
  99. package/core/workflows/help.md +79 -0
  100. package/core/workflows/ideate.md +320 -0
  101. package/core/workflows/init.md +524 -0
  102. package/core/workflows/migrate.md +136 -0
  103. package/core/workflows/plan.md +320 -0
  104. package/core/workflows/prd.md +632 -0
  105. package/core/workflows/prioritize.md +301 -0
  106. package/core/workflows/project.md +177 -0
  107. package/core/workflows/prototype.md +174 -0
  108. package/core/workflows/release.md +179 -0
  109. package/core/workflows/research.md +613 -0
  110. package/core/workflows/roadmap.md +152 -0
  111. package/core/workflows/run.md +367 -0
  112. package/core/workflows/setup.md +294 -0
  113. package/core/workflows/sprint.md +187 -0
  114. package/core/workflows/status.md +185 -0
  115. package/core/workflows/story.md +477 -0
  116. package/core/workflows/undo.md +42 -0
  117. package/core/workflows/update.md +127 -0
  118. package/package.json +37 -0
@@ -0,0 +1,790 @@
1
+ //! v0.x → v1.0 plan migration.
2
+ //!
3
+ //! Walks `<project_root>/.compass/.state/sessions/<slug>/plan.json` files and
4
+ //! rewrites each pre-1.0 plan into the v1.0 schema documented in
5
+ //! `core/shared/SCHEMAS-v1.md`. A `plan.v0.json` sibling backup is written
6
+ //! before the rewrite (idempotent: never overwrites an existing backup). The
7
+ //! top-level project memory file is ensured via `crate::cmd::memory::init`.
8
+ //!
9
+ //! Design rules (see REQ-05):
10
+ //! - Idempotent: re-running yields `already v1.0, no-op` per session.
11
+ //! - `PARSE_ERROR` on invalid JSON — do not touch the file.
12
+ //! - `NEWER_VERSION_THAN_CLI` on `plan_version` > "1.0" — exit 1.
13
+ //! - Missing `.compass/.state` — exit 0, emit a friendly log line.
14
+
15
+ use crate::cmd::memory;
16
+ use crate::helpers;
17
+ use serde_json::{json, Value};
18
+ use std::collections::{BTreeMap, HashSet};
19
+ use std::fs;
20
+ use std::path::{Path, PathBuf};
21
+
22
+ /// Entry point invoked from `main.rs` with the sub-args slice.
23
+ pub fn run(args: &[String]) -> Result<String, String> {
24
+ if args.is_empty() {
25
+ return Err(usage());
26
+ }
27
+ match args[0].as_str() {
28
+ "--help" | "-h" | "help" => Ok(help_text()),
29
+ project_root => migrate(project_root),
30
+ }
31
+ }
32
+
33
+ fn usage() -> String {
34
+ "Usage: compass-cli migrate <project_root>".into()
35
+ }
36
+
37
+ fn help_text() -> String {
38
+ "compass-cli migrate <project_root>\n\
39
+ \n\
40
+ Migrate a Compass project's on-disk state from v0.x to v1.0:\n \
41
+ * For each .compass/.state/sessions/<slug>/plan.json:\n \
42
+ - Back up to plan.v0.json (never overwrites).\n \
43
+ - Rewrite plan.json to the v1.0 schema (SCHEMAS-v1.md).\n \
44
+ - context_pointers is seeded with [\"TBD_BY_MIGRATE\"].\n \
45
+ * Ensure .compass/.state/project-memory.json exists.\n \
46
+ * Idempotent. Safe to re-run.\n\
47
+ \n\
48
+ Exit codes:\n \
49
+ 0 success (or nothing to migrate)\n \
50
+ 1 parse error, or plan_version newer than this CLI supports\n"
51
+ .to_string()
52
+ }
53
+
54
+ /// Main driver — returns JSON summary on success, error string on failure.
55
+ fn migrate(project_root: &str) -> Result<String, String> {
56
+ let state_dir = Path::new(project_root).join(".compass").join(".state");
57
+
58
+ if !state_dir.exists() {
59
+ eprintln!("no compass state found, nothing to migrate");
60
+ return Ok(json!({
61
+ "ok": true,
62
+ "project_root": project_root,
63
+ "state_dir": state_dir.to_string_lossy(),
64
+ "sessions": [],
65
+ "memory": null,
66
+ "note": "no compass state found, nothing to migrate",
67
+ })
68
+ .to_string());
69
+ }
70
+
71
+ let sessions_dir = state_dir.join("sessions");
72
+ let mut session_results: Vec<Value> = Vec::new();
73
+
74
+ if sessions_dir.exists() {
75
+ let mut entries: Vec<PathBuf> = fs::read_dir(&sessions_dir)
76
+ .map_err(|e| format!("Cannot read {}: {}", sessions_dir.display(), e))?
77
+ .filter_map(|e| e.ok().map(|e| e.path()))
78
+ .filter(|p| p.is_dir())
79
+ .collect();
80
+ entries.sort();
81
+
82
+ for session_dir in entries {
83
+ let result = migrate_session(&session_dir)?;
84
+ session_results.push(result);
85
+ }
86
+ }
87
+
88
+ // Ensure the project-memory.json exists — delegate to the memory module
89
+ // via its public `run` entry point so we don't touch its internals. The
90
+ // init path is itself idempotent (returns already_exists: true on re-run).
91
+ let memory_args = vec!["init".to_string(), project_root.to_string()];
92
+ let memory_result_str = memory::run(&memory_args)?;
93
+ let memory_result: Value = serde_json::from_str(&memory_result_str)
94
+ .map_err(|e| format!("memory init returned non-JSON: {}", e))?;
95
+
96
+ Ok(json!({
97
+ "ok": true,
98
+ "project_root": project_root,
99
+ "state_dir": state_dir.to_string_lossy(),
100
+ "sessions": session_results,
101
+ "memory": memory_result,
102
+ })
103
+ .to_string())
104
+ }
105
+
106
+ /// Migrate a single session directory. The only I/O side effects are the
107
+ /// backup file and the rewritten plan.json — both gated on the plan being
108
+ /// pre-1.0.
109
+ fn migrate_session(session_dir: &Path) -> Result<Value, String> {
110
+ let slug = session_dir
111
+ .file_name()
112
+ .and_then(|s| s.to_str())
113
+ .unwrap_or("")
114
+ .to_string();
115
+ let plan_path = session_dir.join("plan.json");
116
+
117
+ if !plan_path.exists() {
118
+ return Ok(json!({
119
+ "session": slug,
120
+ "status": "skipped",
121
+ "reason": "no plan.json",
122
+ }));
123
+ }
124
+
125
+ // Read + parse. PARSE_ERROR bubbles up as a hard failure so we don't
126
+ // silently rewrite garbage.
127
+ let raw = fs::read_to_string(&plan_path)
128
+ .map_err(|e| format!("Cannot read {}: {}", plan_path.display(), e))?;
129
+ let plan: Value = serde_json::from_str(&raw)
130
+ .map_err(|e| format!("PARSE_ERROR: {}: {}", plan_path.display(), e))?;
131
+
132
+ let version = plan.get("plan_version").and_then(|v| v.as_str());
133
+
134
+ match version {
135
+ Some("1.0") => {
136
+ eprintln!("{}: already v1.0, no-op", slug);
137
+ Ok(json!({
138
+ "session": slug,
139
+ "status": "already_v1",
140
+ }))
141
+ }
142
+ Some(other) if is_newer_than_1_0(other) => Err(format!(
143
+ "NEWER_VERSION_THAN_CLI: {} has plan_version '{}', this CLI only supports 1.0",
144
+ plan_path.display(),
145
+ other
146
+ )),
147
+ _ => {
148
+ // Any pre-1.0 version (including missing plan_version) → migrate.
149
+ let backup_path = session_dir.join("plan.v0.json");
150
+ if !backup_path.exists() {
151
+ fs::write(&backup_path, &raw)
152
+ .map_err(|e| format!("Cannot write {}: {}", backup_path.display(), e))?;
153
+ }
154
+
155
+ let migrated = build_v1_plan(&plan, &slug);
156
+ helpers::write_json(&plan_path, &migrated)?;
157
+
158
+ eprintln!("{}: migrated to v1.0", slug);
159
+ Ok(json!({
160
+ "session": slug,
161
+ "status": "migrated",
162
+ "backup": backup_path.to_string_lossy(),
163
+ "from_version": version.unwrap_or(""),
164
+ }))
165
+ }
166
+ }
167
+ }
168
+
169
+ /// Return true if `v` parses as a version strictly newer than 1.0 on the
170
+ /// MAJOR.MINOR axis. Unparseable → false (treated as pre-1.0 legacy).
171
+ fn is_newer_than_1_0(v: &str) -> bool {
172
+ let mut parts = v.split('.');
173
+ let major = parts.next().and_then(|s| s.parse::<u32>().ok());
174
+ let minor = parts.next().and_then(|s| s.parse::<u32>().ok()).unwrap_or(0);
175
+ match major {
176
+ Some(m) if m > 1 => true,
177
+ Some(1) if minor > 0 => true,
178
+ _ => false,
179
+ }
180
+ }
181
+
182
+ /// Translate a legacy plan into the v1.0 schema. The v0 shape we know from
183
+ /// fixtures is `colleagues[]` with `{id, type, budget_tokens, depends_on,
184
+ /// output_files, briefing{...}, acceptance{...}}`. We also honour the rarer
185
+ /// `tasks[]` variant defensively.
186
+ fn build_v1_plan(legacy: &Value, slug: &str) -> Value {
187
+ let empty_vec: Vec<Value> = Vec::new();
188
+ let legacy_tasks = legacy
189
+ .get("colleagues")
190
+ .and_then(|v| v.as_array())
191
+ .or_else(|| legacy.get("tasks").and_then(|v| v.as_array()))
192
+ .unwrap_or(&empty_vec);
193
+
194
+ // Compute dependency layers → waves. Tasks whose deps are all already
195
+ // scheduled land in the next wave. Tasks with no deps start in wave 1.
196
+ let waves = layer_into_waves(legacy_tasks);
197
+
198
+ // Collect unique colleague types ordered by first appearance.
199
+ let mut seen: HashSet<String> = HashSet::new();
200
+ let mut colleagues_selected: Vec<String> = Vec::new();
201
+ for t in legacy_tasks {
202
+ let colleague = t
203
+ .get("type")
204
+ .or_else(|| t.get("colleague"))
205
+ .and_then(|v| v.as_str())
206
+ .unwrap_or("")
207
+ .to_string();
208
+ if !colleague.is_empty() && seen.insert(colleague.clone()) {
209
+ colleagues_selected.push(colleague);
210
+ }
211
+ }
212
+
213
+ let wave_array: Vec<Value> = waves
214
+ .into_iter()
215
+ .enumerate()
216
+ .map(|(i, ids)| {
217
+ let wave_id = (i as u64) + 1;
218
+ let tasks: Vec<Value> = ids
219
+ .into_iter()
220
+ .filter_map(|id| {
221
+ legacy_tasks
222
+ .iter()
223
+ .find(|t| task_id_of(t) == id)
224
+ .map(|t| task_to_v1(t, slug))
225
+ })
226
+ .collect();
227
+ json!({
228
+ "wave_id": wave_id,
229
+ "tasks": tasks,
230
+ })
231
+ })
232
+ .collect();
233
+
234
+ json!({
235
+ "plan_version": "1.0",
236
+ "session_id": slug,
237
+ "colleagues_selected": colleagues_selected,
238
+ "memory_ref": ".compass/.state/project-memory.json",
239
+ "domain": Value::Null,
240
+ "waves": wave_array,
241
+ })
242
+ }
243
+
244
+ fn task_id_of(t: &Value) -> String {
245
+ t.get("task_id")
246
+ .or_else(|| t.get("id"))
247
+ .and_then(|v| v.as_str())
248
+ .unwrap_or("")
249
+ .to_string()
250
+ }
251
+
252
+ fn task_to_v1(legacy_task: &Value, slug: &str) -> Value {
253
+ let task_id = task_id_of(legacy_task);
254
+ let colleague = legacy_task
255
+ .get("type")
256
+ .or_else(|| legacy_task.get("colleague"))
257
+ .and_then(|v| v.as_str())
258
+ .unwrap_or("")
259
+ .to_string();
260
+ let budget = legacy_task
261
+ .get("budget_tokens")
262
+ .or_else(|| legacy_task.get("budget"))
263
+ .and_then(|v| v.as_u64())
264
+ .unwrap_or(0);
265
+ let depends_on: Vec<Value> = legacy_task
266
+ .get("depends_on")
267
+ .and_then(|v| v.as_array())
268
+ .cloned()
269
+ .unwrap_or_default();
270
+ let briefing_notes = extract_briefing_notes(legacy_task);
271
+ let output_pattern = legacy_task
272
+ .get("output_files")
273
+ .and_then(|v| v.as_array())
274
+ .and_then(|a| a.first())
275
+ .and_then(|v| v.as_str())
276
+ .map(|s| s.to_string())
277
+ .or_else(|| {
278
+ legacy_task
279
+ .get("output_pattern")
280
+ .and_then(|v| v.as_str())
281
+ .map(|s| s.to_string())
282
+ })
283
+ .unwrap_or_else(|| format!("outputs/{}-{}.md", slug, task_id));
284
+
285
+ json!({
286
+ "task_id": task_id,
287
+ "colleague": colleague,
288
+ "budget": budget,
289
+ "depends_on": depends_on,
290
+ "briefing_notes": briefing_notes,
291
+ // Placeholder per REQ-05: downstream tooling will re-populate with
292
+ // real pointers the first time /compass:plan revisits the session.
293
+ "context_pointers": ["TBD_BY_MIGRATE"],
294
+ "output_pattern": output_pattern,
295
+ })
296
+ }
297
+
298
+ /// Flatten the v0 `briefing` object into a single human-readable string. We
299
+ /// don't lose data — we just serialize it compactly. Downstream consumers
300
+ /// treat `briefing_notes` as free-form per SCHEMAS-v1.md.
301
+ fn extract_briefing_notes(legacy_task: &Value) -> String {
302
+ let briefing = match legacy_task.get("briefing") {
303
+ Some(b) => b,
304
+ None => {
305
+ return legacy_task
306
+ .get("briefing_notes")
307
+ .and_then(|v| v.as_str())
308
+ .unwrap_or("")
309
+ .to_string();
310
+ }
311
+ };
312
+
313
+ let mut parts: Vec<String> = Vec::new();
314
+ if let Some(ctx) = briefing.get("context").and_then(|v| v.as_array()) {
315
+ let items: Vec<String> = ctx
316
+ .iter()
317
+ .filter_map(|v| v.as_str().map(|s| s.to_string()))
318
+ .collect();
319
+ if !items.is_empty() {
320
+ parts.push(format!("Context: {}", items.join(", ")));
321
+ }
322
+ }
323
+ if let Some(cs) = briefing.get("constraints").and_then(|v| v.as_array()) {
324
+ let items: Vec<String> = cs
325
+ .iter()
326
+ .filter_map(|v| v.as_str().map(|s| s.to_string()))
327
+ .collect();
328
+ if !items.is_empty() {
329
+ parts.push(format!("Constraints: {}", items.join("; ")));
330
+ }
331
+ }
332
+ if let Some(sh) = briefing.get("stakeholders").and_then(|v| v.as_array()) {
333
+ let items: Vec<String> = sh
334
+ .iter()
335
+ .filter_map(|v| v.as_str().map(|s| s.to_string()))
336
+ .collect();
337
+ if !items.is_empty() {
338
+ parts.push(format!("Stakeholders: {}", items.join(", ")));
339
+ }
340
+ }
341
+ if let Some(d) = briefing.get("deadline").and_then(|v| v.as_str()) {
342
+ parts.push(format!("Deadline: {}", d));
343
+ }
344
+ parts.join(" | ")
345
+ }
346
+
347
+ /// Kahn-ish topological layering: each output layer is the set of tasks whose
348
+ /// dependencies all belong to earlier layers. Preserves input order within a
349
+ /// layer. Tasks with unknown/dangling deps land in the first layer they're
350
+ /// eligible for (treating unknown ids as already-satisfied) so a malformed v0
351
+ /// plan still migrates rather than wedging.
352
+ fn layer_into_waves(tasks: &[Value]) -> Vec<Vec<String>> {
353
+ let ids_in_order: Vec<String> = tasks.iter().map(task_id_of).collect();
354
+ let id_set: HashSet<String> = ids_in_order.iter().cloned().collect();
355
+
356
+ let deps: BTreeMap<String, Vec<String>> = tasks
357
+ .iter()
358
+ .map(|t| {
359
+ let id = task_id_of(t);
360
+ let d: Vec<String> = t
361
+ .get("depends_on")
362
+ .and_then(|v| v.as_array())
363
+ .map(|a| {
364
+ a.iter()
365
+ .filter_map(|x| x.as_str().map(|s| s.to_string()))
366
+ .filter(|s| id_set.contains(s))
367
+ .collect()
368
+ })
369
+ .unwrap_or_default();
370
+ (id, d)
371
+ })
372
+ .collect();
373
+
374
+ let mut placed: HashSet<String> = HashSet::new();
375
+ let mut layers: Vec<Vec<String>> = Vec::new();
376
+
377
+ while placed.len() < ids_in_order.len() {
378
+ let mut layer: Vec<String> = Vec::new();
379
+ for id in &ids_in_order {
380
+ if placed.contains(id) {
381
+ continue;
382
+ }
383
+ let ready = deps
384
+ .get(id)
385
+ .map(|ds| ds.iter().all(|d| placed.contains(d)))
386
+ .unwrap_or(true);
387
+ if ready {
388
+ layer.push(id.clone());
389
+ }
390
+ }
391
+ if layer.is_empty() {
392
+ // Cycle or bug — dump the remainder into a final layer so we
393
+ // don't loop forever. v0 plans aren't supposed to cycle.
394
+ for id in &ids_in_order {
395
+ if !placed.contains(id) {
396
+ layer.push(id.clone());
397
+ }
398
+ }
399
+ }
400
+ for id in &layer {
401
+ placed.insert(id.clone());
402
+ }
403
+ layers.push(layer);
404
+ }
405
+
406
+ if layers.is_empty() {
407
+ layers.push(Vec::new());
408
+ }
409
+ layers
410
+ }
411
+
412
+ #[cfg(test)]
413
+ mod tests {
414
+ use super::*;
415
+
416
+ #[test]
417
+ fn newer_version_detection_smoke() {
418
+ assert!(is_newer_than_1_0("1.1"));
419
+ assert!(is_newer_than_1_0("2.0"));
420
+ assert!(!is_newer_than_1_0("1.0"));
421
+ assert!(!is_newer_than_1_0("0.5"));
422
+ assert!(!is_newer_than_1_0("garbage"));
423
+ }
424
+
425
+ #[test]
426
+ fn build_v1_preserves_dependency_order_smoke() {
427
+ let legacy = json!({
428
+ "plan_version": "0.5",
429
+ "colleagues": [
430
+ {"id": "C-01", "type": "researcher", "budget_tokens": 1000, "depends_on": [],
431
+ "output_files": ["research/x.md"], "briefing": {"context": ["a.md"]}},
432
+ {"id": "C-02", "type": "writer", "budget_tokens": 2000, "depends_on": ["C-01"],
433
+ "output_files": ["PRDs/y.md"], "briefing": {"constraints": ["ship Q1"]}}
434
+ ]
435
+ });
436
+ let v1 = build_v1_plan(&legacy, "slug");
437
+ assert_eq!(v1["plan_version"], "1.0");
438
+ assert_eq!(v1["session_id"], "slug");
439
+ assert_eq!(v1["memory_ref"], ".compass/.state/project-memory.json");
440
+ assert!(v1["domain"].is_null());
441
+ let waves = v1["waves"].as_array().unwrap();
442
+ assert_eq!(waves.len(), 2);
443
+ assert_eq!(waves[0]["tasks"][0]["task_id"], "C-01");
444
+ assert_eq!(waves[1]["tasks"][0]["task_id"], "C-02");
445
+ assert_eq!(
446
+ waves[0]["tasks"][0]["context_pointers"][0],
447
+ "TBD_BY_MIGRATE"
448
+ );
449
+ assert_eq!(v1["colleagues_selected"][0], "researcher");
450
+ assert_eq!(v1["colleagues_selected"][1], "writer");
451
+ }
452
+
453
+ #[test]
454
+ fn idempotent_on_v1_input_smoke() {
455
+ let dir = tempdir();
456
+ let session = dir.join("sessions").join("s1");
457
+ fs::create_dir_all(&session).unwrap();
458
+ let v1 = json!({
459
+ "plan_version": "1.0",
460
+ "session_id": "s1",
461
+ "colleagues_selected": [],
462
+ "memory_ref": ".compass/.state/project-memory.json",
463
+ "domain": null,
464
+ "waves": [],
465
+ });
466
+ fs::write(session.join("plan.json"), serde_json::to_string(&v1).unwrap()).unwrap();
467
+ let res = migrate_session(&session).unwrap();
468
+ assert_eq!(res["status"], "already_v1");
469
+ // Backup must NOT be created for already-v1 plans.
470
+ assert!(!session.join("plan.v0.json").exists());
471
+ }
472
+
473
+ #[test]
474
+ fn migrates_legacy_and_writes_backup_smoke() {
475
+ let dir = tempdir();
476
+ let session = dir.join("sessions").join("legacy");
477
+ fs::create_dir_all(&session).unwrap();
478
+ let legacy = json!({
479
+ "plan_version": "0.5",
480
+ "colleagues": [
481
+ {"id": "C-01", "type": "writer", "budget_tokens": 1000,
482
+ "depends_on": [], "output_files": ["PRDs/x.md"], "briefing": {}}
483
+ ]
484
+ });
485
+ let plan_path = session.join("plan.json");
486
+ fs::write(&plan_path, serde_json::to_string(&legacy).unwrap()).unwrap();
487
+
488
+ let res = migrate_session(&session).unwrap();
489
+ assert_eq!(res["status"], "migrated");
490
+
491
+ let rewritten: Value =
492
+ serde_json::from_str(&fs::read_to_string(&plan_path).unwrap()).unwrap();
493
+ assert_eq!(rewritten["plan_version"], "1.0");
494
+ assert!(session.join("plan.v0.json").exists());
495
+
496
+ // Running again is a no-op.
497
+ let again = migrate_session(&session).unwrap();
498
+ assert_eq!(again["status"], "already_v1");
499
+ }
500
+
501
+ #[test]
502
+ fn parse_error_does_not_rewrite_smoke() {
503
+ let dir = tempdir();
504
+ let session = dir.join("sessions").join("broken");
505
+ fs::create_dir_all(&session).unwrap();
506
+ let plan_path = session.join("plan.json");
507
+ fs::write(&plan_path, "{ this is not json").unwrap();
508
+ let err = migrate_session(&session).unwrap_err();
509
+ assert!(err.starts_with("PARSE_ERROR"), "got: {}", err);
510
+ assert!(!session.join("plan.v0.json").exists());
511
+ }
512
+
513
+ #[test]
514
+ fn newer_version_errors_out_smoke() {
515
+ let dir = tempdir();
516
+ let session = dir.join("sessions").join("future");
517
+ fs::create_dir_all(&session).unwrap();
518
+ let plan = json!({"plan_version": "2.0"});
519
+ fs::write(
520
+ session.join("plan.json"),
521
+ serde_json::to_string(&plan).unwrap(),
522
+ )
523
+ .unwrap();
524
+ let err = migrate_session(&session).unwrap_err();
525
+ assert!(err.starts_with("NEWER_VERSION_THAN_CLI"), "got: {}", err);
526
+ }
527
+
528
+ // ---------- Full-coverage tests per TEST-SPEC T-14 ----------
529
+
530
+ /// Recursively copy a directory tree. Used to stage the v0_project fixture
531
+ /// into an isolated tmp dir so each test mutates its own copy.
532
+ fn copy_dir_recursive(src: &Path, dst: &Path) {
533
+ fs::create_dir_all(dst).unwrap();
534
+ for entry in fs::read_dir(src).unwrap() {
535
+ let entry = entry.unwrap();
536
+ let ft = entry.file_type().unwrap();
537
+ let src_path = entry.path();
538
+ let dst_path = dst.join(entry.file_name());
539
+ if ft.is_dir() {
540
+ copy_dir_recursive(&src_path, &dst_path);
541
+ } else {
542
+ fs::copy(&src_path, &dst_path).unwrap();
543
+ }
544
+ }
545
+ }
546
+
547
+ /// Path to the checked-in v0 fixture (two session dirs with v0.5 plans).
548
+ fn fixture_root() -> PathBuf {
549
+ PathBuf::from(env!("CARGO_MANIFEST_DIR"))
550
+ .join("tests")
551
+ .join("fixtures")
552
+ .join("v0_project")
553
+ }
554
+
555
+ /// Stage the fixture into a fresh tmp dir and return the staged root.
556
+ fn stage_fixture(label: &str) -> (PathBuf, PathBuf) {
557
+ let td = tempdir();
558
+ let root = td.join(label);
559
+ copy_dir_recursive(&fixture_root(), &root);
560
+ (td, root)
561
+ }
562
+
563
+ #[test]
564
+ fn v0_to_v1_plan() {
565
+ let (_td, root) = stage_fixture("v0_to_v1_plan");
566
+ let args = vec![root.to_string_lossy().to_string()];
567
+ let summary_str = run(&args).expect("migrate should succeed on v0 fixture");
568
+ let summary: Value = serde_json::from_str(&summary_str).unwrap();
569
+ assert_eq!(summary["ok"], json!(true));
570
+
571
+ let sessions_dir = root.join(".compass").join(".state").join("sessions");
572
+ let session_entries: Vec<PathBuf> = fs::read_dir(&sessions_dir)
573
+ .unwrap()
574
+ .filter_map(|e| e.ok().map(|e| e.path()))
575
+ .filter(|p| p.is_dir())
576
+ .collect();
577
+ assert!(
578
+ session_entries.len() >= 2,
579
+ "fixture should provide >= 2 session dirs, got {}",
580
+ session_entries.len()
581
+ );
582
+
583
+ for session_dir in &session_entries {
584
+ let plan_path = session_dir.join("plan.json");
585
+ let backup_path = session_dir.join("plan.v0.json");
586
+ assert!(
587
+ plan_path.exists(),
588
+ "plan.json missing in {}",
589
+ session_dir.display()
590
+ );
591
+ assert!(
592
+ backup_path.exists(),
593
+ "plan.v0.json backup missing in {}",
594
+ session_dir.display()
595
+ );
596
+
597
+ let migrated: Value =
598
+ serde_json::from_str(&fs::read_to_string(&plan_path).unwrap()).unwrap();
599
+ assert_eq!(
600
+ migrated["plan_version"], "1.0",
601
+ "plan_version not rewritten in {}",
602
+ session_dir.display()
603
+ );
604
+ assert!(
605
+ migrated["domain"].is_null(),
606
+ "domain should be null in {}",
607
+ session_dir.display()
608
+ );
609
+ assert_eq!(
610
+ migrated["memory_ref"], ".compass/.state/project-memory.json",
611
+ "memory_ref not set in {}",
612
+ session_dir.display()
613
+ );
614
+
615
+ // Every task in every wave should carry the TBD_BY_MIGRATE placeholder.
616
+ let waves = migrated["waves"].as_array().expect("waves array");
617
+ let mut saw_task = false;
618
+ for wave in waves {
619
+ for task in wave["tasks"].as_array().expect("tasks array") {
620
+ saw_task = true;
621
+ let cp = task["context_pointers"]
622
+ .as_array()
623
+ .expect("context_pointers array");
624
+ assert_eq!(cp.len(), 1, "context_pointers should be single placeholder");
625
+ assert_eq!(cp[0], "TBD_BY_MIGRATE");
626
+ }
627
+ }
628
+ assert!(
629
+ saw_task,
630
+ "expected at least one task in migrated plan at {}",
631
+ session_dir.display()
632
+ );
633
+ }
634
+ }
635
+
636
+ #[test]
637
+ fn idempotent() {
638
+ let (_td, root) = stage_fixture("idempotent");
639
+ let args = vec![root.to_string_lossy().to_string()];
640
+
641
+ // First run: perform the migration.
642
+ let first = run(&args).expect("first migrate run should succeed");
643
+ let first_json: Value = serde_json::from_str(&first).unwrap();
644
+ assert_eq!(first_json["ok"], json!(true));
645
+
646
+ // Capture each session's backup mtime before the second run so we can
647
+ // later assert it was not rewritten (no double-backup).
648
+ let sessions_dir = root.join(".compass").join(".state").join("sessions");
649
+ let session_paths: Vec<PathBuf> = fs::read_dir(&sessions_dir)
650
+ .unwrap()
651
+ .filter_map(|e| e.ok().map(|e| e.path()))
652
+ .filter(|p| p.is_dir())
653
+ .collect();
654
+
655
+ // Second run: must be a no-op, exit 0, every session already_v1.
656
+ let second = run(&args).expect("second migrate run should also succeed (exit 0)");
657
+ let second_json: Value = serde_json::from_str(&second).unwrap();
658
+ assert_eq!(second_json["ok"], json!(true));
659
+
660
+ let statuses: Vec<&str> = second_json["sessions"]
661
+ .as_array()
662
+ .expect("sessions array")
663
+ .iter()
664
+ .map(|s| s.get("status").and_then(|v| v.as_str()).unwrap_or(""))
665
+ .collect();
666
+ assert!(
667
+ !statuses.is_empty(),
668
+ "expected session statuses on second run, got none"
669
+ );
670
+ for status in &statuses {
671
+ assert_eq!(
672
+ *status, "already_v1",
673
+ "second run should report already_v1 for every session, got {:?}",
674
+ statuses
675
+ );
676
+ }
677
+
678
+ // No double-backup: plan.v0.plan.v0.json must NOT exist in any session.
679
+ // Also: plan.v0.json must exist (from the first run) and must NOT have
680
+ // been re-created as a nested backup.
681
+ for session_dir in &session_paths {
682
+ let nested = session_dir.join("plan.v0.plan.v0.json");
683
+ assert!(
684
+ !nested.exists(),
685
+ "double-backup detected at {}",
686
+ nested.display()
687
+ );
688
+ assert!(
689
+ session_dir.join("plan.v0.json").exists(),
690
+ "first-run backup missing at {}",
691
+ session_dir.display()
692
+ );
693
+ }
694
+ }
695
+
696
+ #[test]
697
+ fn creates_memory() {
698
+ let (_td, root) = stage_fixture("creates_memory");
699
+ let args = vec![root.to_string_lossy().to_string()];
700
+ let summary_str = run(&args).expect("migrate should succeed");
701
+ let summary: Value = serde_json::from_str(&summary_str).unwrap();
702
+ assert_eq!(summary["ok"], json!(true));
703
+
704
+ let memory_path = root
705
+ .join(".compass")
706
+ .join(".state")
707
+ .join("project-memory.json");
708
+ assert!(
709
+ memory_path.exists(),
710
+ "project-memory.json should be created at {}",
711
+ memory_path.display()
712
+ );
713
+
714
+ let memory: Value =
715
+ serde_json::from_str(&fs::read_to_string(&memory_path).unwrap()).unwrap();
716
+ assert_eq!(
717
+ memory["memory_version"], "1.0",
718
+ "memory_version must be \"1.0\""
719
+ );
720
+ let sessions = memory["sessions"]
721
+ .as_array()
722
+ .expect("sessions must be an array");
723
+ assert!(
724
+ sessions.is_empty(),
725
+ "newly created memory must have empty sessions, got {:?}",
726
+ sessions
727
+ );
728
+ }
729
+
730
+ #[test]
731
+ fn corrupt_plan() {
732
+ let (_td, root) = stage_fixture("corrupt_plan");
733
+ let sessions_dir = root.join(".compass").join(".state").join("sessions");
734
+
735
+ // Overwrite one of the fixture session plans with invalid JSON.
736
+ let mut session_paths: Vec<PathBuf> = fs::read_dir(&sessions_dir)
737
+ .unwrap()
738
+ .filter_map(|e| e.ok().map(|e| e.path()))
739
+ .filter(|p| p.is_dir())
740
+ .collect();
741
+ session_paths.sort();
742
+ let target_session = session_paths
743
+ .first()
744
+ .expect("fixture must provide at least one session");
745
+ let plan_path = target_session.join("plan.json");
746
+ let garbage = "{ this is not valid json";
747
+ fs::write(&plan_path, garbage).unwrap();
748
+
749
+ let args = vec![root.to_string_lossy().to_string()];
750
+ let result = run(&args);
751
+ assert!(
752
+ result.is_err(),
753
+ "migrate must fail (exit != 0) when a plan.json is corrupt"
754
+ );
755
+ let err = result.unwrap_err();
756
+ assert!(
757
+ err.contains("PARSE_ERROR"),
758
+ "error must advertise PARSE_ERROR, got: {}",
759
+ err
760
+ );
761
+
762
+ // File was NOT rewritten — raw bytes unchanged on disk.
763
+ let on_disk = fs::read_to_string(&plan_path).unwrap();
764
+ assert_eq!(
765
+ on_disk, garbage,
766
+ "corrupt plan.json must not be rewritten by migrate"
767
+ );
768
+ // And no backup was written for the corrupt file.
769
+ assert!(
770
+ !target_session.join("plan.v0.json").exists(),
771
+ "no backup should be written for a plan that failed to parse"
772
+ );
773
+ }
774
+
775
+ // Minimal tempdir helper — we avoid pulling the `tempfile` crate just for
776
+ // smoke tests. Each test gets a unique subdir under the OS temp root.
777
+ fn tempdir() -> PathBuf {
778
+ let nanos = std::time::SystemTime::now()
779
+ .duration_since(std::time::UNIX_EPOCH)
780
+ .map(|d| d.as_nanos())
781
+ .unwrap_or(0);
782
+ let p = std::env::temp_dir().join(format!(
783
+ "compass-migrate-test-{}-{}",
784
+ std::process::id(),
785
+ nanos
786
+ ));
787
+ fs::create_dir_all(&p).unwrap();
788
+ p
789
+ }
790
+ }