prizmkit 1.1.26 → 1.1.29

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,5 +1,5 @@
1
1
  {
2
- "version": "1.1.26",
2
+ "version": "1.1.29",
3
3
  "skills": {
4
4
  "prizm-kit": {
5
5
  "description": "Full-lifecycle dev toolkit. Covers spec-driven development, Prizm context docs, code quality, debugging, deployment, and knowledge management.",
@@ -20,6 +20,38 @@ User says:
20
20
  - User wants a clean restart → use the original workflow skill directly (`/feature-workflow`, `/bug-fix-workflow`, `/refactor-workflow`)
21
21
  - Nothing was ever started → use the original workflow skill
22
22
 
23
+ ## Pipeline Recovery (Recommended)
24
+
25
+ For reliable recovery that completes ALL remaining phases autonomously, use the shell-driven pipeline:
26
+
27
+ ```bash
28
+ # Auto-detect and recover (with confirmation)
29
+ ./dev-pipeline/run-recovery.sh
30
+
31
+ # Detection report only (no execution)
32
+ ./dev-pipeline/run-recovery.sh detect
33
+
34
+ # Generate prompt without executing (inspect it)
35
+ ./dev-pipeline/run-recovery.sh run --dry-run
36
+
37
+ # Skip confirmation (for scripted/daemon use)
38
+ ./dev-pipeline/run-recovery.sh run --yes
39
+
40
+ # Override AI model
41
+ ./dev-pipeline/run-recovery.sh run --model claude-opus-4.6
42
+ ```
43
+
44
+ The pipeline approach generates a comprehensive bootstrap prompt that explicitly lists every remaining phase with full instructions, ensuring the AI completes the full workflow — not just the implementation part.
45
+
46
+ ### When to use pipeline vs interactive recovery
47
+
48
+ | Scenario | Approach |
49
+ |----------|----------|
50
+ | Pipeline session timed out / crashed | `./run-recovery.sh` — autonomous, completes all phases reliably |
51
+ | Interactive session interrupted | This skill (`/recovery-workflow`) — for in-session interactive use |
52
+ | Want to inspect before recovering | `./run-recovery.sh detect` or `./run-recovery.sh run --dry-run` |
53
+ | Daemon/scripted use | `./run-recovery.sh run --yes` — no user confirmation needed |
54
+
23
55
  ## Supported Workflows
24
56
 
25
57
  | Workflow | Branch Pattern | Key Artifacts |
@@ -181,6 +213,8 @@ Phase inference table:
181
213
  | All docs + review passed | Phase 6: User Verification | Ask user to verify the fix works |
182
214
  | All docs + committed | Phase 7: Merge Decision | Ask merge vs keep branch |
183
215
 
216
+ **Note**: Bug-fix Phases 1-3 (Diagnosis, Triage, Reproduce) collapse to Phase 1 for detection purposes because these phases don't produce persistent artifacts. If interrupted during these phases, recovery restarts from Phase 1 (diagnosis), which re-derives understanding from available inputs (bug description, code) without interactive Q&A.
217
+
184
218
  **Execution for each remaining phase**: Follow the bug-fix-workflow SKILL.md instructions exactly. Call the same prizmkit sub-commands (`/prizmkit-code-review`, `/prizmkit-committer`) at the same points.
185
219
 
186
220
  **Special handling**:
@@ -259,6 +293,7 @@ Recovery complete.
259
293
  | `feature-pipeline-launcher` | **Called in Phase 2.2** — launches or checks pipeline status for feature recovery |
260
294
  | `reset-feature.sh --clean --run` | **Alternative** — full clean retry for pipeline failures; this skill is the smart interactive alternative |
261
295
  | `reset-bug.sh --clean --run` | **Alternative** — full clean retry for bugfix pipeline failures |
296
+ | `run-recovery.sh` | **Pipeline counterpart** — shell-driven recovery that generates bootstrap prompt and spawns AI CLI session for autonomous completion |
262
297
  | `/prizmkit-code-review` | **Called in Phase 2.1** — reviews recovered bug-fix code |
263
298
  | `/prizmkit-committer` | **Called in Phase 2.1** — commits the recovered result |
264
299
 
@@ -114,6 +114,58 @@ def detect_workflow_type(project_root):
114
114
  return (None, None)
115
115
 
116
116
 
117
+ def detect_other_workflows(project_root, primary_type):
118
+ """Scan for other interrupted workflow signals beyond the primary match.
119
+
120
+ Returns a list of workflow type strings that also have signals present,
121
+ excluding the primary_type already detected.
122
+ """
123
+ others = []
124
+ branch = run_git(["branch", "--show-current"], cwd=project_root)
125
+
126
+ # Bug-fix signals
127
+ if primary_type != "bug-fix-workflow":
128
+ if branch.startswith("fix/"):
129
+ others.append("bug-fix-workflow")
130
+ else:
131
+ bugfix_dir = os.path.join(project_root, ".prizmkit", "bugfix")
132
+ if os.path.isdir(bugfix_dir):
133
+ bug_ids = [
134
+ d for d in os.listdir(bugfix_dir)
135
+ if os.path.isdir(os.path.join(bugfix_dir, d))
136
+ ]
137
+ if bug_ids:
138
+ others.append("bug-fix-workflow")
139
+
140
+ # Refactor signals
141
+ if primary_type != "refactor-workflow":
142
+ if branch.startswith("refactor/"):
143
+ others.append("refactor-workflow")
144
+ else:
145
+ for path in [
146
+ os.path.join(project_root, ".prizmkit", "plans", "refactor-list.json"),
147
+ os.path.join(project_root, "refactor-list.json"),
148
+ ]:
149
+ if os.path.isfile(path):
150
+ others.append("refactor-workflow")
151
+ break
152
+
153
+ # Feature signals
154
+ if primary_type != "feature-workflow":
155
+ if branch.startswith("feat/"):
156
+ others.append("feature-workflow")
157
+ else:
158
+ for path in [
159
+ os.path.join(project_root, ".prizmkit", "plans", "feature-list.json"),
160
+ os.path.join(project_root, "feature-list.json"),
161
+ ]:
162
+ if os.path.isfile(path):
163
+ others.append("feature-workflow")
164
+ break
165
+
166
+ return others
167
+
168
+
117
169
  # ---------------------------------------------------------------------------
118
170
  # Phase inference — one function per workflow
119
171
  # ---------------------------------------------------------------------------
@@ -285,18 +337,21 @@ def detect_code_changes(project_root, main_branch="main"):
285
337
 
286
338
  Filters out pipeline/config files that aren't source code — only counts
287
339
  files that represent actual implementation work.
340
+
341
+ Uses a file_statuses dict keyed by filepath to avoid double-counting
342
+ files that appear in both committed diff and uncommitted changes.
288
343
  """
289
344
  IGNORED_FILES = {
290
- ".prizmkit/plans/feature-list.json",
291
- ".prizmkit/plans/bug-fix-list.json",
292
- ".prizmkit/plans/refactor-list.json",
345
+ # Basename-matched list files (root-level legacy paths)
293
346
  "feature-list.json",
294
347
  "bug-fix-list.json",
295
348
  "refactor-list.json",
349
+ # Lock files
296
350
  "package-lock.json",
297
351
  "yarn.lock",
298
352
  "pnpm-lock.yaml",
299
353
  }
354
+ # Note: .prizmkit/plans/*.json paths are caught by IGNORED_PREFIXES below
300
355
  IGNORED_PREFIXES = (
301
356
  ".prizmkit/",
302
357
  ".prizm-docs/",
@@ -314,29 +369,16 @@ def detect_code_changes(project_root, main_branch="main"):
314
369
  return False
315
370
  return True
316
371
 
317
- result = {
318
- "files_modified": 0,
319
- "files_added": 0,
320
- "files_deleted": 0,
321
- "test_files_touched": 0,
322
- "directories_touched": [],
323
- "has_changes": False,
324
- }
372
+ # Track unique file → status to avoid double-counting.
373
+ # Later sources (uncommitted, untracked) update the status if the file
374
+ # was already seen in a committed diff.
375
+ file_statuses = {} # filepath → "M" | "A" | "D"
325
376
 
326
- # Diff relative to main
377
+ # Diff relative to main (committed changes on branch)
327
378
  diff_output = run_git(
328
379
  ["diff", main_branch, "--name-status"], cwd=project_root
329
380
  )
330
381
 
331
- # Also include uncommitted changes
332
- uncommitted = run_git(["diff", "--name-status"], cwd=project_root)
333
- untracked = run_git(
334
- ["ls-files", "--others", "--exclude-standard"], cwd=project_root
335
- )
336
-
337
- all_files = set()
338
- dirs = set()
339
-
340
382
  if diff_output:
341
383
  for line in diff_output.strip().split("\n"):
342
384
  if not line.strip():
@@ -344,40 +386,61 @@ def detect_code_changes(project_root, main_branch="main"):
344
386
  parts = line.split("\t", 1)
345
387
  if len(parts) < 2:
346
388
  continue
347
- status, filepath = parts[0], parts[1]
389
+ status, filepath = parts[0][0], parts[1] # first char of status
348
390
  if not is_source_file(filepath):
349
391
  continue
350
- all_files.add(filepath)
351
- if status.startswith("M"):
352
- result["files_modified"] += 1
353
- elif status.startswith("A"):
354
- result["files_added"] += 1
355
- elif status.startswith("D"):
356
- result["files_deleted"] += 1
392
+ file_statuses[filepath] = status
357
393
 
394
+ # Uncommitted working tree changes — update status for already-seen files
395
+ uncommitted = run_git(["diff", "--name-status"], cwd=project_root)
358
396
  if uncommitted:
359
397
  for line in uncommitted.strip().split("\n"):
360
398
  if not line.strip():
361
399
  continue
362
400
  parts = line.split("\t", 1)
363
- if len(parts) >= 2:
364
- filepath = parts[1]
365
- if not is_source_file(filepath):
366
- continue
367
- all_files.add(filepath)
368
- result["files_modified"] += 1
401
+ if len(parts) < 2:
402
+ continue
403
+ status, filepath = parts[0][0], parts[1]
404
+ if not is_source_file(filepath):
405
+ continue
406
+ if filepath not in file_statuses:
407
+ file_statuses[filepath] = "M" # uncommitted change = modified
408
+ # If already tracked from branch diff, keep the branch-level status
369
409
 
410
+ # Untracked files
411
+ untracked = run_git(
412
+ ["ls-files", "--others", "--exclude-standard"], cwd=project_root
413
+ )
370
414
  if untracked:
371
415
  for filepath in untracked.strip().split("\n"):
372
- if filepath.strip() and is_source_file(filepath.strip()):
373
- all_files.add(filepath.strip())
374
- result["files_added"] += 1
416
+ filepath = filepath.strip()
417
+ if filepath and is_source_file(filepath):
418
+ if filepath not in file_statuses:
419
+ file_statuses[filepath] = "A" # untracked = added
420
+
421
+ # Count by status
422
+ result = {
423
+ "files_modified": 0,
424
+ "files_added": 0,
425
+ "files_deleted": 0,
426
+ "test_files_touched": 0,
427
+ "directories_touched": [],
428
+ "has_changes": False,
429
+ }
375
430
 
376
- # Analyze file set
377
431
  test_patterns = re.compile(
378
432
  r"(test|spec|__tests__|\.test\.|\.spec\.)", re.IGNORECASE
379
433
  )
380
- for filepath in all_files:
434
+ dirs = set()
435
+
436
+ for filepath, status in file_statuses.items():
437
+ if status == "M":
438
+ result["files_modified"] += 1
439
+ elif status == "A":
440
+ result["files_added"] += 1
441
+ elif status == "D":
442
+ result["files_deleted"] += 1
443
+
381
444
  if test_patterns.search(filepath):
382
445
  result["test_files_touched"] += 1
383
446
  parent = os.path.dirname(filepath)
@@ -386,7 +449,7 @@ def detect_code_changes(project_root, main_branch="main"):
386
449
  dirs.add(os.sep.join(parts[:2]) + "/")
387
450
 
388
451
  result["directories_touched"] = sorted(dirs)
389
- result["has_changes"] = len(all_files) > 0
452
+ result["has_changes"] = len(file_statuses) > 0
390
453
 
391
454
  return result
392
455
 
@@ -428,7 +491,10 @@ def main():
428
491
  ),
429
492
  }
430
493
  print(json.dumps(report, indent=2))
431
- sys.exit(1)
494
+ sys.exit(0)
495
+
496
+ # Check for other interrupted workflows (informational)
497
+ other_workflows = detect_other_workflows(project_root, workflow_type)
432
498
 
433
499
  # Step 2: Collect git state and code changes once (shared across phase inference + report)
434
500
  cached_branch = context.get("branch")
@@ -467,6 +533,9 @@ def main():
467
533
  },
468
534
  }
469
535
 
536
+ if other_workflows:
537
+ report["other_interrupted_workflows"] = other_workflows
538
+
470
539
  print(json.dumps(report, indent=2))
471
540
 
472
541
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "prizmkit",
3
- "version": "1.1.26",
3
+ "version": "1.1.29",
4
4
  "description": "Create a new PrizmKit-powered project with clean initialization — no framework dev files, just what you need.",
5
5
  "type": "module",
6
6
  "bin": {
package/src/upgrade.js CHANGED
@@ -198,17 +198,24 @@ export async function runUpgrade(directory, options = {}) {
198
198
  const rulesPreset = oldManifest?.options?.rules || 'recommended';
199
199
  const aiCli = userConfig.ai_cli || oldManifest?.options?.aiCli || '';
200
200
 
201
- // Filesystem-based platform detection (overrides manifest if dirs exist)
202
- const hasClaude = await fs.pathExists(path.join(projectRoot, '.claude', 'commands'))
203
- || await fs.pathExists(path.join(projectRoot, '.claude', 'agents'));
204
- const hasCodeBuddy = await fs.pathExists(path.join(projectRoot, '.codebuddy', 'skills'))
205
- || await fs.pathExists(path.join(projectRoot, '.codebuddy', 'agents'));
206
-
201
+ // Platform detection: manifest is the source of truth (set by install/config).
202
+ // Filesystem detection is only used as fallback when manifest has no platform field
203
+ // (e.g. legacy installs before manifest tracked platform).
207
204
  let platform;
208
- if (hasClaude && hasCodeBuddy) platform = 'both';
209
- else if (hasCodeBuddy) platform = 'codebuddy';
210
- else if (hasClaude) platform = 'claude';
211
- else platform = oldManifestPlatform; // fallback to manifest
205
+ if (oldManifest?.platform) {
206
+ platform = oldManifest.platform;
207
+ } else {
208
+ // Fallback: detect from filesystem for legacy manifests without platform field
209
+ const hasClaude = await fs.pathExists(path.join(projectRoot, '.claude', 'commands'))
210
+ || await fs.pathExists(path.join(projectRoot, '.claude', 'agents'));
211
+ const hasCodeBuddy = await fs.pathExists(path.join(projectRoot, '.codebuddy', 'skills'))
212
+ || await fs.pathExists(path.join(projectRoot, '.codebuddy', 'agents'));
213
+
214
+ if (hasClaude && hasCodeBuddy) platform = 'both';
215
+ else if (hasCodeBuddy) platform = 'codebuddy';
216
+ else if (hasClaude) platform = 'claude';
217
+ else platform = 'claude'; // ultimate fallback
218
+ }
212
219
 
213
220
  const newSkillList = await resolveSkillList(suite);
214
221
  const agentsDir = getAgentsDir();
@@ -253,7 +260,7 @@ export async function runUpgrade(directory, options = {}) {
253
260
  const oldVersion = oldManifest?.version || 'unknown';
254
261
  console.log(chalk.bold(' Upgrade Summary:'));
255
262
  console.log(` Version: ${chalk.gray(oldVersion)} → ${chalk.cyan(pkg.version)}`);
256
- console.log(` Platform: ${platform}${platform !== oldManifestPlatform ? chalk.yellow(` (detected from filesystem, manifest had: ${oldManifestPlatform})`) : ''}`);
263
+ console.log(`  Platform: ${platform}${!oldManifest?.platform ? chalk.yellow(' (detected from filesystem; no platform in manifest)') : ''}`);
257
264
  console.log(` Suite: ${suite}`);
258
265
  console.log('');
259
266