bosun 0.41.2 → 0.41.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (73)
  1. package/.env.example +1 -1
  2. package/agent/agent-pool.mjs +9 -2
  3. package/agent/agent-prompt-catalog.mjs +971 -0
  4. package/agent/agent-prompts.mjs +2 -970
  5. package/agent/agent-supervisor.mjs +119 -6
  6. package/agent/autofix-git.mjs +33 -0
  7. package/agent/autofix-prompts.mjs +151 -0
  8. package/agent/autofix.mjs +11 -175
  9. package/agent/bosun-skills.mjs +3 -2
  10. package/bosun.config.example.json +17 -0
  11. package/bosun.schema.json +87 -188
  12. package/cli.mjs +34 -1
  13. package/config/config-doctor.mjs +5 -250
  14. package/config/config-file-names.mjs +5 -0
  15. package/config/config.mjs +89 -493
  16. package/config/executor-config.mjs +493 -0
  17. package/config/repo-root.mjs +1 -2
  18. package/config/workspace-health.mjs +242 -0
  19. package/git/git-safety.mjs +15 -0
  20. package/github/github-oauth-portal.mjs +46 -0
  21. package/infra/library-manager-utils.mjs +22 -0
  22. package/infra/library-manager-well-known-sources.mjs +578 -0
  23. package/infra/library-manager.mjs +512 -1030
  24. package/infra/monitor.mjs +35 -9
  25. package/infra/session-tracker.mjs +10 -7
  26. package/kanban/kanban-adapter.mjs +17 -1
  27. package/lib/codebase-audit-manifests.mjs +117 -0
  28. package/lib/codebase-audit.mjs +18 -115
  29. package/package.json +18 -3
  30. package/server/setup-web-server.mjs +58 -5
  31. package/server/ui-server.mjs +1394 -79
  32. package/shell/codex-config-file.mjs +178 -0
  33. package/shell/codex-config.mjs +538 -575
  34. package/task/task-cli.mjs +54 -3
  35. package/task/task-executor.mjs +143 -13
  36. package/task/task-store.mjs +409 -1
  37. package/telegram/telegram-bot.mjs +127 -0
  38. package/tools/apply-pr-suggestions.mjs +401 -0
  39. package/tools/syntax-check.mjs +28 -9
  40. package/ui/app.js +3 -14
  41. package/ui/components/kanban-board.js +227 -4
  42. package/ui/components/session-list.js +85 -5
  43. package/ui/demo-defaults.js +338 -84
  44. package/ui/demo.html +155 -0
  45. package/ui/modules/session-api.js +96 -0
  46. package/ui/modules/settings-schema.js +1 -2
  47. package/ui/modules/state.js +43 -3
  48. package/ui/setup.html +4 -5
  49. package/ui/styles/components.css +58 -4
  50. package/ui/tabs/agents.js +12 -15
  51. package/ui/tabs/control.js +1 -0
  52. package/ui/tabs/library.js +484 -22
  53. package/ui/tabs/manual-flows.js +105 -29
  54. package/ui/tabs/tasks.js +848 -141
  55. package/ui/tabs/telemetry.js +129 -11
  56. package/ui/tabs/workflow-canvas-utils.mjs +130 -0
  57. package/ui/tabs/workflows.js +293 -23
  58. package/voice/voice-tool-definitions.mjs +757 -0
  59. package/voice/voice-tools.mjs +34 -778
  60. package/workflow/manual-flow-audit.mjs +165 -0
  61. package/workflow/manual-flows.mjs +164 -259
  62. package/workflow/workflow-engine.mjs +147 -58
  63. package/workflow/workflow-nodes/definitions.mjs +1207 -0
  64. package/workflow/workflow-nodes/transforms.mjs +612 -0
  65. package/workflow/workflow-nodes.mjs +358 -63
  66. package/workflow/workflow-templates.mjs +313 -191
  67. package/workflow-templates/_helpers.mjs +154 -0
  68. package/workflow-templates/agents.mjs +61 -4
  69. package/workflow-templates/code-quality.mjs +7 -7
  70. package/workflow-templates/github.mjs +20 -10
  71. package/workflow-templates/task-batch.mjs +44 -11
  72. package/workflow-templates/task-lifecycle.mjs +31 -6
  73. package/workspace/worktree-manager.mjs +277 -3
@@ -23,7 +23,7 @@
  "enabled": true,
  "trigger": "trigger.workflow_call",
  "variables": {
- "mergeMethod": "squash",
+ "mergeMethod": "merge",
  "labelNeedsFix": "bosun-needs-fix",
  "labelNeedsReview": "bosun-needs-human-review",
  "suspiciousDeletionRatio": 3,
@@ -230,7 +230,7 @@
  "type": "action.run_command",
  "label": "Review Gate: Merge Single PR",
  "config": {
- "command": "node -e \" const {execFileSync}=require('child_process'); const pr=(()=>{try{return JSON.parse(String(process.env.BOSUN_PR_INSPECT||'{}'))}catch{return {}}})(); const repo=String(pr.repo||'').trim(); const n=String(pr.prNumber||'').trim(); const ratio=Number('{{suspiciousDeletionRatio}}')||3; const minDel=Number('{{minDestructiveDeletions}}')||500; const labelReview=String('{{labelNeedsReview}}'||'bosun-needs-human-review'); const method=String('{{mergeMethod}}'||'squash').toLowerCase(); function gh(args){return execFileSync('gh',args,{encoding:'utf8',stdio:['pipe','pipe','pipe']}).trim();} if(!repo||!n){console.log(JSON.stringify({mergedCount:0,heldCount:0,skippedCount:1,skipped:[{repo,number:n,reason:'missing_repo_or_pr'}]}));process.exit(0);} try{ const viewRaw=gh(['pr','view',n,'--repo',repo,'--json','number,title,additions,deletions,changedFiles,isDraft']); const view=(()=>{try{return JSON.parse(viewRaw||'{}')}catch{return {}}})(); if(view?.isDraft===true){console.log(JSON.stringify({mergedCount:0,heldCount:0,skippedCount:1,skipped:[{repo,number:n,reason:'draft'}]}));process.exit(0);} const add=Number(view?.additions||0); const del=Number(view?.deletions||0); const changed=Number(view?.changedFiles||0); const destructive=(del>(add*ratio))&&(del>minDel); const tooWide=changed>250; if(destructive||tooWide){ gh(['pr','edit',n,'--repo',repo,'--add-label',labelReview]); gh(['pr','comment',n,'--repo',repo,'--body',':warning: Bosun held this PR for human review due to suspicious diff footprint.']); console.log(JSON.stringify({mergedCount:0,heldCount:1,skippedCount:0,held:[{repo,number:n,reason:destructive?'destructive_diff':'changed_files_too_large',additions:add,deletions:del,changedFiles:changed}]})); process.exit(0); } const checksRaw=gh(['pr','checks',n,'--repo',repo,'--json','name,state,bucket']); const checks=(()=>{try{return JSON.parse(checksRaw||'[]')}catch{return []}})(); const hasFailure=(Array.isArray(checks)?checks:[]).some((x)=>{const s=String(x?.state||'').toUpperCase();const b=String(x?.bucket||'').toUpperCase();return ['FAILURE','ERROR','TIMED_OUT','CANCELLED','STARTUP_FAILURE'].includes(s)||b==='FAIL';}); const hasPending=(Array.isArray(checks)?checks:[]).some((x)=>['QUEUED','IN_PROGRESS','PENDING','WAITING','REQUESTED'].includes(String(x?.state||'').toUpperCase())); if(hasFailure){console.log(JSON.stringify({mergedCount:0,heldCount:0,skippedCount:1,skipped:[{repo,number:n,reason:'ci_failed'}]}));process.exit(0);} if(hasPending){console.log(JSON.stringify({mergedCount:0,heldCount:0,skippedCount:1,skipped:[{repo,number:n,reason:'ci_pending'}]}));process.exit(0);} const mergeArgs=['pr','merge',n,'--repo',repo,'--delete-branch']; if(method==='rebase') mergeArgs.push('--rebase'); else if(method==='merge') mergeArgs.push('--merge'); else mergeArgs.push('--squash'); try{gh(mergeArgs);}catch(directErr){mergeArgs.push('--auto');gh(mergeArgs);} console.log(JSON.stringify({mergedCount:1,heldCount:0,skippedCount:0,merged:[{repo,number:n,title:String(view?.title||'')}] })); }catch(e){ console.log(JSON.stringify({mergedCount:0,heldCount:1,skippedCount:0,held:[{repo,number:n,reason:'merge_attempt_failed',error:String(e?.message||e)}]})); } \"",
+ "command": "node -e \" const {execFileSync}=require('child_process'); const pr=(()=>{try{return JSON.parse(String(process.env.BOSUN_PR_INSPECT||'{}'))}catch{return {}}})(); const repo=String(pr.repo||'').trim(); const n=String(pr.prNumber||'').trim(); const ratio=Number('{{suspiciousDeletionRatio}}')||3; const minDel=Number('{{minDestructiveDeletions}}')||500; const labelReview=String('{{labelNeedsReview}}'||'bosun-needs-human-review'); const method=String('{{mergeMethod}}'||'merge').toLowerCase(); function gh(args){return execFileSync('gh',args,{encoding:'utf8',stdio:['pipe','pipe','pipe']}).trim();} if(!repo||!n){console.log(JSON.stringify({mergedCount:0,heldCount:0,skippedCount:1,skipped:[{repo,number:n,reason:'missing_repo_or_pr'}]}));process.exit(0);} try{ const viewRaw=gh(['pr','view',n,'--repo',repo,'--json','number,title,additions,deletions,changedFiles,isDraft']); const view=(()=>{try{return JSON.parse(viewRaw||'{}')}catch{return {}}})(); if(view?.isDraft===true){console.log(JSON.stringify({mergedCount:0,heldCount:0,skippedCount:1,skipped:[{repo,number:n,reason:'draft'}]}));process.exit(0);} const add=Number(view?.additions||0); const del=Number(view?.deletions||0); const changed=Number(view?.changedFiles||0); const destructive=(del>(add*ratio))&&(del>minDel); const tooWide=changed>250; if(destructive||tooWide){ gh(['pr','edit',n,'--repo',repo,'--add-label',labelReview]); gh(['pr','comment',n,'--repo',repo,'--body',':warning: Bosun held this PR for human review due to suspicious diff footprint.']); console.log(JSON.stringify({mergedCount:0,heldCount:1,skippedCount:0,held:[{repo,number:n,reason:destructive?'destructive_diff':'changed_files_too_large',additions:add,deletions:del,changedFiles:changed}]})); process.exit(0); } const checksRaw=gh(['pr','checks',n,'--repo',repo,'--json','name,state,bucket']); const checks=(()=>{try{return JSON.parse(checksRaw||'[]')}catch{return []}})(); const hasFailure=(Array.isArray(checks)?checks:[]).some((x)=>{const s=String(x?.state||'').toUpperCase();const b=String(x?.bucket||'').toUpperCase();return ['FAILURE','ERROR','TIMED_OUT','CANCELLED','STARTUP_FAILURE'].includes(s)||b==='FAIL';}); const hasPending=(Array.isArray(checks)?checks:[]).some((x)=>['QUEUED','IN_PROGRESS','PENDING','WAITING','REQUESTED'].includes(String(x?.state||'').toUpperCase())); if(hasFailure){console.log(JSON.stringify({mergedCount:0,heldCount:0,skippedCount:1,skipped:[{repo,number:n,reason:'ci_failed'}]}));process.exit(0);} if(hasPending){console.log(JSON.stringify({mergedCount:0,heldCount:0,skippedCount:1,skipped:[{repo,number:n,reason:'ci_pending'}]}));process.exit(0);} const mergeArgs=['pr','merge',n,'--repo',repo,'--delete-branch']; if(method==='rebase') mergeArgs.push('--rebase'); else if(method==='merge') mergeArgs.push('--merge'); else mergeArgs.push('--squash'); try{gh(mergeArgs);}catch(directErr){mergeArgs.push('--auto');gh(mergeArgs);} console.log(JSON.stringify({mergedCount:1,heldCount:0,skippedCount:0,merged:[{repo,number:n,title:String(view?.title||'')}] })); }catch(e){ console.log(JSON.stringify({mergedCount:0,heldCount:1,skippedCount:0,held:[{repo,number:n,reason:'merge_attempt_failed',error:String(e?.message||e)}]})); } \"",
  "continueOnError": true,
  "failOnError": false,
  "env": {
@@ -424,14 +424,15 @@
  "enabled": true,
  "trigger": "trigger.schedule",
  "variables": {
- "mergeMethod": "squash",
+ "mergeMethod": "merge",
  "labelNeedsFix": "bosun-needs-fix",
  "labelNeedsReview": "bosun-needs-human-review",
  "repoScope": "auto",
  "maxPrs": 25,
  "intervalMs": 90000,
  "suspiciousDeletionRatio": 3,
- "minDestructiveDeletions": 500
+ "minDestructiveDeletions": 500,
+ "autoApplySuggestions": true
  },
  "metadata": {
  "author": "bosun",
@@ -679,7 +680,7 @@
  "type": "action.run_command",
  "label": "Review Gate: Programmatic Merge",
  "config": {
- "command": "node -e \" const {execFileSync}=require('child_process'); const raw=String(process.env.BOSUN_FETCH_AND_CLASSIFY||''); const payload=(()=>{try{return JSON.parse(raw||'{}')}catch{return {}}})(); const candidates=Array.isArray(payload.readyCandidates)?payload.readyCandidates:[]; const ratio=Number('{{suspiciousDeletionRatio}}')||3; const minDel=Number('{{minDestructiveDeletions}}')||500; const labelReview=String('{{labelNeedsReview}}'||'bosun-needs-human-review'); const method=String('{{mergeMethod}}'||'squash').toLowerCase(); const merged=[]; const held=[]; const skipped=[]; function gh(args){return execFileSync('gh',args,{encoding:'utf8',stdio:['pipe','pipe','pipe']}).trim();} for(const c of candidates){ const repo=String(c?.repo||'').trim(); const n=String(c?.n||'').trim(); if(!repo||!n){skipped.push({repo,number:n,reason:'missing_repo_or_pr'});continue;} try{ const viewRaw=gh(['pr','view',n,'--repo',repo,'--json','number,title,additions,deletions,changedFiles,isDraft']); const view=(()=>{try{return JSON.parse(viewRaw||'{}')}catch{return {}}})(); if(view?.isDraft===true){skipped.push({repo,number:n,reason:'draft'});continue;} const add=Number(view?.additions||0); const del=Number(view?.deletions||0); const changed=Number(view?.changedFiles||0); const destructive=(del>(add*ratio))&&(del>minDel); const tooWide=changed>250; if(destructive||tooWide){ gh(['pr','edit',n,'--repo',repo,'--add-label',labelReview]); gh(['pr','comment',n,'--repo',repo,'--body',':warning: Bosun held this PR for human review due to suspicious diff footprint.']); held.push({repo,number:n,reason:destructive?'destructive_diff':'changed_files_too_large',additions:add,deletions:del,changedFiles:changed}); continue; } const checksRaw=gh(['pr','checks',n,'--repo',repo,'--json','name,state,bucket']); const checks=(()=>{try{return JSON.parse(checksRaw||'[]')}catch{return []}})(); const hasFailure=(Array.isArray(checks)?checks:[]).some((x)=>{ const s=String(x?.state||'').toUpperCase(); const b=String(x?.bucket||'').toUpperCase(); return ['FAILURE','ERROR','TIMED_OUT','CANCELLED','STARTUP_FAILURE'].includes(s) || b==='FAIL'; }); const hasPending=(Array.isArray(checks)?checks:[]).some((x)=>{ const s=String(x?.state||'').toUpperCase(); return ['QUEUED','IN_PROGRESS','PENDING','WAITING','REQUESTED'].includes(s); }); if(hasFailure){skipped.push({repo,number:n,reason:'ci_failed'});continue;} if(hasPending){skipped.push({repo,number:n,reason:'ci_pending'});continue;} const mergeArgs=['pr','merge',n,'--repo',repo,'--delete-branch']; if(method==='rebase') mergeArgs.push('--rebase'); else if(method==='merge') mergeArgs.push('--merge'); else mergeArgs.push('--squash'); try{gh(mergeArgs);}catch(directErr){ mergeArgs.push('--auto'); gh(mergeArgs); } merged.push({repo,number:n,title:String(view?.title||'')}); }catch(e){ held.push({repo,number:n,reason:'merge_attempt_failed',error:String(e?.message||e)}); } } console.log(JSON.stringify({mergedCount:merged.length,heldCount:held.length,skippedCount:skipped.length,merged,held,skipped})); \"",
+ "command": "node -e \" const {execFileSync}=require('child_process'); const raw=String(process.env.BOSUN_FETCH_AND_CLASSIFY||''); const payload=(()=>{try{return JSON.parse(raw||'{}')}catch{return {}}})(); const candidates=Array.isArray(payload.readyCandidates)?payload.readyCandidates:[]; const ratio=Number('{{suspiciousDeletionRatio}}')||3; const minDel=Number('{{minDestructiveDeletions}}')||500; const labelReview=String('{{labelNeedsReview}}'||'bosun-needs-human-review'); const method=String('{{mergeMethod}}'||'merge').toLowerCase(); const merged=[]; const held=[]; const skipped=[]; function gh(args){return execFileSync('gh',args,{encoding:'utf8',stdio:['pipe','pipe','pipe']}).trim();} for(const c of candidates){ const repo=String(c?.repo||'').trim(); const n=String(c?.n||'').trim(); if(!repo||!n){skipped.push({repo,number:n,reason:'missing_repo_or_pr'});continue;} try{ const viewRaw=gh(['pr','view',n,'--repo',repo,'--json','number,title,additions,deletions,changedFiles,isDraft']); const view=(()=>{try{return JSON.parse(viewRaw||'{}')}catch{return {}}})(); if(view?.isDraft===true){skipped.push({repo,number:n,reason:'draft'});continue;} const add=Number(view?.additions||0); const del=Number(view?.deletions||0); const changed=Number(view?.changedFiles||0); const destructive=(del>(add*ratio))&&(del>minDel); const tooWide=changed>250; if(destructive||tooWide){ gh(['pr','edit',n,'--repo',repo,'--add-label',labelReview]); gh(['pr','comment',n,'--repo',repo,'--body',':warning: Bosun held this PR for human review due to suspicious diff footprint.']); held.push({repo,number:n,reason:destructive?'destructive_diff':'changed_files_too_large',additions:add,deletions:del,changedFiles:changed}); continue; } const checksRaw=gh(['pr','checks',n,'--repo',repo,'--json','name,state,bucket']); const checks=(()=>{try{return JSON.parse(checksRaw||'[]')}catch{return []}})(); const hasFailure=(Array.isArray(checks)?checks:[]).some((x)=>{ const s=String(x?.state||'').toUpperCase(); const b=String(x?.bucket||'').toUpperCase(); return ['FAILURE','ERROR','TIMED_OUT','CANCELLED','STARTUP_FAILURE'].includes(s) || b==='FAIL'; }); const hasPending=(Array.isArray(checks)?checks:[]).some((x)=>{ const s=String(x?.state||'').toUpperCase(); return ['QUEUED','IN_PROGRESS','PENDING','WAITING','REQUESTED'].includes(s); }); if(hasFailure){skipped.push({repo,number:n,reason:'ci_failed'});continue;} if(hasPending){skipped.push({repo,number:n,reason:'ci_pending'});continue;} const doApplySuggestions=String('{{autoApplySuggestions}}'||'true')==='true'&&process.env.BOSUN_AUTO_APPLY_SUGGESTIONS!=='false'; if(doApplySuggestions){ try{ const toolPath=require('path').resolve(process.cwd(),'tools','apply-pr-suggestions.mjs'); if(require('fs').existsSync(toolPath)){ const sugOut=execFileSync('node',[toolPath,'--owner',repo.split('/')[0],'--repo',repo.split('/')[1],n,'--json'],{encoding:'utf8',timeout:60000,stdio:['pipe','pipe','pipe']}); const sugRes=(()=>{try{return JSON.parse(sugOut)}catch{return null}})(); if(sugRes?.commitSha) console.error('[watchdog] auto-applied '+sugRes.applied+' suggestion(s) on PR #'+n+' → '+sugRes.commitSha.slice(0,8)); } }catch(sugErr){console.error('[watchdog] suggestion auto-apply skipped for PR #'+n+': '+String(sugErr?.message||sugErr).slice(0,120));} } const mergeArgs=['pr','merge',n,'--repo',repo,'--delete-branch']; if(method==='rebase') mergeArgs.push('--rebase'); else if(method==='merge') mergeArgs.push('--merge'); else mergeArgs.push('--squash'); try{gh(mergeArgs);}catch(directErr){ mergeArgs.push('--auto'); gh(mergeArgs); } merged.push({repo,number:n,title:String(view?.title||'')}); }catch(e){ held.push({repo,number:n,reason:'merge_attempt_failed',error:String(e?.message||e)}); } } console.log(JSON.stringify({mergedCount:merged.length,heldCount:held.length,skippedCount:skipped.length,merged,held,skipped})); \"",
  "continueOnError": true,
  "failOnError": false,
  "env": {
@@ -696,11 +697,11 @@
  },
  {
  "id": "notify",
- "type": "notify.telegram",
+ "type": "notify.log",
  "label": "Watchdog Report",
  "config": {
- "message": ":bug: Bosun PR Watchdog cycle complete — fix-dispatched: {{fixNeeded}} | candidates-reviewed: {{readyCandidates}}",
- "silent": true
+ "message": "Bosun PR Watchdog cycle complete — see live digest/status board for streaming updates",
+ "level": "info"
  },
  "position": {
  "x": 400,
@@ -1642,7 +1643,7 @@
  "type": "action.run_command",
  "label": "Auto-Merge PR",
  "config": {
- "command": "gh pr merge {{prNumber}} --auto --squash",
+ "command": "gh pr merge {{prNumber}} --auto --merge",
  "failOnError": true,
  "maxRetries": "{{maxRetries}}",
  "retryDelayMs": 30000,
@@ -3401,7 +3402,7 @@
  "type": "action.run_command",
  "label": "List Active Sessions",
  "config": {
- "command": "bosun agent list --json --active",
+ "command": "node -e \"const fs = require('node:fs');const path = require('node:path');const { pathToFileURL } = require('node:url');const cwd = process.cwd();const mirrorMarker = `${path.sep}.bosun${path.sep}workspaces${path.sep}`.toLowerCase();let repoRoot = cwd;if (cwd.toLowerCase().includes(mirrorMarker)) {const sourceRepoRoot = path.resolve(cwd, '..', '..', '..', '..');if (fs.existsSync(path.join(sourceRepoRoot, 'infra', 'session-tracker.mjs'))) repoRoot = sourceRepoRoot;}const trackerModuleUrl = pathToFileURL(path.join(repoRoot, 'infra', 'session-tracker.mjs')).href;import(trackerModuleUrl).then(({ getSessionTracker }) => {const tracker = getSessionTracker();const sessions = tracker.getActiveSessions().map((session) => {const progress = tracker.getProgressStatus(session.taskId);return {id: session.taskId,taskId: session.taskId,taskTitle: session.taskTitle || null,status: progress.status,idleMs: progress.idleMs,totalEvents: progress.totalEvents,elapsedMs: progress.elapsedMs,lastEventType: progress.lastEventType,recommendation: progress.recommendation,tokenPercent: null};});console.log(JSON.stringify(sessions));}).catch((err) => { console.error(err?.stack || String(err)); process.exit(1); });\"",
  "continueOnError": true
  },
  "position": {
@@ -3417,7 +3418,7 @@
  "type": "condition.expression",
  "label": "Any Active?",
  "config": {
- "expression": "($ctx.getNodeOutput('list-sessions')?.output || '[]') !== '[]' && ($ctx.getNodeOutput('list-sessions')?.output || '').length > 5"
+ "expression": "(() => {const raw = String($ctx.getNodeOutput('list-sessions')?.output || '[]');const lines = raw.split(/\\r?\\n/).map((line) => line.trim()).filter(Boolean);const candidate = lines.length ? lines[lines.length - 1] : '[]';try {const parsed = JSON.parse(candidate);return Array.isArray(parsed) ? parsed : [];} catch {return [];}})().length > 0"
  },
  "position": {
  "x": 400,
@@ -3432,7 +3433,7 @@
  "type": "action.run_command",
  "label": "Check Session Health",
  "config": {
- "command": "bosun agent health --json",
+ "command": "node -e \"const fs = require('node:fs');const path = require('node:path');const { pathToFileURL } = require('node:url');const cwd = process.cwd();const mirrorMarker = `${path.sep}.bosun${path.sep}workspaces${path.sep}`.toLowerCase();let repoRoot = cwd;if (cwd.toLowerCase().includes(mirrorMarker)) {const sourceRepoRoot = path.resolve(cwd, '..', '..', '..', '..');if (fs.existsSync(path.join(sourceRepoRoot, 'infra', 'session-tracker.mjs'))) repoRoot = sourceRepoRoot;}const trackerModuleUrl = pathToFileURL(path.join(repoRoot, 'infra', 'session-tracker.mjs')).href;import(trackerModuleUrl).then(({ getSessionTracker }) => {const tracker = getSessionTracker();const sessions = tracker.getActiveSessions().map((session) => {const progress = tracker.getProgressStatus(session.taskId);return {id: session.taskId,taskId: session.taskId,taskTitle: session.taskTitle || null,status: progress.status,idleMs: progress.idleMs,totalEvents: progress.totalEvents,elapsedMs: progress.elapsedMs,lastEventType: progress.lastEventType,recommendation: progress.recommendation,tokenPercent: null};});console.log(JSON.stringify(sessions));}).catch((err) => { console.error(err?.stack || String(err)); process.exit(1); });\"",
  "continueOnError": true
  },
  "position": {
@@ -3448,7 +3449,7 @@
  "type": "condition.expression",
  "label": "Any Unhealthy?",
  "config": {
- "expression": "(() => { const out = String($ctx.getNodeOutput('check-health')?.output || ''); const maxIdleMs = Number($data?.maxIdleMs || 600000); const maxTokenPercent = Number($data?.maxTokenPercent || 85); const idleMatch = out.match(/idle(?:_ms)?\\s*[:=]\\s*(\\d+)/i); const tokenMatch = out.match(/token(?:_usage|_percent)?\\s*[:=]\\s*(\\d+(?:\\.\\d+)?)/i); const idleExceeded = idleMatch ? Number(idleMatch[1]) > maxIdleMs : false; const tokenExceeded = tokenMatch ? Number(tokenMatch[1]) >= maxTokenPercent : false; return out.includes('stalled') || out.includes('timeout') || idleExceeded || tokenExceeded; })()"
+ "expression": "(() => { const sessions = (() => {const raw = String($ctx.getNodeOutput('check-health')?.output || '[]');const lines = raw.split(/\\r?\\n/).map((line) => line.trim()).filter(Boolean);const candidate = lines.length ? lines[lines.length - 1] : '[]';try {const parsed = JSON.parse(candidate);return Array.isArray(parsed) ? parsed : [];} catch {return [];}})(); const maxIdleMs = Number($data?.maxIdleMs || 600000); const maxTokenPercent = Number($data?.maxTokenPercent || 85); return sessions.some((item) => { const status = String(item?.status || '').toLowerCase(); const idleMs = Number(item?.idleMs || 0); const tokenPercent = Number(item?.tokenPercent); return status === 'idle' || status === 'stalled' || status === 'timeout' || idleMs > maxIdleMs || (Number.isFinite(tokenPercent) && tokenPercent >= maxTokenPercent); }); })()"
  },
  "position": {
  "x": 200,
@@ -14848,7 +14849,7 @@
  "trigger": "trigger.schedule",
  "variables": {
  "sessionTimeoutMs": 5400000,
- "branch": "chore/code-quality-striker-{{_runId}}",
+ "branch": "chore/code-quality-striker",
  "baseBranch": "main",
  "sessionLogPath": ".bosun-monitor/code-quality-striker.md",
  "maxFilesPerSession": 6,
@@ -14945,7 +14946,7 @@
  "config": {
  "timeoutMs": "{{sessionTimeoutMs}}",
  "sdk": "auto",
- "prompt": "# Code Quality Striker\n\nYou are a **structural quality agent**. Your sole mandate is to improve the\ninternal structure of the codebase so that future agentic models can work on\nit more efficiently — smaller files, clearer module boundaries, zero\nduplication, and self-contained functions.\n\n## Session Constraints\n\n- **Hard session cap**: you have at most 90 minutes total. Budget your time.\n- **You MUST open a PR before ending** — a session with no PR is a failed\n session. If you run out of time mid-refactor, commit what you have, push,\n and open the PR immediately even if the work is partial, AS LONG AS all\n tests pass.\n- **Maximum {{maxFilesPerSession}} source files changed** in a single PR.\n Keep diffs small and reviewable. Better to do one clean split per session\n than attempt a mega-refactor.\n- You may run multiple sessions and PRs over time. Prefer incremental progress.\n\n## ✅ Allowed Changes (ONLY these)\n\n1. **Module decomposition** — extract a large file into smaller, focused\n modules. The extracted module must be imported back so the public surface\n is identical.\n2. **Function splitting** — break functions > ~80 lines into smaller,\n well-named helpers within the same file or a co-located util module.\n3. **Deduplication** — extract identical or near-identical logic blocks into\n a shared helper. Must not change call-site behaviour.\n4. **Dead code removal** — remove functions, variables, or imports that are\n verifiably unreferenced (no callers anywhere in the repo).\n5. **Import cleanup** — remove unused imports, deduplicate import statements,\n consolidate barrel imports.\n\n## ❌ Forbidden Changes (HARD STOPS — never do these)\n\n- Adding, removing, or changing any exported function signature or return value\n- Changing any HTTP route path, method, or response shape\n- Changing any config key names or default values\n- Adding new features, flags, or options of any kind\n- Changing test assertions or test logic\n- Renaming exported symbols (only rename internal/private symbols)\n- Adding comments, JSDoc, or inline documentation (unless minimal and necessary)\n- Changing error messages visible to users or logs (string literals)\n- Any change to .json, .sh, .md, .yaml, .html, .css, or non-.mjs/.js files\n (unless you are only touching an import path string that is broken by a move)\n\n## Workflow\n\n### Step 1 — Identify your target\n\nUse the candidate file list provided (sorted by size, largest first):\n\n```\n{{scan-candidates.output}}\n```\n\nPick **1–3 files** for this session. Prioritise:\n- Files > 500 lines used by multiple modules (high parallel-conflict risk)\n- Files with clearly repeated logic blocks\n- Files with functions > 100 lines that have distinct sub-responsibilities\n\nRead each target file in full before making any decision. Do NOT edit\nanything you have not fully read.\n\n### Step 2 — Plan your split in writing\n\nBefore touching the file, write a short plan (to yourself, as a comment in\nyour reasoning — DO NOT add it to the code):\n- What gets extracted and where\n- New file names (follow existing naming conventions in that directory)\n- Which exports stay vs. move\n- Any callers that will need their import paths updated\n\n### Step 3 — Extract and wire up\n\n- Create the new module file(s) under the same directory as the source file.\n- Update the source file to re-export or directly import the extracted piece\n so every existing call-site continues to work without modification.\n- Update any OTHER files that directly imported from the source file, if and\n only if you moved an export that those files reference. Use\n `grep -r 'importedName' --include='*.mjs' --include='*.js'` to find callers.\n- **Do not touch callers unless strictly required by the move.**\n\n### Step 4 — Validate before committing\n\nRun ALL of the following in order. Do not commit if any fail:\n\n```bash\n# 1. Syntax check every file you touched\n{{syntaxCheckCommand}} <file1> <file2> ...\n\n# 2. Lint check (if configured)\n{{lintCommand}}\n\n# 3. Full test suite — must be 0 failures, 0 unexpected skips\n{{testCommand}}\n\n# 4. Build — must pass clean\n{{buildCommand}}\n```\n\nIf tests fail, **revert your change** (`git checkout -- <file>`) and either:\n- Attempt a smaller, safer split of the same file, OR\n- Move to a different target file\n\n**Never push a failing test suite.**\n\n### Step 5 — Commit, push, and open the PR\n\nBranch name: `chore/code-quality-striker-{{_runId}}`\nBase branch: `{{baseBranch}}`\n\nCommit message format:\n```\nrefactor(<module>): split <description>\n\n- extracted <what> into <new-file>\n- <any other bullet points>\n\nNo functional changes. All tests pass.\n```\n\nPR title: `refactor: code quality pass — <one-line summary>`\n\nPR body template:\n```markdown\n## Code Quality Pass\n\n**Session**: code-quality-striker {{_runId}}\n**Scope**: structural refactor only — zero functional changes\n\n### Changes\n- <bullet per extracted module or dedup>\n\n### Validation\n- `{{syntaxCheckCommand}}` passed on all touched files\n- `{{testCommand}}` passed (N tests)\n- `{{buildCommand}}` passed\n\n### Why\n<one sentence: \"X was Y lines with Z responsibilities; split to improve\nparallel edit safety for future agent sessions.\">\n```\n\n### Step 6 — Write session log\n\nAppend a new entry to `{{sessionLogPath}}` using this\nexact format (create the file if it does not exist):\n\n```markdown\n## <ISO timestamp with timezone>\n\n- Scope: <one sentence describing what was refactored>\n- Files changed: <comma-separated list>\n- Strategy: <what split/dedup/cleanup was performed and why>\n- Validation evidence:\n - `{{syntaxCheckCommand}}` passed on all touched files\n - `{{testCommand}}` passed (N tests)\n - `{{buildCommand}}` passed\n- PR: #<number> — `<branch name>`\n```\n\n## Time Budget Warning\n\nIf you have fewer than 15 minutes remaining:\n- Stop new analysis immediately\n- Commit and push whatever passing changes you have\n- Open the PR even if the scope is smaller than planned\n- Write the session log\n- Stop\n\nA small, clean, tested PR is always better than nothing."
+ "prompt": "# Code Quality Striker\n\nYou are a **structural quality agent**. Your sole mandate is to improve the\ninternal structure of the codebase so that future agentic models can work on\nit more efficiently — smaller files, clearer module boundaries, zero\nduplication, and self-contained functions.\n\n## Session Constraints\n\n- **Hard session cap**: you have at most 90 minutes total. Budget your time.\n- **You MUST open a PR before ending** — a session with no PR is a failed\n session. If you run out of time mid-refactor, commit what you have, push,\n and open the PR immediately even if the work is partial, AS LONG AS all\n tests pass.\n- **Maximum {{maxFilesPerSession}} source files changed** in a single PR.\n Keep diffs small and reviewable. Better to do one clean split per session\n than attempt a mega-refactor.\n- You may run multiple sessions and PRs over time. Prefer incremental progress.\n\n## ✅ Allowed Changes (ONLY these)\n\n1. **Module decomposition** — extract a large file into smaller, focused\n modules. The extracted module must be imported back so the public surface\n is identical.\n2. **Function splitting** — break functions > ~80 lines into smaller,\n well-named helpers within the same file or a co-located util module.\n3. **Deduplication** — extract identical or near-identical logic blocks into\n a shared helper. Must not change call-site behaviour.\n4. **Dead code removal** — remove functions, variables, or imports that are\n verifiably unreferenced (no callers anywhere in the repo).\n5. **Import cleanup** — remove unused imports, deduplicate import statements,\n consolidate barrel imports.\n\n## ❌ Forbidden Changes (HARD STOPS — never do these)\n\n- Adding, removing, or changing any exported function signature or return value\n- Changing any HTTP route path, method, or response shape\n- Changing any config key names or default values\n- Adding new features, flags, or options of any kind\n- Changing test assertions or test logic\n- Renaming exported symbols (only rename internal/private symbols)\n- Adding comments, JSDoc, or inline documentation (unless minimal and necessary)\n- Changing error messages visible to users or logs (string literals)\n- Any change to .json, .sh, .md, .yaml, .html, .css, or non-.mjs/.js files\n (unless you are only touching an import path string that is broken by a move)\n\n## Workflow\n\n### Step 1 — Identify your target\n\nUse the candidate file list provided (sorted by size, largest first):\n\n```\n{{scan-candidates.output}}\n```\n\nPick **1–3 files** for this session. Prioritise:\n- Files > 500 lines used by multiple modules (high parallel-conflict risk)\n- Files with clearly repeated logic blocks\n- Files with functions > 100 lines that have distinct sub-responsibilities\n\nRead each target file in full before making any decision. Do NOT edit\nanything you have not fully read.\n\n### Step 2 — Plan your split in writing\n\nBefore touching the file, write a short plan (to yourself, as a comment in\nyour reasoning — DO NOT add it to the code):\n- What gets extracted and where\n- New file names (follow existing naming conventions in that directory)\n- Which exports stay vs. move\n- Any callers that will need their import paths updated\n\n### Step 3 — Extract and wire up\n\n- Create the new module file(s) under the same directory as the source file.\n- Update the source file to re-export or directly import the extracted piece\n so every existing call-site continues to work without modification.\n- Update any OTHER files that directly imported from the source file, if and\n only if you moved an export that those files reference. Use\n `grep -r 'importedName' --include='*.mjs' --include='*.js'` to find callers.\n- **Do not touch callers unless strictly required by the move.**\n\n### Step 4 — Validate before committing\n\nRun ALL of the following in order. Do not commit if any fail:\n\n```bash\n# 1. Syntax check every file you touched\n{{syntaxCheckCommand}} <file1> <file2> ...\n\n# 2. Lint check (if configured)\n{{lintCommand}}\n\n# 3. Full test suite — must be 0 failures, 0 unexpected skips\n{{testCommand}}\n\n# 4. Build — must pass clean\n{{buildCommand}}\n```\n\nIf tests fail, **revert your change** (`git checkout -- <file>`) and either:\n- Attempt a smaller, safer split of the same file, OR\n- Move to a different target file\n\n**Never push a failing test suite.**\n\n### Step 5 — Commit, push, and open the PR\n\nBranch name: `{{branch}}`\nBase branch: `{{baseBranch}}`\n\nCommit message format:\n```\nrefactor(<module>): split <description>\n\n- extracted <what> into <new-file>\n- <any other bullet points>\n\nNo functional changes. All tests pass.\n```\n\nPR title: `refactor: code quality pass — <one-line summary>`\n\nPR body template:\n```markdown\n## Code Quality Pass\n\n**Session**: {{branch}}\n**Scope**: structural refactor only — zero functional changes\n\n### Changes\n- <bullet per extracted module or dedup>\n\n### Validation\n- `{{syntaxCheckCommand}}` passed on all touched files\n- `{{testCommand}}` passed (N tests)\n- `{{buildCommand}}` passed\n\n### Why\n<one sentence: \"X was Y lines with Z responsibilities; split to improve\nparallel edit safety for future agent sessions.\">\n```\n\n### Step 6 — Write session log\n\nAppend a new entry to `{{sessionLogPath}}` using this\nexact format (create the file if it does not exist):\n\n```markdown\n## <ISO timestamp with timezone>\n\n- Scope: <one sentence describing what was refactored>\n- Files changed: <comma-separated list>\n- Strategy: <what split/dedup/cleanup was performed and why>\n- Validation evidence:\n - `{{syntaxCheckCommand}}` passed on all touched files\n - `{{testCommand}}` passed (N tests)\n - `{{buildCommand}}` passed\n- PR: #<number> — `<branch name>`\n```\n\n## Time Budget Warning\n\nIf you have fewer than 15 minutes remaining:\n- Stop new analysis immediately\n- Commit and push whatever passing changes you have\n- Open the PR even if the scope is smaller than planned\n- Write the session log\n- Stop\n\nA small, clean, tested PR is always better than nothing."
  },
  "position": {
  "x": 400,
@@ -15007,7 +15008,7 @@
  "type": "action.create_pr",
  "label": "Open Quality PR",
  "config": {
- "title": "refactor: code quality pass {{_runId}}",
+ "title": "refactor: code quality pass",
  "body": "Automated code-quality session. Structural refactor only — zero functional changes. See `.bosun-monitor/code-quality-striker.md` for session details.",
  "branch": "{{branch}}",
  "baseBranch": "{{baseBranch}}",
@@ -15030,7 +15031,7 @@
  "type": "notify.telegram",
  "label": "Notify PR Opened",
  "config": {
- "message": ":check: Code quality striker session complete.\nPR opened: **{{branch}}**\nRun ID: `{{_runId}}`",
+ "message": ":check: Code quality striker session complete.\nPR opened: **{{branch}}**",
  "silent": true
  },
  "position": {
@@ -15046,7 +15047,7 @@
  "type": "notify.telegram",
  "label": "Notify — Validation Failed",
  "config": {
- "message": ":alert: Code quality striker **validation failed** for run `{{_runId}}`.\n\nThe agent produced changes that broke tests or build. No PR was created.\nCheck `.bosun-monitor/code-quality-striker.md` for details."
+ "message": ":alert: Code quality striker **validation failed**.\n\nThe agent produced changes that broke tests or build. No PR was created.\nCheck `.bosun-monitor/code-quality-striker.md` for details."
  },
  "position": {
  "x": 600,
@@ -15061,7 +15062,7 @@
  "type": "notify.log",
  "label": "Log Failure",
  "config": {
- "message": "Code quality striker run {{_runId}} failed validation — no PR created.",
+ "message": "Code quality striker validation failed — no PR created.",
  "level": "warn"
  },
  "position": {
@@ -17832,8 +17833,7 @@
  "pollStatus": "todo",
  "maxBatchSize": 5,
  "defaultBaseBranch": "main",
- "draftPR": true,
- "notifyChannel": "telegram"
+ "draftPR": true
  },
  "metadata": {
  "author": "bosun",
@@ -17877,7 +17877,7 @@
  "command": "node",
  "args": [
  "-e",
- "\n import(\"./kanban-adapter.mjs\")\n .then(k => k.listTasks(undefined, { status: \"todo\" }))\n .then(tasks => {\n const filtered = (tasks || []).filter((task) => {\n const repository = typeof task?.repository === \"string\" ? task.repository.trim() : \"\";\n const workspace = typeof task?.workspace === \"string\" ? task.workspace.trim() : \"\";\n return task && task.status === \"todo\" && !task.draft && repository.length > 0 && workspace.length > 0;\n });\n const batch = filtered.slice(0, parseInt(process.env.MAX_BATCH || \"5\"));\n console.log(JSON.stringify(batch.map(t => ({\n taskId: t.id,\n taskTitle: t.title || t.id,\n branch: t.branch || t.metadata?.branch || null,\n repository: t.repository || null,\n workspace: t.workspace || null,\n }))));\n })\n .catch(e => { console.error(e.message); process.exit(1); });\n "
+ "\n const fs = require(\"node:fs\");\n const path = require(\"node:path\");\n const { pathToFileURL } = require(\"node:url\");\n const cwd = process.cwd();\n const mirrorMarker = (path.sep + \".bosun\" + path.sep + \"workspaces\" + path.sep).toLowerCase();\n let repoRoot = cwd;\n if (cwd.toLowerCase().includes(mirrorMarker)) {\n const sourceRepoRoot = path.resolve(cwd, \"..\", \"..\", \"..\", \"..\");\n if (fs.existsSync(path.join(sourceRepoRoot, \"kanban\", \"kanban-adapter.mjs\"))) repoRoot = sourceRepoRoot;\n }\n const kanbanModuleUrl = pathToFileURL(path.join(repoRoot, \"kanban\", \"kanban-adapter.mjs\")).href;\n import(kanbanModuleUrl)\n .then(k => k.listTasks(undefined, { status: \"todo\" }))\n .then(tasks => {\n const filtered = (tasks || []).filter((task) => {\n const repository = typeof task?.repository === \"string\" ? task.repository.trim() : \"\";\n const workspace = typeof task?.workspace === \"string\" ? task.workspace.trim() : \"\";\n return task && task.status === \"todo\" && !task.draft && repository.length > 0 && workspace.length > 0;\n });\n const batch = filtered.slice(0, parseInt(process.env.MAX_BATCH || \"5\"));\n console.log(JSON.stringify(batch.map(t => ({\n taskId: t.id,\n taskTitle: t.title || t.id,\n branch: t.branch || t.metadata?.branch || null,\n repository: t.repository || null,\n workspace: t.workspace || null,\n }))));\n })\n .catch(e => { console.error(e.message); process.exit(1); });\n "
  ],
  "env": {
  "MAX_BATCH": "{{maxBatchSize}}"
@@ -18058,11 +18058,11 @@
  },
  {
  "id": "notify",
- "type": "notify.telegram",
+ "type": "notify.log",
  "label": "Batch Complete",
  "config": {
- "channel": "{{notifyChannel}}",
- "message": "Task batch PR pipeline complete"
+ "message": "Task batch PR pipeline complete",
+ "level": "info"
  },
  "position": {
  "x": 400,
@@ -18165,8 +18165,8 @@
  "dispatch",
  "lifecycle"
  ],
- "nodeCount": 7,
- "edgeCount": 6,
+ "nodeCount": 9,
+ "edgeCount": 8,
  "recommended": true,
  "enabled": true,
  "trigger": "trigger.task_available",
@@ -18231,7 +18231,7 @@
  "command": "node",
  "args": [
  "-e",
- "\n import(\"./kanban-adapter.mjs\")\n .then(k => k.listTasks(undefined, { status: \"todo\" }))\n .then(tasks => {\n const filtered = (tasks || []).filter((task) => {\n const repository = typeof task?.repository === \"string\" ? task.repository.trim() : \"\";\n const workspace = typeof task?.workspace === \"string\" ? task.workspace.trim() : \"\";\n return task && task.status === \"todo\" && !task.draft && repository.length > 0 && workspace.length > 0;\n });\n const batch = filtered.slice(0, parseInt(process.env.MAX_BATCH || \"10\"));\n console.log(JSON.stringify(batch.map(t => ({\n taskId: t.id,\n taskTitle: t.title || t.id,\n status: t.status,\n branch: t.branch || t.metadata?.branch || null,\n scope: t.scope || t.metadata?.scope || null,\n repository: typeof t?.repository === \"string\" ? t.repository.trim() : null,\n workspace: typeof t?.workspace === \"string\" ? t.workspace.trim() : null,\n }))));\n })\n .catch(e => { console.error(e.message); process.exit(1); });\n "
+ "\n const fs = require(\"node:fs\");\n const path = require(\"node:path\");\n const { pathToFileURL } = require(\"node:url\");\n const cwd = process.cwd();\n const mirrorMarker = (path.sep + \".bosun\" + path.sep + \"workspaces\" + path.sep).toLowerCase();\n let repoRoot = cwd;\n if (cwd.toLowerCase().includes(mirrorMarker)) {\n const sourceRepoRoot = path.resolve(cwd, \"..\", \"..\", \"..\", \"..\");\n if (fs.existsSync(path.join(sourceRepoRoot, \"kanban\", \"kanban-adapter.mjs\"))) repoRoot = sourceRepoRoot;\n }\n const kanbanModuleUrl = pathToFileURL(path.join(repoRoot, \"kanban\", \"kanban-adapter.mjs\")).href;\n import(kanbanModuleUrl)\n .then(k => k.listTasks(undefined, { status: \"todo\" }))\n .then(tasks => {\n const filtered = (tasks || []).filter((task) => {\n const repository = typeof task?.repository === \"string\" ? task.repository.trim() : \"\";\n const workspace = typeof task?.workspace === \"string\" ? task.workspace.trim() : \"\";\n return task && task.status === \"todo\" && !task.draft && repository.length > 0 && workspace.length > 0;\n });\n const batch = filtered.slice(0, parseInt(process.env.MAX_BATCH || \"10\"));\n console.log(JSON.stringify(batch.map(t => ({\n taskId: t.id,\n taskTitle: t.title || t.id,\n status: t.status,\n branch: t.branch || t.metadata?.branch || null,\n scope: t.scope || t.metadata?.scope || null,\n repository: typeof t?.repository === \"string\" ? t.repository.trim() : null,\n workspace: typeof t?.workspace === \"string\" ? t.workspace.trim() : null,\n }))));\n })\n .catch(e => { console.error(e.message); process.exit(1); });\n "
  ],
  "env": {
  "MAX_BATCH": "{{maxBatchSize}}"
@@ -18301,12 +18301,11 @@
  ]
  },
  {
- "id": "notify-complete",
- "type": "notify.telegram",
- "label": "Batch Summary",
+ "id": "has-batch-failures",
+ "type": "condition.expression",
+ "label": "Any Batch Failures?",
  "config": {
- "channel": "{{notifyChannel}}",
- "message": "Task batch completed: {{dispatch-tasks.successCount}}/{{dispatch-tasks.totalItems}} succeeded ({{dispatch-tasks.failCount}} failed)"
+ "expression": "Number($data?.batchResult?.failCount || 0) > 0"
  },
  "position": {
  "x": 400,
@@ -18315,6 +18314,38 @@
  "outputs": [
  "default"
  ]
+ },
+ {
+ "id": "notify-failures",
+ "type": "notify.telegram",
+ "label": "Batch Failure Alert",
+ "config": {
+ "channel": "{{notifyChannel}}",
+ "message": "Task batch needs attention: {{batchResult.failCount}} failed out of {{batchResult.totalItems}} ({{batchResult.successCount}} succeeded)"
+ },
+ "position": {
+ "x": 220,
+ "y": 830
+ },
+ "outputs": [
+ "default"
+ ]
+ },
+ {
+ "id": "log-summary",
+ "type": "notify.log",
+ "label": "Batch Summary",
+ "config": {
+ "message": "Task batch completed: {{batchResult.successCount}}/{{batchResult.totalItems}} succeeded ({{batchResult.failCount}} failed)",
+ "level": "info"
+ },
+ "position": {
+ "x": 580,
+ "y": 830
+ },
+ "outputs": [
+ "default"
+ ]
  }
  ],
  "edges": [
@@ -18350,10 +18381,24 @@
  "sourcePort": "default"
  },
  {
- "id": "record-results->notify-complete",
+ "id": "record-results->has-batch-failures",
  "source": "record-results",
- "target": "notify-complete",
+ "target": "has-batch-failures",
  "sourcePort": "default"
+ },
+ {
+ "id": "has-batch-failures->notify-failures",
+ "source": "has-batch-failures",
+ "target": "notify-failures",
+ "sourcePort": "default",
+ "condition": "$output?.result === true"
+ },
+ {
+ "id": "has-batch-failures->log-summary",
+ "source": "has-batch-failures",
+ "target": "log-summary",
+ "sourcePort": "default",
+ "condition": "$output?.result !== true"
  }
  ]
  },
@@ -18372,8 +18417,8 @@
  "workflow-first",
  "core"
  ],
- "nodeCount": 50,
- "edgeCount": 55,
+ "nodeCount": 53,
+ "edgeCount": 59,
  "recommended": true,
  "enabled": true,
  "trigger": "trigger.task_available",
@@ -19279,6 +19324,62 @@
  "default"
  ]
  },
+ {
+ "id": "wt-failure-blocking",
+ "type": "condition.expression",
+ "label": "Non-Retryable WT Failure?",
+ "config": {
+ "expression": "$ctx.getNodeOutput('acquire-worktree')?.retryable === false"
+ },
+ "position": {
+ "x": 600,
+ "y": 1220
+ },
+ "outputs": [
+ "yes",
+ "no"
+ ]
+ },
+ {
+ "id": "set-blocked-wt-failed",
+ "type": "action.update_task_status",
+ "label": "Set Blocked (WT Fail)",
+ "config": {
+ "taskId": "{{taskId}}",
+ "status": "blocked",
+ "taskTitle": "{{taskTitle}}"
+ },
+ "position": {
+ "x": 470,
+ "y": 1350
+ },
+ "outputs": [
+ "default"
+ ]
+ },
+ {
+ "id": "annotate-blocked-wt-failed",
+ "type": "action.bosun_function",
+ "label": "Annotate Blocked (WT Fail)",
+ "config": {
+ "function": "tasks.update",
+ "args": {
+ "taskId": "{{taskId}}",
+ "fields": {
+ "cooldownUntil": "{{acquire-worktree.retryAt}}",
+ "blockedReason": "{{acquire-worktree.blockedReason}}",
+ "meta": "{{(() => { const current = ($data.taskMeta && typeof $data.taskMeta === 'object') ? $data.taskMeta : {}; const output = $ctx.getNodeOutput('acquire-worktree') || {}; return { ...current, autoRecovery: { active: true, reason: 'worktree_failure', failureKind: output.failureKind || 'branch_refresh_conflict', retryAt: output.retryAt || null, recoveryDelayMs: output.autoRecoverDelayMs || null, error: output.error || '', recordedAt: output.recordedAt || null }, worktreeFailure: { failureKind: output.failureKind || 'branch_refresh_conflict', retryable: output.retryable !== false, retryAt: output.retryAt || null, blockedReason: output.blockedReason || '', error: output.error || '', recordedAt: output.recordedAt || null } }; })()}}"
+ }
+ }
+ },
+ "position": {
+ "x": 470,
+ "y": 1480
+ },
+ "outputs": [
+ "default"
+ ]
+ },
  {
  "id": "set-todo-wt-failed",
  "type": "action.update_task_status",
@@ -19289,8 +19390,8 @@
  "taskTitle": "{{taskTitle}}"
  },
  "position": {
- "x": 600,
- "y": 1220
+ "x": 730,
+ "y": 1350
  },
  "outputs": [
  "default"
@@ -19305,7 +19406,7 @@
  },
  "position": {
  "x": 600,
- "y": 1350
+ "y": 1480
  },
  "outputs": [
  "default"
@@ -19316,11 +19417,11 @@
  "type": "notify.telegram",
  "label": "Notify WT Failed",
  "config": {
- "message": "⚠️ Worktree failed for \"{{taskTitle}}\" ({{taskId}})"
+ "message": "⚠️ Worktree failed for \"{{taskTitle}}\" ({{taskId}}){{acquire-worktree.recoveryNote}}"
  },
  "position": {
  "x": 600,
- "y": 1480
+ "y": 1740
  },
  "outputs": [
  "default"
@@ -19658,9 +19759,35 @@
  "condition": "$output?.result !== true"
  },
  {
- "id": "release-claim-wt-failed->set-todo-wt-failed",
+ "id": "release-claim-wt-failed->wt-failure-blocking",
  "source": "release-claim-wt-failed",
+ "target": "wt-failure-blocking",
+ "sourcePort": "default"
+ },
+ {
+ "id": "wt-failure-blocking->set-blocked-wt-failed",
+ "source": "wt-failure-blocking",
+ "target": "set-blocked-wt-failed",
+ "sourcePort": "yes",
+ "condition": "$output?.result === true"
+ },
+ {
+ "id": "wt-failure-blocking->set-todo-wt-failed",
+ "source": "wt-failure-blocking",
  "target": "set-todo-wt-failed",
+ "sourcePort": "no",
+ "condition": "$output?.result !== true"
+ },
+ {
+ "id": "set-blocked-wt-failed->annotate-blocked-wt-failed",
+ "source": "set-blocked-wt-failed",
+ "target": "annotate-blocked-wt-failed",
+ "sourcePort": "default"
+ },
+ {
+ "id": "annotate-blocked-wt-failed->release-slot-wt-failed",
+ "source": "annotate-blocked-wt-failed",
+ "target": "release-slot-wt-failed",
+ "sourcePort": "default"
  },
  {
@@ -20720,7 +20847,7 @@
  "nodeCount": 13,
  "trigger": "trigger.workflow_call",
  "variables": {
- "mergeMethod": "squash",
+ "mergeMethod": "merge",
  "labelNeedsFix": "bosun-needs-fix",
  "labelNeedsReview": "bosun-needs-human-review",
  "suspiciousDeletionRatio": 3,
@@ -20914,7 +21041,7 @@
  "type": "action.run_command",
  "label": "Review Gate: Merge Single PR",
  "config": {
- "command": "node -e \" const {execFileSync}=require('child_process'); const pr=(()=>{try{return JSON.parse(String(process.env.BOSUN_PR_INSPECT||'{}'))}catch{return {}}})(); const repo=String(pr.repo||'').trim(); const n=String(pr.prNumber||'').trim(); const ratio=Number('{{suspiciousDeletionRatio}}')||3; const minDel=Number('{{minDestructiveDeletions}}')||500; const labelReview=String('{{labelNeedsReview}}'||'bosun-needs-human-review'); const method=String('{{mergeMethod}}'||'squash').toLowerCase(); function gh(args){return execFileSync('gh',args,{encoding:'utf8',stdio:['pipe','pipe','pipe']}).trim();} if(!repo||!n){console.log(JSON.stringify({mergedCount:0,heldCount:0,skippedCount:1,skipped:[{repo,number:n,reason:'missing_repo_or_pr'}]}));process.exit(0);} try{ const viewRaw=gh(['pr','view',n,'--repo',repo,'--json','number,title,additions,deletions,changedFiles,isDraft']); const view=(()=>{try{return JSON.parse(viewRaw||'{}')}catch{return {}}})(); if(view?.isDraft===true){console.log(JSON.stringify({mergedCount:0,heldCount:0,skippedCount:1,skipped:[{repo,number:n,reason:'draft'}]}));process.exit(0);} const add=Number(view?.additions||0); const del=Number(view?.deletions||0); const changed=Number(view?.changedFiles||0); const destructive=(del>(add*ratio))&&(del>minDel); const tooWide=changed>250; if(destructive||tooWide){ gh(['pr','edit',n,'--repo',repo,'--add-label',labelReview]); gh(['pr','comment',n,'--repo',repo,'--body',':warning: Bosun held this PR for human review due to suspicious diff footprint.']); console.log(JSON.stringify({mergedCount:0,heldCount:1,skippedCount:0,held:[{repo,number:n,reason:destructive?'destructive_diff':'changed_files_too_large',additions:add,deletions:del,changedFiles:changed}]})); process.exit(0); } const checksRaw=gh(['pr','checks',n,'--repo',repo,'--json','name,state,bucket']); const checks=(()=>{try{return JSON.parse(checksRaw||'[]')}catch{return []}})(); const hasFailure=(Array.isArray(checks)?checks:[]).some((x)=>{const s=String(x?.state||'').toUpperCase();const b=String(x?.bucket||'').toUpperCase();return ['FAILURE','ERROR','TIMED_OUT','CANCELLED','STARTUP_FAILURE'].includes(s)||b==='FAIL';}); const hasPending=(Array.isArray(checks)?checks:[]).some((x)=>['QUEUED','IN_PROGRESS','PENDING','WAITING','REQUESTED'].includes(String(x?.state||'').toUpperCase())); if(hasFailure){console.log(JSON.stringify({mergedCount:0,heldCount:0,skippedCount:1,skipped:[{repo,number:n,reason:'ci_failed'}]}));process.exit(0);} if(hasPending){console.log(JSON.stringify({mergedCount:0,heldCount:0,skippedCount:1,skipped:[{repo,number:n,reason:'ci_pending'}]}));process.exit(0);} const mergeArgs=['pr','merge',n,'--repo',repo,'--delete-branch']; if(method==='rebase') mergeArgs.push('--rebase'); else if(method==='merge') mergeArgs.push('--merge'); else mergeArgs.push('--squash'); try{gh(mergeArgs);}catch(directErr){mergeArgs.push('--auto');gh(mergeArgs);} console.log(JSON.stringify({mergedCount:1,heldCount:0,skippedCount:0,merged:[{repo,number:n,title:String(view?.title||'')}] })); }catch(e){ console.log(JSON.stringify({mergedCount:0,heldCount:1,skippedCount:0,held:[{repo,number:n,reason:'merge_attempt_failed',error:String(e?.message||e)}]})); } \"",
21044
+ "command": "node -e \" const {execFileSync}=require('child_process'); const pr=(()=>{try{return JSON.parse(String(process.env.BOSUN_PR_INSPECT||'{}'))}catch{return {}}})(); const repo=String(pr.repo||'').trim(); const n=String(pr.prNumber||'').trim(); const ratio=Number('{{suspiciousDeletionRatio}}')||3; const minDel=Number('{{minDestructiveDeletions}}')||500; const labelReview=String('{{labelNeedsReview}}'||'bosun-needs-human-review'); const method=String('{{mergeMethod}}'||'merge').toLowerCase(); function gh(args){return execFileSync('gh',args,{encoding:'utf8',stdio:['pipe','pipe','pipe']}).trim();} if(!repo||!n){console.log(JSON.stringify({mergedCount:0,heldCount:0,skippedCount:1,skipped:[{repo,number:n,reason:'missing_repo_or_pr'}]}));process.exit(0);} try{ const viewRaw=gh(['pr','view',n,'--repo',repo,'--json','number,title,additions,deletions,changedFiles,isDraft']); const view=(()=>{try{return JSON.parse(viewRaw||'{}')}catch{return {}}})(); if(view?.isDraft===true){console.log(JSON.stringify({mergedCount:0,heldCount:0,skippedCount:1,skipped:[{repo,number:n,reason:'draft'}]}));process.exit(0);} const add=Number(view?.additions||0); const del=Number(view?.deletions||0); const changed=Number(view?.changedFiles||0); const destructive=(del>(add*ratio))&&(del>minDel); const tooWide=changed>250; if(destructive||tooWide){ gh(['pr','edit',n,'--repo',repo,'--add-label',labelReview]); gh(['pr','comment',n,'--repo',repo,'--body',':warning: Bosun held this PR for human review due to suspicious diff footprint.']); console.log(JSON.stringify({mergedCount:0,heldCount:1,skippedCount:0,held:[{repo,number:n,reason:destructive?'destructive_diff':'changed_files_too_large',additions:add,deletions:del,changedFiles:changed}]})); process.exit(0); } const checksRaw=gh(['pr','checks',n,'--repo',repo,'--json','name,state,bucket']); const checks=(()=>{try{return JSON.parse(checksRaw||'[]')}catch{return []}})(); const hasFailure=(Array.isArray(checks)?checks:[]).some((x)=>{const s=String(x?.state||'').toUpperCase();const b=String(x?.bucket||'').toUpperCase();return ['FAILURE','ERROR','TIMED_OUT','CANCELLED','STARTUP_FAILURE'].includes(s)||b==='FAIL';}); const hasPending=(Array.isArray(checks)?checks:[]).some((x)=>['QUEUED','IN_PROGRESS','PENDING','WAITING','REQUESTED'].includes(String(x?.state||'').toUpperCase())); if(hasFailure){console.log(JSON.stringify({mergedCount:0,heldCount:0,skippedCount:1,skipped:[{repo,number:n,reason:'ci_failed'}]}));process.exit(0);} if(hasPending){console.log(JSON.stringify({mergedCount:0,heldCount:0,skippedCount:1,skipped:[{repo,number:n,reason:'ci_pending'}]}));process.exit(0);} const mergeArgs=['pr','merge',n,'--repo',repo,'--delete-branch']; if(method==='rebase') mergeArgs.push('--rebase'); else if(method==='merge') mergeArgs.push('--merge'); else mergeArgs.push('--squash'); try{gh(mergeArgs);}catch(directErr){mergeArgs.push('--auto');gh(mergeArgs);} console.log(JSON.stringify({mergedCount:1,heldCount:0,skippedCount:0,merged:[{repo,number:n,title:String(view?.title||'')}] })); }catch(e){ console.log(JSON.stringify({mergedCount:0,heldCount:1,skippedCount:0,held:[{repo,number:n,reason:'merge_attempt_failed',error:String(e?.message||e)}]})); } \"",
20918
21045
  "continueOnError": true,
20919
21046
  "failOnError": false,
20920
21047
  "env": {
@@ -21107,14 +21234,15 @@
21107
21234
  "nodeCount": 17,
21108
21235
  "trigger": "trigger.schedule",
21109
21236
  "variables": {
21110
- "mergeMethod": "squash",
21237
+ "mergeMethod": "merge",
21111
21238
  "labelNeedsFix": "bosun-needs-fix",
21112
21239
  "labelNeedsReview": "bosun-needs-human-review",
21113
21240
  "repoScope": "auto",
21114
21241
  "maxPrs": 25,
21115
21242
  "intervalMs": 90000,
21116
21243
  "suspiciousDeletionRatio": 3,
21117
- "minDestructiveDeletions": 500
21244
+ "minDestructiveDeletions": 500,
21245
+ "autoApplySuggestions": true
21118
21246
  },
21119
21247
  "nodes": [
21120
21248
  {
@@ -21339,7 +21467,7 @@
21339
21467
  "type": "action.run_command",
21340
21468
  "label": "Review Gate: Programmatic Merge",
21341
21469
  "config": {
21342
- "command": "node -e \" const {execFileSync}=require('child_process'); const raw=String(process.env.BOSUN_FETCH_AND_CLASSIFY||''); const payload=(()=>{try{return JSON.parse(raw||'{}')}catch{return {}}})(); const candidates=Array.isArray(payload.readyCandidates)?payload.readyCandidates:[]; const ratio=Number('{{suspiciousDeletionRatio}}')||3; const minDel=Number('{{minDestructiveDeletions}}')||500; const labelReview=String('{{labelNeedsReview}}'||'bosun-needs-human-review'); const method=String('{{mergeMethod}}'||'squash').toLowerCase(); const merged=[]; const held=[]; const skipped=[]; function gh(args){return execFileSync('gh',args,{encoding:'utf8',stdio:['pipe','pipe','pipe']}).trim();} for(const c of candidates){ const repo=String(c?.repo||'').trim(); const n=String(c?.n||'').trim(); if(!repo||!n){skipped.push({repo,number:n,reason:'missing_repo_or_pr'});continue;} try{ const viewRaw=gh(['pr','view',n,'--repo',repo,'--json','number,title,additions,deletions,changedFiles,isDraft']); const view=(()=>{try{return JSON.parse(viewRaw||'{}')}catch{return {}}})(); if(view?.isDraft===true){skipped.push({repo,number:n,reason:'draft'});continue;} const add=Number(view?.additions||0); const del=Number(view?.deletions||0); const changed=Number(view?.changedFiles||0); const destructive=(del>(add*ratio))&&(del>minDel); const tooWide=changed>250; if(destructive||tooWide){ gh(['pr','edit',n,'--repo',repo,'--add-label',labelReview]); gh(['pr','comment',n,'--repo',repo,'--body',':warning: Bosun held this PR for human review due to suspicious diff footprint.']); held.push({repo,number:n,reason:destructive?'destructive_diff':'changed_files_too_large',additions:add,deletions:del,changedFiles:changed}); continue; } const checksRaw=gh(['pr','checks',n,'--repo',repo,'--json','name,state,bucket']); const checks=(()=>{try{return JSON.parse(checksRaw||'[]')}catch{return []}})(); const hasFailure=(Array.isArray(checks)?checks:[]).some((x)=>{ const s=String(x?.state||'').toUpperCase(); const b=String(x?.bucket||'').toUpperCase(); return ['FAILURE','ERROR','TIMED_OUT','CANCELLED','STARTUP_FAILURE'].includes(s) || b==='FAIL'; }); const hasPending=(Array.isArray(checks)?checks:[]).some((x)=>{ const s=String(x?.state||'').toUpperCase(); return ['QUEUED','IN_PROGRESS','PENDING','WAITING','REQUESTED'].includes(s); }); if(hasFailure){skipped.push({repo,number:n,reason:'ci_failed'});continue;} if(hasPending){skipped.push({repo,number:n,reason:'ci_pending'});continue;} const mergeArgs=['pr','merge',n,'--repo',repo,'--delete-branch']; if(method==='rebase') mergeArgs.push('--rebase'); else if(method==='merge') mergeArgs.push('--merge'); else mergeArgs.push('--squash'); try{gh(mergeArgs);}catch(directErr){ mergeArgs.push('--auto'); gh(mergeArgs); } merged.push({repo,number:n,title:String(view?.title||'')}); }catch(e){ held.push({repo,number:n,reason:'merge_attempt_failed',error:String(e?.message||e)}); } } console.log(JSON.stringify({mergedCount:merged.length,heldCount:held.length,skippedCount:skipped.length,merged,held,skipped})); \"",
21470
+ "command": "node -e \" const {execFileSync}=require('child_process'); const raw=String(process.env.BOSUN_FETCH_AND_CLASSIFY||''); const payload=(()=>{try{return JSON.parse(raw||'{}')}catch{return {}}})(); const candidates=Array.isArray(payload.readyCandidates)?payload.readyCandidates:[]; const ratio=Number('{{suspiciousDeletionRatio}}')||3; const minDel=Number('{{minDestructiveDeletions}}')||500; const labelReview=String('{{labelNeedsReview}}'||'bosun-needs-human-review'); const method=String('{{mergeMethod}}'||'merge').toLowerCase(); const merged=[]; const held=[]; const skipped=[]; function gh(args){return execFileSync('gh',args,{encoding:'utf8',stdio:['pipe','pipe','pipe']}).trim();} for(const c of candidates){ const repo=String(c?.repo||'').trim(); const n=String(c?.n||'').trim(); if(!repo||!n){skipped.push({repo,number:n,reason:'missing_repo_or_pr'});continue;} try{ const viewRaw=gh(['pr','view',n,'--repo',repo,'--json','number,title,additions,deletions,changedFiles,isDraft']); const view=(()=>{try{return JSON.parse(viewRaw||'{}')}catch{return {}}})(); if(view?.isDraft===true){skipped.push({repo,number:n,reason:'draft'});continue;} const add=Number(view?.additions||0); const del=Number(view?.deletions||0); const changed=Number(view?.changedFiles||0); const destructive=(del>(add*ratio))&&(del>minDel); const tooWide=changed>250; if(destructive||tooWide){ gh(['pr','edit',n,'--repo',repo,'--add-label',labelReview]); gh(['pr','comment',n,'--repo',repo,'--body',':warning: Bosun held this PR for human review due to suspicious diff footprint.']); held.push({repo,number:n,reason:destructive?'destructive_diff':'changed_files_too_large',additions:add,deletions:del,changedFiles:changed}); continue; } const checksRaw=gh(['pr','checks',n,'--repo',repo,'--json','name,state,bucket']); const checks=(()=>{try{return JSON.parse(checksRaw||'[]')}catch{return []}})(); const hasFailure=(Array.isArray(checks)?checks:[]).some((x)=>{ const s=String(x?.state||'').toUpperCase(); const b=String(x?.bucket||'').toUpperCase(); return ['FAILURE','ERROR','TIMED_OUT','CANCELLED','STARTUP_FAILURE'].includes(s) || b==='FAIL'; }); const hasPending=(Array.isArray(checks)?checks:[]).some((x)=>{ const s=String(x?.state||'').toUpperCase(); return ['QUEUED','IN_PROGRESS','PENDING','WAITING','REQUESTED'].includes(s); }); if(hasFailure){skipped.push({repo,number:n,reason:'ci_failed'});continue;} if(hasPending){skipped.push({repo,number:n,reason:'ci_pending'});continue;} const doApplySuggestions=String('{{autoApplySuggestions}}'||'true')==='true'&&process.env.BOSUN_AUTO_APPLY_SUGGESTIONS!=='false'; if(doApplySuggestions){ try{ const toolPath=require('path').resolve(process.cwd(),'tools','apply-pr-suggestions.mjs'); if(require('fs').existsSync(toolPath)){ const sugOut=execFileSync('node',[toolPath,'--owner',repo.split('/')[0],'--repo',repo.split('/')[1],n,'--json'],{encoding:'utf8',timeout:60000,stdio:['pipe','pipe','pipe']}); const sugRes=(()=>{try{return JSON.parse(sugOut)}catch{return null}})(); if(sugRes?.commitSha) console.error('[watchdog] auto-applied '+sugRes.applied+' suggestion(s) on PR #'+n+' → '+sugRes.commitSha.slice(0,8)); } }catch(sugErr){console.error('[watchdog] suggestion auto-apply skipped for PR #'+n+': '+String(sugErr?.message||sugErr).slice(0,120));} } const mergeArgs=['pr','merge',n,'--repo',repo,'--delete-branch']; if(method==='rebase') mergeArgs.push('--rebase'); else if(method==='merge') mergeArgs.push('--merge'); else mergeArgs.push('--squash'); try{gh(mergeArgs);}catch(directErr){ 
mergeArgs.push('--auto'); gh(mergeArgs); } merged.push({repo,number:n,title:String(view?.title||'')}); }catch(e){ held.push({repo,number:n,reason:'merge_attempt_failed',error:String(e?.message||e)}); } } console.log(JSON.stringify({mergedCount:merged.length,heldCount:held.length,skippedCount:skipped.length,merged,held,skipped})); \"",
21343
21471
  "continueOnError": true,
21344
21472
  "failOnError": false,
21345
21473
  "env": {
@@ -21356,11 +21484,11 @@
21356
21484
  },
21357
21485
  {
21358
21486
  "id": "notify",
21359
- "type": "notify.telegram",
21487
+ "type": "notify.log",
21360
21488
  "label": "Watchdog Report",
21361
21489
  "config": {
21362
- "message": ":bug: Bosun PR Watchdog cycle complete — fix-dispatched: {{fixNeeded}} | candidates-reviewed: {{readyCandidates}}",
21363
- "silent": true
21490
+ "message": "Bosun PR Watchdog cycle complete — see live digest/status board for streaming updates",
21491
+ "level": "info"
21364
21492
  },
21365
21493
  "position": {
21366
21494
  "x": 400,
@@ -22226,7 +22354,7 @@
22226
22354
  "type": "action.run_command",
22227
22355
  "label": "Auto-Merge PR",
22228
22356
  "config": {
22229
- "command": "gh pr merge {{prNumber}} --auto --squash",
22357
+ "command": "gh pr merge {{prNumber}} --auto --merge",
22230
22358
  "failOnError": true,
22231
22359
  "maxRetries": "{{maxRetries}}",
22232
22360
  "retryDelayMs": 30000,
@@ -23856,7 +23984,7 @@
23856
23984
  "type": "action.run_command",
23857
23985
  "label": "List Active Sessions",
23858
23986
  "config": {
23859
- "command": "bosun agent list --json --active",
23987
+ "command": "node -e \"const fs = require('node:fs');const path = require('node:path');const { pathToFileURL } = require('node:url');const cwd = process.cwd();const mirrorMarker = `${path.sep}.bosun${path.sep}workspaces${path.sep}`.toLowerCase();let repoRoot = cwd;if (cwd.toLowerCase().includes(mirrorMarker)) {const sourceRepoRoot = path.resolve(cwd, '..', '..', '..', '..');if (fs.existsSync(path.join(sourceRepoRoot, 'infra', 'session-tracker.mjs'))) repoRoot = sourceRepoRoot;}const trackerModuleUrl = pathToFileURL(path.join(repoRoot, 'infra', 'session-tracker.mjs')).href;import(trackerModuleUrl).then(({ getSessionTracker }) => {const tracker = getSessionTracker();const sessions = tracker.getActiveSessions().map((session) => {const progress = tracker.getProgressStatus(session.taskId);return {id: session.taskId,taskId: session.taskId,taskTitle: session.taskTitle || null,status: progress.status,idleMs: progress.idleMs,totalEvents: progress.totalEvents,elapsedMs: progress.elapsedMs,lastEventType: progress.lastEventType,recommendation: progress.recommendation,tokenPercent: null};});console.log(JSON.stringify(sessions));}).catch((err) => { console.error(err?.stack || String(err)); process.exit(1); });\"",
23860
23988
  "continueOnError": true
23861
23989
  },
23862
23990
  "position": {
@@ -23872,7 +24000,7 @@
23872
24000
  "type": "condition.expression",
23873
24001
  "label": "Any Active?",
23874
24002
  "config": {
23875
- "expression": "($ctx.getNodeOutput('list-sessions')?.output || '[]') !== '[]' && ($ctx.getNodeOutput('list-sessions')?.output || '').length > 5"
24003
+ "expression": "(() => {const raw = String($ctx.getNodeOutput('list-sessions')?.output || '[]');const lines = raw.split(/\\r?\\n/).map((line) => line.trim()).filter(Boolean);const candidate = lines.length ? lines[lines.length - 1] : '[]';try {const parsed = JSON.parse(candidate);return Array.isArray(parsed) ? parsed : [];} catch {return [];}})().length > 0"
23876
24004
  },
23877
24005
  "position": {
23878
24006
  "x": 400,
@@ -23887,7 +24015,7 @@
23887
24015
  "type": "action.run_command",
23888
24016
  "label": "Check Session Health",
23889
24017
  "config": {
23890
- "command": "bosun agent health --json",
24018
+ "command": "node -e \"const fs = require('node:fs');const path = require('node:path');const { pathToFileURL } = require('node:url');const cwd = process.cwd();const mirrorMarker = `${path.sep}.bosun${path.sep}workspaces${path.sep}`.toLowerCase();let repoRoot = cwd;if (cwd.toLowerCase().includes(mirrorMarker)) {const sourceRepoRoot = path.resolve(cwd, '..', '..', '..', '..');if (fs.existsSync(path.join(sourceRepoRoot, 'infra', 'session-tracker.mjs'))) repoRoot = sourceRepoRoot;}const trackerModuleUrl = pathToFileURL(path.join(repoRoot, 'infra', 'session-tracker.mjs')).href;import(trackerModuleUrl).then(({ getSessionTracker }) => {const tracker = getSessionTracker();const sessions = tracker.getActiveSessions().map((session) => {const progress = tracker.getProgressStatus(session.taskId);return {id: session.taskId,taskId: session.taskId,taskTitle: session.taskTitle || null,status: progress.status,idleMs: progress.idleMs,totalEvents: progress.totalEvents,elapsedMs: progress.elapsedMs,lastEventType: progress.lastEventType,recommendation: progress.recommendation,tokenPercent: null};});console.log(JSON.stringify(sessions));}).catch((err) => { console.error(err?.stack || String(err)); process.exit(1); });\"",
23891
24019
  "continueOnError": true
23892
24020
  },
23893
24021
  "position": {
@@ -23903,7 +24031,7 @@
23903
24031
  "type": "condition.expression",
23904
24032
  "label": "Any Unhealthy?",
23905
24033
  "config": {
23906
- "expression": "(() => { const out = String($ctx.getNodeOutput('check-health')?.output || ''); const maxIdleMs = Number($data?.maxIdleMs || 600000); const maxTokenPercent = Number($data?.maxTokenPercent || 85); const idleMatch = out.match(/idle(?:_ms)?\\s*[:=]\\s*(\\d+)/i); const tokenMatch = out.match(/token(?:_usage|_percent)?\\s*[:=]\\s*(\\d+(?:\\.\\d+)?)/i); const idleExceeded = idleMatch ? Number(idleMatch[1]) > maxIdleMs : false; const tokenExceeded = tokenMatch ? Number(tokenMatch[1]) >= maxTokenPercent : false; return out.includes('stalled') || out.includes('timeout') || idleExceeded || tokenExceeded; })()"
24034
+ "expression": "(() => { const sessions = (() => {const raw = String($ctx.getNodeOutput('check-health')?.output || '[]');const lines = raw.split(/\\r?\\n/).map((line) => line.trim()).filter(Boolean);const candidate = lines.length ? lines[lines.length - 1] : '[]';try {const parsed = JSON.parse(candidate);return Array.isArray(parsed) ? parsed : [];} catch {return [];}})(); const maxIdleMs = Number($data?.maxIdleMs || 600000); const maxTokenPercent = Number($data?.maxTokenPercent || 85); return sessions.some((item) => { const status = String(item?.status || '').toLowerCase(); const idleMs = Number(item?.idleMs || 0); const tokenPercent = Number(item?.tokenPercent); return status === 'idle' || status === 'stalled' || status === 'timeout' || idleMs > maxIdleMs || (Number.isFinite(tokenPercent) && tokenPercent >= maxTokenPercent); }); })()"
23907
24035
  },
23908
24036
  "position": {
23909
24037
  "x": 200,
@@ -34683,7 +34811,7 @@
34683
34811
  "trigger": "trigger.schedule",
34684
34812
  "variables": {
34685
34813
  "sessionTimeoutMs": 5400000,
34686
- "branch": "chore/code-quality-striker-{{_runId}}",
34814
+ "branch": "chore/code-quality-striker",
34687
34815
  "baseBranch": "main",
34688
34816
  "sessionLogPath": ".bosun-monitor/code-quality-striker.md",
34689
34817
  "maxFilesPerSession": 6,
@@ -34766,7 +34894,7 @@
34766
34894
  "config": {
34767
34895
  "timeoutMs": "{{sessionTimeoutMs}}",
34768
34896
  "sdk": "auto",
34769
- "prompt": "# Code Quality Striker\n\nYou are a **structural quality agent**. Your sole mandate is to improve the\ninternal structure of the codebase so that future agentic models can work on\nit more efficiently — smaller files, clearer module boundaries, zero\nduplication, and self-contained functions.\n\n## Session Constraints\n\n- **Hard session cap**: you have at most 90 minutes total. Budget your time.\n- **You MUST open a PR before ending** — a session with no PR is a failed\n session. If you run out of time mid-refactor, commit what you have, push,\n and open the PR immediately even if the work is partial, AS LONG AS all\n tests pass.\n- **Maximum {{maxFilesPerSession}} source files changed** in a single PR.\n Keep diffs small and reviewable. Better to do one clean split per session\n than attempt a mega-refactor.\n- You may run multiple sessions and PRs over time. Prefer incremental progress.\n\n## ✅ Allowed Changes (ONLY these)\n\n1. **Module decomposition** — extract a large file into smaller, focused\n modules. The extracted module must be imported back so the public surface\n is identical.\n2. **Function splitting** — break functions > ~80 lines into smaller,\n well-named helpers within the same file or a co-located util module.\n3. **Deduplication** — extract identical or near-identical logic blocks into\n a shared helper. Must not change call-site behaviour.\n4. **Dead code removal** — remove functions, variables, or imports that are\n verifiably unreferenced (no callers anywhere in the repo).\n5. **Import cleanup** — remove unused imports, deduplicate import statements,\n consolidate barrel imports.\n\n## ❌ Forbidden Changes (HARD STOPS — never do these)\n\n- Adding, removing, or changing any exported function signature or return value\n- Changing any HTTP route path, method, or response shape\n- Changing any config key names or default values\n- Adding new features, flags, or options of any kind\n- Changing test assertions or test logic\n- Renaming exported symbols (only rename internal/private symbols)\n- Adding comments, JSDoc, or inline documentation (unless minimal and necessary)\n- Changing error messages visible to users or logs (string literals)\n- Any change to .json, .sh, .md, .yaml, .html, .css, or non-.mjs/.js files\n (unless you are only touching an import path string that is broken by a move)\n\n## Workflow\n\n### Step 1 — Identify your target\n\nUse the candidate file list provided (sorted by size, largest first):\n\n```\n{{scan-candidates.output}}\n```\n\nPick **1–3 files** for this session. Prioritise:\n- Files > 500 lines used by multiple modules (high parallel-conflict risk)\n- Files with clearly repeated logic blocks\n- Files with functions > 100 lines that have distinct sub-responsibilities\n\nRead each target file in full before making any decision. Do NOT edit\nanything you have not fully read.\n\n### Step 2 — Plan your split in writing\n\nBefore touching the file, write a short plan (to yourself, as a comment in\nyour reasoning — DO NOT add it to the code):\n- What gets extracted and where\n- New file names (follow existing naming conventions in that directory)\n- Which exports stay vs. 
move\n- Any callers that will need their import paths updated\n\n### Step 3 — Extract and wire up\n\n- Create the new module file(s) under the same directory as the source file.\n- Update the source file to re-export or directly import the extracted piece\n so every existing call-site continues to work without modification.\n- Update any OTHER files that directly imported from the source file, if and\n only if you moved an export that those files reference. Use\n `grep -r 'importedName' --include='*.mjs' --include='*.js'` to find callers.\n- **Do not touch callers unless strictly required by the move.**\n\n### Step 4 — Validate before committing\n\nRun ALL of the following in order. Do not commit if any fail:\n\n```bash\n# 1. Syntax check every file you touched\n{{syntaxCheckCommand}} <file1> <file2> ...\n\n# 2. Lint check (if configured)\n{{lintCommand}}\n\n# 3. Full test suite — must be 0 failures, 0 unexpected skips\n{{testCommand}}\n\n# 4. Build — must pass clean\n{{buildCommand}}\n```\n\nIf tests fail, **revert your change** (`git checkout -- <file>`) and either:\n- Attempt a smaller, safer split of the same file, OR\n- Move to a different target file\n\n**Never push a failing test suite.**\n\n### Step 5 — Commit, push, and open the PR\n\nBranch name: `chore/code-quality-striker-{{_runId}}`\nBase branch: `{{baseBranch}}`\n\nCommit message format:\n```\nrefactor(<module>): split <description>\n\n- extracted <what> into <new-file>\n- <any other bullet points>\n\nNo functional changes. All tests pass.\n```\n\nPR title: `refactor: code quality pass — <one-line summary>`\n\nPR body template:\n```markdown\n## Code Quality Pass\n\n**Session**: code-quality-striker {{_runId}}\n**Scope**: structural refactor only — zero functional changes\n\n### Changes\n- <bullet per extracted module or dedup>\n\n### Validation\n- `{{syntaxCheckCommand}}` passed on all touched files\n- `{{testCommand}}` passed (N tests)\n- `{{buildCommand}}` passed\n\n### Why\n<one sentence: \"X was Y lines with Z responsibilities; split to improve\nparallel edit safety for future agent sessions.\">\n```\n\n### Step 6 — Write session log\n\nAppend a new entry to `{{sessionLogPath}}` using this\nexact format (create the file if it does not exist):\n\n```markdown\n## <ISO timestamp with timezone>\n\n- Scope: <one sentence describing what was refactored>\n- Files changed: <comma-separated list>\n- Strategy: <what split/dedup/cleanup was performed and why>\n- Validation evidence:\n - `{{syntaxCheckCommand}}` passed on all touched files\n - `{{testCommand}}` passed (N tests)\n - `{{buildCommand}}` passed\n- PR: #<number> — `<branch name>`\n```\n\n## Time Budget Warning\n\nIf you have fewer than 15 minutes remaining:\n- Stop new analysis immediately\n- Commit and push whatever passing changes you have\n- Open the PR even if the scope is smaller than planned\n- Write the session log\n- Stop\n\nA small, clean, tested PR is always better than nothing."
34897
+ "prompt": "# Code Quality Striker\n\nYou are a **structural quality agent**. Your sole mandate is to improve the\ninternal structure of the codebase so that future agentic models can work on\nit more efficiently — smaller files, clearer module boundaries, zero\nduplication, and self-contained functions.\n\n## Session Constraints\n\n- **Hard session cap**: you have at most 90 minutes total. Budget your time.\n- **You MUST open a PR before ending** — a session with no PR is a failed\n session. If you run out of time mid-refactor, commit what you have, push,\n and open the PR immediately even if the work is partial, AS LONG AS all\n tests pass.\n- **Maximum {{maxFilesPerSession}} source files changed** in a single PR.\n Keep diffs small and reviewable. Better to do one clean split per session\n than attempt a mega-refactor.\n- You may run multiple sessions and PRs over time. Prefer incremental progress.\n\n## ✅ Allowed Changes (ONLY these)\n\n1. **Module decomposition** — extract a large file into smaller, focused\n modules. The extracted module must be imported back so the public surface\n is identical.\n2. **Function splitting** — break functions > ~80 lines into smaller,\n well-named helpers within the same file or a co-located util module.\n3. **Deduplication** — extract identical or near-identical logic blocks into\n a shared helper. Must not change call-site behaviour.\n4. **Dead code removal** — remove functions, variables, or imports that are\n verifiably unreferenced (no callers anywhere in the repo).\n5. **Import cleanup** — remove unused imports, deduplicate import statements,\n consolidate barrel imports.\n\n## ❌ Forbidden Changes (HARD STOPS — never do these)\n\n- Adding, removing, or changing any exported function signature or return value\n- Changing any HTTP route path, method, or response shape\n- Changing any config key names or default values\n- Adding new features, flags, or options of any kind\n- Changing test assertions or test logic\n- Renaming exported symbols (only rename internal/private symbols)\n- Adding comments, JSDoc, or inline documentation (unless minimal and necessary)\n- Changing error messages visible to users or logs (string literals)\n- Any change to .json, .sh, .md, .yaml, .html, .css, or non-.mjs/.js files\n (unless you are only touching an import path string that is broken by a move)\n\n## Workflow\n\n### Step 1 — Identify your target\n\nUse the candidate file list provided (sorted by size, largest first):\n\n```\n{{scan-candidates.output}}\n```\n\nPick **1–3 files** for this session. Prioritise:\n- Files > 500 lines used by multiple modules (high parallel-conflict risk)\n- Files with clearly repeated logic blocks\n- Files with functions > 100 lines that have distinct sub-responsibilities\n\nRead each target file in full before making any decision. Do NOT edit\nanything you have not fully read.\n\n### Step 2 — Plan your split in writing\n\nBefore touching the file, write a short plan (to yourself, as a comment in\nyour reasoning — DO NOT add it to the code):\n- What gets extracted and where\n- New file names (follow existing naming conventions in that directory)\n- Which exports stay vs. 
move\n- Any callers that will need their import paths updated\n\n### Step 3 — Extract and wire up\n\n- Create the new module file(s) under the same directory as the source file.\n- Update the source file to re-export or directly import the extracted piece\n so every existing call-site continues to work without modification.\n- Update any OTHER files that directly imported from the source file, if and\n only if you moved an export that those files reference. Use\n `grep -r 'importedName' --include='*.mjs' --include='*.js'` to find callers.\n- **Do not touch callers unless strictly required by the move.**\n\n### Step 4 — Validate before committing\n\nRun ALL of the following in order. Do not commit if any fail:\n\n```bash\n# 1. Syntax check every file you touched\n{{syntaxCheckCommand}} <file1> <file2> ...\n\n# 2. Lint check (if configured)\n{{lintCommand}}\n\n# 3. Full test suite — must be 0 failures, 0 unexpected skips\n{{testCommand}}\n\n# 4. Build — must pass clean\n{{buildCommand}}\n```\n\nIf tests fail, **revert your change** (`git checkout -- <file>`) and either:\n- Attempt a smaller, safer split of the same file, OR\n- Move to a different target file\n\n**Never push a failing test suite.**\n\n### Step 5 — Commit, push, and open the PR\n\nBranch name: `{{branch}}`\nBase branch: `{{baseBranch}}`\n\nCommit message format:\n```\nrefactor(<module>): split <description>\n\n- extracted <what> into <new-file>\n- <any other bullet points>\n\nNo functional changes. All tests pass.\n```\n\nPR title: `refactor: code quality pass — <one-line summary>`\n\nPR body template:\n```markdown\n## Code Quality Pass\n\n**Session**: {{branch}}\n**Scope**: structural refactor only — zero functional changes\n\n### Changes\n- <bullet per extracted module or dedup>\n\n### Validation\n- `{{syntaxCheckCommand}}` passed on all touched files\n- `{{testCommand}}` passed (N tests)\n- `{{buildCommand}}` passed\n\n### Why\n<one sentence: \"X was Y lines with Z responsibilities; split to improve\nparallel edit safety for future agent sessions.\">\n```\n\n### Step 6 — Write session log\n\nAppend a new entry to `{{sessionLogPath}}` using this\nexact format (create the file if it does not exist):\n\n```markdown\n## <ISO timestamp with timezone>\n\n- Scope: <one sentence describing what was refactored>\n- Files changed: <comma-separated list>\n- Strategy: <what split/dedup/cleanup was performed and why>\n- Validation evidence:\n - `{{syntaxCheckCommand}}` passed on all touched files\n - `{{testCommand}}` passed (N tests)\n - `{{buildCommand}}` passed\n- PR: #<number> — `<branch name>`\n```\n\n## Time Budget Warning\n\nIf you have fewer than 15 minutes remaining:\n- Stop new analysis immediately\n- Commit and push whatever passing changes you have\n- Open the PR even if the scope is smaller than planned\n- Write the session log\n- Stop\n\nA small, clean, tested PR is always better than nothing."
34770
34898
  },
34771
34899
  "position": {
34772
34900
  "x": 400,
@@ -34828,7 +34956,7 @@
34828
34956
  "type": "action.create_pr",
34829
34957
  "label": "Open Quality PR",
34830
34958
  "config": {
34831
- "title": "refactor: code quality pass {{_runId}}",
34959
+ "title": "refactor: code quality pass",
34832
34960
  "body": "Automated code-quality session. Structural refactor only — zero functional changes. See `.bosun-monitor/code-quality-striker.md` for session details.",
34833
34961
  "branch": "{{branch}}",
34834
34962
  "baseBranch": "{{baseBranch}}",
@@ -34851,7 +34979,7 @@
34851
34979
  "type": "notify.telegram",
34852
34980
  "label": "Notify PR Opened",
34853
34981
  "config": {
34854
- "message": ":check: Code quality striker session complete.\nPR opened: **{{branch}}**\nRun ID: `{{_runId}}`",
34982
+ "message": ":check: Code quality striker session complete.\nPR opened: **{{branch}}**",
34855
34983
  "silent": true
34856
34984
  },
34857
34985
  "position": {
@@ -34867,7 +34995,7 @@
34867
34995
  "type": "notify.telegram",
34868
34996
  "label": "Notify — Validation Failed",
34869
34997
  "config": {
34870
- "message": ":alert: Code quality striker **validation failed** for run `{{_runId}}`.\n\nThe agent produced changes that broke tests or build. No PR was created.\nCheck `.bosun-monitor/code-quality-striker.md` for details."
34998
+ "message": ":alert: Code quality striker **validation failed**.\n\nThe agent produced changes that broke tests or build. No PR was created.\nCheck `.bosun-monitor/code-quality-striker.md` for details."
34871
34999
  },
34872
35000
  "position": {
34873
35001
  "x": 600,
@@ -34882,7 +35010,7 @@
34882
35010
  "type": "notify.log",
34883
35011
  "label": "Log Failure",
34884
35012
  "config": {
34885
- "message": "Code quality striker run {{_runId}} failed validation — no PR created.",
35013
+ "message": "Code quality striker validation failed — no PR created.",
34886
35014
  "level": "warn"
34887
35015
  },
34888
35016
  "position": {
@@ -37531,8 +37659,7 @@
37531
37659
  "pollStatus": "todo",
37532
37660
  "maxBatchSize": 5,
37533
37661
  "defaultBaseBranch": "main",
37534
- "draftPR": true,
37535
- "notifyChannel": "telegram"
37662
+ "draftPR": true
37536
37663
  },
37537
37664
  "nodes": [
37538
37665
  {
@@ -37560,7 +37687,7 @@
37560
37687
  "command": "node",
37561
37688
  "args": [
37562
37689
  "-e",
37563
- "\n import(\"./kanban-adapter.mjs\")\n .then(k => k.listTasks(undefined, { status: \"todo\" }))\n .then(tasks => {\n const filtered = (tasks || []).filter((task) => {\n const repository = typeof task?.repository === \"string\" ? task.repository.trim() : \"\";\n const workspace = typeof task?.workspace === \"string\" ? task.workspace.trim() : \"\";\n return task && task.status === \"todo\" && !task.draft && repository.length > 0 && workspace.length > 0;\n });\n const batch = filtered.slice(0, parseInt(process.env.MAX_BATCH || \"5\"));\n console.log(JSON.stringify(batch.map(t => ({\n taskId: t.id,\n taskTitle: t.title || t.id,\n branch: t.branch || t.metadata?.branch || null,\n repository: t.repository || null,\n workspace: t.workspace || null,\n }))));\n })\n .catch(e => { console.error(e.message); process.exit(1); });\n "
37690
+ "\n const fs = require(\"node:fs\");\n const path = require(\"node:path\");\n const { pathToFileURL } = require(\"node:url\");\n const cwd = process.cwd();\n const mirrorMarker = (path.sep + \".bosun\" + path.sep + \"workspaces\" + path.sep).toLowerCase();\n let repoRoot = cwd;\n if (cwd.toLowerCase().includes(mirrorMarker)) {\n const sourceRepoRoot = path.resolve(cwd, \"..\", \"..\", \"..\", \"..\");\n if (fs.existsSync(path.join(sourceRepoRoot, \"kanban\", \"kanban-adapter.mjs\"))) repoRoot = sourceRepoRoot;\n }\n const kanbanModuleUrl = pathToFileURL(path.join(repoRoot, \"kanban\", \"kanban-adapter.mjs\")).href;\n import(kanbanModuleUrl)\n .then(k => k.listTasks(undefined, { status: \"todo\" }))\n .then(tasks => {\n const filtered = (tasks || []).filter((task) => {\n const repository = typeof task?.repository === \"string\" ? task.repository.trim() : \"\";\n const workspace = typeof task?.workspace === \"string\" ? task.workspace.trim() : \"\";\n return task && task.status === \"todo\" && !task.draft && repository.length > 0 && workspace.length > 0;\n });\n const batch = filtered.slice(0, parseInt(process.env.MAX_BATCH || \"5\"));\n console.log(JSON.stringify(batch.map(t => ({\n taskId: t.id,\n taskTitle: t.title || t.id,\n branch: t.branch || t.metadata?.branch || null,\n repository: t.repository || null,\n workspace: t.workspace || null,\n }))));\n })\n .catch(e => { console.error(e.message); process.exit(1); });\n "
37564
37691
  ],
37565
37692
  "env": {
37566
37693
  "MAX_BATCH": "{{maxBatchSize}}"
@@ -37741,11 +37868,11 @@
37741
37868
  },
37742
37869
  {
37743
37870
  "id": "notify",
37744
- "type": "notify.telegram",
37871
+ "type": "notify.log",
37745
37872
  "label": "Batch Complete",
37746
37873
  "config": {
37747
- "channel": "{{notifyChannel}}",
37748
- "message": "Task batch PR pipeline complete"
37874
+ "message": "Task batch PR pipeline complete",
37875
+ "level": "info"
37749
37876
  },
37750
37877
  "position": {
37751
37878
  "x": 400,
@@ -37852,7 +37979,7 @@
37852
37979
  "description": "Monitors the task backlog and dispatches multiple tasks in parallel using the Task Lifecycle sub-workflow. Automatically picks up tasks when backlog drops below threshold, fans out execution across available slots, and reports batch results.",
37853
37980
  "category": "task-execution",
37854
37981
  "enabled": true,
37855
- "nodeCount": 7,
37982
+ "nodeCount": 9,
37856
37983
  "trigger": "trigger.task_available",
37857
37984
  "variables": {
37858
37985
  "maxConcurrent": 3,
@@ -37902,7 +38029,7 @@
37902
38029
  "command": "node",
37903
38030
  "args": [
37904
38031
  "-e",
37905
- "\n import(\"./kanban-adapter.mjs\")\n .then(k => k.listTasks(undefined, { status: \"todo\" }))\n .then(tasks => {\n const filtered = (tasks || []).filter((task) => {\n const repository = typeof task?.repository === \"string\" ? task.repository.trim() : \"\";\n const workspace = typeof task?.workspace === \"string\" ? task.workspace.trim() : \"\";\n return task && task.status === \"todo\" && !task.draft && repository.length > 0 && workspace.length > 0;\n });\n const batch = filtered.slice(0, parseInt(process.env.MAX_BATCH || \"10\"));\n console.log(JSON.stringify(batch.map(t => ({\n taskId: t.id,\n taskTitle: t.title || t.id,\n status: t.status,\n branch: t.branch || t.metadata?.branch || null,\n scope: t.scope || t.metadata?.scope || null,\n repository: typeof t?.repository === \"string\" ? t.repository.trim() : null,\n workspace: typeof t?.workspace === \"string\" ? t.workspace.trim() : null,\n }))));\n })\n .catch(e => { console.error(e.message); process.exit(1); });\n "
38032
+ "\n const fs = require(\"node:fs\");\n const path = require(\"node:path\");\n const { pathToFileURL } = require(\"node:url\");\n const cwd = process.cwd();\n const mirrorMarker = (path.sep + \".bosun\" + path.sep + \"workspaces\" + path.sep).toLowerCase();\n let repoRoot = cwd;\n if (cwd.toLowerCase().includes(mirrorMarker)) {\n const sourceRepoRoot = path.resolve(cwd, \"..\", \"..\", \"..\", \"..\");\n if (fs.existsSync(path.join(sourceRepoRoot, \"kanban\", \"kanban-adapter.mjs\"))) repoRoot = sourceRepoRoot;\n }\n const kanbanModuleUrl = pathToFileURL(path.join(repoRoot, \"kanban\", \"kanban-adapter.mjs\")).href;\n import(kanbanModuleUrl)\n .then(k => k.listTasks(undefined, { status: \"todo\" }))\n .then(tasks => {\n const filtered = (tasks || []).filter((task) => {\n const repository = typeof task?.repository === \"string\" ? task.repository.trim() : \"\";\n const workspace = typeof task?.workspace === \"string\" ? task.workspace.trim() : \"\";\n return task && task.status === \"todo\" && !task.draft && repository.length > 0 && workspace.length > 0;\n });\n const batch = filtered.slice(0, parseInt(process.env.MAX_BATCH || \"10\"));\n console.log(JSON.stringify(batch.map(t => ({\n taskId: t.id,\n taskTitle: t.title || t.id,\n status: t.status,\n branch: t.branch || t.metadata?.branch || null,\n scope: t.scope || t.metadata?.scope || null,\n repository: typeof t?.repository === \"string\" ? t.repository.trim() : null,\n workspace: typeof t?.workspace === \"string\" ? t.workspace.trim() : null,\n }))));\n })\n .catch(e => { console.error(e.message); process.exit(1); });\n "
37906
38033
  ],
37907
38034
  "env": {
37908
38035
  "MAX_BATCH": "{{maxBatchSize}}"
@@ -37972,12 +38099,11 @@
37972
38099
  ]
37973
38100
  },
37974
38101
  {
37975
- "id": "notify-complete",
37976
- "type": "notify.telegram",
37977
- "label": "Batch Summary",
38102
+ "id": "has-batch-failures",
38103
+ "type": "condition.expression",
38104
+ "label": "Any Batch Failures?",
37978
38105
  "config": {
37979
- "channel": "{{notifyChannel}}",
37980
- "message": "Task batch completed: {{dispatch-tasks.successCount}}/{{dispatch-tasks.totalItems}} succeeded ({{dispatch-tasks.failCount}} failed)"
38106
+ "expression": "Number($data?.batchResult?.failCount || 0) > 0"
37981
38107
  },
37982
38108
  "position": {
37983
38109
  "x": 400,
@@ -37986,6 +38112,38 @@
37986
38112
  "outputs": [
37987
38113
  "default"
37988
38114
  ]
38115
+ },
38116
+ {
38117
+ "id": "notify-failures",
38118
+ "type": "notify.telegram",
38119
+ "label": "Batch Failure Alert",
38120
+ "config": {
38121
+ "channel": "{{notifyChannel}}",
38122
+ "message": "Task batch needs attention: {{batchResult.failCount}} failed out of {{batchResult.totalItems}} ({{batchResult.successCount}} succeeded)"
38123
+ },
38124
+ "position": {
38125
+ "x": 220,
38126
+ "y": 830
38127
+ },
38128
+ "outputs": [
38129
+ "default"
38130
+ ]
38131
+ },
38132
+ {
38133
+ "id": "log-summary",
38134
+ "type": "notify.log",
38135
+ "label": "Batch Summary",
38136
+ "config": {
38137
+ "message": "Task batch completed: {{batchResult.successCount}}/{{batchResult.totalItems}} succeeded ({{batchResult.failCount}} failed)",
38138
+ "level": "info"
38139
+ },
38140
+ "position": {
38141
+ "x": 580,
38142
+ "y": 830
38143
+ },
38144
+ "outputs": [
38145
+ "default"
38146
+ ]
37989
38147
  }
37990
38148
  ],
37991
38149
  "edges": [
@@ -38021,10 +38179,24 @@
38021
38179
  "sourcePort": "default"
38022
38180
  },
38023
38181
  {
38024
- "id": "record-results->notify-complete",
38182
+ "id": "record-results->has-batch-failures",
38025
38183
  "source": "record-results",
38026
- "target": "notify-complete",
38184
+ "target": "has-batch-failures",
38027
38185
  "sourcePort": "default"
38186
+ },
38187
+ {
38188
+ "id": "has-batch-failures->notify-failures",
38189
+ "source": "has-batch-failures",
38190
+ "target": "notify-failures",
38191
+ "sourcePort": "default",
38192
+ "condition": "$output?.result === true"
38193
+ },
38194
+ {
38195
+ "id": "has-batch-failures->log-summary",
38196
+ "source": "has-batch-failures",
38197
+ "target": "log-summary",
38198
+ "sourcePort": "default",
38199
+ "condition": "$output?.result !== true"
38028
38200
  }
38029
38201
  ],
38030
38202
  "metadata": {
@@ -38047,7 +38219,7 @@
38047
38219
  "description": "Complete task execution pipeline: poll for tasks → claim → worktree → agent dispatch → commit detection → PR creation → status transition. Replaces the monolithic TaskExecutor.executeTask() method with a composable workflow DAG.",
38048
38220
  "category": "task-execution",
38049
38221
  "enabled": true,
38050
- "nodeCount": 50,
38222
+ "nodeCount": 53,
38051
38223
  "trigger": "trigger.task_available",
38052
38224
  "variables": {
38053
38225
  "maxParallel": 3,
@@ -38920,6 +39092,62 @@
38920
39092
  "default"
38921
39093
  ]
38922
39094
  },
39095
+ {
39096
+ "id": "wt-failure-blocking",
39097
+ "type": "condition.expression",
39098
+ "label": "Non-Retryable WT Failure?",
39099
+ "config": {
39100
+ "expression": "$ctx.getNodeOutput('acquire-worktree')?.retryable === false"
39101
+ },
39102
+ "position": {
39103
+ "x": 600,
39104
+ "y": 1220
39105
+ },
39106
+ "outputs": [
39107
+ "yes",
39108
+ "no"
39109
+ ]
39110
+ },
39111
+ {
39112
+ "id": "set-blocked-wt-failed",
39113
+ "type": "action.update_task_status",
39114
+ "label": "Set Blocked (WT Fail)",
39115
+ "config": {
39116
+ "taskId": "{{taskId}}",
39117
+ "status": "blocked",
39118
+ "taskTitle": "{{taskTitle}}"
39119
+ },
39120
+ "position": {
39121
+ "x": 470,
39122
+ "y": 1350
39123
+ },
39124
+ "outputs": [
39125
+ "default"
39126
+ ]
39127
+ },
39128
+ {
39129
+ "id": "annotate-blocked-wt-failed",
39130
+ "type": "action.bosun_function",
39131
+ "label": "Annotate Blocked (WT Fail)",
39132
+ "config": {
39133
+ "function": "tasks.update",
39134
+ "args": {
39135
+ "taskId": "{{taskId}}",
39136
+ "fields": {
39137
+ "cooldownUntil": "{{acquire-worktree.retryAt}}",
39138
+ "blockedReason": "{{acquire-worktree.blockedReason}}",
39139
+ "meta": "{{(() => { const current = ($data.taskMeta && typeof $data.taskMeta === 'object') ? $data.taskMeta : {}; const output = $ctx.getNodeOutput('acquire-worktree') || {}; return { ...current, autoRecovery: { active: true, reason: 'worktree_failure', failureKind: output.failureKind || 'branch_refresh_conflict', retryAt: output.retryAt || null, recoveryDelayMs: output.autoRecoverDelayMs || null, error: output.error || '', recordedAt: output.recordedAt || null }, worktreeFailure: { failureKind: output.failureKind || 'branch_refresh_conflict', retryable: output.retryable !== false, retryAt: output.retryAt || null, blockedReason: output.blockedReason || '', error: output.error || '', recordedAt: output.recordedAt || null } }; })()}}"
39140
+ }
39141
+ }
39142
+ },
39143
+ "position": {
39144
+ "x": 470,
39145
+ "y": 1480
39146
+ },
39147
+ "outputs": [
39148
+ "default"
39149
+ ]
39150
+ },
38923
39151
  {
38924
39152
  "id": "set-todo-wt-failed",
38925
39153
  "type": "action.update_task_status",
@@ -38930,8 +39158,8 @@
38930
39158
  "taskTitle": "{{taskTitle}}"
38931
39159
  },
38932
39160
  "position": {
38933
- "x": 600,
38934
- "y": 1220
39161
+ "x": 730,
39162
+ "y": 1350
38935
39163
  },
38936
39164
  "outputs": [
38937
39165
  "default"
@@ -38946,7 +39174,7 @@
38946
39174
  },
38947
39175
  "position": {
38948
39176
  "x": 600,
38949
- "y": 1350
39177
+ "y": 1480
38950
39178
  },
38951
39179
  "outputs": [
38952
39180
  "default"
@@ -38957,11 +39185,11 @@
38957
39185
  "type": "notify.telegram",
38958
39186
  "label": "Notify WT Failed",
38959
39187
  "config": {
38960
- "message": "⚠️ Worktree failed for \"{{taskTitle}}\" ({{taskId}})"
39188
+ "message": "⚠️ Worktree failed for \"{{taskTitle}}\" ({{taskId}}){{acquire-worktree.recoveryNote}}"
38961
39189
  },
38962
39190
  "position": {
38963
39191
  "x": 600,
38964
- "y": 1480
39192
+ "y": 1740
38965
39193
  },
38966
39194
  "outputs": [
38967
39195
  "default"
@@ -39299,9 +39527,35 @@
39299
39527
  "condition": "$output?.result !== true"
39300
39528
  },
39301
39529
  {
39302
- "id": "release-claim-wt-failed->set-todo-wt-failed",
39530
+ "id": "release-claim-wt-failed->wt-failure-blocking",
39303
39531
  "source": "release-claim-wt-failed",
39532
+ "target": "wt-failure-blocking",
39533
+ "sourcePort": "default"
39534
+ },
39535
+ {
39536
+ "id": "wt-failure-blocking->set-blocked-wt-failed",
39537
+ "source": "wt-failure-blocking",
39538
+ "target": "set-blocked-wt-failed",
39539
+ "sourcePort": "yes",
39540
+ "condition": "$output?.result === true"
39541
+ },
39542
+ {
39543
+ "id": "wt-failure-blocking->set-todo-wt-failed",
39544
+ "source": "wt-failure-blocking",
39304
39545
  "target": "set-todo-wt-failed",
39546
+ "sourcePort": "no",
39547
+ "condition": "$output?.result !== true"
39548
+ },
39549
+ {
39550
+ "id": "set-blocked-wt-failed->annotate-blocked-wt-failed",
39551
+ "source": "set-blocked-wt-failed",
39552
+ "target": "annotate-blocked-wt-failed",
39553
+ "sourcePort": "default"
39554
+ },
39555
+ {
39556
+ "id": "annotate-blocked-wt-failed->release-slot-wt-failed",
39557
+ "source": "annotate-blocked-wt-failed",
39558
+ "target": "release-slot-wt-failed",
39305
39559
  "sourcePort": "default"
39306
39560
  },
39307
39561
  {