@adaptic/maestro 1.1.8 → 1.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. package/.claude/commands/init-maestro.md +304 -8
  2. package/README.md +28 -0
  3. package/bin/maestro.mjs +258 -56
  4. package/docs/guides/agents-observe-setup.md +64 -0
  5. package/docs/guides/ccxray-diagnostics.md +65 -0
  6. package/docs/guides/claude-mem-setup.md +79 -0
  7. package/docs/guides/claude-pace-setup.md +56 -0
  8. package/docs/guides/claudraband-sessions.md +98 -0
  9. package/docs/guides/clawteam-swarm.md +116 -0
  10. package/docs/guides/code-review-graph-setup.md +86 -0
  11. package/docs/guides/self-optimization-pattern.md +82 -0
  12. package/docs/guides/slack-setup.md +4 -2
  13. package/docs/guides/twilio-subaccounts-setup.md +223 -0
  14. package/docs/guides/webhook-relay-setup.md +349 -0
  15. package/package.json +2 -1
  16. package/plugins/maestro-skills/plugin.json +16 -0
  17. package/plugins/maestro-skills/skills/agents-observe.md +110 -0
  18. package/plugins/maestro-skills/skills/ccxray-diagnostics.md +91 -0
  19. package/plugins/maestro-skills/skills/claude-pace.md +61 -0
  20. package/plugins/maestro-skills/skills/code-review-graph.md +99 -0
  21. package/scaffold/CLAUDE.md +64 -0
  22. package/scaffold/config/agent.ts.example +2 -1
  23. package/scaffold/config/known-agents.json +35 -0
  24. package/scripts/daemon/classifier.mjs +264 -50
  25. package/scripts/daemon/dispatcher.mjs +109 -5
  26. package/scripts/daemon/launchd-wrapper-generic.sh +96 -0
  27. package/scripts/daemon/launchd-wrapper-slack-events.sh +37 -0
  28. package/scripts/daemon/launchd-wrapper.sh +91 -0
  29. package/scripts/daemon/lib/session-router.mjs +274 -0
  30. package/scripts/daemon/lib/session-router.test.mjs +295 -0
  31. package/scripts/daemon/prompt-builder.mjs +51 -11
  32. package/scripts/daemon/responder.mjs +234 -19
  33. package/scripts/daemon/session-lock.mjs +194 -0
  34. package/scripts/daemon/sophie-daemon.mjs +16 -2
  35. package/scripts/email-signature.html +20 -4
  36. package/scripts/local-triggers/generate-plists.sh +62 -10
  37. package/scripts/poller/imap-client.mjs +4 -2
  38. package/scripts/poller/slack-poller.mjs +104 -52
  39. package/scripts/setup/init-agent.sh +91 -1
  40. package/scripts/setup/install-dev-tools.sh +150 -0
  41. package/scripts/spawn-session.sh +21 -6
  42. package/workflows/continuous/backlog-executor.yaml +141 -0
  43. package/workflows/daily/evening-wrap.yaml +41 -1
  44. package/workflows/daily/morning-brief.yaml +17 -0
  45. package/workflows/event-driven/agent-failure-investigation.yaml +137 -0
  46. package/workflows/event-driven/pr-review.yaml +104 -0
  47. package/workflows/weekly/engineering-health.yaml +154 -0
@@ -111,13 +111,48 @@ PLIST_SCHED
111
111
  PLIST_INTERVAL
112
112
  fi
113
113
 
114
+ # ── EnvironmentVariables block ──────────────────────────────────────────
115
+ # Exports for every spawned process:
116
+ # PATH — homebrew + standard paths so node/python/etc are findable
117
+ # CLAUDE_CODE_TMPDIR — redirect Claude Code per-cwd temp to external SSD if mounted
118
+ # AGENT_ROOT — used by maestro's singleton lock and other helpers
119
+ cat >> "$FILE" << PLIST_ENV
120
+
121
+ <key>EnvironmentVariables</key>
122
+ <dict>
123
+ <key>PATH</key>
124
+ <string>/opt/homebrew/bin:/opt/homebrew/sbin:/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin</string>
125
+ <key>HOME</key>
126
+ <string>$HOME</string>
127
+ <key>USER</key>
128
+ <string>$AGENT_USER</string>
129
+ <key>AGENT_ROOT</key>
130
+ <string>$AGENT_DIR</string>
131
+ PLIST_ENV
132
+
133
+ if [ -d "/Volumes/4TB-SSD" ]; then
134
+ cat >> "$FILE" << PLIST_SSD
135
+ <key>CLAUDE_CODE_TMPDIR</key>
136
+ <string>/Volumes/4TB-SSD/maestro/${AGENT_FIRST}/claude-tmp</string>
137
+ PLIST_SSD
138
+ fi
139
+
140
+ cat >> "$FILE" << 'PLIST_ENV_END'
141
+ </dict>
142
+ PLIST_ENV_END
143
+
144
+ # Resolve symlinked log path to a real path so launchd can write to it.
145
+ # If logs/ is a symlink (e.g. when redirected to an external SSD), launchd's
146
+ # StandardErrorPath/StandardOutPath sometimes fail with EX_CONFIG (78).
147
+ LOG_REAL=$(cd -P "$AGENT_DIR/logs/daemon" 2>/dev/null && pwd -P || echo "$AGENT_DIR/logs/daemon")
148
+
114
149
  cat >> "$FILE" << PLIST_FOOTER
115
150
 
116
151
  <key>StandardOutPath</key>
117
- <string>$AGENT_DIR/logs/daemon/launchd-stdout.log</string>
152
+ <string>$LOG_REAL/launchd-stdout.log</string>
118
153
 
119
154
  <key>StandardErrorPath</key>
120
- <string>$AGENT_DIR/logs/daemon/launchd-stderr.log</string>
155
+ <string>$LOG_REAL/launchd-stderr.log</string>
121
156
  </dict>
122
157
  </plist>
123
158
  PLIST_FOOTER
@@ -126,6 +161,15 @@ PLIST_FOOTER
126
161
  }
127
162
 
128
163
  # ── Helper: trigger plist (runs claude with a trigger prompt) ────────────────
164
+ #
165
+ # Routes through scripts/daemon/launchd-wrapper-generic.sh so the trigger's
166
+ # stdout/stderr lands on the SSD (when /Volumes/{name}-SSD is mounted) and
167
+ # CLAUDE_CODE_TMPDIR is set for any Claude Code session it spawns.
168
+ #
169
+ # IMPORTANT: run-trigger.sh expects the trigger NAME (e.g. "meeting-prep"),
170
+ # not the full path to the .md file. The script constructs the full path
171
+ # itself. Passing the full path causes a doubled path bug:
172
+ # /agent/schedules/triggers//agent/schedules/triggers/meeting-prep.md.md
129
173
 
130
174
  generate_trigger_plist() {
131
175
  local TRIGGER_NAME="$1"
@@ -133,11 +177,11 @@ generate_trigger_plist() {
133
177
  local INTERVAL="$3"
134
178
 
135
179
  local LABEL="ai.adaptic.${AGENT_FIRST}-${TRIGGER_NAME}"
136
- local TRIGGER_FILE="$AGENT_DIR/schedules/triggers/${TRIGGER_NAME}.md"
137
180
  local RUN_TRIGGER="$AGENT_DIR/scripts/local-triggers/run-trigger.sh"
181
+ local WRAPPER="$AGENT_DIR/scripts/daemon/launchd-wrapper-generic.sh"
138
182
 
139
183
  generate_plist "$LABEL" \
140
- "$RUN_TRIGGER|$TRIGGER_FILE" \
184
+ "$WRAPPER|$RUN_TRIGGER|$TRIGGER_NAME" \
141
185
  "$SCHEDULE" \
142
186
  "$INTERVAL" \
143
187
  ""
@@ -148,15 +192,23 @@ generate_trigger_plist() {
148
192
  echo ""
149
193
  echo "Generating launchd plist files..."
150
194
 
151
- # 1. Main daemon (KeepAlive)
195
+ # 1. Main daemon (KeepAlive) — uses wrapper to bootstrap env (HOME, PATH, etc).
196
+ # Wrapper is at scripts/daemon/launchd-wrapper.sh. This avoids passing
197
+ # EnvironmentVariables in the plist, which has been observed to cause
198
+ # EX_CONFIG (78) failures on macOS when the agent's logs/state dirs are
199
+ # symlinked to an external SSD.
152
200
  generate_plist "ai.adaptic.${AGENT_FIRST}-daemon" \
153
- "${NODE_PATH}|${AGENT_DIR}/scripts/daemon/maestro-daemon.mjs" \
201
+ "${AGENT_DIR}/scripts/daemon/launchd-wrapper.sh" \
154
202
  "" "" "true"
155
203
 
156
- # 2. Slack events server (KeepAlive) real-time Slack message handling
157
- generate_plist "ai.adaptic.${AGENT_FIRST}-slack-events" \
158
- "${NODE_PATH}|${AGENT_DIR}/scripts/slack-events-server.mjs" \
159
- "" "" "true"
204
+ # 2. Slack events relay polling job (every 5 sec).
205
+ # The local Mac mini polls the Railway-hosted webhook relay for buffered
206
+ # Slack events. This replaces the older local slack-events-server.mjs which
207
+ # needed an inbound tunnel from the Mac. See docs/guides/webhook-relay-setup.md.
208
+ # Routed through the generic wrapper so its output goes to the SSD.
209
+ generate_plist "ai.adaptic.${AGENT_FIRST}-poll-relay" \
210
+ "${AGENT_DIR}/scripts/daemon/launchd-wrapper-generic.sh|${AGENT_DIR}/scripts/poll-slack-events.sh" \
211
+ "" "5" ""
160
212
 
161
213
  # 3. Inbox processor (every 5 minutes)
162
214
  generate_trigger_plist "inbox-processor" "" "300"
@@ -157,6 +157,7 @@ export async function pollImapInbox({
157
157
  const now = new Date().toISOString();
158
158
  let newCount = 0;
159
159
  let skipped = 0;
160
+ let unseenCount = 0;
160
161
  let lastMid = cursor.last_message_id;
161
162
  let totalProcessed = cursor.messages_processed;
162
163
 
@@ -185,7 +186,8 @@ export async function pollImapInbox({
185
186
  return { newCount: 0, errors };
186
187
  }
187
188
 
188
- console.log(`${logPrefix} Found ${unseen.length} unseen messages`);
189
+ unseenCount = unseen.length;
190
+ console.log(`${logPrefix} Found ${unseenCount} unseen messages`);
189
191
 
190
192
  // Rate limit: process most recent N
191
193
  const toFetch = unseen.length > MAX_EMAILS_PER_CYCLE
@@ -273,7 +275,7 @@ export async function pollImapInbox({
273
275
  await client.logout();
274
276
 
275
277
  console.log(
276
- `${logPrefix} Poll complete: ${newCount} new, ${skipped} skipped, ${unseen.length} unseen total`,
278
+ `${logPrefix} Poll complete: ${newCount} new, ${skipped} skipped, ${unseenCount} unseen total`,
277
279
  );
278
280
  } catch (err) {
279
281
  errors.push(`${logPrefix} IMAP error: ${err.message}`);
@@ -82,25 +82,42 @@ async function getMonitoredChannels() {
82
82
  const CEO_USER_ID = "U097N5R0M7U";
83
83
  const SOPHIE_USER_ID = "U099N1JFPRQ";
84
84
 
85
- // Rate-limit-aware delay: space requests ~1500ms apart to stay under Slack Tier 3 limits
86
- // (~50 req/min for conversations.history). With ~25 calls per cycle (half channels
87
- // rotated), 1500ms spacing = ~38s total, well within the 60s poll interval.
88
- // Previous: 800ms caused persistent rate limiting with 11 channels + threads.
89
- const INTER_REQUEST_DELAY_MS = 1500;
85
+ // Rate-limit-aware delay with random jitter to reduce Slack API rate limiting.
86
+ // Base delay 400ms + random 0-600ms jitter = 400-1000ms between requests.
87
+ // With broader-fetch elimination and DM/thread rotation, total calls per cycle
88
+ // are ~20-30 (down from 90+), so this timing keeps us well under both the
89
+ // Slack Tier 3 limit (~50 req/min) and the 50s cycle budget.
90
+ // Previous: fixed 1500ms with redundant broader fetches caused 25-30 min cycles.
91
+ const INTER_REQUEST_BASE_MS = 400;
92
+ const INTER_REQUEST_JITTER_MS = 600;
90
93
  const MAX_RETRIES = 3;
91
94
 
95
+ // Cycle time budget — abort non-critical work if approaching this limit.
96
+ // Launchd fires every 60s; we must finish well before the next invocation.
97
+ const CYCLE_TIME_BUDGET_MS = 55_000;
98
+ let cycleStartTime = 0;
99
+
92
100
  function sleep(ms) {
93
101
  return new Promise((resolve) => setTimeout(resolve, ms));
94
102
  }
95
103
 
96
104
  let lastRequestTime = 0;
97
105
 
106
+ /**
107
+ * Check whether the cycle time budget has been exceeded.
108
+ * Returns true if we should skip non-critical work to finish on time.
109
+ */
110
+ function isCycleBudgetExceeded() {
111
+ return cycleStartTime > 0 && (Date.now() - cycleStartTime) > CYCLE_TIME_BUDGET_MS;
112
+ }
113
+
98
114
  async function slackApi(method, params = {}) {
99
- // Enforce minimum spacing between requests
115
+ // Enforce minimum spacing between requests with random jitter
100
116
  const now = Date.now();
117
+ const jitteredDelay = INTER_REQUEST_BASE_MS + Math.floor(Math.random() * INTER_REQUEST_JITTER_MS);
101
118
  const elapsed = now - lastRequestTime;
102
- if (elapsed < INTER_REQUEST_DELAY_MS) {
103
- await sleep(INTER_REQUEST_DELAY_MS - elapsed);
119
+ if (elapsed < jitteredDelay) {
120
+ await sleep(jitteredDelay - elapsed);
104
121
  }
105
122
  lastRequestTime = Date.now();
106
123
 
@@ -219,6 +236,8 @@ function getChannelsForCycle(allChannels) {
219
236
  }
220
237
 
221
238
  export async function pollSlack() {
239
+ cycleStartTime = Date.now();
240
+
222
241
  if (!SLACK_TOKEN) {
223
242
  return { items: [], errors: ["SLACK_TOKEN not set"] };
224
243
  }
@@ -243,6 +262,12 @@ export async function pollSlack() {
243
262
  }
244
263
 
245
264
  for (const channel of channelsThisCycle) {
265
+ // Check cycle time budget — skip remaining channels if running long
266
+ if (isCycleBudgetExceeded()) {
267
+ errors.push("Cycle time budget exceeded — skipping remaining channels");
268
+ break;
269
+ }
270
+
246
271
  try {
247
272
  const result = await slackApi("conversations.history", {
248
273
  channel: channel.id,
@@ -256,31 +281,14 @@ export async function pollSlack() {
256
281
  }
257
282
 
258
283
  // Track channel threads with replies so the thread scanner picks them up.
259
- // IMPORTANT: conversations.history only returns messages newer than `oldest`,
260
- // but threads with old parent messages can have NEW replies. We must also
261
- // check slightly older messages to discover threads with recent activity.
284
+ // Thread discovery relies on persisted activeThreads state (which survives
285
+ // across cycles) plus any threaded messages found in the current history
286
+ // response. The previous "broader" second fetch (without oldest filter)
287
+ // has been removed — it doubled API calls per channel and was the primary
288
+ // cause of rate-limiting. Active threads with old parent messages are
289
+ // already tracked in activeThreads from prior cycles.
262
290
  const channelThreads = new Set(activeThreads[channel.id] || []);
263
291
 
264
- // Second pass: fetch recent messages WITHOUT oldest filter to find threads
265
- // with new replies. Parent messages older than cursor are invisible to the
266
- // first pass, but their threads can have new replies we need to scan.
267
- // Always run this — it's the only way to discover active threads in channels.
268
- try {
269
- const broader = await slackApi("conversations.history", {
270
- channel: channel.id,
271
- limit: 15,
272
- });
273
- if (broader.ok) {
274
- for (const m of broader.messages || []) {
275
- if (m.reply_count > 0 && m.ts) {
276
- // Track ALL threads with replies — the thread scanner will
277
- // filter by cursor timestamp when fetching individual replies
278
- channelThreads.add(m.ts);
279
- }
280
- }
281
- }
282
- } catch { /* non-critical — thread discovery is best-effort */ }
283
-
284
292
  for (const msg of result.messages || []) {
285
293
  // Register any message with replies as an active thread (even Sophie's)
286
294
  if (msg.reply_count > 0 && msg.ts) {
@@ -357,14 +365,33 @@ export async function pollSlack() {
357
365
  }
358
366
  }
359
367
 
360
- // Also check DMs
368
+ // Also check DMs — with rotation.
369
+ // CEO DMs are polled every cycle (critical). Other DMs rotate in halves
370
+ // to reduce API calls. DM thread discovery uses persisted activeThreads
371
+ // state instead of a redundant broader fetch (same fix as channels above).
372
+ // Cache CEO DM channel IDs for thread scanning priority later.
373
+ const ceoDMChannelIds = new Set();
361
374
  try {
362
375
  const convos = await slackApi("conversations.list", {
363
376
  types: "im",
364
377
  limit: 50,
365
378
  });
366
379
  if (convos.ok) {
367
- for (const im of convos.channels || []) {
380
+ const allDMs = convos.channels || [];
381
+ // Separate CEO DMs (always poll) from other DMs (rotate in halves)
382
+ const ceoDMs = allDMs.filter((im) => im.user === CEO_USER_ID);
383
+ for (const dm of ceoDMs) ceoDMChannelIds.add(dm.id);
384
+ const otherDMs = allDMs.filter((im) => im.user !== CEO_USER_ID);
385
+ const rotatedOtherDMs = otherDMs.filter((_, i) => i % 2 === pollCycleCount % 2);
386
+ const dmsThisCycle = [...ceoDMs, ...rotatedOtherDMs];
387
+
388
+ for (const im of dmsThisCycle) {
389
+ // Check cycle time budget — skip remaining DMs if running long
390
+ if (isCycleBudgetExceeded()) {
391
+ errors.push("Cycle time budget exceeded — skipping remaining DMs");
392
+ break;
393
+ }
394
+
368
395
  const history = await slackApi("conversations.history", {
369
396
  channel: im.id,
370
397
  oldest,
@@ -372,26 +399,10 @@ export async function pollSlack() {
372
399
  });
373
400
  if (!history.ok) continue;
374
401
 
375
- // Track threads discovered from DM history messages with replies
402
+ // Track threads discovered from DM history uses persisted state
403
+ // from prior cycles instead of a redundant broader API call.
376
404
  const channelThreads = new Set(activeThreads[im.id] || []);
377
405
 
378
- // Broader thread discovery for DMs — same pattern as channels.
379
- // Fetch recent messages WITHOUT oldest filter to find DM threads
380
- // whose parent message predates the cursor but have new replies.
381
- try {
382
- const broader = await slackApi("conversations.history", {
383
- channel: im.id,
384
- limit: 10,
385
- });
386
- if (broader.ok) {
387
- for (const m of broader.messages || []) {
388
- if (m.reply_count > 0 && m.ts) {
389
- channelThreads.add(m.ts);
390
- }
391
- }
392
- }
393
- } catch { /* non-critical — DM thread discovery is best-effort */ }
394
-
395
406
  for (const msg of history.messages || []) {
396
407
  // Register any message with replies as an active thread
397
408
  if (msg.reply_count > 0 && msg.ts) {
@@ -474,6 +485,11 @@ export async function pollSlack() {
474
485
  // This catches thread replies that conversations.history misses.
475
486
  // Use a 30-minute lookback window instead of the exact cursor to avoid
476
487
  // missing replies that arrived between poll cycles or during processing.
488
+ //
489
+ // Thread scanning rotation: threads in high-priority channels (CEO DMs,
490
+ // critical/high channels) are scanned every cycle. Threads in normal
491
+ // channels rotate in halves to reduce API calls. If the cycle time
492
+ // budget is exceeded, remaining thread scans are skipped.
477
493
  const THREAD_LOOKBACK_MS = 30 * 60 * 1000;
478
494
  const threadOldest = String((Date.now() - THREAD_LOOKBACK_MS) / 1000);
479
495
  const threadOldestTs = parseFloat(threadOldest);
@@ -490,9 +506,37 @@ export async function pollSlack() {
490
506
  existingInboxFiles = readdirSync(SLACK_INBOX);
491
507
  } catch { /* inbox dir may not exist yet */ }
492
508
 
493
- for (const [channelId, threadTsList] of Object.entries(activeThreads)) {
509
+ // Determine which channels are high-priority for thread scanning.
510
+ // ceoDMChannelIds was populated during DM polling above — no extra API call needed.
511
+ const highPriorityChannelIds = new Set(
512
+ allChannels
513
+ .filter((c) => c.priority === "critical" || c.priority === "high")
514
+ .map((c) => c.id),
515
+ );
516
+
517
+ const threadEntries = Object.entries(activeThreads);
518
+ // Sort: high-priority channels first, then rotate others
519
+ const priorityThreadEntries = threadEntries.filter(
520
+ ([chId]) => highPriorityChannelIds.has(chId) || ceoDMChannelIds.has(chId),
521
+ );
522
+ const normalThreadEntries = threadEntries.filter(
523
+ ([chId]) => !highPriorityChannelIds.has(chId) && !ceoDMChannelIds.has(chId),
524
+ );
525
+ // Rotate normal thread entries in halves
526
+ const rotatedNormalThreadEntries = normalThreadEntries.filter(
527
+ (_, i) => i % 2 === pollCycleCount % 2,
528
+ );
529
+ const threadEntriesToScan = [...priorityThreadEntries, ...rotatedNormalThreadEntries];
530
+
531
+ for (const [channelId, threadTsList] of threadEntriesToScan) {
494
532
  if (!threadTsList || threadTsList.length === 0) continue;
495
533
 
534
+ // Check cycle time budget before scanning this channel's threads
535
+ if (isCycleBudgetExceeded()) {
536
+ errors.push("Cycle time budget exceeded — skipping remaining thread scans");
537
+ break;
538
+ }
539
+
496
540
  for (const threadTs of threadTsList) {
497
541
  try {
498
542
  const repliesResult = await slackApi("conversations.replies", {
@@ -727,5 +771,13 @@ export async function pollSlack() {
727
771
  }
728
772
 
729
773
  writeCursor("slack", new Date().toISOString());
774
+
775
+ const cycleDurationMs = Date.now() - cycleStartTime;
776
+ const cycleDurationSec = (cycleDurationMs / 1000).toFixed(1);
777
+ console.log(
778
+ `[slack-poller] Cycle ${pollCycleCount} completed in ${cycleDurationSec}s ` +
779
+ `(${channelsThisCycle.length} channels, ${items.length} items, ${errors.length} errors)`,
780
+ );
781
+
730
782
  return { items, errors };
731
783
  }
@@ -109,6 +109,94 @@ fi
109
109
 
110
110
  echo ""
111
111
 
112
+ # ---------------------------------------------------------------------------
113
+ # Step 1b: Claude-Mem (persistent session memory plugin)
114
+ # ---------------------------------------------------------------------------
115
+
116
+ log "Setting up Claude-Mem (persistent session memory)..."
117
+
118
+ if command -v claude &>/dev/null; then
119
+ # Install via Claude Code plugin system
120
+ if npx claude-mem install --non-interactive 2>/dev/null; then
121
+ ok "Claude-Mem installed and configured"
122
+ else
123
+ warn "Claude-Mem auto-install failed — install manually: npx claude-mem install"
124
+ fi
125
+ else
126
+ warn "Claude CLI required for Claude-Mem — install Claude CLI first, then run: npx claude-mem install"
127
+ fi
128
+
129
+ echo ""
130
+
131
+ # ---------------------------------------------------------------------------
132
+ # Step 1c: Claude-Pace (real-time rate limit tracker)
133
+ # ---------------------------------------------------------------------------
134
+
135
+ log "Setting up Claude-Pace (rate limit tracker)..."
136
+
137
+ if command -v claude &>/dev/null; then
138
+ # Install via Claude Code plugin system
139
+ if claude plugin marketplace add Astro-Han/claude-pace 2>/dev/null && \
140
+ claude plugin install claude-pace 2>/dev/null; then
141
+ ok "Claude-Pace installed"
142
+ else
143
+ warn "Claude-Pace auto-install failed — install manually:"
144
+ warn " claude plugin marketplace add Astro-Han/claude-pace"
145
+ warn " claude plugin install claude-pace"
146
+ fi
147
+ else
148
+ warn "Claude CLI required for Claude-Pace — install Claude CLI first"
149
+ fi
150
+
151
+ echo ""
152
+
153
+ # ---------------------------------------------------------------------------
154
+ # Step 1d: Claudraband (persistent sessions + daemon mode) [optional]
155
+ # ---------------------------------------------------------------------------
156
+
157
+ log "Checking claudraband availability (persistent session backend)..."
158
+
159
+ if npx @halfwhey/claudraband --version &>/dev/null 2>&1; then
160
+ ok "Claudraband available (npx @halfwhey/claudraband)"
161
+ else
162
+ warn "Claudraband not cached — will be fetched on first use via npx"
163
+ warn " Pre-cache: npx @halfwhey/claudraband --version"
164
+ fi
165
+
166
+ echo ""
167
+
168
+ # ---------------------------------------------------------------------------
169
+ # Step 1e: ClawTeam swarm orchestrator [optional]
170
+ # ---------------------------------------------------------------------------
171
+
172
+ if [ "${MAESTRO_ENABLE_SWARM:-0}" = "1" ]; then
173
+ log "Setting up ClawTeam swarm orchestrator..."
174
+
175
+ CLAWTEAM_DIR="$HOME/ClawTeam-OpenClaw"
176
+ if [ -d "$CLAWTEAM_DIR" ]; then
177
+ ok "ClawTeam already cloned at $CLAWTEAM_DIR"
178
+ (cd "$CLAWTEAM_DIR" && git pull --ff-only 2>/dev/null) && ok "ClawTeam updated" || warn "ClawTeam update failed — check manually"
179
+ else
180
+ if git clone https://github.com/win4r/ClawTeam-OpenClaw.git "$CLAWTEAM_DIR" 2>/dev/null; then
181
+ ok "ClawTeam cloned to $CLAWTEAM_DIR"
182
+ else
183
+ warn "ClawTeam clone failed — install manually: git clone https://github.com/win4r/ClawTeam-OpenClaw.git ~/ClawTeam-OpenClaw"
184
+ fi
185
+ fi
186
+
187
+ # Ensure tmux is available (required by ClawTeam)
188
+ if ! command -v tmux &>/dev/null; then
189
+ warn "tmux not found — ClawTeam requires tmux. Install via: brew install tmux"
190
+ else
191
+ ok "tmux $(tmux -V 2>/dev/null)"
192
+ fi
193
+
194
+ echo ""
195
+ else
196
+ log "Skipping ClawTeam (set MAESTRO_ENABLE_SWARM=1 to enable)"
197
+ echo ""
198
+ fi
199
+
112
200
  # ---------------------------------------------------------------------------
113
201
  # Step 2: npm install
114
202
  # ---------------------------------------------------------------------------
@@ -351,7 +439,9 @@ cat > "$CLAUDE_SETTINGS" << 'SETTINGS_EOF'
351
439
  "mcp-server-dev@claude-plugins-official": true,
352
440
  "zapier@claude-plugins-official": true,
353
441
  "explanatory-output-style@claude-plugins-official": true,
354
- "learning-output-style@claude-plugins-official": true
442
+ "learning-output-style@claude-plugins-official": true,
443
+ "claude-mem": true,
444
+ "claude-pace": true
355
445
  },
356
446
  "effortLevel": "high",
357
447
  "skipDangerousModePermissionPrompt": true
@@ -0,0 +1,150 @@
1
+ #!/bin/bash
2
+ # install-dev-tools.sh — Install and configure approved dev tooling for Maestro agents
3
+ #
4
+ # Approved tools (audited 2026-04-18 from #dev-tooling channel):
5
+ # - claude-pace — Rate limit status line tracker (Bash plugin)
6
+ # - agents-observe — Multi-agent observability dashboard (CC plugin)
7
+ # - ccxray — Claude Code token/cost observability proxy
8
+ # - ClawTeam — Git worktree swarm orchestrator (CLI)
9
+ # - code-review-graph — Tree-sitter codebase knowledge graph (MCP)
10
+ #
11
+ # Skipped:
12
+ # - Superset — GUI-only, Elastic License v2, not automatable
13
+ # - claudraband — Evaluate later; overlaps with maestro session mgmt
14
+ #
15
+ # Usage: ./scripts/setup/install-dev-tools.sh [--all | --tool <name>]
16
+ # Examples:
17
+ # ./scripts/setup/install-dev-tools.sh --all
18
+ # ./scripts/setup/install-dev-tools.sh --tool claude-pace
19
+ # ./scripts/setup/install-dev-tools.sh --tool ccxray
20
+
21
+ set -e
22
+
23
+ SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
24
+ AGENT_DIR="$(cd "$SCRIPT_DIR/../.." && pwd)"
25
+
26
+ # ── Helpers ──────────────────────────────────────────────────────────────────
27
+
28
+ info() { echo " [INFO] $1"; }
29
+ ok() { echo " [OK] $1"; }
30
+ skip() { echo " [SKIP] $1"; }
31
+ fail() { echo " [FAIL] $1" >&2; }
32
+
33
+ # ── Tool installers ─────────────────────────────────────────────────────────
34
+
35
+ install_claude_pace() {
36
+ info "Installing claude-pace (rate limit status line tracker)..."
37
+ if command -v claude >/dev/null 2>&1; then
38
+ # Install as Claude Code plugin
39
+ claude plugin install claude-pace 2>/dev/null && ok "claude-pace plugin installed" || {
40
+ info "Plugin install failed, trying npx setup..."
41
+ npx claude-pace setup 2>/dev/null && ok "claude-pace configured via npx" || skip "claude-pace: manual setup needed — see https://github.com/Astro-Han/claude-pace"
42
+ }
43
+ else
44
+ skip "claude-pace: Claude Code CLI not found"
45
+ fi
46
+ }
47
+
48
+ install_agents_observe() {
49
+ info "Installing agents-observe (multi-agent observability dashboard)..."
50
+ if command -v claude >/dev/null 2>&1; then
51
+ claude plugin install agents-observe 2>/dev/null && ok "agents-observe plugin installed" || {
52
+ info "Plugin install failed, trying npm global..."
53
+ npm install -g agents-observe 2>/dev/null && ok "agents-observe installed globally" || skip "agents-observe: manual setup needed — see https://github.com/simple10/agents-observe"
54
+ }
55
+ else
56
+ skip "agents-observe: Claude Code CLI not found"
57
+ fi
58
+ }
59
+
60
+ install_ccxray() {
61
+ info "Installing ccxray (Claude Code token/cost observability)..."
62
+ # ccxray is zero-install via npx — just verify it's accessible
63
+ if npx ccxray --version >/dev/null 2>&1; then
64
+ ok "ccxray available via npx"
65
+ else
66
+ info "ccxray will be available via: npx ccxray claude"
67
+ ok "ccxray: zero-install, run with 'npx ccxray claude' when needed"
68
+ fi
69
+
70
+ # Create a convenience wrapper
71
+ cat > "$AGENT_DIR/scripts/ccxray-session.sh" << 'WRAPPER'
72
+ #!/bin/bash
73
+ # Launch Claude Code through ccxray observability proxy
74
+ # Usage: ./scripts/ccxray-session.sh [claude args...]
75
+ exec npx ccxray claude "$@"
76
+ WRAPPER
77
+ chmod +x "$AGENT_DIR/scripts/ccxray-session.sh"
78
+ ok "Created scripts/ccxray-session.sh convenience wrapper"
79
+ }
80
+
81
+ install_clawteam() {
82
+ info "Installing ClawTeam (git worktree swarm orchestrator)..."
83
+ if command -v pip3 >/dev/null 2>&1; then
84
+ pip3 install clawteam 2>/dev/null && ok "ClawTeam installed via pip" || {
85
+ info "pip install failed, trying git clone..."
86
+ if [ ! -d "$HOME/.clawteam" ]; then
87
+ git clone https://github.com/win4r/ClawTeam-OpenClaw.git "$HOME/.clawteam" 2>/dev/null && ok "ClawTeam cloned to ~/.clawteam" || skip "ClawTeam: manual setup needed — see https://github.com/win4r/ClawTeam-OpenClaw"
88
+ else
89
+ ok "ClawTeam already present at ~/.clawteam"
90
+ fi
91
+ }
92
+ else
93
+ skip "ClawTeam: pip3 not found"
94
+ fi
95
+ }
96
+
97
+ install_code_review_graph() {
98
+ info "Installing code-review-graph (Tree-sitter codebase knowledge graph)..."
99
+ if npx code-review-graph --version >/dev/null 2>&1; then
100
+ ok "code-review-graph available via npx"
101
+ else
102
+ npm install -g code-review-graph 2>/dev/null && ok "code-review-graph installed globally" || {
103
+ info "code-review-graph: available via npx when needed"
104
+ ok "Run with: npx code-review-graph"
105
+ }
106
+ fi
107
+ }
108
+
109
+ # ── Main ─────────────────────────────────────────────────────────────────────
110
+
111
+ echo "=========================================="
112
+ echo " Maestro Dev Tooling Installer"
113
+ echo " Agent dir: $AGENT_DIR"
114
+ echo "=========================================="
115
+ echo ""
116
+
117
+ TOOL="${2:-}"
118
+ case "${1:-}" in
119
+ --all)
120
+ install_claude_pace
121
+ install_agents_observe
122
+ install_ccxray
123
+ install_clawteam
124
+ install_code_review_graph
125
+ ;;
126
+ --tool)
127
+ case "$TOOL" in
128
+ claude-pace) install_claude_pace ;;
129
+ agents-observe) install_agents_observe ;;
130
+ ccxray) install_ccxray ;;
131
+ clawteam) install_clawteam ;;
132
+ code-review-graph) install_code_review_graph ;;
133
+ *) echo "Unknown tool: $TOOL"; echo "Available: claude-pace, agents-observe, ccxray, clawteam, code-review-graph"; exit 1 ;;
134
+ esac
135
+ ;;
136
+ *)
137
+ echo "Usage: $0 [--all | --tool <name>]"
138
+ echo ""
139
+ echo "Available tools:"
140
+ echo " claude-pace — Rate limit status line tracker"
141
+ echo " agents-observe — Multi-agent observability dashboard"
142
+ echo " ccxray — Token/cost observability proxy"
143
+ echo " clawteam — Git worktree swarm orchestrator"
144
+ echo " code-review-graph — Tree-sitter codebase knowledge graph"
145
+ exit 0
146
+ ;;
147
+ esac
148
+
149
+ echo ""
150
+ echo "Done. See docs/guides/dev-tooling.md for usage details."
@@ -67,16 +67,31 @@ YAML
67
67
  echo "[$TIMESTAMP] Spawning session $SESSION_ID (budget: ${TIME_BUDGET}s)" | tee -a "$LOG_FILE"
68
68
 
69
69
  # Spawn Claude Code session in background with timeout
70
- (
71
- timeout "$TIME_BUDGET" claude -p "$TASK_CONTENT
70
+ # Supports two backends:
71
+ # MAESTRO_SESSION_BACKEND=claude (default) — direct claude -p
72
+ # MAESTRO_SESSION_BACKEND=claudraband — persistent sessions via claudraband daemon
73
+ SESSION_BACKEND="${MAESTRO_SESSION_BACKEND:-claude}"
74
+
75
+ SESSION_PROMPT="$TASK_CONTENT
72
76
 
73
- IMPORTANT: You are a sub-session spawned by Sophie Nguyen, Chief of Staff at Adaptic.ai.
77
+ IMPORTANT: You are a sub-session spawned for an Adaptic AI agent.
74
78
  Write your output to: $SESSION_DIR/output.md
75
79
  When complete, write 'DONE' to: $SESSION_DIR/complete
76
80
  Working directory: $SOPHIE_AI_DIR
77
- Do NOT send any communications. Only produce the requested output." \
78
- --output-format text \
79
- >> "$LOG_FILE" 2>&1
81
+ Do NOT send any communications. Only produce the requested output."
82
+
83
+ (
84
+ if [ "$SESSION_BACKEND" = "claudraband" ]; then
85
+ # Use claudraband for persistent, resumable sessions
86
+ timeout "$TIME_BUDGET" npx @halfwhey/claudraband "$SESSION_PROMPT" \
87
+ --output-format text \
88
+ >> "$LOG_FILE" 2>&1
89
+ else
90
+ # Default: direct claude -p
91
+ timeout "$TIME_BUDGET" claude -p "$SESSION_PROMPT" \
92
+ --output-format text \
93
+ >> "$LOG_FILE" 2>&1
94
+ fi
80
95
 
81
96
  EXIT_CODE=$?
82
97