opencode-swarm-plugin 0.17.1 → 0.19.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/plugin.js CHANGED
@@ -12807,9 +12807,24 @@ var init_events = __esm(() => {
12807
12807
  });
12808
12808
 
12809
12809
  // src/streams/store.ts
12810
+ function parseTimestamp(timestamp) {
12811
+ const ts = parseInt(timestamp, 10);
12812
+ if (Number.isNaN(ts)) {
12813
+ throw new Error(`[SwarmMail] Invalid timestamp: ${timestamp}`);
12814
+ }
12815
+ if (ts > Number.MAX_SAFE_INTEGER) {
12816
+ console.warn(`[SwarmMail] Timestamp ${timestamp} exceeds MAX_SAFE_INTEGER (year 2286+), precision may be lost`);
12817
+ }
12818
+ return ts;
12819
+ }
12810
12820
  async function appendEvent(event, projectPath) {
12811
12821
  const db = await getDatabase(projectPath);
12812
12822
  const { type, project_key, timestamp, ...rest } = event;
12823
+ console.log("[SwarmMail] Appending event", {
12824
+ type,
12825
+ projectKey: project_key,
12826
+ timestamp
12827
+ });
12813
12828
  const result = await db.query(`INSERT INTO events (type, project_key, timestamp, data)
12814
12829
  VALUES ($1, $2, $3, $4)
12815
12830
  RETURNING id, sequence`, [type, project_key, timestamp, JSON.stringify(rest)]);
@@ -12818,86 +12833,109 @@ async function appendEvent(event, projectPath) {
12818
12833
  throw new Error("Failed to insert event - no row returned");
12819
12834
  }
12820
12835
  const { id, sequence } = row;
12836
+ console.log("[SwarmMail] Event appended", {
12837
+ type,
12838
+ id,
12839
+ sequence,
12840
+ projectKey: project_key
12841
+ });
12842
+ console.debug("[SwarmMail] Updating materialized views", { type, id });
12821
12843
  await updateMaterializedViews(db, { ...event, id, sequence });
12822
12844
  return { ...event, id, sequence };
12823
12845
  }
12824
12846
  async function appendEvents(events, projectPath) {
12825
- const db = await getDatabase(projectPath);
12826
- const results = [];
12827
- await db.exec("BEGIN");
12828
- try {
12829
- for (const event of events) {
12830
- const { type, project_key, timestamp, ...rest } = event;
12831
- const result = await db.query(`INSERT INTO events (type, project_key, timestamp, data)
12832
- VALUES ($1, $2, $3, $4)
12833
- RETURNING id, sequence`, [type, project_key, timestamp, JSON.stringify(rest)]);
12834
- const row = result.rows[0];
12835
- if (!row) {
12836
- throw new Error("Failed to insert event - no row returned");
12837
- }
12838
- const { id, sequence } = row;
12839
- const enrichedEvent = { ...event, id, sequence };
12840
- await updateMaterializedViews(db, enrichedEvent);
12841
- results.push(enrichedEvent);
12842
- }
12843
- await db.exec("COMMIT");
12844
- } catch (error45) {
12845
- await db.exec("ROLLBACK");
12846
- throw error45;
12847
- }
12848
- return results;
12847
+ return withTiming("appendEvents", async () => {
12848
+ const db = await getDatabase(projectPath);
12849
+ const results = [];
12850
+ await db.exec("BEGIN");
12851
+ try {
12852
+ for (const event of events) {
12853
+ const { type, project_key, timestamp, ...rest } = event;
12854
+ const result = await db.query(`INSERT INTO events (type, project_key, timestamp, data)
12855
+ VALUES ($1, $2, $3, $4)
12856
+ RETURNING id, sequence`, [type, project_key, timestamp, JSON.stringify(rest)]);
12857
+ const row = result.rows[0];
12858
+ if (!row) {
12859
+ throw new Error("Failed to insert event - no row returned");
12860
+ }
12861
+ const { id, sequence } = row;
12862
+ const enrichedEvent = { ...event, id, sequence };
12863
+ await updateMaterializedViews(db, enrichedEvent);
12864
+ results.push(enrichedEvent);
12865
+ }
12866
+ await db.exec("COMMIT");
12867
+ } catch (e) {
12868
+ let rollbackError = null;
12869
+ try {
12870
+ await db.exec("ROLLBACK");
12871
+ } catch (rbErr) {
12872
+ rollbackError = rbErr;
12873
+ console.error("[SwarmMail] ROLLBACK failed:", rbErr);
12874
+ }
12875
+ if (rollbackError) {
12876
+ const compositeError = new Error(`Transaction failed: ${e instanceof Error ? e.message : String(e)}. ` + `ROLLBACK also failed: ${rollbackError instanceof Error ? rollbackError.message : String(rollbackError)}. ` + `Database may be in inconsistent state.`);
12877
+ compositeError.originalError = e;
12878
+ compositeError.rollbackError = rollbackError;
12879
+ throw compositeError;
12880
+ }
12881
+ throw e;
12882
+ }
12883
+ return results;
12884
+ });
12849
12885
  }
12850
12886
  async function readEvents(options2 = {}, projectPath) {
12851
- const db = await getDatabase(projectPath);
12852
- const conditions = [];
12853
- const params = [];
12854
- let paramIndex = 1;
12855
- if (options2.projectKey) {
12856
- conditions.push(`project_key = $${paramIndex++}`);
12857
- params.push(options2.projectKey);
12858
- }
12859
- if (options2.types && options2.types.length > 0) {
12860
- conditions.push(`type = ANY($${paramIndex++})`);
12861
- params.push(options2.types);
12862
- }
12863
- if (options2.since !== undefined) {
12864
- conditions.push(`timestamp >= $${paramIndex++}`);
12865
- params.push(options2.since);
12866
- }
12867
- if (options2.until !== undefined) {
12868
- conditions.push(`timestamp <= $${paramIndex++}`);
12869
- params.push(options2.until);
12870
- }
12871
- if (options2.afterSequence !== undefined) {
12872
- conditions.push(`sequence > $${paramIndex++}`);
12873
- params.push(options2.afterSequence);
12874
- }
12875
- const whereClause = conditions.length > 0 ? `WHERE ${conditions.join(" AND ")}` : "";
12876
- let query = `
12877
- SELECT id, type, project_key, timestamp, sequence, data
12878
- FROM events
12879
- ${whereClause}
12880
- ORDER BY sequence ASC
12881
- `;
12882
- if (options2.limit) {
12883
- query += ` LIMIT $${paramIndex++}`;
12884
- params.push(options2.limit);
12885
- }
12886
- if (options2.offset) {
12887
- query += ` OFFSET $${paramIndex++}`;
12888
- params.push(options2.offset);
12889
- }
12890
- const result = await db.query(query, params);
12891
- return result.rows.map((row) => {
12892
- const data = typeof row.data === "string" ? JSON.parse(row.data) : row.data;
12893
- return {
12894
- id: row.id,
12895
- type: row.type,
12896
- project_key: row.project_key,
12897
- timestamp: parseInt(row.timestamp),
12898
- sequence: row.sequence,
12899
- ...data
12900
- };
12887
+ return withTiming("readEvents", async () => {
12888
+ const db = await getDatabase(projectPath);
12889
+ const conditions = [];
12890
+ const params = [];
12891
+ let paramIndex = 1;
12892
+ if (options2.projectKey) {
12893
+ conditions.push(`project_key = $${paramIndex++}`);
12894
+ params.push(options2.projectKey);
12895
+ }
12896
+ if (options2.types && options2.types.length > 0) {
12897
+ conditions.push(`type = ANY($${paramIndex++})`);
12898
+ params.push(options2.types);
12899
+ }
12900
+ if (options2.since !== undefined) {
12901
+ conditions.push(`timestamp >= $${paramIndex++}`);
12902
+ params.push(options2.since);
12903
+ }
12904
+ if (options2.until !== undefined) {
12905
+ conditions.push(`timestamp <= $${paramIndex++}`);
12906
+ params.push(options2.until);
12907
+ }
12908
+ if (options2.afterSequence !== undefined) {
12909
+ conditions.push(`sequence > $${paramIndex++}`);
12910
+ params.push(options2.afterSequence);
12911
+ }
12912
+ const whereClause = conditions.length > 0 ? `WHERE ${conditions.join(" AND ")}` : "";
12913
+ let query = `
12914
+ SELECT id, type, project_key, timestamp, sequence, data
12915
+ FROM events
12916
+ ${whereClause}
12917
+ ORDER BY sequence ASC
12918
+ `;
12919
+ if (options2.limit) {
12920
+ query += ` LIMIT $${paramIndex++}`;
12921
+ params.push(options2.limit);
12922
+ }
12923
+ if (options2.offset) {
12924
+ query += ` OFFSET $${paramIndex++}`;
12925
+ params.push(options2.offset);
12926
+ }
12927
+ const result = await db.query(query, params);
12928
+ return result.rows.map((row) => {
12929
+ const data = typeof row.data === "string" ? JSON.parse(row.data) : row.data;
12930
+ return {
12931
+ id: row.id,
12932
+ type: row.type,
12933
+ project_key: row.project_key,
12934
+ timestamp: parseTimestamp(row.timestamp),
12935
+ sequence: row.sequence,
12936
+ ...data
12937
+ };
12938
+ });
12901
12939
  });
12902
12940
  }
12903
12941
  async function getLatestSequence(projectKey, projectPath) {
@@ -12908,71 +12946,131 @@ async function getLatestSequence(projectKey, projectPath) {
12908
12946
  return result.rows[0]?.seq ?? 0;
12909
12947
  }
12910
12948
  async function replayEvents(options2 = {}, projectPath) {
12911
- const startTime = Date.now();
12912
- const db = await getDatabase(projectPath);
12913
- if (options2.clearViews) {
12914
- if (options2.projectKey) {
12949
+ return withTiming("replayEvents", async () => {
12950
+ const startTime = Date.now();
12951
+ const db = await getDatabase(projectPath);
12952
+ if (options2.clearViews) {
12953
+ if (options2.projectKey) {
12954
+ await db.query(`DELETE FROM message_recipients WHERE message_id IN (
12955
+ SELECT id FROM messages WHERE project_key = $1
12956
+ )`, [options2.projectKey]);
12957
+ await db.query(`DELETE FROM messages WHERE project_key = $1`, [
12958
+ options2.projectKey
12959
+ ]);
12960
+ await db.query(`DELETE FROM reservations WHERE project_key = $1`, [
12961
+ options2.projectKey
12962
+ ]);
12963
+ await db.query(`DELETE FROM agents WHERE project_key = $1`, [
12964
+ options2.projectKey
12965
+ ]);
12966
+ } else {
12967
+ await db.exec(`
12968
+ DELETE FROM message_recipients;
12969
+ DELETE FROM messages;
12970
+ DELETE FROM reservations;
12971
+ DELETE FROM agents;
12972
+ `);
12973
+ }
12974
+ }
12975
+ const events = await readEvents({
12976
+ projectKey: options2.projectKey,
12977
+ afterSequence: options2.fromSequence
12978
+ }, projectPath);
12979
+ for (const event of events) {
12980
+ await updateMaterializedViews(db, event);
12981
+ }
12982
+ return {
12983
+ eventsReplayed: events.length,
12984
+ duration: Date.now() - startTime
12985
+ };
12986
+ });
12987
+ }
12988
+ async function replayEventsBatched(projectKey, onBatch, options2 = {}, projectPath) {
12989
+ return withTiming("replayEventsBatched", async () => {
12990
+ const startTime = Date.now();
12991
+ const batchSize = options2.batchSize ?? 1000;
12992
+ const fromSequence = options2.fromSequence ?? 0;
12993
+ const db = await getDatabase(projectPath);
12994
+ if (options2.clearViews) {
12915
12995
  await db.query(`DELETE FROM message_recipients WHERE message_id IN (
12916
12996
  SELECT id FROM messages WHERE project_key = $1
12917
- )`, [options2.projectKey]);
12997
+ )`, [projectKey]);
12918
12998
  await db.query(`DELETE FROM messages WHERE project_key = $1`, [
12919
- options2.projectKey
12999
+ projectKey
12920
13000
  ]);
12921
13001
  await db.query(`DELETE FROM reservations WHERE project_key = $1`, [
12922
- options2.projectKey
12923
- ]);
12924
- await db.query(`DELETE FROM agents WHERE project_key = $1`, [
12925
- options2.projectKey
13002
+ projectKey
12926
13003
  ]);
12927
- } else {
12928
- await db.exec(`
12929
- DELETE FROM message_recipients;
12930
- DELETE FROM messages;
12931
- DELETE FROM reservations;
12932
- DELETE FROM agents;
12933
- `);
13004
+ await db.query(`DELETE FROM agents WHERE project_key = $1`, [projectKey]);
13005
+ }
13006
+ const countResult = await db.query(`SELECT COUNT(*) as count FROM events WHERE project_key = $1 AND sequence > $2`, [projectKey, fromSequence]);
13007
+ const total = parseInt(countResult.rows[0]?.count ?? "0");
13008
+ if (total === 0) {
13009
+ return { eventsReplayed: 0, duration: Date.now() - startTime };
13010
+ }
13011
+ let processed = 0;
13012
+ let offset = 0;
13013
+ while (processed < total) {
13014
+ const events = await readEvents({
13015
+ projectKey,
13016
+ afterSequence: fromSequence,
13017
+ limit: batchSize,
13018
+ offset
13019
+ }, projectPath);
13020
+ if (events.length === 0)
13021
+ break;
13022
+ for (const event of events) {
13023
+ await updateMaterializedViews(db, event);
13024
+ }
13025
+ processed += events.length;
13026
+ const percent = Math.round(processed / total * 100);
13027
+ await onBatch(events, { processed, total, percent });
13028
+ console.log(`[SwarmMail] Replaying events: ${processed}/${total} (${percent}%)`);
13029
+ offset += batchSize;
12934
13030
  }
12935
- }
12936
- const events = await readEvents({
12937
- projectKey: options2.projectKey,
12938
- afterSequence: options2.fromSequence
12939
- }, projectPath);
12940
- for (const event of events) {
12941
- await updateMaterializedViews(db, event);
12942
- }
12943
- return {
12944
- eventsReplayed: events.length,
12945
- duration: Date.now() - startTime
12946
- };
13031
+ return {
13032
+ eventsReplayed: processed,
13033
+ duration: Date.now() - startTime
13034
+ };
13035
+ });
12947
13036
  }
12948
13037
  async function updateMaterializedViews(db, event) {
12949
- switch (event.type) {
12950
- case "agent_registered":
12951
- await handleAgentRegistered(db, event);
12952
- break;
12953
- case "agent_active":
12954
- await db.query(`UPDATE agents SET last_active_at = $1 WHERE project_key = $2 AND name = $3`, [event.timestamp, event.project_key, event.agent_name]);
12955
- break;
12956
- case "message_sent":
12957
- await handleMessageSent(db, event);
12958
- break;
12959
- case "message_read":
12960
- await db.query(`UPDATE message_recipients SET read_at = $1 WHERE message_id = $2 AND agent_name = $3`, [event.timestamp, event.message_id, event.agent_name]);
12961
- break;
12962
- case "message_acked":
12963
- await db.query(`UPDATE message_recipients SET acked_at = $1 WHERE message_id = $2 AND agent_name = $3`, [event.timestamp, event.message_id, event.agent_name]);
12964
- break;
12965
- case "file_reserved":
12966
- await handleFileReserved(db, event);
12967
- break;
12968
- case "file_released":
12969
- await handleFileReleased(db, event);
12970
- break;
12971
- case "task_started":
12972
- case "task_progress":
12973
- case "task_completed":
12974
- case "task_blocked":
12975
- break;
13038
+ try {
13039
+ switch (event.type) {
13040
+ case "agent_registered":
13041
+ await handleAgentRegistered(db, event);
13042
+ break;
13043
+ case "agent_active":
13044
+ await db.query(`UPDATE agents SET last_active_at = $1 WHERE project_key = $2 AND name = $3`, [event.timestamp, event.project_key, event.agent_name]);
13045
+ break;
13046
+ case "message_sent":
13047
+ await handleMessageSent(db, event);
13048
+ break;
13049
+ case "message_read":
13050
+ await db.query(`UPDATE message_recipients SET read_at = $1 WHERE message_id = $2 AND agent_name = $3`, [event.timestamp, event.message_id, event.agent_name]);
13051
+ break;
13052
+ case "message_acked":
13053
+ await db.query(`UPDATE message_recipients SET acked_at = $1 WHERE message_id = $2 AND agent_name = $3`, [event.timestamp, event.message_id, event.agent_name]);
13054
+ break;
13055
+ case "file_reserved":
13056
+ await handleFileReserved(db, event);
13057
+ break;
13058
+ case "file_released":
13059
+ await handleFileReleased(db, event);
13060
+ break;
13061
+ case "task_started":
13062
+ case "task_progress":
13063
+ case "task_completed":
13064
+ case "task_blocked":
13065
+ break;
13066
+ }
13067
+ } catch (error45) {
13068
+ console.error("[SwarmMail] Failed to update materialized views", {
13069
+ eventType: event.type,
13070
+ eventId: event.id,
13071
+ error: error45
13072
+ });
13073
+ throw error45;
12976
13074
  }
12977
13075
  }
12978
13076
  async function handleAgentRegistered(db, event) {
@@ -12992,6 +13090,12 @@ async function handleAgentRegistered(db, event) {
12992
13090
  ]);
12993
13091
  }
12994
13092
  async function handleMessageSent(db, event) {
13093
+ console.log("[SwarmMail] Handling message sent event", {
13094
+ from: event.from_agent,
13095
+ to: event.to_agents,
13096
+ subject: event.subject,
13097
+ projectKey: event.project_key
13098
+ });
12995
13099
  const result = await db.query(`INSERT INTO messages (project_key, from_agent, subject, body, thread_id, importance, ack_required, created_at)
12996
13100
  VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
12997
13101
  RETURNING id`, [
@@ -13009,24 +13113,49 @@ async function handleMessageSent(db, event) {
13009
13113
  throw new Error("Failed to insert message - no row returned");
13010
13114
  }
13011
13115
  const messageId = msgRow.id;
13012
- for (const agent of event.to_agents) {
13116
+ if (event.to_agents.length > 0) {
13117
+ const values = event.to_agents.map((_, i) => `($1, $${i + 2})`).join(", ");
13118
+ const params = [messageId, ...event.to_agents];
13013
13119
  await db.query(`INSERT INTO message_recipients (message_id, agent_name)
13014
- VALUES ($1, $2)
13015
- ON CONFLICT DO NOTHING`, [messageId, agent]);
13120
+ VALUES ${values}
13121
+ ON CONFLICT DO NOTHING`, params);
13122
+ console.log("[SwarmMail] Message recipients inserted", {
13123
+ messageId,
13124
+ recipientCount: event.to_agents.length
13125
+ });
13016
13126
  }
13017
13127
  }
13018
13128
  async function handleFileReserved(db, event) {
13019
- for (const path of event.paths) {
13020
- await db.query(`INSERT INTO reservations (project_key, agent_name, path_pattern, exclusive, reason, created_at, expires_at)
13021
- VALUES ($1, $2, $3, $4, $5, $6, $7)`, [
13129
+ console.log("[SwarmMail] Handling file reservation event", {
13130
+ agent: event.agent_name,
13131
+ paths: event.paths,
13132
+ exclusive: event.exclusive,
13133
+ projectKey: event.project_key
13134
+ });
13135
+ if (event.paths.length > 0) {
13136
+ const values = event.paths.map((_, i) => `($1, $2, $${i + 3}, $${event.paths.length + 3}, $${event.paths.length + 4}, $${event.paths.length + 5}, $${event.paths.length + 6})`).join(", ");
13137
+ const params = [
13022
13138
  event.project_key,
13023
13139
  event.agent_name,
13024
- path,
13140
+ ...event.paths,
13025
13141
  event.exclusive,
13026
13142
  event.reason || null,
13027
13143
  event.timestamp,
13028
13144
  event.expires_at
13029
- ]);
13145
+ ];
13146
+ if (event.paths.length > 0) {
13147
+ await db.query(`DELETE FROM reservations
13148
+ WHERE project_key = $1
13149
+ AND agent_name = $2
13150
+ AND path_pattern = ANY($3)
13151
+ AND released_at IS NULL`, [event.project_key, event.agent_name, event.paths]);
13152
+ }
13153
+ await db.query(`INSERT INTO reservations (project_key, agent_name, path_pattern, exclusive, reason, created_at, expires_at)
13154
+ VALUES ${values}`, params);
13155
+ console.log("[SwarmMail] File reservations inserted", {
13156
+ agent: event.agent_name,
13157
+ reservationCount: event.paths.length
13158
+ });
13030
13159
  }
13031
13160
  }
13032
13161
  async function handleFileReleased(db, event) {
@@ -13078,9 +13207,11 @@ async function reserveFiles(projectKey, agentName, paths, options2 = {}, project
13078
13207
  });
13079
13208
  return appendEvent(event, projectPath);
13080
13209
  }
13210
+ var TIMESTAMP_SAFE_UNTIL;
13081
13211
  var init_store = __esm(() => {
13082
13212
  init_streams();
13083
13213
  init_events();
13214
+ TIMESTAMP_SAFE_UNTIL = new Date("2286-01-01").getTime();
13084
13215
  });
13085
13216
 
13086
13217
  // node_modules/.pnpm/@isaacs+balanced-match@4.0.1/node_modules/@isaacs/balanced-match/dist/esm/index.js
@@ -14592,6 +14723,12 @@ async function checkConflicts(projectKey, agentName, paths, projectPath) {
14592
14723
  }
14593
14724
  for (const path2 of paths) {
14594
14725
  if (pathMatches(path2, reservation.path_pattern)) {
14726
+ console.warn("[SwarmMail] Conflict detected", {
14727
+ path: path2,
14728
+ holder: reservation.agent_name,
14729
+ pattern: reservation.path_pattern,
14730
+ requestedBy: agentName
14731
+ });
14595
14732
  conflicts.push({
14596
14733
  path: path2,
14597
14734
  holder: reservation.agent_name,
@@ -14601,6 +14738,13 @@ async function checkConflicts(projectKey, agentName, paths, projectPath) {
14601
14738
  }
14602
14739
  }
14603
14740
  }
14741
+ if (conflicts.length > 0) {
14742
+ console.warn("[SwarmMail] Total conflicts detected", {
14743
+ count: conflicts.length,
14744
+ requestedBy: agentName,
14745
+ paths
14746
+ });
14747
+ }
14604
14748
  return conflicts;
14605
14749
  }
14606
14750
  function pathMatches(path2, pattern) {
@@ -14892,7 +15036,18 @@ function getAgentFromEvent(event) {
14892
15036
  return "unknown";
14893
15037
  }
14894
15038
  async function debugEvents(options2) {
14895
- const { projectPath, types: types2, agentName, limit = 50, since, until } = options2;
15039
+ const {
15040
+ projectPath,
15041
+ types: types2,
15042
+ agentName,
15043
+ limit = 50,
15044
+ since,
15045
+ until,
15046
+ batchSize
15047
+ } = options2;
15048
+ if (batchSize && batchSize > 0) {
15049
+ return await debugEventsPaginated({ ...options2, batchSize });
15050
+ }
14896
15051
  const allEvents = await readEvents({
14897
15052
  projectKey: projectPath,
14898
15053
  types: types2,
@@ -14929,6 +15084,63 @@ async function debugEvents(options2) {
14929
15084
  total: filteredEvents.length
14930
15085
  };
14931
15086
  }
15087
+ async function debugEventsPaginated(options2) {
15088
+ const {
15089
+ projectPath,
15090
+ types: types2,
15091
+ agentName,
15092
+ limit = 50,
15093
+ since,
15094
+ until,
15095
+ batchSize
15096
+ } = options2;
15097
+ const allEvents = [];
15098
+ let offset = 0;
15099
+ let hasMore = true;
15100
+ while (hasMore && allEvents.length < limit) {
15101
+ const batch = await readEvents({
15102
+ projectKey: projectPath,
15103
+ types: types2,
15104
+ since,
15105
+ until,
15106
+ limit: batchSize,
15107
+ offset
15108
+ }, projectPath);
15109
+ if (batch.length === 0) {
15110
+ hasMore = false;
15111
+ break;
15112
+ }
15113
+ const filtered = agentName ? batch.filter((e) => {
15114
+ if ("agent_name" in e && e.agent_name === agentName)
15115
+ return true;
15116
+ if ("from_agent" in e && e.from_agent === agentName)
15117
+ return true;
15118
+ if ("to_agents" in e && e.to_agents?.includes(agentName))
15119
+ return true;
15120
+ return false;
15121
+ }) : batch;
15122
+ allEvents.push(...filtered);
15123
+ offset += batchSize;
15124
+ console.log(`[SwarmMail] Fetched ${allEvents.length} events (batch size: ${batchSize})`);
15125
+ }
15126
+ allEvents.sort((a, b) => b.sequence - a.sequence);
15127
+ const limitedEvents = allEvents.slice(0, limit);
15128
+ const events = limitedEvents.map((e) => {
15129
+ const { id, sequence, type, timestamp, project_key, ...rest } = e;
15130
+ return {
15131
+ id,
15132
+ sequence,
15133
+ type,
15134
+ timestamp,
15135
+ timestamp_human: formatTimestamp(timestamp),
15136
+ ...rest
15137
+ };
15138
+ });
15139
+ return {
15140
+ events,
15141
+ total: allEvents.length
15142
+ };
15143
+ }
14932
15144
  async function debugAgent(options2) {
14933
15145
  const { projectPath, agentName, includeEvents = false } = options2;
14934
15146
  const agent = await getAgent(projectPath, agentName, projectPath);
@@ -15271,6 +15483,8 @@ var init_migrations = __esm(() => {
15271
15483
  // src/streams/index.ts
15272
15484
  var exports_streams = {};
15273
15485
  __export(exports_streams, {
15486
+ withTiming: () => withTiming,
15487
+ withTimeout: () => withTimeout,
15274
15488
  sendSwarmMessage: () => sendSwarmMessage,
15275
15489
  sendMessage: () => sendMessage,
15276
15490
  sendAgentMessage: () => sendAgentMessage,
@@ -15280,6 +15494,7 @@ __export(exports_streams, {
15280
15494
  reserveSwarmFiles: () => reserveSwarmFiles,
15281
15495
  reserveFiles: () => reserveFiles,
15282
15496
  reserveAgentFiles: () => reserveAgentFiles,
15497
+ replayEventsBatched: () => replayEventsBatched,
15283
15498
  replayEvents: () => replayEvents,
15284
15499
  releaseSwarmFiles: () => releaseSwarmFiles,
15285
15500
  releaseAgentFiles: () => releaseAgentFiles,
@@ -15343,6 +15558,21 @@ import { PGlite } from "@electric-sql/pglite";
15343
15558
  import { existsSync, mkdirSync, appendFileSync } from "node:fs";
15344
15559
  import { join } from "node:path";
15345
15560
  import { homedir } from "node:os";
15561
+ async function withTimeout(promise2, ms, operation) {
15562
+ const timeout = new Promise((_, reject) => setTimeout(() => reject(new Error(`${operation} timed out after ${ms}ms`)), ms));
15563
+ return Promise.race([promise2, timeout]);
15564
+ }
15565
+ async function withTiming(operation, fn) {
15566
+ const start = performance.now();
15567
+ try {
15568
+ return await fn();
15569
+ } finally {
15570
+ const duration3 = performance.now() - start;
15571
+ if (duration3 > SLOW_QUERY_THRESHOLD_MS) {
15572
+ console.warn(`[SwarmMail] Slow operation: ${operation} took ${duration3.toFixed(1)}ms`);
15573
+ }
15574
+ }
15575
+ }
15346
15576
  function debugLog(message, data) {
15347
15577
  const timestamp = new Date().toISOString();
15348
15578
  const logLine = data ? `[${timestamp}] ${message}: ${JSON.stringify(data, null, 2)}
@@ -15622,7 +15852,7 @@ function handleExit() {
15622
15852
  } catch {}
15623
15853
  }
15624
15854
  }
15625
- var DEBUG_LOG_PATH, instances, pendingInstances, schemaInitialized, degradedInstances, lastAccess, MAX_CACHE_SIZE = 10;
15855
+ var SLOW_QUERY_THRESHOLD_MS = 100, DEBUG_LOG_PATH, instances, pendingInstances, schemaInitialized, degradedInstances, lastAccess, MAX_CACHE_SIZE = 10;
15626
15856
  var init_streams = __esm(() => {
15627
15857
  init_agent_mail();
15628
15858
  init_debug();
@@ -25060,93 +25290,878 @@ var require_built3 = __commonJS((exports, module) => {
25060
25290
  exports.print = print;
25061
25291
  });
25062
25292
 
25063
- // node_modules/.pnpm/kind-of@6.0.3/node_modules/kind-of/index.js
25064
- var require_kind_of = __commonJS((exports, module) => {
25065
- var toString = Object.prototype.toString;
25066
- module.exports = function kindOf(val) {
25067
- if (val === undefined)
25068
- return "undefined";
25069
- if (val === null)
25070
- return "null";
25071
- var type = typeof val;
25072
- if (type === "boolean")
25073
- return "boolean";
25074
- if (type === "string")
25075
- return "string";
25076
- if (type === "number")
25077
- return "number";
25078
- if (type === "symbol")
25079
- return "symbol";
25080
- if (type === "function") {
25081
- return isGeneratorFn(val) ? "generatorfunction" : "function";
25082
- }
25083
- if (isArray(val))
25084
- return "array";
25085
- if (isBuffer(val))
25086
- return "buffer";
25087
- if (isArguments(val))
25088
- return "arguments";
25089
- if (isDate(val))
25090
- return "date";
25091
- if (isError(val))
25092
- return "error";
25093
- if (isRegexp(val))
25094
- return "regexp";
25095
- switch (ctorName(val)) {
25096
- case "Symbol":
25097
- return "symbol";
25098
- case "Promise":
25099
- return "promise";
25100
- case "WeakMap":
25101
- return "weakmap";
25102
- case "WeakSet":
25103
- return "weakset";
25104
- case "Map":
25105
- return "map";
25106
- case "Set":
25107
- return "set";
25108
- case "Int8Array":
25109
- return "int8array";
25110
- case "Uint8Array":
25111
- return "uint8array";
25112
- case "Uint8ClampedArray":
25113
- return "uint8clampedarray";
25114
- case "Int16Array":
25115
- return "int16array";
25116
- case "Uint16Array":
25117
- return "uint16array";
25118
- case "Int32Array":
25119
- return "int32array";
25120
- case "Uint32Array":
25121
- return "uint32array";
25122
- case "Float32Array":
25123
- return "float32array";
25124
- case "Float64Array":
25125
- return "float64array";
25126
- }
25127
- if (isGeneratorObj(val)) {
25128
- return "generator";
25129
- }
25130
- type = toString.call(val);
25131
- switch (type) {
25132
- case "[object Object]":
25133
- return "object";
25134
- case "[object Map Iterator]":
25135
- return "mapiterator";
25136
- case "[object Set Iterator]":
25137
- return "setiterator";
25138
- case "[object String Iterator]":
25139
- return "stringiterator";
25140
- case "[object Array Iterator]":
25141
- return "arrayiterator";
25142
- }
25143
- return type.slice(8, -1).toLowerCase().replace(/\s/g, "");
25293
+ // src/swarm-strategies.ts
25294
+ var exports_swarm_strategies = {};
25295
+ __export(exports_swarm_strategies, {
25296
+ swarm_select_strategy: () => swarm_select_strategy,
25297
+ strategyTools: () => strategyTools,
25298
+ selectStrategy: () => selectStrategy,
25299
+ formatStrategyGuidelines: () => formatStrategyGuidelines,
25300
+ STRATEGIES: () => STRATEGIES,
25301
+ POSITIVE_MARKERS: () => POSITIVE_MARKERS,
25302
+ NEGATIVE_MARKERS: () => NEGATIVE_MARKERS,
25303
+ DecompositionStrategySchema: () => DecompositionStrategySchema
25304
+ });
25305
+ function selectStrategy(task) {
25306
+ const taskLower = task.toLowerCase();
25307
+ const scores = {
25308
+ "file-based": 0,
25309
+ "feature-based": 0,
25310
+ "risk-based": 0,
25311
+ "research-based": 0
25144
25312
  };
25145
- function ctorName(val) {
25146
- return typeof val.constructor === "function" ? val.constructor.name : null;
25147
- }
25148
- function isArray(val) {
25149
- if (Array.isArray)
25313
+ for (const [strategyName, definition] of Object.entries(STRATEGIES)) {
25314
+ const name = strategyName;
25315
+ for (const keyword of definition.keywords) {
25316
+ if (keyword.includes(" ")) {
25317
+ if (taskLower.includes(keyword)) {
25318
+ scores[name] += 1;
25319
+ }
25320
+ } else {
25321
+ const regex = new RegExp(`\\b${keyword}\\b`, "i");
25322
+ if (regex.test(taskLower)) {
25323
+ scores[name] += 1;
25324
+ }
25325
+ }
25326
+ }
25327
+ }
25328
+ const entries = Object.entries(scores);
25329
+ entries.sort((a, b) => b[1] - a[1]);
25330
+ const [winner, winnerScore] = entries[0];
25331
+ const [, runnerUpScore] = entries[1] || [null, 0];
25332
+ const totalScore = entries.reduce((sum, [, score]) => sum + score, 0);
25333
+ const confidence = totalScore > 0 ? Math.min(0.95, 0.5 + (winnerScore - runnerUpScore) / totalScore) : 0.5;
25334
+ let reasoning;
25335
+ if (winnerScore === 0) {
25336
+ reasoning = `No strong keyword signals. Defaulting to feature-based as it's most versatile.`;
25337
+ } else {
25338
+ const matchedKeywords = STRATEGIES[winner].keywords.filter((k) => taskLower.includes(k));
25339
+ reasoning = `Matched keywords: ${matchedKeywords.join(", ")}. ${STRATEGIES[winner].description}`;
25340
+ }
25341
+ const finalStrategy = winnerScore === 0 ? "feature-based" : winner;
25342
+ return {
25343
+ strategy: finalStrategy,
25344
+ confidence,
25345
+ reasoning,
25346
+ alternatives: entries.filter(([s]) => s !== finalStrategy).map(([strategy, score]) => ({ strategy, score }))
25347
+ };
25348
+ }
25349
+ function formatStrategyGuidelines(strategy) {
25350
+ const def = STRATEGIES[strategy];
25351
+ const guidelines = def.guidelines.map((g) => `- ${g}`).join(`
25352
+ `);
25353
+ const antiPatterns = def.antiPatterns.map((a) => `- ${a}`).join(`
25354
+ `);
25355
+ const examples = def.examples.map((e) => `- ${e}`).join(`
25356
+ `);
25357
+ return `## Strategy: ${strategy}
25358
+
25359
+ ${def.description}
25360
+
25361
+ ### Guidelines
25362
+ ${guidelines}
25363
+
25364
+ ### Anti-Patterns (Avoid These)
25365
+ ${antiPatterns}
25366
+
25367
+ ### Examples
25368
+ ${examples}`;
25369
+ }
25370
// Lazily-initialized module state for src/swarm-strategies.ts (bundler __esm wrapper).
var DecompositionStrategySchema, POSITIVE_MARKERS, NEGATIVE_MARKERS, STRATEGIES, swarm_select_strategy, strategyTools;
var init_swarm_strategies = __esm(() => {
  init_dist();
  init_zod();
  // Valid strategy names; "auto" asks the selector to choose among the other four.
  DecompositionStrategySchema = exports_external.enum([
    "file-based",
    "feature-based",
    "risk-based",
    "research-based",
    "auto"
  ]);
  // Linguistic markers used to classify rule sentences as prescriptive vs proscriptive.
  POSITIVE_MARKERS = ["always", "must", "required", "ensure", "use", "prefer"];
  NEGATIVE_MARKERS = ["never", "dont", "don't", "avoid", "forbid", "no ", "not "];
  // Knowledge base: per-strategy keyword triggers (matched against the task text),
  // plus human-readable guidelines / anti-patterns / examples surfaced to the agent.
  STRATEGIES = {
    "file-based": {
      name: "file-based",
      description: "Group by file type or directory. Best for refactoring, migrations, and pattern changes across codebase.",
      keywords: [
        "refactor",
        "migrate",
        "update all",
        "rename",
        "replace",
        "convert",
        "upgrade",
        "deprecate",
        "remove",
        "cleanup",
        "lint",
        "format"
      ],
      guidelines: [
        "Group files by directory or type (e.g., all components, all tests)",
        "Minimize cross-directory dependencies within a subtask",
        "Handle shared types/utilities first if they change",
        "Each subtask should be a complete transformation of its file set",
        "Consider import/export relationships when grouping"
      ],
      antiPatterns: [
        "Don't split tightly coupled files across subtasks",
        "Don't group files that have no relationship",
        "Don't forget to update imports when moving/renaming"
      ],
      examples: [
        "Migrate all components to new API → split by component directory",
        "Rename userId to accountId → split by module (types first, then consumers)",
        "Update all tests to use new matcher → split by test directory"
      ]
    },
    "feature-based": {
      name: "feature-based",
      description: "Vertical slices with UI + API + data. Best for new features and adding functionality.",
      keywords: [
        "add",
        "implement",
        "build",
        "create",
        "feature",
        "new",
        "integrate",
        "connect",
        "enable",
        "support"
      ],
      guidelines: [
        "Each subtask is a complete vertical slice (UI + logic + data)",
        "Start with data layer/types, then logic, then UI",
        "Keep related components together (form + validation + submission)",
        "Separate concerns that can be developed independently",
        "Consider user-facing features as natural boundaries"
      ],
      antiPatterns: [
        "Don't split a single feature across multiple subtasks",
        "Don't create subtasks that can't be tested independently",
        "Don't forget integration points between features"
      ],
      examples: [
        "Add user auth → [OAuth setup, Session management, Protected routes]",
        "Build dashboard → [Data fetching, Chart components, Layout/navigation]",
        "Add search → [Search API, Search UI, Results display]"
      ]
    },
    "risk-based": {
      name: "risk-based",
      description: "Isolate high-risk changes, add tests first. Best for bug fixes, security issues, and critical changes.",
      keywords: [
        "fix",
        "bug",
        "security",
        "vulnerability",
        "critical",
        "urgent",
        "hotfix",
        "patch",
        "audit",
        "review"
      ],
      guidelines: [
        "Write tests FIRST to capture expected behavior",
        "Isolate the risky change to minimize blast radius",
        "Add monitoring/logging around the change",
        "Create rollback plan as part of the task",
        "Audit similar code for the same issue"
      ],
      antiPatterns: [
        "Don't make multiple risky changes in one subtask",
        "Don't skip tests for 'simple' fixes",
        "Don't forget to check for similar issues elsewhere"
      ],
      examples: [
        "Fix auth bypass → [Add regression test, Fix vulnerability, Audit similar endpoints]",
        "Fix race condition → [Add test reproducing issue, Implement fix, Add concurrency tests]",
        "Security audit → [Scan for vulnerabilities, Fix critical issues, Document remaining risks]"
      ]
    },
    "research-based": {
      name: "research-based",
      description: "Parallel search across multiple sources, then synthesize. Best for investigation, learning, and discovery tasks.",
      keywords: [
        "research",
        "investigate",
        "explore",
        "find out",
        "discover",
        "understand",
        "learn about",
        "analyze",
        "what is",
        "what are",
        "how does",
        "how do",
        "why does",
        "why do",
        "compare",
        "evaluate",
        "study",
        "look up",
        "look into",
        "search for",
        "dig into",
        "figure out",
        "debug options",
        "debug levers",
        "configuration options",
        "environment variables",
        "available options",
        "documentation"
      ],
      guidelines: [
        "Split by information source (PDFs, repos, history, web)",
        "Each agent searches with different query angles",
        "Include a synthesis subtask that depends on all search subtasks",
        "Use pdf-brain for documentation/books if available",
        "Use repo-crawl for GitHub repos if URL provided",
        "Use cass for past agent session history",
        "Assign NO files to research subtasks (read-only)"
      ],
      antiPatterns: [
        "Don't have one agent search everything sequentially",
        "Don't skip synthesis - raw search results need consolidation",
        "Don't forget to check tool availability before assigning sources"
      ],
      examples: [
        "Research auth patterns → [Search PDFs, Search repos, Search history, Synthesize]",
        "Investigate error → [Search cass for similar errors, Search repo for error handling, Synthesize]",
        "Learn about library → [Search docs, Search examples, Search issues, Synthesize findings]"
      ]
    }
  };
  // Tool wrapper around selectStrategy().
  // FIX: the description previously listed only three strategies; this module
  // defines four (research-based was missing), so the description was inconsistent
  // with DecompositionStrategySchema and with what selectStrategy can return.
  swarm_select_strategy = tool({
    description: "Analyze task and recommend decomposition strategy (file-based, feature-based, risk-based, or research-based)",
    args: {
      task: tool.schema.string().min(1).describe("Task description to analyze"),
      codebase_context: tool.schema.string().optional().describe("Optional codebase context (file structure, tech stack, etc.)")
    },
    async execute(args) {
      const result = selectStrategy(args.task);
      let enhancedReasoning = result.reasoning;
      if (args.codebase_context) {
        // Context is advisory only; truncate to keep the reasoning short.
        enhancedReasoning += `

Codebase context considered: ${args.codebase_context.slice(0, 200)}...`;
      }
      return JSON.stringify({
        strategy: result.strategy,
        confidence: Math.round(result.confidence * 100) / 100,
        reasoning: enhancedReasoning,
        description: STRATEGIES[result.strategy].description,
        guidelines: STRATEGIES[result.strategy].guidelines,
        anti_patterns: STRATEGIES[result.strategy].antiPatterns,
        alternatives: result.alternatives.map((alt) => ({
          strategy: alt.strategy,
          description: STRATEGIES[alt.strategy].description,
          score: alt.score
        }))
      }, null, 2);
    }
  });
  strategyTools = {
    swarm_select_strategy
  };
});
25587
+
25588
// src/learning.ts
// Bundler-generated export map: registers every public symbol of src/learning.ts
// on the exports_learning namespace object as lazy getters (via __export), so the
// module can be required before init_learning has run.
var exports_learning = {};
__export(exports_learning, {
  shouldDeprecateCriterion: () => shouldDeprecateCriterion,
  scoreImplicitFeedback: () => scoreImplicitFeedback,
  outcomeToFeedback: () => outcomeToFeedback,
  learningSchemas: () => learningSchemas,
  isStrikedOut: () => isStrikedOut,
  getStrikes: () => getStrikes,
  getArchitecturePrompt: () => getArchitecturePrompt,
  formatMemoryValidationHint: () => formatMemoryValidationHint,
  formatMemoryStoreOnSuccess: () => formatMemoryStoreOnSuccess,
  formatMemoryStoreOn3Strike: () => formatMemoryStoreOn3Strike,
  formatMemoryQueryForDecomposition: () => formatMemoryQueryForDecomposition,
  clearStrikes: () => clearStrikes,
  calculateDecayedValue: () => calculateDecayedValue,
  calculateCriterionWeight: () => calculateCriterionWeight,
  applyWeights: () => applyWeights,
  addStrike: () => addStrike,
  StrikeRecordSchema: () => StrikeRecordSchema,
  ScoredOutcomeSchema: () => ScoredOutcomeSchema,
  OutcomeSignalsSchema: () => OutcomeSignalsSchema,
  InMemoryStrikeStorage: () => InMemoryStrikeStorage,
  InMemoryFeedbackStorage: () => InMemoryFeedbackStorage,
  InMemoryErrorStorage: () => InMemoryErrorStorage,
  FeedbackTypeSchema: () => FeedbackTypeSchema,
  FeedbackEventSchema: () => FeedbackEventSchema,
  // NOTE: exported under its source name; the "2" suffix is bundler renaming
  // to avoid a collision with another module's schema of the same name.
  FailureModeSchema: () => FailureModeSchema2,
  ErrorTypeSchema: () => ErrorTypeSchema,
  ErrorEntrySchema: () => ErrorEntrySchema,
  ErrorAccumulator: () => ErrorAccumulator,
  DecompositionStrategySchema: () => DecompositionStrategySchema2,
  DEFAULT_LEARNING_CONFIG: () => DEFAULT_LEARNING_CONFIG,
  CriterionWeightSchema: () => CriterionWeightSchema
});
25623
/**
 * Exponential half-life decay factor for a feedback event.
 * Returns a multiplier in (0, 1]: 1 for an event at (or after) `now`,
 * 0.5 once the event is exactly `halfLifeDays` old, 0.25 at twice that, etc.
 * A non-positive half-life is clamped to 1 day to avoid division by zero.
 */
function calculateDecayedValue(timestamp, now = new Date, halfLifeDays = 90) {
  const MS_PER_DAY = 24 * 60 * 60 * 1000;
  const halfLife = halfLifeDays > 0 ? halfLifeDays : 1;
  const elapsedMs = now.getTime() - new Date(timestamp).getTime();
  // Future timestamps decay to exactly 1 (age clamped at zero).
  const ageInDays = Math.max(0, elapsedMs / MS_PER_DAY);
  return Math.pow(0.5, ageInDays / halfLife);
}
25630
/**
 * Aggregate a criterion's feedback events into a single weight.
 * Each event contributes raw_value scaled by its half-life decay; the weight is
 * the helpful share of the total decayed mass, floored at 0.1 so a criterion is
 * never fully silenced. With no events (or zero total mass) the weight is the
 * neutral 1. last_validated tracks the newest helpful event's timestamp.
 */
function calculateCriterionWeight(events2, config2 = DEFAULT_LEARNING_CONFIG) {
  if (!events2.length) {
    // No observations: neutral placeholder record.
    return {
      criterion: "unknown",
      weight: 1,
      helpful_count: 0,
      harmful_count: 0,
      last_validated: undefined,
      half_life_days: config2.halfLifeDays
    };
  }
  const now = new Date;
  const tally = { helpfulSum: 0, harmfulSum: 0, helpfulCount: 0, harmfulCount: 0 };
  let lastValidated;
  for (const event of events2) {
    const decayed = calculateDecayedValue(event.timestamp, now, config2.halfLifeDays);
    const value = event.raw_value * decayed;
    if (event.type === "helpful") {
      tally.helpfulSum += value;
      tally.helpfulCount += 1;
      // ISO-8601 strings compare correctly lexicographically.
      if (lastValidated === undefined || event.timestamp > lastValidated) {
        lastValidated = event.timestamp;
      }
    } else if (event.type === "harmful") {
      tally.harmfulSum += value;
      tally.harmfulCount += 1;
    }
    // "neutral" events are counted nowhere by design.
  }
  const total = tally.helpfulSum + tally.harmfulSum;
  return {
    criterion: events2[0].criterion,
    weight: total > 0 ? Math.max(0.1, tally.helpfulSum / total) : 1,
    helpful_count: tally.helpfulCount,
    harmful_count: tally.harmfulCount,
    last_validated: lastValidated,
    half_life_days: config2.halfLifeDays
  };
}
25672
/**
 * Score task-outcome telemetry into helpful/harmful/neutral implicit feedback.
 * Combines success (40%) with duration, error-count, and retry-count components
 * (20% each); the raw score is then decayed by event age. >= 0.7 reads as
 * helpful, <= 0.4 as harmful, anything between as neutral.
 */
function scoreImplicitFeedback(signals, config2 = DEFAULT_LEARNING_CONFIG) {
  const now = new Date;
  const decayed = calculateDecayedValue(signals.timestamp, now, config2.halfLifeDays);
  // Component scores, each in [0.2, 1] (success is binary).
  let durationScore;
  if (signals.duration_ms < config2.fastCompletionThresholdMs) {
    durationScore = 1;
  } else if (signals.duration_ms > config2.slowCompletionThresholdMs) {
    durationScore = 0.2;
  } else {
    durationScore = 0.6;
  }
  let errorScore;
  if (signals.error_count === 0) {
    errorScore = 1;
  } else if (signals.error_count <= config2.maxErrorsForHelpful) {
    errorScore = 0.6;
  } else {
    errorScore = 0.2;
  }
  const retryScore = signals.retry_count === 0 ? 1 : signals.retry_count === 1 ? 0.7 : 0.3;
  const successScore = signals.success ? 1 : 0;
  const rawScore = successScore * 0.4 + durationScore * 0.2 + errorScore * 0.2 + retryScore * 0.2;
  const secs = Math.round(signals.duration_ms / 1000);
  const outcome = signals.success ? "succeeded" : "failed";
  let type;
  let reasoning;
  if (rawScore >= 0.7) {
    type = "helpful";
    reasoning = `Fast completion (${secs}s), ${signals.error_count} errors, ${signals.retry_count} retries, ${outcome}`;
  } else if (rawScore <= 0.4) {
    type = "harmful";
    reasoning = `Slow completion (${secs}s), ${signals.error_count} errors, ${signals.retry_count} retries, ${outcome}`;
  } else {
    type = "neutral";
    reasoning = `Mixed signals: ${secs}s, ${signals.error_count} errors, ${signals.retry_count} retries`;
  }
  return {
    signals,
    type,
    decayed_value: rawScore * decayed,
    reasoning
  };
}
25699
/**
 * Convert a scored outcome into a FeedbackEvent for one evaluation criterion.
 * The id embeds bead id, criterion, and the current wall clock for uniqueness;
 * raw_value carries the already-decayed score.
 */
function outcomeToFeedback(outcome, criterion) {
  const { signals, type, decayed_value, reasoning } = outcome;
  return {
    id: [signals.bead_id, criterion, Date.now()].join("-"),
    criterion,
    type,
    timestamp: signals.timestamp,
    context: reasoning,
    bead_id: signals.bead_id,
    raw_value: decayed_value
  };
}
25710
/**
 * Scale raw criterion scores by their learned weights.
 * Criteria without a learned weight default to 1 (unweighted). Returns a map of
 * criterion name -> { raw, weighted, weight }.
 */
function applyWeights(criteria, weights) {
  return Object.fromEntries(
    Object.entries(criteria).map(([name, raw]) => {
      const weight = weights[name]?.weight ?? 1;
      return [name, { raw, weighted: raw * weight, weight }];
    })
  );
}
25722
/**
 * Decide whether a criterion has proven harmful enough to deprecate.
 * Requires at least minFeedbackForAdjustment total observations; below that the
 * sample is too small and we keep the criterion. Otherwise deprecate when the
 * harmful fraction exceeds maxHarmfulRatio.
 */
function shouldDeprecateCriterion(weight, config2 = DEFAULT_LEARNING_CONFIG) {
  const sampleSize = weight.helpful_count + weight.harmful_count;
  return sampleSize >= config2.minFeedbackForAdjustment
    && weight.harmful_count / sampleSize > config2.maxHarmfulRatio;
}
25730
+
25731
/**
 * Bounded in-memory FeedbackEvent store with FIFO eviction: once maxSize is
 * exceeded, the oldest events are dropped. All methods are async to satisfy the
 * storage interface, though everything runs synchronously in memory.
 */
class InMemoryFeedbackStorage {
  events = [];
  maxSize;
  constructor(maxSize = 1e4) {
    this.maxSize = maxSize;
  }
  async store(event) {
    this.events.push(event);
    const overflow = this.events.length - this.maxSize;
    if (overflow > 0) {
      // Drop the oldest entries to get back under the cap.
      this.events = this.events.slice(overflow);
    }
  }
  async getByCriterion(criterion) {
    return this.events.filter((entry) => entry.criterion === criterion);
  }
  async getByBead(beadId) {
    return this.events.filter((entry) => entry.bead_id === beadId);
  }
  async getAll() {
    // Defensive copy so callers cannot mutate internal state.
    return this.events.slice();
  }
}
25753
+
25754
/**
 * In-memory strike-record store keyed by bead_id. Async methods mirror the
 * persistent storage interface; get() returns null (never undefined) on miss.
 */
class InMemoryStrikeStorage {
  strikes = new Map;
  async store(record2) {
    this.strikes.set(record2.bead_id, record2);
  }
  async get(beadId) {
    const found = this.strikes.get(beadId);
    return found === undefined ? null : found;
  }
  async getAll() {
    return [...this.strikes.values()];
  }
  async clear(beadId) {
    this.strikes.delete(beadId);
  }
}
25769
/**
 * Record a failed fix attempt against a bead. Creates the strike record on
 * first use, appends the failure, stamps last_strike_at (and first_strike_at
 * the first time), and caps strike_count at 3. Returns the updated record.
 */
async function addStrike(beadId, attempt, reason, storage = new InMemoryStrikeStorage) {
  const now = new Date().toISOString();
  const record2 = (await storage.get(beadId)) ?? {
    bead_id: beadId,
    strike_count: 0,
    failures: []
  };
  // Cap at 3 — the "architecture review" threshold.
  record2.strike_count = record2.strike_count >= 3 ? 3 : record2.strike_count + 1;
  record2.failures.push({ attempt, reason, timestamp: now });
  record2.last_strike_at = now;
  if (!record2.first_strike_at) {
    record2.first_strike_at = now;
  }
  await storage.store(record2);
  return record2;
}
25786
/** Current strike count for a bead; 0 when no record (or no count) exists. */
async function getStrikes(beadId, storage = new InMemoryStrikeStorage) {
  const found = await storage.get(beadId);
  if (!found) {
    return 0;
  }
  return found.strike_count ?? 0;
}
25790
/** True once a bead has accumulated 3 strikes (fix attempts exhausted). */
async function isStrikedOut(beadId, storage = new InMemoryStrikeStorage) {
  return (await getStrikes(beadId, storage)) >= 3;
}
25794
/**
 * Markdown escalation prompt shown after a bead's third failed fix attempt.
 * Returns "" while the bead has fewer than 3 strikes (or no record at all),
 * otherwise a briefing that lists every failure and forces an explicit
 * architecture decision before a fourth attempt.
 */
async function getArchitecturePrompt(beadId, storage = new InMemoryStrikeStorage) {
  const record2 = await storage.get(beadId);
  if (!record2 || record2.strike_count < 3) {
    return "";
  }
  const failuresList = record2.failures
    .map((f, i) => `${i + 1}. **${f.attempt}** - Failed: ${f.reason}`)
    .join("\n");
  return `## Architecture Review Required

This bead (\`${beadId}\`) has failed 3 consecutive fix attempts:

${failuresList}

This pattern suggests an **architectural problem**, not a bug.

**Questions to consider:**
- Is the current approach fundamentally sound?
- Should we refactor the architecture instead?
- Are we fixing symptoms instead of root cause?

**Options:**
1. **Refactor architecture** (describe new approach)
2. **Continue with Fix #4** (explain why this time is different)
3. **Abandon this approach entirely**

**DO NOT attempt Fix #4 without answering these questions.**
`;
}
25822
/** Remove a bead's strike record (e.g. after a successful fix or reset). */
async function clearStrikes(beadId, storage = new InMemoryStrikeStorage) {
  await storage.clear(beadId);
}
25825
+
25826
/**
 * Append-only in-memory ErrorEntry store. Entries are never removed; they are
 * flagged resolved in place via markResolved().
 */
class InMemoryErrorStorage {
  errors = [];
  async store(entry) {
    this.errors.push(entry);
  }
  async getByBead(beadId) {
    return this.errors.filter((entry) => entry.bead_id === beadId);
  }
  async getUnresolvedByBead(beadId) {
    return this.errors.filter((entry) => entry.bead_id === beadId && !entry.resolved);
  }
  async markResolved(id) {
    const match = this.errors.find((entry) => entry.id === id);
    if (match) {
      match.resolved = true;
    }
    // Unknown ids are silently ignored (same as before).
  }
  async getAll() {
    // Defensive copy so callers cannot mutate internal state.
    return this.errors.slice();
  }
}
25847
+
25848
// Collects per-bead execution errors, validates them against ErrorEntrySchema,
// and renders them as a markdown context section for retry prompts.
class ErrorAccumulator {
  storage;
  // Accepts any object implementing the error-storage interface; defaults to
  // the in-memory implementation above.
  constructor(storage) {
    this.storage = storage ?? new InMemoryErrorStorage;
  }
  // Create, schema-validate, and persist a new error entry.
  // The id embeds bead, type, and wall clock for uniqueness.
  // Throws (zod) if the assembled entry fails ErrorEntrySchema validation.
  async recordError(beadId, errorType, message, options2) {
    const entry = {
      id: `${beadId}-${errorType}-${Date.now()}`,
      bead_id: beadId,
      error_type: errorType,
      message,
      stack_trace: options2?.stack_trace,
      tool_name: options2?.tool_name,
      timestamp: new Date().toISOString(),
      resolved: false,
      context: options2?.context
    };
    const validated = ErrorEntrySchema.parse(entry);
    await this.storage.store(validated);
    return validated;
  }
  // All errors ever recorded for a bead (resolved included).
  async getErrors(beadId) {
    return this.storage.getByBead(beadId);
  }
  // Only the errors not yet marked resolved.
  async getUnresolvedErrors(beadId) {
    return this.storage.getUnresolvedByBead(beadId);
  }
  async resolveError(errorId) {
    await this.storage.markResolved(errorId);
  }
  // Render the bead's errors as a "## Previous Errors" markdown section,
  // grouped by error type. Returns "" when there is nothing to report.
  async getErrorContext(beadId, includeResolved = false) {
    const errors3 = includeResolved ? await this.getErrors(beadId) : await this.getUnresolvedErrors(beadId);
    if (errors3.length === 0) {
      return "";
    }
    // Group entries by error_type for per-type subsections.
    const byType = errors3.reduce((acc, err) => {
      const type = err.error_type;
      if (!acc[type]) {
        acc[type] = [];
      }
      acc[type].push(err);
      return acc;
    }, {});
    const lines = [
      "## Previous Errors",
      "",
      "The following errors were encountered during execution:",
      ""
    ];
    for (const [type, typeErrors] of Object.entries(byType)) {
      lines.push(`### ${type} (${typeErrors.length} error${typeErrors.length > 1 ? "s" : ""})`);
      lines.push("");
      for (const err of typeErrors) {
        lines.push(`- **${err.message}**`);
        if (err.context) {
          lines.push(`  - Context: ${err.context}`);
        }
        if (err.tool_name) {
          lines.push(`  - Tool: ${err.tool_name}`);
        }
        if (err.stack_trace) {
          // Stack traces are truncated to keep the prompt compact.
          lines.push(`  - Stack: \`${err.stack_trace.slice(0, 100)}...\``);
        }
        lines.push(`  - Time: ${new Date(err.timestamp).toLocaleString()}${err.resolved ? " (resolved)" : ""}`);
        lines.push("");
      }
    }
    lines.push("**Action Required**: Address these errors before proceeding. Consider:");
    lines.push("- What caused each error?");
    lines.push("- How can you prevent similar errors?");
    lines.push("- Are there patterns across error types?");
    lines.push("");
    return lines.join(`
`);
  }
  // Summary counts: total, unresolved, and a per-type histogram.
  async getErrorStats(beadId) {
    const allErrors = await this.getErrors(beadId);
    const unresolved = await this.getUnresolvedErrors(beadId);
    const byType = allErrors.reduce((acc, err) => {
      acc[err.error_type] = (acc[err.error_type] || 0) + 1;
      return acc;
    }, {});
    return {
      total: allErrors.length,
      unresolved: unresolved.length,
      by_type: byType
    };
  }
}
25937
/**
 * Payload instructing the agent to persist a successful task completion into
 * semantic-memory: free-text information plus comma-separated metadata tags.
 */
function formatMemoryStoreOnSuccess(beadId, summary, filesTouched, strategy) {
  const strategyInfo = strategy ? ` using ${strategy} strategy` : "";
  const touched = filesTouched.join(", ") || "none";
  return {
    information: `Task "${beadId}" completed successfully${strategyInfo}.
Key insight: ${summary}
Files touched: ${touched}`,
    metadata: `swarm, success, ${beadId}, ${strategy || "completion"}`,
    instruction: "Store this successful completion in semantic-memory for future reference"
  };
}
25947
/**
 * Payload instructing the agent to persist a 3-strike architecture failure
 * into semantic-memory, with each failed attempt listed on its own line.
 */
function formatMemoryStoreOn3Strike(beadId, failures) {
  const failuresList = failures
    .map((f, i) => `${i + 1}. ${f.attempt} - Failed: ${f.reason}`)
    .join("\n");
  return {
    information: `Architecture problem detected in ${beadId}: Task failed after 3 attempts.
Attempts:
${failuresList}

This indicates a structural issue requiring human decision, not another fix attempt.`,
    metadata: `architecture, 3-strike, ${beadId}, failure`,
    instruction: "Store this architectural problem in semantic-memory to avoid similar patterns in future"
  };
}
25960
/**
 * Query payload asking semantic-memory for past learnings similar to `task`,
 * issued before decomposition. `limit` caps the number of results (default 3).
 */
function formatMemoryQueryForDecomposition(task, limit = 3) {
  const instruction = "Query semantic-memory for relevant past learnings about similar tasks before decomposition";
  return { query: task, limit, instruction };
}
25967
/**
 * Reminder payload: after a successful task, validate any semantic-memory
 * entries that helped so their decay timers reset.
 */
function formatMemoryValidationHint(beadId) {
  const context = `Task ${beadId} completed successfully with assistance from past learnings`;
  return {
    instruction: "If any semantic-memory entries helped with this task, validate them to reset decay timer",
    context
  };
}
25973
// Lazily-initialized zod schemas and config for src/learning.ts (bundler __esm wrapper).
var FeedbackTypeSchema, FeedbackEventSchema, CriterionWeightSchema, ErrorTypeSchema, ErrorEntrySchema, DecompositionStrategySchema2, FailureModeSchema2, OutcomeSignalsSchema, ScoredOutcomeSchema, DEFAULT_LEARNING_CONFIG, StrikeRecordSchema, learningSchemas;
var init_learning = __esm(() => {
  init_zod();
  // Classification of a single feedback observation.
  FeedbackTypeSchema = exports_external.enum(["helpful", "harmful", "neutral"]);
  // One feedback observation against a criterion; raw_value is the pre-decay
  // score in [0, 1].
  FeedbackEventSchema = exports_external.object({
    id: exports_external.string(),
    criterion: exports_external.string(),
    type: FeedbackTypeSchema,
    timestamp: exports_external.string(),
    context: exports_external.string().optional(),
    bead_id: exports_external.string().optional(),
    raw_value: exports_external.number().min(0).max(1).default(1)
  });
  // Aggregated weight for a criterion (see calculateCriterionWeight).
  CriterionWeightSchema = exports_external.object({
    criterion: exports_external.string(),
    weight: exports_external.number().min(0).max(1),
    helpful_count: exports_external.number().int().min(0),
    harmful_count: exports_external.number().int().min(0),
    last_validated: exports_external.string().optional(),
    half_life_days: exports_external.number().positive().default(90)
  });
  // Coarse error categories recorded by ErrorAccumulator.
  ErrorTypeSchema = exports_external.enum([
    "validation",
    "timeout",
    "conflict",
    "tool_failure",
    "unknown"
  ]);
  // A single recorded execution error for a bead.
  ErrorEntrySchema = exports_external.object({
    id: exports_external.string(),
    bead_id: exports_external.string(),
    error_type: ErrorTypeSchema,
    message: exports_external.string(),
    stack_trace: exports_external.string().optional(),
    tool_name: exports_external.string().optional(),
    timestamp: exports_external.string(),
    resolved: exports_external.boolean().default(false),
    context: exports_external.string().optional()
  });
  // NOTE: "2" suffix is bundler renaming — this is src/learning.ts's own copy,
  // which (unlike the strategies module's schema) does not include "auto".
  DecompositionStrategySchema2 = exports_external.enum([
    "file-based",
    "feature-based",
    "risk-based",
    "research-based"
  ]);
  // How a failed subtask failed, for outcome telemetry.
  FailureModeSchema2 = exports_external.enum([
    "timeout",
    "conflict",
    "validation",
    "tool_failure",
    "context_overflow",
    "dependency_blocked",
    "user_cancelled",
    "unknown"
  ]);
  // Raw telemetry from one subtask execution (input to scoreImplicitFeedback).
  OutcomeSignalsSchema = exports_external.object({
    bead_id: exports_external.string(),
    duration_ms: exports_external.number().int().min(0),
    error_count: exports_external.number().int().min(0),
    retry_count: exports_external.number().int().min(0),
    success: exports_external.boolean(),
    files_touched: exports_external.array(exports_external.string()).default([]),
    timestamp: exports_external.string(),
    strategy: DecompositionStrategySchema2.optional(),
    failure_mode: FailureModeSchema2.optional(),
    failure_details: exports_external.string().optional()
  });
  // Output of scoreImplicitFeedback: telemetry plus its classification.
  ScoredOutcomeSchema = exports_external.object({
    signals: OutcomeSignalsSchema,
    type: FeedbackTypeSchema,
    decayed_value: exports_external.number().min(0).max(1),
    reasoning: exports_external.string()
  });
  // Tuning knobs for decay, deprecation thresholds, and implicit scoring.
  DEFAULT_LEARNING_CONFIG = {
    halfLifeDays: 90,
    minFeedbackForAdjustment: 3,
    maxHarmfulRatio: 0.3,
    fastCompletionThresholdMs: 5 * 60 * 1000,
    slowCompletionThresholdMs: 30 * 60 * 1000,
    maxErrorsForHelpful: 2
  };
  // Per-bead record of failed fix attempts (capped at 3 strikes).
  StrikeRecordSchema = exports_external.object({
    bead_id: exports_external.string(),
    strike_count: exports_external.number().int().min(0).max(3),
    failures: exports_external.array(exports_external.object({
      attempt: exports_external.string(),
      reason: exports_external.string(),
      timestamp: exports_external.string()
    })),
    first_strike_at: exports_external.string().optional(),
    last_strike_at: exports_external.string().optional()
  });
  // Convenience bundle of every schema above, re-exported under source names.
  learningSchemas = {
    FeedbackTypeSchema,
    FeedbackEventSchema,
    CriterionWeightSchema,
    OutcomeSignalsSchema,
    ScoredOutcomeSchema,
    DecompositionStrategySchema: DecompositionStrategySchema2,
    ErrorTypeSchema,
    ErrorEntrySchema,
    StrikeRecordSchema
  };
});
26077
+
26078
+ // node_modules/.pnpm/kind-of@6.0.3/node_modules/kind-of/index.js
26079
+ var require_kind_of = __commonJS((exports, module) => {
26080
+ var toString = Object.prototype.toString;
26081
// Vendored kind-of@6.0.3 — returns a lowercase type-name string for any value.
// Kept byte-identical to upstream; comments only.
module.exports = function kindOf(val) {
  // Fast paths for primitives and null/undefined.
  if (val === undefined)
    return "undefined";
  if (val === null)
    return "null";
  var type = typeof val;
  if (type === "boolean")
    return "boolean";
  if (type === "string")
    return "string";
  if (type === "number")
    return "number";
  if (type === "symbol")
    return "symbol";
  if (type === "function") {
    return isGeneratorFn(val) ? "generatorfunction" : "function";
  }
  // Common object kinds checked via helpers before falling back to ctor name.
  if (isArray(val))
    return "array";
  if (isBuffer(val))
    return "buffer";
  if (isArguments(val))
    return "arguments";
  if (isDate(val))
    return "date";
  if (isError(val))
    return "error";
  if (isRegexp(val))
    return "regexp";
  // Built-ins distinguishable by constructor name.
  switch (ctorName(val)) {
    case "Symbol":
      return "symbol";
    case "Promise":
      return "promise";
    case "WeakMap":
      return "weakmap";
    case "WeakSet":
      return "weakset";
    case "Map":
      return "map";
    case "Set":
      return "set";
    case "Int8Array":
      return "int8array";
    case "Uint8Array":
      return "uint8array";
    case "Uint8ClampedArray":
      return "uint8clampedarray";
    case "Int16Array":
      return "int16array";
    case "Uint16Array":
      return "uint16array";
    case "Int32Array":
      return "int32array";
    case "Uint32Array":
      return "uint32array";
    case "Float32Array":
      return "float32array";
    case "Float64Array":
      return "float64array";
  }
  if (isGeneratorObj(val)) {
    return "generator";
  }
  // Last resort: parse Object.prototype.toString, e.g. "[object Object]".
  type = toString.call(val);
  switch (type) {
    case "[object Object]":
      return "object";
    case "[object Map Iterator]":
      return "mapiterator";
    case "[object Set Iterator]":
      return "setiterator";
    case "[object String Iterator]":
      return "stringiterator";
    case "[object Array Iterator]":
      return "arrayiterator";
  }
  // Strip "[object " / "]" and normalize to a lowercase, space-free name.
  return type.slice(8, -1).toLowerCase().replace(/\s/g, "");
};
26160
// Vendored kind-of helper: constructor name of a value, or null when the
// constructor is missing/not a function.
function ctorName(val) {
  return typeof val.constructor === "function" ? val.constructor.name : null;
}
26163
// Vendored kind-of helper: array check with a pre-ES5 fallback for engines
// lacking Array.isArray.
function isArray(val) {
  if (Array.isArray)
    return Array.isArray(val);
  return val instanceof Array;
}
@@ -29459,17 +30474,23 @@ var BeadDependencySchema = exports_external.object({
29459
30474
  type: exports_external.enum(["blocks", "blocked-by", "related", "discovered-from"])
29460
30475
  });
29461
30476
  var BeadSchema = exports_external.object({
29462
- id: exports_external.string().regex(/^[a-z0-9]+(-[a-z0-9]+)+(\.[\w-]+)?$/, "Invalid bead ID format"),
30477
+ id: exports_external.string().regex(/^[a-z0-9]+(-[a-z0-9]+)+(\.[\w-]+)?$/, "Invalid bead ID format (expected: project-slug-hash or project-slug-hash.N)"),
29463
30478
  title: exports_external.string().min(1, "Title required"),
29464
30479
  description: exports_external.string().optional().default(""),
29465
30480
  status: BeadStatusSchema.default("open"),
29466
30481
  priority: exports_external.number().int().min(0).max(3).default(2),
29467
30482
  issue_type: BeadTypeSchema.default("task"),
29468
- created_at: exports_external.string().datetime({ offset: true }),
29469
- updated_at: exports_external.string().datetime({ offset: true }).optional(),
30483
+ created_at: exports_external.string().datetime({
30484
+ offset: true,
30485
+ message: "Must be ISO-8601 datetime with timezone (e.g., 2024-01-15T10:30:00Z)"
30486
+ }),
30487
+ updated_at: exports_external.string().datetime({
30488
+ offset: true,
30489
+ message: "Must be ISO-8601 datetime with timezone (e.g., 2024-01-15T10:30:00Z)"
30490
+ }).optional(),
29470
30491
  closed_at: exports_external.string().datetime({ offset: true }).optional(),
29471
30492
  parent_id: exports_external.string().optional(),
29472
- dependencies: exports_external.array(BeadDependencySchema).optional().default([]),
30493
+ dependencies: exports_external.array(BeadDependencySchema).default([]),
29473
30494
  metadata: exports_external.record(exports_external.string(), exports_external.unknown()).optional()
29474
30495
  });
29475
30496
  var BeadCreateArgsSchema = exports_external.object({
@@ -29546,16 +30567,11 @@ var EvaluationSchema = exports_external.object({
29546
30567
  retry_suggestion: exports_external.string().nullable(),
29547
30568
  timestamp: exports_external.string().datetime({ offset: true }).optional()
29548
30569
  });
29549
- var DEFAULT_CRITERIA = [
29550
- "type_safe",
29551
- "no_bugs",
29552
- "patterns",
29553
- "readable"
29554
- ];
29555
30570
  var EvaluationRequestSchema = exports_external.object({
29556
- subtask_id: exports_external.string(),
29557
- criteria: exports_external.array(exports_external.string()).default([...DEFAULT_CRITERIA]),
29558
- context: exports_external.string().optional()
30571
+ bead_id: exports_external.string(),
30572
+ subtask_title: exports_external.string(),
30573
+ files_touched: exports_external.array(exports_external.string()),
30574
+ requested_at: exports_external.string().datetime().optional()
29559
30575
  });
29560
30576
  var WeightedEvaluationSchema = exports_external.object({
29561
30577
  passed: exports_external.boolean(),
@@ -29657,7 +30673,7 @@ var AgentProgressSchema = exports_external.object({
29657
30673
  files_touched: exports_external.array(exports_external.string()).optional(),
29658
30674
  blockers: exports_external.array(exports_external.string()).optional(),
29659
30675
  timestamp: exports_external.string().datetime({ offset: true })
29660
- });
30676
+ }).refine((data) => data.status !== "blocked" || data.blockers && data.blockers.length > 0, { message: "blockers array required when status is 'blocked'" });
29661
30677
  var SwarmStatusSchema = exports_external.object({
29662
30678
  epic_id: exports_external.string(),
29663
30679
  total_agents: exports_external.number().int().min(0),
@@ -30251,6 +31267,8 @@ init_dist();
30251
31267
 
30252
31268
  // src/tool-availability.ts
30253
31269
  init_swarm_mail();
31270
+ var DEFAULT_URL_TIMEOUT_MS = 2000;
31271
+ var BUNX_TIMEOUT_MS = 1e4;
30254
31272
  var toolCache = new Map;
30255
31273
  var warningsLogged = new Set;
30256
31274
  async function commandExists(cmd) {
@@ -30261,7 +31279,7 @@ async function commandExists(cmd) {
30261
31279
  return false;
30262
31280
  }
30263
31281
  }
30264
- async function urlReachable(url2, timeoutMs = 2000) {
31282
+ async function urlReachable(url2, timeoutMs = DEFAULT_URL_TIMEOUT_MS) {
30265
31283
  try {
30266
31284
  const controller = new AbortController;
30267
31285
  const timeout = setTimeout(() => controller.abort(), timeoutMs);
@@ -30299,7 +31317,7 @@ var toolCheckers = {
30299
31317
  stdout: "pipe",
30300
31318
  stderr: "pipe"
30301
31319
  });
30302
- const timeout = setTimeout(() => proc.kill(), 1e4);
31320
+ const timeout = setTimeout(() => proc.kill(), BUNX_TIMEOUT_MS);
30303
31321
  const exitCode = await proc.exited;
30304
31322
  clearTimeout(timeout);
30305
31323
  return {
@@ -30659,14 +31677,35 @@ class SqliteRateLimiter {
30659
31677
  }
30660
31678
  return { allowed, remaining, resetAt };
30661
31679
  }
31680
+ cleanup() {
31681
+ const BATCH_SIZE = 1000;
31682
+ const MAX_BATCHES = 10;
31683
+ const cutoff = Date.now() - 7200000;
31684
+ let totalDeleted = 0;
31685
+ for (let i = 0;i < MAX_BATCHES; i++) {
31686
+ const result = this.db.run(`DELETE FROM rate_limits
31687
+ WHERE rowid IN (
31688
+ SELECT rowid FROM rate_limits
31689
+ WHERE timestamp < ?
31690
+ LIMIT ?
31691
+ )`, [cutoff, BATCH_SIZE]);
31692
+ totalDeleted += result.changes;
31693
+ if (result.changes < BATCH_SIZE)
31694
+ break;
31695
+ }
31696
+ if (totalDeleted > 0) {
31697
+ console.log("[RateLimiter] Cleanup completed:", {
31698
+ deletedRows: totalDeleted
31699
+ });
31700
+ }
31701
+ }
30662
31702
  async recordRequest(agentName, endpoint) {
30663
31703
  const now = Date.now();
30664
31704
  const stmt = this.db.prepare(`INSERT INTO rate_limits (agent_name, endpoint, window, timestamp) VALUES (?, ?, ?, ?)`);
30665
31705
  stmt.run(agentName, endpoint, "minute", now);
30666
31706
  stmt.run(agentName, endpoint, "hour", now);
30667
31707
  if (Math.random() < 0.01) {
30668
- const cutoff = Date.now() - 7200000;
30669
- this.db.run(`DELETE FROM rate_limits WHERE timestamp < ?`, [cutoff]);
31708
+ this.cleanup();
30670
31709
  }
30671
31710
  }
30672
31711
  async close() {
@@ -31947,6 +32986,7 @@ function extractJsonFromText(text) {
31947
32986
  }
31948
32987
  throw new JsonExtractionError("Could not extract valid JSON from response", text, strategies);
31949
32988
  }
32989
+ var MAX_BRACE_DEPTH = 100;
31950
32990
  function findBalancedBraces(text, open, close) {
31951
32991
  const startIdx = text.indexOf(open);
31952
32992
  if (startIdx === -1)
@@ -31972,8 +33012,14 @@ function findBalancedBraces(text, open, close) {
31972
33012
  continue;
31973
33013
  if (char === open) {
31974
33014
  depth++;
33015
+ if (depth > MAX_BRACE_DEPTH) {
33016
+ return null;
33017
+ }
31975
33018
  } else if (char === close) {
31976
33019
  depth--;
33020
+ if (depth < 0) {
33021
+ return null;
33022
+ }
31977
33023
  if (depth === 0) {
31978
33024
  return text.slice(startIdx, i + 1);
31979
33025
  }
@@ -31992,6 +33038,7 @@ function attemptJsonRepair(text) {
31992
33038
  repaired = repaired.replace(/"([^"]*)\n([^"]*)"/g, (_, before, after) => `"${before}\\n${after}"`);
31993
33039
  return repaired;
31994
33040
  }
33041
+ var RAW_INPUT_PREVIEW_LENGTH = 200;
31995
33042
  var structured_extract_json = tool({
31996
33043
  description: "Extract JSON from markdown/text response. Tries multiple strategies: direct parse, code blocks, brace matching, JSON repair.",
31997
33044
  args: {
@@ -32011,7 +33058,7 @@ var structured_extract_json = tool({
32011
33058
  success: false,
32012
33059
  error: error45.message,
32013
33060
  attempted_strategies: error45.attemptedStrategies,
32014
- raw_input_preview: args.text.slice(0, 200)
33061
+ raw_input_preview: args.text.slice(0, RAW_INPUT_PREVIEW_LENGTH)
32015
33062
  }, null, 2);
32016
33063
  }
32017
33064
  throw error45;
@@ -32022,7 +33069,7 @@ var structured_validate = tool({
32022
33069
  description: "Validate agent response against a schema. Extracts JSON and validates with Zod. Returns structured errors for retry feedback.",
32023
33070
  args: {
32024
33071
  response: tool.schema.string().describe("Agent response to validate"),
32025
- schema_name: tool.schema.enum(["evaluation", "task_decomposition", "bead_tree"]).describe("Schema to validate against"),
33072
+ schema_name: tool.schema.enum(["evaluation", "task_decomposition", "bead_tree"]).describe("Schema to validate against: " + "evaluation = agent self-eval with criteria, " + "task_decomposition = swarm task breakdown, " + "bead_tree = epic with subtasks"),
32026
33073
  max_retries: tool.schema.number().min(1).max(5).optional().describe("Max retries (for tracking - actual retry logic is external)")
32027
33074
  },
32028
33075
  async execute(args, ctx) {
@@ -32032,6 +33079,13 @@ var structured_validate = tool({
32032
33079
  attempts: 1,
32033
33080
  errors: []
32034
33081
  };
33082
+ if (!args.response || args.response.trim().length === 0) {
33083
+ return JSON.stringify({
33084
+ valid: false,
33085
+ error: "Response is empty or contains only whitespace",
33086
+ raw_input: "(empty)"
33087
+ });
33088
+ }
32035
33089
  let extracted;
32036
33090
  let extractionMethod;
32037
33091
  try {
@@ -32041,7 +33095,7 @@ var structured_validate = tool({
32041
33095
  if (error45 instanceof JsonExtractionError) {
32042
33096
  result.errors = [
32043
33097
  `JSON extraction failed after trying: ${error45.attemptedStrategies.join(", ")}`,
32044
- `Input preview: ${args.response.slice(0, 100)}...`
33098
+ `Input preview: ${args.response.slice(0, RAW_INPUT_PREVIEW_LENGTH)}...`
32045
33099
  ];
32046
33100
  return JSON.stringify(result, null, 2);
32047
33101
  }
@@ -32084,7 +33138,10 @@ var structured_parse_evaluation = tool({
32084
33138
  summary: {
32085
33139
  passed: validated.passed,
32086
33140
  criteria_count: Object.keys(validated.criteria).length,
32087
- failed_criteria: Object.entries(validated.criteria).filter(([_, v]) => !v.passed).map(([k]) => k)
33141
+ failed_criteria: Object.entries(validated.criteria).filter(([_, v]) => {
33142
+ const criterion = v;
33143
+ return !criterion.passed;
33144
+ }).map(([k]) => k)
32088
33145
  }
32089
33146
  }, null, 2);
32090
33147
  } catch (error45) {
@@ -32269,350 +33326,137 @@ var structuredTools = {
32269
33326
  };
32270
33327
 
32271
33328
  // src/swarm.ts
33329
+ init_swarm_strategies();
33330
+
33331
+ // src/swarm-decompose.ts
32272
33332
  init_dist();
32273
33333
  init_zod();
32274
- init_swarm_mail();
33334
+ init_swarm_strategies();
33335
+ var DECOMPOSITION_PROMPT = `You are decomposing a task into parallelizable subtasks for a swarm of agents.
32275
33336
 
32276
- // src/learning.ts
32277
- init_zod();
32278
- var FeedbackTypeSchema = exports_external.enum(["helpful", "harmful", "neutral"]);
32279
- var FeedbackEventSchema = exports_external.object({
32280
- id: exports_external.string(),
32281
- criterion: exports_external.string(),
32282
- type: FeedbackTypeSchema,
32283
- timestamp: exports_external.string(),
32284
- context: exports_external.string().optional(),
32285
- bead_id: exports_external.string().optional(),
32286
- raw_value: exports_external.number().min(0).max(1).default(1)
32287
- });
32288
- var CriterionWeightSchema = exports_external.object({
32289
- criterion: exports_external.string(),
32290
- weight: exports_external.number().min(0).max(1),
32291
- helpful_count: exports_external.number().int().min(0),
32292
- harmful_count: exports_external.number().int().min(0),
32293
- last_validated: exports_external.string().optional(),
32294
- half_life_days: exports_external.number().positive().default(90)
32295
- });
32296
- var ErrorTypeSchema = exports_external.enum([
32297
- "validation",
32298
- "timeout",
32299
- "conflict",
32300
- "tool_failure",
32301
- "unknown"
32302
- ]);
32303
- var ErrorEntrySchema = exports_external.object({
32304
- id: exports_external.string(),
32305
- bead_id: exports_external.string(),
32306
- error_type: ErrorTypeSchema,
32307
- message: exports_external.string(),
32308
- stack_trace: exports_external.string().optional(),
32309
- tool_name: exports_external.string().optional(),
32310
- timestamp: exports_external.string(),
32311
- resolved: exports_external.boolean().default(false),
32312
- context: exports_external.string().optional()
32313
- });
32314
- var DecompositionStrategySchema = exports_external.enum([
32315
- "file-based",
32316
- "feature-based",
32317
- "risk-based",
32318
- "research-based"
32319
- ]);
32320
- var FailureModeSchema2 = exports_external.enum([
32321
- "timeout",
32322
- "conflict",
32323
- "validation",
32324
- "tool_failure",
32325
- "context_overflow",
32326
- "dependency_blocked",
32327
- "user_cancelled",
32328
- "unknown"
32329
- ]);
32330
- var OutcomeSignalsSchema = exports_external.object({
32331
- bead_id: exports_external.string(),
32332
- duration_ms: exports_external.number().int().min(0),
32333
- error_count: exports_external.number().int().min(0),
32334
- retry_count: exports_external.number().int().min(0),
32335
- success: exports_external.boolean(),
32336
- files_touched: exports_external.array(exports_external.string()).default([]),
32337
- timestamp: exports_external.string(),
32338
- strategy: DecompositionStrategySchema.optional(),
32339
- failure_mode: FailureModeSchema2.optional(),
32340
- failure_details: exports_external.string().optional()
32341
- });
32342
- var ScoredOutcomeSchema = exports_external.object({
32343
- signals: OutcomeSignalsSchema,
32344
- type: FeedbackTypeSchema,
32345
- decayed_value: exports_external.number().min(0).max(1),
32346
- reasoning: exports_external.string()
32347
- });
32348
- var DEFAULT_LEARNING_CONFIG = {
32349
- halfLifeDays: 90,
32350
- minFeedbackForAdjustment: 3,
32351
- maxHarmfulRatio: 0.3,
32352
- fastCompletionThresholdMs: 5 * 60 * 1000,
32353
- slowCompletionThresholdMs: 30 * 60 * 1000,
32354
- maxErrorsForHelpful: 2
32355
- };
32356
- function calculateDecayedValue(timestamp, now = new Date, halfLifeDays = 90) {
32357
- const safeHalfLife = halfLifeDays <= 0 ? 1 : halfLifeDays;
32358
- const eventTime = new Date(timestamp).getTime();
32359
- const nowTime = now.getTime();
32360
- const ageDays = Math.max(0, (nowTime - eventTime) / (24 * 60 * 60 * 1000));
32361
- return Math.pow(0.5, ageDays / safeHalfLife);
32362
- }
32363
- function scoreImplicitFeedback(signals, config2 = DEFAULT_LEARNING_CONFIG) {
32364
- const now = new Date;
32365
- const decayed = calculateDecayedValue(signals.timestamp, now, config2.halfLifeDays);
32366
- const durationScore = signals.duration_ms < config2.fastCompletionThresholdMs ? 1 : signals.duration_ms > config2.slowCompletionThresholdMs ? 0.2 : 0.6;
32367
- const errorScore = signals.error_count === 0 ? 1 : signals.error_count <= config2.maxErrorsForHelpful ? 0.6 : 0.2;
32368
- const retryScore = signals.retry_count === 0 ? 1 : signals.retry_count === 1 ? 0.7 : 0.3;
32369
- const successScore = signals.success ? 1 : 0;
32370
- const rawScore = successScore * 0.4 + durationScore * 0.2 + errorScore * 0.2 + retryScore * 0.2;
32371
- let type;
32372
- let reasoning;
32373
- if (rawScore >= 0.7) {
32374
- type = "helpful";
32375
- reasoning = `Fast completion (${Math.round(signals.duration_ms / 1000)}s), ` + `${signals.error_count} errors, ${signals.retry_count} retries, ` + `${signals.success ? "succeeded" : "failed"}`;
32376
- } else if (rawScore <= 0.4) {
32377
- type = "harmful";
32378
- reasoning = `Slow completion (${Math.round(signals.duration_ms / 1000)}s), ` + `${signals.error_count} errors, ${signals.retry_count} retries, ` + `${signals.success ? "succeeded" : "failed"}`;
32379
- } else {
32380
- type = "neutral";
32381
- reasoning = `Mixed signals: ${Math.round(signals.duration_ms / 1000)}s, ` + `${signals.error_count} errors, ${signals.retry_count} retries`;
32382
- }
32383
- return {
32384
- signals,
32385
- type,
32386
- decayed_value: rawScore * decayed,
32387
- reasoning
32388
- };
32389
- }
32390
- function outcomeToFeedback(outcome, criterion) {
32391
- return {
32392
- id: `${outcome.signals.bead_id}-${criterion}-${Date.now()}`,
32393
- criterion,
32394
- type: outcome.type,
32395
- timestamp: outcome.signals.timestamp,
32396
- context: outcome.reasoning,
32397
- bead_id: outcome.signals.bead_id,
32398
- raw_value: outcome.decayed_value
32399
- };
32400
- }
32401
- var StrikeRecordSchema = exports_external.object({
32402
- bead_id: exports_external.string(),
32403
- strike_count: exports_external.number().int().min(0).max(3),
32404
- failures: exports_external.array(exports_external.object({
32405
- attempt: exports_external.string(),
32406
- reason: exports_external.string(),
32407
- timestamp: exports_external.string()
32408
- })),
32409
- first_strike_at: exports_external.string().optional(),
32410
- last_strike_at: exports_external.string().optional()
32411
- });
33337
+ ## Task
33338
+ {task}
32412
33339
 
32413
- class InMemoryStrikeStorage {
32414
- strikes = new Map;
32415
- async store(record2) {
32416
- this.strikes.set(record2.bead_id, record2);
32417
- }
32418
- async get(beadId) {
32419
- return this.strikes.get(beadId) ?? null;
32420
- }
32421
- async getAll() {
32422
- return Array.from(this.strikes.values());
32423
- }
32424
- async clear(beadId) {
32425
- this.strikes.delete(beadId);
32426
- }
32427
- }
32428
- async function addStrike(beadId, attempt, reason, storage = new InMemoryStrikeStorage) {
32429
- const existing = await storage.get(beadId);
32430
- const now = new Date().toISOString();
32431
- const record2 = existing ?? {
32432
- bead_id: beadId,
32433
- strike_count: 0,
32434
- failures: []
32435
- };
32436
- record2.strike_count = Math.min(3, record2.strike_count + 1);
32437
- record2.failures.push({ attempt, reason, timestamp: now });
32438
- record2.last_strike_at = now;
32439
- if (!record2.first_strike_at) {
32440
- record2.first_strike_at = now;
32441
- }
32442
- await storage.store(record2);
32443
- return record2;
32444
- }
32445
- async function getStrikes(beadId, storage = new InMemoryStrikeStorage) {
32446
- const record2 = await storage.get(beadId);
32447
- return record2?.strike_count ?? 0;
32448
- }
32449
- async function isStrikedOut(beadId, storage = new InMemoryStrikeStorage) {
32450
- const count = await getStrikes(beadId, storage);
32451
- return count >= 3;
33340
+ {context_section}
33341
+
33342
+ ## MANDATORY: Beads Issue Tracking
33343
+
33344
+ **Every subtask MUST become a bead.** This is non-negotiable.
33345
+
33346
+ After decomposition, the coordinator will:
33347
+ 1. Create an epic bead for the overall task
33348
+ 2. Create child beads for each subtask
33349
+ 3. Track progress through bead status updates
33350
+ 4. Close beads with summaries when complete
33351
+
33352
+ Agents MUST update their bead status as they work. No silent progress.
33353
+
33354
+ ## Requirements
33355
+
33356
+ 1. **Break into 2-{max_subtasks} independent subtasks** that can run in parallel
33357
+ 2. **Assign files** - each subtask must specify which files it will modify
33358
+ 3. **No file overlap** - files cannot appear in multiple subtasks (they get exclusive locks)
33359
+ 4. **Order by dependency** - if subtask B needs subtask A's output, A must come first in the array
33360
+ 5. **Estimate complexity** - 1 (trivial) to 5 (complex)
33361
+ 6. **Plan aggressively** - break down more than you think necessary, smaller is better
33362
+
33363
+ ## Response Format
33364
+
33365
+ Respond with a JSON object matching this schema:
33366
+
33367
+ \`\`\`typescript
33368
+ {
33369
+ epic: {
33370
+ title: string, // Epic title for the beads tracker
33371
+ description?: string // Brief description of the overall goal
33372
+ },
33373
+ subtasks: [
33374
+ {
33375
+ title: string, // What this subtask accomplishes
33376
+ description?: string, // Detailed instructions for the agent
33377
+ files: string[], // Files this subtask will modify (globs allowed)
33378
+ dependencies: number[], // Indices of subtasks this depends on (0-indexed)
33379
+ estimated_complexity: 1-5 // Effort estimate
33380
+ },
33381
+ // ... more subtasks
33382
+ ]
32452
33383
  }
32453
- async function getArchitecturePrompt(beadId, storage = new InMemoryStrikeStorage) {
32454
- const record2 = await storage.get(beadId);
32455
- if (!record2 || record2.strike_count < 3) {
32456
- return "";
32457
- }
32458
- const failuresList = record2.failures.map((f, i) => `${i + 1}. **${f.attempt}** - Failed: ${f.reason}`).join(`
32459
- `);
32460
- return `## Architecture Review Required
33384
+ \`\`\`
32461
33385
 
32462
- This bead (\`${beadId}\`) has failed 3 consecutive fix attempts:
33386
+ ## Guidelines
32463
33387
 
32464
- ${failuresList}
33388
+ - **Plan aggressively** - when in doubt, split further. 3 small tasks > 1 medium task
33389
+ - **Prefer smaller, focused subtasks** over large complex ones
33390
+ - **Include test files** in the same subtask as the code they test
33391
+ - **Consider shared types** - if multiple files share types, handle that first
33392
+ - **Think about imports** - changes to exported APIs affect downstream files
33393
+ - **Explicit > implicit** - spell out what each subtask should do, don't assume
32465
33394
 
32466
- This pattern suggests an **architectural problem**, not a bug.
33395
+ ## File Assignment Examples
32467
33396
 
32468
- **Questions to consider:**
32469
- - Is the current approach fundamentally sound?
32470
- - Should we refactor the architecture instead?
32471
- - Are we fixing symptoms instead of root cause?
33397
+ - Schema change: \`["src/schemas/user.ts", "src/schemas/index.ts"]\`
33398
+ - Component + test: \`["src/components/Button.tsx", "src/components/Button.test.tsx"]\`
33399
+ - API route: \`["src/app/api/users/route.ts"]\`
32472
33400
 
32473
- **Options:**
32474
- 1. **Refactor architecture** (describe new approach)
32475
- 2. **Continue with Fix #4** (explain why this time is different)
32476
- 3. **Abandon this approach entirely**
33401
+ Now decompose the task:`;
33402
+ var STRATEGY_DECOMPOSITION_PROMPT = `You are decomposing a task into parallelizable subtasks for a swarm of agents.
33403
+
33404
+ ## Task
33405
+ {task}
33406
+
33407
+ {strategy_guidelines}
33408
+
33409
+ {context_section}
33410
+
33411
+ {cass_history}
33412
+
33413
+ {skills_context}
33414
+
33415
+ ## MANDATORY: Beads Issue Tracking
33416
+
33417
+ **Every subtask MUST become a bead.** This is non-negotiable.
33418
+
33419
+ After decomposition, the coordinator will:
33420
+ 1. Create an epic bead for the overall task
33421
+ 2. Create child beads for each subtask
33422
+ 3. Track progress through bead status updates
33423
+ 4. Close beads with summaries when complete
33424
+
33425
+ Agents MUST update their bead status as they work. No silent progress.
33426
+
33427
+ ## Requirements
33428
+
33429
+ 1. **Break into 2-{max_subtasks} independent subtasks** that can run in parallel
33430
+ 2. **Assign files** - each subtask must specify which files it will modify
33431
+ 3. **No file overlap** - files cannot appear in multiple subtasks (they get exclusive locks)
33432
+ 4. **Order by dependency** - if subtask B needs subtask A's output, A must come first in the array
33433
+ 5. **Estimate complexity** - 1 (trivial) to 5 (complex)
33434
+ 6. **Plan aggressively** - break down more than you think necessary, smaller is better
32477
33435
 
32478
- **DO NOT attempt Fix #4 without answering these questions.**
32479
- `;
32480
- }
32481
- async function clearStrikes(beadId, storage = new InMemoryStrikeStorage) {
32482
- await storage.clear(beadId);
32483
- }
33436
+ ## Response Format
32484
33437
 
32485
- class InMemoryErrorStorage {
32486
- errors = [];
32487
- async store(entry) {
32488
- this.errors.push(entry);
32489
- }
32490
- async getByBead(beadId) {
32491
- return this.errors.filter((e) => e.bead_id === beadId);
32492
- }
32493
- async getUnresolvedByBead(beadId) {
32494
- return this.errors.filter((e) => e.bead_id === beadId && !e.resolved);
32495
- }
32496
- async markResolved(id) {
32497
- const error45 = this.errors.find((e) => e.id === id);
32498
- if (error45) {
32499
- error45.resolved = true;
32500
- }
32501
- }
32502
- async getAll() {
32503
- return [...this.errors];
32504
- }
32505
- }
33438
+ Respond with a JSON object matching this schema:
32506
33439
 
32507
- class ErrorAccumulator {
32508
- storage;
32509
- constructor(storage) {
32510
- this.storage = storage ?? new InMemoryErrorStorage;
32511
- }
32512
- async recordError(beadId, errorType, message, options2) {
32513
- const entry = {
32514
- id: `${beadId}-${errorType}-${Date.now()}`,
32515
- bead_id: beadId,
32516
- error_type: errorType,
32517
- message,
32518
- stack_trace: options2?.stack_trace,
32519
- tool_name: options2?.tool_name,
32520
- timestamp: new Date().toISOString(),
32521
- resolved: false,
32522
- context: options2?.context
32523
- };
32524
- const validated = ErrorEntrySchema.parse(entry);
32525
- await this.storage.store(validated);
32526
- return validated;
32527
- }
32528
- async getErrors(beadId) {
32529
- return this.storage.getByBead(beadId);
32530
- }
32531
- async getUnresolvedErrors(beadId) {
32532
- return this.storage.getUnresolvedByBead(beadId);
32533
- }
32534
- async resolveError(errorId) {
32535
- await this.storage.markResolved(errorId);
32536
- }
32537
- async getErrorContext(beadId, includeResolved = false) {
32538
- const errors3 = includeResolved ? await this.getErrors(beadId) : await this.getUnresolvedErrors(beadId);
32539
- if (errors3.length === 0) {
32540
- return "";
32541
- }
32542
- const byType = errors3.reduce((acc, err) => {
32543
- const type = err.error_type;
32544
- if (!acc[type]) {
32545
- acc[type] = [];
32546
- }
32547
- acc[type].push(err);
32548
- return acc;
32549
- }, {});
32550
- const lines = [
32551
- "## Previous Errors",
32552
- "",
32553
- "The following errors were encountered during execution:",
32554
- ""
32555
- ];
32556
- for (const [type, typeErrors] of Object.entries(byType)) {
32557
- lines.push(`### ${type} (${typeErrors.length} error${typeErrors.length > 1 ? "s" : ""})`);
32558
- lines.push("");
32559
- for (const err of typeErrors) {
32560
- lines.push(`- **${err.message}**`);
32561
- if (err.context) {
32562
- lines.push(` - Context: ${err.context}`);
32563
- }
32564
- if (err.tool_name) {
32565
- lines.push(` - Tool: ${err.tool_name}`);
32566
- }
32567
- if (err.stack_trace) {
32568
- lines.push(` - Stack: \`${err.stack_trace.slice(0, 100)}...\``);
32569
- }
32570
- lines.push(` - Time: ${new Date(err.timestamp).toLocaleString()}${err.resolved ? " (resolved)" : ""}`);
32571
- lines.push("");
32572
- }
32573
- }
32574
- lines.push("**Action Required**: Address these errors before proceeding. Consider:");
32575
- lines.push("- What caused each error?");
32576
- lines.push("- How can you prevent similar errors?");
32577
- lines.push("- Are there patterns across error types?");
32578
- lines.push("");
32579
- return lines.join(`
32580
- `);
32581
- }
32582
- async getErrorStats(beadId) {
32583
- const allErrors = await this.getErrors(beadId);
32584
- const unresolved = await this.getUnresolvedErrors(beadId);
32585
- const byType = allErrors.reduce((acc, err) => {
32586
- acc[err.error_type] = (acc[err.error_type] || 0) + 1;
32587
- return acc;
32588
- }, {});
32589
- return {
32590
- total: allErrors.length,
32591
- unresolved: unresolved.length,
32592
- by_type: byType
32593
- };
32594
- }
33440
+ \`\`\`typescript
33441
+ {
33442
+ epic: {
33443
+ title: string, // Epic title for the beads tracker
33444
+ description?: string // Brief description of the overall goal
33445
+ },
33446
+ subtasks: [
33447
+ {
33448
+ title: string, // What this subtask accomplishes
33449
+ description?: string, // Detailed instructions for the agent
33450
+ files: string[], // Files this subtask will modify (globs allowed)
33451
+ dependencies: number[], // Indices of subtasks this depends on (0-indexed)
33452
+ estimated_complexity: 1-5 // Effort estimate
33453
+ },
33454
+ // ... more subtasks
33455
+ ]
32595
33456
  }
33457
+ \`\`\`
32596
33458
 
32597
- // src/swarm.ts
32598
- init_skills();
32599
- var POSITIVE_MARKERS = [
32600
- "always",
32601
- "must",
32602
- "required",
32603
- "ensure",
32604
- "use",
32605
- "prefer"
32606
- ];
32607
- var NEGATIVE_MARKERS = [
32608
- "never",
32609
- "dont",
32610
- "don't",
32611
- "avoid",
32612
- "forbid",
32613
- "no ",
32614
- "not "
32615
- ];
33459
+ Now decompose the task:`;
32616
33460
  function extractDirectives(text) {
32617
33461
  const sentences = text.split(/[.!?\n]+/).map((s) => s.trim().toLowerCase());
32618
33462
  const positive = [];
@@ -32659,254 +33503,372 @@ function detectInstructionConflicts(subtasks) {
32659
33503
  description: `Subtask ${i} says "${posA}" but subtask ${j} says "${negB}"`
32660
33504
  });
32661
33505
  }
32662
- }
33506
+ }
33507
+ }
33508
+ for (const posB of b.positive) {
33509
+ for (const negA of a.negative) {
33510
+ if (directivesConflict(posB, negA)) {
33511
+ conflicts.push({
33512
+ subtask_a: j,
33513
+ subtask_b: i,
33514
+ directive_a: posB,
33515
+ directive_b: negA,
33516
+ conflict_type: "positive_negative",
33517
+ description: `Subtask ${j} says "${posB}" but subtask ${i} says "${negA}"`
33518
+ });
33519
+ }
33520
+ }
33521
+ }
33522
+ }
33523
+ }
33524
+ return conflicts;
33525
+ }
33526
+ function detectFileConflicts(subtasks) {
33527
+ const allFiles = new Map;
33528
+ const conflicts = [];
33529
+ for (const subtask of subtasks) {
33530
+ for (const file2 of subtask.files) {
33531
+ const count = allFiles.get(file2) || 0;
33532
+ allFiles.set(file2, count + 1);
33533
+ if (count === 1) {
33534
+ conflicts.push(file2);
33535
+ }
33536
+ }
33537
+ }
33538
+ return conflicts;
33539
+ }
33540
+ async function queryCassHistory(task, limit = 3) {
33541
+ try {
33542
+ const result = await Bun.$`cass search ${task} --limit ${limit} --json`.quiet().nothrow();
33543
+ if (result.exitCode !== 0) {
33544
+ const error45 = result.stderr.toString();
33545
+ console.warn(`[swarm] CASS search failed (exit ${result.exitCode}):`, error45);
33546
+ return { status: "failed", error: error45 };
33547
+ }
33548
+ const output = result.stdout.toString();
33549
+ if (!output.trim()) {
33550
+ return { status: "empty", query: task };
33551
+ }
33552
+ try {
33553
+ const parsed = JSON.parse(output);
33554
+ const searchResult = {
33555
+ query: task,
33556
+ results: Array.isArray(parsed) ? parsed : parsed.results || []
33557
+ };
33558
+ if (searchResult.results.length === 0) {
33559
+ return { status: "empty", query: task };
33560
+ }
33561
+ return { status: "success", data: searchResult };
33562
+ } catch (error45) {
33563
+ console.warn(`[swarm] Failed to parse CASS output:`, error45);
33564
+ return { status: "failed", error: String(error45) };
33565
+ }
33566
+ } catch (error45) {
33567
+ console.error(`[swarm] CASS query error:`, error45);
33568
+ return { status: "unavailable" };
33569
+ }
33570
+ }
33571
+ function formatCassHistoryForPrompt(history) {
33572
+ if (history.results.length === 0) {
33573
+ return "";
33574
+ }
33575
+ const lines = [
33576
+ "## Similar Past Tasks",
33577
+ "",
33578
+ "These similar tasks were found in agent history:",
33579
+ "",
33580
+ ...history.results.slice(0, 3).map((r, i) => {
33581
+ const preview = r.preview.slice(0, 200).replace(/\n/g, " ");
33582
+ return `${i + 1}. [${r.agent}] ${preview}...`;
33583
+ }),
33584
+ "",
33585
+ "Consider patterns that worked in these past tasks.",
33586
+ ""
33587
+ ];
33588
+ return lines.join(`
33589
+ `);
33590
+ }
33591
+ var swarm_decompose = tool({
33592
+ description: "Generate decomposition prompt for breaking task into parallelizable subtasks. Optionally queries CASS for similar past tasks.",
33593
+ args: {
33594
+ task: tool.schema.string().min(1).describe("Task description to decompose"),
33595
+ max_subtasks: tool.schema.number().int().min(2).max(10).default(5).describe("Maximum number of subtasks (default: 5)"),
33596
+ context: tool.schema.string().optional().describe("Additional context (codebase info, constraints, etc.)"),
33597
+ query_cass: tool.schema.boolean().optional().describe("Query CASS for similar past tasks (default: true)"),
33598
+ cass_limit: tool.schema.number().int().min(1).max(10).optional().describe("Max CASS results to include (default: 3)")
33599
+ },
33600
+ async execute(args) {
33601
+ const { formatMemoryQueryForDecomposition: formatMemoryQueryForDecomposition2 } = await Promise.resolve().then(() => (init_learning(), exports_learning));
33602
+ let cassContext = "";
33603
+ let cassResultInfo;
33604
+ if (args.query_cass !== false) {
33605
+ const cassResult = await queryCassHistory(args.task, args.cass_limit ?? 3);
33606
+ if (cassResult.status === "success") {
33607
+ cassContext = formatCassHistoryForPrompt(cassResult.data);
33608
+ cassResultInfo = {
33609
+ queried: true,
33610
+ results_found: cassResult.data.results.length,
33611
+ included_in_context: true
33612
+ };
33613
+ } else {
33614
+ cassResultInfo = {
33615
+ queried: true,
33616
+ results_found: 0,
33617
+ included_in_context: false,
33618
+ reason: cassResult.status
33619
+ };
33620
+ }
33621
+ } else {
33622
+ cassResultInfo = { queried: false, reason: "disabled" };
33623
+ }
33624
+ const fullContext = [args.context, cassContext].filter(Boolean).join(`
33625
+
33626
+ `);
33627
+ const contextSection = fullContext ? `## Additional Context
33628
+ ${fullContext}` : `## Additional Context
33629
+ (none provided)`;
33630
+ const prompt = DECOMPOSITION_PROMPT.replace("{task}", args.task).replace("{max_subtasks}", (args.max_subtasks ?? 5).toString()).replace("{context_section}", contextSection);
33631
+ return JSON.stringify({
33632
+ prompt,
33633
+ expected_schema: "BeadTree",
33634
+ schema_hint: {
33635
+ epic: { title: "string", description: "string?" },
33636
+ subtasks: [
33637
+ {
33638
+ title: "string",
33639
+ description: "string?",
33640
+ files: "string[]",
33641
+ dependencies: "number[]",
33642
+ estimated_complexity: "1-5"
33643
+ }
33644
+ ]
33645
+ },
33646
+ validation_note: "Parse agent response as JSON and validate with BeadTreeSchema from schemas/bead.ts",
33647
+ cass_history: cassResultInfo,
33648
+ memory_query: formatMemoryQueryForDecomposition2(args.task, 3)
33649
+ }, null, 2);
33650
+ }
33651
+ });
33652
+ var swarm_validate_decomposition = tool({
33653
+ description: "Validate a decomposition response against BeadTreeSchema",
33654
+ args: {
33655
+ response: tool.schema.string().describe("JSON response from agent (BeadTree format)")
33656
+ },
33657
+ async execute(args) {
33658
+ try {
33659
+ const parsed = JSON.parse(args.response);
33660
+ const validated = BeadTreeSchema.parse(parsed);
33661
+ const conflicts = detectFileConflicts(validated.subtasks);
33662
+ if (conflicts.length > 0) {
33663
+ return JSON.stringify({
33664
+ valid: false,
33665
+ error: `File conflicts detected: ${conflicts.join(", ")}`,
33666
+ hint: "Each file can only be assigned to one subtask"
33667
+ }, null, 2);
32663
33668
  }
32664
- for (const posB of b.positive) {
32665
- for (const negA of a.negative) {
32666
- if (directivesConflict(posB, negA)) {
32667
- conflicts.push({
32668
- subtask_a: j,
32669
- subtask_b: i,
32670
- directive_a: posB,
32671
- directive_b: negA,
32672
- conflict_type: "positive_negative",
32673
- description: `Subtask ${j} says "${posB}" but subtask ${i} says "${negA}"`
32674
- });
33669
+ for (let i = 0;i < validated.subtasks.length; i++) {
33670
+ const deps = validated.subtasks[i].dependencies;
33671
+ for (const dep of deps) {
33672
+ if (dep < 0 || dep >= validated.subtasks.length) {
33673
+ return JSON.stringify({
33674
+ valid: false,
33675
+ error: `Invalid dependency: subtask ${i} depends on ${dep}, but only ${validated.subtasks.length} subtasks exist (indices 0-${validated.subtasks.length - 1})`,
33676
+ hint: "Dependency index is out of bounds"
33677
+ }, null, 2);
33678
+ }
33679
+ if (dep >= i) {
33680
+ return JSON.stringify({
33681
+ valid: false,
33682
+ error: `Invalid dependency: subtask ${i} depends on ${dep}, but dependencies must be earlier in the array`,
33683
+ hint: "Reorder subtasks so dependencies come before dependents"
33684
+ }, null, 2);
32675
33685
  }
32676
33686
  }
32677
33687
  }
33688
+ const instructionConflicts = detectInstructionConflicts(validated.subtasks);
33689
+ return JSON.stringify({
33690
+ valid: true,
33691
+ bead_tree: validated,
33692
+ stats: {
33693
+ subtask_count: validated.subtasks.length,
33694
+ total_files: new Set(validated.subtasks.flatMap((s) => s.files)).size,
33695
+ total_complexity: validated.subtasks.reduce((sum, s) => sum + s.estimated_complexity, 0)
33696
+ },
33697
+ warnings: instructionConflicts.length > 0 ? {
33698
+ instruction_conflicts: instructionConflicts,
33699
+ hint: "Review these potential conflicts between subtask instructions"
33700
+ } : undefined
33701
+ }, null, 2);
33702
+ } catch (error45) {
33703
+ if (error45 instanceof exports_external.ZodError) {
33704
+ return JSON.stringify({
33705
+ valid: false,
33706
+ error: "Schema validation failed",
33707
+ details: error45.issues
33708
+ }, null, 2);
33709
+ }
33710
+ if (error45 instanceof SyntaxError) {
33711
+ return JSON.stringify({
33712
+ valid: false,
33713
+ error: "Invalid JSON",
33714
+ details: error45.message
33715
+ }, null, 2);
33716
+ }
33717
+ throw error45;
32678
33718
  }
32679
33719
  }
32680
- return conflicts;
32681
- }
32682
- var STRATEGIES = {
32683
- "file-based": {
32684
- name: "file-based",
32685
- description: "Group by file type or directory. Best for refactoring, migrations, and pattern changes across codebase.",
32686
- keywords: [
32687
- "refactor",
32688
- "migrate",
32689
- "update all",
32690
- "rename",
32691
- "replace",
32692
- "convert",
32693
- "upgrade",
32694
- "deprecate",
32695
- "remove",
32696
- "cleanup",
32697
- "lint",
32698
- "format"
32699
- ],
32700
- guidelines: [
32701
- "Group files by directory or type (e.g., all components, all tests)",
32702
- "Minimize cross-directory dependencies within a subtask",
32703
- "Handle shared types/utilities first if they change",
32704
- "Each subtask should be a complete transformation of its file set",
32705
- "Consider import/export relationships when grouping"
32706
- ],
32707
- antiPatterns: [
32708
- "Don't split tightly coupled files across subtasks",
32709
- "Don't group files that have no relationship",
32710
- "Don't forget to update imports when moving/renaming"
32711
- ],
32712
- examples: [
32713
- "Migrate all components to new API → split by component directory",
32714
- "Rename userId to accountId → split by module (types first, then consumers)",
32715
- "Update all tests to use new matcher → split by test directory"
32716
- ]
32717
- },
32718
- "feature-based": {
32719
- name: "feature-based",
32720
- description: "Vertical slices with UI + API + data. Best for new features and adding functionality.",
32721
- keywords: [
32722
- "add",
32723
- "implement",
32724
- "build",
32725
- "create",
32726
- "feature",
32727
- "new",
32728
- "integrate",
32729
- "connect",
32730
- "enable",
32731
- "support"
32732
- ],
32733
- guidelines: [
32734
- "Each subtask is a complete vertical slice (UI + logic + data)",
32735
- "Start with data layer/types, then logic, then UI",
32736
- "Keep related components together (form + validation + submission)",
32737
- "Separate concerns that can be developed independently",
32738
- "Consider user-facing features as natural boundaries"
32739
- ],
32740
- antiPatterns: [
32741
- "Don't split a single feature across multiple subtasks",
32742
- "Don't create subtasks that can't be tested independently",
32743
- "Don't forget integration points between features"
32744
- ],
32745
- examples: [
32746
- "Add user auth → [OAuth setup, Session management, Protected routes]",
32747
- "Build dashboard → [Data fetching, Chart components, Layout/navigation]",
32748
- "Add search → [Search API, Search UI, Results display]"
32749
- ]
32750
- },
32751
- "risk-based": {
32752
- name: "risk-based",
32753
- description: "Isolate high-risk changes, add tests first. Best for bug fixes, security issues, and critical changes.",
32754
- keywords: [
32755
- "fix",
32756
- "bug",
32757
- "security",
32758
- "vulnerability",
32759
- "critical",
32760
- "urgent",
32761
- "hotfix",
32762
- "patch",
32763
- "audit",
32764
- "review"
32765
- ],
32766
- guidelines: [
32767
- "Write tests FIRST to capture expected behavior",
32768
- "Isolate the risky change to minimize blast radius",
32769
- "Add monitoring/logging around the change",
32770
- "Create rollback plan as part of the task",
32771
- "Audit similar code for the same issue"
32772
- ],
32773
- antiPatterns: [
32774
- "Don't make multiple risky changes in one subtask",
32775
- "Don't skip tests for 'simple' fixes",
32776
- "Don't forget to check for similar issues elsewhere"
32777
- ],
32778
- examples: [
32779
- "Fix auth bypass → [Add regression test, Fix vulnerability, Audit similar endpoints]",
32780
- "Fix race condition → [Add test reproducing issue, Implement fix, Add concurrency tests]",
32781
- "Security audit → [Scan for vulnerabilities, Fix critical issues, Document remaining risks]"
32782
- ]
33720
+ });
33721
+ var swarm_delegate_planning = tool({
33722
+ description: "Delegate task decomposition to a swarm/planner subagent. Returns a prompt to spawn the planner. Use this to keep coordinator context lean - all planning reasoning happens in the subagent.",
33723
+ args: {
33724
+ task: tool.schema.string().min(1).describe("The task to decompose"),
33725
+ context: tool.schema.string().optional().describe("Additional context to include"),
33726
+ max_subtasks: tool.schema.number().int().min(2).max(10).optional().default(5).describe("Maximum number of subtasks (default: 5)"),
33727
+ strategy: tool.schema.enum(["auto", "file-based", "feature-based", "risk-based"]).optional().default("auto").describe("Decomposition strategy (default: auto-detect)"),
33728
+ query_cass: tool.schema.boolean().optional().default(true).describe("Query CASS for similar past tasks (default: true)")
32783
33729
  },
32784
- "research-based": {
32785
- name: "research-based",
32786
- description: "Parallel search across multiple sources, then synthesize. Best for investigation, learning, and discovery tasks.",
32787
- keywords: [
32788
- "research",
32789
- "investigate",
32790
- "explore",
32791
- "find out",
32792
- "discover",
32793
- "understand",
32794
- "learn about",
32795
- "analyze",
32796
- "what is",
32797
- "what are",
32798
- "how does",
32799
- "how do",
32800
- "why does",
32801
- "why do",
32802
- "compare",
32803
- "evaluate",
32804
- "study",
32805
- "look up",
32806
- "look into",
32807
- "search for",
32808
- "dig into",
32809
- "figure out",
32810
- "debug options",
32811
- "debug levers",
32812
- "configuration options",
32813
- "environment variables",
32814
- "available options",
32815
- "documentation"
32816
- ],
32817
- guidelines: [
32818
- "Split by information source (PDFs, repos, history, web)",
32819
- "Each agent searches with different query angles",
32820
- "Include a synthesis subtask that depends on all search subtasks",
32821
- "Use pdf-brain for documentation/books if available",
32822
- "Use repo-crawl for GitHub repos if URL provided",
32823
- "Use cass for past agent session history",
32824
- "Assign NO files to research subtasks (read-only)"
32825
- ],
32826
- antiPatterns: [
32827
- "Don't have one agent search everything sequentially",
32828
- "Don't skip synthesis - raw search results need consolidation",
32829
- "Don't forget to check tool availability before assigning sources"
32830
- ],
32831
- examples: [
32832
- "Research auth patterns → [Search PDFs, Search repos, Search history, Synthesize]",
32833
- "Investigate error → [Search cass for similar errors, Search repo for error handling, Synthesize]",
32834
- "Learn about library → [Search docs, Search examples, Search issues, Synthesize findings]"
32835
- ]
32836
- }
32837
- };
32838
- function selectStrategy(task) {
32839
- const taskLower = task.toLowerCase();
32840
- const scores = {
32841
- "file-based": 0,
32842
- "feature-based": 0,
32843
- "risk-based": 0,
32844
- "research-based": 0
32845
- };
32846
- for (const [strategyName, definition] of Object.entries(STRATEGIES)) {
32847
- const name = strategyName;
32848
- for (const keyword of definition.keywords) {
32849
- if (keyword.includes(" ")) {
32850
- if (taskLower.includes(keyword)) {
32851
- scores[name] += 1;
32852
- }
33730
+ async execute(args) {
33731
+ const { selectStrategy: selectStrategy2, formatStrategyGuidelines: formatStrategyGuidelines2 } = await Promise.resolve().then(() => (init_swarm_strategies(), exports_swarm_strategies));
33732
+ const { formatMemoryQueryForDecomposition: formatMemoryQueryForDecomposition2 } = await Promise.resolve().then(() => (init_learning(), exports_learning));
33733
+ const { listSkills: listSkills2, getSkillsContextForSwarm: getSkillsContextForSwarm2, findRelevantSkills: findRelevantSkills2 } = await Promise.resolve().then(() => (init_skills(), exports_skills));
33734
+ let selectedStrategy;
33735
+ let strategyReasoning;
33736
+ if (args.strategy && args.strategy !== "auto") {
33737
+ selectedStrategy = args.strategy;
33738
+ strategyReasoning = `User-specified strategy: ${selectedStrategy}`;
33739
+ } else {
33740
+ const selection = selectStrategy2(args.task);
33741
+ selectedStrategy = selection.strategy;
33742
+ strategyReasoning = selection.reasoning;
33743
+ }
33744
+ let cassContext = "";
33745
+ let cassResultInfo;
33746
+ if (args.query_cass !== false) {
33747
+ const cassResult = await queryCassHistory(args.task, 3);
33748
+ if (cassResult.status === "success") {
33749
+ cassContext = formatCassHistoryForPrompt(cassResult.data);
33750
+ cassResultInfo = {
33751
+ queried: true,
33752
+ results_found: cassResult.data.results.length,
33753
+ included_in_context: true
33754
+ };
32853
33755
  } else {
32854
- const regex = new RegExp(`\\b${keyword}\\b`, "i");
32855
- if (regex.test(taskLower)) {
32856
- scores[name] += 1;
32857
- }
33756
+ cassResultInfo = {
33757
+ queried: true,
33758
+ results_found: 0,
33759
+ included_in_context: false,
33760
+ reason: cassResult.status
33761
+ };
32858
33762
  }
33763
+ } else {
33764
+ cassResultInfo = { queried: false, reason: "disabled" };
32859
33765
  }
32860
- }
32861
- const entries = Object.entries(scores);
32862
- entries.sort((a, b) => b[1] - a[1]);
32863
- const [winner, winnerScore] = entries[0];
32864
- const [runnerUp, runnerUpScore] = entries[1] || [null, 0];
32865
- const totalScore = entries.reduce((sum, [, score]) => sum + score, 0);
32866
- const confidence = totalScore > 0 ? Math.min(0.95, 0.5 + (winnerScore - runnerUpScore) / totalScore) : 0.5;
32867
- let reasoning;
32868
- if (winnerScore === 0) {
32869
- reasoning = `No strong keyword signals. Defaulting to feature-based as it's most versatile.`;
32870
- } else {
32871
- const matchedKeywords = STRATEGIES[winner].keywords.filter((k) => taskLower.includes(k));
32872
- reasoning = `Matched keywords: ${matchedKeywords.join(", ")}. ${STRATEGIES[winner].description}`;
32873
- }
32874
- const finalStrategy = winnerScore === 0 ? "feature-based" : winner;
32875
- return {
32876
- strategy: finalStrategy,
32877
- confidence,
32878
- reasoning,
32879
- alternatives: entries.filter(([s]) => s !== finalStrategy).map(([strategy, score]) => ({ strategy, score }))
32880
- };
32881
- }
32882
- function formatStrategyGuidelines(strategy) {
32883
- const def = STRATEGIES[strategy];
32884
- const guidelines = def.guidelines.map((g) => `- ${g}`).join(`
32885
- `);
32886
- const antiPatterns = def.antiPatterns.map((a) => `- ${a}`).join(`
32887
- `);
32888
- const examples = def.examples.map((e) => `- ${e}`).join(`
32889
- `);
32890
- return `## Strategy: ${strategy}
33766
+ let skillsContext = "";
33767
+ let skillsInfo = {
33768
+ included: false
33769
+ };
33770
+ const allSkills = await listSkills2();
33771
+ if (allSkills.length > 0) {
33772
+ skillsContext = await getSkillsContextForSwarm2();
33773
+ const relevantSkills = await findRelevantSkills2(args.task);
33774
+ skillsInfo = {
33775
+ included: true,
33776
+ count: allSkills.length,
33777
+ relevant: relevantSkills
33778
+ };
33779
+ if (relevantSkills.length > 0) {
33780
+ skillsContext += `
32891
33781
 
32892
- ${def.description}
33782
+ **Suggested skills for this task**: ${relevantSkills.join(", ")}`;
33783
+ }
33784
+ }
33785
+ const strategyGuidelines = formatStrategyGuidelines2(selectedStrategy);
33786
+ const contextSection = args.context ? `## Additional Context
33787
+ ${args.context}` : `## Additional Context
33788
+ (none provided)`;
33789
+ const planningPrompt = STRATEGY_DECOMPOSITION_PROMPT.replace("{task}", args.task).replace("{strategy_guidelines}", strategyGuidelines).replace("{context_section}", contextSection).replace("{cass_history}", cassContext || "").replace("{skills_context}", skillsContext || "").replace("{max_subtasks}", (args.max_subtasks ?? 5).toString());
33790
+ const subagentInstructions = `
33791
+ ## CRITICAL: Output Format
32893
33792
 
32894
- ### Guidelines
32895
- ${guidelines}
33793
+ You are a planner subagent. Your ONLY output must be valid JSON matching the BeadTree schema.
32896
33794
 
32897
- ### Anti-Patterns (Avoid These)
32898
- ${antiPatterns}
33795
+ DO NOT include:
33796
+ - Explanatory text before or after the JSON
33797
+ - Markdown code fences (\`\`\`json)
33798
+ - Commentary or reasoning
32899
33799
 
32900
- ### Examples
32901
- ${examples}`;
33800
+ OUTPUT ONLY the raw JSON object.
33801
+
33802
+ ## Example Output
33803
+
33804
+ {
33805
+ "epic": {
33806
+ "title": "Add user authentication",
33807
+ "description": "Implement OAuth-based authentication system"
33808
+ },
33809
+ "subtasks": [
33810
+ {
33811
+ "title": "Set up OAuth provider",
33812
+ "description": "Configure OAuth client credentials and redirect URLs",
33813
+ "files": ["src/auth/oauth.ts", "src/config/auth.ts"],
33814
+ "dependencies": [],
33815
+ "estimated_complexity": 2
33816
+ },
33817
+ {
33818
+ "title": "Create auth routes",
33819
+ "description": "Implement login, logout, and callback routes",
33820
+ "files": ["src/app/api/auth/[...nextauth]/route.ts"],
33821
+ "dependencies": [0],
33822
+ "estimated_complexity": 3
33823
+ }
33824
+ ]
32902
33825
  }
32903
- var DECOMPOSITION_PROMPT = `You are decomposing a task into parallelizable subtasks for a swarm of agents.
33826
+
33827
+ Now generate the BeadTree for the given task.`;
33828
+ const fullPrompt = `${planningPrompt}
33829
+
33830
+ ${subagentInstructions}`;
33831
+ return JSON.stringify({
33832
+ prompt: fullPrompt,
33833
+ subagent_type: "swarm/planner",
33834
+ description: "Task decomposition planning",
33835
+ strategy: {
33836
+ selected: selectedStrategy,
33837
+ reasoning: strategyReasoning
33838
+ },
33839
+ expected_output: "BeadTree JSON (raw JSON, no markdown)",
33840
+ next_steps: [
33841
+ "1. Spawn subagent with Task tool using returned prompt",
33842
+ "2. Parse subagent response as JSON",
33843
+ "3. Validate with swarm_validate_decomposition",
33844
+ "4. Create beads with beads_create_epic"
33845
+ ],
33846
+ cass_history: cassResultInfo,
33847
+ skills: skillsInfo,
33848
+ memory_query: formatMemoryQueryForDecomposition2(args.task, 3)
33849
+ }, null, 2);
33850
+ }
33851
+ });
33852
+ var decomposeTools = {
33853
+ swarm_decompose,
33854
+ swarm_validate_decomposition,
33855
+ swarm_delegate_planning
33856
+ };
33857
+ // src/swarm-prompts.ts
33858
+ init_dist();
33859
+ var STRATEGY_DECOMPOSITION_PROMPT2 = `You are decomposing a task into parallelizable subtasks for a swarm of agents.
32904
33860
 
32905
33861
  ## Task
32906
33862
  {task}
32907
33863
 
33864
+ {strategy_guidelines}
33865
+
32908
33866
  {context_section}
32909
33867
 
33868
+ {cass_history}
33869
+
33870
+ {skills_context}
33871
+
32910
33872
  ## MANDATORY: Beads Issue Tracking
32911
33873
 
32912
33874
  **Every subtask MUST become a bead.** This is non-negotiable.
@@ -32951,21 +33913,6 @@ Respond with a JSON object matching this schema:
32951
33913
  }
32952
33914
  \`\`\`
32953
33915
 
32954
- ## Guidelines
32955
-
32956
- - **Plan aggressively** - when in doubt, split further. 3 small tasks > 1 medium task
32957
- - **Prefer smaller, focused subtasks** over large complex ones
32958
- - **Include test files** in the same subtask as the code they test
32959
- - **Consider shared types** - if multiple files share types, handle that first
32960
- - **Think about imports** - changes to exported APIs affect downstream files
32961
- - **Explicit > implicit** - spell out what each subtask should do, don't assume
32962
-
32963
- ## File Assignment Examples
32964
-
32965
- - Schema change: \`["src/schemas/user.ts", "src/schemas/index.ts"]\`
32966
- - Component + test: \`["src/components/Button.tsx", "src/components/Button.test.tsx"]\`
32967
- - API route: \`["src/app/api/users/route.ts"]\`
32968
-
32969
33916
  Now decompose the task:`;
32970
33917
  var SUBTASK_PROMPT = `You are a swarm agent working on a subtask of a larger epic.
32971
33918
 
@@ -33148,13 +34095,6 @@ As you work, note reusable patterns, best practices, or domain insights:
33148
34095
  **CRITICAL: Never work silently. Send progress updates via swarmmail_send every significant milestone.**
33149
34096
 
33150
34097
  Begin now.`;
33151
- function formatSubtaskPromptV2(params) {
33152
- const fileList = params.files.length > 0 ? params.files.map((f) => `- \`${f}\``).join(`
33153
- `) : "(no specific files - use judgment)";
33154
- const compressedSection = params.compressed_context ? params.compressed_context : "";
33155
- const errorSection = params.error_context ? params.error_context : "";
33156
- return SUBTASK_PROMPT_V2.replace(/{bead_id}/g, params.bead_id).replace(/{epic_id}/g, params.epic_id).replace("{subtask_title}", params.subtask_title).replace("{subtask_description}", params.subtask_description || "(see title)").replace("{file_list}", fileList).replace("{shared_context}", params.shared_context || "(none)").replace("{compressed_context}", compressedSection).replace("{error_context}", errorSection);
33157
- }
33158
34098
  var EVALUATION_PROMPT = `Evaluate the work completed for this subtask.
33159
34099
 
33160
34100
  ## Subtask
@@ -33191,22 +34131,12 @@ For each criterion, assess passed/failed and provide brief feedback:
33191
34131
 
33192
34132
  If any criterion fails, the overall evaluation fails and retry_suggestion
33193
34133
  should describe what needs to be fixed.`;
33194
-
33195
- class SwarmError extends Error {
33196
- operation;
33197
- details;
33198
- constructor(message, operation, details) {
33199
- super(message);
33200
- this.operation = operation;
33201
- this.details = details;
33202
- this.name = "SwarmError";
33203
- }
33204
- }
33205
- function formatDecompositionPrompt(task, maxSubtasks, context) {
33206
- const contextSection = context ? `## Additional Context
33207
- ${context}` : `## Additional Context
33208
- (none provided)`;
33209
- return DECOMPOSITION_PROMPT.replace("{task}", task).replace("{max_subtasks}", maxSubtasks.toString()).replace("{context_section}", contextSection);
34134
+ function formatSubtaskPromptV2(params) {
34135
+ const fileList = params.files.length > 0 ? params.files.map((f) => `- \`${f}\``).join(`
34136
+ `) : "(no specific files - use judgment)";
34137
+ const compressedSection = params.compressed_context ? params.compressed_context : "";
34138
+ const errorSection = params.error_context ? params.error_context : "";
34139
+ return SUBTASK_PROMPT_V2.replace(/{bead_id}/g, params.bead_id).replace(/{epic_id}/g, params.epic_id).replace("{subtask_title}", params.subtask_title).replace("{subtask_description}", params.subtask_description || "(see title)").replace("{file_list}", fileList).replace("{shared_context}", params.shared_context || "(none)").replace("{compressed_context}", compressedSection).replace("{error_context}", errorSection);
33210
34140
  }
33211
34141
  function formatSubtaskPrompt(params) {
33212
34142
  const fileList = params.files.map((f) => `- \`${f}\``).join(`
@@ -33218,260 +34148,121 @@ function formatEvaluationPrompt(params) {
33218
34148
  `);
33219
34149
  return EVALUATION_PROMPT.replace("{bead_id}", params.bead_id).replace("{subtask_title}", params.subtask_title).replace("{files_touched}", filesList || "(no files recorded)");
33220
34150
  }
33221
- async function queryEpicSubtasks(epicId) {
33222
- const beadsAvailable = await isToolAvailable("beads");
33223
- if (!beadsAvailable) {
33224
- warnMissingTool("beads");
33225
- return [];
33226
- }
33227
- const result = await Bun.$`bd list --parent ${epicId} --json`.quiet().nothrow();
33228
- if (result.exitCode !== 0) {
33229
- console.error(`[swarm] ERROR: Failed to query subtasks for epic ${epicId}:`, result.stderr.toString());
33230
- return [];
33231
- }
33232
- try {
33233
- const parsed = JSON.parse(result.stdout.toString());
33234
- return exports_external.array(BeadSchema).parse(parsed);
33235
- } catch (error45) {
33236
- if (error45 instanceof exports_external.ZodError) {
33237
- console.error(`[swarm] ERROR: Invalid bead data for epic ${epicId}:`, error45.message);
33238
- return [];
33239
- }
33240
- console.error(`[swarm] ERROR: Failed to parse beads for epic ${epicId}:`, error45);
33241
- throw error45;
33242
- }
33243
- }
33244
- async function querySwarmMessages(projectKey, threadId) {
33245
- const agentMailAvailable2 = await isToolAvailable("agent-mail");
33246
- if (!agentMailAvailable2) {
33247
- return 0;
33248
- }
33249
- try {
33250
- const inbox = await getSwarmInbox({
33251
- projectPath: projectKey,
33252
- agentName: "coordinator",
33253
- limit: 5,
33254
- includeBodies: false
34151
+ var swarm_subtask_prompt = tool({
34152
+ description: "Generate the prompt for a spawned subtask agent",
34153
+ args: {
34154
+ agent_name: tool.schema.string().describe("Agent Mail name for the agent"),
34155
+ bead_id: tool.schema.string().describe("Subtask bead ID"),
34156
+ epic_id: tool.schema.string().describe("Epic bead ID"),
34157
+ subtask_title: tool.schema.string().describe("Subtask title"),
34158
+ subtask_description: tool.schema.string().optional().describe("Detailed subtask instructions"),
34159
+ files: tool.schema.array(tool.schema.string()).describe("Files assigned to this subtask"),
34160
+ shared_context: tool.schema.string().optional().describe("Context shared across all agents")
34161
+ },
34162
+ async execute(args) {
34163
+ const prompt = formatSubtaskPrompt({
34164
+ agent_name: args.agent_name,
34165
+ bead_id: args.bead_id,
34166
+ epic_id: args.epic_id,
34167
+ subtask_title: args.subtask_title,
34168
+ subtask_description: args.subtask_description || "",
34169
+ files: args.files,
34170
+ shared_context: args.shared_context
33255
34171
  });
33256
- const threadMessages = inbox.messages.filter((m) => m.thread_id === threadId);
33257
- return threadMessages.length;
33258
- } catch (error45) {
33259
- console.warn(`[swarm] Failed to query swarm messages for thread ${threadId}:`, error45);
33260
- return 0;
33261
- }
33262
- }
33263
- function formatProgressMessage(progress) {
33264
- const lines = [
33265
- `**Status**: ${progress.status}`,
33266
- progress.progress_percent !== undefined ? `**Progress**: ${progress.progress_percent}%` : null,
33267
- progress.message ? `**Message**: ${progress.message}` : null,
33268
- progress.files_touched && progress.files_touched.length > 0 ? `**Files touched**:
33269
- ${progress.files_touched.map((f) => `- \`${f}\``).join(`
33270
- `)}` : null,
33271
- progress.blockers && progress.blockers.length > 0 ? `**Blockers**:
33272
- ${progress.blockers.map((b) => `- ${b}`).join(`
33273
- `)}` : null
33274
- ];
33275
- return lines.filter(Boolean).join(`
33276
-
33277
- `);
33278
- }
33279
- async function queryCassHistory(task, limit = 3) {
33280
- const cassAvailable = await isToolAvailable("cass");
33281
- if (!cassAvailable) {
33282
- warnMissingTool("cass");
33283
- return { status: "unavailable" };
33284
- }
33285
- try {
33286
- const result = await Bun.$`cass search ${task} --limit ${limit} --json`.quiet().nothrow();
33287
- if (result.exitCode !== 0) {
33288
- const error45 = result.stderr.toString();
33289
- console.warn(`[swarm] CASS search failed (exit ${result.exitCode}):`, error45);
33290
- return { status: "failed", error: error45 };
33291
- }
33292
- const output = result.stdout.toString();
33293
- if (!output.trim()) {
33294
- return { status: "empty", query: task };
33295
- }
33296
- try {
33297
- const parsed = JSON.parse(output);
33298
- const searchResult = {
33299
- query: task,
33300
- results: Array.isArray(parsed) ? parsed : parsed.results || []
33301
- };
33302
- if (searchResult.results.length === 0) {
33303
- return { status: "empty", query: task };
33304
- }
33305
- return { status: "success", data: searchResult };
33306
- } catch (error45) {
33307
- console.warn(`[swarm] Failed to parse CASS output:`, error45);
33308
- return { status: "failed", error: String(error45) };
33309
- }
33310
- } catch (error45) {
33311
- console.error(`[swarm] CASS query error:`, error45);
33312
- return { status: "failed", error: String(error45) };
33313
- }
33314
- }
33315
- function formatCassHistoryForPrompt(history) {
33316
- if (history.results.length === 0) {
33317
- return "";
34172
+ return prompt;
33318
34173
  }
33319
- const lines = [
33320
- "## Similar Past Tasks",
33321
- "",
33322
- "These similar tasks were found in agent history:",
33323
- "",
33324
- ...history.results.slice(0, 3).map((r, i) => {
33325
- const preview = r.preview.slice(0, 200).replace(/\n/g, " ");
33326
- return `${i + 1}. [${r.agent}] ${preview}...`;
33327
- }),
33328
- "",
33329
- "Consider patterns that worked in these past tasks.",
33330
- ""
33331
- ];
33332
- return lines.join(`
33333
- `);
33334
- }
33335
- var swarm_select_strategy = tool({
33336
- description: "Analyze task and recommend decomposition strategy (file-based, feature-based, or risk-based)",
34174
+ });
34175
+ var swarm_spawn_subtask = tool({
34176
+ description: "Prepare a subtask for spawning. Returns prompt with Agent Mail/beads instructions.",
33337
34177
  args: {
33338
- task: tool.schema.string().min(1).describe("Task description to analyze"),
33339
- codebase_context: tool.schema.string().optional().describe("Optional codebase context (file structure, tech stack, etc.)")
34178
+ bead_id: tool.schema.string().describe("Subtask bead ID"),
34179
+ epic_id: tool.schema.string().describe("Parent epic bead ID"),
34180
+ subtask_title: tool.schema.string().describe("Subtask title"),
34181
+ subtask_description: tool.schema.string().optional().describe("Detailed subtask instructions"),
34182
+ files: tool.schema.array(tool.schema.string()).describe("Files assigned to this subtask"),
34183
+ shared_context: tool.schema.string().optional().describe("Context shared across all agents")
33340
34184
  },
33341
34185
  async execute(args) {
33342
- const result = selectStrategy(args.task);
33343
- let enhancedReasoning = result.reasoning;
33344
- if (args.codebase_context) {
33345
- enhancedReasoning += `
33346
-
33347
- Codebase context considered: ${args.codebase_context.slice(0, 200)}...`;
33348
- }
34186
+ const prompt = formatSubtaskPromptV2({
34187
+ bead_id: args.bead_id,
34188
+ epic_id: args.epic_id,
34189
+ subtask_title: args.subtask_title,
34190
+ subtask_description: args.subtask_description || "",
34191
+ files: args.files,
34192
+ shared_context: args.shared_context
34193
+ });
33349
34194
  return JSON.stringify({
33350
- strategy: result.strategy,
33351
- confidence: Math.round(result.confidence * 100) / 100,
33352
- reasoning: enhancedReasoning,
33353
- description: STRATEGIES[result.strategy].description,
33354
- guidelines: STRATEGIES[result.strategy].guidelines,
33355
- anti_patterns: STRATEGIES[result.strategy].antiPatterns,
33356
- alternatives: result.alternatives.map((alt) => ({
33357
- strategy: alt.strategy,
33358
- description: STRATEGIES[alt.strategy].description,
33359
- score: alt.score
33360
- }))
34195
+ prompt,
34196
+ bead_id: args.bead_id,
34197
+ epic_id: args.epic_id,
34198
+ files: args.files
33361
34199
  }, null, 2);
33362
34200
  }
33363
34201
  });
33364
- var STRATEGY_DECOMPOSITION_PROMPT = `You are decomposing a task into parallelizable subtasks for a swarm of agents.
33365
-
33366
- ## Task
33367
- {task}
33368
-
33369
- {strategy_guidelines}
33370
-
33371
- {context_section}
33372
-
33373
- {cass_history}
33374
-
33375
- {skills_context}
33376
-
33377
- ## MANDATORY: Beads Issue Tracking
33378
-
33379
- **Every subtask MUST become a bead.** This is non-negotiable.
33380
-
33381
- After decomposition, the coordinator will:
33382
- 1. Create an epic bead for the overall task
33383
- 2. Create child beads for each subtask
33384
- 3. Track progress through bead status updates
33385
- 4. Close beads with summaries when complete
33386
-
33387
- Agents MUST update their bead status as they work. No silent progress.
33388
-
33389
- ## Requirements
33390
-
33391
- 1. **Break into 2-{max_subtasks} independent subtasks** that can run in parallel
33392
- 2. **Assign files** - each subtask must specify which files it will modify
33393
- 3. **No file overlap** - files cannot appear in multiple subtasks (they get exclusive locks)
33394
- 4. **Order by dependency** - if subtask B needs subtask A's output, A must come first in the array
33395
- 5. **Estimate complexity** - 1 (trivial) to 5 (complex)
33396
- 6. **Plan aggressively** - break down more than you think necessary, smaller is better
33397
-
33398
- ## Response Format
33399
-
33400
- Respond with a JSON object matching this schema:
33401
-
33402
- \`\`\`typescript
33403
- {
33404
- epic: {
33405
- title: string, // Epic title for the beads tracker
33406
- description?: string // Brief description of the overall goal
34202
+ var swarm_evaluation_prompt = tool({
34203
+ description: "Generate self-evaluation prompt for a completed subtask",
34204
+ args: {
34205
+ bead_id: tool.schema.string().describe("Subtask bead ID"),
34206
+ subtask_title: tool.schema.string().describe("Subtask title"),
34207
+ files_touched: tool.schema.array(tool.schema.string()).describe("Files that were modified")
33407
34208
  },
33408
- subtasks: [
33409
- {
33410
- title: string, // What this subtask accomplishes
33411
- description?: string, // Detailed instructions for the agent
33412
- files: string[], // Files this subtask will modify (globs allowed)
33413
- dependencies: number[], // Indices of subtasks this depends on (0-indexed)
33414
- estimated_complexity: 1-5 // Effort estimate
33415
- },
33416
- // ... more subtasks
33417
- ]
33418
- }
33419
- \`\`\`
33420
-
33421
- Now decompose the task:`;
34209
+ async execute(args) {
34210
+ const prompt = formatEvaluationPrompt({
34211
+ bead_id: args.bead_id,
34212
+ subtask_title: args.subtask_title,
34213
+ files_touched: args.files_touched
34214
+ });
34215
+ return JSON.stringify({
34216
+ prompt,
34217
+ expected_schema: "Evaluation",
34218
+ schema_hint: {
34219
+ passed: "boolean",
34220
+ criteria: {
34221
+ type_safe: { passed: "boolean", feedback: "string" },
34222
+ no_bugs: { passed: "boolean", feedback: "string" },
34223
+ patterns: { passed: "boolean", feedback: "string" },
34224
+ readable: { passed: "boolean", feedback: "string" }
34225
+ },
34226
+ overall_feedback: "string",
34227
+ retry_suggestion: "string | null"
34228
+ }
34229
+ }, null, 2);
34230
+ }
34231
+ });
33422
34232
  var swarm_plan_prompt = tool({
33423
34233
  description: "Generate strategy-specific decomposition prompt. Auto-selects strategy or uses provided one. Queries CASS for similar tasks.",
33424
34234
  args: {
33425
34235
  task: tool.schema.string().min(1).describe("Task description to decompose"),
33426
34236
  strategy: tool.schema.enum(["file-based", "feature-based", "risk-based", "auto"]).optional().describe("Decomposition strategy (default: auto-detect)"),
33427
- max_subtasks: tool.schema.number().int().min(2).default(5).describe("Maximum number of subtasks (default: 5)"),
34237
+ max_subtasks: tool.schema.number().int().min(2).max(10).default(5).describe("Maximum number of subtasks (default: 5)"),
33428
34238
  context: tool.schema.string().optional().describe("Additional context (codebase info, constraints, etc.)"),
33429
34239
  query_cass: tool.schema.boolean().optional().describe("Query CASS for similar past tasks (default: true)"),
33430
- cass_limit: tool.schema.number().int().min(1).optional().describe("Max CASS results to include (default: 3)"),
34240
+ cass_limit: tool.schema.number().int().min(1).max(10).optional().describe("Max CASS results to include (default: 3)"),
33431
34241
  include_skills: tool.schema.boolean().optional().describe("Include available skills in context (default: true)")
33432
34242
  },
33433
34243
  async execute(args) {
34244
+ const { selectStrategy: selectStrategy2, formatStrategyGuidelines: formatStrategyGuidelines2, STRATEGIES: STRATEGIES2 } = await Promise.resolve().then(() => (init_swarm_strategies(), exports_swarm_strategies));
34245
+ const { formatMemoryQueryForDecomposition: formatMemoryQueryForDecomposition2 } = await Promise.resolve().then(() => (init_learning(), exports_learning));
34246
+ const { listSkills: listSkills2, getSkillsContextForSwarm: getSkillsContextForSwarm2, findRelevantSkills: findRelevantSkills2 } = await Promise.resolve().then(() => (init_skills(), exports_skills));
33434
34247
  let selectedStrategy;
33435
34248
  let strategyReasoning;
33436
34249
  if (args.strategy && args.strategy !== "auto") {
33437
34250
  selectedStrategy = args.strategy;
33438
34251
  strategyReasoning = `User-specified strategy: ${selectedStrategy}`;
33439
34252
  } else {
33440
- const selection = selectStrategy(args.task);
34253
+ const selection = selectStrategy2(args.task);
33441
34254
  selectedStrategy = selection.strategy;
33442
34255
  strategyReasoning = selection.reasoning;
33443
34256
  }
33444
- let cassContext = "";
33445
- let cassResultInfo;
33446
- if (args.query_cass !== false) {
33447
- const cassResult = await queryCassHistory(args.task, args.cass_limit ?? 3);
33448
- if (cassResult.status === "success") {
33449
- cassContext = formatCassHistoryForPrompt(cassResult.data);
33450
- cassResultInfo = {
33451
- queried: true,
33452
- results_found: cassResult.data.results.length,
33453
- included_in_context: true
33454
- };
33455
- } else {
33456
- cassResultInfo = {
33457
- queried: true,
33458
- results_found: 0,
33459
- included_in_context: false,
33460
- reason: cassResult.status
33461
- };
33462
- }
33463
- } else {
33464
- cassResultInfo = { queried: false, reason: "disabled" };
33465
- }
33466
34257
  let skillsContext = "";
33467
34258
  let skillsInfo = {
33468
34259
  included: false
33469
34260
  };
33470
34261
  if (args.include_skills !== false) {
33471
- const allSkills = await listSkills();
34262
+ const allSkills = await listSkills2();
33472
34263
  if (allSkills.length > 0) {
33473
- skillsContext = await getSkillsContextForSwarm();
33474
- const relevantSkills = await findRelevantSkills(args.task);
34264
+ skillsContext = await getSkillsContextForSwarm2();
34265
+ const relevantSkills = await findRelevantSkills2(args.task);
33475
34266
  skillsInfo = {
33476
34267
  included: true,
33477
34268
  count: allSkills.length,
@@ -33484,18 +34275,18 @@ var swarm_plan_prompt = tool({
33484
34275
  }
33485
34276
  }
33486
34277
  }
33487
- const strategyGuidelines = formatStrategyGuidelines(selectedStrategy);
34278
+ const strategyGuidelines = formatStrategyGuidelines2(selectedStrategy);
33488
34279
  const contextSection = args.context ? `## Additional Context
33489
34280
  ${args.context}` : `## Additional Context
33490
34281
  (none provided)`;
33491
- const prompt = STRATEGY_DECOMPOSITION_PROMPT.replace("{task}", args.task).replace("{strategy_guidelines}", strategyGuidelines).replace("{context_section}", contextSection).replace("{cass_history}", cassContext || "").replace("{skills_context}", skillsContext || "").replace("{max_subtasks}", (args.max_subtasks ?? 5).toString());
34282
+ const prompt = STRATEGY_DECOMPOSITION_PROMPT2.replace("{task}", args.task).replace("{strategy_guidelines}", strategyGuidelines).replace("{context_section}", contextSection).replace("{cass_history}", "").replace("{skills_context}", skillsContext || "").replace("{max_subtasks}", (args.max_subtasks ?? 5).toString());
33492
34283
  return JSON.stringify({
33493
34284
  prompt,
33494
34285
  strategy: {
33495
34286
  selected: selectedStrategy,
33496
34287
  reasoning: strategyReasoning,
33497
- guidelines: STRATEGIES[selectedStrategy].guidelines,
33498
- anti_patterns: STRATEGIES[selectedStrategy].antiPatterns
34288
+ guidelines: STRATEGIES2[selectedStrategy].guidelines,
34289
+ anti_patterns: STRATEGIES2[selectedStrategy].antiPatterns
33499
34290
  },
33500
34291
  expected_schema: "BeadTree",
33501
34292
  schema_hint: {
@@ -33511,243 +34302,135 @@ ${args.context}` : `## Additional Context
33511
34302
  ]
33512
34303
  },
33513
34304
  validation_note: "Parse agent response as JSON and validate with swarm_validate_decomposition",
33514
- cass_history: cassResultInfo,
33515
- skills: skillsInfo
33516
- }, null, 2);
33517
- }
33518
- });
33519
- var swarm_decompose = tool({
33520
- description: "Generate decomposition prompt for breaking task into parallelizable subtasks. Optionally queries CASS for similar past tasks.",
33521
- args: {
33522
- task: tool.schema.string().min(1).describe("Task description to decompose"),
33523
- max_subtasks: tool.schema.number().int().min(2).default(5).describe("Maximum number of subtasks (default: 5)"),
33524
- context: tool.schema.string().optional().describe("Additional context (codebase info, constraints, etc.)"),
33525
- query_cass: tool.schema.boolean().optional().describe("Query CASS for similar past tasks (default: true)"),
33526
- cass_limit: tool.schema.number().int().min(1).optional().describe("Max CASS results to include (default: 3)")
33527
- },
33528
- async execute(args) {
33529
- let cassContext = "";
33530
- let cassResultInfo;
33531
- if (args.query_cass !== false) {
33532
- const cassResult = await queryCassHistory(args.task, args.cass_limit ?? 3);
33533
- if (cassResult.status === "success") {
33534
- cassContext = formatCassHistoryForPrompt(cassResult.data);
33535
- cassResultInfo = {
33536
- queried: true,
33537
- results_found: cassResult.data.results.length,
33538
- included_in_context: true
33539
- };
33540
- } else {
33541
- cassResultInfo = {
33542
- queried: true,
33543
- results_found: 0,
33544
- included_in_context: false,
33545
- reason: cassResult.status
33546
- };
33547
- }
33548
- } else {
33549
- cassResultInfo = { queried: false, reason: "disabled" };
33550
- }
33551
- const fullContext = [args.context, cassContext].filter(Boolean).join(`
33552
-
33553
- `);
33554
- const prompt = formatDecompositionPrompt(args.task, args.max_subtasks ?? 5, fullContext || undefined);
33555
- return JSON.stringify({
33556
- prompt,
33557
- expected_schema: "BeadTree",
33558
- schema_hint: {
33559
- epic: { title: "string", description: "string?" },
33560
- subtasks: [
33561
- {
33562
- title: "string",
33563
- description: "string?",
33564
- files: "string[]",
33565
- dependencies: "number[]",
33566
- estimated_complexity: "1-5"
33567
- }
33568
- ]
33569
- },
33570
- validation_note: "Parse agent response as JSON and validate with BeadTreeSchema from schemas/bead.ts",
33571
- cass_history: cassResultInfo
34305
+ skills: skillsInfo,
34306
+ memory_query: formatMemoryQueryForDecomposition2(args.task, 3)
33572
34307
  }, null, 2);
33573
34308
  }
33574
34309
  });
33575
- var swarm_validate_decomposition = tool({
33576
- description: "Validate a decomposition response against BeadTreeSchema",
33577
- args: {
33578
- response: tool.schema.string().describe("JSON response from agent (BeadTree format)")
33579
- },
33580
- async execute(args) {
33581
- try {
33582
- const parsed = JSON.parse(args.response);
33583
- const validated = BeadTreeSchema.parse(parsed);
33584
- const allFiles = new Set;
33585
- const conflicts = [];
33586
- for (const subtask of validated.subtasks) {
33587
- for (const file2 of subtask.files) {
33588
- if (allFiles.has(file2)) {
33589
- conflicts.push(file2);
33590
- }
33591
- allFiles.add(file2);
33592
- }
33593
- }
33594
- if (conflicts.length > 0) {
33595
- return JSON.stringify({
33596
- valid: false,
33597
- error: `File conflicts detected: ${conflicts.join(", ")}`,
33598
- hint: "Each file can only be assigned to one subtask"
33599
- }, null, 2);
33600
- }
33601
- for (let i = 0;i < validated.subtasks.length; i++) {
33602
- const deps = validated.subtasks[i].dependencies;
33603
- for (const dep of deps) {
33604
- if (dep < 0 || dep >= validated.subtasks.length) {
33605
- return JSON.stringify({
33606
- valid: false,
33607
- error: `Invalid dependency: subtask ${i} depends on ${dep}, but only ${validated.subtasks.length} subtasks exist (indices 0-${validated.subtasks.length - 1})`,
33608
- hint: "Dependency index is out of bounds"
33609
- }, null, 2);
33610
- }
33611
- if (dep >= i) {
33612
- return JSON.stringify({
33613
- valid: false,
33614
- error: `Invalid dependency: subtask ${i} depends on ${dep}, but dependencies must be earlier in the array`,
33615
- hint: "Reorder subtasks so dependencies come before dependents"
33616
- }, null, 2);
33617
- }
33618
- }
33619
- }
33620
- const instructionConflicts = detectInstructionConflicts(validated.subtasks);
33621
- return JSON.stringify({
33622
- valid: true,
33623
- bead_tree: validated,
33624
- stats: {
33625
- subtask_count: validated.subtasks.length,
33626
- total_files: allFiles.size,
33627
- total_complexity: validated.subtasks.reduce((sum, s) => sum + s.estimated_complexity, 0)
33628
- },
33629
- warnings: instructionConflicts.length > 0 ? {
33630
- instruction_conflicts: instructionConflicts,
33631
- hint: "Review these potential conflicts between subtask instructions"
33632
- } : undefined
33633
- }, null, 2);
33634
- } catch (error45) {
33635
- if (error45 instanceof exports_external.ZodError) {
33636
- return JSON.stringify({
33637
- valid: false,
33638
- error: "Schema validation failed",
33639
- details: error45.issues
33640
- }, null, 2);
33641
- }
33642
- if (error45 instanceof SyntaxError) {
33643
- return JSON.stringify({
33644
- valid: false,
33645
- error: "Invalid JSON",
33646
- details: error45.message
33647
- }, null, 2);
33648
- }
33649
- throw error45;
33650
- }
34310
+ var promptTools = {
34311
+ swarm_subtask_prompt,
34312
+ swarm_spawn_subtask,
34313
+ swarm_evaluation_prompt,
34314
+ swarm_plan_prompt
34315
+ };
34316
+ // src/swarm-orchestrate.ts
34317
+ init_dist();
34318
+ init_zod();
34319
+ init_swarm_mail();
34320
+ init_learning();
34321
+ init_skills();
34322
+ async function queryEpicSubtasks(epicId) {
34323
+ const beadsAvailable = await isToolAvailable("beads");
34324
+ if (!beadsAvailable) {
34325
+ warnMissingTool("beads");
34326
+ return [];
33651
34327
  }
33652
- });
33653
- var swarm_status = tool({
33654
- description: "Get status of a swarm by epic ID",
33655
- args: {
33656
- epic_id: tool.schema.string().describe("Epic bead ID (e.g., bd-abc123)"),
33657
- project_key: tool.schema.string().describe("Project path (for Agent Mail queries)")
33658
- },
33659
- async execute(args) {
33660
- const subtasks = await queryEpicSubtasks(args.epic_id);
33661
- const statusCounts = {
33662
- running: 0,
33663
- completed: 0,
33664
- failed: 0,
33665
- blocked: 0
33666
- };
33667
- const agents = [];
33668
- for (const bead of subtasks) {
33669
- let agentStatus = "pending";
33670
- switch (bead.status) {
33671
- case "in_progress":
33672
- agentStatus = "running";
33673
- statusCounts.running++;
33674
- break;
33675
- case "closed":
33676
- agentStatus = "completed";
33677
- statusCounts.completed++;
33678
- break;
33679
- case "blocked":
33680
- agentStatus = "pending";
33681
- statusCounts.blocked++;
33682
- break;
33683
- default:
33684
- break;
33685
- }
33686
- agents.push({
33687
- bead_id: bead.id,
33688
- agent_name: "",
33689
- status: agentStatus,
33690
- files: []
33691
- });
33692
- }
33693
- const messageCount = await querySwarmMessages(args.project_key, args.epic_id);
33694
- const status = {
33695
- epic_id: args.epic_id,
33696
- total_agents: subtasks.length,
33697
- running: statusCounts.running,
33698
- completed: statusCounts.completed,
33699
- failed: statusCounts.failed,
33700
- blocked: statusCounts.blocked,
33701
- agents,
33702
- last_update: new Date().toISOString()
33703
- };
33704
- const validated = SwarmStatusSchema.parse(status);
33705
- return JSON.stringify({
33706
- ...validated,
33707
- message_count: messageCount,
33708
- progress_percent: subtasks.length > 0 ? Math.round(statusCounts.completed / subtasks.length * 100) : 0
33709
- }, null, 2);
34328
+ const result = await Bun.$`bd list --parent ${epicId} --json`.quiet().nothrow();
34329
+ if (result.exitCode !== 0) {
34330
+ console.error(`[swarm] ERROR: Failed to query subtasks for epic ${epicId}:`, result.stderr.toString());
34331
+ return [];
33710
34332
  }
33711
- });
33712
- var swarm_progress = tool({
33713
- description: "Report progress on a subtask to coordinator",
33714
- args: {
33715
- project_key: tool.schema.string().describe("Project path"),
33716
- agent_name: tool.schema.string().describe("Your Agent Mail name"),
33717
- bead_id: tool.schema.string().describe("Subtask bead ID"),
33718
- status: tool.schema.enum(["in_progress", "blocked", "completed", "failed"]).describe("Current status"),
33719
- message: tool.schema.string().optional().describe("Progress message or blockers"),
33720
- progress_percent: tool.schema.number().min(0).max(100).optional().describe("Completion percentage"),
33721
- files_touched: tool.schema.array(tool.schema.string()).optional().describe("Files modified so far")
33722
- },
33723
- async execute(args) {
33724
- const progress = {
33725
- bead_id: args.bead_id,
33726
- agent_name: args.agent_name,
33727
- status: args.status,
33728
- progress_percent: args.progress_percent,
33729
- message: args.message,
33730
- files_touched: args.files_touched,
33731
- timestamp: new Date().toISOString()
33732
- };
33733
- const validated = AgentProgressSchema.parse(progress);
33734
- if (args.status === "blocked" || args.status === "in_progress") {
33735
- const beadStatus = args.status === "blocked" ? "blocked" : "in_progress";
33736
- await Bun.$`bd update ${args.bead_id} --status ${beadStatus} --json`.quiet().nothrow();
34333
+ try {
34334
+ const parsed = JSON.parse(result.stdout.toString());
34335
+ return exports_external.array(BeadSchema).parse(parsed);
34336
+ } catch (error45) {
34337
+ if (error45 instanceof exports_external.ZodError) {
34338
+ console.error(`[swarm] ERROR: Invalid bead data for epic ${epicId}:`, error45.message);
34339
+ return [];
33737
34340
  }
33738
- const epicId = args.bead_id.includes(".") ? args.bead_id.split(".")[0] : args.bead_id;
33739
- await sendSwarmMessage({
33740
- projectPath: args.project_key,
33741
- fromAgent: args.agent_name,
33742
- toAgents: [],
33743
- subject: `Progress: ${args.bead_id} - ${args.status}`,
33744
- body: formatProgressMessage(validated),
33745
- threadId: epicId,
33746
- importance: args.status === "blocked" ? "high" : "normal"
34341
+ console.error(`[swarm] ERROR: Failed to parse beads for epic ${epicId}:`, error45);
34342
+ throw error45;
34343
+ }
34344
+ }
34345
+ async function querySwarmMessages(projectKey, threadId) {
34346
+ const agentMailAvailable2 = await isToolAvailable("agent-mail");
34347
+ if (!agentMailAvailable2) {
34348
+ return 0;
34349
+ }
34350
+ try {
34351
+ const inbox = await getSwarmInbox({
34352
+ projectPath: projectKey,
34353
+ agentName: "coordinator",
34354
+ limit: 5,
34355
+ includeBodies: false
33747
34356
  });
33748
- return `Progress reported: ${args.status}${args.progress_percent !== undefined ? ` (${args.progress_percent}%)` : ""}`;
34357
+ const threadMessages = inbox.messages.filter((m) => m.thread_id === threadId);
34358
+ return threadMessages.length;
34359
+ } catch (error45) {
34360
+ console.warn(`[swarm] Failed to query swarm messages for thread ${threadId}:`, error45);
34361
+ return 0;
33749
34362
  }
33750
- });
34363
+ }
34364
+ function formatProgressMessage(progress) {
34365
+ const lines = [
34366
+ `**Status**: ${progress.status}`,
34367
+ progress.progress_percent !== undefined ? `**Progress**: ${progress.progress_percent}%` : null,
34368
+ progress.message ? `**Message**: ${progress.message}` : null,
34369
+ progress.files_touched && progress.files_touched.length > 0 ? `**Files touched**:
34370
+ ${progress.files_touched.map((f) => `- \`${f}\``).join(`
34371
+ `)}` : null,
34372
+ progress.blockers && progress.blockers.length > 0 ? `**Blockers**:
34373
+ ${progress.blockers.map((b) => `- ${b}`).join(`
34374
+ `)}` : null
34375
+ ];
34376
+ return lines.filter(Boolean).join(`
34377
+
34378
+ `);
34379
+ }
34380
+ async function runUbsScan(files) {
34381
+ if (files.length === 0) {
34382
+ return null;
34383
+ }
34384
+ const ubsAvailable = await isToolAvailable("ubs");
34385
+ if (!ubsAvailable) {
34386
+ warnMissingTool("ubs");
34387
+ return null;
34388
+ }
34389
+ try {
34390
+ const result = await Bun.$`ubs scan ${files.join(" ")} --json`.quiet().nothrow();
34391
+ const output = result.stdout.toString();
34392
+ if (!output.trim()) {
34393
+ return {
34394
+ exitCode: result.exitCode,
34395
+ bugs: [],
34396
+ summary: { total: 0, critical: 0, high: 0, medium: 0, low: 0 }
34397
+ };
34398
+ }
34399
+ try {
34400
+ const parsed = JSON.parse(output);
34401
+ if (typeof parsed !== "object" || parsed === null) {
34402
+ throw new Error("UBS output is not an object");
34403
+ }
34404
+ if (!Array.isArray(parsed.bugs)) {
34405
+ console.warn("[swarm] UBS output missing bugs array, using empty");
34406
+ }
34407
+ if (typeof parsed.summary !== "object" || parsed.summary === null) {
34408
+ console.warn("[swarm] UBS output missing summary object, using empty");
34409
+ }
34410
+ return {
34411
+ exitCode: result.exitCode,
34412
+ bugs: Array.isArray(parsed.bugs) ? parsed.bugs : [],
34413
+ summary: parsed.summary || {
34414
+ total: 0,
34415
+ critical: 0,
34416
+ high: 0,
34417
+ medium: 0,
34418
+ low: 0
34419
+ }
34420
+ };
34421
+ } catch (error45) {
34422
+ console.error(`[swarm] CRITICAL: UBS scan failed to parse JSON output because output is malformed:`, error45);
34423
+ console.error(`[swarm] Raw output: ${output}. Try: Run 'ubs doctor' to check installation, verify UBS version with 'ubs --version' (need v1.0.0+), or check if UBS supports --json flag.`);
34424
+ return {
34425
+ exitCode: result.exitCode,
34426
+ bugs: [],
34427
+ summary: { total: 0, critical: 0, high: 0, medium: 0, low: 0 }
34428
+ };
34429
+ }
34430
+ } catch {
34431
+ return null;
34432
+ }
34433
+ }
33751
34434
  async function runTypecheckVerification() {
33752
34435
  const step = {
33753
34436
  name: "typecheck",
@@ -33881,60 +34564,191 @@ async function runVerificationGate(filesTouched, skipUbs = false) {
33881
34564
  blockers
33882
34565
  };
33883
34566
  }
33884
- async function runUbsScan(files) {
33885
- if (files.length === 0) {
33886
- return null;
33887
- }
33888
- const ubsAvailable = await isToolAvailable("ubs");
33889
- if (!ubsAvailable) {
33890
- warnMissingTool("ubs");
33891
- return null;
34567
+ function classifyFailure(error45) {
34568
+ const msg = (typeof error45 === "string" ? error45 : error45.message).toLowerCase();
34569
+ if (msg.includes("timeout"))
34570
+ return "timeout";
34571
+ if (msg.includes("conflict") || msg.includes("reservation"))
34572
+ return "conflict";
34573
+ if (msg.includes("validation") || msg.includes("schema"))
34574
+ return "validation";
34575
+ if (msg.includes("context") || msg.includes("token"))
34576
+ return "context_overflow";
34577
+ if (msg.includes("blocked") || msg.includes("dependency"))
34578
+ return "dependency_blocked";
34579
+ if (msg.includes("cancel"))
34580
+ return "user_cancelled";
34581
+ if (msg.includes("tool") || msg.includes("command") || msg.includes("failed to execute")) {
34582
+ return "tool_failure";
33892
34583
  }
33893
- try {
33894
- const result = await Bun.$`ubs scan ${files.join(" ")} --json`.quiet().nothrow();
33895
- const output = result.stdout.toString();
33896
- if (!output.trim()) {
33897
- return {
33898
- exitCode: result.exitCode,
33899
- bugs: [],
33900
- summary: { total: 0, critical: 0, high: 0, medium: 0, low: 0 }
33901
- };
34584
+ return "unknown";
34585
+ }
34586
+ var globalErrorAccumulator = new ErrorAccumulator;
34587
+ var globalStrikeStorage = new InMemoryStrikeStorage;
34588
+ var swarm_init = tool({
34589
+ description: "Initialize swarm session: discovers available skills, checks tool availability. ALWAYS call at swarm start.",
34590
+ args: {
34591
+ project_path: tool.schema.string().optional().describe("Project path (for Agent Mail init)")
34592
+ },
34593
+ async execute(args) {
34594
+ const availability = await checkAllTools();
34595
+ const report = formatToolAvailability(availability);
34596
+ const beadsAvailable = availability.get("beads")?.status.available ?? false;
34597
+ const agentMailAvailable2 = availability.get("agent-mail")?.status.available ?? false;
34598
+ const warnings = [];
34599
+ const degradedFeatures = [];
34600
+ if (!beadsAvailable) {
34601
+ warnings.push("⚠️ beads (bd) not available - issue tracking disabled, swarm coordination will be limited");
34602
+ degradedFeatures.push("issue tracking", "progress persistence");
33902
34603
  }
33903
- try {
33904
- const parsed = JSON.parse(output);
33905
- if (typeof parsed !== "object" || parsed === null) {
33906
- throw new Error("UBS output is not an object");
33907
- }
33908
- if (!Array.isArray(parsed.bugs)) {
33909
- console.warn("[swarm] UBS output missing bugs array, using empty");
33910
- }
33911
- if (typeof parsed.summary !== "object" || parsed.summary === null) {
33912
- console.warn("[swarm] UBS output missing summary object, using empty");
33913
- }
33914
- return {
33915
- exitCode: result.exitCode,
33916
- bugs: Array.isArray(parsed.bugs) ? parsed.bugs : [],
33917
- summary: parsed.summary || {
33918
- total: 0,
33919
- critical: 0,
33920
- high: 0,
33921
- medium: 0,
33922
- low: 0
34604
+ if (!agentMailAvailable2) {
34605
+ warnings.push("⚠️ agent-mail not available - multi-agent communication disabled");
34606
+ degradedFeatures.push("agent communication", "file reservations");
34607
+ }
34608
+ if (!availability.get("cass")?.status.available) {
34609
+ degradedFeatures.push("historical context from past sessions");
34610
+ }
34611
+ if (!availability.get("ubs")?.status.available) {
34612
+ degradedFeatures.push("pre-completion bug scanning");
34613
+ }
34614
+ if (!availability.get("semantic-memory")?.status.available) {
34615
+ degradedFeatures.push("persistent learning (using in-memory fallback)");
34616
+ }
34617
+ const availableSkills = await listSkills();
34618
+ const skillsInfo = {
34619
+ count: availableSkills.length,
34620
+ available: availableSkills.length > 0,
34621
+ skills: availableSkills.map((s) => ({
34622
+ name: s.name,
34623
+ description: s.description,
34624
+ hasScripts: s.hasScripts
34625
+ }))
34626
+ };
34627
+ let skillsGuidance;
34628
+ if (availableSkills.length > 0) {
34629
+ skillsGuidance = `Found ${availableSkills.length} skill(s). Use skills_list to see details, skills_use to activate.`;
34630
+ } else {
34631
+ skillsGuidance = "No skills found. Add skills to .opencode/skills/ or .claude/skills/ for specialized guidance.";
34632
+ }
34633
+ return JSON.stringify({
34634
+ ready: true,
34635
+ tool_availability: Object.fromEntries(Array.from(availability.entries()).map(([k, v]) => [
34636
+ k,
34637
+ {
34638
+ available: v.status.available,
34639
+ fallback: v.status.available ? null : v.fallbackBehavior
33923
34640
  }
33924
- };
33925
- } catch (error45) {
33926
- console.error(`[swarm] CRITICAL: UBS scan failed to parse JSON output because output is malformed:`, error45);
33927
- console.error(`[swarm] Raw output: ${output}. Try: Run 'ubs doctor' to check installation, verify UBS version with 'ubs --version' (need v1.0.0+), or check if UBS supports --json flag.`);
33928
- return {
33929
- exitCode: result.exitCode,
33930
- bugs: [],
33931
- summary: { total: 0, critical: 0, high: 0, medium: 0, low: 0 }
33932
- };
34641
+ ])),
34642
+ skills: skillsInfo,
34643
+ warnings: warnings.length > 0 ? warnings : undefined,
34644
+ degraded_features: degradedFeatures.length > 0 ? degradedFeatures : undefined,
34645
+ recommendations: {
34646
+ skills: skillsGuidance,
34647
+ beads: beadsAvailable ? "✓ Use beads for all task tracking" : "Install beads: npm i -g @joelhooks/beads",
34648
+ agent_mail: agentMailAvailable2 ? "✓ Use Agent Mail for coordination" : "Start Agent Mail: agent-mail serve"
34649
+ },
34650
+ report
34651
+ }, null, 2);
34652
+ }
34653
+ });
34654
+ var swarm_status = tool({
34655
+ description: "Get status of a swarm by epic ID",
34656
+ args: {
34657
+ epic_id: tool.schema.string().describe("Epic bead ID (e.g., bd-abc123)"),
34658
+ project_key: tool.schema.string().describe("Project path (for Agent Mail queries)")
34659
+ },
34660
+ async execute(args) {
34661
+ const subtasks = await queryEpicSubtasks(args.epic_id);
34662
+ const statusCounts = {
34663
+ running: 0,
34664
+ completed: 0,
34665
+ failed: 0,
34666
+ blocked: 0
34667
+ };
34668
+ const agents = [];
34669
+ for (const bead of subtasks) {
34670
+ let agentStatus = "pending";
34671
+ switch (bead.status) {
34672
+ case "in_progress":
34673
+ agentStatus = "running";
34674
+ statusCounts.running++;
34675
+ break;
34676
+ case "closed":
34677
+ agentStatus = "completed";
34678
+ statusCounts.completed++;
34679
+ break;
34680
+ case "blocked":
34681
+ agentStatus = "pending";
34682
+ statusCounts.blocked++;
34683
+ break;
34684
+ default:
34685
+ break;
34686
+ }
34687
+ agents.push({
34688
+ bead_id: bead.id,
34689
+ agent_name: "",
34690
+ status: agentStatus,
34691
+ files: []
34692
+ });
33933
34693
  }
33934
- } catch {
33935
- return null;
34694
+ const messageCount = await querySwarmMessages(args.project_key, args.epic_id);
34695
+ const status = {
34696
+ epic_id: args.epic_id,
34697
+ total_agents: subtasks.length,
34698
+ running: statusCounts.running,
34699
+ completed: statusCounts.completed,
34700
+ failed: statusCounts.failed,
34701
+ blocked: statusCounts.blocked,
34702
+ agents,
34703
+ last_update: new Date().toISOString()
34704
+ };
34705
+ const validated = SwarmStatusSchema.parse(status);
34706
+ return JSON.stringify({
34707
+ ...validated,
34708
+ message_count: messageCount,
34709
+ progress_percent: subtasks.length > 0 ? Math.round(statusCounts.completed / subtasks.length * 100) : 0
34710
+ }, null, 2);
33936
34711
  }
33937
- }
34712
+ });
34713
+ var swarm_progress = tool({
34714
+ description: "Report progress on a subtask to coordinator",
34715
+ args: {
34716
+ project_key: tool.schema.string().describe("Project path"),
34717
+ agent_name: tool.schema.string().describe("Your Agent Mail name"),
34718
+ bead_id: tool.schema.string().describe("Subtask bead ID"),
34719
+ status: tool.schema.enum(["in_progress", "blocked", "completed", "failed"]).describe("Current status"),
34720
+ message: tool.schema.string().optional().describe("Progress message or blockers"),
34721
+ progress_percent: tool.schema.number().min(0).max(100).optional().describe("Completion percentage"),
34722
+ files_touched: tool.schema.array(tool.schema.string()).optional().describe("Files modified so far")
34723
+ },
34724
+ async execute(args) {
34725
+ const progress = {
34726
+ bead_id: args.bead_id,
34727
+ agent_name: args.agent_name,
34728
+ status: args.status,
34729
+ progress_percent: args.progress_percent,
34730
+ message: args.message,
34731
+ files_touched: args.files_touched,
34732
+ timestamp: new Date().toISOString()
34733
+ };
34734
+ const validated = AgentProgressSchema.parse(progress);
34735
+ if (args.status === "blocked" || args.status === "in_progress") {
34736
+ const beadStatus = args.status === "blocked" ? "blocked" : "in_progress";
34737
+ await Bun.$`bd update ${args.bead_id} --status ${beadStatus} --json`.quiet().nothrow();
34738
+ }
34739
+ const epicId = args.bead_id.includes(".") ? args.bead_id.split(".")[0] : args.bead_id;
34740
+ await sendSwarmMessage({
34741
+ projectPath: args.project_key,
34742
+ fromAgent: args.agent_name,
34743
+ toAgents: [],
34744
+ subject: `Progress: ${args.bead_id} - ${args.status}`,
34745
+ body: formatProgressMessage(validated),
34746
+ threadId: epicId,
34747
+ importance: args.status === "blocked" ? "high" : "normal"
34748
+ });
34749
+ return `Progress reported: ${args.status}${args.progress_percent !== undefined ? ` (${args.progress_percent}%)` : ""}`;
34750
+ }
34751
+ });
33938
34752
  var swarm_broadcast = tool({
33939
34753
  description: "Broadcast context update to all agents working on the same epic",
33940
34754
  args: {
@@ -33945,8 +34759,8 @@ var swarm_broadcast = tool({
33945
34759
  importance: tool.schema.enum(["info", "warning", "blocker"]).default("info").describe("Priority level (default: info)"),
33946
34760
  files_affected: tool.schema.array(tool.schema.string()).optional().describe("Files this context relates to")
33947
34761
  },
33948
- async execute(args, ctx) {
33949
- const beadId = ctx.beadId || "unknown";
34762
+ async execute(args) {
34763
+ const beadId = "unknown";
33950
34764
  const body = [
33951
34765
  `## Context Update`,
33952
34766
  "",
@@ -34056,7 +34870,7 @@ var swarm_complete = tool({
34056
34870
  }
34057
34871
  const closeResult = await Bun.$`bd close ${args.bead_id} --reason ${args.summary} --json`.quiet().nothrow();
34058
34872
  if (closeResult.exitCode !== 0) {
34059
- throw new SwarmError(`Failed to close bead because bd close command failed: ${closeResult.stderr.toString()}. Try: Verify bead exists and is not already closed with 'bd show ${args.bead_id}', check if bead ID is correct with 'beads_query()', or use beads_close tool directly.`, "complete");
34873
+ throw new Error(`Failed to close bead because bd close command failed: ${closeResult.stderr.toString()}. Try: Verify bead exists and is not already closed with 'bd show ${args.bead_id}', check if bead ID is correct with 'beads_query()', or use beads_close tool directly.`);
34060
34874
  }
34061
34875
  try {
34062
34876
  await releaseSwarmFiles({
@@ -34085,7 +34899,7 @@ var swarm_complete = tool({
34085
34899
  threadId: epicId,
34086
34900
  importance: "normal"
34087
34901
  });
34088
- return JSON.stringify({
34902
+ const response = {
34089
34903
  success: true,
34090
34904
  bead_id: args.bead_id,
34091
34905
  closed: true,
@@ -34121,29 +34935,12 @@ Did you learn anything reusable during this subtask? Consider:
34121
34935
 
34122
34936
  If you discovered something valuable, use \`swarm_learn\` or \`skills_create\` to preserve it as a skill for future swarms.
34123
34937
 
34124
- Files touched: ${args.files_touched?.join(", ") || "none recorded"}`
34125
- }, null, 2);
34938
+ Files touched: ${args.files_touched?.join(", ") || "none recorded"}`,
34939
+ memory_store: formatMemoryStoreOnSuccess(args.bead_id, args.summary, args.files_touched || [])
34940
+ };
34941
+ return JSON.stringify(response, null, 2);
34126
34942
  }
34127
34943
  });
34128
- function classifyFailure(error45) {
34129
- const msg = (typeof error45 === "string" ? error45 : error45.message).toLowerCase();
34130
- if (msg.includes("timeout"))
34131
- return "timeout";
34132
- if (msg.includes("conflict") || msg.includes("reservation"))
34133
- return "conflict";
34134
- if (msg.includes("validation") || msg.includes("schema"))
34135
- return "validation";
34136
- if (msg.includes("context") || msg.includes("token"))
34137
- return "context_overflow";
34138
- if (msg.includes("blocked") || msg.includes("dependency"))
34139
- return "dependency_blocked";
34140
- if (msg.includes("cancel"))
34141
- return "user_cancelled";
34142
- if (msg.includes("tool") || msg.includes("command") || msg.includes("failed to execute")) {
34143
- return "tool_failure";
34144
- }
34145
- return "unknown";
34146
- }
34147
34944
  var swarm_record_outcome = tool({
34148
34945
  description: "Record subtask outcome for implicit feedback scoring. Tracks duration, errors, retries to learn decomposition quality.",
34149
34946
  args: {
@@ -34181,7 +34978,8 @@ var swarm_record_outcome = tool({
34181
34978
  failure_details: args.failure_details
34182
34979
  };
34183
34980
  if (!args.success && !args.failure_mode && args.failure_details) {
34184
- signals.failure_mode = classifyFailure(args.failure_details);
34981
+ const classified = classifyFailure(args.failure_details);
34982
+ signals.failure_mode = classified;
34185
34983
  }
34186
34984
  const validated = OutcomeSignalsSchema.parse(signals);
34187
34985
  const scored = scoreImplicitFeedback(validated, DEFAULT_LEARNING_CONFIG);
@@ -34231,168 +35029,141 @@ var swarm_record_outcome = tool({
34231
35029
  }, null, 2);
34232
35030
  }
34233
35031
  });
34234
- var swarm_subtask_prompt = tool({
34235
- description: "Generate the prompt for a spawned subtask agent",
35032
+ var swarm_accumulate_error = tool({
35033
+ description: "Record an error during subtask execution. Errors feed into retry prompts.",
34236
35034
  args: {
34237
- agent_name: tool.schema.string().describe("Agent Mail name for the agent"),
34238
- bead_id: tool.schema.string().describe("Subtask bead ID"),
34239
- epic_id: tool.schema.string().describe("Epic bead ID"),
34240
- subtask_title: tool.schema.string().describe("Subtask title"),
34241
- subtask_description: tool.schema.string().optional().describe("Detailed subtask instructions"),
34242
- files: tool.schema.array(tool.schema.string()).describe("Files assigned to this subtask"),
34243
- shared_context: tool.schema.string().optional().describe("Context shared across all agents")
35035
+ bead_id: tool.schema.string().describe("Bead ID where error occurred"),
35036
+ error_type: tool.schema.enum(["validation", "timeout", "conflict", "tool_failure", "unknown"]).describe("Category of error"),
35037
+ message: tool.schema.string().describe("Human-readable error message"),
35038
+ stack_trace: tool.schema.string().optional().describe("Stack trace for debugging"),
35039
+ tool_name: tool.schema.string().optional().describe("Tool that failed"),
35040
+ context: tool.schema.string().optional().describe("What was happening when error occurred")
34244
35041
  },
34245
35042
  async execute(args) {
34246
- const prompt = formatSubtaskPrompt({
34247
- agent_name: args.agent_name,
34248
- bead_id: args.bead_id,
34249
- epic_id: args.epic_id,
34250
- subtask_title: args.subtask_title,
34251
- subtask_description: args.subtask_description || "",
34252
- files: args.files,
34253
- shared_context: args.shared_context
35043
+ const entry = await globalErrorAccumulator.recordError(args.bead_id, args.error_type, args.message, {
35044
+ stack_trace: args.stack_trace,
35045
+ tool_name: args.tool_name,
35046
+ context: args.context
34254
35047
  });
34255
- return prompt;
35048
+ return JSON.stringify({
35049
+ success: true,
35050
+ error_id: entry.id,
35051
+ bead_id: entry.bead_id,
35052
+ error_type: entry.error_type,
35053
+ message: entry.message,
35054
+ timestamp: entry.timestamp,
35055
+ note: "Error recorded for retry context. Use swarm_get_error_context to retrieve accumulated errors."
35056
+ }, null, 2);
34256
35057
  }
34257
35058
  });
34258
- var swarm_spawn_subtask = tool({
34259
- description: "Prepare a subtask for spawning. Returns prompt with Agent Mail/beads instructions.",
35059
+ var swarm_get_error_context = tool({
35060
+ description: "Get accumulated errors for a bead. Returns formatted context for retry prompts.",
34260
35061
  args: {
34261
- bead_id: tool.schema.string().describe("Subtask bead ID"),
34262
- epic_id: tool.schema.string().describe("Parent epic bead ID"),
34263
- subtask_title: tool.schema.string().describe("Subtask title"),
34264
- subtask_description: tool.schema.string().optional().describe("Detailed subtask instructions"),
34265
- files: tool.schema.array(tool.schema.string()).describe("Files assigned to this subtask"),
34266
- shared_context: tool.schema.string().optional().describe("Context shared across all agents")
35062
+ bead_id: tool.schema.string().describe("Bead ID to get errors for"),
35063
+ include_resolved: tool.schema.boolean().optional().describe("Include resolved errors (default: false)")
34267
35064
  },
34268
35065
  async execute(args) {
34269
- const prompt = formatSubtaskPromptV2({
34270
- bead_id: args.bead_id,
34271
- epic_id: args.epic_id,
34272
- subtask_title: args.subtask_title,
34273
- subtask_description: args.subtask_description || "",
34274
- files: args.files,
34275
- shared_context: args.shared_context
34276
- });
35066
+ const errorContext = await globalErrorAccumulator.getErrorContext(args.bead_id, args.include_resolved ?? false);
35067
+ const stats = await globalErrorAccumulator.getErrorStats(args.bead_id);
34277
35068
  return JSON.stringify({
34278
- prompt,
34279
35069
  bead_id: args.bead_id,
34280
- epic_id: args.epic_id,
34281
- files: args.files
35070
+ error_context: errorContext,
35071
+ stats: {
35072
+ total_errors: stats.total,
35073
+ unresolved: stats.unresolved,
35074
+ by_type: stats.by_type
35075
+ },
35076
+ has_errors: errorContext.length > 0,
35077
+ usage: "Inject error_context into retry prompt using {error_context} placeholder"
34282
35078
  }, null, 2);
34283
35079
  }
34284
35080
  });
34285
- var TaskResultSchema = exports_external.object({
34286
- success: exports_external.boolean(),
34287
- summary: exports_external.string(),
34288
- files_modified: exports_external.array(exports_external.string()).optional().default([]),
34289
- files_created: exports_external.array(exports_external.string()).optional().default([]),
34290
- issues_found: exports_external.array(exports_external.string()).optional().default([]),
34291
- tests_passed: exports_external.boolean().optional(),
34292
- notes: exports_external.string().optional(),
34293
- blocker: exports_external.string().optional(),
34294
- suggestions: exports_external.array(exports_external.string()).optional()
34295
- });
34296
- var swarm_complete_subtask = tool({
34297
- description: "Handle subtask completion after Task agent returns. Parses result JSON, closes bead on success, creates new beads for issues found.",
35081
+ var swarm_resolve_error = tool({
35082
+ description: "Mark an error as resolved after fixing it. Updates error accumulator state.",
34298
35083
  args: {
34299
- bead_id: exports_external.string().describe("Subtask bead ID to close"),
34300
- task_result: exports_external.string().describe("JSON result from the Task agent (TaskResult schema)"),
34301
- files_touched: exports_external.array(exports_external.string()).optional().describe("Override files touched (uses task_result.files_modified if not provided)")
35084
+ error_id: tool.schema.string().describe("Error ID to mark as resolved")
34302
35085
  },
34303
35086
  async execute(args) {
34304
- let result;
34305
- try {
34306
- const parsed = JSON.parse(args.task_result);
34307
- result = TaskResultSchema.parse(parsed);
34308
- } catch (error45) {
34309
- const errorMessage = error45 instanceof SyntaxError ? `Invalid JSON: ${error45.message}` : error45 instanceof exports_external.ZodError ? `Schema validation failed: ${error45.issues.map((i) => i.message).join(", ")}` : String(error45);
34310
- return JSON.stringify({
34311
- success: false,
34312
- error: "Failed to parse task result",
34313
- details: errorMessage,
34314
- hint: "Task agent should return JSON matching TaskResult schema: { success, summary, files_modified?, issues_found?, ... }"
34315
- }, null, 2);
34316
- }
34317
- const filesTouched = args.files_touched ?? [
34318
- ...result.files_modified,
34319
- ...result.files_created
34320
- ];
34321
- const issuesCreated = [];
34322
- if (!result.success) {
34323
- return JSON.stringify({
34324
- success: false,
34325
- bead_id: args.bead_id,
34326
- task_failed: true,
34327
- summary: result.summary,
34328
- blocker: result.blocker,
34329
- suggestions: result.suggestions,
34330
- files_touched: filesTouched,
34331
- action_needed: "Task failed - review blocker and decide whether to retry or close as failed"
34332
- }, null, 2);
34333
- }
34334
- const closeReason = result.summary.slice(0, 200);
34335
- await Bun.$`bd close ${args.bead_id} -r "${closeReason}"`.quiet().nothrow();
34336
- if (result.issues_found.length > 0) {
34337
- for (const issue2 of result.issues_found) {
34338
- const issueTitle = issue2.slice(0, 100);
34339
- const createResult = await Bun.$`bd create "${issueTitle}" -t bug`.quiet().nothrow();
34340
- if (createResult.exitCode === 0) {
34341
- const output = createResult.stdout.toString();
34342
- const idMatch = output.match(/bd-[a-z0-9]+/);
34343
- issuesCreated.push({
34344
- title: issueTitle,
34345
- id: idMatch?.[0]
34346
- });
34347
- } else {
34348
- issuesCreated.push({
34349
- title: issueTitle,
34350
- id: undefined
34351
- });
35087
+ await globalErrorAccumulator.resolveError(args.error_id);
35088
+ return JSON.stringify({
35089
+ success: true,
35090
+ error_id: args.error_id,
35091
+ resolved: true
35092
+ }, null, 2);
35093
+ }
35094
+ });
35095
+ var swarm_check_strikes = tool({
35096
+ description: "Check 3-strike status for a bead. Records failures, detects architectural problems, generates architecture review prompts.",
35097
+ args: {
35098
+ bead_id: tool.schema.string().describe("Bead ID to check"),
35099
+ action: tool.schema.enum(["check", "add_strike", "clear", "get_prompt"]).describe("Action: check count, add strike, clear strikes, or get prompt"),
35100
+ attempt: tool.schema.string().optional().describe("Description of fix attempt (required for add_strike)"),
35101
+ reason: tool.schema.string().optional().describe("Why the fix failed (required for add_strike)")
35102
+ },
35103
+ async execute(args) {
35104
+ switch (args.action) {
35105
+ case "check": {
35106
+ const count = await getStrikes(args.bead_id, globalStrikeStorage);
35107
+ const strikedOut = await isStrikedOut(args.bead_id, globalStrikeStorage);
35108
+ return JSON.stringify({
35109
+ bead_id: args.bead_id,
35110
+ strike_count: count,
35111
+ is_striked_out: strikedOut,
35112
+ message: strikedOut ? "⚠️ STRUCK OUT: 3 strikes reached. Use get_prompt action for architecture review." : count === 0 ? "No strikes. Clear to proceed." : `${count} strike${count > 1 ? "s" : ""}. ${3 - count} remaining before architecture review required.`,
35113
+ next_action: strikedOut ? "Call with action=get_prompt to get architecture review questions" : "Continue with fix attempt"
35114
+ }, null, 2);
35115
+ }
35116
+ case "add_strike": {
35117
+ if (!args.attempt || !args.reason) {
35118
+ return JSON.stringify({
35119
+ error: "add_strike requires 'attempt' and 'reason' parameters"
35120
+ }, null, 2);
35121
+ }
35122
+ const record2 = await addStrike(args.bead_id, args.attempt, args.reason, globalStrikeStorage);
35123
+ const strikedOut = record2.strike_count >= 3;
35124
+ const response = {
35125
+ bead_id: args.bead_id,
35126
+ strike_count: record2.strike_count,
35127
+ is_striked_out: strikedOut,
35128
+ failures: record2.failures,
35129
+ message: strikedOut ? "⚠️ STRUCK OUT: 3 strikes reached. STOP and question the architecture." : `Strike ${record2.strike_count} recorded. ${3 - record2.strike_count} remaining.`,
35130
+ warning: strikedOut ? "DO NOT attempt Fix #4. Call with action=get_prompt for architecture review." : undefined
35131
+ };
35132
+ if (strikedOut) {
35133
+ response.memory_store = formatMemoryStoreOn3Strike(args.bead_id, record2.failures);
35134
+ }
35135
+ return JSON.stringify(response, null, 2);
35136
+ }
35137
+ case "clear": {
35138
+ await clearStrikes(args.bead_id, globalStrikeStorage);
35139
+ return JSON.stringify({
35140
+ bead_id: args.bead_id,
35141
+ strike_count: 0,
35142
+ is_striked_out: false,
35143
+ message: "Strikes cleared. Fresh start."
35144
+ }, null, 2);
35145
+ }
35146
+ case "get_prompt": {
35147
+ const prompt = await getArchitecturePrompt(args.bead_id, globalStrikeStorage);
35148
+ if (!prompt) {
35149
+ return JSON.stringify({
35150
+ bead_id: args.bead_id,
35151
+ has_prompt: false,
35152
+ message: "No architecture prompt (not struck out yet)"
35153
+ }, null, 2);
34352
35154
  }
35155
+ return JSON.stringify({
35156
+ bead_id: args.bead_id,
35157
+ has_prompt: true,
35158
+ architecture_review_prompt: prompt,
35159
+ message: "Architecture review required. Present this prompt to the human partner."
35160
+ }, null, 2);
34353
35161
  }
35162
+ default:
35163
+ return JSON.stringify({
35164
+ error: `Unknown action: ${args.action}`
35165
+ }, null, 2);
34354
35166
  }
34355
- return JSON.stringify({
34356
- success: true,
34357
- bead_id: args.bead_id,
34358
- bead_closed: true,
34359
- summary: result.summary,
34360
- files_touched: filesTouched,
34361
- tests_passed: result.tests_passed,
34362
- notes: result.notes,
34363
- issues_created: issuesCreated.length > 0 ? issuesCreated : undefined,
34364
- issues_count: issuesCreated.length
34365
- }, null, 2);
34366
- }
34367
- });
34368
- var swarm_evaluation_prompt = tool({
34369
- description: "Generate self-evaluation prompt for a completed subtask",
34370
- args: {
34371
- bead_id: tool.schema.string().describe("Subtask bead ID"),
34372
- subtask_title: tool.schema.string().describe("Subtask title"),
34373
- files_touched: tool.schema.array(tool.schema.string()).describe("Files that were modified")
34374
- },
34375
- async execute(args) {
34376
- const prompt = formatEvaluationPrompt({
34377
- bead_id: args.bead_id,
34378
- subtask_title: args.subtask_title,
34379
- files_touched: args.files_touched
34380
- });
34381
- return JSON.stringify({
34382
- prompt,
34383
- expected_schema: "Evaluation",
34384
- schema_hint: {
34385
- passed: "boolean",
34386
- criteria: {
34387
- type_safe: { passed: "boolean", feedback: "string" },
34388
- no_bugs: { passed: "boolean", feedback: "string" },
34389
- patterns: { passed: "boolean", feedback: "string" },
34390
- readable: { passed: "boolean", feedback: "string" }
34391
- },
34392
- overall_feedback: "string",
34393
- retry_suggestion: "string | null"
34394
- }
34395
- }, null, 2);
34396
35167
  }
34397
35168
  });
34398
35169
  var swarm_learn = tool({
@@ -34465,8 +35236,8 @@ ${args.files_context.map((f) => `- \`${f}\``).join(`
34465
35236
  ---
34466
35237
  *Learned from swarm execution on ${new Date().toISOString().split("T")[0]}*`;
34467
35238
  const { getSkill: getSkill2, invalidateSkillsCache: invalidateSkillsCache2 } = await Promise.resolve().then(() => (init_skills(), exports_skills));
34468
- const { mkdir: mkdir2, writeFile: writeFile2 } = await import("fs/promises");
34469
- const { join: join6 } = await import("path");
35239
+ const { mkdir: mkdir2, writeFile: writeFile2 } = await import("node:fs/promises");
35240
+ const { join: join6 } = await import("node:path");
34470
35241
  const existing = await getSkill2(args.skill_name);
34471
35242
  if (existing) {
34472
35243
  return JSON.stringify({
@@ -34524,227 +35295,28 @@ ${skillBody}`, "utf-8");
34524
35295
  }, null, 2);
34525
35296
  }
34526
35297
  });
34527
- var globalErrorAccumulator = new ErrorAccumulator;
34528
- var swarm_accumulate_error = tool({
34529
- description: "Record an error during subtask execution. Errors feed into retry prompts.",
34530
- args: {
34531
- bead_id: tool.schema.string().describe("Bead ID where error occurred"),
34532
- error_type: tool.schema.enum(["validation", "timeout", "conflict", "tool_failure", "unknown"]).describe("Category of error"),
34533
- message: tool.schema.string().describe("Human-readable error message"),
34534
- stack_trace: tool.schema.string().optional().describe("Stack trace for debugging"),
34535
- tool_name: tool.schema.string().optional().describe("Tool that failed"),
34536
- context: tool.schema.string().optional().describe("What was happening when error occurred")
34537
- },
34538
- async execute(args) {
34539
- const entry = await globalErrorAccumulator.recordError(args.bead_id, args.error_type, args.message, {
34540
- stack_trace: args.stack_trace,
34541
- tool_name: args.tool_name,
34542
- context: args.context
34543
- });
34544
- return JSON.stringify({
34545
- success: true,
34546
- error_id: entry.id,
34547
- bead_id: entry.bead_id,
34548
- error_type: entry.error_type,
34549
- message: entry.message,
34550
- timestamp: entry.timestamp,
34551
- note: "Error recorded for retry context. Use swarm_get_error_context to retrieve accumulated errors."
34552
- }, null, 2);
34553
- }
34554
- });
34555
- var swarm_get_error_context = tool({
34556
- description: "Get accumulated errors for a bead. Returns formatted context for retry prompts.",
34557
- args: {
34558
- bead_id: tool.schema.string().describe("Bead ID to get errors for"),
34559
- include_resolved: tool.schema.boolean().optional().describe("Include resolved errors (default: false)")
34560
- },
34561
- async execute(args) {
34562
- const errorContext = await globalErrorAccumulator.getErrorContext(args.bead_id, args.include_resolved ?? false);
34563
- const stats = await globalErrorAccumulator.getErrorStats(args.bead_id);
34564
- return JSON.stringify({
34565
- bead_id: args.bead_id,
34566
- error_context: errorContext,
34567
- stats: {
34568
- total_errors: stats.total,
34569
- unresolved: stats.unresolved,
34570
- by_type: stats.by_type
34571
- },
34572
- has_errors: errorContext.length > 0,
34573
- usage: "Inject error_context into retry prompt using {error_context} placeholder"
34574
- }, null, 2);
34575
- }
34576
- });
34577
- var swarm_resolve_error = tool({
34578
- description: "Mark an error as resolved after fixing it. Updates error accumulator state.",
34579
- args: {
34580
- error_id: tool.schema.string().describe("Error ID to mark as resolved")
34581
- },
34582
- async execute(args) {
34583
- await globalErrorAccumulator.resolveError(args.error_id);
34584
- return JSON.stringify({
34585
- success: true,
34586
- error_id: args.error_id,
34587
- resolved: true
34588
- }, null, 2);
34589
- }
34590
- });
34591
- var swarm_init = tool({
34592
- description: "Initialize swarm session: discovers available skills, checks tool availability. ALWAYS call at swarm start.",
34593
- args: {
34594
- project_path: tool.schema.string().optional().describe("Project path (for Agent Mail init)")
34595
- },
34596
- async execute(args) {
34597
- const availability = await checkAllTools();
34598
- const report = formatToolAvailability(availability);
34599
- const beadsAvailable = availability.get("beads")?.status.available ?? false;
34600
- const agentMailAvailable2 = availability.get("agent-mail")?.status.available ?? false;
34601
- const warnings = [];
34602
- const degradedFeatures = [];
34603
- if (!beadsAvailable) {
34604
- warnings.push("⚠️ beads (bd) not available - issue tracking disabled, swarm coordination will be limited");
34605
- degradedFeatures.push("issue tracking", "progress persistence");
34606
- }
34607
- if (!agentMailAvailable2) {
34608
- warnings.push("⚠️ agent-mail not available - multi-agent communication disabled");
34609
- degradedFeatures.push("agent communication", "file reservations");
34610
- }
34611
- if (!availability.get("cass")?.status.available) {
34612
- degradedFeatures.push("historical context from past sessions");
34613
- }
34614
- if (!availability.get("ubs")?.status.available) {
34615
- degradedFeatures.push("pre-completion bug scanning");
34616
- }
34617
- if (!availability.get("semantic-memory")?.status.available) {
34618
- degradedFeatures.push("persistent learning (using in-memory fallback)");
34619
- }
34620
- const availableSkills = await listSkills();
34621
- const skillsInfo = {
34622
- count: availableSkills.length,
34623
- available: availableSkills.length > 0,
34624
- skills: availableSkills.map((s) => ({
34625
- name: s.name,
34626
- description: s.description,
34627
- hasScripts: s.hasScripts
34628
- }))
34629
- };
34630
- let skillsGuidance;
34631
- if (availableSkills.length > 0) {
34632
- skillsGuidance = `Found ${availableSkills.length} skill(s). Use skills_list to see details, skills_use to activate.`;
34633
- } else {
34634
- skillsGuidance = "No skills found. Add skills to .opencode/skills/ or .claude/skills/ for specialized guidance.";
34635
- }
34636
- return JSON.stringify({
34637
- ready: true,
34638
- tool_availability: Object.fromEntries(Array.from(availability.entries()).map(([k, v]) => [
34639
- k,
34640
- {
34641
- available: v.status.available,
34642
- fallback: v.status.available ? null : v.fallbackBehavior
34643
- }
34644
- ])),
34645
- skills: skillsInfo,
34646
- warnings: warnings.length > 0 ? warnings : undefined,
34647
- degraded_features: degradedFeatures.length > 0 ? degradedFeatures : undefined,
34648
- recommendations: {
34649
- skills: skillsGuidance,
34650
- beads: beadsAvailable ? "✓ Use beads for all task tracking" : "Install beads: npm i -g @joelhooks/beads",
34651
- agent_mail: agentMailAvailable2 ? "✓ Use Agent Mail for coordination" : "Start Agent Mail: agent-mail serve"
34652
- },
34653
- report
34654
- }, null, 2);
34655
- }
34656
- });
34657
- var swarmTools = {
35298
+ var orchestrateTools = {
34658
35299
  swarm_init,
34659
- swarm_select_strategy,
34660
- swarm_plan_prompt,
34661
- swarm_decompose,
34662
- swarm_validate_decomposition,
34663
35300
  swarm_status,
34664
35301
  swarm_progress,
34665
35302
  swarm_broadcast,
34666
35303
  swarm_complete,
34667
- swarm_learn,
34668
35304
  swarm_record_outcome,
34669
- swarm_subtask_prompt,
34670
- swarm_spawn_subtask,
34671
- swarm_complete_subtask,
34672
- swarm_evaluation_prompt,
34673
35305
  swarm_accumulate_error,
34674
35306
  swarm_get_error_context,
34675
- swarm_resolve_error
35307
+ swarm_resolve_error,
35308
+ swarm_check_strikes,
35309
+ swarm_learn
35310
+ };
35311
+
35312
+ // src/swarm.ts
35313
+ init_swarm_strategies();
35314
+ var swarmTools = {
35315
+ ...strategyTools,
35316
+ ...decomposeTools,
35317
+ ...promptTools,
35318
+ ...orchestrateTools
34676
35319
  };
34677
- var globalStrikeStorage = new InMemoryStrikeStorage;
34678
- var swarm_check_strikes = tool({
34679
- description: "Check 3-strike status for a bead. Records failures, detects architectural problems, generates architecture review prompts.",
34680
- args: {
34681
- bead_id: tool.schema.string().describe("Bead ID to check"),
34682
- action: tool.schema.enum(["check", "add_strike", "clear", "get_prompt"]).describe("Action: check count, add strike, clear strikes, or get prompt"),
34683
- attempt: tool.schema.string().optional().describe("Description of fix attempt (required for add_strike)"),
34684
- reason: tool.schema.string().optional().describe("Why the fix failed (required for add_strike)")
34685
- },
34686
- async execute(args) {
34687
- switch (args.action) {
34688
- case "check": {
34689
- const count = await getStrikes(args.bead_id, globalStrikeStorage);
34690
- const strikedOut = await isStrikedOut(args.bead_id, globalStrikeStorage);
34691
- return JSON.stringify({
34692
- bead_id: args.bead_id,
34693
- strike_count: count,
34694
- is_striked_out: strikedOut,
34695
- message: strikedOut ? "⚠️ STRUCK OUT: 3 strikes reached. Use get_prompt action for architecture review." : count === 0 ? "No strikes. Clear to proceed." : `${count} strike${count > 1 ? "s" : ""}. ${3 - count} remaining before architecture review required.`,
34696
- next_action: strikedOut ? "Call with action=get_prompt to get architecture review questions" : "Continue with fix attempt"
34697
- }, null, 2);
34698
- }
34699
- case "add_strike": {
34700
- if (!args.attempt || !args.reason) {
34701
- return JSON.stringify({
34702
- error: "add_strike requires 'attempt' and 'reason' parameters"
34703
- }, null, 2);
34704
- }
34705
- const record2 = await addStrike(args.bead_id, args.attempt, args.reason, globalStrikeStorage);
34706
- const strikedOut = record2.strike_count >= 3;
34707
- return JSON.stringify({
34708
- bead_id: args.bead_id,
34709
- strike_count: record2.strike_count,
34710
- is_striked_out: strikedOut,
34711
- failures: record2.failures,
34712
- message: strikedOut ? "⚠️ STRUCK OUT: 3 strikes reached. STOP and question the architecture." : `Strike ${record2.strike_count} recorded. ${3 - record2.strike_count} remaining.`,
34713
- warning: strikedOut ? "DO NOT attempt Fix #4. Call with action=get_prompt for architecture review." : undefined
34714
- }, null, 2);
34715
- }
34716
- case "clear": {
34717
- await clearStrikes(args.bead_id, globalStrikeStorage);
34718
- return JSON.stringify({
34719
- bead_id: args.bead_id,
34720
- strike_count: 0,
34721
- is_striked_out: false,
34722
- message: "Strikes cleared. Fresh start."
34723
- }, null, 2);
34724
- }
34725
- case "get_prompt": {
34726
- const prompt = await getArchitecturePrompt(args.bead_id, globalStrikeStorage);
34727
- if (!prompt) {
34728
- return JSON.stringify({
34729
- bead_id: args.bead_id,
34730
- has_prompt: false,
34731
- message: "No architecture prompt (not struck out yet)"
34732
- }, null, 2);
34733
- }
34734
- return JSON.stringify({
34735
- bead_id: args.bead_id,
34736
- has_prompt: true,
34737
- architecture_review_prompt: prompt,
34738
- message: "Architecture review required. Present this prompt to the human partner."
34739
- }, null, 2);
34740
- }
34741
- default:
34742
- return JSON.stringify({
34743
- error: `Unknown action: ${args.action}`
34744
- }, null, 2);
34745
- }
34746
- }
34747
- });
34748
35320
 
34749
35321
  // src/repo-crawl.ts
34750
35322
  init_dist();
@@ -35075,6 +35647,7 @@ init_skills();
35075
35647
  init_dist();
35076
35648
 
35077
35649
  // src/mandate-storage.ts
35650
+ init_learning();
35078
35651
  var cachedCommand = null;
35079
35652
  async function resolveSemanticMemoryCommand() {
35080
35653
  if (cachedCommand)
@@ -35781,6 +36354,9 @@ var mandateTools = {
35781
36354
  mandate_list,
35782
36355
  mandate_stats
35783
36356
  };
36357
+ // src/storage.ts
36358
+ init_learning();
36359
+
35784
36360
  // src/anti-patterns.ts
35785
36361
  init_zod();
35786
36362
  var PatternKindSchema = exports_external.enum(["pattern", "anti_pattern"]);
@@ -35827,6 +36403,7 @@ class InMemoryPatternStorage {
35827
36403
 
35828
36404
  // src/pattern-maturity.ts
35829
36405
  init_zod();
36406
+ init_learning();
35830
36407
  var MaturityStateSchema = exports_external.enum([
35831
36408
  "candidate",
35832
36409
  "established",