bosun 0.40.21 → 0.41.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (80)
  1. package/.env.example +8 -0
  2. package/README.md +20 -0
  3. package/agent/agent-custom-tools.mjs +23 -5
  4. package/agent/agent-event-bus.mjs +248 -6
  5. package/agent/agent-pool.mjs +131 -30
  6. package/agent/agent-work-analyzer.mjs +8 -16
  7. package/agent/primary-agent.mjs +81 -7
  8. package/agent/retry-queue.mjs +164 -0
  9. package/bench/swebench/bosun-swebench.mjs +5 -0
  10. package/bosun.config.example.json +25 -0
  11. package/bosun.schema.json +825 -183
  12. package/cli.mjs +267 -8
  13. package/config/config-doctor.mjs +51 -2
  14. package/config/config.mjs +232 -5
  15. package/github/github-auth-manager.mjs +70 -19
  16. package/infra/library-manager.mjs +894 -60
  17. package/infra/monitor.mjs +701 -69
  18. package/infra/runtime-accumulator.mjs +376 -84
  19. package/infra/session-tracker.mjs +95 -28
  20. package/infra/test-runtime.mjs +267 -0
  21. package/lib/codebase-audit.mjs +133 -18
  22. package/package.json +30 -8
  23. package/server/setup-web-server.mjs +29 -1
  24. package/server/ui-server.mjs +1571 -49
  25. package/setup.mjs +27 -24
  26. package/shell/codex-shell.mjs +34 -3
  27. package/shell/copilot-shell.mjs +50 -8
  28. package/task/msg-hub.mjs +193 -0
  29. package/task/pipeline.mjs +544 -0
  30. package/task/task-claims.mjs +6 -10
  31. package/task/task-cli.mjs +38 -2
  32. package/task/task-executor-pipeline.mjs +143 -0
  33. package/task/task-executor.mjs +36 -27
  34. package/telegram/get-telegram-chat-id.mjs +57 -47
  35. package/ui/components/chat-view.js +18 -1
  36. package/ui/components/workspace-switcher.js +321 -9
  37. package/ui/demo-defaults.js +17830 -10433
  38. package/ui/demo.html +9 -1
  39. package/ui/modules/router.js +1 -1
  40. package/ui/modules/settings-schema.js +2 -0
  41. package/ui/modules/state.js +54 -57
  42. package/ui/modules/voice-client-sdk.js +376 -37
  43. package/ui/modules/voice-client.js +173 -33
  44. package/ui/setup.html +68 -2
  45. package/ui/styles/components.css +571 -1
  46. package/ui/styles.css +201 -1
  47. package/ui/tabs/dashboard.js +74 -0
  48. package/ui/tabs/library.js +410 -55
  49. package/ui/tabs/logs.js +10 -0
  50. package/ui/tabs/settings.js +178 -99
  51. package/ui/tabs/tasks.js +1083 -507
  52. package/ui/tabs/telemetry.js +34 -0
  53. package/ui/tabs/workflow-canvas-utils.mjs +38 -1
  54. package/ui/tabs/workflows.js +1275 -402
  55. package/voice/voice-agents-sdk.mjs +2 -2
  56. package/voice/voice-relay.mjs +28 -20
  57. package/workflow/declarative-workflows.mjs +145 -0
  58. package/workflow/msg-hub.mjs +237 -0
  59. package/workflow/pipeline-workflows.mjs +287 -0
  60. package/workflow/pipeline.mjs +828 -315
  61. package/workflow/project-detection.mjs +559 -0
  62. package/workflow/workflow-cli.mjs +128 -0
  63. package/workflow/workflow-contract.mjs +433 -232
  64. package/workflow/workflow-engine.mjs +510 -47
  65. package/workflow/workflow-nodes/custom-loader.mjs +251 -0
  66. package/workflow/workflow-nodes.mjs +2024 -184
  67. package/workflow/workflow-templates.mjs +118 -24
  68. package/workflow-templates/agents.mjs +20 -20
  69. package/workflow-templates/bosun-native.mjs +212 -2
  70. package/workflow-templates/code-quality.mjs +20 -14
  71. package/workflow-templates/continuation-loop.mjs +339 -0
  72. package/workflow-templates/github.mjs +516 -40
  73. package/workflow-templates/planning.mjs +446 -17
  74. package/workflow-templates/reliability.mjs +65 -12
  75. package/workflow-templates/task-batch.mjs +27 -10
  76. package/workflow-templates/task-execution.mjs +752 -0
  77. package/workflow-templates/task-lifecycle.mjs +117 -14
  78. package/workspace/context-cache.mjs +66 -18
  79. package/workspace/workspace-manager.mjs +153 -1
  80. package/workflow-templates/issue-continuation.mjs +0 -243
@@ -249,12 +249,18 @@ let _traceTtsFirstAudioMarked = false;
 
 const RECONNECT_AT_MS = 28 * 60 * 1000; // 28 minutes
 const MAX_RECONNECT_ATTEMPTS = 3;
-const AUTO_BARGE_IN_COOLDOWN_MS = 700;
+const AUTO_BARGE_IN_COOLDOWN_MS = 1200;
 const AUTO_BARGE_IN_MIC_LEVEL_THRESHOLD = 0.08;
 const AUTO_BARGE_IN_FADE_MS = 220;
-// Noise-control default: disable user-side live ASR transcript output/persistence.
-// Assistant response text remains enabled.
-const ENABLE_USER_TRANSCRIPT = false;
+// Minimum speech duration (ms) before an interrupt is allowed — filters keyboard/click noise
+let _speechStartedAt = 0;
+const MIN_SPEECH_DURATION_FOR_INTERRUPT_MS = 400;
+// Delayed response clear — keep response visible in center after turn ends
+let _responseClearTimer = null;
+const RESPONSE_DISPLAY_HOLD_MS = 8000;
+// User transcript is always enabled — transcription is surfaced from the API's
+// input_audio_transcription feature (primary) or browser SpeechRecognition (backup).
+const ENABLE_USER_TRANSCRIPT = true;
 let _reconnectAttempts = 0;
 let _pendingResponseCreateTimer = null;
 let _awaitingAutoResponse = false;
@@ -266,6 +272,64 @@ const SpeechRecognition = typeof globalThis !== "undefined"
   ? (globalThis.SpeechRecognition || globalThis.webkitSpeechRecognition)
   : null;
 
+// ── Browser SpeechRecognition (parallel backup for user transcription) ──────
+
+let _browserRecognition = null;
+let _browserTranscriptActive = false;
+let _apiTranscriptDelivered = false;
+
+function _startBrowserTranscription() {
+  if (!SpeechRecognition || _browserRecognition) return;
+  try {
+    const recognition = new SpeechRecognition();
+    recognition.continuous = true;
+    recognition.interimResults = true;
+    recognition.maxAlternatives = 1;
+    recognition.lang = navigator?.language || "en-US";
+
+    recognition.onresult = (event) => {
+      if (_apiTranscriptDelivered) return;
+      let transcript = "";
+      for (let i = event.resultIndex; i < event.results.length; i++) {
+        transcript += event.results[i][0].transcript;
+      }
+      const text = transcript.trim();
+      if (!text) return;
+      voiceTranscript.value = text;
+      emit("transcript", { text, final: event.results[event.resultIndex]?.isFinal || false, source: "browser" });
+      if (event.results[event.resultIndex]?.isFinal) {
+        _recordVoiceTranscriptIfNew("user", text, "browser.speech_recognition.final");
+      }
+    };
+
+    recognition.onerror = (e) => {
+      if (e.error !== "no-speech" && e.error !== "aborted") {
+        console.warn("[voice-client] Browser SpeechRecognition error:", e.error);
+      }
+    };
+
+    recognition.onend = () => {
+      if (_browserTranscriptActive && (_dc || _ws)) {
+        try { recognition.start(); } catch { /* already running or stopped */ }
+      }
+    };
+
+    recognition.start();
+    _browserRecognition = recognition;
+    _browserTranscriptActive = true;
+  } catch (err) {
+    console.warn("[voice-client] Browser SpeechRecognition unavailable:", err?.message);
+  }
+}
+
+function _stopBrowserTranscription() {
+  _browserTranscriptActive = false;
+  if (_browserRecognition) {
+    try { _browserRecognition.stop(); } catch { /* ignore */ }
+    _browserRecognition = null;
+  }
+}
+
 function _normalizeCallContext(options = {}) {
   const sessionId = String(options?.sessionId || "").trim() || null;
   const executor = String(options?.executor || "").trim() || null;
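
The backup transcriber builds on the browser's Web Speech API, which is vendor-prefixed as `webkitSpeechRecognition` in Chromium and unavailable in some browsers (Firefox, notably), hence the feature detection and the defensive try/catch. A minimal standalone sketch of that same API, independent of this module:

```js
// Minimal Web Speech API usage, the same pattern the backup transcriber wraps.
const SR = globalThis.SpeechRecognition || globalThis.webkitSpeechRecognition;
if (SR) {
  const rec = new SR();
  rec.continuous = true;     // keep listening across pauses
  rec.interimResults = true; // stream partial hypotheses as they form
  rec.onresult = (e) => {
    const last = e.results[e.resultIndex];
    console.log(last[0].transcript, last.isFinal ? "(final)" : "(interim)");
  };
  rec.start(); // requires a secure context and mic permission
}
```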
@@ -448,12 +512,8 @@ async function _processResponsesAudioTurn(text) {
   });
 
   voiceState.value = "thinking";
-  if (ENABLE_USER_TRANSCRIPT) {
-    voiceTranscript.value = inputText;
-    emit("transcript", { text: inputText, final: true });
-  } else {
-    voiceTranscript.value = "";
-  }
+  voiceTranscript.value = inputText;
+  emit("transcript", { text: inputText, final: true, source: "api" });
   _recordVoiceTranscriptIfNew("user", inputText, "responses-audio.user_input");
 
   if (_responsesAbortController) {
@@ -503,7 +563,7 @@ async function _processResponsesAudioTurn(text) {
   _traceEndTurn("turn_end", {
     reason: "responses-audio.turn_completed",
   });
-  voiceResponse.value = "";
+  _scheduleResponseClear();
   voiceState.value = "listening";
 }
 
@@ -580,6 +640,9 @@ async function _startResponsesAudioSession(tokenData) {
   _sessionStartTime = Date.now();
   startDurationTimer();
   voiceState.value = "connected";
+  // Start browser SpeechRecognition as parallel/backup transcription
+  _apiTranscriptDelivered = false;
+  _startBrowserTranscription();
   emit("connected", {
     provider: tokenData?.provider || "openai",
     sessionId: voiceSessionId.value,
@@ -668,6 +731,26 @@ function _markAssistantToolResponseObserved() {
   _clearToolCompletionAckTimer();
 }
 
+// ── Response display hold ──────────────────────────────────────────────────
+// Keep assistant response visible in center for RESPONSE_DISPLAY_HOLD_MS
+// after the turn ends, instead of clearing immediately.
+
+function _scheduleResponseClear() {
+  if (_responseClearTimer) clearTimeout(_responseClearTimer);
+  _responseClearTimer = setTimeout(() => {
+    _responseClearTimer = null;
+    voiceResponse.value = "";
+  }, RESPONSE_DISPLAY_HOLD_MS);
+}
+
+function _clearResponseForNewTurn() {
+  if (_responseClearTimer) {
+    clearTimeout(_responseClearTimer);
+    _responseClearTimer = null;
+  }
+  voiceResponse.value = "";
+}
+
 // ── Event System ────────────────────────────────────────────────────────────
 
 export function onVoiceEvent(event, handler) {
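
The pair implements a cancelable delayed clear: every turn-end path now calls `_scheduleResponseClear()` so the text stays visible for 8 s, while the start of a new user turn calls `_clearResponseForNewTurn()` to cancel the pending timer and wipe immediately. The same pattern in isolation (a generic sketch; `box` stands in for the reactive signal):

```js
// Cancelable delayed-clear pattern, generic sketch.
function makeDelayedClear(box, holdMs) {
  let timer = null;
  return {
    schedule() { clearTimeout(timer); timer = setTimeout(() => { box.value = ""; }, holdMs); },
    clearNow() { clearTimeout(timer); timer = null; box.value = ""; },
  };
}
const hold = makeDelayedClear({ value: "assistant text" }, 8000);
hold.schedule(); // clears after 8 s...
hold.clearNow(); // ...unless a new turn clears it right away
```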
@@ -747,18 +830,18 @@ function sendSessionUpdate(tokenData = {}) {
   type: turnDetection,
   ...(turnDetection === "server_vad"
     ? {
-        threshold: 0.7,
-        prefix_padding_ms: 400,
-        silence_duration_ms: 1200,
+        threshold: 0.82,
+        prefix_padding_ms: 500,
+        silence_duration_ms: 1600,
         create_response: true,
-        interrupt_response: true,
+        interrupt_response: false,
       }
     : {}),
   ...(turnDetection === "semantic_vad"
     ? {
-        eagerness: "medium",
+        eagerness: "low",
         create_response: true,
-        interrupt_response: true,
+        interrupt_response: false,
       }
     : {}),
 };
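
Both VAD modes now set `interrupt_response: false`, so the server no longer cancels in-flight assistant audio the moment it detects speech; interruption becomes a client-side decision made by the debounced barge-in logic, which brief noises can no longer trip. For `server_vad`, the effective `turn_detection` payload after this change (values copied from the diff):

```js
// Effective server_vad turn_detection after this release.
const turnDetection = {
  type: "server_vad",
  threshold: 0.82,           // was 0.7: a louder signal is needed to count as speech
  prefix_padding_ms: 500,    // audio retained before the detected speech start
  silence_duration_ms: 1600, // was 1200: a longer pause before the turn is closed
  create_response: true,     // the server still auto-creates the response
  interrupt_response: false, // client-side barge-in now owns interruption
};
```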
@@ -769,6 +852,18 @@ function sendSessionUpdate(tokenData = {}) {
   const transcriptionEnabled =
     sessionConfig?.input_audio_transcription !== undefined;
 
+  // Include instructions from the server session config so the voice agent
+  // receives its system prompt (persona, tools, behaviour rules).
+  const instructions = sessionConfig?.instructions || tokenData?.instructions || undefined;
+
+  // Include tool definitions so the realtime model can invoke them.
+  const tools = Array.isArray(sessionConfig?.tools) && sessionConfig.tools.length
+    ? sessionConfig.tools
+    : Array.isArray(tokenData?.tools) && tokenData.tools.length
+      ? tokenData.tools
+      : undefined;
+  const toolChoice = sessionConfig?.tool_choice || undefined;
+
   sendRealtimeEvent({
     type: "session.update",
     session: {
@@ -776,6 +871,8 @@ function sendSessionUpdate(tokenData = {}) {
       voice: voiceId,
       input_audio_format: "pcm16",
       output_audio_format: "pcm16",
+      ...(instructions ? { instructions } : {}),
+      ...(tools ? { tools, tool_choice: toolChoice || "auto" } : {}),
       ...(transcriptionEnabled
         ? { input_audio_transcription: { model: transcriptionModel } }
         : {}),
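
Forwarding `instructions` and `tools` means the realtime session receives the same system prompt and tool surface the server configured, instead of running with defaults. A hedged sketch of the tool shape the OpenAI Realtime API expects (flat `function` entries rather than the nested Chat Completions format); `get_task_status` is an invented example, not a Bosun tool:

```js
// Hypothetical tool definition in the Realtime API's flat function format.
const tools = [{
  type: "function",
  name: "get_task_status", // invented example name
  description: "Look up the status of a task by id.",
  parameters: {
    type: "object",
    properties: { taskId: { type: "string" } },
    required: ["taskId"],
  },
}];
sendRealtimeEvent({ type: "session.update", session: { tools, tool_choice: "auto" } });
```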
@@ -964,6 +1061,10 @@ async function _startWebSocketTransport(tokenData, mediaStream) {
   _sessionStartTime = Date.now();
   startDurationTimer();
 
+  // Start browser SpeechRecognition as parallel/backup transcription
+  _apiTranscriptDelivered = false;
+  _startBrowserTranscription();
+
   emit("connected", {
     provider: tokenData.provider || "azure",
     sessionId: voiceSessionId.value,
@@ -1222,6 +1323,9 @@ export async function startVoiceSession(options = {}) {
   voiceSessionId.value = _callContext.sessionId || `voice-${Date.now()}`;
   startDurationTimer();
   startReconnectTimer();
+  // Start browser SpeechRecognition as parallel/backup transcription
+  _apiTranscriptDelivered = false;
+  _startBrowserTranscription();
   emit("connected", {
     provider: tokenData.provider,
     sessionId: voiceSessionId.value,
@@ -1340,6 +1444,7 @@ export function stopVoiceSession() {
   _explicitStop = true;
   emit("session-ending", { sessionId: voiceSessionId.value });
   _stopMicLevelMonitor();
+  _stopBrowserTranscription();
   cleanup();
   voiceState.value = "idle";
   voiceTranscript.value = "";
@@ -1348,6 +1453,8 @@ export function stopVoiceSession() {
   voiceSessionId.value = null;
   voiceBoundSessionId.value = null;
   voiceDuration.value = 0;
+  _speechStartedAt = 0;
+  if (_responseClearTimer) { clearTimeout(_responseClearTimer); _responseClearTimer = null; }
   _webrtcUnavailableForProvider = false;
   _lastTokenData = null;
   _callContext = {
@@ -1372,31 +1479,40 @@ function handleServerEvent(event) {
       break;
 
     case "input_audio_buffer.speech_started":
+      _speechStartedAt = Date.now();
       _traceBeginTurn("turn_start", { reason: type });
-      triggerAutoBargeIn("speech-started");
+      // Clear lingering response so center shows user's new transcript
+      _clearResponseForNewTurn();
+      // Don't interrupt immediately — wait for MIN_SPEECH_DURATION_FOR_INTERRUPT_MS
+      setTimeout(() => {
+        if (_speechStartedAt > 0 && (Date.now() - _speechStartedAt) >= MIN_SPEECH_DURATION_FOR_INTERRUPT_MS) {
+          triggerAutoBargeIn("speech-started-confirmed");
+        }
+      }, MIN_SPEECH_DURATION_FOR_INTERRUPT_MS);
       voiceState.value = "listening";
       emit("speech-started", {});
       break;
 
    case "input_audio_buffer.speech_stopped":
+      _speechStartedAt = 0;
       voiceState.value = "thinking";
       scheduleManualResponseCreate("speech-stopped");
       emit("speech-stopped", {});
       break;
 
    case "conversation.item.input_audio_transcription.completed":
-      if (ENABLE_USER_TRANSCRIPT) {
-        voiceTranscript.value = event.transcript || "";
-        emit("transcript", { text: event.transcript, final: true });
-      } else {
-        voiceTranscript.value = "";
-      }
+      // API-level transcript delivered — prefer over browser SpeechRecognition
+      _apiTranscriptDelivered = true;
+      voiceTranscript.value = event.transcript || "";
+      emit("transcript", { text: event.transcript, final: true, source: "api" });
       _recordVoiceTranscriptIfNew(
         "user",
         event.transcript || "",
         "conversation.item.input_audio_transcription.completed",
       );
       scheduleManualResponseCreate("transcription-completed");
+      // Reset for next utterance
+      setTimeout(() => { _apiTranscriptDelivered = false; }, 500);
       break;
 
    case "conversation.item.created": {
@@ -1407,11 +1523,11 @@ function handleServerEvent(event) {
         .map((part) => String(part?.transcript || part?.text || ""))
         .join("")
         .trim();
-      if (transcript && ENABLE_USER_TRANSCRIPT) {
+      if (transcript) {
+        _apiTranscriptDelivered = true;
         voiceTranscript.value = transcript;
-        emit("transcript", { text: transcript, final: true });
-      } else if (!ENABLE_USER_TRANSCRIPT) {
-        voiceTranscript.value = "";
+        emit("transcript", { text: transcript, final: true, source: "api" });
+        setTimeout(() => { _apiTranscriptDelivered = false; }, 500);
       }
       _recordVoiceTranscriptIfNew(
         "user",
@@ -1463,7 +1579,7 @@ function handleServerEvent(event) {
         "response.audio_transcript.done",
       );
       _traceEndTurn("turn_end", { reason: type });
-      voiceResponse.value = "";
+      _scheduleResponseClear();
       break;
 
     case "response.text.done":
@@ -1475,7 +1591,7 @@ function handleServerEvent(event) {
         "response.text.done",
       );
       _traceEndTurn("turn_end", { reason: type });
-      voiceResponse.value = "";
+      _scheduleResponseClear();
       break;
 
     case "response.output_text.done":
@@ -1487,7 +1603,7 @@ function handleServerEvent(event) {
         "response.output_text.done",
       );
       _traceEndTurn("turn_end", { reason: type });
-      voiceResponse.value = "";
+      _scheduleResponseClear();
       break;
 
     case "response.audio.delta":
@@ -1539,7 +1655,7 @@ function handleServerEvent(event) {
           voiceResponse.value,
           "response.done.fallback",
         );
-        voiceResponse.value = "";
+        _scheduleResponseClear();
       }
       if (voiceState.value !== "listening") {
         voiceState.value = "connected";
@@ -1606,6 +1722,23 @@ async function handleToolCall(event) {
   });
   const result = await res.json();
 
+  // Normalize tool output — handle empty strings, objects, and large payloads
+  let toolOutput = "";
+  if (result.error) {
+    toolOutput = `Tool error: ${result.error}`;
+  } else if (result.result != null && result.result !== "") {
+    toolOutput = typeof result.result === "string"
+      ? result.result
+      : JSON.stringify(result.result);
+  } else {
+    toolOutput = "Tool completed with no output";
+  }
+  // Truncate very large outputs to avoid overwhelming the Realtime API context
+  const VOICE_TOOL_OUTPUT_MAX = 6000;
+  if (toolOutput.length > VOICE_TOOL_OUTPUT_MAX) {
+    toolOutput = toolOutput.slice(0, VOICE_TOOL_OUTPUT_MAX) + "\n... (truncated for voice — full result available in chat)";
+  }
+
   // Update tool call status
   voiceToolCalls.value = voiceToolCalls.value.map(tc =>
     tc.callId === callId ? { ...tc, status: "complete", result: result.result } : tc
@@ -1617,7 +1750,7 @@ async function handleToolCall(event) {
     item: {
       type: "function_call_output",
       call_id: callId,
-      output: result.result || result.error || "No output",
+      output: toolOutput,
     },
   });
   // Trigger response generation
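
The normalization guarantees `function_call_output.output` is always a non-empty string, and it fixes a falsy-value bug: the old `result.result || result.error || "No output"` reported "No output" for legitimate results like `0` or `false`, which now survive via `JSON.stringify`. Wrapping the inline logic above in a hypothetical `normalize(result)` helper, the mapping looks like:

```js
// How the new logic maps typical tool results (normalize() is a hypothetical
// wrapper around the inline code above).
normalize({ error: "timeout" });           // -> "Tool error: timeout"
normalize({ result: "done" });             // -> "done"
normalize({ result: { ok: true, n: 3 } }); // -> '{"ok":true,"n":3}'
normalize({ result: 0 });                  // -> "0" (old code dropped falsy results)
normalize({ result: "" });                 // -> "Tool completed with no output"
normalize({ result: "x".repeat(10000) });  // -> first 6000 chars plus a truncation notice
```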
@@ -1678,6 +1811,13 @@ function fadeElementVolumeTo(el, targetVolume, durationMs) {
 
 function triggerAutoBargeIn(reason = "speech-started") {
   const now = Date.now();
+  // Only interrupt if speech has been ongoing long enough to be real speech
+  if (_speechStartedAt > 0) {
+    const speechDuration = now - _speechStartedAt;
+    if (speechDuration < MIN_SPEECH_DURATION_FOR_INTERRUPT_MS) {
+      return false;
+    }
+  }
   const audioActive = isAssistantPlaybackActive();
   if (!shouldAutoBargeIn({
     muted: isVoiceMicMuted.value,
package/ui/setup.html CHANGED
@@ -909,6 +909,9 @@ function App() {
   const [kanbanBackend, setKanbanBackend] = useState("internal");
   const [telegramToken, setTelegramToken] = useState("");
   const [telegramChatId, setTelegramChatId] = useState("");
+  const [telegramDiscoveredChats, setTelegramDiscoveredChats] = useState([]);
+  const [telegramChatLookupLoading, setTelegramChatLookupLoading] = useState(false);
+  const [telegramChatLookupMessage, setTelegramChatLookupMessage] = useState("");
   const [maxParallel, setMaxParallel] = useState(4);
   const [maxRetries, setMaxRetries] = useState(3);
   const [failoverStrategy, setFailoverStrategy] = useState("next-in-line");
@@ -2270,6 +2273,42 @@ function App() {
     if (idx <= step || completedSteps.has(idx)) setStep(idx);
   };
 
+  const discoverTelegramChatIds = async () => {
+    const token = String(telegramToken || "").trim();
+    if (!token) {
+      setTelegramDiscoveredChats([]);
+      setTelegramChatLookupMessage("Enter a Telegram bot token first.");
+      return;
+    }
+
+    setTelegramChatLookupLoading(true);
+    setTelegramChatLookupMessage("");
+    try {
+      const result = await apiPost("telegram-chat-id", { token });
+      if (!result?.ok) {
+        setTelegramDiscoveredChats([]);
+        setTelegramChatLookupMessage(result?.error || "Failed to discover Telegram chats.");
+        return;
+      }
+
+      const chats = Array.isArray(result.chats) ? result.chats : [];
+      setTelegramDiscoveredChats(chats);
+      if (chats.length === 1) {
+        setTelegramChatId(String(chats[0].id));
+        setTelegramChatLookupMessage(`Found 1 chat: ${chats[0].id}`);
+      } else if (chats.length > 1) {
+        setTelegramChatLookupMessage(`Found ${chats.length} chats. Choose the one Bosun should use.`);
+      } else {
+        setTelegramChatLookupMessage(result.message || "No chats found yet. Send a message to your bot, then try again.");
+      }
+    } catch (err) {
+      setTelegramDiscoveredChats([]);
+      setTelegramChatLookupMessage(err.message || "Failed to discover Telegram chats.");
+    } finally {
+      setTelegramChatLookupLoading(false);
+    }
+  };
+
   // ── Build EXECUTORS env string ─────────────────────────────────────────────
 
   const buildExecutorsEnv = () =>
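
`apiPost("telegram-chat-id", { token })` targets a setup-server endpoint, presumably backed by the reworked `package/telegram/get-telegram-chat-id.mjs` in this release. With only a bot token, the way to discover chat ids is Telegram's `getUpdates` method, so the server side plausibly reduces to something like this sketch (an assumption, not Bosun's actual implementation):

```js
// Hedged sketch: derive candidate chat ids from Telegram's getUpdates API.
async function discoverChats(token) {
  const res = await fetch(`https://api.telegram.org/bot${token}/getUpdates`);
  const data = await res.json();
  if (!data.ok) throw new Error(data.description || "getUpdates failed");
  const byId = new Map();
  for (const update of data.result) {
    const chat = update.message?.chat || update.channel_post?.chat;
    if (chat) byId.set(chat.id, { id: chat.id, type: chat.type, title: chat.title, username: chat.username });
  }
  return [...byId.values()]; // stays empty until someone messages the bot
}
```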
@@ -3645,7 +3684,11 @@ function App() {
 ${telegramEnabled && html`
   <div class="form-group">
     <label>Telegram Bot Token</label>
-    <input type="password" value=${telegramToken} oninput=${(e) => setTelegramToken(e.target.value)}
+    <input type="password" value=${telegramToken} oninput=${(e) => {
+      setTelegramToken(e.target.value);
+      setTelegramDiscoveredChats([]);
+      setTelegramChatLookupMessage("");
+    }}
       placeholder="123456:ABCdefGHIjklMNO..." />
     <div class="hint">Create a bot via <a href="https://t.me/botfather" target="_blank">@BotFather</a>.</div>
   </div>
@@ -3653,7 +3696,30 @@ function App() {
     <label>Telegram Chat ID</label>
     <input type="text" value=${telegramChatId} oninput=${(e) => setTelegramChatId(e.target.value)}
       placeholder="-1001234567890" />
-    <div class="hint">Use <code style="font-family:var(--font-mono)">bosun --get-chat-id</code> to find your chat ID after the bot is running.</div>
+    <div style="display:flex;gap:10px;align-items:center;flex-wrap:wrap;margin-top:8px">
+      <button class="btn" type="button" onclick=${discoverTelegramChatIds}
+        disabled=${telegramChatLookupLoading || !String(telegramToken || "").trim()}>
+        ${telegramChatLookupLoading ? "Finding Chats..." : "Discover Chats"}
+      </button>
+      ${telegramDiscoveredChats.length > 1 && html`
+        <select
+          value=${telegramDiscoveredChats.some((chat) => String(chat.id) === String(telegramChatId)) ? String(telegramChatId) : ""}
+          onchange=${(e) => setTelegramChatId(e.target.value)}
+          style="min-width:260px;flex:1"
+        >
+          <option value="">Choose a discovered chat</option>
+          ${telegramDiscoveredChats.map((chat) => html`
+            <option value=${String(chat.id)}>
+              ${`${chat.id}${chat.username ? ` · @${chat.username}` : ""}${chat.title ? ` · ${chat.title}` : ""}${chat.type ? ` · ${chat.type}` : ""}`}
+            </option>
+          `)}
+        </select>
+      `}
+    </div>
+    ${telegramChatLookupMessage && html`
+      <div class="hint" style="margin-top:8px">${telegramChatLookupMessage}</div>
+    `}
+    <div class="hint">Use Discover Chats after sending your bot a message, or run <code style="font-family:var(--font-mono)">bosun --get-chat-id</code>.</div>
   </div>
 `}
 <div class="nav-buttons">