crewly 1.6.1 → 1.6.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/config/roles/orchestrator/prompt.md +16 -0
- package/config/skills/agent/core/get-my-active-work/SKILL.md +101 -0
- package/config/skills/agent/core/get-my-active-work/execute.sh +122 -0
- package/config/skills/agent/core/record-learning/SKILL.md +29 -0
- package/config/skills/agent/core/reply-channel/SKILL.md +41 -0
- package/config/skills/agent/core/reply-channel/execute.sh +165 -0
- package/config/skills/agent/core/reply-channel/execute.test.sh +148 -0
- package/config/skills/agent/remote-browser/execute.sh +296 -14
- package/config/skills/agent/remote-browser/execute.test.sh +482 -0
- package/config/skills/orchestrator/send-message/SKILL.md +30 -7
- package/config/skills/orchestrator/team-health-scan/SKILL.md +98 -0
- package/config/skills/orchestrator/team-health-scan/execute.sh +44 -0
- package/config/skills/registry.json +62 -1
- package/config/sops/developer/git-workflow.md +38 -3
- package/dist/backend/backend/src/constants.d.ts +69 -1
- package/dist/backend/backend/src/constants.d.ts.map +1 -1
- package/dist/backend/backend/src/constants.js +69 -2
- package/dist/backend/backend/src/constants.js.map +1 -1
- package/dist/backend/backend/src/controllers/active-work/active-work.controller.d.ts +53 -0
- package/dist/backend/backend/src/controllers/active-work/active-work.controller.d.ts.map +1 -0
- package/dist/backend/backend/src/controllers/active-work/active-work.controller.js +92 -0
- package/dist/backend/backend/src/controllers/active-work/active-work.controller.js.map +1 -0
- package/dist/backend/backend/src/controllers/agent-stream/agent-stream.controller.d.ts.map +1 -1
- package/dist/backend/backend/src/controllers/agent-stream/agent-stream.controller.js +18 -1
- package/dist/backend/backend/src/controllers/agent-stream/agent-stream.controller.js.map +1 -1
- package/dist/backend/backend/src/controllers/browser/browser.controller.d.ts +68 -0
- package/dist/backend/backend/src/controllers/browser/browser.controller.d.ts.map +1 -1
- package/dist/backend/backend/src/controllers/browser/browser.controller.js +233 -5
- package/dist/backend/backend/src/controllers/browser/browser.controller.js.map +1 -1
- package/dist/backend/backend/src/controllers/browser/browser.routes.d.ts.map +1 -1
- package/dist/backend/backend/src/controllers/browser/browser.routes.js +10 -1
- package/dist/backend/backend/src/controllers/browser/browser.routes.js.map +1 -1
- package/dist/backend/backend/src/controllers/chat/chat.controller.d.ts.map +1 -1
- package/dist/backend/backend/src/controllers/chat/chat.controller.js +8 -3
- package/dist/backend/backend/src/controllers/chat/chat.controller.js.map +1 -1
- package/dist/backend/backend/src/controllers/chat-v2/chat-v2.controller.d.ts +132 -0
- package/dist/backend/backend/src/controllers/chat-v2/chat-v2.controller.d.ts.map +1 -0
- package/dist/backend/backend/src/controllers/chat-v2/chat-v2.controller.js +401 -0
- package/dist/backend/backend/src/controllers/chat-v2/chat-v2.controller.js.map +1 -0
- package/dist/backend/backend/src/controllers/chat-v2/chat-v2.routes.d.ts +29 -0
- package/dist/backend/backend/src/controllers/chat-v2/chat-v2.routes.d.ts.map +1 -0
- package/dist/backend/backend/src/controllers/chat-v2/chat-v2.routes.js +39 -0
- package/dist/backend/backend/src/controllers/chat-v2/chat-v2.routes.js.map +1 -0
- package/dist/backend/backend/src/controllers/chat-v2/index.d.ts +8 -0
- package/dist/backend/backend/src/controllers/chat-v2/index.d.ts.map +1 -0
- package/dist/backend/backend/src/controllers/chat-v2/index.js +8 -0
- package/dist/backend/backend/src/controllers/chat-v2/index.js.map +1 -0
- package/dist/backend/backend/src/controllers/onboarding/onboarding.routes.d.ts +13 -13
- package/dist/backend/backend/src/controllers/onboarding/onboarding.routes.d.ts.map +1 -1
- package/dist/backend/backend/src/controllers/onboarding/onboarding.routes.js +74 -234
- package/dist/backend/backend/src/controllers/onboarding/onboarding.routes.js.map +1 -1
- package/dist/backend/backend/src/controllers/request/request.controller.d.ts.map +1 -1
- package/dist/backend/backend/src/controllers/request/request.controller.js +4 -6
- package/dist/backend/backend/src/controllers/request/request.controller.js.map +1 -1
- package/dist/backend/backend/src/controllers/task-management/tasks.controller.d.ts +43 -0
- package/dist/backend/backend/src/controllers/task-management/tasks.controller.d.ts.map +1 -1
- package/dist/backend/backend/src/controllers/task-management/tasks.controller.js +200 -72
- package/dist/backend/backend/src/controllers/task-management/tasks.controller.js.map +1 -1
- package/dist/backend/backend/src/controllers/team/team.controller.d.ts.map +1 -1
- package/dist/backend/backend/src/controllers/team/team.controller.js +46 -0
- package/dist/backend/backend/src/controllers/team/team.controller.js.map +1 -1
- package/dist/backend/backend/src/controllers/team-health/team-health.controller.d.ts +59 -0
- package/dist/backend/backend/src/controllers/team-health/team-health.controller.d.ts.map +1 -0
- package/dist/backend/backend/src/controllers/team-health/team-health.controller.js +127 -0
- package/dist/backend/backend/src/controllers/team-health/team-health.controller.js.map +1 -0
- package/dist/backend/backend/src/controllers/team-health/team-health.routes.d.ts +13 -0
- package/dist/backend/backend/src/controllers/team-health/team-health.routes.d.ts.map +1 -0
- package/dist/backend/backend/src/controllers/team-health/team-health.routes.js +20 -0
- package/dist/backend/backend/src/controllers/team-health/team-health.routes.js.map +1 -0
- package/dist/backend/backend/src/index.d.ts +9 -0
- package/dist/backend/backend/src/index.d.ts.map +1 -1
- package/dist/backend/backend/src/index.js +233 -0
- package/dist/backend/backend/src/index.js.map +1 -1
- package/dist/backend/backend/src/routes/api.routes.d.ts.map +1 -1
- package/dist/backend/backend/src/routes/api.routes.js +40 -6
- package/dist/backend/backend/src/routes/api.routes.js.map +1 -1
- package/dist/backend/backend/src/services/agent/active-work-briefing.service.d.ts +498 -0
- package/dist/backend/backend/src/services/agent/active-work-briefing.service.d.ts.map +1 -0
- package/dist/backend/backend/src/services/agent/active-work-briefing.service.js +759 -0
- package/dist/backend/backend/src/services/agent/active-work-briefing.service.js.map +1 -0
- package/dist/backend/backend/src/services/agent/agent-registration.service.d.ts +25 -0
- package/dist/backend/backend/src/services/agent/agent-registration.service.d.ts.map +1 -1
- package/dist/backend/backend/src/services/agent/agent-registration.service.js +193 -57
- package/dist/backend/backend/src/services/agent/agent-registration.service.js.map +1 -1
- package/dist/backend/backend/src/services/agent/crewly-agent/model-manager.d.ts +9 -2
- package/dist/backend/backend/src/services/agent/crewly-agent/model-manager.d.ts.map +1 -1
- package/dist/backend/backend/src/services/agent/crewly-agent/model-manager.js +35 -2
- package/dist/backend/backend/src/services/agent/crewly-agent/model-manager.js.map +1 -1
- package/dist/backend/backend/src/services/agent/crewly-agent/types.d.ts +8 -2
- package/dist/backend/backend/src/services/agent/crewly-agent/types.d.ts.map +1 -1
- package/dist/backend/backend/src/services/agent/crewly-agent/types.js +1 -0
- package/dist/backend/backend/src/services/agent/crewly-agent/types.js.map +1 -1
- package/dist/backend/backend/src/services/agent/tmux-command.service.d.ts.map +1 -1
- package/dist/backend/backend/src/services/agent/tmux-command.service.js +2 -1
- package/dist/backend/backend/src/services/agent/tmux-command.service.js.map +1 -1
- package/dist/backend/backend/src/services/agent/tmux.service.d.ts.map +1 -1
- package/dist/backend/backend/src/services/agent/tmux.service.js +2 -1
- package/dist/backend/backend/src/services/agent/tmux.service.js.map +1 -1
- package/dist/backend/backend/src/services/ai/prompt-builder.service.d.ts +148 -3
- package/dist/backend/backend/src/services/ai/prompt-builder.service.d.ts.map +1 -1
- package/dist/backend/backend/src/services/ai/prompt-builder.service.js +241 -2
- package/dist/backend/backend/src/services/ai/prompt-builder.service.js.map +1 -1
- package/dist/backend/backend/src/services/ai/prompt-modules/recovery.module.d.ts.map +1 -1
- package/dist/backend/backend/src/services/ai/prompt-modules/recovery.module.js +13 -0
- package/dist/backend/backend/src/services/ai/prompt-modules/recovery.module.js.map +1 -1
- package/dist/backend/backend/src/services/ai/prompt-modules/role-boundary.module.d.ts.map +1 -1
- package/dist/backend/backend/src/services/ai/prompt-modules/role-boundary.module.js +26 -1
- package/dist/backend/backend/src/services/ai/prompt-modules/role-boundary.module.js.map +1 -1
- package/dist/backend/backend/src/services/ai/prompt-modules/sop-norm-distinction.module.d.ts +79 -0
- package/dist/backend/backend/src/services/ai/prompt-modules/sop-norm-distinction.module.d.ts.map +1 -0
- package/dist/backend/backend/src/services/ai/prompt-modules/sop-norm-distinction.module.js +118 -0
- package/dist/backend/backend/src/services/ai/prompt-modules/sop-norm-distinction.module.js.map +1 -0
- package/dist/backend/backend/src/services/browser/browser-bridge.service.d.ts +161 -0
- package/dist/backend/backend/src/services/browser/browser-bridge.service.d.ts.map +1 -1
- package/dist/backend/backend/src/services/browser/browser-bridge.service.js +382 -2
- package/dist/backend/backend/src/services/browser/browser-bridge.service.js.map +1 -1
- package/dist/backend/backend/src/services/browser/browser-proxy.service.d.ts +105 -0
- package/dist/backend/backend/src/services/browser/browser-proxy.service.d.ts.map +1 -1
- package/dist/backend/backend/src/services/browser/browser-proxy.service.js +232 -13
- package/dist/backend/backend/src/services/browser/browser-proxy.service.js.map +1 -1
- package/dist/backend/backend/src/services/chat-v2/chat-v2.dispatcher.service.d.ts +178 -0
- package/dist/backend/backend/src/services/chat-v2/chat-v2.dispatcher.service.d.ts.map +1 -0
- package/dist/backend/backend/src/services/chat-v2/chat-v2.dispatcher.service.js +254 -0
- package/dist/backend/backend/src/services/chat-v2/chat-v2.dispatcher.service.js.map +1 -0
- package/dist/backend/backend/src/services/chat-v2/chat-v2.mention-resolver.d.ts +134 -0
- package/dist/backend/backend/src/services/chat-v2/chat-v2.mention-resolver.d.ts.map +1 -0
- package/dist/backend/backend/src/services/chat-v2/chat-v2.mention-resolver.js +232 -0
- package/dist/backend/backend/src/services/chat-v2/chat-v2.mention-resolver.js.map +1 -0
- package/dist/backend/backend/src/services/chat-v2/chat-v2.realtime-holder.d.ts +25 -0
- package/dist/backend/backend/src/services/chat-v2/chat-v2.realtime-holder.d.ts.map +1 -0
- package/dist/backend/backend/src/services/chat-v2/chat-v2.realtime-holder.js +23 -0
- package/dist/backend/backend/src/services/chat-v2/chat-v2.realtime-holder.js.map +1 -0
- package/dist/backend/backend/src/services/chat-v2/chat-v2.service.d.ts +254 -0
- package/dist/backend/backend/src/services/chat-v2/chat-v2.service.d.ts.map +1 -0
- package/dist/backend/backend/src/services/chat-v2/chat-v2.service.js +467 -0
- package/dist/backend/backend/src/services/chat-v2/chat-v2.service.js.map +1 -0
- package/dist/backend/backend/src/services/chat-v2/chat-v2.singleton.d.ts +27 -0
- package/dist/backend/backend/src/services/chat-v2/chat-v2.singleton.d.ts.map +1 -0
- package/dist/backend/backend/src/services/chat-v2/chat-v2.singleton.js +57 -0
- package/dist/backend/backend/src/services/chat-v2/chat-v2.singleton.js.map +1 -0
- package/dist/backend/backend/src/services/chat-v2/chat-v2.team-membership.d.ts +43 -0
- package/dist/backend/backend/src/services/chat-v2/chat-v2.team-membership.d.ts.map +1 -0
- package/dist/backend/backend/src/services/chat-v2/chat-v2.team-membership.js +54 -0
- package/dist/backend/backend/src/services/chat-v2/chat-v2.team-membership.js.map +1 -0
- package/dist/backend/backend/src/services/chat-v2/config.d.ts +100 -0
- package/dist/backend/backend/src/services/chat-v2/config.d.ts.map +1 -0
- package/dist/backend/backend/src/services/chat-v2/config.js +174 -0
- package/dist/backend/backend/src/services/chat-v2/config.js.map +1 -0
- package/dist/backend/backend/src/services/chat-v2/index.d.ts +11 -0
- package/dist/backend/backend/src/services/chat-v2/index.d.ts.map +1 -0
- package/dist/backend/backend/src/services/chat-v2/index.js +12 -0
- package/dist/backend/backend/src/services/chat-v2/index.js.map +1 -0
- package/dist/backend/backend/src/services/chat-v2/sqlite/channel.store.d.ts +114 -0
- package/dist/backend/backend/src/services/chat-v2/sqlite/channel.store.d.ts.map +1 -0
- package/dist/backend/backend/src/services/chat-v2/sqlite/channel.store.js +194 -0
- package/dist/backend/backend/src/services/chat-v2/sqlite/channel.store.js.map +1 -0
- package/dist/backend/backend/src/services/chat-v2/sqlite/chat-db.d.ts +100 -0
- package/dist/backend/backend/src/services/chat-v2/sqlite/chat-db.d.ts.map +1 -0
- package/dist/backend/backend/src/services/chat-v2/sqlite/chat-db.js +351 -0
- package/dist/backend/backend/src/services/chat-v2/sqlite/chat-db.js.map +1 -0
- package/dist/backend/backend/src/services/chat-v2/sqlite/message.store.d.ts +132 -0
- package/dist/backend/backend/src/services/chat-v2/sqlite/message.store.d.ts.map +1 -0
- package/dist/backend/backend/src/services/chat-v2/sqlite/message.store.js +281 -0
- package/dist/backend/backend/src/services/chat-v2/sqlite/message.store.js.map +1 -0
- package/dist/backend/backend/src/services/chat-v2/types.d.ts +295 -0
- package/dist/backend/backend/src/services/chat-v2/types.d.ts.map +1 -0
- package/dist/backend/backend/src/services/chat-v2/types.js +61 -0
- package/dist/backend/backend/src/services/chat-v2/types.js.map +1 -0
- package/dist/backend/backend/src/services/cloud/cloud-event-bridge.service.d.ts +113 -0
- package/dist/backend/backend/src/services/cloud/cloud-event-bridge.service.d.ts.map +1 -0
- package/dist/backend/backend/src/services/cloud/cloud-event-bridge.service.js +179 -0
- package/dist/backend/backend/src/services/cloud/cloud-event-bridge.service.js.map +1 -0
- package/dist/backend/backend/src/services/cloud/cloud-event-forwarder.service.d.ts +131 -0
- package/dist/backend/backend/src/services/cloud/cloud-event-forwarder.service.d.ts.map +1 -0
- package/dist/backend/backend/src/services/cloud/cloud-event-forwarder.service.js +227 -0
- package/dist/backend/backend/src/services/cloud/cloud-event-forwarder.service.js.map +1 -0
- package/dist/backend/backend/src/services/core/config.service.js +3 -3
- package/dist/backend/backend/src/services/core/config.service.js.map +1 -1
- package/dist/backend/backend/src/services/core/storage.service.d.ts +7 -0
- package/dist/backend/backend/src/services/core/storage.service.d.ts.map +1 -1
- package/dist/backend/backend/src/services/core/storage.service.js +15 -0
- package/dist/backend/backend/src/services/core/storage.service.js.map +1 -1
- package/dist/backend/backend/src/services/event-bus/event-bus.service.d.ts +69 -1
- package/dist/backend/backend/src/services/event-bus/event-bus.service.d.ts.map +1 -1
- package/dist/backend/backend/src/services/event-bus/event-bus.service.js +118 -0
- package/dist/backend/backend/src/services/event-bus/event-bus.service.js.map +1 -1
- package/dist/backend/backend/src/services/event-bus/event-to-workitem-bridge.service.d.ts +275 -0
- package/dist/backend/backend/src/services/event-bus/event-to-workitem-bridge.service.d.ts.map +1 -0
- package/dist/backend/backend/src/services/event-bus/event-to-workitem-bridge.service.js +736 -0
- package/dist/backend/backend/src/services/event-bus/event-to-workitem-bridge.service.js.map +1 -0
- package/dist/backend/backend/src/services/knowledge/fts5-index.service.d.ts.map +1 -1
- package/dist/backend/backend/src/services/knowledge/fts5-index.service.js +18 -2
- package/dist/backend/backend/src/services/knowledge/fts5-index.service.js.map +1 -1
- package/dist/backend/backend/src/services/knowledge/knowledge-search.service.d.ts +49 -13
- package/dist/backend/backend/src/services/knowledge/knowledge-search.service.d.ts.map +1 -1
- package/dist/backend/backend/src/services/knowledge/knowledge-search.service.js +123 -29
- package/dist/backend/backend/src/services/knowledge/knowledge-search.service.js.map +1 -1
- package/dist/backend/backend/src/services/knowledge/learnings-index.service.d.ts +159 -0
- package/dist/backend/backend/src/services/knowledge/learnings-index.service.d.ts.map +1 -0
- package/dist/backend/backend/src/services/knowledge/learnings-index.service.js +304 -0
- package/dist/backend/backend/src/services/knowledge/learnings-index.service.js.map +1 -0
- package/dist/backend/backend/src/services/knowledge/vector-store.service.d.ts.map +1 -1
- package/dist/backend/backend/src/services/knowledge/vector-store.service.js +24 -4
- package/dist/backend/backend/src/services/knowledge/vector-store.service.js.map +1 -1
- package/dist/backend/backend/src/services/memory/auto-learning.subscriber.d.ts +174 -0
- package/dist/backend/backend/src/services/memory/auto-learning.subscriber.d.ts.map +1 -0
- package/dist/backend/backend/src/services/memory/auto-learning.subscriber.js +375 -0
- package/dist/backend/backend/src/services/memory/auto-learning.subscriber.js.map +1 -0
- package/dist/backend/backend/src/services/memory/learning-format.validator.d.ts +97 -0
- package/dist/backend/backend/src/services/memory/learning-format.validator.d.ts.map +1 -0
- package/dist/backend/backend/src/services/memory/learning-format.validator.js +209 -0
- package/dist/backend/backend/src/services/memory/learning-format.validator.js.map +1 -0
- package/dist/backend/backend/src/services/memory/vector-store.service.d.ts.map +1 -1
- package/dist/backend/backend/src/services/memory/vector-store.service.js +19 -4
- package/dist/backend/backend/src/services/memory/vector-store.service.js.map +1 -1
- package/dist/backend/backend/src/services/onboarding/onboarding-provision.service.d.ts +16 -5
- package/dist/backend/backend/src/services/onboarding/onboarding-provision.service.d.ts.map +1 -1
- package/dist/backend/backend/src/services/onboarding/onboarding-provision.service.js +32 -5
- package/dist/backend/backend/src/services/onboarding/onboarding-provision.service.js.map +1 -1
- package/dist/backend/backend/src/services/onboarding/onboarding.service.d.ts +157 -0
- package/dist/backend/backend/src/services/onboarding/onboarding.service.d.ts.map +1 -0
- package/dist/backend/backend/src/services/onboarding/onboarding.service.js +229 -0
- package/dist/backend/backend/src/services/onboarding/onboarding.service.js.map +1 -0
- package/dist/backend/backend/src/services/onboarding/onboarding.types.d.ts +141 -0
- package/dist/backend/backend/src/services/onboarding/onboarding.types.d.ts.map +1 -0
- package/dist/backend/backend/src/services/onboarding/onboarding.types.js +18 -0
- package/dist/backend/backend/src/services/onboarding/onboarding.types.js.map +1 -0
- package/dist/backend/backend/src/services/pr-review/pr-review.service.d.ts.map +1 -1
- package/dist/backend/backend/src/services/pr-review/pr-review.service.js +1 -1
- package/dist/backend/backend/src/services/pr-review/pr-review.service.js.map +1 -1
- package/dist/backend/backend/src/services/slack/cross-machine-message.service.d.ts.map +1 -1
- package/dist/backend/backend/src/services/slack/cross-machine-message.service.js +17 -1
- package/dist/backend/backend/src/services/slack/cross-machine-message.service.js.map +1 -1
- package/dist/backend/backend/src/services/slack/slack-orchestrator-bridge.d.ts +39 -1
- package/dist/backend/backend/src/services/slack/slack-orchestrator-bridge.d.ts.map +1 -1
- package/dist/backend/backend/src/services/slack/slack-orchestrator-bridge.js +158 -26
- package/dist/backend/backend/src/services/slack/slack-orchestrator-bridge.js.map +1 -1
- package/dist/backend/backend/src/services/task-pool/task-pool.service.d.ts +248 -6
- package/dist/backend/backend/src/services/task-pool/task-pool.service.d.ts.map +1 -1
- package/dist/backend/backend/src/services/task-pool/task-pool.service.js +531 -51
- package/dist/backend/backend/src/services/task-pool/task-pool.service.js.map +1 -1
- package/dist/backend/backend/src/services/team-health/index.d.ts +16 -0
- package/dist/backend/backend/src/services/team-health/index.d.ts.map +1 -0
- package/dist/backend/backend/src/services/team-health/index.js +16 -0
- package/dist/backend/backend/src/services/team-health/index.js.map +1 -0
- package/dist/backend/backend/src/services/team-health/live-team-health-data-provider.d.ts +52 -0
- package/dist/backend/backend/src/services/team-health/live-team-health-data-provider.d.ts.map +1 -0
- package/dist/backend/backend/src/services/team-health/live-team-health-data-provider.js +161 -0
- package/dist/backend/backend/src/services/team-health/live-team-health-data-provider.js.map +1 -0
- package/dist/backend/backend/src/services/team-health/lost-dispatch-detector.d.ts +53 -0
- package/dist/backend/backend/src/services/team-health/lost-dispatch-detector.d.ts.map +1 -0
- package/dist/backend/backend/src/services/team-health/lost-dispatch-detector.js +88 -0
- package/dist/backend/backend/src/services/team-health/lost-dispatch-detector.js.map +1 -0
- package/dist/backend/backend/src/services/team-health/stale-trigger-detector.d.ts +44 -0
- package/dist/backend/backend/src/services/team-health/stale-trigger-detector.d.ts.map +1 -0
- package/dist/backend/backend/src/services/team-health/stale-trigger-detector.js +83 -0
- package/dist/backend/backend/src/services/team-health/stale-trigger-detector.js.map +1 -0
- package/dist/backend/backend/src/services/team-health/team-health-alert-router.d.ts +92 -0
- package/dist/backend/backend/src/services/team-health/team-health-alert-router.d.ts.map +1 -0
- package/dist/backend/backend/src/services/team-health/team-health-alert-router.js +328 -0
- package/dist/backend/backend/src/services/team-health/team-health-alert-router.js.map +1 -0
- package/dist/backend/backend/src/services/team-health/team-health-config.d.ts +41 -0
- package/dist/backend/backend/src/services/team-health/team-health-config.d.ts.map +1 -0
- package/dist/backend/backend/src/services/team-health/team-health-config.js +213 -0
- package/dist/backend/backend/src/services/team-health/team-health-config.js.map +1 -0
- package/dist/backend/backend/src/services/team-health/team-health-detector.d.ts +46 -0
- package/dist/backend/backend/src/services/team-health/team-health-detector.d.ts.map +1 -0
- package/dist/backend/backend/src/services/team-health/team-health-detector.js +347 -0
- package/dist/backend/backend/src/services/team-health/team-health-detector.js.map +1 -0
- package/dist/backend/backend/src/services/team-health/team-health-types.d.ts +154 -0
- package/dist/backend/backend/src/services/team-health/team-health-types.d.ts.map +1 -0
- package/dist/backend/backend/src/services/team-health/team-health-types.js +94 -0
- package/dist/backend/backend/src/services/team-health/team-health-types.js.map +1 -0
- package/dist/backend/backend/src/services/team-health/team-health-watchdog.service.d.ts +111 -0
- package/dist/backend/backend/src/services/team-health/team-health-watchdog.service.d.ts.map +1 -0
- package/dist/backend/backend/src/services/team-health/team-health-watchdog.service.js +226 -0
- package/dist/backend/backend/src/services/team-health/team-health-watchdog.service.js.map +1 -0
- package/dist/backend/backend/src/services/v3/mission-reminder.service.d.ts +148 -0
- package/dist/backend/backend/src/services/v3/mission-reminder.service.d.ts.map +1 -0
- package/dist/backend/backend/src/services/v3/mission-reminder.service.js +545 -0
- package/dist/backend/backend/src/services/v3/mission-reminder.service.js.map +1 -0
- package/dist/backend/backend/src/services/v3/request-sla.subscriber.d.ts +499 -0
- package/dist/backend/backend/src/services/v3/request-sla.subscriber.d.ts.map +1 -0
- package/dist/backend/backend/src/services/v3/request-sla.subscriber.js +1105 -0
- package/dist/backend/backend/src/services/v3/request-sla.subscriber.js.map +1 -0
- package/dist/backend/backend/src/services/v3/request.service.d.ts +22 -0
- package/dist/backend/backend/src/services/v3/request.service.d.ts.map +1 -1
- package/dist/backend/backend/src/services/v3/request.service.js +71 -0
- package/dist/backend/backend/src/services/v3/request.service.js.map +1 -1
- package/dist/backend/backend/src/services/v3/v3-data.service.d.ts +1 -0
- package/dist/backend/backend/src/services/v3/v3-data.service.d.ts.map +1 -1
- package/dist/backend/backend/src/services/v3/v3-data.service.js +22 -6
- package/dist/backend/backend/src/services/v3/v3-data.service.js.map +1 -1
- package/dist/backend/backend/src/types/event-bus.types.d.ts +19 -1
- package/dist/backend/backend/src/types/event-bus.types.d.ts.map +1 -1
- package/dist/backend/backend/src/types/event-bus.types.js +43 -0
- package/dist/backend/backend/src/types/event-bus.types.js.map +1 -1
- package/dist/backend/backend/src/types/index.d.ts +22 -1
- package/dist/backend/backend/src/types/index.d.ts.map +1 -1
- package/dist/backend/backend/src/types/index.js.map +1 -1
- package/dist/backend/backend/src/types/review-reason.types.d.ts +63 -0
- package/dist/backend/backend/src/types/review-reason.types.d.ts.map +1 -0
- package/dist/backend/backend/src/types/review-reason.types.js +50 -0
- package/dist/backend/backend/src/types/review-reason.types.js.map +1 -0
- package/dist/backend/backend/src/types/slack.types.d.ts +4 -1
- package/dist/backend/backend/src/types/slack.types.d.ts.map +1 -1
- package/dist/backend/backend/src/types/slack.types.js.map +1 -1
- package/dist/backend/backend/src/types/v2/mission.types.d.ts +18 -0
- package/dist/backend/backend/src/types/v2/mission.types.d.ts.map +1 -1
- package/dist/backend/backend/src/types/v2/mission.types.js +1 -0
- package/dist/backend/backend/src/types/v2/mission.types.js.map +1 -1
- package/dist/backend/backend/src/types/v2/work-item.types.d.ts.map +1 -1
- package/dist/backend/backend/src/types/v2/work-item.types.js +25 -1
- package/dist/backend/backend/src/types/v2/work-item.types.js.map +1 -1
- package/dist/backend/backend/src/utils/team.utils.d.ts +38 -0
- package/dist/backend/backend/src/utils/team.utils.d.ts.map +1 -0
- package/dist/backend/backend/src/utils/team.utils.js +45 -0
- package/dist/backend/backend/src/utils/team.utils.js.map +1 -0
- package/dist/backend/backend/src/websocket/chat-v2.gateway.d.ts +195 -0
- package/dist/backend/backend/src/websocket/chat-v2.gateway.d.ts.map +1 -0
- package/dist/backend/backend/src/websocket/chat-v2.gateway.js +401 -0
- package/dist/backend/backend/src/websocket/chat-v2.gateway.js.map +1 -0
- package/dist/backend/backend/src/websocket/terminal.gateway.d.ts +37 -2
- package/dist/backend/backend/src/websocket/terminal.gateway.d.ts.map +1 -1
- package/dist/backend/backend/src/websocket/terminal.gateway.js +106 -5
- package/dist/backend/backend/src/websocket/terminal.gateway.js.map +1 -1
- package/dist/cli/backend/src/constants.d.ts +69 -1
- package/dist/cli/backend/src/constants.d.ts.map +1 -1
- package/dist/cli/backend/src/constants.js +69 -2
- package/dist/cli/backend/src/constants.js.map +1 -1
- package/dist/cli/backend/src/services/core/config.service.js +3 -3
- package/dist/cli/backend/src/services/core/config.service.js.map +1 -1
- package/dist/cli/backend/src/services/core/storage.service.d.ts +7 -0
- package/dist/cli/backend/src/services/core/storage.service.d.ts.map +1 -1
- package/dist/cli/backend/src/services/core/storage.service.js +15 -0
- package/dist/cli/backend/src/services/core/storage.service.js.map +1 -1
- package/dist/cli/backend/src/services/knowledge/fts5-index.service.d.ts.map +1 -1
- package/dist/cli/backend/src/services/knowledge/fts5-index.service.js +18 -2
- package/dist/cli/backend/src/services/knowledge/fts5-index.service.js.map +1 -1
- package/dist/cli/backend/src/services/knowledge/knowledge-search.service.d.ts +49 -13
- package/dist/cli/backend/src/services/knowledge/knowledge-search.service.d.ts.map +1 -1
- package/dist/cli/backend/src/services/knowledge/knowledge-search.service.js +123 -29
- package/dist/cli/backend/src/services/knowledge/knowledge-search.service.js.map +1 -1
- package/dist/cli/backend/src/services/knowledge/vector-store.service.d.ts.map +1 -1
- package/dist/cli/backend/src/services/knowledge/vector-store.service.js +24 -4
- package/dist/cli/backend/src/services/knowledge/vector-store.service.js.map +1 -1
- package/dist/cli/backend/src/types/index.d.ts +22 -1
- package/dist/cli/backend/src/types/index.d.ts.map +1 -1
- package/dist/cli/backend/src/types/index.js.map +1 -1
- package/dist/cli/backend/src/types/v2/work-item.types.d.ts.map +1 -1
- package/dist/cli/backend/src/types/v2/work-item.types.js +25 -1
- package/dist/cli/backend/src/types/v2/work-item.types.js.map +1 -1
- package/frontend/dist/assets/{index-70356616.js → index-7a4e7df5.js} +328 -326
- package/frontend/dist/assets/index-b7e59b2b.css +33 -0
- package/frontend/dist/index.html +2 -2
- package/package.json +2 -1
- package/config/skills/orchestrator/recall/SKILL.md +0 -47
- package/config/skills/orchestrator/recall/execute.sh +0 -13
- package/config/skills/orchestrator/record-learning/SKILL.md +0 -47
- package/config/skills/orchestrator/record-learning/execute.sh +0 -13
- package/config/skills/orchestrator/remember/SKILL.md +0 -55
- package/config/skills/orchestrator/remember/execute.sh +0 -15
- package/frontend/dist/assets/index-6aaa0630.css +0 -33
|
@@ -0,0 +1,1105 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* RequestSlaSubscriber (INBOUND-1)
|
|
3
|
+
*
|
|
4
|
+
* Closes the user-facing reliability gap noted by Steve on 2026-04-27:
|
|
5
|
+
* inbound user messages on Slack/Chat-v2 land as Requests today
|
|
6
|
+
* (`slack-orchestrator-bridge.ts:377`), but no WorkItem is auto-created on
|
|
7
|
+
* the orchestrator's plate. The orc therefore has no SLA to track and can
|
|
8
|
+
* silently drop a user request when busy with other work.
|
|
9
|
+
*
|
|
10
|
+
* This subscriber listens to `request:created` and — when the Request was
|
|
11
|
+
* sourced from an inbound user channel (tags include `slack` or `chat-v2`)
|
|
12
|
+
* — auto-creates a `respond_to_user` WorkItem assigned to the orc with a
|
|
13
|
+
* 5-minute SLA deadline. If the orc has not transitioned the WI to a
|
|
14
|
+
* terminal status by 5 minutes, the subscriber emits `request:sla_breached`
|
|
15
|
+
* (CRITICAL — surfaces in the orc terminal). At 10 minutes the subscriber
|
|
16
|
+
* fires the *escalation* hook, which the production wiring uses to send a
|
|
17
|
+
* Slack DM nudge back to the user (so they're never blind to the miss).
|
|
18
|
+
*
|
|
19
|
+
* Auto-close paths:
|
|
20
|
+
* (a) {@link markResolvedByThread} — `slack-orchestrator-bridge` calls
|
|
21
|
+
* this when the orc replies in a thread, so the WI auto-transitions
|
|
22
|
+
* to `done` and the SLA timers no-op against a terminal status.
|
|
23
|
+
* (b) {@link handleWorkItemQueued} (INBOUND-1.f1) — when the orc decomposes
|
|
24
|
+
* a Request into other WorkItems via `taskPool.addToPool`, every new
|
|
25
|
+
* WI fires `workitem:queued`. The handler treats decomposition as
|
|
26
|
+
* "the orc has done the right thing" and resolves the tracked
|
|
27
|
+
* respond_to_user WI with reason `workitem_decompose`. Self-recursion
|
|
28
|
+
* is prevented by an id-shape guard
|
|
29
|
+
* (`request:${requestId}:respond_to_user`) — the respond_to_user WI's
|
|
30
|
+
* own enqueue cannot trigger its own resolution.
|
|
31
|
+
* (c) Timer self-check — every breach handler reads the WI status before
|
|
32
|
+
* publishing or escalating, so a manual orc completeItem() also
|
|
33
|
+
* silences the chain.
|
|
34
|
+
*
|
|
35
|
+
* Idempotency contract (Arch Veto V1):
|
|
36
|
+
* The respond_to_user WorkItem id is deterministic
|
|
37
|
+
* (`request:${requestId}:respond_to_user`); the underlying
|
|
38
|
+
* {@link TaskPoolService.addToPool} already short-circuits on duplicate
|
|
39
|
+
* id. A redelivered `request:created` event therefore fires the handler
|
|
40
|
+
* again, the bridge re-builds the same id, and addToPool no-ops — no
|
|
41
|
+
* separate idempotency store is needed.
|
|
42
|
+
*
|
|
43
|
+
* No new ingress event types (Arch Veto V7 spirit):
|
|
44
|
+
* The orc's revised framing on 2026-04-27 explicitly preferred reusing
|
|
45
|
+
* the existing Request creation path over adding new `inbound:*` event
|
|
46
|
+
* vocabulary. INBOUND-1 adds two events (`request:created`,
|
|
47
|
+
* `request:sla_breached`) that match the Request lifecycle the rest of
|
|
48
|
+
* the system already knows about — no parallel ingress entity.
|
|
49
|
+
*
|
|
50
|
+
* @module services/v3/request-sla.subscriber
|
|
51
|
+
*/
|
|
52
|
+
import { LoggerService } from '../core/logger.service.js';
|
|
53
|
+
import { ORCHESTRATOR_SESSION_NAME } from '../../constants.js';
|
|
54
|
+
import { formatError } from '../../utils/format-error.js';
|
|
55
|
+
import { DEFAULT_MAX_RETRIES, } from '../../types/v2/work-item.types.js';
|
|
56
|
+
import { TERMINAL_REQUEST_STATUSES, isValidRequestTransition, } from '../../types/v2/request.types.js';
|
|
57
|
+
// ---------------------------------------------------------------------------
|
|
58
|
+
// Module-level singleton accessor (DI for slack-orchestrator-bridge)
|
|
59
|
+
// ---------------------------------------------------------------------------
|
|
60
|
+
/**
 * Module-private slot holding the currently-wired RequestSlaSubscriber.
 * Set by the backend boot path; the slack-orchestrator-bridge reads it via
 * {@link getRequestSlaSubscriber} through a lazy import so the bridge and
 * the subscriber never form a static module cycle.
 */
let activeSubscriber = null;
/**
 * Wire the subscriber instance returned by {@link getRequestSlaSubscriber}.
 * Invoked once from boot before the slack listener can dispatch a reply.
 *
 * @param sub - The live subscriber, or null to clear (tests)
 */
export function setRequestSlaSubscriber(sub) {
    activeSubscriber = sub;
}
/**
 * Fetch the currently-wired subscriber. Yields null when boot has not
 * finished yet, or in test setups that never wire one — callers must
 * tolerate the null.
 */
export function getRequestSlaSubscriber() {
    return activeSubscriber;
}
|
|
83
|
+
// ---------------------------------------------------------------------------
|
|
84
|
+
// Constants
|
|
85
|
+
// ---------------------------------------------------------------------------
|
|
86
|
+
/**
 * Default SLA breach threshold (5 minutes). If the orc has not responded
 * within this window, `request:sla_breached` fires with breachLevel=5.
 */
export const DEFAULT_SLA_MS = 5 * 60 * 1000;
/**
 * Default escalation threshold (10 minutes). When it elapses, the
 * user-facing escalation hook fires (production: Slack DM back to user).
 */
export const DEFAULT_ESCALATION_MS = 10 * 60 * 1000;
/**
 * Tags treated as "user-facing inbound channels". A Request carrying any
 * of these tags gets an SLA-tracked respond_to_user WI. Slack is wired
 * today; `chat-v2` is reserved for the channel-rail Phase E surface.
 */
export const DEFAULT_INBOUND_TAGS = ['slack', 'chat-v2'];
/**
 * Event types the subscriber listens to.
 *
 * - `request:created` (INBOUND-1): seed a respond_to_user WI for inbound
 *   user messages.
 * - `workitem:queued` (INBOUND-1.f1): auto-close path b — decomposing a
 *   Request into other WorkItems resolves its respond_to_user WI.
 *
 * Future iterations may add `request:cancelled` etc. for cleanup.
 */
export const REQUEST_SLA_SUBSCRIBED_EVENTS = [
    'request:created',
    'workitem:queued',
];
|
|
117
|
+
/**
 * Build the deterministic respond_to_user WorkItem id for a Request. Used
 * both when creating the WI and as the id-shape guard against self-resolve
 * recursion in {@link handleWorkItemQueued}.
 *
 * @param requestId - The Request id
 * @returns The deterministic WI id (`request:<id>:respond_to_user`)
 */
export function respondToUserWorkItemId(requestId) {
    return ['request', requestId, 'respond_to_user'].join(':');
}
|
|
128
|
+
/**
 * Choose the legal terminal status for RESOLVING an SLA-tracked WI. The V3
 * `WORK_ITEM_TRANSITIONS` matrix (see `types/v2/work-item.types.ts`)
 * forbids `queued → done`, so an unconditional `'done'` target would throw
 * in production even though the test fake accepted it.
 *
 * Mapping:
 * - `running`        → `done`     (explicitly claimed; close cleanly)
 * - `done_by_worker` → `verified` (only legal success edge from there;
 *                                  `done_by_worker → cancelled` is illegal)
 * - anything else    → `cancelled` (the matrix permits `* → cancelled` from
 *                                   queued/scheduled/accepted/blocked/escalated)
 *
 * @param current - The WI's current (non-terminal) status.
 * @returns The legal terminal status to transition into.
 */
export function pickResolveTarget(current) {
    switch (current) {
        case 'running':
            return 'done';
        case 'done_by_worker':
            return 'verified';
        default:
            return 'cancelled';
    }
}
|
|
153
|
+
/**
 * Choose the legal terminal status for FAILING an SLA-tracked WI on the
 * escalation timeout (10-minute miss).
 *
 * Mapping:
 * - `running`        → `failed`   (canonical fail edge)
 * - `done_by_worker` → `rejected` (only fail-shaped edge from there;
 *                                  `done_by_worker → failed` is illegal)
 * - anything else    → `cancelled` (queued/scheduled/etc. cannot reach
 *                                   `failed` directly; "we gave up tracking,
 *                                   nothing actually failed")
 *
 * @param current - The WI's current (non-terminal) status.
 * @returns The legal terminal status to transition into.
 */
export function pickFailTarget(current) {
    switch (current) {
        case 'running':
            return 'failed';
        case 'done_by_worker':
            return 'rejected';
        default:
            return 'cancelled';
    }
}
|
|
175
|
+
/**
 * Compute the legal Request status path from the current state to `done`.
 * An empty array means the Request is already terminal (nothing to do) —
 * the call site is expected to guard for this.
 *
 * Per `REQUEST_TRANSITIONS` in `types/v2/request.types.ts`:
 * - `open` / `running` / `waiting_confirmation` → `done` (direct)
 * - `ready` / `blocked` → `running` → `done` (two-step; the direct edge
 *   to `done` is illegal from those states)
 *
 * @param from - Current Request status (must be non-terminal).
 * @returns Ordered array of statuses to transition through (excluding `from`).
 */
export function closeRequestPath(from) {
    if (from === 'done' || from === 'cancelled') {
        return [];
    }
    if (isValidRequestTransition(from, 'done')) {
        return ['done'];
    }
    // ready / blocked: no direct edge to done — detour through running.
    const viaRunning = isValidRequestTransition(from, 'running') &&
        isValidRequestTransition('running', 'done');
    return viaRunning ? ['running', 'done'] : [];
}
|
|
201
|
+
/**
 * Terminal WorkItem statuses. The SLA timers no-op when the WI has already
 * reached any of these by the time the timer fires. (In the TypeScript
 * source this is a `ReadonlySet<WorkItemStatus>` so a typo'd member fails
 * compilation rather than silently leaking through.)
 */
const TERMINAL_WI_STATUSES = new Set(['done', 'cancelled', 'failed', 'verified', 'rejected']);
|
|
215
|
+
/**
 * Reason tags that represent a VERIFIED actual reply / decomposition by an
 * agent and therefore permit the parent Request to cascade-close to `done`.
 *
 * Defense-in-depth gate for {@link RequestSlaSubscriber.maybeCloseRequest}
 * (Steve 2026-04-30 incident): even if an upstream caller somehow invokes
 * `markResolved` with a non-reply reason, the Request must NOT be flipped
 * to `done` unless the reason is one of these verified paths.
 *
 * - `orc_reply`           — slackResolve callback fired (real orc reply via
 *                           the reply-slack skill). Gated by `fromOrcReply`
 *                           on the orchestrator bridge so timeout
 *                           placeholders cannot reach this branch.
 * - `chatv2_reply`        — chat-v2 controller persisted an agent-typed
 *                           reply to the channel (real agent reply).
 * - `workitem_decompose`  — the orc decomposed the Request into other WIs;
 *                           those carry the real work, so the tracker is
 *                           silenced and the Request close is gated
 *                           separately by the sibling-count check.
 *
 * Any other reason (e.g. `escalation_timeout` from
 * {@link RequestSlaSubscriber.failOrphanRespondWi}, or arbitrary diagnostic
 * strings) means "do NOT auto-close the parent Request".
 */
export const VERIFIED_REPLY_REASONS = new Set(['orc_reply', 'chatv2_reply', 'workitem_decompose']);
|
|
245
|
+
// ---------------------------------------------------------------------------
|
|
246
|
+
// Helpers
|
|
247
|
+
// ---------------------------------------------------------------------------
|
|
248
|
+
/**
 * Pull the Slack thread timestamp out of a Request's
 * `sourceConversationItemId`. The slack-orchestrator-bridge stamps these as
 * `slack-${channelId}-${ts}` (see slack-orchestrator-bridge.ts:372), so the
 * trailing dotted-decimal segment is the threadTs.
 *
 * @param sourceId - The Request's sourceConversationItemId
 * @returns The threadTs, or null if the id doesn't match the Slack shape
 */
export function extractSlackThreadTs(sourceId) {
    if (!sourceId?.startsWith('slack-')) {
        return null;
    }
    // Split on the LAST dash: the ts segment never contains one, while the
    // prefix might.
    const cut = sourceId.lastIndexOf('-');
    if (cut < 0 || cut === sourceId.length - 1) {
        return null;
    }
    const candidate = sourceId.slice(cut + 1);
    // A ts looks like 1772899923.865659 — digits, a dot, digits.
    return /^\d+\.\d+$/.test(candidate) ? candidate : null;
}
|
|
271
|
+
/**
 * Pull the Slack channel id out of a `slack-${channelId}-${ts}`
 * sourceConversationItemId.
 *
 * @param sourceId - The Request's sourceConversationItemId
 * @returns The channelId, or null if the id doesn't match the Slack shape
 */
export function extractSlackChannelId(sourceId) {
    if (!sourceId?.startsWith('slack-')) {
        return null;
    }
    const body = sourceId.slice('slack-'.length);
    // Everything before the LAST dash is the channel id.
    const cut = body.lastIndexOf('-');
    return cut >= 1 ? body.slice(0, cut) : null;
}
|
|
286
|
+
/**
 * Inter-field delimiter inside a chat-v2 sourceConversationItemId.
 * Double-underscore `__` is used because production channel + message ids
 * are minted via `randomUUID()` (4 dashes per UUID) — a single-dash
 * delimiter would collide with the embedded UUID dashes and corrupt the
 * round-trip. UUIDs are hex digits + dashes only and can never contain
 * `_`, so `__` is collision-free against any current or future hex-shaped
 * id. See Arch on PR #364 / INBOUND-2.f1.
 */
const CHATV2_FIELD_DELIM = '__';
/**
 * Pull the chat-v2 channel id out of a Request's
 * `sourceConversationItemId`. The chat-v2 controller (INBOUND-2) stamps
 * these as `chatv2-${channelId}__${messageId}` — UUID-safe delimiter.
 *
 * @param sourceId - The Request's sourceConversationItemId
 * @returns The channelId, or null if the id doesn't match the chat-v2 shape
 */
export function extractChatV2ChannelId(sourceId) {
    if (!sourceId?.startsWith('chatv2-')) {
        return null;
    }
    const body = sourceId.slice('chatv2-'.length);
    const cut = body.indexOf(CHATV2_FIELD_DELIM);
    // cut === 0 would mean an empty channel id — treat as malformed.
    return cut >= 1 ? body.slice(0, cut) : null;
}
|
|
313
|
+
/**
 * Pull the chat-v2 message id out of a `chatv2-${channelId}__${messageId}`
 * sourceConversationItemId. The messageId acts as the auto-close lookup
 * key, analogous to a Slack threadTs.
 *
 * @param sourceId - The Request's sourceConversationItemId
 * @returns The messageId, or null if the id doesn't match the chat-v2 shape
 */
export function extractChatV2MessageId(sourceId) {
    if (!sourceId?.startsWith('chatv2-')) {
        return null;
    }
    const body = sourceId.slice('chatv2-'.length);
    const cut = body.indexOf(CHATV2_FIELD_DELIM);
    if (cut < 0) {
        return null;
    }
    const messageId = body.slice(cut + CHATV2_FIELD_DELIM.length);
    // An empty trailing segment means the id is malformed.
    return messageId.length > 0 ? messageId : null;
}
|
|
333
|
+
// ---------------------------------------------------------------------------
|
|
334
|
+
// Errors
|
|
335
|
+
// ---------------------------------------------------------------------------
|
|
336
|
+
/**
 * Raised when the subscriber cannot resolve a Request for an event — the
 * Request was deleted between publish and dispatch. Rare, but observable
 * in tests, so the case is encoded as a typed error that catch sites can
 * discriminate on via `err.name`.
 */
export class RequestNotFoundError extends Error {
    /**
     * @param message - Human-readable description of the missing Request.
     */
    constructor(message) {
        super(message);
        // Stable discriminator: `instanceof` can break across realms, so
        // callers may also switch on `err.name`.
        this.name = 'RequestNotFoundError';
    }
}
|
|
347
|
+
/**
|
|
348
|
+
* RequestSlaSubscriber — the INBOUND-1 deliverable.
|
|
349
|
+
*
|
|
350
|
+
* @example
|
|
351
|
+
* ```typescript
|
|
352
|
+
* const sub = RequestSlaSubscriber.boot(eventBus, slackEscalationFn);
|
|
353
|
+
* sub.start();
|
|
354
|
+
* // … on shutdown:
|
|
355
|
+
* sub.stop();
|
|
356
|
+
* ```
|
|
357
|
+
*/
|
|
358
|
+
export class RequestSlaSubscriber {
|
|
359
|
+
// Injected collaborators (see constructor): the event bus the handlers
// subscribe on, the TaskPool used to create/transition WIs, the
// RequestService used to read/close Requests, and the Slack DM callback
// fired on the 10-minute escalation.
eventBus;
taskPool;
requestService;
sendEscalationDm;
// Orchestrator tmux/session name; defaults to ORCHESTRATOR_SESSION_NAME.
orchestratorSession;
// Breach threshold in ms (default DEFAULT_SLA_MS = 5 min).
slaMs;
// Escalation threshold in ms (default DEFAULT_ESCALATION_MS = 10 min).
escalationMs;
// Set of tags treated as inbound channels (default DEFAULT_INBOUND_TAGS).
inboundTags;
// Component logger; defaults to LoggerService's 'RequestSlaSubscriber'.
logger;
// Unsubscribe callbacks returned by eventBus.onInProcess; drained in stop().
unsubscribers = [];
// Idempotency latch for start()/stop().
started = false;
/** requestId → tracked record; primary index for breach handlers. */
trackedByRequest = new Map();
/** Slack threadTs → requestId; secondary index for {@link markResolvedByThread}. */
threadIndex = new Map();
/**
 * chat-v2 channelId → requestId. INBOUND-2 secondary index used by
 * {@link markResolvedByChatV2}. Last-write-wins when multiple inbound
 * Requests pile up in the same channel — v1 polish accepts the simpler
 * 1:1 semantics; the orphan handler still cleans the WI on escalation.
 */
chatV2Index = new Map();
/** In-flight async dispatch promises (test affordance; see flushPending). */
pendingDispatches = new Set();
|
|
383
|
+
constructor(deps) {
|
|
384
|
+
this.eventBus = deps.eventBus;
|
|
385
|
+
this.taskPool = deps.taskPool;
|
|
386
|
+
this.requestService = deps.requestService;
|
|
387
|
+
this.sendEscalationDm = deps.sendEscalationDm;
|
|
388
|
+
this.orchestratorSession = deps.orchestratorSession ?? ORCHESTRATOR_SESSION_NAME;
|
|
389
|
+
this.slaMs = deps.slaMs ?? DEFAULT_SLA_MS;
|
|
390
|
+
this.escalationMs = deps.escalationMs ?? DEFAULT_ESCALATION_MS;
|
|
391
|
+
this.inboundTags = new Set(deps.inboundTags ?? DEFAULT_INBOUND_TAGS);
|
|
392
|
+
this.logger =
|
|
393
|
+
deps.logger ?? LoggerService.getInstance().createComponentLogger('RequestSlaSubscriber');
|
|
394
|
+
}
|
|
395
|
+
/**
|
|
396
|
+
* Production wiring helper. Tests should use the constructor directly.
|
|
397
|
+
*
|
|
398
|
+
* @param eventBus - Live event bus
|
|
399
|
+
* @param requestService - Live RequestService
|
|
400
|
+
* @param taskPool - Live TaskPoolService
|
|
401
|
+
* @param sendEscalationDm - Slack DM callback for the 10-minute escalation
|
|
402
|
+
* @returns A subscriber ready to `start()`
|
|
403
|
+
*/
|
|
404
|
+
static boot(eventBus, requestService, taskPool, sendEscalationDm) {
|
|
405
|
+
return new RequestSlaSubscriber({
|
|
406
|
+
eventBus,
|
|
407
|
+
taskPool,
|
|
408
|
+
requestService,
|
|
409
|
+
sendEscalationDm,
|
|
410
|
+
});
|
|
411
|
+
}
|
|
412
|
+
/**
|
|
413
|
+
* Subscribe + register the in-process handlers. Idempotent.
|
|
414
|
+
*/
|
|
415
|
+
start() {
|
|
416
|
+
if (this.started)
|
|
417
|
+
return;
|
|
418
|
+
this.started = true;
|
|
419
|
+
for (const eventType of REQUEST_SLA_SUBSCRIBED_EVENTS) {
|
|
420
|
+
this.unsubscribers.push(this.eventBus.onInProcess(eventType, (e) => this.safeDispatch(eventType, e)));
|
|
421
|
+
}
|
|
422
|
+
this.logger.info('RequestSlaSubscriber subscribed', {
|
|
423
|
+
eventTypes: REQUEST_SLA_SUBSCRIBED_EVENTS,
|
|
424
|
+
slaMs: this.slaMs,
|
|
425
|
+
escalationMs: this.escalationMs,
|
|
426
|
+
orchestratorSession: this.orchestratorSession,
|
|
427
|
+
});
|
|
428
|
+
}
|
|
429
|
+
/**
|
|
430
|
+
* Wait for in-flight async dispatches to settle. Test affordance.
|
|
431
|
+
*/
|
|
432
|
+
async flushPending() {
|
|
433
|
+
while (this.pendingDispatches.size > 0) {
|
|
434
|
+
const inFlight = Array.from(this.pendingDispatches);
|
|
435
|
+
await Promise.allSettled(inFlight);
|
|
436
|
+
}
|
|
437
|
+
}
|
|
438
|
+
/**
|
|
439
|
+
* Detach subscriptions + clear all SLA timers. Safe to call repeatedly.
|
|
440
|
+
*/
|
|
441
|
+
stop() {
|
|
442
|
+
for (const unsubscribe of this.unsubscribers) {
|
|
443
|
+
try {
|
|
444
|
+
unsubscribe();
|
|
445
|
+
}
|
|
446
|
+
catch (err) {
|
|
447
|
+
this.logger.warn('SLA unsubscribe threw', { error: formatError(err) });
|
|
448
|
+
}
|
|
449
|
+
}
|
|
450
|
+
this.unsubscribers = [];
|
|
451
|
+
for (const tracked of this.trackedByRequest.values()) {
|
|
452
|
+
clearTimeout(tracked.breachTimer);
|
|
453
|
+
clearTimeout(tracked.escalationTimer);
|
|
454
|
+
}
|
|
455
|
+
this.trackedByRequest.clear();
|
|
456
|
+
this.threadIndex.clear();
|
|
457
|
+
this.chatV2Index.clear();
|
|
458
|
+
this.started = false;
|
|
459
|
+
this.logger.info('RequestSlaSubscriber stopped');
|
|
460
|
+
}
|
|
461
|
+
/**
|
|
462
|
+
* Mark an in-flight respond_to_user WI as done because the orchestrator
|
|
463
|
+
* just replied to the matching Slack thread. Called by
|
|
464
|
+
* {@link slack-orchestrator-bridge.sendSlackResponse}.
|
|
465
|
+
*
|
|
466
|
+
* Best-effort: a non-Slack-shaped or unknown threadTs is a no-op.
|
|
467
|
+
*
|
|
468
|
+
* @param threadTs - The Slack message timestamp the orc replied to
|
|
469
|
+
*/
|
|
470
|
+
async markResolvedByThread(threadTs) {
|
|
471
|
+
if (!threadTs)
|
|
472
|
+
return;
|
|
473
|
+
const requestId = this.threadIndex.get(threadTs);
|
|
474
|
+
if (!requestId)
|
|
475
|
+
return;
|
|
476
|
+
await this.markResolved(requestId, 'orc_reply');
|
|
477
|
+
}
|
|
478
|
+
/**
|
|
479
|
+
* INBOUND-2: mark the in-flight respond_to_user WI as done because an
|
|
480
|
+
* agent replied in the chat-v2 channel where the user's message arrived.
|
|
481
|
+
* Called by `chat-v2.controller.sendMessage` after a `senderType=agent`
|
|
482
|
+
* message persists to the channel.
|
|
483
|
+
*
|
|
484
|
+
* Best-effort: a non-tracked or unknown channelId is a no-op.
|
|
485
|
+
*
|
|
486
|
+
* @param channelId - The chat-v2 channel where the agent just replied
|
|
487
|
+
*/
|
|
488
|
+
async markResolvedByChatV2(channelId) {
|
|
489
|
+
if (!channelId)
|
|
490
|
+
return;
|
|
491
|
+
const requestId = this.chatV2Index.get(channelId);
|
|
492
|
+
if (!requestId)
|
|
493
|
+
return;
|
|
494
|
+
await this.markResolved(requestId, 'chatv2_reply');
|
|
495
|
+
}
|
|
496
|
+
/**
 * Mark an in-flight respond_to_user WI as resolved by Request id (e.g. the
 * orc replied on Slack, or decomposed the Request into other WorkItems and
 * we want to silence the SLA chain). After the WI transitions, we also
 * cascade the close to the parent Request when this was the last
 * non-terminal WI for it (Steve 2026-04-29: Requests stuck on "Active" in
 * /tasks UI even after the team replied).
 *
 * Transition path is selected from the WI's current status to satisfy the
 * V3 state machine — `transitionStatus` enforces `WORK_ITEM_TRANSITIONS`
 * and `queued → done` is NOT a legal edge:
 * - `queued` → `cancelled`: SLA tracker was a placeholder, never claimed.
 *   Semantic: "no longer needed because the orc handled this directly".
 * - `running` → `done`: Someone explicitly claimed the SLA WI; close
 *   it as a normal completion.
 * - terminal status: no-op (already settled).
 *
 * Before this fix, `markResolved` always called `transitionStatus(_, 'done')`
 * which threw on the queued case, the catch swallowed it at warn level, the
 * WI stayed `queued` forever, and the Request never closed — the
 * user-reported "Active count never goes down" bug.
 *
 * @param requestId - The Request whose SLA chain should be silenced
 * @param reason - Diagnostic tag (`orc_reply` / `chatv2_reply` /
 * `workitem_decompose`) recorded in WI metadata + Request `result`.
 */
async markResolved(requestId, reason) {
    const tracked = this.trackedByRequest.get(requestId);
    // Unknown or already-resolved request: nothing tracked, nothing to do.
    if (!tracked)
        return;
    // Clear timers BEFORE the await so a concurrent breach can't fire after
    // we've decided to resolve. Also drop all indexes synchronously for the
    // same reason — the async work below must not race a second resolve.
    clearTimeout(tracked.breachTimer);
    clearTimeout(tracked.escalationTimer);
    this.trackedByRequest.delete(requestId);
    if (tracked.threadTs)
        this.threadIndex.delete(tracked.threadTs);
    if (tracked.chatV2ChannelId)
        this.chatV2Index.delete(tracked.chatV2ChannelId);
    try {
        const wi = await this.taskPool.findWorkItem(tracked.workItemId);
        // Only transition a still-live WI; a terminal one is left as-is.
        if (wi && !TERMINAL_WI_STATUSES.has(wi.status)) {
            // Legal terminal target depends on the current status — see
            // pickResolveTarget for the V3 matrix mapping.
            const target = pickResolveTarget(wi.status);
            await this.taskPool.transitionStatus(tracked.workItemId, target, 'system', (item) => {
                // Stamp why/when the SLA machinery closed this WI.
                item.metadata = {
                    ...(item.metadata ?? {}),
                    slaResolvedReason: reason,
                    slaResolvedAt: new Date().toISOString(),
                };
            });
            this.logger.info('SLA WorkItem auto-resolved', {
                workItemId: tracked.workItemId,
                requestId,
                reason,
                fromStatus: wi.status,
                toStatus: target,
            });
        }
        // Cascade: close the parent Request when this was the last live WI.
        // Runs even if the WI was already terminal — covers the case where
        // the WI was closed out-of-band but the Request didn't get cascaded.
        await this.maybeCloseRequest(requestId, reason);
    }
    catch (err) {
        // Best-effort: a failed resolve must not leak into the reply flow;
        // the tracked record is already gone, so no retry occurs.
        this.logger.warn('SLA auto-resolve threw', {
            workItemId: tracked.workItemId,
            requestId,
            error: formatError(err),
        });
    }
}
|
|
567
|
+
/**
 * Cascade close the parent Request after an SLA-tracked WI resolves.
 *
 * The Request is moved to `done` only when ALL of:
 * 1. `reason` is in {@link VERIFIED_REPLY_REASONS} (defense-in-depth
 *    against false-resolve paths — see Steve 2026-04-30 incident,
 *    where a `responseTimeoutMs` placeholder fired
 *    `markResolvedByThread` and cascaded the Request to `done`
 *    without an actual orc reply).
 * 2. it exists and is not already terminal (`done`/`cancelled`), AND
 * 3. no other non-terminal WIs remain for it (the orc may have
 *    decomposed the Request into other WIs that are still in flight —
 *    in that case we leave the Request alone and let the existing
 *    `cascadeRequestStatus` machinery in v3-data.service close it
 *    when those WIs finish).
 *
 * Picks the shortest legal transition path per `REQUEST_TRANSITIONS`:
 * - `open` / `running` / `waiting_confirmation` → `done` (direct)
 * - `ready` / `blocked` → `running` → `done`
 *
 * Errors are caught at the call site (markResolved); this method must
 * never propagate, so a Request-update failure does not leak into the
 * Slack-reply flow.
 *
 * @param requestId - The Request to close.
 * @param reason - The same reason tag recorded on the WI; passed
 * through to `Request.result` so the UI shows why it auto-closed.
 */
async maybeCloseRequest(requestId, reason) {
    // Defense-in-depth: even if an upstream caller wires `markResolved` with
    // a non-reply reason in the future, the cascade close is suppressed
    // unless the reason is one we recognise as a verified actual reply or
    // decomposition path. The primary fix lives in the orchestrator bridge
    // (`fromOrcReply` flag); this gate is the second line of defense.
    if (!VERIFIED_REPLY_REASONS.has(reason)) {
        this.logger.debug('Request cascade close skipped — reason not in verified-reply set', {
            requestId,
            reason,
        });
        return;
    }
    const request = await this.requestService.getById(requestId);
    // Deleted between publish and dispatch: nothing to close.
    if (!request)
        return;
    // Already done/cancelled: nothing to close.
    if (TERMINAL_REQUEST_STATUSES.has(request.status))
        return;
    // Gate 3: other WIs still in flight keep the Request open.
    const otherActiveCount = await this.countOtherActiveWorkItems(requestId);
    if (otherActiveCount > 0) {
        this.logger.debug('Request kept open — other non-terminal WIs still in flight', {
            requestId,
            otherActiveCount,
            reason,
        });
        return;
    }
    const path = closeRequestPath(request.status);
    if (path.length === 0) {
        // Should be impossible given the TERMINAL guard above, but be defensive.
        return;
    }
    try {
        // Walk the legal path one hop at a time; only the final `done` hop
        // stamps the auto-close reason into Request.result.
        for (const next of path) {
            await this.requestService.update(requestId, {
                status: next,
                ...(next === 'done' ? { result: `Auto-closed by SLA: ${reason}` } : {}),
            });
        }
        this.logger.info('Request auto-closed by SLA cascade', {
            requestId,
            from: request.status,
            path,
            reason,
        });
    }
    catch (err) {
        // Swallow by design — see the JSDoc contract above.
        this.logger.warn('Request auto-close threw', {
            requestId,
            from: request.status,
            path,
            error: formatError(err),
        });
    }
}
|
|
650
|
+
/**
|
|
651
|
+
* Count WorkItems linked to the given Request that are NOT the SLA tracker
|
|
652
|
+
* AND are still non-terminal. Returns 0 when only the SLA tracker existed.
|
|
653
|
+
*
|
|
654
|
+
* @param requestId - The Request id to scan.
|
|
655
|
+
* @returns Count of other in-flight WorkItems.
|
|
656
|
+
*/
|
|
657
|
+
async countOtherActiveWorkItems(requestId) {
|
|
658
|
+
const slaWiId = respondToUserWorkItemId(requestId);
|
|
659
|
+
const all = await this.taskPool.getAllItems();
|
|
660
|
+
let count = 0;
|
|
661
|
+
for (const wi of all) {
|
|
662
|
+
if (wi.requestId !== requestId)
|
|
663
|
+
continue;
|
|
664
|
+
if (wi.id === slaWiId)
|
|
665
|
+
continue;
|
|
666
|
+
if (TERMINAL_WI_STATUSES.has(wi.status))
|
|
667
|
+
continue;
|
|
668
|
+
count += 1;
|
|
669
|
+
}
|
|
670
|
+
return count;
|
|
671
|
+
}
|
|
672
|
+
/**
 * Number of Requests currently carrying live SLA timers (size of the
 * primary tracked-record index). Test affordance.
 */
get trackedCount() {
    return this.trackedByRequest.size;
}
|
|
678
|
+
// -------------------------------------------------------------------------
|
|
679
|
+
// Internals
|
|
680
|
+
// -------------------------------------------------------------------------
|
|
681
|
+
/**
 * `request:created` handler. Filters by inbound tag, creates the
 * respond_to_user WI, and schedules the breach + escalation timers.
 *
 * Idempotent against bus redelivery: `addToPool` dedupes the WI by its
 * deterministic id, and the already-tracked guard below prevents a
 * redelivered event from minting a second pair of timers.
 *
 * @param event - The `request:created` bus event; must carry `requestId`.
 */
handleRequestCreated = async (event) => {
    if (!event.requestId) {
        this.logger.debug('request:created event missing requestId', { eventId: event.id });
        return;
    }
    const request = await this.requestService.getById(event.requestId);
    if (!request) {
        // Persistence failed asynchronously, or the Request was deleted
        // between publish and dispatch. Log and bail.
        this.logger.warn('request:created references unknown Request', {
            requestId: event.requestId,
        });
        return;
    }
    if (!this.matchesInboundTag(request.tags)) {
        this.logger.debug('request:created skipping non-inbound source', {
            requestId: request.id,
            tags: request.tags,
        });
        return;
    }
    // FIX: redelivery guard (Arch Veto V1 idempotency). addToPool already
    // no-ops on the duplicate WI id, but re-running the rest of this
    // handler would mint a SECOND pair of breach/escalation timers and
    // overwrite the tracked record — nothing would ever clear the first
    // pair (markResolved/stop only see the replacement record), so the
    // orphaned timers would still fire handleBreach/handleEscalation.
    // Keep the original SLA clock and bail instead.
    if (this.trackedByRequest.has(request.id)) {
        this.logger.debug('request:created already tracked — ignoring redelivery', {
            requestId: request.id,
        });
        return;
    }
    const wiId = respondToUserWorkItemId(request.id);
    // Detect source kind from sourceConversationItemId shape. Slack ids
    // start with `slack-`; chat-v2 ids start with `chatv2-` (INBOUND-2).
    const threadTs = extractSlackThreadTs(request.sourceConversationItemId);
    const channelId = extractSlackChannelId(request.sourceConversationItemId);
    const chatV2ChannelId = extractChatV2ChannelId(request.sourceConversationItemId);
    const chatV2MessageId = extractChatV2MessageId(request.sourceConversationItemId);
    const source = threadTs
        ? 'slack'
        : chatV2ChannelId
            ? 'chat-v2'
            : 'unknown';
    const wi = this.buildRespondWorkItem(request, wiId, {
        source,
        threadTs,
        channelId,
        chatV2ChannelId,
        chatV2MessageId,
    });
    // addToPool short-circuits on duplicate id (V1 dedup).
    await this.taskPool.addToPool(wi);
    // Schedule the breach + escalation timers. We unref the timers so a
    // hung subscriber on shutdown doesn't keep the node process alive.
    const breachTimer = setTimeout(() => {
        void this.handleBreach(request.id, /*level*/ 5);
    }, this.slaMs);
    breachTimer.unref?.();
    const escalationTimer = setTimeout(() => {
        void this.handleEscalation(request.id);
    }, this.escalationMs);
    escalationTimer.unref?.();
    this.trackedByRequest.set(request.id, {
        workItemId: wiId,
        requestId: request.id,
        source,
        threadTs,
        channelId,
        chatV2ChannelId,
        chatV2MessageId,
        breachTimer,
        escalationTimer,
        request,
    });
    if (threadTs)
        this.threadIndex.set(threadTs, request.id);
    if (chatV2ChannelId)
        this.chatV2Index.set(chatV2ChannelId, request.id);
    this.logger.info('SLA respond_to_user WorkItem queued', {
        workItemId: wiId,
        requestId: request.id,
        source,
        threadTs,
        chatV2ChannelId,
        slaMs: this.slaMs,
    });
};
|
|
762
|
+
/**
 * `workitem:queued` handler (INBOUND-1.f1, auto-close path b).
 *
 * When a Request is decomposed into other WorkItems we take that as proof
 * the orchestrator acted on it, and resolve the tracked respond_to_user WI
 * for that Request.
 *
 * Self-recursion guard: the respond_to_user WI's own enqueue (from
 * {@link handleRequestCreated}'s `addToPool` call) also fires
 * `workitem:queued`; without the guard we would markResolved against our
 * own enqueue and close the SLA chain prematurely. The guard compares id
 * shapes (`incomingId === respondToUserWorkItemId(requestId)`) rather than
 * reading `wi.metadata.slaSource` — metadata is mutable, the id is not.
 *
 * No-ops when the event lacks `requestId` (can't correlate) or
 * `workItemId` (malformed publisher), when the queued WI is the
 * respond_to_user WI itself, or when the Request is not currently tracked
 * (already resolved, or never inbound-tagged).
 *
 * @param event - The `workitem:queued` event from TaskPoolService.addToPool
 */
handleWorkItemQueued = async (event) => {
    const { requestId, workItemId: queuedWiId } = event;
    if (!requestId) {
        // Per the f1 spec: undefined requestId = no auto-close. Most enqueues
        // fall here (queue mutations not derived from a Request).
        this.logger.debug('workitem:queued event missing requestId — auto-close no-op', {
            eventId: event.id,
            workItemId: queuedWiId,
        });
        return;
    }
    if (!queuedWiId) {
        this.logger.warn('workitem:queued event missing workItemId — malformed', {
            eventId: event.id,
            requestId,
        });
        return;
    }
    // Self-recursion guard. The respond_to_user WI's own enqueue must NOT
    // trigger its own resolution.
    const ownWiId = respondToUserWorkItemId(requestId);
    if (queuedWiId === ownWiId) {
        this.logger.debug('workitem:queued is the respond_to_user WI itself — skip', {
            workItemId: queuedWiId,
            requestId,
        });
        return;
    }
    // Only act when we're actively tracking this Request — otherwise the
    // queue mutation is for a Request we never SLA-tracked (no inbound tag,
    // already resolved, etc.).
    if (!this.trackedByRequest.has(requestId)) {
        this.logger.debug('workitem:queued for untracked Request — skip', {
            workItemId: queuedWiId,
            requestId,
        });
        return;
    }
    await this.markResolved(requestId, 'workitem_decompose');
};
|
|
826
|
+
/**
|
|
827
|
+
* 5-minute breach handler. Re-checks the WI status and emits
|
|
828
|
+
* `request:sla_breached` if the WI is still non-terminal.
|
|
829
|
+
*
|
|
830
|
+
* @param requestId - The Request whose breach is firing
|
|
831
|
+
* @param level - Breach level: 5 (first SLA) or 10 (escalation)
|
|
832
|
+
*/
|
|
833
|
+
async handleBreach(requestId, level) {
|
|
834
|
+
const tracked = this.trackedByRequest.get(requestId);
|
|
835
|
+
if (!tracked)
|
|
836
|
+
return;
|
|
837
|
+
try {
|
|
838
|
+
const wi = await this.taskPool.findWorkItem(tracked.workItemId);
|
|
839
|
+
if (!wi || TERMINAL_WI_STATUSES.has(wi.status)) {
|
|
840
|
+
// Auto-resolved before the timer fired — clean up tracking.
|
|
841
|
+
this.cleanupTracked(requestId);
|
|
842
|
+
return;
|
|
843
|
+
}
|
|
844
|
+
this.eventBus.publish({
|
|
845
|
+
id: `request:sla_breached:${requestId}:${level}`,
|
|
846
|
+
type: 'request:sla_breached',
|
|
847
|
+
timestamp: new Date().toISOString(),
|
|
848
|
+
teamId: '',
|
|
849
|
+
teamName: '',
|
|
850
|
+
memberId: '',
|
|
851
|
+
memberName: '',
|
|
852
|
+
sessionName: this.orchestratorSession,
|
|
853
|
+
previousValue: 'in_sla',
|
|
854
|
+
newValue: `breached_${level}m`,
|
|
855
|
+
changedField: 'taskStatus',
|
|
856
|
+
requestId,
|
|
857
|
+
workItemId: tracked.workItemId,
|
|
858
|
+
});
|
|
859
|
+
this.logger.warn('SLA breach', {
|
|
860
|
+
requestId,
|
|
861
|
+
workItemId: tracked.workItemId,
|
|
862
|
+
level,
|
|
863
|
+
});
|
|
864
|
+
}
|
|
865
|
+
catch (err) {
|
|
866
|
+
this.logger.error('SLA breach handler threw', {
|
|
867
|
+
requestId,
|
|
868
|
+
error: formatError(err),
|
|
869
|
+
});
|
|
870
|
+
}
|
|
871
|
+
}
|
|
872
|
+
/**
|
|
873
|
+
* 10-minute escalation handler. Emits the level-10 breach event and —
|
|
874
|
+
* if a Slack DM callback is wired — sends the user a "still working on
|
|
875
|
+
* it" nudge so they're never blind to the miss. After the DM (or DM-skip),
|
|
876
|
+
* transitions the orphaned respond_to_user WI to `'failed'` with
|
|
877
|
+
* `slaResolvedReason: 'escalation_timeout'` so the orc queue does not keep
|
|
878
|
+
* a stale `queued` WI forever (Arch N3 on PR #357).
|
|
879
|
+
*/
|
|
880
|
+
async handleEscalation(requestId) {
|
|
881
|
+
const tracked = this.trackedByRequest.get(requestId);
|
|
882
|
+
if (!tracked)
|
|
883
|
+
return;
|
|
884
|
+
// Re-emit the breach event at level=10 so the orc terminal sees the
|
|
885
|
+
// escalation arc explicitly.
|
|
886
|
+
await this.handleBreach(requestId, 10);
|
|
887
|
+
// Re-fetch in case the breach handler cleaned up.
|
|
888
|
+
const stillTracked = this.trackedByRequest.get(requestId);
|
|
889
|
+
if (!stillTracked)
|
|
890
|
+
return;
|
|
891
|
+
// Capture the WI id BEFORE cleanupTracked() drops the record so we can
|
|
892
|
+
// still transition the orphan WI to 'failed' afterwards.
|
|
893
|
+
const wiId = stillTracked.workItemId;
|
|
894
|
+
// INBOUND-2: chat-v2 source has no DM-back analog yet. Log the
|
|
895
|
+
// escalation, clean up tracking, and close the orphan WI. A follow-up
|
|
896
|
+
// ticket can wire a chat-v2 nudge (e.g. agent-side reply via
|
|
897
|
+
// reply-channel).
|
|
898
|
+
if (stillTracked.source === 'chat-v2') {
|
|
899
|
+
this.logger.warn('SLA escalation reached 10min on chat-v2 — no chat-v2 nudge hook wired', {
|
|
900
|
+
requestId,
|
|
901
|
+
chatV2ChannelId: stillTracked.chatV2ChannelId,
|
|
902
|
+
});
|
|
903
|
+
this.cleanupTracked(requestId);
|
|
904
|
+
await this.failOrphanRespondWi(wiId, requestId);
|
|
905
|
+
return;
|
|
906
|
+
}
|
|
907
|
+
if (!this.sendEscalationDm) {
|
|
908
|
+
this.logger.warn('SLA escalation reached 10min — no Slack DM hook wired', {
|
|
909
|
+
requestId,
|
|
910
|
+
});
|
|
911
|
+
this.cleanupTracked(requestId);
|
|
912
|
+
await this.failOrphanRespondWi(wiId, requestId);
|
|
913
|
+
return;
|
|
914
|
+
}
|
|
915
|
+
if (!stillTracked.channelId || !stillTracked.threadTs) {
|
|
916
|
+
this.logger.warn('SLA escalation missing Slack thread context — skipping DM', {
|
|
917
|
+
requestId,
|
|
918
|
+
});
|
|
919
|
+
this.cleanupTracked(requestId);
|
|
920
|
+
await this.failOrphanRespondWi(wiId, requestId);
|
|
921
|
+
return;
|
|
922
|
+
}
|
|
923
|
+
try {
|
|
924
|
+
const wi = await this.taskPool.findWorkItem(stillTracked.workItemId);
|
|
925
|
+
const messageText = ":hourglass: It's been a few minutes — I'm still on this. " +
|
|
926
|
+
'I will reply as soon as I have an answer. (auto-nudge)';
|
|
927
|
+
await this.sendEscalationDm({
|
|
928
|
+
channelId: stillTracked.channelId,
|
|
929
|
+
threadTs: stillTracked.threadTs,
|
|
930
|
+
messageText,
|
|
931
|
+
request: stillTracked.request,
|
|
932
|
+
workItem: wi,
|
|
933
|
+
});
|
|
934
|
+
this.logger.info('SLA escalation Slack DM sent', {
|
|
935
|
+
requestId,
|
|
936
|
+
channelId: stillTracked.channelId,
|
|
937
|
+
});
|
|
938
|
+
}
|
|
939
|
+
catch (err) {
|
|
940
|
+
this.logger.error('SLA escalation DM failed', {
|
|
941
|
+
requestId,
|
|
942
|
+
error: formatError(err),
|
|
943
|
+
});
|
|
944
|
+
}
|
|
945
|
+
finally {
|
|
946
|
+
// Escalation is the terminal hook in v1 — drop tracking + close the
|
|
947
|
+
// orphan WI either way (DM success or failure).
|
|
948
|
+
this.cleanupTracked(requestId);
|
|
949
|
+
await this.failOrphanRespondWi(wiId, requestId);
|
|
950
|
+
}
|
|
951
|
+
}
|
|
952
|
+
/**
|
|
953
|
+
* Transition an escalated respond_to_user WI to `'failed'` with
|
|
954
|
+
* `slaResolvedReason: 'escalation_timeout'` so the orc queue does not
|
|
955
|
+
* keep a stale `queued` WI forever after a 10-min escalation. No-op if
|
|
956
|
+
* the WI is already terminal (e.g. user gave up + an out-of-band cleanup
|
|
957
|
+
* already closed it).
|
|
958
|
+
*
|
|
959
|
+
* Mirrors {@link markResolved}'s terminal-status guard. Errors are
|
|
960
|
+
* logged but never propagated — the SLA chain is already terminal at
|
|
961
|
+
* this point and we do not want to mask the original DM-path outcome.
|
|
962
|
+
*
|
|
963
|
+
* @param workItemId - The respond_to_user WI id to close.
|
|
964
|
+
* @param requestId - The originating Request id (logging context only).
|
|
965
|
+
*/
|
|
966
|
+
async failOrphanRespondWi(workItemId, requestId) {
|
|
967
|
+
try {
|
|
968
|
+
const wi = await this.taskPool.findWorkItem(workItemId);
|
|
969
|
+
if (!wi)
|
|
970
|
+
return;
|
|
971
|
+
if (TERMINAL_WI_STATUSES.has(wi.status)) {
|
|
972
|
+
// Already terminal — nothing to do.
|
|
973
|
+
return;
|
|
974
|
+
}
|
|
975
|
+
// Same state-machine constraint as markResolved: `queued → failed`
|
|
976
|
+
// is illegal per WORK_ITEM_TRANSITIONS. Route queued WIs to
|
|
977
|
+
// `cancelled`; running WIs go to `failed` as before.
|
|
978
|
+
const target = pickFailTarget(wi.status);
|
|
979
|
+
await this.taskPool.transitionStatus(workItemId, target, 'system', (item) => {
|
|
980
|
+
item.metadata = {
|
|
981
|
+
...(item.metadata ?? {}),
|
|
982
|
+
slaResolvedReason: 'escalation_timeout',
|
|
983
|
+
slaResolvedAt: new Date().toISOString(),
|
|
984
|
+
};
|
|
985
|
+
});
|
|
986
|
+
this.logger.info('SLA escalation orphan WI auto-closed', {
|
|
987
|
+
workItemId,
|
|
988
|
+
requestId,
|
|
989
|
+
fromStatus: wi.status,
|
|
990
|
+
toStatus: target,
|
|
991
|
+
});
|
|
992
|
+
}
|
|
993
|
+
catch (err) {
|
|
994
|
+
this.logger.warn('SLA escalation orphan-fail threw', {
|
|
995
|
+
workItemId,
|
|
996
|
+
requestId,
|
|
997
|
+
error: formatError(err),
|
|
998
|
+
});
|
|
999
|
+
}
|
|
1000
|
+
}
|
|
1001
|
+
/**
|
|
1002
|
+
* Drop tracking + clear timers for a Request. Used both on auto-resolve
|
|
1003
|
+
* and on terminal escalation — once we reach 10min the SLA chain is done.
|
|
1004
|
+
*/
|
|
1005
|
+
cleanupTracked(requestId) {
|
|
1006
|
+
const tracked = this.trackedByRequest.get(requestId);
|
|
1007
|
+
if (!tracked)
|
|
1008
|
+
return;
|
|
1009
|
+
clearTimeout(tracked.breachTimer);
|
|
1010
|
+
clearTimeout(tracked.escalationTimer);
|
|
1011
|
+
this.trackedByRequest.delete(requestId);
|
|
1012
|
+
if (tracked.threadTs)
|
|
1013
|
+
this.threadIndex.delete(tracked.threadTs);
|
|
1014
|
+
if (tracked.chatV2ChannelId)
|
|
1015
|
+
this.chatV2Index.delete(tracked.chatV2ChannelId);
|
|
1016
|
+
}
|
|
1017
|
+
/**
|
|
1018
|
+
* Check whether a Request's tags include any of the configured inbound
|
|
1019
|
+
* channel tags.
|
|
1020
|
+
*/
|
|
1021
|
+
matchesInboundTag(tags) {
|
|
1022
|
+
for (const t of tags) {
|
|
1023
|
+
if (this.inboundTags.has(t))
|
|
1024
|
+
return true;
|
|
1025
|
+
}
|
|
1026
|
+
return false;
|
|
1027
|
+
}
|
|
1028
|
+
/**
|
|
1029
|
+
* Build the respond_to_user WorkItem with the standard metadata invariants.
|
|
1030
|
+
*
|
|
1031
|
+
* INBOUND-2: metadata embeds slack-* OR chatV2-* fields based on the
|
|
1032
|
+
* source surface so downstream consumers (status panes, escalation
|
|
1033
|
+
* hooks) can branch without re-parsing `sourceConversationItemId`.
|
|
1034
|
+
*/
|
|
1035
|
+
buildRespondWorkItem(request, wiId, sourceContext) {
|
|
1036
|
+
const now = new Date().toISOString();
|
|
1037
|
+
const slaDeadline = new Date(Date.now() + this.slaMs).toISOString();
|
|
1038
|
+
const escalationDeadline = new Date(Date.now() + this.escalationMs).toISOString();
|
|
1039
|
+
return {
|
|
1040
|
+
id: wiId,
|
|
1041
|
+
type: 'review',
|
|
1042
|
+
owner: 'orchestrator',
|
|
1043
|
+
target: this.orchestratorSession,
|
|
1044
|
+
title: `Respond to user: ${request.title.slice(0, 60)}`,
|
|
1045
|
+
description: `Inbound user message arrived as Request ${request.id}.\n\n` +
|
|
1046
|
+
`Original message:\n${request.description.slice(0, 400)}`,
|
|
1047
|
+
status: 'queued',
|
|
1048
|
+
createdAt: now,
|
|
1049
|
+
retryCount: 0,
|
|
1050
|
+
maxRetries: DEFAULT_MAX_RETRIES,
|
|
1051
|
+
requestId: request.id,
|
|
1052
|
+
missionId: request.missionId,
|
|
1053
|
+
inputTokens: 0,
|
|
1054
|
+
outputTokens: 0,
|
|
1055
|
+
cost: 0,
|
|
1056
|
+
metadata: {
|
|
1057
|
+
idempotencyKey: wiId,
|
|
1058
|
+
triggerSource: 'event',
|
|
1059
|
+
slaSource: 'inbound-1',
|
|
1060
|
+
slaDeadline,
|
|
1061
|
+
slaEscalationDeadline: escalationDeadline,
|
|
1062
|
+
slaBreachLevel: 0,
|
|
1063
|
+
inboundTag: request.tags.find((t) => this.inboundTags.has(t)),
|
|
1064
|
+
inboundSource: sourceContext.source,
|
|
1065
|
+
slackThreadTs: sourceContext.threadTs,
|
|
1066
|
+
slackChannelId: sourceContext.channelId,
|
|
1067
|
+
chatV2ChannelId: sourceContext.chatV2ChannelId,
|
|
1068
|
+
chatV2MessageId: sourceContext.chatV2MessageId,
|
|
1069
|
+
},
|
|
1070
|
+
};
|
|
1071
|
+
}
|
|
1072
|
+
/**
|
|
1073
|
+
* Wrap a dispatch so a thrown handler is logged and isolated. Mirrors
|
|
1074
|
+
* EventToWorkItemBridge.safeDispatch.
|
|
1075
|
+
*/
|
|
1076
|
+
safeDispatch(eventType, event) {
|
|
1077
|
+
const dispatch = (async () => {
|
|
1078
|
+
try {
|
|
1079
|
+
if (eventType === 'request:created') {
|
|
1080
|
+
await this.handleRequestCreated(event);
|
|
1081
|
+
}
|
|
1082
|
+
else if (eventType === 'workitem:queued') {
|
|
1083
|
+
await this.handleWorkItemQueued(event);
|
|
1084
|
+
}
|
|
1085
|
+
}
|
|
1086
|
+
catch (err) {
|
|
1087
|
+
this.logger.error('SLA subscriber handler threw', {
|
|
1088
|
+
eventType,
|
|
1089
|
+
eventId: event.id,
|
|
1090
|
+
error: formatError(err),
|
|
1091
|
+
});
|
|
1092
|
+
}
|
|
1093
|
+
})();
|
|
1094
|
+
this.pendingDispatches.add(dispatch);
|
|
1095
|
+
dispatch
|
|
1096
|
+
.finally(() => {
|
|
1097
|
+
this.pendingDispatches.delete(dispatch);
|
|
1098
|
+
})
|
|
1099
|
+
.catch(() => {
|
|
1100
|
+
// suppress unhandled-rejection — flushPending owners use allSettled.
|
|
1101
|
+
});
|
|
1102
|
+
return dispatch;
|
|
1103
|
+
}
|
|
1104
|
+
}
|
|
1105
|
+
//# sourceMappingURL=request-sla.subscriber.js.map
|