claude-team-mcp 0.8.0__tar.gz → 0.8.2__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (107)
  1. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/.pebbles/events.jsonl +46 -0
  2. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/.pebbles/pebbles.db +0 -0
  3. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/CHANGELOG.md +5 -0
  4. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/PKG-INFO +1 -1
  5. claude_team_mcp-0.8.2/docs/design/unified-worker-state.md +190 -0
  6. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/pyproject.toml +1 -1
  7. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/terminal_backends/tmux.py +7 -9
  8. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/worktree.py +2 -2
  9. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/tests/test_tmux_backend.py +27 -0
  10. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/uv.lock +1 -1
  11. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/.claude/settings.json +0 -0
  12. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/.claude/settings.local.json +0 -0
  13. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/.claude-plugin/marketplace.json +0 -0
  14. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/.claude-plugin/plugin.json +0 -0
  15. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/.gitattributes +0 -0
  16. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/.gitignore +0 -0
  17. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/.mcp.json +0 -0
  18. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/.pebbles/.gitignore +0 -0
  19. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/.pebbles/config.json +0 -0
  20. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/AGENTS.md +0 -0
  21. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/CLAUDE.md +0 -0
  22. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/HAPPY_INTEGRATION_RESEARCH.md +0 -0
  23. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/Makefile +0 -0
  24. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/NOTES.md +0 -0
  25. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/README.md +0 -0
  26. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/commands/check-workers.md +0 -0
  27. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/commands/cleanup-worktrees.md +0 -0
  28. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/commands/merge-worker.md +0 -0
  29. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/commands/pr-worker.md +0 -0
  30. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/commands/spawn-workers.md +0 -0
  31. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/commands/team-summary.md +0 -0
  32. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/config/mcporter.json +0 -0
  33. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/docs/ISSUE_TRACKER_ABSTRACTION.md +0 -0
  34. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/scripts/install-commands.py +0 -0
  35. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/scripts/team-status.sh +0 -0
  36. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/settings.json +0 -0
  37. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/src/claude_team/__init__.py +0 -0
  38. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/src/claude_team/events.py +0 -0
  39. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/src/claude_team/idle_detection.py +0 -0
  40. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/src/claude_team/poller.py +0 -0
  41. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/__init__.py +0 -0
  42. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/__main__.py +0 -0
  43. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/cli_backends/__init__.py +0 -0
  44. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/cli_backends/base.py +0 -0
  45. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/cli_backends/claude.py +0 -0
  46. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/cli_backends/codex.py +0 -0
  47. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/colors.py +0 -0
  48. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/config.py +0 -0
  49. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/config_cli.py +0 -0
  50. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/formatting.py +0 -0
  51. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/idle_detection.py +0 -0
  52. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/issue_tracker/__init__.py +0 -0
  53. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/iterm_utils.py +0 -0
  54. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/names.py +0 -0
  55. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/profile.py +0 -0
  56. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/registry.py +0 -0
  57. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/schemas/__init__.py +0 -0
  58. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/schemas/codex.py +0 -0
  59. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/server.py +0 -0
  60. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/session_state.py +0 -0
  61. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/subprocess_cache.py +0 -0
  62. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/terminal_backends/__init__.py +0 -0
  63. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/terminal_backends/base.py +0 -0
  64. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/terminal_backends/iterm.py +0 -0
  65. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/tools/__init__.py +0 -0
  66. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/tools/adopt_worker.py +0 -0
  67. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/tools/annotate_worker.py +0 -0
  68. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/tools/check_idle_workers.py +0 -0
  69. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/tools/close_workers.py +0 -0
  70. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/tools/discover_workers.py +0 -0
  71. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/tools/examine_worker.py +0 -0
  72. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/tools/issue_tracker_help.py +0 -0
  73. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/tools/list_workers.py +0 -0
  74. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/tools/list_worktrees.py +0 -0
  75. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/tools/message_workers.py +0 -0
  76. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/tools/poll_worker_changes.py +0 -0
  77. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/tools/read_worker_logs.py +0 -0
  78. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/tools/spawn_workers.py +0 -0
  79. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/tools/wait_idle_workers.py +0 -0
  80. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/utils/__init__.py +0 -0
  81. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/utils/constants.py +0 -0
  82. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/utils/errors.py +0 -0
  83. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/utils/worktree_detection.py +0 -0
  84. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/worker_prompt.py +0 -0
  85. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/tests/__init__.py +0 -0
  86. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/tests/conftest.py +0 -0
  87. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/tests/test_cli_backends.py +0 -0
  88. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/tests/test_codex_schema.py +0 -0
  89. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/tests/test_colors.py +0 -0
  90. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/tests/test_config.py +0 -0
  91. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/tests/test_config_cli.py +0 -0
  92. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/tests/test_events.py +0 -0
  93. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/tests/test_formatting.py +0 -0
  94. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/tests/test_idle_detection.py +0 -0
  95. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/tests/test_idle_detection_module.py +0 -0
  96. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/tests/test_issue_tracker.py +0 -0
  97. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/tests/test_issue_tracker_integration.py +0 -0
  98. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/tests/test_iterm_utils.py +0 -0
  99. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/tests/test_names.py +0 -0
  100. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/tests/test_poller.py +0 -0
  101. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/tests/test_registry.py +0 -0
  102. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/tests/test_session_state.py +0 -0
  103. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/tests/test_spawn_workers_defaults.py +0 -0
  104. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/tests/test_terminal_backends.py +0 -0
  105. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/tests/test_worker_prompt.py +0 -0
  106. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/tests/test_worktree.py +0 -0
  107. {claude_team_mcp-0.8.0 → claude_team_mcp-0.8.2}/tests/test_worktree_detection.py +0 -0
@@ -182,3 +182,49 @@
182
182
  {"type":"create","timestamp":"2026-01-30T17:41:16.111959Z","issue_id":"cic-345","payload":{"description":"Currently the tmux backend uses a single global 'claude-team' session for all workers. Instead, create a separate tmux session per project path. This makes it easier to monitor workers by project when using tmux locally.\n\nScope: investigate current session naming, identify all touch points, propose naming scheme, assess impact on discovery/adoption/cleanup.","priority":"2","title":"Per-project tmux sessions instead of global claude-team session","type":"task"}}
183
183
  {"type":"status_update","timestamp":"2026-01-30T17:45:04.28959Z","issue_id":"cic-345","payload":{"status":"in_progress"}}
184
184
  {"type":"close","timestamp":"2026-01-30T17:52:20.629185Z","issue_id":"cic-345","payload":{}}
185
+ {"type":"create","timestamp":"2026-01-31T16:47:58.166575Z","issue_id":"cic-bbd","payload":{"description":"## Problem\n\nTwo disconnected views of worker state:\n\n1. **list_workers (MCP)** — in-memory SessionRegistry, wiped on restart. No persistence.\n2. **Event log (events.jsonl)** — persistent lifecycle events, but no MCP query API. External consumers resort to shell scripts to parse it.\n\nAfter restart, list_workers returns empty even if workers are alive. Event log has history but no programmatic access.\n\n## Proposed Solution (Option C)\n\n### Part 1: Enhance list_workers with event log recovery\n- On startup, reconstruct known worker state from event log snapshots\n- list_workers becomes single source of truth for current state\n\n### Part 2: New worker_events MCP tool\n- Wraps read_events_since() as MCP tool\n- Returns state transitions, completions, stuck workers\n- Supersedes closed cic-467 (poll_worker_changes)\n\n## Context\n- events.py has read_events_since(), get_latest_snapshot() — not wired to MCP\n- Registry has tmux adoption but doesn't use event log for recovery\n- Monitoring agents currently shell out to parse events.jsonl directly","priority":"1","title":"Unified worker state API — merge registry + event log","type":"feature"}}
186
+ {"type":"status_update","timestamp":"2026-01-31T16:48:32.350157Z","issue_id":"cic-bbd","payload":{"status":"in_progress"}}
187
+ {"type":"close","timestamp":"2026-01-31T17:00:14.556752Z","issue_id":"cic-bbd","payload":{}}
188
+ {"type":"create","timestamp":"2026-01-31T17:00:32.968359Z","issue_id":"cic-68b","payload":{"description":"## Problem\n\nWhen Codex has a pending update, it shows an interactive TUI prompt (Update now / Skip / Skip until next version) that blocks the session from becoming interactive. The claude-team spawn logic times out at 30s waiting for Codex to be ready, but the prompt is sitting there waiting for user input.\n\nThis is a silent failure — the server logs show a timeout but there's no indication it's an update prompt specifically.\n\n## Immediate Workaround\n\nSet `check_for_update_on_startup = false` in `~/.codex/config.toml`. Done for Josh's machine.\n\n## Proposed Solution for claude-team\n\n### Option A: Detect and auto-dismiss (recommended)\nAfter spawning Codex in tmux, scrape the pane for update prompt indicators (e.g. 'Update now', 'Skip until next version') and send key '2' (Skip) automatically. This keeps the user's codex up-to-date awareness while not blocking automation.\n\n### Option B: Set env/config to suppress\nHave claude-team set an env var or write a temporary config overlay that disables the update check for spawned workers. Codex supports `check_for_update_on_startup = false` in config.toml.\n\n### Option C: Document and warn\nAdd to spawn_workers output a warning when Codex times out, suggesting the config flag.\n\n## Context\n- Codex source: `codex-rs/tui/src/update_prompt.rs` — uses `get_upgrade_version_for_popup(config)`\n- Config flag: `check_for_update_on_startup` (defaults true)\n- Dismiss mechanism: option 3 persists to `~/.codex/version.json` dismissed_version field\n- The prompt accepts key inputs: 1=Update, 2=Skip, 3=Skip until next version, Esc=Skip","priority":"1","title":"Handle Codex update prompt blocking worker spawn","type":"bug"}}
189
+ {"type":"status_update","timestamp":"2026-01-31T22:34:51.762832Z","issue_id":"cic-bbd","payload":{"status":"open"}}
190
+ {"type":"update","timestamp":"2026-01-31T22:34:51.762832Z","issue_id":"cic-bbd","payload":{"type":"epic"}}
191
+ {"type":"create","timestamp":"2026-01-31T22:35:00.158594Z","issue_id":"cic-fd1","payload":{"description":"Create a lightweight RecoveredSession type in registry.py that represents sessions restored from the event log.\n\n## Requirements\n- Fields: session_id, name, project_path, terminal_id, agent_type, status, last_activity, created_at\n- Implement to_dict() matching ManagedSession output format\n- Add source field: 'event_log' (vs 'registry' for live sessions)\n- Add event_state field: 'idle' | 'active' | 'closed'\n- Add recovered_at and last_event_ts timestamps\n- is_idle() should return based on snapshot state only (no JSONL access)\n\n## State Mapping\n- event state 'idle' -\u003e SessionStatus.READY\n- event state 'active' -\u003e SessionStatus.BUSY\n- event state 'closed' -\u003e new or virtual status\n\n## Files\n- src/claude_team_mcp/registry.py\n\n## Design Doc\ndocs/design/unified-worker-state.md — 'Recovered session representation' section","priority":"1","title":"RecoveredSession dataclass","type":"task"}}
192
+ {"type":"create","timestamp":"2026-01-31T22:35:07.170878Z","issue_id":"cic-169","payload":{"description":"Add a recovery method to SessionRegistry that merges event log state into the registry without overwriting live sessions.\n\n## Signature\nrecover_from_events(snapshot: dict | None, events: list[WorkerEvent]) -\u003e RecoveryReport\n\n## Behavior\n- Input: output of get_latest_snapshot() + read_events_since(snapshot_ts)\n- If session already exists in registry, skip (don't override live state)\n- If session only in event log, create RecoveredSession entry\n- If session closed by events, mark closed in recovered state\n- Return RecoveryReport with counts (added, skipped, closed)\n\n## Integration\n- list_all() should return merged list of ManagedSession + RecoveredSession\n- list_by_status() should include recovered entries\n\n## Dependencies\n- Depends on: cic-fd1 (RecoveredSession dataclass)\n\n## Files\n- src/claude_team_mcp/registry.py\n\n## Design Doc\ndocs/design/unified-worker-state.md — 'Proposed recovery entry point' section","priority":"1","title":"SessionRegistry.recover_from_events() method","type":"task"}}
193
+ {"type":"create","timestamp":"2026-01-31T22:35:13.527812Z","issue_id":"cic-80f","payload":{"description":"Wire recover_from_events() into server startup so list_workers returns useful data after restart.\n\n## Approach\n- Eager recovery at startup: call get_latest_snapshot() + read_events_since() and feed into registry.recover_from_events()\n- Add lazy fallback in list_workers: if registry is empty on first call, attempt recovery\n- Log recovery results (added/skipped/closed counts)\n\n## Integration Point\n- Server lifespan in src/claude_team_mcp/server.py (AppContext setup)\n\n## Dependencies\n- Depends on: cic-169 (recover_from_events method)\n\n## Files\n- src/claude_team_mcp/server.py\n\n## Design Doc\ndocs/design/unified-worker-state.md — 'Recovery timing' section","priority":"1","title":"Startup recovery — seed registry from event log on boot","type":"task"}}
194
+ {"type":"create","timestamp":"2026-01-31T22:35:20.470243Z","issue_id":"cic-22e","payload":{"description":"New MCP tool exposing event log queries. Supersedes closed cic-467.\n\n## Tool Signature\nworker_events(\n since: str | None = None,\n limit: int = 1000,\n include_snapshot: bool = False,\n include_summary: bool = False,\n stale_threshold_minutes: int = 10,\n project_filter: str | None = None,\n) -\u003e dict\n\n## Response Shape\n{\n events: [{ts, type, worker_id, data}],\n count: N,\n summary: {started, closed, idle, active, stuck, last_event_ts}, // if include_summary\n snapshot: {ts, data} // if include_snapshot\n}\n\n## Summary Semantics\n- started/closed/idle/active: lists from event window\n- stuck: active workers with last_activity \u003e stale_threshold_minutes\n- last_event_ts: newest event timestamp\n\n## Implementation\n- Wraps events.read_events_since() and events.get_latest_snapshot()\n- New file: src/claude_team_mcp/tools/worker_events.py\n- Register in src/claude_team_mcp/tools/__init__.py\n\n## Design Doc\ndocs/design/unified-worker-state.md — 'Part 2' section","priority":"1","title":"worker_events MCP tool","type":"task"}}
195
+ {"type":"create","timestamp":"2026-01-31T22:35:26.908995Z","issue_id":"cic-71d","payload":{"description":"Extend list_workers output with provenance fields so clients can distinguish live vs recovered sessions.\n\n## New Fields in Worker Output\n- source: 'registry' | 'event_log'\n- event_state: 'idle' | 'active' | 'closed' (only for recovered entries)\n- recovered_at: ISO timestamp (only for recovered entries)\n- last_event_ts: ISO timestamp of last applied event (only for recovered entries)\n\n## Backward Compatibility\n- Existing fields (status, session_id, name, etc.) unchanged\n- New fields are additive only\n- Live sessions get source='registry', no event_state/recovered_at fields\n\n## Dependencies\n- Depends on: cic-fd1 (RecoveredSession with these fields)\n\n## Files\n- src/claude_team_mcp/tools/list_workers.py","priority":"2","title":"list_workers: add source and event_state fields","type":"task"}}
196
+ {"type":"create","timestamp":"2026-01-31T22:35:34.422147Z","issue_id":"cic-c8e","payload":{"description":"Comprehensive tests for the unified worker state feature.\n\n## Test Cases\n\n### RecoveredSession\n- Construction from snapshot data\n- to_dict() output matches expected format\n- State mapping (idle-\u003eready, active-\u003ebusy, closed)\n- is_idle() returns snapshot-based state\n\n### recover_from_events()\n- Recovery from snapshot only (no events)\n- Recovery from snapshot + events\n- Live sessions not overwritten\n- Closed sessions marked correctly\n- Empty snapshot/events returns empty recovery\n- RecoveryReport counts are accurate\n\n### Startup recovery\n- Server startup seeds registry from event log\n- Lazy fallback triggers on empty registry\n\n### worker_events tool\n- Basic since/limit filtering\n- include_snapshot flag\n- include_summary with stuck detection\n- project_filter\n- Empty event log returns empty result\n\n## Dependencies\n- Depends on: cic-fd1, cic-169, cic-80f, cic-22e","priority":"1","title":"Tests for recovery + worker_events","type":"task"}}
197
+ {"type":"rename","timestamp":"2026-01-31T22:35:46.410975Z","issue_id":"cic-fd1","payload":{"new_id":"cic-bbd.1"}}
198
+ {"type":"dep_add","timestamp":"2026-01-31T22:35:46.410983Z","issue_id":"cic-bbd.1","payload":{"dep_type":"parent-child","depends_on":"cic-bbd"}}
199
+ {"type":"rename","timestamp":"2026-01-31T22:35:46.475283Z","issue_id":"cic-169","payload":{"new_id":"cic-bbd.2"}}
200
+ {"type":"dep_add","timestamp":"2026-01-31T22:35:46.47529Z","issue_id":"cic-bbd.2","payload":{"dep_type":"parent-child","depends_on":"cic-bbd"}}
201
+ {"type":"rename","timestamp":"2026-01-31T22:35:46.547381Z","issue_id":"cic-80f","payload":{"new_id":"cic-bbd.3"}}
202
+ {"type":"dep_add","timestamp":"2026-01-31T22:35:46.547389Z","issue_id":"cic-bbd.3","payload":{"dep_type":"parent-child","depends_on":"cic-bbd"}}
203
+ {"type":"rename","timestamp":"2026-01-31T22:35:46.776865Z","issue_id":"cic-22e","payload":{"new_id":"cic-bbd.4"}}
204
+ {"type":"dep_add","timestamp":"2026-01-31T22:35:46.776871Z","issue_id":"cic-bbd.4","payload":{"dep_type":"parent-child","depends_on":"cic-bbd"}}
205
+ {"type":"rename","timestamp":"2026-01-31T22:35:47.05952Z","issue_id":"cic-71d","payload":{"new_id":"cic-bbd.5"}}
206
+ {"type":"dep_add","timestamp":"2026-01-31T22:35:47.059527Z","issue_id":"cic-bbd.5","payload":{"dep_type":"parent-child","depends_on":"cic-bbd"}}
207
+ {"type":"rename","timestamp":"2026-01-31T22:35:47.216144Z","issue_id":"cic-c8e","payload":{"new_id":"cic-bbd.6"}}
208
+ {"type":"dep_add","timestamp":"2026-01-31T22:35:47.216151Z","issue_id":"cic-bbd.6","payload":{"dep_type":"parent-child","depends_on":"cic-bbd"}}
209
+ {"type":"dep_add","timestamp":"2026-01-31T22:35:52.361655Z","issue_id":"cic-bbd.2","payload":{"dep_type":"blocks","depends_on":"cic-bbd.1"}}
210
+ {"type":"dep_add","timestamp":"2026-01-31T22:35:52.436176Z","issue_id":"cic-bbd.3","payload":{"dep_type":"blocks","depends_on":"cic-bbd.2"}}
211
+ {"type":"dep_add","timestamp":"2026-01-31T22:35:52.519314Z","issue_id":"cic-bbd.5","payload":{"dep_type":"blocks","depends_on":"cic-bbd.1"}}
212
+ {"type":"dep_add","timestamp":"2026-01-31T22:35:52.717406Z","issue_id":"cic-bbd.6","payload":{"dep_type":"blocks","depends_on":"cic-bbd.1"}}
213
+ {"type":"dep_add","timestamp":"2026-01-31T22:35:52.91719Z","issue_id":"cic-bbd.6","payload":{"dep_type":"blocks","depends_on":"cic-bbd.2"}}
214
+ {"type":"dep_add","timestamp":"2026-01-31T22:35:53.301899Z","issue_id":"cic-bbd.6","payload":{"dep_type":"blocks","depends_on":"cic-bbd.3"}}
215
+ {"type":"dep_add","timestamp":"2026-01-31T22:35:53.507305Z","issue_id":"cic-bbd.6","payload":{"dep_type":"blocks","depends_on":"cic-bbd.4"}}
216
+ {"type":"status_update","timestamp":"2026-01-31T22:53:29.845364Z","issue_id":"cic-bbd.1","payload":{"status":"in_progress"}}
217
+ {"type":"status_update","timestamp":"2026-01-31T22:53:31.744371Z","issue_id":"cic-bbd.4","payload":{"status":"in_progress"}}
218
+ {"type":"close","timestamp":"2026-01-31T22:55:03.007686Z","issue_id":"cic-bbd.4","payload":{}}
219
+ {"type":"close","timestamp":"2026-01-31T22:55:09.910056Z","issue_id":"cic-bbd.1","payload":{}}
220
+ {"type":"status_update","timestamp":"2026-02-01T00:03:44.3476Z","issue_id":"cic-bbd.5","payload":{"status":"in_progress"}}
221
+ {"type":"status_update","timestamp":"2026-02-01T00:03:49.271441Z","issue_id":"cic-bbd.2","payload":{"status":"in_progress"}}
222
+ {"type":"close","timestamp":"2026-02-01T00:05:11.852731Z","issue_id":"cic-bbd.5","payload":{}}
223
+ {"type":"close","timestamp":"2026-02-01T00:06:14.749225Z","issue_id":"cic-bbd.2","payload":{}}
224
+ {"type":"status_update","timestamp":"2026-02-01T00:14:33.94628Z","issue_id":"cic-bbd.3","payload":{"status":"in_progress"}}
225
+ {"type":"close","timestamp":"2026-02-01T00:16:29.352825Z","issue_id":"cic-bbd.3","payload":{}}
226
+ {"type":"status_update","timestamp":"2026-02-01T01:37:20.777219Z","issue_id":"cic-bbd.6","payload":{"status":"in_progress"}}
227
+ {"type":"close","timestamp":"2026-02-01T01:40:51.835629Z","issue_id":"cic-bbd.6","payload":{}}
228
+ {"type":"create","timestamp":"2026-02-02T23:29:52.330658Z","issue_id":"cic-ebd","payload":{"description":"tmux_session_name_for_project() hashes the full project_path. Worktree paths produce unique hashes, creating one tmux session per worker instead of one per project. Fix: resolve worktree paths to main repo before hashing.","priority":"1","title":"tmux: worktree paths create separate sessions instead of sharing per-project session","type":"bug"}}
229
+ {"type":"status_update","timestamp":"2026-02-02T23:30:32.877924Z","issue_id":"cic-ebd","payload":{"status":"in_progress"}}
230
+ {"type":"close","timestamp":"2026-02-02T23:33:33.785116Z","issue_id":"cic-ebd","payload":{}}
@@ -7,6 +7,11 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
7
7
 
8
8
  ## [Unreleased]
9
9
 
10
+ ## [0.8.2] - 2026-02-02
11
+
12
+ ### Fixed
13
+ - **tmux session naming**: Worktree paths no longer create separate tmux sessions per worker. Removed hash from session names — format is now `claude-team-{slug}` (e.g. `claude-team-pagedrop-infra`). All workers for the same project share one tmux session with separate windows.
14
+
10
15
  ## [0.8.0] - 2026-01-30
11
16
 
12
17
  ### Added
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: claude-team-mcp
3
- Version: 0.8.0
3
+ Version: 0.8.2
4
4
  Summary: MCP server for managing multiple Claude Code sessions via iTerm2
5
5
  Project-URL: Homepage, https://github.com/Martian-Engineering/claude-team
6
6
  Project-URL: Repository, https://github.com/Martian-Engineering/claude-team
@@ -0,0 +1,190 @@
1
+ # Unified Worker State: list_workers recovery + worker_events API
2
+
3
+ Status: Proposed
4
+ Date: 2026-01-31
5
+ Issue: cic-bbd
6
+
7
+ ## Context
8
+
9
+ Today we have two sources of worker state:
10
+
11
+ - `SessionRegistry` (in-memory, cleared on restart) drives `list_workers`.
12
+ - `events.jsonl` (persistent) stores snapshots + transitions from `WorkerPoller` in
13
+ `src/claude_team/poller.py`, with helpers in `src/claude_team/events.py`.
14
+
15
+ After restart, `list_workers` returns empty even though workers may still exist.
16
+ External consumers resort to parsing `events.jsonl` because MCP exposes no event API.
17
+
18
+ ## Goals
19
+
20
+ - `list_workers` should return a useful view after restart by recovering from the
21
+ latest persisted events.
22
+ - Expose event log data via an MCP tool (`worker_events`) with a stable response
23
+ schema for consumers.
24
+ - Keep changes additive and avoid breaking existing client expectations.
25
+
26
+ ## Non-Goals
27
+
28
+ - Perfect real-time accuracy after restart (terminal liveness still requires
29
+ backend adoption).
30
+ - Changing polling cadence or event log format.
31
+ - Backfilling old historical events beyond what exists in `events.jsonl`.
32
+
33
+ ## Part 1: list_workers recovery API surface
34
+
35
+ ### Proposed recovery entry point
36
+
37
+ Add a registry-level recovery API that merges the event log into the registry
38
+ state without overwriting live sessions.
39
+
40
+ Suggested API shape (names illustrative):
41
+
42
+ - `SessionRegistry.recover_from_events(snapshot: dict | None, events: list[WorkerEvent]) -> RecoveryReport`
43
+ - **Input:**
44
+ - `snapshot`: output of `get_latest_snapshot()` (may be `None`).
45
+ - `events`: `read_events_since(snapshot_ts)` (may be empty).
46
+ - **Behavior:**
47
+ - If a session already exists in the registry, do not override it.
48
+ - If a session is only in the event log, create a lightweight recovered entry.
49
+ - If a session is closed by events, mark it closed in recovered state.
50
+ - **Output:**
51
+ - `RecoveryReport` with counts (added, updated, ignored) and timestamp used.
52
+
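A minimal sketch, not part of this release, of how the merge described above could behave. The report counts follow the bullet list; the `worker_closed` event type and snapshot field names are borrowed from shapes used later in this doc, and the registry is reduced to a set of live session IDs for illustration:

```python
from dataclasses import dataclass, field


@dataclass
class RecoveryReport:
    added: int = 0
    updated: int = 0
    ignored: int = 0
    recovered_ids: list[str] = field(default_factory=list)


def recover_from_events(
    live_session_ids: set[str],
    snapshot: dict | None,
    events: list[dict],
) -> tuple[dict[str, dict], RecoveryReport]:
    """Merge snapshot + events into recovered entries, never touching live sessions."""
    report = RecoveryReport()
    recovered: dict[str, dict] = {}
    for worker in (snapshot or {}).get("workers", []):
        session_id = worker["session_id"]
        if session_id in live_session_ids:
            report.ignored += 1                    # live registry entry wins
            continue
        recovered[session_id] = dict(worker, source="event_log")
        report.added += 1
        report.recovered_ids.append(session_id)
    for event in events:
        entry = recovered.get(event.get("worker_id", ""))
        if entry is not None and event.get("type") == "worker_closed":
            entry["event_state"] = "closed"        # closed since the snapshot
            report.updated += 1
    return recovered, report
```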
53
+ ### Recovered session representation
54
+
55
+ Recovered entries should be distinguishable and safe for read-only usage.
56
+
57
+ Proposed interface (implementation can vary):
58
+
59
+ - A new lightweight `RecoveredSession` object that implements:
60
+ - `session_id`, `name`, `project_path`, `terminal_id`, `agent_type` (from snapshot)
61
+ - `status` mapped from event state (see mapping below)
62
+ - `last_activity` / `created_at` from snapshot when available
63
+ - `to_dict()` for MCP output
64
+ - `is_idle()` returns `None` or uses snapshot state only (never touches JSONL)
65
+ - `SessionRegistry.list_all()` returns a merged list of:
66
+ - live `ManagedSession` objects, plus
67
+ - recovered entries not present in the registry
68
+
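A sketch of the `RecoveredSession` shape the bullets above describe, assuming a dataclass implementation; the status mapping inside `to_dict()` mirrors the "State mapping" section below and is illustrative only:

```python
from dataclasses import dataclass


@dataclass(frozen=True)
class RecoveredSession:
    session_id: str
    name: str
    project_path: str
    terminal_id: str | None
    agent_type: str
    event_state: str            # "idle" | "active" | "closed"
    last_activity: str | None
    created_at: str | None
    recovered_at: str
    last_event_ts: str | None

    def is_idle(self) -> bool | None:
        # Snapshot state only; a recovered entry never re-reads events.jsonl.
        if self.event_state == "closed":
            return None
        return self.event_state == "idle"

    def to_dict(self) -> dict:
        # closed keeps "busy" here; a virtual CLOSED status is an open question below.
        status = {"idle": "ready", "active": "busy"}.get(self.event_state, "busy")
        return {
            "session_id": self.session_id,
            "name": self.name,
            "project_path": self.project_path,
            "agent_type": self.agent_type,
            "status": status,
            "source": "event_log",
            "event_state": self.event_state,
            "recovered_at": self.recovered_at,
            "last_event_ts": self.last_event_ts,
        }
```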
69
+ ### State mapping
70
+
71
+ Event log snapshots record:
72
+
73
+ - `state`: `"idle"` or `"active"` (from `detect_worker_idle`)
74
+ - `status`: `"spawning" | "ready" | "busy"` (from `ManagedSession.to_dict()`)
75
+
76
+ Recommended mapping rules:
77
+
78
+ - Prefer snapshot `state` for consistency across restarts.
79
+ - Map `state` -> `SessionStatus` for output:
80
+ - `idle` -> `ready`
81
+ - `active` -> `busy`
82
+ - `closed` -> (new virtual state or keep `busy` + `state="closed"`)
83
+
84
+ To preserve backwards compatibility, keep the existing `status` field but add
85
+ new fields so clients can detect recovery state explicitly:
86
+
87
+ - `source`: `"registry" | "event_log"`
88
+ - `event_state`: `"idle" | "active" | "closed"` (when recovered)
89
+ - `recovered_at`: ISO timestamp when recovery occurred
90
+ - `last_event_ts`: ISO timestamp of the last applied event
91
+
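For illustration, a live entry and a recovered entry would then differ only in the additive fields (values below are made up):

```python
live_entry = {
    "session_id": "abc123",
    "name": "nova",
    "status": "busy",
    "source": "registry",          # live sessions carry no event_state/recovered_at
}

recovered_entry = {
    "session_id": "def456",
    "name": "quartz",
    "status": "ready",             # mapped from event state "idle"
    "source": "event_log",
    "event_state": "idle",
    "recovered_at": "2026-02-01T00:10:00Z",
    "last_event_ts": "2026-01-31T23:58:12Z",
}
```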
92
+ ### Recovery timing
93
+
94
+ Two compatible entry points:
95
+
96
+ 1. **Eager (startup):** in server boot, call recovery once and seed the registry.
97
+ 2. **Lazy (first list):** in `list_workers`, if registry is empty, perform recovery
98
+ then return merged output.
99
+
100
+ Recommendation: **eager** recovery at startup for predictable behavior, plus a
101
+ lazy fallback in `list_workers` for safety if startup recovery fails.
102
+
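A sketch of the eager-plus-lazy wiring, assuming the `get_latest_snapshot()` / `read_events_since()` helpers named in the Context section; their return shapes, the registry API, and the logging are assumptions:

```python
import logging

from claude_team.events import get_latest_snapshot, read_events_since

log = logging.getLogger(__name__)


def seed_registry_from_event_log(registry) -> None:
    """Eager path: call once while setting up the server lifespan."""
    snapshot = get_latest_snapshot()                    # assumed: dict with a "ts" key, or None
    events = read_events_since(snapshot["ts"]) if snapshot else []
    report = registry.recover_from_events(snapshot, events)
    log.info("event-log recovery: %s", report)


def list_workers_with_fallback(registry) -> list[dict]:
    """Lazy path: recover on first call if the registry is still empty."""
    if not registry.list_all():
        seed_registry_from_event_log(registry)
    return [session.to_dict() for session in registry.list_all()]
```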
103
+ ### Tradeoffs (list_workers recovery)
104
+
105
+ - **Pros:** `list_workers` no longer empty after restart; preserves metadata and
106
+ session IDs for monitoring tools.
107
+ - **Cons:** recovered entries may be stale; terminal handles are missing, so
108
+ control actions (send/close) still require adoption.
109
+ - **Risk mitigation:** mark `source=event_log` and include `last_event_ts` to
110
+ communicate staleness to clients.
111
+
112
+ ## Part 2: worker_events MCP tool API surface
113
+
114
+ ### Proposed tool signature
115
+
116
+ Tool name: `worker_events`
117
+
118
+ Parameters:
119
+
120
+ - `since` (string | null): ISO 8601 timestamp; returns events at or after this
121
+ time. If omitted, returns most recent events (bounded by `limit`).
122
+ - `limit` (int, default 1000): maximum number of events returned.
123
+ - `include_snapshot` (bool, default false): if true, include the latest snapshot
124
+ event (even if it predates `since`) in the response.
125
+ - `include_summary` (bool, default false): include summary aggregates.
126
+ - `stale_threshold_minutes` (int, default 10): used only when
127
+ `include_summary=true` to classify “stuck” workers.
128
+
129
+ ### Proposed response shape
130
+
131
+ ```
132
+ {
133
+ "events": [
134
+ {"ts": "...", "type": "snapshot|worker_started|worker_idle|worker_active|worker_closed",
135
+ "worker_id": "...", "data": { ... }}
136
+ ],
137
+ "count": 123,
138
+ "summary": {
139
+ "started": ["id1", "id2"],
140
+ "closed": ["id3"],
141
+ "idle": ["id4"],
142
+ "active": ["id5"],
143
+ "stuck": ["id6"],
144
+ "last_event_ts": "..."
145
+ },
146
+ "snapshot": {
147
+ "ts": "...",
148
+ "data": {"count": 2, "workers": [ ... ]}
149
+ }
150
+ }
151
+ ```
152
+
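A minimal sketch of the tool body behind the signature above, again assuming the helpers from `src/claude_team/events.py`; how `read_events_since` handles a `None` cursor and the exact event record shape are assumptions, and the `summarize` helper is sketched below, after the summary semantics:

```python
from claude_team.events import get_latest_snapshot, read_events_since


def worker_events(
    since: str | None = None,
    limit: int = 1000,
    include_snapshot: bool = False,
    include_summary: bool = False,
    stale_threshold_minutes: int = 10,
) -> dict:
    # Assumed: read_events_since accepts an ISO timestamp cursor (or None for "all").
    events = list(read_events_since(since))[-limit:]
    response: dict = {"events": events, "count": len(events)}
    if include_summary:
        response["summary"] = summarize(events, stale_threshold_minutes)  # sketched below
    if include_snapshot:
        response["snapshot"] = get_latest_snapshot()  # included even if older than `since`
    return response
```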
153
+ ### Summary semantics
154
+
155
+ - **started/closed/idle/active** lists come from the returned event window.
156
+ - **stuck** is derived from the latest known state (snapshot + events) where:
157
+ - worker is `active`, and
158
+ - last activity is older than `stale_threshold_minutes`.
159
+ - **last_event_ts** is the newest event timestamp in the response.
160
+
161
+ This aligns with the intent of the former `poll_worker_changes` output while
162
+ exposing the raw events for richer client-side handling.
163
+
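A sketch of the summary aggregation implementing the rules above; event records are assumed to be dicts with the `ts`/`type`/`worker_id` keys from the response shape, and the stuck rule uses each worker's newest `worker_active` timestamp (events assumed chronological):

```python
from datetime import datetime, timedelta, timezone


def _parse_ts(value: str) -> datetime:
    return datetime.fromisoformat(value.replace("Z", "+00:00"))


def summarize(events: list[dict], stale_threshold_minutes: int) -> dict:
    summary: dict = {"started": [], "closed": [], "idle": [], "active": [],
                     "stuck": [], "last_event_ts": None}
    buckets = {"worker_started": "started", "worker_closed": "closed",
               "worker_idle": "idle", "worker_active": "active"}
    last_active: dict[str, datetime] = {}
    for event in events:
        key = buckets.get(event.get("type", ""))
        if key:
            summary[key].append(event["worker_id"])
        if event.get("type") == "worker_active":
            last_active[event["worker_id"]] = _parse_ts(event["ts"])  # later events overwrite
        summary["last_event_ts"] = event.get("ts", summary["last_event_ts"])

    # stuck = still active, but last activity is older than the staleness threshold
    cutoff = datetime.now(timezone.utc) - timedelta(minutes=stale_threshold_minutes)
    closed = set(summary["closed"])
    summary["stuck"] = [wid for wid, ts in last_active.items()
                        if ts < cutoff and wid not in closed]
    return summary
```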
164
+ ### Tradeoffs (worker_events)
165
+
166
+ - **Pros:** simple API around existing persistence; consumers can poll with a
167
+ timestamp cursor instead of parsing JSONL.
168
+ - **Cons:** no stable event IDs; clients should track the last timestamp and may
169
+ receive duplicates if multiple events share the same timestamp.
170
+ - **Mitigation:** include `last_event_ts` and recommend clients request
171
+ `since=last_event_ts` and de-duplicate by `(ts, type, worker_id)`.
172
+
173
+ ## Open Questions
174
+
175
+ - Do we want a new explicit `SessionStatus.CLOSED` for recovered entries, or is
176
+ `status` plus `event_state="closed"` sufficient?
177
+ - Should recovery include an opt-in `include_closed` flag to hide sessions that
178
+ have closed since the last snapshot?
179
+ - Should `worker_events` support an optional `project_filter` (parity with
180
+ `list_workers`)?
181
+
182
+ ## Recommendation
183
+
184
+ Implement recovery as an additive merge from `events.get_latest_snapshot()` plus
185
+ `events.read_events_since(snapshot_ts)`, surfaced via a registry recovery helper
186
+ and a new `RecoveredSession` type. Add explicit `source` and `event_state` fields
187
+ in `list_workers` output to communicate provenance and staleness.
188
+
189
+ Expose a new `worker_events` MCP tool with a minimal `since/limit` API and an
190
+ optional summary section for consumers that want quick status deltas.
@@ -1,6 +1,6 @@
1
1
  [project]
2
2
  name = "claude-team-mcp"
3
- version = "0.8.0"
3
+ version = "0.8.2"
4
4
  description = "MCP server for managing multiple Claude Code sessions via iTerm2"
5
5
  readme = "README.md"
6
6
  requires-python = ">=3.11"
@@ -7,7 +7,6 @@ Provides a TerminalBackend implementation backed by tmux CLI commands.
7
7
  from __future__ import annotations
8
8
 
9
9
  import asyncio
10
- import hashlib
11
10
  import re
12
11
  import subprocess
13
12
  import uuid
@@ -46,7 +45,6 @@ ISSUE_ID_PATTERN = re.compile(r"\b[A-Za-z][A-Za-z0-9]*-[A-Za-z0-9]*\d[A-Za-z0-9]
46
45
  SHELL_READY_MARKER = "CLAUDE_TEAM_READY_7f3a9c"
47
46
  CODEX_PRE_ENTER_DELAY = 0.5
48
47
  TMUX_SESSION_PREFIX = "claude-team"
49
- TMUX_SESSION_HASH_LEN = 8
50
48
  TMUX_SESSION_SLUG_MAX = 32
51
49
  TMUX_SESSION_FALLBACK = "project"
52
50
  TMUX_SESSION_PREFIXED = f"{TMUX_SESSION_PREFIX}-"
@@ -92,15 +90,15 @@ def project_name_from_path(project_path: str | None) -> str | None:
92
90
 
93
91
 
94
92
  def tmux_session_name_for_project(project_path: str | None) -> str:
95
- """Return the per-project tmux session name for a given project path."""
93
+ """Return the per-project tmux session name for a given project path.
94
+
95
+ Worktree paths produce the same session name as their main repository
96
+ since project_name_from_path extracts the project name from the path.
97
+ Session names follow the format: claude-team-{project-slug}
98
+ """
96
99
  project_name = project_name_from_path(project_path) or TMUX_SESSION_FALLBACK
97
100
  slug = _tmux_safe_slug(project_name)
98
- if project_path:
99
- digest_source = project_path
100
- else:
101
- digest_source = uuid.uuid4().hex
102
- digest = hashlib.sha1(digest_source.encode("utf-8")).hexdigest()[:TMUX_SESSION_HASH_LEN]
103
- return f"{TMUX_SESSION_PREFIXED}{slug}-{digest}"
101
+ return f"{TMUX_SESSION_PREFIXED}{slug}"
104
102
 
105
103
 
106
104
  # Determine whether a tmux session is managed by claude-team.
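The effect of the simplified naming, taken from the tests added later in this diff:

```python
from claude_team_mcp.terminal_backends.tmux import tmux_session_name_for_project

assert tmux_session_name_for_project("/Users/test/my-project") == "claude-team-my-project"

# A worktree path resolves to the same session as its main repository,
# so all workers for one project share a single tmux session:
assert (
    tmux_session_name_for_project("/Users/test/claude-team/.worktrees/feature-foo")
    == tmux_session_name_for_project("/Users/test/claude-team")
    == "claude-team-claude-team"
)

# No project path falls back to the generic name:
assert tmux_session_name_for_project(None) == "claude-team-project"
```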
@@ -333,7 +333,7 @@ def create_local_worktree(
333
333
  if bead_id:
334
334
  # Bead-based naming: {bead_id}-{annotation}
335
335
  if annotation:
336
- dir_name = f"{bead_id}-{slugify(annotation)}"
336
+ dir_name = f"{bead_id}-{short_slug(annotation)}"
337
337
  else:
338
338
  dir_name = bead_id
339
339
  else:
@@ -341,7 +341,7 @@ def create_local_worktree(
341
341
  short_uuid = uuid.uuid4().hex[:8]
342
342
  name_slug = slugify(worker_name)
343
343
  if annotation:
344
- dir_name = f"{name_slug}-{short_uuid}-{slugify(annotation)}"
344
+ dir_name = f"{name_slug}-{short_uuid}-{short_slug(annotation)}"
345
345
  else:
346
346
  dir_name = f"{name_slug}-{short_uuid}"
347
347
 
@@ -8,6 +8,9 @@ from claude_team_mcp.terminal_backends.base import TerminalSession
8
8
  from claude_team_mcp.terminal_backends.tmux import TmuxBackend, tmux_session_name_for_project
9
9
 
10
10
 
11
+ # subprocess is still needed for tests that mock tmux CalledProcessError
12
+
13
+
11
14
  @pytest.mark.asyncio
12
15
  async def test_send_text_uses_send_keys(monkeypatch):
13
16
  backend = TmuxBackend()
@@ -197,3 +200,27 @@ async def test_find_available_window_returns_none_when_full(monkeypatch):
197
200
  result = await backend.find_available_window(max_panes=2)
198
201
 
199
202
  assert result is None
203
+
204
+
205
+ def test_tmux_session_name_format():
206
+ """Test that session names follow the format claude-team-{slug}."""
207
+ session = tmux_session_name_for_project("/Users/test/my-project")
208
+ assert session == "claude-team-my-project"
209
+
210
+
211
+ def test_tmux_session_name_same_for_worktree_and_main():
212
+ """Test that worktree and main repo produce the same session name."""
213
+ worktree_path = "/Users/test/claude-team/.worktrees/feature-foo"
214
+ main_repo_path = "/Users/test/claude-team"
215
+
216
+ worktree_session = tmux_session_name_for_project(worktree_path)
217
+ main_session = tmux_session_name_for_project(main_repo_path)
218
+
219
+ assert worktree_session == main_session
220
+ assert worktree_session == "claude-team-claude-team"
221
+
222
+
223
+ def test_tmux_session_name_fallback_for_none():
224
+ """Test that None project path produces fallback session name."""
225
+ session = tmux_session_name_for_project(None)
226
+ assert session == "claude-team-project"
@@ -114,7 +114,7 @@ wheels = [
114
114
 
115
115
  [[package]]
116
116
  name = "claude-team-mcp"
117
- version = "0.7.0"
117
+ version = "0.8.1"
118
118
  source = { editable = "." }
119
119
  dependencies = [
120
120
  { name = "iterm2" },