claude-team-mcp 0.7.0__tar.gz → 0.8.2__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/.pebbles/events.jsonl +49 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/.pebbles/pebbles.db +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/CHANGELOG.md +24 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/PKG-INFO +1 -1
- claude_team_mcp-0.8.2/docs/design/unified-worker-state.md +190 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/pyproject.toml +1 -1
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/src/claude_team/events.py +30 -6
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/cli_backends/__init__.py +4 -2
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/cli_backends/claude.py +45 -5
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/cli_backends/codex.py +44 -3
- claude_team_mcp-0.8.2/src/claude_team_mcp/config.py +350 -0
- claude_team_mcp-0.8.2/src/claude_team_mcp/config_cli.py +263 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/issue_tracker/__init__.py +68 -3
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/server.py +69 -0
- claude_team_mcp-0.8.2/src/claude_team_mcp/terminal_backends/__init__.py +49 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/terminal_backends/tmux.py +59 -24
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/tools/discover_workers.py +1 -1
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/tools/spawn_workers.py +36 -14
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/worktree.py +16 -2
- claude_team_mcp-0.8.2/tests/conftest.py +20 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/tests/test_cli_backends.py +52 -0
- claude_team_mcp-0.8.2/tests/test_config.py +703 -0
- claude_team_mcp-0.8.2/tests/test_config_cli.py +104 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/tests/test_events.py +55 -10
- claude_team_mcp-0.8.2/tests/test_issue_tracker.py +228 -0
- claude_team_mcp-0.8.2/tests/test_spawn_workers_defaults.py +230 -0
- claude_team_mcp-0.8.2/tests/test_terminal_backends.py +50 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/tests/test_tmux_backend.py +71 -22
- claude_team_mcp-0.8.2/tests/test_worktree.py +29 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/uv.lock +1 -1
- claude_team_mcp-0.7.0/src/claude_team_mcp/terminal_backends/__init__.py +0 -31
- claude_team_mcp-0.7.0/tests/test_issue_tracker.py +0 -103
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/.claude/settings.json +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/.claude/settings.local.json +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/.claude-plugin/marketplace.json +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/.claude-plugin/plugin.json +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/.gitattributes +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/.gitignore +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/.mcp.json +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/.pebbles/.gitignore +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/.pebbles/config.json +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/AGENTS.md +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/CLAUDE.md +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/HAPPY_INTEGRATION_RESEARCH.md +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/Makefile +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/NOTES.md +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/README.md +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/commands/check-workers.md +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/commands/cleanup-worktrees.md +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/commands/merge-worker.md +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/commands/pr-worker.md +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/commands/spawn-workers.md +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/commands/team-summary.md +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/config/mcporter.json +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/docs/ISSUE_TRACKER_ABSTRACTION.md +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/scripts/install-commands.py +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/scripts/team-status.sh +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/settings.json +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/src/claude_team/__init__.py +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/src/claude_team/idle_detection.py +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/src/claude_team/poller.py +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/__init__.py +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/__main__.py +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/cli_backends/base.py +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/colors.py +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/formatting.py +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/idle_detection.py +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/iterm_utils.py +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/names.py +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/profile.py +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/registry.py +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/schemas/__init__.py +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/schemas/codex.py +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/session_state.py +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/subprocess_cache.py +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/terminal_backends/base.py +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/terminal_backends/iterm.py +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/tools/__init__.py +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/tools/adopt_worker.py +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/tools/annotate_worker.py +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/tools/check_idle_workers.py +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/tools/close_workers.py +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/tools/examine_worker.py +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/tools/issue_tracker_help.py +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/tools/list_workers.py +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/tools/list_worktrees.py +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/tools/message_workers.py +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/tools/poll_worker_changes.py +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/tools/read_worker_logs.py +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/tools/wait_idle_workers.py +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/utils/__init__.py +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/utils/constants.py +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/utils/errors.py +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/utils/worktree_detection.py +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/worker_prompt.py +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/tests/__init__.py +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/tests/test_codex_schema.py +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/tests/test_colors.py +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/tests/test_formatting.py +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/tests/test_idle_detection.py +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/tests/test_idle_detection_module.py +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/tests/test_issue_tracker_integration.py +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/tests/test_iterm_utils.py +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/tests/test_names.py +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/tests/test_poller.py +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/tests/test_registry.py +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/tests/test_session_state.py +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/tests/test_worker_prompt.py +0 -0
- {claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/tests/test_worktree_detection.py +0 -0
{claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/.pebbles/events.jsonl
@@ -179,3 +179,52 @@
{"type":"close","timestamp":"2026-01-28T23:37:13.690021Z","issue_id":"cic-ba6","payload":{}}
{"type":"comment","timestamp":"2026-01-28T23:37:28.697959Z","issue_id":"cic-2d8","payload":{"body":"Completed tmux window naming update: format now '\u003cname\u003e | \u003cproject\u003e [issue]'; project name derived from project_path with .worktrees handling; issue id pulled from bead or annotation. Updated tmux backend + spawn logic and added tests."}}
{"type":"close","timestamp":"2026-01-28T23:37:30.840687Z","issue_id":"cic-2d8","payload":{}}
{"type":"create","timestamp":"2026-01-30T17:41:16.111959Z","issue_id":"cic-345","payload":{"description":"Currently the tmux backend uses a single global 'claude-team' session for all workers. Instead, create a separate tmux session per project path. This makes it easier to monitor workers by project when using tmux locally.\n\nScope: investigate current session naming, identify all touch points, propose naming scheme, assess impact on discovery/adoption/cleanup.","priority":"2","title":"Per-project tmux sessions instead of global claude-team session","type":"task"}}
{"type":"status_update","timestamp":"2026-01-30T17:45:04.28959Z","issue_id":"cic-345","payload":{"status":"in_progress"}}
{"type":"close","timestamp":"2026-01-30T17:52:20.629185Z","issue_id":"cic-345","payload":{}}
{"type":"create","timestamp":"2026-01-31T16:47:58.166575Z","issue_id":"cic-bbd","payload":{"description":"## Problem\n\nTwo disconnected views of worker state:\n\n1. **list_workers (MCP)** — in-memory SessionRegistry, wiped on restart. No persistence.\n2. **Event log (events.jsonl)** — persistent lifecycle events, but no MCP query API. External consumers resort to shell scripts to parse it.\n\nAfter restart, list_workers returns empty even if workers are alive. Event log has history but no programmatic access.\n\n## Proposed Solution (Option C)\n\n### Part 1: Enhance list_workers with event log recovery\n- On startup, reconstruct known worker state from event log snapshots\n- list_workers becomes single source of truth for current state\n\n### Part 2: New worker_events MCP tool\n- Wraps read_events_since() as MCP tool\n- Returns state transitions, completions, stuck workers\n- Supersedes closed cic-467 (poll_worker_changes)\n\n## Context\n- events.py has read_events_since(), get_latest_snapshot() — not wired to MCP\n- Registry has tmux adoption but doesn't use event log for recovery\n- Monitoring agents currently shell out to parse events.jsonl directly","priority":"1","title":"Unified worker state API — merge registry + event log","type":"feature"}}
{"type":"status_update","timestamp":"2026-01-31T16:48:32.350157Z","issue_id":"cic-bbd","payload":{"status":"in_progress"}}
{"type":"close","timestamp":"2026-01-31T17:00:14.556752Z","issue_id":"cic-bbd","payload":{}}
{"type":"create","timestamp":"2026-01-31T17:00:32.968359Z","issue_id":"cic-68b","payload":{"description":"## Problem\n\nWhen Codex has a pending update, it shows an interactive TUI prompt (Update now / Skip / Skip until next version) that blocks the session from becoming interactive. The claude-team spawn logic times out at 30s waiting for Codex to be ready, but the prompt is sitting there waiting for user input.\n\nThis is a silent failure — the server logs show a timeout but there's no indication it's an update prompt specifically.\n\n## Immediate Workaround\n\nSet `check_for_update_on_startup = false` in `~/.codex/config.toml`. Done for Josh's machine.\n\n## Proposed Solution for claude-team\n\n### Option A: Detect and auto-dismiss (recommended)\nAfter spawning Codex in tmux, scrape the pane for update prompt indicators (e.g. 'Update now', 'Skip until next version') and send key '2' (Skip) automatically. This keeps the user's codex up-to-date awareness while not blocking automation.\n\n### Option B: Set env/config to suppress\nHave claude-team set an env var or write a temporary config overlay that disables the update check for spawned workers. Codex supports `check_for_update_on_startup = false` in config.toml.\n\n### Option C: Document and warn\nAdd to spawn_workers output a warning when Codex times out, suggesting the config flag.\n\n## Context\n- Codex source: `codex-rs/tui/src/update_prompt.rs` — uses `get_upgrade_version_for_popup(config)`\n- Config flag: `check_for_update_on_startup` (defaults true)\n- Dismiss mechanism: option 3 persists to `~/.codex/version.json` dismissed_version field\n- The prompt accepts key inputs: 1=Update, 2=Skip, 3=Skip until next version, Esc=Skip","priority":"1","title":"Handle Codex update prompt blocking worker spawn","type":"bug"}}
{"type":"status_update","timestamp":"2026-01-31T22:34:51.762832Z","issue_id":"cic-bbd","payload":{"status":"open"}}
{"type":"update","timestamp":"2026-01-31T22:34:51.762832Z","issue_id":"cic-bbd","payload":{"type":"epic"}}
{"type":"create","timestamp":"2026-01-31T22:35:00.158594Z","issue_id":"cic-fd1","payload":{"description":"Create a lightweight RecoveredSession type in registry.py that represents sessions restored from the event log.\n\n## Requirements\n- Fields: session_id, name, project_path, terminal_id, agent_type, status, last_activity, created_at\n- Implement to_dict() matching ManagedSession output format\n- Add source field: 'event_log' (vs 'registry' for live sessions)\n- Add event_state field: 'idle' | 'active' | 'closed'\n- Add recovered_at and last_event_ts timestamps\n- is_idle() should return based on snapshot state only (no JSONL access)\n\n## State Mapping\n- event state 'idle' -\u003e SessionStatus.READY\n- event state 'active' -\u003e SessionStatus.BUSY\n- event state 'closed' -\u003e new or virtual status\n\n## Files\n- src/claude_team_mcp/registry.py\n\n## Design Doc\ndocs/design/unified-worker-state.md — 'Recovered session representation' section","priority":"1","title":"RecoveredSession dataclass","type":"task"}}
{"type":"create","timestamp":"2026-01-31T22:35:07.170878Z","issue_id":"cic-169","payload":{"description":"Add a recovery method to SessionRegistry that merges event log state into the registry without overwriting live sessions.\n\n## Signature\nrecover_from_events(snapshot: dict | None, events: list[WorkerEvent]) -\u003e RecoveryReport\n\n## Behavior\n- Input: output of get_latest_snapshot() + read_events_since(snapshot_ts)\n- If session already exists in registry, skip (don't override live state)\n- If session only in event log, create RecoveredSession entry\n- If session closed by events, mark closed in recovered state\n- Return RecoveryReport with counts (added, skipped, closed)\n\n## Integration\n- list_all() should return merged list of ManagedSession + RecoveredSession\n- list_by_status() should include recovered entries\n\n## Dependencies\n- Depends on: cic-fd1 (RecoveredSession dataclass)\n\n## Files\n- src/claude_team_mcp/registry.py\n\n## Design Doc\ndocs/design/unified-worker-state.md — 'Proposed recovery entry point' section","priority":"1","title":"SessionRegistry.recover_from_events() method","type":"task"}}
{"type":"create","timestamp":"2026-01-31T22:35:13.527812Z","issue_id":"cic-80f","payload":{"description":"Wire recover_from_events() into server startup so list_workers returns useful data after restart.\n\n## Approach\n- Eager recovery at startup: call get_latest_snapshot() + read_events_since() and feed into registry.recover_from_events()\n- Add lazy fallback in list_workers: if registry is empty on first call, attempt recovery\n- Log recovery results (added/skipped/closed counts)\n\n## Integration Point\n- Server lifespan in src/claude_team_mcp/server.py (AppContext setup)\n\n## Dependencies\n- Depends on: cic-169 (recover_from_events method)\n\n## Files\n- src/claude_team_mcp/server.py\n\n## Design Doc\ndocs/design/unified-worker-state.md — 'Recovery timing' section","priority":"1","title":"Startup recovery — seed registry from event log on boot","type":"task"}}
{"type":"create","timestamp":"2026-01-31T22:35:20.470243Z","issue_id":"cic-22e","payload":{"description":"New MCP tool exposing event log queries. Supersedes closed cic-467.\n\n## Tool Signature\nworker_events(\n since: str | None = None,\n limit: int = 1000,\n include_snapshot: bool = False,\n include_summary: bool = False,\n stale_threshold_minutes: int = 10,\n project_filter: str | None = None,\n) -\u003e dict\n\n## Response Shape\n{\n events: [{ts, type, worker_id, data}],\n count: N,\n summary: {started, closed, idle, active, stuck, last_event_ts}, // if include_summary\n snapshot: {ts, data} // if include_snapshot\n}\n\n## Summary Semantics\n- started/closed/idle/active: lists from event window\n- stuck: active workers with last_activity \u003e stale_threshold_minutes\n- last_event_ts: newest event timestamp\n\n## Implementation\n- Wraps events.read_events_since() and events.get_latest_snapshot()\n- New file: src/claude_team_mcp/tools/worker_events.py\n- Register in src/claude_team_mcp/tools/__init__.py\n\n## Design Doc\ndocs/design/unified-worker-state.md — 'Part 2' section","priority":"1","title":"worker_events MCP tool","type":"task"}}
{"type":"create","timestamp":"2026-01-31T22:35:26.908995Z","issue_id":"cic-71d","payload":{"description":"Extend list_workers output with provenance fields so clients can distinguish live vs recovered sessions.\n\n## New Fields in Worker Output\n- source: 'registry' | 'event_log'\n- event_state: 'idle' | 'active' | 'closed' (only for recovered entries)\n- recovered_at: ISO timestamp (only for recovered entries)\n- last_event_ts: ISO timestamp of last applied event (only for recovered entries)\n\n## Backward Compatibility\n- Existing fields (status, session_id, name, etc.) unchanged\n- New fields are additive only\n- Live sessions get source='registry', no event_state/recovered_at fields\n\n## Dependencies\n- Depends on: cic-fd1 (RecoveredSession with these fields)\n\n## Files\n- src/claude_team_mcp/tools/list_workers.py","priority":"2","title":"list_workers: add source and event_state fields","type":"task"}}
{"type":"create","timestamp":"2026-01-31T22:35:34.422147Z","issue_id":"cic-c8e","payload":{"description":"Comprehensive tests for the unified worker state feature.\n\n## Test Cases\n\n### RecoveredSession\n- Construction from snapshot data\n- to_dict() output matches expected format\n- State mapping (idle-\u003eready, active-\u003ebusy, closed)\n- is_idle() returns snapshot-based state\n\n### recover_from_events()\n- Recovery from snapshot only (no events)\n- Recovery from snapshot + events\n- Live sessions not overwritten\n- Closed sessions marked correctly\n- Empty snapshot/events returns empty recovery\n- RecoveryReport counts are accurate\n\n### Startup recovery\n- Server startup seeds registry from event log\n- Lazy fallback triggers on empty registry\n\n### worker_events tool\n- Basic since/limit filtering\n- include_snapshot flag\n- include_summary with stuck detection\n- project_filter\n- Empty event log returns empty result\n\n## Dependencies\n- Depends on: cic-fd1, cic-169, cic-80f, cic-22e","priority":"1","title":"Tests for recovery + worker_events","type":"task"}}
{"type":"rename","timestamp":"2026-01-31T22:35:46.410975Z","issue_id":"cic-fd1","payload":{"new_id":"cic-bbd.1"}}
{"type":"dep_add","timestamp":"2026-01-31T22:35:46.410983Z","issue_id":"cic-bbd.1","payload":{"dep_type":"parent-child","depends_on":"cic-bbd"}}
{"type":"rename","timestamp":"2026-01-31T22:35:46.475283Z","issue_id":"cic-169","payload":{"new_id":"cic-bbd.2"}}
{"type":"dep_add","timestamp":"2026-01-31T22:35:46.47529Z","issue_id":"cic-bbd.2","payload":{"dep_type":"parent-child","depends_on":"cic-bbd"}}
{"type":"rename","timestamp":"2026-01-31T22:35:46.547381Z","issue_id":"cic-80f","payload":{"new_id":"cic-bbd.3"}}
{"type":"dep_add","timestamp":"2026-01-31T22:35:46.547389Z","issue_id":"cic-bbd.3","payload":{"dep_type":"parent-child","depends_on":"cic-bbd"}}
{"type":"rename","timestamp":"2026-01-31T22:35:46.776865Z","issue_id":"cic-22e","payload":{"new_id":"cic-bbd.4"}}
{"type":"dep_add","timestamp":"2026-01-31T22:35:46.776871Z","issue_id":"cic-bbd.4","payload":{"dep_type":"parent-child","depends_on":"cic-bbd"}}
{"type":"rename","timestamp":"2026-01-31T22:35:47.05952Z","issue_id":"cic-71d","payload":{"new_id":"cic-bbd.5"}}
{"type":"dep_add","timestamp":"2026-01-31T22:35:47.059527Z","issue_id":"cic-bbd.5","payload":{"dep_type":"parent-child","depends_on":"cic-bbd"}}
{"type":"rename","timestamp":"2026-01-31T22:35:47.216144Z","issue_id":"cic-c8e","payload":{"new_id":"cic-bbd.6"}}
{"type":"dep_add","timestamp":"2026-01-31T22:35:47.216151Z","issue_id":"cic-bbd.6","payload":{"dep_type":"parent-child","depends_on":"cic-bbd"}}
{"type":"dep_add","timestamp":"2026-01-31T22:35:52.361655Z","issue_id":"cic-bbd.2","payload":{"dep_type":"blocks","depends_on":"cic-bbd.1"}}
{"type":"dep_add","timestamp":"2026-01-31T22:35:52.436176Z","issue_id":"cic-bbd.3","payload":{"dep_type":"blocks","depends_on":"cic-bbd.2"}}
{"type":"dep_add","timestamp":"2026-01-31T22:35:52.519314Z","issue_id":"cic-bbd.5","payload":{"dep_type":"blocks","depends_on":"cic-bbd.1"}}
{"type":"dep_add","timestamp":"2026-01-31T22:35:52.717406Z","issue_id":"cic-bbd.6","payload":{"dep_type":"blocks","depends_on":"cic-bbd.1"}}
{"type":"dep_add","timestamp":"2026-01-31T22:35:52.91719Z","issue_id":"cic-bbd.6","payload":{"dep_type":"blocks","depends_on":"cic-bbd.2"}}
{"type":"dep_add","timestamp":"2026-01-31T22:35:53.301899Z","issue_id":"cic-bbd.6","payload":{"dep_type":"blocks","depends_on":"cic-bbd.3"}}
{"type":"dep_add","timestamp":"2026-01-31T22:35:53.507305Z","issue_id":"cic-bbd.6","payload":{"dep_type":"blocks","depends_on":"cic-bbd.4"}}
{"type":"status_update","timestamp":"2026-01-31T22:53:29.845364Z","issue_id":"cic-bbd.1","payload":{"status":"in_progress"}}
{"type":"status_update","timestamp":"2026-01-31T22:53:31.744371Z","issue_id":"cic-bbd.4","payload":{"status":"in_progress"}}
{"type":"close","timestamp":"2026-01-31T22:55:03.007686Z","issue_id":"cic-bbd.4","payload":{}}
{"type":"close","timestamp":"2026-01-31T22:55:09.910056Z","issue_id":"cic-bbd.1","payload":{}}
{"type":"status_update","timestamp":"2026-02-01T00:03:44.3476Z","issue_id":"cic-bbd.5","payload":{"status":"in_progress"}}
{"type":"status_update","timestamp":"2026-02-01T00:03:49.271441Z","issue_id":"cic-bbd.2","payload":{"status":"in_progress"}}
{"type":"close","timestamp":"2026-02-01T00:05:11.852731Z","issue_id":"cic-bbd.5","payload":{}}
{"type":"close","timestamp":"2026-02-01T00:06:14.749225Z","issue_id":"cic-bbd.2","payload":{}}
{"type":"status_update","timestamp":"2026-02-01T00:14:33.94628Z","issue_id":"cic-bbd.3","payload":{"status":"in_progress"}}
{"type":"close","timestamp":"2026-02-01T00:16:29.352825Z","issue_id":"cic-bbd.3","payload":{}}
{"type":"status_update","timestamp":"2026-02-01T01:37:20.777219Z","issue_id":"cic-bbd.6","payload":{"status":"in_progress"}}
{"type":"close","timestamp":"2026-02-01T01:40:51.835629Z","issue_id":"cic-bbd.6","payload":{}}
{"type":"create","timestamp":"2026-02-02T23:29:52.330658Z","issue_id":"cic-ebd","payload":{"description":"tmux_session_name_for_project() hashes the full project_path. Worktree paths produce unique hashes, creating one tmux session per worker instead of one per project. Fix: resolve worktree paths to main repo before hashing.","priority":"1","title":"tmux: worktree paths create separate sessions instead of sharing per-project session","type":"bug"}}
{"type":"status_update","timestamp":"2026-02-02T23:30:32.877924Z","issue_id":"cic-ebd","payload":{"status":"in_progress"}}
{"type":"close","timestamp":"2026-02-02T23:33:33.785116Z","issue_id":"cic-ebd","payload":{}}
{claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/.pebbles/pebbles.db
Binary file
{claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/CHANGELOG.md
@@ -7,6 +7,30 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 ## [Unreleased]
 
+## [0.8.2] - 2026-02-02
+
+### Fixed
+- **tmux session naming**: Worktree paths no longer create separate tmux sessions per worker. Removed hash from session names — format is now `claude-team-{slug}` (e.g. `claude-team-pagedrop-infra`). All workers for the same project share one tmux session with separate windows.
+
+## [0.8.0] - 2026-01-30
+
+### Added
+- **System-wide config file** (`~/.claude-team/config.json`): Centralized configuration replacing environment variables
+  - Typed dataclasses with JSON validation
+  - Version field for future migrations
+  - Precedence: env var → config file → built-in default
+- **Config CLI**: `claude-team config init|show|get|set` commands
+- **Per-project tmux sessions**: Each project gets its own tmux session (`claude-team-<slug>-<hash>`) instead of a single shared session
+  - Easier local monitoring — `tmux ls` shows projects separately
+  - Discovery scans all tmux panes and filters by managed prefix
+
+### Fixed
+- Worktree branch/directory names capped at 30 chars to avoid filesystem limits
+- Test isolation from user config file (tests no longer affected by `~/.claude-team/config.json`)
+
+### Changed
+- Tmux `list_sessions` and discovery now scan all sessions with prefix filter instead of targeting a single session
+
 ## [0.7.0] - 2026-01-29
 
 ### Added
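For orientation, a hypothetical shape for the new config file, inferred from the keys referenced elsewhere in this diff (`commands.claude`, `commands.codex`, `events.max_size_mb`, `events.recent_hours`); the authoritative schema lives in the new `config.py`, which does not appear in this section, and the values below are placeholders:

```python
# Hypothetical ~/.claude-team/config.json, written here as the equivalent Python dict.
# Key names are inferred from load_config() call sites in this diff; numeric values
# are placeholders, not the package defaults.
example_config = {
    "version": 1,  # "Version field for future migrations"
    "commands": {
        "claude": "claude",  # read by get_claude_command() unless CLAUDE_TEAM_COMMAND is set
        "codex": "codex",    # read by get_codex_command() unless CLAUDE_TEAM_CODEX_COMMAND is set
    },
    "events": {
        "max_size_mb": 50,   # overridden by CLAUDE_TEAM_EVENTS_MAX_SIZE_MB
        "recent_hours": 24,  # overridden by CLAUDE_TEAM_EVENTS_RECENT_HOURS
    },
}
# Precedence per the changelog: env var → config file → built-in default.
# The file itself is managed with `claude-team config init|show|get|set`.
```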
{claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: claude-team-mcp
-Version: 0.7.0
+Version: 0.8.2
 Summary: MCP server for managing multiple Claude Code sessions via iTerm2
 Project-URL: Homepage, https://github.com/Martian-Engineering/claude-team
 Project-URL: Repository, https://github.com/Martian-Engineering/claude-team
claude_team_mcp-0.8.2/docs/design/unified-worker-state.md
@@ -0,0 +1,190 @@
+# Unified Worker State: list_workers recovery + worker_events API
+
+Status: Proposed
+Date: 2026-01-31
+Issue: cic-bbd
+
+## Context
+
+Today we have two sources of worker state:
+
+- `SessionRegistry` (in-memory, cleared on restart) drives `list_workers`.
+- `events.jsonl` (persistent) stores snapshots + transitions from `WorkerPoller` in
+  `src/claude_team/poller.py`, with helpers in `src/claude_team/events.py`.
+
+After restart, `list_workers` returns empty even though workers may still exist.
+External consumers resort to parsing `events.jsonl` because MCP exposes no event API.
+
+## Goals
+
+- `list_workers` should return a useful view after restart by recovering from the
+  latest persisted events.
+- Expose event log data via an MCP tool (`worker_events`) with a stable response
+  schema for consumers.
+- Keep changes additive and avoid breaking existing client expectations.
+
+## Non-Goals
+
+- Perfect real-time accuracy after restart (terminal liveness still requires
+  backend adoption).
+- Changing polling cadence or event log format.
+- Backfilling old historical events beyond what exists in `events.jsonl`.
+
+## Part 1: list_workers recovery API surface
+
+### Proposed recovery entry point
+
+Add a registry-level recovery API that merges the event log into the registry
+state without overwriting live sessions.
+
+Suggested API shape (names illustrative):
+
+- `SessionRegistry.recover_from_events(snapshot: dict | None, events: list[WorkerEvent]) -> RecoveryReport`
+  - **Input:**
+    - `snapshot`: output of `get_latest_snapshot()` (may be `None`).
+    - `events`: `read_events_since(snapshot_ts)` (may be empty).
+  - **Behavior:**
+    - If a session already exists in the registry, do not override it.
+    - If a session is only in the event log, create a lightweight recovered entry.
+    - If a session is closed by events, mark it closed in recovered state.
+  - **Output:**
+    - `RecoveryReport` with counts (added, updated, ignored) and timestamp used.
+
+### Recovered session representation
+
+Recovered entries should be distinguishable and safe for read-only usage.
+
+Proposed interface (implementation can vary):
+
+- A new lightweight `RecoveredSession` object that implements:
+  - `session_id`, `name`, `project_path`, `terminal_id`, `agent_type` (from snapshot)
+  - `status` mapped from event state (see mapping below)
+  - `last_activity` / `created_at` from snapshot when available
+  - `to_dict()` for MCP output
+  - `is_idle()` returns `None` or uses snapshot state only (never touches JSONL)
+- `SessionRegistry.list_all()` returns a merged list of:
+  - live `ManagedSession` objects, plus
+  - recovered entries not present in the registry
+
+### State mapping
+
+Event log snapshots record:
+
+- `state`: `"idle"` or `"active"` (from `detect_worker_idle`)
+- `status`: `"spawning" | "ready" | "busy"` (from `ManagedSession.to_dict()`)
+
+Recommended mapping rules:
+
+- Prefer snapshot `state` for consistency across restarts.
+- Map `state` -> `SessionStatus` for output:
+  - `idle` -> `ready`
+  - `active` -> `busy`
+  - `closed` -> (new virtual state or keep `busy` + `state="closed"`)
+
+To preserve backwards compatibility, keep the existing `status` field but add
+new fields so clients can detect recovery state explicitly:
+
+- `source`: `"registry" | "event_log"`
+- `event_state`: `"idle" | "active" | "closed"` (when recovered)
+- `recovered_at`: ISO timestamp when recovery occurred
+- `last_event_ts`: ISO timestamp of the last applied event
+
+### Recovery timing
+
+Two compatible entry points:
+
+1. **Eager (startup):** in server boot, call recovery once and seed the registry.
+2. **Lazy (first list):** in `list_workers`, if registry is empty, perform recovery
+   then return merged output.
+
+Recommendation: **eager** recovery at startup for predictable behavior, plus a
+lazy fallback in `list_workers` for safety if startup recovery fails.
+
+### Tradeoffs (list_workers recovery)
+
+- **Pros:** `list_workers` no longer empty after restart; preserves metadata and
+  session IDs for monitoring tools.
+- **Cons:** recovered entries may be stale; terminal handles are missing, so
+  control actions (send/close) still require adoption.
+- **Risk mitigation:** mark `source=event_log` and include `last_event_ts` to
+  communicate staleness to clients.
+
+## Part 2: worker_events MCP tool API surface
+
+### Proposed tool signature
+
+Tool name: `worker_events`
+
+Parameters:
+
+- `since` (string | null): ISO 8601 timestamp; returns events at or after this
+  time. If omitted, returns most recent events (bounded by `limit`).
+- `limit` (int, default 1000): maximum number of events returned.
+- `include_snapshot` (bool, default false): if true, include the latest snapshot
+  event (even if it predates `since`) in the response.
+- `include_summary` (bool, default false): include summary aggregates.
+- `stale_threshold_minutes` (int, default 10): used only when
+  `include_summary=true` to classify “stuck” workers.
+
+### Proposed response shape
+
+```
+{
+  "events": [
+    {"ts": "...", "type": "snapshot|worker_started|worker_idle|worker_active|worker_closed",
+     "worker_id": "...", "data": { ... }}
+  ],
+  "count": 123,
+  "summary": {
+    "started": ["id1", "id2"],
+    "closed": ["id3"],
+    "idle": ["id4"],
+    "active": ["id5"],
+    "stuck": ["id6"],
+    "last_event_ts": "..."
+  },
+  "snapshot": {
+    "ts": "...",
+    "data": {"count": 2, "workers": [ ... ]}
+  }
+}
+```
+
+### Summary semantics
+
+- **started/closed/idle/active** lists come from the returned event window.
+- **stuck** is derived from the latest known state (snapshot + events) where:
+  - worker is `active`, and
+  - last activity is older than `stale_threshold_minutes`.
+- **last_event_ts** is the newest event timestamp in the response.
+
+This aligns with the intent of the former `poll_worker_changes` output while
+exposing the raw events for richer client-side handling.
+
+### Tradeoffs (worker_events)
+
+- **Pros:** simple API around existing persistence; consumers can poll with a
+  timestamp cursor instead of parsing JSONL.
+- **Cons:** no stable event IDs; clients should track the last timestamp and may
+  receive duplicates if multiple events share the same timestamp.
+- **Mitigation:** include `last_event_ts` and recommend clients request
+  `since=last_event_ts` and de-duplicate by `(ts, type, worker_id)`.
+
+## Open Questions
+
+- Do we want a new explicit `SessionStatus.CLOSED` for recovered entries, or is
+  `status` plus `event_state="closed"` sufficient?
+- Should recovery include an opt-in `include_closed` flag to hide sessions that
+  have closed since the last snapshot?
+- Should `worker_events` support an optional `project_filter` (parity with
+  `list_workers`)?
+
+## Recommendation
+
+Implement recovery as an additive merge from `events.get_latest_snapshot()` plus
+`events.read_events_since(snapshot_ts)`, surfaced via a registry recovery helper
+and a new `RecoveredSession` type. Add explicit `source` and `event_state` fields
+in `list_workers` output to communicate provenance and staleness.
+
+Expose a new `worker_events` MCP tool with a minimal `since/limit` API and an
+optional summary section for consumers that want quick status deltas.
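A rough sketch of how the recovery surface proposed above could fit together. The names mirror the design doc, which itself calls them illustrative; this is not the implementation that ships in `registry.py`, and field fallbacks are assumptions:

```python
from dataclasses import dataclass, field
from datetime import datetime, timezone

# Map the snapshot's event state onto the status values the design doc proposes.
_STATE_TO_STATUS = {"idle": "ready", "active": "busy", "closed": "closed"}


@dataclass
class RecoveredSession:
    session_id: str
    name: str
    project_path: str
    terminal_id: str | None
    agent_type: str
    event_state: str                  # "idle" | "active" | "closed"
    last_event_ts: str | None
    recovered_at: str = field(
        default_factory=lambda: datetime.now(timezone.utc).isoformat()
    )

    def to_dict(self) -> dict:
        return {
            "session_id": self.session_id,
            "name": self.name,
            "project_path": self.project_path,
            "status": _STATE_TO_STATUS[self.event_state],
            "source": "event_log",
            "event_state": self.event_state,
            "recovered_at": self.recovered_at,
            "last_event_ts": self.last_event_ts,
        }


@dataclass
class RecoveryReport:
    added: int = 0
    skipped: int = 0
    closed: int = 0


def recover_from_events(registry: dict[str, object],
                        snapshot: dict | None,
                        events: list[dict]) -> RecoveryReport:
    """Merge snapshot + events into `registry` without overwriting live entries."""
    report = RecoveryReport()
    workers = (snapshot or {}).get("data", {}).get("workers", [])
    closed_ids = {e["worker_id"] for e in events if e.get("type") == "worker_closed"}
    for worker in workers:
        sid = worker["session_id"]
        if sid in registry:              # live session wins
            report.skipped += 1
            continue
        state = "closed" if sid in closed_ids else worker.get("state", "idle")
        registry[sid] = RecoveredSession(
            session_id=sid,
            name=worker.get("name", sid),
            project_path=worker.get("project_path", ""),
            terminal_id=worker.get("terminal_id"),
            agent_type=worker.get("agent_type", "claude"),
            event_state=state,
            last_event_ts=(snapshot or {}).get("ts"),
        )
        report.added += 1
        if state == "closed":
            report.closed += 1
    return report
```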
{claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/src/claude_team/events.py
@@ -4,11 +4,14 @@ from __future__ import annotations
 
 from dataclasses import asdict, dataclass
 from datetime import datetime, timedelta, timezone
+import logging
 import json
 import os
 from pathlib import Path
 from typing import Literal
 
+from claude_team_mcp.config import ConfigError, EventsConfig, load_config
+
 try:
     import fcntl
 except ImportError:  # pragma: no cover - platform-specific
@@ -19,6 +22,8 @@ try:
 except ImportError:  # pragma: no cover - platform-specific
     msvcrt = None
 
+logger = logging.getLogger("claude-team-mcp")
+
 
 EventType = Literal[
     "snapshot",
@@ -40,8 +45,20 @@ def _int_env(name: str, default: int) -> int:
         return default
 
 
-
-
+def _load_rotation_config() -> EventsConfig:
+    # Resolve rotation defaults from config, applying env overrides.
+    try:
+        config = load_config()
+        events_config = config.events
+    except ConfigError as exc:
+        logger.warning(
+            "Invalid config file; using default event rotation config: %s", exc
+        )
+        events_config = EventsConfig()
+    return EventsConfig(
+        max_size_mb=_int_env("CLAUDE_TEAM_EVENTS_MAX_SIZE_MB", events_config.max_size_mb),
+        recent_hours=_int_env("CLAUDE_TEAM_EVENTS_RECENT_HOURS", events_config.recent_hours),
+    )
 
 
 @dataclass
@@ -89,6 +106,7 @@ def append_events(events: list[WorkerEvent]) -> None:
     payloads = [json.dumps(_event_to_dict(event), ensure_ascii=False) for event in events]
     block = "\n".join(payloads) + "\n"
     event_ts = _latest_event_timestamp(events)
+    rotation_config = _load_rotation_config()
 
     with path.open("r+", encoding="utf-8") as handle:
         _lock_file(handle)
@@ -97,8 +115,8 @@ def append_events(events: list[WorkerEvent]) -> None:
             handle,
             path,
            current_ts=event_ts,
-            max_size_mb=
-            recent_hours=
+            max_size_mb=rotation_config.max_size_mb,
+            recent_hours=rotation_config.recent_hours,
         )
         # Hold the lock across the entire write and flush cycle.
         handle.seek(0, os.SEEK_END)
@@ -169,8 +187,8 @@ def get_latest_snapshot() -> dict | None:
 
 
 def rotate_events_log(
-    max_size_mb: int =
-    recent_hours: int =
+    max_size_mb: int | None = None,
+    recent_hours: int | None = None,
     now: datetime | None = None,
 ) -> None:
     """Rotate the log daily or by size, retaining active/recent workers."""
@@ -179,6 +197,12 @@
         return
 
     current_ts = now or datetime.now(timezone.utc)
+    if max_size_mb is None or recent_hours is None:
+        rotation_config = _load_rotation_config()
+        if max_size_mb is None:
+            max_size_mb = rotation_config.max_size_mb
+        if recent_hours is None:
+            recent_hours = rotation_config.recent_hours
 
     with path.open("r+", encoding="utf-8") as handle:
         _lock_file(handle)
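The net effect of the events.py changes above is that rotation limits now come from the config file, with the existing environment variables still taking precedence. A small usage sketch (illustrative; the `EventsConfig` defaults are defined in the new `config.py`, which is not shown in this section):

```python
import os

from claude_team.events import rotate_events_log

# Explicit arguments still take priority, exactly as before:
# rotate_events_log(max_size_mb=10, recent_hours=6)

# With no arguments, both limits are resolved via _load_rotation_config():
# env var -> config file events.* -> EventsConfig() defaults.
os.environ["CLAUDE_TEAM_EVENTS_RECENT_HOURS"] = "48"  # env override wins for recent_hours
rotate_events_log()                                   # max_size_mb falls through to config/default
```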
{claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/cli_backends/__init__.py
RENAMED
@@ -6,15 +6,17 @@ This allows claude-team to orchestrate multiple agent types through a unified in
 """
 
 from .base import AgentCLI
-from .claude import ClaudeCLI, claude_cli
-from .codex import CodexCLI, codex_cli
+from .claude import ClaudeCLI, claude_cli, get_claude_command
+from .codex import CodexCLI, codex_cli, get_codex_command
 
 __all__ = [
     "AgentCLI",
     "ClaudeCLI",
     "claude_cli",
+    "get_claude_command",
     "CodexCLI",
     "codex_cli",
+    "get_codex_command",
     "get_cli_backend",
 ]
 
{claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/cli_backends/claude.py
@@ -10,6 +10,45 @@ from typing import Literal
 
 from .base import AgentCLI
 
+# Built-in default command.
+_DEFAULT_COMMAND = "claude"
+
+# Environment variable for command override (takes highest precedence).
+_ENV_VAR = "CLAUDE_TEAM_COMMAND"
+
+
+def get_claude_command() -> str:
+    """
+    Get the Claude CLI command with precedence: env var > config > default.
+
+    Resolution order:
+    1. CLAUDE_TEAM_COMMAND environment variable (for override)
+    2. Config file commands.claude setting
+    3. Built-in default "claude"
+
+    Returns:
+        The command to use for Claude CLI
+    """
+    # Environment variable takes highest precedence (for override).
+    env_val = os.environ.get(_ENV_VAR)
+    if env_val:
+        return env_val
+
+    # Try config file next.
+    # Import here to avoid circular imports and lazy-load config.
+    try:
+        from ..config import ConfigError, load_config
+
+        config = load_config()
+    except ConfigError:
+        return _DEFAULT_COMMAND
+
+    if config.commands.claude:
+        return config.commands.claude
+
+    # Fall back to built-in default.
+    return _DEFAULT_COMMAND
+
 
 class ClaudeCLI(AgentCLI):
     """
@@ -31,10 +70,12 @@ class ClaudeCLI(AgentCLI):
         """
         Return the Claude CLI command.
 
-
-
+        Resolution order:
+        1. CLAUDE_TEAM_COMMAND environment variable (for override)
+        2. Config file commands.claude setting
+        3. Built-in default "claude"
         """
-        return
+        return get_claude_command()
 
     def build_args(
         self,
@@ -102,8 +143,7 @@ class ClaudeCLI(AgentCLI):
 
     def _is_default_command(self) -> bool:
        """Check if using the default 'claude' command (not a custom wrapper)."""
-
-        return cmd == "claude"
+        return get_claude_command() == _DEFAULT_COMMAND
 
 
 # Singleton instance for convenience
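How the resolution order above plays out in practice, as a short illustrative snippet (the override path is a placeholder):

```python
import os

from claude_team_mcp.cli_backends import get_claude_command

# With no env override, the config file's commands.claude (or the built-in default) applies.
print(get_claude_command())  # e.g. "claude"

# The CLAUDE_TEAM_COMMAND environment variable always wins over the config file.
os.environ["CLAUDE_TEAM_COMMAND"] = "/opt/claude/bin/claude"  # placeholder path
print(get_claude_command())  # "/opt/claude/bin/claude"
```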
{claude_team_mcp-0.7.0 → claude_team_mcp-0.8.2}/src/claude_team_mcp/cli_backends/codex.py
@@ -12,6 +12,45 @@ from typing import Literal
 
 from .base import AgentCLI
 
+# Built-in default command.
+_DEFAULT_COMMAND = "codex"
+
+# Environment variable for command override (takes highest precedence).
+_ENV_VAR = "CLAUDE_TEAM_CODEX_COMMAND"
+
+
+def get_codex_command() -> str:
+    """
+    Get the Codex CLI command with precedence: env var > config > default.
+
+    Resolution order:
+    1. CLAUDE_TEAM_CODEX_COMMAND environment variable (for override)
+    2. Config file commands.codex setting
+    3. Built-in default "codex"
+
+    Returns:
+        The command to use for Codex CLI
+    """
+    # Environment variable takes highest precedence (for override).
+    env_val = os.environ.get(_ENV_VAR)
+    if env_val:
+        return env_val
+
+    # Try config file next.
+    # Import here to avoid circular imports and lazy-load config.
+    try:
+        from ..config import ConfigError, load_config
+
+        config = load_config()
+    except ConfigError:
+        return _DEFAULT_COMMAND
+
+    if config.commands.codex:
+        return config.commands.codex
+
+    # Fall back to built-in default.
+    return _DEFAULT_COMMAND
+
 
 class CodexCLI(AgentCLI):
     """
@@ -35,10 +74,12 @@ class CodexCLI(AgentCLI):
         """
         Return the Codex CLI command.
 
-
-
+        Resolution order:
+        1. CLAUDE_TEAM_CODEX_COMMAND environment variable (for override)
+        2. Config file commands.codex setting
+        3. Built-in default "codex"
         """
-        return
+        return get_codex_command()
 
     def build_args(
         self,