claude-team-mcp 0.6.1.tar.gz → 0.8.0.tar.gz
This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in those registries.
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/.gitignore +2 -6
- claude_team_mcp-0.8.0/.pebbles/events.jsonl +184 -0
- claude_team_mcp-0.8.0/.pebbles/pebbles.db +0 -0
- claude_team_mcp-0.8.0/AGENTS.md +1 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/CHANGELOG.md +40 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/CLAUDE.md +19 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/PKG-INFO +1 -1
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/pyproject.toml +2 -2
- claude_team_mcp-0.8.0/scripts/team-status.sh +105 -0
- claude_team_mcp-0.8.0/src/claude_team/__init__.py +11 -0
- claude_team_mcp-0.8.0/src/claude_team/events.py +501 -0
- claude_team_mcp-0.8.0/src/claude_team/idle_detection.py +173 -0
- claude_team_mcp-0.8.0/src/claude_team/poller.py +245 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/src/claude_team_mcp/cli_backends/__init__.py +4 -2
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/src/claude_team_mcp/cli_backends/claude.py +45 -5
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/src/claude_team_mcp/cli_backends/codex.py +44 -3
- claude_team_mcp-0.8.0/src/claude_team_mcp/config.py +350 -0
- claude_team_mcp-0.8.0/src/claude_team_mcp/config_cli.py +263 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/src/claude_team_mcp/idle_detection.py +16 -3
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/src/claude_team_mcp/issue_tracker/__init__.py +68 -3
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/src/claude_team_mcp/iterm_utils.py +5 -73
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/src/claude_team_mcp/registry.py +43 -26
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/src/claude_team_mcp/server.py +164 -61
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/src/claude_team_mcp/session_state.py +364 -2
- claude_team_mcp-0.8.0/src/claude_team_mcp/terminal_backends/__init__.py +49 -0
- claude_team_mcp-0.8.0/src/claude_team_mcp/terminal_backends/base.py +106 -0
- claude_team_mcp-0.8.0/src/claude_team_mcp/terminal_backends/iterm.py +251 -0
- claude_team_mcp-0.8.0/src/claude_team_mcp/terminal_backends/tmux.py +683 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/src/claude_team_mcp/tools/__init__.py +4 -2
- claude_team_mcp-0.8.0/src/claude_team_mcp/tools/adopt_worker.py +179 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/src/claude_team_mcp/tools/close_workers.py +39 -10
- claude_team_mcp-0.8.0/src/claude_team_mcp/tools/discover_workers.py +273 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/src/claude_team_mcp/tools/list_workers.py +29 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/src/claude_team_mcp/tools/message_workers.py +35 -5
- claude_team_mcp-0.8.0/src/claude_team_mcp/tools/poll_worker_changes.py +227 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/src/claude_team_mcp/tools/spawn_workers.py +254 -153
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/src/claude_team_mcp/tools/wait_idle_workers.py +1 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/src/claude_team_mcp/utils/errors.py +7 -3
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/src/claude_team_mcp/worktree.py +73 -12
- claude_team_mcp-0.8.0/tests/conftest.py +20 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/tests/test_cli_backends.py +52 -0
- claude_team_mcp-0.8.0/tests/test_config.py +703 -0
- claude_team_mcp-0.8.0/tests/test_config_cli.py +104 -0
- claude_team_mcp-0.8.0/tests/test_events.py +320 -0
- claude_team_mcp-0.8.0/tests/test_idle_detection_module.py +180 -0
- claude_team_mcp-0.8.0/tests/test_issue_tracker.py +228 -0
- claude_team_mcp-0.8.0/tests/test_poller.py +90 -0
- claude_team_mcp-0.8.0/tests/test_spawn_workers_defaults.py +230 -0
- claude_team_mcp-0.8.0/tests/test_terminal_backends.py +50 -0
- claude_team_mcp-0.8.0/tests/test_tmux_backend.py +199 -0
- claude_team_mcp-0.8.0/tests/test_worktree.py +29 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/uv.lock +1 -1
- claude_team_mcp-0.6.1/.beads/issues.jsonl +0 -75
- claude_team_mcp-0.6.1/.pebbles/events.jsonl +0 -59
- claude_team_mcp-0.6.1/.pebbles/pebbles.db +0 -0
- claude_team_mcp-0.6.1/src/claude_team_mcp/tools/adopt_worker.py +0 -122
- claude_team_mcp-0.6.1/src/claude_team_mcp/tools/discover_workers.py +0 -129
- claude_team_mcp-0.6.1/tests/test_issue_tracker.py +0 -103
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/.claude/settings.json +0 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/.claude/settings.local.json +0 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/.claude-plugin/marketplace.json +0 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/.claude-plugin/plugin.json +0 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/.gitattributes +0 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/.mcp.json +0 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/.pebbles/.gitignore +0 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/.pebbles/config.json +0 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/HAPPY_INTEGRATION_RESEARCH.md +0 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/Makefile +0 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/NOTES.md +0 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/README.md +0 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/commands/check-workers.md +0 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/commands/cleanup-worktrees.md +0 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/commands/merge-worker.md +0 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/commands/pr-worker.md +0 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/commands/spawn-workers.md +0 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/commands/team-summary.md +0 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/config/mcporter.json +0 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/docs/ISSUE_TRACKER_ABSTRACTION.md +0 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/scripts/install-commands.py +0 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/settings.json +0 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/src/claude_team_mcp/__init__.py +0 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/src/claude_team_mcp/__main__.py +0 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/src/claude_team_mcp/cli_backends/base.py +0 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/src/claude_team_mcp/colors.py +0 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/src/claude_team_mcp/formatting.py +0 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/src/claude_team_mcp/names.py +0 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/src/claude_team_mcp/profile.py +0 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/src/claude_team_mcp/schemas/__init__.py +0 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/src/claude_team_mcp/schemas/codex.py +0 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/src/claude_team_mcp/subprocess_cache.py +0 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/src/claude_team_mcp/tools/annotate_worker.py +0 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/src/claude_team_mcp/tools/check_idle_workers.py +0 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/src/claude_team_mcp/tools/examine_worker.py +0 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/src/claude_team_mcp/tools/issue_tracker_help.py +0 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/src/claude_team_mcp/tools/list_worktrees.py +0 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/src/claude_team_mcp/tools/read_worker_logs.py +0 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/src/claude_team_mcp/utils/__init__.py +0 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/src/claude_team_mcp/utils/constants.py +0 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/src/claude_team_mcp/utils/worktree_detection.py +0 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/src/claude_team_mcp/worker_prompt.py +0 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/tests/__init__.py +0 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/tests/test_codex_schema.py +0 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/tests/test_colors.py +0 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/tests/test_formatting.py +0 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/tests/test_idle_detection.py +0 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/tests/test_issue_tracker_integration.py +0 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/tests/test_iterm_utils.py +0 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/tests/test_names.py +0 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/tests/test_registry.py +0 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/tests/test_session_state.py +0 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/tests/test_worker_prompt.py +0 -0
- {claude_team_mcp-0.6.1 → claude_team_mcp-0.8.0}/tests/test_worktree_detection.py +0 -0

--- a/.gitignore
+++ b/.gitignore
@@ -54,9 +54,5 @@ htmlcov/
 # Git worktrees (internal)
 .worktrees/
 
-# Beads local
-.beads
-.beads/last-touched
-.beads/.sync.lock
-.beads/sync-state.json
-.beads/sync_base.jsonl
+# Beads local state (not synced)
+.beads/

--- /dev/null
+++ b/.pebbles/events.jsonl
@@ -0,0 +1,184 @@
{"type":"create","timestamp":"2025-12-15T00:33:52.292079Z","issue_id":"cic-une","payload":{"description":"Currently send_message only works FROM coordinator TO workers. Workers have no way to message back to the coordinator because the coordinator doesn't have a session_id in the registry.nnProposed solution: Add a 'coordinator inbox' mechanism where:n1. Workers can call something like send_to_coordinator(message)n2. Coordinator can poll/read inbox via get_coordinator_messages()nnThis would enable async communication patterns without workers writing to temp files.","priority":"2","title":"Add coordinator messaging - allow workers to send messages back to coordinator","type":"feature"}}
{"type":"create","timestamp":"2025-12-15T01:56:09.65449Z","issue_id":"cic-h75","payload":{"description":"Add a Textual-based TUI companion app to claude-team for managing workers, viewing session state, and integrating with beads task tracking. Key features: dual transport (stdio+HTTP), real-time session monitoring, worker spawn/kill, beads task assignment, advisory messages to lead session.","priority":"2","title":"Claude Team TUI Companion App","type":"epic"}}
{"type":"create","timestamp":"2025-12-16T17:20:11.113365Z","issue_id":"cic-l8u","payload":{"description":"Allow claude-team to spawn terminal sessions that do NOT run Claude Code. Useful for long-running shell programs like dev servers (npm run dev), build watchers, database clients, etc. Sessions should be trackable and manageable like Claude sessions but without JSONL/conversation tracking.","priority":"2","title":"Shell-only terminal sessions for long-running processes","type":"epic"}}
{"type":"create","timestamp":"2025-12-16T17:20:34.630863Z","issue_id":"cic-avg","payload":{"description":"Create new MCP tool spawn_shell() similar to spawn_session but without starting Claude. Parameters: project_path, session_name, command (optional - command to run), layout. Should create iTerm window/pane, cd to project, optionally run initial command.","priority":"2","title":"Add spawn_shell MCP tool","type":"task"}}
{"type":"create","timestamp":"2025-12-16T17:20:45.501187Z","issue_id":"cic-q6l","payload":{"description":"Create MCP tool to send commands/input to shell-only sessions. Similar to send_message but for raw shell input. Should handle Enter key properly. May want option to wait for command completion.","priority":"2","title":"Add send_to_shell MCP tool","type":"task"}}
{"type":"create","timestamp":"2025-12-16T17:20:50.891408Z","issue_id":"cic-hr4","payload":{"description":"Create MCP tool to read terminal output from shell-only sessions. Use iTerm2 screen content API (already have this in get_session_status). Return recent lines of output. May want options for line count, follow mode.","priority":"2","title":"Add get_shell_output MCP tool","type":"task"}}
{"type":"create","timestamp":"2025-12-16T17:20:56.307156Z","issue_id":"cic-2q6","payload":{"description":"Ensure spawn_team and layout options work with mixed session types. E.g., quad layout with 3 Claude workers + 1 shell running dev server. May need to extend spawn_team API or add spawn_mixed_team.","priority":"2","title":"Support mixed layouts with shell and Claude sessions","type":"task"}}
{"type":"create","timestamp":"2025-12-16T17:21:01.708275Z","issue_id":"cic-7pt","payload":{"description":"Handle shell session lifecycle: detect if process is still running, graceful shutdown (SIGTERM then SIGKILL), restart capability. Reuse close_session or add close_shell. Add is_process_running check.","priority":"2","title":"Add shell session lifecycle management","type":"task"}}
{"type":"create","timestamp":"2025-12-17T04:38:23.214042Z","issue_id":"cic-x5t","payload":{"description":"Consider adding a close_team() or close_all_sessions() helper for batch closing workers.nn**Context from dogfooding:**nWhen coordinating a team of 4 workers, had to call close_session 4 times sequentially. Minor friction but noticeable.nn**Open questions:**n- Should this close all sessions, or just sessions from a specific spawn_team call?n- If the latter, need team tracking (see related spike)n- Is this worth the API surface area or is 4 close calls fine?nn**Status:** Thought experiment - not ready to build. Consider independently from main orchestration epic.","priority":"3","title":"Thought experiment: close_team / batch session close","type":"chore"}}
{"type":"create","timestamp":"2025-12-17T04:38:23.497704Z","issue_id":"cic-1vn","payload":{"description":"Consider tracking which workers were spawned together as a 'team'.nn**Context from dogfooding:**nAfter spawning 4 workers, they become individual sessions with no memory that they were spawned together. If I wanted to 'close the team I just spawned' I had to remember which worker IDs belonged to it.nn**Possible approaches:**n1. spawn_team returns a team_id, sessions store itn2. list_sessions can filter by team_idn3. close_team(team_id) closes all sessions in that teamnn**Open questions:**n- Is this over-engineering? Coordinator could just track IDs themselvesn- Does this conflict with the 'iconic names' approach? (team = name_set used?)n- What happens if you spawn multiple teams with same name_set?nn**Status:** Thought experiment - not ready to build. Consider independently from main orchestration epic.","priority":"3","title":"Thought experiment: team tracking / team_id concept","type":"chore"}}
{"type":"create","timestamp":"2025-12-17T04:42:36.631081Z","issue_id":"cic-bcf","payload":{"description":"Evaluate whether the existing wait_for_completion tool should be kept, modified, or removed.nn**Current state:**n- wait_for_completion exists with 5 min default timeout, polls every 2 secondsn- Uses multiple detection methods (markers, git, beads, screen, idle time)n- Unclear if MCP tool timeouts from Claude Code side would even allow 5 min waitsnn**Concerns:**n- MCP tool timeout behavior is undocumentedn- Blocking waits don't let coordinator do other workn- If one worker is slow/blocked, coordinator is stucknn**Alternative pattern (used in dogfooding):**n- Non-blocking send_message callsn- Coordinator polls list_sessions / get_conversation_historyn- Coordinator decides when workers are donen- Each tool call returns quickly, no timeout issuesnn**Options:**n1. Keep wait_for_completion as-is (useful for simple single-worker cases?)n2. Remove it for clarity - polling pattern is safern3. Replace with non-blocking check_completion_statusn4. Add wait_for_team that returns when ANY worker finishes (not all)nn**Status:** Thought experiment - consider removing existing implementation for clarity.","priority":"3","title":"Thought experiment: wait_for_completion / wait_for_team","type":"chore"}}
{"type":"create","timestamp":"2025-12-21T06:17:51.72494Z","issue_id":"cic-7on","payload":{"description":"Improve the message format returned by read_worker_logs for better coordinator usability.nn## Key Improvementsnn### Add New Fieldsn- `index`: Message number in conversation (1-based) for easy referencen- `time_ago`: Relative timestamp (e.g., '2 min ago') for quick scanningn- `content_length`: Character count to know message size before readingn- `content_preview`: First 200 chars for long messagesn- `tools_summary`: List of readable tool summaries (e.g., 'Read → src/foo.py')n- `tool_count`: Quick count of tools usedn- `has_thinking`: Boolean flag instead of full thinking text by defaultnn### Remove/Simplifyn- Remove `uuid` from default output (noise for coordinators)n- Simplify `tool_uses` - summarize large inputs instead of full contentn- Replace full `thinking` blocks with boolean flag by defaultnn### Optional Enhancementn- Consider adding `format` parameter: 'full' (current), 'compact' (previews only), 'summary' (one-line per message)nn## Files to Modifyn- src/claude_team_mcp/session_state.py - Message.to_dict() methodn- src/claude_team_mcp/server.py - read_worker_logs toolnn## ReferencesnBased on JSONL Conversation Formatting Review research.","priority":"2","title":"Enhance read_worker_logs output formatting","type":"feature"}}
{"type":"create","timestamp":"2025-12-23T04:33:57.891092Z","issue_id":"cic-ivhu","payload":{"description":"When message_workers receives a message that begins with forward slash (/), return immediately despite wait conditions. Interpret this as an attempt at a slash command which doesn't produce reliable telemetry (no stop hook fires). Include in the return that we detected a slash command and recommend the coordinator read the conversation logs to verify success.","priority":"2","title":"Return immediately on slash command messages","type":"feature"}}
{"type":"create","timestamp":"2025-12-23T04:36:31.89944Z","issue_id":"cic-5xot","payload":{"description":"Create a comprehensive terminal abstraction layer that captures full feature parity with both iTerm2 and Zed Terminal CLI backends. The abstraction should be complete and extensible, covering all current and future capabilities.nn## Core Featuresn- Self-identification (which terminal am I in?)n- Layout introspection (what panes exist, who are my neighbors?)n- Splitting panesn- Spawning new terminals (with or without Claude)n- Sending text/keysn- Reading screen contentn- Managing focusn- Setting titlesn- Layout reorganizationnn## Advanced Features (iTerm2)n- Event subscriptions (new session, keystroke, screen update, prompt, location change, custom escape sequences, session terminate, layout change, focus change)n- Custom control sequences for bidirectional communicationn- Profile customizations (tab colors, badges)nn## Advanced Features (Zed)n- Layout control (TileVertical, TileHorizontal, Consolidate)n- Layout tree introspection with bounding boxesn- Move terminal between panesn- Terminal idle detectionn- in_pane_of targeting for terminal creation","priority":"1","title":"Comprehensive Terminal Abstraction Layer","type":"epic"}}
{"type":"create","timestamp":"2025-12-23T04:37:13.825284Z","issue_id":"cic-bt3a","payload":{"description":"Update existing code in claude_team_mcp to use the new terminal abstraction layer.nn## Files to Updaten- server.py - Use TerminalBackend instead of direct iTerm2 connectionn- registry.py - Update ManagedSession to use TerminalHandlen- tools/*.py - Update all tools to use abstractionnn## Migration Strategyn1. Create TerminalBackend instance in app_lifespann2. Update AppContext to hold backend instead of raw connectionn3. Update ensure_connection pattern to work with backendn4. Migrate iterm_utils.py calls to backend methodsn5. Update ManagedSession to store TerminalHandle instead of iterm_sessionnn## Backwards Compatibilityn- Keep iterm_utils.py as internal implementation detail for nown- Eventually deprecate direct iterm_utils usage","priority":"2","title":"Migrate existing code to terminal abstraction","type":"task"}}
{"type":"create","timestamp":"2026-01-13T19:23:23.632251Z","issue_id":"cic-d9j","payload":{"description":"When listing or examining workers, the agent_type field (claude vs codex) is not included in the output. Coordinators need this to know which type of worker they're managing.","priority":"2","title":"agent_type not shown in list_workers/examine_worker","type":"bug"}}
{"type":"create","timestamp":"2026-01-13T22:26:56.533366Z","issue_id":"cic-fgx","payload":{"description":"When the MCP server is running inside iTerm2, automatically detect the coordinator's window and prefer spawning workers there (when space is available) before falling back to find_available_window().nn## BackgroundniTerm2 sets ITERM_SESSION_ID in the environment for processes running inside it. We can use this to detect the coordinator's session and find its parent window.nn## Implementationn1. In spawn_workers.py, check os.environ.get('ITERM_SESSION_ID')n2. If present, use iTerm2 API to look up that sessionn3. Use get_window_for_session() to find the coordinator's windown4. When layout='auto', prefer that window first (if it has space)n5. Fall back to existing find_available_window() logic if coordinator window is fullnn## Existing Infrastructuren- get_window_for_session(app, session) already exists in iterm_utils.py:1098-1117n- split_pane() supports incremental pane additionn- find_available_window() handles the general casenn## Notesn- Only affects layout='auto' moden- Gracefully degrades if not running in iTerm or window is fulln- No new parameters needed - this is transparent behavior improvement","priority":"2","title":"Auto-detect coordinator's iTerm window and prefer it for spawning workers","type":"feature"}}
{"type":"create","timestamp":"2026-01-13T23:13:50.495475Z","issue_id":"cic-7ni","payload":{"description":"Add name sets featuring famous Romans from antiquity to names.py, covering multiple worker configurations (1, 2, 3, and 4 workers).nn## Requirementsn- Add entries to SOLOS, DUOS, TRIOS, QUARTETS dicts in src/claude_team_mcp/names.pyn- Follow existing naming conventions (lowercase keys with underscores)n- Choose historically notable Romans and groupings","priority":"3","title":"Add famous Romans from antiquity to worker name sets","type":"feature"}}
{"type":"comment","timestamp":"2025-12-15T19:38:22Z","issue_id":"cic-une","payload":{"body":"Author: rabsef-bicrymnPartial progress: Added get_conversation_history tool (cic-8mk) which allows coordinator to observe worker activity with reverse pagination. This addresses the OBSERVATION part of the problem. The back-channel messaging (workers sending TO coordinator) still needs a queue/inbox system since coordinator isn't in iTerm."}}
{"type":"dep_add","timestamp":"2025-12-16T17:21:17.258226Z","issue_id":"cic-l8u","payload":{"dep_type":"blocks","depends_on":"cic-avg"}}
{"type":"dep_add","timestamp":"2025-12-16T17:21:27.546817Z","issue_id":"cic-l8u","payload":{"dep_type":"blocks","depends_on":"cic-q6l"}}
{"type":"dep_add","timestamp":"2025-12-16T17:21:32.679775Z","issue_id":"cic-l8u","payload":{"dep_type":"blocks","depends_on":"cic-hr4"}}
{"type":"dep_add","timestamp":"2025-12-16T17:21:37.820296Z","issue_id":"cic-l8u","payload":{"dep_type":"blocks","depends_on":"cic-2q6"}}
{"type":"dep_add","timestamp":"2025-12-16T17:21:42.959352Z","issue_id":"cic-l8u","payload":{"dep_type":"blocks","depends_on":"cic-7pt"}}
{"type":"dep_add","timestamp":"2025-12-16T17:21:54.814672Z","issue_id":"cic-q6l","payload":{"dep_type":"blocks","depends_on":"cic-avg"}}
{"type":"dep_add","timestamp":"2025-12-16T17:21:59.958568Z","issue_id":"cic-hr4","payload":{"dep_type":"blocks","depends_on":"cic-avg"}}
{"type":"dep_add","timestamp":"2025-12-16T17:22:05.095812Z","issue_id":"cic-2q6","payload":{"dep_type":"blocks","depends_on":"cic-avg"}}
{"type":"dep_add","timestamp":"2025-12-23T04:37:23.162209Z","issue_id":"cic-5xot","payload":{"dep_type":"blocks","depends_on":"cic-bt3a"}}
{"type":"comment","timestamp":"2026-01-13T23:10:14.082126Z","issue_id":"cic-fgx","payload":{"body":"Close reason: Implemented auto-detect coordinator window with proper ITERM_SESSION_ID parsing and 3-pane layout"}}
{"type":"close","timestamp":"2026-01-13T23:10:14.082126Z","issue_id":"cic-fgx","payload":{}}
{"type":"create","timestamp":"2026-01-20T04:09:10.311493Z","issue_id":"cic-367","payload":{"description":"Abstract claude-team issue tracking so it can work with either Beads (bd) or Pebbles (pb), including detection and prompt/help updates.","priority":"1","title":"Issue tracker abstraction","type":"epic"}}
{"type":"create","timestamp":"2026-01-20T04:09:16.716586Z","issue_id":"cic-90d","payload":{"description":"Add an issue tracker abstraction module with backend registry, command templates, capabilities, and detection (beads vs pebbles).","priority":"1","title":"Phase 1: Issue tracker abstraction module","type":"task"}}
{"type":"create","timestamp":"2026-01-20T04:09:21.078029Z","issue_id":"cic-107","payload":{"description":"Update worktree detection to return tracker-specific env var paths (e.g., BEADS_DIR or Pebbles equivalent).","priority":"1","title":"Phase 2: Worktree detection for tracker env","type":"task"}}
{"type":"create","timestamp":"2026-01-20T04:09:26.691584Z","issue_id":"cic-7c5","payload":{"description":"Replace Beads-specific prompt/help strings with backend data (worker_prompt, constants, bd_help/message hint).","priority":"1","title":"Phase 3: Tracker-neutral prompts and help","type":"task"}}
{"type":"create","timestamp":"2026-01-20T04:09:32.962712Z","issue_id":"cic-a97","payload":{"description":"Update docs and tests to remove Beads-only language and align with the new abstraction.","priority":"2","title":"Phase 4: Tracker-neutral docs and tests","type":"task"}}
{"type":"dep_add","timestamp":"2026-01-20T04:09:38.868502Z","issue_id":"cic-367","payload":{"dep_type":"blocks","depends_on":"cic-90d"}}
{"type":"create","timestamp":"2026-01-20T04:09:41.079133Z","issue_id":"cic-b54","payload":{"description":"Update tests and documentation to be tracker-neutral. Adjust tests/test_worker_prompt.py, CLAUDE.md, README.md, and commands/*.md.","priority":"1","title":"Phase 4: Update docs and tests","type":"task"}}
{"type":"dep_add","timestamp":"2026-01-20T04:09:41.09764Z","issue_id":"cic-367","payload":{"dep_type":"blocks","depends_on":"cic-90d"}}
{"type":"dep_add","timestamp":"2026-01-20T04:09:41.116226Z","issue_id":"cic-367","payload":{"dep_type":"blocks","depends_on":"cic-107"}}
{"type":"dep_add","timestamp":"2026-01-20T04:09:41.133989Z","issue_id":"cic-367","payload":{"dep_type":"blocks","depends_on":"cic-7c5"}}
{"type":"dep_add","timestamp":"2026-01-20T04:09:41.16431Z","issue_id":"cic-107","payload":{"dep_type":"blocks","depends_on":"cic-90d"}}
{"type":"dep_add","timestamp":"2026-01-20T04:09:41.183647Z","issue_id":"cic-7c5","payload":{"dep_type":"blocks","depends_on":"cic-107"}}
{"type":"dep_add","timestamp":"2026-01-20T04:09:44.032047Z","issue_id":"cic-367","payload":{"dep_type":"blocks","depends_on":"cic-107"}}
{"type":"dep_add","timestamp":"2026-01-20T04:09:47.87402Z","issue_id":"cic-367","payload":{"dep_type":"blocks","depends_on":"cic-7c5"}}
{"type":"dep_add","timestamp":"2026-01-20T04:09:51.578675Z","issue_id":"cic-367","payload":{"dep_type":"blocks","depends_on":"cic-a97"}}
{"type":"close","timestamp":"2026-01-20T04:09:52.39411Z","issue_id":"cic-a97","payload":{}}
{"type":"dep_add","timestamp":"2026-01-20T04:09:52.415024Z","issue_id":"cic-367","payload":{"dep_type":"blocks","depends_on":"cic-b54"}}
{"type":"dep_add","timestamp":"2026-01-20T04:09:52.434493Z","issue_id":"cic-b54","payload":{"dep_type":"blocks","depends_on":"cic-7c5"}}
{"type":"dep_add","timestamp":"2026-01-20T04:09:54.91112Z","issue_id":"cic-107","payload":{"dep_type":"blocks","depends_on":"cic-90d"}}
{"type":"dep_add","timestamp":"2026-01-20T04:10:00.488585Z","issue_id":"cic-7c5","payload":{"dep_type":"blocks","depends_on":"cic-107"}}
{"type":"dep_add","timestamp":"2026-01-20T04:10:06.055452Z","issue_id":"cic-a97","payload":{"dep_type":"blocks","depends_on":"cic-7c5"}}
{"type":"status_update","timestamp":"2026-01-20T05:05:20.944388Z","issue_id":"cic-b54","payload":{"status":"in_progress"}}
{"type":"close","timestamp":"2026-01-20T05:18:20.542131Z","issue_id":"cic-b54","payload":{}}
{"type":"create","timestamp":"2026-01-21T19:06:11.932386Z","issue_id":"cic-c09","payload":{"description":"Three tests in test_cli_backends.py are failing because they expect --full-auto flag but implementation uses --dangerously-bypass-approvals-and-sandbox. Need to investigate which is correct and update either the implementation or tests accordingly.","priority":"2","title":"Fix failing Codex CLI tests","type":"task"}}
{"type":"status_update","timestamp":"2026-01-21T19:06:35.580259Z","issue_id":"cic-c09","payload":{"status":"in_progress"}}
{"type":"close","timestamp":"2026-01-21T19:08:45.621746Z","issue_id":"cic-c09","payload":{}}
{"type":"create","timestamp":"2026-01-21T19:49:17.679651Z","issue_id":"cic-13e","payload":{"description":"Codex CLI should use --dangerously-bypass-approvals-and-sandbox instead of --full-auto when skip_permissions is requested.","priority":"1","title":"Fix Codex skip permissions flag","type":"bug"}}
{"type":"status_update","timestamp":"2026-01-21T19:49:19.650961Z","issue_id":"cic-13e","payload":{"status":"in_progress"}}
{"type":"close","timestamp":"2026-01-21T19:50:32.252535Z","issue_id":"cic-13e","payload":{}}
{"type":"create","timestamp":"2026-01-23T21:48:15.471039Z","issue_id":"cic-aef","payload":{"description":"Implement Smart Forking inside claude-team with HTTP-mode indexing, qmd integration, and agent-specific forking for Claude + Codex. Includes background indexing lifecycle, launchd installer, and smart_fork MCP tool. See SMART_FORK_PROPOSAL.md for the detailed requirements and assumptions.","priority":"1","title":"Smart Forking (HTTP, qmd, indexing lifecycle)","type":"epic"}}
{"type":"create","timestamp":"2026-01-23T21:48:44.788243Z","issue_id":"cic-8b0","payload":{"description":"Implement HTTP-only gating and env flags for Smart Fork indexing. Requirements:n- Indexing only runs when claude-team is in persistent HTTP server mode.n- Enable via CLAUDE_TEAM_QMD_INDEXING=true.n- On startup, check prerequisites: qmd on PATH, ~/.claude/projects exists, ~/.codex/sessions exists, ~/.claude-team/index is creatable, and qmd collections can be created/read.n- If any prerequisite fails, log a clear actionable error and disable indexing; server continues running.n- Record in logs that smart_fork is unavailable until indexing is enabled/healthy.n- No retries, caches, or delays beyond the scheduler interval.","priority":"1","title":"Indexing gate + env flags (HTTP-only)","type":"task"}}
{"type":"create","timestamp":"2026-01-23T21:48:50.997821Z","issue_id":"cic-641","payload":{"description":"Implement claude-team-owned export of Claude Code session JSONLs to Markdown. Requirements:n- Source: ~/.claude/projects/u003cproject-slugu003e/*.jsonl.n- Output root: ~/.claude-team/index/claude/.n- One Markdown file per session id.n- Normalize header fields: Session ID, Working Directory, Date, Agent (claude), optionally Repo Root.n- Preserve user/assistant message content; keep format similar to existing claude-sessions markdown exports.n- Ensure Working Directory is populated from JSONL entries.n- Avoid extra caching or retries; only skip if output is newer than JSONL.n- Keep file format stable for qmd parsing and smart_fork metadata extraction.","priority":"1","title":"Claude session markdown export (claude-team owned)","type":"task"}}
{"type":"create","timestamp":"2026-01-23T21:48:58.747322Z","issue_id":"cic-37d","payload":{"description":"Implement claude-team-owned export of Codex session JSONLs to Markdown. Requirements:n- Source: ~/.codex/sessions/**/**/*.jsonl (date-based folders).n- Use session_meta payload fields: id (session id), cwd (working directory), timestamp.n- Output root: ~/.claude-team/index/codex/.n- Normalize headers to match Claude: Session ID, Working Directory, Date, Agent (codex), optionally Repo Root.n- Preserve user/assistant message content with a stable format for qmd parsing.n- Ensure exported Session ID matches the id used by `codex fork/resume`.n- Skip if output is newer than JSONL.n- No cross-agent forking; codex sessions are only for codex.","priority":"1","title":"Codex session markdown export (claude-team owned)","type":"task"}}
{"type":"create","timestamp":"2026-01-23T21:49:05.825511Z","issue_id":"cic-7d9","payload":{"description":"Create/manage qmd collections and run indexing for both agents. Requirements:n- Collections: claude-sessions -u003e ~/.claude-team/index/claude, codex-sessions -u003e ~/.claude-team/index/codex.n- Bootstrap: create collections if missing, then run qmd update + qmd embed for both.n- Ongoing: after each export run, call qmd update and qmd embed for the affected collections.n- Log errors but keep server running (no fatal exit on qmd failure).n- Keep commands deterministic; no extra caching or retries beyond scheduled runs.","priority":"1","title":"qmd collections + indexing pipeline","type":"task"}}
{"type":"create","timestamp":"2026-01-23T21:49:12.615227Z","issue_id":"cic-aad","payload":{"description":"Add a background scheduler for indexing runs when HTTP mode + CLAUDE_TEAM_QMD_INDEXING=true. Requirements:n- Run initial full sync on startup (non-blocking to HTTP requests).n- Then schedule periodic refreshes in a background worker.n- Interval from CLAUDE_TEAM_INDEX_CRON using simple interval strings (e.g., 15m, 1h, 6h); default hourly.n- Ensure runs do not overlap; if a run is in progress, skip the next trigger.n- No retries or delays other than the configured interval.","priority":"2","title":"Background indexing scheduler (interval parsing)","type":"task"}}
{"type":"create","timestamp":"2026-01-23T21:49:22.368982Z","issue_id":"cic-e08","payload":{"description":"Implement smart_fork MCP tool. Requirements:n- Input: intent, agent_type (claude|codex, default claude), limit, min_score, include_snippets, auto_fork, fork_index, project_path (optional), collection override (optional).n- Collection selection: claude -u003e claude-sessions, codex -u003e codex-sessions unless explicit collection provided. No cross-agent forking.n- Search order: qmd query -u003e vsearch -u003e search (fallback on error). Include qmd_error + fallback_used in response.n- Parse Markdown headers: Session ID, Working Directory, Date, Agent (claude|codex). Use Agent header for type.n- Map to JSONL path: Claude via session_state.get_project_dir, Codex via ~/.codex/sessions/**/u003csession-idu003e.jsonl.n- Enforce current-repo-only: if project_path not provided, default to HTTP server project root; exclude sessions whose working_directory is not inside project_path.n- On missing qmd/collections: log error, return guidance payload, and include a fallback list of recent sessions (by agent + repo) from JSONL.n- Optional fork: if auto_fork or fork_index provided, spawn worker using resume/fork semantics for the chosen agent.n- Output: ranked list with score, snippet, agent_type, working_directory, date, source_path, jsonl_path, and optional forked_session info.","priority":"1","title":"smart_fork MCP tool (qmd search + fork)","type":"task"}}
{"type":"create","timestamp":"2026-01-23T21:49:29.962881Z","issue_id":"cic-a3d","payload":{"description":"Extend spawn_workers/WorkerConfig to support resume and fork for both agent types. Requirements:n- WorkerConfig fields: resume_session_id, fork_session (bool), continue_session (bool), agent_type (claude|codex).n- Claude: build args with --resume u003cidu003e and --fork-session; allow --continue only when explicitly requested.n- Codex: invoke codex fork u003cidu003e for fork_session; codex resume u003cidu003e for explicit resume.n- Ensure no cross-agent mismatches (codex sessions only used with codex agent).n- Integrate with existing CLI backend abstractions (claude.py, codex.py).n- Keep behavior additive and isolated; no changes to default spawn behavior unless fields are set.","priority":"1","title":"spawn_workers resume/fork support for Claude + Codex","type":"task"}}
{"type":"create","timestamp":"2026-01-23T21:49:35.81704Z","issue_id":"cic-297","payload":{"description":"Provide an installer script that generates and loads a launchd plist for running claude-team in persistent HTTP mode with indexing enabled. Requirements:n- Script should generate a plist under ~/Library/LaunchAgents and load/unload it (reversible).n- Plist sets CLAUDE_TEAM_QMD_INDEXING=true and CLAUDE_TEAM_INDEX_CRON interval.n- Provide a documented example/default config (HTTP port, working directory, env vars).n- Keep as a small utility in repo (scripts/ or docs/), no external dependencies.","priority":"2","title":"launchd installer for claude-team HTTP mode","type":"task"}}
{"type":"create","timestamp":"2026-01-23T21:49:43.65652Z","issue_id":"cic-232","payload":{"description":"Add basic tests and logging around smart_fork and indexing. Requirements:n- Unit tests for interval parsing and repo filtering logic.n- Happy-path tests for qmd command invocation with mocks (query/vsearch/search fallbacks).n- Logging: indexing start/finish, qmd command failures, smart_fork fallback usage.n- Avoid snapshotting large outputs; keep tests small and deterministic.","priority":"2","title":"Smart fork/indexing tests + logging","type":"task"}}
{"type":"dep_add","timestamp":"2026-01-23T21:49:48.779034Z","issue_id":"cic-aef","payload":{"dep_type":"blocks","depends_on":"cic-8b0"}}
{"type":"dep_add","timestamp":"2026-01-23T21:49:53.452251Z","issue_id":"cic-aef","payload":{"dep_type":"blocks","depends_on":"cic-641"}}
{"type":"dep_add","timestamp":"2026-01-23T21:49:58.444834Z","issue_id":"cic-aef","payload":{"dep_type":"blocks","depends_on":"cic-37d"}}
{"type":"dep_add","timestamp":"2026-01-23T21:50:02.815238Z","issue_id":"cic-aef","payload":{"dep_type":"blocks","depends_on":"cic-7d9"}}
{"type":"dep_add","timestamp":"2026-01-23T21:50:06.995874Z","issue_id":"cic-aef","payload":{"dep_type":"blocks","depends_on":"cic-aad"}}
{"type":"dep_add","timestamp":"2026-01-23T21:50:14.205421Z","issue_id":"cic-aef","payload":{"dep_type":"blocks","depends_on":"cic-e08"}}
{"type":"dep_add","timestamp":"2026-01-23T21:50:18.306135Z","issue_id":"cic-aef","payload":{"dep_type":"blocks","depends_on":"cic-a3d"}}
{"type":"dep_add","timestamp":"2026-01-23T21:50:23.737545Z","issue_id":"cic-aef","payload":{"dep_type":"blocks","depends_on":"cic-297"}}
{"type":"dep_add","timestamp":"2026-01-23T21:50:29.516671Z","issue_id":"cic-aef","payload":{"dep_type":"blocks","depends_on":"cic-232"}}
{"type":"dep_add","timestamp":"2026-01-23T21:51:51.425922Z","issue_id":"cic-641","payload":{"dep_type":"blocks","depends_on":"cic-8b0"}}
{"type":"dep_add","timestamp":"2026-01-23T21:51:56.219512Z","issue_id":"cic-37d","payload":{"dep_type":"blocks","depends_on":"cic-8b0"}}
{"type":"dep_add","timestamp":"2026-01-23T21:52:01.594869Z","issue_id":"cic-7d9","payload":{"dep_type":"blocks","depends_on":"cic-641"}}
{"type":"dep_add","timestamp":"2026-01-23T21:52:06.110635Z","issue_id":"cic-7d9","payload":{"dep_type":"blocks","depends_on":"cic-37d"}}
{"type":"dep_add","timestamp":"2026-01-23T21:52:10.987387Z","issue_id":"cic-7d9","payload":{"dep_type":"blocks","depends_on":"cic-8b0"}}
{"type":"dep_add","timestamp":"2026-01-23T21:52:16.128444Z","issue_id":"cic-aad","payload":{"dep_type":"blocks","depends_on":"cic-7d9"}}
{"type":"dep_add","timestamp":"2026-01-23T21:52:22.763806Z","issue_id":"cic-aad","payload":{"dep_type":"blocks","depends_on":"cic-8b0"}}
{"type":"dep_add","timestamp":"2026-01-23T21:52:27.010062Z","issue_id":"cic-e08","payload":{"dep_type":"blocks","depends_on":"cic-7d9"}}
{"type":"dep_add","timestamp":"2026-01-23T21:52:33.103817Z","issue_id":"cic-e08","payload":{"dep_type":"blocks","depends_on":"cic-a3d"}}
{"type":"dep_add","timestamp":"2026-01-23T21:52:38.426629Z","issue_id":"cic-e08","payload":{"dep_type":"blocks","depends_on":"cic-8b0"}}
{"type":"dep_add","timestamp":"2026-01-23T21:52:43.505843Z","issue_id":"cic-297","payload":{"dep_type":"blocks","depends_on":"cic-aad"}}
{"type":"dep_add","timestamp":"2026-01-23T21:52:48.942468Z","issue_id":"cic-297","payload":{"dep_type":"blocks","depends_on":"cic-8b0"}}
{"type":"dep_add","timestamp":"2026-01-23T21:52:53.363731Z","issue_id":"cic-232","payload":{"dep_type":"blocks","depends_on":"cic-e08"}}
{"type":"dep_add","timestamp":"2026-01-23T21:52:58.109017Z","issue_id":"cic-232","payload":{"dep_type":"blocks","depends_on":"cic-aad"}}
{"type":"dep_add","timestamp":"2026-01-23T21:53:04.058789Z","issue_id":"cic-232","payload":{"dep_type":"blocks","depends_on":"cic-a3d"}}
{"type":"status_update","timestamp":"2026-01-23T22:09:37.8467Z","issue_id":"cic-a3d","payload":{"status":"in_progress"}}
{"type":"comment","timestamp":"2026-01-23T22:09:42.675593Z","issue_id":"cic-a3d","payload":{"body":"Completed via cherry-pick commit 91f8fc5 (from worker branch). Added WorkerConfig resume/fork/continue, AgentCLI updates, Claude/Codex CLI integration, validation, and tests (16 new)."}}
{"type":"close","timestamp":"2026-01-23T22:09:51.124417Z","issue_id":"cic-a3d","payload":{}}
{"type":"status_update","timestamp":"2026-01-23T22:15:27.837697Z","issue_id":"cic-8b0","payload":{"status":"in_progress"}}
{"type":"close","timestamp":"2026-01-24T01:07:36.021417Z","issue_id":"cic-37d","payload":{}}
{"type":"close","timestamp":"2026-01-24T01:30:31.449028Z","issue_id":"cic-641","payload":{}}
{"type":"close","timestamp":"2026-01-24T01:30:54.832995Z","issue_id":"cic-8b0","payload":{}}
{"type":"close","timestamp":"2026-01-24T01:45:26.376769Z","issue_id":"cic-7d9","payload":{}}
{"type":"close","timestamp":"2026-01-24T01:55:54.435888Z","issue_id":"cic-aad","payload":{}}
{"type":"close","timestamp":"2026-01-24T02:30:45.856936Z","issue_id":"cic-232","payload":{}}
{"type":"close","timestamp":"2026-01-24T02:45:38.80895Z","issue_id":"cic-297","payload":{}}
{"type":"create","timestamp":"2026-01-24T19:50:18.476801Z","issue_id":"cic-059","payload":{"description":"When spawning workers with fork_from in a worktree, Claude Code can't find the session because it looks in the worktree's project folder instead of the original project folder.nn**Fix:** Before launching Claude with --resume --fork-session, copy the source session file to the worktree's project folder.nn**Implementation:**n1. In spawn_workers, detect fork_from parametern2. Compute source project folder (from main repo path) n3. Compute target project folder (from worktree path)n4. Copy {session_id}.jsonl from source to targetn5. Then launch Claude as normalnn~10 lines of Python.","priority":"2","title":"Fix fork_from session lookup for worktrees","type":"bug"}}
{"type":"status_update","timestamp":"2026-01-24T19:52:25.775255Z","issue_id":"cic-059","payload":{"status":"closed"}}
{"type":"create","timestamp":"2026-01-27T20:42:49.042737Z","issue_id":"cic-023","payload":{"description":"Add project_filter parameter to list_workers MCP tool to filter by project path/basename/partial match; update team-status.sh --project.","priority":"2","title":"Add project filter to list_workers","type":"task"}}
{"type":"rename","timestamp":"2026-01-27T20:42:51.792118Z","issue_id":"cic-023","payload":{"new_id":"cic-b3c"}}
{"type":"status_update","timestamp":"2026-01-27T20:42:53.464671Z","issue_id":"cic-b3c","payload":{"status":"in_progress"}}
{"type":"close","timestamp":"2026-01-27T20:47:28.975031Z","issue_id":"cic-b3c","payload":{}}
{"type":"create","timestamp":"2026-01-27T21:48:30.250806Z","issue_id":"cic-e18","payload":{"description":"Add background asyncio task to HTTP server that periodically snapshots worker state.nn## Locationnsrc/claude_team/poller.py (new file)nIntegration in src/claude_team/mcp_server.pynn## Requirementsn- WorkerPoller class with start/stop methodsn- Snapshot workers every 60sn- Diff snapshots to detect state transitions (started/idle/active/closed)n- Write events to event log (use events.py module)n- Emit full snapshot every 5 minutes for recoverynn## Integrationn- Start poller on HTTP server startupn- Stop gracefully on shutdownnn## DependenciesnUses cic-d0d (events.py) and cic-116 (idle_detection.py)","priority":"1","title":"Background poller for worker state snapshots","type":"task"}}
{"type":"create","timestamp":"2026-01-27T21:48:38.868204Z","issue_id":"cic-467","payload":{"description":"Add new MCP tool to query worker state changes since last poll.nn## Locationnsrc/claude_team/mcp_server.py (add to existing tools)nn## Tool Signaturenpoll_worker_changes(n since: str | None = None,n stale_threshold_minutes: int = 20,n include_snapshots: bool = False,n) -u003e dictnn## Returnsn{n 'events': [...], # Raw events if requestedn 'summary': {n 'completed': [{'name': '...', 'bead': '...', 'duration_min': N}],n 'stuck': [{'name': '...', 'inactive_minutes': N}],n 'started': [{'name': '...', 'project': '...'}],n },n 'active_count': N,n 'idle_count': N,n 'poll_ts': '2026-01-27T...'n}nn## Implementationn1. Read events from log since timestampn2. Build summary of completed/stuck/started workersn3. Return current countsnn## DependenciesnUses events.py module and poller for state","priority":"1","title":"poll_worker_changes MCP tool","type":"task"}}
{"type":"status_update","timestamp":"2026-01-27T21:49:05.277437Z","issue_id":"cic-467","payload":{"status":"in_progress"}}
{"type":"close","timestamp":"2026-01-27T21:57:54.35541Z","issue_id":"cic-467","payload":{}}
{"type":"create","timestamp":"2026-01-27T22:25:57.608217Z","issue_id":"cic-c1b","payload":{"description":"The background poller emits worker_started on every snapshot cycle instead of just once when a worker first appears.nn## Observedn- Yanni spawned oncen- 5x worker_started events in event logn- poll_worker_changes summary shows Yanni 5x in 'started'nn## Expectedn- Single worker_started when worker first appearsn- Subsequent snapshots should NOT emit worker_started for same workernn## Root CausenThe diff logic in poller.py isn't correctly tracking 'already seen' workers between snapshots. The previous_snapshot comparison may not be persisting correctly.nn## Relatedncic-9da (worker state tracking epic)","priority":"1","title":"Poller emits duplicate worker_started events","type":"bug"}}
{"type":"update","timestamp":"2026-01-27T22:29:47.773131Z","issue_id":"cic-c1b","payload":{"description":"The background poller's diff logic is fundamentally broken. Multiple issues:nn## Bugsnn### 1. Duplicate worker_started eventsnEvery snapshot cycle emits worker_started for ALL workers, not just new ones.nn### 2. No worker_idle events nWhen a worker transitions to is_idle=true, no worker_idle event is emitted.nn### 3. Wrong event typenWorkers with is_idle=true still get worker_started events instead of worker_idle.nn### 4. No worker_active eventsnWhen a worker transitions from idle back to active, no event is emitted.nn## Root CausenThe poller isn't properly tracking previous state to detect transitions. It appears to emit worker_started for every worker present in each snapshot.nn## Expected Behaviorn1. Track previous snapshot's worker IDs and is_idle statesn2. New worker (not in previous) → worker_startedn3. is_idle: false → true → worker_idlen4. is_idle: true → false → worker_active n5. Worker removed from registry → worker_closedn6. Existing workers with same state → NO eventnn## Filesn- src/claude_team/poller.py - _diff_snapshots methodnn## Test Casen```bashn# Spawn workernmcporter call claude-team-http.spawn_workers workers='[{...}]'n# Wait 10s, check eventsntail -10 ~/.claude-team/events.jsonln# Should see ONE worker_started, not multiplen```"}}
{"type":"status_update","timestamp":"2026-01-27T22:30:21.430249Z","issue_id":"cic-c1b","payload":{"status":"in_progress"}}
{"type":"close","timestamp":"2026-01-27T22:33:26.969394Z","issue_id":"cic-c1b","payload":{}}
{"type":"status_update","timestamp":"2026-01-27T23:31:42.153326Z","issue_id":"cic-c1b","payload":{"status":"in_progress"}}
{"type":"comment","timestamp":"2026-01-27T23:32:24.386198Z","issue_id":"cic-c1b","payload":{"body":"## Root Cause Analysisnn**Problem:** Duplicate worker_started events (3x for Adele)nn**Root cause:** StreamableHTTP transport creates **per-session lifespan contexts**. Each HTTP request (mcporter call) enters app_lifespan(), which creates a NEW WorkerPoller with empty _last_snapshot. When multiple HTTP sessions are active simultaneously, each poller sees workers as 'new' and emits worker_started.nnEvidence from debug logs:n- 'Connected to iTerm2 successfully' and 'Created global singleton registry' appear at multiple timestampsn- 3 duplicate worker_started events at 23:01:48Z (x2) and 23:01:51Z (x1)n- Timestamps align with spawn_workers call creating multiple HTTP sessionsnn**Fix:** Make WorkerPoller a module-level singleton (like _global_registry):n1. Add _global_poller: WorkerPoller | None = None at module leveln2. Add get_global_poller(registry) that creates once and returns existingn3. In app_lifespan, use get_global_poller() instead of WorkerPoller()n4. Guard poller.start() to be idempotentnnThe _last_snapshot must persist across HTTP sessions, not reset per-session."}}
{"type":"close","timestamp":"2026-01-27T23:36:10.476116Z","issue_id":"cic-c1b","payload":{}}
{"type":"status_update","timestamp":"2026-01-28T00:19:38.133642Z","issue_id":"cic-c1b","payload":{"status":"in_progress"}}
{"type":"close","timestamp":"2026-01-28T00:19:42.014781Z","issue_id":"cic-c1b","payload":{}}
{"type":"create","timestamp":"2026-01-28T00:37:49.638471Z","issue_id":"cic-f80","payload":{"description":"Change the poller's _build_snapshot() to use file mtime for idle detection instead of Stop hook detection.nn## Current BehaviornIn src/claude_team/poller.py, _build_snapshot() calls session.is_idle() which uses Stop hook detection. This requires Claude Code's Stop hook to be configured, which workers may not have.nn## Required ChangenReplace:n```pythonnis_idle = session.is_idle()n```nnWith:n```pythonnfrom claude_team.idle_detection import detect_worker_idlenis_idle, _reason = detect_worker_idle(session, idle_threshold_seconds=300)n```nn## Notesn- detect_worker_idle() already exists in src/claude_team/idle_detection.pyn- It uses file mtime (JSONL for Claude, output file for Codex)n- The Worker protocol may need adaptation - check that session object has required attributes (agent_type, project_path, claude_session_id)n- Default threshold is 300 seconds (5 min)nn## Testn1. Spawn a workern2. Wait 5+ minutes with no activityn3. Check events.jsonl for worker_idle event","priority":"1","title":"Use file mtime for poller idle detection instead of Stop hooks","type":"task"}}
{"type":"status_update","timestamp":"2026-01-28T00:38:19.581186Z","issue_id":"cic-f80","payload":{"status":"in_progress"}}
{"type":"close","timestamp":"2026-01-28T00:40:01.15665Z","issue_id":"cic-f80","payload":{}}
{"type":"create","timestamp":"2026-01-28T01:12:32.699494Z","issue_id":"cic-b16","payload":{"description":"The events.jsonl file currently grows unbounded. Implement a log rotation policy.nn## Requirementsn- Daily rotation (roll over at midnight or on first write of new day)n- Backup old log with timestamp (e.g., events.2026-01-27.jsonl)n- New log should preserve events for:n - Currently active workersn - Most recently active workers (configurable threshold, e.g., last 24h)n- Consider max file size trigger as secondary rotation conditionnn## Implementation Ideasn- Check date on each write, rotate if day changedn- On rotation: read old file, filter to recent/active workers, write to new filen- Move rest to timestamped backupn- Could also implement max_events or max_size_mb thresholdsnn## Filesn- src/claude_team/events.py - append_events, rotate_events_log","priority":"2","title":"Event log rotation policy","type":"task"}}
{"type":"close","timestamp":"2026-01-28T03:02:06.65654Z","issue_id":"cic-e18","payload":{}}
{"type":"status_update","timestamp":"2026-01-28T04:25:46.953814Z","issue_id":"cic-b16","payload":{"status":"in_progress"}}
{"type":"close","timestamp":"2026-01-28T04:34:55.6503Z","issue_id":"cic-b16","payload":{}}
{"type":"create","timestamp":"2026-01-28T03:36:07.918938Z","issue_id":"cic-745","payload":{"description":"Introduce ~/.claude-team/config.json to replace growing number of env vars. Provides centralized configuration with versioning, auto-creation of defaults, and backwards-compatible env var overrides.","priority":"2","title":"System-wide config file (~/.claude-team/config.json)","type":"epic"}}
{"type":"create","timestamp":"2026-01-28T03:36:14.000096Z","issue_id":"cic-f41","payload":{"description":"Define dataclasses for config schema (ClaudeTeamConfig, CommandsConfig, DefaultsConfig, IssueTrackerConfig). Implement load_config() with JSON validation, handle missing file by creating defaults, implement version field for future migrations. Location: ~/.claude-team/config.json","priority":"2","title":"Create config module (src/claude_team_mcp/config.py)","type":"task"}}
{"type":"create","timestamp":"2026-01-28T03:36:26.885274Z","issue_id":"cic-cc8","payload":{"description":"Update cli_backends/claude.py and cli_backends/codex.py to use config module. Add get_claude_command() and get_codex_command() helpers that check env var first (for override), then config file, then built-in default. Maintains backwards compatibility.","priority":"2","title":"Update CLI backends to use config","type":"task"}}
{"type":"create","timestamp":"2026-01-28T03:36:28.068295Z","issue_id":"cic-1c1","payload":{"description":"Update tools/spawn_workers.py to read defaults from config: use_worktrees, agent_type, skip_permissions, layout. WorkerConfig values override config defaults. Explicit None in WorkerConfig means use config default.","priority":"2","title":"Update spawn_workers to use config defaults","type":"task"}}
{"type":"create","timestamp":"2026-01-28T03:36:29.305099Z","issue_id":"cic-8df","payload":{"description":"Update issue_tracker/__init__.py detect_issue_tracker() to check: 1) CLAUDE_TEAM_ISSUE_TRACKER env var, 2) config.issue_tracker.override, 3) marker directory detection. Implements the previously-documented but never-implemented env var.","priority":"2","title":"Implement issue_tracker.override in config","type":"task"}}
{"type":"create","timestamp":"2026-01-28T03:36:30.104901Z","issue_id":"cic-0a5","payload":{"description":"Add tests/test_config.py covering: config loading/saving, env var override precedence, auto-creation of default config when missing, JSON validation errors, version migration (future-proofing).","priority":"2","title":"Add tests for config module","type":"task"}}
{"type":"create","timestamp":"2026-01-28T03:36:30.872849Z","issue_id":"cic-f0a","payload":{"description":"Update README.md to document ~/.claude-team/config.json. Add example config. Document precedence (env var u003e config u003e default). Update Environment Variables section to note config file alternative.","priority":"3","title":"Update documentation for config file","type":"task"}}
|
|
138
|
+
{"type":"rename","timestamp":"2026-01-28T03:37:03.563993Z","issue_id":"cic-f41","payload":{"new_id":"cic-745.1"}}
|
|
139
|
+
{"type":"dep_add","timestamp":"2026-01-28T03:37:03.563999Z","issue_id":"cic-745.1","payload":{"dep_type":"parent-child","depends_on":"cic-745"}}
|
|
140
|
+
{"type":"rename","timestamp":"2026-01-28T03:37:03.614405Z","issue_id":"cic-cc8","payload":{"new_id":"cic-745.2"}}
|
|
141
|
+
{"type":"dep_add","timestamp":"2026-01-28T03:37:03.614411Z","issue_id":"cic-745.2","payload":{"dep_type":"parent-child","depends_on":"cic-745"}}
|
|
142
|
+
{"type":"rename","timestamp":"2026-01-28T03:37:03.671282Z","issue_id":"cic-1c1","payload":{"new_id":"cic-745.3"}}
|
|
143
|
+
{"type":"dep_add","timestamp":"2026-01-28T03:37:03.671289Z","issue_id":"cic-745.3","payload":{"dep_type":"parent-child","depends_on":"cic-745"}}
|
|
144
|
+
{"type":"rename","timestamp":"2026-01-28T03:37:03.728328Z","issue_id":"cic-8df","payload":{"new_id":"cic-745.4"}}
|
|
145
|
+
{"type":"dep_add","timestamp":"2026-01-28T03:37:03.728334Z","issue_id":"cic-745.4","payload":{"dep_type":"parent-child","depends_on":"cic-745"}}
|
|
146
|
+
{"type":"rename","timestamp":"2026-01-28T03:37:03.790408Z","issue_id":"cic-0a5","payload":{"new_id":"cic-745.5"}}
|
|
147
|
+
{"type":"dep_add","timestamp":"2026-01-28T03:37:03.790422Z","issue_id":"cic-745.5","payload":{"dep_type":"parent-child","depends_on":"cic-745"}}
|
|
148
|
+
{"type":"rename","timestamp":"2026-01-28T03:37:03.854379Z","issue_id":"cic-f0a","payload":{"new_id":"cic-745.6"}}
|
|
149
|
+
{"type":"dep_add","timestamp":"2026-01-28T03:37:03.854384Z","issue_id":"cic-745.6","payload":{"dep_type":"parent-child","depends_on":"cic-745"}}
|
|
150
|
+
{"type":"dep_add","timestamp":"2026-01-28T03:37:12.074548Z","issue_id":"cic-745.2","payload":{"dep_type":"blocks","depends_on":"cic-745.1"}}
|
|
151
|
+
{"type":"dep_add","timestamp":"2026-01-28T03:37:12.132494Z","issue_id":"cic-745.3","payload":{"dep_type":"blocks","depends_on":"cic-745.1"}}
|
|
152
|
+
{"type":"dep_add","timestamp":"2026-01-28T03:37:12.196907Z","issue_id":"cic-745.4","payload":{"dep_type":"blocks","depends_on":"cic-745.1"}}
|
|
153
|
+
{"type":"dep_add","timestamp":"2026-01-28T03:37:12.257668Z","issue_id":"cic-745.5","payload":{"dep_type":"blocks","depends_on":"cic-745.1"}}
|
|
154
|
+
{"type":"dep_add","timestamp":"2026-01-28T03:37:12.33493Z","issue_id":"cic-745.6","payload":{"dep_type":"blocks","depends_on":"cic-745.2"}}
|
|
155
|
+
{"type":"dep_add","timestamp":"2026-01-28T03:37:12.402761Z","issue_id":"cic-745.6","payload":{"dep_type":"blocks","depends_on":"cic-745.3"}}
|
|
156
|
+
{"type":"dep_add","timestamp":"2026-01-28T03:37:12.464015Z","issue_id":"cic-745.6","payload":{"dep_type":"blocks","depends_on":"cic-745.4"}}
|
|
157
|
+
{"type":"dep_add","timestamp":"2026-01-28T03:37:12.524492Z","issue_id":"cic-745.6","payload":{"dep_type":"blocks","depends_on":"cic-745.5"}}
|
|
158
|
+
{"type":"create","timestamp":"2026-01-28T05:13:26.217794Z","issue_id":"cic-e1b","payload":{"description":"Migrate CLAUDE_TEAM_EVENTS_MAX_SIZE_MB and CLAUDE_TEAM_EVENTS_RECENT_HOURS to config.json","priority":"2","title":"Add event rotation settings to config","type":"task"}}
|
|
159
|
+
{"type":"dep_add","timestamp":"2026-01-28T05:13:29.196306Z","issue_id":"cic-745","payload":{"dep_type":"blocks","depends_on":"cic-e1b"}}
|
|
160
|
+
{"type":"create","timestamp":"2026-01-28T05:54:23.580297Z","issue_id":"cic-5f3","payload":{"description":"_FakeSession fixture missing agent_type attribute required by detect_worker_idle() after Codex support was added. 4 tests failing.","priority":"1","title":"Fix test_poller.py - FakeSession missing agent_type","type":"bug"}}
|
|
161
|
+
{"type":"status_update","timestamp":"2026-01-28T05:54:51.734078Z","issue_id":"cic-5f3","payload":{"status":"in_progress"}}
|
|
162
|
+
{"type":"close","timestamp":"2026-01-28T05:55:55.408423Z","issue_id":"cic-5f3","payload":{}}
|
|
163
|
+
{"type":"create","timestamp":"2026-01-28T15:24:30.6653Z","issue_id":"cic-d23","payload":{"description":"","priority":"2","title":"Codex idle detection oscillates after work completion","type":"bug"}}
|
|
164
|
+
{"type":"comment","timestamp":"2026-01-28T15:24:37.319042Z","issue_id":"cic-d23","payload":{"body":"Observed with Nefertiti (session 0adba9d2) on 2026-01-28:\n- 15:01 UTC → idle (correctly detected completion, committed at 07:00:43 PST)\n- 15:11 UTC → active (10 min after work done, spurious)\n- 15:17 UTC → idle\n\nExpected: once idle after completion, should stay idle unless new input.\n\nLikely cause: Codex idle detection uses file mtime or process signals that fluctuate after work complete."}}
|
|
165
|
+
{"type":"create","timestamp":"2026-01-28T20:01:18.24539Z","issue_id":"cic-b5d","payload":{"description":"The worktree config object (branch, base) passed to spawn_workers is silently ignored. Worktrees always create from HEAD regardless of specified base branch.\n\n**Expected:** `worktree: {branch: 'feature-x', base: 'develop'}` creates worktree based on develop\n**Actual:** Worktree is created from HEAD/main\n\n**Root cause:**\n1. spawn_workers only reads `use_worktree` boolean, ignores full config object\n2. create_local_worktree() doesn't accept branch/base parameters\n3. Uses auto-generated branch names from bead_id/annotation\n\n**Fix needed:**\n1. Parse worktree config dict in spawn_workers (branch, base fields)\n2. Add base parameter to create_local_worktree()\n3. Use `git worktree add -b \u003cbranch\u003e \u003cpath\u003e \u003cbase\u003e` when base specified","priority":"1","title":"worktree.base parameter is silently ignored in spawn_workers","type":"bug"}}
|
|
166
|
+
{"type":"create","timestamp":"2026-01-28T20:03:14.928016Z","issue_id":"cic-774","payload":{"description":"spawn_workers intermittently fails with 'list index out of range' when there are existing managed sessions. Happens on both iTerm and tmux backends.\n\n**Symptoms:**\n- Worktree gets created successfully\n- Logs show 'Batch of 1 fits in existing window (X panes, Y slots)'\n- Then fails with 'list index out of range'\n\n**Workaround:** Close existing workers first, then spawn works\n\n**Likely cause:** Bug in find_available_window or split_pane logic when calculating available slots with existing sessions.\n\n**To reproduce:**\n1. Spawn a worker, let it idle\n2. Try to spawn another worker\n3. May fail with list index out of range","priority":"1","title":"spawn_workers fails with 'list index out of range' when existing sessions present","type":"bug"}}
|
|
167
|
+
{"type":"status_update","timestamp":"2026-01-28T20:05:56.214096Z","issue_id":"cic-b5d","payload":{"status":"in_progress"}}
|
|
168
|
+
{"type":"close","timestamp":"2026-01-28T20:09:42.814136Z","issue_id":"cic-b5d","payload":{}}
|
|
169
|
+
{"type":"status_update","timestamp":"2026-01-28T20:05:59.985932Z","issue_id":"cic-774","payload":{"status":"in_progress"}}
|
|
170
|
+
{"type":"close","timestamp":"2026-01-28T20:07:53.546775Z","issue_id":"cic-774","payload":{}}
|
|
171
|
+
{"type":"create","timestamp":"2026-01-28T21:12:30.144634Z","issue_id":"cic-c0b","payload":{"description":"close_workers sends /exit for all agent types, but this is Claude-specific. Codex uses Ctrl+C to quit (possibly double-press). Check session.agent_type before sending quit command.","priority":"1","title":"close_workers sends /exit for Codex workers - should use Ctrl+C","type":"bug"}}
|
|
172
|
+
{"type":"close","timestamp":"2026-01-28T21:25:28.072194Z","issue_id":"cic-c0b","payload":{}}
|
|
173
|
+
{"type":"status_update","timestamp":"2026-01-28T21:31:46.873894Z","issue_id":"cic-c0b","payload":{"status":"in_progress"}}
|
|
174
|
+
{"type":"create","timestamp":"2026-01-28T23:30:36.114182Z","issue_id":"cic-2d8","payload":{"description":"Update tmux window naming to '\u003cname\u003e | \u003cproject\u003e [\u003cissue-id\u003e]'. Extract project basename, strip .worktrees prefix, include issue ID when present in annotation/metadata. Modify tmux backend naming logic.","priority":"2","title":"Improve tmux window naming format","type":"task"}}
|
|
175
|
+
{"type":"status_update","timestamp":"2026-01-28T23:30:37.913775Z","issue_id":"cic-2d8","payload":{"status":"in_progress"}}
|
|
176
|
+
{"type":"create","timestamp":"2026-01-28T23:35:11.079637Z","issue_id":"cic-ba6","payload":{"description":"When worktree config is explicitly provided and creation fails, spawn_workers should return an error instead of silently falling back. If no explicit config, still fallback but add warning in response.","priority":"1","title":"Fix silent worktree failure in spawn_workers","type":"bug"}}
|
|
177
|
+
{"type":"status_update","timestamp":"2026-01-28T23:35:13.299992Z","issue_id":"cic-ba6","payload":{"status":"in_progress"}}
|
|
178
|
+
{"type":"comment","timestamp":"2026-01-28T23:37:10.603849Z","issue_id":"cic-ba6","payload":{"body":"Implemented hard failure when explicit worktree config fails; added response warnings when fallback to repo."}}
|
|
179
|
+
{"type":"close","timestamp":"2026-01-28T23:37:13.690021Z","issue_id":"cic-ba6","payload":{}}
|
|
180
|
+
{"type":"comment","timestamp":"2026-01-28T23:37:28.697959Z","issue_id":"cic-2d8","payload":{"body":"Completed tmux window naming update: format now '\u003cname\u003e | \u003cproject\u003e [issue]'; project name derived from project_path with .worktrees handling; issue id pulled from bead or annotation. Updated tmux backend + spawn logic and added tests."}}
|
|
181
|
+
{"type":"close","timestamp":"2026-01-28T23:37:30.840687Z","issue_id":"cic-2d8","payload":{}}
|
|
182
|
+
{"type":"create","timestamp":"2026-01-30T17:41:16.111959Z","issue_id":"cic-345","payload":{"description":"Currently the tmux backend uses a single global 'claude-team' session for all workers. Instead, create a separate tmux session per project path. This makes it easier to monitor workers by project when using tmux locally.\n\nScope: investigate current session naming, identify all touch points, propose naming scheme, assess impact on discovery/adoption/cleanup.","priority":"2","title":"Per-project tmux sessions instead of global claude-team session","type":"task"}}
|
|
183
|
+
{"type":"status_update","timestamp":"2026-01-30T17:45:04.28959Z","issue_id":"cic-345","payload":{"status":"in_progress"}}
|
|
184
|
+
{"type":"close","timestamp":"2026-01-30T17:52:20.629185Z","issue_id":"cic-345","payload":{}}
|
|
Binary file
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
CLAUDE.md
|
|
@@ -7,6 +7,46 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
|
|
|
7
7
|
|
|
8
8
|
## [Unreleased]
|
|
9
9
|
|
|
10
|
+
## [0.8.0] - 2026-01-30
|
|
11
|
+
|
|
12
|
+
### Added
|
|
13
|
+
- **System-wide config file** (`~/.claude-team/config.json`): Centralized configuration replacing environment variables
|
|
14
|
+
- Typed dataclasses with JSON validation
|
|
15
|
+
- Version field for future migrations
|
|
16
|
+
- Precedence: env var → config file → built-in default
|
|
17
|
+
- **Config CLI**: `claude-team config init|show|get|set` commands
|
|
18
|
+
- **Per-project tmux sessions**: Each project gets its own tmux session (`claude-team-<slug>-<hash>`) instead of a single shared session
|
|
19
|
+
- Easier local monitoring — `tmux ls` shows projects separately
|
|
20
|
+
- Discovery scans all tmux panes and filters by managed prefix
|
|
21
|
+
|
|
22
|
+
### Fixed
|
|
23
|
+
- Worktree branch/directory names capped at 30 chars to avoid filesystem limits
|
|
24
|
+
- Test isolation from user config file (tests no longer affected by `~/.claude-team/config.json`)
|
|
25
|
+
|
|
26
|
+
### Changed
|
|
27
|
+
- Tmux `list_sessions` and discovery now scan all sessions with prefix filter instead of targeting a single session
|
|
28
|
+
|
|
29
|
+
## [0.7.0] - 2026-01-29
|
|
30
|
+
|
|
31
|
+
### Added
|
|
32
|
+
- **Tmux terminal backend**: Run workers in tmux sessions instead of iTerm2
|
|
33
|
+
- Terminal backend abstraction layer (`TerminalBackend` protocol)
|
|
34
|
+
- Backend auto-detection: uses tmux if `$TMUX` is set, otherwise iTerm
|
|
35
|
+
- `CLAUDE_TEAM_TERMINAL_BACKEND` env var for explicit backend selection
|
|
36
|
+
- One tmux window per worker with descriptive naming (`<name> | <project> [<issue>]`)
|
|
37
|
+
- Tmux discovery and adoption of orphaned worker sessions
|
|
38
|
+
- Codex discovery/adopt fallbacks for tmux
|
|
39
|
+
- New test suite: `tests/test_tmux_backend.py`
|
|
40
|
+
|
|
41
|
+
### Fixed
|
|
42
|
+
- Close Codex via Ctrl+C instead of `/exit`
|
|
43
|
+
- `wait_idle_workers` Codex idle detection
|
|
44
|
+
- Explicit worktree config now fails loudly instead of silent fallback
|
|
45
|
+
|
|
46
|
+
### Changed
|
|
47
|
+
- All tools refactored to operate on `TerminalSession` rather than iTerm-specific handles
|
|
48
|
+
- Default behavior (no explicit worktree config) still falls back but returns warnings
|
|
49
|
+
|
|
10
50
|
## [0.6.1] - 2026-01-21
|
|
11
51
|
|
|
12
52
|
### Fixed
|
|
@@ -2,6 +2,25 @@
|
|
|
2
2
|
|
|
3
3
|
An MCP server that enables a "manager" Claude Code session to spawn and orchestrate multiple "worker" Claude Code sessions via iTerm2.
|
|
4
4
|
|
|
5
|
+
## ⚠️ IMPORTANT: Running Tests
|
|
6
|
+
|
|
7
|
+
**Always use `uv run pytest` to run tests.** Do NOT use `pytest` directly.
|
|
8
|
+
|
|
9
|
+
```bash
|
|
10
|
+
# Run all tests
|
|
11
|
+
uv run pytest
|
|
12
|
+
|
|
13
|
+
# Run specific test file
|
|
14
|
+
uv run pytest tests/test_tmux_backend.py
|
|
15
|
+
|
|
16
|
+
# Run with verbose output
|
|
17
|
+
uv run pytest -v
|
|
18
|
+
```
|
|
19
|
+
|
|
20
|
+
If you get "pytest not found" or similar errors, run `uv sync` first to install dependencies.
|
|
21
|
+
|
|
22
|
+
**DO NOT use:** `pytest`, `python -m pytest`, or `python3 -m pytest` — these will fail.
|
|
23
|
+
|
|
5
24
|
## Project Structure
|
|
6
25
|
|
|
7
26
|
```
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
Metadata-Version: 2.4
|
|
2
2
|
Name: claude-team-mcp
|
|
3
|
-
Version: 0.6.1
|
|
3
|
+
Version: 0.8.0
|
|
4
4
|
Summary: MCP server for managing multiple Claude Code sessions via iTerm2
|
|
5
5
|
Project-URL: Homepage, https://github.com/Martian-Engineering/claude-team
|
|
6
6
|
Project-URL: Repository, https://github.com/Martian-Engineering/claude-team
|
|
@@ -1,6 +1,6 @@
|
|
|
1
1
|
[project]
|
|
2
2
|
name = "claude-team-mcp"
|
|
3
|
-
version = "0.
|
|
3
|
+
version = "0.8.0"
|
|
4
4
|
description = "MCP server for managing multiple Claude Code sessions via iTerm2"
|
|
5
5
|
readme = "README.md"
|
|
6
6
|
requires-python = ">=3.11"
|
|
@@ -48,7 +48,7 @@ requires = ["hatchling"]
|
|
|
48
48
|
build-backend = "hatchling.build"
|
|
49
49
|
|
|
50
50
|
[tool.hatch.build.targets.wheel]
|
|
51
|
-
packages = ["src/claude_team_mcp"]
|
|
51
|
+
packages = ["src/claude_team", "src/claude_team_mcp"]
|
|
52
52
|
|
|
53
53
|
[tool.pytest.ini_options]
|
|
54
54
|
asyncio_mode = "auto"
|
|
@@ -0,0 +1,105 @@
|
|
|
1
|
+
#!/usr/bin/env bash
|
|
2
|
+
# team-status.sh - Formatted team overview
|
|
3
|
+
# Usage: team-status.sh [--json] [--project <filter>]
|
|
4
|
+
|
|
5
|
+
set -euo pipefail
|
|
6
|
+
|
|
7
|
+
JSON_OUTPUT=false
|
|
8
|
+
PROJECT_FILTER=""
|
|
9
|
+
IDLE_TIMEOUT_MINUTES=10
|
|
10
|
+
|
|
11
|
+
while [[ $# -gt 0 ]]; do
|
|
12
|
+
case "$1" in
|
|
13
|
+
--json)
|
|
14
|
+
JSON_OUTPUT=true
|
|
15
|
+
shift
|
|
16
|
+
;;
|
|
17
|
+
--project)
|
|
18
|
+
PROJECT_FILTER="${2:-}"
|
|
19
|
+
shift 2
|
|
20
|
+
;;
|
|
21
|
+
--project=*)
|
|
22
|
+
PROJECT_FILTER="${1#*=}"
|
|
23
|
+
shift
|
|
24
|
+
;;
|
|
25
|
+
-h|--help)
|
|
26
|
+
echo "Usage: team-status.sh [--json] [--project <filter>]"
|
|
27
|
+
exit 0
|
|
28
|
+
;;
|
|
29
|
+
*)
|
|
30
|
+
echo "Unknown option: $1" >&2
|
|
31
|
+
exit 1
|
|
32
|
+
;;
|
|
33
|
+
esac
|
|
34
|
+
done
|
|
35
|
+
|
|
36
|
+
if [[ -n "$PROJECT_FILTER" ]]; then
|
|
37
|
+
WORKERS_JSON=$(mcporter call claude-team-http.list_workers project_filter="$PROJECT_FILTER" 2>/dev/null || echo '{"workers":[],"count":0}')
|
|
38
|
+
else
|
|
39
|
+
WORKERS_JSON=$(mcporter call claude-team-http.list_workers 2>/dev/null || echo '{"workers":[],"count":0}')
|
|
40
|
+
fi
|
|
41
|
+
|
|
42
|
+
COUNT=$(echo "$WORKERS_JSON" | jq -r '.count // 0')
|
|
43
|
+
|
|
44
|
+
if [[ "$JSON_OUTPUT" == "true" ]]; then
|
|
45
|
+
echo "$WORKERS_JSON"
|
|
46
|
+
exit 0
|
|
47
|
+
fi
|
|
48
|
+
|
|
49
|
+
HEADER="Claude Team"
|
|
50
|
+
if [[ -n "$PROJECT_FILTER" ]]; then
|
|
51
|
+
HEADER+=" (project: $PROJECT_FILTER)"
|
|
52
|
+
fi
|
|
53
|
+
|
|
54
|
+
if [[ "$COUNT" -eq 0 ]]; then
|
|
55
|
+
echo "$HEADER - No active workers"
|
|
56
|
+
exit 0
|
|
57
|
+
fi
|
|
58
|
+
|
|
59
|
+
if [[ "$COUNT" -eq 1 ]]; then
|
|
60
|
+
echo "$HEADER - 1 worker"
|
|
61
|
+
else
|
|
62
|
+
echo "$HEADER - $COUNT workers"
|
|
63
|
+
fi
|
|
64
|
+
|
|
65
|
+
echo ""
|
|
66
|
+
|
|
67
|
+
# Format each worker
|
|
68
|
+
echo "$WORKERS_JSON" | jq -r '.workers[] | @base64' | while read -r worker_b64; do
|
|
69
|
+
worker=$(echo "$worker_b64" | base64 -d)
|
|
70
|
+
|
|
71
|
+
name=$(echo "$worker" | jq -r '.name // "unnamed"')
|
|
72
|
+
session_id=$(echo "$worker" | jq -r '.session_id // "?"' | cut -c1-8)
|
|
73
|
+
main_repo=$(echo "$worker" | jq -r '.main_repo_path // .project_path // "unknown"' | xargs basename)
|
|
74
|
+
annotation=$(echo "$worker" | jq -r '.coordinator_annotation // .bead // "No description"')
|
|
75
|
+
msg_count=$(echo "$worker" | jq -r '.message_count // 0')
|
|
76
|
+
is_idle=$(echo "$worker" | jq -r '.is_idle // false')
|
|
77
|
+
agent_type=$(echo "$worker" | jq -r '.agent_type // "claude"')
|
|
78
|
+
claude_session_id=$(echo "$worker" | jq -r '.claude_session_id // ""')
|
|
79
|
+
project_path=$(echo "$worker" | jq -r '.project_path // ""')
|
|
80
|
+
|
|
81
|
+
status="active"
|
|
82
|
+
if [[ "$is_idle" == "true" ]]; then
|
|
83
|
+
status="idle"
|
|
84
|
+
elif [[ -n "$claude_session_id" && -n "$project_path" ]]; then
|
|
85
|
+
project_slug=$(echo "$project_path" | sed 's|/|-|g; s|\.|-|g')
|
|
86
|
+
jsonl_path="$HOME/.claude/projects/${project_slug}/${claude_session_id}.jsonl"
|
|
87
|
+
|
|
88
|
+
if [[ -f "$jsonl_path" ]]; then
|
|
89
|
+
now=$(date +%s)
|
|
90
|
+
file_mtime=$(stat -f %m "$jsonl_path" 2>/dev/null || stat -c %Y "$jsonl_path" 2>/dev/null || echo "$now")
|
|
91
|
+
age_seconds=$((now - file_mtime))
|
|
92
|
+
age_minutes=$((age_seconds / 60))
|
|
93
|
+
|
|
94
|
+
if [[ $age_minutes -ge $IDLE_TIMEOUT_MINUTES ]]; then
|
|
95
|
+
status="idle (${age_minutes}m)"
|
|
96
|
+
fi
|
|
97
|
+
fi
|
|
98
|
+
fi
|
|
99
|
+
|
|
100
|
+
echo "$name [$agent_type] ($session_id)"
|
|
101
|
+
echo " repo: $main_repo"
|
|
102
|
+
echo " note: $annotation"
|
|
103
|
+
echo " msgs: $msg_count, status: $status"
|
|
104
|
+
echo ""
|
|
105
|
+
done
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
"""Core modules for the claude-team tooling."""
|
|
2
|
+
|
|
3
|
+
from .idle_detection import Worker, check_file_idle, detect_worker_idle, get_claude_jsonl_path, get_project_slug
|
|
4
|
+
|
|
5
|
+
__all__ = [
|
|
6
|
+
"Worker",
|
|
7
|
+
"check_file_idle",
|
|
8
|
+
"detect_worker_idle",
|
|
9
|
+
"get_claude_jsonl_path",
|
|
10
|
+
"get_project_slug",
|
|
11
|
+
]
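For orientation, the path logic these helpers appear to encapsulate (judging from `team-status.sh` above, which builds the same `~/.claude/projects/<slug>/<session>.jsonl` path by hand) looks roughly like this; the real signatures in `claude_team.idle_detection` may differ.

```python
"""Rough equivalents of get_project_slug / get_claude_jsonl_path (signatures assumed)."""
from pathlib import Path


def get_project_slug(project_path: str) -> str:
    # Claude Code stores transcripts under ~/.claude/projects/<slug>/, where the slug
    # replaces '/' and '.' with '-' (the same transformation team-status.sh applies with sed).
    return project_path.replace("/", "-").replace(".", "-")


def get_claude_jsonl_path(project_path: str, claude_session_id: str) -> Path:
    return (Path.home() / ".claude" / "projects"
            / get_project_slug(project_path) / f"{claude_session_id}.jsonl")
```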
|