claude-team-mcp 0.8.0__py3-none-any.whl → 0.9.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -4,6 +4,7 @@ List workers tool.
 Provides list_workers for viewing all managed Claude Code sessions.
 """
 
+import logging
 from pathlib import Path
 from typing import TYPE_CHECKING
 
@@ -16,6 +17,8 @@ if TYPE_CHECKING:
 from ..registry import SessionStatus
 from ..utils import error_response
 
+logger = logging.getLogger("claude-team-mcp")
+
 
 def register_tools(mcp: FastMCP) -> None:
     """Register list_workers tool on the MCP server."""
@@ -44,6 +47,22 @@ def register_tools(mcp: FastMCP) -> None:
         app_ctx = ctx.request_context.lifespan_context
         registry = app_ctx.registry
 
+        # Lazy fallback: if registry is empty and recovery hasn't been attempted,
+        # try to recover from the event log. This handles edge cases where startup
+        # recovery may have failed or wasn't triggered.
+        from ..server import is_recovery_attempted, recover_registry
+
+        if not is_recovery_attempted() and len(registry.list_all()) == 0:
+            logger.info("Registry empty on first list_workers call, attempting lazy recovery...")
+            report = recover_registry(registry)
+            if report is not None:
+                logger.info(
+                    "Lazy recovery complete: added=%d, skipped=%d, closed=%d",
+                    report.added,
+                    report.skipped,
+                    report.closed,
+                )
+
         # Get sessions, optionally filtered by status
         if status_filter:
             try:
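For context, a minimal sketch of the one-shot guard this hunk relies on. Only is_recovery_attempted, recover_registry, and the report's added/skipped/closed fields appear in the diff; the module-level flag and the RecoveryReport dataclass below are assumptions for illustration, not the package's actual implementation.

from dataclasses import dataclass

@dataclass
class RecoveryReport:
    # Hypothetical container; only these three counters are implied by the diff.
    added: int = 0
    skipped: int = 0
    closed: int = 0

_recovery_attempted = False  # assumed module-level flag in ..server

def is_recovery_attempted() -> bool:
    return _recovery_attempted

def recover_registry(registry) -> RecoveryReport | None:
    # Mark recovery as attempted so the lazy fallback in list_workers runs at most
    # once, then replay persisted worker events into the registry (details omitted).
    global _recovery_attempted
    _recovery_attempted = True
    return RecoveryReport()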
@@ -84,17 +103,26 @@ def register_tools(mcp: FastMCP) -> None:
                filtered_sessions.append(session)
            sessions = filtered_sessions
 
-        # Sort by created_at
-        sessions = sorted(sessions, key=lambda s: s.created_at)
+        # Sort by created_at (normalize to UTC-aware for mixed live/recovered)
+        from datetime import timezone as _tz
+
+        def _sort_key(s):
+            dt = s.created_at
+            if dt.tzinfo is None:
+                dt = dt.replace(tzinfo=_tz.utc)
+            return dt
+
+        sessions = sorted(sessions, key=_sort_key)
 
         # Convert to dicts and add message count + idle status
         workers = []
         for session in sessions:
             info = session.to_dict()
-            # Try to get conversation stats
-            state = session.get_conversation_state()
-            if state:
-                info["message_count"] = state.message_count
+            # Try to get conversation stats (only available on live ManagedSessions)
+            if hasattr(session, "get_conversation_state"):
+                state = session.get_conversation_state()
+                if state:
+                    info["message_count"] = state.message_count
             # Check idle using stop hook detection
             info["is_idle"] = session.is_idle()
             workers.append(info)
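Why the replacement sort key is needed, shown as a standalone snippet (illustrative only, not taken from the package): live sessions may carry naive created_at timestamps while recovered sessions carry UTC-aware ones, and Python refuses to compare the two.

from datetime import datetime, timezone

naive = datetime(2026, 1, 27, 11, 40)                      # e.g. a live session's created_at
aware = datetime(2026, 1, 27, 12, 0, tzinfo=timezone.utc)  # e.g. a recovered session's created_at

try:
    sorted([naive, aware])
except TypeError as exc:
    print(f"unsorted: {exc}")  # can't compare offset-naive and offset-aware datetimes

def _sort_key(dt: datetime) -> datetime:
    # Assume naive timestamps are UTC, mirroring the change above.
    return dt.replace(tzinfo=timezone.utc) if dt.tzinfo is None else dt

print(sorted([aware, naive], key=_sort_key))  # now sorts cleanly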
@@ -18,6 +18,7 @@ from claude_team.events import WorkerEvent
 if TYPE_CHECKING:
     from ..server import AppContext
 
+from ..config import load_config
 from ..utils import error_response
 
 
@@ -120,7 +121,7 @@ def register_tools(mcp: FastMCP) -> None:
     async def poll_worker_changes(
         ctx: Context[ServerSession, "AppContext"],
         since: str | None = None,
-        stale_threshold_minutes: int = 20,
+        stale_threshold_minutes: int | None = None,
         include_snapshots: bool = False,
     ) -> dict:
         """
@@ -132,6 +133,7 @@ def register_tools(mcp: FastMCP) -> None:
         Args:
             since: ISO timestamp to filter events from (inclusive), or None for latest.
             stale_threshold_minutes: Minutes without activity before a worker is marked stuck.
+                Defaults to the value in ~/.claude-team/config.json (events.stale_threshold_minutes).
             include_snapshots: Whether to include snapshot events in the response.
 
         Returns:
@@ -145,6 +147,11 @@ def register_tools(mcp: FastMCP) -> None:
         app_ctx = ctx.request_context.lifespan_context
         registry = app_ctx.registry
 
+        # Resolve stale threshold: tool param overrides config default.
+        if stale_threshold_minutes is None:
+            config = load_config()
+            stale_threshold_minutes = config.events.stale_threshold_minutes
+
         # Validate inputs before reading the log.
         if stale_threshold_minutes <= 0:
             return error_response(
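A rough sketch of the config shape this resolution step implies. The diff only shows load_config() and config.events.stale_threshold_minutes, plus the ~/.claude-team/config.json path mentioned in the docstring; the dataclass names and the fallback default below are assumptions about the real loader, not its actual code.

import json
from dataclasses import dataclass, field
from pathlib import Path

@dataclass
class EventsConfig:
    stale_threshold_minutes: int = 20  # assumed fallback default

@dataclass
class Config:
    events: EventsConfig = field(default_factory=EventsConfig)

def load_config(path: Path = Path.home() / ".claude-team" / "config.json") -> Config:
    # Return built-in defaults when no config file exists.
    if not path.exists():
        return Config()
    raw = json.loads(path.read_text())
    events = raw.get("events", {})
    return Config(events=EventsConfig(
        stale_threshold_minutes=int(events.get("stale_threshold_minutes", 20)),
    ))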
@@ -0,0 +1,273 @@
+"""
+Worker events tool.
+
+Provides worker_events for querying the event log with optional summary and snapshot.
+"""
+
+from __future__ import annotations
+
+from dataclasses import asdict
+from datetime import datetime, timezone
+from typing import TYPE_CHECKING
+
+from mcp.server.fastmcp import Context, FastMCP
+from mcp.server.session import ServerSession
+
+from claude_team import events as events_module
+from claude_team.events import WorkerEvent
+
+if TYPE_CHECKING:
+    from ..server import AppContext
+
+
+def _parse_iso_timestamp(value: str) -> datetime | None:
+    """Parse ISO timestamps for query filtering."""
+    value = value.strip()
+    if not value:
+        return None
+    # Normalize Zulu timestamps for fromisoformat.
+    if value.endswith("Z"):
+        value = value[:-1] + "+00:00"
+    try:
+        parsed = datetime.fromisoformat(value)
+    except ValueError:
+        return None
+    # Default to UTC when no timezone is provided.
+    if parsed.tzinfo is None:
+        return parsed.replace(tzinfo=timezone.utc)
+    return parsed.astimezone(timezone.utc)
+
+
+def _serialize_event(event: WorkerEvent) -> dict:
+    """Convert a WorkerEvent into a JSON-serializable payload."""
+    return {
+        "ts": event.ts,
+        "type": event.type,
+        "worker_id": event.worker_id,
+        "data": event.data,
+    }
+
+
+def _event_project(event: WorkerEvent) -> str | None:
+    """Extract a project identifier from event data."""
+    data = event.data or {}
+    for key in ("project", "project_path"):
+        value = data.get(key)
+        if value:
+            return str(value)
+    return None
+
+
+def _filter_by_project(events: list[WorkerEvent], project_filter: str) -> list[WorkerEvent]:
+    """Filter events to only those matching the project filter."""
+    filtered = []
+    for event in events:
+        project = _event_project(event)
+        # Include events with no project (e.g. snapshots) or matching project.
+        if project is None or project_filter in project:
+            filtered.append(event)
+    return filtered
+
+
+def _build_summary(
+    events: list[WorkerEvent],
+    stale_threshold_minutes: int,
+) -> dict:
+    """
+    Build summary from the event window.
+
+    Returns:
+        Dict with started, closed, idle, active, stuck lists and last_event_ts.
+    """
+    # Track worker states from events.
+    started: list[str] = []
+    closed: list[str] = []
+    idle: list[str] = []
+    active: list[str] = []
+
+    # Track last known state and activity time per worker.
+    last_state: dict[str, str] = {}
+    last_activity: dict[str, datetime] = {}
+
+    last_event_ts: str | None = None
+
+    for event in events:
+        # Track latest event timestamp.
+        last_event_ts = event.ts
+
+        worker_id = event.worker_id
+        if not worker_id:
+            # Handle snapshot events for state tracking.
+            if event.type == "snapshot":
+                _process_snapshot_for_summary(
+                    event.data,
+                    event.ts,
+                    last_state,
+                    last_activity,
+                )
+            continue
+
+        # Update activity time.
+        ts = _parse_iso_timestamp(event.ts)
+        if ts:
+            last_activity[worker_id] = ts
+
+        if event.type == "worker_started":
+            started.append(worker_id)
+            last_state[worker_id] = "active"
+        elif event.type == "worker_closed":
+            closed.append(worker_id)
+            last_state[worker_id] = "closed"
+        elif event.type == "worker_idle":
+            idle.append(worker_id)
+            last_state[worker_id] = "idle"
+        elif event.type == "worker_active":
+            active.append(worker_id)
+            last_state[worker_id] = "active"
+
+    # Compute stuck workers: active with last_activity > threshold.
+    stuck: list[str] = []
+    now = datetime.now(timezone.utc)
+    threshold_seconds = stale_threshold_minutes * 60
+
+    for worker_id, state in last_state.items():
+        if state != "active":
+            continue
+        activity_ts = last_activity.get(worker_id)
+        if activity_ts is None:
+            continue
+        elapsed = (now - activity_ts).total_seconds()
+        if elapsed > threshold_seconds:
+            stuck.append(worker_id)
+
+    return {
+        "started": started,
+        "closed": closed,
+        "idle": idle,
+        "active": active,
+        "stuck": stuck,
+        "last_event_ts": last_event_ts,
+    }
+
+
+def _process_snapshot_for_summary(
+    data: dict,
+    event_ts: str,
+    last_state: dict[str, str],
+    last_activity: dict[str, datetime],
+) -> None:
+    """Update state tracking from a snapshot event."""
+    workers = data.get("workers")
+    if not isinstance(workers, list):
+        return
+
+    ts = _parse_iso_timestamp(event_ts)
+
+    for worker in workers:
+        if not isinstance(worker, dict):
+            continue
+
+        # Find worker ID from various possible keys.
+        worker_id = None
+        for key in ("session_id", "worker_id", "id"):
+            value = worker.get(key)
+            if value:
+                worker_id = str(value)
+                break
+
+        if not worker_id:
+            continue
+
+        # Extract state from snapshot.
+        state = worker.get("state")
+        if isinstance(state, str) and state:
+            last_state[worker_id] = state
+            if ts and state == "active":
+                last_activity[worker_id] = ts
+
+
+def register_tools(mcp: FastMCP) -> None:
+    """Register worker_events tool on the MCP server."""
+
+    @mcp.tool()
+    async def worker_events(
+        ctx: Context[ServerSession, "AppContext"],
+        since: str | None = None,
+        limit: int = 1000,
+        include_snapshot: bool = False,
+        include_summary: bool = False,
+        stale_threshold_minutes: int = 10,
+        project_filter: str | None = None,
+    ) -> dict:
+        """
+        Query worker events from the event log.
+
+        Provides access to the persisted worker event log with optional summary
+        aggregation and snapshot inclusion. This is the primary API for external
+        consumers to monitor worker lifecycle events.
+
+        Args:
+            since: ISO 8601 timestamp; returns events at or after this time.
+                If omitted, returns most recent events (bounded by limit).
+            limit: Maximum number of events returned (default 1000).
+            include_snapshot: If true, include the latest snapshot event in response.
+            include_summary: If true, include summary aggregates (started, closed,
+                idle, active, stuck lists).
+            stale_threshold_minutes: Minutes without activity before a worker is
+                marked stuck (only used when include_summary=true, default 10).
+            project_filter: Optional project path substring to filter events.
+
+        Returns:
+            Dict with:
+            - events: List of worker events [{ts, type, worker_id, data}]
+            - count: Number of events returned
+            - summary: (if include_summary) Aggregates from event window:
+                - started: worker IDs that started
+                - closed: worker IDs that closed
+                - idle: worker IDs that became idle
+                - active: worker IDs that became active
+                - stuck: active workers with last_activity > stale_threshold
+                - last_event_ts: newest event timestamp
+            - snapshot: (if include_snapshot) Latest snapshot {ts, data}
+        """
+        # Parse the since timestamp if provided.
+        parsed_since = None
+        if since is not None and since.strip():
+            parsed_since = _parse_iso_timestamp(since)
+            if parsed_since is None:
+                return {
+                    "error": f"Invalid since timestamp: {since}",
+                    "hint": "Use ISO format like 2026-01-27T11:40:00Z",
+                }
+
+        # Read events from the log.
+        events = events_module.read_events_since(parsed_since, limit=limit)
+
+        # Apply project filter if specified.
+        if project_filter:
+            events = _filter_by_project(events, project_filter)
+
+        # Build the response.
+        response: dict = {
+            "events": [_serialize_event(event) for event in events],
+            "count": len(events),
+        }
+
+        # Add summary if requested.
+        if include_summary:
+            response["summary"] = _build_summary(events, stale_threshold_minutes)
+
+        # Add snapshot if requested.
+        if include_snapshot:
+            snapshot_data = events_module.get_latest_snapshot()
+            if snapshot_data is not None:
+                # Find the timestamp from the snapshot data or use a sentinel.
+                snapshot_ts = snapshot_data.get("ts")
+                response["snapshot"] = {
+                    "ts": snapshot_ts,
+                    "data": snapshot_data,
+                }
+            else:
+                response["snapshot"] = None
+
+        return response
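For orientation, a representative worker_events response assembled from the docstring and return-building code above (illustrative values only; worker IDs and timestamps are made up). The Z-suffix normalization in _parse_iso_timestamp matters because datetime.fromisoformat only accepts a trailing "Z" on Python 3.11 and later.

example_response = {
    "events": [
        {"ts": "2026-01-27T11:40:00+00:00", "type": "worker_started",
         "worker_id": "worker-a", "data": {"project": "/repos/demo"}},
        {"ts": "2026-01-27T11:55:00+00:00", "type": "worker_idle",
         "worker_id": "worker-a", "data": {}},
    ],
    "count": 2,
    "summary": {            # included only when include_summary=True
        "started": ["worker-a"],
        "closed": [],
        "idle": ["worker-a"],
        "active": [],
        "stuck": [],
        "last_event_ts": "2026-01-27T11:55:00+00:00",
    },
    "snapshot": None,       # included only when include_snapshot=True; None when no snapshot exists
}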
@@ -333,7 +333,7 @@ def create_local_worktree(
     if bead_id:
         # Bead-based naming: {bead_id}-{annotation}
         if annotation:
-            dir_name = f"{bead_id}-{slugify(annotation)}"
+            dir_name = f"{bead_id}-{short_slug(annotation)}"
         else:
             dir_name = bead_id
     else:
@@ -341,7 +341,7 @@ def create_local_worktree(
         short_uuid = uuid.uuid4().hex[:8]
         name_slug = slugify(worker_name)
         if annotation:
-            dir_name = f"{name_slug}-{short_uuid}-{slugify(annotation)}"
+            dir_name = f"{name_slug}-{short_uuid}-{short_slug(annotation)}"
         else:
             dir_name = f"{name_slug}-{short_uuid}"
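The slugify → short_slug swap presumably caps the annotation portion of worktree directory names. The helper below is a hypothetical stand-in to show the idea; the package's real short_slug and its length limit are not shown in this diff.

import re

def short_slug(text: str, max_len: int = 24) -> str:
    # Lowercase, collapse non-alphanumerics to hyphens, then truncate (assumed behavior).
    slug = re.sub(r"[^a-z0-9]+", "-", text.lower()).strip("-")
    return slug[:max_len].rstrip("-")

# e.g. short_slug("Add retry logic to the event-log writer (phase 2)")
# -> "add-retry-logic-to-the-e"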