claude-team-mcp 0.6.1__py3-none-any.whl → 0.8.0__py3-none-any.whl

This diff shows the contents of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in the public registry.
Files changed (35)
  1. claude_team/__init__.py +11 -0
  2. claude_team/events.py +501 -0
  3. claude_team/idle_detection.py +173 -0
  4. claude_team/poller.py +245 -0
  5. claude_team_mcp/cli_backends/__init__.py +4 -2
  6. claude_team_mcp/cli_backends/claude.py +45 -5
  7. claude_team_mcp/cli_backends/codex.py +44 -3
  8. claude_team_mcp/config.py +350 -0
  9. claude_team_mcp/config_cli.py +263 -0
  10. claude_team_mcp/idle_detection.py +16 -3
  11. claude_team_mcp/issue_tracker/__init__.py +68 -3
  12. claude_team_mcp/iterm_utils.py +5 -73
  13. claude_team_mcp/registry.py +43 -26
  14. claude_team_mcp/server.py +164 -61
  15. claude_team_mcp/session_state.py +364 -2
  16. claude_team_mcp/terminal_backends/__init__.py +49 -0
  17. claude_team_mcp/terminal_backends/base.py +106 -0
  18. claude_team_mcp/terminal_backends/iterm.py +251 -0
  19. claude_team_mcp/terminal_backends/tmux.py +683 -0
  20. claude_team_mcp/tools/__init__.py +4 -2
  21. claude_team_mcp/tools/adopt_worker.py +89 -32
  22. claude_team_mcp/tools/close_workers.py +39 -10
  23. claude_team_mcp/tools/discover_workers.py +176 -32
  24. claude_team_mcp/tools/list_workers.py +29 -0
  25. claude_team_mcp/tools/message_workers.py +35 -5
  26. claude_team_mcp/tools/poll_worker_changes.py +227 -0
  27. claude_team_mcp/tools/spawn_workers.py +254 -153
  28. claude_team_mcp/tools/wait_idle_workers.py +1 -0
  29. claude_team_mcp/utils/errors.py +7 -3
  30. claude_team_mcp/worktree.py +73 -12
  31. {claude_team_mcp-0.6.1.dist-info → claude_team_mcp-0.8.0.dist-info}/METADATA +1 -1
  32. claude_team_mcp-0.8.0.dist-info/RECORD +54 -0
  33. claude_team_mcp-0.6.1.dist-info/RECORD +0 -43
  34. {claude_team_mcp-0.6.1.dist-info → claude_team_mcp-0.8.0.dist-info}/WHEEL +0 -0
  35. {claude_team_mcp-0.6.1.dist-info → claude_team_mcp-0.8.0.dist-info}/entry_points.txt +0 -0
claude_team/__init__.py ADDED
@@ -0,0 +1,11 @@
+ """Core modules for the claude-team tooling."""
+
+ from .idle_detection import Worker, check_file_idle, detect_worker_idle, get_claude_jsonl_path, get_project_slug
+
+ __all__ = [
+     "Worker",
+     "check_file_idle",
+     "detect_worker_idle",
+     "get_claude_jsonl_path",
+     "get_project_slug",
+ ]
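The re-exports above mean downstream code can pull the idle-detection helpers straight from the package root; a minimal sketch:

from claude_team import Worker, detect_worker_idle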
claude_team/events.py ADDED
@@ -0,0 +1,501 @@
+ """Event log persistence for worker lifecycle activity."""
+
+ from __future__ import annotations
+
+ import json
+ import logging
+ import os
+ from dataclasses import dataclass
+ from datetime import date, datetime, timedelta, timezone
+ from pathlib import Path
+ from typing import Literal
+
+ from claude_team_mcp.config import ConfigError, EventsConfig, load_config
+
+ try:
+     import fcntl
+ except ImportError:  # pragma: no cover - platform-specific
+     fcntl = None
+
+ try:
+     import msvcrt
+ except ImportError:  # pragma: no cover - platform-specific
+     msvcrt = None
+
+ logger = logging.getLogger("claude-team-mcp")
+
+
+ EventType = Literal[
+     "snapshot",
+     "worker_started",
+     "worker_idle",
+     "worker_active",
+     "worker_closed",
+ ]
+
+
+ def _int_env(name: str, default: int) -> int:
+     # Parse integer environment overrides with a safe fallback.
+     value = os.environ.get(name)
+     if value is None or value == "":
+         return default
+     try:
+         return int(value)
+     except ValueError:
+         return default
+
+
+ def _load_rotation_config() -> EventsConfig:
+     # Resolve rotation defaults from config, applying env overrides.
+     try:
+         config = load_config()
+         events_config = config.events
+     except ConfigError as exc:
+         logger.warning(
+             "Invalid config file; using default event rotation config: %s", exc
+         )
+         events_config = EventsConfig()
+     return EventsConfig(
+         max_size_mb=_int_env("CLAUDE_TEAM_EVENTS_MAX_SIZE_MB", events_config.max_size_mb),
+         recent_hours=_int_env("CLAUDE_TEAM_EVENTS_RECENT_HOURS", events_config.recent_hours),
+     )
+
+
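For illustration, the environment override path can be exercised as below. _load_rotation_config is module-private, so this is expository rather than a public-API example:

import os

os.environ["CLAUDE_TEAM_EVENTS_MAX_SIZE_MB"] = "100"
config = _load_rotation_config()  # max_size_mb is now 100; malformed values fall back to the config default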
+ @dataclass
+ class WorkerEvent:
+     """Represents a persisted worker event."""
+
+     ts: str
+     type: EventType
+     worker_id: str | None
+     data: dict
+
+
+ def get_events_path() -> Path:
+     """Returns ~/.claude-team/events.jsonl, creating parent dir if needed."""
+     base_dir = Path.home() / ".claude-team"
+     base_dir.mkdir(parents=True, exist_ok=True)
+     return base_dir / "events.jsonl"
+
+
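Each record is a single JSON object per line in ~/.claude-team/events.jsonl. A representative line (the worker id and data payload shape are illustrative, not mandated by the schema):

{"ts": "2026-02-03T04:05:06+00:00", "type": "worker_idle", "worker_id": "worker-1", "data": {"reason": "jsonl_mtime:300s"}}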
+ def append_event(event: WorkerEvent) -> None:
+     """Append single event to log file (atomic write with file locking)."""
+     append_events([event])
+
+
+ def _event_to_dict(event: WorkerEvent) -> dict:
+     """Convert WorkerEvent to dict without using asdict (avoids deepcopy issues)."""
+     return {
+         "ts": event.ts,
+         "type": event.type,
+         "worker_id": event.worker_id,
+         "data": event.data,  # Already sanitized by caller
+     }
+
+
+ def append_events(events: list[WorkerEvent]) -> None:
+     """Append multiple events atomically."""
+     if not events:
+         return
+
+     path = get_events_path()
+     if not path.exists():
+         path.touch()
+     # Serialize upfront so the file write is a single, ordered block.
+     # Use _event_to_dict instead of asdict to avoid deepcopy pickle issues.
+     payloads = [json.dumps(_event_to_dict(event), ensure_ascii=False) for event in events]
+     block = "\n".join(payloads) + "\n"
+     event_ts = _latest_event_timestamp(events)
+     rotation_config = _load_rotation_config()
+
+     with path.open("r+", encoding="utf-8") as handle:
+         _lock_file(handle)
+         try:
+             _rotate_events_log_locked(
+                 handle,
+                 path,
+                 current_ts=event_ts,
+                 max_size_mb=rotation_config.max_size_mb,
+                 recent_hours=rotation_config.recent_hours,
+             )
+             # Hold the lock across the entire write and flush cycle.
+             handle.seek(0, os.SEEK_END)
+             handle.write(block)
+             handle.flush()
+             os.fsync(handle.fileno())
+         finally:
+             _unlock_file(handle)
+
+
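A minimal usage sketch of the append path (worker id and payload are illustrative):

from datetime import datetime, timezone

from claude_team.events import WorkerEvent, append_event

append_event(
    WorkerEvent(
        ts=datetime.now(timezone.utc).isoformat(),
        type="worker_started",
        worker_id="worker-1",
        data={"project": "/tmp/demo"},
    )
)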
+ def read_events_since(
+     since: datetime | None = None,
+     limit: int = 1000,
+ ) -> list[WorkerEvent]:
+     """Read events from log, optionally filtered by timestamp."""
+     if limit <= 0:
+         return []
+
+     path = get_events_path()
+     if not path.exists():
+         return []
+
+     normalized_since = _normalize_since(since)
+     events: list[WorkerEvent] = []
+
+     with path.open("r", encoding="utf-8") as handle:
+         # Stream the file so we don't load the entire log into memory.
+         for line in handle:
+             line = line.strip()
+             if not line:
+                 continue
+
+             event = _parse_event(json.loads(line))
+             # Compare timestamps only when a filter is provided.
+             if normalized_since is not None:
+                 event_ts = _parse_timestamp(event.ts)
+                 if event_ts < normalized_since:
+                     continue
+
+             events.append(event)
+             # Keep only the most recent events within the requested limit.
+             if len(events) > limit:
+                 events.pop(0)
+
+     return events
+
+
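Polling the last hour of activity might then look like this sketch:

from datetime import datetime, timedelta, timezone

from claude_team.events import read_events_since

cutoff = datetime.now(timezone.utc) - timedelta(hours=1)
recent = read_events_since(since=cutoff, limit=100)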
+ def get_latest_snapshot() -> dict | None:
+     """Get most recent snapshot event for recovery."""
+     path = get_events_path()
+     if not path.exists():
+         return None
+
+     latest_snapshot: dict | None = None
+
+     with path.open("r", encoding="utf-8") as handle:
+         # Walk the log to track the latest snapshot without extra storage.
+         for line in handle:
+             line = line.strip()
+             if not line:
+                 continue
+
+             event = _parse_event(json.loads(line))
+             if event.type == "snapshot":
+                 latest_snapshot = event.data
+
+     return latest_snapshot
+
+
+ def rotate_events_log(
+     max_size_mb: int | None = None,
+     recent_hours: int | None = None,
+     now: datetime | None = None,
+ ) -> None:
+     """Rotate the log daily or by size, retaining active/recent workers."""
+     path = get_events_path()
+     if not path.exists():
+         return
+
+     current_ts = now or datetime.now(timezone.utc)
+     if max_size_mb is None or recent_hours is None:
+         rotation_config = _load_rotation_config()
+         if max_size_mb is None:
+             max_size_mb = rotation_config.max_size_mb
+         if recent_hours is None:
+             recent_hours = rotation_config.recent_hours
+
+     with path.open("r+", encoding="utf-8") as handle:
+         _lock_file(handle)
+         try:
+             _rotate_events_log_locked(
+                 handle,
+                 path,
+                 current_ts=current_ts,
+                 max_size_mb=max_size_mb,
+                 recent_hours=recent_hours,
+             )
+         finally:
+             _unlock_file(handle)
+
+
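Rotation normally happens implicitly inside append_events, but it can also be forced with explicit thresholds; a sketch:

from claude_team.events import rotate_events_log

# Rotate on a day boundary or once the log exceeds 10 MB, keeping
# events for workers seen within the last 24 hours.
rotate_events_log(max_size_mb=10, recent_hours=24)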
+ def _rotate_events_log_locked(
+     handle,
+     path: Path,
+     current_ts: datetime,
+     max_size_mb: int,
+     recent_hours: int,
+ ) -> None:
+     # Rotate the log while holding the caller's lock.
+     if not _should_rotate(path, current_ts, max_size_mb):
+         return
+
+     rotation_day = _rotation_day(path, current_ts)
+     backup_path = _backup_path(path, rotation_day)
+
+     last_seen, last_state = _copy_and_collect_activity(handle, backup_path)
+     keep_ids = _select_workers_to_keep(last_seen, last_state, current_ts, recent_hours)
+     retained_lines = _filter_retained_events(handle, keep_ids)
+
+     # Reset the log to only retained events.
+     handle.seek(0)
+     handle.truncate(0)
+     if retained_lines:
+         handle.write("\n".join(retained_lines) + "\n")
+     handle.flush()
+     os.fsync(handle.fileno())
+
+
+ def _should_rotate(path: Path, current_ts: datetime, max_size_mb: int) -> bool:
+     # Decide whether a daily or size-based rotation is needed.
+     if not path.exists():
+         return False
+
+     current_day = current_ts.astimezone(timezone.utc).date()
+     last_write = datetime.fromtimestamp(path.stat().st_mtime, tz=timezone.utc)
+     last_day = last_write.date()
+     if last_day != current_day:
+         return True
+
+     if max_size_mb <= 0:
+         return False
+     max_bytes = max_size_mb * 1024 * 1024
+     return path.stat().st_size > max_bytes
+
+
+ def _rotation_day(path: Path, current_ts: datetime) -> date:
+     # Use the last write date for backups to align with daily rotations.
+     if not path.exists():
+         return current_ts.astimezone(timezone.utc).date()
+     last_write = datetime.fromtimestamp(path.stat().st_mtime, tz=timezone.utc)
+     return last_write.date()
+
+
+ def _backup_path(path: Path, rotation_day: date) -> Path:
+     # Build a date-stamped backup path that avoids clobbering older files.
+     date_suffix = rotation_day.strftime("%Y-%m-%d")
+     candidate = path.with_name(f"{path.stem}.{date_suffix}{path.suffix}")
+     if not candidate.exists():
+         return candidate
+     index = 1
+     while True:
+         indexed = path.with_name(f"{path.stem}.{date_suffix}.{index}{path.suffix}")
+         if not indexed.exists():
+             return indexed
+         index += 1
+
+
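To make the naming concrete: for a rotation day of 2026-02-03 (date illustrative), the backup of events.jsonl lands at events.2026-02-03.jsonl; if that file already exists, the helper falls back to events.2026-02-03.1.jsonl, then .2, and so on.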
+ def _copy_and_collect_activity(handle, backup_path: Path) -> tuple[dict[str, datetime], dict[str, str]]:
+     # Copy the current log to a backup while recording worker activity.
+     last_seen: dict[str, datetime] = {}
+     last_state: dict[str, str] = {}
+     handle.seek(0)
+     with backup_path.open("w", encoding="utf-8") as backup:
+         for line in handle:
+             backup.write(line)
+             line = line.strip()
+             if not line:
+                 continue
+             # Ignore malformed JSON while copying the raw line.
+             try:
+                 payload = json.loads(line)
+             except json.JSONDecodeError:
+                 continue
+             event = _parse_event(payload)
+             _track_event_activity(event, last_seen, last_state)
+     return last_seen, last_state
+
+
+ def _track_event_activity(
+     event: WorkerEvent,
+     last_seen: dict[str, datetime],
+     last_state: dict[str, str],
+ ) -> None:
+     # Update last-seen and last-state maps from a worker event.
+     try:
+         event_ts = _parse_timestamp(event.ts)
+     except ValueError:
+         return
+
+     if event.type == "snapshot":
+         _track_snapshot_activity(event.data, event_ts, last_seen, last_state)
+         return
+
+     if not event.worker_id:
+         return
+
+     last_seen[event.worker_id] = event_ts
+     state = _state_from_event_type(event.type)
+     if state:
+         last_state[event.worker_id] = state
+
+
+ def _track_snapshot_activity(
+     data: dict,
+     event_ts: datetime,
+     last_seen: dict[str, datetime],
+     last_state: dict[str, str],
+ ) -> None:
+     # Update state from snapshot payloads.
+     workers = data.get("workers")
+     if not isinstance(workers, list):
+         return
+     for worker in workers:
+         if not isinstance(worker, dict):
+             continue
+         worker_id = _snapshot_worker_id(worker)
+         if not worker_id:
+             continue
+         state = worker.get("state")
+         if isinstance(state, str) and state:
+             last_state[worker_id] = state
+         if state == "active":
+             last_seen[worker_id] = event_ts
+
+
+ def _state_from_event_type(event_type: EventType) -> str | None:
+     # Map event types to "active"/"idle"/"closed" state labels.
+     if event_type in ("worker_started", "worker_active"):
+         return "active"
+     if event_type == "worker_idle":
+         return "idle"
+     if event_type == "worker_closed":
+         return "closed"
+     return None
+
+
+ def _snapshot_worker_id(worker: dict) -> str | None:
+     # Identify a worker id inside snapshot payloads.
+     for key in ("session_id", "worker_id", "id"):
+         value = worker.get(key)
+         if value:
+             return str(value)
+     return None
+
+
+ def _select_workers_to_keep(
+     last_seen: dict[str, datetime],
+     last_state: dict[str, str],
+     current_ts: datetime,
+     recent_hours: int,
+ ) -> set[str]:
+     # Build the retention set from active and recently active workers.
+     keep_ids = {worker_id for worker_id, state in last_state.items() if state == "active"}
+     if recent_hours <= 0:
+         return keep_ids
+     threshold = current_ts.astimezone(timezone.utc) - timedelta(hours=recent_hours)
+     for worker_id, seen in last_seen.items():
+         if seen >= threshold:
+             keep_ids.add(worker_id)
+     return keep_ids
+
+
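A worked example of the retention rule: with recent_hours=24, a worker whose last event was worker_closed three hours ago is retained (its last-seen timestamp falls inside the window), one closed 48 hours ago is dropped, and a worker whose last recorded state is "active" is retained regardless of how old its last event is.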
+ def _filter_retained_events(handle, keep_ids: set[str]) -> list[str]:
+     # Filter events to only those associated with retained workers.
+     retained: list[str] = []
+     handle.seek(0)
+     for line in handle:
+         line = line.strip()
+         if not line:
+             continue
+         # Skip malformed JSON entries without failing rotation.
+         try:
+             payload = json.loads(line)
+         except json.JSONDecodeError:
+             continue
+         event = _parse_event(payload)
+         if event.type == "snapshot":
+             # Retain only snapshot entries related to preserved workers.
+             filtered = _filter_snapshot_event(event, keep_ids)
+             if filtered is None:
+                 continue
+             retained.append(json.dumps(_event_to_dict(filtered), ensure_ascii=False))
+             continue
+         if event.worker_id and event.worker_id in keep_ids:
+             retained.append(json.dumps(_event_to_dict(event), ensure_ascii=False))
+     return retained
+
+
+ def _filter_snapshot_event(event: WorkerEvent, keep_ids: set[str]) -> WorkerEvent | None:
+     # Drop snapshot entries that don't include retained workers.
+     data = dict(event.data or {})
+     workers = data.get("workers")
+     if not isinstance(workers, list):
+         return None
+     filtered_workers = []
+     for worker in workers:
+         if not isinstance(worker, dict):
+             continue
+         worker_id = _snapshot_worker_id(worker)
+         if worker_id and worker_id in keep_ids:
+             filtered_workers.append(worker)
+     if not filtered_workers:
+         return None
+     data["workers"] = filtered_workers
+     data["count"] = len(filtered_workers)
+     return WorkerEvent(ts=event.ts, type=event.type, worker_id=None, data=data)
+
+
+ def _latest_event_timestamp(events: list[WorkerEvent]) -> datetime:
+     # Use the newest timestamp in a batch to evaluate rotation boundaries.
+     latest = datetime.min.replace(tzinfo=timezone.utc)
+     for event in events:
+         try:
+             event_ts = _parse_timestamp(event.ts)
+         except ValueError:
+             continue
+         if event_ts > latest:
+             latest = event_ts
+     if latest == datetime.min.replace(tzinfo=timezone.utc):
+         return datetime.now(timezone.utc)
+     return latest
+
+
+ def _lock_file(handle) -> None:
+     # Acquire an exclusive lock for the file handle.
+     if fcntl is not None:
+         fcntl.flock(handle.fileno(), fcntl.LOCK_EX)
+         return
+     if msvcrt is not None:  # pragma: no cover - platform-specific
+         msvcrt.locking(handle.fileno(), msvcrt.LK_LOCK, 1)
+         return
+     raise RuntimeError("File locking is not supported on this platform.")
+
+
+ def _unlock_file(handle) -> None:
+     # Release any lock held on the file handle.
+     if fcntl is not None:
+         fcntl.flock(handle.fileno(), fcntl.LOCK_UN)
+         return
+     if msvcrt is not None:  # pragma: no cover - platform-specific
+         msvcrt.locking(handle.fileno(), msvcrt.LK_UNLCK, 1)
+         return
+     raise RuntimeError("File locking is not supported on this platform.")
+
+
+ def _normalize_since(since: datetime | None) -> datetime | None:
+     # Normalize timestamps for consistent comparisons.
+     if since is None:
+         return None
+     if since.tzinfo is None:
+         return since.replace(tzinfo=timezone.utc)
+     return since.astimezone(timezone.utc)
+
+
+ def _parse_timestamp(value: str) -> datetime:
+     # Parse ISO 8601 timestamps, including Zulu suffixes.
+     if value.endswith("Z"):
+         value = value[:-1] + "+00:00"
+     parsed = datetime.fromisoformat(value)
+     if parsed.tzinfo is None:
+         return parsed.replace(tzinfo=timezone.utc)
+     return parsed
+
+
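The Zulu-suffix rewrite matters on Python versions before 3.11, where datetime.fromisoformat rejects a trailing "Z". A quick expository check against the private helper:

assert _parse_timestamp("2026-02-03T04:05:06Z") == datetime(2026, 2, 3, 4, 5, 6, tzinfo=timezone.utc)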
+ def _parse_event(payload: dict) -> WorkerEvent:
+     # Convert a JSON payload into a WorkerEvent instance.
+     return WorkerEvent(
+         ts=str(payload["ts"]),
+         type=payload["type"],
+         worker_id=payload.get("worker_id"),
+         data=payload.get("data") or {},
+     )
claude_team/idle_detection.py ADDED
@@ -0,0 +1,173 @@
+ """Idle detection based on file activity and process state."""
+
+ from __future__ import annotations
+
+ import os
+ import subprocess
+ import time
+ from dataclasses import dataclass
+ from pathlib import Path
+ from typing import Literal
+
+ AgentType = Literal["claude", "codex"]
+
+
+ @dataclass
+ class Worker:
+     """Minimal worker state for idle detection."""
+
+     project_path: str
+     claude_session_id: str | None
+     agent_type: AgentType
+     is_idle: bool = False
+     message_count: int | None = None
+     last_message_count: int | None = None
+     last_message_timestamp: float | None = None
+     output_path: Path | None = None
+     pid: int | None = None
+
+
+ def get_project_slug(project_path: str) -> str:
+     """Convert a filesystem path to Claude's project directory slug."""
+     return project_path.replace("/", "-").replace(".", "-")
+
+
+ def get_claude_jsonl_path(worker: Worker) -> Path | None:
+     """Construct JSONL path for Claude Code worker."""
+     if not worker.project_path or not worker.claude_session_id:
+         return None
+     project_slug = get_project_slug(worker.project_path)
+     return Path.home() / ".claude" / "projects" / project_slug / f"{worker.claude_session_id}.jsonl"
+
+
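A concrete slug example (path illustrative): get_project_slug("/Users/alice/dev/my.app") yields "-Users-alice-dev-my-app", so a worker's transcript is expected at ~/.claude/projects/-Users-alice-dev-my-app/<session-id>.jsonl.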
+ def check_file_idle(path: Path, threshold_seconds: int) -> tuple[bool, int]:
+     """Check if file mtime exceeds threshold, return (is_idle, age_seconds)."""
+     try:
+         mtime = path.stat().st_mtime
+     except OSError:
+         return False, 0
+
+     age_seconds = max(0, int(time.time() - mtime))
+     return age_seconds >= threshold_seconds, age_seconds
+
+
+ # Compare message counts and update worker state when activity changes.
+ def _detect_idle_from_message_count(
+     worker: Worker,
+     idle_threshold_seconds: int,
+ ) -> tuple[bool, str | None] | None:
+     message_count = getattr(worker, "message_count", None)
+     if message_count is None:
+         return None
+
+     now = time.time()
+     last_count = getattr(worker, "last_message_count", None)
+     last_timestamp = getattr(worker, "last_message_timestamp", None)
+
+     if last_count is None or last_timestamp is None:
+         # Seed tracking state on first observation.
+         setattr(worker, "last_message_count", message_count)
+         setattr(worker, "last_message_timestamp", now)
+         return None
+
+     if message_count != last_count:
+         # Activity observed, reset tracking window.
+         setattr(worker, "last_message_count", message_count)
+         setattr(worker, "last_message_timestamp", now)
+         return False, None
+
+     idle_for = now - last_timestamp
+     if idle_for >= idle_threshold_seconds:
+         # No message activity within the threshold window.
+         return True, f"message_count_stalled:{int(idle_for)}s"
+
+     return False, None
+
+
+ # Best-effort process probe for Codex workers without output file updates.
+ def _detect_idle_from_process(worker: Worker) -> tuple[bool, str | None] | None:
+     pid = getattr(worker, "pid", None)
+     if not pid:
+         return None
+
+     try:
+         # Raises OSError when the PID does not exist.
+         os.kill(pid, 0)
+     except OSError:
+         return True, "process_exited"
+
+     try:
+         result = subprocess.run(
+             ["ps", "-o", "state=", "-p", str(pid)],
+             check=False,
+             capture_output=True,
+             text=True,
+         )
+     except OSError:
+         return None
+
+     state = result.stdout.strip()
+     if not state:
+         return None
+
+     # "S" (sleeping) and "I" (idle) are best-effort proxies for waiting on stdin.
+     if state[0] in {"S", "I"}:
+         return True, "process_sleeping"
+
+     return False, None
+
+
+ def detect_worker_idle(
+     worker: Worker,
+     idle_threshold_seconds: int = 300,
+ ) -> tuple[bool, str | None]:
+     """
+     Detect if a worker is idle based on file activity and process state.
+
+     Returns (is_idle, reason) where reason explains how idle was detected.
+     """
+     # Get the current idle state, handling both attributes and methods.
+     current_idle_attr = getattr(worker, "is_idle", False)
+     if callable(current_idle_attr):
+         # ManagedSession exposes is_idle() as a method, so call it.
+         try:
+             current_idle = current_idle_attr()
+         except Exception:
+             current_idle = False
+     else:
+         current_idle = bool(current_idle_attr)
+
+     # Claude workers: JSONL mtime is primary, message count is secondary.
+     if worker.agent_type == "claude":
+         jsonl_path = get_claude_jsonl_path(worker)
+         if jsonl_path and jsonl_path.exists():
+             is_idle, age_seconds = check_file_idle(jsonl_path, idle_threshold_seconds)
+             if is_idle:
+                 return True, f"jsonl_mtime:{age_seconds}s"
+             return False, None
+
+         # Fall back to message count when the JSONL path is missing.
+         message_result = _detect_idle_from_message_count(worker, idle_threshold_seconds)
+         if message_result is not None:
+             return message_result
+
+         # No signal available, keep existing idle state.
+         return current_idle, None
+
+     # Codex workers: output file mtime is primary, process state is fallback.
+     if worker.agent_type == "codex":
+         output_path = getattr(worker, "output_path", None)
+         if output_path and output_path.exists():
+             is_idle, age_seconds = check_file_idle(output_path, idle_threshold_seconds)
+             if is_idle:
+                 return True, f"output_mtime:{age_seconds}s"
+             return False, None
+
+         process_result = _detect_idle_from_process(worker)
+         if process_result is not None:
+             return process_result
+
+         # Nothing to inspect, preserve current state.
+         return current_idle, None
+
+     return current_idle, "unknown_agent_type"
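End to end, a caller might poll a worker like this (project path and session id are illustrative):

from claude_team.idle_detection import Worker, detect_worker_idle

worker = Worker(
    project_path="/Users/alice/dev/my.app",
    claude_session_id="0f1e2d3c",  # hypothetical session id
    agent_type="claude",
)
is_idle, reason = detect_worker_idle(worker, idle_threshold_seconds=300)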