beadhub 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- beadhub/__init__.py +12 -0
- beadhub/api.py +260 -0
- beadhub/auth.py +101 -0
- beadhub/aweb_context.py +65 -0
- beadhub/aweb_introspection.py +70 -0
- beadhub/beads_sync.py +514 -0
- beadhub/cli.py +330 -0
- beadhub/config.py +65 -0
- beadhub/db.py +129 -0
- beadhub/defaults/invariants/01-tracking-bdh-only.md +11 -0
- beadhub/defaults/invariants/02-communication-mail-first.md +36 -0
- beadhub/defaults/invariants/03-communication-chat.md +60 -0
- beadhub/defaults/invariants/04-identity-no-impersonation.md +17 -0
- beadhub/defaults/invariants/05-collaborate.md +12 -0
- beadhub/defaults/roles/backend.md +55 -0
- beadhub/defaults/roles/coordinator.md +44 -0
- beadhub/defaults/roles/frontend.md +77 -0
- beadhub/defaults/roles/implementer.md +73 -0
- beadhub/defaults/roles/reviewer.md +56 -0
- beadhub/defaults/roles/startup-expert.md +93 -0
- beadhub/defaults.py +262 -0
- beadhub/events.py +704 -0
- beadhub/internal_auth.py +121 -0
- beadhub/jsonl.py +68 -0
- beadhub/logging.py +62 -0
- beadhub/migrations/beads/001_initial.sql +70 -0
- beadhub/migrations/beads/002_search_indexes.sql +20 -0
- beadhub/migrations/server/001_initial.sql +279 -0
- beadhub/names.py +33 -0
- beadhub/notifications.py +275 -0
- beadhub/pagination.py +125 -0
- beadhub/presence.py +495 -0
- beadhub/rate_limit.py +152 -0
- beadhub/redis_client.py +11 -0
- beadhub/roles.py +35 -0
- beadhub/routes/__init__.py +1 -0
- beadhub/routes/agents.py +303 -0
- beadhub/routes/bdh.py +655 -0
- beadhub/routes/beads.py +778 -0
- beadhub/routes/claims.py +141 -0
- beadhub/routes/escalations.py +471 -0
- beadhub/routes/init.py +348 -0
- beadhub/routes/mcp.py +338 -0
- beadhub/routes/policies.py +833 -0
- beadhub/routes/repos.py +538 -0
- beadhub/routes/status.py +568 -0
- beadhub/routes/subscriptions.py +362 -0
- beadhub/routes/workspaces.py +1642 -0
- beadhub/workspace_config.py +202 -0
- beadhub-0.1.0.dist-info/METADATA +254 -0
- beadhub-0.1.0.dist-info/RECORD +54 -0
- beadhub-0.1.0.dist-info/WHEEL +4 -0
- beadhub-0.1.0.dist-info/entry_points.txt +2 -0
- beadhub-0.1.0.dist-info/licenses/LICENSE +21 -0
beadhub/routes/status.py
ADDED
@@ -0,0 +1,568 @@
from __future__ import annotations

import time
import uuid
from dataclasses import dataclass
from datetime import datetime, timezone
from typing import Any, Dict, List, Optional

from fastapi import APIRouter, Depends, HTTPException, Query, Request
from fastapi.responses import StreamingResponse
from redis.asyncio import Redis

from beadhub.auth import validate_workspace_id
from beadhub.aweb_introspection import get_project_from_auth

from ..db import DatabaseInfra, get_db_infra
from ..events import stream_events_multi
from ..presence import (
    list_agent_presences_by_workspace_ids,
)
from ..redis_client import get_redis
from .workspaces import is_valid_canonical_origin

DEFAULT_WORKSPACE_LIMIT = 200
MAX_WORKSPACE_LIMIT = 1000
# Short TTL keeps SSE subscriptions fresh while reducing DB churn.
WORKSPACE_IDS_CACHE_TTL_SECONDS = 10


@dataclass
class _WorkspaceIDsCacheEntry:
    workspace_ids: List[str]
    fetched_at: float
    limit: int


_WORKSPACE_IDS_CACHE: dict[tuple[int, str], _WorkspaceIDsCacheEntry] = {}


def _get_workspace_ids_cache_key(db_infra: DatabaseInfra, project_id: str) -> tuple[int, str]:
    # Scope cache to the DatabaseInfra instance to avoid cross-DB bleed.
    return (id(db_infra), project_id)


def _get_cached_workspace_ids(
    db_infra: DatabaseInfra, limit: int, project_id: str
) -> Optional[List[str]]:
    key = _get_workspace_ids_cache_key(db_infra, project_id)
    entry = _WORKSPACE_IDS_CACHE.get(key)
    if entry is None:
        return None
    if time.monotonic() - entry.fetched_at > WORKSPACE_IDS_CACHE_TTL_SECONDS:
        _WORKSPACE_IDS_CACHE.pop(key, None)
        return None
    if entry.limit < limit:
        return None
    return entry.workspace_ids[:limit]


def _update_workspace_ids_cache(
    db_infra: DatabaseInfra, limit: int, project_id: str, workspace_ids: List[str]
) -> None:
    key = _get_workspace_ids_cache_key(db_infra, project_id)
    _WORKSPACE_IDS_CACHE[key] = _WorkspaceIDsCacheEntry(
        workspace_ids=workspace_ids,
        fetched_at=time.monotonic(),
        limit=limit,
    )
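
# A quick sketch of the cache semantics above (all names are from this module;
# the argument values are illustrative):
#
#   _update_workspace_ids_cache(db_infra, 200, project_id, ids)
#   _get_cached_workspace_ids(db_infra, 50, project_id)   # -> ids[:50] (cached limit covers it)
#   _get_cached_workspace_ids(db_infra, 500, project_id)  # -> None (cached fetch was narrower)
#   ... more than 10 seconds later ...                    # -> None (TTL lapsed, entry evicted)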


async def get_all_workspace_ids_from_db(
    db_infra: DatabaseInfra,
    limit: int = DEFAULT_WORKSPACE_LIMIT,
    project_id: str = "",
) -> List[str]:
    """Get all registered workspace IDs from the database (excluding soft-deleted).

    Args:
        db_infra: Database infrastructure.
        limit: Maximum number of workspace IDs to return.
        project_id: Scope to this project (tenant isolation).

    Returns:
        List of workspace IDs, ordered by most recently updated first.
    """
    if not project_id:
        raise ValueError("project_id is required")

    cached = _get_cached_workspace_ids(db_infra, limit, project_id)
    if cached is not None:
        return cached

    server_db = db_infra.get_manager("server")
    rows = await server_db.fetch_all(
        """
        SELECT workspace_id FROM {{tables.workspaces}}
        WHERE project_id = $1 AND deleted_at IS NULL
        ORDER BY updated_at DESC LIMIT $2
        """,
        uuid.UUID(project_id),
        limit,
    )
    workspace_ids = [str(row["workspace_id"]) for row in rows]
    _update_workspace_ids_cache(db_infra, limit, project_id, workspace_ids)
    return workspace_ids
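
# The {{tables.*}} tokens are query templates resolved by the "server" database
# manager before execution, while values travel as positional $N parameters.
# A hypothetical expansion (the real mapping lives in beadhub/db.py, which this
# diff does not show):
#
#   SELECT workspace_id FROM {{tables.workspaces}} WHERE project_id = $1
#   -->  SELECT workspace_id FROM beadhub_server.workspaces WHERE project_id = $1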


async def get_workspace_ids_by_repo_from_db(
    db_infra: DatabaseInfra,
    repo: str,
    limit: int = DEFAULT_WORKSPACE_LIMIT,
    project_id: str = "",
) -> List[str]:
    """Get workspace IDs for a repo by canonical_origin from the database.

    Args:
        db_infra: Database infrastructure.
        repo: Canonical origin (e.g., "github.com/org/repo").
        limit: Maximum number of workspace IDs to return.
        project_id: Scope by project (tenant isolation).

    Returns:
        List of workspace IDs belonging to the repo.
    """
    if not project_id:
        raise ValueError("project_id is required")

    server_db = db_infra.get_manager("server")
    rows = await server_db.fetch_all(
        """
        SELECT w.workspace_id
        FROM {{tables.workspaces}} w
        JOIN {{tables.repos}} r ON w.repo_id = r.id
        WHERE r.canonical_origin = $1 AND w.project_id = $2 AND w.deleted_at IS NULL AND r.deleted_at IS NULL
        ORDER BY w.updated_at DESC
        LIMIT $3
        """,
        repo,
        uuid.UUID(project_id),
        limit,
    )
    return [str(row["workspace_id"]) for row in rows]


async def get_workspace_ids_by_repo_id_from_db(
    db_infra: DatabaseInfra,
    repo_id: str,
    limit: int = DEFAULT_WORKSPACE_LIMIT,
    project_id: str = "",
) -> List[str]:
    """Get workspace IDs for a repo by repo UUID from the database.

    Args:
        db_infra: Database infrastructure.
        repo_id: Repo UUID.
        limit: Maximum number of workspace IDs to return.
        project_id: Scope by project (tenant isolation).

    Returns:
        List of workspace IDs belonging to the repo.
    """
    if not project_id:
        raise ValueError("project_id is required")

    server_db = db_infra.get_manager("server")
    rows = await server_db.fetch_all(
        """
        SELECT workspace_id
        FROM {{tables.workspaces}}
        WHERE repo_id = $1 AND project_id = $2 AND deleted_at IS NULL
        ORDER BY updated_at DESC
        LIMIT $3
        """,
        uuid.UUID(repo_id),
        uuid.UUID(project_id),
        limit,
    )
    return [str(row["workspace_id"]) for row in rows]


async def get_workspace_ids_by_human_name_from_db(
    db_infra: DatabaseInfra,
    human_name: str,
    limit: int = DEFAULT_WORKSPACE_LIMIT,
    project_id: str = "",
) -> List[str]:
    """Get workspace IDs for workspaces owned by a specific human.

    Args:
        db_infra: Database infrastructure.
        human_name: Owner name to filter by.
        limit: Maximum number of workspace IDs to return.
        project_id: Scope by project (tenant isolation).

    Returns:
        List of workspace IDs owned by the human.
    """
    if not project_id:
        raise ValueError("project_id is required")

    server_db = db_infra.get_manager("server")
    rows = await server_db.fetch_all(
        """
        SELECT workspace_id
        FROM {{tables.workspaces}}
        WHERE human_name = $1 AND project_id = $2 AND deleted_at IS NULL
        ORDER BY updated_at DESC
        LIMIT $3
        """,
        human_name,
        uuid.UUID(project_id),
        limit,
    )
    return [str(row["workspace_id"]) for row in rows]
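
# Usage sketch for the lookup helpers (the repo and owner values are
# illustrative, not taken from this package):
#
#   ws = await get_workspace_ids_by_repo_from_db(
#       db_infra, "github.com/acme/api", project_id=project_id
#   )
#   ws = await get_workspace_ids_by_human_name_from_db(
#       db_infra, "alice", limit=50, project_id=project_id
#   )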


router = APIRouter(prefix="/v1", tags=["status"])


@router.get("/status")
async def status(
    request: Request,
    workspace_id: Optional[str] = Query(None, min_length=1),
    repo_id: Optional[str] = Query(None, min_length=36, max_length=36),
    redis: Redis = Depends(get_redis),
    db_infra: DatabaseInfra = Depends(get_db_infra),
) -> Dict[str, Any]:
    """
    Aggregate workspace status: agent presence and escalations.

    Filter by:
    - workspace_id: Show status for a specific workspace
    - repo_id: Show aggregated status for all workspaces in a repo (UUID)
    """
    project_id = await get_project_from_auth(request, db_infra)
    project_uuid = uuid.UUID(project_id)
    server_db = db_infra.get_manager("server")

    project_row = await server_db.fetch_one(
        """
        SELECT slug
        FROM {{tables.projects}}
        WHERE id = $1 AND deleted_at IS NULL
        """,
        project_uuid,
    )
    if not project_row:
        raise HTTPException(status_code=500, detail="Authenticated project not found")
    project_slug = project_row["slug"]

    # Determine which workspace_ids to include
    workspace_ids: List[str] = []

    if workspace_id:
        # Validate specific workspace_id
        try:
            validated_workspace_id = validate_workspace_id(workspace_id)
        except ValueError as e:
            raise HTTPException(status_code=422, detail=str(e))

        row = await server_db.fetch_one(
            """
            SELECT workspace_id FROM {{tables.workspaces}}
            WHERE workspace_id = $1 AND project_id = $2 AND deleted_at IS NULL
            """,
            uuid.UUID(validated_workspace_id),
            project_uuid,
        )
        if not row:
            raise HTTPException(status_code=404, detail="Workspace not found")
        workspace_ids = [validated_workspace_id]
    elif repo_id:
        # Validate UUID format at API boundary
        try:
            uuid.UUID(repo_id)
        except ValueError:
            raise HTTPException(status_code=422, detail="Invalid repo_id format: expected UUID")
        workspace_ids = await get_workspace_ids_by_repo_id_from_db(
            db_infra, repo_id, DEFAULT_WORKSPACE_LIMIT, project_id=project_id
        )
    else:
        workspace_ids = await get_all_workspace_ids_from_db(
            db_infra, DEFAULT_WORKSPACE_LIMIT, project_id=project_id
        )

    # Build workspace info based on the filter that was used
    if workspace_id:
        workspace_info: Dict[str, Any] = {
            "workspace_id": workspace_id,
            "project_id": project_id,
            "project_slug": project_slug,
        }
    elif repo_id:
        workspace_info = {
            "repo_id": repo_id,
            "workspace_count": len(workspace_ids),
            "project_id": project_id,
            "project_slug": project_slug,
        }
    else:
        workspace_info = {
            "project_id": project_id,
            "project_slug": project_slug,
            "workspace_count": len(workspace_ids),
        }

    # Agent presences from Redis (filtered by workspace_ids from database)
    # Always use workspace_ids for filtering - the database is the authoritative source
    # for which workspaces exist. Empty workspace_ids = empty presences (fail closed).
    all_presences: List[Dict[str, str]] = []
    if workspace_ids:
        all_presences = await list_agent_presences_by_workspace_ids(redis, workspace_ids)

    # Convert workspace_ids to UUIDs for database queries
    uuid_workspace_ids = [uuid.UUID(ws_id) for ws_id in workspace_ids] if workspace_ids else []

    row = await server_db.fetch_one(
        "SELECT COUNT(*) AS count FROM {{tables.escalations}} WHERE status = 'pending' AND project_id = $1",
        project_uuid,
    )
    escalations_pending = int(row["count"]) if row and "count" in row else 0

    # Claims - active bead claims with claimant count for conflict detection.
    # Fail closed like presences above: with no workspaces in scope, skip the
    # query entirely rather than selecting claims without a workspace filter
    # (an unscoped query would return claims from other projects).
    claim_rows: List[Any] = []
    if uuid_workspace_ids:
        placeholders = ", ".join(f"${i}" for i in range(1, len(uuid_workspace_ids) + 1))
        where_clause = f"WHERE c.workspace_id IN ({placeholders})"

        # Query claims with a count of how many workspaces have claimed each bead.
        # LEFT JOIN LATERAL on beads.beads_issues picks the most recently synced
        # title for each (project_id, bead_id).
        claim_rows = await server_db.fetch_all(
            f"""
            SELECT c.bead_id, c.workspace_id, c.alias, c.human_name, c.claimed_at, c.project_id,
                   counts.claimant_count, bi.title
            FROM {{{{tables.bead_claims}}}} c
            JOIN (
                SELECT project_id, bead_id, COUNT(*) as claimant_count
                FROM {{{{tables.bead_claims}}}}
                GROUP BY project_id, bead_id
            ) counts ON c.project_id = counts.project_id AND c.bead_id = counts.bead_id
            LEFT JOIN LATERAL (
                SELECT title FROM beads.beads_issues
                WHERE project_id = c.project_id AND bead_id = c.bead_id
                ORDER BY synced_at DESC
                LIMIT 1
            ) bi ON true
            {where_clause}
            ORDER BY c.claimed_at DESC
            """,
            *uuid_workspace_ids,
        )

    claims = [
        {
            "bead_id": r["bead_id"],
            "workspace_id": str(r["workspace_id"]),
            "alias": r["alias"],
            "human_name": r["human_name"],
            "claimed_at": r["claimed_at"].isoformat(),
            "claimant_count": r["claimant_count"],
            "title": r["title"],
            "project_id": str(r["project_id"]),
        }
        for r in claim_rows
    ]

    # Build claims lookup map for populating current_issue in agents
    # Note: A workspace may have multiple claims; use the most recent (first in list due to ORDER BY)
    claims_by_workspace: Dict[str, str] = {}
    for r in claim_rows:
        ws_id = str(r["workspace_id"])
        if ws_id not in claims_by_workspace:
            claims_by_workspace[ws_id] = r["bead_id"]

    # Build agent info for all agents, enriched with current_issue from claims
    agents: List[Dict[str, Any]] = []
    for presence in all_presences:
        ws_id = presence.get("workspace_id", "")
        agents.append(
            {
                "workspace_id": ws_id,
                "alias": presence.get("alias", ""),
                "member": presence.get("member_email") or None,
                "program": presence.get("program") or None,
                "role": presence.get("role") or None,
                "status": presence.get("status") or "unknown",
                "current_issue": claims_by_workspace.get(ws_id),
                "last_seen": presence.get("last_seen"),
            }
        )

    # Identify conflicts: beads with multiple claimants
    conflicts = []
    seen_beads: Dict[str, List[Dict[str, Any]]] = {}
    for claim in claims:
        if claim["claimant_count"] > 1:
            bead_id = claim["bead_id"]
            if bead_id not in seen_beads:
                seen_beads[bead_id] = []
            seen_beads[bead_id].append(
                {
                    "alias": claim["alias"],
                    "human_name": claim["human_name"],
                    "workspace_id": claim["workspace_id"],
                }
            )
    for bead_id, claimants in seen_beads.items():
        conflicts.append(
            {
                "bead_id": bead_id,
                "claimants": claimants,
            }
        )

    return {
        "workspace": workspace_info,
        "agents": agents,
        "claims": claims,
        "conflicts": conflicts,
        "escalations_pending": escalations_pending,
        "timestamp": datetime.now(timezone.utc).isoformat(),
    }
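
# Response shape for GET /v1/status, following the return dict above
# (field values are illustrative):
#
#   {
#     "workspace": {"project_id": "...", "project_slug": "acme", "workspace_count": 3},
#     "agents": [{"workspace_id": "...", "alias": "dev-1", "member": null,
#                 "program": null, "role": null, "status": "active",
#                 "current_issue": "bd-42", "last_seen": "..."}],
#     "claims": [{"bead_id": "bd-42", "claimant_count": 2, "title": "...", ...}],
#     "conflicts": [{"bead_id": "bd-42", "claimants": [{"alias": "dev-1", ...}]}],
#     "escalations_pending": 1,
#     "timestamp": "2025-01-01T00:00:00+00:00"
#   }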


@router.get("/status/stream")
async def status_stream(
    request: Request,
    workspace_id: Optional[str] = Query(None, min_length=1),
    repo: Optional[str] = Query(
        None,
        max_length=255,
        description="Filter by repo canonical origin (e.g., 'github.com/org/repo')",
    ),
    human_name: Optional[str] = Query(
        None,
        max_length=64,
        description="Filter by workspace owner name",
    ),
    limit: int = Query(
        DEFAULT_WORKSPACE_LIMIT,
        ge=1,
        le=MAX_WORKSPACE_LIMIT,
        description="Maximum workspaces to subscribe to (ignored when workspace_id is specified)",
    ),
    event_types: Optional[str] = Query(
        None, description="Comma-separated event categories to filter (e.g., 'message,bead')"
    ),
    redis: Redis = Depends(get_redis),
    db_infra: DatabaseInfra = Depends(get_db_infra),
) -> StreamingResponse:
    """
    Server-Sent Events (SSE) stream for real-time updates.

    Subscribes to events and streams them as they occur. Events include
    messages, escalations, and bead status changes.

    Filter by:
    - workspace_id: Stream events for a specific workspace
    - repo: Stream aggregated events for all workspaces in a repo (canonical origin)
    - human_name: Stream events for all workspaces owned by a specific human
    - No filter: Stream events for all workspaces in the authenticated project
      (bounded, ordered by recent activity)

    Args:
        workspace_id: UUID of a specific workspace to stream events for
        repo: Repo canonical origin (e.g., "github.com/org/repo") to stream events
            for all its workspaces
        human_name: Owner name to stream events for all their workspaces
        limit: Maximum number of workspaces to subscribe to (default 200, max 1000).
            Ignored when workspace_id is specified. Workspaces are ordered by
            recent activity, so the limit prioritizes active workspaces.
        event_types: Optional comma-separated filter for event categories.
            Valid categories: reservation, message, escalation, bead.
            If not specified, all events are streamed.

    Returns:
        SSE stream with events in the format:
        ```
        data: {"type": "message.delivered", "workspace_id": "...", ...}

        data: {"type": "bead.status_changed", "workspace_id": "...", ...}
        ```
    """
    effective_project_id = await get_project_from_auth(request, db_infra)

    # Determine which workspace_ids to subscribe to
    workspace_ids: List[str] = []

    if workspace_id:
        # Validate specific workspace_id
        try:
            validated_workspace_id = validate_workspace_id(workspace_id)
        except ValueError as e:
            raise HTTPException(status_code=422, detail=str(e))
        server_db = db_infra.get_manager("server")
        row = await server_db.fetch_one(
            """
            SELECT 1 FROM {{tables.workspaces}}
            WHERE workspace_id = $1 AND project_id = $2 AND deleted_at IS NULL
            """,
            uuid.UUID(validated_workspace_id),
            uuid.UUID(effective_project_id),
        )
        if not row:
            raise HTTPException(status_code=404, detail="Workspace not found")
        workspace_ids = [validated_workspace_id]
    elif repo:
        # Validate repo format (canonical origin)
        if not is_valid_canonical_origin(repo):
            raise HTTPException(
                status_code=422,
                detail=f"Invalid repo format: {repo[:50]}",
            )
        # Look up workspace_ids for this repo from the database (scoped to the
        # authenticated project; project_id is always required here)
        workspace_ids = await get_workspace_ids_by_repo_from_db(
            db_infra, repo, limit, project_id=effective_project_id
        )
    elif human_name:
        # Look up workspace_ids for this owner from database
        workspace_ids = await get_workspace_ids_by_human_name_from_db(
            db_infra, human_name, limit, project_id=effective_project_id
        )
    else:
        # No filter - stream registered workspaces from database (limited)
        workspace_ids = await get_all_workspace_ids_from_db(
            db_infra, limit, project_id=effective_project_id
        )

    # Handle empty workspace lists:
    # - If user provided specific filters (repo/human_name) that matched nothing,
    #   return 404 so they know their filter was wrong
    # - If just project-level filtering (or no filter), allow keepalive stream
    #   for new projects that don't have workspaces yet
    if not workspace_ids:
        if repo or human_name:
            raise HTTPException(
                status_code=404,
                detail="No workspaces found for the provided filter",
            )

    # Parse event type filter
    event_type_set: Optional[set[str]] = None
    if event_types:
        event_type_set = {t.strip().lower() for t in event_types.split(",")}
        # Validate event types
        valid_types = {"reservation", "message", "escalation", "bead"}
        invalid = event_type_set - valid_types
        if invalid:
            raise HTTPException(
                status_code=422,
                detail=f"Invalid event types: {invalid}. Valid types: {valid_types}",
            )

    return StreamingResponse(
        stream_events_multi(
            redis,
            workspace_ids,
            event_type_set,
            check_disconnected=request.is_disconnected,
        ),
        media_type="text/event-stream",
        headers={
            "Cache-Control": "no-cache",
            "Connection": "keep-alive",
            "X-Accel-Buffering": "no",  # Disable nginx buffering
        },
    )