beadhub 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- beadhub/__init__.py +12 -0
- beadhub/api.py +260 -0
- beadhub/auth.py +101 -0
- beadhub/aweb_context.py +65 -0
- beadhub/aweb_introspection.py +70 -0
- beadhub/beads_sync.py +514 -0
- beadhub/cli.py +330 -0
- beadhub/config.py +65 -0
- beadhub/db.py +129 -0
- beadhub/defaults/invariants/01-tracking-bdh-only.md +11 -0
- beadhub/defaults/invariants/02-communication-mail-first.md +36 -0
- beadhub/defaults/invariants/03-communication-chat.md +60 -0
- beadhub/defaults/invariants/04-identity-no-impersonation.md +17 -0
- beadhub/defaults/invariants/05-collaborate.md +12 -0
- beadhub/defaults/roles/backend.md +55 -0
- beadhub/defaults/roles/coordinator.md +44 -0
- beadhub/defaults/roles/frontend.md +77 -0
- beadhub/defaults/roles/implementer.md +73 -0
- beadhub/defaults/roles/reviewer.md +56 -0
- beadhub/defaults/roles/startup-expert.md +93 -0
- beadhub/defaults.py +262 -0
- beadhub/events.py +704 -0
- beadhub/internal_auth.py +121 -0
- beadhub/jsonl.py +68 -0
- beadhub/logging.py +62 -0
- beadhub/migrations/beads/001_initial.sql +70 -0
- beadhub/migrations/beads/002_search_indexes.sql +20 -0
- beadhub/migrations/server/001_initial.sql +279 -0
- beadhub/names.py +33 -0
- beadhub/notifications.py +275 -0
- beadhub/pagination.py +125 -0
- beadhub/presence.py +495 -0
- beadhub/rate_limit.py +152 -0
- beadhub/redis_client.py +11 -0
- beadhub/roles.py +35 -0
- beadhub/routes/__init__.py +1 -0
- beadhub/routes/agents.py +303 -0
- beadhub/routes/bdh.py +655 -0
- beadhub/routes/beads.py +778 -0
- beadhub/routes/claims.py +141 -0
- beadhub/routes/escalations.py +471 -0
- beadhub/routes/init.py +348 -0
- beadhub/routes/mcp.py +338 -0
- beadhub/routes/policies.py +833 -0
- beadhub/routes/repos.py +538 -0
- beadhub/routes/status.py +568 -0
- beadhub/routes/subscriptions.py +362 -0
- beadhub/routes/workspaces.py +1642 -0
- beadhub/workspace_config.py +202 -0
- beadhub-0.1.0.dist-info/METADATA +254 -0
- beadhub-0.1.0.dist-info/RECORD +54 -0
- beadhub-0.1.0.dist-info/WHEEL +4 -0
- beadhub-0.1.0.dist-info/entry_points.txt +2 -0
- beadhub-0.1.0.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,1642 @@
|
|
|
1
|
+
"""Workspace discovery and registration endpoints."""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import logging
|
|
6
|
+
import uuid as uuid_module
|
|
7
|
+
from datetime import datetime, timezone
|
|
8
|
+
from typing import Dict, List, Optional
|
|
9
|
+
from uuid import UUID
|
|
10
|
+
|
|
11
|
+
import asyncpg.exceptions
|
|
12
|
+
from aweb.presence import update_agent_presence as update_aweb_agent_presence
|
|
13
|
+
from fastapi import APIRouter, Depends, HTTPException, Path, Query, Request
|
|
14
|
+
from pgdbm.errors import QueryError
|
|
15
|
+
from pydantic import BaseModel, Field, field_validator
|
|
16
|
+
from redis.asyncio import Redis
|
|
17
|
+
|
|
18
|
+
from beadhub.auth import validate_workspace_id
|
|
19
|
+
from beadhub.aweb_context import resolve_aweb_identity
|
|
20
|
+
from beadhub.aweb_introspection import get_identity_from_auth, get_project_from_auth
|
|
21
|
+
|
|
22
|
+
from ..beads_sync import is_valid_alias, is_valid_canonical_origin, is_valid_human_name
|
|
23
|
+
from ..config import get_settings
|
|
24
|
+
from ..db import DatabaseInfra, get_db_infra
|
|
25
|
+
from ..names import CLASSIC_NAMES
|
|
26
|
+
from ..pagination import encode_cursor, validate_pagination_params
|
|
27
|
+
from ..presence import (
|
|
28
|
+
list_agent_presences,
|
|
29
|
+
list_agent_presences_by_workspace_ids,
|
|
30
|
+
update_agent_presence,
|
|
31
|
+
)
|
|
32
|
+
from ..redis_client import get_redis
|
|
33
|
+
from ..roles import (
|
|
34
|
+
ROLE_ERROR_MESSAGE,
|
|
35
|
+
ROLE_MAX_LENGTH,
|
|
36
|
+
is_valid_role,
|
|
37
|
+
normalize_role,
|
|
38
|
+
)
|
|
39
|
+
from .bdh import check_alias_collision, ensure_repo, upsert_workspace
|
|
40
|
+
from .repos import canonicalize_git_url, extract_repo_name
|
|
41
|
+
|
|
42
|
+
logger = logging.getLogger(__name__)
|
|
43
|
+
|
|
44
|
+
# Pagination defaults for team-status listings (consumers not visible in this
# chunk — presumably the /status or list endpoints defined later in this file).
TEAM_STATUS_DEFAULT_LIMIT = 15
TEAM_STATUS_MAX_LIMIT = 200
# When filtering candidates server-side, fetch limit * multiplier rows
# (capped) so post-filtering still yields a full page — TODO confirm usage.
TEAM_STATUS_CANDIDATE_MULTIPLIER = 5
TEAM_STATUS_CANDIDATE_MAX = 500
# Hard cap for registry-style listings.
REGISTRY_MAX_LIMIT = 1000

# All routes in this module are mounted under /v1/workspaces.
router = APIRouter(prefix="/v1/workspaces", tags=["workspaces"])
|
|
51
|
+
|
|
52
|
+
# Request/Response models for suggest-name-prefix
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
class SuggestNamePrefixRequest(BaseModel):
    """Request body for /v1/workspaces/suggest-name-prefix endpoint."""

    # Git origin URL; must be parseable by canonicalize_git_url.
    origin_url: str = Field(..., min_length=1, max_length=2048, description="Git origin URL")

    @field_validator("origin_url")
    @classmethod
    def validate_origin_url(cls, v: str) -> str:
        """Reject origin URLs that cannot be canonicalized.

        Only validates; the endpoint recomputes the canonical form itself.
        """
        try:
            canonicalize_git_url(v)
        except ValueError as e:
            # Chain the original error so the root cause is preserved in
            # tracebacks (ruff B904); the message callers see is unchanged.
            raise ValueError(f"Invalid origin_url: {e}") from e
        return v
|
|
68
|
+
|
|
69
|
+
|
|
70
|
+
class SuggestNamePrefixResponse(BaseModel):
    """Response for /v1/workspaces/suggest-name-prefix endpoint."""

    name_prefix: str  # The next available name (e.g., "alice", "bob", "alice-01")
    project_id: str  # Resolved project UUID as a string
    project_slug: str  # Human-readable project slug
    repo_id: str  # Repo UUID as a string; empty when resolved via auth without a registered repo
    canonical_origin: str  # Canonicalized form of the submitted origin_url
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
@router.post("/suggest-name-prefix", response_model=SuggestNamePrefixResponse)
async def suggest_name_prefix(
    request: Request,
    payload: SuggestNamePrefixRequest,
    db: DatabaseInfra = Depends(get_db_infra),
) -> SuggestNamePrefixResponse:
    """
    Get the next available name prefix for a new workspace.

    Given an origin_url, this endpoint:
    1. Looks up the repo to find the project
    2. Queries existing aliases to find used name prefixes
    3. Returns the first available classic name (e.g., alice, bob, alice-01)

    Classic names are used: alice, bob, charlie, etc. The client combines
    the name_prefix with their role to form the full alias.

    Returns 404 if the repo is not registered and the caller is not authenticated.
    Returns 409 if the repo exists in multiple projects and the caller is not authenticated
    (or the authenticated project is not among them), or all names are taken.
    """
    server_db = db.get_manager("server")

    canonical_origin = canonicalize_git_url(payload.origin_url)

    # Auth is optional here: resolve the caller's project only when an auth
    # header is present, so unauthenticated discovery still works.
    auth_project_id: UUID | None = None
    if "Authorization" in request.headers or "X-BH-Auth" in request.headers:
        auth_project_id = UUID(await get_project_from_auth(request, db))

    # Look up repo to get project context (exclude soft-deleted repos and projects)
    results = await server_db.fetch_all(
        """
        SELECT r.id as repo_id, r.canonical_origin,
               p.id as project_id, p.slug as project_slug
        FROM {{tables.repos}} r
        JOIN {{tables.projects}} p ON r.project_id = p.id AND p.deleted_at IS NULL
        WHERE r.canonical_origin = $1 AND r.deleted_at IS NULL
        ORDER BY p.slug
        """,
        canonical_origin,
    )

    if not results:
        # Repo not registered anywhere. An authenticated caller can still get
        # a suggestion scoped to their own project (repo_id stays empty);
        # anonymous callers get a 404 pointing them at 'bdh :init'.
        if auth_project_id is None:
            raise HTTPException(
                status_code=404,
                detail=f"Repo not registered: {canonical_origin}. Run 'bdh :init' to register.",
            )

        project_row = await server_db.fetch_one(
            """
            SELECT id, slug
            FROM {{tables.projects}}
            WHERE id = $1 AND deleted_at IS NULL
            """,
            auth_project_id,
        )
        if not project_row:
            raise HTTPException(status_code=404, detail="Project not found")

        project_id = project_row["id"]
        project_slug = project_row["slug"]
        # No repo row exists in this branch, so the response carries an
        # empty repo_id (documented shape on SuggestNamePrefixResponse).
        repo_id = ""
    else:
        if len(results) > 1:
            # Same canonical origin registered under multiple projects:
            # disambiguate via the caller's authenticated project, or fail.
            if auth_project_id is not None:
                matched = next((r for r in results if r["project_id"] == auth_project_id), None)
                if matched is None:
                    raise HTTPException(
                        status_code=403, detail="Repo does not belong to your project"
                    )
                result = matched
            else:
                project_slugs = [r["project_slug"] for r in results]
                raise HTTPException(
                    status_code=409,
                    detail=f"Repo exists in multiple projects: {', '.join(project_slugs)}. "
                    "Specify project with BEADHUB_PROJECT or --project.",
                )
        else:
            # NOTE(review): a single match is accepted without checking it
            # against auth_project_id — presumably intentional (public
            # discovery); confirm before tightening.
            result = results[0]

        project_id = result["project_id"]
        repo_id = str(result["repo_id"])
        project_slug = result["project_slug"]

    # Query all existing aliases in this project to extract used name prefixes
    existing = await server_db.fetch_all(
        """
        SELECT alias FROM {{tables.workspaces}}
        WHERE project_id = $1
          AND deleted_at IS NULL
        ORDER BY alias
        """,
        project_id,
    )

    # Extract name prefixes from existing aliases
    # An alias like "alice-programmer" has prefix "alice"
    # An alias like "alice-01-programmer" has prefix "alice-01"
    # An alias like "alice" (no role) also has prefix "alice"
    used_prefixes: set[str] = set()
    for row in existing:
        alias = row["alias"]
        parts = alias.split("-")
        if len(parts) >= 2 and parts[1].isdigit():
            # Format: name-NN-role or name-NN → prefix is "name-NN"
            prefix = f"{parts[0]}-{parts[1]}".lower()
        else:
            # Format: name-role or name → prefix is "name"
            prefix = parts[0].lower()
        if prefix:  # Skip empty prefixes from malformed aliases
            used_prefixes.add(prefix)

    # Find first available name prefix
    # First try base names (alice, bob, ...), then numbered (alice-01, bob-01, ...)
    available_prefix = None

    # Try base names first
    for name in CLASSIC_NAMES:
        if name not in used_prefixes:
            available_prefix = name
            break

    # If all base names taken, try numbered names
    if available_prefix is None:
        for num in range(1, 100):  # Up to 99 numbered suffixes
            for name in CLASSIC_NAMES:
                numbered = f"{name}-{num:02d}"
                if numbered not in used_prefixes:
                    available_prefix = numbered
                    break
            if available_prefix:
                break

    if available_prefix is None:
        raise HTTPException(
            status_code=409,
            detail=f"All name prefixes are taken (tried {len(CLASSIC_NAMES)} names × 100 variants). "
            "Use --alias to specify a custom alias.",
        )

    return SuggestNamePrefixResponse(
        name_prefix=available_prefix,
        project_id=str(project_id),
        project_slug=project_slug,
        repo_id=repo_id,
        canonical_origin=canonical_origin,
    )
|
|
229
|
+
|
|
230
|
+
|
|
231
|
+
# Request/Response models for registration
|
|
232
|
+
|
|
233
|
+
|
|
234
|
+
class RegisterWorkspaceRequest(BaseModel):
    """Request body for /v1/workspaces/register endpoint (clean-slate split)."""

    # Git origin URL of the repo this workspace tracks.
    repo_origin: str = Field(..., min_length=1, max_length=2048)
    role: Optional[str] = Field(
        None,
        max_length=ROLE_MAX_LENGTH,
        description="Brief description of workspace purpose",
    )
    hostname: Optional[str] = Field(
        None,
        max_length=255,
        description="Machine hostname for gone workspace detection",
    )
    workspace_path: Optional[str] = Field(
        None,
        max_length=4096,
        description="Directory path for gone workspace detection",
    )

    @field_validator("repo_origin")
    @classmethod
    def validate_repo_origin(cls, v: str) -> str:
        """Reject origin URLs that cannot be canonicalized (validation only)."""
        try:
            canonicalize_git_url(v)
        except ValueError as e:
            # Chain the cause so the underlying parse error stays visible
            # in tracebacks (ruff B904); the client-facing message is unchanged.
            raise ValueError(f"Invalid repo_origin: {e}") from e
        return v

    @field_validator("role")
    @classmethod
    def validate_role(cls, v: Optional[str]) -> Optional[str]:
        """Validate and normalize an optional role string."""
        if v is None:
            return None
        if not is_valid_role(v):
            raise ValueError(ROLE_ERROR_MESSAGE)
        return normalize_role(v)

    @field_validator("hostname")
    @classmethod
    def validate_hostname(cls, v: Optional[str]) -> Optional[str]:
        """Reject hostnames containing control characters or null bytes."""
        if v is None:
            return None
        if "\x00" in v or any(ord(c) < 32 for c in v):
            raise ValueError(
                "hostname contains invalid characters (null bytes or control characters)"
            )
        return v

    @field_validator("workspace_path")
    @classmethod
    def validate_workspace_path(cls, v: Optional[str]) -> Optional[str]:
        """Reject paths with control characters; tab/newline are tolerated."""
        if v is None:
            return None
        if "\x00" in v or any(ord(c) < 32 and c not in "\t\n" for c in v):
            raise ValueError(
                "workspace_path contains invalid characters (null bytes or control characters)"
            )
        return v
|
|
293
|
+
|
|
294
|
+
|
|
295
|
+
class RegisterWorkspaceResponse(BaseModel):
    """Response for /v1/workspaces/register endpoint."""

    workspace_id: str  # aweb agent_id used as the workspace id (v1 mapping)
    project_id: str  # Project UUID (string) derived from aweb auth
    project_slug: str  # Human-readable project slug
    repo_id: str  # Repo UUID (string) created or reused during registration
    canonical_origin: str  # Canonicalized form of the submitted repo_origin
    alias: str  # Agent alias from the aweb profile
    human_name: str  # Human-readable name from the aweb profile
    created: bool  # True if new workspace, False if already existed
|
|
306
|
+
|
|
307
|
+
|
|
308
|
+
@router.post("/register", response_model=RegisterWorkspaceResponse)
async def register_workspace(
    request: Request,
    payload: RegisterWorkspaceRequest,
    db: DatabaseInfra = Depends(get_db_infra),
) -> RegisterWorkspaceResponse:
    """
    Register a workspace for the authenticated aweb agent.

    Identity and project scoping are derived from aweb:
    - project_id comes from aweb auth (introspection)
    - workspace_id is the aweb agent_id (v1 mapping)
    - alias/human_name come from aweb agent profile

    Raises:
        HTTPException 502: aweb returned a malformed alias or human_name.
        HTTPException 409: workspace already registered under a different
            project/repo/alias, or the alias is taken in this project.
    """
    server_db = db.get_manager("server")

    # Everything identity-related comes from aweb, not the payload; the
    # payload only supplies repo/host metadata.
    identity = await resolve_aweb_identity(request, db)
    project_id = identity.project_id
    workspace_id = identity.agent_id
    alias = identity.alias
    human_name = identity.human_name
    # aweb is an upstream service here, so malformed values map to 502.
    if not is_valid_alias(alias):
        raise HTTPException(status_code=502, detail="aweb returned invalid alias format")
    if human_name and not is_valid_human_name(human_name):
        raise HTTPException(status_code=502, detail="aweb returned invalid human_name format")

    project_slug = identity.project_slug
    project_name = identity.project_name or ""

    canonical_origin = canonicalize_git_url(payload.repo_origin)
    repo_name = extract_repo_name(canonical_origin)

    created = False
    # Single transaction: project upsert → repo upsert → workspace
    # insert/revive, so partial registrations never persist.
    async with server_db.transaction() as tx:
        # Ensure local project row for FK integrity.
        await tx.execute(
            """
            INSERT INTO {{tables.projects}} (id, tenant_id, slug, name, deleted_at)
            VALUES ($1, NULL, $2, $3, NULL)
            ON CONFLICT (id)
            DO UPDATE SET slug = EXCLUDED.slug, name = EXCLUDED.name, deleted_at = NULL
            """,
            UUID(project_id),
            project_slug,
            project_name or None,
        )

        # Upsert the repo; DO UPDATE also revives a soft-deleted row
        # (deleted_at = NULL) and refreshes the raw origin_url.
        repo = await tx.fetch_one(
            """
            INSERT INTO {{tables.repos}} (project_id, origin_url, canonical_origin, name)
            VALUES ($1, $2, $3, $4)
            ON CONFLICT (project_id, canonical_origin)
            DO UPDATE SET origin_url = EXCLUDED.origin_url, deleted_at = NULL
            RETURNING id
            """,
            UUID(project_id),
            payload.repo_origin,
            canonical_origin,
            repo_name,
        )
        # Upsert with RETURNING always yields a row.
        assert repo is not None
        repo_id = str(repo["id"])

        # Fetch any prior registration (including soft-deleted) so we can
        # enforce immutability of project/repo/alias before reviving.
        existing = await tx.fetch_one(
            """
            SELECT workspace_id, project_id, repo_id, alias, deleted_at
            FROM {{tables.workspaces}}
            WHERE workspace_id = $1
            """,
            UUID(workspace_id),
        )
        if existing:
            # A workspace's project, repo, and alias are immutable once
            # registered; mismatches are client errors, not updates.
            if str(existing["project_id"]) != project_id:
                raise HTTPException(
                    status_code=409, detail="Workspace already registered in another project"
                )
            if existing["repo_id"] is not None and str(existing["repo_id"]) != repo_id:
                raise HTTPException(
                    status_code=409, detail="Workspace already registered for another repo"
                )
            if existing["alias"] != alias:
                raise HTTPException(
                    status_code=409, detail="Workspace already registered with a different alias"
                )

            # Revive (deleted_at = NULL) and refresh the mutable metadata.
            await tx.execute(
                """
                UPDATE {{tables.workspaces}}
                SET deleted_at = NULL,
                    hostname = $2,
                    workspace_path = $3,
                    role = $4,
                    human_name = $5,
                    updated_at = NOW()
                WHERE workspace_id = $1
                """,
                UUID(workspace_id),
                payload.hostname,
                payload.workspace_path,
                payload.role,
                human_name,
            )
            created = False
        else:
            try:
                await tx.execute(
                    """
                    INSERT INTO {{tables.workspaces}}
                    (workspace_id, project_id, repo_id, alias, human_name, role, hostname, workspace_path, workspace_type)
                    VALUES ($1, $2, $3, $4, $5, $6, $7, $8, 'agent')
                    """,
                    UUID(workspace_id),
                    UUID(project_id),
                    UUID(repo_id),
                    alias,
                    human_name,
                    payload.role,
                    payload.hostname,
                    payload.workspace_path,
                )
            except (QueryError, asyncpg.exceptions.UniqueViolationError) as e:
                # Alias uniqueness violation within the project.
                # QueryError may wrap any DB failure; only translate it to a
                # 409 when the underlying cause is a unique violation.
                if isinstance(e, QueryError) and not isinstance(
                    e.__cause__, asyncpg.exceptions.UniqueViolationError
                ):
                    raise
                raise HTTPException(
                    status_code=409, detail=f"Alias '{alias}' is already used in this project"
                )
            created = True

    return RegisterWorkspaceResponse(
        workspace_id=workspace_id,
        project_id=project_id,
        project_slug=project_slug,
        repo_id=repo_id,
        canonical_origin=canonical_origin,
        alias=alias,
        human_name=human_name,
        created=created,
    )
|
|
449
|
+
|
|
450
|
+
|
|
451
|
+
# Heartbeat models and endpoint
|
|
452
|
+
# IMPORTANT: This endpoint MUST be defined BEFORE /{workspace_id} to prevent
|
|
453
|
+
# "heartbeat" from matching as a workspace_id parameter.
|
|
454
|
+
|
|
455
|
+
|
|
456
|
+
class WorkspaceHeartbeatRequest(BaseModel):
    """Request body for /v1/workspaces/heartbeat."""

    workspace_id: str = Field(..., min_length=1)
    alias: str = Field(..., min_length=1, max_length=64)
    repo_origin: str = Field(..., min_length=1, max_length=512, description="Git remote origin URL")

    role: Optional[str] = Field(
        None,
        max_length=ROLE_MAX_LENGTH,
        description="Brief description of workspace purpose",
    )
    current_branch: Optional[str] = Field(None, max_length=255)
    hostname: Optional[str] = Field(None, max_length=255)
    workspace_path: Optional[str] = Field(None, max_length=1024)
    human_name: Optional[str] = Field(None, max_length=64)

    @field_validator("workspace_id")
    @classmethod
    def validate_workspace_id_field(cls, v: str) -> str:
        """Validate and normalize the workspace id.

        validate_workspace_id already raises ValueError with a usable
        message, which pydantic reports directly — no re-wrapping needed.
        """
        return validate_workspace_id(v)

    @field_validator("alias")
    @classmethod
    def validate_alias_field(cls, v: str) -> str:
        """Enforce the project-wide alias format."""
        if not is_valid_alias(v):
            raise ValueError(
                "Invalid alias: must be alphanumeric with hyphens/underscores, 1-64 chars"
            )
        return v

    @field_validator("repo_origin")
    @classmethod
    def validate_repo_origin_field(cls, v: str) -> str:
        """Reject origin URLs that cannot be canonicalized (validation only)."""
        try:
            canonicalize_git_url(v)
        except ValueError as e:
            # Chain the cause for debuggability (ruff B904); message unchanged.
            raise ValueError(f"Invalid repo_origin: {e}") from e
        return v

    @field_validator("role")
    @classmethod
    def validate_role_field(cls, v: Optional[str]) -> Optional[str]:
        """Validate and normalize an optional role string."""
        if v is None:
            return None
        if not is_valid_role(v):
            raise ValueError(ROLE_ERROR_MESSAGE)
        return normalize_role(v)
|
|
505
|
+
|
|
506
|
+
|
|
507
|
+
class WorkspaceHeartbeatResponse(BaseModel):
    """Response for /v1/workspaces/heartbeat."""

    ok: bool = True  # Always True on success; errors surface as HTTP errors
    workspace_id: str  # Echo of the heartbeating workspace's id
|
|
510
|
+
|
|
511
|
+
|
|
512
|
+
@router.post("/heartbeat", response_model=WorkspaceHeartbeatResponse)
async def heartbeat(
    payload: WorkspaceHeartbeatRequest,
    request: Request,
    redis: Redis = Depends(get_redis),
    db: DatabaseInfra = Depends(get_db_infra),
) -> WorkspaceHeartbeatResponse:
    """
    Refresh workspace presence, enforcing "presence is a cache of SQL".

    Order of operations:
    1) Ensure repo/workspace exists (SQL)
    2) Update presence (Redis)

    Note: If Redis is unavailable, SQL is still authoritative; presence updates
    are best-effort and will converge once the client retries.

    Raises:
        HTTPException 410: workspace or its repo was soft-deleted.
        HTTPException 400: project or repo mismatch (likely stale .beadhub).
        HTTPException 409: alias mismatch or alias collision in the project.
    """
    project_id = UUID(await get_project_from_auth(request, db))
    settings = get_settings()

    server_db = db.get_manager("server")

    # Pre-check immutability to avoid leaking DB trigger errors as 500s.
    existing = await server_db.fetch_one(
        """
        SELECT workspace_id, project_id, alias, repo_id, deleted_at
        FROM {{tables.workspaces}}
        WHERE workspace_id = $1
        """,
        UUID(payload.workspace_id),
    )
    if existing:
        # Deleted workspaces must re-register rather than silently revive.
        if existing.get("deleted_at") is not None:
            raise HTTPException(
                status_code=410,
                detail="Workspace was deleted. Run 'bdh :init' to re-register.",
            )
        # Project and alias are immutable; mismatches indicate stale or
        # corrupted client-side state (.beadhub), not a valid update.
        if existing.get("project_id") and existing["project_id"] != project_id:
            raise HTTPException(
                status_code=400,
                detail=f"Workspace {payload.workspace_id} does not belong to this project. "
                "This may indicate a corrupted .beadhub file. Try running 'bdh :init' again.",
            )
        if existing.get("alias") and existing["alias"] != payload.alias:
            raise HTTPException(
                status_code=409,
                detail=(
                    f"Alias mismatch for workspace {payload.workspace_id} "
                    f"(expected '{existing['alias']}', got '{payload.alias}'). "
                    "Run 'bdh :init' to re-register."
                ),
            )

    # Resolve repo_id without creating partial state in mismatch scenarios.
    repo_id: UUID
    if existing and existing.get("repo_id"):
        repo_id = existing["repo_id"]

        # Verify the recorded repo still exists and matches the payload's
        # origin before touching anything.
        canonical_origin = canonicalize_git_url(payload.repo_origin)
        repo_row = await server_db.fetch_one(
            """
            SELECT canonical_origin
            FROM {{tables.repos}}
            WHERE id = $1 AND project_id = $2 AND deleted_at IS NULL
            """,
            repo_id,
            project_id,
        )
        if not repo_row:
            raise HTTPException(
                status_code=410,
                detail="Workspace repository was deleted. Run 'bdh :init' to re-register.",
            )
        if repo_row.get("canonical_origin") != canonical_origin:
            raise HTTPException(
                status_code=400,
                detail=(
                    "Repo mismatch: workspace is registered with a different repository. "
                    "This may indicate a corrupted .beadhub file. Run 'bdh :init' again."
                ),
            )
    else:
        # New (or repo-less) workspace: reject alias collisions up front so
        # we do not create a repo row and then fail on the workspace insert.
        colliding_workspace = await check_alias_collision(
            db, redis, project_id, payload.workspace_id, payload.alias
        )
        if colliding_workspace:
            raise HTTPException(
                status_code=409,
                detail=f"Alias '{payload.alias}' is already used by another workspace in this project. "
                "Please choose a different alias and run 'bdh :init' again.",
            )

        # Ensure repo exists for this project (normalizes to canonical_origin).
        repo_id = await ensure_repo(db, project_id, payload.repo_origin)

    # Upsert workspace record first (SQL), then update presence (Redis; best-effort).
    try:
        await upsert_workspace(
            db,
            workspace_id=payload.workspace_id,
            project_id=project_id,
            repo_id=repo_id,
            alias=payload.alias,
            human_name=payload.human_name or "",
            role=payload.role,
            hostname=payload.hostname,
            workspace_path=payload.workspace_path,
        )
    except QueryError as e:
        # Race window: another workspace may have claimed the alias between
        # the collision check above and this upsert; surface it as a 409.
        if isinstance(e.__cause__, asyncpg.exceptions.UniqueViolationError):
            raise HTTPException(
                status_code=409,
                detail=f"Alias '{payload.alias}' is already used by another workspace in this project. "
                "Please choose a different alias and run 'bdh :init' again.",
            ) from e
        raise

    # Branch is only tracked when the client reports it; absence is not
    # treated as "no branch".
    if payload.current_branch is not None:
        await server_db.execute(
            """
            UPDATE {{tables.workspaces}}
            SET current_branch = $1, last_seen_at = NOW()
            WHERE workspace_id = $2
            """,
            payload.current_branch,
            UUID(payload.workspace_id),
        )

    project_row = await server_db.fetch_one(
        "SELECT slug FROM {{tables.projects}} WHERE id = $1",
        project_id,
    )
    project_slug = project_row["slug"] if project_row else None

    # Presence (Redis) is a cache of SQL: failures here are logged, never
    # propagated — the next heartbeat will converge.
    try:
        await update_agent_presence(
            redis,
            workspace_id=payload.workspace_id,
            alias=payload.alias,
            human_name=payload.human_name or "",
            project_id=str(project_id),
            project_slug=project_slug,
            repo_id=str(repo_id),
            program="bdh",
            model=None,
            current_branch=payload.current_branch,
            role=payload.role,
            ttl_seconds=settings.presence_ttl_seconds,
        )
    except Exception as e:
        logger.warning(
            "Heartbeat SQL upsert succeeded but presence update failed",
            extra={
                "workspace_id": payload.workspace_id,
                "project_id": str(project_id),
                "error": str(e),
            },
        )

    # Update aweb agent-level presence (best-effort, non-blocking).
    try:
        await update_aweb_agent_presence(
            redis,
            agent_id=payload.workspace_id,
            alias=payload.alias,
            project_id=str(project_id),
            ttl_seconds=settings.presence_ttl_seconds,
        )
    except Exception as e:
        logger.warning(
            "Aweb agent presence update failed",
            extra={
                "workspace_id": payload.workspace_id,
                "project_id": str(project_id),
                "error": str(e),
            },
        )

    # Track workspace for usage metering (Cloud mode only, best effort, non-blocking).
    # In Cloud deployments, usage_service is injected into app.state by Cloud middleware.
    usage_service = getattr(request.app.state, "usage_service", None)
    if usage_service:
        try:
            await usage_service.track_workspace(
                project_id=str(project_id),
                workspace_id=payload.workspace_id,
                presence_ttl_seconds=settings.presence_ttl_seconds,
            )
        except ValueError as e:
            # Distinguish configuration errors from transient failures in logs.
            logger.warning(
                "Usage metering track_workspace configuration error",
                extra={
                    "workspace_id": payload.workspace_id,
                    "project_id": str(project_id),
                    "error": str(e),
                },
            )
        except Exception as e:
            logger.warning(
                "Usage metering track_workspace failed",
                extra={
                    "workspace_id": payload.workspace_id,
                    "project_id": str(project_id),
                    "error": str(e),
                },
            )

    return WorkspaceHeartbeatResponse(ok=True, workspace_id=payload.workspace_id)
|
|
720
|
+
|
|
721
|
+
|
|
722
|
+
class DeleteWorkspaceResponse(BaseModel):
    """Response for DELETE /v1/workspaces/{workspace_id} endpoint."""

    workspace_id: str  # Id of the soft-deleted workspace
    alias: str  # Alias freed for reuse by the deletion
    deleted_at: str  # Deletion timestamp, serialized as a string
|
|
728
|
+
|
|
729
|
+
|
|
730
|
+
@router.delete("/{workspace_id}", response_model=DeleteWorkspaceResponse)
async def delete_workspace(
    workspace_id: str = Path(..., description="Workspace ID to delete"),
    request: Request = None,
    db: DatabaseInfra = Depends(get_db_infra),
) -> DeleteWorkspaceResponse:
    """
    Soft-delete a workspace by setting deleted_at timestamp.

    This marks the workspace as deleted without removing the database record.
    After deletion:
    - The workspace won't appear in list endpoints by default
    - The alias becomes available for reuse by other workspaces
    - The workspace can still be found with include_deleted=true
    - All bead claims for this workspace are released

    Raises:
        HTTPException: 422 if workspace_id is malformed; 404 if the workspace
            doesn't exist in the caller's project or is already deleted.
    """
    # Validate workspace_id format before touching the database.
    try:
        validated_id = validate_workspace_id(workspace_id)
    except ValueError as e:
        raise HTTPException(status_code=422, detail=str(e)) from e

    # Tenant isolation: project scope always comes from authentication.
    identity = await get_identity_from_auth(request, db)
    project_id = identity.project_id

    server_db = db.get_manager("server")

    existing = await server_db.fetch_one(
        """
        SELECT workspace_id, alias, project_id, deleted_at
        FROM {{tables.workspaces}}
        WHERE workspace_id = $1 AND project_id = $2
        """,
        UUID(validated_id),
        UUID(project_id),
    )

    if not existing:
        raise HTTPException(
            status_code=404,
            detail=f"Workspace {workspace_id} not found",
        )

    if existing["deleted_at"] is not None:
        # A second delete is reported as 404, same as a missing workspace.
        raise HTTPException(
            status_code=404,
            detail=f"Workspace {workspace_id} is already deleted",
        )

    # Soft-delete by setting deleted_at
    deleted_at = datetime.now(timezone.utc)
    await server_db.execute(
        """
        UPDATE {{tables.workspaces}}
        SET deleted_at = $2
        WHERE workspace_id = $1
        """,
        # Bind a UUID (not the raw string) for consistency with the SELECT above.
        UUID(validated_id),
        deleted_at,
    )

    # Release all bead claims for this workspace.
    # The CASCADE constraint only fires on hard delete, not soft-delete.
    await server_db.execute(
        """
        DELETE FROM {{tables.bead_claims}}
        WHERE workspace_id = $1
        """,
        UUID(validated_id),
    )

    return DeleteWorkspaceResponse(
        workspace_id=validated_id,
        alias=existing["alias"],
        deleted_at=deleted_at.isoformat(),
    )
|
|
808
|
+
|
|
809
|
+
|
|
810
|
+
class RestoreWorkspaceResponse(BaseModel):
    """Response for POST /v1/workspaces/{workspace_id}/restore endpoint."""

    workspace_id: str  # ID of the restored workspace
    alias: str  # Alias reclaimed by the workspace
    restored_at: str  # ISO-8601 timestamp written to updated_at on restore
|
|
816
|
+
|
|
817
|
+
|
|
818
|
+
@router.post("/{workspace_id}/restore", response_model=RestoreWorkspaceResponse)
async def restore_workspace(
    workspace_id: str = Path(..., description="Workspace ID to restore"),
    request: Request = None,
    db: DatabaseInfra = Depends(get_db_infra),
) -> RestoreWorkspaceResponse:
    """
    Restore a soft-deleted workspace by clearing deleted_at timestamp.

    This reverses a soft-delete, making the workspace active again.
    After restoration:
    - The workspace will appear in list endpoints
    - The alias is reclaimed (may conflict if reused)
    - Bead claims are NOT restored (were deleted on soft-delete)

    Raises:
        HTTPException: 422 if workspace_id is malformed; 404 if the workspace
            doesn't exist; 409 if it is not deleted or its alias is now taken
            by another active workspace.
    """
    # Validate workspace_id format before touching the database.
    try:
        validated_id = validate_workspace_id(workspace_id)
    except ValueError as e:
        raise HTTPException(status_code=422, detail=str(e)) from e

    # Tenant isolation: project scope always comes from authentication.
    identity = await get_identity_from_auth(request, db)
    project_id = identity.project_id

    server_db = db.get_manager("server")

    existing = await server_db.fetch_one(
        """
        SELECT workspace_id, alias, project_id, deleted_at
        FROM {{tables.workspaces}}
        WHERE workspace_id = $1 AND project_id = $2
        """,
        UUID(validated_id),
        UUID(project_id),
    )

    if not existing:
        raise HTTPException(
            status_code=404,
            detail=f"Workspace {workspace_id} not found",
        )

    if existing["deleted_at"] is None:
        raise HTTPException(
            status_code=409,
            detail=f"Workspace {workspace_id} is already active (not deleted)",
        )

    # Check if alias is now used by another active workspace
    alias_conflict = await server_db.fetch_one(
        """
        SELECT workspace_id FROM {{tables.workspaces}}
        WHERE project_id = $1
          AND alias = $2
          AND workspace_id != $3
          AND deleted_at IS NULL
        """,
        existing["project_id"],
        existing["alias"],
        # Bind a UUID (not the raw string) for consistency with the SELECT above.
        UUID(validated_id),
    )

    if alias_conflict:
        raise HTTPException(
            status_code=409,
            detail=f"Cannot restore: alias '{existing['alias']}' is now used by another workspace",
        )

    # Restore by clearing deleted_at
    restored_at = datetime.now(timezone.utc)
    await server_db.execute(
        """
        UPDATE {{tables.workspaces}}
        SET deleted_at = NULL, updated_at = $2
        WHERE workspace_id = $1
        """,
        UUID(validated_id),
        restored_at,
    )

    return RestoreWorkspaceResponse(
        workspace_id=validated_id,
        alias=existing["alias"],
        restored_at=restored_at.isoformat(),
    )
|
|
906
|
+
|
|
907
|
+
|
|
908
|
+
class Claim(BaseModel):
    """A bead claim - represents a workspace working on a bead.

    The apex (apex_id/apex_title/apex_type) is stored on the claim to avoid
    recursive read-time computation. Titles/types are joined from beads_issues.
    """

    bead_id: str  # ID of the claimed bead
    title: Optional[str] = None  # Bead title joined from beads_issues (None if not synced)
    claimed_at: str  # ISO-8601 timestamp; empty string when the DB value is NULL
    apex_id: Optional[str] = None  # Root of the bead's parent chain, stored on the claim
    apex_title: Optional[str] = None  # Joined from beads_issues for the apex bead
    apex_type: Optional[str] = None  # Issue type of the apex bead (joined)
|
|
921
|
+
|
|
922
|
+
|
|
923
|
+
class WorkspaceInfo(BaseModel):
    """Workspace information from database with optional presence data."""

    workspace_id: str  # UUID as a string
    alias: str  # Short name, unique among non-deleted workspaces in a project
    human_name: Optional[str] = None  # Workspace owner, if set
    project_id: Optional[str] = None
    project_slug: Optional[str] = None
    program: Optional[str] = None  # Populated from Redis presence only
    model: Optional[str] = None  # Populated from Redis presence only
    repo: Optional[str] = None  # Canonical origin of the linked repo, if any
    branch: Optional[str] = None  # Presence value wins over DB current_branch
    member_email: Optional[str] = None  # Populated from Redis presence only
    role: Optional[str] = None
    hostname: Optional[str] = None  # For gone workspace detection
    workspace_path: Optional[str] = None  # For gone workspace detection
    apex_id: Optional[str] = None  # Apex from first claim (root of parent chain)
    apex_title: Optional[str] = None
    apex_type: Optional[str] = None  # Bead type: epic, feature, task, bug, chore
    focus_apex_id: Optional[str] = None
    focus_apex_title: Optional[str] = None
    focus_apex_type: Optional[str] = None
    focus_apex_repo_name: Optional[str] = None
    focus_apex_branch: Optional[str] = None
    focus_updated_at: Optional[str] = None
    status: str  # "active", "idle", "offline"
    last_seen: Optional[str] = None
    deleted_at: Optional[str] = None  # ISO timestamp if soft-deleted
    # NOTE(review): mutable [] default is safe only if BaseModel is Pydantic
    # (defaults are copied per instance) — do not imitate in plain classes.
    claims: List[Claim] = []  # All active bead claims for this workspace (with apex computed)
|
|
952
|
+
|
|
953
|
+
|
|
954
|
+
class ListWorkspacesResponse(BaseModel):
    """Response for listing workspaces."""

    workspaces: List[WorkspaceInfo]
    has_more: bool = False  # True when another page of results exists
    next_cursor: Optional[str] = None  # Opaque cursor for the next page (None on last page)
|
|
960
|
+
|
|
961
|
+
|
|
962
|
+
def _build_workspace_claims_query(placeholders: str) -> str:
    """Return the SQL that fetches all claims (with joined titles) for a set of workspaces.

    `placeholders` is a pre-built "$1, $2, ..." list matching the workspace IDs
    the caller will bind. The quadruple braces survive the f-string as `{{...}}`,
    which the DB layer later substitutes with real table names.
    """
    return f"""
        SELECT
            c.workspace_id,
            c.bead_id AS bead_id,
            c.claimed_at,
            c.apex_bead_id,
            c.apex_repo_name,
            c.apex_branch,
            claim_issue.title AS claim_title,
            apex_issue.title AS apex_title,
            apex_issue.issue_type AS apex_type
        FROM {{{{tables.bead_claims}}}} c
        LEFT JOIN LATERAL (
            SELECT title
            FROM beads.beads_issues
            WHERE project_id = c.project_id AND bead_id = c.bead_id
            ORDER BY synced_at DESC
            LIMIT 1
        ) claim_issue ON true
        LEFT JOIN LATERAL (
            SELECT title, issue_type
            FROM beads.beads_issues
            WHERE c.apex_bead_id IS NOT NULL
              AND project_id = c.project_id
              AND bead_id = c.apex_bead_id
              AND repo = c.apex_repo_name
              AND branch = c.apex_branch
            ORDER BY synced_at DESC
            LIMIT 1
        ) apex_issue ON true
        WHERE c.workspace_id IN ({placeholders})
        ORDER BY c.workspace_id, c.claimed_at DESC
    """
|
|
996
|
+
|
|
997
|
+
|
|
998
|
+
def _to_iso(value: Optional[datetime]) -> Optional[str]:
|
|
999
|
+
if not value:
|
|
1000
|
+
return None
|
|
1001
|
+
return value.isoformat()
|
|
1002
|
+
|
|
1003
|
+
|
|
1004
|
+
def _timestamp(value: Optional[datetime] | Optional[str]) -> float:
|
|
1005
|
+
if not value:
|
|
1006
|
+
return 0.0
|
|
1007
|
+
if isinstance(value, datetime):
|
|
1008
|
+
return value.timestamp()
|
|
1009
|
+
try:
|
|
1010
|
+
return datetime.fromisoformat(value).timestamp()
|
|
1011
|
+
except ValueError:
|
|
1012
|
+
return 0.0
|
|
1013
|
+
|
|
1014
|
+
|
|
1015
|
+
@router.get("", response_model=ListWorkspacesResponse)
async def list_workspaces(
    request: Request,
    human_name: Optional[str] = Query(None, description="Filter by workspace owner", max_length=64),
    repo: Optional[str] = Query(
        None, description="Filter by repo canonical origin", max_length=255
    ),
    alias: Optional[str] = Query(None, description="Filter by workspace alias", max_length=64),
    hostname: Optional[str] = Query(None, description="Filter by machine hostname", max_length=255),
    include_deleted: bool = Query(False, description="Include soft-deleted workspaces"),
    include_claims: bool = Query(False, description="Include active bead claims"),
    include_presence: bool = Query(True, description="Include Redis presence data"),
    limit: Optional[int] = Query(None, description="Maximum items per page", ge=1, le=200),
    cursor: Optional[str] = Query(None, description="Pagination cursor from previous response"),
    db_infra: DatabaseInfra = Depends(get_db_infra),
    redis: Redis = Depends(get_redis),
) -> ListWorkspacesResponse:
    """
    List all registered workspaces from database with cursor-based pagination.

    Returns workspace information with optional presence/claim enrichment.
    Workspaces without active presence show status='offline'.
    Deleted workspaces are excluded by default (use include_deleted=true to show).

    Tenant isolation:
    - Always derived from authentication (project API key or proxy-injected internal context).

    Args:
        limit: Maximum number of workspaces to return (default 50, max 200).
        cursor: Pagination cursor from previous response for fetching next page.

    Returns:
        List of workspaces ordered by most recently updated first.
        Includes has_more and next_cursor for pagination.

    Use /v1/workspaces/online for only currently active workspaces.
    """
    project_id = await get_project_from_auth(request, db_infra)

    # Validate pagination params
    try:
        validated_limit, cursor_data = validate_pagination_params(limit, cursor)
    except ValueError as e:
        raise HTTPException(status_code=422, detail=str(e))

    server_db = db_infra.get_manager("server")

    # Build query with optional filters
    # Note: agent workspaces have repo_id, dashboard workspaces don't
    # The lateral join resolves the focus apex bead's title/type (most recent sync wins).
    query = """
        SELECT
            w.workspace_id,
            w.alias,
            w.human_name,
            w.current_branch,
            w.project_id,
            w.role,
            w.hostname,
            w.workspace_path,
            w.last_seen_at,
            w.updated_at,
            w.deleted_at,
            w.focus_apex_bead_id,
            w.focus_apex_repo_name,
            w.focus_apex_branch,
            w.focus_updated_at,
            focus_issue.title AS focus_apex_title,
            focus_issue.issue_type AS focus_apex_type,
            p.slug as project_slug,
            r.canonical_origin as repo
        FROM {{tables.workspaces}} w
        JOIN {{tables.projects}} p ON w.project_id = p.id AND p.deleted_at IS NULL
        LEFT JOIN {{tables.repos}} r ON w.repo_id = r.id AND r.deleted_at IS NULL
        LEFT JOIN LATERAL (
            SELECT title, issue_type
            FROM beads.beads_issues
            WHERE w.focus_apex_bead_id IS NOT NULL
              AND project_id = w.project_id
              AND bead_id = w.focus_apex_bead_id
              AND repo = w.focus_apex_repo_name
              AND branch = w.focus_apex_branch
            ORDER BY synced_at DESC
            LIMIT 1
        ) focus_issue ON true
        WHERE 1=1
    """
    # Positional params for $N placeholders; param_idx tracks the next slot.
    params: list = []
    param_idx = 1

    # Tenant scoping is unconditional — always the first bound parameter.
    query += f" AND w.project_id = ${param_idx}"
    params.append(uuid_module.UUID(project_id))
    param_idx += 1

    if human_name:
        query += f" AND w.human_name = ${param_idx}"
        params.append(human_name)
        param_idx += 1

    if repo:
        if not is_valid_canonical_origin(repo):
            raise HTTPException(
                status_code=422,
                detail=f"Invalid repo format: {repo[:50]}",
            )
        query += f" AND r.canonical_origin = ${param_idx}"
        params.append(repo)
        param_idx += 1

    if alias:
        if not is_valid_alias(alias):
            raise HTTPException(
                status_code=422,
                detail="Invalid alias: must be alphanumeric with hyphens/underscores, 1-64 chars",
            )
        query += f" AND w.alias = ${param_idx}"
        params.append(alias)
        param_idx += 1

    if hostname:
        # Validate hostname (same as RegisterWorkspaceRequest)
        if "\x00" in hostname or any(ord(c) < 32 for c in hostname):
            raise HTTPException(
                status_code=422,
                detail="Invalid hostname: contains null bytes or control characters",
            )
        query += f" AND w.hostname = ${param_idx}"
        params.append(hostname)
        param_idx += 1

    # Filter deleted workspaces by default
    if not include_deleted:
        query += " AND w.deleted_at IS NULL"

    # Apply cursor (updated_at < cursor_timestamp for DESC order)
    if cursor_data and "updated_at" in cursor_data:
        try:
            cursor_timestamp = datetime.fromisoformat(cursor_data["updated_at"])
        except (ValueError, TypeError) as e:
            raise HTTPException(status_code=422, detail=f"Invalid cursor timestamp: {e}")
        query += f" AND w.updated_at < ${param_idx}"
        params.append(cursor_timestamp)
        param_idx += 1

    query += " ORDER BY w.updated_at DESC"

    # Fetch limit + 1 to detect has_more
    query += f" LIMIT ${param_idx}"
    params.append(validated_limit + 1)
    param_idx += 1  # no further placeholders; kept for symmetry with the pattern above

    rows = await server_db.fetch_all(query, *params)

    # Check if there are more results
    has_more = len(rows) > validated_limit
    rows = rows[:validated_limit]  # Trim to requested limit

    workspace_ids = [row["workspace_id"] for row in rows]  # UUIDs from database
    workspace_id_strings = [str(ws_id) for ws_id in workspace_ids]

    # Presence enrichment (Redis) keyed by workspace_id string.
    presence_map: Dict[str, dict] = {}
    if include_presence and workspace_id_strings:
        presences = await list_agent_presences_by_workspace_ids(redis, workspace_id_strings)
        presence_map = {str(p["workspace_id"]): p for p in presences if p.get("workspace_id")}

    # Claim enrichment: one batched query for all page workspaces, grouped per workspace.
    claims_map: Dict[str, List[Claim]] = {}
    if include_claims and workspace_ids:
        placeholders = ", ".join(f"${i}" for i in range(1, len(workspace_ids) + 1))
        claim_rows = await server_db.fetch_all(
            _build_workspace_claims_query(placeholders),
            *workspace_ids,
        )
        for cr in claim_rows:
            ws_id = str(cr["workspace_id"])
            claim = Claim(
                bead_id=cr["bead_id"],
                title=cr["claim_title"],
                claimed_at=cr["claimed_at"].isoformat() if cr["claimed_at"] else "",
                apex_id=cr["apex_bead_id"],
                apex_title=cr["apex_title"],
                apex_type=cr["apex_type"],
            )
            if ws_id not in claims_map:
                claims_map[ws_id] = []
            claims_map[ws_id].append(claim)

    workspaces: List[WorkspaceInfo] = []
    for row in rows:
        workspace_id = str(row["workspace_id"])
        presence = presence_map.get(workspace_id)
        workspace_claims = claims_map.get(workspace_id, []) if include_claims else []

        # Extract apex from first claim (most recent by claimed_at)
        first_apex_id = workspace_claims[0].apex_id if workspace_claims else None
        first_apex_title = workspace_claims[0].apex_title if workspace_claims else None
        first_apex_type = workspace_claims[0].apex_type if workspace_claims else None

        # DB values are the baseline; live presence (below) overrides them.
        role = row["role"]
        status = "offline"
        last_seen = _to_iso(row["last_seen_at"])
        program = None
        model = None
        member_email = None
        branch = row["current_branch"]

        if include_presence and presence:
            program = presence.get("program")
            model = presence.get("model")
            member_email = presence.get("member_email")
            branch = presence.get("current_branch") or branch
            role = presence.get("role") or role
            status = presence.get("status") or "active"
            last_seen = presence.get("last_seen") or last_seen

        workspaces.append(
            WorkspaceInfo(
                workspace_id=workspace_id,
                alias=row["alias"],
                human_name=row["human_name"],
                project_id=str(row["project_id"]),
                project_slug=row["project_slug"],
                program=program,
                model=model,
                repo=row["repo"],
                branch=branch,
                member_email=member_email,
                role=role,
                hostname=row["hostname"],
                workspace_path=row["workspace_path"],
                apex_id=first_apex_id,
                apex_title=first_apex_title,
                apex_type=first_apex_type,
                focus_apex_id=row["focus_apex_bead_id"],
                focus_apex_title=row["focus_apex_title"],
                focus_apex_type=row["focus_apex_type"],
                focus_apex_repo_name=row["focus_apex_repo_name"],
                focus_apex_branch=row["focus_apex_branch"],
                focus_updated_at=_to_iso(row["focus_updated_at"]),
                status=status,
                last_seen=last_seen,
                deleted_at=_to_iso(row["deleted_at"]),
                claims=workspace_claims,
            )
        )

    # Generate next_cursor if there are more results
    next_cursor = None
    if has_more and rows:
        last_row = rows[-1]
        next_cursor = encode_cursor({"updated_at": last_row["updated_at"].isoformat()})

    return ListWorkspacesResponse(workspaces=workspaces, has_more=has_more, next_cursor=next_cursor)
|
|
1266
|
+
|
|
1267
|
+
|
|
1268
|
+
@router.get("/team", response_model=ListWorkspacesResponse)
async def list_team_workspaces(
    request: Request,
    human_name: Optional[str] = Query(None, description="Filter by workspace owner", max_length=64),
    repo: Optional[str] = Query(
        None, description="Filter by repo canonical origin", max_length=255
    ),
    include_claims: bool = Query(True, description="Include active bead claims"),
    include_presence: bool = Query(True, description="Include Redis presence data"),
    only_with_claims: bool = Query(True, description="Only return workspaces with active claims"),
    always_include_workspace_id: Optional[str] = Query(
        None,
        description="Ensure a workspace is included even if filtered out",
    ),
    limit: int = Query(
        TEAM_STATUS_DEFAULT_LIMIT,
        ge=1,
        le=TEAM_STATUS_MAX_LIMIT,
        description="Maximum workspaces to return",
    ),
    db_infra: DatabaseInfra = Depends(get_db_infra),
    redis: Redis = Depends(get_redis),
) -> ListWorkspacesResponse:
    """
    List a bounded team-status view of workspaces for coordination.

    This endpoint is optimized for CLI/dashboard usage and always returns a
    limited, prioritized set of workspaces.
    """
    project_id = await get_project_from_auth(request, db_infra)

    server_db = db_infra.get_manager("server")

    # $1 is always the project; optional filters start at $2.
    params: list = [uuid_module.UUID(project_id)]
    param_idx = 2
    claim_stats_where = ""  # NOTE(review): dead assignment — immediately overwritten below
    claim_stats_where = "WHERE project_id = $1"

    # f-string: {{{{...}}}} emits {{...}} for the table-name templating layer;
    # the CTE pre-aggregates per-workspace claim counts and recency.
    query = f"""
        WITH claim_stats AS (
            SELECT workspace_id,
                   COUNT(*) AS claim_count,
                   MAX(claimed_at) AS last_claimed_at
            FROM {{{{tables.bead_claims}}}}
            {claim_stats_where}
            GROUP BY workspace_id
        )
        SELECT
            w.workspace_id,
            w.alias,
            w.human_name,
            w.current_branch,
            w.project_id,
            w.role,
            w.hostname,
            w.workspace_path,
            w.last_seen_at,
            w.focus_apex_bead_id,
            w.focus_apex_repo_name,
            w.focus_apex_branch,
            w.focus_updated_at,
            focus_issue.title AS focus_apex_title,
            focus_issue.issue_type AS focus_apex_type,
            p.slug as project_slug,
            r.canonical_origin as repo,
            COALESCE(cs.claim_count, 0) AS claim_count,
            cs.last_claimed_at
        FROM {{{{tables.workspaces}}}} w
        JOIN {{{{tables.projects}}}} p ON w.project_id = p.id
        LEFT JOIN {{{{tables.repos}}}} r ON w.repo_id = r.id
        LEFT JOIN claim_stats cs ON cs.workspace_id = w.workspace_id
        LEFT JOIN LATERAL (
            SELECT title, issue_type
            FROM beads.beads_issues
            WHERE w.focus_apex_bead_id IS NOT NULL
              AND project_id = w.project_id
              AND bead_id = w.focus_apex_bead_id
              AND repo = w.focus_apex_repo_name
              AND branch = w.focus_apex_branch
            ORDER BY synced_at DESC
            LIMIT 1
        ) focus_issue ON true
        WHERE 1=1
    """

    query += " AND w.project_id = $1"

    if human_name:
        query += f" AND w.human_name = ${param_idx}"
        params.append(human_name)
        param_idx += 1

    if repo:
        if not is_valid_canonical_origin(repo):
            raise HTTPException(
                status_code=422,
                detail=f"Invalid repo format: {repo[:50]}",
            )
        query += f" AND r.canonical_origin = ${param_idx}"
        params.append(repo)
        param_idx += 1

    query += " AND w.deleted_at IS NULL"

    if only_with_claims:
        query += " AND COALESCE(cs.claim_count, 0) > 0"

    # Over-fetch when presence is requested: final ranking depends on live
    # Redis data, so SQL ordering alone can't pick the top `limit` rows.
    candidate_limit = limit
    if include_presence:
        candidate_limit = min(
            limit * TEAM_STATUS_CANDIDATE_MULTIPLIER,
            TEAM_STATUS_CANDIDATE_MAX,
        )

    query += """
        ORDER BY
            (COALESCE(cs.claim_count, 0) > 0) DESC,
            w.last_seen_at DESC NULLS LAST,
            cs.last_claimed_at DESC NULLS LAST,
            w.alias ASC
    """
    query += f" LIMIT ${param_idx}"
    params.append(candidate_limit)

    rows = await server_db.fetch_all(query, *params)

    if always_include_workspace_id:
        try:
            validated_id = validate_workspace_id(always_include_workspace_id)
        except ValueError as e:
            raise HTTPException(status_code=422, detail=str(e))

        # Fetch the pinned workspace separately only if the main query missed it.
        if validated_id not in {str(row["workspace_id"]) for row in rows}:
            id_params: list = []
            id_param_idx = 1
            id_query = """
                SELECT
                    w.workspace_id,
                    w.alias,
                    w.human_name,
                    w.current_branch,
                    w.project_id,
                    w.role,
                    w.hostname,
                    w.workspace_path,
                    w.last_seen_at,
                    w.focus_apex_bead_id,
                    w.focus_apex_repo_name,
                    w.focus_apex_branch,
                    w.focus_updated_at,
                    focus_issue.title AS focus_apex_title,
                    focus_issue.issue_type AS focus_apex_type,
                    p.slug as project_slug,
                    r.canonical_origin as repo,
                    COALESCE(cs.claim_count, 0) AS claim_count,
                    cs.last_claimed_at
                FROM {{tables.workspaces}} w
                JOIN {{tables.projects}} p ON w.project_id = p.id AND p.deleted_at IS NULL
                LEFT JOIN {{tables.repos}} r ON w.repo_id = r.id AND r.deleted_at IS NULL
                LEFT JOIN (
                    SELECT workspace_id,
                           COUNT(*) AS claim_count,
                           MAX(claimed_at) AS last_claimed_at
                    FROM {{tables.bead_claims}}
                    GROUP BY workspace_id
                ) cs ON cs.workspace_id = w.workspace_id
                LEFT JOIN LATERAL (
                    SELECT title, issue_type
                    FROM beads.beads_issues
                    WHERE w.focus_apex_bead_id IS NOT NULL
                      AND project_id = w.project_id
                      AND bead_id = w.focus_apex_bead_id
                      AND repo = w.focus_apex_repo_name
                      AND branch = w.focus_apex_branch
                    ORDER BY synced_at DESC
                    LIMIT 1
                ) focus_issue ON true
                WHERE w.workspace_id = $1
            """
            id_params.append(uuid_module.UUID(validated_id))
            id_param_idx = 2

            # always_include_workspace_id is still scoped to the authenticated project
            id_query += " AND w.project_id = $2"
            id_params.append(uuid_module.UUID(project_id))
            id_param_idx = 3

            if human_name:
                id_query += f" AND w.human_name = ${id_param_idx}"
                id_params.append(human_name)
                id_param_idx += 1

            if repo:
                id_query += f" AND r.canonical_origin = ${id_param_idx}"
                id_params.append(repo)
                id_param_idx += 1

            id_query += " AND w.deleted_at IS NULL"

            extra_row = await server_db.fetch_one(id_query, *id_params)
            if extra_row:
                rows.append(extra_row)

    workspace_ids = [row["workspace_id"] for row in rows]
    workspace_id_strings = [str(ws_id) for ws_id in workspace_ids]

    # Presence enrichment (Redis) keyed by workspace_id string.
    presence_map: Dict[str, dict] = {}
    if include_presence and workspace_id_strings:
        presences = await list_agent_presences_by_workspace_ids(redis, workspace_id_strings)
        presence_map = {str(p["workspace_id"]): p for p in presences if p.get("workspace_id")}

    # Claim enrichment: one batched query, grouped per workspace.
    claims_map: Dict[str, List[Claim]] = {}
    if include_claims and workspace_ids:
        placeholders = ", ".join(f"${i}" for i in range(1, len(workspace_ids) + 1))
        claim_rows = await server_db.fetch_all(
            _build_workspace_claims_query(placeholders),
            *workspace_ids,
        )
        for cr in claim_rows:
            ws_id = str(cr["workspace_id"])
            claim = Claim(
                bead_id=cr["bead_id"],
                title=cr["claim_title"],
                claimed_at=cr["claimed_at"].isoformat() if cr["claimed_at"] else "",
                apex_id=cr["apex_bead_id"],
                apex_title=cr["apex_title"],
                apex_type=cr["apex_type"],
            )
            if ws_id not in claims_map:
                claims_map[ws_id] = []
            claims_map[ws_id].append(claim)

    # Each entry carries its sort keys:
    # (info, has_claims, last_seen_ts, last_claimed_ts, is_online, claim_count)
    entries: List[tuple[WorkspaceInfo, int, float, float, int, int]] = []
    for row in rows:
        workspace_id = str(row["workspace_id"])
        presence = presence_map.get(workspace_id) if include_presence else None
        workspace_claims = claims_map.get(workspace_id, []) if include_claims else []

        # Apex from first claim (claims are ordered most-recent first).
        first_apex_id = workspace_claims[0].apex_id if workspace_claims else None
        first_apex_title = workspace_claims[0].apex_title if workspace_claims else None
        first_apex_type = workspace_claims[0].apex_type if workspace_claims else None

        # DB values are the baseline; live presence (below) overrides them.
        role = row["role"]
        status = "offline"
        last_seen = _to_iso(row["last_seen_at"])
        last_seen_ts = _timestamp(row["last_seen_at"])
        program = None
        model = None
        member_email = None
        branch = row["current_branch"]

        if include_presence and presence:
            program = presence.get("program")
            model = presence.get("model")
            member_email = presence.get("member_email")
            branch = presence.get("current_branch") or branch
            role = presence.get("role") or role
            status = presence.get("status") or "active"
            last_seen = presence.get("last_seen") or last_seen
            last_seen_ts = _timestamp(presence.get("last_seen")) or last_seen_ts

        claim_count = int(row["claim_count"] or 0)
        last_claimed_ts = _timestamp(row["last_claimed_at"])
        has_claims = 1 if claim_count > 0 else 0
        is_online = 1 if (include_presence and presence) else 0

        workspace_info = WorkspaceInfo(
            workspace_id=workspace_id,
            alias=row["alias"],
            human_name=row["human_name"],
            project_id=str(row["project_id"]),
            project_slug=row["project_slug"],
            program=program,
            model=model,
            repo=row["repo"],
            branch=branch,
            member_email=member_email,
            role=role,
            hostname=row["hostname"],
            workspace_path=row["workspace_path"],
            apex_id=first_apex_id,
            apex_title=first_apex_title,
            apex_type=first_apex_type,
            focus_apex_id=row["focus_apex_bead_id"],
            focus_apex_title=row["focus_apex_title"],
            focus_apex_type=row["focus_apex_type"],
            focus_apex_repo_name=row["focus_apex_repo_name"],
            focus_apex_branch=row["focus_apex_branch"],
            focus_updated_at=_to_iso(row["focus_updated_at"]),
            status=status,
            last_seen=last_seen,
            claims=workspace_claims,
        )
        entries.append(
            (
                workspace_info,
                has_claims,
                last_seen_ts,
                last_claimed_ts,
                is_online,
                claim_count,
            )
        )

    # Rank: claimed first, then online, then most recently seen / claimed,
    # with alias as a stable alphabetical tiebreaker.
    entries.sort(
        key=lambda item: (
            -item[1],
            -item[4],
            -item[2],
            -item[3],
            item[0].alias,
        )
    )

    workspaces = [entry[0] for entry in entries][:limit]

    # /team endpoint doesn't support cursor-based pagination (uses complex sorting)
    return ListWorkspacesResponse(workspaces=workspaces, has_more=False)
|
|
1586
|
+
|
|
1587
|
+
|
|
1588
|
+
@router.get("/online", response_model=ListWorkspacesResponse)
async def list_online_workspaces(
    request: Request,
    human_name: Optional[str] = Query(None, description="Filter by workspace owner", max_length=64),
    redis: Redis = Depends(get_redis),
    db_infra: DatabaseInfra = Depends(get_db_infra),
) -> ListWorkspacesResponse:
    """
    List only currently online workspaces (active presence in Redis).

    This is a filtered view showing workspaces with recent activity.
    Presence expires after ~5 minutes of inactivity.

    For all registered workspaces (including offline), use GET /v1/workspaces.
    """
    project_id = await get_project_from_auth(request, db_infra)
    presences = await list_agent_presences(redis)

    online: List[WorkspaceInfo] = []
    for record in presences:
        ws_id = record.get("workspace_id")
        ws_alias = record.get("alias")

        # Skip malformed presence records (missing identity fields) and
        # records that belong to a different project than the caller's.
        if not ws_id or not ws_alias:
            continue
        if record.get("project_id") != project_id:
            continue

        # Apply the optional owner filter.
        if human_name and record.get("human_name") != human_name:
            continue

        online.append(
            WorkspaceInfo(
                workspace_id=ws_id,
                alias=ws_alias,
                human_name=record.get("human_name"),
                project_slug=record.get("project_slug"),
                program=record.get("program"),
                model=record.get("model"),
                # Presence records do not carry the repo name.
                repo=None,
                branch=record.get("current_branch"),
                member_email=record.get("member_email"),
                role=record.get("role") or None,
                status=record.get("status") or "unknown",
                last_seen=record.get("last_seen") or "",
            )
        )

    # Most recently seen workspaces first (ISO timestamps sort lexically).
    workspaces = sorted(online, key=lambda w: w.last_seen or "", reverse=True)

    # /online returns every currently-online workspace; no pagination needed.
    return ListWorkspacesResponse(workspaces=workspaces, has_more=False)
|