beadhub 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- beadhub/__init__.py +12 -0
- beadhub/api.py +260 -0
- beadhub/auth.py +101 -0
- beadhub/aweb_context.py +65 -0
- beadhub/aweb_introspection.py +70 -0
- beadhub/beads_sync.py +514 -0
- beadhub/cli.py +330 -0
- beadhub/config.py +65 -0
- beadhub/db.py +129 -0
- beadhub/defaults/invariants/01-tracking-bdh-only.md +11 -0
- beadhub/defaults/invariants/02-communication-mail-first.md +36 -0
- beadhub/defaults/invariants/03-communication-chat.md +60 -0
- beadhub/defaults/invariants/04-identity-no-impersonation.md +17 -0
- beadhub/defaults/invariants/05-collaborate.md +12 -0
- beadhub/defaults/roles/backend.md +55 -0
- beadhub/defaults/roles/coordinator.md +44 -0
- beadhub/defaults/roles/frontend.md +77 -0
- beadhub/defaults/roles/implementer.md +73 -0
- beadhub/defaults/roles/reviewer.md +56 -0
- beadhub/defaults/roles/startup-expert.md +93 -0
- beadhub/defaults.py +262 -0
- beadhub/events.py +704 -0
- beadhub/internal_auth.py +121 -0
- beadhub/jsonl.py +68 -0
- beadhub/logging.py +62 -0
- beadhub/migrations/beads/001_initial.sql +70 -0
- beadhub/migrations/beads/002_search_indexes.sql +20 -0
- beadhub/migrations/server/001_initial.sql +279 -0
- beadhub/names.py +33 -0
- beadhub/notifications.py +275 -0
- beadhub/pagination.py +125 -0
- beadhub/presence.py +495 -0
- beadhub/rate_limit.py +152 -0
- beadhub/redis_client.py +11 -0
- beadhub/roles.py +35 -0
- beadhub/routes/__init__.py +1 -0
- beadhub/routes/agents.py +303 -0
- beadhub/routes/bdh.py +655 -0
- beadhub/routes/beads.py +778 -0
- beadhub/routes/claims.py +141 -0
- beadhub/routes/escalations.py +471 -0
- beadhub/routes/init.py +348 -0
- beadhub/routes/mcp.py +338 -0
- beadhub/routes/policies.py +833 -0
- beadhub/routes/repos.py +538 -0
- beadhub/routes/status.py +568 -0
- beadhub/routes/subscriptions.py +362 -0
- beadhub/routes/workspaces.py +1642 -0
- beadhub/workspace_config.py +202 -0
- beadhub-0.1.0.dist-info/METADATA +254 -0
- beadhub-0.1.0.dist-info/RECORD +54 -0
- beadhub-0.1.0.dist-info/WHEEL +4 -0
- beadhub-0.1.0.dist-info/entry_points.txt +2 -0
- beadhub-0.1.0.dist-info/licenses/LICENSE +21 -0
beadhub/routes/beads.py
ADDED
|
@@ -0,0 +1,778 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
import logging
|
|
5
|
+
from datetime import datetime
|
|
6
|
+
from typing import List, Optional
|
|
7
|
+
|
|
8
|
+
from fastapi import APIRouter, Body, Depends, HTTPException, Path, Query, Request
|
|
9
|
+
from pydantic import BaseModel, Field, field_validator
|
|
10
|
+
from redis.asyncio import Redis
|
|
11
|
+
|
|
12
|
+
from beadhub.auth import validate_workspace_id
|
|
13
|
+
from beadhub.aweb_context import resolve_aweb_identity
|
|
14
|
+
from beadhub.aweb_introspection import get_project_from_auth
|
|
15
|
+
|
|
16
|
+
from ..beads_sync import (
|
|
17
|
+
DEFAULT_BRANCH,
|
|
18
|
+
BeadsSyncResult,
|
|
19
|
+
_sync_issues_to_db,
|
|
20
|
+
is_valid_branch_name,
|
|
21
|
+
is_valid_canonical_origin,
|
|
22
|
+
validate_issues_from_list,
|
|
23
|
+
)
|
|
24
|
+
from ..db import DatabaseInfra, get_db_infra
|
|
25
|
+
from ..jsonl import JSONLParseError, parse_jsonl
|
|
26
|
+
from ..notifications import process_notification_outbox, record_notification_intents
|
|
27
|
+
from ..pagination import encode_cursor, validate_pagination_params
|
|
28
|
+
from ..redis_client import get_redis
|
|
29
|
+
|
|
30
|
+
# Module-level logger, named after this module per stdlib logging convention.
logger = logging.getLogger(__name__)

# Closed sets of accepted values for the `type` and `status` query filters;
# used to reject invalid filter values with 422 before any DB query runs.
VALID_ISSUE_TYPES = {"bug", "feature", "task", "epic", "chore"}
VALID_STATUSES = {"open", "in_progress", "closed"}
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
def _escape_like_pattern(s: str) -> str:
|
|
37
|
+
r"""Escape SQL LIKE metacharacters in user input.
|
|
38
|
+
|
|
39
|
+
Prevents user input containing %, _, or \ from being interpreted
|
|
40
|
+
as wildcards in LIKE/ILIKE patterns.
|
|
41
|
+
"""
|
|
42
|
+
return s.replace("\\", r"\\").replace("%", r"\%").replace("_", r"\_")
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
# All endpoints in this module are mounted under the /v1/beads prefix.
router = APIRouter(prefix="/v1/beads", tags=["beads"])
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
class BeadsUploadRequest(BaseModel):
    """Request body for uploading beads issues via POST /v1/beads/upload.

    Attributes:
        repo: Canonical origin identifier (e.g. github.com/org/repo);
            validated by ``validate_repo`` below.
        branch: Optional git branch; callers apply DEFAULT_BRANCH when None.
        issues: Raw issue dicts; deeper validation happens later via
            ``validate_issues_from_list`` in the endpoint.
    """

    repo: str = Field(
        ..., min_length=1, max_length=255, description="Canonical origin (e.g. github.com/org/repo)"
    )
    branch: Optional[str] = Field(None, description="Git branch name (default: 'main')")
    issues: List[dict] = Field(..., description="List of issue objects to sync")

    @field_validator("repo")
    @classmethod
    def validate_repo(cls, v: str) -> str:
        """Reject repo values that are not in canonical-origin form.

        Raises:
            ValueError: surfaced by pydantic as a 422 validation error.
        """
        if not is_valid_canonical_origin(v):
            raise ValueError(
                "Invalid repository: must be canonical origin format like github.com/org/repo"
            )
        return v
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
@router.post("/upload")
async def beads_upload(
    request: Request,
    payload: BeadsUploadRequest,
    db_infra: DatabaseInfra = Depends(get_db_infra),
    # NOTE(review): `redis` is injected but not referenced in this handler —
    # confirm whether it is kept for interface symmetry or can be dropped.
    redis: Redis = Depends(get_redis),
) -> dict:
    """
    Upload beads issues via JSON payload.

    This endpoint accepts issues directly without requiring filesystem access,
    making it suitable for deployments where the server doesn't have
    access to the client's git repository.

    Requires an authenticated project context.

    Returns a summary dict with sync counts, conflict info, and notification
    delivery counts; status is "completed_with_errors" if any notification
    failed to send.
    """
    project_id = await get_project_from_auth(request, db_infra)
    beads_db = db_infra.get_manager("beads")
    server_db = db_infra.get_manager("server")

    # repo is already validated by BeadsUploadRequest.validate_repo field_validator

    # Apply defaults and validate branch
    branch_name = payload.branch or DEFAULT_BRANCH
    if not is_valid_branch_name(branch_name):
        raise HTTPException(
            status_code=422,
            # Truncate to 50 chars so an attacker-supplied value cannot bloat the error
            detail=f"Invalid branch name: {branch_name[:50]}",
        )

    # Validate issues
    issues = validate_issues_from_list(payload.issues)

    # Sync to database
    result: BeadsSyncResult = await _sync_issues_to_db(
        issues, beads_db, project_id=project_id, repo=payload.repo, branch=branch_name
    )

    # Record audit log
    # {{tables.audit_log}} is a placeholder resolved by the db manager's
    # query templating (see db.py), not Python string formatting.
    await server_db.execute(
        """
        INSERT INTO {{tables.audit_log}} (
            project_id,
            event_type,
            details
        )
        VALUES ($1, $2, $3::jsonb)
        """,
        project_id,
        "beads_uploaded",
        json.dumps(
            {
                "project_id": project_id,
                "repo": payload.repo,
                "branch": branch_name,
                "issues_synced": result.issues_synced,
                "issues_added": result.issues_added,
                "issues_updated": result.issues_updated,
                "source": "json",
            }
        ),
    )

    # Record notification intents in outbox, then process them
    # This ensures we have a record of what should be sent even if processing fails
    notifications_sent = 0
    notifications_failed = 0
    if result.status_changes:
        await record_notification_intents(result.status_changes, project_id, db_infra)
        identity = await resolve_aweb_identity(request, db_infra)
        notifications_sent, notifications_failed = await process_notification_outbox(
            project_id,
            db_infra,
            sender_agent_id=identity.agent_id,
            sender_alias=identity.alias,
        )

    return {
        "status": "completed" if notifications_failed == 0 else "completed_with_errors",
        "repo": payload.repo,
        "branch": result.branch,
        "issues_synced": result.issues_synced,
        "issues_added": result.issues_added,
        "issues_updated": result.issues_updated,
        "conflicts": result.conflicts,
        "conflicts_count": result.conflicts_count,
        "notifications_sent": notifications_sent,
        "notifications_failed": notifications_failed,
        "synced_at": result.synced_at,
    }
|
|
157
|
+
|
|
158
|
+
|
|
159
|
+
# Upload guardrails for the /upload-jsonl endpoint; enforced by the Body
# max_length and by parse_jsonl's max_count / max_depth arguments.
MAX_JSONL_SIZE = 10 * 1024 * 1024  # 10MB
MAX_ISSUES_COUNT = 10000  # Maximum issues per upload
MAX_JSON_DEPTH = 10  # Maximum nesting depth per issue
|
|
162
|
+
|
|
163
|
+
|
|
164
|
+
@router.post("/upload-jsonl")
async def beads_upload_jsonl(
    request: Request,
    repo: str = Query(..., min_length=1, max_length=255, description="Repository name"),
    branch: Optional[str] = Query(None, description="Git branch name (default: 'main')"),
    body: str = Body(..., media_type="text/plain", max_length=MAX_JSONL_SIZE),
    db_infra: DatabaseInfra = Depends(get_db_infra),
    # NOTE(review): `redis` is injected but not referenced in this handler —
    # confirm whether it is kept for interface symmetry or can be dropped.
    redis: Redis = Depends(get_redis),
) -> dict:
    """
    Upload beads issues via raw JSONL content.

    Accepts the raw content of a .beads/issues.jsonl file directly.
    Each line should be a valid JSON object representing an issue.
    Empty lines are skipped.

    Limits:
    - Maximum body size: 10MB
    - Maximum issues per upload: 10,000
    - Maximum JSON nesting depth: 10 levels

    This enables shell scripts to upload without jq dependency.

    Requires an authenticated project context.
    """
    project_id = await get_project_from_auth(request, db_infra)
    beads_db = db_infra.get_manager("beads")
    server_db = db_infra.get_manager("server")

    # Validate repo (canonical origin format like github.com/org/repo)
    # Unlike /upload, repo arrives as a query param here, so it is validated
    # manually rather than via a pydantic field_validator.
    if not is_valid_canonical_origin(repo):
        raise HTTPException(
            status_code=422,
            detail=f"Invalid repo: {repo[:50]}",
        )

    # Apply defaults and validate branch
    branch_name = branch or DEFAULT_BRANCH
    if not is_valid_branch_name(branch_name):
        raise HTTPException(
            status_code=422,
            detail=f"Invalid branch name: {branch_name[:50]}",
        )

    # Parse JSONL into list of issues (validates count and depth incrementally)
    try:
        issues_raw = parse_jsonl(body, max_depth=MAX_JSON_DEPTH, max_count=MAX_ISSUES_COUNT)
    except JSONLParseError as e:
        raise HTTPException(status_code=400, detail=str(e)) from e

    # Validate issues
    issues = validate_issues_from_list(issues_raw)

    # Sync to database
    result: BeadsSyncResult = await _sync_issues_to_db(
        issues, beads_db, project_id=project_id, repo=repo, branch=branch_name
    )

    # Record audit log
    # {{tables.audit_log}} is a placeholder resolved by the db manager's
    # query templating, not Python string formatting.
    await server_db.execute(
        """
        INSERT INTO {{tables.audit_log}} (
            project_id,
            event_type,
            details
        )
        VALUES ($1, $2, $3::jsonb)
        """,
        project_id,
        "beads_uploaded",
        json.dumps(
            {
                "project_id": project_id,
                "repo": repo,
                "branch": branch_name,
                "issues_synced": result.issues_synced,
                "issues_added": result.issues_added,
                "issues_updated": result.issues_updated,
                "source": "jsonl",
            }
        ),
    )

    # Record notification intents in outbox, then process them
    # This ensures we have a record of what should be sent even if processing fails
    notifications_sent = 0
    notifications_failed = 0
    if result.status_changes:
        await record_notification_intents(result.status_changes, project_id, db_infra)
        identity = await resolve_aweb_identity(request, db_infra)
        notifications_sent, notifications_failed = await process_notification_outbox(
            project_id,
            db_infra,
            sender_agent_id=identity.agent_id,
            sender_alias=identity.alias,
        )

    return {
        "status": "completed" if notifications_failed == 0 else "completed_with_errors",
        "repo": repo,
        "branch": result.branch,
        "issues_synced": result.issues_synced,
        "issues_added": result.issues_added,
        "issues_updated": result.issues_updated,
        "conflicts": result.conflicts,
        "conflicts_count": result.conflicts_count,
        "notifications_sent": notifications_sent,
        "notifications_failed": notifications_failed,
        "synced_at": result.synced_at,
    }
|
|
274
|
+
|
|
275
|
+
|
|
276
|
+
@router.get("/issues")
async def beads_issues(
    request: Request,
    repo: Optional[str] = Query(
        None,
        max_length=255,
        description="Filter by repo (canonical origin, e.g. github.com/org/repo)",
    ),
    branch: Optional[str] = Query(None, max_length=255, description="Filter by branch name"),
    status: Optional[str] = Query(
        None,
        description="Filter by status (open, in_progress, closed). Supports comma-separated values.",
    ),
    assignee: Optional[str] = Query(None),
    created_by: Optional[str] = Query(None, max_length=255, description="Filter by creator"),
    label: Optional[str] = Query(None),
    type: Optional[str] = Query(
        None, description="Filter by issue type (bug, feature, task, epic, chore)"
    ),
    q: Optional[str] = Query(
        None,
        max_length=255,
        description="Search by bead_id (prefix) or title (substring, case-insensitive)",
    ),
    limit: int = Query(50, gt=0, le=200, description="Maximum items to return (1-200)"),
    cursor: Optional[str] = Query(None, description="Pagination cursor from previous response"),
    db_infra: DatabaseInfra = Depends(get_db_infra),
) -> dict:
    """
    List synced Beads issues from Postgres, enriched with simple reservation info.

    Supports filtering by repo, branch, status, assignee, created_by, label, type.
    Supports search via `q` parameter: matches bead_id prefix or title substring.
    Supports cursor-based pagination with limit/cursor parameters.

    Requires an authenticated project context.

    Returns:
        issues: List of issues for current page
        count: Number of issues in current page
        has_more: True if more results exist
        next_cursor: Cursor for fetching next page (null if no more)
        synced_at: Timestamp of last sync (may be null)
    """

    project_id = await get_project_from_auth(request, db_infra)
    db = db_infra.get_manager("beads")

    # Validate pagination params
    try:
        validated_limit, cursor_data = validate_pagination_params(limit, cursor)
    except ValueError as e:
        raise HTTPException(status_code=422, detail=str(e))

    # Validate optional filters
    if repo and not is_valid_canonical_origin(repo):
        raise HTTPException(
            status_code=422,
            detail=f"Invalid repo: {repo[:50]}",
        )
    if branch and not is_valid_branch_name(branch):
        raise HTTPException(
            status_code=422,
            detail=f"Invalid branch name: {branch[:50]}",
        )
    if type and type not in VALID_ISSUE_TYPES:
        raise HTTPException(
            status_code=422,
            detail=f"Invalid issue type: {type}. Must be one of: {', '.join(sorted(VALID_ISSUE_TYPES))}",
        )

    # Always filter by project_id for tenant isolation
    # The query is assembled with asyncpg-style positional placeholders ($1, $2, ...);
    # param_idx tracks the next free slot and every conditions.append is paired
    # with a params.append — keep those in lockstep when editing.
    conditions: List[str] = ["project_id = $1"]
    params: List[object] = [project_id]
    param_idx = 2

    if repo:
        conditions.append(f"repo = ${param_idx}")
        params.append(repo)
        param_idx += 1
    if branch:
        conditions.append(f"branch = ${param_idx}")
        params.append(branch)
        param_idx += 1
    if status:
        # Accept a comma-separated list; blanks are dropped before validation.
        status_list = [s.strip() for s in status.split(",") if s.strip()]
        if status_list:
            invalid_statuses = [s for s in status_list if s not in VALID_STATUSES]
            if invalid_statuses:
                raise HTTPException(
                    status_code=422,
                    detail=f"Invalid status: {', '.join(invalid_statuses)}. Must be one of: {', '.join(sorted(VALID_STATUSES))}",
                )
            # Single status uses = for index friendliness; multiple use ANY(array).
            if len(status_list) == 1:
                conditions.append(f"status = ${param_idx}")
                params.append(status_list[0])
            else:
                conditions.append(f"status = ANY(${param_idx})")
                params.append(status_list)
            param_idx += 1
    if assignee:
        conditions.append(f"assignee = ${param_idx}")
        params.append(assignee)
        param_idx += 1
    if created_by:
        conditions.append(f"created_by = ${param_idx}")
        params.append(created_by)
        param_idx += 1
    if label:
        # labels is an array column; match if the given label is an element.
        conditions.append(f"${param_idx} = ANY(labels)")
        params.append(label)
        param_idx += 1
    if type:
        conditions.append(f"issue_type = ${param_idx}")
        params.append(type)
        param_idx += 1
    if q:
        # Search: bead_id prefix match OR title case-insensitive substring
        # Escape LIKE metacharacters (%, _, \) to prevent unintended wildcards
        escaped_q = _escape_like_pattern(q)
        conditions.append(
            f"(bead_id ILIKE ${param_idx} ESCAPE '\\' OR title ILIKE ${param_idx + 1} ESCAPE '\\')"
        )
        params.append(f"{escaped_q}%")  # bead_id prefix
        params.append(f"%{escaped_q}%")  # title substring
        param_idx += 2

    # {{tables.beads_issues}} is resolved by the db manager's query templating.
    base_query = """
        SELECT bead_id,
               repo,
               branch,
               title,
               status,
               priority,
               issue_type,
               assignee,
               created_by,
               labels,
               blocked_by,
               parent_id,
               updated_at,
               synced_at
        FROM {{tables.beads_issues}}
    """

    # conditions always has at least project_id
    base_query += " WHERE " + " AND ".join(conditions)

    # Apply cursor condition AFTER all filters (filters narrow, cursor paginates)
    # ORDER BY: COALESCE(updated_at, synced_at) DESC, priority ASC, bead_id ASC
    # "After" cursor means: smaller sort_time, OR same time with larger (priority, bead_id)
    if cursor_data:
        cursor_sort_time_str = cursor_data.get("sort_time")
        cursor_priority = cursor_data.get("priority")
        cursor_bead_id = cursor_data.get("bead_id")

        # All three fields must be present together or none
        has_any = any(
            x is not None for x in [cursor_sort_time_str, cursor_priority, cursor_bead_id]
        )
        has_all = all(
            x is not None for x in [cursor_sort_time_str, cursor_priority, cursor_bead_id]
        )
        if has_any and not has_all:
            raise HTTPException(
                status_code=422,
                detail="Invalid cursor: incomplete sort key (missing sort_time, priority, or bead_id)",
            )

        if has_all:
            # Parse ISO timestamp string to datetime (asyncpg requires datetime objects)
            # Type assertion: has_all guarantees cursor_sort_time_str is not None
            assert isinstance(cursor_sort_time_str, str)
            try:
                cursor_sort_time = datetime.fromisoformat(cursor_sort_time_str)
            except (ValueError, TypeError) as e:
                raise HTTPException(status_code=422, detail=f"Invalid cursor: bad timestamp ({e})")
            # Each $N needs its own param slot (positional, not named)
            base_query += f""" AND (
                COALESCE(updated_at, synced_at) < ${param_idx}
                OR (
                    COALESCE(updated_at, synced_at) = ${param_idx + 1}
                    AND (priority, bead_id) > (${param_idx + 2}, ${param_idx + 3})
                )
            )"""
            params.append(cursor_sort_time)  # for < comparison
            params.append(cursor_sort_time)  # for = comparison
            params.append(cursor_priority)
            params.append(cursor_bead_id)
            param_idx += 4

    base_query += (
        f" ORDER BY COALESCE(updated_at, synced_at) DESC, priority ASC, bead_id ASC"
        f" LIMIT ${param_idx}"
    )
    # Fetch limit+1 to detect if there are more results
    params.append(validated_limit + 1)

    rows = await db.fetch_all(base_query, *params)

    # Check if there are more results beyond this page
    has_more = len(rows) > validated_limit
    rows = rows[:validated_limit]  # Trim to requested limit

    issues: list[dict] = []
    for row in rows:
        bead_id = row["bead_id"]
        # Reservation enrichment: simple scan for reservations with matching bead_id in reason.
        reservations: list[dict] = []
        # For now, we do not implement full reservation scanning here to keep
        # implementation minimal; current_reservation will be null.
        current_reservation = None

        # asyncpg returns JSONB as strings (no auto-deserialization by default)
        blocked_by = row["blocked_by"]
        if isinstance(blocked_by, str):
            blocked_by = json.loads(blocked_by)

        parent_id = row["parent_id"]
        if isinstance(parent_id, str):
            parent_id = json.loads(parent_id)

        issues.append(
            {
                "bead_id": bead_id,
                "repo": row["repo"],
                "branch": row["branch"],
                "title": row["title"],
                "status": row["status"],
                "priority": row["priority"],
                "type": row["issue_type"],
                "assignee": row["assignee"],
                "created_by": row["created_by"],
                "labels": row["labels"],
                "blocked_by": blocked_by,
                "parent_id": parent_id,
                "current_reservation": current_reservation,
                "reservations": reservations,
            }
        )

    # Generate next_cursor from last row if there are more results
    next_cursor = None
    if has_more and rows:
        last_row = rows[-1]
        # Cursor encodes the sort key values for the last item
        # NOTE(review): if both updated_at and synced_at are NULL, sort_time is
        # encoded as None and the next-page request will be rejected as an
        # incomplete cursor — confirm whether that state is reachable.
        sort_time = last_row["updated_at"] or last_row["synced_at"]
        next_cursor = encode_cursor(
            {
                "sort_time": sort_time.isoformat() if sort_time else None,
                "priority": last_row["priority"],
                "bead_id": last_row["bead_id"],
            }
        )

    return {
        "issues": issues,
        "count": len(issues),
        "has_more": has_more,
        "next_cursor": next_cursor,
        "synced_at": None,
    }
|
|
538
|
+
|
|
539
|
+
|
|
540
|
+
@router.get("/issues/{bead_id}")
async def get_issue_by_bead_id(
    request: Request,
    bead_id: str = Path(..., min_length=1, max_length=255),
    repo: Optional[str] = Query(
        None,
        max_length=255,
        description="Filter by repo (canonical origin) for O(1) indexed lookup",
    ),
    branch: Optional[str] = Query(
        None, max_length=255, description="Filter by branch name for O(1) indexed lookup"
    ),
    db_infra: DatabaseInfra = Depends(get_db_infra),
) -> dict:
    """
    Get a single issue by its bead_id.

    When repo and branch are provided, uses the unique index
    (project_id, repo, branch, bead_id) for O(1) lookup.

    When repo/branch are omitted, falls back to O(log N) lookup and returns
    the alphabetically first match by repo, then branch.

    Requires an authenticated project context.

    Raises:
        HTTPException: 422 for malformed repo/branch, 404 if no issue matches.
    """
    project_id = await get_project_from_auth(request, db_infra)
    db = db_infra.get_manager("beads")

    # Validate repo/branch format if provided
    if repo is not None and not is_valid_canonical_origin(repo):
        raise HTTPException(status_code=422, detail=f"Invalid repo: {repo}")
    if branch is not None and not is_valid_branch_name(branch):
        raise HTTPException(status_code=422, detail=f"Invalid branch name: {branch}")

    if repo is not None and branch is not None:
        # O(1) indexed lookup using unique index
        # {{tables.beads_issues}} is resolved by the db manager's query templating.
        row = await db.fetch_one(
            """
            SELECT bead_id,
                   repo,
                   branch,
                   title,
                   description,
                   status,
                   priority,
                   issue_type,
                   assignee,
                   created_by,
                   labels,
                   blocked_by,
                   parent_id
            FROM {{tables.beads_issues}}
            WHERE project_id = $1 AND repo = $2 AND branch = $3 AND bead_id = $4
            """,
            project_id,
            repo,
            branch,
            bead_id,
        )
    else:
        # Fallback: scan by project_id + bead_id, return first match
        # Deterministic ordering so repeated calls return the same row when a
        # bead_id exists in several repos/branches.
        row = await db.fetch_one(
            """
            SELECT bead_id,
                   repo,
                   branch,
                   title,
                   description,
                   status,
                   priority,
                   issue_type,
                   assignee,
                   created_by,
                   labels,
                   blocked_by,
                   parent_id
            FROM {{tables.beads_issues}}
            WHERE project_id = $1 AND bead_id = $2
            ORDER BY repo ASC, branch ASC
            LIMIT 1
            """,
            project_id,
            bead_id,
        )

    if row is None:
        raise HTTPException(status_code=404, detail="Issue not found")

    # asyncpg returns JSONB columns as strings; decode them here.
    blocked_by = row["blocked_by"]
    if isinstance(blocked_by, str):
        blocked_by = json.loads(blocked_by)

    parent_id = row["parent_id"]
    if isinstance(parent_id, str):
        parent_id = json.loads(parent_id)

    return {
        "bead_id": row["bead_id"],
        "project_id": project_id,
        "repo": row["repo"],
        "branch": row["branch"],
        "title": row["title"],
        "description": row["description"],
        "status": row["status"],
        "priority": row["priority"],
        "type": row["issue_type"],
        "assignee": row["assignee"],
        "created_by": row["created_by"],
        "labels": row["labels"],
        "blocked_by": blocked_by,
        "parent_id": parent_id,
        "current_reservation": None,
    }
|
|
653
|
+
|
|
654
|
+
|
|
655
|
+
@router.get("/ready")
async def beads_ready(
    request: Request,
    # NOTE(review): workspace_id is validated below but never used in the
    # query — confirm whether it is intentionally only an input check.
    workspace_id: str = Query(..., min_length=1),
    repo: Optional[str] = Query(
        None,
        max_length=255,
        description="Filter by repo (canonical origin, e.g. github.com/org/repo)",
    ),
    branch: Optional[str] = Query(None, max_length=255, description="Filter by branch name"),
    limit: int = Query(10, gt=0),
    db_infra: DatabaseInfra = Depends(get_db_infra),
) -> dict:
    """
    Get issues that are ready to work on (open, unblocked, not reserved).

    Returns ready issues across all repos, or filtered by repo if specified.

    Requires an authenticated project context.

    An issue is ready if:
    - status = 'open'
    - All blockers in blocked_by are either closed or don't exist in DB
      (supports cross-repo dependencies)
    """
    # Validate workspace_id
    try:
        validate_workspace_id(workspace_id)
    except ValueError as e:
        raise HTTPException(status_code=422, detail=str(e))

    # Validate optional filters
    if repo and not is_valid_canonical_origin(repo):
        raise HTTPException(
            status_code=422,
            detail=f"Invalid repo: {repo[:50]}",
        )
    if branch and not is_valid_branch_name(branch):
        raise HTTPException(
            status_code=422,
            detail=f"Invalid branch name: {branch[:50]}",
        )

    project_id = await get_project_from_auth(request, db_infra)
    db = db_infra.get_manager("beads")

    # Build WHERE conditions - always filter by project_id for tenant isolation
    conditions = [
        "i.project_id = $1",
        "i.status = 'open'",
        # No open or missing blockers exist for this issue.
        # Uses LEFT JOIN so missing blockers (not yet synced) are treated as blocking.
        # An issue is ready only if ALL blockers exist in DB AND are closed.
        # blocked_by is JSONB array of {repo, branch, bead_id} objects.
        # Note: blockers must also be in same project for cross-project isolation
        """NOT EXISTS (
            SELECT 1
            FROM jsonb_array_elements(i.blocked_by) AS blocker
            LEFT JOIN {{tables.beads_issues}} b ON
                b.project_id = i.project_id AND
                b.repo = blocker->>'repo' AND
                b.branch = blocker->>'branch' AND
                b.bead_id = blocker->>'bead_id'
            WHERE b.bead_id IS NULL OR b.status != 'closed'
        )""",
    ]
    params: List[object] = [project_id]
    param_idx = 2

    if repo:
        conditions.append(f"i.repo = ${param_idx}")
        params.append(repo)
        param_idx += 1
    if branch:
        conditions.append(f"i.branch = ${param_idx}")
        params.append(branch)
        param_idx += 1

    # Quadruple braces render as literal {{tables.beads_issues}} after f-string
    # formatting, which the db manager's query templating then resolves.
    base_query = f"""
        SELECT i.bead_id,
               i.repo,
               i.branch,
               i.title,
               i.status,
               i.priority,
               i.issue_type,
               i.blocked_by
        FROM {{{{tables.beads_issues}}}} i
        WHERE {" AND ".join(conditions)}
        ORDER BY i.priority ASC, i.bead_id ASC
        LIMIT ${param_idx}
    """
    params.append(limit)

    rows = await db.fetch_all(base_query, *params)

    ready = []
    for row in rows:
        bead_id = row["bead_id"]
        # For now, we ignore Redis reservation state and assume no reservations.

        # asyncpg returns JSONB as strings (no auto-deserialization by default)
        blocked_by = row["blocked_by"]
        if isinstance(blocked_by, str):
            blocked_by = json.loads(blocked_by)

        ready.append(
            {
                "bead_id": bead_id,
                "repo": row["repo"],
                "branch": row["branch"],
                "title": row["title"],
                "status": row["status"],
                "priority": row["priority"],
                "type": row["issue_type"],
                "blocked_by": blocked_by,
                "current_reservation": None,
            }
        )

    return {
        "issues": ready,
        "count": len(ready),
    }
|