beadhub-0.1.0-py3-none-any.whl
- beadhub/__init__.py +12 -0
- beadhub/api.py +260 -0
- beadhub/auth.py +101 -0
- beadhub/aweb_context.py +65 -0
- beadhub/aweb_introspection.py +70 -0
- beadhub/beads_sync.py +514 -0
- beadhub/cli.py +330 -0
- beadhub/config.py +65 -0
- beadhub/db.py +129 -0
- beadhub/defaults/invariants/01-tracking-bdh-only.md +11 -0
- beadhub/defaults/invariants/02-communication-mail-first.md +36 -0
- beadhub/defaults/invariants/03-communication-chat.md +60 -0
- beadhub/defaults/invariants/04-identity-no-impersonation.md +17 -0
- beadhub/defaults/invariants/05-collaborate.md +12 -0
- beadhub/defaults/roles/backend.md +55 -0
- beadhub/defaults/roles/coordinator.md +44 -0
- beadhub/defaults/roles/frontend.md +77 -0
- beadhub/defaults/roles/implementer.md +73 -0
- beadhub/defaults/roles/reviewer.md +56 -0
- beadhub/defaults/roles/startup-expert.md +93 -0
- beadhub/defaults.py +262 -0
- beadhub/events.py +704 -0
- beadhub/internal_auth.py +121 -0
- beadhub/jsonl.py +68 -0
- beadhub/logging.py +62 -0
- beadhub/migrations/beads/001_initial.sql +70 -0
- beadhub/migrations/beads/002_search_indexes.sql +20 -0
- beadhub/migrations/server/001_initial.sql +279 -0
- beadhub/names.py +33 -0
- beadhub/notifications.py +275 -0
- beadhub/pagination.py +125 -0
- beadhub/presence.py +495 -0
- beadhub/rate_limit.py +152 -0
- beadhub/redis_client.py +11 -0
- beadhub/roles.py +35 -0
- beadhub/routes/__init__.py +1 -0
- beadhub/routes/agents.py +303 -0
- beadhub/routes/bdh.py +655 -0
- beadhub/routes/beads.py +778 -0
- beadhub/routes/claims.py +141 -0
- beadhub/routes/escalations.py +471 -0
- beadhub/routes/init.py +348 -0
- beadhub/routes/mcp.py +338 -0
- beadhub/routes/policies.py +833 -0
- beadhub/routes/repos.py +538 -0
- beadhub/routes/status.py +568 -0
- beadhub/routes/subscriptions.py +362 -0
- beadhub/routes/workspaces.py +1642 -0
- beadhub/workspace_config.py +202 -0
- beadhub-0.1.0.dist-info/METADATA +254 -0
- beadhub-0.1.0.dist-info/RECORD +54 -0
- beadhub-0.1.0.dist-info/WHEEL +4 -0
- beadhub-0.1.0.dist-info/entry_points.txt +2 -0
- beadhub-0.1.0.dist-info/licenses/LICENSE +21 -0
beadhub/internal_auth.py
ADDED
@@ -0,0 +1,121 @@
from __future__ import annotations

import hashlib
import hmac
import logging
import os
import uuid
from typing import Optional, TypedDict

from fastapi import HTTPException, Request

logger = logging.getLogger(__name__)

INTERNAL_AUTH_HEADER = "X-BH-Auth"
INTERNAL_PROJECT_HEADER = "X-Project-ID"
INTERNAL_USER_HEADER = "X-User-ID"
INTERNAL_API_KEY_ID_HEADER = "X-API-Key"
INTERNAL_ACTOR_ID_HEADER = "X-Aweb-Actor-ID"


class InternalAuthContext(TypedDict):
    project_id: str
    principal_type: str  # "u" or "k"
    principal_id: str
    actor_id: str


def _get_internal_auth_secret() -> Optional[str]:
    # Some embedded/proxy deployments may reuse SESSION_SECRET_KEY to sign X-BH-Auth.
    # For standalone OSS this is typically unset.
    return os.getenv("BEADHUB_INTERNAL_AUTH_SECRET") or os.getenv("SESSION_SECRET_KEY")


def _internal_auth_header_value(
    *, secret: str, project_id: str, principal_type: str, principal_id: str, actor_id: str
) -> str:
    msg = f"v2:{project_id}:{principal_type}:{principal_id}:{actor_id}"
    sig = hmac.new(
        secret.encode("utf-8"),
        msg.encode("utf-8"),
        hashlib.sha256,
    ).hexdigest()
    return f"{msg}:{sig}"


def parse_internal_auth_context(request: Request) -> Optional[InternalAuthContext]:
    """Parse and validate proxy-injected auth context headers for BeadHub OSS.

    This is intended for proxy/wrapper deployments where the wrapper authenticates the caller
    (JWT/cookie/API key) and injects project scope to the core service.

    The core MUST treat these headers as untrusted unless `X-BH-Auth` validates.
    """
    internal_auth = request.headers.get(INTERNAL_AUTH_HEADER)
    if not internal_auth:
        return None

    # In standalone OSS mode the internal auth secret is intentionally unset. Treat any
    # client-supplied internal headers as untrusted and ignore them rather than
    # failing with a 500.
    secret = _get_internal_auth_secret()
    if not secret:
        path = request.scope.get("path") or ""
        logger.warning(
            "Ignoring %s header because BEADHUB_INTERNAL_AUTH_SECRET is not configured (path=%s)",
            INTERNAL_AUTH_HEADER,
            path,
        )
        return None

    project_id = request.headers.get(INTERNAL_PROJECT_HEADER)
    if not project_id:
        raise HTTPException(status_code=401, detail="Authentication required")
    try:
        project_id = str(uuid.UUID(project_id))
    except ValueError:
        raise HTTPException(status_code=401, detail="Authentication required")

    user_id = request.headers.get(INTERNAL_USER_HEADER)
    api_key_id = request.headers.get(INTERNAL_API_KEY_ID_HEADER)
    if user_id:
        try:
            user_id = str(uuid.UUID(user_id))
        except ValueError:
            raise HTTPException(status_code=401, detail="Authentication required")
        principal_type = "u"
        principal_id = user_id
    elif api_key_id:
        try:
            api_key_id = str(uuid.UUID(api_key_id))
        except ValueError:
            raise HTTPException(status_code=401, detail="Authentication required")
        principal_type = "k"
        principal_id = api_key_id
    else:
        raise HTTPException(status_code=401, detail="Authentication required")

    actor_id = request.headers.get(INTERNAL_ACTOR_ID_HEADER)
    if not actor_id:
        raise HTTPException(status_code=401, detail="Authentication required")
    try:
        actor_id = str(uuid.UUID(actor_id))
    except ValueError:
        raise HTTPException(status_code=401, detail="Authentication required")

    expected = _internal_auth_header_value(
        secret=secret,
        project_id=project_id,
        principal_type=principal_type,
        principal_id=principal_id,
        actor_id=actor_id,
    )
    if not hmac.compare_digest(internal_auth, expected):
        raise HTTPException(status_code=401, detail="Authentication required")

    return {
        "project_id": project_id,
        "principal_type": principal_type,
        "principal_id": principal_id,
        "actor_id": actor_id,
    }
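For context, a minimal sketch of how a trusted proxy could construct the headers that parse_internal_auth_context() validates. The header names and the "v2:" message layout come from the module above; the secret and UUID values here are placeholders, and whether your deployment injects a user or an API-key principal is an assumption of this example.

    import hashlib
    import hmac
    import uuid

    secret = "value-of-BEADHUB_INTERNAL_AUTH_SECRET"  # placeholder shared secret
    project_id = str(uuid.uuid4())    # placeholder project UUID
    principal_id = str(uuid.uuid4())  # placeholder user UUID ("u" principal)
    actor_id = str(uuid.uuid4())      # placeholder actor UUID

    # Same "v2:" message layout and HMAC-SHA256 signature as _internal_auth_header_value()
    msg = f"v2:{project_id}:u:{principal_id}:{actor_id}"
    sig = hmac.new(secret.encode("utf-8"), msg.encode("utf-8"), hashlib.sha256).hexdigest()

    headers = {
        "X-BH-Auth": f"{msg}:{sig}",
        "X-Project-ID": project_id,
        "X-User-ID": principal_id,
        "X-Aweb-Actor-ID": actor_id,
    }

With these headers attached by the proxy, the core service recomputes the HMAC and compares it with hmac.compare_digest() before trusting the project scope.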
beadhub/jsonl.py
ADDED
@@ -0,0 +1,68 @@
from __future__ import annotations

import json
from typing import Any, List


class JSONLParseError(ValueError):
    """Raised when JSONL parsing fails with line context."""


def _check_json_depth(obj: object, max_depth: int, current_depth: int = 0) -> bool:
    """Return True if JSON nesting depth is within limit."""

    if current_depth >= max_depth:
        return False
    if isinstance(obj, dict):
        for v in obj.values():
            if not _check_json_depth(v, max_depth, current_depth + 1):
                return False
        return True
    if isinstance(obj, list):
        for item in obj:
            if not _check_json_depth(item, max_depth, current_depth + 1):
                return False
        return True
    return True


def parse_jsonl(
    content: str,
    *,
    max_depth: int = 10,
    max_count: int = 10000,
) -> List[dict[str, Any]]:
    """
    Parse JSONL content into a list of dicts.

    - Skips empty lines
    - Validates max_count incrementally (fails fast)
    - Validates per-record nesting depth

    Raises:
        JSONLParseError: on invalid JSON, recursion errors, or depth/count violations
    """

    issues: list[dict[str, Any]] = []
    for line_num, line in enumerate(content.splitlines(), start=1):
        line = line.strip()
        if not line:
            continue
        if len(issues) >= max_count:
            raise JSONLParseError(f"Too many issues: exceeds limit of {max_count}")
        try:
            issue = json.loads(line)
        except json.JSONDecodeError as e:
            raise JSONLParseError(f"Invalid JSON on line {line_num}: {e.msg}") from e
        except RecursionError as e:
            raise JSONLParseError(
                f"JSON nesting too deep on line {line_num}: exceeds recursion limit"
            ) from e
        if not isinstance(issue, dict):
            raise JSONLParseError(f"JSON on line {line_num} must be an object")
        if not _check_json_depth(issue, max_depth):
            raise JSONLParseError(
                f"JSON nesting depth exceeds limit ({max_depth}) on line {line_num}"
            )
        issues.append(issue)
    return issues
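A short usage sketch for parse_jsonl(), showing the happy path and the error raised for a line that is not a JSON object. The sample payload is illustrative; the import path is assumed from the wheel layout above.

    from beadhub.jsonl import JSONLParseError, parse_jsonl

    content = '{"id": "bd-1", "title": "First issue"}\n\n{"id": "bd-2", "title": "Second issue"}\n'
    issues = parse_jsonl(content, max_depth=10, max_count=10000)
    assert [i["id"] for i in issues] == ["bd-1", "bd-2"]  # blank line is skipped

    try:
        parse_jsonl("[1, 2, 3]")  # top-level arrays are rejected: each line must be an object
    except JSONLParseError as exc:
        print(exc)  # "JSON on line 1 must be an object"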
beadhub/logging.py
ADDED
@@ -0,0 +1,62 @@
"""Structured logging configuration for BeadHub."""

from __future__ import annotations

import json
import logging
import sys
from datetime import datetime, timezone
from typing import Any


class JSONFormatter(logging.Formatter):
    """Format log records as JSON for structured logging."""

    def format(self, record: logging.LogRecord) -> str:
        log_data: dict[str, Any] = {
            "timestamp": datetime.fromtimestamp(record.created, timezone.utc).isoformat(),
            "level": record.levelname,
            "logger": record.name,
            "message": record.getMessage(),
            "function": record.funcName,
            "line": record.lineno,
        }

        if record.exc_info:
            log_data["exception"] = self.formatException(record.exc_info)

        if hasattr(record, "request_id"):
            log_data["request_id"] = record.request_id

        return json.dumps(log_data)


def configure_logging(log_level: str = "INFO", json_format: bool = True) -> None:
    """
    Configure logging for BeadHub.

    Args:
        log_level: Logging level (DEBUG, INFO, WARNING, ERROR, CRITICAL)
        json_format: If True, use JSON format. If False, use simple text format.
    """
    root_logger = logging.getLogger()
    root_logger.setLevel(log_level.upper())

    # Remove existing handlers
    for handler in root_logger.handlers[:]:
        root_logger.removeHandler(handler)

    handler = logging.StreamHandler(sys.stdout)
    handler.setLevel(log_level.upper())

    if json_format:
        handler.setFormatter(JSONFormatter())
    else:
        handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(name)s: %(message)s"))

    root_logger.addHandler(handler)

    # Quiet noisy libraries
    logging.getLogger("httpx").setLevel(logging.WARNING)
    logging.getLogger("httpcore").setLevel(logging.WARNING)
    logging.getLogger("asyncio").setLevel(logging.WARNING)
beadhub/migrations/beads/001_initial.sql
ADDED
@@ -0,0 +1,70 @@
-- 001_initial.sql
-- Description: Baseline BeadHub beads schema (issues)

CREATE EXTENSION IF NOT EXISTS pgcrypto;

CREATE TABLE IF NOT EXISTS {{tables.beads_issues}} (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),

    -- Tenant isolation: project_id scopes all data
    project_id UUID NOT NULL,

    -- Identity: bead_id is unique within (project_id, repo, branch)
    bead_id TEXT NOT NULL,
    repo TEXT NOT NULL DEFAULT 'default', -- canonical origin e.g. github.com/org/repo
    branch TEXT NOT NULL DEFAULT 'main',

    -- Issue content
    title TEXT,
    description TEXT,
    status TEXT,
    priority INTEGER,
    issue_type TEXT,
    assignee TEXT,
    labels TEXT[],

    -- Dependencies as JSONB for cross-repo/branch references
    blocked_by JSONB DEFAULT '[]'::jsonb,
    parent_id JSONB,

    -- Creator attribution
    created_by TEXT,

    -- Timestamps
    created_at TIMESTAMPTZ,
    updated_at TIMESTAMPTZ,
    synced_at TIMESTAMPTZ DEFAULT NOW(),

    -- Bead history: who closed this bead (cross-schema FK to server.workspaces)
    closed_by_workspace_id UUID REFERENCES server.workspaces(workspace_id) ON DELETE SET NULL
);

CREATE UNIQUE INDEX IF NOT EXISTS idx_beads_issues_project_repo_branch_bead
    ON {{tables.beads_issues}}(project_id, repo, branch, bead_id);

CREATE INDEX IF NOT EXISTS idx_beads_issues_project_id
    ON {{tables.beads_issues}}(project_id);

CREATE INDEX IF NOT EXISTS idx_beads_issues_status
    ON {{tables.beads_issues}}(status);

CREATE INDEX IF NOT EXISTS idx_beads_issues_project_repo
    ON {{tables.beads_issues}}(project_id, repo);

CREATE INDEX IF NOT EXISTS idx_beads_issues_parent
    ON {{tables.beads_issues}}((parent_id->>'bead_id'))
    WHERE parent_id IS NOT NULL;

CREATE INDEX IF NOT EXISTS idx_beads_issues_project_bead
    ON {{tables.beads_issues}}(project_id, bead_id);

CREATE INDEX IF NOT EXISTS idx_beads_issues_closed_by
    ON {{tables.beads_issues}}(closed_by_workspace_id)
    WHERE closed_by_workspace_id IS NOT NULL;

CREATE INDEX IF NOT EXISTS idx_beads_issues_project_status
    ON {{tables.beads_issues}}(project_id, status);

CREATE INDEX IF NOT EXISTS idx_beads_issues_project_created_by
    ON {{tables.beads_issues}}(project_id, created_by)
    WHERE created_by IS NOT NULL;
beadhub/migrations/beads/002_search_indexes.sql
ADDED
@@ -0,0 +1,20 @@
-- pgdbm:no-transaction
-- 002_search_indexes.sql
-- Description: Add indexes for bead search (q= parameter) performance
-- Requires no-transaction mode because CREATE INDEX CONCURRENTLY
-- cannot run inside a transaction block.

-- Enable pg_trgm extension for trigram-based text search
-- This supports efficient ILIKE queries with leading wildcards
CREATE EXTENSION IF NOT EXISTS pg_trgm;

-- GIN trigram index on title for substring search (ILIKE '%query%')
-- Supports case-insensitive substring matching efficiently
CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_beads_issues_title_trgm
    ON {{tables.beads_issues}} USING gin (title gin_trgm_ops);

-- GIN trigram index on bead_id for case-insensitive prefix search
-- Supports ILIKE with ESCAPE clause for safely handling user input with wildcards
-- (see _escape_like_pattern() in routes/beads.py)
CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_beads_issues_bead_id_trgm
    ON {{tables.beads_issues}} USING gin (bead_id gin_trgm_ops);
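The comments above reference _escape_like_pattern() in routes/beads.py, which is not part of this excerpt. A plausible sketch of that kind of helper, and of the query shape the trigram indexes serve, is below; the function body and the SQL are illustrative assumptions, not the package's actual implementation.

    def escape_like_pattern(value: str, escape: str = "\\") -> str:
        # Hypothetical helper: escape LIKE/ILIKE wildcards in user input so that
        # "%" and "_" match literally when paired with an ESCAPE clause.
        return (
            value.replace(escape, escape + escape)
            .replace("%", escape + "%")
            .replace("_", escape + "_")
        )

    # The GIN trigram indexes let a search like this use an index scan instead of
    # a sequential scan (parameter style shown asyncpg-like, as an assumption):
    #   SELECT bead_id, title
    #   FROM beads_issues
    #   WHERE project_id = $1
    #     AND (title ILIKE '%' || $2 || '%' OR bead_id ILIKE $2 || '%' ESCAPE '\')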
beadhub/migrations/server/001_initial.sql
ADDED
@@ -0,0 +1,279 @@
-- 001_initial.sql
-- Description: Baseline BeadHub server schema (clean-slate split)
--
-- Coordination primitives (mail/chat/locks/auth keys) live in `aweb`.
-- BeadHub owns: repos, workspaces, beads, claims, subscriptions, policies, escalations.

CREATE EXTENSION IF NOT EXISTS pgcrypto;

CREATE TABLE IF NOT EXISTS {{tables.projects}} (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    -- Partition key for multi-tenant SaaS (NULL for single-tenant OSS mode)
    tenant_id UUID,
    slug TEXT NOT NULL,
    name TEXT,
    -- Active policy pointer (FK added after project_policies exists)
    active_policy_id UUID,
    created_at TIMESTAMPTZ DEFAULT NOW(),
    -- Soft-delete support: NULL means active
    deleted_at TIMESTAMPTZ
);

-- Slugs unique within tenant (or globally if no tenant), for non-deleted projects.
CREATE UNIQUE INDEX IF NOT EXISTS idx_projects_slug_no_tenant
    ON {{tables.projects}}(slug) WHERE tenant_id IS NULL AND deleted_at IS NULL;
CREATE UNIQUE INDEX IF NOT EXISTS idx_projects_slug_with_tenant
    ON {{tables.projects}}(tenant_id, slug) WHERE tenant_id IS NOT NULL AND deleted_at IS NULL;

CREATE INDEX IF NOT EXISTS idx_projects_active ON {{tables.projects}}(id) WHERE deleted_at IS NULL;

CREATE TABLE IF NOT EXISTS {{tables.repos}} (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    -- CASCADE: repos are deleted when their project is deleted (repos without projects are invalid)
    project_id UUID NOT NULL REFERENCES {{tables.projects}}(id) ON DELETE CASCADE,
    origin_url TEXT NOT NULL,
    canonical_origin TEXT NOT NULL,
    name TEXT NOT NULL,
    created_at TIMESTAMPTZ DEFAULT NOW(),
    -- Soft-delete support: NULL means active, timestamp means deleted
    -- Unique constraint retained for ON CONFLICT support in ensure_repo.
    -- When a soft-deleted repo is re-registered, ensure_repo clears deleted_at.
    deleted_at TIMESTAMPTZ,
    CONSTRAINT unique_repo_per_project UNIQUE (project_id, canonical_origin)
);

CREATE INDEX IF NOT EXISTS idx_repos_project ON {{tables.repos}}(project_id);
CREATE INDEX IF NOT EXISTS idx_repos_active ON {{tables.repos}}(project_id)
    WHERE deleted_at IS NULL;

CREATE TABLE IF NOT EXISTS {{tables.workspaces}} (
    workspace_id UUID PRIMARY KEY,
    -- CASCADE: workspaces are deleted when their project is deleted
    project_id UUID NOT NULL REFERENCES {{tables.projects}}(id) ON DELETE CASCADE,
    -- SET NULL: when repo is hard-deleted (e.g., project deletion cascade), preserve workspace for audit
    -- When repo is soft-deleted via API, workspaces are soft-deleted manually in application code
    repo_id UUID REFERENCES {{tables.repos}}(id) ON DELETE SET NULL,
    alias TEXT NOT NULL,
    human_name TEXT NOT NULL DEFAULT '',
    role TEXT,
    current_branch TEXT,
    focus_apex_bead_id TEXT,
    focus_apex_repo_name TEXT,
    focus_apex_branch TEXT,
    focus_updated_at TIMESTAMPTZ,
    -- Physical location: for detecting "gone" workspaces (directory no longer exists)
    -- Only checked for workspaces on the current hostname to avoid cross-machine false positives
    hostname TEXT,
    workspace_path TEXT,
    -- Workspace classification
    workspace_type TEXT NOT NULL DEFAULT 'agent',
    created_at TIMESTAMPTZ DEFAULT NOW(),
    updated_at TIMESTAMPTZ DEFAULT NOW(),
    -- Soft-delete support: NULL means active, timestamp means deleted
    deleted_at TIMESTAMPTZ,
    -- Track when workspace was last seen (updated on every bdh command)
    last_seen_at TIMESTAMPTZ,
    CONSTRAINT chk_workspace_repo CHECK (
        (workspace_type = 'agent' AND repo_id IS NOT NULL) OR
        (workspace_type IN ('dashboard') AND repo_id IS NULL)
    ),
    CONSTRAINT chk_workspace_role_length CHECK (role IS NULL OR length(role) <= 50)
);

-- Partial unique index: aliases unique within project for non-deleted workspaces only
CREATE UNIQUE INDEX IF NOT EXISTS idx_unique_active_alias
    ON {{tables.workspaces}}(project_id, alias)
    WHERE deleted_at IS NULL;

CREATE INDEX IF NOT EXISTS idx_workspaces_alias ON {{tables.workspaces}}(alias);
CREATE INDEX IF NOT EXISTS idx_workspaces_project ON {{tables.workspaces}}(project_id);
CREATE INDEX IF NOT EXISTS idx_workspaces_repo ON {{tables.workspaces}}(repo_id);
CREATE INDEX IF NOT EXISTS idx_workspaces_active ON {{tables.workspaces}}(project_id)
    WHERE deleted_at IS NULL;
CREATE INDEX IF NOT EXISTS idx_workspaces_hostname ON {{tables.workspaces}}(hostname)
    WHERE hostname IS NOT NULL AND deleted_at IS NULL;
CREATE INDEX IF NOT EXISTS idx_workspaces_dashboard
    ON {{tables.workspaces}}(project_id, human_name)
    WHERE workspace_type = 'dashboard';

-- Trigger to update updated_at and enforce immutability
CREATE OR REPLACE FUNCTION {{schema}}.update_workspace_timestamp()
RETURNS TRIGGER AS $$
BEGIN
    -- Enforce immutability of key fields
    IF NEW.project_id IS DISTINCT FROM OLD.project_id THEN
        RAISE EXCEPTION 'project_id is immutable';
    END IF;
    -- repo_id can become NULL via FK SET NULL cascade (when repo is hard-deleted), but not change to different repo
    IF NEW.repo_id IS DISTINCT FROM OLD.repo_id AND NEW.repo_id IS NOT NULL THEN
        RAISE EXCEPTION 'repo_id is immutable (cannot change to different repo)';
    END IF;
    IF NEW.alias IS DISTINCT FROM OLD.alias THEN
        RAISE EXCEPTION 'alias is immutable';
    END IF;
    IF NEW.workspace_type IS DISTINCT FROM OLD.workspace_type THEN
        RAISE EXCEPTION 'workspace_type is immutable';
    END IF;

    -- Auto-soft-delete when repo_id becomes NULL (repo was hard-deleted)
    IF OLD.repo_id IS NOT NULL AND NEW.repo_id IS NULL AND NEW.deleted_at IS NULL THEN
        NEW.deleted_at = NOW();
    END IF;

    NEW.updated_at = NOW();
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;

DROP TRIGGER IF EXISTS workspace_updated_at ON {{tables.workspaces}};
CREATE TRIGGER workspace_updated_at
    BEFORE UPDATE ON {{tables.workspaces}}
    FOR EACH ROW
    EXECUTE FUNCTION {{schema}}.update_workspace_timestamp();

CREATE TABLE IF NOT EXISTS {{tables.bead_claims}} (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    -- CASCADE: claims deleted when project is deleted
    project_id UUID NOT NULL REFERENCES {{tables.projects}}(id) ON DELETE CASCADE,
    -- CASCADE: claims deleted when workspace is deleted
    workspace_id UUID NOT NULL REFERENCES {{tables.workspaces}}(workspace_id) ON DELETE CASCADE,
    alias TEXT NOT NULL,
    human_name TEXT NOT NULL,
    bead_id TEXT NOT NULL,
    -- Apex reference for fast team status listings
    apex_bead_id TEXT,
    apex_repo_name TEXT,
    apex_branch TEXT,
    claimed_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    -- Multiple workspaces can claim the same bead (coordinated work with --:jump-in)
    UNIQUE(project_id, bead_id, workspace_id)
);

CREATE INDEX IF NOT EXISTS idx_bead_claims_workspace
    ON {{tables.bead_claims}}(workspace_id, claimed_at DESC);
CREATE INDEX IF NOT EXISTS idx_bead_claims_project ON {{tables.bead_claims}}(project_id);

CREATE TABLE IF NOT EXISTS {{tables.escalations}} (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    project_id UUID NOT NULL REFERENCES {{tables.projects}}(id) ON DELETE CASCADE,
    workspace_id UUID NOT NULL REFERENCES {{tables.workspaces}}(workspace_id) ON DELETE CASCADE,
    alias TEXT NOT NULL,
    member_email TEXT,
    subject TEXT NOT NULL,
    situation TEXT NOT NULL,
    options JSONB,
    status TEXT DEFAULT 'pending',
    response TEXT,
    response_note TEXT,
    created_at TIMESTAMPTZ DEFAULT NOW(),
    responded_at TIMESTAMPTZ,
    expires_at TIMESTAMPTZ
);

CREATE INDEX IF NOT EXISTS idx_escalations_status ON {{tables.escalations}}(status);
CREATE INDEX IF NOT EXISTS idx_escalations_member ON {{tables.escalations}}(member_email);
CREATE INDEX IF NOT EXISTS idx_escalations_workspace ON {{tables.escalations}}(workspace_id);
CREATE INDEX IF NOT EXISTS idx_escalations_alias_created
    ON {{tables.escalations}}(alias, created_at DESC);

CREATE TABLE IF NOT EXISTS {{tables.subscriptions}} (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    project_id UUID NOT NULL REFERENCES {{tables.projects}}(id) ON DELETE CASCADE,
    workspace_id UUID NOT NULL REFERENCES {{tables.workspaces}}(workspace_id) ON DELETE CASCADE,
    alias TEXT NOT NULL,
    bead_id TEXT NOT NULL,
    repo TEXT,
    event_types TEXT[] NOT NULL DEFAULT '{status_change}',
    created_at TIMESTAMPTZ DEFAULT NOW()
);

CREATE UNIQUE INDEX IF NOT EXISTS idx_subscriptions_unique
    ON {{tables.subscriptions}}(project_id, workspace_id, bead_id, COALESCE(repo, ''));
CREATE INDEX IF NOT EXISTS idx_subscriptions_bead
    ON {{tables.subscriptions}}(project_id, bead_id, repo);
CREATE INDEX IF NOT EXISTS idx_subscriptions_workspace
    ON {{tables.subscriptions}}(project_id, workspace_id, created_at DESC);

CREATE TABLE IF NOT EXISTS {{tables.notification_outbox}} (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    -- Tenant isolation
    project_id UUID NOT NULL REFERENCES {{tables.projects}}(id) ON DELETE CASCADE,
    -- Event details
    event_type TEXT NOT NULL DEFAULT 'bead_status_change',
    -- Full payload for the notification (bead_id, status change, etc.)
    payload JSONB NOT NULL,
    -- Target workspace for the notification (no FK - subscriptions may outlive workspaces)
    recipient_workspace_id UUID NOT NULL,
    recipient_alias TEXT NOT NULL,
    -- Processing status
    status TEXT NOT NULL DEFAULT 'pending'
        CHECK (status IN ('pending', 'processing', 'completed', 'failed')),
    attempts INTEGER NOT NULL DEFAULT 0,
    last_error TEXT,
    -- Timestamps
    created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(),
    processed_at TIMESTAMPTZ,
    -- Message ID if successfully sent (no FK - messages may be deleted)
    message_id UUID
);

CREATE INDEX IF NOT EXISTS idx_notification_outbox_pending
    ON {{tables.notification_outbox}}(project_id, status, attempts, created_at)
    WHERE status IN ('pending', 'failed');
CREATE INDEX IF NOT EXISTS idx_notification_outbox_completed
    ON {{tables.notification_outbox}}(project_id, status, processed_at)
    WHERE status = 'completed';

CREATE TABLE IF NOT EXISTS {{tables.audit_log}} (
    id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    project_id UUID NOT NULL REFERENCES {{tables.projects}}(id) ON DELETE CASCADE,
    workspace_id UUID REFERENCES {{tables.workspaces}}(workspace_id) ON DELETE SET NULL,
    event_type TEXT NOT NULL,
    alias TEXT,
    member_email TEXT,
    resource TEXT,
    bead_id TEXT,
    details JSONB,
    created_at TIMESTAMPTZ DEFAULT NOW()
);

CREATE INDEX IF NOT EXISTS idx_audit_log_created ON {{tables.audit_log}}(created_at);
CREATE INDEX IF NOT EXISTS idx_audit_log_alias ON {{tables.audit_log}}(alias);
CREATE INDEX IF NOT EXISTS idx_audit_log_project ON {{tables.audit_log}}(project_id);

CREATE TABLE IF NOT EXISTS {{tables.project_policies}} (
    policy_id UUID PRIMARY KEY DEFAULT gen_random_uuid(),
    project_id UUID NOT NULL REFERENCES {{tables.projects}}(id) ON DELETE CASCADE,
    version INT NOT NULL,
    bundle_json JSONB NOT NULL,
    created_by_workspace_id UUID REFERENCES {{tables.workspaces}}(workspace_id) ON DELETE SET NULL,
    created_at TIMESTAMPTZ DEFAULT NOW(),
    updated_at TIMESTAMPTZ DEFAULT NOW(),
    CONSTRAINT project_policies_version_unique UNIQUE (project_id, version)
);

-- Add FK after project_policies to avoid circular dependency during create
ALTER TABLE {{tables.projects}}
    ADD CONSTRAINT fk_projects_active_policy
    FOREIGN KEY (active_policy_id)
    REFERENCES {{tables.project_policies}}(policy_id) ON DELETE SET NULL;

CREATE INDEX IF NOT EXISTS idx_project_policies_project_version
    ON {{tables.project_policies}}(project_id, version DESC);
CREATE INDEX IF NOT EXISTS idx_project_policies_created_by
    ON {{tables.project_policies}}(created_by_workspace_id)
    WHERE created_by_workspace_id IS NOT NULL;

CREATE OR REPLACE FUNCTION {{schema}}.project_policies_update_timestamp()
RETURNS TRIGGER AS $$
BEGIN
    NEW.updated_at = NOW();
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;

DROP TRIGGER IF EXISTS project_policies_update_timestamp ON {{tables.project_policies}};
CREATE TRIGGER project_policies_update_timestamp
    BEFORE UPDATE ON {{tables.project_policies}}
    FOR EACH ROW
    EXECUTE FUNCTION {{schema}}.project_policies_update_timestamp();
beadhub/names.py
ADDED
@@ -0,0 +1,33 @@
"""Shared naming constants (e.g. classic workspace name prefixes)."""

from __future__ import annotations

# Classic names for alias generation (alice, bob, charlie, etc.)
CLASSIC_NAMES = (
    "alice",
    "bob",
    "charlie",
    "dave",
    "eve",
    "frank",
    "grace",
    "henry",
    "ivy",
    "jack",
    "kate",
    "leo",
    "mia",
    "noah",
    "olivia",
    "peter",
    "quinn",
    "rose",
    "sam",
    "tara",
    "uma",
    "victor",
    "wendy",
    "xavier",
    "yara",
    "zoe",
)
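The module docstring calls these classic workspace name prefixes; the package's actual alias-generation logic lives elsewhere (not shown in this diff). A hypothetical sketch of how a caller might pick the first unused name, falling back to numbered variants once the list is exhausted:

    from itertools import count

    from beadhub.names import CLASSIC_NAMES  # import path assumed from the wheel layout


    def pick_alias(taken: set[str]) -> str:
        # Hypothetical helper, not the package's implementation: prefer a free
        # classic name, then fall back to numbered variants (alice2, alice3, ...).
        for name in CLASSIC_NAMES:
            if name not in taken:
                return name
        for n in count(2):
            for name in CLASSIC_NAMES:
                candidate = f"{name}{n}"
                if candidate not in taken:
                    return candidate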