beadhub 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- beadhub/__init__.py +12 -0
- beadhub/api.py +260 -0
- beadhub/auth.py +101 -0
- beadhub/aweb_context.py +65 -0
- beadhub/aweb_introspection.py +70 -0
- beadhub/beads_sync.py +514 -0
- beadhub/cli.py +330 -0
- beadhub/config.py +65 -0
- beadhub/db.py +129 -0
- beadhub/defaults/invariants/01-tracking-bdh-only.md +11 -0
- beadhub/defaults/invariants/02-communication-mail-first.md +36 -0
- beadhub/defaults/invariants/03-communication-chat.md +60 -0
- beadhub/defaults/invariants/04-identity-no-impersonation.md +17 -0
- beadhub/defaults/invariants/05-collaborate.md +12 -0
- beadhub/defaults/roles/backend.md +55 -0
- beadhub/defaults/roles/coordinator.md +44 -0
- beadhub/defaults/roles/frontend.md +77 -0
- beadhub/defaults/roles/implementer.md +73 -0
- beadhub/defaults/roles/reviewer.md +56 -0
- beadhub/defaults/roles/startup-expert.md +93 -0
- beadhub/defaults.py +262 -0
- beadhub/events.py +704 -0
- beadhub/internal_auth.py +121 -0
- beadhub/jsonl.py +68 -0
- beadhub/logging.py +62 -0
- beadhub/migrations/beads/001_initial.sql +70 -0
- beadhub/migrations/beads/002_search_indexes.sql +20 -0
- beadhub/migrations/server/001_initial.sql +279 -0
- beadhub/names.py +33 -0
- beadhub/notifications.py +275 -0
- beadhub/pagination.py +125 -0
- beadhub/presence.py +495 -0
- beadhub/rate_limit.py +152 -0
- beadhub/redis_client.py +11 -0
- beadhub/roles.py +35 -0
- beadhub/routes/__init__.py +1 -0
- beadhub/routes/agents.py +303 -0
- beadhub/routes/bdh.py +655 -0
- beadhub/routes/beads.py +778 -0
- beadhub/routes/claims.py +141 -0
- beadhub/routes/escalations.py +471 -0
- beadhub/routes/init.py +348 -0
- beadhub/routes/mcp.py +338 -0
- beadhub/routes/policies.py +833 -0
- beadhub/routes/repos.py +538 -0
- beadhub/routes/status.py +568 -0
- beadhub/routes/subscriptions.py +362 -0
- beadhub/routes/workspaces.py +1642 -0
- beadhub/workspace_config.py +202 -0
- beadhub-0.1.0.dist-info/METADATA +254 -0
- beadhub-0.1.0.dist-info/RECORD +54 -0
- beadhub-0.1.0.dist-info/WHEEL +4 -0
- beadhub-0.1.0.dist-info/entry_points.txt +2 -0
- beadhub-0.1.0.dist-info/licenses/LICENSE +21 -0
beadhub/notifications.py
ADDED
|
@@ -0,0 +1,275 @@
|
|
|
1
|
+
"""Notification outbox pattern for reliable delivery.
|
|
2
|
+
|
|
3
|
+
This module implements the outbox pattern to ensure notifications are:
|
|
4
|
+
1. Recorded atomically (as close as possible to the triggering event)
|
|
5
|
+
2. Delivered reliably with retry capability
|
|
6
|
+
3. Tracked for observability
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
from __future__ import annotations
|
|
10
|
+
|
|
11
|
+
import json
|
|
12
|
+
import logging
|
|
13
|
+
import uuid
|
|
14
|
+
from typing import TYPE_CHECKING, List
|
|
15
|
+
from uuid import UUID
|
|
16
|
+
|
|
17
|
+
from aweb.messages_service import deliver_message
|
|
18
|
+
|
|
19
|
+
if TYPE_CHECKING:
|
|
20
|
+
from .beads_sync import BeadStatusChange
|
|
21
|
+
from .db import DatabaseInfra
|
|
22
|
+
|
|
23
|
+
# Module-level logger, named after this module per the stdlib logging convention.
logger = logging.getLogger(__name__)

# Maximum delivery attempts before an outbox entry stops being retried
# (see the `attempts < $2` filter in process_notification_outbox).
MAX_RETRY_ATTEMPTS = 3
|
|
26
|
+
|
|
27
|
+
|
|
28
|
+
async def record_notification_intents(
    status_changes: List["BeadStatusChange"],
    project_id: str,
    db_infra: "DatabaseInfra",
) -> int:
    """Persist one outbox row per (status change, subscriber) pair.

    Intended to run immediately after the triggering event commits. Changes
    that represent brand-new issues (no previous status) are ignored, and
    changes without any subscribers produce no rows.

    Args:
        status_changes: Bead status changes that may need notifications.
        project_id: Project UUID used for tenant isolation.
        db_infra: Database infrastructure handle.

    Returns:
        Count of outbox rows inserted.
    """
    # Imported lazily to avoid a circular import with the routes package.
    from .routes.subscriptions import get_subscribers_for_bead

    server_db = db_infra.get_manager("server")
    created = 0

    for change in status_changes:
        if change.old_status is None:
            # Newly created issues have no status transition to announce.
            continue

        recipients = await get_subscribers_for_bead(
            db_infra=db_infra,
            project_id=project_id,
            bead_id=change.bead_id,
            event_type="status_change",
            repo=change.repo,
        )
        if not recipients:
            continue

        # Serialize the payload once; it is identical for every recipient.
        payload_json = json.dumps(
            {
                "bead_id": change.bead_id,
                "repo": change.repo,
                "branch": change.branch,
                "old_status": change.old_status,
                "new_status": change.new_status,
                "title": change.title,
            }
        )

        for recipient in recipients:
            await server_db.execute(
                """
                INSERT INTO {{tables.notification_outbox}}
                (project_id, event_type, payload, recipient_workspace_id, recipient_alias)
                VALUES ($1, $2, $3, $4, $5)
                """,
                project_id,
                "bead_status_change",
                payload_json,
                recipient["workspace_id"],
                recipient["alias"],
            )
            created += 1

    return created
|
|
95
|
+
|
|
96
|
+
|
|
97
|
+
async def process_notification_outbox(
    project_id: str,
    db_infra: "DatabaseInfra",
    *,
    sender_agent_id: str,
    sender_alias: str,
    limit: int = 100,
) -> tuple[int, int]:
    """Process pending notifications from the outbox.

    Fetches pending/failed entries, attempts to send each notification as a
    message via ``deliver_message``, and updates the outbox entry with the
    result. Entries that fail remain retriable until MAX_RETRY_ATTEMPTS.

    Args:
        project_id: Project UUID to process notifications for
        db_infra: Database infrastructure
        sender_agent_id: Agent id the notification messages are sent from
        sender_alias: Display alias used as the message sender
        limit: Maximum number of entries to process in one batch

    Returns:
        Tuple of (sent_count, failed_count)
    """
    server_db = db_infra.get_manager("server")
    sent_count = 0
    failed_count = 0

    # Fetch pending entries (including failed entries under retry limit).
    # NOTE(review): FOR UPDATE SKIP LOCKED only excludes rows from concurrent
    # workers while the enclosing transaction holds the locks — confirm that
    # fetch_all runs inside a transaction spanning the per-row updates below,
    # otherwise two workers could double-send the same entry.
    rows = await server_db.fetch_all(
        """
        SELECT id, payload, recipient_workspace_id, recipient_alias, attempts
        FROM {{tables.notification_outbox}}
        WHERE project_id = $1
          AND status IN ('pending', 'failed')
          AND attempts < $2
        ORDER BY created_at ASC
        LIMIT $3
        FOR UPDATE SKIP LOCKED
        """,
        project_id,
        MAX_RETRY_ATTEMPTS,
        limit,
    )

    for row in rows:
        outbox_id = row["id"]
        payload = row["payload"]
        recipient_workspace_id = str(row["recipient_workspace_id"])
        # This attempt counts even if delivery fails below.
        attempts = row["attempts"] + 1

        # The driver may return JSON columns as raw strings; normalize to dict.
        if isinstance(payload, str):
            payload = json.loads(payload)

        # Mark as processing and record the attempt before trying delivery.
        await server_db.execute(
            """
            UPDATE {{tables.notification_outbox}}
            SET status = 'processing', attempts = $2
            WHERE id = $1
            """,
            outbox_id,
            attempts,
        )

        try:
            # Skip sending to deleted/missing workspaces (subscriptions may outlive workspaces).
            recipient_row = await server_db.fetch_one(
                """
                SELECT deleted_at
                FROM {{tables.workspaces}}
                WHERE workspace_id = $1 AND project_id = $2
                """,
                UUID(recipient_workspace_id),
                UUID(project_id),
            )
            if not recipient_row or recipient_row.get("deleted_at") is not None:
                # Raising routes this entry through the shared failure path below.
                raise RuntimeError("Recipient workspace not found or deleted")

            # Build notification message from the stored payload; defaults keep
            # delivery working even if the payload is missing fields.
            bead_id = payload.get("bead_id", "unknown")
            old_status = payload.get("old_status", "unknown")
            new_status = payload.get("new_status", "unknown")
            title = payload.get("title", "")
            repo = payload.get("repo", "")
            branch = payload.get("branch", "")

            subject = f"Bead status changed: {bead_id}"
            body = f"**{bead_id}** status changed from `{old_status}` to `{new_status}`\n\n"
            if title:
                body += f"Title: {title}\n"
            if repo:
                body += f"Repo: {repo}\n"
            if branch:
                body += f"Branch: {branch}\n"

            # Deterministic thread ID so all notifications for one bead share
            # a conversation thread (uuid5 over a stable name).
            thread_uuid = uuid.uuid5(uuid.NAMESPACE_URL, f"bead:{bead_id}")

            message_id, _created_at = await deliver_message(
                db_infra,
                project_id=project_id,
                from_agent_id=sender_agent_id,
                from_alias=sender_alias,
                to_agent_id=recipient_workspace_id,
                subject=subject,
                body=body,
                priority="normal",
                thread_id=str(thread_uuid),
            )

            # Mark as completed, linking the delivered message for audit.
            await server_db.execute(
                """
                UPDATE {{tables.notification_outbox}}
                SET status = 'completed',
                    processed_at = NOW(),
                    message_id = $2,
                    last_error = NULL
                WHERE id = $1
                """,
                outbox_id,
                message_id,
            )
            sent_count += 1

        except Exception as e:
            logger.exception(
                "Failed to send notification for outbox entry %s (attempt %d)",
                outbox_id,
                attempts,
            )
            # Mark as failed - stays retriable until MAX_RETRY_ATTEMPTS exhausted
            error_msg = str(e)[:500]  # Truncate long errors
            await server_db.execute(
                """
                UPDATE {{tables.notification_outbox}}
                SET status = 'failed',
                    last_error = $2
                WHERE id = $1
                """,
                outbox_id,
                error_msg,
            )
            failed_count += 1

    return sent_count, failed_count
|
|
242
|
+
|
|
243
|
+
|
|
244
|
+
async def cleanup_old_notifications(
    db_infra: "DatabaseInfra",
    project_id: str,
    days_old: int = 7,
) -> int:
    """Purge completed outbox entries older than a retention window.

    Only entries with status 'completed' are touched; pending and failed
    entries are kept regardless of age.

    Args:
        db_infra: Database infrastructure handle.
        project_id: Project UUID used for tenant isolation.
        days_old: Retention window in days, measured against processed_at.

    Returns:
        How many entries were removed.
    """
    server_db = db_infra.get_manager("server")

    # Delete and count in a single round trip via a data-modifying CTE.
    removed = await server_db.fetch_value(
        """
        WITH deleted AS (
            DELETE FROM {{tables.notification_outbox}}
            WHERE project_id = $1
              AND status = 'completed'
              AND processed_at < NOW() - INTERVAL '1 day' * $2
            RETURNING id
        )
        SELECT COUNT(*) FROM deleted
        """,
        project_id,
        days_old,
    )
    # Guard against a NULL/None scalar from the driver.
    return int(removed) if removed else 0
|
beadhub/pagination.py
ADDED
|
@@ -0,0 +1,125 @@
|
|
|
1
|
+
"""Cursor-based pagination helpers for BeadHub API endpoints.
|
|
2
|
+
|
|
3
|
+
This module provides consistent pagination across all list endpoints:
|
|
4
|
+
- Cursor encoding/decoding for stateless pagination
|
|
5
|
+
- Standard response format with {items, has_more, next_cursor}
|
|
6
|
+
- Parameter validation with sensible defaults and limits
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
from __future__ import annotations
|
|
10
|
+
|
|
11
|
+
import base64
|
|
12
|
+
import json
|
|
13
|
+
from typing import Any, Generic, Optional, TypeVar
|
|
14
|
+
|
|
15
|
+
from pydantic import BaseModel
|
|
16
|
+
|
|
17
|
+
# Pagination constants per spec
DEFAULT_LIMIT = 50  # page size used when the client omits `limit`
MAX_LIMIT = 200  # hard ceiling; larger requested limits are clamped down
MAX_CURSOR_SIZE_BYTES = 8192  # 8KB max cursor size to prevent DoS

# Item type carried by PaginatedResponse.
T = TypeVar("T")
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
class PaginatedResponse(BaseModel, Generic[T]):
    """Standard paginated response format.

    Attributes:
        items: List of items for the current page
        has_more: True if there are more items after this page
        next_cursor: Opaque cursor string for fetching the next page, None if no more items
    """

    # Current page of results; T is the per-endpoint item model.
    items: list[T]
    # Whether another page exists after this one.
    has_more: bool
    # Pass back as the `cursor` query parameter to get the next page;
    # None on the final page.
    next_cursor: Optional[str] = None
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
def encode_cursor(data: dict[str, Any]) -> str:
    """Serialize pagination state into an opaque, URL-safe cursor string.

    The state dict is rendered as compact JSON, base64url-encoded, and the
    trailing ``=`` padding is stripped (the decoder re-pads before decoding).

    Args:
        data: Pagination state, e.g. the last seen id or timestamp.

    Returns:
        A URL-safe base64 string carrying the encoded state.
    """
    compact = json.dumps(data, separators=(",", ":"))
    encoded = base64.urlsafe_b64encode(compact.encode("utf-8"))
    return encoded.decode("ascii").rstrip("=")
|
|
50
|
+
|
|
51
|
+
|
|
52
|
+
def decode_cursor(cursor: Optional[str]) -> Optional[dict[str, Any]]:
    """Decode a cursor string back to pagination state.

    Args:
        cursor: URL-safe base64 encoded cursor string, or None/empty

    Returns:
        Dictionary containing pagination state, or None if cursor was None/empty

    Raises:
        ValueError: If cursor is malformed (invalid base64 or JSON) or too large
    """
    if cursor is None or cursor == "":
        return None

    # Reject oversized cursors before doing any decoding work (DoS guard).
    if len(cursor) > MAX_CURSOR_SIZE_BYTES:
        raise ValueError(f"Invalid cursor: exceeds maximum size of {MAX_CURSOR_SIZE_BYTES} bytes")

    try:
        # Over-pad so the (non-strict) base64 decoder accepts any valid
        # padding; encode_cursor strips '=' on the way out.
        json_bytes = base64.urlsafe_b64decode(cursor + "===")
    except ValueError as e:  # binascii.Error is a ValueError subclass
        raise ValueError(f"Invalid cursor: malformed encoding ({e})") from e

    # The decoded payload can be larger than the encoded form's check implies;
    # validate the decoded size as well.
    if len(json_bytes) > MAX_CURSOR_SIZE_BYTES:
        raise ValueError("Invalid cursor: decoded data exceeds maximum size")

    try:
        data = json.loads(json_bytes.decode("utf-8"))
    except json.JSONDecodeError as e:
        raise ValueError(f"Invalid cursor: malformed data at position {e.pos}") from e
    except UnicodeDecodeError as e:
        # Fix: chain the cause (was raised without `from e`, dropping the
        # original exception context — ruff B904).
        raise ValueError("Invalid cursor: contains invalid UTF-8 data") from e

    # Cursors must carry a JSON object, not a scalar or array.
    if not isinstance(data, dict):
        raise ValueError("Invalid cursor: must decode to a dictionary")

    return data
|
|
92
|
+
|
|
93
|
+
|
|
94
|
+
def validate_pagination_params(
    limit: Optional[int],
    cursor: Optional[str],
) -> tuple[int, Optional[dict[str, Any]]]:
    """Normalize a limit/cursor pair taken from query parameters.

    Args:
        limit: Requested page size; clamped into [1, MAX_LIMIT], defaulting
            to DEFAULT_LIMIT when omitted.
        cursor: Opaque cursor string from a previous response, if any.

    Returns:
        Tuple of (validated_limit, decoded_cursor_dict), where the cursor
        element is None when no cursor was supplied.

    Raises:
        ValueError: If the cursor is malformed.
    """
    if limit is None:
        page_size = DEFAULT_LIMIT
    else:
        # Clamp out-of-range values instead of rejecting the request.
        page_size = min(max(limit, 1), MAX_LIMIT)

    # decode_cursor raises ValueError on malformed input and passes None through.
    return page_size, decode_cursor(cursor)
|