beaver-db 2.0rc2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
beaver/locks.py ADDED
@@ -0,0 +1,186 @@
1
+ import asyncio
2
+ import random
3
+ import time
4
+ import os
5
+ import uuid
6
+ from typing import Optional, Protocol, runtime_checkable, TYPE_CHECKING
7
+
8
+ if TYPE_CHECKING:
9
+ from .core import AsyncBeaverDB
10
+
11
+
12
@runtime_checkable
class IBeaverLock[T: BaseModel](Protocol):
    """
    The synchronous protocol for a named inter-process lock, exposed to
    users via the sync bridge.

    NOTE(review): ``BaseModel`` is not imported in this module; PEP 695
    bounds are evaluated lazily, so this only fails if the bound is
    introspected — confirm whether the (unused) type parameter T and its
    bound are intentional.
    """

    # Joins the lock queue; returns True once the lock is held, False on
    # timeout or when block=False and the lock is contested.
    def acquire(
        self,
        timeout: float | None = None,
        lock_ttl: float | None = None,
        poll_interval: float | None = None,
        block: bool = True,
    ) -> bool: ...

    # Releases the lock if held by this instance; no-op otherwise.
    def release(self) -> None: ...
    # Heartbeat: extends the lock's TTL; True if the claim was updated.
    def renew(self, lock_ttl: float | None = None) -> bool: ...
    # Force-removes ALL waiters for this lock name; True if any removed.
    def clear(self) -> bool: ...
    def __enter__(self) -> "IBeaverLock": ...
    def __exit__(self, exc_type, exc_val, exc_tb) -> None: ...
27
+
28
+
29
+ class AsyncBeaverLock:
30
+ def __init__(
31
+ self,
32
+ db: "AsyncBeaverDB",
33
+ name: str,
34
+ timeout: Optional[float] = None,
35
+ lock_ttl: float = 60.0,
36
+ poll_interval: float = 0.1,
37
+ ):
38
+ if not isinstance(name, str) or not name:
39
+ raise ValueError("Lock name must be a non-empty string.")
40
+ self._db = db
41
+ self._lock_name = name
42
+ self._timeout = timeout
43
+ self._lock_ttl = lock_ttl
44
+ self._poll_interval = poll_interval
45
+ self._waiter_id = f"pid:{os.getpid()}:id:{uuid.uuid4()}"
46
+ self._acquired = False
47
+
48
+ async def renew(self, lock_ttl: Optional[float] = None) -> bool:
49
+ if not self._acquired:
50
+ return False
51
+
52
+ ttl = lock_ttl or self._lock_ttl
53
+ new_expires_at = time.time() + ttl
54
+
55
+ async with self._db.transaction():
56
+ cursor = await self._db.connection.execute(
57
+ "UPDATE __beaver_lock_waiters__ SET expires_at = ? WHERE lock_name = ? AND waiter_id = ?",
58
+ (new_expires_at, self._lock_name, self._waiter_id),
59
+ )
60
+ return cursor.rowcount > 0
61
+
62
+ async def clear(self) -> bool:
63
+ async with self._db.transaction():
64
+ cursor = await self._db.connection.execute(
65
+ "DELETE FROM __beaver_lock_waiters__ WHERE lock_name = ?",
66
+ (self._lock_name,),
67
+ )
68
+ count = cursor.rowcount
69
+
70
+ self._acquired = False
71
+ return count > 0
72
+
73
+ async def acquire(
74
+ self,
75
+ timeout: float | None = None,
76
+ lock_ttl: float | None = None,
77
+ poll_interval: float | None = None,
78
+ block: bool = True,
79
+ ) -> bool:
80
+ if self._acquired:
81
+ return True
82
+
83
+ current_timeout = timeout if timeout is not None else self._timeout
84
+ current_lock_ttl = lock_ttl if lock_ttl is not None else self._lock_ttl
85
+ current_poll_interval = (
86
+ poll_interval if poll_interval is not None else self._poll_interval
87
+ )
88
+
89
+ start_time = time.time()
90
+ requested_at = time.time()
91
+ expires_at = requested_at + current_lock_ttl
92
+
93
+ try:
94
+ # 1. Add self to the FIFO queue (Atomic via transaction() lock)
95
+ async with self._db.transaction():
96
+ await self._db.connection.execute(
97
+ """
98
+ INSERT INTO __beaver_lock_waiters__
99
+ (lock_name, waiter_id, requested_at, expires_at)
100
+ VALUES (?, ?, ?, ?)
101
+ """,
102
+ (self._lock_name, self._waiter_id, requested_at, expires_at),
103
+ )
104
+
105
+ # 2. Start Polling Loop
106
+ while True:
107
+ async with self._db.transaction():
108
+ # A. Clean up expired locks from crashed processes
109
+ now = time.time()
110
+ await self._db.connection.execute(
111
+ "DELETE FROM __beaver_lock_waiters__ WHERE lock_name = ? AND expires_at < ?",
112
+ (self._lock_name, now),
113
+ )
114
+
115
+ # B. Check who is at the front of the queue
116
+ cursor = await self._db.connection.execute(
117
+ """
118
+ SELECT waiter_id FROM __beaver_lock_waiters__
119
+ WHERE lock_name = ?
120
+ ORDER BY requested_at ASC, rowid ASC
121
+ LIMIT 1
122
+ """,
123
+ (self._lock_name,),
124
+ )
125
+ result = await cursor.fetchone()
126
+
127
+ # C. Sanity Check: Ensure we are still in the queue
128
+ check_self = await self._db.connection.execute(
129
+ "SELECT 1 FROM __beaver_lock_waiters__ WHERE waiter_id = ?",
130
+ (self._waiter_id,),
131
+ )
132
+ if not await check_self.fetchone():
133
+ return False # We were deleted (cleared or expired)
134
+
135
+ if result and result["waiter_id"] == self._waiter_id:
136
+ self._acquired = True
137
+ return True
138
+
139
+ # 3. Check for timeout or non-blocking return
140
+ elapsed = time.time() - start_time
141
+ if current_timeout is not None and elapsed > current_timeout:
142
+ await self._release_from_queue()
143
+ return False
144
+
145
+ if not block:
146
+ await self._release_from_queue()
147
+ return False
148
+
149
+ # 4. Wait safely
150
+ jitter = current_poll_interval * 0.1
151
+ sleep_time = random.uniform(
152
+ current_poll_interval - jitter, current_poll_interval + jitter
153
+ )
154
+ await asyncio.sleep(sleep_time)
155
+
156
+ except Exception:
157
+ await self._release_from_queue()
158
+ raise
159
+
160
+ async def _release_from_queue(self):
161
+ try:
162
+ async with self._db.transaction():
163
+ await self._db.connection.execute(
164
+ "DELETE FROM __beaver_lock_waiters__ WHERE lock_name = ? AND waiter_id = ?",
165
+ (self._lock_name, self._waiter_id),
166
+ )
167
+ except Exception:
168
+ pass
169
+
170
+ async def release(self):
171
+ if not self._acquired:
172
+ return
173
+
174
+ await self._release_from_queue()
175
+ self._acquired = False
176
+
177
+ async def __aenter__(self) -> "AsyncBeaverLock":
178
+ if await self.acquire():
179
+ return self
180
+ raise TimeoutError("Cannot acquire lock.")
181
+
182
+ async def __aexit__(self, exc_type, exc_val, exc_tb):
183
+ await self.release()
184
+
185
+ def __repr__(self) -> str:
186
+ return f"AsyncBeaverLock(name='{self._lock_name}', acquired={self._acquired})"
beaver/logs.py ADDED
@@ -0,0 +1,187 @@
1
+ import asyncio
2
+ import json
3
+ import time
4
+ import sqlite3
5
+ from typing import (
6
+ IO,
7
+ Iterator,
8
+ AsyncIterator,
9
+ Protocol,
10
+ runtime_checkable,
11
+ TYPE_CHECKING,
12
+ NamedTuple,
13
+ )
14
+
15
+ from pydantic import BaseModel
16
+
17
+ from .manager import AsyncBeaverBase, atomic, emits
18
+
19
+ if TYPE_CHECKING:
20
+ from .core import AsyncBeaverDB
21
+
22
+
23
class LogEntry[T](NamedTuple):
    """A single log entry with timestamp and data."""

    # Epoch timestamp (seconds, float) under which the entry is stored.
    timestamp: float
    # The entry payload; a pydantic model instance when the log is typed.
    data: T
28
+
29
+
30
@runtime_checkable
class IBeaverLog[T: BaseModel](Protocol):
    """
    The Synchronous Protocol exposed to the user via BeaverBridge.
    """

    # Appends an entry; timestamp defaults to "now" when omitted.
    def log(self, data: T, timestamp: float | None = None) -> None: ...
    # Returns entries with timestamps in [start, end], oldest first,
    # optionally capped at `limit` entries.
    def range(
        self,
        start: float | None = None,
        end: float | None = None,
        limit: int | None = None,
    ) -> list[LogEntry[T]]: ...

    # Infinite iterator over entries written after the call (tail -f style).
    def live(self, poll_interval: float = 0.1) -> Iterator[LogEntry[T]]: ...

    # Deletes every entry in this log.
    def clear(self) -> None: ...
    # Total number of stored entries.
    def count(self) -> int: ...
    # Serializes the log; writes to `fp` when given, else returns a dict.
    def dump(self, fp: IO[str] | None = None) -> dict | None: ...
49
+
50
+
51
+ class AsyncBeaverLog[T: BaseModel](AsyncBeaverBase[T]):
52
+ """
53
+ A wrapper providing a Pythonic interface to a time-indexed log.
54
+ Refactored for Async-First architecture (v2.0).
55
+ """
56
+
57
+ @emits("log", payload=lambda data, *args, **kwargs: dict(data=data))
58
+ @atomic
59
+ async def log(self, data: T, timestamp: float | None = None):
60
+ """
61
+ Appends an entry to the log.
62
+ Ensures timestamp uniqueness (PK constraint) by micro-incrementing on collision.
63
+ """
64
+ ts = timestamp or time.time()
65
+ serialized_data = self._serialize(data)
66
+
67
+ # Retry loop to handle PK collisions (same microsecond)
68
+ while True:
69
+ try:
70
+ await self.connection.execute(
71
+ "INSERT INTO __beaver_logs__ (log_name, timestamp, data) VALUES (?, ?, ?)",
72
+ (self._name, ts, serialized_data),
73
+ )
74
+ break
75
+ except sqlite3.IntegrityError:
76
+ # Collision detected: shift by 1 microsecond and retry
77
+ ts += 0.000001
78
+
79
+ async def range(
80
+ self,
81
+ start: float | None = None,
82
+ end: float | None = None,
83
+ limit: int | None = None,
84
+ ) -> list[LogEntry[T]]:
85
+ """
86
+ Retrieves a list of log entries within a time range.
87
+ """
88
+ query = "SELECT timestamp, data FROM __beaver_logs__ WHERE log_name = ?"
89
+ params = [self._name]
90
+
91
+ if start is not None:
92
+ query += " AND timestamp >= ?"
93
+ params.append(start)
94
+
95
+ if end is not None:
96
+ query += " AND timestamp <= ?"
97
+ params.append(end)
98
+
99
+ query += " ORDER BY timestamp ASC"
100
+
101
+ if limit is not None:
102
+ query += " LIMIT ?"
103
+ params.append(limit)
104
+
105
+ cursor = await self.connection.execute(query, tuple(params))
106
+ rows = await cursor.fetchall()
107
+
108
+ return [
109
+ LogEntry(timestamp=row["timestamp"], data=self._deserialize(row["data"]))
110
+ for row in rows
111
+ ]
112
+
113
+ async def live(self, poll_interval: float = 0.1) -> AsyncIterator[LogEntry[T]]:
114
+ """
115
+ Yields new log entries as they are added in real-time.
116
+ This is an infinite async generator.
117
+ """
118
+ # Start trailing from "now"
119
+ last_ts = time.time()
120
+
121
+ while True:
122
+ # Poll for new items since last_ts
123
+ cursor = await self.connection.execute(
124
+ """
125
+ SELECT timestamp, data FROM __beaver_logs__
126
+ WHERE log_name = ? AND timestamp > ?
127
+ ORDER BY timestamp ASC
128
+ """,
129
+ (self._name, last_ts),
130
+ )
131
+ rows = await cursor.fetchall()
132
+
133
+ if rows:
134
+ last_ts = rows[-1]["timestamp"]
135
+ for row in rows:
136
+ yield LogEntry(
137
+ timestamp=row["timestamp"], data=self._deserialize(row["data"])
138
+ )
139
+
140
+ # Non-blocking sleep yields control to the event loop
141
+ await asyncio.sleep(poll_interval)
142
+
143
+ async def count(self) -> int:
144
+ """Returns the total number of entries in the log."""
145
+ cursor = await self.connection.execute(
146
+ "SELECT COUNT(*) FROM __beaver_logs__ WHERE log_name = ?", (self._name,)
147
+ )
148
+ row = await cursor.fetchone()
149
+ return row[0] if row else 0
150
+
151
+ @emits("clear", payload=lambda *args, **kwargs: dict())
152
+ @atomic
153
+ async def clear(self):
154
+ """Clears all entries in this log."""
155
+ await self.connection.execute(
156
+ "DELETE FROM __beaver_logs__ WHERE log_name = ?", (self._name,)
157
+ )
158
+
159
+ async def dump(self, fp: IO[str] | None = None) -> dict | None:
160
+ """
161
+ Dumps the entire log to a JSON-compatible object.
162
+ """
163
+ # Retrieve all items
164
+ entries = await self.range()
165
+
166
+ items_list = []
167
+ for entry in entries:
168
+ val = entry.data
169
+ if self._model and isinstance(val, BaseModel):
170
+ val = json.loads(val.model_dump_json())
171
+
172
+ items_list.append({"timestamp": entry.timestamp, "data": val})
173
+
174
+ dump_obj = {
175
+ "metadata": {
176
+ "type": "Log",
177
+ "name": self._name,
178
+ "count": len(items_list),
179
+ },
180
+ "items": items_list,
181
+ }
182
+
183
+ if fp:
184
+ json.dump(dump_obj, fp, indent=2)
185
+ return None
186
+
187
+ return dump_obj
beaver/manager.py ADDED
@@ -0,0 +1,203 @@
1
+ import json
2
+ import functools
3
+ import weakref
4
+ from typing import Callable, Type, Optional, Self, Any, TYPE_CHECKING
5
+
6
+ from pydantic import BaseModel
7
+
8
+ from .locks import AsyncBeaverLock
9
+
10
+ # Forward reference for type checking to avoid circular imports
11
+ if TYPE_CHECKING:
12
+ from .core import AsyncBeaverDB
13
+ from .cache import ICache
14
+ from .events import AsyncBeaverEvents, EventHandler
15
+
16
+
17
class AsyncBeaverBase[T: BaseModel]:
    """
    Base class for async data managers.
    Handles serialization, locking, and basic connection access.
    """

    def __init__(self, name: str, db: "AsyncBeaverDB", model: Type[T] | None = None):
        """
        Initializes the base manager.

        Args:
            name: Non-empty name of the data structure instance.
            db: The shared async database handle.
            model: Optional pydantic model class used to (de)serialize values.

        Raises:
            TypeError: If ``name`` is not a non-empty string.
        """
        # Automatically determine the prefix from the child class name
        # (e.g. "AsyncBeaverLog" -> "log"); used for topic and lock naming.
        cls_name = self.__class__.__name__
        manager_type_prefix = cls_name.replace("AsyncBeaver", "").lower()

        if not isinstance(name, str) or not name:
            raise TypeError(
                f"{manager_type_prefix.capitalize()} name must be a non-empty string."
            )

        self._name = name
        self._db = db
        self._model = model
        # Unique event topic for this instance, e.g. "log:metrics".
        self._topic = f"{manager_type_prefix}:{self._name}"

        # Lazy-loaded event manager
        self._event_manager: "AsyncBeaverEvents | None" = None

        # Public lock for batch operations
        public_lock_name = f"__lock__{manager_type_prefix}__{name}"
        self._lock = AsyncBeaverLock(db, public_lock_name)

        # Internal lock for atomic methods
        internal_lock_name = f"__internal_lock__{manager_type_prefix}__{name}"
        self._internal_lock = AsyncBeaverLock(
            db,
            internal_lock_name,
            timeout=5.0,  # Short timeout for internal operations
            lock_ttl=5.0,  # Short TTL to clear crashes
        )

    @property
    def locked(self) -> bool:
        """Returns whether the current manager is locked by this process."""
        return self._lock._acquired

    @property
    def connection(self) -> Any:
        """Returns the shared async connection."""
        return self._db.connection

    @property
    def cache(self) -> "ICache":
        """Returns the thread-local cache for this manager (Stub)."""
        return self._db.cache(self._topic)

    @property
    def events(self) -> "AsyncBeaverEvents":
        """
        Returns the Event Manager attached to this data structure.
        Lazy-loaded to avoid circular imports during init.
        """
        if self._event_manager is None:
            # Import here to avoid circular dependency loop
            from .events import AsyncBeaverEvents

            # We create an event manager scoped to this manager's unique topic name
            # This ensures events like "set" are unique to THIS dictionary instance.
            # We use the same model T so event payloads are typed correctly if applicable.
            self._event_manager = AsyncBeaverEvents(
                name=self._topic, db=self._db, model=self._model
            )

        return self._event_manager

    def _serialize(self, value: T) -> str:
        """Serializes the given value to a JSON string (Sync CPU bound)."""
        if isinstance(value, BaseModel):
            return value.model_dump_json()
        return json.dumps(value)

    def _deserialize(self, value: str) -> T:
        """Deserializes a JSON string (Sync CPU bound)."""
        # When no model is configured, values round-trip as plain JSON types.
        if self._model:
            return self._model.model_validate_json(value)
        return json.loads(value)

    # --- Public Lock Interface ---

    async def acquire(
        self,
        timeout: Optional[float] = None,
        lock_ttl: Optional[float] = None,
        poll_interval: Optional[float] = None,
        block: bool = True,
    ) -> bool:
        """Acquires the public inter-process lock on this manager."""
        return await self._lock.acquire(
            timeout=timeout,
            lock_ttl=lock_ttl,
            poll_interval=poll_interval,
            block=block,
        )

    async def release(self):
        """Releases the public inter-process lock on this manager."""
        await self._lock.release()

    async def renew(self, lock_ttl: Optional[float] = None) -> bool:
        """Renews the TTL (heartbeat) of the public lock."""
        return await self._lock.renew(lock_ttl)

    async def __aenter__(self) -> Self:
        """Async Context Manager for public locking."""
        if await self.acquire():
            return self
        raise TimeoutError(f"Failed to acquire public lock for '{self._name}'")

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        await self.release()

    # --- Events Portal ---

    async def on(self, event: str, callback: Callable) -> "EventHandler":
        """
        Subscribes to an event on this manager (e.g. "set", "push").
        This is a convenience wrapper around self.events.attach().
        """
        return await self.events.attach(event, callback)

    async def off(self, event: str, callback: Callable):
        """
        Unsubscribes from an event.
        Convenience wrapper around self.events.detach().
        """
        await self.events.detach(event, callback)
152
+
153
+
154
def atomic(func):
    """
    Decorator that serializes a manager coroutine behind the manager's
    *internal* inter-process lock and executes it inside a DB transaction.
    """

    @functools.wraps(func)
    async def wrapper(self, *args, **kwargs):
        # Acquire the cross-process lock first, then open the transaction,
        # so only the lock owner ever holds an open transaction.
        async with self._internal_lock, self._db.transaction():
            return await func(self, *args, **kwargs)

    return wrapper
167
+
168
+
169
+ def emits(event: str | None = None, payload: Callable | None = None):
170
+ """
171
+ A decorator to emit an event after a manager method completes.
172
+ Uses the manager's attached Event Bus.
173
+ """
174
+
175
+ def decorator(func):
176
+ event_name = event or func.__name__
177
+ payload_func = payload or (lambda *args, **kwargs: dict(args=args, **kwargs))
178
+
179
+ @functools.wraps(func)
180
+ async def wrapper(self: AsyncBeaverBase, *args, **kwargs):
181
+ # Calculate payload BEFORE mutation (to capture args)
182
+ # or AFTER? Usually we want the *result* or the *input*.
183
+ # The current lambda often uses args.
184
+
185
+ # Execute the actual async operation
186
+ result = await func(self, *args, **kwargs)
187
+
188
+ # PERFORMANCE FIX: Only emit if the event manager has been initialized.
189
+ # This prevents starting the background polling loop for every manager
190
+ # unless the user has explicitly attached a listener (which inits the manager).
191
+ if self._event_manager is not None:
192
+ try:
193
+ payload_data = payload_func(*args, **kwargs)
194
+ # We await it to ensure the event is persisted to the log before returning
195
+ await self._event_manager.emit(event_name, payload_data)
196
+ except Exception:
197
+ pass
198
+
199
+ return result
200
+
201
+ return wrapper
202
+
203
+ return decorator
beaver/queries.py ADDED
@@ -0,0 +1,66 @@
1
+ from dataclasses import dataclass
2
+ from typing import Any, cast, overload
3
+
4
+
5
@dataclass
class Filter:
    """A single comparison produced by a Query expression (path op value)."""

    # Dotted attribute path into the queried object, e.g. "user.age".
    path: str
    # Comparison operator symbol: "==", "!=", ">", ">=", "<", or "<=".
    operator: str
    # The literal value the field is compared against.
    value: Any
10
+
11
+
12
+ class Query[T]:
13
+ def __init__(self, model: type[T] | None = None, path: str = "") -> None:
14
+ self._model = model
15
+ self._path = path
16
+
17
+ def __getattr__(self, name: str):
18
+ new_path = f"{self._path}.{name}" if self._path else name
19
+ return Query(self._model, new_path)
20
+
21
+ # --- Standard Operators ---
22
+
23
+ def __eq__(self, other) -> Filter: # type: ignore
24
+ return Filter(self._path, "==", other)
25
+
26
+ def __ne__(self, other) -> Filter: # type: ignore
27
+ return Filter(self._path, "!=", other)
28
+
29
+ # --- Arithmetic Operators
30
+
31
+ def __gt__(self, other) -> Filter:
32
+ return Filter(self._path, ">", other)
33
+
34
+ def __ge__(self, other) -> Filter:
35
+ return Filter(self._path, ">=", other)
36
+
37
+ def __lt__(self, other) -> Filter:
38
+ return Filter(self._path, "<", other)
39
+
40
+ def __le__(self, other) -> Filter:
41
+ return Filter(self._path, "<=", other)
42
+
43
+
44
+ @overload
45
+ def q[T](model_or_path: type[T]) -> T:
46
+ pass
47
+
48
+
49
+ @overload
50
+ def q(model_or_path: str) -> Query:
51
+ pass
52
+
53
+
54
+ @overload
55
+ def q() -> Query:
56
+ pass
57
+
58
+
59
+ def q(model_or_path: type | str | None = None) -> Any:
60
+ if isinstance(model_or_path, type):
61
+ return Query(model=model_or_path)
62
+
63
+ if isinstance(model_or_path, str):
64
+ return Query(path=model_or_path)
65
+
66
+ return Query()