hypernote 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
hypernote/__init__.py ADDED
@@ -0,0 +1,58 @@
1
+ """Hypernote: server-owned notebook execution with actor attribution."""
2
+
3
+ __version__ = "0.1.0"
4
+
5
+ from hypernote.errors import (
6
+ CellNotFoundError,
7
+ ExecutionTimeoutError,
8
+ HypernoteError,
9
+ InputNotExpectedError,
10
+ NotebookNotFoundError,
11
+ RuntimeUnavailableError,
12
+ )
13
+ from hypernote.sdk import (
14
+ CellCollection,
15
+ CellHandle,
16
+ CellStatus,
17
+ CellType,
18
+ ChangeKind,
19
+ Job,
20
+ JobStatus,
21
+ Notebook,
22
+ NotebookStatus,
23
+ Runtime,
24
+ RuntimeStatus,
25
+ Snapshot,
26
+ connect,
27
+ )
28
+ from hypernote.server.extension import HypernoteExtension
29
+
30
+
31
+ def _jupyter_server_extension_points():
32
+ return [{"module": "hypernote", "app": HypernoteExtension}]
33
+
34
+
35
+ load_jupyter_server_extension = HypernoteExtension.load_classic_server_extension
36
+ _load_jupyter_server_extension = HypernoteExtension.load_classic_server_extension
37
+
38
+ __all__ = [
39
+ "connect",
40
+ "Notebook",
41
+ "CellCollection",
42
+ "CellHandle",
43
+ "Runtime",
44
+ "Job",
45
+ "Snapshot",
46
+ "NotebookStatus",
47
+ "CellStatus",
48
+ "CellType",
49
+ "RuntimeStatus",
50
+ "JobStatus",
51
+ "ChangeKind",
52
+ "HypernoteError",
53
+ "NotebookNotFoundError",
54
+ "CellNotFoundError",
55
+ "RuntimeUnavailableError",
56
+ "ExecutionTimeoutError",
57
+ "InputNotExpectedError",
58
+ ]
@@ -0,0 +1,371 @@
1
+ """Ephemeral ledger for job tracking and cell attribution metadata.
2
+
3
+ Hypernote treats runtimes as server-owned but notebook-scoped and ephemeral.
4
+ This ledger mirrors that model: it keeps recent job state and attribution in
5
+ memory, and callers can evict a notebook's state when its runtime is stopped.
6
+ """
7
+
8
+ from __future__ import annotations
9
+
10
+ import asyncio
11
+ import time
12
+ import uuid
13
+ from collections import defaultdict
14
+ from dataclasses import dataclass, field
15
+ from enum import Enum
16
+ from typing import Protocol
17
+
18
+
19
class JobStatus(str, Enum):
    """Lifecycle state of a ledger job; values are the wire-format strings."""

    QUEUED = "queued"                  # accepted, not yet running
    RUNNING = "running"                # actively executing
    AWAITING_INPUT = "awaiting_input"  # blocked on a stdin reply
    SUCCEEDED = "succeeded"            # terminal: completed normally
    FAILED = "failed"                  # terminal: raised / errored
    INTERRUPTED = "interrupted"        # terminal: cancelled mid-flight
26
+
27
+
28
class ActorType(str, Enum):
    """Who initiated an action: a human user or an automated agent."""

    HUMAN = "human"
    AGENT = "agent"
31
+
32
+
33
class JobAction(str, Enum):
    """Kind of operation a job represents against a runtime."""

    EXECUTE = "execute"          # run one or more cells
    INTERRUPT = "interrupt"      # interrupt in-flight execution
    STDIN_REPLY = "stdin_reply"  # answer a pending input() request
37
+
38
+
39
@dataclass
class Job:
    """One tracked unit of actor-initiated work against a notebook runtime.

    Timestamps are ``time.time()`` floats. ``started_at`` / ``completed_at``
    stay ``None`` until the corresponding transition happens.
    """

    job_id: str
    notebook_id: str
    actor_id: str
    actor_type: ActorType
    action: JobAction
    status: JobStatus
    created_at: float
    runtime_id: str | None = None        # set once a runtime claims the job
    target_cells: str | None = None      # opaque cell-selection descriptor
    request_uids: list[str] = field(default_factory=list)  # kernel request ids
    started_at: float | None = None
    completed_at: float | None = None
53
+
54
+
55
@dataclass
class CellAttribution:
    """Last-editor / last-executor metadata for a single notebook cell.

    Actor-type fields are stored as plain strings (the ``ActorType`` value),
    not enum members, so records serialize trivially.
    """

    notebook_id: str
    cell_id: str
    updated_at: float                      # time.time() of the last change
    last_editor_id: str | None = None
    last_editor_type: str | None = None
    last_executor_id: str | None = None
    last_executor_type: str | None = None
64
+
65
+
66
class Ledger(Protocol):
    """Structural interface for job/attribution storage backends.

    Implementations must be safe to call from asyncio tasks. Read methods
    return detached snapshots so callers cannot mutate internal state.
    """

    async def initialize(self) -> None: ...

    async def close(self) -> None: ...

    async def create_job(
        self,
        notebook_id: str,
        actor_id: str,
        actor_type: ActorType,
        action: JobAction,
        target_cells: str | None = None,
        runtime_id: str | None = None,
    ) -> Job: ...

    async def update_job_status(
        self,
        job_id: str,
        status: JobStatus,
        runtime_id: str | None = None,
    ) -> None: ...

    async def append_request_uid(self, job_id: str, request_uid: str) -> None: ...

    async def get_job(self, job_id: str) -> Job | None: ...

    async def list_jobs(
        self,
        notebook_id: str | None = None,
        status: JobStatus | None = None,
        limit: int = 100,
    ) -> list[Job]: ...

    async def list_active_jobs(self, notebook_id: str) -> list[Job]: ...

    async def update_cell_attribution(
        self,
        notebook_id: str,
        cell_id: str,
        editor_id: str | None = None,
        editor_type: ActorType | None = None,
        executor_id: str | None = None,
        executor_type: ActorType | None = None,
    ) -> None: ...

    async def get_cell_attribution(
        self,
        notebook_id: str,
        cell_id: str,
    ) -> CellAttribution | None: ...

    async def list_cell_attributions(self, notebook_id: str) -> list[CellAttribution]: ...

    async def evict_notebook(self, notebook_id: str) -> None: ...
120
+
121
+
122
@dataclass(frozen=True)
class MemoryLedgerPolicy:
    """Tuning knobs for :class:`MemoryLedger`.

    A negative ``max_completed_jobs_per_notebook`` disables pruning entirely;
    zero keeps no completed jobs at all.
    """

    max_completed_jobs_per_notebook: int = 20
125
+
126
+
127
class MemoryLedger:
    """In-memory, notebook-scoped implementation of the ledger protocol.

    Jobs stay queryable while a notebook's runtime is alive, with a small
    bounded history of completed jobs per notebook. Evicting a notebook
    clears both its jobs and its cell attribution records. All access is
    serialized through a single asyncio lock, and every read returns a
    defensive copy so callers cannot mutate internal state.
    """

    def __init__(self, policy: MemoryLedgerPolicy | None = None):
        self._policy = policy or MemoryLedgerPolicy()
        self._lock = asyncio.Lock()
        # job_id -> authoritative Job record
        self._jobs_by_id: dict[str, Job] = {}
        # notebook_id -> job ids in creation order (oldest first)
        self._job_ids_by_notebook: dict[str, list[str]] = defaultdict(list)
        # (notebook_id, cell_id) -> attribution record
        self._cell_attribution: dict[tuple[str, str], CellAttribution] = {}

    async def initialize(self) -> None:
        """No-op: the in-memory backend needs no setup."""
        return None

    async def close(self) -> None:
        """Drop all stored jobs and attribution records."""
        async with self._lock:
            for store in (
                self._jobs_by_id,
                self._job_ids_by_notebook,
                self._cell_attribution,
            ):
                store.clear()

    async def create_job(
        self,
        notebook_id: str,
        actor_id: str,
        actor_type: ActorType,
        action: JobAction,
        target_cells: str | None = None,
        runtime_id: str | None = None,
    ) -> Job:
        """Record a new QUEUED job and return a detached copy of it."""
        async with self._lock:
            new_job = Job(
                # Truncated uuid4 hex keeps ids short while staying unique
                # enough for an ephemeral, per-process ledger.
                job_id=uuid.uuid4().hex[:12],
                notebook_id=notebook_id,
                actor_id=actor_id,
                actor_type=actor_type,
                action=action,
                status=JobStatus.QUEUED,
                created_at=time.time(),
                runtime_id=runtime_id,
                target_cells=target_cells,
            )
            self._jobs_by_id[new_job.job_id] = new_job
            self._job_ids_by_notebook[notebook_id].append(new_job.job_id)
            self._prune_completed_jobs_locked(notebook_id)
            return _copy_job(new_job)

    async def update_job_status(
        self,
        job_id: str,
        status: JobStatus,
        runtime_id: str | None = None,
    ) -> None:
        """Transition a job's status, stamping started/completed times.

        Unknown (or already-evicted) job ids are silently ignored.
        """
        async with self._lock:
            record = self._jobs_by_id.get(job_id)
            if record is None:
                return

            stamp = time.time()
            record.status = status
            # First transition into RUNNING fixes the start time.
            if status == JobStatus.RUNNING and record.started_at is None:
                record.started_at = stamp
            # Any terminal transition (re-)stamps completion.
            if status in (
                JobStatus.SUCCEEDED,
                JobStatus.FAILED,
                JobStatus.INTERRUPTED,
            ):
                record.completed_at = stamp
            if runtime_id is not None:
                record.runtime_id = runtime_id

            self._prune_completed_jobs_locked(record.notebook_id)

    async def append_request_uid(self, job_id: str, request_uid: str) -> None:
        """Attach a kernel request uid to an existing job; raises if missing."""
        async with self._lock:
            record = self._jobs_by_id.get(job_id)
            if record is None:
                raise ValueError(f"Job {job_id} not found")
            record.request_uids.append(request_uid)

    async def get_job(self, job_id: str) -> Job | None:
        """Return a detached copy of the job, or None if unknown."""
        async with self._lock:
            found = self._jobs_by_id.get(job_id)
            if found is None:
                return None
            return _copy_job(found)

    async def list_jobs(
        self,
        notebook_id: str | None = None,
        status: JobStatus | None = None,
        limit: int = 100,
    ) -> list[Job]:
        """List jobs (newest first), optionally filtered, capped at *limit*."""
        async with self._lock:
            matches = self._iter_jobs_locked(notebook_id=notebook_id, status=status)
            return [_copy_job(j) for j in matches[:limit]]

    async def list_active_jobs(self, notebook_id: str) -> list[Job]:
        """List a notebook's non-terminal jobs, newest first."""
        live_states = (
            JobStatus.QUEUED,
            JobStatus.RUNNING,
            JobStatus.AWAITING_INPUT,
        )
        async with self._lock:
            return [
                _copy_job(j)
                for j in self._iter_jobs_locked(notebook_id=notebook_id)
                if j.status in live_states
            ]

    async def update_cell_attribution(
        self,
        notebook_id: str,
        cell_id: str,
        editor_id: str | None = None,
        editor_type: ActorType | None = None,
        executor_id: str | None = None,
        executor_type: ActorType | None = None,
    ) -> None:
        """Upsert last-editor / last-executor metadata for one cell.

        Editor (resp. executor) fields are only touched when the matching
        ``*_id`` argument is provided; the ``*_type`` alone is ignored.
        """
        async with self._lock:
            key = (notebook_id, cell_id)
            stamp = time.time()
            record = self._cell_attribution.get(key)
            if record is None:
                record = CellAttribution(
                    notebook_id=notebook_id,
                    cell_id=cell_id,
                    updated_at=stamp,
                )
                self._cell_attribution[key] = record

            if editor_id is not None:
                record.last_editor_id = editor_id
                record.last_editor_type = None if editor_type is None else editor_type.value
            if executor_id is not None:
                record.last_executor_id = executor_id
                record.last_executor_type = None if executor_type is None else executor_type.value
            record.updated_at = stamp

    async def get_cell_attribution(
        self,
        notebook_id: str,
        cell_id: str,
    ) -> CellAttribution | None:
        """Return a detached copy of one cell's attribution, or None."""
        async with self._lock:
            record = self._cell_attribution.get((notebook_id, cell_id))
            return _copy_attribution(record) if record is not None else None

    async def list_cell_attributions(self, notebook_id: str) -> list[CellAttribution]:
        """List a notebook's attribution records, most recently updated first."""
        async with self._lock:
            ordered = sorted(
                (a for a in self._cell_attribution.values() if a.notebook_id == notebook_id),
                key=lambda a: a.updated_at,
                reverse=True,
            )
            return [_copy_attribution(a) for a in ordered]

    async def evict_notebook(self, notebook_id: str) -> None:
        """Forget everything about a notebook (jobs and attribution)."""
        async with self._lock:
            for jid in self._job_ids_by_notebook.pop(notebook_id, []):
                self._jobs_by_id.pop(jid, None)

            for key in [k for k in self._cell_attribution if k[0] == notebook_id]:
                del self._cell_attribution[key]

    def _iter_jobs_locked(
        self,
        *,
        notebook_id: str | None = None,
        status: JobStatus | None = None,
    ) -> list[Job]:
        """Return live Job records, filtered and sorted newest-first.

        Caller must hold ``self._lock``.
        """
        if notebook_id is None:
            pool = list(self._jobs_by_id.values())
        else:
            pool = [
                self._jobs_by_id[jid]
                for jid in self._job_ids_by_notebook.get(notebook_id, [])
                if jid in self._jobs_by_id
            ]

        if status is not None:
            pool = [j for j in pool if j.status == status]

        pool.sort(key=lambda j: j.created_at, reverse=True)
        return pool

    def _prune_completed_jobs_locked(self, notebook_id: str) -> None:
        """Drop the oldest completed jobs past the per-notebook cap.

        Caller must hold ``self._lock``. A negative cap disables pruning.
        """
        cap = self._policy.max_completed_jobs_per_notebook
        if cap < 0:
            return

        terminal = {JobStatus.SUCCEEDED, JobStatus.FAILED, JobStatus.INTERRUPTED}
        order = self._job_ids_by_notebook.get(notebook_id, [])
        done = [
            jid
            for jid in order
            if jid in self._jobs_by_id and self._jobs_by_id[jid].status in terminal
        ]
        excess = len(done) - cap
        if excess <= 0:
            return

        victims = set(done[:excess])
        self._job_ids_by_notebook[notebook_id] = [
            jid
            for jid in self._job_ids_by_notebook.get(notebook_id, [])
            if jid not in victims
        ]
        for jid in victims:
            self._jobs_by_id.pop(jid, None)
343
+
344
+
345
def _copy_job(job: Job) -> Job:
    """Return a detached copy of *job* so callers cannot mutate ledger state.

    The only mutable field is ``request_uids``, which gets a fresh list.
    """
    state = dict(vars(job))
    state["request_uids"] = list(job.request_uids)
    return Job(**state)
360
+
361
+
362
def _copy_attribution(attr: CellAttribution) -> CellAttribution:
    """Return a detached copy of *attr*; all fields are immutable scalars."""
    return CellAttribution(**dict(vars(attr)))
File without changes