graphrefly 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. graphrefly/__init__.py +160 -0
  2. graphrefly/compat/__init__.py +18 -0
  3. graphrefly/compat/async_utils.py +228 -0
  4. graphrefly/compat/asyncio_runner.py +89 -0
  5. graphrefly/compat/trio_runner.py +81 -0
  6. graphrefly/core/__init__.py +142 -0
  7. graphrefly/core/clock.py +20 -0
  8. graphrefly/core/dynamic_node.py +749 -0
  9. graphrefly/core/guard.py +277 -0
  10. graphrefly/core/meta.py +149 -0
  11. graphrefly/core/node.py +963 -0
  12. graphrefly/core/protocol.py +460 -0
  13. graphrefly/core/runner.py +107 -0
  14. graphrefly/core/subgraph_locks.py +296 -0
  15. graphrefly/core/sugar.py +138 -0
  16. graphrefly/core/versioning.py +193 -0
  17. graphrefly/extra/__init__.py +313 -0
  18. graphrefly/extra/adapters.py +2149 -0
  19. graphrefly/extra/backoff.py +287 -0
  20. graphrefly/extra/backpressure.py +113 -0
  21. graphrefly/extra/checkpoint.py +307 -0
  22. graphrefly/extra/composite.py +303 -0
  23. graphrefly/extra/cron.py +133 -0
  24. graphrefly/extra/data_structures.py +707 -0
  25. graphrefly/extra/resilience.py +727 -0
  26. graphrefly/extra/sources.py +766 -0
  27. graphrefly/extra/tier1.py +1067 -0
  28. graphrefly/extra/tier2.py +1802 -0
  29. graphrefly/graph/__init__.py +31 -0
  30. graphrefly/graph/graph.py +2249 -0
  31. graphrefly/integrations/__init__.py +1 -0
  32. graphrefly/integrations/fastapi.py +767 -0
  33. graphrefly/patterns/__init__.py +5 -0
  34. graphrefly/patterns/ai.py +2132 -0
  35. graphrefly/patterns/cqrs.py +515 -0
  36. graphrefly/patterns/memory.py +639 -0
  37. graphrefly/patterns/messaging.py +553 -0
  38. graphrefly/patterns/orchestration.py +536 -0
  39. graphrefly/patterns/reactive_layout/__init__.py +81 -0
  40. graphrefly/patterns/reactive_layout/measurement_adapters.py +276 -0
  41. graphrefly/patterns/reactive_layout/reactive_block_layout.py +434 -0
  42. graphrefly/patterns/reactive_layout/reactive_layout.py +943 -0
  43. graphrefly/py.typed +1 -0
  44. graphrefly-0.1.0.dist-info/METADATA +253 -0
  45. graphrefly-0.1.0.dist-info/RECORD +47 -0
  46. graphrefly-0.1.0.dist-info/WHEEL +4 -0
  47. graphrefly-0.1.0.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,963 @@
1
+ """GraphReFly ``node`` primitive — aligned with graphrefly-ts ``src/core/node.ts``."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import operator
6
+ import threading
7
+ from collections.abc import Callable, Mapping, Sequence
8
+ from contextlib import suppress
9
+ from functools import partial
10
+ from types import MappingProxyType
11
+ from typing import Any, cast
12
+
13
+ from graphrefly.core.guard import (
14
+ Actor,
15
+ GuardAction,
16
+ GuardDenied,
17
+ normalize_actor,
18
+ record_mutation,
19
+ )
20
+ from graphrefly.core.protocol import Messages, MessageType, emit_with_batch, propagates_to_meta
21
+ from graphrefly.core.subgraph_locks import (
22
+ acquire_subgraph_write_lock_with_defer,
23
+ ensure_registered,
24
+ union_nodes,
25
+ )
26
+ from graphrefly.core.versioning import (
27
+ HashFn,
28
+ NodeVersionInfo,
29
+ VersioningLevel,
30
+ advance_version,
31
+ create_versioning,
32
+ default_hash,
33
+ )
34
+
35
# --- Status & typing (graphrefly-ts node.ts) ---------------------------------

# Status strings mirror the TS NodeStatus union; the values produced in this
# module are "dirty", "settled", "resolved", "completed", "errored",
# "disconnected" (see _status_after_message).
type NodeStatus = str  # structural: same strings as TS NodeStatus
38
+
39
+ # --- BitSet: Python int bitmask (unlimited precision; TS uses int + Uint32Array) ---
40
+
41
+
42
+ class _BitSet:
43
+ __slots__ = ("_bits", "_width")
44
+
45
+ def __init__(self, width: int) -> None:
46
+ self._width = width
47
+ self._bits = 0
48
+
49
+ def set(self, index: int) -> None:
50
+ self._bits |= 1 << index
51
+
52
+ def clear(self, index: int) -> None:
53
+ self._bits &= ~(1 << index)
54
+
55
+ def has(self, index: int) -> bool:
56
+ return bool(self._bits & (1 << index))
57
+
58
+ def covers(self, other: _BitSet) -> bool:
59
+ ob = other._bits
60
+ return (self._bits & ob) == ob
61
+
62
+ def any(self) -> bool:
63
+ return self._bits != 0
64
+
65
+ def reset(self) -> None:
66
+ self._bits = 0
67
+
68
+
69
def _create_bit_set(size: int) -> _BitSet:
    """Factory for dependency masks, mirroring the TS ``createBitSet`` helper."""
    return _BitSet(size)
71
+
72
+
73
+ def _status_after_message(status: NodeStatus, msg: Message) -> NodeStatus:
74
+ t = msg[0]
75
+ if t is MessageType.DIRTY:
76
+ return "dirty"
77
+ if t is MessageType.DATA:
78
+ return "settled"
79
+ if t is MessageType.RESOLVED:
80
+ return "resolved"
81
+ if t is MessageType.COMPLETE:
82
+ return "completed"
83
+ if t is MessageType.ERROR:
84
+ return "errored"
85
+ if t is MessageType.INVALIDATE:
86
+ return "dirty"
87
+ if t is MessageType.TEARDOWN:
88
+ return "disconnected"
89
+ return status
90
+
91
+
92
# Open wire set: first element may be MessageType or any hashable tag (forward compat).
# Shape is ``(tag,)`` or ``(tag, payload)`` — e.g. ``(MessageType.DATA, value)``.
type Message = tuple[Any, Any] | tuple[Any]
94
+
95
+
96
+ def _is_cleanup_fn(value: object) -> bool:
97
+ """Matches TS ``typeof out === 'function'`` (cleanup vs emitted value)."""
98
+ return callable(value)
99
+
100
+
101
+ def _is_node_sequence(value: object) -> bool:
102
+ if not isinstance(value, (list, tuple)):
103
+ return False
104
+ if len(value) == 0:
105
+ return True
106
+ return callable(getattr(value[0], "subscribe", None))
107
+
108
+
109
+ def _is_node_options(value: object) -> bool:
110
+ return isinstance(value, dict) or (
111
+ value is not None
112
+ and not callable(value)
113
+ and not isinstance(value, (list, tuple))
114
+ and hasattr(value, "keys")
115
+ )
116
+
117
+
118
+ def _as_options_dict(value: object) -> dict[str, Any]:
119
+ if isinstance(value, dict):
120
+ return dict(value)
121
+ if isinstance(value, Mapping):
122
+ return dict(value)
123
+ o: Any = value
124
+ return {k: getattr(o, k) for k in o}
125
+
126
+
127
class NodeActions:
    """Imperative API handed to a node's compute function.

    Bundles three callbacks so ``fn`` can push messages downstream, emit a
    value, or send messages upstream without touching node internals.
    """

    __slots__ = ("_down", "_emit", "_up")

    def __init__(
        self,
        down: Callable[[Messages], None],
        emit: Callable[[Any], None],
        up: Callable[[Messages], None],
    ) -> None:
        # Pure delegation — the callbacks are stored unmodified.
        self._down = down
        self._emit = emit
        self._up = up

    def down(self, messages: Messages) -> None:
        """Forward *messages* downstream."""
        self._down(messages)

    def emit(self, value: Any) -> None:
        """Emit *value* as this node's output."""
        self._emit(value)

    def up(self, messages: Messages) -> None:
        """Forward *messages* upstream."""
        self._up(messages)
150
+
151
+
152
# Compute function: receives the list of dependency values plus a NodeActions
# handle; may return a value (auto-emitted), a cleanup callable, or None.
type NodeFn = Callable[[list[Any], NodeActions], Any]
153
+
154
+
155
class SubscribeHints:
    """Optimization hints supplied alongside a sink in ``subscribe``.

    Args:
        single_dep: Set when the subscribing node has exactly one dependency;
            lets the source take the single-dep fast path that elides
            redundant ``DIRTY`` messages.

    Example:
        ```python
        from graphrefly import state
        from graphrefly.core.node import SubscribeHints
        x = state(1)
        hints = SubscribeHints(single_dep=True)
        unsub = x.subscribe(lambda msgs: None, hints)
        ```
    """

    __slots__ = ("single_dep",)

    def __init__(self, *, single_dep: bool = False) -> None:
        # Consumed by NodeImpl._can_skip_dirty.
        self.single_dep = single_dep
176
+
177
+
178
class NodeImpl[T]:
    """Internal implementation — use :func:`node` factory.

    Holds the cached value, lifecycle status, dependency bookkeeping
    (bit masks), sink fan-out, guard/versioning state, and companion
    meta nodes.
    """

    # __slots__ keeps per-node memory small; "__weakref__" is listed so the
    # node can be weakly referenced despite having no instance __dict__.
    __slots__ = (
        "__weakref__",
        "_actions",
        "_all_deps_complete_mask",
        "_auto_complete",
        "_cache_lock",
        "_cached",
        "_cleanup",
        "_connected",
        "_connecting",
        "_dep_complete_mask",
        "_dep_dirty_mask",
        "_dep_settled_mask",
        "_deps",
        "_describe_kind",
        "_equals",
        "_fn",
        "_guard",
        "_has_deps",
        "_last_dep_values",
        "_last_mutation",
        "_manual_emit_used",
        "_meta",
        "_name",
        "_on_message",
        "_opts",
        "_producer_started",
        "_resubscribable",
        "_reset_on_teardown",
        "_sink_count",
        "_single_dep_sink_count",
        "_single_dep_sinks",
        "_sinks",
        "_status",
        "_terminal",
        "_thread_safe",
        "_upstream_unsubs",
        "_inspector_hook",
        "_versioning",
        "_hash_fn",
    )
222
+
223
    def __init__(
        self,
        deps: list[NodeImpl[Any]],
        fn: NodeFn | None,
        opts: dict[str, Any],
    ) -> None:
        """Build a node from *deps*, compute *fn*, and raw *opts*.

        Args:
            deps: Upstream dependency nodes (may be empty).
            fn: Compute function, or ``None`` for a pure pass-through/source.
            opts: Raw options dict (see :func:`node` for accepted keys).

        Raises:
            TypeError: if the ``guard`` option is neither callable nor None.
        """
        # --- plain option unpacking ---
        self._opts = opts
        self._name: str | None = opts.get("name")
        self._describe_kind: str | None = opts.get("describe_kind")
        # Default equality is identity (operator.is_), matching JS ===.
        self._equals: Callable[[Any, Any], bool] = opts.get("equals", operator.is_)
        self._resubscribable: bool = bool(opts.get("resubscribable", False))
        self._reset_on_teardown: bool = bool(opts.get("reset_on_teardown", False))
        self._auto_complete: bool = bool(opts.get("complete_when_deps_complete", True))
        self._thread_safe: bool = bool(opts.get("thread_safe", True))

        self._on_message = opts.get("on_message")
        self._fn = fn
        self._deps = deps
        self._has_deps = len(deps) > 0

        # Guard must be callable (or absent) — fail fast at construction.
        raw_guard = opts.get("guard")
        if raw_guard is not None and not callable(raw_guard):
            msg = "node option 'guard' must be callable or None"
            raise TypeError(msg)
        self._guard: Callable[[Actor, GuardAction], bool] | None = raw_guard
        self._last_mutation: dict[str, Any] | None = None

        # _cache_lock guards external get() reads; subgraph locks serialize writes.
        self._cache_lock = threading.Lock() if self._thread_safe else None
        self._cached: T | None = opts.get("initial")
        self._status: NodeStatus = "disconnected" if self._has_deps else "settled"

        # Versioning (GRAPHREFLY-SPEC §7)
        versioning_level: VersioningLevel | None = opts.get("versioning")
        self._hash_fn: HashFn = opts.get("versioning_hash", default_hash)
        self._versioning: NodeVersionInfo | None = (
            create_versioning(
                versioning_level,
                self._cached,
                id=opts.get("versioning_id"),
                hash_fn=self._hash_fn,
            )
            if versioning_level is not None
            else None
        )

        self._terminal = False
        self._connected = False
        self._connecting = False
        self._producer_started = False

        # Dependency bookkeeping: one bit per dep in each mask.
        self._dep_dirty_mask = _create_bit_set(len(deps))
        self._dep_settled_mask = _create_bit_set(len(deps))
        self._dep_complete_mask = _create_bit_set(len(deps))
        self._all_deps_complete_mask = _create_bit_set(len(deps))
        for i in range(len(deps)):
            self._all_deps_complete_mask.set(i)

        self._last_dep_values: list[Any] | None = None
        self._cleanup: Callable[[], None] | None = None
        self._manual_emit_used = False

        # _sinks is None (no sinks), a bare callable (one sink), or a set.
        self._sinks: Callable[[Messages], None] | set[Callable[[Messages], None]] | None = None
        self._sink_count = 0
        self._single_dep_sink_count = 0
        self._single_dep_sinks: set[Callable[[Messages], None]] = set()
        self._upstream_unsubs: list[Callable[[], None]] = []
        self._inspector_hook: Callable[[dict[str, Any]], None] | None = None

        # Companion meta nodes: plain source nodes that inherit thread-safety
        # and (if present) this node's guard.
        self._meta: dict[str, NodeImpl[Any]] = {}
        for k, v in (opts.get("meta") or {}).items():
            meta_name = f"{self._name or 'node'}:meta:{k}"
            meta_opts: dict[str, Any] = {
                "initial": v,
                "name": meta_name,
                "thread_safe": self._thread_safe,
            }
            if self._guard is not None:
                meta_opts["guard"] = self._guard
            self._meta[k] = node(**meta_opts)

        # Register in the subgraph-lock union-find so this node shares a lock
        # with its deps and meta nodes.
        if self._thread_safe:
            ensure_registered(self)
            for d in self._deps:
                union_nodes(self, d)
            for meta_node in self._meta.values():
                union_nodes(self, meta_node)

        self._actions = NodeActions(
            down=lambda msgs: self._manual_down(msgs),
            emit=lambda v: self._manual_emit(v),
            up=lambda msgs: self.up(msgs, internal=True),
        )
315
+
316
+ def _set_inspector_hook(
317
+ self, hook: Callable[[dict[str, Any]], None] | None
318
+ ) -> Callable[[], None]:
319
+ """Internal inspector hook attach/detach for graph observability."""
320
+ prev = self._inspector_hook
321
+ self._inspector_hook = hook
322
+
323
+ def dispose() -> None:
324
+ if self._inspector_hook is hook:
325
+ self._inspector_hook = prev
326
+
327
+ return dispose
328
+
329
+ # --- Private methods (promoted from closures) ---
330
+
331
    def _manual_down(self, messages: Messages) -> None:
        """``actions.down``: flag manual emission, then push *messages* downstream."""
        self._manual_emit_used = True
        self.down(messages, internal=True)

    def _manual_emit(self, value: Any) -> None:
        """``actions.emit``: flag manual emission, then emit with equality check."""
        self._manual_emit_used = True
        self._emit_auto_value(value)
338
+
339
+ def _emit_to_sinks(self, msgs: Messages) -> None:
340
+ if self._sinks is None:
341
+ return
342
+ if isinstance(self._sinks, set):
343
+ # Snapshot: a sink callback may unsubscribe itself or others mid-iteration.
344
+ # Iterating the live set would raise RuntimeError on mutation.
345
+ snapshot = list(self._sinks)
346
+ for s in snapshot:
347
+ s(msgs)
348
+ else:
349
+ self._sinks(msgs)
350
+
351
    def _handle_local_lifecycle(self, messages: Messages) -> None:
        """Apply the lifecycle side effects of *messages* to this node's own state.

        DATA caches the payload (and advances the version); INVALIDATE runs
        cleanup and clears cached state; COMPLETE/ERROR mark the node
        terminal; TEARDOWN runs cleanup, notifies meta nodes, then severs
        upstream wiring and stops the producer. Meta eligibility for other
        signals is centralized in :func:`propagates_to_meta`.
        """
        lock = self._cache_lock
        for m in messages:
            t = m[0]
            if t is MessageType.DATA:
                if lock is not None:
                    with lock:
                        self._cached = m[1]  # type: ignore[misc]
                else:
                    self._cached = m[1]  # type: ignore[misc]
                if self._versioning is not None:
                    advance_version(self._versioning, m[1], self._hash_fn)
            if t is MessageType.INVALIDATE:
                # GRAPHREFLY-SPEC §1.2: clear cached state; do not auto-emit from here.
                if self._cleanup is not None:
                    cb = self._cleanup
                    self._cleanup = None
                    cb()
                if lock is not None:
                    with lock:
                        self._cached = None
                else:
                    self._cached = None
                self._last_dep_values = None
            # Status transition happens for every message, after its effects.
            self._status = _status_after_message(self._status, m)
            if t is MessageType.COMPLETE or t is MessageType.ERROR:
                # Terminal: _down_body drops later non-lifecycle messages
                # unless the node is resubscribable.
                self._terminal = True
            if t is MessageType.TEARDOWN:
                if self._reset_on_teardown:
                    if lock is not None:
                        with lock:
                            self._cached = None
                    else:
                        self._cached = None
                # Invoke cleanup for compute nodes (deps+fn) — spec §2.4
                if self._cleanup is not None:
                    cb = self._cleanup
                    self._cleanup = None
                    cb()
                # Meta nodes are notified BEFORE disconnecting, even if
                # propagation raises.
                try:
                    self._propagate_to_meta(t)
                finally:
                    self._disconnect_upstream()
                    self._stop_producer()
            # Propagate other meta-eligible signals (centralized in protocol.py).
            if t is not MessageType.TEARDOWN and propagates_to_meta(t):
                self._propagate_to_meta(t)
398
+
399
    def _propagate_to_meta(self, t: MessageType) -> None:
        """Propagate a signal to all companion meta nodes (best-effort)."""
        for meta_node in self._meta.values():
            # A failing meta node must never break the primary lifecycle.
            with suppress(Exception):
                meta_node.down([(t,)], internal=True)

    def _can_skip_dirty(self) -> bool:
        # Single-dep fast path: exactly one sink and it declared single_dep,
        # so redundant DIRTY can be elided when DATA/RESOLVED follows in-batch.
        return self._sink_count == 1 and self._single_dep_sink_count == 1
407
+
408
    def _emit_auto_value(self, value: Any) -> None:
        """Emit *value* downstream, settling with RESOLVED when it equals the cache.

        A DIRTY message is prefixed only when downstream was not already
        marked dirty this cycle.
        """
        # Note: the read-compare-write on _cached looks like a TOCTOU race, but
        # callers always hold the subgraph RLock (via _run_fn or down), which
        # serializes all writes. _cache_lock only guards get() reads from outside.
        was_dirty = self._status == "dirty"
        lock = self._cache_lock
        if lock is not None:
            with lock:
                cached_snapshot = self._cached
        else:
            cached_snapshot = self._cached
        unchanged = self._equals(cached_snapshot, value)
        if unchanged:
            # Unchanged value: settle without re-emitting DATA.
            if was_dirty:
                self.down([(MessageType.RESOLVED,)], internal=True)
            else:
                self.down([(MessageType.DIRTY,), (MessageType.RESOLVED,)], internal=True)
            return
        if lock is not None:
            with lock:
                self._cached = cast("T", value)
        else:
            self._cached = cast("T", value)
        if was_dirty:
            self.down([(MessageType.DATA, value)], internal=True)
        else:
            self.down([(MessageType.DIRTY,), (MessageType.DATA, value)], internal=True)
435
+
436
    def _run_fn_body(self) -> None:
        """One compute pass: read deps, short-circuit if unchanged, run fn, emit.

        Any exception raised by the compute function is converted into a
        downstream ERROR message rather than propagating to the caller.
        """
        if self._terminal and not self._resubscribable:
            return

        try:
            dep_values = [d.get() for d in self._deps]
            # Identity check BEFORE cleanup: if all dep values are unchanged,
            # skip cleanup+fn entirely so effect nodes don't teardown/restart on no-op.
            prev = self._last_dep_values
            n = len(dep_values)
            if (
                n > 0
                and prev is not None
                and len(prev) == n
                and all(dep_values[i] is prev[i] for i in range(n))
            ):
                if self._status == "dirty":
                    self.down([(MessageType.RESOLVED,)], internal=True)
                return
            # Run the previous cleanup before recomputing (effect semantics).
            if self._cleanup is not None:
                cb = self._cleanup
                self._cleanup = None
                cb()
            self._manual_emit_used = False
            self._last_dep_values = dep_values
            if self._inspector_hook is not None:
                self._inspector_hook({"kind": "run", "dep_values": dep_values})
            out = self._fn(dep_values, self._actions)  # type: ignore[misc]
            # A callable return value is a cleanup callback, not an emission.
            if _is_cleanup_fn(out):
                self._cleanup = out
                return
            # Manual emit via actions takes precedence over auto-emit.
            if self._manual_emit_used:
                return
            if out is None:
                return
            self._emit_auto_value(out)
        except Exception as err:
            self.down([(MessageType.ERROR, err)], internal=True)
474
+
475
    def _run_fn(self) -> None:
        """Run the compute function, under the subgraph write lock when thread-safe."""
        if self._fn is None:
            return
        # Suppress re-entrant recompute while wiring upstream deps (TS connect order).
        if self._connecting:
            return
        if self._thread_safe:
            with acquire_subgraph_write_lock_with_defer(self):
                self._run_fn_body()
        else:
            self._run_fn_body()
486
+
487
    def _on_dep_dirty(self, index: int) -> None:
        """Mark dependency *index* dirty and forward one de-duplicated DIRTY."""
        was_dirty = self._dep_dirty_mask.has(index)
        self._dep_dirty_mask.set(index)
        self._dep_settled_mask.clear(index)
        # Only the first DIRTY per settle-cycle is forwarded downstream.
        if not was_dirty:
            self.down([(MessageType.DIRTY,)], internal=True)

    def _on_dep_settled(self, index: int) -> None:
        """Mark dependency *index* settled; recompute once every dirty dep settled."""
        # A settle without a preceding DIRTY implies the dirty step first.
        if not self._dep_dirty_mask.has(index):
            self._on_dep_dirty(index)
        self._dep_settled_mask.set(index)
        if self._dep_dirty_mask.any() and self._dep_settled_mask.covers(self._dep_dirty_mask):
            self._dep_dirty_mask.reset()
            self._dep_settled_mask.reset()
            self._run_fn()
502
+
503
+ def _maybe_complete_from_deps(self) -> None:
504
+ if (
505
+ self._auto_complete
506
+ and len(self._deps) > 0
507
+ and self._dep_complete_mask.covers(self._all_deps_complete_mask)
508
+ ):
509
+ self.down([(MessageType.COMPLETE,)], internal=True)
510
+
511
    def _handle_dep_messages(self, index: int, messages: Messages) -> None:
        """Route *messages* arriving from dependency *index*.

        Pass-through nodes (no fn) forward everything, with COMPLETE
        coordinated across multiple deps; compute nodes translate
        DIRTY/DATA/RESOLVED into the dirty/settled masks and recompute once
        all dirty deps have settled.
        """
        for msg in messages:
            if self._inspector_hook is not None:
                self._inspector_hook({"kind": "dep_message", "dep_index": index, "message": msg})
            t = msg[0]
            # User-defined message handler gets first look (spec §2.6).
            if self._on_message is not None:
                try:
                    # A truthy return means the handler consumed the message.
                    if self._on_message(msg, index, self._actions):
                        continue
                except Exception as err:
                    self.down([(MessageType.ERROR, err)], internal=True)
                    return
            if self._fn is None:
                # Pass-through node: only COMPLETE needs multi-dep coordination.
                if t is MessageType.COMPLETE and len(self._deps) > 1:
                    self._dep_complete_mask.set(index)
                    self._maybe_complete_from_deps()
                    continue
                self.down([msg], internal=True)
                continue
            if t is MessageType.DIRTY:
                self._on_dep_dirty(index)
                continue
            if t is MessageType.DATA or t is MessageType.RESOLVED:
                self._on_dep_settled(index)
                continue
            if t is MessageType.COMPLETE:
                # A completed dep no longer participates in the dirty/settled dance.
                self._dep_complete_mask.set(index)
                self._dep_dirty_mask.clear(index)
                self._dep_settled_mask.clear(index)
                if self._dep_dirty_mask.any() and self._dep_settled_mask.covers(
                    self._dep_dirty_mask
                ):
                    self._dep_dirty_mask.reset()
                    self._dep_settled_mask.reset()
                    self._run_fn()
                elif not self._dep_dirty_mask.any() and self._status == "dirty":
                    # D2: dep went DIRTY→COMPLETE without DATA — node was marked
                    # dirty but no settlement came. Recompute so downstream
                    # gets RESOLVED (value unchanged) or DATA (value changed).
                    self._dep_settled_mask.reset()
                    self._run_fn()
                self._maybe_complete_from_deps()
                continue
            if t is MessageType.ERROR:
                self.down([msg], internal=True)
                continue
            if (
                t is MessageType.INVALIDATE
                or t is MessageType.TEARDOWN
                or t is MessageType.PAUSE
                or t is MessageType.RESUME
            ):
                self.down([msg], internal=True)
                continue
            # Forward unknown message types
            self.down([msg], internal=True)
568
+
569
    def _connect_upstream(self) -> None:
        """Subscribe to every dependency (once) and run the initial compute."""
        if not self._has_deps or self._connected:
            return
        self._connected = True
        self._dep_dirty_mask.reset()
        self._dep_settled_mask.reset()
        self._dep_complete_mask.reset()
        self._status = "settled"
        # Declare the single-dep fast path to the dependency (see _can_skip_dirty).
        is_single = len(self._deps) == 1 and self._fn is not None
        hints = SubscribeHints(single_dep=True) if is_single else SubscribeHints()
        # _connecting suppresses re-entrant recompute while wiring (see _run_fn).
        self._connecting = True
        try:
            for i, dep in enumerate(self._deps):
                unsub = dep.subscribe(partial(self._handle_dep_messages, i), hints)
                self._upstream_unsubs.append(unsub)
        finally:
            self._connecting = False
        # First compute happens only after ALL deps are wired.
        if self._fn is not None:
            self._run_fn()
588
+
589
    def _stop_producer(self) -> None:
        """Stop a producer node (fn, no deps) and run any pending cleanup."""
        if not self._producer_started:
            return
        self._producer_started = False
        if self._cleanup is not None:
            cb = self._cleanup
            self._cleanup = None
            cb()
        # NOTE(review): this trailing clear also discards any cleanup that
        # cb() may have re-registered re-entrantly — presumably intentional
        # (a stopped producer keeps no cleanup); confirm against the TS source.
        self._cleanup = None

    def _start_producer(self) -> None:
        """Start a producer node on first subscription (idempotent)."""
        if len(self._deps) != 0 or self._fn is None or self._producer_started:
            return
        self._producer_started = True
        self._run_fn()
604
+
605
+ def _disconnect_upstream(self) -> None:
606
+ if not self._connected:
607
+ return
608
+ for u in self._upstream_unsubs:
609
+ u()
610
+ self._upstream_unsubs.clear()
611
+ self._connected = False
612
+ self._dep_dirty_mask.reset()
613
+ self._dep_settled_mask.reset()
614
+ self._dep_complete_mask.reset()
615
+ self._status = "disconnected"
616
+
617
+ # --- Public interface ---
618
+
619
    def _subscribe_body(
        self,
        sink: Callable[[Messages], None],
        hints: SubscribeHints | None,
    ) -> None:
        """Register *sink* and lazily connect/start the node (caller holds the lock)."""
        # Resubscribable nodes leave the terminal state on a fresh subscribe.
        if self._terminal and self._resubscribable:
            self._terminal = False
            self._status = "disconnected" if self._has_deps else "settled"

        h = hints or SubscribeHints()
        self._sink_count += 1
        if h.single_dep:
            self._single_dep_sink_count += 1
            self._single_dep_sinks.add(sink)

        # _sinks grows None -> bare callable -> set as sinks accumulate.
        if self._sinks is None:
            self._sinks = sink
        elif isinstance(self._sinks, set):
            self._sinks.add(sink)
        else:
            self._sinks = {self._sinks, sink}

        # First subscriber triggers upstream wiring or producer start.
        if self._has_deps:
            self._connect_upstream()
        elif self._fn is not None:
            self._start_producer()
645
+
646
    def _unsubscribe_body(self, sink: Callable[[Messages], None]) -> None:
        """Remove *sink*; disconnect/stop the node when no sinks remain."""
        self._sink_count -= 1
        if sink in self._single_dep_sinks:
            self._single_dep_sink_count -= 1
            self._single_dep_sinks.discard(sink)

        if self._sinks is None:
            return
        if isinstance(self._sinks, set):
            self._sinks.discard(sink)
            # Shrink set -> bare callable -> None as sinks are removed.
            if len(self._sinks) == 1:
                self._sinks = next(iter(self._sinks))
            elif len(self._sinks) == 0:
                self._sinks = None
        elif self._sinks is sink:
            self._sinks = None

        # Last sink gone: sever upstream wiring and stop the producer.
        if self._sinks is None:
            self._disconnect_upstream()
            self._stop_producer()
666
+
667
    def _guard_observe_or_raise(self, actor: Mapping[str, Any] | Actor | None) -> None:
        """Enforce the ``observe`` guard for *actor*.

        Raises:
            GuardDenied: when a guard is installed and rejects the actor.
        """
        g = self._guard
        if g is None:
            return
        a = normalize_actor(actor)
        if not g(cast("Actor", a), "observe"):
            raise GuardDenied(a, self._name or "<unnamed>", "observe")
674
+
675
    def subscribe(
        self,
        sink: Callable[[Messages], None],
        hints: SubscribeHints | None = None,
        *,
        actor: Mapping[str, Any] | Actor | None = None,
    ) -> Callable[[], None]:
        """Attach *sink* to this node's message stream.

        Args:
            sink: Callback invoked with batched wire messages.
            hints: Optional :class:`SubscribeHints` optimizations.
            actor: Attribution checked against the ``observe`` guard.

        Returns:
            An idempotent unsubscribe callable.

        Raises:
            GuardDenied: when the observe guard rejects *actor*.
        """
        if self._thread_safe:
            with acquire_subgraph_write_lock_with_defer(self):
                self._guard_observe_or_raise(actor)
                self._subscribe_body(sink, hints)
        else:
            self._guard_observe_or_raise(actor)
            self._subscribe_body(sink, hints)

        # `removed` makes the returned unsubscriber idempotent.
        removed = False

        if self._thread_safe:

            def unsubscribe() -> None:
                nonlocal removed
                with acquire_subgraph_write_lock_with_defer(self):
                    if removed:
                        return
                    removed = True
                    self._unsubscribe_body(sink)

        else:

            def unsubscribe() -> None:
                nonlocal removed
                if removed:
                    return
                removed = True
                self._unsubscribe_body(sink)

        return unsubscribe
712
+
713
    @property
    def name(self) -> str | None:
        """Optional diagnostic name supplied via the ``name`` option."""
        return self._name

    @property
    def status(self) -> NodeStatus:
        """Current lifecycle status string (see ``NodeStatus``)."""
        return self._status

    @property
    def meta(self) -> Mapping[str, NodeImpl[Any]]:
        """Read-only view of companion meta nodes keyed by meta key."""
        return MappingProxyType(self._meta)

    def get(self) -> T | None:
        """Return the cached value (lock-guarded when thread-safe)."""
        lock = self._cache_lock
        if lock is not None:
            with lock:
                return self._cached
        return self._cached

    @property
    def last_mutation(self) -> dict[str, Any] | None:
        """Last non-internal ``write`` attribution (``actor``, ``timestamp_ns``), if any."""
        return self._last_mutation

    @property
    def v(self) -> NodeVersionInfo | None:
        """Versioning info (GRAPHREFLY-SPEC §7). ``None`` when versioning is not enabled."""
        return self._versioning
741
+
742
    def _apply_versioning(
        self,
        level: VersioningLevel,
        *,
        id: str | None = None,
        hash_fn: HashFn | None = None,
    ) -> None:
        """Retroactively apply versioning to a node created without it.

        No-op if versioning is already enabled. Version starts at 0 regardless
        of prior DATA emissions — it tracks changes from the moment versioning
        is enabled, not historical ones.

        Args:
            level: Versioning level to enable.
            id: Optional explicit version-stream id.
            hash_fn: Optional hash function override (replaces the default).

        Used by :meth:`Graph.set_versioning`.
        """
        if self._versioning is not None:
            return
        if hash_fn is not None:
            self._hash_fn = hash_fn
        self._versioning = create_versioning(
            level,
            self._cached,
            id=id,
            hash_fn=self._hash_fn,
        )
767
+
768
    def _guard_and_record(
        self,
        actor: Mapping[str, Any] | Actor | None,
        guard_action: GuardAction,
    ) -> None:
        """Enforce the guard for *guard_action* and record write attribution.

        Raises:
            GuardDenied: when a guard is installed and rejects the actor.
        """
        a = normalize_actor(actor)
        g = self._guard
        if g is not None and not g(cast("Actor", a), guard_action):
            raise GuardDenied(a, self._name or "<unnamed>", guard_action)
        # Attribution is recorded for writes even when no guard is installed.
        if guard_action == "write":
            self._last_mutation = record_mutation(a)
779
+
780
    def _down_body(self, messages: Messages, sg_lock: object | None) -> None:
        """Core downstream dispatch (guard and locking already handled by ``down``).

        Terminal non-resubscribable nodes pass through only TEARDOWN and
        INVALIDATE. The single-dep fast path strips redundant DIRTY when a
        DATA/RESOLVED message is in the same batch.
        """
        lifecycle_messages = messages
        sink_messages = messages
        if self._terminal and not self._resubscribable:
            terminal_passthrough = [
                m
                for m in messages
                if m[0] is MessageType.TEARDOWN or m[0] is MessageType.INVALIDATE
            ]
            if not terminal_passthrough:
                return
            lifecycle_messages = terminal_passthrough
            sink_messages = terminal_passthrough
        # Local state updates happen before any sink sees the batch.
        self._handle_local_lifecycle(lifecycle_messages)
        if self._can_skip_dirty():
            has_phase2 = False
            for m in sink_messages:
                t = m[0]
                if t is MessageType.DATA or t is MessageType.RESOLVED:
                    has_phase2 = True
                    break
            if has_phase2:
                # DIRTY is redundant when the settle message rides the same batch.
                filtered = [m for m in sink_messages if m[0] is not MessageType.DIRTY]
                if filtered:
                    emit_with_batch(
                        self._emit_to_sinks,
                        filtered,
                        strategy="partition",
                        defer_when="batching",
                        subgraph_lock=sg_lock,
                    )
                return
        emit_with_batch(
            self._emit_to_sinks,
            sink_messages,
            strategy="partition",
            defer_when="batching",
            subgraph_lock=sg_lock,
        )
819
+
820
    def down(
        self,
        messages: Messages,
        *,
        actor: Mapping[str, Any] | Actor | None = None,
        internal: bool = False,
        guard_action: GuardAction = "write",
    ) -> None:
        """Send *messages* downstream through this node.

        Args:
            messages: Wire messages; an empty batch is a no-op.
            actor: Attribution for guard checks (non-internal calls only).
            internal: Skip guard/attribution when True (graph-internal traffic).
            guard_action: Guard action checked for non-internal calls.
        """
        if not messages:
            return
        if self._thread_safe:
            with acquire_subgraph_write_lock_with_defer(self):
                if not internal:
                    self._guard_and_record(actor, guard_action)
                # Pass self as the subgraph-lock token for deferred batching.
                self._down_body(messages, self)
        else:
            if not internal:
                self._guard_and_record(actor, guard_action)
            self._down_body(messages, None)
839
+
840
    def up(
        self,
        messages: Messages,
        *,
        actor: Any = None,
        internal: bool = False,
        guard_action: GuardAction = "write",
    ) -> None:
        """Send messages upstream (no-op on source nodes; matches TS optional ``up``)."""
        if not self._has_deps:
            return
        # NOTE(review): unlike ``down``, the guard/attribution step is gated
        # on a guard being installed, so ``last_mutation`` is NOT recorded for
        # guardless nodes on the up path — presumably intentional (up only
        # forwards); confirm against graphrefly-ts.
        if not internal and self._guard is not None:
            self._guard_and_record(actor, guard_action)
        for dep in self._deps:
            # ``up`` is optional on upstream nodes (duck-typed).
            u = getattr(dep, "up", None)
            if u is not None:
                u(messages)
857
+
858
    def allows_observe(self, actor: Any = None) -> bool:
        """Whether ``actor`` may observe this node (``True`` if no guard is set)."""
        if self._guard is None:
            return True
        a = normalize_actor(actor)
        return bool(self._guard(cast("Actor", a), "observe"))

    def has_guard(self) -> bool:
        """Whether a guard is installed on this node."""
        return self._guard is not None
868
+
869
    def unsubscribe(self) -> None:
        """Disconnect from upstream deps (no-op on source nodes)."""
        if not self._has_deps:
            return
        if self._thread_safe:
            with acquire_subgraph_write_lock_with_defer(self):
                self._disconnect_upstream()
        else:
            self._disconnect_upstream()
878
+
879
+ def __or__(self, other: object) -> Any:
880
+ """Pipe: ``left | op`` with unary ``(Node) -> Node`` (GRAPHREFLY-SPEC §4.1)."""
881
+ if not callable(other):
882
+ return NotImplemented
883
+ cast_other = cast("Callable[[NodeImpl[Any]], NodeImpl[Any]]", other)
884
+ return cast_other(self)
885
+
886
+
887
def node(
    deps_or_fn: Sequence[NodeImpl[Any]] | NodeFn | dict[str, Any] | None = None,
    fn_or_opts: NodeFn | dict[str, Any] | None = None,
    opts_arg: dict[str, Any] | None = None,
    **kwargs: Any,
) -> NodeImpl[Any]:
    """Create a reactive node (mirrors graphrefly-ts ``node`` overloads).

    Accepts multiple call signatures::

        node()                          # no-dep, no-fn source
        node(fn)                        # producer (no deps)
        node([dep1, dep2], fn)          # derived / operator
        node([dep1], fn, {"name": ...}) # with options dict
        node(name="x", initial=0)       # kwargs shorthand

    Args:
        deps_or_fn: Either a sequence of upstream :class:`NodeImpl` dependencies,
            a bare compute function (producer), an options dict, or ``None``.
        fn_or_opts: Compute function (when ``deps_or_fn`` is a dep list) or an
            options dict.
        opts_arg: Additional options dict when both deps and fn are provided
            positionally.
        **kwargs: Any option key accepted by :class:`NodeImpl` (e.g. ``name``,
            ``initial``, ``equals``, ``guard``, ``thread_safe``).

    Returns:
        A new :class:`NodeImpl` instance.

    Raises:
        TypeError: on argument combinations that match no overload.

    Example:
        ```python
        from graphrefly import node, state
        x = state(0)
        doubled = node([x], lambda deps, _: deps[0] * 2, name="doubled")
        ```
    """
    # Keyword options always win over positional option dicts.
    opts: dict[str, Any] = {**kwargs}
    deps: list[NodeImpl[Any]] = []
    fn: NodeFn | None = None

    # Overload dispatch: order matters — a list/tuple is checked before
    # "callable" and "options" interpretations of the first argument.
    if _is_node_sequence(deps_or_fn):
        deps = list(cast("Sequence[NodeImpl[Any]]", deps_or_fn))
        if callable(fn_or_opts):
            fn = fn_or_opts  # narrowed: NodeFn
        # fn_or_opts is either the fn or an options dict, never both.
        if _is_node_options(fn_or_opts):
            opts = {**_as_options_dict(fn_or_opts), **opts}
        elif _is_node_options(opts_arg):
            opts = {**_as_options_dict(opts_arg), **opts}
    elif _is_node_options(deps_or_fn):
        opts = {**_as_options_dict(deps_or_fn), **opts}
    elif callable(deps_or_fn):
        fn = deps_or_fn  # narrowed: NodeFn
        if _is_node_options(fn_or_opts):
            opts = {**_as_options_dict(fn_or_opts), **opts}
    elif deps_or_fn is None:
        if fn_or_opts is not None or opts_arg is not None:
            raise TypeError("node() invalid arguments")
    else:
        raise TypeError(f"node() unexpected first argument: {type(deps_or_fn).__name__}")

    # snake_case option aliases matching TS camelCase in docs
    if "resetOnTeardown" in opts and "reset_on_teardown" not in opts:
        opts["reset_on_teardown"] = opts.pop("resetOnTeardown")
    if "completeWhenDepsComplete" in opts and "complete_when_deps_complete" not in opts:
        opts["complete_when_deps_complete"] = opts.pop("completeWhenDepsComplete")
    if "threadSafe" in opts and "thread_safe" not in opts:
        opts["thread_safe"] = opts.pop("threadSafe")
    # NOTE(review): capital-"G" ``Guard`` breaks the camelCase pattern of the
    # other aliases — presumably meant as the alias for ``guard``; confirm
    # against the graphrefly-ts option names.
    if "Guard" in opts and "guard" not in opts:
        opts["guard"] = opts.pop("Guard")

    return NodeImpl(deps, fn, opts)
958
+
959
+
960
# Public alias for type hints
Node = NodeImpl

# Explicit public API of this module.
__all__ = ["Node", "NodeActions", "NodeFn", "NodeImpl", "NodeStatus", "SubscribeHints", "node"]