graphrefly-0.1.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47) hide show
  1. graphrefly/__init__.py +160 -0
  2. graphrefly/compat/__init__.py +18 -0
  3. graphrefly/compat/async_utils.py +228 -0
  4. graphrefly/compat/asyncio_runner.py +89 -0
  5. graphrefly/compat/trio_runner.py +81 -0
  6. graphrefly/core/__init__.py +142 -0
  7. graphrefly/core/clock.py +20 -0
  8. graphrefly/core/dynamic_node.py +749 -0
  9. graphrefly/core/guard.py +277 -0
  10. graphrefly/core/meta.py +149 -0
  11. graphrefly/core/node.py +963 -0
  12. graphrefly/core/protocol.py +460 -0
  13. graphrefly/core/runner.py +107 -0
  14. graphrefly/core/subgraph_locks.py +296 -0
  15. graphrefly/core/sugar.py +138 -0
  16. graphrefly/core/versioning.py +193 -0
  17. graphrefly/extra/__init__.py +313 -0
  18. graphrefly/extra/adapters.py +2149 -0
  19. graphrefly/extra/backoff.py +287 -0
  20. graphrefly/extra/backpressure.py +113 -0
  21. graphrefly/extra/checkpoint.py +307 -0
  22. graphrefly/extra/composite.py +303 -0
  23. graphrefly/extra/cron.py +133 -0
  24. graphrefly/extra/data_structures.py +707 -0
  25. graphrefly/extra/resilience.py +727 -0
  26. graphrefly/extra/sources.py +766 -0
  27. graphrefly/extra/tier1.py +1067 -0
  28. graphrefly/extra/tier2.py +1802 -0
  29. graphrefly/graph/__init__.py +31 -0
  30. graphrefly/graph/graph.py +2249 -0
  31. graphrefly/integrations/__init__.py +1 -0
  32. graphrefly/integrations/fastapi.py +767 -0
  33. graphrefly/patterns/__init__.py +5 -0
  34. graphrefly/patterns/ai.py +2132 -0
  35. graphrefly/patterns/cqrs.py +515 -0
  36. graphrefly/patterns/memory.py +639 -0
  37. graphrefly/patterns/messaging.py +553 -0
  38. graphrefly/patterns/orchestration.py +536 -0
  39. graphrefly/patterns/reactive_layout/__init__.py +81 -0
  40. graphrefly/patterns/reactive_layout/measurement_adapters.py +276 -0
  41. graphrefly/patterns/reactive_layout/reactive_block_layout.py +434 -0
  42. graphrefly/patterns/reactive_layout/reactive_layout.py +943 -0
  43. graphrefly/py.typed +1 -0
  44. graphrefly-0.1.0.dist-info/METADATA +253 -0
  45. graphrefly-0.1.0.dist-info/RECORD +47 -0
  46. graphrefly-0.1.0.dist-info/WHEEL +4 -0
  47. graphrefly-0.1.0.dist-info/licenses/LICENSE +21 -0
@@ -0,0 +1,296 @@
1
+ """Union-find registry for per-subgraph write locking (roadmap 0.4).
2
+
3
+ Contract (aligned with callbag-recharge-py and GRAPHREFLY-SPEC §6.1):
4
+
5
+ - ``get()`` does not take the subgraph write lock; it uses a per-node ``threading.Lock``
6
+ (``NodeImpl._cache_lock``) so the cached value is read/written with proper synchronization
7
+ under free-threaded Python (nogil), independent of the component ``RLock``.
8
+ - Mutations that change node state or topology (``down``, recompute, subscribe /
9
+ unsubscribe) run under the subgraph RLock for the node's union component.
10
+ - ``union_nodes`` merges components when nodes are linked by dependency edges so one
11
+ logical subgraph serializes writes on one lock.
12
+ - ``defer_set`` / ``defer_down`` queue cross-subgraph work until the current write lock
13
+ is released, avoiding deadlocks.
14
+
15
+ Each thread has its own defer queue (TLS), matching :func:`graphrefly.core.protocol.batch`
16
+ isolation per thread.
17
+ """
18
+
19
+ from __future__ import annotations
20
+
21
+ import threading
22
+ import weakref
23
+ from contextlib import contextmanager
24
+ from typing import TYPE_CHECKING, Any
25
+
26
+ if TYPE_CHECKING:
27
+ from collections.abc import Callable, Generator
28
+
29
+
30
+ class _LockBox:
31
+ """Mutable holder for a component RLock; redirect ``.lock`` on union."""
32
+
33
+ __slots__ = ("lock",)
34
+
35
+ def __init__(self) -> None:
36
+ self.lock = threading.RLock()
37
+
38
+
39
# Upper bound on lock_for() re-acquisition attempts when the component's
# canonical lock keeps changing under concurrent union() calls.
_MAX_LOCK_RETRIES: int = 100
40
+
41
+
42
class _SubgraphRegistry:
    """Union-find over live node objects, keyed by ``id(node)``.

    Bookkeeping (parents, ranks, lock boxes, weak refs, reverse-child map) is
    guarded by ``_meta_lock``; the per-component write lock itself lives in a
    ``_LockBox`` so ``union`` can redirect it in place.
    """

    __slots__ = ("_children", "_meta_lock", "_parent", "_rank", "_boxes", "_refs")

    def __init__(self) -> None:
        # Guards all registry metadata below (not the component write locks).
        self._meta_lock = threading.RLock()
        # Union-find parent pointers: node id -> parent id (root points to itself).
        self._parent: dict[int, int] = {}
        # Union-by-rank heights, keyed by root id.
        self._rank: dict[int, int] = {}
        # Root id -> lock box holding that component's write lock.
        self._boxes: dict[int, _LockBox] = {}
        # Node id -> weak ref to the node; GC triggers _on_gc for cleanup.
        self._refs: dict[int, weakref.ref[object]] = {}
        # Reverse map of _parent: id -> set of ids whose parent it is.
        self._children: dict[int, set[int]] = {}

    def _on_gc(self, node_id: int, ref_obj: weakref.ref[object]) -> None:
        """Weakref finalizer: drop *node_id*'s bookkeeping, re-parenting children.

        ``id()`` values can be reused after GC, so the ``ref_obj`` identity
        check guards against cleaning up an entry that was already replaced
        by a new object at the same address.
        """
        with self._meta_lock:
            if self._refs.get(node_id) is not ref_obj:
                # A newer registration reused this id; nothing to clean up.
                return
            self._refs.pop(node_id, None)
            parent = self._parent.get(node_id)
            if parent is None:
                return

            direct_children = list(self._children.get(node_id, ()))

            if parent == node_id:
                # The dying node was a root: promote its first child to root
                # and re-parent the remaining children under it.
                if direct_children:
                    new_root = direct_children[0]
                    self._parent[new_root] = new_root
                    new_root_kids = self._children.setdefault(new_root, set())
                    for child in direct_children[1:]:
                        self._parent[child] = new_root
                        new_root_kids.add(child)
                        kids = self._children.get(child)
                        if kids is not None:
                            kids.discard(node_id)

                    # Carry the component's lock box and rank over to the new
                    # root so the surviving subgraph keeps the same write lock.
                    box = self._boxes.get(node_id)
                    if box is not None:
                        self._boxes[new_root] = box
                    # NOTE(review): indentation reconstructed — rank transfer is
                    # placed alongside the box transfer; confirm against upstream.
                    self._rank[new_root] = self._rank.get(new_root, self._rank.get(node_id, 0))
            else:
                # Interior node: splice it out by hanging its children
                # directly off its parent.
                parent_kids = self._children.get(parent)
                if parent_kids is not None:
                    parent_kids.discard(node_id)
                for child in direct_children:
                    self._parent[child] = parent
                    if parent_kids is not None:
                        parent_kids.add(child)

            self._children.pop(node_id, None)
            self._parent.pop(node_id, None)
            self._rank.pop(node_id, None)
            self._boxes.pop(node_id, None)

    def _find_locked(self, node_id: int) -> int:
        """Return the root of *node_id*'s component (caller holds ``_meta_lock``).

        Performs path compression and keeps the ``_children`` reverse map in
        sync with each re-parenting.
        """
        parent = self._parent.get(node_id)
        if parent is None:
            # Unregistered ids are treated as their own singleton component.
            return node_id
        if parent != node_id:
            root = self._find_locked(parent)
            if root != parent:
                old_parent = parent
                self._parent[node_id] = root
                # Maintain _children reverse map during path compression.
                old_kids = self._children.get(old_parent)
                if old_kids is not None:
                    old_kids.discard(node_id)
                self._children.setdefault(root, set()).add(node_id)
            return root
        return node_id

    def _ensure_locked(self, node: object) -> int:
        """Register *node* if unknown (or if its id was reused after GC); return its id."""
        node_id = id(node)
        existing_ref = self._refs.get(node_id)
        existing_obj = existing_ref() if existing_ref is not None else None

        # existing_obj is None also covers a dead weakref whose _on_gc has not
        # run yet: re-register as a fresh singleton component.
        if node_id not in self._parent or existing_obj is None:
            self._parent[node_id] = node_id
            self._rank[node_id] = 0
            self._boxes[node_id] = _LockBox()
            self._children[node_id] = set()
            self._refs[node_id] = weakref.ref(node, lambda _ref: self._on_gc(node_id, _ref))
        return node_id

    def ensure_node(self, node: object) -> None:
        """Public registration entry point (takes ``_meta_lock``)."""
        with self._meta_lock:
            self._ensure_locked(node)

    def union(self, node_a: object, node_b: object) -> None:
        """Merge the components of *node_a* and *node_b* (union by rank).

        The absorbed root's lock box is redirected to the canonical root's
        lock so all existing holders converge on one write lock.
        """
        with self._meta_lock:
            id_a = self._ensure_locked(node_a)
            id_b = self._ensure_locked(node_b)

            root_a = self._find_locked(id_a)
            root_b = self._find_locked(id_b)
            if root_a == root_b:
                return

            rank_a = self._rank.get(root_a, 0)
            rank_b = self._rank.get(root_b, 0)
            if rank_a < rank_b:
                root_a, root_b = root_b, root_a
            self._parent[root_b] = root_a
            self._children.setdefault(root_a, set()).add(root_b)
            if rank_a == rank_b:
                self._rank[root_a] = rank_a + 1

            # Redirect the absorbed box in place; threads already holding it
            # will detect the change in lock_for()'s validity re-check.
            canonical_lock = self._boxes[root_a].lock
            box_b = self._boxes.get(root_b)
            if box_b is not None:
                box_b.lock = canonical_lock

    @contextmanager
    def lock_for(self, node: object) -> Generator[None]:
        """Acquire the write lock of *node*'s component for the ``with`` body.

        Acquisition races with concurrent ``union``: after taking the lock we
        re-resolve the root under ``_meta_lock`` and only proceed when the
        component's canonical lock is still the one we hold; otherwise release
        and retry (bounded by ``_MAX_LOCK_RETRIES``).
        """
        for _attempt in range(_MAX_LOCK_RETRIES):
            # Snapshot the component's current lock without holding _meta_lock
            # across the (potentially blocking) acquire below.
            with self._meta_lock:
                node_id = self._ensure_locked(node)
                root = self._find_locked(node_id)
                box = self._boxes.get(root)
                if box is None:
                    box = _LockBox()
                    self._boxes[root] = box
                lock = box.lock

            lock.acquire()
            valid = False
            try:
                with self._meta_lock:
                    current_root = self._find_locked(node_id)
                    current_box = self._boxes.get(current_root)
                    if current_box is None:
                        current_box = _LockBox()
                        self._boxes[current_root] = current_box
                    valid = current_box.lock is lock
                # NOTE(review): indentation reconstructed — the yield is placed
                # outside the _meta_lock block (holding _meta_lock through user
                # code would invite deadlocks); confirm against upstream.
                if valid:
                    yield
                    return
            finally:
                lock.release()
        raise RuntimeError(
            f"subgraph lock acquisition failed after {_MAX_LOCK_RETRIES} retries "
            "(continuous union activity?)"
        )
183
+
184
+
185
# Process-wide singleton registry; the module-level helpers below delegate to it.
_REGISTRY = _SubgraphRegistry()
186
+
187
+
188
def ensure_registered(node: object) -> None:
    """Add *node* to the global subgraph registry.

    Only a weak reference is kept, so registration never keeps the node
    alive; its bookkeeping is cleaned up automatically when it is
    garbage-collected.
    """
    _REGISTRY.ensure_node(node)
191
+
192
+
193
def union_nodes(a: object, b: object) -> None:
    """Merge the union-find components containing *a* and *b*.

    After merging, both nodes (and everything already in their components)
    serialize writes on one shared component lock.
    """
    _REGISTRY.union(a, b)
196
+
197
+
198
@contextmanager
def acquire_subgraph_write_lock(node: object) -> Generator[None]:
    """Hold the write lock of *node*'s component for the duration of the block."""
    with _REGISTRY.lock_for(node):
        yield
203
+
204
+
205
# --- defer_set / defer_down (TLS queue, flushed when defer-aware lock exits) ---

# Thread-local storage carrying ``depth`` (nesting of defer-aware lock
# acquisitions on this thread) and ``queue`` (callables to run once the
# depth returns to zero).
_deferred_tls = threading.local()
208
+
209
+
210
def _get_deferred_depth() -> int:
    """Return this thread's defer-aware lock nesting depth (0 when unset)."""
    depth: int = getattr(_deferred_tls, "depth", 0)
    return depth
212
+
213
+
214
def _inc_deferred_depth() -> None:
    """Bump this thread's defer depth (initializing it to 1 on first use)."""
    current = getattr(_deferred_tls, "depth", 0)
    _deferred_tls.depth = current + 1
216
+
217
+
218
def _dec_deferred_depth() -> int:
    """Decrement this thread's defer depth and return the new value.

    Raises:
        RuntimeError: if the depth would go negative (inc/dec mismatch).
    """
    current = getattr(_deferred_tls, "depth", 0)
    if current <= 0:
        raise RuntimeError("deferred depth underflow: lock/defer bookkeeping out of balance")
    new_depth = current - 1
    _deferred_tls.depth = new_depth
    return new_depth
225
+
226
+
227
def _get_deferred_queue() -> list[Callable[[], None]]:
    """Return this thread's deferred-work queue, creating it lazily."""
    existing: list[Callable[[], None]] | None = getattr(_deferred_tls, "queue", None)
    if existing is not None:
        return existing
    fresh: list[Callable[[], None]] = []
    _deferred_tls.queue = fresh
    return fresh
233
+
234
+
235
@contextmanager
def acquire_subgraph_write_lock_with_defer(node: object) -> Generator[None]:
    """Acquire the subgraph write lock; flush deferred cross-subgraph work on exit.

    The depth counter makes nesting safe: only the outermost defer-aware lock
    on this thread drains the TLS queue, and it does so *after* releasing the
    subgraph lock (the ``with`` exits before the ``finally`` flush runs), so
    deferred work cannot deadlock against the lock just held.

    Exception policy while draining: ``Exception``s are collected and re-raised
    at the end (singly, or as an :class:`ExceptionGroup`); the first
    ``BaseException`` (e.g. ``KeyboardInterrupt``) wins over collected
    ``Exception``s and is re-raised immediately after the drain loop.
    """
    _inc_deferred_depth()
    try:
        with _REGISTRY.lock_for(node):
            yield
    finally:
        if _dec_deferred_depth() == 0:
            queue = _get_deferred_queue()
            errors: list[Exception] = []
            first_base: BaseException | None = None
            # Deferred callbacks may themselves defer more work, so drain in
            # rounds until the queue stays empty.
            while queue:
                pending = queue[:]
                queue.clear()
                for fn in pending:
                    try:
                        fn()
                    except Exception as e:
                        errors.append(e)
                    except BaseException as e:
                        # Keep only the first; later ones are dropped so the
                        # drain still completes.
                        if first_base is None:
                            first_base = e
            if first_base is not None:
                raise first_base
            if len(errors) == 1:
                raise errors[0]
            if len(errors) > 1:
                raise ExceptionGroup("deferred subgraph work", errors)
264
+
265
+
266
def defer_set(target: Any, value: Any) -> None:
    """Schedule ``target.set(value)`` for after the current defer-aware lock exits.

    When no :func:`acquire_subgraph_write_lock_with_defer` is active on this
    thread, the call happens immediately. *target* must expose a ``set``
    method (e.g. future state sugar).
    """
    if _get_deferred_depth() <= 0:
        target.set(value)
        return

    def _apply() -> None:
        target.set(value)

    _get_deferred_queue().append(_apply)
276
+
277
+
278
def defer_down(node: Any, messages: Any) -> None:
    """Schedule ``node.down(messages)`` for after the current defer-aware lock exits.

    When no :func:`acquire_subgraph_write_lock_with_defer` is active on this
    thread, the call happens immediately.
    """
    if _get_deferred_depth() <= 0:
        node.down(messages)
        return

    def _apply() -> None:
        node.down(messages)

    _get_deferred_queue().append(_apply)
287
+
288
+
289
# Public API of this module (alphabetical).
__all__ = [
    "acquire_subgraph_write_lock",
    "acquire_subgraph_write_lock_with_defer",
    "defer_down",
    "defer_set",
    "ensure_registered",
    "union_nodes",
]
@@ -0,0 +1,138 @@
1
+ """Sugar constructors over :func:`graphrefly.core.node.node` (GRAPHREFLY-SPEC §2.7, §4.1)."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from collections.abc import Callable, Sequence
6
+ from typing import Any
7
+
8
+ from graphrefly.core.node import Node, NodeFn, node
9
+
10
# A unary operator in a pipe(): consumes one node, returns a transformed node.
type PipeOperator = Callable[[Node[Any]], Node[Any]]
11
+
12
+
13
def state(initial: Any, **opts: Any) -> Node[Any]:
    """Create a manually-settable source node with a fixed initial value.

    Args:
        initial: The initial cached value for the node.
        **opts: Extra node options forwarded to :func:`~graphrefly.core.node.node`.

    Returns:
        A :class:`~graphrefly.core.node.Node` with no deps and no compute function.

    Example:
        ```python
        from graphrefly import state
        counter = state(0, name="counter")
        counter.down([("DATA", 1)])
        assert counter.get() == 1
        ```
    """
    # NOTE(review): unlike ``producer``/``derived``, options are handed to
    # ``node`` as one positional mapping here — presumably ``node`` accepts
    # both calling forms; confirm against its signature.
    options = dict(opts)
    options["initial"] = initial
    return node([], options)
32
+
33
+
34
def producer(fn: NodeFn, **opts: Any) -> Node[Any]:
    """Create a dependency-free producer node that starts on first subscribe.

    Args:
        fn: Compute function invoked when the first sink subscribes.
        **opts: Extra node options forwarded to :func:`~graphrefly.core.node.node`.

    Returns:
        A :class:`~graphrefly.core.node.Node` whose producer starts on first subscribe.

    Example:
        ```python
        from graphrefly import producer
        from graphrefly.core.protocol import MessageType

        def ticker(deps, actions):
            actions.emit(42)
            actions.down([(MessageType.COMPLETE,)])
            return lambda: None

        p = producer(ticker, name="once")
        ```
    """
    made = node(fn, describe_kind="producer", **opts)
    return made
58
+
59
+
60
def derived(deps: Sequence[Node[Any]], fn: NodeFn, **opts: Any) -> Node[Any]:
    """Create a derived node that recomputes whenever its dependencies settle.

    Args:
        deps: Upstream nodes whose settled values are handed to ``fn``.
        fn: Compute function receiving ``(dep_values, actions)``; may return a
            value to emit or a cleanup callable.
        **opts: Extra node options forwarded to :func:`~graphrefly.core.node.node`.

    Returns:
        A :class:`~graphrefly.core.node.Node` that reacts to its upstream deps.

    Example:
        ```python
        from graphrefly import state, derived
        x = state(2)
        doubled = derived([x], lambda deps, _: deps[0] * 2)
        ```
    """
    dep_list = [*deps]
    return node(dep_list, fn, describe_kind="derived", **opts)
80
+
81
+
82
def effect(deps: Sequence[Node[Any]], fn: NodeFn, **opts: Any) -> Node[Any]:
    """Create a side-effect leaf node; ``fn`` should return ``None`` (no auto-emit).

    Args:
        deps: Upstream nodes whose settled values trigger ``fn``.
        fn: Side-effect function receiving ``(dep_values, actions)``; its return
            value is ignored unless it is a cleanup callable.
        **opts: Extra node options forwarded to :func:`~graphrefly.core.node.node`.

    Returns:
        A :class:`~graphrefly.core.node.Node` that runs ``fn`` on each
        settlement but emits no reactive values downstream.

    Example:
        ```python
        from graphrefly import state, effect
        x = state(0)
        log = []
        e = effect([x], lambda deps, _: log.append(deps[0]))
        x.down([("DATA", 1)])
        ```
    """
    dep_list = [*deps]
    return node(dep_list, fn, describe_kind="effect", **opts)
105
+
106
+
107
def pipe(source: Node[Any], *ops: PipeOperator) -> Node[Any]:
    """Thread ``source`` through a chain of unary node operators.

    Args:
        source: The root node to pipe through operators.
        *ops: Unary operator callables, each transforming ``Node -> Node``.

    Returns:
        The final node produced by applying all operators left to right
        (``source`` itself when no operators are given).

    Example:
        ```python
        from graphrefly import state, pipe
        from graphrefly.extra import map_val, filter_val
        x = state(0)
        result = pipe(x, map_val(lambda v: v * 2), filter_val(lambda v: v > 0))
        ```
    """
    result: Node[Any] = source
    for transform in ops:
        result = transform(result)
    return result
129
+
130
+
131
# Public API of the sugar module (alphabetical after the type alias).
__all__ = [
    "PipeOperator",
    "derived",
    "effect",
    "pipe",
    "producer",
    "state",
]
@@ -0,0 +1,193 @@
1
+ """Node versioning — GRAPHREFLY-SPEC §7.
2
+
3
+ Progressive, optional versioning for node identity and change tracking.
4
+
5
+ - **V0**: ``id`` + ``version`` — identity & change detection (~16 bytes overhead)
6
+ - **V1**: + ``cid`` + ``prev`` — content addressing & linked history (~60 bytes overhead)
7
+
8
+ **Lifecycle notes:**
9
+
10
+ - Version advances only on DATA (not RESOLVED, INVALIDATE, or TEARDOWN).
11
+ - ``reset_on_teardown`` clears the cached value but does NOT reset versioning state.
12
+ After teardown, ``v.cid`` still reflects the last DATA value, not the cleared cache.
13
+ The invariant ``hash(node.get()) == v.cid`` only holds in ``settled``/``resolved`` status.
14
+ - Resubscribable nodes preserve versioning across subscription lifetimes (monotonic counter).
15
+ """
16
+
17
+ from __future__ import annotations
18
+
19
+ import hashlib
20
+ import json
21
+ import math
22
+ import uuid
23
+ from collections.abc import Callable
24
+ from dataclasses import dataclass
25
+ from typing import Any
26
+
27
# Public API of the versioning module.
__all__ = [
    "V0",
    "V1",
    "NodeVersionInfo",
    "VersioningLevel",
    "HashFn",
    "create_versioning",
    "advance_version",
    "default_hash",
    "canonicalize_for_hash",
    "is_v1",
]

# Level selector for create_versioning(): 0 -> V0, 1 -> V1.
type VersioningLevel = int  # 0 or 1; extensible to 2, 3 later
# Pluggable content-hash function: value -> hex digest string.
type HashFn = Callable[[Any], str]
42
+
43
+
44
+ # ---------------------------------------------------------------------------
45
+ # Types
46
+ # ---------------------------------------------------------------------------
47
+
48
+
49
@dataclass
class V0:
    """Level-0 versioning: stable identity plus a monotonic change counter."""

    # Stable node identity string (a UUID by default; see create_versioning).
    id: str
    # Monotonic counter, advanced once per DATA change (see advance_version).
    version: int = 0
55
+
56
+
57
@dataclass
class V1(V0):
    """V1: V0 + content-addressed identifier + previous cid link."""

    # Content id: hash of the last DATA value (see default_hash / advance_version).
    cid: str = ""
    # cid prior to the most recent advance; None until the first advance.
    prev: str | None = None
63
+
64
+
65
# Union alias for type hints. Use is_v1() to narrow to V1 at runtime.
type NodeVersionInfo = V0 | V1
67
+
68
+
69
+ # ---------------------------------------------------------------------------
70
+ # Canonical normalizer
71
+ # ---------------------------------------------------------------------------
72
+
73
+
74
def canonicalize_for_hash(value: Any) -> Any:
    """Normalize *value* into a JSON-safe canonical form for deterministic hashing.

    Rules:
        - ``None`` passes through.
        - ``bool`` passes through (checked before the numeric cases, since
          ``bool`` is an ``int`` subclass).
        - ``float``: non-finite values (``NaN``, ``±Inf``) raise ``TypeError``;
          integer-valued floats collapse to ``int`` (``1.0`` -> ``1``).
        - ``int`` (and collapsed floats): magnitudes beyond ``2**53 - 1`` raise
          ``TypeError`` — beyond that, cross-language cid parity breaks.
        - ``str`` passes through.
        - ``list`` / ``tuple``: each element canonicalized, returned as ``list``.
        - ``dict``: keys sorted, values canonicalized.
        - Anything else: ``None``.

    Raises:
        TypeError: for non-finite floats or integers outside the safe range.
    """

    def _reject_unsafe(shown: Any) -> None:
        # Shared rejection for both the int branch and the collapsed-float
        # branch; message shows the original (pre-collapse) value.
        msg = (
            f"Cannot hash integer outside safe range (|n| > 2^53-1): {shown}. "
            "Cross-language cid parity is not guaranteed for unsafe integers."
        )
        raise TypeError(msg)

    if value is None:
        return None
    if isinstance(value, bool):
        return value
    if isinstance(value, float):
        if math.isnan(value) or math.isinf(value):
            raise TypeError(f"Cannot canonicalize non-finite float: {value}")
        if value != int(value):
            return value
        as_int = int(value)
        if abs(as_int) > 2**53 - 1:
            _reject_unsafe(value)
        return as_int
    if isinstance(value, int):
        if abs(value) > 2**53 - 1:
            _reject_unsafe(value)
        return value
    if isinstance(value, str):
        return value
    if isinstance(value, (list, tuple)):
        return [canonicalize_for_hash(item) for item in value]
    if isinstance(value, dict):
        return {key: canonicalize_for_hash(val) for key, val in sorted(value.items())}
    return None
118
+
119
+
120
+ # ---------------------------------------------------------------------------
121
+ # Default hash
122
+ # ---------------------------------------------------------------------------
123
+
124
+
125
def default_hash(value: Any) -> str:
    """Hash *value* to 16 hex chars (~64 bits) of SHA-256 over deterministic JSON.

    The value is first normalized via :func:`canonicalize_for_hash`; the JSON
    encoding sorts object keys and uses compact separators so equal values
    always serialize — and therefore hash — identically.
    """
    payload = json.dumps(
        canonicalize_for_hash(value),
        sort_keys=True,
        separators=(",", ":"),
    ).encode()
    return hashlib.sha256(payload).hexdigest()[:16]
134
+
135
+
136
+ # ---------------------------------------------------------------------------
137
+ # Factory
138
+ # ---------------------------------------------------------------------------
139
+
140
+
141
def create_versioning(
    level: VersioningLevel,
    initial_value: Any = None,
    *,
    id: str | None = None,
    hash_fn: HashFn | None = None,
) -> NodeVersionInfo:
    """Create initial versioning state for a node.

    Args:
        level: 0 for V0; any other value yields V1.
        initial_value: The node's initial cached value (hashed for the V1 cid).
        id: Override the auto-generated id (falsy values trigger generation).
        hash_fn: Custom hash function for the V1 cid (default: truncated SHA-256).

    Returns:
        A fresh :class:`V0` or :class:`V1` instance.
    """
    # RFC 4122 string with hyphens — matches TypeScript `crypto.randomUUID()`.
    node_id = id or str(uuid.uuid4())
    if level != 0:
        hasher = hash_fn or default_hash
        return V1(id=node_id, cid=hasher(initial_value), prev=None)
    return V0(id=node_id)
163
+
164
+
165
+ # ---------------------------------------------------------------------------
166
+ # Advance
167
+ # ---------------------------------------------------------------------------
168
+
169
+
170
def advance_version(
    info: NodeVersionInfo,
    new_value: Any,
    hash_fn: HashFn,
) -> None:
    """Advance versioning state after a DATA emission (value changed).

    Mutates *info* in place for performance (called on every DATA). Only call
    when the cached value actually changed — never on RESOLVED. For V1, the
    old cid becomes ``prev`` and the new value's hash becomes ``cid``.
    """
    info.version += 1
    if not isinstance(info, V1):
        return
    info.prev = info.cid
    info.cid = hash_fn(new_value)
184
+
185
+
186
+ # ---------------------------------------------------------------------------
187
+ # Guards
188
+ # ---------------------------------------------------------------------------
189
+
190
+
191
def is_v1(info: NodeVersionInfo) -> bool:
    """Runtime type guard: True when *info* carries V1 fields (``cid``/``prev``)."""
    return isinstance(info, V1)