graphrefly-0.1.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- graphrefly/__init__.py +160 -0
- graphrefly/compat/__init__.py +18 -0
- graphrefly/compat/async_utils.py +228 -0
- graphrefly/compat/asyncio_runner.py +89 -0
- graphrefly/compat/trio_runner.py +81 -0
- graphrefly/core/__init__.py +142 -0
- graphrefly/core/clock.py +20 -0
- graphrefly/core/dynamic_node.py +749 -0
- graphrefly/core/guard.py +277 -0
- graphrefly/core/meta.py +149 -0
- graphrefly/core/node.py +963 -0
- graphrefly/core/protocol.py +460 -0
- graphrefly/core/runner.py +107 -0
- graphrefly/core/subgraph_locks.py +296 -0
- graphrefly/core/sugar.py +138 -0
- graphrefly/core/versioning.py +193 -0
- graphrefly/extra/__init__.py +313 -0
- graphrefly/extra/adapters.py +2149 -0
- graphrefly/extra/backoff.py +287 -0
- graphrefly/extra/backpressure.py +113 -0
- graphrefly/extra/checkpoint.py +307 -0
- graphrefly/extra/composite.py +303 -0
- graphrefly/extra/cron.py +133 -0
- graphrefly/extra/data_structures.py +707 -0
- graphrefly/extra/resilience.py +727 -0
- graphrefly/extra/sources.py +766 -0
- graphrefly/extra/tier1.py +1067 -0
- graphrefly/extra/tier2.py +1802 -0
- graphrefly/graph/__init__.py +31 -0
- graphrefly/graph/graph.py +2249 -0
- graphrefly/integrations/__init__.py +1 -0
- graphrefly/integrations/fastapi.py +767 -0
- graphrefly/patterns/__init__.py +5 -0
- graphrefly/patterns/ai.py +2132 -0
- graphrefly/patterns/cqrs.py +515 -0
- graphrefly/patterns/memory.py +639 -0
- graphrefly/patterns/messaging.py +553 -0
- graphrefly/patterns/orchestration.py +536 -0
- graphrefly/patterns/reactive_layout/__init__.py +81 -0
- graphrefly/patterns/reactive_layout/measurement_adapters.py +276 -0
- graphrefly/patterns/reactive_layout/reactive_block_layout.py +434 -0
- graphrefly/patterns/reactive_layout/reactive_layout.py +943 -0
- graphrefly/py.typed +1 -0
- graphrefly-0.1.0.dist-info/METADATA +253 -0
- graphrefly-0.1.0.dist-info/RECORD +47 -0
- graphrefly-0.1.0.dist-info/WHEEL +4 -0
- graphrefly-0.1.0.dist-info/licenses/LICENSE +21 -0
graphrefly/core/dynamic_node.py
@@ -0,0 +1,749 @@
"""``dynamic_node`` — runtime dep tracking with diamond resolution.

Unlike ``node()`` where deps are fixed at construction, ``dynamic_node``
discovers deps at runtime via a tracking ``get()`` proxy. After each
recompute, deps are diffed: new deps are connected, removed deps are
disconnected, and bitmasks are rebuilt. Kept deps retain their
subscriptions (no teardown/reconnect churn).
"""

from __future__ import annotations

import threading
from collections.abc import Callable, Mapping
from contextlib import suppress
from types import MappingProxyType
from typing import Any

from graphrefly.core.protocol import Messages, MessageType, emit_with_batch, propagates_to_meta

# ---------------------------------------------------------------------------
# Public types
# ---------------------------------------------------------------------------

type DynGet = Callable[..., Any]  # (dep: Node) -> value | None
type DynamicNodeFn[T] = Callable[[DynGet], T]

# ---------------------------------------------------------------------------
# Factory
# ---------------------------------------------------------------------------


def dynamic_node[T](
    fn: DynamicNodeFn[T],
    *,
    name: str | None = None,
    equals: Callable[[Any, Any], bool] | None = None,
    meta: dict[str, Any] | None = None,
    guard: Callable[[Any, str], bool] | None = None,
    on_message: Callable[[Any, int, Any], bool] | None = None,
    on_resubscribe: Callable[[], None] | None = None,
    complete_when_deps_complete: bool = True,
    describe_kind: str | None = None,
    resubscribable: bool = False,
    reset_on_teardown: bool = False,
    thread_safe: bool = True,
) -> DynamicNodeImpl[T]:
    """Create a node with runtime dep tracking.

    Deps are discovered each time the compute function runs by tracking
    which nodes are passed to the ``get()`` proxy.

    After each recompute:

    - New deps (not in previous set) are subscribed.
    - Removed deps (not in current set) are unsubscribed.
    - Kept deps retain their existing subscriptions.

    The node participates fully in diamond resolution via the standard
    two-phase DIRTY/RESOLVED protocol.

    Example::

        cond = state(True)
        a = state(1)
        b = state(2)

        d = dynamic_node(lambda get: get(a) if get(cond) else get(b))
    """
    return DynamicNodeImpl(
        fn,
        name=name,
        equals=equals or (lambda a, b: a is b),
        meta=meta,
        guard=guard,
        on_message=on_message,
        on_resubscribe=on_resubscribe,
        complete_when_deps_complete=complete_when_deps_complete,
        describe_kind=describe_kind,
        resubscribable=resubscribable,
        reset_on_teardown=reset_on_teardown,
        thread_safe=thread_safe,
    )
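
# Illustrative ``on_message`` sketch (editor's example, not part of the
# package): the handler receives ``(msg, index, actions)`` and returns True
# to consume the message before the default protocol handling in
# ``_handle_dep_messages`` runs; ``actions`` wraps down/emit/up (see
# ``_DynamicNodeActions`` below). ``src`` is assumed to be an existing node,
# and the sentinel value is hypothetical.
#
#     def mute_first_dep_complete(msg, index, actions):
#         if msg[0] is MessageType.COMPLETE and index == 0:
#             actions.emit("dep-0-done")  # hypothetical sentinel value
#             return True   # consumed: skip built-in COMPLETE bookkeeping
#         return False      # anything else falls through to the protocol
#
#     n = dynamic_node(lambda get: get(src), on_message=mute_first_dep_complete)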


# ---------------------------------------------------------------------------
# Actions object (for on_message handler)
# ---------------------------------------------------------------------------


class _DynamicNodeActions:
    """Imperative ``actions`` object exposed to on_message handlers."""

    __slots__ = ("_down", "_emit", "_up")

    def __init__(
        self,
        down: Callable[[Messages], None],
        emit: Callable[[Any], None],
        up: Callable[[Messages], None],
    ) -> None:
        self._down = down
        self._emit = emit
        self._up = up

    def down(self, messages: Messages) -> None:
        self._down(messages)

    def emit(self, value: Any) -> None:
        self._emit(value)

    def up(self, messages: Messages) -> None:
        self._up(messages)


# ---------------------------------------------------------------------------
# Implementation
# ---------------------------------------------------------------------------


class DynamicNodeImpl[T]:
    """Internal implementation of ``dynamic_node``."""

    __slots__ = (
        "__weakref__",
        "_actions",
        "_auto_complete",
        "_cache_lock",
        "_cached",
        "_complete_bits",
        "_connected",
        "_dep_index_map",
        "_dep_unsubs",
        "_deps",
        "_describe_kind",
        "_dirty_bits",
        "_equals",
        "_fn",
        "_guard",
        "_inspector_hook",
        "_last_mutation",
        "_meta",
        "_name",
        "_on_message",
        "_on_resubscribe",
        "_resubscribable",
        "_reset_on_teardown",
        "_rewiring",
        "_settled_bits",
        "_single_dep_sink_count",
        "_single_dep_sinks",
        "_sink_count",
        "_sinks",
        "_status",
        "_terminal",
        "_thread_safe",
    )

    def __init__(
        self,
        fn: DynamicNodeFn[T],
        *,
        name: str | None,
        equals: Callable[[Any, Any], bool],
        meta: dict[str, Any] | None,
        guard: Callable[[Any, str], bool] | None,
        on_message: Callable[[Any, int, Any], bool] | None,
        on_resubscribe: Callable[[], None] | None,
        complete_when_deps_complete: bool,
        describe_kind: str | None,
        resubscribable: bool,
        reset_on_teardown: bool,
        thread_safe: bool,
    ) -> None:
        # Deferred import to avoid circular dependency
        from graphrefly.core.node import node as create_node
        from graphrefly.core.subgraph_locks import ensure_registered, union_nodes

        self._fn = fn
        self._name = name
        self._equals = equals
        self._guard = guard
        self._on_message = on_message
        self._on_resubscribe = on_resubscribe
        self._auto_complete = complete_when_deps_complete
        self._describe_kind = describe_kind
        self._last_mutation: dict[str, Any] | None = None
        self._resubscribable = resubscribable
        self._reset_on_teardown = reset_on_teardown
        self._thread_safe = bool(thread_safe)
        self._inspector_hook: Callable[[dict[str, Any]], None] | None = None

        self._cached: T | None = None
        self._status: str = "disconnected"
        self._terminal = False
        self._connected = False
        self._rewiring = False

        # Thread safety
        self._cache_lock = threading.Lock() if self._thread_safe else None

        # Dynamic deps tracking
        self._deps: list[Any] = []
        self._dep_unsubs: list[Callable[[], None]] = []
        self._dep_index_map: dict[int, int] = {}  # id(dep) -> index
        self._dirty_bits: set[int] = set()
        self._settled_bits: set[int] = set()
        self._complete_bits: set[int] = set()

        # Sinks
        self._sinks: Callable[[Messages], None] | set[Callable[[Messages], None]] | None = None
        self._sink_count = 0
        self._single_dep_sink_count = 0
        self._single_dep_sinks: set[Callable[..., Any]] = set()

        # Build companion meta nodes (same pattern as NodeImpl)
        built_meta: dict[str, Any] = {}
        for k, v in (meta or {}).items():
            meta_opts: dict[str, Any] = {
                "initial": v,
                "name": f"{name or 'dynamicNode'}:meta:{k}",
                "describe_kind": "state",
                "thread_safe": self._thread_safe,
            }
            if guard is not None:
                meta_opts["guard"] = guard
            built_meta[k] = create_node(**meta_opts)
        self._meta: Mapping[str, Any] = MappingProxyType(built_meta)

        # Register with subgraph lock registry
        if self._thread_safe:
            ensure_registered(self)
            for meta_node in self._meta.values():
                union_nodes(self, meta_node)

        # Actions object for on_message handler
        self._actions = _DynamicNodeActions(
            down=lambda msgs: self._down_internal(msgs),
            emit=lambda v: self._emit_auto_value(v),
            up=lambda msgs: self.up(msgs, internal=True),
        )

    # --- Public interface (Node protocol) ---

    @property
    def name(self) -> str | None:
        return self._name

    @property
    def status(self) -> str:
        return self._status

    @property
    def meta(self) -> Mapping[str, Any]:
        return self._meta

    @property
    def last_mutation(self) -> dict[str, Any] | None:
        return self._last_mutation

    @property
    def v(self) -> None:
        """Versioning not yet supported on DynamicNodeImpl."""
        return None

    def has_guard(self) -> bool:
        return self._guard is not None

    def allows_observe(self, actor: Any = None) -> bool:
        if self._guard is None:
            return True
        from graphrefly.core.guard import normalize_actor

        a = normalize_actor(actor)
        return bool(self._guard(a, "observe"))

    def get(self) -> T | None:
        lock = self._cache_lock
        if lock is not None:
            with lock:
                return self._cached
        return self._cached

    def down(
        self,
        messages: Messages,
        *,
        actor: Any = None,
        internal: bool = False,
        guard_action: str = "write",
        **_kwargs: Any,
    ) -> None:
        if not messages:
            return
        if self._thread_safe:
            from graphrefly.core.subgraph_locks import acquire_subgraph_write_lock_with_defer

            with acquire_subgraph_write_lock_with_defer(self):
                if not internal and self._guard is not None:
                    from graphrefly.core.guard import GuardDenied, normalize_actor, record_mutation

                    a = normalize_actor(actor)
                    if not self._guard(a, guard_action):
                        raise GuardDenied(a, self._name or "<unnamed>", guard_action)
                    if guard_action == "write":
                        self._last_mutation = record_mutation(a)
                self._down_internal(messages)
        else:
            if not internal and self._guard is not None:
                from graphrefly.core.guard import GuardDenied, normalize_actor, record_mutation

                a = normalize_actor(actor)
                if not self._guard(a, guard_action):
                    raise GuardDenied(a, self._name or "<unnamed>", guard_action)
                if guard_action == "write":
                    self._last_mutation = record_mutation(a)
            self._down_internal(messages)

    def subscribe(
        self,
        sink: Callable[[Messages], None],
        hints: Any = None,
        *,
        actor: Any = None,
        **_kwargs: Any,
    ) -> Callable[[], None]:
        check_actor = actor or (getattr(hints, "actor", None) if hints else None)
        if check_actor is not None and self._guard is not None:
            from graphrefly.core.guard import GuardDenied, normalize_actor

            a = normalize_actor(check_actor)
            if not self._guard(a, "observe"):
                raise GuardDenied(a, self._name or "<unnamed>", "observe")

        if self._thread_safe:
            from graphrefly.core.subgraph_locks import acquire_subgraph_write_lock_with_defer

            with acquire_subgraph_write_lock_with_defer(self):
                return self._subscribe_body(sink, hints)
        else:
            return self._subscribe_body(sink, hints)

    def _subscribe_body(
        self,
        sink: Callable[[Messages], None],
        hints: Any,
    ) -> Callable[[], None]:
        if self._terminal and self._resubscribable:
            self._terminal = False
            self._status = "disconnected"
            if self._on_resubscribe is not None:
                self._on_resubscribe()

        # Track sink counts
        self._sink_count += 1
        h_single = getattr(hints, "single_dep", False) if hints else False
        if h_single:
            self._single_dep_sink_count += 1
            self._single_dep_sinks.add(sink)

        if self._sinks is None:
            self._sinks = sink
        elif callable(self._sinks) and not isinstance(self._sinks, set):
            self._sinks = {self._sinks, sink}
        else:
            assert isinstance(self._sinks, set)
            self._sinks.add(sink)

        if not self._connected:
            self._connect()

        removed = False

        if self._thread_safe:
            from graphrefly.core.subgraph_locks import acquire_subgraph_write_lock_with_defer

            def unsubscribe() -> None:
                nonlocal removed
                with acquire_subgraph_write_lock_with_defer(self):
                    if removed:
                        return
                    removed = True
                    self._unsubscribe_body(sink)
        else:

            def unsubscribe() -> None:
                nonlocal removed
                if removed:
                    return
                removed = True
                self._unsubscribe_body(sink)

        return unsubscribe

    def _unsubscribe_body(self, sink: Callable[[Messages], None]) -> None:
        self._sink_count -= 1
        if sink in self._single_dep_sinks:
            self._single_dep_sink_count -= 1
            self._single_dep_sinks.discard(sink)

        if self._sinks is None:
            return
        if callable(self._sinks) and not isinstance(self._sinks, set):
            if self._sinks is sink:
                self._sinks = None
        else:
            assert isinstance(self._sinks, set)
            self._sinks.discard(sink)
            if len(self._sinks) == 1:
                (only,) = self._sinks
                self._sinks = only
            elif len(self._sinks) == 0:
                self._sinks = None
        if self._sinks is None:
            self._disconnect()

    def up(
        self,
        messages: Messages,
        *,
        actor: Any = None,
        internal: bool = False,
        guard_action: str = "write",
        **_kwargs: Any,
    ) -> None:
        """Send messages upstream to currently-tracked deps."""
        if not self._deps:
            return
        if not internal and self._guard is not None:
            from graphrefly.core.guard import GuardDenied, normalize_actor, record_mutation

            a = normalize_actor(actor)
            if not self._guard(a, guard_action):
                raise GuardDenied(a, self._name or "<unnamed>", guard_action)
            if guard_action == "write":
                self._last_mutation = record_mutation(a)
        for dep in self._deps:
            u = getattr(dep, "up", None)
            if u is not None:
                u(messages, internal=internal)

    def unsubscribe(self) -> None:
        """Disconnect from all upstream deps."""
        self._disconnect()

    def __or__(self, other: object) -> Any:
        if not callable(other):
            return NotImplemented
        return other(self)

    # --- Inspector hook ---

    def _set_inspector_hook(
        self, hook: Callable[[dict[str, Any]], None] | None
    ) -> Callable[[], None]:
        """Internal inspector hook attach/detach for graph observability."""
        prev = self._inspector_hook
        self._inspector_hook = hook

        def dispose() -> None:
            if self._inspector_hook is hook:
                self._inspector_hook = prev

        return dispose

    # --- Private methods ---

    def _emit_to_sinks(self, messages: Messages) -> None:
        if self._sinks is None:
            return
        if callable(self._sinks) and not isinstance(self._sinks, set):
            self._sinks(messages)
            return
        snapshot = list(self._sinks)
        for s in snapshot:
            s(messages)

    def _can_skip_dirty(self) -> bool:
        return self._sink_count == 1 and self._single_dep_sink_count == 1

    def _down_internal(self, messages: Messages) -> None:
        if not messages:
            return
        if self._terminal and not self._resubscribable:
            filtered = [
                m
                for m in messages
                if m[0] is MessageType.TEARDOWN or m[0] is MessageType.INVALIDATE
            ]
            if not filtered:
                return
            messages = filtered

        self._handle_local_lifecycle(messages)

        # singleDep DIRTY skip optimization
        if self._can_skip_dirty():
            has_phase2 = any(
                m[0] is MessageType.DATA or m[0] is MessageType.RESOLVED for m in messages
            )
            if has_phase2:
                filtered = [m for m in messages if m[0] is not MessageType.DIRTY]
                if filtered:
                    emit_with_batch(self._emit_to_sinks, filtered)
                return

        emit_with_batch(self._emit_to_sinks, messages)

    def _handle_local_lifecycle(self, messages: Messages) -> None:
        lock = self._cache_lock
        for m in messages:
            t = m[0]
            if t is MessageType.DATA:
                val = m[1] if len(m) > 1 else None
                if lock is not None:
                    with lock:
                        self._cached = val
                else:
                    self._cached = val
            if t is MessageType.INVALIDATE:
                if lock is not None:
                    with lock:
                        self._cached = None
                else:
                    self._cached = None
            if t is MessageType.DATA or t is MessageType.RESOLVED:
                self._status = "settled"
            elif t is MessageType.DIRTY:
                self._status = "dirty"
            elif t is MessageType.COMPLETE:
                self._status = "completed"
                self._terminal = True
            elif t is MessageType.ERROR:
                self._status = "errored"
                self._terminal = True
            if t is MessageType.TEARDOWN:
                if self._reset_on_teardown:
                    if lock is not None:
                        with lock:
                            self._cached = None
                    else:
                        self._cached = None
                try:
                    self._propagate_to_meta(t)
                finally:
                    self._disconnect()
            # Propagate other meta-eligible signals (centralized in protocol.py).
            if t is not MessageType.TEARDOWN and propagates_to_meta(t):
                self._propagate_to_meta(t)

    def _propagate_to_meta(self, t: MessageType) -> None:
        """Propagate a signal to all companion meta nodes (best-effort)."""
        for meta_node in self._meta.values():
            with suppress(Exception):
                meta_node.down([(t,)], internal=True)

    def _emit_auto_value(self, value: Any) -> None:
        was_dirty = self._status == "dirty"
        lock = self._cache_lock
        if lock is not None:
            with lock:
                cached_snapshot = self._cached
        else:
            cached_snapshot = self._cached
        unchanged = self._equals(cached_snapshot, value)
        if unchanged:
            msgs: Messages = (
                [(MessageType.RESOLVED,)]
                if was_dirty
                else [(MessageType.DIRTY,), (MessageType.RESOLVED,)]
            )
            self._down_internal(msgs)
            return
        if lock is not None:
            with lock:
                self._cached = value
        else:
            self._cached = value
        msgs = (
            [(MessageType.DATA, value)]
            if was_dirty
            else [(MessageType.DIRTY,), (MessageType.DATA, value)]
        )
        self._down_internal(msgs)
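
    # Descriptive note on the two-phase emission above: when downstream is
    # already marked dirty, a changed value sends a bare DATA and an
    # unchanged value a bare RESOLVED; otherwise a DIRTY is prepended so
    # diamond consumers always see a full DIRTY -> DATA/RESOLVED cycle.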

    def _connect(self) -> None:
        if self._connected:
            return
        self._connected = True
        self._status = "settled"
        self._dirty_bits.clear()
        self._settled_bits.clear()
        self._complete_bits.clear()
        self._run_fn()

    def _disconnect(self) -> None:
        if not self._connected:
            return
        for unsub in self._dep_unsubs:
            unsub()
        self._dep_unsubs = []
        self._deps = []
        self._dep_index_map.clear()
        self._dirty_bits.clear()
        self._settled_bits.clear()
        self._complete_bits.clear()
        self._connected = False
        self._status = "disconnected"

    def _run_fn(self) -> None:
        if self._terminal and not self._resubscribable:
            return
        if self._rewiring:
            return

        tracked_deps: list[Any] = []
        tracked_ids: set[int] = set()

        def get(dep: Any) -> Any:
            dep_id = id(dep)
            if dep_id not in tracked_ids:
                tracked_ids.add(dep_id)
                tracked_deps.append(dep)
            return dep.get()

        try:
            result = self._fn(get)

            # Inspector hook: collect dep values BEFORE rewire (pre-rewire deps
            # show what triggered recompute, matching TS semantics)
            if self._inspector_hook is not None:
                dep_values = [d.get() for d in self._deps]
                self._inspector_hook({"kind": "run", "dep_values": dep_values})

            self._rewire(tracked_deps)

            if result is None:
                return
            self._emit_auto_value(result)
        except Exception as err:
            self._down_internal([(MessageType.ERROR, err)])

    def _rewire(self, new_deps: list[Any]) -> None:
        self._rewiring = True
        try:
            old_map = self._dep_index_map
            new_map: dict[int, int] = {}
            new_unsubs: list[Callable[[], None]] = []

            for i, dep in enumerate(new_deps):
                dep_id = id(dep)
                new_map[dep_id] = i
                old_idx = old_map.get(dep_id)
                if old_idx is not None:
                    # Kept dep — reuse subscription
                    new_unsubs.append(self._dep_unsubs[old_idx])
                    self._dep_unsubs[old_idx] = lambda: None
                else:
                    # New dep — subscribe
                    idx = i
                    unsub = dep.subscribe(
                        lambda msgs, _idx=idx: self._handle_dep_messages(_idx, msgs)
                    )
                    new_unsubs.append(unsub)
                    # Union with new dep for thread safety
                    if self._thread_safe:
                        from graphrefly.core.subgraph_locks import union_nodes

                        union_nodes(self, dep)

            # Disconnect removed deps
            for dep_id, old_idx in old_map.items():
                if dep_id not in new_map:
                    self._dep_unsubs[old_idx]()

            self._deps = new_deps
            self._dep_unsubs = new_unsubs
            self._dep_index_map = new_map
            self._dirty_bits.clear()
            self._settled_bits.clear()

            # Preserve complete bits for deps still present
            new_complete: set[int] = set()
            for old_idx in self._complete_bits:
                # Find dep at old_idx and check if in new_map
                for dep_id, idx in old_map.items():
                    if idx == old_idx and dep_id in new_map:
                        new_complete.add(new_map[dep_id])
                        break
            self._complete_bits = new_complete
        finally:
            self._rewiring = False
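
    # Rewire walk-through (descriptive): if the previous run tracked
    # [cond, a] and the new run tracks [cond, b], then cond is found in
    # old_map and its subscription is reused at its new index, a is absent
    # from new_map so its stored unsubscribe runs, and b gets a fresh
    # subscription routed through _handle_dep_messages with its index bound.
    # Dirty/settled bits restart empty; complete bits are remapped so deps
    # that already COMPLETEd keep counting toward auto-complete.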

    def _handle_dep_messages(self, index: int, messages: Messages) -> None:
        if self._rewiring:
            return

        for msg in messages:
            # Inspector hook
            if self._inspector_hook is not None:
                self._inspector_hook({"kind": "dep_message", "dep_index": index, "message": msg})

            t = msg[0]

            # User-defined message handler gets first look
            if self._on_message is not None:
                try:
                    if self._on_message(msg, index, self._actions):
                        continue
                except Exception as err:
                    self._down_internal([(MessageType.ERROR, err)])
                    return

            if t is MessageType.DIRTY:
                self._dirty_bits.add(index)
                self._settled_bits.discard(index)
                if len(self._dirty_bits) == 1:
                    emit_with_batch(self._emit_to_sinks, [(MessageType.DIRTY,)])
                continue
            if t is MessageType.DATA or t is MessageType.RESOLVED:
                if index not in self._dirty_bits:
                    self._dirty_bits.add(index)
                    emit_with_batch(self._emit_to_sinks, [(MessageType.DIRTY,)])
                self._settled_bits.add(index)
                if self._all_dirty_settled():
                    self._dirty_bits.clear()
                    self._settled_bits.clear()
                    self._run_fn()
                continue
            if t is MessageType.COMPLETE:
                self._complete_bits.add(index)
                self._dirty_bits.discard(index)
                self._settled_bits.discard(index)
                if self._all_dirty_settled():
                    self._dirty_bits.clear()
                    self._settled_bits.clear()
                    self._run_fn()
                if self._auto_complete and len(self._complete_bits) >= len(self._deps) > 0:
                    self._down_internal([(MessageType.COMPLETE,)])
                continue
            if t is MessageType.ERROR:
                self._down_internal([msg])
                continue
            # INVALIDATE, TEARDOWN, PAUSE, RESUME — pass through
            self._down_internal([msg])

    def _all_dirty_settled(self) -> bool:
        if not self._dirty_bits:
            return False
        return self._dirty_bits <= self._settled_bits