@volynets/reflex 0.1.0 → 0.1.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +25 -25
- package/dist/cjs/index.cjs +1 -1
- package/dist/cjs/unstable/index.cjs +1 -1
- package/dist/esm/index.js +1 -1
- package/dist/esm/unstable/index.js +1 -1
- package/dist/globals.d.ts +333 -32
- package/dist/unstable/index.d.ts +195 -0
- package/package.json +10 -1
- package/src/api/derived.ts +90 -0
- package/src/api/effect.ts +120 -0
- package/src/api/event.ts +257 -0
- package/src/api/index.ts +4 -0
- package/src/api/signal.ts +68 -0
- package/src/globals.d.ts +169 -0
- package/src/index.ts +6 -0
- package/src/infra/event.ts +182 -0
- package/src/infra/factory.ts +46 -0
- package/src/infra/index.ts +2 -0
- package/src/infra/runtime.ts +189 -0
- package/src/policy/SCHEDULER_SEMANTICS.md +389 -0
- package/src/policy/event_dispatcher.ts +39 -0
- package/src/policy/index.ts +1 -0
- package/src/policy/scheduler/index.ts +6 -0
- package/src/policy/scheduler/scheduler.constants.ts +17 -0
- package/src/policy/scheduler/scheduler.core.ts +165 -0
- package/src/policy/scheduler/scheduler.infra.ts +37 -0
- package/src/policy/scheduler/scheduler.queue.ts +74 -0
- package/src/policy/scheduler/scheduler.types.ts +54 -0
- package/src/policy/scheduler/variants/index.ts +3 -0
- package/src/policy/scheduler/variants/scheduler.eager.ts +46 -0
- package/src/policy/scheduler/variants/scheduler.flush.ts +35 -0
- package/src/policy/scheduler/variants/scheduler.sab.ts +37 -0
- package/src/unstable/index.ts +4 -0
- package/src/unstable/resource.ts +505 -0
|
@@ -0,0 +1,389 @@
|
|
|
1
|
+
# Effect Scheduler Semantics
|
|
2
|
+
|
|
3
|
+
This document defines the observable semantics of Reflex effect scheduling and
|
|
4
|
+
the test invariants that should hold for each policy:
|
|
5
|
+
|
|
6
|
+
- `flush`
|
|
7
|
+
- `sab`
|
|
8
|
+
- `eager`
|
|
9
|
+
|
|
10
|
+
It is intentionally written from the perspective of externally observable
|
|
11
|
+
behavior rather than implementation details. The goal is to make benchmarks,
|
|
12
|
+
tests, and adapter integrations agree on what is "correct" for each mode.
|
|
13
|
+
|
|
14
|
+
## Terms
|
|
15
|
+
|
|
16
|
+
- `signal`: mutable reactive source
|
|
17
|
+
- `computed`: pull-based derived value
|
|
18
|
+
- `effect`: push-style observer scheduled for re-execution
|
|
19
|
+
- `batch`: transaction boundary that groups writes
|
|
20
|
+
- `flush`: explicit delivery of queued effects
|
|
21
|
+
- "stable": no more queued effect work remains for the current snapshot
|
|
22
|
+
- "scheduled": an effect is queued to run but has not run yet
|
|
23
|
+
|
|
24
|
+
## Shared Guarantees
|
|
25
|
+
|
|
26
|
+
These guarantees hold in every mode:
|
|
27
|
+
|
|
28
|
+
1. `signal` writes performed inside `batch()` become the committed source of
|
|
29
|
+
truth when the batch exits.
|
|
30
|
+
2. `computed` reads after the batch must observe the latest committed signal
|
|
31
|
+
state, even if effects have not been delivered yet.
|
|
32
|
+
3. `effect` re-execution is deduplicated per scheduled node.
|
|
33
|
+
4. Effects must not flush in the middle of propagation.
|
|
34
|
+
5. Nested batches behave like one outer transaction for delivery purposes.
|
|
35
|
+
6. Disposed effects must not run again.
|
|
36
|
+
7. FIFO delivery order is preserved for the scheduler queue.
|
|
37
|
+
|
|
38
|
+
The main semantic difference between modes is not whether values are correct,
|
|
39
|
+
but when queued effects become observable.
|
|
40
|
+
|
|
41
|
+
## Mode: `flush`
|
|
42
|
+
|
|
43
|
+
### Intent
|
|
44
|
+
|
|
45
|
+
`flush` is the low-overhead mode. It separates mutation/propagation from
|
|
46
|
+
effect delivery.
|
|
47
|
+
|
|
48
|
+
### Observable Contract
|
|
49
|
+
|
|
50
|
+
After `batch(fn)` in `flush` mode:
|
|
51
|
+
|
|
52
|
+
- signal state is up to date
|
|
53
|
+
- computed reads are up to date
|
|
54
|
+
- effects may still be pending
|
|
55
|
+
- the system is not guaranteed to be stable
|
|
56
|
+
|
|
57
|
+
This means the following is correct:
|
|
58
|
+
|
|
59
|
+
```ts
|
|
60
|
+
rt.batch(() => {
|
|
61
|
+
setSource(3);
|
|
62
|
+
});
|
|
63
|
+
|
|
64
|
+
expect(source()).toBe(3);
|
|
65
|
+
expect(derived()).toBe(6);
|
|
66
|
+
expect(effectSpy).toHaveBeenCalledTimes(1);
|
|
67
|
+
|
|
68
|
+
rt.flush();
|
|
69
|
+
|
|
70
|
+
expect(effectSpy).toHaveBeenCalledTimes(2);
|
|
71
|
+
```
|
|
72
|
+
|
|
73
|
+
### Correct Test Invariants
|
|
74
|
+
|
|
75
|
+
Tests in `flush` mode may assert immediately after `batch()`:
|
|
76
|
+
|
|
77
|
+
- signal values changed
|
|
78
|
+
- computed values changed
|
|
79
|
+
- effects are still at their previous call count
|
|
80
|
+
|
|
81
|
+
Tests in `flush` mode may assert after `flush()`:
|
|
82
|
+
|
|
83
|
+
- queued effects have run
|
|
84
|
+
- cleanup has executed if applicable
|
|
85
|
+
- the system is stable for the current snapshot
|
|
86
|
+
|
|
87
|
+
### Incorrect Expectation
|
|
88
|
+
|
|
89
|
+
This is not a valid invariant for strict `flush`:
|
|
90
|
+
|
|
91
|
+
```ts
|
|
92
|
+
rt.batch(() => {
|
|
93
|
+
setSource(3);
|
|
94
|
+
});
|
|
95
|
+
|
|
96
|
+
expect(effectSpy).toHaveBeenCalledTimes(2);
|
|
97
|
+
```
|
|
98
|
+
|
|
99
|
+
That expectation asks for stable-after-batch semantics, which `flush` does not
|
|
100
|
+
promise.
|
|
101
|
+
|
|
102
|
+
## Mode: `sab`
|
|
103
|
+
|
|
104
|
+
### Intent
|
|
105
|
+
|
|
106
|
+
`sab` means "stable after batch".
|
|
107
|
+
|
|
108
|
+
It keeps lazy enqueue semantics during propagation, but auto-delivers pending
|
|
109
|
+
effects when the outermost batch exits and the runtime is in a safe idle state.
|
|
110
|
+
|
|
111
|
+
### Observable Contract
|
|
112
|
+
|
|
113
|
+
Inside a batch:
|
|
114
|
+
|
|
115
|
+
- effects stay queued
|
|
116
|
+
- no mid-propagation flush occurs
|
|
117
|
+
|
|
118
|
+
After the outermost batch exits:
|
|
119
|
+
|
|
120
|
+
- signal state is current
|
|
121
|
+
- computed reads are current
|
|
122
|
+
- queued effects are auto-delivered
|
|
123
|
+
- the system is stable for the current snapshot
|
|
124
|
+
|
|
125
|
+
This means the following is correct:
|
|
126
|
+
|
|
127
|
+
```ts
|
|
128
|
+
rt.batch(() => {
|
|
129
|
+
setSource(3);
|
|
130
|
+
});
|
|
131
|
+
|
|
132
|
+
expect(source()).toBe(3);
|
|
133
|
+
expect(derived()).toBe(6);
|
|
134
|
+
expect(effectSpy).toHaveBeenCalledTimes(2);
|
|
135
|
+
```
|
|
136
|
+
|
|
137
|
+
### Correct Test Invariants
|
|
138
|
+
|
|
139
|
+
Tests in `sab` mode should assert:
|
|
140
|
+
|
|
141
|
+
- no effect rerun occurs inside the batch body
|
|
142
|
+
- effect reruns are visible immediately after the outermost batch exits
|
|
143
|
+
- reads inside the batch can still observe current pull-based values
|
|
144
|
+
|
|
145
|
+
### Important Nuance
|
|
146
|
+
|
|
147
|
+
If the outermost batch exits while propagation is still active or a computed is
|
|
148
|
+
currently evaluating, `sab` does not flush yet. The queue stays pending until a
|
|
149
|
+
later explicit `flush()`. The contract is "stable after batch when safe", not
|
|
150
|
+
"flush under every circumstance".
|
|
151
|
+
|
|
152
|
+
## Mode: `eager`
|
|
153
|
+
|
|
154
|
+
### Intent
|
|
155
|
+
|
|
156
|
+
`eager` auto-delivers effects whenever it is safe to do so.
|
|
157
|
+
|
|
158
|
+
### Observable Contract
|
|
159
|
+
|
|
160
|
+
When the runtime is idle and not inside propagation:
|
|
161
|
+
|
|
162
|
+
- enqueue can trigger immediate delivery
|
|
163
|
+
- exiting the outermost batch stabilizes the system automatically
|
|
164
|
+
- explicit `flush()` is normally unnecessary
|
|
165
|
+
|
|
166
|
+
This means the following is correct:
|
|
167
|
+
|
|
168
|
+
```ts
|
|
169
|
+
rt.batch(() => {
|
|
170
|
+
setSource(3);
|
|
171
|
+
});
|
|
172
|
+
|
|
173
|
+
expect(source()).toBe(3);
|
|
174
|
+
expect(derived()).toBe(6);
|
|
175
|
+
expect(effectSpy).toHaveBeenCalledTimes(2);
|
|
176
|
+
```
|
|
177
|
+
|
|
178
|
+
### Correct Test Invariants
|
|
179
|
+
|
|
180
|
+
Tests in `eager` mode may assert immediately after:
|
|
181
|
+
|
|
182
|
+
- a plain write performed while idle
|
|
183
|
+
- an outermost `batch()` exit
|
|
184
|
+
- event delivery completion
|
|
185
|
+
|
|
186
|
+
that:
|
|
187
|
+
|
|
188
|
+
- signal values are current
|
|
189
|
+
- computed values are current
|
|
190
|
+
- effects have already observed the latest stable snapshot
|
|
191
|
+
|
|
192
|
+
### Important Distinction From `sab`
|
|
193
|
+
|
|
194
|
+
`eager` is more aggressive than `sab`:
|
|
195
|
+
|
|
196
|
+
- `eager` may flush on idle enqueue outside batches
|
|
197
|
+
- `sab` does not change enqueue into an auto-flushing operation
|
|
198
|
+
- `sab` only changes what happens at outermost batch exit
|
|
199
|
+
|
|
200
|
+
### Important Nuance
|
|
201
|
+
|
|
202
|
+
`eager` still must not flush during active propagation or while an enclosing
|
|
203
|
+
computed is running. Delivery happens at the earliest safe point, not literally
|
|
204
|
+
"immediately no matter what".
|
|
205
|
+
|
|
206
|
+
## Test Matrix
|
|
207
|
+
|
|
208
|
+
The same scenario should be asserted differently depending on mode.
|
|
209
|
+
|
|
210
|
+
### Scenario A: Write inside batch, then read signal and computed
|
|
211
|
+
|
|
212
|
+
```ts
|
|
213
|
+
rt.batch(() => {
|
|
214
|
+
setSource(3);
|
|
215
|
+
});
|
|
216
|
+
```
|
|
217
|
+
|
|
218
|
+
Valid in all modes:
|
|
219
|
+
|
|
220
|
+
```ts
|
|
221
|
+
expect(source()).toBe(3);
|
|
222
|
+
expect(derived()).toBe(6);
|
|
223
|
+
```
|
|
224
|
+
|
|
225
|
+
### Scenario B: Write inside batch, then inspect effect call count
|
|
226
|
+
|
|
227
|
+
Initial effect call count: `1`
|
|
228
|
+
|
|
229
|
+
Valid expectations:
|
|
230
|
+
|
|
231
|
+
- `flush`: still `1` until explicit `flush()`
|
|
232
|
+
- `sab`: already `2` after batch exit
|
|
233
|
+
- `eager`: already `2` after batch exit
|
|
234
|
+
|
|
235
|
+
### Scenario C: Multiple writes in one batch
|
|
236
|
+
|
|
237
|
+
```ts
|
|
238
|
+
rt.batch(() => {
|
|
239
|
+
setLeft(2);
|
|
240
|
+
setRight(20);
|
|
241
|
+
});
|
|
242
|
+
```
|
|
243
|
+
|
|
244
|
+
Valid in all modes:
|
|
245
|
+
|
|
246
|
+
- effects must observe one consistent final snapshot
|
|
247
|
+
- intermediate partial snapshots must not leak through effect delivery
|
|
248
|
+
|
|
249
|
+
Expected timing:
|
|
250
|
+
|
|
251
|
+
- `flush`: after explicit `rt.flush()`
|
|
252
|
+
- `sab`: after batch exit
|
|
253
|
+
- `eager`: after batch exit
|
|
254
|
+
|
|
255
|
+
### Scenario D: Nested batch
|
|
256
|
+
|
|
257
|
+
```ts
|
|
258
|
+
rt.batch(() => {
|
|
259
|
+
setA(1);
|
|
260
|
+
rt.batch(() => {
|
|
261
|
+
setB(2);
|
|
262
|
+
});
|
|
263
|
+
});
|
|
264
|
+
```
|
|
265
|
+
|
|
266
|
+
Valid expectations:
|
|
267
|
+
|
|
268
|
+
- no post-batch delivery after the inner batch alone
|
|
269
|
+
- delivery happens only when the outermost batch exits
|
|
270
|
+
- in `flush`, even outermost batch exit still does not deliver without
|
|
271
|
+
explicit `flush()`
|
|
272
|
+
|
|
273
|
+
### Scenario E: Write outside batch while idle
|
|
274
|
+
|
|
275
|
+
```ts
|
|
276
|
+
setSource(3);
|
|
277
|
+
```
|
|
278
|
+
|
|
279
|
+
Expected timing:
|
|
280
|
+
|
|
281
|
+
- `flush`: effect remains queued until `flush()`
|
|
282
|
+
- `sab`: effect remains queued until `flush()`
|
|
283
|
+
- `eager`: effect may run automatically
|
|
284
|
+
|
|
285
|
+
This is the most important behavioral difference between `eager` and `sab`.
|
|
286
|
+
|
|
287
|
+
## Benchmark Interpretation
|
|
288
|
+
|
|
289
|
+
These modes are expected to have different costs:
|
|
290
|
+
|
|
291
|
+
- `flush` without `flush()` is cheaper because it postpones effect work
|
|
292
|
+
- `batch(); flush();` is more expensive because it actually delivers effects
|
|
293
|
+
- `sab` is close in cost to `batch(); flush();`
|
|
294
|
+
- `eager` can be cheaper or more expensive depending on workload shape, but it
|
|
295
|
+
still pays for auto-delivery
|
|
296
|
+
|
|
297
|
+
Therefore, this comparison is not apples-to-apples:
|
|
298
|
+
|
|
299
|
+
- `flush` without delivery
|
|
300
|
+
- `flush` with delivery
|
|
301
|
+
|
|
302
|
+
If a benchmark expects the effect to have already re-run, then it is measuring
|
|
303
|
+
delivery cost, not only propagation cost.
|
|
304
|
+
|
|
305
|
+
## Adapter Guidance
|
|
306
|
+
|
|
307
|
+
Adapters should decide explicitly which contract they expose.
|
|
308
|
+
|
|
309
|
+
### Strict `flush` Adapter
|
|
310
|
+
|
|
311
|
+
Use this when you want cheap writes and explicit stabilization:
|
|
312
|
+
|
|
313
|
+
```ts
|
|
314
|
+
withBatch(fn) {
|
|
315
|
+
return rt.batch(fn);
|
|
316
|
+
}
|
|
317
|
+
|
|
318
|
+
settleEffects() {
|
|
319
|
+
rt.flush();
|
|
320
|
+
}
|
|
321
|
+
```
|
|
322
|
+
|
|
323
|
+
### Stable-After-Batch Adapter
|
|
324
|
+
|
|
325
|
+
Use this when tests or integrations require post-batch stability:
|
|
326
|
+
|
|
327
|
+
```ts
|
|
328
|
+
withBatch(fn) {
|
|
329
|
+
return rt.batch(fn);
|
|
330
|
+
}
|
|
331
|
+
|
|
332
|
+
// runtime created with effectStrategy: "sab"
|
|
333
|
+
```
|
|
334
|
+
|
|
335
|
+
or:
|
|
336
|
+
|
|
337
|
+
```ts
|
|
338
|
+
withBatch(fn) {
|
|
339
|
+
const result = rt.batch(fn);
|
|
340
|
+
rt.flush();
|
|
341
|
+
return result;
|
|
342
|
+
}
|
|
343
|
+
```
|
|
344
|
+
|
|
345
|
+
### Eager Adapter
|
|
346
|
+
|
|
347
|
+
Use this when the integration wants auto-delivery semantics generally:
|
|
348
|
+
|
|
349
|
+
```ts
|
|
350
|
+
createRuntime({ effectStrategy: "eager" });
|
|
351
|
+
```
|
|
352
|
+
|
|
353
|
+
## Recommended Test Strategy
|
|
354
|
+
|
|
355
|
+
Do not force one invariant onto all scheduler modes.
|
|
356
|
+
|
|
357
|
+
Instead, split tests into:
|
|
358
|
+
|
|
359
|
+
1. Pull correctness tests
|
|
360
|
+
- signal state after writes
|
|
361
|
+
- computed consistency after writes
|
|
362
|
+
|
|
363
|
+
2. Delivery timing tests
|
|
364
|
+
- whether effects are still queued
|
|
365
|
+
- when effects become observable
|
|
366
|
+
|
|
367
|
+
3. Stabilization tests
|
|
368
|
+
- whether the system is guaranteed settled after a boundary
|
|
369
|
+
|
|
370
|
+
In practice:
|
|
371
|
+
|
|
372
|
+
- if a test asserts `effectSpy === 2` immediately after `batch()`, it is a
|
|
373
|
+
test for `sab` or `eager`, not strict `flush`
|
|
374
|
+
- if a test asserts `signal` and `computed` only, it is valid across all modes
|
|
375
|
+
- if a benchmark wants the cheapest `flush` path, it must not demand settled
|
|
376
|
+
effects as part of correctness
|
|
377
|
+
|
|
378
|
+
## Summary
|
|
379
|
+
|
|
380
|
+
- `flush` is correct when effects remain pending after `batch()`
|
|
381
|
+
- `sab` is correct when effects stay lazy during the batch but are delivered at
|
|
382
|
+
outermost batch exit
|
|
383
|
+
- `eager` is correct when effects are auto-delivered at the earliest safe point
|
|
384
|
+
|
|
385
|
+
The key rule is:
|
|
386
|
+
|
|
387
|
+
Correctness is mode-relative.
|
|
388
|
+
|
|
389
|
+
The runtime should not be judged by an invariant it never promised to uphold.
|
|
@@ -0,0 +1,39 @@
|
|
|
1
|
+
import type { EventBoundary, EventSource } from "../infra/event";
|
|
2
|
+
import { identityBoundary, emitEvent } from "../infra/event";
|
|
3
|
+
|
|
4
|
+
export class EventDispatcher {
|
|
5
|
+
readonly queue: unknown[] = [];
|
|
6
|
+
head: number = 0;
|
|
7
|
+
flushing: boolean = false;
|
|
8
|
+
readonly runBoundary: EventBoundary;
|
|
9
|
+
readonly flush: () => void;
|
|
10
|
+
|
|
11
|
+
constructor(runBoundary: EventBoundary = identityBoundary) {
|
|
12
|
+
this.runBoundary = runBoundary;
|
|
13
|
+
this.flush = (): void => this._flush();
|
|
14
|
+
this.flush = this._flush.bind(this);
|
|
15
|
+
}
|
|
16
|
+
|
|
17
|
+
emit<T>(source: EventSource<T>, value: T): void {
|
|
18
|
+
this.queue.push(source, value);
|
|
19
|
+
if (!this.flushing) this.runBoundary(this.flush);
|
|
20
|
+
}
|
|
21
|
+
|
|
22
|
+
private _flush(): void {
|
|
23
|
+
if (this.flushing) return;
|
|
24
|
+
this.flushing = true;
|
|
25
|
+
|
|
26
|
+
try {
|
|
27
|
+
const q = this.queue;
|
|
28
|
+
while (this.head < q.length) {
|
|
29
|
+
const source = q[this.head++] as EventSource<unknown>;
|
|
30
|
+
const value = q[this.head++];
|
|
31
|
+
emitEvent(source, value);
|
|
32
|
+
}
|
|
33
|
+
} finally {
|
|
34
|
+
this.queue.length = 0;
|
|
35
|
+
this.head = 0;
|
|
36
|
+
this.flushing = false;
|
|
37
|
+
}
|
|
38
|
+
}
|
|
39
|
+
}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
export * from "./event_dispatcher";
|
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
import { ReactiveNodeState } from "@reflex/runtime";
|
|
2
|
+
|
|
3
|
+
/** Delivery policy for queued effects (see SCHEDULER_SEMANTICS.md). */
export const enum EffectSchedulerMode {
  // Effects stay queued until an explicit flush().
  Flush = 0,
  // Effects are auto-delivered at the earliest safe idle point.
  Eager = 1,
  // "Stable after batch": auto-deliver when the outermost batch exits.
  SAB = 2,
}

/** Coarse lifecycle state of the scheduler core. */
export const enum SchedulerPhase {
  Idle = 0,
  Batching = 1,
  Flushing = 2,
}

// Bitmask: node is already queued or has been disposed — in either case it
// must not be enqueued again.
export const SCHEDULED_OR_DISPOSED =
  ReactiveNodeState.Disposed | ReactiveNodeState.Scheduled;
// Mask that clears only the Scheduled bit from a node's state.
export const UNSCHEDULE_MASK = ~ReactiveNodeState.Scheduled;
|
|
@@ -0,0 +1,165 @@
|
|
|
1
|
+
import type { ExecutionContext, ReactiveNode } from "@reflex/runtime";
|
|
2
|
+
import { ReactiveNodeState, runWatcher } from "@reflex/runtime";
|
|
3
|
+
import { createWatcherQueue } from "./scheduler.queue";
|
|
4
|
+
import type { EffectSchedulerMode } from "./scheduler.constants";
|
|
5
|
+
import {
|
|
6
|
+
SCHEDULED_OR_DISPOSED,
|
|
7
|
+
SchedulerPhase,
|
|
8
|
+
UNSCHEDULE_MASK,
|
|
9
|
+
} from "./scheduler.constants";
|
|
10
|
+
import type {
|
|
11
|
+
SchedulerCore,
|
|
12
|
+
SchedulerEnqueue,
|
|
13
|
+
SchedulerBatch,
|
|
14
|
+
SchedulerNotifySettled,
|
|
15
|
+
SchedulerRuntimeNotifySettled,
|
|
16
|
+
EffectScheduler,
|
|
17
|
+
EffectNode,
|
|
18
|
+
WatcherQueue,
|
|
19
|
+
} from "./scheduler.types";
|
|
20
|
+
|
|
21
|
+
// Module-level alias for the runtime's watcher runner, bound once so the
// flush loop calls a stable function reference. `bind(null)` yields a plain
// function with no `this` binding.
const runner = runWatcher.bind(null);
|
|
22
|
+
|
|
23
|
+
/**
|
|
24
|
+
* Marks an effect watcher node as scheduled.
|
|
25
|
+
*
|
|
26
|
+
* This is a low-level helper used by scheduler integrations and tests to set
|
|
27
|
+
* the runtime's scheduled flag on a watcher node.
|
|
28
|
+
*/
|
|
29
|
+
export function effectScheduled(node: EffectNode) {
|
|
30
|
+
node.state |= ReactiveNodeState.Scheduled;
|
|
31
|
+
}
|
|
32
|
+
|
|
33
|
+
/**
|
|
34
|
+
* Clears the scheduled flag from an effect watcher node.
|
|
35
|
+
*
|
|
36
|
+
* This is a low-level helper used by scheduler integrations and tests to mark
|
|
37
|
+
* a watcher as no longer queued for execution.
|
|
38
|
+
*/
|
|
39
|
+
export function effectUnscheduled(node: EffectNode) {
|
|
40
|
+
node.state &= ~ReactiveNodeState.Scheduled;
|
|
41
|
+
}
|
|
42
|
+
|
|
43
|
+
export function isContextSettled(context: ExecutionContext): boolean {
|
|
44
|
+
return context.propagationDepth === 0 && context.activeComputed === null;
|
|
45
|
+
}
|
|
46
|
+
|
|
47
|
+
export function isRuntimeInactive(
|
|
48
|
+
context: ExecutionContext,
|
|
49
|
+
core: SchedulerCore,
|
|
50
|
+
): boolean {
|
|
51
|
+
return (
|
|
52
|
+
core.phase === SchedulerPhase.Idle &&
|
|
53
|
+
core.batchDepth === 0 &&
|
|
54
|
+
isContextSettled(context)
|
|
55
|
+
);
|
|
56
|
+
}
|
|
57
|
+
|
|
58
|
+
export function createSchedulerCore(): SchedulerCore {
|
|
59
|
+
const queue = createWatcherQueue();
|
|
60
|
+
let batchDepth = 0;
|
|
61
|
+
let phase = SchedulerPhase.Idle;
|
|
62
|
+
|
|
63
|
+
function flush(): void {
|
|
64
|
+
if (phase === SchedulerPhase.Flushing) return;
|
|
65
|
+
if (queue.size === 0) return;
|
|
66
|
+
|
|
67
|
+
phase = SchedulerPhase.Flushing;
|
|
68
|
+
|
|
69
|
+
try {
|
|
70
|
+
while (queue.size !== 0) {
|
|
71
|
+
const node = queue.shift()!,
|
|
72
|
+
s = node.state;
|
|
73
|
+
node.state = s & UNSCHEDULE_MASK;
|
|
74
|
+
runner(node);
|
|
75
|
+
}
|
|
76
|
+
} finally {
|
|
77
|
+
queue.clear();
|
|
78
|
+
phase = batchDepth > 0 ? SchedulerPhase.Batching : SchedulerPhase.Idle;
|
|
79
|
+
}
|
|
80
|
+
}
|
|
81
|
+
|
|
82
|
+
return {
|
|
83
|
+
queue,
|
|
84
|
+
flush,
|
|
85
|
+
|
|
86
|
+
enterBatch() {
|
|
87
|
+
if (++batchDepth === 1 && phase !== SchedulerPhase.Flushing) {
|
|
88
|
+
phase = SchedulerPhase.Batching;
|
|
89
|
+
}
|
|
90
|
+
},
|
|
91
|
+
|
|
92
|
+
leaveBatch() {
|
|
93
|
+
if (--batchDepth !== 0) {
|
|
94
|
+
return false;
|
|
95
|
+
}
|
|
96
|
+
|
|
97
|
+
if (phase === SchedulerPhase.Flushing) {
|
|
98
|
+
return false;
|
|
99
|
+
}
|
|
100
|
+
|
|
101
|
+
phase = SchedulerPhase.Idle;
|
|
102
|
+
return true;
|
|
103
|
+
},
|
|
104
|
+
|
|
105
|
+
reset() {
|
|
106
|
+
while (queue.size !== 0) {
|
|
107
|
+
queue.shift()!.state &= UNSCHEDULE_MASK;
|
|
108
|
+
}
|
|
109
|
+
|
|
110
|
+
queue.clear();
|
|
111
|
+
batchDepth = 0;
|
|
112
|
+
phase = SchedulerPhase.Idle;
|
|
113
|
+
},
|
|
114
|
+
get batchDepth() {
|
|
115
|
+
return batchDepth;
|
|
116
|
+
},
|
|
117
|
+
get phase() {
|
|
118
|
+
return phase;
|
|
119
|
+
},
|
|
120
|
+
};
|
|
121
|
+
}
|
|
122
|
+
|
|
123
|
+
export function tryEnqueue(queue: WatcherQueue, node: ReactiveNode): boolean {
|
|
124
|
+
const effectNode = node as EffectNode;
|
|
125
|
+
const state = effectNode.state;
|
|
126
|
+
if ((state & SCHEDULED_OR_DISPOSED) !== 0) {
|
|
127
|
+
return false;
|
|
128
|
+
}
|
|
129
|
+
|
|
130
|
+
effectNode.state = state | ReactiveNodeState.Scheduled;
|
|
131
|
+
queue.push(effectNode);
|
|
132
|
+
return true;
|
|
133
|
+
}
|
|
134
|
+
|
|
135
|
+
export function createSchedulerInstance(
|
|
136
|
+
mode: EffectSchedulerMode,
|
|
137
|
+
context: ExecutionContext,
|
|
138
|
+
core: SchedulerCore,
|
|
139
|
+
enqueue: SchedulerEnqueue,
|
|
140
|
+
batch: SchedulerBatch,
|
|
141
|
+
notifySettled: SchedulerNotifySettled,
|
|
142
|
+
runtimeNotifySettled: SchedulerRuntimeNotifySettled,
|
|
143
|
+
): EffectScheduler {
|
|
144
|
+
return {
|
|
145
|
+
ring: core.queue.ring,
|
|
146
|
+
mode,
|
|
147
|
+
context,
|
|
148
|
+
runtimeNotifySettled,
|
|
149
|
+
enqueue,
|
|
150
|
+
batch,
|
|
151
|
+
flush: core.flush,
|
|
152
|
+
notifySettled,
|
|
153
|
+
reset: core.reset,
|
|
154
|
+
|
|
155
|
+
get head() {
|
|
156
|
+
return core.queue.head;
|
|
157
|
+
},
|
|
158
|
+
get batchDepth() {
|
|
159
|
+
return core.batchDepth;
|
|
160
|
+
},
|
|
161
|
+
get phase() {
|
|
162
|
+
return core.phase;
|
|
163
|
+
},
|
|
164
|
+
};
|
|
165
|
+
}
|
|
@@ -0,0 +1,37 @@
|
|
|
1
|
+
import type { ExecutionContext } from "@reflex/runtime";
|
|
2
|
+
import { getDefaultContext } from "@reflex/runtime";
|
|
3
|
+
import { EffectSchedulerMode } from "./scheduler.constants";
|
|
4
|
+
import type { EffectScheduler } from "./scheduler.types";
|
|
5
|
+
import {
|
|
6
|
+
createEagerScheduler,
|
|
7
|
+
createSabScheduler,
|
|
8
|
+
createFlushScheduler,
|
|
9
|
+
} from "./variants";
|
|
10
|
+
|
|
11
|
+
export type EffectStrategy = "flush" | "eager" | "sab";
|
|
12
|
+
|
|
13
|
+
const strategyMap: Record<EffectStrategy, EffectSchedulerMode> = {
|
|
14
|
+
eager: EffectSchedulerMode.Eager,
|
|
15
|
+
sab: EffectSchedulerMode.SAB,
|
|
16
|
+
flush: EffectSchedulerMode.Flush,
|
|
17
|
+
};
|
|
18
|
+
|
|
19
|
+
export function resolveEffectSchedulerMode(
|
|
20
|
+
strategy?: EffectStrategy,
|
|
21
|
+
): EffectSchedulerMode {
|
|
22
|
+
return strategy ? strategyMap[strategy] : EffectSchedulerMode.Flush;
|
|
23
|
+
}
|
|
24
|
+
|
|
25
|
+
export function createEffectScheduler(
|
|
26
|
+
mode: EffectSchedulerMode = EffectSchedulerMode.Flush,
|
|
27
|
+
context: ExecutionContext = getDefaultContext(),
|
|
28
|
+
): EffectScheduler {
|
|
29
|
+
switch (mode) {
|
|
30
|
+
case EffectSchedulerMode.Eager:
|
|
31
|
+
return createEagerScheduler(context);
|
|
32
|
+
case EffectSchedulerMode.SAB:
|
|
33
|
+
return createSabScheduler(context);
|
|
34
|
+
default:
|
|
35
|
+
return createFlushScheduler(context);
|
|
36
|
+
}
|
|
37
|
+
}
|