@statezero/core 0.2.8 → 0.2.10
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -16,3 +16,29 @@ export function ModelAdaptor(modelInstance: Object, reactivityFn?: Function): an
  */
 export function QuerySetAdaptor(liveQuerySet: Object, reactivityFn?: Function): any | import("vue").Ref;
 export function MetricAdaptor(metric: any): any;
+export const modelEventBatcher: ModelEventBatcher;
+export const BATCH_THRESHOLD: 50;
+/**
+ * Batches model events and decides whether to use fine-grained (touch) or
+ * coarse-grained (queryset refresh) reactivity based on batch size.
+ */
+declare class ModelEventBatcher {
+    pendingTouches: Map<any, any>;
+    debounceTimers: Map<any, any>;
+    maxWaitTimers: Map<any, any>;
+    processQueue: PQueue<import("p-queue/dist/priority-queue.js").default, import("p-queue").QueueAddOptions>;
+    /**
+     * Queue a touch call for batching
+     */
+    queueTouch(configKey: any, modelName: any, pk: any, wrapper: any): void;
+    _resetDebounce(modelKey: any): void;
+    _ensureMaxWait(modelKey: any): void;
+    _clearTimers(modelKey: any): void;
+    _flush(modelKey: any): void;
+    _processBatch(modelKey: any, touchBatch: any): void;
+    _touchModels(touchBatch: any): void;
+    _refreshQuerysets(modelKey: any): void;
+    clear(): void;
+}
+import PQueue from "p-queue";
+export {};
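The two value exports above exist mainly so tests can drive the batcher directly (the implementation below carries an "Export for testing" comment). A minimal sketch of the fine-grained path, assuming a hypothetical import path for the Vue adaptor module; the config key, model name, and wrapper objects are illustrative stand-ins, not part of the published API:

    import { modelEventBatcher, BATCH_THRESHOLD } from "@statezero/core/vue"; // hypothetical path

    const touched = [];
    const makeWrapper = (pk) => ({ touch: () => touched.push(pk) }); // stand-in for a reactive model wrapper

    // Queue fewer distinct pks than BATCH_THRESHOLD (50): after the ~16 ms debounce
    // window the batch is processed on the fine-grained path and each wrapper is touched.
    for (let pk = 1; pk <= 5; pk++) {
        modelEventBatcher.queueTouch("default", "Todo", pk, makeWrapper(pk));
    }

    setTimeout(() => {
        console.log(touched.length, BATCH_THRESHOLD); // expected: 5 50
        modelEventBatcher.clear(); // reset pending state, as the registerAdapterReset hook does below
    }, 200);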
@@ -4,9 +4,154 @@ import { initEventHandler } from "../../syncEngine/stores/operationEventHandlers
 import { isEqual, isNil } from 'lodash-es';
 import hash from 'object-hash';
 import { registerAdapterReset } from "../../reset.js";
+import PQueue from "p-queue";
 initEventHandler();
 const wrappedQuerysetCache = new Map();
 const wrappedMetricCache = new Map();
+// =============================================================================
+// Model Event Batcher - Batches rapid model events to prevent watcher overload
+// =============================================================================
+const BATCH_THRESHOLD = 50; // Above this, use queryset refresh instead of individual touch
+const DEBOUNCE_MS = 16; // ~1 frame, gather rapid events
+const MAX_WAIT_MS = 100; // Don't wait longer than this to flush
+/**
+ * Batches model events and decides whether to use fine-grained (touch) or
+ * coarse-grained (queryset refresh) reactivity based on batch size.
+ */
+class ModelEventBatcher {
+    constructor() {
+        // Queue of pending touch calls: Map<modelKey, Map<pk, Set<wrapper>>>
+        this.pendingTouches = new Map();
+        // Debounce timers per model
+        this.debounceTimers = new Map();
+        // Max wait timers per model
+        this.maxWaitTimers = new Map();
+        // Rate-limited queue for processing
+        this.processQueue = new PQueue({
+            concurrency: 1,
+            interval: 16, // ~1 frame
+            intervalCap: 10 // max 10 flushes per frame
+        });
+    }
+    /**
+     * Queue a touch call for batching
+     */
+    queueTouch(configKey, modelName, pk, wrapper) {
+        const modelKey = `${configKey}::${modelName}`;
+        // Initialize maps if needed
+        if (!this.pendingTouches.has(modelKey)) {
+            this.pendingTouches.set(modelKey, new Map());
+        }
+        const pkMap = this.pendingTouches.get(modelKey);
+        if (!pkMap.has(pk)) {
+            pkMap.set(pk, new Set());
+        }
+        pkMap.get(pk).add(wrapper);
+        // Reset debounce timer
+        this._resetDebounce(modelKey);
+        // Start max wait timer if not already running
+        this._ensureMaxWait(modelKey);
+    }
+    _resetDebounce(modelKey) {
+        // Clear existing debounce
+        if (this.debounceTimers.has(modelKey)) {
+            clearTimeout(this.debounceTimers.get(modelKey));
+        }
+        // Set new debounce
+        const timer = setTimeout(() => {
+            this._flush(modelKey);
+        }, DEBOUNCE_MS);
+        this.debounceTimers.set(modelKey, timer);
+    }
+    _ensureMaxWait(modelKey) {
+        if (this.maxWaitTimers.has(modelKey)) {
+            return; // Already waiting
+        }
+        const timer = setTimeout(() => {
+            this._flush(modelKey);
+        }, MAX_WAIT_MS);
+        this.maxWaitTimers.set(modelKey, timer);
+    }
+    _clearTimers(modelKey) {
+        if (this.debounceTimers.has(modelKey)) {
+            clearTimeout(this.debounceTimers.get(modelKey));
+            this.debounceTimers.delete(modelKey);
+        }
+        if (this.maxWaitTimers.has(modelKey)) {
+            clearTimeout(this.maxWaitTimers.get(modelKey));
+            this.maxWaitTimers.delete(modelKey);
+        }
+    }
+    _flush(modelKey) {
+        this._clearTimers(modelKey);
+        const pkMap = this.pendingTouches.get(modelKey);
+        if (!pkMap || pkMap.size === 0) {
+            return;
+        }
+        // Take the pending touches and clear
+        const touchBatch = new Map(pkMap);
+        pkMap.clear();
+        // Queue for rate-limited processing
+        this.processQueue.add(() => this._processBatch(modelKey, touchBatch));
+    }
+    _processBatch(modelKey, touchBatch) {
+        const batchSize = touchBatch.size;
+        if (batchSize < BATCH_THRESHOLD) {
+            // Small batch: fine-grained touch for each model
+            this._touchModels(touchBatch);
+        }
+        else {
+            // Large batch: refresh all querysets for this model instead
+            this._refreshQuerysets(modelKey);
+        }
+    }
+    _touchModels(touchBatch) {
+        // Call touch() on each wrapper
+        for (const [pk, wrappers] of touchBatch) {
+            for (const wrapper of wrappers) {
+                try {
+                    wrapper.touch();
+                }
+                catch (e) {
+                    console.warn('[ModelEventBatcher] Error touching model:', e);
+                }
+            }
+        }
+    }
+    _refreshQuerysets(modelKey) {
+        // Find all cached querysets for this model and refresh them
+        // modelKey is "configKey::modelName"
+        const [configKey, modelName] = modelKey.split('::');
+        for (const [semanticKey, wrapper] of wrappedQuerysetCache) {
+            const liveQuerySet = wrapper.original;
+            if (!liveQuerySet)
+                continue;
+            const qs = liveQuerySet.queryset;
+            if (qs?.ModelClass?.configKey === configKey &&
+                qs?.ModelClass?.modelName === modelName) {
+                // Refresh this queryset's reactive wrapper
+                try {
+                    wrapper.splice(0, wrapper.length);
+                    wrapper.push(...liveQuerySet);
+                }
+                catch (e) {
+                    console.warn('[ModelEventBatcher] Error refreshing queryset:', e);
+                }
+            }
+        }
+    }
+    clear() {
+        // Clear all pending state
+        for (const modelKey of this.pendingTouches.keys()) {
+            this._clearTimers(modelKey);
+        }
+        this.pendingTouches.clear();
+        this.processQueue.clear();
+    }
+}
+const modelEventBatcher = new ModelEventBatcher();
+// Export for testing
+export { modelEventBatcher, BATCH_THRESHOLD };
 /**
  * Adapts a model instance to a Vue reactive object by directly wrapping
  * the instance and incrementing an internal version on relevant events.
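Two behaviours follow directly from the constants and _processBatch above. A small sketch of the coarse-grained route, reusing the hypothetical import path from the earlier sketch (names are illustrative):

    import { modelEventBatcher } from "@statezero/core/vue"; // hypothetical path

    // Coarse-grained route: a burst of >= BATCH_THRESHOLD (50) distinct pks makes
    // _processBatch call _refreshQuerysets for the model instead of touching wrappers,
    // so none of these touch() callbacks are expected to fire.
    for (let pk = 1; pk <= 60; pk++) {
        modelEventBatcher.queueTouch("default", "Order", pk, {
            touch: () => console.log("touched", pk), // should not run for this burst
        });
    }

    // Timing: each queueTouch resets the 16 ms debounce timer, but the 100 ms max-wait
    // timer armed by the first event still forces a flush, so a steady event stream
    // cannot postpone processing indefinitely.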
@@ -23,17 +168,15 @@ export function ModelAdaptor(modelInstance, reactivityFn = reactive) {
     // Make the model instance reactive using the specified function
     const wrapper = reactivityFn(modelInstance);
     const eventName = `${configKey}::${modelName}::render`;
-    // Handler
+    // Handler queues touch calls through the batcher for optimal batching
     const renderHandler = (eventData) => {
         const isRef = reactivityFn === ref;
         const model = isRef ? wrapper.value : wrapper;
         if (eventData.pk === model[pkField]) {
-
-
-
-
-            wrapper.touch();
-        }
+            // Queue the touch through the batcher instead of calling directly
+            // This allows batching of rapid events and smart routing to queryset refresh for bulk ops
+            const targetWrapper = isRef ? wrapper.value : wrapper;
+            modelEventBatcher.queueTouch(configKey, modelName, eventData.pk, targetWrapper);
         }
     };
     // Subscribe to model events indefinitely
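One observable effect of routing the render handler through the batcher: repeated events for the same pk inside one debounce window collapse to a single touch, because wrappers are deduplicated in a Set per pk. A brief illustration (hypothetical import path, illustrative names):

    import { modelEventBatcher } from "@statezero/core/vue"; // hypothetical path

    let touches = 0;
    const wrapper = { touch: () => { touches++; } };

    // Simulate a rapid burst of render events for a single model instance (pk 7).
    for (let i = 0; i < 20; i++) {
        modelEventBatcher.queueTouch("default", "Todo", 7, wrapper);
    }

    setTimeout(() => {
        console.log(touches); // expected: 1 (twenty events, one flush, one touch per pk/wrapper pair)
    }, 200);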
@@ -127,4 +270,5 @@ export function MetricAdaptor(metric) {
 registerAdapterReset(() => {
     wrappedQuerysetCache.clear();
     wrappedMetricCache.clear();
+    modelEventBatcher.clear();
 });
@@ -44,6 +44,12 @@ export class QuerysetStore {
     private _getValidatedAndFilteredPks;
     render(optimistic?: boolean, fromCache?: boolean): any[];
     renderFromRoot(optimistic: boolean | undefined, rootStore: any): any[];
+    /**
+     * Render by getting ALL instances from the model store and applying
+     * the queryset's filters locally. Used for temp stores (e.g., optimistic
+     * chained filters) that don't have their own ground truth.
+     */
+    renderFromModelStore(optimistic?: boolean): any[];
     renderFromData(optimistic?: boolean): any[];
     applyOperation(operation: any, currentPks: any): any;
     sync(forceFromDb?: boolean): Promise<void>;
@@ -211,6 +211,11 @@ export class QuerysetStore {
                 pks = this.renderFromRoot(optimistic, rootStore);
             }
         }
+        // For temp stores with no ground truth (e.g., chained optimistic filters),
+        // render from the model store instead of empty ground truth
+        if (isNil(pks) && this.isTemp && this.groundTruthPks.length === 0) {
+            pks = this.renderFromModelStore(optimistic);
+        }
         if (isNil(pks)) {
            pks = this.renderFromData(optimistic);
         }
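Summarising the render() fallback order after this hunk (a sketch of the control flow shown above, not additional API; the condition guarding step 1 sits outside the hunk):

    // 1. (condition outside this hunk)                 -> pks = this.renderFromRoot(optimistic, rootStore)
    // 2. pks unset, temp store, empty ground truth     -> pks = this.renderFromModelStore(optimistic)  (new)
    // 3. pks still unset                               -> pks = this.renderFromData(optimistic)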
@@ -231,6 +236,24 @@ export class QuerysetStore {
         let result = filter(renderedData, ast, this.modelClass, false);
         return result;
     }
+    /**
+     * Render by getting ALL instances from the model store and applying
+     * the queryset's filters locally. Used for temp stores (e.g., optimistic
+     * chained filters) that don't have their own ground truth.
+     */
+    renderFromModelStore(optimistic = true) {
+        const modelStore = modelStoreRegistry.getStore(this.modelClass);
+        // Get all PKs from the model store
+        const allPks = modelStore.groundTruthPks;
+        // Convert to model instances (like renderFromRoot does)
+        const allInstances = allPks.map((pk) => {
+            return this.modelClass.fromPk(pk, this.queryset);
+        });
+        // Apply the queryset's AST filters locally
+        const ast = this.queryset.build();
+        const result = filter(allInstances, ast, this.modelClass, false);
+        return result;
+    }
     renderFromData(optimistic = true) {
         const renderedPks = this.groundTruthSet;
         for (const op of this.operations) {
package/package.json (changed)