@businessmaps/metaontology-nuxt 0.63.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +201 -0
- package/README.md +430 -0
- package/composables/idbConnection.ts +81 -0
- package/composables/idbHelpers.ts +165 -0
- package/composables/idbSchema.ts +93 -0
- package/composables/syncTypes.ts +145 -0
- package/composables/useCommitLog.ts +521 -0
- package/composables/useCrossTab.ts +246 -0
- package/composables/useModelStore.ts +174 -0
- package/composables/useSyncEngine.ts +494 -0
- package/composables/useTripleStore.ts +135 -0
- package/index.ts +15 -0
- package/nuxt.config.ts +16 -0
- package/package.json +56 -0
|
@@ -0,0 +1,494 @@
|
|
|
1
|
+
import { ref, readonly, computed } from 'vue'
|
|
2
|
+
import { useCommitLog } from './useCommitLog'
|
|
3
|
+
import { saveSyncCursor, loadSyncCursor } from './idbHelpers'
|
|
4
|
+
import { threeWayMerge } from '@businessmaps/metaontology/engine/merge'
|
|
5
|
+
import type { RootContext } from '@businessmaps/metaontology/types/context'
|
|
6
|
+
import type { Commit, Checkpoint } from '@businessmaps/metaontology/types/commits'
|
|
7
|
+
import type { MergeResult } from '@businessmaps/metaontology/types/branch'
|
|
8
|
+
import type { SyncStatus, SyncTargetDescriptor, SyncAdapter, SyncErrorCategory } from './syncTypes'
|
|
9
|
+
import { classifySyncError, friendlySyncErrorMessage } from './syncTypes'
|
|
10
|
+
|
|
11
|
+
// Sync-related types (`SyncStatus`, `SyncTargetDescriptor`, `SyncAdapter`,
|
|
12
|
+
// `PushResult`, `PullResult`) are owned by `./syncTypes` - import them from
|
|
13
|
+
// there directly. This file does not re-export them to avoid double-auto-
|
|
14
|
+
// import warnings in Nuxt (the layer's composables folder auto-imports every
|
|
15
|
+
// file, so re-exporting creates a duplicate).
|
|
16
|
+
|
|
17
|
+
// ── SyncHost: app-side coupling via dependency injection ────────────────────
|
|
18
|
+
//
|
|
19
|
+
// The sync engine needs to drive app-side state mutations (apply a fast-forward
|
|
20
|
+
// of remote commits, apply a merged model, surface conflicts for user
|
|
21
|
+
// resolution). The `SyncHost` interface is the narrow seam: the consuming app
|
|
22
|
+
// implements it and passes an instance to `activate()`. The engine owns its own
|
|
23
|
+
// status and retry state; it delegates side effects to the host.
|
|
24
|
+
|
|
25
|
+
export interface SyncHost {
  /** Read the current local model state (our side of the merge). */
  getRoot(): RootContext
  /** Fast-forward: replay remote commits into local state. The host owns
   * layout replay, awareness marking, and persistence scheduling.
   * Commits are delivered already sorted by ascending sequence. */
  applyFastForward(commits: Commit[]): void
  /** Three-way merge succeeded: install the merged model into local state.
   * The host preserves the current layout (sync merge is model-only). */
  applyMerged(mergedModel: RootContext): void
  /** Three-way merge produced conflicts: expose the result for UI resolution.
   * The engine enters 'conflict' status after this callback returns. */
  onConflict(result: MergeResult): void
}
|
|
37
|
+
|
|
38
|
+
// ── Module-level singleton state ────────────────────────────────────────────
//
// One sync engine per page: every useSyncEngine() caller shares these refs,
// so status/pendingCount stay consistent across components.

// Current engine phase: 'idle' | 'pushing' | 'pulling' | 'conflict' | 'error'.
const status = ref<SyncStatus>('idle')
// Highest commit sequence confirmed by the remote; commits above it are pending.
const lastSyncedSequence = ref(0)
// ISO timestamp of the last successful push or pull, for UI display.
const lastSyncedAt = ref<string | null>(null)
// Human-friendly message for the last failure (null when healthy).
const lastError = ref<string | null>(null)
const lastErrorCategory = ref<SyncErrorCategory | null>(null)
const enabled = ref(false)
// Descriptor of the remote target while active, null when deactivated.
const target = ref<SyncTargetDescriptor | null>(null)

// Track which error categories we've already logged to the browser console
// since the last successful sync. The retry loop fires every 5–60s; without
// throttling, a CORS-blocked R2 bucket fills the console with hundreds of
// identical fetch failures across a single tab session. We log the first
// occurrence of each category at info level (no scary red errors for known
// transient issues), then go quiet until the category changes or sync
// recovers. The user can still inspect the live state via the branch
// manager dropdown - it always reflects the latest error.
const loggedErrorCategories = new Set<SyncErrorCategory>()

// Non-reactive engine internals (never rendered, so plain lets suffice).
let activeAdapter: SyncAdapter | null = null
let activeHost: SyncHost | null = null
let flushTimer: ReturnType<typeof setTimeout> | null = null
let retryTimer: ReturnType<typeof setTimeout> | null = null
let retryCount = 0

// Push filter - pluggable predicate that excludes commits from the push set.
//
// `useCrossTab` sets this to skip commits that were received via
// BroadcastChannel from a sibling tab on the same device. Without this, two
// tabs on the same device would each push the other's commits to the cloud,
// doubling traffic and creating phantom conflicts.
//
// The engine itself does not know about cross-tab - it just runs the filter.
// `null` means "no filter, push everything past lastSyncedSequence."
type PushFilter = (commit: Commit) => boolean
let pushFilter: PushFilter | null = null

// Debounce between a local commit and the push that carries it.
const SYNC_DEBOUNCE_MS = 5_000
// Exponential backoff: base doubles per attempt, capped, with a hard attempt limit.
const RETRY_BASE_MS = 5_000
const RETRY_CAP_MS = 60_000
const RETRY_MAX_ATTEMPTS = 5
|
|
80
|
+
|
|
81
|
+
// ── Retry helpers ───────────────────────────────────────────────────────────
|
|
82
|
+
|
|
83
|
+
function retryDelay(): number {
|
|
84
|
+
return Math.min(RETRY_BASE_MS * Math.pow(2, retryCount), RETRY_CAP_MS)
|
|
85
|
+
}
|
|
86
|
+
|
|
87
|
+
function resetRetry() {
|
|
88
|
+
retryCount = 0
|
|
89
|
+
if (retryTimer) {
|
|
90
|
+
clearTimeout(retryTimer)
|
|
91
|
+
retryTimer = null
|
|
92
|
+
}
|
|
93
|
+
// Successful sync clears the throttle so subsequent failures of any
|
|
94
|
+
// category get a fresh log line.
|
|
95
|
+
loggedErrorCategories.clear()
|
|
96
|
+
}
|
|
97
|
+
|
|
98
|
+
function scheduleRetry(fn: () => Promise<boolean>) {
|
|
99
|
+
if (retryCount >= RETRY_MAX_ATTEMPTS) {
|
|
100
|
+
// Stay quiet on the final attempt - the dropdown's "Sync paused" message
|
|
101
|
+
// tells the user. Repeated console.warns from the engine just add noise.
|
|
102
|
+
return
|
|
103
|
+
}
|
|
104
|
+
const delay = retryDelay()
|
|
105
|
+
retryCount++
|
|
106
|
+
retryTimer = setTimeout(() => fn(), delay)
|
|
107
|
+
}
|
|
108
|
+
|
|
109
|
+
/**
|
|
110
|
+
* Record a sync failure: classify, store the category, throttle the console
|
|
111
|
+
* log so we only emit one line per (category × retry-cycle) instead of one
|
|
112
|
+
* per attempt. CORS errors against R2 fill the console with ~10 lines per
|
|
113
|
+
* push attempt without this; throttling brings it down to 1.
|
|
114
|
+
*/
|
|
115
|
+
function recordSyncError(context: 'push' | 'pull', e: unknown, mapId: string, branchId: string) {
|
|
116
|
+
const category = classifySyncError(e)
|
|
117
|
+
lastErrorCategory.value = category
|
|
118
|
+
lastError.value = friendlySyncErrorMessage(category)
|
|
119
|
+
|
|
120
|
+
if (loggedErrorCategories.has(category)) return
|
|
121
|
+
loggedErrorCategories.add(category)
|
|
122
|
+
|
|
123
|
+
// Use info level for known transient categories - these aren't bugs, they
|
|
124
|
+
// are environmental conditions. Reserve `console.error` for truly unknown
|
|
125
|
+
// failures the developer needs to investigate.
|
|
126
|
+
const detail = e instanceof Error ? e.message : String(e)
|
|
127
|
+
if (category === 'network' || category === 'cors' || category === 'crypto') {
|
|
128
|
+
console.warn(`[Sync] ${context} paused: ${friendlySyncErrorMessage(category)}`, { mapId, branchId, detail })
|
|
129
|
+
} else if (category === 'unknown') {
|
|
130
|
+
console.error(`[Sync] ${context} failed (uncategorized):`, { mapId, branchId, error: e })
|
|
131
|
+
} else {
|
|
132
|
+
console.warn(`[Sync] ${context} ${category}: ${friendlySyncErrorMessage(category)}`, { mapId, branchId, detail })
|
|
133
|
+
}
|
|
134
|
+
}
|
|
135
|
+
|
|
136
|
+
// ── Composable ──────────────────────────────────────────────────────────────
|
|
137
|
+
|
|
138
|
+
export function useSyncEngine() {
|
|
139
|
+
const commitLog = useCommitLog()
|
|
140
|
+
|
|
141
|
+
  // Commits appended locally since the last confirmed sync. Drives the UI
  // badge and lets activate() detect a backlog left over from a previous
  // session. Always 0 while sync is disabled.
  const pendingCount = computed(() => {
    if (!enabled.value) return 0
    return commitLog.commits.value.filter(c => c.sequence > lastSyncedSequence.value).length
  })
|
|
145
|
+
|
|
146
|
+
/** Activate sync with an adapter and an app-side host. */
|
|
147
|
+
function activate(adapter: SyncAdapter, host: SyncHost) {
|
|
148
|
+
activeAdapter = adapter
|
|
149
|
+
activeHost = host
|
|
150
|
+
target.value = adapter.descriptor
|
|
151
|
+
enabled.value = true
|
|
152
|
+
resetRetry()
|
|
153
|
+
|
|
154
|
+
// Drain any backlog left from the previous session.
|
|
155
|
+
//
|
|
156
|
+
// Scenario: the user dispatches a command, an unload handler flushes
|
|
157
|
+
// the pending commit to IDB, then they refresh BEFORE the 5-second
|
|
158
|
+
// sync debounce fires. On the new page load, `loadByContextId` replays
|
|
159
|
+
// from IDB and `useCollabWiring.activateForMap` captures
|
|
160
|
+
// `lastCommitCount = store.commitLog.length` as a baseline - which
|
|
161
|
+
// means the watcher only fires on FUTURE dispatches, not the ones
|
|
162
|
+
// already in the log. Without this check, those pre-existing commits
|
|
163
|
+
// would sit past `lastSyncedSequence` forever, waiting for a local
|
|
164
|
+
// dispatch that may never come.
|
|
165
|
+
//
|
|
166
|
+
// `pendingCount` reads `commitLog.commits.value` (already loaded from
|
|
167
|
+
// IDB by the caller) against the freshly-primed `lastSyncedSequence`,
|
|
168
|
+
// so it accurately reflects the backlog at activation time.
|
|
169
|
+
if (pendingCount.value > 0) {
|
|
170
|
+
void push().catch(() => {
|
|
171
|
+
// push() already records errors and schedules retries; swallow here
|
|
172
|
+
// to avoid unhandled rejection at the activation site.
|
|
173
|
+
})
|
|
174
|
+
}
|
|
175
|
+
}
|
|
176
|
+
|
|
177
|
+
/** Deactivate sync. */
|
|
178
|
+
function deactivate() {
|
|
179
|
+
enabled.value = false
|
|
180
|
+
target.value = null
|
|
181
|
+
activeAdapter = null
|
|
182
|
+
activeHost = null
|
|
183
|
+
resetRetry()
|
|
184
|
+
if (flushTimer) {
|
|
185
|
+
clearTimeout(flushTimer)
|
|
186
|
+
flushTimer = null
|
|
187
|
+
}
|
|
188
|
+
}
|
|
189
|
+
|
|
190
|
+
/** Update sync pointer after WebRTC direct replay. */
|
|
191
|
+
function updateLastSynced(seq: number) {
|
|
192
|
+
if (seq > lastSyncedSequence.value) {
|
|
193
|
+
lastSyncedSequence.value = seq
|
|
194
|
+
persistCursor()
|
|
195
|
+
}
|
|
196
|
+
}
|
|
197
|
+
|
|
198
|
+
/**
|
|
199
|
+
* Restore the persisted sync cursor for the active map/branch.
|
|
200
|
+
*
|
|
201
|
+
* `lastSyncedSequence` was originally ephemeral - held only in the engine
|
|
202
|
+
* singleton. After a fresh page load it started at 0, which caused:
|
|
203
|
+
* (a) every local commit to look "unsynced" on first push, provoking
|
|
204
|
+
* 409s if the server already had them
|
|
205
|
+
* (b) `pendingCount` to report the full commit history as pending
|
|
206
|
+
* (c) the engine to re-sync commits that had long since been confirmed
|
|
207
|
+
*
|
|
208
|
+
* Callers (typically `useCollabWiring.activateForMap`) await this before
|
|
209
|
+
* calling `activate(adapter, host)` so the first push carries the
|
|
210
|
+
* correct baseSequence. If the load fails, the cursor stays at 0 - the
|
|
211
|
+
* worst case is a single recoverable 409.
|
|
212
|
+
*/
|
|
213
|
+
async function primeCursor(mapId: string, branchId: string): Promise<void> {
|
|
214
|
+
try {
|
|
215
|
+
const persisted = await loadSyncCursor(mapId, branchId)
|
|
216
|
+
lastSyncedSequence.value = persisted
|
|
217
|
+
} catch (e) {
|
|
218
|
+
console.warn('[Sync] Could not load persisted sync cursor:', e)
|
|
219
|
+
}
|
|
220
|
+
}
|
|
221
|
+
|
|
222
|
+
/** Internal: persist the current cursor for the active map/branch. Fire-and-forget. */
|
|
223
|
+
function persistCursor(): void {
|
|
224
|
+
const mapId = commitLog.mapId.value
|
|
225
|
+
const branchId = commitLog.activeBranchId.value
|
|
226
|
+
if (!mapId) return
|
|
227
|
+
const seq = lastSyncedSequence.value
|
|
228
|
+
void saveSyncCursor(mapId, branchId, seq).catch((e) => {
|
|
229
|
+
console.warn('[Sync] Could not save sync cursor:', e)
|
|
230
|
+
})
|
|
231
|
+
}
|
|
232
|
+
|
|
233
|
+
  /**
   * Install or clear a push filter. The filter receives each candidate
   * commit and returns `true` to *exclude* it from the push.
   *
   * Used by `useCrossTab` so a tab does not push commits another tab on the
   * same device already pushed. Pass `null` to clear.
   *
   * Note the inverted polarity: `true` means "skip this commit", not
   * "include it".
   */
  function setPushFilter(filter: PushFilter | null) {
    pushFilter = filter
  }
|
|
243
|
+
|
|
244
|
+
/** Schedule a push after debounce. Called after commit append. */
|
|
245
|
+
function schedulePush() {
|
|
246
|
+
if (!enabled.value) return
|
|
247
|
+
if (flushTimer) clearTimeout(flushTimer)
|
|
248
|
+
flushTimer = setTimeout(() => push(), SYNC_DEBOUNCE_MS)
|
|
249
|
+
}
|
|
250
|
+
|
|
251
|
+
/** Push new local commits to the remote target. */
|
|
252
|
+
async function push(): Promise<boolean> {
|
|
253
|
+
if (!enabled.value || !activeAdapter) return false
|
|
254
|
+
|
|
255
|
+
const mapId = commitLog.mapId.value
|
|
256
|
+
const branchId = commitLog.activeBranchId.value
|
|
257
|
+
if (!mapId) return false
|
|
258
|
+
|
|
259
|
+
// Collect commits since last sync.
|
|
260
|
+
//
|
|
261
|
+
// `pushFilter` (set by useCrossTab) excludes commits this tab received
|
|
262
|
+
// via BroadcastChannel from a sibling tab - those will be pushed by the
|
|
263
|
+
// originating tab. Without the filter, two tabs on the same device would
|
|
264
|
+
// double-push and create phantom conflicts.
|
|
265
|
+
const allCommits = commitLog.commits.value
|
|
266
|
+
const newCommits = allCommits.filter(
|
|
267
|
+
c => c.sequence > lastSyncedSequence.value && (!pushFilter || !pushFilter(c)),
|
|
268
|
+
)
|
|
269
|
+
if (newCommits.length === 0) return true
|
|
270
|
+
|
|
271
|
+
status.value = 'pushing'
|
|
272
|
+
lastError.value = null
|
|
273
|
+
|
|
274
|
+
try {
|
|
275
|
+
const result = await activeAdapter.push(
|
|
276
|
+
mapId,
|
|
277
|
+
branchId,
|
|
278
|
+
newCommits,
|
|
279
|
+
lastSyncedSequence.value,
|
|
280
|
+
)
|
|
281
|
+
|
|
282
|
+
if (!result.success) {
|
|
283
|
+
if (result.conflict) {
|
|
284
|
+
status.value = 'conflict'
|
|
285
|
+
lastErrorCategory.value = 'conflict'
|
|
286
|
+
lastError.value = friendlySyncErrorMessage('conflict')
|
|
287
|
+
if (!loggedErrorCategories.has('conflict')) {
|
|
288
|
+
loggedErrorCategories.add('conflict')
|
|
289
|
+
console.warn('[Sync] push conflict - pull required', { mapId, branchId })
|
|
290
|
+
}
|
|
291
|
+
return false
|
|
292
|
+
}
|
|
293
|
+
throw new Error(result.error || 'Push failed')
|
|
294
|
+
}
|
|
295
|
+
|
|
296
|
+
lastSyncedSequence.value = result.newHeadSequence
|
|
297
|
+
lastSyncedAt.value = new Date().toISOString()
|
|
298
|
+
status.value = 'idle'
|
|
299
|
+
lastErrorCategory.value = null
|
|
300
|
+
lastError.value = null
|
|
301
|
+
resetRetry()
|
|
302
|
+
persistCursor()
|
|
303
|
+
return true
|
|
304
|
+
} catch (e: any) {
|
|
305
|
+
if (e?.statusCode === 409) {
|
|
306
|
+
status.value = 'conflict'
|
|
307
|
+
lastErrorCategory.value = 'conflict'
|
|
308
|
+
lastError.value = friendlySyncErrorMessage('conflict')
|
|
309
|
+
if (!loggedErrorCategories.has('conflict')) {
|
|
310
|
+
loggedErrorCategories.add('conflict')
|
|
311
|
+
console.warn('[Sync] push conflict - pull required', { mapId, branchId })
|
|
312
|
+
}
|
|
313
|
+
return false
|
|
314
|
+
}
|
|
315
|
+
status.value = 'error'
|
|
316
|
+
recordSyncError('push', e, mapId, branchId)
|
|
317
|
+
scheduleRetry(() => push())
|
|
318
|
+
return false
|
|
319
|
+
}
|
|
320
|
+
}
|
|
321
|
+
|
|
322
|
+
  /**
   * Pull new remote commits and merge if needed.
   *
   * Three outcomes:
   *  - no remote commits: cursor timestamp refreshed, returns true
   *  - remote-only changes: fast-forward via host, returns true
   *  - both sides changed: model-only three-way merge; clean merge is
   *    installed and pushed (returns true), conflicts are surfaced via
   *    `host.onConflict` (returns false)
   */
  async function pull(): Promise<boolean> {
    if (!enabled.value || !activeAdapter || !activeHost) return false

    const mapId = commitLog.mapId.value
    const branchId = commitLog.activeBranchId.value
    if (!mapId) return false

    // Capture the host in a local so TS keeps the non-null narrowing across awaits.
    const host = activeHost
    status.value = 'pulling'
    lastError.value = null

    try {
      const result = await activeAdapter.pull(mapId, branchId, lastSyncedSequence.value)

      if (!result.success) {
        throw new Error(result.error || 'Pull failed')
      }

      if (result.commits.length === 0) {
        // Nothing new remotely - record the successful check and go idle.
        lastSyncedAt.value = new Date().toISOString()
        status.value = 'idle'
        resetRetry()
        return true
      }

      // Determine if we've diverged: local commits beyond last sync point?
      // Only commits from THIS device count - commits from other devices past
      // the cursor arrived via other channels and aren't local divergence.
      const localDeviceId = commitLog.getDeviceId()
      const localUnsynced = commitLog.commits.value.filter(
        c => c.sequence > lastSyncedSequence.value && c.deviceId === localDeviceId,
      )

      if (localUnsynced.length === 0) {
        // Fast-forward - replay remote commits in sequence order via the host.
        // NOTE(review): .sort() mutates result.commits in place; harmless here
        // since the result object is local to this call, but a copy would be safer.
        const sorted = result.commits.sort((a, b) => a.sequence - b.sequence)
        host.applyFastForward(sorted)
        lastSyncedSequence.value = result.remoteHead
        lastSyncedAt.value = new Date().toISOString()
        status.value = 'idle'
        resetRetry()
        persistCursor()
        return true
      } else {
        // Diverged - model-only three-way merge

        // 1. Compute merge base from checkpoint
        const checkpoint = commitLog.latestCheckpoint.value
        if (!checkpoint) {
          // No retry scheduled: a missing checkpoint won't heal on its own.
          status.value = 'error'
          lastError.value = 'No checkpoint available for merge base'
          return false
        }

        // Replay from checkpoint to lastSyncedSequence to get base state.
        const commitsToBase = commitLog.commits.value.filter(
          c => c.sequence > checkpoint.sequence && c.sequence <= lastSyncedSequence.value,
        )
        const baseState = commitLog.replayCommits(checkpoint, commitsToBase)

        // 2. Replay remote commits from base to get "theirs". A synthetic
        // checkpoint anchors the replay at the last-synced sequence.
        const theirsCheckpoint: Checkpoint = {
          id: 'merge-base',
          mapId: commitLog.mapId.value,
          commitId: 'merge-base',
          sequence: lastSyncedSequence.value,
          branchId: commitLog.activeBranchId.value,
          model: baseState.model,
          timestamp: new Date().toISOString(),
        }
        const sortedRemote = result.commits.sort((a, b) => a.sequence - b.sequence)
        const theirsState = commitLog.replayCommits(theirsCheckpoint, sortedRemote)

        // 3. Current local state is "ours" - merge (model-only)
        const mergeResult = threeWayMerge({
          base: baseState.model,
          ours: host.getRoot(),
          theirs: theirsState.model,
        })

        if (mergeResult.success) {
          // Clean merge - install via host, then push the merged result so
          // the remote converges on the same state.
          host.applyMerged(mergeResult.mergedModel)
          lastSyncedSequence.value = result.remoteHead
          lastSyncedAt.value = new Date().toISOString()
          status.value = 'idle'
          resetRetry()
          persistCursor()
          await push()
          return true
        } else {
          // Conflicts - surface via host callback; user resolves in the UI.
          host.onConflict(mergeResult)
          status.value = 'conflict'
          return false
        }
      }
    } catch (e: any) {
      status.value = 'error'
      recordSyncError('pull', e, mapId, branchId)
      scheduleRetry(() => pull())
      return false
    }
  }
|
|
425
|
+
|
|
426
|
+
/** Full sync cycle: pull then push. */
|
|
427
|
+
async function sync(): Promise<boolean> {
|
|
428
|
+
resetRetry()
|
|
429
|
+
const pulled = await pull()
|
|
430
|
+
if (!pulled) return false
|
|
431
|
+
return push()
|
|
432
|
+
}
|
|
433
|
+
|
|
434
|
+
/**
|
|
435
|
+
* User-initiated sync recovery. Called from the branch manager dropdown
|
|
436
|
+
* when the engine is in `error` or `conflict` state. Clears the retry
|
|
437
|
+
* backoff, re-primes the cursor from IDB (in case navigation or another
|
|
438
|
+
* tab moved it), and runs a full sync cycle. Returns true on success.
|
|
439
|
+
*/
|
|
440
|
+
async function retryNow(): Promise<boolean> {
|
|
441
|
+
resetRetry()
|
|
442
|
+
status.value = 'idle'
|
|
443
|
+
const mapId = commitLog.mapId.value
|
|
444
|
+
const branchId = commitLog.activeBranchId.value
|
|
445
|
+
if (mapId) await primeCursor(mapId, branchId)
|
|
446
|
+
return sync()
|
|
447
|
+
}
|
|
448
|
+
|
|
449
|
+
  // Public surface. State refs are wrapped in readonly() so consumers cannot
  // mutate engine internals directly; mutation happens only through the
  // exposed operations.
  return {
    status: readonly(status),
    lastSyncedSequence: readonly(lastSyncedSequence),
    lastSyncedAt: readonly(lastSyncedAt),
    lastError: readonly(lastError),
    lastErrorCategory: readonly(lastErrorCategory),
    enabled: readonly(enabled),
    target: readonly(target),
    pendingCount,
    activate,
    deactivate,
    updateLastSynced,
    setPushFilter,
    primeCursor,
    retryNow,
    schedulePush,
    push,
    pull,
    sync,
  }
}
|
|
470
|
+
|
|
471
|
+
// ── Test-only: reset singleton state ────────────────────────────────────────
|
|
472
|
+
|
|
473
|
+
export function resetSyncEngineSingleton(): void {
|
|
474
|
+
status.value = 'idle'
|
|
475
|
+
lastSyncedSequence.value = 0
|
|
476
|
+
lastSyncedAt.value = null
|
|
477
|
+
lastError.value = null
|
|
478
|
+
lastErrorCategory.value = null
|
|
479
|
+
enabled.value = false
|
|
480
|
+
target.value = null
|
|
481
|
+
activeAdapter = null
|
|
482
|
+
activeHost = null
|
|
483
|
+
pushFilter = null
|
|
484
|
+
loggedErrorCategories.clear()
|
|
485
|
+
if (flushTimer) {
|
|
486
|
+
clearTimeout(flushTimer)
|
|
487
|
+
flushTimer = null
|
|
488
|
+
}
|
|
489
|
+
if (retryTimer) {
|
|
490
|
+
clearTimeout(retryTimer)
|
|
491
|
+
retryTimer = null
|
|
492
|
+
}
|
|
493
|
+
retryCount = 0
|
|
494
|
+
}
|
|
@@ -0,0 +1,135 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* Reactive Triple Store - Vue wrapper around the pure triple engine.
|
|
3
|
+
*
|
|
4
|
+
* The pure projection, indexing, and RDF serialisation logic lives in
|
|
5
|
+
* `@businessmaps/metaontology/engine/triples`. This module adds Vue reactivity via
|
|
6
|
+
* `computed()` and re-exports everything for backward compatibility.
|
|
7
|
+
*/
|
|
8
|
+
|
|
9
|
+
import { computed, type ComputedRef } from 'vue'
|
|
10
|
+
import type { RootContext } from '@businessmaps/metaontology/types/context'
|
|
11
|
+
import type { M0State } from '@businessmaps/metaontology/types/m0'
|
|
12
|
+
import type { EntityClassId, PredicateId } from '@businessmaps/metaontology/meta/ontology'
|
|
13
|
+
import {
|
|
14
|
+
type Triple,
|
|
15
|
+
projectToTriples,
|
|
16
|
+
buildIndexes,
|
|
17
|
+
sp,
|
|
18
|
+
po,
|
|
19
|
+
spo,
|
|
20
|
+
} from '@businessmaps/metaontology/engine/triples'
|
|
21
|
+
import { projectM0Triples } from '@businessmaps/metaontology/engine/m0Triples'
|
|
22
|
+
|
|
23
|
+
// ── Re-export pure module for backward compatibility ────────────────────────
|
|
24
|
+
|
|
25
|
+
export {
|
|
26
|
+
type Triple,
|
|
27
|
+
type TripleIndexData,
|
|
28
|
+
projectToTriples,
|
|
29
|
+
buildIndexes,
|
|
30
|
+
serialiseAsNTriples,
|
|
31
|
+
serialiseAsTurtle,
|
|
32
|
+
serialiseAsJsonLd,
|
|
33
|
+
} from '@businessmaps/metaontology/engine/triples'
|
|
34
|
+
|
|
35
|
+
// ── Reactive index type ─────────────────────────────────────────────────────
|
|
36
|
+
|
|
37
|
+
export interface TripleIndex {
  /** All triples in the store (reactive; rebuilds when root/m0 change). */
  triples: ComputedRef<Triple[]>
  /** All triples where subject === id. */
  bySubject: (id: string) => Triple[]
  /** All triples where predicate === p. */
  byPredicate: (p: string) => Triple[]
  /** All triples where object === id. */
  byObject: (id: string) => Triple[]
  /** All triples where subject === s AND predicate === p. */
  bySP: (s: string, p: string) => Triple[]
  /** All triples where predicate === p AND object === o. */
  byPO: (p: string, o: string) => Triple[]
  /** Check if a specific triple exists. */
  has: (s: string, p: string, o: string) => boolean
  /** Object IDs where subject === s for predicate p (convenience over bySP). */
  objectIds: (s: string, p: PredicateId) => string[]
  /** Subject IDs where object === o for predicate p (convenience over byPO). */
  subjectIds: (o: string, p: PredicateId) => string[]
  /** First object ID (for 1:1 relationships); undefined when none. */
  firstObjectId: (s: string, p: PredicateId) => string | undefined
  /** First subject ID (for reverse 1:1 lookups); undefined when none. */
  firstSubjectId: (o: string, p: PredicateId) => string | undefined
  /** Entity type classification: entityId → EntityClassId. */
  entityType: (id: string) => EntityClassId | undefined
}
|
|
63
|
+
|
|
64
|
+
// ── Factory ──────────────────────────────────────────────────────────────────
|
|
65
|
+
|
|
66
|
+
const EMPTY: Triple[] = []
|
|
67
|
+
|
|
68
|
+
/**
|
|
69
|
+
* Create a reactive triple index over a RootContext and optional M0State.
|
|
70
|
+
* The getRoot function should return a reactive reference (e.g. `() => store.root`).
|
|
71
|
+
* The optional getM0 function provides M0 state for cross-tier triple projection.
|
|
72
|
+
* The entire index rebuilds reactively when either root or m0 changes.
|
|
73
|
+
*/
|
|
74
|
+
export function createTripleIndex(
|
|
75
|
+
getRoot: () => Readonly<RootContext>,
|
|
76
|
+
getM0?: () => M0State | undefined,
|
|
77
|
+
): TripleIndex {
|
|
78
|
+
const triples = computed(() => {
|
|
79
|
+
const m1 = projectToTriples(getRoot())
|
|
80
|
+
const m0State = getM0?.()
|
|
81
|
+
if (!m0State) return m1
|
|
82
|
+
const m0 = projectM0Triples(m0State, getRoot())
|
|
83
|
+
return [...m1, ...m0]
|
|
84
|
+
})
|
|
85
|
+
const indexes = computed(() => buildIndexes(triples.value))
|
|
86
|
+
|
|
87
|
+
function bySubject(id: string): Triple[] {
|
|
88
|
+
return indexes.value.byS.get(id) ?? EMPTY
|
|
89
|
+
}
|
|
90
|
+
function byPredicate(p: string): Triple[] {
|
|
91
|
+
return indexes.value.byP.get(p) ?? EMPTY
|
|
92
|
+
}
|
|
93
|
+
function byObject(id: string): Triple[] {
|
|
94
|
+
return indexes.value.byO.get(id) ?? EMPTY
|
|
95
|
+
}
|
|
96
|
+
function bySP(s: string, p: string): Triple[] {
|
|
97
|
+
return indexes.value.bySP.get(sp(s, p)) ?? EMPTY
|
|
98
|
+
}
|
|
99
|
+
function byPO(p: string, o: string): Triple[] {
|
|
100
|
+
return indexes.value.byPO.get(po(p, o)) ?? EMPTY
|
|
101
|
+
}
|
|
102
|
+
function has(s: string, p: string, o: string): boolean {
|
|
103
|
+
return indexes.value.bySPO.has(spo(s, p, o))
|
|
104
|
+
}
|
|
105
|
+
function objectIds(s: string, p: PredicateId): string[] {
|
|
106
|
+
return bySP(s, p).map(t => t.object)
|
|
107
|
+
}
|
|
108
|
+
function subjectIds(o: string, p: PredicateId): string[] {
|
|
109
|
+
return byPO(p, o).map(t => t.subject)
|
|
110
|
+
}
|
|
111
|
+
function firstObjectId(s: string, p: PredicateId): string | undefined {
|
|
112
|
+
return bySP(s, p)[0]?.object
|
|
113
|
+
}
|
|
114
|
+
function firstSubjectId(o: string, p: PredicateId): string | undefined {
|
|
115
|
+
return byPO(p, o)[0]?.subject
|
|
116
|
+
}
|
|
117
|
+
function entityType(id: string): EntityClassId | undefined {
|
|
118
|
+
return indexes.value.typeOf.get(id) as EntityClassId | undefined
|
|
119
|
+
}
|
|
120
|
+
|
|
121
|
+
return {
|
|
122
|
+
triples,
|
|
123
|
+
bySubject,
|
|
124
|
+
byPredicate,
|
|
125
|
+
byObject,
|
|
126
|
+
bySP,
|
|
127
|
+
byPO,
|
|
128
|
+
has,
|
|
129
|
+
objectIds,
|
|
130
|
+
subjectIds,
|
|
131
|
+
firstObjectId,
|
|
132
|
+
firstSubjectId,
|
|
133
|
+
entityType,
|
|
134
|
+
}
|
|
135
|
+
}
|
package/index.ts
ADDED
|
@@ -0,0 +1,15 @@
|
|
|
1
|
+
// Public entrypoint for the Nuxt integration package.
|
|
2
|
+
//
|
|
3
|
+
// - Nuxt auto-imports composables from ./composables when this package is used
|
|
4
|
+
// as a layer via `extends: ['@businessmaps/metaontology-nuxt']`.
|
|
5
|
+
// - This barrel is for explicit imports by consumers (and for non-Nuxt tools
|
|
6
|
+
// that want the same dependency).
|
|
7
|
+
|
|
8
|
+
export * from '@businessmaps/metaontology'
|
|
9
|
+
export * from './composables/syncTypes'
|
|
10
|
+
export * from './composables/useCommitLog'
|
|
11
|
+
export * from './composables/useCrossTab'
|
|
12
|
+
export * from './composables/useModelStore'
|
|
13
|
+
export * from './composables/useSyncEngine'
|
|
14
|
+
export * from './composables/useTripleStore'
|
|
15
|
+
|
package/nuxt.config.ts
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
1
|
+
/**
 * Domain-Agnostic Metaontology - Nuxt Layer
 *
 * Provides the universal business modeling vocabulary (Context, Thing, Persona,
 * Action, Workflow, Interface, Event, Measure, Port) as a reusable Nuxt layer.
 *
 * Pure ontology code lives in the companion @businessmaps/metaontology package
 * (zero Vue deps). Vue-reactive composables live in ./composables/ (auto-imported
 * by Nuxt).
 *
 * Consumers extend this layer and provide their own domain models and config.
 */
export default defineNuxtConfig({
  // Intentionally empty: composables in ./composables/ are auto-imported by
  // Nuxt layer convention, and pure ontology code is imported explicitly via
  // relative paths, so no module/alias configuration is needed here.
})
|