@businessmaps/metaontology-nuxt 0.63.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,81 @@
1
+ import { openDB } from 'idb'
2
+ import type { IDBPDatabase } from 'idb'
3
+ import type { BusinessMapsDB } from './idbSchema'
4
+ import { DB_NAME, DB_VERSION } from './idbSchema'
5
+
6
+ // ── Singleton IDB connection ────────────────────────────────────────────────
7
+ //
8
+ // The layer owns the IDB connection because IndexedDB only allows one upgrade
9
+ // callback per database version. Splitting the upgrade between layer and app
10
+ // would create a coordination problem (which side runs first on a fresh user?
11
+ // who creates the legacy stores?). The clean answer is that the layer owns
12
+ // the connection and the upgrade callback creates every store the database
13
+ // needs to function - model-tier stores fully typed, legacy stores typed as
14
+ // `unknown` value (see idbSchema.ts).
15
+ //
16
+ // Consuming apps access the same connection by re-exporting `getDb` and
17
+ // `closeDb` from this module via a thin proxy in their own codebase.
18
+
19
+ let _db: Promise<IDBPDatabase<BusinessMapsDB>> | null = null
20
+
21
+ export function getDb(): Promise<IDBPDatabase<BusinessMapsDB>> {
22
+ if (!_db) {
23
+ _db = openDB<BusinessMapsDB>(DB_NAME, DB_VERSION, {
24
+ upgrade(db, oldVersion) {
25
+ // Legacy stores (created by older versions of the consuming app).
26
+ // Layer creates them with no value typing because the layer never
27
+ // reads or writes them - they exist so the app's helpers can.
28
+ if (!db.objectStoreNames.contains('documents')) {
29
+ db.createObjectStore('documents', { keyPath: 'id' })
30
+ }
31
+ if (!db.objectStoreNames.contains('branches')) {
32
+ db.createObjectStore('branches', { keyPath: 'mapId' })
33
+ }
34
+ if (!db.objectStoreNames.contains('history')) {
35
+ db.createObjectStore('history', { keyPath: 'mapId' })
36
+ }
37
+ if (!db.objectStoreNames.contains('config')) {
38
+ db.createObjectStore('config', { keyPath: 'key' })
39
+ }
40
+ if (!db.objectStoreNames.contains('tabs')) {
41
+ db.createObjectStore('tabs', { keyPath: 'workspaceId' })
42
+ }
43
+ if (!db.objectStoreNames.contains('conversations')) {
44
+ db.createObjectStore('conversations', { keyPath: 'id' })
45
+ }
46
+ if (!db.objectStoreNames.contains('blobs')) {
47
+ db.createObjectStore('blobs', { keyPath: 'id' })
48
+ }
49
+ if (!db.objectStoreNames.contains('sync_queue')) {
50
+ db.createObjectStore('sync_queue', {
51
+ keyPath: 'id',
52
+ autoIncrement: true,
53
+ })
54
+ }
55
+
56
+ // Model-tier stores (commit-sourced persistence, v2).
57
+ if (oldVersion < 2) {
58
+ const commits = db.createObjectStore('commits', { keyPath: 'id' })
59
+ commits.createIndex('by-map-branch-seq', ['mapId', 'branchId', 'sequence'])
60
+
61
+ const checkpoints = db.createObjectStore('checkpoints', { keyPath: 'id' })
62
+ checkpoints.createIndex('by-map-branch-seq', ['mapId', 'branchId', 'sequence'])
63
+
64
+ db.createObjectStore('heads', { keyPath: ['mapId', 'branchId'] })
65
+ }
66
+ },
67
+ }).catch(err => {
68
+ console.warn('[persistence] IndexedDB unavailable - data will not persist this session:', err.message)
69
+ throw err
70
+ })
71
+ }
72
+ return _db
73
+ }
74
+
75
+ /** Close connection and reset singleton (for tests and cleanup). */
76
+ export function closeDb(): void {
77
+ if (_db) {
78
+ _db.then(db => db.close())
79
+ _db = null
80
+ }
81
+ }
@@ -0,0 +1,165 @@
1
+ import type { IDBCommitRecord, IDBCheckpointRecord, IDBBranchHeadRecord } from './idbSchema'
2
+ import { getDb } from './idbConnection'
3
+
4
+ // ── Commit store helpers ──────────────────────────────────────────────
5
+
6
+ export async function saveCommits(commits: IDBCommitRecord[]): Promise<void> {
7
+ if (commits.length === 0) return
8
+ const db = await getDb()
9
+ const tx = db.transaction('commits', 'readwrite')
10
+ for (const commit of commits) {
11
+ await tx.store.put(commit)
12
+ }
13
+ await tx.done
14
+ }
15
+
16
+ export async function loadCommitsSince(
17
+ mapId: string,
18
+ branchId: string,
19
+ sinceSequence: number,
20
+ ): Promise<IDBCommitRecord[]> {
21
+ const db = await getDb()
22
+ const index = db.transaction('commits', 'readonly').store.index('by-map-branch-seq')
23
+ const range = IDBKeyRange.bound(
24
+ [mapId, branchId, sinceSequence + 1],
25
+ [mapId, branchId, Number.MAX_SAFE_INTEGER],
26
+ )
27
+ return index.getAll(range)
28
+ }
29
+
30
+ // ── Checkpoint store helpers ─────────────────────────────────────────
31
+
32
+ export async function saveCheckpoint(checkpoint: IDBCheckpointRecord): Promise<void> {
33
+ const db = await getDb()
34
+ await db.put('checkpoints', checkpoint)
35
+ }
36
+
37
+ export async function loadLatestCheckpoint(
38
+ mapId: string,
39
+ branchId: string,
40
+ ): Promise<IDBCheckpointRecord | null> {
41
+ const db = await getDb()
42
+ const index = db.transaction('checkpoints', 'readonly').store.index('by-map-branch-seq')
43
+ const range = IDBKeyRange.bound(
44
+ [mapId, branchId, 0],
45
+ [mapId, branchId, Number.MAX_SAFE_INTEGER],
46
+ )
47
+ // Get all and return the last one (highest sequence)
48
+ const all = await index.getAll(range)
49
+ return all.length > 0 ? all[all.length - 1]! : null
50
+ }
51
+
52
+ export async function pruneOldCheckpoints(
53
+ mapId: string,
54
+ branchId: string,
55
+ keepCount: number,
56
+ ): Promise<void> {
57
+ const db = await getDb()
58
+ const index = db.transaction('checkpoints', 'readwrite').store.index('by-map-branch-seq')
59
+ const range = IDBKeyRange.bound(
60
+ [mapId, branchId, 0],
61
+ [mapId, branchId, Number.MAX_SAFE_INTEGER],
62
+ )
63
+ const all = await index.getAll(range)
64
+ if (all.length <= keepCount) return
65
+
66
+ const tx = db.transaction('checkpoints', 'readwrite')
67
+ const toDelete = all.slice(0, all.length - keepCount)
68
+ for (const cp of toDelete) {
69
+ await tx.store.delete(cp.id)
70
+ }
71
+ await tx.done
72
+ }
73
+
74
+ // ── Branch head store helpers ────────────────────────────────────────
75
+
76
+ export async function saveBranchHead(head: IDBBranchHeadRecord): Promise<void> {
77
+ const db = await getDb()
78
+ await db.put('heads', head)
79
+ }
80
+
81
+ // ── Sync cursor persistence ───────────────────────────────────────────
82
+ //
83
+ // The sync engine's `lastSyncedSequence` was originally ephemeral - held
84
+ // only in the engine singleton and reset to 0 on every fresh page load.
85
+ // That caused two user-visible failures:
86
+ //
87
+ // 1. After any refresh, the engine re-pushed every local commit starting
88
+ // from sequence 0, which 409'd against whatever the server already had.
89
+ // 2. Navigating back then forward during an AI tool run could leave the
90
+ // engine thinking "0 commits synced" while the server had received
91
+ // 200+ of them via `sync.schedulePush()` firing off `commits.value`
92
+ // before the debounced IDB flush caught up.
93
+ //
94
+ // We now persist the cursor alongside the branch head in the `config`
95
+ // store under a keyed entry so it survives navigation, refresh, and tab
96
+ // close. The value is a small JSON object keyed by `${mapId}:${branchId}`.
97
+ //
98
+ // `config` is typed as `unknown` at the layer (it's an app-owned legacy
99
+ // store). We read/write via cast. If a future schema lifts it into the
100
+ // typed layer surface, these helpers become the migration site.
101
+
102
// The `config` store has `keyPath: 'key'` (see idbConnection.ts) so records
// must carry the primary key inline as a `key` field. The value is a small
// JSON object keyed by `sync-cursor:${mapId}:${branchId}`.
interface SyncCursorRecord {
  key: string                 // primary key: `sync-cursor:${mapId}:${branchId}`
  mapId: string               // map this cursor belongs to
  branchId: string            // branch within that map
  lastSyncedSequence: number  // highest commit sequence confirmed pushed to the target
  updatedAt: string           // ISO timestamp of the last save (new Date().toISOString())
}
112
+
113
+ function syncCursorKey(mapId: string, branchId: string): string {
114
+ return `sync-cursor:${mapId}:${branchId}`
115
+ }
116
+
117
+ export async function saveSyncCursor(
118
+ mapId: string,
119
+ branchId: string,
120
+ lastSyncedSequence: number,
121
+ ): Promise<void> {
122
+ const db = await getDb()
123
+ const record: SyncCursorRecord = {
124
+ key: syncCursorKey(mapId, branchId),
125
+ mapId,
126
+ branchId,
127
+ lastSyncedSequence,
128
+ updatedAt: new Date().toISOString(),
129
+ }
130
+ // The `config` store is typed as `unknown` in the layer schema - the
131
+ // double-cast tells TypeScript we know what we're doing. The key is
132
+ // pulled from `record.key` via the store's keyPath.
133
+ await db.put('config', record as unknown as never)
134
+ }
135
+
136
+ export async function loadSyncCursor(
137
+ mapId: string,
138
+ branchId: string,
139
+ ): Promise<number> {
140
+ const db = await getDb()
141
+ const record = (await db.get('config', syncCursorKey(mapId, branchId))) as
142
+ | SyncCursorRecord
143
+ | undefined
144
+ return record?.lastSyncedSequence ?? 0
145
+ }
146
+
147
+ export async function clearSyncCursor(
148
+ mapId: string,
149
+ branchId: string,
150
+ ): Promise<void> {
151
+ const db = await getDb()
152
+ await db.delete('config', syncCursorKey(mapId, branchId))
153
+ }
154
+
155
+ // ── Storage quota helpers ─────────────────────────────────────────────
156
+
157
+ export async function checkStorageQuota(): Promise<{
158
+ usage: number
159
+ quota: number
160
+ percentUsed: number
161
+ } | null> {
162
+ if (!navigator.storage?.estimate) return null
163
+ const { usage = 0, quota = 0 } = await navigator.storage.estimate()
164
+ return { usage, quota, percentUsed: quota > 0 ? (usage / quota) * 100 : 0 }
165
+ }
@@ -0,0 +1,93 @@
1
+ import type { DBSchema } from 'idb'
2
+ import type { RootContext } from '@businessmaps/metaontology/types/context'
3
+ import type { DispatchableCommand } from '@businessmaps/metaontology/types/commands'
4
+ import type { M0State } from '@businessmaps/metaontology/types/m0'
5
+
6
+ // ── Model-tier records (layer-owned persistence) ────────────────────────────
7
+ //
8
+ // These three records are the model-tier persistence layer. They live in the
9
+ // ontology layer because the engine owns commit-sourced state derivation.
10
+ // The consuming app's view-state records (documents, branches, history, tabs,
11
+ // conversations, blobs, sync_queue) are typed in the app's own schema file.
12
+ // The layer's BusinessMapsDB schema below names those
13
+ // stores so the upgrade callback can create them, but types their values as
14
+ // `unknown` - the layer never reads or writes them directly.
15
+
16
// One persisted commit: a reversible command applied to a map branch.
// Commits are ordered by `sequence` within a (mapId, branchId) pair and
// linked into a chain via `parentId`.
export interface IDBCommitRecord {
  id: string                    // primary key of the `commits` store
  mapId: string
  branchId: string
  sequence: number              // position in the branch's commit log
  command: DispatchableCommand  // forward operation
  inverse: DispatchableCommand  // undo operation
  timestamp: string             // creation time; presumably ISO-8601 - confirm at producer
  deviceId: string              // originating device
  parentId: string | null       // previous commit id; null for the first commit
}
27
+
28
// Snapshot of derived state at a specific commit, so state derivation can
// replay from the nearest checkpoint instead of from sequence 0.
export interface IDBCheckpointRecord {
  id: string          // primary key of the `checkpoints` store
  mapId: string
  branchId: string
  commitId: string    // commit this snapshot was taken at
  sequence: number    // sequence of that commit (indexed for range scans)
  model: RootContext  // derived model state at that commit
  m0?: M0State        // optional M0-tier state, when present
  timestamp: string   // snapshot time; presumably ISO-8601 - confirm at producer
}
38
+
39
// Current head pointer for a branch. Stored in the `heads` store keyed by
// the composite [mapId, branchId] (see BusinessMapsDB below).
export interface IDBBranchHeadRecord {
  mapId: string
  branchId: string
  name: string               // human-readable branch name
  headCommitId: string       // latest commit on this branch
  forkPointCommitId: string  // commit where this branch diverged from its parent
  parentBranchId: string     // branch this one was forked from
  createdAt: string          // branch creation time
}
48
+
49
+ // ── BusinessMapsDB ──────────────────────────────────────────────────────────
50
+ //
51
+ // The full IDB schema for the `businessmaps` database. The layer owns the
52
+ // connection (because IDB only allows one upgrade callback per DB version),
53
+ // so the layer's schema must name every store. Legacy stores have their value
54
+ // types narrowed to `unknown` - the layer doesn't import app types. The
55
+ // consuming app re-exports a typed view of the same database in its own
56
+ // schema file for its own helpers.
57
+
58
export interface BusinessMapsDB extends DBSchema {
  // ── Model-tier (layer-owned, fully typed) ──
  commits: {
    key: string // IDBCommitRecord.id (keyPath 'id')
    value: IDBCommitRecord
    // Composite index for range scans over a single branch's log,
    // ordered by ascending sequence.
    indexes: { 'by-map-branch-seq': [string, string, number] }
  }
  checkpoints: {
    key: string // IDBCheckpointRecord.id (keyPath 'id')
    value: IDBCheckpointRecord
    indexes: { 'by-map-branch-seq': [string, string, number] }
  }
  heads: {
    key: [string, string] // composite keyPath [mapId, branchId]
    value: IDBBranchHeadRecord
  }

  // ── Legacy / canvas-tier (app-owned, narrowed to unknown at the layer) ──
  // The layer's upgrade callback creates these stores but never touches
  // their contents; the consuming app types them in its own schema file.
  documents: { key: string; value: unknown }
  branches: { key: string; value: unknown }
  history: { key: string; value: unknown }
  config: { key: string; value: unknown }
  tabs: { key: string; value: unknown }
  conversations: { key: string; value: unknown }
  blobs: { key: string; value: unknown }
  sync_queue: {
    key: number // auto-incremented integer primary key
    value: unknown
    autoIncrement: true
  }
}
89
+
90
// Database identity. Bump DB_VERSION whenever the upgrade callback in
// idbConnection.ts gains new stores or indexes.
export const DB_NAME = 'businessmaps'
export const DB_VERSION = 3

// Names of the layer-owned model-tier stores; presumably used by consumers
// to enumerate or clear layer data - confirm at call sites.
export const MODEL_STORE_NAMES = ['commits', 'checkpoints', 'heads'] as const
@@ -0,0 +1,145 @@
1
+ import type { Commit } from '@businessmaps/metaontology/types/commits'
2
+
3
+ // ── Sync target descriptor ──────────────────────────────────────────────────
4
+ // A named destination where commits are pushed. Maps without auth have no
5
+ // target (local only). The target is the declared identity of sync, not an
6
+ // implementation detail.
7
+
8
// Where commits can be pushed: the hosted cloud service or a local folder.
export type SyncTargetKind = 'cloud' | 'filesystem'

export interface SyncTargetDescriptor {
  kind: SyncTargetKind
  label: string // user-facing name, e.g. "Business Maps Cloud", "~/projects/mymap"
  icon: 'cloud' | 'folder' // which icon the UI renders for this target
}
15
+
16
+ // ── Sync status ─────────────────────────────────────────────────────────────
17
+
18
// Lifecycle state of the sync engine as surfaced to the UI. 'conflict'
// corresponds to a 409 push rejection (remote has newer changes); 'error'
// is accompanied by a SyncErrorCategory for friendly messaging.
export type SyncStatus = 'idle' | 'pushing' | 'pulling' | 'conflict' | 'error'
19
+
20
+ // ── Error categories ────────────────────────────────────────────────────────
21
+ // Classification of sync failures so the UI can show a friendly message and
22
+ // the engine can adjust its retry/log behavior. The raw error string is still
23
+ // available via `lastError` for advanced views; `errorCategory` drives default
24
+ // UX. Add new categories here as new failure modes appear in the wild.
25
+
26
// Keep this union in sync with classifySyncError (producer) and
// friendlySyncErrorMessage (consumer) below.
export type SyncErrorCategory =
  | 'network' // generic network failure (DNS, fetch failed, ECONNREFUSED, offline)
  | 'cors' // CORS preflight rejection - usually a misconfigured bucket
  | 'auth' // 401/403 - token expired, no permission
  | 'conflict' // 409 - remote has newer changes (covered by status='conflict' too)
  | 'server' // 5xx - remote service is broken
  | 'crypto' // missing/invalid encryption key
  | 'unknown' // doesn't match any of the above
34
+
35
+ /**
36
+ * Classify a thrown error or error message into a SyncErrorCategory.
37
+ *
38
+ * Used by both `useSyncEngine` (to drive log throttling and category-aware
39
+ * retry decisions) and the UI (to render friendly status text). Pure: takes
40
+ * any value, returns one category. Defensive against unknown error shapes.
41
+ */
42
+ export function classifySyncError(e: unknown): SyncErrorCategory {
43
+ if (!e) return 'unknown'
44
+
45
+ // Status-code-based classification - most reliable signal when present.
46
+ const status = (e as { statusCode?: number; status?: number }).statusCode
47
+ ?? (e as { statusCode?: number; status?: number }).status
48
+ if (status === 401 || status === 403) return 'auth'
49
+ if (status === 409) return 'conflict'
50
+ if (status && status >= 500 && status < 600) return 'server'
51
+
52
+ const message = e instanceof Error ? e.message : String(e)
53
+ const lower = message.toLowerCase()
54
+
55
+ // CORS-specific patterns. Browsers report CORS failures as "fetch failed"
56
+ // or "Failed to fetch" with no status code (preflight blocked). The most
57
+ // reliable signal is `ERR_FAILED` in the message or a fetch failure with
58
+ // no status against a known cross-origin endpoint.
59
+ if (lower.includes('cors')) return 'cors'
60
+ if (lower.includes('err_failed') && lower.includes('fetch')) return 'cors'
61
+
62
+ // Network-down / unreachable patterns. The two distinct patterns here
63
+ // come from different runtimes:
64
+ // - Browser fetch: "Failed to fetch", "NetworkError", and the bare
65
+ // "net::ERR_FAILED" Chrome surfaces when the OS-level connection
66
+ // cannot complete (also surfaces for opaque CORS preflight failures
67
+ // where the browser refuses to expose the response to JS).
68
+ // - Node fetch: ECONNREFUSED, ENOTFOUND, ETIMEDOUT.
69
+ if (lower.includes('failed to fetch')) return 'network'
70
+ if (lower.includes('networkerror')) return 'network'
71
+ if (lower.includes('err_failed')) return 'network'
72
+ if (lower.includes('econnrefused')) return 'network'
73
+ if (lower.includes('enotfound')) return 'network'
74
+ if (lower.includes('etimedout')) return 'network'
75
+ if (lower.includes('offline')) return 'network'
76
+
77
+ // Crypto/auth-key cases (the adapter returns these as Error('No encryption key')).
78
+ if (lower.includes('encryption key')) return 'crypto'
79
+
80
+ return 'unknown'
81
+ }
82
+
83
+ /**
84
+ * Map a SyncErrorCategory to a short, user-facing status string. Used in the
85
+ * branch manager dropdown and the sync indicator. Keep these terse - the
86
+ * dropdown is narrow, and the user wants to know "is sync working?" first,
87
+ * "why not?" second.
88
+ */
89
+ export function friendlySyncErrorMessage(category: SyncErrorCategory): string {
90
+ switch (category) {
91
+ case 'network': return 'Cloud unreachable - working locally'
92
+ case 'cors': return 'Cloud sync misconfigured (CORS)'
93
+ case 'auth': return 'Sign-in expired'
94
+ case 'conflict': return 'Remote has newer changes - pull required'
95
+ case 'server': return 'Cloud sync temporarily unavailable'
96
+ case 'crypto': return 'Encryption key missing'
97
+ case 'unknown': return 'Sync error'
98
+ }
99
+ }
100
+
101
+ // ── Adapter result types ────────────────────────────────────────────────────
102
+
103
// Outcome of one push attempt. On failure, `error` carries the raw message
// and `conflict` flags the 409 case specifically.
export interface PushResult {
  success: boolean
  newHeadSequence: number // remote head after the push - presumably unchanged on failure; confirm in adapters
  error?: string // raw error string for advanced views
  conflict?: boolean // 409 - remote has newer changes
}
109
+
110
// Outcome of one pull attempt: the commits received plus the server's
// current head sequence.
export interface PullResult {
  success: boolean
  commits: Commit[] // commits received since the requested sequence
  remoteHead: number // server's current head sequence
  error?: string // raw error string when success is false
}
116
+
117
+ // ── Sync adapter interface ──────────────────────────────────────────────────
118
+ // Transport-agnostic contract for pushing/pulling commits to a sync target.
119
+ // Encryption, HTTP, presigned URLs, filesystem writes - all internal to the
120
+ // adapter. The sync engine doesn't care how commits move, only that they do.
121
+
122
export interface SyncAdapter {
  /** Declared identity of the sync destination (kind, label, icon). */
  readonly descriptor: SyncTargetDescriptor

  /**
   * Push local commits to the remote target.
   * `baseSequence` is the sequence the push is based on - presumably
   * compared against the remote head for optimistic locking, with a
   * mismatch reported via PushResult.conflict; confirm in adapter impls.
   */
  push(
    mapId: string,
    branchId: string,
    commits: Commit[],
    baseSequence: number,
  ): Promise<PushResult>

  /** Pull remote commits since a given sequence. */
  pull(
    mapId: string,
    branchId: string,
    sinceSequence: number,
  ): Promise<PullResult>

  /** Get the current remote head sequence for optimistic locking. */
  getRemoteHead(
    mapId: string,
    branchId: string,
  ): Promise<number>
}