@myrialabs/clopen 0.1.4 → 0.1.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37) hide show
  1. package/backend/lib/chat/stream-manager.ts +8 -0
  2. package/backend/lib/database/migrations/022_add_snapshot_changes_column.ts +35 -0
  3. package/backend/lib/database/migrations/index.ts +7 -0
  4. package/backend/lib/database/queries/snapshot-queries.ts +7 -4
  5. package/backend/lib/files/file-watcher.ts +34 -0
  6. package/backend/lib/project/status-manager.ts +6 -4
  7. package/backend/lib/snapshot/snapshot-service.ts +471 -316
  8. package/backend/lib/terminal/pty-session-manager.ts +1 -32
  9. package/backend/ws/chat/stream.ts +45 -2
  10. package/backend/ws/snapshot/restore.ts +77 -67
  11. package/frontend/lib/components/chat/ChatInterface.svelte +14 -14
  12. package/frontend/lib/components/chat/input/ChatInput.svelte +2 -2
  13. package/frontend/lib/components/chat/input/components/ChatInputActions.svelte +1 -1
  14. package/frontend/lib/components/chat/input/components/EngineModelPicker.svelte +8 -3
  15. package/frontend/lib/components/chat/input/composables/use-textarea-resize.svelte.ts +12 -2
  16. package/frontend/lib/components/chat/tools/AskUserQuestionTool.svelte +3 -8
  17. package/frontend/lib/components/checkpoint/TimelineModal.svelte +222 -30
  18. package/frontend/lib/components/common/MonacoEditor.svelte +14 -0
  19. package/frontend/lib/components/common/xterm/XTerm.svelte +9 -0
  20. package/frontend/lib/components/common/xterm/xterm-service.ts +9 -0
  21. package/frontend/lib/components/git/DiffViewer.svelte +16 -2
  22. package/frontend/lib/components/history/HistoryModal.svelte +3 -4
  23. package/frontend/lib/components/settings/appearance/AppearanceSettings.svelte +59 -0
  24. package/frontend/lib/components/terminal/Terminal.svelte +1 -7
  25. package/frontend/lib/components/workspace/DesktopNavigator.svelte +11 -19
  26. package/frontend/lib/components/workspace/MobileNavigator.svelte +4 -15
  27. package/frontend/lib/components/workspace/panels/FilesPanel.svelte +3 -2
  28. package/frontend/lib/components/workspace/panels/GitPanel.svelte +3 -2
  29. package/frontend/lib/services/notification/global-stream-monitor.ts +56 -16
  30. package/frontend/lib/services/snapshot/snapshot.service.ts +71 -32
  31. package/frontend/lib/stores/core/presence.svelte.ts +63 -1
  32. package/frontend/lib/stores/features/settings.svelte.ts +9 -1
  33. package/frontend/lib/stores/features/terminal.svelte.ts +6 -0
  34. package/frontend/lib/stores/ui/workspace.svelte.ts +4 -3
  35. package/package.json +1 -1
  36. package/shared/types/database/schema.ts +18 -0
  37. package/shared/types/stores/settings.ts +2 -0
@@ -1,44 +1,77 @@
1
1
  /**
2
- * Snapshot Service for Time Travel Feature
2
+ * Snapshot Service for Time Travel Feature (v2 - Session-Scoped)
3
3
  *
4
- * Uses git-like content-addressable blob storage for efficient snapshots:
5
- * - File contents stored as compressed blobs in ~/.clopen/snapshots/blobs/
6
- * - Each snapshot has a tree file mapping filepath -> blob hash
7
- * - DB only stores lightweight metadata and hash references
8
- * - Deduplication: identical file content across snapshots stored once
9
- * - mtime cache: skip re-reading files that haven't changed
10
- * - Respects .gitignore rules (via git ls-files or manual parsing)
11
- * - All files read/written as Buffer (binary-safe for images, PDFs, etc.)
4
+ * Architecture:
5
+ * - Session baseline: hash-only scan at session start, background blob storage
6
+ * - Per-checkpoint delta: only stores files that changed during the stream
7
+ * - Session-scoped restore: bidirectional (forward + backward) using session_changes
8
+ * - Cross-session conflict detection: warns when restoring would affect other sessions' changes
9
+ *
10
+ * Storage:
11
+ * - Blob store: ~/.clopen/snapshots/blobs/ (content-addressable, deduped, gzipped)
12
+ * - DB: lightweight metadata + session_changes JSON
12
13
  */
13
14
 
14
15
  import fs from 'fs/promises';
15
16
  import path from 'path';
16
- import { snapshotQueries } from '../database/queries';
17
+ import { snapshotQueries, sessionQueries, messageQueries } from '../database/queries';
18
+ import { getDatabase } from '../database/index';
17
19
  import { blobStore, type TreeMap } from './blob-store';
18
20
  import { getSnapshotFiles } from './gitignore';
19
- import type { MessageSnapshot, DeltaChanges } from '$shared/types/database/schema';
21
+ import { fileWatcher } from '../files/file-watcher';
22
+ import type { MessageSnapshot, SessionScopedChanges } from '$shared/types/database/schema';
20
23
  import { calculateFileChangeStats } from '$shared/utils/diff-calculator';
21
24
  import { debug } from '$shared/utils/logger';
22
25
 
23
- interface FileSnapshot {
24
- [filepath: string]: Buffer; // filepath -> content (Buffer for binary safety)
25
- }
26
-
27
26
  interface SnapshotMetadata {
28
27
  totalFiles: number;
29
28
  totalSize: number;
30
29
  capturedAt: string;
31
- snapshotType: 'full' | 'delta';
30
+ snapshotType: 'delta';
32
31
  deltaSize?: number;
33
- storageFormat?: 'blob-store';
32
+ storageFormat: 'blob-store';
34
33
  }
35
34
 
36
35
  // Maximum file size to include (5MB)
37
36
  const MAX_FILE_SIZE = 5 * 1024 * 1024;
38
37
 
38
+ /**
39
+ * Conflict information for a single file during restore
40
+ */
41
+ export interface RestoreConflict {
42
+ filepath: string;
43
+ modifiedBySessionId: string;
44
+ modifiedBySnapshotId: string;
45
+ modifiedAt: string;
46
+ restoreContent?: string;
47
+ currentContent?: string;
48
+ }
49
+
50
+ /**
51
+ * Result of conflict detection before restore
52
+ */
53
+ export interface RestoreConflictCheck {
54
+ hasConflicts: boolean;
55
+ conflicts: RestoreConflict[];
56
+ checkpointsToUndo: string[];
57
+ }
58
+
59
+ /**
60
+ * User's resolution decision for each conflicting file
61
+ */
62
+ export interface ConflictResolution {
63
+ [filepath: string]: 'restore' | 'keep';
64
+ }
65
+
39
66
  export class SnapshotService {
40
67
  private static instance: SnapshotService;
41
68
 
69
+ /**
70
+ * Per-session running tree: sessionId → TreeMap
71
+ * Updated after each capture and restore.
72
+ */
73
+ private sessionBaselines = new Map<string, TreeMap>();
74
+
42
75
  private constructor() {}
43
76
 
44
77
  static getInstance(): SnapshotService {
@@ -48,87 +81,170 @@ export class SnapshotService {
48
81
  return SnapshotService.instance;
49
82
  }
50
83
 
84
+ // ========================================================================
85
+ // Session Baseline
86
+ // ========================================================================
87
+
51
88
  /**
52
- * Capture snapshot of current project state using blob store.
53
- * Only changed files are read and stored (mtime cache + hash dedup).
54
- * Respects .gitignore rules for file exclusion.
89
+ * Initialize session baseline: hash-only scan + blob storage.
90
+ * Called when a session is first activated for a project.
55
91
  */
56
- async captureSnapshot(
92
+ async initializeSessionBaseline(
57
93
  projectPath: string,
58
- projectId: string,
59
- sessionId: string,
60
- messageId: string
61
- ): Promise<MessageSnapshot> {
94
+ sessionId: string
95
+ ): Promise<void> {
96
+ if (this.sessionBaselines.has(sessionId)) return;
97
+
62
98
  try {
63
- // Scan files respecting .gitignore (git ls-files or manual parsing)
64
99
  const files = await getSnapshotFiles(projectPath);
65
-
66
- // Build current tree: hash each file using blob store
67
- const currentTree: TreeMap = {};
68
- const readContents = new Map<string, Buffer>();
69
- let totalSize = 0;
100
+ const baseline: TreeMap = {};
70
101
 
71
102
  for (const filepath of files) {
72
103
  try {
73
104
  const stat = await fs.stat(filepath);
74
- if (stat.size > MAX_FILE_SIZE) {
75
- debug.warn('snapshot', `Skipping large file: ${filepath} (${stat.size} bytes)`);
76
- continue;
77
- }
105
+ if (stat.size > MAX_FILE_SIZE) continue;
78
106
 
79
107
  const relativePath = path.relative(projectPath, filepath);
80
108
  const normalizedPath = relativePath.replace(/\\/g, '/');
81
109
 
82
110
  const result = await blobStore.hashFile(normalizedPath, filepath);
83
- currentTree[normalizedPath] = result.hash;
84
- totalSize += stat.size;
85
-
86
- if (result.content !== null) {
87
- readContents.set(normalizedPath, result.content);
88
- }
89
- } catch (error) {
90
- debug.warn('snapshot', `Could not process file ${filepath}:`, error);
111
+ baseline[normalizedPath] = result.hash;
112
+ } catch {
113
+ // Skip unreadable files
91
114
  }
92
115
  }
93
116
 
94
- // Get previous snapshot's tree for delta computation
117
+ this.sessionBaselines.set(sessionId, baseline);
118
+ debug.log('snapshot', `Session baseline initialized: ${Object.keys(baseline).length} files for session ${sessionId}`);
119
+ } catch (error) {
120
+ debug.error('snapshot', 'Error initializing session baseline:', error);
121
+ }
122
+ }
123
+
124
+ private async getSessionBaseline(
125
+ projectPath: string,
126
+ sessionId: string
127
+ ): Promise<TreeMap> {
128
+ if (!this.sessionBaselines.has(sessionId)) {
129
+ await this.initializeSessionBaseline(projectPath, sessionId);
130
+ }
131
+ return this.sessionBaselines.get(sessionId) || {};
132
+ }
133
+
134
+ // ========================================================================
135
+ // Snapshot Capture
136
+ // ========================================================================
137
+
138
+ /**
139
+ * Capture snapshot of current project state.
140
+ * Only processes files detected as dirty by the file watcher.
141
+ * Stores session-scoped changes (oldHash/newHash per file).
142
+ */
143
+ async captureSnapshot(
144
+ projectPath: string,
145
+ projectId: string,
146
+ sessionId: string,
147
+ messageId: string
148
+ ): Promise<MessageSnapshot> {
149
+ try {
150
+ const dirtyFiles = fileWatcher.getDirtyFiles(projectId);
151
+
95
152
  const previousSnapshots = snapshotQueries.getBySessionId(sessionId);
96
153
  const previousSnapshot = previousSnapshots.length > 0
97
154
  ? previousSnapshots[previousSnapshots.length - 1]
98
155
  : null;
99
156
 
100
- let previousTree: TreeMap = {};
101
- if (previousSnapshot) {
102
- previousTree = await this.getSnapshotTree(previousSnapshot);
157
+ // FAST PATH: no file changes detected → skip snapshot
158
+ if (dirtyFiles.size === 0 && previousSnapshot) {
159
+ debug.log('snapshot', 'No file changes detected, skipping snapshot');
160
+ return previousSnapshot;
103
161
  }
104
162
 
105
- // Compute delta by comparing tree hashes (fast!)
106
- const delta = this.calculateTreeDelta(previousTree, currentTree);
107
- const deltaSize =
108
- Object.keys(delta.added).length +
109
- Object.keys(delta.modified).length +
110
- delta.deleted.length;
163
+ // Get previous tree (in-memory baseline)
164
+ const previousTree = await this.getSessionBaseline(projectPath, sessionId);
165
+
166
+ // Build current tree incrementally
167
+ let currentTree: TreeMap;
168
+ const sessionChanges: SessionScopedChanges = {};
169
+ const readContents = new Map<string, Buffer>();
170
+
171
+ if (dirtyFiles.size === 0 && !previousSnapshot) {
172
+ // First snapshot ever, no dirty files → initial baseline
173
+ currentTree = { ...previousTree };
174
+ } else if (dirtyFiles.size > 0) {
175
+ // Incremental: start from previous tree, update only dirty files
176
+ currentTree = { ...previousTree };
177
+
178
+ for (const relativePath of dirtyFiles) {
179
+ const fullPath = path.join(projectPath, relativePath);
111
180
 
112
- // Calculate line-level file change stats for changed files only
181
+ try {
182
+ const stat = await fs.stat(fullPath);
183
+ if (stat.size > MAX_FILE_SIZE) {
184
+ if (currentTree[relativePath]) {
185
+ const oldHash = currentTree[relativePath];
186
+ sessionChanges[relativePath] = { oldHash, newHash: '' };
187
+ delete currentTree[relativePath];
188
+ }
189
+ continue;
190
+ }
191
+
192
+ const result = await blobStore.hashFile(relativePath, fullPath);
193
+ const newHash = result.hash;
194
+ const oldHash = previousTree[relativePath] || '';
195
+
196
+ if (oldHash !== newHash) {
197
+ currentTree[relativePath] = newHash;
198
+ sessionChanges[relativePath] = { oldHash, newHash };
199
+
200
+ if (result.content !== null) {
201
+ readContents.set(relativePath, result.content);
202
+ }
203
+
204
+ if (oldHash && !(await blobStore.hasBlob(oldHash))) {
205
+ debug.warn('snapshot', `Old blob missing for ${relativePath} (${oldHash.slice(0, 8)}), restore may be limited`);
206
+ }
207
+ }
208
+ } catch {
209
+ // File was deleted
210
+ if (currentTree[relativePath]) {
211
+ const oldHash = currentTree[relativePath];
212
+ sessionChanges[relativePath] = { oldHash, newHash: '' };
213
+ delete currentTree[relativePath];
214
+ }
215
+ }
216
+ }
217
+ } else {
218
+ currentTree = { ...previousTree };
219
+ }
220
+
221
+ fileWatcher.clearDirtyFiles(projectId);
222
+
223
+ // If no actual changes after processing, skip
224
+ if (Object.keys(sessionChanges).length === 0 && previousSnapshot) {
225
+ debug.log('snapshot', 'No actual file changes after hash comparison, skipping snapshot');
226
+ return previousSnapshot;
227
+ }
228
+
229
+ // Calculate line-level file change stats
113
230
  const fileStats = await this.calculateChangeStats(
114
- previousTree, currentTree, delta, readContents
231
+ previousTree, currentTree, sessionChanges, readContents
115
232
  );
116
233
 
117
234
  const metadata: SnapshotMetadata = {
118
235
  totalFiles: Object.keys(currentTree).length,
119
- totalSize,
236
+ totalSize: 0,
120
237
  capturedAt: new Date().toISOString(),
121
238
  snapshotType: 'delta',
122
- deltaSize,
239
+ deltaSize: Object.keys(sessionChanges).length,
123
240
  storageFormat: 'blob-store'
124
241
  };
125
242
 
126
243
  const snapshotId = `snapshot_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`;
127
244
 
128
- // Store tree file to disk
129
- const treeHash = await blobStore.storeTree(snapshotId, currentTree);
245
+ // Update in-memory baseline
246
+ this.sessionBaselines.set(sessionId, { ...currentTree });
130
247
 
131
- // Store lightweight record in DB (no file content!)
132
248
  const dbSnapshot = snapshotQueries.createSnapshot({
133
249
  id: snapshotId,
134
250
  message_id: messageId,
@@ -138,15 +254,16 @@ export class SnapshotService {
138
254
  project_metadata: metadata,
139
255
  snapshot_type: 'delta',
140
256
  parent_snapshot_id: previousSnapshot?.id,
141
- delta_changes: delta,
257
+ delta_changes: {},
142
258
  files_changed: fileStats.filesChanged,
143
259
  insertions: fileStats.insertions,
144
260
  deletions: fileStats.deletions,
145
- tree_hash: treeHash
261
+ tree_hash: undefined,
262
+ session_changes: sessionChanges
146
263
  });
147
264
 
148
- const typeLabel = previousSnapshot ? 'delta' : 'initial delta';
149
- debug.log('snapshot', `Created ${typeLabel} snapshot [blob-store]: ${deltaSize} changes (${Object.keys(delta.added).length} added, ${Object.keys(delta.modified).length} modified, ${delta.deleted.length} deleted) - ${fileStats.filesChanged} files, +${fileStats.insertions}/-${fileStats.deletions} lines`);
265
+ const changesCount = Object.keys(sessionChanges).length;
266
+ debug.log('snapshot', `Snapshot captured: ${changesCount} changes - ${fileStats.filesChanged} files, +${fileStats.insertions}/-${fileStats.deletions} lines`);
150
267
  return dbSnapshot;
151
268
  } catch (error) {
152
269
  debug.error('snapshot', 'Error capturing snapshot:', error);
@@ -154,328 +271,366 @@ export class SnapshotService {
154
271
  }
155
272
  }
156
273
 
274
+ // ========================================================================
275
+ // Conflict Detection
276
+ // ========================================================================
277
+
157
278
  /**
158
- * Calculate delta between two trees by comparing hashes.
279
+ * Check for conflicts before restoring to a checkpoint.
280
+ * Works bidirectionally (undo and redo).
281
+ *
282
+ * A conflict occurs when a file that would be changed by the restore
283
+ * was also modified by a different session after the reference time.
284
+ * Reference time = min(targetTime, currentHeadTime) to cover both directions.
159
285
  */
160
- private calculateTreeDelta(
161
- previousTree: TreeMap,
162
- currentTree: TreeMap
163
- ): DeltaChanges {
164
- const delta: DeltaChanges = {
165
- added: {},
166
- modified: {},
167
- deleted: []
168
- };
286
+ async checkRestoreConflicts(
287
+ sessionId: string,
288
+ targetCheckpointMessageId: string,
289
+ projectPath?: string
290
+ ): Promise<RestoreConflictCheck> {
291
+ const sessionSnapshots = snapshotQueries.getBySessionId(sessionId);
169
292
 
170
- for (const [filepath, hash] of Object.entries(currentTree)) {
171
- if (!previousTree[filepath]) {
172
- delta.added[filepath] = hash;
173
- } else if (previousTree[filepath] !== hash) {
174
- delta.modified[filepath] = hash;
175
- }
176
- }
293
+ const targetIndex = sessionSnapshots.findIndex(
294
+ s => s.message_id === targetCheckpointMessageId
295
+ );
177
296
 
178
- for (const filepath of Object.keys(previousTree)) {
179
- if (!currentTree[filepath]) {
180
- delta.deleted.push(filepath);
181
- }
297
+ if (targetIndex === -1) {
298
+ return { hasConflicts: false, conflicts: [], checkpointsToUndo: [] };
182
299
  }
183
300
 
184
- return delta;
185
- }
186
-
187
- /**
188
- * Calculate line-level change stats for changed files.
189
- * Only reads blob content for files that actually changed.
190
- */
191
- private async calculateChangeStats(
192
- previousTree: TreeMap,
193
- currentTree: TreeMap,
194
- delta: DeltaChanges,
195
- readContents: Map<string, Buffer>
196
- ): Promise<{ filesChanged: number; insertions: number; deletions: number }> {
197
- const previousSnapshot: Record<string, Buffer> = {};
198
- const currentSnapshot: Record<string, Buffer> = {};
301
+ // Build expected state at target (same bidirectional algorithm as restoreSessionScoped)
302
+ // This determines ALL files that would be affected by the restore
303
+ const expectedState = new Map<string, string>(); // filepath → expectedHash
199
304
 
200
- for (const filepath of Object.keys(delta.added)) {
201
- const hash = currentTree[filepath];
202
- currentSnapshot[filepath] = readContents.get(filepath) ?? await blobStore.readBlob(hash);
305
+ for (let i = 0; i <= targetIndex; i++) {
306
+ const snap = sessionSnapshots[i];
307
+ if (!snap.session_changes) continue;
308
+ try {
309
+ const changes = JSON.parse(snap.session_changes) as SessionScopedChanges;
310
+ for (const [filepath, change] of Object.entries(changes)) {
311
+ expectedState.set(filepath, change.newHash);
312
+ }
313
+ } catch { /* skip malformed */ }
203
314
  }
204
315
 
205
- for (const filepath of Object.keys(delta.modified)) {
206
- const oldHash = previousTree[filepath];
207
- const newHash = currentTree[filepath];
208
- previousSnapshot[filepath] = await blobStore.readBlob(oldHash);
209
- currentSnapshot[filepath] = readContents.get(filepath) ?? await blobStore.readBlob(newHash);
316
+ for (let i = targetIndex + 1; i < sessionSnapshots.length; i++) {
317
+ const snap = sessionSnapshots[i];
318
+ if (!snap.session_changes) continue;
319
+ try {
320
+ const changes = JSON.parse(snap.session_changes) as SessionScopedChanges;
321
+ for (const [filepath, change] of Object.entries(changes)) {
322
+ if (!expectedState.has(filepath)) {
323
+ expectedState.set(filepath, change.oldHash);
324
+ }
325
+ }
326
+ } catch { /* skip malformed */ }
210
327
  }
211
328
 
212
- for (const filepath of delta.deleted) {
213
- const oldHash = previousTree[filepath];
214
- if (oldHash) {
215
- previousSnapshot[filepath] = await blobStore.readBlob(oldHash);
216
- }
329
+ if (expectedState.size === 0) {
330
+ return { hasConflicts: false, conflicts: [], checkpointsToUndo: [] };
217
331
  }
218
332
 
219
- return calculateFileChangeStats(previousSnapshot, currentSnapshot);
220
- }
333
+ // Filter out files already in expected state on disk (no actual change needed)
334
+ if (projectPath) {
335
+ for (const [filepath, expectedHash] of expectedState) {
336
+ const fullPath = path.join(projectPath, filepath);
337
+ let currentHash = '';
338
+ try {
339
+ const content = await fs.readFile(fullPath);
340
+ currentHash = blobStore.hashContent(content);
341
+ } catch {
342
+ // File doesn't exist on disk
343
+ currentHash = '';
344
+ }
345
+ if (currentHash === expectedHash) {
346
+ expectedState.delete(filepath);
347
+ }
348
+ }
221
349
 
222
- /**
223
- * Get the tree map for a snapshot.
224
- * New format: read from tree file on disk.
225
- * Old format: reconstruct from delta chain in DB.
226
- */
227
- private async getSnapshotTree(snapshot: MessageSnapshot): Promise<TreeMap> {
228
- if (snapshot.tree_hash) {
229
- try {
230
- return await blobStore.readTree(snapshot.id);
231
- } catch (err) {
232
- debug.warn('snapshot', `Could not read tree file for ${snapshot.id}, falling back to chain replay:`, err);
350
+ if (expectedState.size === 0) {
351
+ return { hasConflicts: false, conflicts: [], checkpointsToUndo: [] };
233
352
  }
234
353
  }
235
354
 
236
- // Old format: reconstruct complete state from delta chain (returns string content)
237
- const fileSnapshot = await this.reconstructSnapshotLegacy(snapshot);
238
-
239
- // Convert legacy FileSnapshot to TreeMap by hashing and storing each file as blob
240
- const tree: TreeMap = {};
241
- for (const [filepath, content] of Object.entries(fileSnapshot)) {
242
- const hash = await blobStore.storeBlob(content);
243
- tree[filepath] = hash;
355
+ // Determine reference time for cross-session conflict check
356
+ // Use min(targetTime, currentHeadTime) to cover both undo and redo
357
+ const targetSnapshot = sessionSnapshots[targetIndex];
358
+ const targetTime = targetSnapshot.created_at;
359
+ let referenceTime = targetTime;
360
+
361
+ const currentHead = sessionQueries.getHead(sessionId);
362
+ if (currentHead) {
363
+ // Try direct match (HEAD is a checkpoint message with a snapshot)
364
+ const directMatch = sessionSnapshots.find(s => s.message_id === currentHead);
365
+ if (directMatch) {
366
+ if (directMatch.created_at < targetTime) {
367
+ referenceTime = directMatch.created_at;
368
+ }
369
+ } else {
370
+ // HEAD is a session end (assistant msg), find its checkpoint snapshot
371
+ const headMsg = messageQueries.getById(currentHead);
372
+ if (headMsg) {
373
+ for (let i = sessionSnapshots.length - 1; i >= 0; i--) {
374
+ if (sessionSnapshots[i].created_at <= headMsg.timestamp) {
375
+ if (sessionSnapshots[i].created_at < targetTime) {
376
+ referenceTime = sessionSnapshots[i].created_at;
377
+ }
378
+ break;
379
+ }
380
+ }
381
+ }
382
+ }
244
383
  }
245
- return tree;
246
- }
247
384
 
248
- /**
249
- * Restore project to a previous snapshot.
250
- * Only modifies files that are different from current state.
251
- * Uses .gitignore-aware scanning for current state comparison.
252
- */
253
- async restoreSnapshot(
254
- projectPath: string,
255
- snapshot: MessageSnapshot
256
- ): Promise<void> {
257
- try {
258
- const targetState = await this.reconstructSnapshot(snapshot);
385
+ // Check for cross-session conflicts
386
+ const conflicts: RestoreConflict[] = [];
387
+ const projectId = targetSnapshot.project_id;
388
+ const allProjectSnapshots = this.getAllProjectSnapshots(projectId);
259
389
 
260
- // Scan current files respecting .gitignore
261
- const currentFiles = await getSnapshotFiles(projectPath);
262
- const currentState = await this.createFileSnapshot(projectPath, currentFiles);
390
+ for (const otherSnap of allProjectSnapshots) {
391
+ if (otherSnap.session_id === sessionId) continue;
392
+ if (otherSnap.created_at <= referenceTime) continue;
393
+ if (!otherSnap.session_changes) continue;
263
394
 
264
- let restoredCount = 0;
265
- let deletedCount = 0;
395
+ try {
396
+ const otherChanges = JSON.parse(otherSnap.session_changes) as SessionScopedChanges;
397
+ for (const filepath of Object.keys(otherChanges)) {
398
+ if (expectedState.has(filepath)) {
399
+ conflicts.push({
400
+ filepath,
401
+ modifiedBySessionId: otherSnap.session_id,
402
+ modifiedBySnapshotId: otherSnap.id,
403
+ modifiedAt: otherSnap.created_at
404
+ });
405
+ }
406
+ }
407
+ } catch { /* skip malformed */ }
408
+ }
266
409
 
267
- debug.log('snapshot', 'SNAPSHOT RESTORE START');
268
- debug.log('snapshot', `Snapshot ID: ${snapshot.id}`);
269
- debug.log('snapshot', `Message ID: ${snapshot.message_id}`);
270
- debug.log('snapshot', `Project path: ${projectPath}`);
271
- debug.log('snapshot', `Target state files: ${Object.keys(targetState).length}`);
272
- debug.log('snapshot', `Current state files: ${currentFiles.length}`);
410
+ // Deduplicate by filepath (keep the most recent)
411
+ const conflictMap = new Map<string, RestoreConflict>();
412
+ for (const conflict of conflicts) {
413
+ const existing = conflictMap.get(conflict.filepath);
414
+ if (!existing || conflict.modifiedAt > existing.modifiedAt) {
415
+ conflictMap.set(conflict.filepath, conflict);
416
+ }
417
+ }
273
418
 
274
- // Delete files that exist now but not in target state
275
- for (const currentFile of currentFiles) {
276
- const relativePath = path.relative(projectPath, currentFile);
277
- const normalizedPath = relativePath.replace(/\\/g, '/');
419
+ const uniqueConflicts = Array.from(conflictMap.values());
278
420
 
279
- if (!targetState[normalizedPath]) {
421
+ // Populate file contents for diff display
422
+ if (uniqueConflicts.length > 0 && projectPath) {
423
+ await Promise.all(uniqueConflicts.map(async (conflict) => {
424
+ const restoreHash = expectedState.get(conflict.filepath);
425
+ if (restoreHash) {
280
426
  try {
281
- await fs.unlink(currentFile);
282
- debug.log('snapshot', `Deleted: ${currentFile}`);
283
- deletedCount++;
284
- } catch (err) {
285
- debug.warn('snapshot', `Could not delete ${currentFile}:`, err);
427
+ const restoreBuf = await blobStore.readBlob(restoreHash);
428
+ conflict.restoreContent = restoreBuf.toString('utf-8');
429
+ } catch {
430
+ conflict.restoreContent = '(binary or unavailable)';
286
431
  }
432
+ } else {
433
+ conflict.restoreContent = '(file would be deleted)';
287
434
  }
288
- }
289
435
 
290
- // Write only files that are different or don't exist
291
- for (const [relativePath, targetContent] of Object.entries(targetState)) {
292
- const fullPath = path.join(projectPath, relativePath);
293
- const currentContent = currentState[relativePath];
294
-
295
- // Compare as Buffer (binary-safe comparison)
296
- const isDifferent = !currentContent || !currentContent.equals(targetContent);
297
-
298
- if (isDifferent) {
299
- const dir = path.dirname(fullPath);
300
- await fs.mkdir(dir, { recursive: true });
301
- // Write as Buffer directly — no encoding, preserves binary files
302
- await fs.writeFile(fullPath, targetContent);
303
-
304
- const action = currentContent === undefined ? 'Created' : 'Modified';
305
- debug.log('snapshot', `${action}: ${fullPath}`);
306
- restoredCount++;
436
+ try {
437
+ const fullPath = path.join(projectPath, conflict.filepath);
438
+ const currentBuf = await fs.readFile(fullPath);
439
+ conflict.currentContent = currentBuf.toString('utf-8');
440
+ } catch {
441
+ conflict.currentContent = '(file not found on disk)';
307
442
  }
308
- }
309
-
310
- debug.log('snapshot', `Project restored successfully: ${restoredCount} files restored, ${deletedCount} files deleted`);
311
- debug.log('snapshot', 'SNAPSHOT RESTORE COMPLETE');
312
- } catch (error) {
313
- debug.error('snapshot', 'Error restoring snapshot:', error);
314
- throw new Error(`Failed to restore snapshot: ${error}`);
443
+ }));
315
444
  }
316
- }
317
445
 
318
- /**
319
- * Reconstruct the complete file state from a snapshot.
320
- * New format (tree_hash): Read tree -> resolve blobs (O(1), no chain replay).
321
- * Old format: Replay delta chain from root (legacy).
322
- */
323
- private async reconstructSnapshot(snapshot: MessageSnapshot): Promise<FileSnapshot> {
324
- if (snapshot.tree_hash) {
325
- try {
326
- const tree = await blobStore.readTree(snapshot.id);
327
- return await blobStore.resolveTree(tree);
328
- } catch (err) {
329
- debug.warn('snapshot', `Could not resolve tree for ${snapshot.id}, falling back to legacy:`, err);
330
- }
331
- }
446
+ // Collect affected snapshot IDs
447
+ const affectedSnapshotIds = sessionSnapshots
448
+ .filter(s => s.session_changes)
449
+ .map(s => s.id);
332
450
 
333
- return this.reconstructSnapshotLegacy(snapshot);
451
+ return {
452
+ hasConflicts: uniqueConflicts.length > 0,
453
+ conflicts: uniqueConflicts,
454
+ checkpointsToUndo: affectedSnapshotIds
455
+ };
334
456
  }
335
457
 
336
- /**
337
- * Legacy reconstruction: replay all deltas from root to target snapshot.
338
- */
339
- private async reconstructSnapshotLegacy(snapshot: MessageSnapshot): Promise<FileSnapshot> {
340
- const chain = await this.getSnapshotChain(snapshot);
341
- let state: FileSnapshot = {};
342
-
343
- for (const deltaSnapshot of chain) {
344
- if (!deltaSnapshot.delta_changes) {
345
- debug.warn('snapshot', `Delta snapshot ${deltaSnapshot.id} missing delta_changes`);
346
- continue;
347
- }
348
-
349
- const delta = JSON.parse(deltaSnapshot.delta_changes) as DeltaChanges;
350
- state = this.applyDelta(state, delta);
351
- }
352
-
353
- return state;
458
+ private getAllProjectSnapshots(projectId: string): MessageSnapshot[] {
459
+ const db = getDatabase();
460
+ return db.prepare(`
461
+ SELECT * FROM message_snapshots
462
+ WHERE project_id = ? AND (is_deleted IS NULL OR is_deleted = 0)
463
+ ORDER BY created_at ASC
464
+ `).all(projectId) as MessageSnapshot[];
354
465
  }
355
466
 
467
+ // ========================================================================
468
+ // Session-Scoped Restore (Bidirectional)
469
+ // ========================================================================
470
+
356
471
  /**
357
- * Get the chain of snapshots from the first snapshot to the target.
472
+ * Restore to a checkpoint using session-scoped changes.
473
+ * Works in both directions (forward and backward).
474
+ *
475
+ * Algorithm:
476
+ * 1. Walk snapshots [0..targetIndex] → build expected file state at target
477
+ * 2. Walk snapshots [targetIndex+1..end] → files changed only after target need reverting
478
+ * 3. For each file in the expected state map, compare with current disk and restore if different
479
+ * 4. Update in-memory baseline to match restored state
358
480
  */
359
- private async getSnapshotChain(targetSnapshot: MessageSnapshot): Promise<MessageSnapshot[]> {
360
- const chain: MessageSnapshot[] = [];
361
- let current: MessageSnapshot | null = targetSnapshot;
481
+ async restoreSessionScoped(
482
+ projectPath: string,
483
+ sessionId: string,
484
+ targetCheckpointMessageId: string,
485
+ conflictResolutions?: ConflictResolution
486
+ ): Promise<{ restoredFiles: number; skippedFiles: number }> {
487
+ try {
488
+ const sessionSnapshots = snapshotQueries.getBySessionId(sessionId);
362
489
 
363
- while (current) {
364
- chain.unshift(current);
490
+ const targetIndex = sessionSnapshots.findIndex(
491
+ s => s.message_id === targetCheckpointMessageId
492
+ );
365
493
 
366
- if (!current.parent_snapshot_id) {
367
- break;
494
+ if (targetIndex === -1) {
495
+ debug.warn('snapshot', 'Target checkpoint snapshot not found');
496
+ return { restoredFiles: 0, skippedFiles: 0 };
368
497
  }
369
498
 
370
- const parent = snapshotQueries.getById(current.parent_snapshot_id);
371
- if (!parent) {
372
- throw new Error(`Parent snapshot ${current.parent_snapshot_id} not found`);
499
+ // Build expected file state at the target checkpoint
500
+ // filepath → hash that the file should be at the target
501
+ const expectedState = new Map<string, string>();
502
+
503
+ // Walk snapshots from first to target (inclusive): apply forward changes
504
+ for (let i = 0; i <= targetIndex; i++) {
505
+ const snap = sessionSnapshots[i];
506
+ if (!snap.session_changes) continue;
507
+ try {
508
+ const changes = JSON.parse(snap.session_changes) as SessionScopedChanges;
509
+ for (const [filepath, change] of Object.entries(changes)) {
510
+ expectedState.set(filepath, change.newHash);
511
+ }
512
+ } catch { /* skip */ }
373
513
  }
374
514
 
375
- current = parent;
376
- }
515
+ // Walk snapshots after target: files changed only after target need reverting to oldHash
516
+ for (let i = targetIndex + 1; i < sessionSnapshots.length; i++) {
517
+ const snap = sessionSnapshots[i];
518
+ if (!snap.session_changes) continue;
519
+ try {
520
+ const changes = JSON.parse(snap.session_changes) as SessionScopedChanges;
521
+ for (const [filepath, change] of Object.entries(changes)) {
522
+ if (!expectedState.has(filepath)) {
523
+ // File was first changed AFTER target → revert to pre-change state
524
+ expectedState.set(filepath, change.oldHash);
525
+ }
526
+ }
527
+ } catch { /* skip */ }
528
+ }
377
529
 
378
- return chain;
379
- }
530
+ debug.log('snapshot', `Restore to checkpoint: ${expectedState.size} files in expected state`);
380
531
 
381
- /**
382
- * Apply a delta to a file state (legacy format - full content in delta as strings).
383
- * Converts string content to Buffer for the new binary-safe interface.
384
- */
385
- private applyDelta(state: FileSnapshot, delta: DeltaChanges): FileSnapshot {
386
- const newState = { ...state };
532
+ let restoredFiles = 0;
533
+ let skippedFiles = 0;
387
534
 
388
- for (const [filepath, content] of Object.entries(delta.added)) {
389
- newState[filepath] = Buffer.from(content, 'utf-8');
390
- }
391
-
392
- for (const [filepath, content] of Object.entries(delta.modified)) {
393
- newState[filepath] = Buffer.from(content, 'utf-8');
394
- }
535
+ // Update in-memory baseline as we restore
536
+ const baseline = this.sessionBaselines.get(sessionId) || {};
395
537
 
396
- for (const filepath of delta.deleted) {
397
- delete newState[filepath];
398
- }
538
+ for (const [filepath, expectedHash] of expectedState) {
539
+ // Check conflict resolution
540
+ if (conflictResolutions && conflictResolutions[filepath] === 'keep') {
541
+ debug.log('snapshot', `Skipping ${filepath} (user chose to keep)`);
542
+ skippedFiles++;
543
+ continue;
544
+ }
399
545
 
400
- return newState;
401
- }
546
+ const fullPath = path.join(projectPath, filepath);
402
547
 
403
- /**
404
- * Get diff between current state and a snapshot
405
- */
406
- async getDiff(
407
- projectPath: string,
408
- snapshot: MessageSnapshot
409
- ): Promise<{
410
- added: string[];
411
- modified: string[];
412
- deleted: string[];
413
- }> {
414
- try {
415
- const snapshotFiles = await this.reconstructSnapshot(snapshot);
416
- const currentSnapshot = await this.createFileSnapshot(
417
- projectPath,
418
- await getSnapshotFiles(projectPath)
419
- );
548
+ // Check current disk state
549
+ let currentHash = '';
550
+ try {
551
+ const content = await fs.readFile(fullPath);
552
+ currentHash = blobStore.hashContent(content);
553
+ } catch {
554
+ // File doesn't exist on disk
555
+ currentHash = '';
556
+ }
420
557
 
421
- const added: string[] = [];
422
- const modified: string[] = [];
423
- const deleted: string[] = [];
558
+ // Skip if already in expected state
559
+ if (currentHash === expectedHash) continue;
424
560
 
425
- for (const [filepath, content] of Object.entries(currentSnapshot)) {
426
- if (!snapshotFiles[filepath]) {
427
- added.push(filepath);
428
- } else if (!snapshotFiles[filepath].equals(content)) {
429
- modified.push(filepath);
561
+ if (!expectedHash || expectedHash === '') {
562
+ // File should not exist at the target → delete it
563
+ try {
564
+ await fs.unlink(fullPath);
565
+ delete baseline[filepath];
566
+ debug.log('snapshot', `Deleted: ${filepath}`);
567
+ restoredFiles++;
568
+ } catch {
569
+ debug.warn('snapshot', `Could not delete ${filepath}`);
570
+ }
571
+ } else {
572
+ // Restore file content from blob
573
+ try {
574
+ const content = await blobStore.readBlob(expectedHash);
575
+ const dir = path.dirname(fullPath);
576
+ await fs.mkdir(dir, { recursive: true });
577
+ await fs.writeFile(fullPath, content);
578
+ baseline[filepath] = expectedHash;
579
+ debug.log('snapshot', `Restored: ${filepath}`);
580
+ restoredFiles++;
581
+ } catch (err) {
582
+ debug.warn('snapshot', `Could not restore ${filepath}:`, err);
583
+ skippedFiles++;
584
+ }
430
585
  }
431
586
  }
432
587
 
433
- for (const filepath of Object.keys(snapshotFiles)) {
434
- if (!currentSnapshot[filepath]) {
435
- deleted.push(filepath);
436
- }
437
- }
588
+ // Update in-memory baseline to reflect restored state
589
+ this.sessionBaselines.set(sessionId, baseline);
438
590
 
439
- return { added, modified, deleted };
591
+ debug.log('snapshot', `Restore complete: ${restoredFiles} restored, ${skippedFiles} skipped`);
592
+ return { restoredFiles, skippedFiles };
440
593
  } catch (error) {
441
- debug.error('snapshot', 'Error getting diff:', error);
442
- throw new Error(`Failed to get diff: ${error}`);
594
+ debug.error('snapshot', 'Error in session-scoped restore:', error);
595
+ throw new Error(`Failed to restore: ${error}`);
443
596
  }
444
597
  }
445
598
 
599
+ // ========================================================================
600
+ // Helpers
601
+ // ========================================================================
602
+
446
603
  /**
447
- * Create snapshot of file contents (used for restore comparison and getDiff).
448
- * Reads as Buffer for binary-safe handling.
604
+ * Calculate line-level change stats for changed files.
449
605
  */
450
- private async createFileSnapshot(
451
- projectPath: string,
452
- files: string[]
453
- ): Promise<FileSnapshot> {
454
- const snapshot: FileSnapshot = {};
606
+ private async calculateChangeStats(
607
+ previousTree: TreeMap,
608
+ currentTree: TreeMap,
609
+ sessionChanges: SessionScopedChanges,
610
+ readContents: Map<string, Buffer>
611
+ ): Promise<{ filesChanged: number; insertions: number; deletions: number }> {
612
+ const previousSnapshot: Record<string, Buffer> = {};
613
+ const currentSnapshot: Record<string, Buffer> = {};
455
614
 
456
- for (const filepath of files) {
615
+ for (const [filepath, change] of Object.entries(sessionChanges)) {
457
616
  try {
458
- const stats = await fs.stat(filepath);
459
- if (stats.size > MAX_FILE_SIZE) continue;
460
-
461
- // Read as Buffer — no encoding, preserves binary files
462
- const content = await fs.readFile(filepath);
463
- const relativePath = path.relative(projectPath, filepath);
464
- const normalizedPath = relativePath.replace(/\\/g, '/');
465
- snapshot[normalizedPath] = content;
466
- } catch (error) {
467
- debug.warn('snapshot', `Could not read file ${filepath}:`, error);
468
- }
617
+ if (change.oldHash) {
618
+ previousSnapshot[filepath] = await blobStore.readBlob(change.oldHash);
619
+ }
620
+ if (change.newHash) {
621
+ currentSnapshot[filepath] = readContents.get(filepath) ?? await blobStore.readBlob(change.newHash);
622
+ }
623
+ } catch { /* skip */ }
469
624
  }
470
625
 
471
- return snapshot;
626
+ return calculateFileChangeStats(previousSnapshot, currentSnapshot);
472
627
  }
473
628
 
474
629
  /**
   * Clean up session baseline cache when session is no longer active.
   *
   * Drops the in-memory baseline entry for the given session so stale state
   * does not accumulate. No-op if the session has no cached baseline.
   *
   * @param sessionId session whose baseline should be discarded
   */
  clearSessionBaseline(sessionId: string): void {
    this.sessionBaselines.delete(sessionId);
  }
480
635
  }
481
636