prjct-cli 1.14.0 → 1.16.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -33,6 +33,7 @@ import configManager from '../infrastructure/config-manager'
33
33
  import pathManager from '../infrastructure/path-manager'
34
34
  import { analysisStorage } from '../storage/analysis-storage'
35
35
  import { metricsStorage } from '../storage/metrics-storage'
36
+ import { migrateJsonToSqlite } from '../storage/migrate-json'
36
37
  import type {
37
38
  GitData,
38
39
  ProjectCommands,
@@ -124,6 +125,10 @@ class SyncService {
124
125
  // 2. Ensure directories exist (awaited in step 2b before migration)
125
126
  const ensureDirsPromise = this.ensureDirectories()
126
127
 
128
+ // 2b. Auto-migrate JSON → SQLite (idempotent, skips if already done)
129
+ await ensureDirsPromise
130
+ await migrateJsonToSqlite(this.projectId)
131
+
127
132
  // 3. Gather all data IN PARALLEL (30-50% speedup)
128
133
  // These operations are independent and can run concurrently
129
134
  const [git, stats, commands, stack] = await Promise.all([
@@ -133,9 +138,6 @@ class SyncService {
133
138
  this.detectStack(),
134
139
  ])
135
140
 
136
- // Wait for directories before writing files
137
- await ensureDirsPromise
138
-
139
141
  // 4. Generate all files (depends on gathered data)
140
142
  const agents = await this.generateAgents(stack, stats)
141
143
  const skills = this.configureSkills(agents)
@@ -0,0 +1,551 @@
1
+ /**
2
+ * SQLite Database Manager (PRJ-303)
3
+ *
4
+ * Single SQLite database per project replaces 7+ JSON files.
5
+ * Uses Bun's built-in SQLite (`bun:sqlite`) — zero external deps.
6
+ *
7
+ * Benefits over JSON files:
8
+ * - Atomic writes (WAL mode, no race conditions)
9
+ * - Indexed queries (<1ms lookups vs 10-50ms JSON parse)
10
+ * - Concurrent reads + single writer (WAL)
11
+ * - No file locking needed
12
+ *
13
+ * Storage architecture:
14
+ * - `kv_store` table: Document-style storage (drop-in replacement for JSON files)
15
+ * - Normalized tables: For indexed queries on frequently accessed entities
16
+ * - `events` table: Append-only event log (replaces events.jsonl)
17
+ *
18
+ * @version 1.0.0
19
+ */
20
+
21
+ import { Database, type SQLQueryBindings } from 'bun:sqlite'
22
+ import fs from 'node:fs'
23
+ import path from 'node:path'
24
+ import pathManager from '../infrastructure/path-manager'
25
+
26
+ // =============================================================================
27
+ // Types
28
+ // =============================================================================
29
+
30
+ export interface Migration {
31
+ version: number
32
+ name: string
33
+ up: (db: Database) => void
34
+ }
35
+
36
+ export interface MigrationRecord {
37
+ version: number
38
+ name: string
39
+ applied_at: string
40
+ }
41
+
42
+ // =============================================================================
43
+ // Schema Migrations
44
+ // =============================================================================
45
+
46
+ const migrations: Migration[] = [
47
+ {
48
+ version: 1,
49
+ name: 'initial-schema',
50
+ up: (db: Database) => {
51
+ db.run(`
52
+ -- =======================================================================
53
+ -- Document storage (backward-compatible with JSON file pattern)
54
+ -- =======================================================================
55
+ -- Each row replaces one JSON file (state.json, queue.json, etc.)
56
+ -- StorageManager reads/writes entire documents via key lookup.
57
+ CREATE TABLE kv_store (
58
+ key TEXT PRIMARY KEY,
59
+ data TEXT NOT NULL,
60
+ updated_at TEXT NOT NULL
61
+ );
62
+
63
+ -- =======================================================================
64
+ -- Normalized: Tasks
65
+ -- =======================================================================
66
+ CREATE TABLE tasks (
67
+ id TEXT PRIMARY KEY,
68
+ description TEXT NOT NULL,
69
+ type TEXT,
70
+ status TEXT NOT NULL,
71
+ parent_description TEXT,
72
+ branch TEXT,
73
+ linear_id TEXT,
74
+ linear_uuid TEXT,
75
+ session_id TEXT,
76
+ feature_id TEXT,
77
+ started_at TEXT NOT NULL,
78
+ completed_at TEXT,
79
+ shipped_at TEXT,
80
+ paused_at TEXT,
81
+ pause_reason TEXT,
82
+ pr_url TEXT,
83
+ expected_value TEXT,
84
+ data TEXT
85
+ );
86
+
87
+ CREATE INDEX idx_tasks_status ON tasks(status);
88
+ CREATE INDEX idx_tasks_type ON tasks(type);
89
+ CREATE INDEX idx_tasks_branch ON tasks(branch);
90
+ CREATE INDEX idx_tasks_linear_id ON tasks(linear_id);
91
+
92
+ -- =======================================================================
93
+ -- Normalized: Subtasks
94
+ -- =======================================================================
95
+ CREATE TABLE subtasks (
96
+ id TEXT PRIMARY KEY,
97
+ task_id TEXT NOT NULL,
98
+ description TEXT NOT NULL,
99
+ status TEXT NOT NULL,
100
+ domain TEXT,
101
+ agent TEXT,
102
+ sort_order INTEGER NOT NULL,
103
+ depends_on TEXT,
104
+ started_at TEXT,
105
+ completed_at TEXT,
106
+ output TEXT,
107
+ summary TEXT,
108
+ FOREIGN KEY (task_id) REFERENCES tasks(id)
109
+ );
110
+
111
+ CREATE INDEX idx_subtasks_task_id ON subtasks(task_id);
112
+ CREATE INDEX idx_subtasks_status ON subtasks(status);
113
+
114
+ -- =======================================================================
115
+ -- Normalized: Queue Tasks
116
+ -- =======================================================================
117
+ CREATE TABLE queue_tasks (
118
+ id TEXT PRIMARY KEY,
119
+ description TEXT NOT NULL,
120
+ type TEXT,
121
+ priority TEXT,
122
+ section TEXT,
123
+ created_at TEXT NOT NULL,
124
+ completed INTEGER DEFAULT 0,
125
+ completed_at TEXT,
126
+ feature_id TEXT,
127
+ feature_name TEXT
128
+ );
129
+
130
+ CREATE INDEX idx_queue_tasks_section ON queue_tasks(section);
131
+ CREATE INDEX idx_queue_tasks_priority ON queue_tasks(priority);
132
+ CREATE INDEX idx_queue_tasks_completed ON queue_tasks(completed);
133
+
134
+ -- =======================================================================
135
+ -- Normalized: Ideas
136
+ -- =======================================================================
137
+ CREATE TABLE ideas (
138
+ id TEXT PRIMARY KEY,
139
+ text TEXT NOT NULL,
140
+ status TEXT NOT NULL DEFAULT 'pending',
141
+ priority TEXT NOT NULL DEFAULT 'medium',
142
+ tags TEXT,
143
+ added_at TEXT NOT NULL,
144
+ converted_to TEXT,
145
+ details TEXT,
146
+ data TEXT
147
+ );
148
+
149
+ CREATE INDEX idx_ideas_status ON ideas(status);
150
+ CREATE INDEX idx_ideas_priority ON ideas(priority);
151
+
152
+ -- =======================================================================
153
+ -- Normalized: Shipped Features
154
+ -- =======================================================================
155
+ CREATE TABLE shipped_features (
156
+ id TEXT PRIMARY KEY,
157
+ name TEXT NOT NULL,
158
+ shipped_at TEXT NOT NULL,
159
+ version TEXT NOT NULL,
160
+ description TEXT,
161
+ type TEXT,
162
+ duration TEXT,
163
+ data TEXT
164
+ );
165
+
166
+ CREATE INDEX idx_shipped_version ON shipped_features(version);
167
+ CREATE INDEX idx_shipped_at ON shipped_features(shipped_at);
168
+
169
+ -- =======================================================================
170
+ -- Events (replaces events.jsonl)
171
+ -- =======================================================================
172
+ CREATE TABLE events (
173
+ id INTEGER PRIMARY KEY AUTOINCREMENT,
174
+ type TEXT NOT NULL,
175
+ task_id TEXT,
176
+ data TEXT,
177
+ timestamp TEXT NOT NULL
178
+ );
179
+
180
+ CREATE INDEX idx_events_type ON events(type);
181
+ CREATE INDEX idx_events_task_id ON events(task_id);
182
+ CREATE INDEX idx_events_timestamp ON events(timestamp);
183
+
184
+ -- =======================================================================
185
+ -- Analysis (draft + sealed)
186
+ -- =======================================================================
187
+ CREATE TABLE analysis (
188
+ id TEXT PRIMARY KEY,
189
+ status TEXT NOT NULL,
190
+ commit_hash TEXT,
191
+ signature TEXT,
192
+ sealed_at TEXT,
193
+ analyzed_at TEXT,
194
+ data TEXT NOT NULL
195
+ );
196
+
197
+ -- =======================================================================
198
+ -- Index: File scores and checksums
199
+ -- =======================================================================
200
+ CREATE TABLE index_files (
201
+ path TEXT PRIMARY KEY,
202
+ score REAL,
203
+ size INTEGER,
204
+ mtime TEXT,
205
+ language TEXT,
206
+ categories TEXT,
207
+ domain TEXT
208
+ );
209
+
210
+ CREATE INDEX idx_index_files_domain ON index_files(domain);
211
+ CREATE INDEX idx_index_files_score ON index_files(score);
212
+
213
+ CREATE TABLE index_checksums (
214
+ path TEXT PRIMARY KEY,
215
+ checksum TEXT NOT NULL,
216
+ size INTEGER,
217
+ mtime TEXT
218
+ );
219
+
220
+ -- =======================================================================
221
+ -- Index: Metadata (project-index, domains, categories-cache)
222
+ -- =======================================================================
223
+ CREATE TABLE index_meta (
224
+ key TEXT PRIMARY KEY,
225
+ data TEXT NOT NULL,
226
+ updated_at TEXT NOT NULL
227
+ );
228
+
229
+ -- =======================================================================
230
+ -- Memory (key-value with domain tagging)
231
+ -- =======================================================================
232
+ CREATE TABLE memory (
233
+ key TEXT PRIMARY KEY,
234
+ domain TEXT,
235
+ value TEXT,
236
+ confidence REAL DEFAULT 1.0,
237
+ updated_at TEXT NOT NULL
238
+ );
239
+
240
+ CREATE INDEX idx_memory_domain ON memory(domain);
241
+
242
+ -- =======================================================================
243
+ -- Metrics: Daily stats for trend analysis
244
+ -- =======================================================================
245
+ CREATE TABLE metrics_daily (
246
+ date TEXT PRIMARY KEY,
247
+ tokens_saved INTEGER NOT NULL DEFAULT 0,
248
+ syncs INTEGER NOT NULL DEFAULT 0,
249
+ avg_compression_rate REAL NOT NULL DEFAULT 0,
250
+ total_duration INTEGER NOT NULL DEFAULT 0
251
+ );
252
+
253
+ -- =======================================================================
254
+ -- Velocity: Sprint data
255
+ -- =======================================================================
256
+ CREATE TABLE velocity_sprints (
257
+ sprint_number INTEGER PRIMARY KEY,
258
+ points_completed REAL NOT NULL DEFAULT 0,
259
+ tasks_completed INTEGER NOT NULL DEFAULT 0,
260
+ estimation_accuracy REAL NOT NULL DEFAULT 0,
261
+ avg_variance REAL NOT NULL DEFAULT 0,
262
+ started_at TEXT,
263
+ ended_at TEXT
264
+ );
265
+ `)
266
+ },
267
+ },
268
+ ]
269
+
270
+ // =============================================================================
271
+ // Database Manager
272
+ // =============================================================================
273
+
274
+ class PrjctDatabase {
275
+ private connections = new Map<string, Database>()
276
+
277
+ /**
278
+ * Get the database file path for a project.
279
+ */
280
+ getDbPath(projectId: string): string {
281
+ return path.join(pathManager.getGlobalProjectPath(projectId), 'prjct.db')
282
+ }
283
+
284
+ /**
285
+ * Get or create a database connection for a project.
286
+ * Lazily opens the database, runs migrations, and enables WAL mode.
287
+ */
288
+ getDb(projectId: string): Database {
289
+ const existing = this.connections.get(projectId)
290
+ if (existing) return existing
291
+
292
+ const dbPath = this.getDbPath(projectId)
293
+ // Ensure parent directory exists before creating DB
294
+ const dbDir = path.dirname(dbPath)
295
+ if (!fs.existsSync(dbDir)) {
296
+ fs.mkdirSync(dbDir, { recursive: true })
297
+ }
298
+ const db = new Database(dbPath, { create: true })
299
+
300
+ // Enable WAL mode for concurrent reads + single writer
301
+ db.run('PRAGMA journal_mode = WAL')
302
+
303
+ // Performance tuning
304
+ db.run('PRAGMA synchronous = NORMAL')
305
+ db.run('PRAGMA cache_size = -2000') // 2MB cache
306
+ db.run('PRAGMA temp_store = MEMORY')
307
+ db.run('PRAGMA mmap_size = 268435456') // 256MB mmap
308
+
309
+ // Run pending migrations
310
+ this.runMigrations(db)
311
+
312
+ this.connections.set(projectId, db)
313
+ return db
314
+ }
315
+
316
+ /**
317
+ * Close a specific project's database connection, or all connections.
318
+ */
319
+ close(projectId?: string): void {
320
+ if (projectId) {
321
+ const db = this.connections.get(projectId)
322
+ if (db) {
323
+ db.close()
324
+ this.connections.delete(projectId)
325
+ }
326
+ } else {
327
+ this.connections.forEach((db) => {
328
+ db.close()
329
+ })
330
+ this.connections.clear()
331
+ }
332
+ }
333
+
334
+ /**
335
+ * Check if a database exists for a project (without creating one).
336
+ */
337
+ exists(projectId: string): boolean {
338
+ return fs.existsSync(this.getDbPath(projectId))
339
+ }
340
+
341
+ // ===========================================================================
342
+ // Document Storage (kv_store)
343
+ // ===========================================================================
344
+
345
+ /**
346
+ * Read a document from kv_store by key.
347
+ * Returns parsed JSON or null if not found.
348
+ */
349
+ getDoc<T>(projectId: string, key: string): T | null {
350
+ const db = this.getDb(projectId)
351
+ const row = db.prepare('SELECT data FROM kv_store WHERE key = ?').get(key) as {
352
+ data: string
353
+ } | null
354
+ if (!row) return null
355
+ return JSON.parse(row.data) as T
356
+ }
357
+
358
+ /**
359
+ * Write a document to kv_store.
360
+ * Replaces existing document with same key.
361
+ */
362
+ setDoc<T>(projectId: string, key: string, data: T): void {
363
+ const db = this.getDb(projectId)
364
+ const json = JSON.stringify(data)
365
+ const now = new Date().toISOString()
366
+ db.prepare('INSERT OR REPLACE INTO kv_store (key, data, updated_at) VALUES (?, ?, ?)').run(
367
+ key,
368
+ json,
369
+ now
370
+ )
371
+ }
372
+
373
+ /**
374
+ * Delete a document from kv_store.
375
+ */
376
+ deleteDoc(projectId: string, key: string): void {
377
+ const db = this.getDb(projectId)
378
+ db.prepare('DELETE FROM kv_store WHERE key = ?').run(key)
379
+ }
380
+
381
+ /**
382
+ * Check if a document exists in kv_store.
383
+ */
384
+ hasDoc(projectId: string, key: string): boolean {
385
+ const db = this.getDb(projectId)
386
+ const row = db.prepare('SELECT 1 FROM kv_store WHERE key = ?').get(key)
387
+ return row !== null
388
+ }
389
+
390
+ // ===========================================================================
391
+ // Event Log
392
+ // ===========================================================================
393
+
394
+ /**
395
+ * Append an event to the event log.
396
+ */
397
+ appendEvent(
398
+ projectId: string,
399
+ type: string,
400
+ data: Record<string, unknown>,
401
+ taskId?: string
402
+ ): void {
403
+ const db = this.getDb(projectId)
404
+ const now = new Date().toISOString()
405
+ db.prepare('INSERT INTO events (type, task_id, data, timestamp) VALUES (?, ?, ?, ?)').run(
406
+ type,
407
+ taskId ?? null,
408
+ JSON.stringify(data),
409
+ now
410
+ )
411
+ }
412
+
413
+ /**
414
+ * Query events by type, with optional limit.
415
+ */
416
+ getEvents(
417
+ projectId: string,
418
+ type?: string,
419
+ limit = 100
420
+ ): Array<{ id: number; type: string; task_id: string | null; data: string; timestamp: string }> {
421
+ const db = this.getDb(projectId)
422
+ if (type) {
423
+ return db
424
+ .prepare('SELECT * FROM events WHERE type = ? ORDER BY id DESC LIMIT ?')
425
+ .all(type, limit) as Array<{
426
+ id: number
427
+ type: string
428
+ task_id: string | null
429
+ data: string
430
+ timestamp: string
431
+ }>
432
+ }
433
+ return db.prepare('SELECT * FROM events ORDER BY id DESC LIMIT ?').all(limit) as Array<{
434
+ id: number
435
+ type: string
436
+ task_id: string | null
437
+ data: string
438
+ timestamp: string
439
+ }>
440
+ }
441
+
442
+ // ===========================================================================
443
+ // Raw Query Access
444
+ // ===========================================================================
445
+
446
+ /**
447
+ * Execute a raw SQL query that returns rows.
448
+ */
449
+ query<T = Record<string, unknown>>(
450
+ projectId: string,
451
+ sql: string,
452
+ ...params: SQLQueryBindings[]
453
+ ): T[] {
454
+ const db = this.getDb(projectId)
455
+ return db.prepare(sql).all(...params) as T[]
456
+ }
457
+
458
+ /**
459
+ * Execute a raw SQL statement (INSERT/UPDATE/DELETE).
460
+ */
461
+ run(projectId: string, sql: string, ...params: SQLQueryBindings[]): void {
462
+ const db = this.getDb(projectId)
463
+ db.prepare(sql).run(...params)
464
+ }
465
+
466
+ /**
467
+ * Execute a raw SQL query that returns a single row.
468
+ */
469
+ get<T = Record<string, unknown>>(
470
+ projectId: string,
471
+ sql: string,
472
+ ...params: SQLQueryBindings[]
473
+ ): T | null {
474
+ const db = this.getDb(projectId)
475
+ return (db.prepare(sql).get(...params) as T) ?? null
476
+ }
477
+
478
+ /**
479
+ * Run multiple statements in a transaction.
480
+ */
481
+ transaction<T>(projectId: string, fn: (db: Database) => T): T {
482
+ const db = this.getDb(projectId)
483
+ return db.transaction(fn)(db)
484
+ }
485
+
486
+ // ===========================================================================
487
+ // Migration System
488
+ // ===========================================================================
489
+
490
+ /**
491
+ * Run all pending migrations.
492
+ */
493
+ private runMigrations(db: Database): void {
494
+ // Create migrations table if it doesn't exist
495
+ db.run(`
496
+ CREATE TABLE IF NOT EXISTS _migrations (
497
+ version INTEGER PRIMARY KEY,
498
+ name TEXT NOT NULL,
499
+ applied_at TEXT NOT NULL
500
+ )
501
+ `)
502
+
503
+ // Get applied versions
504
+ const applied = new Set(
505
+ (db.prepare('SELECT version FROM _migrations').all() as Array<{ version: number }>).map(
506
+ (r) => r.version
507
+ )
508
+ )
509
+
510
+ // Run pending migrations in order
511
+ for (const migration of migrations) {
512
+ if (applied.has(migration.version)) continue
513
+
514
+ db.transaction(() => {
515
+ migration.up(db)
516
+ db.prepare('INSERT INTO _migrations (version, name, applied_at) VALUES (?, ?, ?)').run(
517
+ migration.version,
518
+ migration.name,
519
+ new Date().toISOString()
520
+ )
521
+ })()
522
+ }
523
+ }
524
+
525
+ /**
526
+ * Get applied migrations for a project.
527
+ */
528
+ getMigrations(projectId: string): MigrationRecord[] {
529
+ const db = this.getDb(projectId)
530
+ return db.prepare('SELECT * FROM _migrations ORDER BY version').all() as MigrationRecord[]
531
+ }
532
+
533
+ /**
534
+ * Get the current schema version.
535
+ */
536
+ getSchemaVersion(projectId: string): number {
537
+ const db = this.getDb(projectId)
538
+ const row = db.prepare('SELECT MAX(version) as version FROM _migrations').get() as {
539
+ version: number | null
540
+ } | null
541
+ return row?.version ?? 0
542
+ }
543
+ }
544
+
545
+ // =============================================================================
546
+ // Singleton Export
547
+ // =============================================================================
548
+
549
+ export const prjctDb = new PrjctDatabase()
550
+ export default prjctDb
551
+ export { PrjctDatabase }