kotadb 2.2.0-next.20260204184036 → 2.2.0-next.20260204230500
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +1 -1
- package/src/db/migrations/004_memory_layer.sql +16 -144
- package/src/db/migrations/006_add_migration_checksums.sql +15 -0
- package/src/db/sqlite/migration-runner.ts +335 -0
- package/src/db/sqlite/sqlite-client.ts +26 -1
- package/src/db/sqlite-schema.sql +48 -0
- package/src/indexer/constants.ts +1 -0
- package/src/indexer/parsers.ts +3 -1
package/package.json
CHANGED

@@ -1,6 +1,6 @@
 {
   "name": "kotadb",
-  "version": "2.2.0-next.20260204184036",
+  "version": "2.2.0-next.20260204230500",
   "description": "Local-only code intelligence tool for CLI agents. SQLite-backed repository indexing and code search via MCP.",
   "type": "module",
   "module": "src/index.ts",

package/src/db/migrations/004_memory_layer.sql
CHANGED

@@ -3,181 +3,53 @@
 -- Migration: 004_memory_layer
 -- Issue: Memory Layer for Agent Intelligence
 -- Author: Claude Code
--- Date: 2026-02-03
+-- Date: 2026-02-03 (UPDATED: 2026-02-04)
 --
--- This migration extends the memory layer
--- - decisions
--- -
--- - pattern_annotations: Enhanced patterns with evidence/confidence scoring
--- - agent_sessions: Track agent work sessions
--- - session_insights: Insights linked to sessions with file references
+-- This migration extends the memory layer with:
+-- - decisions.status column (active, superseded, deprecated)
+-- - agent_sessions table for tracking agent work
 --
 -- Note: The base sqlite-schema.sql already has decisions, failures, patterns,
--- and insights tables. This migration adds
--- agent_sessions table for complete session tracking.
+-- and insights tables. This migration only adds what's missing.
 
 -- ============================================================================
 -- 1. Extend Decisions Table - Add status column
 -- ============================================================================
 -- Add status column to track decision lifecycle
+-- Note: SQLite ALTER TABLE ADD COLUMN will fail if column exists,
+-- which is the expected behavior (migration already applied)
 
 ALTER TABLE decisions ADD COLUMN status TEXT DEFAULT 'active';
 
--- Add index for active decisions (most common query)
 CREATE INDEX IF NOT EXISTS idx_decisions_status ON decisions(status) WHERE status = 'active';
 
 -- ============================================================================
--- 2. Failed Approaches Table
--- ============================================================================
--- Tracks what didn't work to prevent repeating mistakes
-
-CREATE TABLE IF NOT EXISTS failed_approaches (
-  id TEXT PRIMARY KEY,                        -- uuid → TEXT
-  repository_id TEXT NOT NULL,                -- Foreign key to repositories
-  title TEXT NOT NULL,                        -- Short description
-  problem TEXT NOT NULL,                      -- What problem was being solved
-  approach TEXT NOT NULL,                     -- What was tried
-  failure_reason TEXT NOT NULL,               -- Why it failed
-  related_files TEXT,                         -- JSON array of related file paths
-  created_at TEXT NOT NULL DEFAULT (datetime('now')),
-
-  FOREIGN KEY (repository_id) REFERENCES repositories(id) ON DELETE CASCADE
-);
-
--- Indexes for common queries
-CREATE INDEX IF NOT EXISTS idx_failed_approaches_repository_id ON failed_approaches(repository_id);
-CREATE INDEX IF NOT EXISTS idx_failed_approaches_created_at ON failed_approaches(created_at DESC);
-
--- ============================================================================
--- 3. Failed Approaches FTS5 Virtual Table
--- ============================================================================
--- External content FTS5 for searching failed approaches
-
-CREATE VIRTUAL TABLE IF NOT EXISTS failed_approaches_fts USING fts5(
-  title,
-  problem,
-  approach,
-  failure_reason,
-  content='failed_approaches',
-  content_rowid='rowid'
-);
-
--- ============================================================================
--- 4. Failed Approaches FTS5 Sync Triggers
--- ============================================================================
-
--- After INSERT: Add new failed approach to FTS index
-CREATE TRIGGER IF NOT EXISTS failed_approaches_fts_ai
-AFTER INSERT ON failed_approaches
-BEGIN
-  INSERT INTO failed_approaches_fts(rowid, title, problem, approach, failure_reason)
-  VALUES (new.rowid, new.title, new.problem, new.approach, new.failure_reason);
-END;
-
--- After DELETE: Remove failed approach from FTS index
-CREATE TRIGGER IF NOT EXISTS failed_approaches_fts_ad
-AFTER DELETE ON failed_approaches
-BEGIN
-  INSERT INTO failed_approaches_fts(failed_approaches_fts, rowid, title, problem, approach, failure_reason)
-  VALUES ('delete', old.rowid, old.title, old.problem, old.approach, old.failure_reason);
-END;
-
--- After UPDATE: Update failed approach in FTS index (delete old, insert new)
-CREATE TRIGGER IF NOT EXISTS failed_approaches_fts_au
-AFTER UPDATE ON failed_approaches
-BEGIN
-  INSERT INTO failed_approaches_fts(failed_approaches_fts, rowid, title, problem, approach, failure_reason)
-  VALUES ('delete', old.rowid, old.title, old.problem, old.approach, old.failure_reason);
-  INSERT INTO failed_approaches_fts(rowid, title, problem, approach, failure_reason)
-  VALUES (new.rowid, new.title, new.problem, new.approach, new.failure_reason);
-END;
-
--- ============================================================================
--- 5. Pattern Annotations Table
--- ============================================================================
--- Enhanced pattern detection with evidence counting and confidence scoring
-
-CREATE TABLE IF NOT EXISTS pattern_annotations (
-  id TEXT PRIMARY KEY,                        -- uuid → TEXT
-  repository_id TEXT NOT NULL,                -- Foreign key to repositories
-  pattern_type TEXT NOT NULL,                 -- Pattern category (logging, error-handling, testing, etc.)
-  pattern_name TEXT NOT NULL,                 -- Pattern identifier
-  description TEXT NOT NULL,                  -- Human-readable description
-  example_code TEXT,                          -- Code example (optional)
-  evidence_count INTEGER NOT NULL DEFAULT 1,  -- Number of occurrences found
-  confidence REAL NOT NULL DEFAULT 1.0,       -- Confidence score (0.0-1.0)
-  created_at TEXT NOT NULL DEFAULT (datetime('now')),
-
-  FOREIGN KEY (repository_id) REFERENCES repositories(id) ON DELETE CASCADE,
-
-  CHECK (confidence >= 0.0 AND confidence <= 1.0),
-  CHECK (evidence_count >= 1)
-);
-
--- Indexes for common queries
-CREATE INDEX IF NOT EXISTS idx_pattern_annotations_repository_id ON pattern_annotations(repository_id);
-CREATE INDEX IF NOT EXISTS idx_pattern_annotations_pattern_type ON pattern_annotations(pattern_type);
-CREATE INDEX IF NOT EXISTS idx_pattern_annotations_confidence ON pattern_annotations(confidence DESC);
--- Composite index for high-confidence patterns by type
-CREATE INDEX IF NOT EXISTS idx_pattern_annotations_type_confidence
-  ON pattern_annotations(repository_id, pattern_type, confidence DESC);
-
--- ============================================================================
--- 6. Agent Sessions Table
+-- 2. Agent Sessions Table
 -- ============================================================================
 -- Tracks agent work sessions for learning and analysis
 
 CREATE TABLE IF NOT EXISTS agent_sessions (
-  id TEXT PRIMARY KEY,
-  repository_id TEXT NOT NULL,
-  agent_type TEXT,
-  task_summary TEXT,
-  outcome TEXT,
-  files_modified TEXT,
+  id TEXT PRIMARY KEY,
+  repository_id TEXT NOT NULL,
+  agent_type TEXT,
+  task_summary TEXT,
+  outcome TEXT,
+  files_modified TEXT,
   started_at TEXT NOT NULL DEFAULT (datetime('now')),
-  ended_at TEXT,
+  ended_at TEXT,
 
   FOREIGN KEY (repository_id) REFERENCES repositories(id) ON DELETE CASCADE,
-
   CHECK (outcome IS NULL OR outcome IN ('success', 'failure', 'partial'))
 );
 
--- Indexes for common queries
 CREATE INDEX IF NOT EXISTS idx_agent_sessions_repository_id ON agent_sessions(repository_id);
 CREATE INDEX IF NOT EXISTS idx_agent_sessions_agent_type ON agent_sessions(agent_type) WHERE agent_type IS NOT NULL;
 CREATE INDEX IF NOT EXISTS idx_agent_sessions_outcome ON agent_sessions(outcome) WHERE outcome IS NOT NULL;
 CREATE INDEX IF NOT EXISTS idx_agent_sessions_started_at ON agent_sessions(started_at DESC);
--- Partial index for ongoing sessions
 CREATE INDEX IF NOT EXISTS idx_agent_sessions_ongoing ON agent_sessions(repository_id) WHERE ended_at IS NULL;
 
 -- ============================================================================
--- 7. Session Insights Table
--- ============================================================================
--- Insights discovered during agent sessions with proper foreign keys
-
-CREATE TABLE IF NOT EXISTS session_insights (
-  id TEXT PRIMARY KEY,                        -- uuid → TEXT
-  session_id TEXT NOT NULL,                   -- Foreign key to agent_sessions
-  insight_type TEXT NOT NULL,                 -- Type of insight
-  content TEXT NOT NULL,                      -- The insight content
-  related_file_id TEXT,                       -- Optional reference to indexed_files
-  created_at TEXT NOT NULL DEFAULT (datetime('now')),
-
-  FOREIGN KEY (session_id) REFERENCES agent_sessions(id) ON DELETE CASCADE,
-  FOREIGN KEY (related_file_id) REFERENCES indexed_files(id) ON DELETE SET NULL,
-
-  CHECK (insight_type IN ('discovery', 'failure', 'workaround'))
-);
-
--- Indexes for common queries
-CREATE INDEX IF NOT EXISTS idx_session_insights_session_id ON session_insights(session_id);
-CREATE INDEX IF NOT EXISTS idx_session_insights_insight_type ON session_insights(insight_type);
-CREATE INDEX IF NOT EXISTS idx_session_insights_related_file ON session_insights(related_file_id)
-  WHERE related_file_id IS NOT NULL;
-CREATE INDEX IF NOT EXISTS idx_session_insights_created_at ON session_insights(created_at DESC);
-
--- ============================================================================
--- 8. Record Migration
+-- 3. Record Migration
 -- ============================================================================
 
 INSERT OR IGNORE INTO schema_migrations (name) VALUES ('004_memory_layer');
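
Note on the ALTER TABLE comment added above: re-running this migration against a database that already has the status column raises "duplicate column name". A minimal sketch of tolerating that error as the already-applied case (illustrative only, not the package's code; bun:sqlite is assumed as the driver and the path is hypothetical):

  import { Database } from "bun:sqlite";

  const db = new Database(".kotadb/kota.db"); // hypothetical path
  try {
    db.exec("ALTER TABLE decisions ADD COLUMN status TEXT DEFAULT 'active'");
  } catch (err) {
    // SQLite reports "duplicate column name: status" when the column exists;
    // per the migration header, that means the migration was already applied.
    if (!String(err).includes("duplicate column name")) throw err;
  }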

package/src/db/migrations/006_add_migration_checksums.sql
ADDED

@@ -0,0 +1,15 @@
+-- SQLite Migration: Add Checksum Tracking to Schema Migrations
+--
+-- Migration: 006_add_migration_checksums
+-- Issue: #166 - Migration infrastructure
+-- Author: Claude Code
+-- Date: 2026-02-04
+--
+-- Adds checksum column to schema_migrations table for drift detection.
+-- Existing migrations will have NULL checksums (validation skipped).
+
+ALTER TABLE schema_migrations ADD COLUMN checksum TEXT;
+
+CREATE INDEX IF NOT EXISTS idx_schema_migrations_name ON schema_migrations(name);
+
+INSERT OR IGNORE INTO schema_migrations (name) VALUES ('006_add_migration_checksums');
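
Because pre-006 rows keep NULL checksums, drift validation only applies to rows written after this migration. The stored value matches computeChecksum() in migration-runner.ts below: a hex SHA-256 over the migration file's contents. A small sketch for verifying one by hand (the file path is illustrative):

  import { createHash } from "node:crypto";
  import { readFileSync } from "node:fs";

  // Compare the printed hash against schema_migrations.checksum for this row
  const sql = readFileSync("src/db/migrations/006_add_migration_checksums.sql", "utf-8");
  console.log(createHash("sha256").update(sql, "utf-8").digest("hex"));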

package/src/db/sqlite/migration-runner.ts
ADDED

@@ -0,0 +1,335 @@
+/**
+ * Migration runner for KotaDB SQLite schema management.
+ *
+ * Features:
+ * - Scans migrations directory for .sql files
+ * - Applies pending migrations in order
+ * - Validates checksums for drift detection
+ * - Transactional execution with automatic rollback
+ *
+ * @module @db/sqlite/migration-runner
+ */
+
+import { readdirSync, readFileSync, existsSync } from "node:fs";
+import { join } from "node:path";
+import { createHash } from "node:crypto";
+import type { KotaDatabase } from "./sqlite-client.js";
+import { createLogger } from "@logging/logger.js";
+
+const logger = createLogger({ module: "migration-runner" });
+
+/**
+ * Parsed migration file information
+ */
+export interface Migration {
+  filename: string;
+  name: string;
+  number: number;
+  path: string;
+  content: string;
+  checksum: string;
+}
+
+/**
+ * Migration record from the database
+ */
+export interface AppliedMigration {
+  name: string;
+  applied_at: string;
+  checksum?: string | null;
+}
+
+/**
+ * Result of a migration run
+ */
+export interface MigrationResult {
+  appliedCount: number;
+  driftDetected: boolean;
+  appliedMigrations: string[];
+  errors: string[];
+}
+
+/**
+ * Parse migration filename to extract number and name.
+ * Format: {number}_{name}.sql
+ *
+ * @param filename - Migration filename (e.g., "004_memory_layer.sql")
+ * @returns Parsed number and name, or null if invalid format
+ */
+export function parseMigrationFilename(
+  filename: string
+): { number: number; name: string } | null {
+  const match = filename.match(/^(\d+)_(.+)\.sql$/);
+  if (!match) return null;
+
+  const numStr = match[1];
+  const name = match[2];
+  if (!numStr || !name) return null;
+
+  return { number: parseInt(numStr, 10), name: `${numStr}_${name}` };
+}
+
+/**
+ * Compute SHA-256 checksum of migration content.
+ *
+ * @param content - Migration SQL content
+ * @returns Hex-encoded SHA-256 hash
+ */
+export function computeChecksum(content: string): string {
+  return createHash("sha256").update(content, "utf-8").digest("hex");
+}
+
+/**
+ * Scan migrations directory and return sorted list of migrations.
+ *
+ * @param migrationsDir - Path to migrations directory
+ * @returns Array of migrations sorted by number
+ */
+export function scanMigrations(migrationsDir: string): Migration[] {
+  if (!existsSync(migrationsDir)) {
+    logger.warn("Migrations directory does not exist", { path: migrationsDir });
+    return [];
+  }
+
+  const files = readdirSync(migrationsDir)
+    .filter((f) => f.endsWith(".sql"))
+    .sort(); // Alphabetical sort ensures numeric order for 001, 002, etc.
+
+  const migrations: Migration[] = [];
+
+  for (const filename of files) {
+    const parsed = parseMigrationFilename(filename);
+    if (!parsed) {
+      logger.warn("Invalid migration filename (skipping)", { filename });
+      continue;
+    }
+
+    const path = join(migrationsDir, filename);
+    const content = readFileSync(path, "utf-8");
+    const checksum = computeChecksum(content);
+
+    migrations.push({
+      filename,
+      name: parsed.name,
+      number: parsed.number,
+      path,
+      content,
+      checksum,
+    });
+  }
+
+  // Sort by number (redundant if naming is correct, but ensures correctness)
+  migrations.sort((a, b) => a.number - b.number);
+
+  return migrations;
+}
+
+/**
+ * Get list of applied migrations from schema_migrations table.
+ *
+ * @param db - Database instance
+ * @returns Map of migration name to applied migration record
+ */
+export function getAppliedMigrations(
+  db: KotaDatabase
+): Map<string, AppliedMigration> {
+  // Ensure schema_migrations table exists
+  if (!db.tableExists("schema_migrations")) {
+    logger.warn("schema_migrations table does not exist");
+    return new Map();
+  }
+
+  // Check if checksum column exists
+  const columns = db.query<{ name: string }>(
+    "SELECT name FROM pragma_table_info('schema_migrations')"
+  );
+  const hasChecksum = columns.some((c) => c.name === "checksum");
+
+  // Query with or without checksum column
+  const query = hasChecksum
+    ? "SELECT name, applied_at, checksum FROM schema_migrations ORDER BY id"
+    : "SELECT name, applied_at, NULL as checksum FROM schema_migrations ORDER BY id";
+
+  const rows = db.query<AppliedMigration>(query);
+
+  return new Map(rows.map((r) => [r.name, r]));
+}
+
+/**
+ * Apply a single migration within a transaction.
+ *
+ * @param db - Database instance
+ * @param migration - Migration to apply
+ */
+export function applyMigration(db: KotaDatabase, migration: Migration): void {
+  logger.info("Applying migration", { name: migration.name });
+
+  db.immediateTransaction(() => {
+    // Execute migration SQL
+    db.exec(migration.content);
+
+    // Check if checksum column exists for INSERT
+    const columns = db.query<{ name: string }>(
+      "SELECT name FROM pragma_table_info('schema_migrations')"
+    );
+    const hasChecksum = columns.some((c) => c.name === "checksum");
+
+    // Record migration (with checksum if column exists)
+    if (hasChecksum) {
+      db.run(
+        "INSERT OR REPLACE INTO schema_migrations (name, checksum, applied_at) VALUES (?, ?, datetime('now'))",
+        [migration.name, migration.checksum]
+      );
+    } else {
+      // Migration file already contains INSERT OR IGNORE for name
+      // Just ensure the record exists
+      db.run(
+        "INSERT OR IGNORE INTO schema_migrations (name, applied_at) VALUES (?, datetime('now'))",
+        [migration.name]
+      );
+    }
+  });
+
+  logger.info("Migration applied successfully", { name: migration.name });
+}
+
+/**
+ * Validate checksum for already-applied migration (drift detection).
+ *
+ * @param migration - Migration file info
+ * @param applied - Applied migration record
+ * @returns True if checksum matches or no checksum stored
+ */
+export function validateChecksum(
+  migration: Migration,
+  applied: AppliedMigration
+): boolean {
+  if (!applied.checksum) {
+    // Old migrations may not have checksum - skip validation
+    logger.debug("No checksum stored for migration (skipping validation)", {
+      name: migration.name,
+    });
+    return true;
+  }
+
+  if (migration.checksum !== applied.checksum) {
+    logger.warn("Migration checksum mismatch (DRIFT DETECTED)", {
+      name: migration.name,
+      expected: applied.checksum,
+      actual: migration.checksum,
+      message:
+        "Migration file was modified after being applied. This may indicate schema drift.",
+    });
+    return false;
+  }
+
+  return true;
+}
+
+/**
+ * Run all pending migrations.
+ *
+ * @param db - Database instance
+ * @param migrationsDir - Path to migrations directory
+ * @returns Migration result with count and status
+ */
+export function runMigrations(
+  db: KotaDatabase,
+  migrationsDir: string
+): MigrationResult {
+  logger.info("Starting migration runner", { migrationsDir });
+
+  const result: MigrationResult = {
+    appliedCount: 0,
+    driftDetected: false,
+    appliedMigrations: [],
+    errors: [],
+  };
+
+  // Scan filesystem for migration files
+  const availableMigrations = scanMigrations(migrationsDir);
+  logger.debug("Found migration files", { count: availableMigrations.length });
+
+  // Get applied migrations from database
+  const appliedMigrations = getAppliedMigrations(db);
+  logger.debug("Found applied migrations", { count: appliedMigrations.size });
+
+  for (const migration of availableMigrations) {
+    const applied = appliedMigrations.get(migration.name);
+
+    if (applied) {
+      // Migration already applied - validate checksum
+      const valid = validateChecksum(migration, applied);
+      if (!valid) {
+        result.driftDetected = true;
+      }
+    } else {
+      // Migration not yet applied - apply it
+      try {
+        applyMigration(db, migration);
+        result.appliedCount++;
+        result.appliedMigrations.push(migration.name);
+      } catch (error) {
+        const errorMessage =
+          error instanceof Error ? error.message : String(error);
+        logger.error("Migration failed", {
+          name: migration.name,
+          error: errorMessage,
+        });
+        result.errors.push(`Migration ${migration.name} failed: ${errorMessage}`);
+        // Stop applying further migrations on failure
+        break;
+      }
+    }
+  }
+
+  if (result.driftDetected) {
+    logger.warn(
+      "Schema drift detected. Some migration files have been modified after being applied."
+    );
+  }
+
+  logger.info("Migration runner completed", {
+    appliedCount: result.appliedCount,
+    driftDetected: result.driftDetected,
+  });
+
+  return result;
+}
+
+/**
+ * Update checksums for all existing migrations.
+ * Useful after adding checksum column to update records.
+ *
+ * @param db - Database instance
+ * @param migrationsDir - Path to migrations directory
+ * @returns Number of records updated
+ */
+export function updateExistingChecksums(
+  db: KotaDatabase,
+  migrationsDir: string
+): number {
+  const migrations = scanMigrations(migrationsDir);
+  const applied = getAppliedMigrations(db);
+  let updatedCount = 0;
+
+  for (const migration of migrations) {
+    const record = applied.get(migration.name);
+    if (record && !record.checksum) {
+      db.run("UPDATE schema_migrations SET checksum = ? WHERE name = ?", [
+        migration.checksum,
+        migration.name,
+      ]);
+      updatedCount++;
+      logger.debug("Updated checksum for migration", { name: migration.name });
+    }
+  }
+
+  if (updatedCount > 0) {
+    logger.info("Updated checksums for existing migrations", {
+      count: updatedCount,
+    });
+  }
+
+  return updatedCount;
+}
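
A minimal usage sketch for the new runner (the KotaDatabase constructor shape is assumed from the config fields visible in sqlite-client.ts below; the option values are illustrative):

  import { KotaDatabase } from "./sqlite-client.js";
  import { runMigrations } from "./migration-runner.js";

  // skipSchemaInit disables the client's own auto-migration so the runner is called explicitly
  const db = new KotaDatabase({ path: ".kotadb/kota.db", skipSchemaInit: true }); // hypothetical options
  const result = runMigrations(db, "src/db/migrations");
  // appliedMigrations lists what ran; driftDetected flags files modified after being applied.
  // Note the runner stops at the first failed migration and reports it in errors.
  if (result.driftDetected) console.warn("schema drift detected");
  if (result.errors.length > 0) console.error(result.errors);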

package/src/db/sqlite/sqlite-client.ts
CHANGED

@@ -19,6 +19,7 @@ import { cpus } from "node:os";
 import { createLogger } from "@logging/logger.js";
 import { findProjectRoot } from "@config/project-root.js";
 import { ensureKotadbIgnored } from "@config/gitignore.js";
+import { runMigrations, updateExistingChecksums } from "./migration-runner.js";
 
 const logger = createLogger({ module: "sqlite-client" });
 
@@ -137,8 +138,11 @@ export class KotaDatabase {
     this.configurePragmas();
 
     // Auto-initialize schema if not already present (writer only)
+    // - New database: Apply full base schema
+    // - Existing database: Run pending migrations
     if (!this.config.readonly && !this.config.skipSchemaInit) {
       if (!this.tableExists("indexed_files")) {
+        // NEW DATABASE: Apply base schema
         const schemaPath = join(__dirname, "../sqlite-schema.sql");
         const schema = readFileSync(schemaPath, "utf-8");
         this.exec(schema);
@@ -146,7 +150,28 @@ export class KotaDatabase {
          path: this.config.path,
        });
      } else {
-
+        // EXISTING DATABASE: Run migrations
+        const migrationsDir = join(__dirname, "../migrations");
+        try {
+          const result = runMigrations(this, migrationsDir);
+          if (result.appliedCount > 0) {
+            logger.info("Applied pending migrations", {
+              count: result.appliedCount,
+              migrations: result.appliedMigrations,
+            });
+          }
+          // Update checksums for existing migrations (after checksum column added)
+          updateExistingChecksums(this, migrationsDir);
+          if (result.errors.length > 0) {
+            logger.error("Migration errors", { errors: result.errors });
+          }
+        } catch (error) {
+          logger.error("Migration runner failed", {
+            error: error instanceof Error ? error.message : String(error),
+          });
+          // Do not throw - allow database to continue operating with current schema
+          // This prevents startup failures due to migration issues
+        }
       }
     }
 
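
Because the client logs migration failures but deliberately does not throw, callers that need a hard guarantee can inspect schema_migrations themselves after opening the database. A hedged sketch, using the same query<T>() method the migration runner calls on KotaDatabase:

  // db is an open KotaDatabase instance; the expected migration name is from this diff
  const rows = db.query<{ name: string }>("SELECT name FROM schema_migrations ORDER BY id");
  const names = new Set(rows.map((r) => r.name));
  if (!names.has("006_add_migration_checksums")) {
    throw new Error("expected migration missing; check the 'Migration errors' log output");
  }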
package/src/db/sqlite-schema.sql
CHANGED

@@ -462,6 +462,54 @@ BEGIN
   VALUES (new.rowid, new.content);
 END;
 
+-- ============================================================================
+-- 11. Workflow Contexts Table (Issue #144)
+-- ============================================================================
+-- Stores curated context data for each workflow phase
+
+CREATE TABLE IF NOT EXISTS workflow_contexts (
+  id INTEGER PRIMARY KEY AUTOINCREMENT,
+  workflow_id TEXT NOT NULL,
+  phase TEXT NOT NULL,
+  context_data TEXT NOT NULL,
+  created_at TEXT NOT NULL DEFAULT (datetime('now')),
+  updated_at TEXT NOT NULL DEFAULT (datetime('now')),
+
+  UNIQUE(workflow_id, phase),
+  CHECK (phase IN ('analysis', 'plan', 'build', 'improve'))
+);
+
+CREATE INDEX IF NOT EXISTS idx_workflow_contexts_workflow_id
+  ON workflow_contexts(workflow_id);
+
+CREATE INDEX IF NOT EXISTS idx_workflow_contexts_created_at
+  ON workflow_contexts(created_at DESC);
+
+-- ============================================================================
+-- 12. Agent Sessions Table
+-- ============================================================================
+-- Tracks agent work sessions for learning and analysis
+
+CREATE TABLE IF NOT EXISTS agent_sessions (
+  id TEXT PRIMARY KEY,
+  repository_id TEXT NOT NULL,
+  agent_type TEXT,
+  task_summary TEXT,
+  outcome TEXT,
+  files_modified TEXT,
+  started_at TEXT NOT NULL DEFAULT (datetime('now')),
+  ended_at TEXT,
+
+  FOREIGN KEY (repository_id) REFERENCES repositories(id) ON DELETE CASCADE,
+  CHECK (outcome IS NULL OR outcome IN ('success', 'failure', 'partial'))
+);
+
+CREATE INDEX IF NOT EXISTS idx_agent_sessions_repository_id ON agent_sessions(repository_id);
+CREATE INDEX IF NOT EXISTS idx_agent_sessions_agent_type ON agent_sessions(agent_type) WHERE agent_type IS NOT NULL;
+CREATE INDEX IF NOT EXISTS idx_agent_sessions_outcome ON agent_sessions(outcome) WHERE outcome IS NOT NULL;
+CREATE INDEX IF NOT EXISTS idx_agent_sessions_started_at ON agent_sessions(started_at DESC);
+CREATE INDEX IF NOT EXISTS idx_agent_sessions_ongoing ON agent_sessions(repository_id) WHERE ended_at IS NULL;
+
 -- Record memory layer migration
 INSERT OR IGNORE INTO schema_migrations (name) VALUES ('002_memory_layer_tables');
 
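
The UNIQUE(workflow_id, phase) constraint makes per-phase upserts the natural write pattern for workflow_contexts. A sketch using standard SQLite ON CONFLICT syntax (not an API from this package; db.run with a parameter array as seen in migration-runner.ts, and the values are illustrative):

  db.run(
    `INSERT INTO workflow_contexts (workflow_id, phase, context_data)
     VALUES (?, ?, ?)
     ON CONFLICT(workflow_id, phase)
     DO UPDATE SET context_data = excluded.context_data, updated_at = datetime('now')`,
    ["wf-123", "plan", JSON.stringify({ notes: "curated context" })] // hypothetical values
  );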
package/src/indexer/constants.ts
CHANGED

package/src/indexer/parsers.ts
CHANGED

@@ -15,6 +15,7 @@ const SUPPORTED_EXTENSIONS = new Set<string>([
   ".cjs",
   ".mjs",
   ".json",
+  ".sql",
 ]);
 
 const IGNORED_DIRECTORIES = new Set<string>([

@@ -161,7 +162,8 @@ export async function parseSourceFile(
     return null;
   }
 
-  const dependencies = extractDependencies(content);
+  // Skip dependency extraction for SQL files
+  const dependencies = extname(path) === ".sql" ? [] : extractDependencies(content);
 
   return {
     projectRoot: resolve(projectRoot),
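
Net effect of the parser change: .sql files (including the new migrations) are indexed for search, but no import graph is extracted from them. The guard in isolation, as a sketch (extractDependencies is internal to the package and declared here only to make the snippet compile):

  import { extname } from "node:path";

  declare function extractDependencies(content: string): string[]; // stands in for the package's internal helper

  function dependenciesFor(path: string, content: string): string[] {
    // SQL has no JS/TS-style imports, so dependency extraction is skipped entirely
    return extname(path) === ".sql" ? [] : extractDependencies(content);
  }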