@runcontext/cli 0.3.5 → 0.4.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -1,7 +1,7 @@
1
1
  #!/usr/bin/env node
2
2
 
3
3
  // src/index.ts
4
- import { Command as Command16 } from "commander";
4
+ import { Command as Command18 } from "commander";
5
5
 
6
6
  // src/commands/lint.ts
7
7
  import { Command } from "commander";
@@ -141,7 +141,7 @@ function formatSarif(diagnostics) {
141
141
  tool: {
142
142
  driver: {
143
143
  name: "ContextKit",
144
- version: "0.2.1",
144
+ version: "0.4.1",
145
145
  informationUri: "https://github.com/erickittelson/ContextKit",
146
146
  rules: Array.from(ruleMap.values())
147
147
  }
@@ -525,11 +525,13 @@ import {
525
525
  import { Command as Command5 } from "commander";
526
526
  import chalk6 from "chalk";
527
527
  import path5 from "path";
528
- import { mkdirSync, writeFileSync as writeFileSync2, existsSync } from "fs";
528
+ import { mkdirSync, writeFileSync as writeFileSync2, existsSync, readFileSync as readFileSync2, unlinkSync } from "fs";
529
+ import * as p from "@clack/prompts";
529
530
  import {
530
531
  loadConfig as loadConfig4,
531
532
  createAdapter,
532
- scaffoldFromSchema
533
+ scaffoldFromSchema,
534
+ MissingDriverError
533
535
  } from "@runcontext/core";
534
536
  function parseDbUrl(db) {
535
537
  if (db.startsWith("duckdb://")) {
@@ -538,20 +540,52 @@ function parseDbUrl(db) {
538
540
  if (db.startsWith("postgres://") || db.startsWith("postgresql://")) {
539
541
  return { adapter: "postgres", connection: db };
540
542
  }
541
- if (db.endsWith(".duckdb") || db.endsWith(".db")) {
543
+ if (db.startsWith("mysql://")) {
544
+ return { adapter: "mysql", connection: db };
545
+ }
546
+ if (db.startsWith("mssql://") || db.startsWith("sqlserver://")) {
547
+ return { adapter: "mssql", connection: db };
548
+ }
549
+ if (db.startsWith("clickhouse://")) {
550
+ return { adapter: "clickhouse", host: db };
551
+ }
552
+ if (db.startsWith("snowflake://")) {
553
+ const parts = db.slice("snowflake://".length).split("/");
554
+ return {
555
+ adapter: "snowflake",
556
+ account: parts[0] ?? "",
557
+ database: parts[1] ?? "",
558
+ schema: parts[2] ?? ""
559
+ };
560
+ }
561
+ if (db.startsWith("bigquery://")) {
562
+ const parts = db.slice("bigquery://".length).split("/");
563
+ return {
564
+ adapter: "bigquery",
565
+ project: parts[0] ?? "",
566
+ dataset: parts[1] ?? ""
567
+ };
568
+ }
569
+ if (db.endsWith(".duckdb")) {
570
+ return { adapter: "duckdb", path: db };
571
+ }
572
+ if (db.endsWith(".sqlite") || db.endsWith(".sqlite3")) {
573
+ return { adapter: "sqlite", path: db };
574
+ }
575
+ if (db.endsWith(".db")) {
542
576
  return { adapter: "duckdb", path: db };
543
577
  }
544
578
  throw new Error(
545
- `Cannot determine adapter from "${db}". Use duckdb:// or postgres:// prefix.`
579
+ `Cannot determine adapter from "${db}". Use a URL prefix (duckdb://, postgres://, mysql://, mssql://, clickhouse://, snowflake://, bigquery://) or a recognized file extension (.duckdb, .db, .sqlite, .sqlite3).`
546
580
  );
547
581
  }
548
- var introspectCommand = new Command5("introspect").description("Introspect a database and scaffold Bronze-level OSI metadata").option(
582
+ var introspectCommand = new Command5("introspect").description("Introspect a database and scaffold Bronze-level OSI metadata. Supports: duckdb://, postgres://, mysql://, mssql://, snowflake://, bigquery://, clickhouse://, .sqlite, .duckdb files, and Databricks (via config).").option(
549
583
  "--db <url>",
550
- "Database URL (e.g., duckdb://path.duckdb or postgres://...)"
584
+ "Database URL (duckdb://path.duckdb, postgres://user:pass@host/db, mysql://..., mssql://..., snowflake://account/db/schema, bigquery://project/dataset, clickhouse://host, or file.sqlite)"
551
585
  ).option(
552
586
  "--source <name>",
553
587
  "Use a named data_source from contextkit.config.yaml"
554
- ).option("--tables <glob>", 'Filter tables by glob pattern (e.g., "vw_*")').option(
588
+ ).option("--tables <glob>", 'Filter tables by glob pattern (e.g., "vw_*")').option("--select", "Interactively select which tables to include").option(
555
589
  "--model-name <name>",
556
590
  "Name for the generated model (default: derived from source)"
557
591
  ).action(async (opts) => {
@@ -602,6 +636,25 @@ var introspectCommand = new Command5("introspect").description("Introspect a dat
602
636
  tables = tables.filter((t) => regex.test(t.name));
603
637
  }
604
638
  console.log(`Discovered ${tables.length} tables/views`);
639
+ if (opts.select && tables.length > 1) {
640
+ const selection = await p.multiselect({
641
+ message: `Select tables to include (${tables.length} found)`,
642
+ options: tables.map((t) => ({
643
+ value: t.name,
644
+ label: t.name,
645
+ hint: `${t.row_count.toLocaleString()} rows`
646
+ })),
647
+ initialValues: tables.map((t) => t.name),
648
+ required: true
649
+ });
650
+ if (p.isCancel(selection)) {
651
+ await adapter.disconnect();
652
+ process.exit(0);
653
+ }
654
+ const selected = new Set(selection);
655
+ tables = tables.filter((t) => selected.has(t.name));
656
+ console.log(`Selected ${tables.length} tables`);
657
+ }
605
658
  const columns = {};
606
659
  for (const table of tables) {
607
660
  columns[table.name] = await adapter.listColumns(table.name);
@@ -640,15 +693,46 @@ var introspectCommand = new Command5("introspect").description("Introspect a dat
640
693
  console.log(` ${path5.relative(process.cwd(), osiPath)}`);
641
694
  console.log(` ${path5.relative(process.cwd(), govPath)}`);
642
695
  console.log(` ${path5.relative(process.cwd(), ownerPath)}`);
696
+ const exampleFiles = [
697
+ path5.join(contextDir, "models", "example-model.osi.yaml"),
698
+ path5.join(contextDir, "governance", "example-model.governance.yaml"),
699
+ path5.join(contextDir, "glossary", "glossary.term.yaml"),
700
+ path5.join(contextDir, "owners", "data-team.owner.yaml")
701
+ ];
702
+ let removedCount = 0;
703
+ for (const exFile of exampleFiles) {
704
+ if (existsSync(exFile)) {
705
+ try {
706
+ const content = readFileSync2(exFile, "utf-8");
707
+ if (content.includes("example-model") || content.includes("Replace this") || content.includes("Example Term") || content.includes("team: data-team")) {
708
+ unlinkSync(exFile);
709
+ removedCount++;
710
+ }
711
+ } catch {
712
+ }
713
+ }
714
+ }
715
+ if (removedCount > 0) {
716
+ p.log.info(`Removed ${removedCount} example template file(s) from init`);
717
+ }
643
718
  console.log("");
644
719
  console.log(chalk6.cyan("Run `context tier` to check your tier score."));
645
720
  console.log(
646
721
  chalk6.cyan("Run `context verify` to validate against data.")
647
722
  );
648
723
  } catch (err) {
649
- console.error(
650
- chalk6.red(`Introspect failed: ${err.message}`)
651
- );
724
+ if (err instanceof MissingDriverError) {
725
+ console.error(chalk6.yellow(`
726
+ Missing driver: "${err.driverPackage}" is required for ${err.adapter}.
727
+ `));
728
+ console.error(chalk6.white(`Install it with:
729
+ npm install ${err.driverPackage}
730
+ `));
731
+ } else {
732
+ console.error(
733
+ chalk6.red(`Introspect failed: ${err.message}`)
734
+ );
735
+ }
652
736
  process.exit(1);
653
737
  }
654
738
  });
@@ -914,7 +998,7 @@ var fixCommand = new Command7("fix").description("Auto-fix lint issues").option(
914
998
  import { Command as Command8 } from "commander";
915
999
  import chalk9 from "chalk";
916
1000
  import path8 from "path";
917
- import { readFileSync as readFileSync2, writeFileSync as writeFileSync3 } from "fs";
1001
+ import { readFileSync as readFileSync3, writeFileSync as writeFileSync3 } from "fs";
918
1002
  import {
919
1003
  compile as compile7,
920
1004
  loadConfig as loadConfig7,
@@ -944,7 +1028,7 @@ async function runLint(contextDir, fix) {
944
1028
  if (fix) {
945
1029
  const fixable = allDiags.filter((d) => d.fixable && d.fix);
946
1030
  if (fixable.length > 0) {
947
- const fixes = applyFixes3(fixable, (filePath) => readFileSync2(filePath, "utf-8"));
1031
+ const fixes = applyFixes3(fixable, (filePath) => readFileSync3(filePath, "utf-8"));
948
1032
  for (const [file, content] of fixes) {
949
1033
  writeFileSync3(file, content, "utf-8");
950
1034
  }
@@ -993,7 +1077,7 @@ async function runLint(contextDir, fix) {
993
1077
  console.log("");
994
1078
  previousDiags = currentDiags;
995
1079
  }
996
- var devCommand = new Command8("dev").description("Watch mode \u2014 re-run lint on file changes").option("--context-dir <path>", "Path to context directory").option("--fix", "Auto-fix problems on each re-lint").action(async (opts) => {
1080
+ var devCommand = new Command8("dev").description("Watch mode \u2014 re-run lint on file changes").option("--context-dir <path>", "Path to context directory").option("--fix", "Auto-fix problems on each re-lint").option("--studio", "Open interactive metadata editor in the browser").option("--port <number>", "Studio server port (default: 4040)", "4040").option("--host <address>", "Studio server host (default: localhost)", "localhost").action(async (opts) => {
997
1081
  try {
998
1082
  const config = loadConfig7(process.cwd());
999
1083
  const contextDir = opts.contextDir ? path8.resolve(opts.contextDir) : path8.resolve(config.context_dir);
@@ -1002,6 +1086,28 @@ var devCommand = new Command8("dev").description("Watch mode \u2014 re-run lint
1002
1086
  if (fix) console.log(chalk9.blue("Auto-fix enabled."));
1003
1087
  console.log(chalk9.gray("Press Ctrl+C to stop.\n"));
1004
1088
  await runLint(contextDir, fix);
1089
+ let recompileAndBroadcast;
1090
+ if (opts.studio) {
1091
+ const { startStudioServer } = await import("./server-DEKWPP3H.js");
1092
+ const studioPort = parseInt(opts.port, 10);
1093
+ const { server: _studioServer, recompileAndBroadcast: rab } = await startStudioServer({
1094
+ contextDir,
1095
+ rootDir: process.cwd(),
1096
+ port: studioPort,
1097
+ host: opts.host
1098
+ });
1099
+ recompileAndBroadcast = rab;
1100
+ const studioUrl = `http://${opts.host === "0.0.0.0" ? "localhost" : opts.host}:${studioPort}`;
1101
+ console.log(chalk9.green(`
1102
+ Studio running at ${chalk9.bold(studioUrl)}
1103
+ `));
1104
+ const { execFile } = await import("child_process");
1105
+ const openCmd = process.platform === "darwin" ? "open" : process.platform === "win32" ? "cmd" : "xdg-open";
1106
+ const openArgs = process.platform === "win32" ? ["/c", "start", studioUrl] : [studioUrl];
1107
+ execFile(openCmd, openArgs, (err) => {
1108
+ if (err) console.log(chalk9.gray(` Open ${studioUrl} in your browser`));
1109
+ });
1110
+ }
1005
1111
  const { watch } = await import("chokidar");
1006
1112
  let debounceTimer = null;
1007
1113
  const watcher = watch(contextDir, {
@@ -1015,6 +1121,9 @@ var devCommand = new Command8("dev").description("Watch mode \u2014 re-run lint
1015
1121
  debounceTimer = setTimeout(async () => {
1016
1122
  try {
1017
1123
  await runLint(contextDir, fix);
1124
+ if (recompileAndBroadcast) {
1125
+ await recompileAndBroadcast();
1126
+ }
1018
1127
  } catch (err) {
1019
1128
  console.error(
1020
1129
  chalk9.red(`Lint error: ${err.message}`)
@@ -1293,7 +1402,7 @@ var validateOsiCommand = new Command12("validate-osi").description("Validate a s
1293
1402
  import { Command as Command13 } from "commander";
1294
1403
  import chalk14 from "chalk";
1295
1404
  import path12 from "path";
1296
- import { readFileSync as readFileSync3, writeFileSync as writeFileSync4, mkdirSync as mkdirSync2, existsSync as existsSync2, readdirSync } from "fs";
1405
+ import { readFileSync as readFileSync4, writeFileSync as writeFileSync4, mkdirSync as mkdirSync2, existsSync as existsSync2, readdirSync } from "fs";
1297
1406
  import * as yaml from "yaml";
1298
1407
  import {
1299
1408
  compile as compile9,
@@ -1373,7 +1482,7 @@ var enrichCommand = new Command13("enrich").description("Suggest or apply metada
1373
1482
  }
1374
1483
  const govFilePath = findFileRecursive(contextDir, `${modelName}.governance.yaml`);
1375
1484
  if (govFilePath) {
1376
- const govContent = readFileSync3(govFilePath, "utf-8");
1485
+ const govContent = readFileSync4(govFilePath, "utf-8");
1377
1486
  const govDoc = yaml.parse(govContent) ?? {};
1378
1487
  if (suggestions.governance?.trust) {
1379
1488
  govDoc.trust = suggestions.governance.trust;
@@ -1576,15 +1685,479 @@ var rulesCommand = new Command14("rules").description("List all lint rules with
1576
1685
 
1577
1686
  // src/commands/setup.ts
1578
1687
  import { Command as Command15 } from "commander";
1579
- import * as p9 from "@clack/prompts";
1688
+ import * as p10 from "@clack/prompts";
1580
1689
  import chalk16 from "chalk";
1581
1690
 
1582
1691
  // src/setup/steps/connect.ts
1583
- import * as p from "@clack/prompts";
1584
- import path13 from "path";
1585
- import { existsSync as existsSync3, readFileSync as readFileSync4, writeFileSync as writeFileSync5 } from "fs";
1692
+ import * as p2 from "@clack/prompts";
1693
+ import path14 from "path";
1694
+ import { existsSync as existsSync4, readFileSync as readFileSync6, writeFileSync as writeFileSync5 } from "fs";
1695
+ import { execFileSync } from "child_process";
1586
1696
  import * as yaml2 from "yaml";
1587
- import { loadConfig as loadConfig10, createAdapter as createAdapter5 } from "@runcontext/core";
1697
+ import { loadConfig as loadConfig10, createAdapter as createAdapter5, MissingDriverError as MissingDriverError2 } from "@runcontext/core";
1698
+
1699
+ // src/setup/mcp-discovery.ts
1700
+ import * as fs4 from "fs";
1701
+ import * as path13 from "path";
1702
+ import * as os from "os";
1703
+ var NAME_PATTERNS = {
1704
+ duckdb: "duckdb",
1705
+ motherduck: "duckdb",
1706
+ postgres: "postgres",
1707
+ postgresql: "postgres",
1708
+ neon: "postgres",
1709
+ supabase: "postgres",
1710
+ mysql: "mysql",
1711
+ sqlite: "sqlite",
1712
+ snowflake: "snowflake",
1713
+ bigquery: "bigquery",
1714
+ clickhouse: "clickhouse",
1715
+ databricks: "databricks",
1716
+ mssql: "mssql",
1717
+ "sql-server": "mssql",
1718
+ redshift: "postgres"
1719
+ // Redshift is Postgres-compatible
1720
+ };
1721
+ var PACKAGE_PATTERNS = {
1722
+ "@motherduck/mcp": "duckdb",
1723
+ "mcp-server-duckdb": "duckdb",
1724
+ "mcp-server-postgres": "postgres",
1725
+ "mcp-server-postgresql": "postgres",
1726
+ "@neon/mcp": "postgres",
1727
+ "@supabase/mcp": "postgres",
1728
+ "mcp-server-mysql": "mysql",
1729
+ "mcp-server-sqlite": "sqlite",
1730
+ "mcp-server-snowflake": "snowflake",
1731
+ "mcp-server-bigquery": "bigquery",
1732
+ "mcp-server-clickhouse": "clickhouse",
1733
+ "mcp-server-databricks": "databricks",
1734
+ "mcp-server-mssql": "mssql",
1735
+ "mcp-server-redshift": "postgres"
1736
+ };
1737
+ var CONNECTION_ENV_VARS = [
1738
+ "DATABASE_URL",
1739
+ "POSTGRES_URL",
1740
+ "POSTGRESQL_URL",
1741
+ "PG_CONNECTION_STRING",
1742
+ "MYSQL_URL",
1743
+ "MYSQL_CONNECTION_STRING",
1744
+ "DUCKDB_PATH",
1745
+ "SQLITE_PATH",
1746
+ "SNOWFLAKE_ACCOUNT",
1747
+ "BIGQUERY_PROJECT",
1748
+ "CLICKHOUSE_URL",
1749
+ "DATABRICKS_HOST",
1750
+ "MSSQL_CONNECTION_STRING",
1751
+ "REDSHIFT_URL"
1752
+ ];
1753
+ var CONNECTION_FLAGS = [
1754
+ "--db-path",
1755
+ "--database",
1756
+ "--connection-string",
1757
+ "--connection",
1758
+ "--host",
1759
+ "--port",
1760
+ "--db",
1761
+ "--dsn"
1762
+ ];
1763
+ var URI_SCHEMES = {
1764
+ "postgres://": "postgres",
1765
+ "postgresql://": "postgres",
1766
+ "mysql://": "mysql",
1767
+ "clickhouse://": "clickhouse",
1768
+ "mssql://": "mssql",
1769
+ "jdbc:": "postgres"
1770
+ // conservative fallback
1771
+ };
1772
+ function getConfigLocations(cwd) {
1773
+ const home = os.homedir();
1774
+ const isMac = process.platform === "darwin";
1775
+ const locations = [
1776
+ // Claude Code
1777
+ { ide: "claude-code", scope: "user", path: path13.join(home, ".claude.json") },
1778
+ { ide: "claude-code", scope: "project", path: path13.join(cwd, ".mcp.json") },
1779
+ // Cursor
1780
+ { ide: "cursor", scope: "user", path: path13.join(home, ".cursor", "mcp.json") },
1781
+ { ide: "cursor", scope: "project", path: path13.join(cwd, ".cursor", "mcp.json") },
1782
+ // VS Code / Copilot
1783
+ { ide: "vscode", scope: "project", path: path13.join(cwd, ".vscode", "mcp.json") },
1784
+ // Windsurf
1785
+ { ide: "windsurf", scope: "user", path: path13.join(home, ".codeium", "windsurf", "mcp_config.json") }
1786
+ ];
1787
+ if (isMac) {
1788
+ locations.push(
1789
+ {
1790
+ ide: "claude-code",
1791
+ scope: "managed",
1792
+ path: path13.join("/", "Library", "Application Support", "ClaudeCode", "managed-mcp.json")
1793
+ },
1794
+ {
1795
+ ide: "claude-desktop",
1796
+ scope: "user",
1797
+ path: path13.join(home, "Library", "Application Support", "Claude", "claude_desktop_config.json")
1798
+ }
1799
+ );
1800
+ }
1801
+ return locations;
1802
+ }
1803
+ function readJsonSafe(filePath) {
1804
+ try {
1805
+ if (!fs4.existsSync(filePath)) return null;
1806
+ const raw = fs4.readFileSync(filePath, "utf-8");
1807
+ return JSON.parse(raw);
1808
+ } catch {
1809
+ return null;
1810
+ }
1811
+ }
1812
+ function extractServers(json, ide) {
1813
+ if (json === null || typeof json !== "object") return null;
1814
+ const obj = json;
1815
+ if (ide === "vscode") {
1816
+ const servers = obj["servers"];
1817
+ if (servers && typeof servers === "object") {
1818
+ return servers;
1819
+ }
1820
+ return null;
1821
+ }
1822
+ const mcpServers = obj["mcpServers"];
1823
+ if (mcpServers && typeof mcpServers === "object") {
1824
+ return mcpServers;
1825
+ }
1826
+ return null;
1827
+ }
1828
+ function expandEnvValue(value) {
1829
+ return value.replace(/\$\{([^}]+)\}/g, (_match, expr) => {
1830
+ const defaultSep = expr.indexOf(":-");
1831
+ if (defaultSep !== -1) {
1832
+ const varName = expr.slice(0, defaultSep);
1833
+ const defaultVal = expr.slice(defaultSep + 2);
1834
+ return process.env[varName] ?? defaultVal;
1835
+ }
1836
+ return process.env[expr] ?? "";
1837
+ });
1838
+ }
1839
+ function expandEnvMap(env) {
1840
+ if (!env) return void 0;
1841
+ const expanded = {};
1842
+ for (const [key, value] of Object.entries(env)) {
1843
+ expanded[key] = expandEnvValue(String(value));
1844
+ }
1845
+ return expanded;
1846
+ }
1847
+ function detectAdapterType(serverName, entry) {
1848
+ const nameLower = serverName.toLowerCase();
1849
+ for (const [pattern, adapter] of Object.entries(NAME_PATTERNS)) {
1850
+ if (nameLower.includes(pattern)) return adapter;
1851
+ }
1852
+ const allTokens = [entry.command ?? "", ...entry.args ?? []].map(
1853
+ (s) => String(s).toLowerCase()
1854
+ );
1855
+ for (const [pkg, adapter] of Object.entries(PACKAGE_PATTERNS)) {
1856
+ if (allTokens.some((t) => t.includes(pkg.toLowerCase()))) return adapter;
1857
+ }
1858
+ const argsStr = (entry.args ?? []).join(" ").toLowerCase();
1859
+ for (const flag of CONNECTION_FLAGS) {
1860
+ if (argsStr.includes(flag)) {
1861
+ return inferAdapterFromArgs(entry.args ?? []);
1862
+ }
1863
+ }
1864
+ if (entry.env) {
1865
+ for (const envVar of CONNECTION_ENV_VARS) {
1866
+ if (envVar in entry.env) {
1867
+ return inferAdapterFromEnvVar(envVar, entry.env[envVar] ?? "");
1868
+ }
1869
+ }
1870
+ }
1871
+ for (const arg of entry.args ?? []) {
1872
+ for (const [scheme, adapter] of Object.entries(URI_SCHEMES)) {
1873
+ if (String(arg).startsWith(scheme)) return adapter;
1874
+ }
1875
+ }
1876
+ return null;
1877
+ }
1878
+ function inferAdapterFromEnvVar(varName, value) {
1879
+ const upper = varName.toUpperCase();
1880
+ if (upper.includes("POSTGRES") || upper.includes("PG_")) return "postgres";
1881
+ if (upper.includes("MYSQL")) return "mysql";
1882
+ if (upper.includes("DUCKDB")) return "duckdb";
1883
+ if (upper.includes("SQLITE")) return "sqlite";
1884
+ if (upper.includes("SNOWFLAKE")) return "snowflake";
1885
+ if (upper.includes("BIGQUERY")) return "bigquery";
1886
+ if (upper.includes("CLICKHOUSE")) return "clickhouse";
1887
+ if (upper.includes("DATABRICKS")) return "databricks";
1888
+ if (upper.includes("MSSQL")) return "mssql";
1889
+ if (upper.includes("REDSHIFT")) return "postgres";
1890
+ if (upper === "DATABASE_URL") {
1891
+ for (const [scheme, adapter] of Object.entries(URI_SCHEMES)) {
1892
+ if (String(value).startsWith(scheme)) return adapter;
1893
+ }
1894
+ }
1895
+ return "postgres";
1896
+ }
1897
+ function inferAdapterFromArgs(args) {
1898
+ const joined = args.join(" ").toLowerCase();
1899
+ for (const [scheme, adapter] of Object.entries(URI_SCHEMES)) {
1900
+ if (joined.includes(scheme)) return adapter;
1901
+ }
1902
+ if (joined.includes(".duckdb") || joined.includes(".db") || joined.includes("duckdb")) return "duckdb";
1903
+ if (joined.includes(".sqlite") || joined.includes(".sqlite3")) return "sqlite";
1904
+ return null;
1905
+ }
1906
+ function extractConnectionDetails(entry, adapterType) {
1907
+ const args = (entry.args ?? []).map(String);
1908
+ const env = expandEnvMap(entry.env);
1909
+ const command = String(entry.command ?? "");
1910
+ const details = {
1911
+ command,
1912
+ args,
1913
+ env
1914
+ };
1915
+ for (let i = 0; i < args.length; i++) {
1916
+ const arg = args[i];
1917
+ const nextArg = i + 1 < args.length ? args[i + 1] : void 0;
1918
+ if ((arg === "--db-path" || arg === "--database" || arg === "--db") && nextArg) {
1919
+ if (adapterType === "duckdb" || adapterType === "sqlite") {
1920
+ details.path = nextArg;
1921
+ } else {
1922
+ details.database = nextArg;
1923
+ }
1924
+ }
1925
+ if ((arg === "--connection-string" || arg === "--connection" || arg === "--dsn") && nextArg) {
1926
+ details.connection = nextArg;
1927
+ }
1928
+ if (arg === "--host" && nextArg) {
1929
+ details.host = nextArg;
1930
+ }
1931
+ if (arg === "--port" && nextArg) {
1932
+ const port = parseInt(nextArg, 10);
1933
+ if (!isNaN(port)) details.port = port;
1934
+ }
1935
+ }
1936
+ if (!details.connection && !details.path) {
1937
+ for (const arg of args) {
1938
+ for (const scheme of Object.keys(URI_SCHEMES)) {
1939
+ if (arg.startsWith(scheme)) {
1940
+ details.connection = arg;
1941
+ break;
1942
+ }
1943
+ }
1944
+ if (details.connection) break;
1945
+ if ((adapterType === "duckdb" || adapterType === "sqlite") && (arg.endsWith(".duckdb") || arg.endsWith(".db") || arg.endsWith(".sqlite") || arg.endsWith(".sqlite3"))) {
1946
+ details.path = arg;
1947
+ break;
1948
+ }
1949
+ }
1950
+ }
1951
+ if (env) {
1952
+ if (!details.connection) {
1953
+ for (const varName of CONNECTION_ENV_VARS) {
1954
+ if (env[varName]) {
1955
+ const val = env[varName];
1956
+ for (const scheme of Object.keys(URI_SCHEMES)) {
1957
+ if (val.startsWith(scheme)) {
1958
+ details.connection = val;
1959
+ break;
1960
+ }
1961
+ }
1962
+ if (details.connection) break;
1963
+ if ((adapterType === "duckdb" || adapterType === "sqlite") && (varName.includes("PATH") || varName.includes("DATABASE"))) {
1964
+ details.path = val;
1965
+ break;
1966
+ }
1967
+ }
1968
+ }
1969
+ }
1970
+ if (!details.host && env["HOST"]) details.host = env["HOST"];
1971
+ if (!details.host && env["DB_HOST"]) details.host = env["DB_HOST"];
1972
+ if (!details.port && env["PORT"]) {
1973
+ const p11 = parseInt(env["PORT"], 10);
1974
+ if (!isNaN(p11)) details.port = p11;
1975
+ }
1976
+ if (!details.port && env["DB_PORT"]) {
1977
+ const p11 = parseInt(env["DB_PORT"], 10);
1978
+ if (!isNaN(p11)) details.port = p11;
1979
+ }
1980
+ if (!details.database && env["DB_NAME"]) details.database = env["DB_NAME"];
1981
+ }
1982
+ return details;
1983
+ }
1984
+ var IDE_LABELS = {
1985
+ "claude-code": "Claude Code",
1986
+ cursor: "Cursor",
1987
+ vscode: "VS Code",
1988
+ windsurf: "Windsurf",
1989
+ "claude-desktop": "Claude Desktop"
1990
+ };
1991
+ var ADAPTER_LABELS = {
1992
+ duckdb: "DuckDB",
1993
+ postgres: "PostgreSQL",
1994
+ mysql: "MySQL",
1995
+ sqlite: "SQLite",
1996
+ snowflake: "Snowflake",
1997
+ bigquery: "BigQuery",
1998
+ clickhouse: "ClickHouse",
1999
+ databricks: "Databricks",
2000
+ mssql: "SQL Server"
2001
+ };
2002
+ function buildLabel(serverName, adapterType, details, ide) {
2003
+ const adapterLabel = ADAPTER_LABELS[adapterType] ?? adapterType;
2004
+ const ideLabel = IDE_LABELS[ide] ?? ide;
2005
+ let descriptor = "";
2006
+ if (details.path) {
2007
+ descriptor = path13.basename(details.path);
2008
+ } else if (details.database) {
2009
+ descriptor = details.database;
2010
+ } else if (details.connection) {
2011
+ try {
2012
+ const url = new URL(details.connection);
2013
+ const dbName = url.pathname.replace(/^\//, "");
2014
+ if (dbName) descriptor = dbName;
2015
+ } catch {
2016
+ }
2017
+ }
2018
+ if (!descriptor && details.host) {
2019
+ descriptor = details.host;
2020
+ }
2021
+ if (descriptor) {
2022
+ return `${adapterLabel} \u2014 ${descriptor} (from ${ideLabel})`;
2023
+ }
2024
+ return `${adapterLabel} \u2014 ${serverName} (from ${ideLabel})`;
2025
+ }
2026
+ function deduplicationKey(db) {
2027
+ const parts = [db.adapterType];
2028
+ if (db.connectionDetails.path) parts.push(db.connectionDetails.path);
2029
+ if (db.connectionDetails.connection) parts.push(db.connectionDetails.connection);
2030
+ if (db.connectionDetails.host) parts.push(db.connectionDetails.host);
2031
+ if (db.connectionDetails.port) parts.push(String(db.connectionDetails.port));
2032
+ if (db.connectionDetails.database) parts.push(db.connectionDetails.database);
2033
+ if (parts.length === 1) parts.push(db.serverName);
2034
+ return parts.join("|");
2035
+ }
2036
+ function discoverDatabases(cwd) {
2037
+ const results = [];
2038
+ try {
2039
+ const locations = getConfigLocations(cwd);
2040
+ for (const loc of locations) {
2041
+ try {
2042
+ const json = readJsonSafe(loc.path);
2043
+ if (json === null) continue;
2044
+ const servers = extractServers(json, loc.ide);
2045
+ if (!servers) continue;
2046
+ for (const [serverName, entry] of Object.entries(servers)) {
2047
+ try {
2048
+ if (!entry || typeof entry !== "object") continue;
2049
+ const adapterType = detectAdapterType(serverName, entry);
2050
+ if (!adapterType) continue;
2051
+ const connectionDetails = extractConnectionDetails(entry, adapterType);
2052
+ const label = buildLabel(serverName, adapterType, connectionDetails, loc.ide);
2053
+ results.push({
2054
+ ide: loc.ide,
2055
+ scope: loc.scope,
2056
+ serverName,
2057
+ adapterType,
2058
+ connectionDetails,
2059
+ label
2060
+ });
2061
+ } catch {
2062
+ }
2063
+ }
2064
+ } catch {
2065
+ }
2066
+ }
2067
+ } catch {
2068
+ }
2069
+ const scopeOrder = { project: 0, user: 1, managed: 2 };
2070
+ results.sort((a, b) => {
2071
+ const sa = scopeOrder[a.scope] ?? 9;
2072
+ const sb = scopeOrder[b.scope] ?? 9;
2073
+ return sa - sb;
2074
+ });
2075
+ const seen = /* @__PURE__ */ new Set();
2076
+ const deduped = [];
2077
+ for (const db of results) {
2078
+ const key = deduplicationKey(db);
2079
+ if (!seen.has(key)) {
2080
+ seen.add(key);
2081
+ deduped.push(db);
2082
+ }
2083
+ }
2084
+ return deduped;
2085
+ }
2086
+ function toDataSourceConfig(discovered) {
2087
+ const { adapterType, connectionDetails } = discovered;
2088
+ switch (adapterType) {
2089
+ case "duckdb":
2090
+ case "sqlite": {
2091
+ const filePath = connectionDetails.path;
2092
+ if (!filePath) return null;
2093
+ return { adapter: adapterType, path: filePath };
2094
+ }
2095
+ case "postgres":
2096
+ case "mysql":
2097
+ case "mssql": {
2098
+ if (connectionDetails.connection) {
2099
+ return { adapter: adapterType, connection: connectionDetails.connection };
2100
+ }
2101
+ if (connectionDetails.host) {
2102
+ const config = {
2103
+ adapter: adapterType,
2104
+ host: connectionDetails.host
2105
+ };
2106
+ if (connectionDetails.port) config.port = connectionDetails.port;
2107
+ if (connectionDetails.database) config.database = connectionDetails.database;
2108
+ return config;
2109
+ }
2110
+ return null;
2111
+ }
2112
+ case "snowflake": {
2113
+ if (connectionDetails.host) {
2114
+ return {
2115
+ adapter: "snowflake",
2116
+ account: connectionDetails.host,
2117
+ database: connectionDetails.database
2118
+ };
2119
+ }
2120
+ return null;
2121
+ }
2122
+ case "bigquery": {
2123
+ if (connectionDetails.database) {
2124
+ return {
2125
+ adapter: "bigquery",
2126
+ project: connectionDetails.database
2127
+ };
2128
+ }
2129
+ return null;
2130
+ }
2131
+ case "clickhouse": {
2132
+ if (connectionDetails.connection) {
2133
+ return { adapter: "clickhouse", host: connectionDetails.connection };
2134
+ }
2135
+ if (connectionDetails.host) {
2136
+ const config = {
2137
+ adapter: "clickhouse",
2138
+ host: connectionDetails.host
2139
+ };
2140
+ if (connectionDetails.port) config.port = connectionDetails.port;
2141
+ if (connectionDetails.database) config.database = connectionDetails.database;
2142
+ return config;
2143
+ }
2144
+ return null;
2145
+ }
2146
+ case "databricks": {
2147
+ if (connectionDetails.host) {
2148
+ return {
2149
+ adapter: "databricks",
2150
+ serverHostname: connectionDetails.host
2151
+ };
2152
+ }
2153
+ return null;
2154
+ }
2155
+ default:
2156
+ return null;
2157
+ }
2158
+ }
2159
+
2160
+ // src/setup/steps/connect.ts
1588
2161
  function autoDetectDb(cwd) {
1589
2162
  try {
1590
2163
  const config = loadConfig10(cwd);
@@ -1603,26 +2176,37 @@ function autoDetectDb(cwd) {
1603
2176
  } catch {
1604
2177
  }
1605
2178
  }
1606
- if (process.env.DUCKDB_PATH && existsSync3(process.env.DUCKDB_PATH)) {
2179
+ if (process.env.DUCKDB_PATH && existsSync4(process.env.DUCKDB_PATH)) {
1607
2180
  return {
1608
2181
  dsConfig: { adapter: "duckdb", path: process.env.DUCKDB_PATH },
1609
2182
  label: `duckdb \u2014 $DUCKDB_PATH`
1610
2183
  };
1611
2184
  }
1612
- const mcpPath = path13.join(cwd, ".claude", "mcp.json");
1613
- if (existsSync3(mcpPath)) {
2185
+ try {
2186
+ const discovered = discoverDatabases(cwd);
2187
+ if (discovered.length > 0) {
2188
+ const first = discovered[0];
2189
+ const ds = toDataSourceConfig(first);
2190
+ if (ds) {
2191
+ return { dsConfig: ds, label: first.label };
2192
+ }
2193
+ }
2194
+ } catch {
2195
+ }
2196
+ const mcpPath = path14.join(cwd, ".claude", "mcp.json");
2197
+ if (existsSync4(mcpPath)) {
1614
2198
  try {
1615
- const mcpConfig = JSON.parse(readFileSync4(mcpPath, "utf-8"));
2199
+ const mcpConfig = JSON.parse(readFileSync6(mcpPath, "utf-8"));
1616
2200
  const duckdbServer = mcpConfig.mcpServers?.duckdb;
1617
2201
  if (duckdbServer?.args) {
1618
2202
  const args = duckdbServer.args;
1619
2203
  const idx = args.indexOf("--db-path");
1620
2204
  if (idx >= 0 && args[idx + 1]) {
1621
2205
  const dbPath = args[idx + 1];
1622
- if (existsSync3(dbPath)) {
2206
+ if (existsSync4(dbPath)) {
1623
2207
  return {
1624
2208
  dsConfig: { adapter: "duckdb", path: dbPath },
1625
- label: `duckdb \u2014 ${path13.basename(dbPath)} (from .claude/mcp.json)`
2209
+ label: `duckdb \u2014 ${path14.basename(dbPath)} (from .claude/mcp.json)`
1626
2210
  };
1627
2211
  }
1628
2212
  }
@@ -1632,84 +2216,308 @@ function autoDetectDb(cwd) {
1632
2216
  }
1633
2217
  return void 0;
1634
2218
  }
1635
- async function promptForConnection() {
1636
- const connector = await p.select({
1637
- message: "Select your database",
2219
+ function discoverAllDatabases(cwd) {
2220
+ try {
2221
+ return discoverDatabases(cwd).map((d) => {
2222
+ const dsConfig = toDataSourceConfig(d);
2223
+ if (!dsConfig) return null;
2224
+ return { dsConfig, label: d.label };
2225
+ }).filter((d) => d !== null);
2226
+ } catch {
2227
+ return [];
2228
+ }
2229
+ }
2230
+ async function promptForFileDb(adapter, ext, envDefault) {
2231
+ const method = await p2.select({
2232
+ message: "How do you connect?",
1638
2233
  options: [
1639
- { value: "duckdb", label: "DuckDB", hint: "Local .duckdb file" },
1640
- { value: "postgres", label: "PostgreSQL", hint: "Connection string" }
2234
+ { value: "env", label: "Environment variable", hint: `e.g. ${envDefault}` },
2235
+ { value: "path", label: "File path", hint: `e.g. ./warehouse${ext}` }
1641
2236
  ]
1642
2237
  });
1643
- if (p.isCancel(connector)) return void 0;
1644
- if (connector === "duckdb") {
1645
- const method = await p.select({
1646
- message: "How do you connect?",
1647
- options: [
1648
- { value: "env", label: "Environment variable", hint: "e.g. DUCKDB_PATH" },
1649
- { value: "path", label: "File path", hint: "e.g. ./warehouse.duckdb" }
1650
- ]
2238
+ if (p2.isCancel(method)) return void 0;
2239
+ if (method === "env") {
2240
+ const envName = await p2.text({
2241
+ message: "Environment variable name",
2242
+ initialValue: envDefault,
2243
+ validate(value) {
2244
+ if (!value) return "Required";
2245
+ const resolved = process.env[value];
2246
+ if (!resolved) return `$${value} is not set`;
2247
+ if (!existsSync4(resolved)) return `$${value} points to "${resolved}" which does not exist`;
2248
+ }
1651
2249
  });
1652
- if (p.isCancel(method)) return void 0;
1653
- if (method === "env") {
1654
- const envName = await p.text({
1655
- message: "Environment variable name",
1656
- initialValue: "DUCKDB_PATH",
1657
- validate(value) {
1658
- if (!value) return "Required";
1659
- const resolved = process.env[value];
1660
- if (!resolved) return `$${value} is not set`;
1661
- if (!existsSync3(resolved)) return `$${value} points to "${resolved}" which does not exist`;
1662
- }
1663
- });
1664
- if (p.isCancel(envName)) return void 0;
1665
- return { adapter: "duckdb", path: process.env[envName] };
1666
- } else {
1667
- const filePath = await p.text({
1668
- message: "Path to .duckdb file",
1669
- placeholder: "./warehouse.duckdb",
1670
- validate(value) {
1671
- if (!value) return "Required";
1672
- if (!existsSync3(value)) return `File not found: ${value}`;
1673
- }
1674
- });
1675
- if (p.isCancel(filePath)) return void 0;
1676
- return { adapter: "duckdb", path: path13.resolve(filePath) };
1677
- }
2250
+ if (p2.isCancel(envName)) return void 0;
2251
+ return { adapter, path: process.env[envName] };
1678
2252
  } else {
1679
- const method = await p.select({
1680
- message: "How do you connect?",
1681
- options: [
1682
- { value: "env", label: "Environment variable", hint: "e.g. DATABASE_URL" },
1683
- { value: "url", label: "Connection string", hint: "postgres://..." }
1684
- ]
2253
+ const filePath = await p2.text({
2254
+ message: `Path to ${ext} file`,
2255
+ placeholder: `./warehouse${ext}`,
2256
+ validate(value) {
2257
+ if (!value) return "Required";
2258
+ if (!existsSync4(value)) return `File not found: ${value}`;
2259
+ }
1685
2260
  });
1686
- if (p.isCancel(method)) return void 0;
1687
- if (method === "env") {
1688
- const envName = await p.text({
1689
- message: "Environment variable name",
1690
- initialValue: "DATABASE_URL",
1691
- validate(value) {
1692
- if (!value) return "Required";
1693
- const resolved = process.env[value];
1694
- if (!resolved) return `$${value} is not set`;
1695
- }
1696
- });
1697
- if (p.isCancel(envName)) return void 0;
1698
- return { adapter: "postgres", connection: process.env[envName] };
1699
- } else {
1700
- const url = await p.text({
1701
- message: "Connection string",
1702
- placeholder: "postgres://user:pass@host:5432/dbname",
1703
- validate(value) {
1704
- if (!value) return "Required";
1705
- if (!value.startsWith("postgres://") && !value.startsWith("postgresql://")) {
1706
- return "Must start with postgres:// or postgresql://";
1707
- }
2261
+ if (p2.isCancel(filePath)) return void 0;
2262
+ return { adapter, path: path14.resolve(filePath) };
2263
+ }
2264
+ }
2265
+ async function promptForConnectionString(adapter, scheme, envDefault) {
2266
+ const method = await p2.select({
2267
+ message: "How do you connect?",
2268
+ options: [
2269
+ { value: "env", label: "Environment variable", hint: `e.g. ${envDefault}` },
2270
+ { value: "url", label: "Connection string", hint: `${scheme}://...` }
2271
+ ]
2272
+ });
2273
+ if (p2.isCancel(method)) return void 0;
2274
+ if (method === "env") {
2275
+ const envName = await p2.text({
2276
+ message: "Environment variable name",
2277
+ initialValue: envDefault,
2278
+ validate(value) {
2279
+ if (!value) return "Required";
2280
+ const resolved = process.env[value];
2281
+ if (!resolved) return `$${value} is not set`;
2282
+ }
2283
+ });
2284
+ if (p2.isCancel(envName)) return void 0;
2285
+ return { adapter, connection: process.env[envName] };
2286
+ } else {
2287
+ const url = await p2.text({
2288
+ message: "Connection string",
2289
+ placeholder: `${scheme}://user:pass@host:5432/dbname`,
2290
+ validate(value) {
2291
+ if (!value) return "Required";
2292
+ if (!value.startsWith(`${scheme}://`)) {
2293
+ return `Must start with ${scheme}://`;
1708
2294
  }
1709
- });
1710
- if (p.isCancel(url)) return void 0;
1711
- return { adapter: "postgres", connection: url };
2295
+ }
2296
+ });
2297
+ if (p2.isCancel(url)) return void 0;
2298
+ return { adapter, connection: url };
2299
+ }
2300
+ }
2301
+ async function promptForSnowflake() {
2302
+ const account = await p2.text({
2303
+ message: "Snowflake account identifier",
2304
+ placeholder: "xy12345.us-east-1",
2305
+ validate(value) {
2306
+ if (!value) return "Required";
2307
+ }
2308
+ });
2309
+ if (p2.isCancel(account)) return void 0;
2310
+ const username = await p2.text({
2311
+ message: "Username",
2312
+ validate(value) {
2313
+ if (!value) return "Required";
2314
+ }
2315
+ });
2316
+ if (p2.isCancel(username)) return void 0;
2317
+ const password2 = await p2.password({
2318
+ message: "Password",
2319
+ validate(value) {
2320
+ if (!value) return "Required";
2321
+ }
2322
+ });
2323
+ if (p2.isCancel(password2)) return void 0;
2324
+ const warehouse = await p2.text({
2325
+ message: "Warehouse",
2326
+ placeholder: "COMPUTE_WH",
2327
+ validate(value) {
2328
+ if (!value) return "Required";
2329
+ }
2330
+ });
2331
+ if (p2.isCancel(warehouse)) return void 0;
2332
+ const database = await p2.text({
2333
+ message: "Database",
2334
+ validate(value) {
2335
+ if (!value) return "Required";
2336
+ }
2337
+ });
2338
+ if (p2.isCancel(database)) return void 0;
2339
+ const schema = await p2.text({
2340
+ message: "Schema",
2341
+ initialValue: "PUBLIC",
2342
+ validate(value) {
2343
+ if (!value) return "Required";
2344
+ }
2345
+ });
2346
+ if (p2.isCancel(schema)) return void 0;
2347
+ return {
2348
+ adapter: "snowflake",
2349
+ account,
2350
+ username,
2351
+ password: password2,
2352
+ warehouse,
2353
+ database,
2354
+ schema
2355
+ };
2356
+ }
2357
+ async function promptForBigQuery() {
2358
+ const project = await p2.text({
2359
+ message: "Google Cloud project ID",
2360
+ validate(value) {
2361
+ if (!value) return "Required";
2362
+ }
2363
+ });
2364
+ if (p2.isCancel(project)) return void 0;
2365
+ const dataset = await p2.text({
2366
+ message: "Dataset",
2367
+ validate(value) {
2368
+ if (!value) return "Required";
2369
+ }
2370
+ });
2371
+ if (p2.isCancel(dataset)) return void 0;
2372
+ const keyFilename = await p2.text({
2373
+ message: "Path to service account key file (JSON)",
2374
+ placeholder: "./service-account.json",
2375
+ validate(value) {
2376
+ if (!value) return "Required";
2377
+ if (!existsSync4(value)) return `File not found: ${value}`;
2378
+ }
2379
+ });
2380
+ if (p2.isCancel(keyFilename)) return void 0;
2381
+ return {
2382
+ adapter: "bigquery",
2383
+ project,
2384
+ dataset,
2385
+ keyFilename: path14.resolve(keyFilename)
2386
+ };
2387
+ }
2388
+ async function promptForClickHouse() {
2389
+ const host = await p2.text({
2390
+ message: "Host",
2391
+ initialValue: "localhost",
2392
+ validate(value) {
2393
+ if (!value) return "Required";
2394
+ }
2395
+ });
2396
+ if (p2.isCancel(host)) return void 0;
2397
+ const port = await p2.text({
2398
+ message: "HTTP port",
2399
+ initialValue: "8123",
2400
+ validate(value) {
2401
+ if (!value) return "Required";
2402
+ if (!/^\d+$/.test(value)) return "Must be a number";
2403
+ }
2404
+ });
2405
+ if (p2.isCancel(port)) return void 0;
2406
+ const database = await p2.text({
2407
+ message: "Database",
2408
+ initialValue: "default",
2409
+ validate(value) {
2410
+ if (!value) return "Required";
2411
+ }
2412
+ });
2413
+ if (p2.isCancel(database)) return void 0;
2414
+ const username = await p2.text({
2415
+ message: "Username",
2416
+ initialValue: "default",
2417
+ validate(value) {
2418
+ if (!value) return "Required";
2419
+ }
2420
+ });
2421
+ if (p2.isCancel(username)) return void 0;
2422
+ const password2 = await p2.password({
2423
+ message: "Password (leave empty if none)"
2424
+ });
2425
+ if (p2.isCancel(password2)) return void 0;
2426
+ return {
2427
+ adapter: "clickhouse",
2428
+ host,
2429
+ port: parseInt(port, 10),
2430
+ database,
2431
+ username,
2432
+ password: password2 || void 0
2433
+ };
2434
+ }
2435
+ async function promptForDatabricks() {
2436
+ const serverHostname = await p2.text({
2437
+ message: "Server hostname",
2438
+ placeholder: "abc-12345678-wxyz.cloud.databricks.com",
2439
+ validate(value) {
2440
+ if (!value) return "Required";
2441
+ }
2442
+ });
2443
+ if (p2.isCancel(serverHostname)) return void 0;
2444
+ const httpPath = await p2.text({
2445
+ message: "HTTP path",
2446
+ placeholder: "/sql/1.0/warehouses/abcdef1234567890",
2447
+ validate(value) {
2448
+ if (!value) return "Required";
2449
+ }
2450
+ });
2451
+ if (p2.isCancel(httpPath)) return void 0;
2452
+ const token = await p2.password({
2453
+ message: "Personal access token",
2454
+ validate(value) {
2455
+ if (!value) return "Required";
1712
2456
  }
2457
+ });
2458
+ if (p2.isCancel(token)) return void 0;
2459
+ return {
2460
+ adapter: "databricks",
2461
+ serverHostname,
2462
+ httpPath,
2463
+ token
2464
+ };
2465
+ }
2466
+ async function promptForConnection() {
2467
+ const cwd = process.cwd();
2468
+ const discovered = discoverAllDatabases(cwd);
2469
+ const discoveredMap = /* @__PURE__ */ new Map();
2470
+ const options = [];
2471
+ for (let i = 0; i < discovered.length; i++) {
2472
+ const key = `discovered:${i}`;
2473
+ discoveredMap.set(key, discovered[i].dsConfig);
2474
+ options.push({ value: key, label: discovered[i].label });
2475
+ }
2476
+ if (discovered.length > 0) {
2477
+ options.push({ value: "__separator__", label: "\u2500\u2500\u2500 Or connect manually \u2500\u2500\u2500" });
2478
+ }
2479
+ options.push(
2480
+ { value: "duckdb", label: "DuckDB", hint: "Local .duckdb file" },
2481
+ { value: "postgres", label: "PostgreSQL", hint: "Connection string" },
2482
+ { value: "mysql", label: "MySQL / MariaDB", hint: "Connection string" },
2483
+ { value: "mssql", label: "SQL Server", hint: "Connection string" },
2484
+ { value: "snowflake", label: "Snowflake", hint: "Account credentials" },
2485
+ { value: "bigquery", label: "BigQuery", hint: "Google Cloud project" },
2486
+ { value: "clickhouse", label: "ClickHouse", hint: "HTTP connection" },
2487
+ { value: "databricks", label: "Databricks", hint: "Workspace connection" },
2488
+ { value: "sqlite", label: "SQLite", hint: "Local .db file" }
2489
+ );
2490
+ const selection = await p2.select({
2491
+ message: "Select your database",
2492
+ options
2493
+ });
2494
+ if (p2.isCancel(selection)) return void 0;
2495
+ const connector = selection;
2496
+ if (discoveredMap.has(connector)) {
2497
+ return discoveredMap.get(connector);
2498
+ }
2499
+ if (connector === "__separator__") return promptForConnection();
2500
+ switch (connector) {
2501
+ case "duckdb":
2502
+ return promptForFileDb("duckdb", ".duckdb", "DUCKDB_PATH");
2503
+ case "sqlite":
2504
+ return promptForFileDb("sqlite", ".db", "SQLITE_PATH");
2505
+ case "postgres":
2506
+ return promptForConnectionString("postgres", "postgres", "DATABASE_URL");
2507
+ case "mysql":
2508
+ return promptForConnectionString("mysql", "mysql", "MYSQL_URL");
2509
+ case "mssql":
2510
+ return promptForConnectionString("mssql", "mssql", "MSSQL_URL");
2511
+ case "snowflake":
2512
+ return promptForSnowflake();
2513
+ case "bigquery":
2514
+ return promptForBigQuery();
2515
+ case "clickhouse":
2516
+ return promptForClickHouse();
2517
+ case "databricks":
2518
+ return promptForDatabricks();
2519
+ default:
2520
+ return void 0;
1713
2521
  }
1714
2522
  }
1715
2523
  async function runConnectStep() {
@@ -1717,10 +2525,10 @@ async function runConnectStep() {
1717
2525
  let dsConfig;
1718
2526
  const detected = autoDetectDb(cwd);
1719
2527
  if (detected) {
1720
- p.log.info(`Detected: ${detected.label}`);
1721
- const useDetected = await p.confirm({ message: "Use this database?" });
1722
- if (p.isCancel(useDetected)) {
1723
- p.cancel("Setup cancelled.");
2528
+ p2.log.info(`Detected: ${detected.label}`);
2529
+ const useDetected = await p2.confirm({ message: "Use this database?" });
2530
+ if (p2.isCancel(useDetected)) {
2531
+ p2.cancel("Setup cancelled.");
1724
2532
  return void 0;
1725
2533
  }
1726
2534
  if (useDetected) {
@@ -1728,7 +2536,7 @@ async function runConnectStep() {
1728
2536
  } else {
1729
2537
  const manual = await promptForConnection();
1730
2538
  if (!manual) {
1731
- p.cancel("Setup cancelled.");
2539
+ p2.cancel("Setup cancelled.");
1732
2540
  return void 0;
1733
2541
  }
1734
2542
  dsConfig = manual;
@@ -1736,12 +2544,12 @@ async function runConnectStep() {
1736
2544
  } else {
1737
2545
  const manual = await promptForConnection();
1738
2546
  if (!manual) {
1739
- p.cancel("Setup cancelled.");
2547
+ p2.cancel("Setup cancelled.");
1740
2548
  return void 0;
1741
2549
  }
1742
2550
  dsConfig = manual;
1743
2551
  }
1744
- const spin = p.spinner();
2552
+ const spin = p2.spinner();
1745
2553
  spin.start("Connecting to database...");
1746
2554
  let adapter;
1747
2555
  try {
@@ -1749,21 +2557,84 @@ async function runConnectStep() {
1749
2557
  await adapter.connect();
1750
2558
  } catch (err) {
1751
2559
  spin.stop("Connection failed");
1752
- p.log.error(err.message);
1753
- p.cancel("Could not connect to database.");
1754
- return void 0;
2560
+ if (err instanceof MissingDriverError2) {
2561
+ p2.log.warn(`The ${err.adapter} adapter requires the "${err.driverPackage}" npm package.`);
2562
+ const shouldInstall = await p2.confirm({
2563
+ message: `Install "${err.driverPackage}" now?`
2564
+ });
2565
+ if (!p2.isCancel(shouldInstall) && shouldInstall) {
2566
+ const installSpin = p2.spinner();
2567
+ installSpin.start(`Installing ${err.driverPackage}...`);
2568
+ try {
2569
+ execFileSync("npm", ["install", err.driverPackage], {
2570
+ stdio: "pipe",
2571
+ cwd: process.cwd()
2572
+ });
2573
+ installSpin.stop(`Installed ${err.driverPackage}`);
2574
+ spin.start("Retrying connection...");
2575
+ try {
2576
+ adapter = await createAdapter5(dsConfig);
2577
+ await adapter.connect();
2578
+ spin.stop("Connected");
2579
+ } catch (retryErr) {
2580
+ spin.stop("Connection failed");
2581
+ p2.log.error(retryErr.message);
2582
+ p2.cancel("Could not connect to database.");
2583
+ return void 0;
2584
+ }
2585
+ } catch {
2586
+ installSpin.stop("Installation failed");
2587
+ p2.log.error(`Could not install ${err.driverPackage}. Try manually:
2588
+ npm install ${err.driverPackage}`);
2589
+ p2.cancel("Could not connect to database.");
2590
+ return void 0;
2591
+ }
2592
+ } else {
2593
+ p2.log.info(`Install it manually with:
2594
+ npm install ${err.driverPackage}`);
2595
+ p2.cancel("Could not connect to database.");
2596
+ return void 0;
2597
+ }
2598
+ } else {
2599
+ p2.log.error(err.message);
2600
+ p2.cancel("Could not connect to database.");
2601
+ return void 0;
2602
+ }
1755
2603
  }
1756
- const tables = await adapter.listTables();
2604
+ const allTables = await adapter.listTables();
2605
+ spin.stop(`Found ${allTables.length} tables`);
2606
+ let tables = allTables;
2607
+ if (allTables.length > 1) {
2608
+ const tableSelection = await p2.multiselect({
2609
+ message: `Select tables to include (${allTables.length} found)`,
2610
+ options: allTables.map((t) => ({
2611
+ value: t.name,
2612
+ label: t.name,
2613
+ hint: `${t.row_count.toLocaleString()} rows`
2614
+ })),
2615
+ initialValues: allTables.map((t) => t.name),
2616
+ required: true
2617
+ });
2618
+ if (p2.isCancel(tableSelection)) {
2619
+ p2.cancel("Setup cancelled.");
2620
+ await adapter.disconnect();
2621
+ return void 0;
2622
+ }
2623
+ const selected = new Set(tableSelection);
2624
+ tables = allTables.filter((t) => selected.has(t.name));
2625
+ }
2626
+ const colSpin = p2.spinner();
2627
+ colSpin.start(`Introspecting ${tables.length} tables...`);
1757
2628
  const columns = {};
1758
2629
  for (const table of tables) {
1759
2630
  columns[table.name] = await adapter.listColumns(table.name);
1760
2631
  }
1761
2632
  const totalCols = Object.values(columns).reduce((sum, c) => sum + c.length, 0);
1762
- spin.stop(`Found ${tables.length} tables, ${totalCols} columns`);
2633
+ colSpin.stop(`${tables.length} tables, ${totalCols} columns`);
1763
2634
  const tableLines = tables.map((t) => ` ${t.name.padEnd(30)} ${t.row_count.toLocaleString()} rows`).join("\n");
1764
- p.note(tableLines, "Discovered Tables");
1765
- const defaultModel = path13.basename(cwd).replace(/[^a-z0-9-]/gi, "-").toLowerCase();
1766
- const modelInput = await p.text({
2635
+ p2.note(tableLines, "Selected Tables");
2636
+ const defaultModel = path14.basename(cwd).replace(/[^a-z0-9-]/gi, "-").toLowerCase();
2637
+ const modelInput = await p2.text({
1767
2638
  message: "Model name",
1768
2639
  initialValue: defaultModel,
1769
2640
  validate(value) {
@@ -1771,12 +2642,12 @@ async function runConnectStep() {
1771
2642
  if (!/^[a-z0-9-]+$/.test(value)) return "Use lowercase letters, numbers, and hyphens only";
1772
2643
  }
1773
2644
  });
1774
- if (p.isCancel(modelInput)) {
1775
- p.cancel("Setup cancelled.");
2645
+ if (p2.isCancel(modelInput)) {
2646
+ p2.cancel("Setup cancelled.");
1776
2647
  await adapter.disconnect();
1777
2648
  return void 0;
1778
2649
  }
1779
- const tierInput = await p.select({
2650
+ const tierInput = await p2.select({
1780
2651
  message: "Target metadata tier",
1781
2652
  options: [
1782
2653
  { value: "bronze", label: "Bronze", hint: "Schema + ownership + grain" },
@@ -1784,12 +2655,40 @@ async function runConnectStep() {
1784
2655
  { value: "gold", label: "Gold", hint: "+ semantic roles, rules, golden queries (needs curation)" }
1785
2656
  ]
1786
2657
  });
1787
- if (p.isCancel(tierInput)) {
1788
- p.cancel("Setup cancelled.");
2658
+ if (p2.isCancel(tierInput)) {
2659
+ p2.cancel("Setup cancelled.");
1789
2660
  await adapter.disconnect();
1790
2661
  return void 0;
1791
2662
  }
1792
- const configPath = path13.join(cwd, "contextkit.config.yaml");
2663
+ let intent;
2664
+ const wantsIntent = await p2.confirm({
2665
+ message: "Describe what you're building? (helps AI agents curate better metadata)"
2666
+ });
2667
+ if (!p2.isCancel(wantsIntent) && wantsIntent) {
2668
+ const goalsInput = await p2.text({
2669
+ message: "What are you trying to accomplish with this data?",
2670
+ placeholder: "e.g., Analyze coffee shop site selection using demographic and market signals"
2671
+ });
2672
+ if (p2.isCancel(goalsInput)) {
2673
+ p2.cancel("Setup cancelled.");
2674
+ await adapter.disconnect();
2675
+ return void 0;
2676
+ }
2677
+ const metricsInput = await p2.text({
2678
+ message: "What metrics or outcomes matter most? (optional)",
2679
+ placeholder: "e.g., opportunity score, supply saturation, demand signals"
2680
+ });
2681
+ const audienceInput = await p2.text({
2682
+ message: "Who will consume this data? (optional)",
2683
+ placeholder: "e.g., AI agents writing SQL, analysts building dashboards"
2684
+ });
2685
+ intent = {
2686
+ goals: goalsInput,
2687
+ metrics: p2.isCancel(metricsInput) ? void 0 : metricsInput || void 0,
2688
+ audience: p2.isCancel(audienceInput) ? void 0 : audienceInput || void 0
2689
+ };
2690
+ }
2691
+ const configPath = path14.join(cwd, "contextkit.config.yaml");
1793
2692
  let config;
1794
2693
  try {
1795
2694
  config = loadConfig10(cwd);
@@ -1804,7 +2703,7 @@ async function runConnectStep() {
1804
2703
  writeFileSync5(configPath, yaml2.stringify(newConfig, { lineWidth: 120 }), "utf-8");
1805
2704
  config = loadConfig10(cwd);
1806
2705
  }
1807
- const contextDir = path13.resolve(cwd, config.context_dir ?? "./context");
2706
+ const contextDir = path14.resolve(cwd, config.context_dir ?? "./context");
1808
2707
  return {
1809
2708
  cwd,
1810
2709
  contextDir,
@@ -1813,31 +2712,32 @@ async function runConnectStep() {
1813
2712
  tables,
1814
2713
  columns,
1815
2714
  modelName: modelInput,
1816
- targetTier: tierInput
2715
+ targetTier: tierInput,
2716
+ intent
1817
2717
  };
1818
2718
  }
1819
2719
 
1820
2720
  // src/setup/steps/scaffold.ts
1821
- import * as p3 from "@clack/prompts";
1822
- import path14 from "path";
1823
- import { mkdirSync as mkdirSync3, writeFileSync as writeFileSync6, existsSync as existsSync4 } from "fs";
2721
+ import * as p4 from "@clack/prompts";
2722
+ import path15 from "path";
2723
+ import { mkdirSync as mkdirSync3, writeFileSync as writeFileSync6, existsSync as existsSync5 } from "fs";
1824
2724
  import { scaffoldFromSchema as scaffoldFromSchema2, compile as compile10, computeTier as computeTier3, loadConfig as loadConfig11 } from "@runcontext/core";
1825
2725
 
1826
2726
  // src/setup/display.ts
1827
- import * as p2 from "@clack/prompts";
2727
+ import * as p3 from "@clack/prompts";
1828
2728
  function displayTierScore(score) {
1829
- p2.note(formatTierScore(score), "Tier Scorecard");
2729
+ p3.note(formatTierScore(score), "Tier Scorecard");
1830
2730
  }
1831
2731
 
1832
2732
  // src/setup/steps/scaffold.ts
1833
2733
  async function runScaffoldStep(ctx) {
1834
- const shouldRun = await p3.confirm({
2734
+ const shouldRun = await p4.confirm({
1835
2735
  message: "Scaffold Bronze metadata from database schema?"
1836
2736
  });
1837
- if (p3.isCancel(shouldRun) || !shouldRun) {
2737
+ if (p4.isCancel(shouldRun) || !shouldRun) {
1838
2738
  return { skipped: true, summary: "Skipped" };
1839
2739
  }
1840
- const spin = p3.spinner();
2740
+ const spin = p4.spinner();
1841
2741
  spin.start("Scaffolding Bronze metadata...");
1842
2742
  const result = scaffoldFromSchema2({
1843
2743
  modelName: ctx.modelName,
@@ -1845,18 +2745,40 @@ async function runScaffoldStep(ctx) {
1845
2745
  tables: ctx.tables,
1846
2746
  columns: ctx.columns
1847
2747
  });
1848
- for (const dir of ["models", "governance", "owners"]) {
1849
- const dirPath = path14.join(ctx.contextDir, dir);
1850
- if (!existsSync4(dirPath)) mkdirSync3(dirPath, { recursive: true });
2748
+ for (const dir of ["models", "governance", "owners", "reference"]) {
2749
+ const dirPath = path15.join(ctx.contextDir, dir);
2750
+ if (!existsSync5(dirPath)) mkdirSync3(dirPath, { recursive: true });
2751
+ }
2752
+ const refReadme = path15.join(ctx.contextDir, "reference", "README.md");
2753
+ if (!existsSync5(refReadme)) {
2754
+ writeFileSync6(refReadme, `# Reference Documents
2755
+
2756
+ Drop files here that help describe your data \u2014 the AI agent will read them when curating metadata.
2757
+
2758
+ Examples of useful reference documents:
2759
+ - Data dictionaries (CSV, Excel, PDF)
2760
+ - Confluence or wiki exports
2761
+ - ERD diagrams or schema docs
2762
+ - Business glossaries from your organization
2763
+ - Dashboard screenshots or descriptions
2764
+ - Data pipeline documentation
2765
+ - Slack/email threads explaining metric definitions
2766
+
2767
+ The agent will use these as context when writing descriptions, defining metrics,
2768
+ creating glossary terms, and building business context. The more context you
2769
+ provide, the better the metadata quality.
2770
+
2771
+ Supported formats: .md, .txt, .csv, .json, .yaml, .pdf
2772
+ `, "utf-8");
1851
2773
  }
1852
2774
  const created = [];
1853
2775
  const files = [
1854
- { rel: path14.join("models", result.files.osi), content: result.osiYaml },
1855
- { rel: path14.join("governance", result.files.governance), content: result.governanceYaml },
1856
- { rel: path14.join("owners", result.files.owner), content: result.ownerYaml }
2776
+ { rel: path15.join("models", result.files.osi), content: result.osiYaml },
2777
+ { rel: path15.join("governance", result.files.governance), content: result.governanceYaml },
2778
+ { rel: path15.join("owners", result.files.owner), content: result.ownerYaml }
1857
2779
  ];
1858
2780
  for (const f of files) {
1859
- const fullPath = path14.join(ctx.contextDir, f.rel);
2781
+ const fullPath = path15.join(ctx.contextDir, f.rel);
1860
2782
  writeFileSync6(fullPath, f.content, "utf-8");
1861
2783
  created.push(f.rel);
1862
2784
  }
@@ -1866,15 +2788,15 @@ async function runScaffoldStep(ctx) {
1866
2788
  ctx.tierScore = computeTier3(ctx.modelName, graph);
1867
2789
  spin.stop(`Created ${created.length} files`);
1868
2790
  const fileList = created.map((f) => ` ${f}`).join("\n");
1869
- p3.note(fileList, "Files Created");
2791
+ p4.note(fileList, "Files Created");
1870
2792
  displayTierScore(ctx.tierScore);
1871
2793
  return { skipped: false, summary: `${created.length} files \u2192 ${ctx.tierScore.tier.toUpperCase()}` };
1872
2794
  }
1873
2795
 
1874
2796
  // src/setup/steps/enrich-silver.ts
1875
- import * as p4 from "@clack/prompts";
1876
- import path15 from "path";
1877
- import { readFileSync as readFileSync5, writeFileSync as writeFileSync7, mkdirSync as mkdirSync4, existsSync as existsSync5, readdirSync as readdirSync2 } from "fs";
2797
+ import * as p5 from "@clack/prompts";
2798
+ import path16 from "path";
2799
+ import { readFileSync as readFileSync7, writeFileSync as writeFileSync7, mkdirSync as mkdirSync4, existsSync as existsSync6, readdirSync as readdirSync2 } from "fs";
1878
2800
  import * as yaml3 from "yaml";
1879
2801
  import {
1880
2802
  compile as compile11,
@@ -1883,10 +2805,10 @@ import {
1883
2805
  loadConfig as loadConfig12
1884
2806
  } from "@runcontext/core";
1885
2807
  function findFileRecursive2(dir, suffix) {
1886
- if (!existsSync5(dir)) return void 0;
2808
+ if (!existsSync6(dir)) return void 0;
1887
2809
  const entries = readdirSync2(dir, { withFileTypes: true });
1888
2810
  for (const entry of entries) {
1889
- const fullPath = path15.join(dir, entry.name);
2811
+ const fullPath = path16.join(dir, entry.name);
1890
2812
  if (entry.isDirectory()) {
1891
2813
  const found = findFileRecursive2(fullPath, suffix);
1892
2814
  if (found) return found;
@@ -1902,13 +2824,13 @@ async function runEnrichSilverStep(ctx) {
1902
2824
  ctx.graph = graph;
1903
2825
  const tierScore = computeTier4(ctx.modelName, graph);
1904
2826
  if (tierScore.silver.passed) {
1905
- p4.log.success("Already at Silver or above \u2014 skipping.");
2827
+ p5.log.success("Already at Silver or above \u2014 skipping.");
1906
2828
  ctx.tierScore = tierScore;
1907
2829
  return { skipped: true, summary: "Already Silver" };
1908
2830
  }
1909
2831
  const model = graph.models.get(ctx.modelName);
1910
2832
  if (!model) {
1911
- p4.log.error(`Model "${ctx.modelName}" not found in graph.`);
2833
+ p5.log.error(`Model "${ctx.modelName}" not found in graph.`);
1912
2834
  return { skipped: true, summary: "Model not found" };
1913
2835
  }
1914
2836
  const datasetNames = model.datasets.map((d) => d.name);
@@ -1921,19 +2843,19 @@ async function runEnrichSilverStep(ctx) {
1921
2843
  if (suggestions.glossaryTerms) preview.push(`+ ${suggestions.glossaryTerms.length} glossary term(s)`);
1922
2844
  if (suggestions.needsSampleValues) preview.push("+ sample_values from live data");
1923
2845
  if (preview.length > 0) {
1924
- p4.note(preview.join("\n"), "Silver Enrichments");
2846
+ p5.note(preview.join("\n"), "Silver Enrichments");
1925
2847
  }
1926
- const shouldRun = await p4.confirm({
2848
+ const shouldRun = await p5.confirm({
1927
2849
  message: "Apply Silver enrichments?"
1928
2850
  });
1929
- if (p4.isCancel(shouldRun) || !shouldRun) {
2851
+ if (p5.isCancel(shouldRun) || !shouldRun) {
1930
2852
  return { skipped: true, summary: "Skipped" };
1931
2853
  }
1932
- const spin = p4.spinner();
2854
+ const spin = p5.spinner();
1933
2855
  spin.start("Enriching to Silver...");
1934
2856
  const govFilePath = findFileRecursive2(ctx.contextDir, `${ctx.modelName}.governance.yaml`);
1935
2857
  if (govFilePath) {
1936
- const govContent = readFileSync5(govFilePath, "utf-8");
2858
+ const govContent = readFileSync7(govFilePath, "utf-8");
1937
2859
  const govDoc = yaml3.parse(govContent) ?? {};
1938
2860
  if (suggestions.governance?.trust) govDoc.trust = suggestions.governance.trust;
1939
2861
  if (suggestions.governance?.tags) govDoc.tags = suggestions.governance.tags;
@@ -1972,20 +2894,20 @@ async function runEnrichSilverStep(ctx) {
1972
2894
  writeFileSync7(govFilePath, yaml3.stringify(govDoc, { lineWidth: 120 }), "utf-8");
1973
2895
  }
1974
2896
  if (suggestions.lineage) {
1975
- const lineageDir = path15.join(ctx.contextDir, "lineage");
1976
- if (!existsSync5(lineageDir)) mkdirSync4(lineageDir, { recursive: true });
1977
- const lineagePath = path15.join(lineageDir, `${ctx.modelName}.lineage.yaml`);
1978
- if (!existsSync5(lineagePath)) {
2897
+ const lineageDir = path16.join(ctx.contextDir, "lineage");
2898
+ if (!existsSync6(lineageDir)) mkdirSync4(lineageDir, { recursive: true });
2899
+ const lineagePath = path16.join(lineageDir, `${ctx.modelName}.lineage.yaml`);
2900
+ if (!existsSync6(lineagePath)) {
1979
2901
  const lineageDoc = { model: ctx.modelName, upstream: suggestions.lineage.upstream };
1980
2902
  writeFileSync7(lineagePath, yaml3.stringify(lineageDoc, { lineWidth: 120 }), "utf-8");
1981
2903
  }
1982
2904
  }
1983
2905
  if (suggestions.glossaryTerms) {
1984
- const glossaryDir = path15.join(ctx.contextDir, "glossary");
1985
- if (!existsSync5(glossaryDir)) mkdirSync4(glossaryDir, { recursive: true });
2906
+ const glossaryDir = path16.join(ctx.contextDir, "glossary");
2907
+ if (!existsSync6(glossaryDir)) mkdirSync4(glossaryDir, { recursive: true });
1986
2908
  for (const term of suggestions.glossaryTerms) {
1987
- const termPath = path15.join(glossaryDir, `${term.id}.term.yaml`);
1988
- if (!existsSync5(termPath)) {
2909
+ const termPath = path16.join(glossaryDir, `${term.id}.term.yaml`);
2910
+ if (!existsSync6(termPath)) {
1989
2911
  writeFileSync7(termPath, yaml3.stringify(term, { lineWidth: 120 }), "utf-8");
1990
2912
  }
1991
2913
  }
@@ -1999,9 +2921,9 @@ async function runEnrichSilverStep(ctx) {
1999
2921
  }
2000
2922
 
2001
2923
  // src/setup/steps/enrich-gold.ts
2002
- import * as p5 from "@clack/prompts";
2003
- import path16 from "path";
2004
- import { readFileSync as readFileSync6, writeFileSync as writeFileSync8, mkdirSync as mkdirSync5, existsSync as existsSync6, readdirSync as readdirSync3 } from "fs";
2924
+ import * as p6 from "@clack/prompts";
2925
+ import path17 from "path";
2926
+ import { readFileSync as readFileSync8, writeFileSync as writeFileSync8, mkdirSync as mkdirSync5, existsSync as existsSync7, readdirSync as readdirSync3 } from "fs";
2005
2927
  import * as yaml4 from "yaml";
2006
2928
  import {
2007
2929
  compile as compile12,
@@ -2012,10 +2934,10 @@ import {
2012
2934
  loadConfig as loadConfig13
2013
2935
  } from "@runcontext/core";
2014
2936
  function findFileRecursive3(dir, suffix) {
2015
- if (!existsSync6(dir)) return void 0;
2937
+ if (!existsSync7(dir)) return void 0;
2016
2938
  const entries = readdirSync3(dir, { withFileTypes: true });
2017
2939
  for (const entry of entries) {
2018
- const fullPath = path16.join(dir, entry.name);
2940
+ const fullPath = path17.join(dir, entry.name);
2019
2941
  if (entry.isDirectory()) {
2020
2942
  const found = findFileRecursive3(fullPath, suffix);
2021
2943
  if (found) return found;
@@ -2031,13 +2953,13 @@ async function runEnrichGoldStep(ctx) {
2031
2953
  ctx.graph = graph;
2032
2954
  const tierScore = computeTier5(ctx.modelName, graph);
2033
2955
  if (tierScore.gold.passed) {
2034
- p5.log.success("Already at Gold \u2014 skipping.");
2956
+ p6.log.success("Already at Gold \u2014 skipping.");
2035
2957
  ctx.tierScore = tierScore;
2036
2958
  return { skipped: true, summary: "Already Gold" };
2037
2959
  }
2038
2960
  const model = graph.models.get(ctx.modelName);
2039
2961
  if (!model) {
2040
- p5.log.error(`Model "${ctx.modelName}" not found.`);
2962
+ p6.log.error(`Model "${ctx.modelName}" not found.`);
2041
2963
  return { skipped: true, summary: "Model not found" };
2042
2964
  }
2043
2965
  const datasetNames = model.datasets.map((d) => d.name);
@@ -2050,20 +2972,20 @@ async function runEnrichGoldStep(ctx) {
2050
2972
  preview.push("+ Add ai_context placeholder to model");
2051
2973
  preview.push("+ Infer relationships from column name patterns");
2052
2974
  if (preview.length > 0) {
2053
- p5.note(preview.join("\n"), "Gold Enrichments");
2975
+ p6.note(preview.join("\n"), "Gold Enrichments");
2054
2976
  }
2055
- p5.log.warning("Gold enrichments create TODO placeholders that need manual curation.");
2056
- const shouldRun = await p5.confirm({
2977
+ p6.log.warning("Gold enrichments create TODO placeholders that need manual curation.");
2978
+ const shouldRun = await p6.confirm({
2057
2979
  message: "Apply Gold enrichments?"
2058
2980
  });
2059
- if (p5.isCancel(shouldRun) || !shouldRun) {
2981
+ if (p6.isCancel(shouldRun) || !shouldRun) {
2060
2982
  return { skipped: true, summary: "Skipped" };
2061
2983
  }
2062
- const spin = p5.spinner();
2984
+ const spin = p6.spinner();
2063
2985
  spin.start("Enriching to Gold...");
2064
2986
  const govFilePath = findFileRecursive3(ctx.contextDir, `${ctx.modelName}.governance.yaml`);
2065
2987
  if (govFilePath) {
2066
- const govContent = readFileSync6(govFilePath, "utf-8");
2988
+ const govContent = readFileSync8(govFilePath, "utf-8");
2067
2989
  const govDoc = yaml4.parse(govContent) ?? {};
2068
2990
  if (suggestions.governance?.trust) govDoc.trust = suggestions.governance.trust;
2069
2991
  if (suggestions.needsSemanticRoles) {
@@ -2103,7 +3025,7 @@ async function runEnrichGoldStep(ctx) {
2103
3025
  }
2104
3026
  const modelFilePath = findFileRecursive3(ctx.contextDir, `${ctx.modelName}.osi.yaml`);
2105
3027
  if (modelFilePath) {
2106
- const modelContent = readFileSync6(modelFilePath, "utf-8");
3028
+ const modelContent = readFileSync8(modelFilePath, "utf-8");
2107
3029
  const modelDoc = yaml4.parse(modelContent) ?? {};
2108
3030
  const semModels = modelDoc.semantic_model ?? [];
2109
3031
  let changed = false;
@@ -2156,10 +3078,10 @@ async function runEnrichGoldStep(ctx) {
2156
3078
  }
2157
3079
  }
2158
3080
  if (suggestions.needsRulesFile) {
2159
- const rulesDir = path16.join(ctx.contextDir, "rules");
2160
- if (!existsSync6(rulesDir)) mkdirSync5(rulesDir, { recursive: true });
2161
- const rulesPath = path16.join(rulesDir, `${ctx.modelName}.rules.yaml`);
2162
- if (!existsSync6(rulesPath)) {
3081
+ const rulesDir = path17.join(ctx.contextDir, "rules");
3082
+ if (!existsSync7(rulesDir)) mkdirSync5(rulesDir, { recursive: true });
3083
+ const rulesPath = path17.join(rulesDir, `${ctx.modelName}.rules.yaml`);
3084
+ if (!existsSync7(rulesPath)) {
2163
3085
  const rulesDoc = {
2164
3086
  model: ctx.modelName,
2165
3087
  golden_queries: [
@@ -2185,13 +3107,13 @@ async function runEnrichGoldStep(ctx) {
2185
3107
  ctx.tierScore = computeTier5(ctx.modelName, newGraph);
2186
3108
  spin.stop("Applied Gold enrichments");
2187
3109
  const todos = suggestions.needsRulesFile ? "\nThe rules file contains TODO placeholders \u2014 edit context/rules/ to complete Gold." : "";
2188
- if (todos) p5.log.warning(todos);
3110
+ if (todos) p6.log.warning(todos);
2189
3111
  displayTierScore(ctx.tierScore);
2190
3112
  return { skipped: false, summary: `${ctx.tierScore.tier.toUpperCase()} (may need curation)` };
2191
3113
  }
2192
3114
 
2193
3115
  // src/setup/steps/verify.ts
2194
- import * as p6 from "@clack/prompts";
3116
+ import * as p7 from "@clack/prompts";
2195
3117
  import {
2196
3118
  compile as compile13,
2197
3119
  LintEngine as LintEngine5,
@@ -2200,13 +3122,13 @@ import {
2200
3122
  loadConfig as loadConfig14
2201
3123
  } from "@runcontext/core";
2202
3124
  async function runVerifyStep(ctx) {
2203
- const shouldRun = await p6.confirm({
3125
+ const shouldRun = await p7.confirm({
2204
3126
  message: "Verify metadata against live data?"
2205
3127
  });
2206
- if (p6.isCancel(shouldRun) || !shouldRun) {
3128
+ if (p7.isCancel(shouldRun) || !shouldRun) {
2207
3129
  return { skipped: true, summary: "Skipped" };
2208
3130
  }
2209
- const spin = p6.spinner();
3131
+ const spin = p7.spinner();
2210
3132
  spin.start("Verifying against database...");
2211
3133
  const config = loadConfig14(ctx.cwd);
2212
3134
  const { graph } = await compile13({ contextDir: ctx.contextDir, config, rootDir: ctx.cwd });
@@ -2225,7 +3147,7 @@ async function runVerifyStep(ctx) {
2225
3147
  } else {
2226
3148
  spin.stop(`${errors} error(s), ${warnings} warning(s)`);
2227
3149
  const details = dataDiags.map((d) => ` ${d.severity === "error" ? "x" : "!"} ${d.message}`).join("\n");
2228
- p6.note(details, "Data Validation Issues");
3150
+ p7.note(details, "Data Validation Issues");
2229
3151
  }
2230
3152
  return {
2231
3153
  skipped: false,
@@ -2234,8 +3156,8 @@ async function runVerifyStep(ctx) {
2234
3156
  }
2235
3157
 
2236
3158
  // src/setup/steps/autofix.ts
2237
- import * as p7 from "@clack/prompts";
2238
- import fs4 from "fs";
3159
+ import * as p8 from "@clack/prompts";
3160
+ import fs5 from "fs";
2239
3161
  import {
2240
3162
  compile as compile14,
2241
3163
  LintEngine as LintEngine6,
@@ -2253,23 +3175,23 @@ async function runAutofixStep(ctx) {
2253
3175
  const diagnostics = engine.run(graph);
2254
3176
  const fixable = diagnostics.filter((d) => d.fixable);
2255
3177
  if (fixable.length === 0) {
2256
- p7.log.success("No fixable issues found.");
3178
+ p8.log.success("No fixable issues found.");
2257
3179
  ctx.graph = graph;
2258
3180
  ctx.tierScore = computeTier7(ctx.modelName, graph);
2259
3181
  return { skipped: true, summary: "Nothing to fix" };
2260
3182
  }
2261
- const shouldRun = await p7.confirm({
3183
+ const shouldRun = await p8.confirm({
2262
3184
  message: `Auto-fix ${fixable.length} issue(s)?`
2263
3185
  });
2264
- if (p7.isCancel(shouldRun) || !shouldRun) {
3186
+ if (p8.isCancel(shouldRun) || !shouldRun) {
2265
3187
  return { skipped: true, summary: "Skipped" };
2266
3188
  }
2267
- const spin = p7.spinner();
3189
+ const spin = p8.spinner();
2268
3190
  spin.start("Fixing...");
2269
- const readFile = (filePath) => fs4.readFileSync(filePath, "utf-8");
3191
+ const readFile = (filePath) => fs5.readFileSync(filePath, "utf-8");
2270
3192
  const fixedFiles = applyFixes4(fixable, readFile);
2271
3193
  for (const [file, content] of fixedFiles) {
2272
- fs4.writeFileSync(file, content, "utf-8");
3194
+ fs5.writeFileSync(file, content, "utf-8");
2273
3195
  }
2274
3196
  const { graph: newGraph } = await compile14({ contextDir: ctx.contextDir, config, rootDir: ctx.cwd });
2275
3197
  ctx.graph = newGraph;
@@ -2280,12 +3202,14 @@ async function runAutofixStep(ctx) {
2280
3202
  }
2281
3203
 
2282
3204
  // src/setup/steps/claude-md.ts
2283
- import * as p8 from "@clack/prompts";
2284
- import path17 from "path";
2285
- import { existsSync as existsSync7, writeFileSync as writeFileSync9 } from "fs";
2286
- function buildClaudeMd(ctx) {
3205
+ import * as p9 from "@clack/prompts";
3206
+ import path18 from "path";
3207
+ import { existsSync as existsSync8, writeFileSync as writeFileSync9 } from "fs";
3208
+ function buildAgentInstructions(ctx) {
2287
3209
  const modelName = ctx.modelName;
2288
3210
  const tier = ctx.tierScore?.tier?.toUpperCase() ?? "UNKNOWN";
3211
+ const tableList = ctx.tables.map((t) => t.name).join(", ");
3212
+ const dbInfo = ctx.dsConfig.path ?? ctx.dsConfig.connection ?? "configured";
2289
3213
  const model = ctx.graph?.models.get(modelName);
2290
3214
  const datasets = model?.datasets ?? [];
2291
3215
  const datasetList = datasets.map((ds) => `- \`${ds.name}\` \u2014 ${ds.fields?.length ?? 0} fields`).join("\n");
@@ -2304,13 +3228,24 @@ function buildClaudeMd(ctx) {
2304
3228
  const failingSection = failingChecks.length > 0 ? `### Failing Checks
2305
3229
 
2306
3230
  ${failingChecks.join("\n")}` : "All checks passing.";
2307
- return `# ContextKit Agent Instructions
3231
+ const intentSection = ctx.intent ? `## Project Goals
3232
+
3233
+ ${ctx.intent.goals}
3234
+ ${ctx.intent.metrics ? `
3235
+ **Key metrics/outcomes:** ${ctx.intent.metrics}` : ""}
3236
+ ${ctx.intent.audience ? `
3237
+ **Audience:** ${ctx.intent.audience}` : ""}
2308
3238
 
2309
- You have two MCP servers: **duckdb** (query data) and **contextkit** (query metadata).
3239
+ Use these goals to prioritize which datasets, fields, and metrics to curate first.
3240
+
3241
+ ` : "";
3242
+ return `# ContextKit Agent Instructions
2310
3243
 
2311
3244
  Model: **${modelName}** | Current Tier: **${tier}**
3245
+ Database: ${ctx.dsConfig.adapter} (${dbInfo})
3246
+ Tables: ${tableList}
2312
3247
 
2313
- ## The Cardinal Rule: Never Fabricate Metadata
3248
+ ${intentSection}## The Cardinal Rule: Never Fabricate Metadata
2314
3249
 
2315
3250
  **Every piece of metadata you write must be grounded in evidence from the actual data.**
2316
3251
 
@@ -2322,24 +3257,102 @@ Model: **${modelName}** | Current Tier: **${tier}**
2322
3257
  - NEVER write a business_context narrative you can't justify from the data
2323
3258
  - NEVER create a glossary definition that is just "Definition for X"
2324
3259
 
2325
- If you don't know something, say so. Leave it as a TODO with a note about what you'd need to determine the answer. A honest TODO is infinitely better than fabricated metadata that looks plausible but is wrong.
3260
+ If you don't know something, **ask the user**. A honest "I'm not sure \u2014 can you tell me what this field means?" is infinitely better than fabricated metadata that looks plausible but is wrong.
3261
+
3262
+ ## Reference Documents
3263
+
3264
+ Check \`context/reference/\` for any files the user has provided \u2014 data dictionaries, Confluence exports, ERDs, business glossaries, dashboard docs, etc. **Read these first** before querying the database. They contain domain knowledge that will dramatically improve your metadata quality.
3265
+
3266
+ If the folder is empty, ask the user: "Do you have any existing documentation about this data? Data dictionaries, wiki pages, spreadsheets? Drop them in context/reference/ and I'll use them."
2326
3267
 
2327
3268
  ## On Session Start
2328
3269
 
2329
- 1. Run \`context_tier\` to check the current metadata tier (Bronze/Silver/Gold)
2330
- 2. Report the current tier and list failing checks
2331
- 3. Ask the user what they'd like to work on \u2014 don't start changing files unprompted
3270
+ 1. Check \`context/reference/\` for any reference documents \u2014 read them if present
3271
+ 2. Run \`context tier\` to check the current metadata tier (Bronze/Silver/Gold)
3272
+ 3. Report the current tier and summarize failing checks
3273
+ 4. Ask the user what they'd like to focus on \u2014 don't start changing files unprompted
3274
+ 5. If the user says "get me to Gold" or "build my semantic layer," follow the iterative workflow below
3275
+
3276
+ ## The Iterative Workflow
3277
+
3278
+ Building a semantic layer is a **conversation**. You and the user go back and forth \u2014 you query the data, propose metadata, ask questions, and iterate. Here's the loop:
3279
+
3280
+ \`\`\`
3281
+ \u250C\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510
3282
+ \u2502 context tier \u2502
3283
+ \u2502 (check failing checks) \u2502
3284
+ \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252C\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518
3285
+ \u2502
3286
+ \u250C\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u25BC\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510
3287
+ \u2502 Pick highest-impact \u2502
3288
+ \u2502 failing check \u2502
3289
+ \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252C\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518
3290
+ \u2502
3291
+ \u250C\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u25BC\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510
3292
+ \u2502 Query the database \u2502
3293
+ \u2502 to gather evidence \u2502
3294
+ \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252C\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518
3295
+ \u2502
3296
+ \u250C\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u25BC\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510
3297
+ \u2502 Need user input? \u2502\u2500\u2500\u2500\u2500 YES \u2500\u2500\u2192 Ask the user
3298
+ \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252C\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518 (then continue)
3299
+ \u2502 NO
3300
+ \u250C\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u25BC\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510
3301
+ \u2502 Edit YAML metadata \u2502
3302
+ \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252C\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518
3303
+ \u2502
3304
+ \u250C\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u25BC\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510
3305
+ \u2502 context lint \u2502
3306
+ \u2502 context tier \u2502
3307
+ \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252C\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518
3308
+ \u2502
3309
+ \u250C\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u25BC\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2510
3310
+ \u2502 All Gold checks pass? \u2502\u2500\u2500\u2500\u2500 NO \u2500\u2500\u2192 Loop back
3311
+ \u2514\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u252C\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2518
3312
+ \u2502 YES
3313
+ \u2713 DONE
3314
+ \`\`\`
3315
+
3316
+ ### What to Ask the User About
3317
+
3318
+ You know the data. The user knows the business. Ask them about:
2332
3319
 
2333
- ## When Asked to Reach Gold
3320
+ - **Ownership** \u2014 "Who owns this data? What team maintains it?"
3321
+ - **Metric intent** \u2014 "Should revenue be summed or averaged? Is this an additive metric?"
3322
+ - **Business rules** \u2014 "Are there any filters that should always be applied? (e.g., only active records)"
3323
+ - **Data lineage** \u2014 "Where does this data come from originally? What upstream system?"
3324
+ - **Use cases** \u2014 "What questions do people ask this data? What dashboards use it?"
3325
+ - **Glossary terms** \u2014 "What does 'churn rate' mean in your organization?"
3326
+ - **Data quality** \u2014 "I see 15% null values in this column \u2014 is that expected?"
2334
3327
 
2335
- Work through ALL failing Gold checks iteratively until \`context tier\` reports Gold:
3328
+ Don't batch all questions at once. Ask as you encounter each field or concept that needs clarification.
2336
3329
 
2337
- 1. Run \`context_tier\` and collect every failing check
3330
+ ### CLI Commands to Use
3331
+
3332
+ \`\`\`bash
3333
+ # Check where you are
3334
+ context tier # Show Bronze/Silver/Gold scorecard with failing checks
3335
+ context lint # Run all 40 lint rules
3336
+ context explain ${modelName} # Full model details \u2014 governance, rules, lineage, tier
3337
+
3338
+ # Build and fix
3339
+ context enrich --target silver --apply --source default # Auto-enrich to Silver
3340
+ context fix --write # Auto-fix lint issues where possible
3341
+ context verify # Validate metadata against the live database
3342
+
3343
+ # Serve to other agents
3344
+ context serve --stdio # Start MCP server (for Claude, Cursor, etc.)
3345
+ context dev --studio # Open visual editor in browser
3346
+ \`\`\`
3347
+
3348
+ ### Do NOT Stop Until Gold
3349
+
3350
+ 1. Run \`context tier\` and collect every failing check
2338
3351
  2. For each failing check, query the database to gather evidence, then fix the metadata
2339
- 3. Run \`context_tier\` again
3352
+ 3. Run \`context tier\` again
2340
3353
  4. If checks still fail, go back to step 2
2341
- 5. **Do NOT stop until every Gold check passes** or you hit something that genuinely requires human input (like real owner contact info)
2342
- 6. For checks you cannot fix (e.g., owner email), leave a clear TODO explaining what a human needs to provide
3354
+ 5. **Do NOT stop until every Gold check passes** or you hit something that genuinely requires human input
3355
+ 6. For checks you cannot fix, ask the user or leave a clear TODO explaining what's needed
2343
3356
 
2344
3357
  You must iterate \u2014 a single pass is never enough. Each \`context tier\` run may reveal new failures after earlier ones are fixed.
2345
3358
 
@@ -2441,7 +3454,7 @@ ${datasetList || "(none detected)"}
2441
3454
 
2442
3455
  ${failingSection}
2443
3456
 
2444
- ## MCP Tools
3457
+ ## MCP Tools (if using ContextKit as an MCP server)
2445
3458
 
2446
3459
  | Tool | Parameters | What it does |
2447
3460
  |------|-----------|-------------|
@@ -2593,36 +3606,40 @@ hierarchies:
2593
3606
  dataset: my_table
2594
3607
  \`\`\`
2595
3608
 
2596
- ## CLI Commands
3609
+ ## File Structure
2597
3610
 
2598
- \`\`\`bash
2599
- context tier # Check scorecard
2600
- context verify --db <path> # Validate against live data
2601
- context fix --db <path> # Auto-fix data warnings
2602
- context setup # Interactive setup wizard
2603
- context dev # Watch mode for live editing
3611
+ \`\`\`
3612
+ context/
3613
+ models/*.osi.yaml # OSI semantic model (schema, relationships, metrics)
3614
+ governance/*.governance.yaml # Ownership, trust, security, semantic roles
3615
+ rules/*.rules.yaml # Golden queries, business rules, guardrails
3616
+ lineage/*.lineage.yaml # Upstream sources
3617
+ glossary/*.term.yaml # Business term definitions
3618
+ owners/*.owner.yaml # Team ownership records
3619
+ reference/ # User-provided docs (data dictionaries, wiki exports, etc.)
3620
+ AGENT_INSTRUCTIONS.md # This file
2604
3621
  \`\`\`
2605
3622
  `;
2606
3623
  }
2607
- async function runClaudeMdStep(ctx) {
2608
- const instructionsPath = path17.join(ctx.contextDir, "AGENT_INSTRUCTIONS.md");
2609
- if (existsSync7(instructionsPath)) {
2610
- const shouldOverwrite = await p8.confirm({
3624
+ async function runAgentInstructionsStep(ctx) {
3625
+ const instructionsPath = path18.join(ctx.contextDir, "AGENT_INSTRUCTIONS.md");
3626
+ if (existsSync8(instructionsPath)) {
3627
+ const shouldOverwrite = await p9.confirm({
2611
3628
  message: "context/AGENT_INSTRUCTIONS.md already exists. Overwrite with updated instructions?"
2612
3629
  });
2613
- if (p8.isCancel(shouldOverwrite) || !shouldOverwrite) {
3630
+ if (p9.isCancel(shouldOverwrite) || !shouldOverwrite) {
2614
3631
  return { skipped: true, summary: "context/AGENT_INSTRUCTIONS.md already exists, kept existing" };
2615
3632
  }
2616
3633
  }
2617
- const content = buildClaudeMd(ctx);
3634
+ const content = buildAgentInstructions(ctx);
2618
3635
  writeFileSync9(instructionsPath, content, "utf-8");
2619
- p8.log.success("Generated context/AGENT_INSTRUCTIONS.md with agent curation instructions");
3636
+ p9.log.success("Generated context/AGENT_INSTRUCTIONS.md \u2014 the agent curation guide");
2620
3637
  return { skipped: false, summary: "Generated context/AGENT_INSTRUCTIONS.md" };
2621
3638
  }
2622
3639
 
2623
3640
  // src/commands/setup.ts
2624
- var setupCommand = new Command15("setup").description("Interactive wizard to scaffold and enrich metadata from a database").action(async () => {
2625
- p9.intro(chalk16.bgCyan(chalk16.black(" ContextKit Setup ")));
3641
+ var setupCommand = new Command15("setup").description("Interactive wizard \u2014 detects databases, introspects schema, scaffolds metadata, enriches to Silver, generates agent instructions. Supports DuckDB, PostgreSQL, MySQL, SQL Server, SQLite, Snowflake, BigQuery, ClickHouse, and Databricks.").action(async () => {
3642
+ p10.intro(chalk16.bgCyan(chalk16.black(" ContextKit Setup ")));
2626
3643
  const ctx = await runConnectStep();
2627
3644
  if (!ctx) return;
2628
3645
  try {
@@ -2638,39 +3655,42 @@ var setupCommand = new Command15("setup").description("Interactive wizard to sca
2638
3655
  steps.push(
2639
3656
  { name: "Verify data", fn: runVerifyStep },
2640
3657
  { name: "Auto-fix", fn: runAutofixStep },
2641
- { name: "Generate agent instructions", fn: runClaudeMdStep }
3658
+ { name: "Generate agent instructions", fn: runAgentInstructionsStep }
2642
3659
  );
2643
3660
  const results = [];
2644
3661
  for (let i = 0; i < steps.length; i++) {
2645
3662
  const step = steps[i];
2646
- p9.log.step(`${chalk16.dim(`[${i + 1}/${steps.length}]`)} ${step.name}`);
3663
+ p10.log.step(`${chalk16.dim(`[${i + 1}/${steps.length}]`)} ${step.name}`);
2647
3664
  const result = await step.fn(ctx);
2648
3665
  results.push({ name: step.name, summary: result.summary });
2649
3666
  }
2650
3667
  const summaryLines = results.map((r) => ` ${chalk16.green("+")} ${r.name}: ${r.summary}`).join("\n");
2651
- p9.note(summaryLines, "Summary");
3668
+ p10.note(summaryLines, "Summary");
2652
3669
  if (ctx.tierScore) {
2653
3670
  displayTierScore(ctx.tierScore);
2654
3671
  }
2655
3672
  const currentTier = ctx.tierScore?.tier ?? "none";
2656
3673
  const reachedTarget = ctx.targetTier === "bronze" && ["bronze", "silver", "gold"].includes(currentTier) || ctx.targetTier === "silver" && ["silver", "gold"].includes(currentTier) || ctx.targetTier === "gold" && currentTier === "gold";
2657
3674
  if (reachedTarget) {
2658
- p9.outro(`Done! You're at ${chalk16.bold(currentTier.toUpperCase())}. Run ${chalk16.cyan("context tier")} anytime to check.`);
3675
+ p10.outro(`Done! You're at ${chalk16.bold(currentTier.toUpperCase())}. Run ${chalk16.cyan("context tier")} anytime to check.`);
2659
3676
  } else if (ctx.targetTier === "gold" && currentTier !== "gold") {
2660
3677
  const nextSteps = [
2661
- `Your metadata is at ${chalk16.bold(currentTier.toUpperCase())} \u2014 Gold needs curation.`,
3678
+ `Your metadata is at ${chalk16.bold(currentTier.toUpperCase())} \u2014 Gold needs human curation.`,
2662
3679
  "",
2663
- `${chalk16.bold("To reach Gold, tell your AI assistant:")}`,
3680
+ `${chalk16.bold("If you're an AI agent:")}`,
3681
+ ` Read ${chalk16.cyan("context/AGENT_INSTRUCTIONS.md")} for the full curation guide.`,
3682
+ ` Run ${chalk16.cyan("context tier")} \u2192 pick a failing check \u2192 query the database \u2192 fix it \u2192 repeat.`,
3683
+ ` Ask the user about ownership, metric intent, and business rules as you go.`,
2664
3684
  "",
2665
- ` "Read ${chalk16.cyan("context/AGENT_INSTRUCTIONS.md")} for curation guidelines.`,
2666
- ` Run ${chalk16.cyan("context tier")} and fix every failing Gold check.`,
2667
- ` Query the database before writing any metadata.`,
2668
- ` Keep iterating until ${chalk16.cyan("context tier")} reports Gold."`
3685
+ `${chalk16.bold("If you're a human:")}`,
3686
+ ` Tell your AI agent: "Read context/AGENT_INSTRUCTIONS.md and get this project to Gold."`,
3687
+ ` The agent will go back and forth with you, asking about your data as it builds.`,
3688
+ ` Or use ${chalk16.cyan("context dev --studio")} to edit metadata visually in the browser.`
2669
3689
  ];
2670
- p9.note(nextSteps.join("\n"), "Next Steps");
2671
- p9.outro(`Run ${chalk16.cyan("context dev")} to watch for changes as you edit.`);
3690
+ p10.note(nextSteps.join("\n"), "Next Steps");
3691
+ p10.outro(`Run ${chalk16.cyan("context tier")} to check your scorecard.`);
2672
3692
  } else {
2673
- p9.outro(`Run ${chalk16.cyan("context tier")} to check your scorecard.`);
3693
+ p10.outro(`Run ${chalk16.cyan("context tier")} to check your scorecard.`);
2674
3694
  }
2675
3695
  } finally {
2676
3696
  try {
@@ -2680,9 +3700,438 @@ var setupCommand = new Command15("setup").description("Interactive wizard to sca
2680
3700
  }
2681
3701
  });
2682
3702
 
3703
+ // src/commands/blueprint.ts
3704
+ import { Command as Command16 } from "commander";
3705
+ import chalk17 from "chalk";
3706
+ import path19 from "path";
3707
+ import fs6 from "fs";
3708
+ import { compile as compile15, loadConfig as loadConfig16, emitManifest as emitManifest3 } from "@runcontext/core";
3709
// `context blueprint [model]` — export one or all models from the compiled
// manifest as portable OSI v1.0 "AI Blueprint" YAML files.
// Fixes: guard the separator `repeat()` against model names longer than 60
// chars (negative count throws RangeError), and resolve the output directory
// once instead of three times.
var blueprintCommand = new Command16("blueprint").description("Export AI Blueprints \u2014 portable, Gold-tier data product specs (OSI v1.0 YAML)").argument("[model]", "Model name to export (omit for all models)").option("--context-dir <path>", "Path to context directory").option("--out <path>", "Output directory for blueprint files", "./blueprints").option("--stdout", "Print to stdout instead of writing files").action(async (modelArg, opts) => {
  try {
    const config = loadConfig16(process.cwd());
    const contextDir = opts.contextDir ? path19.resolve(opts.contextDir) : path19.resolve(config.context_dir);
    const { graph, diagnostics } = await compile15({ contextDir, config, rootDir: process.cwd() });
    // Refuse to export from a graph that failed to compile.
    const errors = diagnostics.filter((d) => d.severity === "error");
    if (errors.length > 0) {
      console.error(chalk17.red(`Build failed with ${errors.length} error(s). Run "context build" to see details.`));
      process.exit(1);
    }
    const manifest = emitManifest3(graph, config);
    const modelNames = modelArg ? [modelArg] : Object.keys(manifest.models);
    if (modelNames.length === 0) {
      console.error(chalk17.yellow('No models found. Run "context introspect" first.'));
      process.exit(1);
    }
    if (modelArg && !manifest.models[modelArg]) {
      const available = Object.keys(manifest.models).join(", ");
      console.error(formatError(`Model "${modelArg}" not found. Available: ${available}`));
      process.exit(1);
    }
    // Resolve once; reused for mkdir, per-file paths, and the summary line.
    const outDir = path19.resolve(opts.out);
    if (!opts.stdout) {
      fs6.mkdirSync(outDir, { recursive: true });
    }
    for (const name of modelNames) {
      const yaml6 = emitBlueprint(name, manifest);
      if (opts.stdout) {
        if (modelNames.length > 1) {
          // Math.max prevents a RangeError when name.length > 60.
          console.log(chalk17.gray(`# \u2500\u2500 ${name} ${"\u2500".repeat(Math.max(0, 60 - name.length))}`));
        }
        console.log(yaml6);
      } else {
        const filePath = path19.join(outDir, `${name}.data-product.osi.yaml`);
        fs6.writeFileSync(filePath, yaml6, "utf-8");
        console.log(formatSuccess(`${filePath}`));
      }
    }
    if (!opts.stdout) {
      console.log(
        chalk17.blue(`
  ${modelNames.length} AI Blueprint(s) written to ${outDir}/`)
      );
      console.log(chalk17.gray("These portable YAML files contain the full semantic spec for each data product."));
      console.log(chalk17.gray("Share them, serve them via MCP, or import them into any OSI-compliant tool."));
    }
  } catch (err) {
    console.error(formatError(err.message));
    process.exit(1);
  }
});
3762
// Serialize one model from the compiled manifest into an OSI v1.0 "AI Blueprint"
// YAML document. YAML is emitted line-by-line (rather than via a YAML library)
// so that comment banners can be interleaved between sections.
// NOTE(review): emitted-YAML indent widths were reconstructed from the wrapText/
// emitAiContext call-site indents (4/6/8/10) — verify against golden fixtures.
function emitBlueprint(name, manifest) {
  // Per-model slices of the manifest; any of these may be undefined.
  const model = manifest.models[name];
  const governance = manifest.governance[name];
  const rules = manifest.rules[name];
  const tier = manifest.tiers[name];
  const lines = [];
  // ── File header comments ──
  lines.push(`# ${name}.data-product.osi.yaml`);
  lines.push(`# AI Blueprint \u2014 Open Semantic Interchange (OSI) v1.0`);
  lines.push(`# The complete semantic spec for this data product, ready for any AI agent`);
  lines.push(`# Generated by ContextKit (context blueprint)`);
  lines.push(``);
  lines.push(`osi_version: "1.0"`);
  lines.push(``);
  // ── Top-level semantic_model entry ──
  lines.push(`semantic_model:`);
  lines.push(`  name: ${name}`);
  if (model?.description) {
    lines.push(`  description: >`);
    wrapText(lines, model.description, 4);
  }
  if (governance) {
    if (governance.owner) lines.push(`  owner: ${governance.owner}`);
    if (governance.trust) lines.push(`  trust_status: ${governance.trust}`);
  }
  if (tier) {
    // Default to the lowest tier when the tier record has no explicit value.
    lines.push(`  tier: ${tier.tier ?? "bronze"}`);
  }
  if (governance?.tags && governance.tags.length > 0) {
    lines.push(`  tags:`);
    for (const tag of governance.tags) {
      lines.push(`    - ${tag}`);
    }
  }
  // Glossary: terms whose maps_to entries reference fields of this model
  // (entries are "<model>.<field>" strings).
  const relatedTerms = Object.entries(manifest.terms).filter(([, term]) => {
    const maps = term.maps_to ?? [];
    return maps.some((m) => m.startsWith(`${name}.`));
  });
  if (relatedTerms.length > 0) {
    lines.push(``);
    lines.push(`  # \u2500\u2500 Glossary \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500`);
    lines.push(`  glossary:`);
    for (const [, term] of relatedTerms) {
      lines.push(`    - term: ${term.id}`);
      if (term.definition) {
        lines.push(`      definition: >`);
        wrapText(lines, term.definition, 8);
      }
      if (term.synonyms && term.synonyms.length > 0) {
        lines.push(`      synonyms:`);
        for (const s of term.synonyms) {
          lines.push(`        - ${s}`);
        }
      }
      if (term.maps_to && term.maps_to.length > 0) {
        lines.push(`      related_fields:`);
        for (const m of term.maps_to) {
          lines.push(`        - ${m}`);
        }
      }
    }
  }
  // ── Datasets (with per-dataset governance overlays and field metadata) ──
  if (model?.datasets && model.datasets.length > 0) {
    lines.push(``);
    lines.push(`  # \u2500\u2500 Datasets \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500`);
    lines.push(`  datasets:`);
    for (const ds of model.datasets) {
      lines.push(`    - name: ${ds.name}`);
      if (ds.description) {
        lines.push(`      description: ${yamlStr(ds.description)}`);
      }
      if (ds.source) lines.push(`      source: ${ds.source}`);
      if (ds.primary_key && ds.primary_key.length > 0) {
        lines.push(`      primary_key: [${ds.primary_key.join(", ")}]`);
      }
      // Governance overlay keyed by dataset name (grain / table_type).
      const dsGov = governance?.datasets?.[ds.name];
      if (dsGov) {
        if (dsGov.grain) lines.push(`      grain: ${dsGov.grain}`);
        if (dsGov.table_type) lines.push(`      table_type: ${dsGov.table_type}`);
      }
      if (ds.ai_context) {
        emitAiContext(lines, ds.ai_context, 6);
      }
      if (ds.fields && ds.fields.length > 0) {
        lines.push(`      fields:`);
        for (const f of ds.fields) {
          lines.push(`        - name: ${f.name}`);
          if (f.expression) {
            // Expressions may be strings or structured objects; objects are
            // emitted as JSON (valid YAML flow style).
            const expr = typeof f.expression === "string" ? f.expression : JSON.stringify(f.expression);
            lines.push(`          expression: ${expr}`);
          }
          if (f.description) {
            lines.push(`          description: ${yamlStr(f.description)}`);
          }
          if (f.dimension) lines.push(`          dimension: true`);
          if (f.label) lines.push(`          label: true`);
          // Field governance is looked up by "<dataset>.<field>" first, then by
          // the bare field name as a fallback.
          const fGov = governance?.fields?.[`${ds.name}.${f.name}`] ?? governance?.fields?.[f.name];
          if (fGov) {
            if (fGov.semantic_role) lines.push(`          semantic_role: ${fGov.semantic_role}`);
            if (fGov.default_aggregation) lines.push(`          aggregation: ${fGov.default_aggregation}`);
            if (fGov.sample_values && fGov.sample_values.length > 0) {
              lines.push(`          sample_values:`);
              for (const sv of fGov.sample_values) {
                lines.push(`            - ${sv}`);
              }
            }
            if (fGov.default_filter) lines.push(`          default_filter: ${yamlStr(fGov.default_filter)}`);
          }
          if (f.ai_context) {
            emitAiContext(lines, f.ai_context, 10);
          }
        }
      }
    }
  }
  // ── Relationships (join paths between datasets) ──
  if (model?.relationships && model.relationships.length > 0) {
    lines.push(``);
    lines.push(`  # \u2500\u2500 Relationships \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500`);
    lines.push(`  relationships:`);
    for (const rel of model.relationships) {
      lines.push(`    - name: ${rel.name}`);
      lines.push(`      from:`);
      lines.push(`        dataset: ${rel.from}`);
      lines.push(`        columns: [${rel.from_columns.join(", ")}]`);
      lines.push(`      to:`);
      lines.push(`        dataset: ${rel.to}`);
      lines.push(`        columns: [${rel.to_columns.join(", ")}]`);
      if (rel.cardinality) lines.push(`      cardinality: ${rel.cardinality}`);
    }
  }
  // ── Metrics ──
  if (model?.metrics && model.metrics.length > 0) {
    lines.push(``);
    lines.push(`  # \u2500\u2500 Metrics \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500`);
    lines.push(`  metrics:`);
    for (const met of model.metrics) {
      lines.push(`    - name: ${met.name}`);
      if (met.expression) {
        const expr = typeof met.expression === "string" ? met.expression : JSON.stringify(met.expression);
        lines.push(`      expression: ${expr}`);
      }
      if (met.description) {
        lines.push(`      description: ${yamlStr(met.description)}`);
      }
      if (met.ai_context) {
        emitAiContext(lines, met.ai_context, 6);
      }
    }
  }
  // ── Business rules (definitions plus enforcement / avoid guidance) ──
  if (rules?.business_rules && rules.business_rules.length > 0) {
    lines.push(``);
    lines.push(`  # \u2500\u2500 Business Rules \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500`);
    lines.push(`  business_rules:`);
    for (const br of rules.business_rules) {
      lines.push(`    - name: ${br.name}`);
      if (br.definition) lines.push(`      definition: ${yamlStr(br.definition)}`);
      if (br.enforcement && br.enforcement.length > 0) {
        lines.push(`      enforcement:`);
        for (const e of br.enforcement) {
          lines.push(`        - ${yamlStr(e)}`);
        }
      }
      if (br.avoid && br.avoid.length > 0) {
        lines.push(`      avoid:`);
        for (const a of br.avoid) {
          lines.push(`        - ${yamlStr(a)}`);
        }
      }
      if (br.applied_always) lines.push(`      applied_always: true`);
    }
  }
  // ── Guardrail filters (mandatory predicates with rationale) ──
  if (rules?.guardrail_filters && rules.guardrail_filters.length > 0) {
    lines.push(``);
    lines.push(`  # \u2500\u2500 Guardrails \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500`);
    lines.push(`  guardrail_filters:`);
    for (const gf of rules.guardrail_filters) {
      lines.push(`    - name: ${gf.name}`);
      if (gf.reason) lines.push(`      reason: ${yamlStr(gf.reason)}`);
      if (gf.filter) lines.push(`      filter: ${yamlStr(gf.filter)}`);
      if (gf.tables && gf.tables.length > 0) {
        lines.push(`      tables: [${gf.tables.join(", ")}]`);
      }
    }
  }
  // ── Golden queries (question → vetted SQL, as a literal block scalar) ──
  if (rules?.golden_queries && rules.golden_queries.length > 0) {
    lines.push(``);
    lines.push(`  # \u2500\u2500 Golden Queries \u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500\u2500`);
    lines.push(`  golden_queries:`);
    for (const gq of rules.golden_queries) {
      lines.push(`    - question: ${yamlStr(gq.question)}`);
      if (gq.intent) lines.push(`      intent: ${yamlStr(gq.intent)}`);
      if (gq.sql) {
        lines.push(`      sql: |`);
        for (const sqlLine of gq.sql.split("\n")) {
          lines.push(`        ${sqlLine}`);
        }
      }
      if (gq.dialect) lines.push(`      dialect: ${gq.dialect}`);
      if (gq.caveats) lines.push(`      caveats: ${yamlStr(gq.caveats)}`);
    }
  }
  // Trailing newline so the file ends cleanly.
  lines.push(``);
  return lines.join("\n");
}
3963
// Append an `ai_context` entry to the YAML line buffer at the given indent.
// `ctx` is either a bare string (emitted inline) or a structured object with
// optional `instructions`, `synonyms`, and `examples` members.
function emitAiContext(lines, ctx, indent) {
  const pad = " ".repeat(indent);
  if (typeof ctx === "string") {
    lines.push(`${pad}ai_context: ${yamlStr(ctx)}`);
    return;
  }
  lines.push(`${pad}ai_context:`);
  if (ctx.instructions) {
    // Folded scalar; body wrapped two levels deeper than the key.
    lines.push(`${pad}  instructions: >`);
    wrapText(lines, ctx.instructions, indent + 4);
  }
  if (ctx.synonyms?.length) {
    lines.push(`${pad}  synonyms:`);
    for (const synonym of ctx.synonyms) {
      lines.push(`${pad}    - ${synonym}`);
    }
  }
  if (ctx.examples?.length) {
    lines.push(`${pad}  examples:`);
    for (const example of ctx.examples) {
      lines.push(`${pad}    - ${yamlStr(example)}`);
    }
  }
}
3987
// Render a string as a YAML scalar, JSON-quoting it whenever plain style would
// be unsafe. The original only quoted newlines / ": " / "#" / quotes; this also
// quotes empty strings, leading/trailing whitespace (YAML strips it), trailing
// ":" (would look like a mapping key), leading indicator characters, and
// scalars YAML would re-type as booleans, nulls, or numbers.
function yamlStr(s) {
  const unsafe =
    s === "" ||
    s !== s.trim() ||
    s.includes("\n") ||
    s.includes(": ") ||
    s.endsWith(":") ||
    s.includes("#") ||
    s.includes('"') ||
    s.includes("'") ||
    // Plain scalars must not start with a YAML indicator character.
    /^[-?:&*!|>%@`[\]{},]/.test(s) ||
    // Would be parsed as a boolean/null instead of a string.
    /^(true|false|yes|no|on|off|null|~)$/i.test(s) ||
    // Would be parsed as a number instead of a string.
    /^[+-]?(\d[\d_]*\.?\d*|\.\d+)([eE][+-]?\d+)?$/.test(s);
  // JSON string syntax is a valid YAML double-quoted scalar.
  return unsafe ? JSON.stringify(s) : s;
}
3993
// Word-wrap `text2` at ~76 columns and append each wrapped line, prefixed with
// `indent` spaces, to `lines`. Newlines in the input are treated as spaces.
// Fix: the original flushed the (empty) current line when the very first word
// exceeded the width, emitting a spurious padding-only line; we only flush a
// non-empty line. Empty input appends nothing (as before).
function wrapText(lines, text2, indent) {
  const pad = " ".repeat(indent);
  const words = text2.replace(/\n/g, " ").split(/\s+/).filter((w) => w.length > 0);
  let line = "";
  for (const word of words) {
    // Flush before overflow — but never flush an empty line (a single word
    // longer than the limit goes on its own line instead).
    if (line.length > 0 && line.length + word.length + 1 > 76) {
      lines.push(`${pad}${line}`);
      line = "";
    }
    line = line.length > 0 ? `${line} ${word}` : word;
  }
  if (line.length > 0) {
    lines.push(`${pad}${line}`);
  }
}
4008
+
4009
+ // src/commands/new.ts
4010
+ import { Command as Command17 } from "commander";
4011
+ import chalk18 from "chalk";
4012
+ import path20 from "path";
4013
+ import fs7 from "fs";
4014
+ import * as yaml5 from "yaml";
4015
+ import { loadConfig as loadConfig17 } from "@runcontext/core";
4016
// Build the starter OSI semantic-model YAML for a brand-new data product.
// `dataSource` is optional; when present it is bound via `data_source`.
var STARTER_OSI = (name, dataSource) => {
  const modelEntry = {
    name,
    description: `Data product: ${name}`,
    ...(dataSource ? { data_source: dataSource } : {}),
    datasets: []
  };
  return yaml5.stringify(
    { version: "1.0", semantic_model: [modelEntry] },
    { lineWidth: 120 }
  );
};
4030
// Build the starter governance YAML: owned by "default-team", internal
// security, with an empty per-dataset overlay map.
var STARTER_GOVERNANCE = (name) =>
  yaml5.stringify(
    { model: name, owner: "default-team", security: "internal", datasets: {} },
    { lineWidth: 120 }
  );
4039
// Build the starter rules YAML with empty business-rule, guardrail, and
// golden-query collections for the model.
var STARTER_RULES = (name) =>
  yaml5.stringify(
    {
      model: name,
      business_rules: [],
      guardrail_filters: [],
      golden_queries: []
    },
    { lineWidth: 120 }
  );
4048
// Build the owner record referenced by the scaffolded governance file.
var STARTER_OWNER = () =>
  yaml5.stringify(
    { id: "default-team", display_name: "Default Team" },
    { lineWidth: 120 }
  );
4055
// `context new <name>` — scaffold a new data product inside the context
// directory: starter model, governance, rules, and owner YAML files.
// Existing files are never overwritten; they are reported as skipped.
var newCommand = new Command17("new").description("Scaffold a new data product inside your context directory").argument("<name>", "Name for the data product (e.g. sales-analytics)").option("--source <name>", "Bind to a named data source from contextkit.config.yaml").option("--context-dir <path>", "Path to context directory").action(async (name, opts) => {
  try {
    const config = loadConfig17(process.cwd());
    // --context-dir overrides the configured context directory.
    const contextDir = opts.contextDir ? path20.resolve(opts.contextDir) : path20.resolve(config.context_dir);
    // Fail fast if --source names a data source not declared in config.
    // (Skipped when the config declares no data_sources at all.)
    if (opts.source && config.data_sources) {
      const sources = config.data_sources;
      if (!sources[opts.source]) {
        const available = Object.keys(sources).join(", ");
        console.error(formatError(
          `Data source "${opts.source}" not found in contextkit.config.yaml. Available: ${available || "(none)"}`
        ));
        process.exit(1);
      }
    }
    // Ensure the standard context subdirectories exist.
    const dirs = [
      path20.join(contextDir, "models"),
      path20.join(contextDir, "governance"),
      path20.join(contextDir, "owners"),
      path20.join(contextDir, "reference")
    ];
    for (const dir of dirs) {
      if (!fs7.existsSync(dir)) fs7.mkdirSync(dir, { recursive: true });
    }
    // Starter files to create, as paths relative to contextDir.
    const files = [
      {
        rel: path20.join("models", `${name}.osi.yaml`),
        content: STARTER_OSI(name, opts.source)
      },
      {
        rel: path20.join("governance", `${name}.governance.yaml`),
        content: STARTER_GOVERNANCE(name)
      },
      {
        rel: path20.join("governance", `${name}.rules.yaml`),
        content: STARTER_RULES(name)
      },
      {
        rel: path20.join("owners", "default-team.owner.yaml"),
        content: STARTER_OWNER()
      }
    ];
    const created = [];
    const skipped = [];
    // Never clobber an existing file — record it as skipped instead.
    for (const f of files) {
      const fullPath = path20.join(contextDir, f.rel);
      if (fs7.existsSync(fullPath)) {
        skipped.push(f.rel);
      } else {
        fs7.writeFileSync(fullPath, f.content, "utf-8");
        created.push(f.rel);
      }
    }
    console.log("");
    if (created.length > 0) {
      console.log(formatSuccess(`Data product "${name}" scaffolded`));
      console.log("");
      for (const f of created) {
        console.log(chalk18.green(`  + ${f}`));
      }
    }
    if (skipped.length > 0) {
      for (const f of skipped) {
        console.log(chalk18.gray(`  ~ ${f} (exists)`));
      }
    }
    // Guidance for the typical bronze → gold workflow.
    console.log("");
    console.log(chalk18.gray("Next steps:"));
    console.log(chalk18.gray(`  1. Run ${chalk18.cyan(`context introspect --db <url>`)} to populate from a database`));
    console.log(chalk18.gray(`  2. Run ${chalk18.cyan(`context enrich --target silver --apply`)} to auto-fill descriptions`));
    console.log(chalk18.gray(`  3. Run ${chalk18.cyan(`context tier`)} to check progress toward Gold`));
    console.log(chalk18.gray(`  4. Run ${chalk18.cyan(`context blueprint ${name}`)} to export the AI Blueprint`));
  } catch (err) {
    console.error(formatError(err.message));
    process.exit(1);
  }
});
4131
+
2683
4132
  // src/index.ts
2684
- var program = new Command16();
2685
- program.name("context").description("ContextKit \u2014 AI-ready metadata governance over OSI").version("0.3.1");
4133
+ var program = new Command18();
4134
+ program.name("context").description("ContextKit \u2014 AI-ready metadata governance over OSI").version("0.4.1");
2686
4135
  program.addCommand(lintCommand);
2687
4136
  program.addCommand(buildCommand);
2688
4137
  program.addCommand(tierCommand);
@@ -2698,5 +4147,7 @@ program.addCommand(verifyCommand);
2698
4147
  program.addCommand(enrichCommand);
2699
4148
  program.addCommand(rulesCommand);
2700
4149
  program.addCommand(setupCommand);
4150
+ program.addCommand(blueprintCommand);
4151
+ program.addCommand(newCommand);
2701
4152
  program.parse();
2702
4153
  //# sourceMappingURL=index.js.map