@runcontext/cli 0.3.1 → 0.3.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -1,7 +1,7 @@
1
1
  #!/usr/bin/env node
2
2
 
3
3
  // src/index.ts
4
- import { Command as Command15 } from "commander";
4
+ import { Command as Command16 } from "commander";
5
5
 
6
6
  // src/commands/lint.ts
7
7
  import { Command } from "commander";
@@ -1562,8 +1562,1028 @@ var rulesCommand = new Command14("rules").description("List all lint rules with
1562
1562
  }
1563
1563
  });
1564
1564
 
1565
+ // src/commands/setup.ts
1566
+ import { Command as Command15 } from "commander";
1567
+ import * as p9 from "@clack/prompts";
1568
+ import chalk16 from "chalk";
1569
+
1570
+ // src/setup/steps/connect.ts
1571
+ import * as p from "@clack/prompts";
1572
+ import path13 from "path";
1573
+ import { existsSync as existsSync3, readFileSync as readFileSync4, writeFileSync as writeFileSync5 } from "fs";
1574
+ import * as yaml2 from "yaml";
1575
+ import { loadConfig as loadConfig10, createAdapter as createAdapter5 } from "@runcontext/core";
1576
+ function autoDetectDb(cwd) {
1577
+ try {
1578
+ const config = loadConfig10(cwd);
1579
+ if (config.data_sources && Object.keys(config.data_sources).length > 0) {
1580
+ const name = Object.keys(config.data_sources)[0];
1581
+ const ds = config.data_sources[name];
1582
+ const loc = ds.path ?? ds.connection ?? name;
1583
+ return { dsConfig: ds, label: `${ds.adapter} \u2014 ${loc} (from contextkit.config.yaml)` };
1584
+ }
1585
+ } catch {
1586
+ }
1587
+ if (process.env.DATABASE_URL) {
1588
+ try {
1589
+ const ds = parseDbUrl(process.env.DATABASE_URL);
1590
+ return { dsConfig: ds, label: `${ds.adapter} \u2014 $DATABASE_URL` };
1591
+ } catch {
1592
+ }
1593
+ }
1594
+ if (process.env.DUCKDB_PATH && existsSync3(process.env.DUCKDB_PATH)) {
1595
+ return {
1596
+ dsConfig: { adapter: "duckdb", path: process.env.DUCKDB_PATH },
1597
+ label: `duckdb \u2014 $DUCKDB_PATH`
1598
+ };
1599
+ }
1600
+ const mcpPath = path13.join(cwd, ".claude", "mcp.json");
1601
+ if (existsSync3(mcpPath)) {
1602
+ try {
1603
+ const mcpConfig = JSON.parse(readFileSync4(mcpPath, "utf-8"));
1604
+ const duckdbServer = mcpConfig.mcpServers?.duckdb;
1605
+ if (duckdbServer?.args) {
1606
+ const args = duckdbServer.args;
1607
+ const idx = args.indexOf("--db-path");
1608
+ if (idx >= 0 && args[idx + 1]) {
1609
+ const dbPath = args[idx + 1];
1610
+ if (existsSync3(dbPath)) {
1611
+ return {
1612
+ dsConfig: { adapter: "duckdb", path: dbPath },
1613
+ label: `duckdb \u2014 ${path13.basename(dbPath)} (from .claude/mcp.json)`
1614
+ };
1615
+ }
1616
+ }
1617
+ }
1618
+ } catch {
1619
+ }
1620
+ }
1621
+ return void 0;
1622
+ }
1623
+ async function promptForConnection() {
1624
+ const connector = await p.select({
1625
+ message: "Select your database",
1626
+ options: [
1627
+ { value: "duckdb", label: "DuckDB", hint: "Local .duckdb file" },
1628
+ { value: "postgres", label: "PostgreSQL", hint: "Connection string" }
1629
+ ]
1630
+ });
1631
+ if (p.isCancel(connector)) return void 0;
1632
+ if (connector === "duckdb") {
1633
+ const method = await p.select({
1634
+ message: "How do you connect?",
1635
+ options: [
1636
+ { value: "env", label: "Environment variable", hint: "e.g. DUCKDB_PATH" },
1637
+ { value: "path", label: "File path", hint: "e.g. ./warehouse.duckdb" }
1638
+ ]
1639
+ });
1640
+ if (p.isCancel(method)) return void 0;
1641
+ if (method === "env") {
1642
+ const envName = await p.text({
1643
+ message: "Environment variable name",
1644
+ initialValue: "DUCKDB_PATH",
1645
+ validate(value) {
1646
+ if (!value) return "Required";
1647
+ const resolved = process.env[value];
1648
+ if (!resolved) return `$${value} is not set`;
1649
+ if (!existsSync3(resolved)) return `$${value} points to "${resolved}" which does not exist`;
1650
+ }
1651
+ });
1652
+ if (p.isCancel(envName)) return void 0;
1653
+ return { adapter: "duckdb", path: process.env[envName] };
1654
+ } else {
1655
+ const filePath = await p.text({
1656
+ message: "Path to .duckdb file",
1657
+ placeholder: "./warehouse.duckdb",
1658
+ validate(value) {
1659
+ if (!value) return "Required";
1660
+ if (!existsSync3(value)) return `File not found: ${value}`;
1661
+ }
1662
+ });
1663
+ if (p.isCancel(filePath)) return void 0;
1664
+ return { adapter: "duckdb", path: path13.resolve(filePath) };
1665
+ }
1666
+ } else {
1667
+ const method = await p.select({
1668
+ message: "How do you connect?",
1669
+ options: [
1670
+ { value: "env", label: "Environment variable", hint: "e.g. DATABASE_URL" },
1671
+ { value: "url", label: "Connection string", hint: "postgres://..." }
1672
+ ]
1673
+ });
1674
+ if (p.isCancel(method)) return void 0;
1675
+ if (method === "env") {
1676
+ const envName = await p.text({
1677
+ message: "Environment variable name",
1678
+ initialValue: "DATABASE_URL",
1679
+ validate(value) {
1680
+ if (!value) return "Required";
1681
+ const resolved = process.env[value];
1682
+ if (!resolved) return `$${value} is not set`;
1683
+ }
1684
+ });
1685
+ if (p.isCancel(envName)) return void 0;
1686
+ return { adapter: "postgres", connection: process.env[envName] };
1687
+ } else {
1688
+ const url = await p.text({
1689
+ message: "Connection string",
1690
+ placeholder: "postgres://user:pass@host:5432/dbname",
1691
+ validate(value) {
1692
+ if (!value) return "Required";
1693
+ if (!value.startsWith("postgres://") && !value.startsWith("postgresql://")) {
1694
+ return "Must start with postgres:// or postgresql://";
1695
+ }
1696
+ }
1697
+ });
1698
+ if (p.isCancel(url)) return void 0;
1699
+ return { adapter: "postgres", connection: url };
1700
+ }
1701
+ }
1702
+ }
1703
+ async function runConnectStep() {
1704
+ const cwd = process.cwd();
1705
+ let dsConfig;
1706
+ const detected = autoDetectDb(cwd);
1707
+ if (detected) {
1708
+ p.log.info(`Detected: ${detected.label}`);
1709
+ const useDetected = await p.confirm({ message: "Use this database?" });
1710
+ if (p.isCancel(useDetected)) {
1711
+ p.cancel("Setup cancelled.");
1712
+ return void 0;
1713
+ }
1714
+ if (useDetected) {
1715
+ dsConfig = detected.dsConfig;
1716
+ } else {
1717
+ const manual = await promptForConnection();
1718
+ if (!manual) {
1719
+ p.cancel("Setup cancelled.");
1720
+ return void 0;
1721
+ }
1722
+ dsConfig = manual;
1723
+ }
1724
+ } else {
1725
+ const manual = await promptForConnection();
1726
+ if (!manual) {
1727
+ p.cancel("Setup cancelled.");
1728
+ return void 0;
1729
+ }
1730
+ dsConfig = manual;
1731
+ }
1732
+ const spin = p.spinner();
1733
+ spin.start("Connecting to database...");
1734
+ let adapter;
1735
+ try {
1736
+ adapter = await createAdapter5(dsConfig);
1737
+ await adapter.connect();
1738
+ } catch (err) {
1739
+ spin.stop("Connection failed");
1740
+ p.log.error(err.message);
1741
+ p.cancel("Could not connect to database.");
1742
+ return void 0;
1743
+ }
1744
+ const tables = await adapter.listTables();
1745
+ const columns = {};
1746
+ for (const table of tables) {
1747
+ columns[table.name] = await adapter.listColumns(table.name);
1748
+ }
1749
+ const totalCols = Object.values(columns).reduce((sum, c) => sum + c.length, 0);
1750
+ spin.stop(`Found ${tables.length} tables, ${totalCols} columns`);
1751
+ const tableLines = tables.map((t) => ` ${t.name.padEnd(30)} ${t.row_count.toLocaleString()} rows`).join("\n");
1752
+ p.note(tableLines, "Discovered Tables");
1753
+ const defaultModel = path13.basename(cwd).replace(/[^a-z0-9-]/gi, "-").toLowerCase();
1754
+ const modelInput = await p.text({
1755
+ message: "Model name",
1756
+ initialValue: defaultModel,
1757
+ validate(value) {
1758
+ if (!value) return "Required";
1759
+ if (!/^[a-z0-9-]+$/.test(value)) return "Use lowercase letters, numbers, and hyphens only";
1760
+ }
1761
+ });
1762
+ if (p.isCancel(modelInput)) {
1763
+ p.cancel("Setup cancelled.");
1764
+ await adapter.disconnect();
1765
+ return void 0;
1766
+ }
1767
+ const tierInput = await p.select({
1768
+ message: "Target metadata tier",
1769
+ options: [
1770
+ { value: "bronze", label: "Bronze", hint: "Schema + ownership + grain" },
1771
+ { value: "silver", label: "Silver", hint: "+ trust, lineage, glossary, refresh, sample values" },
1772
+ { value: "gold", label: "Gold", hint: "+ semantic roles, rules, golden queries (needs curation)" }
1773
+ ]
1774
+ });
1775
+ if (p.isCancel(tierInput)) {
1776
+ p.cancel("Setup cancelled.");
1777
+ await adapter.disconnect();
1778
+ return void 0;
1779
+ }
1780
+ const configPath = path13.join(cwd, "contextkit.config.yaml");
1781
+ let config;
1782
+ try {
1783
+ config = loadConfig10(cwd);
1784
+ } catch {
1785
+ config = { context_dir: "./context" };
1786
+ }
1787
+ if (!config.data_sources || Object.keys(config.data_sources).length === 0) {
1788
+ const newConfig = {
1789
+ context_dir: config.context_dir ?? "./context",
1790
+ data_sources: { default: dsConfig }
1791
+ };
1792
+ writeFileSync5(configPath, yaml2.stringify(newConfig, { lineWidth: 120 }), "utf-8");
1793
+ config = loadConfig10(cwd);
1794
+ }
1795
+ const contextDir = path13.resolve(cwd, config.context_dir ?? "./context");
1796
+ return {
1797
+ cwd,
1798
+ contextDir,
1799
+ dsConfig,
1800
+ adapter,
1801
+ tables,
1802
+ columns,
1803
+ modelName: modelInput,
1804
+ targetTier: tierInput
1805
+ };
1806
+ }
1807
+
1808
+ // src/setup/steps/scaffold.ts
1809
+ import * as p3 from "@clack/prompts";
1810
+ import path14 from "path";
1811
+ import { mkdirSync as mkdirSync3, writeFileSync as writeFileSync6, existsSync as existsSync4 } from "fs";
1812
+ import { scaffoldFromSchema as scaffoldFromSchema2, compile as compile10, computeTier as computeTier3, loadConfig as loadConfig11 } from "@runcontext/core";
1813
+
1814
+ // src/setup/display.ts
1815
+ import * as p2 from "@clack/prompts";
1816
+ function displayTierScore(score) {
1817
+ p2.note(formatTierScore(score), "Tier Scorecard");
1818
+ }
1819
+
1820
+ // src/setup/steps/scaffold.ts
1821
+ async function runScaffoldStep(ctx) {
1822
+ const shouldRun = await p3.confirm({
1823
+ message: "Scaffold Bronze metadata from database schema?"
1824
+ });
1825
+ if (p3.isCancel(shouldRun) || !shouldRun) {
1826
+ return { skipped: true, summary: "Skipped" };
1827
+ }
1828
+ const spin = p3.spinner();
1829
+ spin.start("Scaffolding Bronze metadata...");
1830
+ const result = scaffoldFromSchema2({
1831
+ modelName: ctx.modelName,
1832
+ dataSourceName: "default",
1833
+ tables: ctx.tables,
1834
+ columns: ctx.columns
1835
+ });
1836
+ for (const dir of ["models", "governance", "owners"]) {
1837
+ const dirPath = path14.join(ctx.contextDir, dir);
1838
+ if (!existsSync4(dirPath)) mkdirSync3(dirPath, { recursive: true });
1839
+ }
1840
+ const created = [];
1841
+ const files = [
1842
+ { rel: path14.join("models", result.files.osi), content: result.osiYaml },
1843
+ { rel: path14.join("governance", result.files.governance), content: result.governanceYaml },
1844
+ { rel: path14.join("owners", result.files.owner), content: result.ownerYaml }
1845
+ ];
1846
+ for (const f of files) {
1847
+ const fullPath = path14.join(ctx.contextDir, f.rel);
1848
+ writeFileSync6(fullPath, f.content, "utf-8");
1849
+ created.push(f.rel);
1850
+ }
1851
+ const config = loadConfig11(ctx.cwd);
1852
+ const { graph } = await compile10({ contextDir: ctx.contextDir, config, rootDir: ctx.cwd });
1853
+ ctx.graph = graph;
1854
+ ctx.tierScore = computeTier3(ctx.modelName, graph);
1855
+ spin.stop(`Created ${created.length} files`);
1856
+ const fileList = created.map((f) => ` ${f}`).join("\n");
1857
+ p3.note(fileList, "Files Created");
1858
+ displayTierScore(ctx.tierScore);
1859
+ return { skipped: false, summary: `${created.length} files \u2192 ${ctx.tierScore.tier.toUpperCase()}` };
1860
+ }
1861
+
1862
+ // src/setup/steps/enrich-silver.ts
1863
+ import * as p4 from "@clack/prompts";
1864
+ import path15 from "path";
1865
+ import { readFileSync as readFileSync5, writeFileSync as writeFileSync7, mkdirSync as mkdirSync4, existsSync as existsSync5, readdirSync as readdirSync2 } from "fs";
1866
+ import * as yaml3 from "yaml";
1867
+ import {
1868
+ compile as compile11,
1869
+ computeTier as computeTier4,
1870
+ suggestEnrichments as suggestEnrichments2,
1871
+ loadConfig as loadConfig12
1872
+ } from "@runcontext/core";
1873
+ function findFileRecursive2(dir, suffix) {
1874
+ if (!existsSync5(dir)) return void 0;
1875
+ const entries = readdirSync2(dir, { withFileTypes: true });
1876
+ for (const entry of entries) {
1877
+ const fullPath = path15.join(dir, entry.name);
1878
+ if (entry.isDirectory()) {
1879
+ const found = findFileRecursive2(fullPath, suffix);
1880
+ if (found) return found;
1881
+ } else if (entry.name.endsWith(suffix)) {
1882
+ return fullPath;
1883
+ }
1884
+ }
1885
+ return void 0;
1886
+ }
1887
+ async function runEnrichSilverStep(ctx) {
1888
+ const config = loadConfig12(ctx.cwd);
1889
+ const { graph } = await compile11({ contextDir: ctx.contextDir, config, rootDir: ctx.cwd });
1890
+ ctx.graph = graph;
1891
+ const tierScore = computeTier4(ctx.modelName, graph);
1892
+ if (tierScore.silver.passed) {
1893
+ p4.log.success("Already at Silver or above \u2014 skipping.");
1894
+ ctx.tierScore = tierScore;
1895
+ return { skipped: true, summary: "Already Silver" };
1896
+ }
1897
+ const model = graph.models.get(ctx.modelName);
1898
+ if (!model) {
1899
+ p4.log.error(`Model "${ctx.modelName}" not found in graph.`);
1900
+ return { skipped: true, summary: "Model not found" };
1901
+ }
1902
+ const datasetNames = model.datasets.map((d) => d.name);
1903
+ const suggestions = suggestEnrichments2("silver", tierScore, datasetNames);
1904
+ const preview = [];
1905
+ if (suggestions.governance?.trust) preview.push(`+ trust: ${suggestions.governance.trust}`);
1906
+ if (suggestions.governance?.tags) preview.push(`+ tags: [${suggestions.governance.tags.join(", ")}]`);
1907
+ if (suggestions.governance?.refreshAll) preview.push(`+ refresh: ${suggestions.governance.refreshAll} (all datasets)`);
1908
+ if (suggestions.lineage) preview.push(`+ ${suggestions.lineage.upstream?.length ?? 0} lineage upstream source(s)`);
1909
+ if (suggestions.glossaryTerms) preview.push(`+ ${suggestions.glossaryTerms.length} glossary term(s)`);
1910
+ if (suggestions.needsSampleValues) preview.push("+ sample_values from live data");
1911
+ if (preview.length > 0) {
1912
+ p4.note(preview.join("\n"), "Silver Enrichments");
1913
+ }
1914
+ const shouldRun = await p4.confirm({
1915
+ message: "Apply Silver enrichments?"
1916
+ });
1917
+ if (p4.isCancel(shouldRun) || !shouldRun) {
1918
+ return { skipped: true, summary: "Skipped" };
1919
+ }
1920
+ const spin = p4.spinner();
1921
+ spin.start("Enriching to Silver...");
1922
+ const govFilePath = findFileRecursive2(ctx.contextDir, `${ctx.modelName}.governance.yaml`);
1923
+ if (govFilePath) {
1924
+ const govContent = readFileSync5(govFilePath, "utf-8");
1925
+ const govDoc = yaml3.parse(govContent) ?? {};
1926
+ if (suggestions.governance?.trust) govDoc.trust = suggestions.governance.trust;
1927
+ if (suggestions.governance?.tags) govDoc.tags = suggestions.governance.tags;
1928
+ if (suggestions.governance?.refreshAll) {
1929
+ for (const dsName of Object.keys(govDoc.datasets ?? {})) {
1930
+ govDoc.datasets[dsName].refresh = suggestions.governance.refreshAll;
1931
+ }
1932
+ }
1933
+ if (suggestions.needsSampleValues) {
1934
+ govDoc.fields = govDoc.fields ?? {};
1935
+ try {
1936
+ let count = 0;
1937
+ for (const ds of model.datasets) {
1938
+ if (count >= 2) break;
1939
+ const tableName = ds.source?.split(".").pop() ?? ds.name;
1940
+ for (const field of ds.fields ?? []) {
1941
+ if (count >= 2) break;
1942
+ const fieldKey = `${ds.name}.${field.name}`;
1943
+ if (govDoc.fields[fieldKey]?.sample_values?.length > 0) continue;
1944
+ try {
1945
+ const result = await ctx.adapter.query(
1946
+ `SELECT DISTINCT CAST("${field.name}" AS VARCHAR) AS val FROM "${tableName}" WHERE "${field.name}" IS NOT NULL LIMIT 5`
1947
+ );
1948
+ if (result.rows.length > 0) {
1949
+ govDoc.fields[fieldKey] = govDoc.fields[fieldKey] ?? {};
1950
+ govDoc.fields[fieldKey].sample_values = result.rows.map((r) => String(r.val));
1951
+ count++;
1952
+ }
1953
+ } catch {
1954
+ }
1955
+ }
1956
+ }
1957
+ } catch {
1958
+ }
1959
+ }
1960
+ writeFileSync7(govFilePath, yaml3.stringify(govDoc, { lineWidth: 120 }), "utf-8");
1961
+ }
1962
+ if (suggestions.lineage) {
1963
+ const lineageDir = path15.join(ctx.contextDir, "lineage");
1964
+ if (!existsSync5(lineageDir)) mkdirSync4(lineageDir, { recursive: true });
1965
+ const lineagePath = path15.join(lineageDir, `${ctx.modelName}.lineage.yaml`);
1966
+ if (!existsSync5(lineagePath)) {
1967
+ const lineageDoc = { model: ctx.modelName, upstream: suggestions.lineage.upstream };
1968
+ writeFileSync7(lineagePath, yaml3.stringify(lineageDoc, { lineWidth: 120 }), "utf-8");
1969
+ }
1970
+ }
1971
+ if (suggestions.glossaryTerms) {
1972
+ const glossaryDir = path15.join(ctx.contextDir, "glossary");
1973
+ if (!existsSync5(glossaryDir)) mkdirSync4(glossaryDir, { recursive: true });
1974
+ for (const term of suggestions.glossaryTerms) {
1975
+ const termPath = path15.join(glossaryDir, `${term.id}.term.yaml`);
1976
+ if (!existsSync5(termPath)) {
1977
+ writeFileSync7(termPath, yaml3.stringify(term, { lineWidth: 120 }), "utf-8");
1978
+ }
1979
+ }
1980
+ }
1981
+ const { graph: newGraph } = await compile11({ contextDir: ctx.contextDir, config, rootDir: ctx.cwd });
1982
+ ctx.graph = newGraph;
1983
+ ctx.tierScore = computeTier4(ctx.modelName, newGraph);
1984
+ spin.stop("Applied Silver enrichments");
1985
+ displayTierScore(ctx.tierScore);
1986
+ return { skipped: false, summary: ctx.tierScore.tier.toUpperCase() };
1987
+ }
1988
+
1989
+ // src/setup/steps/enrich-gold.ts
1990
+ import * as p5 from "@clack/prompts";
1991
+ import path16 from "path";
1992
+ import { readFileSync as readFileSync6, writeFileSync as writeFileSync8, mkdirSync as mkdirSync5, existsSync as existsSync6, readdirSync as readdirSync3 } from "fs";
1993
+ import * as yaml4 from "yaml";
1994
+ import {
1995
+ compile as compile12,
1996
+ computeTier as computeTier5,
1997
+ suggestEnrichments as suggestEnrichments3,
1998
+ inferSemanticRole as inferSemanticRole2,
1999
+ inferAggregation as inferAggregation2,
2000
+ loadConfig as loadConfig13
2001
+ } from "@runcontext/core";
2002
+ function findFileRecursive3(dir, suffix) {
2003
+ if (!existsSync6(dir)) return void 0;
2004
+ const entries = readdirSync3(dir, { withFileTypes: true });
2005
+ for (const entry of entries) {
2006
+ const fullPath = path16.join(dir, entry.name);
2007
+ if (entry.isDirectory()) {
2008
+ const found = findFileRecursive3(fullPath, suffix);
2009
+ if (found) return found;
2010
+ } else if (entry.name.endsWith(suffix)) {
2011
+ return fullPath;
2012
+ }
2013
+ }
2014
+ return void 0;
2015
+ }
2016
+ async function runEnrichGoldStep(ctx) {
2017
+ const config = loadConfig13(ctx.cwd);
2018
+ const { graph } = await compile12({ contextDir: ctx.contextDir, config, rootDir: ctx.cwd });
2019
+ ctx.graph = graph;
2020
+ const tierScore = computeTier5(ctx.modelName, graph);
2021
+ if (tierScore.gold.passed) {
2022
+ p5.log.success("Already at Gold \u2014 skipping.");
2023
+ ctx.tierScore = tierScore;
2024
+ return { skipped: true, summary: "Already Gold" };
2025
+ }
2026
+ const model = graph.models.get(ctx.modelName);
2027
+ if (!model) {
2028
+ p5.log.error(`Model "${ctx.modelName}" not found.`);
2029
+ return { skipped: true, summary: "Model not found" };
2030
+ }
2031
+ const datasetNames = model.datasets.map((d) => d.name);
2032
+ const suggestions = suggestEnrichments3("gold", tierScore, datasetNames);
2033
+ const preview = [];
2034
+ if (suggestions.needsSemanticRoles) preview.push("+ Infer semantic_role for all fields");
2035
+ if (suggestions.needsRulesFile) preview.push("+ Generate rules file (golden queries, guardrails, hierarchies)");
2036
+ if (suggestions.governance?.trust) preview.push(`+ trust: ${suggestions.governance.trust}`);
2037
+ preview.push("+ Add version, business_context stubs to governance");
2038
+ preview.push("+ Add ai_context placeholder to model");
2039
+ preview.push("+ Infer relationships from column name patterns");
2040
+ if (preview.length > 0) {
2041
+ p5.note(preview.join("\n"), "Gold Enrichments");
2042
+ }
2043
+ p5.log.warning("Gold enrichments create TODO placeholders that need manual curation.");
2044
+ const shouldRun = await p5.confirm({
2045
+ message: "Apply Gold enrichments?"
2046
+ });
2047
+ if (p5.isCancel(shouldRun) || !shouldRun) {
2048
+ return { skipped: true, summary: "Skipped" };
2049
+ }
2050
+ const spin = p5.spinner();
2051
+ spin.start("Enriching to Gold...");
2052
+ const govFilePath = findFileRecursive3(ctx.contextDir, `${ctx.modelName}.governance.yaml`);
2053
+ if (govFilePath) {
2054
+ const govContent = readFileSync6(govFilePath, "utf-8");
2055
+ const govDoc = yaml4.parse(govContent) ?? {};
2056
+ if (suggestions.governance?.trust) govDoc.trust = suggestions.governance.trust;
2057
+ if (suggestions.needsSemanticRoles) {
2058
+ govDoc.fields = govDoc.fields ?? {};
2059
+ for (const ds of model.datasets) {
2060
+ const tableName = ds.source?.split(".").pop() ?? ds.name;
2061
+ let dbColumns = [];
2062
+ try {
2063
+ dbColumns = await ctx.adapter.listColumns(tableName);
2064
+ } catch {
2065
+ }
2066
+ for (const field of ds.fields ?? []) {
2067
+ const fieldKey = `${ds.name}.${field.name}`;
2068
+ if (govDoc.fields[fieldKey]?.semantic_role) continue;
2069
+ const col = dbColumns.find((c) => c.name === field.name);
2070
+ const isPK = col?.is_primary_key ?? field.name.endsWith("_id");
2071
+ const dataType = col?.data_type ?? "VARCHAR";
2072
+ govDoc.fields[fieldKey] = govDoc.fields[fieldKey] ?? {};
2073
+ const role = inferSemanticRole2(field.name, dataType, isPK);
2074
+ govDoc.fields[fieldKey].semantic_role = role;
2075
+ if (role === "metric") {
2076
+ govDoc.fields[fieldKey].default_aggregation = inferAggregation2(field.name);
2077
+ govDoc.fields[fieldKey].additive = govDoc.fields[fieldKey].default_aggregation === "SUM";
2078
+ }
2079
+ }
2080
+ }
2081
+ }
2082
+ if (!govDoc.version) {
2083
+ govDoc.version = "0.1.0";
2084
+ }
2085
+ if (!govDoc.business_context || govDoc.business_context.length === 0) {
2086
+ govDoc.business_context = [
2087
+ { name: "TODO: Use Case Name", description: "TODO: Describe the analytical use case and business value." }
2088
+ ];
2089
+ }
2090
+ writeFileSync8(govFilePath, yaml4.stringify(govDoc, { lineWidth: 120 }), "utf-8");
2091
+ }
2092
+ const modelFilePath = findFileRecursive3(ctx.contextDir, `${ctx.modelName}.osi.yaml`);
2093
+ if (modelFilePath) {
2094
+ const modelContent = readFileSync6(modelFilePath, "utf-8");
2095
+ const modelDoc = yaml4.parse(modelContent) ?? {};
2096
+ const semModels = modelDoc.semantic_model ?? [];
2097
+ let changed = false;
2098
+ for (const sm of semModels) {
2099
+ if (sm.name !== ctx.modelName) continue;
2100
+ if (!sm.ai_context) {
2101
+ sm.ai_context = "TODO: Describe how an AI agent should use this model, common pitfalls, and important filters.";
2102
+ changed = true;
2103
+ }
2104
+ if (!sm.relationships || sm.relationships.length === 0) {
2105
+ const datasets = sm.datasets ?? [];
2106
+ const dsNames = new Set(datasets.map((d) => d.name));
2107
+ const inferred = [];
2108
+ for (const ds of datasets) {
2109
+ for (const field of ds.fields ?? []) {
2110
+ const fname = field.name;
2111
+ const idMatch = fname.match(/^(.+)_id$/);
2112
+ if (idMatch && idMatch[1]) {
2113
+ const targetBase = idMatch[1];
2114
+ for (const targetDs of datasets) {
2115
+ if (targetDs.name === ds.name) continue;
2116
+ const targetName = targetDs.name;
2117
+ if (targetName.includes(targetBase) || targetBase.includes(targetName)) {
2118
+ const targetHasField = (targetDs.fields ?? []).some((f) => f.name === fname);
2119
+ if (targetHasField) {
2120
+ const relName = `${ds.name}-to-${targetName}`;
2121
+ if (!inferred.some((r) => r.name === relName)) {
2122
+ inferred.push({
2123
+ name: relName,
2124
+ from: ds.name,
2125
+ to: targetName,
2126
+ from_columns: [fname],
2127
+ to_columns: [fname]
2128
+ });
2129
+ }
2130
+ }
2131
+ }
2132
+ }
2133
+ }
2134
+ }
2135
+ }
2136
+ if (inferred.length > 0) {
2137
+ sm.relationships = inferred;
2138
+ changed = true;
2139
+ }
2140
+ }
2141
+ }
2142
+ if (changed) {
2143
+ writeFileSync8(modelFilePath, yaml4.stringify(modelDoc, { lineWidth: 120 }), "utf-8");
2144
+ }
2145
+ }
2146
+ if (suggestions.needsRulesFile) {
2147
+ const rulesDir = path16.join(ctx.contextDir, "rules");
2148
+ if (!existsSync6(rulesDir)) mkdirSync5(rulesDir, { recursive: true });
2149
+ const rulesPath = path16.join(rulesDir, `${ctx.modelName}.rules.yaml`);
2150
+ if (!existsSync6(rulesPath)) {
2151
+ const rulesDoc = {
2152
+ model: ctx.modelName,
2153
+ golden_queries: [
2154
+ { question: "TODO: What is the total count?", sql: "SELECT COUNT(*) FROM table_name" },
2155
+ { question: "TODO: What are the top records?", sql: "SELECT * FROM table_name LIMIT 10" },
2156
+ { question: "TODO: What is the distribution?", sql: "SELECT column, COUNT(*) FROM table_name GROUP BY column" }
2157
+ ],
2158
+ business_rules: [
2159
+ { name: "TODO: rule-name", definition: "TODO: describe the business rule" }
2160
+ ],
2161
+ guardrail_filters: [
2162
+ { name: "TODO: filter-name", filter: "column IS NOT NULL", reason: "TODO: explain why" }
2163
+ ],
2164
+ hierarchies: [
2165
+ { name: "TODO: hierarchy-name", levels: ["level1", "level2"], dataset: datasetNames[0] ?? "dataset" }
2166
+ ]
2167
+ };
2168
+ writeFileSync8(rulesPath, yaml4.stringify(rulesDoc, { lineWidth: 120 }), "utf-8");
2169
+ }
2170
+ }
2171
+ const { graph: newGraph } = await compile12({ contextDir: ctx.contextDir, config, rootDir: ctx.cwd });
2172
+ ctx.graph = newGraph;
2173
+ ctx.tierScore = computeTier5(ctx.modelName, newGraph);
2174
+ spin.stop("Applied Gold enrichments");
2175
+ const todos = suggestions.needsRulesFile ? "\nThe rules file contains TODO placeholders \u2014 edit context/rules/ to complete Gold." : "";
2176
+ if (todos) p5.log.warning(todos);
2177
+ displayTierScore(ctx.tierScore);
2178
+ return { skipped: false, summary: `${ctx.tierScore.tier.toUpperCase()} (may need curation)` };
2179
+ }
2180
+
2181
+ // src/setup/steps/verify.ts
2182
+ import * as p6 from "@clack/prompts";
2183
+ import {
2184
+ compile as compile13,
2185
+ LintEngine as LintEngine5,
2186
+ ALL_RULES as ALL_RULES6,
2187
+ computeTier as computeTier6,
2188
+ loadConfig as loadConfig14
2189
+ } from "@runcontext/core";
2190
+ async function runVerifyStep(ctx) {
2191
+ const shouldRun = await p6.confirm({
2192
+ message: "Verify metadata against live data?"
2193
+ });
2194
+ if (p6.isCancel(shouldRun) || !shouldRun) {
2195
+ return { skipped: true, summary: "Skipped" };
2196
+ }
2197
+ const spin = p6.spinner();
2198
+ spin.start("Verifying against database...");
2199
+ const config = loadConfig14(ctx.cwd);
2200
+ const { graph } = await compile13({ contextDir: ctx.contextDir, config, rootDir: ctx.cwd });
2201
+ graph.dataValidation = await collectDataValidation(ctx.adapter, graph);
2202
+ const engine = new LintEngine5();
2203
+ for (const rule of ALL_RULES6) {
2204
+ if (rule.id.startsWith("data/")) engine.register(rule);
2205
+ }
2206
+ const dataDiags = engine.run(graph);
2207
+ ctx.graph = graph;
2208
+ ctx.tierScore = computeTier6(ctx.modelName, graph);
2209
+ const errors = dataDiags.filter((d) => d.severity === "error").length;
2210
+ const warnings = dataDiags.filter((d) => d.severity === "warning").length;
2211
+ if (dataDiags.length === 0) {
2212
+ spin.stop("All data validation checks passed");
2213
+ } else {
2214
+ spin.stop(`${errors} error(s), ${warnings} warning(s)`);
2215
+ const details = dataDiags.map((d) => ` ${d.severity === "error" ? "x" : "!"} ${d.message}`).join("\n");
2216
+ p6.note(details, "Data Validation Issues");
2217
+ }
2218
+ return {
2219
+ skipped: false,
2220
+ summary: dataDiags.length === 0 ? "Clean" : `${errors} errors, ${warnings} warnings`
2221
+ };
2222
+ }
2223
+
2224
+ // src/setup/steps/autofix.ts
2225
+ import * as p7 from "@clack/prompts";
2226
+ import fs4 from "fs";
2227
+ import {
2228
+ compile as compile14,
2229
+ LintEngine as LintEngine6,
2230
+ ALL_RULES as ALL_RULES7,
2231
+ applyFixes as applyFixes4,
2232
+ computeTier as computeTier7,
2233
+ loadConfig as loadConfig15
2234
+ } from "@runcontext/core";
2235
+ async function runAutofixStep(ctx) {
2236
+ const config = loadConfig15(ctx.cwd);
2237
+ const { graph } = await compile14({ contextDir: ctx.contextDir, config, rootDir: ctx.cwd });
2238
+ graph.dataValidation = await collectDataValidation(ctx.adapter, graph);
2239
+ const engine = new LintEngine6();
2240
+ for (const rule of ALL_RULES7) engine.register(rule);
2241
+ const diagnostics = engine.run(graph);
2242
+ const fixable = diagnostics.filter((d) => d.fixable);
2243
+ if (fixable.length === 0) {
2244
+ p7.log.success("No fixable issues found.");
2245
+ ctx.graph = graph;
2246
+ ctx.tierScore = computeTier7(ctx.modelName, graph);
2247
+ return { skipped: true, summary: "Nothing to fix" };
2248
+ }
2249
+ const shouldRun = await p7.confirm({
2250
+ message: `Auto-fix ${fixable.length} issue(s)?`
2251
+ });
2252
+ if (p7.isCancel(shouldRun) || !shouldRun) {
2253
+ return { skipped: true, summary: "Skipped" };
2254
+ }
2255
+ const spin = p7.spinner();
2256
+ spin.start("Fixing...");
2257
+ const readFile = (filePath) => fs4.readFileSync(filePath, "utf-8");
2258
+ const fixedFiles = applyFixes4(fixable, readFile);
2259
+ for (const [file, content] of fixedFiles) {
2260
+ fs4.writeFileSync(file, content, "utf-8");
2261
+ }
2262
+ const { graph: newGraph } = await compile14({ contextDir: ctx.contextDir, config, rootDir: ctx.cwd });
2263
+ ctx.graph = newGraph;
2264
+ ctx.tierScore = computeTier7(ctx.modelName, newGraph);
2265
+ spin.stop(`Fixed ${fixable.length} issue(s) in ${fixedFiles.size} file(s)`);
2266
+ displayTierScore(ctx.tierScore);
2267
+ return { skipped: false, summary: `${fixable.length} issues fixed` };
2268
+ }
2269
+
2270
+ // src/setup/steps/claude-md.ts
2271
+ import * as p8 from "@clack/prompts";
2272
+ import path17 from "path";
2273
+ import { existsSync as existsSync7, writeFileSync as writeFileSync9 } from "fs";
2274
/**
 * Build the markdown body of context/AGENT_INSTRUCTIONS.md for the setup wizard.
 *
 * Grounded entirely in the wizard context:
 *  - `ctx.modelName`  — model the instructions are scoped to
 *  - `ctx.tierScore`  — optional scorecard; `tier` plus bronze/silver/gold check groups
 *  - `ctx.graph`      — optional compiled graph; `models` Map keyed by model name
 *
 * Returns the full markdown document as a string. Pure: no I/O, no mutation of ctx.
 */
function buildClaudeMd(ctx) {
  const modelName = ctx.modelName;
  // Tier may be absent when no scorecard was computed yet.
  const tier = ctx.tierScore?.tier?.toUpperCase() ?? "UNKNOWN";
  const model = ctx.graph?.models.get(modelName);
  const datasets = model?.datasets ?? [];
  const datasetList = datasets.map((ds) => `- \`${ds.name}\` \u2014 ${ds.fields?.length ?? 0} fields`).join("\n");
  const failingChecks = [];
  if (ctx.tierScore) {
    // The scorecard groups checks per tier; collect every failing check across
    // all three groups in bronze -> silver -> gold order (same as before).
    for (const group of [ctx.tierScore.bronze, ctx.tierScore.silver, ctx.tierScore.gold]) {
      for (const check of group.checks) {
        if (!check.passed) failingChecks.push(`- ${check.id}: ${check.detail ?? check.label}`);
      }
    }
  }
  const failingSection = failingChecks.length > 0 ? `### Failing Checks

${failingChecks.join("\n")}` : "All checks passing.";
  return `# ContextKit Agent Instructions

You have two MCP servers: **duckdb** (query data) and **contextkit** (query metadata).

Model: **${modelName}** | Current Tier: **${tier}**

## The Cardinal Rule: Never Fabricate Metadata

**Every piece of metadata you write must be grounded in evidence from the actual data.**

- NEVER invent owner names, emails, team names, or contact info
- NEVER write a field description that is just the column name repeated
- NEVER assign a semantic_role without first querying the column's actual values
- NEVER mark a field as additive without understanding what summing it means
- NEVER write lineage entries without knowing the actual data sources
- NEVER write a business_context narrative you can't justify from the data
- NEVER create a glossary definition that is just "Definition for X"

If you don't know something, say so. Leave it as a TODO with a note about what you'd need to determine the answer. An honest TODO is infinitely better than fabricated metadata that looks plausible but is wrong.

## On Session Start

1. Run \`context_tier\` to check the current metadata tier (Bronze/Silver/Gold)
2. Report the current tier and list failing checks
3. Ask the user what they'd like to work on \u2014 don't start changing files unprompted

## When Asked to Reach Gold

Work through ALL failing Gold checks iteratively until \`context tier\` reports Gold:

1. Run \`context_tier\` and collect every failing check
2. For each failing check, query the database to gather evidence, then fix the metadata
3. Run \`context_tier\` again
4. If checks still fail, go back to step 2
5. **Do NOT stop until every Gold check passes** or you hit something that genuinely requires human input (like real owner contact info)
6. For checks you cannot fix (e.g., owner email), leave a clear TODO explaining what a human needs to provide

You must iterate \u2014 a single pass is never enough. Each \`context tier\` run may reveal new failures after earlier ones are fixed.

## How to Curate Metadata (the right way)

### Before writing ANY metadata, query the database first

For every field you're about to describe or classify:

\`\`\`sql
-- What type of values does this column contain?
SELECT DISTINCT column_name FROM table LIMIT 20;

-- For numeric columns: is this a metric or dimension?
SELECT MIN(col), MAX(col), AVG(col), COUNT(DISTINCT col) FROM table;

-- For potential metrics: does SUM make sense?
-- If SUM produces a meaningful business number \u2192 additive: true
-- If SUM is meaningless (e.g., summing percentages, scores, ratings) \u2192 additive: false
\`\`\`

### Semantic Role Decision Tree

Query the column first, then apply this logic:

1. **Is it a primary key or foreign key?** \u2192 \`identifier\`
2. **Is it a date or timestamp?** \u2192 \`date\`
3. **Is it numeric AND does aggregation make business sense?**
   - Does SUM make sense? (counts, amounts, quantities) \u2192 \`metric\`, \`additive: true\`
   - Does only AVG/MIN/MAX make sense? (rates, percentages, scores, ratings) \u2192 \`metric\`, \`additive: false\`
4. **Everything else** \u2192 \`dimension\`

Common mistakes to avoid:
- \`stars\` (ratings) \u2192 metric with AVG, NOT additive (summing star ratings is meaningless)
- \`_per_10k_people\` (rates) \u2192 metric with AVG, NOT additive
- \`_score\` (composite scores) \u2192 metric with AVG, NOT additive
- \`useful/funny/cool\` (vote counts) \u2192 metric with SUM, additive
- \`_count\` fields \u2192 metric with SUM, additive (usually)

### Field Descriptions

Write descriptions that help someone who has never seen this database understand what the column contains. Include:
- What the value represents
- Units or scale (if applicable)
- Where the data comes from (if known)
- Any known quirks or caveats

Bad: \`description: total_population\`
Good: \`description: Total resident population of the census tract from American Community Survey 5-year estimates\`

Bad: \`description: stars\`
Good: \`description: Average Yelp star rating (1.0-5.0 scale) based on all reviews for this business\`

### Lineage

Upstream sources are the EXTERNAL systems that feed data into this warehouse. They are NOT the tables in the warehouse itself.

Ask yourself: "Where did this data originally come from before it was loaded here?"

Bad lineage:
\`\`\`yaml
upstream:
  - source: yelp_business # This is a table IN the warehouse, not an upstream source
    type: pipeline
\`\`\`

Good lineage:
\`\`\`yaml
upstream:
  - source: yelp-academic-dataset
    type: file
    notes: Yelp Open Dataset (academic use), loaded via CSV import
\`\`\`

### Owner Files

Do NOT create fake owner identities. If the real owner is unknown:
- Keep the existing owner file as-is
- Note in the file that contact info needs to be filled in by a real person
- NEVER invent email addresses like \`analytics@example.com\`

### Business Context

Write business_context entries that describe real analytical use cases you can verify from the data. Query the data to understand what questions it can answer before writing narratives.

### Golden Queries

Every golden query MUST be tested against the actual database before you write it. Run the SQL, verify it returns sensible results, then document it.

### Data Quality

When you discover data quality issues (null values, broken joins, missing data), FLAG THEM \u2014 don't hide them. Add notes in governance or report them to the user.

## This Project

### Datasets

${datasetList || "(none detected)"}

${failingSection}

## MCP Tools

| Tool | Parameters | What it does |
|------|-----------|-------------|
| \`context_search\` | \`query\` | Find models, datasets, fields, terms by keyword |
| \`context_explain\` | \`model\` | Full model details \u2014 governance, rules, lineage, tier |
| \`context_validate\` | \u2014 | Run linter, get errors and warnings |
| \`context_tier\` | \`model\` | Tier scorecard with all check results |
| \`context_golden_query\` | \`question\` | Find pre-validated SQL for a question |
| \`context_guardrails\` | \`tables[]\` | Get required WHERE clauses for tables |

## Tier Checks Quick Reference

**Bronze (7):** descriptions, owner, security, grain, table_type
**Silver (+6):** trust, 2+ tags, glossary linked, lineage, refresh, 2+ sample_values
**Gold (+21):** semantic_role on ALL fields, metric aggregation/additive, 1+ guardrail, 3+ golden queries, 1+ business rule, 1+ hierarchy, 1+ default_filter, trust=endorsed, contactable owner, 1+ relationship, description \u226550 chars, ai_context (no TODO), 1+ business_context, version, field descriptions not lazy, glossary definitions substantive, lineage references real sources, grain statements specific, ai_context filled in

## YAML Formats

**Governance** (\`context/governance/*.governance.yaml\`):
\`\`\`yaml
model: my-model
owner: team-name
version: "1.0.0"
trust: endorsed
security: internal
tags: [domain-tag-1, domain-tag-2]
business_context:
  - name: Use Case Name
    description: What analytical question this data answers and for whom.
datasets:
  my_table:
    grain: "One row per [entity] identified by [key]"
    table_type: fact # fact | dimension | event | view
    refresh: daily
fields:
  dataset.field:
    semantic_role: metric # metric | dimension | identifier | date
    default_aggregation: SUM # SUM | AVG | COUNT | COUNT_DISTINCT | MIN | MAX
    additive: true # can this metric be summed across dimensions?
    default_filter: "is_open = 1"
    sample_values: ["val1", "val2"]
\`\`\`

**Rules** (\`context/rules/*.rules.yaml\`):
\`\`\`yaml
model: my-model
golden_queries:
  - question: What are the top items by count?
    sql: SELECT name, count FROM my_table ORDER BY count DESC LIMIT 10
    intent: Identify top performers by volume
    caveats: Filters to active records only
business_rules:
  - name: valid-ratings
    definition: All ratings must be between 1 and 5
guardrail_filters:
  - name: active-only
    filter: "status = 'active'"
    reason: Exclude inactive records from analytics
    tables: [my_table]
hierarchies:
  - name: geography
    levels: [state, city, postal_code]
    dataset: my_table
\`\`\`

## CLI Commands

\`\`\`bash
context tier # Check scorecard
context verify --db <path> # Validate against live data
context fix --db <path> # Auto-fix data warnings
context setup # Interactive setup wizard
context dev # Watch mode for live editing
\`\`\`
`;
}
2509
/**
 * Setup step: write context/AGENT_INSTRUCTIONS.md built from the wizard context.
 * If the file already exists, ask before overwriting; a cancel or "no" answer
 * keeps the existing file and reports the step as skipped.
 */
async function runClaudeMdStep(ctx) {
  const targetFile = path17.join(ctx.contextDir, "AGENT_INSTRUCTIONS.md");
  if (existsSync7(targetFile)) {
    const answer = await p8.confirm({
      message: "context/AGENT_INSTRUCTIONS.md already exists. Overwrite with updated instructions?"
    });
    const keepExisting = p8.isCancel(answer) || !answer;
    if (keepExisting) {
      return { skipped: true, summary: "context/AGENT_INSTRUCTIONS.md already exists, kept existing" };
    }
  }
  writeFileSync9(targetFile, buildClaudeMd(ctx), "utf-8");
  p8.log.success("Generated context/AGENT_INSTRUCTIONS.md with agent curation instructions");
  return { skipped: false, summary: "Generated context/AGENT_INSTRUCTIONS.md" };
}
2524
+
2525
+ // src/commands/setup.ts
2526
/**
 * `context setup` — interactive wizard that connects to a database, scaffolds
 * Bronze metadata, optionally enriches toward Silver/Gold, verifies against
 * live data, auto-fixes warnings, and emits agent instructions. The database
 * adapter is always disconnected on exit, even if a step throws.
 */
var setupCommand = new Command15("setup").description("Interactive wizard to scaffold and enrich metadata from a database").action(async () => {
  p9.intro(chalk16.bgCyan(chalk16.black(" ContextKit Setup ")));
  const ctx = await runConnectStep();
  if (!ctx) return;
  try {
    const wantsSilver = ctx.targetTier === "silver" || ctx.targetTier === "gold";
    const wantsGold = ctx.targetTier === "gold";
    // Pipeline assembled up-front; enrichment steps depend on the chosen target tier.
    const steps = [
      { name: "Scaffold Bronze", fn: runScaffoldStep },
      ...(wantsSilver ? [{ name: "Enrich to Silver", fn: runEnrichSilverStep }] : []),
      ...(wantsGold ? [{ name: "Enrich to Gold", fn: runEnrichGoldStep }] : []),
      { name: "Verify data", fn: runVerifyStep },
      { name: "Auto-fix", fn: runAutofixStep },
      { name: "Generate agent instructions", fn: runClaudeMdStep }
    ];
    const outcomes = [];
    for (const [idx, step] of steps.entries()) {
      p9.log.step(`${chalk16.dim(`[${idx + 1}/${steps.length}]`)} ${step.name}`);
      const result = await step.fn(ctx);
      outcomes.push({ name: step.name, summary: result.summary });
    }
    const summaryLines = outcomes.map((r) => ` ${chalk16.green("+")} ${r.name}: ${r.summary}`).join("\n");
    p9.note(summaryLines, "Summary");
    if (ctx.tierScore) {
      displayTierScore(ctx.tierScore);
    }
    const currentTier = ctx.tierScore?.tier ?? "none";
    // Target is reached when the achieved tier sits at or above the requested rung.
    const tierLadder = ["bronze", "silver", "gold"];
    const reachedTarget = tierLadder.includes(ctx.targetTier) && tierLadder.indexOf(currentTier) >= tierLadder.indexOf(ctx.targetTier);
    if (reachedTarget) {
      p9.outro(`Done! You're at ${chalk16.bold(currentTier.toUpperCase())}. Run ${chalk16.cyan("context tier")} anytime to check.`);
    } else if (wantsGold && currentTier !== "gold") {
      const nextSteps = [
        `Your metadata is at ${chalk16.bold(currentTier.toUpperCase())} \u2014 Gold needs curation.`,
        "",
        `${chalk16.bold("To reach Gold, tell your AI assistant:")}`,
        "",
        ` "Read ${chalk16.cyan("context/AGENT_INSTRUCTIONS.md")} for curation guidelines.`,
        ` Run ${chalk16.cyan("context tier")} and fix every failing Gold check.`,
        ` Query the database before writing any metadata.`,
        ` Keep iterating until ${chalk16.cyan("context tier")} reports Gold."`
      ];
      p9.note(nextSteps.join("\n"), "Next Steps");
      p9.outro(`Run ${chalk16.cyan("context dev")} to watch for changes as you edit.`);
    } else {
      p9.outro(`Run ${chalk16.cyan("context tier")} to check your scorecard.`);
    }
  } finally {
    try {
      await ctx.adapter.disconnect();
    } catch {
    }
  }
});
2584
+
1565
2585
  // src/index.ts
1566
- var program = new Command15();
2586
+ var program = new Command16();
1567
2587
  program.name("context").description("ContextKit \u2014 AI-ready metadata governance over OSI").version("0.3.1");
1568
2588
  program.addCommand(lintCommand);
1569
2589
  program.addCommand(buildCommand);
@@ -1579,5 +2599,6 @@ program.addCommand(introspectCommand);
1579
2599
  program.addCommand(verifyCommand);
1580
2600
  program.addCommand(enrichCommand);
1581
2601
  program.addCommand(rulesCommand);
2602
+ program.addCommand(setupCommand);
1582
2603
  program.parse();
1583
2604
  //# sourceMappingURL=index.js.map