@runcontext/cli 0.3.0 → 0.3.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js CHANGED
@@ -1,7 +1,7 @@
1
1
  #!/usr/bin/env node
2
2
 
3
3
  // src/index.ts
4
- import { Command as Command12 } from "commander";
4
+ import { Command as Command16 } from "commander";
5
5
 
6
6
  // src/commands/lint.ts
7
7
  import { Command } from "commander";
@@ -508,25 +508,352 @@ var explainCommand = new Command4("explain").description("Look up models, terms,
508
508
  });
509
509
 
510
510
  // src/commands/fix.ts
511
+ import { Command as Command7 } from "commander";
512
+ import chalk8 from "chalk";
513
+ import path7 from "path";
514
+ import fs2 from "fs";
515
+ import {
516
+ compile as compile6,
517
+ loadConfig as loadConfig6,
518
+ LintEngine as LintEngine3,
519
+ ALL_RULES as ALL_RULES3,
520
+ applyFixes as applyFixes2,
521
+ createAdapter as createAdapter3
522
+ } from "@runcontext/core";
523
+
524
+ // src/commands/introspect.ts
511
525
  import { Command as Command5 } from "commander";
512
526
  import chalk6 from "chalk";
513
527
  import path5 from "path";
514
- import fs2 from "fs";
528
+ import { mkdirSync, writeFileSync as writeFileSync2, existsSync } from "fs";
515
529
  import {
516
- compile as compile5,
517
530
  loadConfig as loadConfig4,
531
+ createAdapter,
532
+ scaffoldFromSchema
533
+ } from "@runcontext/core";
534
+ function parseDbUrl(db) {
535
+ if (db.startsWith("duckdb://")) {
536
+ return { adapter: "duckdb", path: db.slice("duckdb://".length) };
537
+ }
538
+ if (db.startsWith("postgres://") || db.startsWith("postgresql://")) {
539
+ return { adapter: "postgres", connection: db };
540
+ }
541
+ if (db.endsWith(".duckdb") || db.endsWith(".db")) {
542
+ return { adapter: "duckdb", path: db };
543
+ }
544
+ throw new Error(
545
+ `Cannot determine adapter from "${db}". Use duckdb:// or postgres:// prefix.`
546
+ );
547
+ }
548
+ var introspectCommand = new Command5("introspect").description("Introspect a database and scaffold Bronze-level OSI metadata").option(
549
+ "--db <url>",
550
+ "Database URL (e.g., duckdb://path.duckdb or postgres://...)"
551
+ ).option(
552
+ "--source <name>",
553
+ "Use a named data_source from contextkit.config.yaml"
554
+ ).option("--tables <glob>", 'Filter tables by glob pattern (e.g., "vw_*")').option(
555
+ "--model-name <name>",
556
+ "Name for the generated model (default: derived from source)"
557
+ ).action(async (opts) => {
558
+ try {
559
+ const config = loadConfig4(process.cwd());
560
+ const contextDir = path5.resolve(config.context_dir);
561
+ let dsConfig;
562
+ let dsName;
563
+ if (opts.db) {
564
+ dsConfig = parseDbUrl(opts.db);
565
+ dsName = opts.source ?? "default";
566
+ } else if (opts.source) {
567
+ if (!config.data_sources?.[opts.source]) {
568
+ console.error(
569
+ chalk6.red(
570
+ `Data source "${opts.source}" not found in config`
571
+ )
572
+ );
573
+ process.exit(1);
574
+ }
575
+ dsConfig = config.data_sources[opts.source];
576
+ dsName = opts.source;
577
+ } else {
578
+ const sources = config.data_sources;
579
+ if (!sources || Object.keys(sources).length === 0) {
580
+ console.error(
581
+ chalk6.red(
582
+ "No data source specified. Use --db <url> or configure data_sources in config"
583
+ )
584
+ );
585
+ process.exit(1);
586
+ }
587
+ const firstName = Object.keys(sources)[0];
588
+ dsConfig = sources[firstName];
589
+ dsName = firstName;
590
+ }
591
+ const adapter = await createAdapter(dsConfig);
592
+ await adapter.connect();
593
+ console.log(
594
+ chalk6.green(
595
+ `Connected to ${dsConfig.adapter}: ${dsConfig.path ?? dsConfig.connection}`
596
+ )
597
+ );
598
+ let tables = await adapter.listTables();
599
+ if (opts.tables) {
600
+ const pattern = opts.tables.replace(/\*/g, ".*");
601
+ const regex = new RegExp(`^${pattern}$`, "i");
602
+ tables = tables.filter((t) => regex.test(t.name));
603
+ }
604
+ console.log(`Discovered ${tables.length} tables/views`);
605
+ const columns = {};
606
+ for (const table of tables) {
607
+ columns[table.name] = await adapter.listColumns(table.name);
608
+ }
609
+ const totalCols = Object.values(columns).reduce(
610
+ (sum, cols) => sum + cols.length,
611
+ 0
612
+ );
613
+ console.log(`Found ${totalCols} columns total`);
614
+ await adapter.disconnect();
615
+ const modelName = opts.modelName ?? dsName.replace(/[^a-z0-9-]/gi, "-").toLowerCase();
616
+ const result = scaffoldFromSchema({
617
+ modelName,
618
+ dataSourceName: dsName,
619
+ tables,
620
+ columns
621
+ });
622
+ for (const dir of ["models", "governance", "owners"]) {
623
+ const dirPath = path5.join(contextDir, dir);
624
+ if (!existsSync(dirPath)) mkdirSync(dirPath, { recursive: true });
625
+ }
626
+ const osiPath = path5.join(contextDir, "models", result.files.osi);
627
+ const govPath = path5.join(
628
+ contextDir,
629
+ "governance",
630
+ result.files.governance
631
+ );
632
+ const ownerPath = path5.join(contextDir, "owners", result.files.owner);
633
+ writeFileSync2(osiPath, result.osiYaml, "utf-8");
634
+ writeFileSync2(govPath, result.governanceYaml, "utf-8");
635
+ if (!existsSync(ownerPath)) {
636
+ writeFileSync2(ownerPath, result.ownerYaml, "utf-8");
637
+ }
638
+ console.log("");
639
+ console.log(chalk6.green("Scaffolded:"));
640
+ console.log(` ${path5.relative(process.cwd(), osiPath)}`);
641
+ console.log(` ${path5.relative(process.cwd(), govPath)}`);
642
+ console.log(` ${path5.relative(process.cwd(), ownerPath)}`);
643
+ console.log("");
644
+ console.log(chalk6.cyan("Run `context tier` to check your tier score."));
645
+ console.log(
646
+ chalk6.cyan("Run `context verify` to validate against data.")
647
+ );
648
+ } catch (err) {
649
+ console.error(
650
+ chalk6.red(`Introspect failed: ${err.message}`)
651
+ );
652
+ process.exit(1);
653
+ }
654
+ });
655
+
656
+ // src/commands/verify.ts
657
+ import { Command as Command6 } from "commander";
658
+ import chalk7 from "chalk";
659
+ import path6 from "path";
660
+ import {
661
+ compile as compile5,
662
+ loadConfig as loadConfig5,
518
663
  LintEngine as LintEngine2,
519
664
  ALL_RULES as ALL_RULES2,
520
- applyFixes as applyFixes2
665
+ createAdapter as createAdapter2
521
666
  } from "@runcontext/core";
522
- var fixCommand = new Command5("fix").description("Auto-fix lint issues").option("--context-dir <path>", "Path to context directory").option("--format <type>", "Output format: pretty or json", "pretty").option("--dry-run", "Show what would be fixed without writing files").action(async (opts) => {
667
+ function findTable(dsName, graph, existingTables) {
668
+ if (existingTables.has(dsName)) return dsName;
669
+ for (const [, model] of graph.models) {
670
+ const ds = model.datasets.find((d) => d.name === dsName);
671
+ if (ds?.source) {
672
+ const tableName = ds.source.split(".").pop();
673
+ if (existingTables.has(tableName)) return tableName;
674
+ }
675
+ }
676
+ return void 0;
677
+ }
678
+ async function collectDataValidation(adapter, graph) {
679
+ const validation = {
680
+ existingTables: /* @__PURE__ */ new Map(),
681
+ existingColumns: /* @__PURE__ */ new Map(),
682
+ actualSampleValues: /* @__PURE__ */ new Map(),
683
+ goldenQueryResults: /* @__PURE__ */ new Map(),
684
+ guardrailResults: /* @__PURE__ */ new Map()
685
+ };
686
+ const tables = await adapter.listTables();
687
+ for (const t of tables) {
688
+ validation.existingTables.set(t.name, t.row_count);
689
+ }
690
+ for (const t of tables) {
691
+ const cols = await adapter.listColumns(t.name);
692
+ const colMap = new Map(cols.map((c) => [c.name, c.data_type]));
693
+ validation.existingColumns.set(t.name, colMap);
694
+ }
695
+ for (const [, gov] of graph.governance) {
696
+ if (!gov.fields) continue;
697
+ for (const [fieldKey, fieldGov] of Object.entries(gov.fields)) {
698
+ if (!fieldGov.sample_values || fieldGov.sample_values.length === 0)
699
+ continue;
700
+ const dotIdx = fieldKey.indexOf(".");
701
+ if (dotIdx < 0) continue;
702
+ const dsName = fieldKey.substring(0, dotIdx);
703
+ const fieldName = fieldKey.substring(dotIdx + 1);
704
+ const tableName = findTable(dsName, graph, validation.existingTables);
705
+ if (!tableName) continue;
706
+ try {
707
+ const result = await adapter.query(
708
+ `SELECT DISTINCT CAST("${fieldName}" AS VARCHAR) AS val FROM "${tableName}" WHERE "${fieldName}" IS NOT NULL LIMIT 50`
709
+ );
710
+ validation.actualSampleValues.set(
711
+ fieldKey,
712
+ result.rows.map((r) => String(r.val))
713
+ );
714
+ } catch {
715
+ }
716
+ }
717
+ }
718
+ for (const [, rules] of graph.rules) {
719
+ if (!rules.golden_queries) continue;
720
+ for (let i = 0; i < rules.golden_queries.length; i++) {
721
+ const gq = rules.golden_queries[i];
722
+ try {
723
+ const result = await adapter.query(gq.sql);
724
+ validation.goldenQueryResults.set(i, {
725
+ success: true,
726
+ rowCount: result.row_count
727
+ });
728
+ } catch (err) {
729
+ validation.goldenQueryResults.set(i, {
730
+ success: false,
731
+ error: err.message
732
+ });
733
+ }
734
+ }
735
+ }
736
+ for (const [, rules] of graph.rules) {
737
+ if (!rules.guardrail_filters) continue;
738
+ for (let i = 0; i < rules.guardrail_filters.length; i++) {
739
+ const gf = rules.guardrail_filters[i];
740
+ const testTable = gf.tables?.[0] ?? "unknown";
741
+ const tableName = findTable(testTable, graph, validation.existingTables);
742
+ if (!tableName) {
743
+ validation.guardrailResults.set(i, {
744
+ valid: false,
745
+ error: `Table "${testTable}" not found`
746
+ });
747
+ continue;
748
+ }
749
+ try {
750
+ await adapter.query(
751
+ `SELECT 1 FROM "${tableName}" WHERE ${gf.filter} LIMIT 1`
752
+ );
753
+ validation.guardrailResults.set(i, { valid: true });
754
+ } catch (err) {
755
+ validation.guardrailResults.set(i, {
756
+ valid: false,
757
+ error: err.message
758
+ });
759
+ }
760
+ }
761
+ }
762
+ return validation;
763
+ }
764
+ var verifyCommand = new Command6("verify").description("Validate metadata accuracy against a live database").option("--source <name>", "Use a specific data_source from config").option("--db <url>", "Database URL override (postgres:// or path.duckdb)").option("--context-dir <path>", "Path to context directory").option("--format <type>", "Output format: pretty or json", "pretty").action(async (opts) => {
523
765
  try {
524
- const config = loadConfig4(process.cwd());
525
- const contextDir = opts.contextDir ? path5.resolve(opts.contextDir) : path5.resolve(config.context_dir);
526
- const { graph } = await compile5({ contextDir, config, rootDir: process.cwd() });
527
- const overrides = config.lint?.severity_overrides;
528
- const engine = new LintEngine2(overrides);
766
+ const config = loadConfig5(process.cwd());
767
+ const contextDir = opts.contextDir ? path6.resolve(opts.contextDir) : path6.resolve(config.context_dir);
768
+ const { graph, diagnostics: compileDiags } = await compile5({
769
+ contextDir,
770
+ config
771
+ });
772
+ let dsConfig;
773
+ if (opts.db) {
774
+ dsConfig = parseDbUrl(opts.db);
775
+ } else {
776
+ const sources = config.data_sources;
777
+ if (!sources || Object.keys(sources).length === 0) {
778
+ console.error(
779
+ chalk7.red(
780
+ "No data source configured. Add data_sources to contextkit.config.yaml or use --db."
781
+ )
782
+ );
783
+ process.exit(1);
784
+ }
785
+ const name = opts.source ?? Object.keys(sources)[0];
786
+ const resolved = sources[name];
787
+ if (!resolved) {
788
+ console.error(
789
+ chalk7.red(
790
+ `Data source "${name}" not found. Available: ${Object.keys(sources).join(", ")}`
791
+ )
792
+ );
793
+ process.exit(1);
794
+ return;
795
+ }
796
+ dsConfig = resolved;
797
+ }
798
+ const adapter = await createAdapter2(dsConfig);
799
+ await adapter.connect();
800
+ console.log(chalk7.green(`Connected to ${dsConfig.adapter}`));
801
+ console.log("Collecting validation data...\n");
802
+ graph.dataValidation = await collectDataValidation(adapter, graph);
803
+ await adapter.disconnect();
804
+ const engine = new LintEngine2();
529
805
  for (const rule of ALL_RULES2) {
806
+ if (rule.id.startsWith("data/")) {
807
+ engine.register(rule);
808
+ }
809
+ }
810
+ const dataDiags = engine.run(graph);
811
+ const allDiags = [...dataDiags];
812
+ if (allDiags.length === 0) {
813
+ const tableCount = graph.dataValidation.existingTables.size;
814
+ const totalRows = [
815
+ ...graph.dataValidation.existingTables.values()
816
+ ].reduce((a, b) => a + b, 0);
817
+ console.log(chalk7.green("All data validation checks passed.\n"));
818
+ console.log(
819
+ `Verified against ${tableCount} table(s) (${totalRows.toLocaleString()} total rows)`
820
+ );
821
+ } else {
822
+ console.log(formatDiagnostics(allDiags));
823
+ }
824
+ const hasErrors = allDiags.some((d) => d.severity === "error");
825
+ if (hasErrors) process.exit(1);
826
+ } catch (err) {
827
+ console.error(chalk7.red(`Verify failed: ${err.message}`));
828
+ process.exit(1);
829
+ }
830
+ });
831
+
832
+ // src/commands/fix.ts
833
+ var fixCommand = new Command7("fix").description("Auto-fix lint issues").option("--context-dir <path>", "Path to context directory").option("--format <type>", "Output format: pretty or json", "pretty").option("--dry-run", "Show what would be fixed without writing files").option("--db <url>", "Database URL for data-aware fixes (postgres:// or path.duckdb)").option("--source <name>", "Use a specific data_source from config").action(async (opts) => {
834
+ try {
835
+ const config = loadConfig6(process.cwd());
836
+ const contextDir = opts.contextDir ? path7.resolve(opts.contextDir) : path7.resolve(config.context_dir);
837
+ const { graph } = await compile6({ contextDir, config, rootDir: process.cwd() });
838
+ let dsConfig;
839
+ if (opts.db) {
840
+ dsConfig = parseDbUrl(opts.db);
841
+ } else {
842
+ const sources = config.data_sources;
843
+ if (sources && Object.keys(sources).length > 0) {
844
+ const name = opts.source ?? Object.keys(sources)[0];
845
+ dsConfig = sources[name];
846
+ }
847
+ }
848
+ if (dsConfig) {
849
+ const adapter = await createAdapter3(dsConfig);
850
+ await adapter.connect();
851
+ graph.dataValidation = await collectDataValidation(adapter, graph);
852
+ await adapter.disconnect();
853
+ }
854
+ const overrides = config.lint?.severity_overrides;
855
+ const engine = new LintEngine3(overrides);
856
+ for (const rule of ALL_RULES3) {
530
857
  engine.register(rule);
531
858
  }
532
859
  const diagnostics = engine.run(graph);
@@ -535,7 +862,7 @@ var fixCommand = new Command5("fix").description("Auto-fix lint issues").option(
535
862
  if (opts.format === "json") {
536
863
  console.log(formatJson({ fixedFiles: [], fixCount: 0 }));
537
864
  } else {
538
- console.log(chalk6.green("No fixable issues found."));
865
+ console.log(chalk8.green("No fixable issues found."));
539
866
  }
540
867
  return;
541
868
  }
@@ -552,10 +879,10 @@ var fixCommand = new Command5("fix").description("Auto-fix lint issues").option(
552
879
  );
553
880
  } else {
554
881
  console.log(
555
- chalk6.yellow(`Dry run: ${fixable.length} issue(s) would be fixed in ${fixedFiles.size} file(s):`)
882
+ chalk8.yellow(`Dry run: ${fixable.length} issue(s) would be fixed in ${fixedFiles.size} file(s):`)
556
883
  );
557
884
  for (const file of fixedFiles.keys()) {
558
- console.log(chalk6.gray(` ${file}`));
885
+ console.log(chalk8.gray(` ${file}`));
559
886
  }
560
887
  }
561
888
  return;
@@ -584,15 +911,15 @@ var fixCommand = new Command5("fix").description("Auto-fix lint issues").option(
584
911
  });
585
912
 
586
913
  // src/commands/dev.ts
587
- import { Command as Command6 } from "commander";
588
- import chalk7 from "chalk";
589
- import path6 from "path";
590
- import { readFileSync as readFileSync2, writeFileSync as writeFileSync2 } from "fs";
914
+ import { Command as Command8 } from "commander";
915
+ import chalk9 from "chalk";
916
+ import path8 from "path";
917
+ import { readFileSync as readFileSync2, writeFileSync as writeFileSync3 } from "fs";
591
918
  import {
592
- compile as compile6,
593
- loadConfig as loadConfig5,
594
- LintEngine as LintEngine3,
595
- ALL_RULES as ALL_RULES3,
919
+ compile as compile7,
920
+ loadConfig as loadConfig7,
921
+ LintEngine as LintEngine4,
922
+ ALL_RULES as ALL_RULES4,
596
923
  filterByDirectives as filterByDirectives2,
597
924
  applyFixes as applyFixes3
598
925
  } from "@runcontext/core";
@@ -601,15 +928,15 @@ function diagKey(d) {
601
928
  }
602
929
  var previousDiags = /* @__PURE__ */ new Map();
603
930
  async function runLint(contextDir, fix) {
604
- const config = loadConfig5(process.cwd());
605
- const { graph, diagnostics: compileDiags, directives } = await compile6({
931
+ const config = loadConfig7(process.cwd());
932
+ const { graph, diagnostics: compileDiags, directives } = await compile7({
606
933
  contextDir,
607
934
  config,
608
935
  rootDir: process.cwd()
609
936
  });
610
937
  const overrides = config.lint?.severity_overrides;
611
- const engine = new LintEngine3(overrides);
612
- for (const rule of ALL_RULES3) {
938
+ const engine = new LintEngine4(overrides);
939
+ for (const rule of ALL_RULES4) {
613
940
  engine.register(rule);
614
941
  }
615
942
  const lintDiags = engine.run(graph);
@@ -619,20 +946,20 @@ async function runLint(contextDir, fix) {
619
946
  if (fixable.length > 0) {
620
947
  const fixes = applyFixes3(fixable, (filePath) => readFileSync2(filePath, "utf-8"));
621
948
  for (const [file, content] of fixes) {
622
- writeFileSync2(file, content, "utf-8");
949
+ writeFileSync3(file, content, "utf-8");
623
950
  }
624
- const { graph: reGraph, diagnostics: reCompileDiags, directives: reDirs } = await compile6({
951
+ const { graph: reGraph, diagnostics: reCompileDiags, directives: reDirs } = await compile7({
625
952
  contextDir,
626
953
  config,
627
954
  rootDir: process.cwd()
628
955
  });
629
- const reEngine = new LintEngine3(overrides);
630
- for (const rule of ALL_RULES3) {
956
+ const reEngine = new LintEngine4(overrides);
957
+ for (const rule of ALL_RULES4) {
631
958
  reEngine.register(rule);
632
959
  }
633
960
  allDiags = filterByDirectives2([...reCompileDiags, ...reEngine.run(reGraph)], reDirs);
634
961
  if (fixable.length > 0) {
635
- console.log(chalk7.green(` Auto-fixed ${fixable.length} issue(s).`));
962
+ console.log(chalk9.green(` Auto-fixed ${fixable.length} issue(s).`));
636
963
  }
637
964
  }
638
965
  }
@@ -649,16 +976,16 @@ async function runLint(contextDir, fix) {
649
976
  if (!currentDiags.has(key)) resolved.push(d);
650
977
  }
651
978
  console.clear();
652
- console.log(chalk7.gray(`[${(/* @__PURE__ */ new Date()).toLocaleTimeString()}] Linting...`));
979
+ console.log(chalk9.gray(`[${(/* @__PURE__ */ new Date()).toLocaleTimeString()}] Linting...`));
653
980
  if (previousDiags.size > 0) {
654
981
  if (resolved.length > 0) {
655
- console.log(chalk7.green(` ${resolved.length} issue(s) resolved`));
982
+ console.log(chalk9.green(` ${resolved.length} issue(s) resolved`));
656
983
  }
657
984
  if (newIssues.length > 0) {
658
- console.log(chalk7.red(` ${newIssues.length} new issue(s)`));
985
+ console.log(chalk9.red(` ${newIssues.length} new issue(s)`));
659
986
  }
660
987
  if (resolved.length === 0 && newIssues.length === 0) {
661
- console.log(chalk7.gray(" No changes"));
988
+ console.log(chalk9.gray(" No changes"));
662
989
  }
663
990
  console.log("");
664
991
  }
@@ -666,14 +993,14 @@ async function runLint(contextDir, fix) {
666
993
  console.log("");
667
994
  previousDiags = currentDiags;
668
995
  }
669
- var devCommand = new Command6("dev").description("Watch mode \u2014 re-run lint on file changes").option("--context-dir <path>", "Path to context directory").option("--fix", "Auto-fix problems on each re-lint").action(async (opts) => {
996
+ var devCommand = new Command8("dev").description("Watch mode \u2014 re-run lint on file changes").option("--context-dir <path>", "Path to context directory").option("--fix", "Auto-fix problems on each re-lint").action(async (opts) => {
670
997
  try {
671
- const config = loadConfig5(process.cwd());
672
- const contextDir = opts.contextDir ? path6.resolve(opts.contextDir) : path6.resolve(config.context_dir);
998
+ const config = loadConfig7(process.cwd());
999
+ const contextDir = opts.contextDir ? path8.resolve(opts.contextDir) : path8.resolve(config.context_dir);
673
1000
  const fix = opts.fix === true;
674
- console.log(chalk7.blue(`Watching ${contextDir} for changes...`));
675
- if (fix) console.log(chalk7.blue("Auto-fix enabled."));
676
- console.log(chalk7.gray("Press Ctrl+C to stop.\n"));
1001
+ console.log(chalk9.blue(`Watching ${contextDir} for changes...`));
1002
+ if (fix) console.log(chalk9.blue("Auto-fix enabled."));
1003
+ console.log(chalk9.gray("Press Ctrl+C to stop.\n"));
677
1004
  await runLint(contextDir, fix);
678
1005
  const { watch } = await import("chokidar");
679
1006
  let debounceTimer = null;
@@ -690,21 +1017,21 @@ var devCommand = new Command6("dev").description("Watch mode \u2014 re-run lint
690
1017
  await runLint(contextDir, fix);
691
1018
  } catch (err) {
692
1019
  console.error(
693
- chalk7.red(`Lint error: ${err.message}`)
1020
+ chalk9.red(`Lint error: ${err.message}`)
694
1021
  );
695
1022
  }
696
1023
  }, 300);
697
1024
  });
698
1025
  } catch (err) {
699
- console.error(chalk7.red(`Dev mode failed: ${err.message}`));
1026
+ console.error(chalk9.red(`Dev mode failed: ${err.message}`));
700
1027
  process.exit(1);
701
1028
  }
702
1029
  });
703
1030
 
704
1031
  // src/commands/init.ts
705
- import { Command as Command7 } from "commander";
706
- import chalk8 from "chalk";
707
- import path7 from "path";
1032
+ import { Command as Command9 } from "commander";
1033
+ import chalk10 from "chalk";
1034
+ import path9 from "path";
708
1035
  import fs3 from "fs";
709
1036
  var EXAMPLE_OSI = `version: "1.0"
710
1037
 
@@ -769,26 +1096,26 @@ var EXAMPLE_CONFIG = `context_dir: context
769
1096
  output_dir: dist
770
1097
  minimum_tier: bronze
771
1098
  `;
772
- var initCommand = new Command7("init").description("Scaffold a v0.2 ContextKit project structure").option("--dir <path>", "Root directory for the project", ".").action(async (opts) => {
1099
+ var initCommand = new Command9("init").description("Scaffold a v0.2 ContextKit project structure").option("--dir <path>", "Root directory for the project", ".").action(async (opts) => {
773
1100
  try {
774
- const rootDir = path7.resolve(opts.dir);
775
- const contextDir = path7.join(rootDir, "context");
1101
+ const rootDir = path9.resolve(opts.dir);
1102
+ const contextDir = path9.join(rootDir, "context");
776
1103
  const dirs = [
777
- path7.join(contextDir, "models"),
778
- path7.join(contextDir, "governance"),
779
- path7.join(contextDir, "glossary"),
780
- path7.join(contextDir, "owners")
1104
+ path9.join(contextDir, "models"),
1105
+ path9.join(contextDir, "governance"),
1106
+ path9.join(contextDir, "glossary"),
1107
+ path9.join(contextDir, "owners")
781
1108
  ];
782
1109
  for (const dir of dirs) {
783
1110
  fs3.mkdirSync(dir, { recursive: true });
784
1111
  }
785
1112
  const files = [
786
1113
  {
787
- path: path7.join(contextDir, "models", "example-model.osi.yaml"),
1114
+ path: path9.join(contextDir, "models", "example-model.osi.yaml"),
788
1115
  content: EXAMPLE_OSI
789
1116
  },
790
1117
  {
791
- path: path7.join(
1118
+ path: path9.join(
792
1119
  contextDir,
793
1120
  "governance",
794
1121
  "example-model.governance.yaml"
@@ -796,15 +1123,15 @@ var initCommand = new Command7("init").description("Scaffold a v0.2 ContextKit p
796
1123
  content: EXAMPLE_GOVERNANCE
797
1124
  },
798
1125
  {
799
- path: path7.join(contextDir, "glossary", "glossary.term.yaml"),
1126
+ path: path9.join(contextDir, "glossary", "glossary.term.yaml"),
800
1127
  content: EXAMPLE_TERM
801
1128
  },
802
1129
  {
803
- path: path7.join(contextDir, "owners", "data-team.owner.yaml"),
1130
+ path: path9.join(contextDir, "owners", "data-team.owner.yaml"),
804
1131
  content: EXAMPLE_OWNER
805
1132
  },
806
1133
  {
807
- path: path7.join(rootDir, "contextkit.config.yaml"),
1134
+ path: path9.join(rootDir, "contextkit.config.yaml"),
808
1135
  content: EXAMPLE_CONFIG
809
1136
  }
810
1137
  ];
@@ -812,11 +1139,11 @@ var initCommand = new Command7("init").description("Scaffold a v0.2 ContextKit p
812
1139
  let skipped = 0;
813
1140
  for (const file of files) {
814
1141
  if (fs3.existsSync(file.path)) {
815
- console.log(chalk8.gray(` skip ${path7.relative(rootDir, file.path)} (exists)`));
1142
+ console.log(chalk10.gray(` skip ${path9.relative(rootDir, file.path)} (exists)`));
816
1143
  skipped++;
817
1144
  } else {
818
1145
  fs3.writeFileSync(file.path, file.content, "utf-8");
819
- console.log(chalk8.green(` create ${path7.relative(rootDir, file.path)}`));
1146
+ console.log(chalk10.green(` create ${path9.relative(rootDir, file.path)}`));
820
1147
  created++;
821
1148
  }
822
1149
  }
@@ -827,10 +1154,10 @@ var initCommand = new Command7("init").description("Scaffold a v0.2 ContextKit p
827
1154
  )
828
1155
  );
829
1156
  console.log("");
830
- console.log(chalk8.gray("Next steps:"));
831
- console.log(chalk8.gray(" 1. Edit the example files in context/"));
832
- console.log(chalk8.gray(" 2. Run: context lint"));
833
- console.log(chalk8.gray(" 3. Run: context build"));
1157
+ console.log(chalk10.gray("Next steps:"));
1158
+ console.log(chalk10.gray(" 1. Edit the example files in context/"));
1159
+ console.log(chalk10.gray(" 2. Run: context lint"));
1160
+ console.log(chalk10.gray(" 3. Run: context build"));
834
1161
  } catch (err) {
835
1162
  console.error(formatError(err.message));
836
1163
  process.exit(1);
@@ -838,15 +1165,15 @@ var initCommand = new Command7("init").description("Scaffold a v0.2 ContextKit p
838
1165
  });
839
1166
 
840
1167
  // src/commands/site.ts
841
- import { Command as Command8 } from "commander";
842
- import chalk9 from "chalk";
843
- import path8 from "path";
844
- import { compile as compile7, loadConfig as loadConfig6, emitManifest as emitManifest2 } from "@runcontext/core";
845
- var siteCommand = new Command8("site").description("Build a static documentation site from compiled context").option("--context-dir <path>", "Path to context directory").option("--output-dir <path>", "Path to site output directory").action(async (opts) => {
1168
+ import { Command as Command10 } from "commander";
1169
+ import chalk11 from "chalk";
1170
+ import path10 from "path";
1171
+ import { compile as compile8, loadConfig as loadConfig8, emitManifest as emitManifest2 } from "@runcontext/core";
1172
+ var siteCommand = new Command10("site").description("Build a static documentation site from compiled context").option("--context-dir <path>", "Path to context directory").option("--output-dir <path>", "Path to site output directory").action(async (opts) => {
846
1173
  try {
847
- const config = loadConfig6(process.cwd());
848
- const contextDir = opts.contextDir ? path8.resolve(opts.contextDir) : path8.resolve(config.context_dir);
849
- const { graph } = await compile7({ contextDir, config, rootDir: process.cwd() });
1174
+ const config = loadConfig8(process.cwd());
1175
+ const contextDir = opts.contextDir ? path10.resolve(opts.contextDir) : path10.resolve(config.context_dir);
1176
+ const { graph } = await compile8({ contextDir, config, rootDir: process.cwd() });
850
1177
  const manifest = emitManifest2(graph, config);
851
1178
  let buildSite;
852
1179
  try {
@@ -856,15 +1183,15 @@ var siteCommand = new Command8("site").description("Build a static documentation
856
1183
  }
857
1184
  if (!buildSite) {
858
1185
  console.log(
859
- chalk9.yellow(
1186
+ chalk11.yellow(
860
1187
  "Site generator is not yet available. Install @runcontext/site to enable this command."
861
1188
  )
862
1189
  );
863
1190
  process.exit(0);
864
1191
  }
865
- const outputDir = opts.outputDir ? path8.resolve(opts.outputDir) : path8.resolve(config.site?.base_path ?? "site");
1192
+ const outputDir = opts.outputDir ? path10.resolve(opts.outputDir) : path10.resolve(config.site?.base_path ?? "site");
866
1193
  await buildSite(manifest, config, outputDir);
867
- console.log(chalk9.green(`Site built to ${outputDir}`));
1194
+ console.log(chalk11.green(`Site built to ${outputDir}`));
868
1195
  } catch (err) {
869
1196
  console.error(formatError(err.message));
870
1197
  process.exit(1);
@@ -872,9 +1199,9 @@ var siteCommand = new Command8("site").description("Build a static documentation
872
1199
  });
873
1200
 
874
1201
  // src/commands/serve.ts
875
- import { Command as Command9 } from "commander";
876
- import chalk10 from "chalk";
877
- var serveCommand = new Command9("serve").description("Start the MCP server (stdio transport)").option("--context-dir <path>", "Path to context directory").action(async (opts) => {
1202
+ import { Command as Command11 } from "commander";
1203
+ import chalk12 from "chalk";
1204
+ var serveCommand = new Command11("serve").description("Start the MCP server (stdio transport)").option("--context-dir <path>", "Path to context directory").action(async (opts) => {
878
1205
  try {
879
1206
  let startServer;
880
1207
  try {
@@ -884,13 +1211,13 @@ var serveCommand = new Command9("serve").description("Start the MCP server (stdi
884
1211
  }
885
1212
  if (!startServer) {
886
1213
  console.log(
887
- chalk10.yellow(
1214
+ chalk12.yellow(
888
1215
  "MCP server is not available. Install @runcontext/mcp to enable this command."
889
1216
  )
890
1217
  );
891
1218
  process.exit(1);
892
1219
  }
893
- console.log(chalk10.blue("Starting MCP server (stdio transport)..."));
1220
+ console.log(chalk12.blue("Starting MCP server (stdio transport)..."));
894
1221
  await startServer({
895
1222
  contextDir: opts.contextDir,
896
1223
  rootDir: process.cwd()
@@ -902,13 +1229,13 @@ var serveCommand = new Command9("serve").description("Start the MCP server (stdi
902
1229
  });
903
1230
 
904
1231
  // src/commands/validate-osi.ts
905
- import { Command as Command10 } from "commander";
906
- import chalk11 from "chalk";
907
- import path9 from "path";
1232
+ import { Command as Command12 } from "commander";
1233
+ import chalk13 from "chalk";
1234
+ import path11 from "path";
908
1235
  import { parseFile, osiDocumentSchema } from "@runcontext/core";
909
- var validateOsiCommand = new Command10("validate-osi").description("Validate a single OSI file against the schema").argument("<file>", "Path to the OSI YAML file").option("--format <type>", "Output format: pretty or json", "pretty").action(async (file, opts) => {
1236
+ var validateOsiCommand = new Command12("validate-osi").description("Validate a single OSI file against the schema").argument("<file>", "Path to the OSI YAML file").option("--format <type>", "Output format: pretty or json", "pretty").action(async (file, opts) => {
910
1237
  try {
911
- const filePath = path9.resolve(file);
1238
+ const filePath = path11.resolve(file);
912
1239
  const parsed = await parseFile(filePath, "model");
913
1240
  const result = osiDocumentSchema.safeParse(parsed.data);
914
1241
  if (result.success) {
@@ -937,9 +1264,9 @@ var validateOsiCommand = new Command10("validate-osi").description("Validate a s
937
1264
  })
938
1265
  );
939
1266
  } else {
940
- console.error(chalk11.red(`Validation failed for ${filePath}:`));
1267
+ console.error(chalk13.red(`Validation failed for ${filePath}:`));
941
1268
  for (const issue of issues) {
942
- console.error(chalk11.red(` ${issue.path}: ${issue.message}`));
1269
+ console.error(chalk13.red(` ${issue.path}: ${issue.message}`));
943
1270
  }
944
1271
  }
945
1272
  process.exit(1);
@@ -950,46 +1277,268 @@ var validateOsiCommand = new Command10("validate-osi").description("Validate a s
950
1277
  }
951
1278
  });
952
1279
 
1280
+ // src/commands/enrich.ts
1281
+ import { Command as Command13 } from "commander";
1282
+ import chalk14 from "chalk";
1283
+ import path12 from "path";
1284
+ import { readFileSync as readFileSync3, writeFileSync as writeFileSync4, mkdirSync as mkdirSync2, existsSync as existsSync2, readdirSync } from "fs";
1285
+ import * as yaml from "yaml";
1286
+ import {
1287
+ compile as compile9,
1288
+ loadConfig as loadConfig9,
1289
+ computeTier as computeTier2,
1290
+ createAdapter as createAdapter4,
1291
+ suggestEnrichments,
1292
+ inferSemanticRole,
1293
+ inferAggregation
1294
+ } from "@runcontext/core";
1295
+ function findFileRecursive(dir, suffix) {
1296
+ if (!existsSync2(dir)) return void 0;
1297
+ const entries = readdirSync(dir, { withFileTypes: true });
1298
+ for (const entry of entries) {
1299
+ const fullPath = path12.join(dir, entry.name);
1300
+ if (entry.isDirectory()) {
1301
+ const found = findFileRecursive(fullPath, suffix);
1302
+ if (found) return found;
1303
+ } else if (entry.name.endsWith(suffix)) {
1304
+ return fullPath;
1305
+ }
1306
+ }
1307
+ return void 0;
1308
+ }
1309
+ var enrichCommand = new Command13("enrich").description("Suggest or apply metadata enrichments to reach a target tier").option("--target <tier>", "Target tier: silver or gold", "silver").option("--apply", "Write suggestions to YAML files").option("--source <name>", "Data source for sample values").option("--db <url>", "Database URL for sample values").option("--context-dir <path>", "Path to context directory").action(async (opts) => {
1310
+ try {
1311
+ const config = loadConfig9(process.cwd());
1312
+ const contextDir = opts.contextDir ? path12.resolve(opts.contextDir) : path12.resolve(config.context_dir);
1313
+ const target = opts.target;
1314
+ if (!["silver", "gold"].includes(target)) {
1315
+ console.error(chalk14.red('--target must be "silver" or "gold"'));
1316
+ process.exit(1);
1317
+ }
1318
+ const { graph } = await compile9({ contextDir, config });
1319
+ for (const [modelName] of graph.models) {
1320
+ const tierScore = computeTier2(modelName, graph);
1321
+ console.log(chalk14.bold(`${modelName}: ${tierScore.tier.toUpperCase()}`));
1322
+ if (tierScore.tier === target || target === "silver" && tierScore.tier === "gold") {
1323
+ console.log(chalk14.green(` Already at ${target} or above.
1324
+ `));
1325
+ continue;
1326
+ }
1327
+ const model = graph.models.get(modelName);
1328
+ const datasetNames = model.datasets.map((d) => d.name);
1329
+ const suggestions = suggestEnrichments(target, tierScore, datasetNames);
1330
+ if (!suggestions.governance && !suggestions.lineage && !suggestions.glossaryTerms && !suggestions.needsRulesFile && !suggestions.needsSampleValues && !suggestions.needsSemanticRoles) {
1331
+ console.log(chalk14.green(" No suggestions needed.\n"));
1332
+ continue;
1333
+ }
1334
+ if (suggestions.governance?.trust) {
1335
+ console.log(chalk14.yellow(` + Add trust: ${suggestions.governance.trust}`));
1336
+ }
1337
+ if (suggestions.governance?.tags) {
1338
+ console.log(chalk14.yellow(` + Add tags: [${suggestions.governance.tags.join(", ")}]`));
1339
+ }
1340
+ if (suggestions.governance?.refreshAll) {
1341
+ console.log(chalk14.yellow(` + Add refresh: ${suggestions.governance.refreshAll}`));
1342
+ }
1343
+ if (suggestions.lineage) {
1344
+ console.log(chalk14.yellow(` + Add lineage with ${suggestions.lineage.upstream?.length ?? 0} upstream sources`));
1345
+ }
1346
+ if (suggestions.glossaryTerms) {
1347
+ console.log(chalk14.yellow(` + Generate ${suggestions.glossaryTerms.length} glossary term(s)`));
1348
+ }
1349
+ if (suggestions.needsSampleValues) {
1350
+ console.log(chalk14.yellow(" + Populate sample_values from database"));
1351
+ }
1352
+ if (suggestions.needsSemanticRoles) {
1353
+ console.log(chalk14.yellow(" + Infer semantic_role for all fields"));
1354
+ }
1355
+ if (suggestions.needsRulesFile) {
1356
+ console.log(chalk14.yellow(" + Generate rules file"));
1357
+ }
1358
+ if (!opts.apply) {
1359
+ console.log(chalk14.cyan("\n Run with --apply to write these changes.\n"));
1360
+ continue;
1361
+ }
1362
+ const govFilePath = findFileRecursive(contextDir, `${modelName}.governance.yaml`);
1363
+ if (govFilePath) {
1364
+ const govContent = readFileSync3(govFilePath, "utf-8");
1365
+ const govDoc = yaml.parse(govContent) ?? {};
1366
+ if (suggestions.governance?.trust) {
1367
+ govDoc.trust = suggestions.governance.trust;
1368
+ }
1369
+ if (suggestions.governance?.tags) {
1370
+ govDoc.tags = suggestions.governance.tags;
1371
+ }
1372
+ if (suggestions.governance?.refreshAll) {
1373
+ for (const dsName of Object.keys(govDoc.datasets ?? {})) {
1374
+ govDoc.datasets[dsName].refresh = suggestions.governance.refreshAll;
1375
+ }
1376
+ }
1377
+ if (suggestions.needsSemanticRoles) {
1378
+ govDoc.fields = govDoc.fields ?? {};
1379
+ let adapter = null;
1380
+ const dsConfig = opts.db ? parseDbUrl(opts.db) : config.data_sources?.[opts.source ?? Object.keys(config.data_sources ?? {})[0]];
1381
+ if (dsConfig) {
1382
+ adapter = await createAdapter4(dsConfig);
1383
+ await adapter.connect();
1384
+ }
1385
+ for (const ds of model.datasets) {
1386
+ let columns = [];
1387
+ if (adapter) {
1388
+ const tableName = ds.source?.split(".").pop() ?? ds.name;
1389
+ try {
1390
+ columns = await adapter.listColumns(tableName);
1391
+ } catch {
1392
+ }
1393
+ }
1394
+ for (const field of ds.fields ?? []) {
1395
+ const fieldKey = `${ds.name}.${field.name}`;
1396
+ if (govDoc.fields[fieldKey]?.semantic_role) continue;
1397
+ const col = columns.find((c) => c.name === field.name);
1398
+ const isPK = col?.is_primary_key ?? field.name.endsWith("_id");
1399
+ const dataType = col?.data_type ?? "VARCHAR";
1400
+ govDoc.fields[fieldKey] = govDoc.fields[fieldKey] ?? {};
1401
+ const role = inferSemanticRole(field.name, dataType, isPK);
1402
+ govDoc.fields[fieldKey].semantic_role = role;
1403
+ if (role === "metric") {
1404
+ govDoc.fields[fieldKey].default_aggregation = inferAggregation(field.name);
1405
+ govDoc.fields[fieldKey].additive = govDoc.fields[fieldKey].default_aggregation === "SUM";
1406
+ }
1407
+ }
1408
+ }
1409
+ if (adapter) await adapter.disconnect();
1410
+ }
1411
+ if (suggestions.needsSampleValues) {
1412
+ govDoc.fields = govDoc.fields ?? {};
1413
+ const dsConfig2 = opts.db ? parseDbUrl(opts.db) : config.data_sources?.[opts.source ?? Object.keys(config.data_sources ?? {})[0]];
1414
+ if (dsConfig2) {
1415
+ const adapter2 = await createAdapter4(dsConfig2);
1416
+ await adapter2.connect();
1417
+ let count = 0;
1418
+ for (const ds of model.datasets) {
1419
+ if (count >= 2) break;
1420
+ const tableName = ds.source?.split(".").pop() ?? ds.name;
1421
+ for (const field of ds.fields ?? []) {
1422
+ if (count >= 2) break;
1423
+ const fieldKey = `${ds.name}.${field.name}`;
1424
+ if (govDoc.fields[fieldKey]?.sample_values?.length > 0) continue;
1425
+ try {
1426
+ const result = await adapter2.query(
1427
+ `SELECT DISTINCT CAST("${field.name}" AS VARCHAR) AS val FROM "${tableName}" WHERE "${field.name}" IS NOT NULL LIMIT 5`
1428
+ );
1429
+ if (result.rows.length > 0) {
1430
+ govDoc.fields[fieldKey] = govDoc.fields[fieldKey] ?? {};
1431
+ govDoc.fields[fieldKey].sample_values = result.rows.map((r) => String(r.val));
1432
+ count++;
1433
+ }
1434
+ } catch {
1435
+ }
1436
+ }
1437
+ }
1438
+ await adapter2.disconnect();
1439
+ }
1440
+ }
1441
+ writeFileSync4(govFilePath, yaml.stringify(govDoc, { lineWidth: 120 }), "utf-8");
1442
+ console.log(chalk14.green(` Updated: ${path12.relative(process.cwd(), govFilePath)}`));
1443
+ }
1444
+ if (suggestions.lineage) {
1445
+ const lineageDir = path12.join(contextDir, "lineage");
1446
+ if (!existsSync2(lineageDir)) mkdirSync2(lineageDir, { recursive: true });
1447
+ const lineagePath = path12.join(lineageDir, `${modelName}.lineage.yaml`);
1448
+ if (!existsSync2(lineagePath)) {
1449
+ const lineageDoc = {
1450
+ model: modelName,
1451
+ upstream: suggestions.lineage.upstream
1452
+ };
1453
+ writeFileSync4(lineagePath, yaml.stringify(lineageDoc, { lineWidth: 120 }), "utf-8");
1454
+ console.log(chalk14.green(` Created: ${path12.relative(process.cwd(), lineagePath)}`));
1455
+ }
1456
+ }
1457
+ if (suggestions.glossaryTerms) {
1458
+ const glossaryDir = path12.join(contextDir, "glossary");
1459
+ if (!existsSync2(glossaryDir)) mkdirSync2(glossaryDir, { recursive: true });
1460
+ for (const term of suggestions.glossaryTerms) {
1461
+ const termPath = path12.join(glossaryDir, `${term.id}.term.yaml`);
1462
+ if (!existsSync2(termPath)) {
1463
+ writeFileSync4(termPath, yaml.stringify(term, { lineWidth: 120 }), "utf-8");
1464
+ console.log(chalk14.green(` Created: ${path12.relative(process.cwd(), termPath)}`));
1465
+ }
1466
+ }
1467
+ }
1468
+ if (suggestions.needsRulesFile) {
1469
+ const rulesDir = path12.join(contextDir, "rules");
1470
+ if (!existsSync2(rulesDir)) mkdirSync2(rulesDir, { recursive: true });
1471
+ const rulesPath = path12.join(rulesDir, `${modelName}.rules.yaml`);
1472
+ if (!existsSync2(rulesPath)) {
1473
+ const rulesDoc = {
1474
+ model: modelName,
1475
+ golden_queries: [
1476
+ { question: "TODO: What is the total count?", sql: "SELECT COUNT(*) FROM table_name" },
1477
+ { question: "TODO: What are the top records?", sql: "SELECT * FROM table_name LIMIT 10" },
1478
+ { question: "TODO: What is the distribution?", sql: "SELECT column, COUNT(*) FROM table_name GROUP BY column" }
1479
+ ],
1480
+ business_rules: [
1481
+ { name: "TODO: rule-name", definition: "TODO: describe the business rule" }
1482
+ ],
1483
+ guardrail_filters: [
1484
+ { name: "TODO: filter-name", filter: "column IS NOT NULL", reason: "TODO: explain why" }
1485
+ ],
1486
+ hierarchies: [
1487
+ { name: "TODO: hierarchy-name", levels: ["level1", "level2"], dataset: datasetNames[0] ?? "dataset" }
1488
+ ]
1489
+ };
1490
+ writeFileSync4(rulesPath, yaml.stringify(rulesDoc, { lineWidth: 120 }), "utf-8");
1491
+ console.log(chalk14.green(` Created: ${path12.relative(process.cwd(), rulesPath)} (with TODOs)`));
1492
+ }
1493
+ }
1494
+ console.log("");
1495
+ }
1496
+ } catch (err) {
1497
+ console.error(chalk14.red(`Enrich failed: ${err.message}`));
1498
+ process.exit(1);
1499
+ }
1500
+ });
1501
+
953
1502
  // src/commands/rules.ts
954
- import { Command as Command11 } from "commander";
955
- import chalk12 from "chalk";
956
- import { ALL_RULES as ALL_RULES4 } from "@runcontext/core";
1503
+ import { Command as Command14 } from "commander";
1504
+ import chalk15 from "chalk";
1505
+ import { ALL_RULES as ALL_RULES5 } from "@runcontext/core";
957
1506
  function formatRuleTable(rules) {
958
1507
  if (rules.length === 0) {
959
- return chalk12.gray("No rules match the filters.");
1508
+ return chalk15.gray("No rules match the filters.");
960
1509
  }
961
1510
  const lines = [];
962
1511
  const header = `${"ID".padEnd(40)} ${"Tier".padEnd(8)} ${"Severity".padEnd(10)} ${"Fix".padEnd(5)} Description`;
963
- lines.push(chalk12.bold(header));
964
- lines.push(chalk12.gray("\u2500".repeat(100)));
1512
+ lines.push(chalk15.bold(header));
1513
+ lines.push(chalk15.gray("\u2500".repeat(100)));
965
1514
  for (const rule of rules) {
966
1515
  const tier = rule.tier ?? "\u2014";
967
1516
  const tierCol = colorTier(tier);
968
- const fixCol = rule.fixable ? chalk12.green("yes") : chalk12.gray("no");
969
- const sevCol = rule.defaultSeverity === "error" ? chalk12.red(rule.defaultSeverity) : chalk12.yellow(rule.defaultSeverity);
970
- const deprecated = rule.deprecated ? chalk12.gray(" (deprecated)") : "";
1517
+ const fixCol = rule.fixable ? chalk15.green("yes") : chalk15.gray("no");
1518
+ const sevCol = rule.defaultSeverity === "error" ? chalk15.red(rule.defaultSeverity) : chalk15.yellow(rule.defaultSeverity);
1519
+ const deprecated = rule.deprecated ? chalk15.gray(" (deprecated)") : "";
971
1520
  lines.push(
972
1521
  `${rule.id.padEnd(40)} ${tierCol.padEnd(8 + (tierCol.length - tier.length))} ${sevCol.padEnd(10 + (sevCol.length - rule.defaultSeverity.length))} ${fixCol.padEnd(5 + (fixCol.length - (rule.fixable ? 3 : 2)))} ${rule.description}${deprecated}`
973
1522
  );
974
1523
  }
975
1524
  lines.push("");
976
- lines.push(chalk12.gray(`${rules.length} rule(s) total`));
1525
+ lines.push(chalk15.gray(`${rules.length} rule(s) total`));
977
1526
  return lines.join("\n");
978
1527
  }
979
1528
  function colorTier(tier) {
980
1529
  switch (tier) {
981
1530
  case "gold":
982
- return chalk12.yellow(tier);
1531
+ return chalk15.yellow(tier);
983
1532
  case "silver":
984
- return chalk12.white(tier);
1533
+ return chalk15.white(tier);
985
1534
  case "bronze":
986
- return chalk12.hex("#CD7F32")(tier);
1535
+ return chalk15.hex("#CD7F32")(tier);
987
1536
  default:
988
- return chalk12.gray(tier);
1537
+ return chalk15.gray(tier);
989
1538
  }
990
1539
  }
991
- var rulesCommand = new Command11("rules").description("List all lint rules with metadata").option("--tier <tier>", "Filter by tier: bronze, silver, gold").option("--fixable", "Show only fixable rules").option("--format <type>", "Output format: pretty or json", "pretty").action((opts) => {
992
- let rules = [...ALL_RULES4];
1540
+ var rulesCommand = new Command14("rules").description("List all lint rules with metadata").option("--tier <tier>", "Filter by tier: bronze, silver, gold").option("--fixable", "Show only fixable rules").option("--format <type>", "Output format: pretty or json", "pretty").action((opts) => {
1541
+ let rules = [...ALL_RULES5];
993
1542
  if (opts.tier) {
994
1543
  const tier = opts.tier;
995
1544
  rules = rules.filter((r) => r.tier === tier);
@@ -1013,9 +1562,1029 @@ var rulesCommand = new Command11("rules").description("List all lint rules with
1013
1562
  }
1014
1563
  });
1015
1564
 
1565
+ // src/commands/setup.ts
1566
+ import { Command as Command15 } from "commander";
1567
+ import * as p9 from "@clack/prompts";
1568
+ import chalk16 from "chalk";
1569
+
1570
+ // src/setup/steps/connect.ts
1571
+ import * as p from "@clack/prompts";
1572
+ import path13 from "path";
1573
+ import { existsSync as existsSync3, readFileSync as readFileSync4, writeFileSync as writeFileSync5 } from "fs";
1574
+ import * as yaml2 from "yaml";
1575
+ import { loadConfig as loadConfig10, createAdapter as createAdapter5 } from "@runcontext/core";
1576
+ function autoDetectDb(cwd) {
1577
+ try {
1578
+ const config = loadConfig10(cwd);
1579
+ if (config.data_sources && Object.keys(config.data_sources).length > 0) {
1580
+ const name = Object.keys(config.data_sources)[0];
1581
+ const ds = config.data_sources[name];
1582
+ const loc = ds.path ?? ds.connection ?? name;
1583
+ return { dsConfig: ds, label: `${ds.adapter} \u2014 ${loc} (from contextkit.config.yaml)` };
1584
+ }
1585
+ } catch {
1586
+ }
1587
+ if (process.env.DATABASE_URL) {
1588
+ try {
1589
+ const ds = parseDbUrl(process.env.DATABASE_URL);
1590
+ return { dsConfig: ds, label: `${ds.adapter} \u2014 $DATABASE_URL` };
1591
+ } catch {
1592
+ }
1593
+ }
1594
+ if (process.env.DUCKDB_PATH && existsSync3(process.env.DUCKDB_PATH)) {
1595
+ return {
1596
+ dsConfig: { adapter: "duckdb", path: process.env.DUCKDB_PATH },
1597
+ label: `duckdb \u2014 $DUCKDB_PATH`
1598
+ };
1599
+ }
1600
+ const mcpPath = path13.join(cwd, ".claude", "mcp.json");
1601
+ if (existsSync3(mcpPath)) {
1602
+ try {
1603
+ const mcpConfig = JSON.parse(readFileSync4(mcpPath, "utf-8"));
1604
+ const duckdbServer = mcpConfig.mcpServers?.duckdb;
1605
+ if (duckdbServer?.args) {
1606
+ const args = duckdbServer.args;
1607
+ const idx = args.indexOf("--db-path");
1608
+ if (idx >= 0 && args[idx + 1]) {
1609
+ const dbPath = args[idx + 1];
1610
+ if (existsSync3(dbPath)) {
1611
+ return {
1612
+ dsConfig: { adapter: "duckdb", path: dbPath },
1613
+ label: `duckdb \u2014 ${path13.basename(dbPath)} (from .claude/mcp.json)`
1614
+ };
1615
+ }
1616
+ }
1617
+ }
1618
+ } catch {
1619
+ }
1620
+ }
1621
+ return void 0;
1622
+ }
1623
+ async function promptForConnection() {
1624
+ const connector = await p.select({
1625
+ message: "Select your database",
1626
+ options: [
1627
+ { value: "duckdb", label: "DuckDB", hint: "Local .duckdb file" },
1628
+ { value: "postgres", label: "PostgreSQL", hint: "Connection string" }
1629
+ ]
1630
+ });
1631
+ if (p.isCancel(connector)) return void 0;
1632
+ if (connector === "duckdb") {
1633
+ const method = await p.select({
1634
+ message: "How do you connect?",
1635
+ options: [
1636
+ { value: "env", label: "Environment variable", hint: "e.g. DUCKDB_PATH" },
1637
+ { value: "path", label: "File path", hint: "e.g. ./warehouse.duckdb" }
1638
+ ]
1639
+ });
1640
+ if (p.isCancel(method)) return void 0;
1641
+ if (method === "env") {
1642
+ const envName = await p.text({
1643
+ message: "Environment variable name",
1644
+ initialValue: "DUCKDB_PATH",
1645
+ validate(value) {
1646
+ if (!value) return "Required";
1647
+ const resolved = process.env[value];
1648
+ if (!resolved) return `$${value} is not set`;
1649
+ if (!existsSync3(resolved)) return `$${value} points to "${resolved}" which does not exist`;
1650
+ }
1651
+ });
1652
+ if (p.isCancel(envName)) return void 0;
1653
+ return { adapter: "duckdb", path: process.env[envName] };
1654
+ } else {
1655
+ const filePath = await p.text({
1656
+ message: "Path to .duckdb file",
1657
+ placeholder: "./warehouse.duckdb",
1658
+ validate(value) {
1659
+ if (!value) return "Required";
1660
+ if (!existsSync3(value)) return `File not found: ${value}`;
1661
+ }
1662
+ });
1663
+ if (p.isCancel(filePath)) return void 0;
1664
+ return { adapter: "duckdb", path: path13.resolve(filePath) };
1665
+ }
1666
+ } else {
1667
+ const method = await p.select({
1668
+ message: "How do you connect?",
1669
+ options: [
1670
+ { value: "env", label: "Environment variable", hint: "e.g. DATABASE_URL" },
1671
+ { value: "url", label: "Connection string", hint: "postgres://..." }
1672
+ ]
1673
+ });
1674
+ if (p.isCancel(method)) return void 0;
1675
+ if (method === "env") {
1676
+ const envName = await p.text({
1677
+ message: "Environment variable name",
1678
+ initialValue: "DATABASE_URL",
1679
+ validate(value) {
1680
+ if (!value) return "Required";
1681
+ const resolved = process.env[value];
1682
+ if (!resolved) return `$${value} is not set`;
1683
+ }
1684
+ });
1685
+ if (p.isCancel(envName)) return void 0;
1686
+ return { adapter: "postgres", connection: process.env[envName] };
1687
+ } else {
1688
+ const url = await p.text({
1689
+ message: "Connection string",
1690
+ placeholder: "postgres://user:pass@host:5432/dbname",
1691
+ validate(value) {
1692
+ if (!value) return "Required";
1693
+ if (!value.startsWith("postgres://") && !value.startsWith("postgresql://")) {
1694
+ return "Must start with postgres:// or postgresql://";
1695
+ }
1696
+ }
1697
+ });
1698
+ if (p.isCancel(url)) return void 0;
1699
+ return { adapter: "postgres", connection: url };
1700
+ }
1701
+ }
1702
+ }
1703
+ async function runConnectStep() {
1704
+ const cwd = process.cwd();
1705
+ let dsConfig;
1706
+ const detected = autoDetectDb(cwd);
1707
+ if (detected) {
1708
+ p.log.info(`Detected: ${detected.label}`);
1709
+ const useDetected = await p.confirm({ message: "Use this database?" });
1710
+ if (p.isCancel(useDetected)) {
1711
+ p.cancel("Setup cancelled.");
1712
+ return void 0;
1713
+ }
1714
+ if (useDetected) {
1715
+ dsConfig = detected.dsConfig;
1716
+ } else {
1717
+ const manual = await promptForConnection();
1718
+ if (!manual) {
1719
+ p.cancel("Setup cancelled.");
1720
+ return void 0;
1721
+ }
1722
+ dsConfig = manual;
1723
+ }
1724
+ } else {
1725
+ const manual = await promptForConnection();
1726
+ if (!manual) {
1727
+ p.cancel("Setup cancelled.");
1728
+ return void 0;
1729
+ }
1730
+ dsConfig = manual;
1731
+ }
1732
+ const spin = p.spinner();
1733
+ spin.start("Connecting to database...");
1734
+ let adapter;
1735
+ try {
1736
+ adapter = await createAdapter5(dsConfig);
1737
+ await adapter.connect();
1738
+ } catch (err) {
1739
+ spin.stop("Connection failed");
1740
+ p.log.error(err.message);
1741
+ p.cancel("Could not connect to database.");
1742
+ return void 0;
1743
+ }
1744
+ const tables = await adapter.listTables();
1745
+ const columns = {};
1746
+ for (const table of tables) {
1747
+ columns[table.name] = await adapter.listColumns(table.name);
1748
+ }
1749
+ const totalCols = Object.values(columns).reduce((sum, c) => sum + c.length, 0);
1750
+ spin.stop(`Found ${tables.length} tables, ${totalCols} columns`);
1751
+ const tableLines = tables.map((t) => ` ${t.name.padEnd(30)} ${t.row_count.toLocaleString()} rows`).join("\n");
1752
+ p.note(tableLines, "Discovered Tables");
1753
+ const defaultModel = path13.basename(cwd).replace(/[^a-z0-9-]/gi, "-").toLowerCase();
1754
+ const modelInput = await p.text({
1755
+ message: "Model name",
1756
+ initialValue: defaultModel,
1757
+ validate(value) {
1758
+ if (!value) return "Required";
1759
+ if (!/^[a-z0-9-]+$/.test(value)) return "Use lowercase letters, numbers, and hyphens only";
1760
+ }
1761
+ });
1762
+ if (p.isCancel(modelInput)) {
1763
+ p.cancel("Setup cancelled.");
1764
+ await adapter.disconnect();
1765
+ return void 0;
1766
+ }
1767
+ const tierInput = await p.select({
1768
+ message: "Target metadata tier",
1769
+ options: [
1770
+ { value: "bronze", label: "Bronze", hint: "Schema + ownership + grain" },
1771
+ { value: "silver", label: "Silver", hint: "+ trust, lineage, glossary, refresh, sample values" },
1772
+ { value: "gold", label: "Gold", hint: "+ semantic roles, rules, golden queries (needs curation)" }
1773
+ ]
1774
+ });
1775
+ if (p.isCancel(tierInput)) {
1776
+ p.cancel("Setup cancelled.");
1777
+ await adapter.disconnect();
1778
+ return void 0;
1779
+ }
1780
+ const configPath = path13.join(cwd, "contextkit.config.yaml");
1781
+ let config;
1782
+ try {
1783
+ config = loadConfig10(cwd);
1784
+ } catch {
1785
+ config = { context_dir: "./context" };
1786
+ }
1787
+ if (!config.data_sources || Object.keys(config.data_sources).length === 0) {
1788
+ const newConfig = {
1789
+ context_dir: config.context_dir ?? "./context",
1790
+ data_sources: { default: dsConfig }
1791
+ };
1792
+ writeFileSync5(configPath, yaml2.stringify(newConfig, { lineWidth: 120 }), "utf-8");
1793
+ config = loadConfig10(cwd);
1794
+ }
1795
+ const contextDir = path13.resolve(cwd, config.context_dir ?? "./context");
1796
+ return {
1797
+ cwd,
1798
+ contextDir,
1799
+ dsConfig,
1800
+ adapter,
1801
+ tables,
1802
+ columns,
1803
+ modelName: modelInput,
1804
+ targetTier: tierInput
1805
+ };
1806
+ }
1807
+
1808
+ // src/setup/steps/scaffold.ts
1809
+ import * as p3 from "@clack/prompts";
1810
+ import path14 from "path";
1811
+ import { mkdirSync as mkdirSync3, writeFileSync as writeFileSync6, existsSync as existsSync4 } from "fs";
1812
+ import { scaffoldFromSchema as scaffoldFromSchema2, compile as compile10, computeTier as computeTier3, loadConfig as loadConfig11 } from "@runcontext/core";
1813
+
1814
+ // src/setup/display.ts
1815
+ import * as p2 from "@clack/prompts";
1816
+ function displayTierScore(score) {
1817
+ p2.note(formatTierScore(score), "Tier Scorecard");
1818
+ }
1819
+
1820
+ // src/setup/steps/scaffold.ts
1821
+ async function runScaffoldStep(ctx) {
1822
+ const shouldRun = await p3.confirm({
1823
+ message: "Scaffold Bronze metadata from database schema?"
1824
+ });
1825
+ if (p3.isCancel(shouldRun) || !shouldRun) {
1826
+ return { skipped: true, summary: "Skipped" };
1827
+ }
1828
+ const spin = p3.spinner();
1829
+ spin.start("Scaffolding Bronze metadata...");
1830
+ const result = scaffoldFromSchema2({
1831
+ modelName: ctx.modelName,
1832
+ dataSourceName: "default",
1833
+ tables: ctx.tables,
1834
+ columns: ctx.columns
1835
+ });
1836
+ for (const dir of ["models", "governance", "owners"]) {
1837
+ const dirPath = path14.join(ctx.contextDir, dir);
1838
+ if (!existsSync4(dirPath)) mkdirSync3(dirPath, { recursive: true });
1839
+ }
1840
+ const created = [];
1841
+ const files = [
1842
+ { rel: path14.join("models", result.files.osi), content: result.osiYaml },
1843
+ { rel: path14.join("governance", result.files.governance), content: result.governanceYaml },
1844
+ { rel: path14.join("owners", result.files.owner), content: result.ownerYaml }
1845
+ ];
1846
+ for (const f of files) {
1847
+ const fullPath = path14.join(ctx.contextDir, f.rel);
1848
+ writeFileSync6(fullPath, f.content, "utf-8");
1849
+ created.push(f.rel);
1850
+ }
1851
+ const config = loadConfig11(ctx.cwd);
1852
+ const { graph } = await compile10({ contextDir: ctx.contextDir, config, rootDir: ctx.cwd });
1853
+ ctx.graph = graph;
1854
+ ctx.tierScore = computeTier3(ctx.modelName, graph);
1855
+ spin.stop(`Created ${created.length} files`);
1856
+ const fileList = created.map((f) => ` ${f}`).join("\n");
1857
+ p3.note(fileList, "Files Created");
1858
+ displayTierScore(ctx.tierScore);
1859
+ return { skipped: false, summary: `${created.length} files \u2192 ${ctx.tierScore.tier.toUpperCase()}` };
1860
+ }
1861
+
1862
+ // src/setup/steps/enrich-silver.ts
1863
+ import * as p4 from "@clack/prompts";
1864
+ import path15 from "path";
1865
+ import { readFileSync as readFileSync5, writeFileSync as writeFileSync7, mkdirSync as mkdirSync4, existsSync as existsSync5, readdirSync as readdirSync2 } from "fs";
1866
+ import * as yaml3 from "yaml";
1867
+ import {
1868
+ compile as compile11,
1869
+ computeTier as computeTier4,
1870
+ suggestEnrichments as suggestEnrichments2,
1871
+ loadConfig as loadConfig12
1872
+ } from "@runcontext/core";
1873
+ function findFileRecursive2(dir, suffix) {
1874
+ if (!existsSync5(dir)) return void 0;
1875
+ const entries = readdirSync2(dir, { withFileTypes: true });
1876
+ for (const entry of entries) {
1877
+ const fullPath = path15.join(dir, entry.name);
1878
+ if (entry.isDirectory()) {
1879
+ const found = findFileRecursive2(fullPath, suffix);
1880
+ if (found) return found;
1881
+ } else if (entry.name.endsWith(suffix)) {
1882
+ return fullPath;
1883
+ }
1884
+ }
1885
+ return void 0;
1886
+ }
1887
+ async function runEnrichSilverStep(ctx) {
1888
+ const config = loadConfig12(ctx.cwd);
1889
+ const { graph } = await compile11({ contextDir: ctx.contextDir, config, rootDir: ctx.cwd });
1890
+ ctx.graph = graph;
1891
+ const tierScore = computeTier4(ctx.modelName, graph);
1892
+ if (tierScore.silver.passed) {
1893
+ p4.log.success("Already at Silver or above \u2014 skipping.");
1894
+ ctx.tierScore = tierScore;
1895
+ return { skipped: true, summary: "Already Silver" };
1896
+ }
1897
+ const model = graph.models.get(ctx.modelName);
1898
+ if (!model) {
1899
+ p4.log.error(`Model "${ctx.modelName}" not found in graph.`);
1900
+ return { skipped: true, summary: "Model not found" };
1901
+ }
1902
+ const datasetNames = model.datasets.map((d) => d.name);
1903
+ const suggestions = suggestEnrichments2("silver", tierScore, datasetNames);
1904
+ const preview = [];
1905
+ if (suggestions.governance?.trust) preview.push(`+ trust: ${suggestions.governance.trust}`);
1906
+ if (suggestions.governance?.tags) preview.push(`+ tags: [${suggestions.governance.tags.join(", ")}]`);
1907
+ if (suggestions.governance?.refreshAll) preview.push(`+ refresh: ${suggestions.governance.refreshAll} (all datasets)`);
1908
+ if (suggestions.lineage) preview.push(`+ ${suggestions.lineage.upstream?.length ?? 0} lineage upstream source(s)`);
1909
+ if (suggestions.glossaryTerms) preview.push(`+ ${suggestions.glossaryTerms.length} glossary term(s)`);
1910
+ if (suggestions.needsSampleValues) preview.push("+ sample_values from live data");
1911
+ if (preview.length > 0) {
1912
+ p4.note(preview.join("\n"), "Silver Enrichments");
1913
+ }
1914
+ const shouldRun = await p4.confirm({
1915
+ message: "Apply Silver enrichments?"
1916
+ });
1917
+ if (p4.isCancel(shouldRun) || !shouldRun) {
1918
+ return { skipped: true, summary: "Skipped" };
1919
+ }
1920
+ const spin = p4.spinner();
1921
+ spin.start("Enriching to Silver...");
1922
+ const govFilePath = findFileRecursive2(ctx.contextDir, `${ctx.modelName}.governance.yaml`);
1923
+ if (govFilePath) {
1924
+ const govContent = readFileSync5(govFilePath, "utf-8");
1925
+ const govDoc = yaml3.parse(govContent) ?? {};
1926
+ if (suggestions.governance?.trust) govDoc.trust = suggestions.governance.trust;
1927
+ if (suggestions.governance?.tags) govDoc.tags = suggestions.governance.tags;
1928
+ if (suggestions.governance?.refreshAll) {
1929
+ for (const dsName of Object.keys(govDoc.datasets ?? {})) {
1930
+ govDoc.datasets[dsName].refresh = suggestions.governance.refreshAll;
1931
+ }
1932
+ }
1933
+ if (suggestions.needsSampleValues) {
1934
+ govDoc.fields = govDoc.fields ?? {};
1935
+ try {
1936
+ let count = 0;
1937
+ for (const ds of model.datasets) {
1938
+ if (count >= 2) break;
1939
+ const tableName = ds.source?.split(".").pop() ?? ds.name;
1940
+ for (const field of ds.fields ?? []) {
1941
+ if (count >= 2) break;
1942
+ const fieldKey = `${ds.name}.${field.name}`;
1943
+ if (govDoc.fields[fieldKey]?.sample_values?.length > 0) continue;
1944
+ try {
1945
+ const result = await ctx.adapter.query(
1946
+ `SELECT DISTINCT CAST("${field.name}" AS VARCHAR) AS val FROM "${tableName}" WHERE "${field.name}" IS NOT NULL LIMIT 5`
1947
+ );
1948
+ if (result.rows.length > 0) {
1949
+ govDoc.fields[fieldKey] = govDoc.fields[fieldKey] ?? {};
1950
+ govDoc.fields[fieldKey].sample_values = result.rows.map((r) => String(r.val));
1951
+ count++;
1952
+ }
1953
+ } catch {
1954
+ }
1955
+ }
1956
+ }
1957
+ } catch {
1958
+ }
1959
+ }
1960
+ writeFileSync7(govFilePath, yaml3.stringify(govDoc, { lineWidth: 120 }), "utf-8");
1961
+ }
1962
+ if (suggestions.lineage) {
1963
+ const lineageDir = path15.join(ctx.contextDir, "lineage");
1964
+ if (!existsSync5(lineageDir)) mkdirSync4(lineageDir, { recursive: true });
1965
+ const lineagePath = path15.join(lineageDir, `${ctx.modelName}.lineage.yaml`);
1966
+ if (!existsSync5(lineagePath)) {
1967
+ const lineageDoc = { model: ctx.modelName, upstream: suggestions.lineage.upstream };
1968
+ writeFileSync7(lineagePath, yaml3.stringify(lineageDoc, { lineWidth: 120 }), "utf-8");
1969
+ }
1970
+ }
1971
+ if (suggestions.glossaryTerms) {
1972
+ const glossaryDir = path15.join(ctx.contextDir, "glossary");
1973
+ if (!existsSync5(glossaryDir)) mkdirSync4(glossaryDir, { recursive: true });
1974
+ for (const term of suggestions.glossaryTerms) {
1975
+ const termPath = path15.join(glossaryDir, `${term.id}.term.yaml`);
1976
+ if (!existsSync5(termPath)) {
1977
+ writeFileSync7(termPath, yaml3.stringify(term, { lineWidth: 120 }), "utf-8");
1978
+ }
1979
+ }
1980
+ }
1981
+ const { graph: newGraph } = await compile11({ contextDir: ctx.contextDir, config, rootDir: ctx.cwd });
1982
+ ctx.graph = newGraph;
1983
+ ctx.tierScore = computeTier4(ctx.modelName, newGraph);
1984
+ spin.stop("Applied Silver enrichments");
1985
+ displayTierScore(ctx.tierScore);
1986
+ return { skipped: false, summary: ctx.tierScore.tier.toUpperCase() };
1987
+ }
1988
+
1989
+ // src/setup/steps/enrich-gold.ts
1990
+ import * as p5 from "@clack/prompts";
1991
+ import path16 from "path";
1992
+ import { readFileSync as readFileSync6, writeFileSync as writeFileSync8, mkdirSync as mkdirSync5, existsSync as existsSync6, readdirSync as readdirSync3 } from "fs";
1993
+ import * as yaml4 from "yaml";
1994
+ import {
1995
+ compile as compile12,
1996
+ computeTier as computeTier5,
1997
+ suggestEnrichments as suggestEnrichments3,
1998
+ inferSemanticRole as inferSemanticRole2,
1999
+ inferAggregation as inferAggregation2,
2000
+ loadConfig as loadConfig13
2001
+ } from "@runcontext/core";
2002
// Depth-first search under `dir` for the first file whose name ends with
// `suffix`. Returns the full path of the match, or undefined when `dir` does
// not exist or nothing matches. Directories are descended in readdir order,
// and a subdirectory match wins over later siblings.
function findFileRecursive3(dir, suffix) {
  if (!existsSync6(dir)) return void 0;
  for (const entry of readdirSync3(dir, { withFileTypes: true })) {
    const entryPath = path16.join(dir, entry.name);
    if (entry.isDirectory()) {
      const match = findFileRecursive3(entryPath, suffix);
      if (match) return match;
      continue;
    }
    if (entry.name.endsWith(suffix)) return entryPath;
  }
  return void 0;
}
2016
// Wizard step: enrich a model's metadata toward the Gold tier.
// Flow: compile -> short-circuit if already Gold -> preview the suggested
// enrichments -> confirm -> patch the governance, model (OSI) and rules YAML
// files on disk -> recompile and re-score. Returns { skipped, summary } for
// the wizard summary table. Gold enrichments deliberately write TODO
// placeholders that a human (or agent) must curate afterwards.
async function runEnrichGoldStep(ctx) {
  const config = loadConfig13(ctx.cwd);
  const { graph } = await compile12({ contextDir: ctx.contextDir, config, rootDir: ctx.cwd });
  ctx.graph = graph;
  const tierScore = computeTier5(ctx.modelName, graph);
  if (tierScore.gold.passed) {
    p5.log.success("Already at Gold \u2014 skipping.");
    ctx.tierScore = tierScore;
    return { skipped: true, summary: "Already Gold" };
  }
  const model = graph.models.get(ctx.modelName);
  if (!model) {
    p5.log.error(`Model "${ctx.modelName}" not found.`);
    return { skipped: true, summary: "Model not found" };
  }
  const datasetNames = model.datasets.map((d) => d.name);
  const suggestions = suggestEnrichments3("gold", tierScore, datasetNames);
  // Build a human-readable preview of the planned changes before confirming.
  const preview = [];
  if (suggestions.needsSemanticRoles) preview.push("+ Infer semantic_role for all fields");
  if (suggestions.needsRulesFile) preview.push("+ Generate rules file (golden queries, guardrails, hierarchies)");
  if (suggestions.governance?.trust) preview.push(`+ trust: ${suggestions.governance.trust}`);
  preview.push("+ Add version, business_context stubs to governance");
  preview.push("+ Add ai_context placeholder to model");
  preview.push("+ Infer relationships from column name patterns");
  if (preview.length > 0) {
    p5.note(preview.join("\n"), "Gold Enrichments");
  }
  p5.log.warning("Gold enrichments create TODO placeholders that need manual curation.");
  const shouldRun = await p5.confirm({
    message: "Apply Gold enrichments?"
  });
  if (p5.isCancel(shouldRun) || !shouldRun) {
    return { skipped: true, summary: "Skipped" };
  }
  const spin = p5.spinner();
  spin.start("Enriching to Gold...");
  // --- Governance file: trust, semantic roles, version, business_context ---
  const govFilePath = findFileRecursive3(ctx.contextDir, `${ctx.modelName}.governance.yaml`);
  if (govFilePath) {
    const govContent = readFileSync6(govFilePath, "utf-8");
    const govDoc = yaml4.parse(govContent) ?? {};
    if (suggestions.governance?.trust) govDoc.trust = suggestions.governance.trust;
    if (suggestions.needsSemanticRoles) {
      govDoc.fields = govDoc.fields ?? {};
      for (const ds of model.datasets) {
        // Physical table name: last segment of a dotted source, else dataset name.
        const tableName = ds.source?.split(".").pop() ?? ds.name;
        let dbColumns = [];
        try {
          dbColumns = await ctx.adapter.listColumns(tableName);
        } catch {
          // Best-effort: fall back to name-based inference when the live
          // column listing is unavailable.
        }
        for (const field of ds.fields ?? []) {
          const fieldKey = `${ds.name}.${field.name}`;
          // Never overwrite a semantic_role a human already curated.
          if (govDoc.fields[fieldKey]?.semantic_role) continue;
          const col = dbColumns.find((c) => c.name === field.name);
          const isPK = col?.is_primary_key ?? field.name.endsWith("_id");
          const dataType = col?.data_type ?? "VARCHAR";
          govDoc.fields[fieldKey] = govDoc.fields[fieldKey] ?? {};
          const role = inferSemanticRole2(field.name, dataType, isPK);
          govDoc.fields[fieldKey].semantic_role = role;
          if (role === "metric") {
            // Metrics also get a default aggregation; only SUM metrics are additive.
            govDoc.fields[fieldKey].default_aggregation = inferAggregation2(field.name);
            govDoc.fields[fieldKey].additive = govDoc.fields[fieldKey].default_aggregation === "SUM";
          }
        }
      }
    }
    if (!govDoc.version) {
      govDoc.version = "0.1.0";
    }
    if (!govDoc.business_context || govDoc.business_context.length === 0) {
      govDoc.business_context = [
        { name: "TODO: Use Case Name", description: "TODO: Describe the analytical use case and business value." }
      ];
    }
    writeFileSync8(govFilePath, yaml4.stringify(govDoc, { lineWidth: 120 }), "utf-8");
  }
  // --- Model (OSI) file: ai_context placeholder + inferred relationships ---
  const modelFilePath = findFileRecursive3(ctx.contextDir, `${ctx.modelName}.osi.yaml`);
  if (modelFilePath) {
    const modelContent = readFileSync6(modelFilePath, "utf-8");
    const modelDoc = yaml4.parse(modelContent) ?? {};
    const semModels = modelDoc.semantic_model ?? [];
    let changed = false;
    for (const sm of semModels) {
      if (sm.name !== ctx.modelName) continue;
      if (!sm.ai_context) {
        sm.ai_context = "TODO: Describe how an AI agent should use this model, common pitfalls, and important filters.";
        changed = true;
      }
      if (!sm.relationships || sm.relationships.length === 0) {
        const datasets = sm.datasets ?? [];
        // NOTE(review): dsNames is computed but never read below.
        const dsNames = new Set(datasets.map((d) => d.name));
        const inferred = [];
        // Heuristic join inference: a `<base>_id` column pointing at another
        // dataset whose name overlaps `<base>` and which carries the same column.
        for (const ds of datasets) {
          for (const field of ds.fields ?? []) {
            const fname = field.name;
            const idMatch = fname.match(/^(.+)_id$/);
            if (idMatch && idMatch[1]) {
              const targetBase = idMatch[1];
              for (const targetDs of datasets) {
                if (targetDs.name === ds.name) continue;
                const targetName = targetDs.name;
                if (targetName.includes(targetBase) || targetBase.includes(targetName)) {
                  const targetHasField = (targetDs.fields ?? []).some((f) => f.name === fname);
                  if (targetHasField) {
                    const relName = `${ds.name}-to-${targetName}`;
                    // Deduplicate by relationship name.
                    if (!inferred.some((r) => r.name === relName)) {
                      inferred.push({
                        name: relName,
                        from: ds.name,
                        to: targetName,
                        from_columns: [fname],
                        to_columns: [fname]
                      });
                    }
                  }
                }
              }
            }
          }
        }
        if (inferred.length > 0) {
          sm.relationships = inferred;
          changed = true;
        }
      }
    }
    if (changed) {
      writeFileSync8(modelFilePath, yaml4.stringify(modelDoc, { lineWidth: 120 }), "utf-8");
    }
  }
  // --- Rules file: TODO-stubbed golden queries, rules, guardrails, hierarchy ---
  if (suggestions.needsRulesFile) {
    const rulesDir = path16.join(ctx.contextDir, "rules");
    if (!existsSync6(rulesDir)) mkdirSync5(rulesDir, { recursive: true });
    const rulesPath = path16.join(rulesDir, `${ctx.modelName}.rules.yaml`);
    if (!existsSync6(rulesPath)) {
      const rulesDoc = {
        model: ctx.modelName,
        golden_queries: [
          { question: "TODO: What is the total count?", sql: "SELECT COUNT(*) FROM table_name" },
          { question: "TODO: What are the top records?", sql: "SELECT * FROM table_name LIMIT 10" },
          { question: "TODO: What is the distribution?", sql: "SELECT column, COUNT(*) FROM table_name GROUP BY column" }
        ],
        business_rules: [
          { name: "TODO: rule-name", definition: "TODO: describe the business rule" }
        ],
        guardrail_filters: [
          { name: "TODO: filter-name", filter: "column IS NOT NULL", reason: "TODO: explain why" }
        ],
        hierarchies: [
          { name: "TODO: hierarchy-name", levels: ["level1", "level2"], dataset: datasetNames[0] ?? "dataset" }
        ]
      };
      writeFileSync8(rulesPath, yaml4.stringify(rulesDoc, { lineWidth: 120 }), "utf-8");
    }
  }
  // Recompile so the tier score reflects the files written above.
  const { graph: newGraph } = await compile12({ contextDir: ctx.contextDir, config, rootDir: ctx.cwd });
  ctx.graph = newGraph;
  ctx.tierScore = computeTier5(ctx.modelName, newGraph);
  spin.stop("Applied Gold enrichments");
  const todos = suggestions.needsRulesFile ? "\nThe rules file contains TODO placeholders \u2014 edit context/rules/ to complete Gold." : "";
  if (todos) p5.log.warning(todos);
  displayTierScore(ctx.tierScore);
  return { skipped: false, summary: `${ctx.tierScore.tier.toUpperCase()} (may need curation)` };
}
2180
+
2181
+ // src/setup/steps/verify.ts
2182
+ import * as p6 from "@clack/prompts";
2183
+ import {
2184
+ compile as compile13,
2185
+ LintEngine as LintEngine5,
2186
+ ALL_RULES as ALL_RULES6,
2187
+ computeTier as computeTier6,
2188
+ loadConfig as loadConfig14
2189
+ } from "@runcontext/core";
2190
// Wizard step: optionally validate the compiled metadata against the live
// database using only the data/* lint rules, report the diagnostics, and
// refresh ctx.graph / ctx.tierScore. Validation failures are reported, not
// thrown. Returns { skipped, summary }.
async function runVerifyStep(ctx) {
  const confirmed = await p6.confirm({
    message: "Verify metadata against live data?"
  });
  if (p6.isCancel(confirmed) || !confirmed) {
    return { skipped: true, summary: "Skipped" };
  }
  const spinner = p6.spinner();
  spinner.start("Verifying against database...");
  const config = loadConfig14(ctx.cwd);
  const { graph } = await compile13({ contextDir: ctx.contextDir, config, rootDir: ctx.cwd });
  graph.dataValidation = await collectDataValidation(ctx.adapter, graph);
  // Register only the data-backed rules; structural rules run elsewhere.
  const engine = new LintEngine5();
  for (const rule of ALL_RULES6) {
    if (rule.id.startsWith("data/")) engine.register(rule);
  }
  const diagnostics = engine.run(graph);
  ctx.graph = graph;
  ctx.tierScore = computeTier6(ctx.modelName, graph);
  let errors = 0;
  let warnings = 0;
  for (const diag of diagnostics) {
    if (diag.severity === "error") errors++;
    else if (diag.severity === "warning") warnings++;
  }
  if (diagnostics.length === 0) {
    spinner.stop("All data validation checks passed");
  } else {
    spinner.stop(`${errors} error(s), ${warnings} warning(s)`);
    const details = diagnostics.map((d) => ` ${d.severity === "error" ? "x" : "!"} ${d.message}`).join("\n");
    p6.note(details, "Data Validation Issues");
  }
  return {
    skipped: false,
    summary: diagnostics.length === 0 ? "Clean" : `${errors} errors, ${warnings} warnings`
  };
}
2223
+
2224
+ // src/setup/steps/autofix.ts
2225
+ import * as p7 from "@clack/prompts";
2226
+ import fs4 from "fs";
2227
+ import {
2228
+ compile as compile14,
2229
+ LintEngine as LintEngine6,
2230
+ ALL_RULES as ALL_RULES7,
2231
+ applyFixes as applyFixes4,
2232
+ computeTier as computeTier7,
2233
+ loadConfig as loadConfig15
2234
+ } from "@runcontext/core";
2235
// Wizard step: run every lint rule (including the data/* rules backed by the
// live adapter), offer to auto-apply all fixable diagnostics, write the fixed
// files back to disk, then recompile so ctx reflects the post-fix state.
// Returns { skipped, summary }.
async function runAutofixStep(ctx) {
  const config = loadConfig15(ctx.cwd);
  const { graph } = await compile14({ contextDir: ctx.contextDir, config, rootDir: ctx.cwd });
  graph.dataValidation = await collectDataValidation(ctx.adapter, graph);
  const engine = new LintEngine6();
  ALL_RULES7.forEach((rule) => engine.register(rule));
  const fixable = engine.run(graph).filter((diag) => diag.fixable);
  if (fixable.length === 0) {
    p7.log.success("No fixable issues found.");
    ctx.graph = graph;
    ctx.tierScore = computeTier7(ctx.modelName, graph);
    return { skipped: true, summary: "Nothing to fix" };
  }
  const shouldRun = await p7.confirm({
    message: `Auto-fix ${fixable.length} issue(s)?`
  });
  if (p7.isCancel(shouldRun) || !shouldRun) {
    return { skipped: true, summary: "Skipped" };
  }
  const spin = p7.spinner();
  spin.start("Fixing...");
  // applyFixes4 yields map-like entries of file path -> fixed content.
  const fixedFiles = applyFixes4(fixable, (filePath) => fs4.readFileSync(filePath, "utf-8"));
  for (const [file, content] of fixedFiles) {
    fs4.writeFileSync(file, content, "utf-8");
  }
  // Recompile so the tier score reflects the files just rewritten.
  const { graph: newGraph } = await compile14({ contextDir: ctx.contextDir, config, rootDir: ctx.cwd });
  ctx.graph = newGraph;
  ctx.tierScore = computeTier7(ctx.modelName, newGraph);
  spin.stop(`Fixed ${fixable.length} issue(s) in ${fixedFiles.size} file(s)`);
  displayTierScore(ctx.tierScore);
  return { skipped: false, summary: `${fixable.length} issues fixed` };
}
2269
+
2270
+ // src/setup/steps/claude-md.ts
2271
+ import * as p8 from "@clack/prompts";
2272
+ import path17 from "path";
2273
+ import { existsSync as existsSync7, writeFileSync as writeFileSync9 } from "fs";
2274
// Render the content of context/AGENT_INSTRUCTIONS.md: static curation
// guidance for AI agents plus project-specific details (model name, current
// tier, dataset list, and currently failing tier checks).
// NOTE(review): line-leading indentation inside the markdown/YAML examples
// was reconstructed from a whitespace-flattened source; verify the rendered
// file against the published package before relying on exact bytes.
function buildClaudeMd(ctx) {
  const modelName = ctx.modelName;
  const tier = ctx.tierScore?.tier?.toUpperCase() ?? "UNKNOWN";
  const model = ctx.graph?.models.get(modelName);
  const datasets = model?.datasets ?? [];
  const datasetList = datasets.map((ds) => `- \`${ds.name}\` \u2014 ${ds.fields?.length ?? 0} fields`).join("\n");
  // Collect every failing check across all three tiers for the report.
  const failingChecks = [];
  if (ctx.tierScore) {
    for (const check of ctx.tierScore.bronze.checks) {
      if (!check.passed) failingChecks.push(`- ${check.id}: ${check.detail ?? check.label}`);
    }
    for (const check of ctx.tierScore.silver.checks) {
      if (!check.passed) failingChecks.push(`- ${check.id}: ${check.detail ?? check.label}`);
    }
    for (const check of ctx.tierScore.gold.checks) {
      if (!check.passed) failingChecks.push(`- ${check.id}: ${check.detail ?? check.label}`);
    }
  }
  const failingSection = failingChecks.length > 0 ? `### Failing Checks

${failingChecks.join("\n")}` : "All checks passing.";
  return `# ContextKit Agent Instructions

You have two MCP servers: **duckdb** (query data) and **contextkit** (query metadata).

Model: **${modelName}** | Current Tier: **${tier}**

## The Cardinal Rule: Never Fabricate Metadata

**Every piece of metadata you write must be grounded in evidence from the actual data.**

- NEVER invent owner names, emails, team names, or contact info
- NEVER write a field description that is just the column name repeated
- NEVER assign a semantic_role without first querying the column's actual values
- NEVER mark a field as additive without understanding what summing it means
- NEVER write lineage entries without knowing the actual data sources
- NEVER write a business_context narrative you can't justify from the data
- NEVER create a glossary definition that is just "Definition for X"

If you don't know something, say so. Leave it as a TODO with a note about what you'd need to determine the answer. A honest TODO is infinitely better than fabricated metadata that looks plausible but is wrong.

## On Session Start

1. Run \`context_tier\` to check the current metadata tier (Bronze/Silver/Gold)
2. Report the current tier and list failing checks
3. Ask the user what they'd like to work on \u2014 don't start changing files unprompted

## When Asked to Reach Gold

Work through ALL failing Gold checks iteratively until \`context tier\` reports Gold:

1. Run \`context_tier\` and collect every failing check
2. For each failing check, query the database to gather evidence, then fix the metadata
3. Run \`context_tier\` again
4. If checks still fail, go back to step 2
5. **Do NOT stop until every Gold check passes** or you hit something that genuinely requires human input (like real owner contact info)
6. For checks you cannot fix (e.g., owner email), leave a clear TODO explaining what a human needs to provide

You must iterate \u2014 a single pass is never enough. Each \`context tier\` run may reveal new failures after earlier ones are fixed.

## How to Curate Metadata (the right way)

### Before writing ANY metadata, query the database first

For every field you're about to describe or classify:

\`\`\`sql
-- What type of values does this column contain?
SELECT DISTINCT column_name FROM table LIMIT 20;

-- For numeric columns: is this a metric or dimension?
SELECT MIN(col), MAX(col), AVG(col), COUNT(DISTINCT col) FROM table;

-- For potential metrics: does SUM make sense?
-- If SUM produces a meaningful business number \u2192 additive: true
-- If SUM is meaningless (e.g., summing percentages, scores, ratings) \u2192 additive: false
\`\`\`

### Semantic Role Decision Tree

Query the column first, then apply this logic:

1. **Is it a primary key or foreign key?** \u2192 \`identifier\`
2. **Is it a date or timestamp?** \u2192 \`date\`
3. **Is it numeric AND does aggregation make business sense?**
   - Does SUM make sense? (counts, amounts, quantities) \u2192 \`metric\`, \`additive: true\`
   - Does only AVG/MIN/MAX make sense? (rates, percentages, scores, ratings) \u2192 \`metric\`, \`additive: false\`
4. **Everything else** \u2192 \`dimension\`

Common mistakes to avoid:
- \`stars\` (ratings) \u2192 metric with AVG, NOT additive (summing star ratings is meaningless)
- \`_per_10k_people\` (rates) \u2192 metric with AVG, NOT additive
- \`_score\` (composite scores) \u2192 metric with AVG, NOT additive
- \`useful/funny/cool\` (vote counts) \u2192 metric with SUM, additive
- \`_count\` fields \u2192 metric with SUM, additive (usually)

### Field Descriptions

Write descriptions that help someone who has never seen this database understand what the column contains. Include:
- What the value represents
- Units or scale (if applicable)
- Where the data comes from (if known)
- Any known quirks or caveats

Bad: \`description: total_population\`
Good: \`description: Total resident population of the census tract from American Community Survey 5-year estimates\`

Bad: \`description: stars\`
Good: \`description: Average Yelp star rating (1.0-5.0 scale) based on all reviews for this business\`

### Lineage

Upstream sources are the EXTERNAL systems that feed data into this warehouse. They are NOT the tables in the warehouse itself.

Ask yourself: "Where did this data originally come from before it was loaded here?"

Bad lineage:
\`\`\`yaml
upstream:
  - source: yelp_business # This is a table IN the warehouse, not an upstream source
    type: pipeline
\`\`\`

Good lineage:
\`\`\`yaml
upstream:
  - source: yelp-academic-dataset
    type: file
    notes: Yelp Open Dataset (academic use), loaded via CSV import
\`\`\`

### Owner Files

Do NOT create fake owner identities. If the real owner is unknown:
- Keep the existing owner file as-is
- Note in the file that contact info needs to be filled in by a real person
- NEVER invent email addresses like \`analytics@example.com\`

### Business Context

Write business_context entries that describe real analytical use cases you can verify from the data. Query the data to understand what questions it can answer before writing narratives.

### Golden Queries

Every golden query MUST be tested against the actual database before you write it. Run the SQL, verify it returns sensible results, then document it.

### Data Quality

When you discover data quality issues (null values, broken joins, missing data), FLAG THEM \u2014 don't hide them. Add notes in governance or report them to the user.

## This Project

### Datasets

${datasetList || "(none detected)"}

${failingSection}

## MCP Tools

| Tool | Parameters | What it does |
|------|-----------|-------------|
| \`context_search\` | \`query\` | Find models, datasets, fields, terms by keyword |
| \`context_explain\` | \`model\` | Full model details \u2014 governance, rules, lineage, tier |
| \`context_validate\` | \u2014 | Run linter, get errors and warnings |
| \`context_tier\` | \`model\` | Tier scorecard with all check results |
| \`context_golden_query\` | \`question\` | Find pre-validated SQL for a question |
| \`context_guardrails\` | \`tables[]\` | Get required WHERE clauses for tables |

## Tier Checks Quick Reference

**Bronze (7):** descriptions, owner, security, grain, table_type
**Silver (+6):** trust, 2+ tags, glossary linked, lineage, refresh, 2+ sample_values
**Gold (+21):** semantic_role on ALL fields, metric aggregation/additive, 1+ guardrail, 3+ golden queries, 1+ business rule, 1+ hierarchy, 1+ default_filter, trust=endorsed, contactable owner, 1+ relationship, description \u226550 chars, ai_context (no TODO), 1+ business_context, version, field descriptions not lazy, glossary definitions substantive, lineage references real sources, grain statements specific, ai_context filled in

## YAML Formats

**Governance** (\`context/governance/*.governance.yaml\`):
\`\`\`yaml
model: my-model
owner: team-name
version: "1.0.0"
trust: endorsed
security: internal
tags: [domain-tag-1, domain-tag-2]
business_context:
  - name: Use Case Name
    description: What analytical question this data answers and for whom.
datasets:
  my_table:
    grain: "One row per [entity] identified by [key]"
    table_type: fact # fact | dimension | event | view
    refresh: daily
fields:
  dataset.field:
    semantic_role: metric # metric | dimension | identifier | date
    default_aggregation: SUM # SUM | AVG | COUNT | COUNT_DISTINCT | MIN | MAX
    additive: true # can this metric be summed across dimensions?
    default_filter: "is_open = 1"
    sample_values: ["val1", "val2"]
\`\`\`

**Rules** (\`context/rules/*.rules.yaml\`):
\`\`\`yaml
model: my-model
golden_queries:
  - question: What are the top items by count?
    sql: SELECT name, count FROM my_table ORDER BY count DESC LIMIT 10
    intent: Identify top performers by volume
    caveats: Filters to active records only
business_rules:
  - name: valid-ratings
    definition: All ratings must be between 1 and 5
guardrail_filters:
  - name: active-only
    filter: "status = 'active'"
    reason: Exclude inactive records from analytics
    tables: [my_table]
hierarchies:
  - name: geography
    levels: [state, city, postal_code]
    dataset: my_table
\`\`\`

## CLI Commands

\`\`\`bash
context tier # Check scorecard
context verify --db <path> # Validate against live data
context fix --db <path> # Auto-fix data warnings
context setup # Interactive setup wizard
context dev # Watch mode for live editing
\`\`\`
`;
}
2509
// Final wizard step: write context/AGENT_INSTRUCTIONS.md with the agent
// curation guidelines, asking before overwriting an existing file.
// Returns { skipped, summary }.
async function runClaudeMdStep(ctx) {
  const instructionsPath = path17.join(ctx.contextDir, "AGENT_INSTRUCTIONS.md");
  const alreadyExists = existsSync7(instructionsPath);
  if (alreadyExists) {
    const shouldOverwrite = await p8.confirm({
      message: "context/AGENT_INSTRUCTIONS.md already exists. Overwrite with updated instructions?"
    });
    if (p8.isCancel(shouldOverwrite) || !shouldOverwrite) {
      return { skipped: true, summary: "context/AGENT_INSTRUCTIONS.md already exists, kept existing" };
    }
  }
  writeFileSync9(instructionsPath, buildClaudeMd(ctx), "utf-8");
  p8.log.success("Generated context/AGENT_INSTRUCTIONS.md with agent curation instructions");
  return { skipped: false, summary: "Generated context/AGENT_INSTRUCTIONS.md" };
}
2524
+
2525
// src/commands/setup.ts
// Interactive end-to-end wizard: connect to a database, scaffold Bronze
// metadata, optionally enrich to Silver/Gold, verify against live data,
// auto-fix lint issues, and generate agent instructions.
var setupCommand = new Command15("setup").description("Interactive wizard to scaffold and enrich metadata from a database").action(async () => {
  p9.intro(chalk16.bgCyan(chalk16.black(" ContextKit Setup ")));
  // runConnectStep prompts for the connection and returns the shared wizard
  // context (adapter, modelName, targetTier, ...); a falsy result means the
  // user aborted.
  const ctx = await runConnectStep();
  if (!ctx) return;
  try {
    // Assemble the step list based on the tier the user asked for.
    const steps = [
      { name: "Scaffold Bronze", fn: runScaffoldStep }
    ];
    if (ctx.targetTier === "silver" || ctx.targetTier === "gold") {
      steps.push({ name: "Enrich to Silver", fn: runEnrichSilverStep });
    }
    if (ctx.targetTier === "gold") {
      steps.push({ name: "Enrich to Gold", fn: runEnrichGoldStep });
    }
    steps.push(
      { name: "Verify data", fn: runVerifyStep },
      { name: "Auto-fix", fn: runAutofixStep },
      { name: "Generate agent instructions", fn: runClaudeMdStep }
    );
    // Run the steps sequentially; each returns { skipped, summary }.
    const results = [];
    for (let i = 0; i < steps.length; i++) {
      const step = steps[i];
      p9.log.step(`${chalk16.dim(`[${i + 1}/${steps.length}]`)} ${step.name}`);
      const result = await step.fn(ctx);
      results.push({ name: step.name, summary: result.summary });
    }
    const summaryLines = results.map((r) => ` ${chalk16.green("+")} ${r.name}: ${r.summary}`).join("\n");
    p9.note(summaryLines, "Summary");
    if (ctx.tierScore) {
      displayTierScore(ctx.tierScore);
    }
    // Did we reach the requested tier? Higher tiers also satisfy the target.
    const currentTier = ctx.tierScore?.tier ?? "none";
    const reachedTarget = ctx.targetTier === "bronze" && ["bronze", "silver", "gold"].includes(currentTier) || ctx.targetTier === "silver" && ["silver", "gold"].includes(currentTier) || ctx.targetTier === "gold" && currentTier === "gold";
    if (reachedTarget) {
      p9.outro(`Done! You're at ${chalk16.bold(currentTier.toUpperCase())}. Run ${chalk16.cyan("context tier")} anytime to check.`);
    } else if (ctx.targetTier === "gold" && currentTier !== "gold") {
      // Gold needs human/agent curation; hand the user a ready-made prompt.
      const nextSteps = [
        `Your metadata is at ${chalk16.bold(currentTier.toUpperCase())} \u2014 Gold needs curation.`,
        "",
        `${chalk16.bold("To reach Gold, tell your AI assistant:")}`,
        "",
        ` "Read ${chalk16.cyan("context/AGENT_INSTRUCTIONS.md")} for curation guidelines.`,
        ` Run ${chalk16.cyan("context tier")} and fix every failing Gold check.`,
        ` Query the database before writing any metadata.`,
        ` Keep iterating until ${chalk16.cyan("context tier")} reports Gold."`
      ];
      p9.note(nextSteps.join("\n"), "Next Steps");
      p9.outro(`Run ${chalk16.cyan("context dev")} to watch for changes as you edit.`);
    } else {
      p9.outro(`Run ${chalk16.cyan("context tier")} to check your scorecard.`);
    }
  } finally {
    // Always release the database connection; ignore disconnect failures.
    try {
      await ctx.adapter.disconnect();
    } catch {
    }
  }
});
2584
+
1016
2585
// src/index.ts
// Root commander program for the `context` CLI.
// Fix: the reported version lagged at "0.3.1" while the published package is
// 0.3.2; keep this string in sync with package.json.
var program = new Command16();
program.name("context").description("ContextKit \u2014 AI-ready metadata governance over OSI").version("0.3.2");
program.addCommand(lintCommand);
program.addCommand(buildCommand);
program.addCommand(tierCommand);
@@ -1026,6 +2595,10 @@ program.addCommand(initCommand);
1026
2595
  program.addCommand(siteCommand);
1027
2596
  program.addCommand(serveCommand);
1028
2597
  program.addCommand(validateOsiCommand);
2598
+ program.addCommand(introspectCommand);
2599
+ program.addCommand(verifyCommand);
2600
+ program.addCommand(enrichCommand);
1029
2601
  program.addCommand(rulesCommand);
2602
+ program.addCommand(setupCommand);
1030
2603
  program.parse();
1031
2604
  //# sourceMappingURL=index.js.map