altimate-code 0.4.7 → 0.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40) hide show
  1. package/CHANGELOG.md +34 -0
  2. package/bin/altimate +46 -3
  3. package/bin/altimate-code +46 -3
  4. package/package.json +16 -13
  5. package/postinstall.mjs +35 -0
  6. package/skills/cost-report/SKILL.md +134 -0
  7. package/skills/data-viz/SKILL.md +135 -0
  8. package/skills/data-viz/references/component-guide.md +394 -0
  9. package/skills/dbt-analyze/SKILL.md +130 -0
  10. package/skills/dbt-analyze/references/altimate-dbt-commands.md +66 -0
  11. package/skills/dbt-analyze/references/lineage-interpretation.md +58 -0
  12. package/skills/dbt-develop/SKILL.md +151 -0
  13. package/skills/dbt-develop/references/altimate-dbt-commands.md +66 -0
  14. package/skills/dbt-develop/references/common-mistakes.md +49 -0
  15. package/skills/dbt-develop/references/incremental-strategies.md +118 -0
  16. package/skills/dbt-develop/references/layer-patterns.md +158 -0
  17. package/skills/dbt-develop/references/medallion-architecture.md +125 -0
  18. package/skills/dbt-develop/references/yaml-generation.md +90 -0
  19. package/skills/dbt-docs/SKILL.md +99 -0
  20. package/skills/dbt-docs/references/altimate-dbt-commands.md +66 -0
  21. package/skills/dbt-docs/references/documentation-standards.md +94 -0
  22. package/skills/dbt-test/SKILL.md +121 -0
  23. package/skills/dbt-test/references/altimate-dbt-commands.md +66 -0
  24. package/skills/dbt-test/references/custom-tests.md +59 -0
  25. package/skills/dbt-test/references/schema-test-patterns.md +103 -0
  26. package/skills/dbt-test/references/unit-test-guide.md +121 -0
  27. package/skills/dbt-troubleshoot/SKILL.md +187 -0
  28. package/skills/dbt-troubleshoot/references/altimate-dbt-commands.md +66 -0
  29. package/skills/dbt-troubleshoot/references/compilation-errors.md +57 -0
  30. package/skills/dbt-troubleshoot/references/runtime-errors.md +71 -0
  31. package/skills/dbt-troubleshoot/references/test-failures.md +95 -0
  32. package/skills/lineage-diff/SKILL.md +64 -0
  33. package/skills/pii-audit/SKILL.md +117 -0
  34. package/skills/query-optimize/SKILL.md +86 -0
  35. package/skills/schema-migration/SKILL.md +119 -0
  36. package/skills/sql-review/SKILL.md +118 -0
  37. package/skills/sql-translate/SKILL.md +68 -0
  38. package/skills/teach/SKILL.md +54 -0
  39. package/skills/train/SKILL.md +51 -0
  40. package/skills/training-status/SKILL.md +45 -0
package/CHANGELOG.md CHANGED
@@ -5,6 +5,40 @@ All notable changes to this project will be documented in this file.
5
5
  The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/),
6
6
  and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
7
7
 
8
+ ## [0.5.0] - 2026-03-18
9
+
10
+ ### Added
11
+
12
+ - Smooth streaming mode for TUI response rendering (#281)
13
+ - Ship builtin skills to customers via `postinstall` (#279)
14
+ - `/configure-claude` and `/configure-codex` built-in commands (#235)
15
+
16
+ ### Fixed
17
+
18
+ - Brew formula stuck at v0.3.1 — version normalization in publish pipeline (#286)
19
+ - Harden auth field handling for all warehouse drivers (#271)
20
+ - Suppress console logging that corrupts TUI display (#269)
21
+
22
+ ## [0.4.9] - 2026-03-18
23
+
24
+ ### Added
25
+
26
+ - Script to build and run compiled binary locally (#262)
27
+
28
+ ### Fixed
29
+
30
+ - Snowflake auth — support all auth methods (`password`, `keypair`, `externalbrowser`, `oauth`), fix field name mismatches (#268)
31
+ - dbt tool regression — schema format mismatch, silent failures, wrong results (#263)
32
+ - `altimate-dbt compile`, `execute`, and children commands fail with runtime errors (#255)
33
+ - `Cannot find module @altimateai/altimate-core` on `npm install` (#259)
34
+ - Dispatcher tests fail in CI due to shared module state (#257)
35
+
36
+ ### Changed
37
+
38
+ - CI: parallel per-target builds — 12 jobs, ~5 min wall clock instead of ~20 min (#254)
39
+ - CI: faster release — build parallel with test, lower compression, tighter timeouts (#251)
40
+ - Docker E2E tests skip in CI unless explicitly opted in (#253)
41
+
8
42
  ## [0.4.1] - 2026-03-16
9
43
  ## [0.4.2] - 2026-03-18
10
44
 
package/bin/altimate CHANGED
@@ -5,9 +5,55 @@ const fs = require("fs")
5
5
  const path = require("path")
6
6
  const os = require("os")
7
7
 
8
+ // Resolve script location early — needed by both run() and findBinary().
9
+ const scriptPath = fs.realpathSync(__filename)
10
+ const scriptDir = path.dirname(scriptPath)
11
+
12
+ // Collect ALL node_modules directories walking upward from startDir.
13
+ // Bun's single-file executable uses a virtual filesystem (/$bunfs/root/) without
14
+ // node_modules. External packages are resolved via NODE_PATH instead.
15
+ // We collect every node_modules in the hierarchy (not just the first) to handle
16
+ // pnpm strict layouts, hoisted monorepos, and npm flat installs alike.
17
+ function findAllNodeModules(startDir) {
18
+ const paths = []
19
+ let current = startDir
20
+ for (;;) {
21
+ const modules = path.join(current, "node_modules")
22
+ if (fs.existsSync(modules)) paths.push(modules)
23
+ const parent = path.dirname(current)
24
+ if (parent === current) break
25
+ current = parent
26
+ }
27
+ return paths
28
+ }
29
+
8
30
  function run(target) {
31
+ // Resolve NODE_PATH so the compiled Bun binary can find external packages
32
+ // installed alongside the wrapper (e.g. @altimateai/altimate-core NAPI module).
33
+ // Search from BOTH the binary's location AND the wrapper script's location
34
+ // to cover npm flat installs, pnpm isolated stores, and hoisted monorepos.
35
+ const env = { ...process.env }
36
+ try {
37
+ const resolvedTarget = fs.realpathSync(target)
38
+ const targetDir = path.dirname(path.dirname(resolvedTarget))
39
+
40
+ const targetModules = findAllNodeModules(targetDir)
41
+ const scriptModules = findAllNodeModules(scriptDir)
42
+ const allPaths = [...new Set([...scriptModules, ...targetModules])]
43
+
44
+ if (allPaths.length > 0) {
45
+ const sep = process.platform === "win32" ? ";" : ":"
46
+ const joined = allPaths.join(sep)
47
+ env.NODE_PATH = env.NODE_PATH ? joined + sep + env.NODE_PATH : joined
48
+ }
49
+ } catch {
50
+ // realpathSync failed (e.g. target doesn't exist) — continue without
51
+ // NODE_PATH; spawnSync will report the missing binary via result.error.
52
+ }
53
+
9
54
  const result = childProcess.spawnSync(target, process.argv.slice(2), {
10
55
  stdio: "inherit",
56
+ env,
11
57
  })
12
58
  if (result.error) {
13
59
  console.error(result.error.message)
@@ -22,9 +68,6 @@ if (envPath) {
22
68
  run(envPath)
23
69
  }
24
70
 
25
- const scriptPath = fs.realpathSync(__filename)
26
- const scriptDir = path.dirname(scriptPath)
27
-
28
71
  //
29
72
  const cached = path.join(scriptDir, ".altimate-code")
30
73
  if (fs.existsSync(cached)) {
package/bin/altimate-code CHANGED
@@ -5,9 +5,55 @@ const fs = require("fs")
5
5
  const path = require("path")
6
6
  const os = require("os")
7
7
 
8
+ // Resolve script location early — needed by both run() and findBinary().
9
+ const scriptPath = fs.realpathSync(__filename)
10
+ const scriptDir = path.dirname(scriptPath)
11
+
12
+ // Collect ALL node_modules directories walking upward from startDir.
13
+ // Bun's single-file executable uses a virtual filesystem (/$bunfs/root/) without
14
+ // node_modules. External packages are resolved via NODE_PATH instead.
15
+ // We collect every node_modules in the hierarchy (not just the first) to handle
16
+ // pnpm strict layouts, hoisted monorepos, and npm flat installs alike.
17
+ function findAllNodeModules(startDir) {
18
+ const paths = []
19
+ let current = startDir
20
+ for (;;) {
21
+ const modules = path.join(current, "node_modules")
22
+ if (fs.existsSync(modules)) paths.push(modules)
23
+ const parent = path.dirname(current)
24
+ if (parent === current) break
25
+ current = parent
26
+ }
27
+ return paths
28
+ }
29
+
8
30
  function run(target) {
31
+ // Resolve NODE_PATH so the compiled Bun binary can find external packages
32
+ // installed alongside the wrapper (e.g. @altimateai/altimate-core NAPI module).
33
+ // Search from BOTH the binary's location AND the wrapper script's location
34
+ // to cover npm flat installs, pnpm isolated stores, and hoisted monorepos.
35
+ const env = { ...process.env }
36
+ try {
37
+ const resolvedTarget = fs.realpathSync(target)
38
+ const targetDir = path.dirname(path.dirname(resolvedTarget))
39
+
40
+ const targetModules = findAllNodeModules(targetDir)
41
+ const scriptModules = findAllNodeModules(scriptDir)
42
+ const allPaths = [...new Set([...scriptModules, ...targetModules])]
43
+
44
+ if (allPaths.length > 0) {
45
+ const sep = process.platform === "win32" ? ";" : ":"
46
+ const joined = allPaths.join(sep)
47
+ env.NODE_PATH = env.NODE_PATH ? joined + sep + env.NODE_PATH : joined
48
+ }
49
+ } catch {
50
+ // realpathSync failed (e.g. target doesn't exist) — continue without
51
+ // NODE_PATH; spawnSync will report the missing binary via result.error.
52
+ }
53
+
9
54
  const result = childProcess.spawnSync(target, process.argv.slice(2), {
10
55
  stdio: "inherit",
56
+ env,
11
57
  })
12
58
  if (result.error) {
13
59
  console.error(result.error.message)
@@ -22,9 +68,6 @@ if (envPath) {
22
68
  run(envPath)
23
69
  }
24
70
 
25
- const scriptPath = fs.realpathSync(__filename)
26
- const scriptDir = path.dirname(scriptPath)
27
-
28
71
  //
29
72
  const cached = path.join(scriptDir, ".altimate-code")
30
73
  if (fs.existsSync(cached)) {
package/package.json CHANGED
@@ -14,20 +14,23 @@
14
14
  "scripts": {
15
15
  "postinstall": "bun ./postinstall.mjs || node ./postinstall.mjs"
16
16
  },
17
- "version": "v0.4.7",
17
+ "version": "0.5.0",
18
18
  "license": "MIT",
19
+ "dependencies": {
20
+ "@altimateai/altimate-core": "^0.2.3"
21
+ },
19
22
  "optionalDependencies": {
20
- "@altimateai/altimate-code-linux-x64": "v0.4.7",
21
- "@altimateai/altimate-code-windows-arm64": "v0.4.7",
22
- "@altimateai/altimate-code-linux-arm64-musl": "v0.4.7",
23
- "@altimateai/altimate-code-darwin-x64": "v0.4.7",
24
- "@altimateai/altimate-code-windows-x64": "v0.4.7",
25
- "@altimateai/altimate-code-linux-x64-musl": "v0.4.7",
26
- "@altimateai/altimate-code-darwin-x64-baseline": "v0.4.7",
27
- "@altimateai/altimate-code-linux-x64-baseline-musl": "v0.4.7",
28
- "@altimateai/altimate-code-linux-x64-baseline": "v0.4.7",
29
- "@altimateai/altimate-code-linux-arm64": "v0.4.7",
30
- "@altimateai/altimate-code-darwin-arm64": "v0.4.7",
31
- "@altimateai/altimate-code-windows-x64-baseline": "v0.4.7"
23
+ "@altimateai/altimate-code-linux-x64": "0.5.0",
24
+ "@altimateai/altimate-code-windows-arm64": "0.5.0",
25
+ "@altimateai/altimate-code-linux-arm64-musl": "0.5.0",
26
+ "@altimateai/altimate-code-darwin-x64": "0.5.0",
27
+ "@altimateai/altimate-code-windows-x64": "0.5.0",
28
+ "@altimateai/altimate-code-linux-x64-musl": "0.5.0",
29
+ "@altimateai/altimate-code-darwin-x64-baseline": "0.5.0",
30
+ "@altimateai/altimate-code-linux-x64-baseline-musl": "0.5.0",
31
+ "@altimateai/altimate-code-linux-x64-baseline": "0.5.0",
32
+ "@altimateai/altimate-code-linux-arm64": "0.5.0",
33
+ "@altimateai/altimate-code-darwin-arm64": "0.5.0",
34
+ "@altimateai/altimate-code-windows-x64-baseline": "0.5.0"
32
35
  }
33
36
  }
package/postinstall.mjs CHANGED
@@ -115,6 +115,39 @@ function printWelcome(version) {
115
115
  out(bot)
116
116
  }
117
117
 
118
+ function copyDirRecursive(src, dst) {
119
+ fs.mkdirSync(dst, { recursive: true })
120
+ for (const entry of fs.readdirSync(src, { withFileTypes: true })) {
121
+ const srcPath = path.join(src, entry.name)
122
+ const dstPath = path.join(dst, entry.name)
123
+ if (entry.isDirectory()) {
124
+ copyDirRecursive(srcPath, dstPath)
125
+ } else {
126
+ fs.copyFileSync(srcPath, dstPath)
127
+ }
128
+ }
129
+ }
130
+
131
+ /**
132
+ * Copy bundled skills to ~/.altimate/builtin/ on every install/upgrade.
133
+ * The entire directory is wiped and replaced so each release is the single
134
+ * source of truth. Intentionally separate from ~/.altimate/skills/ which users own.
135
+ */
136
+ function copySkillsToAltimate() {
137
+ try {
138
+ const skillsSrc = path.join(__dirname, "skills")
139
+ if (!fs.existsSync(skillsSrc)) return // skills not in package (shouldn't happen)
140
+
141
+ const builtinDst = path.join(os.homedir(), ".altimate", "builtin")
142
+
143
+ // Full wipe-and-replace — each release owns this directory entirely
144
+ if (fs.existsSync(builtinDst)) fs.rmSync(builtinDst, { recursive: true, force: true })
145
+ copyDirRecursive(skillsSrc, builtinDst)
146
+ } catch {
147
+ // Non-fatal — skills can be installed manually
148
+ }
149
+ }
150
+
118
151
  /**
119
152
  * Write a marker file so the CLI can show a welcome/upgrade banner on first run.
120
153
  * npm v7+ silences postinstall stdout, so the CLI reads this marker at startup instead.
@@ -144,6 +177,7 @@ async function main() {
144
177
  // On Windows, the .exe is already included in the package and bin field points to it
145
178
  // No postinstall setup needed
146
179
  if (version) writeUpgradeMarker(version)
180
+ copySkillsToAltimate()
147
181
  return
148
182
  }
149
183
 
@@ -161,6 +195,7 @@ async function main() {
161
195
  // Write marker only — npm v7+ suppresses all postinstall output.
162
196
  // The CLI picks up the marker and shows the welcome box on first run.
163
197
  if (version) writeUpgradeMarker(version)
198
+ copySkillsToAltimate()
164
199
  } catch (error) {
165
200
  console.error("Failed to setup altimate-code binary:", error.message)
166
201
  process.exit(1)
@@ -0,0 +1,134 @@
1
+ ---
2
+ name: cost-report
3
+ description: Analyze Snowflake query costs and identify optimization opportunities
4
+ ---
5
+
6
+ # Cost Report
7
+
8
+ ## Requirements
9
+ **Agent:** any (read-only analysis)
10
+ **Tools used:** sql_execute, sql_analyze, finops_analyze_credits, finops_expensive_queries, finops_warehouse_advice, finops_unused_resources, finops_query_history
11
+
12
+ Analyze Snowflake warehouse query costs, identify the most expensive queries, detect anti-patterns, and recommend optimizations.
13
+
14
+ ## Workflow
15
+
16
+ 1. **Query SNOWFLAKE.ACCOUNT_USAGE.QUERY_HISTORY** for the top 20 most expensive queries by credits used:
17
+
18
+ ```sql
19
+ SELECT
20
+ query_id,
21
+ query_text,
22
+ user_name,
23
+ warehouse_name,
24
+ query_type,
25
+ credits_used_cloud_services,
26
+ bytes_scanned,
27
+ rows_produced,
28
+ total_elapsed_time,
29
+ execution_status,
30
+ start_time
31
+ FROM SNOWFLAKE.ACCOUNT_USAGE.QUERY_HISTORY
32
+ WHERE start_time >= DATEADD('day', -30, CURRENT_TIMESTAMP())
33
+ AND execution_status = 'SUCCESS'
34
+ AND credits_used_cloud_services > 0
35
+ ORDER BY credits_used_cloud_services DESC
36
+ LIMIT 20;
37
+ ```
38
+
39
+ Use `sql_execute` to run this query against the connected Snowflake warehouse.
40
+
41
+ 2. **Group and summarize** the results by:
42
+ - **User**: Which users are driving the most cost?
43
+ - **Warehouse**: Which warehouses consume the most credits?
44
+ - **Query type**: SELECT vs INSERT vs CREATE TABLE AS SELECT vs MERGE, etc.
45
+
46
+ Present each grouping as a markdown table.
47
+
48
+ 3. **Analyze the top offenders** - For each of the top 10 most expensive queries:
49
+ - Run `sql_analyze` on the query text to detect anti-patterns (SELECT *, missing LIMIT, cartesian products, correlated subqueries, etc.)
50
+ - Summarize anti-patterns found and their severity
51
+
52
+ 4. **Classify each query into a cost tier**:
53
+
54
+ | Tier | Cost (USD) | Label | Action |
55
+ |------|---------|-------|--------|
56
+ | 1 | < $0.01 | Cheap | No action needed |
57
+ | 2 | $0.01 - $1.00 | Moderate | Review if frequent |
58
+ | 3 | $1.00 - $100.00 | Expensive | Optimize or review warehouse sizing |
59
+ | 4 | > $100.00 | Dangerous | Immediate review required |
60
+
61
+ 5. **Warehouse analysis** - Run `finops_warehouse_advice` to check if warehouses used by the top offenders are right-sized.
62
+
63
+ 6. **Unused resource detection** - Run `finops_unused_resources` to find:
64
+ - **Stale tables**: Tables not accessed in the last 30+ days (candidates for archival/drop)
65
+ - **Idle warehouses**: Warehouses with no query activity (candidates for suspension/removal)
66
+
67
+ Include findings in the report under a "Waste Detection" section.
68
+
69
+ 7. **Query history enrichment** - Run `finops_query_history` to fetch recent execution patterns:
70
+ - Identify frequently-run expensive queries (high frequency × high cost = top optimization target)
71
+ - Find queries that could benefit from result caching or materialization
72
+
73
+ 8. **Output the final report** as a structured markdown document:
74
+
75
+ ```
76
+ # Snowflake Cost Report (Last 30 Days)
77
+
78
+ ## Summary
79
+ - Total credits consumed: X
80
+ - Number of unique queries: Y
81
+ - Most expensive query: Z credits
82
+
83
+ ## Cost by User
84
+ | User | Total Credits | Query Count | Avg Credits/Query |
85
+ |------|--------------|-------------|-------------------|
86
+
87
+ ## Cost by Warehouse
88
+ | Warehouse | Total Credits | Query Count | Avg Credits/Query |
89
+ |-----------|--------------|-------------|-------------------|
90
+
91
+ ## Cost by Query Type
92
+ | Query Type | Total Credits | Query Count | Avg Credits/Query |
93
+ |------------|--------------|-------------|-------------------|
94
+
95
+ ## Top 10 Expensive Queries (Detailed Analysis)
96
+
97
+ ### Query 1 (X credits) - DANGEROUS
98
+ **User:** user_name | **Warehouse:** wh_name | **Type:** SELECT
99
+ **Anti-patterns found:**
100
+ - SELECT_STAR (warning): Query uses SELECT * ...
101
+ - MISSING_LIMIT (info): ...
102
+
103
+ **Optimization suggestions:**
104
+ 1. Select only needed columns
105
+ 2. Add LIMIT clause
106
+ 3. Consider partitioning strategy
107
+
108
+ **Cost tier:** Tier 4 (based on credits used)
109
+
110
+ ...
111
+
112
+ ## Waste Detection
113
+ ### Unused Tables
114
+ | Table | Last Accessed | Size | Recommendation |
115
+ |-------|--------------|------|----------------|
116
+
117
+ ### Idle Warehouses
118
+ | Warehouse | Last Query | Size | Recommendation |
119
+ |-----------|-----------|------|----------------|
120
+
121
+ ## Recommendations
122
+ 1. Top priority optimizations
123
+ 2. Warehouse sizing suggestions
124
+ 3. Unused resource cleanup
125
+ 4. Scheduling recommendations
126
+ ```
127
+
128
+ ## Usage
129
+
130
+ The user invokes this skill with:
131
+ - `/cost-report` -- Analyze the last 30 days
132
+ - `/cost-report 7` -- Analyze the last 7 days (adjust the DATEADD interval)
133
+
134
+ Use the tools: `sql_execute`, `sql_analyze`, `finops_analyze_credits`, `finops_expensive_queries`, `finops_warehouse_advice`, `finops_unused_resources`, `finops_query_history`.
@@ -0,0 +1,135 @@
1
+ ---
2
+ name: data-viz
3
+ description: >
4
+ Build modern, interactive data visualizations and dashboards using code-based
5
+ component libraries (shadcn/ui, Recharts, Tremor, Nivo, D3, Victory, visx).
6
+ Use this skill whenever the user asks to visualize data, build dashboards,
7
+ create analytics views, chart metrics, tell a data story, build a reporting
8
+ interface, create KPI cards, plot graphs, or explore a dataset — even if they
9
+ mention PowerBI, Tableau, Streamlit, Metabase, Looker, Grafana, or similar
10
+ tools. Also trigger when the user says "make a dashboard", "show me the data",
11
+ "chart this", "visualize trends", "build an analytics page", "data story", or
12
+ anything involving turning raw data into interactive visual interfaces. If the
13
+ task involves presenting data visually — this is the skill. Always prefer
14
+ building a real, interactive, code-based UI over exporting to or recommending
15
+ a BI platform.
16
+ ---
17
+
18
+ # AI-First Data Visualization
19
+
20
+ ## Philosophy
21
+
22
+ Build production-quality interactive data interfaces with modern component libraries — no vendor lock-in, embeddable anywhere. When no tool is specified, build code-first. When the user explicitly names a BI tool, use it — only suggest code-first if they ask for options or hit a technical blocker.
23
+
24
+ ## Technology Stack
25
+
26
+ Full API patterns & code: `references/component-guide.md`
27
+
28
+ ### Framework Priority
29
+
30
+ 1. **React + Tailwind** — Default when JSX/TSX supported
31
+ 2. **HTML + CSS + Vanilla JS** — Fallback (use D3 or Chart.js)
32
+ 3. **Python (Plotly/Dash)** — Python-only environments only
33
+
34
+ ### Library Selection
35
+
36
+ | Library | Best For |
37
+ |---------|----------|
38
+ | **shadcn/ui charts** | Default first choice — general dashboards, most chart types |
39
+ | **Recharts** | Line, bar, area, composed, radar — fine-grained control |
40
+ | **Tremor** | KPI cards, metric displays, full dashboard layouts |
41
+ | **Nivo** | Heatmaps, treemaps, choropleth, calendar, Sankey, funnel |
42
+ | **visx** | Bespoke custom viz — D3-level control with React |
43
+ | **D3.js** | Force-directed graphs, DAGs, maps — maximum flexibility |
44
+ | **Victory** | When animation quality matters most |
45
+
46
+ **Supporting**: Tailwind CSS · Radix UI · Framer Motion · Lucide React · date-fns · Papaparse · lodash
47
+
48
+ ## Building a Visualization
49
+
50
+ ### Step 1: Understand the Data Story
51
+
52
+ Before code, identify: **What question does the data answer?** Who is the audience (exec → KPIs only, analyst → drill-down, public → narrative)? **What's the ONE key insight?** Design around it.
53
+
54
+ ### Step 2: Choose Chart Type
55
+
56
+ | Data Relationship | Chart Type | Library |
57
+ |---|---|---|
58
+ | Trend over time | Line, Area | shadcn/Recharts |
59
+ | Category comparison | Bar (horizontal if many) | shadcn/Recharts |
60
+ | Part of whole | Donut, Treemap | shadcn/Nivo |
61
+ | Distribution | Histogram, Box, Violin | Nivo/visx |
62
+ | Correlation | Scatter, Bubble | Recharts/visx |
63
+ | Geographic | Choropleth, Dot map | Nivo/D3 |
64
+ | Hierarchical | Treemap, Sunburst | Nivo |
65
+ | Flow / Process | Sankey, Funnel | Nivo/D3 |
66
+ | Single KPI | Metric card, Gauge, Sparkline | Tremor/shadcn |
67
+ | Multi-metric overview | Dashboard grid of cards | Tremor + shadcn |
68
+ | Ranking | Horizontal bar, Bar list | Tremor |
69
+ | Column/model lineage | Force-directed DAG | D3 |
70
+ | Pipeline dependencies | Hierarchical tree, DAG | D3/Nivo |
71
+ | Multi-dimensional quality | Radar/Spider | Recharts |
72
+ | Activity density over time | Calendar heatmap | Nivo |
73
+ | Incremental change breakdown | Waterfall | Recharts (custom) |
74
+
75
+ ### Step 3: Build the Interface
76
+
77
+ Start from this layout — remove what the data doesn't need:
78
+
79
+ ```
80
+ ┌─────────────────────────────────────────┐
81
+ │ Header: Title + Description + Date Range│
82
+ ├─────────────────────────────────────────┤
83
+ │ KPI Row: 3-5 metric cards + sparklines │
84
+ ├─────────────────────────────────────────┤
85
+ │ Primary Visualization (largest chart) │
86
+ ├──────────────────┬──────────────────────┤
87
+ │ Secondary Chart │ Supporting Chart/Tbl │
88
+ ├──────────────────┴──────────────────────┤
89
+ │ Detail Table (sortable, filterable) │
90
+ └─────────────────────────────────────────┘
91
+ ```
92
+
93
+ A single insight might just be one chart with a headline and annotation. Scale complexity to audience.
94
+
95
+ ### Step 4: Design Principles
96
+
97
+ - **Data-ink ratio**: Remove chartjunk — unnecessary gridlines, redundant labels, decorative borders
98
+ - **Color with purpose**: Encode meaning (red=bad, green=good, blue=neutral). Max 5-7 colors. Single-hue gradient for sequential data
99
+ - **Typography hierarchy**: Title → subtitle (muted) → axis labels (small) → data labels
100
+ - **Responsive**: `min-h-[VALUE]` on all charts. Grid stacks on mobile
101
+ - **Animation**: Entry transitions only, `duration-300` to `duration-500`. Never continuous
102
+ - **Accessibility**: `aria-label` on charts, WCAG AA contrast, don't rely on color alone
103
+
104
+ ### Step 5: Interactivity & Annotations
105
+
106
+ **Priority**: Tooltips (every chart) → Filtering → Sorting → Drill-down → Cross-filtering → Export → Annotations
107
+
108
+ **Annotations** turn charts into stories. Mark: inflection points, threshold crossings (amber), external events (indigo/red), anomalies (red), achievements (green). **Limit 3 per chart.** Implementation: `references/component-guide.md` → Annotation Patterns.
109
+
110
+ ### Step 6: Tell the Story
111
+
112
+ - **Headline states insight**: "Revenue grew 23% QoQ, driven by enterprise" — not "Q3 Revenue Chart"
113
+ - **Annotate key moments** directly on chart
114
+ - **Contextual comparisons**: vs. prior period, vs. target, vs. benchmark
115
+ - **Progressive disclosure**: Overview first — detail on demand
116
+
117
+ ## Environment-Specific Guidance
118
+
119
+ | Environment | Approach |
120
+ |---|---|
121
+ | **Claude Artifacts** | React (JSX), single file, default export. Available: `recharts`, `lodash`, `d3`, `lucide-react`, shadcn via `@/components/ui/*`, Tailwind |
122
+ | **Claude Code / Terminal** | Vite + React + Tailwind. Add shadcn/ui + Recharts. Structure: `src/components/charts/`, `src/components/cards/`, `src/data/` |
123
+ | **Python / Jupyter** | Plotly for charts, Plotly Dash for dashboards |
124
+ | **Cursor / Bolt / other IDEs** | Match existing framework. Prefer shadcn/ui if present |
125
+
126
+ ## Anti-Patterns
127
+
128
+ - Screenshot/static charts — build interactive components
129
+ - Defaulting to BI tools unprompted — build code-first when no tool specified
130
+ - Default matplotlib — always customize in Python
131
+ - Rainbow palettes — use deliberate, meaningful colors
132
+ - 3D charts — almost never appropriate
133
+ - Pie charts > 5 slices — use horizontal bar
134
+ - Unlabeled dual y-axes — use two separate charts
135
+ - Truncated bar axes — always start at zero