@ai-lighthouse/cli 1.0.2 → 1.0.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (3)
  1. package/README.md +126 -53
  2. package/dist/index.js +252 -37
  3. package/package.json +2 -2
package/README.md CHANGED
@@ -9,30 +9,123 @@ pnpm install
9
9
  pnpm build
10
10
  ```
11
11
 
12
+ ## Quick Start
13
+
14
+ The simplest way to audit a website:
15
+
16
+ ```bash
17
+ # Interactive wizard (recommended for beginners)
18
+ ai-lighthouse audit https://example.com
19
+
20
+ # Using presets (recommended for most users)
21
+ ai-lighthouse audit https://example.com --preset ai-optimized
22
+ ```
23
+
24
+ ## Interactive Features
25
+
26
+ ### Beautiful Terminal UI
27
+
28
+ The CLI features a beautiful interactive terminal UI built with React (Ink) that includes:
29
+
30
+ - **Animated Score Display** - Gradient text and grade badges
31
+ - **Tab Navigation** - Navigate through 6 analysis sections using arrow keys (1-6)
32
+ - **Real-time Loading** - Progress indicators for each analysis step
33
+ - **Color-coded Data** - All severity levels and scores are color-coded
34
+ - **Progress Bars** - Visual progress bars for dimension scores
35
+
36
+ ### Interactive Wizard
37
+
38
+ When you run `ai-lighthouse audit <url>` without flags, the interactive wizard appears:
39
+
40
+ 1. **Feature Selection** - Choose which analyses to run (AI Understanding, Chunking, etc.)
41
+ 2. **LLM Configuration** - If AI features selected, configure your LLM provider
42
+ 3. **Confirmation** - Review settings before starting the audit
43
+ 4. **Results** - View results in the interactive terminal UI
44
+
45
+ The wizard mode makes it easy to configure audits without memorizing command-line flags.
46
+
12
47
  ## Usage
13
48
 
14
- ### Audit a single page
49
+ ### Audit with Presets (Recommended)
50
+
51
+ Presets provide pre-configured scanning profiles for common use cases:
15
52
 
16
53
  ```bash
17
- ai-lighthouse audit https://example.com --output html
54
+ # Fast scan with core rules only (~5-10 seconds)
55
+ ai-lighthouse audit https://example.com --preset basic
56
+
57
+ # Balanced scan with AI insights (~30-60 seconds) - Recommended
58
+ ai-lighthouse audit https://example.com --preset ai-optimized
59
+
60
+ # Comprehensive scan with all features (~2-5 minutes)
61
+ ai-lighthouse audit https://example.com --preset full
62
+
63
+ # Quick scan showing only critical issues (~3-5 seconds)
64
+ ai-lighthouse audit https://example.com --preset minimal
18
65
  ```
19
66
 
20
- ### Audit with specific rules
67
+ ### List Available Presets
21
68
 
22
69
  ```bash
23
- ai-lighthouse audit https://example.com --rules strict --enable-llm
70
+ ai-lighthouse presets
24
71
  ```
25
72
 
26
- ### Crawl multiple pages
73
+ ### Configure LLM Provider (for AI-powered presets)
27
74
 
28
75
  ```bash
29
- ai-lighthouse crawl https://example.com --depth 2 --sitemap
76
+ # Using Ollama (local, free)
77
+ ai-lighthouse audit https://example.com --preset ai-optimized \
78
+ --llm-provider ollama \
79
+ --llm-model qwen2.5:0.5b
80
+
81
+ # Using OpenAI
82
+ ai-lighthouse audit https://example.com --preset ai-optimized \
83
+ --llm-provider openai \
84
+ --llm-model gpt-4o-mini \
85
+ --llm-api-key sk-...
86
+
87
+ # Using Anthropic
88
+ ai-lighthouse audit https://example.com --preset full \
89
+ --llm-provider anthropic \
90
+ --llm-model claude-3-5-sonnet-20241022 \
91
+ --llm-api-key sk-ant-...
30
92
  ```
31
93
 
32
- ### Generate report from saved results
94
+ ### Output Formats
33
95
 
34
96
  ```bash
35
- ai-lighthouse report ./.ai-lighthouse/last_run.json --open
97
+ # Save as JSON
98
+ ai-lighthouse audit https://example.com --preset basic --output json
99
+
100
+ # Generate HTML report
101
+ ai-lighthouse audit https://example.com --preset ai-optimized --output html
102
+
103
+ # Generate PDF report
104
+ ai-lighthouse audit https://example.com --preset full --output pdf
105
+
106
+ # Interactive terminal UI (default)
107
+ ai-lighthouse audit https://example.com --preset ai-optimized
108
+ ```
109
+
110
+ ### CI/CD Integration
111
+
112
+ ```bash
113
+ # Exit with code 1 if score is below threshold
114
+ ai-lighthouse audit https://example.com --preset minimal --threshold 80
115
+ ```
116
+
117
+ ### Advanced: Override Preset Defaults
118
+
119
+ You can override any preset option:
120
+
121
+ ```bash
122
+ # Use ai-optimized preset but enable hallucination detection
123
+ ai-lighthouse audit https://example.com --preset ai-optimized \
124
+ --enable-hallucination
125
+
126
+ # Use full preset but limit to 10 issues
127
+ ai-lighthouse audit https://example.com --preset full \
128
+ --max-issues 10
36
129
  ```
37
130
 
38
131
  ## Commands
@@ -41,57 +134,37 @@ ai-lighthouse report ./.ai-lighthouse/last_run.json --open
41
134
 
42
135
  Audit a single webpage for AI readiness.
43
136
 
44
- **Options:**
45
- - `-o, --output <format>` - Output format: json, html, lhr, csv (default: json)
46
- - `-r, --rules <preset>` - Rule preset: default, strict, minimal (default: default)
47
- - `-d, --depth <number>` - Crawl depth for multi-page audits (default: 1)
48
- - `-p, --pages <urls>` - Comma-separated list of specific pages to audit
49
- - `--cache-ttl <seconds>` - Cache TTL in seconds to avoid re-fetching
50
- - `--threshold <score>` - Minimum score threshold (exit 1 if below)
51
- - `--max-chunk-tokens <number>` - Maximum tokens per content chunk (default: 1200)
52
- - `--chunking-strategy <strategy>` - Chunking strategy: auto, heading-based, paragraph-based (default: auto)
53
- - `--enable-chunking` - Enable detailed content chunking analysis
54
- - `--enable-extractability` - Enable extractability mapping
55
- - `--enable-hallucination` - Enable hallucination detection
56
- - `--enable-llm` - Enable LLM comprehension analysis
57
- - `--min-impact <number>` - Minimum impact score to include (default: 8)
58
- - `--min-confidence <number>` - Minimum confidence to include 0-1 (default: 0.7)
59
- - `--max-issues <number>` - Maximum issues to return (default: 20)
60
- - `--llm-provider <provider>` - LLM provider: openai, anthropic, ollama, local
61
- - `--llm-model <model>` - LLM model name
62
- - `--llm-base-url <url>` - LLM API base URL
63
- - `--llm-api-key <key>` - LLM API key
137
+ **Primary Options:**
64
138
 
65
- **Examples:**
139
+ - `-o, --output <format>` - Output format: json, html, pdf, interactive (default: interactive)
140
+ - `-p, --preset <name>` - Preset configuration: basic, ai-optimized, full, minimal
66
141
 
67
- ```bash
68
- # Basic audit
69
- ai-lighthouse audit https://example.com
142
+ **LLM Configuration (for AI-powered presets):**
70
143
 
71
- # HTML report with all features enabled
72
- ai-lighthouse audit https://example.com \
73
- --output html \
74
- --enable-chunking \
75
- --enable-extractability \
76
- --enable-hallucination \
77
- --enable-llm \
78
- --llm-provider ollama \
79
- --llm-model qwen2.5:0.5b
144
+ - `--llm-provider <provider>` - LLM provider: openai, anthropic, ollama (default: ollama)
145
+ - `--llm-model <model>` - LLM model name (e.g., qwen2.5:0.5b, gpt-4o-mini)
146
+ - `--llm-api-key <key>` - LLM API key (for OpenAI, Anthropic, etc.)
147
+ - `--llm-base-url <url>` - LLM API base URL (for custom endpoints)
80
148
 
81
- # Force paragraph-based chunking for consistent chunk sizes
82
- ai-lighthouse audit https://example.com \
83
- --enable-chunking \
84
- --chunking-strategy paragraph-based \
85
- --max-chunk-tokens 1000
149
+ **Advanced Overrides (for power users):**
86
150
 
87
- # Force heading-based chunking for semantic sections
88
- ai-lighthouse audit https://example.com \
89
- --enable-chunking \
90
- --chunking-strategy heading-based
151
+ - `--enable-chunking` - Override: Enable chunking analysis
152
+ - `--enable-extractability` - Override: Enable extractability mapping
153
+ - `--enable-hallucination` - Override: Enable hallucination detection
154
+ - `--enable-llm` - Override: Enable LLM comprehension
155
+ - `--min-impact <number>` - Override: Minimum impact score to include
156
+ - `--min-confidence <number>` - Override: Minimum confidence (0-1)
157
+ - `--max-issues <number>` - Override: Maximum issues to show
158
+ - `--max-chunk-tokens <number>` - Override: Max tokens per chunk
159
+ - `--chunking-strategy <strategy>` - Override: Chunking strategy (auto, heading-based, paragraph-based)
91
160
 
92
- # CI/CD integration with score threshold
93
- ai-lighthouse audit https://example.com --threshold 80
94
- ```
161
+ **Utility Options:**
162
+
163
+ - `--threshold <score>` - Exit with code 1 if score is below this threshold
164
+
165
+ ### `presets`
166
+
167
+ List all available preset configurations with descriptions and estimated durations.
95
168
 
96
169
  ### `crawl <url>`
97
170
 
package/dist/index.js CHANGED
@@ -1063,11 +1063,162 @@ var SetupWizard = ({ onComplete, initialUrl }) => {
1063
1063
  )), /* @__PURE__ */ React9.createElement(Box9, { marginTop: 1 }, /* @__PURE__ */ React9.createElement(Text9, { dimColor: true }, "Press Enter to continue (leave empty for default)"))));
1064
1064
  };
1065
1065
 
1066
+ // src/presets.ts
1067
+ var BASIC_PRESET = {
1068
+ name: "basic",
1069
+ description: "Fast scan with core rules only (no LLM)",
1070
+ estimatedDuration: "5-10 seconds",
1071
+ // Filtering
1072
+ minImpactScore: 8,
1073
+ minConfidence: 0.7,
1074
+ maxIssues: 20,
1075
+ // Features
1076
+ enableLLM: false,
1077
+ enableChunking: false,
1078
+ enableExtractability: false,
1079
+ enableHallucinationDetection: false,
1080
+ // LLM config (disabled)
1081
+ llmConfig: void 0
1082
+ };
1083
+ var AI_OPTIMIZED_PRESET = {
1084
+ name: "ai-optimized",
1085
+ description: "Balanced scan with AI comprehension and messaging alignment",
1086
+ estimatedDuration: "30-60 seconds",
1087
+ // Filtering
1088
+ minImpactScore: 8,
1089
+ minConfidence: 0.7,
1090
+ maxIssues: 20,
1091
+ // Features (balanced approach)
1092
+ enableLLM: true,
1093
+ enableChunking: false,
1094
+ // Skip for speed
1095
+ enableExtractability: false,
1096
+ // Skip for speed
1097
+ enableHallucinationDetection: false,
1098
+ // Skip for speed
1099
+ // LLM config (will be populated from CLI flags)
1100
+ llmConfig: {
1101
+ provider: "ollama",
1102
+ // Default to local
1103
+ model: "qwen2.5:0.5b",
1104
+ maxTokens: 2e3,
1105
+ temperature: 0.3
1106
+ }
1107
+ };
1108
+ var FULL_PRESET = {
1109
+ name: "full",
1110
+ description: "Comprehensive scan with all features enabled",
1111
+ estimatedDuration: "2-5 minutes",
1112
+ // Filtering (show more)
1113
+ minImpactScore: 5,
1114
+ minConfidence: 0.6,
1115
+ maxIssues: 50,
1116
+ // Features (everything enabled)
1117
+ enableLLM: true,
1118
+ enableChunking: true,
1119
+ enableExtractability: true,
1120
+ enableHallucinationDetection: true,
1121
+ // Chunking config
1122
+ chunkingStrategy: "auto",
1123
+ maxChunkTokens: 1200,
1124
+ // LLM config (will be populated from CLI flags)
1125
+ llmConfig: {
1126
+ provider: "ollama",
1127
+ model: "qwen2.5:0.5b",
1128
+ maxTokens: 2e3,
1129
+ temperature: 0.3
1130
+ }
1131
+ };
1132
+ var MINIMAL_PRESET = {
1133
+ name: "minimal",
1134
+ description: "Quick scan showing only critical issues",
1135
+ estimatedDuration: "3-5 seconds",
1136
+ // Strict filtering
1137
+ minImpactScore: 15,
1138
+ minConfidence: 0.8,
1139
+ maxIssues: 10,
1140
+ // Features (minimal)
1141
+ enableLLM: false,
1142
+ enableChunking: false,
1143
+ enableExtractability: false,
1144
+ enableHallucinationDetection: false,
1145
+ // LLM config (disabled)
1146
+ llmConfig: void 0
1147
+ };
1148
+ var PRESETS = {
1149
+ basic: BASIC_PRESET,
1150
+ "ai-optimized": AI_OPTIMIZED_PRESET,
1151
+ full: FULL_PRESET,
1152
+ minimal: MINIMAL_PRESET
1153
+ };
1154
+ function getPreset(name) {
1155
+ const preset = PRESETS[name];
1156
+ if (!preset) {
1157
+ throw new Error(`Unknown preset: ${name}. Available: ${Object.keys(PRESETS).join(", ")}`);
1158
+ }
1159
+ return { ...preset };
1160
+ }
1161
+ function listPresets() {
1162
+ return Object.values(PRESETS).map((preset) => ({
1163
+ name: preset.name,
1164
+ description: preset.description,
1165
+ duration: preset.estimatedDuration
1166
+ }));
1167
+ }
1168
+ function mergePresetWithOptions(presetName, userOptions) {
1169
+ const preset = getPreset(presetName);
1170
+ const merged = {
1171
+ minImpactScore: preset.minImpactScore,
1172
+ minConfidence: preset.minConfidence,
1173
+ maxIssues: preset.maxIssues,
1174
+ enableLLM: preset.enableLLM,
1175
+ enableChunking: preset.enableChunking,
1176
+ enableExtractability: preset.enableExtractability,
1177
+ enableHallucinationDetection: preset.enableHallucinationDetection,
1178
+ chunkingStrategy: preset.chunkingStrategy,
1179
+ maxChunkTokens: preset.maxChunkTokens
1180
+ };
1181
+ if (userOptions.minImpactScore !== void 0) merged.minImpactScore = userOptions.minImpactScore;
1182
+ if (userOptions.minConfidence !== void 0) merged.minConfidence = userOptions.minConfidence;
1183
+ if (userOptions.maxIssues !== void 0) merged.maxIssues = userOptions.maxIssues;
1184
+ if (userOptions.enableLLM !== void 0) merged.enableLLM = userOptions.enableLLM;
1185
+ if (userOptions.enableChunking !== void 0) merged.enableChunking = userOptions.enableChunking;
1186
+ if (userOptions.enableExtractability !== void 0) merged.enableExtractability = userOptions.enableExtractability;
1187
+ if (userOptions.enableHallucinationDetection !== void 0) merged.enableHallucinationDetection = userOptions.enableHallucinationDetection;
1188
+ if (userOptions.chunkingStrategy !== void 0) merged.chunkingStrategy = userOptions.chunkingStrategy;
1189
+ if (userOptions.maxChunkTokens !== void 0) merged.maxChunkTokens = userOptions.maxChunkTokens;
1190
+ if (merged.enableLLM) {
1191
+ merged.llmConfig = {
1192
+ provider: userOptions.llmProvider || preset.llmConfig?.provider || "ollama",
1193
+ model: userOptions.llmModel || preset.llmConfig?.model,
1194
+ apiKey: userOptions.llmApiKey || preset.llmConfig?.apiKey,
1195
+ baseUrl: userOptions.llmBaseUrl || preset.llmConfig?.baseUrl,
1196
+ maxTokens: preset.llmConfig?.maxTokens,
1197
+ temperature: preset.llmConfig?.temperature
1198
+ };
1199
+ }
1200
+ return merged;
1201
+ }
1202
+
1066
1203
  // src/commands/audit.ts
1067
1204
  function auditCommand(program2) {
1068
- program2.command("audit").description("Audit a website for AI readiness").argument("<url>", "URL to audit").option("-o, --output <format>", "Output format: json, html, pdf, lhr, csv, interactive", "interactive").option("-r, --rules <preset>", "Rule preset: default, strict, minimal", "default").option("-d, --depth <number>", "Crawl depth (for multi-page audits)", parseInt, 1).option("-p, --pages <urls>", "Comma-separated list of specific pages to audit").option("--cache-ttl <seconds>", "Cache TTL in seconds to avoid re-fetching", parseInt).option("--threshold <score>", "Minimum score threshold (exit 1 if below)", parseInt).option("--max-chunk-tokens <number>", "Maximum tokens per content chunk", parseInt, 1200).option("--chunking-strategy <strategy>", "Chunking strategy: auto, heading-based, paragraph-based", "auto").option("--enable-chunking", "Enable detailed content chunking analysis", false).option("--enable-extractability", "Enable extractability mapping", false).option("--enable-hallucination", "Enable hallucination detection", false).option("--enable-llm", "Enable LLM comprehension analysis", false).option("--min-impact <number>", "Minimum impact score to include", parseInt, 8).option("--min-confidence <number>", "Minimum confidence to include (0-1)", parseFloat, 0.7).option("--max-issues <number>", "Maximum issues to return", parseInt, 20).option("--llm-provider <provider>", "LLM provider: openai, anthropic, ollama, local").option("--llm-model <model>", "LLM model name").option("--llm-base-url <url>", "LLM API base URL").option("--llm-api-key <key>", "LLM API key").action(async (url, options) => {
1205
+ program2.command("audit").description("Audit a website for AI readiness").argument("<url>", "URL to audit").option("-o, --output <format>", "Output format: json, html, pdf, lhr, csv, interactive", "interactive").option("-p, --preset <name>", "Preset configuration: basic, ai-optimized, full, minimal").option("--llm-provider <provider>", "LLM provider: openai, anthropic, ollama (default: ollama)").option("--llm-model <model>", "LLM model name (e.g., qwen2.5:0.5b, gpt-4o-mini)").option("--llm-api-key <key>", "LLM API key (for OpenAI, Anthropic, etc.)").option("--llm-base-url <url>", "LLM API base URL (for custom endpoints)").option("--enable-chunking", "Override: Enable chunking analysis", false).option("--enable-extractability", "Override: Enable extractability mapping", false).option("--enable-hallucination", "Override: Enable hallucination detection", false).option("--enable-llm", "Override: Enable LLM comprehension", false).option("--min-impact <number>", "Override: Minimum impact score", parseInt).option("--min-confidence <number>", "Override: Minimum confidence (0-1)", parseFloat).option("--max-issues <number>", "Override: Maximum issues to show", parseInt).option("--max-chunk-tokens <number>", "Override: Max tokens per chunk", parseInt).option("--chunking-strategy <strategy>", "Override: Chunking strategy (auto, heading-based, paragraph-based)").option("--threshold <score>", "Exit with code 1 if score is below this threshold", parseInt).addHelpText("after", `
1206
+ Presets:
1207
+ basic Fast scan with core rules only (~5-10 seconds)
1208
+ ai-optimized Balanced scan with AI insights (~30-60 seconds) [Recommended]
1209
+ full Comprehensive scan with all features (~2-5 minutes)
1210
+ minimal Only critical issues (~3-5 seconds)
1211
+
1212
+ Examples:
1213
+ $ ai-lighthouse audit https://example.com
1214
+ $ ai-lighthouse audit https://example.com --preset ai-optimized
1215
+ $ ai-lighthouse audit https://example.com --preset full --llm-provider openai --llm-api-key sk-...
1216
+ $ ai-lighthouse audit https://example.com --preset basic --output json
1217
+ $ ai-lighthouse audit https://example.com --preset minimal --threshold 80
1218
+ `).action(async (url, options) => {
1219
+ const hasPreset = options.preset !== void 0;
1069
1220
  const hasFeatureFlags = options.enableChunking || options.enableExtractability || options.enableHallucination || options.enableLlm || options.llmProvider;
1070
- if (options.output === "interactive" && !hasFeatureFlags) {
1221
+ if (options.output === "interactive" && !hasPreset && !hasFeatureFlags) {
1071
1222
  const originalConsoleError = console.error;
1072
1223
  const originalConsoleWarn = console.warn;
1073
1224
  console.error = () => {
@@ -1118,24 +1269,43 @@ function auditCommand(program2) {
1118
1269
  );
1119
1270
  try {
1120
1271
  const urlObj = new URL(url);
1121
- const scanOptions = {
1122
- maxChunkTokens: options.maxChunkTokens,
1123
- chunkingStrategy: options.chunkingStrategy,
1124
- enableChunking: options.enableChunking,
1125
- enableExtractability: options.enableExtractability,
1126
- enableHallucinationDetection: options.enableHallucination,
1127
- enableLLM: options.enableLlm,
1128
- minImpactScore: options.minImpact,
1129
- minConfidence: options.minConfidence,
1130
- maxIssues: options.maxIssues
1131
- };
1132
- if (options.enableLlm && options.llmProvider) {
1133
- scanOptions.llmConfig = {
1134
- provider: options.llmProvider,
1135
- model: options.llmModel,
1136
- baseUrl: options.llmBaseUrl,
1137
- apiKey: options.llmApiKey
1272
+ let scanOptions;
1273
+ if (hasPreset) {
1274
+ scanOptions = mergePresetWithOptions(options.preset, {
1275
+ maxChunkTokens: options.maxChunkTokens,
1276
+ chunkingStrategy: options.chunkingStrategy,
1277
+ enableChunking: options.enableChunking,
1278
+ enableExtractability: options.enableExtractability,
1279
+ enableHallucinationDetection: options.enableHallucination,
1280
+ enableLLM: options.enableLlm,
1281
+ minImpactScore: options.minImpact,
1282
+ minConfidence: options.minConfidence,
1283
+ maxIssues: options.maxIssues,
1284
+ llmProvider: options.llmProvider,
1285
+ llmModel: options.llmModel,
1286
+ llmApiKey: options.llmApiKey,
1287
+ llmBaseUrl: options.llmBaseUrl
1288
+ });
1289
+ } else {
1290
+ scanOptions = {
1291
+ maxChunkTokens: options.maxChunkTokens || 1200,
1292
+ chunkingStrategy: options.chunkingStrategy || "auto",
1293
+ enableChunking: options.enableChunking || false,
1294
+ enableExtractability: options.enableExtractability || false,
1295
+ enableHallucinationDetection: options.enableHallucination || false,
1296
+ enableLLM: options.enableLlm || false,
1297
+ minImpactScore: options.minImpact ?? 8,
1298
+ minConfidence: options.minConfidence ?? 0.7,
1299
+ maxIssues: options.maxIssues ?? 20
1138
1300
  };
1301
+ if (options.enableLlm && options.llmProvider) {
1302
+ scanOptions.llmConfig = {
1303
+ provider: options.llmProvider,
1304
+ model: options.llmModel,
1305
+ baseUrl: options.llmBaseUrl,
1306
+ apiKey: options.llmApiKey
1307
+ };
1308
+ }
1139
1309
  }
1140
1310
  rerender(
1141
1311
  React10.createElement(AuditReportUI, {
@@ -1190,25 +1360,46 @@ function auditCommand(program2) {
1190
1360
  const spinner = ora("Starting audit...").start();
1191
1361
  try {
1192
1362
  const urlObj = new URL(url);
1193
- spinner.text = `Auditing ${chalk2.cyan(urlObj.href)}...`;
1194
- const scanOptions = {
1195
- maxChunkTokens: options.maxChunkTokens,
1196
- chunkingStrategy: options.chunkingStrategy,
1197
- enableChunking: options.enableChunking,
1198
- enableExtractability: options.enableExtractability,
1199
- enableHallucinationDetection: options.enableHallucination,
1200
- enableLLM: options.enableLlm,
1201
- minImpactScore: options.minImpact,
1202
- minConfidence: options.minConfidence,
1203
- maxIssues: options.maxIssues
1204
- };
1205
- if (options.enableLlm && options.llmProvider) {
1206
- scanOptions.llmConfig = {
1207
- provider: options.llmProvider,
1208
- model: options.llmModel,
1209
- baseUrl: options.llmBaseUrl,
1210
- apiKey: options.llmApiKey
1363
+ let scanOptions;
1364
+ if (hasPreset) {
1365
+ const presetConfig = getPreset(options.preset);
1366
+ spinner.text = `Using ${chalk2.cyan(options.preset)} preset (${presetConfig.estimatedDuration})...`;
1367
+ scanOptions = mergePresetWithOptions(options.preset, {
1368
+ maxChunkTokens: options.maxChunkTokens,
1369
+ chunkingStrategy: options.chunkingStrategy,
1370
+ enableChunking: options.enableChunking,
1371
+ enableExtractability: options.enableExtractability,
1372
+ enableHallucinationDetection: options.enableHallucination,
1373
+ enableLLM: options.enableLlm,
1374
+ minImpactScore: options.minImpact,
1375
+ minConfidence: options.minConfidence,
1376
+ maxIssues: options.maxIssues,
1377
+ llmProvider: options.llmProvider,
1378
+ llmModel: options.llmModel,
1379
+ llmApiKey: options.llmApiKey,
1380
+ llmBaseUrl: options.llmBaseUrl
1381
+ });
1382
+ } else {
1383
+ spinner.text = `Auditing ${chalk2.cyan(urlObj.href)}...`;
1384
+ scanOptions = {
1385
+ maxChunkTokens: options.maxChunkTokens || 1200,
1386
+ chunkingStrategy: options.chunkingStrategy || "auto",
1387
+ enableChunking: options.enableChunking || false,
1388
+ enableExtractability: options.enableExtractability || false,
1389
+ enableHallucinationDetection: options.enableHallucination || false,
1390
+ enableLLM: options.enableLlm || false,
1391
+ minImpactScore: options.minImpact ?? 8,
1392
+ minConfidence: options.minConfidence ?? 0.7,
1393
+ maxIssues: options.maxIssues ?? 20
1211
1394
  };
1395
+ if (options.enableLlm && options.llmProvider) {
1396
+ scanOptions.llmConfig = {
1397
+ provider: options.llmProvider,
1398
+ model: options.llmModel,
1399
+ baseUrl: options.llmBaseUrl,
1400
+ apiKey: options.llmApiKey
1401
+ };
1402
+ }
1212
1403
  }
1213
1404
  spinner.text = "Scanning page...";
1214
1405
  const result = await analyzeUrlWithRules(url, scanOptions);
@@ -2567,6 +2758,29 @@ function auditWizardCommand(program2) {
2567
2758
  });
2568
2759
  }
2569
2760
 
2761
+ // src/commands/presets.ts
2762
+ import chalk5 from "chalk";
2763
+ function presetsCommand(program2) {
2764
+ program2.command("presets").description("List available preset configurations").action(() => {
2765
+ console.log("\n" + chalk5.bold.cyan("\u{1F3AF} Available Presets\n"));
2766
+ const presets = listPresets();
2767
+ presets.forEach((preset) => {
2768
+ const badge = preset.name === "ai-optimized" ? chalk5.green("[Recommended]") : "";
2769
+ console.log(chalk5.bold(preset.name) + " " + badge);
2770
+ console.log(chalk5.dim(` ${preset.description}`));
2771
+ console.log(chalk5.dim(` Duration: ${preset.duration}`));
2772
+ console.log("");
2773
+ });
2774
+ console.log(chalk5.dim("Usage:"));
2775
+ console.log(chalk5.dim(" ai-lighthouse audit <url> --preset <name>"));
2776
+ console.log("");
2777
+ console.log(chalk5.dim("Examples:"));
2778
+ console.log(chalk5.dim(" ai-lighthouse audit https://example.com --preset basic"));
2779
+ console.log(chalk5.dim(" ai-lighthouse audit https://example.com --preset ai-optimized"));
2780
+ console.log("");
2781
+ });
2782
+ }
2783
+
2570
2784
  // src/index.ts
2571
2785
  var program = new Command();
2572
2786
  program.name("ai-lighthouse").description("AI Lighthouse - Audit websites for AI readiness and SEO optimization").version("1.0.0");
@@ -2574,4 +2788,5 @@ auditWizardCommand(program);
2574
2788
  auditCommand(program);
2575
2789
  crawlCommand(program);
2576
2790
  reportCommand(program);
2791
+ presetsCommand(program);
2577
2792
  program.parse();
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@ai-lighthouse/cli",
3
- "version": "1.0.2",
3
+ "version": "1.0.3",
4
4
  "description": "AI Lighthouse CLI - Audit websites for AI readiness",
5
5
  "type": "module",
6
6
  "bin": {
@@ -28,7 +28,7 @@
28
28
  "author": "",
29
29
  "license": "MIT",
30
30
  "dependencies": {
31
- "@ai-lighthouse/scanner": "^1.0.2",
31
+ "@ai-lighthouse/scanner": "^1.0.3",
32
32
  "chalk": "^5.3.0",
33
33
  "commander": "^12.1.0",
34
34
  "html-pdf-node": "^1.0.8",