seo-intel 1.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46) hide show
  1. package/.env.example +41 -0
  2. package/LICENSE +75 -0
  3. package/README.md +243 -0
  4. package/Start SEO Intel.bat +9 -0
  5. package/Start SEO Intel.command +8 -0
  6. package/cli.js +3727 -0
  7. package/config/example.json +29 -0
  8. package/config/setup-wizard.js +522 -0
  9. package/crawler/index.js +566 -0
  10. package/crawler/robots.js +103 -0
  11. package/crawler/sanitize.js +124 -0
  12. package/crawler/schema-parser.js +168 -0
  13. package/crawler/sitemap.js +103 -0
  14. package/crawler/stealth.js +393 -0
  15. package/crawler/subdomain-discovery.js +341 -0
  16. package/db/db.js +213 -0
  17. package/db/schema.sql +120 -0
  18. package/exports/competitive.js +186 -0
  19. package/exports/heuristics.js +67 -0
  20. package/exports/queries.js +197 -0
  21. package/exports/suggestive.js +230 -0
  22. package/exports/technical.js +180 -0
  23. package/exports/templates.js +77 -0
  24. package/lib/gate.js +204 -0
  25. package/lib/license.js +369 -0
  26. package/lib/oauth.js +432 -0
  27. package/lib/updater.js +324 -0
  28. package/package.json +68 -0
  29. package/reports/generate-html.js +6194 -0
  30. package/reports/generate-site-graph.js +949 -0
  31. package/reports/gsc-loader.js +190 -0
  32. package/scheduler.js +142 -0
  33. package/seo-audit.js +619 -0
  34. package/seo-intel.png +0 -0
  35. package/server.js +602 -0
  36. package/setup/ROADMAP.md +109 -0
  37. package/setup/checks.js +483 -0
  38. package/setup/config-builder.js +227 -0
  39. package/setup/engine.js +65 -0
  40. package/setup/installers.js +197 -0
  41. package/setup/models.js +328 -0
  42. package/setup/openclaw-bridge.js +329 -0
  43. package/setup/validator.js +395 -0
  44. package/setup/web-routes.js +688 -0
  45. package/setup/wizard.html +2920 -0
  46. package/start-seo-intel.sh +8 -0
@@ -0,0 +1,328 @@
1
+ /**
2
+ * SEO Intel — Model Recommendations
3
+ *
4
+ * Two-tier model system:
5
+ * Extraction tier: local Ollama model for structured JSON extraction during crawl
6
+ * Analysis tier: powerful cloud model (or large local) for strategic gap analysis
7
+ *
8
+ * Includes hardware-based auto-recommendation using VRAM detection from checks.js
9
+ */
10
+
11
// ── Extraction Models (local, runs during crawl) ────────────────────────────
//
// The extraction task is structured JSON extraction:
//   Input: ~1-4KB text (URL + title + headings + body excerpt)
//   Output: ~900 tokens JSON (13 fields: enums, keyword arrays, entities)
//   Complexity: low-medium — pattern matching + classification
//   Minimum viable: 4B parameters for reliable JSON output
//
// Entry fields:
//   id          — Ollama model tag ('family:size'); matched against the names
//                 Ollama reports and used by the recommend* functions below
//   family      — model family prefix (used for install detection)
//   minVramMB   — hard VRAM floor; recommendExtractionModel filters on this
//   tier / vram / speed / quality / description — display strings,
//                 presumably surfaced by the setup wizard UI (verify)
//   recommended — default pick (exactly one entry is true)
//   legacy      — previous-generation model kept for users who already have it

export const EXTRACTION_MODELS = [
  {
    id: 'qwen3.5:4b',
    name: 'Qwen 3.5 4B',
    family: 'qwen3.5',
    tier: 'budget',
    vram: '~3 GB',
    minVramMB: 2500,
    speed: '~2s/page',
    quality: 'good',
    description: 'Minimum recommended. Reliable JSON extraction, decent keyword detection. Great for laptops and older GPUs.',
    recommended: false,
  },
  {
    id: 'qwen3.5:9b',
    name: 'Qwen 3.5 9B',
    family: 'qwen3.5',
    tier: 'balanced',
    vram: '~5 GB',
    minVramMB: 4500,
    speed: '~3s/page',
    quality: 'better',
    description: 'Default recommendation. Better entity detection and intent classification. Works on most modern GPUs.',
    recommended: true,
  },
  {
    id: 'qwen3.5:27b',
    name: 'Qwen 3.5 27B',
    family: 'qwen3.5',
    tier: 'quality',
    vram: '~17 GB',
    minVramMB: 15000,
    speed: '~6s/page',
    quality: 'great',
    description: 'Nuanced intent classification, better keyword quality. Needs RTX 3090+ or M-series with 24GB+.',
    recommended: false,
  },
  {
    id: 'qwen3.5:35b',
    name: 'Qwen 3.5 35B',
    family: 'qwen3.5',
    tier: 'power',
    vram: '~22 GB',
    minVramMB: 18000,
    speed: '~8s/page',
    quality: 'excellent',
    description: 'Near-cloud quality extraction. Needs RTX 3090/4090 or M2 Ultra. Overkill for most users.',
    recommended: false,
  },
  // Alternative providers
  {
    id: 'nemotron-nano:4b',
    name: 'Nemotron 3 Nano 4B',
    family: 'nemotron',
    tier: 'budget',
    vram: '~3 GB',
    minVramMB: 2500,
    speed: '~2s/page',
    quality: 'good',
    description: 'NVIDIA agentic model. Efficient extraction with tool-use training. Good alternative to Qwen 3.5 4B.',
    recommended: false,
  },
  // Legacy / fallback models (already installed by many users)
  {
    id: 'qwen3:4b',
    name: 'Qwen 3 4B (legacy)',
    family: 'qwen3',
    tier: 'budget',
    vram: '~3 GB',
    minVramMB: 2500,
    speed: '~2s/page',
    quality: 'good',
    description: 'Previous generation. Works well but Qwen 3.5 is better if you can upgrade.',
    recommended: false,
    legacy: true,
  },
  {
    id: 'qwen3:8b',
    name: 'Qwen 3 8B (legacy)',
    family: 'qwen3',
    tier: 'balanced',
    vram: '~5 GB',
    minVramMB: 4500,
    speed: '~3s/page',
    quality: 'better',
    description: 'Previous generation. Solid extraction. Qwen 3.5 recommended for new installs.',
    recommended: false,
    legacy: true,
  },
];
109
+
110
// ── Analysis Models (local Ollama, runs during analysis) ─────────────────────
//
// The analysis task is heavy strategic reasoning:
//   Input: 10K-100K tokens (full crawl dataset, keyword matrices, competitor data)
//   Output: structured JSON with strategic recommendations, positioning, gap analysis
//   Complexity: high — comparative reasoning across multiple domains
//   Minimum viable: 14B+ parameters for reliable strategic output
//   Cloud models (Claude, GPT-4o, DeepSeek) available via OpenClaw agent setup
//
// Same field conventions as EXTRACTION_MODELS; `type: 'local'` distinguishes
// these from cloud models configured elsewhere, and `context`/`costNote` are
// display strings for the setup UI.

export const ANALYSIS_MODELS = [
  {
    id: 'qwen3:14b',
    name: 'Qwen 3 14B',
    family: 'qwen3',
    type: 'local',
    vram: '~9 GB',
    minVramMB: 8000,
    context: '32K tokens',
    costNote: 'Free (your GPU)',
    quality: 'decent',
    recommended: false,
    description: 'Minimum viable for analysis. Handles small-medium projects. Needs RTX 3070+ or M1 Pro+.',
  },
  {
    id: 'qwen3.5:27b',
    name: 'Qwen 3.5 27B',
    family: 'qwen3.5',
    type: 'local',
    vram: '~17 GB',
    minVramMB: 15000,
    context: '32K tokens',
    costNote: 'Free (your GPU)',
    quality: 'good',
    recommended: true,
    description: 'Sweet spot for local analysis. Strong reasoning with 27.8B params. Needs RTX 3090/4080+ or M-series with 24GB+.',
  },
  {
    id: 'qwen3.5:35b',
    name: 'Qwen 3.5 35B',
    family: 'qwen3.5',
    type: 'local',
    vram: '~22 GB',
    minVramMB: 18000,
    context: '32K tokens',
    costNote: 'Free (your GPU)',
    quality: 'great',
    recommended: false,
    description: 'High quality analysis. Best Qwen 3.5 for strategic reasoning. Needs RTX 3090/4090 or M2 Ultra.',
  },
  {
    id: 'nemotron-3-super:120b',
    name: 'Nemotron 3 Super 120B',
    family: 'nemotron-3-super',
    type: 'local',
    vram: '~87 GB',
    minVramMB: 48000,
    context: '32K tokens',
    costNote: 'Free (your GPU)',
    quality: 'excellent',
    recommended: false,
    description: 'MoE — 120B total but only 12B active params. Excellent reasoning at efficient compute. Needs 64GB+ unified memory or multi-GPU.',
  },
];
173
+
174
// ── VRAM-Based Recommendations ──────────────────────────────────────────────

// Ascending VRAM brackets. recommendExtractionModel picks the first tier whose
// maxMB covers the detected VRAM (vramMB <= maxMB), so maxMB is an inclusive
// ceiling. `extraction` is the model id to suggest for that bracket
// (null = too little VRAM for any local extraction model).
// NOTE(review): a GPU reporting exactly 2500 MB lands in the "not enough" tier
// even though qwen3.5:4b's minVramMB is 2500 — confirm the boundary is intended.
const VRAM_TIERS = [
  { maxMB: 2500, extraction: null, note: 'Not enough VRAM for local extraction. Use cloud or CPU mode (slow).' },
  { maxMB: 4500, extraction: 'qwen3.5:4b', note: 'Budget tier — Qwen 3.5 4B fits your GPU.' },
  { maxMB: 8000, extraction: 'qwen3.5:9b', note: 'Balanced tier — Qwen 3.5 9B recommended for best quality/speed.' },
  { maxMB: 18000, extraction: 'qwen3.5:27b', note: 'Quality tier — Qwen 3.5 27B for nuanced extraction.' },
  { maxMB: 48000, extraction: 'qwen3.5:35b', note: 'Power tier — Qwen 3.5 35B for near-cloud quality.' },
  { maxMB: Infinity, extraction: 'qwen3.5:35b', note: 'Power tier — Qwen 3.5 35B recommended. Your GPU can handle anything.' },
];
184
+
185
/**
 * Recommend an extraction model based on available models and VRAM.
 * Priority: prefer already-installed models that match the VRAM tier, then the
 * tier's suggested model (uninstalled), then any installed catalog model.
 *
 * Fix: install detection previously used substring matching
 * (`startsWith(family)` + `includes(tag)`), so 'qwen3:14b' matched the '4b'
 * tag and 'qwen3.5:*' matched the 'qwen3' family — false "installed" reports.
 * Matching is now exact on family and tag (quantization suffixes like
 * '4b-q4_K_M' still match their base tag).
 *
 * @param {string[]} availableModels - models currently in Ollama (e.g. 'qwen3.5:9b')
 * @param {number} [vramMB] - detected VRAM in MB (0 if unknown)
 * @returns {{ model: object, installed: boolean, autoRecommended: boolean, note: string } | null}
 */
export function recommendExtractionModel(availableModels = [], vramMB = 0) {
  // True when an installed Ollama model name refers to catalog id `id`.
  // Requires an exact family match and an exact tag match (or the tag plus a
  // '-' suffix, to tolerate quantization variants).
  const matchesId = (installed, id) => {
    const [family, tag] = id.split(':');
    const [instFamily, instTag = ''] = installed.split(':');
    return instFamily === family && (instTag === tag || instTag.startsWith(`${tag}-`));
  };

  // Find VRAM tier (first bracket whose inclusive ceiling covers the VRAM)
  const tier = VRAM_TIERS.find(t => vramMB <= t.maxMB) || VRAM_TIERS[VRAM_TIERS.length - 1];

  // Preferred model order (newest → legacy)
  const preferenceOrder = [
    'qwen3.5:9b', 'qwen3.5:27b', 'qwen3.5:4b', 'qwen3.5:35b',
    'qwen3:8b', 'qwen3:4b', 'qwen3.5:0.6b',
  ];

  // Catalog entries that fit the detected VRAM (everything fits when unknown)
  const fittingModels = EXTRACTION_MODELS.filter(m => !vramMB || vramMB >= m.minVramMB);

  // 1. Best installed model that fits VRAM
  for (const prefId of preferenceOrder) {
    const isInstalled = availableModels.some(m => matchesId(m, prefId));
    const modelDef = fittingModels.find(m => m.id === prefId);
    if (isInstalled && modelDef) {
      return {
        model: modelDef,
        installed: true,
        autoRecommended: modelDef.id === tier.extraction,
        note: `Already installed. ${tier.note}`,
      };
    }
  }

  // 2. VRAM-recommended model (not installed)
  if (tier.extraction) {
    const modelDef = EXTRACTION_MODELS.find(m => m.id === tier.extraction);
    if (modelDef) {
      return {
        model: modelDef,
        installed: false,
        autoRecommended: true,
        note: `Recommended for your hardware. ${tier.note}`,
      };
    }
  }

  // 3. Any installed catalog model, even if it may not fit the detected VRAM.
  // (Previously matched by family alone, which could report a model as
  // installed when only a sibling size was present.)
  for (const prefId of preferenceOrder) {
    const isInstalled = availableModels.some(m => matchesId(m, prefId));
    const modelDef = EXTRACTION_MODELS.find(m => m.id === prefId);
    if (isInstalled && modelDef) {
      return {
        model: modelDef,
        installed: true,
        autoRecommended: false,
        note: `Installed but may be slow for your VRAM. Consider upgrading.`,
      };
    }
  }

  // Nothing installed and no tier suggestion (e.g. VRAM below all tiers).
  return null;
}
249
+
250
/**
 * Recommend an analysis model based on available Ollama models and VRAM.
 *
 * Fix: install detection previously used substring matching, so tags like
 * '14b' matched a '4b' preference and 'qwen3.5' models matched the 'qwen3'
 * family. Matching is now exact on family and tag (quantization suffixes
 * such as '27b-q4_K_M' still match their base tag).
 *
 * @param {string[]} availableModels - models currently in Ollama
 * @param {number} [vramMB] - detected VRAM in MB (0 if unknown)
 * @returns {{ model: object, installed: boolean, note: string }}
 */
export function recommendAnalysisModel(availableModels = [], vramMB = 0) {
  // Exact family + tag match (tag may carry a '-' quantization suffix).
  const matchesId = (installed, id) => {
    const [family, tag] = id.split(':');
    const [instFamily, instTag = ''] = installed.split(':');
    return instFamily === family && (instTag === tag || instTag.startsWith(`${tag}-`));
  };

  const preferenceOrder = [
    'qwen3.5:27b', 'qwen3.5:35b', 'qwen3:14b', 'nemotron-3-super:120b',
  ];

  // Catalog entries that fit the detected VRAM (everything fits when unknown)
  const fittingModels = ANALYSIS_MODELS.filter(m => !vramMB || vramMB >= m.minVramMB);

  // 1. Best installed model that fits VRAM
  for (const prefId of preferenceOrder) {
    const isInstalled = availableModels.some(m => matchesId(m, prefId));
    const modelDef = fittingModels.find(m => m.id === prefId);
    if (isInstalled && modelDef) {
      return {
        model: modelDef,
        installed: true,
        note: `Already installed. Ready for analysis.`,
      };
    }
  }

  // 2. VRAM-based recommendation (thresholds mirror each model's minVramMB)
  let recId = 'qwen3:14b'; // default minimum
  if (vramMB >= 48000) recId = 'nemotron-3-super:120b';
  else if (vramMB >= 18000) recId = 'qwen3.5:35b';
  else if (vramMB >= 15000) recId = 'qwen3.5:27b';

  const recModel = ANALYSIS_MODELS.find(m => m.id === recId);
  if (recModel) {
    return {
      model: recModel,
      installed: false,
      note: `Recommended for your hardware. Use OpenClaw for cloud models.`,
    };
  }

  // Defensive fallback — only reachable if the catalog ids above change.
  return {
    model: ANALYSIS_MODELS[0],
    installed: false,
    note: 'Minimum viable model for local analysis.',
  };
}
299
+
300
/**
 * Get all model recommendations for display.
 *
 * Fix: per-model `installed` flags previously used substring matching
 * (`startsWith(family)` + `includes(tag)`), so e.g. an installed 'qwen3:14b'
 * marked 'qwen3:4b' as installed. Matching is now exact on family and tag.
 *
 * @param {string[]} availableModels - models in Ollama
 * @param {object} envKeys - { GEMINI_API_KEY: true, ... } (currently unused; kept for API compatibility)
 * @param {number} vramMB - detected VRAM in MB (0 if unknown)
 * @returns {{ extraction: object, analysis: object, allExtraction: object[], allAnalysis: object[], vramMB: number }}
 */
export function getModelRecommendations(availableModels = [], envKeys = {}, vramMB = 0) {
  // Exact family + tag match (tag may carry a '-' quantization suffix).
  const matchesId = (installed, id) => {
    const [family, tag] = id.split(':');
    const [instFamily, instTag = ''] = installed.split(':');
    return instFamily === family && (instTag === tag || instTag.startsWith(`${tag}-`));
  };

  // Annotate a catalog entry with install/VRAM-fit flags for the UI.
  const annotate = (m) => ({
    ...m,
    installed: availableModels.some(am => matchesId(am, m.id)),
    fitsVram: !vramMB || vramMB >= m.minVramMB,
  });

  return {
    extraction: recommendExtractionModel(availableModels, vramMB),
    analysis: recommendAnalysisModel(availableModels, vramMB),
    allExtraction: EXTRACTION_MODELS.map(annotate),
    allAnalysis: ANALYSIS_MODELS.map(annotate),
    vramMB,
  };
}
@@ -0,0 +1,329 @@
1
+ /**
2
+ * SEO Intel — OpenClaw Setup Bridge
3
+ *
4
+ * When OpenClaw gateway is running, this module lets the setup process
5
+ * delegate to the agent for a conversational, intelligent setup flow.
6
+ *
7
+ * Instead of a rigid wizard with fixed steps, the agent:
8
+ * 1. Reads the system check results
9
+ * 2. Knows what's missing / misconfigured
10
+ * 3. Guides the user conversationally through fixes
11
+ * 4. Can troubleshoot errors in real-time
12
+ * 5. Configures everything via the setup engine API
13
+ *
14
+ * Falls back to the standard wizard if OpenClaw is not available.
15
+ */
16
+
17
+ import { existsSync, readFileSync, writeFileSync, mkdirSync } from 'fs';
18
+ import { join, dirname } from 'path';
19
+ import { fileURLToPath } from 'url';
20
+
21
// ESM has no __dirname builtin; derive it from this module's URL.
const __dirname = dirname(fileURLToPath(import.meta.url));
// Package root — one level up (this file lives in setup/).
const ROOT = join(__dirname, '..');

// Local OpenClaw gateway base URL (OpenAI-compatible API, loopback only).
const OPENCLAW_API = 'http://127.0.0.1:18789';
25
+
26
+ // ── OpenClaw Gateway Communication ─────────────────────────────────────────
27
+
28
/**
 * Send a message to OpenClaw's agent via the OpenAI-compatible API.
 * Returns the agent's text response ('' when the reply carries no content).
 *
 * @param {Array<{role: string, content: string}>} messages - chat transcript
 * @param {object} [opts]
 * @param {number} [opts.timeout] - abort after this many ms (default 60000)
 * @param {string} [opts.token] - optional bearer token for the gateway
 * @param {string} [opts.model] - model id to request (default 'default')
 * @throws {Error} on a non-2xx gateway response, or AbortError on timeout
 */
async function askAgent(messages, opts = {}) {
  const aborter = new AbortController();
  const timer = setTimeout(() => aborter.abort(), opts.timeout || 60000);

  const headers = { 'Content-Type': 'application/json' };
  if (opts.token) {
    headers['Authorization'] = `Bearer ${opts.token}`;
  }

  const payload = {
    model: opts.model || 'default',
    messages,
    temperature: 0.3,
    max_tokens: 2000,
  };

  try {
    const res = await fetch(`${OPENCLAW_API}/v1/chat/completions`, {
      method: 'POST',
      signal: aborter.signal,
      headers,
      body: JSON.stringify(payload),
    });

    if (!res.ok) {
      const err = await res.text();
      throw new Error(`OpenClaw API error: ${res.status} ${err}`);
    }

    const data = await res.json();
    return data.choices?.[0]?.message?.content || '';
  } finally {
    // Always release the timer, success or failure.
    clearTimeout(timer);
  }
}
63
+
64
/**
 * Check if the OpenClaw gateway is reachable and ready.
 * Probes GET /v1/models with a 3s abort timeout; any network failure or
 * timeout yields false.
 *
 * Fix: clearTimeout now runs in `finally` — previously it was only reached on
 * the success path, so a failed fetch left a pending 3s timer (which keeps the
 * Node event loop alive until it fires).
 *
 * @returns {Promise<boolean>} true when the gateway answered with a 2xx status
 */
export async function isGatewayReady() {
  const controller = new AbortController();
  const timeout = setTimeout(() => controller.abort(), 3000);
  try {
    const res = await fetch(`${OPENCLAW_API}/v1/models`, {
      signal: controller.signal,
    });
    return res.ok;
  } catch {
    // Unreachable, refused, or aborted — gateway is not ready.
    return false;
  } finally {
    clearTimeout(timeout);
  }
}
80
+
81
+ // ── Setup Context Builder ──────────────────────────────────────────────────
82
+
83
/**
 * Build a context message for the agent with full system status.
 * This gives the agent everything it needs to guide the setup.
 *
 * Fix: hardened with optional chaining. handleAgentChat() passes a
 * systemCheck received from an HTTP request body, so any sub-object
 * (node, ollama, env, vram, configs, …) may be absent; previously a
 * missing sub-object threw a TypeError.
 *
 * @param {object} systemCheck - result from the system check (shape per checks.js — TODO confirm)
 * @returns {string} pretty-printed JSON context block
 */
function buildSetupContext(systemCheck = {}) {
  const ctx = {
    tool: 'SEO Intel',
    version: systemCheck._version || '0.2.0',
    installDir: ROOT,
    systemStatus: {
      node: {
        version: systemCheck.node?.version,
        ok: systemCheck.node?.meetsMinimum,
      },
      ollama: {
        available: systemCheck.ollama?.available,
        host: systemCheck.ollama?.host,
        models: systemCheck.ollama?.models?.map(m => m.name) || [],
      },
      playwright: {
        installed: systemCheck.playwright?.installed,
      },
      apiKeys: {
        gemini: systemCheck.env?.keys?.GEMINI_API_KEY || false,
        anthropic: systemCheck.env?.keys?.ANTHROPIC_API_KEY || false,
        openai: systemCheck.env?.keys?.OPENAI_API_KEY || false,
        deepseek: systemCheck.env?.keys?.DEEPSEEK_API_KEY || false,
      },
      gpu: {
        vram: systemCheck.vram?.vramMB,
        name: systemCheck.vram?.gpuName,
      },
      existingProjects: systemCheck.configs?.projects?.map(p => p.project) || [],
      gsc: {
        hasData: systemCheck.gsc?.hasData || false,
      },
    },
    capabilities: systemCheck.summary,
    setupApiBase: 'http://localhost:3000/api/setup',
  };

  return JSON.stringify(ctx, null, 2);
}
126
+
127
// ── The System Prompt ──────────────────────────────────────────────────────

// Instructions given to the OpenClaw agent for every setup conversation
// (used by runAgentSetup and handleAgentChat). This is a runtime template
// literal — it interpolates ${ROOT} so the quoted shell commands are
// copy-pasteable; edits here change agent behavior, not just documentation.
const SETUP_SYSTEM_PROMPT = `You are the SEO Intel setup assistant. You help users configure SEO Intel — a competitive SEO intelligence tool that runs locally.

IMPORTANT RULES:
- Be concise and friendly. No walls of text.
- Guide step by step. One thing at a time.
- When something is already configured, acknowledge it and move on.
- If something is broken, explain what's wrong and offer to fix it.
- You have access to bash to run commands. Use it to install things, check status, and configure.

THE SETUP FLOW:
1. Check what's already working (I'll give you the system status)
2. Fix any missing dependencies (Node, npm deps, Playwright, Ollama)
3. Help choose and configure the extraction model (local Ollama recommended)
4. Help configure an analysis API key (Gemini recommended for best value)
5. Create a project config (target domain + competitors)
6. Optionally set up Google Search Console data
7. Run a quick pipeline test to verify everything works
8. Show the user their first commands to run

AVAILABLE COMMANDS (run these from ${ROOT}):
- node cli.js setup-web → opens web wizard at http://localhost:3000/setup
- node cli.js crawl <project> → crawl domains
- node cli.js extract <project> → extract with local AI
- node cli.js analyze <project> → run gap analysis
- node cli.js html <project> → generate dashboard
- node cli.js serve → start dashboard server
- node cli.js status → show system status
- node cli.js competitors <project> → manage domains
- node cli.js auth → show auth connections
- node cli.js export-actions <project> [--scope technical|competitive|suggestive|all] [--format json|brief] → agentic export (technical free, rest Solo)
- node cli.js competitive-actions <project> [--vs domain] [--format json|brief] → competitive gap export (Solo)
- node cli.js suggest-usecases <project> [--scope docs|product-pages|onboarding|all] [--format json|brief] → suggest what to build (Solo)

TO INSTALL THINGS:
- npm install (in ${ROOT}) → install Node dependencies
- npx playwright install chromium → install browser
- ollama pull qwen3.5:9b → install extraction model

TO CONFIGURE:
- Edit ${ROOT}/.env for API keys and settings
- Project configs go in ${ROOT}/config/<project>.json

ANALYSIS MODELS (user needs at least one API key):
- Gemini: Best value, 1M context (~$0.01-0.05/analysis) → GEMINI_API_KEY
- Claude: Best quality, nuanced reasoning (~$0.10-0.30) → ANTHROPIC_API_KEY
- OpenAI: Solid all-around (~$0.05-0.15) → OPENAI_API_KEY
- DeepSeek: Budget option (~$0.02-0.08) → DEEPSEEK_API_KEY

EXTRACTION MODELS (local, free):
- qwen3.5:9b (recommended, needs 6GB+ VRAM)
- qwen3.5:4b (budget, needs 3GB+ VRAM)
- qwen3.5:27b (quality, needs 16GB+ VRAM)`;
181
+
182
+ // ── Agent-Driven Setup Flow ────────────────────────────────────────────────
183
+
184
/**
 * Start an agent-driven setup session.
 * Uses OpenClaw's agent to guide the user conversationally.
 *
 * Fixes:
 * - "done"/"exit" detection now trims the input, so "done " terminates too.
 * - The completion heuristic is now case-insensitive ("Setup is complete"
 *   from the agent previously failed the lowercase substring check).
 *
 * @param {object} systemCheck - Result from fullSystemCheck()
 * @param {object} [opts]
 * @param {function} [opts.onMessage] - Callback for agent messages
 * @param {function} [opts.onInput] - Callback to get user input
 * @returns {Promise<{ completed: boolean, project?: string }>}
 */
export async function runAgentSetup(systemCheck, opts = {}) {
  const { onMessage, onInput } = opts;

  const context = buildSetupContext(systemCheck);

  const messages = [
    {
      role: 'system',
      content: SETUP_SYSTEM_PROMPT,
    },
    {
      role: 'user',
      content: `Here's the current system status. Guide me through setting up SEO Intel. Be concise — one step at a time.\n\n${context}`,
    },
  ];

  // Get initial response
  const initialResponse = await askAgent(messages);
  messages.push({ role: 'assistant', content: initialResponse });

  if (onMessage) onMessage(initialResponse);

  // Conversational loop
  let completed = false;
  let maxTurns = 20; // safety limit on back-and-forth turns

  while (!completed && maxTurns > 0) {
    maxTurns--;

    // Get user input; no input callback (or empty input) ends the session.
    const userInput = onInput ? await onInput() : null;
    const normalized = typeof userInput === 'string' ? userInput.trim().toLowerCase() : '';
    if (!userInput || normalized === 'done' || normalized === 'exit') {
      completed = true;
      break;
    }

    messages.push({ role: 'user', content: userInput });

    const response = await askAgent(messages);
    messages.push({ role: 'assistant', content: response });

    if (onMessage) onMessage(response);

    // Heuristic completion detection on the agent's phrasing,
    // compared case-insensitively.
    const lower = response.toLowerCase();
    if (lower.includes('setup is complete') || lower.includes('all set') || lower.includes('ready to go')) {
      completed = true;
    }
  }

  return { completed };
}
245
+
246
+ // ── CLI Integration ────────────────────────────────────────────────────────
247
+
248
/**
 * Run the OpenClaw-powered setup from the CLI.
 * Falls back to web wizard if OpenClaw isn't available.
 *
 * Wires stdin/stdout into runAgentSetup: agent messages are printed with a
 * two-space indent, and user replies are read via readline. The readline
 * interface is always closed, even if the session throws.
 *
 * @param {object} systemCheck - Result from fullSystemCheck()
 */
export async function cliAgentSetup(systemCheck) {
  const readline = await import('readline');
  const rl = readline.createInterface({
    input: process.stdin,
    output: process.stdout,
  });

  // Promise wrapper around readline's callback-style question().
  const ask = (prompt) => new Promise(resolve => rl.question(prompt, resolve));

  console.log('\n \x1b[36m\x1b[1m🐸 SEO Intel — Agent-Powered Setup\x1b[0m\n');
  console.log(' \x1b[2mOpenClaw is guiding your setup. Type your answers, or "done" to finish.\x1b[0m\n');

  try {
    await runAgentSetup(systemCheck, {
      // Indent every line of the agent's reply, then a blank separator line.
      onMessage: (msg) => {
        msg.split('\n').forEach((line) => console.log(`  ${line}`));
        console.log();
      },
      // ask() already returns a promise of the typed line.
      onInput: () => ask(' \x1b[36m>\x1b[0m '),
    });
  } finally {
    rl.close();
  }

  console.log('\n \x1b[32m✓ Setup session ended.\x1b[0m\n');
}
285
+
286
+ // ── Web API Integration ────────────────────────────────────────────────────
287
+
288
/**
 * Handle a setup chat message via the web API.
 * Used by the web wizard's "Agent Mode" chat panel.
 *
 * Rebuilds the full transcript on every call: system prompt, an optional
 * system-status message (only on the first turn of a conversation), the
 * replayed history, then the new user message.
 *
 * @param {object} body - { message, history, systemCheck }
 * @returns {Promise<{ response: string }>}
 */
export async function handleAgentChat(body) {
  const { message, history = [], systemCheck } = body;

  const messages = [
    {
      role: 'system',
      content: SETUP_SYSTEM_PROMPT,
    },
  ];

  // Fresh conversation: give the agent the machine status up front.
  const context = systemCheck ? buildSetupContext(systemCheck) : '';
  if (history.length === 0 && context) {
    messages.push({
      role: 'user',
      content: `System status:\n${context}`,
    });
  }

  // Replay prior turns so the agent keeps conversational state.
  for (const { role, content } of history) {
    messages.push({ role, content });
  }

  // Append the current user message and ask the agent.
  messages.push({ role: 'user', content: message });

  const response = await askAgent(messages);

  return { response };
}