seo-intel 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/.env.example +41 -0
- package/LICENSE +75 -0
- package/README.md +243 -0
- package/Start SEO Intel.bat +9 -0
- package/Start SEO Intel.command +8 -0
- package/cli.js +3727 -0
- package/config/example.json +29 -0
- package/config/setup-wizard.js +522 -0
- package/crawler/index.js +566 -0
- package/crawler/robots.js +103 -0
- package/crawler/sanitize.js +124 -0
- package/crawler/schema-parser.js +168 -0
- package/crawler/sitemap.js +103 -0
- package/crawler/stealth.js +393 -0
- package/crawler/subdomain-discovery.js +341 -0
- package/db/db.js +213 -0
- package/db/schema.sql +120 -0
- package/exports/competitive.js +186 -0
- package/exports/heuristics.js +67 -0
- package/exports/queries.js +197 -0
- package/exports/suggestive.js +230 -0
- package/exports/technical.js +180 -0
- package/exports/templates.js +77 -0
- package/lib/gate.js +204 -0
- package/lib/license.js +369 -0
- package/lib/oauth.js +432 -0
- package/lib/updater.js +324 -0
- package/package.json +68 -0
- package/reports/generate-html.js +6194 -0
- package/reports/generate-site-graph.js +949 -0
- package/reports/gsc-loader.js +190 -0
- package/scheduler.js +142 -0
- package/seo-audit.js +619 -0
- package/seo-intel.png +0 -0
- package/server.js +602 -0
- package/setup/ROADMAP.md +109 -0
- package/setup/checks.js +483 -0
- package/setup/config-builder.js +227 -0
- package/setup/engine.js +65 -0
- package/setup/installers.js +197 -0
- package/setup/models.js +328 -0
- package/setup/openclaw-bridge.js +329 -0
- package/setup/validator.js +395 -0
- package/setup/web-routes.js +688 -0
- package/setup/wizard.html +2920 -0
- package/start-seo-intel.sh +8 -0
package/setup/ROADMAP.md
ADDED
|
@@ -0,0 +1,109 @@
|
|
|
1
|
+
# SEO Intel Setup — Roadmap
|
|
2
|
+
|
|
3
|
+
> From open-source CLI tool → standalone product
|
|
4
|
+
|
|
5
|
+
## Current State (v0.2)
|
|
6
|
+
- [x] System detection (Node, npm, Ollama, Playwright, VRAM)
|
|
7
|
+
- [x] Model recommendations (VRAM-based extraction + analysis tiers)
|
|
8
|
+
- [x] Project configuration (target domain, competitors, crawl mode)
|
|
9
|
+
- [x] API key setup (Gemini, Claude, OpenAI, DeepSeek)
|
|
10
|
+
- [x] Pipeline validation (Ollama → API → crawl → extraction)
|
|
11
|
+
- [x] CLI wizard + Web wizard at /setup
|
|
12
|
+
- [x] GSC setup step (CSV upload + export guide + auto-detection)
|
|
13
|
+
- [x] License system (lib/license.js + lib/gate.js)
|
|
14
|
+
- [x] Free/Pro tier gating on all 23 CLI commands
|
|
15
|
+
- [x] Page limit enforcement (500/domain on free tier)
|
|
16
|
+
- [x] License status in `status` command
|
|
17
|
+
|
|
18
|
+
## Priority 1 — GSC Setup Guide
|
|
19
|
+
**Status: ✅ Done (CSV upload, export guide, auto-detection)**
|
|
20
|
+
|
|
21
|
+
Google Search Console is the #1 data source users need but can't figure out alone.
|
|
22
|
+
Currently: manual CSV export, no API, no guidance in wizard.
|
|
23
|
+
|
|
24
|
+
- [x] Add Step 3.5: "Connect Google Search Console" in web wizard
|
|
25
|
+
- [x] Visual walkthrough: how to export CSVs from GSC UI (screenshots/steps)
|
|
26
|
+
- [x] Auto-detect existing GSC data in `gsc/` folder
|
|
27
|
+
- [ ] GSC API integration (service account JSON key upload)
|
|
28
|
+
- [ ] Auto-fetch GSC data on schedule (replaces manual CSV)
|
|
29
|
+
|
|
30
|
+
## Priority 2 — Ollama Auto-Install
|
|
31
|
+
**Status: 📋 Planned**
|
|
32
|
+
|
|
33
|
+
If Ollama isn't found, offer to install it instead of just warning.
|
|
34
|
+
|
|
35
|
+
- [ ] macOS: `brew install ollama` or direct download
|
|
36
|
+
- [ ] Linux: `curl -fsSL https://ollama.com/install.sh | sh`
|
|
37
|
+
- [ ] Windows: direct user to installer URL
|
|
38
|
+
- [ ] Auto-start Ollama after install
|
|
39
|
+
- [ ] Auto-pull recommended model after install
|
|
40
|
+
|
|
41
|
+
## Priority 3 — Scheduling / Automation
|
|
42
|
+
**Status: 📋 Planned**
|
|
43
|
+
|
|
44
|
+
After setup, users need recurring crawls. "Set and forget."
|
|
45
|
+
|
|
46
|
+
- [ ] "Schedule weekly crawl?" step in wizard
|
|
47
|
+
- [ ] Cron job generator (macOS launchd / Linux cron / Windows Task Scheduler)
|
|
48
|
+
- [ ] Built-in scheduler (node-cron or setTimeout loop in server.js)
|
|
49
|
+
- [ ] Crawl → Extract → Analyze → Regenerate dashboard pipeline
|
|
50
|
+
- [ ] "Last run" / "Next run" display on dashboard
|
|
51
|
+
|
|
52
|
+
## Priority 4 — First Run Experience
|
|
53
|
+
**Status: 📋 Planned**
|
|
54
|
+
|
|
55
|
+
Don't just show CLI commands — offer to run the first crawl right there.
|
|
56
|
+
|
|
57
|
+
- [ ] "Run your first crawl now?" button on Step 5
|
|
58
|
+
- [ ] SSE progress stream showing crawl progress in real-time
|
|
59
|
+
- [ ] Auto-trigger extraction + analysis after crawl
|
|
60
|
+
- [ ] Redirect to dashboard when done
|
|
61
|
+
- [ ] Estimated time based on competitor count × pages per domain
|
|
62
|
+
|
|
63
|
+
## Priority 5 — Proxy & Rate Limiting
|
|
64
|
+
**Status: 📋 Planned**
|
|
65
|
+
|
|
66
|
+
Stealth mode users need proxy config to avoid blocks.
|
|
67
|
+
|
|
68
|
+
- [ ] Proxy URL input (HTTP/SOCKS5)
|
|
69
|
+
- [ ] Proxy rotation list upload
|
|
70
|
+
- [ ] Rate limit slider (requests/minute)
|
|
71
|
+
- [ ] Per-domain delay configuration
|
|
72
|
+
- [ ] "Test proxy" validation step
|
|
73
|
+
|
|
74
|
+
## Priority 6 — Notifications
|
|
75
|
+
**Status: 📋 Planned**
|
|
76
|
+
|
|
77
|
+
Know when things happen without checking manually.
|
|
78
|
+
|
|
79
|
+
- [ ] Email notifications (SMTP setup in wizard)
|
|
80
|
+
- [ ] Slack webhook integration
|
|
81
|
+
- [ ] Discord webhook integration
|
|
82
|
+
- [ ] Configurable triggers: crawl complete, ranking drop, new competitor page
|
|
83
|
+
- [ ] Weekly digest email with key metrics
|
|
84
|
+
|
|
85
|
+
## Priority 7 — Data & Backup
|
|
86
|
+
**Status: 📋 Planned**
|
|
87
|
+
|
|
88
|
+
Where data lives, how big it gets, how to manage it.
|
|
89
|
+
|
|
90
|
+
- [ ] Show data directory + size in dashboard footer
|
|
91
|
+
- [ ] One-click export (SQLite → JSON/CSV)
|
|
92
|
+
- [ ] Auto-backup before major operations
|
|
93
|
+
- [ ] Data retention settings (keep last N crawls)
|
|
94
|
+
- [ ] Cloud backup option (S3/GCS)
|
|
95
|
+
|
|
96
|
+
---
|
|
97
|
+
|
|
98
|
+
## Open Source → Product Progression
|
|
99
|
+
|
|
100
|
+
| Feature | Open Source (froggo.pro) | Standalone SaaS |
|
|
101
|
+
|---------|------------------------|-----------------|
|
|
102
|
+
| Setup | CLI wizard | Web wizard + onboarding email |
|
|
103
|
+
| Auth | None (local) | User accounts + API keys |
|
|
104
|
+
| GSC | Manual CSV or API key | OAuth "Connect GSC" button |
|
|
105
|
+
| Scheduling | Cron jobs | Built-in + hosted workers |
|
|
106
|
+
| Notifications | Webhook only | Email + Slack + in-app |
|
|
107
|
+
| Data | Local SQLite | Cloud DB + CDN dashboards |
|
|
108
|
+
| Multi-user | Single | Teams + permissions |
|
|
109
|
+
| Billing | Free / one-time | Subscription tiers |
|
package/setup/checks.js
ADDED
|
@@ -0,0 +1,483 @@
|
|
|
1
|
+
/**
|
|
2
|
+
* SEO Intel — System Detection
|
|
3
|
+
*
|
|
4
|
+
* Stateless check functions that detect installed software,
|
|
5
|
+
* available models, and environment configuration.
|
|
6
|
+
* Used by both CLI wizard and web setup wizard.
|
|
7
|
+
*/
|
|
8
|
+
|
|
9
|
+
import { execSync, spawnSync } from 'child_process';
|
|
10
|
+
import { existsSync, readFileSync, readdirSync } from 'fs';
|
|
11
|
+
import { join, dirname } from 'path';
|
|
12
|
+
import { fileURLToPath } from 'url';
|
|
13
|
+
|
|
14
|
+
// ES modules have no __dirname; reconstruct it from this module's URL.
const __dirname = dirname(fileURLToPath(import.meta.url));
// Package root — this file lives in setup/, so the root is one level up.
const ROOT = join(__dirname, '..');
|
|
16
|
+
|
|
17
|
+
// ── Node.js ────────────────────────────────────────────────────────────────
|
|
18
|
+
|
|
19
|
+
/**
 * Report the running Node.js version and whether it meets the v18 minimum.
 * @returns {{installed: boolean, version: string|null, major: number, meetsMinimum: boolean}}
 */
export function checkNodeVersion() {
  try {
    const version = process.version; // e.g. 'v20.11.0'
    const [majorStr] = version.slice(1).split('.');
    const major = Number.parseInt(majorStr, 10);
    return { installed: true, version, major, meetsMinimum: major >= 18 };
  } catch {
    // process.version always exists in Node; this branch is pure defensiveness.
    return { installed: false, version: null, major: 0, meetsMinimum: false };
  }
}
|
|
28
|
+
|
|
29
|
+
// ── npm ─────────────────────────────────────────────────────────────────────
|
|
30
|
+
|
|
31
|
+
/**
 * Detect npm by invoking `npm --version`.
 * @returns {{installed: boolean, version: string|null}}
 */
export function checkNpm() {
  let version = null;
  try {
    version = execSync('npm --version', { encoding: 'utf8', timeout: 5000 }).trim();
  } catch {
    // npm binary missing or unresponsive within the timeout.
    return { installed: false, version: null };
  }
  return { installed: true, version };
}
|
|
39
|
+
|
|
40
|
+
// ── Ollama (local) ──────────────────────────────────────────────────────────
|
|
41
|
+
|
|
42
|
+
/**
 * Detect a local Ollama install and list its pulled models.
 * Model names are the first whitespace-separated column of each
 * `ollama list` row, with the header row skipped.
 * @returns {{installed: boolean, running: boolean, models: string[], host: string|null}}
 */
export function checkOllamaLocal() {
  if (!commandExists('ollama')) {
    return { installed: false, running: false, models: [], host: null };
  }

  const host = 'http://localhost:11434';
  try {
    const listing = execSync('ollama list 2>/dev/null', { encoding: 'utf8', timeout: 5000 });
    const models = [];
    for (const row of listing.split('\n').slice(1)) {
      const name = row.split(/\s+/)[0];
      if (name && name !== 'NAME') models.push(name);
    }
    return { installed: true, running: true, models, host };
  } catch {
    // `ollama list` fails when the daemon is not running.
    return { installed: true, running: false, models: [], host };
  }
}
|
|
58
|
+
|
|
59
|
+
// ── Ollama (remote host) ────────────────────────────────────────────────────
|
|
60
|
+
|
|
61
|
+
/**
 * Probe a remote Ollama host via GET {host}/api/tags, aborting after 3s.
 * @param {string} host - Base URL, e.g. 'http://192.168.1.5:11434'.
 * @returns {Promise<{reachable: boolean, models: string[], host: string}>}
 */
export async function checkOllamaRemote(host) {
  const controller = new AbortController();
  const timer = setTimeout(() => controller.abort(), 3000);
  try {
    const res = await fetch(`${host}/api/tags`, { signal: controller.signal });
    if (!res.ok) return { reachable: false, models: [], host };
    const body = await res.json();
    const models = (body.models || [])
      .map((m) => m.name || m.model)
      .filter(Boolean);
    return { reachable: true, models, host };
  } catch {
    // Network error, non-JSON body, or 3s abort — all count as unreachable.
    return { reachable: false, models: [], host };
  } finally {
    clearTimeout(timer);
  }
}
|
|
76
|
+
|
|
77
|
+
// ── Ollama auto-detect (local → custom hosts) ──────────────────────────────
|
|
78
|
+
|
|
79
|
+
/**
 * Locate a usable Ollama endpoint: local daemon first, then each custom
 * host in order. Falls back to describing what was (not) found.
 * @param {string[]} [customHosts] - Extra Ollama base URLs to probe.
 * @returns {Promise<{available: boolean, mode: string, host: string|null, models: string[], installed: boolean}>}
 */
export async function checkOllamaAuto(customHosts = []) {
  const local = checkOllamaLocal();

  // Prefer a local daemon that already has models pulled.
  if (local.running && local.models.length > 0) {
    return { available: true, mode: 'local', host: local.host, models: local.models, installed: local.installed };
  }

  // Otherwise probe custom/LAN hosts in the order given.
  for (const host of customHosts) {
    const remote = await checkOllamaRemote(host);
    if (remote.reachable && remote.models.length > 0) {
      return { available: true, mode: 'remote', host: remote.host, models: remote.models, installed: local.installed };
    }
  }

  // Installed locally but daemon down or no models pulled yet.
  if (local.installed) {
    return { available: false, mode: 'installed-not-ready', host: local.host, models: [], installed: true };
  }

  // No Ollama anywhere.
  return { available: false, mode: 'none', host: null, models: [], installed: false };
}
|
|
125
|
+
|
|
126
|
+
// ── Playwright ──────────────────────────────────────────────────────────────
|
|
127
|
+
|
|
128
|
+
/**
 * Detect the Playwright npm package under ROOT/node_modules and whether a
 * Chromium browser binary appears to be installed for it.
 *
 * NOTE(review): browser detection here is heuristic. Playwright normally
 * caches browsers in a shared user cache directory, not inside node_modules;
 * the `.local-browsers` probe only works for hermetic installs
 * (PLAYWRIGHT_BROWSERS_PATH=0) — confirm against how this package installs
 * browsers.
 *
 * @returns {{installed: boolean, chromiumReady: boolean}}
 */
export function checkPlaywright() {
  const pkgPath = join(ROOT, 'node_modules', 'playwright');
  const installed = existsSync(pkgPath);

  if (!installed) return { installed: false, chromiumReady: false };

  // Check if Chromium binary is actually available
  let chromiumReady = false;
  try {
    // Playwright stores browser paths in its package
    const browserPath = join(pkgPath, '.local-browsers');
    if (existsSync(browserPath)) {
      const browsers = readdirSync(browserPath);
      chromiumReady = browsers.some(b => b.toLowerCase().includes('chromium'));
    }
    // Alternative check: playwright's own registry
    if (!chromiumReady) {
      const result = spawnSync('npx', ['playwright', 'install', '--dry-run'], {
        encoding: 'utf8',
        timeout: 10000,
        stdio: ['pipe', 'pipe', 'pipe'],
      });
      // If dry-run shows chromium is already installed, it's ready
      // NOTE(review): this treats "output does NOT mention chromium" as
      // "ready", which inverts the comment above — verify against the actual
      // `--dry-run` output format; it may mention chromium either way.
      // If npx itself is missing, result.stdout is null and .includes throws
      // into the catch below, yielding chromiumReady = false.
      chromiumReady = result.status === 0 && !result.stdout.includes('chromium');
    }
  } catch {
    // If we can't determine, assume it needs install
    chromiumReady = false;
  }

  return { installed, chromiumReady };
}
|
|
160
|
+
|
|
161
|
+
// ── npm dependencies ────────────────────────────────────────────────────────
|
|
162
|
+
|
|
163
|
+
/**
 * Verify every dependency listed in ROOT/package.json has a folder under
 * ROOT/node_modules. A missing node_modules or package.json, or an
 * unparseable manifest, reports as not installed with an empty missing list.
 * @returns {{installed: boolean, missing: string[]}}
 */
export function checkNpmDeps() {
  const nodeModules = join(ROOT, 'node_modules');
  const manifestPath = join(ROOT, 'package.json');
  if (!existsSync(nodeModules) || !existsSync(manifestPath)) {
    return { installed: false, missing: [] };
  }

  try {
    const manifest = JSON.parse(readFileSync(manifestPath, 'utf8'));
    const missing = Object.keys(manifest.dependencies || {})
      .filter((dep) => !existsSync(join(nodeModules, dep)));
    return { installed: missing.length === 0, missing };
  } catch {
    // Unreadable or invalid package.json.
    return { installed: false, missing: [] };
  }
}
|
|
179
|
+
|
|
180
|
+
// ── .env file ───────────────────────────────────────────────────────────────
|
|
181
|
+
|
|
182
|
+
/**
 * Read ROOT/.env (if present) and summarize which settings are configured.
 * API keys are reported as booleans (set / not set); Ollama and crawl
 * settings are reported as their raw string values, or null when absent.
 * @returns {{exists: boolean, keys: Object, raw: Object}}
 */
export function checkEnvFile() {
  const envPath = join(ROOT, '.env');
  if (!existsSync(envPath)) {
    return { exists: false, keys: {}, raw: {} };
  }

  const raw = parseEnvFile(envPath);
  const keys = {
    GEMINI_API_KEY: Boolean(raw.GEMINI_API_KEY),
    ANTHROPIC_API_KEY: Boolean(raw.ANTHROPIC_API_KEY),
    OPENAI_API_KEY: Boolean(raw.OPENAI_API_KEY),
    OLLAMA_URL: raw.OLLAMA_URL || null,
    OLLAMA_MODEL: raw.OLLAMA_MODEL || null,
    OLLAMA_CTX: raw.OLLAMA_CTX || null,
    CRAWL_MAX_PAGES: raw.CRAWL_MAX_PAGES || null,
  };
  return { exists: true, keys, raw };
}
|
|
207
|
+
|
|
208
|
+
// ── Existing project configs ────────────────────────────────────────────────
|
|
209
|
+
|
|
210
|
+
/**
 * Scan ROOT/config for *.json project configs and summarize each one.
 * Files that fail to parse are skipped silently.
 * @returns {{configs: Array<{project: string, domain: string, competitors: number, path: string}>}}
 */
export function checkExistingConfigs() {
  const configDir = join(ROOT, 'config');
  if (!existsSync(configDir)) return { configs: [] };

  try {
    const configs = [];
    for (const file of readdirSync(configDir)) {
      if (!file.endsWith('.json')) continue;
      const fullPath = join(configDir, file);
      try {
        const data = JSON.parse(readFileSync(fullPath, 'utf8'));
        configs.push({
          project: data.project || file.replace('.json', ''),
          domain: data.target?.domain || '',
          competitors: (data.competitors || []).length,
          path: fullPath,
        });
      } catch {
        // Unparseable config — skip it.
      }
    }
    return { configs };
  } catch {
    // Directory unreadable.
    return { configs: [] };
  }
}
|
|
235
|
+
|
|
236
|
+
// ── OS Detection ────────────────────────────────────────────────────────────
|
|
237
|
+
|
|
238
|
+
/**
 * Map process.platform onto the three OS families the wizard cares about.
 * Anything that is not macOS or Windows is treated as Linux.
 * @returns {{platform: 'macos'|'windows'|'linux', arch: string}}
 */
export function detectOS() {
  let platform;
  switch (process.platform) {
    case 'darwin':
      platform = 'macos';
      break;
    case 'win32':
      platform = 'windows';
      break;
    default:
      platform = 'linux';
  }
  return { platform, arch: process.arch };
}
|
|
244
|
+
|
|
245
|
+
// ── VRAM Detection ──────────────────────────────────────────────────────────
|
|
246
|
+
|
|
247
|
+
/**
 * Best-effort GPU VRAM detection, tried in order:
 *   1. nvidia-smi (NVIDIA — Linux / Windows)
 *   2. system_profiler (macOS; Apple Silicon falls back to unified memory)
 *   3. rocm-smi (AMD — Linux)
 * @returns {{available: boolean, gpuName: string|null, vramMB: number, source: string}}
 */
export function detectVRAM() {
  // Suppress stderr via the stdio option instead of a `2>/dev/null` shell
  // redirect: on Windows execSync runs under cmd.exe, where `/dev/null` does
  // not exist, so the redirect made the command fail and the original
  // nvidia-smi probe could never succeed there.
  const execOpts = { encoding: 'utf8', timeout: 5000, stdio: ['pipe', 'pipe', 'ignore'] };

  // NVIDIA GPU (Linux / Windows)
  try {
    const out = execSync(
      'nvidia-smi --query-gpu=name,memory.total --format=csv,noheader,nounits',
      execOpts
    );
    const lines = out.trim().split('\n').filter(Boolean);
    if (lines.length > 0) {
      // First GPU only; CSV row is "<name>, <MiB>".
      const [gpuName, memStr] = lines[0].split(',').map(s => s.trim());
      const vramMB = parseInt(memStr, 10) || 0;
      return { available: true, gpuName, vramMB, source: 'nvidia-smi' };
    }
  } catch {}

  // macOS Metal GPU
  if (process.platform === 'darwin') {
    try {
      const out = execSync('system_profiler SPDisplaysDataType', execOpts);

      // GPU name: Intel Macs report "Chipset Model", Apple Silicon "Chip".
      const nameMatch = out.match(/Chipset Model:\s*(.+)/i) || out.match(/Chip:\s*(.+)/i);
      const gpuName = nameMatch ? nameMatch[1].trim() : 'Unknown';

      // Discrete/Intel GPUs report VRAM explicitly.
      const vramMatch = out.match(/VRAM.*?:\s*(\d+)\s*(MB|GB)/i);
      if (vramMatch) {
        const val = parseInt(vramMatch[1], 10);
        const vramMB = vramMatch[2].toUpperCase() === 'GB' ? val * 1024 : val;
        return { available: true, gpuName, vramMB, source: 'system_profiler' };
      }

      // Apple Silicon shares memory with the GPU; assume ~75% of system RAM
      // is GPU-usable as a proxy.
      const memOut = execSync('sysctl -n hw.memsize', {
        encoding: 'utf8',
        timeout: 3000,
        stdio: ['pipe', 'pipe', 'ignore'],
      });
      const totalBytes = parseInt(memOut.trim(), 10);
      if (totalBytes > 0) {
        const vramMB = Math.floor((totalBytes / 1024 / 1024) * 0.75);
        return { available: true, gpuName, vramMB, source: 'apple-silicon-unified' };
      }
    } catch {}
  }

  // AMD GPU (Linux)
  try {
    const out = execSync('rocm-smi --showmeminfo vram', execOpts);
    const match = out.match(/Total.*?(\d+)\s*MB/i);
    if (match) {
      return { available: true, gpuName: 'AMD GPU', vramMB: parseInt(match[1], 10), source: 'rocm-smi' };
    }
  } catch {}

  // No detectable GPU.
  return { available: false, gpuName: null, vramMB: 0, source: 'none' };
}
|
|
307
|
+
|
|
308
|
+
// ── Google Search Console data ───────────────────────────────────────────
|
|
309
|
+
|
|
310
|
+
/**
 * Look for exported Google Search Console CSVs under ROOT/gsc.
 * The lexicographically newest matching folder is inspected; it "has data"
 * when at least two of the expected CSVs exist (minimum: Chart + Queries).
 * @param {string} [project] - Optional project name; folders are matched by
 *   case-insensitive prefix.
 * @returns {Object} hasData flag plus folder/file details.
 */
export function checkGscData(project) {
  const gscDir = join(ROOT, 'gsc');
  if (!existsSync(gscDir)) return { hasData: false, folders: [], project };

  try {
    const allFolders = readdirSync(gscDir).filter(f => !f.startsWith('.'));

    // Restrict to folders matching the project name prefix, when given.
    const folders = project
      ? allFolders.filter(f => f.toLowerCase().startsWith(project.toLowerCase()))
      : allFolders;

    if (folders.length === 0) return { hasData: false, folders: allFolders, project };

    // Inspect the lexicographically newest folder for the expected exports.
    const latest = folders.sort().pop();
    const folderPath = join(gscDir, latest);
    const expectedFiles = ['Chart.csv', 'Queries.csv', 'Pages.csv', 'Countries.csv', 'Devices.csv'];
    const found = expectedFiles.filter(f => existsSync(join(folderPath, f)));
    const missing = expectedFiles.filter(f => !found.includes(f));

    return {
      hasData: found.length >= 2, // At minimum Chart + Queries
      folder: latest,
      folderPath,
      found,
      missing,
      allFolders,
      project,
    };
  } catch {
    // Directory unreadable.
    return { hasData: false, folders: [], project };
  }
}
|
|
352
|
+
|
|
353
|
+
// ── Full System Check ───────────────────────────────────────────────────────
|
|
354
|
+
|
|
355
|
+
/**
 * Run every environment probe and aggregate the results into one report.
 * @param {Object} [options]
 * @param {string[]} [options.customOllamaHosts] - Extra Ollama base URLs to probe.
 * @param {string} [options.project] - Project name used to match GSC data folders.
 * @returns {Promise<Object>} Individual check results, top-level readiness
 *   flags, and a capability summary.
 */
export async function fullSystemCheck(options = {}) {
  const { customOllamaHosts = [], project } = options;

  // Most checks are synchronous; Promise.all folds the one async probe
  // (checkOllamaAuto) in with the rest.
  const [node, npm, ollama, playwright, npmDeps, env, configs, os, vram, gsc] =
    await Promise.all([
      checkNodeVersion(),
      checkNpm(),
      checkOllamaAuto(customOllamaHosts),
      checkPlaywright(),
      checkNpmDeps(),
      checkEnvFile(),
      checkExistingConfigs(),
      detectOS(),
      detectVRAM(),
      checkGscData(project),
    ]);

  // OpenClaw detection (sync, fast)
  const openclaw = checkOpenClaw();

  const ready = node.meetsMinimum && npm.installed;
  const hasAnalysisKey =
    env.keys.GEMINI_API_KEY || env.keys.ANTHROPIC_API_KEY || env.keys.OPENAI_API_KEY;

  const summary = {
    canCrawl: node.meetsMinimum && playwright.installed,
    canExtract: ollama.available,
    canAnalyze: hasAnalysisKey,
    canGenerateHtml: node.meetsMinimum,
    hasGscData: gsc.hasData,
    hasOpenClaw: openclaw.installed,
    canAgentSetup: openclaw.canAgentSetup,
  };

  return {
    node,
    npm,
    ollama,
    playwright,
    npmDeps,
    env,
    configs,
    os,
    vram,
    gsc,
    openclaw,
    ready,
    hasAnalysisKey,
    summary,
  };
}
|
|
404
|
+
|
|
405
|
+
// ── Helpers (private) ───────────────────────────────────────────────────────
|
|
406
|
+
|
|
407
|
+
/**
 * Check whether an executable is resolvable on PATH.
 * The original always ran `which`, which does not exist on Windows — on
 * win32 the equivalent is `where`. Callers pass fixed command names, so no
 * shell escaping is applied.
 * @param {string} cmd - Bare command name (e.g. 'ollama').
 * @returns {boolean} True when the command resolves on PATH.
 */
function commandExists(cmd) {
  try {
    if (process.platform === 'win32') {
      execSync(`where ${cmd}`, { stdio: 'ignore', timeout: 3000 });
    } else {
      execSync(`which ${cmd} 2>/dev/null`, { stdio: 'ignore', timeout: 3000 });
    }
    return true;
  } catch {
    return false;
  }
}
|
|
415
|
+
|
|
416
|
+
// ── OpenClaw Detection ──────────────────────────────────────────────────────
|
|
417
|
+
|
|
418
|
+
/**
 * Detect OpenClaw installation and capabilities.
 * Returns info about the gateway, available models, and agent readiness.
 * @returns {Object} Installation, gateway, and skills-directory status flags.
 */
export function checkOpenClaw() {
  const result = {
    installed: false,
    version: null,
    gatewayRunning: false,
    gatewayUrl: 'ws://127.0.0.1:18789',
    apiUrl: 'http://127.0.0.1:18789',
    hasSkillsDir: false,
    skillsPath: null,
    canAgentSetup: false,
  };

  // 1. Is the openclaw binary on PATH?
  if (!commandExists('openclaw')) return result;
  result.installed = true;

  // 2. Version string, e.g. "OpenClaw 1.2.3" (fall back to raw output).
  try {
    const raw = execSync('openclaw --version 2>/dev/null', { timeout: 5000 }).toString().trim();
    const m = raw.match(/OpenClaw\s+([\d.]+)/);
    result.version = m ? m[1] : raw;
  } catch { /* version is optional */ }

  // 3. Gateway liveness: quick HTTP ping of the models endpoint.
  try {
    execSync('curl -s --max-time 2 http://127.0.0.1:18789/v1/models >/dev/null 2>&1', { timeout: 5000 });
    result.gatewayRunning = true;
  } catch {
    result.gatewayRunning = false;
  }

  // 4. Skills directory (two known layouts under the home directory).
  const homeDir = process.env.HOME || process.env.USERPROFILE;
  const candidates = [
    join(homeDir, '.openclaw', 'skills'),
    join(homeDir, '.openclaw', 'managed-skills'),
  ];
  const skillsPath = candidates.find(p => existsSync(p));
  if (skillsPath) {
    result.hasSkillsDir = true;
    result.skillsPath = skillsPath;
  }

  // Agent setup requires a live gateway.
  result.canAgentSetup = result.gatewayRunning;

  return result;
}
|
|
473
|
+
|
|
474
|
+
/**
 * Minimal .env parser: KEY=value lines; lines starting with '#' or without
 * an '=' are ignored; keys and values are trimmed.
 *
 * Fix: values are unquoted only when the SAME quote character wraps the
 * whole value. The original stripped a leading and trailing quote
 * independently (`/^["']|["']$/g`), which corrupted mismatched values like
 * `"abc` (→ `abc`) or `'a"` (→ `a`).
 *
 * @param {string} envPath - Path to the .env file.
 * @returns {Object<string, string>} Parsed key/value map ({} if file missing).
 */
export function parseEnvFile(envPath) {
  if (!existsSync(envPath)) return {};
  const env = {};
  for (const line of readFileSync(envPath, 'utf8').split('\n')) {
    const m = line.match(/^([^#=]+)=(.*)$/);
    if (!m) continue;
    const key = m[1].trim();
    let value = m[2].trim();
    // Strip only a matched pair of surrounding quotes.
    if (
      value.length >= 2 &&
      (value[0] === '"' || value[0] === "'") &&
      value[value.length - 1] === value[0]
    ) {
      value = value.slice(1, -1);
    }
    env[key] = value;
  }
  return env;
}
|