subto 4.0.0 → 6.0.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -14,3 +14,30 @@ subto scan <url> # request a scan
14
14
  ```
15
15
 
16
16
  This package is a production CLI; it intentionally omits development instructions.
17
+
18
+ Advanced features
19
+ -----------------
20
+
21
+ - `subto upload [dir]` — upload your project directory to the built-in AI analyzer. The command samples files from the target directory, respecting a `.subtoignore` file (one pattern per line, `#` for comments). For safety the uploader always ignores `.env` files. The uploader limits total files and bytes to avoid extremely large uploads; see CLI flags `--max-files` and `--max-bytes`.
22
+
23
+ - `subto scan upload [dir]` — upload a directory to the server and request a scan. This command respects `.subtoignore` and always ignores `.env`. It uploads sampled file snippets (small files fully, larger files head only) and returns an `uploadId` and `scanId` you can use with the AI chat or to fetch scan status.
24
+
25
+ - `subto upload [dir]` — run a local-only AI analysis on the sampled files (does not send files to the server). Useful when you want quick on-device feedback without uploading. (Note: this entry reuses the same command name as the server-upload entry above with contradictory behavior; verify which command name — e.g. a distinct subcommand or a `--local` flag — is actually intended.)
26
+
27
+ `.subtoignore` format
28
+ - One pattern per line.
29
+ - Lines starting with `#` are comments.
30
+ - Patterns may be folder names (e.g. `node_modules`), file globs/partials (e.g. `*.lock`), or specific files (`secret.txt`). The uploader uses simple matching rather than full glob semantics: a file's relative path is ignored if it exactly equals a pattern, starts with it, contains `/` immediately followed by it, or ends with it. Examples:
31
+
32
+ ```
33
+ # ignore node modules and build artifacts
34
+ node_modules
35
+ dist
36
+ *.lock
37
+ # ignore specific file
38
+ secret.txt
39
+ ```
40
+
41
+ - AI provider: The CLI prefers `OPENAI_API_KEY` / `AI_API_KEY` for OpenAI calls; if not present it will fall back to `OPENROUTER_API_KEY`. To pick a specific model set `AI_MODEL` (example: `openai/gpt-oss-120b:free` or `gpt-4o-mini`).
42
+
43
+ Security note: Do not commit secrets. If sensitive keys are accidentally present, rotate them immediately. The `subto upload` tool attempts to avoid printing secret values; it will report their presence and recommend remediation.
@@ -40,7 +40,7 @@ Commands
40
40
  {
41
41
  "url": "https://example.com",
42
42
  "source": "cli",
43
- "client": { "name": "subto-cli", "version": "4.0.0" }
43
+ "client": { "name": "subto-cli", "version": "5.0.0" }
44
44
  }
45
45
  ```
46
46
 
@@ -98,11 +98,11 @@ Download
98
98
  After publishing or packing, a distributable tarball will be available under `./dist/` e.g.:
99
99
 
100
100
  ```
101
- ./dist/subto-4.0.0.tgz
101
+ ./dist/subto-5.0.0.tgz
102
102
  ```
103
103
 
104
104
  You can download that file directly and install locally with:
105
105
 
106
106
  ```bash
107
- npm install -g ./dist/subto-4.0.0.tgz
107
+ npm install -g ./dist/subto-5.0.0.tgz
108
108
  ```
@@ -11,7 +11,7 @@ const chalk = (_chalk && _chalk.default) ? _chalk.default : _chalk;
11
11
  const CONFIG_DIR = path.join(os.homedir(), '.subto');
12
12
  const CONFIG_PATH = path.join(CONFIG_DIR, 'config.json');
13
13
  const DEFAULT_API_BASE = 'https://subto.one';
14
- const CLIENT_META = { name: 'subto-cli', version: '4.0.0' };
14
+ const CLIENT_META = { name: 'subto-cli', version: '5.0.0' };
15
15
 
16
16
  function configFilePath() { return CONFIG_PATH; }
17
17
 
@@ -91,7 +91,7 @@ function printScanSummary(obj) {
91
91
 
92
92
  async function run(argv) {
93
93
  const program = new Command();
94
- program.name('subto').description('Subto CLI — wrapper around Subto.One API').version(CLIENT_META.version || '4.0.0');
94
+ program.name('subto').description('Subto CLI — wrapper around Subto.One API').version(CLIENT_META.version || '5.0.0');
95
95
 
96
96
  program.command('login').description('Store your API key in ~/.subto/config.json').action(async () => {
97
97
  try {
package/index.js CHANGED
@@ -11,7 +11,22 @@ const chalk = (_chalk && _chalk.default) ? _chalk.default : _chalk;
11
11
  const CONFIG_DIR = path.join(os.homedir(), '.subto');
12
12
  const CONFIG_PATH = path.join(CONFIG_DIR, 'config.json');
13
13
  const DEFAULT_API_BASE = 'https://subto.one';
14
- const CLIENT_META = { name: 'subto-cli', version: '4.0.0' };
14
+ const CLIENT_META = { name: 'subto-cli', version: '5.0.0' };
15
+ const cp = require('child_process');
16
+
17
+ // Load local CLI .env if present (safe, optional)
18
+ try {
19
+ const dotenvPath = path.join(__dirname, '.env');
20
+ const fsSync = require('fs');
21
+ if (fsSync.existsSync(dotenvPath)) {
22
+ try {
23
+ const dotenv = require('dotenv');
24
+ dotenv.config({ path: dotenvPath });
25
+ } catch (e) {
26
+ // dotenv not installed in this environment; ignore silently
27
+ }
28
+ }
29
+ } catch (e) { /* non-fatal */ }
15
30
 
16
31
  function configFilePath() { return CONFIG_PATH; }
17
32
 
@@ -77,6 +92,18 @@ async function postScan(url, apiKey) {
77
92
  return { status: res.status, headers: res.headers, body: data };
78
93
  }
79
94
 
95
+ function extractScanIdFromHtml(html) {
96
+ if (!html || typeof html !== 'string') return null;
97
+ // common patterns: /scan/ID or scanId="..." or data-scan-id="..."
98
+ const m1 = html.match(/\/scan\/(?:id\/)?([a-zA-Z0-9_-]{8,})/i);
99
+ if (m1 && m1[1]) return m1[1];
100
+ const m2 = html.match(/scanId["'\s:=]+([a-zA-Z0-9_-]{8,})/i);
101
+ if (m2 && m2[1]) return m2[1];
102
+ const m3 = html.match(/data-scan-id["'\s=:\>]+([a-zA-Z0-9_-]{8,})/i);
103
+ if (m3 && m3[1]) return m3[1];
104
+ return null;
105
+ }
106
+
80
107
  function printScanSummary(obj) {
81
108
  if (!obj || typeof obj !== 'object') { console.log(obj); return; }
82
109
  console.log(chalk.bold('Scan result:'));
@@ -89,12 +116,12 @@ function printScanSummary(obj) {
89
116
  if (keys.length) console.log(chalk.dim(' Additional keys: ' + keys.join(', ')));
90
117
  }
91
118
 
92
- function printFullReport(data) {
119
+ async function printFullReport(data) {
93
120
  const scan = (data && data.results) ? data.results : data;
94
121
  if (!scan || typeof scan !== 'object') { console.log(JSON.stringify(data, null, 2)); return; }
95
122
 
96
123
  const lh = scan.lighthouse || scan.lhr || (scan.results && scan.results.lighthouse) || null;
97
- const vt = scan.virustotal || (scan.results && scan.results.virustotal) || null;
124
+ // VirusTotal reporting removed from API responses; omit vt variable.
98
125
  const timings = scan.timings || scan.coreWebVitals || (scan.results && scan.results.timings) || null;
99
126
 
100
127
  console.log(chalk.bold.underline('\nOverview'));
@@ -154,29 +181,218 @@ function printFullReport(data) {
154
181
  console.log(' No issues reported');
155
182
  }
156
183
 
157
- // Malware / VirusTotal
158
- if (vt) {
159
- console.log(chalk.bold.underline('\nMalware Scan (VirusTotal)'));
160
- if (vt.scanDate) console.log(' Scan Date:', vt.scanDate);
161
- console.log(' Harmless:', vt.harmless ?? vt.undetected ?? '--');
162
- console.log(' Malicious:', vt.malicious ?? '--');
163
- console.log(' Suspicious:', vt.suspicious ?? '--');
164
- if (vt.resultUrl) console.log(' View Full VirusTotal Report:', vt.resultUrl.replace('https://www.virustotal.com/gui/url/','https://www.virustotal.com/gui/url/'));
165
- }
184
+ // Malware reporting via VirusTotal has been disabled in API responses.
166
185
 
167
186
  // Video link
168
187
  if (scan.videoUrl || scan.videoPath || scan.hasVideo) {
169
188
  const vurl = scan.videoUrl || ((scan.videoPath) ? `${process.env.SUBTO_API_BASE_URL || DEFAULT_API_BASE}/video/${scan.scanId || scan.id}` : null);
170
- if (vurl) console.log(chalk.bold('\nSession video:'), chalk.cyan(vurl));
189
+ if (vurl) {
190
+ // attempt to HEAD the video URL to detect expiry / 404
191
+ let note = '';
192
+ try {
193
+ const fetchFn = global.fetch;
194
+ if (typeof fetchFn === 'function') {
195
+ const head = await fetchFn(vurl, { method: 'HEAD' });
196
+ if (head && head.status === 404) note = ' (not found / expired)';
197
+ }
198
+ } catch (e) { /* ignore network errors */ }
199
+ console.log(chalk.bold('\nSession video:'), chalk.cyan(vurl) + (note ? chalk.red(note) : ''));
200
+ console.log(chalk.dim(' Note: session videos are short-lived (≈1 day) and may return 404 after expiry.'));
201
+ }
171
202
  }
172
203
 
173
204
  // AI assistant hint
174
- console.log(chalk.dim('\nTo inspect full JSON output, run with --json'));
205
+ console.log(chalk.dim('\nTo inspect full JSON output, run with --json'));
206
+ console.log(chalk.dim('Start the built-in assistant with `--chat` to ask questions interactively.'));
207
+
208
+ // Helpful interactive links (browser-based) — point to server-side chat and video endpoints
209
+ try {
210
+ const base = (process.env.SUBTO_API_BASE_URL || DEFAULT_API_BASE).replace(/\/$/, '');
211
+ const id = scan.scanId || scan.id;
212
+ if (id) {
213
+ console.log(chalk.bold.underline('\nInteractive Links'));
214
+ console.log(' AI Chat:', chalk.cyan(`${base}/aichat/${id}`));
215
+ console.log(' Session Video:', chalk.cyan(`${base}/video/${id}`));
216
+ console.log(chalk.dim(' You can also open the local assistant with `--chat`'));
217
+ }
218
+ } catch (e) { /* no-op if env malformed */ }
219
+ }
220
+
221
+ function copyToClipboard(text){
222
+ try{
223
+ if (process.platform === 'darwin'){
224
+ const p = cp.spawnSync('pbcopy');
225
+ p.stdin && p.stdin.end(String(text));
226
+ return true;
227
+ }
228
+ if (process.platform === 'win32'){
229
+ const p = cp.spawnSync('clip'); p.stdin && p.stdin.end(String(text)); return true;
230
+ }
231
+ }catch(e){}
232
+ return false;
233
+ }
234
+
235
+ async function callOpenAI(prompt){
236
+ // Test hook: when SUBTO_LOCAL_AI=1, return a canned local response
237
+ if (process.env.SUBTO_LOCAL_AI === '1') {
238
+ return 'LOCAL_AI: sample analysis (dry-run) — 1 issue: Example secret detected; Recommendation: rotate.';
239
+ }
240
+ // Prefer OpenAI env vars, but also accept OpenRouter key from env or local config for compatibility.
241
+ const openaiKey = process.env.OPENAI_API_KEY || process.env.AI_API_KEY;
242
+ let openrouterKey = process.env.OPENROUTER_API_KEY;
243
+ let configuredModel = process.env.AI_MODEL || process.env.OPENROUTER_MODEL || null;
244
+ try {
245
+ const cfg = await readConfig();
246
+ if (cfg) {
247
+ if (!openrouterKey && cfg.openrouterKey) openrouterKey = cfg.openrouterKey;
248
+ if (!configuredModel && cfg.openrouterModel) configuredModel = cfg.openrouterModel;
249
+ if (!configuredModel && cfg.aiModel) configuredModel = cfg.aiModel;
250
+ }
251
+ } catch (e) { /* ignore config read errors */ }
252
+ const fetchFn = global.fetch;
253
+ if(typeof fetchFn !== 'function') throw new Error('Global fetch() is not available in this Node runtime. Use Node 18+');
254
+
255
+ // If OpenAI key present, call OpenAI API; otherwise, if OpenRouter key present, call OpenRouter endpoint.
256
+ if (openaiKey) {
257
+ const model = process.env.AI_MODEL || process.env.OPENAI_MODEL || 'gpt-4o-mini';
258
+ const body = {
259
+ model,
260
+ messages: [
261
+ { role: 'system', content: 'You are an assistant that answers questions using only the provided scan data. If the answer is not contained in the scan data, say you don\'t know. Keep answers concise and reference fields when useful.' },
262
+ { role: 'user', content: prompt }
263
+ ],
264
+ temperature: 0.2,
265
+ max_tokens: 800
266
+ };
267
+ const res = await fetchFn('https://api.openai.com/v1/chat/completions', { method: 'POST', headers: { 'Content-Type': 'application/json', 'Authorization': `Bearer ${openaiKey}` }, body: JSON.stringify(body) });
268
+ if (!res.ok) {
269
+ // Avoid including response bodies in errors to prevent accidental leakage of sensitive data
270
+ throw new Error(`OpenAI API error ${res.status}: ${res.statusText || 'request failed'}`);
271
+ }
272
+ const j = await res.json();
273
+ const msg = j && j.choices && j.choices[0] && (j.choices[0].message && j.choices[0].message.content || j.choices[0].text);
274
+ return String(msg || '').trim();
275
+ }
276
+
277
+ if (openrouterKey) {
278
+ // Use OpenRouter chat completions endpoint
279
+ const model = configuredModel || 'openai/gpt-oss-120b:free';
280
+ const body = {
281
+ model,
282
+ messages: [
283
+ { role: 'system', content: 'You are an assistant that answers questions using only the provided scan data. If the answer is not contained in the scan data, say you don\'t know. Keep answers concise and reference fields when useful.' },
284
+ { role: 'user', content: prompt }
285
+ ],
286
+ temperature: 0.2,
287
+ max_tokens: 800
288
+ };
289
+ const res = await fetchFn('https://openrouter.ai/api/v1/chat/completions', { method: 'POST', headers: { 'Content-Type': 'application/json', 'Authorization': `Bearer ${openrouterKey}` }, body: JSON.stringify(body) });
290
+ if (!res.ok) {
291
+ // Avoid including response bodies in errors to prevent accidental leakage of sensitive data
292
+ throw new Error(`OpenRouter API error ${res.status}: ${res.statusText || 'request failed'}`);
293
+ }
294
+ const j = await res.json();
295
+ // OpenRouter responses mimic OpenAI shape (choices[0].message.content)
296
+ const msg = j && j.choices && j.choices[0] && (j.choices[0].message && j.choices[0].message.content || j.choices[0].text);
297
+ return String(msg || '').trim();
298
+ }
299
+
300
+ throw new Error('No AI API key configured in environment (set OPENAI_API_KEY, AI_API_KEY, or OPENROUTER_API_KEY)');
301
+ }
302
+
303
+ function summarizeScanForPrompt(scan){
304
+ if(!scan || typeof scan !== 'object') return 'No scan data available.';
305
+ const parts = [];
306
+ parts.push(`URL: ${scan.url || scan.results && scan.results.url || 'unknown'}`);
307
+ parts.push(`ScanId: ${scan.scanId || scan.id || (scan.results && scan.results.scanId) || 'unknown'}`);
308
+ if(scan.status) parts.push(`Status: ${scan.status}`);
309
+ const lh = scan.lighthouse || scan.lhr || (scan.results && scan.results.lighthouse);
310
+ if(lh) {
311
+ const perf = lh.performance ?? lh.performanceScore ?? (lh.categories && lh.categories.performance && Math.round((lh.categories.performance.score||0)*100));
312
+ if(perf!=null) parts.push(`Performance: ${perf}`);
313
+ }
314
+ const vitals = scan.timings || scan.coreWebVitals || (scan.results && scan.results.timings);
315
+ if(vitals) {
316
+ const fcp = vitals.firstContentfulPaint || vitals.fcp; if(fcp!=null) parts.push(`FCP: ${fcp}`);
317
+ const lcp = vitals.largestContentfulPaint || vitals.lcp; if(lcp!=null) parts.push(`LCP: ${lcp}`);
318
+ const cls = vitals.cumulativeLayoutShift || vitals.cls; if(cls!=null) parts.push(`CLS: ${cls}`);
319
+ }
320
+ const issues = scan.issues || (scan.results && scan.results.issues) || [];
321
+ if(Array.isArray(issues) && issues.length) parts.push(`Issues: ${issues.slice(0,10).map(i=>i.title||i.message||i.rule||i.id).join('; ')}`);
322
+ if(scan.videoUrl || scan.videoPath) parts.push(`Video: ${scan.videoUrl || `${process.env.SUBTO_API_BASE_URL||DEFAULT_API_BASE}/video/${scan.scanId||scan.id}`}`);
323
+ return parts.join('\n');
324
+ }
325
+
326
+ async function answerFromScan(scan, question){
327
+ const q = String(question||'').trim();
328
+ if(!scan) return 'No scan data available.';
329
+ // If an external AI key is configured, prefer it for richer answers
330
+ const aiKey = process.env.OPENAI_API_KEY || process.env.AI_API_KEY;
331
+ if(aiKey) {
332
+ const summary = summarizeScanForPrompt(scan);
333
+ const prompt = `Scan summary:\n${summary}\n\nQuestion: ${q}\nAnswer concisely, referencing the scan when relevant.`;
334
+ try {
335
+ const out = await callOpenAI(prompt);
336
+ return out || 'No answer.';
337
+ } catch (e) {
338
+ // fall back to local heuristic on API errors
339
+ // sanitize any bearer tokens that might appear in error messages
340
+ const raw = e && e.message ? String(e.message) : String(e);
341
+ const safe = raw.replace(/(Bearer\s+)([^\s]+)/ig, '$1[REDACTED]');
342
+ console.error('AI request failed:', safe);
343
+ }
344
+ }
345
+
346
+ // Local heuristic fallback
347
+ const lq = q.toLowerCase();
348
+ if(lq.includes('video')||lq.includes('record')){
349
+ const url = scan.videoUrl || (scan.videoPath? `${process.env.SUBTO_API_BASE_URL||DEFAULT_API_BASE}/video/${scan.scanId||scan.id}` : null);
350
+ return url? `Session video: ${url}` : 'No session video available for this scan.';
351
+ }
352
+ if(lq.includes('issues')||lq.includes('problem')){
353
+ const issues = scan.issues || (scan.results && scan.results.issues) || [];
354
+ if(!issues.length) return 'No issues recorded.';
355
+ return issues.slice(0,10).map((it,i)=> `${i+1}. ${it.title||it.message||it.rule||JSON.stringify(it).slice(0,80)}`).join('\n');
356
+ }
357
+ if(lq.includes('performance')||lq.includes('lighthouse')||lq.includes('score')){
358
+ const lh = scan.lighthouse || scan.lhr || (scan.results && scan.results.lighthouse);
359
+ if(!lh) return 'No Lighthouse data available.';
360
+ const cats = lh.categories || {};
361
+ return Object.keys(cats).map(k=> `${k}: ${Math.round((cats[k].score||0)*100)}%`).join('\n');
362
+ }
363
+ const parts = [];
364
+ if(scan.overview && scan.overview.title) parts.push(`Title: ${scan.overview.title}`);
365
+ if(scan.results && scan.results.lighthouse && scan.results.lighthouse.categories){
366
+ const cat = scan.results.lighthouse.categories.performance || scan.results.lighthouse.categories.performance;
367
+ if(cat && cat.score!=null) parts.push(`Lighthouse performance: ${Math.round(cat.score*100)}%`);
368
+ }
369
+ if(scan.issues && scan.issues.length) parts.push(`${scan.issues.length} issues detected.`);
370
+ return parts.length? parts.join(' — ') : 'Scan complete.';
371
+ }
372
+
373
+ async function startChatREPL(scanData){
374
+ if(!process.stdin.isTTY){ console.log(chalk.yellow('Interactive chat not available in non-interactive terminal.')); return; }
375
+ console.log(chalk.cyan('\nStarting interactive assistant. Type `exit` or Ctrl-D to quit.'));
376
+ const rl = readline.createInterface({ input: process.stdin, output: process.stdout, prompt: 'AI> ' });
377
+ rl.prompt();
378
+ rl.on('line', async (line)=>{
379
+ const t = line.trim();
380
+ if(!t){ rl.prompt(); return; }
381
+ if(['exit','quit'].includes(t.toLowerCase())){ rl.close(); return; }
382
+ try {
383
+ const ans = await answerFromScan(scanData, t);
384
+ console.log(chalk.green('\nAssistant:'), ans.replace(/\n/g,'\n'));
385
+ } catch (e) {
386
+ console.error(chalk.red('Assistant error:'), e && e.message ? e.message : String(e));
387
+ }
388
+ rl.prompt();
389
+ }).on('close', ()=>{ console.log(chalk.dim('Assistant closed.')); });
175
390
  }
176
391
 
177
392
  async function run(argv) {
178
393
  const program = new Command();
179
- program.name('subto').description('Subto CLI — wrapper around Subto.One API').version(CLIENT_META.version || '4.0.0');
394
+ program.name('subto').description('Subto CLI — wrapper around Subto.One API').version(CLIENT_META.version || '5.0.0');
395
+ program.option('--chat', 'Start local AI assistant (no command required)');
180
396
 
181
397
  program.command('login').description('Store your API key in ~/.subto/config.json').action(async () => {
182
398
  try {
@@ -194,6 +410,7 @@ async function run(argv) {
194
410
  .option('--json', 'Output raw JSON')
195
411
  .option('--wait', 'Poll for completion and show progress')
196
412
  .option('--no-wait', 'Do not poll; return immediately')
413
+ .option('--chat', 'Open interactive AI assistant after scan completes')
197
414
  .action(async (url, opts) => {
198
415
  if (!validateUrl(url)) { console.error(chalk.red('Invalid URL. Provide a full URL including http:// or https://')); process.exit(1); }
199
416
  const cfg = await readConfig(); if (!cfg || !cfg.apiKey) { console.error(chalk.red('Missing API key. Run:'), chalk.cyan('subto login')); process.exit(1); }
@@ -212,6 +429,33 @@ async function run(argv) {
212
429
  // - if user passed --wait -> poll
213
430
  // - if user passed --no-wait -> do not poll
214
431
  // - otherwise, poll by default when server returns a queued/started status
432
+ // If server returned HTML (some proxies or web routes), avoid dumping raw HTML to terminal.
433
+ // Try to recover a scanId from the HTML and fetch the JSON scan resource instead.
434
+ if (typeof resp.body === 'string' && resp.body.indexOf('<') !== -1) {
435
+ // attempt to extract scan id
436
+ const attemptId = extractScanIdFromHtml(resp.body);
437
+ const fetchFn = global.fetch;
438
+ if (attemptId && typeof fetchFn === 'function') {
439
+ try {
440
+ const base = process.env.SUBTO_API_BASE_URL || DEFAULT_API_BASE;
441
+ const statusUrl = new URL(`/api/v1/scan/${attemptId}`, base).toString();
442
+ const r2 = await fetchFn(statusUrl, { headers: { 'Authorization': `Bearer ${cfg.apiKey}`, 'Accept': 'application/json' } });
443
+ if (r2 && r2.ok) {
444
+ try { resp.body = await r2.json(); } catch (e) { /* leave as-is */ }
445
+ } else {
446
+ // replace body with minimal metadata so CLI can continue
447
+ resp.body = { scanId: attemptId, status: 'accepted' };
448
+ }
449
+ } catch (e) {
450
+ // couldn't fetch, fall through and keep original body but avoid printing HTML
451
+ resp.body = { status: 'unknown', note: 'Server returned HTML and scanId could not be resolved.' };
452
+ }
453
+ } else {
454
+ // No scan id found; replace with a safe message (do NOT print raw HTML)
455
+ resp.body = { status: 'unknown', note: 'Server returned HTML. Use --wait or provide the scanId to fetch structured JSON.' };
456
+ }
457
+ }
458
+
215
459
  const serverStatus = resp.body && resp.body.status;
216
460
  const serverIndicatesQueued = serverStatus && ['started', 'queued', 'pending', 'accepted'].includes(String(serverStatus).toLowerCase());
217
461
  const shouldPoll = (!opts.noWait) && (opts.wait || serverIndicatesQueued);
@@ -244,6 +488,9 @@ async function run(argv) {
244
488
  // Percent smoothing: last displayed percent and target from server
245
489
  let lastDisplayedPercent = 0;
246
490
  let targetPercent = 0;
491
+ // Track last server-provided percent to detect stalls
492
+ let lastServerPercent = null;
493
+ let lastServerChangeAt = Date.now();
247
494
  let lastPollTs = Date.now();
248
495
  const terminalStates = ['finished', 'completed', 'done', 'complete', 'success', 'succeeded'];
249
496
 
@@ -315,17 +562,28 @@ async function run(argv) {
315
562
  const spinner = spinnerFrames[spinnerIndex % spinnerFrames.length];
316
563
  spinnerIndex += 1;
317
564
 
318
- // robustly extract status string
319
- let rawLabel = 'waiting';
320
- if (d && d.status) {
321
- if (typeof d.status === 'string') rawLabel = d.status;
322
- else if (typeof d.status === 'object' && (d.status.state || d.status.name)) rawLabel = d.status.state || d.status.name;
323
- else rawLabel = String(d.status);
324
- }
325
- const label = String(rawLabel).replace(/[_-]+/g, ' ').trim().replace(/\b\w/g, c => c.toUpperCase());
565
+ // concise status label to avoid dumping long stage strings
566
+ const normalize = s => String(s || '').toLowerCase().replace(/[_-]+/g,' ').trim();
567
+ const statusRaw = d && d.status ? (typeof d.status === 'string' ? d.status : (d.status.state||d.status.name||String(d.status))) : '';
568
+ const statusNorm = normalize(statusRaw);
569
+ let statusLabel = 'Waiting';
570
+ if (statusNorm.includes('queued') || statusNorm.includes('pending') || statusNorm.includes('accepted')) statusLabel = 'Queued';
571
+ else if (statusNorm.includes('start') || statusNorm.includes('running') || statusNorm.includes('in progress') || statusNorm.includes('processing') || statusNorm.includes('scanning')) statusLabel = 'In Progress';
572
+ else if (statusNorm && terminalStates.includes(statusNorm)) statusLabel = 'Finished';
573
+
574
+ // short stage (truncated)
575
+ const rawStage = d && (d.stage || d.step || d.substage) ? (d.stage || d.step || d.substage) : '';
576
+ const stageShort = String(rawStage).replace(/[_-]+/g,' ').trim().slice(0,24);
577
+ const label = stageShort ? `${statusLabel} • ${stageShort}` : statusLabel;
326
578
 
327
579
  // Update targetPercent from latest data but don't immediately drop lastDisplayedPercent
328
580
  const serverPct = computePercentFromData(d);
581
+ // detect server percent changes
582
+ if (lastServerPercent === null || serverPct !== lastServerPercent) {
583
+ lastServerPercent = serverPct;
584
+ lastServerChangeAt = Date.now();
585
+ }
586
+
329
587
  if (serverPct >= 100) {
330
588
  targetPercent = 100;
331
589
  } else if (serverPct > lastDisplayedPercent) {
@@ -334,11 +592,26 @@ async function run(argv) {
334
592
  targetPercent = Math.max(targetPercent, serverPct);
335
593
  }
336
594
 
337
- // If explicit numeric progress provided and > lastDisplayed, accept it as target
338
- if (d && typeof d.progress === 'number' && d.progress > lastDisplayedPercent) {
339
- targetPercent = Math.max(targetPercent, Math.max(0, Math.min(100, Math.round(d.progress))));
595
+ // If explicit numeric progress provided, accept and trust it
596
+ if (d && typeof d.progress === 'number') {
597
+ const p = Math.max(0, Math.min(100, Math.round(d.progress)));
598
+ if (p > targetPercent) targetPercent = p;
599
+ if (p !== lastServerPercent) { lastServerPercent = p; lastServerChangeAt = Date.now(); }
340
600
  }
341
601
 
602
+ // Nudging: if server percent hasn't changed for a while, nudge progress slowly toward next bucket
603
+ try {
604
+ const now = Date.now();
605
+ const stallMs = now - lastServerChangeAt;
606
+ if (stallMs > 5000 && serverPct > 0 && serverPct < 100) {
607
+ const BUCKETS = [1,5,10,50,70,90,99,100];
608
+ const next = BUCKETS.find(b => b > serverPct) || 100;
609
+ // gently increment by 1 per render tick but do not exceed next bucket
610
+ const proposed = Math.min(next, lastDisplayedPercent + 1);
611
+ if (proposed > lastDisplayedPercent) targetPercent = Math.max(targetPercent, proposed);
612
+ }
613
+ } catch (e) { /* ignore nudging errors */ }
614
+
342
615
  // Smooth display: approach targetPercent gradually each tick
343
616
  if (lastDisplayedPercent < targetPercent) {
344
617
  const diff = targetPercent - lastDisplayedPercent;
@@ -370,7 +643,6 @@ async function run(argv) {
370
643
  lastPollTs = Date.now();
371
644
  }
372
645
 
373
- console.log(chalk.blue('Queued scan. Polling for progress...'));
374
646
  let headerState = 'queued';
375
647
  function startSpinner(){
376
648
  if (spinnerTimer) return;
@@ -402,18 +674,9 @@ async function run(argv) {
402
674
  const nonQueueStates = ['started','running','in progress','inprogress','processing','scanning'];
403
675
  const statusStr = data && data.status ? String(data.status).toLowerCase().trim() : '';
404
676
  if (headerState === 'queued' && statusStr && !['queued','pending','accepted'].includes(statusStr)) {
677
+ // transition to started but keep single-line spinner (do not print extra lines)
405
678
  headerState = 'started';
406
- try {
407
- if (process.stdout && process.stdout.isTTY) {
408
- readline.moveCursor(process.stdout, 0, -1);
409
- readline.clearLine(process.stdout, 0);
410
- console.log(chalk.blue('Started scan.'));
411
- } else {
412
- console.log(chalk.blue('Started scan.'));
413
- }
414
- } catch (e) {
415
- console.log(chalk.blue('Started scan.'));
416
- }
679
+ lastRender = ''; // force render refresh
417
680
  }
418
681
 
419
682
  // render progress line
@@ -441,7 +704,8 @@ async function run(argv) {
441
704
  try { readline.clearLine(process.stdout, 0); readline.cursorTo(process.stdout, 0); } catch (e) { /* ignore */ }
442
705
  console.log(chalk.green('Scan finished. Full results:'));
443
706
  if (opts && opts.json) console.log(JSON.stringify(data, null, 2));
444
- else printFullReport(data);
707
+ else await printFullReport(data);
708
+ if (opts && opts.chat) await startChatREPL(data);
445
709
  return;
446
710
  }
447
711
 
@@ -453,10 +717,273 @@ async function run(argv) {
453
717
 
454
718
  // Default (no wait): pretty summary
455
719
  printScanSummary(resp.body);
720
+ if (opts && opts.chat) await startChatREPL(resp.body);
456
721
  } catch (err) { const msg = err && err.message ? err.message : String(err); console.error(chalk.red('Network error:'), msg); process.exit(1); }
457
722
  });
458
723
 
724
+ // Upload and scan locally via server: `subto scan upload [dir]`
725
+ program
726
+ .command('scan upload [dir]')
727
+ .description('Upload a directory to the server and request a scan (respects .subtoignore)')
728
+ .option('--wait', 'Poll until analysis completes')
729
+ .action(async (dir, opts) => {
730
+ const target = dir ? path.resolve(dir) : process.cwd();
731
+ const cfg = await readConfig(); if (!cfg || !cfg.apiKey) { console.error(chalk.red('Missing API key. Run:'), chalk.cyan('subto login')); process.exit(1); }
732
+
733
+ // reuse uploader logic to collect & sample files (simple version)
734
+ async function readIgnore(root){ const ignPath = path.join(root, '.subtoignore'); try{ const txt = await fs.readFile(ignPath,'utf8'); return txt.split(/\r?\n/).map(l=>l.trim()).filter(l=>l && !l.startsWith('#')); }catch(e){ return []; } }
735
+ const ignoreList = (await readIgnore(target)).concat(['.env']);
736
+ function isIgnored(rel){ if(!rel) return false; for(const ig of ignoreList){ if(!ig) continue; if(ig===rel) return true; if(rel===ig) return true; if(rel.startsWith(ig) || rel.includes('/'+ig) || rel.endsWith(ig)) return true; } return false; }
737
+ const collected = []; let totalBytes = 0;
738
+ async function walk(dirPath, base){ const entries = await fs.readdir(dirPath, { withFileTypes: true }); for(const ent of entries){ const rel = path.relative(base, path.join(dirPath, ent.name)).replace(/\\/g,'/'); if (isIgnored(rel)) continue; const full = path.join(dirPath, ent.name); if (ent.isDirectory()) { await walk(full, base); } else if (ent.isFile()) { try { const st = await fs.stat(full); if (st.size <=0) continue; collected.push({ path: rel, full, size: st.size }); totalBytes += st.size; } catch(e){} } } }
739
+ await walk(target, target);
740
+ if (!collected.length) { console.error(chalk.yellow('No files collected for upload.')); return; }
741
+ // sample up to 80 smallest files with content snippets
742
+ const bySize = collected.slice().sort((a,b)=>a.size-b.size).slice(0,80);
743
+ const samples = [];
744
+ for(const f of bySize){ try { if (f.size <= 16*1024) { const txt = await fs.readFile(f.full,'utf8'); samples.push({ path: f.path, size: f.size, content: txt.slice(0, 12000) }); } else { const fd = await fs.open(f.full,'r'); const buf = Buffer.alloc(8192); const { bytesRead } = await fd.read(buf,0,buf.length,0); await fd.close(); samples.push({ path: f.path, size: f.size, content: buf.slice(0,bytesRead).toString('utf8') }); } } catch(e){ samples.push({ path: f.path, size: f.size, content: '' }); } }
745
+
746
+ // Send to server
747
+ try {
748
+ const base = process.env.SUBTO_API_BASE_URL || DEFAULT_API_BASE;
749
+ const endpoint = new URL('/api/v1/upload', base).toString();
750
+ const fetchFn = global.fetch; if (typeof fetchFn !== 'function') throw new Error('Global fetch() is not available in this Node runtime. Use Node 18+');
751
+ const body = { files: samples, meta: { collected: collected.length, totalBytes } };
752
+ const r = await fetchFn(endpoint, { method: 'POST', headers: { 'Content-Type': 'application/json', 'Authorization': `Bearer ${cfg.apiKey}` }, body: JSON.stringify(body) });
753
+ if (!r.ok) {
754
+ const txt = await r.text().catch(()=>null);
755
+ console.error(chalk.red('Upload failed:'), r.status, txt || r.statusText);
756
+ try {
757
+ const t = String(txt || '').toLowerCase();
758
+ if (t.includes('openrouter') && (t.includes('429') || t.includes('too many requests') || t.includes('rate limit'))) {
759
+ console.error(chalk.yellow('Experiencing This To Much Upload Your Own Key At "subto upload key"'));
760
+ }
761
+ } catch (e) { /* ignore */ }
762
+ process.exit(1);
763
+ }
764
+ const j = await r.json(); console.log(chalk.green('Upload queued:'), j.uploadId, 'scanId:', j.scanId, 'expiresAt:', new Date(j.expiresAt).toString());
765
+ if (opts.wait) {
766
+ // Poll the scan resource until completed (reuse existing polling behavior)
767
+ const statusUrl = new URL(`/api/v1/scan/${j.scanId}`, base).toString();
768
+ const sleep = ms => new Promise(r=>setTimeout(r,ms));
769
+ while (true) {
770
+ const s = await fetchFn(statusUrl, { headers: { 'Authorization': `Bearer ${cfg.apiKey}`, 'Accept': 'application/json' } });
771
+ if (!s.ok) { console.error(chalk.red('Failed to fetch scan status:', s.status)); break; }
772
+ const data = await s.json(); if (data.status && ['completed','done','finished','success'].includes(String(data.status).toLowerCase())) { console.log(chalk.green('Scan complete.')); await printFullReport(data); break; }
773
+ console.log(chalk.dim('Scan status:'), data.status || 'queued', '— polling again in 4s'); await sleep(4000);
774
+ }
775
+ }
776
+ } catch (e) { console.error(chalk.red('Upload request error:'), e && e.message ? e.message : String(e)); process.exit(1); }
777
+ });
778
+
779
+ program
780
+ .command('chat [scanId]')
781
+ .description('Start the local AI assistant for a scan (optionally provide scanId)')
782
+ .action(async (scanId) => {
783
+ const cfg = await readConfig();
784
+ if (!cfg || !cfg.apiKey) { console.error(chalk.red('Missing API key. Run:'), chalk.cyan('subto login')); process.exit(1); }
785
+ let scanData = null;
786
+ try {
787
+ if (!scanId) {
788
+ console.log(chalk.yellow('No scanId provided. You can paste raw JSON or enter a scanId. Press Enter to cancel.'));
789
+ const rl = readline.createInterface({ input: process.stdin, output: process.stdout });
790
+ const answer = await new Promise(res => rl.question('ScanId or JSON file path: ', a => { rl.close(); res(a && a.trim()); }));
791
+ if (!answer) { console.log('Aborted.'); return; }
792
+ // if answer looks like a file, try to read JSON
793
+ try { if (await fs.stat(answer).then(s=>s.isFile()).catch(()=>false)) { const txt = await fs.readFile(answer,'utf8'); scanData = JSON.parse(txt); } } catch(e) { /* not a file */ }
794
+ if (!scanData) scanId = answer;
795
+ }
796
+
797
+ if (!scanData && scanId) {
798
+ const base = process.env.SUBTO_API_BASE_URL || DEFAULT_API_BASE;
799
+ const fetchFn = global.fetch;
800
+ if (typeof fetchFn !== 'function') throw new Error('Global fetch() is not available in this Node runtime. Use Node 18+');
801
+ const statusUrl = new URL(`/api/v1/scan/${scanId}`, base).toString();
802
+ const r = await fetchFn(statusUrl, { headers: { 'Authorization': `Bearer ${cfg.apiKey}`, 'Accept': 'application/json' } });
803
+ if (!r.ok) { throw new Error(`Failed to fetch scan ${scanId}: ${r.status}`); }
804
+ scanData = await r.json();
805
+ }
806
+
807
+ if (!scanData) { console.error(chalk.red('No scan data available.')); return; }
808
+ await startChatREPL(scanData);
809
+ } catch (e) { console.error(chalk.red('Error starting chat:'), e && e.message ? e.message : String(e)); process.exit(1); }
810
+ });
811
+
812
+ // Upload project files to AI for analysis. Respects `.subtoignore` and always ignores `.env`.
813
+ program
814
+ .command('upload [dir]')
815
+ .description('Upload your project (or directory) to the AI assistant for analysis')
816
+ .option('--max-files <n>', 'Maximum number of files to include', '300')
817
+ .option('--max-bytes <n>', 'Maximum total bytes to include', String(5 * 1024 * 1024))
818
+ .action(async (dir, opts) => {
819
+ try {
820
+ const target = dir ? path.resolve(dir) : process.cwd();
821
+ const maxFiles = parseInt(opts.maxFiles || opts.maxFiles === 0 ? opts.maxFiles : opts.maxFiles, 10) || parseInt(opts.maxFiles || 300, 10) || 300;
822
+ const maxBytes = parseInt(opts.maxBytes || opts.maxBytes === 0 ? opts.maxBytes : opts.maxBytes, 10) || (5 * 1024 * 1024);
823
+
824
+ // Read .subtoignore if present
825
+ async function readIgnore(root){
826
+ const ignPath = path.join(root, '.subtoignore');
827
+ try { const txt = await fs.readFile(ignPath, 'utf8'); return txt.split(/\r?\n/).map(l=>l.trim()).filter(l=>l && !l.startsWith('#')); } catch(e){ return []; }
828
+ }
829
+
830
+ const ignoreList = (await readIgnore(target)).concat(['.env']);
831
+
832
+ // Simple matcher: skip if relative path equals or contains any ignore token
833
+ function isIgnored(rel){
834
+ if (!rel) return false;
835
+ for (const ig of ignoreList) {
836
+ if (!ig) continue;
837
+ if (ig === rel) return true;
838
+ if (rel === ig) return true;
839
+ if (rel.startsWith(ig) || rel.includes('/' + ig) || rel.endsWith(ig)) return true;
840
+ }
841
+ return false;
842
+ }
843
+
844
+ // Walk directory
845
+ const collected = [];
846
+ let totalBytes = 0;
847
+ async function walk(dirPath, base){
848
+ const entries = await fs.readdir(dirPath, { withFileTypes: true });
849
+ for (const ent of entries) {
850
+ const rel = path.relative(base, path.join(dirPath, ent.name)).replace(/\\\\/g,'/');
851
+ if (isIgnored(rel)) continue;
852
+ const full = path.join(dirPath, ent.name);
853
+ if (ent.isDirectory()) {
854
+ await walk(full, base);
855
+ if (collected.length >= maxFiles) return;
856
+ } else if (ent.isFile()) {
857
+ try {
858
+ const st = await fs.stat(full);
859
+ if (st.size <= 0) continue;
860
+ if (totalBytes + st.size > maxBytes) continue;
861
+ collected.push({ path: rel, full, size: st.size });
862
+ totalBytes += st.size;
863
+ if (collected.length >= maxFiles) return;
864
+ } catch (e) { continue; }
865
+ }
866
+ }
867
+ }
868
+
869
+ await walk(target, target);
870
+
871
+ if (!collected.length) { console.log(chalk.yellow('No files collected for upload (check .subtoignore or target directory).')); return; }
872
+
873
+ // Sample files (small files included fully; large files include head)
874
+ async function sampleFile(f) {
875
+ try {
876
+ if (f.size <= 16 * 1024) {
877
+ const txt = await fs.readFile(f.full, 'utf8'); return { path: f.path, size: f.size, snippet: txt };
878
+ }
879
+ const fd = await fs.open(f.full, 'r');
880
+ const buf = Buffer.alloc(8192);
881
+ const { bytesRead } = await fd.read(buf, 0, buf.length, 0);
882
+ await fd.close();
883
+ return { path: f.path, size: f.size, snippet: buf.slice(0, bytesRead).toString('utf8') };
884
+ } catch (e) { return { path: f.path, size: f.size, snippet: null }; }
885
+ }
886
+
887
+ const samples = [];
888
+ // choose up to 80 files to sample, preferring smaller files
889
+ const bySize = collected.slice().sort((a,b)=>a.size-b.size).slice(0, 80);
890
+ for (const f of bySize) samples.push(await sampleFile(f));
891
+
892
+ // Build prompt
893
+ const manifestLines = collected.slice(0, 1000).map(f => `${f.path} (${f.size} bytes)`);
894
+ const promptParts = [];
895
+ promptParts.push('You are a codebase analysis assistant.');
896
+ promptParts.push('The user asked: Analyze the uploaded project and provide actionable fixes, security issues, performance improvements, and a prioritized TODO list. NEVER print any secrets found in files; instead, flag their presence and recommend rotation/removal.');
897
+ promptParts.push('\nMANIFEST:\n' + manifestLines.join('\n'));
898
+ promptParts.push('\nSAMPLES:\n');
899
+ for (const s of samples) {
900
+ promptParts.push(`--- FILE: ${s.path} (${s.size} bytes) ---\n` + (s.snippet ? s.snippet.slice(0, 12000) : '[binary or unreadable]'));
901
+ }
902
+ promptParts.push('\nProvide a concise prioritized list of suggested fixes (max 12 items), and mark any potential secrets or sensitive config entries found.');
903
+
904
+ const prompt = promptParts.join('\n\n');
905
+
906
+ console.log(chalk.dim(`Collected ${collected.length} files, ${Math.round(totalBytes/1024)} KB total. Sending summary to AI...`));
907
+ const answer = await callOpenAI(prompt);
908
+ console.log(chalk.bold('\nAI Analysis:'));
909
+ console.log(answer);
910
+ console.log(chalk.dim('\nNote: sensitive values are not printed; rotate any exposed keys if they were stored in the repo.'));
911
+ } catch (e) {
912
+ console.error(chalk.red('Upload failed:'), e && e.message ? e.message : String(e));
913
+ process.exit(1);
914
+ }
915
+ });
916
+
917
+ // Store a local OpenRouter key + model for client-side AI calls (kept in ~/.subto)
918
+ program
919
+ .command('upload key')
920
+ .description('Store a local OpenRouter API key and model for analysis (kept locally only)')
921
+ .action(async () => {
922
+ try {
923
+ const key = await promptHidden('OpenRouter API key: ');
924
+ if (!key || !key.trim()) { console.log('Aborted.'); return; }
925
+ // Ask for preferred model
926
+ const rl = readline.createInterface({ input: process.stdin, output: process.stdout });
927
+ const model = await new Promise(res => rl.question('Model (e.g. openai/gpt-oss-120b:free): ', a => { rl.close(); res(a && a.trim()); }));
928
+ const chosenModel = model && model.length ? model : 'openai/gpt-oss-120b:free';
929
+
930
+ // Try to validate model exists (best-effort). If validation fails, ask user to confirm saving.
931
+ let validated = false;
932
+ const fetchFn = global.fetch;
933
+ if (typeof fetchFn === 'function') {
934
+ try {
935
+ const res = await fetchFn('https://openrouter.ai/api/v1/models', { headers: { 'Authorization': `Bearer ${key}` } });
936
+ if (res && res.ok) {
937
+ const jd = await res.json().catch(()=>null);
938
+ const modelsList = jd && (jd.models || jd.data || jd) ;
939
+ const names = Array.isArray(modelsList) ? modelsList.map(m => m.id || m.name || m.model || m.modelId).filter(Boolean) : [];
940
+ if (names.length && names.includes(chosenModel)) validated = true;
941
+ }
942
+ } catch (e) { /* ignore network errors */ }
943
+ }
944
+
945
+ if (!validated) {
946
+ const rl2 = readline.createInterface({ input: process.stdin, output: process.stdout });
947
+ const answer = await new Promise(res => rl2.question('Could not verify model with OpenRouter. Save anyway? (y/N): ', a => { rl2.close(); res(a && a.trim().toLowerCase()); }));
948
+ if (answer !== 'y' && answer !== 'yes') { console.log('Aborted.'); return; }
949
+ }
950
+
951
+ const cfg = await readConfig() || {};
952
+ cfg.openrouterKey = key.trim();
953
+ cfg.openrouterModel = chosenModel;
954
+ await writeConfig(cfg);
955
+ console.log(chalk.green('OpenRouter key and model saved to'), chalk.cyan(configFilePath()));
956
+ } catch (e) { console.error(chalk.red('Failed to save key:'), e && e.message ? e.message : String(e)); process.exit(1); }
957
+ });
958
+
459
959
  if (!argv || argv.length === 0) { program.help(); return; }
960
+ // Support global `--chat` flag when used without a subcommand (e.g. `subto --chat`)
961
+ const rawArgs = Array.isArray(argv) ? argv.slice() : [];
962
+ const chatFlagOnly = rawArgs.includes('--chat') && !rawArgs.some(a => ['scan','login','chat'].includes(a));
963
+ if (chatFlagOnly) {
964
+ // emulate the `chat` command action
965
+ const cfg = await readConfig();
966
+ if (!cfg || !cfg.apiKey) { console.error(chalk.red('Missing API key. Run:'), chalk.cyan('subto login')); process.exit(1); }
967
+ // prompt for scanId or JSON path
968
+ const rl = readline.createInterface({ input: process.stdin, output: process.stdout });
969
+ const answer = await new Promise(res => rl.question('ScanId or JSON file path (blank to cancel): ', a => { rl.close(); res(a && a.trim()); }));
970
+ if (!answer) { console.log('Aborted.'); return; }
971
+ let scanData = null;
972
+ try { if (await fs.stat(answer).then(s=>s.isFile()).catch(()=>false)) { const txt = await fs.readFile(answer,'utf8'); scanData = JSON.parse(txt); } } catch(e) {}
973
+ try {
974
+ if (!scanData) {
975
+ const base = process.env.SUBTO_API_BASE_URL || DEFAULT_API_BASE; const fetchFn = global.fetch;
976
+ if (typeof fetchFn !== 'function') throw new Error('Global fetch() is not available in this Node runtime. Use Node 18+');
977
+ const statusUrl = new URL(`/api/v1/scan/${answer}`, base).toString();
978
+ const r = await fetchFn(statusUrl, { headers: { 'Authorization': `Bearer ${cfg.apiKey}`, 'Accept': 'application/json' } });
979
+ if (!r.ok) throw new Error(`Failed to fetch scan ${answer}: ${r.status}`);
980
+ scanData = await r.json();
981
+ }
982
+ await startChatREPL(scanData);
983
+ return;
984
+ } catch (e) { console.error(chalk.red('Error starting chat:'), e && e.message ? e.message : String(e)); process.exit(1); }
985
+ }
986
+
460
987
  await program.parseAsync(argv, { from: 'user' });
461
988
  }
462
989
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "subto",
3
- "version": "4.0.0",
3
+ "version": "6.0.0",
4
4
  "description": "Subto CLI — thin wrapper around the Subto.One API",
5
5
  "bin": {
6
6
  "subto": "bin/subto.js"
@@ -29,10 +29,12 @@
29
29
  },
30
30
  "scripts": {
31
31
  "prepublishOnly": "node ./scripts/prepublish-check.js",
32
- "postinstall": "node ./scripts/fix-node-domexception.js"
32
+ "postinstall": "node ./scripts/fix-node-domexception.js",
33
+ "test:smoke": "bash test/smoke_cli_test.sh"
33
34
  },
34
35
  "dependencies": {
35
36
  "commander": "^11.0.0",
36
37
  "chalk": "^5.3.0"
37
38
  }
39
+
38
40
  }