serpentstack 0.2.14 → 0.2.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -23,7 +23,7 @@ import {
23
23
  getEffectiveModel,
24
24
  isAgentEnabled,
25
25
  } from '../utils/config.js';
26
- import { detectModels, modelShortName } from '../utils/models.js';
26
+ import { detectModels, modelShortName, detectSystemCapabilities } from '../utils/models.js';
27
27
 
28
28
  // ─── Helpers ────────────────────────────────────────────────
29
29
 
@@ -244,13 +244,13 @@ async function pickModel(rl, agentName, currentModel, available) {
244
244
  }
245
245
  }
246
246
 
247
- // Section 2: Recommended models (not installed, auto-download on select)
248
- if (available.ollamaInstalled && available.recommended.length > 0) {
247
+ // Section 2: Downloadable models always shown if we have recommendations
248
+ if (available.recommended.length > 0) {
249
249
  const liveTag = available.recommendedLive
250
- ? dim(`fetched from ollama.com`)
250
+ ? dim(`live from ollama.com`)
251
251
  : dim(`cached list`);
252
- console.log(` ${dim('── Download')} ${cyan('free')} ${dim('(')}${liveTag}${dim(') ──')}`);
253
- // Show a reasonable subset (not 50 models)
252
+ const needsOllama = !available.ollamaInstalled ? dim(' · requires Ollama') : '';
253
+ console.log(` ${dim('── Download')} ${cyan('free')} ${dim('(')}${liveTag}${needsOllama}${dim(') ──')}`);
254
254
  const toShow = available.recommended.slice(0, 8);
255
255
  for (const r of toShow) {
256
256
  const idx = choices.length;
@@ -316,9 +316,33 @@ async function pickModel(rl, agentName, currentModel, available) {
316
316
 
317
317
  const selected = (idx >= 0 && idx < choices.length) ? choices[idx] : choices[Math.max(0, currentIdx)];
318
318
 
319
- // If they selected a downloadable model, pull it now
319
+ // If they selected a downloadable model, handle Ollama install + pull
320
320
  if (selected.action === 'download') {
321
- // Close rl temporarily so ollama pull can use the terminal
321
+ if (!available.ollamaInstalled) {
322
+ console.log();
323
+ warn('Ollama is required to run local models.');
324
+ console.log();
325
+ console.log(` ${dim('Install Ollama (free, open-source):')}`);
326
+ console.log(` ${dim('$')} ${bold('curl -fsSL https://ollama.com/install.sh | sh')}`);
327
+ console.log(` ${dim('$')} ${bold('ollama serve')}`);
328
+ console.log();
329
+ info(`After installing, re-run ${bold('serpentstack persistent --agents')} to download and select ${bold(selected.name)}.`);
330
+ console.log();
331
+
332
+ // Save the selection anyway so it's remembered
333
+ return selected.id;
334
+ }
335
+
336
+ if (!available.ollamaRunning) {
337
+ console.log();
338
+ warn('Ollama is installed but not running.');
339
+ console.log(` ${dim('$')} ${bold('ollama serve')}`);
340
+ console.log();
341
+ info(`Start Ollama, then re-run ${bold('serpentstack persistent --agents')} to download ${bold(selected.name)}.`);
342
+ console.log();
343
+ return selected.id;
344
+ }
345
+
322
346
  rl.pause();
323
347
  const pulled = await ollamaPull(selected.name);
324
348
  rl.resume();
@@ -601,9 +625,25 @@ async function runConfigure(projectDir, config, soulPath) {
601
625
 
602
626
  // ─── Agents Flow ────────────────────────────────────────────
603
627
 
628
// One-paragraph blurb per built-in agent, shown during the enable/disable
// prompt flow. Keyed by agent name; looked up as a fallback-friendly map
// (callers fall back to the agent's own metadata description when a name
// is missing here).
const AGENT_SUMMARIES = {
  "log-watcher":
    "Monitors your dev server health and log output every 30–60s. Catches backend crashes, frontend build errors, and import failures — reports them with file paths and suggested fixes.",
  "test-runner":
    "Runs your test suite every 5 min and lint/typecheck every 15 min. Catches regressions before you commit — shows which test failed, what changed, and whether the test or source needs fixing.",
  "skill-maintainer":
    "Checks every hour whether your .skills/ files still match the actual codebase. When code patterns drift from what skills describe, it proposes exact updates so IDE agents stay accurate.",
};
634
+
604
635
  async function runAgents(projectDir, config, parsed, available) {
636
+ // Show system capabilities so users know what models they can run
637
+ const sys = detectSystemCapabilities();
638
+
639
+ divider('Your System');
640
+ console.log(` ${dim('RAM:')} ${bold(sys.totalGB + ' GB')} total, ${sys.freeGB} GB free`);
641
+ console.log(` ${dim(sys.recommendation)}`);
642
+ console.log();
643
+
605
644
  divider('Agents');
606
- console.log(` ${dim('Enable/disable each agent and pick a model.')}`);
645
+ console.log(` ${dim('Each agent runs in its own terminal on a schedule.')}`);
646
+ console.log(` ${dim('Enable the ones you want, then pick a model for each.')}`);
607
647
  console.log();
608
648
 
609
649
  const rl = createInterface({ input: stdin, output: stdout });
@@ -615,8 +655,23 @@ async function runAgents(projectDir, config, parsed, available) {
615
655
  const currentModel = existingAgent?.model || 'ollama/llama3.2';
616
656
  const schedule = (agentMd.meta.schedule || []).map(s => s.every).join(', ');
617
657
 
618
- console.log(` ${bold(name)} ${dim(agentMd.meta.description || '')}`);
619
- console.log(` ${dim(`Schedule: ${schedule || 'none'}`)}`);
658
+ // Show rich description
659
+ console.log(` ${bold(name)} ${dim(`(${schedule || 'manual'})`)}`);
660
+ const summary = AGENT_SUMMARIES[name] || agentMd.meta.description || '';
661
+ if (summary) {
662
+ // Word-wrap summary to ~70 chars, indented
663
+ const words = summary.split(' ');
664
+ let line = '';
665
+ for (const word of words) {
666
+ if (line.length + word.length + 1 > 68) {
667
+ console.log(` ${dim(line)}`);
668
+ line = word;
669
+ } else {
670
+ line = line ? `${line} ${word}` : word;
671
+ }
672
+ }
673
+ if (line) console.log(` ${dim(line)}`);
674
+ }
620
675
 
621
676
  const enabled = await askYesNo(rl, `Enable ${bold(name)}?`, currentEnabled);
622
677
 
@@ -1,4 +1,5 @@
1
1
  import { execFile } from 'node:child_process';
2
+ import { freemem, totalmem } from 'node:os';
2
3
 
3
4
  // ─── Fallback Recommendations ───────────────────────────────
4
5
  // Used only when the Ollama library API is unreachable.
@@ -273,6 +274,38 @@ export function modelShortName(model) {
273
274
  return model;
274
275
  }
275
276
 
277
/**
 * Detect system memory capabilities so the CLI can recommend a sensible
 * local-model size tier before the user picks an Ollama model.
 *
 * Note: all numeric fields are returned as *strings* (pre-formatted with
 * `toFixed`) ready for direct display; callers needing math should
 * `Number(...)` them.
 *
 * @returns {{ totalGB: string, freeGB: string, availableGB: string, recommendation: string }}
 *   totalGB        – total system RAM in GB, rounded to a whole number.
 *   freeGB         – currently free RAM in GB, one decimal place.
 *   availableGB    – free RAM minus ~2 GB reserved for the Ollama runtime
 *                    (clamped at 0), one decimal place.
 *   recommendation – human-readable model-size guidance tier.
 */
export function detectSystemCapabilities() {
  const total = totalmem();
  const free = freemem();
  const totalGB = total / (1024 ** 3);
  const freeGB = free / (1024 ** 3);

  // Ollama needs ~2GB overhead; model needs to fit in remaining RAM.
  // (Heuristic baked into this CLI — not a figure queried from Ollama.)
  const availableForModel = Math.max(0, freeGB - 2);

  // Tier the recommendation on *total* RAM (not free RAM): free memory is
  // too volatile moment-to-moment to be a stable sizing signal.
  let recommendation;
  if (totalGB >= 32) {
    recommendation = 'Your system can handle large models (up to 24B parameters)';
  } else if (totalGB >= 16) {
    recommendation = 'Good for medium models (up to 8B parameters)';
  } else if (totalGB >= 8) {
    recommendation = 'Best with small models (3B–4B parameters)';
  } else {
    recommendation = 'Limited RAM — use cloud models or very small local models';
  }

  return {
    totalGB: totalGB.toFixed(0),
    freeGB: freeGB.toFixed(1),
    availableGB: availableForModel.toFixed(1),
    recommendation,
  };
}
308
+
276
309
  function execAsync(cmd, args) {
277
310
  return new Promise((resolve, reject) => {
278
311
  execFile(cmd, args, { timeout: 5000 }, (err, stdout) => {
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "serpentstack",
3
- "version": "0.2.14",
3
+ "version": "0.2.15",
4
4
  "description": "CLI for SerpentStack — AI-driven development standards with project-specific skills and persistent agents",
5
5
  "type": "module",
6
6
  "bin": {