llm-checker 3.1.1 → 3.2.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -52,6 +52,20 @@ Choosing the right LLM for your hardware is complex. With thousands of model var
52
52
 
53
53
  ---
54
54
 
55
+ ## Comparison with Other Tooling (e.g. `llmfit`)
56
+
57
+ LLM Checker and `llmfit` solve related but different problems:
58
+
59
+ | Tool | Primary Focus | Typical Output |
60
+ |------|---------------|----------------|
61
+ | **LLM Checker** | Hardware-aware **model selection** for local inference | Ranked recommendations, compatibility scores, pull/run commands |
62
+ | **llmfit** | LLM workflow support and model-fit evaluation from another angle | Different optimization workflow and selection heuristics |
63
+
64
+ If your goal is: *"What should I run on this exact machine right now?"*, use **LLM Checker** first.
65
+ If your goal is broader experimentation across custom pipelines, the two tools can be complementary.
66
+
67
+ ---
68
+
55
69
  ## Installation
56
70
 
57
71
  ```bash
@@ -118,6 +132,8 @@ Restart Claude Code and you're done.
118
132
 
119
133
  Once connected, Claude can use these tools:
120
134
 
135
+ **Core Analysis:**
136
+
121
137
  | Tool | Description |
122
138
  |------|-------------|
123
139
  | `hw_detect` | Detect your hardware (CPU, GPU, RAM, acceleration backend) |
@@ -126,19 +142,38 @@ Once connected, Claude can use these tools:
126
142
  | `installed` | Rank your already-downloaded Ollama models |
127
143
  | `search` | Search the Ollama model catalog with filters |
128
144
  | `smart_recommend` | Advanced recommendations using the full scoring engine |
129
- | `ollama_list` | List all downloaded Ollama models |
145
+
146
+ **Ollama Management:**
147
+
148
+ | Tool | Description |
149
+ |------|-------------|
150
+ | `ollama_list` | List all downloaded models with params, quant, family, and size |
130
151
  | `ollama_pull` | Download a model from the Ollama registry |
131
- | `ollama_run` | Run a prompt against a local Ollama model |
152
+ | `ollama_run` | Run a prompt against a local model (with tok/s metrics) |
153
+ | `ollama_remove` | Delete a model to free disk space |
154
+
155
+ **Advanced (MCP-exclusive):**
156
+
157
+ | Tool | Description |
158
+ |------|-------------|
159
+ | `ollama_optimize` | Generate optimal Ollama env vars for your hardware (NUM_GPU, PARALLEL, FLASH_ATTENTION, etc.) |
160
+ | `benchmark` | Benchmark a model with 3 standardized prompts — measures tok/s, load time, prompt eval |
161
+ | `compare_models` | Head-to-head comparison of two models on the same prompt with speed + response side-by-side |
162
+ | `cleanup_models` | Analyze installed models — find redundancies, cloud-only models, oversized models, and upgrade candidates |
163
+ | `project_recommend` | Scan a project directory (languages, frameworks, size) and recommend the best model for that codebase |
164
+ | `ollama_monitor` | Real-time system status: RAM usage, loaded models, memory headroom analysis |
132
165
 
133
166
  ### Example Prompts
134
167
 
135
168
  After setup, you can ask Claude things like:
136
169
 
137
170
  - *"What's the best coding model for my hardware?"*
138
- - *"What models do I have installed and how do they rank?"*
139
- - *"Pull the top reasoning model for my system"*
140
- - *"Search for multimodal models under 8GB"*
141
- - *"Run this prompt on qwen2.5-coder"*
171
+ - *"Benchmark qwen2.5-coder and show me the tok/s"*
172
+ - *"Compare llama3.2 vs codellama for coding tasks"*
173
+ - *"Clean up my Ollama — what should I remove?"*
174
+ - *"What model should I use for this Rust project?"*
175
+ - *"Optimize my Ollama config for maximum performance"*
176
+ - *"How much RAM is Ollama using right now?"*
142
177
 
143
178
  Claude will automatically call the right tools and give you actionable results.
144
179
 
@@ -1,4 +1,10 @@
1
1
  const { getLogger } = require('../src/utils/logger');
2
+ const {
3
+ normalizeRuntime,
4
+ getRuntimeDisplayName,
5
+ runtimeSupportedOnHardware,
6
+ runtimeSupportsSpeculativeDecoding
7
+ } = require('../src/runtime/runtime-support');
2
8
 
3
9
  class CompatibilityAnalyzer {
4
10
  constructor() {
@@ -451,6 +457,7 @@ class CompatibilityAnalyzer {
451
457
 
452
458
  generateRecommendations(hardware, results, options = {}) {
453
459
  const recommendations = [];
460
+ const runtime = normalizeRuntime(options.runtime || 'ollama');
454
461
  const tier = this.getHardwareTier(hardware);
455
462
 
456
463
  if (hardware.memory.total < 16) {
@@ -511,6 +518,19 @@ class CompatibilityAnalyzer {
511
518
  }
512
519
  }
513
520
 
521
+ if (runtime !== 'ollama') {
522
+ const runtimeLabel = getRuntimeDisplayName(runtime);
523
+ if (runtimeSupportedOnHardware(runtime, hardware)) {
524
+ recommendations.push(`Runtime selected: ${runtimeLabel}`);
525
+ } else {
526
+ recommendations.push(`${runtimeLabel} is not recommended on this hardware (fallback to Ollama).`);
527
+ }
528
+
529
+ if (runtimeSupportsSpeculativeDecoding(runtime)) {
530
+ recommendations.push(`Enable speculative decoding in ${runtimeLabel} for higher throughput.`);
531
+ }
532
+ }
533
+
514
534
  return recommendations;
515
535
  }
516
536
 
package/bin/cli.js ADDED
@@ -0,0 +1,14 @@
1
+ #!/usr/bin/env node
2
+ 'use strict';
3
+
4
+ const majorNodeVersion = Number.parseInt(process.versions.node.split('.')[0], 10);
5
+
6
+ if (!Number.isFinite(majorNodeVersion) || majorNodeVersion < 16) {
7
+ console.error(
8
+ `[llm-checker] Unsupported Node.js version: ${process.versions.node}. ` +
9
+ 'Please use Node.js 16 or newer.'
10
+ );
11
+ process.exit(1);
12
+ }
13
+
14
+ require('./enhanced_cli');
@@ -16,6 +16,14 @@ function getLLMChecker() {
16
16
  const { getLogger } = require('../src/utils/logger');
17
17
  const fs = require('fs');
18
18
  const path = require('path');
19
+ const {
20
+ SUPPORTED_RUNTIMES,
21
+ normalizeRuntime,
22
+ runtimeSupportedOnHardware,
23
+ getRuntimeDisplayName,
24
+ getRuntimeCommandSet
25
+ } = require('../src/runtime/runtime-support');
26
+ const SpeculativeDecodingEstimator = require('../src/models/speculative-decoding-estimator');
19
27
 
20
28
  // ASCII Art for each command - Large text banners
21
29
  const ASCII_ART = {
@@ -1406,10 +1414,18 @@ function displaySimplifiedSystemInfo(hardware) {
1406
1414
  console.log(`Hardware Tier: ${tierColor.bold(tier)}`);
1407
1415
  }
1408
1416
 
1409
- async function displayModelRecommendations(analysis, hardware, useCase = 'general', limit = 1) {
1417
+ async function displayModelRecommendations(analysis, hardware, useCase = 'general', limit = 1, runtime = 'ollama') {
1410
1418
  const title = limit === 1 ? 'RECOMMENDED MODEL' : `TOP ${limit} COMPATIBLE MODELS`;
1411
1419
  console.log(chalk.green.bold(`\n${title}`));
1412
1420
  console.log(chalk.gray('─'.repeat(50)));
1421
+
1422
+ const selectedRuntime = normalizeRuntime(runtime);
1423
+ const runtimeLabel = getRuntimeDisplayName(selectedRuntime);
1424
+ const speculativeEstimator = new SpeculativeDecodingEstimator();
1425
+ const speculativeCandidatePool = [
1426
+ ...(analysis?.compatible || []),
1427
+ ...(analysis?.marginal || [])
1428
+ ];
1413
1429
 
1414
1430
  // Find the best models from compatible models considering use case
1415
1431
  let selectedModels = [];
@@ -1760,42 +1776,75 @@ async function displayModelRecommendations(analysis, hardware, useCase = 'genera
1760
1776
  if (model.performanceEstimate) {
1761
1777
  console.log(`Estimated Speed: ${chalk.yellow(model.performanceEstimate.estimatedTokensPerSecond || 'N/A')} tokens/sec`);
1762
1778
  }
1763
-
1764
- // Check if it's already installed by comparing with Ollama integration
1779
+
1780
+ console.log(`Runtime: ${chalk.white(runtimeLabel)}`);
1781
+ const runtimeCommands = getRuntimeCommandSet(model, selectedRuntime);
1782
+
1783
+ // Check installation only when using Ollama runtime.
1765
1784
  let isInstalled = false;
1766
- try {
1767
- isInstalled = await checkIfModelInstalled(model, analysis.ollamaInfo);
1768
- if (isInstalled) {
1769
- console.log(`Status: ${chalk.green('Already installed in Ollama')}`);
1770
- } else if (analysis.ollamaInfo && analysis.ollamaInfo.available) {
1771
- console.log(`Status: ${chalk.gray('Available for installation')}`);
1772
- } else {
1773
- console.log(`Status: ${chalk.yellow('Requires Ollama (not detected)')}`);
1785
+ if (selectedRuntime === 'ollama') {
1786
+ try {
1787
+ isInstalled = await checkIfModelInstalled(model, analysis.ollamaInfo);
1788
+ if (isInstalled) {
1789
+ console.log(`Status: ${chalk.green('Already installed in Ollama')}`);
1790
+ } else if (analysis.ollamaInfo && analysis.ollamaInfo.available) {
1791
+ console.log(`Status: ${chalk.gray('Available for installation')}`);
1792
+ } else {
1793
+ console.log(`Status: ${chalk.yellow('Requires Ollama (not detected)')}`);
1794
+ }
1795
+ } catch (installCheckError) {
1796
+ if (analysis.ollamaInfo && analysis.ollamaInfo.available) {
1797
+ console.log(`Status: ${chalk.gray('Available for installation')}`);
1798
+ } else {
1799
+ console.log(`Status: ${chalk.yellow('Requires Ollama (not detected)')}`);
1800
+ }
1774
1801
  }
1775
- } catch (installCheckError) {
1776
- // If checking installation status fails, show based on Ollama availability
1777
- if (analysis.ollamaInfo && analysis.ollamaInfo.available) {
1778
- console.log(`Status: ${chalk.gray('Available for installation')}`);
1779
- } else {
1780
- console.log(`Status: ${chalk.yellow('Requires Ollama (not detected)')}`);
1802
+
1803
+ const ollamaCommand = getOllamaInstallCommand(model);
1804
+ if (ollamaCommand) {
1805
+ const modelName = extractModelName(ollamaCommand);
1806
+ if (isInstalled) {
1807
+ console.log(`\nRun: ${chalk.cyan.bold(`ollama run ${modelName}`)}`);
1808
+ } else {
1809
+ console.log(`\nPull: ${chalk.cyan.bold(ollamaCommand)}`);
1810
+ }
1811
+ } else if (model.ollamaTag || model.ollamaId) {
1812
+ const tag = model.ollamaTag || model.ollamaId;
1813
+ if (isInstalled) {
1814
+ console.log(`\nRun: ${chalk.cyan.bold(`ollama run ${tag}`)}`);
1815
+ } else {
1816
+ console.log(`\nPull: ${chalk.cyan.bold(`ollama pull ${tag}`)}`);
1817
+ }
1818
+ }
1819
+ } else {
1820
+ console.log(`Status: ${chalk.gray(`${runtimeLabel} runtime selected`)}`);
1821
+ console.log(`\nRun: ${chalk.cyan.bold(runtimeCommands.run)}`);
1822
+ if (index === 0) {
1823
+ console.log(`Install runtime: ${chalk.cyan.bold(runtimeCommands.install)}`);
1824
+ console.log(`Fetch model: ${chalk.cyan.bold(runtimeCommands.pull)}`);
1781
1825
  }
1782
1826
  }
1783
1827
 
1784
- // Show pull/run command directly in each model block (Issue #3)
1785
- const ollamaCommand = getOllamaInstallCommand(model);
1786
- if (ollamaCommand) {
1787
- const modelName = extractModelName(ollamaCommand);
1788
- if (isInstalled) {
1789
- console.log(`\nCommand: ${chalk.cyan.bold(`ollama run ${modelName}`)}`);
1790
- } else {
1791
- console.log(`\nCommand: ${chalk.cyan.bold(ollamaCommand)}`);
1792
- }
1793
- } else if (model.ollamaTag || model.ollamaId) {
1794
- const tag = model.ollamaTag || model.ollamaId;
1795
- if (isInstalled) {
1796
- console.log(`\nCommand: ${chalk.cyan.bold(`ollama run ${tag}`)}`);
1797
- } else {
1798
- console.log(`\nCommand: ${chalk.cyan.bold(`ollama pull ${tag}`)}`);
1828
+ const speculativeInfo =
1829
+ model.speculativeDecoding ||
1830
+ speculativeEstimator.estimate({
1831
+ model,
1832
+ candidates: speculativeCandidatePool,
1833
+ hardware,
1834
+ runtime: selectedRuntime
1835
+ });
1836
+
1837
+ if (speculativeInfo && speculativeInfo.runtime === selectedRuntime) {
1838
+ if (speculativeInfo.enabled) {
1839
+ console.log(
1840
+ `SpecDec: ${chalk.green(`+${speculativeInfo.estimatedThroughputGainPct}%`)} ` +
1841
+ `(${chalk.gray(`draft: ${speculativeInfo.draftModel}`)})`
1842
+ );
1843
+ } else if (speculativeInfo.estimatedSpeedup) {
1844
+ const suggested = speculativeInfo.suggestedDraftModel ? ` with ${speculativeInfo.suggestedDraftModel}` : '';
1845
+ console.log(
1846
+ `SpecDec estimate: ${chalk.yellow(`+${speculativeInfo.estimatedThroughputGainPct}%`)}${chalk.gray(suggested)}`
1847
+ );
1799
1848
  }
1800
1849
  }
1801
1850
  }
@@ -1807,9 +1856,12 @@ async function displayModelRecommendations(analysis, hardware, useCase = 'genera
1807
1856
  return selectedModels;
1808
1857
  }
1809
1858
 
1810
- async function displayQuickStartCommands(analysis, recommendedModel = null, allRecommended = null) {
1859
+ async function displayQuickStartCommands(analysis, recommendedModel = null, allRecommended = null, runtime = 'ollama') {
1811
1860
  console.log(chalk.yellow.bold('\nQUICK START'));
1812
1861
  console.log(chalk.gray('─'.repeat(50)));
1862
+
1863
+ const selectedRuntime = normalizeRuntime(runtime);
1864
+ const runtimeLabel = getRuntimeDisplayName(selectedRuntime);
1813
1865
 
1814
1866
  // Use the first model from allRecommended if available, otherwise fallback to recommendedModel
1815
1867
  let bestModel = (allRecommended && allRecommended.length > 0) ? allRecommended[0] : recommendedModel;
@@ -1824,6 +1876,33 @@ async function displayQuickStartCommands(analysis, recommendedModel = null, allR
1824
1876
  }
1825
1877
  }
1826
1878
 
1879
+ if (selectedRuntime !== 'ollama') {
1880
+ if (!bestModel) {
1881
+ console.log(`1. Try expanding search: ${chalk.cyan('llm-checker check --include-cloud')}`);
1882
+ return;
1883
+ }
1884
+
1885
+ const runtimeCommands = getRuntimeCommandSet(bestModel, selectedRuntime);
1886
+ console.log(`1. Install ${runtimeLabel}:`);
1887
+ console.log(` ${chalk.cyan.bold(runtimeCommands.install)}`);
1888
+ console.log(`2. Fetch model weights:`);
1889
+ console.log(` ${chalk.cyan.bold(runtimeCommands.pull)}`);
1890
+ console.log(`3. Run model:`);
1891
+ console.log(` ${chalk.cyan.bold(runtimeCommands.run)}`);
1892
+
1893
+ const speculative = bestModel.speculativeDecoding;
1894
+ if (speculative && speculative.enabled) {
1895
+ console.log(`4. SpecDec suggestion (${chalk.green(`+${speculative.estimatedThroughputGainPct}%`)}):`);
1896
+ if (selectedRuntime === 'vllm') {
1897
+ console.log(` ${chalk.cyan.bold(`${runtimeCommands.run} --speculative-model '${speculative.draftModelRef || speculative.draftModel}'`)}`);
1898
+ } else if (selectedRuntime === 'mlx') {
1899
+ console.log(` ${chalk.gray(`Use draft model ${speculative.draftModelRef || speculative.draftModel} when enabling speculative decoding in MLX-LM`)}`);
1900
+ }
1901
+ }
1902
+
1903
+ return;
1904
+ }
1905
+
1827
1906
  if (analysis.ollamaInfo && !analysis.ollamaInfo.available) {
1828
1907
  console.log(`1. Install Ollama: ${chalk.underline('https://ollama.ai')}`);
1829
1908
  console.log(`2. Come back and run this command again`);
@@ -1992,6 +2071,7 @@ program
1992
2071
  .option('--min-size <size>', 'Minimum model size to consider (e.g., "7B" or "7GB")')
1993
2072
  .option('--include-cloud', 'Include cloud models in analysis')
1994
2073
  .option('--ollama-only', 'Only show models available in Ollama')
2074
+ .option('--runtime <runtime>', `Inference runtime (${SUPPORTED_RUNTIMES.join('|')})`, 'ollama')
1995
2075
  .option('--performance-test', 'Run performance benchmarks')
1996
2076
  .option('--show-ollama-analysis', 'Show detailed Ollama model analysis')
1997
2077
  .option('--no-verbose', 'Disable step-by-step progress display')
@@ -2008,6 +2088,16 @@ program
2008
2088
  }
2009
2089
 
2010
2090
  const hardware = await checker.getSystemInfo();
2091
+ let selectedRuntime = normalizeRuntime(options.runtime);
2092
+ if (!runtimeSupportedOnHardware(selectedRuntime, hardware)) {
2093
+ const runtimeLabel = getRuntimeDisplayName(selectedRuntime);
2094
+ console.log(
2095
+ chalk.yellow(
2096
+ `\nWarning: ${runtimeLabel} is not supported on this hardware. Falling back to Ollama.`
2097
+ )
2098
+ );
2099
+ selectedRuntime = 'ollama';
2100
+ }
2011
2101
 
2012
2102
  // Normalize and fix use-case typos
2013
2103
  const normalizeUseCase = (useCase = '') => {
@@ -2049,7 +2139,8 @@ program
2049
2139
  performanceTest: options.performanceTest,
2050
2140
  limit: parseInt(options.limit) || 10,
2051
2141
  maxSize: maxSize,
2052
- minSize: minSize
2142
+ minSize: minSize,
2143
+ runtime: selectedRuntime
2053
2144
  });
2054
2145
 
2055
2146
  if (!verboseEnabled) {
@@ -2058,8 +2149,14 @@ program
2058
2149
 
2059
2150
  // Simplified output - show only essential information
2060
2151
  displaySimplifiedSystemInfo(hardware);
2061
- const recommendedModels = await displayModelRecommendations(analysis, hardware, normalizedUseCase, parseInt(options.limit) || 1);
2062
- await displayQuickStartCommands(analysis, recommendedModels[0], recommendedModels);
2152
+ const recommendedModels = await displayModelRecommendations(
2153
+ analysis,
2154
+ hardware,
2155
+ normalizedUseCase,
2156
+ parseInt(options.limit) || 1,
2157
+ selectedRuntime
2158
+ );
2159
+ await displayQuickStartCommands(analysis, recommendedModels[0], recommendedModels, selectedRuntime);
2063
2160
 
2064
2161
  } catch (error) {
2065
2162
  console.error(chalk.red('\nError:'), error.message);