free-coding-models 0.1.3 → 0.1.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -5,7 +5,17 @@
5
5
  <img src="https://img.shields.io/badge/models-44-76b900?logo=nvidia" alt="models count">
6
6
  </p>
7
7
 
8
- <h1 align="center">⚡ Free Coding Models</h1>
8
+ <h1 align="center">free-coding-models</h1>
9
+
10
+ <p align="center">
11
+
12
+ ```
13
+ 1. Create a free API key on NVIDIA → https://build.nvidia.com
14
+ 2. npm i -g free-coding-models
15
+ 3. free-coding-models
16
+ ```
17
+
18
+ </p>
9
19
 
10
20
  <p align="center">
11
21
  <strong>Find the fastest coding LLM models in seconds</strong><br>
@@ -288,41 +298,48 @@ Or run without flags and choose **OpenClaw** from the startup menu.
288
298
 
289
299
  ```json
290
300
  {
291
- "providers": {
292
- "nvidia": {
293
- "baseUrl": "https://integrate.api.nvidia.com/v1",
294
- "apiKey": "nvapi-xxxx-your-key",
295
- "api": "openai-completions",
296
- "models": [
297
- {
298
- "id": "deepseek-ai/deepseek-v3.2",
299
- "name": "DeepSeek V3.2",
300
- "contextWindow": 128000,
301
- "maxTokens": 8192
302
- }
303
- ]
301
+ "models": {
302
+ "providers": {
303
+ "nvidia": {
304
+ "baseUrl": "https://integrate.api.nvidia.com/v1",
305
+ "api": "openai-completions"
306
+ }
304
307
  }
305
308
  },
309
+ "env": {
310
+ "NVIDIA_API_KEY": "nvapi-xxxx-your-key"
311
+ },
306
312
  "agents": {
307
313
  "defaults": {
308
314
  "model": {
309
315
  "primary": "nvidia/deepseek-ai/deepseek-v3.2"
316
+ },
317
+ "models": {
318
+ "nvidia/deepseek-ai/deepseek-v3.2": {}
310
319
  }
311
320
  }
312
321
  }
313
322
  }
314
323
  ```
315
324
 
325
+ > ⚠️ **Note:** `providers` must be nested under `models.providers` — not at the config root. A root-level `providers` key is ignored by OpenClaw.
326
+
327
+ > ⚠️ **Note:** The model must also be listed in `agents.defaults.models` (the allowlist). Without this entry, OpenClaw rejects the model with *"not allowed"* even if it is set as primary.
328
+
316
329
  ### After updating OpenClaw config
317
330
 
318
- Restart OpenClaw or run the CLI command to apply the new model:
331
+ OpenClaw's gateway **auto-reloads** when the config file changes (depending on `gateway.reload.mode`). To apply manually:
319
332
 
320
333
  ```bash
321
- openclaw restart
322
- # or
334
+ # Apply via CLI
323
335
  openclaw models set nvidia/deepseek-ai/deepseek-v3.2
336
+
337
+ # Or re-run the interactive setup wizard
338
+ openclaw configure
324
339
  ```
325
340
 
341
+ > ⚠️ **Note:** `openclaw restart` does **not** exist as a CLI command. Kill and relaunch the process manually if you need a full restart.
342
+
326
343
  > 💡 **Why use remote NIM models with OpenClaw?** NVIDIA NIM serves models via a fast API — no local GPU required, no VRAM limits, free credits for developers. You get frontier-class coding models (DeepSeek V3, Kimi K2, Qwen3 Coder) without downloading anything.
327
344
 
328
345
  ---
@@ -391,6 +408,21 @@ npm install
391
408
  npm start -- YOUR_API_KEY
392
409
  ```
393
410
 
411
+ ### Releasing a new version
412
+
413
+ 1. Make your changes and commit them with a descriptive message
414
+ 2. Update `CHANGELOG.md` with the new version entry
415
+ 3. Bump `"version"` in `package.json` (e.g. `0.1.3` → `0.1.4`)
416
+ 4. Commit with **just the version number** as the message:
417
+
418
+ ```bash
419
+ git add .
420
+ git commit -m "0.1.4"
421
+ git push
422
+ ```
423
+
424
+ The GitHub Actions workflow automatically publishes to npm on every push to `main`.
425
+
394
426
  ---
395
427
 
396
428
  ## 📄 License
@@ -755,48 +755,59 @@ async function startOpenClaw(model, apiKey) {
755
755
  console.log(chalk.dim(` 💾 Backup: ${backupPath}`))
756
756
  }
757
757
 
758
- // 📖 Ensure providers section exists with nvidia NIM block
759
- // 📖 Only injects if not already present - we don't overwrite existing provider config
760
- if (!config.providers) config.providers = {}
761
- if (!config.providers.nvidia) {
762
- config.providers.nvidia = {
758
+ // 📖 Ensure models.providers section exists with nvidia NIM block.
759
+ // 📖 Per OpenClaw docs (docs.openclaw.ai/providers/nvidia), providers MUST be nested under
760
+ // 📖 "models.providers", NOT at the config root. Root-level "providers" is ignored by OpenClaw.
761
+ // 📖 API key is NOT stored in the provider block — it's read from env var NVIDIA_API_KEY.
762
+ // 📖 If needed, it can be stored under the root "env" key: { env: { NVIDIA_API_KEY: "nvapi-..." } }
763
+ if (!config.models) config.models = {}
764
+ if (!config.models.providers) config.models.providers = {}
765
+ if (!config.models.providers.nvidia) {
766
+ config.models.providers.nvidia = {
763
767
  baseUrl: 'https://integrate.api.nvidia.com/v1',
764
- // 📖 Store key reference as env var name — avoid hardcoding key in config file
765
- apiKey: apiKey || process.env.NVIDIA_API_KEY || 'YOUR_NVIDIA_API_KEY',
766
768
  api: 'openai-completions',
767
- models: [],
768
769
  }
769
- console.log(chalk.dim(' ➕ Added nvidia provider block to OpenClaw config'))
770
+ console.log(chalk.dim(' ➕ Added nvidia provider block to OpenClaw config (models.providers.nvidia)'))
770
771
  }
771
772
 
772
- // 📖 Ensure the chosen model is in the nvidia models array
773
- const modelsArr = config.providers.nvidia.models
774
- const modelEntry = {
775
- id: model.modelId,
776
- name: model.label,
777
- contextWindow: 128000,
778
- maxTokens: 8192,
779
- }
780
- const alreadyListed = modelsArr.some(m => m.id === model.modelId)
781
- if (!alreadyListed) {
782
- modelsArr.push(modelEntry)
783
- console.log(chalk.dim(` ➕ Added ${model.label} to nvidia models list`))
773
+ // 📖 Store API key in the root "env" section so OpenClaw can read it as NVIDIA_API_KEY env var.
774
+ // 📖 Only writes if not already set to avoid overwriting an existing key.
775
+ const resolvedKey = apiKey || process.env.NVIDIA_API_KEY
776
+ if (resolvedKey) {
777
+ if (!config.env) config.env = {}
778
+ if (!config.env.NVIDIA_API_KEY) {
779
+ config.env.NVIDIA_API_KEY = resolvedKey
780
+ console.log(chalk.dim(' 🔑 Stored NVIDIA_API_KEY in config env section'))
781
+ }
784
782
  }
785
783
 
786
- // 📖 Set as the default primary model for all agents
784
+ // 📖 Set as the default primary model for all agents.
785
+ // 📖 Format: "provider/model-id" — e.g. "nvidia/deepseek-ai/deepseek-v3.2"
786
787
788
  if (!config.agents) config.agents = {}
788
789
  if (!config.agents.defaults) config.agents.defaults = {}
789
790
  if (!config.agents.defaults.model) config.agents.defaults.model = {}
790
791
  config.agents.defaults.model.primary = `nvidia/${model.modelId}`
791
792
 
793
+ // 📖 REQUIRED: OpenClaw requires the model to be explicitly listed in agents.defaults.models
794
+ // 📖 (the allowlist). Without this entry, OpenClaw rejects the model with "not allowed".
795
+ // 📖 See: https://docs.openclaw.ai/gateway/configuration-reference
796
+ if (!config.agents.defaults.models) config.agents.defaults.models = {}
797
+ config.agents.defaults.models[`nvidia/${model.modelId}`] = {}
798
+
792
799
  saveOpenClawConfig(config)
793
800
 
794
801
  console.log(chalk.rgb(255, 140, 0)(` ✓ Default model set to: nvidia/${model.modelId}`))
795
802
  console.log()
796
803
  console.log(chalk.dim(' 📄 Config updated: ' + OPENCLAW_CONFIG))
797
804
  console.log()
798
- console.log(chalk.dim(' 💡 Restart OpenClaw for changes to take effect:'))
799
- console.log(chalk.dim(' openclaw restart') + chalk.dim(' or ') + chalk.dim('openclaw models set nvidia/' + model.modelId))
805
+ // 📖 "openclaw restart" does NOT exist. The gateway auto-reloads on config file changes.
806
+ // 📖 To apply manually: use "openclaw models set" or "openclaw configure"
807
+ // 📖 See: https://docs.openclaw.ai/gateway/configuration
808
+ console.log(chalk.dim(' 💡 OpenClaw will reload config automatically (gateway.reload.mode).'))
809
+ console.log(chalk.dim(' To apply manually: openclaw models set nvidia/' + model.modelId))
810
+ console.log(chalk.dim(' Or run the setup wizard: openclaw configure'))
800
811
  console.log()
801
812
  }
802
813
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "free-coding-models",
3
- "version": "0.1.3",
3
+ "version": "0.1.5",
4
4
  "description": "Find the fastest coding LLM models in seconds — ping free models from multiple providers, pick the best one for OpenCode, Cursor, or any AI coding assistant.",
5
5
  "keywords": [
6
6
  "nvidia",