free-coding-models 0.1.3 → 0.1.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -5,7 +5,17 @@
5
5
  <img src="https://img.shields.io/badge/models-44-76b900?logo=nvidia" alt="models count">
6
6
  </p>
7
7
 
8
- <h1 align="center">⚡ Free Coding Models</h1>
8
+ <h1 align="center">free-coding-models</h1>
9
+
10
+ <p align="center">
11
+
12
+ ```
13
+ 1. Create a free API key at NVIDIA → https://build.nvidia.com
14
+ 2. npm i -g free-coding-models
15
+ 3. free-coding-models
16
+ ```
17
+
18
+ </p>
9
19
 
10
20
  <p align="center">
11
21
  <strong>Find the fastest coding LLM models in seconds</strong><br>
@@ -288,21 +298,17 @@ Or run without flags and choose **OpenClaw** from the startup menu.
288
298
 
289
299
  ```json
290
300
  {
291
- "providers": {
292
- "nvidia": {
293
- "baseUrl": "https://integrate.api.nvidia.com/v1",
294
- "apiKey": "nvapi-xxxx-your-key",
295
- "api": "openai-completions",
296
- "models": [
297
- {
298
- "id": "deepseek-ai/deepseek-v3.2",
299
- "name": "DeepSeek V3.2",
300
- "contextWindow": 128000,
301
- "maxTokens": 8192
302
- }
303
- ]
301
+ "models": {
302
+ "providers": {
303
+ "nvidia": {
304
+ "baseUrl": "https://integrate.api.nvidia.com/v1",
305
+ "api": "openai-completions"
306
+ }
304
307
  }
305
308
  },
309
+ "env": {
310
+ "NVIDIA_API_KEY": "nvapi-xxxx-your-key"
311
+ },
306
312
  "agents": {
307
313
  "defaults": {
308
314
  "model": {
@@ -313,16 +319,22 @@ Or run without flags and choose **OpenClaw** from the startup menu.
313
319
  }
314
320
  ```
315
321
 
322
+ > ⚠️ **Note:** `providers` must be nested under `models.providers` — not at the config root. A root-level `providers` key is ignored by OpenClaw.
323
+
316
324
  ### After updating OpenClaw config
317
325
 
318
- Restart OpenClaw or run the CLI command to apply the new model:
326
+ OpenClaw's gateway **auto-reloads** when the config file changes (depending on `gateway.reload.mode`). To apply manually:
319
327
 
320
328
  ```bash
321
- openclaw restart
322
- # or
329
+ # Apply via CLI
323
330
  openclaw models set nvidia/deepseek-ai/deepseek-v3.2
331
+
332
+ # Or re-run the interactive setup wizard
333
+ openclaw configure
324
334
  ```
325
335
 
336
+ > ⚠️ **Note:** `openclaw restart` does **not** exist as a CLI command. Kill and relaunch the process manually if you need a full restart.
337
+
326
338
  > 💡 **Why use remote NIM models with OpenClaw?** NVIDIA NIM serves models via a fast API — no local GPU required, no VRAM limits, free credits for developers. You get frontier-class coding models (DeepSeek V3, Kimi K2, Qwen3 Coder) without downloading anything.
327
339
 
328
340
  ---
@@ -391,6 +403,21 @@ npm install
391
403
  npm start -- YOUR_API_KEY
392
404
  ```
393
405
 
406
+ ### Releasing a new version
407
+
408
+ 1. Make your changes and commit them with a descriptive message
409
+ 2. Update `CHANGELOG.md` with the new version entry
410
+ 3. Bump `"version"` in `package.json` (e.g. `0.1.3` → `0.1.4`)
411
+ 4. Commit with **just the version number** as the message:
412
+
413
+ ```bash
414
+ git add .
415
+ git commit -m "0.1.4"
416
+ git push
417
+ ```
418
+
419
+ The GitHub Actions workflow automatically publishes to npm on every push to `main`.
420
+
394
421
  ---
395
422
 
396
423
  ## 📄 License
@@ -755,35 +755,34 @@ async function startOpenClaw(model, apiKey) {
755
755
  console.log(chalk.dim(` 💾 Backup: ${backupPath}`))
756
756
  }
757
757
 
758
- // 📖 Ensure providers section exists with nvidia NIM block
759
- // 📖 Only injects if not already present - we don't overwrite existing provider config
760
- if (!config.providers) config.providers = {}
761
- if (!config.providers.nvidia) {
762
- config.providers.nvidia = {
758
+ // 📖 Ensure models.providers section exists with nvidia NIM block.
759
+ // 📖 Per OpenClaw docs (docs.openclaw.ai/providers/nvidia), providers MUST be nested under
760
+ // 📖 "models.providers", NOT at the config root. Root-level "providers" is ignored by OpenClaw.
761
+ // 📖 API key is NOT stored in the provider block — it's read from env var NVIDIA_API_KEY.
762
+ // 📖 If needed, it can be stored under the root "env" key: { env: { NVIDIA_API_KEY: "nvapi-..." } }
763
+ if (!config.models) config.models = {}
764
+ if (!config.models.providers) config.models.providers = {}
765
+ if (!config.models.providers.nvidia) {
766
+ config.models.providers.nvidia = {
763
767
  baseUrl: 'https://integrate.api.nvidia.com/v1',
764
- // 📖 Store key reference as env var name — avoid hardcoding key in config file
765
- apiKey: apiKey || process.env.NVIDIA_API_KEY || 'YOUR_NVIDIA_API_KEY',
766
768
  api: 'openai-completions',
767
- models: [],
768
769
  }
769
- console.log(chalk.dim(' ➕ Added nvidia provider block to OpenClaw config'))
770
+ console.log(chalk.dim(' ➕ Added nvidia provider block to OpenClaw config (models.providers.nvidia)'))
770
771
  }
771
772
 
772
- // 📖 Ensure the chosen model is in the nvidia models array
773
- const modelsArr = config.providers.nvidia.models
774
- const modelEntry = {
775
- id: model.modelId,
776
- name: model.label,
777
- contextWindow: 128000,
778
- maxTokens: 8192,
779
- }
780
- const alreadyListed = modelsArr.some(m => m.id === model.modelId)
781
- if (!alreadyListed) {
782
- modelsArr.push(modelEntry)
783
- console.log(chalk.dim(` ➕ Added ${model.label} to nvidia models list`))
773
+ // 📖 Store API key in the root "env" section so OpenClaw can read it as NVIDIA_API_KEY env var.
774
+ // 📖 Only writes if not already set to avoid overwriting an existing key.
775
+ const resolvedKey = apiKey || process.env.NVIDIA_API_KEY
776
+ if (resolvedKey) {
777
+ if (!config.env) config.env = {}
778
+ if (!config.env.NVIDIA_API_KEY) {
779
+ config.env.NVIDIA_API_KEY = resolvedKey
780
+ console.log(chalk.dim(' 🔑 Stored NVIDIA_API_KEY in config env section'))
781
+ }
784
782
  }
785
783
 
786
- // 📖 Set as the default primary model for all agents
784
+ // 📖 Set as the default primary model for all agents.
785
+ // 📖 Format: "provider/model-id" — e.g. "nvidia/deepseek-ai/deepseek-v3.2"
787
786
  if (!config.agents) config.agents = {}
788
787
  if (!config.agents.defaults) config.agents.defaults = {}
789
788
  if (!config.agents.defaults.model) config.agents.defaults.model = {}
@@ -795,8 +794,12 @@ async function startOpenClaw(model, apiKey) {
795
794
  console.log()
796
795
  console.log(chalk.dim(' 📄 Config updated: ' + OPENCLAW_CONFIG))
797
796
  console.log()
798
- console.log(chalk.dim(' 💡 Restart OpenClaw for changes to take effect:'))
799
- console.log(chalk.dim(' openclaw restart') + chalk.dim(' or ') + chalk.dim('openclaw models set nvidia/' + model.modelId))
797
+ // 📖 "openclaw restart" does NOT exist. The gateway auto-reloads on config file changes.
798
+ // 📖 To apply manually: use "openclaw models set" or "openclaw configure"
799
+ // 📖 See: https://docs.openclaw.ai/gateway/configuration
800
+ console.log(chalk.dim(' 💡 OpenClaw will reload config automatically (gateway.reload.mode).'))
801
+ console.log(chalk.dim(' To apply manually: openclaw models set nvidia/' + model.modelId))
802
+ console.log(chalk.dim(' Or run the setup wizard: openclaw configure'))
800
803
  console.log()
801
804
  }
802
805
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "free-coding-models",
3
- "version": "0.1.3",
3
+ "version": "0.1.4",
4
4
  "description": "Find the fastest coding LLM models in seconds — ping free models from multiple providers, pick the best one for OpenCode, Cursor, or any AI coding assistant.",
5
5
  "keywords": [
6
6
  "nvidia",