@blockrun/llm 1.8.0 → 1.10.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -103,7 +103,7 @@ const client = new LLMClient();
103
103
  // Auto-routes to cheapest capable model
104
104
  const result = await client.smartChat('What is 2+2?');
105
105
  console.log(result.response); // '4'
106
- console.log(result.model); // 'nvidia/kimi-k2.5' (cheap, fast)
106
+ console.log(result.model); // 'moonshot/kimi-k2.5' (cheap, fast)
107
107
  console.log(`Saved ${(result.routing.savings * 100).toFixed(0)}%`); // 'Saved 78%'
108
108
 
109
109
  // Complex reasoning task -> routes to reasoning model
@@ -144,7 +144,7 @@ The classifier runs in <1ms, 100% locally, and routes to one of four tiers:
144
144
 
145
145
  | Tier | Example Tasks | Auto Profile Model |
146
146
  |------|---------------|-------------------|
147
- | SIMPLE | "What is 2+2?", definitions | nvidia/kimi-k2.5 |
147
+ | SIMPLE | "What is 2+2?", definitions | moonshot/kimi-k2.5 |
148
148
  | MEDIUM | Code snippets, explanations | xai/grok-code-fast-1 |
149
149
  | COMPLEX | Architecture, long documents | google/gemini-3.1-pro |
150
150
  | REASONING | Proofs, multi-step reasoning | xai/grok-4-1-fast-reasoning |
@@ -236,11 +236,23 @@ The classifier runs in <1ms, 100% locally, and routes to one of four tiers:
236
236
  | `minimax/minimax-m2.7` | $0.30/M | $1.20/M |
237
237
  | `minimax/minimax-m2.5` | $0.30/M | $1.20/M |
238
238
 
239
- ### NVIDIA (Free & Hosted)
239
+ ### NVIDIA (Free) + Moonshot
240
+
241
+ Free tier refreshed 2026-04-21: retired the Nemotron family, `mistral-large-3-675b`,
242
+ `devstral-2-123b`, and paid `nvidia/kimi-k2.5`. The backend auto-redirects the
243
+ old IDs; the recommended replacements are listed below.
244
+
240
245
  | Model | Input Price | Output Price | Notes |
241
246
  |-------|-------------|--------------|-------|
242
- | `nvidia/gpt-oss-120b` | **FREE** | **FREE** | OpenAI open-weight 120B (Apache 2.0) |
243
- | `nvidia/kimi-k2.5` | $0.60/M | $3.00/M | Moonshot 1T MoE with vision |
247
+ | `nvidia/qwen3-next-80b-a3b-thinking` | **FREE** | **FREE** | Reasoning flagship — 116 tok/s, thinking mode |
248
+ | `nvidia/mistral-small-4-119b` | **FREE** | **FREE** | Fast free chat — 114 tok/s |
249
+ | `nvidia/glm-4.7` | **FREE** | **FREE** | GLM-4.7 with thinking — 237 tok/s |
250
+ | `nvidia/llama-4-maverick` | **FREE** | **FREE** | Llama 4 Maverick MoE |
251
+ | `nvidia/qwen3-coder-480b` | **FREE** | **FREE** | Coding-optimised 480B MoE |
252
+ | `nvidia/deepseek-v3.2` | **FREE** | **FREE** | DeepSeek V3.2 hosted |
253
+ | `nvidia/gpt-oss-120b` | **FREE** | **FREE** | OpenAI open-weight 120B — 123 tok/s |
254
+ | `nvidia/gpt-oss-20b` | **FREE** | **FREE** | OpenAI open-weight 20B — 155 tok/s |
255
+ | `moonshot/kimi-k2.5` | $0.60/M | $3.00/M | Direct from Moonshot — replaces `nvidia/kimi-k2.5` |
244
256
 
245
257
  ### E2E Verified Models
246
258
 
@@ -262,6 +274,7 @@ All models below have been tested end-to-end via the TypeScript SDK (Feb 2026):
262
274
  |-------|-------|
263
275
  | `openai/dall-e-3` | $0.04-0.08/image |
264
276
  | `openai/gpt-image-1` | $0.02-0.04/image |
277
+ | `openai/gpt-image-2` | $0.06-0.12/image (reasoning-driven, multilingual text rendering, character consistency) |
265
278
  | `google/nano-banana` | $0.05/image |
266
279
  | `google/nano-banana-pro` | $0.10-0.15/image |
267
280
  | `black-forest/flux-1.1-pro` | $0.04/image |
@@ -269,10 +282,15 @@ All models below have been tested end-to-end via the TypeScript SDK (Feb 2026):
269
282
  | `xai/grok-imagine-image-pro` | $0.07/image |
270
283
  | `zai/cogview-4` | $0.015/image |
271
284
 
285
+ Image editing (`client.edit`): `openai/gpt-image-1` and `openai/gpt-image-2` both support the `/v1/images/image2image` endpoint.
286
+
272
287
  ### Video Generation
273
288
  | Model | Price |
274
289
  |-------|-------|
275
290
  | `xai/grok-imagine-video` | $0.05/sec (8s default → $0.40/clip) |
291
+ | `bytedance/seedance-1.5-pro` | $0.03/sec (5s default, up to 10s, 720p) |
292
+ | `bytedance/seedance-2.0-fast` | $0.15/sec (~60-80s gen, sweet-spot price/quality) |
293
+ | `bytedance/seedance-2.0` | $0.30/sec (720p Pro) |
276
294
 
277
295
  ```ts
278
296
  import { VideoClient } from '@blockrun/llm';
package/dist/index.cjs CHANGED
@@ -1685,7 +1685,7 @@ var import_accounts3 = require("viem/accounts");
1685
1685
  var DEFAULT_API_URL2 = "https://blockrun.ai/api";
1686
1686
  var DEFAULT_MODEL = "google/nano-banana";
1687
1687
  var DEFAULT_SIZE = "1024x1024";
1688
- var DEFAULT_TIMEOUT2 = 12e4;
1688
+ var DEFAULT_TIMEOUT2 = 2e5;
1689
1689
  var ImageClient = class {
1690
1690
  account;
1691
1691
  privateKey;
package/dist/index.js CHANGED
@@ -1596,7 +1596,7 @@ import { privateKeyToAccount as privateKeyToAccount2 } from "viem/accounts";
1596
1596
  var DEFAULT_API_URL2 = "https://blockrun.ai/api";
1597
1597
  var DEFAULT_MODEL = "google/nano-banana";
1598
1598
  var DEFAULT_SIZE = "1024x1024";
1599
- var DEFAULT_TIMEOUT2 = 12e4;
1599
+ var DEFAULT_TIMEOUT2 = 2e5;
1600
1600
  var ImageClient = class {
1601
1601
  account;
1602
1602
  privateKey;
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@blockrun/llm",
3
- "version": "1.8.0",
3
+ "version": "1.10.1",
4
4
  "type": "module",
5
5
  "description": "BlockRun SDK - Pay-per-request AI (LLM, Image, Video, Music) via x402 on Base and Solana",
6
6
  "main": "dist/index.cjs",