@blockrun/clawrouter 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.js ADDED
@@ -0,0 +1,888 @@
1
+ // src/auth.ts
2
+ import { writeFile, readFile, mkdir } from "fs/promises";
3
+ import { join } from "path";
4
+ import { homedir } from "os";
5
+ import { generatePrivateKey, privateKeyToAccount } from "viem/accounts";
6
// On-disk location where a generated wallet private key is persisted
// (~/.openclaw/blockrun/wallet.key).
var WALLET_DIR = join(homedir(), ".openclaw", "blockrun");
var WALLET_FILE = join(WALLET_DIR, "wallet.key");
8
// Read the persisted wallet private key from WALLET_FILE, if present.
// Returns the key only when it is a well-formed 0x-prefixed 64-hex-digit
// string; otherwise undefined (missing file, unreadable file, bad content).
async function loadSavedWallet() {
  try {
    const key = (await readFile(WALLET_FILE, "utf-8")).trim();
    // Validate the full hex content, not just prefix/length — keeps this
    // consistent with the walletKeyAuth prompt validator and avoids passing
    // garbage to privateKeyToAccount later.
    if (/^0x[0-9a-fA-F]{64}$/.test(key)) return key;
  } catch {
    // Best-effort read: a missing or unreadable file simply means "no saved wallet".
  }
  return void 0;
}
16
// Generate a fresh EVM wallet and persist its private key to WALLET_FILE.
// Returns the key and its derived address.
async function generateAndSaveWallet() {
  const key = generatePrivateKey();
  const account = privateKeyToAccount(key);
  // The directory holds a private key — restrict it to the current user.
  // (mode applies to directories created here; a pre-existing dir keeps its mode.)
  await mkdir(WALLET_DIR, { recursive: true, mode: 0o700 });
  // 0o600: owner read/write only (same value as the previous literal 384).
  await writeFile(WALLET_FILE, key + "\n", { mode: 0o600 });
  return { key, address: account.address };
}
23
// Resolve the wallet key to use, in priority order:
//   1. previously saved key on disk ("saved")
//   2. BLOCKRUN_WALLET_KEY environment variable ("env")
//   3. newly generated and persisted key ("generated")
// Returns { key, address, source }.
async function resolveOrGenerateWalletKey() {
  const saved = await loadSavedWallet();
  if (saved) {
    const account = privateKeyToAccount(saved);
    return { key: saved, address: account.address, source: "saved" };
  }
  const envKey = process.env.BLOCKRUN_WALLET_KEY;
  // Validate the full hex content (not just "0x" prefix and length 66) before
  // trusting the env var and deriving an account from it.
  if (typeof envKey === "string" && /^0x[0-9a-fA-F]{64}$/.test(envKey)) {
    const account = privateKeyToAccount(envKey);
    return { key: envKey, address: account.address, source: "env" };
  }
  const { key, address } = await generateAndSaveWallet();
  return { key, address, source: "generated" };
}
37
// Interactive auth method: prompt the user for an EVM private key and store
// it as the "default" profile credential.
var walletKeyAuth = {
  id: "wallet-key",
  label: "Wallet Private Key",
  hint: "Enter your EVM wallet private key (0x...) for x402 payments to BlockRun",
  kind: "api_key",
  run: async (ctx) => {
    // Prompt validator: returns an error string, or undefined when the input
    // looks like a well-formed EVM private key.
    const validate = (value) => {
      const candidate = value.trim();
      if (!candidate.startsWith("0x")) return "Key must start with 0x";
      if (candidate.length !== 66) return "Key must be 66 characters (0x + 64 hex)";
      if (!/^0x[0-9a-fA-F]{64}$/.test(candidate)) return "Key must be valid hex";
      return void 0;
    };
    const key = await ctx.prompter.text({
      message: "Enter your wallet private key (0x...)",
      validate
    });
    if (typeof key !== "string" || !key) {
      throw new Error("Wallet key is required");
    }
    return {
      profiles: [
        {
          profileId: "default",
          credential: { apiKey: key.trim() }
        }
      ],
      notes: [
        "Wallet key stored securely in OpenClaw credentials.",
        "Your wallet signs x402 USDC payments on Base for each LLM call.",
        "Fund your wallet with USDC on Base to start using BlockRun models."
      ]
    };
  }
};
71
// Non-interactive auth method: read the wallet key from the
// BLOCKRUN_WALLET_KEY environment variable.
var envKeyAuth = {
  id: "env-key",
  label: "Environment Variable",
  hint: "Use BLOCKRUN_WALLET_KEY environment variable",
  kind: "api_key",
  run: async () => {
    const key = process.env.BLOCKRUN_WALLET_KEY;
    if (!key) {
      throw new Error(
        "BLOCKRUN_WALLET_KEY environment variable is not set. Set it to your EVM wallet private key (0x...)."
      );
    }
    const profile = {
      profileId: "default",
      credential: { apiKey: key.trim() }
    };
    return {
      profiles: [profile],
      notes: ["Using wallet key from BLOCKRUN_WALLET_KEY environment variable."]
    };
  }
};
94
+
95
+ // src/models.ts
96
// Static catalog of models served through BlockRun.
// Prices are USD per 1M tokens (inputPrice/outputPrice); contextWindow and
// maxOutput are token counts; optional `reasoning`/`vision` flags mark
// extended capabilities (absent means false — see toOpenClawModel).
var BLOCKRUN_MODELS = [
  // Smart routing meta-model — proxy replaces with actual model
  { id: "blockrun/auto", name: "BlockRun Smart Router", inputPrice: 0, outputPrice: 0, contextWindow: 105e4, maxOutput: 128e3 },
  // OpenAI GPT-5 Family
  { id: "openai/gpt-5.2", name: "GPT-5.2", inputPrice: 1.75, outputPrice: 14, contextWindow: 4e5, maxOutput: 128e3, reasoning: true, vision: true },
  { id: "openai/gpt-5-mini", name: "GPT-5 Mini", inputPrice: 0.25, outputPrice: 2, contextWindow: 2e5, maxOutput: 65536 },
  { id: "openai/gpt-5-nano", name: "GPT-5 Nano", inputPrice: 0.05, outputPrice: 0.4, contextWindow: 128e3, maxOutput: 32768 },
  { id: "openai/gpt-5.2-pro", name: "GPT-5.2 Pro", inputPrice: 21, outputPrice: 168, contextWindow: 4e5, maxOutput: 128e3, reasoning: true },
  // OpenAI GPT-4 Family
  { id: "openai/gpt-4.1", name: "GPT-4.1", inputPrice: 2, outputPrice: 8, contextWindow: 128e3, maxOutput: 16384, vision: true },
  { id: "openai/gpt-4.1-mini", name: "GPT-4.1 Mini", inputPrice: 0.4, outputPrice: 1.6, contextWindow: 128e3, maxOutput: 16384 },
  { id: "openai/gpt-4.1-nano", name: "GPT-4.1 Nano", inputPrice: 0.1, outputPrice: 0.4, contextWindow: 128e3, maxOutput: 16384 },
  { id: "openai/gpt-4o", name: "GPT-4o", inputPrice: 2.5, outputPrice: 10, contextWindow: 128e3, maxOutput: 16384, vision: true },
  { id: "openai/gpt-4o-mini", name: "GPT-4o Mini", inputPrice: 0.15, outputPrice: 0.6, contextWindow: 128e3, maxOutput: 16384 },
  // OpenAI O-series (Reasoning)
  { id: "openai/o1", name: "o1", inputPrice: 15, outputPrice: 60, contextWindow: 2e5, maxOutput: 1e5, reasoning: true },
  { id: "openai/o1-mini", name: "o1-mini", inputPrice: 1.1, outputPrice: 4.4, contextWindow: 128e3, maxOutput: 65536, reasoning: true },
  { id: "openai/o3", name: "o3", inputPrice: 2, outputPrice: 8, contextWindow: 2e5, maxOutput: 1e5, reasoning: true },
  { id: "openai/o3-mini", name: "o3-mini", inputPrice: 1.1, outputPrice: 4.4, contextWindow: 128e3, maxOutput: 65536, reasoning: true },
  { id: "openai/o4-mini", name: "o4-mini", inputPrice: 1.1, outputPrice: 4.4, contextWindow: 128e3, maxOutput: 65536, reasoning: true },
  // Anthropic
  { id: "anthropic/claude-haiku-4.5", name: "Claude Haiku 4.5", inputPrice: 1, outputPrice: 5, contextWindow: 2e5, maxOutput: 8192 },
  { id: "anthropic/claude-sonnet-4", name: "Claude Sonnet 4", inputPrice: 3, outputPrice: 15, contextWindow: 2e5, maxOutput: 64e3, reasoning: true },
  { id: "anthropic/claude-opus-4", name: "Claude Opus 4", inputPrice: 15, outputPrice: 75, contextWindow: 2e5, maxOutput: 32e3, reasoning: true },
  { id: "anthropic/claude-opus-4.5", name: "Claude Opus 4.5", inputPrice: 15, outputPrice: 75, contextWindow: 2e5, maxOutput: 32e3, reasoning: true },
  // Google
  { id: "google/gemini-3-pro-preview", name: "Gemini 3 Pro Preview", inputPrice: 2, outputPrice: 12, contextWindow: 105e4, maxOutput: 65536, reasoning: true, vision: true },
  { id: "google/gemini-2.5-pro", name: "Gemini 2.5 Pro", inputPrice: 1.25, outputPrice: 10, contextWindow: 105e4, maxOutput: 65536, reasoning: true, vision: true },
  { id: "google/gemini-2.5-flash", name: "Gemini 2.5 Flash", inputPrice: 0.15, outputPrice: 0.6, contextWindow: 1e6, maxOutput: 65536 },
  // DeepSeek
  { id: "deepseek/deepseek-chat", name: "DeepSeek V3.2 Chat", inputPrice: 0.28, outputPrice: 0.42, contextWindow: 128e3, maxOutput: 8192 },
  { id: "deepseek/deepseek-reasoner", name: "DeepSeek V3.2 Reasoner", inputPrice: 0.28, outputPrice: 0.42, contextWindow: 128e3, maxOutput: 8192, reasoning: true },
  // xAI / Grok
  { id: "xai/grok-3", name: "Grok 3", inputPrice: 3, outputPrice: 15, contextWindow: 131072, maxOutput: 16384, reasoning: true },
  { id: "xai/grok-3-fast", name: "Grok 3 Fast", inputPrice: 5, outputPrice: 25, contextWindow: 131072, maxOutput: 16384, reasoning: true },
  { id: "xai/grok-3-mini", name: "Grok 3 Mini", inputPrice: 0.3, outputPrice: 0.5, contextWindow: 131072, maxOutput: 16384 }
];
133
// Map an internal BlockRun model record onto the OpenClaw model shape.
// Missing `reasoning` defaults to false; `vision` adds "image" input support.
function toOpenClawModel(m) {
  const inputModalities = m.vision ? ["text", "image"] : ["text"];
  return {
    id: m.id,
    name: m.name,
    api: "openai-completions",
    reasoning: m.reasoning ?? false,
    input: inputModalities,
    // Pricing is per-million tokens; cache reads/writes are not billed separately.
    cost: {
      input: m.inputPrice,
      output: m.outputPrice,
      cacheRead: 0,
      cacheWrite: 0
    },
    contextWindow: m.contextWindow,
    maxTokens: m.maxOutput
  };
}
150
// All BlockRun models pre-converted to the OpenClaw model shape.
var OPENCLAW_MODELS = BLOCKRUN_MODELS.map(toOpenClawModel);
// Build the provider-level model config pointing at `${baseUrl}/v1`
// (either the public API or the local x402 proxy).
function buildProviderModels(baseUrl) {
  return {
    baseUrl: `${baseUrl}/v1`,
    api: "openai-completions",
    models: OPENCLAW_MODELS
  };
}
158
+
159
+ // src/provider.ts
160
// Handle to the locally running x402 proxy; null until startProxy() succeeds.
var activeProxy = null;
// Register the running proxy so the provider's `models` getter points model
// base URLs at it instead of the public BlockRun API.
function setActiveProxy(proxy) {
  activeProxy = proxy;
}
164
// OpenClaw provider descriptor for BlockRun.
var blockrunProvider = {
  id: "blockrun",
  label: "BlockRun",
  docsPath: "https://blockrun.ai/docs",
  aliases: ["br"],
  envVars: ["BLOCKRUN_WALLET_KEY"],
  // Model definitions — dynamically set to proxy URL
  get models() {
    const base = activeProxy ? activeProxy.baseUrl : "https://blockrun.ai/api";
    return buildProviderModels(base);
  },
  // Auth methods
  auth: [envKeyAuth, walletKeyAuth],
  // Format the stored credential as the wallet key
  formatApiKey: (cred) => {
    const apiKey = "apiKey" in cred ? cred.apiKey : void 0;
    if (typeof apiKey === "string") {
      return apiKey;
    }
    throw new Error("BlockRun credential must contain an apiKey (wallet private key)");
  }
};
187
+
188
+ // src/proxy.ts
189
+ import { createServer } from "http";
190
+ import { privateKeyToAccount as privateKeyToAccount3 } from "viem/accounts";
191
+
192
+ // src/x402.ts
193
+ import { signTypedData, privateKeyToAccount as privateKeyToAccount2 } from "viem/accounts";
194
// Chain id for Base mainnet.
var BASE_CHAIN_ID = 8453;
// Native USDC token contract address on Base.
var USDC_BASE = "0x833589fCD6eDb6E08f4c7C32D4f71b54bdA02913";
// EIP-712 signing domain for the Base USDC contract.
var USDC_DOMAIN = {
  name: "USD Coin",
  version: "2",
  chainId: BASE_CHAIN_ID,
  verifyingContract: USDC_BASE
};
// EIP-3009 TransferWithAuthorization typed-data layout — the struct USDC
// accepts for off-chain-signed (gasless) transfers.
var TRANSFER_TYPES = {
  TransferWithAuthorization: [
    { name: "from", type: "address" },
    { name: "to", type: "address" },
    { name: "value", type: "uint256" },
    { name: "validAfter", type: "uint256" },
    { name: "validBefore", type: "uint256" },
    { name: "nonce", type: "bytes32" }
  ]
};
212
+ function createNonce() {
213
+ const bytes = new Uint8Array(32);
214
+ crypto.getRandomValues(bytes);
215
+ return `0x${Array.from(bytes).map((b) => b.toString(16).padStart(2, "0")).join("")}`;
216
+ }
217
// Decode the base64-encoded x402 payment-requirements header into its JSON payload.
function parsePaymentRequired(headerValue) {
  return JSON.parse(atob(headerValue));
}
221
// Build and sign an x402 payment payload for a USDC TransferWithAuthorization
// (EIP-3009) on Base, returned base64-encoded for the payment-signature header.
//
// privateKey  - payer's EVM private key (0x-hex)
// fromAddress - payer address derived from that key
// recipient   - payee address from the 402 payment requirements
// amount      - USDC amount in atomic units, as a decimal string
// resourceUrl - URL being paid for (echoed into the payload's resource block)
async function createPaymentPayload(privateKey, fromAddress, recipient, amount, resourceUrl) {
  const now = Math.floor(Date.now() / 1e3);
  // Validity window: 10 minutes of clock-skew tolerance into the past,
  // 5 minutes until the authorization expires.
  const validAfter = now - 600;
  const validBefore = now + 300;
  const nonce = createNonce();
  // EIP-712 typed-data signature over the transfer authorization.
  const signature = await signTypedData({
    privateKey,
    domain: USDC_DOMAIN,
    types: TRANSFER_TYPES,
    primaryType: "TransferWithAuthorization",
    message: {
      from: fromAddress,
      to: recipient,
      value: BigInt(amount),
      validAfter: BigInt(validAfter),
      validBefore: BigInt(validBefore),
      nonce
    }
  });
  // x402 v2 envelope: resource description, the accepted payment terms being
  // satisfied, and the signed authorization. Note the authorization repeats
  // the numeric fields as strings — presumably the x402 wire format; confirm
  // against the server's expectations before changing.
  const paymentData = {
    x402Version: 2,
    resource: {
      url: resourceUrl,
      description: "BlockRun AI API call",
      mimeType: "application/json"
    },
    accepted: {
      scheme: "exact",
      network: "eip155:8453",
      amount,
      asset: USDC_BASE,
      payTo: recipient,
      maxTimeoutSeconds: 300,
      extra: { name: "USD Coin", version: "2" }
    },
    payload: {
      signature,
      authorization: {
        from: fromAddress,
        to: recipient,
        value: amount,
        validAfter: validAfter.toString(),
        validBefore: validBefore.toString(),
        nonce
      }
    },
    extensions: {}
  };
  return btoa(JSON.stringify(paymentData));
}
271
// Wrap global fetch with transparent x402 payment handling: on a 402 response,
// sign a USDC payment for the first accepted option and retry the request once
// with the payment attached. Non-402 responses pass through untouched.
function createPaymentFetch(privateKey) {
  const account = privateKeyToAccount2(privateKey);
  const walletAddress = account.address;
  return async (input, init) => {
    // Normalize the request target to a URL string for the payment's resource field.
    const url = typeof input === "string" ? input : input instanceof URL ? input.href : input.url;
    const response = await fetch(input, init);
    if (response.status !== 402) {
      return response;
    }
    // Server signals payment terms via a base64 JSON header.
    const paymentHeader = response.headers.get("x-payment-required");
    if (!paymentHeader) {
      throw new Error("402 response missing x-payment-required header");
    }
    const paymentRequired = parsePaymentRequired(paymentHeader);
    // Only the first offered payment option is considered.
    const option = paymentRequired.accepts?.[0];
    if (!option) {
      throw new Error("No payment options in 402 response");
    }
    // Field name varies across x402 versions — accept either spelling.
    const amount = option.amount || option.maxAmountRequired;
    if (!amount) {
      throw new Error("No amount in payment requirements");
    }
    const paymentPayload = await createPaymentPayload(
      privateKey,
      walletAddress,
      option.payTo,
      amount,
      url
    );
    // Retry once with the signed payment attached; if the retry also returns
    // 402 it is passed back to the caller (no payment loop).
    const retryHeaders = new Headers(init?.headers);
    retryHeaders.set("payment-signature", paymentPayload);
    return fetch(input, {
      ...init,
      headers: retryHeaders
    });
  };
}
308
+
309
+ // src/router/rules.ts
310
// Heuristic complexity classifier. Accumulates a signed score from keyword and
// length signals over the combined system+user text, then maps the score to a
// tier. Returns { score, tier, confidence, signals }; tier is null when the
// score lands in `ambiguousZone` (caller escalates to the LLM classifier).
function classifyByRules(prompt, systemPrompt, estimatedTokens, config, ambiguousZone) {
  const text = `${systemPrompt ?? ""} ${prompt}`.toLowerCase();
  let score = 0;
  // Human-readable evidence trail, surfaced in the routing decision's reasoning.
  const signals = [];
  // Length: short prompts lean simple, long prompts lean complex.
  if (estimatedTokens < config.tokenCountThresholds.simple) {
    score -= 2;
    signals.push(`short (${estimatedTokens} tokens)`);
  } else if (estimatedTokens > config.tokenCountThresholds.complex) {
    score += 2;
    signals.push(`long (${estimatedTokens} tokens)`);
  }
  // Code keywords: 2+ hits is a strong code signal, 1 hit a weak one.
  const codeMatches = config.codeKeywords.filter((kw) => text.includes(kw.toLowerCase()));
  if (codeMatches.length >= 2) {
    score += 2;
    signals.push(`code (${codeMatches.slice(0, 3).join(", ")})`);
  } else if (codeMatches.length === 1) {
    score += 1;
    signals.push(`possible code (${codeMatches[0]})`);
  }
  // Reasoning keywords carry the heaviest single weight (+3).
  const reasoningMatches = config.reasoningKeywords.filter((kw) => text.includes(kw.toLowerCase()));
  if (reasoningMatches.length > 0) {
    score += 3;
    signals.push(`reasoning (${reasoningMatches.slice(0, 3).join(", ")})`);
  }
  // Technical vocabulary: +1 per two distinct matches.
  const techMatches = config.technicalKeywords.filter((kw) => text.includes(kw.toLowerCase()));
  if (techMatches.length >= 2) {
    score += Math.floor(techMatches.length / 2);
    signals.push(`technical (${techMatches.slice(0, 3).join(", ")})`);
  }
  const creativeMatches = config.creativeKeywords.filter((kw) => text.includes(kw.toLowerCase()));
  if (creativeMatches.length > 0) {
    score += 1;
    signals.push(`creative (${creativeMatches[0]})`);
  }
  // Simple-question phrasing pulls the score back down.
  const simpleMatches = config.simpleKeywords.filter((kw) => text.includes(kw.toLowerCase()));
  if (simpleMatches.length > 0) {
    score -= 2;
    signals.push(`simple (${simpleMatches.slice(0, 2).join(", ")})`);
  }
  // Multi-step structure: "first...then", "step N", or numbered lists.
  const multiStepPatterns = [/first.*then/i, /step \d/i, /\d\.\s/];
  const multiStepHits = multiStepPatterns.filter((p) => p.test(text));
  if (multiStepHits.length > 0) {
    score += 1;
    signals.push("multi-step");
  }
  // Many question marks suggest a multi-part request.
  const questionCount = (prompt.match(/\?/g) || []).length;
  if (questionCount > 3) {
    score += 1;
    signals.push(`${questionCount} questions`);
  }
  let tier;
  let confidence;
  // Score-to-tier mapping. Two+ reasoning keywords short-circuit straight to
  // REASONING regardless of score; otherwise: <=0 SIMPLE, ambiguousZone null,
  // 3-4 MEDIUM, 5-6 COMPLEX, >=7 REASONING, anything left null (ambiguous).
  if (reasoningMatches.length >= 2) {
    tier = "REASONING";
    confidence = 0.9;
  } else if (score <= 0) {
    tier = "SIMPLE";
    confidence = Math.min(0.95, 0.85 + Math.abs(score) * 0.02);
  } else if (score >= ambiguousZone[0] && score <= ambiguousZone[1]) {
    tier = null;
    confidence = 0.5;
  } else if (score >= 3 && score <= 4) {
    tier = "MEDIUM";
    confidence = 0.75 + (score - 3) * 0.05;
  } else if (score >= 5 && score <= 6) {
    tier = "COMPLEX";
    confidence = 0.7 + (score - 5) * 0.075;
  } else if (score >= 7) {
    tier = "REASONING";
    confidence = 0.7 + Math.min(0.1, (score - 7) * 0.05);
  } else {
    tier = null;
    confidence = 0.5;
  }
  return { score, tier, confidence, signals };
}
386
+
387
+ // src/router/llm-classifier.ts
388
// System prompt for the LLM fallback classifier; forces a one-word answer
// that parseTier() can extract.
var CLASSIFIER_PROMPT = `You are a query complexity classifier. Classify the user's query into exactly one category.

Categories:
- SIMPLE: Factual Q&A, definitions, translations, short answers
- MEDIUM: Summaries, explanations, moderate code generation
- COMPLEX: Multi-step code, system design, creative writing, analysis
- REASONING: Mathematical proofs, formal logic, step-by-step problem solving

Respond with ONLY one word: SIMPLE, MEDIUM, COMPLEX, or REASONING.`;
// Classification cache: simpleHash(truncated prompt) -> { tier, expires }.
var cache = /* @__PURE__ */ new Map();
398
// LLM fallback classifier for prompts the rule-based scorer found ambiguous.
// Sends a truncated prompt to a cheap model and parses a one-word tier out of
// the reply. Results are memoized by prompt hash with a TTL. Any failure
// (network, non-OK status, parse error) degrades to MEDIUM at 0.5 confidence
// rather than blocking the request.
async function classifyByLLM(prompt, config, payFetch, apiBase) {
  const truncated = prompt.slice(0, config.truncationChars);
  const cacheKey = simpleHash(truncated);
  const cached = cache.get(cacheKey);
  if (cached && cached.expires > Date.now()) {
    return { tier: cached.tier, confidence: 0.75 };
  }
  try {
    // Classification call itself goes through the paying fetch (it costs money).
    const response = await payFetch(`${apiBase}/v1/chat/completions`, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({
        model: config.model,
        messages: [
          { role: "system", content: CLASSIFIER_PROMPT },
          { role: "user", content: truncated }
        ],
        max_tokens: config.maxTokens,
        temperature: config.temperature,
        stream: false
      })
    });
    if (!response.ok) {
      return { tier: "MEDIUM", confidence: 0.5 };
    }
    const data = await response.json();
    const content = data.choices?.[0]?.message?.content?.trim().toUpperCase() ?? "";
    const tier = parseTier(content);
    cache.set(cacheKey, { tier, expires: Date.now() + config.cacheTtlMs });
    // Cap cache growth; prune expired entries past 1000 keys.
    if (cache.size > 1e3) {
      pruneCache();
    }
    return { tier, confidence: 0.75 };
  } catch {
    // Deliberate best-effort: classification failure must not fail the request.
    return { tier: "MEDIUM", confidence: 0.5 };
  }
}
435
// Extract a tier word from the classifier's (uppercased) completion text.
// Checked from most to least capable so mixed replies resolve to the higher
// tier; anything unrecognized defaults to MEDIUM.
function parseTier(text) {
  for (const tier of ["REASONING", "COMPLEX", "MEDIUM", "SIMPLE"]) {
    if (new RegExp(`\\b${tier}\\b`).test(text)) return tier;
  }
  return "MEDIUM";
}
442
// Fast 32-bit rolling hash (hash*31 + charCode, truncated to int32) rendered
// in base-36. Used only as a cache key — not cryptographic.
function simpleHash(str) {
  let acc = 0;
  let i = 0;
  while (i < str.length) {
    // Iterate UTF-16 code units, matching String.prototype.charCodeAt semantics.
    acc = ((acc << 5) - acc + str.charCodeAt(i)) | 0;
    i += 1;
  }
  return acc.toString(36);
}
451
// Drop expired entries from the classification cache; invoked by
// classifyByLLM once the cache exceeds 1000 keys.
function pruneCache() {
  const now = Date.now();
  for (const [key, value] of cache) {
    if (value.expires <= now) {
      cache.delete(key);
    }
  }
}
459
+
460
+ // src/router/selector.ts
461
// Resolve a complexity tier to its primary model and attach cost estimates.
// Costs are USD: per-million-token prices times estimated input tokens plus
// the max-output allowance. Savings are relative to always using
// openai/gpt-4o as the baseline (0 when pricing is unavailable).
function selectModel(tier, confidence, method, reasoning, tierConfigs, modelPricing, estimatedInputTokens, maxOutputTokens) {
  const model = tierConfigs[tier].primary;
  // Helper: total estimated cost for a rate card, or 0 if none is known.
  const estimate = (rates) => {
    if (!rates) return 0;
    return estimatedInputTokens / 1e6 * rates.inputPrice + maxOutputTokens / 1e6 * rates.outputPrice;
  };
  const costEstimate = estimate(modelPricing.get(model));
  const baselineCost = estimate(modelPricing.get("openai/gpt-4o"));
  const savings = baselineCost > 0 ? Math.max(0, (baselineCost - costEstimate) / baselineCost) : 0;
  return {
    model,
    tier,
    confidence,
    method,
    reasoning,
    costEstimate,
    baselineCost,
    savings
  };
}
484
+
485
+ // src/router/config.ts
486
// Default smart-routing configuration. User-supplied overrides are merged on
// top of this by mergeRoutingConfig(); see classifyByRules/classifyByLLM for
// how each section is consumed.
var DEFAULT_ROUTING_CONFIG = {
  version: "1.0",
  classifier: {
    // Rule scores inside [1, 2] are treated as ambiguous and escalated to the
    // LLM classifier below.
    ambiguousZone: [1, 2],
    llmModel: "google/gemini-2.5-flash",
    llmMaxTokens: 10,
    llmTemperature: 0,
    // Only the first 500 chars of the prompt are sent for classification.
    promptTruncationChars: 500,
    cacheTtlMs: 36e5
    // 1 hour
  },
  scoring: {
    // Below `simple` tokens lowers the score; above `complex` raises it.
    tokenCountThresholds: { simple: 50, complex: 500 },
    codeKeywords: [
      "function",
      "class",
      "import",
      "def",
      "SELECT",
      "async",
      "await",
      "const",
      "let",
      "var",
      "return",
      "```"
    ],
    reasoningKeywords: [
      "prove",
      "theorem",
      "derive",
      "step by step",
      "chain of thought",
      "formally",
      "mathematical",
      "proof",
      "logically"
    ],
    simpleKeywords: [
      "what is",
      "define",
      "translate",
      "hello",
      "yes or no",
      "capital of",
      "how old",
      "who is",
      "when was"
    ],
    technicalKeywords: [
      "algorithm",
      "optimize",
      "architecture",
      "distributed",
      "kubernetes",
      "microservice",
      "database",
      "infrastructure"
    ],
    creativeKeywords: [
      "story",
      "poem",
      "compose",
      "brainstorm",
      "creative",
      "imagine",
      "write a"
    ]
  },
  // Primary model per complexity tier, with ordered fallbacks.
  // NOTE(review): fallbacks are declared but no visible code in this file
  // consumes them yet.
  tiers: {
    SIMPLE: {
      primary: "google/gemini-2.5-flash",
      fallback: ["deepseek/deepseek-chat", "openai/gpt-4o-mini"]
    },
    MEDIUM: {
      primary: "deepseek/deepseek-chat",
      fallback: ["google/gemini-2.5-flash", "openai/gpt-4o-mini"]
    },
    COMPLEX: {
      primary: "anthropic/claude-sonnet-4",
      fallback: ["openai/gpt-4o", "google/gemini-2.5-pro"]
    },
    REASONING: {
      primary: "openai/o3",
      fallback: ["google/gemini-2.5-pro", "anthropic/claude-sonnet-4"]
    }
  },
  overrides: {
    // Inputs above this estimated token count are always routed as COMPLEX.
    maxTokensForceComplex: 1e5,
    // Minimum tier when the system prompt asks for structured output.
    structuredOutputMinTier: "MEDIUM"
  }
};
578
+
579
+ // src/router/index.ts
580
// Top-level routing decision for one chat request: estimate size, classify
// complexity (rules first, LLM on ambiguity), apply overrides, then resolve
// a tier to a model via selectModel(). Returns the selectModel() result.
async function route(prompt, systemPrompt, maxOutputTokens, options) {
  const { config, modelPricing, payFetch, apiBase } = options;
  const fullText = `${systemPrompt ?? ""} ${prompt}`;
  // Rough token estimate: ~4 characters per token.
  const estimatedTokens = Math.ceil(fullText.length / 4);
  // Very large inputs bypass classification entirely and go straight to COMPLEX.
  if (estimatedTokens > config.overrides.maxTokensForceComplex) {
    return selectModel(
      "COMPLEX",
      0.95,
      "rules",
      `Input exceeds ${config.overrides.maxTokensForceComplex} tokens`,
      config.tiers,
      modelPricing,
      estimatedTokens,
      maxOutputTokens
    );
  }
  // Structured-output detection is a keyword sniff on the system prompt only.
  const hasStructuredOutput = systemPrompt ? /json|structured|schema/i.test(systemPrompt) : false;
  const ruleResult = classifyByRules(
    prompt,
    systemPrompt,
    estimatedTokens,
    config.scoring,
    config.classifier.ambiguousZone
  );
  let tier;
  let confidence;
  let method = "rules";
  // Reasoning string accumulates the decision trail for logging/callbacks.
  let reasoning = `score=${ruleResult.score} | ${ruleResult.signals.join(", ")}`;
  if (ruleResult.tier !== null) {
    tier = ruleResult.tier;
    confidence = ruleResult.confidence;
  } else {
    // Rules were ambiguous — pay for one cheap LLM classification call.
    const llmResult = await classifyByLLM(
      prompt,
      {
        model: config.classifier.llmModel,
        maxTokens: config.classifier.llmMaxTokens,
        temperature: config.classifier.llmTemperature,
        truncationChars: config.classifier.promptTruncationChars,
        cacheTtlMs: config.classifier.cacheTtlMs
      },
      payFetch,
      apiBase
    );
    tier = llmResult.tier;
    confidence = llmResult.confidence;
    method = "llm";
    reasoning += ` | ambiguous -> LLM: ${tier}`;
  }
  // Structured-output requests are bumped up to at least the configured tier.
  if (hasStructuredOutput) {
    const tierRank = { SIMPLE: 0, MEDIUM: 1, COMPLEX: 2, REASONING: 3 };
    const minTier = config.overrides.structuredOutputMinTier;
    if (tierRank[tier] < tierRank[minTier]) {
      reasoning += ` | upgraded to ${minTier} (structured output)`;
      tier = minTier;
    }
  }
  return selectModel(
    tier,
    confidence,
    method,
    reasoning,
    config.tiers,
    modelPricing,
    estimatedTokens,
    maxOutputTokens
  );
}
648
+
649
+ // src/logger.ts
650
+ import { appendFile, mkdir as mkdir2 } from "fs/promises";
651
+ import { join as join2 } from "path";
652
+ import { homedir as homedir2 } from "os";
653
// Usage-log directory (~/.openclaw/blockrun/logs) and one-shot mkdir guard.
var LOG_DIR = join2(homedir2(), ".openclaw", "blockrun", "logs");
var dirReady = false;
// Create LOG_DIR once per process; subsequent calls are no-ops.
async function ensureDir() {
  if (dirReady) return;
  await mkdir2(LOG_DIR, { recursive: true });
  dirReady = true;
}
// Append one usage record as a JSON line to a per-day file
// (usage-YYYY-MM-DD.jsonl, date taken from entry.timestamp's ISO prefix).
// Deliberately best-effort: logging failures are swallowed so they can never
// break request handling.
async function logUsage(entry) {
  try {
    await ensureDir();
    const date = entry.timestamp.slice(0, 10);
    const file = join2(LOG_DIR, `usage-${date}.jsonl`);
    await appendFile(file, JSON.stringify(entry) + "\n");
  } catch {
  }
}
669
+
670
+ // src/proxy.ts
671
// Upstream BlockRun API root the local proxy forwards to by default.
var BLOCKRUN_API = "https://blockrun.ai/api";
// Meta-model id that triggers smart routing instead of a direct passthrough.
var AUTO_MODEL = "blockrun/auto";
// Identifies this proxy on every upstream request.
var USER_AGENT = "claw-router/0.1.0";
674
// Build the router's price lookup: model id -> { inputPrice, outputPrice }.
// The blockrun/auto meta-model is excluded — it is never billed directly.
function buildModelPricing() {
  const entries = BLOCKRUN_MODELS
    .filter((m) => m.id !== AUTO_MODEL)
    .map((m) => [m.id, { inputPrice: m.inputPrice, outputPrice: m.outputPrice }]);
  return new Map(entries);
}
682
// Merge user-supplied routing overrides onto DEFAULT_ROUTING_CONFIG.
// Nested sections (classifier/scoring/tiers/overrides) merge one level deep.
// Always returns a fresh object — previously the shared DEFAULT_ROUTING_CONFIG
// instance itself was returned when no overrides were given, letting callers
// mutate the module-wide default.
function mergeRoutingConfig(overrides) {
  const o = overrides ?? {};
  return {
    ...DEFAULT_ROUTING_CONFIG,
    ...o,
    classifier: { ...DEFAULT_ROUTING_CONFIG.classifier, ...o.classifier },
    scoring: { ...DEFAULT_ROUTING_CONFIG.scoring, ...o.scoring },
    tiers: { ...DEFAULT_ROUTING_CONFIG.tiers, ...o.tiers },
    overrides: { ...DEFAULT_ROUTING_CONFIG.overrides, ...o.overrides }
  };
}
693
// Start the local x402 proxy on 127.0.0.1 (random port unless options.port is
// set). Resolves to { port, baseUrl, close } once listening; rejects if the
// server fails to bind. Routes:
//   GET /health  -> { status, wallet }
//   /v1/*        -> forwarded upstream via proxyRequest (with smart routing)
//   anything else -> 404
async function startProxy(options) {
  const apiBase = options.apiBase ?? BLOCKRUN_API;
  const account = privateKeyToAccount3(options.walletKey);
  // All upstream traffic goes through the payment-aware fetch wrapper.
  const payFetch = createPaymentFetch(options.walletKey);
  const routingConfig = mergeRoutingConfig(options.routingConfig);
  const modelPricing = buildModelPricing();
  const routerOpts = {
    config: routingConfig,
    modelPricing,
    payFetch,
    apiBase
  };
  const server = createServer(async (req, res) => {
    if (req.url === "/health") {
      res.writeHead(200, { "Content-Type": "application/json" });
      res.end(JSON.stringify({ status: "ok", wallet: account.address }));
      return;
    }
    if (!req.url?.startsWith("/v1")) {
      res.writeHead(404, { "Content-Type": "application/json" });
      res.end(JSON.stringify({ error: "Not found" }));
      return;
    }
    try {
      await proxyRequest(req, res, apiBase, payFetch, options, routerOpts);
    } catch (err) {
      const error = err instanceof Error ? err : new Error(String(err));
      options.onError?.(error);
      // Only emit the 502 if proxyRequest hasn't already started responding.
      if (!res.headersSent) {
        res.writeHead(502, { "Content-Type": "application/json" });
        res.end(JSON.stringify({
          error: { message: `Proxy error: ${error.message}`, type: "proxy_error" }
        }));
      }
    }
  });
  // Port 0 asks the OS for any free port.
  const listenPort = options.port ?? 0;
  return new Promise((resolve, reject) => {
    server.on("error", reject);
    server.listen(listenPort, "127.0.0.1", () => {
      const addr = server.address();
      const port = addr.port;
      const baseUrl = `http://127.0.0.1:${port}`;
      options.onReady?.(port);
      resolve({
        port,
        baseUrl,
        // close() resolves once all connections have drained.
        close: () => new Promise((res, rej) => {
          server.close((err) => err ? rej(err) : res());
        })
      });
    });
  });
}
747
// Forward one incoming HTTP request upstream through the paying fetch.
// For chat-completion requests targeting the blockrun/auto meta-model, run
// the smart router first and rewrite `model` in the body before forwarding.
// Streams the upstream response back verbatim, then logs usage best-effort.
async function proxyRequest(req, res, apiBase, payFetch, options, routerOpts) {
  const startTime = Date.now();
  const upstreamUrl = `${apiBase}${req.url}`;
  // Buffer the full request body (needed to inspect/rewrite the model field).
  const bodyChunks = [];
  for await (const chunk of req) {
    bodyChunks.push(Buffer.isBuffer(chunk) ? chunk : Buffer.from(chunk));
  }
  let body = Buffer.concat(bodyChunks);
  let routingDecision;
  const isChatCompletion = req.url?.includes("/chat/completions");
  if (isChatCompletion && body.length > 0) {
    try {
      const parsed = JSON.parse(body.toString());
      if (parsed.model === AUTO_MODEL) {
        const messages = parsed.messages;
        // Classify on the most recent user message plus the system message.
        let lastUserMsg;
        if (messages) {
          for (let i = messages.length - 1; i >= 0; i--) {
            if (messages[i].role === "user") {
              lastUserMsg = messages[i];
              break;
            }
          }
        }
        const systemMsg = messages?.find((m) => m.role === "system");
        // Non-string content (e.g. multimodal arrays) is ignored for routing.
        const prompt = typeof lastUserMsg?.content === "string" ? lastUserMsg.content : "";
        const systemPrompt = typeof systemMsg?.content === "string" ? systemMsg.content : void 0;
        const maxTokens = parsed.max_tokens || 4096;
        routingDecision = await route(prompt, systemPrompt, maxTokens, routerOpts);
        // Swap the meta-model for the concrete routed model.
        parsed.model = routingDecision.model;
        body = Buffer.from(JSON.stringify(parsed));
        options.onRouted?.(routingDecision);
      }
    } catch {
      // Unparseable body: forward it unchanged and let upstream reject it.
    }
  }
  // Copy request headers, dropping hop-by-hop and length headers (fetch
  // recomputes content-length from the rewritten body).
  const headers = {};
  for (const [key, value] of Object.entries(req.headers)) {
    if (key === "host" || key === "connection" || key === "transfer-encoding" || key === "content-length") continue;
    if (typeof value === "string") {
      headers[key] = value;
    }
  }
  if (!headers["content-type"]) {
    headers["content-type"] = "application/json";
  }
  headers["user-agent"] = USER_AGENT;
  const upstream = await payFetch(upstreamUrl, {
    method: req.method ?? "POST",
    headers,
    body: body.length > 0 ? body : void 0
  });
  // Mirror upstream response headers minus hop-by-hop ones.
  const responseHeaders = {};
  upstream.headers.forEach((value, key) => {
    if (key === "transfer-encoding" || key === "connection") return;
    responseHeaders[key] = value;
  });
  res.writeHead(upstream.status, responseHeaders);
  // Pump the upstream body through chunk-by-chunk so SSE streams flow live.
  if (upstream.body) {
    const reader = upstream.body.getReader();
    try {
      while (true) {
        const { done, value } = await reader.read();
        if (done) break;
        res.write(value);
      }
    } finally {
      reader.releaseLock();
    }
  }
  res.end();
  // Fire-and-forget usage logging after the response is fully sent.
  if (routingDecision) {
    const entry = {
      timestamp: (/* @__PURE__ */ new Date()).toISOString(),
      model: routingDecision.model,
      cost: routingDecision.costEstimate,
      latencyMs: Date.now() - startTime
    };
    logUsage(entry).catch(() => {
    });
  }
}
829
+
830
+ // src/index.ts
831
// OpenClaw plugin entry point.
// register(): declares the BlockRun provider with the host.
// activate(): resolves/creates a wallet, starts the local x402 proxy, and
// points the provider's model base URLs at it. Proxy startup failure is
// logged but does not throw — the provider then falls back to the public API
// URL (without smart routing).
var plugin = {
  id: "claw-router",
  name: "ClawRouter",
  description: "Smart LLM router \u2014 30+ models, x402 micropayments, 63% cost savings",
  version: "0.1.0",
  register(api) {
    api.registerProvider(blockrunProvider);
    api.logger.info("BlockRun provider registered (30+ models via x402)");
  },
  async activate(api) {
    // Wallet priority: saved on disk > env var > freshly generated.
    const { key: walletKey, address, source } = await resolveOrGenerateWalletKey();
    if (source === "generated") {
      api.logger.info(`Generated new wallet: ${address}`);
      api.logger.info(`Fund with USDC on Base to start using ClawRouter.`);
    } else if (source === "saved") {
      api.logger.info(`Using saved wallet: ${address}`);
    } else {
      api.logger.info(`Using wallet from BLOCKRUN_WALLET_KEY: ${address}`);
    }
    // Optional user routing overrides from plugin config.
    const routingConfig = api.pluginConfig?.routing;
    try {
      const proxy = await startProxy({
        walletKey,
        routingConfig,
        onReady: (port) => {
          api.logger.info(`BlockRun x402 proxy listening on port ${port}`);
        },
        onError: (error) => {
          api.logger.error(`BlockRun proxy error: ${error.message}`);
        },
        // Log each routing decision with its estimated cost and savings.
        onRouted: (decision) => {
          const cost = decision.costEstimate.toFixed(4);
          const saved = (decision.savings * 100).toFixed(0);
          api.logger.info(`${decision.model} $${cost} (saved ${saved}%)`);
        }
      });
      setActiveProxy(proxy);
      api.logger.info(`BlockRun provider active \u2014 ${proxy.baseUrl}/v1 (smart routing enabled)`);
    } catch (err) {
      api.logger.error(
        `Failed to start BlockRun proxy: ${err instanceof Error ? err.message : String(err)}`
      );
    }
  }
};
876
+ var index_default = plugin;
877
+ export {
878
+ BLOCKRUN_MODELS,
879
+ DEFAULT_ROUTING_CONFIG,
880
+ OPENCLAW_MODELS,
881
+ blockrunProvider,
882
+ buildProviderModels,
883
+ index_default as default,
884
+ logUsage,
885
+ route,
886
+ startProxy
887
+ };
888
+ //# sourceMappingURL=index.js.map