@blockrun/clawrouter 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,357 @@
+ /**
+  * OpenClaw Plugin Types (locally defined)
+  *
+  * OpenClaw's plugin SDK uses duck typing — these match the shapes
+  * expected by registerProvider() and the plugin system.
+  * Defined locally to avoid depending on internal OpenClaw paths.
+  */
+ type ModelApi = "openai-completions" | "openai-responses" | "anthropic-messages" | "google-generative-ai" | "github-copilot" | "bedrock-converse-stream";
+ type ModelDefinitionConfig = {
+     id: string;
+     name: string;
+     api?: ModelApi;
+     reasoning: boolean;
+     input: Array<"text" | "image">;
+     cost: {
+         input: number;
+         output: number;
+         cacheRead: number;
+         cacheWrite: number;
+     };
+     contextWindow: number;
+     maxTokens: number;
+     headers?: Record<string, string>;
+ };
+ type ModelProviderConfig = {
+     baseUrl: string;
+     apiKey?: string;
+     api?: ModelApi;
+     headers?: Record<string, string>;
+     authHeader?: boolean;
+     models: ModelDefinitionConfig[];
+ };
+ type AuthProfileCredential = {
+     apiKey?: string;
+     type?: string;
+     [key: string]: unknown;
+ };
+ type ProviderAuthResult = {
+     profiles: Array<{
+         profileId: string;
+         credential: AuthProfileCredential;
+     }>;
+     configPatch?: Record<string, unknown>;
+     defaultModel?: string;
+     notes?: string[];
+ };
+ type WizardPrompter = {
+     text: (opts: {
+         message: string;
+         validate?: (value: string) => string | undefined;
+     }) => Promise<string | symbol>;
+     note: (message: string) => void;
+     progress: (message: string) => {
+         stop: (message?: string) => void;
+     };
+ };
+ type ProviderAuthContext = {
+     config: Record<string, unknown>;
+     agentDir?: string;
+     workspaceDir?: string;
+     prompter: WizardPrompter;
+     runtime: {
+         log: (message: string) => void;
+     };
+     isRemote: boolean;
+     openUrl: (url: string) => Promise<void>;
+ };
+ type ProviderAuthMethod = {
+     id: string;
+     label: string;
+     hint?: string;
+     kind: "oauth" | "api_key" | "token" | "device_code" | "custom";
+     run: (ctx: ProviderAuthContext) => Promise<ProviderAuthResult>;
+ };
+ type ProviderPlugin = {
+     id: string;
+     label: string;
+     docsPath?: string;
+     aliases?: string[];
+     envVars?: string[];
+     models?: ModelProviderConfig;
+     auth: ProviderAuthMethod[];
+     formatApiKey?: (cred: AuthProfileCredential) => string;
+ };
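
To make the shapes above concrete, here is a minimal sketch of a provider built from these types. It is illustrative only and not part of the published package; the identifiers (exampleAuthMethod, exampleProvider, EXAMPLE_API_KEY) are assumptions.

const exampleAuthMethod: ProviderAuthMethod = {
    id: "api_key",
    label: "API key",
    kind: "api_key",
    // Prompt the operator for a key and return it as a single auth profile.
    run: async (ctx) => {
        const key = await ctx.prompter.text({
            message: "Enter your API key",
            validate: (v) => (v.trim() ? undefined : "Key is required"),
        });
        if (typeof key !== "string") throw new Error("Cancelled");
        return { profiles: [{ profileId: "default", credential: { apiKey: key } }] };
    },
};

const exampleProvider: ProviderPlugin = {
    id: "example",
    label: "Example Provider",
    envVars: ["EXAMPLE_API_KEY"],
    auth: [exampleAuthMethod],
};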
+ type PluginLogger = {
+     debug?: (message: string) => void;
+     info: (message: string) => void;
+     warn: (message: string) => void;
+     error: (message: string) => void;
+ };
+ type OpenClawPluginApi = {
+     id: string;
+     name: string;
+     version?: string;
+     description?: string;
+     source: string;
+     config: Record<string, unknown> & {
+         models?: {
+             providers?: Record<string, ModelProviderConfig>;
+         };
+     };
+     pluginConfig?: Record<string, unknown>;
+     logger: PluginLogger;
+     registerProvider: (provider: ProviderPlugin) => void;
+     registerTool: (tool: unknown, opts?: unknown) => void;
+     registerHook: (events: string | string[], handler: unknown, opts?: unknown) => void;
+     registerHttpRoute: (params: {
+         path: string;
+         handler: unknown;
+     }) => void;
+     registerService: (service: unknown) => void;
+     registerCommand: (command: unknown) => void;
+     resolvePath: (input: string) => string;
+     on: (hookName: string, handler: unknown, opts?: unknown) => void;
+ };
+ type OpenClawPluginDefinition = {
+     id?: string;
+     name?: string;
+     description?: string;
+     version?: string;
+     register?: (api: OpenClawPluginApi) => void | Promise<void>;
+     activate?: (api: OpenClawPluginApi) => void | Promise<void>;
+ };
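
The plugin entry point follows OpenClawPluginDefinition: OpenClaw calls register() (or activate()) with the plugin API, and the plugin registers its provider there. A rough sketch of that wiring, again illustrative rather than the package's actual implementation:

const examplePlugin: OpenClawPluginDefinition = {
    id: "example-plugin",
    name: "Example Plugin",
    register: (api) => {
        // Make the provider available to OpenClaw's model and auth wizards.
        api.registerProvider(exampleProvider);
        api.logger.info("example provider registered");
    },
};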
+
+ /**
+  * Smart Router Types
+  *
+  * Four classification tiers — REASONING is distinct from COMPLEX because
+  * reasoning tasks need different models (o3, gemini-pro) than general
+  * complex tasks (gpt-4o, sonnet-4).
+  */
+ type Tier = "SIMPLE" | "MEDIUM" | "COMPLEX" | "REASONING";
+ type RoutingDecision = {
+     model: string;
+     tier: Tier;
+     confidence: number;
+     method: "rules" | "llm";
+     reasoning: string;
+     costEstimate: number;
+     baselineCost: number;
+     savings: number;
+ };
+ type TierConfig = {
+     primary: string;
+     fallback: string[];
+ };
+ type ScoringConfig = {
+     tokenCountThresholds: {
+         simple: number;
+         complex: number;
+     };
+     codeKeywords: string[];
+     reasoningKeywords: string[];
+     simpleKeywords: string[];
+     technicalKeywords: string[];
+     creativeKeywords: string[];
+ };
+ type ClassifierConfig = {
+     ambiguousZone: [number, number];
+     llmModel: string;
+     llmMaxTokens: number;
+     llmTemperature: number;
+     promptTruncationChars: number;
+     cacheTtlMs: number;
+ };
+ type OverridesConfig = {
+     maxTokensForceComplex: number;
+     structuredOutputMinTier: Tier;
+ };
+ type RoutingConfig = {
+     version: string;
+     classifier: ClassifierConfig;
+     scoring: ScoringConfig;
+     tiers: Record<Tier, TierConfig>;
+     overrides: OverridesConfig;
+ };
+
+ /**
+  * Tier → Model Selection
+  *
+  * Maps a classification tier to the cheapest capable model.
+  * Builds RoutingDecision metadata with cost estimates and savings.
+  */
+
+ type ModelPricing = {
+     inputPrice: number;
+     outputPrice: number;
+ };
+
+ /**
+  * Default Routing Config
+  *
+  * All routing parameters as a TypeScript constant.
+  * Operators override via openclaw.yaml plugin config.
+  */
+
+ declare const DEFAULT_ROUTING_CONFIG: RoutingConfig;
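
The shapes above make partial overrides straightforward. A sketch of what an override merged over DEFAULT_ROUTING_CONFIG might look like; the concrete defaults are not shown in this diff, so the numbers and model IDs below are assumptions, not the package's values:

const routingOverride: Partial<RoutingConfig> = {
    classifier: {
        ambiguousZone: [0.4, 0.6],    // send borderline scores to the LLM classifier
        llmModel: "openai/gpt-4o-mini",
        llmMaxTokens: 64,
        llmTemperature: 0,
        promptTruncationChars: 4000,
        cacheTtlMs: 60_000,
    },
    overrides: {
        maxTokensForceComplex: 8000,  // very large requested outputs skip straight to COMPLEX
        structuredOutputMinTier: "MEDIUM",
    },
};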
+
+ /**
+  * Smart Router Entry Point
+  *
+  * Classifies requests and routes to the cheapest capable model.
+  * Uses hybrid approach: rules first (< 1ms), LLM fallback for ambiguous cases.
+  */
+
+ type RouterOptions = {
+     config: RoutingConfig;
+     modelPricing: Map<string, ModelPricing>;
+     payFetch: (input: RequestInfo | URL, init?: RequestInit) => Promise<Response>;
+     apiBase: string;
+ };
+ /**
+  * Route a request to the cheapest capable model.
+  *
+  * 1. Check overrides (large context, structured output)
+  * 2. Run rule-based classifier
+  * 3. If ambiguous, run LLM classifier
+  * 4. Select model for tier
+  * 5. Return RoutingDecision with metadata
+  */
+ declare function route(prompt: string, systemPrompt: string | undefined, maxOutputTokens: number, options: RouterOptions): Promise<RoutingDecision>;
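
Calling route() directly would look roughly like this. The pricing entry, model ID, and comments on the result are assumptions for illustration; in normal use startProxy() below does this wiring when the requested model is "blockrun/auto":

const decision = await route(
    "Summarize this changelog in two sentences.",
    undefined,   // no system prompt
    256,         // max output tokens
    {
        config: DEFAULT_ROUTING_CONFIG,
        // USD per 1M tokens; real pricing would come from BLOCKRUN_MODELS.
        modelPricing: new Map([["openai/gpt-4o-mini", { inputPrice: 0.15, outputPrice: 0.6 }]]),
        payFetch: fetch,   // only exercised if the LLM classifier has to run
        apiBase: "https://blockrun.ai/api",
    },
);
// decision.model, decision.tier, decision.method ("rules" | "llm"),
// and decision.costEstimate vs decision.baselineCost → decision.savings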
+
+ /**
+  * Local x402 Proxy Server
+  *
+  * Sits between OpenClaw's pi-ai (which makes standard OpenAI-format requests)
+  * and BlockRun's API (which requires x402 micropayments).
+  *
+  * Flow:
+  *   pi-ai → http://localhost:{port}/v1/chat/completions
+  *   → proxy forwards to https://blockrun.ai/api/v1/chat/completions
+  *   → gets 402 → @x402/fetch signs payment → retries
+  *   → streams response back to pi-ai
+  *
+  * Phase 2 additions:
+  * - Smart routing: when model is "blockrun/auto", classify query and pick cheapest model
+  * - Usage logging: log every request as JSON line to ~/.openclaw/blockrun/logs/
+  */
+
+ type ProxyOptions = {
+     walletKey: string;
+     apiBase?: string;
+     port?: number;
+     routingConfig?: Partial<RoutingConfig>;
+     onReady?: (port: number) => void;
+     onError?: (error: Error) => void;
+     onPayment?: (info: {
+         model: string;
+         amount: string;
+         network: string;
+     }) => void;
+     onRouted?: (decision: RoutingDecision) => void;
+ };
+ type ProxyHandle = {
+     port: number;
+     baseUrl: string;
+     close: () => Promise<void>;
+ };
+ /**
+  * Start the local x402 proxy server.
+  *
+  * Returns a handle with the assigned port, base URL, and a close function.
+  */
+ declare function startProxy(options: ProxyOptions): Promise<ProxyHandle>;
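
Starting the proxy outside of OpenClaw might look like the sketch below. The wallet key source, the callbacks, and the reuse of routingOverride from earlier are illustrative, not prescribed by the package:

const proxy = await startProxy({
    walletKey: process.env.BLOCKRUN_WALLET_KEY ?? "",
    routingConfig: routingOverride,   // optional Partial<RoutingConfig>
    onRouted: (d) => console.log(`routed → ${d.model} (${d.tier}, saved $${d.savings.toFixed(6)})`),
    onPayment: (p) => console.log(`paid ${p.amount} on ${p.network} for ${p.model}`),
});
// proxy.baseUrl points at the local OpenAI-compatible endpoint
// (see buildProviderModels below); shut it down with:
await proxy.close();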
+
+ /**
+  * BlockRun ProviderPlugin for OpenClaw
+  *
+  * Registers BlockRun as an LLM provider in OpenClaw.
+  * Uses a local x402 proxy to handle micropayments transparently —
+  * pi-ai sees a standard OpenAI-compatible API at localhost.
+  */
+
+ /**
+  * BlockRun provider plugin definition.
+  */
+ declare const blockrunProvider: ProviderPlugin;
+
+ /**
+  * BlockRun Model Definitions for OpenClaw
+  *
+  * Maps BlockRun's 30+ AI models to OpenClaw's ModelDefinitionConfig format.
+  * All models use the "openai-completions" API since BlockRun is OpenAI-compatible.
+  *
+  * Pricing is in USD per 1M tokens. Operators pay these rates via x402;
+  * they set their own markup when reselling to end users (Phase 2).
+  */
+
+ type BlockRunModel = {
+     id: string;
+     name: string;
+     inputPrice: number;
+     outputPrice: number;
+     contextWindow: number;
+     maxOutput: number;
+     reasoning?: boolean;
+     vision?: boolean;
+ };
+ declare const BLOCKRUN_MODELS: BlockRunModel[];
+ /**
+  * All BlockRun models in OpenClaw format.
+  */
+ declare const OPENCLAW_MODELS: ModelDefinitionConfig[];
+ /**
+  * Build a ModelProviderConfig for BlockRun.
+  *
+  * @param baseUrl - The proxy's local base URL (e.g., "http://127.0.0.1:12345")
+  */
+ declare function buildProviderModels(baseUrl: string): ModelProviderConfig;
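
The two pieces compose: the proxy's local base URL feeds buildProviderModels, and the result is a ModelProviderConfig that can sit under config.models.providers. Whether the plugin actually patches the config this way, and the provider key "blockrun", are assumptions in this sketch, which continues the register(api) example above:

// inside register(api), after startProxy() has resolved:
const providerConfig = buildProviderModels(proxy.baseUrl);
api.config.models = {
    providers: { ...api.config.models?.providers, blockrun: providerConfig },
};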
+
+ /**
+  * Usage Logger
+  *
+  * Logs every LLM request as a JSON line to a daily log file.
+  * Files: ~/.openclaw/blockrun/logs/usage-YYYY-MM-DD.jsonl
+  *
+  * MVP: append-only JSON lines. No rotation, no cleanup.
+  * Logging never breaks the request flow — all errors are swallowed.
+  */
+ type UsageEntry = {
+     timestamp: string;
+     model: string;
+     cost: number;
+     latencyMs: number;
+ };
+ /**
+  * Log a usage entry as a JSON line.
+  */
+ declare function logUsage(entry: UsageEntry): Promise<void>;
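
Each entry becomes one JSON line in the day's file, so a call looks like this (the concrete values are illustrative):

await logUsage({
    timestamp: new Date().toISOString(),
    model: "openai/gpt-4o-mini",
    cost: 0.000042,   // USD for this request
    latencyMs: 812,
});
// Appends one line like {"timestamp":"...","model":"openai/gpt-4o-mini","cost":0.000042,"latencyMs":812}
// to ~/.openclaw/blockrun/logs/usage-YYYY-MM-DD.jsonl; failures are swallowed.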
+
+ /**
+  * @blockrun/openclaw-provider
+  *
+  * OpenClaw plugin that adds BlockRun as an LLM provider with 30+ AI models.
+  * Payments are handled automatically via x402 USDC micropayments on Base.
+  * Smart routing picks the cheapest capable model for each request.
+  *
+  * Usage:
+  *   # Install the plugin
+  *   openclaw plugin install @blockrun/openclaw-provider
+  *
+  *   # Set wallet key
+  *   export BLOCKRUN_WALLET_KEY=0x...
+  *
+  *   # Or configure via wizard
+  *   openclaw provider add blockrun
+  *
+  *   # Use smart routing (auto-picks cheapest model)
+  *   openclaw config set model blockrun/auto
+  *
+  *   # Or use any specific BlockRun model
+  *   openclaw config set model openai/gpt-5.2
+  */
+
+ declare const plugin: OpenClawPluginDefinition;
+
+ export { BLOCKRUN_MODELS, DEFAULT_ROUTING_CONFIG, OPENCLAW_MODELS, type RoutingConfig, type RoutingDecision, type Tier, type UsageEntry, blockrunProvider, buildProviderModels, plugin as default, logUsage, route, startProxy };