@blockrun/cc 0.8.2 → 0.9.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/LICENSE CHANGED
@@ -4,7 +4,7 @@ Licensor: BlockRun AI (Piebald LLC)
4
4
 
5
5
  Licensed Work: brcc
6
6
 
7
- Change Date: 2030-03-23
7
+ Change Date: 2036-03-24
8
8
 
9
9
  Change License: MIT
10
10
 
package/README.md CHANGED
@@ -18,7 +18,7 @@ You're paying $200/month and still can't work.<br><br>
18
18
  [![npm downloads](https://img.shields.io/npm/dm/@blockrun/cc.svg?style=flat-square&color=blue)](https://npmjs.com/package/@blockrun/cc)
19
19
  [![GitHub stars](https://img.shields.io/github/stars/BlockRunAI/brcc?style=flat-square)](https://github.com/BlockRunAI/brcc)
20
20
  [![TypeScript](https://img.shields.io/badge/TypeScript-5.7-3178c6?style=flat-square&logo=typescript&logoColor=white)](https://typescriptlang.org)
21
- [![License: BUSL-1.1](https://img.shields.io/badge/License-BUSL--1.1-orange?style=flat-square)](LICENSE)
21
+ [![License: BUSL-1.1](https://img.shields.io/badge/License-BUSL--1.1-orange?style=flat-square)](LICENSE)
22
22
 
23
23
  [![x402 Protocol](https://img.shields.io/badge/x402-Micropayments-purple?style=flat-square)](https://x402.org)
24
24
  [![Base Network](https://img.shields.io/badge/Base-USDC-0052FF?style=flat-square&logo=coinbase&logoColor=white)](https://base.org)
@@ -110,13 +110,43 @@ That's it. Claude Code opens with access to 40+ models, no rate limits.
110
110
  ### From the command line
111
111
 
112
112
  ```bash
113
- brcc start # Default (Claude Sonnet 4.6)
114
- brcc start --model nvidia/gpt-oss-120b # Free no USDC needed
115
- brcc start --model openai/gpt-5.4 # GPT-5.4
116
- brcc start --model deepseek/deepseek-chat # Budget coding ($0.28/M)
113
+ brcc start # Default: smart routing (blockrun/auto)
114
+ brcc start --model blockrun/eco # Cheapest capable model
115
+ brcc start --model blockrun/premium # Best quality
116
+ brcc start --model blockrun/free # Free tier only
117
+ brcc start --model deepseek/deepseek-chat # Direct model access
117
118
  brcc start --model anthropic/claude-opus-4.6 # Most capable
118
119
  ```
119
120
 
121
+ ### Smart Routing (Built-in)
122
+
123
+ brcc includes ClawRouter's 15-dimension classifier for automatic model selection:
124
+
125
+ | Profile | Strategy | Savings | Best For |
126
+ |---------|----------|---------|----------|
127
+ | `blockrun/auto` | Balanced (default) | 74-100% | General use |
128
+ | `blockrun/eco` | Cheapest possible | 95-100% | Maximum savings |
129
+ | `blockrun/premium` | Best quality | 0% | Mission-critical |
130
+ | `blockrun/free` | Free tier only | 100% | Zero cost |
131
+
132
+ **How it works:**
133
+ ```
134
+ "What is 2+2?" → SIMPLE → gemini-flash ($0.0002)
135
+ "Write a React component" → MEDIUM → kimi-k2.5 ($0.002)
136
+ "Design a microservice..." → COMPLEX → gemini-3.1-pro ($0.007)
137
+ "Prove this theorem..." → REASONING → grok-4-fast ($0.0004)
138
+ ```
139
+
140
+ **In-session switching:**
141
+ ```
142
+ use auto # Switch to smart routing
143
+ use eco # Switch to cheapest
144
+ use premium # Switch to best quality
145
+ use free # Switch to free models
146
+ use sonnet # Direct Claude Sonnet
147
+ use deepseek # Direct DeepSeek
148
+ ```
149
+
120
150
  ### Inside Claude Code
121
151
 
122
152
  Use `/model` to switch between Sonnet, Opus, and Haiku. Each maps to the BlockRun model you've configured:
@@ -172,6 +202,7 @@ Paid Models
172
202
  | `brcc start [--model <id>]` | Start proxy + launch Claude Code |
173
203
  | `brcc models` | List all models with pricing |
174
204
  | `brcc balance` | Check wallet USDC balance |
205
+ | `brcc stats` | View usage statistics and savings |
175
206
  | `brcc config set <key> <value>` | Configure model mappings |
176
207
  | `brcc config list` | View current settings |
177
208
 
@@ -190,11 +221,55 @@ Your wallet is saved to `~/.blockrun/` and shared with all BlockRun tools.
190
221
  ```bash
191
222
  brcc start # Default model
192
223
  brcc start --model nvidia/gpt-oss-120b # Free model
193
- brcc start --model openai/gpt-5.4 # Specific model
224
+ brcc start --model openai/gpt-5.4 # Specific model
194
225
  brcc start --no-launch # Proxy only mode
226
+ brcc start --no-fallback # Disable auto-fallback
195
227
  brcc start -p 9000 # Custom port
196
228
  ```
197
229
 
230
+ ### `brcc stats`
231
+
232
+ View your usage statistics and cost savings:
233
+
234
+ ```bash
235
+ $ brcc stats
236
+
237
+ 📊 brcc Usage Statistics
238
+
239
+ ───────────────────────────────────────────────────────────
240
+
241
+ Overview (7 days)
242
+
243
+ Requests: 1,234
244
+ Total Cost: $4.5672
245
+ Avg per Request: $0.003701
246
+ Input Tokens: 2,456,000
247
+ Output Tokens: 892,000
248
+ Fallbacks: 23 (1.9%)
249
+
250
+ By Model
251
+
252
+ anthropic/claude-sonnet-4.6
253
+ 450 req · $2.1340 (46.7%) · 245ms avg
254
+ deepseek/deepseek-chat
255
+ 620 req · $0.8901 (19.5%) · 180ms avg
256
+ ↳ 12 fallback recoveries
257
+ nvidia/gpt-oss-120b
258
+ 164 req · $0.0000 (0%) · 320ms avg
259
+
260
+ 💰 Savings vs Claude Opus
261
+
262
+ Opus equivalent: $34.62
263
+ Your actual cost: $4.57
264
+ Saved: $30.05 (86.8%)
265
+
266
+ ───────────────────────────────────────────────────────────
267
+ Run `brcc stats --clear` to reset statistics
268
+
269
+ $ brcc stats --clear # Reset all statistics
270
+ $ brcc stats --json # Output as JSON (for scripts)
271
+ ```
272
+
198
273
  ### `brcc config`
199
274
 
200
275
  ```bash
@@ -207,6 +282,36 @@ brcc config list
207
282
 
208
283
  ---
209
284
 
285
+ ## Automatic Fallback
286
+
287
+ When a model returns an error (429 rate limit, 500+ server error), brcc automatically retries with backup models. This ensures your work never stops.
288
+
289
+ **Default fallback chain:**
290
+ ```
291
+ anthropic/claude-sonnet-4.6
292
+ ↓ (if 429/500/502/503/504/529)
293
+ google/gemini-2.5-pro
294
+
295
+ deepseek/deepseek-chat
296
+
297
+ xai/grok-4-fast
298
+
299
+ nvidia/gpt-oss-120b (free, always available)
300
+ ```
301
+
302
+ **How it looks:**
303
+ ```
304
+ [brcc] ⚠️ anthropic/claude-sonnet-4.6 returned 429, falling back to google/gemini-2.5-pro
305
+ [brcc] ↺ Fallback successful: using google/gemini-2.5-pro
306
+ ```
307
+
308
+ To disable fallback:
309
+ ```bash
310
+ brcc start --no-fallback
311
+ ```
312
+
313
+ ---
314
+
210
315
  ## How It Works
211
316
 
212
317
  ```
@@ -277,4 +382,4 @@ Yes. GPT-5, Gemini, DeepSeek, Grok, and 30+ others work through Claude Code via
277
382
 
278
383
  ## License
279
384
 
280
- [Business Source License 1.1](LICENSE) — Free to use, modify, and deploy. Cannot be used to build a competing hosted service. Converts to MIT in 2030.
385
+ [Business Source License 1.1](LICENSE) — Free to use, modify, and deploy. Cannot be used to build a competing hosted service. Converts to MIT in 2036.
@@ -2,6 +2,7 @@ interface StartOptions {
2
2
  port?: string;
3
3
  model?: string;
4
4
  launch?: boolean;
5
+ fallback?: boolean;
5
6
  debug?: boolean;
6
7
  }
7
8
  export declare function startCommand(options: StartOptions): Promise<void>;
@@ -7,6 +7,7 @@ import { loadConfig } from './config.js';
7
7
  export async function startCommand(options) {
8
8
  const chain = loadChain();
9
9
  const apiUrl = API_URLS[chain];
10
+ const fallbackEnabled = options.fallback !== false; // Default true
10
11
  if (chain === 'solana') {
11
12
  const wallet = await getOrCreateSolanaWallet();
12
13
  if (wallet.isNew) {
@@ -19,13 +20,21 @@ export async function startCommand(options) {
19
20
  const shouldLaunch = options.launch !== false;
20
21
  const model = options.model;
21
22
  console.log(chalk.bold('brcc — BlockRun Claude Code\n'));
22
- console.log(`Chain: ${chalk.magenta('solana')}`);
23
- console.log(`Wallet: ${chalk.cyan(wallet.address)}`);
23
+ console.log(`Chain: ${chalk.magenta('solana')}`);
24
+ console.log(`Wallet: ${chalk.cyan(wallet.address)}`);
24
25
  if (model)
25
- console.log(`Model: ${chalk.green(model)}`);
26
- console.log(`Proxy: ${chalk.cyan(`http://localhost:${port}`)}`);
27
- console.log(`Backend: ${chalk.dim(apiUrl)}\n`);
28
- const server = createProxy({ port, apiUrl, chain: 'solana', modelOverride: model, debug: options.debug });
26
+ console.log(`Model: ${chalk.green(model)}`);
27
+ console.log(`Fallback: ${fallbackEnabled ? chalk.green('enabled') : chalk.yellow('disabled')}`);
28
+ console.log(`Proxy: ${chalk.cyan(`http://localhost:${port}`)}`);
29
+ console.log(`Backend: ${chalk.dim(apiUrl)}\n`);
30
+ const server = createProxy({
31
+ port,
32
+ apiUrl,
33
+ chain: 'solana',
34
+ modelOverride: model,
35
+ debug: options.debug,
36
+ fallbackEnabled,
37
+ });
29
38
  launchServer(server, port, shouldLaunch, model);
30
39
  }
31
40
  else {
@@ -40,19 +49,29 @@ export async function startCommand(options) {
40
49
  const shouldLaunch = options.launch !== false;
41
50
  const model = options.model;
42
51
  console.log(chalk.bold('brcc — BlockRun Claude Code\n'));
43
- console.log(`Chain: ${chalk.magenta('base')}`);
44
- console.log(`Wallet: ${chalk.cyan(wallet.address)}`);
52
+ console.log(`Chain: ${chalk.magenta('base')}`);
53
+ console.log(`Wallet: ${chalk.cyan(wallet.address)}`);
45
54
  if (model)
46
- console.log(`Model: ${chalk.green(model)}`);
47
- console.log(`Proxy: ${chalk.cyan(`http://localhost:${port}`)}`);
48
- console.log(`Backend: ${chalk.dim(apiUrl)}\n`);
49
- const server = createProxy({ port, apiUrl, chain: 'base', modelOverride: model, debug: options.debug });
55
+ console.log(`Model: ${chalk.green(model)}`);
56
+ console.log(`Fallback: ${fallbackEnabled ? chalk.green('enabled') : chalk.yellow('disabled')}`);
57
+ console.log(`Proxy: ${chalk.cyan(`http://localhost:${port}`)}`);
58
+ console.log(`Backend: ${chalk.dim(apiUrl)}\n`);
59
+ const server = createProxy({
60
+ port,
61
+ apiUrl,
62
+ chain: 'base',
63
+ modelOverride: model,
64
+ debug: options.debug,
65
+ fallbackEnabled,
66
+ });
50
67
  launchServer(server, port, shouldLaunch, model);
51
68
  }
52
69
  }
53
70
  function launchServer(server, port, shouldLaunch, model) {
54
71
  server.listen(port, () => {
55
- console.log(chalk.green(`Proxy running on port ${port}\n`));
72
+ console.log(chalk.green(`✓ Proxy running on port ${port}`));
73
+ console.log(chalk.dim(` Usage tracking: ~/.blockrun/brcc-stats.json`));
74
+ console.log(chalk.dim(` Run 'brcc stats' to view statistics\n`));
56
75
  if (shouldLaunch) {
57
76
  console.log('Starting Claude Code...\n');
58
77
  const cleanEnv = { ...process.env };
@@ -0,0 +1,10 @@
1
+ /**
2
+ * brcc stats command
3
+ * Display usage statistics and cost savings
4
+ */
5
+ interface StatsOptions {
6
+ clear?: boolean;
7
+ json?: boolean;
8
+ }
9
+ export declare function statsCommand(options: StatsOptions): void;
10
+ export {};
@@ -0,0 +1,94 @@
1
+ /**
2
+ * brcc stats command
3
+ * Display usage statistics and cost savings
4
+ */
5
+ import chalk from 'chalk';
6
+ import { clearStats, getStatsSummary } from '../stats/tracker.js';
7
+ export function statsCommand(options) {
8
+ if (options.clear) {
9
+ clearStats();
10
+ console.log(chalk.green('✓ Statistics cleared'));
11
+ return;
12
+ }
13
+ const { stats, opusCost, saved, savedPct, avgCostPerRequest, period } = getStatsSummary();
14
+ // JSON output for programmatic access
15
+ if (options.json) {
16
+ console.log(JSON.stringify({
17
+ ...stats,
18
+ computed: {
19
+ opusCost,
20
+ saved,
21
+ savedPct,
22
+ avgCostPerRequest,
23
+ period,
24
+ },
25
+ }, null, 2));
26
+ return;
27
+ }
28
+ // Pretty output
29
+ console.log(chalk.bold('\n📊 brcc Usage Statistics\n'));
30
+ console.log('─'.repeat(55));
31
+ if (stats.totalRequests === 0) {
32
+ console.log(chalk.gray('\n No requests recorded yet. Start using brcc!\n'));
33
+ console.log('─'.repeat(55) + '\n');
34
+ return;
35
+ }
36
+ // Overview
37
+ console.log(chalk.bold('\n Overview') + chalk.gray(` (${period})\n`));
38
+ console.log(` Requests: ${chalk.cyan(stats.totalRequests.toLocaleString())}`);
39
+ console.log(` Total Cost: ${chalk.green('$' + stats.totalCostUsd.toFixed(4))}`);
40
+ console.log(` Avg per Request:${chalk.gray(' $' + avgCostPerRequest.toFixed(6))}`);
41
+ console.log(` Input Tokens: ${stats.totalInputTokens.toLocaleString()}`);
42
+ console.log(` Output Tokens: ${stats.totalOutputTokens.toLocaleString()}`);
43
+ if (stats.totalFallbacks > 0) {
44
+ const fallbackPct = ((stats.totalFallbacks / stats.totalRequests) *
45
+ 100).toFixed(1);
46
+ console.log(` Fallbacks: ${chalk.yellow(stats.totalFallbacks.toString())} (${fallbackPct}%)`);
47
+ }
48
+ // Per-model breakdown
49
+ const modelEntries = Object.entries(stats.byModel);
50
+ if (modelEntries.length > 0) {
51
+ console.log(chalk.bold('\n By Model\n'));
52
+ // Sort by cost (descending)
53
+ const sorted = modelEntries.sort((a, b) => b[1].costUsd - a[1].costUsd);
54
+ for (const [model, data] of sorted) {
55
+ const pct = stats.totalCostUsd > 0
56
+ ? ((data.costUsd / stats.totalCostUsd) * 100).toFixed(1)
57
+ : '0';
58
+ const avgLatency = Math.round(data.avgLatencyMs);
59
+ // Shorten model name if too long
60
+ const displayModel = model.length > 35 ? model.slice(0, 32) + '...' : model;
61
+ console.log(` ${chalk.cyan(displayModel)}`);
62
+ console.log(chalk.gray(` ${data.requests} req · $${data.costUsd.toFixed(4)} (${pct}%) · ${avgLatency}ms avg`));
63
+ if (data.fallbackCount > 0) {
64
+ console.log(chalk.yellow(` ↳ ${data.fallbackCount} fallback recoveries`));
65
+ }
66
+ }
67
+ }
68
+ // Savings comparison
69
+ console.log(chalk.bold('\n 💰 Savings vs Claude Opus\n'));
70
+ if (opusCost > 0) {
71
+ console.log(` Opus equivalent: ${chalk.gray('$' + opusCost.toFixed(2))}`);
72
+ console.log(` Your actual cost:${chalk.green(' $' + stats.totalCostUsd.toFixed(2))}`);
73
+ console.log(` ${chalk.green.bold(`Saved: $${saved.toFixed(2)} (${savedPct.toFixed(1)}%)`)}`);
74
+ }
75
+ else {
76
+ console.log(chalk.gray(' Not enough data to calculate savings'));
77
+ }
78
+ // Recent activity (last 5 requests)
79
+ if (stats.history.length > 0) {
80
+ console.log(chalk.bold('\n Recent Activity\n'));
81
+ const recent = stats.history.slice(-5).reverse();
82
+ for (const record of recent) {
83
+ const time = new Date(record.timestamp).toLocaleTimeString();
84
+ const model = record.model.split('/').pop() || record.model;
85
+ const cost = '$' + record.costUsd.toFixed(4);
86
+ const fallbackMark = record.fallback ? chalk.yellow(' ↺') : '';
87
+ console.log(chalk.gray(` ${time}`) +
88
+ ` ${model}${fallbackMark} ` +
89
+ chalk.green(cost));
90
+ }
91
+ }
92
+ console.log('\n' + '─'.repeat(55));
93
+ console.log(chalk.gray(' Run `brcc stats --clear` to reset statistics\n'));
94
+ }
package/dist/index.js CHANGED
@@ -5,12 +5,13 @@ import { startCommand } from './commands/start.js';
5
5
  import { balanceCommand } from './commands/balance.js';
6
6
  import { modelsCommand } from './commands/models.js';
7
7
  import { configCommand } from './commands/config.js';
8
+ import { statsCommand } from './commands/stats.js';
8
9
  const program = new Command();
9
10
  program
10
11
  .name('brcc')
11
12
  .description('BlockRun Claude Code — run Claude Code with any model, pay with USDC.\n\n' +
12
13
  'Use /model inside Claude Code to switch between models on the fly.')
13
- .version('0.5.0');
14
+ .version('0.9.1');
14
15
  program
15
16
  .command('setup [chain]')
16
17
  .description('Create a new wallet for payments (base or solana)')
@@ -21,6 +22,7 @@ program
21
22
  .option('-p, --port <port>', 'Proxy port', '8402')
22
23
  .option('-m, --model <model>', 'Default model (e.g. openai/gpt-5.4, anthropic/claude-sonnet-4.6)')
23
24
  .option('--no-launch', 'Start proxy only, do not launch Claude Code')
25
+ .option('--no-fallback', 'Disable automatic fallback to backup models')
24
26
  .option('--debug', 'Enable debug logging')
25
27
  .action(startCommand);
26
28
  program
@@ -36,4 +38,10 @@ program
36
38
  .description('Manage brcc config (set, get, unset, list)\n' +
37
39
  'Keys: default-model, sonnet-model, opus-model, haiku-model, smart-routing')
38
40
  .action(configCommand);
41
+ program
42
+ .command('stats')
43
+ .description('Show usage statistics and cost savings')
44
+ .option('--clear', 'Clear all statistics')
45
+ .option('--json', 'Output in JSON format')
46
+ .action(statsCommand);
39
47
  program.parse();
@@ -0,0 +1,34 @@
1
+ /**
2
+ * Fallback chain for brcc
3
+ * Automatically switches to backup models when primary fails (429, 5xx, etc.)
4
+ */
5
+ export interface FallbackConfig {
6
+ /** Models to try in order of priority */
7
+ chain: string[];
8
+ /** HTTP status codes that trigger fallback */
9
+ retryOn: number[];
10
+ /** Maximum retries across all models */
11
+ maxRetries: number;
12
+ /** Delay between retries in ms */
13
+ retryDelayMs: number;
14
+ }
15
+ export declare const DEFAULT_FALLBACK_CONFIG: FallbackConfig;
16
+ export interface FallbackResult {
17
+ response: Response;
18
+ modelUsed: string;
19
+ fallbackUsed: boolean;
20
+ attemptsCount: number;
21
+ failedModels: string[];
22
+ }
23
+ /**
24
+ * Fetch with automatic fallback to backup models
25
+ */
26
+ export declare function fetchWithFallback(url: string, init: RequestInit, originalBody: string, config?: FallbackConfig, onFallback?: (model: string, statusCode: number, nextModel: string) => void): Promise<FallbackResult>;
27
+ /**
28
+ * Get the current model from fallback chain based on parsed request
29
+ */
30
+ export declare function getCurrentModelFromChain(requestedModel: string | undefined, config?: FallbackConfig): string;
31
+ /**
32
+ * Build fallback chain starting from a specific model
33
+ */
34
+ export declare function buildFallbackChain(startModel: string, config?: FallbackConfig): string[];
@@ -0,0 +1,115 @@
1
+ /**
2
+ * Fallback chain for brcc
3
+ * Automatically switches to backup models when primary fails (429, 5xx, etc.)
4
+ */
5
+ export const DEFAULT_FALLBACK_CONFIG = {
6
+ chain: [
7
+ 'blockrun/auto', // Smart routing (default)
8
+ 'blockrun/eco', // Cheapest capable model
9
+ 'deepseek/deepseek-chat', // Direct fallback
10
+ 'nvidia/gpt-oss-120b', // Free model as ultimate fallback
11
+ ],
12
+ retryOn: [429, 500, 502, 503, 504, 529],
13
+ maxRetries: 5,
14
+ retryDelayMs: 1000,
15
+ };
16
+ /**
17
+ * Sleep helper
18
+ */
19
+ function sleep(ms) {
20
+ return new Promise((resolve) => setTimeout(resolve, ms));
21
+ }
22
+ /**
23
+ * Replace model in request body
24
+ */
25
+ function replaceModelInBody(body, newModel) {
26
+ try {
27
+ const parsed = JSON.parse(body);
28
+ parsed.model = newModel;
29
+ return JSON.stringify(parsed);
30
+ }
31
+ catch {
32
+ return body;
33
+ }
34
+ }
35
+ /**
36
+ * Fetch with automatic fallback to backup models
37
+ */
38
+ export async function fetchWithFallback(url, init, originalBody, config = DEFAULT_FALLBACK_CONFIG, onFallback) {
39
+ const failedModels = [];
40
+ let attempts = 0;
41
+ for (let i = 0; i < config.chain.length && attempts < config.maxRetries; i++) {
42
+ const model = config.chain[i];
43
+ const body = replaceModelInBody(originalBody, model);
44
+ try {
45
+ attempts++;
46
+ const response = await fetch(url, {
47
+ ...init,
48
+ body,
49
+ });
50
+ // Success or non-retryable error
51
+ if (!config.retryOn.includes(response.status)) {
52
+ return {
53
+ response,
54
+ modelUsed: model,
55
+ fallbackUsed: i > 0,
56
+ attemptsCount: attempts,
57
+ failedModels,
58
+ };
59
+ }
60
+ // Retryable error - log and try next
61
+ failedModels.push(model);
62
+ const nextModel = config.chain[i + 1];
63
+ if (nextModel && onFallback) {
64
+ onFallback(model, response.status, nextModel);
65
+ }
66
+ // Wait before trying next model (with exponential backoff for same model retries)
67
+ if (i < config.chain.length - 1) {
68
+ await sleep(config.retryDelayMs);
69
+ }
70
+ }
71
+ catch (err) {
72
+ // Network error - try next model
73
+ failedModels.push(model);
74
+ const nextModel = config.chain[i + 1];
75
+ if (nextModel && onFallback) {
76
+ const errMsg = err instanceof Error ? err.message : 'Network error';
77
+ onFallback(model, 0, nextModel);
78
+ console.error(`[fallback] ${model} network error: ${errMsg}`);
79
+ }
80
+ if (i < config.chain.length - 1) {
81
+ await sleep(config.retryDelayMs);
82
+ }
83
+ }
84
+ }
85
+ // All models failed - throw error
86
+ throw new Error(`All models in fallback chain failed: ${failedModels.join(', ')}`);
87
+ }
88
+ /**
89
+ * Get the current model from fallback chain based on parsed request
90
+ */
91
+ export function getCurrentModelFromChain(requestedModel, config = DEFAULT_FALLBACK_CONFIG) {
92
+ // If model is explicitly set and in chain, start from there
93
+ if (requestedModel) {
94
+ const index = config.chain.indexOf(requestedModel);
95
+ if (index >= 0) {
96
+ return requestedModel;
97
+ }
98
+ // Model not in chain, use as-is (user specified custom model)
99
+ return requestedModel;
100
+ }
101
+ // Default to first model in chain
102
+ return config.chain[0];
103
+ }
104
+ /**
105
+ * Build fallback chain starting from a specific model
106
+ */
107
+ export function buildFallbackChain(startModel, config = DEFAULT_FALLBACK_CONFIG) {
108
+ const index = config.chain.indexOf(startModel);
109
+ if (index >= 0) {
110
+ // Start from this model and include all after it
111
+ return config.chain.slice(index);
112
+ }
113
+ // Model not in default chain - prepend it
114
+ return [startModel, ...config.chain];
115
+ }
@@ -6,6 +6,7 @@ export interface ProxyOptions {
6
6
  chain?: Chain;
7
7
  modelOverride?: string;
8
8
  debug?: boolean;
9
+ fallbackEnabled?: boolean;
9
10
  }
10
11
  export declare function createProxy(options: ProxyOptions): http.Server;
11
12
  type RequestCategory = 'simple' | 'code' | 'default';