@blockrun/cc 0.7.3 → 0.8.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/proxy/server.js +74 -2
- package/package.json +1 -1
package/dist/proxy/server.js
CHANGED
|
@@ -16,8 +16,56 @@ function debug(options, ...args) {
|
|
|
16
16
|
}
|
|
17
17
|
const DEFAULT_MAX_TOKENS = 4096;
|
|
18
18
|
let lastOutputTokens = 0;
|
|
19
|
+
// Model shortcuts for quick in-session switching via "use <alias>".
// Keys are the lowercase aliases a user may type after "use "; values are
// the full provider-qualified model IDs forwarded upstream. Frozen so no
// request handler can mutate the shared table at runtime.
const MODEL_SHORTCUTS = Object.freeze({
  'gpt': 'openai/gpt-5.4',
  'gpt5': 'openai/gpt-5.4',
  'gpt-5': 'openai/gpt-5.4',
  'gpt-5.4': 'openai/gpt-5.4',
  'sonnet': 'anthropic/claude-sonnet-4.6',
  'claude': 'anthropic/claude-sonnet-4.6',
  'opus': 'anthropic/claude-opus-4.6',
  'haiku': 'anthropic/claude-haiku-4.5',
  'deepseek': 'deepseek/deepseek-chat',
  'gemini': 'google/gemini-2.5-pro',
  'grok': 'xai/grok-3',
  'free': 'nvidia/gpt-oss-120b',
  'mini': 'openai/gpt-5-mini',
  'glm': 'zai/glm-5',
});
|
|
36
|
+
/**
 * Detect an in-session "use <model>" command in the latest user message.
 *
 * Inspects the last entry of `parsed.messages` (an Anthropic-style request
 * body — assumed; confirm against the proxy's upstream API). If that entry
 * is a user message whose text is exactly `use <alias-or-model-id>`, the
 * resolved model ID is returned; otherwise null, meaning the request should
 * pass through unchanged.
 *
 * @param {{messages?: Array<{role: string, content: string|Array<{type: string, text?: string}>}>}} parsed
 *   Parsed JSON request body.
 * @returns {string|null} Resolved provider-qualified model ID, or null when
 *   no switch command was found.
 */
function detectModelSwitch(parsed) {
  if (!parsed.messages || parsed.messages.length === 0) return null;

  const last = parsed.messages[parsed.messages.length - 1];
  if (last.role !== 'user') return null;

  // Extract plain text from either a string body or a content-block array.
  let content = '';
  if (typeof last.content === 'string') {
    content = last.content;
  } else if (Array.isArray(last.content)) {
    const textBlock = last.content.find((b) => b.type === 'text' && b.text);
    if (textBlock && textBlock.text) content = textBlock.text;
  }
  if (!content) return null;

  // Match the command case-insensitively but keep the user's original
  // casing for the captured model ID. The previous version lowercased the
  // entire message first, which corrupted explicit pass-through IDs such
  // as "use Vendor/Model-Name" — some providers treat IDs case-sensitively.
  const match = content.trim().match(/^use\s+(.+)$/i);
  if (!match) return null;

  const modelInput = match[1].trim();

  // Shortcut table keys are lowercase, so look up case-insensitively.
  const shortcut = MODEL_SHORTCUTS[modelInput.toLowerCase()];
  if (shortcut) return shortcut;

  // A provider-qualified ID ("vendor/model") passes through verbatim.
  if (modelInput.includes('/')) return modelInput;

  return null;
}
|
|
19
66
|
export function createProxy(options) {
|
|
20
67
|
const chain = options.chain || 'base';
|
|
68
|
+
let currentModel = options.modelOverride || null;
|
|
21
69
|
let baseWallet = null;
|
|
22
70
|
let solanaWallet = null;
|
|
23
71
|
if (chain === 'base') {
|
|
@@ -48,8 +96,32 @@ export function createProxy(options) {
|
|
|
48
96
|
if (body) {
|
|
49
97
|
try {
|
|
50
98
|
const parsed = JSON.parse(body);
|
|
51
|
-
|
|
52
|
-
|
|
99
|
+
// Intercept "use <model>" commands for in-session model switching
|
|
100
|
+
if (parsed.messages) {
|
|
101
|
+
const last = parsed.messages[parsed.messages.length - 1];
|
|
102
|
+
debug(options, `last msg role=${last?.role} content-type=${typeof last?.content} content=${JSON.stringify(last?.content).slice(0, 200)}`);
|
|
103
|
+
}
|
|
104
|
+
const switchCmd = detectModelSwitch(parsed);
|
|
105
|
+
if (switchCmd) {
|
|
106
|
+
currentModel = switchCmd;
|
|
107
|
+
debug(options, `model switched to: ${currentModel}`);
|
|
108
|
+
const fakeResponse = {
|
|
109
|
+
id: `msg_brcc_${Date.now()}`,
|
|
110
|
+
type: 'message',
|
|
111
|
+
role: 'assistant',
|
|
112
|
+
model: currentModel,
|
|
113
|
+
content: [{ type: 'text', text: `Switched to **${currentModel}**. All subsequent requests will use this model.` }],
|
|
114
|
+
stop_reason: 'end_turn',
|
|
115
|
+
stop_sequence: null,
|
|
116
|
+
usage: { input_tokens: 0, output_tokens: 10 },
|
|
117
|
+
};
|
|
118
|
+
res.writeHead(200, { 'Content-Type': 'application/json' });
|
|
119
|
+
res.end(JSON.stringify(fakeResponse));
|
|
120
|
+
return;
|
|
121
|
+
}
|
|
122
|
+
// Apply model override
|
|
123
|
+
if ((currentModel || options.modelOverride) && parsed.model) {
|
|
124
|
+
parsed.model = currentModel || options.modelOverride;
|
|
53
125
|
}
|
|
54
126
|
if (parsed.max_tokens) {
|
|
55
127
|
const original = parsed.max_tokens;
|