@anh3d0nic/qwen-code-termux-ice 16.0.7 → 16.0.8
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/package.json +1 -1
- package/scripts/api-caller.js +205 -0
- package/scripts/ice-v15.js +13 -9
package/scripts/api-caller.js
ADDED
|
@@ -0,0 +1,205 @@
|
|
|
1
|
+
#!/usr/bin/env node
|
|
2
|
+
/**
|
|
3
|
+
* Simple API caller for ICE v15
|
|
4
|
+
* Supports: Qwen (DashScope), Gemini, Groq
|
|
5
|
+
*/
|
|
6
|
+
|
|
7
|
+
const fs = require('fs');
const https = require('https');
const path = require('path');
|
|
8
|
+
|
|
9
|
+
// Canonical Termux filesystem roots (Android has no standard FHS paths).
const TERMUX = {
  PREFIX: '/data/data/com.termux/files/usr',
  HOME: '/data/data/com.termux/files/home'
};

// Per-provider HTTPS endpoint host, request path, and default model.
const API_CONFIG = {
  qwen:   { url: 'dashscope.aliyuncs.com',            path: '/compatible-mode/v1/chat/completions',            model: 'qwen3-coder-plus' },
  gemini: { url: 'generativelanguage.googleapis.com', path: '/v1beta/models/gemini-2.0-flash:generateContent', model: 'gemini-2.0-flash' },
  groq:   { url: 'api.groq.com',                      path: '/openai/v1/chat/completions',                     model: 'llama-3.1-70b-versatile' }
};
|
|
31
|
+
|
|
32
|
+
/**
 * Load stored API keys from the ICE config directory
 * (~/.qwen-ice/config/api-keys.json under the Termux home).
 * Best-effort: a missing or unreadable file never throws — callers always
 * receive a usable config object.
 * @returns {{keys: Array, default_provider: string}} Parsed config, or a
 *   safe default when the file is absent or corrupt.
 */
function loadApiKeys() {
  const keysFile = path.join(TERMUX.HOME, '.qwen-ice', 'config', 'api-keys.json');
  try {
    if (fs.existsSync(keysFile)) {
      return JSON.parse(fs.readFileSync(keysFile, 'utf8'));
    }
  } catch (err) {
    // A corrupt config must not crash the caller, but silently ignoring it
    // hides why configured keys stopped working — surface a warning.
    console.error(`[api-caller] Ignoring unreadable key file ${keysFile}: ${err.message}`);
  }
  return { keys: [], default_provider: 'qwen' };
}
|
|
43
|
+
|
|
44
|
+
/**
 * Send a chat-completion request to the Qwen (DashScope) OpenAI-compatible API.
 * @param {string} apiKey - DashScope API key, sent as a Bearer token.
 * @param {Array<{role: string, content: string}>} messages - Chat history.
 * @returns {Promise<string>} Assistant reply text ('' when the response has none).
 */
function callQwenAPI(apiKey, messages) {
  return new Promise((resolve, reject) => {
    const data = JSON.stringify({
      model: API_CONFIG.qwen.model,
      messages: messages,
      max_tokens: 4096
    });

    const options = {
      hostname: API_CONFIG.qwen.url,
      port: 443,
      path: API_CONFIG.qwen.path,
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Authorization': `Bearer ${apiKey}`,
        // Byte length, not string length: `data.length` undercounts multi-byte
        // UTF-8 characters in messages and truncates the request body.
        'Content-Length': Buffer.byteLength(data)
      }
    };

    const req = https.request(options, (res) => {
      let responseData = '';
      res.on('data', chunk => responseData += chunk);
      res.on('end', () => {
        if (res.statusCode >= 400) {
          reject(new Error(`Qwen API error: ${res.statusCode}`));
        } else {
          try {
            const parsed = JSON.parse(responseData);
            resolve(parsed.choices?.[0]?.message?.content || '');
          } catch (e) {
            reject(new Error('Failed to parse Qwen response'));
          }
        }
      });
    });

    req.on('error', reject);
    // Destroy the request on timeout: setTimeout alone only fires the
    // callback, leaving the socket open and the request hanging forever.
    // destroy(err) surfaces the error through the 'error' handler above.
    req.setTimeout(60000, () => {
      req.destroy(new Error('Qwen API timeout'));
    });
    req.write(data);
    req.end();
  });
}
|
|
87
|
+
|
|
88
|
+
/**
 * Send a generateContent request to the Google Gemini API.
 * Only the last message's content is forwarded (the Gemini payload here is
 * single-turn); prior history is not included.
 * @param {string} apiKey - Gemini API key, sent via the x-goog-api-key header
 *   rather than the URL query string, so it never appears in request logs
 *   and needs no URL encoding.
 * @param {Array<{role: string, content: string}>} messages - Chat history.
 * @returns {Promise<string>} Generated text ('' when the response has none).
 */
function callGeminiAPI(apiKey, messages) {
  return new Promise((resolve, reject) => {
    const lastMessage = messages[messages.length - 1]?.content || '';
    const data = JSON.stringify({
      contents: [{ parts: [{ text: lastMessage }] }],
      generationConfig: { maxOutputTokens: 4096 }
    });

    const options = {
      hostname: API_CONFIG.gemini.url,
      port: 443,
      path: API_CONFIG.gemini.path,
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        // Official header-based auth; replaces the unescaped ?key= query param.
        'x-goog-api-key': apiKey,
        // Byte length, not string length: `data.length` undercounts multi-byte
        // UTF-8 characters and truncates the request body.
        'Content-Length': Buffer.byteLength(data)
      }
    };

    const req = https.request(options, (res) => {
      let responseData = '';
      res.on('data', chunk => responseData += chunk);
      res.on('end', () => {
        if (res.statusCode === 429) {
          reject(new Error('Gemini rate limit exceeded'));
        } else if (res.statusCode >= 400) {
          reject(new Error(`Gemini API error: ${res.statusCode}`));
        } else {
          try {
            const parsed = JSON.parse(responseData);
            resolve(parsed.candidates?.[0]?.content?.parts?.[0]?.text || '');
          } catch (e) {
            reject(new Error('Failed to parse Gemini response'));
          }
        }
      });
    });

    req.on('error', reject);
    // Destroy the request on timeout: setTimeout alone only fires the
    // callback, leaving the socket open and the request hanging forever.
    req.setTimeout(60000, () => {
      req.destroy(new Error('Gemini API timeout'));
    });
    req.write(data);
    req.end();
  });
}
|
|
132
|
+
|
|
133
|
+
/**
 * Send a chat-completion request to the Groq OpenAI-compatible API.
 * @param {string} apiKey - Groq API key, sent as a Bearer token.
 * @param {Array<{role: string, content: string}>} messages - Chat history.
 * @returns {Promise<string>} Assistant reply text ('' when the response has none).
 */
function callGroqAPI(apiKey, messages) {
  return new Promise((resolve, reject) => {
    const data = JSON.stringify({
      model: API_CONFIG.groq.model,
      messages: messages,
      max_tokens: 4096
    });

    const options = {
      hostname: API_CONFIG.groq.url,
      port: 443,
      path: API_CONFIG.groq.path,
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Authorization': `Bearer ${apiKey}`,
        // Byte length, not string length: `data.length` undercounts multi-byte
        // UTF-8 characters and truncates the request body.
        'Content-Length': Buffer.byteLength(data)
      }
    };

    const req = https.request(options, (res) => {
      let responseData = '';
      res.on('data', chunk => responseData += chunk);
      res.on('end', () => {
        if (res.statusCode >= 400) {
          reject(new Error(`Groq API error: ${res.statusCode}`));
        } else {
          try {
            const parsed = JSON.parse(responseData);
            resolve(parsed.choices?.[0]?.message?.content || '');
          } catch (e) {
            reject(new Error('Failed to parse Groq response'));
          }
        }
      });
    });

    req.on('error', reject);
    // Destroy the request on timeout: setTimeout alone only fires the
    // callback, leaving the socket open and the request hanging forever.
    req.setTimeout(60000, () => {
      req.destroy(new Error('Groq API timeout'));
    });
    req.write(data);
    req.end();
  });
}
|
|
176
|
+
|
|
177
|
+
/**
 * Route a chat request to the configured LLM provider.
 * Never throws: when no API key is configured, or the provider call fails,
 * a human-readable stub string is returned instead.
 * @param {Array<{role: string, content: string}>} messages - Chat history.
 * @param {?string} [provider] - Override: 'qwen' | 'gemini' | 'groq';
 *   defaults to the config's default_provider, then 'qwen'.
 * @returns {Promise<string>}
 */
async function callLLM(messages, provider = null) {
  const config = loadApiKeys();
  const selectedProvider = provider || config.default_provider || 'qwen';

  const keyEntry = config.keys?.find(k => k.provider === selectedProvider && k.type !== 'oauth');

  // Compute the preview once, fully optional-chained: the original
  // `messages[...]?.content.substring(...)` threw in the catch path whenever
  // the last message was missing or had no content.
  const lastContent = messages[messages.length - 1]?.content ?? '';

  if (!keyEntry || !keyEntry.key) {
    // No API key - return stub response
    return `[No API key configured for ${selectedProvider}]\n\nI understand your query: "${lastContent.substring(0, 100)}${lastContent.length > 100 ? '...' : ''}"\n\nTo get real AI responses, run: qwen-ice api-setup`;
  }

  try {
    switch (selectedProvider) {
      case 'gemini':
        return await callGeminiAPI(keyEntry.key, messages);
      case 'groq':
        return await callGroqAPI(keyEntry.key, messages);
      case 'qwen':
      default:
        // Unknown providers fall back to Qwen, matching the original behavior.
        return await callQwenAPI(keyEntry.key, messages);
    }
  } catch (err) {
    return `[API Error: ${err.message}]\n\nYour query was: "${lastContent.substring(0, 100)}"`;
  }
}
|
|
204
|
+
|
|
205
|
+
module.exports = { callLLM, loadApiKeys };
|
package/scripts/ice-v15.js
CHANGED
|
@@ -1244,7 +1244,7 @@ class UnifiedPipeline {
|
|
|
1244
1244
|
|
|
1245
1245
|
// Step 4: Generate response
|
|
1246
1246
|
console.log('✏️ Step 4: Generate Response');
|
|
1247
|
-
let response = this.generateResponse(enhancedInput, intent);
|
|
1247
|
+
let response = await this.generateResponse(enhancedInput, intent);
|
|
1248
1248
|
console.log(' Response generated\n');
|
|
1249
1249
|
|
|
1250
1250
|
// Step 5: Self-Critique
|
|
@@ -1358,14 +1358,18 @@ class UnifiedPipeline {
|
|
|
1358
1358
|
return response;
|
|
1359
1359
|
}
|
|
1360
1360
|
|
|
1361
|
-
generateResponse(input, intent) {
|
|
1362
|
-
|
|
1363
|
-
|
|
1364
|
-
|
|
1365
|
-
|
|
1366
|
-
|
|
1367
|
-
|
|
1368
|
-
|
|
1361
|
+
async generateResponse(input, intent) {
|
|
1362
|
+
const { callLLM } = require('./api-caller.js');
|
|
1363
|
+
|
|
1364
|
+
const systemPrompt = 'You are Qwen Code ICE v15.0 - a Termux-optimized AI assistant. Always provide Termux-compatible solutions (no sudo, use pkg instead of apt-get, use ' + TERMUX.PREFIX + ' paths).';
|
|
1365
|
+
|
|
1366
|
+
const messages = [
|
|
1367
|
+
{ role: 'system', content: systemPrompt },
|
|
1368
|
+
{ role: 'user', content: input }
|
|
1369
|
+
];
|
|
1370
|
+
|
|
1371
|
+
const response = await callLLM(messages);
|
|
1372
|
+
return response;
|
|
1369
1373
|
}
|
|
1370
1374
|
|
|
1371
1375
|
printSessionSummary() {
|