vibehacker 4.1.0 → 4.2.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/src/src/api.js ADDED
@@ -0,0 +1,314 @@
1
+ 'use strict';
2
+
3
+ const axios = require('axios');
4
+ const config = require('./config');
5
+
6
// ── Error Types ──────────────────────────────────────────────────────────────
// Canonical error categories used throughout the client. Each value mirrors
// its key, so a code is self-describing wherever it is logged or compared.
const ERR = Object.fromEntries([
  'DAILY_LIMIT',
  'RATE_LIMIT',
  'AUTH',
  'NOT_FOUND',
  'CREDITS',
  'SERVER',
  'TIMEOUT',
  'ABORTED',
  'UNKNOWN',
].map((code) => [code, code]));
18
+
19
// ── Provider Health Tracker ──────────────────────────────────────────────────
// Per-provider success/failure counters, an EWMA of observed latency, and a
// simple circuit breaker. State lives for the lifetime of the process and is
// keyed by provider baseURL — it enables smart provider selection upstream.

const _providerHealth = new Map(); // baseURL → mutable health record (see freshHealth)

// A blank health record for a provider we have not contacted yet.
const freshHealth = () => ({
  successes: 0,
  failures: 0,
  avgLatency: 0,
  lastFailure: 0,
  circuitOpen: false,
  circuitOpenUntil: 0,
});

/**
 * Returns the mutable health record for a provider URL, creating a fresh
 * record on first access. Callers mutate the returned object directly.
 */
function getHealth(url) {
  let record = _providerHealth.get(url);
  if (record === undefined) {
    record = freshHealth();
    _providerHealth.set(url, record);
  }
  return record;
}

/**
 * Records a successful request: increments the success count, folds the
 * observed latency into an exponentially-weighted moving average
 * (70% history / 30% new sample), closes the circuit, and decays one failure.
 */
function recordSuccess(url, latencyMs) {
  const record = getHealth(url);
  record.successes += 1;
  record.avgLatency = record.avgLatency
    ? record.avgLatency * 0.7 + latencyMs * 0.3
    : latencyMs; // first sample seeds the average directly
  record.circuitOpen = false;
  if (record.failures > 0) record.failures -= 1; // decay, never below zero
}

/**
 * Records a failed request. Once the failure count reaches 3, the circuit
 * opens for a 30-second cool-down. Note: successes only decay the count by
 * one each, so the three failures need not be strictly consecutive.
 */
function recordFailure(url) {
  const record = getHealth(url);
  record.failures += 1;
  record.lastFailure = Date.now();
  const TRIP_THRESHOLD = 3;
  const COOLDOWN_MS = 30000;
  if (record.failures >= TRIP_THRESHOLD) {
    record.circuitOpen = true;
    record.circuitOpenUntil = Date.now() + COOLDOWN_MS;
  }
}

/**
 * Whether requests to `url` should currently be skipped. After the cool-down
 * elapses the breaker goes half-open: this reports false so one probe request
 * can go through, while leaving the failure count at 2 so that a single new
 * failure trips the breaker again immediately.
 */
function isCircuitOpen(url) {
  const record = getHealth(url);
  if (!record.circuitOpen) return false;
  const coolingDown = Date.now() <= record.circuitOpenUntil;
  if (coolingDown) return true;
  record.circuitOpen = false;
  record.failures = 2;
  return false;
}
65
+
66
// ── Provider Detection (cached per baseURL) ──────────────────────────────────
const _providerCache = new Map(); // baseURL → detected provider type string

// Host substring → provider type; checked in order, first match wins.
const PROVIDER_PATTERNS = [
  ['vibsecurity.com', 'vibehacker'],
  ['anthropic.com', 'anthropic'],
  ['openai.com', 'openai'],
  ['groq.com', 'groq'],
  ['cerebras.ai', 'cerebras'],
  ['mistral.ai', 'mistral'],
  ['generativelanguage', 'gemini'],
];

/**
 * Detects the provider type from config.baseURL via case-insensitive
 * substring matching, falling back to 'openai_compat' for unrecognized
 * hosts. Results are memoized per URL for the lifetime of the process.
 */
function currentProviderType() {
  const url = config.baseURL || '';
  const cached = _providerCache.get(url);
  if (cached !== undefined) return cached; // cache only ever holds strings

  const lower = url.toLowerCase();
  const hit = PROVIDER_PATTERNS.find(([needle]) => lower.includes(needle));
  const type = hit ? hit[1] : 'openai_compat';

  _providerCache.set(url, type);
  return type;
}
86
+
87
// ── Error Classification ─────────────────────────────────────────────────────

/**
 * Maps an axios/network error onto one of the ERR categories plus a short,
 * user-facing message.
 *
 * @param {Error} err - axios error (may carry `response`, `code`, `message`)
 * @param {string} [providerType] - accepted for call-site symmetry; not
 *   currently consulted by the classification logic
 * @returns {{type: string, msg?: string}}
 */
function classifyError(err, providerType) {
  // Caller-initiated cancellation surfaces as an 'aborted' message or code.
  if (err.message?.includes('aborted') || err.code === 'ERR_CANCELED') {
    return { type: ERR.ABORTED };
  }
  // Transport-level failures: no HTTP response ever arrived.
  if (err.code === 'ECONNABORTED' || err.code === 'ETIMEDOUT' || err.code === 'ECONNREFUSED') {
    return { type: ERR.TIMEOUT, msg: 'Connection failed — trying next provider.' };
  }

  const status = err.response?.status;
  const errBody = err.response?.data?.error || err.response?.data || {};
  const rawMsg = errBody.message || errBody.msg || err.message || '';
  const lower = rawMsg.toLowerCase();

  switch (status) {
    case 401:
    case 403:
      return { type: ERR.AUTH, msg: 'Invalid API key. Run /key to update.' };
    case 402:
      return { type: ERR.CREDITS, msg: 'Insufficient credits.' };
    case 404:
    case 410:
      return { type: ERR.NOT_FOUND, msg: 'Model unavailable.' };
    case 408:
      return { type: ERR.TIMEOUT, msg: 'Request timed out.' };
    case 429:
    case 529: {
      // Distinguish hard daily caps from transient rate limiting by message text.
      const dailyMarkers = ['free-models-per-day', 'per-day', 'daily', 'limit exceeded'];
      if (dailyMarkers.some((needle) => lower.includes(needle))) {
        return { type: ERR.DAILY_LIMIT, msg: rawMsg || 'Daily limit reached.' };
      }
      return { type: ERR.RATE_LIMIT, msg: rawMsg.substring(0, 100) || 'Rate limited.' };
    }
    default:
      if (status >= 500) return { type: ERR.SERVER, msg: `Server error (${status}).` };
      return { type: ERR.UNKNOWN, msg: `Error ${status || '?'}: ${rawMsg.substring(0, 100)}` };
  }
}
115
+
116
// ── Unified SSE Stream Parser ────────────────────────────────────────────────
// Single optimized parser for both OpenAI and Anthropic SSE formats.
// Uses indexOf() scanning instead of split() for O(n) instead of O(n²).

/**
 * Incrementally parses a server-sent-events response stream and forwards
 * decoded tokens to the caller. Exactly one of onDone/onError fires, once.
 *
 * @param {object}   stream - HTTP response body; emits 'data' (Buffer),
 *   'end', and 'error'
 * @param {object}   opts
 * @param {boolean}  opts.isAnthropic - parse Anthropic `event:` framing
 *   instead of the OpenAI `choices[].delta` shape
 * @param {Function} opts.onToken - (token, fullText) per content delta
 * @param {Function} opts.onDone  - (fullText) on successful completion
 * @param {Function} opts.onError - (classifiedError) on failure
 * @param {AbortSignal} [opts.signal] - accepted for interface symmetry; abort
 *   is surfaced via the stream's 'error' event rather than observed here
 */
function parseSseStream(stream, { isAnthropic, onToken, onDone, onError, signal }) {
  let done = false;
  let buffer = '';       // unconsumed partial line carried between chunks
  let full = '';         // accumulated assistant text
  let pendingEvent = ''; // most recent Anthropic `event:` name

  // Ensures onDone/onError fire at most once between them.
  const finish = (fn, ...args) => { if (!done) { done = true; fn(...args); } };

  stream.on('data', (chunk) => {
    if (done) return;
    buffer += chunk.toString('utf8');

    // Optimized line parsing: indexOf instead of split
    let nlIdx;
    while ((nlIdx = buffer.indexOf('\n')) !== -1) {
      const line = buffer.substring(0, nlIdx).trim();
      buffer = buffer.substring(nlIdx + 1);

      if (!line) continue;

      // Anthropic frames each data payload with a preceding `event:` line.
      if (isAnthropic && line.startsWith('event: ')) {
        pendingEvent = line.substring(7).trim();
        continue;
      }

      if (!line.startsWith('data: ')) continue;
      const raw = line.substring(6).trim();

      if (raw === '[DONE]') { finish(onDone, full); return; }

      try {
        const parsed = JSON.parse(raw);

        if (parsed.error) {
          // FIX: the original expression `code || type === '...' ? 429 : 500`
          // grouped as `(code || type === '...') ? 429 : 500`, so ANY error
          // carrying a truthy `code` was mapped to 429 and misclassified as
          // rate limiting. Use a numeric code directly when present; otherwise
          // infer 429 only for explicit rate_limit_error, else 500.
          const errStatus = typeof parsed.error.code === 'number'
            ? parsed.error.code
            : (parsed.error.type === 'rate_limit_error' ? 429 : 500);
          finish(onError, classifyError({ response: { status: errStatus, data: { error: parsed.error } } }));
          return;
        }

        if (isAnthropic) {
          if (pendingEvent === 'content_block_delta' && parsed.delta?.text) {
            const token = parsed.delta.text;
            full += token;
            onToken(token, full);
          }
          if (pendingEvent === 'message_stop' || parsed.type === 'message_stop') {
            finish(onDone, full);
          }
        } else {
          const token = parsed.choices?.[0]?.delta?.content;
          if (token) { full += token; onToken(token, full); }
          const reason = parsed.choices?.[0]?.finish_reason;
          // Some providers send the *string* "null" rather than JSON null.
          if (reason && reason !== 'null') { finish(onDone, full); }
        }
      } catch (_) {
        // Malformed or partial JSON frames (e.g. keep-alive comments) are
        // intentionally skipped; real errors arrive via the paths above.
      }
    }
  });

  stream.on('end', () => finish(onDone, full));
  stream.on('error', (err) => finish(onError, axios.isCancel(err) ? { type: ERR.ABORTED } : classifyError(err)));
}
182
+
183
// ── Stream Chat — Unified Entry Point ────────────────────────────────────────

/**
 * Streams a chat completion from the configured provider, reporting progress
 * through callbacks. Resolves once the HTTP request has been dispatched;
 * tokens then arrive asynchronously via parseSseStream.
 *
 * @param {object}   opts
 * @param {Array}    opts.messages  - chat messages ({ role, content })
 * @param {string}   opts.model     - provider model identifier
 * @param {AbortSignal} [opts.signal] - caller-side cancellation
 * @param {number}   [opts.maxTokens] - overrides config.maxTokens when set
 * @param {Function} opts.onToken - (token, fullText) per streamed delta
 * @param {Function} opts.onDone  - (fullText) exactly once on success
 * @param {Function} opts.onError - (classifiedError) exactly once on failure
 */
async function streamChat({ messages, model, signal, maxTokens, onToken, onDone, onError }) {
  const ptype = currentProviderType();
  const isAnth = ptype === 'anthropic';
  const startMs = Date.now(); // latency sample start for recordSuccess
  let finished = false; // belt-and-braces guard on top of parseSseStream's own

  // Circuit breaker check — skip providers that recently failed repeatedly.
  if (isCircuitOpen(config.baseURL)) {
    onError({ type: ERR.RATE_LIMIT, msg: 'Provider temporarily unavailable (circuit breaker).' });
    return;
  }

  // Abort handling.
  // NOTE(review): axios CancelToken is the deprecated cancellation API
  // (AbortSignal is the modern replacement) — confirm the pinned axios
  // version before migrating. The 'abort' listener is never removed on
  // normal completion, so it lingers on long-lived signals — minor leak.
  const cancelSource = axios.CancelToken.source();
  if (signal) {
    if (signal.aborted) { onError({ type: ERR.ABORTED }); return; }
    signal.addEventListener('abort', () => cancelSource.cancel('aborted'), { once: true });
  }

  // Build headers — Anthropic uses x-api-key + version header; everyone else
  // uses a Bearer token, with OpenRouter-style attribution headers for
  // openai_compat/vibehacker endpoints.
  const headers = { 'Content-Type': 'application/json' };
  if (isAnth) {
    headers['x-api-key'] = config.apiKey;
    headers['anthropic-version'] = '2023-06-01';
  } else {
    headers['Authorization'] = `Bearer ${config.apiKey}`;
    if (ptype === 'openai_compat' || ptype === 'vibehacker') {
      headers['HTTP-Referer'] = config.httpReferer;
      headers['X-Title'] = config.xTitle;
    }
  }

  // Build body — Anthropic takes the system prompt as a top-level `system`
  // field, so it is lifted out of the messages array (last system message
  // wins if several are present).
  let body, url;
  if (isAnth) {
    let system = '';
    const chatMsgs = messages.filter(m => {
      if (m.role === 'system') { system = m.content; return false; }
      return true;
    });
    body = { model, max_tokens: maxTokens || config.maxTokens, stream: true, messages: chatMsgs };
    if (system) body.system = system;
    url = `${config.baseURL}/messages`;
  } else {
    body = {
      model, messages,
      stream: true,
      temperature: config.temperature,
      max_tokens: maxTokens || config.maxTokens,
    };
    // OpenRouter optimizations — routing hints; harmless extra field for
    // other OpenAI-compatible servers, which ignore unknown keys.
    // NOTE(review): presumably ignored by non-OpenRouter hosts — verify.
    if (ptype === 'openai_compat' || ptype === 'vibehacker') {
      body.provider = { sort: 'throughput', allow_fallbacks: true, require_parameters: false };
    }
    url = `${config.baseURL}/chat/completions`;
  }

  // Make request — responseType 'stream' so SSE chunks arrive incrementally.
  let source;
  try {
    source = await axios({
      method: 'POST', url, headers, data: body,
      responseType: 'stream',
      timeout: isAnth ? 120000 : 90000,
      cancelToken: cancelSource.token,
    });
  } catch (err) {
    // Any dispatch failure (including user abort) counts against provider
    // health, which feeds the circuit breaker above.
    recordFailure(config.baseURL);
    if (axios.isCancel(err)) { onError({ type: ERR.ABORTED }); return; }
    onError(classifyError(err, ptype));
    return;
  }

  // Parse stream — wraps the caller's callbacks so success/failure are
  // recorded in the health tracker exactly once.
  parseSseStream(source.data, {
    isAnthropic: isAnth,
    signal,
    onToken,
    onDone: (full) => {
      if (!finished) {
        finished = true;
        recordSuccess(config.baseURL, Date.now() - startMs);
        onDone(full);
      }
    },
    onError: (err) => {
      if (!finished) {
        finished = true;
        recordFailure(config.baseURL);
        onError(err);
      }
    },
  });
}
278
+
279
// ── Key Validation ───────────────────────────────────────────────────────────

/**
 * Validates an API key against the configured provider.
 *
 * Anthropic has no key-introspection endpoint, so a minimal 1-token message
 * is sent as a probe; other providers expose GET /auth/key. Any HTTP status
 * other than 401/403 means the server accepted the credential (e.g. a 404 for
 * an unknown model still proves the key authenticated).
 *
 * @param {string} apiKey - candidate key to test
 * @returns {Promise<{ok: boolean, status?: number, msg?: string, data?: object}>}
 */
async function checkKey(apiKey) {
  const ptype = currentProviderType();
  try {
    if (ptype === 'anthropic') {
      // Cheapest possible probe: 1 max token, 2-character prompt.
      // NOTE(review): confirm 'claude-haiku-3-5' is a valid current model id;
      // an unknown model yields a non-auth error, which is still treated as
      // "key accepted" below, so validation remains correct either way.
      await axios.post(`${config.baseURL}/messages`, {
        model: 'claude-haiku-3-5', max_tokens: 1,
        messages: [{ role: 'user', content: 'hi' }],
      }, {
        headers: { 'x-api-key': apiKey, 'anthropic-version': '2023-06-01', 'Content-Type': 'application/json' },
        timeout: 8000,
      });
      return { ok: true, data: { label: 'Anthropic key valid' } };
    }
    const r = await axios.get(`${config.baseURL}/auth/key`, {
      headers: { 'Authorization': `Bearer ${apiKey}` }, timeout: 8000,
    });
    return { ok: true, data: r.data?.data };
  } catch (e) {
    const status = e.response?.status;
    if (status === 401 || status === 403) return { ok: false, status, msg: 'Invalid API key' };
    // FIX: original re-tested `status !== 401 && status !== 403` here, which
    // is unreachable after the return above — reduced to a plain truthiness
    // check. Any other HTTP status means the request authenticated.
    if (status) return { ok: true, data: { label: 'Key accepted' } };
    // No HTTP response at all: network failure or timeout.
    return { ok: false, status, msg: e.response?.data?.error?.message || e.message };
  }
}
304
+
305
/**
 * Fetches the list of zero-cost models from the OpenRouter catalog.
 * Best-effort by design: any failure (network error, bad key, timeout)
 * resolves to an empty array rather than throwing.
 *
 * @param {string} [apiKey] - overrides config.apiKey when provided
 * @returns {Promise<Array>} models whose id ends in ':free' and whose
 *   prompt pricing is the string '0'
 */
async function fetchFreeModels(apiKey) {
  try {
    const res = await axios.get('https://openrouter.ai/api/v1/models', {
      headers: { 'Authorization': `Bearer ${apiKey || config.apiKey}` },
      timeout: 12000,
    });
    const catalog = res.data?.data || [];
    const isFree = (m) => m.id.endsWith(':free') && m.pricing?.prompt === '0';
    return catalog.filter(isFree);
  } catch (_) {
    return []; // deliberate swallow: callers treat "no models" as soft failure
  }
}

module.exports = { streamChat, checkKey, fetchFreeModels, ERR, currentProviderType, isCircuitOpen, getHealth };