ai-speedometer-headless 2.1.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,942 @@
1
#!/usr/bin/env node
// Bundler-emitted lazy-init helper: wraps a module initializer so it runs at
// most once and caches its result (fn is cleared to 0 after the first call).
// NOTE(review): no call sites are visible in this bundle chunk — appears to be
// dead scaffolding kept by the bundler; confirm before removing.
var __esm = (fn, res) => () => (fn && (res = fn(fn = 0)), res);
3
+
4
+ // ../core/src/ai-config.ts
5
+ import fs2 from "fs";
6
+ import path2 from "path";
7
+ import { homedir as homedir2 } from "os";
8
// Resolve the XDG-style config locations used by ai-speedometer.
// Honors $XDG_CONFIG_HOME, falling back to ~/.config.
var getAIConfigPaths = () => {
  const baseDir = process.env.XDG_CONFIG_HOME || path2.join(homedir2(), ".config");
  const appDir = path2.join(baseDir, "ai-speedometer");
  return {
    configDir: appDir,
    configJson: path2.join(appDir, "ai-benchmark-config.json"),
    recentModelsCache: path2.join(appDir, "recent-models.json")
  };
};

// Read ai-benchmark-config.json. A missing or unreadable file degrades to an
// empty config rather than failing the CLI.
var readAIConfig = async () => {
  const { configJson } = getAIConfigPaths();
  try {
    if (!fs2.existsSync(configJson)) {
      return { verifiedProviders: {}, customProviders: [] };
    }
    return JSON.parse(fs2.readFileSync(configJson, "utf8"));
  } catch (error) {
    console.warn("Warning: Could not read ai-benchmark-config.json:", error.message);
    return { verifiedProviders: {}, customProviders: [] };
  }
};

// Convenience accessor: just the user-defined custom providers, never null.
var getCustomProvidersFromConfig = async () => {
  const { customProviders } = await readAIConfig();
  return customProviders || [];
};
32
// No-op module initializer kept so bundled call sites remain valid.
var init_ai_config = function () {};
33
+
34
+ // ../core/src/opencode-integration.ts
35
+ import fs3 from "fs";
36
+ import path3 from "path";
37
+ import { homedir as homedir3 } from "os";
38
+
39
+ // ../core/src/models-dev.ts
40
+ import fs from "fs";
41
+ import path from "path";
42
+ import { homedir } from "os";
43
+
44
// ../core/custom-verified-providers.json
// Embedded snapshot of custom-verified-providers.json. Two kinds of entries:
//  - full provider entries (id/name/baseUrl/type/models) added on top of the
//    models.dev catalogue;
//  - the special "extra-models-dev" key, whose sub-keys are models.dev
//    provider ids and whose values are extra models appended to that provider.
// A custom-verified-providers.json in the working directory overrides this
// snapshot (see getCustomProvidersJson).
var custom_verified_providers_default = {
  "custom-verified-providers": {
    // Z.AI international endpoint exposed via an Anthropic-compatible API.
    "zai-code-anth": {
      id: "zai-code-anth",
      name: "zai-code-anth",
      baseUrl: "https://api.z.ai/api/anthropic/v1",
      type: "anthropic",
      models: {
        "glm-4-5": {
          id: "glm-4.5",
          name: "GLM-4.5-anth"
        },
        "glm-4-5-air": {
          id: "glm-4.5-air",
          name: "GLM-4.5-air-anth"
        },
        "glm-4-6": {
          id: "glm-4.6",
          name: "GLM-4.6-anth"
        }
      }
    },
    // Z.AI mainland-China endpoint (bigmodel.cn), same models.
    "zai-china-anth-base": {
      id: "zai-china-anth-base",
      name: "zai-china-anth-base",
      baseUrl: "https://open.bigmodel.cn/api/anthropic/v1",
      type: "anthropic",
      models: {
        "glm-4-5": {
          id: "glm-4.5",
          name: "GLM-4.5-china-anth"
        },
        "glm-4-5-air": {
          id: "glm-4.5-air",
          name: "GLM-4.5-air-china-anth"
        },
        "glm-4-6": {
          id: "glm-4.6",
          name: "GLM-4.6-china-anth"
        }
      }
    },
    // Not a provider: extra models merged into existing models.dev providers,
    // keyed by provider id (see loadExtraModels).
    "extra-models-dev": {
      chutes: {
        "deepseek-ai/DeepSeek-V3.1-Terminus": {
          id: "deepseek-ai/DeepSeek-V3.1-Terminus",
          name: "DeepSeek V3.1 Terminus"
        },
        "meituan-longcat/LongCat-Flash-Thinking-FP8": {
          id: "meituan-longcat/LongCat-Flash-Thinking-FP8",
          name: "LongCat Flash Thinking FP8"
        }
      }
    },
    // NanoGPT subscription endpoint (OpenAI-compatible).
    "nanogpt-plan": {
      id: "nanogpt-plan",
      name: "nanogpt-plan",
      baseUrl: "https://nano-gpt.com/api/v1",
      type: "openai-compatible",
      models: {
        "deepseek-ai/DeepSeek-V3.1-Terminus": {
          id: "deepseek-ai/DeepSeek-V3.1-Terminus",
          name: "DeepSeek V3.1 Terminus"
        },
        "deepseek-ai/DeepSeek-V3.1": {
          id: "deepseek-ai/DeepSeek-V3.1",
          name: "DeepSeek V3.1"
        },
        "deepseek-ai/DeepSeek-V3.1-Terminus:thinking": {
          id: "deepseek-ai/DeepSeek-V3.1-Terminus:thinking",
          name: "DeepSeek V3.1 Terminus Thinking"
        },
        "zai-org/GLM-4.5-FP8": {
          id: "zai-org/GLM-4.5-FP8",
          name: "GLM 4.5 FP8"
        },
        "zai-org/GLM-4.5-FP8:thinking": {
          id: "zai-org/GLM-4.5-FP8:thinking",
          name: "GLM 4.5 FP8 Thinking"
        },
        "zai-org/GLM-4.5-Air": {
          id: "zai-org/GLM-4.5-Air",
          name: "GLM 4.5 Air"
        },
        "moonshotai/Kimi-K2-Instruct": {
          id: "moonshotai/Kimi-K2-Instruct",
          name: "Kimi K2 Instruct"
        },
        "moonshotai/Kimi-K2-Instruct-0905": {
          id: "moonshotai/Kimi-K2-Instruct-0905",
          name: "Kimi K2 Instruct 0905"
        },
        "moonshotai/kimi-k2-thinking": {
          id: "moonshotai/kimi-k2-thinking",
          name: "Kimi K2 Thinking"
        },
        "deepseek-ai/deepseek-v3.2-exp": {
          id: "deepseek-ai/deepseek-v3.2-exp",
          name: "DeepSeek V3.2 Exp"
        },
        "z-ai/glm-4.6": {
          id: "z-ai/glm-4.6",
          name: "GLM 4.6"
        },
        "z-ai/glm-4.6:thinking": {
          id: "z-ai/glm-4.6:thinking",
          name: "GLM 4.6 Thinking"
        },
        "qwen3-vl-235b-a22b-instruct": {
          id: "qwen3-vl-235b-a22b-instruct",
          name: "Qwen3 VL 235B A22B Instruct"
        },
        "MiniMax-M2": {
          id: "MiniMax-M2",
          name: "MiniMax-M2"
        }
      }
    }
  }
};
165
+
166
// ../core/src/models-dev.ts
// On-disk cache of the models.dev catalogue: ~/.cache/ai-speedometer/models.json.
var CACHE_DIR = path.join(homedir(), ".cache", "ai-speedometer");
var CACHE_FILE = path.join(CACHE_DIR, "models.json");
// Minimal built-in catalogue used only when models.dev is unreachable AND no
// cache exists. NOTE(review): these model lists are a point-in-time snapshot
// and will age relative to the live API.
var FALLBACK_PROVIDERS = [
  {
    id: "openai",
    name: "OpenAI",
    baseUrl: "https://api.openai.com/v1",
    type: "openai-compatible",
    models: [
      { id: "gpt-4o", name: "GPT-4o" },
      { id: "gpt-4o-mini", name: "GPT-4o Mini" },
      { id: "gpt-4-turbo", name: "GPT-4 Turbo" },
      { id: "gpt-3.5-turbo", name: "GPT-3.5 Turbo" }
    ]
  },
  {
    id: "anthropic",
    name: "Anthropic",
    baseUrl: "https://api.anthropic.com",
    type: "anthropic",
    models: [
      { id: "claude-3-5-sonnet-20241022", name: "Claude 3.5 Sonnet" },
      { id: "claude-3-5-haiku-20241022", name: "Claude 3.5 Haiku" },
      { id: "claude-3-opus-20240229", name: "Claude 3 Opus" }
    ]
  },
  {
    id: "openrouter",
    name: "OpenRouter",
    baseUrl: "https://openrouter.ai/api/v1",
    type: "openai-compatible",
    models: [
      { id: "anthropic/claude-3.5-sonnet", name: "Claude 3.5 Sonnet" },
      { id: "openai/gpt-4o", name: "GPT-4o" },
      { id: "openai/gpt-4o-mini", name: "GPT-4o Mini" }
    ]
  }
];
205
// Create the cache directory if needed; cache is best-effort, so failures
// only warn.
function ensureCacheDir() {
  try {
    if (fs.existsSync(CACHE_DIR)) {
      return;
    }
    fs.mkdirSync(CACHE_DIR, { recursive: true });
  } catch (error) {
    console.warn("Warning: Could not create cache directory:", error.message);
  }
}
214
// A cache entry is stale after one hour, or when it never recorded a
// timestamp at all.
function isCacheExpired(cacheData) {
  const stamp = cacheData.timestamp;
  if (!stamp) {
    return true;
  }
  const oneHourMs = 60 * 60 * 1000;
  return Date.now() - stamp > oneHourMs;
}
219
// Load the cached models.dev catalogue, or null when missing, expired, or
// unreadable.
function loadCache() {
  try {
    if (!fs.existsSync(CACHE_FILE)) {
      return null;
    }
    const parsed = JSON.parse(fs.readFileSync(CACHE_FILE, "utf8"));
    return isCacheExpired(parsed) ? null : parsed;
  } catch (error) {
    console.warn("Warning: Could not load cache:", error.message);
    return null;
  }
}
233
// Persist the catalogue with a freshness timestamp; best-effort only.
function saveCache(data) {
  try {
    ensureCacheDir();
    const stamped = { ...data, timestamp: Date.now() };
    fs.writeFileSync(CACHE_FILE, JSON.stringify(stamped, null, 2));
  } catch (error) {
    console.warn("Warning: Could not save cache:", error.message);
  }
}
242
// Fetch the live models.dev catalogue; null on any network/HTTP failure so
// callers can fall back to cache or embedded data.
async function fetchFromAPI() {
  try {
    const response = await fetch("https://models.dev/api.json");
    if (response.ok) {
      return await response.json();
    }
    throw new Error(`HTTP error! status: ${response.status}`);
  } catch (error) {
    console.warn("Warning: Could not fetch from models.dev API:", error.message);
    return null;
  }
}
253
// Return the custom-providers document: a custom-verified-providers.json in
// the current working directory overrides the embedded snapshot.
function getCustomProvidersJson() {
  const overridePath = path.join(process.cwd(), "custom-verified-providers.json");
  if (!fs.existsSync(overridePath)) {
    return custom_verified_providers_default;
  }
  try {
    return JSON.parse(fs.readFileSync(overridePath, "utf8"));
  } catch (fileError) {
    console.warn("Warning: Could not load custom providers from file, using embedded data:", fileError.message);
    return custom_verified_providers_default;
  }
}
265
// Turn "custom-verified-providers" entries into provider records.
// Skips the "extra-models-dev" pseudo-entry (handled by loadExtraModels) and
// any entry missing id/name/models.
function loadCustomVerifiedProviders() {
  try {
    const section = getCustomProvidersJson()["custom-verified-providers"];
    if (!section) {
      return [];
    }
    const providers = [];
    for (const entry of Object.values(section)) {
      if (entry.id === "extra-models-dev") {
        continue; // model-only section, not a provider
      }
      if (!entry.id || !entry.name || !entry.models) {
        continue;
      }
      providers.push({
        id: entry.id,
        name: entry.name,
        baseUrl: entry.baseUrl || "",
        type: entry.type || "openai-compatible",
        models: Object.values(entry.models).map((m) => ({ id: m.id, name: m.name }))
      });
    }
    return providers;
  } catch (error) {
    console.warn("Warning: Could not load custom verified providers:", error.message);
    return [];
  }
}
291
// Collect the "extra-models-dev" section as { providerId: Model[] } so the
// extra models can be appended onto existing models.dev providers.
function loadExtraModels() {
  try {
    const section = getCustomProvidersJson()["custom-verified-providers"]?.["extra-models-dev"];
    if (!section) {
      return {};
    }
    const extraModels = {};
    for (const [providerId, models] of Object.entries(section)) {
      extraModels[providerId] = Object.values(models).map((m) => ({ id: m.id, name: m.name }));
    }
    return extraModels;
  } catch (error) {
    console.warn("Warning: Could not load extra models:", error.message);
    return {};
  }
}
307
// Convert the raw models.dev payload into this app's provider records,
// prepending the custom verified providers and appending any configured
// extra models. Falls back to the built-in list when nothing usable remains.
function transformModelsDevData(apiData) {
  const providers = [...loadCustomVerifiedProviders()];
  const extraModels = loadExtraModels();
  if (apiData) {
    for (const providerData of Object.values(apiData)) {
      if (!providerData.id || !providerData.name || !providerData.models) {
        continue;
      }
      const models = Object.values(providerData.models).map((m) => ({ id: m.id, name: m.name }));
      const extras = extraModels[providerData.id];
      if (extras) {
        models.push(...extras);
      }
      // Providers whose npm package mentions "anthropic" speak the Anthropic
      // protocol; everything else is treated as OpenAI-compatible.
      const isAnthropic = providerData.npm ? providerData.npm.includes("anthropic") : false;
      providers.push({
        id: providerData.id,
        name: providerData.name,
        baseUrl: providerData.api || providerData.baseUrl || "",
        type: isAnthropic ? "anthropic" : "openai-compatible",
        models
      });
    }
  }
  return providers.length === 0 ? FALLBACK_PROVIDERS : providers;
}
331
// Resolve the full provider catalogue, in order of preference:
//   1. fresh on-disk cache (with any missing custom verified providers
//      re-appended, since they may change independently of the cache);
//   2. live models.dev API (result is cached);
//   3. built-in FALLBACK_PROVIDERS plus configured extras.
async function getAllProviders() {
  const cachedData = loadCache();
  if (cachedData?.providers) {
    const customVerifiedProviders = loadCustomVerifiedProviders();
    const existingIds = new Set(cachedData.providers.map((p) => p.id));
    const missing = customVerifiedProviders.filter((p) => !existingIds.has(p.id));
    if (missing.length > 0)
      cachedData.providers.push(...missing);
    return cachedData.providers;
  }
  const apiData = await fetchFromAPI();
  if (apiData) {
    const transformed = transformModelsDevData(apiData);
    saveCache({ providers: transformed });
    return transformed;
  }
  // Offline fallback. BUG FIX: the previous shallow copy ([...FALLBACK_PROVIDERS])
  // shared the module-level model arrays, so pushing extras mutated
  // FALLBACK_PROVIDERS itself and repeated offline calls accumulated
  // duplicate models. Copy each provider's models array before appending.
  const extraModels = loadExtraModels();
  const fallback = FALLBACK_PROVIDERS.map((p) => ({ ...p, models: [...p.models] }));
  fallback.forEach((p) => {
    if (extraModels[p.id])
      p.models.push(...extraModels[p.id]);
  });
  fallback.push(...loadCustomVerifiedProviders());
  return fallback;
}
357
+
358
// ../core/src/opencode-integration.ts
init_ai_config(); // no-op initializer retained by the bundler for module ordering
import { parse as parseJsonc } from "jsonc-parser";
361
// XDG base-directory lookups for opencode, with the conventional
// ~/.local/share and ~/.config fallbacks.
var getXDGPaths = () => {
  const dataRoot = process.env.XDG_DATA_HOME || path3.join(homedir3(), ".local", "share");
  const configRoot = process.env.XDG_CONFIG_HOME || path3.join(homedir3(), ".config");
  return {
    data: path3.join(dataRoot, "opencode"),
    config: path3.join(configRoot, "opencode")
  };
};
365
// Absolute paths to opencode's credential store and main config file.
var getFilePaths = () => {
  const { data, config } = getXDGPaths();
  return {
    authJson: path3.join(data, "auth.json"),
    opencodeJson: path3.join(config, "opencode.json")
  };
};
372
// Read opencode's auth.json (JSONC: comments and trailing commas tolerated).
// Missing or unreadable files degrade to an empty credential map.
var readAuthJson = async () => {
  const { authJson } = getFilePaths();
  try {
    if (!fs3.existsSync(authJson)) {
      return {};
    }
    const raw = fs3.readFileSync(authJson, "utf8");
    const errors = [];
    const parsed = parseJsonc(raw, errors, { allowTrailingComma: true });
    if (errors.length === 0) {
      return parsed;
    }
    // Partial parses are still usable; surface the problems but keep going.
    console.warn("Warning: JSONC parsing errors in auth.json:", errors.map((e) => e.error).join(", "));
    return parsed || {};
  } catch (error) {
    console.warn("Warning: Could not read auth.json:", error.message);
    return {};
  }
};
390
// Merge opencode's global config from its candidate file names (later files
// win; the `provider` map is merged key-wise rather than replaced wholesale).
var readOpencodeGlobalConfig = () => {
  const configDir = path3.join(process.env.XDG_CONFIG_HOME || path3.join(homedir3(), ".config"), "opencode");
  const candidates = ["config.json", "opencode.json", "opencode.jsonc"];
  let merged = {};
  for (const filename of candidates) {
    const filePath = path3.join(configDir, filename);
    try {
      if (!fs3.existsSync(filePath))
        continue;
      const text = fs3.readFileSync(filePath, "utf8");
      const errors = [];
      const parsed = parseJsonc(text, errors, { allowTrailingComma: true });
      // FIX: previously parse errors were silently dropped; warn like
      // readAuthJson does, but still merge whatever parsed.
      if (errors.length > 0) {
        console.warn(`Warning: JSONC parsing errors in ${filename}:`, errors.map((e) => e.error).join(", "));
      }
      if (parsed && typeof parsed === "object") {
        merged = {
          ...merged,
          ...parsed,
          provider: { ...merged.provider ?? {}, ...parsed.provider ?? {} }
        };
      }
    } catch (error) {
      // BUG FIX: the message contained the literal text "$(unknown)" instead
      // of interpolating the offending file path.
      console.warn(`Warning: Could not read opencode config file ${filePath}:`, error.message);
    }
  }
  return merged;
};
415
// Build the list of providers the user can actually benchmark: the models.dev
// catalogue overlaid with opencode's global config, filtered down to entries
// that have a usable API key (from opencode's auth.json or a config-level
// options.apiKey). Returns [] on any failure.
var getAuthenticatedProviders = async () => {
  try {
    // readOpencodeGlobalConfig is synchronous; wrapped so all three sources
    // resolve through one Promise.all.
    const [allModelsDevProviders, authData, globalConfig] = await Promise.all([
      getAllProviders(),
      readAuthJson(),
      Promise.resolve(readOpencodeGlobalConfig())
    ]);
    // Phase 1: index every known provider by id. Model keys are namespaced
    // as "<providerId>_<modelId>" to stay unique across providers.
    const database = new Map;
    for (const mdProvider of allModelsDevProviders) {
      const modelMap = new Map;
      for (const m of mdProvider.models) {
        modelMap.set(`${mdProvider.id}_${m.id}`, { id: `${mdProvider.id}_${m.id}`, name: m.name });
      }
      database.set(mdProvider.id, {
        id: mdProvider.id,
        name: mdProvider.name,
        type: mdProvider.type,
        baseUrl: mdProvider.baseUrl,
        models: modelMap
      });
    }
    // Phase 2: overlay opencode config providers. Config entries can add new
    // providers or extend known ones; config name/baseUrl win over models.dev.
    for (const [providerID, entry] of Object.entries(globalConfig.provider ?? {})) {
      const existing = database.get(providerID);
      const modelMap = existing ? new Map(existing.models) : new Map;
      for (const [modelKey, m] of Object.entries(entry.models ?? {})) {
        const resolvedId = `${providerID}_${m.id ?? modelKey}`;
        modelMap.set(resolvedId, { id: resolvedId, name: m.name ?? m.id ?? modelKey });
      }
      database.set(providerID, {
        id: providerID,
        name: entry.name ?? existing?.name ?? providerID,
        type: existing?.type ?? "openai-compatible",
        baseUrl: entry.options?.baseURL ?? entry.api ?? existing?.baseUrl ?? "",
        models: modelMap,
        npm: entry.npm
      });
    }
    // Phase 3a: keep providers that have an API-type credential in auth.json.
    const providerMap = new Map;
    for (const [providerID, authInfo] of Object.entries(authData)) {
      if (authInfo.type !== "api" || !authInfo.key)
        continue;
      const dbEntry = database.get(providerID);
      if (!dbEntry)
        continue;
      const configEntry = globalConfig.provider?.[providerID];
      const npm = dbEntry.npm ?? configEntry?.npm;
      // An anthropic npm package overrides the catalogue's protocol type.
      const type = npm?.includes("anthropic") ? "anthropic" : dbEntry.type;
      providerMap.set(providerID, {
        id: providerID,
        name: dbEntry.name,
        type,
        baseUrl: dbEntry.baseUrl,
        apiKey: authInfo.key,
        models: Array.from(dbEntry.models.values())
      });
    }
    // Phase 3b: config-level options.apiKey entries; these overwrite any
    // auth.json-derived entry for the same provider id.
    for (const [providerID, entry] of Object.entries(globalConfig.provider ?? {})) {
      if (!entry.options?.apiKey)
        continue;
      const dbEntry = database.get(providerID);
      if (!dbEntry)
        continue;
      const npm = dbEntry.npm ?? entry.npm;
      const type = npm?.includes("anthropic") ? "anthropic" : dbEntry.type;
      providerMap.set(providerID, {
        id: providerID,
        name: dbEntry.name,
        type,
        baseUrl: dbEntry.baseUrl,
        apiKey: entry.options.apiKey,
        models: Array.from(dbEntry.models.values())
      });
    }
    return Array.from(providerMap.values());
  } catch (error) {
    console.warn("Warning: Could not load providers:", error.message);
    return [];
  }
};
494
// Union of every provider source, deduplicated by id with a deliberate
// precedence (later set() wins): custom verified < opencode-authenticated <
// user config. With includeAllProviders, unauthenticated models.dev providers
// are added too (apiKey: "", model ids namespaced like authenticated ones).
var getAllAvailableProviders = async (includeAllProviders = false) => {
  const [opencodeProviders, customProvidersFromConfig, customVerifiedProviders] = await Promise.all([
    getAuthenticatedProviders(),
    // Each optional source is wrapped so one failure can't sink the others.
    (async () => {
      try {
        return await getCustomProvidersFromConfig();
      } catch (error) {
        console.warn("Warning: Could not load custom providers:", error.message);
        return [];
      }
    })(),
    (async () => {
      try {
        return loadCustomVerifiedProviders();
      } catch (error) {
        console.warn("Warning: Could not load custom verified providers:", error.message);
        return [];
      }
    })()
  ]);
  const providerMap = new Map;
  // Insertion order defines precedence: later entries overwrite earlier ones.
  customVerifiedProviders.forEach((p) => providerMap.set(p.id, p));
  opencodeProviders.forEach((p) => providerMap.set(p.id, p));
  customProvidersFromConfig.forEach((p) => providerMap.set(p.id, p));
  if (includeAllProviders) {
    try {
      const allModelsDevProviders = await getAllProviders();
      const authenticatedIds = new Set(opencodeProviders.map((p) => p.id));
      const customIds = new Set(customProvidersFromConfig.map((p) => p.id));
      const customVerifiedIds = new Set(customVerifiedProviders.map((p) => p.id));
      allModelsDevProviders.forEach((provider) => {
        if (!authenticatedIds.has(provider.id) && !customIds.has(provider.id) && !customVerifiedIds.has(provider.id)) {
          providerMap.set(provider.id, {
            ...provider,
            type: provider.type,
            apiKey: "",
            // Namespace model ids to match authenticated providers' format.
            models: provider.models.map((m) => ({ ...m, id: `${provider.id}_${m.id}` }))
          });
        }
      });
    } catch (error) {
      console.warn("Warning: Could not load all models.dev providers:", error.message);
    }
  }
  return Array.from(providerMap.values());
};
540
+
541
// ../core/src/constants.ts
// Fixed prompt sent to every model so benchmark runs are comparable.
var TEST_PROMPT = `make a 300 word story`;
543
+
544
// ../core/src/benchmark.ts
// Benchmark one model over its provider's raw streaming REST API.
// Measures total time, time-to-first-chunk, and tokens/sec; token counts come
// from the provider's usage payloads, falling back to a chars/4 estimate.
// Never throws: all failures are reported as { success: false, error }.
async function benchmarkSingleModelRest(model) {
  try {
    if (!model.providerConfig || !model.providerConfig.apiKey) {
      throw new Error(`Missing API key for provider ${model.providerName}`);
    }
    if (!model.providerConfig.baseUrl) {
      throw new Error(`Missing base URL for provider ${model.providerName}`);
    }
    // Model ids may be namespaced "<providerId>_<modelId>"; strip the
    // provider prefix before calling the API.
    // BUG FIX: previously used model.id.split("_")[1], which truncated model
    // ids that themselves contain "_". Keep everything after the FIRST "_",
    // matching the id handling in headless.ts (split("_").slice(1).join("_")).
    let actualModelId;
    if (model.id && model.id.includes("_")) {
      actualModelId = model.id.split("_").slice(1).join("_");
    } else if (model.id) {
      actualModelId = model.id;
    } else {
      actualModelId = model.name;
    }
    actualModelId = actualModelId.trim();
    const startTime = Date.now();
    let firstTokenTime = null;
    let streamedText = "";
    let inputTokens = 0;
    let outputTokens = 0;
    // Pick the endpoint by provider protocol (explicit endpointFormat wins).
    let endpoint;
    if (model.providerConfig.endpointFormat) {
      endpoint = "/" + model.providerConfig.endpointFormat;
    } else if (model.providerType === "anthropic") {
      endpoint = "/messages";
    } else if (model.providerType === "google") {
      endpoint = "/models/" + actualModelId + ":streamGenerateContent";
    } else {
      endpoint = "/chat/completions";
    }
    const baseUrl = model.providerConfig.baseUrl.replace(/\/$/, "");
    const url = `${baseUrl}${endpoint}`;
    const headers = {
      "Content-Type": "application/json",
      Authorization: `Bearer ${model.providerConfig.apiKey}`
    };
    if (model.providerType === "anthropic") {
      // NOTE(review): the Bearer Authorization header is intentionally left
      // in place alongside x-api-key — some Anthropic-compatible gateways
      // accept one or the other; confirm no upstream rejects the extra header.
      headers["x-api-key"] = model.providerConfig.apiKey;
      headers["anthropic-version"] = "2023-06-01";
    } else if (model.providerType === "google") {
      delete headers["Authorization"];
      headers["x-goog-api-key"] = model.providerConfig.apiKey;
    }
    // OpenAI-style request body by default; reshaped below for Google.
    const body = {
      model: actualModelId,
      messages: [{ role: "user", content: TEST_PROMPT }],
      max_tokens: 500,
      temperature: 0.7,
      stream: true
    };
    if (model.providerType === "google") {
      body["contents"] = [{ parts: [{ text: TEST_PROMPT }] }];
      body["generationConfig"] = { maxOutputTokens: 500, temperature: 0.7 };
      delete body["messages"];
      delete body["max_tokens"];
      delete body["stream"];
    }
    const response = await fetch(url, {
      method: "POST",
      headers,
      body: JSON.stringify(body)
    });
    if (!response.ok) {
      await response.text(); // drain the body before abandoning the response
      throw new Error(`API request failed: ${response.status} ${response.statusText}`);
    }
    // Read the response stream, splitting into newline-delimited events.
    const reader = response.body.getReader();
    const decoder = new TextDecoder();
    let buffer = "";
    let isFirstChunk = true;
    while (true) {
      const { done, value } = await reader.read();
      if (done)
        break;
      // "First token" is really first received network chunk, which is the
      // closest observable proxy for TTFT over raw fetch streaming.
      if (isFirstChunk && !firstTokenTime) {
        firstTokenTime = Date.now();
        isFirstChunk = false;
      }
      buffer += decoder.decode(value, { stream: true });
      const lines = buffer.split("\n");
      buffer = lines.pop() || ""; // keep any partial trailing line
      for (const line of lines) {
        const trimmedLine = line.trim();
        if (!trimmedLine)
          continue;
        try {
          if (model.providerType === "anthropic") {
            // Anthropic SSE: "event:" lines are skipped, "data:" lines (and
            // bare JSON from non-SSE gateways) carry the payload.
            if (trimmedLine.startsWith("data: ")) {
              const jsonStr = trimmedLine.slice(6);
              if (jsonStr === "[DONE]")
                break; // exits the line loop; reader loop ends on done
              const chunk = JSON.parse(jsonStr);
              const chunkTyped = chunk;
              if (chunkTyped.type === "content_block_delta" && chunkTyped.delta?.text) {
                streamedText += chunkTyped.delta.text;
              } else if (chunkTyped.type === "message_start" && chunkTyped.message?.usage) {
                inputTokens = chunkTyped.message.usage.input_tokens || 0;
              } else if (chunkTyped.type === "message_delta") {
                if (chunkTyped.usage?.output_tokens)
                  outputTokens = chunkTyped.usage.output_tokens;
                if (chunkTyped.usage?.input_tokens && !inputTokens)
                  inputTokens = chunkTyped.usage.input_tokens;
              }
            } else if (trimmedLine.startsWith("event: ")) {
              continue;
            } else {
              const chunk = JSON.parse(trimmedLine);
              if (chunk.type === "content_block_delta" && chunk.delta?.text) {
                streamedText += chunk.delta.text;
              } else if (chunk.type === "message_start" && chunk.message?.usage) {
                inputTokens = chunk.message.usage.input_tokens || 0;
              } else if (chunk.type === "message_delta") {
                if (chunk.usage?.output_tokens)
                  outputTokens = chunk.usage.output_tokens;
                if (chunk.usage?.input_tokens && !inputTokens)
                  inputTokens = chunk.usage.input_tokens;
              }
            }
          } else if (model.providerType === "google") {
            // Google request is non-streaming (stream deleted above); lines
            // that parse as JSON objects are harvested for text and usage.
            const chunk = JSON.parse(trimmedLine);
            if (chunk.candidates?.[0]?.content?.parts?.[0]?.text) {
              streamedText += chunk.candidates[0].content.parts[0].text;
            }
            if (chunk.usageMetadata?.promptTokenCount)
              inputTokens = chunk.usageMetadata.promptTokenCount;
            if (chunk.usageMetadata?.candidatesTokenCount)
              outputTokens = chunk.usageMetadata.candidatesTokenCount;
          } else {
            // OpenAI-compatible SSE: delta.content, or delta.reasoning for
            // reasoning-only chunks.
            if (trimmedLine.startsWith("data: ")) {
              const jsonStr = trimmedLine.slice(6);
              if (jsonStr === "[DONE]")
                break;
              const chunk = JSON.parse(jsonStr);
              if (chunk.choices?.[0]?.delta?.content)
                streamedText += chunk.choices[0].delta.content;
              else if (chunk.choices?.[0]?.delta?.reasoning)
                streamedText += chunk.choices[0].delta.reasoning;
              if (chunk.usage?.prompt_tokens)
                inputTokens = chunk.usage.prompt_tokens;
              if (chunk.usage?.completion_tokens)
                outputTokens = chunk.usage.completion_tokens;
            }
          }
        } catch {
          continue; // malformed/partial line: skip rather than abort the run
        }
      }
    }
    const endTime = Date.now();
    const totalTime = endTime - startTime;
    const timeToFirstToken = firstTokenTime ? firstTokenTime - startTime : totalTime;
    // Fall back to the rough chars/4 heuristic when usage was not reported.
    const usedEstimateForOutput = !outputTokens;
    const usedEstimateForInput = !inputTokens;
    const finalOutputTokens = outputTokens || Math.round(streamedText.length / 4);
    const finalInputTokens = inputTokens || Math.round(TEST_PROMPT.length / 4);
    const totalTokens = finalInputTokens + finalOutputTokens;
    const tokensPerSecond = totalTime > 0 ? finalOutputTokens / totalTime * 1000 : 0;
    return {
      model: model.name,
      provider: model.providerName,
      totalTime,
      timeToFirstToken,
      tokenCount: finalOutputTokens,
      tokensPerSecond,
      promptTokens: finalInputTokens,
      totalTokens,
      usedEstimateForOutput,
      usedEstimateForInput,
      success: true
    };
  } catch (error) {
    // Uniform failure shape so callers can render results without branching.
    return {
      model: model.name,
      provider: model.providerName,
      totalTime: 0,
      timeToFirstToken: 0,
      tokenCount: 0,
      tokensPerSecond: 0,
      promptTokens: 0,
      totalTokens: 0,
      usedEstimateForOutput: true,
      usedEstimateForInput: true,
      success: false,
      error: error.message
    };
  }
}
735
+
736
// ../core/src/headless.ts
// Split a "provider:model" argument on the FIRST colon only, so model ids may
// themselves contain colons (e.g. "z-ai/glm-4.6:thinking").
function parseProviderModel(arg) {
  const sep = arg.indexOf(":");
  if (sep < 0) {
    throw new Error(`Invalid format. Use provider:model (e.g., openai:gpt-4)`);
  }
  const provider = arg.slice(0, sep);
  const model = arg.slice(sep + 1);
  return { provider, model };
}
747
// Build a one-off OpenAI-compatible provider record from --bench-custom CLI
// flags. Throws when the mandatory --base-url / --api-key flags are absent.
function createCustomProviderFromCli(cliArgs) {
  const { provider, model } = parseProviderModel(cliArgs.benchCustom);
  if (!cliArgs.baseUrl) {
    throw new Error("--base-url is required for custom provider benchmarking");
  }
  if (!cliArgs.apiKey) {
    throw new Error("--api-key is required for custom provider benchmarking");
  }
  return {
    id: provider,
    name: provider,
    type: "openai-compatible",
    baseUrl: cliArgs.baseUrl,
    apiKey: cliArgs.apiKey,
    endpointFormat: cliArgs.endpointFormat || "chat/completions",
    models: [{ name: model, id: model }]
  };
}
764
// Load the full provider catalogue for headless mode; on failure, log and
// return an empty catalogue so callers can report "provider not found".
async function loadConfig(includeAll) {
  try {
    return { providers: await getAllAvailableProviders(includeAll) };
  } catch (error) {
    console.error("Error: Failed to load providers:", error.message);
    return { providers: [] };
  }
}
773
// Serialize one benchmark result to the machine-readable JSON contract.
// formatted=true pretty-prints with 2-space indent; otherwise compact.
function buildJsonOutput(providerName, providerId, modelName, modelId, result, formatted) {
  const msToSeconds = (ms) => ms / 1000;
  const payload = {
    provider: providerName,
    providerId,
    model: modelName,
    modelId,
    method: "rest-api",
    success: result.success,
    totalTime: result.totalTime,
    totalTimeSeconds: msToSeconds(result.totalTime),
    timeToFirstToken: result.timeToFirstToken,
    timeToFirstTokenSeconds: msToSeconds(result.timeToFirstToken),
    tokensPerSecond: result.tokensPerSecond,
    outputTokens: result.tokenCount,
    promptTokens: result.promptTokens,
    totalTokens: result.totalTokens,
    // Either estimate flag marks the whole record as estimated.
    is_estimated: Boolean(result.usedEstimateForOutput || result.usedEstimateForInput),
    error: result.error || null
  };
  const indent = formatted ? 2 : 0;
  return JSON.stringify(payload, null, indent);
}
794
// Headless entry point: resolve the requested provider+model, run one REST
// benchmark, print the JSON result, and terminate the process (exit 0 on
// success, 1 on any failure). Never returns normally.
async function runHeadlessBenchmark(cliArgs) {
  try {
    // --bench-custom bypasses catalogue lookup entirely: the provider is
    // fabricated from CLI flags.
    if (cliArgs.benchCustom) {
      const customProvider = createCustomProviderFromCli(cliArgs);
      const modelDef = customProvider.models[0];
      const modelConfig2 = {
        id: modelDef.id,
        name: modelDef.name,
        providerName: customProvider.name,
        providerType: customProvider.type,
        providerId: customProvider.id,
        providerConfig: {
          baseUrl: customProvider.baseUrl,
          apiKey: customProvider.apiKey,
          endpointFormat: customProvider.endpointFormat
        }
      };
      const result2 = await benchmarkSingleModelRest(modelConfig2);
      if (!result2.success && result2.error)
        console.error(`Error: Benchmark failed: ${result2.error}`);
      console.log(buildJsonOutput(customProvider.name, customProvider.id, modelDef.name, modelDef.id, result2, cliArgs.formatted));
      process.exit(result2.success ? 0 : 1);
    }
    // --bench path: "provider:model", split on the first colon.
    const benchSpec = cliArgs.bench;
    const colonIndex = benchSpec.indexOf(":");
    if (colonIndex === -1) {
      console.error("Error: Invalid --bench format. Use: provider:model");
      process.exit(1);
    }
    const providerSpec = benchSpec.substring(0, colonIndex);
    let modelName = benchSpec.substring(colonIndex + 1);
    // Strip one layer of matching quotes that a shell may have left in place.
    if (modelName.startsWith('"') && modelName.endsWith('"') || modelName.startsWith("'") && modelName.endsWith("'")) {
      modelName = modelName.slice(1, -1);
    }
    if (!providerSpec || !modelName) {
      console.error("Error: Invalid --bench format. Use: provider:model");
      process.exit(1);
    }
    // Include unauthenticated providers too, so --api-key can supply the key.
    const config = await loadConfig(true);
    // Provider match: case-insensitive on id or display name.
    const provider = config.providers.find((p) => p.id?.toLowerCase() === providerSpec.toLowerCase() || p.name?.toLowerCase() === providerSpec.toLowerCase());
    if (!provider) {
      console.error(`Error: Provider '${providerSpec}' not found`);
      console.error("Available providers:");
      config.providers.forEach((p) => console.error(`  - ${p.id || p.name}`));
      process.exit(1);
    }
    // Model match: full namespaced id, id with the "<provider>_" prefix
    // stripped, or display name — all case-insensitive.
    const model = provider.models.find((m) => {
      const modelIdLower = m.id?.toLowerCase() || "";
      const modelNameLower = m.name?.toLowerCase() || "";
      const searchLower = modelName.toLowerCase();
      if (modelIdLower === searchLower)
        return true;
      const idWithoutPrefix = modelIdLower.includes("_") ? modelIdLower.split("_").slice(1).join("_") : modelIdLower;
      if (idWithoutPrefix === searchLower)
        return true;
      if (modelNameLower === searchLower)
        return true;
      return false;
    });
    if (!model) {
      console.error(`Error: Model '${modelName}' not found in provider '${provider.name}'`);
      console.error("Available models:");
      provider.models.forEach((m) => {
        const idWithoutPrefix = m.id?.includes("_") ? m.id.split("_").slice(1).join("_") : m.id;
        console.error(`  - ${m.name} (id: ${idWithoutPrefix})`);
      });
      process.exit(1);
    }
    // An explicit --api-key overrides whatever the catalogue resolved.
    const finalApiKey = cliArgs.apiKey || provider.apiKey;
    if (!finalApiKey) {
      console.error(`Error: No API key found for provider '${provider.name}'`);
      console.error("Please provide --api-key flag or configure the provider first");
      process.exit(1);
    }
    const modelConfig = {
      id: model.id,
      name: model.name,
      providerName: provider.name,
      providerType: provider.type,
      providerId: provider.id,
      providerConfig: {
        ...provider,
        apiKey: finalApiKey,
        baseUrl: provider.baseUrl || ""
      }
    };
    const result = await benchmarkSingleModelRest(modelConfig);
    if (!result.success && result.error)
      console.error(`Error: Benchmark failed: ${result.error}`);
    console.log(buildJsonOutput(provider.name, provider.id, model.name, model.id, result, cliArgs.formatted));
    process.exit(result.success ? 0 : 1);
  } catch (error) {
    console.error("Error: " + error.message);
    process.exit(1);
  }
}
890
+
891
// src/headless-entry.ts
// Hand-rolled CLI parsing — no flag-parsing dependency.
// NOTE(review): unrecognized flags are silently ignored, and value flags
// consume the next token unconditionally (so "--bench --formatted" would take
// "--formatted" as the bench spec).
var args = process.argv.slice(2);
var parsed = {
  debug: false,
  bench: null,
  benchCustom: null,
  apiKey: null,
  baseUrl: null,
  endpointFormat: null,
  formatted: false,
  help: false
};
for (let i = 0; i < args.length; i++) {
  const arg = args[i];
  if (arg === "--debug")
    parsed.debug = true;
  else if (arg === "--bench")
    parsed.bench = args[++i] ?? null;
  else if (arg === "--bench-custom")
    parsed.benchCustom = args[++i] ?? null;
  else if (arg === "--api-key")
    parsed.apiKey = args[++i] ?? null;
  else if (arg === "--base-url")
    parsed.baseUrl = args[++i] ?? null;
  else if (arg === "--endpoint-format")
    parsed.endpointFormat = args[++i] ?? null;
  else if (arg === "--formatted")
    parsed.formatted = true;
  else if (arg === "--help" || arg === "-h") {
    // Help prints immediately and exits, regardless of other flags.
    console.log("ai-speedometer headless - Benchmark AI models (Node.js/Bun compatible)");
    console.log("");
    console.log("Usage:");
    console.log("  ai-speedometer-headless --bench <provider:model>");
    console.log("  ai-speedometer-headless --bench-custom <provider:model> --base-url <url> --api-key <key>");
    console.log("");
    console.log("Options:");
    console.log("  --bench <provider:model>         Run benchmark in headless mode");
    console.log("  --bench-custom <provider:model>  Run custom provider benchmark");
    console.log("  --base-url <url>                 Base URL for custom provider");
    console.log("  --api-key <key>                  API key");
    console.log("  --endpoint-format <format>       Endpoint format (default: chat/completions)");
    console.log("  --formatted                      Pretty-print JSON output");
    console.log("  --help, -h                       Show this help message");
    process.exit(0);
  }
}
if (!parsed.bench && !parsed.benchCustom) {
  console.error("Error: --bench or --bench-custom is required");
  console.error("Run with --help for usage");
  process.exit(1);
}
// Top-level await — assumes the bundle is executed as an ES module; confirm
// the build emits ESM (NOTE: review).
await runHeadlessBenchmark(parsed);
package/package.json ADDED
@@ -0,0 +1,47 @@
1
+ {
2
+ "name": "ai-speedometer-headless",
3
+ "version": "2.1.4",
4
+ "description": "Headless CLI for benchmarking AI models — runs on Node.js and Bun, no TUI dependencies",
5
+ "bin": {
6
+ "ai-speedometer-headless": "dist/ai-speedometer-headless"
7
+ },
8
+ "engines": {
9
+ "node": ">=18.0.0",
10
+ "bun": ">=1.0.0"
11
+ },
12
+ "scripts": {
13
+ "build": "bun build src/headless-entry.ts --outdir dist --target node --external 'jsonc-parser' && printf '#!/usr/bin/env node\\n' | cat - dist/headless-entry.js > dist/ai-speedometer-headless && chmod +x dist/ai-speedometer-headless && rm dist/headless-entry.js",
14
+ "typecheck": "bun tsc --noEmit",
15
+ "prepublishOnly": "bun run build"
16
+ },
17
+ "keywords": [
18
+ "ai",
19
+ "benchmark",
20
+ "cli",
21
+ "headless",
22
+ "speedometer",
23
+ "llm",
24
+ "ci",
25
+ "docker"
26
+ ],
27
+ "author": "",
28
+ "license": "MIT",
29
+ "repository": {
30
+ "type": "git",
31
+ "url": "git+https://github.com/aptdnfapt/Ai-speedometer.git"
32
+ },
33
+ "bugs": {
34
+ "url": "https://github.com/aptdnfapt/Ai-speedometer/issues"
35
+ },
36
+ "homepage": "https://github.com/aptdnfapt/Ai-speedometer#readme",
37
+ "files": [
38
+ "dist/",
39
+ "README.md"
40
+ ],
41
+ "dependencies": {
42
+ "jsonc-parser": "^3.3.1"
43
+ },
44
+ "devDependencies": {
45
+ "@ai-speedometer/core": "workspace:*"
46
+ }
47
+ }