noosphere 0.1.3 → 0.2.1
- package/README.md +1717 -96
- package/dist/index.cjs +314 -43
- package/dist/index.cjs.map +1 -1
- package/dist/index.js +314 -43
- package/dist/index.js.map +1 -1
- package/package.json +1 -1
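The bulk of the 0.2.1 change is dynamic model discovery: `PiAiProvider` and `HuggingFaceProvider` now fetch live model lists from provider APIs and merge them with their static catalogs. A minimal usage sketch, assuming `PiAiProvider` is exported from the package root (only the bundled dist appears in this diff, so the `require("noosphere")` path and export name are assumptions):

```js
// Sketch only: export surface assumed, not confirmed by this diff.
const { PiAiProvider } = require("noosphere");

async function main() {
  // Keys are keyed by provider name, matching PROVIDER_APIS in the diff below.
  const provider = new PiAiProvider({ openai: process.env.OPENAI_API_KEY });
  // listModels() now awaits ensureDynamicModels() first, merging models
  // fetched live from provider APIs with pi-ai's static catalog (deduplicated).
  const models = await provider.listModels("llm");
  console.log(models.map((m) => m.id));
  // New public method in 0.2.1: force a re-fetch of the dynamic model lists.
  await provider.refreshDynamicModels();
}

main();
```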
package/dist/index.cjs CHANGED
@@ -280,6 +280,105 @@ var UsageTracker = class {
 var import_pi_ai = require("@mariozechner/pi-ai");
 var KNOWN_PROVIDERS = ["anthropic", "google", "openai", "xai", "groq", "cerebras", "openrouter", "zai"];
 var LOCAL_PROVIDERS = /* @__PURE__ */ new Set(["ollama"]);
+var FETCH_TIMEOUT_MS = 8e3;
+var OPENAI_CHAT_PREFIXES = ["gpt-", "o1", "o3", "o4", "chatgpt-", "codex-"];
+var OPENAI_REASONING_PREFIXES = ["o1", "o3", "o4"];
+var GOOGLE_GENERATIVE_PREFIXES = ["gemini-", "gemma-"];
+var ANTHROPIC_CHAT_PREFIXES = ["claude-"];
+var PROVIDER_APIS = {
+  openai: () => ({
+    url: "https://api.openai.com/v1/models",
+    headers: (key) => ({ Authorization: `Bearer ${key}` }),
+    piApiType: "openai-responses",
+    piBaseUrl: "https://api.openai.com/v1",
+    providerName: "openai",
+    filterChat: (id) => OPENAI_CHAT_PREFIXES.some((p) => id.startsWith(p)),
+    isReasoning: (id) => OPENAI_REASONING_PREFIXES.some((p) => id.startsWith(p)),
+    extractEntries: (data) => (data?.data ?? []).map((e) => ({ id: e.id, name: e.id }))
+  }),
+  anthropic: () => ({
+    url: "https://api.anthropic.com/v1/models?limit=100",
+    headers: (key) => ({ "x-api-key": key, "anthropic-version": "2023-06-01" }),
+    piApiType: "anthropic-messages",
+    piBaseUrl: "https://api.anthropic.com/v1",
+    providerName: "anthropic",
+    filterChat: (id) => ANTHROPIC_CHAT_PREFIXES.some((p) => id.startsWith(p)),
+    isReasoning: (id) => id.includes("opus") || id.includes("sonnet"),
+    extractEntries: (data) => (data?.data ?? []).map((e) => ({ id: e.id, name: e.display_name ?? e.id }))
+  }),
+  google: (key) => ({
+    url: `https://generativelanguage.googleapis.com/v1beta/models?key=${key}`,
+    headers: () => ({}),
+    piApiType: "google-generative-ai",
+    piBaseUrl: "https://generativelanguage.googleapis.com/v1beta",
+    providerName: "google",
+    filterChat: (id) => GOOGLE_GENERATIVE_PREFIXES.some((p) => id.startsWith(p)),
+    isReasoning: (id) => id.includes("thinking") || id.includes("2.5"),
+    extractEntries: (data) => (data?.models ?? []).filter((e) => !e.supportedGenerationMethods || e.supportedGenerationMethods.includes("generateContent")).map((e) => ({
+      id: e.name.replace(/^models\//, ""),
+      name: e.displayName ?? e.name.replace(/^models\//, ""),
+      contextWindow: e.inputTokenLimit,
+      maxTokens: e.outputTokenLimit
+    }))
+  }),
+  groq: () => ({
+    url: "https://api.groq.com/openai/v1/models",
+    headers: (key) => ({ Authorization: `Bearer ${key}` }),
+    piApiType: "openai-completions",
+    piBaseUrl: "https://api.groq.com/openai/v1",
+    providerName: "groq",
+    filterChat: () => true,
+    // Groq only serves chat models
+    isReasoning: (id) => id.includes("deepseek-r1"),
+    extractEntries: (data) => (data?.data ?? []).map((e) => ({ id: e.id, name: e.id }))
+  }),
+  mistral: () => ({
+    url: "https://api.mistral.ai/v1/models",
+    headers: (key) => ({ Authorization: `Bearer ${key}` }),
+    piApiType: "openai-completions",
+    piBaseUrl: "https://api.mistral.ai/v1",
+    providerName: "mistral",
+    filterChat: (id) => !id.includes("embed"),
+    isReasoning: (id) => id.includes("large") || id.includes("codestral"),
+    extractEntries: (data) => (data?.data ?? []).map((e) => ({ id: e.id, name: e.id }))
+  }),
+  xai: () => ({
+    url: "https://api.x.ai/v1/models",
+    headers: (key) => ({ Authorization: `Bearer ${key}` }),
+    piApiType: "openai-completions",
+    piBaseUrl: "https://api.x.ai/v1",
+    providerName: "xai",
+    filterChat: (id) => id.startsWith("grok"),
+    isReasoning: (id) => id.includes("think"),
+    extractEntries: (data) => (data?.data ?? []).map((e) => ({ id: e.id, name: e.id }))
+  }),
+  openrouter: () => ({
+    url: "https://openrouter.ai/api/v1/models",
+    headers: (key) => ({ Authorization: `Bearer ${key}` }),
+    piApiType: "openai-completions",
+    piBaseUrl: "https://openrouter.ai/api/v1",
+    providerName: "openrouter",
+    filterChat: () => true,
+    // OpenRouter only lists usable models
+    isReasoning: (id) => id.includes("o1") || id.includes("o3") || id.includes("thinking") || id.includes("deepseek-r1"),
+    extractEntries: (data) => (data?.data ?? []).map((e) => ({
+      id: e.id,
+      name: e.name ?? e.id,
+      contextWindow: e.context_length,
+      maxTokens: e.max_completion_tokens ?? e.top_provider?.max_completion_tokens
+    }))
+  }),
+  cerebras: () => ({
+    url: "https://api.cerebras.ai/v1/models",
+    headers: (key) => ({ Authorization: `Bearer ${key}` }),
+    piApiType: "openai-completions",
+    piBaseUrl: "https://api.cerebras.ai/v1",
+    providerName: "cerebras",
+    filterChat: () => true,
+    isReasoning: () => false,
+    extractEntries: (data) => (data?.data ?? []).map((e) => ({ id: e.id, name: e.id }))
+  })
+};
 function extractText(msg) {
   return msg.content.filter((c) => c.type === "text").map((c) => c.text).join("");
 }
@@ -293,6 +392,9 @@ var PiAiProvider = class {
   modalities = ["llm"];
   isLocal = false;
   keys;
+  // Dynamically discovered models not in pi-ai's static catalog
+  dynamicModels = /* @__PURE__ */ new Map();
+  dynamicModelsFetched = false;
   constructor(keys) {
     this.keys = {};
     for (const [k, v] of Object.entries(keys)) {
@@ -316,11 +418,14 @@ var PiAiProvider = class {
   }
   async listModels(modality) {
     if (modality && modality !== "llm") return [];
+    await this.ensureDynamicModels();
     const models = [];
+    const seenIds = /* @__PURE__ */ new Set();
     for (const provider of KNOWN_PROVIDERS) {
       try {
         const providerModels = (0, import_pi_ai.getModels)(provider);
         for (const m of providerModels) {
+          seenIds.add(m.id);
           models.push({
             id: m.id,
             provider: "pi-ai",
@@ -342,10 +447,31 @@ var PiAiProvider = class {
       } catch {
       }
     }
+    for (const [id, m] of this.dynamicModels) {
+      if (seenIds.has(id)) continue;
+      models.push({
+        id: m.id,
+        provider: "pi-ai",
+        name: m.name || m.id,
+        modality: "llm",
+        local: false,
+        cost: {
+          price: m.cost.input ?? 0,
+          unit: m.cost.input > 0 ? "per_1m_tokens" : "free"
+        },
+        capabilities: {
+          contextWindow: m.contextWindow,
+          maxTokens: m.maxTokens,
+          supportsVision: m.input.includes("image"),
+          supportsStreaming: true
+        }
+      });
+    }
     return models;
   }
   async chat(options) {
     const start = Date.now();
+    await this.ensureDynamicModels();
     const { model, provider } = this.findModel(options.model);
     if (!model || !provider) {
       throw new Error(`Model not found: ${options.model ?? "default"}`);
@@ -378,20 +504,9 @@ var PiAiProvider = class {
   }
   stream(options) {
     const start = Date.now();
-    const { model, provider } = this.findModel(options.model);
-    if (!model || !provider) {
-      throw new Error(`Model not found: ${options.model ?? "default"}`);
-    }
-    const context = {
-      systemPrompt: options.messages.find((m) => m.role === "system")?.content,
-      messages: options.messages.filter((m) => m.role !== "system").map((m) => ({
-        role: m.role,
-        content: m.content,
-        timestamp: Date.now()
-      }))
-    };
-    const piStream = (0, import_pi_ai.stream)(model, context);
     const self = this;
+    let innerStream;
+    let providerModel;
     let aborted = false;
     let resolveResult = null;
     let rejectResult = null;
@@ -399,10 +514,30 @@ var PiAiProvider = class {
       resolveResult = resolve;
       rejectResult = reject;
     });
+    const ensureModel = async () => {
+      if (!providerModel) {
+        await self.ensureDynamicModels();
+        const found = self.findModel(options.model);
+        if (!found.model || !found.provider) {
+          throw new Error(`Model not found: ${options.model ?? "default"}`);
+        }
+        providerModel = { model: found.model, provider: found.provider };
+        const context = {
+          systemPrompt: options.messages.find((m) => m.role === "system")?.content,
+          messages: options.messages.filter((m) => m.role !== "system").map((m) => ({
+            role: m.role,
+            content: m.content,
+            timestamp: Date.now()
+          }))
+        };
+        innerStream = (0, import_pi_ai.stream)(providerModel.model, context);
+      }
+    };
     const asyncIterator = {
       async *[Symbol.asyncIterator]() {
         try {
-          for await (const chunk of piStream) {
+          await ensureModel();
+          for await (const chunk of innerStream) {
            if (aborted) break;
            if (chunk.type === "text_delta") {
              yield { type: "text_delta", delta: chunk.delta };
@@ -410,7 +545,7 @@ var PiAiProvider = class {
              yield { type: "thinking_delta", delta: chunk.delta };
            }
          }
-          const final = await piStream.result();
+          const final = await innerStream.result();
          const inputTokens = final.usage?.input ?? 0;
          const outputTokens = final.usage?.output ?? 0;
          const result = {
@@ -444,6 +579,78 @@ var PiAiProvider = class {
       }
     };
   }
+  // --- Dynamic Model Discovery (ALL providers) ---
+  async ensureDynamicModels() {
+    if (this.dynamicModelsFetched) return;
+    this.dynamicModelsFetched = true;
+    const staticIds = /* @__PURE__ */ new Set();
+    for (const provider of KNOWN_PROVIDERS) {
+      try {
+        for (const m of (0, import_pi_ai.getModels)(provider)) {
+          staticIds.add(m.id);
+        }
+      } catch {
+      }
+    }
+    const fetchPromises = [];
+    for (const [providerKey, configFactory] of Object.entries(PROVIDER_APIS)) {
+      const apiKey = this.keys[providerKey];
+      if (!apiKey) continue;
+      fetchPromises.push(this.fetchProviderModels(configFactory(apiKey), apiKey, staticIds));
+    }
+    await Promise.allSettled(fetchPromises);
+  }
+  async fetchProviderModels(config, apiKey, staticIds) {
+    try {
+      const controller = new AbortController();
+      const timer = setTimeout(() => controller.abort(), FETCH_TIMEOUT_MS);
+      try {
+        const headers = config.headers(apiKey);
+        const res = await fetch(config.url, {
+          headers: Object.keys(headers).length > 0 ? headers : void 0,
+          signal: controller.signal
+        });
+        if (!res.ok) return;
+        const data = await res.json();
+        const entries = config.extractEntries(data);
+        const templateModel = this.findStaticTemplate(config.providerName);
+        for (const entry of entries) {
+          const id = entry.id;
+          if (!config.filterChat(id)) continue;
+          if (staticIds.has(id)) continue;
+          this.dynamicModels.set(id, {
+            id,
+            name: entry.name ?? id,
+            api: config.piApiType,
+            provider: config.providerName,
+            baseUrl: config.piBaseUrl,
+            reasoning: config.isReasoning(id),
+            input: ["text", "image"],
+            cost: templateModel?.cost ?? { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
+            contextWindow: entry.contextWindow ?? templateModel?.contextWindow ?? 128e3,
+            maxTokens: entry.maxTokens ?? templateModel?.maxTokens ?? 16384
+          });
+        }
+      } finally {
+        clearTimeout(timer);
+      }
+    } catch {
+    }
+  }
+  findStaticTemplate(providerName) {
+    try {
+      const models = (0, import_pi_ai.getModels)(providerName);
+      return models[0] ?? null;
+    } catch {
+      return null;
+    }
+  }
+  /** Force re-fetch of dynamic models from provider APIs */
+  async refreshDynamicModels() {
+    this.dynamicModelsFetched = false;
+    this.dynamicModels.clear();
+    await this.ensureDynamicModels();
+  }
   findModel(modelId) {
     for (const provider of KNOWN_PROVIDERS) {
       try {
@@ -453,6 +660,10 @@ var PiAiProvider = class {
       } catch {
       }
     }
+    if (modelId) {
+      const dynamic = this.dynamicModels.get(modelId);
+      if (dynamic) return { model: dynamic, provider: String(dynamic.provider) };
+    }
     return { model: null, provider: null };
   }
 };
@@ -826,51 +1037,111 @@ var LocalTTSProvider = class {
 
 // src/providers/huggingface.ts
 var import_inference = require("@huggingface/inference");
+var HF_HUB_API = "https://huggingface.co/api/models";
+var FETCH_TIMEOUT_MS2 = 1e4;
+var PIPELINE_TAG_MAP = {
+  "text-generation": { modality: "llm", limit: 50 },
+  "text-to-image": { modality: "image", limit: 50 },
+  "text-to-speech": { modality: "tts", limit: 30 }
+};
+var DEFAULT_MODELS = [
+  { id: "stabilityai/stable-diffusion-xl-base-1.0", provider: "huggingface", name: "SDXL Base", modality: "image", local: false, cost: { price: 0, unit: "free" } },
+  { id: "facebook/mms-tts-eng", provider: "huggingface", name: "MMS TTS English", modality: "tts", local: false, cost: { price: 0, unit: "free" } },
+  { id: "meta-llama/Llama-3.1-8B-Instruct", provider: "huggingface", name: "Llama 3.1 8B", modality: "llm", local: false, cost: { price: 0, unit: "free" } }
+];
 var HuggingFaceProvider = class {
   id = "huggingface";
   name = "HuggingFace Inference";
   modalities = ["image", "tts", "llm"];
   isLocal = false;
   client;
+  token;
+  dynamicModels = null;
   constructor(token) {
+    this.token = token;
     this.client = new import_inference.HfInference(token);
   }
   async ping() {
     return true;
   }
   async listModels(modality) {
+    if (!this.dynamicModels) {
+      await this.fetchHubModels();
+    }
+    const all = this.dynamicModels ?? DEFAULT_MODELS;
+    if (modality) return all.filter((m) => m.modality === modality);
+    return all;
+  }
+  async fetchHubModels() {
+    const seenIds = /* @__PURE__ */ new Set();
     const models = [];
-    if (!modality || modality === "image") {
-      models.push({
-        id: "stabilityai/stable-diffusion-xl-base-1.0",
-        provider: "huggingface",
-        name: "SDXL Base",
-        modality: "image",
-        local: false,
-        cost: { price: 0, unit: "free" }
-      });
+    for (const d of DEFAULT_MODELS) {
+      seenIds.add(d.id);
+      models.push(d);
     }
-    if (!modality || modality === "tts") {
-      models.push({
-        id: "facebook/mms-tts-eng",
-        provider: "huggingface",
-        name: "MMS TTS English",
-        modality: "tts",
-        local: false,
-        cost: { price: 0, unit: "free" }
-      });
+    const fetches = Object.entries(PIPELINE_TAG_MAP).map(
+      ([tag, { modality, limit }]) => this.fetchByPipelineTag(tag, modality, limit)
+    );
+    const results = await Promise.allSettled(fetches);
+    for (const result of results) {
+      if (result.status !== "fulfilled") continue;
+      for (const model of result.value) {
+        if (seenIds.has(model.id)) continue;
+        seenIds.add(model.id);
+        models.push(model);
+      }
     }
-    if (!modality || modality === "llm") {
-      models.push({
-        id: "meta-llama/Llama-3.1-8B-Instruct",
-        provider: "huggingface",
-        name: "Llama 3.1 8B",
-        modality: "llm",
-        local: false,
-        cost: { price: 0, unit: "free" }
-      });
+    this.dynamicModels = models;
+  }
+  async fetchByPipelineTag(pipelineTag, modality, limit) {
+    try {
+      const controller = new AbortController();
+      const timer = setTimeout(() => controller.abort(), FETCH_TIMEOUT_MS2);
+      try {
+        const params = new URLSearchParams({
+          pipeline_tag: pipelineTag,
+          inference_provider: "all",
+          sort: "trendingScore",
+          limit: String(limit),
+          "expand[]": "inferenceProviderMapping"
+        });
+        const res = await fetch(`${HF_HUB_API}?${params}`, {
+          headers: this.token ? { Authorization: `Bearer ${this.token}` } : {},
+          signal: controller.signal
+        });
+        if (!res.ok) return [];
+        const data = await res.json();
+        return data.filter((entry) => entry.id || entry.modelId).map((entry) => {
+          const id = entry.id ?? entry.modelId;
+          const providers = (entry.inferenceProviderMapping ?? []).filter((p) => p.status === "live").map((p) => p.provider);
+          const pricingProvider = (entry.inferenceProviderMapping ?? []).find((p) => p.providerDetails?.pricing);
+          const pricing = pricingProvider?.providerDetails?.pricing;
+          const contextLength = (entry.inferenceProviderMapping ?? []).find((p) => p.providerDetails?.context_length)?.providerDetails?.context_length;
+          return {
+            id,
+            provider: "huggingface",
+            name: id.split("/").pop() ?? id,
+            modality,
+            local: false,
+            cost: {
+              price: pricing?.input ?? 0,
+              unit: pricing ? "per_1m_tokens" : "free"
+            },
+            capabilities: {
+              ...modality === "llm" ? {
+                contextWindow: contextLength,
+                supportsStreaming: true
+              } : {},
+              ...providers.length > 0 ? { inferenceProviders: providers } : {}
+            }
+          };
+        });
+      } finally {
+        clearTimeout(timer);
+      }
+    } catch {
+      return [];
+    }
+  }
-    return models;
-  }
   async chat(options) {
     const start = Date.now();