noosphere 0.2.1 → 0.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -63,6 +63,56 @@ const audio = await ai.speak({
63
63
 
64
64
  Noosphere **automatically discovers the latest models from EVERY provider's API at runtime** — across **all 4 modalities** (LLM, image, video, TTS). When Google releases a new Gemini model, when OpenAI drops GPT-5, when FAL adds a new video model, when a new image model trends on HuggingFace — **you get them immediately**, without updating Noosphere or any dependency.
65
65
 
66
+ ### Provider Logos — SVG & PNG for Every Model
67
+
68
+ Every model returned by the auto-fetch includes a `logo` field with the provider's official logo in SVG and PNG formats. For aggregator providers (OpenRouter, HuggingFace), logos are resolved to the **real upstream provider** — so an `x-ai/grok-4` model gets the xAI logo, not OpenRouter's.
69
+
70
+ ```typescript
71
+ const models = await ai.getModels('llm');
72
+
73
+ for (const model of models) {
74
+ console.log(model.id, model.logo);
75
+ // "gpt-5" { svg: "https://cdn.simpleicons.org/openai", png: "https://cdn.brandfetch.io/.../icon.png" }
76
+ // "claude-opus-4-6" { svg: "https://cdn.simpleicons.org/anthropic", png: "https://cdn.brandfetch.io/.../icon.png" }
77
+ // "gemini-2.5-pro" { svg: "https://cdn.simpleicons.org/google", png: "https://cdn.brandfetch.io/.../icon.png" }
78
+ }
79
+
80
+ // Providers also have logos:
81
+ const providers = await ai.getProviders();
82
+ providers.forEach(p => console.log(p.id, p.logo));
83
+
84
+ // Use in your UI:
85
+ // <img src={model.logo.svg} alt={model.provider} />
86
+ ```
87
+
88
+ **Covered providers:** OpenAI, Anthropic, Google, Groq, Mistral, xAI, OpenRouter, Cerebras, Meta, DeepSeek, Microsoft, NVIDIA, Qwen, Cohere, Perplexity, Amazon, FAL, HuggingFace, ComfyUI, Piper, Kokoro, Ollama, SambaNova, Together, Fireworks, Replicate, Nebius, Novita.
89
+
90
+ You can also import the logo registry directly:
91
+
92
+ ```typescript
93
+ import { getProviderLogo, PROVIDER_LOGOS } from 'noosphere';
94
+
95
+ const logo = getProviderLogo('anthropic');
96
+ // { svg: "https://cdn.simpleicons.org/anthropic", png: "https://cdn.brandfetch.io/.../icon.png" }
97
+
98
+ // Or access the full map:
99
+ console.log(Object.keys(PROVIDER_LOGOS));
100
+ // ['openai', 'anthropic', 'google', 'groq', 'mistral', 'xai', 'openrouter', ...]
101
+ ```
102
+
103
+ For HuggingFace models with multiple inference providers, per-provider logos are available in `capabilities.inferenceProviderLogos`:
104
+
105
+ ```typescript
106
+ const hfModels = await ai.getModels('llm');
107
+ const qwen = hfModels.find(m => m.id === 'Qwen/Qwen2.5-72B-Instruct');
108
+
109
+ console.log(qwen.capabilities.inferenceProviderLogos);
110
+ // {
111
+ // "together": { svg: "https://cdn.simpleicons.org/togetherai", png: "..." },
112
+ // "fireworks-ai": { png: "https://cdn.brandfetch.io/.../icon.png" },
113
+ // }
114
+ ```
115
+
66
116
  ### The Problem It Solves
67
117
 
68
118
  Traditional AI libraries rely on **static model catalogs** hardcoded at build time. The `@mariozechner/pi-ai` dependency ships with ~246 LLM models in a pre-generated `models.generated.js` file. HuggingFace providers typically hardcode 3-5 default models. When a provider releases a new model, you'd have to wait for the library maintainer to update, publish, and then you'd `npm update`. This lag can be days or weeks.
package/dist/index.cjs CHANGED
@@ -21,7 +21,9 @@ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: tru
21
21
  var index_exports = {};
22
22
  __export(index_exports, {
23
23
  Noosphere: () => Noosphere,
24
- NoosphereError: () => NoosphereError
24
+ NoosphereError: () => NoosphereError,
25
+ PROVIDER_LOGOS: () => PROVIDER_LOGOS,
26
+ getProviderLogo: () => getProviderLogo
25
27
  });
26
28
  module.exports = __toCommonJS(index_exports);
27
29
 
@@ -53,6 +55,16 @@ var NoosphereError = class extends Error {
53
55
  };
54
56
 
55
57
  // src/config.ts
58
// One-shot guard so dotenv loading happens at most once per process.
var _envLoaded = false;
/**
 * Best-effort .env loading via the optional @dotenvx/dotenvx package.
 * Silently no-ops when the package is missing or fails to initialize,
 * leaving process.env exactly as the host provided it.
 */
function loadEnv() {
  if (_envLoaded) return;
  _envLoaded = true;
  try {
    require("@dotenvx/dotenvx").config({ quiet: true });
  } catch {
    // optional dependency unavailable — continue with process.env as-is
  }
}
56
68
  var ENV_KEY_MAP = {
57
69
  openai: "OPENAI_API_KEY",
58
70
  anthropic: "ANTHROPIC_API_KEY",
@@ -76,6 +88,7 @@ var DEFAULT_RETRYABLE = [
76
88
  "TIMEOUT"
77
89
  ];
78
90
  function resolveConfig(input) {
91
+ loadEnv();
79
92
  const keys = {};
80
93
  for (const [name, envVar] of Object.entries(ENV_KEY_MAP)) {
81
94
  keys[name] = input.keys?.[name] ?? process.env[envVar];
@@ -117,6 +130,132 @@ function resolveConfig(input) {
117
130
  };
118
131
  }
119
132
 
133
// src/logos.ts
// Static registry of provider logo URLs (SVG and/or PNG), keyed by provider id.
var PROVIDER_LOGOS = {
  // --- Cloud LLM Providers ---
  openai: {
    svg: "https://cdn.simpleicons.org/openai",
    png: "https://cdn.brandfetch.io/idR3duQxYl/w/512/h/512/theme/dark/icon.png"
  },
  anthropic: {
    svg: "https://cdn.simpleicons.org/anthropic",
    png: "https://cdn.brandfetch.io/id2S-kXbuM/w/512/h/512/theme/dark/icon.png"
  },
  google: {
    svg: "https://cdn.simpleicons.org/google",
    png: "https://cdn.brandfetch.io/id6O2oGzv-/w/512/h/512/theme/dark/icon.png"
  },
  groq: {
    svg: "https://cdn.simpleicons.org/groq",
    png: "https://cdn.brandfetch.io/idTEBPz5KO/w/512/h/512/theme/dark/icon.png"
  },
  mistral: {
    svg: "https://cdn.simpleicons.org/mistral",
    png: "https://cdn.brandfetch.io/idnBOFq5eF/w/512/h/512/theme/dark/icon.png"
  },
  xai: {
    svg: "https://cdn.simpleicons.org/x",
    png: "https://cdn.brandfetch.io/idS5WhqBbM/w/512/h/512/theme/dark/icon.png"
  },
  openrouter: {
    svg: "https://openrouter.ai/favicon.svg",
    png: "https://openrouter.ai/favicon.png"
  },
  cerebras: {
    svg: "https://cdn.simpleicons.org/cerebras",
    png: "https://cdn.brandfetch.io/idGa4PRFP0/w/512/h/512/theme/dark/icon.png"
  },
  // --- Media Providers ---
  fal: {
    svg: "https://fal.ai/favicon.svg",
    png: "https://fal.ai/favicon.png"
  },
  huggingface: {
    svg: "https://cdn.simpleicons.org/huggingface",
    png: "https://cdn.brandfetch.io/idnrPPHe87/w/512/h/512/theme/dark/icon.png"
  },
  // --- Local Providers ---
  comfyui: {
    svg: "https://raw.githubusercontent.com/comfyanonymous/ComfyUI/master/web/assets/icon.svg",
    png: "https://raw.githubusercontent.com/comfyanonymous/ComfyUI/master/web/assets/icon.png"
  },
  piper: {
    png: "https://raw.githubusercontent.com/rhasspy/piper/master/logo.png"
  },
  kokoro: {
    png: "https://raw.githubusercontent.com/hexgrad/kokoro/main/assets/icon.png"
  },
  ollama: {
    svg: "https://cdn.simpleicons.org/ollama",
    png: "https://cdn.brandfetch.io/idtesMoSFj/w/512/h/512/theme/dark/icon.png"
  },
  // --- Model Org Providers (from OpenRouter model prefixes) ---
  meta: {
    svg: "https://cdn.simpleicons.org/meta",
    png: "https://cdn.brandfetch.io/idmKk_rq7Y/w/512/h/512/theme/dark/icon.png"
  },
  deepseek: {
    png: "https://cdn.brandfetch.io/id1BWKUVWI/w/512/h/512/theme/dark/icon.png"
  },
  microsoft: {
    svg: "https://cdn.simpleicons.org/microsoft",
    png: "https://cdn.brandfetch.io/idchmboHEZ/w/512/h/512/theme/dark/icon.png"
  },
  nvidia: {
    svg: "https://cdn.simpleicons.org/nvidia",
    png: "https://cdn.brandfetch.io/id1JcGHuZN/w/512/h/512/theme/dark/icon.png"
  },
  qwen: {
    png: "https://img.alicdn.com/imgextra/i1/O1CN01BUp2gU1sRZigvazUo_!!6000000005764-2-tps-228-228.png"
  },
  cohere: {
    svg: "https://cdn.simpleicons.org/cohere",
    png: "https://cdn.brandfetch.io/idiDnz1fvB/w/512/h/512/theme/dark/icon.png"
  },
  perplexity: {
    svg: "https://cdn.simpleicons.org/perplexity",
    png: "https://cdn.brandfetch.io/idwWX3Neii/w/512/h/512/theme/dark/icon.png"
  },
  amazon: {
    svg: "https://cdn.simpleicons.org/amazonaws",
    png: "https://cdn.brandfetch.io/idawORoPJZ/w/512/h/512/theme/dark/icon.png"
  },
  // --- HuggingFace Inference Providers ---
  "hf-inference": {
    svg: "https://cdn.simpleicons.org/huggingface",
    png: "https://cdn.brandfetch.io/idnrPPHe87/w/512/h/512/theme/dark/icon.png"
  },
  "sambanova": {
    png: "https://cdn.brandfetch.io/id__2e5yMY/w/512/h/512/theme/dark/icon.png"
  },
  "together": {
    svg: "https://cdn.simpleicons.org/togetherai",
    png: "https://cdn.brandfetch.io/idH5EoFVaH/w/512/h/512/theme/dark/icon.png"
  },
  "fireworks-ai": {
    png: "https://cdn.brandfetch.io/idj1VQ2O4C/w/512/h/512/theme/dark/icon.png"
  },
  "replicate": {
    svg: "https://cdn.simpleicons.org/replicate",
    png: "https://cdn.brandfetch.io/idWKE4rRaH/w/512/h/512/theme/dark/icon.png"
  },
  "nebius": {
    png: "https://cdn.brandfetch.io/idiUqSQ52b/w/512/h/512/theme/dark/icon.png"
  },
  "novita": {
    png: "https://novita.ai/favicon.png"
  }
};
/**
 * Resolve a provider id to its logo entry.
 * Tries an exact own-key match first, then a normalized match that ignores
 * case and the characters -, _ and whitespace (e.g. "Fireworks_AI" matches
 * the "fireworks-ai" key).
 * @param {string|undefined} providerId - provider identifier; falsy returns undefined
 * @returns {{svg?: string, png?: string}|undefined} logo URLs, or undefined when unknown
 */
function getProviderLogo(providerId) {
  if (!providerId) return void 0;
  // BUGFIX: use an own-property check. A plain truthy bracket lookup also
  // matched inherited Object.prototype keys ("constructor", "toString", ...)
  // and returned a Function instead of undefined for those ids.
  if (Object.prototype.hasOwnProperty.call(PROVIDER_LOGOS, providerId)) {
    return PROVIDER_LOGOS[providerId];
  }
  const normalized = providerId.toLowerCase().replace(/[-_\s]/g, "");
  // Object.entries only yields own keys, so no prototype hazard here.
  for (const [key, logo] of Object.entries(PROVIDER_LOGOS)) {
    if (key.replace(/[-_\s]/g, "") === normalized) return logo;
  }
  return void 0;
}
258
+
120
259
  // src/registry.ts
121
260
  var Registry = class {
122
261
  providers = /* @__PURE__ */ new Map();
@@ -220,7 +359,8 @@ var Registry = class {
220
359
  local: provider.isLocal,
221
360
  status: "online",
222
361
  // ping-based status is set externally
223
- modelCount: cached?.models.length ?? 0
362
+ modelCount: cached?.models.length ?? 0,
363
+ logo: getProviderLogo(provider.id)
224
364
  });
225
365
  }
226
366
  return infos;
@@ -279,7 +419,6 @@ var UsageTracker = class {
279
419
  // src/providers/pi-ai.ts
280
420
  var import_pi_ai = require("@mariozechner/pi-ai");
281
421
  var KNOWN_PROVIDERS = ["anthropic", "google", "openai", "xai", "groq", "cerebras", "openrouter", "zai"];
282
- var LOCAL_PROVIDERS = /* @__PURE__ */ new Set(["ollama"]);
283
422
  var FETCH_TIMEOUT_MS = 8e3;
284
423
  var OPENAI_CHAT_PREFIXES = ["gpt-", "o1", "o3", "o4", "chatgpt-", "codex-"];
285
424
  var OPENAI_REASONING_PREFIXES = ["o1", "o3", "o4"];
@@ -420,35 +559,9 @@ var PiAiProvider = class {
420
559
  if (modality && modality !== "llm") return [];
421
560
  await this.ensureDynamicModels();
422
561
  const models = [];
423
- const seenIds = /* @__PURE__ */ new Set();
424
- for (const provider of KNOWN_PROVIDERS) {
425
- try {
426
- const providerModels = (0, import_pi_ai.getModels)(provider);
427
- for (const m of providerModels) {
428
- seenIds.add(m.id);
429
- models.push({
430
- id: m.id,
431
- provider: "pi-ai",
432
- name: m.name || m.id,
433
- modality: "llm",
434
- local: LOCAL_PROVIDERS.has(String(m.provider)),
435
- cost: {
436
- price: m.cost.input ?? 0,
437
- unit: m.cost.input > 0 ? "per_1m_tokens" : "free"
438
- },
439
- capabilities: {
440
- contextWindow: m.contextWindow,
441
- maxTokens: m.maxTokens,
442
- supportsVision: m.input.includes("image"),
443
- supportsStreaming: true
444
- }
445
- });
446
- }
447
- } catch {
448
- }
449
- }
450
- for (const [id, m] of this.dynamicModels) {
451
- if (seenIds.has(id)) continue;
562
+ for (const [, m] of this.dynamicModels) {
563
+ const providerName = String(m.provider);
564
+ const logoProvider = this.inferLogoProvider(m.id, providerName);
452
565
  models.push({
453
566
  id: m.id,
454
567
  provider: "pi-ai",
@@ -459,6 +572,7 @@ var PiAiProvider = class {
459
572
  price: m.cost.input ?? 0,
460
573
  unit: m.cost.input > 0 ? "per_1m_tokens" : "free"
461
574
  },
575
+ logo: getProviderLogo(logoProvider),
462
576
  capabilities: {
463
577
  contextWindow: m.contextWindow,
464
578
  maxTokens: m.maxTokens,
@@ -583,24 +697,15 @@ var PiAiProvider = class {
583
697
  async ensureDynamicModels() {
584
698
  if (this.dynamicModelsFetched) return;
585
699
  this.dynamicModelsFetched = true;
586
- const staticIds = /* @__PURE__ */ new Set();
587
- for (const provider of KNOWN_PROVIDERS) {
588
- try {
589
- for (const m of (0, import_pi_ai.getModels)(provider)) {
590
- staticIds.add(m.id);
591
- }
592
- } catch {
593
- }
594
- }
595
700
  const fetchPromises = [];
596
701
  for (const [providerKey, configFactory] of Object.entries(PROVIDER_APIS)) {
597
702
  const apiKey = this.keys[providerKey];
598
703
  if (!apiKey) continue;
599
- fetchPromises.push(this.fetchProviderModels(configFactory(apiKey), apiKey, staticIds));
704
+ fetchPromises.push(this.fetchProviderModels(configFactory(apiKey), apiKey));
600
705
  }
601
706
  await Promise.allSettled(fetchPromises);
602
707
  }
603
- async fetchProviderModels(config, apiKey, staticIds) {
708
+ async fetchProviderModels(config, apiKey) {
604
709
  try {
605
710
  const controller = new AbortController();
606
711
  const timer = setTimeout(() => controller.abort(), FETCH_TIMEOUT_MS);
@@ -613,11 +718,11 @@ var PiAiProvider = class {
613
718
  if (!res.ok) return;
614
719
  const data = await res.json();
615
720
  const entries = config.extractEntries(data);
616
- const templateModel = this.findStaticTemplate(config.providerName);
721
+ const staticTemplate = this.findStaticTemplate(config.providerName);
617
722
  for (const entry of entries) {
618
723
  const id = entry.id;
619
724
  if (!config.filterChat(id)) continue;
620
- if (staticIds.has(id)) continue;
725
+ const staticMatch = this.findStaticModel(config.providerName, id);
621
726
  this.dynamicModels.set(id, {
622
727
  id,
623
728
  name: entry.name ?? id,
@@ -625,10 +730,10 @@ var PiAiProvider = class {
625
730
  provider: config.providerName,
626
731
  baseUrl: config.piBaseUrl,
627
732
  reasoning: config.isReasoning(id),
628
- input: ["text", "image"],
629
- cost: templateModel?.cost ?? { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
630
- contextWindow: entry.contextWindow ?? templateModel?.contextWindow ?? 128e3,
631
- maxTokens: entry.maxTokens ?? templateModel?.maxTokens ?? 16384
733
+ input: staticMatch?.input ?? ["text", "image"],
734
+ cost: staticMatch?.cost ?? staticTemplate?.cost ?? { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 },
735
+ contextWindow: entry.contextWindow ?? staticMatch?.contextWindow ?? staticTemplate?.contextWindow ?? 128e3,
736
+ maxTokens: entry.maxTokens ?? staticMatch?.maxTokens ?? staticTemplate?.maxTokens ?? 16384
632
737
  });
633
738
  }
634
739
  } finally {
@@ -645,25 +750,75 @@ var PiAiProvider = class {
645
750
  return null;
646
751
  }
647
752
  }
753
+ findStaticModel(providerName, modelId) {
754
+ try {
755
+ const models = (0, import_pi_ai.getModels)(providerName);
756
+ return models.find((m) => m.id === modelId) ?? null;
757
+ } catch {
758
+ return null;
759
+ }
760
+ }
648
761
  /** Force re-fetch of dynamic models from provider APIs */
649
762
  async refreshDynamicModels() {
650
763
  this.dynamicModelsFetched = false;
651
764
  this.dynamicModels.clear();
652
765
  await this.ensureDynamicModels();
653
766
  }
767
+ /**
768
+ * Infer the real provider from model ID for logo resolution.
769
+ * e.g. "x-ai/grok-4" → "xai", "anthropic/claude-4" → "anthropic"
770
+ */
771
+ inferLogoProvider(modelId, fallback) {
772
+ const MODEL_PREFIX_TO_PROVIDER = {
773
+ "openai/": "openai",
774
+ "gpt-": "openai",
775
+ "o1-": "openai",
776
+ "o3-": "openai",
777
+ "o4-": "openai",
778
+ "chatgpt-": "openai",
779
+ "anthropic/": "anthropic",
780
+ "claude-": "anthropic",
781
+ "google/": "google",
782
+ "gemini-": "google",
783
+ "gemma-": "google",
784
+ "x-ai/": "xai",
785
+ "grok-": "xai",
786
+ "meta-llama/": "meta",
787
+ "mistralai/": "mistral",
788
+ "mistral-": "mistral",
789
+ "deepseek/": "deepseek",
790
+ "microsoft/": "microsoft",
791
+ "nvidia/": "nvidia",
792
+ "qwen/": "qwen",
793
+ "cohere/": "cohere",
794
+ "perplexity/": "perplexity",
795
+ "amazon/": "amazon"
796
+ };
797
+ const lower = modelId.toLowerCase();
798
+ for (const [prefix, provider] of Object.entries(MODEL_PREFIX_TO_PROVIDER)) {
799
+ if (lower.startsWith(prefix)) return provider;
800
+ }
801
+ return fallback;
802
+ }
654
803
  findModel(modelId) {
804
+ if (modelId) {
805
+ const dynamic = this.dynamicModels.get(modelId);
806
+ if (dynamic) return { model: dynamic, provider: String(dynamic.provider) };
807
+ }
808
+ if (!modelId) {
809
+ const first = this.dynamicModels.values().next();
810
+ if (!first.done && first.value) {
811
+ return { model: first.value, provider: String(first.value.provider) };
812
+ }
813
+ }
655
814
  for (const provider of KNOWN_PROVIDERS) {
656
815
  try {
657
816
  const models = (0, import_pi_ai.getModels)(provider);
658
- const found = modelId ? models.find((m) => m.id === modelId) : models[0];
817
+ const found = modelId ? models.find((m) => m.id === modelId) : void 0;
659
818
  if (found) return { model: found, provider };
660
819
  } catch {
661
820
  }
662
821
  }
663
- if (modelId) {
664
- const dynamic = this.dynamicModels.get(modelId);
665
- if (dynamic) return { model: dynamic, provider: String(dynamic.provider) };
666
- }
667
822
  return { model: null, provider: null };
668
823
  }
669
824
  };
@@ -704,7 +859,8 @@ var FalProvider = class {
704
859
  name: entry.modelId.replace("fal-ai/", ""),
705
860
  modality: inferredModality,
706
861
  local: false,
707
- cost: { price: entry.price, unit: entry.unit }
862
+ cost: { price: entry.price, unit: entry.unit },
863
+ logo: getProviderLogo("fal")
708
864
  });
709
865
  }
710
866
  return models;
@@ -857,6 +1013,7 @@ var ComfyUIProvider = class {
857
1013
  const res = await fetch(`${this.baseUrl}/object_info`);
858
1014
  if (!res.ok) return [];
859
1015
  const models = [];
1016
+ const logo = getProviderLogo("comfyui");
860
1017
  if (!modality || modality === "image") {
861
1018
  models.push({
862
1019
  id: "comfyui-txt2img",
@@ -865,6 +1022,7 @@ var ComfyUIProvider = class {
865
1022
  modality: "image",
866
1023
  local: true,
867
1024
  cost: { price: 0, unit: "free" },
1025
+ logo,
868
1026
  capabilities: { maxWidth: 2048, maxHeight: 2048, supportsNegativePrompt: true }
869
1027
  });
870
1028
  }
@@ -876,6 +1034,7 @@ var ComfyUIProvider = class {
876
1034
  modality: "video",
877
1035
  local: true,
878
1036
  cost: { price: 0, unit: "free" },
1037
+ logo,
879
1038
  capabilities: { maxDuration: 10, supportsImageToVideo: true }
880
1039
  });
881
1040
  }
@@ -987,6 +1146,7 @@ var LocalTTSProvider = class {
987
1146
  voices = data.data ?? [];
988
1147
  }
989
1148
  }
1149
+ const logo = getProviderLogo(this.id);
990
1150
  return voices.map((v) => ({
991
1151
  id: v.id,
992
1152
  provider: this.id,
@@ -994,6 +1154,7 @@ var LocalTTSProvider = class {
994
1154
  modality: "tts",
995
1155
  local: true,
996
1156
  cost: { price: 0, unit: "free" },
1157
+ logo,
997
1158
  capabilities: { voices: voices.map((vv) => vv.id) }
998
1159
  }));
999
1160
  } catch {
@@ -1044,11 +1205,6 @@ var PIPELINE_TAG_MAP = {
1044
1205
  "text-to-image": { modality: "image", limit: 50 },
1045
1206
  "text-to-speech": { modality: "tts", limit: 30 }
1046
1207
  };
1047
- var DEFAULT_MODELS = [
1048
- { id: "stabilityai/stable-diffusion-xl-base-1.0", provider: "huggingface", name: "SDXL Base", modality: "image", local: false, cost: { price: 0, unit: "free" } },
1049
- { id: "facebook/mms-tts-eng", provider: "huggingface", name: "MMS TTS English", modality: "tts", local: false, cost: { price: 0, unit: "free" } },
1050
- { id: "meta-llama/Llama-3.1-8B-Instruct", provider: "huggingface", name: "Llama 3.1 8B", modality: "llm", local: false, cost: { price: 0, unit: "free" } }
1051
- ];
1052
1208
  var HuggingFaceProvider = class {
1053
1209
  id = "huggingface";
1054
1210
  name = "HuggingFace Inference";
@@ -1068,17 +1224,13 @@ var HuggingFaceProvider = class {
1068
1224
  if (!this.dynamicModels) {
1069
1225
  await this.fetchHubModels();
1070
1226
  }
1071
- const all = this.dynamicModels ?? DEFAULT_MODELS;
1227
+ const all = this.dynamicModels ?? [];
1072
1228
  if (modality) return all.filter((m) => m.modality === modality);
1073
1229
  return all;
1074
1230
  }
1075
1231
  async fetchHubModels() {
1076
1232
  const seenIds = /* @__PURE__ */ new Set();
1077
1233
  const models = [];
1078
- for (const d of DEFAULT_MODELS) {
1079
- seenIds.add(d.id);
1080
- models.push(d);
1081
- }
1082
1234
  const fetches = Object.entries(PIPELINE_TAG_MAP).map(
1083
1235
  ([tag, { modality, limit }]) => this.fetchByPipelineTag(tag, modality, limit)
1084
1236
  );
@@ -1113,7 +1265,13 @@ var HuggingFaceProvider = class {
1113
1265
  const data = await res.json();
1114
1266
  return data.filter((entry) => entry.id || entry.modelId).map((entry) => {
1115
1267
  const id = entry.id ?? entry.modelId;
1116
- const providers = (entry.inferenceProviderMapping ?? []).filter((p) => p.status === "live").map((p) => p.provider);
1268
+ const liveProviders = (entry.inferenceProviderMapping ?? []).filter((p) => p.status === "live");
1269
+ const providers = liveProviders.map((p) => p.provider);
1270
+ const inferenceProviderLogos = {};
1271
+ for (const p of liveProviders) {
1272
+ const pLogo = getProviderLogo(p.provider);
1273
+ if (pLogo) inferenceProviderLogos[p.provider] = pLogo;
1274
+ }
1117
1275
  const pricingProvider = (entry.inferenceProviderMapping ?? []).find((p) => p.providerDetails?.pricing);
1118
1276
  const pricing = pricingProvider?.providerDetails?.pricing;
1119
1277
  const contextLength = (entry.inferenceProviderMapping ?? []).find((p) => p.providerDetails?.context_length)?.providerDetails?.context_length;
@@ -1127,12 +1285,14 @@ var HuggingFaceProvider = class {
1127
1285
  price: pricing?.input ?? 0,
1128
1286
  unit: pricing ? "per_1m_tokens" : "free"
1129
1287
  },
1288
+ logo: getProviderLogo("huggingface"),
1130
1289
  capabilities: {
1131
1290
  ...modality === "llm" ? {
1132
1291
  contextWindow: contextLength,
1133
1292
  supportsStreaming: true
1134
1293
  } : {},
1135
- ...providers.length > 0 ? { inferenceProviders: providers } : {}
1294
+ ...providers.length > 0 ? { inferenceProviders: providers } : {},
1295
+ ...Object.keys(inferenceProviderLogos).length > 0 ? { inferenceProviderLogos } : {}
1136
1296
  }
1137
1297
  };
1138
1298
  });
@@ -1584,6 +1744,8 @@ var Noosphere = class {
1584
1744
  // Annotate the CommonJS export names for ESM import in node:
1585
1745
  0 && (module.exports = {
1586
1746
  Noosphere,
1587
- NoosphereError
1747
+ NoosphereError,
1748
+ PROVIDER_LOGOS,
1749
+ getProviderLogo
1588
1750
  });
1589
1751
  //# sourceMappingURL=index.cjs.map