noosphere 0.1.0 → 0.1.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.cjs ADDED
@@ -0,0 +1,1318 @@
1
+ "use strict";
2
+ var __defProp = Object.defineProperty;
3
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
4
+ var __getOwnPropNames = Object.getOwnPropertyNames;
5
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
6
+ var __export = (target, all) => {
7
+ for (var name in all)
8
+ __defProp(target, name, { get: all[name], enumerable: true });
9
+ };
10
+ var __copyProps = (to, from, except, desc) => {
11
+ if (from && typeof from === "object" || typeof from === "function") {
12
+ for (let key of __getOwnPropNames(from))
13
+ if (!__hasOwnProp.call(to, key) && key !== except)
14
+ __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
15
+ }
16
+ return to;
17
+ };
18
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
19
+
20
+ // src/index.ts
21
+ var index_exports = {};
22
+ __export(index_exports, {
23
+ Noosphere: () => Noosphere,
24
+ NoosphereError: () => NoosphereError
25
+ });
26
+ module.exports = __toCommonJS(index_exports);
27
+
28
+ // src/errors.ts
29
// src/errors.ts
// Error codes that represent transient failures a caller may safely retry.
var RETRYABLE_CODES = /* @__PURE__ */ new Set([
  "PROVIDER_UNAVAILABLE",
  "RATE_LIMITED",
  "TIMEOUT",
  "GENERATION_FAILED"
]);
/**
 * Domain error for every failure surfaced by the library, carrying the
 * provider / modality / model context in which it occurred.
 */
var NoosphereError = class extends Error {
  code;
  provider;
  modality;
  model;
  cause;
  /**
   * @param message human-readable description
   * @param options { code, provider?, modality?, model?, cause? }
   */
  constructor(message, options) {
    super(message);
    const { code, provider, modality, model, cause } = options;
    this.name = "NoosphereError";
    this.code = code;
    this.provider = provider;
    this.modality = modality;
    this.model = model;
    this.cause = cause;
  }
  /** True when this error's code is in the retryable (transient) set. */
  isRetryable() {
    return RETRYABLE_CODES.has(this.code);
  }
};
54
+
55
+ // src/config.ts
56
// Maps provider name -> environment variable that holds its API key.
var ENV_KEY_MAP = {
  openai: "OPENAI_API_KEY",
  anthropic: "ANTHROPIC_API_KEY",
  google: "GEMINI_API_KEY",
  fal: "FAL_KEY",
  openrouter: "OPENROUTER_API_KEY",
  huggingface: "HUGGINGFACE_TOKEN",
  groq: "GROQ_API_KEY",
  mistral: "MISTRAL_API_KEY",
  xai: "XAI_API_KEY"
};
// Default endpoint (and env-var overrides) for each known local service.
var LOCAL_DEFAULTS = {
  ollama: { host: "http://localhost", port: 11434, envHost: "OLLAMA_HOST", envPort: "OLLAMA_PORT" },
  comfyui: { host: "http://localhost", port: 8188, envHost: "COMFYUI_HOST", envPort: "COMFYUI_PORT" },
  piper: { host: "http://localhost", port: 5500, envHost: "PIPER_HOST", envPort: "PIPER_PORT" },
  kokoro: { host: "http://localhost", port: 5501, envHost: "KOKORO_HOST", envPort: "KOKORO_PORT" }
};
// Error codes retried by default (GENERATION_FAILED is retryable but not retried by default).
var DEFAULT_RETRYABLE = [
  "PROVIDER_UNAVAILABLE",
  "RATE_LIMITED",
  "TIMEOUT"
];
// Parses a base-10 integer, returning `fallback` when the value is missing,
// empty, or not a finite number. FIX: previously a malformed env var (e.g.
// OLLAMA_PORT="abc") produced NaN, silently breaking URLs and cache TTLs.
function parseIntOr(value, fallback) {
  if (!value) return fallback;
  const n = parseInt(value, 10);
  return Number.isFinite(n) ? n : fallback;
}
/**
 * Normalizes user-supplied config plus environment variables into a fully
 * populated runtime config: API keys, local service endpoints, retry policy,
 * per-modality timeouts, and the optional usage callback.
 * Precedence everywhere is: explicit input > environment > built-in default.
 */
function resolveConfig(input) {
  const keys = {};
  for (const [name, envVar] of Object.entries(ENV_KEY_MAP)) {
    keys[name] = input.keys?.[name] ?? process.env[envVar];
  }
  const local = {};
  for (const [name, defaults] of Object.entries(LOCAL_DEFAULTS)) {
    const cfgLocal = input.local?.[name];
    const envPort = process.env[defaults.envPort];
    const envHost = process.env[defaults.envHost];
    local[name] = {
      enabled: cfgLocal?.enabled ?? true,
      host: cfgLocal?.host ?? envHost ?? defaults.host,
      port: cfgLocal?.port ?? parseIntOr(envPort, defaults.port),
      type: cfgLocal?.type
    };
  }
  const autoDetectEnv = process.env.NOOSPHERE_AUTO_DETECT_LOCAL;
  const cacheTTLEnv = process.env.NOOSPHERE_DISCOVERY_CACHE_TTL;
  return {
    keys,
    local,
    customLocal: input.local?.custom ?? [],
    defaults: input.defaults ?? {},
    // The env flag disables auto-detection only with the literal string "false".
    autoDetectLocal: input.autoDetectLocal ?? (autoDetectEnv !== void 0 ? autoDetectEnv !== "false" : true),
    discoveryCacheTTL: input.discoveryCacheTTL ?? parseIntOr(cacheTTLEnv, 60),
    retry: {
      maxRetries: input.retry?.maxRetries ?? 2,
      backoffMs: input.retry?.backoffMs ?? 1e3,
      retryableErrors: input.retry?.retryableErrors ?? DEFAULT_RETRYABLE,
      failover: input.retry?.failover ?? true
    },
    timeout: {
      llm: input.timeout?.llm ?? 3e4,
      image: input.timeout?.image ?? 12e4,
      video: input.timeout?.video ?? 3e5,
      tts: input.timeout?.tts ?? 6e4
    },
    onUsage: input.onUsage
  };
}
119
+
120
+ // src/registry.ts
121
/**
 * In-memory catalog of provider adapters plus a TTL-bounded cache of the
 * models each provider reports.
 */
var Registry = class {
  providers = /* @__PURE__ */ new Map();
  modelCache = /* @__PURE__ */ new Map();
  // providerId -> cached models
  cacheTTLMs;
  /** @param cacheTTLMinutes how long a provider's model list stays fresh */
  constructor(cacheTTLMinutes) {
    this.cacheTTLMs = cacheTTLMinutes * 60 * 1e3;
  }
  addProvider(provider) {
    this.providers.set(provider.id, provider);
  }
  getProvider(id) {
    return this.providers.get(id);
  }
  getAllProviders() {
    return [...this.providers.values()];
  }
  /**
   * Picks a provider for a modality. An explicit `preferredId` must both
   * exist and support the modality (else null). Otherwise local providers
   * win; the first matching cloud provider is the fallback.
   */
  resolveProvider(modality, preferredId) {
    if (preferredId) {
      const preferred = this.providers.get(preferredId);
      return preferred && preferred.modalities.includes(modality) ? preferred : null;
    }
    let cloudFallback = null;
    for (const candidate of this.providers.values()) {
      if (!candidate.modalities.includes(modality)) continue;
      if (candidate.isLocal) return candidate;
      cloudFallback = cloudFallback ?? candidate;
    }
    return cloudFallback;
  }
  /** Finds a cached model by id + modality together with its provider. */
  resolveModel(modelId, modality) {
    for (const [providerId, cached] of this.modelCache) {
      const match = cached.models.find((m) => m.id === modelId && m.modality === modality);
      if (!match) continue;
      const provider = this.providers.get(providerId);
      if (provider) return { provider, model: match };
    }
    return null;
  }
  /** All cached models, optionally restricted to one modality. */
  getModels(modality) {
    const collected = [];
    for (const cached of this.modelCache.values()) {
      for (const model of cached.models) {
        if (!modality || model.modality === modality) collected.push(model);
      }
    }
    return collected;
  }
  getModel(provider, modelId) {
    const cached = this.modelCache.get(provider);
    return cached?.models.find((m) => m.id === modelId) ?? null;
  }
  /** Refreshes one provider's model cache; returns the model count (0 if unknown). */
  async syncProvider(providerId) {
    const provider = this.providers.get(providerId);
    if (!provider) return 0;
    const models = await provider.listModels();
    this.modelCache.set(providerId, { models, syncedAt: Date.now() });
    return models.length;
  }
  /** Refreshes every provider, collecting per-provider counts and error strings. */
  async syncAll() {
    const byProvider = {};
    const errors = [];
    let synced = 0;
    for (const provider of this.providers.values()) {
      try {
        const count = await this.syncProvider(provider.id);
        byProvider[provider.id] = count;
        synced += count;
      } catch (err) {
        errors.push(`${provider.id}: ${err instanceof Error ? err.message : String(err)}`);
        byProvider[provider.id] = 0;
      }
    }
    return { synced, byProvider, errors };
  }
  /** True when a provider has never been synced or its cache outlived the TTL. */
  isCacheStale(providerId) {
    const cached = this.modelCache.get(providerId);
    return !cached || Date.now() - cached.syncedAt > this.cacheTTLMs;
  }
  clearCache() {
    this.modelCache.clear();
  }
  /** Summarizes registered providers; status is "online" here and updated externally via ping. */
  getProviderInfos(modality) {
    const infos = [];
    for (const provider of this.providers.values()) {
      if (modality && !provider.modalities.includes(modality)) continue;
      infos.push({
        id: provider.id,
        name: provider.name,
        modalities: provider.modalities,
        local: provider.isLocal,
        status: "online",
        modelCount: this.modelCache.get(provider.id)?.models.length ?? 0
      });
    }
    return infos;
  }
};
229
+
230
+ // src/tracking.ts
231
/**
 * Collects usage events in memory, optionally forwarding each one to a
 * user-supplied callback, and aggregates cost summaries on demand.
 */
var UsageTracker = class {
  events = [];
  onUsage;
  /** @param onUsage optional async callback invoked for every recorded event */
  constructor(onUsage) {
    this.onUsage = onUsage;
  }
  /** Stores an event and awaits the optional onUsage callback. */
  async record(event) {
    this.events.push(event);
    if (this.onUsage) {
      await this.onUsage(event);
    }
  }
  /**
   * Aggregates total cost/request counts plus per-provider and per-modality
   * cost breakdowns, optionally filtered by time window, provider, modality.
   */
  getSummary(options) {
    const sinceMs = options?.since ? new Date(options.since).getTime() : null;
    const untilMs = options?.until ? new Date(options.until).getTime() : null;
    const matches = (e) => {
      if (sinceMs !== null && new Date(e.timestamp).getTime() < sinceMs) return false;
      if (untilMs !== null && new Date(e.timestamp).getTime() > untilMs) return false;
      if (options?.provider && e.provider !== options.provider) return false;
      if (options?.modality && e.modality !== options.modality) return false;
      return true;
    };
    const byProvider = {};
    const byModality = { llm: 0, image: 0, video: 0, tts: 0 };
    let totalCost = 0;
    let totalRequests = 0;
    for (const event of this.events) {
      if (!matches(event)) continue;
      totalRequests += 1;
      totalCost += event.cost;
      byProvider[event.provider] = (byProvider[event.provider] ?? 0) + event.cost;
      byModality[event.modality] += event.cost;
    }
    return { totalCost, totalRequests, byProvider, byModality };
  }
  /** Drops all recorded events. */
  clear() {
    this.events = [];
  }
};
278
+
279
+ // src/providers/pi-ai.ts
280
+ var import_pi_ai = require("@mariozechner/pi-ai");
281
// Upstream providers the pi-ai gateway can route to; used both for API-key
// registration in the constructor and for model discovery loops below.
var KNOWN_PROVIDERS = ["anthropic", "google", "openai", "xai", "groq", "cerebras", "openrouter", "zai"];
// Providers that run on the local machine; models from these are flagged local.
var LOCAL_PROVIDERS = /* @__PURE__ */ new Set(["ollama"]);
283
/** Concatenates every `text` content part of a pi-ai message into one string. */
function extractText(msg) {
  let text = "";
  for (const part of msg.content) {
    if (part.type === "text") text += part.text;
  }
  return text;
}
286
/** Concatenates every `thinking` part of a message; undefined when there are none. */
function extractThinking(msg) {
  let thinking = "";
  for (const part of msg.content) {
    if (part.type === "thinking") thinking += part.thinking;
  }
  return thinking === "" ? void 0 : thinking;
}
290
/**
 * LLM provider backed by the pi-ai gateway, which multiplexes several cloud
 * providers behind a single chat/stream interface.
 */
var PiAiProvider = class {
  id = "pi-ai";
  name = "pi-ai (LLM Gateway)";
  modalities = ["llm"];
  isLocal = false;
  keys;
  /**
   * Stores every non-empty key and registers it with pi-ai.
   * FIX: the original branched on KNOWN_PROVIDERS membership but executed the
   * identical setApiKey call in both branches; the dead conditional is removed.
   */
  constructor(keys) {
    this.keys = {};
    for (const [k, v] of Object.entries(keys)) {
      if (v) {
        this.keys[k] = v;
        (0, import_pi_ai.setApiKey)(k, v);
      }
    }
  }
  /** pi-ai is an in-process library; "reachable" just means it loads cleanly. */
  async ping() {
    try {
      (0, import_pi_ai.getProviders)();
      return true;
    } catch {
      return false;
    }
  }
  /** Lists LLM models across all known upstream providers (best-effort per provider). */
  async listModels(modality) {
    if (modality && modality !== "llm") return [];
    const models = [];
    for (const provider of KNOWN_PROVIDERS) {
      try {
        const providerModels = (0, import_pi_ai.getModels)(provider);
        for (const m of providerModels) {
          models.push({
            id: m.id,
            provider: "pi-ai",
            name: m.name || m.id,
            modality: "llm",
            local: LOCAL_PROVIDERS.has(String(m.provider)),
            cost: {
              price: m.cost.input ?? 0,
              unit: m.cost.input > 0 ? "per_1m_tokens" : "free"
            },
            capabilities: {
              contextWindow: m.contextWindow,
              maxTokens: m.maxTokens,
              supportsVision: m.input.includes("image"),
              supportsStreaming: true
            }
          });
        }
      } catch {
        // Provider not configured/available; skip it (listing is best-effort).
      }
    }
    return models;
  }
  // Converts our message list into a pi-ai context: the first system message
  // becomes systemPrompt, remaining messages are timestamped. Shared by
  // chat() and stream() (previously duplicated inline in both).
  buildContext(messages) {
    return {
      systemPrompt: messages.find((m) => m.role === "system")?.content,
      messages: messages.filter((m) => m.role !== "system").map((m) => ({
        role: m.role,
        content: m.content,
        timestamp: Date.now()
      }))
    };
  }
  /**
   * One-shot completion. Resolves the model (explicit id or first available),
   * runs it, and normalizes the response and token usage.
   * @throws Error when no matching model is found.
   */
  async chat(options) {
    const start = Date.now();
    const { model, provider } = this.findModel(options.model);
    if (!model || !provider) {
      throw new Error(`Model not found: ${options.model ?? "default"}`);
    }
    const context = this.buildContext(options.messages);
    const response = await (0, import_pi_ai.complete)(model, context);
    const inputTokens = response.usage?.input ?? 0;
    const outputTokens = response.usage?.output ?? 0;
    return {
      content: extractText(response),
      thinking: extractThinking(response),
      provider: "pi-ai",
      model: response.model ?? options.model ?? "unknown",
      modality: "llm",
      latencyMs: Date.now() - start,
      usage: {
        cost: response.usage?.cost?.total ?? 0,
        input: inputTokens,
        output: outputTokens,
        unit: "tokens"
      }
    };
  }
  /**
   * Streaming completion. Returns an async-iterable of delta events plus a
   * `result()` promise resolving to the final normalized response and an
   * `abort()` that stops event delivery.
   * FIX: removed the unused `const self = this` local.
   */
  stream(options) {
    const start = Date.now();
    const { model, provider } = this.findModel(options.model);
    if (!model || !provider) {
      throw new Error(`Model not found: ${options.model ?? "default"}`);
    }
    const context = this.buildContext(options.messages);
    const piStream = (0, import_pi_ai.stream)(model, context);
    let aborted = false;
    let resolveResult = null;
    let rejectResult = null;
    const resultPromise = new Promise((resolve, reject) => {
      resolveResult = resolve;
      rejectResult = reject;
    });
    const asyncIterator = {
      async *[Symbol.asyncIterator]() {
        try {
          for await (const chunk of piStream) {
            if (aborted) break;
            if (chunk.type === "text_delta") {
              yield { type: "text_delta", delta: chunk.delta };
            } else if (chunk.type === "thinking_delta") {
              yield { type: "thinking_delta", delta: chunk.delta };
            }
          }
          const final = await piStream.result();
          const inputTokens = final.usage?.input ?? 0;
          const outputTokens = final.usage?.output ?? 0;
          const result = {
            content: extractText(final),
            thinking: extractThinking(final),
            provider: "pi-ai",
            model: final.model ?? options.model ?? "unknown",
            modality: "llm",
            latencyMs: Date.now() - start,
            usage: {
              cost: final.usage?.cost?.total ?? 0,
              input: inputTokens,
              output: outputTokens,
              unit: "tokens"
            }
          };
          resolveResult?.(result);
          yield { type: "done", result };
        } catch (err) {
          const error = err instanceof Error ? err : new Error(String(err));
          rejectResult?.(error);
          yield { type: "error", error };
        }
      }
    };
    return {
      [Symbol.asyncIterator]: () => asyncIterator[Symbol.asyncIterator](),
      result: () => resultPromise,
      abort: () => {
        aborted = true;
      }
    };
  }
  // Resolves a model id across all known providers; with no id, the first
  // model of the first available provider wins.
  findModel(modelId) {
    for (const provider of KNOWN_PROVIDERS) {
      try {
        const models = (0, import_pi_ai.getModels)(provider);
        const found = modelId ? models.find((m) => m.id === modelId) : models[0];
        if (found) return { model: found, provider };
      } catch {
        // Provider unavailable; keep searching.
      }
    }
    return { model: null, provider: null };
  }
};
459
+
460
+ // src/providers/fal.ts
461
+ var import_client = require("@fal-ai/client");
462
// fal.ai endpoint listing all models with their per-unit pricing.
var FAL_PRICING_URL = "https://api.fal.ai/v1/models/pricing";
/**
 * Cloud provider for image/video/TTS generation via fal.ai. Pricing is
 * fetched from the pricing feed and cached per model id so generation
 * results can report a cost estimate.
 */
var FalProvider = class {
  id = "fal";
  name = "fal.ai";
  modalities = ["image", "video", "tts"];
  isLocal = false;
  apiKey;
  pricingCache = /* @__PURE__ */ new Map();
  constructor(apiKey) {
    this.apiKey = apiKey;
    import_client.fal.config({ credentials: apiKey });
  }
  /** No cheap health endpoint; "has an API key" stands in for reachability. */
  async ping() {
    return !!this.apiKey;
  }
  /**
   * Lists models from fal's pricing feed, optionally filtered by modality.
   * FIX: pricing is now cached for EVERY entry before the modality filter.
   * Previously a filtered call (e.g. listModels("image")) cleared the cache
   * and re-populated only that modality, so subsequent video()/speak()
   * results reported cost 0.
   */
  async listModels(modality) {
    try {
      const res = await fetch(FAL_PRICING_URL, {
        headers: { Authorization: `Key ${this.apiKey}` }
      });
      if (!res.ok) return [];
      const data = await res.json();
      this.pricingCache.clear();
      const models = [];
      for (const entry of data) {
        this.pricingCache.set(entry.modelId, { price: entry.price, unit: entry.unit });
        const inferredModality = this.inferModality(entry.modelId, entry.unit);
        if (modality && inferredModality !== modality) continue;
        models.push({
          id: entry.modelId,
          provider: "fal",
          name: entry.modelId.replace("fal-ai/", ""),
          modality: inferredModality,
          local: false,
          cost: { price: entry.price, unit: entry.unit }
        });
      }
      return models;
    } catch {
      return [];
    }
  }
  /** Text-to-image via fal.subscribe; returns the first generated image's URL. */
  async image(options) {
    const model = options.model ?? "fal-ai/flux/schnell";
    const start = Date.now();
    const response = await import_client.fal.subscribe(model, {
      input: {
        prompt: options.prompt,
        negative_prompt: options.negativePrompt,
        image_size: options.width && options.height ? { width: options.width, height: options.height } : void 0,
        seed: options.seed,
        num_inference_steps: options.steps,
        guidance_scale: options.guidanceScale
      }
    });
    const image = response.data?.images?.[0];
    const pricing = this.pricingCache.get(model);
    return {
      url: image?.url,
      provider: "fal",
      model,
      modality: "image",
      latencyMs: Date.now() - start,
      usage: {
        // Cost is 0 until listModels() has populated the pricing cache.
        cost: pricing?.price ?? 0,
        unit: pricing?.unit ?? "per_image"
      },
      media: {
        width: image?.width,
        height: image?.height,
        format: "png"
      }
    };
  }
  /** Text/image-to-video via fal.subscribe. */
  async video(options) {
    const model = options.model ?? "fal-ai/kling-video/v2/master/text-to-video";
    const start = Date.now();
    const response = await import_client.fal.subscribe(model, {
      input: {
        prompt: options.prompt,
        image_url: options.imageUrl,
        duration: options.duration,
        fps: options.fps
      }
    });
    const video = response.data?.video;
    const pricing = this.pricingCache.get(model);
    return {
      // Some fal models return `video.url`, others a flat `video_url`.
      url: video?.url ?? response.data?.video_url,
      provider: "fal",
      model,
      modality: "video",
      latencyMs: Date.now() - start,
      usage: {
        cost: pricing?.price ?? 0,
        unit: pricing?.unit ?? "per_second"
      },
      media: {
        width: options.width,
        height: options.height,
        duration: options.duration,
        format: "mp4",
        fps: options.fps
      }
    };
  }
  /** Text-to-speech via fal.run. */
  async speak(options) {
    const model = options.model ?? "fal-ai/kokoro/american-english";
    const start = Date.now();
    const response = await import_client.fal.run(model, {
      input: {
        text: options.text,
        voice: options.voice,
        speed: options.speed
      }
    });
    // Audio location varies by model: flat `audio_url` or nested `audio.url`.
    const audioUrl = response.data?.audio_url ?? response.data?.audio?.url;
    const pricing = this.pricingCache.get(model);
    return {
      url: audioUrl,
      provider: "fal",
      model,
      modality: "tts",
      latencyMs: Date.now() - start,
      usage: {
        cost: pricing?.price ?? 0,
        input: options.text.length,
        unit: pricing?.unit ?? "per_1k_chars"
      },
      media: {
        format: options.format ?? "mp3"
      }
    };
  }
  // Heuristic modality classification from the pricing unit and model id;
  // order matters: tts, then video, defaulting to image.
  inferModality(modelId, unit) {
    if (unit.includes("char") || modelId.includes("tts") || modelId.includes("kokoro") || modelId.includes("elevenlabs")) return "tts";
    if (unit.includes("second") || modelId.includes("video") || modelId.includes("kling") || modelId.includes("sora") || modelId.includes("veo")) return "video";
    return "image";
  }
};
602
+
603
+ // src/providers/comfyui.ts
604
// Minimal SDXL text-to-image workflow graph for ComfyUI's /prompt endpoint.
// Node "6"/"7" hold the positive/negative prompts, "5" the latent size,
// "3" the sampler settings, "9" saves the output image.
var DEFAULT_TXT2IMG_WORKFLOW = {
  "3": {
    class_type: "KSampler",
    inputs: {
      seed: 0,
      steps: 20,
      cfg: 7,
      sampler_name: "euler",
      scheduler: "normal",
      denoise: 1,
      model: ["4", 0],
      positive: ["6", 0],
      negative: ["7", 0],
      latent_image: ["5", 0]
    }
  },
  "4": { class_type: "CheckpointLoaderSimple", inputs: { ckpt_name: "sd_xl_base_1.0.safetensors" } },
  "5": { class_type: "EmptyLatentImage", inputs: { width: 1024, height: 1024, batch_size: 1 } },
  "6": { class_type: "CLIPTextEncode", inputs: { text: "", clip: ["4", 1] } },
  "7": { class_type: "CLIPTextEncode", inputs: { text: "", clip: ["4", 1] } },
  "8": { class_type: "VAEDecode", inputs: { samples: ["3", 0], vae: ["4", 2] } },
  "9": { class_type: "SaveImage", inputs: { filename_prefix: "noosphere", images: ["8", 0] } }
};
/** Local image/video provider talking to a ComfyUI server over HTTP. */
var ComfyUIProvider = class {
  id = "comfyui";
  name = "ComfyUI";
  modalities = ["image", "video"];
  isLocal = true;
  baseUrl;
  /** @param config { host, port } of the ComfyUI server */
  constructor(config) {
    this.baseUrl = `${config.host}:${config.port}`;
  }
  /** True when the server answers /system_stats. */
  async ping() {
    try {
      const res = await fetch(`${this.baseUrl}/system_stats`);
      return res.ok;
    } catch {
      return false;
    }
  }
  /** Advertises the built-in txt2img/txt2vid pseudo-models when the server responds. */
  async listModels(modality) {
    try {
      const probe = await fetch(`${this.baseUrl}/object_info`);
      if (!probe.ok) return [];
      const models = [];
      if (!modality || modality === "image") {
        models.push({
          id: "comfyui-txt2img",
          provider: "comfyui",
          name: "ComfyUI Text-to-Image",
          modality: "image",
          local: true,
          cost: { price: 0, unit: "free" },
          capabilities: { maxWidth: 2048, maxHeight: 2048, supportsNegativePrompt: true }
        });
      }
      if (!modality || modality === "video") {
        models.push({
          id: "comfyui-txt2vid",
          provider: "comfyui",
          name: "ComfyUI Text-to-Video",
          modality: "video",
          local: true,
          cost: { price: 0, unit: "free" },
          capabilities: { maxDuration: 10, supportsImageToVideo: true }
        });
      }
      return models;
    } catch {
      return [];
    }
  }
  /** Queues a text-to-image workflow and polls until the image bytes are ready. */
  async image(options) {
    const start = Date.now();
    const width = options.width ?? 1024;
    const height = options.height ?? 1024;
    const workflow = structuredClone(DEFAULT_TXT2IMG_WORKFLOW);
    workflow["6"].inputs.text = options.prompt;
    workflow["7"].inputs.text = options.negativePrompt ?? "";
    workflow["5"].inputs.width = width;
    workflow["5"].inputs.height = height;
    const sampler = workflow["3"].inputs;
    if (options.seed !== void 0) sampler.seed = options.seed;
    if (options.steps !== void 0) sampler.steps = options.steps;
    if (options.guidanceScale !== void 0) sampler.cfg = options.guidanceScale;
    const queueRes = await fetch(`${this.baseUrl}/prompt`, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({ prompt: workflow })
    });
    if (!queueRes.ok) throw new Error(`ComfyUI queue failed: ${queueRes.status}`);
    const { prompt_id } = await queueRes.json();
    const imageData = await this.pollForResult(prompt_id);
    return {
      buffer: Buffer.from(imageData),
      provider: "comfyui",
      model: options.model ?? "comfyui-txt2img",
      modality: "image",
      latencyMs: Date.now() - start,
      usage: { cost: 0, unit: "free" },
      media: { width, height, format: "png" }
    };
  }
  /** Not implemented: video needs a user-supplied AnimateDiff workflow. */
  async video(_options) {
    throw new Error("ComfyUI video generation requires a configured AnimateDiff workflow");
  }
  /**
   * Polls /history once a second for the queued prompt until an output image
   * appears, then downloads and returns its bytes as an ArrayBuffer.
   * @throws Error after `maxWaitMs` without a result.
   */
  async pollForResult(promptId, maxWaitMs = 3e5) {
    const pause = () => new Promise((wake) => setTimeout(wake, 1e3));
    const deadline = Date.now() + maxWaitMs;
    while (Date.now() < deadline) {
      const res = await fetch(`${this.baseUrl}/history/${promptId}`);
      if (res.ok) {
        const history = await res.json();
        const outputs = history[promptId]?.outputs;
        if (outputs) {
          for (const nodeOutput of Object.values(outputs)) {
            if (nodeOutput.images?.length > 0) {
              const img = nodeOutput.images[0];
              const imgRes = await fetch(
                `${this.baseUrl}/view?filename=${img.filename}&subfolder=${img.subfolder}&type=${img.type}`
              );
              return imgRes.arrayBuffer();
            }
          }
        }
      }
      await pause();
    }
    throw new Error(`ComfyUI generation timed out after ${maxWaitMs}ms`);
  }
};
739
+
740
+ // src/providers/local-tts.ts
741
/**
 * Generic local TTS provider (Piper, Kokoro, ...) that speaks the OpenAI-style
 * `/v1/audio/speech` API, with voice discovery via `/voices` or `/v1/models`.
 */
var LocalTTSProvider = class {
  id;
  name;
  modalities = ["tts"];
  isLocal = true;
  baseUrl;
  /** @param config { id, name, host, port } of the local TTS server */
  constructor(config) {
    this.id = config.id;
    this.name = config.name;
    this.baseUrl = `${config.host}:${config.port}`;
  }
  /** True when the server answers /health. */
  async ping() {
    try {
      const res = await fetch(`${this.baseUrl}/health`);
      return res.ok;
    } catch {
      return false;
    }
  }
  /**
   * Discovers available voices. Tries the native `/voices` endpoint first,
   * then falls back to the OpenAI-compatible `/v1/models` listing.
   * FIX: the fallback now also runs when `/voices` responds with a non-OK
   * status or a non-array payload; previously it only ran when the request
   * threw (network error), so servers returning 404 for `/voices` were
   * wrongly reported as having no voices.
   */
  async listModels(modality) {
    if (modality && modality !== "tts") return [];
    try {
      let voices = [];
      try {
        const res = await fetch(`${this.baseUrl}/voices`);
        if (res.ok) {
          const data = await res.json();
          if (Array.isArray(data)) {
            voices = data;
          }
        }
      } catch {
        // Endpoint unreachable; fall through to the OpenAI-style listing.
      }
      if (voices.length === 0) {
        const res = await fetch(`${this.baseUrl}/v1/models`);
        if (res.ok) {
          const data = await res.json();
          voices = data.data ?? [];
        }
      }
      return voices.map((v) => ({
        id: v.id,
        provider: this.id,
        name: v.name ?? v.id,
        modality: "tts",
        local: true,
        cost: { price: 0, unit: "free" },
        capabilities: { voices: voices.map((vv) => vv.id) }
      }));
    } catch {
      return [];
    }
  }
  /**
   * Synthesizes speech via POST /v1/audio/speech and returns the raw audio bytes.
   * @throws Error with status and body text when the server rejects the request.
   */
  async speak(options) {
    const start = Date.now();
    const res = await fetch(`${this.baseUrl}/v1/audio/speech`, {
      method: "POST",
      headers: { "Content-Type": "application/json" },
      body: JSON.stringify({
        model: options.model ?? "tts-1",
        input: options.text,
        voice: options.voice ?? "default",
        speed: options.speed ?? 1,
        response_format: options.format ?? "mp3"
      })
    });
    if (!res.ok) {
      throw new Error(`Local TTS failed: ${res.status} ${await res.text()}`);
    }
    const audioBuffer = Buffer.from(await res.arrayBuffer());
    return {
      buffer: audioBuffer,
      provider: this.id,
      model: options.model ?? options.voice ?? "default",
      modality: "tts",
      latencyMs: Date.now() - start,
      usage: {
        cost: 0,
        input: options.text.length,
        unit: "characters"
      },
      media: {
        format: options.format ?? "mp3"
      }
    };
  }
};
826
+
827
+ // src/providers/huggingface.ts
828
+ var import_inference = require("@huggingface/inference");
829
/**
 * Cloud provider using the HuggingFace Inference API for chat, image
 * generation, and TTS, exposing a small curated free-tier model catalog.
 */
var HuggingFaceProvider = class {
  id = "huggingface";
  name = "HuggingFace Inference";
  modalities = ["image", "tts", "llm"];
  isLocal = false;
  client;
  /** @param token HuggingFace access token */
  constructor(token) {
    this.client = new import_inference.HfInference(token);
  }
  /** The hosted API exposes no cheap health probe; assume reachable. */
  async ping() {
    return true;
  }
  /** Returns the curated default model per modality (all reported as free). */
  async listModels(modality) {
    const catalog = [
      { id: "stabilityai/stable-diffusion-xl-base-1.0", name: "SDXL Base", modality: "image" },
      { id: "facebook/mms-tts-eng", name: "MMS TTS English", modality: "tts" },
      { id: "meta-llama/Llama-3.1-8B-Instruct", name: "Llama 3.1 8B", modality: "llm" }
    ];
    return catalog.filter((entry) => !modality || entry.modality === modality).map((entry) => ({
      id: entry.id,
      provider: "huggingface",
      name: entry.name,
      modality: entry.modality,
      local: false,
      cost: { price: 0, unit: "free" }
    }));
  }
  /** Chat completion via HF's OpenAI-compatible chatCompletion endpoint. */
  async chat(options) {
    const start = Date.now();
    const model = options.model ?? "meta-llama/Llama-3.1-8B-Instruct";
    const response = await this.client.chatCompletion({
      model,
      messages: options.messages,
      temperature: options.temperature,
      max_tokens: options.maxTokens
    });
    const message = response.choices?.[0]?.message;
    const usage = response.usage;
    return {
      content: message?.content ?? "",
      provider: "huggingface",
      model,
      modality: "llm",
      latencyMs: Date.now() - start,
      usage: {
        cost: 0,
        input: usage?.prompt_tokens,
        output: usage?.completion_tokens,
        unit: "tokens"
      }
    };
  }
  /** Text-to-image via textToImage; returns PNG bytes in a Buffer. */
  async image(options) {
    const start = Date.now();
    const model = options.model ?? "stabilityai/stable-diffusion-xl-base-1.0";
    const blob = await this.client.textToImage({
      model,
      inputs: options.prompt,
      parameters: {
        negative_prompt: options.negativePrompt,
        width: options.width,
        height: options.height,
        guidance_scale: options.guidanceScale,
        num_inference_steps: options.steps
      }
    }, { outputType: "blob" });
    const imageBytes = Buffer.from(await blob.arrayBuffer());
    return {
      buffer: imageBytes,
      provider: "huggingface",
      model,
      modality: "image",
      latencyMs: Date.now() - start,
      usage: { cost: 0, unit: "free" },
      media: {
        width: options.width ?? 1024,
        height: options.height ?? 1024,
        format: "png"
      }
    };
  }
  /** Text-to-speech via textToSpeech; returns WAV bytes in a Buffer. */
  async speak(options) {
    const start = Date.now();
    const model = options.model ?? "facebook/mms-tts-eng";
    const blob = await this.client.textToSpeech({
      model,
      inputs: options.text
    });
    const audioBytes = Buffer.from(await blob.arrayBuffer());
    return {
      buffer: audioBytes,
      provider: "huggingface",
      model,
      modality: "tts",
      latencyMs: Date.now() - start,
      usage: {
        cost: 0,
        input: options.text.length,
        unit: "characters"
      },
      media: { format: "wav" }
    };
  }
};
952
+
953
+ // src/noosphere.ts
954
+ var Noosphere = class {
955
+ config;
956
+ registry;
957
+ tracker;
958
+ initialized = false;
959
+ constructor(config = {}) {
960
+ this.config = resolveConfig(config);
961
+ this.registry = new Registry(this.config.discoveryCacheTTL);
962
+ this.tracker = new UsageTracker(this.config.onUsage);
963
+ }
964
+ /** Register a custom provider adapter */
965
+ registerProvider(provider) {
966
+ this.registry.addProvider(provider);
967
+ }
968
+ // --- Generation Methods ---
969
+ async chat(options) {
970
+ if (!this.initialized) await this.init();
971
+ const provider = this.resolveProviderForModality("llm", options.provider, options.model);
972
+ if (!provider.chat) {
973
+ throw new NoosphereError(`Provider '${provider.id}' does not support chat`, {
974
+ code: "INVALID_INPUT",
975
+ provider: provider.id,
976
+ modality: "llm"
977
+ });
978
+ }
979
+ const start = Date.now();
980
+ try {
981
+ const result = await this.executeWithRetry(
982
+ "llm",
983
+ provider,
984
+ () => provider.chat(options),
985
+ (alt) => alt.chat ? () => alt.chat(options) : null
986
+ );
987
+ await this.trackUsage(result, options.metadata);
988
+ return result;
989
+ } catch (err) {
990
+ await this.trackError("llm", provider.id, options.model, start, err, options.metadata);
991
+ throw err;
992
+ }
993
+ }
994
+ stream(options) {
995
+ const self = this;
996
+ let innerStream;
997
+ let finalResult;
998
+ let providerRef;
999
+ const ensureInit = async () => {
1000
+ if (!self.initialized) await self.init();
1001
+ if (!providerRef) {
1002
+ providerRef = self.resolveProviderForModality("llm", options.provider, options.model);
1003
+ if (!providerRef.stream) {
1004
+ throw new NoosphereError(`Provider '${providerRef.id}' does not support streaming`, {
1005
+ code: "INVALID_INPUT",
1006
+ provider: providerRef.id,
1007
+ modality: "llm"
1008
+ });
1009
+ }
1010
+ innerStream = providerRef.stream(options);
1011
+ }
1012
+ };
1013
+ const wrappedIterator = {
1014
+ async *[Symbol.asyncIterator]() {
1015
+ await ensureInit();
1016
+ try {
1017
+ for await (const event of innerStream) {
1018
+ if (event.type === "done" && event.result) {
1019
+ finalResult = event.result;
1020
+ await self.trackUsage(event.result, options.metadata);
1021
+ }
1022
+ yield event;
1023
+ }
1024
+ } catch (err) {
1025
+ await self.trackError("llm", providerRef.id, options.model, Date.now(), err, options.metadata);
1026
+ throw err;
1027
+ }
1028
+ }
1029
+ };
1030
+ return {
1031
+ [Symbol.asyncIterator]: () => wrappedIterator[Symbol.asyncIterator](),
1032
+ result: async () => {
1033
+ if (finalResult) return finalResult;
1034
+ for await (const event of wrappedIterator) {
1035
+ if (event.type === "done" && event.result) return event.result;
1036
+ if (event.type === "error" && event.error) throw event.error;
1037
+ }
1038
+ throw new NoosphereError("Stream ended without result", {
1039
+ code: "GENERATION_FAILED",
1040
+ provider: providerRef?.id ?? "unknown",
1041
+ modality: "llm"
1042
+ });
1043
+ },
1044
+ abort: () => innerStream?.abort()
1045
+ };
1046
+ }
1047
+ async image(options) {
1048
+ if (!this.initialized) await this.init();
1049
+ const provider = this.resolveProviderForModality("image", options.provider, options.model);
1050
+ if (!provider.image) {
1051
+ throw new NoosphereError(`Provider '${provider.id}' does not support image generation`, {
1052
+ code: "INVALID_INPUT",
1053
+ provider: provider.id,
1054
+ modality: "image"
1055
+ });
1056
+ }
1057
+ const start = Date.now();
1058
+ try {
1059
+ const result = await this.executeWithRetry(
1060
+ "image",
1061
+ provider,
1062
+ () => provider.image(options),
1063
+ (alt) => alt.image ? () => alt.image(options) : null
1064
+ );
1065
+ await this.trackUsage(result, options.metadata);
1066
+ return result;
1067
+ } catch (err) {
1068
+ await this.trackError("image", provider.id, options.model, start, err, options.metadata);
1069
+ throw err;
1070
+ }
1071
+ }
1072
+ async video(options) {
1073
+ if (!this.initialized) await this.init();
1074
+ const provider = this.resolveProviderForModality("video", options.provider, options.model);
1075
+ if (!provider.video) {
1076
+ throw new NoosphereError(`Provider '${provider.id}' does not support video generation`, {
1077
+ code: "INVALID_INPUT",
1078
+ provider: provider.id,
1079
+ modality: "video"
1080
+ });
1081
+ }
1082
+ const start = Date.now();
1083
+ try {
1084
+ const result = await this.executeWithRetry(
1085
+ "video",
1086
+ provider,
1087
+ () => provider.video(options),
1088
+ (alt) => alt.video ? () => alt.video(options) : null
1089
+ );
1090
+ await this.trackUsage(result, options.metadata);
1091
+ return result;
1092
+ } catch (err) {
1093
+ await this.trackError("video", provider.id, options.model, start, err, options.metadata);
1094
+ throw err;
1095
+ }
1096
+ }
1097
+ async speak(options) {
1098
+ if (!this.initialized) await this.init();
1099
+ const provider = this.resolveProviderForModality("tts", options.provider, options.model);
1100
+ if (!provider.speak) {
1101
+ throw new NoosphereError(`Provider '${provider.id}' does not support TTS`, {
1102
+ code: "INVALID_INPUT",
1103
+ provider: provider.id,
1104
+ modality: "tts"
1105
+ });
1106
+ }
1107
+ const start = Date.now();
1108
+ try {
1109
+ const result = await this.executeWithRetry(
1110
+ "tts",
1111
+ provider,
1112
+ () => provider.speak(options),
1113
+ (alt) => alt.speak ? () => alt.speak(options) : null
1114
+ );
1115
+ await this.trackUsage(result, options.metadata);
1116
+ return result;
1117
+ } catch (err) {
1118
+ await this.trackError("tts", provider.id, options.model, start, err, options.metadata);
1119
+ throw err;
1120
+ }
1121
+ }
1122
+ // --- Discovery Methods ---
1123
+ async getProviders(modality) {
1124
+ if (!this.initialized) await this.init();
1125
+ return this.registry.getProviderInfos(modality);
1126
+ }
1127
+ async getModels(modality) {
1128
+ if (!this.initialized) await this.init();
1129
+ return this.registry.getModels(modality);
1130
+ }
1131
+ async getModel(provider, modelId) {
1132
+ if (!this.initialized) await this.init();
1133
+ return this.registry.getModel(provider, modelId);
1134
+ }
1135
+ async syncModels() {
1136
+ if (!this.initialized) await this.init();
1137
+ return this.registry.syncAll();
1138
+ }
1139
+ // --- Tracking Methods ---
1140
+ getUsage(options) {
1141
+ return this.tracker.getSummary(options);
1142
+ }
1143
+ // --- Lifecycle ---
1144
+ async dispose() {
1145
+ for (const provider of this.registry.getAllProviders()) {
1146
+ if (provider.dispose) {
1147
+ await provider.dispose();
1148
+ }
1149
+ }
1150
+ this.registry.clearCache();
1151
+ this.tracker.clear();
1152
+ }
1153
+ // --- Internal ---
1154
+ async init() {
1155
+ if (this.initialized) return;
1156
+ this.initialized = true;
1157
+ const { keys, local, autoDetectLocal } = this.config;
1158
+ const llmKeys = {
1159
+ openai: keys.openai,
1160
+ anthropic: keys.anthropic,
1161
+ google: keys.google,
1162
+ openrouter: keys.openrouter,
1163
+ groq: keys.groq,
1164
+ mistral: keys.mistral,
1165
+ xai: keys.xai
1166
+ };
1167
+ const hasAnyLLMKey = Object.values(llmKeys).some(Boolean);
1168
+ if (hasAnyLLMKey) {
1169
+ this.registry.addProvider(new PiAiProvider(llmKeys));
1170
+ }
1171
+ if (keys.fal) {
1172
+ this.registry.addProvider(new FalProvider(keys.fal));
1173
+ }
1174
+ if (keys.huggingface) {
1175
+ this.registry.addProvider(new HuggingFaceProvider(keys.huggingface));
1176
+ }
1177
+ if (autoDetectLocal) {
1178
+ const PING_TIMEOUT_MS = 2e3;
1179
+ const pingUrl = async (url) => {
1180
+ try {
1181
+ const controller = new AbortController();
1182
+ const timer = setTimeout(() => controller.abort(), PING_TIMEOUT_MS);
1183
+ try {
1184
+ const res = await fetch(url, { signal: controller.signal });
1185
+ return res.ok;
1186
+ } finally {
1187
+ clearTimeout(timer);
1188
+ }
1189
+ } catch {
1190
+ return false;
1191
+ }
1192
+ };
1193
+ const comfyuiCfg = local["comfyui"];
1194
+ const piperCfg = local["piper"];
1195
+ const kokoroCfg = local["kokoro"];
1196
+ await Promise.allSettled([
1197
+ // ComfyUI
1198
+ (async () => {
1199
+ if (comfyuiCfg?.enabled) {
1200
+ const ok = await pingUrl(`${comfyuiCfg.host}:${comfyuiCfg.port}/system_stats`);
1201
+ if (ok) {
1202
+ this.registry.addProvider(new ComfyUIProvider({ host: comfyuiCfg.host, port: comfyuiCfg.port }));
1203
+ }
1204
+ }
1205
+ })(),
1206
+ // Piper TTS
1207
+ (async () => {
1208
+ if (piperCfg?.enabled) {
1209
+ const ok = await pingUrl(`${piperCfg.host}:${piperCfg.port}/health`);
1210
+ if (ok) {
1211
+ this.registry.addProvider(new LocalTTSProvider({ id: "piper", name: "Piper TTS", host: piperCfg.host, port: piperCfg.port }));
1212
+ }
1213
+ }
1214
+ })(),
1215
+ // Kokoro TTS
1216
+ (async () => {
1217
+ if (kokoroCfg?.enabled) {
1218
+ const ok = await pingUrl(`${kokoroCfg.host}:${kokoroCfg.port}/health`);
1219
+ if (ok) {
1220
+ this.registry.addProvider(new LocalTTSProvider({ id: "kokoro", name: "Kokoro TTS", host: kokoroCfg.host, port: kokoroCfg.port }));
1221
+ }
1222
+ }
1223
+ })()
1224
+ ]);
1225
+ }
1226
+ }
1227
+ resolveProviderForModality(modality, preferredId, modelId) {
1228
+ if (modelId && !preferredId) {
1229
+ const resolved = this.registry.resolveModel(modelId, modality);
1230
+ if (resolved) return resolved.provider;
1231
+ }
1232
+ if (!preferredId) {
1233
+ const defaultCfg = this.config.defaults[modality];
1234
+ if (defaultCfg) {
1235
+ preferredId = defaultCfg.provider;
1236
+ }
1237
+ }
1238
+ const provider = this.registry.resolveProvider(modality, preferredId);
1239
+ if (!provider) {
1240
+ throw new NoosphereError(
1241
+ `No provider available for modality '${modality}'${preferredId ? ` (requested: ${preferredId})` : ""}`,
1242
+ { code: "NO_PROVIDER", provider: preferredId ?? "none", modality }
1243
+ );
1244
+ }
1245
+ return provider;
1246
+ }
1247
+ async executeWithRetry(modality, provider, fn, failoverFnFactory) {
1248
+ const { maxRetries, backoffMs, retryableErrors, failover } = this.config.retry;
1249
+ let lastError;
1250
+ for (let attempt = 0; attempt <= maxRetries; attempt++) {
1251
+ try {
1252
+ return await fn();
1253
+ } catch (err) {
1254
+ lastError = err instanceof Error ? err : new Error(String(err));
1255
+ const isNoosphereErr = err instanceof NoosphereError;
1256
+ const code = isNoosphereErr ? err.code : "GENERATION_FAILED";
1257
+ const isRetryable = retryableErrors.includes(code) || code === "GENERATION_FAILED";
1258
+ const allowsFailover = code !== "GENERATION_FAILED" && retryableErrors.includes(code);
1259
+ if (!isRetryable || attempt === maxRetries) {
1260
+ if (failover && allowsFailover && failoverFnFactory) {
1261
+ const altProviders = this.registry.getAllProviders().filter((p) => p.id !== provider.id && p.modalities.includes(modality));
1262
+ for (const alt of altProviders) {
1263
+ try {
1264
+ const altFn = failoverFnFactory(alt);
1265
+ if (altFn) return await altFn();
1266
+ } catch {
1267
+ }
1268
+ }
1269
+ }
1270
+ break;
1271
+ }
1272
+ const delay = backoffMs * Math.pow(2, attempt);
1273
+ await new Promise((resolve) => setTimeout(resolve, delay));
1274
+ }
1275
+ }
1276
+ throw lastError ?? new NoosphereError("Generation failed", {
1277
+ code: "GENERATION_FAILED",
1278
+ provider: provider.id,
1279
+ modality
1280
+ });
1281
+ }
1282
+ async trackUsage(result, metadata) {
1283
+ const event = {
1284
+ modality: result.modality,
1285
+ provider: result.provider,
1286
+ model: result.model,
1287
+ cost: result.usage.cost,
1288
+ latencyMs: result.latencyMs,
1289
+ input: result.usage.input,
1290
+ output: result.usage.output,
1291
+ unit: result.usage.unit,
1292
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
1293
+ success: true,
1294
+ metadata
1295
+ };
1296
+ await this.tracker.record(event);
1297
+ }
1298
+ async trackError(modality, provider, model, startMs, err, metadata) {
1299
+ const event = {
1300
+ modality,
1301
+ provider,
1302
+ model: model ?? "unknown",
1303
+ cost: 0,
1304
+ latencyMs: Date.now() - startMs,
1305
+ timestamp: (/* @__PURE__ */ new Date()).toISOString(),
1306
+ success: false,
1307
+ error: err instanceof Error ? err.message : String(err),
1308
+ metadata
1309
+ };
1310
+ await this.tracker.record(event);
1311
+ }
1312
+ };
1313
// Annotate the CommonJS export names for ESM import in node:
// Dead code at runtime (`0 && ...` never evaluates the right-hand side), but
// Node statically pattern-matches this exact shape to detect named exports,
// so `import { Noosphere } from "noosphere"` works from ESM. Do not restyle.
0 && (module.exports = {
  Noosphere,
  NoosphereError
});
//# sourceMappingURL=index.cjs.map