llm-cost-meter 0.1.0 → 0.2.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/CHANGELOG.md CHANGED
@@ -1,5 +1,38 @@
1
1
  # Changelog
2
2
 
3
+ ## 0.2.0 (2026-04-05)
4
+
5
+ ### Streaming Support
6
+
7
+ - New `meterStream()` function — wraps streaming LLM calls, records cost after stream completes
8
+ - Works with both OpenAI and Anthropic streaming responses
9
+ - Extracts usage from stream object properties, final chunks, or accumulated message events
10
+ - Records `status: 'error'` events if stream fails mid-way
11
+ - `CostMeter.trackStream()` for instance-level streaming support
12
+
13
+ ### Express Middleware
14
+
15
+ - New `createExpressMiddleware()` — drop-in Express middleware
16
+ - Attaches `req.meter()` to every request with feature, userId, sessionId pre-filled
17
+ - Custom `extractUserId` and `extractSessionId` functions for flexible auth patterns
18
+ - Supports `env` and `tags` in middleware config
19
+ - Per-call option overrides from route handlers
20
+
21
+ ### Dashboard
22
+
23
+ - Live mode toggle — auto-refreshes every 5 seconds when enabled
24
+ - Green pulsing indicator when live mode is active
25
+ - Pauses polling when browser tab is hidden (Page Visibility API)
26
+ - Persists live mode preference in localStorage
27
+
28
+ ### Testing
29
+
30
+ - 128 tests (up from 110)
31
+ - New streaming test suite (10 tests): chunk passthrough, OpenAI/Anthropic usage extraction, error handling, latency measurement
32
+ - New middleware test suite (8 tests): req.meter attachment, user/session extraction, config overrides
33
+
34
+ ---
35
+
3
36
  ## 0.1.0 (2026-04-05)
4
37
 
5
38
  Initial release of llm-cost-meter.
package/README.md CHANGED
@@ -78,6 +78,82 @@ By feature:
78
78
  Insight: 'chat' drives 53% of cost but only 24% of calls.
79
79
  ```
80
80
 
81
+ ## Streaming Support
82
+
83
+ Track costs from streaming LLM responses. The stream is passed through unchanged — cost is recorded automatically when the stream ends.
84
+
85
+ ```typescript
86
+ import { meterStream } from 'llm-cost-meter';
87
+
88
+ // OpenAI streaming
89
+ const stream = await meterStream(
90
+ () => openai.chat.completions.create({
91
+ model: 'gpt-4o',
92
+ messages: [{ role: 'user', content: 'Write a poem' }],
93
+ stream: true,
94
+ stream_options: { include_usage: true },
95
+ }),
96
+ { feature: 'chat', userId: 'user_123' }
97
+ );
98
+
99
+ for await (const chunk of stream) {
100
+ process.stdout.write(chunk.choices[0]?.delta?.content ?? '');
101
+ }
102
+ // Cost event automatically recorded when stream ends
103
+
104
+ // Anthropic streaming
105
+ const anthropicStream = await meterStream(
106
+ () => anthropic.messages.stream({
107
+ model: 'claude-sonnet-4-20250514',
108
+ max_tokens: 1024,
109
+ messages: [{ role: 'user', content: 'Write a poem' }],
110
+ }),
111
+ { feature: 'chat' }
112
+ );
113
+
114
+ for await (const event of anthropicStream) {
115
+ // handle events
116
+ }
117
+ ```
118
+
119
+ Works with both OpenAI and Anthropic streaming. Extracts usage from the stream object, final chunk, or accumulated message events. If the stream errors mid-way, a `status: 'error'` event is recorded.
120
+
121
+ ## Express Middleware
122
+
123
+ Drop-in middleware that attaches `req.meter()` to every request:
124
+
125
+ ```typescript
126
+ import { createExpressMiddleware, configure } from 'llm-cost-meter';
127
+
128
+ configure({ adapters: ['console', 'local'] });
129
+
130
+ // Attach to specific routes
131
+ app.post('/api/chat',
132
+ createExpressMiddleware({ feature: 'chat' }),
133
+ async (req, res) => {
134
+ const response = await req.meter(() =>
135
+ client.messages.create({
136
+ model: 'claude-sonnet-4-20250514',
137
+ max_tokens: 1024,
138
+ messages: [{ role: 'user', content: req.body.message }],
139
+ })
140
+ );
141
+ res.json(response);
142
+ }
143
+ );
144
+
145
+ // With custom user/session extraction
146
+ app.use('/api', createExpressMiddleware({
147
+ feature: 'api',
148
+ extractUserId: (req) => req.headers['x-user-id'],
149
+ extractSessionId: (req) => req.cookies?.sid,
150
+ env: 'production',
151
+ tags: { team: 'backend' },
152
+ }));
153
+ ```
154
+
155
+ The middleware automatically fills in `feature`, `userId`, `sessionId`, `env`, and `tags` — your route handlers just call `req.meter()`.
156
+
81
157
  ## Tagging Guide
82
158
 
83
159
  Every `meter()` call accepts these tags:
@@ -83,6 +83,7 @@
83
83
  .pill-openai { background: rgba(6,182,212,0.15); color: var(--cyan); }
84
84
  .pill-custom { background: rgba(139,92,246,0.15); color: #8b5cf6; }
85
85
 
86
+ @keyframes pulse { 0%,100% { opacity: 1; } 50% { opacity: 0.4; } }
86
87
  .footer { text-align: center; padding: 24px; font-size: 12px; color: var(--text-muted); }
87
88
  .loading { display: flex; align-items: center; justify-content: center; height: 60vh; font-size: 16px; color: var(--text-muted); }
88
89
  .empty-state { text-align: center; padding: 60px 20px; color: var(--text-muted); }
@@ -281,6 +282,11 @@
281
282
  </div>
282
283
  ${activeFilters ? `<div class="filter-sep"></div><button class="btn-outline btn" onclick="clearAllFilters()">Clear all</button>` : ''}
283
284
  ${activeFilters ? `<div style="display:flex;gap:6px;flex-wrap:wrap">${activeFilters}</div>` : ''}
285
+ <div style="margin-left:auto">
286
+ <button class="btn ${liveMode ? '' : 'btn-outline'}" onclick="toggleLive()" style="${liveMode ? 'background:var(--green)' : ''}">
287
+ ${liveMode ? '<span style="display:inline-block;width:8px;height:8px;background:white;border-radius:50%;margin-right:6px;animation:pulse 1.5s infinite"></span>Live' : 'Live'}
288
+ </button>
289
+ </div>
284
290
  </div>
285
291
 
286
292
  <div class="container">
@@ -528,17 +534,55 @@
528
534
  }
529
535
  function exportAllEventsJSON() { exportJSON(getFilteredEvents(), 'llm-cost-events.json'); }
530
536
 
531
- // ── Load ──
532
- fetch('/api/events')
533
- .then(r => r.json())
534
- .then(events => { allEvents = events; render(); })
535
- .catch(() => {
536
- document.getElementById('app').innerHTML = `
537
- <div class="loading" style="flex-direction:column;gap:12px">
538
- <div>Could not load events.</div>
539
- <div style="font-size:13px">Make sure the server is running: <code>npm run dashboard</code></div>
540
- </div>`;
541
- });
537
+ // ── Live Mode ──
538
+ let liveMode = localStorage.getItem('llm-meter-live') === 'true';
539
+ let liveInterval = null;
540
+
541
+ function toggleLive() {
542
+ liveMode = !liveMode;
543
+ localStorage.setItem('llm-meter-live', String(liveMode));
544
+ if (liveMode) startPolling(); else stopPolling();
545
+ render();
546
+ }
547
+
548
+ function startPolling() {
549
+ stopPolling();
550
+ liveInterval = setInterval(() => {
551
+ if (document.hidden) return;
552
+ fetchEvents(true);
553
+ }, 5000);
554
+ }
555
+
556
+ function stopPolling() {
557
+ if (liveInterval) { clearInterval(liveInterval); liveInterval = null; }
558
+ }
559
+
560
+ function fetchEvents(silent) {
561
+ fetch('/api/events')
562
+ .then(r => r.json())
563
+ .then(events => {
564
+ if (silent && events.length === allEvents.length) return;
565
+ allEvents = events;
566
+ render();
567
+ })
568
+ .catch(() => {
569
+ if (!silent) {
570
+ document.getElementById('app').innerHTML = `
571
+ <div class="loading" style="flex-direction:column;gap:12px">
572
+ <div>Could not load events.</div>
573
+ <div style="font-size:13px">Make sure the server is running: <code>npm run dashboard</code></div>
574
+ </div>`;
575
+ }
576
+ });
577
+ }
578
+
579
+ document.addEventListener('visibilitychange', () => {
580
+ if (!document.hidden && liveMode) fetchEvents(true);
581
+ });
582
+
583
+ // ── Initial Load ──
584
+ fetchEvents(false);
585
+ if (liveMode) startPolling();
542
586
  </script>
543
587
  </body>
544
588
  </html>
package/dist/cli.js CHANGED
File without changes
package/dist/index.d.ts CHANGED
@@ -1,7 +1,8 @@
1
1
  import { MeterOptions, CostMeterConfig, GlobalConfig } from './types';
2
- export { CostEvent, MeterOptions, CostMeterConfig, CostAdapter, ModelPricing, PricingTable, SummaryRow, ReportOptions, GlobalConfig, ErrorHandler, MeterStats, } from './types';
2
+ export { CostEvent, MeterOptions, CostMeterConfig, CostAdapter, ModelPricing, PricingTable, SummaryRow, ReportOptions, GlobalConfig, ErrorHandler, MeterStats, ExpressMiddlewareOptions, } from './types';
3
3
  export { calculateCost, getAvailableModels, getAllPricing, configurePricing, setPricingTable, removePricing, } from './pricing';
4
4
  export { ConsoleAdapter, LocalAdapter, createAdapter } from './adapters';
5
+ export { createExpressMiddleware } from './middleware/express';
5
6
  /**
6
7
  * Configure the global llm-cost-meter settings.
7
8
  * Merges with current config. Use resetConfig() first for a clean slate.
@@ -61,5 +62,29 @@ export declare class CostMeter {
61
62
  tags?: Record<string, string>;
62
63
  latencyMs?: number;
63
64
  }): void;
65
+ /**
66
+ * Wrap a streaming LLM call. Passes through the stream unchanged
67
+ * and records cost after the stream completes.
68
+ */
69
+ trackStream<T extends AsyncIterable<any>>(fn: () => Promise<T>, options?: MeterOptions): Promise<T>;
64
70
  flush(): Promise<void>;
65
71
  }
72
+ /**
73
+ * Wrap a streaming LLM API call to track cost and usage.
74
+ * Returns the stream unchanged — cost is recorded after the stream completes.
75
+ *
76
+ * Works with both OpenAI and Anthropic streaming responses.
77
+ *
78
+ * @example
79
+ * ```typescript
80
+ * const stream = await meterStream(
81
+ * () => openai.chat.completions.create({ model: 'gpt-4o', messages: [...], stream: true }),
82
+ * { feature: 'chat', userId: 'user_123' }
83
+ * );
84
+ * for await (const chunk of stream) {
85
+ * process.stdout.write(chunk.choices[0]?.delta?.content ?? '');
86
+ * }
87
+ * // Cost event automatically recorded when stream ends
88
+ * ```
89
+ */
90
+ export declare function meterStream<T extends AsyncIterable<any>>(fn: () => Promise<T>, options?: MeterOptions): Promise<T>;
package/dist/index.js CHANGED
@@ -1,6 +1,6 @@
1
1
  "use strict";
2
2
  Object.defineProperty(exports, "__esModule", { value: true });
3
- exports.CostMeter = exports.createAdapter = exports.LocalAdapter = exports.ConsoleAdapter = exports.removePricing = exports.setPricingTable = exports.configurePricing = exports.getAllPricing = exports.getAvailableModels = exports.calculateCost = void 0;
3
+ exports.CostMeter = exports.createExpressMiddleware = exports.createAdapter = exports.LocalAdapter = exports.ConsoleAdapter = exports.removePricing = exports.setPricingTable = exports.configurePricing = exports.getAllPricing = exports.getAvailableModels = exports.calculateCost = void 0;
4
4
  exports.configure = configure;
5
5
  exports.resetConfig = resetConfig;
6
6
  exports.getConfig = getConfig;
@@ -8,6 +8,7 @@ exports.getMeterStats = getMeterStats;
8
8
  exports.resetStats = resetStats;
9
9
  exports.meter = meter;
10
10
  exports.flush = flush;
11
+ exports.meterStream = meterStream;
11
12
  const uuid_1 = require("uuid");
12
13
  const pricing_1 = require("./pricing");
13
14
  const adapters_1 = require("./adapters");
@@ -24,6 +25,9 @@ var adapters_2 = require("./adapters");
24
25
  Object.defineProperty(exports, "ConsoleAdapter", { enumerable: true, get: function () { return adapters_2.ConsoleAdapter; } });
25
26
  Object.defineProperty(exports, "LocalAdapter", { enumerable: true, get: function () { return adapters_2.LocalAdapter; } });
26
27
  Object.defineProperty(exports, "createAdapter", { enumerable: true, get: function () { return adapters_2.createAdapter; } });
28
+ // Re-export middleware
29
+ var express_1 = require("./middleware/express");
30
+ Object.defineProperty(exports, "createExpressMiddleware", { enumerable: true, get: function () { return express_1.createExpressMiddleware; } });
27
31
  // ── Default config ──────────────────────────────────────────────
28
32
  const DEFAULT_CONFIG = {
29
33
  adapters: ['console'],
@@ -284,8 +288,182 @@ class CostMeter {
284
288
  stats.eventsDropped++;
285
289
  });
286
290
  }
291
+ /**
292
+ * Wrap a streaming LLM call. Passes through the stream unchanged
293
+ * and records cost after the stream completes.
294
+ */
295
+ async trackStream(fn, options = {}) {
296
+ const startTime = Date.now();
297
+ const stream = await fn();
298
+ const adapters = this.adapters;
299
+ const config = this.config;
300
+ return wrapStream(stream, startTime, options, adapters, config.defaultTags ?? {}, config.provider, config.onError, config.verbose);
301
+ }
287
302
  async flush() {
288
303
  await Promise.all(this.adapters.map((a) => (a.flush ? a.flush() : Promise.resolve())));
289
304
  }
290
305
  }
291
306
  exports.CostMeter = CostMeter;
307
+ // ── Streaming Support ───────────────────────────────────────────
308
+ /**
309
+ * Extract usage from a streaming response's accumulated state.
310
+ * Works with both OpenAI and Anthropic stream objects.
311
+ */
312
+ function extractStreamUsage(streamObj, chunks) {
313
+ // OpenAI: stream objects often have .usage or finalUsage after iteration
314
+ if (streamObj?.usage?.prompt_tokens !== undefined) {
315
+ return {
316
+ provider: 'openai',
317
+ model: streamObj.model ?? 'unknown',
318
+ inputTokens: streamObj.usage.prompt_tokens,
319
+ outputTokens: streamObj.usage.completion_tokens ?? 0,
320
+ };
321
+ }
322
+ // Anthropic: stream objects accumulate a .message or .finalMessage with usage
323
+ if (streamObj?.message?.usage?.input_tokens !== undefined) {
324
+ return {
325
+ provider: 'anthropic',
326
+ model: streamObj.message.model ?? 'unknown',
327
+ inputTokens: streamObj.message.usage.input_tokens,
328
+ outputTokens: streamObj.message.usage.output_tokens ?? 0,
329
+ };
330
+ }
331
+ if (streamObj?.finalMessage?.usage?.input_tokens !== undefined) {
332
+ return {
333
+ provider: 'anthropic',
334
+ model: streamObj.finalMessage.model ?? 'unknown',
335
+ inputTokens: streamObj.finalMessage.usage.input_tokens,
336
+ outputTokens: streamObj.finalMessage.usage.output_tokens ?? 0,
337
+ };
338
+ }
339
+ // Fallback: scan chunks for usage data
340
+ let model = 'unknown';
341
+ let inputTokens = 0;
342
+ let outputTokens = 0;
343
+ let provider = 'custom';
344
+ for (const chunk of chunks) {
345
+ if (chunk?.model)
346
+ model = chunk.model;
347
+ // OpenAI chunk with usage (last chunk when include_usage is set)
348
+ if (chunk?.usage?.prompt_tokens !== undefined) {
349
+ provider = 'openai';
350
+ inputTokens = chunk.usage.prompt_tokens;
351
+ outputTokens = chunk.usage.completion_tokens ?? 0;
352
+ }
353
+ // Anthropic message_start event
354
+ if (chunk?.type === 'message_start' && chunk?.message?.usage) {
355
+ provider = 'anthropic';
356
+ inputTokens = chunk.message.usage.input_tokens ?? 0;
357
+ }
358
+ // Anthropic message_delta event with usage
359
+ if (chunk?.type === 'message_delta' && chunk?.usage) {
360
+ provider = 'anthropic';
361
+ outputTokens = chunk.usage.output_tokens ?? 0;
362
+ }
363
+ }
364
+ return { provider, model, inputTokens, outputTokens };
365
+ }
366
+ function wrapStream(stream, startTime, options, adapters, defaultTags, providerHint, onError, verbose) {
367
+ const chunks = [];
368
+ const wrapped = {
369
+ [Symbol.asyncIterator]() {
370
+ const iterator = stream[Symbol.asyncIterator]();
371
+ return {
372
+ async next() {
373
+ try {
374
+ const result = await iterator.next();
375
+ if (!result.done) {
376
+ chunks.push(result.value);
377
+ }
378
+ if (result.done) {
379
+ // Stream ended — record cost event
380
+ const latencyMs = Date.now() - startTime;
381
+ const usage = extractStreamUsage(stream, chunks);
382
+ const provider = providerHint ?? usage.provider;
383
+ const event = buildEvent(provider, usage.model, usage.inputTokens, usage.outputTokens, latencyMs, options, defaultTags);
384
+ stats.eventsTracked++;
385
+ dispatchEvent(event, adapters, options.awaitWrites ?? false, onError, verbose);
386
+ }
387
+ return result;
388
+ }
389
+ catch (error) {
390
+ // Stream errored — record error event
391
+ const latencyMs = Date.now() - startTime;
392
+ const event = buildEvent(providerHint ?? 'custom', 'unknown', 0, 0, latencyMs, options, defaultTags, 'error', error instanceof Error ? error.message : String(error));
393
+ stats.eventsTracked++;
394
+ dispatchEvent(event, adapters, options.awaitWrites ?? false, onError, verbose);
395
+ throw error;
396
+ }
397
+ },
398
+ async return(value) {
399
+ if (iterator.return)
400
+ return iterator.return(value);
401
+ return { done: true, value: undefined };
402
+ },
403
+ async throw(error) {
404
+ if (iterator.throw)
405
+ return iterator.throw(error);
406
+ throw error;
407
+ },
408
+ };
409
+ },
410
+ };
411
+ // Copy over non-iterator properties from the original stream
412
+ // (e.g., OpenAI's .controller, .response, etc.)
413
+ const proto = Object.getOwnPropertyNames(stream).concat(Object.getOwnPropertyNames(Object.getPrototypeOf(stream) ?? {}));
414
+ for (const key of proto) {
415
+ if (key === 'constructor' || key === Symbol.asyncIterator.toString())
416
+ continue;
417
+ if (!(key in wrapped)) {
418
+ try {
419
+ const desc = Object.getOwnPropertyDescriptor(stream, key) ??
420
+ Object.getOwnPropertyDescriptor(Object.getPrototypeOf(stream), key);
421
+ if (desc) {
422
+ Object.defineProperty(wrapped, key, {
423
+ get: () => stream[key],
424
+ enumerable: desc.enumerable,
425
+ configurable: true,
426
+ });
427
+ }
428
+ }
429
+ catch {
430
+ // Skip non-copyable properties
431
+ }
432
+ }
433
+ }
434
+ return wrapped;
435
+ }
436
+ /**
437
+ * Wrap a streaming LLM API call to track cost and usage.
438
+ * Returns the stream unchanged — cost is recorded after the stream completes.
439
+ *
440
+ * Works with both OpenAI and Anthropic streaming responses.
441
+ *
442
+ * @example
443
+ * ```typescript
444
+ * const stream = await meterStream(
445
+ * () => openai.chat.completions.create({ model: 'gpt-4o', messages: [...], stream: true }),
446
+ * { feature: 'chat', userId: 'user_123' }
447
+ * );
448
+ * for await (const chunk of stream) {
449
+ * process.stdout.write(chunk.choices[0]?.delta?.content ?? '');
450
+ * }
451
+ * // Cost event automatically recorded when stream ends
452
+ * ```
453
+ */
454
+ async function meterStream(fn, options = {}) {
455
+ const startTime = Date.now();
456
+ const adapters = getAdapters();
457
+ let stream;
458
+ try {
459
+ stream = await fn();
460
+ }
461
+ catch (error) {
462
+ const latencyMs = Date.now() - startTime;
463
+ const event = buildEvent('custom', 'unknown', 0, 0, latencyMs, options, globalConfig.defaultTags, 'error', error instanceof Error ? error.message : String(error));
464
+ stats.eventsTracked++;
465
+ dispatchEvent(event, adapters, options.awaitWrites ?? false, globalConfig.onError, globalConfig.verbose);
466
+ throw error;
467
+ }
468
+ return wrapStream(stream, startTime, options, adapters, globalConfig.defaultTags, undefined, globalConfig.onError, globalConfig.verbose);
469
+ }
@@ -0,0 +1,18 @@
1
+ import { ExpressMiddlewareOptions } from '../types';
2
+ /**
3
+ * Creates Express middleware that attaches `req.meter()`
4
+ * to every request, pre-filled with feature, userId, and sessionId.
5
+ *
6
+ * @example
7
+ * ```typescript
8
+ * import { createExpressMiddleware } from 'llm-cost-meter';
9
+ *
10
+ * app.post('/api/chat', createExpressMiddleware({ feature: 'chat' }), async (req, res) => {
11
+ * const response = await req.meter(() =>
12
+ * client.messages.create({ model: 'claude-sonnet-4-20250514', ... })
13
+ * );
14
+ * res.json(response);
15
+ * });
16
+ * ```
17
+ */
18
+ export declare function createExpressMiddleware(config: ExpressMiddlewareOptions): (req: any, _res: any, next: any) => void;
@@ -0,0 +1,35 @@
1
+ "use strict";
2
+ Object.defineProperty(exports, "__esModule", { value: true });
3
+ exports.createExpressMiddleware = createExpressMiddleware;
4
+ const index_1 = require("../index");
5
+ /**
6
+ * Creates Express middleware that attaches `req.meter()`
7
+ * to every request, pre-filled with feature, userId, and sessionId.
8
+ *
9
+ * @example
10
+ * ```typescript
11
+ * import { createExpressMiddleware } from 'llm-cost-meter';
12
+ *
13
+ * app.post('/api/chat', createExpressMiddleware({ feature: 'chat' }), async (req, res) => {
14
+ * const response = await req.meter(() =>
15
+ * client.messages.create({ model: 'claude-sonnet-4-20250514', ... })
16
+ * );
17
+ * res.json(response);
18
+ * });
19
+ * ```
20
+ */
21
+ function createExpressMiddleware(config) {
22
+ return (req, _res, next) => {
23
+ const userId = config.extractUserId?.(req) ?? req.user?.id;
24
+ const sessionId = config.extractSessionId?.(req) ?? req.sessionID ?? req.sessionId;
25
+ req.meter = (fn, options = {}) => (0, index_1.meter)(fn, {
26
+ feature: config.feature,
27
+ userId,
28
+ sessionId,
29
+ env: config.env,
30
+ tags: config.tags,
31
+ ...options,
32
+ });
33
+ next();
34
+ };
35
+ }
package/dist/types.d.ts CHANGED
@@ -87,3 +87,10 @@ export interface MeterStats {
87
87
  adapterErrors: number;
88
88
  unknownModels: Set<string>;
89
89
  }
90
+ export interface ExpressMiddlewareOptions {
91
+ feature: string;
92
+ extractUserId?: (req: any) => string | undefined;
93
+ extractSessionId?: (req: any) => string | undefined;
94
+ env?: string;
95
+ tags?: Record<string, string>;
96
+ }
package/package.json CHANGED
@@ -1,12 +1,12 @@
1
1
  {
2
2
  "name": "llm-cost-meter",
3
- "version": "0.1.0",
3
+ "version": "0.2.0",
4
4
  "description": "Per-feature, per-user cost attribution and reporting for LLM API calls",
5
5
  "author": "shmulikdav",
6
6
  "license": "MIT",
7
7
  "repository": {
8
8
  "type": "git",
9
- "url": "https://github.com/shmulikdav/llmeter.git"
9
+ "url": "https://github.com/shmulikdav/LLMeter.git"
10
10
  },
11
11
  "type": "commonjs",
12
12
  "engines": {