ai-cost-calc 1.3.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of ai-cost-calc might be problematic. Click here for more details.

package/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2025 ai-cost-calc contributors
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
package/README.md ADDED
@@ -0,0 +1,300 @@
1
+ # ai-cost-calc (JavaScript/TypeScript)
2
+
3
+ AI cost calculator and usage tracker for LLM apps.
4
+
5
+ - Powered by MarginDash live pricing, fetched at runtime (not bundled static pricing files)
6
+ - AI API prices change often; this SDK fetches live pricing from the API instead of relying on bundled static tables
7
+ - Privacy-first: your app still talks directly to AI providers, so prompts/responses stay in your stack
8
+ - Tracking is optional and sends usage plus event metadata (customer ID, event type, revenue if provided)
9
+
10
+ Use it in two ways:
11
+ - Free cost calculator (`cost`) for 400+ models (no API key required):
12
+ - exact mode with token counts (`inputTokens`, `outputTokens`)
13
+ - estimate mode with prompt/response text (`inputText`, `outputText`)
14
+ - live pricing with 24h cache per `AiCostCalc` instance
15
+ - Usage tracking (`addUsage` + `track`) with an API key
16
+
17
+ ## Pricing Data
18
+
19
+ Cost calculation uses live pricing from the API.
20
+ No hardcoded pricing tables.
21
+
22
+ - Pricing data is cached per `AiCostCalc` instance for 24 hours
23
+ - Cache refresh happens automatically when the cache is stale
24
+ - If a refresh fails after a successful fetch, the SDK reuses last-known pricing and retries after backoff
25
+
26
+ ## Caching Behavior
27
+
28
+ - Cache scope: per `AiCostCalc` instance
29
+ - Cache TTL: 24 hours
30
+ - Refresh failures: last-known pricing is reused, then retried after backoff
31
+ - Force refresh now: create a new `AiCostCalc` instance
32
+
33
+ ## Requirements
34
+
35
+ - Node.js 18+
36
+
37
+ ## Installation
38
+
39
+ ```bash
40
+ npm install ai-cost-calc
41
+ ```
42
+
43
+ For the tracking quickstart (OpenAI example):
44
+
45
+ ```bash
46
+ npm install openai
47
+ ```
48
+
49
+ For text-based estimation, `js-tiktoken` is used. It is an optional dependency,
50
+ so some package managers may skip installing it. Install it manually if needed:
51
+
52
+ ```bash
53
+ npm install js-tiktoken
54
+ ```
55
+
56
+ ## Quickstart (Cost Calculator)
57
+
58
+ ```typescript
59
+ import { AiCostCalc } from "ai-cost-calc";
60
+
61
+ async function run() {
62
+ const md = new AiCostCalc();
63
+ const result = await md.cost("gpt-4o", 1000, 500);
64
+ if (!result) return;
65
+ console.log(result.totalCost);
66
+ }
67
+
68
+ run();
69
+ ```
70
+
71
+ ## Quickstart (Usage Tracking)
72
+
73
+ Use an API key from your MarginDash dashboard.
74
+
75
+ ```typescript
76
+ import { AiCostCalc } from "ai-cost-calc";
77
+ import OpenAI from "openai";
78
+
79
+ async function run() {
80
+ const md = new AiCostCalc({ apiKey: process.env.AI_COST_CALC_API_KEY });
81
+ const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });
82
+
83
+ const response = await openai.chat.completions.create({
84
+ model: "gpt-4o",
85
+ messages: [{ role: "user", content: "Hello" }],
86
+ });
87
+
88
+ md.addUsage({
89
+ vendor: "openai",
90
+ model: response.model,
91
+ inputTokens: response.usage?.prompt_tokens ?? 0,
92
+ outputTokens: response.usage?.completion_tokens ?? 0,
93
+ });
94
+
95
+ md.track({
96
+ customerId: "cust_123",
97
+ eventType: "chat",
98
+ revenueAmountInCents: 250,
99
+ });
100
+
101
+ await md.shutdown();
102
+ }
103
+
104
+ run();
105
+ ```
106
+
107
+ ## When to Use Which Mode
108
+
109
+ | If you need... | Use... |
110
+ | --- | --- |
111
+ | Quick cost checks with no account setup | `cost()` only |
112
+ | Exact costs from provider token usage | `cost(model, inputTokens, outputTokens)` |
113
+ | Early estimation from prompt/response text | `cost(model, inputText, outputText?)` |
114
+ | MarginDash customer/revenue tracking | `addUsage()` + `track()` with `apiKey` |
115
+
116
+ ## Return Values and Failure Modes
117
+
118
+ | Method | Failure behavior |
119
+ | --- | --- |
120
+ | `cost()` | Returns `null` |
121
+ | `addUsage()` / `track()` without `apiKey` | No-op, reports via `onError` once |
122
+ | `flush()` / `shutdown()` | Do not throw for request failures; report via `onError` |
123
+
124
+ ## Common Integration Patterns
125
+
126
+ OpenAI (`chat.completions`):
127
+
128
+ ```typescript
129
+ md.addUsage({
130
+ vendor: "openai",
131
+ model: response.model,
132
+ inputTokens: response.usage?.prompt_tokens ?? 0,
133
+ outputTokens: response.usage?.completion_tokens ?? 0,
134
+ });
135
+ ```
136
+
137
+ Anthropic (`messages`):
138
+
139
+ ```typescript
140
+ md.addUsage({
141
+ vendor: "anthropic",
142
+ model: response.model,
143
+ inputTokens: response.usage?.input_tokens ?? 0,
144
+ outputTokens: response.usage?.output_tokens ?? 0,
145
+ });
146
+ ```
147
+
148
+ Google Gemini:
149
+
150
+ ```typescript
151
+ md.addUsage({
152
+ vendor: "google",
153
+ model: response.modelVersion ?? "gemini-2.0-flash",
154
+ inputTokens: response.usageMetadata?.promptTokenCount ?? 0,
155
+ outputTokens: response.usageMetadata?.candidatesTokenCount ?? 0,
156
+ });
157
+ ```
158
+
159
+ ## Environment Variables
160
+
161
+ - `AI_COST_CALC_API_KEY`: required only for tracking (from your MarginDash dashboard)
162
+ - `OPENAI_API_KEY`: only needed if you use the OpenAI SDK in your app
163
+
164
+ ## API Reference
165
+
166
+ ### `cost(model, inputTokens, outputTokens)`
167
+
168
+ Exact cost mode.
169
+
170
+ - `model`: model slug (example: `gpt-4o`, `claude-sonnet-4`)
171
+ - `inputTokens`: non-negative integer
172
+ - `outputTokens`: non-negative integer
173
+
174
+ ### `cost(model, inputText, outputText?)`
175
+
176
+ Estimated cost mode using `js-tiktoken`.
177
+
178
+ - `inputText`: prompt text
179
+ - `outputText`: optional response text (defaults to 0 output tokens)
180
+
181
+ Returns `Promise<CostResult | null>`.
182
+
183
+ `null` means one of:
184
+ - unknown model
185
+ - pricing fetch unavailable
186
+ - invalid arguments
187
+ - tokenizer unavailable/failure in estimate mode
188
+
189
+ `CostResult` fields:
190
+ - `model`
191
+ - `inputCost`
192
+ - `outputCost`
193
+ - `totalCost`
194
+ - `inputTokens`
195
+ - `outputTokens`
196
+ - `estimated`
197
+
198
+ ### `addUsage({ vendor, model, inputTokens, outputTokens })`
199
+
200
+ Buffers usage from one AI call. Requires `apiKey` in constructor.
201
+
202
+ ### `track({ customerId, revenueAmountInCents?, eventType?, uniqueRequestToken?, occurredAt? })`
203
+
204
+ Creates an event from all currently buffered usage entries and enqueues it for delivery.
205
+ Requires `apiKey`.
206
+
207
+ ### `flush()`
208
+
209
+ Immediately sends queued events. Returns `Promise<void>`.
210
+
211
+ ### `shutdown()`
212
+
213
+ Stops the background flush timer and flushes remaining events. Returns `Promise<void>`.
214
+ Call this before process exit.
215
+
216
+ ## Configuration
217
+
218
+ ```typescript
219
+ import { AiCostCalc } from "ai-cost-calc";
220
+
221
+ const md = new AiCostCalc({
222
+ apiKey: process.env.AI_COST_CALC_API_KEY, // optional for cost(); required for tracking
223
+ baseUrl: "https://margindash.com/api/v1",
224
+ flushIntervalMs: 5000,
225
+ maxRetries: 3,
226
+ defaultEventType: "ai_request",
227
+ debug: false,
228
+ onError: (err) => console.error(err.message),
229
+ });
230
+ ```
231
+
232
+ Options:
233
+ - `apiKey` (optional)
234
+ - `baseUrl` (default `https://margindash.com/api/v1`)
235
+ - `flushIntervalMs` (default `5000`, must be a finite number `> 0` when `apiKey` is set)
236
+ - `maxRetries` (default `3`, must be a non-negative integer)
237
+ - `defaultEventType` (default `ai_request`)
238
+ - `debug` (default `false`)
239
+ - `onError` (optional callback)
240
+
241
+ ## Error Handling
242
+
243
+ The SDK is non-throwing for normal tracking/cost failures and reports errors via:
244
+ - `onError` callback
245
+ - console logs when `debug: true`
246
+
247
+ Example:
248
+
249
+ ```typescript
250
+ const md = new AiCostCalc({
251
+ apiKey: process.env.AI_COST_CALC_API_KEY,
252
+ onError: (err) => console.error(err.message),
253
+ });
254
+ ```
255
+
256
+ ## Delivery Semantics
257
+
258
+ Tracking behavior:
259
+ - in-memory queue size limit: 1000 events (oldest dropped when full)
260
+ - pending usage limit before `track`: 1000 items (oldest dropped when full)
261
+ - batch size: 50 events/request
262
+ - retries on network errors, HTTP `429`, and `5xx` with exponential backoff
263
+
264
+ Idempotency:
265
+ - `uniqueRequestToken` is the idempotency key for an event
266
+ - if omitted, SDK auto-generates a UUID
267
+ - for retry-safe exactly-once behavior across your own retries, provide your own stable token
268
+
269
+ ## Privacy
270
+
271
+ Free cost mode only fetches pricing data.
272
+ If tracking is enabled, the SDK sends event metadata (for example: customer ID, event type, revenue), plus model/vendor and token counts.
273
+ Request/response content is not sent.
274
+
275
+ ## Troubleshooting
276
+
277
+ - `cost()` returns `null`:
278
+ - verify model slug
279
+ - check network access to the pricing API
280
+ - add `onError` and/or `debug: true`
281
+ - numbers look outdated:
282
+ - pricing cache TTL is 24 hours per `AiCostCalc` instance
283
+ - create a new `AiCostCalc` instance for an immediate refresh if needed
284
+ - text estimation fails:
285
+ - install `js-tiktoken` (`npm install js-tiktoken`)
286
+ - tracking methods appear to do nothing:
287
+ - confirm `apiKey` is set in constructor
288
+ - events missing on shutdown:
289
+ - `await md.shutdown()` before process exits
290
+
291
+ ## Versioning and Releases
292
+
293
+ This SDK follows semantic versioning.
294
+
295
+ - npm package: `ai-cost-calc`
296
+ - check release history on npm/GitHub before major upgrades
297
+
298
+ ## License
299
+
300
+ MIT
@@ -0,0 +1,116 @@
1
+ /** Payload passed to `AiCostCalc.track()`. */
2
+ interface EventPayload {
3
+ customerId: string;
4
+ revenueAmountInCents?: number;
5
+ uniqueRequestToken?: string;
6
+ eventType?: string;
7
+ occurredAt?: string;
8
+ }
9
+ /** Usage data from a single AI API call. */
10
+ interface UsageData {
11
+ vendor: string;
12
+ model: string;
13
+ inputTokens: number;
14
+ outputTokens: number;
15
+ }
16
+ /** Pricing data for a single model. */
17
+ interface ModelPricing {
18
+ slug: string;
19
+ inputPricePer1M: number;
20
+ outputPricePer1M: number;
21
+ }
22
+ /** Result of a cost calculation. */
23
+ interface CostResult {
24
+ model: string;
25
+ inputCost: number;
26
+ outputCost: number;
27
+ totalCost: number;
28
+ inputTokens?: number;
29
+ outputTokens?: number;
30
+ estimated?: boolean;
31
+ }
32
+ /** SDK configuration options. */
33
+ interface AiCostCalcConfig {
34
+ apiKey?: string;
35
+ baseUrl?: string;
36
+ flushIntervalMs?: number;
37
+ maxRetries?: number;
38
+ defaultEventType?: string;
39
+ onError?: (error: AiCostCalcError) => void;
40
+ debug?: boolean;
41
+ }
42
+ /** Error information surfaced via the `onError` callback. */
43
+ interface AiCostCalcError {
44
+ message: string;
45
+ cause?: unknown;
46
+ }
47
+
48
+ /**
49
+ * ai-cost-calc client.
50
+ *
51
+ * Queues events in memory and flushes them to the API in
52
+ * batches on a timer. Call {@link shutdown} when your process is about
53
+ * to exit so that remaining events are delivered.
54
+ */
55
+ declare class AiCostCalc {
56
+ private readonly apiKey;
57
+ private readonly baseUrl;
58
+ private readonly maxRetries;
59
+ private readonly defaultEventType;
60
+ private readonly onError?;
61
+ private readonly debug;
62
+ private flushTimer;
63
+ private shutdownPromise;
64
+ private queue;
65
+ private pendingUsages;
66
+ private readonly boundBeforeExit;
67
+ private apiKeyWarned;
68
+ private pricingCache;
69
+ private pricingFetchedAt;
70
+ private pricingPromise;
71
+ private pricingFailedAt;
72
+ private static readonly PRICING_CACHE_TTL_MS;
73
+ private static readonly PRICING_FAILURE_BACKOFF_MS;
74
+ constructor(config?: AiCostCalcConfig);
75
+ /**
76
+ * Calculate the cost of an AI API call using live pricing data.
77
+ * No API key required — pricing is fetched from the public models endpoint.
78
+ * Returns null if the model is unknown or pricing data is unavailable.
79
+ *
80
+ * Pass (model, inputTokens, outputTokens) for exact costs, or
81
+ * (model, inputText, outputText?) for estimated costs using js-tiktoken.
82
+ */
83
+ cost(model: string, inputTokens: number, outputTokens: number): Promise<CostResult | null>;
84
+ cost(model: string, inputText: string, outputText?: string): Promise<CostResult | null>;
85
+ private loadTokenizer;
86
+ addUsage(usage: UsageData): void;
87
+ /**
88
+ * Enqueue an event for delivery. This method is synchronous and will
89
+ * never throw -- errors are silently swallowed so that tracking can
90
+ * never crash the host application.
91
+ *
92
+ * All usage entries previously added via {@link addUsage} are drained
93
+ * and attached to the event.
94
+ */
95
+ track(event: EventPayload): void;
96
+ /**
97
+ * Flush all queued events to the API immediately.
98
+ */
99
+ flush(): Promise<void>;
100
+ /**
101
+ * Flush remaining events and stop the background timer.
102
+ * Call this before your process exits.
103
+ */
104
+ shutdown(): Promise<void>;
105
+ private requireApiKey;
106
+ private ensurePricing;
107
+ private enqueue;
108
+ private drain;
109
+ private toWireEvent;
110
+ private fetchWithRetry;
111
+ private sendBatch;
112
+ private reportError;
113
+ private log;
114
+ }
115
+
116
+ export { AiCostCalc, type AiCostCalcConfig, type AiCostCalcError, type CostResult, type EventPayload, type ModelPricing, type UsageData };
@@ -0,0 +1,116 @@
1
+ /** Payload passed to `AiCostCalc.track()`. */
2
+ interface EventPayload {
3
+ customerId: string;
4
+ revenueAmountInCents?: number;
5
+ uniqueRequestToken?: string;
6
+ eventType?: string;
7
+ occurredAt?: string;
8
+ }
9
+ /** Usage data from a single AI API call. */
10
+ interface UsageData {
11
+ vendor: string;
12
+ model: string;
13
+ inputTokens: number;
14
+ outputTokens: number;
15
+ }
16
+ /** Pricing data for a single model. */
17
+ interface ModelPricing {
18
+ slug: string;
19
+ inputPricePer1M: number;
20
+ outputPricePer1M: number;
21
+ }
22
+ /** Result of a cost calculation. */
23
+ interface CostResult {
24
+ model: string;
25
+ inputCost: number;
26
+ outputCost: number;
27
+ totalCost: number;
28
+ inputTokens?: number;
29
+ outputTokens?: number;
30
+ estimated?: boolean;
31
+ }
32
+ /** SDK configuration options. */
33
+ interface AiCostCalcConfig {
34
+ apiKey?: string;
35
+ baseUrl?: string;
36
+ flushIntervalMs?: number;
37
+ maxRetries?: number;
38
+ defaultEventType?: string;
39
+ onError?: (error: AiCostCalcError) => void;
40
+ debug?: boolean;
41
+ }
42
+ /** Error information surfaced via the `onError` callback. */
43
+ interface AiCostCalcError {
44
+ message: string;
45
+ cause?: unknown;
46
+ }
47
+
48
+ /**
49
+ * ai-cost-calc client.
50
+ *
51
+ * Queues events in memory and flushes them to the API in
52
+ * batches on a timer. Call {@link shutdown} when your process is about
53
+ * to exit so that remaining events are delivered.
54
+ */
55
+ declare class AiCostCalc {
56
+ private readonly apiKey;
57
+ private readonly baseUrl;
58
+ private readonly maxRetries;
59
+ private readonly defaultEventType;
60
+ private readonly onError?;
61
+ private readonly debug;
62
+ private flushTimer;
63
+ private shutdownPromise;
64
+ private queue;
65
+ private pendingUsages;
66
+ private readonly boundBeforeExit;
67
+ private apiKeyWarned;
68
+ private pricingCache;
69
+ private pricingFetchedAt;
70
+ private pricingPromise;
71
+ private pricingFailedAt;
72
+ private static readonly PRICING_CACHE_TTL_MS;
73
+ private static readonly PRICING_FAILURE_BACKOFF_MS;
74
+ constructor(config?: AiCostCalcConfig);
75
+ /**
76
+ * Calculate the cost of an AI API call using live pricing data.
77
+ * No API key required — pricing is fetched from the public models endpoint.
78
+ * Returns null if the model is unknown or pricing data is unavailable.
79
+ *
80
+ * Pass (model, inputTokens, outputTokens) for exact costs, or
81
+ * (model, inputText, outputText?) for estimated costs using js-tiktoken.
82
+ */
83
+ cost(model: string, inputTokens: number, outputTokens: number): Promise<CostResult | null>;
84
+ cost(model: string, inputText: string, outputText?: string): Promise<CostResult | null>;
85
+ private loadTokenizer;
86
+ addUsage(usage: UsageData): void;
87
+ /**
88
+ * Enqueue an event for delivery. This method is synchronous and will
89
+ * never throw -- errors are silently swallowed so that tracking can
90
+ * never crash the host application.
91
+ *
92
+ * All usage entries previously added via {@link addUsage} are drained
93
+ * and attached to the event.
94
+ */
95
+ track(event: EventPayload): void;
96
+ /**
97
+ * Flush all queued events to the API immediately.
98
+ */
99
+ flush(): Promise<void>;
100
+ /**
101
+ * Flush remaining events and stop the background timer.
102
+ * Call this before your process exits.
103
+ */
104
+ shutdown(): Promise<void>;
105
+ private requireApiKey;
106
+ private ensurePricing;
107
+ private enqueue;
108
+ private drain;
109
+ private toWireEvent;
110
+ private fetchWithRetry;
111
+ private sendBatch;
112
+ private reportError;
113
+ private log;
114
+ }
115
+
116
+ export { AiCostCalc, type AiCostCalcConfig, type AiCostCalcError, type CostResult, type EventPayload, type ModelPricing, type UsageData };
package/dist/index.js ADDED
@@ -0,0 +1,339 @@
1
+ "use strict";Object.defineProperty(exports, "__esModule", {value: true}); function _interopRequireWildcard(obj) { if (obj && obj.__esModule) { return obj; } else { var newObj = {}; if (obj != null) { for (var key in obj) { if (Object.prototype.hasOwnProperty.call(obj, key)) { newObj[key] = obj[key]; } } } newObj.default = obj; return newObj; } } function _nullishCoalesce(lhs, rhsFn) { if (lhs != null) { return lhs; } else { return rhsFn(); } } function _optionalChain(ops) { let lastAccessLHS = undefined; let value = ops[0]; let i = 1; while (i < ops.length) { const op = ops[i]; const fn = ops[i + 1]; i += 2; if ((op === 'optionalAccess' || op === 'optionalCall') && value == null) { return undefined; } if (op === 'access' || op === 'optionalAccess') { lastAccessLHS = value; value = fn(value); } else if (op === 'call' || op === 'optionalCall') { value = fn((...args) => value.call(lastAccessLHS, ...args)); lastAccessLHS = undefined; } } return value; } var _class;// src/client.ts
2
+ var DEFAULT_BASE_URL = "https://margindash.com/api/v1";
3
+ var DEFAULT_FLUSH_INTERVAL_MS = 5e3;
4
+ var DEFAULT_MAX_RETRIES = 3;
5
+ var DEFAULT_EVENT_TYPE = "ai_request";
6
+ var MAX_QUEUE_SIZE = 1e3;
7
+ var BATCH_SIZE = 50;
8
+ var MAX_PENDING_USAGES = 1e3;
9
+ var SDK_VERSION = "1.3.5";
10
+ var HTTP_TIMEOUT_MS = 1e4;
11
+ var MAX_BACKOFF_MS = 3e4;
12
+ var AiCostCalc = (_class = class _AiCostCalc {
13
+
14
+
15
+
16
+
17
+
18
+
19
+ // eslint-disable-next-line @typescript-eslint/no-explicit-any
20
+ __init() {this.flushTimer = null}
21
+ __init2() {this.shutdownPromise = null}
22
+ __init3() {this.queue = []}
23
+ __init4() {this.pendingUsages = []}
24
+
25
+ __init5() {this.apiKeyWarned = false}
26
+ // Pricing cache
27
+ __init6() {this.pricingCache = null}
28
+ __init7() {this.pricingFetchedAt = 0}
29
+ __init8() {this.pricingPromise = null}
30
+ __init9() {this.pricingFailedAt = 0}
31
+ static __initStatic() {this.PRICING_CACHE_TTL_MS = 864e5}
32
+ // 24 hours
33
+ static __initStatic2() {this.PRICING_FAILURE_BACKOFF_MS = 6e4}
34
+ // 60 seconds
35
+ constructor(config = {}) {;_class.prototype.__init.call(this);_class.prototype.__init2.call(this);_class.prototype.__init3.call(this);_class.prototype.__init4.call(this);_class.prototype.__init5.call(this);_class.prototype.__init6.call(this);_class.prototype.__init7.call(this);_class.prototype.__init8.call(this);_class.prototype.__init9.call(this);
36
+ this.apiKey = _nullishCoalesce(_optionalChain([config, 'access', _ => _.apiKey, 'optionalAccess', _2 => _2.trim, 'call', _3 => _3()]), () => ( ""));
37
+ this.baseUrl = (_nullishCoalesce(config.baseUrl, () => ( DEFAULT_BASE_URL))).replace(/\/+$/, "");
38
+ this.maxRetries = _nullishCoalesce(config.maxRetries, () => ( DEFAULT_MAX_RETRIES));
39
+ if (!Number.isInteger(this.maxRetries) || this.maxRetries < 0) {
40
+ throw new Error("maxRetries must be a non-negative integer");
41
+ }
42
+ this.defaultEventType = _nullishCoalesce(config.defaultEventType, () => ( DEFAULT_EVENT_TYPE));
43
+ this.onError = config.onError;
44
+ this.debug = _nullishCoalesce(config.debug, () => ( false));
45
+ if (this.apiKey) {
46
+ const intervalMs = _nullishCoalesce(config.flushIntervalMs, () => ( DEFAULT_FLUSH_INTERVAL_MS));
47
+ if (!Number.isFinite(intervalMs) || intervalMs <= 0) {
48
+ throw new Error("flushIntervalMs must be a finite number > 0");
49
+ }
50
+ this.flushTimer = setInterval(() => {
51
+ void this.flush();
52
+ }, intervalMs);
53
+ if (this.flushTimer && typeof this.flushTimer.unref === "function") {
54
+ this.flushTimer.unref();
55
+ }
56
+ }
57
+ this.boundBeforeExit = () => {
58
+ void this.shutdown();
59
+ };
60
+ if (this.apiKey && typeof process !== "undefined" && typeof process.on === "function") {
61
+ process.on("beforeExit", this.boundBeforeExit);
62
+ }
63
+ }
64
+ async cost(model, inputOrText, outputOrText) {
65
+ try {
66
+ let inputTokens;
67
+ let outputTokens;
68
+ let estimated = false;
69
+ if (typeof inputOrText === "string") {
70
+ if (outputOrText !== void 0 && typeof outputOrText !== "string") {
71
+ this.reportError({ message: "outputText must be a string when using text-based estimation" });
72
+ return null;
73
+ }
74
+ const tokenizer = await this.loadTokenizer();
75
+ if (!tokenizer) return null;
76
+ inputTokens = tokenizer(inputOrText, model);
77
+ outputTokens = typeof outputOrText === "string" ? tokenizer(outputOrText, model) : 0;
78
+ estimated = true;
79
+ } else if (typeof inputOrText === "number") {
80
+ if (typeof outputOrText !== "number") {
81
+ this.reportError({ message: "outputTokens is required when using token counts" });
82
+ return null;
83
+ }
84
+ if (!Number.isInteger(inputOrText) || !Number.isInteger(outputOrText) || inputOrText < 0 || outputOrText < 0) {
85
+ this.reportError({ message: "Token counts must be non-negative integers" });
86
+ return null;
87
+ }
88
+ inputTokens = inputOrText;
89
+ outputTokens = outputOrText;
90
+ } else {
91
+ this.reportError({ message: "Invalid arguments: pass (model, inputTokens, outputTokens) or (model, inputText, outputText?)" });
92
+ return null;
93
+ }
94
+ await this.ensurePricing();
95
+ const pricing = _optionalChain([this, 'access', _4 => _4.pricingCache, 'optionalAccess', _5 => _5.get, 'call', _6 => _6(model)]);
96
+ if (!pricing) return null;
97
+ const inputCost = inputTokens * pricing.inputPricePer1M / 1e6;
98
+ const outputCost = outputTokens * pricing.outputPricePer1M / 1e6;
99
+ return { model, inputCost, outputCost, totalCost: inputCost + outputCost, inputTokens, outputTokens, estimated };
100
+ } catch (e) {
101
+ this.reportError({ message: e instanceof Error ? e.message : "Unexpected error in cost()" });
102
+ return null;
103
+ }
104
+ }
105
+ async loadTokenizer() {
106
+ try {
107
+ const { countTokens } = await Promise.resolve().then(() => _interopRequireWildcard(require("./tokenizer.js")));
108
+ return countTokens;
109
+ } catch (e2) {
110
+ this.reportError({ message: "js-tiktoken is required for text-based cost estimation. Install it with: npm install js-tiktoken" });
111
+ return null;
112
+ }
113
+ }
114
+ addUsage(usage) {
115
+ if (!this.requireApiKey("addUsage")) return;
116
+ if (this.pendingUsages.length >= MAX_PENDING_USAGES) {
117
+ this.log(`pendingUsages limit reached (${MAX_PENDING_USAGES}), dropping oldest entry`);
118
+ this.pendingUsages.shift();
119
+ }
120
+ this.pendingUsages.push(usage);
121
+ }
122
+ /**
123
+ * Enqueue an event for delivery. This method is synchronous and will
124
+ * never throw -- errors are silently swallowed so that tracking can
125
+ * never crash the host application.
126
+ *
127
+ * All usage entries previously added via {@link addUsage} are drained
128
+ * and attached to the event.
129
+ */
130
+ track(event) {
131
+ if (!this.requireApiKey("track")) return;
132
+ if (this.shutdownPromise !== null) return;
133
+ try {
134
+ const usages = this.pendingUsages;
135
+ this.pendingUsages = [];
136
+ const wire = this.toWireEvent(event, usages);
137
+ this.enqueue(wire);
138
+ this.log(`event enqueued (queue: ${this.queue.length})`);
139
+ } catch (err) {
140
+ this.reportError({ message: "Failed to enqueue event", cause: err });
141
+ }
142
+ }
143
+ /**
144
+ * Flush all queued events to the API immediately.
145
+ */
146
+ async flush() {
147
+ if (!this.apiKey) return;
148
+ const batches = this.drain();
149
+ if (batches.length === 0) return;
150
+ const eventCount = batches.reduce((sum, b) => sum + b.length, 0);
151
+ this.log(`flushing ${eventCount} ${eventCount === 1 ? "event" : "events"} in ${batches.length} ${batches.length === 1 ? "batch" : "batches"}`);
152
+ await Promise.allSettled(batches.map((batch) => this.sendBatch(batch)));
153
+ }
154
+ /**
155
+ * Flush remaining events and stop the background timer.
156
+ * Call this before your process exits.
157
+ */
158
+ async shutdown() {
159
+ if (!this.apiKey) return;
160
+ if (this.shutdownPromise !== null) return this.shutdownPromise;
161
+ this.shutdownPromise = (async () => {
162
+ if (typeof process !== "undefined" && typeof process.removeListener === "function") {
163
+ process.removeListener("beforeExit", this.boundBeforeExit);
164
+ }
165
+ if (this.flushTimer !== null) {
166
+ clearInterval(this.flushTimer);
167
+ this.flushTimer = null;
168
+ }
169
+ await this.flush();
170
+ })();
171
+ return this.shutdownPromise;
172
+ }
173
+ // ---------------------------------------------------------------------------
174
+ // API key guard
175
+ // ---------------------------------------------------------------------------
176
+ requireApiKey(method) {
177
+ if (this.apiKey) return true;
178
+ if (!this.apiKeyWarned) {
179
+ this.reportError({ message: `apiKey required for ${method} \u2014 calls will be skipped` });
180
+ this.apiKeyWarned = true;
181
+ }
182
+ return false;
183
+ }
184
+ // ---------------------------------------------------------------------------
185
+ // Pricing
186
+ // ---------------------------------------------------------------------------
187
+ async ensurePricing() {
188
+ const now = Date.now();
189
+ if (this.pricingCache && now - this.pricingFetchedAt < _AiCostCalc.PRICING_CACHE_TTL_MS) {
190
+ return;
191
+ }
192
+ if (this.pricingFailedAt && now - this.pricingFailedAt < _AiCostCalc.PRICING_FAILURE_BACKOFF_MS) {
193
+ return;
194
+ }
195
+ if (this.pricingPromise) return this.pricingPromise;
196
+ this.pricingPromise = (async () => {
197
+ try {
198
+ const res = await fetch(`${this.baseUrl}/models`, {
199
+ headers: { "User-Agent": `ai-cost-calc-node/${SDK_VERSION}` },
200
+ signal: AbortSignal.timeout(HTTP_TIMEOUT_MS)
201
+ });
202
+ if (!res.ok) throw new Error(`status ${res.status}`);
203
+ const data = await res.json();
204
+ const map = /* @__PURE__ */ new Map();
205
+ if (Array.isArray(data.vendors)) {
206
+ for (const vendor of data.vendors) {
207
+ if (typeof vendor !== "object" || vendor === null) continue;
208
+ if (!Array.isArray(vendor.models)) continue;
209
+ for (const m of vendor.models) {
210
+ if (typeof m !== "object" || m === null) continue;
211
+ const input = m.input_price_per_1m;
212
+ const output = m.output_price_per_1m;
213
+ if (!m.slug || !Number.isFinite(input) || !Number.isFinite(output)) continue;
214
+ map.set(m.slug, {
215
+ slug: m.slug,
216
+ inputPricePer1M: input,
217
+ outputPricePer1M: output
218
+ });
219
+ }
220
+ }
221
+ }
222
+ this.pricingCache = map;
223
+ this.pricingFetchedAt = Date.now();
224
+ this.pricingFailedAt = 0;
225
+ this.log(`pricing loaded (${map.size} models)`);
226
+ } catch (err) {
227
+ this.pricingFailedAt = Date.now();
228
+ this.reportError({ message: "Failed to fetch pricing data", cause: err });
229
+ } finally {
230
+ this.pricingPromise = null;
231
+ }
232
+ })();
233
+ return this.pricingPromise;
234
+ }
235
+ // ---------------------------------------------------------------------------
236
+ // Queue
237
+ // ---------------------------------------------------------------------------
238
+ enqueue(event) {
239
+ if (this.queue.length >= MAX_QUEUE_SIZE) {
240
+ this.queue.shift();
241
+ this.log(`queue full (${MAX_QUEUE_SIZE}), dropping oldest event`);
242
+ }
243
+ this.queue.push(event);
244
+ }
245
+ drain() {
246
+ if (this.queue.length === 0) return [];
247
+ const all = this.queue;
248
+ this.queue = [];
249
+ const batches = [];
250
+ for (let i = 0; i < all.length; i += BATCH_SIZE) {
251
+ batches.push(all.slice(i, i + BATCH_SIZE));
252
+ }
253
+ return batches;
254
+ }
255
+ // ---------------------------------------------------------------------------
256
+ // Serialization
257
+ // ---------------------------------------------------------------------------
258
+ toWireEvent(event, usages) {
259
+ const wire = {
260
+ customer_id: event.customerId,
261
+ revenue_amount_in_cents: _nullishCoalesce(event.revenueAmountInCents, () => ( null)),
262
+ vendor_responses: usages.map((u) => ({
263
+ vendor_name: u.vendor,
264
+ ai_model_name: u.model,
265
+ input_tokens: u.inputTokens,
266
+ output_tokens: u.outputTokens
267
+ })),
268
+ unique_request_token: _nullishCoalesce(event.uniqueRequestToken, () => ( crypto.randomUUID())),
269
+ event_type: _nullishCoalesce(event.eventType, () => ( this.defaultEventType)),
270
+ occurred_at: _nullishCoalesce(event.occurredAt, () => ( (/* @__PURE__ */ new Date()).toISOString()))
271
+ };
272
+ return wire;
273
+ }
274
+ // ---------------------------------------------------------------------------
275
+ // HTTP
276
+ // ---------------------------------------------------------------------------
277
+ async fetchWithRetry(url, init) {
278
+ let lastError;
279
+ for (let attempt = 0; attempt <= this.maxRetries; attempt++) {
280
+ try {
281
+ const res = await fetch(url, init);
282
+ if (res.status >= 500 || res.status === 429) {
283
+ lastError = res;
284
+ } else {
285
+ return res;
286
+ }
287
+ } catch (err) {
288
+ lastError = err;
289
+ }
290
+ if (attempt < this.maxRetries) {
291
+ const status = lastError instanceof Response ? `status ${lastError.status}` : "network error";
292
+ this.log(`${status}, retrying (${attempt + 1}/${this.maxRetries})`);
293
+ const base = Math.min(1e3 * 2 ** attempt, MAX_BACKOFF_MS);
294
+ await new Promise((r) => setTimeout(r, base + Math.random() * base * 0.5));
295
+ }
296
+ }
297
+ throw lastError;
298
+ }
299
+ async sendBatch(events) {
300
+ try {
301
+ const response = await this.fetchWithRetry(`${this.baseUrl}/events`, {
302
+ method: "POST",
303
+ headers: {
304
+ "Content-Type": "application/json",
305
+ Authorization: `Bearer ${this.apiKey}`,
306
+ "User-Agent": `ai-cost-calc-node/${SDK_VERSION}`
307
+ },
308
+ body: JSON.stringify({ events }),
309
+ signal: AbortSignal.timeout(HTTP_TIMEOUT_MS)
310
+ });
311
+ this.log(`batch sent (status ${response.status}, ${events.length} ${events.length === 1 ? "event" : "events"})`);
312
+ if (!response.ok) {
313
+ let body = "";
314
+ try {
315
+ body = await response.text();
316
+ } catch (e3) {
317
+ }
318
+ this.reportError({ message: `Request failed with status ${response.status}: ${body}` });
319
+ }
320
+ } catch (err) {
321
+ const message = err instanceof Response ? `Request failed after retries (status ${err.status})` : "Request failed after retries";
322
+ this.reportError({ message, cause: err });
323
+ }
324
+ }
325
+ reportError(error) {
326
+ if (this.debug) console.error(`[ai-cost-calc] ${error.message}`);
327
+ if (!this.onError) return;
328
+ try {
329
+ this.onError(error);
330
+ } catch (e4) {
331
+ }
332
+ }
333
+ log(msg) {
334
+ if (this.debug) console.debug(`[ai-cost-calc] ${msg}`);
335
+ }
336
+ }, _class.__initStatic(), _class.__initStatic2(), _class);
337
+
338
+
339
+ exports.AiCostCalc = AiCostCalc;
package/dist/index.mjs ADDED
@@ -0,0 +1,339 @@
1
+ // src/client.ts
2
+ var DEFAULT_BASE_URL = "https://margindash.com/api/v1";
3
+ var DEFAULT_FLUSH_INTERVAL_MS = 5e3;
4
+ var DEFAULT_MAX_RETRIES = 3;
5
+ var DEFAULT_EVENT_TYPE = "ai_request";
6
+ var MAX_QUEUE_SIZE = 1e3;
7
+ var BATCH_SIZE = 50;
8
+ var MAX_PENDING_USAGES = 1e3;
9
+ var SDK_VERSION = "1.3.5";
10
+ var HTTP_TIMEOUT_MS = 1e4;
11
+ var MAX_BACKOFF_MS = 3e4;
12
// Client for the MarginDash cost/usage API. Tracks events in an in-memory
// queue that is flushed in batches on a background interval, and caches
// live model pricing fetched (unauthenticated) from `${baseUrl}/models`.
var AiCostCalc = class _AiCostCalc {
  apiKey;
  baseUrl;
  maxRetries;
  defaultEventType;
  onError;
  debug;
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  flushTimer = null;
  shutdownPromise = null;
  queue = [];
  pendingUsages = [];
  boundBeforeExit;
  apiKeyWarned = false;
  // Pricing cache
  pricingCache = null;
  pricingFetchedAt = 0;
  pricingPromise = null;
  pricingFailedAt = 0;
  static PRICING_CACHE_TTL_MS = 864e5;
  // 24 hours
  static PRICING_FAILURE_BACKOFF_MS = 6e4;
  // 60 seconds
  /**
   * @param {object} [config] - apiKey, baseUrl, maxRetries, defaultEventType,
   *   onError, debug, flushIntervalMs.
   * Without an apiKey the client runs in cost-lookup-only mode: no flush
   * timer is started and no process exit hook is installed.
   * @throws if maxRetries is not a non-negative integer, or (when an apiKey
   *   is set) flushIntervalMs is not a finite number > 0.
   */
  constructor(config = {}) {
    this.apiKey = config.apiKey?.trim() ?? "";
    this.baseUrl = (config.baseUrl ?? DEFAULT_BASE_URL).replace(/\/+$/, "");
    this.maxRetries = config.maxRetries ?? DEFAULT_MAX_RETRIES;
    if (!Number.isInteger(this.maxRetries) || this.maxRetries < 0) {
      throw new Error("maxRetries must be a non-negative integer");
    }
    this.defaultEventType = config.defaultEventType ?? DEFAULT_EVENT_TYPE;
    this.onError = config.onError;
    this.debug = config.debug ?? false;
    if (this.apiKey) {
      const intervalMs = config.flushIntervalMs ?? DEFAULT_FLUSH_INTERVAL_MS;
      if (!Number.isFinite(intervalMs) || intervalMs <= 0) {
        throw new Error("flushIntervalMs must be a finite number > 0");
      }
      this.flushTimer = setInterval(() => {
        void this.flush();
      }, intervalMs);
      // unref() so the flush timer never keeps the process alive
      // (guarded: unref does not exist in browser-style environments).
      if (this.flushTimer && typeof this.flushTimer.unref === "function") {
        this.flushTimer.unref();
      }
    }
    this.boundBeforeExit = () => {
      void this.shutdown();
    };
    if (this.apiKey && typeof process !== "undefined" && typeof process.on === "function") {
      process.on("beforeExit", this.boundBeforeExit);
    }
  }
  /**
   * Compute the cost of a request against live pricing.
   * Overloaded: (model, inputTokens, outputTokens) uses exact token counts;
   * (model, inputText, outputText?) estimates tokens via the optional
   * js-tiktoken tokenizer (result flagged `estimated: true`).
   * Returns null on any error or unknown model; never throws.
   */
  async cost(model, inputOrText, outputOrText) {
    try {
      let inputTokens;
      let outputTokens;
      let estimated = false;
      if (typeof inputOrText === "string") {
        if (outputOrText !== void 0 && typeof outputOrText !== "string") {
          this.reportError({ message: "outputText must be a string when using text-based estimation" });
          return null;
        }
        const tokenizer = await this.loadTokenizer();
        if (!tokenizer) return null;
        inputTokens = tokenizer(inputOrText, model);
        outputTokens = typeof outputOrText === "string" ? tokenizer(outputOrText, model) : 0;
        estimated = true;
      } else if (typeof inputOrText === "number") {
        if (typeof outputOrText !== "number") {
          this.reportError({ message: "outputTokens is required when using token counts" });
          return null;
        }
        if (!Number.isInteger(inputOrText) || !Number.isInteger(outputOrText) || inputOrText < 0 || outputOrText < 0) {
          this.reportError({ message: "Token counts must be non-negative integers" });
          return null;
        }
        inputTokens = inputOrText;
        outputTokens = outputOrText;
      } else {
        this.reportError({ message: "Invalid arguments: pass (model, inputTokens, outputTokens) or (model, inputText, outputText?)" });
        return null;
      }
      await this.ensurePricing();
      const pricing = this.pricingCache?.get(model);
      if (!pricing) return null;
      const inputCost = inputTokens * pricing.inputPricePer1M / 1e6;
      const outputCost = outputTokens * pricing.outputPricePer1M / 1e6;
      return { model, inputCost, outputCost, totalCost: inputCost + outputCost, inputTokens, outputTokens, estimated };
    } catch (e) {
      this.reportError({ message: e instanceof Error ? e.message : "Unexpected error in cost()" });
      return null;
    }
  }
  // Lazily import the optional tokenizer module; returns null (after
  // reporting an install hint) when js-tiktoken is not installed.
  async loadTokenizer() {
    try {
      const { countTokens } = await import("./tokenizer.mjs");
      return countTokens;
    } catch {
      this.reportError({ message: "js-tiktoken is required for text-based cost estimation. Install it with: npm install js-tiktoken" });
      return null;
    }
  }
  // Buffer a vendor usage record; all buffered records are drained into
  // the next track() call. Oldest entries are dropped beyond the cap.
  addUsage(usage) {
    if (!this.requireApiKey("addUsage")) return;
    if (this.pendingUsages.length >= MAX_PENDING_USAGES) {
      this.log(`pendingUsages limit reached (${MAX_PENDING_USAGES}), dropping oldest entry`);
      this.pendingUsages.shift();
    }
    this.pendingUsages.push(usage);
  }
  /**
   * Enqueue an event for delivery. This method is synchronous and will
   * never throw -- errors are silently swallowed so that tracking can
   * never crash the host application.
   *
   * All usage entries previously added via {@link addUsage} are drained
   * and attached to the event.
   */
  track(event) {
    if (!this.requireApiKey("track")) return;
    if (this.shutdownPromise !== null) return;
    try {
      const usages = this.pendingUsages;
      this.pendingUsages = [];
      const wire = this.toWireEvent(event, usages);
      this.enqueue(wire);
      this.log(`event enqueued (queue: ${this.queue.length})`);
    } catch (err) {
      this.reportError({ message: "Failed to enqueue event", cause: err });
    }
  }
  /**
   * Flush all queued events to the API immediately.
   */
  async flush() {
    if (!this.apiKey) return;
    const batches = this.drain();
    if (batches.length === 0) return;
    const eventCount = batches.reduce((sum, b) => sum + b.length, 0);
    this.log(`flushing ${eventCount} ${eventCount === 1 ? "event" : "events"} in ${batches.length} ${batches.length === 1 ? "batch" : "batches"}`);
    // allSettled: one failed batch must not abort the others.
    await Promise.allSettled(batches.map((batch) => this.sendBatch(batch)));
  }
  /**
   * Flush remaining events and stop the background timer.
   * Call this before your process exits.
   */
  async shutdown() {
    if (!this.apiKey) return;
    // Idempotent: concurrent callers share the same in-flight promise.
    if (this.shutdownPromise !== null) return this.shutdownPromise;
    this.shutdownPromise = (async () => {
      if (typeof process !== "undefined" && typeof process.removeListener === "function") {
        process.removeListener("beforeExit", this.boundBeforeExit);
      }
      if (this.flushTimer !== null) {
        clearInterval(this.flushTimer);
        this.flushTimer = null;
      }
      await this.flush();
    })();
    return this.shutdownPromise;
  }
  // ---------------------------------------------------------------------------
  // API key guard
  // ---------------------------------------------------------------------------
  // Returns true when an apiKey is configured; otherwise reports a
  // one-time warning (first call only) and returns false.
  requireApiKey(method) {
    if (this.apiKey) return true;
    if (!this.apiKeyWarned) {
      this.reportError({ message: `apiKey required for ${method} \u2014 calls will be skipped` });
      this.apiKeyWarned = true;
    }
    return false;
  }
  // ---------------------------------------------------------------------------
  // Pricing
  // ---------------------------------------------------------------------------
  // Refresh the model-pricing cache when it is missing or older than the
  // 24h TTL. Single-flight: concurrent callers share pricingPromise.
  // On failure the previous (possibly stale) cache is kept and further
  // fetches are suppressed for the 60s failure backoff window.
  async ensurePricing() {
    const now = Date.now();
    if (this.pricingCache && now - this.pricingFetchedAt < _AiCostCalc.PRICING_CACHE_TTL_MS) {
      return;
    }
    if (this.pricingFailedAt && now - this.pricingFailedAt < _AiCostCalc.PRICING_FAILURE_BACKOFF_MS) {
      return;
    }
    if (this.pricingPromise) return this.pricingPromise;
    this.pricingPromise = (async () => {
      try {
        // No Authorization header: pricing lookups are unauthenticated.
        const res = await fetch(`${this.baseUrl}/models`, {
          headers: { "User-Agent": `ai-cost-calc-node/${SDK_VERSION}` },
          signal: AbortSignal.timeout(HTTP_TIMEOUT_MS)
        });
        if (!res.ok) throw new Error(`status ${res.status}`);
        const data = await res.json();
        const map = /* @__PURE__ */ new Map();
        // Defensive parse: skip malformed vendors/models rather than fail.
        if (Array.isArray(data.vendors)) {
          for (const vendor of data.vendors) {
            if (typeof vendor !== "object" || vendor === null) continue;
            if (!Array.isArray(vendor.models)) continue;
            for (const m of vendor.models) {
              if (typeof m !== "object" || m === null) continue;
              const input = m.input_price_per_1m;
              const output = m.output_price_per_1m;
              if (!m.slug || !Number.isFinite(input) || !Number.isFinite(output)) continue;
              map.set(m.slug, {
                slug: m.slug,
                inputPricePer1M: input,
                outputPricePer1M: output
              });
            }
          }
        }
        this.pricingCache = map;
        this.pricingFetchedAt = Date.now();
        this.pricingFailedAt = 0;
        this.log(`pricing loaded (${map.size} models)`);
      } catch (err) {
        this.pricingFailedAt = Date.now();
        this.reportError({ message: "Failed to fetch pricing data", cause: err });
      } finally {
        this.pricingPromise = null;
      }
    })();
    return this.pricingPromise;
  }
  // ---------------------------------------------------------------------------
  // Queue
  // ---------------------------------------------------------------------------
  // Append a wire event, dropping the oldest event once the queue is full.
  enqueue(event) {
    if (this.queue.length >= MAX_QUEUE_SIZE) {
      this.queue.shift();
      this.log(`queue full (${MAX_QUEUE_SIZE}), dropping oldest event`);
    }
    this.queue.push(event);
  }
  // Take the entire queue and split it into batches of BATCH_SIZE.
  drain() {
    if (this.queue.length === 0) return [];
    const all = this.queue;
    this.queue = [];
    const batches = [];
    for (let i = 0; i < all.length; i += BATCH_SIZE) {
      batches.push(all.slice(i, i + BATCH_SIZE));
    }
    return batches;
  }
  // ---------------------------------------------------------------------------
  // Serialization
  // ---------------------------------------------------------------------------
  // Convert a camelCase SDK event + drained usages into the snake_case
  // wire format, filling in defaults for token, type, and timestamp.
  // NOTE(review): uses the global WebCrypto `crypto.randomUUID()`; global
  // `crypto` is exposed by default from Node 19 — confirm against the
  // package's `engines: >=18` range.
  toWireEvent(event, usages) {
    const wire = {
      customer_id: event.customerId,
      revenue_amount_in_cents: event.revenueAmountInCents ?? null,
      vendor_responses: usages.map((u) => ({
        vendor_name: u.vendor,
        ai_model_name: u.model,
        input_tokens: u.inputTokens,
        output_tokens: u.outputTokens
      })),
      unique_request_token: event.uniqueRequestToken ?? crypto.randomUUID(),
      event_type: event.eventType ?? this.defaultEventType,
      occurred_at: event.occurredAt ?? (/* @__PURE__ */ new Date()).toISOString()
    };
    return wire;
  }
  // ---------------------------------------------------------------------------
  // HTTP
  // ---------------------------------------------------------------------------
  // fetch() with up to maxRetries retries on network errors, 5xx, and 429,
  // using capped exponential backoff (1s * 2^attempt, max 30s) plus up to
  // 50% jitter. After exhausting retries, rethrows the last failure —
  // which may be a Response object (HTTP failure), not an Error.
  async fetchWithRetry(url, init) {
    let lastError;
    for (let attempt = 0; attempt <= this.maxRetries; attempt++) {
      try {
        const res = await fetch(url, init);
        if (res.status >= 500 || res.status === 429) {
          lastError = res;
        } else {
          return res;
        }
      } catch (err) {
        lastError = err;
      }
      if (attempt < this.maxRetries) {
        const status = lastError instanceof Response ? `status ${lastError.status}` : "network error";
        this.log(`${status}, retrying (${attempt + 1}/${this.maxRetries})`);
        const base = Math.min(1e3 * 2 ** attempt, MAX_BACKOFF_MS);
        await new Promise((r) => setTimeout(r, base + Math.random() * base * 0.5));
      }
    }
    throw lastError;
  }
  // Deliver one batch of events; all failures are routed to reportError
  // so this method never throws.
  async sendBatch(events) {
    try {
      const response = await this.fetchWithRetry(`${this.baseUrl}/events`, {
        method: "POST",
        headers: {
          "Content-Type": "application/json",
          Authorization: `Bearer ${this.apiKey}`,
          "User-Agent": `ai-cost-calc-node/${SDK_VERSION}`
        },
        body: JSON.stringify({ events }),
        signal: AbortSignal.timeout(HTTP_TIMEOUT_MS)
      });
      this.log(`batch sent (status ${response.status}, ${events.length} ${events.length === 1 ? "event" : "events"})`);
      if (!response.ok) {
        let body = "";
        try {
          body = await response.text();
        } catch {
        }
        this.reportError({ message: `Request failed with status ${response.status}: ${body}` });
      }
    } catch (err) {
      const message = err instanceof Response ? `Request failed after retries (status ${err.status})` : "Request failed after retries";
      this.reportError({ message, cause: err });
    }
  }
  // Log to stderr when debug is on, then invoke the user onError callback,
  // guarded so a throwing callback cannot break the SDK.
  reportError(error) {
    if (this.debug) console.error(`[ai-cost-calc] ${error.message}`);
    if (!this.onError) return;
    try {
      this.onError(error);
    } catch {
    }
  }
  // Namespaced debug logging; no-op unless debug mode is enabled.
  log(msg) {
    if (this.debug) console.debug(`[ai-cost-calc] ${msg}`);
  }
};
337
+ export {
338
+ AiCostCalc
339
+ };
@@ -0,0 +1,3 @@
1
/** Count the tokens in `text` using the tokenizer for `model`. */
declare function countTokens(text: string, model: string): number;

export { countTokens };
@@ -0,0 +1,3 @@
1
/** Count the tokens in `text` using the tokenizer for `model`. */
declare function countTokens(text: string, model: string): number;

export { countTokens };
@@ -0,0 +1,25 @@
1
"use strict";Object.defineProperty(exports, "__esModule", {value: true});// src/tokenizer.ts
var _jstiktoken = require('js-tiktoken');

// Encoder instances are expensive to construct; memoize one per model name.
var cache = /* @__PURE__ */ new Map();
var MAX_CACHE_SIZE = 128;
var MAX_TEXT_LENGTH = 1e6;

/**
 * Count the tokens in `text` using the tokenizer for `model`.
 * Unrecognized models fall back to the cl100k_base encoding.
 * @throws {Error} when text is longer than 1,000,000 characters
 */
function countTokens(text, model) {
  if (text.length > MAX_TEXT_LENGTH) throw new Error("Text exceeds 1MB limit");
  let encoder = cache.get(model);
  if (encoder === undefined) {
    // FIFO eviction keeps the cache bounded at MAX_CACHE_SIZE entries.
    if (cache.size >= MAX_CACHE_SIZE) {
      cache.delete(cache.keys().next().value);
    }
    try {
      encoder = _jstiktoken.encodingForModel.call(void 0, model);
    } catch (e) {
      encoder = _jstiktoken.getEncoding.call(void 0, "cl100k_base");
    }
    cache.set(model, encoder);
  }
  return encoder.encode(text).length;
}

exports.countTokens = countTokens;
@@ -0,0 +1,25 @@
1
// src/tokenizer.ts
import { encodingForModel, getEncoding } from "js-tiktoken";

// Encoder instances are expensive to construct; memoize one per model name.
var cache = /* @__PURE__ */ new Map();
var MAX_CACHE_SIZE = 128;
var MAX_TEXT_LENGTH = 1e6;

/**
 * Count the tokens in `text` using the tokenizer for `model`.
 * Unrecognized models fall back to the cl100k_base encoding.
 * @throws {Error} when text is longer than 1,000,000 characters
 */
function countTokens(text, model) {
  if (text.length > MAX_TEXT_LENGTH) throw new Error("Text exceeds 1MB limit");
  let encoder = cache.get(model);
  if (encoder === undefined) {
    // FIFO eviction keeps the cache bounded at MAX_CACHE_SIZE entries.
    if (cache.size >= MAX_CACHE_SIZE) {
      cache.delete(cache.keys().next().value);
    }
    try {
      encoder = encodingForModel(model);
    } catch {
      encoder = getEncoding("cl100k_base");
    }
    cache.set(model, encoder);
  }
  return encoder.encode(text).length;
}
export {
  countTokens
};
package/package.json ADDED
@@ -0,0 +1,54 @@
1
+ {
2
+ "name": "ai-cost-calc",
3
+ "version": "1.3.5",
4
+ "description": "AI cost calculator and usage tracker. Calculate LLM API costs for 400+ models (OpenAI, Anthropic, Google). No API key required for cost lookups.",
5
+ "main": "./dist/index.js",
6
+ "module": "./dist/index.mjs",
7
+ "types": "./dist/index.d.ts",
8
+ "exports": {
9
+ ".": {
10
+ "types": "./dist/index.d.ts",
11
+ "import": "./dist/index.mjs",
12
+ "require": "./dist/index.js"
13
+ }
14
+ },
15
+ "files": [
16
+ "dist"
17
+ ],
18
+ "engines": {
19
+ "node": ">=18"
20
+ },
21
+ "scripts": {
22
+ "build": "tsup",
23
+ "test": "tsx --test tests/client.test.ts",
24
+ "typecheck": "tsc --noEmit",
25
+ "clean": "rm -rf dist"
26
+ },
27
+ "keywords": [
28
+ "ai-cost",
29
+ "llm-cost",
30
+ "llm-pricing",
31
+ "token-cost",
32
+ "openai-cost",
33
+ "openai-pricing",
34
+ "anthropic-pricing",
35
+ "ai-usage",
36
+ "cost-tracker",
37
+ "cost-calculator",
38
+ "model-pricing",
39
+ "token-pricing",
40
+ "ai-billing",
41
+ "llm-usage",
42
+ "api-cost"
43
+ ],
44
+ "license": "MIT",
45
+ "optionalDependencies": {
46
+ "js-tiktoken": "^1.0.0"
47
+ },
48
+ "devDependencies": {
49
+ "@types/node": "^25.1.0",
50
+ "tsup": "^8.0.0",
51
+ "tsx": "^4.21.0",
52
+ "typescript": "^5.4.0"
53
+ }
54
+ }