ai-sdk-cost-calculator 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/LICENSE ADDED
@@ -0,0 +1,21 @@
1
+ MIT License
2
+
3
+ Copyright (c) 2026 agos
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
package/README.md ADDED
@@ -0,0 +1,317 @@
1
+ # ai-sdk-cost-calculator
2
+
3
+ Cost calculator for [Vercel AI SDK](https://sdk.vercel.ai/) token usage. Calculate API costs for OpenAI, Anthropic, Google, xAI (Grok), and DeepSeek with support for long context pricing, prompt caching, and reasoning tokens.
4
+
5
+ ## Features
6
+
7
+ - **Multi-provider support**: OpenAI, Anthropic, Google Gemini, xAI Grok, DeepSeek
8
+ - **AI SDK integration**: Works directly with `LanguageModelUsage` from AI SDK
9
+ - **Long context pricing**: Automatic tier-based pricing (e.g., Claude Sonnet 4.5 >200K tokens)
10
+ - **Prompt caching**: Separate pricing for cache reads and writes
11
+ - **Reasoning tokens**: Support for o1/o3/R1 reasoning model costs
12
+ - **Cost tracking**: Track cumulative costs across multiple requests
13
+ - **TypeScript**: Full type definitions included
14
+
15
+ ## Installation
16
+
17
+ ```bash
18
+ npm install ai-sdk-cost-calculator
19
+ ```
20
+
21
+ **Peer dependency**: Requires `ai` (Vercel AI SDK) version 6.0.0 or higher.
22
+
23
+ ```bash
24
+ npm install ai
25
+ ```
26
+
27
+ ## Quick Start
28
+
29
+ ```typescript
30
+ import { generateText } from "ai";
31
+ import { openai } from "@ai-sdk/openai";
32
+ import { calculateCost, formatCost } from "ai-sdk-cost-calculator";
33
+
34
+ const result = await generateText({
35
+ model: openai("gpt-4o"),
36
+ prompt: "Hello, world!",
37
+ });
38
+
39
+ const cost = calculateCost({
40
+ provider: "openai",
41
+ modelId: "gpt-4o",
42
+ usage: result.usage,
43
+ });
44
+
45
+ console.log(`Total cost: ${formatCost(cost.totalCost)}`);
46
+ // Output: Total cost: $0.000150
47
+ ```
48
+
49
+ ## API Reference
50
+
51
+ ### `calculateCost(options)`
52
+
53
+ Calculate the cost for a single API request.
54
+
55
+ ```typescript
56
+ import { calculateCost } from "ai-sdk-cost-calculator";
57
+
58
+ const cost = calculateCost({
59
+ provider: "anthropic",
60
+ modelId: "claude-sonnet-4-5",
61
+ usage: {
62
+ inputTokens: 1000,
63
+ outputTokens: 500,
64
+ inputTokenDetails: {
65
+ cacheReadTokens: 200,
66
+ cacheWriteTokens: 100,
67
+ },
68
+ outputTokenDetails: {
69
+ reasoningTokens: 0,
70
+ },
71
+ },
72
+ });
73
+
74
+ console.log(cost);
75
+ // {
76
+ // inputCost: 0.0024,
77
+ // outputCost: 0.0075,
78
+ // cacheReadCost: 0.00006,
79
+ // cacheWriteCost: 0.000375,
80
+ // reasoningCost: 0,
81
+ // totalCost: 0.010335,
82
+ // currency: "USD",
83
+ // isLongContext: false
84
+ // }
85
+ ```
86
+
87
+ #### Options
88
+
89
+ | Property | Type | Required | Description |
90
+ |----------|------|----------|-------------|
91
+ | `provider` | `string` | Yes | Provider name: `"openai"`, `"anthropic"`, `"google"`, `"xai"`, `"deepseek"` |
92
+ | `modelId` | `string` | Yes | Model identifier (e.g., `"gpt-4o"`, `"claude-sonnet-4-5"`) |
93
+ | `usage` | `LanguageModelUsage` | Yes | Token usage from AI SDK |
94
+ | `customPricing` | `ModelPricing` | No | Override default pricing |
95
+
96
+ #### Return Value: `CostBreakdown`
97
+
98
+ | Property | Type | Description |
99
+ |----------|------|-------------|
100
+ | `inputCost` | `number` | Cost for input tokens (excluding cache) |
101
+ | `outputCost` | `number` | Cost for output tokens (excluding reasoning) |
102
+ | `cacheReadCost` | `number` | Cost for cached input tokens |
103
+ | `cacheWriteCost` | `number` | Cost for writing to cache |
104
+ | `reasoningCost` | `number` | Cost for reasoning tokens |
105
+ | `totalCost` | `number` | Sum of all costs |
106
+ | `currency` | `"USD"` | Always USD |
107
+ | `isLongContext` | `boolean` | Whether long context pricing was applied |
108
+
109
+ ### `formatCost(cost, decimals?)`
110
+
111
+ Format a cost value as a currency string.
112
+
113
+ ```typescript
114
+ import { formatCost } from "ai-sdk-cost-calculator";
115
+
116
+ formatCost(0.0015); // "$0.001500"
117
+ formatCost(0.0015, 4); // "$0.0015"
118
+ ```
119
+
120
+ ### `formatCostBreakdown(breakdown, decimals?)`
121
+
122
+ Format a full cost breakdown as a multi-line string.
123
+
124
+ ```typescript
125
+ import { formatCostBreakdown } from "ai-sdk-cost-calculator";
126
+
127
+ const output = formatCostBreakdown(cost);
128
+ // Input: $0.002400
129
+ // Cache Read: $0.000060
130
+ // Cache Write: $0.000375
131
+ // Output: $0.007500
132
+ // Total: $0.010335
133
+ ```
134
+
135
+ ### `createCostTracker(options)`
136
+
137
+ Track cumulative costs across multiple requests.
138
+
139
+ ```typescript
140
+ import { createCostTracker } from "ai-sdk-cost-calculator";
141
+
142
+ const tracker = createCostTracker({
143
+ provider: "openai",
144
+ modelId: "gpt-4o",
145
+ });
146
+
147
+ // Add usage from multiple requests
148
+ tracker.addUsage(result1.usage);
149
+ tracker.addUsage(result2.usage);
150
+ tracker.addUsage(result3.usage);
151
+
152
+ console.log(`Requests: ${tracker.getRequestCount()}`);
153
+ console.log(`Total tokens: ${tracker.getTotalUsage().totalTokens}`);
154
+ console.log(`Total cost: ${formatCost(tracker.getTotalCost().totalCost)}`);
155
+
156
+ // Reset when needed
157
+ tracker.reset();
158
+ ```
159
+
160
+ ### `calculateStreamCost(streamResult, options)`
161
+
162
+ Calculate cost from a streaming response.
163
+
164
+ ```typescript
165
+ import { streamText } from "ai";
166
+ import { calculateStreamCost } from "ai-sdk-cost-calculator";
167
+
168
+ const stream = await streamText({
169
+ model: openai("gpt-4o"),
170
+ prompt: "Write a haiku",
171
+ });
172
+
173
+ const cost = await calculateStreamCost(stream, {
174
+ provider: "openai",
175
+ modelId: "gpt-4o",
176
+ onCost: (cost, usage) => {
177
+ console.log(`Stream completed: ${formatCost(cost.totalCost)}`);
178
+ },
179
+ });
180
+ ```
181
+
182
+ ### Pricing Data Access
183
+
184
+ Access raw pricing data for any provider or model.
185
+
186
+ ```typescript
187
+ import {
188
+ openaiPricing,
189
+ anthropicPricing,
190
+ googlePricing,
191
+ xaiPricing,
192
+ deepseekPricing,
193
+ getModelPricing,
194
+ getModelPricingByModelId,
195
+ getAllSupportedModels,
196
+ } from "ai-sdk-cost-calculator";
197
+
198
+ // Get pricing for a specific model
199
+ const pricing = getModelPricing("anthropic", "claude-sonnet-4-5");
200
+ console.log(pricing);
201
+ // {
202
+ // inputPer1MTokens: 3,
203
+ // outputPer1MTokens: 15,
204
+ // cacheReadPer1MTokens: 0.3,
205
+ // cacheWritePer1MTokens: 3.75,
206
+ // longContextThreshold: 200000,
207
+ // longContextInputPer1MTokens: 6,
208
+ // longContextOutputPer1MTokens: 22.5,
209
+ // ...
210
+ // }
211
+
212
+ // Get pricing by model ID only (searches all providers)
213
+ const pricing2 = getModelPricingByModelId("gpt-4o");
214
+
215
+ // List all supported models
216
+ const models = getAllSupportedModels();
217
+ // [{ provider: "openai", modelId: "gpt-4o" }, ...]
218
+ ```
219
+
220
+ ## Supported Models
221
+
222
+ ### OpenAI (23 models)
223
+ - GPT-4o, GPT-4o-mini
224
+ - GPT-4.1, GPT-4.1-mini, GPT-4.1-nano
225
+ - GPT-4 Turbo, GPT-4, GPT-4-32k
226
+ - GPT-3.5 Turbo
227
+ - o1, o1-mini, o1-pro, o1-preview
228
+ - o3, o3-mini, o3-pro
229
+ - o4-mini
230
+
231
+ ### Anthropic (23 models)
232
+ - Claude Opus 4.5, Sonnet 4.5, Haiku 4.5
233
+ - Claude Opus 4.1, Sonnet 4, Opus 4
234
+ - Claude 3.5 Sonnet, 3.5 Haiku
235
+ - Claude 3 Opus, 3 Sonnet, 3 Haiku
236
+ - All dated variants (e.g., `claude-3-5-sonnet-20241022`)
237
+
238
+ ### Google Gemini (17 models)
239
+ - Gemini 3 Pro, 3 Flash
240
+ - Gemini 2.5 Pro, 2.5 Flash, 2.5 Flash-Lite
241
+ - Gemini 2.0 Flash, 2.0 Flash-Lite
242
+ - Gemini 1.5 Pro, 1.5 Flash, 1.5 Flash-8B
243
+
244
+ ### xAI Grok (12 models)
245
+ - Grok 4, Grok 4-0709
246
+ - Grok 4 Fast (reasoning/non-reasoning)
247
+ - Grok 4.1 Fast (reasoning/non-reasoning)
248
+ - Grok Code Fast
249
+ - Grok 3, Grok 3-mini
250
+ - Grok 2, Grok 2 Vision
251
+
252
+ ### DeepSeek (10 models)
253
+ - DeepSeek Chat (V3, V3.2)
254
+ - DeepSeek Reasoner (R1)
255
+ - DeepSeek R1 Distill variants
256
+ - DeepSeek Coder
257
+
258
+ ## Long Context Pricing
259
+
260
+ Some models have tiered pricing based on input token count:
261
+
262
+ | Model | Threshold | Standard | Long Context |
263
+ |-------|-----------|----------|--------------|
264
+ | Claude Sonnet 4.5 | 200K | $3/$15 | $6/$22.50 |
265
+ | Gemini 2.5 Pro | 200K | $1.25/$10 | $2.50/$15 |
266
+ | Gemini 1.5 Pro | 128K | $1.25/$5 | $2.50/$10 |
267
+ | Grok 4 Fast | 128K | $0.20/$0.50 | $0.40/$1.00 |
268
+
269
+ The calculator automatically applies the correct pricing tier based on input token count.
270
+
271
+ ## Custom Pricing
272
+
273
+ Override default pricing for any model:
274
+
275
+ ```typescript
276
+ const cost = calculateCost({
277
+ provider: "openai",
278
+ modelId: "gpt-4o",
279
+ usage: result.usage,
280
+ customPricing: {
281
+ inputPer1MTokens: 2.0, // Custom rate
282
+ outputPer1MTokens: 8.0,
283
+ cacheReadPer1MTokens: 1.0,
284
+ },
285
+ });
286
+ ```
287
+
288
+ ## TypeScript Types
289
+
290
+ ```typescript
291
+ import type {
292
+ CostBreakdown,
293
+ CalculateCostOptions,
294
+ ModelPricing,
295
+ ProviderPricing,
296
+ Provider,
297
+ CostTracker,
298
+ CreateCostTrackerOptions,
299
+ StreamCostOptions,
300
+ } from "ai-sdk-cost-calculator";
301
+ ```
302
+
303
+ ## Pricing Sources
304
+
305
+ Pricing data is sourced from official provider documentation:
306
+
307
+ - [OpenAI API Pricing](https://openai.com/api/pricing/)
308
+ - [Anthropic Claude Pricing](https://www.anthropic.com/pricing)
309
+ - [Google Gemini API Pricing](https://ai.google.dev/gemini-api/docs/pricing)
310
+ - [xAI Grok API](https://docs.x.ai/docs/models)
311
+ - [DeepSeek API Pricing](https://api-docs.deepseek.com/quick_start/pricing)
312
+
313
+ **Note**: Prices may change. Please verify current pricing with official sources for production use.
314
+
315
+ ## License
316
+
317
+ MIT
@@ -0,0 +1,22 @@
1
import { LanguageModelUsage } from "ai";
import { type ModelPricing } from "./prices";
/**
 * Itemized cost of a single request, in US dollars.
 */
export interface CostBreakdown {
    /** Cost of input tokens, excluding cached tokens. */
    inputCost: number;
    /** Cost of output tokens, excluding reasoning tokens. */
    outputCost: number;
    /** Cost of input tokens served from the prompt cache. */
    cacheReadCost: number;
    /** Cost of writing input tokens to the prompt cache. */
    cacheWriteCost: number;
    /** Cost of reasoning tokens. */
    reasoningCost: number;
    /** Sum of all component costs. */
    totalCost: number;
    /** Costs are always reported in USD. */
    currency: "USD";
    /** True when long-context (tiered) pricing was applied. */
    isLongContext: boolean;
}
/**
 * Input to {@link calculateCost}.
 */
export interface CalculateCostOptions {
    /** Provider name, e.g. "openai", "anthropic", "google", "xai", "deepseek". */
    provider: string;
    /** Model identifier, e.g. "gpt-4o" or "claude-sonnet-4-5". */
    modelId: string;
    /** Token usage as reported by the AI SDK. */
    usage: LanguageModelUsage;
    /** Optional pricing override; skips the built-in pricing table lookup. */
    customPricing?: ModelPricing;
}
/**
 * Calculate the itemized USD cost of one request.
 * @throws Error when the provider/model pair is unknown and no customPricing is given.
 */
export declare function calculateCost(options: CalculateCostOptions): CostBreakdown;
/** Format a dollar amount as a string such as "$0.001500" (default 6 decimals). */
export declare function formatCost(cost: number, decimals?: number): string;
/** Format a CostBreakdown as a multi-line summary of nonzero components plus total. */
export declare function formatCostBreakdown(result: CostBreakdown, decimals?: number): string;
//# sourceMappingURL=calculator.d.ts.map
@@ -0,0 +1 @@
1
+ {"version":3,"file":"calculator.d.ts","sourceRoot":"","sources":["../src/calculator.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,kBAAkB,EAAE,MAAM,IAAI,CAAC;AACxC,OAAO,EAAmB,KAAK,YAAY,EAAE,MAAM,UAAU,CAAC;AAE9D,MAAM,WAAW,aAAa;IAC5B,SAAS,EAAE,MAAM,CAAC;IAClB,UAAU,EAAE,MAAM,CAAC;IACnB,aAAa,EAAE,MAAM,CAAC;IACtB,cAAc,EAAE,MAAM,CAAC;IACvB,aAAa,EAAE,MAAM,CAAC;IACtB,SAAS,EAAE,MAAM,CAAC;IAClB,QAAQ,EAAE,KAAK,CAAC;IAChB,aAAa,EAAE,OAAO,CAAC;CACxB;AAED,MAAM,WAAW,oBAAoB;IACnC,QAAQ,EAAE,MAAM,CAAC;IACjB,OAAO,EAAE,MAAM,CAAC;IAChB,KAAK,EAAE,kBAAkB,CAAC;IAC1B,aAAa,CAAC,EAAE,YAAY,CAAC;CAC9B;AAqDD,wBAAgB,aAAa,CAAC,OAAO,EAAE,oBAAoB,GAAG,aAAa,CAwD1E;AAMD,wBAAgB,UAAU,CAAC,IAAI,EAAE,MAAM,EAAE,QAAQ,GAAE,MAAU,GAAG,MAAM,CAErE;AAED,wBAAgB,mBAAmB,CACjC,MAAM,EAAE,aAAa,EACrB,QAAQ,GAAE,MAAU,GACnB,MAAM,CAwBR"}
@@ -0,0 +1,96 @@
1
+ import { getModelPricing } from "./prices";
2
/**
 * Resolve the effective per-1M-token rates for a request, applying the
 * long-context tier when the input size crosses the model's threshold
 * (default 200,000 tokens) and the model defines a long-context input rate.
 *
 * Fallbacks: cache reads default to the input rate, cache writes to 0,
 * and reasoning tokens to the output rate.
 */
function getEffectivePricing(pricing, inputTokens) {
    const threshold = pricing.longContextThreshold ?? 200000;
    const isLongContext = inputTokens > threshold &&
        pricing.longContextInputPer1MTokens !== undefined;
    if (isLongContext) {
        return {
            inputPer1MTokens: pricing.longContextInputPer1MTokens,
            outputPer1MTokens: pricing.longContextOutputPer1MTokens ?? pricing.outputPer1MTokens,
            cacheReadPer1MTokens: pricing.longContextCacheReadPer1MTokens ??
                pricing.cacheReadPer1MTokens ??
                pricing.longContextInputPer1MTokens,
            cacheWritePer1MTokens: pricing.longContextCacheWritePer1MTokens ??
                pricing.cacheWritePer1MTokens ??
                0,
            reasoningPer1MTokens: pricing.reasoningPer1MTokens ??
                pricing.longContextOutputPer1MTokens ??
                pricing.outputPer1MTokens,
            isLongContext: true,
        };
    }
    return {
        inputPer1MTokens: pricing.inputPer1MTokens,
        outputPer1MTokens: pricing.outputPer1MTokens,
        cacheReadPer1MTokens: pricing.cacheReadPer1MTokens ?? pricing.inputPer1MTokens,
        cacheWritePer1MTokens: pricing.cacheWritePer1MTokens ?? 0,
        reasoningPer1MTokens: pricing.reasoningPer1MTokens ?? pricing.outputPer1MTokens,
        isLongContext: false,
    };
}
/**
 * Calculate the itemized USD cost of a single request.
 *
 * Token counts come from usage.inputTokenDetails / outputTokenDetails when
 * present; otherwise they are derived from the aggregate inputTokens /
 * outputTokens counts. (Previously missing detail objects defaulted every
 * detailed count to 0, so a plain { inputTokens, outputTokens } usage —
 * the README Quick Start case — silently priced out to $0.)
 *
 * @param options { provider, modelId, usage, customPricing? }
 * @returns CostBreakdown with each component rounded to whole micro-dollars
 * @throws Error when the model is unknown and no customPricing is supplied
 */
export function calculateCost(options) {
    const { provider, modelId, usage, customPricing } = options;
    const pricing = customPricing ?? getModelPricing(provider, modelId);
    if (!pricing) {
        throw new Error(`Unknown model: ${provider}/${modelId}. Use customPricing option or add the model to prices.ts`);
    }
    // Extract token counts from details
    const cacheReadTokens = usage.inputTokenDetails?.cacheReadTokens ?? 0;
    const cacheWriteTokens = usage.inputTokenDetails?.cacheWriteTokens ?? 0;
    const reasoningTokens = usage.outputTokenDetails?.reasoningTokens ?? 0;
    // Fall back to the aggregate counts when the detail fields are absent so
    // usage objects without detail breakdowns are still billed. Matches the
    // documented example: inputCost covers inputTokens minus cache reads.
    const noCacheTokens = usage.inputTokenDetails?.noCacheTokens ??
        Math.max(0, (usage.inputTokens ?? 0) - cacheReadTokens);
    const textTokens = usage.outputTokenDetails?.textTokens ??
        Math.max(0, (usage.outputTokens ?? 0) - reasoningTokens);
    // Total input tokens for long context threshold check
    const totalInputTokens = usage.inputTokens ?? (noCacheTokens + cacheReadTokens);
    // Get effective pricing based on input token count
    const effectivePricing = getEffectivePricing(pricing, totalInputTokens);
    const inputCost = (noCacheTokens / 1_000_000) * effectivePricing.inputPer1MTokens;
    const outputCost = (textTokens / 1_000_000) * effectivePricing.outputPer1MTokens;
    const cacheReadCost = (cacheReadTokens / 1_000_000) * effectivePricing.cacheReadPer1MTokens;
    const cacheWriteCost = (cacheWriteTokens / 1_000_000) * effectivePricing.cacheWritePer1MTokens;
    const reasoningCost = (reasoningTokens / 1_000_000) * effectivePricing.reasoningPer1MTokens;
    const totalCost = inputCost +
        outputCost +
        cacheReadCost +
        cacheWriteCost +
        reasoningCost;
    return {
        inputCost: roundToMicroDollars(inputCost),
        outputCost: roundToMicroDollars(outputCost),
        cacheReadCost: roundToMicroDollars(cacheReadCost),
        cacheWriteCost: roundToMicroDollars(cacheWriteCost),
        reasoningCost: roundToMicroDollars(reasoningCost),
        totalCost: roundToMicroDollars(totalCost),
        currency: "USD",
        isLongContext: effectivePricing.isLongContext,
    };
}
/** Round to the nearest micro-dollar (1e-6 USD) to strip floating-point noise. */
function roundToMicroDollars(value) {
    return Math.round(value * 1_000_000) / 1_000_000;
}
71
/**
 * Format a dollar amount as a currency string.
 * Example: formatCost(0.0015) -> "$0.001500".
 *
 * @param cost     Amount in US dollars.
 * @param decimals Decimal places to render (default 6).
 */
export function formatCost(cost, decimals = 6) {
    const fixed = cost.toFixed(decimals);
    return "$" + fixed;
}
/**
 * Render a CostBreakdown as a multi-line summary.
 * A leading note is added when long-context pricing was applied, each
 * nonzero component is listed on its own line, and the total is always last.
 *
 * @param result   Breakdown to render.
 * @param decimals Decimal places passed through to formatCost (default 6).
 */
export function formatCostBreakdown(result, decimals = 6) {
    const components = [
        ["Input", result.inputCost],
        ["Cache Read", result.cacheReadCost],
        ["Cache Write", result.cacheWriteCost],
        ["Output", result.outputCost],
        ["Reasoning", result.reasoningCost],
    ];
    const out = [];
    if (result.isLongContext) {
        out.push(`(Long context pricing applied)`);
    }
    for (const [label, amount] of components) {
        if (amount > 0) {
            out.push(`${label}: ${formatCost(amount, decimals)}`);
        }
    }
    out.push(`Total: ${formatCost(result.totalCost, decimals)}`);
    return out.join("\n");
}
@@ -0,0 +1,84 @@
1
import { LanguageModelUsage } from 'ai';

/**
 * Per-model pricing, in US dollars per one million tokens.
 * The optional longContext* fields enable tiered pricing once the input
 * token count exceeds longContextThreshold.
 */
interface ModelPricing {
    /** USD per 1M non-cached input tokens. */
    inputPer1MTokens: number;
    /** USD per 1M output tokens. */
    outputPer1MTokens: number;
    /** USD per 1M input tokens read from the prompt cache. */
    cacheReadPer1MTokens?: number;
    /** USD per 1M input tokens written to the prompt cache. */
    cacheWritePer1MTokens?: number;
    /** USD per 1M reasoning tokens. */
    reasoningPer1MTokens?: number;
    /** Input-token count above which the long-context rates apply. */
    longContextThreshold?: number;
    /** Long-context USD per 1M input tokens. */
    longContextInputPer1MTokens?: number;
    /** Long-context USD per 1M output tokens. */
    longContextOutputPer1MTokens?: number;
    /** Long-context USD per 1M cache-read tokens. */
    longContextCacheReadPer1MTokens?: number;
    /** Long-context USD per 1M cache-write tokens. */
    longContextCacheWritePer1MTokens?: number;
}
/** Pricing table for one provider, keyed by model id. */
interface ProviderPricing {
    [modelId: string]: ModelPricing;
}

/** Pricing table for OpenAI models. */
declare const openaiPricing: ProviderPricing;

/** Pricing table for Anthropic models. */
declare const anthropicPricing: ProviderPricing;

/** Pricing table for Google Gemini models. */
declare const googlePricing: ProviderPricing;

/** Pricing table for xAI Grok models. */
declare const xaiPricing: ProviderPricing;

/** Pricing table for DeepSeek models. */
declare const deepseekPricing: ProviderPricing;

/** All provider tables, keyed by provider name ("openai", "anthropic", ...). */
declare const allPricing: Record<string, ProviderPricing>;
/** Flattened pricing table — presumably all models across providers; confirm in prices.ts. */
declare const flatPricing: ProviderPricing;
/** Look up pricing for a provider/model pair; undefined when unknown. */
declare function getModelPricing(provider: string, modelId: string): ModelPricing | undefined;
/** Look up pricing by model id alone, searching all providers. */
declare function getModelPricingByModelId(modelId: string): ModelPricing | undefined;
/** Enumerate every supported provider/model pair. */
declare function getAllSupportedModels(): {
    provider: string;
    modelId: string;
}[];
/** Union of known provider names. */
type Provider = keyof typeof allPricing;

/** Itemized USD cost of a single request. */
interface CostBreakdown {
    /** Cost of input tokens, excluding cached tokens. */
    inputCost: number;
    /** Cost of output tokens, excluding reasoning tokens. */
    outputCost: number;
    /** Cost of input tokens served from the prompt cache. */
    cacheReadCost: number;
    /** Cost of writing input tokens to the prompt cache. */
    cacheWriteCost: number;
    /** Cost of reasoning tokens. */
    reasoningCost: number;
    /** Sum of all component costs. */
    totalCost: number;
    /** Costs are always reported in USD. */
    currency: "USD";
    /** True when long-context (tiered) pricing was applied. */
    isLongContext: boolean;
}
/** Input to calculateCost. */
interface CalculateCostOptions {
    /** Provider name, e.g. "openai", "anthropic", "google", "xai", "deepseek". */
    provider: string;
    /** Model identifier, e.g. "gpt-4o". */
    modelId: string;
    /** Token usage as reported by the AI SDK. */
    usage: LanguageModelUsage;
    /** Optional pricing override; skips the built-in pricing table lookup. */
    customPricing?: ModelPricing;
}
/** Calculate the itemized cost of one request; throws for unknown models without customPricing. */
declare function calculateCost(options: CalculateCostOptions): CostBreakdown;
/** Format a dollar amount as a string such as "$0.001500" (default 6 decimals). */
declare function formatCost(cost: number, decimals?: number): string;
/** Format a CostBreakdown as a multi-line summary of nonzero components plus total. */
declare function formatCostBreakdown(result: CostBreakdown, decimals?: number): string;

/** Accumulates usage and cost across multiple requests for one model. */
interface CostTracker {
    /** Add one request's usage to the running totals. */
    addUsage(usage: LanguageModelUsage): void;
    /** Accumulated usage across all added requests. */
    getTotalUsage(): LanguageModelUsage;
    /** Cost of the accumulated usage. */
    getTotalCost(): CostBreakdown;
    /** Number of usage records added since creation or the last reset. */
    getRequestCount(): number;
    /** Clear all accumulated usage. */
    reset(): void;
}
/** Input to createCostTracker. */
interface CreateCostTrackerOptions {
    provider: string;
    modelId: string;
    customPricing?: ModelPricing;
}
/** Create a tracker that accumulates cost across requests for one model. */
declare function createCostTracker(options: CreateCostTrackerOptions): CostTracker;
/** Options for calculateStreamCost. */
interface StreamCostOptions {
    provider: string;
    modelId: string;
    customPricing?: ModelPricing;
    /** Invoked with the computed cost and the stream's usage. */
    onCost?: (cost: CostBreakdown, usage: LanguageModelUsage) => void;
}
/** Compute the cost of a (possibly promised) stream result that exposes a usage field. */
declare function calculateStreamCost<T extends {
    usage: LanguageModelUsage;
}>(streamResult: T | Promise<T>, options: StreamCostOptions): Promise<CostBreakdown & {
    usage: LanguageModelUsage;
}>;

export { type CalculateCostOptions, type CostBreakdown, type CostTracker, type CreateCostTrackerOptions, type ModelPricing, type Provider, type ProviderPricing, type StreamCostOptions, allPricing, anthropicPricing, calculateCost, calculateStreamCost, createCostTracker, deepseekPricing, flatPricing, formatCost, formatCostBreakdown, getAllSupportedModels, getModelPricing, getModelPricingByModelId, googlePricing, openaiPricing, xaiPricing };
@@ -0,0 +1,84 @@
1
import { LanguageModelUsage } from 'ai';

/**
 * Per-model pricing, in US dollars per one million tokens.
 * The optional longContext* fields enable tiered pricing once the input
 * token count exceeds longContextThreshold.
 */
interface ModelPricing {
    /** USD per 1M non-cached input tokens. */
    inputPer1MTokens: number;
    /** USD per 1M output tokens. */
    outputPer1MTokens: number;
    /** USD per 1M input tokens read from the prompt cache. */
    cacheReadPer1MTokens?: number;
    /** USD per 1M input tokens written to the prompt cache. */
    cacheWritePer1MTokens?: number;
    /** USD per 1M reasoning tokens. */
    reasoningPer1MTokens?: number;
    /** Input-token count above which the long-context rates apply. */
    longContextThreshold?: number;
    /** Long-context USD per 1M input tokens. */
    longContextInputPer1MTokens?: number;
    /** Long-context USD per 1M output tokens. */
    longContextOutputPer1MTokens?: number;
    /** Long-context USD per 1M cache-read tokens. */
    longContextCacheReadPer1MTokens?: number;
    /** Long-context USD per 1M cache-write tokens. */
    longContextCacheWritePer1MTokens?: number;
}
/** Pricing table for one provider, keyed by model id. */
interface ProviderPricing {
    [modelId: string]: ModelPricing;
}

/** Pricing table for OpenAI models. */
declare const openaiPricing: ProviderPricing;

/** Pricing table for Anthropic models. */
declare const anthropicPricing: ProviderPricing;

/** Pricing table for Google Gemini models. */
declare const googlePricing: ProviderPricing;

/** Pricing table for xAI Grok models. */
declare const xaiPricing: ProviderPricing;

/** Pricing table for DeepSeek models. */
declare const deepseekPricing: ProviderPricing;

/** All provider tables, keyed by provider name ("openai", "anthropic", ...). */
declare const allPricing: Record<string, ProviderPricing>;
/** Flattened pricing table — presumably all models across providers; confirm in prices.ts. */
declare const flatPricing: ProviderPricing;
/** Look up pricing for a provider/model pair; undefined when unknown. */
declare function getModelPricing(provider: string, modelId: string): ModelPricing | undefined;
/** Look up pricing by model id alone, searching all providers. */
declare function getModelPricingByModelId(modelId: string): ModelPricing | undefined;
/** Enumerate every supported provider/model pair. */
declare function getAllSupportedModels(): {
    provider: string;
    modelId: string;
}[];
/** Union of known provider names. */
type Provider = keyof typeof allPricing;

/** Itemized USD cost of a single request. */
interface CostBreakdown {
    /** Cost of input tokens, excluding cached tokens. */
    inputCost: number;
    /** Cost of output tokens, excluding reasoning tokens. */
    outputCost: number;
    /** Cost of input tokens served from the prompt cache. */
    cacheReadCost: number;
    /** Cost of writing input tokens to the prompt cache. */
    cacheWriteCost: number;
    /** Cost of reasoning tokens. */
    reasoningCost: number;
    /** Sum of all component costs. */
    totalCost: number;
    /** Costs are always reported in USD. */
    currency: "USD";
    /** True when long-context (tiered) pricing was applied. */
    isLongContext: boolean;
}
/** Input to calculateCost. */
interface CalculateCostOptions {
    /** Provider name, e.g. "openai", "anthropic", "google", "xai", "deepseek". */
    provider: string;
    /** Model identifier, e.g. "gpt-4o". */
    modelId: string;
    /** Token usage as reported by the AI SDK. */
    usage: LanguageModelUsage;
    /** Optional pricing override; skips the built-in pricing table lookup. */
    customPricing?: ModelPricing;
}
/** Calculate the itemized cost of one request; throws for unknown models without customPricing. */
declare function calculateCost(options: CalculateCostOptions): CostBreakdown;
/** Format a dollar amount as a string such as "$0.001500" (default 6 decimals). */
declare function formatCost(cost: number, decimals?: number): string;
/** Format a CostBreakdown as a multi-line summary of nonzero components plus total. */
declare function formatCostBreakdown(result: CostBreakdown, decimals?: number): string;

/** Accumulates usage and cost across multiple requests for one model. */
interface CostTracker {
    /** Add one request's usage to the running totals. */
    addUsage(usage: LanguageModelUsage): void;
    /** Accumulated usage across all added requests. */
    getTotalUsage(): LanguageModelUsage;
    /** Cost of the accumulated usage. */
    getTotalCost(): CostBreakdown;
    /** Number of usage records added since creation or the last reset. */
    getRequestCount(): number;
    /** Clear all accumulated usage. */
    reset(): void;
}
/** Input to createCostTracker. */
interface CreateCostTrackerOptions {
    provider: string;
    modelId: string;
    customPricing?: ModelPricing;
}
/** Create a tracker that accumulates cost across requests for one model. */
declare function createCostTracker(options: CreateCostTrackerOptions): CostTracker;
/** Options for calculateStreamCost. */
interface StreamCostOptions {
    provider: string;
    modelId: string;
    customPricing?: ModelPricing;
    /** Invoked with the computed cost and the stream's usage. */
    onCost?: (cost: CostBreakdown, usage: LanguageModelUsage) => void;
}
/** Compute the cost of a (possibly promised) stream result that exposes a usage field. */
declare function calculateStreamCost<T extends {
    usage: LanguageModelUsage;
}>(streamResult: T | Promise<T>, options: StreamCostOptions): Promise<CostBreakdown & {
    usage: LanguageModelUsage;
}>;

export { type CalculateCostOptions, type CostBreakdown, type CostTracker, type CreateCostTrackerOptions, type ModelPricing, type Provider, type ProviderPricing, type StreamCostOptions, allPricing, anthropicPricing, calculateCost, calculateStreamCost, createCostTracker, deepseekPricing, flatPricing, formatCost, formatCostBreakdown, getAllSupportedModels, getModelPricing, getModelPricingByModelId, googlePricing, openaiPricing, xaiPricing };
@@ -0,0 +1 @@
1
+ {"version":3,"file":"index.d.ts","sourceRoot":"","sources":["../src/index.ts"],"names":[],"mappings":"AACA,OAAO,EACL,aAAa,EACb,UAAU,EACV,mBAAmB,EACnB,KAAK,aAAa,EAClB,KAAK,oBAAoB,GAC1B,MAAM,cAAc,CAAC;AAGtB,OAAO,EACL,iBAAiB,EACjB,mBAAmB,EACnB,KAAK,WAAW,EAChB,KAAK,wBAAwB,EAC7B,KAAK,iBAAiB,GACvB,MAAM,aAAa,CAAC;AAGrB,OAAO,EACL,eAAe,EACf,wBAAwB,EACxB,qBAAqB,EACrB,aAAa,EACb,gBAAgB,EAChB,aAAa,EACb,UAAU,EACV,eAAe,EACf,UAAU,EACV,WAAW,EACX,KAAK,YAAY,EACjB,KAAK,eAAe,EACpB,KAAK,QAAQ,GACd,MAAM,UAAU,CAAC"}