mcp-cost-tracker 1.0.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/LICENSE +21 -0
- package/README.md +99 -0
- package/dist/dashboard/generator.d.ts +54 -0
- package/dist/dashboard/generator.d.ts.map +1 -0
- package/dist/dashboard/generator.js +577 -0
- package/dist/dashboard/generator.js.map +1 -0
- package/dist/index.d.ts +12 -0
- package/dist/index.d.ts.map +1 -0
- package/dist/index.js +60 -0
- package/dist/index.js.map +1 -0
- package/dist/pricing/models.d.ts +48 -0
- package/dist/pricing/models.d.ts.map +1 -0
- package/dist/pricing/models.js +207 -0
- package/dist/pricing/models.js.map +1 -0
- package/dist/storage/database.d.ts +129 -0
- package/dist/storage/database.d.ts.map +1 -0
- package/dist/storage/database.js +374 -0
- package/dist/storage/database.js.map +1 -0
- package/dist/tools/index.d.ts +4 -0
- package/dist/tools/index.d.ts.map +1 -0
- package/dist/tools/index.js +660 -0
- package/dist/tools/index.js.map +1 -0
- package/dist/tools/prompts.d.ts +3 -0
- package/dist/tools/prompts.d.ts.map +1 -0
- package/dist/tools/prompts.js +111 -0
- package/dist/tools/prompts.js.map +1 -0
- package/dist/tools/resources.d.ts +4 -0
- package/dist/tools/resources.d.ts.map +1 -0
- package/dist/tools/resources.js +138 -0
- package/dist/tools/resources.js.map +1 -0
- package/dist/utils/helpers.d.ts +29 -0
- package/dist/utils/helpers.d.ts.map +1 -0
- package/dist/utils/helpers.js +81 -0
- package/dist/utils/helpers.js.map +1 -0
- package/package.json +52 -0
- package/src/dashboard/generator.ts +628 -0
- package/src/index.ts +73 -0
- package/src/pricing/models.ts +246 -0
- package/src/storage/database.ts +525 -0
- package/src/tools/index.ts +780 -0
- package/src/tools/prompts.ts +124 -0
- package/src/tools/resources.ts +171 -0
- package/src/utils/helpers.ts +71 -0
- package/tsconfig.json +20 -0
|
@@ -0,0 +1,246 @@
|
|
|
1
|
+
/**
 * LLM Model Pricing Database
 * Prices are in USD per 1 million tokens
 * Last updated: February 2026
 */

/**
 * Pricing record for a single model from a single provider.
 * All monetary fields are USD per 1 million tokens (MTok).
 */
export interface ModelPricing {
  // Lowercase provider id, e.g. 'openai', 'anthropic', 'google'.
  provider: string;
  // Model identifier as exposed by the provider's API.
  model: string;
  // Price per 1M uncached input (prompt) tokens, USD.
  inputPricePerMTok: number;
  // Price per 1M output (completion) tokens, USD. 0 for embedding models.
  outputPricePerMTok: number;
  // Discounted price per 1M cache-hit input tokens, USD; absent when the
  // provider offers no prompt-caching discount for this model.
  cachedInputPricePerMTok?: number;
  // Maximum context window in tokens, when known.
  contextWindow?: number;
  // Coarse capability bucket used for grouping/reporting.
  category: 'chat' | 'reasoning' | 'embedding' | 'image' | 'audio' | 'code';
  // Free-form caveats, e.g. tiered pricing thresholds.
  notes?: string;
}
|
|
17
|
+
|
|
18
|
+
/**
 * Comprehensive pricing database for all major LLM providers.
 * Prices are per 1 million tokens (MTok) in USD.
 *
 * NOTE: some models appear under multiple spellings (e.g. both
 * 'claude-sonnet-4.5' and 'claude-sonnet-4-5') so that lookups succeed
 * for whichever form a client reports.
 */
export const MODEL_PRICING: ModelPricing[] = [
  // ============================================================
  // OpenAI Models
  // ============================================================
  // GPT-4.1 Series
  { provider: 'openai', model: 'gpt-4.1', inputPricePerMTok: 2.00, outputPricePerMTok: 8.00, cachedInputPricePerMTok: 0.50, contextWindow: 1047576, category: 'chat' },
  { provider: 'openai', model: 'gpt-4.1-mini', inputPricePerMTok: 0.40, outputPricePerMTok: 1.60, cachedInputPricePerMTok: 0.10, contextWindow: 1047576, category: 'chat' },
  { provider: 'openai', model: 'gpt-4.1-nano', inputPricePerMTok: 0.10, outputPricePerMTok: 0.40, cachedInputPricePerMTok: 0.025, contextWindow: 1047576, category: 'chat' },

  // GPT-4o Series
  { provider: 'openai', model: 'gpt-4o', inputPricePerMTok: 2.50, outputPricePerMTok: 10.00, cachedInputPricePerMTok: 1.25, contextWindow: 128000, category: 'chat' },
  { provider: 'openai', model: 'gpt-4o-mini', inputPricePerMTok: 0.15, outputPricePerMTok: 0.60, cachedInputPricePerMTok: 0.075, contextWindow: 128000, category: 'chat' },

  // GPT-5 Series
  { provider: 'openai', model: 'gpt-5', inputPricePerMTok: 10.00, outputPricePerMTok: 30.00, cachedInputPricePerMTok: 2.50, contextWindow: 128000, category: 'chat' },
  { provider: 'openai', model: 'gpt-5-mini', inputPricePerMTok: 1.50, outputPricePerMTok: 6.00, cachedInputPricePerMTok: 0.375, contextWindow: 128000, category: 'chat' },
  { provider: 'openai', model: 'gpt-5-nano', inputPricePerMTok: 0.05, outputPricePerMTok: 0.40, contextWindow: 128000, category: 'chat' },

  // O-Series (Reasoning)
  { provider: 'openai', model: 'o1', inputPricePerMTok: 15.00, outputPricePerMTok: 60.00, cachedInputPricePerMTok: 7.50, contextWindow: 200000, category: 'reasoning' },
  { provider: 'openai', model: 'o1-mini', inputPricePerMTok: 1.10, outputPricePerMTok: 4.40, cachedInputPricePerMTok: 0.55, contextWindow: 128000, category: 'reasoning' },
  { provider: 'openai', model: 'o1-pro', inputPricePerMTok: 150.00, outputPricePerMTok: 600.00, contextWindow: 200000, category: 'reasoning' },
  { provider: 'openai', model: 'o3', inputPricePerMTok: 2.00, outputPricePerMTok: 8.00, cachedInputPricePerMTok: 0.50, contextWindow: 200000, category: 'reasoning' },
  { provider: 'openai', model: 'o3-mini', inputPricePerMTok: 1.10, outputPricePerMTok: 4.40, cachedInputPricePerMTok: 0.55, contextWindow: 200000, category: 'reasoning' },
  { provider: 'openai', model: 'o4-mini', inputPricePerMTok: 1.10, outputPricePerMTok: 4.40, cachedInputPricePerMTok: 0.55, contextWindow: 200000, category: 'reasoning' },

  // Legacy GPT-4
  { provider: 'openai', model: 'gpt-4-turbo', inputPricePerMTok: 10.00, outputPricePerMTok: 30.00, contextWindow: 128000, category: 'chat' },
  { provider: 'openai', model: 'gpt-4', inputPricePerMTok: 30.00, outputPricePerMTok: 60.00, contextWindow: 8192, category: 'chat' },
  { provider: 'openai', model: 'gpt-3.5-turbo', inputPricePerMTok: 0.50, outputPricePerMTok: 1.50, contextWindow: 16385, category: 'chat' },

  // Embeddings (output price is 0: embedding endpoints bill input only)
  { provider: 'openai', model: 'text-embedding-3-large', inputPricePerMTok: 0.13, outputPricePerMTok: 0, contextWindow: 8191, category: 'embedding' },
  { provider: 'openai', model: 'text-embedding-3-small', inputPricePerMTok: 0.02, outputPricePerMTok: 0, contextWindow: 8191, category: 'embedding' },
  { provider: 'openai', model: 'text-embedding-ada-002', inputPricePerMTok: 0.10, outputPricePerMTok: 0, contextWindow: 8191, category: 'embedding' },

  // ============================================================
  // Anthropic Models
  // ============================================================
  { provider: 'anthropic', model: 'claude-opus-4', inputPricePerMTok: 15.00, outputPricePerMTok: 75.00, cachedInputPricePerMTok: 1.875, contextWindow: 200000, category: 'chat' },
  { provider: 'anthropic', model: 'claude-sonnet-4', inputPricePerMTok: 3.00, outputPricePerMTok: 15.00, cachedInputPricePerMTok: 0.375, contextWindow: 200000, category: 'chat' },
  { provider: 'anthropic', model: 'claude-sonnet-4.5', inputPricePerMTok: 3.00, outputPricePerMTok: 15.00, cachedInputPricePerMTok: 0.375, contextWindow: 200000, category: 'chat' },
  { provider: 'anthropic', model: 'claude-sonnet-4-5', inputPricePerMTok: 3.00, outputPricePerMTok: 15.00, cachedInputPricePerMTok: 0.375, contextWindow: 200000, category: 'chat' },
  { provider: 'anthropic', model: 'claude-3-5-sonnet-20241022', inputPricePerMTok: 3.00, outputPricePerMTok: 15.00, cachedInputPricePerMTok: 0.375, contextWindow: 200000, category: 'chat' },
  { provider: 'anthropic', model: 'claude-3-5-sonnet-latest', inputPricePerMTok: 3.00, outputPricePerMTok: 15.00, cachedInputPricePerMTok: 0.375, contextWindow: 200000, category: 'chat' },
  { provider: 'anthropic', model: 'claude-3-5-haiku-20241022', inputPricePerMTok: 0.80, outputPricePerMTok: 4.00, cachedInputPricePerMTok: 0.08, contextWindow: 200000, category: 'chat' },
  { provider: 'anthropic', model: 'claude-3-haiku-20240307', inputPricePerMTok: 0.25, outputPricePerMTok: 1.25, cachedInputPricePerMTok: 0.03, contextWindow: 200000, category: 'chat' },
  { provider: 'anthropic', model: 'claude-3-opus-20240229', inputPricePerMTok: 15.00, outputPricePerMTok: 75.00, cachedInputPricePerMTok: 1.875, contextWindow: 200000, category: 'chat' },
  { provider: 'anthropic', model: 'claude-3-sonnet-20240229', inputPricePerMTok: 3.00, outputPricePerMTok: 15.00, contextWindow: 200000, category: 'chat' },

  // Anthropic aliases (short names some clients report instead of dated ids)
  { provider: 'anthropic', model: 'claude-3.5-sonnet', inputPricePerMTok: 3.00, outputPricePerMTok: 15.00, cachedInputPricePerMTok: 0.375, contextWindow: 200000, category: 'chat' },
  { provider: 'anthropic', model: 'claude-3.5-haiku', inputPricePerMTok: 0.80, outputPricePerMTok: 4.00, cachedInputPricePerMTok: 0.08, contextWindow: 200000, category: 'chat' },
  { provider: 'anthropic', model: 'claude-3-opus', inputPricePerMTok: 15.00, outputPricePerMTok: 75.00, cachedInputPricePerMTok: 1.875, contextWindow: 200000, category: 'chat' },
  { provider: 'anthropic', model: 'claude-3-haiku', inputPricePerMTok: 0.25, outputPricePerMTok: 1.25, cachedInputPricePerMTok: 0.03, contextWindow: 200000, category: 'chat' },

  // ============================================================
  // Google Models
  // ============================================================
  { provider: 'google', model: 'gemini-2.5-pro', inputPricePerMTok: 1.25, outputPricePerMTok: 10.00, contextWindow: 1048576, category: 'chat', notes: 'Price for ≤200k tokens' },
  { provider: 'google', model: 'gemini-2.5-flash', inputPricePerMTok: 0.15, outputPricePerMTok: 0.60, contextWindow: 1048576, category: 'chat' },
  { provider: 'google', model: 'gemini-2.0-flash', inputPricePerMTok: 0.10, outputPricePerMTok: 0.40, contextWindow: 1048576, category: 'chat' },
  { provider: 'google', model: 'gemini-2.0-flash-lite', inputPricePerMTok: 0.075, outputPricePerMTok: 0.30, contextWindow: 1048576, category: 'chat' },
  { provider: 'google', model: 'gemini-1.5-pro', inputPricePerMTok: 1.25, outputPricePerMTok: 5.00, contextWindow: 2097152, category: 'chat' },
  { provider: 'google', model: 'gemini-1.5-flash', inputPricePerMTok: 0.075, outputPricePerMTok: 0.30, contextWindow: 1048576, category: 'chat' },
  { provider: 'google', model: 'gemini-1.0-pro', inputPricePerMTok: 0.50, outputPricePerMTok: 1.50, contextWindow: 32760, category: 'chat' },

  // ============================================================
  // DeepSeek Models
  // ============================================================
  { provider: 'deepseek', model: 'deepseek-chat', inputPricePerMTok: 0.27, outputPricePerMTok: 1.10, cachedInputPricePerMTok: 0.07, contextWindow: 65536, category: 'chat' },
  { provider: 'deepseek', model: 'deepseek-v3', inputPricePerMTok: 0.27, outputPricePerMTok: 1.10, cachedInputPricePerMTok: 0.07, contextWindow: 65536, category: 'chat' },
  { provider: 'deepseek', model: 'deepseek-reasoner', inputPricePerMTok: 0.55, outputPricePerMTok: 2.19, cachedInputPricePerMTok: 0.14, contextWindow: 65536, category: 'reasoning' },
  { provider: 'deepseek', model: 'deepseek-r1', inputPricePerMTok: 0.55, outputPricePerMTok: 2.19, cachedInputPricePerMTok: 0.14, contextWindow: 65536, category: 'reasoning' },

  // ============================================================
  // Mistral Models
  // ============================================================
  { provider: 'mistral', model: 'mistral-large-latest', inputPricePerMTok: 2.00, outputPricePerMTok: 6.00, contextWindow: 131072, category: 'chat' },
  { provider: 'mistral', model: 'mistral-medium-latest', inputPricePerMTok: 0.40, outputPricePerMTok: 2.00, contextWindow: 131072, category: 'chat' },
  { provider: 'mistral', model: 'mistral-small-latest', inputPricePerMTok: 0.10, outputPricePerMTok: 0.30, contextWindow: 131072, category: 'chat' },
  { provider: 'mistral', model: 'codestral-latest', inputPricePerMTok: 0.30, outputPricePerMTok: 0.90, contextWindow: 262144, category: 'code' },
  { provider: 'mistral', model: 'pixtral-large-latest', inputPricePerMTok: 2.00, outputPricePerMTok: 6.00, contextWindow: 131072, category: 'chat' },
  { provider: 'mistral', model: 'mistral-nemo', inputPricePerMTok: 0.15, outputPricePerMTok: 0.15, contextWindow: 131072, category: 'chat' },

  // ============================================================
  // Meta Llama Models (via common providers)
  // ============================================================
  { provider: 'meta', model: 'llama-3.3-70b', inputPricePerMTok: 0.18, outputPricePerMTok: 0.18, contextWindow: 131072, category: 'chat' },
  { provider: 'meta', model: 'llama-3.1-405b', inputPricePerMTok: 0.80, outputPricePerMTok: 0.80, contextWindow: 131072, category: 'chat' },
  { provider: 'meta', model: 'llama-3.1-70b', inputPricePerMTok: 0.18, outputPricePerMTok: 0.18, contextWindow: 131072, category: 'chat' },
  { provider: 'meta', model: 'llama-3.1-8b', inputPricePerMTok: 0.02, outputPricePerMTok: 0.05, contextWindow: 131072, category: 'chat' },
  { provider: 'meta', model: 'llama-4-scout', inputPricePerMTok: 0.15, outputPricePerMTok: 0.60, contextWindow: 512000, category: 'chat' },
  { provider: 'meta', model: 'llama-4-maverick', inputPricePerMTok: 0.30, outputPricePerMTok: 0.90, contextWindow: 1048576, category: 'chat' },

  // ============================================================
  // Cohere Models
  // ============================================================
  { provider: 'cohere', model: 'command-r-plus', inputPricePerMTok: 2.50, outputPricePerMTok: 10.00, contextWindow: 128000, category: 'chat' },
  { provider: 'cohere', model: 'command-r', inputPricePerMTok: 0.15, outputPricePerMTok: 0.60, contextWindow: 128000, category: 'chat' },
  { provider: 'cohere', model: 'command-light', inputPricePerMTok: 0.30, outputPricePerMTok: 0.60, contextWindow: 4096, category: 'chat' },

  // ============================================================
  // xAI Models
  // ============================================================
  { provider: 'xai', model: 'grok-3', inputPricePerMTok: 3.00, outputPricePerMTok: 15.00, contextWindow: 131072, category: 'chat' },
  { provider: 'xai', model: 'grok-3-mini', inputPricePerMTok: 0.30, outputPricePerMTok: 0.50, contextWindow: 131072, category: 'reasoning' },
  { provider: 'xai', model: 'grok-2', inputPricePerMTok: 2.00, outputPricePerMTok: 10.00, contextWindow: 131072, category: 'chat' },
];
|
|
131
|
+
|
|
132
|
+
/**
|
|
133
|
+
* Build a lookup map for fast model price resolution.
|
|
134
|
+
* Supports fuzzy matching by normalizing model names.
|
|
135
|
+
*/
|
|
136
|
+
const pricingMap = new Map<string, ModelPricing>();
|
|
137
|
+
|
|
138
|
+
function normalizeModelName(name: string): string {
|
|
139
|
+
return name
|
|
140
|
+
.toLowerCase()
|
|
141
|
+
.replace(/[_\s]/g, '-')
|
|
142
|
+
.replace(/^(models\/|ft:|accounts\/[^/]+\/models\/)/, '')
|
|
143
|
+
.trim();
|
|
144
|
+
}
|
|
145
|
+
|
|
146
|
+
for (const pricing of MODEL_PRICING) {
|
|
147
|
+
pricingMap.set(normalizeModelName(pricing.model), pricing);
|
|
148
|
+
// Also index with provider prefix
|
|
149
|
+
pricingMap.set(`${pricing.provider}/${normalizeModelName(pricing.model)}`, pricing);
|
|
150
|
+
}
|
|
151
|
+
|
|
152
|
+
/**
|
|
153
|
+
* Look up pricing for a given model name.
|
|
154
|
+
* Supports exact match, provider-prefixed match, and fuzzy partial match.
|
|
155
|
+
*/
|
|
156
|
+
export function lookupPricing(model: string, provider?: string): ModelPricing | null {
|
|
157
|
+
const normalized = normalizeModelName(model);
|
|
158
|
+
|
|
159
|
+
// 1. Try exact match with provider prefix
|
|
160
|
+
if (provider) {
|
|
161
|
+
const key = `${provider.toLowerCase()}/${normalized}`;
|
|
162
|
+
const exact = pricingMap.get(key);
|
|
163
|
+
if (exact) return exact;
|
|
164
|
+
}
|
|
165
|
+
|
|
166
|
+
// 2. Try exact match without provider
|
|
167
|
+
const exact = pricingMap.get(normalized);
|
|
168
|
+
if (exact) return exact;
|
|
169
|
+
|
|
170
|
+
// 3. Fuzzy match: find the best partial match
|
|
171
|
+
let bestMatch: ModelPricing | null = null;
|
|
172
|
+
let bestScore = 0;
|
|
173
|
+
|
|
174
|
+
for (const pricing of MODEL_PRICING) {
|
|
175
|
+
const pricingNorm = normalizeModelName(pricing.model);
|
|
176
|
+
|
|
177
|
+
// Check if the query contains the model name or vice versa
|
|
178
|
+
if (normalized.includes(pricingNorm) || pricingNorm.includes(normalized)) {
|
|
179
|
+
const score = pricingNorm.length;
|
|
180
|
+
// If provider matches, boost score
|
|
181
|
+
const providerBoost = provider && pricing.provider === provider.toLowerCase() ? 1000 : 0;
|
|
182
|
+
if (score + providerBoost > bestScore) {
|
|
183
|
+
bestScore = score + providerBoost;
|
|
184
|
+
bestMatch = pricing;
|
|
185
|
+
}
|
|
186
|
+
}
|
|
187
|
+
}
|
|
188
|
+
|
|
189
|
+
return bestMatch;
|
|
190
|
+
}
|
|
191
|
+
|
|
192
|
+
/**
|
|
193
|
+
* Calculate cost for a given number of tokens.
|
|
194
|
+
*/
|
|
195
|
+
export function calculateCost(
|
|
196
|
+
model: string,
|
|
197
|
+
inputTokens: number,
|
|
198
|
+
outputTokens: number,
|
|
199
|
+
provider?: string,
|
|
200
|
+
cachedInputTokens?: number
|
|
201
|
+
): { cost: number; inputCost: number; outputCost: number; cachedInputCost: number; pricing: ModelPricing | null } {
|
|
202
|
+
const pricing = lookupPricing(model, provider);
|
|
203
|
+
|
|
204
|
+
if (!pricing) {
|
|
205
|
+
return { cost: 0, inputCost: 0, outputCost: 0, cachedInputCost: 0, pricing: null };
|
|
206
|
+
}
|
|
207
|
+
|
|
208
|
+
const effectiveInputTokens = cachedInputTokens
|
|
209
|
+
? inputTokens - cachedInputTokens
|
|
210
|
+
: inputTokens;
|
|
211
|
+
|
|
212
|
+
const inputCost = (effectiveInputTokens / 1_000_000) * pricing.inputPricePerMTok;
|
|
213
|
+
const outputCost = (outputTokens / 1_000_000) * pricing.outputPricePerMTok;
|
|
214
|
+
const cachedInputCost = cachedInputTokens && pricing.cachedInputPricePerMTok
|
|
215
|
+
? (cachedInputTokens / 1_000_000) * pricing.cachedInputPricePerMTok
|
|
216
|
+
: 0;
|
|
217
|
+
|
|
218
|
+
const cost = inputCost + outputCost + cachedInputCost;
|
|
219
|
+
|
|
220
|
+
return { cost, inputCost, outputCost, cachedInputCost, pricing };
|
|
221
|
+
}
|
|
222
|
+
|
|
223
|
+
/**
|
|
224
|
+
* Get all supported providers.
|
|
225
|
+
*/
|
|
226
|
+
export function getSupportedProviders(): string[] {
|
|
227
|
+
const providers = new Set<string>();
|
|
228
|
+
for (const pricing of MODEL_PRICING) {
|
|
229
|
+
providers.add(pricing.provider);
|
|
230
|
+
}
|
|
231
|
+
return Array.from(providers).sort();
|
|
232
|
+
}
|
|
233
|
+
|
|
234
|
+
/**
|
|
235
|
+
* Get all models for a given provider.
|
|
236
|
+
*/
|
|
237
|
+
export function getModelsForProvider(provider: string): ModelPricing[] {
|
|
238
|
+
return MODEL_PRICING.filter(p => p.provider === provider.toLowerCase());
|
|
239
|
+
}
|
|
240
|
+
|
|
241
|
+
/**
|
|
242
|
+
* Get all available models.
|
|
243
|
+
*/
|
|
244
|
+
export function getAllModels(): ModelPricing[] {
|
|
245
|
+
return [...MODEL_PRICING];
|
|
246
|
+
}
|