prompt-pricer 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.cjs ADDED
@@ -0,0 +1,494 @@
1
+ "use strict";
2
+ var __defProp = Object.defineProperty;
3
+ var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
4
+ var __getOwnPropNames = Object.getOwnPropertyNames;
5
+ var __hasOwnProp = Object.prototype.hasOwnProperty;
6
+ var __export = (target, all) => {
7
+ for (var name in all)
8
+ __defProp(target, name, { get: all[name], enumerable: true });
9
+ };
10
+ var __copyProps = (to, from, except, desc) => {
11
+ if (from && typeof from === "object" || typeof from === "function") {
12
+ for (let key of __getOwnPropNames(from))
13
+ if (!__hasOwnProp.call(to, key) && key !== except)
14
+ __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
15
+ }
16
+ return to;
17
+ };
18
+ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
19
+
20
// src/index.ts
// Public export table. The arrow-function getters reference names defined
// further down the bundle; `var`/`function` hoisting plus lazy getter
// evaluation makes these forward references safe.
var index_exports = {};
__export(index_exports, {
  InvalidCalculateInputError: () => InvalidCalculateInputError,
  InvalidTokenCountError: () => InvalidTokenCountError,
  UnknownModelError: () => UnknownModelError,
  calculate: () => calculate,
  calculateBatch: () => calculateBatch,
  compare: () => compare,
  createStreamTracker: () => createStreamTracker,
  estimateStreamCost: () => estimateStreamCost,
  models: () => models
});
module.exports = __toCommonJS(index_exports);
34
+
35
// src/types.ts
// Brands a raw string as a model id. The branding is purely type-level in
// the original TypeScript; at runtime this is the identity function.
function createModelId(id) {
  return id;
}

// Thrown when a model id (after alias resolution) is not in the catalog.
class UnknownModelError extends Error {
  constructor(modelId) {
    super(`Unknown model: "${modelId}"`);
    this.name = "UnknownModelError";
  }
}

// Thrown when a token count is negative, NaN, or otherwise non-finite.
class InvalidTokenCountError extends Error {
  constructor(field, value) {
    super(`Invalid token count for "${field}": ${value} (must be >= 0)`);
    this.name = "InvalidTokenCountError";
  }
}

// Thrown for any other malformed calculate()/compare()/batch input.
class InvalidCalculateInputError extends Error {
  constructor(message) {
    super(`Invalid input: ${message}`);
    this.name = "InvalidCalculateInputError";
  }
}
57
+
58
// src/models/anthropic.ts
// Static pricing table, USD per 1M tokens. Figures are a snapshot dated by
// `lastUpdated`; confirm against Anthropic's published price list before
// relying on them.
var anthropicModels = [
  {
    id: createModelId("claude-3-5-sonnet"),
    name: "Claude 3.5 Sonnet",
    provider: "anthropic",
    contextWindow: 2e5, // 200,000 tokens
    lastUpdated: "2026-03-01",
    pricing: {
      inputPer1M: 3,
      outputPer1M: 15,
      cachedInputPer1M: 3.75,
      // cache WRITE: 1.25x the standard input rate (the original comment
      // said "25% of standard", which contradicts the value 3.75 vs 3)
      cachedReadPer1M: 0.3
      // cache READ: 10% of the standard input rate (original said "2%")
    },
    notes: "Latest Claude 3.5 model, optimized for speed/cost balance. Supports prompt caching."
  },
  {
    id: createModelId("claude-3-opus"),
    name: "Claude 3 Opus",
    provider: "anthropic",
    contextWindow: 2e5, // 200,000 tokens
    lastUpdated: "2026-03-01",
    pricing: {
      inputPer1M: 15,
      outputPer1M: 75,
      cachedInputPer1M: 18.75, // 1.25x input rate
      cachedReadPer1M: 1.5 // 10% of input rate
    },
    notes: "Most capable Claude 3 variant. Higher cost but best performance."
  },
  {
    id: createModelId("claude-3-sonnet"),
    name: "Claude 3 Sonnet",
    provider: "anthropic",
    contextWindow: 2e5, // 200,000 tokens
    lastUpdated: "2026-03-01",
    pricing: {
      inputPer1M: 3,
      outputPer1M: 15,
      cachedInputPer1M: 3.75, // 1.25x input rate
      cachedReadPer1M: 0.3 // 10% of input rate
    },
    notes: "Balanced Claude 3 model. Use Claude 3.5 Sonnet for better speed/cost."
  },
  {
    id: createModelId("claude-3-haiku"),
    name: "Claude 3 Haiku",
    provider: "anthropic",
    contextWindow: 2e5, // 200,000 tokens
    lastUpdated: "2026-03-01",
    pricing: {
      inputPer1M: 0.25,
      outputPer1M: 1.25,
      cachedInputPer1M: 0.3125, // 1.25x input rate
      cachedReadPer1M: 0.025 // 10% of input rate
    },
    notes: "Fastest and most compact Claude model. Best for simple tasks."
  }
];
119
+
120
// src/models/openai.ts
// Static pricing table, USD per 1M tokens; snapshot dated by `lastUpdated`.
// batch*Per1M fields are the Batch API rates (here, 50% of the live rates).
// NOTE(review): batch rates are present in the data but not consumed by
// calculate() in this bundle — verify intended wiring.
var openaiModels = [
  {
    id: createModelId("gpt-4o"),
    name: "GPT-4o",
    provider: "openai",
    contextWindow: 128e3, // 128,000 tokens
    lastUpdated: "2026-03-15",
    pricing: {
      inputPer1M: 5,
      outputPer1M: 15,
      batchInputPer1M: 2.5,
      batchOutputPer1M: 7.5
    },
    notes: "Latest flagship model. Supports batch API for cost reduction."
  },
  {
    id: createModelId("gpt-4o-mini"),
    name: "GPT-4o Mini",
    provider: "openai",
    contextWindow: 128e3,
    lastUpdated: "2026-03-15",
    pricing: {
      inputPer1M: 0.15,
      outputPer1M: 0.6,
      batchInputPer1M: 0.075,
      batchOutputPer1M: 0.3
    },
    notes: "Lightweight, cost-effective model. Great for simple tasks and high volume."
  },
  {
    id: createModelId("gpt-4-turbo"),
    name: "GPT-4 Turbo",
    provider: "openai",
    contextWindow: 128e3,
    lastUpdated: "2026-03-15",
    pricing: {
      inputPer1M: 10,
      outputPer1M: 30,
      batchInputPer1M: 5,
      batchOutputPer1M: 15
    },
    notes: "Previous generation flagship. Maintained for compatibility."
  },
  {
    id: createModelId("o1"),
    name: "o1",
    provider: "openai",
    contextWindow: 128e3,
    lastUpdated: "2026-03-15",
    pricing: {
      inputPer1M: 15,
      outputPer1M: 60
      // no batch or cache rates published for this entry
    },
    notes: "Reasoning model for complex problem-solving. Premium pricing."
  },
  {
    id: createModelId("o1-mini"),
    name: "o1 Mini",
    provider: "openai",
    contextWindow: 128e3,
    lastUpdated: "2026-03-15",
    pricing: {
      inputPer1M: 3,
      outputPer1M: 12
    },
    notes: "Lightweight reasoning model. Better cost/performance than o1."
  }
];
189
+
190
// src/models/google.ts
// Static pricing table, USD per 1M tokens; snapshot dated by `lastUpdated`.
// Gemini entries publish no cache/batch rates in this catalog.
var googleModels = [
  {
    id: createModelId("gemini-1.5-pro"),
    name: "Gemini 1.5 Pro",
    provider: "google",
    contextWindow: 2e6, // 2,000,000 tokens
    lastUpdated: "2026-02-01",
    pricing: {
      inputPer1M: 7.5,
      outputPer1M: 30
    },
    notes: "Latest flagship model. Massive 2M token context window."
  },
  {
    id: createModelId("gemini-1.5-flash"),
    name: "Gemini 1.5 Flash",
    provider: "google",
    contextWindow: 1e6, // 1,000,000 tokens
    lastUpdated: "2026-02-01",
    pricing: {
      inputPer1M: 0.075,
      outputPer1M: 0.3
    },
    notes: "Fast, cost-effective model. Excellent for high-volume applications."
  },
  {
    id: createModelId("gemini-1-pro"),
    name: "Gemini 1 Pro",
    provider: "google",
    contextWindow: 32e3, // 32,000 tokens
    lastUpdated: "2026-02-01",
    pricing: {
      inputPer1M: 0.5,
      outputPer1M: 1.5
    },
    notes: "Previous generation. Maintained for compatibility."
  }
];
229
+
230
// src/models/catalog.ts
// Flattened catalog of every known model, in provider declaration order.
var ALL_MODELS = [
  ...anthropicModels,
  ...openaiModels,
  ...googleModels
];
// Shorthand / legacy names mapped to canonical catalog ids. Keys are matched
// after getModel lowercases and trims the caller-supplied id.
var ALIASES = {
  // Claude aliases
  "claude-sonnet": "claude-3-5-sonnet",
  "claude-haiku": "claude-3-haiku",
  "claude-opus": "claude-3-opus",
  // GPT aliases
  "gpt4": "gpt-4o",
  "gpt4o": "gpt-4o",
  "gpt-4": "gpt-4o",
  "gpt4o-mini": "gpt-4o-mini",
  "gpt-4-mini": "gpt-4o-mini",
  // Gemini aliases
  "gemini": "gemini-1.5-pro",
  "gemini-pro": "gemini-1.5-pro",
  "gemini-flash": "gemini-1.5-flash"
};
252
// Resolves a caller-supplied model id (case-insensitive, whitespace-tolerant)
// to its catalog entry, consulting ALIASES for shorthand names.
// Throws UnknownModelError when neither the id nor an alias matches.
function getModel(modelId) {
  const wanted = modelId.toLowerCase().trim();
  const lookup = (id) => ALL_MODELS.find((entry) => entry.id === id);
  const direct = lookup(wanted);
  if (direct) return direct;
  const canonicalId = ALIASES[wanted];
  const aliased = canonicalId ? lookup(canonicalId) : void 0;
  if (aliased) return aliased;
  throw new UnknownModelError(modelId);
}
263
// Returns the full catalog (the shared array itself — callers must not mutate).
function listModels() {
  return ALL_MODELS;
}
// Returns every catalog entry published by the given provider.
function getModelsByProvider(provider) {
  return ALL_MODELS.filter((entry) => entry.provider === provider);
}
// Case-insensitive substring search across model id, display name,
// and provider.
function searchModels(query) {
  const needle = query.toLowerCase();
  const hits = (text) => text.toLowerCase().includes(needle);
  return ALL_MODELS.filter(
    (entry) => hits(entry.id) || hits(entry.name) || hits(entry.provider)
  );
}
275
// Returns a fresh copy of the catalog ordered by ascending input-token price
// (the original array is left untouched).
function getModelsByCost() {
  return ALL_MODELS.slice().sort(
    (left, right) => left.pricing.inputPer1M - right.pricing.inputPer1M
  );
}
281
// Summarizes the catalog: total size, per-provider counts, and the time the
// stats were generated (ISO 8601; NOT the pricing snapshot date).
function getCatalogStats() {
  const providers = { anthropic: 0, openai: 0, google: 0 };
  ALL_MODELS.forEach((entry) => {
    providers[entry.provider] += 1;
  });
  return {
    totalModels: ALL_MODELS.length,
    providers,
    lastUpdated: new Date().toISOString()
  };
}
296
// Grouped catalog API exposed to consumers as `models`.
var models = {
  get: getModel, // resolve an id or alias; throws UnknownModelError
  list: listModels, // full catalog
  byProvider: getModelsByProvider, // filter by provider name
  search: searchModels, // substring match on id/name/provider
  byCost: getModelsByCost, // copy sorted by ascending input price
  stats: getCatalogStats // counts + generation timestamp
};
304
+
305
// src/utils.ts
// Rounds a dollar amount to 8 decimal places, suppressing binary
// floating-point noise from the per-million-token arithmetic.
function roundCost(value) {
  const SCALE = 1e8;
  return Math.round(value * SCALE) / SCALE;
}
309
+
310
// src/calculate.ts
// Guards that a token count is a finite, non-negative number; throws
// InvalidTokenCountError naming the offending field otherwise.
function validateTokenCount(field, value) {
  const valid = Number.isFinite(value) && value >= 0;
  if (!valid) {
    throw new InvalidTokenCountError(field, value);
  }
}
316
/**
 * Prices a single request in USD.
 *
 * input: { model, inputTokens, outputTokens, cachedTokens? }
 * - model: catalog id or alias (resolved via getModel).
 * - cachedTokens, when given, is the cached portion of inputTokens; it is
 *   billed at the model's cachedInputPer1M rate. For models without a
 *   published cache rate it now falls back to the standard input rate —
 *   previously those tokens were removed from the standard bucket and never
 *   billed at all, silently undercounting the cost.
 *
 * Returns a result object (model, provider, token counts, input/output/total
 * costs, currency "USD", pricing). `cachedTokens` echoes the input when
 * supplied; `cached` is present only when a cached cost was actually accrued.
 * Throws InvalidCalculateInputError, InvalidTokenCountError, or
 * UnknownModelError.
 */
function calculate(input) {
  if (!input.model || typeof input.model !== "string") {
    throw new InvalidCalculateInputError("model must be a non-empty string");
  }
  validateTokenCount("inputTokens", input.inputTokens);
  validateTokenCount("outputTokens", input.outputTokens);
  if (input.cachedTokens !== void 0) {
    validateTokenCount("cachedTokens", input.cachedTokens);
    if (input.cachedTokens > input.inputTokens) {
      throw new InvalidCalculateInputError(
        "cachedTokens cannot exceed inputTokens"
      );
    }
  }
  let modelInfo;
  try {
    modelInfo = getModel(input.model);
  } catch (error) {
    // Preserve UnknownModelError so callers can tell "unknown model" apart
    // from malformed input; wrap anything unexpected.
    if (error instanceof UnknownModelError) {
      throw error;
    }
    throw new InvalidCalculateInputError(`Invalid model: ${input.model}`);
  }
  const { pricing } = modelInfo;
  const cachedTokens = input.cachedTokens ?? 0;
  // Cached tokens are carved out of the standard-rate bucket...
  const standardInputTokens = input.inputTokens - cachedTokens;
  const standardInputCost = roundCost(
    standardInputTokens / 1e6 * pricing.inputPer1M
  );
  let cachedInputCost = 0;
  if (cachedTokens > 0) {
    // ...and must therefore always be billed at SOME rate. Fall back to the
    // standard input rate when the model publishes no cache pricing, so the
    // carved-out tokens are never billed at zero (previous behavior).
    const cachedRate = pricing.cachedInputPer1M ?? pricing.inputPer1M;
    cachedInputCost = roundCost(cachedTokens / 1e6 * cachedRate);
  }
  const inputCost = roundCost(standardInputCost + cachedInputCost);
  const outputCost = roundCost(
    input.outputTokens / 1e6 * pricing.outputPer1M
  );
  const total = roundCost(inputCost + outputCost);
  const result = {
    model: modelInfo.id,
    provider: modelInfo.provider,
    inputTokens: input.inputTokens,
    outputTokens: input.outputTokens,
    input: inputCost,
    output: outputCost,
    total,
    currency: "USD",
    pricing: modelInfo.pricing
  };
  // Echo cachedTokens only when the caller supplied it (including 0).
  if (input.cachedTokens !== void 0) {
    result.cachedTokens = input.cachedTokens;
  }
  if (cachedInputCost > 0) {
    result.cached = cachedInputCost;
  }
  return result;
}
374
+
375
// src/compare.ts
// Prices the same request against several models and returns the results
// cheapest-first, each annotated with a 1-based `rank` and `relativeCost`
// (total divided by the cheapest total; defined as 1 when the cheapest is 0).
function compare(input) {
  if (!input.models || input.models.length === 0) {
    throw new InvalidCalculateInputError("models array must not be empty");
  }
  const priced = [];
  for (const model of input.models) {
    const request = {
      model,
      inputTokens: input.inputTokens,
      outputTokens: input.outputTokens
    };
    if (input.cachedTokens !== void 0) {
      request.cachedTokens = input.cachedTokens;
    }
    priced.push(calculate(request));
  }
  priced.sort((left, right) => left.total - right.total);
  const cheapest = priced[0].total;
  return priced.map((result, index) => {
    const relativeCost = cheapest === 0 ? 1 : roundCost(result.total / cheapest);
    return { ...result, rank: index + 1, relativeCost };
  });
}
396
+
397
// src/batch.ts
// Prices many independent requests in one call and sums their totals.
// Throws InvalidCalculateInputError on an empty/missing inputs array;
// individual calculate() errors propagate unchanged.
function calculateBatch(inputs) {
  if (!inputs || inputs.length === 0) {
    throw new InvalidCalculateInputError("inputs array must not be empty");
  }
  const results = [];
  let runningTotal = 0;
  for (const item of inputs) {
    const priced = calculate(item);
    results.push(priced);
    runningTotal += priced.total;
  }
  return {
    results,
    totalCost: roundCost(runningTotal),
    currency: "USD",
    itemCount: results.length
  };
}
411
+
412
// src/streaming.ts
// Rough heuristic: roughly 4 characters per token for English-like text.
var CHARS_PER_TOKEN = 4;
// Pre-flight estimate for a streamed completion: the exact input cost plus
// the expected output cost, with a +/-10% band applied to the output
// portion only (input tokens are known precisely).
function estimateStreamCost(input) {
  if (!Number.isFinite(input.inputTokens) || input.inputTokens < 0) {
    throw new InvalidTokenCountError("inputTokens", input.inputTokens);
  }
  if (!Number.isFinite(input.expectedOutputTokens) || input.expectedOutputTokens < 0) {
    throw new InvalidTokenCountError("expectedOutputTokens", input.expectedOutputTokens);
  }
  const modelInfo = getModel(input.model);
  const rate = modelInfo.pricing;
  const inputCost = roundCost(input.inputTokens / 1e6 * rate.inputPer1M);
  const outputCost = roundCost(input.expectedOutputTokens / 1e6 * rate.outputPer1M);
  return {
    model: modelInfo.id,
    inputTokens: input.inputTokens,
    estimatedOutputTokens: input.expectedOutputTokens,
    estimatedTotal: roundCost(inputCost + outputCost),
    confidence: "medium",
    range: {
      min: roundCost(inputCost + outputCost * 0.9),
      max: roundCost(inputCost + outputCost * 1.1)
    }
  };
}
438
// Creates a stateful tracker for an in-flight streamed response. Output
// tokens are estimated from the accumulated character count divided by
// CHARS_PER_TOKEN; the tracker never contacts the provider.
function createStreamTracker(input) {
  if (!Number.isFinite(input.inputTokens) || input.inputTokens < 0) {
    throw new InvalidTokenCountError("inputTokens", input.inputTokens);
  }
  const modelInfo = getModel(input.model);
  const { pricing } = modelInfo;
  // Input cost is fixed once the prompt is sent; only output accrues.
  const inputCost = roundCost(input.inputTokens / 1e6 * pricing.inputPer1M);
  let totalChars = 0;
  return {
    // Feed each streamed text chunk; only its length is retained.
    addChunk(text) {
      totalChars += text.length;
    },
    // Mid-stream snapshot. Low confidence, so the band is +/-25% of the
    // running total (input portion included, unlike estimateStreamCost).
    getCurrent() {
      const estimatedTokens = Math.round(totalChars / CHARS_PER_TOKEN);
      const outputCost = roundCost(estimatedTokens / 1e6 * pricing.outputPer1M);
      const estimatedTotal = roundCost(inputCost + outputCost);
      return {
        model: modelInfo.id,
        inputTokens: input.inputTokens,
        estimatedOutputTokens: estimatedTokens,
        estimatedTotal,
        confidence: "low",
        range: {
          min: roundCost(estimatedTotal * 0.75),
          max: roundCost(estimatedTotal * 1.25)
        }
      };
    },
    // Final cost result once the stream ends. NOTE(review): outputTokens is
    // still the chars/4 heuristic, not a provider-reported count — the
    // "final" total is therefore also an estimate.
    finalize() {
      const outputTokens = Math.round(totalChars / CHARS_PER_TOKEN);
      const outputCost = roundCost(outputTokens / 1e6 * pricing.outputPer1M);
      return {
        model: modelInfo.id,
        provider: modelInfo.provider,
        inputTokens: input.inputTokens,
        outputTokens,
        input: inputCost,
        output: outputCost,
        total: roundCost(inputCost + outputCost),
        currency: "USD",
        pricing: modelInfo.pricing
      };
    }
  };
}
483
// Annotate the CommonJS export names for ESM import in node:
// Dead code by design: `0 &&` never executes, but Node's cjs-module-lexer
// parses this statically so `import { calculate } from "prompt-pricer"`
// resolves named exports from the CJS build. Do not remove.
0 && (module.exports = {
  InvalidCalculateInputError,
  InvalidTokenCountError,
  UnknownModelError,
  calculate,
  calculateBatch,
  compare,
  createStreamTracker,
  estimateStreamCost,
  models
});
package/dist/index.js ADDED
@@ -0,0 +1,459 @@
1
// src/types.ts
// Brands a raw string as a model id. The branding is purely type-level in
// the original TypeScript; at runtime this is the identity function.
function createModelId(id) {
  return id;
}

// Thrown when a model id (after alias resolution) is not in the catalog.
class UnknownModelError extends Error {
  constructor(modelId) {
    super(`Unknown model: "${modelId}"`);
    this.name = "UnknownModelError";
  }
}

// Thrown when a token count is negative, NaN, or otherwise non-finite.
class InvalidTokenCountError extends Error {
  constructor(field, value) {
    super(`Invalid token count for "${field}": ${value} (must be >= 0)`);
    this.name = "InvalidTokenCountError";
  }
}

// Thrown for any other malformed calculate()/compare()/batch input.
class InvalidCalculateInputError extends Error {
  constructor(message) {
    super(`Invalid input: ${message}`);
    this.name = "InvalidCalculateInputError";
  }
}
23
+
24
// src/models/anthropic.ts
// Static pricing table, USD per 1M tokens. Figures are a snapshot dated by
// `lastUpdated`; confirm against Anthropic's published price list before
// relying on them.
var anthropicModels = [
  {
    id: createModelId("claude-3-5-sonnet"),
    name: "Claude 3.5 Sonnet",
    provider: "anthropic",
    contextWindow: 2e5, // 200,000 tokens
    lastUpdated: "2026-03-01",
    pricing: {
      inputPer1M: 3,
      outputPer1M: 15,
      cachedInputPer1M: 3.75,
      // cache WRITE: 1.25x the standard input rate (the original comment
      // said "25% of standard", which contradicts the value 3.75 vs 3)
      cachedReadPer1M: 0.3
      // cache READ: 10% of the standard input rate (original said "2%")
    },
    notes: "Latest Claude 3.5 model, optimized for speed/cost balance. Supports prompt caching."
  },
  {
    id: createModelId("claude-3-opus"),
    name: "Claude 3 Opus",
    provider: "anthropic",
    contextWindow: 2e5, // 200,000 tokens
    lastUpdated: "2026-03-01",
    pricing: {
      inputPer1M: 15,
      outputPer1M: 75,
      cachedInputPer1M: 18.75, // 1.25x input rate
      cachedReadPer1M: 1.5 // 10% of input rate
    },
    notes: "Most capable Claude 3 variant. Higher cost but best performance."
  },
  {
    id: createModelId("claude-3-sonnet"),
    name: "Claude 3 Sonnet",
    provider: "anthropic",
    contextWindow: 2e5, // 200,000 tokens
    lastUpdated: "2026-03-01",
    pricing: {
      inputPer1M: 3,
      outputPer1M: 15,
      cachedInputPer1M: 3.75, // 1.25x input rate
      cachedReadPer1M: 0.3 // 10% of input rate
    },
    notes: "Balanced Claude 3 model. Use Claude 3.5 Sonnet for better speed/cost."
  },
  {
    id: createModelId("claude-3-haiku"),
    name: "Claude 3 Haiku",
    provider: "anthropic",
    contextWindow: 2e5, // 200,000 tokens
    lastUpdated: "2026-03-01",
    pricing: {
      inputPer1M: 0.25,
      outputPer1M: 1.25,
      cachedInputPer1M: 0.3125, // 1.25x input rate
      cachedReadPer1M: 0.025 // 10% of input rate
    },
    notes: "Fastest and most compact Claude model. Best for simple tasks."
  }
];
85
+
86
// src/models/openai.ts
// Static pricing table, USD per 1M tokens; snapshot dated by `lastUpdated`.
// batch*Per1M fields are the Batch API rates (here, 50% of the live rates).
// NOTE(review): batch rates are present in the data but not consumed by
// calculate() in this bundle — verify intended wiring.
var openaiModels = [
  {
    id: createModelId("gpt-4o"),
    name: "GPT-4o",
    provider: "openai",
    contextWindow: 128e3, // 128,000 tokens
    lastUpdated: "2026-03-15",
    pricing: {
      inputPer1M: 5,
      outputPer1M: 15,
      batchInputPer1M: 2.5,
      batchOutputPer1M: 7.5
    },
    notes: "Latest flagship model. Supports batch API for cost reduction."
  },
  {
    id: createModelId("gpt-4o-mini"),
    name: "GPT-4o Mini",
    provider: "openai",
    contextWindow: 128e3,
    lastUpdated: "2026-03-15",
    pricing: {
      inputPer1M: 0.15,
      outputPer1M: 0.6,
      batchInputPer1M: 0.075,
      batchOutputPer1M: 0.3
    },
    notes: "Lightweight, cost-effective model. Great for simple tasks and high volume."
  },
  {
    id: createModelId("gpt-4-turbo"),
    name: "GPT-4 Turbo",
    provider: "openai",
    contextWindow: 128e3,
    lastUpdated: "2026-03-15",
    pricing: {
      inputPer1M: 10,
      outputPer1M: 30,
      batchInputPer1M: 5,
      batchOutputPer1M: 15
    },
    notes: "Previous generation flagship. Maintained for compatibility."
  },
  {
    id: createModelId("o1"),
    name: "o1",
    provider: "openai",
    contextWindow: 128e3,
    lastUpdated: "2026-03-15",
    pricing: {
      inputPer1M: 15,
      outputPer1M: 60
      // no batch or cache rates published for this entry
    },
    notes: "Reasoning model for complex problem-solving. Premium pricing."
  },
  {
    id: createModelId("o1-mini"),
    name: "o1 Mini",
    provider: "openai",
    contextWindow: 128e3,
    lastUpdated: "2026-03-15",
    pricing: {
      inputPer1M: 3,
      outputPer1M: 12
    },
    notes: "Lightweight reasoning model. Better cost/performance than o1."
  }
];
155
+
156
// src/models/google.ts
// Static pricing table, USD per 1M tokens; snapshot dated by `lastUpdated`.
// Gemini entries publish no cache/batch rates in this catalog.
var googleModels = [
  {
    id: createModelId("gemini-1.5-pro"),
    name: "Gemini 1.5 Pro",
    provider: "google",
    contextWindow: 2e6, // 2,000,000 tokens
    lastUpdated: "2026-02-01",
    pricing: {
      inputPer1M: 7.5,
      outputPer1M: 30
    },
    notes: "Latest flagship model. Massive 2M token context window."
  },
  {
    id: createModelId("gemini-1.5-flash"),
    name: "Gemini 1.5 Flash",
    provider: "google",
    contextWindow: 1e6, // 1,000,000 tokens
    lastUpdated: "2026-02-01",
    pricing: {
      inputPer1M: 0.075,
      outputPer1M: 0.3
    },
    notes: "Fast, cost-effective model. Excellent for high-volume applications."
  },
  {
    id: createModelId("gemini-1-pro"),
    name: "Gemini 1 Pro",
    provider: "google",
    contextWindow: 32e3, // 32,000 tokens
    lastUpdated: "2026-02-01",
    pricing: {
      inputPer1M: 0.5,
      outputPer1M: 1.5
    },
    notes: "Previous generation. Maintained for compatibility."
  }
];
195
+
196
// src/models/catalog.ts
// Flattened catalog of every known model, in provider declaration order.
var ALL_MODELS = [
  ...anthropicModels,
  ...openaiModels,
  ...googleModels
];
// Shorthand / legacy names mapped to canonical catalog ids. Keys are matched
// after getModel lowercases and trims the caller-supplied id.
var ALIASES = {
  // Claude aliases
  "claude-sonnet": "claude-3-5-sonnet",
  "claude-haiku": "claude-3-haiku",
  "claude-opus": "claude-3-opus",
  // GPT aliases
  "gpt4": "gpt-4o",
  "gpt4o": "gpt-4o",
  "gpt-4": "gpt-4o",
  "gpt4o-mini": "gpt-4o-mini",
  "gpt-4-mini": "gpt-4o-mini",
  // Gemini aliases
  "gemini": "gemini-1.5-pro",
  "gemini-pro": "gemini-1.5-pro",
  "gemini-flash": "gemini-1.5-flash"
};
218
// Resolves a caller-supplied model id (case-insensitive, whitespace-tolerant)
// to its catalog entry, consulting ALIASES for shorthand names.
// Throws UnknownModelError when neither the id nor an alias matches.
function getModel(modelId) {
  const wanted = modelId.toLowerCase().trim();
  const lookup = (id) => ALL_MODELS.find((entry) => entry.id === id);
  const direct = lookup(wanted);
  if (direct) return direct;
  const canonicalId = ALIASES[wanted];
  const aliased = canonicalId ? lookup(canonicalId) : void 0;
  if (aliased) return aliased;
  throw new UnknownModelError(modelId);
}
229
// Returns the full catalog (the shared array itself — callers must not mutate).
function listModels() {
  return ALL_MODELS;
}
// Returns every catalog entry published by the given provider.
function getModelsByProvider(provider) {
  return ALL_MODELS.filter((entry) => entry.provider === provider);
}
// Case-insensitive substring search across model id, display name,
// and provider.
function searchModels(query) {
  const needle = query.toLowerCase();
  const hits = (text) => text.toLowerCase().includes(needle);
  return ALL_MODELS.filter(
    (entry) => hits(entry.id) || hits(entry.name) || hits(entry.provider)
  );
}
241
// Returns a fresh copy of the catalog ordered by ascending input-token price
// (the original array is left untouched).
function getModelsByCost() {
  return ALL_MODELS.slice().sort(
    (left, right) => left.pricing.inputPer1M - right.pricing.inputPer1M
  );
}
247
// Summarizes the catalog: total size, per-provider counts, and the time the
// stats were generated (ISO 8601; NOT the pricing snapshot date).
function getCatalogStats() {
  const providers = { anthropic: 0, openai: 0, google: 0 };
  ALL_MODELS.forEach((entry) => {
    providers[entry.provider] += 1;
  });
  return {
    totalModels: ALL_MODELS.length,
    providers,
    lastUpdated: new Date().toISOString()
  };
}
262
// Grouped catalog API exposed to consumers as `models`.
var models = {
  get: getModel, // resolve an id or alias; throws UnknownModelError
  list: listModels, // full catalog
  byProvider: getModelsByProvider, // filter by provider name
  search: searchModels, // substring match on id/name/provider
  byCost: getModelsByCost, // copy sorted by ascending input price
  stats: getCatalogStats // counts + generation timestamp
};
270
+
271
// src/utils.ts
// Rounds a dollar amount to 8 decimal places, suppressing binary
// floating-point noise from the per-million-token arithmetic.
function roundCost(value) {
  const SCALE = 1e8;
  return Math.round(value * SCALE) / SCALE;
}
275
+
276
// src/calculate.ts
// Guards that a token count is a finite, non-negative number; throws
// InvalidTokenCountError naming the offending field otherwise.
function validateTokenCount(field, value) {
  const valid = Number.isFinite(value) && value >= 0;
  if (!valid) {
    throw new InvalidTokenCountError(field, value);
  }
}
282
/**
 * Prices a single request in USD.
 *
 * input: { model, inputTokens, outputTokens, cachedTokens? }
 * - model: catalog id or alias (resolved via getModel).
 * - cachedTokens, when given, is the cached portion of inputTokens; it is
 *   billed at the model's cachedInputPer1M rate. For models without a
 *   published cache rate it now falls back to the standard input rate —
 *   previously those tokens were removed from the standard bucket and never
 *   billed at all, silently undercounting the cost.
 *
 * Returns a result object (model, provider, token counts, input/output/total
 * costs, currency "USD", pricing). `cachedTokens` echoes the input when
 * supplied; `cached` is present only when a cached cost was actually accrued.
 * Throws InvalidCalculateInputError, InvalidTokenCountError, or
 * UnknownModelError.
 */
function calculate(input) {
  if (!input.model || typeof input.model !== "string") {
    throw new InvalidCalculateInputError("model must be a non-empty string");
  }
  validateTokenCount("inputTokens", input.inputTokens);
  validateTokenCount("outputTokens", input.outputTokens);
  if (input.cachedTokens !== void 0) {
    validateTokenCount("cachedTokens", input.cachedTokens);
    if (input.cachedTokens > input.inputTokens) {
      throw new InvalidCalculateInputError(
        "cachedTokens cannot exceed inputTokens"
      );
    }
  }
  let modelInfo;
  try {
    modelInfo = getModel(input.model);
  } catch (error) {
    // Preserve UnknownModelError so callers can tell "unknown model" apart
    // from malformed input; wrap anything unexpected.
    if (error instanceof UnknownModelError) {
      throw error;
    }
    throw new InvalidCalculateInputError(`Invalid model: ${input.model}`);
  }
  const { pricing } = modelInfo;
  const cachedTokens = input.cachedTokens ?? 0;
  // Cached tokens are carved out of the standard-rate bucket...
  const standardInputTokens = input.inputTokens - cachedTokens;
  const standardInputCost = roundCost(
    standardInputTokens / 1e6 * pricing.inputPer1M
  );
  let cachedInputCost = 0;
  if (cachedTokens > 0) {
    // ...and must therefore always be billed at SOME rate. Fall back to the
    // standard input rate when the model publishes no cache pricing, so the
    // carved-out tokens are never billed at zero (previous behavior).
    const cachedRate = pricing.cachedInputPer1M ?? pricing.inputPer1M;
    cachedInputCost = roundCost(cachedTokens / 1e6 * cachedRate);
  }
  const inputCost = roundCost(standardInputCost + cachedInputCost);
  const outputCost = roundCost(
    input.outputTokens / 1e6 * pricing.outputPer1M
  );
  const total = roundCost(inputCost + outputCost);
  const result = {
    model: modelInfo.id,
    provider: modelInfo.provider,
    inputTokens: input.inputTokens,
    outputTokens: input.outputTokens,
    input: inputCost,
    output: outputCost,
    total,
    currency: "USD",
    pricing: modelInfo.pricing
  };
  // Echo cachedTokens only when the caller supplied it (including 0).
  if (input.cachedTokens !== void 0) {
    result.cachedTokens = input.cachedTokens;
  }
  if (cachedInputCost > 0) {
    result.cached = cachedInputCost;
  }
  return result;
}
340
+
341
// src/compare.ts
// Prices the same request against several models and returns the results
// cheapest-first, each annotated with a 1-based `rank` and `relativeCost`
// (total divided by the cheapest total; defined as 1 when the cheapest is 0).
function compare(input) {
  if (!input.models || input.models.length === 0) {
    throw new InvalidCalculateInputError("models array must not be empty");
  }
  const priced = [];
  for (const model of input.models) {
    const request = {
      model,
      inputTokens: input.inputTokens,
      outputTokens: input.outputTokens
    };
    if (input.cachedTokens !== void 0) {
      request.cachedTokens = input.cachedTokens;
    }
    priced.push(calculate(request));
  }
  priced.sort((left, right) => left.total - right.total);
  const cheapest = priced[0].total;
  return priced.map((result, index) => {
    const relativeCost = cheapest === 0 ? 1 : roundCost(result.total / cheapest);
    return { ...result, rank: index + 1, relativeCost };
  });
}
362
+
363
// src/batch.ts
// Prices many independent requests in one call and sums their totals.
// Throws InvalidCalculateInputError on an empty/missing inputs array;
// individual calculate() errors propagate unchanged.
function calculateBatch(inputs) {
  if (!inputs || inputs.length === 0) {
    throw new InvalidCalculateInputError("inputs array must not be empty");
  }
  const results = [];
  let runningTotal = 0;
  for (const item of inputs) {
    const priced = calculate(item);
    results.push(priced);
    runningTotal += priced.total;
  }
  return {
    results,
    totalCost: roundCost(runningTotal),
    currency: "USD",
    itemCount: results.length
  };
}
377
+
378
// src/streaming.ts
// Rough heuristic: roughly 4 characters per token for English-like text.
var CHARS_PER_TOKEN = 4;
// Pre-flight estimate for a streamed completion: the exact input cost plus
// the expected output cost, with a +/-10% band applied to the output
// portion only (input tokens are known precisely).
function estimateStreamCost(input) {
  if (!Number.isFinite(input.inputTokens) || input.inputTokens < 0) {
    throw new InvalidTokenCountError("inputTokens", input.inputTokens);
  }
  if (!Number.isFinite(input.expectedOutputTokens) || input.expectedOutputTokens < 0) {
    throw new InvalidTokenCountError("expectedOutputTokens", input.expectedOutputTokens);
  }
  const modelInfo = getModel(input.model);
  const rate = modelInfo.pricing;
  const inputCost = roundCost(input.inputTokens / 1e6 * rate.inputPer1M);
  const outputCost = roundCost(input.expectedOutputTokens / 1e6 * rate.outputPer1M);
  return {
    model: modelInfo.id,
    inputTokens: input.inputTokens,
    estimatedOutputTokens: input.expectedOutputTokens,
    estimatedTotal: roundCost(inputCost + outputCost),
    confidence: "medium",
    range: {
      min: roundCost(inputCost + outputCost * 0.9),
      max: roundCost(inputCost + outputCost * 1.1)
    }
  };
}
404
// Creates a stateful tracker for an in-flight streamed response. Output
// tokens are estimated from the accumulated character count divided by
// CHARS_PER_TOKEN; the tracker never contacts the provider.
function createStreamTracker(input) {
  if (!Number.isFinite(input.inputTokens) || input.inputTokens < 0) {
    throw new InvalidTokenCountError("inputTokens", input.inputTokens);
  }
  const modelInfo = getModel(input.model);
  const { pricing } = modelInfo;
  // Input cost is fixed once the prompt is sent; only output accrues.
  const inputCost = roundCost(input.inputTokens / 1e6 * pricing.inputPer1M);
  let totalChars = 0;
  return {
    // Feed each streamed text chunk; only its length is retained.
    addChunk(text) {
      totalChars += text.length;
    },
    // Mid-stream snapshot. Low confidence, so the band is +/-25% of the
    // running total (input portion included, unlike estimateStreamCost).
    getCurrent() {
      const estimatedTokens = Math.round(totalChars / CHARS_PER_TOKEN);
      const outputCost = roundCost(estimatedTokens / 1e6 * pricing.outputPer1M);
      const estimatedTotal = roundCost(inputCost + outputCost);
      return {
        model: modelInfo.id,
        inputTokens: input.inputTokens,
        estimatedOutputTokens: estimatedTokens,
        estimatedTotal,
        confidence: "low",
        range: {
          min: roundCost(estimatedTotal * 0.75),
          max: roundCost(estimatedTotal * 1.25)
        }
      };
    },
    // Final cost result once the stream ends. NOTE(review): outputTokens is
    // still the chars/4 heuristic, not a provider-reported count — the
    // "final" total is therefore also an estimate.
    finalize() {
      const outputTokens = Math.round(totalChars / CHARS_PER_TOKEN);
      const outputCost = roundCost(outputTokens / 1e6 * pricing.outputPer1M);
      return {
        model: modelInfo.id,
        provider: modelInfo.provider,
        inputTokens: input.inputTokens,
        outputTokens,
        input: inputCost,
        output: outputCost,
        total: roundCost(inputCost + outputCost),
        currency: "USD",
        pricing: modelInfo.pricing
      };
    }
  };
}
449
// Public API of the ESM bundle (mirrors the CJS export list exactly).
export {
  InvalidCalculateInputError,
  InvalidTokenCountError,
  UnknownModelError,
  calculate,
  calculateBatch,
  compare,
  createStreamTracker,
  estimateStreamCost,
  models
};
package/package.json ADDED
@@ -0,0 +1,54 @@
1
+ {
2
+ "name": "prompt-pricer",
3
+ "version": "0.1.0",
4
+ "description": "Calculate LLM API costs before you get the bill. TypeScript-first, zero dependencies.",
5
+ "type": "module",
6
+ "exports": {
7
+ ".": {
8
+ "bun": "./src/index.ts",
9
+ "import": "./dist/index.js",
10
+ "require": "./dist/index.cjs",
11
+ "types": "./dist/index.d.ts"
12
+ }
13
+ },
14
+ "main": "./src/index.ts",
15
+ "module": "./src/index.ts",
16
+ "types": "./dist/index.d.ts",
17
+ "files": [
18
+ "dist",
19
+ "CHANGELOG.md"
20
+ ],
21
+ "sideEffects": false,
22
+ "scripts": {
23
+ "build": "bun build.ts",
24
+ "test": "bun test",
25
+ "test:coverage": "bun test --coverage",
26
+ "typecheck": "tsc --noEmit",
27
+ "smoke": "bun smoke.ts"
28
+ },
29
+ "keywords": [
30
+ "llm",
31
+ "ai",
32
+ "cost",
33
+ "calculator",
34
+ "claude",
35
+ "openai",
36
+ "gemini",
37
+ "tokens",
38
+ "pricing",
39
+ "typescript"
40
+ ],
41
+ "author": "JD",
42
+ "license": "MIT",
43
+ "repository": {
44
+ "type": "git",
45
+ "url": "git+https://github.com/jd/prompt-pricer.git"
46
+ },
47
+ "homepage": "https://promptpricer.dev",
48
+ "bugs": {
49
+ "url": "https://github.com/jd/prompt-pricer/issues"
50
+ },
51
+ "engines": {
52
+ "node": ">=18"
53
+ }
54
+ }