spendlens 1.0.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dist/index.d.mts +132 -0
- package/dist/index.d.ts +132 -0
- package/dist/index.js +231 -0
- package/dist/index.js.map +1 -0
- package/dist/index.mjs +196 -0
- package/dist/index.mjs.map +1 -0
- package/dist/tokenlens.umd.min.js +1 -0
- package/package.json +48 -0
package/dist/index.d.mts
ADDED
|
@@ -0,0 +1,132 @@
|
|
|
1
|
+
interface ModelConfig {
  /** Human-readable label */
  label: string;
  /** Provider name */
  provider: "anthropic" | "openai" | "google" | "meta" | "custom";
  /** Input cost in USD per 1 million tokens */
  inputCostPer1M: number;
  /** Output cost in USD per 1 million tokens */
  outputCostPer1M: number;
  /** Maximum context window in tokens */
  contextWindow: number;
}
interface TokenStats {
  /** Estimated token count */
  tokens: number;
  /** Raw character count */
  chars: number;
  /** Word count */
  words: number;
  /** Sentence count */
  sentences: number;
  /** Paragraph count */
  paragraphs: number;
  /** Estimated input cost in USD */
  inputCost: number;
  /** Context usage as a fraction (0–1) */
  contextUsage: number;
  /** Context usage as a percentage string e.g. "12.34%" */
  contextUsagePct: string;
  /** Whether prompt is within context limits */
  withinLimit: boolean;
  /** Tokens remaining before hitting the context limit */
  tokensRemaining: number;
}
interface TokenLensOptions {
  /** Model ID from MODELS or a custom ModelConfig */
  model?: string | ModelConfig;
  /** Override the chars-per-token ratio (default: 3.8) */
  charsPerToken?: number;
}
type ModelId = keyof typeof MODELS;
declare const MODELS: Record<string, ModelConfig>;
/**
 * Estimate token count from a string.
 *
 * Uses a character-ratio heuristic (default 3.8 chars/token) which is accurate
 * to within ~5% for English prose. Code and non-Latin scripts may vary.
 *
 * @example
 * estimateTokens("Hello, world!") // → 3
 */
declare function estimateTokens(text: string, charsPerToken?: number): number;
/**
 * Calculate the USD cost for a given token count and model.
 *
 * @example
 * // 1000 tokens at $3 / 1M input tokens:
 * calcCost(1000, MODELS["claude-sonnet-4"], "input") // → 0.003
 */
declare function calcCost(tokens: number, model: ModelConfig, type?: "input" | "output"): number;
/**
 * Count words in a string.
 */
declare function countWords(text: string): number;
/**
 * Count sentences in a string.
 */
declare function countSentences(text: string): number;
/**
 * Count paragraphs (blocks separated by blank lines).
 */
declare function countParagraphs(text: string): number;
/**
 * Resolve a model ID string or ModelConfig object into a ModelConfig.
 * Falls back to claude-sonnet-4 if the model ID is unknown.
 */
declare function resolveModel(model?: string | ModelConfig): ModelConfig;
/**
 * Compute full token statistics for a prompt string.
 *
 * @example
 * const stats = getStats("Write me a poem about the sea", { model: "gpt-4o" });
 * console.log(stats.tokens); // 8
 * console.log(stats.inputCost); // 0.00002  (8 tokens at $2.5 / 1M)
 * console.log(stats.contextUsagePct); // "0.01%"
 */
declare function getStats(text: string, options?: TokenLensOptions): TokenStats;
/**
 * Create a stateful watcher that recomputes stats whenever the text changes.
 * Results for unchanged text are memoized; callers pull stats via `update`
 * (no callback is invoked). Useful for attaching to textarea `input` events.
 *
 * @example
 * const watcher = createWatcher({ model: "claude-sonnet-4" });
 * textarea.addEventListener("input", (e) => {
 *   const stats = watcher.update(e.target.value);
 *   console.log(stats.tokens, stats.inputCost);
 * });
 * watcher.destroy(); // cleanup
 */
declare function createWatcher(options?: TokenLensOptions): {
  update(text: string): TokenStats;
  reset(): void;
  destroy(): void;
};
/**
 * Format a cost value as a human-readable USD string.
 *
 * @example
 * formatCost(0.000003) // "$0.000003"
 * formatCost(0.123456) // "$0.1235"
 * formatCost(1.5) // "$1.50"
 */
declare function formatCost(cost: number): string;
/**
 * Get a severity level based on context window usage.
 * Useful for colour-coding a progress bar.
 */
declare function getContextSeverity(usage: number): "ok" | "warning" | "danger";
declare const TokenLens: {
  getStats: typeof getStats;
  estimateTokens: typeof estimateTokens;
  calcCost: typeof calcCost;
  formatCost: typeof formatCost;
  countWords: typeof countWords;
  countSentences: typeof countSentences;
  countParagraphs: typeof countParagraphs;
  createWatcher: typeof createWatcher;
  resolveModel: typeof resolveModel;
  getContextSeverity: typeof getContextSeverity;
  MODELS: Record<string, ModelConfig>;
};

export { MODELS, type ModelConfig, type ModelId, type TokenLensOptions, type TokenStats, calcCost, countParagraphs, countSentences, countWords, createWatcher, TokenLens as default, estimateTokens, formatCost, getContextSeverity, getStats, resolveModel };
|
package/dist/index.d.ts
ADDED
|
@@ -0,0 +1,132 @@
|
|
|
1
|
+
interface ModelConfig {
  /** Human-readable label */
  label: string;
  /** Provider name */
  provider: "anthropic" | "openai" | "google" | "meta" | "custom";
  /** Input cost in USD per 1 million tokens */
  inputCostPer1M: number;
  /** Output cost in USD per 1 million tokens */
  outputCostPer1M: number;
  /** Maximum context window in tokens */
  contextWindow: number;
}
interface TokenStats {
  /** Estimated token count */
  tokens: number;
  /** Raw character count */
  chars: number;
  /** Word count */
  words: number;
  /** Sentence count */
  sentences: number;
  /** Paragraph count */
  paragraphs: number;
  /** Estimated input cost in USD */
  inputCost: number;
  /** Context usage as a fraction (0–1) */
  contextUsage: number;
  /** Context usage as a percentage string e.g. "12.34%" */
  contextUsagePct: string;
  /** Whether prompt is within context limits */
  withinLimit: boolean;
  /** Tokens remaining before hitting the context limit */
  tokensRemaining: number;
}
interface TokenLensOptions {
  /** Model ID from MODELS or a custom ModelConfig */
  model?: string | ModelConfig;
  /** Override the chars-per-token ratio (default: 3.8) */
  charsPerToken?: number;
}
type ModelId = keyof typeof MODELS;
declare const MODELS: Record<string, ModelConfig>;
/**
 * Estimate token count from a string.
 *
 * Uses a character-ratio heuristic (default 3.8 chars/token) which is accurate
 * to within ~5% for English prose. Code and non-Latin scripts may vary.
 *
 * @example
 * estimateTokens("Hello, world!") // → 3
 */
declare function estimateTokens(text: string, charsPerToken?: number): number;
/**
 * Calculate the USD cost for a given token count and model.
 *
 * @example
 * // 1000 tokens at $3 / 1M input tokens:
 * calcCost(1000, MODELS["claude-sonnet-4"], "input") // → 0.003
 */
declare function calcCost(tokens: number, model: ModelConfig, type?: "input" | "output"): number;
/**
 * Count words in a string.
 */
declare function countWords(text: string): number;
/**
 * Count sentences in a string.
 */
declare function countSentences(text: string): number;
/**
 * Count paragraphs (blocks separated by blank lines).
 */
declare function countParagraphs(text: string): number;
/**
 * Resolve a model ID string or ModelConfig object into a ModelConfig.
 * Falls back to claude-sonnet-4 if the model ID is unknown.
 */
declare function resolveModel(model?: string | ModelConfig): ModelConfig;
/**
 * Compute full token statistics for a prompt string.
 *
 * @example
 * const stats = getStats("Write me a poem about the sea", { model: "gpt-4o" });
 * console.log(stats.tokens); // 8
 * console.log(stats.inputCost); // 0.00002  (8 tokens at $2.5 / 1M)
 * console.log(stats.contextUsagePct); // "0.01%"
 */
declare function getStats(text: string, options?: TokenLensOptions): TokenStats;
/**
 * Create a stateful watcher that recomputes stats whenever the text changes.
 * Results for unchanged text are memoized; callers pull stats via `update`
 * (no callback is invoked). Useful for attaching to textarea `input` events.
 *
 * @example
 * const watcher = createWatcher({ model: "claude-sonnet-4" });
 * textarea.addEventListener("input", (e) => {
 *   const stats = watcher.update(e.target.value);
 *   console.log(stats.tokens, stats.inputCost);
 * });
 * watcher.destroy(); // cleanup
 */
declare function createWatcher(options?: TokenLensOptions): {
  update(text: string): TokenStats;
  reset(): void;
  destroy(): void;
};
/**
 * Format a cost value as a human-readable USD string.
 *
 * @example
 * formatCost(0.000003) // "$0.000003"
 * formatCost(0.123456) // "$0.1235"
 * formatCost(1.5) // "$1.50"
 */
declare function formatCost(cost: number): string;
/**
 * Get a severity level based on context window usage.
 * Useful for colour-coding a progress bar.
 */
declare function getContextSeverity(usage: number): "ok" | "warning" | "danger";
declare const TokenLens: {
  getStats: typeof getStats;
  estimateTokens: typeof estimateTokens;
  calcCost: typeof calcCost;
  formatCost: typeof formatCost;
  countWords: typeof countWords;
  countSentences: typeof countSentences;
  countParagraphs: typeof countParagraphs;
  createWatcher: typeof createWatcher;
  resolveModel: typeof resolveModel;
  getContextSeverity: typeof getContextSeverity;
  MODELS: Record<string, ModelConfig>;
};

export { MODELS, type ModelConfig, type ModelId, type TokenLensOptions, type TokenStats, calcCost, countParagraphs, countSentences, countWords, createWatcher, TokenLens as default, estimateTokens, formatCost, getContextSeverity, getStats, resolveModel };
|
package/dist/index.js
ADDED
|
@@ -0,0 +1,231 @@
|
|
|
1
|
+
"use strict";
|
|
2
|
+
var __defProp = Object.defineProperty;
|
|
3
|
+
var __getOwnPropDesc = Object.getOwnPropertyDescriptor;
|
|
4
|
+
var __getOwnPropNames = Object.getOwnPropertyNames;
|
|
5
|
+
var __hasOwnProp = Object.prototype.hasOwnProperty;
|
|
6
|
+
var __export = (target, all) => {
|
|
7
|
+
for (var name in all)
|
|
8
|
+
__defProp(target, name, { get: all[name], enumerable: true });
|
|
9
|
+
};
|
|
10
|
+
var __copyProps = (to, from, except, desc) => {
|
|
11
|
+
if (from && typeof from === "object" || typeof from === "function") {
|
|
12
|
+
for (let key of __getOwnPropNames(from))
|
|
13
|
+
if (!__hasOwnProp.call(to, key) && key !== except)
|
|
14
|
+
__defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable });
|
|
15
|
+
}
|
|
16
|
+
return to;
|
|
17
|
+
};
|
|
18
|
+
var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod);
|
|
19
|
+
|
|
20
|
+
// src/index.ts
|
|
21
|
+
var src_exports = {};
|
|
22
|
+
__export(src_exports, {
|
|
23
|
+
MODELS: () => MODELS,
|
|
24
|
+
calcCost: () => calcCost,
|
|
25
|
+
countParagraphs: () => countParagraphs,
|
|
26
|
+
countSentences: () => countSentences,
|
|
27
|
+
countWords: () => countWords,
|
|
28
|
+
createWatcher: () => createWatcher,
|
|
29
|
+
default: () => src_default,
|
|
30
|
+
estimateTokens: () => estimateTokens,
|
|
31
|
+
formatCost: () => formatCost,
|
|
32
|
+
getContextSeverity: () => getContextSeverity,
|
|
33
|
+
getStats: () => getStats,
|
|
34
|
+
resolveModel: () => resolveModel
|
|
35
|
+
});
|
|
36
|
+
module.exports = __toCommonJS(src_exports);
|
|
37
|
+
// Model registry: per-model pricing (USD per 1M tokens) and context window
// sizes. NOTE(review): pricing is a point-in-time snapshot baked into the
// package — verify against each provider's current price list before relying
// on these figures for billing.
var MODELS = {
  // Anthropic
  "claude-sonnet-4": {
    label: "Claude Sonnet 4",
    provider: "anthropic",
    inputCostPer1M: 3,
    outputCostPer1M: 15,
    contextWindow: 2e5
  },
  "claude-opus-4": {
    label: "Claude Opus 4",
    provider: "anthropic",
    inputCostPer1M: 15,
    outputCostPer1M: 75,
    contextWindow: 2e5
  },
  "claude-haiku-4": {
    label: "Claude Haiku 4",
    provider: "anthropic",
    inputCostPer1M: 0.8,
    outputCostPer1M: 4,
    contextWindow: 2e5
  },
  // OpenAI
  "gpt-4o": {
    label: "GPT-4o",
    provider: "openai",
    inputCostPer1M: 2.5,
    outputCostPer1M: 10,
    contextWindow: 128e3
  },
  "gpt-4o-mini": {
    label: "GPT-4o mini",
    provider: "openai",
    inputCostPer1M: 0.15,
    outputCostPer1M: 0.6,
    contextWindow: 128e3
  },
  "o1": {
    label: "o1",
    provider: "openai",
    inputCostPer1M: 15,
    outputCostPer1M: 60,
    contextWindow: 2e5
  },
  "o3-mini": {
    label: "o3-mini",
    provider: "openai",
    inputCostPer1M: 1.1,
    outputCostPer1M: 4.4,
    contextWindow: 2e5
  },
  // Google
  "gemini-1.5-pro": {
    label: "Gemini 1.5 Pro",
    provider: "google",
    inputCostPer1M: 3.5,
    outputCostPer1M: 10.5,
    contextWindow: 1e6
  },
  "gemini-1.5-flash": {
    label: "Gemini 1.5 Flash",
    provider: "google",
    inputCostPer1M: 0.35,
    outputCostPer1M: 1.05,
    contextWindow: 1e6
  },
  "gemini-2.0-flash": {
    label: "Gemini 2.0 Flash",
    provider: "google",
    inputCostPer1M: 0.1,
    outputCostPer1M: 0.4,
    contextWindow: 1e6
  },
  // Meta
  "llama-3.1-8b": {
    label: "Llama 3.1 8B",
    provider: "meta",
    inputCostPer1M: 0.18,
    outputCostPer1M: 0.18,
    contextWindow: 131072
  },
  "llama-3.1-70b": {
    label: "Llama 3.1 70B",
    provider: "meta",
    inputCostPer1M: 0.88,
    outputCostPer1M: 0.88,
    contextWindow: 131072
  }
};
|
|
127
|
+
// Heuristic chars-per-token ratio used when the caller does not override it.
var DEFAULT_CHARS_PER_TOKEN = 3.8;
/**
 * Estimate the token count of `text` via a character-ratio heuristic.
 * Empty or falsy input yields 0; otherwise length / charsPerToken, rounded
 * to the nearest integer.
 */
function estimateTokens(text, charsPerToken = DEFAULT_CHARS_PER_TOKEN) {
  const length = text ? text.length : 0;
  return length === 0 ? 0 : Math.round(length / charsPerToken);
}
|
|
132
|
+
/**
 * USD cost of `tokens` tokens under `model`'s pricing.
 * `type` selects the input or output rate (default "input").
 */
function calcCost(tokens, model, type = "input") {
  const ratePerMillion = type === "input" ? model.inputCostPer1M : model.outputCostPer1M;
  const millions = tokens / 1e6;
  return millions * ratePerMillion;
}
|
|
136
|
+
/**
 * Count whitespace-separated words; whitespace-only or empty input is 0.
 */
function countWords(text) {
  const trimmed = text.trim();
  if (trimmed === "") return 0;
  return trimmed.split(/\s+/).length;
}
|
|
139
|
+
/**
 * Count sentences in a string.
 *
 * A sentence boundary is a run of `.`, `!` or `?` followed by whitespace or
 * the end of the trimmed text; text with content but no terminator counts as
 * one sentence.
 *
 * Fix: the previous version matched terminator+whitespace on the raw string
 * and unconditionally added 1 for a trailing sentence, so trailing
 * whitespace after the final terminator ("One. Two. ") inflated the count
 * (3 instead of 2). We now trim first and only add 1 when the text does not
 * end with a terminator.
 */
function countSentences(text) {
  const trimmed = text.trim();
  if (!trimmed) return 0;
  const terminators = trimmed.match(/[.!?]+(\s|$)/g);
  if (!terminators) return 1;
  return /[.!?]$/.test(trimmed) ? terminators.length : terminators.length + 1;
}
|
|
144
|
+
/**
 * Count paragraphs: blocks of text separated by at least one blank
 * (possibly whitespace-only) line. Empty input is 0.
 */
function countParagraphs(text) {
  if (text.trim() === "") return 0;
  let total = 0;
  for (const block of text.split(/\n\s*\n/)) {
    if (block.trim().length > 0) total += 1;
  }
  return total;
}
|
|
148
|
+
/**
 * Normalize a model ID string or ModelConfig object into a ModelConfig.
 * Missing, falsy, or unknown IDs fall back to the "claude-sonnet-4" entry.
 */
function resolveModel(model) {
  const fallback = MODELS["claude-sonnet-4"];
  if (!model) return fallback;
  if (typeof model !== "object") return MODELS[model] ?? fallback;
  return model;
}
|
|
153
|
+
/**
 * Compute full token statistics for a prompt string: token estimate, raw
 * text counts, estimated input cost, and context-window usage against the
 * resolved model.
 */
function getStats(text, options = {}) {
  const { model: modelOption, charsPerToken } = options;
  const model = resolveModel(modelOption);
  const tokens = estimateTokens(text, charsPerToken);
  const usedFraction = Math.min(tokens / model.contextWindow, 1);
  return {
    tokens,
    chars: text.length,
    words: countWords(text),
    sentences: countSentences(text),
    paragraphs: countParagraphs(text),
    inputCost: calcCost(tokens, model, "input"),
    contextUsage: usedFraction,
    contextUsagePct: `${(usedFraction * 100).toFixed(2)}%`,
    withinLimit: tokens <= model.contextWindow,
    tokensRemaining: Math.max(model.contextWindow - tokens, 0)
  };
}
|
|
172
|
+
/**
 * Create a stateful watcher whose `update(text)` returns stats for the given
 * text, memoizing the result for unchanged input. `reset` and `destroy` both
 * clear the memoized state.
 */
function createWatcher(options = {}) {
  let cachedText = "";
  let cachedStats = null;
  const clear = () => {
    cachedText = "";
    cachedStats = null;
  };
  return {
    update(text) {
      // Cache hit: same text as last call and stats already computed.
      if (cachedStats && text === cachedText) {
        return cachedStats;
      }
      cachedText = text;
      cachedStats = getStats(text, options);
      return cachedStats;
    },
    reset: clear,
    destroy: clear
  };
}
|
|
192
|
+
/**
 * Format a cost value as a human-readable USD string.
 *
 * Buckets:
 *   0         -> "$0.00"
 *   < $0.0001 -> 6 decimal places (e.g. "$0.000003")
 *   < $1      -> 4 decimal places (e.g. "$0.1235")
 *   otherwise -> 2 decimal places (e.g. "$1.50")
 *
 * Fix: the middle threshold was `cost < 0.01`, which rendered the package's
 * own documented example formatCost(0.123456) as "$0.12" instead of the
 * promised "$0.1235". The 4-decimal bucket now covers everything under $1,
 * matching the JSDoc contract.
 */
function formatCost(cost) {
  if (cost === 0) return "$0.00";
  if (cost < 1e-4) return "$" + cost.toFixed(6);
  if (cost < 1) return "$" + cost.toFixed(4);
  return "$" + cost.toFixed(2);
}
|
|
198
|
+
/**
 * Map a context-usage fraction to a severity level:
 * >= 0.9 -> "danger", >= 0.6 -> "warning", otherwise "ok".
 * (Comparison direction kept as `>=` so non-numeric input behaves the same.)
 */
function getContextSeverity(usage) {
  if (usage >= 0.9) return "danger";
  return usage >= 0.6 ? "warning" : "ok";
}
|
|
203
|
+
// Convenience aggregate: the default export bundles every public function
// plus the model registry under a single object.
var TokenLens = {
  getStats,
  estimateTokens,
  calcCost,
  formatCost,
  countWords,
  countSentences,
  countParagraphs,
  createWatcher,
  resolveModel,
  getContextSeverity,
  MODELS
};
var src_default = TokenLens;
|
|
217
|
+
// Annotate the CommonJS export names for ESM import in node:
// (`0 && (...)` is dead code at runtime; Node's CJS named-export detection
// parses it statically so `import { getStats } from "spendlens"` works.)
0 && (module.exports = {
  MODELS,
  calcCost,
  countParagraphs,
  countSentences,
  countWords,
  createWatcher,
  estimateTokens,
  formatCost,
  getContextSeverity,
  getStats,
  resolveModel
});
//# sourceMappingURL=index.js.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"sources":["../src/index.ts"],"sourcesContent":["// ─────────────────────────────────────────────────────────────────────────────\n// TokenLens core — token estimation + cost calculation for LLM prompts\n// ─────────────────────────────────────────────────────────────────────────────\n\n// ── Types ────────────────────────────────────────────────────────────────────\n\nexport interface ModelConfig {\n /** Human-readable label */\n label: string;\n /** Provider name */\n provider: \"anthropic\" | \"openai\" | \"google\" | \"meta\" | \"custom\";\n /** Input cost in USD per 1 million tokens */\n inputCostPer1M: number;\n /** Output cost in USD per 1 million tokens */\n outputCostPer1M: number;\n /** Maximum context window in tokens */\n contextWindow: number;\n}\n\nexport interface TokenStats {\n /** Estimated token count */\n tokens: number;\n /** Raw character count */\n chars: number;\n /** Word count */\n words: number;\n /** Sentence count */\n sentences: number;\n /** Paragraph count */\n paragraphs: number;\n /** Estimated input cost in USD */\n inputCost: number;\n /** Context usage as a fraction (0–1) */\n contextUsage: number;\n /** Context usage as a percentage string e.g. 
\"12.34%\" */\n contextUsagePct: string;\n /** Whether prompt is within context limits */\n withinLimit: boolean;\n /** Tokens remaining before hitting the context limit */\n tokensRemaining: number;\n}\n\nexport interface TokenLensOptions {\n /** Model ID from MODELS or a custom ModelConfig */\n model?: string | ModelConfig;\n /** Override the chars-per-token ratio (default: 3.8) */\n charsPerToken?: number;\n}\n\nexport type ModelId = keyof typeof MODELS;\n\n// ── Model registry ────────────────────────────────────────────────────────────\n\nexport const MODELS: Record<string, ModelConfig> = {\n // Anthropic\n \"claude-sonnet-4\": {\n label: \"Claude Sonnet 4\",\n provider: \"anthropic\",\n inputCostPer1M: 3,\n outputCostPer1M: 15,\n contextWindow: 200_000,\n },\n \"claude-opus-4\": {\n label: \"Claude Opus 4\",\n provider: \"anthropic\",\n inputCostPer1M: 15,\n outputCostPer1M: 75,\n contextWindow: 200_000,\n },\n \"claude-haiku-4\": {\n label: \"Claude Haiku 4\",\n provider: \"anthropic\",\n inputCostPer1M: 0.8,\n outputCostPer1M: 4,\n contextWindow: 200_000,\n },\n // OpenAI\n \"gpt-4o\": {\n label: \"GPT-4o\",\n provider: \"openai\",\n inputCostPer1M: 2.5,\n outputCostPer1M: 10,\n contextWindow: 128_000,\n },\n \"gpt-4o-mini\": {\n label: \"GPT-4o mini\",\n provider: \"openai\",\n inputCostPer1M: 0.15,\n outputCostPer1M: 0.6,\n contextWindow: 128_000,\n },\n \"o1\": {\n label: \"o1\",\n provider: \"openai\",\n inputCostPer1M: 15,\n outputCostPer1M: 60,\n contextWindow: 200_000,\n },\n \"o3-mini\": {\n label: \"o3-mini\",\n provider: \"openai\",\n inputCostPer1M: 1.1,\n outputCostPer1M: 4.4,\n contextWindow: 200_000,\n },\n // Google\n \"gemini-1.5-pro\": {\n label: \"Gemini 1.5 Pro\",\n provider: \"google\",\n inputCostPer1M: 3.5,\n outputCostPer1M: 10.5,\n contextWindow: 1_000_000,\n },\n \"gemini-1.5-flash\": {\n label: \"Gemini 1.5 Flash\",\n provider: \"google\",\n inputCostPer1M: 0.35,\n outputCostPer1M: 1.05,\n contextWindow: 1_000_000,\n },\n 
\"gemini-2.0-flash\": {\n label: \"Gemini 2.0 Flash\",\n provider: \"google\",\n inputCostPer1M: 0.1,\n outputCostPer1M: 0.4,\n contextWindow: 1_000_000,\n },\n // Meta\n \"llama-3.1-8b\": {\n label: \"Llama 3.1 8B\",\n provider: \"meta\",\n inputCostPer1M: 0.18,\n outputCostPer1M: 0.18,\n contextWindow: 131_072,\n },\n \"llama-3.1-70b\": {\n label: \"Llama 3.1 70B\",\n provider: \"meta\",\n inputCostPer1M: 0.88,\n outputCostPer1M: 0.88,\n contextWindow: 131_072,\n },\n};\n\n// ── Core estimation ───────────────────────────────────────────────────────────\n\nconst DEFAULT_CHARS_PER_TOKEN = 3.8;\n\n/**\n * Estimate token count from a string.\n *\n * Uses a character-ratio heuristic (default 3.8 chars/token) which is accurate\n * to within ~5% for English prose. Code and non-Latin scripts may vary.\n *\n * @example\n * estimateTokens(\"Hello, world!\") // → 3\n */\nexport function estimateTokens(\n text: string,\n charsPerToken = DEFAULT_CHARS_PER_TOKEN\n): number {\n if (!text) return 0;\n return Math.round(text.length / charsPerToken);\n}\n\n/**\n * Calculate the USD cost for a given token count and model.\n *\n * @example\n * calcCost(1000, MODELS[\"claude-sonnet-4\"], \"input\") // → 0.000003\n */\nexport function calcCost(\n tokens: number,\n model: ModelConfig,\n type: \"input\" | \"output\" = \"input\"\n): number {\n const rate =\n type === \"input\" ? model.inputCostPer1M : model.outputCostPer1M;\n return (tokens / 1_000_000) * rate;\n}\n\n/**\n * Count words in a string.\n */\nexport function countWords(text: string): number {\n return text.trim() ? text.trim().split(/\\s+/).length : 0;\n}\n\n/**\n * Count sentences in a string.\n */\nexport function countSentences(text: string): number {\n if (!text.trim()) return 0;\n const matches = text.match(/[.!?]+[\\s\\n]/g);\n return matches ? 
matches.length + 1 : 1;\n}\n\n/**\n * Count paragraphs (blocks separated by blank lines).\n */\nexport function countParagraphs(text: string): number {\n if (!text.trim()) return 0;\n return text\n .split(/\\n\\s*\\n/)\n .filter((p) => p.trim().length > 0).length;\n}\n\n/**\n * Resolve a model ID string or ModelConfig object into a ModelConfig.\n * Falls back to claude-sonnet-4 if the model ID is unknown.\n */\nexport function resolveModel(model?: string | ModelConfig): ModelConfig {\n if (!model) return MODELS[\"claude-sonnet-4\"];\n if (typeof model === \"object\") return model;\n return MODELS[model] ?? MODELS[\"claude-sonnet-4\"];\n}\n\n// ── Main API ──────────────────────────────────────────────────────────────────\n\n/**\n * Compute full token statistics for a prompt string.\n *\n * @example\n * const stats = getStats(\"Write me a poem about the sea\", { model: \"gpt-4o\" });\n * console.log(stats.tokens); // 8\n * console.log(stats.inputCost); // 0.00000002\n * console.log(stats.contextUsagePct); // \"0.01%\"\n */\nexport function getStats(\n text: string,\n options: TokenLensOptions = {}\n): TokenStats {\n const model = resolveModel(options.model);\n const tokens = estimateTokens(text, options.charsPerToken);\n const inputCost = calcCost(tokens, model, \"input\");\n const contextUsage = Math.min(tokens / model.contextWindow, 1);\n const tokensRemaining = Math.max(model.contextWindow - tokens, 0);\n\n return {\n tokens,\n chars: text.length,\n words: countWords(text),\n sentences: countSentences(text),\n paragraphs: countParagraphs(text),\n inputCost,\n contextUsage,\n contextUsagePct: (contextUsage * 100).toFixed(2) + \"%\",\n withinLimit: tokens <= model.contextWindow,\n tokensRemaining,\n };\n}\n\n/**\n * Create a stateful watcher that fires a callback on every text change.\n * Useful for attaching to textarea `input` events.\n *\n * @example\n * const watcher = createWatcher({ model: \"claude-sonnet-4\" });\n * textarea.addEventListener(\"input\", (e) 
=> {\n * const stats = watcher.update(e.target.value);\n * console.log(stats.tokens, stats.inputCost);\n * });\n * watcher.destroy(); // cleanup\n */\nexport function createWatcher(options: TokenLensOptions = {}) {\n let _lastText = \"\";\n let _lastStats: TokenStats | null = null;\n\n return {\n update(text: string): TokenStats {\n if (text === _lastText && _lastStats) return _lastStats;\n _lastText = text;\n _lastStats = getStats(text, options);\n return _lastStats;\n },\n reset() {\n _lastText = \"\";\n _lastStats = null;\n },\n destroy() {\n _lastText = \"\";\n _lastStats = null;\n },\n };\n}\n\n/**\n * Format a cost value as a human-readable USD string.\n *\n * @example\n * formatCost(0.000003) // \"$0.000003\"\n * formatCost(0.123456) // \"$0.1235\"\n * formatCost(1.5) // \"$1.50\"\n */\nexport function formatCost(cost: number): string {\n if (cost === 0) return \"$0.00\";\n if (cost < 0.0001) return \"$\" + cost.toFixed(6);\n if (cost < 0.01) return \"$\" + cost.toFixed(4);\n return \"$\" + cost.toFixed(2);\n}\n\n/**\n * Get a severity level based on context window usage.\n * Useful for colour-coding a progress bar.\n */\nexport function getContextSeverity(\n usage: number\n): \"ok\" | \"warning\" | \"danger\" {\n if (usage >= 0.9) return \"danger\";\n if (usage >= 0.6) return \"warning\";\n return \"ok\";\n}\n\n// ── Default export (convenience object) ──────────────────────────────────────\n\nconst TokenLens = {\n getStats,\n estimateTokens,\n calcCost,\n formatCost,\n countWords,\n countSentences,\n countParagraphs,\n createWatcher,\n resolveModel,\n getContextSeverity,\n MODELS,\n};\n\nexport default 
TokenLens;\n"],"mappings":";;;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAqDO,IAAM,SAAsC;AAAA;AAAA,EAEjD,mBAAmB;AAAA,IACjB,OAAO;AAAA,IACP,UAAU;AAAA,IACV,gBAAgB;AAAA,IAChB,iBAAiB;AAAA,IACjB,eAAe;AAAA,EACjB;AAAA,EACA,iBAAiB;AAAA,IACf,OAAO;AAAA,IACP,UAAU;AAAA,IACV,gBAAgB;AAAA,IAChB,iBAAiB;AAAA,IACjB,eAAe;AAAA,EACjB;AAAA,EACA,kBAAkB;AAAA,IAChB,OAAO;AAAA,IACP,UAAU;AAAA,IACV,gBAAgB;AAAA,IAChB,iBAAiB;AAAA,IACjB,eAAe;AAAA,EACjB;AAAA;AAAA,EAEA,UAAU;AAAA,IACR,OAAO;AAAA,IACP,UAAU;AAAA,IACV,gBAAgB;AAAA,IAChB,iBAAiB;AAAA,IACjB,eAAe;AAAA,EACjB;AAAA,EACA,eAAe;AAAA,IACb,OAAO;AAAA,IACP,UAAU;AAAA,IACV,gBAAgB;AAAA,IAChB,iBAAiB;AAAA,IACjB,eAAe;AAAA,EACjB;AAAA,EACA,MAAM;AAAA,IACJ,OAAO;AAAA,IACP,UAAU;AAAA,IACV,gBAAgB;AAAA,IAChB,iBAAiB;AAAA,IACjB,eAAe;AAAA,EACjB;AAAA,EACA,WAAW;AAAA,IACT,OAAO;AAAA,IACP,UAAU;AAAA,IACV,gBAAgB;AAAA,IAChB,iBAAiB;AAAA,IACjB,eAAe;AAAA,EACjB;AAAA;AAAA,EAEA,kBAAkB;AAAA,IAChB,OAAO;AAAA,IACP,UAAU;AAAA,IACV,gBAAgB;AAAA,IAChB,iBAAiB;AAAA,IACjB,eAAe;AAAA,EACjB;AAAA,EACA,oBAAoB;AAAA,IAClB,OAAO;AAAA,IACP,UAAU;AAAA,IACV,gBAAgB;AAAA,IAChB,iBAAiB;AAAA,IACjB,eAAe;AAAA,EACjB;AAAA,EACA,oBAAoB;AAAA,IAClB,OAAO;AAAA,IACP,UAAU;AAAA,IACV,gBAAgB;AAAA,IAChB,iBAAiB;AAAA,IACjB,eAAe;AAAA,EACjB;AAAA;AAAA,EAEA,gBAAgB;AAAA,IACd,OAAO;AAAA,IACP,UAAU;AAAA,IACV,gBAAgB;AAAA,IAChB,iBAAiB;AAAA,IACjB,eAAe;AAAA,EACjB;AAAA,EACA,iBAAiB;AAAA,IACf,OAAO;AAAA,IACP,UAAU;AAAA,IACV,gBAAgB;AAAA,IAChB,iBAAiB;AAAA,IACjB,eAAe;AAAA,EACjB;AACF;AAIA,IAAM,0BAA0B;AAWzB,SAAS,eACd,MACA,gBAAgB,yBACR;AACR,MAAI,CAAC,KAAM,QAAO;AAClB,SAAO,KAAK,MAAM,KAAK,SAAS,aAAa;AAC/C;AAQO,SAAS,SACd,QACA,OACA,OAA2B,SACnB;AACR,QAAM,OACJ,SAAS,UAAU,MAAM,iBAAiB,MAAM;AAClD,SAAQ,SAAS,MAAa;AAChC;AAKO,SAAS,WAAW,MAAsB;AAC/C,SAAO,KAAK,KAAK,IAAI,KAAK,KAAK,EAAE,MAAM,KAAK,EAAE,SAAS;AACzD;AAKO,SAAS,eAAe,MAAsB;AACnD,MAAI,CAAC,KAAK,KAAK,EAAG,QAAO;AACzB,QAAM,UAAU,KAAK,MAAM,eAAe;AAC1C,SAAO,UAAU,QAAQ,SAAS,IAAI;AACxC;AAKO,SAAS,gBAAgB,MAAsB;AACpD,MAAI,CAAC,KAAK,KAAK,EAAG,QAAO;AACzB,SAAO,KACJ,MAAM,SAAS,EAC
f,OAAO,CAAC,MAAM,EAAE,KAAK,EAAE,SAAS,CAAC,EAAE;AACxC;AAMO,SAAS,aAAa,OAA2C;AACtE,MAAI,CAAC,MAAO,QAAO,OAAO,iBAAiB;AAC3C,MAAI,OAAO,UAAU,SAAU,QAAO;AACtC,SAAO,OAAO,KAAK,KAAK,OAAO,iBAAiB;AAClD;AAaO,SAAS,SACd,MACA,UAA4B,CAAC,GACjB;AACZ,QAAM,QAAQ,aAAa,QAAQ,KAAK;AACxC,QAAM,SAAS,eAAe,MAAM,QAAQ,aAAa;AACzD,QAAM,YAAY,SAAS,QAAQ,OAAO,OAAO;AACjD,QAAM,eAAe,KAAK,IAAI,SAAS,MAAM,eAAe,CAAC;AAC7D,QAAM,kBAAkB,KAAK,IAAI,MAAM,gBAAgB,QAAQ,CAAC;AAEhE,SAAO;AAAA,IACL;AAAA,IACA,OAAO,KAAK;AAAA,IACZ,OAAO,WAAW,IAAI;AAAA,IACtB,WAAW,eAAe,IAAI;AAAA,IAC9B,YAAY,gBAAgB,IAAI;AAAA,IAChC;AAAA,IACA;AAAA,IACA,kBAAkB,eAAe,KAAK,QAAQ,CAAC,IAAI;AAAA,IACnD,aAAa,UAAU,MAAM;AAAA,IAC7B;AAAA,EACF;AACF;AAcO,SAAS,cAAc,UAA4B,CAAC,GAAG;AAC5D,MAAI,YAAY;AAChB,MAAI,aAAgC;AAEpC,SAAO;AAAA,IACL,OAAO,MAA0B;AAC/B,UAAI,SAAS,aAAa,WAAY,QAAO;AAC7C,kBAAY;AACZ,mBAAa,SAAS,MAAM,OAAO;AACnC,aAAO;AAAA,IACT;AAAA,IACA,QAAQ;AACN,kBAAY;AACZ,mBAAa;AAAA,IACf;AAAA,IACA,UAAU;AACR,kBAAY;AACZ,mBAAa;AAAA,IACf;AAAA,EACF;AACF;AAUO,SAAS,WAAW,MAAsB;AAC/C,MAAI,SAAS,EAAG,QAAO;AACvB,MAAI,OAAO,KAAQ,QAAO,MAAM,KAAK,QAAQ,CAAC;AAC9C,MAAI,OAAO,KAAM,QAAO,MAAM,KAAK,QAAQ,CAAC;AAC5C,SAAO,MAAM,KAAK,QAAQ,CAAC;AAC7B;AAMO,SAAS,mBACd,OAC6B;AAC7B,MAAI,SAAS,IAAK,QAAO;AACzB,MAAI,SAAS,IAAK,QAAO;AACzB,SAAO;AACT;AAIA,IAAM,YAAY;AAAA,EAChB;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACF;AAEA,IAAO,cAAQ;","names":[]}
|
package/dist/index.mjs
ADDED
|
@@ -0,0 +1,196 @@
|
|
|
1
|
+
// src/index.ts
|
|
2
|
+
var MODELS = {
|
|
3
|
+
// Anthropic
|
|
4
|
+
"claude-sonnet-4": {
|
|
5
|
+
label: "Claude Sonnet 4",
|
|
6
|
+
provider: "anthropic",
|
|
7
|
+
inputCostPer1M: 3,
|
|
8
|
+
outputCostPer1M: 15,
|
|
9
|
+
contextWindow: 2e5
|
|
10
|
+
},
|
|
11
|
+
"claude-opus-4": {
|
|
12
|
+
label: "Claude Opus 4",
|
|
13
|
+
provider: "anthropic",
|
|
14
|
+
inputCostPer1M: 15,
|
|
15
|
+
outputCostPer1M: 75,
|
|
16
|
+
contextWindow: 2e5
|
|
17
|
+
},
|
|
18
|
+
"claude-haiku-4": {
|
|
19
|
+
label: "Claude Haiku 4",
|
|
20
|
+
provider: "anthropic",
|
|
21
|
+
inputCostPer1M: 0.8,
|
|
22
|
+
outputCostPer1M: 4,
|
|
23
|
+
contextWindow: 2e5
|
|
24
|
+
},
|
|
25
|
+
// OpenAI
|
|
26
|
+
"gpt-4o": {
|
|
27
|
+
label: "GPT-4o",
|
|
28
|
+
provider: "openai",
|
|
29
|
+
inputCostPer1M: 2.5,
|
|
30
|
+
outputCostPer1M: 10,
|
|
31
|
+
contextWindow: 128e3
|
|
32
|
+
},
|
|
33
|
+
"gpt-4o-mini": {
|
|
34
|
+
label: "GPT-4o mini",
|
|
35
|
+
provider: "openai",
|
|
36
|
+
inputCostPer1M: 0.15,
|
|
37
|
+
outputCostPer1M: 0.6,
|
|
38
|
+
contextWindow: 128e3
|
|
39
|
+
},
|
|
40
|
+
"o1": {
|
|
41
|
+
label: "o1",
|
|
42
|
+
provider: "openai",
|
|
43
|
+
inputCostPer1M: 15,
|
|
44
|
+
outputCostPer1M: 60,
|
|
45
|
+
contextWindow: 2e5
|
|
46
|
+
},
|
|
47
|
+
"o3-mini": {
|
|
48
|
+
label: "o3-mini",
|
|
49
|
+
provider: "openai",
|
|
50
|
+
inputCostPer1M: 1.1,
|
|
51
|
+
outputCostPer1M: 4.4,
|
|
52
|
+
contextWindow: 2e5
|
|
53
|
+
},
|
|
54
|
+
// Google
|
|
55
|
+
"gemini-1.5-pro": {
|
|
56
|
+
label: "Gemini 1.5 Pro",
|
|
57
|
+
provider: "google",
|
|
58
|
+
inputCostPer1M: 3.5,
|
|
59
|
+
outputCostPer1M: 10.5,
|
|
60
|
+
contextWindow: 1e6
|
|
61
|
+
},
|
|
62
|
+
"gemini-1.5-flash": {
|
|
63
|
+
label: "Gemini 1.5 Flash",
|
|
64
|
+
provider: "google",
|
|
65
|
+
inputCostPer1M: 0.35,
|
|
66
|
+
outputCostPer1M: 1.05,
|
|
67
|
+
contextWindow: 1e6
|
|
68
|
+
},
|
|
69
|
+
"gemini-2.0-flash": {
|
|
70
|
+
label: "Gemini 2.0 Flash",
|
|
71
|
+
provider: "google",
|
|
72
|
+
inputCostPer1M: 0.1,
|
|
73
|
+
outputCostPer1M: 0.4,
|
|
74
|
+
contextWindow: 1e6
|
|
75
|
+
},
|
|
76
|
+
// Meta
|
|
77
|
+
"llama-3.1-8b": {
|
|
78
|
+
label: "Llama 3.1 8B",
|
|
79
|
+
provider: "meta",
|
|
80
|
+
inputCostPer1M: 0.18,
|
|
81
|
+
outputCostPer1M: 0.18,
|
|
82
|
+
contextWindow: 131072
|
|
83
|
+
},
|
|
84
|
+
"llama-3.1-70b": {
|
|
85
|
+
label: "Llama 3.1 70B",
|
|
86
|
+
provider: "meta",
|
|
87
|
+
inputCostPer1M: 0.88,
|
|
88
|
+
outputCostPer1M: 0.88,
|
|
89
|
+
contextWindow: 131072
|
|
90
|
+
}
|
|
91
|
+
};
|
|
92
|
+
var DEFAULT_CHARS_PER_TOKEN = 3.8;
|
|
93
|
+
function estimateTokens(text, charsPerToken = DEFAULT_CHARS_PER_TOKEN) {
|
|
94
|
+
if (!text) return 0;
|
|
95
|
+
return Math.round(text.length / charsPerToken);
|
|
96
|
+
}
|
|
97
|
+
function calcCost(tokens, model, type = "input") {
|
|
98
|
+
const rate = type === "input" ? model.inputCostPer1M : model.outputCostPer1M;
|
|
99
|
+
return tokens / 1e6 * rate;
|
|
100
|
+
}
|
|
101
|
+
function countWords(text) {
|
|
102
|
+
return text.trim() ? text.trim().split(/\s+/).length : 0;
|
|
103
|
+
}
|
|
104
|
+
function countSentences(text) {
|
|
105
|
+
if (!text.trim()) return 0;
|
|
106
|
+
const matches = text.match(/[.!?]+[\s\n]/g);
|
|
107
|
+
return matches ? matches.length + 1 : 1;
|
|
108
|
+
}
|
|
109
|
+
function countParagraphs(text) {
|
|
110
|
+
if (!text.trim()) return 0;
|
|
111
|
+
return text.split(/\n\s*\n/).filter((p) => p.trim().length > 0).length;
|
|
112
|
+
}
|
|
113
|
+
function resolveModel(model) {
|
|
114
|
+
if (!model) return MODELS["claude-sonnet-4"];
|
|
115
|
+
if (typeof model === "object") return model;
|
|
116
|
+
return MODELS[model] ?? MODELS["claude-sonnet-4"];
|
|
117
|
+
}
|
|
118
|
+
function getStats(text, options = {}) {
|
|
119
|
+
const model = resolveModel(options.model);
|
|
120
|
+
const tokens = estimateTokens(text, options.charsPerToken);
|
|
121
|
+
const inputCost = calcCost(tokens, model, "input");
|
|
122
|
+
const contextUsage = Math.min(tokens / model.contextWindow, 1);
|
|
123
|
+
const tokensRemaining = Math.max(model.contextWindow - tokens, 0);
|
|
124
|
+
return {
|
|
125
|
+
tokens,
|
|
126
|
+
chars: text.length,
|
|
127
|
+
words: countWords(text),
|
|
128
|
+
sentences: countSentences(text),
|
|
129
|
+
paragraphs: countParagraphs(text),
|
|
130
|
+
inputCost,
|
|
131
|
+
contextUsage,
|
|
132
|
+
contextUsagePct: (contextUsage * 100).toFixed(2) + "%",
|
|
133
|
+
withinLimit: tokens <= model.contextWindow,
|
|
134
|
+
tokensRemaining
|
|
135
|
+
};
|
|
136
|
+
}
|
|
137
|
+
function createWatcher(options = {}) {
|
|
138
|
+
let _lastText = "";
|
|
139
|
+
let _lastStats = null;
|
|
140
|
+
return {
|
|
141
|
+
update(text) {
|
|
142
|
+
if (text === _lastText && _lastStats) return _lastStats;
|
|
143
|
+
_lastText = text;
|
|
144
|
+
_lastStats = getStats(text, options);
|
|
145
|
+
return _lastStats;
|
|
146
|
+
},
|
|
147
|
+
reset() {
|
|
148
|
+
_lastText = "";
|
|
149
|
+
_lastStats = null;
|
|
150
|
+
},
|
|
151
|
+
destroy() {
|
|
152
|
+
_lastText = "";
|
|
153
|
+
_lastStats = null;
|
|
154
|
+
}
|
|
155
|
+
};
|
|
156
|
+
}
|
|
157
|
+
function formatCost(cost) {
|
|
158
|
+
if (cost === 0) return "$0.00";
|
|
159
|
+
if (cost < 1e-4) return "$" + cost.toFixed(6);
|
|
160
|
+
if (cost < 0.01) return "$" + cost.toFixed(4);
|
|
161
|
+
return "$" + cost.toFixed(2);
|
|
162
|
+
}
|
|
163
|
+
function getContextSeverity(usage) {
|
|
164
|
+
if (usage >= 0.9) return "danger";
|
|
165
|
+
if (usage >= 0.6) return "warning";
|
|
166
|
+
return "ok";
|
|
167
|
+
}
|
|
168
|
+
var TokenLens = {
|
|
169
|
+
getStats,
|
|
170
|
+
estimateTokens,
|
|
171
|
+
calcCost,
|
|
172
|
+
formatCost,
|
|
173
|
+
countWords,
|
|
174
|
+
countSentences,
|
|
175
|
+
countParagraphs,
|
|
176
|
+
createWatcher,
|
|
177
|
+
resolveModel,
|
|
178
|
+
getContextSeverity,
|
|
179
|
+
MODELS
|
|
180
|
+
};
|
|
181
|
+
var src_default = TokenLens;
|
|
182
|
+
export {
|
|
183
|
+
MODELS,
|
|
184
|
+
calcCost,
|
|
185
|
+
countParagraphs,
|
|
186
|
+
countSentences,
|
|
187
|
+
countWords,
|
|
188
|
+
createWatcher,
|
|
189
|
+
src_default as default,
|
|
190
|
+
estimateTokens,
|
|
191
|
+
formatCost,
|
|
192
|
+
getContextSeverity,
|
|
193
|
+
getStats,
|
|
194
|
+
resolveModel
|
|
195
|
+
};
|
|
196
|
+
//# sourceMappingURL=index.mjs.map
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
{"version":3,"sources":["../src/index.ts"],"sourcesContent":["// ─────────────────────────────────────────────────────────────────────────────\n// TokenLens core — token estimation + cost calculation for LLM prompts\n// ─────────────────────────────────────────────────────────────────────────────\n\n// ── Types ────────────────────────────────────────────────────────────────────\n\nexport interface ModelConfig {\n /** Human-readable label */\n label: string;\n /** Provider name */\n provider: \"anthropic\" | \"openai\" | \"google\" | \"meta\" | \"custom\";\n /** Input cost in USD per 1 million tokens */\n inputCostPer1M: number;\n /** Output cost in USD per 1 million tokens */\n outputCostPer1M: number;\n /** Maximum context window in tokens */\n contextWindow: number;\n}\n\nexport interface TokenStats {\n /** Estimated token count */\n tokens: number;\n /** Raw character count */\n chars: number;\n /** Word count */\n words: number;\n /** Sentence count */\n sentences: number;\n /** Paragraph count */\n paragraphs: number;\n /** Estimated input cost in USD */\n inputCost: number;\n /** Context usage as a fraction (0–1) */\n contextUsage: number;\n /** Context usage as a percentage string e.g. 
\"12.34%\" */\n contextUsagePct: string;\n /** Whether prompt is within context limits */\n withinLimit: boolean;\n /** Tokens remaining before hitting the context limit */\n tokensRemaining: number;\n}\n\nexport interface TokenLensOptions {\n /** Model ID from MODELS or a custom ModelConfig */\n model?: string | ModelConfig;\n /** Override the chars-per-token ratio (default: 3.8) */\n charsPerToken?: number;\n}\n\nexport type ModelId = keyof typeof MODELS;\n\n// ── Model registry ────────────────────────────────────────────────────────────\n\nexport const MODELS: Record<string, ModelConfig> = {\n // Anthropic\n \"claude-sonnet-4\": {\n label: \"Claude Sonnet 4\",\n provider: \"anthropic\",\n inputCostPer1M: 3,\n outputCostPer1M: 15,\n contextWindow: 200_000,\n },\n \"claude-opus-4\": {\n label: \"Claude Opus 4\",\n provider: \"anthropic\",\n inputCostPer1M: 15,\n outputCostPer1M: 75,\n contextWindow: 200_000,\n },\n \"claude-haiku-4\": {\n label: \"Claude Haiku 4\",\n provider: \"anthropic\",\n inputCostPer1M: 0.8,\n outputCostPer1M: 4,\n contextWindow: 200_000,\n },\n // OpenAI\n \"gpt-4o\": {\n label: \"GPT-4o\",\n provider: \"openai\",\n inputCostPer1M: 2.5,\n outputCostPer1M: 10,\n contextWindow: 128_000,\n },\n \"gpt-4o-mini\": {\n label: \"GPT-4o mini\",\n provider: \"openai\",\n inputCostPer1M: 0.15,\n outputCostPer1M: 0.6,\n contextWindow: 128_000,\n },\n \"o1\": {\n label: \"o1\",\n provider: \"openai\",\n inputCostPer1M: 15,\n outputCostPer1M: 60,\n contextWindow: 200_000,\n },\n \"o3-mini\": {\n label: \"o3-mini\",\n provider: \"openai\",\n inputCostPer1M: 1.1,\n outputCostPer1M: 4.4,\n contextWindow: 200_000,\n },\n // Google\n \"gemini-1.5-pro\": {\n label: \"Gemini 1.5 Pro\",\n provider: \"google\",\n inputCostPer1M: 3.5,\n outputCostPer1M: 10.5,\n contextWindow: 1_000_000,\n },\n \"gemini-1.5-flash\": {\n label: \"Gemini 1.5 Flash\",\n provider: \"google\",\n inputCostPer1M: 0.35,\n outputCostPer1M: 1.05,\n contextWindow: 1_000_000,\n },\n 
\"gemini-2.0-flash\": {\n label: \"Gemini 2.0 Flash\",\n provider: \"google\",\n inputCostPer1M: 0.1,\n outputCostPer1M: 0.4,\n contextWindow: 1_000_000,\n },\n // Meta\n \"llama-3.1-8b\": {\n label: \"Llama 3.1 8B\",\n provider: \"meta\",\n inputCostPer1M: 0.18,\n outputCostPer1M: 0.18,\n contextWindow: 131_072,\n },\n \"llama-3.1-70b\": {\n label: \"Llama 3.1 70B\",\n provider: \"meta\",\n inputCostPer1M: 0.88,\n outputCostPer1M: 0.88,\n contextWindow: 131_072,\n },\n};\n\n// ── Core estimation ───────────────────────────────────────────────────────────\n\nconst DEFAULT_CHARS_PER_TOKEN = 3.8;\n\n/**\n * Estimate token count from a string.\n *\n * Uses a character-ratio heuristic (default 3.8 chars/token) which is accurate\n * to within ~5% for English prose. Code and non-Latin scripts may vary.\n *\n * @example\n * estimateTokens(\"Hello, world!\") // → 3\n */\nexport function estimateTokens(\n text: string,\n charsPerToken = DEFAULT_CHARS_PER_TOKEN\n): number {\n if (!text) return 0;\n return Math.round(text.length / charsPerToken);\n}\n\n/**\n * Calculate the USD cost for a given token count and model.\n *\n * @example\n * calcCost(1000, MODELS[\"claude-sonnet-4\"], \"input\") // → 0.000003\n */\nexport function calcCost(\n tokens: number,\n model: ModelConfig,\n type: \"input\" | \"output\" = \"input\"\n): number {\n const rate =\n type === \"input\" ? model.inputCostPer1M : model.outputCostPer1M;\n return (tokens / 1_000_000) * rate;\n}\n\n/**\n * Count words in a string.\n */\nexport function countWords(text: string): number {\n return text.trim() ? text.trim().split(/\\s+/).length : 0;\n}\n\n/**\n * Count sentences in a string.\n */\nexport function countSentences(text: string): number {\n if (!text.trim()) return 0;\n const matches = text.match(/[.!?]+[\\s\\n]/g);\n return matches ? 
matches.length + 1 : 1;\n}\n\n/**\n * Count paragraphs (blocks separated by blank lines).\n */\nexport function countParagraphs(text: string): number {\n if (!text.trim()) return 0;\n return text\n .split(/\\n\\s*\\n/)\n .filter((p) => p.trim().length > 0).length;\n}\n\n/**\n * Resolve a model ID string or ModelConfig object into a ModelConfig.\n * Falls back to claude-sonnet-4 if the model ID is unknown.\n */\nexport function resolveModel(model?: string | ModelConfig): ModelConfig {\n if (!model) return MODELS[\"claude-sonnet-4\"];\n if (typeof model === \"object\") return model;\n return MODELS[model] ?? MODELS[\"claude-sonnet-4\"];\n}\n\n// ── Main API ──────────────────────────────────────────────────────────────────\n\n/**\n * Compute full token statistics for a prompt string.\n *\n * @example\n * const stats = getStats(\"Write me a poem about the sea\", { model: \"gpt-4o\" });\n * console.log(stats.tokens); // 8\n * console.log(stats.inputCost); // 0.00000002\n * console.log(stats.contextUsagePct); // \"0.01%\"\n */\nexport function getStats(\n text: string,\n options: TokenLensOptions = {}\n): TokenStats {\n const model = resolveModel(options.model);\n const tokens = estimateTokens(text, options.charsPerToken);\n const inputCost = calcCost(tokens, model, \"input\");\n const contextUsage = Math.min(tokens / model.contextWindow, 1);\n const tokensRemaining = Math.max(model.contextWindow - tokens, 0);\n\n return {\n tokens,\n chars: text.length,\n words: countWords(text),\n sentences: countSentences(text),\n paragraphs: countParagraphs(text),\n inputCost,\n contextUsage,\n contextUsagePct: (contextUsage * 100).toFixed(2) + \"%\",\n withinLimit: tokens <= model.contextWindow,\n tokensRemaining,\n };\n}\n\n/**\n * Create a stateful watcher that fires a callback on every text change.\n * Useful for attaching to textarea `input` events.\n *\n * @example\n * const watcher = createWatcher({ model: \"claude-sonnet-4\" });\n * textarea.addEventListener(\"input\", (e) 
=> {\n * const stats = watcher.update(e.target.value);\n * console.log(stats.tokens, stats.inputCost);\n * });\n * watcher.destroy(); // cleanup\n */\nexport function createWatcher(options: TokenLensOptions = {}) {\n let _lastText = \"\";\n let _lastStats: TokenStats | null = null;\n\n return {\n update(text: string): TokenStats {\n if (text === _lastText && _lastStats) return _lastStats;\n _lastText = text;\n _lastStats = getStats(text, options);\n return _lastStats;\n },\n reset() {\n _lastText = \"\";\n _lastStats = null;\n },\n destroy() {\n _lastText = \"\";\n _lastStats = null;\n },\n };\n}\n\n/**\n * Format a cost value as a human-readable USD string.\n *\n * @example\n * formatCost(0.000003) // \"$0.000003\"\n * formatCost(0.123456) // \"$0.1235\"\n * formatCost(1.5) // \"$1.50\"\n */\nexport function formatCost(cost: number): string {\n if (cost === 0) return \"$0.00\";\n if (cost < 0.0001) return \"$\" + cost.toFixed(6);\n if (cost < 0.01) return \"$\" + cost.toFixed(4);\n return \"$\" + cost.toFixed(2);\n}\n\n/**\n * Get a severity level based on context window usage.\n * Useful for colour-coding a progress bar.\n */\nexport function getContextSeverity(\n usage: number\n): \"ok\" | \"warning\" | \"danger\" {\n if (usage >= 0.9) return \"danger\";\n if (usage >= 0.6) return \"warning\";\n return \"ok\";\n}\n\n// ── Default export (convenience object) ──────────────────────────────────────\n\nconst TokenLens = {\n getStats,\n estimateTokens,\n calcCost,\n formatCost,\n countWords,\n countSentences,\n countParagraphs,\n createWatcher,\n resolveModel,\n getContextSeverity,\n MODELS,\n};\n\nexport default 
TokenLens;\n"],"mappings":";AAqDO,IAAM,SAAsC;AAAA;AAAA,EAEjD,mBAAmB;AAAA,IACjB,OAAO;AAAA,IACP,UAAU;AAAA,IACV,gBAAgB;AAAA,IAChB,iBAAiB;AAAA,IACjB,eAAe;AAAA,EACjB;AAAA,EACA,iBAAiB;AAAA,IACf,OAAO;AAAA,IACP,UAAU;AAAA,IACV,gBAAgB;AAAA,IAChB,iBAAiB;AAAA,IACjB,eAAe;AAAA,EACjB;AAAA,EACA,kBAAkB;AAAA,IAChB,OAAO;AAAA,IACP,UAAU;AAAA,IACV,gBAAgB;AAAA,IAChB,iBAAiB;AAAA,IACjB,eAAe;AAAA,EACjB;AAAA;AAAA,EAEA,UAAU;AAAA,IACR,OAAO;AAAA,IACP,UAAU;AAAA,IACV,gBAAgB;AAAA,IAChB,iBAAiB;AAAA,IACjB,eAAe;AAAA,EACjB;AAAA,EACA,eAAe;AAAA,IACb,OAAO;AAAA,IACP,UAAU;AAAA,IACV,gBAAgB;AAAA,IAChB,iBAAiB;AAAA,IACjB,eAAe;AAAA,EACjB;AAAA,EACA,MAAM;AAAA,IACJ,OAAO;AAAA,IACP,UAAU;AAAA,IACV,gBAAgB;AAAA,IAChB,iBAAiB;AAAA,IACjB,eAAe;AAAA,EACjB;AAAA,EACA,WAAW;AAAA,IACT,OAAO;AAAA,IACP,UAAU;AAAA,IACV,gBAAgB;AAAA,IAChB,iBAAiB;AAAA,IACjB,eAAe;AAAA,EACjB;AAAA;AAAA,EAEA,kBAAkB;AAAA,IAChB,OAAO;AAAA,IACP,UAAU;AAAA,IACV,gBAAgB;AAAA,IAChB,iBAAiB;AAAA,IACjB,eAAe;AAAA,EACjB;AAAA,EACA,oBAAoB;AAAA,IAClB,OAAO;AAAA,IACP,UAAU;AAAA,IACV,gBAAgB;AAAA,IAChB,iBAAiB;AAAA,IACjB,eAAe;AAAA,EACjB;AAAA,EACA,oBAAoB;AAAA,IAClB,OAAO;AAAA,IACP,UAAU;AAAA,IACV,gBAAgB;AAAA,IAChB,iBAAiB;AAAA,IACjB,eAAe;AAAA,EACjB;AAAA;AAAA,EAEA,gBAAgB;AAAA,IACd,OAAO;AAAA,IACP,UAAU;AAAA,IACV,gBAAgB;AAAA,IAChB,iBAAiB;AAAA,IACjB,eAAe;AAAA,EACjB;AAAA,EACA,iBAAiB;AAAA,IACf,OAAO;AAAA,IACP,UAAU;AAAA,IACV,gBAAgB;AAAA,IAChB,iBAAiB;AAAA,IACjB,eAAe;AAAA,EACjB;AACF;AAIA,IAAM,0BAA0B;AAWzB,SAAS,eACd,MACA,gBAAgB,yBACR;AACR,MAAI,CAAC,KAAM,QAAO;AAClB,SAAO,KAAK,MAAM,KAAK,SAAS,aAAa;AAC/C;AAQO,SAAS,SACd,QACA,OACA,OAA2B,SACnB;AACR,QAAM,OACJ,SAAS,UAAU,MAAM,iBAAiB,MAAM;AAClD,SAAQ,SAAS,MAAa;AAChC;AAKO,SAAS,WAAW,MAAsB;AAC/C,SAAO,KAAK,KAAK,IAAI,KAAK,KAAK,EAAE,MAAM,KAAK,EAAE,SAAS;AACzD;AAKO,SAAS,eAAe,MAAsB;AACnD,MAAI,CAAC,KAAK,KAAK,EAAG,QAAO;AACzB,QAAM,UAAU,KAAK,MAAM,eAAe;AAC1C,SAAO,UAAU,QAAQ,SAAS,IAAI;AACxC;AAKO,SAAS,gBAAgB,MAAsB;AACpD,MAAI,CAAC,KAAK,KAAK,EAAG,QAAO;AACzB,SAAO,KACJ,MAAM,SAAS,EACf,OAAO,CAAC,MAAM,EAAE,KAAK,EAAE,SAAS,CAAC,EAAE;AACxC;AAMO,SAAS,aAAa,OAA2C;AACtE,MAAI,CAAC,MAAO,QAAO
,OAAO,iBAAiB;AAC3C,MAAI,OAAO,UAAU,SAAU,QAAO;AACtC,SAAO,OAAO,KAAK,KAAK,OAAO,iBAAiB;AAClD;AAaO,SAAS,SACd,MACA,UAA4B,CAAC,GACjB;AACZ,QAAM,QAAQ,aAAa,QAAQ,KAAK;AACxC,QAAM,SAAS,eAAe,MAAM,QAAQ,aAAa;AACzD,QAAM,YAAY,SAAS,QAAQ,OAAO,OAAO;AACjD,QAAM,eAAe,KAAK,IAAI,SAAS,MAAM,eAAe,CAAC;AAC7D,QAAM,kBAAkB,KAAK,IAAI,MAAM,gBAAgB,QAAQ,CAAC;AAEhE,SAAO;AAAA,IACL;AAAA,IACA,OAAO,KAAK;AAAA,IACZ,OAAO,WAAW,IAAI;AAAA,IACtB,WAAW,eAAe,IAAI;AAAA,IAC9B,YAAY,gBAAgB,IAAI;AAAA,IAChC;AAAA,IACA;AAAA,IACA,kBAAkB,eAAe,KAAK,QAAQ,CAAC,IAAI;AAAA,IACnD,aAAa,UAAU,MAAM;AAAA,IAC7B;AAAA,EACF;AACF;AAcO,SAAS,cAAc,UAA4B,CAAC,GAAG;AAC5D,MAAI,YAAY;AAChB,MAAI,aAAgC;AAEpC,SAAO;AAAA,IACL,OAAO,MAA0B;AAC/B,UAAI,SAAS,aAAa,WAAY,QAAO;AAC7C,kBAAY;AACZ,mBAAa,SAAS,MAAM,OAAO;AACnC,aAAO;AAAA,IACT;AAAA,IACA,QAAQ;AACN,kBAAY;AACZ,mBAAa;AAAA,IACf;AAAA,IACA,UAAU;AACR,kBAAY;AACZ,mBAAa;AAAA,IACf;AAAA,EACF;AACF;AAUO,SAAS,WAAW,MAAsB;AAC/C,MAAI,SAAS,EAAG,QAAO;AACvB,MAAI,OAAO,KAAQ,QAAO,MAAM,KAAK,QAAQ,CAAC;AAC9C,MAAI,OAAO,KAAM,QAAO,MAAM,KAAK,QAAQ,CAAC;AAC5C,SAAO,MAAM,KAAK,QAAQ,CAAC;AAC7B;AAMO,SAAS,mBACd,OAC6B;AAC7B,MAAI,SAAS,IAAK,QAAO;AACzB,MAAI,SAAS,IAAK,QAAO;AACzB,SAAO;AACT;AAIA,IAAM,YAAY;AAAA,EAChB;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AAAA,EACA;AACF;AAEA,IAAO,cAAQ;","names":[]}
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"use strict";var TokenLens=(()=>{var u=Object.defineProperty;var P=Object.getOwnPropertyDescriptor;var x=Object.getOwnPropertyNames;var h=Object.prototype.hasOwnProperty;var w=(t,e)=>{for(var n in e)u(t,n,{get:e[n],enumerable:!0})},_=(t,e,n,o)=>{if(e&&typeof e=="object"||typeof e=="function")for(let r of x(e))!h.call(t,r)&&r!==n&&u(t,r,{get:()=>e[r],enumerable:!(o=P(e,r))||o.enumerable});return t};var k=t=>_(u({},"__esModule",{value:!0}),t);var S={};w(S,{MODELS:()=>i,calcCost:()=>a,countParagraphs:()=>c,countSentences:()=>l,countWords:()=>p,createWatcher:()=>C,default:()=>v,estimateTokens:()=>s,formatCost:()=>M,getContextSeverity:()=>f,getStats:()=>g,resolveModel:()=>d});var i={"claude-sonnet-4":{label:"Claude Sonnet 4",provider:"anthropic",inputCostPer1M:3,outputCostPer1M:15,contextWindow:2e5},"claude-opus-4":{label:"Claude Opus 4",provider:"anthropic",inputCostPer1M:15,outputCostPer1M:75,contextWindow:2e5},"claude-haiku-4":{label:"Claude Haiku 4",provider:"anthropic",inputCostPer1M:.8,outputCostPer1M:4,contextWindow:2e5},"gpt-4o":{label:"GPT-4o",provider:"openai",inputCostPer1M:2.5,outputCostPer1M:10,contextWindow:128e3},"gpt-4o-mini":{label:"GPT-4o mini",provider:"openai",inputCostPer1M:.15,outputCostPer1M:.6,contextWindow:128e3},o1:{label:"o1",provider:"openai",inputCostPer1M:15,outputCostPer1M:60,contextWindow:2e5},"o3-mini":{label:"o3-mini",provider:"openai",inputCostPer1M:1.1,outputCostPer1M:4.4,contextWindow:2e5},"gemini-1.5-pro":{label:"Gemini 1.5 Pro",provider:"google",inputCostPer1M:3.5,outputCostPer1M:10.5,contextWindow:1e6},"gemini-1.5-flash":{label:"Gemini 1.5 Flash",provider:"google",inputCostPer1M:.35,outputCostPer1M:1.05,contextWindow:1e6},"gemini-2.0-flash":{label:"Gemini 2.0 Flash",provider:"google",inputCostPer1M:.1,outputCostPer1M:.4,contextWindow:1e6},"llama-3.1-8b":{label:"Llama 3.1 8B",provider:"meta",inputCostPer1M:.18,outputCostPer1M:.18,contextWindow:131072},"llama-3.1-70b":{label:"Llama 3.1 
70B",provider:"meta",inputCostPer1M:.88,outputCostPer1M:.88,contextWindow:131072}},W=3.8;function s(t,e=W){return t?Math.round(t.length/e):0}function a(t,e,n="input"){let o=n==="input"?e.inputCostPer1M:e.outputCostPer1M;return t/1e6*o}function p(t){return t.trim()?t.trim().split(/\s+/).length:0}function l(t){if(!t.trim())return 0;let e=t.match(/[.!?]+[\s\n]/g);return e?e.length+1:1}function c(t){return t.trim()?t.split(/\n\s*\n/).filter(e=>e.trim().length>0).length:0}function d(t){return t?typeof t=="object"?t:i[t]??i["claude-sonnet-4"]:i["claude-sonnet-4"]}function g(t,e={}){let n=d(e.model),o=s(t,e.charsPerToken),r=a(o,n,"input"),m=Math.min(o/n.contextWindow,1),b=Math.max(n.contextWindow-o,0);return{tokens:o,chars:t.length,words:p(t),sentences:l(t),paragraphs:c(t),inputCost:r,contextUsage:m,contextUsagePct:(m*100).toFixed(2)+"%",withinLimit:o<=n.contextWindow,tokensRemaining:b}}function C(t={}){let e="",n=null;return{update(o){return o===e&&n||(e=o,n=g(o,t)),n},reset(){e="",n=null},destroy(){e="",n=null}}}function M(t){return t===0?"$0.00":t<1e-4?"$"+t.toFixed(6):t<.01?"$"+t.toFixed(4):"$"+t.toFixed(2)}function f(t){return t>=.9?"danger":t>=.6?"warning":"ok"}var T={getStats:g,estimateTokens:s,calcCost:a,formatCost:M,countWords:p,countSentences:l,countParagraphs:c,createWatcher:C,resolveModel:d,getContextSeverity:f,MODELS:i},v=T;return k(S);})();
|
package/package.json
ADDED
|
@@ -0,0 +1,48 @@
|
|
|
1
|
+
{
|
|
2
|
+
"name": "spendlens",
|
|
3
|
+
"version": "1.0.1",
|
|
4
|
+
"description": "Real-time token counting and cost estimation for LLM prompts — before you hit send.",
|
|
5
|
+
"keywords": [
|
|
6
|
+
"tokens",
|
|
7
|
+
"llm",
|
|
8
|
+
"openai",
|
|
9
|
+
"anthropic",
|
|
10
|
+
"claude",
|
|
11
|
+
"gpt",
|
|
12
|
+
"cost",
|
|
13
|
+
"estimation",
|
|
14
|
+
"prompt",
|
|
15
|
+
"tokenizer"
|
|
16
|
+
],
|
|
17
|
+
"author": "Orion",
|
|
18
|
+
"license": "MIT",
|
|
19
|
+
"repository": {
|
|
20
|
+
"type": "git",
|
|
21
|
+
"url": "https://github.com/boyzliberty360/tokenlens"
|
|
22
|
+
},
|
|
23
|
+
"homepage": "https://github.com/boyzliberty360/tokenlens#readme",
|
|
24
|
+
"main": "./dist/index.js",
|
|
25
|
+
"module": "./dist/index.js",
|
|
26
|
+
"types": "./dist/index.d.ts",
|
|
27
|
+
"exports": {
|
|
28
|
+
".": {
|
|
29
|
+
"types": "./dist/index.d.ts",
|
|
30
|
+
"import": "./dist/index.js",
|
|
31
|
+
"require": "./dist/index.js"
|
|
32
|
+
}
|
|
33
|
+
},
|
|
34
|
+
"files": [
|
|
35
|
+
"dist",
|
|
36
|
+
"README.md",
|
|
37
|
+
"LICENSE"
|
|
38
|
+
],
|
|
39
|
+
"scripts": {
|
|
40
|
+
"build": "tsup",
|
|
41
|
+
"dev": "tsup --watch",
|
|
42
|
+
"test": "node --experimental-strip-types --test src/__tests__/index.test.ts"
|
|
43
|
+
},
|
|
44
|
+
"devDependencies": {
|
|
45
|
+
"tsup": "^8.0.0",
|
|
46
|
+
"typescript": "^5.4.0"
|
|
47
|
+
}
|
|
48
|
+
}
|