ai-token-estimator 1.0.0 → 1.0.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -108,59 +108,78 @@ This package counts Unicode code points, not UTF-16 code units. This means:
108
108
  - Accented characters count correctly
109
109
  - Most source code characters count as 1
110
110
 
111
+ <!-- SUPPORTED_MODELS_START -->
111
112
  ## Supported Models
112
113
 
114
+ > **Auto-updated weekly** via GitHub Actions from provider pricing pages.
115
+
113
116
  ### OpenAI Models
114
117
 
115
118
  | Model | Chars/Token | Input Cost (per 1M tokens) |
116
119
  |-------|-------------|---------------------------|
117
- | gpt-5.2 | 4 | $1.75 |
118
- | gpt-5.2-pro | 4 | $21.00 |
119
- | gpt-5-mini | 4 | $0.25 |
120
120
  | gpt-4.1 | 4 | $3.00 |
121
- | gpt-4.1-mini | 4 | $0.80 |
122
- | gpt-4.1-nano | 4 | $0.20 |
121
+ | gpt-4.1-mini | 4 | $0.40 |
122
+ | gpt-4.1-nano | 4 | $0.10 |
123
123
  | gpt-4o | 4 | $2.50 |
124
124
  | gpt-4o-mini | 4 | $0.15 |
125
- | o3 | 4 | $2.00 |
126
- | o4-mini | 4 | $4.00 |
125
+ | gpt-5-mini | 4 | $0.25 |
126
+ | gpt-5.2 | 4 | $1.75 |
127
+ | gpt-5.2-pro | 4 | $21.00 |
128
+ | gpt-realtime | 4 | $4.00 |
129
+ | gpt-realtime-mini | 4 | $0.60 |
127
130
  | o1 | 4 | $15.00 |
128
131
  | o1-pro | 4 | $150.00 |
132
+ | o3 | 4 | $2.00 |
133
+ | o4-mini | 4 | $4.00 |
129
134
 
130
135
  ### Anthropic Claude Models
131
136
 
132
137
  | Model | Chars/Token | Input Cost (per 1M tokens) |
133
138
  |-------|-------------|---------------------------|
134
- | claude-opus-4.5 | 3.5 | $5.00 |
135
- | claude-sonnet-4.5 | 3.5 | $3.00 |
139
+ | claude-haiku-3 | 3.5 | $0.25 |
140
+ | claude-haiku-3.5 | 3.5 | $0.80 |
136
141
  | claude-haiku-4.5 | 3.5 | $1.00 |
142
+ | claude-opus-3 | 3.5 | $15.00 |
137
143
  | claude-opus-4 | 3.5 | $15.00 |
138
144
  | claude-opus-4.1 | 3.5 | $15.00 |
145
+ | claude-opus-4.5 | 3.5 | $5.00 |
139
146
  | claude-sonnet-4 | 3.5 | $3.00 |
140
- | claude-opus-3 | 3.5 | $15.00 |
141
- | claude-haiku-3 | 3.5 | $0.25 |
142
- | claude-haiku-3.5 | 3.5 | $0.80 |
147
+ | claude-sonnet-4.5 | 3.5 | $3.00 |
143
148
 
144
149
  ### Google Gemini Models
145
150
 
146
151
  | Model | Chars/Token | Input Cost (per 1M tokens) |
147
152
  |-------|-------------|---------------------------|
148
- | gemini-3-pro | 4 | $2.00 |
149
- | gemini-3-flash | 4 | $0.50 |
150
- | gemini-2.5-pro | 4 | $1.25 |
153
+ | gemini-2.0-flash | 4 | $0.10 |
154
+ | gemini-2.0-flash-lite | 4 | $0.075 |
155
+ | gemini-2.5-computer-use-preview-10-2025 | 4 | $1.25 |
151
156
  | gemini-2.5-flash | 4 | $0.30 |
152
157
  | gemini-2.5-flash-lite | 4 | $0.10 |
153
- | gemini-2.0-flash | 4 | $0.10 |
154
- | gemini-2.0-flash-lite | 4 | $0.075 |
158
+ | gemini-2.5-flash-lite-preview-09-2025 | 4 | $0.10 |
159
+ | gemini-2.5-flash-native-audio-preview-12-2025 | 4 | $0.50 |
160
+ | gemini-2.5-flash-preview-09-2025 | 4 | $0.30 |
161
+ | gemini-2.5-flash-preview-tts | 4 | $0.50 |
162
+ | gemini-2.5-pro | 4 | $1.25 |
163
+ | gemini-2.5-pro-preview-tts | 4 | $1.00 |
164
+ | gemini-3-flash | 4 | $0.50 |
165
+ | gemini-3-pro | 4 | $2.00 |
155
166
 
156
- *Pricing last verified: December 2025*
167
+ *Last updated: 2025-12-25*
168
+ <!-- SUPPORTED_MODELS_END -->
157
169
 
158
- ## Updating Pricing
170
+ ## Pricing Updates
159
171
 
160
- Model configurations are embedded in the package. To update pricing:
161
- 1. Modify `src/models.ts`
162
- 2. Create a changeset: `npx changeset`
163
- 3. Publish a new version
172
+ Model pricing is automatically updated weekly via GitHub Actions. The update script fetches the latest prices directly from:
173
+ - [OpenAI Pricing](https://openai.com/api/pricing/)
174
+ - [Anthropic Pricing](https://www.anthropic.com/pricing)
175
+ - [Google AI Pricing](https://ai.google.dev/gemini-api/docs/pricing)
176
+
177
+ You can check when prices were last updated:
178
+
179
+ ```typescript
180
+ import { LAST_UPDATED } from 'ai-token-estimator';
181
+ console.log(LAST_UPDATED); // '2025-12-25'
182
+ ```
164
183
 
165
184
  ## License
166
185
 
package/dist/index.cjs CHANGED
@@ -21,6 +21,7 @@ var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: tru
21
21
  var index_exports = {};
22
22
  __export(index_exports, {
23
23
  DEFAULT_MODELS: () => DEFAULT_MODELS,
24
+ LAST_UPDATED: () => LAST_UPDATED,
24
25
  estimate: () => estimate,
25
26
  getAvailableModels: () => getAvailableModels,
26
27
  getModelConfig: () => getModelConfig
@@ -28,25 +29,12 @@ __export(index_exports, {
28
29
  module.exports = __toCommonJS(index_exports);
29
30
 
30
31
  // src/models.ts
32
+ var LAST_UPDATED = "2025-12-25";
31
33
  var models = {
32
34
  // ===================
33
35
  // OpenAI Models
34
36
  // ===================
35
37
  // OpenAI uses ~4 chars per token for English text
36
- // GPT-5 series (Flagship)
37
- "gpt-5.2": {
38
- charsPerToken: 4,
39
- inputCostPerMillion: 1.75
40
- },
41
- "gpt-5.2-pro": {
42
- charsPerToken: 4,
43
- inputCostPerMillion: 21
44
- },
45
- "gpt-5-mini": {
46
- charsPerToken: 4,
47
- inputCostPerMillion: 0.25
48
- },
49
- // GPT-4.1 series
50
38
  "gpt-4.1": {
51
39
  charsPerToken: 4,
52
40
  inputCostPerMillion: 3
@@ -59,7 +47,6 @@ var models = {
59
47
  charsPerToken: 4,
60
48
  inputCostPerMillion: 0.2
61
49
  },
62
- // GPT-4o series
63
50
  "gpt-4o": {
64
51
  charsPerToken: 4,
65
52
  inputCostPerMillion: 2.5
@@ -68,16 +55,26 @@ var models = {
68
55
  charsPerToken: 4,
69
56
  inputCostPerMillion: 0.15
70
57
  },
71
- // OpenAI Reasoning models (o-series)
72
- "o3": {
58
+ "gpt-5-mini": {
73
59
  charsPerToken: 4,
74
- inputCostPerMillion: 2
75
- // Estimated based on similar tier
60
+ inputCostPerMillion: 0.25
76
61
  },
77
- "o4-mini": {
62
+ "gpt-5.2": {
63
+ charsPerToken: 4,
64
+ inputCostPerMillion: 1.75
65
+ },
66
+ "gpt-5.2-pro": {
67
+ charsPerToken: 4,
68
+ inputCostPerMillion: 21
69
+ },
70
+ "gpt-realtime": {
78
71
  charsPerToken: 4,
79
72
  inputCostPerMillion: 4
80
73
  },
74
+ "gpt-realtime-mini": {
75
+ charsPerToken: 4,
76
+ inputCostPerMillion: 0.6
77
+ },
81
78
  "o1": {
82
79
  charsPerToken: 4,
83
80
  inputCostPerMillion: 15
@@ -85,66 +82,68 @@ var models = {
85
82
  "o1-pro": {
86
83
  charsPerToken: 4,
87
84
  inputCostPerMillion: 150
88
- // High-end reasoning
85
+ },
86
+ "o3": {
87
+ charsPerToken: 4,
88
+ inputCostPerMillion: 2
89
+ },
90
+ "o4-mini": {
91
+ charsPerToken: 4,
92
+ inputCostPerMillion: 4
89
93
  },
90
94
  // ===================
91
95
  // Anthropic Models
92
96
  // ===================
93
97
  // Anthropic uses ~3.5 chars per token for English text
94
- // Claude 4.5 series (Latest)
95
- "claude-opus-4.5": {
98
+ "claude-haiku-3": {
96
99
  charsPerToken: 3.5,
97
- inputCostPerMillion: 5
100
+ inputCostPerMillion: 0.25
98
101
  },
99
- "claude-sonnet-4.5": {
102
+ "claude-haiku-3.5": {
100
103
  charsPerToken: 3.5,
101
- inputCostPerMillion: 3
104
+ inputCostPerMillion: 0.8
102
105
  },
103
106
  "claude-haiku-4.5": {
104
107
  charsPerToken: 3.5,
105
108
  inputCostPerMillion: 1
106
109
  },
107
- // Claude 4 series
108
- "claude-opus-4": {
110
+ "claude-opus-3": {
109
111
  charsPerToken: 3.5,
110
112
  inputCostPerMillion: 15
111
113
  },
112
- "claude-opus-4.1": {
114
+ "claude-opus-4": {
113
115
  charsPerToken: 3.5,
114
116
  inputCostPerMillion: 15
115
117
  },
116
- "claude-sonnet-4": {
118
+ "claude-opus-4.1": {
117
119
  charsPerToken: 3.5,
118
- inputCostPerMillion: 3
120
+ inputCostPerMillion: 15
119
121
  },
120
- // Claude 3 series (Legacy)
121
- "claude-opus-3": {
122
+ "claude-opus-4.5": {
122
123
  charsPerToken: 3.5,
123
- inputCostPerMillion: 15
124
+ inputCostPerMillion: 5
124
125
  },
125
- "claude-haiku-3": {
126
+ "claude-sonnet-4": {
126
127
  charsPerToken: 3.5,
127
- inputCostPerMillion: 0.25
128
+ inputCostPerMillion: 3
128
129
  },
129
- "claude-haiku-3.5": {
130
+ "claude-sonnet-4.5": {
130
131
  charsPerToken: 3.5,
131
- inputCostPerMillion: 0.8
132
+ inputCostPerMillion: 3
132
133
  },
133
134
  // ===================
134
135
  // Google Gemini Models
135
136
  // ===================
136
137
  // Gemini uses similar tokenization to OpenAI (~4 chars per token)
137
- // Gemini 3 series (Latest)
138
- "gemini-3-pro": {
138
+ "gemini-2.0-flash": {
139
139
  charsPerToken: 4,
140
- inputCostPerMillion: 2
140
+ inputCostPerMillion: 0.1
141
141
  },
142
- "gemini-3-flash": {
142
+ "gemini-2.0-flash-lite": {
143
143
  charsPerToken: 4,
144
- inputCostPerMillion: 0.5
144
+ inputCostPerMillion: 0.075
145
145
  },
146
- // Gemini 2.5 series
147
- "gemini-2.5-pro": {
146
+ "gemini-2.5-computer-use-preview-10-2025": {
148
147
  charsPerToken: 4,
149
148
  inputCostPerMillion: 1.25
150
149
  },
@@ -156,14 +155,37 @@ var models = {
156
155
  charsPerToken: 4,
157
156
  inputCostPerMillion: 0.1
158
157
  },
159
- // Gemini 2.0 series
160
- "gemini-2.0-flash": {
158
+ "gemini-2.5-flash-lite-preview-09-2025": {
161
159
  charsPerToken: 4,
162
160
  inputCostPerMillion: 0.1
163
161
  },
164
- "gemini-2.0-flash-lite": {
162
+ "gemini-2.5-flash-native-audio-preview-12-2025": {
165
163
  charsPerToken: 4,
166
- inputCostPerMillion: 0.075
164
+ inputCostPerMillion: 0.5
165
+ },
166
+ "gemini-2.5-flash-preview-09-2025": {
167
+ charsPerToken: 4,
168
+ inputCostPerMillion: 0.3
169
+ },
170
+ "gemini-2.5-flash-preview-tts": {
171
+ charsPerToken: 4,
172
+ inputCostPerMillion: 0.5
173
+ },
174
+ "gemini-2.5-pro": {
175
+ charsPerToken: 4,
176
+ inputCostPerMillion: 1.25
177
+ },
178
+ "gemini-2.5-pro-preview-tts": {
179
+ charsPerToken: 4,
180
+ inputCostPerMillion: 1
181
+ },
182
+ "gemini-3-flash": {
183
+ charsPerToken: 4,
184
+ inputCostPerMillion: 0.5
185
+ },
186
+ "gemini-3-pro": {
187
+ charsPerToken: 4,
188
+ inputCostPerMillion: 2
167
189
  }
168
190
  };
169
191
  Object.values(models).forEach((config) => Object.freeze(config));
@@ -219,6 +241,7 @@ function estimate(input) {
219
241
  // Annotate the CommonJS export names for ESM import in node:
220
242
  0 && (module.exports = {
221
243
  DEFAULT_MODELS,
244
+ LAST_UPDATED,
222
245
  estimate,
223
246
  getAvailableModels,
224
247
  getModelConfig
package/dist/index.d.cts CHANGED
@@ -53,6 +53,20 @@ interface EstimateOutput {
53
53
  */
54
54
  declare function estimate(input: EstimateInput): EstimateOutput;
55
55
 
56
+ /**
57
+ * Default model configurations.
58
+ *
59
+ * AUTO-GENERATED FILE - DO NOT EDIT MANUALLY
60
+ * Last updated: 2025-12-25
61
+ *
62
+ * Sources:
63
+ * - OpenAI: https://openai.com/api/pricing/
64
+ * - Anthropic: https://www.anthropic.com/pricing
65
+ * - Google: https://ai.google.dev/gemini-api/docs/pricing
66
+ *
67
+ * This file is automatically updated weekly by GitHub Actions.
68
+ */
69
+ declare const LAST_UPDATED = "2025-12-25";
56
70
  declare const DEFAULT_MODELS: Readonly<Record<string, Readonly<ModelConfig>>>;
57
71
  /**
58
72
  * Get configuration for a specific model.
@@ -67,4 +81,4 @@ declare function getModelConfig(model: string): ModelConfig;
67
81
  */
68
82
  declare function getAvailableModels(): string[];
69
83
 
70
- export { DEFAULT_MODELS, type EstimateInput, type EstimateOutput, type ModelConfig, estimate, getAvailableModels, getModelConfig };
84
+ export { DEFAULT_MODELS, type EstimateInput, type EstimateOutput, LAST_UPDATED, type ModelConfig, estimate, getAvailableModels, getModelConfig };
package/dist/index.d.ts CHANGED
@@ -53,6 +53,20 @@ interface EstimateOutput {
53
53
  */
54
54
  declare function estimate(input: EstimateInput): EstimateOutput;
55
55
 
56
+ /**
57
+ * Default model configurations.
58
+ *
59
+ * AUTO-GENERATED FILE - DO NOT EDIT MANUALLY
60
+ * Last updated: 2025-12-25
61
+ *
62
+ * Sources:
63
+ * - OpenAI: https://openai.com/api/pricing/
64
+ * - Anthropic: https://www.anthropic.com/pricing
65
+ * - Google: https://ai.google.dev/gemini-api/docs/pricing
66
+ *
67
+ * This file is automatically updated weekly by GitHub Actions.
68
+ */
69
+ declare const LAST_UPDATED = "2025-12-25";
56
70
  declare const DEFAULT_MODELS: Readonly<Record<string, Readonly<ModelConfig>>>;
57
71
  /**
58
72
  * Get configuration for a specific model.
@@ -67,4 +81,4 @@ declare function getModelConfig(model: string): ModelConfig;
67
81
  */
68
82
  declare function getAvailableModels(): string[];
69
83
 
70
- export { DEFAULT_MODELS, type EstimateInput, type EstimateOutput, type ModelConfig, estimate, getAvailableModels, getModelConfig };
84
+ export { DEFAULT_MODELS, type EstimateInput, type EstimateOutput, LAST_UPDATED, type ModelConfig, estimate, getAvailableModels, getModelConfig };
package/dist/index.js CHANGED
@@ -1,23 +1,10 @@
1
1
  // src/models.ts
2
+ var LAST_UPDATED = "2025-12-25";
2
3
  var models = {
3
4
  // ===================
4
5
  // OpenAI Models
5
6
  // ===================
6
7
  // OpenAI uses ~4 chars per token for English text
7
- // GPT-5 series (Flagship)
8
- "gpt-5.2": {
9
- charsPerToken: 4,
10
- inputCostPerMillion: 1.75
11
- },
12
- "gpt-5.2-pro": {
13
- charsPerToken: 4,
14
- inputCostPerMillion: 21
15
- },
16
- "gpt-5-mini": {
17
- charsPerToken: 4,
18
- inputCostPerMillion: 0.25
19
- },
20
- // GPT-4.1 series
21
8
  "gpt-4.1": {
22
9
  charsPerToken: 4,
23
10
  inputCostPerMillion: 3
@@ -30,7 +17,6 @@ var models = {
30
17
  charsPerToken: 4,
31
18
  inputCostPerMillion: 0.2
32
19
  },
33
- // GPT-4o series
34
20
  "gpt-4o": {
35
21
  charsPerToken: 4,
36
22
  inputCostPerMillion: 2.5
@@ -39,16 +25,26 @@ var models = {
39
25
  charsPerToken: 4,
40
26
  inputCostPerMillion: 0.15
41
27
  },
42
- // OpenAI Reasoning models (o-series)
43
- "o3": {
28
+ "gpt-5-mini": {
44
29
  charsPerToken: 4,
45
- inputCostPerMillion: 2
46
- // Estimated based on similar tier
30
+ inputCostPerMillion: 0.25
47
31
  },
48
- "o4-mini": {
32
+ "gpt-5.2": {
33
+ charsPerToken: 4,
34
+ inputCostPerMillion: 1.75
35
+ },
36
+ "gpt-5.2-pro": {
37
+ charsPerToken: 4,
38
+ inputCostPerMillion: 21
39
+ },
40
+ "gpt-realtime": {
49
41
  charsPerToken: 4,
50
42
  inputCostPerMillion: 4
51
43
  },
44
+ "gpt-realtime-mini": {
45
+ charsPerToken: 4,
46
+ inputCostPerMillion: 0.6
47
+ },
52
48
  "o1": {
53
49
  charsPerToken: 4,
54
50
  inputCostPerMillion: 15
@@ -56,66 +52,68 @@ var models = {
56
52
  "o1-pro": {
57
53
  charsPerToken: 4,
58
54
  inputCostPerMillion: 150
59
- // High-end reasoning
55
+ },
56
+ "o3": {
57
+ charsPerToken: 4,
58
+ inputCostPerMillion: 2
59
+ },
60
+ "o4-mini": {
61
+ charsPerToken: 4,
62
+ inputCostPerMillion: 4
60
63
  },
61
64
  // ===================
62
65
  // Anthropic Models
63
66
  // ===================
64
67
  // Anthropic uses ~3.5 chars per token for English text
65
- // Claude 4.5 series (Latest)
66
- "claude-opus-4.5": {
68
+ "claude-haiku-3": {
67
69
  charsPerToken: 3.5,
68
- inputCostPerMillion: 5
70
+ inputCostPerMillion: 0.25
69
71
  },
70
- "claude-sonnet-4.5": {
72
+ "claude-haiku-3.5": {
71
73
  charsPerToken: 3.5,
72
- inputCostPerMillion: 3
74
+ inputCostPerMillion: 0.8
73
75
  },
74
76
  "claude-haiku-4.5": {
75
77
  charsPerToken: 3.5,
76
78
  inputCostPerMillion: 1
77
79
  },
78
- // Claude 4 series
79
- "claude-opus-4": {
80
+ "claude-opus-3": {
80
81
  charsPerToken: 3.5,
81
82
  inputCostPerMillion: 15
82
83
  },
83
- "claude-opus-4.1": {
84
+ "claude-opus-4": {
84
85
  charsPerToken: 3.5,
85
86
  inputCostPerMillion: 15
86
87
  },
87
- "claude-sonnet-4": {
88
+ "claude-opus-4.1": {
88
89
  charsPerToken: 3.5,
89
- inputCostPerMillion: 3
90
+ inputCostPerMillion: 15
90
91
  },
91
- // Claude 3 series (Legacy)
92
- "claude-opus-3": {
92
+ "claude-opus-4.5": {
93
93
  charsPerToken: 3.5,
94
- inputCostPerMillion: 15
94
+ inputCostPerMillion: 5
95
95
  },
96
- "claude-haiku-3": {
96
+ "claude-sonnet-4": {
97
97
  charsPerToken: 3.5,
98
- inputCostPerMillion: 0.25
98
+ inputCostPerMillion: 3
99
99
  },
100
- "claude-haiku-3.5": {
100
+ "claude-sonnet-4.5": {
101
101
  charsPerToken: 3.5,
102
- inputCostPerMillion: 0.8
102
+ inputCostPerMillion: 3
103
103
  },
104
104
  // ===================
105
105
  // Google Gemini Models
106
106
  // ===================
107
107
  // Gemini uses similar tokenization to OpenAI (~4 chars per token)
108
- // Gemini 3 series (Latest)
109
- "gemini-3-pro": {
108
+ "gemini-2.0-flash": {
110
109
  charsPerToken: 4,
111
- inputCostPerMillion: 2
110
+ inputCostPerMillion: 0.1
112
111
  },
113
- "gemini-3-flash": {
112
+ "gemini-2.0-flash-lite": {
114
113
  charsPerToken: 4,
115
- inputCostPerMillion: 0.5
114
+ inputCostPerMillion: 0.075
116
115
  },
117
- // Gemini 2.5 series
118
- "gemini-2.5-pro": {
116
+ "gemini-2.5-computer-use-preview-10-2025": {
119
117
  charsPerToken: 4,
120
118
  inputCostPerMillion: 1.25
121
119
  },
@@ -127,14 +125,37 @@ var models = {
127
125
  charsPerToken: 4,
128
126
  inputCostPerMillion: 0.1
129
127
  },
130
- // Gemini 2.0 series
131
- "gemini-2.0-flash": {
128
+ "gemini-2.5-flash-lite-preview-09-2025": {
132
129
  charsPerToken: 4,
133
130
  inputCostPerMillion: 0.1
134
131
  },
135
- "gemini-2.0-flash-lite": {
132
+ "gemini-2.5-flash-native-audio-preview-12-2025": {
136
133
  charsPerToken: 4,
137
- inputCostPerMillion: 0.075
134
+ inputCostPerMillion: 0.5
135
+ },
136
+ "gemini-2.5-flash-preview-09-2025": {
137
+ charsPerToken: 4,
138
+ inputCostPerMillion: 0.3
139
+ },
140
+ "gemini-2.5-flash-preview-tts": {
141
+ charsPerToken: 4,
142
+ inputCostPerMillion: 0.5
143
+ },
144
+ "gemini-2.5-pro": {
145
+ charsPerToken: 4,
146
+ inputCostPerMillion: 1.25
147
+ },
148
+ "gemini-2.5-pro-preview-tts": {
149
+ charsPerToken: 4,
150
+ inputCostPerMillion: 1
151
+ },
152
+ "gemini-3-flash": {
153
+ charsPerToken: 4,
154
+ inputCostPerMillion: 0.5
155
+ },
156
+ "gemini-3-pro": {
157
+ charsPerToken: 4,
158
+ inputCostPerMillion: 2
138
159
  }
139
160
  };
140
161
  Object.values(models).forEach((config) => Object.freeze(config));
@@ -189,6 +210,7 @@ function estimate(input) {
189
210
  }
190
211
  export {
191
212
  DEFAULT_MODELS,
213
+ LAST_UPDATED,
192
214
  estimate,
193
215
  getAvailableModels,
194
216
  getModelConfig
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "ai-token-estimator",
3
- "version": "1.0.0",
3
+ "version": "1.0.2",
4
4
  "description": "Estimate token counts and costs for LLM API calls",
5
5
  "type": "module",
6
6
  "main": "./dist/index.cjs",
@@ -28,7 +28,8 @@
28
28
  "test": "vitest run",
29
29
  "test:watch": "vitest",
30
30
  "lint": "eslint src tests",
31
- "prepublishOnly": "npm run lint && npm run test && npm run build"
31
+ "prepublishOnly": "npm run lint && npm run test && npm run build",
32
+ "update-pricing": "tsx scripts/update-pricing.ts"
32
33
  },
33
34
  "keywords": [
34
35
  "llm",
@@ -51,10 +52,12 @@
51
52
  },
52
53
  "devDependencies": {
53
54
  "@changesets/cli": "^2.29.8",
55
+ "@mendable/firecrawl-js": "^1.19.0",
54
56
  "@typescript-eslint/eslint-plugin": "^8.50.1",
55
57
  "@typescript-eslint/parser": "^8.50.1",
56
58
  "eslint": "^9.39.2",
57
59
  "tsup": "^8.5.1",
60
+ "tsx": "^4.19.2",
58
61
  "typescript": "^5.9.3",
59
62
  "vitest": "^4.0.16"
60
63
  }