@ccusage/codex 17.1.8 → 17.2.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/dist/index.js +108 -1
  2. package/package.json +1 -1
package/dist/index.js CHANGED
@@ -895,7 +895,7 @@ async function executeCommand(cmd, ctx, name$1) {
  await resolved.run(ctx);
  }
  var name = "@ccusage/codex";
- var version = "17.1.8";
+ var version = "17.2.1";
  var description = "Usage analysis tool for OpenAI Codex sessions";
  var require_debug = /* @__PURE__ */ __commonJSMin(((exports, module) => {
  let messages = [];
@@ -7094,6 +7094,52 @@ const PREFETCHED_CODEX_PRICING = {
  "max_input_tokens": 272e3,
  "max_output_tokens": 128e3
  },
+ "azure/gpt-5.2": {
+ "input_cost_per_token": 175e-8,
+ "output_cost_per_token": 14e-6,
+ "cache_read_input_token_cost": 175e-9,
+ "max_tokens": 128e3,
+ "max_input_tokens": 4e5,
+ "max_output_tokens": 128e3
+ },
+ "azure/gpt-5.2-2025-12-11": {
+ "input_cost_per_token": 175e-8,
+ "output_cost_per_token": 14e-6,
+ "cache_read_input_token_cost": 175e-9,
+ "max_tokens": 128e3,
+ "max_input_tokens": 4e5,
+ "max_output_tokens": 128e3
+ },
+ "azure/gpt-5.2-chat": {
+ "input_cost_per_token": 175e-8,
+ "output_cost_per_token": 14e-6,
+ "cache_read_input_token_cost": 175e-9,
+ "max_tokens": 16384,
+ "max_input_tokens": 128e3,
+ "max_output_tokens": 16384
+ },
+ "azure/gpt-5.2-chat-2025-12-11": {
+ "input_cost_per_token": 175e-8,
+ "output_cost_per_token": 14e-6,
+ "cache_read_input_token_cost": 175e-9,
+ "max_tokens": 16384,
+ "max_input_tokens": 128e3,
+ "max_output_tokens": 16384
+ },
+ "azure/gpt-5.2-pro": {
+ "input_cost_per_token": 21e-6,
+ "output_cost_per_token": 168e-6,
+ "max_tokens": 128e3,
+ "max_input_tokens": 4e5,
+ "max_output_tokens": 128e3
+ },
+ "azure/gpt-5.2-pro-2025-12-11": {
+ "input_cost_per_token": 21e-6,
+ "output_cost_per_token": 168e-6,
+ "max_tokens": 128e3,
+ "max_input_tokens": 4e5,
+ "max_output_tokens": 128e3
+ },
  "gpt-5": {
  "input_cost_per_token": 125e-8,
  "output_cost_per_token": 1e-5,
@@ -7126,6 +7172,44 @@ const PREFETCHED_CODEX_PRICING = {
  "max_input_tokens": 128e3,
  "max_output_tokens": 16384
  },
+ "gpt-5.2": {
+ "input_cost_per_token": 175e-8,
+ "output_cost_per_token": 14e-6,
+ "cache_read_input_token_cost": 175e-9,
+ "max_tokens": 128e3,
+ "max_input_tokens": 4e5,
+ "max_output_tokens": 128e3
+ },
+ "gpt-5.2-2025-12-11": {
+ "input_cost_per_token": 175e-8,
+ "output_cost_per_token": 14e-6,
+ "cache_read_input_token_cost": 175e-9,
+ "max_tokens": 128e3,
+ "max_input_tokens": 4e5,
+ "max_output_tokens": 128e3
+ },
+ "gpt-5.2-chat-latest": {
+ "input_cost_per_token": 175e-8,
+ "output_cost_per_token": 14e-6,
+ "cache_read_input_token_cost": 175e-9,
+ "max_tokens": 16384,
+ "max_input_tokens": 128e3,
+ "max_output_tokens": 16384
+ },
+ "gpt-5.2-pro": {
+ "input_cost_per_token": 21e-6,
+ "output_cost_per_token": 168e-6,
+ "max_tokens": 128e3,
+ "max_input_tokens": 4e5,
+ "max_output_tokens": 128e3
+ },
+ "gpt-5.2-pro-2025-12-11": {
+ "input_cost_per_token": 21e-6,
+ "output_cost_per_token": 168e-6,
+ "max_tokens": 128e3,
+ "max_input_tokens": 4e5,
+ "max_output_tokens": 128e3
+ },
  "gpt-5-pro": {
  "input_cost_per_token": 15e-6,
  "output_cost_per_token": 12e-5,
@@ -7267,6 +7351,29 @@ const PREFETCHED_CODEX_PRICING = {
  "max_tokens": 128e3,
  "max_input_tokens": 272e3,
  "max_output_tokens": 128e3
+ },
+ "openrouter/openai/gpt-5.2": {
+ "input_cost_per_token": 175e-8,
+ "output_cost_per_token": 14e-6,
+ "cache_read_input_token_cost": 175e-9,
+ "max_tokens": 4e5,
+ "max_input_tokens": 4e5,
+ "max_output_tokens": 128e3
+ },
+ "openrouter/openai/gpt-5.2-chat": {
+ "input_cost_per_token": 175e-8,
+ "output_cost_per_token": 14e-6,
+ "cache_read_input_token_cost": 175e-9,
+ "max_tokens": 128e3,
+ "max_input_tokens": 128e3,
+ "max_output_tokens": 16384
+ },
+ "openrouter/openai/gpt-5.2-pro": {
+ "input_cost_per_token": 21e-6,
+ "output_cost_per_token": 168e-6,
+ "max_tokens": 4e5,
+ "max_input_tokens": 4e5,
+ "max_output_tokens": 128e3
  }
  };
  var CodexPricingSource = class {
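For orientation, the costs in these entries are expressed in dollars per single token, so the new gpt-5.2 rates work out to $1.75 per million input tokens (175e-8), $14 per million output tokens (14e-6), and $0.175 per million cached input tokens (175e-9); the pro variants come out to $21 and $168 per million. The sketch below shows how one of these entries translates into a dollar figure for a session. The field names are taken from the diff above, but the `costForUsage` helper, the `TokenUsage` shape, and the assumption that cached tokens are counted inside the input total are illustrative only and not part of @ccusage/codex's actual implementation.

```ts
// Minimal sketch: turning a PREFETCHED_CODEX_PRICING entry into a dollar cost.
// The pricing fields mirror the diff above; costForUsage and TokenUsage are
// hypothetical helpers, not the package's real API.
type ModelPricing = {
	input_cost_per_token: number;
	output_cost_per_token: number;
	cache_read_input_token_cost?: number;
};

type TokenUsage = {
	inputTokens: number;        // total input tokens (assumed to include cached ones)
	cachedInputTokens: number;  // portion of inputTokens served from the prompt cache
	outputTokens: number;
};

function costForUsage(pricing: ModelPricing, usage: TokenUsage): number {
	// Cached input tokens are billed at the cheaper cache-read rate when one exists.
	const cacheRate = pricing.cache_read_input_token_cost ?? pricing.input_cost_per_token;
	const freshInput = usage.inputTokens - usage.cachedInputTokens;
	return (
		freshInput * pricing.input_cost_per_token +
		usage.cachedInputTokens * cacheRate +
		usage.outputTokens * pricing.output_cost_per_token
	);
}

// Example with the new "gpt-5.2" rates: 1M fresh input + 500k cached + 100k output
// => 1.75 + 0.0875 + 1.40 ≈ $3.24.
const gpt52: ModelPricing = {
	input_cost_per_token: 175e-8,
	output_cost_per_token: 14e-6,
	cache_read_input_token_cost: 175e-9,
};
console.log(costForUsage(gpt52, { inputTokens: 1_500_000, cachedInputTokens: 500_000, outputTokens: 100_000 }));
```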
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@ccusage/codex",
- "version": "17.1.8",
+ "version": "17.2.1",
  "description": "Usage analysis tool for OpenAI Codex sessions",
  "homepage": "https://github.com/ryoppippi/ccusage#readme",
  "bugs": {