@ccusage/codex 18.0.7 → 18.0.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/dist/index.js +80 -2
  2. package/package.json +1 -1
package/dist/index.js CHANGED
@@ -895,7 +895,7 @@ async function executeCommand(cmd, ctx, name$1) {
895
895
  await resolved.run(ctx);
896
896
  }
897
897
  var name = "@ccusage/codex";
898
- var version = "18.0.7";
898
+ var version = "18.0.9";
899
899
  var description = "Usage analysis tool for OpenAI Codex sessions";
900
900
  var require_debug = /* @__PURE__ */ __commonJSMin(((exports, module) => {
901
901
  let messages = [];
@@ -6959,6 +6959,16 @@ const CODEX_PROVIDER_PREFIXES = [
6959
6959
  "openrouter/openai/"
6960
6960
  ];
6961
6961
  const CODEX_MODEL_ALIASES_MAP = new Map([["gpt-5-codex", "gpt-5"], ["gpt-5.3-codex", "gpt-5.2-codex"]]);
6962
+ const FREE_MODEL_PRICING = {
6963
+ inputCostPerMToken: 0,
6964
+ cachedInputCostPerMToken: 0,
6965
+ outputCostPerMToken: 0
6966
+ };
6967
+ function isOpenRouterFreeModel(model) {
6968
+ const normalized = model.trim().toLowerCase();
6969
+ if (normalized === "openrouter/free") return true;
6970
+ return normalized.startsWith("openrouter/") && normalized.endsWith(":free");
6971
+ }
6962
6972
  function hasNonZeroTokenPricing(pricing) {
6963
6973
  return (pricing.input_cost_per_token ?? 0) > 0 || (pricing.output_cost_per_token ?? 0) > 0 || (pricing.cache_read_input_token_cost ?? 0) > 0;
6964
6974
  }
@@ -7157,6 +7167,14 @@ const PREFETCHED_CODEX_PRICING = {
7157
7167
  "max_input_tokens": 272e3,
7158
7168
  "max_output_tokens": 128e3
7159
7169
  },
7170
+ "azure/gpt-5.3-codex": {
7171
+ "input_cost_per_token": 175e-8,
7172
+ "output_cost_per_token": 14e-6,
7173
+ "cache_read_input_token_cost": 175e-9,
7174
+ "max_tokens": 128e3,
7175
+ "max_input_tokens": 272e3,
7176
+ "max_output_tokens": 128e3
7177
+ },
7160
7178
  "azure/gpt-5.2-pro": {
7161
7179
  "input_cost_per_token": 21e-6,
7162
7180
  "output_cost_per_token": 168e-6,
@@ -7227,6 +7245,14 @@ const PREFETCHED_CODEX_PRICING = {
7227
7245
  "max_input_tokens": 128e3,
7228
7246
  "max_output_tokens": 16384
7229
7247
  },
7248
+ "gpt-5.3-chat-latest": {
7249
+ "input_cost_per_token": 175e-8,
7250
+ "output_cost_per_token": 14e-6,
7251
+ "cache_read_input_token_cost": 175e-9,
7252
+ "max_tokens": 16384,
7253
+ "max_input_tokens": 128e3,
7254
+ "max_output_tokens": 16384
7255
+ },
7230
7256
  "gpt-5.2-pro": {
7231
7257
  "input_cost_per_token": 21e-6,
7232
7258
  "output_cost_per_token": 168e-6,
@@ -7241,6 +7267,38 @@ const PREFETCHED_CODEX_PRICING = {
7241
7267
  "max_input_tokens": 272e3,
7242
7268
  "max_output_tokens": 128e3
7243
7269
  },
7270
+ "gpt-5.4": {
7271
+ "input_cost_per_token": 25e-7,
7272
+ "output_cost_per_token": 15e-6,
7273
+ "cache_read_input_token_cost": 25e-8,
7274
+ "max_tokens": 128e3,
7275
+ "max_input_tokens": 105e4,
7276
+ "max_output_tokens": 128e3
7277
+ },
7278
+ "gpt-5.4-2026-03-05": {
7279
+ "input_cost_per_token": 25e-7,
7280
+ "output_cost_per_token": 15e-6,
7281
+ "cache_read_input_token_cost": 25e-8,
7282
+ "max_tokens": 128e3,
7283
+ "max_input_tokens": 105e4,
7284
+ "max_output_tokens": 128e3
7285
+ },
7286
+ "gpt-5.4-pro": {
7287
+ "input_cost_per_token": 3e-5,
7288
+ "output_cost_per_token": 18e-5,
7289
+ "cache_read_input_token_cost": 3e-6,
7290
+ "max_tokens": 128e3,
7291
+ "max_input_tokens": 105e4,
7292
+ "max_output_tokens": 128e3
7293
+ },
7294
+ "gpt-5.4-pro-2026-03-05": {
7295
+ "input_cost_per_token": 3e-5,
7296
+ "output_cost_per_token": 18e-5,
7297
+ "cache_read_input_token_cost": 3e-6,
7298
+ "max_tokens": 128e3,
7299
+ "max_input_tokens": 105e4,
7300
+ "max_output_tokens": 128e3
7301
+ },
7244
7302
  "gpt-5-pro": {
7245
7303
  "input_cost_per_token": 15e-6,
7246
7304
  "output_cost_per_token": 12e-5,
@@ -7319,6 +7377,14 @@ const PREFETCHED_CODEX_PRICING = {
7319
7377
  "max_input_tokens": 272e3,
7320
7378
  "max_output_tokens": 128e3
7321
7379
  },
7380
+ "gpt-5.3-codex": {
7381
+ "input_cost_per_token": 175e-8,
7382
+ "output_cost_per_token": 14e-6,
7383
+ "cache_read_input_token_cost": 175e-9,
7384
+ "max_tokens": 128e3,
7385
+ "max_input_tokens": 272e3,
7386
+ "max_output_tokens": 128e3
7387
+ },
7322
7388
  "gpt-5-mini": {
7323
7389
  "input_cost_per_token": 25e-8,
7324
7390
  "output_cost_per_token": 2e-6,
@@ -7399,6 +7465,14 @@ const PREFETCHED_CODEX_PRICING = {
7399
7465
  "max_input_tokens": 272e3,
7400
7466
  "max_output_tokens": 128e3
7401
7467
  },
7468
+ "openrouter/openai/gpt-5.1-codex-max": {
7469
+ "input_cost_per_token": 125e-8,
7470
+ "output_cost_per_token": 1e-5,
7471
+ "cache_read_input_token_cost": 125e-9,
7472
+ "max_tokens": 128e3,
7473
+ "max_input_tokens": 4e5,
7474
+ "max_output_tokens": 128e3
7475
+ },
7402
7476
  "openrouter/openai/gpt-5.2": {
7403
7477
  "input_cost_per_token": 175e-8,
7404
7478
  "output_cost_per_token": 14e-6,
@@ -7453,6 +7527,7 @@ var CodexPricingSource = class {
7453
7527
  this.fetcher[Symbol.dispose]();
7454
7528
  }
7455
7529
  async getPricing(model) {
7530
+ if (isOpenRouterFreeModel(model)) return FREE_MODEL_PRICING;
7456
7531
  const directLookup = await this.fetcher.getModelPricing(model);
7457
7532
  if (isFailure(directLookup)) throw directLookup.error;
7458
7533
  let pricing = directLookup.value;
@@ -7462,7 +7537,10 @@ var CodexPricingSource = class {
7462
7537
  if (isFailure(aliasLookup)) throw aliasLookup.error;
7463
7538
  if (aliasLookup.value != null && hasNonZeroTokenPricing(aliasLookup.value)) pricing = aliasLookup.value;
7464
7539
  }
7465
- if (pricing == null) throw new Error(`Pricing not found for model ${model}`);
7540
+ if (pricing == null) {
7541
+ logger.warn(`Pricing not found for model ${model}; defaulting to zero-cost pricing.`);
7542
+ return FREE_MODEL_PRICING;
7543
+ }
7466
7544
  return {
7467
7545
  inputCostPerMToken: toPerMillion(pricing.input_cost_per_token),
7468
7546
  cachedInputCostPerMToken: toPerMillion(pricing.cache_read_input_token_cost, pricing.input_cost_per_token),
package/package.json CHANGED
@@ -1,7 +1,7 @@
1
1
  {
2
2
  "name": "@ccusage/codex",
3
3
  "type": "module",
4
- "version": "18.0.7",
4
+ "version": "18.0.9",
5
5
  "description": "Usage analysis tool for OpenAI Codex sessions",
6
6
  "author": "ryoppippi",
7
7
  "license": "MIT",