@chenpu17/cc-gw 0.4.3 → 0.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (22)
  1. package/package.json +1 -1
  2. package/src/server/dist/index.js +149 -69
  3. package/src/web/dist/assets/{About-BWcLLdLY.js → About-DK242vw9.js} +2 -2
  4. package/src/web/dist/assets/{ApiKeys-DsYmx21U.js → ApiKeys-BGROxK6-.js} +1 -1
  5. package/src/web/dist/assets/{Button-CZXniSHM.js → Button-Bnnxe9ep.js} +1 -1
  6. package/src/web/dist/assets/{Dashboard-H7fcVgwO.js → Dashboard-B7zokimB.js} +2 -2
  7. package/src/web/dist/assets/{FormField-SZpxR702.js → FormField-BzT4FGj8.js} +1 -1
  8. package/src/web/dist/assets/{Help-CgWIUFIO.js → Help-CJooSMdJ.js} +1 -1
  9. package/src/web/dist/assets/{Input-BdyQWPOU.js → Input-B-P-J4xQ.js} +1 -1
  10. package/src/web/dist/assets/{Login-0_Y4Go8x.js → Login-B9RgxiYX.js} +1 -1
  11. package/src/web/dist/assets/Logs-CsJCTftU.js +1 -0
  12. package/src/web/dist/assets/{ModelManagement-DBVBITho.js → ModelManagement-dDhNa_5z.js} +1 -1
  13. package/src/web/dist/assets/{PageSection-B08EcVAN.js → PageSection-Dzvd3cKD.js} +1 -1
  14. package/src/web/dist/assets/{Settings-DEloCGp7.js → Settings-BcMQ79b0.js} +1 -1
  15. package/src/web/dist/assets/{StatusBadge-8KAMZvYW.js → StatusBadge-CAkVtC--.js} +1 -1
  16. package/src/web/dist/assets/{copy-BdNskWTP.js → copy-D6cuJHzh.js} +1 -1
  17. package/src/web/dist/assets/{index-CyrAg0Ev.js → index-Cm-hZvRJ.js} +1 -1
  18. package/src/web/dist/assets/{index-BK1UNVMz.js → index-agm-2asf.js} +4 -4
  19. package/src/web/dist/assets/{info-BTcWJb9B.js → info-CfAuBePJ.js} +1 -1
  20. package/src/web/dist/assets/{useApiQuery-BNTE55UK.js → useApiQuery-ns68sM2H.js} +1 -1
  21. package/src/web/dist/index.html +1 -1
  22. package/src/web/dist/assets/Logs-MTopPD6L.js +0 -1
package/package.json CHANGED
@@ -1,6 +1,6 @@
 {
   "name": "@chenpu17/cc-gw",
-  "version": "0.4.3",
+  "version": "0.5.0",
   "private": false,
   "type": "module",
   "scripts": {
package/src/server/dist/index.js CHANGED
@@ -11227,6 +11227,8 @@ async function ensureSchema(db) {
   );
   await maybeAddColumn(db, "request_logs", "client_model", "TEXT");
   await maybeAddColumn(db, "request_logs", "cached_tokens", "INTEGER");
+  await maybeAddColumn(db, "request_logs", "cache_read_tokens", "INTEGER DEFAULT 0");
+  await maybeAddColumn(db, "request_logs", "cache_creation_tokens", "INTEGER DEFAULT 0");
   await maybeAddColumn(db, "request_logs", "ttft_ms", "INTEGER");
   await maybeAddColumn(db, "request_logs", "tpot_ms", "REAL");
   await maybeAddColumn(db, "request_logs", "stream", "INTEGER");
@@ -11416,6 +11418,14 @@ async function updateLogTokens(requestId, values) {
     values.outputTokens,
     values.cachedTokens ?? null
   ];
+  if (values.cacheReadTokens !== void 0) {
+    setters.push("cache_read_tokens = ?");
+    params.push(values.cacheReadTokens ?? null);
+  }
+  if (values.cacheCreationTokens !== void 0) {
+    setters.push("cache_creation_tokens = ?");
+    params.push(values.cacheCreationTokens ?? null);
+  }
   if (values.ttftMs !== void 0) {
     setters.push("ttft_ms = ?");
     params.push(values.ttftMs ?? null);
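The `setters`/`params` pair builds the UPDATE statement dynamically, so a column is only written when the caller actually supplied a value for it. A condensed, self-contained sketch of the pattern — the column names come from the diff, the surrounding function body is assumed:

```js
// Condensed sketch of the dynamic UPDATE pattern in updateLogTokens.
// Column names match the diff; the rest is assumed for illustration.
async function updateLogTokens(db, requestId, values) {
  const setters = ["input_tokens = ?", "output_tokens = ?", "cached_tokens = ?"];
  const params = [values.inputTokens, values.outputTokens, values.cachedTokens ?? null];
  // Optional columns are appended only when provided, so an UPDATE from one
  // code path never clobbers metrics written by another.
  if (values.cacheReadTokens !== undefined) {
    setters.push("cache_read_tokens = ?");
    params.push(values.cacheReadTokens ?? null);
  }
  if (values.cacheCreationTokens !== undefined) {
    setters.push("cache_creation_tokens = ?");
    params.push(values.cacheCreationTokens ?? null);
  }
  params.push(requestId);
  await db.run(`UPDATE request_logs SET ${setters.join(", ")} WHERE id = ?`, params);
}
```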
@@ -12243,8 +12253,8 @@ async function registerMessagesRoute(app) {
   if (providerType === "anthropic") {
     let inputTokens2 = json.usage?.input_tokens ?? 0;
     let outputTokens2 = json.usage?.output_tokens ?? 0;
-    const cached3 = resolveCachedTokens(json.usage);
-    const cachedTokens3 = cached3.read + cached3.creation;
+    const cached2 = resolveCachedTokens(json.usage);
+    const cachedTokens2 = cached2.read + cached2.creation;
     if (!inputTokens2) {
       inputTokens2 = target.tokenEstimate || estimateTokens(normalized, target.modelId);
     }
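Throughout this release, `resolveCachedTokens` returns a `{ read, creation }` pair and callers derive the combined total as `read + creation`. Its body is not included in the diff; a plausible sketch, assuming the usual Anthropic and OpenAI usage field names:

```js
// Plausible sketch of resolveCachedTokens; the bundled implementation is
// not shown in this diff. Field names follow the public Anthropic
// (cache_read_input_tokens / cache_creation_input_tokens) and OpenAI
// (prompt_tokens_details.cached_tokens) usage shapes.
function resolveCachedTokens(usage) {
  if (!usage) return { read: 0, creation: 0 };
  const read =
    usage.cache_read_input_tokens ??              // Anthropic responses
    usage.prompt_tokens_details?.cached_tokens ?? // OpenAI chat completions
    usage.cached_tokens ??                        // some OpenAI-compatible gateways
    0;
  const creation = usage.cache_creation_input_tokens ?? 0; // Anthropic only
  return { read, creation };
}
```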
@@ -12255,13 +12265,13 @@ async function registerMessagesRoute(app) {
     logUsage("non_stream.anthropic", {
       input: inputTokens2,
       output: outputTokens2,
-      cached: cachedTokens3
+      cached: cachedTokens2
     });
     const latencyMs2 = Date.now() - requestStart;
     await updateLogTokens(logId, {
       inputTokens: inputTokens2,
       outputTokens: outputTokens2,
-      cachedTokens: cachedTokens3,
+      cachedTokens: cachedTokens2,
       ttftMs: latencyMs2,
       tpotMs: computeTpot(latencyMs2, outputTokens2, { streaming: false })
     });
@@ -12270,9 +12280,9 @@ async function registerMessagesRoute(app) {
       requests: 1,
       inputTokens: inputTokens2,
       outputTokens: outputTokens2,
-      cachedTokens: cachedTokens3,
-      cacheReadTokens: cached3.read,
-      cacheCreationTokens: cached3.creation,
+      cachedTokens: cachedTokens2,
+      cacheReadTokens: cached2.read,
+      cacheCreationTokens: cached2.creation,
       latencyMs: latencyMs2
     });
     if (storeResponsePayloads) {
@@ -12293,8 +12303,8 @@ async function registerMessagesRoute(app) {
   const claudeResponse = buildClaudeResponse(json, target.modelId);
   let inputTokens = json.usage?.prompt_tokens ?? 0;
   let outputTokens = json.usage?.completion_tokens ?? 0;
-  const cached2 = resolveCachedTokens(json.usage);
-  const cachedTokens2 = cached2.read + cached2.creation;
+  const cached = resolveCachedTokens(json.usage);
+  const cachedTokens = cached.read + cached.creation;
   if (!inputTokens) {
     inputTokens = target.tokenEstimate || estimateTokens(normalized, target.modelId);
   }
@@ -12305,13 +12315,13 @@ async function registerMessagesRoute(app) {
   logUsage("non_stream.openai", {
     input: inputTokens,
     output: outputTokens,
-    cached: cachedTokens2
+    cached: cachedTokens
   });
   const latencyMs = Date.now() - requestStart;
   await updateLogTokens(logId, {
     inputTokens,
     outputTokens,
-    cachedTokens: cachedTokens2,
+    cachedTokens,
     ttftMs: latencyMs,
     tpotMs: computeTpot(latencyMs, outputTokens, { streaming: false })
   });
@@ -12320,9 +12330,9 @@ async function registerMessagesRoute(app) {
     requests: 1,
     inputTokens,
     outputTokens,
-    cachedTokens: cachedTokens2,
-    cacheReadTokens: cached2.read,
-    cacheCreationTokens: cached2.creation,
+    cachedTokens,
+    cacheReadTokens: cached.read,
+    cacheCreationTokens: cached.creation,
     latencyMs
   });
   if (storeResponsePayloads) {
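Every completion path now records a per-request stats object carrying the cache split alongside the totals. The hunks cut off the name of the function that receives this object, so the following is only a hedged sketch of a daily-rollup upsert such a record could feed — the table, columns, and `recordUsage` name are hypothetical, not from the package:

```js
// Illustrative daily-rollup upsert for the per-request stats record seen
// above. Table name, column names, and recordUsage() are hypothetical;
// only the record's fields come from the diff.
async function recordUsage(db, day, r) {
  await db.run(
    `INSERT INTO daily_stats
       (day, requests, input_tokens, output_tokens, cached_tokens,
        cache_read_tokens, cache_creation_tokens, latency_ms)
     VALUES (?, ?, ?, ?, ?, ?, ?, ?)
     ON CONFLICT(day) DO UPDATE SET
       requests = requests + excluded.requests,
       input_tokens = input_tokens + excluded.input_tokens,
       output_tokens = output_tokens + excluded.output_tokens,
       cached_tokens = cached_tokens + excluded.cached_tokens,
       cache_read_tokens = cache_read_tokens + excluded.cache_read_tokens,
       cache_creation_tokens = cache_creation_tokens + excluded.cache_creation_tokens,
       latency_ms = latency_ms + excluded.latency_ms`,
    [day, r.requests, r.inputTokens, r.outputTokens, r.cachedTokens,
     r.cacheReadTokens, r.cacheCreationTokens, r.latencyMs]
  );
}
```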
@@ -12573,9 +12583,9 @@ async function registerMessagesRoute(app) {
   }
   const totalLatencyMs = Date.now() - requestStart;
   const ttftMs = firstTokenAt2 ? firstTokenAt2 - requestStart : null;
-  const cached2 = resolveCachedTokens(lastUsagePayload);
+  const cached = resolveCachedTokens(lastUsagePayload);
   if (usageCached2 === null) {
-    usageCached2 = cached2.read + cached2.creation;
+    usageCached2 = cached.read + cached.creation;
   }
   logUsage("stream.anthropic.final", {
     input: usagePrompt2,
@@ -12586,6 +12596,8 @@ async function registerMessagesRoute(app) {
     inputTokens: usagePrompt2,
     outputTokens: usageCompletion2,
     cachedTokens: usageCached2,
+    cacheReadTokens: cached.read,
+    cacheCreationTokens: cached.creation,
     ttftMs,
     tpotMs: computeTpot(totalLatencyMs, usageCompletion2, {
       streaming: true,
@@ -12598,8 +12610,8 @@ async function registerMessagesRoute(app) {
     inputTokens: usagePrompt2,
     outputTokens: usageCompletion2,
     cachedTokens: usageCached2,
-    cacheReadTokens: cached2.read,
-    cacheCreationTokens: cached2.creation,
+    cacheReadTokens: cached.read,
+    cacheCreationTokens: cached.creation,
     latencyMs: totalLatencyMs
   });
   if (storeResponsePayloads) {
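In the Anthropic streaming path the cache split is resolved once from the last usage payload observed on the stream, then reused for both the request log and the stats record. A condensed sketch of that end-of-stream accounting — variable names mirror the diff, and the stream state is passed in explicitly here only to keep the sketch self-contained (in the bundle these are closure variables):

```js
// Condensed end-of-stream accounting for the Anthropic streaming path,
// mirroring the hunks above. finalizeAnthropicStream is a hypothetical
// wrapper; in the bundle this logic lives inline in the route handler.
async function finalizeAnthropicStream(state, helpers) {
  const { requestStart, firstTokenAt2, usagePrompt2, usageCompletion2, lastUsagePayload, logId } = state;
  const { resolveCachedTokens, updateLogTokens, computeTpot } = helpers;
  let { usageCached2 } = state;
  const totalLatencyMs = Date.now() - requestStart;
  const ttftMs = firstTokenAt2 ? firstTokenAt2 - requestStart : null;
  const cached = resolveCachedTokens(lastUsagePayload);
  if (usageCached2 === null) {
    // Fall back to the derived total only when no stream event reported one.
    usageCached2 = cached.read + cached.creation;
  }
  await updateLogTokens(logId, {
    inputTokens: usagePrompt2,
    outputTokens: usageCompletion2,
    cachedTokens: usageCached2,
    cacheReadTokens: cached.read,         // new columns in 0.5.0
    cacheCreationTokens: cached.creation,
    ttftMs,
    tpotMs: computeTpot(totalLatencyMs, usageCompletion2, { streaming: true }),
  });
}
```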
@@ -12759,6 +12771,8 @@ data: ${JSON.stringify(data)}
     inputTokens: finalPromptTokens,
     outputTokens: finalCompletionTokens,
     cachedTokens: usageCached,
+    cacheReadTokens: 0,
+    cacheCreationTokens: 0,
     ttftMs,
     tpotMs: computeTpot(totalLatencyMs, finalCompletionTokens, {
       streaming: true,
@@ -12923,6 +12937,8 @@ data: ${JSON.stringify(data)}
     inputTokens: fallbackPrompt,
     outputTokens: fallbackCompletion,
     cachedTokens: usageCached,
+    cacheReadTokens: 0,
+    cacheCreationTokens: 0,
     ttftMs,
     tpotMs: computeTpot(totalLatencyMs, fallbackCompletion, {
       streaming: true,
@@ -13880,18 +13896,20 @@ async function registerOpenAiRoutes(app) {
   if (!Number.isFinite(inputTokens3) || inputTokens3 <= 0) {
     inputTokens3 = target.tokenEstimate ?? estimateTokens(normalized, target.modelId);
   }
-  const cached3 = resolveCachedTokens2(usagePayload2);
-  const cachedTokens3 = cached3.read + cached3.creation;
+  const cached2 = resolveCachedTokens2(usagePayload2);
+  const cachedTokens2 = cached2.read + cached2.creation;
   const latencyMs3 = Date.now() - requestStart;
   const openAIResponse = buildOpenAIResponseFromClaude(parsed, target.modelId, converted, {
     inputTokens: inputTokens3,
     outputTokens: outputTokens3,
-    cachedTokens: cachedTokens3
+    cachedTokens: cachedTokens2
   });
   await updateLogTokens(logId, {
     inputTokens: inputTokens3,
     outputTokens: outputTokens3,
-    cachedTokens: cachedTokens3,
+    cachedTokens: usageCached,
+    cacheReadTokens: cached2.read,
+    cacheCreationTokens: cached2.creation,
     ttftMs: latencyMs3,
     tpotMs: computeTpot2(latencyMs3, outputTokens3, { streaming: false })
   });
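The `computeTpot`/`computeTpot2`/`computeTpot3` helpers compute time-per-output-token; their bodies are not part of this diff. A common formulation, offered only as an assumption about what the helpers likely do:

```js
// Assumed formulation of time-per-output-token (TPOT); the real
// computeTpot* bodies are not included in this diff, and they accept
// extra options (e.g. reasoningTokens) not modeled here.
function computeTpot(latencyMs, outputTokens, { streaming = false, ttftMs = null } = {}) {
  if (!outputTokens || outputTokens <= 0) return null;
  if (streaming && ttftMs != null && outputTokens > 1) {
    // For streams, exclude time-to-first-token and average over the rest.
    return (latencyMs - ttftMs) / (outputTokens - 1);
  }
  // Non-streaming: simple average over all output tokens.
  return latencyMs / outputTokens;
}
```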
@@ -13900,7 +13918,9 @@ async function registerOpenAiRoutes(app) {
     requests: 1,
     inputTokens: inputTokens3,
     outputTokens: outputTokens3,
-    cachedTokens: cachedTokens3,
+    cachedTokens: usageCached,
+    cacheReadTokens: usageCacheRead,
+    cacheCreationTokens: usageCacheCreation,
     latencyMs: latencyMs3
   });
   if (storeResponsePayloads) {
@@ -13935,13 +13955,15 @@ async function registerOpenAiRoutes(app) {
     return 0;
   })();
   const outputTokens2 = baseOutputTokens + reasoningTokens2;
-  const cached2 = resolveCachedTokens2(usagePayload);
-  const cachedTokens2 = cached2.read + cached2.creation;
+  const cached = resolveCachedTokens2(usagePayload);
+  const cachedTokens = cached.read + cached.creation;
   const latencyMs2 = Date.now() - requestStart;
   await updateLogTokens(logId, {
     inputTokens: inputTokens2,
     outputTokens: outputTokens2,
-    cachedTokens: cachedTokens2,
+    cachedTokens: usageCached,
+    cacheReadTokens: cached.read,
+    cacheCreationTokens: cached.creation,
     ttftMs: usagePayload?.first_token_latency_ms ?? latencyMs2,
     tpotMs: usagePayload?.tokens_per_second ? computeTpot2(latencyMs2, outputTokens2, { streaming: false, reasoningTokens: reasoningTokens2 }) : null
   });
@@ -13983,6 +14005,8 @@ async function registerOpenAiRoutes(app) {
   let usagePrompt2 = null;
   let usageCompletion2 = null;
   let usageCached2 = null;
+  let usageCacheRead2 = 0;
+  let usageCacheCreation2 = 0;
   let lastUsagePayload = null;
   let firstTokenAt2 = null;
   let claudeMessageId = null;
@@ -14024,9 +14048,9 @@ async function registerOpenAiRoutes(app) {
   );
   if (usageCached2 == null) {
     const candidate = resolveCachedTokens2(usagePayload);
-    if (candidate != null) {
-      usageCached2 = candidate;
-    }
+    usageCacheRead2 = candidate.read;
+    usageCacheCreation2 = candidate.creation;
+    usageCached2 = candidate.read + candidate.creation;
   }
   lastUsagePayload = usagePayload;
 };
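The streaming handlers now capture the read/creation split the first time a usage payload arrives, instead of storing a single opaque number. A sketch of that usage tracker — names mirror the diff, but the factory wrapper is added here to keep the sketch self-contained (in the bundle these are closure variables):

```js
// Sketch of the streaming usage tracker after this change. Names mirror
// the diff; createUsageTracker is a hypothetical wrapper.
function createUsageTracker(resolveCachedTokens2) {
  let usageCached2 = null;
  let usageCacheRead2 = 0;
  let usageCacheCreation2 = 0;
  let lastUsagePayload = null;
  return {
    onUsage(usagePayload) {
      if (usageCached2 == null) {
        // resolveCachedTokens2 now always returns { read, creation }, so
        // the old `candidate != null` guard was dropped.
        const candidate = resolveCachedTokens2(usagePayload);
        usageCacheRead2 = candidate.read;
        usageCacheCreation2 = candidate.creation;
        usageCached2 = candidate.read + candidate.creation;
      }
      lastUsagePayload = usagePayload;
    },
    snapshot: () => ({ usageCached2, usageCacheRead2, usageCacheCreation2, lastUsagePayload }),
  };
}
```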
@@ -14297,7 +14321,8 @@ async function registerOpenAiRoutes(app) {
   ensureCreatedSent();
   let finalPromptTokens = typeof usagePrompt2 === "number" && usagePrompt2 > 0 ? usagePrompt2 : target.tokenEstimate ?? estimateTokens(normalized, target.modelId);
   let finalCompletionTokens = typeof usageCompletion2 === "number" && usageCompletion2 > 0 ? usageCompletion2 : aggregatedText ? estimateTextTokens(aggregatedText, target.modelId) : 0;
-  const finalCachedTokens = usageCached2 != null ? usageCached2 : resolveCachedTokens2(lastUsagePayload);
+  const finalCachedResult = usageCached2 != null ? { read: usageCacheRead2, creation: usageCacheCreation2 } : resolveCachedTokens2(lastUsagePayload);
+  const finalCachedTokens = finalCachedResult.read + finalCachedResult.creation;
   const totalLatencyMs = Date.now() - requestStart;
   const ttftMs = firstTokenAt2 ? firstTokenAt2 - requestStart : null;
   const openAIResponse = buildOpenAIResponseFromClaude(claudeMessage, target.modelId, converted, {
@@ -14328,6 +14353,8 @@ async function registerOpenAiRoutes(app) {
     inputTokens: finalPromptTokens,
     outputTokens: finalCompletionTokens,
     cachedTokens: finalCachedTokens ?? null,
+    cacheReadTokens: 0,
+    cacheCreationTokens: 0,
     ttftMs,
     tpotMs: computeTpot2(totalLatencyMs, finalCompletionTokens, {
       streaming: true,
@@ -14339,9 +14366,9 @@ async function registerOpenAiRoutes(app) {
     requests: 1,
     inputTokens: finalPromptTokens,
     outputTokens: finalCompletionTokens,
-    cachedTokens: usageCached2,
-    cacheReadTokens: cached.read,
-    cacheCreationTokens: cached.creation,
+    cachedTokens: finalCachedTokens,
+    cacheReadTokens: finalCachedResult.read,
+    cacheCreationTokens: finalCachedResult.creation,
     latencyMs: totalLatencyMs
   });
   if (storeResponsePayloads && capturedResponseChunks2) {
@@ -14361,6 +14388,8 @@ async function registerOpenAiRoutes(app) {
   let usageCompletion = null;
   let usageReasoning = null;
   let usageCached = null;
+  let usageCacheRead = 0;
+  let usageCacheCreation = 0;
   let firstTokenAt = null;
   let chunkCount = 0;
   const capturedResponseChunks = storeResponsePayloads ? [] : null;
@@ -14400,7 +14429,10 @@ async function registerOpenAiRoutes(app) {
     usageReasoning
   );
   if (usageCached == null) {
-    usageCached = resolveCachedTokens2(usagePayload);
+    const cachedResult = resolveCachedTokens2(usagePayload);
+    usageCacheRead = cachedResult.read;
+    usageCacheCreation = cachedResult.creation;
+    usageCached = cachedResult.read + cachedResult.creation;
   }
   if (OPENAI_DEBUG) {
     debugLog("usage payload received", usagePayload);
@@ -14492,6 +14524,8 @@ async function registerOpenAiRoutes(app) {
     inputTokens,
     outputTokens,
     cachedTokens: usageCached,
+    cacheReadTokens: 0,
+    cacheCreationTokens: 0,
     ttftMs: firstTokenAt ? firstTokenAt - requestStart : null,
     tpotMs: computeTpot2(latencyMs, outputTokens, {
       streaming: true,
@@ -14749,13 +14783,15 @@ async function registerOpenAiRoutes(app) {
     inputTokens: inputTokens3,
     outputTokens: outputTokens3
   });
-  const cached3 = resolveCachedTokens2(usagePayload2);
-  const cachedTokens3 = cached3.read + cached3.creation;
+  const cached2 = resolveCachedTokens2(usagePayload2);
+  const cachedTokens2 = cached2.read + cached2.creation;
   const latencyMs3 = Date.now() - requestStart;
   await updateLogTokens(logId, {
     inputTokens: inputTokens3,
     outputTokens: outputTokens3,
-    cachedTokens: cachedTokens3,
+    cachedTokens: usageCached,
+    cacheReadTokens: cached2.read,
+    cacheCreationTokens: cached2.creation,
     ttftMs: latencyMs3,
     tpotMs: computeTpot2(latencyMs3, outputTokens3, { streaming: false })
   });
@@ -14764,7 +14800,9 @@ async function registerOpenAiRoutes(app) {
     requests: 1,
     inputTokens: inputTokens3,
     outputTokens: outputTokens3,
-    cachedTokens: cachedTokens3,
+    cachedTokens: usageCached,
+    cacheReadTokens: usageCacheRead,
+    cacheCreationTokens: usageCacheCreation,
     latencyMs: latencyMs3
   });
   if (storeResponsePayloads) {
@@ -14794,13 +14832,15 @@ async function registerOpenAiRoutes(app) {
     })(),
     target.modelId
   );
-  const cached2 = resolveCachedTokens2(usagePayload);
-  const cachedTokens2 = cached2.read + cached2.creation;
+  const cached = resolveCachedTokens2(usagePayload);
+  const cachedTokens = cached.read + cached.creation;
   const latencyMs2 = Date.now() - requestStart;
   await updateLogTokens(logId, {
     inputTokens: inputTokens2,
     outputTokens: outputTokens2,
-    cachedTokens: cachedTokens2,
+    cachedTokens: usageCached,
+    cacheReadTokens: cached.read,
+    cacheCreationTokens: cached.creation,
     ttftMs: usagePayload?.first_token_latency_ms ?? latencyMs2,
     tpotMs: usagePayload?.tokens_per_second ? computeTpot2(latencyMs2, outputTokens2, { streaming: false }) : null
   });
@@ -14842,6 +14882,8 @@ async function registerOpenAiRoutes(app) {
   let usagePrompt2 = null;
   let usageCompletion2 = null;
   let usageCached2 = null;
+  let usageCacheRead2 = 0;
+  let usageCacheCreation2 = 0;
   let lastUsagePayload = null;
   let firstTokenAt2 = null;
   let claudeStopReason = null;
@@ -14884,9 +14926,9 @@ async function registerOpenAiRoutes(app) {
   );
   if (usageCached2 == null) {
     const candidate = resolveCachedTokens2(usagePayload);
-    if (candidate != null) {
-      usageCached2 = candidate;
-    }
+    usageCacheRead2 = candidate.read;
+    usageCacheCreation2 = candidate.creation;
+    usageCached2 = candidate.read + candidate.creation;
   }
   lastUsagePayload = usagePayload;
 };
@@ -15197,7 +15239,8 @@ async function registerOpenAiRoutes(app) {
   }
   const finalPromptTokens = typeof usagePrompt2 === "number" && usagePrompt2 > 0 ? usagePrompt2 : target.tokenEstimate ?? estimateTokens(normalized, target.modelId);
   const finalCompletionTokens = typeof usageCompletion2 === "number" && usageCompletion2 > 0 ? usageCompletion2 : aggregatedText ? estimateTextTokens(aggregatedText, target.modelId) : 0;
-  const finalCachedTokens = usageCached2 != null ? usageCached2 : resolveCachedTokens2(lastUsagePayload);
+  const finalCachedResult = usageCached2 != null ? { read: usageCacheRead2, creation: usageCacheCreation2 } : resolveCachedTokens2(lastUsagePayload);
+  const finalCachedTokens = finalCachedResult.read + finalCachedResult.creation;
   const totalLatencyMs = Date.now() - requestStart;
   const ttftMs = firstTokenAt2 ? firstTokenAt2 - requestStart : null;
   const finishReason = mapClaudeStopReasonToChatFinish(claudeStopReason) ?? "stop";
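At stream end the cached figures are reconciled: if a usage event already set the totals, the captured split is reused; otherwise the split is re-derived from the last usage payload. Note that the 0.4.3 expression could yield either a number (`usageCached2`) or a `{ read, creation }` object (`resolveCachedTokens2`) depending on the branch; 0.5.0 normalizes both branches to the object and derives the total explicitly. A condensed sketch, with the reconciliation pulled into a hypothetical helper:

```js
// Sketch of the end-of-stream cache reconciliation introduced here.
// reconcileCachedTokens is a hypothetical wrapper; in the bundle this is
// two inline const declarations.
function reconcileCachedTokens(usageCached2, usageCacheRead2, usageCacheCreation2, lastUsagePayload, resolveCachedTokens2) {
  const finalCachedResult =
    usageCached2 != null
      ? { read: usageCacheRead2, creation: usageCacheCreation2 } // captured during the stream
      : resolveCachedTokens2(lastUsagePayload);                  // derived from the last payload
  return { ...finalCachedResult, total: finalCachedResult.read + finalCachedResult.creation };
}
```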
@@ -15230,6 +15273,8 @@ async function registerOpenAiRoutes(app) {
     inputTokens: finalPromptTokens,
     outputTokens: finalCompletionTokens,
     cachedTokens: finalCachedTokens ?? null,
+    cacheReadTokens: 0,
+    cacheCreationTokens: 0,
     ttftMs,
     tpotMs: computeTpot2(totalLatencyMs, finalCompletionTokens, {
       streaming: true,
@@ -15241,9 +15286,9 @@ async function registerOpenAiRoutes(app) {
     requests: 1,
     inputTokens: finalPromptTokens,
     outputTokens: finalCompletionTokens,
-    cachedTokens: usageCached2,
-    cacheReadTokens: cached.read,
-    cacheCreationTokens: cached.creation,
+    cachedTokens: finalCachedTokens,
+    cacheReadTokens: finalCachedResult.read,
+    cacheCreationTokens: finalCachedResult.creation,
     latencyMs: totalLatencyMs
   });
   if (storeResponsePayloads && capturedResponseChunks2) {
@@ -15262,6 +15307,8 @@ async function registerOpenAiRoutes(app) {
   let usagePrompt = null;
   let usageCompletion = null;
   let usageCached = null;
+  let usageCacheRead = 0;
+  let usageCacheCreation = 0;
   let firstTokenAt = null;
   const capturedResponseChunks = storeResponsePayloads ? [] : null;
   const replyClosed = () => {
@@ -15294,7 +15341,10 @@ async function registerOpenAiRoutes(app) {
     usageCompletion
   );
   if (usageCached == null) {
-    usageCached = resolveCachedTokens2(usagePayload);
+    const cachedResult = resolveCachedTokens2(usagePayload);
+    usageCacheRead = cachedResult.read;
+    usageCacheCreation = cachedResult.creation;
+    usageCached = cachedResult.read + cachedResult.creation;
   }
 };
 while (true) {
@@ -15369,6 +15419,8 @@ async function registerOpenAiRoutes(app) {
   inputTokens,
   outputTokens,
   cachedTokens: usageCached,
+  cacheReadTokens: 0,
+  cacheCreationTokens: 0,
   ttftMs: firstTokenAt ? firstTokenAt - requestStart : null,
   tpotMs: computeTpot2(latencyMs, outputTokens, {
     streaming: true,
@@ -17523,13 +17575,15 @@ async function handleAnthropicProtocol(request, reply, endpoint, endpointId, app) {
   const json = await new Response(upstream.body).json();
   const inputTokens = json.usage?.input_tokens ?? estimateTokens(normalized, target.modelId);
   const outputTokens = json.usage?.output_tokens ?? 0;
-  const cached2 = resolveCachedTokens3(json.usage);
-  const cachedTokens2 = cached2.read + cached2.creation;
+  const cached = resolveCachedTokens3(json.usage);
+  const cachedTokens = cached.read + cached.creation;
   const latencyMs = Date.now() - requestStart;
   await updateLogTokens(logId, {
     inputTokens,
     outputTokens,
-    cachedTokens: cachedTokens2,
+    cachedTokens: usageCached,
+    cacheReadTokens: cached.read,
+    cacheCreationTokens: cached.creation,
     ttftMs: latencyMs,
     tpotMs: computeTpot3(latencyMs, outputTokens, { streaming: false })
   });
@@ -17538,7 +17592,9 @@ async function handleAnthropicProtocol(request, reply, endpoint, endpointId, app) {
     requests: 1,
     inputTokens,
     outputTokens,
-    cachedTokens: cachedTokens2,
+    cachedTokens: usageCached,
+    cacheReadTokens: usageCacheRead,
+    cacheCreationTokens: usageCacheCreation,
     latencyMs
   });
   if (storeResponsePayloads) {
@@ -17566,6 +17622,8 @@ async function handleAnthropicProtocol(request, reply, endpoint, endpointId, app) {
   let usagePrompt = 0;
   let usageCompletion = 0;
   let usageCached = null;
+  let usageCacheRead = 0;
+  let usageCacheCreation = 0;
   let firstTokenAt = null;
   const capturedChunks = storeResponsePayloads ? [] : null;
   try {
@@ -17594,10 +17652,10 @@ async function handleAnthropicProtocol(request, reply, endpoint, endpointId, app) {
   if (parsed?.usage) {
     usagePrompt = parsed.usage.input_tokens ?? usagePrompt;
     usageCompletion = parsed.usage.output_tokens ?? usageCompletion;
-    const cached2 = resolveCachedTokens3(parsed.usage);
-    if (cached2 !== null) {
-      usageCached = cached2;
-    }
+    const cached = resolveCachedTokens3(parsed.usage);
+    usageCacheRead = cached.read;
+    usageCacheCreation = cached.creation;
+    usageCached = cached.read + cached.creation;
   }
   if (!firstTokenAt && (parsed?.type === "content_block_delta" || parsed?.delta?.text)) {
     firstTokenAt = Date.now();
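Anthropic streams report usage incrementally through SSE events, and the hunk above sits inside the loop that consumes them. A minimal sketch of that parsing in the shape the hunk implies — the `data: {...}` framing and the `state` object are assumptions; only the usage handling in the middle comes from the diff:

```js
// Minimal SSE usage-parsing sketch around the hunk above. consumeSseChunk
// and the state object are hypothetical; the usage handling mirrors the diff.
function consumeSseChunk(chunkText, state, resolveCachedTokens3) {
  for (const line of chunkText.split("\n")) {
    if (!line.startsWith("data: ")) continue;
    let parsed;
    try {
      parsed = JSON.parse(line.slice("data: ".length));
    } catch {
      continue; // skip keep-alives and partial frames
    }
    if (parsed?.usage) {
      state.usagePrompt = parsed.usage.input_tokens ?? state.usagePrompt;
      state.usageCompletion = parsed.usage.output_tokens ?? state.usageCompletion;
      const cached = resolveCachedTokens3(parsed.usage);
      state.usageCacheRead = cached.read;
      state.usageCacheCreation = cached.creation;
      state.usageCached = cached.read + cached.creation;
    }
    if (!state.firstTokenAt && (parsed?.type === "content_block_delta" || parsed?.delta?.text)) {
      state.firstTokenAt = Date.now(); // time-to-first-token marker
    }
  }
}
```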
@@ -17624,6 +17682,8 @@ async function handleAnthropicProtocol(request, reply, endpoint, endpointId, app) {
   inputTokens: usagePrompt,
   outputTokens: usageCompletion,
   cachedTokens: usageCached,
+  cacheReadTokens: usageCacheRead,
+  cacheCreationTokens: usageCacheCreation,
   ttftMs,
   tpotMs: computeTpot3(totalLatencyMs, usageCompletion, {
     streaming: true,
@@ -17636,8 +17696,6 @@ async function handleAnthropicProtocol(request, reply, endpoint, endpointId, app) {
   inputTokens: usagePrompt,
   outputTokens: usageCompletion,
   cachedTokens: usageCached,
-  cacheReadTokens: cached.read,
-  cacheCreationTokens: cached.creation,
   latencyMs: totalLatencyMs
 });
 if (storeResponsePayloads && capturedChunks) {
@@ -17817,13 +17875,15 @@ async function handleOpenAIChatProtocol(request, reply, endpoint, endpointId, app) {
   const usagePayload = json?.usage ?? null;
   const inputTokens2 = usagePayload?.prompt_tokens ?? usagePayload?.input_tokens ?? target.tokenEstimate ?? estimateTokens(normalized, target.modelId);
   const outputTokens2 = usagePayload?.completion_tokens ?? usagePayload?.output_tokens ?? estimateTextTokens(json?.choices?.[0]?.message?.content ?? "", target.modelId);
-  const cached2 = resolveCachedTokens3(usagePayload);
-  const cachedTokens2 = cached2.read + cached2.creation;
+  const cached = resolveCachedTokens3(usagePayload);
+  const cachedTokens = cached.read + cached.creation;
   const latencyMs2 = Date.now() - requestStart;
   await updateLogTokens(logId, {
     inputTokens: inputTokens2,
     outputTokens: outputTokens2,
-    cachedTokens: cachedTokens2,
+    cachedTokens: usageCached,
+    cacheReadTokens: cached.read,
+    cacheCreationTokens: cached.creation,
     ttftMs: latencyMs2,
     tpotMs: computeTpot3(latencyMs2, outputTokens2, { streaming: false })
   });
@@ -17832,7 +17892,9 @@ async function handleOpenAIChatProtocol(request, reply, endpoint, endpointId, app) {
     requests: 1,
     inputTokens: inputTokens2,
     outputTokens: outputTokens2,
-    cachedTokens: cachedTokens2,
+    cachedTokens: usageCached,
+    cacheReadTokens: usageCacheRead,
+    cacheCreationTokens: usageCacheCreation,
     latencyMs: latencyMs2
   });
   if (storeResponsePayloads) {
@@ -17857,6 +17919,8 @@ async function handleOpenAIChatProtocol(request, reply, endpoint, endpointId, app) {
   let usagePrompt = null;
   let usageCompletion = null;
   let usageCached = null;
+  let usageCacheRead = 0;
+  let usageCacheCreation = 0;
   let firstTokenAt = null;
   const capturedChunks = storeResponsePayloads ? [] : null;
   try {
@@ -17886,7 +17950,10 @@ async function handleOpenAIChatProtocol(request, reply, endpoint, endpointId, app) {
   if (usage) {
     usagePrompt = usage.prompt_tokens ?? usage.input_tokens ?? usagePrompt;
     usageCompletion = usage.completion_tokens ?? usage.output_tokens ?? usageCompletion;
-    usageCached = usage.cached_tokens ?? usageCached;
+    const cachedResult = resolveCachedTokens3(usage);
+    usageCacheRead = cachedResult.read;
+    usageCacheCreation = cachedResult.creation;
+    usageCached = cachedResult.read + cachedResult.creation;
   }
 } catch {
 }
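OpenAI-compatible endpoints disagree on usage field names (`prompt_tokens` vs `input_tokens`, `cached_tokens` at the top level vs inside `prompt_tokens_details`), which is why the handler chains `??` fallbacks and now routes cache counts through `resolveCachedTokens3`. A small normalizer in the same spirit — the field list is an assumption covering common variants, not the package's own helper:

```js
// Small usage normalizer in the same spirit as the hunk above. The field
// list is an assumption covering common OpenAI-compatible variants;
// normalizeUsage is hypothetical, not the package's helper.
function normalizeUsage(usage) {
  if (!usage) return null;
  return {
    prompt: usage.prompt_tokens ?? usage.input_tokens ?? 0,
    completion: usage.completion_tokens ?? usage.output_tokens ?? 0,
    cacheRead:
      usage.prompt_tokens_details?.cached_tokens ?? usage.cached_tokens ?? 0,
    cacheCreation: usage.cache_creation_input_tokens ?? 0,
  };
}
```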
@@ -17908,6 +17975,8 @@ async function handleOpenAIChatProtocol(request, reply, endpoint, endpointId, app) {
   inputTokens,
   outputTokens,
   cachedTokens: usageCached,
+  cacheReadTokens: usageCacheRead,
+  cacheCreationTokens: usageCacheCreation,
   ttftMs: firstTokenAt ? firstTokenAt - requestStart : null,
   tpotMs: computeTpot3(latencyMs, outputTokens, {
     streaming: true,
@@ -18098,13 +18167,15 @@ async function handleOpenAIResponsesProtocol(request, reply, endpoint, endpointId, app) {
   const inputTokens2 = usagePayload?.prompt_tokens ?? usagePayload?.input_tokens ?? target.tokenEstimate ?? estimateTokens(normalized, target.modelId);
   const content = json?.response?.body?.content ?? json?.choices?.[0]?.message?.content ?? "";
   const outputTokens2 = usagePayload?.completion_tokens ?? usagePayload?.output_tokens ?? estimateTextTokens(content, target.modelId);
-  const cached2 = resolveCachedTokens3(usagePayload);
-  const cachedTokens2 = cached2.read + cached2.creation;
+  const cached = resolveCachedTokens3(usagePayload);
+  const cachedTokens = cached.read + cached.creation;
   const latencyMs2 = Date.now() - requestStart;
   await updateLogTokens(logId, {
     inputTokens: inputTokens2,
     outputTokens: outputTokens2,
-    cachedTokens: cachedTokens2,
+    cachedTokens: usageCached,
+    cacheReadTokens: cached.read,
+    cacheCreationTokens: cached.creation,
     ttftMs: latencyMs2,
     tpotMs: computeTpot3(latencyMs2, outputTokens2, { streaming: false })
   });
@@ -18113,7 +18184,9 @@ async function handleOpenAIResponsesProtocol(request, reply, endpoint, endpointId, app) {
     requests: 1,
     inputTokens: inputTokens2,
     outputTokens: outputTokens2,
-    cachedTokens: cachedTokens2,
+    cachedTokens: usageCached,
+    cacheReadTokens: usageCacheRead,
+    cacheCreationTokens: usageCacheCreation,
     latencyMs: latencyMs2
   });
   if (storeResponsePayloads) {
@@ -18138,6 +18211,8 @@ async function handleOpenAIResponsesProtocol(request, reply, endpoint, endpointId, app) {
   let usagePrompt = null;
   let usageCompletion = null;
   let usageCached = null;
+  let usageCacheRead = 0;
+  let usageCacheCreation = 0;
   let firstTokenAt = null;
   const capturedChunks = storeResponsePayloads ? [] : null;
   try {
@@ -18167,7 +18242,10 @@ async function handleOpenAIResponsesProtocol(request, reply, endpoint, endpointId, app) {
   if (usage) {
     usagePrompt = usage.prompt_tokens ?? usage.input_tokens ?? usagePrompt;
     usageCompletion = usage.completion_tokens ?? usage.output_tokens ?? usageCompletion;
-    usageCached = usage.cached_tokens ?? usageCached;
+    const cachedResult = resolveCachedTokens3(usage);
+    usageCacheRead = cachedResult.read;
+    usageCacheCreation = cachedResult.creation;
+    usageCached = cachedResult.read + cachedResult.creation;
   }
 } catch {
 }
@@ -18189,6 +18267,8 @@ async function handleOpenAIResponsesProtocol(request, reply, endpoint, endpointId, app) {
   inputTokens,
   outputTokens,
   cachedTokens: usageCached,
+  cacheReadTokens: usageCacheRead,
+  cacheCreationTokens: usageCacheCreation,
   ttftMs: firstTokenAt ? firstTokenAt - requestStart : null,
   tpotMs: computeTpot3(latencyMs, outputTokens, {
     streaming: true,
package/src/web/dist/assets/{About-BWcLLdLY.js → About-DK242vw9.js} CHANGED
@@ -1,4 +1,4 @@
-import{c as p,u as v,a as k,r,j as e,d as o,U as j,m as i}from"./index-BK1UNVMz.js";import{u as N}from"./useApiQuery-BNTE55UK.js";import{P as w,a as d}from"./PageSection-B08EcVAN.js";import"./Input-BdyQWPOU.js";import{B as b}from"./Button-CZXniSHM.js";import{I as y}from"./info-BTcWJb9B.js";/**
+import{c as p,u as v,a as k,r,j as e,d as o,U as j,m as i}from"./index-agm-2asf.js";import{u as N}from"./useApiQuery-ns68sM2H.js";import{P as w,a as d}from"./PageSection-Dzvd3cKD.js";import"./Input-B-P-J4xQ.js";import{B as b}from"./Button-Bnnxe9ep.js";import{I as y}from"./info-CfAuBePJ.js";/**
 * @license lucide-react v0.344.0 - ISC
 *
 * This source code is licensed under the ISC license.
@@ -8,4 +8,4 @@
 *
 * This source code is licensed under the ISC license.
 * See the LICENSE file in the root directory of this source tree.
(The remaining change in this file is a single long minified line, the bundled About page component. It is identical between versions except for the embedded version constant, I="0.4.3" → I="0.5.0", and the build metadata, VITE_BUILD_TIME:"2025-10-30T03:41:36.581Z" → "2025-10-31T04:22:15.501Z"; VITE_NODE_VERSION stays "v22.14.0".)