ghc-proxy 0.4.2 → 0.5.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/README.md CHANGED
@@ -96,14 +96,14 @@ See the [Claude Code settings docs](https://docs.anthropic.com/en/docs/claude-co
96
96
  ghc-proxy sits between your tools and the GitHub Copilot API:
97
97
 
98
98
  ```text
99
- ┌─────────────┐ ┌───────────┐ ┌──────────────────────┐
100
- │ Claude Code │──────│ ghc-proxy │──────│ api.githubcopilot.com│
101
- │ Cursor │ │ :4141 │ │
102
- │ Any client │ │ │ │
103
- └─────────────┘ └───────────┘ └──────────────────────┘
104
- OpenAI or Translates GitHub Copilot
105
- Anthropic between API
106
- format formats
99
+ ┌──────────────┐ ┌───────────┐ ┌───────────────────────┐
100
+ │ Claude Code │──────│ ghc-proxy │──────│ api.githubcopilot.com│
101
+ │ Cursor │ │ :4141 │ │
102
+ │ Any client │ │ │ │
103
+ └──────────────┘ └───────────┘ └───────────────────────┘
104
+ OpenAI or Translates GitHub Copilot
105
+ Anthropic between API
106
+ format formats
107
107
  ```
108
108
 
109
109
  The proxy authenticates with GitHub using the [device code OAuth flow](https://docs.github.com/en/apps/oauth-apps/building-oauth-apps/authorizing-oauth-apps#device-flow) (the same flow VS Code uses), then exchanges the GitHub token for a short-lived Copilot token that auto-refreshes.
package/dist/main.mjs CHANGED
@@ -5380,7 +5380,12 @@ const configFileSchema = object({
5380
5380
  useFunctionApplyPatch: boolean().optional(),
5381
5381
  responsesApiContextManagementModels: array(string()).optional(),
5382
5382
  modelReasoningEfforts: record(string(), reasoningEffortSchema).optional(),
5383
- contextUpgrade: boolean().optional()
5383
+ modelRewrites: array(object({
5384
+ from: string(),
5385
+ to: string()
5386
+ })).optional(),
5387
+ contextUpgrade: boolean().optional(),
5388
+ contextUpgradeTokenThreshold: number().int().positive().optional()
5384
5389
  }).passthrough();
5385
5390
  const KNOWN_CONFIG_KEYS = new Set(Object.keys(configFileSchema.shape));
5386
5391
  let cachedConfig = {};
@@ -5388,6 +5393,7 @@ const DEFAULT_REASONING_EFFORT = "high";
5388
5393
  const DEFAULT_USE_FUNCTION_APPLY_PATCH = true;
5389
5394
  const DEFAULT_COMPACT_USE_SMALL_MODEL = false;
5390
5395
  const DEFAULT_CONTEXT_UPGRADE = true;
5396
+ const DEFAULT_CONTEXT_UPGRADE_TOKEN_THRESHOLD = 16e4;
5391
5397
  async function readConfig() {
5392
5398
  try {
5393
5399
  const content = await fs.readFile(PATHS.CONFIG_PATH, "utf8");
@@ -5439,6 +5445,9 @@ function isResponsesApiContextManagementModel(model) {
5439
5445
  function shouldContextUpgrade() {
5440
5446
  return cachedConfig.contextUpgrade ?? DEFAULT_CONTEXT_UPGRADE;
5441
5447
  }
5448
+ function getContextUpgradeTokenThreshold() {
5449
+ return cachedConfig.contextUpgradeTokenThreshold ?? DEFAULT_CONTEXT_UPGRADE_TOKEN_THRESHOLD;
5450
+ }
5442
5451
  function getReasoningEffortForModel(model) {
5443
5452
  return cachedConfig.modelReasoningEfforts?.[model] ?? DEFAULT_REASONING_EFFORT;
5444
5453
  }
@@ -6216,7 +6225,7 @@ const checkUsage = defineCommand({
6216
6225
 
6217
6226
  //#endregion
6218
6227
  //#region src/lib/version.ts
6219
- const VERSION = "0.4.2";
6228
+ const VERSION = "0.5.0";
6220
6229
 
6221
6230
  //#endregion
6222
6231
  //#region src/debug.ts
@@ -46561,12 +46570,20 @@ function colorizeMethod(method) {
46561
46570
  }
46562
46571
  function formatModelMapping(info) {
46563
46572
  if (!info) return "";
46564
- const { originalModel, mappedModel } = info;
46565
- if (!originalModel && !mappedModel) return "";
46566
- const original = originalModel ?? "-";
46567
- const mapped = mappedModel ?? "-";
46568
- if (original === mapped) return ` ${colorize("dim", "model=")}${colorize("blueBright", original)}`;
46569
- return ` ${colorize("dim", "model=")}${colorize("blueBright", original)} ${colorize("dim", "→")} ${colorize("greenBright", mapped)}`;
46573
+ const { originalModel, rewrittenModel, mappedModel } = info;
46574
+ if (!originalModel && !rewrittenModel && !mappedModel) return "";
46575
+ const parts = [];
46576
+ const displayOriginal = originalModel ?? "-";
46577
+ parts.push(colorize("blueBright", displayOriginal));
46578
+ if (rewrittenModel && rewrittenModel !== displayOriginal) {
46579
+ parts.push(colorize("dim", "~>"));
46580
+ parts.push(colorize("cyanBright", rewrittenModel));
46581
+ }
46582
+ if (mappedModel && mappedModel !== (rewrittenModel ?? displayOriginal)) {
46583
+ parts.push(colorize("dim", "→"));
46584
+ parts.push(colorize("greenBright", mappedModel));
46585
+ }
46586
+ return ` ${colorize("dim", "model=")}${parts.join(" ")}`;
46570
46587
  }
46571
46588
  /**
46572
46589
  * Request logging function.
@@ -47208,10 +47225,10 @@ var AnthropicStreamTranslator = class {
47208
47225
  }
47209
47226
  onChunk(chunk) {
47210
47227
  const deltas = this.toConversationDeltas(chunk);
47228
+ if (chunk.usage) this.state.lastUsage = chunk.usage;
47211
47229
  if (deltas.length === 0) return [];
47212
47230
  const events = [];
47213
47231
  this.appendMessageStart(events, chunk);
47214
- this.state.lastUsage = chunk.usage;
47215
47232
  for (const delta of deltas) switch (delta.kind) {
47216
47233
  case "message_start": break;
47217
47234
  case "thinking_delta":
@@ -47244,8 +47261,7 @@ var AnthropicStreamTranslator = class {
47244
47261
  ...delta.metadata
47245
47262
  };
47246
47263
  this.state.pendingStopReason = delta.stopReason;
47247
- if (delta.usage) this.state.lastUsage = delta.usage;
47248
- events.push(...this.onDone());
47264
+ this.closeAllBlocks(events);
47249
47265
  break;
47250
47266
  }
47251
47267
  return events;
@@ -47253,9 +47269,7 @@ var AnthropicStreamTranslator = class {
47253
47269
  onDone() {
47254
47270
  if (!this.state.messageStartSent || this.state.messageStopSent) return [];
47255
47271
  const events = [];
47256
- this.thinkingWriter.close(events);
47257
- this.textWriter.close(events);
47258
- this.toolWriter.closeAll(events);
47272
+ this.closeAllBlocks(events);
47259
47273
  events.push({
47260
47274
  type: "message_delta",
47261
47275
  delta: {
@@ -47276,6 +47290,11 @@ var AnthropicStreamTranslator = class {
47276
47290
  }
47277
47291
  }];
47278
47292
  }
47293
+ closeAllBlocks(events) {
47294
+ this.thinkingWriter.close(events);
47295
+ this.textWriter.close(events);
47296
+ this.toolWriter.closeAll(events);
47297
+ }
47279
47298
  appendMessageStart(events, chunk) {
47280
47299
  if (this.state.messageStartSent) return;
47281
47300
  events.push({
@@ -47940,6 +47959,106 @@ function modelSupportsOutputConfig(model) {
47940
47959
  return !MODELS_REJECTING_OUTPUT_CONFIG.has(model.id);
47941
47960
  }
47942
47961
 
47962
+ //#endregion
47963
+ //#region src/lib/model-rewrite.ts
47964
+ /**
47965
+ * Unified model rewrite: user rules → built-in normalization → pass-through.
47966
+ * Call once at handler entry, before any model lookup or policy.
47967
+ */
47968
+ function rewriteModel(modelId) {
47969
+ const userRules = getCachedConfig().modelRewrites;
47970
+ if (userRules) {
47971
+ for (const rule of userRules) if (matchesGlob(rule.from, modelId)) return {
47972
+ originalModel: modelId,
47973
+ model: normalizeToKnownModel(rule.to) ?? rule.to
47974
+ };
47975
+ }
47976
+ const normalized = normalizeToKnownModel(modelId);
47977
+ if (normalized && normalized !== modelId) return {
47978
+ originalModel: modelId,
47979
+ model: normalized
47980
+ };
47981
+ return {
47982
+ originalModel: modelId,
47983
+ model: modelId
47984
+ };
47985
+ }
47986
+ /**
47987
+ * Apply model rewrite to a mutable model field and log if changed.
47988
+ * Returns the rewrite result for downstream use.
47989
+ */
47990
+ function applyModelRewrite(payload) {
47991
+ const result = rewriteModel(payload.model);
47992
+ if (result.model !== result.originalModel) {
47993
+ consola.debug(`Model rewritten: ${result.originalModel} ~> ${result.model}`);
47994
+ payload.model = result.model;
47995
+ }
47996
+ return result;
47997
+ }
47998
+ const DOT_RE = /\./g;
47999
+ /**
48000
+ * Resolve a model ID against Copilot's cached model list using
48001
+ * dash/dot equivalence. Returns the canonical ID if found.
48002
+ */
48003
+ function normalizeToKnownModel(modelId) {
48004
+ const models = state.cache.models?.data;
48005
+ if (!models) return void 0;
48006
+ if (models.some((m) => m.id === modelId)) return modelId;
48007
+ const normalized = modelId.replace(DOT_RE, "-");
48008
+ for (const model of models) if (model.id.replace(DOT_RE, "-") === normalized) return model.id;
48009
+ }
48010
+ const GLOB_SPECIAL_RE = /[.+^${}()|[\]\\]/g;
48011
+ const GLOB_STAR_RE = /\*/g;
48012
+ function matchesGlob(pattern, value) {
48013
+ if (!pattern.includes("*")) return pattern === value;
48014
+ return new RegExp(`^${pattern.replace(GLOB_SPECIAL_RE, "\\$&").replace(GLOB_STAR_RE, ".*")}$`).test(value);
48015
+ }
48016
+ /** Data-driven upgrade rules. Add new entries to extend. */
48017
+ const CONTEXT_UPGRADE_RULES = [{
48018
+ from: "claude-opus-4.6",
48019
+ to: "claude-opus-4.6-1m"
48020
+ }];
48021
+ /** Pre-computed set for fast model eligibility checks (avoids token estimation on non-eligible models). */
48022
+ const UPGRADE_ELIGIBLE_MODELS = new Set(CONTEXT_UPGRADE_RULES.map((r) => r.from));
48023
+ /**
48024
+ * Quick check: does this model have any context-upgrade rules?
48025
+ * Use to skip expensive token estimation for ineligible models.
48026
+ */
48027
+ function hasContextUpgradeRule(model) {
48028
+ return UPGRADE_ELIGIBLE_MODELS.has(model);
48029
+ }
48030
+ /** Find the upgrade rule for a model whose target exists in Copilot's model list. */
48031
+ function findUpgradeRule(model) {
48032
+ for (const rule of CONTEXT_UPGRADE_RULES) if (model === rule.from && findModelById(rule.to)) return rule;
48033
+ }
48034
+ /**
48035
+ * Proactive: resolve the upgrade target model for a given model + token count.
48036
+ * Returns the target model ID, or undefined if no upgrade applies.
48037
+ */
48038
+ function resolveContextUpgrade(model, estimatedTokens) {
48039
+ const rule = findUpgradeRule(model);
48040
+ if (rule && estimatedTokens > getContextUpgradeTokenThreshold()) return rule.to;
48041
+ }
48042
+ /**
48043
+ * Reactive: get the upgrade target for a model on context-length error.
48044
+ * Returns the target model ID, or undefined if no fallback applies.
48045
+ */
48046
+ function getContextUpgradeTarget(model) {
48047
+ return findUpgradeRule(model)?.to;
48048
+ }
48049
+ /** Context-length error detection with pattern matching */
48050
+ const CONTEXT_ERROR_PATTERNS = [
48051
+ /context.length/i,
48052
+ /too.long/i,
48053
+ /token.*(limit|maximum|exceed)/i,
48054
+ /(limit|maximum|exceed).*token/i
48055
+ ];
48056
+ function isContextLengthError(error) {
48057
+ if (!(error instanceof HTTPError) || error.status !== 400) return false;
48058
+ const message = error.body?.error?.message;
48059
+ return message ? CONTEXT_ERROR_PATTERNS.some((pattern) => pattern.test(message)) : false;
48060
+ }
48061
+
47943
48062
  //#endregion
47944
48063
  //#region src/lib/tokenizer.ts
47945
48064
  const ENCODING_MAP = {
@@ -48740,7 +48859,8 @@ async function handleCompletionCore({ body, signal, headers }) {
48740
48859
  const adapter = new OpenAIChatAdapter();
48741
48860
  let payload = parseOpenAIChatPayload(body);
48742
48861
  consola.debug("Request payload:", JSON.stringify(payload).slice(-400));
48743
- const originalModel = payload.model;
48862
+ const rewrite = applyModelRewrite(payload);
48863
+ const originalModel = rewrite.originalModel;
48744
48864
  const selectedModel = findModelById(payload.model);
48745
48865
  try {
48746
48866
  if (selectedModel) {
@@ -48761,6 +48881,7 @@ async function handleCompletionCore({ body, signal, headers }) {
48761
48881
  const plan = adapter.toCapiPlan(payload, { requestContext: readCapiRequestContext(headers) });
48762
48882
  const modelMapping = {
48763
48883
  originalModel,
48884
+ rewrittenModel: rewrite.model,
48764
48885
  mappedModel: plan.resolvedModel
48765
48886
  };
48766
48887
  const transport = new CopilotTransport(createCopilotClient());
@@ -48860,60 +48981,15 @@ async function handleCountTokensCore({ body, headers }) {
48860
48981
  return { input_tokens: finalTokenCount };
48861
48982
  }
48862
48983
 
48863
- //#endregion
48864
- //#region src/lib/context-upgrade.ts
48865
- /** Data-driven upgrade rules. Add new entries to extend. */
48866
- const CONTEXT_UPGRADE_RULES = [{
48867
- from: "claude-opus-4.6",
48868
- to: "claude-opus-4.6-1m",
48869
- tokenThreshold: 19e4
48870
- }];
48871
- /** Pre-computed set for fast model eligibility checks (avoids token estimation on non-eligible models). */
48872
- const UPGRADE_ELIGIBLE_MODELS = new Set(CONTEXT_UPGRADE_RULES.map((r) => r.from));
48873
- /**
48874
- * Quick check: does this model have any context-upgrade rules?
48875
- * Use to skip expensive token estimation for ineligible models.
48876
- */
48877
- function hasContextUpgradeRule(model) {
48878
- return UPGRADE_ELIGIBLE_MODELS.has(model);
48879
- }
48880
- /** Find the upgrade rule for a model whose target exists in Copilot's model list. */
48881
- function findUpgradeRule(model) {
48882
- for (const rule of CONTEXT_UPGRADE_RULES) if (model === rule.from && findModelById(rule.to)) return rule;
48883
- }
48884
- /**
48885
- * Proactive: resolve the upgrade target model for a given model + token count.
48886
- * Returns the target model ID, or undefined if no upgrade applies.
48887
- */
48888
- function resolveContextUpgrade(model, estimatedTokens) {
48889
- const rule = findUpgradeRule(model);
48890
- if (rule && estimatedTokens > rule.tokenThreshold) return rule.to;
48891
- }
48892
- /**
48893
- * Reactive: get the upgrade target for a model on context-length error.
48894
- * Returns the target model ID, or undefined if no fallback applies.
48895
- */
48896
- function getContextUpgradeTarget(model) {
48897
- return findUpgradeRule(model)?.to;
48898
- }
48899
- /** Context-length error detection with pattern matching */
48900
- const CONTEXT_ERROR_PATTERNS = [
48901
- /context.length/i,
48902
- /too.long/i,
48903
- /token.*(limit|maximum|exceed)/i,
48904
- /(limit|maximum|exceed).*token/i
48905
- ];
48906
- function isContextLengthError(error) {
48907
- if (!(error instanceof HTTPError) || error.status !== 400) return false;
48908
- const message = error.body?.error?.message;
48909
- return message ? CONTEXT_ERROR_PATTERNS.some((pattern) => pattern.test(message)) : false;
48910
- }
48911
-
48912
48984
  //#endregion
48913
48985
  //#region src/lib/request-model-policy.ts
48914
48986
  const COMPACT_SYSTEM_PROMPT_START = "You are a helpful AI assistant tasked with summarizing conversations";
48915
- function applyMessagesModelPolicy(payload) {
48987
+ function applyMessagesModelPolicy(payload, options) {
48916
48988
  const originalModel = payload.model;
48989
+ if (options?.betaUpgraded) return {
48990
+ originalModel,
48991
+ routedModel: originalModel
48992
+ };
48917
48993
  if (shouldContextUpgrade() && hasContextUpgradeRule(payload.model)) {
48918
48994
  const contextUpgradeTarget = resolveContextUpgrade(payload.model, estimateAnthropicInputTokens(payload));
48919
48995
  if (contextUpgradeTarget) {
@@ -49396,6 +49472,13 @@ function createMessagesViaChatCompletionsStrategy(transport, adapter, plan, sign
49396
49472
  data: JSON.stringify(event)
49397
49473
  }));
49398
49474
  },
49475
+ onStreamDone() {
49476
+ if (!streamTranslator) return null;
49477
+ return streamTranslator.onDone().map((event) => ({
49478
+ event: event.type,
49479
+ data: JSON.stringify(event)
49480
+ }));
49481
+ },
49399
49482
  shouldBreakStream() {
49400
49483
  return done;
49401
49484
  },
@@ -50048,7 +50131,10 @@ function createMessagesViaResponsesStrategy(copilotClient, responsesPayload, opt
50048
50131
  //#endregion
50049
50132
  //#region src/routes/messages/strategy-registry.ts
50050
50133
  function selectStrategy(registry, model) {
50051
- for (const entry of registry) if (entry.canHandle(model)) return entry;
50134
+ for (const entry of registry) if (entry.canHandle(model)) {
50135
+ consola.debug(`Strategy selected: ${entry.name} for model: ${model?.id ?? "(unknown)"}`);
50136
+ return entry;
50137
+ }
50052
50138
  return registry.at(-1);
50053
50139
  }
50054
50140
  function filterThinkingBlocksForNativeMessages(anthropicPayload) {
@@ -50142,6 +50228,30 @@ const defaultStrategyRegistry = [
50142
50228
 
50143
50229
  //#endregion
50144
50230
  //#region src/routes/messages/handler.ts
50231
+ const CONTEXT_BETA_RE = /^context-\d+[km]-/;
50232
+ function processAnthropicBetaHeader(rawHeader, model) {
50233
+ if (!rawHeader) return {
50234
+ header: void 0,
50235
+ upgradeTarget: void 0
50236
+ };
50237
+ const values = rawHeader.split(",").map((v) => v.trim()).filter(Boolean);
50238
+ let upgradeTarget;
50239
+ const filtered = [];
50240
+ for (const value of values) {
50241
+ if (CONTEXT_BETA_RE.test(value)) {
50242
+ if (!upgradeTarget && shouldContextUpgrade()) {
50243
+ const target = getContextUpgradeTarget(model);
50244
+ if (target) upgradeTarget = target;
50245
+ }
50246
+ continue;
50247
+ }
50248
+ filtered.push(value);
50249
+ }
50250
+ return {
50251
+ header: filtered.length > 0 ? filtered.join(",") : void 0,
50252
+ upgradeTarget
50253
+ };
50254
+ }
50145
50255
  /**
50146
50256
  * Core handler for Anthropic messages endpoint.
50147
50257
  * Returns both the execution result and model mapping info.
@@ -50149,10 +50259,17 @@ const defaultStrategyRegistry = [
50149
50259
  async function handleMessagesCore({ body, signal, headers }) {
50150
50260
  const anthropicPayload = parseAnthropicMessagesPayload(body);
50151
50261
  if (consola.level >= 4) consola.debug("Anthropic request payload:", JSON.stringify(anthropicPayload));
50152
- const anthropicBetaHeader = headers.get("anthropic-beta") ?? void 0;
50153
- const modelRouting = applyMessagesModelPolicy(anthropicPayload);
50262
+ const rewrite = applyModelRewrite(anthropicPayload);
50263
+ const betaResult = processAnthropicBetaHeader(headers.get("anthropic-beta"), anthropicPayload.model);
50264
+ if (betaResult.upgradeTarget) {
50265
+ consola.debug(`Beta header context upgrade: ${anthropicPayload.model} → ${betaResult.upgradeTarget}`);
50266
+ anthropicPayload.model = betaResult.upgradeTarget;
50267
+ }
50268
+ const anthropicBetaHeader = betaResult.header;
50269
+ const modelRouting = applyMessagesModelPolicy(anthropicPayload, { betaUpgraded: !!betaResult.upgradeTarget });
50154
50270
  const modelMapping = {
50155
- originalModel: modelRouting.originalModel,
50271
+ originalModel: rewrite.originalModel,
50272
+ rewrittenModel: rewrite.model,
50156
50273
  mappedModel: modelRouting.routedModel
50157
50274
  };
50158
50275
  if (modelRouting.reason) consola.debug(`Routed anthropic request via ${modelRouting.reason}:`, `${modelRouting.originalModel} -> ${modelRouting.routedModel}`);
@@ -50186,7 +50303,8 @@ async function handleMessagesCore({ body, signal, headers }) {
50186
50303
  selectedModel: retryModel,
50187
50304
  upstreamSignal: retrySignal,
50188
50305
  modelMapping: {
50189
- originalModel: modelRouting.originalModel,
50306
+ originalModel: rewrite.originalModel,
50307
+ rewrittenModel: rewrite.model,
50190
50308
  mappedModel: upgradeTarget
50191
50309
  }
50192
50310
  });
@@ -50305,6 +50423,7 @@ const HTTP_URL_RE = /^https?:\/\//i;
50305
50423
  */
50306
50424
  async function handleResponsesCore({ body, signal, headers }) {
50307
50425
  const payload = parseResponsesPayload(body);
50426
+ const rewrite = applyModelRewrite(payload);
50308
50427
  applyResponsesToolTransforms(payload);
50309
50428
  applyResponsesInputPolicies(payload);
50310
50429
  compactInputByLatestCompaction(payload);
@@ -50314,12 +50433,19 @@ async function handleResponsesCore({ body, signal, headers }) {
50314
50433
  applyContextManagement(payload, selectedModel.capabilities.limits.max_prompt_tokens);
50315
50434
  const { vision, initiator } = getResponsesRequestOptions(payload);
50316
50435
  const upstreamSignal = createUpstreamSignalFromConfig(signal);
50317
- return runStrategy(createResponsesPassthroughStrategy(createCopilotClient(), payload, {
50318
- vision,
50319
- initiator,
50320
- requestContext: readCapiRequestContext(headers),
50321
- signal: upstreamSignal.signal
50322
- }), upstreamSignal);
50436
+ return {
50437
+ result: await runStrategy(createResponsesPassthroughStrategy(createCopilotClient(), payload, {
50438
+ vision,
50439
+ initiator,
50440
+ requestContext: readCapiRequestContext(headers),
50441
+ signal: upstreamSignal.signal
50442
+ }), upstreamSignal),
50443
+ modelMapping: {
50444
+ originalModel: rewrite.originalModel,
50445
+ rewrittenModel: rewrite.model,
50446
+ mappedModel: payload.model
50447
+ }
50448
+ };
50323
50449
  }
50324
50450
  function applyResponsesToolTransforms(payload) {
50325
50451
  applyFunctionApplyPatch(payload);
@@ -50455,11 +50581,12 @@ function parseBooleanParam(value) {
50455
50581
  //#region src/routes/responses/route.ts
50456
50582
  function createResponsesRoutes() {
50457
50583
  return new Elysia().use(requestGuardPlugin).post("/responses", async function* ({ body, request }) {
50458
- const result = await handleResponsesCore({
50584
+ const { result, modelMapping } = await handleResponsesCore({
50459
50585
  body,
50460
50586
  signal: request.signal,
50461
50587
  headers: request.headers
50462
50588
  });
50589
+ if (modelMapping) setRequestModelMapping(request, modelMapping);
50463
50590
  if (result.kind === "json") return result.data;
50464
50591
  yield* sseAdapter(result.generator);
50465
50592
  }, { guarded: true }).post("/responses/input_tokens", async ({ body, request }) => {