@ljoukov/llm 4.0.2 → 4.0.4

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
package/dist/index.d.cts CHANGED
@@ -115,11 +115,11 @@ type LlmToolCallCompletedEvent = {
 type LlmToolCallStreamEvent = LlmToolCallStartedEvent | LlmToolCallCompletedEvent;
 type LlmStreamEvent = LlmTextDeltaEvent | LlmUsageEvent | LlmModelEvent | LlmBlockedEvent | LlmToolCallStreamEvent;
 type LlmProvider = "openai" | "chatgpt" | "gemini" | "fireworks";
-declare const LLM_TEXT_MODEL_IDS: readonly ["gpt-5.3-codex", "gpt-5.2", "gpt-5.1-codex-mini", "chatgpt-gpt-5.3-codex", "chatgpt-gpt-5.3-codex-spark", "chatgpt-gpt-5.2", "chatgpt-gpt-5.1-codex-mini", "kimi-k2.5", "glm-5", "minimax-m2.1", "gpt-oss-120b", "gemini-3.1-pro-preview", "gemini-3-flash-preview", "gemini-2.5-pro", "gemini-flash-latest", "gemini-flash-lite-latest"];
+declare const LLM_TEXT_MODEL_IDS: readonly ["gpt-5.4", "gpt-5.3-codex", "gpt-5.2", "gpt-5.1-codex-mini", "chatgpt-gpt-5.4", "chatgpt-gpt-5.4-fast", "chatgpt-gpt-5.3-codex", "chatgpt-gpt-5.3-codex-spark", "chatgpt-gpt-5.2", "chatgpt-gpt-5.1-codex-mini", "kimi-k2.5", "glm-5", "minimax-m2.1", "gpt-oss-120b", "gemini-3.1-pro-preview", "gemini-3-flash-preview", "gemini-2.5-pro", "gemini-flash-latest", "gemini-flash-lite-latest"];
 type LlmTextModelId = (typeof LLM_TEXT_MODEL_IDS)[number];
 declare const LLM_IMAGE_MODEL_IDS: readonly ["gemini-3-pro-image-preview", "gemini-3.1-flash-image-preview"];
 type LlmImageModelId = (typeof LLM_IMAGE_MODEL_IDS)[number];
-declare const LLM_MODEL_IDS: readonly ["gpt-5.3-codex", "gpt-5.2", "gpt-5.1-codex-mini", "chatgpt-gpt-5.3-codex", "chatgpt-gpt-5.3-codex-spark", "chatgpt-gpt-5.2", "chatgpt-gpt-5.1-codex-mini", "kimi-k2.5", "glm-5", "minimax-m2.1", "gpt-oss-120b", "gemini-3.1-pro-preview", "gemini-3-flash-preview", "gemini-2.5-pro", "gemini-flash-latest", "gemini-flash-lite-latest", "gemini-3-pro-image-preview", "gemini-3.1-flash-image-preview"];
+declare const LLM_MODEL_IDS: readonly ["gpt-5.4", "gpt-5.3-codex", "gpt-5.2", "gpt-5.1-codex-mini", "chatgpt-gpt-5.4", "chatgpt-gpt-5.4-fast", "chatgpt-gpt-5.3-codex", "chatgpt-gpt-5.3-codex-spark", "chatgpt-gpt-5.2", "chatgpt-gpt-5.1-codex-mini", "kimi-k2.5", "glm-5", "minimax-m2.1", "gpt-oss-120b", "gemini-3.1-pro-preview", "gemini-3-flash-preview", "gemini-2.5-pro", "gemini-flash-latest", "gemini-flash-lite-latest", "gemini-3-pro-image-preview", "gemini-3.1-flash-image-preview"];
 type LlmModelId = (typeof LLM_MODEL_IDS)[number];
 declare function isLlmTextModelId(value: string): value is LlmTextModelId;
 declare function isLlmImageModelId(value: string): value is LlmImageModelId;
@@ -582,6 +582,7 @@ type AgentLogLineSink = {
 };
 type AgentLoggingConfig = {
     readonly workspaceDir?: string;
+    readonly callLogsDir?: string;
     readonly mirrorToConsole?: boolean;
     readonly sink?: AgentLogLineSink;
 };
@@ -848,10 +849,10 @@ declare function refreshChatGptOauthToken(refreshToken: string, fallback?: {
 }): Promise<ChatGptAuthProfile>;
 declare function getChatGptAuthProfile(): Promise<ChatGptAuthProfile>;
 
-declare const OPENAI_MODEL_IDS: readonly ["gpt-5.3-codex", "gpt-5.2", "gpt-5.1-codex-mini"];
+declare const OPENAI_MODEL_IDS: readonly ["gpt-5.4", "gpt-5.3-codex", "gpt-5.2", "gpt-5.1-codex-mini"];
 type OpenAiModelId = (typeof OPENAI_MODEL_IDS)[number];
 declare function isOpenAiModelId(value: string): value is OpenAiModelId;
-declare const CHATGPT_MODEL_IDS: readonly ["chatgpt-gpt-5.3-codex", "chatgpt-gpt-5.3-codex-spark", "chatgpt-gpt-5.2", "chatgpt-gpt-5.1-codex-mini"];
+declare const CHATGPT_MODEL_IDS: readonly ["chatgpt-gpt-5.4", "chatgpt-gpt-5.4-fast", "chatgpt-gpt-5.3-codex", "chatgpt-gpt-5.3-codex-spark", "chatgpt-gpt-5.2", "chatgpt-gpt-5.1-codex-mini"];
 type ChatGptModelId = (typeof CHATGPT_MODEL_IDS)[number];
 declare function isChatGptModelId(value: string): value is ChatGptModelId;
 
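The practical effect of these declaration changes is that "gpt-5.4", "chatgpt-gpt-5.4", and "chatgpt-gpt-5.4-fast" now narrow through the exported type guards, and that AgentLoggingConfig accepts an optional call-log directory. A minimal TypeScript sketch, assuming these declarations are re-exported from the package's main entry:

import { isOpenAiModelId, isChatGptModelId, type AgentLoggingConfig } from "@ljoukov/llm";

isOpenAiModelId("gpt-5.4");               // true as of 4.0.4 (false in 4.0.2)
isChatGptModelId("chatgpt-gpt-5.4-fast"); // true as of 4.0.4 (false in 4.0.2)

// callLogsDir is new in 4.0.4; it is resolved against workspaceDir (see dist/index.js below).
const logging: AgentLoggingConfig = {
  workspaceDir: "/tmp/agent-workspace",
  callLogsDir: "logs/llm-calls",
  mirrorToConsole: true,
};

The package/dist/index.d.ts diff below carries the identical changes for the other module format.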
package/dist/index.d.ts CHANGED
@@ -115,11 +115,11 @@ type LlmToolCallCompletedEvent = {
 type LlmToolCallStreamEvent = LlmToolCallStartedEvent | LlmToolCallCompletedEvent;
 type LlmStreamEvent = LlmTextDeltaEvent | LlmUsageEvent | LlmModelEvent | LlmBlockedEvent | LlmToolCallStreamEvent;
 type LlmProvider = "openai" | "chatgpt" | "gemini" | "fireworks";
-declare const LLM_TEXT_MODEL_IDS: readonly ["gpt-5.3-codex", "gpt-5.2", "gpt-5.1-codex-mini", "chatgpt-gpt-5.3-codex", "chatgpt-gpt-5.3-codex-spark", "chatgpt-gpt-5.2", "chatgpt-gpt-5.1-codex-mini", "kimi-k2.5", "glm-5", "minimax-m2.1", "gpt-oss-120b", "gemini-3.1-pro-preview", "gemini-3-flash-preview", "gemini-2.5-pro", "gemini-flash-latest", "gemini-flash-lite-latest"];
+declare const LLM_TEXT_MODEL_IDS: readonly ["gpt-5.4", "gpt-5.3-codex", "gpt-5.2", "gpt-5.1-codex-mini", "chatgpt-gpt-5.4", "chatgpt-gpt-5.4-fast", "chatgpt-gpt-5.3-codex", "chatgpt-gpt-5.3-codex-spark", "chatgpt-gpt-5.2", "chatgpt-gpt-5.1-codex-mini", "kimi-k2.5", "glm-5", "minimax-m2.1", "gpt-oss-120b", "gemini-3.1-pro-preview", "gemini-3-flash-preview", "gemini-2.5-pro", "gemini-flash-latest", "gemini-flash-lite-latest"];
 type LlmTextModelId = (typeof LLM_TEXT_MODEL_IDS)[number];
 declare const LLM_IMAGE_MODEL_IDS: readonly ["gemini-3-pro-image-preview", "gemini-3.1-flash-image-preview"];
 type LlmImageModelId = (typeof LLM_IMAGE_MODEL_IDS)[number];
-declare const LLM_MODEL_IDS: readonly ["gpt-5.3-codex", "gpt-5.2", "gpt-5.1-codex-mini", "chatgpt-gpt-5.3-codex", "chatgpt-gpt-5.3-codex-spark", "chatgpt-gpt-5.2", "chatgpt-gpt-5.1-codex-mini", "kimi-k2.5", "glm-5", "minimax-m2.1", "gpt-oss-120b", "gemini-3.1-pro-preview", "gemini-3-flash-preview", "gemini-2.5-pro", "gemini-flash-latest", "gemini-flash-lite-latest", "gemini-3-pro-image-preview", "gemini-3.1-flash-image-preview"];
+declare const LLM_MODEL_IDS: readonly ["gpt-5.4", "gpt-5.3-codex", "gpt-5.2", "gpt-5.1-codex-mini", "chatgpt-gpt-5.4", "chatgpt-gpt-5.4-fast", "chatgpt-gpt-5.3-codex", "chatgpt-gpt-5.3-codex-spark", "chatgpt-gpt-5.2", "chatgpt-gpt-5.1-codex-mini", "kimi-k2.5", "glm-5", "minimax-m2.1", "gpt-oss-120b", "gemini-3.1-pro-preview", "gemini-3-flash-preview", "gemini-2.5-pro", "gemini-flash-latest", "gemini-flash-lite-latest", "gemini-3-pro-image-preview", "gemini-3.1-flash-image-preview"];
 type LlmModelId = (typeof LLM_MODEL_IDS)[number];
 declare function isLlmTextModelId(value: string): value is LlmTextModelId;
 declare function isLlmImageModelId(value: string): value is LlmImageModelId;
@@ -582,6 +582,7 @@ type AgentLogLineSink = {
 };
 type AgentLoggingConfig = {
     readonly workspaceDir?: string;
+    readonly callLogsDir?: string;
     readonly mirrorToConsole?: boolean;
     readonly sink?: AgentLogLineSink;
 };
@@ -848,10 +849,10 @@ declare function refreshChatGptOauthToken(refreshToken: string, fallback?: {
 }): Promise<ChatGptAuthProfile>;
 declare function getChatGptAuthProfile(): Promise<ChatGptAuthProfile>;
 
-declare const OPENAI_MODEL_IDS: readonly ["gpt-5.3-codex", "gpt-5.2", "gpt-5.1-codex-mini"];
+declare const OPENAI_MODEL_IDS: readonly ["gpt-5.4", "gpt-5.3-codex", "gpt-5.2", "gpt-5.1-codex-mini"];
 type OpenAiModelId = (typeof OPENAI_MODEL_IDS)[number];
 declare function isOpenAiModelId(value: string): value is OpenAiModelId;
-declare const CHATGPT_MODEL_IDS: readonly ["chatgpt-gpt-5.3-codex", "chatgpt-gpt-5.3-codex-spark", "chatgpt-gpt-5.2", "chatgpt-gpt-5.1-codex-mini"];
+declare const CHATGPT_MODEL_IDS: readonly ["chatgpt-gpt-5.4", "chatgpt-gpt-5.4-fast", "chatgpt-gpt-5.3-codex", "chatgpt-gpt-5.3-codex-spark", "chatgpt-gpt-5.2", "chatgpt-gpt-5.1-codex-mini"];
 type ChatGptModelId = (typeof CHATGPT_MODEL_IDS)[number];
 declare function isChatGptModelId(value: string): value is ChatGptModelId;
 
package/dist/index.js CHANGED
@@ -205,6 +205,16 @@ var OPENAI_GPT_52_PRICING = {
   cachedRate: 0.175 / 1e6,
   outputRate: 14 / 1e6
 };
+var OPENAI_GPT_54_PRICING = {
+  inputRate: 2.5 / 1e6,
+  cachedRate: 0.25 / 1e6,
+  outputRate: 15 / 1e6
+};
+var OPENAI_GPT_54_PRIORITY_PRICING = {
+  inputRate: 5 / 1e6,
+  cachedRate: 0.5 / 1e6,
+  outputRate: 30 / 1e6
+};
 var OPENAI_GPT_53_CODEX_PRICING = {
   inputRate: 1.25 / 1e6,
   cachedRate: 0.125 / 1e6,
@@ -216,6 +226,12 @@ var OPENAI_GPT_5_MINI_PRICING = {
   outputRate: 2 / 1e6
 };
 function getOpenAiPricing(modelId) {
+  if (modelId.includes("gpt-5.4-fast")) {
+    return OPENAI_GPT_54_PRIORITY_PRICING;
+  }
+  if (modelId.includes("gpt-5.4")) {
+    return OPENAI_GPT_54_PRICING;
+  }
   if (modelId.includes("gpt-5.3-codex-spark")) {
     return OPENAI_GPT_5_MINI_PRICING;
   }
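The two new rate tables give GPT-5.4 a $2.50 / $0.25 / $15 per-million-token price for input, cached input, and output, and exactly double that ($5 / $0.50 / $30) when the priority tier is selected via the "-fast" variant. A back-of-the-envelope check, assuming cached input tokens are billed at cachedRate in place of inputRate (the body of estimateCallCostUsd is not shown in this diff):

// Illustrative arithmetic only; the token counts are made up.
const usage = { input: 10_000, cachedInput: 2_000, output: 1_000 };
const standardUsd =
  (usage.input - usage.cachedInput) * (2.5 / 1e6) + // $0.0200
  usage.cachedInput * (0.25 / 1e6) +                // $0.0005
  usage.output * (15 / 1e6);                        // $0.0150 -> total ≈ $0.0355
const priorityUsd =
  (usage.input - usage.cachedInput) * (5 / 1e6) +
  usage.cachedInput * (0.5 / 1e6) +
  usage.output * (30 / 1e6);                        // total ≈ $0.0710, exactly 2x

Note that getOpenAiPricing checks "gpt-5.4-fast" before "gpt-5.4", so the more specific priority table wins for the fast variant.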
@@ -2626,11 +2642,18 @@ async function runOpenAiCall(fn, modelId, runOptions) {
 }
 
 // src/openai/models.ts
-var OPENAI_MODEL_IDS = ["gpt-5.3-codex", "gpt-5.2", "gpt-5.1-codex-mini"];
+var OPENAI_MODEL_IDS = [
+  "gpt-5.4",
+  "gpt-5.3-codex",
+  "gpt-5.2",
+  "gpt-5.1-codex-mini"
+];
 function isOpenAiModelId(value) {
   return OPENAI_MODEL_IDS.includes(value);
 }
 var CHATGPT_MODEL_IDS = [
+  "chatgpt-gpt-5.4",
+  "chatgpt-gpt-5.4-fast",
   "chatgpt-gpt-5.3-codex",
   "chatgpt-gpt-5.3-codex-spark",
   "chatgpt-gpt-5.2",
@@ -2642,6 +2665,17 @@ function isChatGptModelId(value) {
 function stripChatGptPrefix(model) {
   return model.slice("chatgpt-".length);
 }
+function resolveChatGptProviderModel(model) {
+  switch (model) {
+    case "chatgpt-gpt-5.4-fast":
+      return "gpt-5.4";
+    default:
+      return stripChatGptPrefix(model);
+  }
+}
+function resolveChatGptServiceTier(model) {
+  return model === "chatgpt-gpt-5.4-fast" ? "priority" : void 0;
+}
 
 // src/agentLogging.ts
 import { AsyncLocalStorage } from "async_hooks";
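Both helpers are internal to dist/index.js rather than part of the public surface; they encode the rule that the "-fast" suffix selects the same upstream model but requests the priority service tier. Illustrative values, read off the switch above:

resolveChatGptProviderModel("chatgpt-gpt-5.4");      // "gpt-5.4"  (prefix stripped)
resolveChatGptProviderModel("chatgpt-gpt-5.4-fast"); // "gpt-5.4"  (no separate "-fast" upstream model)
resolveChatGptProviderModel("chatgpt-gpt-5.2");      // "gpt-5.2"
resolveChatGptServiceTier("chatgpt-gpt-5.4-fast");   // "priority"
resolveChatGptServiceTier("chatgpt-gpt-5.4");        // undefined -> no service_tier sent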
@@ -2823,7 +2857,8 @@ var AgentLoggingSessionImpl = class {
   callCounter = 0;
   constructor(config) {
     this.workspaceDir = path3.resolve(config.workspaceDir ?? process.cwd());
-    this.logsRootDir = path3.join(path3.dirname(this.workspaceDir), "logs");
+    const configuredCallLogsDir = typeof config.callLogsDir === "string" ? config.callLogsDir.trim() : "";
+    this.logsRootDir = configuredCallLogsDir.length > 0 ? path3.resolve(this.workspaceDir, configuredCallLogsDir) : path3.join(this.workspaceDir, "llm_calls");
     this.mirrorToConsole = config.mirrorToConsole !== false;
     this.sink = config.sink;
     this.agentLogPath = path3.join(this.workspaceDir, "agent.log");
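The constructor change moves the default call-log location from a "logs" directory next to the workspace into the workspace itself, and lets callLogsDir override it. A sketch of the resolution rule using plain Node path semantics and an illustrative workspace path:

import path from "node:path";

const workspaceDir = path.resolve("/home/me/project");

// No callLogsDir (4.0.4 default): a subdirectory of the workspace.
path.join(workspaceDir, "llm_calls");          // "/home/me/project/llm_calls"

// Relative callLogsDir: resolved against the workspace.
path.resolve(workspaceDir, "logs/calls");      // "/home/me/project/logs/calls"

// Absolute callLogsDir: path.resolve keeps absolute paths as-is.
path.resolve(workspaceDir, "/var/log/llm");    // "/var/log/llm"

// For comparison, the 4.0.2 default was a sibling of the workspace:
path.join(path.dirname(workspaceDir), "logs"); // "/home/me/logs"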
@@ -3390,7 +3425,11 @@ function convertLlmContentToGeminiContent(content) {
 }
 function resolveProvider(model) {
   if (isChatGptModelId(model)) {
-    return { provider: "chatgpt", model: stripChatGptPrefix(model) };
+    return {
+      provider: "chatgpt",
+      model: resolveChatGptProviderModel(model),
+      serviceTier: resolveChatGptServiceTier(model)
+    };
   }
   if (isGeminiTextModelId(model) || isGeminiImageModelId(model)) {
     return { provider: "gemini", model };
@@ -5101,6 +5140,7 @@ async function runTextCall(params) {
       model: modelForProvider,
       store: false,
       stream: true,
+      ...providerInfo.serviceTier ? { service_tier: providerInfo.serviceTier } : {},
       instructions: chatGptInput.instructions ?? "You are a helpful assistant.",
       input: chatGptInput.input,
       include: ["reasoning.encrypted_content"],
@@ -5135,7 +5175,7 @@ async function runTextCall(params) {
       queue.push({ type: "blocked" });
     }
     if (result.model) {
-      modelVersion = `chatgpt-${result.model}`;
+      modelVersion = providerInfo.serviceTier ? request.model : `chatgpt-${result.model}`;
       queue.push({ type: "model", modelVersion });
     }
     latestUsage = extractChatGptUsageTokens(result.usage);
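Because the spread is conditional, requests for models without a resolved tier stay exactly as they were in 4.0.2; only the "-fast" variant gains a service_tier field. A reduced sketch of the effect (payload fields other than the ones shown are omitted):

const providerInfo = { model: "gpt-5.4", serviceTier: "priority" }; // resolved from "chatgpt-gpt-5.4-fast"
const body = {
  model: providerInfo.model,
  store: false,
  stream: true,
  ...(providerInfo.serviceTier ? { service_tier: providerInfo.serviceTier } : {})
};
// -> { model: "gpt-5.4", store: false, stream: true, service_tier: "priority" }
// For e.g. "chatgpt-gpt-5.2", serviceTier is undefined and no service_tier key is added.

The companion modelVersion change keeps reporting the requested id (e.g. "chatgpt-gpt-5.4-fast") for priority calls, since the provider's result.model would otherwise collapse it back to plain "gpt-5.4". The same pair of changes appears in the tool-loop path below.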
@@ -6097,6 +6137,7 @@ async function runToolLoop(request) {
       model: providerInfo.model,
       store: false,
       stream: true,
+      ...providerInfo.serviceTier ? { service_tier: providerInfo.serviceTier } : {},
       instructions: toolLoopInput.instructions ?? "You are a helpful assistant.",
       input,
       prompt_cache_key: promptCacheKey,
@@ -6137,7 +6178,7 @@
       }
     });
     const modelCompletedAtMs = Date.now();
-    modelVersion = response.model ? `chatgpt-${response.model}` : request.model;
+    modelVersion = response.model && !providerInfo.serviceTier ? `chatgpt-${response.model}` : request.model;
     usageTokens = extractChatGptUsageTokens(response.usage);
     const stepCostUsd = estimateCallCostUsd({
       modelId: modelVersion,
@@ -10262,9 +10303,10 @@ function createRootAgentLoggingSession(request) {
   if (!selected) {
     return void 0;
   }
+  const workspaceDir = typeof selected.workspaceDir === "string" && selected.workspaceDir.trim().length > 0 ? path7.resolve(selected.workspaceDir) : resolveWorkspaceDirForLogging(request);
   return createAgentLoggingSession({
     ...selected,
-    workspaceDir: typeof selected.workspaceDir === "string" && selected.workspaceDir.trim().length > 0 ? path7.resolve(selected.workspaceDir) : resolveWorkspaceDirForLogging(request),
+    workspaceDir,
     mirrorToConsole: selected.mirrorToConsole !== false
   });
 }