@letta-ai/letta-code 0.17.0 → 0.17.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/letta.js +787 -389
  2. package/package.json +1 -1
package/letta.js CHANGED
@@ -3222,7 +3222,7 @@ var package_default;
3222
3222
  var init_package = __esm(() => {
3223
3223
  package_default = {
3224
3224
  name: "@letta-ai/letta-code",
3225
- version: "0.17.0",
3225
+ version: "0.17.1",
3226
3226
  description: "Letta Code is a CLI tool for interacting with stateful Letta agents from the terminal.",
3227
3227
  type: "module",
3228
3228
  bin: {
@@ -7319,14 +7319,14 @@ var init_models2 = __esm(() => {
7319
7319
  {
7320
7320
  id: "auto",
7321
7321
  handle: "letta/auto",
7322
- label: "Auto",
7322
+ label: "Auto (Beta)",
7323
7323
  description: "Automatically select the best model",
7324
7324
  isFeatured: true
7325
7325
  },
7326
7326
  {
7327
7327
  id: "auto-fast",
7328
7328
  handle: "letta/auto-fast",
7329
- label: "Auto Fast",
7329
+ label: "Auto Fast (Beta)",
7330
7330
  description: "Automatically select the best fast model",
7331
7331
  isFeatured: true
7332
7332
  },
@@ -7908,6 +7908,72 @@ var init_models2 = __esm(() => {
7908
7908
  parallel_tool_calls: true
7909
7909
  }
7910
7910
  },
7911
+ {
7912
+ id: "gpt-5.4-none",
7913
+ handle: "openai/gpt-5.4",
7914
+ label: "GPT-5.4",
7915
+ description: "OpenAI's most capable model (no reasoning)",
7916
+ updateArgs: {
7917
+ reasoning_effort: "none",
7918
+ verbosity: "medium",
7919
+ context_window: 272000,
7920
+ max_output_tokens: 128000,
7921
+ parallel_tool_calls: true
7922
+ }
7923
+ },
7924
+ {
7925
+ id: "gpt-5.4-low",
7926
+ handle: "openai/gpt-5.4",
7927
+ label: "GPT-5.4",
7928
+ description: "OpenAI's most capable model (low reasoning)",
7929
+ updateArgs: {
7930
+ reasoning_effort: "low",
7931
+ verbosity: "medium",
7932
+ context_window: 272000,
7933
+ max_output_tokens: 128000,
7934
+ parallel_tool_calls: true
7935
+ }
7936
+ },
7937
+ {
7938
+ id: "gpt-5.4-medium",
7939
+ handle: "openai/gpt-5.4",
7940
+ label: "GPT-5.4",
7941
+ description: "OpenAI's most capable model (med reasoning)",
7942
+ updateArgs: {
7943
+ reasoning_effort: "medium",
7944
+ verbosity: "medium",
7945
+ context_window: 272000,
7946
+ max_output_tokens: 128000,
7947
+ parallel_tool_calls: true
7948
+ }
7949
+ },
7950
+ {
7951
+ id: "gpt-5.4-high",
7952
+ handle: "openai/gpt-5.4",
7953
+ label: "GPT-5.4",
7954
+ description: "OpenAI's most capable model (high reasoning)",
7955
+ isFeatured: true,
7956
+ updateArgs: {
7957
+ reasoning_effort: "high",
7958
+ verbosity: "medium",
7959
+ context_window: 272000,
7960
+ max_output_tokens: 128000,
7961
+ parallel_tool_calls: true
7962
+ }
7963
+ },
7964
+ {
7965
+ id: "gpt-5.4-xhigh",
7966
+ handle: "openai/gpt-5.4",
7967
+ label: "GPT-5.4",
7968
+ description: "OpenAI's most capable model (max reasoning)",
7969
+ updateArgs: {
7970
+ reasoning_effort: "xhigh",
7971
+ verbosity: "medium",
7972
+ context_window: 272000,
7973
+ max_output_tokens: 128000,
7974
+ parallel_tool_calls: true
7975
+ }
7976
+ },
7911
7977
  {
7912
7978
  id: "gpt-5.3-codex-none",
7913
7979
  handle: "openai/gpt-5.3-codex",
@@ -8556,7 +8622,7 @@ function getDefaultModel() {
8556
8622
  }
8557
8623
  function getDefaultModelForTier(billingTier) {
8558
8624
  if (billingTier?.toLowerCase() === "free") {
8559
- const freeDefault = models.find((m) => m.id === "minimax-m2.5");
8625
+ const freeDefault = models.find((m) => m.id === "glm-5");
8560
8626
  if (freeDefault)
8561
8627
  return freeDefault.handle;
8562
8628
  }
@@ -34859,7 +34925,7 @@ function getDefaultModel2() {
34859
34925
  }
34860
34926
  function getDefaultModelForTier2(billingTier) {
34861
34927
  if (billingTier?.toLowerCase() === "free") {
34862
- const freeDefault = models2.find((m) => m.id === "minimax-m2.5");
34928
+ const freeDefault = models2.find((m) => m.id === "glm-5");
34863
34929
  if (freeDefault)
34864
34930
  return freeDefault.handle;
34865
34931
  }
@@ -60378,6 +60444,15 @@ async function getPrimaryAgentModelHandle() {
60378
60444
  return null;
60379
60445
  }
60380
60446
  }
60447
+ async function getCurrentBillingTier() {
60448
+ try {
60449
+ const client = await getClient2();
60450
+ const balance = await client.get("/v1/metadata/balance");
60451
+ return balance.billing_tier ?? null;
60452
+ } catch {
60453
+ return null;
60454
+ }
60455
+ }
60381
60456
  function isProviderNotSupportedError(errorOutput) {
60382
60457
  return errorOutput.includes("Provider") && errorOutput.includes("is not supported") && errorOutput.includes("supported providers:");
60383
60458
  }
@@ -60401,13 +60476,16 @@ function swapProviderPrefix(parentHandle, recommendedHandle) {
60401
60476
  return `${parentProvider}/${modelPortion}`;
60402
60477
  }
60403
60478
  async function resolveSubagentModel(options) {
60404
- const { userModel, recommendedModel, parentModelHandle } = options;
60479
+ const { userModel, recommendedModel, parentModelHandle, billingTier } = options;
60405
60480
  if (userModel)
60406
60481
  return userModel;
60407
60482
  let recommendedHandle = null;
60408
60483
  if (recommendedModel && recommendedModel !== "inherit") {
60409
60484
  recommendedHandle = resolveModel2(recommendedModel);
60410
60485
  }
60486
+ if (recommendedModel !== "inherit" && billingTier?.toLowerCase() === "free") {
60487
+ recommendedHandle = getDefaultModelForTier2(billingTier);
60488
+ }
60411
60489
  let availableHandles = options.availableHandles ?? null;
60412
60490
  const isAvailable = async (handle) => {
60413
60491
  try {
@@ -60810,10 +60888,12 @@ async function spawnSubagent(type, prompt, userModel, subagentId, signal, existi
60810
60888
  }
60811
60889
  const isDeployingExisting = Boolean(existingAgentId || existingConversationId);
60812
60890
  const parentModelHandle = await getPrimaryAgentModelHandle();
60891
+ const billingTier = await getCurrentBillingTier();
60813
60892
  const model = isDeployingExisting ? null : await resolveSubagentModel({
60814
60893
  userModel,
60815
60894
  recommendedModel: config.recommendedModel,
60816
- parentModelHandle
60895
+ parentModelHandle,
60896
+ billingTier
60817
60897
  });
60818
60898
  const baseURL = getBaseURL();
60819
60899
  let finalPrompt = prompt;
@@ -65791,7 +65871,7 @@ async function loadSpecificTools(toolNames) {
65791
65871
  releaseSwitchLock();
65792
65872
  }
65793
65873
  }
65794
- async function loadTools(modelIdentifier) {
65874
+ async function loadTools(modelIdentifier, options) {
65795
65875
  acquireSwitchLock();
65796
65876
  try {
65797
65877
  const { toolFilter: toolFilter2 } = await Promise.resolve().then(() => (init_filter(), exports_filter));
@@ -65812,6 +65892,10 @@ async function loadTools(modelIdentifier) {
65812
65892
  } else {
65813
65893
  baseToolNames = TOOL_NAMES;
65814
65894
  }
65895
+ if (options?.exclude && options.exclude.length > 0) {
65896
+ const excludeSet = new Set(options.exclude);
65897
+ baseToolNames = baseToolNames.filter((name) => !excludeSet.has(name));
65898
+ }
65815
65899
  const newRegistry = new Map;
65816
65900
  for (const name of baseToolNames) {
65817
65901
  if (!toolFilter2.isEnabled(name)) {
@@ -66593,341 +66677,6 @@ var init_approval_execution = __esm(async () => {
66593
66677
  ]);
66594
66678
  });
66595
66679
 
66596
- // src/agent/check-approval.ts
66597
- var exports_check_approval = {};
66598
- __export(exports_check_approval, {
66599
- prepareMessageHistory: () => prepareMessageHistory2,
66600
- getResumeData: () => getResumeData2,
66601
- extractApprovals: () => extractApprovals2
66602
- });
66603
- function isPrimaryMessageType2(messageType) {
66604
- return messageType === "user_message" || messageType === "assistant_message" || messageType === "event_message" || messageType === "summary_message";
66605
- }
66606
- function isAnchorMessageType2(messageType) {
66607
- return messageType === "user_message" || messageType === "assistant_message";
66608
- }
66609
- function isBackfillEnabled2() {
66610
- const val = process.env.LETTA_BACKFILL;
66611
- return val !== "0" && val !== "false";
66612
- }
66613
- function extractApprovals2(messageToCheck) {
66614
- const approvalMsg = messageToCheck;
66615
- const toolCalls = Array.isArray(approvalMsg.tool_calls) ? approvalMsg.tool_calls : approvalMsg.tool_call ? [approvalMsg.tool_call] : [];
66616
- const pendingApprovals = toolCalls.filter((tc) => !!tc && !!tc.tool_call_id).map((tc) => ({
66617
- toolCallId: tc.tool_call_id,
66618
- toolName: tc.name || "",
66619
- toolArgs: tc.arguments || ""
66620
- }));
66621
- const pendingApproval = pendingApprovals[0] || null;
66622
- if (pendingApprovals.length > 0) {
66623
- debugWarn("check-approval", `Found ${pendingApprovals.length} pending approval(s): ${pendingApprovals.map((a) => a.toolName).join(", ")}`);
66624
- }
66625
- return { pendingApproval, pendingApprovals };
66626
- }
66627
- function prepareMessageHistory2(messages, opts) {
66628
- const isRenderable = (msg) => {
66629
- const t = msg.message_type;
66630
- if (t === "user_message" || t === "assistant_message" || t === "reasoning_message" || t === "tool_call_message" || t === "tool_return_message" || t === "approval_request_message" || t === "approval_response_message") {
66631
- return true;
66632
- }
66633
- const ts = t;
66634
- return ts === "event_message" || ts === "summary_message";
66635
- };
66636
- const renderable = messages.filter(isRenderable);
66637
- if (opts?.primaryOnly) {
66638
- const convo = renderable.filter((m) => isPrimaryMessageType2(m.message_type));
66639
- let trimmed = convo.slice(-BACKFILL_PRIMARY_MESSAGE_LIMIT2);
66640
- const hasAssistant = trimmed.some((m) => m.message_type === "assistant_message");
66641
- if (!hasAssistant) {
66642
- const lastAssistantIndex = convo.map((m) => m.message_type).lastIndexOf("assistant_message");
66643
- if (lastAssistantIndex >= 0) {
66644
- const lastAssistant = convo[lastAssistantIndex];
66645
- if (lastAssistant) {
66646
- const tailLimit = Math.max(BACKFILL_PRIMARY_MESSAGE_LIMIT2 - 1, 0);
66647
- const newestTail = tailLimit > 0 ? convo.slice(-tailLimit) : [];
66648
- trimmed = [lastAssistant, ...newestTail];
66649
- }
66650
- }
66651
- }
66652
- if (trimmed.length > 0)
66653
- return trimmed;
66654
- const reasoning = renderable.filter((m) => m.message_type === "reasoning_message");
66655
- if (reasoning.length > 0) {
66656
- return reasoning.slice(-BACKFILL_PRIMARY_MESSAGE_LIMIT2);
66657
- }
66658
- return [];
66659
- }
66660
- const isPrimary = (msg) => {
66661
- const t = msg.message_type;
66662
- return t === "user_message" || t === "assistant_message" || t === "reasoning_message" || t === "event_message" || t === "summary_message";
66663
- };
66664
- let primaryCount = 0;
66665
- let startIndex = Math.max(0, renderable.length - 1);
66666
- for (let i = renderable.length - 1;i >= 0; i -= 1) {
66667
- const msg = renderable[i];
66668
- if (!msg)
66669
- continue;
66670
- if (isPrimary(msg)) {
66671
- primaryCount += 1;
66672
- if (primaryCount >= BACKFILL_PRIMARY_MESSAGE_LIMIT2) {
66673
- startIndex = i;
66674
- break;
66675
- }
66676
- }
66677
- startIndex = i;
66678
- }
66679
- let messageHistory = renderable.slice(startIndex);
66680
- if (messageHistory.length > BACKFILL_MAX_RENDERABLE_MESSAGES2) {
66681
- messageHistory = messageHistory.slice(-BACKFILL_MAX_RENDERABLE_MESSAGES2);
66682
- }
66683
- if (messageHistory[0]?.message_type === "tool_return_message") {
66684
- messageHistory = messageHistory.slice(1);
66685
- }
66686
- return messageHistory;
66687
- }
66688
- function sortChronological2(messages) {
66689
- return [...messages].sort((a, b) => {
66690
- const ta = a.date ? new Date(a.date).getTime() : 0;
66691
- const tb = b.date ? new Date(b.date).getTime() : 0;
66692
- if (!Number.isFinite(ta) && !Number.isFinite(tb))
66693
- return 0;
66694
- if (!Number.isFinite(ta))
66695
- return -1;
66696
- if (!Number.isFinite(tb))
66697
- return 1;
66698
- return ta - tb;
66699
- });
66700
- }
66701
- async function fetchConversationBackfillMessages2(client, conversationId) {
66702
- const collected = [];
66703
- const seen = new Set;
66704
- let cursorBefore = null;
66705
- let assistantCount = 0;
66706
- let anchorCount = 0;
66707
- for (let pageIndex = 0;pageIndex < BACKFILL_MAX_PAGES2; pageIndex += 1) {
66708
- const page = await client.conversations.messages.list(conversationId, {
66709
- limit: BACKFILL_PAGE_LIMIT2,
66710
- order: "desc",
66711
- ...cursorBefore ? { before: cursorBefore } : {}
66712
- });
66713
- const items = page.getPaginatedItems();
66714
- if (items.length === 0)
66715
- break;
66716
- cursorBefore = items[items.length - 1]?.id ?? null;
66717
- for (const m of items) {
66718
- if (!m?.id)
66719
- continue;
66720
- const key = "otid" in m && m.otid ? `otid:${String(m.otid)}` : `id:${m.id}:${m.message_type ?? ""}`;
66721
- if (seen.has(key))
66722
- continue;
66723
- seen.add(key);
66724
- collected.push(m);
66725
- if (m.message_type === "assistant_message")
66726
- assistantCount += 1;
66727
- if (isAnchorMessageType2(m.message_type))
66728
- anchorCount += 1;
66729
- }
66730
- if (assistantCount >= BACKFILL_MIN_ASSISTANT2 && anchorCount >= BACKFILL_ANCHOR_MESSAGE_LIMIT2) {
66731
- break;
66732
- }
66733
- if (items.length < BACKFILL_PAGE_LIMIT2)
66734
- break;
66735
- }
66736
- if (assistantCount < BACKFILL_MIN_ASSISTANT2) {
66737
- debugWarn("check-approval", `Backfill scan found 0 assistant messages in last ${collected.length} messages (tool-heavy conversation?)`);
66738
- }
66739
- return sortChronological2(collected);
66740
- }
66741
- async function getResumeData2(client, agent, conversationId, options = {}) {
66742
- try {
66743
- const includeMessageHistory = options.includeMessageHistory ?? true;
66744
- let inContextMessageIds;
66745
- let messages = [];
66746
- const useConversationsApi = conversationId && conversationId !== "default";
66747
- if (useConversationsApi) {
66748
- const conversation = await client.conversations.retrieve(conversationId);
66749
- inContextMessageIds = conversation.in_context_message_ids;
66750
- if (!inContextMessageIds || inContextMessageIds.length === 0) {
66751
- debugWarn("check-approval", "No in-context messages - no pending approvals");
66752
- if (includeMessageHistory && isBackfillEnabled2()) {
66753
- try {
66754
- const backfill = await fetchConversationBackfillMessages2(client, conversationId);
66755
- return {
66756
- pendingApproval: null,
66757
- pendingApprovals: [],
66758
- messageHistory: prepareMessageHistory2(backfill, {
66759
- primaryOnly: true
66760
- })
66761
- };
66762
- } catch (backfillError) {
66763
- debugWarn("check-approval", `Failed to load message history: ${backfillError instanceof Error ? backfillError.message : String(backfillError)}`);
66764
- }
66765
- }
66766
- return {
66767
- pendingApproval: null,
66768
- pendingApprovals: [],
66769
- messageHistory: []
66770
- };
66771
- }
66772
- const lastInContextId = inContextMessageIds.at(-1);
66773
- if (!lastInContextId) {
66774
- throw new Error("Expected at least one in-context message");
66775
- }
66776
- const retrievedMessages = await client.messages.retrieve(lastInContextId);
66777
- if (includeMessageHistory && isBackfillEnabled2()) {
66778
- try {
66779
- messages = await fetchConversationBackfillMessages2(client, conversationId);
66780
- } catch (backfillError) {
66781
- debugWarn("check-approval", `Failed to load message history: ${backfillError instanceof Error ? backfillError.message : String(backfillError)}`);
66782
- }
66783
- }
66784
- const messageToCheck = retrievedMessages.find((msg) => msg.message_type === "approval_request_message") ?? retrievedMessages[0];
66785
- if (messageToCheck) {
66786
- debugWarn("check-approval", `Found last in-context message: ${messageToCheck.id} (type: ${messageToCheck.message_type})` + (retrievedMessages.length > 1 ? ` - had ${retrievedMessages.length} variants` : ""));
66787
- if (messageToCheck.message_type === "approval_request_message") {
66788
- const { pendingApproval, pendingApprovals } = extractApprovals2(messageToCheck);
66789
- return {
66790
- pendingApproval,
66791
- pendingApprovals,
66792
- messageHistory: prepareMessageHistory2(messages, {
66793
- primaryOnly: true
66794
- })
66795
- };
66796
- }
66797
- } else {
66798
- debugWarn("check-approval", `Last in-context message ${lastInContextId} not found via retrieve`);
66799
- }
66800
- return {
66801
- pendingApproval: null,
66802
- pendingApprovals: [],
66803
- messageHistory: prepareMessageHistory2(messages, { primaryOnly: true })
66804
- };
66805
- } else {
66806
- inContextMessageIds = agent.message_ids;
66807
- if (!inContextMessageIds || inContextMessageIds.length === 0) {
66808
- debugWarn("check-approval", "No in-context messages (default/agent API) - no pending approvals");
66809
- return {
66810
- pendingApproval: null,
66811
- pendingApprovals: [],
66812
- messageHistory: []
66813
- };
66814
- }
66815
- const lastInContextId = inContextMessageIds.at(-1);
66816
- if (!lastInContextId) {
66817
- throw new Error("Expected at least one in-context message");
66818
- }
66819
- const retrievedMessages = await client.messages.retrieve(lastInContextId);
66820
- if (includeMessageHistory && isBackfillEnabled2()) {
66821
- try {
66822
- const messagesPage = await client.conversations.messages.list("default", {
66823
- agent_id: agent.id,
66824
- limit: BACKFILL_PAGE_LIMIT2,
66825
- order: "desc"
66826
- });
66827
- messages = sortChronological2(messagesPage.getPaginatedItems());
66828
- if (process.env.DEBUG) {
66829
- console.log(`[DEBUG] conversations.messages.list(default, agent_id=${agent.id}) returned ${messages.length} messages`);
66830
- }
66831
- } catch (backfillError) {
66832
- debugWarn("check-approval", `Failed to load message history: ${backfillError instanceof Error ? backfillError.message : String(backfillError)}`);
66833
- }
66834
- }
66835
- const messageToCheck = retrievedMessages.find((msg) => msg.message_type === "approval_request_message") ?? retrievedMessages[0];
66836
- if (messageToCheck) {
66837
- debugWarn("check-approval", `Found last in-context message: ${messageToCheck.id} (type: ${messageToCheck.message_type})` + (retrievedMessages.length > 1 ? ` - had ${retrievedMessages.length} variants` : ""));
66838
- if (messageToCheck.message_type === "approval_request_message") {
66839
- const { pendingApproval, pendingApprovals } = extractApprovals2(messageToCheck);
66840
- return {
66841
- pendingApproval,
66842
- pendingApprovals,
66843
- messageHistory: prepareMessageHistory2(messages, {
66844
- primaryOnly: true
66845
- })
66846
- };
66847
- }
66848
- } else {
66849
- debugWarn("check-approval", `Last in-context message ${lastInContextId} not found via retrieve (default/agent API)`);
66850
- }
66851
- return {
66852
- pendingApproval: null,
66853
- pendingApprovals: [],
66854
- messageHistory: prepareMessageHistory2(messages, { primaryOnly: true })
66855
- };
66856
- }
66857
- } catch (error) {
66858
- if (error instanceof APIError2 && (error.status === 404 || error.status === 422)) {
66859
- throw error;
66860
- }
66861
- console.error("Error getting resume data:", error);
66862
- return { pendingApproval: null, pendingApprovals: [], messageHistory: [] };
66863
- }
66864
- }
66865
- var BACKFILL_PRIMARY_MESSAGE_LIMIT2 = 12, BACKFILL_MAX_RENDERABLE_MESSAGES2 = 80, BACKFILL_ANCHOR_MESSAGE_LIMIT2 = 6, BACKFILL_PAGE_LIMIT2 = 200, BACKFILL_MAX_PAGES2 = 25, BACKFILL_MIN_ASSISTANT2 = 1;
66866
- var init_check_approval = __esm(() => {
66867
- init_error();
66868
- init_debug();
66869
- });
66870
-
66871
- // src/agent/message.ts
66872
- function getStreamRequestStartTime(stream2) {
66873
- return streamRequestStartTimes.get(stream2);
66874
- }
66875
- function getStreamToolContextId(stream2) {
66876
- return streamToolContextIds.get(stream2) ?? null;
66877
- }
66878
- function getStreamRequestContext(stream2) {
66879
- return streamRequestContexts.get(stream2);
66880
- }
66881
- async function sendMessageStream(conversationId, messages, opts = { streamTokens: true, background: true }, requestOptions = {
66882
- maxRetries: 0
66883
- }) {
66884
- const requestStartTime = isTimingsEnabled() ? performance.now() : undefined;
66885
- const requestStartedAtMs = Date.now();
66886
- const client = await getClient2();
66887
- await waitForToolsetReady();
66888
- const { clientTools, contextId } = captureToolExecutionContext();
66889
- const isDefaultConversation = conversationId === "default";
66890
- if (isDefaultConversation && !opts.agentId) {
66891
- throw new Error("agentId is required in opts when using default conversation");
66892
- }
66893
- const resolvedConversationId = conversationId;
66894
- const requestBody = {
66895
- messages,
66896
- streaming: true,
66897
- stream_tokens: opts.streamTokens ?? true,
66898
- background: opts.background ?? true,
66899
- client_tools: clientTools,
66900
- include_compaction_messages: true,
66901
- ...isDefaultConversation ? { agent_id: opts.agentId } : {}
66902
- };
66903
- if (process.env.DEBUG) {
66904
- console.log(`[DEBUG] sendMessageStream: conversationId=${conversationId}, agentId=${opts.agentId ?? "(none)"}`);
66905
- }
66906
- const stream2 = await client.conversations.messages.create(resolvedConversationId, requestBody, requestOptions);
66907
- if (requestStartTime !== undefined) {
66908
- streamRequestStartTimes.set(stream2, requestStartTime);
66909
- }
66910
- streamToolContextIds.set(stream2, contextId);
66911
- streamRequestContexts.set(stream2, {
66912
- conversationId,
66913
- resolvedConversationId,
66914
- agentId: opts.agentId ?? null,
66915
- requestStartedAtMs
66916
- });
66917
- return stream2;
66918
- }
66919
- var streamRequestStartTimes, streamToolContextIds, streamRequestContexts;
66920
- var init_message = __esm(async () => {
66921
- init_timing();
66922
- await __promiseAll([
66923
- init_manager3(),
66924
- init_client2()
66925
- ]);
66926
- streamRequestStartTimes = new WeakMap;
66927
- streamToolContextIds = new WeakMap;
66928
- streamRequestContexts = new WeakMap;
66929
- });
66930
-
66931
66680
  // src/cli/helpers/errorFormatter.ts
66932
66681
  function extractReasonList2(value) {
66933
66682
  if (!Array.isArray(value))
@@ -67631,6 +67380,359 @@ var init_turn_recovery_policy = __esm(() => {
67631
67380
  RETRYABLE_429_PATTERN = /Error code:\s*429|rate limit|too many requests/i;
67632
67381
  });
67633
67382
 
67383
+ // src/agent/approval-recovery.ts
67384
+ async function fetchRunErrorDetail(runId) {
67385
+ if (!runId)
67386
+ return null;
67387
+ try {
67388
+ const client = await getClient2();
67389
+ const run = await client.runs.retrieve(runId);
67390
+ const metaError = run.metadata?.error;
67391
+ return metaError?.detail ?? metaError?.message ?? metaError?.error?.detail ?? metaError?.error?.message ?? null;
67392
+ } catch {
67393
+ return null;
67394
+ }
67395
+ }
67396
+ var init_approval_recovery = __esm(async () => {
67397
+ init_turn_recovery_policy();
67398
+ await init_client2();
67399
+ });
67400
+
67401
+ // src/agent/check-approval.ts
67402
+ var exports_check_approval = {};
67403
+ __export(exports_check_approval, {
67404
+ prepareMessageHistory: () => prepareMessageHistory2,
67405
+ getResumeData: () => getResumeData2,
67406
+ extractApprovals: () => extractApprovals2
67407
+ });
67408
+ function isPrimaryMessageType2(messageType) {
67409
+ return messageType === "user_message" || messageType === "assistant_message" || messageType === "event_message" || messageType === "summary_message";
67410
+ }
67411
+ function isAnchorMessageType2(messageType) {
67412
+ return messageType === "user_message" || messageType === "assistant_message";
67413
+ }
67414
+ function isBackfillEnabled2() {
67415
+ const val = process.env.LETTA_BACKFILL;
67416
+ return val !== "0" && val !== "false";
67417
+ }
67418
+ function extractApprovals2(messageToCheck) {
67419
+ const approvalMsg = messageToCheck;
67420
+ const toolCalls = Array.isArray(approvalMsg.tool_calls) ? approvalMsg.tool_calls : approvalMsg.tool_call ? [approvalMsg.tool_call] : [];
67421
+ const pendingApprovals = toolCalls.filter((tc) => !!tc && !!tc.tool_call_id).map((tc) => ({
67422
+ toolCallId: tc.tool_call_id,
67423
+ toolName: tc.name || "",
67424
+ toolArgs: tc.arguments || ""
67425
+ }));
67426
+ const pendingApproval = pendingApprovals[0] || null;
67427
+ if (pendingApprovals.length > 0) {
67428
+ debugWarn("check-approval", `Found ${pendingApprovals.length} pending approval(s): ${pendingApprovals.map((a) => a.toolName).join(", ")}`);
67429
+ }
67430
+ return { pendingApproval, pendingApprovals };
67431
+ }
67432
+ function prepareMessageHistory2(messages, opts) {
67433
+ const isRenderable = (msg) => {
67434
+ const t = msg.message_type;
67435
+ if (t === "user_message" || t === "assistant_message" || t === "reasoning_message" || t === "tool_call_message" || t === "tool_return_message" || t === "approval_request_message" || t === "approval_response_message") {
67436
+ return true;
67437
+ }
67438
+ const ts = t;
67439
+ return ts === "event_message" || ts === "summary_message";
67440
+ };
67441
+ const renderable = messages.filter(isRenderable);
67442
+ if (opts?.primaryOnly) {
67443
+ const convo = renderable.filter((m) => isPrimaryMessageType2(m.message_type));
67444
+ let trimmed = convo.slice(-BACKFILL_PRIMARY_MESSAGE_LIMIT2);
67445
+ const hasAssistant = trimmed.some((m) => m.message_type === "assistant_message");
67446
+ if (!hasAssistant) {
67447
+ const lastAssistantIndex = convo.map((m) => m.message_type).lastIndexOf("assistant_message");
67448
+ if (lastAssistantIndex >= 0) {
67449
+ const lastAssistant = convo[lastAssistantIndex];
67450
+ if (lastAssistant) {
67451
+ const tailLimit = Math.max(BACKFILL_PRIMARY_MESSAGE_LIMIT2 - 1, 0);
67452
+ const newestTail = tailLimit > 0 ? convo.slice(-tailLimit) : [];
67453
+ trimmed = [lastAssistant, ...newestTail];
67454
+ }
67455
+ }
67456
+ }
67457
+ if (trimmed.length > 0)
67458
+ return trimmed;
67459
+ const reasoning = renderable.filter((m) => m.message_type === "reasoning_message");
67460
+ if (reasoning.length > 0) {
67461
+ return reasoning.slice(-BACKFILL_PRIMARY_MESSAGE_LIMIT2);
67462
+ }
67463
+ return [];
67464
+ }
67465
+ const isPrimary = (msg) => {
67466
+ const t = msg.message_type;
67467
+ return t === "user_message" || t === "assistant_message" || t === "reasoning_message" || t === "event_message" || t === "summary_message";
67468
+ };
67469
+ let primaryCount = 0;
67470
+ let startIndex = Math.max(0, renderable.length - 1);
67471
+ for (let i = renderable.length - 1;i >= 0; i -= 1) {
67472
+ const msg = renderable[i];
67473
+ if (!msg)
67474
+ continue;
67475
+ if (isPrimary(msg)) {
67476
+ primaryCount += 1;
67477
+ if (primaryCount >= BACKFILL_PRIMARY_MESSAGE_LIMIT2) {
67478
+ startIndex = i;
67479
+ break;
67480
+ }
67481
+ }
67482
+ startIndex = i;
67483
+ }
67484
+ let messageHistory = renderable.slice(startIndex);
67485
+ if (messageHistory.length > BACKFILL_MAX_RENDERABLE_MESSAGES2) {
67486
+ messageHistory = messageHistory.slice(-BACKFILL_MAX_RENDERABLE_MESSAGES2);
67487
+ }
67488
+ if (messageHistory[0]?.message_type === "tool_return_message") {
67489
+ messageHistory = messageHistory.slice(1);
67490
+ }
67491
+ return messageHistory;
67492
+ }
67493
+ function sortChronological2(messages) {
67494
+ return [...messages].sort((a, b) => {
67495
+ const ta = a.date ? new Date(a.date).getTime() : 0;
67496
+ const tb = b.date ? new Date(b.date).getTime() : 0;
67497
+ if (!Number.isFinite(ta) && !Number.isFinite(tb))
67498
+ return 0;
67499
+ if (!Number.isFinite(ta))
67500
+ return -1;
67501
+ if (!Number.isFinite(tb))
67502
+ return 1;
67503
+ return ta - tb;
67504
+ });
67505
+ }
67506
+ async function fetchConversationBackfillMessages2(client, conversationId) {
67507
+ const collected = [];
67508
+ const seen = new Set;
67509
+ let cursorBefore = null;
67510
+ let assistantCount = 0;
67511
+ let anchorCount = 0;
67512
+ for (let pageIndex = 0;pageIndex < BACKFILL_MAX_PAGES2; pageIndex += 1) {
67513
+ const page = await client.conversations.messages.list(conversationId, {
67514
+ limit: BACKFILL_PAGE_LIMIT2,
67515
+ order: "desc",
67516
+ ...cursorBefore ? { before: cursorBefore } : {}
67517
+ });
67518
+ const items = page.getPaginatedItems();
67519
+ if (items.length === 0)
67520
+ break;
67521
+ cursorBefore = items[items.length - 1]?.id ?? null;
67522
+ for (const m of items) {
67523
+ if (!m?.id)
67524
+ continue;
67525
+ const key = "otid" in m && m.otid ? `otid:${String(m.otid)}` : `id:${m.id}:${m.message_type ?? ""}`;
67526
+ if (seen.has(key))
67527
+ continue;
67528
+ seen.add(key);
67529
+ collected.push(m);
67530
+ if (m.message_type === "assistant_message")
67531
+ assistantCount += 1;
67532
+ if (isAnchorMessageType2(m.message_type))
67533
+ anchorCount += 1;
67534
+ }
67535
+ if (assistantCount >= BACKFILL_MIN_ASSISTANT2 && anchorCount >= BACKFILL_ANCHOR_MESSAGE_LIMIT2) {
67536
+ break;
67537
+ }
67538
+ if (items.length < BACKFILL_PAGE_LIMIT2)
67539
+ break;
67540
+ }
67541
+ if (assistantCount < BACKFILL_MIN_ASSISTANT2) {
67542
+ debugWarn("check-approval", `Backfill scan found 0 assistant messages in last ${collected.length} messages (tool-heavy conversation?)`);
67543
+ }
67544
+ return sortChronological2(collected);
67545
+ }
67546
+ async function getResumeData2(client, agent, conversationId, options = {}) {
67547
+ try {
67548
+ const includeMessageHistory = options.includeMessageHistory ?? true;
67549
+ let inContextMessageIds;
67550
+ let messages = [];
67551
+ const useConversationsApi = conversationId && conversationId !== "default";
67552
+ if (useConversationsApi) {
67553
+ const conversation = await client.conversations.retrieve(conversationId);
67554
+ inContextMessageIds = conversation.in_context_message_ids;
67555
+ if (!inContextMessageIds || inContextMessageIds.length === 0) {
67556
+ debugWarn("check-approval", "No in-context messages - no pending approvals");
67557
+ if (includeMessageHistory && isBackfillEnabled2()) {
67558
+ try {
67559
+ const backfill = await fetchConversationBackfillMessages2(client, conversationId);
67560
+ return {
67561
+ pendingApproval: null,
67562
+ pendingApprovals: [],
67563
+ messageHistory: prepareMessageHistory2(backfill, {
67564
+ primaryOnly: true
67565
+ })
67566
+ };
67567
+ } catch (backfillError) {
67568
+ debugWarn("check-approval", `Failed to load message history: ${backfillError instanceof Error ? backfillError.message : String(backfillError)}`);
67569
+ }
67570
+ }
67571
+ return {
67572
+ pendingApproval: null,
67573
+ pendingApprovals: [],
67574
+ messageHistory: []
67575
+ };
67576
+ }
67577
+ const lastInContextId = inContextMessageIds.at(-1);
67578
+ if (!lastInContextId) {
67579
+ throw new Error("Expected at least one in-context message");
67580
+ }
67581
+ const retrievedMessages = await client.messages.retrieve(lastInContextId);
67582
+ if (includeMessageHistory && isBackfillEnabled2()) {
67583
+ try {
67584
+ messages = await fetchConversationBackfillMessages2(client, conversationId);
67585
+ } catch (backfillError) {
67586
+ debugWarn("check-approval", `Failed to load message history: ${backfillError instanceof Error ? backfillError.message : String(backfillError)}`);
67587
+ }
67588
+ }
67589
+ const messageToCheck = retrievedMessages.find((msg) => msg.message_type === "approval_request_message") ?? retrievedMessages[0];
67590
+ if (messageToCheck) {
67591
+ debugWarn("check-approval", `Found last in-context message: ${messageToCheck.id} (type: ${messageToCheck.message_type})` + (retrievedMessages.length > 1 ? ` - had ${retrievedMessages.length} variants` : ""));
67592
+ if (messageToCheck.message_type === "approval_request_message") {
67593
+ const { pendingApproval, pendingApprovals } = extractApprovals2(messageToCheck);
67594
+ return {
67595
+ pendingApproval,
67596
+ pendingApprovals,
67597
+ messageHistory: prepareMessageHistory2(messages, {
67598
+ primaryOnly: true
67599
+ })
67600
+ };
67601
+ }
67602
+ } else {
67603
+ debugWarn("check-approval", `Last in-context message ${lastInContextId} not found via retrieve`);
67604
+ }
67605
+ return {
67606
+ pendingApproval: null,
67607
+ pendingApprovals: [],
67608
+ messageHistory: prepareMessageHistory2(messages, { primaryOnly: true })
67609
+ };
67610
+ } else {
67611
+ inContextMessageIds = agent.message_ids;
67612
+ if (!inContextMessageIds || inContextMessageIds.length === 0) {
67613
+ debugWarn("check-approval", "No in-context messages (default/agent API) - no pending approvals");
67614
+ return {
67615
+ pendingApproval: null,
67616
+ pendingApprovals: [],
67617
+ messageHistory: []
67618
+ };
67619
+ }
67620
+ const lastInContextId = inContextMessageIds.at(-1);
67621
+ if (!lastInContextId) {
67622
+ throw new Error("Expected at least one in-context message");
67623
+ }
67624
+ const retrievedMessages = await client.messages.retrieve(lastInContextId);
67625
+ if (includeMessageHistory && isBackfillEnabled2()) {
67626
+ try {
67627
+ const messagesPage = await client.conversations.messages.list("default", {
67628
+ agent_id: agent.id,
67629
+ limit: BACKFILL_PAGE_LIMIT2,
67630
+ order: "desc"
67631
+ });
67632
+ messages = sortChronological2(messagesPage.getPaginatedItems());
67633
+ if (process.env.DEBUG) {
67634
+ console.log(`[DEBUG] conversations.messages.list(default, agent_id=${agent.id}) returned ${messages.length} messages`);
67635
+ }
67636
+ } catch (backfillError) {
67637
+ debugWarn("check-approval", `Failed to load message history: ${backfillError instanceof Error ? backfillError.message : String(backfillError)}`);
67638
+ }
67639
+ }
67640
+ const messageToCheck = retrievedMessages.find((msg) => msg.message_type === "approval_request_message") ?? retrievedMessages[0];
67641
+ if (messageToCheck) {
67642
+ debugWarn("check-approval", `Found last in-context message: ${messageToCheck.id} (type: ${messageToCheck.message_type})` + (retrievedMessages.length > 1 ? ` - had ${retrievedMessages.length} variants` : ""));
67643
+ if (messageToCheck.message_type === "approval_request_message") {
67644
+ const { pendingApproval, pendingApprovals } = extractApprovals2(messageToCheck);
67645
+ return {
67646
+ pendingApproval,
67647
+ pendingApprovals,
67648
+ messageHistory: prepareMessageHistory2(messages, {
67649
+ primaryOnly: true
67650
+ })
67651
+ };
67652
+ }
67653
+ } else {
67654
+ debugWarn("check-approval", `Last in-context message ${lastInContextId} not found via retrieve (default/agent API)`);
67655
+ }
67656
+ return {
67657
+ pendingApproval: null,
67658
+ pendingApprovals: [],
67659
+ messageHistory: prepareMessageHistory2(messages, { primaryOnly: true })
67660
+ };
67661
+ }
67662
+ } catch (error) {
67663
+ if (error instanceof APIError2 && (error.status === 404 || error.status === 422)) {
67664
+ throw error;
67665
+ }
67666
+ console.error("Error getting resume data:", error);
67667
+ return { pendingApproval: null, pendingApprovals: [], messageHistory: [] };
67668
+ }
67669
+ }
67670
+ var BACKFILL_PRIMARY_MESSAGE_LIMIT2 = 12, BACKFILL_MAX_RENDERABLE_MESSAGES2 = 80, BACKFILL_ANCHOR_MESSAGE_LIMIT2 = 6, BACKFILL_PAGE_LIMIT2 = 200, BACKFILL_MAX_PAGES2 = 25, BACKFILL_MIN_ASSISTANT2 = 1;
67671
+ var init_check_approval = __esm(() => {
67672
+ init_error();
67673
+ init_debug();
67674
+ });
67675
+
67676
+ // src/agent/message.ts
67677
// Returns the performance.now() timestamp recorded when this stream's request
// was issued, or undefined when timing capture was disabled for the request.
function getStreamRequestStartTime(stream2) {
  const startTime = streamRequestStartTimes.get(stream2);
  return startTime;
}
67680
// Looks up the tool-execution context id captured for this stream.
// Normalizes "nothing recorded" to null rather than undefined.
function getStreamToolContextId(stream2) {
  const contextId = streamToolContextIds.get(stream2);
  return contextId === undefined ? null : contextId;
}
67683
// Returns the request-context record (conversation/agent/start time) attached
// to this stream at send time, or undefined when none was recorded.
function getStreamRequestContext(stream2) {
  const context = streamRequestContexts.get(stream2);
  return context;
}
67686
/**
 * Sends a batch of messages to a conversation as a streaming request and tags
 * the returned stream with bookkeeping metadata (timing, tool context id, and
 * request context) in the module-level WeakMaps.
 *
 * Throws when targeting the "default" conversation without an agentId in opts,
 * since the server needs the agent id to route default-conversation traffic.
 */
async function sendMessageStream(conversationId, messages, opts = { streamTokens: true, background: true }, requestOptions = {
  maxRetries: 0
}) {
  // Only pay for performance.now() when timing instrumentation is on.
  const perfStart = isTimingsEnabled() ? performance.now() : undefined;
  const startedAtMs = Date.now();
  const client = await getClient2();
  // Ensure client-side tools are registered before we advertise them.
  await waitForToolsetReady();
  const { clientTools, contextId } = captureToolExecutionContext();
  const targetsDefault = conversationId === "default";
  if (targetsDefault && !opts.agentId) {
    throw new Error("agentId is required in opts when using default conversation");
  }
  const resolvedConversationId = conversationId;
  const requestBody = {
    messages,
    streaming: true,
    stream_tokens: opts.streamTokens ?? true,
    background: opts.background ?? true,
    client_tools: clientTools,
    include_compaction_messages: true
  };
  if (targetsDefault) {
    // Default conversations are addressed by agent id on the server side.
    requestBody.agent_id = opts.agentId;
  }
  if (process.env.DEBUG) {
    console.log(`[DEBUG] sendMessageStream: conversationId=${conversationId}, agentId=${opts.agentId ?? "(none)"}`);
  }
  const stream2 = await client.conversations.messages.create(resolvedConversationId, requestBody, requestOptions);
  if (perfStart !== undefined) {
    streamRequestStartTimes.set(stream2, perfStart);
  }
  streamToolContextIds.set(stream2, contextId);
  streamRequestContexts.set(stream2, {
    conversationId,
    resolvedConversationId,
    agentId: opts.agentId ?? null,
    requestStartedAtMs: startedAtMs
  });
  return stream2;
}
67724
// Per-stream bookkeeping, keyed weakly by the stream object so entries are
// released when the stream is garbage-collected.
var streamRequestStartTimes, streamToolContextIds, streamRequestContexts;
// Bundler module initializer for src/agent/message.ts: wires dependency
// modules first, then allocates the per-stream WeakMaps.
var init_message = __esm(async () => {
  init_timing();
  await __promiseAll([init_manager3(), init_client2()]);
  streamRequestStartTimes = new WeakMap();
  streamToolContextIds = new WeakMap();
  streamRequestContexts = new WeakMap();
});
67735
+
67634
67736
  // src/cli/helpers/backfill.ts
67635
67737
  function getDisplayableToolReturn2(content) {
67636
67738
  if (!content)
@@ -71054,6 +71156,9 @@ function createRuntime() {
71054
71156
  cancelRequested: false,
71055
71157
  isRecoveringApprovals: false,
71056
71158
  pendingApprovalBatchByToolCallId: new Map,
71159
+ pendingInterruptedResults: null,
71160
+ pendingInterruptedContext: null,
71161
+ continuationEpoch: 0,
71057
71162
  coalescedSkipQueueItemIds: new Set,
71058
71163
  pendingTurns: 0,
71059
71164
  queueRuntime: null
@@ -71168,6 +71273,12 @@ function resolvePendingApprovalBatchId(runtime, pendingApprovals) {
71168
71273
  }
71169
71274
  return batchIds.values().next().value ?? null;
71170
71275
  }
71276
+ function resolveRecoveryBatchId(runtime, pendingApprovals) {
71277
+ if (runtime.pendingApprovalBatchByToolCallId.size === 0) {
71278
+ return `recovery-${crypto.randomUUID()}`;
71279
+ }
71280
+ return resolvePendingApprovalBatchId(runtime, pendingApprovals);
71281
+ }
71171
71282
  function clearPendingApprovalBatchIds(runtime, approvals) {
71172
71283
  for (const approval of approvals) {
71173
71284
  runtime.pendingApprovalBatchByToolCallId.delete(approval.toolCallId);
@@ -71182,6 +71293,9 @@ function stopRuntime(runtime, suppressCallbacks) {
71182
71293
  clearRuntimeTimers(runtime);
71183
71294
  rejectPendingApprovalResolvers(runtime, "Listener runtime stopped");
71184
71295
  runtime.pendingApprovalBatchByToolCallId.clear();
71296
+ runtime.pendingInterruptedResults = null;
71297
+ runtime.pendingInterruptedContext = null;
71298
+ runtime.continuationEpoch++;
71185
71299
  if (!runtime.socket) {
71186
71300
  return;
71187
71301
  }
@@ -71366,9 +71480,201 @@ function emitToWS(socket, event) {
71366
71480
  socket.send(JSON.stringify(outbound));
71367
71481
  }
71368
71482
  }
71483
// Coerces an arbitrary tool return value into a string payload:
// strings pass through, null/undefined become "", and everything else is
// JSON-encoded (falling back to String() when serialization throws, e.g. on
// circular structures or BigInt).
function normalizeToolReturnValue(value) {
  if (value === null || value === undefined) {
    return "";
  }
  if (typeof value === "string") {
    return value;
  }
  try {
    return JSON.stringify(value);
  } catch {
    return String(value);
  }
}
71496
// Converts a heterogeneous list of approval/execution results into tool-return
// records suitable for emitting as tool_return_message events.
// - "tool" entries keep their status (success only when explicitly "success"),
//   string-only stdout/stderr, and a normalized tool_return payload.
// - "approval" entries become error returns carrying the denial reason.
// Entries with an unknown shape or a non-string tool_call_id are dropped.
function extractInterruptToolReturns(approvals) {
  const toolReturns = [];
  if (!approvals || approvals.length === 0) {
    return toolReturns;
  }
  for (const approval of approvals) {
    if (!approval || typeof approval !== "object" || !("type" in approval)) {
      continue;
    }
    const toolCallId = "tool_call_id" in approval && typeof approval.tool_call_id === "string" ? approval.tool_call_id : null;
    if (approval.type === "tool") {
      if (!toolCallId) {
        continue;
      }
      const status = "status" in approval && approval.status === "success" ? "success" : "error";
      const stdout = "stdout" in approval && Array.isArray(approval.stdout) ? approval.stdout.filter((entry) => typeof entry === "string") : undefined;
      const stderr = "stderr" in approval && Array.isArray(approval.stderr) ? approval.stderr.filter((entry) => typeof entry === "string") : undefined;
      const record = {
        tool_call_id: toolCallId,
        status,
        tool_return: "tool_return" in approval ? normalizeToolReturnValue(approval.tool_return) : ""
      };
      if (stdout) {
        record.stdout = stdout;
      }
      if (stderr) {
        record.stderr = stderr;
      }
      toolReturns.push(record);
    } else if (approval.type === "approval") {
      if (!toolCallId) {
        continue;
      }
      const reason = "reason" in approval && typeof approval.reason === "string" ? approval.reason : "User interrupted the stream";
      toolReturns.push({
        tool_call_id: toolCallId,
        status: "error",
        tool_return: reason
      });
    }
  }
  return toolReturns;
}
71539
+ function emitInterruptToolReturnMessage(socket, runtime, approvals, runId, uuidPrefix = "interrupt-tool-return") {
71540
+ const toolReturns = extractInterruptToolReturns(approvals);
71541
+ if (toolReturns.length === 0) {
71542
+ return;
71543
+ }
71544
+ const resolvedRunId = runId ?? runtime.activeRunId ?? undefined;
71545
+ for (const toolReturn of toolReturns) {
71546
+ emitToWS(socket, {
71547
+ type: "message",
71548
+ message_type: "tool_return_message",
71549
+ id: `message-${crypto.randomUUID()}`,
71550
+ date: new Date().toISOString(),
71551
+ run_id: resolvedRunId,
71552
+ tool_call_id: toolReturn.tool_call_id,
71553
+ tool_return: toolReturn.tool_return,
71554
+ status: toolReturn.status,
71555
+ ...toolReturn.stdout ? { stdout: toolReturn.stdout } : {},
71556
+ ...toolReturn.stderr ? { stderr: toolReturn.stderr } : {},
71557
+ tool_returns: [toolReturn],
71558
+ session_id: runtime.sessionId,
71559
+ uuid: `${uuidPrefix}-${crypto.randomUUID()}`
71560
+ });
71561
+ }
71562
+ }
71563
// Decides which approval results should be surfaced as tool returns when a run
// is interrupted: prefer the current turn's execution results; otherwise fall
// back to the queued interrupted results, but only when they were queued for
// the same agent, conversation, and continuation epoch. Returns null when
// nothing applicable is available.
function getInterruptApprovalsForEmission(runtime, params) {
  const { lastExecutionResults, agentId, conversationId } = params;
  if (lastExecutionResults && lastExecutionResults.length > 0) {
    return lastExecutionResults;
  }
  const context3 = runtime.pendingInterruptedContext;
  const contextMatches = context3 && context3.agentId === agentId && context3.conversationId === conversationId && context3.continuationEpoch === runtime.continuationEpoch;
  if (!contextMatches) {
    return null;
  }
  const queued = runtime.pendingInterruptedResults;
  return queued && queued.length > 0 ? queued : null;
}
71576
// Queues results for an interrupted approval loop so the NEXT turn can settle
// the server-side pending approvals before sending new messages. Prefers real
// execution results; otherwise synthesizes denial approvals from the known
// pending tool_call_ids. Returns true when something was queued, false when
// the queue was already populated or no tool_call_ids were available.
function populateInterruptQueue(runtime, input) {
  const alreadyQueued = runtime.pendingInterruptedResults && runtime.pendingInterruptedResults.length > 0 && runtime.pendingInterruptedContext;
  if (alreadyQueued) {
    return false;
  }
  // Stamps the queue with the originating agent/conversation/epoch so a later
  // consume can reject stale entries.
  const stampContext = () => {
    runtime.pendingInterruptedContext = {
      agentId: input.agentId,
      conversationId: input.conversationId,
      continuationEpoch: runtime.continuationEpoch
    };
  };
  if (input.lastExecutionResults && input.lastExecutionResults.length > 0) {
    runtime.pendingInterruptedResults = input.lastExecutionResults;
    stampContext();
    return true;
  }
  const batchToolCallIds = [...runtime.pendingApprovalBatchByToolCallId.keys()];
  const pendingIds = batchToolCallIds.length > 0 ? batchToolCallIds : input.lastNeedsUserInputToolCallIds;
  if (pendingIds.length > 0) {
    runtime.pendingInterruptedResults = pendingIds.map((toolCallId) => ({
      type: "approval",
      tool_call_id: toolCallId,
      approve: false,
      reason: "User interrupted the stream"
    }));
    stampContext();
    return true;
  }
  if (process.env.DEBUG) {
    console.warn("[Listen] Cancel during approval loop but no tool_call_ids available " + "for interrupted queue — next turn may hit pre-stream conflict. " + `batchMap=${runtime.pendingApprovalBatchByToolCallId.size}, ` + `lastNeedsUserInput=${input.lastNeedsUserInputToolCallIds.length}`);
  }
  return false;
}
71610
// Drains the queued interrupted-approval results. The queue and the batch
// correlation map are always cleared; a synthetic approval message is returned
// only when the queued context matches the requesting agent, conversation, and
// continuation epoch (stale entries are dropped silently).
function consumeInterruptQueue(runtime, agentId, conversationId) {
  const queued = runtime.pendingInterruptedResults;
  if (!queued || queued.length === 0) {
    return null;
  }
  const ctx = runtime.pendingInterruptedContext;
  const matches = ctx && ctx.agentId === agentId && ctx.conversationId === conversationId && ctx.continuationEpoch === runtime.continuationEpoch;
  runtime.pendingInterruptedResults = null;
  runtime.pendingInterruptedContext = null;
  runtime.pendingApprovalBatchByToolCallId.clear();
  if (!matches) {
    return null;
  }
  return {
    type: "approval",
    approvals: queued
  };
}
71627
// Auto-denies any approvals the server still considers pending for the active
// agent/conversation, then drains the resulting recovery stream to completion.
// Used before retrying a send that failed with a pre-stream approval conflict.
// 404/422 from the agent or resume lookups are treated as "nothing to do"
// (agent/conversation gone); every other error propagates to the caller.
async function resolveStaleApprovals(runtime, abortSignal) {
  if (!runtime.activeAgentId) {
    return;
  }
  const client = await getClient2();
  let agent;
  try {
    agent = await client.agents.retrieve(runtime.activeAgentId);
  } catch (err) {
    if (err instanceof APIError2 && (err.status === 404 || err.status === 422)) {
      return;
    }
    throw err;
  }
  // "default" routes by agent id, so only pass an explicit conversation id.
  const hasExplicitConversation = runtime.activeConversationId && runtime.activeConversationId !== "default";
  const requestedConversationId = hasExplicitConversation ? runtime.activeConversationId : undefined;
  let resumeData;
  try {
    resumeData = await getResumeData2(client, agent, requestedConversationId, {
      includeMessageHistory: false
    });
  } catch (err) {
    if (err instanceof APIError2 && (err.status === 404 || err.status === 422)) {
      return;
    }
    throw err;
  }
  const pendingApprovals = resumeData.pendingApprovals || [];
  if (pendingApprovals.length === 0) {
    return;
  }
  if (abortSignal.aborted) {
    throw new Error("Cancelled");
  }
  const denialResults = pendingApprovals.map((approval) => ({
    type: "approval",
    tool_call_id: approval.toolCallId,
    approve: false,
    reason: "Auto-denied during pre-stream approval recovery"
  }));
  const recoveryConversationId = runtime.activeConversationId || "default";
  const recoveryStream = await sendMessageStream(recoveryConversationId, [{ type: "approval", approvals: denialResults }], {
    agentId: runtime.activeAgentId,
    streamTokens: true,
    background: true
  }, { maxRetries: 0, signal: abortSignal });
  const drainResult = await drainStreamWithResume(recoveryStream, createBuffers(runtime.activeAgentId), () => {}, abortSignal);
  if (drainResult.stopReason === "error") {
    throw new Error("Pre-stream approval recovery drain ended with error");
  }
}
71369
71674
  async function sendMessageStreamWithRetry(conversationId, messages, opts, socket, runtime, abortSignal) {
71370
71675
  let transientRetries = 0;
71371
71676
  let conversationBusyRetries = 0;
71677
+ let preStreamRecoveryAttempts = 0;
71372
71678
  const MAX_CONVERSATION_BUSY_RETRIES = 1;
71373
71679
  while (true) {
71374
71680
  if (abortSignal?.aborted) {
@@ -71387,7 +71693,20 @@ async function sendMessageStreamWithRetry(conversationId, messages, opts, socket
71387
71693
  maxTransientRetries: LLM_API_ERROR_MAX_RETRIES
71388
71694
  });
71389
71695
  if (action === "resolve_approval_pending") {
71390
- throw preStreamError;
71696
+ if (abortSignal?.aborted)
71697
+ throw new Error("Cancelled by user");
71698
+ if (abortSignal && preStreamRecoveryAttempts < MAX_PRE_STREAM_RECOVERY) {
71699
+ preStreamRecoveryAttempts++;
71700
+ try {
71701
+ await resolveStaleApprovals(runtime, abortSignal);
71702
+ continue;
71703
+ } catch (_recoveryError) {
71704
+ if (abortSignal.aborted)
71705
+ throw new Error("Cancelled by user");
71706
+ }
71707
+ }
71708
+ const detail = await fetchRunErrorDetail(runtime.activeRunId);
71709
+ throw new Error(detail || `Pre-stream approval conflict (resolve_approval_pending) after ${preStreamRecoveryAttempts} recovery attempts`);
71391
71710
  }
71392
71711
  if (action === "retry_transient") {
71393
71712
  const attempt = transientRetries + 1;
@@ -71529,6 +71848,13 @@ function buildApprovalExecutionPlan(approvalMessage, pendingApprovals) {
71529
71848
  return { slots, decisions };
71530
71849
  }
71531
71850
  async function recoverPendingApprovals(runtime, socket, msg) {
71851
+ console.debug("[listener] recover_pending_approvals received", JSON.stringify({
71852
+ agentId: msg.agentId,
71853
+ conversationId: msg.conversationId ?? null,
71854
+ isProcessing: runtime.isProcessing,
71855
+ isRecovering: runtime.isRecoveringApprovals,
71856
+ batchMapSize: runtime.pendingApprovalBatchByToolCallId.size
71857
+ }));
71532
71858
  if (runtime.isProcessing || runtime.isRecoveringApprovals) {
71533
71859
  return;
71534
71860
  }
@@ -71557,11 +71883,11 @@ async function recoverPendingApprovals(runtime, socket, msg) {
71557
71883
  if (pendingApprovals.length === 0) {
71558
71884
  return;
71559
71885
  }
71560
- const recoveryBatchId = resolvePendingApprovalBatchId(runtime, pendingApprovals);
71886
+ const recoveryBatchId = resolveRecoveryBatchId(runtime, pendingApprovals);
71561
71887
  if (!recoveryBatchId) {
71562
71888
  emitToWS(socket, {
71563
71889
  type: "error",
71564
- message: "Unable to recover pending approvals without originating batch correlation",
71890
+ message: "Unable to recover pending approvals: ambiguous batch correlation",
71565
71891
  stop_reason: "error",
71566
71892
  session_id: runtime.sessionId,
71567
71893
  uuid: `error-${crypto.randomUUID()}`
@@ -71604,6 +71930,7 @@ async function recoverPendingApprovals(runtime, socket, msg) {
71604
71930
  runtime.lastStopReason = "requires_approval";
71605
71931
  return;
71606
71932
  }
71933
+ runtime.lastStopReason = "requires_approval";
71607
71934
  for (const ac of needsUserInput) {
71608
71935
  const requestId = `perm-${ac.approval.toolCallId}`;
71609
71936
  const diffs = await computeDiffPreviews(ac.approval.toolName, ac.parsedArgs);
@@ -71816,6 +72143,14 @@ async function connectWithRetry(runtime, opts, attempt = 0, startTime = Date.now
71816
72143
  if (hasPendingApprovals) {
71817
72144
  rejectPendingApprovalResolvers(runtime, "Cancelled by user");
71818
72145
  }
72146
+ const cancelConversationId = runtime.activeConversationId;
72147
+ const cancelAgentId = runtime.activeAgentId;
72148
+ if (cancelAgentId) {
72149
+ getClient2().then((client) => {
72150
+ const cancelId = cancelConversationId === "default" || !cancelConversationId ? cancelAgentId : cancelConversationId;
72151
+ return client.conversations.cancel(cancelId);
72152
+ }).catch(() => {});
72153
+ }
71819
72154
  emitCancelAck(socket, runtime, {
71820
72155
  requestId,
71821
72156
  accepted: true,
@@ -72014,6 +72349,8 @@ async function handleIncomingMessage(msg, socket, runtime, onStatusChange, conne
72014
72349
  const msgStartTime = performance.now();
72015
72350
  let msgTurnCount = 0;
72016
72351
  const msgRunIds = [];
72352
+ let lastExecutionResults = null;
72353
+ let lastNeedsUserInputToolCallIds = [];
72017
72354
  runtime.isProcessing = true;
72018
72355
  runtime.cancelRequested = false;
72019
72356
  runtime.activeAbortController = new AbortController;
@@ -72036,8 +72373,13 @@ async function handleIncomingMessage(msg, socket, runtime, onStatusChange, conne
72036
72373
  if (connectionId) {
72037
72374
  onStatusChange?.("processing", connectionId);
72038
72375
  }
72039
- let messagesToSend = msg.messages;
72376
+ let messagesToSend = [];
72040
72377
  let turnToolContextId = null;
72378
+ const consumed = consumeInterruptQueue(runtime, agentId || "", conversationId);
72379
+ if (consumed) {
72380
+ messagesToSend.push(consumed);
72381
+ }
72382
+ messagesToSend.push(...msg.messages);
72041
72383
  const firstMessage = msg.messages[0];
72042
72384
  const isApprovalMessage = firstMessage && "type" in firstMessage && firstMessage.type === "approval" && "approvals" in firstMessage;
72043
72385
  if (isApprovalMessage) {
@@ -72078,6 +72420,7 @@ async function handleIncomingMessage(msg, socket, runtime, onStatusChange, conne
72078
72420
  approvals: rebuiltApprovals
72079
72421
  }
72080
72422
  ];
72423
+ emitInterruptToolReturnMessage(socket, runtime, rebuiltApprovals, runtime.activeRunId ?? undefined, "tool-return");
72081
72424
  }
72082
72425
  let stream2 = await sendMessageStreamWithRetry(conversationId, messagesToSend, { agentId, streamTokens: true, background: true }, socket, runtime, runtime.activeAbortController.signal);
72083
72426
  turnToolContextId = getStreamToolContextId(stream2);
@@ -72185,13 +72528,45 @@ async function handleIncomingMessage(msg, socket, runtime, onStatusChange, conne
72185
72528
  break;
72186
72529
  }
72187
72530
  if (stopReason !== "requires_approval") {
72188
- runtime.lastStopReason = stopReason;
72531
+ const effectiveStopReason = runtime.cancelRequested ? "cancelled" : stopReason || "error";
72532
+ if (effectiveStopReason === "cancelled") {
72533
+ runtime.lastStopReason = "cancelled";
72534
+ runtime.isProcessing = false;
72535
+ clearActiveRunState(runtime);
72536
+ if (runtime.controlResponseCapable) {
72537
+ emitToWS(socket, {
72538
+ type: "result",
72539
+ subtype: "interrupted",
72540
+ agent_id: agentId,
72541
+ conversation_id: conversationId,
72542
+ duration_ms: performance.now() - msgStartTime,
72543
+ duration_api_ms: 0,
72544
+ num_turns: msgTurnCount,
72545
+ result: null,
72546
+ run_ids: msgRunIds,
72547
+ usage: null,
72548
+ stop_reason: "cancelled",
72549
+ session_id: runtime.sessionId,
72550
+ uuid: `result-${crypto.randomUUID()}`
72551
+ });
72552
+ } else {
72553
+ sendClientMessage(socket, {
72554
+ type: "result",
72555
+ success: false,
72556
+ stopReason: "cancelled"
72557
+ });
72558
+ }
72559
+ break;
72560
+ }
72561
+ runtime.lastStopReason = effectiveStopReason;
72189
72562
  runtime.isProcessing = false;
72190
72563
  clearActiveRunState(runtime);
72564
+ const errorDetail = await fetchRunErrorDetail(runId).catch(() => null);
72565
+ const errorMessage = errorDetail || `Unexpected stop reason: ${stopReason}`;
72191
72566
  emitToWS(socket, {
72192
72567
  type: "error",
72193
- message: `Unexpected stop reason: ${stopReason}`,
72194
- stop_reason: stopReason || "error",
72568
+ message: errorMessage,
72569
+ stop_reason: effectiveStopReason,
72195
72570
  run_id: runId,
72196
72571
  session_id: runtime.sessionId,
72197
72572
  uuid: `error-${crypto.randomUUID()}`
@@ -72208,7 +72583,7 @@ async function handleIncomingMessage(msg, socket, runtime, onStatusChange, conne
72208
72583
  result: null,
72209
72584
  run_ids: msgRunIds,
72210
72585
  usage: null,
72211
- stop_reason: stopReason || "error",
72586
+ stop_reason: effectiveStopReason,
72212
72587
  session_id: runtime.sessionId,
72213
72588
  uuid: `result-${crypto.randomUUID()}`
72214
72589
  });
@@ -72216,7 +72591,7 @@ async function handleIncomingMessage(msg, socket, runtime, onStatusChange, conne
72216
72591
  sendClientMessage(socket, {
72217
72592
  type: "result",
72218
72593
  success: false,
72219
- stopReason
72594
+ stopReason: effectiveStopReason
72220
72595
  });
72221
72596
  }
72222
72597
  break;
@@ -72238,6 +72613,8 @@ async function handleIncomingMessage(msg, socket, runtime, onStatusChange, conne
72238
72613
  treatAskAsDeny: false,
72239
72614
  requireArgsForAutoApprove: true
72240
72615
  });
72616
+ lastNeedsUserInputToolCallIds = needsUserInput.map((ac) => ac.approval.toolCallId);
72617
+ lastExecutionResults = null;
72241
72618
  for (const ac of autoAllowed) {
72242
72619
  emitToWS(socket, {
72243
72620
  type: "auto_approval",
@@ -72332,6 +72709,8 @@ async function handleIncomingMessage(msg, socket, runtime, onStatusChange, conne
72332
72709
  toolContextId: turnToolContextId ?? undefined,
72333
72710
  abortSignal: runtime.activeAbortController.signal
72334
72711
  });
72712
+ lastExecutionResults = executionResults;
72713
+ emitInterruptToolReturnMessage(socket, runtime, executionResults, runtime.activeRunId || runId || msgRunIds[msgRunIds.length - 1] || undefined, "tool-return");
72335
72714
  clearPendingApprovalBatchIds(runtime, decisions.map((decision) => decision.approval));
72336
72715
  stream2 = await sendMessageStreamWithRetry(conversationId, [
72337
72716
  {
@@ -72339,10 +72718,26 @@ async function handleIncomingMessage(msg, socket, runtime, onStatusChange, conne
72339
72718
  approvals: executionResults
72340
72719
  }
72341
72720
  ], { agentId, streamTokens: true, background: true }, socket, runtime, runtime.activeAbortController.signal);
72721
+ lastExecutionResults = null;
72722
+ lastNeedsUserInputToolCallIds = [];
72342
72723
  turnToolContextId = getStreamToolContextId(stream2);
72343
72724
  }
72344
72725
  } catch (error) {
72345
72726
  if (runtime.cancelRequested) {
72727
+ populateInterruptQueue(runtime, {
72728
+ lastExecutionResults,
72729
+ lastNeedsUserInputToolCallIds,
72730
+ agentId: agentId || "",
72731
+ conversationId
72732
+ });
72733
+ const approvalsForEmission = getInterruptApprovalsForEmission(runtime, {
72734
+ lastExecutionResults,
72735
+ agentId: agentId || "",
72736
+ conversationId
72737
+ });
72738
+ if (approvalsForEmission) {
72739
+ emitInterruptToolReturnMessage(socket, runtime, approvalsForEmission, runtime.activeRunId || msgRunIds[msgRunIds.length - 1] || undefined);
72740
+ }
72346
72741
  runtime.lastStopReason = "cancelled";
72347
72742
  runtime.isProcessing = false;
72348
72743
  clearActiveRunState(runtime);
@@ -72374,6 +72769,22 @@ async function handleIncomingMessage(msg, socket, runtime, onStatusChange, conne
72374
72769
  runtime.lastStopReason = "error";
72375
72770
  runtime.isProcessing = false;
72376
72771
  clearActiveRunState(runtime);
72772
+ if (msgRunIds.length === 0) {
72773
+ const errorPayload = {
72774
+ message: error instanceof Error ? error.message : String(error)
72775
+ };
72776
+ if (error instanceof APIError2) {
72777
+ errorPayload.status = error.status;
72778
+ if (error.error && typeof error.error === "object") {
72779
+ errorPayload.body = error.error;
72780
+ }
72781
+ }
72782
+ sendClientMessage(socket, {
72783
+ type: "run_request_error",
72784
+ error: errorPayload,
72785
+ batch_id: dequeuedBatchId
72786
+ });
72787
+ }
72377
72788
  const errorMessage = error instanceof Error ? error.message : String(error);
72378
72789
  emitToWS(socket, {
72379
72790
  type: "error",
@@ -72424,7 +72835,7 @@ function stopListenerClient() {
72424
72835
  activeRuntime = null;
72425
72836
  stopRuntime(runtime, true);
72426
72837
  }
72427
- var activeRuntime = null, MAX_RETRY_DURATION_MS, INITIAL_RETRY_DELAY_MS = 1000, MAX_RETRY_DELAY_MS = 30000, LLM_API_ERROR_MAX_RETRIES = 3, __listenClientTestUtils;
72838
+ var activeRuntime = null, MAX_RETRY_DURATION_MS, INITIAL_RETRY_DELAY_MS = 1000, MAX_RETRY_DELAY_MS = 30000, LLM_API_ERROR_MAX_RETRIES = 3, MAX_PRE_STREAM_RECOVERY = 2, __listenClientTestUtils;
72428
72839
  var init_listen_client = __esm(async () => {
72429
72840
  init_error();
72430
72841
  init_check_approval();
@@ -72435,6 +72846,7 @@ var init_listen_client = __esm(async () => {
72435
72846
  init_interactivePolicy();
72436
72847
  await __promiseAll([
72437
72848
  init_approval_execution(),
72849
+ init_approval_recovery(),
72438
72850
  init_client2(),
72439
72851
  init_message(),
72440
72852
  init_accumulator(),
@@ -72450,7 +72862,13 @@ var init_listen_client = __esm(async () => {
72450
72862
  emitToWS,
72451
72863
  rememberPendingApprovalBatchIds,
72452
72864
  resolvePendingApprovalBatchId,
72453
- clearPendingApprovalBatchIds
72865
+ resolveRecoveryBatchId,
72866
+ clearPendingApprovalBatchIds,
72867
+ populateInterruptQueue,
72868
+ consumeInterruptQueue,
72869
+ extractInterruptToolReturns,
72870
+ emitInterruptToolReturnMessage,
72871
+ getInterruptApprovalsForEmission
72454
72872
  };
72455
72873
  });
72456
72874
 
@@ -74104,24 +74522,6 @@ var init_cli2 = __esm(() => {
74104
74522
  cliPermissions2 = new CliPermissions2;
74105
74523
  });
74106
74524
 
74107
- // src/agent/approval-recovery.ts
74108
- async function fetchRunErrorDetail(runId) {
74109
- if (!runId)
74110
- return null;
74111
- try {
74112
- const client = await getClient2();
74113
- const run = await client.runs.retrieve(runId);
74114
- const metaError = run.metadata?.error;
74115
- return metaError?.detail ?? metaError?.message ?? metaError?.error?.detail ?? metaError?.error?.message ?? null;
74116
- } catch {
74117
- return null;
74118
- }
74119
- }
74120
- var init_approval_recovery = __esm(async () => {
74121
- init_turn_recovery_policy();
74122
- await init_client2();
74123
- });
74124
-
74125
74525
  // src/agent/listMessagesRouting.ts
74126
74526
  function resolveListMessagesRoute(listReq, sessionConvId, sessionAgentId) {
74127
74527
  const targetConvId = listReq.conversation_id ?? sessionConvId;
@@ -91084,7 +91484,11 @@ var init_InputRich = __esm(async () => {
91084
91484
  }, [backgroundAgents.length]);
91085
91485
  const bgAgentText = backgroundAgents.length === 0 ? "" : backgroundAgents.map((a) => {
91086
91486
  const elapsedS = Math.round((Date.now() - a.startTime) / 1000);
91087
- return `${a.type.toLowerCase()} (${elapsedS}s)`;
91487
+ const agentId = a.agentURL?.match(/\/agents\/([^/]+)/)?.[1];
91488
+ const chatUrl = agentId ? buildChatUrl(agentId) : null;
91489
+ const typeLabel = a.type.toLowerCase();
91490
+ const linkedType = chatUrl ? `\x1B]8;;${chatUrl}\x1B\\${typeLabel}\x1B]8;;\x1B\\` : typeLabel;
91491
+ return `${linkedType} (${elapsedS}s)`;
91088
91492
  }).join(" · ");
91089
91493
  const bgIndicatorWidth = backgroundAgents.length > 0 ? 2 + stringWidth(bgAgentText) + 3 : 0;
91090
91494
  const maxAgentChars = Math.max(10, Math.floor(rightColumnWidth * 0.45));
@@ -97438,12 +97842,11 @@ var init_ModelReasoningSelector = __esm(async () => {
97438
97842
  });
97439
97843
 
97440
97844
  // src/cli/components/ModelSelector.tsx
97441
// Returns the ordered model-picker category list. Self-hosted servers get only
// server-derived categories; hosted deployments use one fixed ordering for all
// billing tiers (the billingTier argument is intentionally unused, kept for
// call-site compatibility).
function getModelCategories(_billingTier, isSelfHosted) {
  return isSelfHosted ? ["server-recommended", "server-all"] : ["supported", "all", "byok", "byok-all"];
}
97448
97851
  function filterModelsByAvailabilityForSelector(typedModels, availableHandles, allApiHandles) {
97449
97852
  if (availableHandles === null) {
@@ -97522,7 +97925,6 @@ function ModelSelector({
97522
97925
  const staticCandidates = typedModels.filter((m) => m.handle === handle);
97523
97926
  return staticCandidates.find((m) => m.isDefault) ?? staticCandidates.find((m) => m.isFeatured) ?? staticCandidates.find((m) => m.updateArgs?.reasoning_effort === "medium") ?? staticCandidates.find((m) => m.updateArgs?.reasoning_effort === "high") ?? staticCandidates[0];
97524
97927
  }, [typedModels]);
97525
- const isFreeTier = billingTier?.toLowerCase() === "free";
97526
97928
  const supportedModels = import_react79.useMemo(() => {
97527
97929
  if (availableHandles === undefined)
97528
97930
  return [];
@@ -97542,13 +97944,6 @@ function ModelSelector({
97542
97944
  seen.add(m.handle);
97543
97945
  deduped.push(pickPreferredStaticModel(m.handle) ?? m);
97544
97946
  }
97545
- if (isFreeTier) {
97546
- const freeModels = deduped.filter((m) => m.free);
97547
- const paidModels = deduped.filter((m) => !m.free);
97548
- const featured2 = paidModels.filter((m) => m.isFeatured);
97549
- const nonFeatured2 = paidModels.filter((m) => !m.isFeatured);
97550
- return [...freeModels, ...featured2, ...nonFeatured2];
97551
- }
97552
97947
  const featured = deduped.filter((m) => m.isFeatured);
97553
97948
  const nonFeatured = deduped.filter((m) => !m.isFeatured);
97554
97949
  return [...featured, ...nonFeatured];
@@ -97558,7 +97953,6 @@ function ModelSelector({
97558
97953
  allApiHandles,
97559
97954
  filterProvider,
97560
97955
  searchQuery,
97561
- isFreeTier,
97562
97956
  pickPreferredStaticModel
97563
97957
  ]);
97564
97958
  const isByokHandle = import_react79.useCallback((handle) => BYOK_PROVIDER_PREFIXES.some((prefix) => handle.startsWith(prefix)), []);
@@ -107151,7 +107545,7 @@ function App2({
107151
107545
  if (cancelled)
107152
107546
  return;
107153
107547
  debugLog("conversation-model", "Failed to sync conversation model override: %O", error);
107154
- applyAgentModelLocally();
107548
+ debugLog("conversation-model", "Keeping current model state after sync error (override in DB is authoritative)");
107155
107549
  }
107156
107550
  };
107157
107551
  syncConversationModel();
@@ -120397,7 +120791,7 @@ function replaceRegistry2(newTools) {
120397
120791
  toolRegistry2.set(key, value);
120398
120792
  }
120399
120793
  }
120400
- async function loadTools2(modelIdentifier) {
120794
+ async function loadTools2(modelIdentifier, options) {
120401
120795
  acquireSwitchLock2();
120402
120796
  try {
120403
120797
  const { toolFilter: toolFilter2 } = await Promise.resolve().then(() => (init_filter(), exports_filter));
@@ -120418,6 +120812,10 @@ async function loadTools2(modelIdentifier) {
120418
120812
  } else {
120419
120813
  baseToolNames = TOOL_NAMES2;
120420
120814
  }
120815
+ if (options?.exclude && options.exclude.length > 0) {
120816
+ const excludeSet = new Set(options.exclude);
120817
+ baseToolNames = baseToolNames.filter((name) => !excludeSet.has(name));
120818
+ }
120421
120819
  const newRegistry = new Map;
120422
120820
  for (const name of baseToolNames) {
120423
120821
  if (!toolFilter2.isEnabled(name)) {
@@ -121250,7 +121648,7 @@ Error: ${message}`);
121250
121648
  if (isHeadless) {
121251
121649
  markMilestone2("HEADLESS_MODE_START");
121252
121650
  const modelForTools = getModelForToolLoading(specifiedModel, specifiedToolset);
121253
- await loadTools2(modelForTools);
121651
+ await loadTools2(modelForTools, { exclude: ["AskUserQuestion"] });
121254
121652
  markMilestone2("TOOLS_LOADED");
121255
121653
  const headlessValues = specifiedAgentId && values.agent !== specifiedAgentId ? { ...values, agent: specifiedAgentId } : values;
121256
121654
  const { handleHeadlessCommand: handleHeadlessCommand2 } = await init_headless().then(() => exports_headless);
@@ -122010,4 +122408,4 @@ Error during initialization: ${message}`);
122010
122408
  }
122011
122409
  main();
122012
122410
 
122013
- //# debugId=B02B981CD008C36564756E2164756E21
122411
+ //# debugId=FE66CA162A20D40F64756E2164756E21