@letta-ai/letta-code 0.21.14 → 0.21.15

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/letta.js +59 -21
  2. package/package.json +1 -1
package/letta.js CHANGED
@@ -3269,7 +3269,7 @@ var package_default;
3269
3269
  var init_package = __esm(() => {
3270
3270
  package_default = {
3271
3271
  name: "@letta-ai/letta-code",
3272
- version: "0.21.14",
3272
+ version: "0.21.15",
3273
3273
  description: "Letta Code is a CLI tool for interacting with stateful Letta agents from the terminal.",
3274
3274
  type: "module",
3275
3275
  bin: {
@@ -73286,6 +73286,9 @@ function isCloudflareEdge52xHtmlError2(text) {
73286
73286
  const has52xCode = CLOUDFLARE_EDGE_5XX_MARKER_PATTERN2.test(text) || CLOUDFLARE_EDGE_5XX_TITLE_PATTERN2.test(text);
73287
73287
  return hasCloudflare && hasHtml && has52xCode;
73288
73288
  }
73289
+ function isCloudflareEdge52xErrorText(text) {
73290
+ return CLOUDFLARE_EDGE_5XX_FORMATTED_PATTERN.test(text) || isCloudflareEdge52xHtmlError2(text);
73291
+ }
73289
73292
  function parseCloudflareEdgeError2(text) {
73290
73293
  if (!isCloudflareEdge52xHtmlError2(text))
73291
73294
  return;
@@ -73638,7 +73641,7 @@ Upgrade at: ${LETTA_USAGE_URL2}
73638
73641
  Delete ${resourceType} at: ${LETTA_AGENTS_URL2}`;
73639
73642
  }
73640
73643
  if (isCreditExhaustedError2(e, reasons)) {
73641
- return `Your account is out of credits for hosted inference. Add credits, enable auto-recharge, or upgrade at ${LETTA_USAGE_URL2}. You can also connect your own provider keys with /connect.`;
73644
+ return `Your account does not have credits for this model. Add your own API keys or upgrade your plan to purchase credits.`;
73642
73645
  }
73643
73646
  const tierUsageLimitMsg = getTierUsageLimitMessage2(reasons);
73644
73647
  if (tierUsageLimitMsg)
@@ -73704,7 +73707,7 @@ ${createAgentLink2(runId, agentId, conversationId)}` : baseError;
73704
73707
  function getRetryStatusMessage(errorDetail) {
73705
73708
  if (!errorDetail)
73706
73709
  return DEFAULT_RETRY_MESSAGE;
73707
- if (parseCloudflareEdgeError2(errorDetail))
73710
+ if (isCloudflareEdge52xErrorText(errorDetail))
73708
73711
  return null;
73709
73712
  if (checkZaiError(errorDetail))
73710
73713
  return "Z.ai API error, retrying...";
@@ -73731,7 +73734,7 @@ function createAgentLink2(runId, agentId, conversationId) {
73731
73734
  const url = buildChatUrl(agentId, { conversationId });
73732
73735
  return `View agent: \x1B]8;;${url}\x1B\\${agentId}\x1B]8;;\x1B\\ (run: ${runId})`;
73733
73736
  }
73734
- var LETTA_USAGE_URL2, LETTA_AGENTS_URL2, CLOUDFLARE_EDGE_5XX_MARKER_PATTERN2, CLOUDFLARE_EDGE_5XX_TITLE_PATTERN2, CHATGPT_USAGE_LIMIT_HINT2 = "Switch models with /model, or connect your own provider keys with /connect.", ENCRYPTED_CONTENT_HINT2, DEFAULT_RETRY_MESSAGE = "Unexpected downstream LLM API error, retrying...", ENDPOINT_TYPE_DISPLAY_NAMES;
73737
+ var LETTA_USAGE_URL2, LETTA_AGENTS_URL2, CLOUDFLARE_EDGE_5XX_MARKER_PATTERN2, CLOUDFLARE_EDGE_5XX_TITLE_PATTERN2, CLOUDFLARE_EDGE_5XX_FORMATTED_PATTERN, CHATGPT_USAGE_LIMIT_HINT2 = "Switch models with /model, or connect your own provider keys with /connect.", ENCRYPTED_CONTENT_HINT2, DEFAULT_RETRY_MESSAGE = "Unexpected downstream LLM API error, retrying...", ENDPOINT_TYPE_DISPLAY_NAMES;
73735
73738
  var init_errorFormatter = __esm(() => {
73736
73739
  init_error();
73737
73740
  init_errorContext();
@@ -73740,6 +73743,7 @@ var init_errorFormatter = __esm(() => {
73740
73743
  LETTA_AGENTS_URL2 = buildAppUrl("/projects/default-project/agents");
73741
73744
  CLOUDFLARE_EDGE_5XX_MARKER_PATTERN2 = /(^|\s)(502|52[0-6])\s*<!doctype html|error code\s*(502|52[0-6])/i;
73742
73745
  CLOUDFLARE_EDGE_5XX_TITLE_PATTERN2 = /\|\s*(502|52[0-6])\s*:/i;
73746
+ CLOUDFLARE_EDGE_5XX_FORMATTED_PATTERN = /\bCloudflare\s+(502|52[0-6])\b/i;
73743
73747
  ENCRYPTED_CONTENT_HINT2 = [
73744
73748
  "",
73745
73749
  "This occurs when the conversation contains messages with encrypted",
@@ -73766,7 +73770,7 @@ import { randomUUID as randomUUID3 } from "node:crypto";
73766
73770
  function isCloudflareEdge52xDetail(detail) {
73767
73771
  if (typeof detail !== "string")
73768
73772
  return false;
73769
- return isCloudflareEdge52xHtmlError2(detail);
73773
+ return isCloudflareEdge52xErrorText(detail);
73770
73774
  }
73771
73775
  function isQuotaLimitErrorDetail(detail) {
73772
73776
  return hasNonRetryableQuotaDetail(detail);
@@ -80106,7 +80110,7 @@ function shouldAttemptPostStopApprovalRecovery(params) {
80106
80110
  maxRetries: MAX_POST_STOP_APPROVAL_RECOVERY
80107
80111
  });
80108
80112
  }
80109
- async function isRetriablePostStopError(stopReason, lastRunId) {
80113
+ async function isRetriablePostStopError(stopReason, lastRunId, fallbackDetail) {
80110
80114
  if (stopReason === "llm_api_error") {
80111
80115
  return true;
80112
80116
  }
@@ -80124,7 +80128,7 @@ async function isRetriablePostStopError(stopReason, lastRunId) {
80124
80128
  return false;
80125
80129
  }
80126
80130
  if (!lastRunId) {
80127
- return false;
80131
+ return shouldRetryRunMetadataError(undefined, fallbackDetail);
80128
80132
  }
80129
80133
  try {
80130
80134
  const client = await getClient();
@@ -80134,7 +80138,7 @@ async function isRetriablePostStopError(stopReason, lastRunId) {
80134
80138
  const detail = metaError?.detail ?? metaError?.error?.detail ?? "";
80135
80139
  return shouldRetryRunMetadataError(errorType, detail);
80136
80140
  } catch {
80137
- return false;
80141
+ return shouldRetryRunMetadataError(undefined, fallbackDetail);
80138
80142
  }
80139
80143
  }
80140
80144
  async function drainRecoveryStreamWithEmission(recoveryStream, socket, runtime, params) {
@@ -81931,7 +81935,7 @@ async function handleIncomingMessage(msg, socket, runtime, onStatusChange, conne
81931
81935
  turnToolContextId = getStreamToolContextId(stream2);
81932
81936
  continue;
81933
81937
  }
81934
- const retriable = await isRetriablePostStopError(stopReason || "error", lastRunId);
81938
+ const retriable = await isRetriablePostStopError(stopReason || "error", lastRunId, errorDetail);
81935
81939
  if (retriable && llmApiErrorRetries < LLM_API_ERROR_MAX_RETRIES) {
81936
81940
  llmApiErrorRetries += 1;
81937
81941
  const attempt = llmApiErrorRetries;
@@ -89896,12 +89900,16 @@ ${loadedContents.join(`
89896
89900
  "tool_rule",
89897
89901
  "no_tool_call"
89898
89902
  ];
89899
- if (nonRetriableReasons.includes(stopReason)) {} else if (lastRunId && llmApiErrorRetries < LLM_API_ERROR_MAX_RETRIES2) {
89903
+ if (nonRetriableReasons.includes(stopReason)) {} else if (llmApiErrorRetries < LLM_API_ERROR_MAX_RETRIES2) {
89900
89904
  try {
89901
- const run = await client.runs.retrieve(lastRunId);
89902
- const metaError = run.metadata?.error;
89903
- const errorType = metaError?.error_type ?? metaError?.error?.error_type;
89904
- const detail = metaError?.detail ?? metaError?.error?.detail ?? "";
89905
+ let errorType;
89906
+ let detail = detailFromRun ?? latestErrorText ?? "";
89907
+ if (lastRunId) {
89908
+ const run = await client.runs.retrieve(lastRunId);
89909
+ const metaError = run.metadata?.error;
89910
+ errorType = metaError?.error_type ?? metaError?.error?.error_type;
89911
+ detail = metaError?.detail ?? metaError?.error?.detail ?? detail;
89912
+ }
89905
89913
  if (isEmptyResponseRetryable(errorType, detail, emptyResponseRetries, EMPTY_RESPONSE_MAX_RETRIES2)) {
89906
89914
  const attempt = emptyResponseRetries + 1;
89907
89915
  const delayMs = getRetryDelayMs({
@@ -89964,7 +89972,37 @@ ${loadedContents.join(`
89964
89972
  refreshCurrentInputOtids();
89965
89973
  continue;
89966
89974
  }
89967
- } catch (_e) {}
89975
+ } catch (_e) {
89976
+ if (shouldRetryRunMetadataError(undefined, detailFromRun ?? latestErrorText)) {
89977
+ const attempt = llmApiErrorRetries + 1;
89978
+ const detail = detailFromRun ?? latestErrorText;
89979
+ const delayMs = getRetryDelayMs({
89980
+ category: "transient_provider",
89981
+ attempt,
89982
+ detail
89983
+ });
89984
+ llmApiErrorRetries = attempt;
89985
+ if (outputFormat === "stream-json") {
89986
+ const retryMsg = {
89987
+ type: "retry",
89988
+ reason: "llm_api_error",
89989
+ attempt,
89990
+ max_attempts: LLM_API_ERROR_MAX_RETRIES2,
89991
+ delay_ms: delayMs,
89992
+ run_id: lastRunId ?? undefined,
89993
+ session_id: sessionId,
89994
+ uuid: `retry-${lastRunId || randomUUID8()}`
89995
+ };
89996
+ console.log(JSON.stringify(retryMsg));
89997
+ } else {
89998
+ const delaySeconds = Math.round(delayMs / 1000);
89999
+ console.error(`LLM API error encountered (attempt ${attempt} of ${LLM_API_ERROR_MAX_RETRIES2}), retrying in ${delaySeconds}s...`);
90000
+ }
90001
+ await new Promise((resolve27) => setTimeout(resolve27, delayMs));
90002
+ refreshCurrentInputOtids();
90003
+ continue;
90004
+ }
90005
+ }
89968
90006
  }
89969
90007
  markIncompleteToolsAsCancelled(buffers, true, "stream_error");
89970
90008
  const errorLines = toLines(buffers).filter((line) => line.kind === "error");
@@ -134489,7 +134527,7 @@ function sendDesktopNotification(message = "Awaiting your input", level = "info"
134489
134527
  debugLog("hooks", "Notification hook error", error);
134490
134528
  });
134491
134529
  }
134492
- async function isRetriableError(stopReason, lastRunId) {
134530
+ async function isRetriableError(stopReason, lastRunId, fallbackDetail) {
134493
134531
  if (stopReason === "llm_api_error")
134494
134532
  return true;
134495
134533
  const nonRetriableReasons = [
@@ -134516,10 +134554,10 @@ async function isRetriableError(stopReason, lastRunId) {
134516
134554
  }
134517
134555
  return false;
134518
134556
  } catch {
134519
- return false;
134557
+ return shouldRetryRunMetadataError(undefined, fallbackDetail);
134520
134558
  }
134521
134559
  }
134522
- return false;
134560
+ return shouldRetryRunMetadataError(undefined, fallbackDetail);
134523
134561
  }
134524
134562
  function saveLastSessionBeforeExit(conversationId) {
134525
134563
  try {
@@ -137450,7 +137488,7 @@ ${feedback}
137450
137488
  buffersRef.current.interrupted = false;
137451
137489
  continue;
137452
137490
  }
137453
- const retriable = await isRetriableError(stopReasonToHandle, lastRunId);
137491
+ const retriable = await isRetriableError(stopReasonToHandle, lastRunId, detailFromRun ?? latestErrorText ?? fallbackError);
137454
137492
  if (retriable && llmApiErrorRetriesRef.current < LLM_API_ERROR_MAX_RETRIES3) {
137455
137493
  llmApiErrorRetriesRef.current += 1;
137456
137494
  const attempt = llmApiErrorRetriesRef.current;
@@ -147248,7 +147286,7 @@ Upgrade at: ${LETTA_USAGE_URL}
147248
147286
  Delete ${resourceType} at: ${LETTA_AGENTS_URL}`;
147249
147287
  }
147250
147288
  if (isCreditExhaustedError(e, reasons)) {
147251
- return `Your account is out of credits for hosted inference. Add credits, enable auto-recharge, or upgrade at ${LETTA_USAGE_URL}. You can also connect your own provider keys with /connect.`;
147289
+ return `Your account does not have credits for this model. Add your own API keys or upgrade your plan to purchase credits.`;
147252
147290
  }
147253
147291
  const tierUsageLimitMsg = getTierUsageLimitMessage(reasons);
147254
147292
  if (tierUsageLimitMsg)
@@ -153546,4 +153584,4 @@ Error during initialization: ${message}`);
153546
153584
  }
153547
153585
  main();
153548
153586
 
153549
- //# debugId=DEDBAA251791817164756E2164756E21
153587
+ //# debugId=5769741D74E7325364756E2164756E21
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@letta-ai/letta-code",
3
- "version": "0.21.14",
3
+ "version": "0.21.15",
4
4
  "description": "Letta Code is a CLI tool for interacting with stateful Letta agents from the terminal.",
5
5
  "type": "module",
6
6
  "bin": {