@letta-ai/letta-code 0.24.1 → 0.24.2

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/letta.js +141 -4
  2. package/package.json +1 -1
package/letta.js CHANGED
@@ -3269,7 +3269,7 @@ var package_default;
3269
3269
  var init_package = __esm(() => {
3270
3270
  package_default = {
3271
3271
  name: "@letta-ai/letta-code",
3272
- version: "0.24.1",
3272
+ version: "0.24.2",
3273
3273
  description: "Letta Code is a CLI tool for interacting with stateful Letta agents from the terminal.",
3274
3274
  type: "module",
3275
3275
  bin: {
@@ -9157,6 +9157,72 @@ var init_models2 = __esm(() => {
9157
9157
  parallel_tool_calls: true
9158
9158
  }
9159
9159
  },
9160
+ {
9161
+ id: "gpt-5.5-none",
9162
+ handle: "openai/gpt-5.5",
9163
+ label: "GPT-5.5",
9164
+ description: "OpenAI's most capable model (no reasoning)",
9165
+ updateArgs: {
9166
+ reasoning_effort: "none",
9167
+ verbosity: "medium",
9168
+ context_window: 272000,
9169
+ max_output_tokens: 128000,
9170
+ parallel_tool_calls: true
9171
+ }
9172
+ },
9173
+ {
9174
+ id: "gpt-5.5-low",
9175
+ handle: "openai/gpt-5.5",
9176
+ label: "GPT-5.5",
9177
+ description: "OpenAI's most capable model (low reasoning)",
9178
+ updateArgs: {
9179
+ reasoning_effort: "low",
9180
+ verbosity: "medium",
9181
+ context_window: 272000,
9182
+ max_output_tokens: 128000,
9183
+ parallel_tool_calls: true
9184
+ }
9185
+ },
9186
+ {
9187
+ id: "gpt-5.5-medium",
9188
+ handle: "openai/gpt-5.5",
9189
+ label: "GPT-5.5",
9190
+ description: "OpenAI's most capable model (med reasoning)",
9191
+ updateArgs: {
9192
+ reasoning_effort: "medium",
9193
+ verbosity: "medium",
9194
+ context_window: 272000,
9195
+ max_output_tokens: 128000,
9196
+ parallel_tool_calls: true
9197
+ }
9198
+ },
9199
+ {
9200
+ id: "gpt-5.5-high",
9201
+ handle: "openai/gpt-5.5",
9202
+ label: "GPT-5.5",
9203
+ description: "OpenAI's most capable model (high reasoning)",
9204
+ isFeatured: true,
9205
+ updateArgs: {
9206
+ reasoning_effort: "high",
9207
+ verbosity: "medium",
9208
+ context_window: 272000,
9209
+ max_output_tokens: 128000,
9210
+ parallel_tool_calls: true
9211
+ }
9212
+ },
9213
+ {
9214
+ id: "gpt-5.5-xhigh",
9215
+ handle: "openai/gpt-5.5",
9216
+ label: "GPT-5.5",
9217
+ description: "OpenAI's most capable model (max reasoning)",
9218
+ updateArgs: {
9219
+ reasoning_effort: "xhigh",
9220
+ verbosity: "medium",
9221
+ context_window: 272000,
9222
+ max_output_tokens: 128000,
9223
+ parallel_tool_calls: true
9224
+ }
9225
+ },
9160
9226
  {
9161
9227
  id: "gpt-5.4-none",
9162
9228
  handle: "openai/gpt-5.4",
@@ -9288,6 +9354,46 @@ var init_models2 = __esm(() => {
9288
9354
  parallel_tool_calls: true
9289
9355
  }
9290
9356
  },
9357
+ {
9358
+ id: "gpt-5.5-pro-medium",
9359
+ handle: "openai/gpt-5.5-pro",
9360
+ label: "GPT-5.5 Pro",
9361
+ description: "GPT-5.5 Pro — max performance variant (med reasoning)",
9362
+ updateArgs: {
9363
+ reasoning_effort: "medium",
9364
+ verbosity: "medium",
9365
+ context_window: 272000,
9366
+ max_output_tokens: 128000,
9367
+ parallel_tool_calls: true
9368
+ }
9369
+ },
9370
+ {
9371
+ id: "gpt-5.5-pro-high",
9372
+ handle: "openai/gpt-5.5-pro",
9373
+ label: "GPT-5.5 Pro",
9374
+ description: "GPT-5.5 Pro — max performance variant (high reasoning)",
9375
+ updateArgs: {
9376
+ reasoning_effort: "high",
9377
+ verbosity: "medium",
9378
+ context_window: 272000,
9379
+ max_output_tokens: 128000,
9380
+ parallel_tool_calls: true
9381
+ },
9382
+ isFeatured: true
9383
+ },
9384
+ {
9385
+ id: "gpt-5.5-pro-xhigh",
9386
+ handle: "openai/gpt-5.5-pro",
9387
+ label: "GPT-5.5 Pro",
9388
+ description: "GPT-5.5 Pro — max performance variant (max reasoning)",
9389
+ updateArgs: {
9390
+ reasoning_effort: "xhigh",
9391
+ verbosity: "medium",
9392
+ context_window: 272000,
9393
+ max_output_tokens: 128000,
9394
+ parallel_tool_calls: true
9395
+ }
9396
+ },
9291
9397
  {
9292
9398
  id: "gpt-5.4-pro-medium",
9293
9399
  handle: "openai/gpt-5.4-pro",
@@ -79649,6 +79755,20 @@ var init_byok_providers = __esm(() => {
79649
79755
  providerType: "google_ai",
79650
79756
  providerName: "lc-gemini"
79651
79757
  },
79758
+ {
79759
+ id: "moonshot",
79760
+ displayName: "Moonshot AI",
79761
+ description: "Connect a Moonshot AI API key",
79762
+ providerType: "moonshot",
79763
+ providerName: "lc-moonshot"
79764
+ },
79765
+ {
79766
+ id: "kimi-code",
79767
+ displayName: "Kimi Code",
79768
+ description: "Connect a Kimi Code API key",
79769
+ providerType: "moonshot_coding",
79770
+ providerName: "lc-kimi-code"
79771
+ },
79652
79772
  {
79653
79773
  id: "openrouter",
79654
79774
  displayName: "OpenRouter API",
@@ -79699,6 +79819,8 @@ var init_byok_providers = __esm(() => {
79699
79819
  google_ai: "google_ai",
79700
79820
  google_vertex: "google_vertex",
79701
79821
  minimax: "minimax",
79822
+ moonshot: "moonshot",
79823
+ moonshot_coding: "moonshot_coding",
79702
79824
  openrouter: "openrouter",
79703
79825
  bedrock: "bedrock"
79704
79826
  };
@@ -79768,6 +79890,8 @@ var init_connect_normalize = __esm(() => {
79768
79890
  zai: "zai",
79769
79891
  "zai-coding": "zai-coding",
79770
79892
  minimax: "minimax",
79893
+ moonshot: "moonshot",
79894
+ "kimi-code": "kimi-code",
79771
79895
  gemini: "gemini",
79772
79896
  openrouter: "openrouter",
79773
79897
  bedrock: "bedrock"
@@ -79779,6 +79903,8 @@ var init_connect_normalize = __esm(() => {
79779
79903
  "zai",
79780
79904
  "zai-coding",
79781
79905
  "minimax",
79906
+ "moonshot",
79907
+ "kimi-code",
79782
79908
  "gemini",
79783
79909
  "openrouter",
79784
79910
  "bedrock"
@@ -102276,6 +102402,15 @@ async function flushAndExit(code) {
102276
102402
  ]);
102277
102403
  process.exit(code);
102278
102404
  }
102405
+ async function writeFinalHeadlessStdout(text) {
102406
+ await new Promise((resolve31) => {
102407
+ if (process.stdout.destroyed || process.stdout.writableEnded) {
102408
+ resolve31();
102409
+ return;
102410
+ }
102411
+ process.stdout.write(text, () => resolve31());
102412
+ });
102413
+ }
102279
102414
  async function handleHeadlessCommand(parsedArgs, model, skillsDirectoryOverride, skillSourcesOverride, systemInfoReminderEnabledOverride) {
102280
102415
  const { values, positionals } = parsedArgs;
102281
102416
  telemetry.setSurface("headless");
@@ -103734,7 +103869,8 @@ ${loadedContents.join(`
103734
103869
  conversation_id: conversationId,
103735
103870
  usage
103736
103871
  };
103737
- console.log(JSON.stringify(output, null, 2));
103872
+ await writeFinalHeadlessStdout(`${JSON.stringify(output, null, 2)}
103873
+ `);
103738
103874
  } else if (outputFormat === "stream-json") {
103739
103875
  const allRunIds = new Set;
103740
103876
  for (const line of toLines(buffers)) {
@@ -103763,7 +103899,8 @@ ${loadedContents.join(`
103763
103899
  console.error("No assistant response found");
103764
103900
  await exitHeadless(1, "headless_missing_result_text");
103765
103901
  }
103766
- console.log(resultText);
103902
+ await writeFinalHeadlessStdout(`${resultText}
103903
+ `);
103767
103904
  }
103768
103905
  markMilestone("HEADLESS_COMPLETE");
103769
103906
  reportAllMilestones();
@@ -168068,4 +168205,4 @@ Error during initialization: ${message}`);
168068
168205
  }
168069
168206
  main();
168070
168207
 
168071
- //# debugId=E3F9508D8DDEF15664756E2164756E21
168208
+ //# debugId=876580101EC255F264756E2164756E21
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@letta-ai/letta-code",
3
- "version": "0.24.1",
3
+ "version": "0.24.2",
4
4
  "description": "Letta Code is a CLI tool for interacting with stateful Letta agents from the terminal.",
5
5
  "type": "module",
6
6
  "bin": {