@letta-ai/letta-code 0.21.2 → 0.21.3

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (2)
  1. package/letta.js +617 -323
  2. package/package.json +1 -1
package/letta.js CHANGED
@@ -3269,7 +3269,7 @@ var package_default
  var init_package = __esm(() => {
  package_default = {
  name: "@letta-ai/letta-code",
- version: "0.21.2",
+ version: "0.21.3",
  description: "Letta Code is a CLI tool for interacting with stateful Letta agents from the terminal.",
  type: "module",
  bin: {
@@ -7586,6 +7586,19 @@ var init_models2 = __esm(() => {
  parallel_tool_calls: true
  }
  },
+ {
+ id: "auto-chat",
+ handle: "letta/auto-chat",
+ label: "Auto Chat",
+ description: "Automatically select the best model for chat",
+ isFeatured: true,
+ free: true,
+ updateArgs: {
+ context_window: 140000,
+ max_output_tokens: 28000,
+ parallel_tool_calls: true
+ }
+ },
  {
  id: "sonnet",
  handle: "anthropic/claude-sonnet-4-6",
@@ -37315,6 +37328,32 @@ var init_WelcomeScreen = __esm(async () => {
  });

  // src/providers/byok-providers.ts
+ function buildByokProviderAliases(connectedProviders = []) {
+ const aliases = {};
+ for (const bp of BYOK_PROVIDERS) {
+ const base2 = PROVIDER_TYPE_TO_BASE_PROVIDER[bp.providerType];
+ if (base2) {
+ aliases[bp.providerName] = base2;
+ }
+ }
+ for (const provider of connectedProviders) {
+ const base2 = PROVIDER_TYPE_TO_BASE_PROVIDER[provider.provider_type];
+ if (base2) {
+ aliases[provider.name] = base2;
+ }
+ }
+ return aliases;
+ }
+ function isByokHandleForSelector(handle, byokProviderAliases) {
+ if (STATIC_BYOK_PROVIDER_PREFIXES.some((prefix) => handle.startsWith(prefix))) {
+ return true;
+ }
+ const slashIndex = handle.indexOf("/");
+ if (slashIndex === -1)
+ return false;
+ const provider = handle.slice(0, slashIndex);
+ return provider in byokProviderAliases;
+ }
  async function getLettaConfig2() {
  const settings = await settingsManager.getSettingsWithSecureTokens();
  const baseUrl = process.env.LETTA_BASE_URL || settings.env?.LETTA_BASE_URL || LETTA_CLOUD_API_URL;
@@ -37405,7 +37444,7 @@ async function removeProviderByName(providerName) {
  function getProviderConfig(id) {
  return BYOK_PROVIDERS.find((p) => p.id === id);
  }
- var BYOK_PROVIDERS;
+ var BYOK_PROVIDERS, STATIC_BYOK_PROVIDER_PREFIXES, PROVIDER_TYPE_TO_BASE_PROVIDER;
  var init_byok_providers = __esm(async () => {
  init_http_headers();
  await __promiseAll([
@@ -37503,6 +37542,19 @@ var init_byok_providers = __esm(async () => {
  ]
  }
  ];
+ STATIC_BYOK_PROVIDER_PREFIXES = ["chatgpt-plus-pro/", "lc-"];
+ PROVIDER_TYPE_TO_BASE_PROVIDER = {
+ chatgpt_oauth: "chatgpt-plus-pro",
+ anthropic: "anthropic",
+ openai: "openai",
+ zai: "zai",
+ zai_coding: "zai",
+ google_ai: "google_ai",
+ google_vertex: "google_vertex",
+ minimax: "minimax",
+ openrouter: "openrouter",
+ bedrock: "bedrock"
+ };
  });

  // src/cli/commands/connect-normalize.ts
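These helpers, moved here from ModelSelector.tsx (which loses its private copies later in this diff), classify a model handle as BYOK by its provider prefix: either a static prefix or the name of a connected provider. A self-contained sketch of the same classification (constants copied from the hunk; the pass over the static BYOK_PROVIDERS list is omitted, and the connected-provider record is a made-up example):

```js
const STATIC_BYOK_PROVIDER_PREFIXES = ["chatgpt-plus-pro/", "lc-"];
const PROVIDER_TYPE_TO_BASE_PROVIDER = {
  chatgpt_oauth: "chatgpt-plus-pro",
  anthropic: "anthropic",
  openai: "openai",
  zai: "zai",
  zai_coding: "zai",
  google_ai: "google_ai",
  google_vertex: "google_vertex",
  minimax: "minimax",
  openrouter: "openrouter",
  bedrock: "bedrock"
};

// Same shape as buildByokProviderAliases, minus the static BYOK_PROVIDERS pass.
function buildAliases(connectedProviders) {
  const aliases = {};
  for (const provider of connectedProviders) {
    const base = PROVIDER_TYPE_TO_BASE_PROVIDER[provider.provider_type];
    if (base) aliases[provider.name] = base;
  }
  return aliases;
}

function isByokHandle(handle, aliases) {
  if (STATIC_BYOK_PROVIDER_PREFIXES.some((p) => handle.startsWith(p))) return true;
  const slash = handle.indexOf("/");
  if (slash === -1) return false;
  return handle.slice(0, slash) in aliases;
}

// "my-anthropic" is a hypothetical user-connected provider name.
const aliases = buildAliases([{ name: "my-anthropic", provider_type: "anthropic" }]);
console.log(isByokHandle("my-anthropic/claude-sonnet-4-6", aliases)); // true
console.log(isByokHandle("letta/auto-chat", aliases)); // false
```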
@@ -40901,6 +40953,265 @@ var init_build4 = __esm(async () => {
  build_default2 = Spinner;
  });

+ // src/agent/modify.ts
+ var exports_modify = {};
+ __export(exports_modify, {
+ updateConversationLLMConfig: () => updateConversationLLMConfig,
+ updateAgentSystemPromptRaw: () => updateAgentSystemPromptRaw2,
+ updateAgentSystemPromptMemfs: () => updateAgentSystemPromptMemfs,
+ updateAgentSystemPrompt: () => updateAgentSystemPrompt2,
+ updateAgentLLMConfig: () => updateAgentLLMConfig2,
+ recompileAgentSystemPrompt: () => recompileAgentSystemPrompt
+ });
+ function buildModelSettings2(modelHandle, updateArgs) {
+ const isOpenAI = modelHandle.startsWith("openai/") || modelHandle.startsWith(`${OPENAI_CODEX_PROVIDER_NAME}/`);
+ const isAnthropic = modelHandle.startsWith("anthropic/") || modelHandle.startsWith("claude-pro-max/") || modelHandle.startsWith("minimax/");
+ const isZai = modelHandle.startsWith("zai/");
+ const isGoogleAI = modelHandle.startsWith("google_ai/");
+ const isGoogleVertex = modelHandle.startsWith("google_vertex/");
+ const isOpenRouter = modelHandle.startsWith("openrouter/");
+ const isBedrock = modelHandle.startsWith("bedrock/");
+ let settings;
+ if (isOpenAI || isOpenRouter) {
+ const openaiSettings = {
+ provider_type: "openai",
+ parallel_tool_calls: true
+ };
+ if (updateArgs?.reasoning_effort) {
+ openaiSettings.reasoning = {
+ reasoning_effort: updateArgs.reasoning_effort
+ };
+ }
+ const verbosity = updateArgs?.verbosity;
+ if (verbosity === "low" || verbosity === "medium" || verbosity === "high") {
+ openaiSettings.verbosity = verbosity;
+ }
+ if (typeof updateArgs?.strict === "boolean") {
+ openaiSettings.strict = updateArgs.strict;
+ }
+ settings = openaiSettings;
+ } else if (isAnthropic) {
+ const anthropicSettings = {
+ provider_type: "anthropic",
+ parallel_tool_calls: true
+ };
+ const effort = updateArgs?.reasoning_effort;
+ if (effort === "low" || effort === "medium" || effort === "high") {
+ anthropicSettings.effort = effort;
+ } else if (effort === "xhigh") {
+ anthropicSettings.effort = "max";
+ }
+ if (updateArgs?.enable_reasoner !== undefined || typeof updateArgs?.max_reasoning_tokens === "number") {
+ anthropicSettings.thinking = {
+ type: updateArgs?.enable_reasoner === false ? "disabled" : "enabled",
+ ...typeof updateArgs?.max_reasoning_tokens === "number" && {
+ budget_tokens: updateArgs.max_reasoning_tokens
+ }
+ };
+ }
+ if (typeof updateArgs?.strict === "boolean") {
+ anthropicSettings.strict = updateArgs.strict;
+ }
+ settings = anthropicSettings;
+ } else if (isZai) {
+ settings = {
+ provider_type: "zai",
+ parallel_tool_calls: true
+ };
+ } else if (isGoogleAI) {
+ const googleSettings = {
+ provider_type: "google_ai",
+ parallel_tool_calls: true
+ };
+ if (updateArgs?.thinking_budget !== undefined) {
+ googleSettings.thinking_config = {
+ thinking_budget: updateArgs.thinking_budget
+ };
+ }
+ if (typeof updateArgs?.temperature === "number") {
+ googleSettings.temperature = updateArgs.temperature;
+ }
+ settings = googleSettings;
+ } else if (isGoogleVertex) {
+ const googleVertexSettings = {
+ provider_type: "google_vertex",
+ parallel_tool_calls: true
+ };
+ if (updateArgs?.thinking_budget !== undefined) {
+ googleVertexSettings.thinking_config = {
+ thinking_budget: updateArgs.thinking_budget
+ };
+ }
+ if (typeof updateArgs?.temperature === "number") {
+ googleVertexSettings.temperature = updateArgs.temperature;
+ }
+ settings = googleVertexSettings;
+ } else if (isBedrock) {
+ const bedrockSettings = {
+ provider_type: "bedrock",
+ parallel_tool_calls: true
+ };
+ const effort = updateArgs?.reasoning_effort;
+ if (effort === "low" || effort === "medium" || effort === "high") {
+ bedrockSettings.effort = effort;
+ } else if (effort === "xhigh") {
+ bedrockSettings.effort = "max";
+ }
+ if (updateArgs?.enable_reasoner !== undefined || typeof updateArgs?.max_reasoning_tokens === "number") {
+ bedrockSettings.thinking = {
+ type: updateArgs?.enable_reasoner === false ? "disabled" : "enabled",
+ ...typeof updateArgs?.max_reasoning_tokens === "number" && {
+ budget_tokens: updateArgs.max_reasoning_tokens
+ }
+ };
+ }
+ settings = bedrockSettings;
+ } else {
+ const openaiProxySettings = {
+ provider_type: "openai",
+ parallel_tool_calls: typeof updateArgs?.parallel_tool_calls === "boolean" ? updateArgs.parallel_tool_calls : true
+ };
+ if (typeof updateArgs?.strict === "boolean") {
+ openaiProxySettings.strict = updateArgs.strict;
+ }
+ settings = openaiProxySettings;
+ }
+ if (typeof updateArgs?.max_output_tokens === "number" && "provider_type" in settings) {
+ settings.max_output_tokens = updateArgs.max_output_tokens;
+ }
+ return settings;
+ }
+ async function updateAgentLLMConfig2(agentId, modelHandle, updateArgs, options) {
+ const client = await getClient();
+ const modelSettings = buildModelSettings2(modelHandle, updateArgs);
+ const explicitContextWindow = updateArgs?.context_window;
+ const shouldPreserveContextWindow = options?.preserveContextWindow === true;
+ const contextWindow = explicitContextWindow ?? (!shouldPreserveContextWindow ? await getModelContextWindow(modelHandle) : undefined);
+ const hasModelSettings = Object.keys(modelSettings).length > 0;
+ await client.agents.update(agentId, {
+ model: modelHandle,
+ ...hasModelSettings && { model_settings: modelSettings },
+ ...contextWindow && { context_window_limit: contextWindow },
+ ...(typeof updateArgs?.max_output_tokens === "number" || updateArgs?.max_output_tokens === null) && {
+ max_tokens: updateArgs.max_output_tokens
+ }
+ });
+ const finalAgent = await client.agents.retrieve(agentId);
+ return finalAgent;
+ }
+ async function updateConversationLLMConfig(conversationId, modelHandle, updateArgs) {
+ const client = await getClient();
+ const modelSettings = buildModelSettings2(modelHandle, updateArgs);
+ const hasModelSettings = Object.keys(modelSettings).length > 0;
+ const payload = {
+ model: modelHandle,
+ ...hasModelSettings && { model_settings: modelSettings }
+ };
+ return client.conversations.update(conversationId, payload);
+ }
+ async function recompileAgentSystemPrompt(conversationId, agentId, dryRun, clientOverride) {
+ const client = clientOverride ?? await getClient();
+ if (!agentId) {
+ throw new Error("recompileAgentSystemPrompt requires agentId");
+ }
+ const params = {
+ dry_run: dryRun,
+ agent_id: agentId
+ };
+ return client.conversations.recompile(conversationId, params);
+ }
+ async function updateAgentSystemPromptRaw2(agentId, systemPromptContent) {
+ try {
+ const client = await getClient();
+ await client.agents.update(agentId, {
+ system: systemPromptContent
+ });
+ return {
+ success: true,
+ message: "System prompt updated successfully"
+ };
+ } catch (error) {
+ return {
+ success: false,
+ message: `Failed to update system prompt: ${error instanceof Error ? error.message : String(error)}`
+ };
+ }
+ }
+ async function updateAgentSystemPrompt2(agentId, systemPromptId) {
+ try {
+ const { isKnownPreset: isKnownPreset2, resolveAndBuildSystemPrompt: resolveAndBuildSystemPrompt2 } = await Promise.resolve().then(() => (init_promptAssets(), exports_promptAssets));
+ const { settingsManager: settingsManager2 } = await init_settings_manager().then(() => exports_settings_manager);
+ const client = await getClient();
+ const memoryMode = settingsManager2.isReady && settingsManager2.isMemfsEnabled(agentId) ? "memfs" : "standard";
+ const systemPromptContent = await resolveAndBuildSystemPrompt2(systemPromptId, memoryMode);
+ debugLog("modify", "systemPromptContent: %s", systemPromptContent);
+ const updateResult = await updateAgentSystemPromptRaw2(agentId, systemPromptContent);
+ if (!updateResult.success) {
+ return {
+ success: false,
+ message: updateResult.message,
+ agent: null
+ };
+ }
+ if (settingsManager2.isReady) {
+ if (isKnownPreset2(systemPromptId)) {
+ settingsManager2.setSystemPromptPreset(agentId, systemPromptId);
+ } else {
+ settingsManager2.clearSystemPromptPreset(agentId);
+ }
+ }
+ const agent = await client.agents.retrieve(agentId);
+ return {
+ success: true,
+ message: "System prompt applied successfully",
+ agent
+ };
+ } catch (error) {
+ return {
+ success: false,
+ message: `Failed to apply system prompt: ${error instanceof Error ? error.message : String(error)}`,
+ agent: null
+ };
+ }
+ }
+ async function updateAgentSystemPromptMemfs(agentId, enableMemfs) {
+ try {
+ const { settingsManager: settingsManager2 } = await init_settings_manager().then(() => exports_settings_manager);
+ const { isKnownPreset: isKnownPreset2, buildSystemPrompt: buildSystemPrompt2, swapMemoryAddon: swapMemoryAddon2 } = await Promise.resolve().then(() => (init_promptAssets(), exports_promptAssets));
+ const newMode = enableMemfs ? "memfs" : "standard";
+ const storedPreset = settingsManager2.isReady ? settingsManager2.getSystemPromptPreset(agentId) : undefined;
+ let nextSystemPrompt;
+ if (storedPreset && isKnownPreset2(storedPreset)) {
+ nextSystemPrompt = buildSystemPrompt2(storedPreset, newMode);
+ } else {
+ const client2 = await getClient();
+ const agent = await client2.agents.retrieve(agentId);
+ nextSystemPrompt = swapMemoryAddon2(agent.system || "", newMode);
+ }
+ const client = await getClient();
+ await client.agents.update(agentId, {
+ system: nextSystemPrompt
+ });
+ return {
+ success: true,
+ message: enableMemfs ? "System prompt updated to include Memory Filesystem section" : "System prompt updated to include standard Memory section"
+ };
+ } catch (error) {
+ return {
+ success: false,
+ message: `Failed to update system prompt memfs: ${error instanceof Error ? error.message : String(error)}`
+ };
+ }
+ }
+ var init_modify = __esm(async () => {
+ init_debug();
+ await __promiseAll([
+ init_openai_codex_provider(),
+ init_available_models(),
+ init_client2()
+ ]);
+ });
+
  // src/cli/helpers/contextTracker.ts
  function createContextTracker() {
  return {
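The bulk of the relocated module is buildModelSettings2, which dispatches on the handle's provider prefix and normalizes update arguments into a provider-specific settings object; note that an "xhigh" effort is mapped to Anthropic's "max". A trimmed, self-contained sketch of the Anthropic branch (behavior copied from the hunk above; the other branches follow the same shape):

```js
// Sketch of the Anthropic branch of buildModelSettings2 (see hunk above).
function buildAnthropicSettings(updateArgs) {
  const settings = { provider_type: "anthropic", parallel_tool_calls: true };
  const effort = updateArgs?.reasoning_effort;
  if (effort === "low" || effort === "medium" || effort === "high") {
    settings.effort = effort;
  } else if (effort === "xhigh") {
    settings.effort = "max"; // "xhigh" is normalized to Anthropic's "max"
  }
  if (updateArgs?.enable_reasoner !== undefined || typeof updateArgs?.max_reasoning_tokens === "number") {
    settings.thinking = {
      type: updateArgs?.enable_reasoner === false ? "disabled" : "enabled",
      ...(typeof updateArgs?.max_reasoning_tokens === "number" && {
        budget_tokens: updateArgs.max_reasoning_tokens
      })
    };
  }
  return settings;
}

console.log(buildAnthropicSettings({ reasoning_effort: "xhigh", max_reasoning_tokens: 8192 }));
// { provider_type: "anthropic", parallel_tool_calls: true, effort: "max",
//   thinking: { type: "enabled", budget_tokens: 8192 } }
```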
@@ -49305,265 +49616,6 @@ var init_memoryGit = __esm(async () => {
  execFile6 = promisify6(execFileCb);
  });

- // src/agent/modify.ts
- var exports_modify = {};
- __export(exports_modify, {
- updateConversationLLMConfig: () => updateConversationLLMConfig,
- updateAgentSystemPromptRaw: () => updateAgentSystemPromptRaw2,
- updateAgentSystemPromptMemfs: () => updateAgentSystemPromptMemfs,
- updateAgentSystemPrompt: () => updateAgentSystemPrompt2,
- updateAgentLLMConfig: () => updateAgentLLMConfig2,
- recompileAgentSystemPrompt: () => recompileAgentSystemPrompt
- });
- function buildModelSettings2(modelHandle, updateArgs) {
- const isOpenAI = modelHandle.startsWith("openai/") || modelHandle.startsWith(`${OPENAI_CODEX_PROVIDER_NAME}/`);
- const isAnthropic = modelHandle.startsWith("anthropic/") || modelHandle.startsWith("claude-pro-max/") || modelHandle.startsWith("minimax/");
- const isZai = modelHandle.startsWith("zai/");
- const isGoogleAI = modelHandle.startsWith("google_ai/");
- const isGoogleVertex = modelHandle.startsWith("google_vertex/");
- const isOpenRouter = modelHandle.startsWith("openrouter/");
- const isBedrock = modelHandle.startsWith("bedrock/");
- let settings;
- if (isOpenAI || isOpenRouter) {
- const openaiSettings = {
- provider_type: "openai",
- parallel_tool_calls: true
- };
- if (updateArgs?.reasoning_effort) {
- openaiSettings.reasoning = {
- reasoning_effort: updateArgs.reasoning_effort
- };
- }
- const verbosity = updateArgs?.verbosity;
- if (verbosity === "low" || verbosity === "medium" || verbosity === "high") {
- openaiSettings.verbosity = verbosity;
- }
- if (typeof updateArgs?.strict === "boolean") {
- openaiSettings.strict = updateArgs.strict;
- }
- settings = openaiSettings;
- } else if (isAnthropic) {
- const anthropicSettings = {
- provider_type: "anthropic",
- parallel_tool_calls: true
- };
- const effort = updateArgs?.reasoning_effort;
- if (effort === "low" || effort === "medium" || effort === "high") {
- anthropicSettings.effort = effort;
- } else if (effort === "xhigh") {
- anthropicSettings.effort = "max";
- }
- if (updateArgs?.enable_reasoner !== undefined || typeof updateArgs?.max_reasoning_tokens === "number") {
- anthropicSettings.thinking = {
- type: updateArgs?.enable_reasoner === false ? "disabled" : "enabled",
- ...typeof updateArgs?.max_reasoning_tokens === "number" && {
- budget_tokens: updateArgs.max_reasoning_tokens
- }
- };
- }
- if (typeof updateArgs?.strict === "boolean") {
- anthropicSettings.strict = updateArgs.strict;
- }
- settings = anthropicSettings;
- } else if (isZai) {
- settings = {
- provider_type: "zai",
- parallel_tool_calls: true
- };
- } else if (isGoogleAI) {
- const googleSettings = {
- provider_type: "google_ai",
- parallel_tool_calls: true
- };
- if (updateArgs?.thinking_budget !== undefined) {
- googleSettings.thinking_config = {
- thinking_budget: updateArgs.thinking_budget
- };
- }
- if (typeof updateArgs?.temperature === "number") {
- googleSettings.temperature = updateArgs.temperature;
- }
- settings = googleSettings;
- } else if (isGoogleVertex) {
- const googleVertexSettings = {
- provider_type: "google_vertex",
- parallel_tool_calls: true
- };
- if (updateArgs?.thinking_budget !== undefined) {
- googleVertexSettings.thinking_config = {
- thinking_budget: updateArgs.thinking_budget
- };
- }
- if (typeof updateArgs?.temperature === "number") {
- googleVertexSettings.temperature = updateArgs.temperature;
- }
- settings = googleVertexSettings;
- } else if (isBedrock) {
- const bedrockSettings = {
- provider_type: "bedrock",
- parallel_tool_calls: true
- };
- const effort = updateArgs?.reasoning_effort;
- if (effort === "low" || effort === "medium" || effort === "high") {
- bedrockSettings.effort = effort;
- } else if (effort === "xhigh") {
- bedrockSettings.effort = "max";
- }
- if (updateArgs?.enable_reasoner !== undefined || typeof updateArgs?.max_reasoning_tokens === "number") {
- bedrockSettings.thinking = {
- type: updateArgs?.enable_reasoner === false ? "disabled" : "enabled",
- ...typeof updateArgs?.max_reasoning_tokens === "number" && {
- budget_tokens: updateArgs.max_reasoning_tokens
- }
- };
- }
- settings = bedrockSettings;
- } else {
- const openaiProxySettings = {
- provider_type: "openai",
- parallel_tool_calls: typeof updateArgs?.parallel_tool_calls === "boolean" ? updateArgs.parallel_tool_calls : true
- };
- if (typeof updateArgs?.strict === "boolean") {
- openaiProxySettings.strict = updateArgs.strict;
- }
- settings = openaiProxySettings;
- }
- if (typeof updateArgs?.max_output_tokens === "number" && "provider_type" in settings) {
- settings.max_output_tokens = updateArgs.max_output_tokens;
- }
- return settings;
- }
- async function updateAgentLLMConfig2(agentId, modelHandle, updateArgs, options) {
- const client = await getClient();
- const modelSettings = buildModelSettings2(modelHandle, updateArgs);
- const explicitContextWindow = updateArgs?.context_window;
- const shouldPreserveContextWindow = options?.preserveContextWindow === true;
- const contextWindow = explicitContextWindow ?? (!shouldPreserveContextWindow ? await getModelContextWindow(modelHandle) : undefined);
- const hasModelSettings = Object.keys(modelSettings).length > 0;
- await client.agents.update(agentId, {
- model: modelHandle,
- ...hasModelSettings && { model_settings: modelSettings },
- ...contextWindow && { context_window_limit: contextWindow },
- ...(typeof updateArgs?.max_output_tokens === "number" || updateArgs?.max_output_tokens === null) && {
- max_tokens: updateArgs.max_output_tokens
- }
- });
- const finalAgent = await client.agents.retrieve(agentId);
- return finalAgent;
- }
- async function updateConversationLLMConfig(conversationId, modelHandle, updateArgs) {
- const client = await getClient();
- const modelSettings = buildModelSettings2(modelHandle, updateArgs);
- const hasModelSettings = Object.keys(modelSettings).length > 0;
- const payload = {
- model: modelHandle,
- ...hasModelSettings && { model_settings: modelSettings }
- };
- return client.conversations.update(conversationId, payload);
- }
- async function recompileAgentSystemPrompt(conversationId, agentId, dryRun, clientOverride) {
- const client = clientOverride ?? await getClient();
- if (!agentId) {
- throw new Error("recompileAgentSystemPrompt requires agentId");
- }
- const params = {
- dry_run: dryRun,
- agent_id: agentId
- };
- return client.conversations.recompile(conversationId, params);
- }
- async function updateAgentSystemPromptRaw2(agentId, systemPromptContent) {
- try {
- const client = await getClient();
- await client.agents.update(agentId, {
- system: systemPromptContent
- });
- return {
- success: true,
- message: "System prompt updated successfully"
- };
- } catch (error) {
- return {
- success: false,
- message: `Failed to update system prompt: ${error instanceof Error ? error.message : String(error)}`
- };
- }
- }
- async function updateAgentSystemPrompt2(agentId, systemPromptId) {
- try {
- const { isKnownPreset: isKnownPreset2, resolveAndBuildSystemPrompt: resolveAndBuildSystemPrompt2 } = await Promise.resolve().then(() => (init_promptAssets(), exports_promptAssets));
- const { settingsManager: settingsManager2 } = await init_settings_manager().then(() => exports_settings_manager);
- const client = await getClient();
- const memoryMode = settingsManager2.isReady && settingsManager2.isMemfsEnabled(agentId) ? "memfs" : "standard";
- const systemPromptContent = await resolveAndBuildSystemPrompt2(systemPromptId, memoryMode);
- debugLog("modify", "systemPromptContent: %s", systemPromptContent);
- const updateResult = await updateAgentSystemPromptRaw2(agentId, systemPromptContent);
- if (!updateResult.success) {
- return {
- success: false,
- message: updateResult.message,
- agent: null
- };
- }
- if (settingsManager2.isReady) {
- if (isKnownPreset2(systemPromptId)) {
- settingsManager2.setSystemPromptPreset(agentId, systemPromptId);
- } else {
- settingsManager2.clearSystemPromptPreset(agentId);
- }
- }
- const agent = await client.agents.retrieve(agentId);
- return {
- success: true,
- message: "System prompt applied successfully",
- agent
- };
- } catch (error) {
- return {
- success: false,
- message: `Failed to apply system prompt: ${error instanceof Error ? error.message : String(error)}`,
- agent: null
- };
- }
- }
- async function updateAgentSystemPromptMemfs(agentId, enableMemfs) {
- try {
- const { settingsManager: settingsManager2 } = await init_settings_manager().then(() => exports_settings_manager);
- const { isKnownPreset: isKnownPreset2, buildSystemPrompt: buildSystemPrompt2, swapMemoryAddon: swapMemoryAddon2 } = await Promise.resolve().then(() => (init_promptAssets(), exports_promptAssets));
- const newMode = enableMemfs ? "memfs" : "standard";
- const storedPreset = settingsManager2.isReady ? settingsManager2.getSystemPromptPreset(agentId) : undefined;
- let nextSystemPrompt;
- if (storedPreset && isKnownPreset2(storedPreset)) {
- nextSystemPrompt = buildSystemPrompt2(storedPreset, newMode);
- } else {
- const client2 = await getClient();
- const agent = await client2.agents.retrieve(agentId);
- nextSystemPrompt = swapMemoryAddon2(agent.system || "", newMode);
- }
- const client = await getClient();
- await client.agents.update(agentId, {
- system: nextSystemPrompt
- });
- return {
- success: true,
- message: enableMemfs ? "System prompt updated to include Memory Filesystem section" : "System prompt updated to include standard Memory section"
- };
- } catch (error) {
- return {
- success: false,
- message: `Failed to update system prompt memfs: ${error instanceof Error ? error.message : String(error)}`
- };
- }
- }
- var init_modify = __esm(async () => {
- init_debug();
- await __promiseAll([
- init_openai_codex_provider(),
- init_available_models(),
- init_client2()
- ]);
- });
-
  // src/tools/filter.ts
  var exports_filter = {};
  __export(exports_filter, {
@@ -74900,12 +74952,22 @@ var init_catalog = __esm(() => {
  {
  id: "reflection-step-count",
  description: "Step-count reflection trigger handling",
- modes: ["interactive", "headless-one-shot", "headless-bidirectional"]
+ modes: [
+ "interactive",
+ "headless-one-shot",
+ "headless-bidirectional",
+ "listen"
+ ]
  },
  {
  id: "reflection-compaction",
  description: "Compaction-triggered reflection trigger handling",
- modes: ["interactive", "headless-one-shot", "headless-bidirectional"]
+ modes: [
+ "interactive",
+ "headless-one-shot",
+ "headless-bidirectional",
+ "listen"
+ ]
  },
  {
  id: "command-io",
@@ -80370,6 +80432,25 @@ var init_scheduler = __esm(async () => {
  GC_INTERVAL_MS = 60 * 60000;
  });

+ // src/tools/toolset-labels.ts
+ function formatToolsetName(id) {
+ if (!id)
+ return "Unknown";
+ return TOOLSET_DISPLAY_NAMES[id] ?? id;
+ }
+ var TOOLSET_DISPLAY_NAMES;
+ var init_toolset_labels = __esm(() => {
+ TOOLSET_DISPLAY_NAMES = {
+ default: "Claude",
+ codex: "Codex",
+ codex_snake: "Codex (snake_case)",
+ gemini: "Gemini",
+ gemini_snake: "Gemini (snake_case)",
+ none: "None",
+ auto: "Auto"
+ };
+ });
+
  // src/websocket/terminalHandler.ts
  import * as os4 from "node:os";
  import WebSocket3 from "ws";
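formatToolsetName is a plain lookup with two fallbacks: a missing id maps to "Unknown" and an unrecognized id passes through unchanged. A trimmed usage sketch (map subset copied from the hunk above; "my-toolset" is a hypothetical id):

```js
// Subset of TOOLSET_DISPLAY_NAMES from the hunk above.
const TOOLSET_DISPLAY_NAMES = { default: "Claude", codex: "Codex", gemini: "Gemini", auto: "Auto" };

function formatToolsetName(id) {
  if (!id) return "Unknown";
  return TOOLSET_DISPLAY_NAMES[id] ?? id; // unknown ids pass through unchanged
}

console.log(formatToolsetName("codex"));      // "Codex"
console.log(formatToolsetName("my-toolset")); // "my-toolset" (hypothetical id)
console.log(formatToolsetName(undefined));    // "Unknown"
```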
@@ -80742,6 +80823,25 @@ function isEnableMemfsCommand(value) {
  const c = value;
  return c.type === "enable_memfs" && typeof c.request_id === "string" && typeof c.agent_id === "string";
  }
+ function isListModelsCommand(value) {
+ if (!value || typeof value !== "object")
+ return false;
+ const c = value;
+ return c.type === "list_models" && typeof c.request_id === "string";
+ }
+ function isUpdateModelCommand(value) {
+ if (!value || typeof value !== "object")
+ return false;
+ const c = value;
+ if (c.type !== "update_model" || typeof c.request_id !== "string" || !isRuntimeScope(c.runtime) || !c.payload || typeof c.payload !== "object") {
+ return false;
+ }
+ const payload = c.payload;
+ const hasModelId = payload.model_id === undefined || typeof payload.model_id === "string";
+ const hasModelHandle = payload.model_handle === undefined || typeof payload.model_handle === "string";
+ const hasAtLeastOne = typeof payload.model_id === "string" || typeof payload.model_handle === "string";
+ return hasModelId && hasModelHandle && hasAtLeastOne;
+ }
  function isCronListCommand(value) {
  if (!value || typeof value !== "object")
  return false;
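The isUpdateModelCommand guard accepts a payload carrying model_id, model_handle, or both, each required to be a string when present. A sketch of just the payload check in isolation (the full guard above also validates type, request_id, and the runtime scope via isRuntimeScope):

```js
// Payload check only; see isUpdateModelCommand above for the full envelope checks.
function isValidUpdateModelPayload(payload) {
  if (!payload || typeof payload !== "object") return false;
  const okId = payload.model_id === undefined || typeof payload.model_id === "string";
  const okHandle = payload.model_handle === undefined || typeof payload.model_handle === "string";
  const hasOne = typeof payload.model_id === "string" || typeof payload.model_handle === "string";
  return okId && okHandle && hasOne;
}

console.log(isValidUpdateModelPayload({ model_id: "auto-chat" }));            // true
console.log(isValidUpdateModelPayload({ model_handle: "letta/auto-chat" }));  // true
console.log(isValidUpdateModelPayload({}));                                   // false (needs at least one)
console.log(isValidUpdateModelPayload({ model_id: 42 }));                     // false (wrong type)
```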
@@ -80810,7 +80910,7 @@ function parseServerMessage(data) {
  try {
  const raw = typeof data === "string" ? data : data.toString();
  const parsed = JSON.parse(raw);
- if (isInputCommand(parsed) || isChangeDeviceStateCommand(parsed) || isAbortMessageCommand(parsed) || isSyncCommand(parsed) || isTerminalSpawnCommand(parsed) || isTerminalInputCommand(parsed) || isTerminalResizeCommand(parsed) || isTerminalKillCommand(parsed) || isSearchFilesCommand(parsed) || isListInDirectoryCommand(parsed) || isReadFileCommand(parsed) || isEditFileCommand(parsed) || isListMemoryCommand(parsed) || isEnableMemfsCommand(parsed) || isCronListCommand(parsed) || isCronAddCommand(parsed) || isCronGetCommand(parsed) || isCronDeleteCommand(parsed) || isCronDeleteAllCommand(parsed) || isSkillEnableCommand(parsed) || isSkillDisableCommand(parsed) || isGetReflectionSettingsCommand(parsed) || isSetReflectionSettingsCommand(parsed) || isExecuteCommandCommand(parsed)) {
+ if (isInputCommand(parsed) || isChangeDeviceStateCommand(parsed) || isAbortMessageCommand(parsed) || isSyncCommand(parsed) || isTerminalSpawnCommand(parsed) || isTerminalInputCommand(parsed) || isTerminalResizeCommand(parsed) || isTerminalKillCommand(parsed) || isSearchFilesCommand(parsed) || isListInDirectoryCommand(parsed) || isReadFileCommand(parsed) || isEditFileCommand(parsed) || isListMemoryCommand(parsed) || isEnableMemfsCommand(parsed) || isListModelsCommand(parsed) || isUpdateModelCommand(parsed) || isCronListCommand(parsed) || isCronAddCommand(parsed) || isCronGetCommand(parsed) || isCronDeleteCommand(parsed) || isCronDeleteAllCommand(parsed) || isSkillEnableCommand(parsed) || isSkillDisableCommand(parsed) || isGetReflectionSettingsCommand(parsed) || isSetReflectionSettingsCommand(parsed) || isExecuteCommandCommand(parsed)) {
  return parsed;
  }
  const invalidInput = getInvalidInputReason(parsed);
@@ -80956,6 +81056,188 @@ function handleModeChange(msg, socket, runtime, scope) {
  }
  }
  }
+ function resolveModelForUpdate(payload) {
+ if (typeof payload.model_id === "string" && payload.model_id.length > 0) {
+ const byId = getModelInfo2(payload.model_id);
+ if (byId) {
+ const explicitHandle = typeof payload.model_handle === "string" && payload.model_handle.length > 0 ? payload.model_handle : null;
+ return {
+ id: byId.id,
+ handle: explicitHandle ?? byId.handle,
+ label: byId.label,
+ updateArgs: byId.updateArgs && typeof byId.updateArgs === "object" ? { ...byId.updateArgs } : undefined
+ };
+ }
+ }
+ if (typeof payload.model_handle === "string" && payload.model_handle.length > 0) {
+ const exactByHandle = models2.find((m) => m.handle === payload.model_handle);
+ if (exactByHandle) {
+ return {
+ id: exactByHandle.id,
+ handle: exactByHandle.handle,
+ label: exactByHandle.label,
+ updateArgs: exactByHandle.updateArgs && typeof exactByHandle.updateArgs === "object" ? { ...exactByHandle.updateArgs } : undefined
+ };
+ }
+ return {
+ id: payload.model_handle,
+ handle: payload.model_handle,
+ label: payload.model_handle,
+ updateArgs: undefined
+ };
+ }
+ return null;
+ }
+ function formatToolsetStatusMessageForModelUpdate(params) {
+ const { nextToolset, toolsetPreference } = params;
+ if (toolsetPreference === "auto") {
+ return "Toolset auto-switched for this model: now using the " + formatToolsetName(nextToolset) + " toolset.";
+ }
+ return "Manual toolset override remains active: " + formatToolsetName(toolsetPreference) + ".";
+ }
+ function formatEffortSuffix(updateArgs) {
+ if (!updateArgs)
+ return "";
+ const effort = updateArgs.reasoning_effort;
+ if (typeof effort !== "string" || effort.length === 0)
+ return "";
+ const labels = {
+ none: "No Reasoning",
+ low: "Low",
+ medium: "Medium",
+ high: "High",
+ xhigh: "Max"
+ };
+ return ` (${labels[effort] ?? effort})`;
+ }
+ function buildModelUpdateStatusMessage(params) {
+ const {
+ modelLabel,
+ toolsetChanged,
+ toolsetError,
+ nextToolset,
+ toolsetPreference,
+ updateArgs
+ } = params;
+ let message = `Model updated to ${modelLabel}${formatEffortSuffix(updateArgs)}.`;
+ if (toolsetError) {
+ message += ` Warning: toolset switch failed (${toolsetError}).`;
+ return { message, level: "warning" };
+ }
+ if (toolsetChanged) {
+ message += ` ${formatToolsetStatusMessageForModelUpdate({
+ nextToolset,
+ toolsetPreference
+ })}`;
+ }
+ return { message, level: "info" };
+ }
+ async function applyModelUpdateForRuntime(params) {
+ const { socket, listener, scopedRuntime, requestId, model } = params;
+ const agentId = scopedRuntime.agentId;
+ const conversationId = scopedRuntime.conversationId;
+ if (!agentId) {
+ return {
+ type: "update_model_response",
+ request_id: requestId,
+ success: false,
+ error: "Missing agent_id in runtime scope"
+ };
+ }
+ const isDefaultConversation = conversationId === "default";
+ const updateArgs = {
+ ...model.updateArgs ?? {},
+ parallel_tool_calls: true
+ };
+ let modelSettings = null;
+ let appliedTo;
+ if (isDefaultConversation) {
+ const updatedAgent = await updateAgentLLMConfig2(agentId, model.handle, updateArgs);
+ modelSettings = updatedAgent.model_settings ?? null;
+ appliedTo = "agent";
+ } else {
+ const updatedConversation = await updateConversationLLMConfig(conversationId, model.handle, updateArgs);
+ modelSettings = updatedConversation.model_settings ?? null;
+ appliedTo = "conversation";
+ }
+ const toolsetPreference = settingsManager.getToolsetPreference(agentId);
+ const previousToolNames = getToolNames();
+ let nextToolset;
+ let toolsetError = null;
+ try {
+ if (toolsetPreference === "auto") {
+ nextToolset = await switchToolsetForModel(model.handle, agentId);
+ } else {
+ await forceToolsetSwitch(toolsetPreference, agentId);
+ nextToolset = toolsetPreference;
+ }
+ } catch (error) {
+ nextToolset = toolsetPreference === "auto" ? "default" : toolsetPreference;
+ toolsetError = error instanceof Error ? error.message : "Failed to switch toolset";
+ }
+ const toolsetChanged = !toolsetError && JSON.stringify(previousToolNames) !== JSON.stringify(getToolNames());
+ const { message: statusMessage, level: statusLevel } = buildModelUpdateStatusMessage({
+ modelLabel: model.label,
+ toolsetChanged,
+ toolsetError,
+ nextToolset,
+ toolsetPreference,
+ updateArgs: model.updateArgs
+ });
+ emitStatusDelta(socket, scopedRuntime, {
+ message: statusMessage,
+ level: statusLevel,
+ agentId,
+ conversationId
+ });
+ emitRuntimeStateUpdates(listener, {
+ agent_id: agentId,
+ conversation_id: conversationId
+ });
+ return {
+ type: "update_model_response",
+ request_id: requestId,
+ success: true,
+ runtime: {
+ agent_id: agentId,
+ conversation_id: conversationId
+ },
+ applied_to: appliedTo,
+ model_id: model.id,
+ model_handle: model.handle,
+ model_settings: modelSettings
+ };
+ }
+ function buildListModelsEntries() {
+ return models2.map((model) => ({
+ id: model.id,
+ handle: model.handle,
+ label: model.label,
+ description: model.description,
+ ...typeof model.isDefault === "boolean" ? { isDefault: model.isDefault } : {},
+ ...typeof model.isFeatured === "boolean" ? { isFeatured: model.isFeatured } : {},
+ ...typeof model.free === "boolean" ? { free: model.free } : {},
+ ...model.updateArgs && typeof model.updateArgs === "object" ? { updateArgs: model.updateArgs } : {}
+ }));
+ }
+ async function buildListModelsResponse(requestId) {
+ const entries = buildListModelsEntries();
+ const [handlesResult, providersResult] = await Promise.allSettled([
+ getAvailableModelHandles(),
+ listProviders2()
+ ]);
+ const availableHandles = handlesResult.status === "fulfilled" ? [...handlesResult.value.handles] : null;
+ const providers = providersResult.status === "fulfilled" ? providersResult.value : [];
+ const byokProviderAliases = buildByokProviderAliases(providers);
+ return {
+ type: "list_models_response",
+ request_id: requestId,
+ success: true,
+ entries,
+ available_handles: availableHandles,
+ byok_provider_aliases: byokProviderAliases
+ };
+ }
  function emitCronsUpdated(socket, scope) {
  socket.send(JSON.stringify({
  type: "crons_updated",
@@ -82163,6 +82445,64 @@ async function connectWithRetry(runtime, opts, attempt = 0, startTime = Date.now
  })();
  return;
  }
+ if (isListModelsCommand(parsed)) {
+ (async () => {
+ try {
+ const response = await buildListModelsResponse(parsed.request_id);
+ socket.send(JSON.stringify(response));
+ } catch (error) {
+ socket.send(JSON.stringify({
+ type: "list_models_response",
+ request_id: parsed.request_id,
+ success: false,
+ entries: [],
+ error: error instanceof Error ? error.message : "Failed to list models"
+ }));
+ }
+ })();
+ return;
+ }
+ if (isUpdateModelCommand(parsed)) {
+ (async () => {
+ const scopedRuntime = getOrCreateScopedRuntime(runtime, parsed.runtime.agent_id, parsed.runtime.conversation_id);
+ const resolvedModel = resolveModelForUpdate(parsed.payload);
+ if (!resolvedModel) {
+ const failure = {
+ type: "update_model_response",
+ request_id: parsed.request_id,
+ success: false,
+ error: "Model not found. Provide a valid model_id from list_models or a model_handle."
+ };
+ socket.send(JSON.stringify(failure));
+ return;
+ }
+ try {
+ const response = await applyModelUpdateForRuntime({
+ socket,
+ listener: runtime,
+ scopedRuntime,
+ requestId: parsed.request_id,
+ model: resolvedModel
+ });
+ socket.send(JSON.stringify(response));
+ } catch (error) {
+ const failure = {
+ type: "update_model_response",
+ request_id: parsed.request_id,
+ success: false,
+ runtime: {
+ agent_id: parsed.runtime.agent_id,
+ conversation_id: parsed.runtime.conversation_id
+ },
+ model_id: resolvedModel.id,
+ model_handle: resolvedModel.handle,
+ error: error instanceof Error ? error.message : "Failed to update model"
+ };
+ socket.send(JSON.stringify(failure));
+ }
+ })();
+ return;
+ }
  if (isCronListCommand(parsed) || isCronAddCommand(parsed) || isCronGetCommand(parsed) || isCronDeleteCommand(parsed) || isCronDeleteAllCommand(parsed)) {
  handleCronCommand(parsed, socket);
  return;
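From a client's point of view, the two new commands are plain JSON frames on the existing listener socket. A hypothetical exchange, with field names taken from the guards and response builders above (the request_id and runtime identifiers are invented for illustration):

```js
// Hypothetical client frames; identifiers are invented for illustration.
const listModelsRequest = { type: "list_models", request_id: "req-1" };

const updateModelRequest = {
  type: "update_model",
  request_id: "req-2",
  runtime: { agent_id: "agent-123", conversation_id: "default" },
  payload: { model_id: "auto-chat" }
};

// Expected success response shapes, per the handlers above:
// { type: "list_models_response", request_id: "req-1", success: true,
//   entries: [...], available_handles: [...] | null, byok_provider_aliases: {...} }
// { type: "update_model_response", request_id: "req-2", success: true,
//   runtime: {...}, applied_to: "agent" | "conversation",
//   model_id: "...", model_handle: "...", model_settings: {...} | null }
```

Because conversation_id is "default" here, the update is applied to the agent itself (applied_to: "agent"); any other conversation id updates only that conversation's LLM config.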
@@ -82467,6 +82807,7 @@ var init_client4 = __esm(async () => {
  init_constants();
  init_cron();
  init_queueRuntime();
+ init_toolset_labels();
  init_debug();
  init_terminalHandler();
  init_constants2();
@@ -82474,13 +82815,18 @@ var init_client4 = __esm(async () => {
  init_permissionMode();
  init_runtime();
  await __promiseAll([
+ init_available_models(),
  init_client2(),
+ init_model2(),
+ init_modify(),
  init_memoryReminder(),
  init_scheduler(),
+ init_byok_providers(),
  init_settings_manager(),
  init_telemetry(),
  init_errorReporting(),
  init_manager3(),
+ init_toolset(),
  init_approval(),
  init_commands(),
  init_interrupts(),
@@ -82498,6 +82844,11 @@ var init_client4 = __esm(async () => {
  createRuntime: createLegacyTestRuntime,
  createListenerRuntime: createRuntime,
  getOrCreateScopedRuntime,
+ buildListModelsEntries,
+ buildListModelsResponse,
+ buildModelUpdateStatusMessage,
+ resolveModelForUpdate,
+ applyModelUpdateForRuntime,
  stopRuntime: (runtime, suppressCallbacks) => stopRuntime(asListenerRuntimeForTests(runtime), suppressCallbacks),
  setActiveRuntime,
  getListenerStatus,
@@ -88153,25 +88504,6 @@ var init_settings = __esm(() => {
  };
  });

- // src/tools/toolset-labels.ts
- function formatToolsetName(id) {
- if (!id)
- return "Unknown";
- return TOOLSET_DISPLAY_NAMES[id] ?? id;
- }
- var TOOLSET_DISPLAY_NAMES;
- var init_toolset_labels = __esm(() => {
- TOOLSET_DISPLAY_NAMES = {
- default: "Claude",
- codex: "Codex",
- codex_snake: "Codex (snake_case)",
- gemini: "Gemini",
- gemini_snake: "Gemini (snake_case)",
- none: "None",
- auto: "Auto"
- };
- });
-
  // src/cli/commands/mcp.ts
  function uid(prefix) {
  return `${prefix}-${Date.now().toString(36)}-${Math.random().toString(36).slice(2, 8)}`;
@@ -122673,32 +123005,6 @@ var init_ModelReasoningSelector = __esm(async () => {
  });

  // src/cli/components/ModelSelector.tsx
- function buildByokProviderAliases(providers) {
- const aliases = {
- "lc-anthropic": "anthropic",
- "lc-openai": "openai",
- "lc-zai": "zai",
- "lc-gemini": "google_ai",
- "chatgpt-plus-pro": "chatgpt-plus-pro"
- };
- for (const provider of providers) {
- const baseProvider = PROVIDER_TYPE_TO_BASE_PROVIDER[provider.provider_type];
- if (baseProvider) {
- aliases[provider.name] = baseProvider;
- }
- }
- return aliases;
- }
- function isByokHandleForSelector(handle, byokProviderAliases) {
- if (STATIC_BYOK_PROVIDER_PREFIXES.some((prefix) => handle.startsWith(prefix))) {
- return true;
- }
- const slashIndex = handle.indexOf("/");
- if (slashIndex === -1)
- return false;
- const provider = handle.slice(0, slashIndex);
- return provider in byokProviderAliases;
- }
  function getModelCategories(_billingTier, isSelfHosted) {
  if (isSelfHosted) {
  return ["server-recommended", "server-all"];
@@ -123278,7 +123584,7 @@ function ModelSelector({
  ]
  }, undefined, true, undefined, this);
  }
- var import_react82, jsx_dev_runtime59, SOLID_LINE23 = "─", VISIBLE_ITEMS2 = 8, STATIC_BYOK_PROVIDER_PREFIXES, PROVIDER_TYPE_TO_BASE_PROVIDER, API_GATED_MODEL_HANDLES;
+ var import_react82, jsx_dev_runtime59, SOLID_LINE23 = "─", VISIBLE_ITEMS2 = 8, API_GATED_MODEL_HANDLES;
  var init_ModelSelector = __esm(async () => {
  init_useTerminalWidth();
  init_colors();
@@ -123291,18 +123597,6 @@ var init_ModelSelector = __esm(async () => {
  ]);
  import_react82 = __toESM(require_react(), 1);
  jsx_dev_runtime59 = __toESM(require_jsx_dev_runtime(), 1);
- STATIC_BYOK_PROVIDER_PREFIXES = ["chatgpt-plus-pro/", "lc-"];
- PROVIDER_TYPE_TO_BASE_PROVIDER = {
- chatgpt_oauth: "chatgpt-plus-pro",
- anthropic: "anthropic",
- openai: "openai",
- zai: "zai",
- google_ai: "google_ai",
- google_vertex: "google_vertex",
- minimax: "minimax",
- openrouter: "openrouter",
- bedrock: "bedrock"
- };
  API_GATED_MODEL_HANDLES = new Set(["letta/auto", "letta/auto-fast"]);
  });

@@ -148953,4 +149247,4 @@ Error during initialization: ${message}`);
  }
  main();

- //# debugId=A078D4F31EFE26FF64756E2164756E21
+ //# debugId=77E981F713C7F78064756E2164756E21
package/package.json CHANGED
@@ -1,6 +1,6 @@
  {
  "name": "@letta-ai/letta-code",
- "version": "0.21.2",
+ "version": "0.21.3",
  "description": "Letta Code is a CLI tool for interacting with stateful Letta agents from the terminal.",
  "type": "module",
  "bin": {