letta-nightly 0.7.7.dev20250430205840__py3-none-any.whl → 0.7.8.dev20250501104226__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. letta/__init__.py +1 -1
  2. letta/agent.py +8 -12
  3. letta/agents/exceptions.py +6 -0
  4. letta/agents/letta_agent.py +48 -35
  5. letta/agents/letta_agent_batch.py +6 -2
  6. letta/agents/voice_agent.py +10 -7
  7. letta/constants.py +5 -1
  8. letta/functions/composio_helpers.py +100 -0
  9. letta/functions/functions.py +4 -2
  10. letta/functions/helpers.py +19 -99
  11. letta/groups/helpers.py +1 -0
  12. letta/groups/sleeptime_multi_agent.py +5 -1
  13. letta/helpers/message_helper.py +21 -4
  14. letta/helpers/tool_execution_helper.py +1 -1
  15. letta/interfaces/anthropic_streaming_interface.py +165 -158
  16. letta/interfaces/openai_chat_completions_streaming_interface.py +1 -1
  17. letta/llm_api/anthropic.py +15 -10
  18. letta/llm_api/anthropic_client.py +5 -1
  19. letta/llm_api/google_vertex_client.py +1 -1
  20. letta/llm_api/llm_api_tools.py +7 -0
  21. letta/llm_api/llm_client.py +12 -2
  22. letta/llm_api/llm_client_base.py +4 -0
  23. letta/llm_api/openai.py +9 -3
  24. letta/llm_api/openai_client.py +18 -4
  25. letta/memory.py +3 -1
  26. letta/orm/group.py +2 -0
  27. letta/orm/provider.py +10 -0
  28. letta/schemas/agent.py +0 -1
  29. letta/schemas/enums.py +11 -0
  30. letta/schemas/group.py +24 -0
  31. letta/schemas/llm_config.py +1 -0
  32. letta/schemas/llm_config_overrides.py +2 -2
  33. letta/schemas/providers.py +75 -20
  34. letta/schemas/tool.py +3 -8
  35. letta/server/rest_api/app.py +12 -0
  36. letta/server/rest_api/chat_completions_interface.py +1 -1
  37. letta/server/rest_api/interface.py +8 -10
  38. letta/server/rest_api/{optimistic_json_parser.py → json_parser.py} +62 -26
  39. letta/server/rest_api/routers/v1/agents.py +1 -1
  40. letta/server/rest_api/routers/v1/llms.py +4 -3
  41. letta/server/rest_api/routers/v1/providers.py +4 -1
  42. letta/server/rest_api/routers/v1/voice.py +0 -2
  43. letta/server/rest_api/utils.py +8 -19
  44. letta/server/server.py +25 -11
  45. letta/services/group_manager.py +58 -0
  46. letta/services/provider_manager.py +25 -14
  47. letta/services/summarizer/summarizer.py +15 -7
  48. letta/services/tool_executor/tool_execution_manager.py +1 -1
  49. letta/services/tool_executor/tool_executor.py +3 -3
  50. {letta_nightly-0.7.7.dev20250430205840.dist-info → letta_nightly-0.7.8.dev20250501104226.dist-info}/METADATA +4 -5
  51. {letta_nightly-0.7.7.dev20250430205840.dist-info → letta_nightly-0.7.8.dev20250501104226.dist-info}/RECORD +54 -52
  52. {letta_nightly-0.7.7.dev20250430205840.dist-info → letta_nightly-0.7.8.dev20250501104226.dist-info}/LICENSE +0 -0
  53. {letta_nightly-0.7.7.dev20250430205840.dist-info → letta_nightly-0.7.8.dev20250501104226.dist-info}/WHEEL +0 -0
  54. {letta_nightly-0.7.7.dev20250430205840.dist-info → letta_nightly-0.7.8.dev20250501104226.dist-info}/entry_points.txt +0 -0
letta/__init__.py CHANGED
@@ -1,4 +1,4 @@
1
- __version__ = "0.7.7"
1
+ __version__ = "0.7.8"
2
2
 
3
3
  # import clients
4
4
  from letta.client.client import LocalClient, RESTClient, create_client
letta/agent.py CHANGED
@@ -21,14 +21,14 @@ from letta.constants import (
21
21
  )
22
22
  from letta.errors import ContextWindowExceededError
23
23
  from letta.functions.ast_parsers import coerce_dict_args_by_annotations, get_function_annotations_from_source
24
+ from letta.functions.composio_helpers import execute_composio_action, generate_composio_action_from_func_name
24
25
  from letta.functions.functions import get_function_from_module
25
- from letta.functions.helpers import execute_composio_action, generate_composio_action_from_func_name
26
26
  from letta.functions.mcp_client.base_client import BaseMCPClient
27
27
  from letta.helpers import ToolRulesSolver
28
28
  from letta.helpers.composio_helpers import get_composio_api_key
29
29
  from letta.helpers.datetime_helpers import get_utc_time
30
30
  from letta.helpers.json_helpers import json_dumps, json_loads
31
- from letta.helpers.message_helper import prepare_input_message_create
31
+ from letta.helpers.message_helper import convert_message_creates_to_messages
32
32
  from letta.interface import AgentInterface
33
33
  from letta.llm_api.helpers import calculate_summarizer_cutoff, get_token_counts_for_messages, is_context_overflow_error
34
34
  from letta.llm_api.llm_api_tools import create
@@ -331,8 +331,10 @@ class Agent(BaseAgent):
331
331
  log_telemetry(self.logger, "_get_ai_reply create start")
332
332
  # New LLM client flow
333
333
  llm_client = LLMClient.create(
334
- provider=self.agent_state.llm_config.model_endpoint_type,
334
+ provider_name=self.agent_state.llm_config.provider_name,
335
+ provider_type=self.agent_state.llm_config.model_endpoint_type,
335
336
  put_inner_thoughts_first=put_inner_thoughts_first,
337
+ actor_id=self.user.id,
336
338
  )
337
339
 
338
340
  if llm_client and not stream:
@@ -726,8 +728,7 @@ class Agent(BaseAgent):
726
728
  self.tool_rules_solver.clear_tool_history()
727
729
 
728
730
  # Convert MessageCreate objects to Message objects
729
- message_objects = [prepare_input_message_create(m, self.agent_state.id, True, True) for m in input_messages]
730
- next_input_messages = message_objects
731
+ next_input_messages = convert_message_creates_to_messages(input_messages, self.agent_state.id)
731
732
  counter = 0
732
733
  total_usage = UsageStatistics()
733
734
  step_count = 0
@@ -942,12 +943,7 @@ class Agent(BaseAgent):
942
943
  model_endpoint=self.agent_state.llm_config.model_endpoint,
943
944
  context_window_limit=self.agent_state.llm_config.context_window,
944
945
  usage=response.usage,
945
- # TODO(@caren): Add full provider support - this line is a workaround for v0 BYOK feature
946
- provider_id=(
947
- self.provider_manager.get_anthropic_override_provider_id()
948
- if self.agent_state.llm_config.model_endpoint_type == "anthropic"
949
- else None
950
- ),
946
+ provider_id=self.provider_manager.get_provider_id_from_name(self.agent_state.llm_config.provider_name),
951
947
  job_id=job_id,
952
948
  )
953
949
  for message in all_new_messages:
@@ -1103,7 +1099,7 @@ class Agent(BaseAgent):
1103
1099
  logger.info(f"Packaged into message: {summary_message}")
1104
1100
 
1105
1101
  prior_len = len(in_context_messages_openai)
1106
- self.agent_state = self.agent_manager.trim_all_in_context_messages_except_system(agent_id=self.agent_state.id, actor=self.user)
1102
+ self.agent_state = self.agent_manager.trim_older_in_context_messages(num=cutoff, agent_id=self.agent_state.id, actor=self.user)
1107
1103
  packed_summary_message = {"role": "user", "content": summary_message}
1108
1104
  # Prepend the summary
1109
1105
  self.agent_state = self.agent_manager.prepend_to_in_context_messages(
@@ -0,0 +1,6 @@
1
+ class IncompatibleAgentType(ValueError):
2
+ def __init__(self, expected_type: str, actual_type: str):
3
+ message = f"Incompatible agent type: expected '{expected_type}', but got '{actual_type}'."
4
+ super().__init__(message)
5
+ self.expected_type = expected_type
6
+ self.actual_type = actual_type
@@ -67,8 +67,10 @@ class LettaAgent(BaseAgent):
67
67
  )
68
68
  tool_rules_solver = ToolRulesSolver(agent_state.tool_rules)
69
69
  llm_client = LLMClient.create(
70
- provider=agent_state.llm_config.model_endpoint_type,
70
+ provider_name=agent_state.llm_config.provider_name,
71
+ provider_type=agent_state.llm_config.model_endpoint_type,
71
72
  put_inner_thoughts_first=True,
73
+ actor_id=self.actor.id,
72
74
  )
73
75
  for step in range(max_steps):
74
76
  response = await self._get_ai_reply(
@@ -109,8 +111,10 @@ class LettaAgent(BaseAgent):
109
111
  )
110
112
  tool_rules_solver = ToolRulesSolver(agent_state.tool_rules)
111
113
  llm_client = LLMClient.create(
112
- llm_config=agent_state.llm_config,
114
+ provider_name=agent_state.llm_config.provider_name,
115
+ provider_type=agent_state.llm_config.model_endpoint_type,
113
116
  put_inner_thoughts_first=True,
117
+ actor_id=self.actor.id,
114
118
  )
115
119
 
116
120
  for step in range(max_steps):
@@ -125,7 +129,7 @@ class LettaAgent(BaseAgent):
125
129
  # TODO: THIS IS INCREDIBLY UGLY
126
130
  # TODO: THERE ARE MULTIPLE COPIES OF THE LLM_CONFIG EVERYWHERE THAT ARE GETTING MANIPULATED
127
131
  interface = AnthropicStreamingInterface(
128
- use_assistant_message=use_assistant_message, put_inner_thoughts_in_kwarg=llm_client.llm_config.put_inner_thoughts_in_kwargs
132
+ use_assistant_message=use_assistant_message, put_inner_thoughts_in_kwarg=agent_state.llm_config.put_inner_thoughts_in_kwargs
129
133
  )
130
134
  async for chunk in interface.process(stream):
131
135
  yield f"data: {chunk.model_dump_json()}\n\n"
@@ -179,6 +183,7 @@ class LettaAgent(BaseAgent):
179
183
  ToolType.LETTA_SLEEPTIME_CORE,
180
184
  }
181
185
  or (t.tool_type == ToolType.LETTA_MULTI_AGENT_CORE and t.name == "send_message_to_agents_matching_tags")
186
+ or (t.tool_type == ToolType.EXTERNAL_COMPOSIO)
182
187
  ]
183
188
 
184
189
  valid_tool_names = tool_rules_solver.get_allowed_tool_names(available_tools=set([t.name for t in tools]))
@@ -274,45 +279,49 @@ class LettaAgent(BaseAgent):
274
279
  return persisted_messages, continue_stepping
275
280
 
276
281
  def _rebuild_memory(self, in_context_messages: List[Message], agent_state: AgentState) -> List[Message]:
277
- self.agent_manager.refresh_memory(agent_state=agent_state, actor=self.actor)
278
-
279
- # TODO: This is a pretty brittle pattern established all over our code, need to get rid of this
280
- curr_system_message = in_context_messages[0]
281
- curr_memory_str = agent_state.memory.compile()
282
- curr_system_message_text = curr_system_message.content[0].text
283
- if curr_memory_str in curr_system_message_text:
284
- # NOTE: could this cause issues if a block is removed? (substring match would still work)
285
- logger.debug(
286
- f"Memory hasn't changed for agent id={agent_state.id} and actor=({self.actor.id}, {self.actor.name}), skipping system prompt rebuild"
287
- )
288
- return in_context_messages
282
+ try:
283
+ self.agent_manager.refresh_memory(agent_state=agent_state, actor=self.actor)
284
+
285
+ # TODO: This is a pretty brittle pattern established all over our code, need to get rid of this
286
+ curr_system_message = in_context_messages[0]
287
+ curr_memory_str = agent_state.memory.compile()
288
+ curr_system_message_text = curr_system_message.content[0].text
289
+ if curr_memory_str in curr_system_message_text:
290
+ # NOTE: could this cause issues if a block is removed? (substring match would still work)
291
+ logger.debug(
292
+ f"Memory hasn't changed for agent id={agent_state.id} and actor=({self.actor.id}, {self.actor.name}), skipping system prompt rebuild"
293
+ )
294
+ return in_context_messages
289
295
 
290
- memory_edit_timestamp = get_utc_time()
296
+ memory_edit_timestamp = get_utc_time()
291
297
 
292
- num_messages = self.message_manager.size(actor=self.actor, agent_id=agent_state.id)
293
- num_archival_memories = self.passage_manager.size(actor=self.actor, agent_id=agent_state.id)
298
+ num_messages = self.message_manager.size(actor=self.actor, agent_id=agent_state.id)
299
+ num_archival_memories = self.passage_manager.size(actor=self.actor, agent_id=agent_state.id)
294
300
 
295
- new_system_message_str = compile_system_message(
296
- system_prompt=agent_state.system,
297
- in_context_memory=agent_state.memory,
298
- in_context_memory_last_edit=memory_edit_timestamp,
299
- previous_message_count=num_messages,
300
- archival_memory_size=num_archival_memories,
301
- )
301
+ new_system_message_str = compile_system_message(
302
+ system_prompt=agent_state.system,
303
+ in_context_memory=agent_state.memory,
304
+ in_context_memory_last_edit=memory_edit_timestamp,
305
+ previous_message_count=num_messages,
306
+ archival_memory_size=num_archival_memories,
307
+ )
302
308
 
303
- diff = united_diff(curr_system_message_text, new_system_message_str)
304
- if len(diff) > 0:
305
- logger.debug(f"Rebuilding system with new memory...\nDiff:\n{diff}")
309
+ diff = united_diff(curr_system_message_text, new_system_message_str)
310
+ if len(diff) > 0:
311
+ logger.debug(f"Rebuilding system with new memory...\nDiff:\n{diff}")
306
312
 
307
- new_system_message = self.message_manager.update_message_by_id(
308
- curr_system_message.id, message_update=MessageUpdate(content=new_system_message_str), actor=self.actor
309
- )
313
+ new_system_message = self.message_manager.update_message_by_id(
314
+ curr_system_message.id, message_update=MessageUpdate(content=new_system_message_str), actor=self.actor
315
+ )
310
316
 
311
- # Skip pulling down the agent's memory again to save on a db call
312
- return [new_system_message] + in_context_messages[1:]
317
+ # Skip pulling down the agent's memory again to save on a db call
318
+ return [new_system_message] + in_context_messages[1:]
313
319
 
314
- else:
315
- return in_context_messages
320
+ else:
321
+ return in_context_messages
322
+ except:
323
+ logger.exception(f"Failed to rebuild memory for agent id={agent_state.id} and actor=({self.actor.id}, {self.actor.name})")
324
+ raise
316
325
 
317
326
  @trace_method
318
327
  async def _execute_tool(self, tool_name: str, tool_args: dict, agent_state: AgentState) -> Tuple[str, bool]:
@@ -331,6 +340,10 @@ class LettaAgent(BaseAgent):
331
340
  results = await self._send_message_to_agents_matching_tags(**tool_args)
332
341
  log_event(name="finish_send_message_to_agents_matching_tags", attributes=tool_args)
333
342
  return json.dumps(results), True
343
+ elif target_tool.type == ToolType.EXTERNAL_COMPOSIO:
344
+ log_event(name=f"start_composio_{tool_name}_execution", attributes=tool_args)
345
+ log_event(name=f"finish_compsio_{tool_name}_execution", attributes=tool_args)
346
+ return tool_execution_result.func_return, True
334
347
  else:
335
348
  tool_execution_manager = ToolExecutionManager(agent_state=agent_state, actor=self.actor)
336
349
  # TODO: Integrate sandbox result
@@ -156,8 +156,10 @@ class LettaAgentBatch:
156
156
 
157
157
  log_event(name="init_llm_client")
158
158
  llm_client = LLMClient.create(
159
- provider=agent_states[0].llm_config.model_endpoint_type,
159
+ provider_name=agent_states[0].llm_config.provider_name,
160
+ provider_type=agent_states[0].llm_config.model_endpoint_type,
160
161
  put_inner_thoughts_first=True,
162
+ actor_id=self.actor.id,
161
163
  )
162
164
  agent_llm_config_mapping = {s.id: s.llm_config for s in agent_states}
163
165
 
@@ -273,8 +275,10 @@ class LettaAgentBatch:
273
275
 
274
276
  # translate provider‑specific response → OpenAI‑style tool call (unchanged)
275
277
  llm_client = LLMClient.create(
276
- provider=item.llm_config.model_endpoint_type,
278
+ provider_name=item.llm_config.provider_name,
279
+ provider_type=item.llm_config.model_endpoint_type,
277
280
  put_inner_thoughts_first=True,
281
+ actor_id=self.actor.id,
278
282
  )
279
283
  tool_call = (
280
284
  llm_client.convert_response_to_chat_completion(
@@ -6,6 +6,7 @@ from typing import Any, AsyncGenerator, Dict, List, Optional, Tuple
6
6
  import openai
7
7
 
8
8
  from letta.agents.base_agent import BaseAgent
9
+ from letta.agents.exceptions import IncompatibleAgentType
9
10
  from letta.agents.voice_sleeptime_agent import VoiceSleeptimeAgent
10
11
  from letta.constants import NON_USER_MSG_PREFIX
11
12
  from letta.helpers.datetime_helpers import get_utc_time
@@ -18,7 +19,7 @@ from letta.helpers.tool_execution_helper import (
18
19
  from letta.interfaces.openai_chat_completions_streaming_interface import OpenAIChatCompletionsStreamingInterface
19
20
  from letta.log import get_logger
20
21
  from letta.orm.enums import ToolType
21
- from letta.schemas.agent import AgentState
22
+ from letta.schemas.agent import AgentState, AgentType
22
23
  from letta.schemas.enums import MessageRole
23
24
  from letta.schemas.letta_response import LettaResponse
24
25
  from letta.schemas.message import Message, MessageCreate, MessageUpdate
@@ -68,8 +69,6 @@ class VoiceAgent(BaseAgent):
68
69
  block_manager: BlockManager,
69
70
  passage_manager: PassageManager,
70
71
  actor: User,
71
- message_buffer_limit: int,
72
- message_buffer_min: int,
73
72
  ):
74
73
  super().__init__(
75
74
  agent_id=agent_id, openai_client=openai_client, message_manager=message_manager, agent_manager=agent_manager, actor=actor
@@ -80,8 +79,6 @@ class VoiceAgent(BaseAgent):
80
79
  self.passage_manager = passage_manager
81
80
  # TODO: This is not guaranteed to exist!
82
81
  self.summary_block_label = "human"
83
- self.message_buffer_limit = message_buffer_limit
84
- self.message_buffer_min = message_buffer_min
85
82
 
86
83
  # Cached archival memory/message size
87
84
  self.num_messages = self.message_manager.size(actor=self.actor, agent_id=agent_id)
@@ -108,8 +105,8 @@ class VoiceAgent(BaseAgent):
108
105
  target_block_label=self.summary_block_label,
109
106
  message_transcripts=[],
110
107
  ),
111
- message_buffer_limit=self.message_buffer_limit,
112
- message_buffer_min=self.message_buffer_min,
108
+ message_buffer_limit=agent_state.multi_agent_group.max_message_buffer_length,
109
+ message_buffer_min=agent_state.multi_agent_group.min_message_buffer_length,
113
110
  )
114
111
 
115
112
  return summarizer
@@ -124,9 +121,15 @@ class VoiceAgent(BaseAgent):
124
121
  """
125
122
  if len(input_messages) != 1 or input_messages[0].role != MessageRole.user:
126
123
  raise ValueError(f"Voice Agent was invoked with multiple input messages or message did not have role `user`: {input_messages}")
124
+
127
125
  user_query = input_messages[0].content[0].text
128
126
 
129
127
  agent_state = self.agent_manager.get_agent_by_id(self.agent_id, actor=self.actor)
128
+
129
+ # Safety check
130
+ if agent_state.agent_type != AgentType.voice_convo_agent:
131
+ raise IncompatibleAgentType(expected_type=AgentType.voice_convo_agent, actual_type=agent_state.agent_type)
132
+
130
133
  summarizer = self.init_summarizer(agent_state=agent_state)
131
134
 
132
135
  in_context_messages = self.message_manager.get_messages_by_ids(message_ids=agent_state.message_ids, actor=self.actor)
letta/constants.py CHANGED
@@ -4,7 +4,7 @@ from logging import CRITICAL, DEBUG, ERROR, INFO, NOTSET, WARN, WARNING
4
4
  LETTA_DIR = os.path.join(os.path.expanduser("~"), ".letta")
5
5
  LETTA_TOOL_EXECUTION_DIR = os.path.join(LETTA_DIR, "tool_execution_dir")
6
6
 
7
- LETTA_MODEL_ENDPOINT = "https://inference.memgpt.ai"
7
+ LETTA_MODEL_ENDPOINT = "https://inference.letta.com"
8
8
 
9
9
  ADMIN_PREFIX = "/v1/admin"
10
10
  API_PREFIX = "/v1"
@@ -35,6 +35,10 @@ TOOL_CALL_ID_MAX_LEN = 29
35
35
  # minimum context window size
36
36
  MIN_CONTEXT_WINDOW = 4096
37
37
 
38
+ # Voice Sleeptime message buffer lengths
39
+ DEFAULT_MAX_MESSAGE_BUFFER_LENGTH = 30
40
+ DEFAULT_MIN_MESSAGE_BUFFER_LENGTH = 15
41
+
38
42
  # embeddings
39
43
  MAX_EMBEDDING_DIM = 4096 # maximum supported embeding size - do NOT change or else DBs will need to be reset
40
44
  DEFAULT_EMBEDDING_CHUNK_SIZE = 300
@@ -0,0 +1,100 @@
1
+ import asyncio
2
+ import os
3
+ from typing import Any, Optional
4
+
5
+ from composio import ComposioToolSet
6
+ from composio.constants import DEFAULT_ENTITY_ID
7
+ from composio.exceptions import (
8
+ ApiKeyNotProvidedError,
9
+ ComposioSDKError,
10
+ ConnectedAccountNotFoundError,
11
+ EnumMetadataNotFound,
12
+ EnumStringNotFound,
13
+ )
14
+
15
+ from letta.constants import COMPOSIO_ENTITY_ENV_VAR_KEY
16
+
17
+
18
+ # TODO: This is kind of hacky, as this is used to search up the action later on composio's side
19
+ # TODO: So be very careful changing/removing these pair of functions
20
+ def _generate_func_name_from_composio_action(action_name: str) -> str:
21
+ """
22
+ Generates the composio function name from the composio action.
23
+
24
+ Args:
25
+ action_name: The composio action name
26
+
27
+ Returns:
28
+ function name
29
+ """
30
+ return action_name.lower()
31
+
32
+
33
+ def generate_composio_action_from_func_name(func_name: str) -> str:
34
+ """
35
+ Generates the composio action from the composio function name.
36
+
37
+ Args:
38
+ func_name: The composio function name
39
+
40
+ Returns:
41
+ composio action name
42
+ """
43
+ return func_name.upper()
44
+
45
+
46
+ def generate_composio_tool_wrapper(action_name: str) -> tuple[str, str]:
47
+ # Generate func name
48
+ func_name = _generate_func_name_from_composio_action(action_name)
49
+
50
+ wrapper_function_str = f"""\
51
+ def {func_name}(**kwargs):
52
+ raise RuntimeError("Something went wrong - we should never be using the persisted source code for Composio. Please reach out to Letta team")
53
+ """
54
+
55
+ # Compile safety check
56
+ _assert_code_gen_compilable(wrapper_function_str.strip())
57
+
58
+ return func_name, wrapper_function_str.strip()
59
+
60
+
61
+ async def execute_composio_action_async(
62
+ action_name: str, args: dict, api_key: Optional[str] = None, entity_id: Optional[str] = None
63
+ ) -> tuple[str, str]:
64
+ try:
65
+ loop = asyncio.get_running_loop()
66
+ return await loop.run_in_executor(None, execute_composio_action, action_name, args, api_key, entity_id)
67
+ except Exception as e:
68
+ raise RuntimeError(f"Error in execute_composio_action_async: {e}") from e
69
+
70
+
71
+ def execute_composio_action(action_name: str, args: dict, api_key: Optional[str] = None, entity_id: Optional[str] = None) -> Any:
72
+ entity_id = entity_id or os.getenv(COMPOSIO_ENTITY_ENV_VAR_KEY, DEFAULT_ENTITY_ID)
73
+ try:
74
+ composio_toolset = ComposioToolSet(api_key=api_key, entity_id=entity_id, lock=False)
75
+ response = composio_toolset.execute_action(action=action_name, params=args)
76
+ except ApiKeyNotProvidedError:
77
+ raise RuntimeError(
78
+ f"Composio API key is missing for action '{action_name}'. "
79
+ "Please set the sandbox environment variables either through the ADE or the API."
80
+ )
81
+ except ConnectedAccountNotFoundError:
82
+ raise RuntimeError(f"No connected account was found for action '{action_name}'. " "Please link an account and try again.")
83
+ except EnumStringNotFound as e:
84
+ raise RuntimeError(f"Invalid value provided for action '{action_name}': " + str(e) + ". Please check the action parameters.")
85
+ except EnumMetadataNotFound as e:
86
+ raise RuntimeError(f"Invalid value provided for action '{action_name}': " + str(e) + ". Please check the action parameters.")
87
+ except ComposioSDKError as e:
88
+ raise RuntimeError(f"An unexpected error occurred in Composio SDK while executing action '{action_name}': " + str(e))
89
+
90
+ if "error" in response and response["error"]:
91
+ raise RuntimeError(f"Error while executing action '{action_name}': " + str(response["error"]))
92
+
93
+ return response.get("data")
94
+
95
+
96
+ def _assert_code_gen_compilable(code_str):
97
+ try:
98
+ compile(code_str, "<string>", "exec")
99
+ except SyntaxError as e:
100
+ print(f"Syntax error in code: {e}")
@@ -1,8 +1,9 @@
1
1
  import importlib
2
2
  import inspect
3
+ from collections.abc import Callable
3
4
  from textwrap import dedent # remove indentation
4
5
  from types import ModuleType
5
- from typing import Dict, List, Literal, Optional
6
+ from typing import Any, Dict, List, Literal, Optional
6
7
 
7
8
  from letta.errors import LettaToolCreateError
8
9
  from letta.functions.schema_generator import generate_schema
@@ -66,7 +67,8 @@ def parse_source_code(func) -> str:
66
67
  return source_code
67
68
 
68
69
 
69
- def get_function_from_module(module_name: str, function_name: str):
70
+ # TODO (cliandy) refactor below two funcs
71
+ def get_function_from_module(module_name: str, function_name: str) -> Callable[..., Any]:
70
72
  """
71
73
  Dynamically imports a function from a specified module.
72
74