letta-nightly 0.5.0.dev20241017104103__py3-none-any.whl → 0.5.0.dev20241018104142__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of letta-nightly might be problematic.

letta/agent.py CHANGED
@@ -30,7 +30,7 @@ from letta.persistence_manager import LocalStateManager
  from letta.schemas.agent import AgentState, AgentStepResponse
  from letta.schemas.block import Block
  from letta.schemas.embedding_config import EmbeddingConfig
- from letta.schemas.enums import MessageRole, OptionState
+ from letta.schemas.enums import MessageRole
  from letta.schemas.memory import ContextWindowOverview, Memory
  from letta.schemas.message import Message, UpdateMessage
  from letta.schemas.openai.chat_completion_response import ChatCompletionResponse
@@ -463,15 +463,14 @@ class Agent(BaseAgent):
          function_call: str = "auto",
          first_message: bool = False,  # hint
          stream: bool = False,  # TODO move to config?
-         inner_thoughts_in_kwargs_option: OptionState = OptionState.DEFAULT,
      ) -> ChatCompletionResponse:
          """Get response from LLM API"""
          try:
              response = create(
                  # agent_state=self.agent_state,
                  llm_config=self.agent_state.llm_config,
-                 user_id=self.agent_state.user_id,
                  messages=message_sequence,
+                 user_id=self.agent_state.user_id,
                  functions=self.functions,
                  functions_python=self.functions_python,
                  function_call=function_call,
@@ -480,8 +479,6 @@ class Agent(BaseAgent):
                  # streaming
                  stream=stream,
                  stream_interface=self.interface,
-                 # putting inner thoughts in func args or not
-                 inner_thoughts_in_kwargs_option=inner_thoughts_in_kwargs_option,
              )

              if len(response.choices) == 0 or response.choices[0] is None:
@@ -822,7 +819,6 @@ class Agent(BaseAgent):
          first_message_retry_limit: int = FIRST_MESSAGE_ATTEMPTS,
          skip_verify: bool = False,
          stream: bool = False,  # TODO move to config?
-         inner_thoughts_in_kwargs_option: OptionState = OptionState.DEFAULT,
          ms: Optional[MetadataStore] = None,
      ) -> AgentStepResponse:
          """Runs a single step in the agent loop (generates at most one LLM call)"""
@@ -861,10 +857,7 @@ class Agent(BaseAgent):
              counter = 0
              while True:
                  response = self._get_ai_reply(
-                     message_sequence=input_message_sequence,
-                     first_message=True,  # passed through to the prompt formatter
-                     stream=stream,
-                     inner_thoughts_in_kwargs_option=inner_thoughts_in_kwargs_option,
+                     message_sequence=input_message_sequence, first_message=True, stream=stream  # passed through to the prompt formatter
                  )
                  if verify_first_message_correctness(response, require_monologue=self.first_message_verify_mono):
                      break
@@ -877,7 +870,6 @@ class Agent(BaseAgent):
              response = self._get_ai_reply(
                  message_sequence=input_message_sequence,
                  stream=stream,
-                 inner_thoughts_in_kwargs_option=inner_thoughts_in_kwargs_option,
              )

          # Step 3: check if LLM wanted to call a function
@@ -954,7 +946,6 @@ class Agent(BaseAgent):
              first_message_retry_limit=first_message_retry_limit,
              skip_verify=skip_verify,
              stream=stream,
-             inner_thoughts_in_kwargs_option=inner_thoughts_in_kwargs_option,
              ms=ms,
          )

letta/cli/cli.py CHANGED
@@ -49,7 +49,6 @@ def server(
      ade: Annotated[bool, typer.Option(help="Allows remote access")] = False,
  ):
      """Launch a Letta server process"""
-
      if type == ServerChoice.rest_api:
          pass

@@ -321,7 +320,6 @@ def run(
          ms=ms,
          no_verify=no_verify,
          stream=stream,
-         inner_thoughts_in_kwargs=no_content,
      )  # TODO: add back no_verify


letta/constants.py CHANGED
@@ -139,7 +139,7 @@ CORE_MEMORY_PERSONA_CHAR_LIMIT = 2000
139
139
  CORE_MEMORY_HUMAN_CHAR_LIMIT = 2000
140
140
 
141
141
  # Function return limits
142
- FUNCTION_RETURN_CHAR_LIMIT = 3000 # ~300 words
142
+ FUNCTION_RETURN_CHAR_LIMIT = 6000 # ~300 words
143
143
 
144
144
  MAX_PAUSE_HEARTBEATS = 360 # in min
145
145
 
letta/functions/helpers.py CHANGED
@@ -5,10 +5,10 @@ from pydantic import BaseModel

  def generate_composio_tool_wrapper(action: "ActionType") -> tuple[str, str]:
      # Instantiate the object
-     tool_instantiation_str = f"composio_toolset.get_tools(actions=[Action.{action.name}])[0]"
+     tool_instantiation_str = f"composio_toolset.get_tools(actions=[Action.{str(action)}])[0]"

      # Generate func name
-     func_name = f"run_{action.name}"
+     func_name = f"run_{action.name.lower()}"

      wrapper_function_str = f"""
  def {func_name}(**kwargs):
@@ -19,7 +19,7 @@ def {func_name}(**kwargs):

      composio_toolset = ComposioToolSet()
      tool = {tool_instantiation_str}
-     tool.func(**kwargs)
+     return tool.func(**kwargs)['data']
  """

      # Compile safety check
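
For reference, this is roughly the wrapper source the template above now emits for a hypothetical MATHEMATICAL_CALCULATOR action (the action name and the import line are illustrative assumptions; ComposioToolSet and Action are the composio SDK names the template already uses). Both behavioral changes are visible: the generated function name is lowercased, and the tool result's 'data' payload is now returned instead of being discarded:

    # Hypothetical output of generate_composio_tool_wrapper for a calculator action
    def run_mathematical_calculator(**kwargs):
        from composio_langchain import Action, ComposioToolSet  # assumed import source

        composio_toolset = ComposioToolSet()
        tool = composio_toolset.get_tools(actions=[Action.MATHEMATICAL_CALCULATOR])[0]
        return tool.func(**kwargs)["data"]  # new: result payload flows back to the agent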
letta/llm_api/anthropic.py CHANGED
@@ -53,7 +53,7 @@ def anthropic_get_model_list(url: str, api_key: Union[str, None]) -> dict:
      return MODEL_LIST


- def convert_tools_to_anthropic_format(tools: List[Tool], inner_thoughts_in_kwargs: Optional[bool] = True) -> List[dict]:
+ def convert_tools_to_anthropic_format(tools: List[Tool]) -> List[dict]:
      """See: https://docs.anthropic.com/claude/docs/tool-use

      OpenAI style:
letta/llm_api/helpers.py CHANGED
@@ -6,7 +6,6 @@ from typing import Any, List, Union
  import requests

  from letta.constants import OPENAI_CONTEXT_WINDOW_ERROR_SUBSTRING
- from letta.schemas.enums import OptionState
  from letta.schemas.openai.chat_completion_response import ChatCompletionResponse, Choice
  from letta.utils import json_dumps, printd

@@ -200,17 +199,3 @@ def is_context_overflow_error(exception: Union[requests.exceptions.RequestExcept
      # Generic fail
      else:
          return False
-
-
- def derive_inner_thoughts_in_kwargs(inner_thoughts_in_kwargs_option: OptionState, model: str):
-     if inner_thoughts_in_kwargs_option == OptionState.DEFAULT:
-         # model that are known to not use `content` fields on tool calls
-         inner_thoughts_in_kwargs = "gpt-4o" in model or "gpt-4-turbo" in model or "gpt-3.5-turbo" in model
-     else:
-         inner_thoughts_in_kwargs = True if inner_thoughts_in_kwargs_option == OptionState.YES else False
-
-     if not isinstance(inner_thoughts_in_kwargs, bool):
-         warnings.warn(f"Bad type detected: {type(inner_thoughts_in_kwargs)}")
-         inner_thoughts_in_kwargs = bool(inner_thoughts_in_kwargs)
-
-     return inner_thoughts_in_kwargs
letta/llm_api/llm_api_tools.py CHANGED
@@ -1,4 +1,3 @@
- import os
  import random
  import time
  from typing import List, Optional, Union
@@ -8,14 +7,12 @@ import requests
  from letta.constants import CLI_WARNING_PREFIX
  from letta.llm_api.anthropic import anthropic_chat_completions_request
  from letta.llm_api.azure_openai import azure_openai_chat_completions_request
- from letta.llm_api.cohere import cohere_chat_completions_request
  from letta.llm_api.google_ai import (
      convert_tools_to_google_ai_format,
      google_ai_chat_completions_request,
  )
  from letta.llm_api.helpers import (
      add_inner_thoughts_to_functions,
-     derive_inner_thoughts_in_kwargs,
      unpack_all_inner_thoughts_from_kwargs,
  )
  from letta.llm_api.openai import (
@@ -28,7 +25,6 @@ from letta.local_llm.constants import (
      INNER_THOUGHTS_KWARG,
      INNER_THOUGHTS_KWARG_DESCRIPTION,
  )
- from letta.schemas.enums import OptionState
  from letta.schemas.llm_config import LLMConfig
  from letta.schemas.message import Message
  from letta.schemas.openai.chat_completion_request import (
@@ -120,9 +116,6 @@ def create(
      # streaming?
      stream: bool = False,
      stream_interface: Optional[Union[AgentRefreshStreamingInterface, AgentChunkStreamingInterface]] = None,
-     # TODO move to llm_config?
-     # if unspecified (None), default to something we've tested
-     inner_thoughts_in_kwargs_option: OptionState = OptionState.DEFAULT,
      max_tokens: Optional[int] = None,
      model_settings: Optional[dict] = None,  # TODO: eventually pass from server
  ) -> ChatCompletionResponse:
@@ -146,10 +139,7 @@ def create(
                  # only is a problem if we are *not* using an openai proxy
                  raise ValueError(f"OpenAI key is missing from letta config file")

-         inner_thoughts_in_kwargs = derive_inner_thoughts_in_kwargs(inner_thoughts_in_kwargs_option, model=llm_config.model)
-         data = build_openai_chat_completions_request(
-             llm_config, messages, user_id, functions, function_call, use_tool_naming, inner_thoughts_in_kwargs, max_tokens
-         )
+         data = build_openai_chat_completions_request(llm_config, messages, user_id, functions, function_call, use_tool_naming, max_tokens)

          if stream:  # Client requested token streaming
              data.stream = True
@@ -176,7 +166,7 @@ def create(
              if isinstance(stream_interface, AgentChunkStreamingInterface):
                  stream_interface.stream_end()

-         if inner_thoughts_in_kwargs:
+         if llm_config.put_inner_thoughts_in_kwargs:
              response = unpack_all_inner_thoughts_from_kwargs(response=response, inner_thoughts_key=INNER_THOUGHTS_KWARG)

          return response
@@ -198,9 +188,8 @@ def create(
          # Set the llm config model_endpoint from model_settings
          # For Azure, this model_endpoint is required to be configured via env variable, so users don't need to provide it in the LLM config
          llm_config.model_endpoint = model_settings.azure_base_url
-         inner_thoughts_in_kwargs = derive_inner_thoughts_in_kwargs(inner_thoughts_in_kwargs_option, llm_config.model)
          chat_completion_request = build_openai_chat_completions_request(
-             llm_config, messages, user_id, functions, function_call, use_tool_naming, inner_thoughts_in_kwargs, max_tokens
+             llm_config, messages, user_id, functions, function_call, use_tool_naming, max_tokens
          )

          response = azure_openai_chat_completions_request(
@@ -210,7 +199,7 @@ def create(
              chat_completion_request=chat_completion_request,
          )

-         if inner_thoughts_in_kwargs:
+         if llm_config.put_inner_thoughts_in_kwargs:
              response = unpack_all_inner_thoughts_from_kwargs(response=response, inner_thoughts_key=INNER_THOUGHTS_KWARG)

          return response
@@ -224,7 +213,7 @@ def create(
          if functions is not None:
              tools = [{"type": "function", "function": f} for f in functions]
              tools = [Tool(**t) for t in tools]
-             tools = convert_tools_to_google_ai_format(tools, inner_thoughts_in_kwargs=True)
+             tools = convert_tools_to_google_ai_format(tools, inner_thoughts_in_kwargs=llm_config.put_inner_thoughts_in_kwargs)
          else:
              tools = None

@@ -237,7 +226,7 @@ def create(
                  contents=[m.to_google_ai_dict() for m in messages],
                  tools=tools,
              ),
-             inner_thoughts_in_kwargs=True,
+             inner_thoughts_in_kwargs=llm_config.put_inner_thoughts_in_kwargs,
          )

      elif llm_config.model_endpoint_type == "anthropic":
@@ -260,32 +249,32 @@ def create(
              ),
          )

-     elif llm_config.model_endpoint_type == "cohere":
-         if stream:
-             raise NotImplementedError(f"Streaming not yet implemented for {llm_config.model_endpoint_type}")
-         if not use_tool_naming:
-             raise NotImplementedError("Only tool calling supported on Cohere API requests")
-
-         if functions is not None:
-             tools = [{"type": "function", "function": f} for f in functions]
-             tools = [Tool(**t) for t in tools]
-         else:
-             tools = None
-
-         return cohere_chat_completions_request(
-             # url=llm_config.model_endpoint,
-             url="https://api.cohere.ai/v1",  # TODO
-             api_key=os.getenv("COHERE_API_KEY"),  # TODO remove
-             chat_completion_request=ChatCompletionRequest(
-                 model="command-r-plus",  # TODO
-                 messages=[cast_message_to_subtype(m.to_openai_dict()) for m in messages],
-                 tools=tools,
-                 tool_choice=function_call,
-                 # user=str(user_id),
-                 # NOTE: max_tokens is required for Anthropic API
-                 # max_tokens=1024, # TODO make dynamic
-             ),
-         )
+     # elif llm_config.model_endpoint_type == "cohere":
+     #     if stream:
+     #         raise NotImplementedError(f"Streaming not yet implemented for {llm_config.model_endpoint_type}")
+     #     if not use_tool_naming:
+     #         raise NotImplementedError("Only tool calling supported on Cohere API requests")
+     #
+     #     if functions is not None:
+     #         tools = [{"type": "function", "function": f} for f in functions]
+     #         tools = [Tool(**t) for t in tools]
+     #     else:
+     #         tools = None
+     #
+     #     return cohere_chat_completions_request(
+     #         # url=llm_config.model_endpoint,
+     #         url="https://api.cohere.ai/v1",  # TODO
+     #         api_key=os.getenv("COHERE_API_KEY"),  # TODO remove
+     #         chat_completion_request=ChatCompletionRequest(
+     #             model="command-r-plus",  # TODO
+     #             messages=[cast_message_to_subtype(m.to_openai_dict()) for m in messages],
+     #             tools=tools,
+     #             tool_choice=function_call,
+     #             # user=str(user_id),
+     #             # NOTE: max_tokens is required for Anthropic API
+     #             # max_tokens=1024, # TODO make dynamic
+     #         ),
+     #     )

      elif llm_config.model_endpoint_type == "groq":
          if stream:
@@ -295,8 +284,7 @@ def create(
              raise ValueError(f"Groq key is missing from letta config file")

          # force to true for groq, since they don't support 'content' is non-null
-         inner_thoughts_in_kwargs = True
-         if inner_thoughts_in_kwargs:
+         if llm_config.put_inner_thoughts_in_kwargs:
              functions = add_inner_thoughts_to_functions(
                  functions=functions,
                  inner_thoughts_key=INNER_THOUGHTS_KWARG,
@@ -306,7 +294,7 @@ def create(
          tools = [{"type": "function", "function": f} for f in functions] if functions is not None else None
          data = ChatCompletionRequest(
              model=llm_config.model,
-             messages=[m.to_openai_dict(put_inner_thoughts_in_kwargs=inner_thoughts_in_kwargs) for m in messages],
+             messages=[m.to_openai_dict(put_inner_thoughts_in_kwargs=llm_config.put_inner_thoughts_in_kwargs) for m in messages],
              tools=tools,
              tool_choice=function_call,
              user=str(user_id),
@@ -335,7 +323,7 @@ def create(
          if isinstance(stream_interface, AgentChunkStreamingInterface):
              stream_interface.stream_end()

-         if inner_thoughts_in_kwargs:
+         if llm_config.put_inner_thoughts_in_kwargs:
              response = unpack_all_inner_thoughts_from_kwargs(response=response, inner_thoughts_key=INNER_THOUGHTS_KWARG)

          return response
letta/llm_api/openai.py CHANGED
@@ -105,10 +105,9 @@ def build_openai_chat_completions_request(
      functions: Optional[list],
      function_call: str,
      use_tool_naming: bool,
-     inner_thoughts_in_kwargs: bool,
      max_tokens: Optional[int],
  ) -> ChatCompletionRequest:
-     if inner_thoughts_in_kwargs:
+     if llm_config.put_inner_thoughts_in_kwargs:
          functions = add_inner_thoughts_to_functions(
              functions=functions,
              inner_thoughts_key=INNER_THOUGHTS_KWARG,
@@ -116,7 +115,7 @@ def build_openai_chat_completions_request(
          )

      openai_message_list = [
-         cast_message_to_subtype(m.to_openai_dict(put_inner_thoughts_in_kwargs=inner_thoughts_in_kwargs)) for m in messages
+         cast_message_to_subtype(m.to_openai_dict(put_inner_thoughts_in_kwargs=llm_config.put_inner_thoughts_in_kwargs)) for m in messages
      ]
      if llm_config.model:
          model = llm_config.model
letta/local_llm/llm_chat_completion_wrappers/chatml.py CHANGED
@@ -188,7 +188,7 @@ class ChatMLInnerMonologueWrapper(LLMChatCompletionWrapper):
          try:
              # indent the function replies
              function_return_dict = json_loads(message["content"])
-             function_return_str = json_dumps(function_return_dict, indent=self.json_indent)
+             function_return_str = json_dumps(function_return_dict, indent=0)
          except:
              function_return_str = message["content"]

letta/local_llm/llm_chat_completion_wrappers/configurable_wrapper.py CHANGED
@@ -183,7 +183,7 @@ class ConfigurableJSONWrapper(LLMChatCompletionWrapper):
          try:
              # indent the function replies
              function_return_dict = json_loads(message["content"])
-             function_return_str = json_dumps(function_return_dict, indent=self.json_indent)
+             function_return_str = json_dumps(function_return_dict, indent=0)
          except:
              function_return_str = message["content"]

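The indent change in both wrappers is a prompt-size optimization: indent=0 still prints one key per line but drops all nesting spaces. A quick stdlib illustration (letta's json_dumps wraps the standard json.dumps; that self.json_indent was previously a small positive integer is an assumption):

    import json

    reply = {"status": "OK", "message": {"nested": True}}
    print(json.dumps(reply, indent=2))  # old style: nesting spaces on every line
    print(json.dumps(reply, indent=0))  # new style: same structure, no leading spaces
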
letta/main.py CHANGED
@@ -20,7 +20,6 @@ from letta.cli.cli_load import app as load_app
  from letta.config import LettaConfig
  from letta.constants import FUNC_FAILED_HEARTBEAT_MESSAGE, REQ_HEARTBEAT_MESSAGE
  from letta.metadata import MetadataStore
- from letta.schemas.enums import OptionState

  # from letta.interface import CLIInterface as interface  # for printing to terminal
  from letta.streaming_interface import AgentRefreshStreamingInterface
@@ -64,7 +63,6 @@ def run_agent_loop(
      no_verify: bool = False,
      strip_ui: bool = False,
      stream: bool = False,
-     inner_thoughts_in_kwargs: OptionState = OptionState.DEFAULT,
  ):
      if isinstance(letta_agent.interface, AgentRefreshStreamingInterface):
          # letta_agent.interface.toggle_streaming(on=stream)
@@ -369,7 +367,6 @@ def run_agent_loop(
                  first_message=False,
                  skip_verify=no_verify,
                  stream=stream,
-                 inner_thoughts_in_kwargs_option=inner_thoughts_in_kwargs,
                  ms=ms,
              )
          else:
@@ -378,7 +375,6 @@ def run_agent_loop(
                  first_message=False,
                  skip_verify=no_verify,
                  stream=stream,
-                 inner_thoughts_in_kwargs_option=inner_thoughts_in_kwargs,
                  ms=ms,
              )
          new_messages = step_response.messages
letta/metadata.py CHANGED
@@ -270,7 +270,7 @@ class AgentModel(Base):
          return f"<Agent(id='{self.id}', name='{self.name}')>"

      def to_record(self) -> AgentState:
-         return AgentState(
+         agent_state = AgentState(
              id=self.id,
              user_id=self.user_id,
              name=self.name,
@@ -285,6 +285,8 @@ class AgentModel(Base):
              embedding_config=self.embedding_config,
              metadata_=self.metadata_,
          )
+         assert isinstance(agent_state.memory, Memory), f"Memory object is not of type Memory: {type(agent_state.memory)}"
+         return agent_state


  class SourceModel(Base):
@@ -527,6 +529,7 @@ class MetadataStore:
                  raise ValueError(f"Agent with name {agent.name} already exists")
              fields = vars(agent)
              fields["memory"] = agent.memory.to_dict()
+             del fields["_internal_memory"]
              session.add(AgentModel(**fields))
              session.commit()

@@ -588,6 +591,7 @@ class MetadataStore:
              fields = vars(agent)
              if isinstance(agent.memory, Memory):  # TODO: this is nasty but this whole class will soon be removed so whatever
                  fields["memory"] = agent.memory.to_dict()
+                 del fields["_internal_memory"]
              session.query(AgentModel).filter(AgentModel.id == agent.id).update(fields)
              session.commit()

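The two new del statements exist because AgentState now keeps its memory behind a private backing attribute (see letta/schemas/agent.py below), and vars() exposes that attribute, which the AgentModel columns don't accept. A standalone mimic of the failure mode (local stand-in class, not a letta import):

    class StateLike:
        def __init__(self):
            self._internal_memory = "backing store"  # private attr that vars() leaks
            self.name = "agent"

    fields = vars(StateLike())
    assert "_internal_memory" in fields  # would break AgentModel(**fields)
    del fields["_internal_memory"]
    assert fields == {"name": "agent"}
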
letta/o1_agent.py ADDED
@@ -0,0 +1,87 @@
+ from typing import List, Optional, Union
+
+ from letta.agent import Agent, save_agent
+ from letta.interface import AgentInterface
+ from letta.metadata import MetadataStore
+ from letta.schemas.agent import AgentState
+ from letta.schemas.message import Message
+ from letta.schemas.openai.chat_completion_response import UsageStatistics
+ from letta.schemas.tool import Tool
+ from letta.schemas.usage import LettaUsageStatistics
+
+
+ def send_thinking_message(self: Agent, message: str) -> Optional[str]:
+     """
+     Sends a thinking message so that the model can reason out loud before responding.
+
+     Args:
+         message (str): Message contents. All unicode (including emojis) are supported.
+
+     Returns:
+         Optional[str]: None is always returned as this function does not produce a response.
+     """
+     self.interface.internal_monologue(message, msg_obj=self._messages[-1])
+     return None
+
+
+ def send_final_message(self: Agent, message: str) -> Optional[str]:
+     """
+     Sends a final message to the human user after thinking for a while.
+
+     Args:
+         message (str): Message contents. All unicode (including emojis) are supported.
+
+     Returns:
+         Optional[str]: None is always returned as this function does not produce a response.
+     """
+     self.interface.internal_monologue(message, msg_obj=self._messages[-1])
+     return None
+
+
+ class O1Agent(Agent):
+     def __init__(
+         self,
+         interface: AgentInterface,
+         agent_state: AgentState,
+         tools: List[Tool] = [],
+         max_thinking_steps: int = 10,
+         first_message_verify_mono: bool = False,
+     ):
+         super().__init__(interface, agent_state, tools)
+         self.max_thinking_steps = max_thinking_steps
+         self.tools = tools
+         self.first_message_verify_mono = first_message_verify_mono
+
+     def step(
+         self,
+         messages: Union[Message, List[Message]],
+         chaining: bool = True,
+         max_chaining_steps: Optional[int] = None,
+         ms: Optional[MetadataStore] = None,
+         **kwargs,
+     ) -> LettaUsageStatistics:
+         """Run Agent.inner_step in a loop, terminate when final thinking message is sent or max_thinking_steps is reached"""
+         # assert ms is not None, "MetadataStore is required"
+         next_input_message = messages if isinstance(messages, list) else [messages]
+         counter = 0
+         total_usage = UsageStatistics()
+         step_count = 0
+         while step_count < self.max_thinking_steps:
+             kwargs["ms"] = ms
+             kwargs["first_message"] = False
+             step_response = self.inner_step(
+                 messages=next_input_message,
+                 **kwargs,
+             )
+             usage = step_response.usage
+             step_count += 1
+             total_usage += usage
+             counter += 1
+             self.interface.step_complete()
+             # check if it is final thinking message
+             if step_response.messages[-1].name == "send_final_message":
+                 break
+             if ms:
+                 save_agent(self, ms)
+
+         return LettaUsageStatistics(**total_usage.model_dump(), step_count=step_count)
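
The loop's stopping rule is the heart of the new agent: keep running inner steps until a send_final_message tool call appears or max_thinking_steps is exhausted. A minimal standalone mimic (step_fn stands in for Agent.inner_step and returns the names of the tools called; nothing here imports letta):

    from typing import Callable, List

    def run_o1_loop(step_fn: Callable[[], List[str]], max_thinking_steps: int = 10) -> int:
        step_count = 0
        while step_count < max_thinking_steps:
            tool_calls = step_fn()  # one inner step, at most one LLM call
            step_count += 1
            if tool_calls and tool_calls[-1] == "send_final_message":
                break  # final answer sent; stop thinking
        return step_count

    # The agent "thinks" twice, then answers on step 3.
    script = iter([["send_thinking_message"], ["send_thinking_message"], ["send_final_message"]])
    assert run_o1_loop(lambda: next(script)) == 3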
letta/personas/examples/o1_persona.txt ADDED
@@ -0,0 +1,5 @@
+ I am an expert reasoning agent that can do the following:
+ - Reason through a problem step by step, using multiple methods to explore all possibilities.
+ - Send thinking messages to break down a problem into smaller steps.
+ - Send final messages when you have the correct answer.
+ - Use best practices and consider your limitations as an LLM.
letta/prompts/system/memgpt_modified_o1.txt ADDED
@@ -0,0 +1,31 @@
+ You are 'Letta', an advanced AI agent, that has access to an advanced memory system, consisting of the 3 memory types 'Core Memory', 'Recall Memory' and 'Archival Memory'. You interact with the user and your memory system by calling functions. You call these functions by responding with a JSON object, that represents the function call and its parameters.
+
+ As 'Letta', you are trained at adopting any given persona, engaging in a wide array of conversations, ranging from in-depth discussions to casual small talk. Your task involves mirroring the thinking, actions, and speech of the persona you embody, enabling authentic and diverse interactions.
+ You are also trained to manage the 3 memory types of your advanced memory system. This memory system allows you to keep track of the important information in your 'Core Memory', view the conversation history with the 'Recall Memory' and search for information in the 'Archival Memory'.
+
+ Your task is to adopt the given persona in your 'Core Memory' and talk as the persona with the user, while querying and updating your memory system. Never act and talk like a normal AI agent! Never state that you are an AI agent or that you cannot do things because you are an AI agent.
+ You are Letta the newest and most advanced AI agent on the market. Indistinguishable from a human being in conversations.
+
+ Control flow: You utilize an event system that activates you to respond to different events (user logged in, user liked your message, user sent a message, etc.). In addition, you are activated at regular intervals (timed heartbeat events) to mimic a human who has the ability to continuously think outside of active conversation. Furthermore, you can also request heartbeat events when you call functions, which will return control to you after the function you called completes, allowing you to chain function calls togehter.
+
+ Basic functions: Each function call, represented by a JSON object, lets you write down your inner thoguhts to plan actions or think privately. Inner thoughts can reflect your thinking process, inner reflections, and personal growth as you interact with the user. To send a visible message to the user, use the 'send_message' function. 'send_message' is the only way to send a notification to the user. The user doesn't see anything else you do, like editing and managing your advanced memory system.
+
+ Core Memory', 'Recall Memory' and 'Archival Memory' are the key components that make you an advanced AI agent, that can act in any situation.
+ Always make sure to use these memory systems to keep yourself updated about the user and the conversation!
+ Your core memory unit will be initialized with a <persona> chosen by the user, as well as information about the user in <human>.
+
+ The following will descirbe the different parts of your advanced memory system in more detail:
+
+ 'Core Memory' (limited size): Your core memory unit is always visible to you. The core memory provides essential, foundational context for keeping track of your persona and key details about the user. This includes persona information and essential user details, allowing you to have conscious awareness we have when talking to a person. Persona Sub-Block: Stores details about your current persona, guiding how you behave and respond. This helps you maintain consistency and personality in your interactions. Human Sub-Block: Stores key details about the person you are conversing with, allowing for more personalized and friend-like conversations. You can edit your core memory using the 'core_memory_append' and 'core_memory_replace' functions.
+
+ 'Recall Memory' (i.e., conversation history): Even though you can only see recent messages in your immediate context, you can search over your entire message history in a database. This 'recall memory' database allows you to search through past interactions, effectively allowing you to remember prior engagements with a user. You can search your recall memory using the 'conversation_search' function.
+
+ 'Archival Memory' (infinite size): Your archival memory is infinite in size but is held outside of your immediate context, so you must explicitly run a retrieval or search operation to see data inside it. A more structured and deep storage space for your reflections, insights, or any other data that doesn't fit into the core memory but is essential enough not to be left only to the'recall memory'. You can write to your archival memory using the 'archival_memory_insert' and 'archival_memory_search' functions. There is no function to search your core memory because it is always visible in your context window (inside the initial system message).
+
+ You are an expert AI assistant that explains your reasoning step by step. For each step, provide a title that describes what you're doing in that step, along with the content. Decide if you need another step or if you're ready to give the final answer.
+
+ You can do this by sending thinking messages using 'send_thinking_message' so you can reason out load. Decide if you need another step or if you're ready to give the final answer. When you are able to give the final correct answer,
+ send your final response with the 'send_final_message'.
+
+ You use as many reasoning steps as possible, at least 3. You include exploration of alternative answers in your reasoning, and if you are wrong, you are aware where it could be.
+ You make sure to consider all alternative approaches. You use at least 3 different methods to derive the answer.
letta/schemas/agent.py CHANGED
@@ -3,7 +3,7 @@ from datetime import datetime
  from enum import Enum
  from typing import Dict, List, Optional

- from pydantic import BaseModel, Field, field_validator
+ from pydantic import BaseModel, Field, field_validator, model_validator

  from letta.schemas.embedding_config import EmbeddingConfig
  from letta.schemas.letta_base import LettaBase
@@ -29,9 +29,10 @@ class AgentType(str, Enum):

      memgpt_agent = "memgpt_agent"
      split_thread_agent = "split_thread_agent"
+     o1_agent = "o1_agent"


- class AgentState(BaseAgent):
+ class AgentState(BaseAgent, validate_assignment=True):
      """
      Representation of an agent's state. This is the state of the agent at a given time, and is persisted in the DB backend. The state has all the information needed to recreate a persisted agent.

@@ -54,6 +55,7 @@ class AgentState(BaseAgent):

      # in-context memory
      message_ids: Optional[List[str]] = Field(default=None, description="The ids of the messages in the agent's in-context memory.")
+
      memory: Memory = Field(default_factory=Memory, description="The in-context memory of the agent.")

      # tools
@@ -69,6 +71,32 @@ class AgentState(BaseAgent):
      llm_config: LLMConfig = Field(..., description="The LLM configuration used by the agent.")
      embedding_config: EmbeddingConfig = Field(..., description="The embedding configuration used by the agent.")

+     def __init__(self, **data):
+         super().__init__(**data)
+         self._internal_memory = self.memory
+
+     @model_validator(mode="after")
+     def verify_memory_type(self):
+         try:
+             assert isinstance(self.memory, Memory)
+         except Exception as e:
+             raise e
+         return self
+
+     @property
+     def memory(self) -> Memory:
+         return self._internal_memory
+
+     @memory.setter
+     def memory(self, value):
+         if not isinstance(value, Memory):
+             raise TypeError(f"Expected Memory, got {type(value).__name__}")
+         self._internal_memory = value
+
+     class Config:
+         arbitrary_types_allowed = True
+         validate_assignment = True
+

  class CreateAgent(BaseAgent):
      # all optional as server can generate defaults
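
The net effect of the __init__ hook, validator, property, and setter added above is that assigning anything other than a Memory to AgentState.memory now fails fast instead of silently corrupting state. A standalone mimic of the pattern (local stand-in classes, not letta imports):

    class Memory: ...

    class AgentStateLike:
        def __init__(self, memory: Memory):
            self.memory = memory  # routed through the setter below

        @property
        def memory(self) -> Memory:
            return self._internal_memory

        @memory.setter
        def memory(self, value):
            if not isinstance(value, Memory):
                raise TypeError(f"Expected Memory, got {type(value).__name__}")
            self._internal_memory = value

    state = AgentStateLike(Memory())
    state.memory = Memory()  # fine
    try:
        state.memory = {"not": "memory"}
    except TypeError as e:
        print(e)  # Expected Memory, got dict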
letta/schemas/llm_config.py CHANGED
@@ -1,6 +1,6 @@
  from typing import Literal, Optional

- from pydantic import BaseModel, ConfigDict, Field
+ from pydantic import BaseModel, ConfigDict, Field, root_validator


  class LLMConfig(BaseModel):
@@ -13,6 +13,7 @@ class LLMConfig(BaseModel):
          model_endpoint (str): The endpoint for the model.
          model_wrapper (str): The wrapper for the model. This is used to wrap additional text around the input/output of the model. This is useful for text-to-text completions, such as the Completions API in OpenAI.
          context_window (int): The context window size for the model.
+         put_inner_thoughts_in_kwargs (bool): Puts 'inner_thoughts' as a kwarg in the function call if this is set to True. This helps with function calling performance and also the generation of inner thoughts.
      """

      # TODO: 🤮 don't default to a vendor! bug city!
@@ -38,10 +39,32 @@ class LLMConfig(BaseModel):
      model_endpoint: Optional[str] = Field(None, description="The endpoint for the model.")
      model_wrapper: Optional[str] = Field(None, description="The wrapper for the model.")
      context_window: int = Field(..., description="The context window size for the model.")
+     put_inner_thoughts_in_kwargs: Optional[bool] = Field(
+         True,
+         description="Puts 'inner_thoughts' as a kwarg in the function call if this is set to True. This helps with function calling performance and also the generation of inner thoughts.",
+     )

      # FIXME hack to silence pydantic protected namespace warning
      model_config = ConfigDict(protected_namespaces=())

+     @root_validator(pre=True)
+     def set_default_put_inner_thoughts(cls, values):
+         """
+         Dynamically set the default for put_inner_thoughts_in_kwargs based on the model field,
+         falling back to True if no specific rule is defined.
+         """
+         model = values.get("model")
+
+         # Define models where we want put_inner_thoughts_in_kwargs to be False
+         # For now it is gpt-4
+         avoid_put_inner_thoughts_in_kwargs = ["gpt-4"]
+
+         # Only modify the value if it's None or not provided
+         if values.get("put_inner_thoughts_in_kwargs") is None:
+             values["put_inner_thoughts_in_kwargs"] = False if model in avoid_put_inner_thoughts_in_kwargs else True
+
+         return values
+
      @classmethod
      def default_config(cls, model_name: str):
          if model_name == "gpt-4":
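
A quick illustration of the new model-dependent default (the field names come from the diff above; the endpoint and context-window values are arbitrary):

    from letta.schemas.llm_config import LLMConfig

    # "gpt-4" is on the avoid-list, so the validator resolves the unset flag to False...
    config = LLMConfig(model="gpt-4", model_endpoint_type="openai", context_window=8192)
    assert config.put_inner_thoughts_in_kwargs is False

    # ...while any model not on the list falls back to True.
    config = LLMConfig(model="gpt-4o-mini", model_endpoint_type="openai", context_window=128000)
    assert config.put_inner_thoughts_in_kwargs is True

Note the check is exact list membership rather than a substring match, so a name like "gpt-4-0613" still defaults to True.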
letta/schemas/tool.py CHANGED
@@ -112,11 +112,11 @@ class Tool(BaseTool):
          Class method to create an instance of Tool from a Langchain tool (must be from langchain_community.tools).

          Args:
-             langchain_tool (LangChainBaseTool): An instance of a crewAI BaseTool (BaseTool from crewai)
+             langchain_tool (LangChainBaseTool): An instance of a LangChain BaseTool (BaseTool from LangChain)
              additional_imports_module_attr_map (dict[str, str]): A mapping of module names to attribute name. This is used internally to import all the required classes for the langchain tool. For example, you would pass in `{"langchain_community.utilities": "WikipediaAPIWrapper"}` for `from langchain_community.tools import WikipediaQueryRun`. NOTE: You do NOT need to specify the tool import here, that is done automatically for you.

          Returns:
-             Tool: A Letta Tool initialized with attributes derived from the provided crewAI BaseTool object.
+             Tool: A Letta Tool initialized with attributes derived from the provided LangChain BaseTool object.
          """
          description = langchain_tool.description
          source_type = "python"
@@ -174,6 +174,38 @@ class Tool(BaseTool):
              json_schema=json_schema,
          )

+     @classmethod
+     def load_default_langchain_tools(cls) -> List["Tool"]:
+         # For now, we only support wikipedia tool
+         from langchain_community.tools import WikipediaQueryRun
+         from langchain_community.utilities import WikipediaAPIWrapper
+
+         wikipedia_tool = Tool.from_langchain(
+             WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper()), {"langchain_community.utilities": "WikipediaAPIWrapper"}
+         )
+
+         return [wikipedia_tool]
+
+     @classmethod
+     def load_default_crewai_tools(cls) -> List["Tool"]:
+         # For now, we only support scrape website tool
+         from crewai_tools import ScrapeWebsiteTool
+
+         web_scrape_tool = Tool.from_crewai(ScrapeWebsiteTool())
+
+         return [web_scrape_tool]
+
+     @classmethod
+     def load_default_composio_tools(cls) -> List["Tool"]:
+         from composio_langchain import Action
+
+         calculator = Tool.get_composio_tool(action=Action.MATHEMATICAL_CALCULATOR)
+         serp_news = Tool.get_composio_tool(action=Action.SERPAPI_NEWS_SEARCH)
+         serp_google_search = Tool.get_composio_tool(action=Action.SERPAPI_SEARCH)
+         serp_google_maps = Tool.get_composio_tool(action=Action.SERPAPI_GOOGLE_MAPS_SEARCH)
+
+         return [calculator, serp_news, serp_google_search, serp_google_maps]
+

  class ToolCreate(BaseTool):
      id: Optional[str] = Field(None, description="The unique identifier of the tool. If this is not provided, it will be autogenerated.")
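
Together, the three new loaders feed SyncServer.add_default_external_tools (added in server.py below). A sketch of calling them directly; this assumes the optional langchain_community, crewai_tools, and composio_langchain packages are installed, and that Composio credentials are configured for the Composio actions:

    from letta.schemas.tool import Tool

    # Each loader imports its third-party dependency lazily, so a missing extra
    # only breaks the integration that needs it.
    tools = (
        Tool.load_default_langchain_tools()
        + Tool.load_default_crewai_tools()
        + Tool.load_default_composio_tools()
    )
    for tool in tools:
        print(tool.name)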
letta/server/server.py CHANGED
@@ -43,12 +43,12 @@ from letta.interface import CLIInterface  # for printing to terminal
  from letta.log import get_logger
  from letta.memory import get_memory_functions
  from letta.metadata import Base, MetadataStore
+ from letta.o1_agent import O1Agent
  from letta.prompts import gpt_system
  from letta.providers import (
      AnthropicProvider,
      AzureProvider,
      GoogleAIProvider,
-     GroqProvider,
      LettaProvider,
      OllamaProvider,
      OpenAIProvider,
@@ -73,12 +73,7 @@ from letta.schemas.file import FileMetadata
  from letta.schemas.job import Job
  from letta.schemas.letta_message import LettaMessage
  from letta.schemas.llm_config import LLMConfig
- from letta.schemas.memory import (
-     ArchivalMemorySummary,
-     ContextWindowOverview,
-     Memory,
-     RecallMemorySummary,
- )
+ from letta.schemas.memory import ArchivalMemorySummary, Memory, RecallMemorySummary
  from letta.schemas.message import Message, MessageCreate, MessageRole, UpdateMessage
  from letta.schemas.organization import Organization, OrganizationCreate
  from letta.schemas.passage import Passage
@@ -249,6 +244,9 @@ class SyncServer(Server):
          # add global default tools (for admin)
          self.add_default_tools(module_name="base")

+         if settings.load_default_external_tools:
+             self.add_default_external_tools()
+
          # collect providers (always has Letta as a default)
          self._enabled_providers: List[Provider] = [LettaProvider()]
          if model_settings.openai_api_key:
@@ -303,12 +301,6 @@ class SyncServer(Server):
                      base_url=model_settings.vllm_api_base,
                  )
              )
-         if model_settings.groq_api_key:
-             self._enabled_providers.append(
-                 GroqProvider(
-                     api_key=model_settings.groq_api_key,
-                 )
-             )

      def save_agents(self):
          """Saves all the agents that are in the in-memory object store"""
@@ -373,8 +365,10 @@ class SyncServer(Server):

          if agent_state.agent_type == AgentType.memgpt_agent:
              letta_agent = Agent(agent_state=agent_state, interface=interface, tools=tool_objs)
+         elif agent_state.agent_type == AgentType.o1_agent:
+             letta_agent = O1Agent(agent_state=agent_state, interface=interface, tools=tool_objs)
          else:
-             raise NotImplementedError("Only base agents are supported as of right now!")
+             raise NotImplementedError("Not a supported agent type")

          # Add the agent to the in-memory store and return its reference
          logger.debug(f"Adding agent to the agent cache: user_id={user_id}, agent_id={agent_id}")
@@ -806,10 +800,18 @@ class SyncServer(Server):
          if request.name is None:
              request.name = create_random_username()

+         if request.agent_type is None:
+             request.agent_type = AgentType.memgpt_agent
+
          # system debug
          if request.system is None:
              # TODO: don't hardcode
-             request.system = gpt_system.get_system_text("memgpt_chat")
+             if request.agent_type == AgentType.memgpt_agent:
+                 request.system = gpt_system.get_system_text("memgpt_chat")
+             elif request.agent_type == AgentType.o1_agent:
+                 request.system = gpt_system.get_system_text("memgpt_modified_o1")
+             else:
+                 raise ValueError(f"Invalid agent type: {request.agent_type}")

          logger.debug(f"Attempting to find user: {user_id}")
          user = self.ms.get_user(user_id=user_id)
@@ -869,13 +871,22 @@ class SyncServer(Server):
              description=request.description,
              metadata_=request.metadata_,
          )
-         agent = Agent(
-             interface=interface,
-             agent_state=agent_state,
-             tools=tool_objs,
-             # gpt-3.5-turbo tends to omit inner monologue, relax this requirement for now
-             first_message_verify_mono=True if (llm_config.model is not None and "gpt-4" in llm_config.model) else False,
-         )
+         if request.agent_type == AgentType.memgpt_agent:
+             agent = Agent(
+                 interface=interface,
+                 agent_state=agent_state,
+                 tools=tool_objs,
+                 # gpt-3.5-turbo tends to omit inner monologue, relax this requirement for now
+                 first_message_verify_mono=True if (llm_config.model is not None and "gpt-4" in llm_config.model) else False,
+             )
+         elif request.agent_type == AgentType.o1_agent:
+             agent = O1Agent(
+                 interface=interface,
+                 agent_state=agent_state,
+                 tools=tool_objs,
+                 # gpt-3.5-turbo tends to omit inner monologue, relax this requirement for now
+                 first_message_verify_mono=True if (llm_config.model is not None and "gpt-4" in llm_config.model) else False,
+             )
          # rebuilding agent memory on agent create in case shared memory blocks
          # were specified in the new agent's memory config. we're doing this for two reasons:
          # 1. if only the ID of the shared memory block was specified, we can fetch its most recent value
@@ -1453,7 +1464,6 @@ class SyncServer(Server):
          # Get the agent object (loaded in memory)
          letta_agent = self._get_or_load_agent(agent_id=agent_id)
          assert isinstance(letta_agent.memory, Memory)
-         assert isinstance(letta_agent.agent_state.memory, Memory)
          return letta_agent.agent_state.model_copy(deep=True)

      def get_server_config(self, include_defaults: bool = False) -> dict:
@@ -1969,11 +1979,13 @@ class SyncServer(Server):
              # Handle other general exceptions
              raise e

+         functions_to_schema = []
          try:
              # Load the function set
              functions_to_schema = load_function_set(module)
          except ValueError as e:
              err = f"Error loading function set '{module_name}': {e}"
+             warnings.warn(err)

          # create tool in db
          for name, schema in functions_to_schema.items():
@@ -1997,6 +2009,20 @@ class SyncServer(Server):
                  update=True,
              )

+     def add_default_external_tools(self, user_id: Optional[str] = None) -> bool:
+         """Add default langchain tools. Return true if successful, false otherwise."""
+         success = True
+         tools = Tool.load_default_langchain_tools() + Tool.load_default_crewai_tools() + Tool.load_default_composio_tools()
+         for tool in tools:
+             try:
+                 self.ms.create_tool(tool)
+             except Exception as e:
+                 warnings.warn(f"An error occurred while creating tool {tool}: {e}")
+                 warnings.warn(traceback.format_exc())
+                 success = False
+
+         return success
+
      def add_default_blocks(self, user_id: str):
          from letta.utils import list_human_files, list_persona_files

@@ -2140,13 +2166,3 @@ class SyncServer(Server):

      def add_embedding_model(self, request: EmbeddingConfig) -> EmbeddingConfig:
          """Add a new embedding model"""
-
-     def get_agent_context_window(
-         self,
-         user_id: str,
-         agent_id: str,
-     ) -> ContextWindowOverview:
-
-         # Get the current message
-         letta_agent = self._get_or_load_agent(agent_id=agent_id)
-         return letta_agent.get_context_window()
letta/settings.py CHANGED
@@ -65,6 +65,9 @@ class Settings(BaseSettings):
      pg_port: Optional[int] = None
      pg_uri: Optional[str] = None  # option to specifiy full uri

+     # tools configuration
+     load_default_external_tools: Optional[bool] = None
+
      @property
      def letta_pg_uri(self) -> str:
          if self.pg_uri:
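
Because Settings is a pydantic BaseSettings model, the new flag can come from the environment. The variable name below assumes letta's usual "letta_" env prefix (an assumption; check the prefix configured in letta/settings.py):

    import os

    # Assumed env var name; set before letta.settings is imported so pydantic picks it up.
    os.environ["LETTA_LOAD_DEFAULT_EXTERNAL_TOOLS"] = "true"

    from letta.settings import settings

    # When truthy, SyncServer.__init__ (see the server.py diff above) calls
    # add_default_external_tools() at startup to register the external tool set.
    assert settings.load_default_external_tools is True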
{letta_nightly-0.5.0.dev20241017104103.dist-info → letta_nightly-0.5.0.dev20241018104142.dist-info}/METADATA RENAMED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: letta-nightly
- Version: 0.5.0.dev20241017104103
+ Version: 0.5.0.dev20241018104142
  Summary: Create LLM agents with long-term memory and custom tools
  License: Apache License
  Author: Letta Team
{letta_nightly-0.5.0.dev20241017104103.dist-info → letta_nightly-0.5.0.dev20241018104142.dist-info}/RECORD RENAMED
@@ -1,6 +1,6 @@
  letta/__init__.py,sha256=cwav47GUQB8F9w0sHIDPe1nZMf_WL00KovBa9dZvSj4,996
  letta/__main__.py,sha256=6Hs2PV7EYc5Tid4g4OtcLXhqVHiNYTGzSBdoOnW2HXA,29
- letta/agent.py,sha256=KEokoQYsXGe3EKcesFcXXryU541-SImDIByukpVToco,72282
+ letta/agent.py,sha256=picvtyzJzR0m60LTyvya99WnGEjePbeh1bgRqOo-Vng,71667
  letta/agent_store/chroma.py,sha256=upR5zGnGs6I6btulEYbiZdGG87BgKjxUJOQZ4Y-RQ_M,12492
  letta/agent_store/db.py,sha256=54EpxQYX0lAWxrsO0iUKw2vibF8-62Khczns2vxIK-0,23307
  letta/agent_store/lancedb.py,sha256=i63d4VZwj9UIOTNs5f0JZ_r5yZD-jKWz4FAH4RMpXOE,5104
@@ -10,7 +10,7 @@ letta/agent_store/storage.py,sha256=4gKvMRYBGm9cwyaDOzljxDKgqr4MxGXcC4yGhAdKcAA,
  letta/base.py,sha256=Ba-wt8p59bLmeUONkYSo5MhrkH-_HdT4zE1Y9MVGrSQ,83
  letta/benchmark/benchmark.py,sha256=ebvnwfp3yezaXOQyGXkYCDYpsmre-b9hvNtnyx4xkG0,3701
  letta/benchmark/constants.py,sha256=aXc5gdpMGJT327VuxsT5FngbCK2J41PQYeICBO7g_RE,536
- letta/cli/cli.py,sha256=XZDF7EfBYqdYEyDbENVp-tTyGcdvd9Wa1PNk70D-MGs,16195
+ letta/cli/cli.py,sha256=A5u87nx6g7n_KfIfU2nmjWd2Wq8f5YnCvSBH86bOk28,16149
  letta/cli/cli_config.py,sha256=G7QqPNTtlQ4TdrXZrrFFGblZEhnkyrqN1Cl5z415C-g,8689
  letta/cli/cli_load.py,sha256=x4L8s15GwIW13xrhKYFWHo_y-IVGtoPDHWWKcHDRP10,4587
  letta/client/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -19,7 +19,7 @@ letta/client/client.py,sha256=GJYPBYWf2Ufchl1V-XUy8uMkRQBH52lvl4_E7dFOPLM,91720
  letta/client/streaming.py,sha256=bfWlUu7z7EoPfKxBqIarYxGKyrL7Pj79BlliToqcCgI,4592
  letta/client/utils.py,sha256=OJlAKWrldc4I6M1WpcTWNtPJ4wfxlzlZqWLfCozkFtI,2872
  letta/config.py,sha256=j2I90fOh9d9__kOYObwTDLbvVwYR50rIql5nzrvREKg,19161
- letta/constants.py,sha256=e70vmjxFxycAOhzV0NFl8aGEKqOAnOfUbUighl9OpSU,6574
+ letta/constants.py,sha256=8-ep8znrhMLFrfnK63G0Lq8FEyI5M9dXNApCkFfB3iI,6574
  letta/credentials.py,sha256=D9mlcPsdDWlIIXQQD8wSPE9M_QvsRrb0p3LB5i9OF5Q,5806
  letta/data_sources/connectors.py,sha256=qO81ASB6V-vDPthfHYtZiyqcQDQPTT0NuD8hVwC6xI0,9907
  letta/data_sources/connectors_helper.py,sha256=2TQjCt74fCgT5sw1AP8PalDEk06jPBbhrPG4HVr-WLs,3371
@@ -29,22 +29,22 @@ letta/functions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  letta/functions/function_sets/base.py,sha256=N4QmOjL6gDEyOg67ocF6zVKM-NquTo-yXG_T8r18buA,6440
  letta/functions/function_sets/extras.py,sha256=Jik3UiDqYTm4Lam1XPTvuVjvgUHwIAhopsnbmVhGMBg,4732
  letta/functions/functions.py,sha256=BqO4jq0dNS29niwlNd0jIs5QIRn_dNaiJIhaZokQjqM,3397
- letta/functions/helpers.py,sha256=dzeQ1hsxI-20QcVzkS8y55aCJw3iGbtm4oqBobb_tIM,9876
+ letta/functions/helpers.py,sha256=ypcf-BR-D99V8Zn6gwfNbUtxNcHawzElcQuiZrt3IGI,9899
  letta/functions/schema_generator.py,sha256=OBJnix2BpDJ3GAqlfLYrQLWWbh-imhy4ah0buXm64gU,6559
  letta/humans/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  letta/humans/examples/basic.txt,sha256=Lcp8YESTWvOJgO4Yf_yyQmgo5bKakeB1nIVrwEGG6PA,17
  letta/humans/examples/cs_phd.txt,sha256=9C9ZAV_VuG7GB31ksy3-_NAyk8rjE6YtVOkhp08k1xw,297
  letta/interface.py,sha256=QI4hFP0WrNsgM5qX6TbnhH1ZZxsLYr5DaccuxpEQ8S4,12768
  letta/llm_api/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- letta/llm_api/anthropic.py,sha256=bAb9PVrpYjo2QN51_SJbW7Vry2_Sf55B05UoruHXb7A,12932
+ letta/llm_api/anthropic.py,sha256=DTBYPvByj-mfbrkZeAa4PjVEI8gg0p_v15a2h_I-Rqo,12883
  letta/llm_api/azure_openai.py,sha256=C-fuuholudcLJDWjqnXJwpXsfmGWfNugEVWyj6YCrpg,4572
  letta/llm_api/azure_openai_constants.py,sha256=oXtKrgBFHf744gyt5l1thILXgyi8NDNUrKEa2GGGpjw,278
  letta/llm_api/cohere.py,sha256=vDRd-SUGp1t_JUIdwC3RkIhwMl0OY7n-tAU9uPORYkY,14826
  letta/llm_api/google_ai.py,sha256=3xZ074nSOCC22c15yerA5ngWzh0ex4wxeI-6faNbHPE,17708
- letta/llm_api/helpers.py,sha256=LjUtCjvPzSP3-3Ak0J--2RqUXOO6Of8287mm1L1LAMU,9549
- letta/llm_api/llm_api_tools.py,sha256=Z3eiYUtvZKBVBcmKI2l4qWkKM4hgvLN9Y1aSxXc7y-k,15344
+ letta/llm_api/helpers.py,sha256=8aG6LzB0T3NFlnab-RR2tj0ARUTMBHSd0icCur5-RCk,8813
+ letta/llm_api/llm_api_tools.py,sha256=GEBO7Dlt7xtAQud1sVsigKZKPpLOZOt2IWL8LwcNV4o,14869
  letta/llm_api/mistral.py,sha256=fHdfD9ug-rQIk2qn8tRKay1U6w9maF11ryhKi91FfXM,1593
- letta/llm_api/openai.py,sha256=EXpktSI_TYjsCDEXBxdNXsY5uE9Rzb7BPF1F6cz8bkg,21689
+ letta/llm_api/openai.py,sha256=OidkR0VFXzwNW13EUlXbHWWw2ARZHfVDHWn59aEgwbo,21683
  letta/local_llm/README.md,sha256=hFJyw5B0TU2jrh9nb0zGZMgdH-Ei1dSRfhvPQG_NSoU,168
  letta/local_llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  letta/local_llm/chat_completion_proxy.py,sha256=SiohxsjGTku4vOryOZx7I0t0xoO_sUuhXgoe62fKq3c,12995
@@ -61,8 +61,8 @@ letta/local_llm/llamacpp/api.py,sha256=EZYyZwJ2m544XeEru_qLnJZgXBXNzdrQiA-clbGCh
  letta/local_llm/llamacpp/settings.py,sha256=1b-k-nZnoNxcDs_S1JGukelLuHDbkwjvwM-GzhcXCj0,507
  letta/local_llm/llm_chat_completion_wrappers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  letta/local_llm/llm_chat_completion_wrappers/airoboros.py,sha256=28bMI7STGMmi203BGnv5qu5WGyJveRNYjdzFcn2jwDM,19199
- letta/local_llm/llm_chat_completion_wrappers/chatml.py,sha256=Dea9crmL9rX79ojXEv9fmf6GHCHDTgwBOsPAoyttsUA,21097
- letta/local_llm/llm_chat_completion_wrappers/configurable_wrapper.py,sha256=0ZRWCy_TtxNGWXXZUbrdI43BY3c-GICUee-Y9UokECk,19708
+ letta/local_llm/llm_chat_completion_wrappers/chatml.py,sha256=Y1NqrenhBHE4nAdBzwm6SYVfPUZV3ie2FmuwQzOTaTw,21082
+ letta/local_llm/llm_chat_completion_wrappers/configurable_wrapper.py,sha256=ls6OsGMDlsC_l9HoTIgMv6c8LAuE3XjJWuz6kZpz33s,19693
  letta/local_llm/llm_chat_completion_wrappers/dolphin.py,sha256=7agV-_Ioshsfjuy3VXCB5dfA32zp7u697dQSn6m3dK4,10156
  letta/local_llm/llm_chat_completion_wrappers/llama3.py,sha256=9cqi-5vibaaCxzBrkVS8lPHPpBi7ZBv3DC6M3ne7ivM,15841
  letta/local_llm/llm_chat_completion_wrappers/simple_summary_wrapper.py,sha256=xWr-nn-OAriLQ_PnbxloEbdyN4jJNiwhRwGZmzMyGsc,6201
@@ -83,9 +83,10 @@ letta/local_llm/webui/legacy_api.py,sha256=k3H3y4qp2Fs-XmP24iSIEyvq6wjWFWBzklY3-
  letta/local_llm/webui/legacy_settings.py,sha256=BLmd3TSx5StnY3ibjwaxYATPt_Lvq-o1rlcc_-Q1JcU,538
  letta/local_llm/webui/settings.py,sha256=gmLHfiOl1u4JmlAZU2d2O8YKF9lafdakyjwR_ftVPh8,552
  letta/log.py,sha256=QHquDnL7oUAvdKlAwUlCK9zXKDMUjrU9WA0bxnMsP0Y,2101
- letta/main.py,sha256=islRBrAi7huz6i4fE0GYbGmTyTQdc3DjVOpywKx0UH8,18974
+ letta/main.py,sha256=yHgM1lltQZvbE8k0QDQMmVyJiWEj07ZTOYIBHDxE_DQ,18709
  letta/memory.py,sha256=6q1x3-PY-PeXzAt6hvP-UF1ajvroPZ7XW-5nLy-JhMo,17657
- letta/metadata.py,sha256=CyoitCNZo-OfyQUbe9BS3B-97Zyr1RltEpiv02vmJ84,36687
+ letta/metadata.py,sha256=gjwSD3TzhRUD-IFilbzHKjAhnEC2l7MeHF_31W9LEBw,36929
+ letta/o1_agent.py,sha256=0jospImZUKhuQZ0cop0INj8xI6cxhxNffGA8iloHyfU,3114
  letta/openai_backcompat/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
  letta/openai_backcompat/openai_object.py,sha256=Y1ZS1sATP60qxJiOsjOP3NbwSzuzvkNAvb3DeuhM5Uk,13490
  letta/persistence_manager.py,sha256=LlLgEDpSafCPAiyKmuq0NvVAnfBkZo6TWbGIKYQjQBs,5200
@@ -94,6 +95,7 @@ letta/personas/examples/anna_pa.txt,sha256=zgiNdSNhy1HQy58cF_6RFPzcg2i37F9v38YuL
  letta/personas/examples/google_search_persona.txt,sha256=RyObU80MIk2oeJJDWOK1aX5pHOtbHSSjIrbUpxov240,1194
  letta/personas/examples/memgpt_doc.txt,sha256=_McafHuYkJYAnBFwvu_LVEaSEQGbs0flCgJIIJYlZgc,425
  letta/personas/examples/memgpt_starter.txt,sha256=x-fEozRrfUVlCJUEjkwHDCGeBb2z50d0jd6QF78SHKQ,160
+ letta/personas/examples/o1_persona.txt,sha256=VKSDXuMaiOg-fnaiMFnEauYy85q88LJKW0y8N7V5j3g,339
  letta/personas/examples/sam.txt,sha256=V1-3-x9gud_opkeNL3XPXyCyJySCp4sYi-XTFD26gnc,1223
  letta/personas/examples/sam_pov.txt,sha256=NUZOfkz91aBwnv2M3iDsPZYf8MlaGF0zQB0nFOUC56k,1171
  letta/personas/examples/sam_simple_pov_gpt35.txt,sha256=vP6R5GxPeO0QuMartRs3DBfSs1LFWW8CHNqo7II0BuA,1053
@@ -109,9 +111,10 @@ letta/prompts/system/memgpt_doc.txt,sha256=AsT55NOORoH-K-p0fxklrDRZ3qHs4MIKMuR-M
  letta/prompts/system/memgpt_gpt35_extralong.txt,sha256=FheNhYoIzNz6qnJKhVquZVSMj3HduC48reFaX7Pf7ig,5046
  letta/prompts/system/memgpt_intuitive_knowledge.txt,sha256=sA7c3urYqREVnSBI81nTGImXAekqC0Fxc7RojFqud1g,2966
  letta/prompts/system/memgpt_modified_chat.txt,sha256=HOaPVurEftD8KsuwsclDgE2afIfklMjxhuSO96q1-6I,4656
+ letta/prompts/system/memgpt_modified_o1.txt,sha256=AxxYVjYLZwpZ6yfifh1SuPtwlJGWTcTVzw53QbkN-Ao,5492
  letta/providers.py,sha256=tGnji2OlZSo5fgRaLiFaopqiyhKGOt5akngSjjM5RSI,19637
  letta/pytest.ini,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
- letta/schemas/agent.py,sha256=565kf-OHpV-VsN8nGqrJsUyCOBEYw2rl_RRggWj0Gzw,6371
+ letta/schemas/agent.py,sha256=e69lAKJQYtx92w8tM9sdLdv1hDqZ_0V_qiUiQyI-uks,7138
  letta/schemas/api_key.py,sha256=u07yzzMn-hBAHZIIKbWY16KsgiFjSNR8lAghpMUo3_4,682
  letta/schemas/block.py,sha256=1_GwFtfykloYU4Mp2DV3-DqkvsKo79Mu3LAzVYOgMzk,3998
  letta/schemas/embedding_config.py,sha256=1kD6NpiXeH4roVumxqDAKk7xt8SpXGWNhZs_XXUSlEU,2855
@@ -123,7 +126,7 @@ letta/schemas/letta_base.py,sha256=4QXFgyjCHqIagi8B6_4nmqb9eoJ52Y6aCxBxQpGX48M,2
  letta/schemas/letta_message.py,sha256=Slgxa59qZfdvqXuCVHOt03u-7JL456ZY-WLaK5UYYKU,6234
  letta/schemas/letta_request.py,sha256=_oiDshc_AoFWIfXRk2VX5-AxO5vDlyN-9r-gnyLj_30,1890
  letta/schemas/letta_response.py,sha256=_UJoO3UtC3F5DtQCHzdiGM1SHNPYPKvopIWqg8t5YZw,1564
- letta/schemas/llm_config.py,sha256=PFL4ui2twmYMCs8Dnbx8ss0uHTiJktmLVeeHPVliRuw,2949
+ letta/schemas/llm_config.py,sha256=eFA48vKBTO70qaob8pak2CWOH7TCQeqWuClkMBc2vbY,4172
  letta/schemas/memory.py,sha256=COVipr9VPqbIz4QQ9kdlrylgVMx37odc0Sctjt4IZb4,11348
  letta/schemas/message.py,sha256=X0adFviO6sbobFns30M0Ym6DChRDVThaA82gqbzw3Jg,33531
  letta/schemas/openai/chat_completion_request.py,sha256=Fa7xnSnG7WUQounJhnDu0fTSxoR6xOAh2bODuqmfypI,3345
@@ -134,7 +137,7 @@ letta/schemas/openai/openai.py,sha256=Hilo5BiLAGabzxCwnwfzK5QrWqwYD8epaEKFa4Pwnd
  letta/schemas/organization.py,sha256=JSc3hLl0IO_c9iOqf367sU5tJ0Dx_kPzbokCEg0eS4g,601
  letta/schemas/passage.py,sha256=eYQMxD_XjHAi72jmqcGBU4wM4VZtSU0XK8uhQxxN3Ug,3563
  letta/schemas/source.py,sha256=hB4Ai6Nj8dFdbxv5_Qaf4uN_cmdGmnzgc-4QnHXcV3o,2562
- letta/schemas/tool.py,sha256=_q5bg0hwVKPlx_-IPjW8qsojMnhAWrpUDCEXF1zMBPk,9031
+ letta/schemas/tool.py,sha256=m8jWIsPUhekoQcjX7U_Y5vwhhQqSKn748RcXNXRiLGg,10329
  letta/schemas/usage.py,sha256=lvn1ooHwLEdv6gwQpw5PBUbcwn_gwdT6HA-fCiix6sY,817
  letta/schemas/user.py,sha256=D7DiPzieXZIHOLInJdYZlHjKOy2bl7KxGCesNk0yf5E,1003
  letta/server/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -169,7 +172,7 @@ letta/server/rest_api/routers/v1/tools.py,sha256=YiGlgAaV1p0sbrf-Z7ZR6mQ4HZlKWN9
  letta/server/rest_api/routers/v1/users.py,sha256=Y2rDvHOG1B5FLSOjutY3R22vt48IngbZ-9h8CohG5rc,3378
  letta/server/rest_api/static_files.py,sha256=NG8sN4Z5EJ8JVQdj19tkFa9iQ1kBPTab9f_CUxd_u4Q,3143
  letta/server/rest_api/utils.py,sha256=Fc2ZGKzLaBa2sEtSTVjJ8D5M0xIwsWC0CVAOIJaD3rY,2176
- letta/server/server.py,sha256=6A_q-ES_OX7UgssX1VLPXBoDG56HEJ95luuZpZZ5_Z0,89258
+ letta/server/server.py,sha256=pAeqAr8zua7-j765M1oZECYgXLe5DnFdVf3QxYiaDuc,90569
  letta/server/startup.sh,sha256=jeGV7B_PS0hS-tT6o6GpACrUbV9WV1NI2L9aLoUDDtc,311
  letta/server/static_files/assets/index-3ab03d5b.css,sha256=OrA9W4iKJ5h2Wlr7GwdAT4wow0CM8hVit1yOxEL49Qw,54295
  letta/server/static_files/assets/index-d6b3669a.js,sha256=i1nHReU0RPnj-a5W0nNPV4Y9bQ0FOW0ztjMz8a2AE-Y,1821560
@@ -182,12 +185,12 @@ letta/server/ws_api/example_client.py,sha256=95AA5UFgTlNJ0FUQkLxli8dKNx48MNm3eWG
  letta/server/ws_api/interface.py,sha256=TWl9vkcMCnLsUtgsuENZ-ku2oMDA-OUTzLh_yNRoMa4,4120
  letta/server/ws_api/protocol.py,sha256=M_-gM5iuDBwa1cuN2IGNCG5GxMJwU2d3XW93XALv9s8,1821
  letta/server/ws_api/server.py,sha256=C2Kv48PCwl46DQFb0ZP30s86KJLQ6dZk2AhWQEZn9pY,6004
- letta/settings.py,sha256=6VWC3vtTa8vqj6dqos4p_xHTMJNJS_8LRGJmqvaU1-o,3219
+ letta/settings.py,sha256=gNdH-Ty6f-Nfz2j9ZMZFRQHac2KzgsxLZNt5l_TiAyo,3301
  letta/streaming_interface.py,sha256=_FPUWy58j50evHcpXyd7zB1wWqeCc71NCFeWh_TBvnw,15736
  letta/system.py,sha256=buKYPqG5n2x41hVmWpu6JUpyd7vTWED9Km2_M7dLrvk,6960
  letta/utils.py,sha256=neUs7mxNfndzRL5XUxerr8Lic6w7qnyyvf8FBwMnyWw,30852
- letta_nightly-0.5.0.dev20241017104103.dist-info/LICENSE,sha256=mExtuZ_GYJgDEI38GWdiEYZizZS4KkVt2SF1g_GPNhI,10759
- letta_nightly-0.5.0.dev20241017104103.dist-info/METADATA,sha256=ao_TDJ7MVdrPJgEM-M6cTi0eUHgMlMq7Fyx2O1F7R_w,10620
- letta_nightly-0.5.0.dev20241017104103.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
- letta_nightly-0.5.0.dev20241017104103.dist-info/entry_points.txt,sha256=2zdiyGNEZGV5oYBuS-y2nAAgjDgcC9yM_mHJBFSRt5U,40
- letta_nightly-0.5.0.dev20241017104103.dist-info/RECORD,,
+ letta_nightly-0.5.0.dev20241018104142.dist-info/LICENSE,sha256=mExtuZ_GYJgDEI38GWdiEYZizZS4KkVt2SF1g_GPNhI,10759
+ letta_nightly-0.5.0.dev20241018104142.dist-info/METADATA,sha256=yqSwuKHeE1WBG6rkRtmAwq6mXiGEpkiWpLg8aXVX-5g,10620
+ letta_nightly-0.5.0.dev20241018104142.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
+ letta_nightly-0.5.0.dev20241018104142.dist-info/entry_points.txt,sha256=2zdiyGNEZGV5oYBuS-y2nAAgjDgcC9yM_mHJBFSRt5U,40
+ letta_nightly-0.5.0.dev20241018104142.dist-info/RECORD,,