letta-nightly 0.5.0.dev20241015014828__py3-none-any.whl → 0.5.0.dev20241016104103__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of letta-nightly might be problematic. Click here for more details.

letta/server/server.py CHANGED
@@ -16,6 +16,7 @@ import letta.system as system
16
16
  from letta.agent import Agent, save_agent
17
17
  from letta.agent_store.db import attach_base
18
18
  from letta.agent_store.storage import StorageConnector, TableType
19
+ from letta.client.utils import derive_function_name_regex
19
20
  from letta.credentials import LettaCredentials
20
21
  from letta.data_sources.connectors import DataConnector, load_data
21
22
 
@@ -72,9 +73,13 @@ from letta.schemas.file import FileMetadata
72
73
  from letta.schemas.job import Job
73
74
  from letta.schemas.letta_message import LettaMessage
74
75
  from letta.schemas.llm_config import LLMConfig
75
- from letta.schemas.memory import ArchivalMemorySummary, Memory, RecallMemorySummary
76
+ from letta.schemas.memory import (
77
+ ArchivalMemorySummary,
78
+ ContextWindowOverview,
79
+ Memory,
80
+ RecallMemorySummary,
81
+ )
76
82
  from letta.schemas.message import Message, MessageCreate, MessageRole, UpdateMessage
77
- from letta.schemas.openai.chat_completion_response import UsageStatistics
78
83
  from letta.schemas.organization import Organization, OrganizationCreate
79
84
  from letta.schemas.passage import Passage
80
85
  from letta.schemas.source import Source, SourceCreate, SourceUpdate
@@ -411,6 +416,7 @@ class SyncServer(Server):
411
416
  raise ValueError(f"messages should be a Message or a list of Message, got {type(input_messages)}")
412
417
 
413
418
  logger.debug(f"Got input messages: {input_messages}")
419
+ letta_agent = None
414
420
  try:
415
421
 
416
422
  # Get the agent object (loaded in memory)
@@ -422,83 +428,14 @@ class SyncServer(Server):
422
428
  token_streaming = letta_agent.interface.streaming_mode if hasattr(letta_agent.interface, "streaming_mode") else False
423
429
 
424
430
  logger.debug(f"Starting agent step")
425
- no_verify = True
426
- next_input_message = input_messages
427
- counter = 0
428
- total_usage = UsageStatistics()
429
- step_count = 0
430
- while True:
431
- step_response = letta_agent.step(
432
- messages=next_input_message,
433
- first_message=False,
434
- skip_verify=no_verify,
435
- return_dicts=False,
436
- stream=token_streaming,
437
- # timestamp=timestamp,
438
- ms=self.ms,
439
- )
440
- step_response.messages
441
- heartbeat_request = step_response.heartbeat_request
442
- function_failed = step_response.function_failed
443
- token_warning = step_response.in_context_memory_warning
444
- usage = step_response.usage
445
-
446
- step_count += 1
447
- total_usage += usage
448
- counter += 1
449
- letta_agent.interface.step_complete()
450
-
451
- logger.debug("Saving agent state")
452
- # save updated state
453
- save_agent(letta_agent, self.ms)
454
-
455
- # Chain stops
456
- if not self.chaining:
457
- logger.debug("No chaining, stopping after one step")
458
- break
459
- elif self.max_chaining_steps is not None and counter > self.max_chaining_steps:
460
- logger.debug(f"Hit max chaining steps, stopping after {counter} steps")
461
- break
462
- # Chain handlers
463
- elif token_warning:
464
- assert letta_agent.agent_state.user_id is not None
465
- next_input_message = Message.dict_to_message(
466
- agent_id=letta_agent.agent_state.id,
467
- user_id=letta_agent.agent_state.user_id,
468
- model=letta_agent.model,
469
- openai_message_dict={
470
- "role": "user", # TODO: change to system?
471
- "content": system.get_token_limit_warning(),
472
- },
473
- )
474
- continue # always chain
475
- elif function_failed:
476
- assert letta_agent.agent_state.user_id is not None
477
- next_input_message = Message.dict_to_message(
478
- agent_id=letta_agent.agent_state.id,
479
- user_id=letta_agent.agent_state.user_id,
480
- model=letta_agent.model,
481
- openai_message_dict={
482
- "role": "user", # TODO: change to system?
483
- "content": system.get_heartbeat(constants.FUNC_FAILED_HEARTBEAT_MESSAGE),
484
- },
485
- )
486
- continue # always chain
487
- elif heartbeat_request:
488
- assert letta_agent.agent_state.user_id is not None
489
- next_input_message = Message.dict_to_message(
490
- agent_id=letta_agent.agent_state.id,
491
- user_id=letta_agent.agent_state.user_id,
492
- model=letta_agent.model,
493
- openai_message_dict={
494
- "role": "user", # TODO: change to system?
495
- "content": system.get_heartbeat(constants.REQ_HEARTBEAT_MESSAGE),
496
- },
497
- )
498
- continue # always chain
499
- # Letta no-op / yield
500
- else:
501
- break
431
+ usage_stats = letta_agent.step(
432
+ messages=input_messages,
433
+ chaining=self.chaining,
434
+ max_chaining_steps=self.max_chaining_steps,
435
+ stream=token_streaming,
436
+ ms=self.ms,
437
+ skip_verify=True,
438
+ )
502
439
 
503
440
  except Exception as e:
504
441
  logger.error(f"Error in server._step: {e}")
@@ -506,9 +443,10 @@ class SyncServer(Server):
506
443
  raise
507
444
  finally:
508
445
  logger.debug("Calling step_yield()")
509
- letta_agent.interface.step_yield()
446
+ if letta_agent:
447
+ letta_agent.interface.step_yield()
510
448
 
511
- return LettaUsageStatistics(**total_usage.model_dump(), step_count=step_count)
449
+ return usage_stats
512
450
 
513
451
  def _command(self, user_id: str, agent_id: str, command: str) -> LettaUsageStatistics:
514
452
  """Process a CLI command"""
@@ -794,7 +732,7 @@ class SyncServer(Server):
794
732
  message_objects.append(message)
795
733
 
796
734
  else:
797
- raise ValueError(f"All messages must be of type Message or MessageCreate, got {type(messages)}")
735
+ raise ValueError(f"All messages must be of type Message or MessageCreate, got {[type(message) for message in messages]}")
798
736
 
799
737
  # Run the agent state forward
800
738
  return self._step(user_id=user_id, agent_id=agent_id, input_messages=message_objects)
@@ -1028,6 +966,80 @@ class SyncServer(Server):
1028
966
  # TODO: probably reload the agent somehow?
1029
967
  return letta_agent.agent_state
1030
968
 
969
+ def add_tool_to_agent(
970
+ self,
971
+ agent_id: str,
972
+ tool_id: str,
973
+ user_id: str,
974
+ ):
975
+ """Add a tool to an agent and return the new agent state"""
976
+ if self.ms.get_user(user_id=user_id) is None:
977
+ raise ValueError(f"User user_id={user_id} does not exist")
978
+ if self.ms.get_agent(agent_id=agent_id) is None:
979
+ raise ValueError(f"Agent agent_id={agent_id} does not exist")
980
+
981
+ # Get the agent object (loaded in memory)
982
+ letta_agent = self._get_or_load_agent(agent_id=agent_id)
983
+
984
+ # Get all the tool objects from the request
985
+ tool_objs = []
986
+ tool_obj = self.ms.get_tool(tool_id=tool_id, user_id=user_id)
987
+ assert tool_obj, f"Tool with id={tool_id} does not exist"
988
+ tool_objs.append(tool_obj)
989
+
990
+ for tool in letta_agent.tools:
991
+ tool_obj = self.ms.get_tool(tool_id=tool.id, user_id=user_id)
992
+ assert tool_obj, f"Tool with id={tool.id} does not exist"
993
+
994
+ # If it's not the already added tool
995
+ if tool_obj.id != tool_id:
996
+ tool_objs.append(tool_obj)
997
+
998
+ # replace the list of tool names ("ids") inside the agent state
999
+ letta_agent.agent_state.tools = [tool.name for tool in tool_objs]
1000
+
1001
+ # then attempt to link the tools modules
1002
+ letta_agent.link_tools(tool_objs)
1003
+
1004
+ # save the agent
1005
+ save_agent(letta_agent, self.ms)
1006
+ return letta_agent.agent_state
1007
+
1008
+ def remove_tool_from_agent(
1009
+ self,
1010
+ agent_id: str,
1011
+ tool_id: str,
1012
+ user_id: str,
1013
+ ):
1014
+ """Remove a tool from an agent and return the new agent state"""
1015
+ if self.ms.get_user(user_id=user_id) is None:
1016
+ raise ValueError(f"User user_id={user_id} does not exist")
1017
+ if self.ms.get_agent(agent_id=agent_id) is None:
1018
+ raise ValueError(f"Agent agent_id={agent_id} does not exist")
1019
+
1020
+ # Get the agent object (loaded in memory)
1021
+ letta_agent = self._get_or_load_agent(agent_id=agent_id)
1022
+
1023
+ # Get all the tool_objs
1024
+ tool_objs = []
1025
+ for tool in letta_agent.tools:
1026
+ tool_obj = self.ms.get_tool(tool_id=tool.id, user_id=user_id)
1027
+ assert tool_obj, f"Tool with id={tool.id} does not exist"
1028
+
1029
+ # If it's not the tool we want to remove
1030
+ if tool_obj.id != tool_id:
1031
+ tool_objs.append(tool_obj)
1032
+
1033
+ # replace the list of tool names ("ids") inside the agent state
1034
+ letta_agent.agent_state.tools = [tool.name for tool in tool_objs]
1035
+
1036
+ # then attempt to link the tools modules
1037
+ letta_agent.link_tools(tool_objs)
1038
+
1039
+ # save the agent
1040
+ save_agent(letta_agent, self.ms)
1041
+ return letta_agent.agent_state
1042
+
1031
1043
  def _agent_state_to_config(self, agent_state: AgentState) -> dict:
1032
1044
  """Convert AgentState to a dict for a JSON response"""
1033
1045
  assert agent_state is not None
@@ -1683,6 +1695,9 @@ class SyncServer(Server):
1683
1695
 
1684
1696
  return job
1685
1697
 
1698
+ def delete_file_from_source(self, source_id: str, file_id: str, user_id: Optional[str]) -> Optional[FileMetadata]:
1699
+ return self.ms.delete_file_from_source(source_id=source_id, file_id=file_id, user_id=user_id)
1700
+
1686
1701
  def load_data(
1687
1702
  self,
1688
1703
  user_id: str,
@@ -1811,6 +1826,15 @@ class SyncServer(Server):
1811
1826
  """Get tool by ID."""
1812
1827
  return self.ms.get_tool(tool_id=tool_id)
1813
1828
 
1829
+ def tool_with_name_and_user_id_exists(self, tool: Tool, user_id: Optional[str] = None) -> bool:
1830
+ """Check if tool exists"""
1831
+ tool = self.ms.get_tool_with_name_and_user_id(tool_name=tool.name, user_id=user_id)
1832
+
1833
+ if tool is None:
1834
+ return False
1835
+ else:
1836
+ return True
1837
+
1814
1838
  def get_tool_id(self, name: str, user_id: str) -> Optional[str]:
1815
1839
  """Get tool ID from name and user_id."""
1816
1840
  tool = self.ms.get_tool(tool_name=name, user_id=user_id)
@@ -1818,16 +1842,27 @@ class SyncServer(Server):
1818
1842
  return None
1819
1843
  return tool.id
1820
1844
 
1821
- def update_tool(
1822
- self,
1823
- request: ToolUpdate,
1824
- ) -> Tool:
1845
+ def update_tool(self, request: ToolUpdate, user_id: Optional[str] = None) -> Tool:
1825
1846
  """Update an existing tool"""
1826
- existing_tool = self.ms.get_tool(tool_id=request.id)
1827
- if not existing_tool:
1828
- raise ValueError(f"Tool does not exist")
1847
+ if request.name:
1848
+ existing_tool = self.ms.get_tool_with_name_and_user_id(tool_name=request.name, user_id=user_id)
1849
+ if existing_tool is None:
1850
+ raise ValueError(f"Tool with name={request.name}, user_id={user_id} does not exist")
1851
+ else:
1852
+ existing_tool = self.ms.get_tool(tool_id=request.id)
1853
+ if existing_tool is None:
1854
+ raise ValueError(f"Tool with id={request.id} does not exist")
1855
+
1856
+ # Preserve the original tool id
1857
+ # As we can override the tool id as well
1858
+ # This is probably bad design if this is exposed to users...
1859
+ original_id = existing_tool.id
1829
1860
 
1830
1861
  # override updated fields
1862
+ if request.id:
1863
+ existing_tool.id = request.id
1864
+ if request.description:
1865
+ existing_tool.description = request.description
1831
1866
  if request.source_code:
1832
1867
  existing_tool.source_code = request.source_code
1833
1868
  if request.source_type:
@@ -1836,10 +1871,15 @@ class SyncServer(Server):
1836
1871
  existing_tool.tags = request.tags
1837
1872
  if request.json_schema:
1838
1873
  existing_tool.json_schema = request.json_schema
1874
+
1875
+ # If name is explicitly provided here, override the tool name
1839
1876
  if request.name:
1840
1877
  existing_tool.name = request.name
1878
+ # Otherwise, if there's no name, and there's source code, we try to derive the name
1879
+ elif request.source_code:
1880
+ existing_tool.name = derive_function_name_regex(request.source_code)
1841
1881
 
1842
- self.ms.update_tool(existing_tool)
1882
+ self.ms.update_tool(original_id, existing_tool)
1843
1883
  return self.ms.get_tool(tool_id=request.id)
1844
1884
 
1845
1885
  def create_tool(self, request: ToolCreate, user_id: Optional[str] = None, update: bool = True) -> Tool: # TODO: add other fields
@@ -1866,7 +1906,7 @@ class SyncServer(Server):
1866
1906
 
1867
1907
  # TODO: not sure if this always works
1868
1908
  func = env[functions[-1]]
1869
- json_schema = generate_schema(func)
1909
+ json_schema = generate_schema(func, terminal=request.terminal)
1870
1910
  else:
1871
1911
  # provided by client
1872
1912
  json_schema = request.json_schema
@@ -1877,15 +1917,23 @@ class SyncServer(Server):
1877
1917
  assert request.name, f"Tool name must be provided in json_schema {json_schema}. This should never happen."
1878
1918
 
1879
1919
  # check if already exists:
1880
- existing_tool = self.ms.get_tool(tool_name=request.name, user_id=user_id)
1920
+ existing_tool = self.ms.get_tool(tool_id=request.id, tool_name=request.name, user_id=user_id)
1881
1921
  if existing_tool:
1882
1922
  if update:
1883
- updated_tool = self.update_tool(ToolUpdate(id=existing_tool.id, **vars(request)))
1923
+ # id is an optional field, so we will fill it with the existing tool id
1924
+ if not request.id:
1925
+ request.id = existing_tool.id
1926
+ updated_tool = self.update_tool(ToolUpdate(**vars(request)), user_id)
1884
1927
  assert updated_tool is not None, f"Failed to update tool {request.name}"
1885
1928
  return updated_tool
1886
1929
  else:
1887
1930
  raise ValueError(f"Tool {request.name} already exists and update=False")
1888
1931
 
1932
+ # check for description
1933
+ description = None
1934
+ if request.description:
1935
+ description = request.description
1936
+
1889
1937
  tool = Tool(
1890
1938
  name=request.name,
1891
1939
  source_code=request.source_code,
@@ -1893,9 +1941,14 @@ class SyncServer(Server):
1893
1941
  tags=request.tags,
1894
1942
  json_schema=json_schema,
1895
1943
  user_id=user_id,
1944
+ description=description,
1896
1945
  )
1946
+
1947
+ if request.id:
1948
+ tool.id = request.id
1949
+
1897
1950
  self.ms.create_tool(tool)
1898
- created_tool = self.ms.get_tool(tool_name=request.name, user_id=user_id)
1951
+ created_tool = self.ms.get_tool(tool_id=tool.id, user_id=user_id)
1899
1952
  return created_tool
1900
1953
 
1901
1954
  def delete_tool(self, tool_id: str):
@@ -2087,3 +2140,13 @@ class SyncServer(Server):
2087
2140
 
2088
2141
  def add_embedding_model(self, request: EmbeddingConfig) -> EmbeddingConfig:
2089
2142
  """Add a new embedding model"""
2143
+
2144
+ def get_agent_context_window(
2145
+ self,
2146
+ user_id: str,
2147
+ agent_id: str,
2148
+ ) -> ContextWindowOverview:
2149
+
2150
+ # Get the current message
2151
+ letta_agent = self._get_or_load_agent(agent_id=agent_id)
2152
+ return letta_agent.get_context_window()
@@ -0,0 +1,203 @@
1
+ Metadata-Version: 2.1
2
+ Name: letta-nightly
3
+ Version: 0.5.0.dev20241016104103
4
+ Summary: Create LLM agents with long-term memory and custom tools
5
+ License: Apache License
6
+ Author: Letta Team
7
+ Author-email: contact@letta.com
8
+ Requires-Python: >=3.10,<3.13
9
+ Classifier: License :: Other/Proprietary License
10
+ Classifier: Programming Language :: Python :: 3
11
+ Classifier: Programming Language :: Python :: 3.10
12
+ Classifier: Programming Language :: Python :: 3.11
13
+ Classifier: Programming Language :: Python :: 3.12
14
+ Provides-Extra: autogen
15
+ Provides-Extra: dev
16
+ Provides-Extra: external-tools
17
+ Provides-Extra: milvus
18
+ Provides-Extra: ollama
19
+ Provides-Extra: postgres
20
+ Provides-Extra: qdrant
21
+ Provides-Extra: server
22
+ Provides-Extra: tests
23
+ Requires-Dist: alembic (>=1.13.3,<2.0.0)
24
+ Requires-Dist: autoflake (>=2.3.0,<3.0.0) ; extra == "dev"
25
+ Requires-Dist: black[jupyter] (>=24.2.0,<25.0.0) ; extra == "dev"
26
+ Requires-Dist: chromadb (>=0.4.24,<0.5.0)
27
+ Requires-Dist: composio-core (>=0.5.28,<0.6.0) ; extra == "external-tools"
28
+ Requires-Dist: composio-langchain (>=0.5.28,<0.6.0) ; extra == "external-tools"
29
+ Requires-Dist: crewai (>=0.41.1,<0.42.0) ; extra == "external-tools"
30
+ Requires-Dist: crewai-tools (>=0.8.3,<0.9.0) ; extra == "external-tools"
31
+ Requires-Dist: datasets (>=2.14.6,<3.0.0) ; extra == "dev"
32
+ Requires-Dist: demjson3 (>=3.0.6,<4.0.0)
33
+ Requires-Dist: docker (>=7.1.0,<8.0.0) ; extra == "external-tools"
34
+ Requires-Dist: docstring-parser (>=0.16,<0.17)
35
+ Requires-Dist: docx2txt (>=0.8,<0.9)
36
+ Requires-Dist: fastapi (>=0.104.1,<0.105.0) ; extra == "server"
37
+ Requires-Dist: html2text (>=2020.1.16,<2021.0.0)
38
+ Requires-Dist: httpx (>=0.27.2,<0.28.0)
39
+ Requires-Dist: httpx-sse (>=0.4.0,<0.5.0)
40
+ Requires-Dist: isort (>=5.13.2,<6.0.0) ; extra == "dev"
41
+ Requires-Dist: jinja2 (>=3.1.4,<4.0.0)
42
+ Requires-Dist: langchain (>=0.2.16,<0.3.0) ; extra == "external-tools"
43
+ Requires-Dist: langchain-community (>=0.2.17,<0.3.0) ; extra == "external-tools"
44
+ Requires-Dist: llama-index (>=0.11.9,<0.12.0)
45
+ Requires-Dist: llama-index-embeddings-ollama (>=0.3.1,<0.4.0) ; extra == "ollama"
46
+ Requires-Dist: llama-index-embeddings-openai (>=0.2.5,<0.3.0)
47
+ Requires-Dist: locust (>=2.31.5,<3.0.0)
48
+ Requires-Dist: nltk (>=3.8.1,<4.0.0)
49
+ Requires-Dist: numpy (>=1.26.2,<2.0.0)
50
+ Requires-Dist: pexpect (>=4.9.0,<5.0.0) ; extra == "dev"
51
+ Requires-Dist: pg8000 (>=1.30.3,<2.0.0) ; extra == "postgres"
52
+ Requires-Dist: pgvector (>=0.2.3,<0.3.0) ; extra == "postgres"
53
+ Requires-Dist: pre-commit (>=3.5.0,<4.0.0) ; extra == "dev"
54
+ Requires-Dist: prettytable (>=3.9.0,<4.0.0)
55
+ Requires-Dist: pyautogen (==0.2.22) ; extra == "autogen"
56
+ Requires-Dist: pydantic (>=2.7.4,<3.0.0)
57
+ Requires-Dist: pydantic-settings (>=2.2.1,<3.0.0)
58
+ Requires-Dist: pymilvus (>=2.4.3,<3.0.0) ; extra == "milvus"
59
+ Requires-Dist: pyright (>=1.1.347,<2.0.0) ; extra == "dev"
60
+ Requires-Dist: pytest-asyncio (>=0.23.2,<0.24.0) ; extra == "dev"
61
+ Requires-Dist: pytest-order (>=1.2.0,<2.0.0) ; extra == "dev"
62
+ Requires-Dist: python-box (>=7.1.1,<8.0.0)
63
+ Requires-Dist: python-multipart (>=0.0.9,<0.0.10)
64
+ Requires-Dist: pytz (>=2023.3.post1,<2024.0)
65
+ Requires-Dist: pyyaml (>=6.0.1,<7.0.0)
66
+ Requires-Dist: qdrant-client (>=1.9.1,<2.0.0) ; extra == "qdrant"
67
+ Requires-Dist: questionary (>=2.0.1,<3.0.0)
68
+ Requires-Dist: setuptools (>=68.2.2,<69.0.0)
69
+ Requires-Dist: sqlalchemy (>=2.0.25,<3.0.0)
70
+ Requires-Dist: sqlalchemy-json (>=0.7.0,<0.8.0)
71
+ Requires-Dist: sqlalchemy-utils (>=0.41.2,<0.42.0)
72
+ Requires-Dist: sqlmodel (>=0.0.16,<0.0.17)
73
+ Requires-Dist: tiktoken (>=0.7.0,<0.8.0)
74
+ Requires-Dist: tqdm (>=4.66.1,<5.0.0)
75
+ Requires-Dist: typer[all] (>=0.9.0,<0.10.0)
76
+ Requires-Dist: uvicorn (>=0.24.0.post1,<0.25.0) ; extra == "server"
77
+ Requires-Dist: websockets (>=12.0,<13.0) ; extra == "server"
78
+ Requires-Dist: wikipedia (>=1.4.0,<2.0.0) ; extra == "external-tools" or extra == "tests"
79
+ Description-Content-Type: text/markdown
80
+
81
+ <p align="center">
82
+ <picture>
83
+ <source media="(prefers-color-scheme: dark)" srcset="assets/Letta-logo-RGB_GreyonTransparent_cropped_small.png">
84
+ <source media="(prefers-color-scheme: light)" srcset="assets/Letta-logo-RGB_OffBlackonTransparent_cropped_small.png">
85
+ <img alt="Letta logo" src="assets/Letta-logo-RGB_GreyonOffBlack_cropped_small.png" width="500">
86
+ </picture>
87
+ </p>
88
+
89
+ <div align="center">
90
+ <h1>Letta (previously MemGPT)</h1>
91
+
92
+ <h3>
93
+
94
+ [Homepage](https://letta.com) // [Documentation](https://docs.letta.com) // [Letta Cloud](https://forms.letta.com/early-access)
95
+
96
+ </h3>
97
+
98
+ **👾 Letta** is an open source framework for building stateful LLM applications. You can use Letta to build **stateful agents** with advanced reasoning capabilities and transparent long-term memory. The Letta framework is white box and model-agnostic.
99
+
100
+ [![Discord](https://img.shields.io/discord/1161736243340640419?label=Discord&logo=discord&logoColor=5865F2&style=flat-square&color=5865F2)](https://discord.gg/letta)
101
+ [![Twitter Follow](https://img.shields.io/badge/Follow-%40Letta__AI-1DA1F2?style=flat-square&logo=x&logoColor=white)](https://twitter.com/Letta_AI)
102
+ [![arxiv 2310.08560](https://img.shields.io/badge/Research-2310.08560-B31B1B?logo=arxiv&style=flat-square)](https://arxiv.org/abs/2310.08560)
103
+ [![Apache 2.0](https://img.shields.io/badge/License-Apache%202.0-silver?style=flat-square)](LICENSE)
104
+ [![Release](https://img.shields.io/github/v/release/cpacker/MemGPT?style=flat-square&label=Release&color=limegreen)](https://github.com/cpacker/MemGPT/releases)
105
+ [![GitHub](https://img.shields.io/github/stars/cpacker/MemGPT?style=flat-square&logo=github&label=Stars&color=gold)](https://github.com/cpacker/MemGPT)
106
+
107
+ </div>
108
+
109
+ > [!NOTE]
110
+ > **Looking for MemGPT?** You're in the right place!
111
+ >
112
+ > The MemGPT package and Docker image have been renamed to `letta` to clarify the distinction between MemGPT agents and the API server / runtime that runs LLM agents as *services*.
113
+ >
114
+ > You use the **Letta _framework_** to create **MemGPT _agents_**. Read more about the relationship between MemGPT and Letta [here](https://www.letta.com/blog/memgpt-and-letta).
115
+
116
+ ## ⚡ Quickstart
117
+
118
+ The two main ways to install Letta are through **pypi** (`pip`) or via **Docker**:
119
+ * **`pip`** (guide below) - the easiest way to try Letta, will default to using SQLite and ChromaDB for the database backends
120
+ * **Docker** (guide [here](https://docs.letta.com/install#run-letta-with-docker)) - recommended for production settings, will default to using Postgres (+ pgvector) for the database backend
121
+
122
+ ### Step 1 - Install Letta using `pip`
123
+ ```sh
124
+ $ pip install -U letta
125
+ ```
126
+
127
+ ### Step 2 - Set your environment variables for your chosen LLM / embedding providers
128
+ ```sh
129
+ $ export OPENAI_API_KEY=sk-...
130
+ ```
131
+
132
+ For Ollama (see our full [documentation](https://docs.letta.com/install) for examples of how to set up various providers):
133
+ ```sh
134
+ $ export OLLAMA_BASE_URL=http://localhost:11434
135
+ ```
136
+
137
+ ### Step 3 - Run the Letta CLI
138
+
139
+ You can create agents and chat with them via the Letta CLI tool (`letta run`):
140
+ ```sh
141
+ $ letta run
142
+ ```
143
+ ```
144
+ 🧬 Creating new agent...
145
+ ? Select LLM model: letta-free [type=openai] [ip=https://inference.memgpt.ai]
146
+ ? Select embedding model: letta-free [type=hugging-face] [ip=https://embeddings.memgpt.ai]
147
+ -> 🤖 Using persona profile: 'sam_pov'
148
+ -> 🧑 Using human profile: 'basic'
149
+ -> 🛠️ 8 tools: send_message, pause_heartbeats, conversation_search, conversation_search_date, archival_memory_insert, archival_memory_search, core_memory_append, core_memory_replace
150
+
151
+ 🎉 Created new agent 'InspiringSpinach'
152
+
153
+ Hit enter to begin (will request first Letta message)
154
+
155
+ 💭 User engaged. Recognize as Chad. Acknowledge presence and establish connection. Explore interests and engage in meaningful discourse.
156
+ 🤖 Hello, Chad. It's nice to meet you. I am keen on learning and exploring new ideas and concepts. Is there a specific topic you'd like to discuss today?
157
+
158
+ > Enter your message: my name is Brad, not Chad...
159
+
160
+ 💭 A discrepancy in identity. Reflect this change in core memory. Apologize for the oversight, and rectify the error.
161
+ ⚡🧠 [function] updating memory with core_memory_replace
162
+ First name: Chad
163
+ → First name: Brad
164
+ 💭 Apologize for the mistake in recognizing his name, and begin the conversation anew, addressing him by the correct name this time.
165
+ 🤖 I apologize for the oversight, Brad. Let's restart. It's a pleasure to meet you, Brad. Is there a specific topic you'd like to discuss today?
166
+
167
+ > Enter your message:
168
+ ```
169
+
170
+ ### Step 4 - Run the Letta server
171
+
172
+ You can start the Letta API server with `letta server` (see the full API reference [here](https://docs.letta.com/api-reference)):
173
+ ```sh
174
+ $ letta server
175
+ ```
176
+ ```
177
+ Initializing database...
178
+ Running: uvicorn server:app --host localhost --port 8283
179
+ INFO: Started server process [47750]
180
+ INFO: Waiting for application startup.
181
+ INFO: Application startup complete.
182
+ INFO: Uvicorn running on http://localhost:8283 (Press CTRL+C to quit)
183
+ ```
184
+
185
+ When you start the Letta API server, the ADE (Agent Development Environment) will be available on `http://localhost:8283`:
186
+ <img alt="Screenshot of the Letta ADE (Agent Development Environment)" src="assets/letta_ade_screenshot.png" width="1600">
187
+
188
+ In Letta, all agents are stored/persisted in the same database, so the agents you create in the CLI are accessible via the API and ADE, and vice versa. Check out the [quickstart guide on our docs](https://docs.letta.com/quickstart) for a tutorial where you create an agent in the Letta CLI and message the same agent via the Letta API.
189
+
190
+ ## 🤗 How to contribute
191
+
192
+ Letta is an open source project built by over a hundred contributors. There are many ways to get involved in the Letta OSS project!
193
+
194
+ * **Contribute to the project**: Interested in contributing? Start by reading our [Contribution Guidelines](https://github.com/cpacker/MemGPT/tree/main/CONTRIBUTING.md).
195
+ * **Ask a question**: Join our community on [Discord](https://discord.gg/letta) and direct your questions to the `#support` channel.
196
+ * **Report issues or suggest features**: Have an issue or a feature request? Please submit them through our [GitHub Issues page](https://github.com/cpacker/MemGPT/issues).
197
+ * **Explore the roadmap**: Curious about future developments? View and comment on our [project roadmap](https://github.com/cpacker/MemGPT/issues/1533).
198
+ * **Join community events**: Stay updated with the [event calendar](https://lu.ma/berkeley-llm-meetup) or follow our [Twitter account](https://twitter.com/Letta_AI).
199
+
200
+ ---
201
+
202
+ ***Legal notices**: By using Letta and related Letta services (such as the Letta endpoint or hosted service), you are agreeing to our [privacy policy](https://www.letta.com/privacy-policy) and [terms of service](https://www.letta.com/terms-of-service).*
203
+