letta-nightly 0.1.7.dev20240924104148__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of letta-nightly might be problematic; see the advisory details below for more information.

Files changed (189) hide show
  1. letta/__init__.py +24 -0
  2. letta/__main__.py +3 -0
  3. letta/agent.py +1427 -0
  4. letta/agent_store/chroma.py +295 -0
  5. letta/agent_store/db.py +546 -0
  6. letta/agent_store/lancedb.py +177 -0
  7. letta/agent_store/milvus.py +198 -0
  8. letta/agent_store/qdrant.py +201 -0
  9. letta/agent_store/storage.py +188 -0
  10. letta/benchmark/benchmark.py +96 -0
  11. letta/benchmark/constants.py +14 -0
  12. letta/cli/cli.py +689 -0
  13. letta/cli/cli_config.py +1282 -0
  14. letta/cli/cli_load.py +166 -0
  15. letta/client/__init__.py +0 -0
  16. letta/client/admin.py +171 -0
  17. letta/client/client.py +2360 -0
  18. letta/client/streaming.py +90 -0
  19. letta/client/utils.py +61 -0
  20. letta/config.py +484 -0
  21. letta/configs/anthropic.json +13 -0
  22. letta/configs/letta_hosted.json +11 -0
  23. letta/configs/openai.json +12 -0
  24. letta/constants.py +134 -0
  25. letta/credentials.py +140 -0
  26. letta/data_sources/connectors.py +247 -0
  27. letta/embeddings.py +218 -0
  28. letta/errors.py +26 -0
  29. letta/functions/__init__.py +0 -0
  30. letta/functions/function_sets/base.py +174 -0
  31. letta/functions/function_sets/extras.py +132 -0
  32. letta/functions/functions.py +105 -0
  33. letta/functions/schema_generator.py +205 -0
  34. letta/humans/__init__.py +0 -0
  35. letta/humans/examples/basic.txt +1 -0
  36. letta/humans/examples/cs_phd.txt +9 -0
  37. letta/interface.py +314 -0
  38. letta/llm_api/__init__.py +0 -0
  39. letta/llm_api/anthropic.py +383 -0
  40. letta/llm_api/azure_openai.py +155 -0
  41. letta/llm_api/cohere.py +396 -0
  42. letta/llm_api/google_ai.py +468 -0
  43. letta/llm_api/llm_api_tools.py +485 -0
  44. letta/llm_api/openai.py +470 -0
  45. letta/local_llm/README.md +3 -0
  46. letta/local_llm/__init__.py +0 -0
  47. letta/local_llm/chat_completion_proxy.py +279 -0
  48. letta/local_llm/constants.py +31 -0
  49. letta/local_llm/function_parser.py +68 -0
  50. letta/local_llm/grammars/__init__.py +0 -0
  51. letta/local_llm/grammars/gbnf_grammar_generator.py +1324 -0
  52. letta/local_llm/grammars/json.gbnf +26 -0
  53. letta/local_llm/grammars/json_func_calls_with_inner_thoughts.gbnf +32 -0
  54. letta/local_llm/groq/api.py +97 -0
  55. letta/local_llm/json_parser.py +202 -0
  56. letta/local_llm/koboldcpp/api.py +62 -0
  57. letta/local_llm/koboldcpp/settings.py +23 -0
  58. letta/local_llm/llamacpp/api.py +58 -0
  59. letta/local_llm/llamacpp/settings.py +22 -0
  60. letta/local_llm/llm_chat_completion_wrappers/__init__.py +0 -0
  61. letta/local_llm/llm_chat_completion_wrappers/airoboros.py +452 -0
  62. letta/local_llm/llm_chat_completion_wrappers/chatml.py +470 -0
  63. letta/local_llm/llm_chat_completion_wrappers/configurable_wrapper.py +387 -0
  64. letta/local_llm/llm_chat_completion_wrappers/dolphin.py +246 -0
  65. letta/local_llm/llm_chat_completion_wrappers/llama3.py +345 -0
  66. letta/local_llm/llm_chat_completion_wrappers/simple_summary_wrapper.py +156 -0
  67. letta/local_llm/llm_chat_completion_wrappers/wrapper_base.py +11 -0
  68. letta/local_llm/llm_chat_completion_wrappers/zephyr.py +345 -0
  69. letta/local_llm/lmstudio/api.py +100 -0
  70. letta/local_llm/lmstudio/settings.py +29 -0
  71. letta/local_llm/ollama/api.py +88 -0
  72. letta/local_llm/ollama/settings.py +32 -0
  73. letta/local_llm/settings/__init__.py +0 -0
  74. letta/local_llm/settings/deterministic_mirostat.py +45 -0
  75. letta/local_llm/settings/settings.py +72 -0
  76. letta/local_llm/settings/simple.py +28 -0
  77. letta/local_llm/utils.py +265 -0
  78. letta/local_llm/vllm/api.py +63 -0
  79. letta/local_llm/webui/api.py +60 -0
  80. letta/local_llm/webui/legacy_api.py +58 -0
  81. letta/local_llm/webui/legacy_settings.py +23 -0
  82. letta/local_llm/webui/settings.py +24 -0
  83. letta/log.py +76 -0
  84. letta/main.py +437 -0
  85. letta/memory.py +440 -0
  86. letta/metadata.py +884 -0
  87. letta/openai_backcompat/__init__.py +0 -0
  88. letta/openai_backcompat/openai_object.py +437 -0
  89. letta/persistence_manager.py +148 -0
  90. letta/personas/__init__.py +0 -0
  91. letta/personas/examples/anna_pa.txt +13 -0
  92. letta/personas/examples/google_search_persona.txt +15 -0
  93. letta/personas/examples/memgpt_doc.txt +6 -0
  94. letta/personas/examples/memgpt_starter.txt +4 -0
  95. letta/personas/examples/sam.txt +14 -0
  96. letta/personas/examples/sam_pov.txt +14 -0
  97. letta/personas/examples/sam_simple_pov_gpt35.txt +13 -0
  98. letta/personas/examples/sqldb/test.db +0 -0
  99. letta/prompts/__init__.py +0 -0
  100. letta/prompts/gpt_summarize.py +14 -0
  101. letta/prompts/gpt_system.py +26 -0
  102. letta/prompts/system/memgpt_base.txt +49 -0
  103. letta/prompts/system/memgpt_chat.txt +58 -0
  104. letta/prompts/system/memgpt_chat_compressed.txt +13 -0
  105. letta/prompts/system/memgpt_chat_fstring.txt +51 -0
  106. letta/prompts/system/memgpt_doc.txt +50 -0
  107. letta/prompts/system/memgpt_gpt35_extralong.txt +53 -0
  108. letta/prompts/system/memgpt_intuitive_knowledge.txt +31 -0
  109. letta/prompts/system/memgpt_modified_chat.txt +23 -0
  110. letta/pytest.ini +0 -0
  111. letta/schemas/agent.py +117 -0
  112. letta/schemas/api_key.py +21 -0
  113. letta/schemas/block.py +135 -0
  114. letta/schemas/document.py +21 -0
  115. letta/schemas/embedding_config.py +54 -0
  116. letta/schemas/enums.py +35 -0
  117. letta/schemas/job.py +38 -0
  118. letta/schemas/letta_base.py +80 -0
  119. letta/schemas/letta_message.py +175 -0
  120. letta/schemas/letta_request.py +23 -0
  121. letta/schemas/letta_response.py +28 -0
  122. letta/schemas/llm_config.py +54 -0
  123. letta/schemas/memory.py +224 -0
  124. letta/schemas/message.py +727 -0
  125. letta/schemas/openai/chat_completion_request.py +123 -0
  126. letta/schemas/openai/chat_completion_response.py +136 -0
  127. letta/schemas/openai/chat_completions.py +123 -0
  128. letta/schemas/openai/embedding_response.py +11 -0
  129. letta/schemas/openai/openai.py +157 -0
  130. letta/schemas/organization.py +20 -0
  131. letta/schemas/passage.py +80 -0
  132. letta/schemas/source.py +62 -0
  133. letta/schemas/tool.py +143 -0
  134. letta/schemas/usage.py +18 -0
  135. letta/schemas/user.py +33 -0
  136. letta/server/__init__.py +0 -0
  137. letta/server/constants.py +6 -0
  138. letta/server/rest_api/__init__.py +0 -0
  139. letta/server/rest_api/admin/__init__.py +0 -0
  140. letta/server/rest_api/admin/agents.py +21 -0
  141. letta/server/rest_api/admin/tools.py +83 -0
  142. letta/server/rest_api/admin/users.py +98 -0
  143. letta/server/rest_api/app.py +193 -0
  144. letta/server/rest_api/auth/__init__.py +0 -0
  145. letta/server/rest_api/auth/index.py +43 -0
  146. letta/server/rest_api/auth_token.py +22 -0
  147. letta/server/rest_api/interface.py +726 -0
  148. letta/server/rest_api/routers/__init__.py +0 -0
  149. letta/server/rest_api/routers/openai/__init__.py +0 -0
  150. letta/server/rest_api/routers/openai/assistants/__init__.py +0 -0
  151. letta/server/rest_api/routers/openai/assistants/assistants.py +115 -0
  152. letta/server/rest_api/routers/openai/assistants/schemas.py +121 -0
  153. letta/server/rest_api/routers/openai/assistants/threads.py +336 -0
  154. letta/server/rest_api/routers/openai/chat_completions/__init__.py +0 -0
  155. letta/server/rest_api/routers/openai/chat_completions/chat_completions.py +131 -0
  156. letta/server/rest_api/routers/v1/__init__.py +15 -0
  157. letta/server/rest_api/routers/v1/agents.py +543 -0
  158. letta/server/rest_api/routers/v1/blocks.py +73 -0
  159. letta/server/rest_api/routers/v1/jobs.py +46 -0
  160. letta/server/rest_api/routers/v1/llms.py +28 -0
  161. letta/server/rest_api/routers/v1/organizations.py +61 -0
  162. letta/server/rest_api/routers/v1/sources.py +199 -0
  163. letta/server/rest_api/routers/v1/tools.py +103 -0
  164. letta/server/rest_api/routers/v1/users.py +109 -0
  165. letta/server/rest_api/static_files.py +74 -0
  166. letta/server/rest_api/utils.py +69 -0
  167. letta/server/server.py +1995 -0
  168. letta/server/startup.sh +8 -0
  169. letta/server/static_files/assets/index-0cbf7ad5.js +274 -0
  170. letta/server/static_files/assets/index-156816da.css +1 -0
  171. letta/server/static_files/assets/index-486e3228.js +274 -0
  172. letta/server/static_files/favicon.ico +0 -0
  173. letta/server/static_files/index.html +39 -0
  174. letta/server/static_files/memgpt_logo_transparent.png +0 -0
  175. letta/server/utils.py +46 -0
  176. letta/server/ws_api/__init__.py +0 -0
  177. letta/server/ws_api/example_client.py +104 -0
  178. letta/server/ws_api/interface.py +108 -0
  179. letta/server/ws_api/protocol.py +100 -0
  180. letta/server/ws_api/server.py +145 -0
  181. letta/settings.py +165 -0
  182. letta/streaming_interface.py +396 -0
  183. letta/system.py +207 -0
  184. letta/utils.py +1065 -0
  185. letta_nightly-0.1.7.dev20240924104148.dist-info/LICENSE +190 -0
  186. letta_nightly-0.1.7.dev20240924104148.dist-info/METADATA +98 -0
  187. letta_nightly-0.1.7.dev20240924104148.dist-info/RECORD +189 -0
  188. letta_nightly-0.1.7.dev20240924104148.dist-info/WHEEL +4 -0
  189. letta_nightly-0.1.7.dev20240924104148.dist-info/entry_points.txt +3 -0
letta/cli/cli.py ADDED
@@ -0,0 +1,689 @@
1
+ import json
2
+ import logging
3
+ import os
4
+ import sys
5
+ from enum import Enum
6
+ from typing import Annotated, Optional
7
+
8
+ import questionary
9
+ import requests
10
+ import typer
11
+
12
+ import letta.utils as utils
13
+ from letta import create_client
14
+ from letta.agent import Agent, save_agent
15
+ from letta.cli.cli_config import configure
16
+ from letta.config import LettaConfig
17
+ from letta.constants import CLI_WARNING_PREFIX, LETTA_DIR
18
+ from letta.credentials import LettaCredentials
19
+ from letta.log import get_logger
20
+ from letta.metadata import MetadataStore
21
+ from letta.schemas.embedding_config import EmbeddingConfig
22
+ from letta.schemas.enums import OptionState
23
+ from letta.schemas.llm_config import LLMConfig
24
+ from letta.schemas.memory import ChatMemory, Memory
25
+ from letta.server.server import logger as server_logger
26
+
27
+ # from letta.interface import CLIInterface as interface # for printing to terminal
28
+ from letta.streaming_interface import (
29
+ StreamingRefreshCLIInterface as interface, # for printing to terminal
30
+ )
31
+ from letta.utils import open_folder_in_explorer, printd
32
+
33
+ logger = get_logger(__name__)
34
+
35
+
36
class QuickstartChoice(Enum):
    """Backends selectable via the `letta quickstart` command.

    The member *value* is the string accepted on the command line.
    """

    openai = "openai"
    # azure = "azure"
    letta_hosted = "letta"
    anthropic = "anthropic"
41
+
42
+
43
def str_to_quickstart_choice(choice_str: str) -> QuickstartChoice:
    """Translate a raw backend name into its QuickstartChoice member.

    Raises:
        ValueError: when `choice_str` does not name any QuickstartChoice member.
    """
    members = QuickstartChoice.__members__
    if choice_str in members:
        return members[choice_str]
    valid_options = [choice.name for choice in QuickstartChoice]
    raise ValueError(f"{choice_str} is not a valid QuickstartChoice. Valid options are: {valid_options}")
49
+
50
+
51
def set_config_with_dict(new_config: dict) -> "Tuple[LettaConfig, bool]":
    """Overlay values from `new_config` onto the saved LettaConfig.

    Top-level config attributes, embedding-config fields, and llm-config
    fields are all looked up by key in the single flat `new_config` dict.
    If a sub-config (embedding / llm) is missing entirely, a fresh one is
    built from whichever of its fields `new_config` provides.

    Args:
        new_config (dict): Dict of new config values.

    Returns:
        (LettaConfig, bool): The updated config object and a flag telling the
        caller whether anything actually changed (and hence should be saved).
    """
    from letta.utils import printd

    def _overlay(obj) -> bool:
        # Copy matching keys from new_config onto obj's attributes.
        # Returns True if any attribute value actually changed.
        changed = False
        for k, v in vars(obj).items():
            if k in new_config:
                if v != new_config[k]:
                    printd(f"Replacing config {k}: {v} -> {new_config[k]}")
                    changed = True
                    setattr(obj, k, new_config[k])  # set the new value using dot notation
                else:
                    printd(f"Skipping new config {k}: {v} == {new_config[k]}")
        return changed

    def _build(cls, fields):
        # Construct a fresh sub-config from whichever fields new_config provides.
        args = {}
        for field in fields:
            if field in new_config:
                args[field] = new_config[field]
                printd(f"Setting new config {field}: {new_config[field]}")
        return cls(**args)

    old_config = LettaConfig.load()
    modified = _overlay(old_config)

    # update embedding config
    if old_config.default_embedding_config:
        modified |= _overlay(old_config.default_embedding_config)
    else:
        modified = True
        old_config.default_embedding_config = _build(
            EmbeddingConfig,
            ["embedding_model", "embedding_dim", "embedding_chunk_size", "embedding_endpoint", "embedding_endpoint_type"],
        )

    # update llm config
    if old_config.default_llm_config:
        modified |= _overlay(old_config.default_llm_config)
    else:
        modified = True
        old_config.default_llm_config = _build(
            LLMConfig,
            ["model", "model_endpoint", "model_endpoint_type", "model_wrapper", "context_window"],
        )

    return (old_config, modified)
116
+
117
+
118
def quickstart(
    backend: Annotated[QuickstartChoice, typer.Option(help="Quickstart setup backend")] = "letta",
    latest: Annotated[bool, typer.Option(help="Use --latest to pull the latest config from online")] = False,
    debug: Annotated[bool, typer.Option(help="Use --debug to enable debugging output")] = False,
    terminal: bool = True,
):
    """Set the base config file with a single command.

    Applies a backend-specific config template (optionally the latest copy
    pulled from the repo, falling back to the packaged copy), prompts for and
    stores API keys where the backend requires one, and reports the result.

    This function and `configure` should be the ONLY places where
    LettaConfig.save() is called.

    Args:
        backend: Which preset to apply (letta hosted / openai / anthropic).
        latest: Try to download the newest config template before falling
            back to the copy shipped with the package.
        debug: Enable debug-level logging output.
        terminal: True when quickstart was invoked directly from the CLI, in
            which case a "next step" hint is printed at the end.
    """
    # setup logger
    utils.DEBUG = debug
    logging.getLogger().setLevel(logging.CRITICAL)
    if debug:
        logging.getLogger().setLevel(logging.DEBUG)

    # make sure everything is set up properly
    LettaConfig.create_config_dir()
    credentials = LettaCredentials.load()

    def _config_from_local_file(config_name: str, backup: bool):
        # Apply the config JSON packaged alongside this module.
        # Returns (new_config, modified) or None when the file is missing.
        script_dir = os.path.dirname(__file__)  # directory where this script is located
        config_path = os.path.join(script_dir, "..", "configs", config_name)
        label = "Backup config" if backup else "Config"
        try:
            with open(config_path, "r", encoding="utf-8") as file:
                config_dict = json.load(file)
            printd("Loaded backup config file successfully." if backup else "Loaded config file successfully.")
            return set_config_with_dict(config_dict)
        except FileNotFoundError:
            typer.secho(f"{label} file not found at {config_path}", fg=typer.colors.RED)
            return None

    def _config_from_url(url: str, announce: bool):
        # Download and apply a config JSON from the repo.
        # Returns (new_config, modified) or None on a non-200 response.
        response = requests.get(url)
        if response.status_code == 200:
            config_dict = response.json()
            if announce:
                printd("JSON config file downloaded successfully.")
            return set_config_with_dict(config_dict)
        typer.secho(f"Failed to download config from {url}. Status code: {response.status_code}", fg=typer.colors.RED)
        return None

    config_was_modified = False
    if backend == QuickstartChoice.letta_hosted:
        # if latest, try to pull the config from the repo; fallback to using local
        if latest:
            result = _config_from_url(
                "https://raw.githubusercontent.com/cpacker/Letta/main/configs/letta_hosted.json", announce=True
            )
            if result is None:
                result = _config_from_local_file("letta_hosted.json", backup=True)
        else:
            result = _config_from_local_file("letta_hosted.json", backup=False)
        if result is None:
            return
        new_config, config_was_modified = result

    elif backend == QuickstartChoice.openai:
        # Make sure we have an API key
        api_key = os.getenv("OPENAI_API_KEY")
        while api_key is None or len(api_key) == 0:
            # Ask for API key as input
            api_key = questionary.password("Enter your OpenAI API key (starts with 'sk-', see https://platform.openai.com/api-keys):").ask()
        credentials.openai_key = api_key
        credentials.save()

        # if latest, try to pull the config from the repo; fallback to using local
        if latest:
            result = _config_from_url(
                "https://raw.githubusercontent.com/cpacker/Letta/main/configs/openai.json", announce=False
            )
            if result is None:
                result = _config_from_local_file("openai.json", backup=True)
        else:
            result = _config_from_local_file("openai.json", backup=False)
        if result is None:
            return
        new_config, config_was_modified = result

    elif backend == QuickstartChoice.anthropic:
        # Make sure we have an API key
        api_key = os.getenv("ANTHROPIC_API_KEY")
        while api_key is None or len(api_key) == 0:
            # Ask for API key as input
            api_key = questionary.password("Enter your Anthropic API key:").ask()
        credentials.anthropic_key = api_key
        credentials.save()

        result = _config_from_local_file("anthropic.json", backup=False)
        if result is None:
            return
        new_config, config_was_modified = result

    else:
        raise NotImplementedError(backend)

    if config_was_modified:
        printd("Saving new config file.")
        new_config.save()
        typer.secho("šŸ“– Letta configuration file updated!", fg=typer.colors.GREEN)
        typer.secho(
            "\n".join(
                [
                    f"🧠 model\t-> {new_config.default_llm_config.model}",
                    f"šŸ–„ļø endpoint\t-> {new_config.default_llm_config.model_endpoint}",
                ]
            ),
            fg=typer.colors.GREEN,
        )
    else:
        typer.secho("šŸ“– Letta configuration file unchanged.", fg=typer.colors.WHITE)
        typer.secho(
            "\n".join(
                [
                    f"🧠 model\t-> {new_config.default_llm_config.model}",
                    f"šŸ–„ļø endpoint\t-> {new_config.default_llm_config.model_endpoint}",
                ]
            ),
            fg=typer.colors.WHITE,
        )

    # 'terminal' = quickstart was run alone, in which case we should guide the user on the next command
    if terminal:
        if config_was_modified:
            typer.secho('⚔ Run "letta run" to create an agent with the new config.', fg=typer.colors.YELLOW)
        else:
            typer.secho('⚔ Run "letta run" to create an agent.', fg=typer.colors.YELLOW)
284
+
285
+
286
def open_folder():
    """Launch the system file browser on the Letta home directory."""
    try:
        print(f"Opening home folder: {LETTA_DIR}")
        open_folder_in_explorer(LETTA_DIR)
    except Exception as err:
        # Best effort only: report the failure instead of crashing the CLI.
        print(f"Failed to open folder with system viewer, error:\n{err}")
293
+
294
+
295
class ServerChoice(Enum):
    """Server transports that `letta server` can launch.

    The member *value* is the string accepted on the command line.
    """

    rest_api = "rest"
    ws_api = "websocket"
298
+
299
+
300
def server(
    type: Annotated[ServerChoice, typer.Option(help="Server to run")] = "rest",
    port: Annotated[Optional[int], typer.Option(help="Port to run the server on")] = None,
    host: Annotated[Optional[str], typer.Option(help="Host to run the server on (default to localhost)")] = None,
    debug: Annotated[bool, typer.Option(help="Turn debugging output on")] = False,
):
    """Launch a Letta server process.

    Args:
        type: Which transport to serve ("rest" or "websocket"); only the
            REST API is supported.
        port: Port to bind; the server's own default is used when None.
        host: Host/interface to bind; defaults to localhost when None.
        debug: Enable debugging output in the server process.

    Raises:
        NotImplementedError: when the deprecated websocket transport is requested.
    """
    if type == ServerChoice.rest_api:
        try:
            # Imported lazily so unrelated CLI commands don't pay the
            # cost of loading the whole REST app.
            from letta.server.rest_api.app import start_server

            start_server(port=port, host=host, debug=debug)
        except KeyboardInterrupt:
            # Handle CTRL-C: exit cleanly rather than with a traceback.
            typer.secho("Terminating the server...")
            sys.exit(0)
    elif type == ServerChoice.ws_api:
        raise NotImplementedError("WS support deprecated")
331
+
332
+
333
+ def run(
334
+ persona: Annotated[Optional[str], typer.Option(help="Specify persona")] = None,
335
+ agent: Annotated[Optional[str], typer.Option(help="Specify agent name")] = None,
336
+ human: Annotated[Optional[str], typer.Option(help="Specify human")] = None,
337
+ system: Annotated[Optional[str], typer.Option(help="Specify system prompt (raw text)")] = None,
338
+ system_file: Annotated[Optional[str], typer.Option(help="Specify raw text file containing system prompt")] = None,
339
+ # model flags
340
+ model: Annotated[Optional[str], typer.Option(help="Specify the LLM model")] = None,
341
+ model_wrapper: Annotated[Optional[str], typer.Option(help="Specify the LLM model wrapper")] = None,
342
+ model_endpoint: Annotated[Optional[str], typer.Option(help="Specify the LLM model endpoint")] = None,
343
+ model_endpoint_type: Annotated[Optional[str], typer.Option(help="Specify the LLM model endpoint type")] = None,
344
+ context_window: Annotated[
345
+ Optional[int], typer.Option(help="The context window of the LLM you are using (e.g. 8k for most Mistral 7B variants)")
346
+ ] = None,
347
+ core_memory_limit: Annotated[
348
+ Optional[int], typer.Option(help="The character limit to each core-memory section (human/persona).")
349
+ ] = 2000,
350
+ # other
351
+ first: Annotated[bool, typer.Option(help="Use --first to send the first message in the sequence")] = False,
352
+ strip_ui: Annotated[bool, typer.Option(help="Remove all the bells and whistles in CLI output (helpful for testing)")] = False,
353
+ debug: Annotated[bool, typer.Option(help="Use --debug to enable debugging output")] = False,
354
+ no_verify: Annotated[bool, typer.Option(help="Bypass message verification")] = False,
355
+ yes: Annotated[bool, typer.Option("-y", help="Skip confirmation prompt and use defaults")] = False,
356
+ # streaming
357
+ stream: Annotated[bool, typer.Option(help="Enables message streaming in the CLI (if the backend supports it)")] = False,
358
+ # whether or not to put the inner thoughts inside the function args
359
+ no_content: Annotated[
360
+ OptionState, typer.Option(help="Set to 'yes' for LLM APIs that omit the `content` field during tool calling")
361
+ ] = OptionState.DEFAULT,
362
+ ):
363
+ """Start chatting with an Letta agent
364
+
365
+ Example usage: `letta run --agent myagent --data-source mydata --persona mypersona --human myhuman --model gpt-3.5-turbo`
366
+
367
+ :param persona: Specify persona
368
+ :param agent: Specify agent name (will load existing state if the agent exists, or create a new one with that name)
369
+ :param human: Specify human
370
+ :param model: Specify the LLM model
371
+
372
+ """
373
+
374
+ # setup logger
375
+ # TODO: remove Utils Debug after global logging is complete.
376
+ utils.DEBUG = debug
377
+ # TODO: add logging command line options for runtime log level
378
+
379
+ if debug:
380
+ logger.setLevel(logging.DEBUG)
381
+ server_logger.setLevel(logging.DEBUG)
382
+ else:
383
+ logger.setLevel(logging.CRITICAL)
384
+ server_logger.setLevel(logging.CRITICAL)
385
+
386
+ # from letta.migrate import (
387
+ # VERSION_CUTOFF,
388
+ # config_is_compatible,
389
+ # wipe_config_and_reconfigure,
390
+ # )
391
+
392
+ # if not config_is_compatible(allow_empty=True):
393
+ # typer.secho(f"\nYour current config file is incompatible with Letta versions later than {VERSION_CUTOFF}\n", fg=typer.colors.RED)
394
+ # choices = [
395
+ # "Run the full config setup (recommended)",
396
+ # "Create a new config using defaults",
397
+ # "Cancel",
398
+ # ]
399
+ # selection = questionary.select(
400
+ # f"To use Letta, you must either downgrade your Letta version (<= {VERSION_CUTOFF}), or regenerate your config. Would you like to proceed?",
401
+ # choices=choices,
402
+ # default=choices[0],
403
+ # ).ask()
404
+ # if selection == choices[0]:
405
+ # try:
406
+ # wipe_config_and_reconfigure()
407
+ # except Exception as e:
408
+ # typer.secho(f"Fresh config generation failed - error:\n{e}", fg=typer.colors.RED)
409
+ # raise
410
+ # elif selection == choices[1]:
411
+ # try:
412
+ # # Don't create a config, so that the next block of code asking about quickstart is run
413
+ # wipe_config_and_reconfigure(run_configure=False, create_config=False)
414
+ # except Exception as e:
415
+ # typer.secho(f"Fresh config generation failed - error:\n{e}", fg=typer.colors.RED)
416
+ # raise
417
+ # else:
418
+ # typer.secho("Letta config regeneration cancelled", fg=typer.colors.RED)
419
+ # raise KeyboardInterrupt()
420
+
421
+ # typer.secho("Note: if you would like to migrate old agents to the new release, please run `letta migrate`!", fg=typer.colors.GREEN)
422
+
423
+ if not LettaConfig.exists():
424
+ # if no config, ask about quickstart
425
+ # do you want to do:
426
+ # - openai (run quickstart)
427
+ # - letta hosted (run quickstart)
428
+ # - other (run configure)
429
+ if yes:
430
+ # if user is passing '-y' to bypass all inputs, use letta hosted
431
+ # since it can't fail out if you don't have an API key
432
+ quickstart(backend=QuickstartChoice.letta_hosted)
433
+ config = LettaConfig()
434
+
435
+ else:
436
+ config_choices = {
437
+ "letta": "Use the free Letta endpoints",
438
+ "openai": "Use OpenAI (requires an OpenAI API key)",
439
+ "other": "Other (OpenAI Azure, custom LLM endpoint, etc)",
440
+ }
441
+ print()
442
+ config_selection = questionary.select(
443
+ "How would you like to set up Letta?",
444
+ choices=list(config_choices.values()),
445
+ default=config_choices["letta"],
446
+ ).ask()
447
+
448
+ if config_selection == config_choices["letta"]:
449
+ print()
450
+ quickstart(backend=QuickstartChoice.letta_hosted, debug=debug, terminal=False, latest=False)
451
+ elif config_selection == config_choices["openai"]:
452
+ print()
453
+ quickstart(backend=QuickstartChoice.openai, debug=debug, terminal=False, latest=False)
454
+ elif config_selection == config_choices["other"]:
455
+ configure()
456
+ else:
457
+ raise ValueError(config_selection)
458
+
459
+ config = LettaConfig.load()
460
+
461
+ else: # load config
462
+ config = LettaConfig.load()
463
+
464
+ # read user id from config
465
+ ms = MetadataStore(config)
466
+ client = create_client()
467
+ client.user_id
468
+
469
+ # determine agent to use, if not provided
470
+ if not yes and not agent:
471
+ agents = client.list_agents()
472
+ agents = [a.name for a in agents]
473
+
474
+ if len(agents) > 0:
475
+ print()
476
+ select_agent = questionary.confirm("Would you like to select an existing agent?").ask()
477
+ if select_agent is None:
478
+ raise KeyboardInterrupt
479
+ if select_agent:
480
+ agent = questionary.select("Select agent:", choices=agents).ask()
481
+
482
+ # create agent config
483
+ if agent:
484
+ agent_id = client.get_agent_id(agent)
485
+ agent_state = client.get_agent(agent_id)
486
+ else:
487
+ agent_state = None
488
+ human = human if human else config.human
489
+ persona = persona if persona else config.persona
490
+ if agent and agent_state: # use existing agent
491
+ typer.secho(f"\nšŸ” Using existing agent {agent}", fg=typer.colors.GREEN)
492
+ # agent_config = AgentConfig.load(agent)
493
+ # agent_state = ms.get_agent(agent_name=agent, user_id=user_id)
494
+ printd("Loading agent state:", agent_state.id)
495
+ printd("Agent state:", agent_state.state)
496
+ # printd("State path:", agent_config.save_state_dir())
497
+ # printd("Persistent manager path:", agent_config.save_persistence_manager_dir())
498
+ # printd("Index path:", agent_config.save_agent_index_dir())
499
+ # persistence_manager = LocalStateManager(agent_config).load() # TODO: implement load
500
+ # TODO: load prior agent state
501
+
502
+ # Allow overriding model specifics (model, model wrapper, model endpoint IP + type, context_window)
503
+ if model and model != agent_state.llm_config.model:
504
+ typer.secho(
505
+ f"{CLI_WARNING_PREFIX}Overriding existing model {agent_state.llm_config.model} with {model}", fg=typer.colors.YELLOW
506
+ )
507
+ agent_state.llm_config.model = model
508
+ if context_window is not None and int(context_window) != agent_state.llm_config.context_window:
509
+ typer.secho(
510
+ f"{CLI_WARNING_PREFIX}Overriding existing context window {agent_state.llm_config.context_window} with {context_window}",
511
+ fg=typer.colors.YELLOW,
512
+ )
513
+ agent_state.llm_config.context_window = context_window
514
+ if model_wrapper and model_wrapper != agent_state.llm_config.model_wrapper:
515
+ typer.secho(
516
+ f"{CLI_WARNING_PREFIX}Overriding existing model wrapper {agent_state.llm_config.model_wrapper} with {model_wrapper}",
517
+ fg=typer.colors.YELLOW,
518
+ )
519
+ agent_state.llm_config.model_wrapper = model_wrapper
520
+ if model_endpoint and model_endpoint != agent_state.llm_config.model_endpoint:
521
+ typer.secho(
522
+ f"{CLI_WARNING_PREFIX}Overriding existing model endpoint {agent_state.llm_config.model_endpoint} with {model_endpoint}",
523
+ fg=typer.colors.YELLOW,
524
+ )
525
+ agent_state.llm_config.model_endpoint = model_endpoint
526
+ if model_endpoint_type and model_endpoint_type != agent_state.llm_config.model_endpoint_type:
527
+ typer.secho(
528
+ f"{CLI_WARNING_PREFIX}Overriding existing model endpoint type {agent_state.llm_config.model_endpoint_type} with {model_endpoint_type}",
529
+ fg=typer.colors.YELLOW,
530
+ )
531
+ agent_state.llm_config.model_endpoint_type = model_endpoint_type
532
+
533
+ # NOTE: commented out because this seems dangerous - instead users should use /systemswap when in the CLI
534
+ # # user specified a new system prompt
535
+ # if system:
536
+ # # NOTE: agent_state.system is the ORIGINAL system prompt,
537
+ # # whereas agent_state.state["system"] is the LATEST system prompt
538
+ # existing_system_prompt = agent_state.state["system"] if "system" in agent_state.state else None
539
+ # if existing_system_prompt != system:
540
+ # # override
541
+ # agent_state.state["system"] = system
542
+
543
+ # Update the agent with any overrides
544
+ agent_state = client.update_agent(
545
+ agent_id=agent_state.id,
546
+ name=agent_state.name,
547
+ llm_config=agent_state.llm_config,
548
+ embedding_config=agent_state.embedding_config,
549
+ )
550
+
551
+ # create agent
552
+ letta_agent = Agent(agent_state=agent_state, interface=interface(), tools=tools)
553
+
554
+ else: # create new agent
555
+ # create new agent config: override defaults with args if provided
556
+ typer.secho("\n🧬 Creating new agent...", fg=typer.colors.WHITE)
557
+
558
+ agent_name = agent if agent else utils.create_random_username()
559
+ llm_config = config.default_llm_config
560
+ embedding_config = config.default_embedding_config # TODO allow overriding embedding params via CLI run
561
+
562
+ # Allow overriding model specifics (model, model wrapper, model endpoint IP + type, context_window)
563
+ if model and model != llm_config.model:
564
+ typer.secho(f"{CLI_WARNING_PREFIX}Overriding default model {llm_config.model} with {model}", fg=typer.colors.YELLOW)
565
+ llm_config.model = model
566
+ if context_window is not None and int(context_window) != llm_config.context_window:
567
+ typer.secho(
568
+ f"{CLI_WARNING_PREFIX}Overriding default context window {llm_config.context_window} with {context_window}",
569
+ fg=typer.colors.YELLOW,
570
+ )
571
+ llm_config.context_window = context_window
572
+ if model_wrapper and model_wrapper != llm_config.model_wrapper:
573
+ typer.secho(
574
+ f"{CLI_WARNING_PREFIX}Overriding existing model wrapper {llm_config.model_wrapper} with {model_wrapper}",
575
+ fg=typer.colors.YELLOW,
576
+ )
577
+ llm_config.model_wrapper = model_wrapper
578
+ if model_endpoint and model_endpoint != llm_config.model_endpoint:
579
+ typer.secho(
580
+ f"{CLI_WARNING_PREFIX}Overriding existing model endpoint {llm_config.model_endpoint} with {model_endpoint}",
581
+ fg=typer.colors.YELLOW,
582
+ )
583
+ llm_config.model_endpoint = model_endpoint
584
+ if model_endpoint_type and model_endpoint_type != llm_config.model_endpoint_type:
585
+ typer.secho(
586
+ f"{CLI_WARNING_PREFIX}Overriding existing model endpoint type {llm_config.model_endpoint_type} with {model_endpoint_type}",
587
+ fg=typer.colors.YELLOW,
588
+ )
589
+ llm_config.model_endpoint_type = model_endpoint_type
590
+
591
+ # create agent
592
+ client = create_client()
593
+ human_obj = client.get_human(client.get_human_id(name=human))
594
+ persona_obj = client.get_persona(client.get_persona_id(name=persona))
595
+ if human_obj is None:
596
+ typer.secho(f"Couldn't find human {human} in database, please run `letta add human`", fg=typer.colors.RED)
597
+ sys.exit(1)
598
+ if persona_obj is None:
599
+ typer.secho(f"Couldn't find persona {persona} in database, please run `letta add persona`", fg=typer.colors.RED)
600
+ sys.exit(1)
601
+
602
+ if system_file:
603
+ try:
604
+ with open(system_file, "r", encoding="utf-8") as file:
605
+ system = file.read().strip()
606
+ printd("Loaded system file successfully.")
607
+ except FileNotFoundError:
608
+ typer.secho(f"System file not found at {system_file}", fg=typer.colors.RED)
609
+ system_prompt = system if system else None
610
+
611
+ memory = ChatMemory(human=human_obj.value, persona=persona_obj.value, limit=core_memory_limit)
612
+ metadata = {"human": human_obj.name, "persona": persona_obj.name}
613
+
614
+ typer.secho(f"-> šŸ¤– Using persona profile: '{persona_obj.name}'", fg=typer.colors.WHITE)
615
+ typer.secho(f"-> šŸ§‘ Using human profile: '{human_obj.name}'", fg=typer.colors.WHITE)
616
+
617
+ # add tools
618
+ agent_state = client.create_agent(
619
+ name=agent_name,
620
+ system=system_prompt,
621
+ embedding_config=embedding_config,
622
+ llm_config=llm_config,
623
+ memory=memory,
624
+ metadata=metadata,
625
+ )
626
+ assert isinstance(agent_state.memory, Memory), f"Expected Memory, got {type(agent_state.memory)}"
627
+ typer.secho(f"-> šŸ› ļø {len(agent_state.tools)} tools: {', '.join([t for t in agent_state.tools])}", fg=typer.colors.WHITE)
628
+ tools = [ms.get_tool(tool_name, user_id=client.user_id) for tool_name in agent_state.tools]
629
+
630
+ letta_agent = Agent(
631
+ interface=interface(),
632
+ agent_state=agent_state,
633
+ tools=tools,
634
+ # gpt-3.5-turbo tends to omit inner monologue, relax this requirement for now
635
+ first_message_verify_mono=True if (model is not None and "gpt-4" in model) else False,
636
+ )
637
+ save_agent(agent=letta_agent, ms=ms)
638
+ typer.secho(f"šŸŽ‰ Created new agent '{letta_agent.agent_state.name}' (id={letta_agent.agent_state.id})", fg=typer.colors.GREEN)
639
+
640
+ # start event loop
641
+ from letta.main import run_agent_loop
642
+
643
+ print() # extra space
644
+ run_agent_loop(
645
+ letta_agent=letta_agent,
646
+ config=config,
647
+ first=first,
648
+ ms=ms,
649
+ no_verify=no_verify,
650
+ stream=stream,
651
+ inner_thoughts_in_kwargs=no_content,
652
+ ) # TODO: add back no_verify
653
+
654
+
655
def delete_agent(
    agent_name: Annotated[str, typer.Option(help="Specify agent to delete")],
    user_id: Annotated[Optional[str], typer.Option(help="User ID to associate with the agent.")] = None,
):
    """Delete an agent from the database after interactive confirmation.

    Args:
        agent_name: Name of the agent to delete.
        user_id: Optional user ID; the client's default user is used when omitted.

    Exits with status 1 if the agent does not exist or the deletion fails.
    Raises KeyboardInterrupt if the confirmation prompt is aborted (Ctrl-C).
    """
    # Use the client's default user ID if no user_id was provided
    config = LettaConfig.load()
    # NOTE(review): result discarded -- presumably constructed for side effects
    # (e.g. metadata DB initialization); confirm it is still required here.
    MetadataStore(config)
    client = create_client(user_id=user_id)
    agent = client.get_agent_by_name(agent_name)
    if not agent:
        typer.secho(f"Couldn't find agent named '{agent_name}' to delete", fg=typer.colors.RED)
        sys.exit(1)

    confirm = questionary.confirm(f"Are you sure you want to delete agent '{agent_name}' (id={agent.id})?", default=False).ask()
    if confirm is None:
        # questionary returns None when the prompt is aborted (e.g. Ctrl-C)
        raise KeyboardInterrupt
    if not confirm:
        typer.secho(f"Cancelled agent deletion '{agent_name}' (id={agent.id})", fg=typer.colors.GREEN)
        return

    try:
        # delete the agent
        client.delete_agent(agent.id)
        typer.secho(f"šŸ•Šļø Successfully deleted agent '{agent_name}' (id={agent.id})", fg=typer.colors.GREEN)
    except Exception as e:
        # Include the underlying error so failures are diagnosable instead of silent
        typer.secho(f"Failed to delete agent '{agent_name}' (id={agent.id}): {e}", fg=typer.colors.RED)
        sys.exit(1)
683
+
684
+
685
def version() -> str:
    """Print the installed letta package version and return it."""
    import letta

    print(letta.__version__)
    return letta.__version__