letta-nightly 0.1.7.dev20240924104148__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of letta-nightly might be problematic. Click here for more details.
- letta/__init__.py +24 -0
- letta/__main__.py +3 -0
- letta/agent.py +1427 -0
- letta/agent_store/chroma.py +295 -0
- letta/agent_store/db.py +546 -0
- letta/agent_store/lancedb.py +177 -0
- letta/agent_store/milvus.py +198 -0
- letta/agent_store/qdrant.py +201 -0
- letta/agent_store/storage.py +188 -0
- letta/benchmark/benchmark.py +96 -0
- letta/benchmark/constants.py +14 -0
- letta/cli/cli.py +689 -0
- letta/cli/cli_config.py +1282 -0
- letta/cli/cli_load.py +166 -0
- letta/client/__init__.py +0 -0
- letta/client/admin.py +171 -0
- letta/client/client.py +2360 -0
- letta/client/streaming.py +90 -0
- letta/client/utils.py +61 -0
- letta/config.py +484 -0
- letta/configs/anthropic.json +13 -0
- letta/configs/letta_hosted.json +11 -0
- letta/configs/openai.json +12 -0
- letta/constants.py +134 -0
- letta/credentials.py +140 -0
- letta/data_sources/connectors.py +247 -0
- letta/embeddings.py +218 -0
- letta/errors.py +26 -0
- letta/functions/__init__.py +0 -0
- letta/functions/function_sets/base.py +174 -0
- letta/functions/function_sets/extras.py +132 -0
- letta/functions/functions.py +105 -0
- letta/functions/schema_generator.py +205 -0
- letta/humans/__init__.py +0 -0
- letta/humans/examples/basic.txt +1 -0
- letta/humans/examples/cs_phd.txt +9 -0
- letta/interface.py +314 -0
- letta/llm_api/__init__.py +0 -0
- letta/llm_api/anthropic.py +383 -0
- letta/llm_api/azure_openai.py +155 -0
- letta/llm_api/cohere.py +396 -0
- letta/llm_api/google_ai.py +468 -0
- letta/llm_api/llm_api_tools.py +485 -0
- letta/llm_api/openai.py +470 -0
- letta/local_llm/README.md +3 -0
- letta/local_llm/__init__.py +0 -0
- letta/local_llm/chat_completion_proxy.py +279 -0
- letta/local_llm/constants.py +31 -0
- letta/local_llm/function_parser.py +68 -0
- letta/local_llm/grammars/__init__.py +0 -0
- letta/local_llm/grammars/gbnf_grammar_generator.py +1324 -0
- letta/local_llm/grammars/json.gbnf +26 -0
- letta/local_llm/grammars/json_func_calls_with_inner_thoughts.gbnf +32 -0
- letta/local_llm/groq/api.py +97 -0
- letta/local_llm/json_parser.py +202 -0
- letta/local_llm/koboldcpp/api.py +62 -0
- letta/local_llm/koboldcpp/settings.py +23 -0
- letta/local_llm/llamacpp/api.py +58 -0
- letta/local_llm/llamacpp/settings.py +22 -0
- letta/local_llm/llm_chat_completion_wrappers/__init__.py +0 -0
- letta/local_llm/llm_chat_completion_wrappers/airoboros.py +452 -0
- letta/local_llm/llm_chat_completion_wrappers/chatml.py +470 -0
- letta/local_llm/llm_chat_completion_wrappers/configurable_wrapper.py +387 -0
- letta/local_llm/llm_chat_completion_wrappers/dolphin.py +246 -0
- letta/local_llm/llm_chat_completion_wrappers/llama3.py +345 -0
- letta/local_llm/llm_chat_completion_wrappers/simple_summary_wrapper.py +156 -0
- letta/local_llm/llm_chat_completion_wrappers/wrapper_base.py +11 -0
- letta/local_llm/llm_chat_completion_wrappers/zephyr.py +345 -0
- letta/local_llm/lmstudio/api.py +100 -0
- letta/local_llm/lmstudio/settings.py +29 -0
- letta/local_llm/ollama/api.py +88 -0
- letta/local_llm/ollama/settings.py +32 -0
- letta/local_llm/settings/__init__.py +0 -0
- letta/local_llm/settings/deterministic_mirostat.py +45 -0
- letta/local_llm/settings/settings.py +72 -0
- letta/local_llm/settings/simple.py +28 -0
- letta/local_llm/utils.py +265 -0
- letta/local_llm/vllm/api.py +63 -0
- letta/local_llm/webui/api.py +60 -0
- letta/local_llm/webui/legacy_api.py +58 -0
- letta/local_llm/webui/legacy_settings.py +23 -0
- letta/local_llm/webui/settings.py +24 -0
- letta/log.py +76 -0
- letta/main.py +437 -0
- letta/memory.py +440 -0
- letta/metadata.py +884 -0
- letta/openai_backcompat/__init__.py +0 -0
- letta/openai_backcompat/openai_object.py +437 -0
- letta/persistence_manager.py +148 -0
- letta/personas/__init__.py +0 -0
- letta/personas/examples/anna_pa.txt +13 -0
- letta/personas/examples/google_search_persona.txt +15 -0
- letta/personas/examples/memgpt_doc.txt +6 -0
- letta/personas/examples/memgpt_starter.txt +4 -0
- letta/personas/examples/sam.txt +14 -0
- letta/personas/examples/sam_pov.txt +14 -0
- letta/personas/examples/sam_simple_pov_gpt35.txt +13 -0
- letta/personas/examples/sqldb/test.db +0 -0
- letta/prompts/__init__.py +0 -0
- letta/prompts/gpt_summarize.py +14 -0
- letta/prompts/gpt_system.py +26 -0
- letta/prompts/system/memgpt_base.txt +49 -0
- letta/prompts/system/memgpt_chat.txt +58 -0
- letta/prompts/system/memgpt_chat_compressed.txt +13 -0
- letta/prompts/system/memgpt_chat_fstring.txt +51 -0
- letta/prompts/system/memgpt_doc.txt +50 -0
- letta/prompts/system/memgpt_gpt35_extralong.txt +53 -0
- letta/prompts/system/memgpt_intuitive_knowledge.txt +31 -0
- letta/prompts/system/memgpt_modified_chat.txt +23 -0
- letta/pytest.ini +0 -0
- letta/schemas/agent.py +117 -0
- letta/schemas/api_key.py +21 -0
- letta/schemas/block.py +135 -0
- letta/schemas/document.py +21 -0
- letta/schemas/embedding_config.py +54 -0
- letta/schemas/enums.py +35 -0
- letta/schemas/job.py +38 -0
- letta/schemas/letta_base.py +80 -0
- letta/schemas/letta_message.py +175 -0
- letta/schemas/letta_request.py +23 -0
- letta/schemas/letta_response.py +28 -0
- letta/schemas/llm_config.py +54 -0
- letta/schemas/memory.py +224 -0
- letta/schemas/message.py +727 -0
- letta/schemas/openai/chat_completion_request.py +123 -0
- letta/schemas/openai/chat_completion_response.py +136 -0
- letta/schemas/openai/chat_completions.py +123 -0
- letta/schemas/openai/embedding_response.py +11 -0
- letta/schemas/openai/openai.py +157 -0
- letta/schemas/organization.py +20 -0
- letta/schemas/passage.py +80 -0
- letta/schemas/source.py +62 -0
- letta/schemas/tool.py +143 -0
- letta/schemas/usage.py +18 -0
- letta/schemas/user.py +33 -0
- letta/server/__init__.py +0 -0
- letta/server/constants.py +6 -0
- letta/server/rest_api/__init__.py +0 -0
- letta/server/rest_api/admin/__init__.py +0 -0
- letta/server/rest_api/admin/agents.py +21 -0
- letta/server/rest_api/admin/tools.py +83 -0
- letta/server/rest_api/admin/users.py +98 -0
- letta/server/rest_api/app.py +193 -0
- letta/server/rest_api/auth/__init__.py +0 -0
- letta/server/rest_api/auth/index.py +43 -0
- letta/server/rest_api/auth_token.py +22 -0
- letta/server/rest_api/interface.py +726 -0
- letta/server/rest_api/routers/__init__.py +0 -0
- letta/server/rest_api/routers/openai/__init__.py +0 -0
- letta/server/rest_api/routers/openai/assistants/__init__.py +0 -0
- letta/server/rest_api/routers/openai/assistants/assistants.py +115 -0
- letta/server/rest_api/routers/openai/assistants/schemas.py +121 -0
- letta/server/rest_api/routers/openai/assistants/threads.py +336 -0
- letta/server/rest_api/routers/openai/chat_completions/__init__.py +0 -0
- letta/server/rest_api/routers/openai/chat_completions/chat_completions.py +131 -0
- letta/server/rest_api/routers/v1/__init__.py +15 -0
- letta/server/rest_api/routers/v1/agents.py +543 -0
- letta/server/rest_api/routers/v1/blocks.py +73 -0
- letta/server/rest_api/routers/v1/jobs.py +46 -0
- letta/server/rest_api/routers/v1/llms.py +28 -0
- letta/server/rest_api/routers/v1/organizations.py +61 -0
- letta/server/rest_api/routers/v1/sources.py +199 -0
- letta/server/rest_api/routers/v1/tools.py +103 -0
- letta/server/rest_api/routers/v1/users.py +109 -0
- letta/server/rest_api/static_files.py +74 -0
- letta/server/rest_api/utils.py +69 -0
- letta/server/server.py +1995 -0
- letta/server/startup.sh +8 -0
- letta/server/static_files/assets/index-0cbf7ad5.js +274 -0
- letta/server/static_files/assets/index-156816da.css +1 -0
- letta/server/static_files/assets/index-486e3228.js +274 -0
- letta/server/static_files/favicon.ico +0 -0
- letta/server/static_files/index.html +39 -0
- letta/server/static_files/memgpt_logo_transparent.png +0 -0
- letta/server/utils.py +46 -0
- letta/server/ws_api/__init__.py +0 -0
- letta/server/ws_api/example_client.py +104 -0
- letta/server/ws_api/interface.py +108 -0
- letta/server/ws_api/protocol.py +100 -0
- letta/server/ws_api/server.py +145 -0
- letta/settings.py +165 -0
- letta/streaming_interface.py +396 -0
- letta/system.py +207 -0
- letta/utils.py +1065 -0
- letta_nightly-0.1.7.dev20240924104148.dist-info/LICENSE +190 -0
- letta_nightly-0.1.7.dev20240924104148.dist-info/METADATA +98 -0
- letta_nightly-0.1.7.dev20240924104148.dist-info/RECORD +189 -0
- letta_nightly-0.1.7.dev20240924104148.dist-info/WHEEL +4 -0
- letta_nightly-0.1.7.dev20240924104148.dist-info/entry_points.txt +3 -0
|
@@ -0,0 +1,90 @@
|
|
|
1
|
+
import json
|
|
2
|
+
from typing import Generator
|
|
3
|
+
|
|
4
|
+
import httpx
|
|
5
|
+
from httpx_sse import SSEError, connect_sse
|
|
6
|
+
|
|
7
|
+
from letta.constants import OPENAI_CONTEXT_WINDOW_ERROR_SUBSTRING
|
|
8
|
+
from letta.errors import LLMError
|
|
9
|
+
from letta.schemas.enums import MessageStreamStatus
|
|
10
|
+
from letta.schemas.letta_message import (
|
|
11
|
+
FunctionCallMessage,
|
|
12
|
+
FunctionReturn,
|
|
13
|
+
InternalMonologue,
|
|
14
|
+
)
|
|
15
|
+
from letta.schemas.letta_response import LettaStreamingResponse
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
def _sse_post(url: str, data: dict, headers: dict) -> Generator[LettaStreamingResponse, None, None]:
    """POST to an SSE endpoint and yield the parsed streaming messages.

    Args:
        url: Endpoint to POST to.
        data: JSON-serializable request body.
        headers: HTTP headers to send with the request.

    Yields:
        LettaStreamingResponse items: InternalMonologue, FunctionCallMessage,
        FunctionReturn, or a MessageStreamStatus sentinel for stream-status events.

    Raises:
        LLMError: if the server's error message indicates a context-window overflow.
        httpx.HTTPStatusError: for other non-success responses.
        ValueError: if a stream chunk has an unrecognized message type.
    """
    with httpx.Client() as client:
        with connect_sse(client, method="POST", url=url, json=data, headers=headers) as event_source:

            # Inspect for errors before iterating (see https://github.com/florimondmanca/httpx-sse/pull/12)
            if not event_source.response.is_success:
                # handle errors
                from letta.utils import printd

                printd("Caught error before iterating SSE request:", vars(event_source.response))
                printd(event_source.response.read())

                try:
                    # httpx caches the body, so a second read() returns the same bytes
                    response_bytes = event_source.response.read()
                    response_dict = json.loads(response_bytes.decode("utf-8"))
                    error_message = response_dict["error"]["message"]
                    # e.g.: This model's maximum context length is 8192 tokens. However, your messages resulted in 8198 tokens (7450 in the messages, 748 in the functions). Please reduce the length of the messages or functions.
                    if OPENAI_CONTEXT_WINDOW_ERROR_SUBSTRING in error_message:
                        raise LLMError(error_message)
                except LLMError:
                    raise
                except Exception:
                    # FIX: was a bare `except:`, which also swallowed SystemExit/KeyboardInterrupt.
                    # Any parse failure falls through to the generic HTTP error below.
                    print("Failed to parse SSE message, throwing SSE HTTP error up the stack")
                    event_source.response.raise_for_status()

            try:
                # FIX: hoist the status-sentinel values out of the loop (previously the
                # list was rebuilt on every SSE event); a set also gives O(1) membership.
                stream_status_values = {status.value for status in MessageStreamStatus}
                for sse in event_source.iter_sse():
                    if sse.data in stream_status_values:
                        yield MessageStreamStatus(sse.data)
                    else:
                        chunk_data = json.loads(sse.data)
                        # Dispatch on which message-type key is present in the chunk
                        if "internal_monologue" in chunk_data:
                            yield InternalMonologue(**chunk_data)
                        elif "function_call" in chunk_data:
                            yield FunctionCallMessage(**chunk_data)
                        elif "function_return" in chunk_data:
                            yield FunctionReturn(**chunk_data)
                        else:
                            raise ValueError(f"Unknown message type in chunk_data: {chunk_data}")

            except SSEError as e:
                print("Caught an error while iterating the SSE stream:", str(e))
                if "application/json" in str(e):  # Check if the error is because of JSON response
                    # TODO figure out a better way to catch the error other than re-trying with a POST
                    response = client.post(url=url, json=data, headers=headers)  # Make the request again to get the JSON response
                    if response.headers["Content-Type"].startswith("application/json"):
                        error_details = response.json()  # Parse the JSON to get the error message
                        print("Request:", vars(response.request))
                        print("POST Error:", error_details)
                        print("Original SSE Error:", str(e))
                    else:
                        print("Failed to retrieve JSON error message via retry.")
                else:
                    print("SSEError not related to 'application/json' content type.")

                # Optionally re-raise the exception if you need to propagate it
                raise e

            except Exception as e:
                # Dump as much request/response context as we have before propagating
                if event_source.response.request is not None:
                    print("HTTP Request:", vars(event_source.response.request))
                if event_source.response is not None:
                    print("HTTP Status:", event_source.response.status_code)
                    print("HTTP Headers:", event_source.response.headers)
                print("Exception message:", str(e))
                raise e
|
letta/client/utils.py
ADDED
|
@@ -0,0 +1,61 @@
|
|
|
1
|
+
from datetime import datetime
|
|
2
|
+
|
|
3
|
+
from IPython.display import HTML, display
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
def pprint(messages):
    """Utility function for pretty-printing the output of client.send_message in notebooks.

    Renders each message dict as styled HTML (Solarized-dark "terminal" look) and
    shows it via IPython.display. Recognized message keys: "function_return"
    (with a sibling "status"), "internal_monologue", "function_call", and
    "assistant_message". Every message must carry an ISO-8601 "date" field;
    unrecognized messages render only as a blank line.

    NOTE(review): message contents are interpolated into the HTML without
    escaping, so markup inside a message will be rendered as-is — consider
    html.escape for untrusted content.
    """

    css_styles = """
    <style>
        .terminal {
            background-color: #002b36;
            color: #839496;
            font-family: 'Courier New', Courier, monospace;
            padding: 10px;
            border-radius: 5px;
        }
        .terminal strong {
            color: #b58900;
        }
        .terminal .function-return {
            color: #2aa198;
        }
        .terminal .internal-monologue {
            color: #d33682;
        }
        .terminal .function-call {
            color: #2aa198;
        }
        .terminal .assistant-message {
            color: #859900;
        }
        .terminal pre {
            color: #839496;
        }
    </style>
    """

    html_content = css_styles + "<div class='terminal'>"
    for message in messages:
        date_str = message["date"]
        # fromisoformat cannot parse a trailing "Z" on Python < 3.11; normalize to an explicit UTC offset
        date_formatted = datetime.fromisoformat(date_str.replace("Z", "+00:00")).strftime("%Y-%m-%d %H:%M:%S")

        if "function_return" in message:
            return_string = message["function_return"]
            return_status = message["status"]
            html_content += f"<p><strong>🛠️ [{date_formatted}] Function Return ({return_status}):</strong></p>"
            html_content += f"<p class='function-return'>{return_string}</p>"
        elif "internal_monologue" in message:
            html_content += f"<p><strong>💭 [{date_formatted}] Internal Monologue:</strong></p>"
            html_content += f"<p class='internal-monologue'>{message['internal_monologue']}</p>"
        elif "function_call" in message:
            # FIX: header previously printed a doubled "[[" before the timestamp
            html_content += f"<p><strong>🛠️ [{date_formatted}] Function Call:</strong></p>"
            html_content += f"<p class='function-call'>{message['function_call']}</p>"
        elif "assistant_message" in message:
            html_content += f"<p><strong>🤖 [{date_formatted}] Assistant Message:</strong></p>"
            html_content += f"<p class='assistant-message'>{message['assistant_message']}</p>"
        html_content += "<br>"
    html_content += "</div>"

    display(HTML(html_content))
|
letta/config.py
ADDED
|
@@ -0,0 +1,484 @@
|
|
|
1
|
+
import configparser
|
|
2
|
+
import inspect
|
|
3
|
+
import json
|
|
4
|
+
import os
|
|
5
|
+
import uuid
|
|
6
|
+
from dataclasses import dataclass
|
|
7
|
+
from typing import Optional
|
|
8
|
+
|
|
9
|
+
import letta
|
|
10
|
+
import letta.utils as utils
|
|
11
|
+
from letta.constants import (
|
|
12
|
+
CORE_MEMORY_HUMAN_CHAR_LIMIT,
|
|
13
|
+
CORE_MEMORY_PERSONA_CHAR_LIMIT,
|
|
14
|
+
DEFAULT_HUMAN,
|
|
15
|
+
DEFAULT_PERSONA,
|
|
16
|
+
DEFAULT_PRESET,
|
|
17
|
+
LETTA_DIR,
|
|
18
|
+
)
|
|
19
|
+
from letta.log import get_logger
|
|
20
|
+
from letta.schemas.agent import AgentState
|
|
21
|
+
from letta.schemas.embedding_config import EmbeddingConfig
|
|
22
|
+
from letta.schemas.llm_config import LLMConfig
|
|
23
|
+
|
|
24
|
+
logger = get_logger(__name__)
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
# helper functions for writing to configs
|
|
28
|
+
def get_field(config, section, field):
|
|
29
|
+
if section not in config:
|
|
30
|
+
return None
|
|
31
|
+
if config.has_option(section, field):
|
|
32
|
+
return config.get(section, field)
|
|
33
|
+
else:
|
|
34
|
+
return None
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
def set_field(config, section, field, value):
|
|
38
|
+
if value is None: # cannot write None
|
|
39
|
+
return
|
|
40
|
+
if section not in config: # create section
|
|
41
|
+
config.add_section(section)
|
|
42
|
+
config.set(section, field, value)
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
@dataclass
class LettaConfig:
    """Global Letta configuration, persisted as an INI file.

    Loaded from / saved to ``config_path`` (overridable via the
    ``MEMGPT_CONFIG_PATH`` environment variable) using ``configparser``.
    Holds default agent settings (preset/persona/human), default LLM and
    embedding configurations, and storage-backend locations.
    """

    # Path to the INI config file; MEMGPT_CONFIG_PATH overrides the LETTA_DIR default.
    config_path: str = os.getenv("MEMGPT_CONFIG_PATH") or os.path.join(LETTA_DIR, "config")
    # Anonymous client id for telemetry; the nil UUID until generate_uuid() replaces it.
    anon_clientid: str = str(uuid.UUID(int=0))

    # preset
    preset: str = DEFAULT_PRESET  # TODO: rename to system prompt

    # persona parameters
    persona: str = DEFAULT_PERSONA
    human: str = DEFAULT_HUMAN

    # model parameters
    default_llm_config: Optional[LLMConfig] = None

    # embedding parameters
    default_embedding_config: Optional[EmbeddingConfig] = None

    # NONE OF THIS IS CONFIG ↓↓↓↓↓
    # @norton120 these are the metadata store

    # database configs: archival
    archival_storage_type: str = "chroma"  # local, db
    archival_storage_path: str = os.path.join(LETTA_DIR, "chroma")
    archival_storage_uri: Optional[str] = None  # TODO: eventually allow external vector DB

    # database configs: recall
    recall_storage_type: str = "sqlite"  # local, db
    recall_storage_path: str = LETTA_DIR
    recall_storage_uri: Optional[str] = None  # TODO: eventually allow external vector DB

    # database configs: metadata storage (sources, agents, data sources)
    metadata_storage_type: str = "sqlite"
    metadata_storage_path: str = LETTA_DIR
    metadata_storage_uri: Optional[str] = None

    # database configs: agent state
    persistence_manager_type: Optional[str] = None  # in-memory, db
    persistence_manager_save_file: Optional[str] = None  # local file
    persistence_manager_uri: Optional[str] = None  # db URI

    # version (for backcompat)
    letta_version: str = letta.__version__

    # user info
    policies_accepted: bool = False

    # Default memory limits
    core_memory_persona_char_limit: int = CORE_MEMORY_PERSONA_CHAR_LIMIT
    core_memory_human_char_limit: int = CORE_MEMORY_HUMAN_CHAR_LIMIT

    def __post_init__(self):
        # No type coercion currently performed (int casts are done in load() instead).
        pass

    @staticmethod
    def generate_uuid() -> str:
        """Return a hex client id derived from the machine's MAC address (uuid.getnode)."""
        return uuid.UUID(int=uuid.getnode()).hex

    @classmethod
    def load(cls, llm_config: Optional[LLMConfig] = None, embedding_config: Optional[EmbeddingConfig] = None) -> "LettaConfig":
        """Load the config from disk, or create (and persist dirs for) a fresh default one.

        NOTE(review): the ``llm_config`` / ``embedding_config`` parameters are
        shadowed by locals when a config file exists and are never read — confirm
        whether they are intended to act as fallbacks.
        """
        # avoid circular import
        from letta.utils import printd

        config = configparser.ConfigParser()

        # allow overriding with env variables
        if os.getenv("MEMGPT_CONFIG_PATH"):
            config_path = os.getenv("MEMGPT_CONFIG_PATH")
        else:
            config_path = LettaConfig.config_path

        # ensure all configuration directories exist
        cls.create_config_dir()
        printd(f"Loading config from {config_path}")
        if os.path.exists(config_path):
            # read existing config
            config.read(config_path)

            # Handle extraction of nested LLMConfig and EmbeddingConfig
            llm_config_dict = {
                # Extract relevant LLM configuration from the config file
                "model": get_field(config, "model", "model"),
                "model_endpoint": get_field(config, "model", "model_endpoint"),
                "model_endpoint_type": get_field(config, "model", "model_endpoint_type"),
                "model_wrapper": get_field(config, "model", "model_wrapper"),
                "context_window": get_field(config, "model", "context_window"),
            }
            embedding_config_dict = {
                # Extract relevant Embedding configuration from the config file
                "embedding_endpoint": get_field(config, "embedding", "embedding_endpoint"),
                "embedding_model": get_field(config, "embedding", "embedding_model"),
                "embedding_endpoint_type": get_field(config, "embedding", "embedding_endpoint_type"),
                "embedding_dim": get_field(config, "embedding", "embedding_dim"),
                "embedding_chunk_size": get_field(config, "embedding", "embedding_chunk_size"),
            }
            # Remove null values
            llm_config_dict = {k: v for k, v in llm_config_dict.items() if v is not None}
            embedding_config_dict = {k: v for k, v in embedding_config_dict.items() if v is not None}
            # Correct the types that aren't strings (configparser returns everything as str)
            if "context_window" in llm_config_dict and llm_config_dict["context_window"] is not None:
                llm_config_dict["context_window"] = int(llm_config_dict["context_window"])
            if "embedding_dim" in embedding_config_dict and embedding_config_dict["embedding_dim"] is not None:
                embedding_config_dict["embedding_dim"] = int(embedding_config_dict["embedding_dim"])
            if "embedding_chunk_size" in embedding_config_dict and embedding_config_dict["embedding_chunk_size"] is not None:
                embedding_config_dict["embedding_chunk_size"] = int(embedding_config_dict["embedding_chunk_size"])
            # Construct the inner properties
            llm_config = LLMConfig(**llm_config_dict)
            embedding_config = EmbeddingConfig(**embedding_config_dict)

            # Everything else
            config_dict = {
                # Two prepared configs
                "default_llm_config": llm_config,
                "default_embedding_config": embedding_config,
                # Agent related
                "preset": get_field(config, "defaults", "preset"),
                "persona": get_field(config, "defaults", "persona"),
                "human": get_field(config, "defaults", "human"),
                # NOTE(review): "agent" is not a LettaConfig field — if a [defaults] "agent"
                # entry exists in the file, cls(**config_dict) below would raise; confirm.
                "agent": get_field(config, "defaults", "agent"),
                # Storage related
                "archival_storage_type": get_field(config, "archival_storage", "type"),
                "archival_storage_path": get_field(config, "archival_storage", "path"),
                "archival_storage_uri": get_field(config, "archival_storage", "uri"),
                "recall_storage_type": get_field(config, "recall_storage", "type"),
                "recall_storage_path": get_field(config, "recall_storage", "path"),
                "recall_storage_uri": get_field(config, "recall_storage", "uri"),
                "metadata_storage_type": get_field(config, "metadata_storage", "type"),
                "metadata_storage_path": get_field(config, "metadata_storage", "path"),
                "metadata_storage_uri": get_field(config, "metadata_storage", "uri"),
                # Misc
                "anon_clientid": get_field(config, "client", "anon_clientid"),
                "config_path": config_path,
                "letta_version": get_field(config, "version", "letta_version"),
            }
            # Don't include null values (missing fields fall back to dataclass defaults)
            config_dict = {k: v for k, v in config_dict.items() if v is not None}

            return cls(**config_dict)

        # No config file on disk: create a new default config
        anon_clientid = LettaConfig.generate_uuid()
        config = cls(anon_clientid=anon_clientid, config_path=config_path)

        config.create_config_dir()  # create dirs

        return config

    def save(self):
        """Write the current configuration to ``self.config_path`` in INI format.

        None-valued fields are skipped by set_field(); non-string values are
        stringified since configparser stores only strings.
        """
        import letta

        config = configparser.ConfigParser()

        # CLI defaults
        set_field(config, "defaults", "preset", self.preset)
        set_field(config, "defaults", "persona", self.persona)
        set_field(config, "defaults", "human", self.human)

        # model defaults (assumes default_llm_config is set — TODO confirm callers guarantee this)
        set_field(config, "model", "model", self.default_llm_config.model)
        set_field(config, "model", "model_endpoint", self.default_llm_config.model_endpoint)
        set_field(
            config,
            "model",
            "model_endpoint_type",
            self.default_llm_config.model_endpoint_type,
        )
        set_field(config, "model", "model_wrapper", self.default_llm_config.model_wrapper)
        set_field(
            config,
            "model",
            "context_window",
            str(self.default_llm_config.context_window),
        )

        # embeddings
        set_field(
            config,
            "embedding",
            "embedding_endpoint_type",
            self.default_embedding_config.embedding_endpoint_type,
        )
        set_field(
            config,
            "embedding",
            "embedding_endpoint",
            self.default_embedding_config.embedding_endpoint,
        )
        set_field(
            config,
            "embedding",
            "embedding_model",
            self.default_embedding_config.embedding_model,
        )
        set_field(
            config,
            "embedding",
            "embedding_dim",
            str(self.default_embedding_config.embedding_dim),
        )
        set_field(
            config,
            "embedding",
            "embedding_chunk_size",
            str(self.default_embedding_config.embedding_chunk_size),
        )

        # archival storage
        set_field(config, "archival_storage", "type", self.archival_storage_type)
        set_field(config, "archival_storage", "path", self.archival_storage_path)
        set_field(config, "archival_storage", "uri", self.archival_storage_uri)

        # recall storage
        set_field(config, "recall_storage", "type", self.recall_storage_type)
        set_field(config, "recall_storage", "path", self.recall_storage_path)
        set_field(config, "recall_storage", "uri", self.recall_storage_uri)

        # metadata storage
        set_field(config, "metadata_storage", "type", self.metadata_storage_type)
        set_field(config, "metadata_storage", "path", self.metadata_storage_path)
        set_field(config, "metadata_storage", "uri", self.metadata_storage_uri)

        # set version
        set_field(config, "version", "letta_version", letta.__version__)

        # client
        if not self.anon_clientid:
            self.anon_clientid = self.generate_uuid()
        set_field(config, "client", "anon_clientid", self.anon_clientid)

        # always make sure all directories are present
        self.create_config_dir()

        with open(self.config_path, "w", encoding="utf-8") as f:
            config.write(f)
        logger.debug(f"Saved Config:  {self.config_path}")

    @staticmethod
    def exists():
        """Return True if the config file exists (honors MEMGPT_CONFIG_PATH override)."""
        # allow overriding with env variables
        if os.getenv("MEMGPT_CONFIG_PATH"):
            config_path = os.getenv("MEMGPT_CONFIG_PATH")
        else:
            config_path = LettaConfig.config_path

        assert not os.path.isdir(config_path), f"Config path {config_path} cannot be set to a directory."
        return os.path.exists(config_path)

    @staticmethod
    def create_config_dir():
        """Create LETTA_DIR and the standard subdirectories if they do not exist."""
        if not os.path.exists(LETTA_DIR):
            os.makedirs(LETTA_DIR, exist_ok=True)

        folders = [
            "personas",
            "humans",
            "archival",
            "agents",
            "functions",
            "system_prompts",
            "presets",
            "settings",
        ]

        for folder in folders:
            if not os.path.exists(os.path.join(LETTA_DIR, folder)):
                os.makedirs(os.path.join(LETTA_DIR, folder))
|
|
328
|
+
|
|
329
|
+
|
|
330
|
+
@dataclass
|
|
331
|
+
class AgentConfig:
|
|
332
|
+
"""
|
|
333
|
+
|
|
334
|
+
NOTE: this is a deprecated class, use AgentState instead. This class is only used for backcompatibility.
|
|
335
|
+
Configuration for a specific instance of an agent
|
|
336
|
+
"""
|
|
337
|
+
|
|
338
|
+
def __init__(
|
|
339
|
+
self,
|
|
340
|
+
persona,
|
|
341
|
+
human,
|
|
342
|
+
# model info
|
|
343
|
+
model=None,
|
|
344
|
+
model_endpoint_type=None,
|
|
345
|
+
model_endpoint=None,
|
|
346
|
+
model_wrapper=None,
|
|
347
|
+
context_window=None,
|
|
348
|
+
# embedding info
|
|
349
|
+
embedding_endpoint_type=None,
|
|
350
|
+
embedding_endpoint=None,
|
|
351
|
+
embedding_model=None,
|
|
352
|
+
embedding_dim=None,
|
|
353
|
+
embedding_chunk_size=None,
|
|
354
|
+
# other
|
|
355
|
+
preset=None,
|
|
356
|
+
data_sources=None,
|
|
357
|
+
# agent info
|
|
358
|
+
agent_config_path=None,
|
|
359
|
+
name=None,
|
|
360
|
+
create_time=None,
|
|
361
|
+
letta_version=None,
|
|
362
|
+
# functions
|
|
363
|
+
functions=None, # schema definitions ONLY (linked at runtime)
|
|
364
|
+
):
|
|
365
|
+
|
|
366
|
+
assert name, f"Agent name must be provided"
|
|
367
|
+
self.name = name
|
|
368
|
+
|
|
369
|
+
config = LettaConfig.load() # get default values
|
|
370
|
+
self.persona = config.persona if persona is None else persona
|
|
371
|
+
self.human = config.human if human is None else human
|
|
372
|
+
self.preset = config.preset if preset is None else preset
|
|
373
|
+
self.context_window = config.default_llm_config.context_window if context_window is None else context_window
|
|
374
|
+
self.model = config.default_llm_config.model if model is None else model
|
|
375
|
+
self.model_endpoint_type = config.default_llm_config.model_endpoint_type if model_endpoint_type is None else model_endpoint_type
|
|
376
|
+
self.model_endpoint = config.default_llm_config.model_endpoint if model_endpoint is None else model_endpoint
|
|
377
|
+
self.model_wrapper = config.default_llm_config.model_wrapper if model_wrapper is None else model_wrapper
|
|
378
|
+
self.llm_config = LLMConfig(
|
|
379
|
+
model=self.model,
|
|
380
|
+
model_endpoint_type=self.model_endpoint_type,
|
|
381
|
+
model_endpoint=self.model_endpoint,
|
|
382
|
+
model_wrapper=self.model_wrapper,
|
|
383
|
+
context_window=self.context_window,
|
|
384
|
+
)
|
|
385
|
+
self.embedding_endpoint_type = (
|
|
386
|
+
config.default_embedding_config.embedding_endpoint_type if embedding_endpoint_type is None else embedding_endpoint_type
|
|
387
|
+
)
|
|
388
|
+
self.embedding_endpoint = config.default_embedding_config.embedding_endpoint if embedding_endpoint is None else embedding_endpoint
|
|
389
|
+
self.embedding_model = config.default_embedding_config.embedding_model if embedding_model is None else embedding_model
|
|
390
|
+
self.embedding_dim = config.default_embedding_config.embedding_dim if embedding_dim is None else embedding_dim
|
|
391
|
+
self.embedding_chunk_size = (
|
|
392
|
+
config.default_embedding_config.embedding_chunk_size if embedding_chunk_size is None else embedding_chunk_size
|
|
393
|
+
)
|
|
394
|
+
self.embedding_config = EmbeddingConfig(
|
|
395
|
+
embedding_endpoint_type=self.embedding_endpoint_type,
|
|
396
|
+
embedding_endpoint=self.embedding_endpoint,
|
|
397
|
+
embedding_model=self.embedding_model,
|
|
398
|
+
embedding_dim=self.embedding_dim,
|
|
399
|
+
embedding_chunk_size=self.embedding_chunk_size,
|
|
400
|
+
)
|
|
401
|
+
|
|
402
|
+
# agent metadata
|
|
403
|
+
self.data_sources = data_sources if data_sources is not None else []
|
|
404
|
+
self.create_time = create_time if create_time is not None else utils.get_local_time()
|
|
405
|
+
if letta_version is None:
|
|
406
|
+
import letta
|
|
407
|
+
|
|
408
|
+
self.letta_version = letta.__version__
|
|
409
|
+
else:
|
|
410
|
+
self.letta_version = letta_version
|
|
411
|
+
|
|
412
|
+
# functions
|
|
413
|
+
self.functions = functions
|
|
414
|
+
|
|
415
|
+
# save agent config
|
|
416
|
+
self.agent_config_path = (
|
|
417
|
+
os.path.join(LETTA_DIR, "agents", self.name, "config.json") if agent_config_path is None else agent_config_path
|
|
418
|
+
)
|
|
419
|
+
|
|
420
|
+
def attach_data_source(self, data_source: str):
    """Record *data_source* on this agent and persist the updated config.

    TODO: warn the caller that only one source can be attached at a time,
    i.e. a previously attached source will be overridden.
    """
    self.data_sources.append(data_source)
    self.save()
|
|
425
|
+
|
|
426
|
+
def save_dir(self):
    """Return the root directory holding everything saved for this agent."""
    agents_root = os.path.join(LETTA_DIR, "agents")
    return os.path.join(agents_root, self.name)
|
|
428
|
+
|
|
429
|
+
def save_state_dir(self):
    """Return the directory where this agent's serialized state is written."""
    state_dir = os.path.join(LETTA_DIR, "agents", self.name, "agent_state")
    return state_dir
|
|
432
|
+
|
|
433
|
+
def save_persistence_manager_dir(self):
    """Return the directory where persistence-manager state is stored."""
    # Same path as os.path.join(LETTA_DIR, "agents", self.name, ...),
    # composed via the agent's root directory helper.
    return os.path.join(self.save_dir(), "persistence_manager")
|
|
436
|
+
|
|
437
|
+
def save_agent_index_dir(self):
    """Return the llama-index storage dir, nested inside the persistence-manager dir."""
    index_dir = os.path.join(self.save_persistence_manager_dir(), "index")
    return index_dir
|
|
440
|
+
|
|
441
|
+
def save(self):
    """Persist this agent's configuration to ``self.agent_config_path`` as JSON.

    Also refreshes ``self.letta_version`` so the written config records the
    package version that produced it.
    """
    # Make sure the agent's directory exists before writing into it.
    os.makedirs(os.path.join(LETTA_DIR, "agents", self.name), exist_ok=True)
    # Stamp the currently-installed package version into the config.
    self.letta_version = letta.__version__
    with open(self.agent_config_path, "w", encoding="utf-8") as f:
        # NOTE(review): vars(self) includes attributes such as self.llm_config
        # and self.embedding_config — this assumes every attribute is
        # JSON-serializable; confirm, or json.dump will raise TypeError.
        json.dump(vars(self), f, indent=4)
|
|
448
|
+
|
|
449
|
+
def to_agent_state(self):
    """Build an ``AgentState`` snapshot from this config's fields."""
    state_fields = {
        "name": self.name,
        "preset": self.preset,
        "persona": self.persona,
        "human": self.human,
        "llm_config": self.llm_config,
        "embedding_config": self.embedding_config,
        "create_time": self.create_time,
    }
    return AgentState(**state_fields)
|
|
459
|
+
|
|
460
|
+
@staticmethod
def exists(name: str):
    """Return True when a saved agent directory exists for *name*.

    NOTE: this checks for the agent's directory, not the config.json inside it.
    """
    agent_dir = os.path.join(LETTA_DIR, "agents", name)
    return os.path.exists(agent_dir)
|
|
465
|
+
|
|
466
|
+
@classmethod
def load(cls, name: str):
    """Load an agent config from its saved JSON file.

    For cross-version compatibility, any stored keys that the current
    ``__init__`` no longer accepts are dropped (with a debug message)
    before the config dict is passed to the constructor.

    Raises:
        AssertionError: if no config file exists for *name*.
    """
    agent_config_path = os.path.join(LETTA_DIR, "agents", name, "config.json")
    assert os.path.exists(agent_config_path), f"Agent config file does not exist at {agent_config_path}"
    with open(agent_config_path, "r", encoding="utf-8") as f:
        agent_config = json.load(f)
    # Allow compatibility across versions: drop fields __init__ doesn't accept.
    # inspect.getargspec() was deprecated since Python 3.0 and removed in 3.11;
    # getfullargspec() has always existed in Python 3, so use it directly.
    class_args = inspect.getfullargspec(cls.__init__).args
    for key in list(agent_config.keys()):
        if key not in class_args:
            utils.printd(f"Removing missing argument {key} from agent config")
            del agent_config[key]
    return cls(**agent_config)
|
|
@@ -0,0 +1,13 @@
|
|
|
1
|
+
{
|
|
2
|
+
"context_window": 200000,
|
|
3
|
+
"model": "claude-3-opus-20240229",
|
|
4
|
+
"model_endpoint_type": "anthropic",
|
|
5
|
+
"model_endpoint": "https://api.anthropic.com/v1",
|
|
6
|
+
"model_wrapper": null,
|
|
7
|
+
"embedding_endpoint_type": "hugging-face",
|
|
8
|
+
"embedding_endpoint": "https://embeddings.memgpt.ai",
|
|
9
|
+
"embedding_model": "BAAI/bge-large-en-v1.5",
|
|
10
|
+
"embedding_dim": 1024,
|
|
11
|
+
"embedding_chunk_size": 300
|
|
12
|
+
|
|
13
|
+
}
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
{
|
|
2
|
+
"context_window": 8192,
|
|
3
|
+
"model_endpoint_type": "openai",
|
|
4
|
+
"model_endpoint": "https://inference.memgpt.ai",
|
|
5
|
+
"model": "memgpt-openai",
|
|
6
|
+
"embedding_endpoint_type": "hugging-face",
|
|
7
|
+
"embedding_endpoint": "https://embeddings.memgpt.ai",
|
|
8
|
+
"embedding_model": "BAAI/bge-large-en-v1.5",
|
|
9
|
+
"embedding_dim": 1024,
|
|
10
|
+
"embedding_chunk_size": 300
|
|
11
|
+
}
|
|
@@ -0,0 +1,12 @@
|
|
|
1
|
+
{
|
|
2
|
+
"context_window": 8192,
|
|
3
|
+
"model": "gpt-4",
|
|
4
|
+
"model_endpoint_type": "openai",
|
|
5
|
+
"model_endpoint": "https://api.openai.com/v1",
|
|
6
|
+
"model_wrapper": null,
|
|
7
|
+
"embedding_endpoint_type": "openai",
|
|
8
|
+
"embedding_endpoint": "https://api.openai.com/v1",
|
|
9
|
+
"embedding_model": "text-embedding-ada-002",
|
|
10
|
+
"embedding_dim": 1536,
|
|
11
|
+
"embedding_chunk_size": 300
|
|
12
|
+
}
|