letta-nightly 0.1.7.dev20240924104148__py3-none-any.whl
This diff shows the contents of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only.
Potentially problematic release: this version of letta-nightly has been flagged in its registry.
- letta/__init__.py +24 -0
- letta/__main__.py +3 -0
- letta/agent.py +1427 -0
- letta/agent_store/chroma.py +295 -0
- letta/agent_store/db.py +546 -0
- letta/agent_store/lancedb.py +177 -0
- letta/agent_store/milvus.py +198 -0
- letta/agent_store/qdrant.py +201 -0
- letta/agent_store/storage.py +188 -0
- letta/benchmark/benchmark.py +96 -0
- letta/benchmark/constants.py +14 -0
- letta/cli/cli.py +689 -0
- letta/cli/cli_config.py +1282 -0
- letta/cli/cli_load.py +166 -0
- letta/client/__init__.py +0 -0
- letta/client/admin.py +171 -0
- letta/client/client.py +2360 -0
- letta/client/streaming.py +90 -0
- letta/client/utils.py +61 -0
- letta/config.py +484 -0
- letta/configs/anthropic.json +13 -0
- letta/configs/letta_hosted.json +11 -0
- letta/configs/openai.json +12 -0
- letta/constants.py +134 -0
- letta/credentials.py +140 -0
- letta/data_sources/connectors.py +247 -0
- letta/embeddings.py +218 -0
- letta/errors.py +26 -0
- letta/functions/__init__.py +0 -0
- letta/functions/function_sets/base.py +174 -0
- letta/functions/function_sets/extras.py +132 -0
- letta/functions/functions.py +105 -0
- letta/functions/schema_generator.py +205 -0
- letta/humans/__init__.py +0 -0
- letta/humans/examples/basic.txt +1 -0
- letta/humans/examples/cs_phd.txt +9 -0
- letta/interface.py +314 -0
- letta/llm_api/__init__.py +0 -0
- letta/llm_api/anthropic.py +383 -0
- letta/llm_api/azure_openai.py +155 -0
- letta/llm_api/cohere.py +396 -0
- letta/llm_api/google_ai.py +468 -0
- letta/llm_api/llm_api_tools.py +485 -0
- letta/llm_api/openai.py +470 -0
- letta/local_llm/README.md +3 -0
- letta/local_llm/__init__.py +0 -0
- letta/local_llm/chat_completion_proxy.py +279 -0
- letta/local_llm/constants.py +31 -0
- letta/local_llm/function_parser.py +68 -0
- letta/local_llm/grammars/__init__.py +0 -0
- letta/local_llm/grammars/gbnf_grammar_generator.py +1324 -0
- letta/local_llm/grammars/json.gbnf +26 -0
- letta/local_llm/grammars/json_func_calls_with_inner_thoughts.gbnf +32 -0
- letta/local_llm/groq/api.py +97 -0
- letta/local_llm/json_parser.py +202 -0
- letta/local_llm/koboldcpp/api.py +62 -0
- letta/local_llm/koboldcpp/settings.py +23 -0
- letta/local_llm/llamacpp/api.py +58 -0
- letta/local_llm/llamacpp/settings.py +22 -0
- letta/local_llm/llm_chat_completion_wrappers/__init__.py +0 -0
- letta/local_llm/llm_chat_completion_wrappers/airoboros.py +452 -0
- letta/local_llm/llm_chat_completion_wrappers/chatml.py +470 -0
- letta/local_llm/llm_chat_completion_wrappers/configurable_wrapper.py +387 -0
- letta/local_llm/llm_chat_completion_wrappers/dolphin.py +246 -0
- letta/local_llm/llm_chat_completion_wrappers/llama3.py +345 -0
- letta/local_llm/llm_chat_completion_wrappers/simple_summary_wrapper.py +156 -0
- letta/local_llm/llm_chat_completion_wrappers/wrapper_base.py +11 -0
- letta/local_llm/llm_chat_completion_wrappers/zephyr.py +345 -0
- letta/local_llm/lmstudio/api.py +100 -0
- letta/local_llm/lmstudio/settings.py +29 -0
- letta/local_llm/ollama/api.py +88 -0
- letta/local_llm/ollama/settings.py +32 -0
- letta/local_llm/settings/__init__.py +0 -0
- letta/local_llm/settings/deterministic_mirostat.py +45 -0
- letta/local_llm/settings/settings.py +72 -0
- letta/local_llm/settings/simple.py +28 -0
- letta/local_llm/utils.py +265 -0
- letta/local_llm/vllm/api.py +63 -0
- letta/local_llm/webui/api.py +60 -0
- letta/local_llm/webui/legacy_api.py +58 -0
- letta/local_llm/webui/legacy_settings.py +23 -0
- letta/local_llm/webui/settings.py +24 -0
- letta/log.py +76 -0
- letta/main.py +437 -0
- letta/memory.py +440 -0
- letta/metadata.py +884 -0
- letta/openai_backcompat/__init__.py +0 -0
- letta/openai_backcompat/openai_object.py +437 -0
- letta/persistence_manager.py +148 -0
- letta/personas/__init__.py +0 -0
- letta/personas/examples/anna_pa.txt +13 -0
- letta/personas/examples/google_search_persona.txt +15 -0
- letta/personas/examples/memgpt_doc.txt +6 -0
- letta/personas/examples/memgpt_starter.txt +4 -0
- letta/personas/examples/sam.txt +14 -0
- letta/personas/examples/sam_pov.txt +14 -0
- letta/personas/examples/sam_simple_pov_gpt35.txt +13 -0
- letta/personas/examples/sqldb/test.db +0 -0
- letta/prompts/__init__.py +0 -0
- letta/prompts/gpt_summarize.py +14 -0
- letta/prompts/gpt_system.py +26 -0
- letta/prompts/system/memgpt_base.txt +49 -0
- letta/prompts/system/memgpt_chat.txt +58 -0
- letta/prompts/system/memgpt_chat_compressed.txt +13 -0
- letta/prompts/system/memgpt_chat_fstring.txt +51 -0
- letta/prompts/system/memgpt_doc.txt +50 -0
- letta/prompts/system/memgpt_gpt35_extralong.txt +53 -0
- letta/prompts/system/memgpt_intuitive_knowledge.txt +31 -0
- letta/prompts/system/memgpt_modified_chat.txt +23 -0
- letta/pytest.ini +0 -0
- letta/schemas/agent.py +117 -0
- letta/schemas/api_key.py +21 -0
- letta/schemas/block.py +135 -0
- letta/schemas/document.py +21 -0
- letta/schemas/embedding_config.py +54 -0
- letta/schemas/enums.py +35 -0
- letta/schemas/job.py +38 -0
- letta/schemas/letta_base.py +80 -0
- letta/schemas/letta_message.py +175 -0
- letta/schemas/letta_request.py +23 -0
- letta/schemas/letta_response.py +28 -0
- letta/schemas/llm_config.py +54 -0
- letta/schemas/memory.py +224 -0
- letta/schemas/message.py +727 -0
- letta/schemas/openai/chat_completion_request.py +123 -0
- letta/schemas/openai/chat_completion_response.py +136 -0
- letta/schemas/openai/chat_completions.py +123 -0
- letta/schemas/openai/embedding_response.py +11 -0
- letta/schemas/openai/openai.py +157 -0
- letta/schemas/organization.py +20 -0
- letta/schemas/passage.py +80 -0
- letta/schemas/source.py +62 -0
- letta/schemas/tool.py +143 -0
- letta/schemas/usage.py +18 -0
- letta/schemas/user.py +33 -0
- letta/server/__init__.py +0 -0
- letta/server/constants.py +6 -0
- letta/server/rest_api/__init__.py +0 -0
- letta/server/rest_api/admin/__init__.py +0 -0
- letta/server/rest_api/admin/agents.py +21 -0
- letta/server/rest_api/admin/tools.py +83 -0
- letta/server/rest_api/admin/users.py +98 -0
- letta/server/rest_api/app.py +193 -0
- letta/server/rest_api/auth/__init__.py +0 -0
- letta/server/rest_api/auth/index.py +43 -0
- letta/server/rest_api/auth_token.py +22 -0
- letta/server/rest_api/interface.py +726 -0
- letta/server/rest_api/routers/__init__.py +0 -0
- letta/server/rest_api/routers/openai/__init__.py +0 -0
- letta/server/rest_api/routers/openai/assistants/__init__.py +0 -0
- letta/server/rest_api/routers/openai/assistants/assistants.py +115 -0
- letta/server/rest_api/routers/openai/assistants/schemas.py +121 -0
- letta/server/rest_api/routers/openai/assistants/threads.py +336 -0
- letta/server/rest_api/routers/openai/chat_completions/__init__.py +0 -0
- letta/server/rest_api/routers/openai/chat_completions/chat_completions.py +131 -0
- letta/server/rest_api/routers/v1/__init__.py +15 -0
- letta/server/rest_api/routers/v1/agents.py +543 -0
- letta/server/rest_api/routers/v1/blocks.py +73 -0
- letta/server/rest_api/routers/v1/jobs.py +46 -0
- letta/server/rest_api/routers/v1/llms.py +28 -0
- letta/server/rest_api/routers/v1/organizations.py +61 -0
- letta/server/rest_api/routers/v1/sources.py +199 -0
- letta/server/rest_api/routers/v1/tools.py +103 -0
- letta/server/rest_api/routers/v1/users.py +109 -0
- letta/server/rest_api/static_files.py +74 -0
- letta/server/rest_api/utils.py +69 -0
- letta/server/server.py +1995 -0
- letta/server/startup.sh +8 -0
- letta/server/static_files/assets/index-0cbf7ad5.js +274 -0
- letta/server/static_files/assets/index-156816da.css +1 -0
- letta/server/static_files/assets/index-486e3228.js +274 -0
- letta/server/static_files/favicon.ico +0 -0
- letta/server/static_files/index.html +39 -0
- letta/server/static_files/memgpt_logo_transparent.png +0 -0
- letta/server/utils.py +46 -0
- letta/server/ws_api/__init__.py +0 -0
- letta/server/ws_api/example_client.py +104 -0
- letta/server/ws_api/interface.py +108 -0
- letta/server/ws_api/protocol.py +100 -0
- letta/server/ws_api/server.py +145 -0
- letta/settings.py +165 -0
- letta/streaming_interface.py +396 -0
- letta/system.py +207 -0
- letta/utils.py +1065 -0
- letta_nightly-0.1.7.dev20240924104148.dist-info/LICENSE +190 -0
- letta_nightly-0.1.7.dev20240924104148.dist-info/METADATA +98 -0
- letta_nightly-0.1.7.dev20240924104148.dist-info/RECORD +189 -0
- letta_nightly-0.1.7.dev20240924104148.dist-info/WHEEL +4 -0
- letta_nightly-0.1.7.dev20240924104148.dist-info/entry_points.txt +3 -0
letta/embeddings.py
ADDED
@@ -0,0 +1,218 @@

import os
import uuid
from typing import Any, List, Optional

import numpy as np

# from llama_index.core.base.embeddings import BaseEmbedding
# from llama_index.core.embeddings import BaseEmbedding
# from llama_index.core.base.embeddings.base import BaseEmbedding
# from llama_index.bridge.pydantic import PrivateAttr
# from llama_index.embeddings.base import BaseEmbedding
# from llama_index.embeddings.huggingface_utils import format_text
import tiktoken
from llama_index.core import Document as LlamaIndexDocument

# from llama_index.core.base.embeddings import BaseEmbedding
from llama_index.core.node_parser import SentenceSplitter

from letta.constants import (
    EMBEDDING_TO_TOKENIZER_DEFAULT,
    EMBEDDING_TO_TOKENIZER_MAP,
    MAX_EMBEDDING_DIM,
)
from letta.credentials import LettaCredentials
from letta.schemas.embedding_config import EmbeddingConfig
from letta.utils import is_valid_url, printd


def parse_and_chunk_text(text: str, chunk_size: int) -> List[str]:
    parser = SentenceSplitter(chunk_size=chunk_size)
    llama_index_docs = [LlamaIndexDocument(text=text)]
    nodes = parser.get_nodes_from_documents(llama_index_docs)
    return [n.text for n in nodes]


def truncate_text(text: str, max_length: int, encoding) -> str:
    # truncate the text based on max_length and encoding
    encoded_text = encoding.encode(text)[:max_length]
    return encoding.decode(encoded_text)


def check_and_split_text(text: str, embedding_model: str) -> List[str]:
    """Split text into chunks of max_length tokens or less"""

    if embedding_model in EMBEDDING_TO_TOKENIZER_MAP:
        encoding = tiktoken.get_encoding(EMBEDDING_TO_TOKENIZER_MAP[embedding_model])
    else:
        print(f"Warning: couldn't find tokenizer for model {embedding_model}, using default tokenizer {EMBEDDING_TO_TOKENIZER_DEFAULT}")
        encoding = tiktoken.get_encoding(EMBEDDING_TO_TOKENIZER_DEFAULT)

    num_tokens = len(encoding.encode(text))

    # determine max length
    if hasattr(encoding, "max_length"):
        # TODO(fix) this is broken
        max_length = encoding.max_length
    else:
        # TODO: figure out the real number
        printd(f"Warning: couldn't find max_length for tokenizer {embedding_model}, using default max_length 8191")
        max_length = 8191

    # truncate text if too long
    if num_tokens > max_length:
        print(f"Warning: text is too long ({num_tokens} tokens), truncating to {max_length} tokens.")
        # NOTE: format_text (from llama_index's huggingface_utils) is no longer
        # imported above (its import is commented out), so truncate directly
        # instead of formatting first
        text = truncate_text(text, max_length, encoding)

    return [text]


class EmbeddingEndpoint:
    """Implementation for OpenAI compatible endpoint"""

    # """ Based off llama index https://github.com/run-llama/llama_index/blob/a98bdb8ecee513dc2e880f56674e7fd157d1dc3a/llama_index/embeddings/text_embeddings_inference.py """

    # _user: str = PrivateAttr()
    # _timeout: float = PrivateAttr()
    # _base_url: str = PrivateAttr()

    def __init__(
        self,
        model: str,
        base_url: str,
        user: str,
        timeout: float = 60.0,
        **kwargs: Any,
    ):
        if not is_valid_url(base_url):
            raise ValueError(
                f"Embeddings endpoint was provided an invalid URL (set to: '{base_url}'). Make sure embedding_endpoint is set correctly in your Letta config."
            )
        self.model_name = model
        self._user = user
        self._base_url = base_url
        self._timeout = timeout

    def _call_api(self, text: str) -> List[float]:
        if not is_valid_url(self._base_url):
            raise ValueError(
                f"Embeddings endpoint does not have a valid URL (set to: '{self._base_url}'). Make sure embedding_endpoint is set correctly in your Letta config."
            )
        import httpx

        headers = {"Content-Type": "application/json"}
        json_data = {"input": text, "model": self.model_name, "user": self._user}

        with httpx.Client() as client:
            response = client.post(
                f"{self._base_url}/embeddings",
                headers=headers,
                json=json_data,
                timeout=self._timeout,
            )

        response_json = response.json()

        if isinstance(response_json, list):
            # embedding directly in response
            embedding = response_json
        elif isinstance(response_json, dict):
            # TEI embedding packaged inside openai-style response
            try:
                embedding = response_json["data"][0]["embedding"]
            except (KeyError, IndexError):
                raise TypeError(f"Got back an unexpected payload from text embedding function, response=\n{response_json}")
        else:
            # unknown response, can't parse
            raise TypeError(f"Got back an unexpected payload from text embedding function, response=\n{response_json}")

        return embedding

    def get_text_embedding(self, text: str) -> List[float]:
        return self._call_api(text)


def default_embedding_model():
    # default to hugging face model running local
    # warning: this is a terrible model
    from llama_index.embeddings.huggingface import HuggingFaceEmbedding

    os.environ["TOKENIZERS_PARALLELISM"] = "False"
    model = "BAAI/bge-small-en-v1.5"
    return HuggingFaceEmbedding(model_name=model)


def query_embedding(embedding_model, query_text: str):
    """Generate padded embedding for querying database"""
    query_vec = embedding_model.get_text_embedding(query_text)
    query_vec = np.array(query_vec)
    query_vec = np.pad(query_vec, (0, MAX_EMBEDDING_DIM - query_vec.shape[0]), mode="constant").tolist()
    return query_vec


def embedding_model(config: EmbeddingConfig, user_id: Optional[uuid.UUID] = None):
    """Return LlamaIndex embedding model to use for embeddings"""

    endpoint_type = config.embedding_endpoint_type

    # TODO refactor to pass credentials through args
    credentials = LettaCredentials.load()

    if endpoint_type == "openai":
        from llama_index.embeddings.openai import OpenAIEmbedding

        additional_kwargs = {"user_id": user_id} if user_id else {}
        model = OpenAIEmbedding(
            api_base=config.embedding_endpoint,
            api_key=credentials.openai_key,
            additional_kwargs=additional_kwargs,
        )
        return model

    elif endpoint_type == "azure":
        assert all(
            [
                credentials.azure_key is not None,
                credentials.azure_embedding_endpoint is not None,
                credentials.azure_version is not None,
            ]
        )
        from llama_index.embeddings.azure_openai import AzureOpenAIEmbedding

        # https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#embeddings
        model = "text-embedding-ada-002"
        deployment = credentials.azure_embedding_deployment if credentials.azure_embedding_deployment is not None else model
        return AzureOpenAIEmbedding(
            model=model,
            deployment_name=deployment,
            api_key=credentials.azure_key,
            azure_endpoint=credentials.azure_endpoint,
            api_version=credentials.azure_version,
        )

    elif endpoint_type == "hugging-face":
        return EmbeddingEndpoint(
            model=config.embedding_model,
            base_url=config.embedding_endpoint,
            user=user_id,
        )
    elif endpoint_type == "ollama":
        from llama_index.embeddings.ollama import OllamaEmbedding

        model = OllamaEmbedding(
            model_name=config.embedding_model,
            base_url=config.embedding_endpoint,
            ollama_additional_kwargs={},
            callback_manager=None,
        )
        return model

    else:
        return default_embedding_model()
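
For orientation, a minimal sketch of how these helpers compose. The endpoint URL, model names, and user id below are illustrative placeholders, not values shipped in this diff:

    from letta.embeddings import EmbeddingEndpoint, check_and_split_text, query_embedding

    long_text = "Some document text to embed. " * 200

    endpoint = EmbeddingEndpoint(
        model="BAAI/bge-small-en-v1.5",       # assumed model served by the endpoint
        base_url="http://localhost:8080/v1",  # assumed OpenAI-compatible server
        user="user-123",
    )

    # chunks are tokenized/truncated against the named tokenizer, then embedded
    for chunk in check_and_split_text(long_text, "text-embedding-ada-002"):
        vec = query_embedding(endpoint, chunk)  # list[float], zero-padded to MAX_EMBEDDING_DIM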
letta/errors.py
ADDED
@@ -0,0 +1,26 @@

class LLMError(Exception):
    """Base class for all LLM-related errors."""


class LLMJSONParsingError(LLMError):
    """Exception raised for errors in the JSON parsing process."""

    def __init__(self, message="Error parsing JSON generated by LLM"):
        self.message = message
        super().__init__(self.message)


class LocalLLMError(LLMError):
    """Generic catch-all error for local LLM problems"""

    def __init__(self, message="Encountered an error while running local LLM"):
        self.message = message
        super().__init__(self.message)


class LocalLLMConnectionError(LLMError):
    """Error for when local LLM cannot be reached with provided IP/port"""

    def __init__(self, message="Could not connect to local LLM"):
        self.message = message
        super().__init__(self.message)
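
All three concrete exceptions subclass LLMError, so callers can catch the base class. A small illustrative sketch:

    from letta.errors import LLMError, LLMJSONParsingError

    try:
        raise LLMJSONParsingError("model emitted malformed JSON")
    except LLMError as err:
        # LLMJSONParsingError, LocalLLMError, and LocalLLMConnectionError all land here
        print(f"LLM failure: {err.message}")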
letta/functions/__init__.py
File without changes

letta/functions/function_sets/base.py
ADDED
@@ -0,0 +1,174 @@

from typing import Optional

from letta.agent import Agent
from letta.constants import MAX_PAUSE_HEARTBEATS

# import math
# from letta.utils import json_dumps

### Functions / tools the agent can use
# All functions should return a response string (or None)
# If the function fails, throw an exception


def send_message(self: Agent, message: str) -> Optional[str]:
    """
    Sends a message to the human user.

    Args:
        message (str): Message contents. All unicode (including emojis) are supported.

    Returns:
        Optional[str]: None is always returned as this function does not produce a response.
    """
    # FIXME passing of msg_obj here is a hack, unclear if guaranteed to be the correct reference
    self.interface.assistant_message(message)  # , msg_obj=self._messages[-1])
    return None


# Construct the docstring dynamically (since it should use the external constants)
pause_heartbeats_docstring = f"""
Temporarily ignore timed heartbeats. You may still receive messages from manual heartbeats and other events.

Args:
    minutes (int): Number of minutes to ignore heartbeats for. Max value of {MAX_PAUSE_HEARTBEATS} minutes ({MAX_PAUSE_HEARTBEATS // 60} hours).

Returns:
    str: Function status response
"""


def pause_heartbeats(self: Agent, minutes: int) -> Optional[str]:
    import datetime

    from letta.constants import MAX_PAUSE_HEARTBEATS

    minutes = min(MAX_PAUSE_HEARTBEATS, minutes)

    # Record the current time
    self.pause_heartbeats_start = datetime.datetime.now(datetime.timezone.utc)
    # And record how long the pause should go for
    self.pause_heartbeats_minutes = int(minutes)

    return f"Pausing timed heartbeats for {minutes} min"


pause_heartbeats.__doc__ = pause_heartbeats_docstring


def conversation_search(self: Agent, query: str, page: Optional[int] = 0) -> Optional[str]:
    """
    Search prior conversation history using case-insensitive string matching.

    Args:
        query (str): String to search for.
        page (int): Allows you to page through results. Only use on a follow-up query. Defaults to 0 (first page).

    Returns:
        str: Query result string
    """
    import math

    from letta.constants import RETRIEVAL_QUERY_DEFAULT_PAGE_SIZE
    from letta.utils import json_dumps

    if page is None or (isinstance(page, str) and page.lower().strip() == "none"):
        page = 0
    try:
        page = int(page)
    except (TypeError, ValueError):
        raise ValueError("'page' argument must be an integer")
    count = RETRIEVAL_QUERY_DEFAULT_PAGE_SIZE
    results, total = self.persistence_manager.recall_memory.text_search(query, count=count, start=page * count)
    num_pages = math.ceil(total / count) - 1  # 0 index
    if len(results) == 0:
        results_str = "No results found."
    else:
        results_pref = f"Showing {len(results)} of {total} results (page {page}/{num_pages}):"
        results_formatted = [f"timestamp: {d['timestamp']}, {d['message']['role']} - {d['message']['content']}" for d in results]
        results_str = f"{results_pref} {json_dumps(results_formatted)}"
    return results_str


def conversation_search_date(self: Agent, start_date: str, end_date: str, page: Optional[int] = 0) -> Optional[str]:
    """
    Search prior conversation history using a date range.

    Args:
        start_date (str): The start of the date range to search, in the format 'YYYY-MM-DD'.
        end_date (str): The end of the date range to search, in the format 'YYYY-MM-DD'.
        page (int): Allows you to page through results. Only use on a follow-up query. Defaults to 0 (first page).

    Returns:
        str: Query result string
    """
    import math

    from letta.constants import RETRIEVAL_QUERY_DEFAULT_PAGE_SIZE
    from letta.utils import json_dumps

    if page is None or (isinstance(page, str) and page.lower().strip() == "none"):
        page = 0
    try:
        page = int(page)
    except (TypeError, ValueError):
        raise ValueError("'page' argument must be an integer")
    count = RETRIEVAL_QUERY_DEFAULT_PAGE_SIZE
    results, total = self.persistence_manager.recall_memory.date_search(start_date, end_date, count=count, start=page * count)
    num_pages = math.ceil(total / count) - 1  # 0 index
    if len(results) == 0:
        results_str = "No results found."
    else:
        results_pref = f"Showing {len(results)} of {total} results (page {page}/{num_pages}):"
        results_formatted = [f"timestamp: {d['timestamp']}, {d['message']['role']} - {d['message']['content']}" for d in results]
        results_str = f"{results_pref} {json_dumps(results_formatted)}"
    return results_str


def archival_memory_insert(self: Agent, content: str) -> Optional[str]:
    """
    Add to archival memory. Make sure to phrase the memory contents such that it can be easily queried later.

    Args:
        content (str): Content to write to the memory. All unicode (including emojis) are supported.

    Returns:
        Optional[str]: None is always returned as this function does not produce a response.
    """
    self.persistence_manager.archival_memory.insert(content)
    return None


def archival_memory_search(self: Agent, query: str, page: Optional[int] = 0) -> Optional[str]:
    """
    Search archival memory using semantic (embedding-based) search.

    Args:
        query (str): String to search for.
        page (Optional[int]): Allows you to page through results. Only use on a follow-up query. Defaults to 0 (first page).

    Returns:
        str: Query result string
    """
    import math

    from letta.constants import RETRIEVAL_QUERY_DEFAULT_PAGE_SIZE
    from letta.utils import json_dumps

    if page is None or (isinstance(page, str) and page.lower().strip() == "none"):
        page = 0
    try:
        page = int(page)
    except (TypeError, ValueError):
        raise ValueError("'page' argument must be an integer")
    count = RETRIEVAL_QUERY_DEFAULT_PAGE_SIZE
    results, total = self.persistence_manager.archival_memory.search(query, count=count, start=page * count)
    num_pages = math.ceil(total / count) - 1  # 0 index
    if len(results) == 0:
        results_str = "No results found."
    else:
        results_pref = f"Showing {len(results)} of {total} results (page {page}/{num_pages}):"
        results_formatted = [f"timestamp: {d['timestamp']}, memory: {d['content']}" for d in results]
        results_str = f"{results_pref} {json_dumps(results_formatted)}"
    return results_str
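
The three search tools above share one paging scheme: the offset passed to the store is page * count, and num_pages is zero-indexed. A standalone sketch of that arithmetic (the count of 5 is an assumption standing in for RETRIEVAL_QUERY_DEFAULT_PAGE_SIZE, whose real value lives in letta.constants):

    import math

    def page_window(page: int, total: int, count: int = 5):
        # count stands in for RETRIEVAL_QUERY_DEFAULT_PAGE_SIZE
        start = page * count                      # offset passed to text_search / date_search / search
        num_pages = math.ceil(total / count) - 1  # zero-indexed, matching the "page {page}/{num_pages}" header
        return start, num_pages

    start, last_page = page_window(page=2, total=23)
    assert (start, last_page) == (10, 4)          # results 10..14 are shown as "page 2/4"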

letta/functions/function_sets/extras.py
ADDED
@@ -0,0 +1,132 @@

import os
import uuid
from typing import Optional

import requests

from letta.constants import (
    MESSAGE_CHATGPT_FUNCTION_MODEL,
    MESSAGE_CHATGPT_FUNCTION_SYSTEM_MESSAGE,
)
from letta.llm_api.llm_api_tools import create
from letta.schemas.message import Message
from letta.utils import json_dumps, json_loads


def message_chatgpt(self, message: str):
    """
    Send a message to a more basic AI, ChatGPT. A useful resource for asking questions. ChatGPT does not retain memory of previous interactions.

    Args:
        message (str): Message to send ChatGPT. Phrase your message as a full English sentence.

    Returns:
        str: Reply message from ChatGPT
    """
    dummy_user_id = uuid.uuid4()
    dummy_agent_id = uuid.uuid4()
    message_sequence = [
        Message(user_id=dummy_user_id, agent_id=dummy_agent_id, role="system", text=MESSAGE_CHATGPT_FUNCTION_SYSTEM_MESSAGE),
        Message(user_id=dummy_user_id, agent_id=dummy_agent_id, role="user", text=str(message)),
    ]
    # TODO: this will error without an LLMConfig
    response = create(
        model=MESSAGE_CHATGPT_FUNCTION_MODEL,
        messages=message_sequence,
    )

    reply = response.choices[0].message.content
    return reply


def read_from_text_file(self, filename: str, line_start: int, num_lines: Optional[int] = 1):
    """
    Read lines from a text file.

    Args:
        filename (str): The name of the file to read.
        line_start (int): Line to start reading from.
        num_lines (Optional[int]): How many lines to read (defaults to 1).

    Returns:
        str: Text read from the file
    """
    max_chars = 500
    trunc_message = True
    if not os.path.exists(filename):
        raise FileNotFoundError(f"The file '{filename}' does not exist.")

    if line_start < 1 or num_lines < 1:
        raise ValueError("Both line_start and num_lines must be positive integers.")

    lines = []
    chars_read = 0
    with open(filename, "r", encoding="utf-8") as file:
        for current_line_number, line in enumerate(file, start=1):
            if line_start <= current_line_number < line_start + num_lines:
                chars_to_add = len(line)
                if max_chars is not None and chars_read + chars_to_add > max_chars:
                    # If adding this line exceeds MAX_CHARS, truncate the line if needed and stop reading further.
                    excess_chars = (chars_read + chars_to_add) - max_chars
                    lines.append(line[:-excess_chars].rstrip("\n"))
                    if trunc_message:
                        lines.append(f"[SYSTEM ALERT - max chars ({max_chars}) reached during file read]")
                    break
                else:
                    lines.append(line.rstrip("\n"))
                    chars_read += chars_to_add
            if current_line_number >= line_start + num_lines - 1:
                break

    return "\n".join(lines)


def append_to_text_file(self, filename: str, content: str):
    """
    Append to a text file.

    Args:
        filename (str): The name of the file to append to.
        content (str): Content to append to the file.

    Returns:
        Optional[str]: None is always returned as this function does not produce a response.
    """
    if not os.path.exists(filename):
        raise FileNotFoundError(f"The file '{filename}' does not exist.")

    with open(filename, "a", encoding="utf-8") as file:
        file.write(content + "\n")


def http_request(self, method: str, url: str, payload_json: Optional[str] = None):
    """
    Generates an HTTP request and returns the response.

    Args:
        method (str): The HTTP method (e.g., 'GET', 'POST').
        url (str): The URL for the request.
        payload_json (Optional[str]): A JSON string representing the request payload.

    Returns:
        dict: The response from the HTTP request.
    """
    try:
        headers = {"Content-Type": "application/json"}

        # For GET requests, ignore the payload
        if method.upper() == "GET":
            print(f"[HTTP] launching GET request to {url}")
            response = requests.get(url, headers=headers)
        else:
            # Validate and convert the payload for other types of requests
            if payload_json:
                payload = json_loads(payload_json)
            else:
                payload = {}
            print(f"[HTTP] launching {method} request to {url}, payload=\n{json_dumps(payload, indent=2)}")
            response = requests.request(method, url, json=payload, headers=headers)

        return {"status_code": response.status_code, "headers": dict(response.headers), "body": response.text}
    except Exception as e:
        return {"error": str(e)}
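
A hypothetical standalone call through http_request (the URL and payload are illustrative; self is never used inside the function, so a stub suffices for a demo):

    from letta.functions.function_sets.extras import http_request

    # 'self' is unused inside http_request, so None works outside an Agent
    result = http_request(None, "POST", "http://localhost:8000/echo", payload_json='{"hello": "world"}')
    if "error" in result:
        print("request failed:", result["error"])
    else:
        print(result["status_code"], result["body"])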

letta/functions/functions.py
ADDED
@@ -0,0 +1,105 @@

import importlib
import inspect
import os
import sys
from textwrap import dedent  # remove indentation
from types import ModuleType

from letta.constants import CLI_WARNING_PREFIX, LETTA_DIR
from letta.functions.schema_generator import generate_schema

USER_FUNCTIONS_DIR = os.path.join(LETTA_DIR, "functions")

sys.path.append(USER_FUNCTIONS_DIR)


def parse_source_code(func) -> str:
    """Parse the source code of a function and remove indentation"""
    source_code = dedent(inspect.getsource(func))
    return source_code


def load_function_set(module: ModuleType) -> dict:
    """Load the functions and generate schema for them, given a module object"""
    function_dict = {}

    for attr_name in dir(module):
        # Get the attribute
        attr = getattr(module, attr_name)

        # Check if it's a callable function and not a built-in or special method
        if inspect.isfunction(attr) and attr.__module__ == module.__name__:
            if attr_name in function_dict:
                raise ValueError(f"Found a duplicate of function name '{attr_name}'")

            generated_schema = generate_schema(attr)
            function_dict[attr_name] = {
                "module": inspect.getsource(module),
                "python_function": attr,
                "json_schema": generated_schema,
            }

    if len(function_dict) == 0:
        raise ValueError(f"No functions found in module {module}")
    return function_dict


def validate_function(module_name, module_full_path):
    # compute the basename outside the try block so the except handlers can reference it
    file = os.path.basename(module_full_path)
    try:
        spec = importlib.util.spec_from_file_location(module_name, module_full_path)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
    except ModuleNotFoundError as e:
        # Handle missing module imports
        missing_package = str(e).split("'")[1]  # Extract the name of the missing package
        print(f"{CLI_WARNING_PREFIX}skipped loading python file '{module_full_path}'!")
        return (
            False,
            f"'{file}' imports '{missing_package}', but '{missing_package}' is not installed locally - install python package '{missing_package}' to link functions from '{file}' to Letta.",
        )
    except SyntaxError as e:
        # Handle syntax errors in the module
        return False, f"{CLI_WARNING_PREFIX}skipped loading python file '{file}' due to a syntax error: {e}"
    except Exception as e:
        # Handle other general exceptions
        return False, f"{CLI_WARNING_PREFIX}skipped loading python file '{file}': {e}"

    return True, None


def write_function(module_name: str, function_name: str, function_code: str):
    """Write a function to a file in the user functions directory"""
    # Create the user functions directory if it doesn't exist
    if not os.path.exists(USER_FUNCTIONS_DIR):
        os.makedirs(USER_FUNCTIONS_DIR)

    # Write the function to a file
    file_path = os.path.join(USER_FUNCTIONS_DIR, f"{module_name}.py")
    with open(file_path, "w", encoding="utf-8") as f:
        f.write(function_code)
    succ, error = validate_function(module_name, file_path)

    # raise error if function cannot be loaded
    if not succ:
        raise ValueError(error)
    return file_path


def load_function_file(filepath: str) -> dict:
    file = os.path.basename(filepath)
    module_name = file[:-3]  # Remove '.py' from filename
    try:
        spec = importlib.util.spec_from_file_location(module_name, filepath)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
    except ModuleNotFoundError as e:
        # Handle missing module imports
        missing_package = str(e).split("'")[1]  # Extract the name of the missing package
        print(f"{CLI_WARNING_PREFIX}skipped loading python file '{filepath}'!")
        print(
            f"'{file}' imports '{missing_package}', but '{missing_package}' is not installed locally - install python package '{missing_package}' to link functions from '{file}' to Letta."
        )
    # load all functions in the module
    function_dict = load_function_set(module)
    return function_dict
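
A hypothetical round trip through write_function and load_function_file (the module and function names are made up for illustration; the docstring is included since generate_schema presumably derives each function's JSON schema from its signature and docstring):

    from letta.functions.functions import write_function, load_function_file

    function_code = '''
    def roll_d20(self) -> str:
        """
        Roll a 20-sided die.

        Returns:
            str: The result of the roll.
        """
        import random
        return str(random.randint(1, 20))
    '''

    # writes <USER_FUNCTIONS_DIR>/my_tools.py (validated by importing it), then loads the schemas
    path = write_function("my_tools", "roll_d20", function_code)
    schemas = load_function_file(path)
    print(schemas["roll_d20"]["json_schema"])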