letta-nightly 0.1.7.dev20240924104148__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of letta-nightly might be problematic. Click here for more details.
- letta/__init__.py +24 -0
- letta/__main__.py +3 -0
- letta/agent.py +1427 -0
- letta/agent_store/chroma.py +295 -0
- letta/agent_store/db.py +546 -0
- letta/agent_store/lancedb.py +177 -0
- letta/agent_store/milvus.py +198 -0
- letta/agent_store/qdrant.py +201 -0
- letta/agent_store/storage.py +188 -0
- letta/benchmark/benchmark.py +96 -0
- letta/benchmark/constants.py +14 -0
- letta/cli/cli.py +689 -0
- letta/cli/cli_config.py +1282 -0
- letta/cli/cli_load.py +166 -0
- letta/client/__init__.py +0 -0
- letta/client/admin.py +171 -0
- letta/client/client.py +2360 -0
- letta/client/streaming.py +90 -0
- letta/client/utils.py +61 -0
- letta/config.py +484 -0
- letta/configs/anthropic.json +13 -0
- letta/configs/letta_hosted.json +11 -0
- letta/configs/openai.json +12 -0
- letta/constants.py +134 -0
- letta/credentials.py +140 -0
- letta/data_sources/connectors.py +247 -0
- letta/embeddings.py +218 -0
- letta/errors.py +26 -0
- letta/functions/__init__.py +0 -0
- letta/functions/function_sets/base.py +174 -0
- letta/functions/function_sets/extras.py +132 -0
- letta/functions/functions.py +105 -0
- letta/functions/schema_generator.py +205 -0
- letta/humans/__init__.py +0 -0
- letta/humans/examples/basic.txt +1 -0
- letta/humans/examples/cs_phd.txt +9 -0
- letta/interface.py +314 -0
- letta/llm_api/__init__.py +0 -0
- letta/llm_api/anthropic.py +383 -0
- letta/llm_api/azure_openai.py +155 -0
- letta/llm_api/cohere.py +396 -0
- letta/llm_api/google_ai.py +468 -0
- letta/llm_api/llm_api_tools.py +485 -0
- letta/llm_api/openai.py +470 -0
- letta/local_llm/README.md +3 -0
- letta/local_llm/__init__.py +0 -0
- letta/local_llm/chat_completion_proxy.py +279 -0
- letta/local_llm/constants.py +31 -0
- letta/local_llm/function_parser.py +68 -0
- letta/local_llm/grammars/__init__.py +0 -0
- letta/local_llm/grammars/gbnf_grammar_generator.py +1324 -0
- letta/local_llm/grammars/json.gbnf +26 -0
- letta/local_llm/grammars/json_func_calls_with_inner_thoughts.gbnf +32 -0
- letta/local_llm/groq/api.py +97 -0
- letta/local_llm/json_parser.py +202 -0
- letta/local_llm/koboldcpp/api.py +62 -0
- letta/local_llm/koboldcpp/settings.py +23 -0
- letta/local_llm/llamacpp/api.py +58 -0
- letta/local_llm/llamacpp/settings.py +22 -0
- letta/local_llm/llm_chat_completion_wrappers/__init__.py +0 -0
- letta/local_llm/llm_chat_completion_wrappers/airoboros.py +452 -0
- letta/local_llm/llm_chat_completion_wrappers/chatml.py +470 -0
- letta/local_llm/llm_chat_completion_wrappers/configurable_wrapper.py +387 -0
- letta/local_llm/llm_chat_completion_wrappers/dolphin.py +246 -0
- letta/local_llm/llm_chat_completion_wrappers/llama3.py +345 -0
- letta/local_llm/llm_chat_completion_wrappers/simple_summary_wrapper.py +156 -0
- letta/local_llm/llm_chat_completion_wrappers/wrapper_base.py +11 -0
- letta/local_llm/llm_chat_completion_wrappers/zephyr.py +345 -0
- letta/local_llm/lmstudio/api.py +100 -0
- letta/local_llm/lmstudio/settings.py +29 -0
- letta/local_llm/ollama/api.py +88 -0
- letta/local_llm/ollama/settings.py +32 -0
- letta/local_llm/settings/__init__.py +0 -0
- letta/local_llm/settings/deterministic_mirostat.py +45 -0
- letta/local_llm/settings/settings.py +72 -0
- letta/local_llm/settings/simple.py +28 -0
- letta/local_llm/utils.py +265 -0
- letta/local_llm/vllm/api.py +63 -0
- letta/local_llm/webui/api.py +60 -0
- letta/local_llm/webui/legacy_api.py +58 -0
- letta/local_llm/webui/legacy_settings.py +23 -0
- letta/local_llm/webui/settings.py +24 -0
- letta/log.py +76 -0
- letta/main.py +437 -0
- letta/memory.py +440 -0
- letta/metadata.py +884 -0
- letta/openai_backcompat/__init__.py +0 -0
- letta/openai_backcompat/openai_object.py +437 -0
- letta/persistence_manager.py +148 -0
- letta/personas/__init__.py +0 -0
- letta/personas/examples/anna_pa.txt +13 -0
- letta/personas/examples/google_search_persona.txt +15 -0
- letta/personas/examples/memgpt_doc.txt +6 -0
- letta/personas/examples/memgpt_starter.txt +4 -0
- letta/personas/examples/sam.txt +14 -0
- letta/personas/examples/sam_pov.txt +14 -0
- letta/personas/examples/sam_simple_pov_gpt35.txt +13 -0
- letta/personas/examples/sqldb/test.db +0 -0
- letta/prompts/__init__.py +0 -0
- letta/prompts/gpt_summarize.py +14 -0
- letta/prompts/gpt_system.py +26 -0
- letta/prompts/system/memgpt_base.txt +49 -0
- letta/prompts/system/memgpt_chat.txt +58 -0
- letta/prompts/system/memgpt_chat_compressed.txt +13 -0
- letta/prompts/system/memgpt_chat_fstring.txt +51 -0
- letta/prompts/system/memgpt_doc.txt +50 -0
- letta/prompts/system/memgpt_gpt35_extralong.txt +53 -0
- letta/prompts/system/memgpt_intuitive_knowledge.txt +31 -0
- letta/prompts/system/memgpt_modified_chat.txt +23 -0
- letta/pytest.ini +0 -0
- letta/schemas/agent.py +117 -0
- letta/schemas/api_key.py +21 -0
- letta/schemas/block.py +135 -0
- letta/schemas/document.py +21 -0
- letta/schemas/embedding_config.py +54 -0
- letta/schemas/enums.py +35 -0
- letta/schemas/job.py +38 -0
- letta/schemas/letta_base.py +80 -0
- letta/schemas/letta_message.py +175 -0
- letta/schemas/letta_request.py +23 -0
- letta/schemas/letta_response.py +28 -0
- letta/schemas/llm_config.py +54 -0
- letta/schemas/memory.py +224 -0
- letta/schemas/message.py +727 -0
- letta/schemas/openai/chat_completion_request.py +123 -0
- letta/schemas/openai/chat_completion_response.py +136 -0
- letta/schemas/openai/chat_completions.py +123 -0
- letta/schemas/openai/embedding_response.py +11 -0
- letta/schemas/openai/openai.py +157 -0
- letta/schemas/organization.py +20 -0
- letta/schemas/passage.py +80 -0
- letta/schemas/source.py +62 -0
- letta/schemas/tool.py +143 -0
- letta/schemas/usage.py +18 -0
- letta/schemas/user.py +33 -0
- letta/server/__init__.py +0 -0
- letta/server/constants.py +6 -0
- letta/server/rest_api/__init__.py +0 -0
- letta/server/rest_api/admin/__init__.py +0 -0
- letta/server/rest_api/admin/agents.py +21 -0
- letta/server/rest_api/admin/tools.py +83 -0
- letta/server/rest_api/admin/users.py +98 -0
- letta/server/rest_api/app.py +193 -0
- letta/server/rest_api/auth/__init__.py +0 -0
- letta/server/rest_api/auth/index.py +43 -0
- letta/server/rest_api/auth_token.py +22 -0
- letta/server/rest_api/interface.py +726 -0
- letta/server/rest_api/routers/__init__.py +0 -0
- letta/server/rest_api/routers/openai/__init__.py +0 -0
- letta/server/rest_api/routers/openai/assistants/__init__.py +0 -0
- letta/server/rest_api/routers/openai/assistants/assistants.py +115 -0
- letta/server/rest_api/routers/openai/assistants/schemas.py +121 -0
- letta/server/rest_api/routers/openai/assistants/threads.py +336 -0
- letta/server/rest_api/routers/openai/chat_completions/__init__.py +0 -0
- letta/server/rest_api/routers/openai/chat_completions/chat_completions.py +131 -0
- letta/server/rest_api/routers/v1/__init__.py +15 -0
- letta/server/rest_api/routers/v1/agents.py +543 -0
- letta/server/rest_api/routers/v1/blocks.py +73 -0
- letta/server/rest_api/routers/v1/jobs.py +46 -0
- letta/server/rest_api/routers/v1/llms.py +28 -0
- letta/server/rest_api/routers/v1/organizations.py +61 -0
- letta/server/rest_api/routers/v1/sources.py +199 -0
- letta/server/rest_api/routers/v1/tools.py +103 -0
- letta/server/rest_api/routers/v1/users.py +109 -0
- letta/server/rest_api/static_files.py +74 -0
- letta/server/rest_api/utils.py +69 -0
- letta/server/server.py +1995 -0
- letta/server/startup.sh +8 -0
- letta/server/static_files/assets/index-0cbf7ad5.js +274 -0
- letta/server/static_files/assets/index-156816da.css +1 -0
- letta/server/static_files/assets/index-486e3228.js +274 -0
- letta/server/static_files/favicon.ico +0 -0
- letta/server/static_files/index.html +39 -0
- letta/server/static_files/memgpt_logo_transparent.png +0 -0
- letta/server/utils.py +46 -0
- letta/server/ws_api/__init__.py +0 -0
- letta/server/ws_api/example_client.py +104 -0
- letta/server/ws_api/interface.py +108 -0
- letta/server/ws_api/protocol.py +100 -0
- letta/server/ws_api/server.py +145 -0
- letta/settings.py +165 -0
- letta/streaming_interface.py +396 -0
- letta/system.py +207 -0
- letta/utils.py +1065 -0
- letta_nightly-0.1.7.dev20240924104148.dist-info/LICENSE +190 -0
- letta_nightly-0.1.7.dev20240924104148.dist-info/METADATA +98 -0
- letta_nightly-0.1.7.dev20240924104148.dist-info/RECORD +189 -0
- letta_nightly-0.1.7.dev20240924104148.dist-info/WHEEL +4 -0
- letta_nightly-0.1.7.dev20240924104148.dist-info/entry_points.txt +3 -0
|
@@ -0,0 +1,205 @@
|
|
|
1
|
+
import inspect
|
|
2
|
+
import typing
|
|
3
|
+
from typing import Any, Dict, Optional, Type, get_args, get_origin
|
|
4
|
+
|
|
5
|
+
from docstring_parser import parse
|
|
6
|
+
from pydantic import BaseModel
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
def is_optional(annotation):
|
|
10
|
+
# Check if the annotation is a Union
|
|
11
|
+
if getattr(annotation, "__origin__", None) is typing.Union:
|
|
12
|
+
# Check if None is one of the options in the Union
|
|
13
|
+
return type(None) in annotation.__args__
|
|
14
|
+
return False
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def optional_length(annotation):
|
|
18
|
+
if is_optional(annotation):
|
|
19
|
+
# Subtract 1 to account for NoneType
|
|
20
|
+
return len(annotation.__args__) - 1
|
|
21
|
+
else:
|
|
22
|
+
raise ValueError("The annotation is not an Optional type")
|
|
23
|
+
|
|
24
|
+
|
|
25
|
+
def type_to_json_schema_type(py_type):
    """
    Maps a Python type to a JSON schema type name.

    Specifically handles typing.Optional (unwrapping to the inner type),
    parameterized list types (mapped to "array"), and common scalar types.

    Raises:
        ValueError: if *py_type* has no corresponding JSON schema type.
    """
    if is_optional(py_type):
        # Optional[X] must wrap exactly one real type; unwrap and map X itself
        type_args = get_args(py_type)
        assert optional_length(py_type) == 1, f"Optional type must have exactly one type argument, but got {py_type}"

        # Extract and map the inner type
        return type_to_json_schema_type(type_args[0])

    # Any parameterized list (list[str], list[int], typing.List[str], ...) is a
    # JSON array. Previously only the exact key list[str] was mapped; every
    # other list parameterization incorrectly raised ValueError.
    if get_origin(py_type) is list:
        return "array"

    # Mapping of scalar Python types to JSON schema types
    type_map = {
        int: "integer",
        str: "string",
        bool: "boolean",
        float: "number",
        # Add more mappings as needed
    }
    if py_type not in type_map:
        raise ValueError(f"Python type {py_type} has no corresponding JSON schema type")

    # NOTE: the old `type_map.get(py_type, "string")` fallback was dead code —
    # the membership check above already raised for unknown types.
    return type_map[py_type]
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
def pydantic_model_to_open_ai(model):
    """Convert a pydantic model class into an OpenAI function-call schema dict.

    Field descriptions missing from the model's JSON schema are backfilled
    from the class docstring's param entries (parsed with docstring_parser).

    Returns:
        A dict with "name", "description", and "parameters" keys in the
        OpenAI function-definition format.

    Raises:
        ValueError: if neither the model's JSON schema nor its docstring
            provides a top-level description.
    """
    schema = model.model_json_schema()
    docstring = parse(model.__doc__ or "")
    parameters = {k: v for k, v in schema.items() if k not in ("title", "description")}
    # Backfill per-field descriptions from the docstring's :param: entries,
    # never overwriting a description already present in the schema
    for param in docstring.params:
        if (name := param.arg_name) in parameters["properties"] and (description := param.description):
            if "description" not in parameters["properties"][name]:
                parameters["properties"][name]["description"] = description

    # A field is required when it declares no default value
    parameters["required"] = sorted(k for k, v in parameters["properties"].items() if "default" not in v)

    if "description" not in schema:
        if docstring.short_description:
            schema["description"] = docstring.short_description
        else:
            # BUG FIX: this was a bare `raise` with no active exception, which
            # surfaced as "RuntimeError: No active exception to re-raise".
            # Raise a meaningful, catchable error instead.
            raise ValueError(f"No description found in schema or docstring for model {model.__name__}")

    return {
        "name": schema["title"],
        "description": schema["description"],
        "parameters": parameters,
    }
|
|
76
|
+
|
|
77
|
+
|
|
78
|
+
def generate_schema(function, name: Optional[str] = None, description: Optional[str] = None) -> dict:
    """Build an OpenAI function-call schema from a Python function.

    The function must have a type annotation and a docstring description for
    every parameter (except ``self``); violations raise TypeError/ValueError.
    Unless the function is one of the heartbeat-exempt names, a boolean
    ``request_heartbeat`` parameter is appended to the schema.

    Args:
        function: The callable to introspect.
        name: Override for the schema's "name" (defaults to ``function.__name__``).
        description: Override for the schema's "description" (defaults to the
            docstring's short description).

    Returns:
        A dict with "name", "description", and "parameters" in the OpenAI
        function-definition format.

    Raises:
        TypeError: if a parameter lacks a type annotation.
        ValueError: if a parameter lacks a docstring description.
    """
    # Get the signature of the function
    sig = inspect.signature(function)

    # Parse the docstring
    docstring = parse(function.__doc__)

    # Prepare the schema dictionary
    schema = {
        "name": function.__name__ if name is None else name,
        "description": docstring.short_description if description is None else description,
        "parameters": {"type": "object", "properties": {}, "required": []},
    }

    # TODO: ensure that 'agent' keyword is reserved for `Agent` class

    for param in sig.parameters.values():
        # Exclude 'self' parameter
        if param.name == "self":
            continue

        # Assert that the parameter has a type annotation
        if param.annotation == inspect.Parameter.empty:
            raise TypeError(f"Parameter '{param.name}' in function '{function.__name__}' lacks a type annotation")

        # Find the parameter's description in the docstring
        param_doc = next((d for d in docstring.params if d.arg_name == param.name), None)

        # Assert that the parameter has a description
        if not param_doc or not param_doc.description:
            raise ValueError(f"Parameter '{param.name}' in function '{function.__name__}' lacks a description in the docstring")

        if inspect.isclass(param.annotation) and issubclass(param.annotation, BaseModel):
            # Pydantic-typed parameters get a nested function-style schema
            schema["parameters"]["properties"][param.name] = pydantic_model_to_open_ai(param.annotation)
        else:
            # Add parameter details to the schema
            # NOTE(review): this lookup duplicates the param_doc `next(...)`
            # already performed above — redundant but harmless.
            param_doc = next((d for d in docstring.params if d.arg_name == param.name), None)
            schema["parameters"]["properties"][param.name] = {
                # "type": "string" if param.annotation == str else str(param.annotation),
                # The Parameter.empty guard here can never fire — an empty
                # annotation already raised TypeError above.
                "type": type_to_json_schema_type(param.annotation) if param.annotation != inspect.Parameter.empty else "string",
                "description": param_doc.description,
            }
        # A parameter with no default value is required
        if param.default == inspect.Parameter.empty:
            schema["parameters"]["required"].append(param.name)

        # list[str] parameters additionally declare their JSON array item type
        if get_origin(param.annotation) is list:
            if get_args(param.annotation)[0] is str:
                schema["parameters"]["properties"][param.name]["items"] = {"type": "string"}

        # NOTE(review): unreachable — an empty annotation raised TypeError at
        # the top of the loop, so this can never append a second time.
        if param.annotation == inspect.Parameter.empty:
            schema["parameters"]["required"].append(param.name)

    # append the heartbeat
    # (send_message / pause_heartbeats manage the conversation turn themselves)
    if function.__name__ not in ["send_message", "pause_heartbeats"]:
        schema["parameters"]["properties"]["request_heartbeat"] = {
            "type": "boolean",
            "description": "Request an immediate heartbeat after function execution. Set to 'true' if you want to send a follow-up message or run a follow-up function.",
        }
        schema["parameters"]["required"].append("request_heartbeat")

    return schema
|
|
139
|
+
|
|
140
|
+
|
|
141
|
+
def generate_schema_from_args_schema(
    args_schema: Type[BaseModel], name: Optional[str] = None, description: Optional[str] = None
) -> Dict[str, Any]:
    """Build an OpenAI function-call schema from a pydantic (v1-style) args model.

    Field types named str/int/bool are translated to their JSON-schema scalar
    names; any other type name is passed through unchanged. Fields marked
    required by the model are listed under "required".

    Args:
        args_schema: Model class whose ``__fields__`` describe the parameters.
        name: Value for the schema's "name" key.
        description: Value for the schema's "description" key.

    Returns:
        A dict in the OpenAI function-definition format.
    """
    # JSON-schema names for the Python scalar types we know how to translate;
    # anything else falls back to the Python type's own name.
    json_type_names = {"str": "string", "int": "integer", "bool": "boolean"}

    properties: Dict[str, Any] = {}
    required: list = []
    for field_name, field in args_schema.__fields__.items():
        python_type_name = field.type_.__name__
        properties[field_name] = {
            "type": json_type_names.get(python_type_name, python_type_name),
            "description": field.field_info.description,
        }
        if field.required:
            required.append(field_name)

    # Assemble the OpenAI function call JSON object
    return {
        "name": name,
        "description": description,
        "parameters": {"type": "object", "properties": properties, "required": required},
    }
|
|
167
|
+
|
|
168
|
+
|
|
169
|
+
def generate_langchain_tool_wrapper(tool_name: str) -> tuple[str, str]:
    """Generate Python source for a wrapper around a LangChain community tool.

    Args:
        tool_name: Class name of the tool in ``langchain_community.tools``.

    Returns:
        A ``(function_name, function_source)`` pair: the generated wrapper's
        name and the source code that defines it. (BUG FIX: the annotation
        previously claimed ``-> str`` although the function has always
        returned this two-tuple.)
    """
    import_statement = f"from langchain_community.tools import {tool_name}"

    # NOTE: this will fail for tools like 'wikipedia = WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper())' since it needs to pass an argument to the tool instantiation
    # https://python.langchain.com/v0.1/docs/integrations/tools/wikipedia/
    tool_instantiation = f"tool = {tool_name}()"
    run_call = "return tool._run(**kwargs)"  # no placeholders — plain string, not an f-string
    func_name = f"run_{tool_name.lower()}"

    # Combine all parts into the wrapper function. 'self' is stripped so the
    # generated function can safely be called as a bound method downstream.
    wrapper_function_str = f"""
def {func_name}(**kwargs):
    if 'self' in kwargs:
        del kwargs['self']
    {import_statement}
    {tool_instantiation}
    {run_call}
"""
    return func_name, wrapper_function_str
|
|
188
|
+
|
|
189
|
+
|
|
190
|
+
def generate_crewai_tool_wrapper(tool_name: str) -> tuple[str, str]:
    """Generate Python source for a wrapper around a CrewAI tool.

    Args:
        tool_name: Class name of the tool in ``crewai_tools``.

    Returns:
        A ``(function_name, function_source)`` pair: the generated wrapper's
        name and the source code that defines it. (BUG FIX: the annotation
        previously claimed ``-> str`` although the function has always
        returned this two-tuple.)
    """
    import_statement = f"from crewai_tools import {tool_name}"
    # NOTE: like the langchain wrapper, this assumes the tool class takes no
    # constructor arguments.
    tool_instantiation = f"tool = {tool_name}()"
    run_call = "return tool._run(**kwargs)"  # no placeholders — plain string, not an f-string
    func_name = f"run_{tool_name.lower()}"

    # Combine all parts into the wrapper function. 'self' is stripped so the
    # generated function can safely be called as a bound method downstream.
    wrapper_function_str = f"""
def {func_name}(**kwargs):
    if 'self' in kwargs:
        del kwargs['self']
    {import_statement}
    {tool_instantiation}
    {run_call}
"""
    return func_name, wrapper_function_str
|
letta/humans/__init__.py
ADDED
|
File without changes
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
First name: Chad
|
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
This is what I know so far about the user, I should expand this as I learn more about them.
|
|
2
|
+
|
|
3
|
+
First name: Chad
|
|
4
|
+
Last name: ?
|
|
5
|
+
Gender: Male
|
|
6
|
+
Age: ?
|
|
7
|
+
Nationality: ?
|
|
8
|
+
Occupation: Computer science PhD student at UC Berkeley
|
|
9
|
+
Interests: Formula 1, Sailing, Taste of the Himalayas Restaurant in Berkeley, CSGO
|
letta/interface.py
ADDED
|
@@ -0,0 +1,314 @@
|
|
|
1
|
+
import re
|
|
2
|
+
from abc import ABC, abstractmethod
|
|
3
|
+
from typing import List, Optional
|
|
4
|
+
|
|
5
|
+
from colorama import Fore, Style, init
|
|
6
|
+
|
|
7
|
+
from letta.constants import CLI_WARNING_PREFIX
|
|
8
|
+
from letta.schemas.message import Message
|
|
9
|
+
from letta.utils import json_loads, printd
|
|
10
|
+
|
|
11
|
+
# Initialize colorama so ANSI color codes render cross-platform; autoreset
# restores default styling after every print call.
init(autoreset=True)

# DEBUG = True  # puts full message outputs in the terminal
DEBUG = False  # only dumps important messages in the terminal

# When True, CLIInterface strips all color/emoji decoration from its output.
STRIP_UI = False
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class AgentInterface(ABC):
    """Interfaces handle Letta-related events (observer pattern)

    The 'msg' args provides the scoped message, and the optional Message arg can provide additional metadata.
    Concrete subclasses (e.g. CLIInterface) decide how each event is rendered.
    """

    @abstractmethod
    def user_message(self, msg: str, msg_obj: Optional[Message] = None):
        """Letta receives a user message"""
        raise NotImplementedError

    @abstractmethod
    def internal_monologue(self, msg: str, msg_obj: Optional[Message] = None):
        """Letta generates some internal monologue"""
        raise NotImplementedError

    @abstractmethod
    def assistant_message(self, msg: str, msg_obj: Optional[Message] = None):
        """Letta uses send_message"""
        raise NotImplementedError

    @abstractmethod
    def function_message(self, msg: str, msg_obj: Optional[Message] = None):
        """Letta calls a function"""
        raise NotImplementedError

    # NOTE(review): these static hooks were sketched but never made part of
    # the abstract contract — implementations provide them ad hoc.
    # @abstractmethod
    # @staticmethod
    # def print_messages():
    #     raise NotImplementedError

    # @abstractmethod
    # @staticmethod
    # def print_messages_raw():
    #     raise NotImplementedError

    # @abstractmethod
    # @staticmethod
    # def step_yield():
    #     raise NotImplementedError
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
class CLIInterface(AgentInterface):
    """Basic interface for dumping agent events to the command-line.

    All methods are static: output styling is controlled by the module-level
    STRIP_UI and DEBUG flags, not by instance state.
    """

    @staticmethod
    def important_message(msg: str):
        """Print a highlighted (magenta) message."""
        fstr = f"{Fore.MAGENTA}{Style.BRIGHT}{{msg}}{Style.RESET_ALL}"
        if STRIP_UI:
            fstr = "{msg}"
        print(fstr.format(msg=msg))

    @staticmethod
    def warning_message(msg: str):
        """Print a warning (red) message.

        BUG FIX: the print call previously lived in an `else:` branch, so
        warnings were silently dropped whenever STRIP_UI was enabled. Now the
        message is always printed, matching every sibling method.
        """
        fstr = f"{Fore.RED}{Style.BRIGHT}{{msg}}{Style.RESET_ALL}"
        if STRIP_UI:
            fstr = "{msg}"
        print(fstr.format(msg=msg))

    @staticmethod
    def internal_monologue(msg: str, msg_obj: Optional[Message] = None):
        """Print the agent's inner thoughts, dimmed and italicized."""
        # ANSI escape code for italic is '\x1B[3m'
        fstr = f"\x1B[3m{Fore.LIGHTBLACK_EX}💭 {{msg}}{Style.RESET_ALL}"
        if STRIP_UI:
            fstr = "{msg}"
        print(fstr.format(msg=msg))

    @staticmethod
    def assistant_message(msg: str, msg_obj: Optional[Message] = None):
        """Print an agent send_message payload (yellow)."""
        fstr = f"{Fore.YELLOW}{Style.BRIGHT}🤖 {Fore.YELLOW}{{msg}}{Style.RESET_ALL}"
        if STRIP_UI:
            fstr = "{msg}"
        print(fstr.format(msg=msg))

    @staticmethod
    def memory_message(msg: str, msg_obj: Optional[Message] = None):
        """Print a memory-related event (light magenta)."""
        fstr = f"{Fore.LIGHTMAGENTA_EX}{Style.BRIGHT}🧠 {Fore.LIGHTMAGENTA_EX}{{msg}}{Style.RESET_ALL}"
        if STRIP_UI:
            fstr = "{msg}"
        print(fstr.format(msg=msg))

    @staticmethod
    def system_message(msg: str, msg_obj: Optional[Message] = None):
        """Print a system event (magenta).

        BUG FIX: msg used to be interpolated directly into the f-string and
        then run through .format(msg=msg) a second time, which crashed on any
        message containing '{' or '}'. Use the deferred {{msg}} placeholder
        like the sibling methods.
        """
        fstr = f"{Fore.MAGENTA}{Style.BRIGHT}🖥️ [system] {Fore.MAGENTA}{{msg}}{Style.RESET_ALL}"
        if STRIP_UI:
            fstr = "{msg}"
        print(fstr.format(msg=msg))

    @staticmethod
    def user_message(msg: str, msg_obj: Optional[Message] = None, raw: bool = False, dump: bool = False, debug: bool = DEBUG):
        """Print a user-originated message.

        Suppressed entirely in normal use (only shown when raw/dump/debug is
        set, to avoid echoing what the user just typed). String messages are
        parsed as JSON and dispatched on their "type" field.
        """

        def print_user_message(icon, msg, printf=print):
            # Always prints, regardless of DEBUG
            if STRIP_UI:
                printf(f"{icon} {msg}")
            else:
                printf(f"{Fore.GREEN}{Style.BRIGHT}{icon} {Fore.GREEN}{msg}{Style.RESET_ALL}")

        def printd_user_message(icon, msg):
            return print_user_message(icon, msg)

        if not (raw or dump or debug):
            # we do not want to repeat the message in normal use
            return

        if isinstance(msg, str):
            if raw:
                printd_user_message("🧑", msg)
                return
            else:
                try:
                    msg_json = json_loads(msg)
                except:
                    printd(f"{CLI_WARNING_PREFIX}failed to parse user message into json")
                    printd_user_message("🧑", msg)
                    return

        # NOTE(review): if msg is not a str, msg_json is unbound here and the
        # dispatch below raises NameError — confirm callers only pass strings.
        if msg_json["type"] == "user_message":
            if dump:
                print_user_message("🧑", msg_json["message"])
                return
            msg_json.pop("type")
            printd_user_message("🧑", msg_json)
        elif msg_json["type"] == "heartbeat":
            # Heartbeats are synthetic; only surface them in debug/dump modes
            if debug:
                msg_json.pop("type")
                printd_user_message("💓", msg_json)
            elif dump:
                print_user_message("💓", msg_json)
                return

        elif msg_json["type"] == "system_message":
            msg_json.pop("type")
            printd_user_message("🖥️", msg_json)
        else:
            printd_user_message("🧑", msg_json)

    @staticmethod
    def function_message(msg: str, msg_obj: Optional[Message] = None, debug: bool = DEBUG):
        """Print a function-call event, pretty-printing known memory functions.

        "Running ..." messages for recognized memory functions get a readable
        summary; "Ran ..." messages are dropped; other strings are treated as
        function return payloads and colored by their "status" field.
        """

        def print_function_message(icon, msg, color=Fore.RED, printf=print):
            if STRIP_UI:
                printf(f"⚡{icon} [function] {msg}")
            else:
                printf(f"{color}{Style.BRIGHT}⚡{icon} [function] {color}{msg}{Style.RESET_ALL}")

        def printd_function_message(icon, msg, color=Fore.RED):
            # Routed through printd unless debug, so it only shows in debug mode
            return print_function_message(icon, msg, color, printf=(print if debug else printd))

        if isinstance(msg, dict):
            printd_function_message("", msg)
            return

        if msg.startswith("Success"):
            printd_function_message("🟢", msg)
        elif msg.startswith("Error: "):
            printd_function_message("🔴", msg)
        elif msg.startswith("Ran "):
            # NOTE: ignore 'ran' messages that come post-execution
            return
        elif msg.startswith("Running "):
            if debug:
                printd_function_message("", msg)
            else:
                match = re.search(r"Running (\w+)\((.*)\)", msg)
                if match:
                    function_name = match.group(1)
                    function_args = match.group(2)
                    if function_name in ["archival_memory_insert", "archival_memory_search", "core_memory_replace", "core_memory_append"]:
                        if function_name in ["archival_memory_insert", "core_memory_append", "core_memory_replace"]:
                            print_function_message("🧠", f"updating memory with {function_name}")
                        elif function_name == "archival_memory_search":
                            print_function_message("🧠", f"searching memory with {function_name}")
                        try:
                            # SECURITY NOTE: eval() on the stringified call args —
                            # acceptable only because the args come from the
                            # agent's own function-call rendering, not raw user
                            # input; consider json_loads here.
                            msg_dict = eval(function_args)
                            if function_name == "archival_memory_search":
                                output = f'\tquery: {msg_dict["query"]}, page: {msg_dict["page"]}'
                                if STRIP_UI:
                                    print(output)
                                else:
                                    print(f"{Fore.RED}{output}{Style.RESET_ALL}")
                            elif function_name == "archival_memory_insert":
                                output = f'\t→ {msg_dict["content"]}'
                                if STRIP_UI:
                                    print(output)
                                else:
                                    print(f"{Style.BRIGHT}{Fore.RED}{output}{Style.RESET_ALL}")
                            else:
                                # core_memory_replace / core_memory_append diff view
                                if STRIP_UI:
                                    print(f'\t {msg_dict["old_content"]}\n\t→ {msg_dict["new_content"]}')
                                else:
                                    print(
                                        f'{Style.BRIGHT}\t{Fore.RED} {msg_dict["old_content"]}\n\t{Fore.GREEN}→ {msg_dict["new_content"]}{Style.RESET_ALL}'
                                    )
                        except Exception as e:
                            printd(str(e))
                            printd(msg_dict)
                    elif function_name in ["conversation_search", "conversation_search_date"]:
                        print_function_message("🧠", f"searching memory with {function_name}")
                        try:
                            msg_dict = eval(function_args)
                            output = f'\tquery: {msg_dict["query"]}, page: {msg_dict["page"]}'
                            if STRIP_UI:
                                print(output)
                            else:
                                print(f"{Fore.RED}{output}{Style.RESET_ALL}")
                        except Exception as e:
                            printd(str(e))
                            printd(msg_dict)
                    else:
                        printd(f"{CLI_WARNING_PREFIX}did not recognize function message")
                        printd_function_message("", msg)
        else:
            # Not a lifecycle message — treat as a function return payload
            try:
                msg_dict = json_loads(msg)
                if "status" in msg_dict and msg_dict["status"] == "OK":
                    printd_function_message("", str(msg), color=Fore.GREEN)
                else:
                    printd_function_message("", str(msg), color=Fore.RED)
            except Exception:
                print(f"{CLI_WARNING_PREFIX}did not recognize function message {type(msg)} {msg}")
                printd_function_message("", msg)

    @staticmethod
    def print_messages(message_sequence: List[Message], dump=False):
        """Render a full message history, dispatching each entry by role."""
        # rewrite to dict format
        message_sequence = [msg.to_openai_dict() for msg in message_sequence]

        idx = len(message_sequence)
        for msg in message_sequence:
            if dump:
                print(f"[{idx}] ", end="")
                idx -= 1
            role = msg["role"]
            content = msg["content"]

            if role == "system":
                CLIInterface.system_message(content)
            elif role == "assistant":
                # Differentiate between internal monologue, function calls, and messages
                if msg.get("function_call"):
                    if content is not None:
                        CLIInterface.internal_monologue(content)
                    # I think the next one is not up to date
                    # function_message(msg["function_call"])
                    args = json_loads(msg["function_call"].get("arguments"))
                    CLIInterface.assistant_message(args.get("message"))
                    # assistant_message(content)
                elif msg.get("tool_calls"):
                    if content is not None:
                        CLIInterface.internal_monologue(content)
                    # NOTE(review): only the first tool call is rendered
                    function_obj = msg["tool_calls"][0].get("function")
                    if function_obj:
                        args = json_loads(function_obj.get("arguments"))
                        CLIInterface.assistant_message(args.get("message"))
                else:
                    CLIInterface.internal_monologue(content)
            elif role == "user":
                CLIInterface.user_message(content, dump=dump)
            elif role == "function":
                CLIInterface.function_message(content, debug=dump)
            elif role == "tool":
                CLIInterface.function_message(content, debug=dump)
            else:
                print(f"Unknown role: {content}")

    @staticmethod
    def print_messages_simple(message_sequence: List[Message]):
        """Render a message history without function-call interpretation."""
        # rewrite to dict format
        message_sequence = [msg.to_openai_dict() for msg in message_sequence]

        for msg in message_sequence:
            role = msg["role"]
            content = msg["content"]

            if role == "system":
                CLIInterface.system_message(content)
            elif role == "assistant":
                CLIInterface.assistant_message(content)
            elif role == "user":
                CLIInterface.user_message(content, raw=True)
            else:
                print(f"Unknown role: {content}")

    @staticmethod
    def print_messages_raw(message_sequence: List[Message]):
        """Dump each message's raw OpenAI-format dict, one per line."""
        # rewrite to dict format
        message_sequence = [msg.to_openai_dict() for msg in message_sequence]

        for msg in message_sequence:
            print(msg)

    @staticmethod
    def step_yield():
        # No-op hook: the CLI has no per-step yield behavior
        pass

    @staticmethod
    def step_complete():
        # No-op hook: the CLI has no per-step completion behavior
        pass
|
|
File without changes
|