letta-nightly 0.1.7.dev20240924104148__py3-none-any.whl
This diff shows the contents of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of letta-nightly has been flagged as possibly problematic. See the registry page for details.
- letta/__init__.py +24 -0
- letta/__main__.py +3 -0
- letta/agent.py +1427 -0
- letta/agent_store/chroma.py +295 -0
- letta/agent_store/db.py +546 -0
- letta/agent_store/lancedb.py +177 -0
- letta/agent_store/milvus.py +198 -0
- letta/agent_store/qdrant.py +201 -0
- letta/agent_store/storage.py +188 -0
- letta/benchmark/benchmark.py +96 -0
- letta/benchmark/constants.py +14 -0
- letta/cli/cli.py +689 -0
- letta/cli/cli_config.py +1282 -0
- letta/cli/cli_load.py +166 -0
- letta/client/__init__.py +0 -0
- letta/client/admin.py +171 -0
- letta/client/client.py +2360 -0
- letta/client/streaming.py +90 -0
- letta/client/utils.py +61 -0
- letta/config.py +484 -0
- letta/configs/anthropic.json +13 -0
- letta/configs/letta_hosted.json +11 -0
- letta/configs/openai.json +12 -0
- letta/constants.py +134 -0
- letta/credentials.py +140 -0
- letta/data_sources/connectors.py +247 -0
- letta/embeddings.py +218 -0
- letta/errors.py +26 -0
- letta/functions/__init__.py +0 -0
- letta/functions/function_sets/base.py +174 -0
- letta/functions/function_sets/extras.py +132 -0
- letta/functions/functions.py +105 -0
- letta/functions/schema_generator.py +205 -0
- letta/humans/__init__.py +0 -0
- letta/humans/examples/basic.txt +1 -0
- letta/humans/examples/cs_phd.txt +9 -0
- letta/interface.py +314 -0
- letta/llm_api/__init__.py +0 -0
- letta/llm_api/anthropic.py +383 -0
- letta/llm_api/azure_openai.py +155 -0
- letta/llm_api/cohere.py +396 -0
- letta/llm_api/google_ai.py +468 -0
- letta/llm_api/llm_api_tools.py +485 -0
- letta/llm_api/openai.py +470 -0
- letta/local_llm/README.md +3 -0
- letta/local_llm/__init__.py +0 -0
- letta/local_llm/chat_completion_proxy.py +279 -0
- letta/local_llm/constants.py +31 -0
- letta/local_llm/function_parser.py +68 -0
- letta/local_llm/grammars/__init__.py +0 -0
- letta/local_llm/grammars/gbnf_grammar_generator.py +1324 -0
- letta/local_llm/grammars/json.gbnf +26 -0
- letta/local_llm/grammars/json_func_calls_with_inner_thoughts.gbnf +32 -0
- letta/local_llm/groq/api.py +97 -0
- letta/local_llm/json_parser.py +202 -0
- letta/local_llm/koboldcpp/api.py +62 -0
- letta/local_llm/koboldcpp/settings.py +23 -0
- letta/local_llm/llamacpp/api.py +58 -0
- letta/local_llm/llamacpp/settings.py +22 -0
- letta/local_llm/llm_chat_completion_wrappers/__init__.py +0 -0
- letta/local_llm/llm_chat_completion_wrappers/airoboros.py +452 -0
- letta/local_llm/llm_chat_completion_wrappers/chatml.py +470 -0
- letta/local_llm/llm_chat_completion_wrappers/configurable_wrapper.py +387 -0
- letta/local_llm/llm_chat_completion_wrappers/dolphin.py +246 -0
- letta/local_llm/llm_chat_completion_wrappers/llama3.py +345 -0
- letta/local_llm/llm_chat_completion_wrappers/simple_summary_wrapper.py +156 -0
- letta/local_llm/llm_chat_completion_wrappers/wrapper_base.py +11 -0
- letta/local_llm/llm_chat_completion_wrappers/zephyr.py +345 -0
- letta/local_llm/lmstudio/api.py +100 -0
- letta/local_llm/lmstudio/settings.py +29 -0
- letta/local_llm/ollama/api.py +88 -0
- letta/local_llm/ollama/settings.py +32 -0
- letta/local_llm/settings/__init__.py +0 -0
- letta/local_llm/settings/deterministic_mirostat.py +45 -0
- letta/local_llm/settings/settings.py +72 -0
- letta/local_llm/settings/simple.py +28 -0
- letta/local_llm/utils.py +265 -0
- letta/local_llm/vllm/api.py +63 -0
- letta/local_llm/webui/api.py +60 -0
- letta/local_llm/webui/legacy_api.py +58 -0
- letta/local_llm/webui/legacy_settings.py +23 -0
- letta/local_llm/webui/settings.py +24 -0
- letta/log.py +76 -0
- letta/main.py +437 -0
- letta/memory.py +440 -0
- letta/metadata.py +884 -0
- letta/openai_backcompat/__init__.py +0 -0
- letta/openai_backcompat/openai_object.py +437 -0
- letta/persistence_manager.py +148 -0
- letta/personas/__init__.py +0 -0
- letta/personas/examples/anna_pa.txt +13 -0
- letta/personas/examples/google_search_persona.txt +15 -0
- letta/personas/examples/memgpt_doc.txt +6 -0
- letta/personas/examples/memgpt_starter.txt +4 -0
- letta/personas/examples/sam.txt +14 -0
- letta/personas/examples/sam_pov.txt +14 -0
- letta/personas/examples/sam_simple_pov_gpt35.txt +13 -0
- letta/personas/examples/sqldb/test.db +0 -0
- letta/prompts/__init__.py +0 -0
- letta/prompts/gpt_summarize.py +14 -0
- letta/prompts/gpt_system.py +26 -0
- letta/prompts/system/memgpt_base.txt +49 -0
- letta/prompts/system/memgpt_chat.txt +58 -0
- letta/prompts/system/memgpt_chat_compressed.txt +13 -0
- letta/prompts/system/memgpt_chat_fstring.txt +51 -0
- letta/prompts/system/memgpt_doc.txt +50 -0
- letta/prompts/system/memgpt_gpt35_extralong.txt +53 -0
- letta/prompts/system/memgpt_intuitive_knowledge.txt +31 -0
- letta/prompts/system/memgpt_modified_chat.txt +23 -0
- letta/pytest.ini +0 -0
- letta/schemas/agent.py +117 -0
- letta/schemas/api_key.py +21 -0
- letta/schemas/block.py +135 -0
- letta/schemas/document.py +21 -0
- letta/schemas/embedding_config.py +54 -0
- letta/schemas/enums.py +35 -0
- letta/schemas/job.py +38 -0
- letta/schemas/letta_base.py +80 -0
- letta/schemas/letta_message.py +175 -0
- letta/schemas/letta_request.py +23 -0
- letta/schemas/letta_response.py +28 -0
- letta/schemas/llm_config.py +54 -0
- letta/schemas/memory.py +224 -0
- letta/schemas/message.py +727 -0
- letta/schemas/openai/chat_completion_request.py +123 -0
- letta/schemas/openai/chat_completion_response.py +136 -0
- letta/schemas/openai/chat_completions.py +123 -0
- letta/schemas/openai/embedding_response.py +11 -0
- letta/schemas/openai/openai.py +157 -0
- letta/schemas/organization.py +20 -0
- letta/schemas/passage.py +80 -0
- letta/schemas/source.py +62 -0
- letta/schemas/tool.py +143 -0
- letta/schemas/usage.py +18 -0
- letta/schemas/user.py +33 -0
- letta/server/__init__.py +0 -0
- letta/server/constants.py +6 -0
- letta/server/rest_api/__init__.py +0 -0
- letta/server/rest_api/admin/__init__.py +0 -0
- letta/server/rest_api/admin/agents.py +21 -0
- letta/server/rest_api/admin/tools.py +83 -0
- letta/server/rest_api/admin/users.py +98 -0
- letta/server/rest_api/app.py +193 -0
- letta/server/rest_api/auth/__init__.py +0 -0
- letta/server/rest_api/auth/index.py +43 -0
- letta/server/rest_api/auth_token.py +22 -0
- letta/server/rest_api/interface.py +726 -0
- letta/server/rest_api/routers/__init__.py +0 -0
- letta/server/rest_api/routers/openai/__init__.py +0 -0
- letta/server/rest_api/routers/openai/assistants/__init__.py +0 -0
- letta/server/rest_api/routers/openai/assistants/assistants.py +115 -0
- letta/server/rest_api/routers/openai/assistants/schemas.py +121 -0
- letta/server/rest_api/routers/openai/assistants/threads.py +336 -0
- letta/server/rest_api/routers/openai/chat_completions/__init__.py +0 -0
- letta/server/rest_api/routers/openai/chat_completions/chat_completions.py +131 -0
- letta/server/rest_api/routers/v1/__init__.py +15 -0
- letta/server/rest_api/routers/v1/agents.py +543 -0
- letta/server/rest_api/routers/v1/blocks.py +73 -0
- letta/server/rest_api/routers/v1/jobs.py +46 -0
- letta/server/rest_api/routers/v1/llms.py +28 -0
- letta/server/rest_api/routers/v1/organizations.py +61 -0
- letta/server/rest_api/routers/v1/sources.py +199 -0
- letta/server/rest_api/routers/v1/tools.py +103 -0
- letta/server/rest_api/routers/v1/users.py +109 -0
- letta/server/rest_api/static_files.py +74 -0
- letta/server/rest_api/utils.py +69 -0
- letta/server/server.py +1995 -0
- letta/server/startup.sh +8 -0
- letta/server/static_files/assets/index-0cbf7ad5.js +274 -0
- letta/server/static_files/assets/index-156816da.css +1 -0
- letta/server/static_files/assets/index-486e3228.js +274 -0
- letta/server/static_files/favicon.ico +0 -0
- letta/server/static_files/index.html +39 -0
- letta/server/static_files/memgpt_logo_transparent.png +0 -0
- letta/server/utils.py +46 -0
- letta/server/ws_api/__init__.py +0 -0
- letta/server/ws_api/example_client.py +104 -0
- letta/server/ws_api/interface.py +108 -0
- letta/server/ws_api/protocol.py +100 -0
- letta/server/ws_api/server.py +145 -0
- letta/settings.py +165 -0
- letta/streaming_interface.py +396 -0
- letta/system.py +207 -0
- letta/utils.py +1065 -0
- letta_nightly-0.1.7.dev20240924104148.dist-info/LICENSE +190 -0
- letta_nightly-0.1.7.dev20240924104148.dist-info/METADATA +98 -0
- letta_nightly-0.1.7.dev20240924104148.dist-info/RECORD +189 -0
- letta_nightly-0.1.7.dev20240924104148.dist-info/WHEEL +4 -0
- letta_nightly-0.1.7.dev20240924104148.dist-info/entry_points.txt +3 -0
letta/local_llm/llm_chat_completion_wrappers/chatml.py
@@ -0,0 +1,470 @@
from letta.errors import LLMJSONParsingError
from letta.local_llm.json_parser import clean_json
from letta.local_llm.llm_chat_completion_wrappers.wrapper_base import (
    LLMChatCompletionWrapper,
)
from letta.utils import json_dumps, json_loads

PREFIX_HINT = """# Reminders:
# Important information about yourself and the user is stored in (limited) core memory
# You can modify core memory with core_memory_replace
# You can add to core memory with core_memory_append
# Less important information is stored in (unlimited) archival memory
# You can add to archival memory with archival_memory_insert
# You can search archival memory with archival_memory_search
# You will always see the statistics of archival memory, so you know if there is content inside it
# If you receive new important information about the user (or yourself), you immediately update your memory with core_memory_replace, core_memory_append, or archival_memory_insert"""

FIRST_PREFIX_HINT = """# Reminders:
# This is your first interaction with the user!
# Initial information about them is provided in the core memory user block
# Make sure to introduce yourself to them
# Your inner thoughts should be private, interesting, and creative
# Do NOT use inner thoughts to communicate with the user
# Use send_message to communicate with the user"""
# Don't forget to use send_message, otherwise the user won't see your message"""


class ChatMLInnerMonologueWrapper(LLMChatCompletionWrapper):
    """ChatML-style prompt formatter, tested for use with https://huggingface.co/ehartford/dolphin-2.5-mixtral-8x7b#training"""

    supports_first_message = True

    def __init__(
        self,
        json_indent=2,
        # simplify_json_content=True,
        simplify_json_content=False,
        clean_function_args=True,
        include_assistant_prefix=True,
        assistant_prefix_extra='\n{\n  "function":',
        assistant_prefix_extra_first_message='\n{\n  "function": "send_message",',
        allow_custom_roles=True,  # allow roles outside user/assistant
        use_system_role_in_user=False,  # use the system role on user messages that don't use "type: user_message"
        # allow_function_role=True,  # use function role for function replies?
        allow_function_role=False,  # use function role for function replies?
        no_function_role_role="assistant",  # if no function role, which role to use?
        no_function_role_prefix="FUNCTION RETURN:\n",  # if no function role, what prefix to use?
        # add a guiding hint
        assistant_prefix_hint=False,
    ):
        self.simplify_json_content = simplify_json_content
        self.clean_func_args = clean_function_args
        self.include_assistant_prefix = include_assistant_prefix
        self.assistant_prefix_extra = assistant_prefix_extra
        self.assistant_prefix_extra_first_message = assistant_prefix_extra_first_message
        self.assistant_prefix_hint = assistant_prefix_hint

        # role-based
        self.allow_custom_roles = allow_custom_roles
        self.use_system_role_in_user = use_system_role_in_user
        self.allow_function_role = allow_function_role
        # extras for when the function role is disallowed
        self.no_function_role_role = no_function_role_role
        self.no_function_role_prefix = no_function_role_prefix

        # how to set json in prompt
        self.json_indent = json_indent

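An illustrative construction sketch (assumes only the defaults above; the hint toggle is the one non-default shown):

    from letta.local_llm.llm_chat_completion_wrappers.chatml import (
        ChatMLInnerMonologueWrapper,
    )

    # Enable the reminder hint appended after "<|im_start|>assistant"
    wrapper = ChatMLInnerMonologueWrapper(assistant_prefix_hint=True)
    assert wrapper.supports_first_message  # first messages force a send_message prefix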
    def _compile_function_description(self, schema, add_inner_thoughts=True) -> str:
        """Go from a JSON schema to a string description for a prompt"""
        # airoboros style
        func_str = ""
        func_str += f"{schema['name']}:"
        func_str += f"\n  description: {schema['description']}"
        func_str += f"\n  params:"
        if add_inner_thoughts:
            from letta.local_llm.constants import (
                INNER_THOUGHTS_KWARG,
                INNER_THOUGHTS_KWARG_DESCRIPTION,
            )

            func_str += f"\n    {INNER_THOUGHTS_KWARG}: {INNER_THOUGHTS_KWARG_DESCRIPTION}"
        for param_k, param_v in schema["parameters"]["properties"].items():
            # TODO we're ignoring type
            func_str += f"\n    {param_k}: {param_v['description']}"
        # TODO we're ignoring schema['parameters']['required']
        return func_str

    def _compile_function_block(self, functions) -> str:
        """functions dict -> string describing functions choices"""
        prompt = ""

        # prompt += f"\nPlease select the most suitable function and parameters from the list of available functions below, based on the user's input. Provide your response in JSON format."
        prompt += f"Please select the most suitable function and parameters from the list of available functions below, based on the ongoing conversation. Provide your response in JSON format."
        prompt += f"\nAvailable functions:"
        for function_dict in functions:
            prompt += f"\n{self._compile_function_description(function_dict)}"

        return prompt

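For illustration, a sketch of what the function block compiles to for one hypothetical schema (the schema below is a stand-in, not the one shipped with the package):

    functions = [
        {
            "name": "send_message",
            "description": "Sends a message to the human user.",
            "parameters": {
                "properties": {"message": {"description": "Message contents."}},
                "required": ["message"],
            },
        }
    ]
    # wrapper._compile_function_block(functions) yields roughly:
    #   Please select the most suitable function and parameters from the list of
    #   available functions below, based on the ongoing conversation. Provide your
    #   response in JSON format.
    #   Available functions:
    #   send_message:
    #     description: Sends a message to the human user.
    #     params:
    #       inner_thoughts: <INNER_THOUGHTS_KWARG_DESCRIPTION>
    #       message: Message contents.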
    # NOTE: BOS/EOS chatml tokens are NOT inserted here
    def _compile_system_message(self, system_message, functions, function_documentation=None) -> str:
        """system prompt + memory + functions -> string"""
        prompt = ""
        prompt += system_message
        prompt += "\n"
        if function_documentation is not None:
            prompt += f"Please select the most suitable function and parameters from the list of available functions below, based on the ongoing conversation. Provide your response in JSON format."
            prompt += f"\nAvailable functions:\n"
            prompt += function_documentation
        else:
            prompt += self._compile_function_block(functions)
        return prompt

    def _compile_function_call(self, function_call, inner_thoughts=None):
        """Go from ChatCompletion to Airoboros style function trace (in prompt)

        ChatCompletion data (inside message['function_call']):
            "function_call": {
                "name": ...
                "arguments": {
                    "arg1": val1,
                    ...
                }

        Airoboros output:
            {
                "function": "send_message",
                "params": {
                    "message": "Hello there! I am Sam, an AI developed by Liminal Corp. How can I assist you today?"
                }
            }
        """
        airo_func_call = {
            "function": function_call["name"],
            "params": {
                "inner_thoughts": inner_thoughts,
                **json_loads(function_call["arguments"]),
            },
        }
        return json_dumps(airo_func_call, indent=self.json_indent)

    # NOTE: BOS/EOS chatml tokens are NOT inserted here
    def _compile_assistant_message(self, message) -> str:
        """assistant message -> string"""
        prompt = ""

        # need to add the function call if there was one
        inner_thoughts = message["content"]
        if "function_call" in message and message["function_call"]:
            prompt += f"\n{self._compile_function_call(message['function_call'], inner_thoughts=inner_thoughts)}"
        elif "tool_calls" in message and message["tool_calls"]:
            for tool_call in message["tool_calls"]:
                prompt += f"\n{self._compile_function_call(tool_call['function'], inner_thoughts=inner_thoughts)}"
        else:
            # TODO should we format this into JSON somehow?
            prompt += inner_thoughts

        return prompt

    # NOTE: BOS/EOS chatml tokens are NOT inserted here
    def _compile_user_message(self, message) -> str:
        """user message (should be JSON) -> string"""
        prompt = ""
        if self.simplify_json_content:
            # Make user messages not JSON but plaintext instead
            try:
                user_msg_json = json_loads(message["content"])
                user_msg_str = user_msg_json["message"]
            except:
                user_msg_str = message["content"]
        else:
            # Otherwise just dump the full json
            try:
                user_msg_json = json_loads(message["content"])
                user_msg_str = json_dumps(user_msg_json, indent=self.json_indent)
            except:
                user_msg_str = message["content"]

        prompt += user_msg_str
        return prompt

    # NOTE: BOS/EOS chatml tokens are NOT inserted here
    def _compile_function_response(self, message) -> str:
        """function response message (should be JSON) -> string"""
        # TODO we should clean up send_message returns to avoid cluttering the prompt
        prompt = ""
        try:
            # indent the function replies
            function_return_dict = json_loads(message["content"])
            function_return_str = json_dumps(function_return_dict, indent=self.json_indent)
        except:
            function_return_str = message["content"]

        prompt += function_return_str
        return prompt

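Since allow_function_role defaults to False, a tool/function reply is rendered under the fallback role with the stop prefix. A sketch of the rendered block (message contents hypothetical):

    msg = {"role": "function", "content": '{"status": "OK", "message": null}'}
    # Rendered inside chat_completion_to_prompt (below) as roughly:
    #   <|im_start|>assistant
    #   FUNCTION RETURN:
    #   {
    #     "status": "OK",
    #     "message": null
    #   }<|im_end|>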
    def chat_completion_to_prompt(self, messages, functions, first_message=False, function_documentation=None):
        """chatml-style prompt formatting, with implied support for multi-role"""
        prompt = ""

        # System instructions go first
        assert messages[0]["role"] == "system"
        system_block = self._compile_system_message(
            system_message=messages[0]["content"], functions=functions, function_documentation=function_documentation
        )
        prompt += f"<|im_start|>system\n{system_block.strip()}<|im_end|>"

        # Last are the user/assistant messages
        for message in messages[1:]:
            assert message["role"] in ["user", "assistant", "function", "tool"], message

            if message["role"] == "user":
                # Support for AutoGen naming of agents
                role_str = message["name"].strip().lower() if (self.allow_custom_roles and "name" in message) else message["role"]
                msg_str = self._compile_user_message(message)

                if self.use_system_role_in_user:
                    try:
                        msg_json = json_loads(message["content"])
                        if msg_json["type"] != "user_message":
                            role_str = "system"
                    except:
                        pass
                prompt += f"\n<|im_start|>{role_str}\n{msg_str.strip()}<|im_end|>"

            elif message["role"] == "assistant":
                # Support for AutoGen naming of agents
                role_str = message["name"].strip().lower() if (self.allow_custom_roles and "name" in message) else message["role"]
                msg_str = self._compile_assistant_message(message)

                prompt += f"\n<|im_start|>{role_str}\n{msg_str.strip()}<|im_end|>"

            elif message["role"] in ["tool", "function"]:
                if self.allow_function_role:
                    role_str = message["role"]
                    msg_str = self._compile_function_response(message)
                    prompt += f"\n<|im_start|>{role_str}\n{msg_str.strip()}<|im_end|>"
                else:
                    # TODO figure out what to do with functions if we disallow function role
                    role_str = self.no_function_role_role
                    msg_str = self._compile_function_response(message)
                    func_resp_prefix = self.no_function_role_prefix
                    # NOTE whatever the special prefix is, it should also be a stop token
                    prompt += f"\n<|im_start|>{role_str}\n{func_resp_prefix}{msg_str.strip()}<|im_end|>"

            else:
                raise ValueError(message)

        if self.include_assistant_prefix:
            prompt += f"\n<|im_start|>assistant"
            if self.assistant_prefix_hint:
                prompt += f"\n{FIRST_PREFIX_HINT if first_message else PREFIX_HINT}"
            if self.supports_first_message and first_message:
                if self.assistant_prefix_extra_first_message:
                    prompt += self.assistant_prefix_extra_first_message
            else:
                if self.assistant_prefix_extra:
                    # assistant_prefix_extra='\n{\n  "function":',
                    prompt += self.assistant_prefix_extra

        return prompt

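Putting the pieces together, a hedged sketch of a first-message prompt (system and user text abbreviated and hypothetical):

    messages = [
        {"role": "system", "content": "You are a helpful agent..."},
        {"role": "user", "content": '{"type": "user_message", "message": "hi"}'},
    ]
    prompt = wrapper.chat_completion_to_prompt(messages, functions, first_message=True)
    # Roughly:
    #   <|im_start|>system
    #   You are a helpful agent...
    #   Please select the most suitable function and parameters [...]
    #   Available functions:
    #   [...]<|im_end|>
    #   <|im_start|>user
    #   {
    #     "type": "user_message",
    #     "message": "hi"
    #   }<|im_end|>
    #   <|im_start|>assistant
    #   {
    #     "function": "send_message",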
    def _clean_function_args(self, function_name, function_args):
        """Some basic Letta-specific cleaning of function args"""
        cleaned_function_name = function_name
        cleaned_function_args = function_args.copy() if function_args is not None else {}

        if function_name == "send_message":
            # strip request_heartbeat
            cleaned_function_args.pop("request_heartbeat", None)

        inner_thoughts = None
        if "inner_thoughts" in function_args:
            inner_thoughts = cleaned_function_args.pop("inner_thoughts")

        # TODO more cleaning to fix errors LLM makes
        return inner_thoughts, cleaned_function_name, cleaned_function_args

    def output_to_chat_completion_response(self, raw_llm_output, first_message=False):
        """Turn raw LLM output into a ChatCompletion style response with:
        "message" = {
            "role": "assistant",
            "content": ...,
            "function_call": {
                "name": ...
                "arguments": {
                    "arg1": val1,
                    ...
                }
            }
        }
        """
        # if self.include_opening_brance_in_prefix and raw_llm_output[0] != "{":
        #     raw_llm_output = "{" + raw_llm_output
        assistant_prefix = self.assistant_prefix_extra_first_message if first_message else self.assistant_prefix_extra
        if assistant_prefix and raw_llm_output[: len(assistant_prefix)] != assistant_prefix:
            # print(f"adding prefix back to llm, raw_llm_output=\n{raw_llm_output}")
            raw_llm_output = assistant_prefix + raw_llm_output
            # print(f"->\n{raw_llm_output}")

        try:
            function_json_output = clean_json(raw_llm_output)
        except Exception as e:
            raise Exception(f"Failed to decode JSON from LLM output:\n{raw_llm_output} - error\n{str(e)}")
        try:
            # NOTE: weird bug can happen where 'function' gets nested if the prefix in the prompt isn't abided by
            if isinstance(function_json_output["function"], dict):
                function_json_output = function_json_output["function"]
            # regular unpacking
            function_name = function_json_output["function"]
            function_parameters = function_json_output["params"]
        except KeyError as e:
            raise LLMJSONParsingError(
                f"Received valid JSON from LLM, but JSON was missing fields: {str(e)}. JSON result was:\n{function_json_output}"
            )

        if self.clean_func_args:
            (
                inner_thoughts,
                function_name,
                function_parameters,
            ) = self._clean_function_args(function_name, function_parameters)

        message = {
            "role": "assistant",
            "content": inner_thoughts,
            "function_call": {
                "name": function_name,
                "arguments": json_dumps(function_parameters),
            },
        }
        return message

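In the reverse direction, a sketch of how a raw completion is unpacked (completion text hypothetical): with first_message=True the forced prefix is prepended before JSON cleanup, then inner_thoughts is popped out of the params into "content".

    raw_llm_output = '\n  "params": {\n    "inner_thoughts": "First contact.",\n    "message": "Hello!"\n  }\n}'
    message = wrapper.output_to_chat_completion_response(raw_llm_output, first_message=True)
    # Roughly:
    #   {
    #     "role": "assistant",
    #     "content": "First contact.",
    #     "function_call": {"name": "send_message", "arguments": '{"message": "Hello!"}'},
    #   }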
class ChatMLOuterInnerMonologueWrapper(ChatMLInnerMonologueWrapper):
    """Moves the inner monologue outside the main function to allow the LLM to omit function calls

    NOTE: warning - this makes it easier for the agent to forget to call functions,
    so it is advised to use the function-forcing wrapper unless the LLM is very good

    ie instead of:
    {
      "function": "send_message",
      "params": {
        "inner_thoughts": "User has repeated the message. Recognizing repetition and taking a different approach.",
        "message": "It looks like you're repeating yourself, Chad. Is there something you're trying to express, or are you just testing me?"
      }
    }

    this wrapper does:
    {
      "inner_thoughts": "User has repeated the message. Recognizing repetition and taking a different approach.",
      "function": "send_message",
      "params": {
        "message": "It looks like you're repeating yourself, Chad. Is there something you're trying to express, or are you just testing me?"
      }
    }
    """

    # TODO find a way to support forcing the first func call
    supports_first_message = False

    def __init__(self, **kwargs):
        # Set a different default for assistant_prefix_extra if not provided
        kwargs.setdefault("assistant_prefix_extra", '\n{\n  "inner_thoughts":')
        super().__init__(**kwargs)

    def _compile_function_block(self, functions) -> str:
        """NOTE: modified to not include inner thoughts at all as extras"""
        prompt = ""

        prompt += " ".join(
            [
                "Please select the most suitable function and parameters from the list of available functions below, based on the ongoing conversation.",
                "Provide your response in JSON format.",
                "You must always include inner thoughts, but you do not always have to call a function.",
            ]
        )
        prompt += f"\nAvailable functions:"
        for function_dict in functions:
            prompt += f"\n{self._compile_function_description(function_dict, add_inner_thoughts=False)}"

        return prompt

    def _compile_function_call(self, function_call, inner_thoughts=None):
        """NOTE: Modified to put inner thoughts outside the function"""
        airo_func_call = {
            "inner_thoughts": inner_thoughts,
            "function": function_call["name"],
            "params": {
                # "inner_thoughts": inner_thoughts,
                **json_loads(function_call["arguments"]),
            },
        }
        return json_dumps(airo_func_call, indent=self.json_indent)

    def output_to_chat_completion_response(self, raw_llm_output, first_message=False):
        """NOTE: Modified to expect "inner_thoughts" outside the function

        Also, allow messages that have None/null function calls
        """

        # If we used a prefix to guide generation, we need to add it back to the output
        assistant_prefix = (
            self.assistant_prefix_extra_first_message if (self.supports_first_message and first_message) else self.assistant_prefix_extra
        )
        if assistant_prefix and raw_llm_output[: len(assistant_prefix)] != assistant_prefix:
            raw_llm_output = assistant_prefix + raw_llm_output

        try:
            function_json_output = clean_json(raw_llm_output)
        except Exception as e:
            raise Exception(f"Failed to decode JSON from LLM output:\n{raw_llm_output} - error\n{str(e)}")
        try:
            # NOTE: main diff
            inner_thoughts = function_json_output["inner_thoughts"]
            # NOTE: also have to account for "function": null
            if (
                "function" in function_json_output
                and function_json_output["function"] is not None
                and function_json_output["function"].strip().lower() != "none"
            ):
                # TODO apply lm studio nested bug patch?
                function_name = function_json_output["function"]
                function_parameters = function_json_output["params"]
            else:
                function_name = None
                function_parameters = None
        except KeyError as e:
            raise LLMJSONParsingError(f"Received valid JSON from LLM, but JSON was missing fields: {str(e)}")

        # TODO add some code to clean inner thoughts
        # e.g. fix this:
        """
        💭 I sense a new mind to engage with. Interesting...
        🤖 Hello, I'm Sam. Welcome to our conversation.
        > Enter your message: what do you know about me?
        💭: I've been observing our previous conversations. I remember that your name is Chad.
        🤖 I recall our previous interactions, Chad. How can I assist you today?
        > Enter your message: is that all you know about me?
        💭: I see you're curious about our connection. Let me do a quick search of my memory.
        """

        if function_name is not None and self.clean_func_args:
            (
                _inner_thoughts,  # NOTE: main diff (ignore)
                function_name,
                function_parameters,
            ) = self._clean_function_args(function_name, function_parameters)

        message = {
            "role": "assistant",
            "content": inner_thoughts,
            # "function_call": {
            #     "name": function_name,
            #     "arguments": json_dumps(function_parameters),
            # },
        }

        # Add the function if not none:
        if function_name is not None:
            message["function_call"] = {
                "name": function_name,
                "arguments": json_dumps(function_parameters),
            }

        return message
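The practical difference between the two wrappers, as a sketch (completion text hypothetical): the outer-monologue variant primes generation with an "inner_thoughts" prefix instead of a "function" prefix, and tolerates "function": null, so a reply may carry no function_call at all.

    outer = ChatMLOuterInnerMonologueWrapper()
    raw_llm_output = ' "No function call needed here.",\n  "function": null\n}'
    message = outer.output_to_chat_completion_response(raw_llm_output)
    # Roughly: {"role": "assistant", "content": "No function call needed here."}
    # (no "function_call" key, since "function" was null)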