letta-nightly 0.1.7.dev20240924104148__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of letta-nightly has been flagged as potentially problematic; consult the package registry's advisory page for details.
- letta/__init__.py +24 -0
- letta/__main__.py +3 -0
- letta/agent.py +1427 -0
- letta/agent_store/chroma.py +295 -0
- letta/agent_store/db.py +546 -0
- letta/agent_store/lancedb.py +177 -0
- letta/agent_store/milvus.py +198 -0
- letta/agent_store/qdrant.py +201 -0
- letta/agent_store/storage.py +188 -0
- letta/benchmark/benchmark.py +96 -0
- letta/benchmark/constants.py +14 -0
- letta/cli/cli.py +689 -0
- letta/cli/cli_config.py +1282 -0
- letta/cli/cli_load.py +166 -0
- letta/client/__init__.py +0 -0
- letta/client/admin.py +171 -0
- letta/client/client.py +2360 -0
- letta/client/streaming.py +90 -0
- letta/client/utils.py +61 -0
- letta/config.py +484 -0
- letta/configs/anthropic.json +13 -0
- letta/configs/letta_hosted.json +11 -0
- letta/configs/openai.json +12 -0
- letta/constants.py +134 -0
- letta/credentials.py +140 -0
- letta/data_sources/connectors.py +247 -0
- letta/embeddings.py +218 -0
- letta/errors.py +26 -0
- letta/functions/__init__.py +0 -0
- letta/functions/function_sets/base.py +174 -0
- letta/functions/function_sets/extras.py +132 -0
- letta/functions/functions.py +105 -0
- letta/functions/schema_generator.py +205 -0
- letta/humans/__init__.py +0 -0
- letta/humans/examples/basic.txt +1 -0
- letta/humans/examples/cs_phd.txt +9 -0
- letta/interface.py +314 -0
- letta/llm_api/__init__.py +0 -0
- letta/llm_api/anthropic.py +383 -0
- letta/llm_api/azure_openai.py +155 -0
- letta/llm_api/cohere.py +396 -0
- letta/llm_api/google_ai.py +468 -0
- letta/llm_api/llm_api_tools.py +485 -0
- letta/llm_api/openai.py +470 -0
- letta/local_llm/README.md +3 -0
- letta/local_llm/__init__.py +0 -0
- letta/local_llm/chat_completion_proxy.py +279 -0
- letta/local_llm/constants.py +31 -0
- letta/local_llm/function_parser.py +68 -0
- letta/local_llm/grammars/__init__.py +0 -0
- letta/local_llm/grammars/gbnf_grammar_generator.py +1324 -0
- letta/local_llm/grammars/json.gbnf +26 -0
- letta/local_llm/grammars/json_func_calls_with_inner_thoughts.gbnf +32 -0
- letta/local_llm/groq/api.py +97 -0
- letta/local_llm/json_parser.py +202 -0
- letta/local_llm/koboldcpp/api.py +62 -0
- letta/local_llm/koboldcpp/settings.py +23 -0
- letta/local_llm/llamacpp/api.py +58 -0
- letta/local_llm/llamacpp/settings.py +22 -0
- letta/local_llm/llm_chat_completion_wrappers/__init__.py +0 -0
- letta/local_llm/llm_chat_completion_wrappers/airoboros.py +452 -0
- letta/local_llm/llm_chat_completion_wrappers/chatml.py +470 -0
- letta/local_llm/llm_chat_completion_wrappers/configurable_wrapper.py +387 -0
- letta/local_llm/llm_chat_completion_wrappers/dolphin.py +246 -0
- letta/local_llm/llm_chat_completion_wrappers/llama3.py +345 -0
- letta/local_llm/llm_chat_completion_wrappers/simple_summary_wrapper.py +156 -0
- letta/local_llm/llm_chat_completion_wrappers/wrapper_base.py +11 -0
- letta/local_llm/llm_chat_completion_wrappers/zephyr.py +345 -0
- letta/local_llm/lmstudio/api.py +100 -0
- letta/local_llm/lmstudio/settings.py +29 -0
- letta/local_llm/ollama/api.py +88 -0
- letta/local_llm/ollama/settings.py +32 -0
- letta/local_llm/settings/__init__.py +0 -0
- letta/local_llm/settings/deterministic_mirostat.py +45 -0
- letta/local_llm/settings/settings.py +72 -0
- letta/local_llm/settings/simple.py +28 -0
- letta/local_llm/utils.py +265 -0
- letta/local_llm/vllm/api.py +63 -0
- letta/local_llm/webui/api.py +60 -0
- letta/local_llm/webui/legacy_api.py +58 -0
- letta/local_llm/webui/legacy_settings.py +23 -0
- letta/local_llm/webui/settings.py +24 -0
- letta/log.py +76 -0
- letta/main.py +437 -0
- letta/memory.py +440 -0
- letta/metadata.py +884 -0
- letta/openai_backcompat/__init__.py +0 -0
- letta/openai_backcompat/openai_object.py +437 -0
- letta/persistence_manager.py +148 -0
- letta/personas/__init__.py +0 -0
- letta/personas/examples/anna_pa.txt +13 -0
- letta/personas/examples/google_search_persona.txt +15 -0
- letta/personas/examples/memgpt_doc.txt +6 -0
- letta/personas/examples/memgpt_starter.txt +4 -0
- letta/personas/examples/sam.txt +14 -0
- letta/personas/examples/sam_pov.txt +14 -0
- letta/personas/examples/sam_simple_pov_gpt35.txt +13 -0
- letta/personas/examples/sqldb/test.db +0 -0
- letta/prompts/__init__.py +0 -0
- letta/prompts/gpt_summarize.py +14 -0
- letta/prompts/gpt_system.py +26 -0
- letta/prompts/system/memgpt_base.txt +49 -0
- letta/prompts/system/memgpt_chat.txt +58 -0
- letta/prompts/system/memgpt_chat_compressed.txt +13 -0
- letta/prompts/system/memgpt_chat_fstring.txt +51 -0
- letta/prompts/system/memgpt_doc.txt +50 -0
- letta/prompts/system/memgpt_gpt35_extralong.txt +53 -0
- letta/prompts/system/memgpt_intuitive_knowledge.txt +31 -0
- letta/prompts/system/memgpt_modified_chat.txt +23 -0
- letta/pytest.ini +0 -0
- letta/schemas/agent.py +117 -0
- letta/schemas/api_key.py +21 -0
- letta/schemas/block.py +135 -0
- letta/schemas/document.py +21 -0
- letta/schemas/embedding_config.py +54 -0
- letta/schemas/enums.py +35 -0
- letta/schemas/job.py +38 -0
- letta/schemas/letta_base.py +80 -0
- letta/schemas/letta_message.py +175 -0
- letta/schemas/letta_request.py +23 -0
- letta/schemas/letta_response.py +28 -0
- letta/schemas/llm_config.py +54 -0
- letta/schemas/memory.py +224 -0
- letta/schemas/message.py +727 -0
- letta/schemas/openai/chat_completion_request.py +123 -0
- letta/schemas/openai/chat_completion_response.py +136 -0
- letta/schemas/openai/chat_completions.py +123 -0
- letta/schemas/openai/embedding_response.py +11 -0
- letta/schemas/openai/openai.py +157 -0
- letta/schemas/organization.py +20 -0
- letta/schemas/passage.py +80 -0
- letta/schemas/source.py +62 -0
- letta/schemas/tool.py +143 -0
- letta/schemas/usage.py +18 -0
- letta/schemas/user.py +33 -0
- letta/server/__init__.py +0 -0
- letta/server/constants.py +6 -0
- letta/server/rest_api/__init__.py +0 -0
- letta/server/rest_api/admin/__init__.py +0 -0
- letta/server/rest_api/admin/agents.py +21 -0
- letta/server/rest_api/admin/tools.py +83 -0
- letta/server/rest_api/admin/users.py +98 -0
- letta/server/rest_api/app.py +193 -0
- letta/server/rest_api/auth/__init__.py +0 -0
- letta/server/rest_api/auth/index.py +43 -0
- letta/server/rest_api/auth_token.py +22 -0
- letta/server/rest_api/interface.py +726 -0
- letta/server/rest_api/routers/__init__.py +0 -0
- letta/server/rest_api/routers/openai/__init__.py +0 -0
- letta/server/rest_api/routers/openai/assistants/__init__.py +0 -0
- letta/server/rest_api/routers/openai/assistants/assistants.py +115 -0
- letta/server/rest_api/routers/openai/assistants/schemas.py +121 -0
- letta/server/rest_api/routers/openai/assistants/threads.py +336 -0
- letta/server/rest_api/routers/openai/chat_completions/__init__.py +0 -0
- letta/server/rest_api/routers/openai/chat_completions/chat_completions.py +131 -0
- letta/server/rest_api/routers/v1/__init__.py +15 -0
- letta/server/rest_api/routers/v1/agents.py +543 -0
- letta/server/rest_api/routers/v1/blocks.py +73 -0
- letta/server/rest_api/routers/v1/jobs.py +46 -0
- letta/server/rest_api/routers/v1/llms.py +28 -0
- letta/server/rest_api/routers/v1/organizations.py +61 -0
- letta/server/rest_api/routers/v1/sources.py +199 -0
- letta/server/rest_api/routers/v1/tools.py +103 -0
- letta/server/rest_api/routers/v1/users.py +109 -0
- letta/server/rest_api/static_files.py +74 -0
- letta/server/rest_api/utils.py +69 -0
- letta/server/server.py +1995 -0
- letta/server/startup.sh +8 -0
- letta/server/static_files/assets/index-0cbf7ad5.js +274 -0
- letta/server/static_files/assets/index-156816da.css +1 -0
- letta/server/static_files/assets/index-486e3228.js +274 -0
- letta/server/static_files/favicon.ico +0 -0
- letta/server/static_files/index.html +39 -0
- letta/server/static_files/memgpt_logo_transparent.png +0 -0
- letta/server/utils.py +46 -0
- letta/server/ws_api/__init__.py +0 -0
- letta/server/ws_api/example_client.py +104 -0
- letta/server/ws_api/interface.py +108 -0
- letta/server/ws_api/protocol.py +100 -0
- letta/server/ws_api/server.py +145 -0
- letta/settings.py +165 -0
- letta/streaming_interface.py +396 -0
- letta/system.py +207 -0
- letta/utils.py +1065 -0
- letta_nightly-0.1.7.dev20240924104148.dist-info/LICENSE +190 -0
- letta_nightly-0.1.7.dev20240924104148.dist-info/METADATA +98 -0
- letta_nightly-0.1.7.dev20240924104148.dist-info/RECORD +189 -0
- letta_nightly-0.1.7.dev20240924104148.dist-info/WHEEL +4 -0
- letta_nightly-0.1.7.dev20240924104148.dist-info/entry_points.txt +3 -0
|
@@ -0,0 +1,452 @@
|
|
|
1
|
+
from letta.utils import json_dumps, json_loads
|
|
2
|
+
|
|
3
|
+
from ...errors import LLMJSONParsingError
|
|
4
|
+
from ..json_parser import clean_json
|
|
5
|
+
from .wrapper_base import LLMChatCompletionWrapper
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class Airoboros21Wrapper(LLMChatCompletionWrapper):
    """Wrapper for Airoboros 70b v2.1: https://huggingface.co/jondurbin/airoboros-l2-70b-2.1

    Note: this wrapper formats a prompt that only generates JSON, no inner thoughts.
    """

    def __init__(
        self,
        simplify_json_content=True,
        clean_function_args=True,
        include_assistant_prefix=True,
        include_opening_brace_in_prefix=True,
        include_section_separators=True,
    ):
        """Configure how prompts are assembled and how model output is cleaned.

        Args:
            simplify_json_content: unwrap the "message" field out of JSON-wrapped
                user messages instead of showing the raw JSON envelope.
            clean_function_args: run Letta-specific cleanup on parsed function args.
            include_assistant_prefix: end the prompt with "ASSISTANT:" so the model
                continues as the assistant.
            include_opening_brace_in_prefix: additionally emit "{" after the
                assistant prefix to force a JSON response.
            include_section_separators: emit "### INPUT" / "### RESPONSE" markers.
        """
        self.simplify_json_content = simplify_json_content
        self.clean_func_args = clean_function_args
        self.include_assistant_prefix = include_assistant_prefix
        # NOTE(review): attribute keeps the historical "brance" spelling because
        # external code may already read/write it; renaming would break that.
        self.include_opening_brance_in_prefix = include_opening_brace_in_prefix
        self.include_section_separators = include_section_separators

    def chat_completion_to_prompt(self, messages, functions, function_documentation=None):
        """Render ChatCompletion-style messages into an Airoboros prompt string.

        Example for airoboros: https://huggingface.co/jondurbin/airoboros-l2-70b-2.1#prompt-format

        A chat.
        USER: {prompt}
        ASSISTANT:

        Functions support: https://huggingface.co/jondurbin/airoboros-l2-70b-2.1#agentfunction-calling

        As an AI assistant, please select the most suitable function and parameters
        from the list of available functions below, based on the user's input.
        Provide your response in JSON format.

        Input: I want to know how many times 'Python' is mentioned in my text file.

        Available functions:
        file_analytics:
          description: This tool performs various operations on a text file.
          params:
            action: The operation we want to perform on the data, such as "count_occurrences", "find_line", etc.
            filters:
              keyword: The word or phrase we want to search for.

        OpenAI functions schema style:

        {
            "name": "send_message",
            "description": "Sends a message to the human user",
            "parameters": {
                "type": "object",
                "properties": {
                    # https://json-schema.org/understanding-json-schema/reference/array.html
                    "message": {
                        "type": "string",
                        "description": "Message contents. All unicode (including emojis) are supported.",
                    },
                },
                "required": ["message"],
            }
        },
        """
        prompt = ""

        # System instructions go first
        assert messages[0]["role"] == "system"
        prompt += messages[0]["content"]

        # Next is the functions preamble
        def create_function_description(schema):
            """Render one OpenAI-style function schema in Airoboros's YAML-ish style."""
            func_str = ""
            func_str += f"{schema['name']}:"
            func_str += f"\n  description: {schema['description']}"
            func_str += "\n  params:"
            for param_k, param_v in schema["parameters"]["properties"].items():
                # TODO we're ignoring type
                func_str += f"\n    {param_k}: {param_v['description']}"
            # TODO we're ignoring schema['parameters']['required']
            return func_str

        prompt += "\nPlease select the most suitable function and parameters from the list of available functions below, based on the ongoing conversation. Provide your response in JSON format."
        prompt += "\nAvailable functions:"
        if function_documentation is not None:
            # Caller-supplied, pre-rendered documentation takes precedence.
            prompt += f"\n{function_documentation}"
        else:
            for function_dict in functions:
                prompt += f"\n{create_function_description(function_dict)}"

        def create_function_call(function_call):
            """Go from ChatCompletion to Airoboros style function trace (in prompt).

            ChatCompletion data (inside message['function_call']):
                "function_call": {
                    "name": ...
                    "arguments": {
                        "arg1": val1,
                        ...
                    }

            Airoboros output:
                {
                  "function": "send_message",
                  "params": {
                    "message": "Hello there! I am Sam, an AI developed by Liminal Corp. How can I assist you today?"
                  }
                }
            """
            airo_func_call = {
                "function": function_call["name"],
                "params": json_loads(function_call["arguments"]),
            }
            return json_dumps(airo_func_call, indent=2)

        # Add a sep for the conversation
        if self.include_section_separators:
            prompt += "\n### INPUT"

        # Last are the user/assistant messages
        for message in messages[1:]:
            assert message["role"] in ["user", "assistant", "function", "tool"], message

            if message["role"] == "user":
                if self.simplify_json_content:
                    # User content is often a JSON envelope; unwrap the "message"
                    # field when possible, else fall back to the raw content.
                    try:
                        content_json = json_loads(message["content"])
                        content_simple = content_json["message"]
                        prompt += f"\nUSER: {content_simple}"
                    except Exception:  # malformed JSON or missing "message" key
                        prompt += f"\nUSER: {message['content']}"
                else:
                    # BUG FIX: previously the user message was silently dropped
                    # when simplify_json_content was False.
                    prompt += f"\nUSER: {message['content']}"
            elif message["role"] == "assistant":
                prompt += f"\nASSISTANT: {message['content']}"
                # need to add the function call if there was one
                if "function_call" in message and message["function_call"]:
                    prompt += f"\n{create_function_call(message['function_call'])}"
            elif message["role"] in ["function", "tool"]:
                # TODO find a good way to add this
                prompt += f"\nFUNCTION RETURN: {message['content']}"
                continue
            else:
                raise ValueError(message)

        # Add a sep for the response
        if self.include_section_separators:
            prompt += "\n### RESPONSE"

        if self.include_assistant_prefix:
            prompt += "\nASSISTANT:"
            if self.include_opening_brance_in_prefix:
                # Force the model to begin with a JSON object.
                prompt += "\n{"

        return prompt

    def clean_function_args(self, function_name, function_args):
        """Some basic Letta-specific cleaning of function args.

        Returns:
            (cleaned_function_name, cleaned_function_args) — the input dict is
            copied, never mutated; a None args dict becomes {}.
        """
        cleaned_function_name = function_name
        cleaned_function_args = function_args.copy() if function_args is not None else {}

        if function_name == "send_message":
            # strip request_heartbeat
            cleaned_function_args.pop("request_heartbeat", None)

        # TODO more cleaning to fix errors LLM makes
        return cleaned_function_name, cleaned_function_args

    def output_to_chat_completion_response(self, raw_llm_output):
        """Turn raw LLM output into a ChatCompletion style response with:
        "message" = {
            "role": "assistant",
            "content": ...,
            "function_call": {
                "name": ...
                "arguments": {
                    "arg1": val1,
                    ...
                }
            }
        }

        Raises:
            Exception: the output could not be parsed as JSON.
            LLMJSONParsingError: valid JSON but missing "function"/"params" fields.
        """
        # If we asked the model to continue from "{", put it back before parsing.
        # startswith() also handles empty output safely (indexing would raise).
        if self.include_opening_brance_in_prefix and not raw_llm_output.startswith("{"):
            raw_llm_output = "{" + raw_llm_output

        try:
            function_json_output = clean_json(raw_llm_output)
        except Exception as e:
            raise Exception(f"Failed to decode JSON from LLM output:\n{raw_llm_output} - error\n{str(e)}") from e
        try:
            function_name = function_json_output["function"]
            function_parameters = function_json_output["params"]
        except KeyError as e:
            raise LLMJSONParsingError(f"Received valid JSON from LLM, but JSON was missing fields: {str(e)}") from e

        if self.clean_func_args:
            function_name, function_parameters = self.clean_function_args(function_name, function_parameters)

        message = {
            "role": "assistant",
            "content": None,  # this wrapper has no inner monologue field
            "function_call": {
                "name": function_name,
                "arguments": json_dumps(function_parameters),
            },
        }
        return message
|
212
|
+
|
|
213
|
+
|
|
214
|
+
class Airoboros21InnerMonologueWrapper(Airoboros21Wrapper):
    """Still expect only JSON outputs from model, but add inner monologue as a field."""

    def __init__(
        self,
        simplify_json_content=True,
        clean_function_args=True,
        include_assistant_prefix=True,
        assistant_prefix_extra='\n{\n  "function":',
        include_section_separators=True,
    ):
        """Configure the inner-monologue variant of the Airoboros wrapper.

        Args:
            simplify_json_content: unwrap the "message" field out of JSON-wrapped
                user messages instead of showing the raw JSON envelope.
            clean_function_args: run Letta-specific cleanup on parsed function args
                (also extracts inner_thoughts from the params).
            include_assistant_prefix: end the prompt with "ASSISTANT:".
            assistant_prefix_extra: text appended after the assistant prefix to
                force the JSON shape; re-prepended to the raw output before parsing.
            include_section_separators: emit "### INPUT" / "### RESPONSE" markers.
        """
        # NOTE(review): deliberately does not call super().__init__ — this class
        # uses assistant_prefix_extra instead of the parent's opening-brace flag.
        self.simplify_json_content = simplify_json_content
        self.clean_func_args = clean_function_args
        self.include_assistant_prefix = include_assistant_prefix
        self.assistant_prefix_extra = assistant_prefix_extra
        self.include_section_separators = include_section_separators

    def chat_completion_to_prompt(self, messages, functions, function_documentation=None):
        """Render ChatCompletion-style messages into an Airoboros prompt string,
        asking the model for an extra "inner_thoughts" param in its JSON output.

        Example for airoboros: https://huggingface.co/jondurbin/airoboros-l2-70b-2.1#prompt-format

        A chat.
        USER: {prompt}
        ASSISTANT:

        Functions support: https://huggingface.co/jondurbin/airoboros-l2-70b-2.1#agentfunction-calling

        As an AI assistant, please select the most suitable function and parameters
        from the list of available functions below, based on the user's input.
        Provide your response in JSON format.

        Input: I want to know how many times 'Python' is mentioned in my text file.

        Available functions:
        file_analytics:
          description: This tool performs various operations on a text file.
          params:
            action: The operation we want to perform on the data, such as "count_occurrences", "find_line", etc.
            filters:
              keyword: The word or phrase we want to search for.

        OpenAI functions schema style:

        {
            "name": "send_message",
            "description": "Sends a message to the human user",
            "parameters": {
                "type": "object",
                "properties": {
                    # https://json-schema.org/understanding-json-schema/reference/array.html
                    "message": {
                        "type": "string",
                        "description": "Message contents. All unicode (including emojis) are supported.",
                    },
                },
                "required": ["message"],
            }
        },
        """
        prompt = ""

        # System instructions go first
        assert messages[0]["role"] == "system"
        prompt += messages[0]["content"]

        # Next is the functions preamble
        def create_function_description(schema, add_inner_thoughts=True):
            """Render one function schema in Airoboros's YAML-ish style,
            advertising an extra inner_thoughts param."""
            func_str = ""
            func_str += f"{schema['name']}:"
            func_str += f"\n  description: {schema['description']}"
            func_str += "\n  params:"
            if add_inner_thoughts:
                func_str += "\n    inner_thoughts: Deep inner monologue private to you only."
            for param_k, param_v in schema["parameters"]["properties"].items():
                # TODO we're ignoring type
                func_str += f"\n    {param_k}: {param_v['description']}"
            # TODO we're ignoring schema['parameters']['required']
            return func_str

        prompt += "\nPlease select the most suitable function and parameters from the list of available functions below, based on the ongoing conversation. Provide your response in JSON format."
        prompt += "\nAvailable functions:"
        if function_documentation is not None:
            # Caller-supplied, pre-rendered documentation takes precedence.
            prompt += f"\n{function_documentation}"
        else:
            for function_dict in functions:
                prompt += f"\n{create_function_description(function_dict)}"

        def create_function_call(function_call, inner_thoughts=None):
            """Go from ChatCompletion to Airoboros style function trace (in prompt),
            injecting inner_thoughts as the first param.

            ChatCompletion data (inside message['function_call']):
                "function_call": {
                    "name": ...
                    "arguments": {
                        "arg1": val1,
                        ...
                    }

            Airoboros output:
                {
                  "function": "send_message",
                  "params": {
                    "message": "Hello there! I am Sam, an AI developed by Liminal Corp. How can I assist you today?"
                  }
                }
            """
            airo_func_call = {
                "function": function_call["name"],
                "params": {
                    "inner_thoughts": inner_thoughts,
                    **json_loads(function_call["arguments"]),
                },
            }
            return json_dumps(airo_func_call, indent=2)

        # Add a sep for the conversation
        if self.include_section_separators:
            prompt += "\n### INPUT"

        # Last are the user/assistant messages
        for message in messages[1:]:
            assert message["role"] in ["user", "assistant", "function", "tool"], message

            if message["role"] == "user":
                # Support for AutoGen naming of agents
                if "name" in message:
                    user_prefix = message["name"].strip()
                    user_prefix = f"USER ({user_prefix})"
                else:
                    user_prefix = "USER"
                if self.simplify_json_content:
                    # User content is often a JSON envelope; unwrap the "message"
                    # field when possible, else fall back to the raw content.
                    try:
                        content_json = json_loads(message["content"])
                        content_simple = content_json["message"]
                        prompt += f"\n{user_prefix}: {content_simple}"
                    except Exception:  # malformed JSON or missing "message" key
                        prompt += f"\n{user_prefix}: {message['content']}"
                else:
                    # BUG FIX: previously the user message was silently dropped
                    # when simplify_json_content was False.
                    prompt += f"\n{user_prefix}: {message['content']}"
            elif message["role"] == "assistant":
                # Support for AutoGen naming of agents
                if "name" in message:
                    assistant_prefix = message["name"].strip()
                    assistant_prefix = f"ASSISTANT ({assistant_prefix})"
                else:
                    assistant_prefix = "ASSISTANT"
                prompt += f"\n{assistant_prefix}:"
                # need to add the function call if there was one
                # NOTE(review): assistant content only appears inside the function
                # call JSON (as inner_thoughts); with no function_call it is
                # dropped — TODO confirm this is intended.
                inner_thoughts = message["content"]
                if "function_call" in message and message["function_call"]:
                    prompt += f"\n{create_function_call(message['function_call'], inner_thoughts=inner_thoughts)}"
            elif message["role"] in ["function", "tool"]:
                # TODO find a good way to add this
                prompt += f"\nFUNCTION RETURN: {message['content']}"
                continue
            else:
                raise ValueError(message)

        # Add a sep for the response
        if self.include_section_separators:
            prompt += "\n### RESPONSE"

        if self.include_assistant_prefix:
            prompt += "\nASSISTANT:"
            if self.assistant_prefix_extra:
                # Force the model to begin mid-JSON so it must complete the object.
                prompt += self.assistant_prefix_extra

        return prompt

    def clean_function_args(self, function_name, function_args):
        """Some basic Letta-specific cleaning of function args.

        Returns:
            (inner_thoughts, cleaned_function_name, cleaned_function_args) —
            note the extra leading element relative to the parent class.
        """
        cleaned_function_name = function_name
        cleaned_function_args = function_args.copy() if function_args is not None else {}

        if function_name == "send_message":
            # strip request_heartbeat
            cleaned_function_args.pop("request_heartbeat", None)

        # BUG FIX: pop from the (always-dict) copy with a default instead of
        # testing membership on function_args, which raised TypeError when None.
        inner_thoughts = cleaned_function_args.pop("inner_thoughts", None)

        # TODO more cleaning to fix errors LLM makes
        return inner_thoughts, cleaned_function_name, cleaned_function_args

    def output_to_chat_completion_response(self, raw_llm_output):
        """Turn raw LLM output into a ChatCompletion style response with:
        "message" = {
            "role": "assistant",
            "content": ...,
            "function_call": {
                "name": ...
                "arguments": {
                    "arg1": val1,
                    ...
                }
            }
        }

        Raises:
            Exception: the output could not be parsed as JSON.
            LLMJSONParsingError: valid JSON but missing "function"/"params" fields.
        """
        # Re-attach the forced prefix if the model didn't echo it back.
        if self.assistant_prefix_extra and not raw_llm_output.startswith(self.assistant_prefix_extra):
            raw_llm_output = self.assistant_prefix_extra + raw_llm_output

        try:
            function_json_output = clean_json(raw_llm_output)
        except Exception as e:
            raise Exception(f"Failed to decode JSON from LLM output:\n{raw_llm_output} - error\n{str(e)}") from e
        try:
            # NOTE: weird bug can happen where 'function' gets nested if the prefix in the prompt isn't abided by
            if isinstance(function_json_output["function"], dict):
                function_json_output = function_json_output["function"]
            function_name = function_json_output["function"]
            function_parameters = function_json_output["params"]
        except KeyError as e:
            raise LLMJSONParsingError(
                f"Received valid JSON from LLM, but JSON was missing fields: {str(e)}. JSON result was:\n{function_json_output}"
            ) from e

        # BUG FIX: initialize so the message dict below doesn't hit a NameError
        # when clean_func_args is disabled.
        inner_thoughts = None
        if self.clean_func_args:
            (
                inner_thoughts,
                function_name,
                function_parameters,
            ) = self.clean_function_args(function_name, function_parameters)

        message = {
            "role": "assistant",
            "content": inner_thoughts,
            "function_call": {
                "name": function_name,
                "arguments": json_dumps(function_parameters),
            },
        }
        return message