letta-nightly 0.1.7.dev20240924104148__py3-none-any.whl

This diff shows the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their public registries.

Potentially problematic release: this version of letta-nightly might be problematic.

Files changed (189)
  1. letta/__init__.py +24 -0
  2. letta/__main__.py +3 -0
  3. letta/agent.py +1427 -0
  4. letta/agent_store/chroma.py +295 -0
  5. letta/agent_store/db.py +546 -0
  6. letta/agent_store/lancedb.py +177 -0
  7. letta/agent_store/milvus.py +198 -0
  8. letta/agent_store/qdrant.py +201 -0
  9. letta/agent_store/storage.py +188 -0
  10. letta/benchmark/benchmark.py +96 -0
  11. letta/benchmark/constants.py +14 -0
  12. letta/cli/cli.py +689 -0
  13. letta/cli/cli_config.py +1282 -0
  14. letta/cli/cli_load.py +166 -0
  15. letta/client/__init__.py +0 -0
  16. letta/client/admin.py +171 -0
  17. letta/client/client.py +2360 -0
  18. letta/client/streaming.py +90 -0
  19. letta/client/utils.py +61 -0
  20. letta/config.py +484 -0
  21. letta/configs/anthropic.json +13 -0
  22. letta/configs/letta_hosted.json +11 -0
  23. letta/configs/openai.json +12 -0
  24. letta/constants.py +134 -0
  25. letta/credentials.py +140 -0
  26. letta/data_sources/connectors.py +247 -0
  27. letta/embeddings.py +218 -0
  28. letta/errors.py +26 -0
  29. letta/functions/__init__.py +0 -0
  30. letta/functions/function_sets/base.py +174 -0
  31. letta/functions/function_sets/extras.py +132 -0
  32. letta/functions/functions.py +105 -0
  33. letta/functions/schema_generator.py +205 -0
  34. letta/humans/__init__.py +0 -0
  35. letta/humans/examples/basic.txt +1 -0
  36. letta/humans/examples/cs_phd.txt +9 -0
  37. letta/interface.py +314 -0
  38. letta/llm_api/__init__.py +0 -0
  39. letta/llm_api/anthropic.py +383 -0
  40. letta/llm_api/azure_openai.py +155 -0
  41. letta/llm_api/cohere.py +396 -0
  42. letta/llm_api/google_ai.py +468 -0
  43. letta/llm_api/llm_api_tools.py +485 -0
  44. letta/llm_api/openai.py +470 -0
  45. letta/local_llm/README.md +3 -0
  46. letta/local_llm/__init__.py +0 -0
  47. letta/local_llm/chat_completion_proxy.py +279 -0
  48. letta/local_llm/constants.py +31 -0
  49. letta/local_llm/function_parser.py +68 -0
  50. letta/local_llm/grammars/__init__.py +0 -0
  51. letta/local_llm/grammars/gbnf_grammar_generator.py +1324 -0
  52. letta/local_llm/grammars/json.gbnf +26 -0
  53. letta/local_llm/grammars/json_func_calls_with_inner_thoughts.gbnf +32 -0
  54. letta/local_llm/groq/api.py +97 -0
  55. letta/local_llm/json_parser.py +202 -0
  56. letta/local_llm/koboldcpp/api.py +62 -0
  57. letta/local_llm/koboldcpp/settings.py +23 -0
  58. letta/local_llm/llamacpp/api.py +58 -0
  59. letta/local_llm/llamacpp/settings.py +22 -0
  60. letta/local_llm/llm_chat_completion_wrappers/__init__.py +0 -0
  61. letta/local_llm/llm_chat_completion_wrappers/airoboros.py +452 -0
  62. letta/local_llm/llm_chat_completion_wrappers/chatml.py +470 -0
  63. letta/local_llm/llm_chat_completion_wrappers/configurable_wrapper.py +387 -0
  64. letta/local_llm/llm_chat_completion_wrappers/dolphin.py +246 -0
  65. letta/local_llm/llm_chat_completion_wrappers/llama3.py +345 -0
  66. letta/local_llm/llm_chat_completion_wrappers/simple_summary_wrapper.py +156 -0
  67. letta/local_llm/llm_chat_completion_wrappers/wrapper_base.py +11 -0
  68. letta/local_llm/llm_chat_completion_wrappers/zephyr.py +345 -0
  69. letta/local_llm/lmstudio/api.py +100 -0
  70. letta/local_llm/lmstudio/settings.py +29 -0
  71. letta/local_llm/ollama/api.py +88 -0
  72. letta/local_llm/ollama/settings.py +32 -0
  73. letta/local_llm/settings/__init__.py +0 -0
  74. letta/local_llm/settings/deterministic_mirostat.py +45 -0
  75. letta/local_llm/settings/settings.py +72 -0
  76. letta/local_llm/settings/simple.py +28 -0
  77. letta/local_llm/utils.py +265 -0
  78. letta/local_llm/vllm/api.py +63 -0
  79. letta/local_llm/webui/api.py +60 -0
  80. letta/local_llm/webui/legacy_api.py +58 -0
  81. letta/local_llm/webui/legacy_settings.py +23 -0
  82. letta/local_llm/webui/settings.py +24 -0
  83. letta/log.py +76 -0
  84. letta/main.py +437 -0
  85. letta/memory.py +440 -0
  86. letta/metadata.py +884 -0
  87. letta/openai_backcompat/__init__.py +0 -0
  88. letta/openai_backcompat/openai_object.py +437 -0
  89. letta/persistence_manager.py +148 -0
  90. letta/personas/__init__.py +0 -0
  91. letta/personas/examples/anna_pa.txt +13 -0
  92. letta/personas/examples/google_search_persona.txt +15 -0
  93. letta/personas/examples/memgpt_doc.txt +6 -0
  94. letta/personas/examples/memgpt_starter.txt +4 -0
  95. letta/personas/examples/sam.txt +14 -0
  96. letta/personas/examples/sam_pov.txt +14 -0
  97. letta/personas/examples/sam_simple_pov_gpt35.txt +13 -0
  98. letta/personas/examples/sqldb/test.db +0 -0
  99. letta/prompts/__init__.py +0 -0
  100. letta/prompts/gpt_summarize.py +14 -0
  101. letta/prompts/gpt_system.py +26 -0
  102. letta/prompts/system/memgpt_base.txt +49 -0
  103. letta/prompts/system/memgpt_chat.txt +58 -0
  104. letta/prompts/system/memgpt_chat_compressed.txt +13 -0
  105. letta/prompts/system/memgpt_chat_fstring.txt +51 -0
  106. letta/prompts/system/memgpt_doc.txt +50 -0
  107. letta/prompts/system/memgpt_gpt35_extralong.txt +53 -0
  108. letta/prompts/system/memgpt_intuitive_knowledge.txt +31 -0
  109. letta/prompts/system/memgpt_modified_chat.txt +23 -0
  110. letta/pytest.ini +0 -0
  111. letta/schemas/agent.py +117 -0
  112. letta/schemas/api_key.py +21 -0
  113. letta/schemas/block.py +135 -0
  114. letta/schemas/document.py +21 -0
  115. letta/schemas/embedding_config.py +54 -0
  116. letta/schemas/enums.py +35 -0
  117. letta/schemas/job.py +38 -0
  118. letta/schemas/letta_base.py +80 -0
  119. letta/schemas/letta_message.py +175 -0
  120. letta/schemas/letta_request.py +23 -0
  121. letta/schemas/letta_response.py +28 -0
  122. letta/schemas/llm_config.py +54 -0
  123. letta/schemas/memory.py +224 -0
  124. letta/schemas/message.py +727 -0
  125. letta/schemas/openai/chat_completion_request.py +123 -0
  126. letta/schemas/openai/chat_completion_response.py +136 -0
  127. letta/schemas/openai/chat_completions.py +123 -0
  128. letta/schemas/openai/embedding_response.py +11 -0
  129. letta/schemas/openai/openai.py +157 -0
  130. letta/schemas/organization.py +20 -0
  131. letta/schemas/passage.py +80 -0
  132. letta/schemas/source.py +62 -0
  133. letta/schemas/tool.py +143 -0
  134. letta/schemas/usage.py +18 -0
  135. letta/schemas/user.py +33 -0
  136. letta/server/__init__.py +0 -0
  137. letta/server/constants.py +6 -0
  138. letta/server/rest_api/__init__.py +0 -0
  139. letta/server/rest_api/admin/__init__.py +0 -0
  140. letta/server/rest_api/admin/agents.py +21 -0
  141. letta/server/rest_api/admin/tools.py +83 -0
  142. letta/server/rest_api/admin/users.py +98 -0
  143. letta/server/rest_api/app.py +193 -0
  144. letta/server/rest_api/auth/__init__.py +0 -0
  145. letta/server/rest_api/auth/index.py +43 -0
  146. letta/server/rest_api/auth_token.py +22 -0
  147. letta/server/rest_api/interface.py +726 -0
  148. letta/server/rest_api/routers/__init__.py +0 -0
  149. letta/server/rest_api/routers/openai/__init__.py +0 -0
  150. letta/server/rest_api/routers/openai/assistants/__init__.py +0 -0
  151. letta/server/rest_api/routers/openai/assistants/assistants.py +115 -0
  152. letta/server/rest_api/routers/openai/assistants/schemas.py +121 -0
  153. letta/server/rest_api/routers/openai/assistants/threads.py +336 -0
  154. letta/server/rest_api/routers/openai/chat_completions/__init__.py +0 -0
  155. letta/server/rest_api/routers/openai/chat_completions/chat_completions.py +131 -0
  156. letta/server/rest_api/routers/v1/__init__.py +15 -0
  157. letta/server/rest_api/routers/v1/agents.py +543 -0
  158. letta/server/rest_api/routers/v1/blocks.py +73 -0
  159. letta/server/rest_api/routers/v1/jobs.py +46 -0
  160. letta/server/rest_api/routers/v1/llms.py +28 -0
  161. letta/server/rest_api/routers/v1/organizations.py +61 -0
  162. letta/server/rest_api/routers/v1/sources.py +199 -0
  163. letta/server/rest_api/routers/v1/tools.py +103 -0
  164. letta/server/rest_api/routers/v1/users.py +109 -0
  165. letta/server/rest_api/static_files.py +74 -0
  166. letta/server/rest_api/utils.py +69 -0
  167. letta/server/server.py +1995 -0
  168. letta/server/startup.sh +8 -0
  169. letta/server/static_files/assets/index-0cbf7ad5.js +274 -0
  170. letta/server/static_files/assets/index-156816da.css +1 -0
  171. letta/server/static_files/assets/index-486e3228.js +274 -0
  172. letta/server/static_files/favicon.ico +0 -0
  173. letta/server/static_files/index.html +39 -0
  174. letta/server/static_files/memgpt_logo_transparent.png +0 -0
  175. letta/server/utils.py +46 -0
  176. letta/server/ws_api/__init__.py +0 -0
  177. letta/server/ws_api/example_client.py +104 -0
  178. letta/server/ws_api/interface.py +108 -0
  179. letta/server/ws_api/protocol.py +100 -0
  180. letta/server/ws_api/server.py +145 -0
  181. letta/settings.py +165 -0
  182. letta/streaming_interface.py +396 -0
  183. letta/system.py +207 -0
  184. letta/utils.py +1065 -0
  185. letta_nightly-0.1.7.dev20240924104148.dist-info/LICENSE +190 -0
  186. letta_nightly-0.1.7.dev20240924104148.dist-info/METADATA +98 -0
  187. letta_nightly-0.1.7.dev20240924104148.dist-info/RECORD +189 -0
  188. letta_nightly-0.1.7.dev20240924104148.dist-info/WHEEL +4 -0
  189. letta_nightly-0.1.7.dev20240924104148.dist-info/entry_points.txt +3 -0
letta/local_llm/llm_chat_completion_wrappers/llama3.py
@@ -0,0 +1,345 @@
+ from letta.errors import LLMJSONParsingError
+ from letta.local_llm.json_parser import clean_json
+ from letta.local_llm.llm_chat_completion_wrappers.wrapper_base import (
+     LLMChatCompletionWrapper,
+ )
+ from letta.utils import json_dumps, json_loads
+
+ PREFIX_HINT = """# Reminders:
+ # Important information about yourself and the user is stored in (limited) core memory
+ # You can modify core memory with core_memory_replace
+ # You can add to core memory with core_memory_append
+ # Less important information is stored in (unlimited) archival memory
+ # You can add to archival memory with archival_memory_insert
+ # You can search archival memory with archival_memory_search
+ # You will always see the statistics of archival memory, so you know if there is content inside it
+ # If you receive new important information about the user (or yourself), you immediately update your memory with core_memory_replace, core_memory_append, or archival_memory_insert"""
+
+ FIRST_PREFIX_HINT = """# Reminders:
+ # This is your first interaction with the user!
+ # Initial information about them is provided in the core memory user block
+ # Make sure to introduce yourself to them
+ # Your inner thoughts should be private, interesting, and creative
+ # Do NOT use inner thoughts to communicate with the user
+ # Use send_message to communicate with the user"""
+ # Don't forget to use send_message, otherwise the user won't see your message"""
+
+
+ class LLaMA3InnerMonologueWrapper(LLMChatCompletionWrapper):
+     """ChatML-style prompt formatter, tested for use with https://huggingface.co/meta-llama/Meta-Llama-3-8B-Instruct"""
+
+     supports_first_message = True
+
+     def __init__(
+         self,
+         json_indent=2,
+         # simplify_json_content=True,
+         simplify_json_content=False,
+         clean_function_args=True,
+         include_assistant_prefix=True,
+         assistant_prefix_extra='\n{\n "function":',
+         assistant_prefix_extra_first_message='\n{\n "function": "send_message",',
+         allow_custom_roles=True,  # allow roles outside user/assistant
+         use_system_role_in_user=False,  # use the system role on user messages that don't use "type: user_message"
+         # allow_function_role=True,  # use function role for function replies?
+         allow_function_role=False,  # use function role for function replies?
+         no_function_role_role="assistant",  # if no function role, which role to use?
+         no_function_role_prefix="FUNCTION RETURN:\n",  # if no function role, what prefix to use?
+         # add a guiding hint
+         assistant_prefix_hint=False,
+     ):
+         self.simplify_json_content = simplify_json_content
+         self.clean_func_args = clean_function_args
+         self.include_assistant_prefix = include_assistant_prefix
+         self.assistant_prefix_extra = assistant_prefix_extra
+         self.assistant_prefix_extra_first_message = assistant_prefix_extra_first_message
+         self.assistant_prefix_hint = assistant_prefix_hint
+
+         # role-based
+         self.allow_custom_roles = allow_custom_roles
+         self.use_system_role_in_user = use_system_role_in_user
+         self.allow_function_role = allow_function_role
+         # extras for when the function role is disallowed
+         self.no_function_role_role = no_function_role_role
+         self.no_function_role_prefix = no_function_role_prefix
+
+         # how to set json in prompt
+         self.json_indent = json_indent
+
+     def _compile_function_description(self, schema, add_inner_thoughts=True) -> str:
+         """Go from a JSON schema to a string description for a prompt"""
+         # airoboros style
+         func_str = ""
+         func_str += f"{schema['name']}:"
+         func_str += f"\n description: {schema['description']}"
+         func_str += "\n params:"
+         if add_inner_thoughts:
+             from letta.local_llm.constants import (
+                 INNER_THOUGHTS_KWARG,
+                 INNER_THOUGHTS_KWARG_DESCRIPTION,
+             )
+
+             func_str += f"\n {INNER_THOUGHTS_KWARG}: {INNER_THOUGHTS_KWARG_DESCRIPTION}"
+         for param_k, param_v in schema["parameters"]["properties"].items():
+             # TODO we're ignoring type
+             func_str += f"\n {param_k}: {param_v['description']}"
+         # TODO we're ignoring schema['parameters']['required']
+         return func_str
+
+     def _compile_function_block(self, functions) -> str:
+         """functions dict -> string describing functions choices"""
+         prompt = ""
+
+         # prompt += f"\nPlease select the most suitable function and parameters from the list of available functions below, based on the user's input. Provide your response in JSON format."
+         prompt += "Please select the most suitable function and parameters from the list of available functions below, based on the ongoing conversation. Provide your response in JSON format."
+         prompt += "\nAvailable functions:"
+         for function_dict in functions:
+             prompt += f"\n{self._compile_function_description(function_dict)}"
+
+         return prompt
+
+     # NOTE: BOS/EOS chatml tokens are NOT inserted here
+     def _compile_system_message(self, system_message, functions, function_documentation=None) -> str:
+         """system prompt + memory + functions -> string"""
+         prompt = ""
+         prompt += system_message
+         prompt += "\n"
+         if function_documentation is not None:
+             prompt += "Please select the most suitable function and parameters from the list of available functions below, based on the ongoing conversation. Provide your response in JSON format."
+             prompt += "\nAvailable functions:\n"
+             prompt += function_documentation
+         else:
+             prompt += self._compile_function_block(functions)
+         return prompt
+
+     def _compile_function_call(self, function_call, inner_thoughts=None):
+         """Go from ChatCompletion to Airoboros style function trace (in prompt)
+
+         ChatCompletion data (inside message['function_call']):
+         "function_call": {
+             "name": ...
+             "arguments": {
+                 "arg1": val1,
+                 ...
+             }
+
+         Airoboros output:
+         {
+           "function": "send_message",
+           "params": {
+             "message": "Hello there! I am Sam, an AI developed by Liminal Corp. How can I assist you today?"
+           }
+         }
+         """
+         airo_func_call = {
+             "function": function_call["name"],
+             "params": {
+                 "inner_thoughts": inner_thoughts,
+                 **json_loads(function_call["arguments"]),
+             },
+         }
+         return json_dumps(airo_func_call, indent=self.json_indent)
+
+     # NOTE: BOS/EOS chatml tokens are NOT inserted here
+     def _compile_assistant_message(self, message) -> str:
+         """assistant message -> string"""
+         prompt = ""
+
+         # need to add the function call if there was one
+         inner_thoughts = message["content"]
+         if "function_call" in message and message["function_call"]:
+             prompt += f"\n{self._compile_function_call(message['function_call'], inner_thoughts=inner_thoughts)}"
+         elif "tool_calls" in message and message["tool_calls"]:
+             for tool_call in message["tool_calls"]:
+                 prompt += f"\n{self._compile_function_call(tool_call['function'], inner_thoughts=inner_thoughts)}"
+         else:
+             # TODO should we format this into JSON somehow?
+             prompt += inner_thoughts
+
+         return prompt
+
+     # NOTE: BOS/EOS chatml tokens are NOT inserted here
+     def _compile_user_message(self, message) -> str:
+         """user message (should be JSON) -> string"""
+         prompt = ""
+         if self.simplify_json_content:
+             # Make user messages not JSON but plaintext instead
+             try:
+                 user_msg_json = json_loads(message["content"])
+                 user_msg_str = user_msg_json["message"]
+             except:
+                 user_msg_str = message["content"]
+         else:
+             # Otherwise just dump the full json
+             try:
+                 user_msg_json = json_loads(message["content"])
+                 user_msg_str = json_dumps(
+                     user_msg_json,
+                     indent=self.json_indent,
+                 )
+             except:
+                 user_msg_str = message["content"]
+
+         prompt += user_msg_str
+         return prompt
+
+     # NOTE: BOS/EOS chatml tokens are NOT inserted here
+     def _compile_function_response(self, message) -> str:
+         """function response message (should be JSON) -> string"""
+         # TODO we should clean up send_message returns to avoid cluttering the prompt
+         prompt = ""
+         try:
+             # indent the function replies
+             function_return_dict = json_loads(message["content"])
+             function_return_str = json_dumps(
+                 function_return_dict,
+                 indent=self.json_indent,
+             )
+         except:
+             function_return_str = message["content"]
+
+         prompt += function_return_str
+         return prompt
+
+     def chat_completion_to_prompt(self, messages, functions, first_message=False, function_documentation=None):
+         """chatml-style prompt formatting, with implied support for multi-role"""
+         prompt = "<|begin_of_text|>"
+
+         # System instructions go first
+         assert messages[0]["role"] == "system"
+         system_block = self._compile_system_message(
+             system_message=messages[0]["content"],
+             functions=functions,
+             function_documentation=function_documentation,
+         )
+         prompt += f"<|start_header_id|>system<|end_header_id|>\n\n{system_block.strip()}<|eot_id|>"
+
+         # Last are the user/assistant messages
+         for message in messages[1:]:
+             assert message["role"] in ["user", "assistant", "function", "tool"], message
+
+             if message["role"] == "user":
+                 # Support for AutoGen naming of agents
+                 role_str = message["name"].strip().lower() if (self.allow_custom_roles and "name" in message) else message["role"]
+                 msg_str = self._compile_user_message(message)
+
+                 if self.use_system_role_in_user:
+                     try:
+                         msg_json = json_loads(message["content"])
+                         if msg_json["type"] != "user_message":
+                             role_str = "system"
+                     except:
+                         pass
+                 prompt += f"\n<|start_header_id|>{role_str}<|end_header_id|>\n\n{msg_str.strip()}<|eot_id|>"
+
+             elif message["role"] == "assistant":
+                 # Support for AutoGen naming of agents
+                 role_str = message["name"].strip().lower() if (self.allow_custom_roles and "name" in message) else message["role"]
+                 msg_str = self._compile_assistant_message(message)
+
+                 prompt += f"\n<|start_header_id|>{role_str}<|end_header_id|>\n\n{msg_str.strip()}<|eot_id|>"
+
+             elif message["role"] in ["tool", "function"]:
+                 if self.allow_function_role:
+                     role_str = message["role"]
+                     msg_str = self._compile_function_response(message)
+                     prompt += f"\n<|start_header_id|>{role_str}<|end_header_id|>\n\n{msg_str.strip()}<|eot_id|>"
+                 else:
+                     # TODO figure out what to do with functions if we disallow function role
+                     role_str = self.no_function_role_role
+                     msg_str = self._compile_function_response(message)
+                     func_resp_prefix = self.no_function_role_prefix
+                     # NOTE whatever the special prefix is, it should also be a stop token
+                     prompt += f"\n<|start_header_id|>{role_str}\n{func_resp_prefix}{msg_str.strip()}<|eot_id|>"
+
+             else:
+                 raise ValueError(message)
+
+         if self.include_assistant_prefix:
+             prompt += "\n<|start_header_id|>assistant\n\n"
+             if self.assistant_prefix_hint:
+                 prompt += f"\n{FIRST_PREFIX_HINT if first_message else PREFIX_HINT}"
+             if self.supports_first_message and first_message:
+                 if self.assistant_prefix_extra_first_message:
+                     prompt += self.assistant_prefix_extra_first_message
+             else:
+                 if self.assistant_prefix_extra:
+                     # assistant_prefix_extra='\n{\n "function":',
+                     prompt += self.assistant_prefix_extra
+
+         return prompt
+
+     def _clean_function_args(self, function_name, function_args):
+         """Some basic Letta-specific cleaning of function args"""
+         cleaned_function_name = function_name
+         cleaned_function_args = function_args.copy() if function_args is not None else {}
+
+         if function_name == "send_message":
+             # strip request_heartbeat
+             cleaned_function_args.pop("request_heartbeat", None)
+
+         inner_thoughts = None
+         if "inner_thoughts" in function_args:
+             inner_thoughts = cleaned_function_args.pop("inner_thoughts")
+
+         # TODO more cleaning to fix errors LLM makes
+         return inner_thoughts, cleaned_function_name, cleaned_function_args
+
+     def output_to_chat_completion_response(self, raw_llm_output, first_message=False):
+         """Turn raw LLM output into a ChatCompletion style response with:
+         "message" = {
+             "role": "assistant",
+             "content": ...,
+             "function_call": {
+                 "name": ...
+                 "arguments": {
+                     "arg1": val1,
+                     ...
+                 }
+             }
+         }
+         """
+         # if self.include_opening_brance_in_prefix and raw_llm_output[0] != "{":
+         #     raw_llm_output = "{" + raw_llm_output
+         assistant_prefix = self.assistant_prefix_extra_first_message if first_message else self.assistant_prefix_extra
+         if assistant_prefix and raw_llm_output[: len(assistant_prefix)] != assistant_prefix:
+             # print(f"adding prefix back to llm, raw_llm_output=\n{raw_llm_output}")
+             raw_llm_output = assistant_prefix + raw_llm_output
+             # print(f"->\n{raw_llm_output}")
+
+         try:
+             # cover llama.cpp server for now #TODO remove this when fixed
+             raw_llm_output = raw_llm_output.rstrip()
+             if raw_llm_output.endswith("<|eot_id|>"):
+                 raw_llm_output = raw_llm_output[: -len("<|eot_id|>")]
+             function_json_output = clean_json(raw_llm_output)
+         except Exception as e:
+             raise Exception(f"Failed to decode JSON from LLM output:\n{raw_llm_output} - error\n{str(e)}")
+         try:
+             # NOTE: weird bug can happen where 'function' gets nested if the prefix in the prompt isn't abided by
+             if isinstance(function_json_output["function"], dict):
+                 function_json_output = function_json_output["function"]
+             # regular unpacking
+             function_name = function_json_output["function"]
+             function_parameters = function_json_output["params"]
+         except KeyError as e:
+             raise LLMJSONParsingError(
+                 f"Received valid JSON from LLM, but JSON was missing fields: {str(e)}. JSON result was:\n{function_json_output}"
+             )
+
+         if self.clean_func_args:
+             (
+                 inner_thoughts,
+                 function_name,
+                 function_parameters,
+             ) = self._clean_function_args(function_name, function_parameters)
+
+         message = {
+             "role": "assistant",
+             "content": inner_thoughts,
+             "function_call": {
+                 "name": function_name,
+                 "arguments": json_dumps(function_parameters),
+             },
+         }
+         return message
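
The hunk above is easier to evaluate with a usage sketch. The snippet below is illustrative only (the function schema, messages, and raw model output are invented, not fixtures from the package); it shows the round trip this wrapper implements: ChatCompletion-style messages in, a Llama 3 prompt out, and the raw completion parsed back into an assistant message carrying a function_call.

from letta.local_llm.llm_chat_completion_wrappers.llama3 import (
    LLaMA3InnerMonologueWrapper,
)

# Hypothetical function schema and conversation (shapes follow the OpenAI
# function-calling convention the wrapper expects).
functions = [
    {
        "name": "send_message",
        "description": "Sends a message to the human user",
        "parameters": {
            "type": "object",
            "properties": {
                "message": {"type": "string", "description": "Message contents."},
            },
            "required": ["message"],
        },
    }
]
messages = [
    {"role": "system", "content": "You are a helpful agent."},
    {"role": "user", "content": '{"type": "user_message", "message": "Hi!"}'},
]

wrapper = LLaMA3InnerMonologueWrapper()
prompt = wrapper.chat_completion_to_prompt(messages, functions, first_message=True)
# `prompt` ends with the first-message assistant prefix '\n{\n "function": "send_message",',
# so the model only has to complete the JSON object. Suppose the backend returns:
raw_llm_output = ' "params": {"inner_thoughts": "First contact.", "message": "Hello!"}}'
response = wrapper.output_to_chat_completion_response(raw_llm_output, first_message=True)
# -> {"role": "assistant", "content": "First contact.",
#     "function_call": {"name": "send_message", "arguments": '{"message": "Hello!"}'}}
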
letta/local_llm/llm_chat_completion_wrappers/simple_summary_wrapper.py
@@ -0,0 +1,156 @@
+ from letta.utils import json_dumps, json_loads
+
+ from .wrapper_base import LLMChatCompletionWrapper
+
+
+ class SimpleSummaryWrapper(LLMChatCompletionWrapper):
+     """A super basic wrapper that's meant to be used for summary generation only"""
+
+     def __init__(
+         self,
+         simplify_json_content=True,
+         include_assistant_prefix=True,
+         # include_assistant_prefix=False,  # False here, because we launch directly into summary
+         include_section_separators=True,
+     ):
+         self.simplify_json_content = simplify_json_content
+         self.include_assistant_prefix = include_assistant_prefix
+         self.include_section_separators = include_section_separators
+
+     def chat_completion_to_prompt(self, messages, functions, function_documentation=None):
+         """Example for airoboros: https://huggingface.co/jondurbin/airoboros-l2-70b-2.1#prompt-format
+
+         Instructions on how to summarize
+         USER: {prompt}
+         ASSISTANT:
+
+         Functions support: https://huggingface.co/jondurbin/airoboros-l2-70b-2.1#agentfunction-calling
+
+         As an AI assistant, please select the most suitable function and parameters from the list of available functions below, based on the user's input. Provide your response in JSON format.
+
+         Input: I want to know how many times 'Python' is mentioned in my text file.
+
+         Available functions:
+         file_analytics:
+           description: This tool performs various operations on a text file.
+           params:
+             action: The operation we want to perform on the data, such as "count_occurrences", "find_line", etc.
+             filters:
+               keyword: The word or phrase we want to search for.
+
+         OpenAI functions schema style:
+
+         {
+             "name": "send_message",
+             "description": "Sends a message to the human user",
+             "parameters": {
+                 "type": "object",
+                 "properties": {
+                     # https://json-schema.org/understanding-json-schema/reference/array.html
+                     "message": {
+                         "type": "string",
+                         "description": "Message contents. All unicode (including emojis) are supported.",
+                     },
+                 },
+                 "required": ["message"],
+             }
+         },
+         """
+         assert functions is None
+         prompt = ""
+
+         # System instructions go first
+         assert messages[0]["role"] == "system"
+         prompt += messages[0]["content"]
+
+         def create_function_call(function_call):
+             """Go from ChatCompletion to Airoboros style function trace (in prompt)
+
+             ChatCompletion data (inside message['function_call']):
+             "function_call": {
+                 "name": ...
+                 "arguments": {
+                     "arg1": val1,
+                     ...
+                 }
+
+             Airoboros output:
+             {
+               "function": "send_message",
+               "params": {
+                 "message": "Hello there! I am Sam, an AI developed by Liminal Corp. How can I assist you today?"
+               }
+             }
+             """
+             airo_func_call = {
+                 "function": function_call["name"],
+                 "params": json_loads(function_call["arguments"]),
+             }
+             return json_dumps(airo_func_call, indent=2)
+
+         # Add a sep for the conversation
+         if self.include_section_separators:
+             prompt += "\n### INPUT"
+
+         # Last are the user/assistant messages
+         for message in messages[1:]:
+             assert message["role"] in ["user", "assistant", "function", "tool"], message
+
+             if message["role"] == "user":
+                 if self.simplify_json_content:
+                     try:
+                         content_json = json_loads(message["content"])
+                         content_simple = content_json["message"]
+                         prompt += f"\nUSER: {content_simple}"
+                     except:
+                         prompt += f"\nUSER: {message['content']}"
+             elif message["role"] == "assistant":
+                 prompt += f"\nASSISTANT: {message['content']}"
+                 # need to add the function call if there was one
+                 if "function_call" in message and message["function_call"]:
+                     prompt += f"\n{create_function_call(message['function_call'])}"
+                 elif "tool_calls" in message and message["tool_calls"]:
+                     prompt += f"\n{create_function_call(message['tool_calls'][0]['function'])}"
+             elif message["role"] in ["function", "tool"]:
+                 # TODO find a good way to add this
+                 # prompt += f"\nASSISTANT: (function return) {message['content']}"
+                 prompt += f"\nFUNCTION RETURN: {message['content']}"
+                 continue
+             else:
+                 raise ValueError(message)
+
+         # Add a sep for the response
+         if self.include_section_separators:
+             prompt += "\n### RESPONSE (your summary of the above conversation in plain English (no JSON!), do NOT exceed the word limit)"
+
+         if self.include_assistant_prefix:
+             # prompt += f"\nASSISTANT:"
+             prompt += f"\nSUMMARY:"
+
+         # print(prompt)
+         return prompt
+
+     def output_to_chat_completion_response(self, raw_llm_output):
+         """Turn raw LLM output into a ChatCompletion style response with:
+         "message" = {
+             "role": "assistant",
+             "content": ...,
+             "function_call": {
+                 "name": ...
+                 "arguments": {
+                     "arg1": val1,
+                     ...
+                 }
+             }
+         }
+         """
+         raw_llm_output = raw_llm_output.strip()
+         message = {
+             "role": "assistant",
+             "content": raw_llm_output,
+             # "function_call": {
+             #     "name": function_name,
+             #     "arguments": json_dumps(function_parameters),
+             # },
+         }
+         return message
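
Again purely as an illustrative sketch (the transcript is invented, not from the package): this wrapper flattens a conversation into an instruction-style prompt ending in "SUMMARY:", then passes the raw completion through as plain assistant content with no function_call.

from letta.local_llm.llm_chat_completion_wrappers.simple_summary_wrapper import (
    SimpleSummaryWrapper,
)

messages = [
    {"role": "system", "content": "Summarize the conversation below."},
    {"role": "user", "content": '{"type": "user_message", "message": "What is Letta?"}'},
    {"role": "assistant", "content": "Letta is an agent framework with editable memory."},
]

wrapper = SimpleSummaryWrapper()
# `functions` must be None here; this wrapper only does summarization.
prompt = wrapper.chat_completion_to_prompt(messages, functions=None)
# prompt ~ system text + "### INPUT" + "USER: ..." + "ASSISTANT: ..." + "### RESPONSE (...)" + "SUMMARY:"
summary = wrapper.output_to_chat_completion_response("User asked what Letta is.")
# -> {"role": "assistant", "content": "User asked what Letta is."}
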
letta/local_llm/llm_chat_completion_wrappers/wrapper_base.py
@@ -0,0 +1,11 @@
+ from abc import ABC, abstractmethod
+
+
+ class LLMChatCompletionWrapper(ABC):
+     @abstractmethod
+     def chat_completion_to_prompt(self, messages, functions, function_documentation=None):
+         """Go from ChatCompletion to a single prompt string"""
+
+     @abstractmethod
+     def output_to_chat_completion_response(self, raw_llm_output):
+         """Turn the LLM output string into a ChatCompletion response"""