letta-nightly 0.1.7.dev20240924104148__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of letta-nightly might be problematic.

Files changed (189)
  1. letta/__init__.py +24 -0
  2. letta/__main__.py +3 -0
  3. letta/agent.py +1427 -0
  4. letta/agent_store/chroma.py +295 -0
  5. letta/agent_store/db.py +546 -0
  6. letta/agent_store/lancedb.py +177 -0
  7. letta/agent_store/milvus.py +198 -0
  8. letta/agent_store/qdrant.py +201 -0
  9. letta/agent_store/storage.py +188 -0
  10. letta/benchmark/benchmark.py +96 -0
  11. letta/benchmark/constants.py +14 -0
  12. letta/cli/cli.py +689 -0
  13. letta/cli/cli_config.py +1282 -0
  14. letta/cli/cli_load.py +166 -0
  15. letta/client/__init__.py +0 -0
  16. letta/client/admin.py +171 -0
  17. letta/client/client.py +2360 -0
  18. letta/client/streaming.py +90 -0
  19. letta/client/utils.py +61 -0
  20. letta/config.py +484 -0
  21. letta/configs/anthropic.json +13 -0
  22. letta/configs/letta_hosted.json +11 -0
  23. letta/configs/openai.json +12 -0
  24. letta/constants.py +134 -0
  25. letta/credentials.py +140 -0
  26. letta/data_sources/connectors.py +247 -0
  27. letta/embeddings.py +218 -0
  28. letta/errors.py +26 -0
  29. letta/functions/__init__.py +0 -0
  30. letta/functions/function_sets/base.py +174 -0
  31. letta/functions/function_sets/extras.py +132 -0
  32. letta/functions/functions.py +105 -0
  33. letta/functions/schema_generator.py +205 -0
  34. letta/humans/__init__.py +0 -0
  35. letta/humans/examples/basic.txt +1 -0
  36. letta/humans/examples/cs_phd.txt +9 -0
  37. letta/interface.py +314 -0
  38. letta/llm_api/__init__.py +0 -0
  39. letta/llm_api/anthropic.py +383 -0
  40. letta/llm_api/azure_openai.py +155 -0
  41. letta/llm_api/cohere.py +396 -0
  42. letta/llm_api/google_ai.py +468 -0
  43. letta/llm_api/llm_api_tools.py +485 -0
  44. letta/llm_api/openai.py +470 -0
  45. letta/local_llm/README.md +3 -0
  46. letta/local_llm/__init__.py +0 -0
  47. letta/local_llm/chat_completion_proxy.py +279 -0
  48. letta/local_llm/constants.py +31 -0
  49. letta/local_llm/function_parser.py +68 -0
  50. letta/local_llm/grammars/__init__.py +0 -0
  51. letta/local_llm/grammars/gbnf_grammar_generator.py +1324 -0
  52. letta/local_llm/grammars/json.gbnf +26 -0
  53. letta/local_llm/grammars/json_func_calls_with_inner_thoughts.gbnf +32 -0
  54. letta/local_llm/groq/api.py +97 -0
  55. letta/local_llm/json_parser.py +202 -0
  56. letta/local_llm/koboldcpp/api.py +62 -0
  57. letta/local_llm/koboldcpp/settings.py +23 -0
  58. letta/local_llm/llamacpp/api.py +58 -0
  59. letta/local_llm/llamacpp/settings.py +22 -0
  60. letta/local_llm/llm_chat_completion_wrappers/__init__.py +0 -0
  61. letta/local_llm/llm_chat_completion_wrappers/airoboros.py +452 -0
  62. letta/local_llm/llm_chat_completion_wrappers/chatml.py +470 -0
  63. letta/local_llm/llm_chat_completion_wrappers/configurable_wrapper.py +387 -0
  64. letta/local_llm/llm_chat_completion_wrappers/dolphin.py +246 -0
  65. letta/local_llm/llm_chat_completion_wrappers/llama3.py +345 -0
  66. letta/local_llm/llm_chat_completion_wrappers/simple_summary_wrapper.py +156 -0
  67. letta/local_llm/llm_chat_completion_wrappers/wrapper_base.py +11 -0
  68. letta/local_llm/llm_chat_completion_wrappers/zephyr.py +345 -0
  69. letta/local_llm/lmstudio/api.py +100 -0
  70. letta/local_llm/lmstudio/settings.py +29 -0
  71. letta/local_llm/ollama/api.py +88 -0
  72. letta/local_llm/ollama/settings.py +32 -0
  73. letta/local_llm/settings/__init__.py +0 -0
  74. letta/local_llm/settings/deterministic_mirostat.py +45 -0
  75. letta/local_llm/settings/settings.py +72 -0
  76. letta/local_llm/settings/simple.py +28 -0
  77. letta/local_llm/utils.py +265 -0
  78. letta/local_llm/vllm/api.py +63 -0
  79. letta/local_llm/webui/api.py +60 -0
  80. letta/local_llm/webui/legacy_api.py +58 -0
  81. letta/local_llm/webui/legacy_settings.py +23 -0
  82. letta/local_llm/webui/settings.py +24 -0
  83. letta/log.py +76 -0
  84. letta/main.py +437 -0
  85. letta/memory.py +440 -0
  86. letta/metadata.py +884 -0
  87. letta/openai_backcompat/__init__.py +0 -0
  88. letta/openai_backcompat/openai_object.py +437 -0
  89. letta/persistence_manager.py +148 -0
  90. letta/personas/__init__.py +0 -0
  91. letta/personas/examples/anna_pa.txt +13 -0
  92. letta/personas/examples/google_search_persona.txt +15 -0
  93. letta/personas/examples/memgpt_doc.txt +6 -0
  94. letta/personas/examples/memgpt_starter.txt +4 -0
  95. letta/personas/examples/sam.txt +14 -0
  96. letta/personas/examples/sam_pov.txt +14 -0
  97. letta/personas/examples/sam_simple_pov_gpt35.txt +13 -0
  98. letta/personas/examples/sqldb/test.db +0 -0
  99. letta/prompts/__init__.py +0 -0
  100. letta/prompts/gpt_summarize.py +14 -0
  101. letta/prompts/gpt_system.py +26 -0
  102. letta/prompts/system/memgpt_base.txt +49 -0
  103. letta/prompts/system/memgpt_chat.txt +58 -0
  104. letta/prompts/system/memgpt_chat_compressed.txt +13 -0
  105. letta/prompts/system/memgpt_chat_fstring.txt +51 -0
  106. letta/prompts/system/memgpt_doc.txt +50 -0
  107. letta/prompts/system/memgpt_gpt35_extralong.txt +53 -0
  108. letta/prompts/system/memgpt_intuitive_knowledge.txt +31 -0
  109. letta/prompts/system/memgpt_modified_chat.txt +23 -0
  110. letta/pytest.ini +0 -0
  111. letta/schemas/agent.py +117 -0
  112. letta/schemas/api_key.py +21 -0
  113. letta/schemas/block.py +135 -0
  114. letta/schemas/document.py +21 -0
  115. letta/schemas/embedding_config.py +54 -0
  116. letta/schemas/enums.py +35 -0
  117. letta/schemas/job.py +38 -0
  118. letta/schemas/letta_base.py +80 -0
  119. letta/schemas/letta_message.py +175 -0
  120. letta/schemas/letta_request.py +23 -0
  121. letta/schemas/letta_response.py +28 -0
  122. letta/schemas/llm_config.py +54 -0
  123. letta/schemas/memory.py +224 -0
  124. letta/schemas/message.py +727 -0
  125. letta/schemas/openai/chat_completion_request.py +123 -0
  126. letta/schemas/openai/chat_completion_response.py +136 -0
  127. letta/schemas/openai/chat_completions.py +123 -0
  128. letta/schemas/openai/embedding_response.py +11 -0
  129. letta/schemas/openai/openai.py +157 -0
  130. letta/schemas/organization.py +20 -0
  131. letta/schemas/passage.py +80 -0
  132. letta/schemas/source.py +62 -0
  133. letta/schemas/tool.py +143 -0
  134. letta/schemas/usage.py +18 -0
  135. letta/schemas/user.py +33 -0
  136. letta/server/__init__.py +0 -0
  137. letta/server/constants.py +6 -0
  138. letta/server/rest_api/__init__.py +0 -0
  139. letta/server/rest_api/admin/__init__.py +0 -0
  140. letta/server/rest_api/admin/agents.py +21 -0
  141. letta/server/rest_api/admin/tools.py +83 -0
  142. letta/server/rest_api/admin/users.py +98 -0
  143. letta/server/rest_api/app.py +193 -0
  144. letta/server/rest_api/auth/__init__.py +0 -0
  145. letta/server/rest_api/auth/index.py +43 -0
  146. letta/server/rest_api/auth_token.py +22 -0
  147. letta/server/rest_api/interface.py +726 -0
  148. letta/server/rest_api/routers/__init__.py +0 -0
  149. letta/server/rest_api/routers/openai/__init__.py +0 -0
  150. letta/server/rest_api/routers/openai/assistants/__init__.py +0 -0
  151. letta/server/rest_api/routers/openai/assistants/assistants.py +115 -0
  152. letta/server/rest_api/routers/openai/assistants/schemas.py +121 -0
  153. letta/server/rest_api/routers/openai/assistants/threads.py +336 -0
  154. letta/server/rest_api/routers/openai/chat_completions/__init__.py +0 -0
  155. letta/server/rest_api/routers/openai/chat_completions/chat_completions.py +131 -0
  156. letta/server/rest_api/routers/v1/__init__.py +15 -0
  157. letta/server/rest_api/routers/v1/agents.py +543 -0
  158. letta/server/rest_api/routers/v1/blocks.py +73 -0
  159. letta/server/rest_api/routers/v1/jobs.py +46 -0
  160. letta/server/rest_api/routers/v1/llms.py +28 -0
  161. letta/server/rest_api/routers/v1/organizations.py +61 -0
  162. letta/server/rest_api/routers/v1/sources.py +199 -0
  163. letta/server/rest_api/routers/v1/tools.py +103 -0
  164. letta/server/rest_api/routers/v1/users.py +109 -0
  165. letta/server/rest_api/static_files.py +74 -0
  166. letta/server/rest_api/utils.py +69 -0
  167. letta/server/server.py +1995 -0
  168. letta/server/startup.sh +8 -0
  169. letta/server/static_files/assets/index-0cbf7ad5.js +274 -0
  170. letta/server/static_files/assets/index-156816da.css +1 -0
  171. letta/server/static_files/assets/index-486e3228.js +274 -0
  172. letta/server/static_files/favicon.ico +0 -0
  173. letta/server/static_files/index.html +39 -0
  174. letta/server/static_files/memgpt_logo_transparent.png +0 -0
  175. letta/server/utils.py +46 -0
  176. letta/server/ws_api/__init__.py +0 -0
  177. letta/server/ws_api/example_client.py +104 -0
  178. letta/server/ws_api/interface.py +108 -0
  179. letta/server/ws_api/protocol.py +100 -0
  180. letta/server/ws_api/server.py +145 -0
  181. letta/settings.py +165 -0
  182. letta/streaming_interface.py +396 -0
  183. letta/system.py +207 -0
  184. letta/utils.py +1065 -0
  185. letta_nightly-0.1.7.dev20240924104148.dist-info/LICENSE +190 -0
  186. letta_nightly-0.1.7.dev20240924104148.dist-info/METADATA +98 -0
  187. letta_nightly-0.1.7.dev20240924104148.dist-info/RECORD +189 -0
  188. letta_nightly-0.1.7.dev20240924104148.dist-info/WHEEL +4 -0
  189. letta_nightly-0.1.7.dev20240924104148.dist-info/entry_points.txt +3 -0
letta/local_llm/llm_chat_completion_wrappers/configurable_wrapper.py
@@ -0,0 +1,387 @@
+ import yaml
+
+ from letta.utils import json_dumps, json_loads
+
+ from ...errors import LLMJSONParsingError
+ from ..json_parser import clean_json
+ from .wrapper_base import LLMChatCompletionWrapper
+
+
+ # A configurable, model-agnostic wrapper.
+ class ConfigurableJSONWrapper(LLMChatCompletionWrapper):
+     def __init__(
+         self,
+         pre_prompt: str = "",
+         post_prompt: str = "",
+         sys_prompt_start: str = "",
+         sys_prompt_end: str = "",
+         user_prompt_start: str = "",
+         user_prompt_end: str = "",
+         assistant_prompt_start: str = "",
+         assistant_prompt_end: str = "",
+         tool_prompt_start: str = "",
+         tool_prompt_end: str = "",
+         assistant_prefix_extra="",
+         assistant_prefix_extra_first_message="",
+         allow_custom_roles: bool = False,  # allow roles outside user/assistant
+         custom_post_role: str = "",  # For chatml this would be '\n'
+         custom_roles_prompt_start: str = "",  # For chatml this would be '<|im_start|>'
+         custom_roles_prompt_end: str = "",  # For chatml this would be '<|im_end|>'
+         include_sys_prompt_in_first_user_message: bool = False,
+         default_stop_sequences=None,
+         simplify_json_content: bool = False,
+         strip_prompt: bool = False,
+         json_indent: int = 2,
+         clean_function_args: bool = False,
+     ):
+         """
+         Initializes a new ConfigurableJSONWrapper object.
+
+         Args:
+             pre_prompt (str): The pre-prompt content.
+             post_prompt (str): The post-prompt content.
+             sys_prompt_start (str): The system messages prompt start. For chatml, this would be '<|im_start|>system\n'
+             sys_prompt_end (str): The system messages prompt end. For chatml, this would be '<|im_end|>'
+             user_prompt_start (str): The user messages prompt start. For chatml, this would be '<|im_start|>user\n'
+             user_prompt_end (str): The user messages prompt end. For chatml, this would be '<|im_end|>\n'
+             assistant_prompt_start (str): The assistant messages prompt start. For chatml, this would be '<|im_start|>assistant\n'
+             assistant_prompt_end (str): The assistant messages prompt end. For chatml, this would be '<|im_end|>\n'
+             tool_prompt_start (str): The tool messages prompt start. For chatml, this would be '<|im_start|>tool\n' if the model supports the tool role, otherwise it would be something like '<|im_start|>user\nFUNCTION RETURN:\n'
+             tool_prompt_end (str): The tool messages prompt end. For chatml, this would be '<|im_end|>\n'
+             assistant_prefix_extra (str): A prefix for every assistant message to steer the model to output JSON. Something like '\n{\n "function":'
+             assistant_prefix_extra_first_message (str): A prefix for the first assistant message to steer the model to output JSON and use a specific function. Something like '\n{\n "function": "send_message",'
+             allow_custom_roles (bool): Whether the wrapper allows custom roles, like names for autogen agents.
+             custom_post_role (str): The part that comes after the custom role string. For chatml, this would be '\n'
+             custom_roles_prompt_start (str): Custom role prompt start. For chatml, this would be '<|im_start|>'
+             custom_roles_prompt_end (str): Custom role prompt end. For chatml, this would be '<|im_end|>\n'
+             include_sys_prompt_in_first_user_message (bool): Whether to include the system prompt in the first user message. For Llama2 this would be True; for chatml, this would be False.
+             simplify_json_content (bool): Whether to render user messages as plaintext instead of raw JSON.
+             strip_prompt (bool): Whether to strip leading/trailing whitespace from the final prompt.
+             default_stop_sequences (List[str]): List of default stop sequences.
+
+         """
+         if default_stop_sequences is None:
+             default_stop_sequences = []
+         self.pre_prompt = pre_prompt
+         self.post_prompt = post_prompt
+         self.sys_prompt_start = sys_prompt_start
+         self.sys_prompt_end = sys_prompt_end
+         self.user_prompt_start = user_prompt_start
+         self.user_prompt_end = user_prompt_end
+         self.assistant_prompt_start = assistant_prompt_start
+         self.assistant_prompt_end = assistant_prompt_end
+         self.tool_prompt_start = tool_prompt_start
+         self.tool_prompt_end = tool_prompt_end
+         self.assistant_prefix_extra = assistant_prefix_extra
+         self.assistant_prefix_extra_first_message = assistant_prefix_extra_first_message
+         self.allow_custom_roles = allow_custom_roles
+         self.custom_post_role = custom_post_role
+         self.custom_roles_prompt_start = custom_roles_prompt_start
+         self.custom_roles_prompt_end = custom_roles_prompt_end
+         self.include_sys_prompt_in_first_user_message = include_sys_prompt_in_first_user_message
+         self.simplify_json_content = simplify_json_content
+         self.default_stop_sequences = default_stop_sequences
+         self.strip_prompt = strip_prompt
+         self.json_indent = json_indent
+         self.clean_func_args = clean_function_args
+         self.supports_first_message = True
+
+     def _compile_function_description(self, schema, add_inner_thoughts=True) -> str:
+         """Go from a JSON schema to a string description for a prompt"""
+         # airoboros style
+         func_str = ""
+         func_str += f"{schema['name']}:"
+         func_str += f"\n description: {schema['description']}"
+         func_str += f"\n params:"
+         if add_inner_thoughts:
+             func_str += f"\n inner_thoughts: Deep inner monologue private to you only."
+         for param_k, param_v in schema["parameters"]["properties"].items():
+             # TODO we're ignoring type
+             func_str += f"\n {param_k}: {param_v['description']}"
+         # TODO we're ignoring schema['parameters']['required']
+         return func_str
+
+     def _compile_function_block(self, functions) -> str:
+         """functions dict -> string describing functions choices"""
+         prompt = ""
+
+         # prompt += f"\nPlease select the most suitable function and parameters from the list of available functions below, based on the user's input. Provide your response in JSON format."
+         prompt += f"Please select the most suitable function and parameters from the list of available functions below, based on the ongoing conversation. Provide your response in JSON format."
+         prompt += f"\nAvailable functions:"
+         for function_dict in functions:
+             prompt += f"\n{self._compile_function_description(function_dict)}"
+
+         return prompt
+
+     def _compile_system_message(self, system_message, functions, function_documentation=None) -> str:
+         """system prompt + memory + functions -> string"""
+         prompt = system_message
+         prompt += "\n"
+         if function_documentation is not None:
+             prompt += f"Please select the most suitable function and parameters from the list of available functions below, based on the ongoing conversation. Provide your response in JSON format."
+             prompt += f"\nAvailable functions:"
+             prompt += function_documentation
+         else:
+             prompt += self._compile_function_block(functions)
+         return prompt
+
+     def _compile_function_call(self, function_call, inner_thoughts=None):
+         airo_func_call = {
+             "function": function_call["name"],
+             "params": {
+                 "inner_thoughts": inner_thoughts,
+                 **json_loads(function_call["arguments"]),
+             },
+         }
+         return json_dumps(airo_func_call, indent=self.json_indent)
+
+     # NOTE: BOS/EOS chatml tokens are NOT inserted here
+     def _compile_assistant_message(self, message) -> str:
+         """assistant message -> string"""
+         prompt = ""
+
+         # need to add the function call if there was one
+         inner_thoughts = message["content"]
+         if "function_call" in message and message["function_call"]:
+             prompt += f"\n{self._compile_function_call(message['function_call'], inner_thoughts=inner_thoughts)}"
+         elif "tool_calls" in message and message["tool_calls"]:
+             for tool_call in message["tool_calls"]:
+                 prompt += f"\n{self._compile_function_call(tool_call['function'], inner_thoughts=inner_thoughts)}"
+         else:
+             # TODO should we format this into JSON somehow?
+             prompt += inner_thoughts
+
+         return prompt
+
+     # NOTE: BOS/EOS chatml tokens are NOT inserted here
+     def _compile_user_message(self, message) -> str:
+         """user message (should be JSON) -> string"""
+         prompt = ""
+         if self.simplify_json_content:
+             # Make user messages not JSON but plaintext instead
+             try:
+                 user_msg_json = json_loads(message["content"])
+                 user_msg_str = user_msg_json["message"]
+             except Exception:
+                 user_msg_str = message["content"]
+         else:
+             # Otherwise just dump the full json
+             try:
+                 user_msg_json = json_loads(message["content"])
+                 user_msg_str = json_dumps(user_msg_json, indent=self.json_indent)
+             except Exception:
+                 user_msg_str = message["content"]
+
+         prompt += user_msg_str
+         return prompt
+
+     # NOTE: BOS/EOS chatml tokens are NOT inserted here
+     def _compile_function_response(self, message) -> str:
+         """function response message (should be JSON) -> string"""
+         # TODO we should clean up send_message returns to avoid cluttering the prompt
+         prompt = ""
+         try:
+             # indent the function replies
+             function_return_dict = json_loads(message["content"])
+             function_return_str = json_dumps(function_return_dict, indent=self.json_indent)
+         except Exception:
+             function_return_str = message["content"]
+
+         prompt += function_return_str
+         return prompt
+
+     def chat_completion_to_prompt(self, messages, functions, first_message=False, function_documentation=None):
+         formatted_messages = self.pre_prompt
+
+         no_user_prompt_start = False
+
+         for message in messages:
+             if message["role"] == "system":
+                 msg = self._compile_system_message(message["content"], functions, function_documentation)
+                 formatted_messages += self.sys_prompt_start + msg + self.sys_prompt_end
+
+                 if self.include_sys_prompt_in_first_user_message:
+                     formatted_messages = self.user_prompt_start + formatted_messages
+                     no_user_prompt_start = True
+             elif message["role"] == "user":
+                 msg = self._compile_user_message(message)
+                 if no_user_prompt_start:
+                     no_user_prompt_start = False
+                     formatted_messages += msg + self.user_prompt_end
+                 else:
+                     formatted_messages += self.user_prompt_start + msg + self.user_prompt_end
+
+             elif message["role"] == "assistant":
+                 msg = self._compile_assistant_message(message)
+                 if self.allow_custom_roles and "name" in message:
+                     role_str = message["name"].strip().lower()
+                     if no_user_prompt_start:
+                         no_user_prompt_start = False
+                         formatted_messages += (
+                             self.user_prompt_end
+                             + self.custom_roles_prompt_start
+                             + role_str
+                             + self.custom_post_role
+                             + msg
+                             + self.custom_roles_prompt_end
+                         )
+                     else:
+                         formatted_messages += (
+                             self.custom_roles_prompt_start + role_str + self.custom_post_role + msg + self.custom_roles_prompt_end
+                         )
+                 else:
+                     if no_user_prompt_start:
+                         no_user_prompt_start = False
+                         formatted_messages += self.user_prompt_end + self.assistant_prompt_start + msg + self.assistant_prompt_end
+                     else:
+                         formatted_messages += self.assistant_prompt_start + msg + self.assistant_prompt_end
+             elif message["role"] == "tool":
+                 msg = self._compile_function_response(message)
+                 formatted_messages += self.tool_prompt_start + msg + self.tool_prompt_end
+
+         if self.strip_prompt:
+             if first_message:
+                 prompt = formatted_messages + self.post_prompt + self.assistant_prefix_extra_first_message
+             else:
+                 prompt = formatted_messages + self.post_prompt + self.assistant_prefix_extra
+             return prompt.strip()
+         else:
+             if first_message:
+                 prompt = formatted_messages + self.post_prompt + self.assistant_prefix_extra_first_message
+             else:
+                 prompt = formatted_messages + self.post_prompt + self.assistant_prefix_extra
+             return prompt
+
+     def _clean_function_args(self, function_name, function_args):
+         """Some basic Letta-specific cleaning of function args"""
+         cleaned_function_name = function_name
+         cleaned_function_args = function_args.copy() if function_args is not None else {}
+
+         if function_name == "send_message":
+             # strip request_heartbeat
+             cleaned_function_args.pop("request_heartbeat", None)
+
+         inner_thoughts = None
+         if "inner_thoughts" in cleaned_function_args:
+             inner_thoughts = cleaned_function_args.pop("inner_thoughts")
+
+         # TODO more cleaning to fix errors LLM makes
+         return inner_thoughts, cleaned_function_name, cleaned_function_args
+
+     def output_to_chat_completion_response(self, raw_llm_output, first_message=False):
+         assistant_prefix = self.assistant_prefix_extra_first_message if first_message else self.assistant_prefix_extra
+         if assistant_prefix and raw_llm_output[: len(assistant_prefix)] != assistant_prefix:
+             raw_llm_output = assistant_prefix + raw_llm_output
+
+         try:
+             function_json_output = clean_json(raw_llm_output)
+         except Exception as e:
+             raise Exception(f"Failed to decode JSON from LLM output:\n{raw_llm_output} - error\n{str(e)}")
+         try:
+             # NOTE: weird bug can happen where 'function' gets nested if the prefix in the prompt isn't abided by
+             if isinstance(function_json_output["function"], dict):
+                 function_json_output = function_json_output["function"]
+             # regular unpacking
+             function_name = function_json_output["function"]
+             function_parameters = function_json_output["params"]
+             if "inner_thoughts" in function_json_output:
+                 inner_thoughts = function_json_output["inner_thoughts"]
+             else:
+                 if "inner_thoughts" in function_json_output["params"]:
+                     inner_thoughts = function_json_output["params"]["inner_thoughts"]
+                 else:
+                     inner_thoughts = ""
+         except KeyError as e:
+             raise LLMJSONParsingError(
+                 f"Received valid JSON from LLM, but JSON was missing fields: {str(e)}. JSON result was:\n{function_json_output}"
+             )
+
+         if self.clean_func_args:
+             (
+                 inner_thoughts,
+                 function_name,
+                 function_parameters,
+             ) = self._clean_function_args(function_name, function_parameters)
+
+         message = {
+             "role": "assistant",
+             "content": inner_thoughts,
+             "function_call": {
+                 "name": function_name,
+                 "arguments": json_dumps(function_parameters),
+             },
+         }
+         return message
+
+     def save_to_yaml(self, file_path: str):
+         """
+         Save the configuration to a YAML file.
+
+         Args:
+             file_path (str): The path to the YAML file.
+         """
+         data = {
+             "pre_prompt": self.pre_prompt,
+             "post_prompt": self.post_prompt,
+             "sys_prompt_start": self.sys_prompt_start,
+             "sys_prompt_end": self.sys_prompt_end,
+             "user_prompt_start": self.user_prompt_start,
+             "user_prompt_end": self.user_prompt_end,
+             "assistant_prompt_start": self.assistant_prompt_start,
+             "assistant_prompt_end": self.assistant_prompt_end,
+             "tool_prompt_start": self.tool_prompt_start,
+             "tool_prompt_end": self.tool_prompt_end,
+             "assistant_prefix_extra": self.assistant_prefix_extra,
+             "assistant_prefix_extra_first_message": self.assistant_prefix_extra_first_message,
+             "allow_custom_roles": self.allow_custom_roles,
+             "custom_post_role": self.custom_post_role,
+             "custom_roles_prompt_start": self.custom_roles_prompt_start,
+             "custom_roles_prompt_end": self.custom_roles_prompt_end,
+             "include_sys_prompt_in_first_user_message": self.include_sys_prompt_in_first_user_message,
+             "simplify_json_content": self.simplify_json_content,
+             "strip_prompt": self.strip_prompt,
+             "json_indent": self.json_indent,
+             "clean_function_args": self.clean_func_args,
+             "default_stop_sequences": self.default_stop_sequences,
+         }
+
+         with open(file_path, "w", encoding="utf-8") as yaml_file:
+             yaml.dump(data, yaml_file, default_flow_style=False)
+
+     @staticmethod
+     def load_from_yaml(file_path: str):
+         """
+         Load the configuration from a YAML file.
+
+         Args:
+             file_path (str): The path to the YAML file.
+         """
+         with open(file_path, "r", encoding="utf-8") as yaml_file:
+             data = yaml.safe_load(yaml_file)
+
+         wrapper = ConfigurableJSONWrapper()
+         # Set the attributes from the loaded data
+         wrapper.pre_prompt = data.get("pre_prompt", "")
+         wrapper.post_prompt = data.get("post_prompt", "")
+         wrapper.sys_prompt_start = data.get("sys_prompt_start", "")
+         wrapper.sys_prompt_end = data.get("sys_prompt_end", "")
+         wrapper.user_prompt_start = data.get("user_prompt_start", "")
+         wrapper.user_prompt_end = data.get("user_prompt_end", "")
+         wrapper.assistant_prompt_start = data.get("assistant_prompt_start", "")
+         wrapper.assistant_prompt_end = data.get("assistant_prompt_end", "")
+         wrapper.tool_prompt_start = data.get("tool_prompt_start", "")
+         wrapper.tool_prompt_end = data.get("tool_prompt_end", "")
+         wrapper.assistant_prefix_extra = data.get("assistant_prefix_extra", "")
+         wrapper.assistant_prefix_extra_first_message = data.get("assistant_prefix_extra_first_message", "")
+         wrapper.allow_custom_roles = data.get("allow_custom_roles", False)
+         wrapper.custom_post_role = data.get("custom_post_role", "")
+         wrapper.custom_roles_prompt_start = data.get("custom_roles_prompt_start", "")
+         wrapper.custom_roles_prompt_end = data.get("custom_roles_prompt_end", "")
+         wrapper.include_sys_prompt_in_first_user_message = data.get("include_sys_prompt_in_first_user_message", False)
+         wrapper.simplify_json_content = data.get("simplify_json_content", False)
+         wrapper.strip_prompt = data.get("strip_prompt", False)
+         wrapper.json_indent = data.get("json_indent", 2)
+         wrapper.clean_func_args = data.get("clean_function_args", False)
+         wrapper.default_stop_sequences = data.get("default_stop_sequences", [])
+
+         return wrapper
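
Usage note (not part of the diff): a minimal sketch of pointing this configurable wrapper at a ChatML-style model. The token values come from the docstring above; the message history, function schema, and YAML file name are illustrative assumptions.

from letta.local_llm.llm_chat_completion_wrappers.configurable_wrapper import ConfigurableJSONWrapper

# Configure the generic wrapper with the ChatML tokens described in the docstring.
chatml_wrapper = ConfigurableJSONWrapper(
    sys_prompt_start="<|im_start|>system\n",
    sys_prompt_end="<|im_end|>\n",
    user_prompt_start="<|im_start|>user\n",
    user_prompt_end="<|im_end|>\n",
    assistant_prompt_start="<|im_start|>assistant\n",
    assistant_prompt_end="<|im_end|>\n",
    tool_prompt_start="<|im_start|>tool\n",
    tool_prompt_end="<|im_end|>\n",
    assistant_prefix_extra='\n{\n "function":',
    simplify_json_content=True,
    default_stop_sequences=["<|im_end|>"],
)

# Hypothetical history and function schema, shaped like the ones the methods above expect.
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": '{"message": "What time is it?"}'},
]
functions = [
    {
        "name": "send_message",
        "description": "Sends a message to the human user",
        "parameters": {
            "type": "object",
            "properties": {"message": {"type": "string", "description": "Message contents."}},
            "required": ["message"],
        },
    }
]

# Render the full prompt string for the local model.
prompt = chatml_wrapper.chat_completion_to_prompt(messages, functions, first_message=True)

# The configuration can also be round-tripped through YAML via the methods above.
chatml_wrapper.save_to_yaml("chatml_wrapper.yaml")
restored = ConfigurableJSONWrapper.load_from_yaml("chatml_wrapper.yaml")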
letta/local_llm/llm_chat_completion_wrappers/dolphin.py
@@ -0,0 +1,246 @@
+ from letta.utils import json_dumps, json_loads
+
+ from ...errors import LLMJSONParsingError
+ from ..json_parser import clean_json
+ from .wrapper_base import LLMChatCompletionWrapper
+
+
+ class Dolphin21MistralWrapper(LLMChatCompletionWrapper):
+     """Wrapper for Dolphin 2.1 Mistral 7b: https://huggingface.co/ehartford/dolphin-2.1-mistral-7b
+
+     Note: this wrapper formats a prompt that only generates JSON, no inner thoughts
+     """
+
+     def __init__(
+         self,
+         simplify_json_content=True,
+         clean_function_args=True,
+         include_assistant_prefix=True,
+         include_opening_brace_in_prefix=True,
+         include_section_separators=False,
+     ):
+         self.simplify_json_content = simplify_json_content
+         self.clean_func_args = clean_function_args
+         self.include_assistant_prefix = include_assistant_prefix
+         self.include_opening_brace_in_prefix = include_opening_brace_in_prefix
+         self.include_section_separators = include_section_separators
+
+     def chat_completion_to_prompt(self, messages, functions, function_documentation=None):
+         """Example for airoboros: https://huggingface.co/jondurbin/airoboros-l2-70b-2.1#prompt-format
+
+         <|im_start|>system
+         You are Dolphin, a helpful AI assistant.<|im_end|>
+         <|im_start|>user
+         {prompt}<|im_end|>
+         <|im_start|>assistant
+
+         Do function spec Airoboros style inside the system message:
+         Functions support: https://huggingface.co/jondurbin/airoboros-l2-70b-2.1#agentfunction-calling
+
+         As an AI assistant, please select the most suitable function and parameters from the list of available functions below, based on the user's input. Provide your response in JSON format.
+
+         Input: I want to know how many times 'Python' is mentioned in my text file.
+
+         Available functions:
+         file_analytics:
+           description: This tool performs various operations on a text file.
+           params:
+             action: The operation we want to perform on the data, such as "count_occurrences", "find_line", etc.
+             filters:
+               keyword: The word or phrase we want to search for.
+
+         OpenAI functions schema style:
+
+         {
+             "name": "send_message",
+             "description": "Sends a message to the human user",
+             "parameters": {
+                 "type": "object",
+                 "properties": {
+                     # https://json-schema.org/understanding-json-schema/reference/array.html
+                     "message": {
+                         "type": "string",
+                         "description": "Message contents. All unicode (including emojis) are supported.",
+                     },
+                 },
+                 "required": ["message"],
+             }
+         },
+         """
+         prompt = ""
+
+         # <|im_start|>system
+         # You are Dolphin, a helpful AI assistant.<|im_end|>
+
+         IM_START_TOKEN = "<|im_start|>"
+         IM_END_TOKEN = "<|im_end|>"
+
+         # System instructions go first
+         assert messages[0]["role"] == "system"
+         prompt += f"{IM_START_TOKEN}system"
+         prompt += f"\n{messages[0]['content']}"
+
+         # Next is the functions preamble
+         def create_function_description(schema):
+             # airoboros style
+             func_str = ""
+             func_str += f"{schema['name']}:"
+             func_str += f"\n description: {schema['description']}"
+             func_str += f"\n params:"
+             for param_k, param_v in schema["parameters"]["properties"].items():
+                 # TODO we're ignoring type
+                 func_str += f"\n {param_k}: {param_v['description']}"
+             # TODO we're ignoring schema['parameters']['required']
+             return func_str
+
+         # prompt += f"\nPlease select the most suitable function and parameters from the list of available functions below, based on the user's input. Provide your response in JSON format."
+         prompt += f"\nPlease select the most suitable function and parameters from the list of available functions below, based on the ongoing conversation. Provide your response in JSON format."
+         prompt += f"\nAvailable functions:"
+         if function_documentation is not None:
+             prompt += f"\n{function_documentation}"
+         else:
+             for function_dict in functions:
+                 prompt += f"\n{create_function_description(function_dict)}"
+
+         # Put functions INSIDE system message (TODO experiment with this)
+         prompt += IM_END_TOKEN
+
+         def create_function_call(function_call):
+             """Go from ChatCompletion to Airoboros style function trace (in prompt)
+
+             ChatCompletion data (inside message['function_call']):
+             "function_call": {
+                 "name": ...
+                 "arguments": {
+                     "arg1": val1,
+                     ...
+                 }
+
+             Airoboros output:
+             {
+                 "function": "send_message",
+                 "params": {
+                     "message": "Hello there! I am Sam, an AI developed by Liminal Corp. How can I assist you today?"
+                 }
+             }
+             """
+             airo_func_call = {
+                 "function": function_call["name"],
+                 "params": json_loads(function_call["arguments"]),
+             }
+             return json_dumps(airo_func_call, indent=2)
+
+         # option (1): from HF README:
+         # <|im_start|>user
+         # {prompt}<|im_end|>
+         # <|im_start|>assistant
+         # {assistant reply}
+         # {function output (if function)}
+
+         # option (2): take liberties
+         # <|im_start|>user
+         # {prompt}<|im_end|>
+         # <|im_start|>assistant
+         # or
+         # <|im_start|>function
+
+         # Add a sep for the conversation
+         # if self.include_section_separators:
+         #     prompt += "\n### INPUT"
+
+         # Last are the user/assistant messages
+         for message in messages[1:]:
+             assert message["role"] in ["user", "assistant", "function", "tool"], message
+
+             if message["role"] == "user":
+                 if self.simplify_json_content:
+                     try:
+                         content_json = json_loads(message["content"])
+                         content_simple = content_json["message"]
+                         prompt += f"\n{IM_START_TOKEN}user\n{content_simple}{IM_END_TOKEN}"
+                         # prompt += f"\nUSER: {content_simple}"
+                     except Exception:
+                         prompt += f"\n{IM_START_TOKEN}user\n{message['content']}{IM_END_TOKEN}"
+                         # prompt += f"\nUSER: {message['content']}"
+             elif message["role"] == "assistant":
+                 prompt += f"\n{IM_START_TOKEN}assistant"
+                 if message["content"] is not None:
+                     prompt += f"\n{message['content']}"
+                     # prompt += f"\nASSISTANT: {message['content']}"
+                 # need to add the function call if there was one
+                 if "function_call" in message and message["function_call"]:
+                     prompt += f"\n{create_function_call(message['function_call'])}"
+                 prompt += f"{IM_END_TOKEN}"
+             elif message["role"] in ["function", "tool"]:
+                 # TODO find a good way to add this
+                 # prompt += f"\nASSISTANT: (function return) {message['content']}"
+                 prompt += f"\n{IM_START_TOKEN}assistant"
+                 prompt += f"\nFUNCTION RETURN: {message['content']}"
+                 continue
+             else:
+                 raise ValueError(message)
+
+         # Add a sep for the response
+         # if self.include_section_separators:
+         #     prompt += "\n### RESPONSE"
+
+         if self.include_assistant_prefix:
+             # prompt += f"\nASSISTANT:"
+             prompt += f"\n{IM_START_TOKEN}assistant"
+             if self.include_opening_brace_in_prefix:
+                 prompt += "\n{"
+
+         return prompt
+
+     def clean_function_args(self, function_name, function_args):
+         """Some basic Letta-specific cleaning of function args"""
+         cleaned_function_name = function_name
+         cleaned_function_args = function_args.copy() if function_args is not None else {}
+
+         if function_name == "send_message":
+             # strip request_heartbeat
+             cleaned_function_args.pop("request_heartbeat", None)
+
+         # TODO more cleaning to fix errors LLM makes
+         return cleaned_function_name, cleaned_function_args
+
+     def output_to_chat_completion_response(self, raw_llm_output):
+         """Turn raw LLM output into a ChatCompletion style response with:
+         "message" = {
+             "role": "assistant",
+             "content": ...,
+             "function_call": {
+                 "name": ...
+                 "arguments": {
+                     "arg1": val1,
+                     ...
+                 }
+             }
+         }
+         """
+         if self.include_opening_brace_in_prefix and raw_llm_output[0] != "{":
+             raw_llm_output = "{" + raw_llm_output
+
+         try:
+             function_json_output = clean_json(raw_llm_output)
+         except Exception as e:
+             raise Exception(f"Failed to decode JSON from LLM output:\n{raw_llm_output} - error\n{str(e)}")
+         try:
+             function_name = function_json_output["function"]
+             function_parameters = function_json_output["params"]
+         except KeyError as e:
+             raise LLMJSONParsingError(f"Received valid JSON from LLM, but JSON was missing fields: {str(e)}")
+
+         if self.clean_func_args:
+             function_name, function_parameters = self.clean_function_args(function_name, function_parameters)
+
+         message = {
+             "role": "assistant",
+             "content": None,
+             "function_call": {
+                 "name": function_name,
+                 "arguments": json_dumps(function_parameters),
+             },
+         }
+         return message
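
Usage note (not part of the diff): a minimal sketch of the Dolphin wrapper's output parsing, using only the methods shown above; the raw model output string is a made-up example.

from letta.local_llm.llm_chat_completion_wrappers.dolphin import Dolphin21MistralWrapper

wrapper = Dolphin21MistralWrapper()

# Because include_opening_brace_in_prefix=True, the prompt ends with "\n{" and the
# model's completion is expected to start mid-JSON; the parser re-attaches the brace.
raw_llm_output = '"function": "send_message", "params": {"message": "Hi!", "request_heartbeat": false}}'
message = wrapper.output_to_chat_completion_response(raw_llm_output)
# -> {"role": "assistant", "content": None,
#     "function_call": {"name": "send_message", "arguments": '{"message": "Hi!"}'}}
# (clean_function_args strips request_heartbeat from send_message calls before dumping)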