letta-nightly 0.7.21.dev20250522104246__py3-none-any.whl → 0.7.22.dev20250523081403__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- letta/__init__.py +2 -2
- letta/agents/base_agent.py +4 -2
- letta/agents/letta_agent.py +3 -10
- letta/agents/letta_agent_batch.py +6 -6
- letta/cli/cli.py +0 -316
- letta/cli/cli_load.py +0 -52
- letta/client/client.py +2 -1554
- letta/data_sources/connectors.py +4 -2
- letta/functions/ast_parsers.py +33 -43
- letta/groups/sleeptime_multi_agent_v2.py +49 -13
- letta/jobs/llm_batch_job_polling.py +3 -3
- letta/jobs/scheduler.py +20 -19
- letta/llm_api/anthropic_client.py +3 -0
- letta/llm_api/google_vertex_client.py +5 -0
- letta/llm_api/openai_client.py +5 -0
- letta/main.py +2 -362
- letta/server/db.py +5 -0
- letta/server/rest_api/routers/v1/agents.py +72 -43
- letta/server/rest_api/routers/v1/llms.py +2 -2
- letta/server/rest_api/routers/v1/messages.py +5 -3
- letta/server/rest_api/routers/v1/sandbox_configs.py +18 -18
- letta/server/rest_api/routers/v1/sources.py +49 -36
- letta/server/server.py +53 -22
- letta/services/agent_manager.py +797 -124
- letta/services/block_manager.py +14 -62
- letta/services/group_manager.py +37 -0
- letta/services/identity_manager.py +9 -0
- letta/services/job_manager.py +17 -0
- letta/services/llm_batch_manager.py +88 -64
- letta/services/message_manager.py +19 -0
- letta/services/organization_manager.py +10 -0
- letta/services/passage_manager.py +13 -0
- letta/services/per_agent_lock_manager.py +4 -0
- letta/services/provider_manager.py +34 -0
- letta/services/sandbox_config_manager.py +130 -0
- letta/services/source_manager.py +59 -44
- letta/services/step_manager.py +8 -1
- letta/services/tool_manager.py +21 -0
- letta/services/tool_sandbox/e2b_sandbox.py +4 -2
- letta/services/tool_sandbox/local_sandbox.py +7 -3
- letta/services/user_manager.py +16 -0
- {letta_nightly-0.7.21.dev20250522104246.dist-info → letta_nightly-0.7.22.dev20250523081403.dist-info}/METADATA +1 -1
- {letta_nightly-0.7.21.dev20250522104246.dist-info → letta_nightly-0.7.22.dev20250523081403.dist-info}/RECORD +46 -50
- letta/__main__.py +0 -3
- letta/benchmark/benchmark.py +0 -98
- letta/benchmark/constants.py +0 -14
- letta/cli/cli_config.py +0 -227
- {letta_nightly-0.7.21.dev20250522104246.dist-info → letta_nightly-0.7.22.dev20250523081403.dist-info}/LICENSE +0 -0
- {letta_nightly-0.7.21.dev20250522104246.dist-info → letta_nightly-0.7.22.dev20250523081403.dist-info}/WHEEL +0 -0
- {letta_nightly-0.7.21.dev20250522104246.dist-info → letta_nightly-0.7.22.dev20250523081403.dist-info}/entry_points.txt +0 -0
letta/__init__.py
CHANGED
letta/agents/base_agent.py
CHANGED
@@ -100,8 +100,10 @@ class BaseAgent(ABC):
 
         # [DB Call] size of messages and archival memories
         # todo: blocking for now
-
-
+        if num_messages is None:
+            num_messages = await self.message_manager.size_async(actor=self.actor, agent_id=agent_state.id)
+        if num_archival_memories is None:
+            num_archival_memories = await self.passage_manager.size_async(actor=self.actor, agent_id=agent_state.id)
 
         new_system_message_str = compile_system_message(
             system_prompt=agent_state.system,
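The new branch only hits the database when the caller has not already supplied the counts. A minimal, hedged sketch of that lazy-fill pattern, using a stand-in manager class rather than the real letta message_manager:

import asyncio
from typing import Optional


class FakeMessageManager:
    # stand-in for a DB-backed manager; the real one counts rows in the message table
    async def size_async(self, agent_id: str) -> int:
        return 42


async def rebuild_system_prompt(num_messages: Optional[int] = None, agent_id: str = "agent-1") -> int:
    manager = FakeMessageManager()
    # only pay for the round-trip when the caller did not pass a count
    if num_messages is None:
        num_messages = await manager.size_async(agent_id=agent_id)
    return num_messages


print(asyncio.run(rebuild_system_prompt()))                # 42, fetched lazily
print(asyncio.run(rebuild_system_prompt(num_messages=7)))  # 7, no lookup needed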
letta/agents/letta_agent.py
CHANGED
@@ -174,20 +174,13 @@ class LettaAgent(BaseAgent):
             for message in letta_messages:
                 yield f"data: {message.model_dump_json()}\n\n"
 
-            # update usage
-            # TODO: add run_id
-            usage.step_count += 1
-            usage.completion_tokens += response.usage.completion_tokens
-            usage.prompt_tokens += response.usage.prompt_tokens
-            usage.total_tokens += response.usage.total_tokens
-
             if not should_continue:
                 break
 
         # Extend the in context message ids
         if not agent_state.message_buffer_autoclear:
             message_ids = [m.id for m in (current_in_context_messages + new_in_context_messages)]
-            self.agent_manager.
+            await self.agent_manager.set_in_context_messages_async(agent_id=self.agent_id, message_ids=message_ids, actor=self.actor)
 
         # Return back usage
         yield f"data: {usage.model_dump_json()}\n\n"
@@ -285,7 +278,7 @@ class LettaAgent(BaseAgent):
         # Extend the in context message ids
         if not agent_state.message_buffer_autoclear:
             message_ids = [m.id for m in (current_in_context_messages + new_in_context_messages)]
-            self.agent_manager.
+            await self.agent_manager.set_in_context_messages_async(agent_id=self.agent_id, message_ids=message_ids, actor=self.actor)
 
         return current_in_context_messages, new_in_context_messages, usage
 
@@ -437,7 +430,7 @@ class LettaAgent(BaseAgent):
         # Extend the in context message ids
         if not agent_state.message_buffer_autoclear:
             message_ids = [m.id for m in (current_in_context_messages + new_in_context_messages)]
-            self.agent_manager.
+            await self.agent_manager.set_in_context_messages_async(agent_id=self.agent_id, message_ids=message_ids, actor=self.actor)
 
         # TODO: This may be out of sync, if in between steps users add files
         # NOTE (cliandy): temporary for now for particlar use cases.
letta/agents/letta_agent_batch.py
CHANGED
@@ -233,7 +233,7 @@ class LettaAgentBatch(BaseAgent):
         ctx = await self._collect_resume_context(llm_batch_id)
 
         log_event(name="update_statuses")
-        self.
+        await self._update_request_statuses_async(ctx.request_status_updates)
 
         log_event(name="exec_tools")
         exec_results = await self._execute_tools(ctx)
@@ -242,7 +242,7 @@ class LettaAgentBatch(BaseAgent):
         msg_map = await self._persist_tool_messages(exec_results, ctx)
 
         log_event(name="mark_steps_done")
-        self.
+        await self._mark_steps_complete_async(llm_batch_id, ctx.agent_ids)
 
         log_event(name="prepare_next")
         next_reqs, next_step_state = self._prepare_next_iteration(exec_results, ctx, msg_map)
@@ -382,9 +382,9 @@ class LettaAgentBatch(BaseAgent):
 
         return self._extract_tool_call_and_decide_continue(tool_call, item.step_state)
 
-    def
+    async def _update_request_statuses_async(self, updates: List[RequestStatusUpdateInfo]) -> None:
         if updates:
-            self.batch_manager.
+            await self.batch_manager.bulk_update_llm_batch_items_request_status_by_agent_async(updates=updates)
 
     def _build_sandbox(self) -> Tuple[SandboxConfig, Dict[str, Any]]:
         sbx_type = SandboxType.E2B if tool_settings.e2b_api_key else SandboxType.LOCAL
@@ -474,11 +474,11 @@ class LettaAgentBatch(BaseAgent):
         await self.message_manager.create_many_messages_async([m for msgs in msg_map.values() for m in msgs], actor=self.actor)
         return msg_map
 
-    def
+    async def _mark_steps_complete_async(self, llm_batch_id: str, agent_ids: List[str]) -> None:
         updates = [
             StepStatusUpdateInfo(llm_batch_id=llm_batch_id, agent_id=aid, step_status=AgentStepStatus.completed) for aid in agent_ids
         ]
-        self.batch_manager.
+        await self.batch_manager.bulk_update_llm_batch_items_step_status_by_agent_async(updates)
 
     def _prepare_next_iteration(
         self,
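The hunks above convert several private helpers from synchronous methods into coroutines (for example _update_request_statuses_async and _mark_steps_complete_async), so each call site now awaits them. A hedged sketch of that sync-to-async conversion pattern with stand-in classes, not the actual letta managers:

import asyncio
from typing import List


class FakeBatchManager:
    async def bulk_update_step_status_async(self, updates: List[str]) -> None:
        await asyncio.sleep(0)  # stand-in for a bulk DB update


class FakeBatchAgent:
    def __init__(self) -> None:
        self.batch_manager = FakeBatchManager()

    # before: a plain method making a blocking call; after: a coroutine the caller awaits
    async def _mark_steps_complete_async(self, agent_ids: List[str]) -> None:
        updates = [f"{aid}:completed" for aid in agent_ids]
        await self.batch_manager.bulk_update_step_status_async(updates)


asyncio.run(FakeBatchAgent()._mark_steps_complete_async(["agent-1", "agent-2"]))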
letta/cli/cli.py
CHANGED
@@ -1,37 +1,15 @@
-import logging
 import sys
 from enum import Enum
 from typing import Annotated, Optional
 
-import questionary
 import typer
 
-import letta.utils as utils
-from letta import create_client
-from letta.agent import Agent, save_agent
-from letta.config import LettaConfig
-from letta.constants import CLI_WARNING_PREFIX, CORE_MEMORY_BLOCK_CHAR_LIMIT, LETTA_DIR, MIN_CONTEXT_WINDOW
-from letta.local_llm.constants import ASSISTANT_MESSAGE_CLI_SYMBOL
 from letta.log import get_logger
-from letta.schemas.enums import OptionState
-from letta.schemas.memory import ChatMemory, Memory
-
-# from letta.interface import CLIInterface as interface # for printing to terminal
 from letta.streaming_interface import StreamingRefreshCLIInterface as interface  # for printing to terminal
-from letta.utils import open_folder_in_explorer, printd
 
 logger = get_logger(__name__)
 
 
-def open_folder():
-    """Open a folder viewer of the Letta home directory"""
-    try:
-        print(f"Opening home folder: {LETTA_DIR}")
-        open_folder_in_explorer(LETTA_DIR)
-    except Exception as e:
-        print(f"Failed to open folder with system viewer, error:\n{e}")
-
-
 class ServerChoice(Enum):
     rest_api = "rest"
     ws_api = "websocket"
@@ -51,14 +29,6 @@ def server(
     if type == ServerChoice.rest_api:
         pass
 
-        # if LettaConfig.exists():
-        #     config = LettaConfig.load()
-        #     MetadataStore(config)
-        #     _ = create_client() # triggers user creation
-        # else:
-        #     typer.secho(f"No configuration exists. Run letta configure before starting the server.", fg=typer.colors.RED)
-        #     sys.exit(1)
-
         try:
             from letta.server.rest_api.app import start_server
 
@@ -73,292 +43,6 @@ def server(
     raise NotImplementedError("WS suppport deprecated")
 
 
-def run(
-    persona: Annotated[Optional[str], typer.Option(help="Specify persona")] = None,
-    agent: Annotated[Optional[str], typer.Option(help="Specify agent name")] = None,
-    human: Annotated[Optional[str], typer.Option(help="Specify human")] = None,
-    system: Annotated[Optional[str], typer.Option(help="Specify system prompt (raw text)")] = None,
-    system_file: Annotated[Optional[str], typer.Option(help="Specify raw text file containing system prompt")] = None,
-    # model flags
-    model: Annotated[Optional[str], typer.Option(help="Specify the LLM model")] = None,
-    model_wrapper: Annotated[Optional[str], typer.Option(help="Specify the LLM model wrapper")] = None,
-    model_endpoint: Annotated[Optional[str], typer.Option(help="Specify the LLM model endpoint")] = None,
-    model_endpoint_type: Annotated[Optional[str], typer.Option(help="Specify the LLM model endpoint type")] = None,
-    context_window: Annotated[
-        Optional[int], typer.Option(help="The context window of the LLM you are using (e.g. 8k for most Mistral 7B variants)")
-    ] = None,
-    core_memory_limit: Annotated[
-        Optional[int], typer.Option(help="The character limit to each core-memory section (human/persona).")
-    ] = CORE_MEMORY_BLOCK_CHAR_LIMIT,
-    # other
-    first: Annotated[bool, typer.Option(help="Use --first to send the first message in the sequence")] = False,
-    strip_ui: Annotated[bool, typer.Option(help="Remove all the bells and whistles in CLI output (helpful for testing)")] = False,
-    debug: Annotated[bool, typer.Option(help="Use --debug to enable debugging output")] = False,
-    no_verify: Annotated[bool, typer.Option(help="Bypass message verification")] = False,
-    yes: Annotated[bool, typer.Option("-y", help="Skip confirmation prompt and use defaults")] = False,
-    # streaming
-    stream: Annotated[bool, typer.Option(help="Enables message streaming in the CLI (if the backend supports it)")] = False,
-    # whether or not to put the inner thoughts inside the function args
-    no_content: Annotated[
-        OptionState, typer.Option(help="Set to 'yes' for LLM APIs that omit the `content` field during tool calling")
-    ] = OptionState.DEFAULT,
-):
-    """Start chatting with an Letta agent
-
-    Example usage: `letta run --agent myagent --data-source mydata --persona mypersona --human myhuman --model gpt-3.5-turbo`
-
-    :param persona: Specify persona
-    :param agent: Specify agent name (will load existing state if the agent exists, or create a new one with that name)
-    :param human: Specify human
-    :param model: Specify the LLM model
-
-    """
-
-    # setup logger
-    # TODO: remove Utils Debug after global logging is complete.
-    utils.DEBUG = debug
-    # TODO: add logging command line options for runtime log level
-
-    from letta.server.server import logger as server_logger
-
-    if debug:
-        logger.setLevel(logging.DEBUG)
-        server_logger.setLevel(logging.DEBUG)
-    else:
-        logger.setLevel(logging.CRITICAL)
-        server_logger.setLevel(logging.CRITICAL)
-
-    # load config file
-    config = LettaConfig.load()
-
-    # read user id from config
-    client = create_client()
-
-    # determine agent to use, if not provided
-    if not yes and not agent:
-        agents = client.list_agents()
-        agents = [a.name for a in agents]
-
-        if len(agents) > 0:
-            print()
-            select_agent = questionary.confirm("Would you like to select an existing agent?").ask()
-            if select_agent is None:
-                raise KeyboardInterrupt
-            if select_agent:
-                agent = questionary.select("Select agent:", choices=agents).ask()
-
-    # create agent config
-    if agent:
-        agent_id = client.get_agent_id(agent)
-        agent_state = client.get_agent(agent_id)
-    else:
-        agent_state = None
-    human = human if human else config.human
-    persona = persona if persona else config.persona
-    if agent and agent_state:  # use existing agent
-        typer.secho(f"\n🔁 Using existing agent {agent}", fg=typer.colors.GREEN)
-        printd("Loading agent state:", agent_state.id)
-        printd("Agent state:", agent_state.name)
-        # printd("State path:", agent_config.save_state_dir())
-        # printd("Persistent manager path:", agent_config.save_persistence_manager_dir())
-        # printd("Index path:", agent_config.save_agent_index_dir())
-        # TODO: load prior agent state
-
-        # Allow overriding model specifics (model, model wrapper, model endpoint IP + type, context_window)
-        if model and model != agent_state.llm_config.model:
-            typer.secho(
-                f"{CLI_WARNING_PREFIX}Overriding existing model {agent_state.llm_config.model} with {model}", fg=typer.colors.YELLOW
-            )
-            agent_state.llm_config.model = model
-        if context_window is not None and int(context_window) != agent_state.llm_config.context_window:
-            typer.secho(
-                f"{CLI_WARNING_PREFIX}Overriding existing context window {agent_state.llm_config.context_window} with {context_window}",
-                fg=typer.colors.YELLOW,
-            )
-            agent_state.llm_config.context_window = context_window
-        if model_wrapper and model_wrapper != agent_state.llm_config.model_wrapper:
-            typer.secho(
-                f"{CLI_WARNING_PREFIX}Overriding existing model wrapper {agent_state.llm_config.model_wrapper} with {model_wrapper}",
-                fg=typer.colors.YELLOW,
-            )
-            agent_state.llm_config.model_wrapper = model_wrapper
-        if model_endpoint and model_endpoint != agent_state.llm_config.model_endpoint:
-            typer.secho(
-                f"{CLI_WARNING_PREFIX}Overriding existing model endpoint {agent_state.llm_config.model_endpoint} with {model_endpoint}",
-                fg=typer.colors.YELLOW,
-            )
-            agent_state.llm_config.model_endpoint = model_endpoint
-        if model_endpoint_type and model_endpoint_type != agent_state.llm_config.model_endpoint_type:
-            typer.secho(
-                f"{CLI_WARNING_PREFIX}Overriding existing model endpoint type {agent_state.llm_config.model_endpoint_type} with {model_endpoint_type}",
-                fg=typer.colors.YELLOW,
-            )
-            agent_state.llm_config.model_endpoint_type = model_endpoint_type
-
-        # NOTE: commented out because this seems dangerous - instead users should use /systemswap when in the CLI
-        # # user specified a new system prompt
-        # if system:
-        #     # NOTE: agent_state.system is the ORIGINAL system prompt,
-        #     # whereas agent_state.state["system"] is the LATEST system prompt
-        #     existing_system_prompt = agent_state.state["system"] if "system" in agent_state.state else None
-        #     if existing_system_prompt != system:
-        #         # override
-        #         agent_state.state["system"] = system
-
-        # Update the agent with any overrides
-        agent_state = client.update_agent(
-            agent_id=agent_state.id,
-            name=agent_state.name,
-            llm_config=agent_state.llm_config,
-            embedding_config=agent_state.embedding_config,
-        )
-
-        # create agent
-        letta_agent = Agent(agent_state=agent_state, interface=interface(), user=client.user)
-
-    else:  # create new agent
-        # create new agent config: override defaults with args if provided
-        typer.secho("\n🧬 Creating new agent...", fg=typer.colors.WHITE)
-
-        agent_name = agent if agent else utils.create_random_username()
-
-        # create agent
-        client = create_client()
-
-        # choose from list of llm_configs
-        llm_configs = client.list_llm_configs()
-        llm_options = [llm_config.model for llm_config in llm_configs]
-        llm_choices = [questionary.Choice(title=llm_config.pretty_print(), value=llm_config) for llm_config in llm_configs]
-
-        # select model
-        if len(llm_options) == 0:
-            raise ValueError("No LLM models found. Please enable a provider.")
-        elif len(llm_options) == 1:
-            llm_model_name = llm_options[0]
-        else:
-            llm_model_name = questionary.select("Select LLM model:", choices=llm_choices).ask().model
-        llm_config = [llm_config for llm_config in llm_configs if llm_config.model == llm_model_name][0]
-
-        # option to override context window
-        if llm_config.context_window is not None:
-            context_window_validator = lambda x: x.isdigit() and int(x) > MIN_CONTEXT_WINDOW and int(x) <= llm_config.context_window
-            context_window_input = questionary.text(
-                "Select LLM context window limit (hit enter for default):",
-                default=str(llm_config.context_window),
-                validate=context_window_validator,
-            ).ask()
-            if context_window_input is not None:
-                llm_config.context_window = int(context_window_input)
-            else:
-                sys.exit(1)
-
-        # choose form list of embedding configs
-        embedding_configs = client.list_embedding_configs()
-        embedding_options = [embedding_config.embedding_model for embedding_config in embedding_configs]
-
-        embedding_choices = [
-            questionary.Choice(title=embedding_config.pretty_print(), value=embedding_config) for embedding_config in embedding_configs
-        ]
-
-        # select model
-        if len(embedding_options) == 0:
-            raise ValueError("No embedding models found. Please enable a provider.")
-        elif len(embedding_options) == 1:
-            embedding_model_name = embedding_options[0]
-        else:
-            embedding_model_name = questionary.select("Select embedding model:", choices=embedding_choices).ask().embedding_model
-        embedding_config = [
-            embedding_config for embedding_config in embedding_configs if embedding_config.embedding_model == embedding_model_name
-        ][0]
-
-        human_obj = client.get_human(client.get_human_id(name=human))
-        persona_obj = client.get_persona(client.get_persona_id(name=persona))
-        if human_obj is None:
-            typer.secho(f"Couldn't find human {human} in database, please run `letta add human`", fg=typer.colors.RED)
-            sys.exit(1)
-        if persona_obj is None:
-            typer.secho(f"Couldn't find persona {persona} in database, please run `letta add persona`", fg=typer.colors.RED)
-            sys.exit(1)
-
-        if system_file:
-            try:
-                with open(system_file, "r", encoding="utf-8") as file:
-                    system = file.read().strip()
-                    printd("Loaded system file successfully.")
-            except FileNotFoundError:
-                typer.secho(f"System file not found at {system_file}", fg=typer.colors.RED)
-        system_prompt = system if system else None
-
-        memory = ChatMemory(human=human_obj.value, persona=persona_obj.value, limit=core_memory_limit)
-        metadata = {"human": human_obj.template_name, "persona": persona_obj.template_name}
-
-        typer.secho(f"-> {ASSISTANT_MESSAGE_CLI_SYMBOL} Using persona profile: '{persona_obj.template_name}'", fg=typer.colors.WHITE)
-        typer.secho(f"-> 🧑 Using human profile: '{human_obj.template_name}'", fg=typer.colors.WHITE)
-
-        # add tools
-        agent_state = client.create_agent(
-            name=agent_name,
-            system=system_prompt,
-            embedding_config=embedding_config,
-            llm_config=llm_config,
-            memory=memory,
-            metadata=metadata,
-        )
-        assert isinstance(agent_state.memory, Memory), f"Expected Memory, got {type(agent_state.memory)}"
-        typer.secho(f"-> 🛠️ {len(agent_state.tools)} tools: {', '.join([t.name for t in agent_state.tools])}", fg=typer.colors.WHITE)
-
-        letta_agent = Agent(
-            interface=interface(),
-            agent_state=client.get_agent(agent_state.id),
-            # gpt-3.5-turbo tends to omit inner monologue, relax this requirement for now
-            first_message_verify_mono=True if (model is not None and "gpt-4" in model) else False,
-            user=client.user,
-        )
-        save_agent(agent=letta_agent)
-        typer.secho(f"🎉 Created new agent '{letta_agent.agent_state.name}' (id={letta_agent.agent_state.id})", fg=typer.colors.GREEN)
-
-    # start event loop
-    from letta.main import run_agent_loop
-
-    print()  # extra space
-    run_agent_loop(
-        letta_agent=letta_agent,
-        config=config,
-        first=first,
-        no_verify=no_verify,
-        stream=stream,
-    )  # TODO: add back no_verify
-
-
-def delete_agent(
-    agent_name: Annotated[str, typer.Option(help="Specify agent to delete")],
-):
-    """Delete an agent from the database"""
-    # use client ID is no user_id provided
-    config = LettaConfig.load()
-    MetadataStore(config)
-    client = create_client()
-    agent = client.get_agent_by_name(agent_name)
-    if not agent:
-        typer.secho(f"Couldn't find agent named '{agent_name}' to delete", fg=typer.colors.RED)
-        sys.exit(1)
-
-    confirm = questionary.confirm(f"Are you sure you want to delete agent '{agent_name}' (id={agent.id})?", default=False).ask()
-    if confirm is None:
-        raise KeyboardInterrupt
-    if not confirm:
-        typer.secho(f"Cancelled agent deletion '{agent_name}' (id={agent.id})", fg=typer.colors.GREEN)
-        return
-
-    try:
-        # delete the agent
-        client.delete_agent(agent.id)
-        typer.secho(f"🕊️ Successfully deleted agent '{agent_name}' (id={agent.id})", fg=typer.colors.GREEN)
-    except Exception:
-        typer.secho(f"Failed to delete agent '{agent_name}' (id={agent.id})", fg=typer.colors.RED)
-        sys.exit(1)
-
-
 def version() -> str:
     import letta
 
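After these removals the module's remaining command surface is essentially server and version. A rough, illustrative typer skeleton of that shape (stand-in bodies, not the real letta CLI):

from enum import Enum

import typer

app = typer.Typer()  # stand-in app; the real commands are wired up by letta's entry point


class ServerChoice(Enum):
    rest_api = "rest"
    ws_api = "websocket"


@app.command()
def server(type: ServerChoice = ServerChoice.rest_api) -> None:
    typer.echo(f"would start the {type.value} server here")


@app.command()
def version() -> None:
    typer.echo("0.7.22 (stub)")


if __name__ == "__main__":
    app()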
letta/cli/cli_load.py
CHANGED
@@ -8,61 +8,9 @@ letta load <data-connector-type> --name <dataset-name> [ADDITIONAL ARGS]
 
 """
 
-import uuid
-from typing import Annotated, List, Optional
-
-import questionary
 import typer
 
-from letta import create_client
-from letta.data_sources.connectors import DirectoryConnector
-
 app = typer.Typer()
 
 
 default_extensions = "txt,md,pdf"
-
-
-@app.command("directory")
-def load_directory(
-    name: Annotated[str, typer.Option(help="Name of dataset to load.")],
-    input_dir: Annotated[Optional[str], typer.Option(help="Path to directory containing dataset.")] = None,
-    input_files: Annotated[List[str], typer.Option(help="List of paths to files containing dataset.")] = [],
-    recursive: Annotated[bool, typer.Option(help="Recursively search for files in directory.")] = False,
-    extensions: Annotated[str, typer.Option(help="Comma separated list of file extensions to load")] = default_extensions,
-    user_id: Annotated[Optional[uuid.UUID], typer.Option(help="User ID to associate with dataset.")] = None,  # TODO: remove
-    description: Annotated[Optional[str], typer.Option(help="Description of the source.")] = None,
-):
-    client = create_client()
-
-    # create connector
-    connector = DirectoryConnector(input_files=input_files, input_directory=input_dir, recursive=recursive, extensions=extensions)
-
-    # choose form list of embedding configs
-    embedding_configs = client.list_embedding_configs()
-    embedding_options = [embedding_config.embedding_model for embedding_config in embedding_configs]
-
-    embedding_choices = [
-        questionary.Choice(title=embedding_config.pretty_print(), value=embedding_config) for embedding_config in embedding_configs
-    ]
-
-    # select model
-    if len(embedding_options) == 0:
-        raise ValueError("No embedding models found. Please enable a provider.")
-    elif len(embedding_options) == 1:
-        embedding_model_name = embedding_options[0]
-    else:
-        embedding_model_name = questionary.select("Select embedding model:", choices=embedding_choices).ask().embedding_model
-    embedding_config = [
-        embedding_config for embedding_config in embedding_configs if embedding_config.embedding_model == embedding_model_name
-    ][0]
-
-    # create source
-    source = client.create_source(name=name, embedding_config=embedding_config)
-
-    # load data
-    try:
-        client.load_data(connector, source_name=name)
-    except Exception as e:
-        typer.secho(f"Failed to load data from provided information.\n{e}", fg=typer.colors.RED)
-        client.delete_source(source.id)