letta-nightly 0.7.21.dev20250522104246__py3-none-any.whl → 0.7.22.dev20250523104244__py3-none-any.whl
This diff shows the content changes between publicly released versions of the package as they appear in their respective public registries, and is provided for informational purposes only.
- letta/__init__.py +2 -2
- letta/agents/base_agent.py +4 -2
- letta/agents/letta_agent.py +3 -10
- letta/agents/letta_agent_batch.py +6 -6
- letta/cli/cli.py +0 -316
- letta/cli/cli_load.py +0 -52
- letta/client/client.py +2 -1554
- letta/data_sources/connectors.py +4 -2
- letta/functions/ast_parsers.py +33 -43
- letta/groups/sleeptime_multi_agent_v2.py +49 -13
- letta/jobs/llm_batch_job_polling.py +3 -3
- letta/jobs/scheduler.py +20 -19
- letta/llm_api/anthropic_client.py +3 -0
- letta/llm_api/google_vertex_client.py +5 -0
- letta/llm_api/openai_client.py +5 -0
- letta/main.py +2 -362
- letta/server/db.py +5 -0
- letta/server/rest_api/routers/v1/agents.py +72 -43
- letta/server/rest_api/routers/v1/llms.py +2 -2
- letta/server/rest_api/routers/v1/messages.py +5 -3
- letta/server/rest_api/routers/v1/sandbox_configs.py +18 -18
- letta/server/rest_api/routers/v1/sources.py +49 -36
- letta/server/server.py +53 -22
- letta/services/agent_manager.py +797 -124
- letta/services/block_manager.py +14 -62
- letta/services/group_manager.py +37 -0
- letta/services/identity_manager.py +9 -0
- letta/services/job_manager.py +17 -0
- letta/services/llm_batch_manager.py +88 -64
- letta/services/message_manager.py +19 -0
- letta/services/organization_manager.py +10 -0
- letta/services/passage_manager.py +13 -0
- letta/services/per_agent_lock_manager.py +4 -0
- letta/services/provider_manager.py +34 -0
- letta/services/sandbox_config_manager.py +130 -0
- letta/services/source_manager.py +59 -44
- letta/services/step_manager.py +8 -1
- letta/services/tool_manager.py +21 -0
- letta/services/tool_sandbox/e2b_sandbox.py +4 -2
- letta/services/tool_sandbox/local_sandbox.py +7 -3
- letta/services/user_manager.py +16 -0
- {letta_nightly-0.7.21.dev20250522104246.dist-info → letta_nightly-0.7.22.dev20250523104244.dist-info}/METADATA +1 -1
- {letta_nightly-0.7.21.dev20250522104246.dist-info → letta_nightly-0.7.22.dev20250523104244.dist-info}/RECORD +46 -50
- letta/__main__.py +0 -3
- letta/benchmark/benchmark.py +0 -98
- letta/benchmark/constants.py +0 -14
- letta/cli/cli_config.py +0 -227
- {letta_nightly-0.7.21.dev20250522104246.dist-info → letta_nightly-0.7.22.dev20250523104244.dist-info}/LICENSE +0 -0
- {letta_nightly-0.7.21.dev20250522104246.dist-info → letta_nightly-0.7.22.dev20250523104244.dist-info}/WHEEL +0 -0
- {letta_nightly-0.7.21.dev20250522104246.dist-info → letta_nightly-0.7.22.dev20250523104244.dist-info}/entry_points.txt +0 -0
letta/services/agent_manager.py
CHANGED
@@ -1,9 +1,11 @@
 import asyncio
+import os
 from datetime import datetime, timezone
 from typing import Dict, List, Optional, Set, Tuple
 
 import numpy as np
 import sqlalchemy as sa
+from openai.types.beta.function_tool import FunctionTool as OpenAITool
 from sqlalchemy import Select, and_, delete, func, insert, literal, or_, select, union_all
 from sqlalchemy.dialects.postgresql import insert as pg_insert
 
@@ -20,6 +22,7 @@ from letta.constants import (
 )
 from letta.embeddings import embedding_model
 from letta.helpers.datetime_helpers import get_utc_time
+from letta.llm_api.llm_client import LLMClient
 from letta.log import get_logger
 from letta.orm import Agent as AgentModel
 from letta.orm import AgentPassage, AgentsTags
@@ -42,9 +45,11 @@ from letta.schemas.agent import AgentType, CreateAgent, UpdateAgent, get_prompt_
 from letta.schemas.block import Block as PydanticBlock
 from letta.schemas.block import BlockUpdate
 from letta.schemas.embedding_config import EmbeddingConfig
+from letta.schemas.enums import MessageRole, ProviderType
 from letta.schemas.group import Group as PydanticGroup
 from letta.schemas.group import ManagerType
-from letta.schemas.
+from letta.schemas.letta_message_content import TextContent
+from letta.schemas.memory import ContextWindowOverview, Memory
 from letta.schemas.message import Message
 from letta.schemas.message import Message as PydanticMessage
 from letta.schemas.message import MessageCreate, MessageUpdate
@@ -79,7 +84,7 @@ from letta.services.source_manager import SourceManager
 from letta.services.tool_manager import ToolManager
 from letta.settings import settings
 from letta.tracing import trace_method
-from letta.utils import enforce_types, united_diff
+from letta.utils import count_tokens, enforce_types, united_diff
 
 logger = get_logger(__name__)
 
@@ -548,6 +553,7 @@ class AgentManager:
 
         return init_messages
 
+    @trace_method
     @enforce_types
     def append_initial_message_sequence_to_in_context_messages(
         self, actor: PydanticUser, agent_state: PydanticAgentState, initial_message_sequence: Optional[List[MessageCreate]] = None
@@ -555,6 +561,7 @@ class AgentManager:
         init_messages = self._generate_initial_message_sequence(actor, agent_state, initial_message_sequence)
         return self.append_to_in_context_messages(init_messages, agent_id=agent_state.id, actor=actor)
 
+    @trace_method
     @enforce_types
     def update_agent(
         self,
@@ -674,6 +681,7 @@ class AgentManager:
 
         return agent.to_pydantic()
 
+    @trace_method
     @enforce_types
     async def update_agent_async(
         self,
@@ -792,6 +800,7 @@ class AgentManager:
         return await agent.to_pydantic_async()
 
     # TODO: Make this general and think about how to roll this into sqlalchemybase
+    @trace_method
     def list_agents(
         self,
         actor: PydanticUser,
@@ -850,6 +859,7 @@ class AgentManager:
         agents = result.scalars().all()
         return [agent.to_pydantic(include_relationships=include_relationships) for agent in agents]
 
+    @trace_method
    async def list_agents_async(
        self,
        actor: PydanticUser,
@@ -909,6 +919,7 @@ class AgentManager:
         return await asyncio.gather(*[agent.to_pydantic_async(include_relationships=include_relationships) for agent in agents])
 
     @enforce_types
+    @trace_method
     def list_agents_matching_tags(
         self,
         actor: PydanticUser,
@@ -951,6 +962,7 @@ class AgentManager:
 
         return list(session.execute(query).scalars())
 
+    @trace_method
     def size(
         self,
         actor: PydanticUser,
@@ -961,6 +973,7 @@ class AgentManager:
         with db_registry.session() as session:
             return AgentModel.size(db_session=session, actor=actor)
 
+    @trace_method
     async def size_async(
         self,
         actor: PydanticUser,
@@ -971,6 +984,7 @@ class AgentManager:
         async with db_registry.async_session() as session:
             return await AgentModel.size_async(db_session=session, actor=actor)
 
+    @trace_method
     @enforce_types
     def get_agent_by_id(self, agent_id: str, actor: PydanticUser) -> PydanticAgentState:
         """Fetch an agent by its ID."""
@@ -978,6 +992,37 @@ class AgentManager:
             agent = AgentModel.read(db_session=session, identifier=agent_id, actor=actor)
             return agent.to_pydantic()
 
+    @trace_method
+    @enforce_types
+    async def get_agent_by_id_async(
+        self,
+        agent_id: str,
+        actor: PydanticUser,
+        include_relationships: Optional[List[str]] = None,
+    ) -> PydanticAgentState:
+        """Fetch an agent by its ID."""
+        async with db_registry.async_session() as session:
+            agent = await AgentModel.read_async(db_session=session, identifier=agent_id, actor=actor)
+            return await agent.to_pydantic_async(include_relationships=include_relationships)
+
+    @trace_method
+    @enforce_types
+    async def get_agents_by_ids_async(
+        self,
+        agent_ids: list[str],
+        actor: PydanticUser,
+        include_relationships: Optional[List[str]] = None,
+    ) -> list[PydanticAgentState]:
+        """Fetch a list of agents by their IDs."""
+        async with db_registry.async_session() as session:
+            agents = await AgentModel.read_multiple_async(
+                db_session=session,
+                identifiers=agent_ids,
+                actor=actor,
+            )
+            return await asyncio.gather(*[agent.to_pydantic_async(include_relationships=include_relationships) for agent in agents])
+
+    @trace_method
     @enforce_types
     async def get_agent_by_id_async(
         self,
@@ -1013,6 +1058,7 @@ class AgentManager:
             agent = AgentModel.read(db_session=session, name=agent_name, actor=actor)
             return agent.to_pydantic()
 
+    @trace_method
     @enforce_types
     def delete_agent(self, agent_id: str, actor: PydanticUser) -> None:
         """
@@ -1060,6 +1106,57 @@ class AgentManager:
             else:
                 logger.debug(f"Agent with ID {agent_id} successfully hard deleted")
 
+    @trace_method
+    @enforce_types
+    async def delete_agent_async(self, agent_id: str, actor: PydanticUser) -> None:
+        """
+        Deletes an agent and its associated relationships.
+        Ensures proper permission checks and cascades where applicable.
+
+        Args:
+            agent_id: ID of the agent to be deleted.
+            actor: User performing the action.
+
+        Raises:
+            NoResultFound: If agent doesn't exist
+        """
+        async with db_registry.async_session() as session:
+            # Retrieve the agent
+            logger.debug(f"Hard deleting Agent with ID: {agent_id} with actor={actor}")
+            agent = await AgentModel.read_async(db_session=session, identifier=agent_id, actor=actor)
+            agents_to_delete = [agent]
+            sleeptime_group_to_delete = None
+
+            # Delete sleeptime agent and group (TODO this is flimsy pls fix)
+            if agent.multi_agent_group:
+                participant_agent_ids = agent.multi_agent_group.agent_ids
+                if agent.multi_agent_group.manager_type in {ManagerType.sleeptime, ManagerType.voice_sleeptime} and participant_agent_ids:
+                    for participant_agent_id in participant_agent_ids:
+                        try:
+                            sleeptime_agent = await AgentModel.read_async(db_session=session, identifier=participant_agent_id, actor=actor)
+                            agents_to_delete.append(sleeptime_agent)
+                        except NoResultFound:
+                            pass  # agent already deleted
+                    sleeptime_agent_group = await GroupModel.read_async(
+                        db_session=session, identifier=agent.multi_agent_group.id, actor=actor
+                    )
+                    sleeptime_group_to_delete = sleeptime_agent_group
+
+            try:
+                if sleeptime_group_to_delete is not None:
+                    await session.delete(sleeptime_group_to_delete)
+                    await session.commit()
+                for agent in agents_to_delete:
+                    await session.delete(agent)
+                    await session.commit()
+            except Exception as e:
+                await session.rollback()
+                logger.exception(f"Failed to hard delete Agent with ID {agent_id}")
+                raise ValueError(f"Failed to hard delete Agent with ID {agent_id}: {e}")
+            else:
+                logger.debug(f"Agent with ID {agent_id} successfully hard deleted")
+
+    @trace_method
     @enforce_types
     def serialize(self, agent_id: str, actor: PydanticUser) -> AgentSchema:
         with db_registry.session() as session:
@@ -1068,6 +1165,7 @@ class AgentManager:
             data = schema.dump(agent)
             return AgentSchema(**data)
 
+    @trace_method
     @enforce_types
     def deserialize(
         self,
@@ -1137,6 +1235,7 @@ class AgentManager:
    # ======================================================================================================================
    # Per Agent Environment Variable Management
    # ======================================================================================================================
+    @trace_method
     @enforce_types
     def _set_environment_variables(
         self,
@@ -1192,6 +1291,7 @@ class AgentManager:
             # Return the updated agent state
             return agent.to_pydantic()
 
+    @trace_method
     @enforce_types
     def list_groups(self, agent_id: str, actor: PydanticUser, manager_type: Optional[str] = None) -> List[PydanticGroup]:
         with db_registry.session() as session:
@@ -1208,11 +1308,19 @@ class AgentManager:
     # TODO: 2) These messages are ordered from oldest to newest
     # TODO: This can be fixed by having an actual relationship in the ORM for message_ids
     # TODO: This can also be made more efficient, instead of getting, setting, we can do it all in one db session for one query.
+    @trace_method
     @enforce_types
     def get_in_context_messages(self, agent_id: str, actor: PydanticUser) -> List[PydanticMessage]:
         message_ids = self.get_agent_by_id(agent_id=agent_id, actor=actor).message_ids
         return self.message_manager.get_messages_by_ids(message_ids=message_ids, actor=actor)
 
+    @trace_method
+    @enforce_types
+    async def get_in_context_messages_async(self, agent_id: str, actor: PydanticUser) -> List[PydanticMessage]:
+        agent = await self.get_agent_by_id_async(agent_id=agent_id, include_relationships=[], actor=actor)
+        return await self.message_manager.get_messages_by_ids_async(message_ids=agent.message_ids, actor=actor)
+
+    @trace_method
     @enforce_types
     async def get_in_context_messages_async(self, agent_id: str, actor: PydanticUser) -> List[PydanticMessage]:
         agent = await self.get_agent_by_id_async(agent_id=agent_id, include_relationships=[], actor=actor)
@@ -1223,6 +1331,7 @@ class AgentManager:
         message_ids = self.get_agent_by_id(agent_id=agent_id, actor=actor).message_ids
         return self.message_manager.get_message_by_id(message_id=message_ids[0], actor=actor)
 
+    @trace_method
     @enforce_types
     async def get_system_message_async(self, agent_id: str, actor: PydanticUser) -> PydanticMessage:
         agent = await self.get_agent_by_id_async(agent_id=agent_id, include_relationships=[], actor=actor)
@@ -1231,6 +1340,7 @@ class AgentManager:
     # TODO: This is duplicated below
     # TODO: This is legacy code and should be cleaned up
     # TODO: A lot of the memory "compilation" should be offset to a separate class
+    @trace_method
     @enforce_types
     def rebuild_system_prompt(self, agent_id: str, actor: PydanticUser, force=False, update_timestamp=True) -> PydanticAgentState:
         """Rebuilds the system message with the latest memory object and any shared memory block updates
@@ -1296,6 +1406,75 @@ class AgentManager:
         else:
             return agent_state
 
+    @trace_method
+    @enforce_types
+    async def rebuild_system_prompt_async(
+        self, agent_id: str, actor: PydanticUser, force=False, update_timestamp=True
+    ) -> PydanticAgentState:
+        """Rebuilds the system message with the latest memory object and any shared memory block updates
+
+        Updates to core memory blocks should trigger a "rebuild", which itself will create a new message object
+
+        Updates to the memory header should *not* trigger a rebuild, since that will simply flood recall storage with excess messages
+        """
+        agent_state = await self.get_agent_by_id_async(agent_id=agent_id, include_relationships=["memory"], actor=actor)
+
+        curr_system_message = await self.get_system_message_async(
+            agent_id=agent_id, actor=actor
+        )  # this is the system + memory bank, not just the system prompt
+        curr_system_message_openai = curr_system_message.to_openai_dict()
+
+        # note: we only update the system prompt if the core memory is changed
+        # this means that the archival/recall memory statistics may be someout out of date
+        curr_memory_str = agent_state.memory.compile()
+        if curr_memory_str in curr_system_message_openai["content"] and not force:
+            # NOTE: could this cause issues if a block is removed? (substring match would still work)
+            logger.debug(
+                f"Memory hasn't changed for agent id={agent_id} and actor=({actor.id}, {actor.name}), skipping system prompt rebuild"
+            )
+            return agent_state
+
+        # If the memory didn't update, we probably don't want to update the timestamp inside
+        # For example, if we're doing a system prompt swap, this should probably be False
+        if update_timestamp:
+            memory_edit_timestamp = get_utc_time()
+        else:
+            # NOTE: a bit of a hack - we pull the timestamp from the message created_by
+            memory_edit_timestamp = curr_system_message.created_at
+
+        num_messages = await self.message_manager.size_async(actor=actor, agent_id=agent_id)
+        num_archival_memories = await self.passage_manager.size_async(actor=actor, agent_id=agent_id)
+
+        # update memory (TODO: potentially update recall/archival stats separately)
+        new_system_message_str = compile_system_message(
+            system_prompt=agent_state.system,
+            in_context_memory=agent_state.memory,
+            in_context_memory_last_edit=memory_edit_timestamp,
+            recent_passages=self.list_passages(actor=actor, agent_id=agent_id, ascending=False, limit=10),
+            previous_message_count=num_messages,
+            archival_memory_size=num_archival_memories,
+        )
+
+        diff = united_diff(curr_system_message_openai["content"], new_system_message_str)
+        if len(diff) > 0:  # there was a diff
+            logger.debug(f"Rebuilding system with new memory...\nDiff:\n{diff}")
+
+            # Swap the system message out (only if there is a diff)
+            message = PydanticMessage.dict_to_message(
+                agent_id=agent_id,
+                model=agent_state.llm_config.model,
+                openai_message_dict={"role": "system", "content": new_system_message_str},
+            )
+            message = await self.message_manager.update_message_by_id_async(
+                message_id=curr_system_message.id,
+                message_update=MessageUpdate(**message.model_dump()),
+                actor=actor,
+            )
+            return await self.set_in_context_messages_async(agent_id=agent_id, message_ids=agent_state.message_ids, actor=actor)
+        else:
+            return agent_state
+
+    @trace_method
     @enforce_types
     async def rebuild_system_prompt_async(
         self, agent_id: str, actor: PydanticUser, force=False, update_timestamp=True
@@ -1367,6 +1546,12 @@ class AgentManager:
     def set_in_context_messages(self, agent_id: str, message_ids: List[str], actor: PydanticUser) -> PydanticAgentState:
         return self.update_agent(agent_id=agent_id, agent_update=UpdateAgent(message_ids=message_ids), actor=actor)
 
+    @trace_method
+    @enforce_types
+    async def set_in_context_messages_async(self, agent_id: str, message_ids: List[str], actor: PydanticUser) -> PydanticAgentState:
+        return await self.update_agent_async(agent_id=agent_id, agent_update=UpdateAgent(message_ids=message_ids), actor=actor)
+
+    @trace_method
     @enforce_types
     async def set_in_context_messages_async(self, agent_id: str, message_ids: List[str], actor: PydanticUser) -> PydanticAgentState:
         return await self.update_agent_async(agent_id=agent_id, agent_update=UpdateAgent(message_ids=message_ids), actor=actor)
@@ -1377,6 +1562,7 @@ class AgentManager:
         new_messages = [message_ids[0]] + message_ids[num:]  # 0 is system message
         return self.set_in_context_messages(agent_id=agent_id, message_ids=new_messages, actor=actor)
 
+    @trace_method
     @enforce_types
     def trim_all_in_context_messages_except_system(self, agent_id: str, actor: PydanticUser) -> PydanticAgentState:
         message_ids = self.get_agent_by_id(agent_id=agent_id, actor=actor).message_ids
@@ -1384,6 +1570,7 @@ class AgentManager:
         new_messages = [message_ids[0]]  # 0 is system message
         return self.set_in_context_messages(agent_id=agent_id, message_ids=new_messages, actor=actor)
 
+    @trace_method
     @enforce_types
     def prepend_to_in_context_messages(self, messages: List[PydanticMessage], agent_id: str, actor: PydanticUser) -> PydanticAgentState:
         message_ids = self.get_agent_by_id(agent_id=agent_id, actor=actor).message_ids
@@ -1391,6 +1578,7 @@ class AgentManager:
         message_ids = [message_ids[0]] + [m.id for m in new_messages] + message_ids[1:]
         return self.set_in_context_messages(agent_id=agent_id, message_ids=message_ids, actor=actor)
 
+    @trace_method
     @enforce_types
     def append_to_in_context_messages(self, messages: List[PydanticMessage], agent_id: str, actor: PydanticUser) -> PydanticAgentState:
         messages = self.message_manager.create_many_messages(messages, actor=actor)
@@ -1398,6 +1586,7 @@ class AgentManager:
         message_ids += [m.id for m in messages]
         return self.set_in_context_messages(agent_id=agent_id, message_ids=message_ids, actor=actor)
 
+    @trace_method
     @enforce_types
     def reset_messages(self, agent_id: str, actor: PydanticUser, add_default_initial_messages: bool = False) -> PydanticAgentState:
         """
@@ -1445,6 +1634,7 @@ class AgentManager:
         return self.append_to_in_context_messages([system_message], agent_id=agent_state.id, actor=actor)
 
     # TODO: I moved this from agent.py - replace all mentions of this with the agent_manager version
+    @trace_method
     @enforce_types
     def update_memory_if_changed(self, agent_id: str, new_memory: Memory, actor: PydanticUser) -> PydanticAgentState:
         """
@@ -1482,6 +1672,7 @@ class AgentManager:
 
         return agent_state
 
+    @trace_method
     @enforce_types
     async def refresh_memory_async(self, agent_state: PydanticAgentState, actor: PydanticUser) -> PydanticAgentState:
         block_ids = [b.id for b in agent_state.memory.blocks]
@@ -1496,6 +1687,7 @@ class AgentManager:
    # ======================================================================================================================
    # Source Management
    # ======================================================================================================================
+    @trace_method
     @enforce_types
     def attach_source(self, agent_id: str, source_id: str, actor: PydanticUser) -> PydanticAgentState:
         """
@@ -1540,6 +1732,7 @@ class AgentManager:
 
             return agent.to_pydantic()
 
+    @trace_method
     @enforce_types
     def append_system_message(self, agent_id: str, content: str, actor: PydanticUser):
 
@@ -1552,6 +1745,7 @@ class AgentManager:
         # update agent in-context message IDs
         self.append_to_in_context_messages(messages=[message], agent_id=agent_id, actor=actor)
 
+    @trace_method
     @enforce_types
     def list_attached_sources(self, agent_id: str, actor: PydanticUser) -> List[PydanticSource]:
         """
@@ -1571,6 +1765,27 @@ class AgentManager:
             # Use the lazy-loaded relationship to get sources
             return [source.to_pydantic() for source in agent.sources]
 
+    @trace_method
+    @enforce_types
+    async def list_attached_sources_async(self, agent_id: str, actor: PydanticUser) -> List[PydanticSource]:
+        """
+        Lists all sources attached to an agent.
+
+        Args:
+            agent_id: ID of the agent to list sources for
+            actor: User performing the action
+
+        Returns:
+            List[str]: List of source IDs attached to the agent
+        """
+        async with db_registry.async_session() as session:
+            # Verify agent exists and user has permission to access it
+            agent = await AgentModel.read_async(db_session=session, identifier=agent_id, actor=actor)
+
+            # Use the lazy-loaded relationship to get sources
+            return [source.to_pydantic() for source in agent.sources]
+
+    @trace_method
     @enforce_types
     async def list_attached_sources_async(self, agent_id: str, actor: PydanticUser) -> List[PydanticSource]:
         """
@@ -1620,6 +1835,7 @@ class AgentManager:
    # ======================================================================================================================
    # Block management
    # ======================================================================================================================
+    @trace_method
     @enforce_types
     def get_block_with_label(
         self,
@@ -1635,6 +1851,51 @@ class AgentManager:
                     return block.to_pydantic()
             raise NoResultFound(f"No block with label '{block_label}' found for agent '{agent_id}'")
 
+    @trace_method
+    @enforce_types
+    async def get_block_with_label_async(
+        self,
+        agent_id: str,
+        block_label: str,
+        actor: PydanticUser,
+    ) -> PydanticBlock:
+        """Gets a block attached to an agent by its label."""
+        async with db_registry.async_session() as session:
+            agent = await AgentModel.read_async(db_session=session, identifier=agent_id, actor=actor)
+            for block in agent.core_memory:
+                if block.label == block_label:
+                    return block.to_pydantic()
+            raise NoResultFound(f"No block with label '{block_label}' found for agent '{agent_id}'")
+
+    @trace_method
+    @enforce_types
+    async def modify_block_by_label_async(
+        self,
+        agent_id: str,
+        block_label: str,
+        block_update: BlockUpdate,
+        actor: PydanticUser,
+    ) -> PydanticBlock:
+        """Gets a block attached to an agent by its label."""
+        async with db_registry.async_session() as session:
+            block = None
+            agent = await AgentModel.read_async(db_session=session, identifier=agent_id, actor=actor)
+            for block in agent.core_memory:
+                if block.label == block_label:
+                    block = block
+                    break
+            if not block:
+                raise NoResultFound(f"No block with label '{block_label}' found for agent '{agent_id}'")
+
+            update_data = block_update.model_dump(to_orm=True, exclude_unset=True, exclude_none=True)
+
+            for key, value in update_data.items():
+                setattr(block, key, value)
+
+            await block.update_async(session, actor=actor)
+            return block.to_pydantic()
+
+    @trace_method
     @enforce_types
     async def modify_block_by_label_async(
         self,
@@ -1686,6 +1947,7 @@ class AgentManager:
             agent.update(session, actor=actor)
             return agent.to_pydantic()
 
+    @trace_method
     @enforce_types
     def attach_block(self, agent_id: str, block_id: str, actor: PydanticUser) -> PydanticAgentState:
         """Attaches a block to an agent."""
@@ -1697,6 +1959,19 @@ class AgentManager:
             agent.update(session, actor=actor)
             return agent.to_pydantic()
 
+    @trace_method
+    @enforce_types
+    async def attach_block_async(self, agent_id: str, block_id: str, actor: PydanticUser) -> PydanticAgentState:
+        """Attaches a block to an agent."""
+        async with db_registry.async_session() as session:
+            agent = await AgentModel.read_async(db_session=session, identifier=agent_id, actor=actor)
+            block = await BlockModel.read_async(db_session=session, identifier=block_id, actor=actor)
+
+            agent.core_memory.append(block)
+            await agent.update_async(session, actor=actor)
+            return await agent.to_pydantic_async()
+
+    @trace_method
     @enforce_types
     def detach_block(
         self,
@@ -1717,6 +1992,28 @@ class AgentManager:
             agent.update(session, actor=actor)
             return agent.to_pydantic()
 
+    @trace_method
+    @enforce_types
+    async def detach_block_async(
+        self,
+        agent_id: str,
+        block_id: str,
+        actor: PydanticUser,
+    ) -> PydanticAgentState:
+        """Detaches a block from an agent."""
+        async with db_registry.async_session() as session:
+            agent = await AgentModel.read_async(db_session=session, identifier=agent_id, actor=actor)
+            original_length = len(agent.core_memory)
+
+            agent.core_memory = [b for b in agent.core_memory if b.id != block_id]
+
+            if len(agent.core_memory) == original_length:
+                raise NoResultFound(f"No block with id '{block_id}' found for agent '{agent_id}' with actor id: '{actor.id}'")
+
+            await agent.update_async(session, actor=actor)
+            return await agent.to_pydantic_async()
+
+    @trace_method
     @enforce_types
     def detach_block_with_label(
         self,
@@ -1769,105 +2066,121 @@ class AgentManager:
             embedded_text = np.array(embedded_text)
             embedded_text = np.pad(embedded_text, (0, MAX_EMBEDDING_DIM - embedded_text.shape[0]), mode="constant").tolist()
 
-        with
-
-
-        if not agent_only:  # Include source passages
-            if agent_id is not None:
-                source_passages = (
-                    select(SourcePassage, literal(None).label("agent_id"))
-                    .join(SourcesAgents, SourcesAgents.source_id == SourcePassage.source_id)
-                    .where(SourcesAgents.agent_id == agent_id)
-                    .where(SourcePassage.organization_id == actor.organization_id)
-                )
-            else:
-                source_passages = select(SourcePassage, literal(None).label("agent_id")).where(
-                    SourcePassage.organization_id == actor.organization_id
-                )
-
-            if source_id:
-                source_passages = source_passages.where(SourcePassage.source_id == source_id)
-            if file_id:
-                source_passages = source_passages.where(SourcePassage.file_id == file_id)
-
-        # Add agent passages query
-        agent_passages = None
+        # Start with base query for source passages
+        source_passages = None
+        if not agent_only:  # Include source passages
             if agent_id is not None:
-
-                select(
-
-
-
-                    AgentPassage.metadata_,
-                    AgentPassage.embedding,
-                    AgentPassage.created_at,
-                    AgentPassage.updated_at,
-                    AgentPassage.is_deleted,
-                    AgentPassage._created_by_id,
-                    AgentPassage._last_updated_by_id,
-                    AgentPassage.organization_id,
-                    literal(None).label("file_id"),
-                    literal(None).label("source_id"),
-                    AgentPassage.agent_id,
-                )
-                .where(AgentPassage.agent_id == agent_id)
-                .where(AgentPassage.organization_id == actor.organization_id)
+                source_passages = (
+                    select(SourcePassage, literal(None).label("agent_id"))
+                    .join(SourcesAgents, SourcesAgents.source_id == SourcePassage.source_id)
+                    .where(SourcesAgents.agent_id == agent_id)
+                    .where(SourcePassage.organization_id == actor.organization_id)
                 )
-
-        # Combine queries
-        if source_passages is not None and agent_passages is not None:
-            combined_query = union_all(source_passages, agent_passages).cte("combined_passages")
-        elif agent_passages is not None:
-            combined_query = agent_passages.cte("combined_passages")
-        elif source_passages is not None:
-            combined_query = source_passages.cte("combined_passages")
             else:
-
-
-
-            main_query = select(combined_query)
+                source_passages = select(SourcePassage, literal(None).label("agent_id")).where(
+                    SourcePassage.organization_id == actor.organization_id
+                )
 
-        # Apply filters
-        if start_date:
-            main_query = main_query.where(combined_query.c.created_at >= start_date)
-        if end_date:
-            main_query = main_query.where(combined_query.c.created_at <= end_date)
             if source_id:
-
+                source_passages = source_passages.where(SourcePassage.source_id == source_id)
             if file_id:
-
+                source_passages = source_passages.where(SourcePassage.file_id == file_id)
+
+        # Add agent passages query
+        agent_passages = None
+        if agent_id is not None:
+            agent_passages = (
+                select(
+                    AgentPassage.id,
+                    AgentPassage.text,
+                    AgentPassage.embedding_config,
+                    AgentPassage.metadata_,
+                    AgentPassage.embedding,
+                    AgentPassage.created_at,
+                    AgentPassage.updated_at,
+                    AgentPassage.is_deleted,
+                    AgentPassage._created_by_id,
+                    AgentPassage._last_updated_by_id,
+                    AgentPassage.organization_id,
+                    literal(None).label("file_id"),
+                    literal(None).label("source_id"),
+                    AgentPassage.agent_id,
+                )
+                .where(AgentPassage.agent_id == agent_id)
+                .where(AgentPassage.organization_id == actor.organization_id)
+            )
 
-
-
-
-
-
-
-
-
-
-
-
-
-
+        # Combine queries
+        if source_passages is not None and agent_passages is not None:
+            combined_query = union_all(source_passages, agent_passages).cte("combined_passages")
+        elif agent_passages is not None:
+            combined_query = agent_passages.cte("combined_passages")
+        elif source_passages is not None:
+            combined_query = source_passages.cte("combined_passages")
+        else:
+            raise ValueError("No passages found")
+
+        # Build main query from combined CTE
+        main_query = select(combined_query)
+
+        # Apply filters
+        if start_date:
+            main_query = main_query.where(combined_query.c.created_at >= start_date)
+        if end_date:
+            main_query = main_query.where(combined_query.c.created_at <= end_date)
+        if source_id:
+            main_query = main_query.where(combined_query.c.source_id == source_id)
+        if file_id:
+            main_query = main_query.where(combined_query.c.file_id == file_id)
+
+        # Vector search
+        if embedded_text:
+            if settings.letta_pg_uri_no_default:
+                # PostgreSQL with pgvector
+                main_query = main_query.order_by(combined_query.c.embedding.cosine_distance(embedded_text).asc())
            else:
-
-
+                # SQLite with custom vector type
+                query_embedding_binary = adapt_array(embedded_text)
+                main_query = main_query.order_by(
+                    func.cosine_distance(combined_query.c.embedding, query_embedding_binary).asc(),
+                    combined_query.c.created_at.asc() if ascending else combined_query.c.created_at.desc(),
+                    combined_query.c.id.asc(),
+                )
+        else:
+            if query_text:
+                main_query = main_query.where(func.lower(combined_query.c.text).contains(func.lower(query_text)))
 
-
-
-
-
-
-
+        # Handle pagination
+        if before or after:
+            # Create reference CTEs
+            if before:
+                before_ref = select(combined_query.c.created_at, combined_query.c.id).where(combined_query.c.id == before).cte("before_ref")
+            if after:
+                after_ref = select(combined_query.c.created_at, combined_query.c.id).where(combined_query.c.id == after).cte("after_ref")
+
+            if before and after:
+                # Window-based query (get records between before and after)
+                main_query = main_query.where(
+                    or_(
+                        combined_query.c.created_at < select(before_ref.c.created_at).scalar_subquery(),
+                        and_(
+                            combined_query.c.created_at == select(before_ref.c.created_at).scalar_subquery(),
+                            combined_query.c.id < select(before_ref.c.id).scalar_subquery(),
+                        ),
                     )
-
-
-
+                )
+                main_query = main_query.where(
+                    or_(
+                        combined_query.c.created_at > select(after_ref.c.created_at).scalar_subquery(),
+                        and_(
+                            combined_query.c.created_at == select(after_ref.c.created_at).scalar_subquery(),
+                            combined_query.c.id > select(after_ref.c.id).scalar_subquery(),
+                        ),
                     )
-
-
-
+                )
+            else:
+                # Pure pagination (only before or only after)
+                if before:
                     main_query = main_query.where(
                         or_(
                             combined_query.c.created_at < select(before_ref.c.created_at).scalar_subquery(),
@@ -1877,6 +2190,7 @@ class AgentManager:
                            ),
                        )
                    )
+                if after:
                    main_query = main_query.where(
                        or_(
                            combined_query.c.created_at > select(after_ref.c.created_at).scalar_subquery(),
@@ -1886,44 +2200,23 @@ class AgentManager:
                            ),
                        )
                    )
-                else:
-                    # Pure pagination (only before or only after)
-                    if before:
-                        main_query = main_query.where(
-                            or_(
-                                combined_query.c.created_at < select(before_ref.c.created_at).scalar_subquery(),
-                                and_(
-                                    combined_query.c.created_at == select(before_ref.c.created_at).scalar_subquery(),
-                                    combined_query.c.id < select(before_ref.c.id).scalar_subquery(),
-                                ),
-                            )
-                        )
-                    if after:
-                        main_query = main_query.where(
-                            or_(
-                                combined_query.c.created_at > select(after_ref.c.created_at).scalar_subquery(),
-                                and_(
-                                    combined_query.c.created_at == select(after_ref.c.created_at).scalar_subquery(),
-                                    combined_query.c.id > select(after_ref.c.id).scalar_subquery(),
-                                ),
-                            )
-                        )
 
-
-
-
-
-
-
-
-
-
-
-
-
+        # Add ordering if not already ordered by similarity
+        if not embed_query:
+            if ascending:
+                main_query = main_query.order_by(
+                    combined_query.c.created_at.asc(),
+                    combined_query.c.id.asc(),
+                )
+            else:
+                main_query = main_query.order_by(
+                    combined_query.c.created_at.desc(),
+                    combined_query.c.id.asc(),
+                )
 
        return main_query
 
+    @trace_method
     @enforce_types
     def list_passages(
         self,
@@ -1983,6 +2276,67 @@ class AgentManager:
 
            return [p.to_pydantic() for p in passages]
 
+    @trace_method
+    @enforce_types
+    async def list_passages_async(
+        self,
+        actor: PydanticUser,
+        agent_id: Optional[str] = None,
+        file_id: Optional[str] = None,
+        limit: Optional[int] = 50,
+        query_text: Optional[str] = None,
+        start_date: Optional[datetime] = None,
+        end_date: Optional[datetime] = None,
+        before: Optional[str] = None,
+        after: Optional[str] = None,
+        source_id: Optional[str] = None,
+        embed_query: bool = False,
+        ascending: bool = True,
+        embedding_config: Optional[EmbeddingConfig] = None,
+        agent_only: bool = False,
+    ) -> List[PydanticPassage]:
+        """Lists all passages attached to an agent."""
+        async with db_registry.async_session() as session:
+            main_query = self._build_passage_query(
+                actor=actor,
+                agent_id=agent_id,
+                file_id=file_id,
+                query_text=query_text,
+                start_date=start_date,
+                end_date=end_date,
+                before=before,
+                after=after,
+                source_id=source_id,
+                embed_query=embed_query,
+                ascending=ascending,
+                embedding_config=embedding_config,
+                agent_only=agent_only,
+            )
+
+            # Add limit
+            if limit:
+                main_query = main_query.limit(limit)
+
+            # Execute query
+            result = await session.execute(main_query)
+
+            passages = []
+            for row in result:
+                data = dict(row._mapping)
+                if data["agent_id"] is not None:
+                    # This is an AgentPassage - remove source fields
+                    data.pop("source_id", None)
+                    data.pop("file_id", None)
+                    passage = AgentPassage(**data)
+                else:
+                    # This is a SourcePassage - remove agent field
+                    data.pop("agent_id", None)
+                    passage = SourcePassage(**data)
+                passages.append(passage)
+
+            return [p.to_pydantic() for p in passages]
+
+    @trace_method
     @enforce_types
     async def list_passages_async(
         self,
@@ -2081,9 +2435,48 @@ class AgentManager:
            count_query = select(func.count()).select_from(main_query.subquery())
            return session.scalar(count_query) or 0
 
+    @enforce_types
+    async def passage_size_async(
+        self,
+        actor: PydanticUser,
+        agent_id: Optional[str] = None,
+        file_id: Optional[str] = None,
+        query_text: Optional[str] = None,
+        start_date: Optional[datetime] = None,
+        end_date: Optional[datetime] = None,
+        before: Optional[str] = None,
+        after: Optional[str] = None,
+        source_id: Optional[str] = None,
+        embed_query: bool = False,
+        ascending: bool = True,
+        embedding_config: Optional[EmbeddingConfig] = None,
+        agent_only: bool = False,
+    ) -> int:
+        async with db_registry.async_session() as session:
+            main_query = self._build_passage_query(
+                actor=actor,
+                agent_id=agent_id,
+                file_id=file_id,
+                query_text=query_text,
+                start_date=start_date,
+                end_date=end_date,
+                before=before,
+                after=after,
+                source_id=source_id,
+                embed_query=embed_query,
+                ascending=ascending,
+                embedding_config=embedding_config,
+                agent_only=agent_only,
+            )
+
+            # Convert to count query
+            count_query = select(func.count()).select_from(main_query.subquery())
+            return (await session.execute(count_query)).scalar() or 0
+
    # ======================================================================================================================
    # Tool Management
    # ======================================================================================================================
+    @trace_method
     @enforce_types
     def attach_tool(self, agent_id: str, tool_id: str, actor: PydanticUser) -> PydanticAgentState:
         """
@@ -2119,6 +2512,7 @@ class AgentManager:
            agent.update(session, actor=actor)
            return agent.to_pydantic()
 
+    @trace_method
     @enforce_types
     def detach_tool(self, agent_id: str, tool_id: str, actor: PydanticUser) -> PydanticAgentState:
         """
@@ -2152,6 +2546,7 @@ class AgentManager:
            agent.update(session, actor=actor)
            return agent.to_pydantic()
 
+    @trace_method
     @enforce_types
     def list_attached_tools(self, agent_id: str, actor: PydanticUser) -> List[PydanticTool]:
         """
@@ -2171,6 +2566,7 @@ class AgentManager:
    # ======================================================================================================================
    # Tag Management
    # ======================================================================================================================
+    @trace_method
     @enforce_types
     def list_tags(
         self, actor: PydanticUser, after: Optional[str] = None, limit: Optional[int] = 50, query_text: Optional[str] = None
@@ -2205,6 +2601,7 @@ class AgentManager:
            results = [tag[0] for tag in query.all()]
            return results
 
+    @trace_method
     @enforce_types
     async def list_tags_async(
         self, actor: PydanticUser, after: Optional[str] = None, limit: Optional[int] = 50, query_text: Optional[str] = None
@@ -2243,3 +2640,279 @@ class AgentManager:
            # Extract the tag values from the result
            results = [row[0] for row in result.all()]
            return results
+
+    async def get_context_window(self, agent_id: str, actor: PydanticUser) -> ContextWindowOverview:
+        if os.getenv("LETTA_ENVIRONMENT") == "PRODUCTION":
+            return await self.get_context_window_from_anthropic_async(agent_id=agent_id, actor=actor)
+        return await self.get_context_window_from_tiktoken_async(agent_id=agent_id, actor=actor)
+
+    async def get_context_window_from_anthropic_async(self, agent_id: str, actor: PydanticUser) -> ContextWindowOverview:
+        """Get the context window of the agent"""
+        agent_state = await self.get_agent_by_id_async(agent_id=agent_id, actor=actor)
+        anthropic_client = LLMClient.create(provider_type=ProviderType.anthropic, actor=actor)
+        model = agent_state.llm_config.model if agent_state.llm_config.model_endpoint_type == "anthropic" else None
+
+        # Grab the in-context messages
+        # conversion of messages to anthropic dict format, which is passed to the token counter
+        (in_context_messages, passage_manager_size, message_manager_size) = await asyncio.gather(
+            self.get_in_context_messages_async(agent_id=agent_id, actor=actor),
+            self.passage_manager.size_async(actor=actor, agent_id=agent_id),
+            self.message_manager.size_async(actor=actor, agent_id=agent_id),
+        )
+        in_context_messages_anthropic = [m.to_anthropic_dict() for m in in_context_messages]
+
+        # Extract system, memory and external summary
+        if (
+            len(in_context_messages) > 0
+            and in_context_messages[0].role == MessageRole.system
+            and in_context_messages[0].content
+            and len(in_context_messages[0].content) == 1
+            and isinstance(in_context_messages[0].content[0], TextContent)
+        ):
+            system_message = in_context_messages[0].content[0].text
+
+            external_memory_marker_pos = system_message.find("###")
+            core_memory_marker_pos = system_message.find("<", external_memory_marker_pos)
+            if external_memory_marker_pos != -1 and core_memory_marker_pos != -1:
+                system_prompt = system_message[:external_memory_marker_pos].strip()
+                external_memory_summary = system_message[external_memory_marker_pos:core_memory_marker_pos].strip()
+                core_memory = system_message[core_memory_marker_pos:].strip()
+            else:
+                # if no markers found, put everything in system message
+                system_prompt = system_message
+                external_memory_summary = None
+                core_memory = None
+        else:
+            # if no system message, fall back on agent's system prompt
+            system_prompt = agent_state.system
+            external_memory_summary = None
+            core_memory = None
+
+        num_tokens_system_coroutine = anthropic_client.count_tokens(model=model, messages=[{"role": "user", "content": system_prompt}])
+        num_tokens_core_memory_coroutine = (
+            anthropic_client.count_tokens(model=model, messages=[{"role": "user", "content": core_memory}])
+            if core_memory
+            else asyncio.sleep(0, result=0)
+        )
+        num_tokens_external_memory_summary_coroutine = (
+            anthropic_client.count_tokens(model=model, messages=[{"role": "user", "content": external_memory_summary}])
+            if external_memory_summary
+            else asyncio.sleep(0, result=0)
+        )
+
+        # Check if there's a summary message in the message queue
+        if (
+            len(in_context_messages) > 1
+            and in_context_messages[1].role == MessageRole.user
+            and in_context_messages[1].content
+            and len(in_context_messages[1].content) == 1
+            and isinstance(in_context_messages[1].content[0], TextContent)
+            # TODO remove hardcoding
+            and "The following is a summary of the previous " in in_context_messages[1].content[0].text
+        ):
+            # Summary message exists
+            text_content = in_context_messages[1].content[0].text
+            assert text_content is not None
+            summary_memory = text_content
+            num_tokens_summary_memory_coroutine = anthropic_client.count_tokens(
+                model=model, messages=[{"role": "user", "content": summary_memory}]
+            )
+            # with a summary message, the real messages start at index 2
+            num_tokens_messages_coroutine = (
+                anthropic_client.count_tokens(model=model, messages=in_context_messages_anthropic[2:])
+                if len(in_context_messages_anthropic) > 2
+                else asyncio.sleep(0, result=0)
+            )
+
+        else:
+            summary_memory = None
+            num_tokens_summary_memory_coroutine = asyncio.sleep(0, result=0)
+            # with no summary message, the real messages start at index 1
+            num_tokens_messages_coroutine = (
+                anthropic_client.count_tokens(model=model, messages=in_context_messages_anthropic[1:])
+                if len(in_context_messages_anthropic) > 1
+                else asyncio.sleep(0, result=0)
+            )
+
+        # tokens taken up by function definitions
+        if agent_state.tools and len(agent_state.tools) > 0:
+            available_functions_definitions = [OpenAITool(type="function", function=f.json_schema) for f in agent_state.tools]
+            num_tokens_available_functions_definitions_coroutine = anthropic_client.count_tokens(
+                model=model,
+                tools=available_functions_definitions,
+            )
+        else:
+            available_functions_definitions = []
+            num_tokens_available_functions_definitions_coroutine = asyncio.sleep(0, result=0)
+
+        (
+            num_tokens_system,
+            num_tokens_core_memory,
+            num_tokens_external_memory_summary,
+            num_tokens_summary_memory,
+            num_tokens_messages,
+            num_tokens_available_functions_definitions,
+        ) = await asyncio.gather(
+            num_tokens_system_coroutine,
+            num_tokens_core_memory_coroutine,
+            num_tokens_external_memory_summary_coroutine,
+            num_tokens_summary_memory_coroutine,
+            num_tokens_messages_coroutine,
+            num_tokens_available_functions_definitions_coroutine,
+        )
+
+        num_tokens_used_total = (
+            num_tokens_system  # system prompt
+            + num_tokens_available_functions_definitions  # function definitions
+            + num_tokens_core_memory  # core memory
+            + num_tokens_external_memory_summary  # metadata (statistics) about recall/archival
+            + num_tokens_summary_memory  # summary of ongoing conversation
+            + num_tokens_messages  # tokens taken by messages
+        )
+        assert isinstance(num_tokens_used_total, int)
+
+        return ContextWindowOverview(
+            # context window breakdown (in messages)
+            num_messages=len(in_context_messages),
+            num_archival_memory=passage_manager_size,
+            num_recall_memory=message_manager_size,
+            num_tokens_external_memory_summary=num_tokens_external_memory_summary,
+            external_memory_summary=external_memory_summary,
+            # top-level information
+            context_window_size_max=agent_state.llm_config.context_window,
+            context_window_size_current=num_tokens_used_total,
+            # context window breakdown (in tokens)
+            num_tokens_system=num_tokens_system,
+            system_prompt=system_prompt,
+            num_tokens_core_memory=num_tokens_core_memory,
+            core_memory=core_memory,
+            num_tokens_summary_memory=num_tokens_summary_memory,
+            summary_memory=summary_memory,
+            num_tokens_messages=num_tokens_messages,
+            messages=in_context_messages,
+            # related to functions
+            num_tokens_functions_definitions=num_tokens_available_functions_definitions,
+            functions_definitions=available_functions_definitions,
+        )
+
+    async def get_context_window_from_tiktoken_async(self, agent_id: str, actor: PydanticUser) -> ContextWindowOverview:
+        """Get the context window of the agent"""
+        from letta.local_llm.utils import num_tokens_from_functions, num_tokens_from_messages
+
+        agent_state = await self.get_agent_by_id_async(agent_id=agent_id, actor=actor)
+        # Grab the in-context messages
+        # conversion of messages to OpenAI dict format, which is passed to the token counter
+        (in_context_messages, passage_manager_size, message_manager_size) = await asyncio.gather(
+            self.get_in_context_messages_async(agent_id=agent_id, actor=actor),
+            self.passage_manager.size_async(actor=actor, agent_id=agent_id),
+            self.message_manager.size_async(actor=actor, agent_id=agent_id),
+        )
+        in_context_messages_openai = [m.to_openai_dict() for m in in_context_messages]
+
+        # Extract system, memory and external summary
+        if (
+            len(in_context_messages) > 0
+            and in_context_messages[0].role == MessageRole.system
+            and in_context_messages[0].content
+            and len(in_context_messages[0].content) == 1
+            and isinstance(in_context_messages[0].content[0], TextContent)
+        ):
+            system_message = in_context_messages[0].content[0].text
+
+            external_memory_marker_pos = system_message.find("###")
+            core_memory_marker_pos = system_message.find("<", external_memory_marker_pos)
+            if external_memory_marker_pos != -1 and core_memory_marker_pos != -1:
+                system_prompt = system_message[:external_memory_marker_pos].strip()
+                external_memory_summary = system_message[external_memory_marker_pos:core_memory_marker_pos].strip()
+                core_memory = system_message[core_memory_marker_pos:].strip()
+            else:
+                # if no markers found, put everything in system message
+                system_prompt = system_message
+                external_memory_summary = ""
+                core_memory = ""
+        else:
+            # if no system message, fall back on agent's system prompt
+            system_prompt = agent_state.system
+            external_memory_summary = ""
+            core_memory = ""
+
+        num_tokens_system = count_tokens(system_prompt)
+        num_tokens_core_memory = count_tokens(core_memory)
+        num_tokens_external_memory_summary = count_tokens(external_memory_summary)
+
+        # Check if there's a summary message in the message queue
+        if (
+            len(in_context_messages) > 1
+            and in_context_messages[1].role == MessageRole.user
+            and in_context_messages[1].content
+            and len(in_context_messages[1].content) == 1
+            and isinstance(in_context_messages[1].content[0], TextContent)
+            # TODO remove hardcoding
+            and "The following is a summary of the previous " in in_context_messages[1].content[0].text
+        ):
+            # Summary message exists
+            text_content = in_context_messages[1].content[0].text
+            assert text_content is not None
+            summary_memory = text_content
+            num_tokens_summary_memory = count_tokens(text_content)
+            # with a summary message, the real messages start at index 2
+            num_tokens_messages = (
+                num_tokens_from_messages(messages=in_context_messages_openai[2:], model=agent_state.llm_config.model)
+                if len(in_context_messages_openai) > 2
+                else 0
+            )
+
+        else:
+            summary_memory = None
+            num_tokens_summary_memory = 0
+            # with no summary message, the real messages start at index 1
+            num_tokens_messages = (
+                num_tokens_from_messages(messages=in_context_messages_openai[1:], model=agent_state.llm_config.model)
+                if len(in_context_messages_openai) > 1
+                else 0
+            )
+
+        # tokens taken up by function definitions
+        agent_state_tool_jsons = [t.json_schema for t in agent_state.tools]
+        if agent_state_tool_jsons:
+            available_functions_definitions = [OpenAITool(type="function", function=f) for f in agent_state_tool_jsons]
+            num_tokens_available_functions_definitions = num_tokens_from_functions(
+                functions=agent_state_tool_jsons, model=agent_state.llm_config.model
+            )
+        else:
+            available_functions_definitions = []
+            num_tokens_available_functions_definitions = 0
+
+        num_tokens_used_total = (
+            num_tokens_system  # system prompt
+            + num_tokens_available_functions_definitions  # function definitions
+            + num_tokens_core_memory  # core memory
+            + num_tokens_external_memory_summary  # metadata (statistics) about recall/archival
+            + num_tokens_summary_memory  # summary of ongoing conversation
+            + num_tokens_messages  # tokens taken by messages
+        )
+        assert isinstance(num_tokens_used_total, int)
+
+        return ContextWindowOverview(
+            # context window breakdown (in messages)
+            num_messages=len(in_context_messages),
+            num_archival_memory=passage_manager_size,
+            num_recall_memory=message_manager_size,
+            num_tokens_external_memory_summary=num_tokens_external_memory_summary,
+            external_memory_summary=external_memory_summary,
+            # top-level information
+            context_window_size_max=agent_state.llm_config.context_window,
+            context_window_size_current=num_tokens_used_total,
+            # context window breakdown (in tokens)
+            num_tokens_system=num_tokens_system,
+            system_prompt=system_prompt,
+            num_tokens_core_memory=num_tokens_core_memory,
+            core_memory=core_memory,
+            num_tokens_summary_memory=num_tokens_summary_memory,
+            summary_memory=summary_memory,
+            num_tokens_messages=num_tokens_messages,
+            messages=in_context_messages,
+            # related to functions
+            num_tokens_functions_definitions=num_tokens_available_functions_definitions,
+            functions_definitions=available_functions_definitions,
+        )
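Most of the agent_manager.py changes above add @trace_method instrumentation and async counterparts (get_agent_by_id_async, get_agents_by_ids_async, delete_agent_async, list_passages_async, passage_size_async) plus context-window accounting (get_context_window and its Anthropic/tiktoken variants). A minimal, hypothetical sketch of driving the new coroutines follows; the bare AgentManager() construction and the actor value are assumptions for illustration and are not taken from this diff:

    import asyncio

    from letta.services.agent_manager import AgentManager

    async def inspect_agent(actor, agent_id: str):
        # `actor` is a letta PydanticUser; how it is obtained depends on the deployment (assumption).
        manager = AgentManager()

        # Async fetch with optional eager loading of relationships (added in this release).
        agent = await manager.get_agent_by_id_async(agent_id=agent_id, actor=actor, include_relationships=["memory"])

        # Async passage listing and counting, both built on the shared _build_passage_query helper.
        passages = await manager.list_passages_async(actor=actor, agent_id=agent_id, limit=10)
        n_passages = await manager.passage_size_async(actor=actor, agent_id=agent_id)

        # Context-window accounting: Anthropic token counting when LETTA_ENVIRONMENT=PRODUCTION, tiktoken otherwise.
        overview = await manager.get_context_window(agent_id=agent_id, actor=actor)
        return agent, passages, n_passages, overview

    # asyncio.run(inspect_agent(actor, "agent-..."))  # hypothetical invocation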