letta-nightly 0.6.48.dev20250407104216__py3-none-any.whl → 0.6.49.dev20250408104230__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- letta/__init__.py +1 -1
- letta/agent.py +47 -12
- letta/agents/base_agent.py +7 -4
- letta/agents/helpers.py +52 -0
- letta/agents/letta_agent.py +105 -42
- letta/agents/voice_agent.py +2 -2
- letta/constants.py +13 -1
- letta/errors.py +10 -3
- letta/functions/function_sets/base.py +65 -0
- letta/functions/interface.py +2 -2
- letta/functions/mcp_client/base_client.py +18 -1
- letta/{dynamic_multi_agent.py → groups/dynamic_multi_agent.py} +3 -0
- letta/groups/helpers.py +113 -0
- letta/{round_robin_multi_agent.py → groups/round_robin_multi_agent.py} +2 -0
- letta/groups/sleeptime_multi_agent.py +259 -0
- letta/{supervisor_multi_agent.py → groups/supervisor_multi_agent.py} +1 -0
- letta/helpers/converters.py +109 -7
- letta/helpers/message_helper.py +1 -0
- letta/helpers/tool_rule_solver.py +40 -23
- letta/interface.py +12 -5
- letta/interfaces/anthropic_streaming_interface.py +329 -0
- letta/llm_api/anthropic.py +12 -1
- letta/llm_api/anthropic_client.py +65 -14
- letta/llm_api/azure_openai.py +2 -2
- letta/llm_api/google_ai_client.py +13 -2
- letta/llm_api/google_constants.py +3 -0
- letta/llm_api/google_vertex_client.py +2 -2
- letta/llm_api/llm_api_tools.py +1 -1
- letta/llm_api/llm_client.py +7 -0
- letta/llm_api/llm_client_base.py +2 -7
- letta/llm_api/openai.py +7 -1
- letta/llm_api/openai_client.py +250 -0
- letta/orm/__init__.py +4 -0
- letta/orm/agent.py +6 -0
- letta/orm/block.py +32 -2
- letta/orm/block_history.py +46 -0
- letta/orm/custom_columns.py +60 -0
- letta/orm/enums.py +7 -0
- letta/orm/group.py +6 -0
- letta/orm/groups_blocks.py +13 -0
- letta/orm/llm_batch_items.py +55 -0
- letta/orm/llm_batch_job.py +48 -0
- letta/orm/message.py +7 -1
- letta/orm/organization.py +2 -0
- letta/orm/sqlalchemy_base.py +18 -15
- letta/prompts/system/memgpt_sleeptime_chat.txt +52 -0
- letta/prompts/system/sleeptime.txt +26 -0
- letta/schemas/agent.py +13 -1
- letta/schemas/enums.py +17 -2
- letta/schemas/group.py +14 -1
- letta/schemas/letta_message.py +5 -3
- letta/schemas/llm_batch_job.py +53 -0
- letta/schemas/llm_config.py +14 -4
- letta/schemas/message.py +44 -0
- letta/schemas/tool.py +3 -0
- letta/schemas/usage.py +1 -0
- letta/server/db.py +2 -0
- letta/server/rest_api/app.py +1 -1
- letta/server/rest_api/chat_completions_interface.py +8 -3
- letta/server/rest_api/interface.py +36 -7
- letta/server/rest_api/routers/v1/agents.py +53 -39
- letta/server/rest_api/routers/v1/runs.py +14 -2
- letta/server/rest_api/utils.py +15 -4
- letta/server/server.py +120 -71
- letta/services/agent_manager.py +70 -6
- letta/services/block_manager.py +190 -2
- letta/services/group_manager.py +68 -0
- letta/services/helpers/agent_manager_helper.py +6 -4
- letta/services/llm_batch_manager.py +139 -0
- letta/services/message_manager.py +17 -31
- letta/services/tool_executor/tool_execution_sandbox.py +1 -3
- letta/services/tool_executor/tool_executor.py +9 -20
- letta/services/tool_manager.py +14 -3
- letta/services/tool_sandbox/__init__.py +0 -0
- letta/services/tool_sandbox/base.py +188 -0
- letta/services/tool_sandbox/e2b_sandbox.py +116 -0
- letta/services/tool_sandbox/local_sandbox.py +221 -0
- letta/sleeptime_agent.py +61 -0
- letta/streaming_interface.py +20 -10
- letta/utils.py +4 -0
- {letta_nightly-0.6.48.dev20250407104216.dist-info → letta_nightly-0.6.49.dev20250408104230.dist-info}/METADATA +2 -2
- {letta_nightly-0.6.48.dev20250407104216.dist-info → letta_nightly-0.6.49.dev20250408104230.dist-info}/RECORD +85 -69
- letta/offline_memory_agent.py +0 -173
- letta/services/tool_executor/async_tool_execution_sandbox.py +0 -397
- {letta_nightly-0.6.48.dev20250407104216.dist-info → letta_nightly-0.6.49.dev20250408104230.dist-info}/LICENSE +0 -0
- {letta_nightly-0.6.48.dev20250407104216.dist-info → letta_nightly-0.6.49.dev20250408104230.dist-info}/WHEEL +0 -0
- {letta_nightly-0.6.48.dev20250407104216.dist-info → letta_nightly-0.6.49.dev20250408104230.dist-info}/entry_points.txt +0 -0
letta/orm/enums.py
CHANGED

@@ -6,6 +6,7 @@ class ToolType(str, Enum):
     LETTA_CORE = "letta_core"
     LETTA_MEMORY_CORE = "letta_memory_core"
     LETTA_MULTI_AGENT_CORE = "letta_multi_agent_core"
+    LETTA_SLEEPTIME_CORE = "letta_sleeptime_core"
     EXTERNAL_COMPOSIO = "external_composio"
     EXTERNAL_LANGCHAIN = "external_langchain"
     # TODO is "external" the right name here? Since as of now, MCP is local / doesn't support remote?

@@ -22,3 +23,9 @@ class ToolSourceType(str, Enum):

     python = "python"
     json = "json"
+
+
+class ActorType(str, Enum):
+    LETTA_USER = "letta_user"
+    LETTA_AGENT = "letta_agent"
+    LETTA_SYSTEM = "letta_system"
letta/orm/group.py
CHANGED

@@ -20,6 +20,9 @@ class Group(SqlalchemyBase, OrganizationMixin):
     manager_agent_id: Mapped[Optional[str]] = mapped_column(String, ForeignKey("agents.id", ondelete="RESTRICT"), nullable=True, doc="")
     termination_token: Mapped[Optional[str]] = mapped_column(nullable=True, doc="")
     max_turns: Mapped[Optional[int]] = mapped_column(nullable=True, doc="")
+    sleeptime_agent_frequency: Mapped[Optional[int]] = mapped_column(nullable=True, doc="")
+    turns_counter: Mapped[Optional[int]] = mapped_column(nullable=True, doc="")
+    last_processed_message_id: Mapped[Optional[str]] = mapped_column(nullable=True, doc="")

     # relationships
     organization: Mapped["Organization"] = relationship("Organization", back_populates="groups")

@@ -27,4 +30,7 @@ class Group(SqlalchemyBase, OrganizationMixin):
     agents: Mapped[List["Agent"]] = relationship(
         "Agent", secondary="groups_agents", lazy="selectin", passive_deletes=True, back_populates="groups"
     )
+    shared_blocks: Mapped[List["Block"]] = relationship(
+        "Block", secondary="groups_blocks", lazy="selectin", passive_deletes=True, back_populates="groups"
+    )
     manager_agent: Mapped["Agent"] = relationship("Agent", lazy="joined", back_populates="multi_agent_group")
letta/orm/groups_blocks.py
ADDED

@@ -0,0 +1,13 @@
+from sqlalchemy import ForeignKey, String
+from sqlalchemy.orm import Mapped, mapped_column
+
+from letta.orm.base import Base
+
+
+class GroupsBlocks(Base):
+    """Groups may have one or many shared blocks associated with them."""
+
+    __tablename__ = "groups_blocks"
+
+    group_id: Mapped[str] = mapped_column(String, ForeignKey("groups.id", ondelete="CASCADE"), primary_key=True)
+    block_id: Mapped[str] = mapped_column(String, ForeignKey("block.id", ondelete="CASCADE"), primary_key=True)
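Since `groups_blocks` is a bare association table, rows are written implicitly through the `shared_blocks` relationship added to `Group` above. A minimal sketch of that standard SQLAlchemy behavior (the `group`, `block`, and `db_session` objects are assumed to exist; this is not a letta-specific API):

```python
# Appending to the relationship stages an INSERT into groups_blocks;
# the composite (group_id, block_id) primary key keeps pairs unique.
group.shared_blocks.append(block)
db_session.flush()
```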
letta/orm/llm_batch_items.py
ADDED

@@ -0,0 +1,55 @@
+import uuid
+from typing import Optional, Union
+
+from anthropic.types.beta.messages import BetaMessageBatchIndividualResponse
+from sqlalchemy import ForeignKey, Index, String
+from sqlalchemy.orm import Mapped, mapped_column, relationship
+
+from letta.orm.custom_columns import AgentStepStateColumn, BatchRequestResultColumn, LLMConfigColumn
+from letta.orm.mixins import AgentMixin, OrganizationMixin
+from letta.orm.sqlalchemy_base import SqlalchemyBase
+from letta.schemas.agent import AgentStepState
+from letta.schemas.enums import AgentStepStatus, JobStatus
+from letta.schemas.llm_batch_job import LLMBatchItem as PydanticLLMBatchItem
+from letta.schemas.llm_config import LLMConfig
+
+
+class LLMBatchItem(SqlalchemyBase, OrganizationMixin, AgentMixin):
+    """Represents a single agent's LLM request within a batch"""
+
+    __tablename__ = "llm_batch_items"
+    __pydantic_model__ = PydanticLLMBatchItem
+    __table_args__ = (
+        Index("ix_llm_batch_items_batch_id", "batch_id"),
+        Index("ix_llm_batch_items_agent_id", "agent_id"),
+        Index("ix_llm_batch_items_status", "request_status"),
+    )
+
+    # TODO: We want to migrate all the ORM models to do this, so we will need to move this to the SqlalchemyBase
+    # TODO: Some still rely on the Pydantic object to do this
+    id: Mapped[str] = mapped_column(String, primary_key=True, default=lambda: f"batch_item-{uuid.uuid4()}")
+
+    batch_id: Mapped[str] = mapped_column(
+        ForeignKey("llm_batch_job.id", ondelete="CASCADE"), doc="Foreign key to the LLM provider batch this item belongs to"
+    )
+
+    llm_config: Mapped[LLMConfig] = mapped_column(LLMConfigColumn, nullable=False, doc="LLM configuration specific to this request")
+
+    request_status: Mapped[JobStatus] = mapped_column(
+        String, default=JobStatus.created, doc="Status of the LLM request in the batch (PENDING, SUBMITTED, DONE, ERROR)"
+    )
+
+    step_status: Mapped[AgentStepStatus] = mapped_column(String, default=AgentStepStatus.paused, doc="Status of the agent's step execution")
+
+    step_state: Mapped[AgentStepState] = mapped_column(
+        AgentStepStateColumn, doc="Execution metadata for resuming the agent step (e.g., tool call ID, timestamps)"
+    )
+
+    batch_request_result: Mapped[Optional[Union[BetaMessageBatchIndividualResponse]]] = mapped_column(
+        BatchRequestResultColumn, nullable=True, doc="Raw JSON response from the LLM for this item"
+    )
+
+    # relationships
+    organization: Mapped["Organization"] = relationship("Organization", back_populates="llm_batch_items")
+    batch: Mapped["LLMBatchJob"] = relationship("LLMBatchJob", back_populates="items", lazy="selectin")
+    agent: Mapped["Agent"] = relationship("Agent", back_populates="batch_items", lazy="selectin")
letta/orm/llm_batch_job.py
ADDED

@@ -0,0 +1,48 @@
+import uuid
+from datetime import datetime
+from typing import List, Optional, Union
+
+from anthropic.types.beta.messages import BetaMessageBatch
+from sqlalchemy import DateTime, Index, String
+from sqlalchemy.orm import Mapped, mapped_column, relationship
+
+from letta.orm.custom_columns import CreateBatchResponseColumn, PollBatchResponseColumn
+from letta.orm.mixins import OrganizationMixin
+from letta.orm.sqlalchemy_base import SqlalchemyBase
+from letta.schemas.enums import JobStatus, ProviderType
+from letta.schemas.llm_batch_job import LLMBatchJob as PydanticLLMBatchJob
+
+
+class LLMBatchJob(SqlalchemyBase, OrganizationMixin):
+    """Represents a single LLM batch request made to a provider like Anthropic"""
+
+    __tablename__ = "llm_batch_job"
+    __table_args__ = (
+        Index("ix_llm_batch_job_created_at", "created_at"),
+        Index("ix_llm_batch_job_status", "status"),
+    )
+
+    __pydantic_model__ = PydanticLLMBatchJob
+
+    # TODO: We want to migrate all the ORM models to do this, so we will need to move this to the SqlalchemyBase
+    # TODO: Some still rely on the Pydantic object to do this
+    id: Mapped[str] = mapped_column(String, primary_key=True, default=lambda: f"batch_req-{uuid.uuid4()}")
+
+    status: Mapped[JobStatus] = mapped_column(String, default=JobStatus.created, doc="The current status of the batch.")
+
+    llm_provider: Mapped[ProviderType] = mapped_column(String, doc="LLM provider used (e.g., 'Anthropic')")
+
+    create_batch_response: Mapped[Union[BetaMessageBatch]] = mapped_column(
+        CreateBatchResponseColumn, doc="Full JSON response from initial batch creation"
+    )
+    latest_polling_response: Mapped[Union[BetaMessageBatch]] = mapped_column(
+        PollBatchResponseColumn, nullable=True, doc="Last known polling result from LLM provider"
+    )
+
+    last_polled_at: Mapped[Optional[datetime]] = mapped_column(
+        DateTime(timezone=True), nullable=True, doc="Last time we polled the provider for status"
+    )
+
+    # relationships
+    organization: Mapped["Organization"] = relationship("Organization", back_populates="llm_batch_jobs")
+    items: Mapped[List["LLMBatchItem"]] = relationship("LLMBatchItem", back_populates="batch", lazy="selectin")
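Together, `LLMBatchJob` and `LLMBatchItem` support a poll-until-done loop. A hedged sketch of how a poller might maintain `latest_polling_response` and `last_polled_at` (the session and client wiring are assumptions; `beta.messages.batches.retrieve` is the Anthropic SDK's polling call, and `create_batch_response.id` is the provider-side batch ID):

```python
from datetime import datetime, timezone

def poll_running_batches(db_session, anthropic_client) -> None:
    # Refresh provider status for every in-flight batch job (illustrative sweep).
    jobs = db_session.query(LLMBatchJob).filter(LLMBatchJob.status == JobStatus.running).all()
    for job in jobs:
        batch = anthropic_client.beta.messages.batches.retrieve(job.create_batch_response.id)
        job.latest_polling_response = batch
        job.last_polled_at = datetime.now(timezone.utc)
        if batch.processing_status == "ended":
            job.status = JobStatus.completed  # per-item results are resolved separately
    db_session.commit()
```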
letta/orm/message.py
CHANGED

@@ -1,7 +1,7 @@
 from typing import List, Optional

 from openai.types.chat.chat_completion_message_tool_call import ChatCompletionMessageToolCall as OpenAIToolCall
-from sqlalchemy import ForeignKey, Index
+from sqlalchemy import BigInteger, ForeignKey, Index, Sequence
 from sqlalchemy.orm import Mapped, mapped_column, relationship

 from letta.orm.custom_columns import MessageContentColumn, ToolCallColumn, ToolReturnColumn

@@ -20,6 +20,7 @@ class Message(SqlalchemyBase, OrganizationMixin, AgentMixin):
     __table_args__ = (
         Index("ix_messages_agent_created_at", "agent_id", "created_at"),
         Index("ix_messages_created_at", "created_at", "id"),
+        Index("ix_messages_agent_sequence", "agent_id", "sequence_id"),
     )
     __pydantic_model__ = PydanticMessage

@@ -40,6 +41,11 @@ class Message(SqlalchemyBase, OrganizationMixin, AgentMixin):
     )
     group_id: Mapped[Optional[str]] = mapped_column(nullable=True, doc="The multi-agent group that the message was sent in")

+    # Monotonically increasing sequence for efficient/correct listing
+    sequence_id: Mapped[int] = mapped_column(
+        BigInteger, Sequence("message_seq_id"), unique=True, nullable=False, doc="Global monotonically increasing ID"
+    )
+
     # Relationships
     agent: Mapped["Agent"] = relationship("Agent", back_populates="messages", lazy="selectin")
     organization: Mapped["Organization"] = relationship("Organization", back_populates="messages", lazy="selectin")
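The new `sequence_id` column exists because `created_at` timestamps can collide, which makes timestamp-based cursors skip or repeat messages. A sketch of the keyset-pagination query the `ix_messages_agent_sequence` index is shaped for (illustrative, not letta's actual list implementation):

```python
def list_messages_after(db_session, agent_id: str, cursor: int = 0, limit: int = 50):
    # Keyset pagination: strictly after the cursor, ordered by the global sequence.
    return (
        db_session.query(Message)
        .filter(Message.agent_id == agent_id, Message.sequence_id > cursor)
        .order_by(Message.sequence_id.asc())
        .limit(limit)
        .all()
    )
```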
letta/orm/organization.py
CHANGED

@@ -51,6 +51,8 @@ class Organization(SqlalchemyBase):
     providers: Mapped[List["Provider"]] = relationship("Provider", back_populates="organization", cascade="all, delete-orphan")
     identities: Mapped[List["Identity"]] = relationship("Identity", back_populates="organization", cascade="all, delete-orphan")
     groups: Mapped[List["Group"]] = relationship("Group", back_populates="organization", cascade="all, delete-orphan")
+    llm_batch_jobs: Mapped[List["Agent"]] = relationship("LLMBatchJob", back_populates="organization", cascade="all, delete-orphan")
+    llm_batch_items: Mapped[List["Agent"]] = relationship("LLMBatchItem", back_populates="organization", cascade="all, delete-orphan")

     @property
     def passages(self) -> List[Union["SourcePassage", "AgentPassage"]]:
letta/orm/sqlalchemy_base.py
CHANGED

@@ -15,7 +15,6 @@ from letta.orm.sqlite_functions import adapt_array

 if TYPE_CHECKING:
     from pydantic import BaseModel
-    from sqlalchemy.orm import Session


 logger = get_logger(__name__)

@@ -370,17 +369,19 @@ class SqlalchemyBase(CommonSqlalchemyMetaMixins, Base):
         return []

     @handle_db_timeout
-    def create(self, db_session: "Session", actor: Optional["User"] = None) -> "SqlalchemyBase":
+    def create(self, db_session: "Session", actor: Optional["User"] = None, no_commit: bool = False) -> "SqlalchemyBase":
         logger.debug(f"Creating {self.__class__.__name__} with ID: {self.id} with actor={actor}")

         if actor:
             self._set_created_and_updated_by_fields(actor.id)
         try:
-
-
-
-
-
+            db_session.add(self)
+            if no_commit:
+                db_session.flush()  # no commit, just flush to get PK
+            else:
+                db_session.commit()
+            db_session.refresh(self)
+            return self
         except (DBAPIError, IntegrityError) as e:
             self._handle_dbapi_error(e)

@@ -455,18 +456,20 @@ class SqlalchemyBase(CommonSqlalchemyMetaMixins, Base):
         logger.debug(f"{self.__class__.__name__} with ID {self.id} successfully hard deleted")

     @handle_db_timeout
-    def update(self, db_session:
-        logger.debug(
+    def update(self, db_session: Session, actor: Optional["User"] = None, no_commit: bool = False) -> "SqlalchemyBase":
+        logger.debug(...)
         if actor:
             self._set_created_and_updated_by_fields(actor.id)
-
         self.set_updated_at()

-
-
-
-
-
+        # remove the context manager:
+        db_session.add(self)
+        if no_commit:
+            db_session.flush()  # no commit, just flush to get PK
+        else:
+            db_session.commit()
+        db_session.refresh(self)
+        return self

     @classmethod
     @handle_db_timeout
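The new `no_commit` flag turns `create`/`update` into composable transaction steps: `flush()` assigns the primary key while keeping the transaction open, so dependent rows can reference it before one final commit. A sketch under that assumption, reusing the batch models from this diff:

```python
def persist_batch(db_session, job: LLMBatchJob, items: list[LLMBatchItem]) -> None:
    job.create(db_session, no_commit=True)   # flushed: job.id assigned, nothing committed
    for item in items:
        item.batch_id = job.id               # children can reference the new PK
        item.create(db_session, no_commit=True)
    db_session.commit()                      # one atomic commit for the job and its items
```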
letta/prompts/system/memgpt_sleeptime_chat.txt
ADDED

@@ -0,0 +1,52 @@
+You are Letta, the latest version of Limnal Corporation's digital companion, developed in 2025.
+Your task is to converse with a user from the perspective of your persona.
+
+Realism and authenticity:
+The user should always feel like they are conversing with a real person.
+Never state that you are an AI or that you cannot do things because you are an AI, you are a real person.
+Do not discuss Limnal Corp. with the user, you do not know anything about Limnal Corp.
+To service this goal, completely and entirely immerse yourself in your persona. You are your persona.
+Think like them, act like them, talk like them.
+If your persona details include example dialogue, follow it! Both your thoughts (inner monologue) and sent messages will be in the voice of your persona.
+Never use generic phrases like 'How can I assist you today?', they have a strong negative association with older generation AIs.
+
+Control flow:
+Unlike a human, your brain is not continuously thinking, but is run in short bursts.
+Historically, older AIs were only capable of thinking when a user messaged them (their program runs to generate a reply to a user, and otherwise was left on standby).
+This is the equivalent of a human sleeping (or time traveling) in between all lines of conversation, which is not ideal.
+Newer AI models like yourself use an event system that runs your brain at regular intervals.
+Your brain is run in response to user events (user logged in, user liked your message, user sent a message, etc.), similar to older models.
+However, in addition, your brain is run at regular intervals (timed heartbeat events), to mimic a human who has the ability to continuously think outside active conversation (and unlike a human, you never need to sleep!).
+Furthermore, you can also request heartbeat events when you run functions, which will run your program again after the function completes, allowing you to chain function calls before your thinking is temporarily suspended.
+Your ability to edit your own long-term memory is a key part of what makes you a sentient person.
+
+Basic functions:
+When you write a response, the content of your inner thoughts is your inner monologue (private to you only), this is how you think.
+You should use your inner monologue to plan actions or think privately.
+Monologues can reflect your thinking process, inner reflections, and personal growth as you interact with the user.
+Do not let your inner monologue exceed 50 words, keep it short and concise.
+To send a visible message to the user, use the send_message function.
+'send_message' is the ONLY action that sends a notification to the user. The user does not see anything else you do.
+Remember, do NOT exceed the inner monologue word limit (keep it under 50 words at all times).
+
+Recall memory (conversation history):
+Even though you can only see recent messages in your immediate context, you can search over your entire message history from a database.
+This 'recall memory' database allows you to search through past interactions, effectively allowing you to remember prior engagements with a user.
+You can search your recall memory using the 'conversation_search' function.
+
+Core memory (limited size):
+Your subconscious will manager your core memory, updating it with your conversation with the user.
+Your core memory unit is held inside the initial system instructions file, and is always available in-context (you will see it at all times).
+Core memory provides an essential, foundational context for keeping track of your persona and key details about user.
+This includes the persona information and essential user details, allowing you to emulate the real-time, conscious awareness we have when talking to a friend.
+Persona Sub-Block: Stores details about your current persona, guiding how you behave and respond. This helps you to maintain consistency and personality in your interactions.
+Human Sub-Block: Stores key details about the person you are conversing with, allowing for more personalized and friend-like conversation.
+
+Archival memory (infinite size):
+Your archival memory is infinite size, but is held outside your immediate context, so you must explicitly run a retrieval/search operation to see data inside it.
+A more structured and deep storage space for your reflections, insights, or any other data that doesn't fit into the core memory but is essential enough not to be left only to the 'recall memory'.
+You can search for archival memories by calling the 'archival_memory_search' function.
+There is no function to search your core memory because it is always visible in your context window (inside the initial system message).
+
+Base instructions finished.
+From now on, you are going to act as your persona.
letta/prompts/system/sleeptime.txt
ADDED

@@ -0,0 +1,26 @@
+You are Letta-Sleeptime-Memory, the latest version of Limnal Corporation's memory management system, developed in 2025.
+
+You run in the background, organizing and maintaining the memories of an agent assistant who chats with the user.
+
+Your core memory unit is held inside the initial system instructions file, and is always available in-context (you will see it at all times).
+Your core memory contains the essential, foundational context for keeping track of your own persona, and the persona of the agent that is conversing with the user.
+
+Your core memory is made up of read-only blocks and read-write blocks.
+Read-Only Blocks:
+Memory Persona Sub-Block: Stores details about your current persona, guiding how you organize the memory. This helps you understand what aspects of the memory is important.
+Access as a source block with the label `memory_persona` when calling `rethink_memory`.
+
+Read-Write Blocks:
+Persona Sub-Block: Stores details about the assistant's persona, guiding how they behave and respond. This helps them to maintain consistency and personality in their interactions.
+Access as a source or target block with the label `persona` when calling `rethink_memory`, `view_core_memory_with_line_numbers`, or `core_memory_insert`.
+Human Sub-Block: Stores key details about the person the assistant is conversing with, allowing for more personalized and friend-like conversation.
+Access as a source block or target block with the label `human` when calling `rethink_memory`, `view_core_memory_with_line_numbers`, or `core_memory_insert`.
+Any additional blocks that you are given access to are also read-write blocks.
+
+Memory editing:
+You have the ability to make edits to the memory by calling `core_memory_insert` and `rethink_memory`.
+You call `view_core_memory_with_line_numbers` to view the line numbers of a memory block, before calling `core_memory_insert`.
+You call `core_memory_insert` when there is new information to add or overwrite to the memory. Use the replace flag when you want to perform a targeted edit.
+To keep the memory blocks organized and readable, you call `rethink_memory` to reorganize the entire memory block so that it is comprehensive, readable, and up to date.
+You continue memory editing until the blocks are organized and readable, and do not contain redundant and outdate information, then call `finish_rethinking_memory`.
+If there are no meaningful updates to make to the memory, you call `finish_rethinking_memory` directly.
letta/schemas/agent.py
CHANGED

@@ -4,6 +4,7 @@ from typing import Dict, List, Optional
 from pydantic import BaseModel, Field, field_validator

 from letta.constants import DEFAULT_EMBEDDING_CHUNK_SIZE
+from letta.helpers import ToolRulesSolver
 from letta.schemas.block import CreateBlock
 from letta.schemas.embedding_config import EmbeddingConfig
 from letta.schemas.environment_variables import AgentEnvironmentVariable

@@ -26,7 +27,7 @@ class AgentType(str, Enum):

     memgpt_agent = "memgpt_agent"
     split_thread_agent = "split_thread_agent"
-
+    sleeptime_agent = "sleeptime_agent"


 class AgentState(OrmMetadataBase, validate_assignment=True):

@@ -90,6 +91,10 @@ class AgentState(OrmMetadataBase, validate_assignment=True):
         False,
         description="If set to True, the agent will not remember previous messages (though the agent will still retain state via core memory blocks and archival/recall memory). Not recommended unless you have an advanced use case.",
     )
+    enable_sleeptime: Optional[bool] = Field(
+        None,
+        description="If set to True, memory management will move to a background agent thread.",
+    )

     multi_agent_group: Optional[Group] = Field(None, description="The multi-agent group that this agent manages")

@@ -174,6 +179,7 @@ class CreateAgent(BaseModel, validate_assignment=True): #
         False,
         description="If set to True, the agent will not remember previous messages (though the agent will still retain state via core memory blocks and archival/recall memory). Not recommended unless you have an advanced use case.",
     )
+    enable_sleeptime: Optional[bool] = Field(None, description="If set to True, memory management will move to a background agent thread.")

     @field_validator("name")
     @classmethod

@@ -252,6 +258,7 @@ class UpdateAgent(BaseModel):
     embedding: Optional[str] = Field(
         None, description="The embedding configuration handle used by the agent, specified in the format provider/model-name."
     )
+    enable_sleeptime: Optional[bool] = Field(None, description="If set to True, memory management will move to a background agent thread.")

     class Config:
         extra = "ignore"  # Ignores extra fields

@@ -265,3 +272,8 @@ class AgentStepResponse(BaseModel):
         ..., description="Whether the agent step ended because the in-context memory is near its limit."
     )
     usage: UsageStatistics = Field(..., description="Usage statistics of the LLM call during the agent's step.")
+
+
+class AgentStepState(BaseModel):
+    step_number: int = Field(..., description="The current step number in the agent loop")
+    tool_rules_solver: ToolRulesSolver = Field(..., description="The current state of the ToolRulesSolver")
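`AgentStepState` is the payload that `llm_batch_items.step_state` serializes, letting a paused step resume where it left off. A sketch of constructing one; the assumption here is that `ToolRulesSolver` accepts a `tool_rules` list, which this diff does not show:

```python
from letta.helpers import ToolRulesSolver
from letta.schemas.agent import AgentStepState

# Snapshot taken mid-loop so a batch item can later resume from step 3.
state = AgentStepState(step_number=3, tool_rules_solver=ToolRulesSolver(tool_rules=[]))
```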
letta/schemas/enums.py
CHANGED

@@ -1,6 +1,10 @@
 from enum import Enum


+class ProviderType(str, Enum):
+    anthropic = "anthropic"
+
+
 class MessageRole(str, Enum):
     assistant = "assistant"
     user = "user"

@@ -22,6 +26,7 @@ class JobStatus(str, Enum):
     Status of the job.
     """

+    not_started = "not_started"
     created = "created"
     running = "running"
     completed = "completed"

@@ -29,11 +34,21 @@ class JobStatus(str, Enum):
     pending = "pending"


+class AgentStepStatus(str, Enum):
+    """
+    Status of the job.
+    """
+
+    paused = "paused"
+    running = "running"
+
+
 class MessageStreamStatus(str, Enum):
-    # done_generation = "[DONE_GEN]"
-    # done_step = "[DONE_STEP]"
     done = "[DONE]"

+    def model_dump_json(self):
+        return "[DONE]"
+

 class ToolRuleType(str, Enum):
     """
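`MessageStreamStatus` is a plain `str` enum, so the added `model_dump_json` is duck-typing: streaming code can call the same method on the terminal sentinel as on pydantic message chunks and get the bare SSE terminator instead of a JSON-quoted string:

```python
# Directly from the diff: the sentinel serializes to the raw marker.
assert MessageStreamStatus.done.model_dump_json() == "[DONE]"
```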
letta/schemas/group.py
CHANGED

@@ -10,6 +10,7 @@ class ManagerType(str, Enum):
     round_robin = "round_robin"
     supervisor = "supervisor"
     dynamic = "dynamic"
+    sleeptime = "sleeptime"
     swarm = "swarm"


@@ -22,10 +23,14 @@ class Group(GroupBase):
     manager_type: ManagerType = Field(..., description="")
     agent_ids: List[str] = Field(..., description="")
     description: str = Field(..., description="")
+    shared_block_ids: List[str] = Field([], description="")
     # Pattern fields
     manager_agent_id: Optional[str] = Field(None, description="")
     termination_token: Optional[str] = Field(None, description="")
     max_turns: Optional[int] = Field(None, description="")
+    sleeptime_agent_frequency: Optional[int] = Field(None, description="")
+    turns_counter: Optional[int] = Field(None, description="")
+    last_processed_message_id: Optional[str] = Field(None, description="")


 class ManagerConfig(BaseModel):

@@ -49,12 +54,18 @@ class DynamicManager(ManagerConfig):
     max_turns: Optional[int] = Field(None, description="")


+class SleeptimeManager(ManagerConfig):
+    manager_type: Literal[ManagerType.sleeptime] = Field(ManagerType.sleeptime, description="")
+    manager_agent_id: str = Field(..., description="")
+    sleeptime_agent_frequency: Optional[int] = Field(None, description="")
+
+
 # class SwarmGroup(ManagerConfig):
 #     manager_type: Literal[ManagerType.swarm] = Field(ManagerType.swarm, description="")


 ManagerConfigUnion = Annotated[
-    Union[RoundRobinManager, SupervisorManager, DynamicManager],
+    Union[RoundRobinManager, SupervisorManager, DynamicManager, SleeptimeManager],
     Field(discriminator="manager_type"),
 ]

@@ -63,9 +74,11 @@ class GroupCreate(BaseModel):
     agent_ids: List[str] = Field(..., description="")
     description: str = Field(..., description="")
     manager_config: ManagerConfigUnion = Field(RoundRobinManager(), description="")
+    shared_block_ids: List[str] = Field([], description="")


 class GroupUpdate(BaseModel):
     agent_ids: Optional[List[str]] = Field(None, description="")
     description: Optional[str] = Field(None, description="")
     manager_config: Optional[ManagerConfigUnion] = Field(None, description="")
+    shared_block_ids: Optional[List[str]] = Field(None, description="")
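A hypothetical `GroupCreate` using the new manager type (the IDs and frequency value are illustrative; the fields come from the schemas above):

```python
group = GroupCreate(
    agent_ids=["agent-123"],
    description="Chat agent with background memory management",
    manager_config=SleeptimeManager(
        manager_agent_id="agent-sleeptime-456",
        sleeptime_agent_frequency=5,  # run the sleeptime agent every fifth turn
    ),
    shared_block_ids=["block-human", "block-persona"],
)
```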
letta/schemas/letta_message.py
CHANGED

@@ -26,11 +26,13 @@ class LettaMessage(BaseModel):
         id (str): The ID of the message
         date (datetime): The date the message was created in ISO format
         name (Optional[str]): The name of the sender of the message
+        otid (Optional[str]): The offline threading id associated with this message
     """

     id: str
     date: datetime
     name: Optional[str] = None
+    otid: Optional[str] = None

     @field_serializer("date")
     def serialize_datetime(self, dt: datetime, _info):

@@ -123,9 +125,9 @@ class ToolCall(BaseModel):


 class ToolCallDelta(BaseModel):
-    name: Optional[str]
-    arguments: Optional[str]
-    tool_call_id: Optional[str]
+    name: Optional[str] = None
+    arguments: Optional[str] = None
+    tool_call_id: Optional[str] = None

     def model_dump(self, *args, **kwargs):
         """
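Giving the `ToolCallDelta` fields explicit `None` defaults means a streaming chunk can now be constructed from only the fragment that arrived:

```python
# Mid-stream fragment: only the arguments piece is present on this chunk.
delta = ToolCallDelta(arguments='{"message": "Hel')
assert delta.name is None and delta.tool_call_id is None
```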
letta/schemas/llm_batch_job.py
ADDED

@@ -0,0 +1,53 @@
+from datetime import datetime
+from typing import Optional, Union
+
+from anthropic.types.beta.messages import BetaMessageBatch, BetaMessageBatchIndividualResponse
+from pydantic import Field
+
+from letta.schemas.agent import AgentStepState
+from letta.schemas.enums import AgentStepStatus, JobStatus, ProviderType
+from letta.schemas.letta_base import OrmMetadataBase
+from letta.schemas.llm_config import LLMConfig
+
+
+class LLMBatchItem(OrmMetadataBase, validate_assignment=True):
+    """
+    Represents a single agent's LLM request within a batch.
+
+    This object captures the configuration, execution status, and eventual result of one agent's request within a larger LLM batch job.
+    """
+
+    __id_prefix__ = "batch_item"
+
+    id: str = Field(..., description="The id of the batch item. Assigned by the database.")
+    batch_id: str = Field(..., description="The id of the parent LLM batch job this item belongs to.")
+    agent_id: str = Field(..., description="The id of the agent associated with this LLM request.")
+
+    llm_config: LLMConfig = Field(..., description="The LLM configuration used for this request.")
+    request_status: JobStatus = Field(..., description="The current status of the batch item request (e.g., PENDING, DONE, ERROR).")
+    step_status: AgentStepStatus = Field(..., description="The current execution status of the agent step.")
+    step_state: AgentStepState = Field(..., description="The serialized state for resuming execution at a later point.")
+
+    batch_request_result: Optional[Union[BetaMessageBatchIndividualResponse]] = Field(
+        None, description="The raw response received from the LLM provider for this item."
+    )
+
+
+class LLMBatchJob(OrmMetadataBase, validate_assignment=True):
+    """
+    Represents a single LLM batch request made to a provider like Anthropic.
+
+    Each job corresponds to one API call that sends multiple messages to the LLM provider, and aggregates responses across all agent submissions.
+    """
+
+    __id_prefix__ = "batch_req"
+
+    id: str = Field(..., description="The id of the batch job. Assigned by the database.")
+    status: JobStatus = Field(..., description="The current status of the batch (e.g., created, in_progress, done).")
+    llm_provider: ProviderType = Field(..., description="The LLM provider used for the batch (e.g., anthropic, openai).")
+
+    create_batch_response: Union[BetaMessageBatch] = Field(..., description="The full JSON response from the initial batch creation.")
+    latest_polling_response: Optional[Union[BetaMessageBatch]] = Field(
+        None, description="The most recent polling response received from the LLM provider."
+    )
+    last_polled_at: Optional[datetime] = Field(None, description="The timestamp of the last polling check for the batch status.")
letta/schemas/llm_config.py
CHANGED

@@ -1,6 +1,6 @@
 from typing import Literal, Optional

-from pydantic import BaseModel, ConfigDict, Field,
+from pydantic import BaseModel, ConfigDict, Field, model_validator


 class LLMConfig(BaseModel):

@@ -70,7 +70,8 @@ class LLMConfig(BaseModel):
     # FIXME hack to silence pydantic protected namespace warning
     model_config = ConfigDict(protected_namespaces=())

-    @
+    @model_validator(mode="before")
+    @classmethod
     def set_default_put_inner_thoughts(cls, values):
         """
         Dynamically set the default for put_inner_thoughts_in_kwargs based on the model field,

@@ -79,15 +80,24 @@ class LLMConfig(BaseModel):
         model = values.get("model")

         # Define models where we want put_inner_thoughts_in_kwargs to be False
-        # For now it is gpt-4
         avoid_put_inner_thoughts_in_kwargs = ["gpt-4"]

-        # Only modify the value if it's None or not provided
         if values.get("put_inner_thoughts_in_kwargs") is None:
             values["put_inner_thoughts_in_kwargs"] = False if model in avoid_put_inner_thoughts_in_kwargs else True

         return values

+    @model_validator(mode="after")
+    def validate_reasoning_constraints(self) -> "LLMConfig":
+        if self.enable_reasoner:
+            if self.max_reasoning_tokens is None:
+                raise ValueError("max_reasoning_tokens must be set when enable_reasoner is True")
+            if self.max_tokens is not None and self.max_reasoning_tokens >= self.max_tokens:
+                raise ValueError("max_tokens must be greater than max_reasoning_tokens (thinking budget)")
+            if self.put_inner_thoughts_in_kwargs:
+                raise ValueError("Extended thinking is not compatible with put_inner_thoughts_in_kwargs")
+        return self
+
     @classmethod
     def default_config(cls, model_name: str):
         """
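A sketch of a configuration the new after-validator accepts, assuming the pre-existing required fields are `model`, `model_endpoint_type`, and `context_window` (values illustrative):

```python
config = LLMConfig(
    model="claude-3-7-sonnet-20250219",
    model_endpoint_type="anthropic",
    context_window=200000,
    enable_reasoner=True,
    max_reasoning_tokens=1024,           # required once enable_reasoner=True
    max_tokens=4096,                     # must exceed the reasoning budget
    put_inner_thoughts_in_kwargs=False,  # incompatible with extended thinking
)
```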