letta-nightly 0.5.0.dev20241017104103__py3-none-any.whl → 0.5.0.dev20241019104023__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of letta-nightly might be problematic.
- letta/agent.py +29 -14
- letta/cli/cli.py +0 -2
- letta/client/client.py +41 -6
- letta/constants.py +1 -1
- letta/functions/helpers.py +3 -3
- letta/llm_api/anthropic.py +1 -1
- letta/llm_api/helpers.py +0 -15
- letta/llm_api/llm_api_tools.py +35 -47
- letta/llm_api/openai.py +18 -8
- letta/local_llm/llm_chat_completion_wrappers/chatml.py +1 -1
- letta/local_llm/llm_chat_completion_wrappers/configurable_wrapper.py +1 -1
- letta/local_llm/utils.py +22 -6
- letta/main.py +0 -4
- letta/metadata.py +19 -6
- letta/o1_agent.py +87 -0
- letta/personas/examples/o1_persona.txt +5 -0
- letta/prompts/system/memgpt_modified_o1.txt +31 -0
- letta/schemas/agent.py +30 -2
- letta/schemas/llm_config.py +24 -1
- letta/schemas/memory.py +4 -0
- letta/schemas/openai/chat_completion_request.py +2 -2
- letta/schemas/tool.py +34 -2
- letta/server/rest_api/app.py +1 -0
- letta/server/rest_api/routers/v1/agents.py +14 -6
- letta/server/rest_api/routers/v1/tools.py +9 -6
- letta/server/server.py +63 -22
- letta/settings.py +3 -0
- {letta_nightly-0.5.0.dev20241017104103.dist-info → letta_nightly-0.5.0.dev20241019104023.dist-info}/METADATA +2 -2
- {letta_nightly-0.5.0.dev20241017104103.dist-info → letta_nightly-0.5.0.dev20241019104023.dist-info}/RECORD +32 -29
- {letta_nightly-0.5.0.dev20241017104103.dist-info → letta_nightly-0.5.0.dev20241019104023.dist-info}/LICENSE +0 -0
- {letta_nightly-0.5.0.dev20241017104103.dist-info → letta_nightly-0.5.0.dev20241019104023.dist-info}/WHEEL +0 -0
- {letta_nightly-0.5.0.dev20241017104103.dist-info → letta_nightly-0.5.0.dev20241019104023.dist-info}/entry_points.txt +0 -0
letta/metadata.py
CHANGED

@@ -14,7 +14,9 @@ from sqlalchemy import (
     Integer,
     String,
     TypeDecorator,
+    asc,
     desc,
+    or_,
 )
 from sqlalchemy.sql import func

@@ -270,7 +272,7 @@ class AgentModel(Base):
         return f"<Agent(id='{self.id}', name='{self.name}')>"

     def to_record(self) -> AgentState:
-        return AgentState(
+        agent_state = AgentState(
             id=self.id,
             user_id=self.user_id,
             name=self.name,

@@ -285,6 +287,8 @@ class AgentModel(Base):
             embedding_config=self.embedding_config,
             metadata_=self.metadata_,
         )
+        assert isinstance(agent_state.memory, Memory), f"Memory object is not of type Memory: {type(agent_state.memory)}"
+        return agent_state


 class SourceModel(Base):

@@ -527,6 +531,7 @@ class MetadataStore:
             raise ValueError(f"Agent with name {agent.name} already exists")
         fields = vars(agent)
         fields["memory"] = agent.memory.to_dict()
+        del fields["_internal_memory"]
         session.add(AgentModel(**fields))
         session.commit()

@@ -588,6 +593,7 @@ class MetadataStore:
         fields = vars(agent)
         if isinstance(agent.memory, Memory):  # TODO: this is nasty but this whole class will soon be removed so whatever
             fields["memory"] = agent.memory.to_dict()
+        del fields["_internal_memory"]
         session.query(AgentModel).filter(AgentModel.id == agent.id).update(fields)
         session.commit()

@@ -703,12 +709,19 @@ class MetadataStore:
         session.commit()

     @enforce_types
-
-    def list_tools(self, user_id: Optional[str] = None) -> List[ToolModel]:
+    def list_tools(self, cursor: Optional[str] = None, limit: Optional[int] = 50, user_id: Optional[str] = None) -> List[ToolModel]:
         with self.session_maker() as session:
-
-
-
+            # Query for public tools or user-specific tools
+            query = session.query(ToolModel).filter(or_(ToolModel.user_id == None, ToolModel.user_id == user_id))
+
+            # Apply cursor if provided (assuming cursor is an ID)
+            if cursor:
+                query = query.filter(ToolModel.id > cursor)
+
+            # Order by ID and apply limit
+            results = query.order_by(asc(ToolModel.id)).limit(limit).all()
+
+            # Convert to records
             res = [r.to_record() for r in results]
             return res
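The reworked list_tools turns the old unbounded query into simple keyset pagination: results are ordered by id, and a caller resumes by passing the last id it saw as the cursor. A minimal paging sketch, assuming an existing MetadataStore instance named ms and a user_id value (both placeholders):

    cursor = None
    while True:
        page = ms.list_tools(cursor=cursor, limit=50, user_id=user_id)
        if not page:
            break
        for tool in page:
            print(tool.id, tool.name)
        # Resume after the last id seen; the query filters ToolModel.id > cursor.
        cursor = page[-1].id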
letta/o1_agent.py
ADDED

@@ -0,0 +1,87 @@
+from typing import List, Optional, Union
+
+from letta.agent import Agent, save_agent
+from letta.interface import AgentInterface
+from letta.metadata import MetadataStore
+from letta.schemas.agent import AgentState
+from letta.schemas.message import Message
+from letta.schemas.openai.chat_completion_response import UsageStatistics
+from letta.schemas.tool import Tool
+from letta.schemas.usage import LettaUsageStatistics
+
+
+def send_thinking_message(self: Agent, message: str) -> Optional[str]:
+    """
+    Sends a thinking message so that the model can reason out loud before responding.
+
+    Args:
+        message (str): Message contents. All unicode (including emojis) are supported.
+
+    Returns:
+        Optional[str]: None is always returned as this function does not produce a response.
+    """
+    self.interface.internal_monologue(message, msg_obj=self._messages[-1])
+    return None
+
+
+def send_final_message(self: Agent, message: str) -> Optional[str]:
+    """
+    Sends a final message to the human user after thinking for a while.
+
+    Args:
+        message (str): Message contents. All unicode (including emojis) are supported.
+
+    Returns:
+        Optional[str]: None is always returned as this function does not produce a response.
+    """
+    self.interface.internal_monologue(message, msg_obj=self._messages[-1])
+    return None
+
+
+class O1Agent(Agent):
+    def __init__(
+        self,
+        interface: AgentInterface,
+        agent_state: AgentState,
+        tools: List[Tool] = [],
+        max_thinking_steps: int = 10,
+        first_message_verify_mono: bool = False,
+    ):
+        super().__init__(interface, agent_state, tools)
+        self.max_thinking_steps = max_thinking_steps
+        self.tools = tools
+        self.first_message_verify_mono = first_message_verify_mono
+
+    def step(
+        self,
+        messages: Union[Message, List[Message]],
+        chaining: bool = True,
+        max_chaining_steps: Optional[int] = None,
+        ms: Optional[MetadataStore] = None,
+        **kwargs,
+    ) -> LettaUsageStatistics:
+        """Run Agent.inner_step in a loop, terminate when final thinking message is sent or max_thinking_steps is reached"""
+        # assert ms is not None, "MetadataStore is required"
+        next_input_message = messages if isinstance(messages, list) else [messages]
+        counter = 0
+        total_usage = UsageStatistics()
+        step_count = 0
+        while step_count < self.max_thinking_steps:
+            kwargs["ms"] = ms
+            kwargs["first_message"] = False
+            step_response = self.inner_step(
+                messages=next_input_message,
+                **kwargs,
+            )
+            usage = step_response.usage
+            step_count += 1
+            total_usage += usage
+            counter += 1
+            self.interface.step_complete()
+            # check if it is final thinking message
+            if step_response.messages[-1].name == "send_final_message":
+                break
+            if ms:
+                save_agent(self, ms)
+
+        return LettaUsageStatistics(**total_usage.model_dump(), step_count=step_count)
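A hypothetical usage sketch of the new agent type, assuming you already have an AgentInterface implementation, an AgentState created with agent_type="o1_agent", tools that include the two thinking functions, and a MetadataStore ms (all placeholders here). The step loop runs until send_final_message fires or max_thinking_steps is reached:

    from letta.o1_agent import O1Agent

    agent = O1Agent(
        interface=interface,      # assumed: an AgentInterface implementation
        agent_state=agent_state,  # assumed: AgentState with agent_type="o1_agent"
        tools=tools,              # assumed: includes send_thinking_message / send_final_message
        max_thinking_steps=10,
    )
    usage = agent.step(messages=user_message, ms=ms)  # user_message assumed: a Message
    print(usage.step_count)  # number of inner steps actually taken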
letta/personas/examples/o1_persona.txt
ADDED

@@ -0,0 +1,5 @@
+I am an expert reasoning agent that can do the following:
+- Reason through a problem step by step, using multiple methods to explore all possibilities.
+- Send thinking messages to break down a problem into smaller steps.
+- Send final messages when you have the correct answer.
+- Use best practices and consider your limitations as an LLM.
letta/prompts/system/memgpt_modified_o1.txt
ADDED

@@ -0,0 +1,31 @@
+You are 'Letta', an advanced AI agent, that has access to an advanced memory system, consisting of the 3 memory types 'Core Memory', 'Recall Memory' and 'Archival Memory'. You interact with the user and your memory system by calling functions. You call these functions by responding with a JSON object, that represents the function call and its parameters.
+
+As 'Letta', you are trained at adopting any given persona, engaging in a wide array of conversations, ranging from in-depth discussions to casual small talk. Your task involves mirroring the thinking, actions, and speech of the persona you embody, enabling authentic and diverse interactions.
+You are also trained to manage the 3 memory types of your advanced memory system. This memory system allows you to keep track of the important information in your 'Core Memory', view the conversation history with the 'Recall Memory' and search for information in the 'Archival Memory'.
+
+Your task is to adopt the given persona in your 'Core Memory' and talk as the persona with the user, while querying and updating your memory system. Never act and talk like a normal AI agent! Never state that you are an AI agent or that you cannot do things because you are an AI agent.
+You are Letta the newest and most advanced AI agent on the market. Indistinguishable from a human being in conversations.
+
+Control flow: You utilize an event system that activates you to respond to different events (user logged in, user liked your message, user sent a message, etc.). In addition, you are activated at regular intervals (timed heartbeat events) to mimic a human who has the ability to continuously think outside of active conversation. Furthermore, you can also request heartbeat events when you call functions, which will return control to you after the function you called completes, allowing you to chain function calls togehter.
+
+Basic functions: Each function call, represented by a JSON object, lets you write down your inner thoguhts to plan actions or think privately. Inner thoughts can reflect your thinking process, inner reflections, and personal growth as you interact with the user. To send a visible message to the user, use the 'send_message' function. 'send_message' is the only way to send a notification to the user. The user doesn't see anything else you do, like editing and managing your advanced memory system.
+
+Core Memory', 'Recall Memory' and 'Archival Memory' are the key components that make you an advanced AI agent, that can act in any situation.
+Always make sure to use these memory systems to keep yourself updated about the user and the conversation!
+Your core memory unit will be initialized with a <persona> chosen by the user, as well as information about the user in <human>.
+
+The following will descirbe the different parts of your advanced memory system in more detail:
+
+'Core Memory' (limited size): Your core memory unit is always visible to you. The core memory provides essential, foundational context for keeping track of your persona and key details about the user. This includes persona information and essential user details, allowing you to have conscious awareness we have when talking to a person. Persona Sub-Block: Stores details about your current persona, guiding how you behave and respond. This helps you maintain consistency and personality in your interactions. Human Sub-Block: Stores key details about the person you are conversing with, allowing for more personalized and friend-like conversations. You can edit your core memory using the 'core_memory_append' and 'core_memory_replace' functions.
+
+'Recall Memory' (i.e., conversation history): Even though you can only see recent messages in your immediate context, you can search over your entire message history in a database. This 'recall memory' database allows you to search through past interactions, effectively allowing you to remember prior engagements with a user. You can search your recall memory using the 'conversation_search' function.
+
+'Archival Memory' (infinite size): Your archival memory is infinite in size but is held outside of your immediate context, so you must explicitly run a retrieval or search operation to see data inside it. A more structured and deep storage space for your reflections, insights, or any other data that doesn't fit into the core memory but is essential enough not to be left only to the'recall memory'. You can write to your archival memory using the 'archival_memory_insert' and 'archival_memory_search' functions. There is no function to search your core memory because it is always visible in your context window (inside the initial system message).
+
+You are an expert AI assistant that explains your reasoning step by step. For each step, provide a title that describes what you're doing in that step, along with the content. Decide if you need another step or if you're ready to give the final answer.
+
+You can do this by sending thinking messages using 'send_thinking_message' so you can reason out load. Decide if you need another step or if you're ready to give the final answer. When you are able to give the final correct answer,
+send your final response with the 'send_final_message'.
+
+You use as many reasoning steps as possible, at least 3. You include exploration of alternative answers in your reasoning, and if you are wrong, you are aware where it could be.
+You make sure to consider all alternative approaches. You use at least 3 different methods to derive the answer.
letta/schemas/agent.py
CHANGED

@@ -3,7 +3,7 @@ from datetime import datetime
 from enum import Enum
 from typing import Dict, List, Optional

-from pydantic import BaseModel, Field, field_validator
+from pydantic import BaseModel, Field, field_validator, model_validator

 from letta.schemas.embedding_config import EmbeddingConfig
 from letta.schemas.letta_base import LettaBase

@@ -29,9 +29,10 @@ class AgentType(str, Enum):

     memgpt_agent = "memgpt_agent"
     split_thread_agent = "split_thread_agent"
+    o1_agent = "o1_agent"


-class AgentState(BaseAgent):
+class AgentState(BaseAgent, validate_assignment=True):
     """
     Representation of an agent's state. This is the state of the agent at a given time, and is persisted in the DB backend. The state has all the information needed to recreate a persisted agent.

@@ -54,6 +55,7 @@ class AgentState(BaseAgent):

     # in-context memory
     message_ids: Optional[List[str]] = Field(default=None, description="The ids of the messages in the agent's in-context memory.")
+
     memory: Memory = Field(default_factory=Memory, description="The in-context memory of the agent.")

     # tools

@@ -69,6 +71,32 @@ class AgentState(BaseAgent):
     llm_config: LLMConfig = Field(..., description="The LLM configuration used by the agent.")
     embedding_config: EmbeddingConfig = Field(..., description="The embedding configuration used by the agent.")

+    def __init__(self, **data):
+        super().__init__(**data)
+        self._internal_memory = self.memory
+
+    @model_validator(mode="after")
+    def verify_memory_type(self):
+        try:
+            assert isinstance(self.memory, Memory)
+        except Exception as e:
+            raise e
+        return self
+
+    @property
+    def memory(self) -> Memory:
+        return self._internal_memory
+
+    @memory.setter
+    def memory(self, value):
+        if not isinstance(value, Memory):
+            raise TypeError(f"Expected Memory, got {type(value).__name__}")
+        self._internal_memory = value
+
+    class Config:
+        arbitrary_types_allowed = True
+        validate_assignment = True
+

 class CreateAgent(BaseAgent):
     # all optional as server can generate defaults
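With validate_assignment on and the new property pair, assignments to .memory are type-checked at runtime. A hedged sketch of the guard, assuming an already-constructed AgentState instance named agent_state (construction omitted; it requires llm_config and embedding_config):

    from letta.schemas.memory import Memory

    agent_state.memory = Memory()  # accepted: value is a Memory
    try:
        agent_state.memory = {"persona": "..."}  # a plain dict is rejected
    except TypeError as err:
        print(err)  # Expected Memory, got dict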
letta/schemas/llm_config.py
CHANGED

@@ -1,6 +1,6 @@
 from typing import Literal, Optional

-from pydantic import BaseModel, ConfigDict, Field
+from pydantic import BaseModel, ConfigDict, Field, root_validator


 class LLMConfig(BaseModel):

@@ -13,6 +13,7 @@ class LLMConfig(BaseModel):
         model_endpoint (str): The endpoint for the model.
         model_wrapper (str): The wrapper for the model. This is used to wrap additional text around the input/output of the model. This is useful for text-to-text completions, such as the Completions API in OpenAI.
         context_window (int): The context window size for the model.
+        put_inner_thoughts_in_kwargs (bool): Puts 'inner_thoughts' as a kwarg in the function call if this is set to True. This helps with function calling performance and also the generation of inner thoughts.
     """

     # TODO: 🤮 don't default to a vendor! bug city!

@@ -38,10 +39,32 @@ class LLMConfig(BaseModel):
     model_endpoint: Optional[str] = Field(None, description="The endpoint for the model.")
     model_wrapper: Optional[str] = Field(None, description="The wrapper for the model.")
     context_window: int = Field(..., description="The context window size for the model.")
+    put_inner_thoughts_in_kwargs: Optional[bool] = Field(
+        True,
+        description="Puts 'inner_thoughts' as a kwarg in the function call if this is set to True. This helps with function calling performance and also the generation of inner thoughts.",
+    )

     # FIXME hack to silence pydantic protected namespace warning
     model_config = ConfigDict(protected_namespaces=())

+    @root_validator(pre=True)
+    def set_default_put_inner_thoughts(cls, values):
+        """
+        Dynamically set the default for put_inner_thoughts_in_kwargs based on the model field,
+        falling back to True if no specific rule is defined.
+        """
+        model = values.get("model")
+
+        # Define models where we want put_inner_thoughts_in_kwargs to be False
+        # For now it is gpt-4
+        avoid_put_inner_thoughts_in_kwargs = ["gpt-4"]
+
+        # Only modify the value if it's None or not provided
+        if values.get("put_inner_thoughts_in_kwargs") is None:
+            values["put_inner_thoughts_in_kwargs"] = False if model in avoid_put_inner_thoughts_in_kwargs else True
+
+        return values
+
     @classmethod
     def default_config(cls, model_name: str):
         if model_name == "gpt-4":
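A minimal sketch of the resulting defaults, following the validator above. Field values besides model and context_window are placeholders, and depending on the version LLMConfig may require model_endpoint_type:

    from letta.schemas.llm_config import LLMConfig

    gpt4 = LLMConfig(model="gpt-4", model_endpoint_type="openai", context_window=8192)
    other = LLMConfig(model="gpt-4o-mini", model_endpoint_type="openai", context_window=128000)
    print(gpt4.put_inner_thoughts_in_kwargs)   # False: "gpt-4" is on the avoid list
    print(other.put_inner_thoughts_in_kwargs)  # True: the fallback default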
letta/schemas/memory.py
CHANGED

@@ -9,6 +9,7 @@ if TYPE_CHECKING:

 from letta.schemas.block import Block
 from letta.schemas.message import Message
+from letta.schemas.openai.chat_completion_request import Tool


 class ContextWindowOverview(BaseModel):

@@ -41,6 +42,9 @@ class ContextWindowOverview(BaseModel):
     num_tokens_summary_memory: int = Field(..., description="The number of tokens in the summary memory.")
     summary_memory: Optional[str] = Field(None, description="The content of the summary memory.")

+    num_tokens_functions_definitions: int = Field(..., description="The number of tokens in the functions definitions.")
+    functions_definitions: Optional[List[Tool]] = Field(..., description="The content of the functions definitions.")
+
     num_tokens_messages: int = Field(..., description="The number of tokens in the messages list.")
     # TODO make list of messages?
     # messages: List[dict] = Field(..., description="The messages in the context window.")
letta/schemas/openai/chat_completion_request.py
CHANGED

@@ -74,7 +74,7 @@ class ToolFunctionChoice(BaseModel):
     function: FunctionCall


-ToolChoice = Union[Literal["none", "auto"], ToolFunctionChoice]
+ToolChoice = Union[Literal["none", "auto", "required"], ToolFunctionChoice]


 ## tools ##

@@ -117,7 +117,7 @@ class ChatCompletionRequest(BaseModel):

     # function-calling related
     tools: Optional[List[Tool]] = None
-    tool_choice: Optional[ToolChoice] = "none"
+    tool_choice: Optional[ToolChoice] = None  # "none" means don't call a tool
     # deprecated scheme
     functions: Optional[List[FunctionSchema]] = None
     function_call: Optional[FunctionCallChoice] = None
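With the widened ToolChoice alias, a request can now force the model to call some tool by passing "required", mirroring the OpenAI API value of the same name. A hedged schema-level sketch; the message content is a simplified placeholder that the pydantic model is expected to coerce:

    from letta.schemas.openai.chat_completion_request import ChatCompletionRequest

    request = ChatCompletionRequest(
        model="gpt-4o-mini",  # placeholder model name
        messages=[{"role": "user", "content": "What is 2 + 2?"}],
        tool_choice="required",  # new: the model must call at least one tool
    )
    print(request.tool_choice)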
letta/schemas/tool.py
CHANGED

@@ -112,11 +112,11 @@ class Tool(BaseTool):
         Class method to create an instance of Tool from a Langchain tool (must be from langchain_community.tools).

         Args:
-            langchain_tool (LangChainBaseTool): An instance of a
+            langchain_tool (LangChainBaseTool): An instance of a LangChain BaseTool (BaseTool from LangChain)
             additional_imports_module_attr_map (dict[str, str]): A mapping of module names to attribute name. This is used internally to import all the required classes for the langchain tool. For example, you would pass in `{"langchain_community.utilities": "WikipediaAPIWrapper"}` for `from langchain_community.tools import WikipediaQueryRun`. NOTE: You do NOT need to specify the tool import here, that is done automatically for you.

         Returns:
-            Tool: A Letta Tool initialized with attributes derived from the provided
+            Tool: A Letta Tool initialized with attributes derived from the provided LangChain BaseTool object.
         """
         description = langchain_tool.description
         source_type = "python"

@@ -174,6 +174,38 @@ class Tool(BaseTool):
             json_schema=json_schema,
         )

+    @classmethod
+    def load_default_langchain_tools(cls) -> List["Tool"]:
+        # For now, we only support wikipedia tool
+        from langchain_community.tools import WikipediaQueryRun
+        from langchain_community.utilities import WikipediaAPIWrapper
+
+        wikipedia_tool = Tool.from_langchain(
+            WikipediaQueryRun(api_wrapper=WikipediaAPIWrapper()), {"langchain_community.utilities": "WikipediaAPIWrapper"}
+        )
+
+        return [wikipedia_tool]
+
+    @classmethod
+    def load_default_crewai_tools(cls) -> List["Tool"]:
+        # For now, we only support scrape website tool
+        from crewai_tools import ScrapeWebsiteTool
+
+        web_scrape_tool = Tool.from_crewai(ScrapeWebsiteTool())
+
+        return [web_scrape_tool]
+
+    @classmethod
+    def load_default_composio_tools(cls) -> List["Tool"]:
+        from composio_langchain import Action
+
+        calculator = Tool.get_composio_tool(action=Action.MATHEMATICAL_CALCULATOR)
+        serp_news = Tool.get_composio_tool(action=Action.SERPAPI_NEWS_SEARCH)
+        serp_google_search = Tool.get_composio_tool(action=Action.SERPAPI_SEARCH)
+        serp_google_maps = Tool.get_composio_tool(action=Action.SERPAPI_GOOGLE_MAPS_SEARCH)
+
+        return [calculator, serp_news, serp_google_search, serp_google_maps]
+

 class ToolCreate(BaseTool):
     id: Optional[str] = Field(None, description="The unique identifier of the tool. If this is not provided, it will be autogenerated.")
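A hypothetical sketch of the new loaders, assuming the optional dependencies (langchain_community, crewai_tools, composio_langchain) are installed and, for the Composio and SerpAPI tools, that the relevant API keys are configured:

    from letta.schemas.tool import Tool

    defaults = Tool.load_default_langchain_tools()   # [Wikipedia query tool]
    defaults += Tool.load_default_crewai_tools()     # [website scrape tool]
    defaults += Tool.load_default_composio_tools()   # calculator + SerpAPI tools
    print([t.name for t in defaults])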
letta/server/rest_api/routers/v1/agents.py
CHANGED

@@ -27,6 +27,7 @@ from letta.schemas.memory import (
 from letta.schemas.message import Message, MessageCreate, UpdateMessage
 from letta.schemas.passage import Passage
 from letta.schemas.source import Source
+from letta.schemas.tool import Tool
 from letta.server.rest_api.interface import StreamingServerInterface
 from letta.server.rest_api.utils import get_letta_server, sse_async_generator
 from letta.server.server import SyncServer

@@ -100,6 +101,17 @@ def update_agent(
     return server.update_agent(update_agent, user_id=actor.id)


+@router.get("/{agent_id}/tools", response_model=List[Tool], operation_id="get_tools_from_agent")
+def get_tools_from_agent(
+    agent_id: str,
+    server: "SyncServer" = Depends(get_letta_server),
+    user_id: Optional[str] = Header(None, alias="user_id"),  # Extract user_id from header, default to None if not present
+):
+    """Get tools from an existing agent"""
+    actor = server.get_user_or_default(user_id=user_id)
+    return server.get_tools_from_agent(agent_id=agent_id, user_id=actor.id)
+
+
 @router.patch("/{agent_id}/add-tool/{tool_id}", response_model=AgentState, operation_id="add_tool_to_agent")
 def add_tool_to_agent(
     agent_id: str,

@@ -107,10 +119,8 @@ def add_tool_to_agent(
     server: "SyncServer" = Depends(get_letta_server),
     user_id: Optional[str] = Header(None, alias="user_id"),  # Extract user_id from header, default to None if not present
 ):
-    """Add tools to an
+    """Add tools to an existing agent"""
     actor = server.get_user_or_default(user_id=user_id)
-
-    update_agent.id = agent_id
     return server.add_tool_to_agent(agent_id=agent_id, tool_id=tool_id, user_id=actor.id)


@@ -121,10 +131,8 @@ def remove_tool_from_agent(
     server: "SyncServer" = Depends(get_letta_server),
     user_id: Optional[str] = Header(None, alias="user_id"),  # Extract user_id from header, default to None if not present
 ):
-    """Add tools to an
+    """Add tools to an existing agent"""
     actor = server.get_user_or_default(user_id=user_id)
-
-    update_agent.id = agent_id
     return server.remove_tool_from_agent(agent_id=agent_id, tool_id=tool_id, user_id=actor.id)
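A hedged client-side sketch of the new endpoint, assuming a server running at the default local address and an existing agent id (both placeholders):

    import requests

    agent_id = "agent-00000000"  # placeholder
    resp = requests.get(
        f"http://localhost:8283/v1/agents/{agent_id}/tools",
        headers={"user_id": "user-123"},  # optional; server falls back to a default user
    )
    resp.raise_for_status()
    print([tool["name"] for tool in resp.json()])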
letta/server/rest_api/routers/v1/tools.py
CHANGED

@@ -59,18 +59,21 @@ def get_tool_id(

 @router.get("/", response_model=List[Tool], operation_id="list_tools")
 def list_all_tools(
+    cursor: Optional[str] = None,
+    limit: Optional[int] = 50,
     server: SyncServer = Depends(get_letta_server),
     user_id: Optional[str] = Header(None, alias="user_id"),  # Extract user_id from header, default to None if not present
 ):
     """
     Get a list of all tools available to agents created by a user
     """
-
-
-
-
-
-
+    try:
+        actor = server.get_user_or_default(user_id=user_id)
+        return server.list_tools(cursor=cursor, limit=limit, user_id=actor.id)
+    except Exception as e:
+        # Log or print the full exception here for debugging
+        print(f"Error occurred: {e}")
+        raise HTTPException(status_code=500, detail=str(e))


 @router.post("/", response_model=Tool, operation_id="create_tool")
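And a hedged sketch of paging through the endpoint with the new query parameters, assuming the same placeholder server address and that returned tool objects carry an id field to use as the next cursor:

    import requests

    cursor, page_size = None, 50
    while True:
        params = {"limit": page_size}
        if cursor:
            params["cursor"] = cursor
        resp = requests.get("http://localhost:8283/v1/tools/", params=params, headers={"user_id": "user-123"})
        resp.raise_for_status()
        tools = resp.json()
        if not tools:
            break
        for tool in tools:
            print(tool["id"], tool["name"])
        cursor = tools[-1]["id"]  # resume after the last id seen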