letta-nightly 0.1.7.dev20240924104148__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of letta-nightly might be problematic. Click here for more details.
- letta/__init__.py +24 -0
- letta/__main__.py +3 -0
- letta/agent.py +1427 -0
- letta/agent_store/chroma.py +295 -0
- letta/agent_store/db.py +546 -0
- letta/agent_store/lancedb.py +177 -0
- letta/agent_store/milvus.py +198 -0
- letta/agent_store/qdrant.py +201 -0
- letta/agent_store/storage.py +188 -0
- letta/benchmark/benchmark.py +96 -0
- letta/benchmark/constants.py +14 -0
- letta/cli/cli.py +689 -0
- letta/cli/cli_config.py +1282 -0
- letta/cli/cli_load.py +166 -0
- letta/client/__init__.py +0 -0
- letta/client/admin.py +171 -0
- letta/client/client.py +2360 -0
- letta/client/streaming.py +90 -0
- letta/client/utils.py +61 -0
- letta/config.py +484 -0
- letta/configs/anthropic.json +13 -0
- letta/configs/letta_hosted.json +11 -0
- letta/configs/openai.json +12 -0
- letta/constants.py +134 -0
- letta/credentials.py +140 -0
- letta/data_sources/connectors.py +247 -0
- letta/embeddings.py +218 -0
- letta/errors.py +26 -0
- letta/functions/__init__.py +0 -0
- letta/functions/function_sets/base.py +174 -0
- letta/functions/function_sets/extras.py +132 -0
- letta/functions/functions.py +105 -0
- letta/functions/schema_generator.py +205 -0
- letta/humans/__init__.py +0 -0
- letta/humans/examples/basic.txt +1 -0
- letta/humans/examples/cs_phd.txt +9 -0
- letta/interface.py +314 -0
- letta/llm_api/__init__.py +0 -0
- letta/llm_api/anthropic.py +383 -0
- letta/llm_api/azure_openai.py +155 -0
- letta/llm_api/cohere.py +396 -0
- letta/llm_api/google_ai.py +468 -0
- letta/llm_api/llm_api_tools.py +485 -0
- letta/llm_api/openai.py +470 -0
- letta/local_llm/README.md +3 -0
- letta/local_llm/__init__.py +0 -0
- letta/local_llm/chat_completion_proxy.py +279 -0
- letta/local_llm/constants.py +31 -0
- letta/local_llm/function_parser.py +68 -0
- letta/local_llm/grammars/__init__.py +0 -0
- letta/local_llm/grammars/gbnf_grammar_generator.py +1324 -0
- letta/local_llm/grammars/json.gbnf +26 -0
- letta/local_llm/grammars/json_func_calls_with_inner_thoughts.gbnf +32 -0
- letta/local_llm/groq/api.py +97 -0
- letta/local_llm/json_parser.py +202 -0
- letta/local_llm/koboldcpp/api.py +62 -0
- letta/local_llm/koboldcpp/settings.py +23 -0
- letta/local_llm/llamacpp/api.py +58 -0
- letta/local_llm/llamacpp/settings.py +22 -0
- letta/local_llm/llm_chat_completion_wrappers/__init__.py +0 -0
- letta/local_llm/llm_chat_completion_wrappers/airoboros.py +452 -0
- letta/local_llm/llm_chat_completion_wrappers/chatml.py +470 -0
- letta/local_llm/llm_chat_completion_wrappers/configurable_wrapper.py +387 -0
- letta/local_llm/llm_chat_completion_wrappers/dolphin.py +246 -0
- letta/local_llm/llm_chat_completion_wrappers/llama3.py +345 -0
- letta/local_llm/llm_chat_completion_wrappers/simple_summary_wrapper.py +156 -0
- letta/local_llm/llm_chat_completion_wrappers/wrapper_base.py +11 -0
- letta/local_llm/llm_chat_completion_wrappers/zephyr.py +345 -0
- letta/local_llm/lmstudio/api.py +100 -0
- letta/local_llm/lmstudio/settings.py +29 -0
- letta/local_llm/ollama/api.py +88 -0
- letta/local_llm/ollama/settings.py +32 -0
- letta/local_llm/settings/__init__.py +0 -0
- letta/local_llm/settings/deterministic_mirostat.py +45 -0
- letta/local_llm/settings/settings.py +72 -0
- letta/local_llm/settings/simple.py +28 -0
- letta/local_llm/utils.py +265 -0
- letta/local_llm/vllm/api.py +63 -0
- letta/local_llm/webui/api.py +60 -0
- letta/local_llm/webui/legacy_api.py +58 -0
- letta/local_llm/webui/legacy_settings.py +23 -0
- letta/local_llm/webui/settings.py +24 -0
- letta/log.py +76 -0
- letta/main.py +437 -0
- letta/memory.py +440 -0
- letta/metadata.py +884 -0
- letta/openai_backcompat/__init__.py +0 -0
- letta/openai_backcompat/openai_object.py +437 -0
- letta/persistence_manager.py +148 -0
- letta/personas/__init__.py +0 -0
- letta/personas/examples/anna_pa.txt +13 -0
- letta/personas/examples/google_search_persona.txt +15 -0
- letta/personas/examples/memgpt_doc.txt +6 -0
- letta/personas/examples/memgpt_starter.txt +4 -0
- letta/personas/examples/sam.txt +14 -0
- letta/personas/examples/sam_pov.txt +14 -0
- letta/personas/examples/sam_simple_pov_gpt35.txt +13 -0
- letta/personas/examples/sqldb/test.db +0 -0
- letta/prompts/__init__.py +0 -0
- letta/prompts/gpt_summarize.py +14 -0
- letta/prompts/gpt_system.py +26 -0
- letta/prompts/system/memgpt_base.txt +49 -0
- letta/prompts/system/memgpt_chat.txt +58 -0
- letta/prompts/system/memgpt_chat_compressed.txt +13 -0
- letta/prompts/system/memgpt_chat_fstring.txt +51 -0
- letta/prompts/system/memgpt_doc.txt +50 -0
- letta/prompts/system/memgpt_gpt35_extralong.txt +53 -0
- letta/prompts/system/memgpt_intuitive_knowledge.txt +31 -0
- letta/prompts/system/memgpt_modified_chat.txt +23 -0
- letta/pytest.ini +0 -0
- letta/schemas/agent.py +117 -0
- letta/schemas/api_key.py +21 -0
- letta/schemas/block.py +135 -0
- letta/schemas/document.py +21 -0
- letta/schemas/embedding_config.py +54 -0
- letta/schemas/enums.py +35 -0
- letta/schemas/job.py +38 -0
- letta/schemas/letta_base.py +80 -0
- letta/schemas/letta_message.py +175 -0
- letta/schemas/letta_request.py +23 -0
- letta/schemas/letta_response.py +28 -0
- letta/schemas/llm_config.py +54 -0
- letta/schemas/memory.py +224 -0
- letta/schemas/message.py +727 -0
- letta/schemas/openai/chat_completion_request.py +123 -0
- letta/schemas/openai/chat_completion_response.py +136 -0
- letta/schemas/openai/chat_completions.py +123 -0
- letta/schemas/openai/embedding_response.py +11 -0
- letta/schemas/openai/openai.py +157 -0
- letta/schemas/organization.py +20 -0
- letta/schemas/passage.py +80 -0
- letta/schemas/source.py +62 -0
- letta/schemas/tool.py +143 -0
- letta/schemas/usage.py +18 -0
- letta/schemas/user.py +33 -0
- letta/server/__init__.py +0 -0
- letta/server/constants.py +6 -0
- letta/server/rest_api/__init__.py +0 -0
- letta/server/rest_api/admin/__init__.py +0 -0
- letta/server/rest_api/admin/agents.py +21 -0
- letta/server/rest_api/admin/tools.py +83 -0
- letta/server/rest_api/admin/users.py +98 -0
- letta/server/rest_api/app.py +193 -0
- letta/server/rest_api/auth/__init__.py +0 -0
- letta/server/rest_api/auth/index.py +43 -0
- letta/server/rest_api/auth_token.py +22 -0
- letta/server/rest_api/interface.py +726 -0
- letta/server/rest_api/routers/__init__.py +0 -0
- letta/server/rest_api/routers/openai/__init__.py +0 -0
- letta/server/rest_api/routers/openai/assistants/__init__.py +0 -0
- letta/server/rest_api/routers/openai/assistants/assistants.py +115 -0
- letta/server/rest_api/routers/openai/assistants/schemas.py +121 -0
- letta/server/rest_api/routers/openai/assistants/threads.py +336 -0
- letta/server/rest_api/routers/openai/chat_completions/__init__.py +0 -0
- letta/server/rest_api/routers/openai/chat_completions/chat_completions.py +131 -0
- letta/server/rest_api/routers/v1/__init__.py +15 -0
- letta/server/rest_api/routers/v1/agents.py +543 -0
- letta/server/rest_api/routers/v1/blocks.py +73 -0
- letta/server/rest_api/routers/v1/jobs.py +46 -0
- letta/server/rest_api/routers/v1/llms.py +28 -0
- letta/server/rest_api/routers/v1/organizations.py +61 -0
- letta/server/rest_api/routers/v1/sources.py +199 -0
- letta/server/rest_api/routers/v1/tools.py +103 -0
- letta/server/rest_api/routers/v1/users.py +109 -0
- letta/server/rest_api/static_files.py +74 -0
- letta/server/rest_api/utils.py +69 -0
- letta/server/server.py +1995 -0
- letta/server/startup.sh +8 -0
- letta/server/static_files/assets/index-0cbf7ad5.js +274 -0
- letta/server/static_files/assets/index-156816da.css +1 -0
- letta/server/static_files/assets/index-486e3228.js +274 -0
- letta/server/static_files/favicon.ico +0 -0
- letta/server/static_files/index.html +39 -0
- letta/server/static_files/memgpt_logo_transparent.png +0 -0
- letta/server/utils.py +46 -0
- letta/server/ws_api/__init__.py +0 -0
- letta/server/ws_api/example_client.py +104 -0
- letta/server/ws_api/interface.py +108 -0
- letta/server/ws_api/protocol.py +100 -0
- letta/server/ws_api/server.py +145 -0
- letta/settings.py +165 -0
- letta/streaming_interface.py +396 -0
- letta/system.py +207 -0
- letta/utils.py +1065 -0
- letta_nightly-0.1.7.dev20240924104148.dist-info/LICENSE +190 -0
- letta_nightly-0.1.7.dev20240924104148.dist-info/METADATA +98 -0
- letta_nightly-0.1.7.dev20240924104148.dist-info/RECORD +189 -0
- letta_nightly-0.1.7.dev20240924104148.dist-info/WHEEL +4 -0
- letta_nightly-0.1.7.dev20240924104148.dist-info/entry_points.txt +3 -0
|
@@ -0,0 +1,123 @@
|
|
|
1
|
+
from typing import Any, Dict, List, Literal, Optional, Union
|
|
2
|
+
|
|
3
|
+
from pydantic import BaseModel, Field
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class SystemMessage(BaseModel):
    """A system-role chat message (OpenAI chat-completions format)."""

    content: str  # the system prompt text
    role: str = "system"
    name: Optional[str] = None  # optional author name attached to the message
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class UserMessage(BaseModel):
    """A user-role chat message (OpenAI chat-completions format)."""

    # A plain string, or a list of content parts (multi-part messages).
    content: Union[str, List[str]]
    role: str = "user"
    name: Optional[str] = None  # optional author name attached to the message
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class ToolCallFunction(BaseModel):
    """The function portion of an assistant tool call."""

    name: str  # name of the function to invoke
    arguments: str  # JSON-encoded arguments for the function
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
class ToolCall(BaseModel):
    """A tool call emitted by the assistant (currently function calls only)."""

    id: str  # ID used to correlate the eventual ToolMessage reply
    type: Literal["function"] = "function"
    function: ToolCallFunction
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
class AssistantMessage(BaseModel):
    """An assistant-role chat message (OpenAI chat-completions format)."""

    content: Optional[str] = None  # None when the reply is tool calls only
    role: str = "assistant"
    name: Optional[str] = None  # optional author name attached to the message
    tool_calls: Optional[List[ToolCall]] = None  # tool calls requested by the model
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
class ToolMessage(BaseModel):
    """A tool-role message carrying the result of an earlier tool call."""

    content: str  # tool output, typically a JSON dump
    role: str = "tool"
    tool_call_id: str  # ID of the ToolCall this message answers
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
ChatMessage = Union[SystemMessage, UserMessage, AssistantMessage, ToolMessage]
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
# TODO: this might not be necessary with the validator
def cast_message_to_subtype(m_dict: dict) -> ChatMessage:
    """Cast a dictionary to one of the individual message types.

    Args:
        m_dict: A chat message as a dict; its "role" key must be one of
            "system", "user", "assistant", or "tool".

    Returns:
        The corresponding pydantic message model validated from ``m_dict``.

    Raises:
        ValueError: If "role" is missing or not a recognized role.
    """
    role_to_type = {
        "system": SystemMessage,
        "user": UserMessage,
        "assistant": AssistantMessage,
        "tool": ToolMessage,
    }
    role = m_dict.get("role")
    try:
        message_type = role_to_type[role]
    except KeyError:
        # Include the offending role so the failure is diagnosable.
        raise ValueError(f"Unknown message role: {role!r}") from None
    return message_type(**m_dict)
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
class ResponseFormat(BaseModel):
    """Requested output format: plain text or JSON-object mode."""

    type: str = Field(default="text", pattern="^(text|json_object)$")
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
## tool_choice ##
|
|
66
|
+
## tool_choice ##
class FunctionCall(BaseModel):
    """Names a specific function for a forced tool/function choice."""

    name: str
|
|
68
|
+
|
|
69
|
+
|
|
70
|
+
class ToolFunctionChoice(BaseModel):
    """Forces the model to call one specific tool."""

    # The type of the tool. Currently, only function is supported
    type: Literal["function"] = "function"
    # type: str = Field(default="function", const=True)
    function: FunctionCall
|
|
75
|
+
|
|
76
|
+
|
|
77
|
+
ToolChoice = Union[Literal["none", "auto"], ToolFunctionChoice]
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
## tools ##
|
|
81
|
+
## tools ##
class FunctionSchema(BaseModel):
    """Declaration of a callable function exposed to the model."""

    name: str
    description: Optional[str] = None
    parameters: Optional[Dict[str, Any]] = None  # JSON Schema for the parameters
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
class Tool(BaseModel):
    """A tool definition attached to a chat-completions request."""

    # The type of the tool. Currently, only function is supported
    type: Literal["function"] = "function"
    # type: str = Field(default="function", const=True)
    function: FunctionSchema
|
|
92
|
+
|
|
93
|
+
|
|
94
|
+
## function_call ##
|
|
95
|
+
FunctionCallChoice = Union[Literal["none", "auto"], FunctionCall]
|
|
96
|
+
|
|
97
|
+
|
|
98
|
+
class ChatCompletionRequest(BaseModel):
    """Request body for the chat-completions endpoint.

    https://platform.openai.com/docs/api-reference/chat/create
    """

    model: str
    messages: List[ChatMessage]
    frequency_penalty: Optional[float] = 0
    logit_bias: Optional[Dict[str, int]] = None
    logprobs: Optional[bool] = False
    top_logprobs: Optional[int] = None
    max_tokens: Optional[int] = None
    n: Optional[int] = 1  # number of completion choices to generate
    presence_penalty: Optional[float] = 0
    response_format: Optional[ResponseFormat] = None
    seed: Optional[int] = None  # best-effort deterministic sampling
    stop: Optional[Union[str, List[str]]] = None
    stream: Optional[bool] = False
    temperature: Optional[float] = 1
    top_p: Optional[float] = 1
    user: Optional[str] = None  # unique ID of the end-user (for monitoring)

    # function-calling related
    tools: Optional[List[Tool]] = None
    tool_choice: Optional[ToolChoice] = "none"
    # deprecated scheme
    functions: Optional[List[FunctionSchema]] = None
    function_call: Optional[FunctionCallChoice] = None
|
|
@@ -0,0 +1,136 @@
|
|
|
1
|
+
import datetime
|
|
2
|
+
from typing import Dict, List, Literal, Optional, Union
|
|
3
|
+
|
|
4
|
+
from pydantic import BaseModel
|
|
5
|
+
|
|
6
|
+
# class ToolCallFunction(BaseModel):
|
|
7
|
+
# name: str
|
|
8
|
+
# arguments: str
|
|
9
|
+
|
|
10
|
+
|
|
11
|
+
class FunctionCall(BaseModel):
    """A function invocation returned by the model."""

    arguments: str  # JSON-encoded arguments
    name: str  # name of the function to invoke
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
class ToolCall(BaseModel):
    """A tool call within a (non-streaming) completion response."""

    id: str
    # "Currently, only function is supported"
    type: Literal["function"] = "function"
    # function: ToolCallFunction
    function: FunctionCall
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
class LogProbToken(BaseModel):
    """A single token with its log-probability."""

    token: str
    logprob: float
    # UTF-8 byte values of the token; None when not applicable
    bytes: Optional[List[int]]
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
class MessageContentLogProb(BaseModel):
    """Log-probability info for one content token, with top alternatives."""

    token: str
    logprob: float
    # UTF-8 byte values of the token; None when not applicable
    bytes: Optional[List[int]]
    top_logprobs: Optional[List[LogProbToken]]  # most likely alternatives
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
class Message(BaseModel):
    """The assistant message inside a completion choice."""

    content: Optional[str] = None  # None when the reply is tool calls only
    tool_calls: Optional[List[ToolCall]] = None
    role: str
    function_call: Optional[FunctionCall] = None  # Deprecated
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
class Choice(BaseModel):
    """One completion choice of a (non-streaming) response."""

    finish_reason: str  # e.g. "stop", "length", "tool_calls"
    index: int
    message: Message
    logprobs: Optional[Dict[str, Union[List[MessageContentLogProb], None]]] = None
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
class UsageStatistics(BaseModel):
    """Token-usage accounting for a single completion call."""

    completion_tokens: int = 0
    prompt_tokens: int = 0
    total_tokens: int = 0

    def __add__(self, other: "UsageStatistics") -> "UsageStatistics":
        """Combine two usage records field-by-field."""
        combined = {
            counter: getattr(self, counter) + getattr(other, counter)
            for counter in ("completion_tokens", "prompt_tokens", "total_tokens")
        }
        return UsageStatistics(**combined)
|
|
62
|
+
|
|
63
|
+
|
|
64
|
+
class ChatCompletionResponse(BaseModel):
    """https://platform.openai.com/docs/api-reference/chat/object"""

    id: str
    choices: List[Choice]
    created: datetime.datetime
    model: Optional[str] = None  # NOTE: this is not consistent with OpenAI API standard, however is necessary to support local LLMs
    # system_fingerprint: str # docs say this is mandatory, but in reality API returns None
    system_fingerprint: Optional[str] = None
    # object: str = Field(default="chat.completion")
    object: Literal["chat.completion"] = "chat.completion"
    usage: UsageStatistics
|
|
76
|
+
|
|
77
|
+
|
|
78
|
+
class FunctionCallDelta(BaseModel):
    """Incremental function-call fragment within a streamed chunk."""

    # arguments: Optional[str] = None
    # name only appears on the first fragment of a call
    name: Optional[str] = None
    arguments: str  # partial JSON arguments string for this fragment
    # name: str
|
|
83
|
+
|
|
84
|
+
|
|
85
|
+
class ToolCallDelta(BaseModel):
    """Incremental tool-call fragment within a streamed chunk."""

    index: int  # position of the tool call this fragment belongs to
    id: Optional[str] = None  # only present on the first fragment of a call
    # "Currently, only function is supported"
    type: Literal["function"] = "function"
    # function: ToolCallFunction
    function: Optional[FunctionCallDelta] = None
|
|
92
|
+
|
|
93
|
+
|
|
94
|
+
class MessageDelta(BaseModel):
    """Partial delta stream of a Message

    Example ChunkResponse:
    {
        'id': 'chatcmpl-9EOCkKdicNo1tiL1956kPvCnL2lLS',
        'object': 'chat.completion.chunk',
        'created': 1713216662,
        'model': 'gpt-4-0613',
        'system_fingerprint': None,
        'choices': [{
            'index': 0,
            'delta': {'content': 'User'},
            'logprobs': None,
            'finish_reason': None
        }]
    }
    """

    content: Optional[str] = None  # text fragment for this chunk
    tool_calls: Optional[List[ToolCallDelta]] = None
    # role: Optional[str] = None
    function_call: Optional[FunctionCallDelta] = None  # Deprecated
|
|
117
|
+
|
|
118
|
+
|
|
119
|
+
class ChunkChoice(BaseModel):
    """One choice within a streamed chunk response."""

    finish_reason: Optional[str] = None  # NOTE: when streaming will be null
    index: int
    delta: MessageDelta
    logprobs: Optional[Dict[str, Union[List[MessageContentLogProb], None]]] = None
|
|
124
|
+
|
|
125
|
+
|
|
126
|
+
class ChatCompletionChunkResponse(BaseModel):
    """https://platform.openai.com/docs/api-reference/chat/streaming"""

    id: str
    choices: List[ChunkChoice]
    created: datetime.datetime
    model: str
    # system_fingerprint: str # docs say this is mandatory, but in reality API returns None
    system_fingerprint: Optional[str] = None
    # object: str = Field(default="chat.completion")
    object: Literal["chat.completion.chunk"] = "chat.completion.chunk"
|
|
@@ -0,0 +1,123 @@
|
|
|
1
|
+
from typing import Any, Dict, List, Literal, Optional, Union
|
|
2
|
+
|
|
3
|
+
from pydantic import BaseModel, Field
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class SystemMessage(BaseModel):
    """A system-role chat message."""

    content: str
    role: str = "system"
    name: Optional[str] = None  # optional author name
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class UserMessage(BaseModel):
    """A user-role chat message."""

    content: Union[str, List[str]]  # plain string or list of content parts
    role: str = "user"
    name: Optional[str] = None  # optional author name
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class ToolCallFunction(BaseModel):
    """The function portion of a tool call."""

    name: str = Field(..., description="The name of the function to call")
    arguments: str = Field(..., description="The arguments to pass to the function (JSON dump)")
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
class ToolCall(BaseModel):
    """A tool call emitted by the assistant (function calls only)."""

    id: str = Field(..., description="The ID of the tool call")
    type: str = "function"
    function: ToolCallFunction = Field(..., description="The arguments and name for the function")
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
class AssistantMessage(BaseModel):
    """An assistant-role chat message."""

    content: Optional[str] = None  # None when the reply is tool calls only
    role: str = "assistant"
    name: Optional[str] = None  # optional author name
    tool_calls: Optional[List[ToolCall]] = None
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
class ToolMessage(BaseModel):
    """A tool-role message carrying a tool call's result."""

    content: str  # tool output, typically a JSON dump
    role: str = "tool"
    tool_call_id: str  # ID of the ToolCall this message answers
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
ChatMessage = Union[SystemMessage, UserMessage, AssistantMessage, ToolMessage]
|
|
43
|
+
|
|
44
|
+
|
|
45
|
+
# TODO: this might not be necessary with the validator
def cast_message_to_subtype(m_dict: dict) -> ChatMessage:
    """Cast a dictionary to one of the individual message types.

    Args:
        m_dict: A chat message as a dict; its "role" key must be one of
            "system", "user", "assistant", or "tool".

    Returns:
        The corresponding pydantic message model validated from ``m_dict``.

    Raises:
        ValueError: If "role" is missing or not a recognized role.
    """
    role_to_type = {
        "system": SystemMessage,
        "user": UserMessage,
        "assistant": AssistantMessage,
        "tool": ToolMessage,
    }
    role = m_dict.get("role")
    try:
        message_type = role_to_type[role]
    except KeyError:
        # Include the offending role so the failure is diagnosable.
        raise ValueError(f"Unknown message role: {role!r}") from None
    return message_type(**m_dict)
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
class ResponseFormat(BaseModel):
    """Requested output format: plain text or JSON-object mode."""

    type: str = Field(default="text", pattern="^(text|json_object)$")
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
## tool_choice ##
|
|
66
|
+
## tool_choice ##
class FunctionCall(BaseModel):
    """Names a specific function for a forced tool/function choice."""

    name: str
|
|
68
|
+
|
|
69
|
+
|
|
70
|
+
class ToolFunctionChoice(BaseModel):
    """Forces the model to call one specific tool."""

    # The type of the tool. Currently, only function is supported
    type: Literal["function"] = "function"
    # type: str = Field(default="function", const=True)
    function: FunctionCall
|
|
75
|
+
|
|
76
|
+
|
|
77
|
+
ToolChoice = Union[Literal["none", "auto"], ToolFunctionChoice]
|
|
78
|
+
|
|
79
|
+
|
|
80
|
+
## tools ##
|
|
81
|
+
## tools ##
class FunctionSchema(BaseModel):
    """Declaration of a callable function exposed to the model."""

    name: str
    description: Optional[str] = None
    parameters: Optional[Dict[str, Any]] = None  # JSON Schema for the parameters
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
class Tool(BaseModel):
    """A tool definition attached to a chat-completions request."""

    # The type of the tool. Currently, only function is supported
    type: Literal["function"] = "function"
    # type: str = Field(default="function", const=True)
    function: FunctionSchema
|
|
92
|
+
|
|
93
|
+
|
|
94
|
+
## function_call ##
|
|
95
|
+
FunctionCallChoice = Union[Literal["none", "auto"], FunctionCall]
|
|
96
|
+
|
|
97
|
+
|
|
98
|
+
class ChatCompletionRequest(BaseModel):
    """Request body for the chat-completions endpoint.

    https://platform.openai.com/docs/api-reference/chat/create
    """

    model: str
    messages: List[ChatMessage]
    frequency_penalty: Optional[float] = 0
    logit_bias: Optional[Dict[str, int]] = None
    logprobs: Optional[bool] = False
    top_logprobs: Optional[int] = None
    max_tokens: Optional[int] = None
    n: Optional[int] = 1  # number of completion choices to generate
    presence_penalty: Optional[float] = 0
    response_format: Optional[ResponseFormat] = None
    seed: Optional[int] = None  # best-effort deterministic sampling
    stop: Optional[Union[str, List[str]]] = None
    stream: Optional[bool] = False
    temperature: Optional[float] = 1
    top_p: Optional[float] = 1
    user: Optional[str] = None  # unique ID of the end-user (for monitoring)

    # function-calling related
    tools: Optional[List[Tool]] = None
    tool_choice: Optional[ToolChoice] = "none"
    # deprecated scheme
    functions: Optional[List[FunctionSchema]] = None
    function_call: Optional[FunctionCallChoice] = None
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
from typing import List, Literal
|
|
2
|
+
|
|
3
|
+
from pydantic import BaseModel
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class EmbeddingResponse(BaseModel):
    """OpenAI embedding response model: https://platform.openai.com/docs/api-reference/embeddings/object"""

    index: int  # the index of the embedding in the list of embeddings
    embedding: List[float]  # the embedding vector
    object: Literal["embedding"] = "embedding"
|
|
@@ -0,0 +1,157 @@
|
|
|
1
|
+
from enum import Enum
|
|
2
|
+
from typing import Dict, List, Optional, Union
|
|
3
|
+
|
|
4
|
+
from pydantic import BaseModel, Field
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
class ImageFile(BaseModel):
    """An image-file content part referencing an uploaded file."""

    type: str = "image_file"
    file_id: str  # ID of the uploaded image file
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class Text(BaseModel):
    """A plain-text content part."""

    object: str = "text"
    text: str = Field(..., description="The text content to be processed by the agent.")
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class MessageRoleType(str, Enum):
    """Allowed sender roles for a thread message."""

    user = "user"
    system = "system"
|
|
20
|
+
|
|
21
|
+
|
|
22
|
+
class OpenAIAssistant(BaseModel):
    """Represents an OpenAI assistant (equivalent to Letta preset)"""

    id: str = Field(..., description="The unique identifier of the assistant.")
    name: str = Field(..., description="The name of the assistant.")
    object: str = "assistant"
    description: Optional[str] = Field(None, description="The description of the assistant.")
    created_at: int = Field(..., description="The unix timestamp of when the assistant was created.")
    model: str = Field(..., description="The model used by the assistant.")
    instructions: str = Field(..., description="The instructions for the assistant.")
    tools: Optional[List[str]] = Field(None, description="The tools used by the assistant.")
    file_ids: Optional[List[str]] = Field(None, description="List of file IDs associated with the assistant.")
    metadata: Optional[dict] = Field(None, description="Metadata associated with the assistant.")
|
|
35
|
+
|
|
36
|
+
|
|
37
|
+
class OpenAIMessage(BaseModel):
    """A message within an OpenAI Assistants API thread."""

    id: str = Field(..., description="The unique identifier of the message.")
    object: str = "thread.message"
    created_at: int = Field(..., description="The unix timestamp of when the message was created.")
    thread_id: str = Field(..., description="The unique identifier of the thread.")
    role: str = Field(..., description="Role of the message sender (either 'user' or 'system')")
    # Annotation widened to Optional to match the declared default of None;
    # previously the type claimed the field was required while defaulting to None.
    content: Optional[List[Union[Text, ImageFile]]] = Field(None, description="The message content to be processed by the agent.")
    assistant_id: str = Field(..., description="The unique identifier of the assistant.")
    run_id: Optional[str] = Field(None, description="The unique identifier of the run.")
    file_ids: Optional[List[str]] = Field(None, description="List of file IDs associated with the message.")
    metadata: Optional[Dict] = Field(None, description="Metadata associated with the message.")
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
class MessageFile(BaseModel):
    """A file attached to a thread message.

    NOTE(review): this class is redefined later in this same module with
    Field-annotated attributes and an extra ``message_id``; that later
    definition shadows this one at import time — confirm which is intended.
    """

    id: str
    object: str = "thread.message.file"
    created_at: int  # unix timestamp
|
|
54
|
+
|
|
55
|
+
|
|
56
|
+
class OpenAIThread(BaseModel):
    """Represents an OpenAI thread (equivalent to Letta agent)"""

    id: str = Field(..., description="The unique identifier of the thread.")
    object: str = "thread"
    created_at: int = Field(..., description="The unix timestamp of when the thread was created.")
    # Annotation widened to Optional to match the declared default of None;
    # previously the field was typed as a required dict while defaulting to None.
    metadata: Optional[dict] = Field(None, description="Metadata associated with the thread.")
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
class AssistantFile(BaseModel):
    """A file attached to an assistant."""

    id: str = Field(..., description="The unique identifier of the file.")
    object: str = "assistant.file"
    created_at: int = Field(..., description="The unix timestamp of when the file was created.")
    assistant_id: str = Field(..., description="The unique identifier of the assistant.")
|
|
70
|
+
|
|
71
|
+
|
|
72
|
+
class MessageFile(BaseModel):
    """A file attached to a thread message.

    NOTE(review): this redefines the ``MessageFile`` declared earlier in this
    module and is the binding that survives at import time.
    """

    id: str = Field(..., description="The unique identifier of the file.")
    object: str = "thread.message.file"
    created_at: int = Field(..., description="The unix timestamp of when the file was created.")
    message_id: str = Field(..., description="The unique identifier of the message.")
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
class Function(BaseModel):
    """The function portion of a run tool call."""

    name: str = Field(..., description="The name of the function.")
    arguments: str = Field(..., description="The arguments of the function.")
|
|
82
|
+
|
|
83
|
+
|
|
84
|
+
class ToolCall(BaseModel):
    """A tool call made during a run (function calls only)."""

    id: str = Field(..., description="The unique identifier of the tool call.")
    type: str = "function"
    function: Function = Field(..., description="The function call.")
|
|
88
|
+
|
|
89
|
+
|
|
90
|
+
class ToolCallOutput(BaseModel):
    """The result submitted for a previously issued tool call."""

    tool_call_id: str = Field(..., description="The unique identifier of the tool call.")
    output: str = Field(..., description="The output of the tool call.")
|
|
93
|
+
|
|
94
|
+
|
|
95
|
+
class RequiredAction(BaseModel):
    """Action the caller must take before a run can proceed."""

    type: str = "submit_tool_outputs"
    submit_tool_outputs: List[ToolCall]  # tool calls awaiting outputs
|
|
98
|
+
|
|
99
|
+
|
|
100
|
+
class OpenAIError(BaseModel):
    """An error reported for a run or run step."""

    code: str = Field(..., description="The error code.")
    message: str = Field(..., description="The error message.")
|
|
103
|
+
|
|
104
|
+
|
|
105
|
+
class OpenAIUsage(BaseModel):
    """Token-usage accounting for a run."""

    completion_tokens: int = Field(..., description="The number of tokens used for the run.")
    prompt_tokens: int = Field(..., description="The number of tokens used for the prompt.")
    total_tokens: int = Field(..., description="The total number of tokens used for the run.")
|
|
109
|
+
|
|
110
|
+
|
|
111
|
+
class OpenAIMessageCreationStep(BaseModel):
    """Run-step detail for a step that created a message."""

    type: str = "message_creation"
    message_id: str = Field(..., description="The unique identifier of the message.")
|
|
114
|
+
|
|
115
|
+
|
|
116
|
+
class OpenAIToolCallsStep(BaseModel):
    """Run-step detail for a step that issued tool calls."""

    type: str = "tool_calls"
    tool_calls: List[ToolCall] = Field(..., description="The tool calls.")
|
|
119
|
+
|
|
120
|
+
|
|
121
|
+
class OpenAIRun(BaseModel):
    """An execution of an assistant against a thread (Assistants API run)."""

    id: str = Field(..., description="The unique identifier of the run.")
    object: str = "thread.run"
    created_at: int = Field(..., description="The unix timestamp of when the run was created.")
    thread_id: str = Field(..., description="The unique identifier of the thread.")
    assistant_id: str = Field(..., description="The unique identifier of the assistant.")
    status: str = Field(..., description="The status of the run.")
    required_action: Optional[RequiredAction] = Field(None, description="The required action of the run.")
    last_error: Optional[OpenAIError] = Field(None, description="The last error of the run.")
    expires_at: int = Field(..., description="The unix timestamp of when the run expires.")
    started_at: Optional[int] = Field(None, description="The unix timestamp of when the run started.")
    cancelled_at: Optional[int] = Field(None, description="The unix timestamp of when the run was cancelled.")
    failed_at: Optional[int] = Field(None, description="The unix timestamp of when the run failed.")
    completed_at: Optional[int] = Field(None, description="The unix timestamp of when the run completed.")
    model: str = Field(..., description="The model used by the run.")
    instructions: str = Field(..., description="The instructions for the run.")
    tools: Optional[List[ToolCall]] = Field(None, description="The tools used by the run.")  # TODO: also add code interpreter / retrieval
    file_ids: Optional[List[str]] = Field(None, description="List of file IDs associated with the run.")
    metadata: Optional[dict] = Field(None, description="Metadata associated with the run.")
    usage: Optional[OpenAIUsage] = Field(None, description="The usage of the run.")
|
|
141
|
+
|
|
142
|
+
|
|
143
|
+
class OpenAIRunStep(BaseModel):
    """A single step within a run (message creation or tool calls)."""

    id: str = Field(..., description="The unique identifier of the run step.")
    object: str = "thread.run.step"
    created_at: int = Field(..., description="The unix timestamp of when the run step was created.")
    assistant_id: str = Field(..., description="The unique identifier of the assistant.")
    thread_id: str = Field(..., description="The unique identifier of the thread.")
    run_id: str = Field(..., description="The unique identifier of the run.")
    type: str = Field(..., description="The type of the run step.")  # message_creation, tool_calls
    status: str = Field(..., description="The status of the run step.")
    step_defaults: Union[OpenAIToolCallsStep, OpenAIMessageCreationStep] = Field(..., description="The step defaults.")
    last_error: Optional[OpenAIError] = Field(None, description="The last error of the run step.")
    expired_at: Optional[int] = Field(None, description="The unix timestamp of when the run step expired.")
    failed_at: Optional[int] = Field(None, description="The unix timestamp of when the run failed.")
    completed_at: Optional[int] = Field(None, description="The unix timestamp of when the run completed.")
    usage: Optional[OpenAIUsage] = Field(None, description="The usage of the run.")
|
|
@@ -0,0 +1,20 @@
|
|
|
1
|
+
from datetime import datetime
|
|
2
|
+
from typing import Optional
|
|
3
|
+
|
|
4
|
+
from pydantic import Field
|
|
5
|
+
|
|
6
|
+
from letta.schemas.letta_base import LettaBase
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class OrganizationBase(LettaBase):
    """Base schema for organizations; defines the id prefix used for generated ids."""

    # prefix consumed by LettaBase.generate_id_field() — presumably yields ids like "org-..."; confirm in LettaBase
    __id_prefix__ = "org"
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class Organization(OrganizationBase):
    """Representation of an organization.

    Attributes:
        id: Unique identifier, generated via the base class' id field (prefix "org").
        name: The name of the organization.
        created_at: UTC timestamp of when the organization was created.
    """

    id: str = OrganizationBase.generate_id_field()
    name: str = Field(..., description="The name of the organization.")
    # Fixed copy-paste defect: the description previously read "The creation date of the user."
    # NOTE(review): datetime.utcnow returns a *naive* datetime and is deprecated since Python 3.12;
    # a timezone-aware factory would be preferable, but is kept as-is to avoid changing stored values.
    created_at: datetime = Field(default_factory=datetime.utcnow, description="The creation date of the organization.")
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
class OrganizationCreate(OrganizationBase):
    """Request payload for creating an organization; the name may be omitted."""

    name: Optional[str] = Field(default=None, description="The name of the organization.")
|
letta/schemas/passage.py
ADDED
|
@@ -0,0 +1,80 @@
|
|
|
1
|
+
from datetime import datetime
|
|
2
|
+
from typing import Dict, List, Optional
|
|
3
|
+
|
|
4
|
+
from pydantic import Field, field_validator
|
|
5
|
+
|
|
6
|
+
from letta.constants import MAX_EMBEDDING_DIM
|
|
7
|
+
from letta.schemas.embedding_config import EmbeddingConfig
|
|
8
|
+
from letta.schemas.letta_base import LettaBase
|
|
9
|
+
from letta.utils import get_utc_time
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class PassageBase(LettaBase):
    """Base schema for passages; carries ownership and provenance identifiers."""

    # prefix consumed by LettaBase.generate_id_field() for generated passage ids
    __id_prefix__ = "passage"

    # associated user/agent
    user_id: Optional[str] = Field(None, description="The unique identifier of the user associated with the passage.")
    agent_id: Optional[str] = Field(None, description="The unique identifier of the agent associated with the passage.")

    # origin data source
    source_id: Optional[str] = Field(None, description="The data source of the passage.")

    # document association
    doc_id: Optional[str] = Field(None, description="The unique identifier of the document associated with the passage.")
    # NOTE(review): mutable {} default — presumably safe because pydantic copies field defaults
    # per instance, but Field(default_factory=dict) would be the idiomatic form; verify that
    # switching would not change the emitted JSON schema before changing it.
    metadata_: Optional[Dict] = Field({}, description="The metadata of the passage.")
|
|
25
|
+
|
|
26
|
+
|
|
27
|
+
class Passage(PassageBase):
    """
    Representation of a passage, which is stored in archival memory.

    Parameters:
        text (str): The text of the passage.
        embedding (List[float]): The embedding of the passage.
        embedding_config (EmbeddingConfig): The embedding configuration used by the passage.
        created_at (datetime): The creation date of the passage.
        user_id (str): The unique identifier of the user associated with the passage.
        agent_id (str): The unique identifier of the agent associated with the passage.
        source_id (str): The data source of the passage.
        doc_id (str): The unique identifier of the document associated with the passage.
    """

    id: str = PassageBase.generate_id_field()

    # passage text
    text: str = Field(..., description="The text of the passage.")

    # embeddings — required fields (`...`), though the Optional typing permits an explicit None
    embedding: Optional[List[float]] = Field(..., description="The embedding of the passage.")
    embedding_config: Optional[EmbeddingConfig] = Field(..., description="The embedding configuration used by the passage.")

    created_at: datetime = Field(default_factory=get_utc_time, description="The creation date of the passage.")

    @field_validator("embedding")
    @classmethod
    def pad_embeddings(cls, embedding: List[float]) -> List[float]:
        """Pad embeddings to `MAX_EMBEDDING_DIM`. This is necessary to ensure all stored embeddings are the same size."""
        import numpy as np  # imported lazily — presumably to avoid a hard numpy cost at module import; confirm

        # Pad only when an embedding is present and shorter/longer than the target dimension.
        # NOTE(review): if len(embedding) > MAX_EMBEDDING_DIM, np.pad receives a negative pad
        # width and raises — confirm oversized embeddings can never reach this validator.
        if embedding and len(embedding) != MAX_EMBEDDING_DIM:
            np_embedding = np.array(embedding)
            padded_embedding = np.pad(np_embedding, (0, MAX_EMBEDDING_DIM - np_embedding.shape[0]), mode="constant")
            return padded_embedding.tolist()
        return embedding
|
|
64
|
+
|
|
65
|
+
|
|
66
|
+
class PassageCreate(PassageBase):
    """Request schema for creating a passage; embeddings may be supplied up front."""

    text: str = Field(..., description="The text of the passage.")

    # optionally provide embeddings
    embedding: Optional[List[float]] = Field(default=None, description="The embedding of the passage.")
    embedding_config: Optional[EmbeddingConfig] = Field(default=None, description="The embedding configuration used by the passage.")
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
class PassageUpdate(PassageCreate):
    """Request schema for updating an existing passage, addressed by its id."""

    id: str = Field(..., description="The unique identifier of the passage.")
    text: Optional[str] = Field(default=None, description="The text of the passage.")

    # optionally provide embeddings
    embedding: Optional[List[float]] = Field(default=None, description="The embedding of the passage.")
    embedding_config: Optional[EmbeddingConfig] = Field(default=None, description="The embedding configuration used by the passage.")
|