letta-nightly 0.1.7.dev20240924104148__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of letta-nightly might be problematic. Click here for more details.

Files changed (189) hide show
  1. letta/__init__.py +24 -0
  2. letta/__main__.py +3 -0
  3. letta/agent.py +1427 -0
  4. letta/agent_store/chroma.py +295 -0
  5. letta/agent_store/db.py +546 -0
  6. letta/agent_store/lancedb.py +177 -0
  7. letta/agent_store/milvus.py +198 -0
  8. letta/agent_store/qdrant.py +201 -0
  9. letta/agent_store/storage.py +188 -0
  10. letta/benchmark/benchmark.py +96 -0
  11. letta/benchmark/constants.py +14 -0
  12. letta/cli/cli.py +689 -0
  13. letta/cli/cli_config.py +1282 -0
  14. letta/cli/cli_load.py +166 -0
  15. letta/client/__init__.py +0 -0
  16. letta/client/admin.py +171 -0
  17. letta/client/client.py +2360 -0
  18. letta/client/streaming.py +90 -0
  19. letta/client/utils.py +61 -0
  20. letta/config.py +484 -0
  21. letta/configs/anthropic.json +13 -0
  22. letta/configs/letta_hosted.json +11 -0
  23. letta/configs/openai.json +12 -0
  24. letta/constants.py +134 -0
  25. letta/credentials.py +140 -0
  26. letta/data_sources/connectors.py +247 -0
  27. letta/embeddings.py +218 -0
  28. letta/errors.py +26 -0
  29. letta/functions/__init__.py +0 -0
  30. letta/functions/function_sets/base.py +174 -0
  31. letta/functions/function_sets/extras.py +132 -0
  32. letta/functions/functions.py +105 -0
  33. letta/functions/schema_generator.py +205 -0
  34. letta/humans/__init__.py +0 -0
  35. letta/humans/examples/basic.txt +1 -0
  36. letta/humans/examples/cs_phd.txt +9 -0
  37. letta/interface.py +314 -0
  38. letta/llm_api/__init__.py +0 -0
  39. letta/llm_api/anthropic.py +383 -0
  40. letta/llm_api/azure_openai.py +155 -0
  41. letta/llm_api/cohere.py +396 -0
  42. letta/llm_api/google_ai.py +468 -0
  43. letta/llm_api/llm_api_tools.py +485 -0
  44. letta/llm_api/openai.py +470 -0
  45. letta/local_llm/README.md +3 -0
  46. letta/local_llm/__init__.py +0 -0
  47. letta/local_llm/chat_completion_proxy.py +279 -0
  48. letta/local_llm/constants.py +31 -0
  49. letta/local_llm/function_parser.py +68 -0
  50. letta/local_llm/grammars/__init__.py +0 -0
  51. letta/local_llm/grammars/gbnf_grammar_generator.py +1324 -0
  52. letta/local_llm/grammars/json.gbnf +26 -0
  53. letta/local_llm/grammars/json_func_calls_with_inner_thoughts.gbnf +32 -0
  54. letta/local_llm/groq/api.py +97 -0
  55. letta/local_llm/json_parser.py +202 -0
  56. letta/local_llm/koboldcpp/api.py +62 -0
  57. letta/local_llm/koboldcpp/settings.py +23 -0
  58. letta/local_llm/llamacpp/api.py +58 -0
  59. letta/local_llm/llamacpp/settings.py +22 -0
  60. letta/local_llm/llm_chat_completion_wrappers/__init__.py +0 -0
  61. letta/local_llm/llm_chat_completion_wrappers/airoboros.py +452 -0
  62. letta/local_llm/llm_chat_completion_wrappers/chatml.py +470 -0
  63. letta/local_llm/llm_chat_completion_wrappers/configurable_wrapper.py +387 -0
  64. letta/local_llm/llm_chat_completion_wrappers/dolphin.py +246 -0
  65. letta/local_llm/llm_chat_completion_wrappers/llama3.py +345 -0
  66. letta/local_llm/llm_chat_completion_wrappers/simple_summary_wrapper.py +156 -0
  67. letta/local_llm/llm_chat_completion_wrappers/wrapper_base.py +11 -0
  68. letta/local_llm/llm_chat_completion_wrappers/zephyr.py +345 -0
  69. letta/local_llm/lmstudio/api.py +100 -0
  70. letta/local_llm/lmstudio/settings.py +29 -0
  71. letta/local_llm/ollama/api.py +88 -0
  72. letta/local_llm/ollama/settings.py +32 -0
  73. letta/local_llm/settings/__init__.py +0 -0
  74. letta/local_llm/settings/deterministic_mirostat.py +45 -0
  75. letta/local_llm/settings/settings.py +72 -0
  76. letta/local_llm/settings/simple.py +28 -0
  77. letta/local_llm/utils.py +265 -0
  78. letta/local_llm/vllm/api.py +63 -0
  79. letta/local_llm/webui/api.py +60 -0
  80. letta/local_llm/webui/legacy_api.py +58 -0
  81. letta/local_llm/webui/legacy_settings.py +23 -0
  82. letta/local_llm/webui/settings.py +24 -0
  83. letta/log.py +76 -0
  84. letta/main.py +437 -0
  85. letta/memory.py +440 -0
  86. letta/metadata.py +884 -0
  87. letta/openai_backcompat/__init__.py +0 -0
  88. letta/openai_backcompat/openai_object.py +437 -0
  89. letta/persistence_manager.py +148 -0
  90. letta/personas/__init__.py +0 -0
  91. letta/personas/examples/anna_pa.txt +13 -0
  92. letta/personas/examples/google_search_persona.txt +15 -0
  93. letta/personas/examples/memgpt_doc.txt +6 -0
  94. letta/personas/examples/memgpt_starter.txt +4 -0
  95. letta/personas/examples/sam.txt +14 -0
  96. letta/personas/examples/sam_pov.txt +14 -0
  97. letta/personas/examples/sam_simple_pov_gpt35.txt +13 -0
  98. letta/personas/examples/sqldb/test.db +0 -0
  99. letta/prompts/__init__.py +0 -0
  100. letta/prompts/gpt_summarize.py +14 -0
  101. letta/prompts/gpt_system.py +26 -0
  102. letta/prompts/system/memgpt_base.txt +49 -0
  103. letta/prompts/system/memgpt_chat.txt +58 -0
  104. letta/prompts/system/memgpt_chat_compressed.txt +13 -0
  105. letta/prompts/system/memgpt_chat_fstring.txt +51 -0
  106. letta/prompts/system/memgpt_doc.txt +50 -0
  107. letta/prompts/system/memgpt_gpt35_extralong.txt +53 -0
  108. letta/prompts/system/memgpt_intuitive_knowledge.txt +31 -0
  109. letta/prompts/system/memgpt_modified_chat.txt +23 -0
  110. letta/pytest.ini +0 -0
  111. letta/schemas/agent.py +117 -0
  112. letta/schemas/api_key.py +21 -0
  113. letta/schemas/block.py +135 -0
  114. letta/schemas/document.py +21 -0
  115. letta/schemas/embedding_config.py +54 -0
  116. letta/schemas/enums.py +35 -0
  117. letta/schemas/job.py +38 -0
  118. letta/schemas/letta_base.py +80 -0
  119. letta/schemas/letta_message.py +175 -0
  120. letta/schemas/letta_request.py +23 -0
  121. letta/schemas/letta_response.py +28 -0
  122. letta/schemas/llm_config.py +54 -0
  123. letta/schemas/memory.py +224 -0
  124. letta/schemas/message.py +727 -0
  125. letta/schemas/openai/chat_completion_request.py +123 -0
  126. letta/schemas/openai/chat_completion_response.py +136 -0
  127. letta/schemas/openai/chat_completions.py +123 -0
  128. letta/schemas/openai/embedding_response.py +11 -0
  129. letta/schemas/openai/openai.py +157 -0
  130. letta/schemas/organization.py +20 -0
  131. letta/schemas/passage.py +80 -0
  132. letta/schemas/source.py +62 -0
  133. letta/schemas/tool.py +143 -0
  134. letta/schemas/usage.py +18 -0
  135. letta/schemas/user.py +33 -0
  136. letta/server/__init__.py +0 -0
  137. letta/server/constants.py +6 -0
  138. letta/server/rest_api/__init__.py +0 -0
  139. letta/server/rest_api/admin/__init__.py +0 -0
  140. letta/server/rest_api/admin/agents.py +21 -0
  141. letta/server/rest_api/admin/tools.py +83 -0
  142. letta/server/rest_api/admin/users.py +98 -0
  143. letta/server/rest_api/app.py +193 -0
  144. letta/server/rest_api/auth/__init__.py +0 -0
  145. letta/server/rest_api/auth/index.py +43 -0
  146. letta/server/rest_api/auth_token.py +22 -0
  147. letta/server/rest_api/interface.py +726 -0
  148. letta/server/rest_api/routers/__init__.py +0 -0
  149. letta/server/rest_api/routers/openai/__init__.py +0 -0
  150. letta/server/rest_api/routers/openai/assistants/__init__.py +0 -0
  151. letta/server/rest_api/routers/openai/assistants/assistants.py +115 -0
  152. letta/server/rest_api/routers/openai/assistants/schemas.py +121 -0
  153. letta/server/rest_api/routers/openai/assistants/threads.py +336 -0
  154. letta/server/rest_api/routers/openai/chat_completions/__init__.py +0 -0
  155. letta/server/rest_api/routers/openai/chat_completions/chat_completions.py +131 -0
  156. letta/server/rest_api/routers/v1/__init__.py +15 -0
  157. letta/server/rest_api/routers/v1/agents.py +543 -0
  158. letta/server/rest_api/routers/v1/blocks.py +73 -0
  159. letta/server/rest_api/routers/v1/jobs.py +46 -0
  160. letta/server/rest_api/routers/v1/llms.py +28 -0
  161. letta/server/rest_api/routers/v1/organizations.py +61 -0
  162. letta/server/rest_api/routers/v1/sources.py +199 -0
  163. letta/server/rest_api/routers/v1/tools.py +103 -0
  164. letta/server/rest_api/routers/v1/users.py +109 -0
  165. letta/server/rest_api/static_files.py +74 -0
  166. letta/server/rest_api/utils.py +69 -0
  167. letta/server/server.py +1995 -0
  168. letta/server/startup.sh +8 -0
  169. letta/server/static_files/assets/index-0cbf7ad5.js +274 -0
  170. letta/server/static_files/assets/index-156816da.css +1 -0
  171. letta/server/static_files/assets/index-486e3228.js +274 -0
  172. letta/server/static_files/favicon.ico +0 -0
  173. letta/server/static_files/index.html +39 -0
  174. letta/server/static_files/memgpt_logo_transparent.png +0 -0
  175. letta/server/utils.py +46 -0
  176. letta/server/ws_api/__init__.py +0 -0
  177. letta/server/ws_api/example_client.py +104 -0
  178. letta/server/ws_api/interface.py +108 -0
  179. letta/server/ws_api/protocol.py +100 -0
  180. letta/server/ws_api/server.py +145 -0
  181. letta/settings.py +165 -0
  182. letta/streaming_interface.py +396 -0
  183. letta/system.py +207 -0
  184. letta/utils.py +1065 -0
  185. letta_nightly-0.1.7.dev20240924104148.dist-info/LICENSE +190 -0
  186. letta_nightly-0.1.7.dev20240924104148.dist-info/METADATA +98 -0
  187. letta_nightly-0.1.7.dev20240924104148.dist-info/RECORD +189 -0
  188. letta_nightly-0.1.7.dev20240924104148.dist-info/WHEEL +4 -0
  189. letta_nightly-0.1.7.dev20240924104148.dist-info/entry_points.txt +3 -0
letta/schemas/block.py ADDED
@@ -0,0 +1,135 @@
1
+ from typing import Optional
2
+
3
+ from pydantic import Field, model_validator
4
+ from typing_extensions import Self
5
+
6
+ from letta.schemas.letta_base import LettaBase
7
+
8
+ # block of the LLM context
9
+
10
+
11
class BaseBlock(LettaBase, validate_assignment=True):
    """Base block of the LLM context.

    A block is a named, size-limited section of text compiled into the LLM's
    context window. `validate_assignment=True` makes pydantic re-run field
    validators when attributes are assigned after construction.
    """

    __id_prefix__ = "block"

    # data value
    value: Optional[str] = Field(None, description="Value of the block.")
    limit: int = Field(2000, description="Character limit of the block.")

    name: Optional[str] = Field(None, description="Name of the block.")
    template: bool = Field(False, description="Whether the block is a template (e.g. saved human/persona options).")
    label: Optional[str] = Field(None, description="Label of the block (e.g. 'human', 'persona').")

    # metadata
    description: Optional[str] = Field(None, description="Description of the block.")
    metadata_: Optional[dict] = Field({}, description="Metadata of the block.")

    # associated user/agent
    user_id: Optional[str] = Field(None, description="The unique identifier of the user associated with the block.")

    @model_validator(mode="after")
    def verify_char_limit(self) -> Self:
        """Reject values whose rendered length exceeds `limit`.

        BUG FIX: the original enforced this with `assert`, which is silently
        stripped when Python runs with `-O`; an explicit comparison is always
        enforced.
        """
        if len(self) > self.limit:
            raise ValueError(f"Edit failed: Exceeds {self.limit} character limit (requested {len(self)}) - {str(self)}.")
        return self

    @model_validator(mode="after")
    def ensure_label(self) -> Self:
        """Default `label` to `name` when no explicit label was provided."""
        if not self.label:
            self.label = self.name
        return self

    def __len__(self):
        # Length of the block as rendered into the context window.
        return len(str(self))

    def __str__(self) -> str:
        # NOTE(review): `value` is annotated Optional[str] but the list branch
        # suggests legacy data may store a list — confirm against callers.
        if isinstance(self.value, list):
            return ",".join(self.value)
        elif isinstance(self.value, str):
            return self.value
        else:
            return ""

    def __setattr__(self, name, value):
        """Run full-model validation whenever `value` is updated.

        `validate_assignment=True` only re-validates the assigned field; a
        whole-model pass re-checks the character limit as well.
        """
        super().__setattr__(name, value)
        if name == "value":
            # Use the pydantic-v2 API (this file already uses `model_validator`);
            # `cls.validate()` and `.dict()` are deprecated v1 aliases in v2.
            self.__class__.model_validate(self.model_dump(exclude_unset=True))
65
+
66
+
67
class Block(BaseBlock):
    """An editable, reserved section of the LLM's context window.

    `Block` objects are contained in a `Memory` object, which is responsible
    for editing the block values.

    Parameters:
        name (str): The name of the block.
        value (str): The value of the block. This is the string that is represented in the context window.
        limit (int): The character limit of the block.
        template (bool): Whether the block is a template (e.g. saved human/persona options). Non-template blocks are not stored in the database and are ephemeral, while templated blocks are stored in the database.
        label (str): The label of the block (e.g. 'human', 'persona'). This defines a category for the block.
        description (str): Description of the block.
        metadata_ (Dict): Metadata of the block.
        user_id (str): The unique identifier of the user associated with the block.
    """

    # Persisted blocks carry a generated, prefixed id and a required value.
    id: str = BaseBlock.generate_id_field()
    value: str = Field(..., description="Value of the block.")
84
+
85
+
86
class Human(Block):
    """Human block of the LLM context"""

    # Fixed label so human blocks are always categorized as 'human'.
    label: str = "human"


class Persona(Block):
    """Persona block of the LLM context"""

    # Fixed label so persona blocks are always categorized as 'persona'.
    label: str = "persona"
96
+
97
+
98
class CreateBlock(BaseBlock):
    """Create a block"""

    # Creation via this schema always produces a saved (template) block.
    template: bool = True
    label: str = Field(..., description="Label of the block.")


class CreatePersona(BaseBlock):
    """Create a persona block"""

    # Persona creation is pre-labelled; only the value etc. need be supplied.
    template: bool = True
    label: str = "persona"


class CreateHuman(BaseBlock):
    """Create a human block"""

    # Human creation is pre-labelled; only the value etc. need be supplied.
    template: bool = True
    label: str = "human"
117
+
118
+
119
class UpdateBlock(BaseBlock):
    """Update a block"""

    # The id of the block being updated is always required.
    id: str = Field(..., description="The unique identifier of the block.")
    # NOTE(review): the limit default here (2000) means an update that omits
    # `limit` resets it rather than leaving it unchanged — confirm intended.
    limit: Optional[int] = Field(2000, description="Character limit of the block.")


class UpdatePersona(UpdateBlock):
    """Update a persona block"""

    label: str = "persona"


class UpdateHuman(UpdateBlock):
    """Update a human block"""

    label: str = "human"
@@ -0,0 +1,21 @@
1
+ from typing import Dict, Optional
2
+
3
+ from pydantic import Field
4
+
5
+ from letta.schemas.letta_base import LettaBase
6
+
7
+
8
class DocumentBase(LettaBase):
    """Base class for document schemas"""

    # Document ids are prefixed with "doc".
    __id_prefix__ = "doc"


class Document(DocumentBase):
    """Representation of a single document (broken up into `Passage` objects)"""

    id: str = DocumentBase.generate_id_field()
    text: str = Field(..., description="The text of the document.")
    source_id: str = Field(..., description="The unique identifier of the source associated with the document.")
    # No default supplied, so user_id is required despite lacking `...`.
    user_id: str = Field(description="The unique identifier of the user associated with the document.")
    metadata_: Optional[Dict] = Field({}, description="The metadata of the document.")
@@ -0,0 +1,54 @@
1
+ from typing import Optional
2
+
3
+ from pydantic import BaseModel, Field
4
+
5
+
6
class EmbeddingConfig(BaseModel):
    """Embedding model configuration.

    Specifies all the information necessary to access an embedding model for
    use with Letta, except for secret keys.

    Attributes:
        embedding_endpoint_type (str): The endpoint type for the model.
        embedding_endpoint (str): The endpoint for the model.
        embedding_model (str): The model for the embedding.
        embedding_dim (int): The dimension of the embedding.
        embedding_chunk_size (int): The chunk size of the embedding.
        azure_endpoint (:obj:`str`, optional): The Azure endpoint for the model (Azure only).
        azure_version (str): The Azure version for the model (Azure only).
        azure_deployment (str): The Azure deployment for the model (Azure only).
    """

    embedding_endpoint_type: str = Field(..., description="The endpoint type for the model.")
    embedding_endpoint: Optional[str] = Field(None, description="The endpoint for the model (`None` if local).")
    embedding_model: str = Field(..., description="The model for the embedding.")
    embedding_dim: int = Field(..., description="The dimension of the embedding.")
    embedding_chunk_size: Optional[int] = Field(300, description="The chunk size of the embedding.")

    # azure only
    azure_endpoint: Optional[str] = Field(None, description="The Azure endpoint for the model.")
    azure_version: Optional[str] = Field(None, description="The Azure version for the model.")
    azure_deployment: Optional[str] = Field(None, description="The Azure deployment for the model.")

    @classmethod
    def default_config(cls, model_name: Optional[str] = None, provider: Optional[str] = None):
        """Return a preset configuration for a known model name.

        Raises:
            ValueError: if no preset matches `model_name`/`provider`.
        """
        # provider="openai" with no explicit model falls back to ada-002
        wants_openai = model_name == "text-embedding-ada-002" or (not model_name and provider == "openai")
        if wants_openai:
            return cls(
                embedding_model="text-embedding-ada-002",
                embedding_endpoint_type="openai",
                embedding_endpoint="https://api.openai.com/v1",
                embedding_dim=1536,
                embedding_chunk_size=300,
            )
        if model_name == "letta":
            return cls(
                embedding_endpoint="https://embeddings.memgpt.ai",
                embedding_model="BAAI/bge-large-en-v1.5",
                embedding_dim=1024,
                embedding_chunk_size=300,
                embedding_endpoint_type="hugging-face",
            )
        raise ValueError(f"Model {model_name} not supported.")
letta/schemas/enums.py ADDED
@@ -0,0 +1,35 @@
1
+ from enum import Enum
2
+
3
+
4
class MessageRole(str, Enum):
    """Role of the author of a message (OpenAI-style chat roles)."""

    assistant = "assistant"
    user = "user"
    tool = "tool"
    function = "function"
    system = "system"


class OptionState(str, Enum):
    """Useful for kwargs that are bool + default option"""

    YES = "yes"
    NO = "no"
    DEFAULT = "default"


class JobStatus(str, Enum):
    """Status of the job."""

    created = "created"
    running = "running"
    completed = "completed"
    failed = "failed"
    pending = "pending"


class MessageStreamStatus(str, Enum):
    """Sentinel markers emitted on a streaming response."""

    done_generation = "[DONE_GEN]"
    done_step = "[DONE_STEP]"
    done = "[DONE]"
letta/schemas/job.py ADDED
@@ -0,0 +1,38 @@
1
+ from datetime import datetime
2
+ from typing import Optional
3
+
4
+ from pydantic import Field
5
+
6
+ from letta.schemas.enums import JobStatus
7
+ from letta.schemas.letta_base import LettaBase
8
+ from letta.utils import get_utc_time
9
+
10
+
11
class JobBase(LettaBase):
    # Common base for job schemas; ids are prefixed with "job".
    __id_prefix__ = "job"
    metadata_: Optional[dict] = Field({}, description="The metadata of the job.")


class Job(JobBase):
    """
    Representation of offline jobs, used for tracking status of data loading tasks (involving parsing and embedding documents).

    Parameters:
        id (str): The unique identifier of the job.
        status (JobStatus): The status of the job.
        created_at (datetime): The unix timestamp of when the job was created.
        completed_at (datetime): The unix timestamp of when the job was completed.
        user_id (str): The unique identifier of the user associated with the job.

    """

    id: str = JobBase.generate_id_field()
    status: JobStatus = Field(default=JobStatus.created, description="The status of the job.")
    created_at: datetime = Field(default_factory=get_utc_time, description="The unix timestamp of when the job was created.")
    completed_at: Optional[datetime] = Field(None, description="The unix timestamp of when the job was completed.")
    user_id: str = Field(..., description="The unique identifier of the user associated with the job.")


class JobUpdate(JobBase):
    # Patch schema for updating an existing job.
    id: str = Field(..., description="The unique identifier of the job.")
    # NOTE(review): `Field(...)` makes `status` required even though it is
    # Optional — callers must always pass it (possibly None); confirm intended.
    status: Optional[JobStatus] = Field(..., description="The status of the job.")
@@ -0,0 +1,80 @@
1
+ import uuid
2
+ from logging import getLogger
3
+ from typing import Optional
4
+ from uuid import UUID
5
+
6
+ from pydantic import BaseModel, ConfigDict, Field, field_validator
7
+
8
+ # from: https://gist.github.com/norton120/22242eadb80bf2cf1dd54a961b151c61
9
+
10
+
11
+ logger = getLogger(__name__)
12
+
13
+
14
class LettaBase(BaseModel):
    """Base schema for Letta schemas (does not include model provider schemas, e.g. OpenAI)"""

    model_config = ConfigDict(
        # allows you to use the snake or camelcase names in your code (ie user_id or userId)
        populate_by_name=True,
        # allows you to dump a sqlalchemy object directly (ie PersistedAddress.model_validate(SQLAdress))
        from_attributes=True,
        # throw errors if attributes are given that don't belong
        extra="forbid",
    )

    @classmethod
    def generate_id_field(cls, prefix: Optional[str] = None) -> "Field":
        """Build the `id` Field for a subclass: prefixed-uuid default factory,
        regex pattern, description, and schema examples."""
        prefix = prefix or cls.__id_prefix__

        # TODO: generate ID from regex pattern?
        def _generate_id() -> str:
            return f"{prefix}-{uuid.uuid4()}"

        return Field(
            ...,
            description=cls._id_description(prefix),
            pattern=cls._id_regex_pattern(prefix),
            # BUG FIX: `_id_example` already returns a list; wrapping it in
            # another list produced a doubly-nested `examples=[["..."]]`
            # in the generated JSON schema.
            examples=cls._id_example(prefix),
            default_factory=_generate_id,
        )

    @classmethod
    def _id_regex_pattern(cls, prefix: str):
        """generates the regex pattern for a given id"""
        # NOTE: only the prefix and the first 8-hex-char uuid group are
        # anchored; the remaining uuid groups are currently unchecked.
        return (
            r"^" + prefix + r"-"  # prefix string
            r"[a-fA-F0-9]{8}"  # 8 hexadecimal characters
        )

    @classmethod
    def _id_example(cls, prefix: str):
        """generates a list containing one example id for a given prefix"""
        return [prefix + "-123e4567-e89b-12d3-a456-426614174000"]

    @classmethod
    def _id_description(cls, prefix: str):
        """generates the Field description text for a given prefix"""
        return f"The human-friendly ID of the {prefix.capitalize()}"

    @field_validator("id", check_fields=False, mode="before")
    @classmethod
    def allow_bare_uuids(cls, v, values):
        """to ease the transition to stripe ids,
        we allow bare uuids and convert them with a warning
        """
        _ = values  # for SCA
        if isinstance(v, UUID):
            logger.warning("Bare UUIDs are deprecated, please use the full prefixed id!")
            return f"{cls.__id_prefix__}-{v}"
        return v
@@ -0,0 +1,175 @@
1
+ import json
2
+ from datetime import datetime, timezone
3
+ from typing import Annotated, Literal, Optional, Union
4
+
5
+ from pydantic import BaseModel, Field, field_serializer, field_validator
6
+
7
+ # Letta API style responses (intended to be easier to use vs getting true Message types)
8
+
9
+
10
class LettaMessage(BaseModel):
    """
    Base class for simplified Letta message response type. This is intended to be used for developers who want the internal monologue, function calls, and function returns in a simplified format that does not include additional information other than the content and timestamp.

    Attributes:
        id (str): The ID of the message
        date (datetime): The date the message was created in ISO format
    """

    # NOTE: subclasses participate in a Pydantic discriminated union keyed on
    # their `message_type` attribute.
    # https://docs.pydantic.dev/latest/concepts/unions/#discriminated-unions

    id: str
    date: datetime

    @field_serializer("date")
    def serialize_datetime(self, dt: datetime, _info):
        """Serialize `date` as an ISO-8601 string, coercing naive datetimes to UTC."""
        is_naive = dt.tzinfo is None or dt.tzinfo.utcoffset(dt) is None
        if is_naive:
            dt = dt.replace(tzinfo=timezone.utc)
        # Truncate to whole seconds: microseconds are not produced consistently
        # upstream. TODO figure out why we don't always get them (get_utc_time() does).
        return dt.isoformat(timespec="seconds")
33
+
34
+
35
class SystemMessage(LettaMessage):
    """
    A message generated by the system. Never streamed back on a response, only used for cursor pagination.

    Attributes:
        message (str): The message sent by the system
        id (str): The ID of the message
        date (datetime): The date the message was created in ISO format
    """

    # Discriminator value for the message_type union.
    message_type: Literal["system_message"] = "system_message"
    message: str


class UserMessage(LettaMessage):
    """
    A message sent by the user. Never streamed back on a response, only used for cursor pagination.

    Attributes:
        message (str): The message sent by the user
        id (str): The ID of the message
        date (datetime): The date the message was created in ISO format
    """

    # Discriminator value for the message_type union.
    message_type: Literal["user_message"] = "user_message"
    message: str


class InternalMonologue(LettaMessage):
    """
    Representation of an agent's internal monologue.

    Attributes:
        internal_monologue (str): The internal monologue of the agent
        id (str): The ID of the message
        date (datetime): The date the message was created in ISO format
    """

    # Discriminator value for the message_type union.
    message_type: Literal["internal_monologue"] = "internal_monologue"
    internal_monologue: str
75
+
76
+
77
class FunctionCall(BaseModel):
    """A complete function call: name plus JSON-encoded arguments."""

    name: str
    arguments: str


class FunctionCallDelta(BaseModel):
    """A streamed fragment of a function call; either field may be absent."""

    name: Optional[str]
    arguments: Optional[str]

    # NOTE: this is a workaround to exclude None values from the JSON dump,
    # since the OpenAI style of returning chunks doesn't include keys with null values
    def model_dump(self, *args, **kwargs):
        kwargs["exclude_none"] = True
        return super().model_dump(*args, **kwargs)

    def json(self, *args, **kwargs):
        payload = self.model_dump(exclude_none=True)
        return json.dumps(payload, *args, **kwargs)
96
+
97
+
98
class FunctionCallMessage(LettaMessage):
    """
    A message representing a request to call a function (generated by the LLM to trigger function execution).

    Attributes:
        function_call (Union[FunctionCall, FunctionCallDelta]): The function call
        id (str): The ID of the message
        date (datetime): The date the message was created in ISO format
    """

    message_type: Literal["function_call"] = "function_call"
    function_call: Union[FunctionCall, FunctionCallDelta]

    # NOTE: this is required for the FunctionCallDelta exclude_none to work correctly
    # (the nested model is dumped by pydantic before this override can apply
    # exclude_none, so the inner dict is filtered manually below).
    def model_dump(self, *args, **kwargs):
        kwargs["exclude_none"] = True
        data = super().model_dump(*args, **kwargs)
        if isinstance(data["function_call"], dict):
            # Strip None entries from the nested function_call payload.
            data["function_call"] = {k: v for k, v in data["function_call"].items() if v is not None}
        return data

    class Config:
        # Ensure json serialization of the nested models also drops None values.
        json_encoders = {
            FunctionCallDelta: lambda v: v.model_dump(exclude_none=True),
            FunctionCall: lambda v: v.model_dump(exclude_none=True),
        }

    # NOTE: this is required to cast dicts into FunctionCallMessage objects
    # Without this extra validator, Pydantic will throw an error if 'name' or 'arguments' are None
    # (instead of properly casting to FunctionCallDelta instead of FunctionCall)
    @field_validator("function_call", mode="before")
    @classmethod
    def validate_function_call(cls, v):
        # Dicts with both keys become a full FunctionCall; partial dicts
        # (streaming chunks) become a FunctionCallDelta.
        if isinstance(v, dict):
            if "name" in v and "arguments" in v:
                return FunctionCall(name=v["name"], arguments=v["arguments"])
            elif "name" in v or "arguments" in v:
                return FunctionCallDelta(name=v.get("name"), arguments=v.get("arguments"))
            else:
                raise ValueError("function_call must contain either 'name' or 'arguments'")
        return v
139
+
140
+
141
class FunctionReturn(LettaMessage):
    """
    A message representing the return value of a function call (generated by Letta executing the requested function).

    Attributes:
        function_return (str): The return value of the function
        status (Literal["success", "error"]): The status of the function call
        id (str): The ID of the message
        date (datetime): The date the message was created in ISO format
    """

    # Discriminator value for the message_type union.
    message_type: Literal["function_return"] = "function_return"
    function_return: str
    status: Literal["success", "error"]
155
+
156
+
157
+ # Legacy Letta API had an additional type "assistant_message" and the "function_call" was a formatted string
158
+
159
+
160
class AssistantMessage(LettaMessage):
    """Legacy-style message carrying the assistant's reply as a plain string."""

    message_type: Literal["assistant_message"] = "assistant_message"
    assistant_message: str


class LegacyFunctionCallMessage(LettaMessage):
    # In the legacy API the function call was a single formatted string.
    function_call: str


# Union of the message types the legacy Letta API could return.
LegacyLettaMessage = Union[InternalMonologue, AssistantMessage, LegacyFunctionCallMessage, FunctionReturn]


# Discriminated union over `message_type` (see Pydantic discriminated unions).
LettaMessageUnion = Annotated[
    Union[SystemMessage, UserMessage, InternalMonologue, FunctionCallMessage, FunctionReturn, AssistantMessage],
    Field(discriminator="message_type"),
]
@@ -0,0 +1,23 @@
1
+ from typing import List
2
+
3
+ from pydantic import BaseModel, Field
4
+
5
+ from letta.schemas.message import MessageCreate
6
+
7
+
8
class LettaRequest(BaseModel):
    """Request body for sending one or more messages to an agent."""

    messages: List[MessageCreate] = Field(..., description="The messages to be sent to the agent.")
    run_async: bool = Field(default=False, description="Whether to asynchronously send the messages to the agent.")  # TODO: implement

    # Streaming controls: stream_tokens requires stream_steps to also be True.
    stream_steps: bool = Field(
        default=False, description="Flag to determine if the response should be streamed. Set to True for streaming agent steps."
    )
    stream_tokens: bool = Field(
        default=False,
        description="Flag to determine if individual tokens should be streamed. Set to True for token streaming (requires stream_steps = True).",
    )

    # Selects between raw `Message` objects and simplified `LettaMessage` output.
    return_message_object: bool = Field(
        default=False,
        description="Set True to return the raw Message object. Set False to return the Message in the format of the Letta API.",
    )
+ )
@@ -0,0 +1,28 @@
1
+ from typing import List, Union
2
+
3
+ from pydantic import BaseModel, Field
4
+
5
+ from letta.schemas.enums import MessageStreamStatus
6
+ from letta.schemas.letta_message import LettaMessage
7
+ from letta.schemas.message import Message
8
+ from letta.schemas.usage import LettaUsageStatistics
9
+
10
+ # TODO: consider moving into own file
11
+
12
+
13
class LettaResponse(BaseModel):
    """
    Response object from an agent interaction, consisting of the new messages generated by the agent and usage statistics.
    The type of the returned messages can be either `Message` or `LettaMessage`, depending on what was specified in the request.

    Attributes:
        messages (List[Union[Message, LettaMessage]]): The messages returned by the agent.
        usage (LettaUsageStatistics): The usage statistics
    """

    messages: Union[List[Message], List[LettaMessage]] = Field(..., description="The messages returned by the agent.")
    usage: LettaUsageStatistics = Field(..., description="The usage statistics of the agent.")


# The streaming response is either [DONE_GEN], [DONE_STEP], [DONE], an error, or a LettaMessage
LettaStreamingResponse = Union[LettaMessage, MessageStreamStatus]
@@ -0,0 +1,54 @@
1
+ from typing import Optional
2
+
3
+ from pydantic import BaseModel, ConfigDict, Field
4
+
5
+
6
class LLMConfig(BaseModel):
    """
    Configuration for a Language Model (LLM) model. This object specifies all the information necessary to access an LLM model to usage with Letta, except for secret keys.

    Attributes:
        model (str): The name of the LLM model.
        model_endpoint_type (str): The endpoint type for the model.
        model_endpoint (str): The endpoint for the model.
        model_wrapper (str): The wrapper for the model.
        context_window (int): The context window size for the model.
    """

    # TODO: 🤮 don't default to a vendor! bug city!
    model: str = Field(..., description="LLM model name. ")
    model_endpoint_type: str = Field(..., description="The endpoint type for the model.")
    model_endpoint: str = Field(..., description="The endpoint for the model.")
    model_wrapper: Optional[str] = Field(None, description="The wrapper for the model.")
    context_window: int = Field(..., description="The context window size for the model.")

    # FIXME hack to silence pydantic protected namespace warning
    model_config = ConfigDict(protected_namespaces=())

    @classmethod
    def default_config(cls, model_name: str):
        """Return a preset configuration for a supported model name.

        Raises:
            ValueError: if `model_name` is not one of the known presets.
        """
        # Dispatch table of known presets, keyed by model name.
        presets = {
            "gpt-4": dict(
                model="gpt-4",
                model_endpoint_type="openai",
                model_endpoint="https://api.openai.com/v1",
                model_wrapper=None,
                context_window=8192,
            ),
            "gpt-4o-mini": dict(
                model="gpt-4o-mini",
                model_endpoint_type="openai",
                model_endpoint="https://api.openai.com/v1",
                model_wrapper=None,
                context_window=128000,
            ),
            "letta": dict(
                model="memgpt-openai",
                model_endpoint_type="openai",
                model_endpoint="https://inference.memgpt.ai",
                context_window=16384,
            ),
        }
        if model_name not in presets:
            raise ValueError(f"Model {model_name} not supported.")
        return cls(**presets[model_name])