letta-nightly 0.4.1.dev20241004012408__py3-none-any.whl → 0.4.1.dev20241005104008__py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of letta-nightly might be problematic.

Files changed (34)
  1. letta/cli/cli.py +30 -365
  2. letta/cli/cli_config.py +70 -27
  3. letta/client/client.py +103 -11
  4. letta/config.py +80 -80
  5. letta/constants.py +6 -0
  6. letta/credentials.py +10 -1
  7. letta/errors.py +63 -5
  8. letta/llm_api/llm_api_tools.py +110 -52
  9. letta/local_llm/chat_completion_proxy.py +0 -3
  10. letta/main.py +1 -2
  11. letta/metadata.py +12 -0
  12. letta/providers.py +232 -0
  13. letta/schemas/block.py +1 -1
  14. letta/schemas/letta_request.py +17 -0
  15. letta/schemas/letta_response.py +11 -0
  16. letta/schemas/llm_config.py +18 -2
  17. letta/schemas/message.py +40 -13
  18. letta/server/rest_api/app.py +5 -0
  19. letta/server/rest_api/interface.py +115 -24
  20. letta/server/rest_api/routers/v1/agents.py +36 -3
  21. letta/server/rest_api/routers/v1/llms.py +6 -2
  22. letta/server/server.py +60 -87
  23. letta/server/static_files/assets/index-3ab03d5b.css +1 -0
  24. letta/server/static_files/assets/{index-4d08d8a3.js → index-9a9c449b.js} +69 -69
  25. letta/server/static_files/index.html +2 -2
  26. letta/settings.py +144 -114
  27. letta/utils.py +6 -1
  28. {letta_nightly-0.4.1.dev20241004012408.dist-info → letta_nightly-0.4.1.dev20241005104008.dist-info}/METADATA +1 -1
  29. {letta_nightly-0.4.1.dev20241004012408.dist-info → letta_nightly-0.4.1.dev20241005104008.dist-info}/RECORD +32 -32
  30. letta/local_llm/groq/api.py +0 -97
  31. letta/server/static_files/assets/index-156816da.css +0 -1
  32. {letta_nightly-0.4.1.dev20241004012408.dist-info → letta_nightly-0.4.1.dev20241005104008.dist-info}/LICENSE +0 -0
  33. {letta_nightly-0.4.1.dev20241004012408.dist-info → letta_nightly-0.4.1.dev20241005104008.dist-info}/WHEEL +0 -0
  34. {letta_nightly-0.4.1.dev20241004012408.dist-info → letta_nightly-0.4.1.dev20241005104008.dist-info}/entry_points.txt +0 -0
letta/client/client.py CHANGED
@@ -272,6 +272,8 @@ class RESTClient(AbstractClient):
         token: str,
         api_prefix: str = "v1",
         debug: bool = False,
+        default_llm_config: Optional[LLMConfig] = None,
+        default_embedding_config: Optional[EmbeddingConfig] = None,
     ):
         """
         Initializes a new instance of Client class.
@@ -280,11 +282,14 @@ class RESTClient(AbstractClient):
             auto_save (bool): Whether to automatically save changes.
             user_id (str): The user ID.
             debug (bool): Whether to print debug information.
+            default
         """
         super().__init__(debug=debug)
         self.base_url = base_url
         self.api_prefix = api_prefix
         self.headers = {"accept": "application/json", "authorization": f"Bearer {token}"}
+        self._default_llm_config = default_llm_config
+        self._default_embedding_config = default_embedding_config

     def list_agents(self) -> List[AgentState]:
         response = requests.get(f"{self.base_url}/{self.api_prefix}/agents", headers=self.headers)
@@ -315,8 +320,8 @@ class RESTClient(AbstractClient):
         self,
         name: Optional[str] = None,
         # model configs
-        embedding_config: Optional[EmbeddingConfig] = None,
-        llm_config: Optional[LLMConfig] = None,
+        embedding_config: EmbeddingConfig = None,
+        llm_config: LLMConfig = None,
         # memory
         memory: Memory = ChatMemory(human=get_human_text(DEFAULT_HUMAN), persona=get_persona_text(DEFAULT_PERSONA)),
         # system
@@ -364,6 +369,10 @@ class RESTClient(AbstractClient):
             tool = self.create_tool(func, name=func_name, tags=["memory", "letta-base"], update=True)
             tool_names.append(tool.name)

+        # check if default configs are provided
+        assert embedding_config or self._default_embedding_config, f"Embedding config must be provided"
+        assert llm_config or self._default_llm_config, f"LLM config must be provided"
+
         # create agent
         request = CreateAgent(
             name=name,
@@ -372,8 +381,8 @@ class RESTClient(AbstractClient):
             memory=memory,
             tools=tool_names,
             system=system,
-            llm_config=llm_config,
-            embedding_config=embedding_config,
+            llm_config=llm_config if llm_config else self._default_llm_config,
+            embedding_config=embedding_config if embedding_config else self._default_embedding_config,
         )

         response = requests.post(f"{self.base_url}/{self.api_prefix}/agents", json=request.model_dump(), headers=self.headers)
@@ -1326,6 +1335,48 @@ class RESTClient(AbstractClient):
             raise ValueError(f"Failed to get tool: {response.text}")
         return response.json()

+    def set_default_llm_config(self, llm_config: LLMConfig):
+        """
+        Set the default LLM configuration
+
+        Args:
+            llm_config (LLMConfig): LLM configuration
+        """
+        self._default_llm_config = llm_config
+
+    def set_default_embedding_config(self, embedding_config: EmbeddingConfig):
+        """
+        Set the default embedding configuration
+
+        Args:
+            embedding_config (EmbeddingConfig): Embedding configuration
+        """
+        self._default_embedding_config = embedding_config
+
+    def list_llm_configs(self) -> List[LLMConfig]:
+        """
+        List available LLM configurations
+
+        Returns:
+            configs (List[LLMConfig]): List of LLM configurations
+        """
+        response = requests.get(f"{self.base_url}/{self.api_prefix}/models", headers=self.headers)
+        if response.status_code != 200:
+            raise ValueError(f"Failed to list LLM configs: {response.text}")
+        return [LLMConfig(**config) for config in response.json()]
+
+    def list_embedding_configs(self) -> List[EmbeddingConfig]:
+        """
+        List available embedding configurations
+
+        Returns:
+            configs (List[EmbeddingConfig]): List of embedding configurations
+        """
+        response = requests.get(f"{self.base_url}/{self.api_prefix}/models/embedding", headers=self.headers)
+        if response.status_code != 200:
+            raise ValueError(f"Failed to list embedding configs: {response.text}")
+        return [EmbeddingConfig(**config) for config in response.json()]
+

 class LocalClient(AbstractClient):
     """
@@ -1344,6 +1395,8 @@ class LocalClient(AbstractClient):
         auto_save: bool = False,
         user_id: Optional[str] = None,
         debug: bool = False,
+        default_llm_config: Optional[LLMConfig] = None,
+        default_embedding_config: Optional[EmbeddingConfig] = None,
     ):
         """
         Initializes a new instance of Client class.
@@ -1359,6 +1412,11 @@ class LocalClient(AbstractClient):
         letta.utils.DEBUG = debug
         logging.getLogger().setLevel(logging.CRITICAL)

+        # save default model config
+        self._default_llm_config = default_llm_config
+        self._default_embedding_config = default_embedding_config
+
+        # create server
         self.interface = QueuingInterface(debug=debug)
         self.server = SyncServer(default_interface_factory=lambda: self.interface)

@@ -1405,8 +1463,8 @@ class LocalClient(AbstractClient):
         self,
         name: Optional[str] = None,
         # model configs
-        embedding_config: Optional[EmbeddingConfig] = None,
-        llm_config: Optional[LLMConfig] = None,
+        embedding_config: EmbeddingConfig = None,
+        llm_config: LLMConfig = None,
         # memory
         memory: Memory = ChatMemory(human=get_human_text(DEFAULT_HUMAN), persona=get_persona_text(DEFAULT_PERSONA)),
         # system
@@ -1453,6 +1511,10 @@ class LocalClient(AbstractClient):

         self.interface.clear()

+        # check if default configs are provided
+        assert embedding_config or self._default_embedding_config, f"Embedding config must be provided"
+        assert llm_config or self._default_llm_config, f"LLM config must be provided"
+
         # create agent
         agent_state = self.server.create_agent(
             CreateAgent(
@@ -1462,8 +1524,8 @@ class LocalClient(AbstractClient):
                 memory=memory,
                 tools=tool_names,
                 system=system,
-                llm_config=llm_config,
-                embedding_config=embedding_config,
+                llm_config=llm_config if llm_config else self._default_llm_config,
+                embedding_config=embedding_config if embedding_config else self._default_embedding_config,
             ),
             user_id=self.user_id,
         )
@@ -1592,7 +1654,7 @@ class LocalClient(AbstractClient):
     # memory
     def get_in_context_memory(self, agent_id: str) -> Memory:
         """
-        Get the in-contxt (i.e. core) memory of an agent
+        Get the in-context (i.e. core) memory of an agent

         Args:
             agent_id (str): ID of the agent
@@ -2363,7 +2425,37 @@ class LocalClient(AbstractClient):
         return self.server.delete_block(id)

     def set_default_llm_config(self, llm_config: LLMConfig):
-        self.server.server_llm_config = llm_config
+        """
+        Set the default LLM configuration for agents.
+
+        Args:
+            llm_config (LLMConfig): LLM configuration
+        """
+        self._default_llm_config = llm_config

     def set_default_embedding_config(self, embedding_config: EmbeddingConfig):
-        self.server.server_embedding_config = embedding_config
+        """
+        Set the default embedding configuration for agents.
+
+        Args:
+            embedding_config (EmbeddingConfig): Embedding configuration
+        """
+        self._default_embedding_config = embedding_config
+
+    def list_llm_configs(self) -> List[LLMConfig]:
+        """
+        List available LLM configurations
+
+        Returns:
+            configs (List[LLMConfig]): List of LLM configurations
+        """
+        return self.server.list_llm_models()
+
+    def list_embedding_configs(self) -> List[EmbeddingConfig]:
+        """
+        List available embedding configurations
+
+        Returns:
+            configs (List[EmbeddingConfig]): List of embedding configurations
+        """
+        return self.server.list_embedding_models()
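
Note: the client.py changes above move default model selection from the server config into the client itself. A minimal usage sketch of the new API, assuming a running Letta server that advertises at least one model (the import path and base_url/token values are illustrative):

    from letta import RESTClient  # import path assumed for illustration

    client = RESTClient(base_url="http://localhost:8283", token="my-token")

    # Ask the server which model/embedding configs it advertises,
    # then install one pair as the client-wide defaults.
    llm_config = client.list_llm_configs()[0]
    embedding_config = client.list_embedding_configs()[0]
    client.set_default_llm_config(llm_config)
    client.set_default_embedding_config(embedding_config)

    # create_agent() now falls back to the defaults, so the per-call
    # llm_config/embedding_config arguments can be omitted.
    agent = client.create_agent(name="my-agent")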
letta/config.py CHANGED
@@ -55,10 +55,10 @@ class LettaConfig:
     human: str = DEFAULT_HUMAN

     # model parameters
-    default_llm_config: LLMConfig = None
+    # default_llm_config: LLMConfig = None

     # embedding parameters
-    default_embedding_config: EmbeddingConfig = None
+    # default_embedding_config: EmbeddingConfig = None

     # NONE OF THIS IS CONFIG ↓↓↓↓↓
     # @norton120 these are the metdadatastore
@@ -130,42 +130,42 @@ class LettaConfig:
         # read existing config
         config.read(config_path)

-        # Handle extraction of nested LLMConfig and EmbeddingConfig
-        llm_config_dict = {
-            # Extract relevant LLM configuration from the config file
-            "model": get_field(config, "model", "model"),
-            "model_endpoint": get_field(config, "model", "model_endpoint"),
-            "model_endpoint_type": get_field(config, "model", "model_endpoint_type"),
-            "model_wrapper": get_field(config, "model", "model_wrapper"),
-            "context_window": get_field(config, "model", "context_window"),
-        }
-        embedding_config_dict = {
-            # Extract relevant Embedding configuration from the config file
-            "embedding_endpoint": get_field(config, "embedding", "embedding_endpoint"),
-            "embedding_model": get_field(config, "embedding", "embedding_model"),
-            "embedding_endpoint_type": get_field(config, "embedding", "embedding_endpoint_type"),
-            "embedding_dim": get_field(config, "embedding", "embedding_dim"),
-            "embedding_chunk_size": get_field(config, "embedding", "embedding_chunk_size"),
-        }
-        # Remove null values
-        llm_config_dict = {k: v for k, v in llm_config_dict.items() if v is not None}
-        embedding_config_dict = {k: v for k, v in embedding_config_dict.items() if v is not None}
+        ## Handle extraction of nested LLMConfig and EmbeddingConfig
+        # llm_config_dict = {
+        #     # Extract relevant LLM configuration from the config file
+        #     "model": get_field(config, "model", "model"),
+        #     "model_endpoint": get_field(config, "model", "model_endpoint"),
+        #     "model_endpoint_type": get_field(config, "model", "model_endpoint_type"),
+        #     "model_wrapper": get_field(config, "model", "model_wrapper"),
+        #     "context_window": get_field(config, "model", "context_window"),
+        # }
+        # embedding_config_dict = {
+        #     # Extract relevant Embedding configuration from the config file
+        #     "embedding_endpoint": get_field(config, "embedding", "embedding_endpoint"),
+        #     "embedding_model": get_field(config, "embedding", "embedding_model"),
+        #     "embedding_endpoint_type": get_field(config, "embedding", "embedding_endpoint_type"),
+        #     "embedding_dim": get_field(config, "embedding", "embedding_dim"),
+        #     "embedding_chunk_size": get_field(config, "embedding", "embedding_chunk_size"),
+        # }
+        ## Remove null values
+        # llm_config_dict = {k: v for k, v in llm_config_dict.items() if v is not None}
+        # embedding_config_dict = {k: v for k, v in embedding_config_dict.items() if v is not None}
         # Correct the types that aren't strings
-        if "context_window" in llm_config_dict and llm_config_dict["context_window"] is not None:
-            llm_config_dict["context_window"] = int(llm_config_dict["context_window"])
-        if "embedding_dim" in embedding_config_dict and embedding_config_dict["embedding_dim"] is not None:
-            embedding_config_dict["embedding_dim"] = int(embedding_config_dict["embedding_dim"])
-        if "embedding_chunk_size" in embedding_config_dict and embedding_config_dict["embedding_chunk_size"] is not None:
-            embedding_config_dict["embedding_chunk_size"] = int(embedding_config_dict["embedding_chunk_size"])
-        # Construct the inner properties
-        llm_config = LLMConfig(**llm_config_dict)
-        embedding_config = EmbeddingConfig(**embedding_config_dict)
+        # if "context_window" in llm_config_dict and llm_config_dict["context_window"] is not None:
+        #     llm_config_dict["context_window"] = int(llm_config_dict["context_window"])
+        # if "embedding_dim" in embedding_config_dict and embedding_config_dict["embedding_dim"] is not None:
+        #     embedding_config_dict["embedding_dim"] = int(embedding_config_dict["embedding_dim"])
+        # if "embedding_chunk_size" in embedding_config_dict and embedding_config_dict["embedding_chunk_size"] is not None:
+        #     embedding_config_dict["embedding_chunk_size"] = int(embedding_config_dict["embedding_chunk_size"])
+        ## Construct the inner properties
+        # llm_config = LLMConfig(**llm_config_dict)
+        # embedding_config = EmbeddingConfig(**embedding_config_dict)

         # Everything else
         config_dict = {
             # Two prepared configs
-            "default_llm_config": llm_config,
-            "default_embedding_config": embedding_config,
+            # "default_llm_config": llm_config,
+            # "default_embedding_config": embedding_config,
             # Agent related
             "preset": get_field(config, "defaults", "preset"),
             "persona": get_field(config, "defaults", "persona"),
@@ -212,53 +212,53 @@ class LettaConfig:
         set_field(config, "defaults", "human", self.human)

         # model defaults
-        set_field(config, "model", "model", self.default_llm_config.model)
-        set_field(config, "model", "model_endpoint", self.default_llm_config.model_endpoint)
-        set_field(
-            config,
-            "model",
-            "model_endpoint_type",
-            self.default_llm_config.model_endpoint_type,
-        )
-        set_field(config, "model", "model_wrapper", self.default_llm_config.model_wrapper)
-        set_field(
-            config,
-            "model",
-            "context_window",
-            str(self.default_llm_config.context_window),
-        )
-
-        # embeddings
-        set_field(
-            config,
-            "embedding",
-            "embedding_endpoint_type",
-            self.default_embedding_config.embedding_endpoint_type,
-        )
-        set_field(
-            config,
-            "embedding",
-            "embedding_endpoint",
-            self.default_embedding_config.embedding_endpoint,
-        )
-        set_field(
-            config,
-            "embedding",
-            "embedding_model",
-            self.default_embedding_config.embedding_model,
-        )
-        set_field(
-            config,
-            "embedding",
-            "embedding_dim",
-            str(self.default_embedding_config.embedding_dim),
-        )
-        set_field(
-            config,
-            "embedding",
-            "embedding_chunk_size",
-            str(self.default_embedding_config.embedding_chunk_size),
-        )
+        # set_field(config, "model", "model", self.default_llm_config.model)
+        ##set_field(config, "model", "model_endpoint", self.default_llm_config.model_endpoint)
+        # set_field(
+        #     config,
+        #     "model",
+        #     "model_endpoint_type",
+        #     self.default_llm_config.model_endpoint_type,
+        # )
+        # set_field(config, "model", "model_wrapper", self.default_llm_config.model_wrapper)
+        # set_field(
+        #     config,
+        #     "model",
+        #     "context_window",
+        #     str(self.default_llm_config.context_window),
+        # )
+
+        ## embeddings
+        # set_field(
+        #     config,
+        #     "embedding",
+        #     "embedding_endpoint_type",
+        #     self.default_embedding_config.embedding_endpoint_type,
+        # )
+        # set_field(
+        #     config,
+        #     "embedding",
+        #     "embedding_endpoint",
+        #     self.default_embedding_config.embedding_endpoint,
+        # )
+        # set_field(
+        #     config,
+        #     "embedding",
+        #     "embedding_model",
+        #     self.default_embedding_config.embedding_model,
+        # )
+        # set_field(
+        #     config,
+        #     "embedding",
+        #     "embedding_dim",
+        #     str(self.default_embedding_config.embedding_dim),
+        # )
+        # set_field(
+        #     config,
+        #     "embedding",
+        #     "embedding_chunk_size",
+        #     str(self.default_embedding_config.embedding_chunk_size),
+        # )

         # archival storage
         set_field(config, "archival_storage", "type", self.archival_storage_type)
letta/constants.py CHANGED
@@ -46,6 +46,12 @@ BASE_TOOLS = [
     "archival_memory_search",
 ]

+# The name of the tool used to send message to the user
+# May not be relevant in cases where the agent has multiple ways to message to user (send_imessage, send_discord_mesasge, ...)
+# or in cases where the agent has no concept of messaging a user (e.g. a workflow agent)
+DEFAULT_MESSAGE_TOOL = "send_message"
+DEFAULT_MESSAGE_TOOL_KWARG = "message"
+
 # LOGGER_LOG_LEVEL is use to convert Text to Logging level value for logging mostly for Cli input to setting level
 LOGGER_LOG_LEVELS = {"CRITICAL": CRITICAL, "ERROR": ERROR, "WARN": WARN, "WARNING": WARNING, "INFO": INFO, "DEBUG": DEBUG, "NOTSET": NOTSET}

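Note: DEFAULT_MESSAGE_TOOL and DEFAULT_MESSAGE_TOOL_KWARG name the function call that carries the agent's user-facing text. A hedged sketch of the kind of filtering these constants enable (the function-call dict shape below is an assumption for illustration, not the exact letta schema):

    import json

    from letta.constants import DEFAULT_MESSAGE_TOOL, DEFAULT_MESSAGE_TOOL_KWARG

    def extract_user_visible_text(function_call: dict):
        # Only the designated message tool carries text meant for the user.
        if function_call.get("name") != DEFAULT_MESSAGE_TOOL:
            return None
        arguments = json.loads(function_call.get("arguments", "{}"))
        return arguments.get(DEFAULT_MESSAGE_TOOL_KWARG)

    # extract_user_visible_text({"name": "send_message", "arguments": '{"message": "Hi!"}'})
    # -> "Hi!"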
letta/credentials.py CHANGED
@@ -23,7 +23,7 @@ class LettaCredentials:
     google_ai_service_endpoint: Optional[str] = None

     # anthropic config
-    anthropic_key: Optional[str] = None
+    anthropic_key: Optional[str] = os.getenv("ANTHROPIC_API_KEY")

     # cohere config
     cohere_key: Optional[str] = None
@@ -31,6 +31,10 @@ class LettaCredentials:
     # azure config
     azure_auth_type: str = "api_key"
     azure_key: Optional[str] = None
+
+    # groq config
+    groq_key: Optional[str] = os.getenv("GROQ_API_KEY")
+
     # base llm / model
     azure_version: Optional[str] = None
     azure_endpoint: Optional[str] = None
@@ -77,6 +81,8 @@ class LettaCredentials:
             "anthropic_key": get_field(config, "anthropic", "key"),
             # cohere
             "cohere_key": get_field(config, "cohere", "key"),
+            # groq
+            "groq_key": get_field(config, "groq", "key"),
             # open llm
             "openllm_auth_type": get_field(config, "openllm", "auth_type"),
             "openllm_key": get_field(config, "openllm", "key"),
@@ -119,6 +125,9 @@ class LettaCredentials:
         # cohere
         set_field(config, "cohere", "key", self.cohere_key)

+        # groq
+        set_field(config, "groq", "key", self.groq_key)
+
         # openllm config
         set_field(config, "openllm", "auth_type", self.openllm_auth_type)
         set_field(config, "openllm", "key", self.openllm_key)
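
Note: anthropic_key and groq_key now default to environment variables. Because those defaults are evaluated when the class body runs (i.e. when letta.credentials is first imported), the variables must be set before that import. A minimal sketch (the key values are placeholders):

    import os

    # Must happen before letta.credentials is imported.
    os.environ["ANTHROPIC_API_KEY"] = "sk-ant-..."
    os.environ["GROQ_API_KEY"] = "gsk_..."

    from letta.credentials import LettaCredentials

    creds = LettaCredentials()
    assert creds.groq_key == os.environ["GROQ_API_KEY"]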
letta/errors.py CHANGED
@@ -1,8 +1,20 @@
-class LLMError(Exception):
-    """Base class for all LLM-related errors."""
+import json
+from typing import TYPE_CHECKING, List, Optional, Union

+# Avoid circular imports
+if TYPE_CHECKING:
+    from letta.schemas.message import Message

-class LLMJSONParsingError(LLMError):
+
+class LettaError(Exception):
+    """Base class for all Letta related errors."""
+
+
+class LLMError(LettaError):
+    pass
+
+
+class LLMJSONParsingError(LettaError):
     """Exception raised for errors in the JSON parsing process."""

     def __init__(self, message="Error parsing JSON generated by LLM"):
@@ -10,7 +22,7 @@ class LLMJSONParsingError(LLMError):
         super().__init__(self.message)


-class LocalLLMError(LLMError):
+class LocalLLMError(LettaError):
     """Generic catch-all error for local LLM problems"""

     def __init__(self, message="Encountered an error while running local LLM"):
@@ -18,9 +30,55 @@ class LocalLLMError(LLMError):
         super().__init__(self.message)


-class LocalLLMConnectionError(LLMError):
+class LocalLLMConnectionError(LettaError):
     """Error for when local LLM cannot be reached with provided IP/port"""

     def __init__(self, message="Could not connect to local LLM"):
         self.message = message
         super().__init__(self.message)
+
+
+class LettaMessageError(LettaError):
+    """Base error class for handling message-related errors."""
+
+    messages: List[Union["Message", "LettaMessage"]]
+    default_error_message: str = "An error occurred with the message."
+
+    def __init__(self, *, messages: List[Union["Message", "LettaMessage"]], explanation: Optional[str] = None) -> None:
+        error_msg = self.construct_error_message(messages, self.default_error_message, explanation)
+        super().__init__(error_msg)
+        self.messages = messages
+
+    @staticmethod
+    def construct_error_message(messages: List[Union["Message", "LettaMessage"]], error_msg: str, explanation: Optional[str] = None) -> str:
+        """Helper method to construct a clean and formatted error message."""
+        if explanation:
+            error_msg += f" (Explanation: {explanation})"
+
+        # Pretty print out message JSON
+        message_json = json.dumps([message.model_dump_json(indent=4) for message in messages], indent=4)
+        return f"{error_msg}\n\n{message_json}"
+
+
+class MissingFunctionCallError(LettaMessageError):
+    """Error raised when a message is missing a function call."""
+
+    default_error_message = "The message is missing a function call."
+
+
+class InvalidFunctionCallError(LettaMessageError):
+    """Error raised when a message uses an invalid function call."""
+
+    default_error_message = "The message uses an invalid function call or has improper usage of a function call."
+
+
+class MissingInnerMonologueError(LettaMessageError):
+    """Error raised when a message is missing an inner monologue."""
+
+    default_error_message = "The message is missing an inner monologue."
+
+
+class InvalidInnerMonologueError(LettaMessageError):
+    """Error raised when a message has a malformed inner monologue."""
+
+    default_error_message = "The message has a malformed inner monologue."