letta-nightly 0.11.0.dev20250808055434__py3-none-any.whl → 0.11.2.dev20250808210309__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
letta/__init__.py CHANGED
@@ -5,7 +5,7 @@ try:
     __version__ = version("letta")
 except PackageNotFoundError:
     # Fallback for development installations
-    __version__ = "0.11.0"
+    __version__ = "0.11.2"
 
 if os.environ.get("LETTA_VERSION"):
     __version__ = os.environ["LETTA_VERSION"]
letta/constants.py CHANGED
@@ -10,6 +10,7 @@ DEFAULT_TIMEZONE = "UTC"
 
 ADMIN_PREFIX = "/v1/admin"
 API_PREFIX = "/v1"
+OLLAMA_API_PREFIX = "/v1"
 OPENAI_API_PREFIX = "/openai"
 
 COMPOSIO_ENTITY_ENV_VAR_KEY = "COMPOSIO_ENTITY"
@@ -50,8 +51,9 @@ TOOL_CALL_ID_MAX_LEN = 29
 # Max steps for agent loop
 DEFAULT_MAX_STEPS = 50
 
-# minimum context window size
+# context window size
 MIN_CONTEXT_WINDOW = 4096
+DEFAULT_CONTEXT_WINDOW = 32000
 
 # number of concurrent embedding requests to sent
 EMBEDDING_BATCH_SIZE = 200
@@ -63,6 +65,7 @@ DEFAULT_MIN_MESSAGE_BUFFER_LENGTH = 15
 # embeddings
 MAX_EMBEDDING_DIM = 4096  # maximum supported embeding size - do NOT change or else DBs will need to be reset
 DEFAULT_EMBEDDING_CHUNK_SIZE = 300
+DEFAULT_EMBEDDING_DIM = 1024
 
 # tokenizers
 EMBEDDING_TO_TOKENIZER_MAP = {
letta/embeddings.py CHANGED
@@ -139,10 +139,11 @@ class AzureOpenAIEmbedding:
 
 class OllamaEmbeddings:
 
+    # Uses OpenAI API standard
     # Format:
-    # curl http://localhost:11434/api/embeddings -d '{
+    # curl http://localhost:11434/v1/embeddings -d '{
     #   "model": "mxbai-embed-large",
-    #   "prompt": "Llamas are members of the camelid family"
+    #   "input": "Llamas are members of the camelid family"
     # }'
 
     def __init__(self, model: str, base_url: str, ollama_additional_kwargs: dict):
@@ -154,18 +155,18 @@ class OllamaEmbeddings:
         import httpx
 
         headers = {"Content-Type": "application/json"}
-        json_data = {"model": self.model, "prompt": text}
+        json_data = {"model": self.model, "input": text}
         json_data.update(self.ollama_additional_kwargs)
 
         with httpx.Client() as client:
             response = client.post(
-                f"{self.base_url}/api/embeddings",
+                f"{self.base_url}/embeddings",
                 headers=headers,
                 json=json_data,
             )
 
         response_json = response.json()
-        return response_json["embedding"]
+        return response_json["data"][0]["embedding"]
 
 
 class GoogleEmbeddings:
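
The updated OllamaEmbeddings speaks Ollama's OpenAI-compatible /v1/embeddings API instead of the native /api/embeddings endpoint: the request field changes from "prompt" to "input", and the vector moves from a top-level "embedding" key into the OpenAI-style "data" list. A minimal sketch of the new exchange, assuming a local Ollama server at http://localhost:11434 with mxbai-embed-large already pulled:

    import httpx

    base_url = "http://localhost:11434/v1"  # server URL plus the OLLAMA_API_PREFIX ("/v1") added in constants.py
    payload = {"model": "mxbai-embed-large", "input": "Llamas are members of the camelid family"}

    with httpx.Client() as client:
        response = client.post(f"{base_url}/embeddings", json=payload)

    # OpenAI-style shape: {"object": "list", "data": [{"embedding": [...], "index": 0, ...}], ...}
    embedding = response.json()["data"][0]["embedding"]
    print(len(embedding))  # vector dimensionality, e.g. 1024 for mxbai-embed-large
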
letta/schemas/llm_config.py CHANGED
@@ -187,9 +187,12 @@ class LLMConfig(BaseModel):
 
     @classmethod
     def apply_reasoning_setting_to_config(cls, config: "LLMConfig", reasoning: bool):
-        if reasoning:
-            config.enable_reasoner = True
+        if not reasoning:
+            config.put_inner_thoughts_in_kwargs = False
+            config.enable_reasoner = False
 
+        else:
+            config.enable_reasoner = True
         if (
             config.model_endpoint_type == "anthropic"
             and ("claude-opus-4" in config.model or "claude-sonnet-4" in config.model or "claude-3-7-sonnet" in config.model)
@@ -207,9 +210,6 @@ class LLMConfig(BaseModel):
             config.reasoning_effort = "medium"
         else:
             config.put_inner_thoughts_in_kwargs = True
-
-        else:
-            config.enable_reasoner = False
-            config.put_inner_thoughts_in_kwargs = False
+            config.enable_reasoner = False
 
         return config
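
The rewrite inverts the branch order: the reasoning=False case is handled up front, and models that fall through to the put_inner_thoughts_in_kwargs path now also get enable_reasoner = False. A simplified stand-in showing the new control flow (the Config dataclass below is hypothetical, not the real pydantic LLMConfig, and only one Anthropic check is reproduced):

    from dataclasses import dataclass

    @dataclass
    class Config:  # hypothetical stand-in for LLMConfig
        model: str
        model_endpoint_type: str
        enable_reasoner: bool = False
        put_inner_thoughts_in_kwargs: bool = True

    def apply_reasoning(config: Config, reasoning: bool) -> Config:
        if not reasoning:
            # reasoning off: disable both mechanisms immediately
            config.put_inner_thoughts_in_kwargs = False
            config.enable_reasoner = False
        else:
            config.enable_reasoner = True
            if config.model_endpoint_type == "anthropic" and "claude-sonnet-4" in config.model:
                pass  # native extended-thinking models keep enable_reasoner = True
            else:
                # no native reasoner: emulate reasoning via inner thoughts in kwargs
                config.put_inner_thoughts_in_kwargs = True
                config.enable_reasoner = False
        return config

    print(apply_reasoning(Config("gpt-4", "openai"), reasoning=True))              # inner-thoughts path
    print(apply_reasoning(Config("claude-sonnet-4", "anthropic"), reasoning=True)) # native reasoner path
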
letta/schemas/providers/ollama.py CHANGED
@@ -3,7 +3,7 @@ from typing import Literal
 
 import aiohttp
 from pydantic import Field
 
-from letta.constants import DEFAULT_EMBEDDING_CHUNK_SIZE
+from letta.constants import DEFAULT_EMBEDDING_CHUNK_SIZE, DEFAULT_CONTEXT_WINDOW, DEFAULT_EMBEDDING_DIM, OLLAMA_API_PREFIX
 from letta.log import get_logger
 from letta.schemas.embedding_config import EmbeddingConfig
 from letta.schemas.enums import ProviderCategory, ProviderType
@@ -12,8 +12,6 @@ from letta.schemas.providers.openai import OpenAIProvider
 
 logger = get_logger(__name__)
 
-ollama_prefix = "/v1"
-
 
 class OllamaProvider(OpenAIProvider):
     """Ollama provider that uses the native /api/generate endpoint
@@ -41,19 +39,30 @@ class OllamaProvider(OpenAIProvider):
                 response_json = await response.json()
 
         configs = []
-        for model in response_json["models"]:
-            context_window = await self._get_model_context_window(model["name"])
+        for model in response_json.get("models", []):
+            model_name = model["name"]
+            model_details = await self._get_model_details_async(model_name)
+            if not model_details or "completion" not in model_details.get("capabilities", []):
+                continue
+
+            context_window = None
+            model_info = model_details.get("model_info", {})
+            if architecture := model_info.get("general.architecture"):
+                if context_length := model_info.get(f"{architecture}.context_length"):
+                    context_window = int(context_length)
+
             if context_window is None:
-                print(f"Ollama model {model['name']} has no context window, using default 32000")
-                context_window = 32000
+                logger.warning(f"Ollama model {model_name} has no context window, using default {DEFAULT_CONTEXT_WINDOW}")
+                context_window = DEFAULT_CONTEXT_WINDOW
+
             configs.append(
                 LLMConfig(
-                    model=model["name"],
+                    model=model_name,
                     model_endpoint_type=ProviderType.ollama,
-                    model_endpoint=f"{self.base_url}{ollama_prefix}",
+                    model_endpoint=f"{self.base_url}{OLLAMA_API_PREFIX}",
                     model_wrapper=self.default_prompt_formatter,
                     context_window=context_window,
-                    handle=self.get_handle(model["name"]),
+                    handle=self.get_handle(model_name),
                     provider_name=self.name,
                     provider_category=self.provider_category,
                 )
@@ -73,25 +82,36 @@ class OllamaProvider(OpenAIProvider):
                 response_json = await response.json()
 
         configs = []
-        for model in response_json["models"]:
-            embedding_dim = await self._get_model_embedding_dim(model["name"])
+        for model in response_json.get("models", []):
+            model_name = model["name"]
+            model_details = await self._get_model_details_async(model_name)
+            if not model_details or "embedding" not in model_details.get("capabilities", []):
+                continue
+
+            embedding_dim = None
+            model_info = model_details.get("model_info", {})
+            if architecture := model_info.get("general.architecture"):
+                if embedding_length := model_info.get(f"{architecture}.embedding_length"):
+                    embedding_dim = int(embedding_length)
+
             if not embedding_dim:
-                print(f"Ollama model {model['name']} has no embedding dimension, using default 1024")
-                # continue
-                embedding_dim = 1024
+                logger.warning(f"Ollama model {model_name} has no embedding dimension, using default {DEFAULT_EMBEDDING_DIM}")
+                embedding_dim = DEFAULT_EMBEDDING_DIM
+
             configs.append(
                 EmbeddingConfig(
-                    embedding_model=model["name"],
+                    embedding_model=model_name,
                     embedding_endpoint_type=ProviderType.ollama,
-                    embedding_endpoint=f"{self.base_url}{ollama_prefix}",
+                    embedding_endpoint=f"{self.base_url}{OLLAMA_API_PREFIX}",
                     embedding_dim=embedding_dim,
                     embedding_chunk_size=DEFAULT_EMBEDDING_CHUNK_SIZE,
-                    handle=self.get_handle(model["name"], is_embedding=True),
+                    handle=self.get_handle(model_name, is_embedding=True),
                 )
             )
         return configs
 
-    async def _get_model_context_window(self, model_name: str) -> int | None:
+    async def _get_model_details_async(self, model_name: str) -> dict | None:
+        """Get detailed information for a specific model from /api/show."""
         endpoint = f"{self.base_url}/api/show"
         payload = {"name": model_name}
 
@@ -102,39 +122,7 @@ class OllamaProvider(OpenAIProvider):
                         error_text = await response.text()
                         logger.warning(f"Failed to get model info for {model_name}: {response.status} - {error_text}")
                         return None
-
-                    response_json = await response.json()
-                    model_info = response_json.get("model_info", {})
-
-                    if architecture := model_info.get("general.architecture"):
-                        if context_length := model_info.get(f"{architecture}.context_length"):
-                            return int(context_length)
-
+                    return await response.json()
         except Exception as e:
-            logger.warning(f"Failed to get model context window for {model_name} with error: {e}")
-
-        return None
-
-    async def _get_model_embedding_dim(self, model_name: str) -> int | None:
-        endpoint = f"{self.base_url}/api/show"
-        payload = {"name": model_name}
-
-        try:
-            async with aiohttp.ClientSession() as session:
-                async with session.post(endpoint, json=payload) as response:
-                    if response.status != 200:
-                        error_text = await response.text()
-                        logger.warning(f"Failed to get model info for {model_name}: {response.status} - {error_text}")
-                        return None
-
-                    response_json = await response.json()
-                    model_info = response_json.get("model_info", {})
-
-                    if architecture := model_info.get("general.architecture"):
-                        if embedding_length := model_info.get(f"{architecture}.embedding_length"):
-                            return int(embedding_length)
-
-        except Exception as e:
-            logger.warning(f"Failed to get model embedding dimension for {model_name} with error: {e}")
-
-        return None
+            logger.warning(f"Failed to get model details for {model_name} with error: {e}")
+            return None
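
The two helpers collapse into a single _get_model_details_async call against Ollama's /api/show, whose response carries everything needed: a capabilities list used to filter chat vs. embedding models, and a model_info mapping keyed by the reported architecture. A sketch of that lookup, shown synchronously with httpx for brevity (assumes a local server and llama3.1:8b as an example model; key names follow the diff above):

    import httpx

    base_url = "http://localhost:11434"
    details = httpx.post(f"{base_url}/api/show", json={"name": "llama3.1:8b"}).json()

    capabilities = details.get("capabilities", [])  # e.g. ["completion", "tools"]; embedding models report "embedding"
    model_info = details.get("model_info", {})
    if architecture := model_info.get("general.architecture"):  # e.g. "llama"
        context_window = model_info.get(f"{architecture}.context_length")   # feeds LLMConfig.context_window
        embedding_dim = model_info.get(f"{architecture}.embedding_length")  # feeds EmbeddingConfig.embedding_dim
        print(architecture, context_window, embedding_dim)
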
letta/serialize_schemas/marshmallow_agent.py CHANGED
@@ -1,6 +1,7 @@
-from typing import Dict
+from typing import Dict, Optional
 
 from marshmallow import fields, post_dump, pre_load
+from sqlalchemy import func
 from sqlalchemy.orm import sessionmaker
 
 import letta
@@ -15,6 +16,7 @@ from letta.serialize_schemas.marshmallow_custom_fields import EmbeddingConfigFie
 from letta.serialize_schemas.marshmallow_message import SerializedMessageSchema
 from letta.serialize_schemas.marshmallow_tag import SerializedAgentTagSchema
 from letta.serialize_schemas.marshmallow_tool import SerializedToolSchema
+from letta.settings import DatabaseChoice, settings
 
 
 class MarshmallowAgentSchema(BaseSchema):
@@ -41,9 +43,10 @@ class MarshmallowAgentSchema(BaseSchema):
     tool_exec_environment_variables = fields.List(fields.Nested(SerializedAgentEnvironmentVariableSchema))
     tags = fields.List(fields.Nested(SerializedAgentTagSchema))
 
-    def __init__(self, *args, session: sessionmaker, actor: User, **kwargs):
+    def __init__(self, *args, session: sessionmaker, actor: User, max_steps: Optional[int] = None, **kwargs):
         super().__init__(*args, actor=actor, **kwargs)
         self.session = session
+        self.max_steps = max_steps
 
         # Propagate session and actor to nested schemas automatically
         for field in self.fields.values():
@@ -64,16 +67,103 @@ class MarshmallowAgentSchema(BaseSchema):
 
         with db_registry.session() as session:
             agent_id = data.get("id")
-            msgs = (
-                session.query(MessageModel)
-                .filter(
-                    MessageModel.agent_id == agent_id,
-                    MessageModel.organization_id == self.actor.organization_id,
+
+            if self.max_steps is not None:
+                # first, always get the system message
+                system_msg = (
+                    session.query(MessageModel)
+                    .filter(
+                        MessageModel.agent_id == agent_id,
+                        MessageModel.organization_id == self.actor.organization_id,
+                        MessageModel.role == "system",
+                    )
+                    .order_by(MessageModel.sequence_id.asc())
+                    .first()
+                )
+
+                if settings.database_engine is DatabaseChoice.POSTGRES:
+                    # efficient PostgreSQL approach using subquery
+                    user_msg_subquery = (
+                        session.query(MessageModel.sequence_id)
+                        .filter(
+                            MessageModel.agent_id == agent_id,
+                            MessageModel.organization_id == self.actor.organization_id,
+                            MessageModel.role == "user",
+                        )
+                        .order_by(MessageModel.sequence_id.desc())
+                        .limit(self.max_steps)
+                        .subquery()
+                    )
+
+                    # get the minimum sequence_id from the subquery
+                    cutoff_sequence_id = session.query(func.min(user_msg_subquery.c.sequence_id)).scalar()
+
+                    if cutoff_sequence_id:
+                        # get messages from cutoff, excluding system message to avoid duplicates
+                        step_msgs = (
+                            session.query(MessageModel)
+                            .filter(
+                                MessageModel.agent_id == agent_id,
+                                MessageModel.organization_id == self.actor.organization_id,
+                                MessageModel.sequence_id >= cutoff_sequence_id,
+                                MessageModel.role != "system",
+                            )
+                            .order_by(MessageModel.sequence_id.asc())
+                            .all()
+                        )
+                        # combine system message with step messages
+                        msgs = [system_msg] + step_msgs if system_msg else step_msgs
+                    else:
+                        # no user messages, just return system message
+                        msgs = [system_msg] if system_msg else []
+                else:
+                    # sqlite approach: get all user messages first, then get messages from cutoff
+                    user_messages = (
+                        session.query(MessageModel.sequence_id)
+                        .filter(
+                            MessageModel.agent_id == agent_id,
+                            MessageModel.organization_id == self.actor.organization_id,
+                            MessageModel.role == "user",
+                        )
+                        .order_by(MessageModel.sequence_id.desc())
+                        .limit(self.max_steps)
+                        .all()
+                    )
+
+                    if user_messages:
+                        # get the minimum sequence_id
+                        cutoff_sequence_id = min(msg.sequence_id for msg in user_messages)
+
+                        # get messages from cutoff, excluding system message to avoid duplicates
+                        step_msgs = (
+                            session.query(MessageModel)
+                            .filter(
+                                MessageModel.agent_id == agent_id,
+                                MessageModel.organization_id == self.actor.organization_id,
+                                MessageModel.sequence_id >= cutoff_sequence_id,
+                                MessageModel.role != "system",
+                            )
+                            .order_by(MessageModel.sequence_id.asc())
+                            .all()
+                        )
+                        # combine system message with step messages
+                        msgs = [system_msg] + step_msgs if system_msg else step_msgs
+                    else:
+                        # no user messages, just return system message
+                        msgs = [system_msg] if system_msg else []
+            else:
+                # if no limit, get all messages in ascending order
+                msgs = (
+                    session.query(MessageModel)
+                    .filter(
+                        MessageModel.agent_id == agent_id,
+                        MessageModel.organization_id == self.actor.organization_id,
+                    )
+                    .order_by(MessageModel.sequence_id.asc())
+                    .all()
                 )
-                .order_by(MessageModel.sequence_id.asc())
-                .all()
-            )
-            # overwrite the “messages” key with a fully serialized list
+
+            # overwrite the "messages" key with a fully serialized list
             data[self.FIELD_MESSAGES] = [SerializedMessageSchema(session=self.session, actor=self.actor).dump(m) for m in msgs]
 
         return data
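
Both database branches implement the same selection rule: always keep the system message, find the sequence_id of the oldest among the max_steps most recent user messages, and serialize every non-system message from that cutoff onward. A pure-Python sketch of the rule on illustrative data (the real code expresses this as SQL against MessageModel):

    # (sequence_id, role) pairs in ascending order -- illustrative only
    messages = [(1, "system"), (2, "user"), (3, "assistant"), (4, "user"), (5, "assistant"), (6, "user"), (7, "assistant")]
    max_steps = 2

    user_seq_ids = sorted(seq for seq, role in messages if role == "user")
    cutoff = min(user_seq_ids[-max_steps:]) if user_seq_ids else None  # oldest of the 2 most recent user messages -> 4

    system_msgs = [m for m in messages if m[1] == "system"][:1]
    step_msgs = [m for m in messages if m[1] != "system" and cutoff is not None and m[0] >= cutoff]
    print(system_msgs + step_msgs)  # [(1, 'system'), (4, 'user'), (5, 'assistant'), (6, 'user'), (7, 'assistant')]
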
letta/server/rest_api/routers/v1/agents.py CHANGED
@@ -146,6 +146,7 @@ class IndentedORJSONResponse(Response):
 @router.get("/{agent_id}/export", response_class=IndentedORJSONResponse, operation_id="export_agent_serialized")
 def export_agent_serialized(
     agent_id: str,
+    max_steps: int = 100,
     server: "SyncServer" = Depends(get_letta_server),
     actor_id: str | None = Header(None, alias="user_id"),
     # do not remove, used to autogeneration of spec
@@ -158,7 +159,7 @@ def export_agent_serialized(
     actor = server.user_manager.get_user_or_default(user_id=actor_id)
 
     try:
-        agent = server.agent_manager.serialize(agent_id=agent_id, actor=actor)
+        agent = server.agent_manager.serialize(agent_id=agent_id, actor=actor, max_steps=max_steps)
         return agent.model_dump()
     except NoResultFound:
         raise HTTPException(status_code=404, detail=f"Agent with id={agent_id} not found for user_id={actor.id}.")
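
Callers can now bound export size via the new query parameter. A hypothetical client call (placeholder server URL, agent id, and actor id; the user_id header matches the Header alias in the route signature):

    import httpx

    resp = httpx.get(
        "http://localhost:8283/v1/agents/agent-123/export",  # placeholder host and agent id
        params={"max_steps": 25},          # defaults to 100 per the new signature
        headers={"user_id": "user-123"},   # actor header read by the route
    )
    resp.raise_for_status()
    agent_file = resp.json()  # serialized agent with messages trimmed to the most recent 25 steps
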
letta/services/agent_manager.py CHANGED
@@ -1446,10 +1446,10 @@ class AgentManager:
 
     @enforce_types
     @trace_method
-    def serialize(self, agent_id: str, actor: PydanticUser) -> AgentSchema:
+    def serialize(self, agent_id: str, actor: PydanticUser, max_steps: Optional[int] = None) -> AgentSchema:
         with db_registry.session() as session:
             agent = AgentModel.read(db_session=session, identifier=agent_id, actor=actor)
-            schema = MarshmallowAgentSchema(session=session, actor=actor)
+            schema = MarshmallowAgentSchema(session=session, actor=actor, max_steps=max_steps)
             data = schema.dump(agent)
             return AgentSchema(**data)
 
letta_nightly-0.11.0.dev20250808055434.dist-info/METADATA → letta_nightly-0.11.2.dev20250808210309.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: letta-nightly
-Version: 0.11.0.dev20250808055434
+Version: 0.11.2.dev20250808210309
 Summary: Create LLM agents with long-term memory and custom tools
 License: Apache License
 Author: Letta Team
letta_nightly-0.11.0.dev20250808055434.dist-info/RECORD → letta_nightly-0.11.2.dev20250808210309.dist-info/RECORD RENAMED
@@ -1,4 +1,4 @@
-letta/__init__.py,sha256=egWzLDPN65F8R6kmzwHODXfUKI4dzKd394hsXrmPsbs,1321
+letta/__init__.py,sha256=psvi_LC5TMlxXPWuIwlSeUPhOZJHv6RJJQfvghPf9Zo,1321
 letta/agent.py,sha256=o591CrbxIepAfmVdZv7OVBCQsfAvKqv_HTd89LYPgu8,89462
 letta/agents/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 letta/agents/base_agent.py,sha256=-hrG_4iNba2e68LF8nzfPKeCviBdoWZ6jODd798ryt0,7796
@@ -16,12 +16,12 @@ letta/client/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 letta/client/streaming.py,sha256=UsDS_tDTsA3HgYryIDvGGmx_dWfnfQwtmEwLi4Z89Ik,4701
 letta/client/utils.py,sha256=VCGV-op5ZSmurd4yw7Vhf93XDQ0BkyBT8qsuV7EqfiU,2859
 letta/config.py,sha256=JFGY4TWW0Wm5fTbZamOwWqk5G8Nn-TXyhgByGoAqy2c,12375
-letta/constants.py,sha256=CIo3xpLsYr1EqYZJdrlDqPOoMmomtOlFYX4eV8uF-pk,15326
+letta/constants.py,sha256=8bOs4Ya6ZQO3kCJFoc9rFJX-NW-FlMGduXq8DdkpZeM,15404
 letta/data_sources/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 letta/data_sources/connectors.py,sha256=V8mUgE3V6CX-CcOyvkPSQ_ZWP2VtuqgTEXkCN1j0p68,7920
 letta/data_sources/connectors_helper.py,sha256=oQpVlc-BjSz9sTZ7sp4PsJSXJbBKpZPi3Dam03CURTQ,3376
 letta/data_sources/redis_client.py,sha256=Lz9hjJL9S7yd_qsvbuwqrGL3GwZe-qOc0uZm9FqNm4M,10688
-letta/embeddings.py,sha256=5bxVQOJHSXezbHH-htZDfzce_ZTEn_WY5E07PS09HO8,10555
+letta/embeddings.py,sha256=WNSUAMEWLMDcQg_2iLap8ApmOnCT98JytnBHWuv_rCg,10590
 letta/errors.py,sha256=DiskTVZiSZ4jbjEBYw7TDvuslnYhcXSXX2LUzmMbydo,7632
 letta/functions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 letta/functions/ast_parsers.py,sha256=0dXAN4qx3pWL_Y0aoEkaBpMKwI-kpoLEJftjW3v2I4E,5031
@@ -260,7 +260,7 @@ letta/schemas/letta_request.py,sha256=GF7tSVjoAXukl1SXN2FBi8ZMaWOVRvuJuXPgEwbV7H
 letta/schemas/letta_response.py,sha256=e6FcAhRX3heB0FoWAAozB3RJboMwi_JpelTdc5JupVA,8188
 letta/schemas/letta_stop_reason.py,sha256=4t39UKMMsLuNM-9a0BG7Mi-zZ7YhYLFSEpTWc8_OyrQ,2035
 letta/schemas/llm_batch_job.py,sha256=xr7RmMc9ItmL344vcIn1MJaT2nOf0F7qEHrsXkQNFQI,3136
-letta/schemas/llm_config.py,sha256=-B--Tf4rHacp6zcIbSunwoiKRCcvoqY0frwMR_2a0V8,9412
+letta/schemas/llm_config.py,sha256=SLjJRc1trV8fgZoTyBPC9hsgNONWQKfmf8SE97qFfmo,9462
 letta/schemas/llm_config_overrides.py,sha256=E6qJuVA8TwAAy3VjGitJ5jSQo5PbN-6VPcZOF5qhP9A,1815
 letta/schemas/mcp.py,sha256=_FKUSIoTLfx64buKqye-9fPET8-1_e2h9uYByNwTVio,10440
 letta/schemas/memory.py,sha256=45j0akHGSrShd8v8wW-7lJhFRNWi9rWEvFp8w6f1PUk,14142
@@ -290,7 +290,7 @@ letta/schemas/providers/groq.py,sha256=AquJQH-Y5-s75Nj2_X7xavuWUu5F2bSvHjAZ1Gfpe
 letta/schemas/providers/letta.py,sha256=50VcmTMm8OEQjZgdlLypa4QmNPe0mJycpZfbxxSb_ts,1611
 letta/schemas/providers/lmstudio.py,sha256=Hi8Nir96B5FCQ8cVT-mWl7nLX0Z2-RIxUANAqRGG-zo,4323
 letta/schemas/providers/mistral.py,sha256=EjFF6YcfN5jBjCfnZw3ECv_3qYuG0HVb7B0VoYk-jKU,1866
-letta/schemas/providers/ollama.py,sha256=iVx9xxrulG9ohbhk4kMtfryCnMgmynWg_NL4SesGX5U,6253
+letta/schemas/providers/ollama.py,sha256=6uMKhK7lq_f9-k0oFNO7NDhBdpv57HxyjWlYTuuYYsE,5970
 letta/schemas/providers/openai.py,sha256=jlnMu3t1_IHWT4dGn8zZlbITl6wQl2X9onn_B2ZhV48,11051
 letta/schemas/providers/together.py,sha256=2zFca6Jy08r1ANrdvtlSIduyDr8ek9Tt1yYiz1S-5g8,3422
 letta/schemas/providers/vllm.py,sha256=CwM260cxWLkviVzY4wwkw4NmDAK69fv531AofRGa9JA,2480
@@ -309,7 +309,7 @@ letta/schemas/tool_rule.py,sha256=e9pWC2kZvdnohQuCTAxm96UjczrPnSB_lEeVkBEBPN4,97
 letta/schemas/usage.py,sha256=9SSTH5kUliwiVF14b-yKbDcmxQBOLg4YH5xhXDbW9UU,1281
 letta/schemas/user.py,sha256=GanbgD80N33FBjWKkv-MvUO01C0GHzrYmJ-o80wgLLI,1481
 letta/serialize_schemas/__init__.py,sha256=cosMjvWz7cubC1azbUofzYrcDBTuSgjJImUdsrSs3p0,77
-letta/serialize_schemas/marshmallow_agent.py,sha256=89FN6iTAbnMxmuPj3SuUhtDtOE-Yg8rNzy3GjprciwI,5798
+letta/serialize_schemas/marshmallow_agent.py,sha256=FrIz3XD-XR29tZio1Ji0588XPP4CJNE17sDTC0HHaKk,10370
 letta/serialize_schemas/marshmallow_agent_environment_variable.py,sha256=9RYJkaNH2UiRoIFzrNklVAGl3uMmu3n6NwzFdviPPVA,653
 letta/serialize_schemas/marshmallow_base.py,sha256=GP0ImCRfJ-BqNKe-T44Feal18pmFQG-p8JllOsSSNRk,1379
 letta/serialize_schemas/marshmallow_block.py,sha256=qV17pbztsb9MD-632aC66aBJ5m-HK780ifOO9YnoKoo,1043
@@ -337,7 +337,7 @@ letta/server/rest_api/routers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5N
 letta/server/rest_api/routers/openai/chat_completions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 letta/server/rest_api/routers/openai/chat_completions/chat_completions.py,sha256=QBWab1fn2LXVDMtc6li3gOzmrNzDiUw5WUJsMeeMZII,5076
 letta/server/rest_api/routers/v1/__init__.py,sha256=HlL-WoRMz39NLyHhFugHbdIho66ZVK5VG2YRNYIiMZU,1866
-letta/server/rest_api/routers/v1/agents.py,sha256=ptaNTInm9cgqIKeqGY2aHAK3rn2ZY3qXmlw7k6mxTYg,64556
+letta/server/rest_api/routers/v1/agents.py,sha256=xb78slGGlPsceL6rrHDssoR128tVbJSbkroSnXvu9FE,64603
 letta/server/rest_api/routers/v1/blocks.py,sha256=jICprv_qKlS1H9xOwzXVnGG41l9D9WR6RpKPAKljrhk,7459
 letta/server/rest_api/routers/v1/embeddings.py,sha256=PRaQlrmEXPiIdWsTbadrFsv3Afyv5oEFUdhgHA8FTi8,989
 letta/server/rest_api/routers/v1/folders.py,sha256=ikC1oYkWZXafDlFDv1hOkuGkOnUCAU3XqNmZG6PsqS4,21646
@@ -376,7 +376,7 @@ letta/server/ws_api/protocol.py,sha256=5mDgpfNZn_kNwHnpt5Dsuw8gdNH298sgxTGed3etz
 letta/server/ws_api/server.py,sha256=_16TQafm509rqRztZYqo0HKKZoe8ccBrNftd_kbIJTE,5833
 letta/services/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 letta/services/agent_file_manager.py,sha256=bgYTyQA90Iqo3W-LprPtyyOKf2itoqivcRhh4EOUXss,30847
-letta/services/agent_manager.py,sha256=EbBV-yNEIv-AOUiZtcbusnidcGXFyHLA7L6jmuh2ih4,151310
+letta/services/agent_manager.py,sha256=6CUnFBGoC42im08I10ZG8hIWYw-gq35iQ-sAmK0EYQM,151364
 letta/services/agent_serialization_manager.py,sha256=YrpeY1SQavEKr9IjlwNp0MD8iwq5U_Deh9-B57ghCzQ,39640
 letta/services/archive_manager.py,sha256=a_EDb0agJAze1oOELfnHmPUQB0k9fG1xBK5p_o2zsis,9427
 letta/services/block_manager.py,sha256=sSbNpsbk62-qBtOmhIp3YlOQKTpWQ3gXlVrFpF1vY50,33491
@@ -456,8 +456,8 @@ letta/templates/summary_request_text.j2,sha256=ZttQwXonW2lk4pJLYzLK0pmo4EO4EtUUI
 letta/templates/template_helper.py,sha256=HkG3zwRc5NVGmSTQu5PUTpz7LevK43bzXVaQuN8urf0,1634
 letta/types/__init__.py,sha256=hokKjCVFGEfR7SLMrtZsRsBfsC7yTIbgKPLdGg4K1eY,147
 letta/utils.py,sha256=Fwwe2imHRamc_kucAATo8NXhwDG5NBoOIYmBaERXUhM,38384
-letta_nightly-0.11.0.dev20250808055434.dist-info/LICENSE,sha256=mExtuZ_GYJgDEI38GWdiEYZizZS4KkVt2SF1g_GPNhI,10759
-letta_nightly-0.11.0.dev20250808055434.dist-info/METADATA,sha256=oGIMFfhX6qRD55DEP_UdK596w0AMrKXiUlo-VWTAJfw,23281
-letta_nightly-0.11.0.dev20250808055434.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
-letta_nightly-0.11.0.dev20250808055434.dist-info/entry_points.txt,sha256=2zdiyGNEZGV5oYBuS-y2nAAgjDgcC9yM_mHJBFSRt5U,40
-letta_nightly-0.11.0.dev20250808055434.dist-info/RECORD,,
+letta_nightly-0.11.2.dev20250808210309.dist-info/LICENSE,sha256=mExtuZ_GYJgDEI38GWdiEYZizZS4KkVt2SF1g_GPNhI,10759
+letta_nightly-0.11.2.dev20250808210309.dist-info/METADATA,sha256=cVexz7SQTJh7Ta3BC_P64fN1U7qDj2Uw5g7w6LFcuFI,23281
+letta_nightly-0.11.2.dev20250808210309.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
+letta_nightly-0.11.2.dev20250808210309.dist-info/entry_points.txt,sha256=2zdiyGNEZGV5oYBuS-y2nAAgjDgcC9yM_mHJBFSRt5U,40
+letta_nightly-0.11.2.dev20250808210309.dist-info/RECORD,,