letta-nightly 0.6.33.dev20250227104112__py3-none-any.whl → 0.6.34.dev20250228104059__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of letta-nightly might be problematic.

letta/server/rest_api/routers/v1/agents.py CHANGED
@@ -69,7 +69,6 @@ def list_agents(
             "project_id": project_id,
             "template_id": template_id,
             "base_template_id": base_template_id,
-            "identifier_id": identifier_id,
         }.items()
         if value is not None
     }
@@ -84,6 +83,7 @@ def list_agents(
         tags=tags,
         match_all_tags=match_all_tags,
         identifier_keys=identifier_keys,
+        identifier_id=identifier_id,
        **kwargs,
    )
    return agents
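
For context, this change stops routing identifier_id through the None-filtered kwargs dict and instead forwards it explicitly to the underlying list call. A minimal sketch of the resulting call shape; the helper _list_agents_backend below is hypothetical, standing in for the manager method the diff does not show:

def _list_agents_backend(identifier_id=None, **filters):
    # Stand-in for the real backend list call in this sketch.
    return [{"identifier_id": identifier_id, **filters}]

def list_agents_sketch(identifier_id=None, project_id=None, template_id=None):
    # Optional filters are still collected only when provided, as in the hunk above ...
    kwargs = {
        key: value
        for key, value in {"project_id": project_id, "template_id": template_id}.items()
        if value is not None
    }
    # ... while identifier_id is now always passed explicitly rather than via the dict.
    return _list_agents_backend(identifier_id=identifier_id, **kwargs)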
letta/server/rest_api/routers/v1/voice.py ADDED
@@ -0,0 +1,315 @@
+import json
+import uuid
+from typing import TYPE_CHECKING, Optional
+
+import httpx
+import openai
+from fastapi import APIRouter, Body, Depends, Header, HTTPException
+from fastapi.responses import StreamingResponse
+from openai.types.chat.chat_completion_chunk import ChatCompletionChunk, Choice, ChoiceDelta
+from openai.types.chat.completion_create_params import CompletionCreateParams
+from starlette.concurrency import run_in_threadpool
+
+from letta.constants import LETTA_TOOL_SET, NON_USER_MSG_PREFIX, PRE_EXECUTION_MESSAGE_ARG
+from letta.helpers.tool_execution_helper import (
+    add_pre_execution_message,
+    enable_strict_mode,
+    execute_external_tool,
+    remove_request_heartbeat,
+)
+from letta.log import get_logger
+from letta.orm.enums import ToolType
+from letta.schemas.openai.chat_completion_request import (
+    AssistantMessage,
+    ChatCompletionRequest,
+    Tool,
+    ToolCall,
+    ToolCallFunction,
+    ToolMessage,
+    UserMessage,
+)
+from letta.server.rest_api.optimistic_json_parser import OptimisticJSONParser
+from letta.server.rest_api.utils import (
+    convert_letta_messages_to_openai,
+    create_assistant_messages_from_openai_response,
+    create_tool_call_messages_from_openai_response,
+    create_user_message,
+    get_letta_server,
+    get_messages_from_completion_request,
+)
+from letta.settings import model_settings
+
+if TYPE_CHECKING:
+    from letta.server.server import SyncServer
+
+
+router = APIRouter(prefix="/voice", tags=["voice"])
+
+logger = get_logger(__name__)
+
+
+@router.post(
+    "/chat/completions",
+    response_model=None,
+    operation_id="create_voice_chat_completions",
+    responses={
+        200: {
+            "description": "Successful response",
+            "content": {
+                "text/event-stream": {"description": "Server-Sent Events stream"},
+            },
+        }
+    },
+)
+async def create_voice_chat_completions(
+    completion_request: CompletionCreateParams = Body(...),
+    server: "SyncServer" = Depends(get_letta_server),
+    user_id: Optional[str] = Header(None, alias="user_id"),
+):
+    actor = server.user_manager.get_user_or_default(user_id=user_id)
+
+    agent_id = str(completion_request.get("user", None))
+    if agent_id is None:
+        raise HTTPException(status_code=400, detail="Must pass agent_id in the 'user' field")
+
+    agent_state = server.agent_manager.get_agent_by_id(agent_id=agent_id, actor=actor)
+    if agent_state.llm_config.model_endpoint_type != "openai":
+        raise HTTPException(status_code=400, detail="Only OpenAI models are supported by this endpoint.")
+
+    # Convert Letta messages to OpenAI messages
+    in_context_messages = server.message_manager.get_messages_by_ids(message_ids=agent_state.message_ids, actor=actor)
+    openai_messages = convert_letta_messages_to_openai(in_context_messages)
+
+    # Also parse user input from completion_request and append
+    input_message = get_messages_from_completion_request(completion_request)[-1]
+    openai_messages.append(input_message)
+
+    # Tools we allow this agent to call
+    tools = [t for t in agent_state.tools if t.name not in LETTA_TOOL_SET and t.tool_type in {ToolType.EXTERNAL_COMPOSIO, ToolType.CUSTOM}]
+
+    # Initial request
+    openai_request = ChatCompletionRequest(
+        model=agent_state.llm_config.model,
+        messages=openai_messages,
+        # TODO: This nested thing here is so ugly, need to refactor
+        tools=(
+            [
+                Tool(type="function", function=enable_strict_mode(add_pre_execution_message(remove_request_heartbeat(t.json_schema))))
+                for t in tools
+            ]
+            if tools
+            else None
+        ),
+        tool_choice="auto",
+        user=user_id,
+        max_completion_tokens=agent_state.llm_config.max_tokens,
+        temperature=agent_state.llm_config.temperature,
+        stream=True,
+    )
+
+    # Create the OpenAI async client
+    client = openai.AsyncClient(
+        api_key=model_settings.openai_api_key,
+        max_retries=0,
+        http_client=httpx.AsyncClient(
+            timeout=httpx.Timeout(connect=15.0, read=30.0, write=15.0, pool=15.0),
+            follow_redirects=True,
+            limits=httpx.Limits(
+                max_connections=50,
+                max_keepalive_connections=50,
+                keepalive_expiry=120,
+            ),
+        ),
+    )
+
+    # The messages we want to persist to the Letta agent
+    user_message = create_user_message(input_message=input_message, agent_id=agent_id, actor=actor)
+    message_db_queue = [user_message]
+
+    async def event_stream():
+        """
+        A function-calling loop:
+          - We stream partial tokens.
+          - If we detect a tool call (finish_reason="tool_calls"), we parse it,
+            add two messages to the conversation:
+              (a) assistant message with tool_calls referencing the same ID
+              (b) a tool message referencing that ID, containing the tool result.
+          - Re-invoke the OpenAI request with updated conversation, streaming again.
+          - End when finish_reason="stop" or no more tool calls.
+        """

+        # We'll keep updating this conversation in a loop
+        conversation = openai_messages[:]
+
+        while True:
+            # Make the streaming request to OpenAI
+            stream = await client.chat.completions.create(**openai_request.model_dump(exclude_unset=True))
+
+            content_buffer = []
+            tool_call_name = None
+            tool_call_args_str = ""
+            tool_call_id = None
+            tool_call_happened = False
+            finish_reason_stop = False
+            optimistic_json_parser = OptimisticJSONParser(strict=True)
+            current_parsed_json_result = {}
+
+            async with stream:
+                async for chunk in stream:
+                    choice = chunk.choices[0]
+                    delta = choice.delta
+                    finish_reason = choice.finish_reason  # "tool_calls", "stop", or None
+
+                    if delta.content:
+                        content_buffer.append(delta.content)
+                        yield f"data: {chunk.model_dump_json()}\n\n"
+
+                    # CASE B: Partial tool call info
+                    if delta.tool_calls:
+                        # Typically there's only one in delta.tool_calls
+                        tc = delta.tool_calls[0]
+                        if tc.function.name:
+                            tool_call_name = tc.function.name
+                        if tc.function.arguments:
+                            tool_call_args_str += tc.function.arguments
+
+                            # See if we can stream out the pre-execution message
+                            parsed_args = optimistic_json_parser.parse(tool_call_args_str)
+                            if parsed_args.get(
+                                PRE_EXECUTION_MESSAGE_ARG
+                            ) and current_parsed_json_result.get(  # Ensure key exists and is not None/empty
+                                PRE_EXECUTION_MESSAGE_ARG
+                            ) != parsed_args.get(
+                                PRE_EXECUTION_MESSAGE_ARG
+                            ):
+                                # Only stream if there's something new to stream
+                                # We do this way to avoid hanging JSON at the end of the stream, e.g. '}'
+                                if parsed_args != current_parsed_json_result:
+                                    current_parsed_json_result = parsed_args
+                                    synthetic_chunk = ChatCompletionChunk(
+                                        id=chunk.id,
+                                        object=chunk.object,
+                                        created=chunk.created,
+                                        model=chunk.model,
+                                        choices=[
+                                            Choice(
+                                                index=choice.index,
+                                                delta=ChoiceDelta(content=tc.function.arguments, role="assistant"),
+                                                finish_reason=None,
+                                            )
+                                        ],
+                                    )
+
+                                    yield f"data: {synthetic_chunk.model_dump_json()}\n\n"
+
+                        # We might generate a unique ID for the tool call
+                        if tc.id:
+                            tool_call_id = tc.id
+
+                    # Check finish_reason
+                    if finish_reason == "tool_calls":
+                        tool_call_happened = True
+                        break
+                    elif finish_reason == "stop":
+                        finish_reason_stop = True
+                        break
+
+            if content_buffer:
+                # We treat that partial text as an assistant message
+                content = "".join(content_buffer)
+                conversation.append({"role": "assistant", "content": content})
+
+                # Create an assistant message here to persist later
+                assistant_messages = create_assistant_messages_from_openai_response(
+                    response_text=content, agent_id=agent_id, model=agent_state.llm_config.model, actor=actor
+                )
+                message_db_queue.extend(assistant_messages)
+
+            if tool_call_happened:
+                # Parse the tool call arguments
+                try:
+                    tool_args = json.loads(tool_call_args_str)
+                except json.JSONDecodeError:
+                    tool_args = {}
+
+                if not tool_call_id:
+                    # If no tool_call_id given by the model, generate one
+                    tool_call_id = f"call_{uuid.uuid4().hex[:8]}"
+
+                # 1) Insert the "assistant" message with the tool_calls field
+                #    referencing the same tool_call_id
+                assistant_tool_call_msg = AssistantMessage(
+                    content=None,
+                    tool_calls=[ToolCall(id=tool_call_id, function=ToolCallFunction(name=tool_call_name, arguments=tool_call_args_str))],
+                )
+
+                conversation.append(assistant_tool_call_msg.model_dump())
+
+                # 2) Execute the tool
+                target_tool = next((x for x in tools if x.name == tool_call_name), None)
+                if not target_tool:
+                    # Tool not found, handle error
+                    yield f"data: {json.dumps({'error': 'Tool not found', 'tool': tool_call_name})}\n\n"
+                    break
+
+                try:
+                    tool_result, _ = execute_external_tool(
+                        agent_state=agent_state,
+                        function_name=tool_call_name,
+                        function_args=tool_args,
+                        target_letta_tool=target_tool,
+                        actor=actor,
+                        allow_agent_state_modifications=False,
+                    )
+                    function_call_success = True
+                except Exception as e:
+                    tool_result = f"Failed to call tool. Error: {e}"
+                    function_call_success = False
+
+                # 3) Insert the "tool" message referencing the same tool_call_id
+                tool_message = ToolMessage(content=json.dumps({"result": tool_result}), tool_call_id=tool_call_id)
+
+                conversation.append(tool_message.model_dump())
+
+                # 4) Add a user message prompting the tool call result summarization
+                heartbeat_user_message = UserMessage(
+                    content=f"{NON_USER_MSG_PREFIX} Tool finished executing. Summarize the result for the user.",
+                )
+                conversation.append(heartbeat_user_message.model_dump())
+
+                # Now, re-invoke OpenAI with the updated conversation
+                openai_request.messages = conversation
+
+                # Create a tool call message and append to message_db_queue
+                tool_call_messages = create_tool_call_messages_from_openai_response(
+                    agent_id=agent_state.id,
+                    model=agent_state.llm_config.model,
+                    function_name=tool_call_name,
+                    function_arguments=tool_args,
+                    tool_call_id=tool_call_id,
+                    function_call_success=function_call_success,
+                    function_response=tool_result,
+                    actor=actor,
+                    add_heartbeat_request_system_message=True,
+                )
+                message_db_queue.extend(tool_call_messages)
+
+                continue  # Start the while loop again
+
+            if finish_reason_stop:
+                break
+
+            # If we reach here, no tool call, no "stop", but we've ended streaming
+            # Possibly a model error or some other finish reason. We'll just end.
+            break
+
+        await run_in_threadpool(
+            server.agent_manager.append_to_in_context_messages,
+            message_db_queue,
+            agent_id=agent_id,
+            actor=actor,
+        )
+
+        yield "data: [DONE]\n\n"
+
+    return StreamingResponse(event_stream(), media_type="text/event-stream")
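
Since the new route speaks the OpenAI chat-completions wire format, any OpenAI SDK can exercise it. A minimal sketch, assuming a Letta server reachable at http://localhost:8283 with the router mounted at /v1/voice (host, port, and mount path are assumptions, not from the diff); the agent id rides in the standard user field and the optional actor id in the user_id header:

import openai

client = openai.OpenAI(
    base_url="http://localhost:8283/v1/voice",  # hypothetical mount point
    api_key="not-used-by-this-route",  # the route resolves the actor from the header instead
    default_headers={"user_id": "user-00000000"},  # hypothetical actor id
)

# The route reads the agent id from the 'user' field and streams SSE chunks back,
# interleaving real model tokens with synthetic pre-execution-message chunks.
stream = client.chat.completions.create(
    model="gpt-4o-mini",  # the agent's llm_config must point at an OpenAI endpoint
    user="agent-00000000",  # hypothetical agent id
    messages=[{"role": "user", "content": "Summarize our last conversation."}],
    stream=True,
)
for chunk in stream:
    delta = chunk.choices[0].delta
    if delta.content:
        print(delta.content, end="", flush=True)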
letta/server/rest_api/utils.py CHANGED
@@ -3,7 +3,6 @@ import json
 import os
 import uuid
 import warnings
-from datetime import datetime, timezone
 from enum import Enum
 from typing import TYPE_CHECKING, AsyncGenerator, Dict, Iterable, List, Optional, Union, cast
 
@@ -14,8 +13,9 @@ from openai.types.chat.chat_completion_message_tool_call import Function as OpenAIFunction
 from openai.types.chat.completion_create_params import CompletionCreateParams
 from pydantic import BaseModel
 
-from letta.constants import DEFAULT_MESSAGE_TOOL, DEFAULT_MESSAGE_TOOL_KWARG
+from letta.constants import DEFAULT_MESSAGE_TOOL, DEFAULT_MESSAGE_TOOL_KWARG, REQ_HEARTBEAT_MESSAGE
 from letta.errors import ContextWindowExceededError, RateLimitExceededError
+from letta.helpers.datetime_helpers import get_utc_time
 from letta.log import get_logger
 from letta.schemas.enums import MessageRole
 from letta.schemas.letta_message import TextContent
@@ -23,6 +23,7 @@ from letta.schemas.message import Message
 from letta.schemas.usage import LettaUsageStatistics
 from letta.schemas.user import User
 from letta.server.rest_api.interface import StreamingServerInterface
+from letta.system import get_heartbeat, package_function_response
 
 if TYPE_CHECKING:
     from letta.server.server import SyncServer
@@ -144,7 +145,8 @@ def create_user_message(input_message: dict, agent_id: str, actor: User) -> Message:
     Converts a user input message into the internal structured format.
     """
     # Generate timestamp in the correct format
-    now = datetime.now(timezone.utc).isoformat()
+    # Skip pytz for performance reasons
+    now = get_utc_time().isoformat()
 
     # Format message as structured JSON
     structured_message = {"type": "user_message", "message": input_message["content"], "time": now}
@@ -159,37 +161,36 @@ def create_user_message(input_message: dict, agent_id: str, actor: User) -> Message:
         model=None,
         tool_calls=None,
         tool_call_id=None,
-        created_at=datetime.now(timezone.utc),
+        created_at=get_utc_time(),
     )
 
     return user_message
 
 
-def create_assistant_message_from_openai_response(
-    response_text: str,
+def create_tool_call_messages_from_openai_response(
     agent_id: str,
     model: str,
+    function_name: str,
+    function_arguments: Dict,
+    tool_call_id: str,
+    function_call_success: bool,
+    function_response: Optional[str],
     actor: User,
-) -> Message:
-    """
-    Converts an OpenAI response into a Message that follows the internal
-    paradigm where LLM responses are structured as tool calls instead of content.
-    """
-    tool_call_id = str(uuid.uuid4())
+    add_heartbeat_request_system_message: bool = False,
+) -> List[Message]:
+    messages = []
 
     # Construct the tool call with the assistant's message
+    function_arguments["request_heartbeat"] = True
    tool_call = OpenAIToolCall(
        id=tool_call_id,
        function=OpenAIFunction(
-            name=DEFAULT_MESSAGE_TOOL,
-            arguments='{\n "' + DEFAULT_MESSAGE_TOOL_KWARG + '": ' + f'"{response_text}",\n "request_heartbeat": true\n' + "}",
+            name=function_name,
+            arguments=json.dumps(function_arguments),
        ),
        type="function",
    )
-
-    # Construct the Message object
    assistant_message = Message(
-        id=f"message-{uuid.uuid4()}",
        role=MessageRole.assistant,
        content=[],
        organization_id=actor.organization_id,
@@ -197,10 +198,62 @@ def create_assistant_message_from_openai_response(
        model=model,
        tool_calls=[tool_call],
        tool_call_id=tool_call_id,
-        created_at=datetime.now(timezone.utc),
+        created_at=get_utc_time(),
    )
+    messages.append(assistant_message)
+
+    tool_message = Message(
+        role=MessageRole.tool,
+        content=[TextContent(text=package_function_response(function_call_success, function_response))],
+        organization_id=actor.organization_id,
+        agent_id=agent_id,
+        model=model,
+        tool_calls=[],
+        tool_call_id=tool_call_id,
+        created_at=get_utc_time(),
+        name=function_name,
+    )
+    messages.append(tool_message)
+
+    if add_heartbeat_request_system_message:
+        heartbeat_system_message = Message(
+            role=MessageRole.user,
+            content=[TextContent(text=get_heartbeat(REQ_HEARTBEAT_MESSAGE))],
+            organization_id=actor.organization_id,
+            agent_id=agent_id,
+            model=model,
+            tool_calls=[],
+            tool_call_id=None,
+            created_at=get_utc_time(),
+        )
+        messages.append(heartbeat_system_message)
+
+    return messages
+
 
-    return assistant_message
+def create_assistant_messages_from_openai_response(
+    response_text: str,
+    agent_id: str,
+    model: str,
+    actor: User,
+) -> List[Message]:
+    """
+    Converts an OpenAI response into Messages that follow the internal
+    paradigm where LLM responses are structured as tool calls instead of content.
+    """
+    tool_call_id = str(uuid.uuid4())
+
+    return create_tool_call_messages_from_openai_response(
+        agent_id=agent_id,
+        model=model,
+        function_name=DEFAULT_MESSAGE_TOOL,
+        function_arguments={DEFAULT_MESSAGE_TOOL_KWARG: response_text},  # Avoid raw string manipulation
+        tool_call_id=tool_call_id,
+        function_call_success=True,
+        function_response=None,
+        actor=actor,
+        add_heartbeat_request_system_message=False,
+    )
 
 
 def convert_letta_messages_to_openai(messages: List[Message]) -> List[dict]:
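
The net effect of the refactor: the old single-Message helper becomes a thin wrapper over create_tool_call_messages_from_openai_response, which returns the assistant tool-call message, the tool-result message, and optionally a heartbeat user message. An illustrative call; the ids and the actor are hypothetical fixtures, not from the diff:

from letta.schemas.enums import MessageRole
from letta.server.rest_api.utils import create_tool_call_messages_from_openai_response

# 'actor' is assumed to be an existing letta.schemas.user.User fixture.
messages = create_tool_call_messages_from_openai_response(
    agent_id="agent-00000000",
    model="gpt-4o-mini",
    function_name="get_weather",
    function_arguments={"city": "Berlin"},  # the helper injects "request_heartbeat": True
    tool_call_id="call_ab12cd34",
    function_call_success=True,
    function_response='{"temp_c": 7}',
    actor=actor,
    add_heartbeat_request_system_message=True,
)
# Three messages, in order: assistant (tool call), tool (packaged result),
# and a user-role heartbeat prompting the agent to continue.
assert [m.role for m in messages] == [MessageRole.assistant, MessageRole.tool, MessageRole.user]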
letta/server/server.py CHANGED
@@ -60,6 +60,7 @@ from letta.schemas.providers import (
     TogetherProvider,
     VLLMChatCompletionsProvider,
     VLLMCompletionsProvider,
+    xAIProvider,
 )
 from letta.schemas.sandbox_config import SandboxType
 from letta.schemas.source import Source
@@ -311,6 +312,8 @@ class SyncServer(Server):
             self._enabled_providers.append(LMStudioOpenAIProvider(base_url=lmstudio_url))
         if model_settings.deepseek_api_key:
             self._enabled_providers.append(DeepSeekProvider(api_key=model_settings.deepseek_api_key))
+        if model_settings.xai_api_key:
+            self._enabled_providers.append(xAIProvider(api_key=model_settings.xai_api_key))
 
     def load_agent(self, agent_id: str, actor: User, interface: Union[AgentInterface, None] = None) -> Agent:
         """Updated method to load agents from persisted storage"""
@@ -1197,7 +1200,8 @@ class SyncServer(Server):
         # Disable token streaming if not OpenAI or Anthropic
         # TODO: cleanup this logic
         llm_config = letta_agent.agent_state.llm_config
-        supports_token_streaming = ["openai", "anthropic", "deepseek"]
+        # supports_token_streaming = ["openai", "anthropic", "xai", "deepseek"]
+        supports_token_streaming = ["openai", "anthropic", "deepseek"]  # TODO re-enable xAI once streaming is patched
         if stream_tokens and (
             llm_config.model_endpoint_type not in supports_token_streaming or "inference.memgpt.ai" in llm_config.model_endpoint
         ):
letta/services/identity_manager.py CHANGED
@@ -115,8 +115,8 @@ class IdentityManager:
         if replace:
             existing_identity.properties = [prop.model_dump() for prop in identity.properties]
         else:
-            new_properties = existing_identity.properties + identity.properties
-            existing_identity.properties = [prop.model_dump() for prop in new_properties]
+            new_properties = existing_identity.properties + [prop.model_dump() for prop in identity.properties]
+            existing_identity.properties = new_properties
 
         self._process_agent_relationship(
             session=session, identity=existing_identity, agent_ids=identity.agent_ids, allow_partial=False, replace=replace
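
The previous merge concatenated already-persisted entries with incoming Pydantic models and then called model_dump() on every element, which fails on the persisted entries if they are plain dicts. A minimal reproduction of that failure mode, assuming properties are stored as plain dicts in a JSON column; the IdentityProperty stand-in below is illustrative, not the real schema:

from pydantic import BaseModel

class IdentityProperty(BaseModel):  # stand-in for the real property schema
    key: str
    value: str

stored = [{"key": "plan", "value": "pro"}]               # already-persisted rows (dicts)
incoming = [IdentityProperty(key="region", value="eu")]  # new properties (models)

# Old behavior: model_dump() over the mixed list blows up on the dict entries.
# [prop.model_dump() for prop in stored + incoming]  # AttributeError: 'dict' object has no attribute 'model_dump'

# Fixed behavior: dump only the incoming models, then concatenate.
merged = stored + [prop.model_dump() for prop in incoming]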
letta/settings.py CHANGED
@@ -63,6 +63,9 @@ class ModelSettings(BaseSettings):
     # deepseek
     deepseek_api_key: Optional[str] = None
 
+    # xAI / Grok
+    xai_api_key: Optional[str] = None
+
     # groq
     groq_api_key: Optional[str] = None
 
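
Since ModelSettings is a pydantic BaseSettings class, the new key should be configurable from the environment. A sketch assuming the unprefixed variable name XAI_API_KEY, consistent with the neighboring *_api_key fields but still an assumption:

import os

# Set before letta.settings is imported so pydantic picks it up at load time.
os.environ["XAI_API_KEY"] = "<your-xai-key>"  # hypothetical placeholder value

from letta.settings import model_settings

# With the key present, SyncServer registers xAIProvider at startup
# (see the server.py hunk above).
assert model_settings.xai_api_key == "<your-xai-key>"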
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: letta-nightly
-Version: 0.6.33.dev20250227104112
+Version: 0.6.34.dev20250228104059
 Summary: Create LLM agents with long-term memory and custom tools
 License: Apache License
 Author: Letta Team
@@ -1,6 +1,6 @@
-letta/__init__.py,sha256=ZG24l5R0k4y_ttVVqlMdf3VtTG63oUJlIbU0I38-TrM,918
+letta/__init__.py,sha256=a7R3j3Qmd9DXH-PvZSRZ7_Mht5Aw5TPiLgc6yuAmJUw,918
 letta/__main__.py,sha256=6Hs2PV7EYc5Tid4g4OtcLXhqVHiNYTGzSBdoOnW2HXA,29
-letta/agent.py,sha256=NqmrtW0gNfo0kT15WzRfY-TzhMDx4TaWfEmPUWSB2FI,60921
+letta/agent.py,sha256=je4571lbHoI0Gt3kbW1gRT445R5KodDKFClYjj9w4bU,61470
 letta/benchmark/benchmark.py,sha256=ebvnwfp3yezaXOQyGXkYCDYpsmre-b9hvNtnyx4xkG0,3701
 letta/benchmark/constants.py,sha256=aXc5gdpMGJT327VuxsT5FngbCK2J41PQYeICBO7g_RE,536
 letta/chat_only_agent.py,sha256=71Lf-df8y3nsE9IFKpEigaZaWHoWnXnhVChkp1L-83I,4760
@@ -48,9 +48,9 @@ letta/llm_api/google_ai.py,sha256=VnoxG6QYcwgFEbH8iJ8MHaMQrW4ROekZy6ZV5ZdHxzI,18
 letta/llm_api/google_constants.py,sha256=ZdABT9l9l-qKcV2QCkVsv9kQbttx6JyIJoOWS8IMS5o,448
 letta/llm_api/google_vertex.py,sha256=Cqr73-jZJJvii1M_0QEmasNajOIJ5TDs5GabsCJjI04,14149
 letta/llm_api/helpers.py,sha256=pXBemF43Ywbwub5dc5V7Slw5K7lNlO0ae8dQBOXgDHs,16773
-letta/llm_api/llm_api_tools.py,sha256=wOm_NqBipE2M_XUN8y5KCDpZrXaEJuuvZ0llqfWFROo,26205
+letta/llm_api/llm_api_tools.py,sha256=AQe-5xiMRRwlx52D1H3LBxt_XHhl7RA3bHM5k9YLcQY,28442
 letta/llm_api/mistral.py,sha256=fHdfD9ug-rQIk2qn8tRKay1U6w9maF11ryhKi91FfXM,1593
-letta/llm_api/openai.py,sha256=NCL_pT35BfSsQ7LSfUdNuhI2hi1nEhBiGffLz_81Mcw,21271
+letta/llm_api/openai.py,sha256=wZHuQX018u2lZEKohWlzSYyQn9iYUSEuVn9QAaXIg0A,22038
 letta/local_llm/README.md,sha256=hFJyw5B0TU2jrh9nb0zGZMgdH-Ei1dSRfhvPQG_NSoU,168
 letta/local_llm/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 letta/local_llm/chat_completion_proxy.py,sha256=44rvabj2iXPswe5jFt0qWYCasSVXjkT31DivvDoWVMM,13543
@@ -117,7 +117,7 @@ letta/orm/provider.py,sha256=-qA9tvKTZgaM4D7CoDZZiA7zTgjaaWDV4jZvifQv_MM,805
 letta/orm/sandbox_config.py,sha256=DyOy_1_zCMlp13elCqPcuuA6OwUove6mrjhcpROTg50,4150
 letta/orm/source.py,sha256=z89VZUHV9K8Ew9JCYoZqUeRb1WEUKmrn0MMFkppaphE,2117
 letta/orm/sources_agents.py,sha256=Ik_PokCBrXRd9wXWomeNeb8EtLUwjb9VMZ8LWXqpK5A,473
-letta/orm/sqlalchemy_base.py,sha256=tIuKJ0CH4tYCzhpDqqr2EDD6AxGlnXI752oYZtlvM6o,22151
+letta/orm/sqlalchemy_base.py,sha256=8037lD9w5vY7rXhqmXg1mG78UywdHczef7-XGhW7b1U,22511
 letta/orm/sqlite_functions.py,sha256=JCScKiRlYCKxy9hChQ8wsk4GMKknZE24MunnG3fM1Gw,4255
 letta/orm/step.py,sha256=6t_PlVd8pW1Rd6JeECImBG2n9P-yif0Sl9Uzhb-m77w,2982
 letta/orm/tool.py,sha256=JEPHlM4ePaLaGtHpHhYdKCteHTRJnOFgQmfR5wL8TpA,2379
@@ -164,7 +164,7 @@ letta/schemas/letta_base.py,sha256=HTnSHJ2YSyhEdpY-vg9Y7ywqS1zzTjb9j5iVPYsuVSk,3
 letta/schemas/letta_message.py,sha256=QHzIEwnEJEkE02biCwyQo5IvL2fVq_whBRQD3vPYO48,9837
 letta/schemas/letta_request.py,sha256=dzy3kwb5j2QLaSV0sDlwISEMt2xxH3IiK-vR9xJV65k,1123
 letta/schemas/letta_response.py,sha256=pq-SxXQy5yZo1-DiAwV2mMURlUvz1Uu7HHR_tB1hMho,7139
-letta/schemas/llm_config.py,sha256=R0GQvw3DzsShpxZY6eWte4A2f1QjUrAAJ928DvLuBZs,5596
+letta/schemas/llm_config.py,sha256=bqq4LGE9layPcnnkzd_8d2SB8o1x8XdDzfd2ZkYQwcY,5611
 letta/schemas/llm_config_overrides.py,sha256=-oRglCTcajF6UAK3RAa0FLWVuKODPI1v403fDIWMAtA,1815
 letta/schemas/memory.py,sha256=GOYDfPKzbWftUWO9Hv4KW7xAi1EIQmC8zpP7qvEkVHw,10245
 letta/schemas/message.py,sha256=3-0zd-WuasJ1rZPDSqwJ7mRex399icPgpsVk19T7CoQ,37687
@@ -175,7 +175,7 @@ letta/schemas/openai/embedding_response.py,sha256=WKIZpXab1Av7v6sxKG8feW3ZtpQUNo
 letta/schemas/openai/openai.py,sha256=Hilo5BiLAGabzxCwnwfzK5QrWqwYD8epaEKFa4Pwndk,7970
 letta/schemas/organization.py,sha256=_RR8jlOOdJyG31q53IDdIvBVvIfAZrQWAGuvc5HmW24,788
 letta/schemas/passage.py,sha256=RG0vkaewEu4a_NAZM-FVyMammHjqpPP0RDYAdu27g6A,3723
-letta/schemas/providers.py,sha256=JUFdujrLmrGA84tybQ4aquxknEyiod47lRKrpStm9s4,41351
+letta/schemas/providers.py,sha256=k02mAFMn9F8zW6vjHSIL-OfBlfFharJQM43SrJfdmbs,43666
 letta/schemas/run.py,sha256=SRqPRziINIiPunjOhE_NlbnQYgxTvqmbauni_yfBQRA,2085
 letta/schemas/sandbox_config.py,sha256=Nz8K5brqe6jpf66KnTJ0-E7ZeFdPoBFGN-XOI35OeaY,5926
 letta/schemas/source.py,sha256=-BQVolcXA2ziCu2ztR6cbTdGUc8G7vGJy7rvpdf1hpg,2880
@@ -198,14 +198,14 @@ letta/server/rest_api/app.py,sha256=F9XAwZt5nPAE75gHSurXGe4mGmNvmI8DD7RssK8l124,
 letta/server/rest_api/auth/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 letta/server/rest_api/auth/index.py,sha256=fQBGyVylGSRfEMLQ17cZzrHd5Y1xiVylvPqH5Rl-lXQ,1378
 letta/server/rest_api/auth_token.py,sha256=725EFEIiNj4dh70hrSd94UysmFD8vcJLrTRfNHkzxDo,774
-letta/server/rest_api/chat_completions_interface.py,sha256=ORkZ-UmkmSx-FyGp85BDs9sQdNE49GehlmxVSxq4vLs,12073
+letta/server/rest_api/chat_completions_interface.py,sha256=htY1v3eyP6OmoDkBYog2fPZX_2cHsNKkxUAAAvuUbiE,10862
 letta/server/rest_api/interface.py,sha256=jAt7azrk27sNKNCZHgoIzYDIUbEgJ8hsC3Ef7OevH7U,57605
 letta/server/rest_api/optimistic_json_parser.py,sha256=1z4d9unmxMb0ou7owJ62uUQoNjNYf21FmaNdg0ZcqUU,6567
 letta/server/rest_api/routers/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 letta/server/rest_api/routers/openai/chat_completions/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-letta/server/rest_api/routers/openai/chat_completions/chat_completions.py,sha256=8u7n42niB_6X5uShMa76-ldAMdx7lHG-6ax8GEAr25s,17478
-letta/server/rest_api/routers/v1/__init__.py,sha256=kQUDemPYl4ZcOndpsexbLQRAObkuDN00ZYTnQJYiHNk,1269
-letta/server/rest_api/routers/v1/agents.py,sha256=Ac7IpwwxutLKziJIzTKTrh90cd3u0aC6Nixbxbo7bE0,26274
+letta/server/rest_api/routers/openai/chat_completions/chat_completions.py,sha256=yJo_oiv75QsD324lC0uR2B9su9WG1FxabQhAJKS--6s,5530
+letta/server/rest_api/routers/v1/__init__.py,sha256=Zi2th-okqT_RWAjB8MYGHX8CpHt1OSRyO5V8SJEp6UU,1361
+letta/server/rest_api/routers/v1/agents.py,sha256=gkvlga9MOgtYjlH2dOX4mnLpS6ZxKgMTGQcqHiIQrMU,26267
 letta/server/rest_api/routers/v1/blocks.py,sha256=0j7JX2BQzk31RyhvPZeEb-zh9ImXsVU4_8y5XMiR_WA,3900
 letta/server/rest_api/routers/v1/health.py,sha256=MoOjkydhGcJXTiuJrKIB0etVXiRMdTa51S8RQ8-50DQ,399
 letta/server/rest_api/routers/v1/identities.py,sha256=CCEF-e91yXOaHevEM0cpPoTHGnWNVljvKD5YC1nFvVk,4981
@@ -220,9 +220,10 @@ letta/server/rest_api/routers/v1/steps.py,sha256=IpCQuxpS34-4Qpgdv0FQJO-SffkFkW-
 letta/server/rest_api/routers/v1/tags.py,sha256=coydgvL6-9cuG2Hy5Ea7QY3inhTHlsf69w0tcZenBus,880
 letta/server/rest_api/routers/v1/tools.py,sha256=qIpXzrjihQgEnoGeiKxEzaKmzHIVvBO0V_lR1ciBHZs,13147
 letta/server/rest_api/routers/v1/users.py,sha256=G5DBHSkPfBgVHN2Wkm-rVYiLQAudwQczIq2Z3YLdbVo,2277
+letta/server/rest_api/routers/v1/voice.py,sha256=rvO6U3Vb9Q2l_kIzfsOgToKjpr06UvnovNN2NSzLcI0,13294
 letta/server/rest_api/static_files.py,sha256=NG8sN4Z5EJ8JVQdj19tkFa9iQ1kBPTab9f_CUxd_u4Q,3143
-letta/server/rest_api/utils.py,sha256=fsp5rojySucv3TSSaY9QQ5DUyE6GhxkyKg8rZQQhZVA,11980
-letta/server/server.py,sha256=3TjO0sef8UeQc_0efCoPT6vGz7f673KDZHwd15qKW2Q,57384
+letta/server/rest_api/utils.py,sha256=dpWC_MQijJ66wq4E5KPgXzHmGt6iNsj-Cw0kS4CLOnE,13693
+letta/server/server.py,sha256=RnK8LLmczo0rHDl7nFGuYFA3HmqODFqXSH6Wc2g1BZI,57664
 letta/server/startup.sh,sha256=qEi6dQHJRzEzDIgnIODj-RYp-O1XstfFpc6cFLkUzVs,1576
 letta/server/static_files/assets/index-048c9598.js,sha256=mR16XppvselwKCcNgONs4L7kZEVa4OEERm4lNZYtLSk,146819
 letta/server/static_files/assets/index-0e31b727.css,sha256=SBbja96uiQVLDhDOroHgM6NSl7tS4lpJRCREgSS_hA8,7672
@@ -240,7 +241,7 @@ letta/services/agent_manager.py,sha256=fiLhLgYh65LXGuzwsYD38oT3lfBhnyn6yK1uvY3De
 letta/services/block_manager.py,sha256=PkbiO59VimLyK6PolWR5O29uHxksCH6pP6K4Vkov3NA,5573
 letta/services/helpers/agent_manager_helper.py,sha256=MexqAGoc2e8Bso4_hJhBR6qyiFXtiB2MiMMqL-ur1a0,11302
 letta/services/helpers/tool_execution_helper.py,sha256=q8uSiQcX6VH_iNg5VNloZgC2JkH9lIOXBKCXYPx2Yac,6097
-letta/services/identity_manager.py,sha256=iFkiEqY8Uik8Bzla53Qh1empZvfJug914xzIwWCO9ho,7144
+letta/services/identity_manager.py,sha256=o1XMd5NG2gZ_YWrPOmisfFN0i-2ohoq8n1GM8F49Lf4,7144
 letta/services/job_manager.py,sha256=y7P03ijWrOY1HzhphrRdeEPUQz-wHcNvoi-zrefjbuE,13155
 letta/services/message_manager.py,sha256=miGZ24h6NC16wHiTP95ooo-M-o2x1rxRnj67p8vQyOY,10702
 letta/services/organization_manager.py,sha256=dhQ3cFPXWNYLfMjdahr2HsOAMJ1JtCEWj1G8Nei5MQc,3388
@@ -253,14 +254,14 @@ letta/services/step_manager.py,sha256=svDP_Mv64iIJKzYDFlDT5fabmWBvyjPPa0FIN--L7u
 letta/services/tool_execution_sandbox.py,sha256=mev4oCHy4B_uoXRccTirDNp_pSX_s5wbUVNz1oKrvBU,22067
 letta/services/tool_manager.py,sha256=Q-J8mZKw3zi5Ymxy48DiwpOcv1s6rqdSkRHE6pbnzKk,9568
 letta/services/user_manager.py,sha256=ScHbdJK9kNF8QXjsd3ZWGEL87n_Uyp3YwfKetOJmpHs,4304
-letta/settings.py,sha256=l6jrMyIK3tqLTXhBx5rN06nEEnJh4vafxBQWhUa2WAI,6579
+letta/settings.py,sha256=KAWy4bCE9OWwyNmMKabamFtf2iMWxFNT_fyxDdRuK1U,6635
 letta/streaming_interface.py,sha256=1vuAckIxo1p1UsXtDzE8LTUve5RoTZRdXUe-WBIYDWU,15818
 letta/streaming_utils.py,sha256=jLqFTVhUL76FeOuYk8TaRQHmPTf3HSRc2EoJwxJNK6U,11946
 letta/system.py,sha256=dnOrS2FlRMwijQnOvfrky0Lg8wEw-FUq2zzfAJOUSKA,8477
 letta/tracing.py,sha256=0uCH8j2ipTpS8Vt7bFl74sG5ckgBHy9fu-cyG9SBSsc,7464
 letta/utils.py,sha256=AdHrQ2OQ3V4XhJ1LtYwbLUO71j2IJY37cIUxXPgaaRY,32125
-letta_nightly-0.6.33.dev20250227104112.dist-info/LICENSE,sha256=mExtuZ_GYJgDEI38GWdiEYZizZS4KkVt2SF1g_GPNhI,10759
-letta_nightly-0.6.33.dev20250227104112.dist-info/METADATA,sha256=j9ZbD055qC94ibSLqbUQyTtz3VsjR8IYFocSOLJmDvA,22589
-letta_nightly-0.6.33.dev20250227104112.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
-letta_nightly-0.6.33.dev20250227104112.dist-info/entry_points.txt,sha256=2zdiyGNEZGV5oYBuS-y2nAAgjDgcC9yM_mHJBFSRt5U,40
-letta_nightly-0.6.33.dev20250227104112.dist-info/RECORD,,
+letta_nightly-0.6.34.dev20250228104059.dist-info/LICENSE,sha256=mExtuZ_GYJgDEI38GWdiEYZizZS4KkVt2SF1g_GPNhI,10759
+letta_nightly-0.6.34.dev20250228104059.dist-info/METADATA,sha256=v1uR1pKB59K--FH0HU0ibs86EjqU2nWCWehLX7KyL6M,22589
+letta_nightly-0.6.34.dev20250228104059.dist-info/WHEEL,sha256=FMvqSimYX_P7y0a7UY-_Mc83r5zkBZsCYPm7Lr0Bsq4,88
+letta_nightly-0.6.34.dev20250228104059.dist-info/entry_points.txt,sha256=2zdiyGNEZGV5oYBuS-y2nAAgjDgcC9yM_mHJBFSRt5U,40
+letta_nightly-0.6.34.dev20250228104059.dist-info/RECORD,,