letta-nightly 0.11.7.dev20251008104128__py3-none-any.whl → 0.12.0.dev20251009203644__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (27)
  1. letta/__init__.py +1 -1
  2. letta/agents/letta_agent_v3.py +33 -5
  3. letta/database_utils.py +161 -0
  4. letta/interfaces/anthropic_streaming_interface.py +21 -9
  5. letta/interfaces/gemini_streaming_interface.py +7 -5
  6. letta/interfaces/openai_streaming_interface.py +42 -30
  7. letta/llm_api/anthropic_client.py +36 -16
  8. letta/llm_api/google_vertex_client.py +1 -0
  9. letta/orm/__init__.py +1 -0
  10. letta/orm/run_metrics.py +82 -0
  11. letta/schemas/letta_message.py +29 -12
  12. letta/schemas/message.py +192 -51
  13. letta/schemas/run_metrics.py +21 -0
  14. letta/server/db.py +3 -10
  15. letta/server/rest_api/interface.py +85 -41
  16. letta/server/rest_api/routers/v1/providers.py +34 -0
  17. letta/server/rest_api/routers/v1/runs.py +27 -18
  18. letta/server/server.py +22 -0
  19. letta/services/context_window_calculator/token_counter.py +1 -1
  20. letta/services/helpers/run_manager_helper.py +5 -21
  21. letta/services/run_manager.py +63 -0
  22. letta/system.py +5 -1
  23. {letta_nightly-0.11.7.dev20251008104128.dist-info → letta_nightly-0.12.0.dev20251009203644.dist-info}/METADATA +1 -1
  24. {letta_nightly-0.11.7.dev20251008104128.dist-info → letta_nightly-0.12.0.dev20251009203644.dist-info}/RECORD +27 -24
  25. {letta_nightly-0.11.7.dev20251008104128.dist-info → letta_nightly-0.12.0.dev20251009203644.dist-info}/WHEEL +0 -0
  26. {letta_nightly-0.11.7.dev20251008104128.dist-info → letta_nightly-0.12.0.dev20251009203644.dist-info}/entry_points.txt +0 -0
  27. {letta_nightly-0.11.7.dev20251008104128.dist-info → letta_nightly-0.12.0.dev20251009203644.dist-info}/licenses/LICENSE +0 -0
letta/server/rest_api/interface.py CHANGED
@@ -562,14 +562,16 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
 
 if prev_message_type and prev_message_type != "tool_call_message":
     message_index += 1
+tool_call_delta = ToolCallDelta(
+    name=json_reasoning_content.get("name"),
+    arguments=json.dumps(json_reasoning_content.get("arguments")),
+    tool_call_id=None,
+)
 processed_chunk = ToolCallMessage(
     id=message_id,
     date=message_date,
-    tool_call=ToolCallDelta(
-        name=json_reasoning_content.get("name"),
-        arguments=json.dumps(json_reasoning_content.get("arguments")),
-        tool_call_id=None,
-    ),
+    tool_call=tool_call_delta,
+    tool_calls=tool_call_delta,
     name=name,
     otid=Message.generate_otid_from_id(message_id, message_index),
 )
@@ -703,14 +705,16 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
 else:
     if prev_message_type and prev_message_type != "tool_call_message":
         message_index += 1
+    tc_delta = ToolCallDelta(
+        name=tool_call_delta.get("name"),
+        arguments=tool_call_delta.get("arguments"),
+        tool_call_id=tool_call_delta.get("id"),
+    )
     processed_chunk = ToolCallMessage(
         id=message_id,
         date=message_date,
-        tool_call=ToolCallDelta(
-            name=tool_call_delta.get("name"),
-            arguments=tool_call_delta.get("arguments"),
-            tool_call_id=tool_call_delta.get("id"),
-        ),
+        tool_call=tc_delta,
+        tool_calls=tc_delta,
         name=name,
         otid=Message.generate_otid_from_id(message_id, message_index),
     )
@@ -779,14 +783,16 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
 else:
     if prev_message_type and prev_message_type != "tool_call_message":
         message_index += 1
+    tc_delta = ToolCallDelta(
+        name=self.function_name_buffer,
+        arguments=None,
+        tool_call_id=self.function_id_buffer,
+    )
     processed_chunk = ToolCallMessage(
         id=message_id,
         date=message_date,
-        tool_call=ToolCallDelta(
-            name=self.function_name_buffer,
-            arguments=None,
-            tool_call_id=self.function_id_buffer,
-        ),
+        tool_call=tc_delta,
+        tool_calls=tc_delta,
         name=name,
         otid=Message.generate_otid_from_id(message_id, message_index),
     )
@@ -843,14 +849,16 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
 combined_chunk = self.function_args_buffer + updates_main_json
 if prev_message_type and prev_message_type != "tool_call_message":
     message_index += 1
+tc_delta = ToolCallDelta(
+    name=None,
+    arguments=combined_chunk,
+    tool_call_id=self.function_id_buffer,
+)
 processed_chunk = ToolCallMessage(
     id=message_id,
     date=message_date,
-    tool_call=ToolCallDelta(
-        name=None,
-        arguments=combined_chunk,
-        tool_call_id=self.function_id_buffer,
-    ),
+    tool_call=tc_delta,
+    tool_calls=tc_delta,
     name=name,
     otid=Message.generate_otid_from_id(message_id, message_index),
 )
@@ -861,14 +869,16 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
 # If there's no buffer to clear, just output a new chunk with new data
 if prev_message_type and prev_message_type != "tool_call_message":
     message_index += 1
+tc_delta = ToolCallDelta(
+    name=None,
+    arguments=updates_main_json,
+    tool_call_id=self.function_id_buffer,
+)
 processed_chunk = ToolCallMessage(
     id=message_id,
     date=message_date,
-    tool_call=ToolCallDelta(
-        name=None,
-        arguments=updates_main_json,
-        tool_call_id=self.function_id_buffer,
-    ),
+    tool_call=tc_delta,
+    tool_calls=tc_delta,
     name=name,
     otid=Message.generate_otid_from_id(message_id, message_index),
 )
@@ -992,14 +1002,16 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
 else:
     if prev_message_type and prev_message_type != "tool_call_message":
         message_index += 1
+    tc_delta = ToolCallDelta(
+        name=tool_call_delta.get("name"),
+        arguments=tool_call_delta.get("arguments"),
+        tool_call_id=tool_call_delta.get("id"),
+    )
     processed_chunk = ToolCallMessage(
         id=message_id,
         date=message_date,
-        tool_call=ToolCallDelta(
-            name=tool_call_delta.get("name"),
-            arguments=tool_call_delta.get("arguments"),
-            tool_call_id=tool_call_delta.get("id"),
-        ),
+        tool_call=tc_delta,
+        tool_calls=tc_delta,
         name=name,
         otid=Message.generate_otid_from_id(message_id, message_index),
     )
@@ -1262,14 +1274,16 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
     # Store the ID of the tool call so allow skipping the corresponding response
     self.prev_assistant_message_id = function_call.id
 else:
+    tool_call_obj = ToolCall(
+        name=function_call.function.name,
+        arguments=function_call.function.arguments,
+        tool_call_id=function_call.id,
+    )
     processed_chunk = ToolCallMessage(
         id=msg_obj.id,
         date=msg_obj.created_at,
-        tool_call=ToolCall(
-            name=function_call.function.name,
-            arguments=function_call.function.arguments,
-            tool_call_id=function_call.id,
-        ),
+        tool_call=tool_call_obj,
+        tool_calls=tool_call_obj,
         name=msg_obj.name,
         otid=Message.generate_otid_from_id(msg_obj.id, chunk_index) if chunk_index is not None else None,
     )
@@ -1303,14 +1317,29 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
     # Skip this tool call receipt
     return
 else:
+    from letta.schemas.letta_message import ToolReturn as ToolReturnSchema
+
+    status = msg_obj.tool_returns[0].status if msg_obj.tool_returns else "success"
+    stdout = msg_obj.tool_returns[0].stdout if msg_obj.tool_returns else []
+    stderr = msg_obj.tool_returns[0].stderr if msg_obj.tool_returns else []
+
+    tool_return_obj = ToolReturnSchema(
+        tool_return=msg,
+        status=status,
+        tool_call_id=msg_obj.tool_call_id,
+        stdout=stdout,
+        stderr=stderr,
+    )
+
     new_message = ToolReturnMessage(
         id=msg_obj.id,
         date=msg_obj.created_at,
         tool_return=msg,
-        status=msg_obj.tool_returns[0].status if msg_obj.tool_returns else "success",
+        status=status,
         tool_call_id=msg_obj.tool_call_id,
-        stdout=msg_obj.tool_returns[0].stdout if msg_obj.tool_returns else [],
-        stderr=msg_obj.tool_returns[0].stderr if msg_obj.tool_returns else [],
+        stdout=stdout,
+        stderr=stderr,
+        tool_returns=[tool_return_obj],
         name=msg_obj.name,
         otid=Message.generate_otid_from_id(msg_obj.id, chunk_index) if chunk_index is not None else None,
     )
@@ -1319,14 +1348,29 @@ class StreamingServerInterface(AgentChunkStreamingInterface):
 msg = msg.replace("Error: ", "", 1)
 # new_message = {"function_return": msg, "status": "error"}
 assert msg_obj.tool_call_id is not None
+from letta.schemas.letta_message import ToolReturn as ToolReturnSchema
+
+status = msg_obj.tool_returns[0].status if msg_obj.tool_returns else "error"
+stdout = msg_obj.tool_returns[0].stdout if msg_obj.tool_returns else []
+stderr = msg_obj.tool_returns[0].stderr if msg_obj.tool_returns else []
+
+tool_return_obj = ToolReturnSchema(
+    tool_return=msg,
+    status=status,
+    tool_call_id=msg_obj.tool_call_id,
+    stdout=stdout,
+    stderr=stderr,
+)
+
 new_message = ToolReturnMessage(
     id=msg_obj.id,
     date=msg_obj.created_at,
     tool_return=msg,
-    status=msg_obj.tool_returns[0].status if msg_obj.tool_returns else "error",
+    status=status,
     tool_call_id=msg_obj.tool_call_id,
-    stdout=msg_obj.tool_returns[0].stdout if msg_obj.tool_returns else [],
-    stderr=msg_obj.tool_returns[0].stderr if msg_obj.tool_returns else [],
+    stdout=stdout,
+    stderr=stderr,
+    tool_returns=[tool_return_obj],
     name=msg_obj.name,
     otid=Message.generate_otid_from_id(msg_obj.id, chunk_index) if chunk_index is not None else None,
 )
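
Note: across all of the interface.py hunks the pattern is the same. The ToolCallDelta/ToolCall (or ToolReturn) is built once, bound to a local, and passed to both the legacy singular field (`tool_call`, and the singular tool-return fields) and the new plural field (`tool_calls`, `tool_returns`), with both carrying the same payload. A minimal consumer-side sketch of how client code might read either field; the exact JSON shape of a streamed chunk is an assumption here, not taken from this diff:

# Minimal sketch (assumed chunk shape): prefer the newer plural field,
# fall back to the legacy singular one.
from typing import Any, Optional


def extract_tool_call(chunk: dict[str, Any]) -> Optional[dict[str, Any]]:
    """Return the tool-call payload from a streamed chunk, if any."""
    tool_calls = chunk.get("tool_calls")
    if tool_calls is not None:
        # In this release both fields carry the same ToolCallDelta payload.
        return tool_calls if isinstance(tool_calls, dict) else tool_calls[0]
    return chunk.get("tool_call")


# Example usage with a chunk shaped like the ToolCallMessage above:
chunk = {
    "id": "message-123",
    "message_type": "tool_call_message",
    "tool_call": {"name": "send_message", "arguments": '{"text": "hi"}'},
    "tool_calls": {"name": "send_message", "arguments": '{"text": "hi"}'},
}
print(extract_tool_call(chunk))
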
letta/server/rest_api/routers/v1/providers.py CHANGED
@@ -120,6 +120,40 @@ async def check_provider(
     raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=f"{e}")
 
 
+@router.post("/{provider_id}/check", response_model=None, operation_id="check_existing_provider")
+async def check_existing_provider(
+    provider_id: str,
+    headers: HeaderParams = Depends(get_headers),
+    server: "SyncServer" = Depends(get_letta_server),
+):
+    """
+    Verify the API key and additional parameters for an existing provider.
+    """
+    try:
+        actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
+        provider = await server.provider_manager.get_provider_async(provider_id=provider_id, actor=actor)
+
+        # Create a ProviderCheck from the existing provider
+        provider_check = ProviderCheck(
+            provider_type=provider.provider_type,
+            api_key=provider.api_key,
+            base_url=provider.base_url,
+        )
+
+        await server.provider_manager.check_provider_api_key(provider_check=provider_check)
+        return JSONResponse(
+            status_code=status.HTTP_200_OK, content={"message": f"Valid api key for provider_type={provider.provider_type.value}"}
+        )
+    except LLMAuthenticationError as e:
+        raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail=f"{e.message}")
+    except NoResultFound:
+        raise HTTPException(status_code=404, detail=f"Provider provider_id={provider_id} not found for user_id={actor.id}.")
+    except HTTPException:
+        raise
+    except Exception as e:
+        raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=f"{e}")
+
+
 @router.delete("/{provider_id}", response_model=None, operation_id="delete_provider")
 async def delete_provider(
     provider_id: str,
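
Note: the new route re-validates the credentials already stored on a provider record. A hypothetical client call is sketched below; the `/v1/providers` prefix, base URL, and auth header are assumptions, not taken from this diff:

# Hypothetical call to the new check endpoint.
import requests

BASE_URL = "http://localhost:8283"  # assumed local Letta server
provider_id = "provider-00000000-0000-0000-0000-000000000000"  # placeholder id

resp = requests.post(
    f"{BASE_URL}/v1/providers/{provider_id}/check",
    headers={"Authorization": "Bearer <token>"},  # auth scheme assumed
)
if resp.status_code == 200:
    print(resp.json()["message"])  # "Valid api key for provider_type=..."
elif resp.status_code == 401:
    print("Stored API key failed authentication:", resp.json()["detail"])
elif resp.status_code == 404:
    print("Provider not found:", resp.json()["detail"])
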
letta/server/rest_api/routers/v1/runs.py CHANGED
@@ -13,6 +13,7 @@ from letta.schemas.letta_request import RetrieveStreamRequest
 from letta.schemas.letta_stop_reason import StopReasonType
 from letta.schemas.openai.chat_completion_response import UsageStatistics
 from letta.schemas.run import Run
+from letta.schemas.run_metrics import RunMetrics
 from letta.schemas.step import Step
 from letta.server.rest_api.dependencies import HeaderParams, get_headers, get_letta_server
 from letta.server.rest_api.redis_stream_manager import redis_sse_stream_generator
@@ -224,6 +225,23 @@ async def retrieve_run_usage(
     raise HTTPException(status_code=404, detail=f"Run '{run_id}' not found")
 
 
+@router.get("/{run_id}/metrics", response_model=RunMetrics, operation_id="retrieve_metrics_for_run")
+async def retrieve_metrics_for_run(
+    run_id: str,
+    headers: HeaderParams = Depends(get_headers),
+    server: "SyncServer" = Depends(get_letta_server),
+):
+    """
+    Get run metrics by run ID.
+    """
+    try:
+        actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
+        runs_manager = RunManager()
+        return await runs_manager.get_run_metrics_async(run_id=run_id, actor=actor)
+    except NoResultFound:
+        raise HTTPException(status_code=404, detail="Run metrics not found")
+
+
 @router.get(
     "/{run_id}/steps",
     response_model=List[Step],
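
Note: this adds a read-only metrics endpoint backed by the new RunMetrics table. A minimal sketch of querying it; the `/v1/runs` prefix and base URL are assumptions, and the response fields shown (`run_ns`, `num_steps`) are the ones populated in run_manager.py below:

# Hypothetical request against the new metrics route.
import requests

BASE_URL = "http://localhost:8283"  # assumed local Letta server
run_id = "run-00000000-0000-0000-0000-000000000000"  # placeholder id

resp = requests.get(f"{BASE_URL}/v1/runs/{run_id}/metrics")
if resp.status_code == 404:
    print("Run metrics not found")
else:
    metrics = resp.json()
    # run_ns is recorded in nanoseconds (see run_manager.py), so convert for display.
    if metrics.get("run_ns") is not None:
        print(f"run took {metrics['run_ns'] / 1e9:.2f}s over {metrics.get('num_steps')} steps")
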
@@ -247,18 +265,14 @@ async def list_run_steps(
 actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
 runs_manager = RunManager()
 
-try:
-    steps = await runs_manager.get_run_steps(
-        run_id=run_id,
-        actor=actor,
-        limit=limit,
-        before=before,
-        after=after,
-        ascending=(order == "asc"),
-    )
-    return steps
-except NoResultFound as e:
-    raise HTTPException(status_code=404, detail=str(e))
+return await runs_manager.get_run_steps(
+    run_id=run_id,
+    actor=actor,
+    limit=limit,
+    before=before,
+    after=after,
+    ascending=(order == "asc"),
+)
 
 
 @router.delete("/{run_id}", response_model=Run, operation_id="delete_run")
@@ -272,12 +286,7 @@ async def delete_run(
 """
 actor = await server.user_manager.get_actor_or_default_async(actor_id=headers.actor_id)
 runs_manager = RunManager()
-
-try:
-    run = await runs_manager.delete_run_by_id(run_id=run_id, actor=actor)
-    return run
-except NoResultFound:
-    raise HTTPException(status_code=404, detail="Run not found")
+return await runs_manager.delete_run(run_id=run_id, actor=actor)
 
 
 @router.post(
letta/server/server.py CHANGED
@@ -1239,6 +1239,16 @@ class SyncServer(object):
     function_args=tool_args,
     tool=tool,
 )
+from letta.schemas.letta_message import ToolReturn as ToolReturnSchema
+
+tool_return_obj = ToolReturnSchema(
+    tool_return=str(tool_execution_result.func_return),
+    status=tool_execution_result.status,
+    tool_call_id="null",
+    stdout=tool_execution_result.stdout,
+    stderr=tool_execution_result.stderr,
+)
+
 return ToolReturnMessage(
     id="null",
     tool_call_id="null",
@@ -1247,10 +1257,21 @@ class SyncServer(object):
     tool_return=str(tool_execution_result.func_return),
     stdout=tool_execution_result.stdout,
     stderr=tool_execution_result.stderr,
+    tool_returns=[tool_return_obj],
 )
 
 except Exception as e:
     func_return = get_friendly_error_msg(function_name=tool.name, exception_name=type(e).__name__, exception_message=str(e))
+    from letta.schemas.letta_message import ToolReturn as ToolReturnSchema
+
+    tool_return_obj = ToolReturnSchema(
+        tool_return=func_return,
+        status="error",
+        tool_call_id="null",
+        stdout=[],
+        stderr=[traceback.format_exc()],
+    )
+
     return ToolReturnMessage(
         id="null",
         tool_call_id="null",
@@ -1259,6 +1280,7 @@ class SyncServer(object):
     tool_return=func_return,
     stdout=[],
     stderr=[traceback.format_exc()],
+    tool_returns=[tool_return_obj],
 )
 
 # MCP wrappers
letta/services/context_window_calculator/token_counter.py CHANGED
@@ -74,7 +74,7 @@ class AnthropicTokenCounter(TokenCounter):
     return await self.client.count_tokens(model=self.model, tools=tools)
 
 def convert_messages(self, messages: List[Any]) -> List[Dict[str, Any]]:
-    return Message.to_anthropic_dicts_from_list(messages)
+    return Message.to_anthropic_dicts_from_list(messages, current_model=self.model)
 
 
 class TiktokenCounter(TokenCounter):
letta/services/helpers/run_manager_helper.py CHANGED
@@ -2,14 +2,10 @@ from datetime import datetime
 from typing import Optional
 
 from sqlalchemy import asc, desc, nulls_last, select
-from letta.settings import DatabaseChoice, settings
 
 from letta.orm.run import Run as RunModel
-from letta.settings import DatabaseChoice, settings
-from sqlalchemy import asc, desc
-from typing import Optional
-
 from letta.services.helpers.agent_manager_helper import _cursor_filter
+from letta.settings import DatabaseChoice, settings
 
 
 async def _apply_pagination_async(
@@ -29,17 +25,11 @@ async def _apply_pagination_async(
 sort_nulls_last = False
 
 if after:
-    result = (
-        await session.execute(
-            select(sort_column, RunModel.id).where(RunModel.id == after)
-        )
-    ).first()
+    result = (await session.execute(select(sort_column, RunModel.id).where(RunModel.id == after))).first()
     if result:
         after_sort_value, after_id = result
         # SQLite does not support as granular timestamping, so we need to round the timestamp
-        if settings.database_engine is DatabaseChoice.SQLITE and isinstance(
-            after_sort_value, datetime
-        ):
+        if settings.database_engine is DatabaseChoice.SQLITE and isinstance(after_sort_value, datetime):
             after_sort_value = after_sort_value.strftime("%Y-%m-%d %H:%M:%S")
         query = query.where(
             _cursor_filter(
@@ -53,17 +43,11 @@ async def _apply_pagination_async(
 )
 
 if before:
-    result = (
-        await session.execute(
-            select(sort_column, RunModel.id).where(RunModel.id == before)
-        )
-    ).first()
+    result = (await session.execute(select(sort_column, RunModel.id).where(RunModel.id == before))).first()
     if result:
         before_sort_value, before_id = result
         # SQLite does not support as granular timestamping, so we need to round the timestamp
-        if settings.database_engine is DatabaseChoice.SQLITE and isinstance(
-            before_sort_value, datetime
-        ):
+        if settings.database_engine is DatabaseChoice.SQLITE and isinstance(before_sort_value, datetime):
             before_sort_value = before_sort_value.strftime("%Y-%m-%d %H:%M:%S")
         query = query.where(
             _cursor_filter(
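
Note: both branches keep the same SQLite workaround: because SQLite timestamps are compared at second granularity, the cursor value is truncated with strftime before it is used in the filter. A purely illustrative example of that truncation:

# Illustration only: a datetime with sub-second precision is rounded down
# to whole seconds before being compared against the stored column.
from datetime import datetime

after_sort_value = datetime(2025, 10, 9, 20, 36, 44, 123456)
print(after_sort_value.strftime("%Y-%m-%d %H:%M:%S"))  # "2025-10-09 20:36:44"
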
letta/services/run_manager.py CHANGED
@@ -8,9 +8,11 @@ from sqlalchemy.orm import Session
 
 from letta.helpers.datetime_helpers import get_utc_time
 from letta.log import get_logger
+from letta.orm.agent import Agent as AgentModel
 from letta.orm.errors import NoResultFound
 from letta.orm.message import Message as MessageModel
 from letta.orm.run import Run as RunModel
+from letta.orm.run_metrics import RunMetrics as RunMetricsModel
 from letta.orm.sqlalchemy_base import AccessType
 from letta.orm.step import Step as StepModel
 from letta.otel.tracing import log_event, trace_method
@@ -21,6 +23,7 @@ from letta.schemas.letta_response import LettaResponse
 from letta.schemas.letta_stop_reason import LettaStopReason, StopReasonType
 from letta.schemas.message import Message as PydanticMessage
 from letta.schemas.run import Run as PydanticRun, RunUpdate
+from letta.schemas.run_metrics import RunMetrics as PydanticRunMetrics
 from letta.schemas.step import Step as PydanticStep
 from letta.schemas.usage import LettaUsageStatistics
 from letta.schemas.user import User as PydanticUser
@@ -62,6 +65,23 @@ class RunManager:
 run = RunModel(**run_data)
 run.organization_id = organization_id
 run = await run.create_async(session, actor=actor, no_commit=True, no_refresh=True)
+
+# Create run metrics with start timestamp
+import time
+
+# Get the project_id from the agent
+agent = await session.get(AgentModel, agent_id)
+project_id = agent.project_id if agent else None
+
+metrics = RunMetricsModel(
+    id=run.id,
+    organization_id=organization_id,
+    agent_id=agent_id,
+    project_id=project_id,
+    run_start_ns=int(time.time() * 1e9),  # Current time in nanoseconds
+    num_steps=0,  # Initialize to 0
+)
+await metrics.create_async(session)
 await session.commit()
 
 return run.to_pydantic()
@@ -178,6 +198,21 @@ class RunManager:
 await run.update_async(db_session=session, actor=actor, no_commit=True, no_refresh=True)
 final_metadata = run.metadata_
 pydantic_run = run.to_pydantic()
+
+await session.commit()
+
+# update run metrics table
+num_steps = len(await self.step_manager.list_steps_async(run_id=run_id, actor=actor))
+async with db_registry.async_session() as session:
+    metrics = await RunMetricsModel.read_async(db_session=session, identifier=run_id, actor=actor)
+    # Calculate runtime if run is completing
+    if is_terminal_update and metrics.run_start_ns:
+        import time
+
+        current_ns = int(time.time() * 1e9)
+        metrics.run_ns = current_ns - metrics.run_start_ns
+    metrics.num_steps = num_steps
+    await metrics.update_async(db_session=session, actor=actor, no_commit=True, no_refresh=True)
 await session.commit()
 
 # Dispatch callback outside of database session if needed
@@ -299,3 +334,31 @@ class RunManager:
 raise NoResultFound(f"Run with id {run_id} not found")
 pydantic_run = run.to_pydantic()
 return pydantic_run.request_config
+
+@enforce_types
+async def get_run_metrics_async(self, run_id: str, actor: PydanticUser) -> PydanticRunMetrics:
+    """Get metrics for a run."""
+    async with db_registry.async_session() as session:
+        metrics = await RunMetricsModel.read_async(db_session=session, identifier=run_id, actor=actor)
+        return metrics.to_pydantic()
+
+@enforce_types
+async def get_run_steps(
+    self,
+    run_id: str,
+    actor: PydanticUser,
+    limit: Optional[int] = 100,
+    before: Optional[str] = None,
+    after: Optional[str] = None,
+    ascending: bool = False,
+) -> List[PydanticStep]:
+    """Get steps for a run."""
+    async with db_registry.async_session() as session:
+        run = await RunModel.read_async(db_session=session, identifier=run_id, actor=actor, access_type=AccessType.ORGANIZATION)
+        if not run:
+            raise NoResultFound(f"Run with id {run_id} not found")
+
+        steps = await self.step_manager.list_steps_async(
+            actor=actor, run_id=run_id, limit=limit, before=before, after=after, order="asc" if ascending else "desc"
+        )
+        return steps
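
Note: the duration bookkeeping above records a wall-clock start in nanoseconds when the run row is created and stores `run_ns = now - run_start_ns` when the run reaches a terminal state. A stand-alone sketch of the same pattern, not taken verbatim from the diff:

# Stand-alone sketch of the timing pattern used by RunManager.
# time.time_ns() gives the same epoch nanoseconds as int(time.time() * 1e9)
# without the float round-trip.
import time

run_start_ns = time.time_ns()           # recorded when the run row is created
# ... run executes ...
run_ns = time.time_ns() - run_start_ns  # stored when the run completes
print(f"run took {run_ns / 1e9:.3f}s")
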
letta/system.py CHANGED
@@ -248,7 +248,11 @@ def unpack_message(packed_message: str) -> str:
     warnings.warn(f"Was unable to find 'message' field in packed message object: '{packed_message}'")
     return packed_message
 else:
-    message_type = message_json["type"]
+    try:
+        message_type = message_json["type"]
+    except:
+        return packed_message
+
     if message_type != "user_message":
         warnings.warn(f"Expected type to be 'user_message', but was '{message_type}', so not unpacking: '{packed_message}'")
         return packed_message
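
Note: unpack_message now tolerates packed JSON that carries a "message" field but no "type" key, returning the original string instead of raising KeyError. An illustrative input (not letta's actual packing format) that would hit the new branch:

# Illustration only: a payload with "message" but no "type" key.
import json

packed_message = json.dumps({"message": "hello"})  # hypothetical payload
# Previously message_json["type"] raised KeyError; now unpack_message(packed_message)
# returns packed_message unchanged via the new except branch.
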
{letta_nightly-0.11.7.dev20251008104128.dist-info → letta_nightly-0.12.0.dev20251009203644.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: letta-nightly
-Version: 0.11.7.dev20251008104128
+Version: 0.12.0.dev20251009203644
 Summary: Create LLM agents with long-term memory and custom tools
 Author-email: Letta Team <contact@letta.com>
 License: Apache License