letta-nightly 0.11.7.dev20251007104119__py3-none-any.whl → 0.11.7.dev20251008104128__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (145)
  1. letta/adapters/letta_llm_adapter.py +1 -0
  2. letta/adapters/letta_llm_request_adapter.py +0 -1
  3. letta/adapters/letta_llm_stream_adapter.py +7 -2
  4. letta/adapters/simple_llm_request_adapter.py +88 -0
  5. letta/adapters/simple_llm_stream_adapter.py +192 -0
  6. letta/agents/agent_loop.py +6 -0
  7. letta/agents/ephemeral_summary_agent.py +2 -1
  8. letta/agents/helpers.py +142 -6
  9. letta/agents/letta_agent.py +13 -33
  10. letta/agents/letta_agent_batch.py +2 -4
  11. letta/agents/letta_agent_v2.py +87 -77
  12. letta/agents/letta_agent_v3.py +899 -0
  13. letta/agents/voice_agent.py +2 -6
  14. letta/constants.py +8 -4
  15. letta/errors.py +40 -0
  16. letta/functions/function_sets/base.py +84 -4
  17. letta/functions/function_sets/multi_agent.py +0 -3
  18. letta/functions/schema_generator.py +113 -71
  19. letta/groups/dynamic_multi_agent.py +3 -2
  20. letta/groups/helpers.py +1 -2
  21. letta/groups/round_robin_multi_agent.py +3 -2
  22. letta/groups/sleeptime_multi_agent.py +3 -2
  23. letta/groups/sleeptime_multi_agent_v2.py +1 -1
  24. letta/groups/sleeptime_multi_agent_v3.py +17 -17
  25. letta/groups/supervisor_multi_agent.py +84 -80
  26. letta/helpers/converters.py +3 -0
  27. letta/helpers/message_helper.py +4 -0
  28. letta/helpers/tool_rule_solver.py +92 -5
  29. letta/interfaces/anthropic_streaming_interface.py +409 -0
  30. letta/interfaces/gemini_streaming_interface.py +296 -0
  31. letta/interfaces/openai_streaming_interface.py +752 -1
  32. letta/llm_api/anthropic_client.py +126 -16
  33. letta/llm_api/bedrock_client.py +4 -2
  34. letta/llm_api/deepseek_client.py +4 -1
  35. letta/llm_api/google_vertex_client.py +123 -42
  36. letta/llm_api/groq_client.py +4 -1
  37. letta/llm_api/llm_api_tools.py +11 -4
  38. letta/llm_api/llm_client_base.py +6 -2
  39. letta/llm_api/openai.py +32 -2
  40. letta/llm_api/openai_client.py +423 -18
  41. letta/llm_api/xai_client.py +4 -1
  42. letta/main.py +9 -5
  43. letta/memory.py +1 -0
  44. letta/orm/__init__.py +1 -1
  45. letta/orm/agent.py +10 -0
  46. letta/orm/block.py +7 -16
  47. letta/orm/blocks_agents.py +8 -2
  48. letta/orm/files_agents.py +2 -0
  49. letta/orm/job.py +7 -5
  50. letta/orm/mcp_oauth.py +1 -0
  51. letta/orm/message.py +21 -6
  52. letta/orm/organization.py +2 -0
  53. letta/orm/provider.py +6 -2
  54. letta/orm/run.py +71 -0
  55. letta/orm/sandbox_config.py +7 -1
  56. letta/orm/sqlalchemy_base.py +0 -306
  57. letta/orm/step.py +6 -5
  58. letta/orm/step_metrics.py +5 -5
  59. letta/otel/tracing.py +28 -3
  60. letta/plugins/defaults.py +4 -4
  61. letta/prompts/system_prompts/__init__.py +2 -0
  62. letta/prompts/system_prompts/letta_v1.py +25 -0
  63. letta/schemas/agent.py +3 -2
  64. letta/schemas/agent_file.py +9 -3
  65. letta/schemas/block.py +23 -10
  66. letta/schemas/enums.py +21 -2
  67. letta/schemas/job.py +17 -4
  68. letta/schemas/letta_message_content.py +71 -2
  69. letta/schemas/letta_stop_reason.py +5 -5
  70. letta/schemas/llm_config.py +53 -3
  71. letta/schemas/memory.py +1 -1
  72. letta/schemas/message.py +504 -117
  73. letta/schemas/openai/responses_request.py +64 -0
  74. letta/schemas/providers/__init__.py +2 -0
  75. letta/schemas/providers/anthropic.py +16 -0
  76. letta/schemas/providers/ollama.py +115 -33
  77. letta/schemas/providers/openrouter.py +52 -0
  78. letta/schemas/providers/vllm.py +2 -1
  79. letta/schemas/run.py +48 -42
  80. letta/schemas/step.py +2 -2
  81. letta/schemas/step_metrics.py +1 -1
  82. letta/schemas/tool.py +15 -107
  83. letta/schemas/tool_rule.py +88 -5
  84. letta/serialize_schemas/marshmallow_agent.py +1 -0
  85. letta/server/db.py +86 -408
  86. letta/server/rest_api/app.py +61 -10
  87. letta/server/rest_api/dependencies.py +14 -0
  88. letta/server/rest_api/redis_stream_manager.py +19 -8
  89. letta/server/rest_api/routers/v1/agents.py +364 -292
  90. letta/server/rest_api/routers/v1/blocks.py +14 -20
  91. letta/server/rest_api/routers/v1/identities.py +45 -110
  92. letta/server/rest_api/routers/v1/internal_templates.py +21 -0
  93. letta/server/rest_api/routers/v1/jobs.py +23 -6
  94. letta/server/rest_api/routers/v1/messages.py +1 -1
  95. letta/server/rest_api/routers/v1/runs.py +126 -85
  96. letta/server/rest_api/routers/v1/sandbox_configs.py +10 -19
  97. letta/server/rest_api/routers/v1/tools.py +281 -594
  98. letta/server/rest_api/routers/v1/voice.py +1 -1
  99. letta/server/rest_api/streaming_response.py +29 -29
  100. letta/server/rest_api/utils.py +122 -64
  101. letta/server/server.py +160 -887
  102. letta/services/agent_manager.py +236 -919
  103. letta/services/agent_serialization_manager.py +16 -0
  104. letta/services/archive_manager.py +0 -100
  105. letta/services/block_manager.py +211 -168
  106. letta/services/file_manager.py +1 -1
  107. letta/services/files_agents_manager.py +24 -33
  108. letta/services/group_manager.py +0 -142
  109. letta/services/helpers/agent_manager_helper.py +7 -2
  110. letta/services/helpers/run_manager_helper.py +85 -0
  111. letta/services/job_manager.py +96 -411
  112. letta/services/lettuce/__init__.py +6 -0
  113. letta/services/lettuce/lettuce_client_base.py +86 -0
  114. letta/services/mcp_manager.py +38 -6
  115. letta/services/message_manager.py +165 -362
  116. letta/services/organization_manager.py +0 -36
  117. letta/services/passage_manager.py +0 -345
  118. letta/services/provider_manager.py +0 -80
  119. letta/services/run_manager.py +301 -0
  120. letta/services/sandbox_config_manager.py +0 -234
  121. letta/services/step_manager.py +62 -39
  122. letta/services/summarizer/summarizer.py +9 -7
  123. letta/services/telemetry_manager.py +0 -16
  124. letta/services/tool_executor/builtin_tool_executor.py +35 -0
  125. letta/services/tool_executor/core_tool_executor.py +397 -2
  126. letta/services/tool_executor/files_tool_executor.py +3 -3
  127. letta/services/tool_executor/multi_agent_tool_executor.py +30 -15
  128. letta/services/tool_executor/tool_execution_manager.py +6 -8
  129. letta/services/tool_executor/tool_executor_base.py +3 -3
  130. letta/services/tool_manager.py +85 -339
  131. letta/services/tool_sandbox/base.py +24 -13
  132. letta/services/tool_sandbox/e2b_sandbox.py +16 -1
  133. letta/services/tool_schema_generator.py +123 -0
  134. letta/services/user_manager.py +0 -99
  135. letta/settings.py +20 -4
  136. {letta_nightly-0.11.7.dev20251007104119.dist-info → letta_nightly-0.11.7.dev20251008104128.dist-info}/METADATA +3 -5
  137. {letta_nightly-0.11.7.dev20251007104119.dist-info → letta_nightly-0.11.7.dev20251008104128.dist-info}/RECORD +140 -132
  138. letta/agents/temporal/activities/__init__.py +0 -4
  139. letta/agents/temporal/activities/example_activity.py +0 -7
  140. letta/agents/temporal/activities/prepare_messages.py +0 -10
  141. letta/agents/temporal/temporal_agent_workflow.py +0 -56
  142. letta/agents/temporal/types.py +0 -25
  143. {letta_nightly-0.11.7.dev20251007104119.dist-info → letta_nightly-0.11.7.dev20251008104128.dist-info}/WHEEL +0 -0
  144. {letta_nightly-0.11.7.dev20251007104119.dist-info → letta_nightly-0.11.7.dev20251008104128.dist-info}/entry_points.txt +0 -0
  145. {letta_nightly-0.11.7.dev20251007104119.dist-info → letta_nightly-0.11.7.dev20251008104128.dist-info}/licenses/LICENSE +0 -0
letta/server/rest_api/routers/v1/voice.py
@@ -52,7 +52,7 @@ async def create_voice_chat_completions(
         message_manager=server.message_manager,
         agent_manager=server.agent_manager,
         block_manager=server.block_manager,
-        job_manager=server.job_manager,
+        run_manager=server.run_manager,
         passage_manager=server.passage_manager,
         actor=actor,
     )
letta/server/rest_api/streaming_response.py
@@ -13,23 +13,23 @@ from starlette.types import Send
 
 from letta.errors import LettaUnexpectedStreamCancellationError, PendingApprovalError
 from letta.log import get_logger
-from letta.schemas.enums import JobStatus
+from letta.schemas.enums import RunStatus
 from letta.schemas.letta_ping import LettaPing
 from letta.schemas.user import User
 from letta.server.rest_api.utils import capture_sentry_exception
-from letta.services.job_manager import JobManager
+from letta.services.run_manager import RunManager
 from letta.settings import settings
 from letta.utils import safe_create_task
 
 logger = get_logger(__name__)
 
 
-class JobCancelledException(Exception):
-    """Exception raised when a job is explicitly cancelled (not due to client timeout)"""
+class RunCancelledException(Exception):
+    """Exception raised when a run is explicitly cancelled (not due to client timeout)"""
 
-    def __init__(self, job_id: str, message: str = None):
-        self.job_id = job_id
-        super().__init__(message or f"Job {job_id} was explicitly cancelled")
+    def __init__(self, run_id: str, message: str = None):
+        self.run_id = run_id
+        super().__init__(message or f"Run {run_id} was explicitly cancelled")
 
 
 async def add_keepalive_to_stream(
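For orientation, a minimal consumer-side sketch of how the renamed exception is meant to be told apart from a client timeout (the consume coroutine and stream value are hypothetical; only RunCancelledException and its run_id attribute come from this diff):

    import asyncio

    async def consume(stream):
        try:
            async for chunk in stream:
                ...  # forward each chunk to the client
        except RunCancelledException as exc:
            # explicit cancellation through the runs API; handled gracefully
            print(f"run {exc.run_id} was cancelled")
        except asyncio.CancelledError:
            # client disconnect or timeout; re-raise so cleanup still runs
            raise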
@@ -109,21 +109,21 @@ async def add_keepalive_to_stream(
 # TODO (cliandy) wrap this and handle types
 async def cancellation_aware_stream_wrapper(
     stream_generator: AsyncIterator[str | bytes],
-    job_manager: JobManager,
-    job_id: str,
+    run_manager: RunManager,
+    run_id: str,
     actor: User,
     cancellation_check_interval: float = 0.5,
 ) -> AsyncIterator[str | bytes]:
     """
-    Wraps a stream generator to provide real-time job cancellation checking.
+    Wraps a stream generator to provide real-time run cancellation checking.
 
-    This wrapper periodically checks for job cancellation while streaming and
+    This wrapper periodically checks for run cancellation while streaming and
     can interrupt the stream at any point, not just at step boundaries.
 
     Args:
         stream_generator: The original stream generator to wrap
-        job_manager: Job manager instance for checking job status
-        job_id: ID of the job to monitor for cancellation
+        run_manager: Run manager instance for checking run status
+        run_id: ID of the run to monitor for cancellation
         actor: User/actor making the request
         cancellation_check_interval: How often to check for cancellation (seconds)
 
@@ -131,7 +131,7 @@ async def cancellation_aware_stream_wrapper(
         Stream chunks from the original generator until cancelled
 
     Raises:
-        asyncio.CancelledError: If the job is cancelled during streaming
+        asyncio.CancelledError: If the run is cancelled during streaming
     """
     last_cancellation_check = asyncio.get_event_loop().time()
 
@@ -141,32 +141,32 @@
            current_time = asyncio.get_event_loop().time()
            if current_time - last_cancellation_check >= cancellation_check_interval:
                try:
-                    job = await job_manager.get_job_by_id_async(job_id=job_id, actor=actor)
-                    if job.status == JobStatus.cancelled:
-                        logger.info(f"Stream cancelled for job {job_id}, interrupting stream")
+                    run = await run_manager.get_run_by_id_async(run_id=run_id, actor=actor)
+                    if run.status == RunStatus.cancelled:
+                        logger.info(f"Stream cancelled for run {run_id}, interrupting stream")
                        # Send cancellation event to client
                        cancellation_event = {"message_type": "stop_reason", "stop_reason": "cancelled"}
                        yield f"data: {json.dumps(cancellation_event)}\n\n"
-                        # Raise custom exception for explicit job cancellation
-                        raise JobCancelledException(job_id, f"Job {job_id} was cancelled")
+                        # Raise custom exception for explicit run cancellation
+                        raise RunCancelledException(run_id, f"Run {run_id} was cancelled")
                except Exception as e:
                    # Log warning but don't fail the stream if cancellation check fails
-                    logger.warning(f"Failed to check job cancellation for job {job_id}: {e}")
+                    logger.warning(f"Failed to check run cancellation for run {run_id}: {e}")
 
                last_cancellation_check = current_time
 
            yield chunk
 
-    except JobCancelledException:
-        # Re-raise JobCancelledException to distinguish from client timeout
-        logger.info(f"Stream for job {job_id} was explicitly cancelled and cleaned up")
+    except RunCancelledException:
+        # Re-raise RunCancelledException to distinguish from client timeout
+        logger.info(f"Stream for run {run_id} was explicitly cancelled and cleaned up")
        raise
    except asyncio.CancelledError:
        # Re-raise CancelledError (likely client timeout) to ensure proper cleanup
-        logger.info(f"Stream for job {job_id} was cancelled (likely client timeout) and cleaned up")
+        logger.info(f"Stream for run {run_id} was cancelled (likely client timeout) and cleaned up")
        raise
    except Exception as e:
-        logger.error(f"Error in cancellation-aware stream wrapper for job {job_id}: {e}")
+        logger.error(f"Error in cancellation-aware stream wrapper for run {run_id}: {e}")
        raise
 
 
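A rough usage sketch of the renamed wrapper, assuming a surrounding request handler that already holds agent_stream, run_manager, run, and actor (none of those bindings are part of this diff; only the keyword names follow the signature above):

    async def stream_endpoint():
        wrapped = cancellation_aware_stream_wrapper(
            stream_generator=agent_stream,
            run_manager=run_manager,
            run_id=run.id,
            actor=actor,
            cancellation_check_interval=0.5,  # seconds between RunStatus checks
        )
        async for chunk in wrapped:
            yield chunk  # chunks flow through until the run is cancelled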
@@ -267,12 +267,12 @@ class StreamingResponseWithStatusCode(StreamingResponse):
                self._client_connected = False
                # Continue processing but don't try to send more data
 
-        # Handle explicit job cancellations (should not throw error)
-        except JobCancelledException as exc:
-            logger.info(f"Stream was explicitly cancelled for job {exc.job_id}")
+        # Handle explicit run cancellations (should not throw error)
+        except RunCancelledException as exc:
+            logger.info(f"Stream was explicitly cancelled for run {exc.run_id}")
            # Handle explicit cancellation gracefully without error
            more_body = False
-            cancellation_resp = {"message": "Job was cancelled"}
+            cancellation_resp = {"message": "Run was cancelled"}
            cancellation_event = f"event: cancelled\ndata: {json.dumps(cancellation_resp)}\n\n".encode(self.charset)
            if not self.response_started:
                await send(
letta/server/rest_api/utils.py
@@ -27,7 +27,13 @@ from letta.otel.metric_registry import MetricRegistry
 from letta.otel.tracing import tracer
 from letta.schemas.agent import AgentState
 from letta.schemas.enums import MessageRole
-from letta.schemas.letta_message_content import OmittedReasoningContent, ReasoningContent, RedactedReasoningContent, TextContent
+from letta.schemas.letta_message_content import (
+    OmittedReasoningContent,
+    ReasoningContent,
+    RedactedReasoningContent,
+    SummarizedReasoningContent,
+    TextContent,
+)
 from letta.schemas.llm_config import LLMConfig
 from letta.schemas.message import ApprovalCreate, Message, MessageCreate, ToolReturn
 from letta.schemas.tool_execution_result import ToolExecutionResult
@@ -148,7 +154,7 @@ def capture_sentry_exception(e: BaseException):
     sentry_sdk.capture_exception(e)
 
 
-def create_input_messages(input_messages: List[MessageCreate], agent_id: str, timezone: str, actor: User) -> List[Message]:
+def create_input_messages(input_messages: List[MessageCreate], agent_id: str, timezone: str, run_id: str, actor: User) -> List[Message]:
     """
     Converts a user input message into the internal structured format.
 
@@ -156,7 +162,9 @@ def create_input_messages(input_messages: List[MessageCreate], agent_id: str, ti
     we should unify this when it's clear what message attributes we need.
     """
 
-    messages = convert_message_creates_to_messages(input_messages, agent_id, timezone, wrap_user_message=False, wrap_system_message=False)
+    messages = convert_message_creates_to_messages(
+        input_messages, agent_id, timezone, run_id, wrap_user_message=False, wrap_system_message=False
+    )
     return messages
 
 
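Call sites now have to thread a run_id through create_input_messages; a hedged sketch of the updated call (the MessageCreate payload and the agent_state, run, and actor values are illustrative placeholders):

    messages = create_input_messages(
        input_messages=[MessageCreate(role="user", content="hello")],
        agent_id=agent_state.id,
        timezone="UTC",
        run_id=run.id,  # newly added parameter
        actor=actor,
    )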
@@ -184,10 +192,13 @@ def create_approval_request_message_from_llm_response(
     reasoning_content: Optional[List[Union[TextContent, ReasoningContent, RedactedReasoningContent, OmittedReasoningContent]]] = None,
     pre_computed_assistant_message_id: Optional[str] = None,
     step_id: str | None = None,
+    run_id: str = None,
+    append_request_heartbeat: bool = True,
 ) -> Message:
     # Construct the tool call with the assistant's message
-    # Force set request_heartbeat in tool_args to calculated continue_stepping
-    function_arguments[REQUEST_HEARTBEAT_PARAM] = continue_stepping
+    # Optionally set request_heartbeat in tool args (v2 behavior only)
+    if append_request_heartbeat:
+        function_arguments[REQUEST_HEARTBEAT_PARAM] = continue_stepping
     tool_call = OpenAIToolCall(
         id=tool_call_id,
         function=OpenAIFunction(
@@ -207,6 +218,7 @@
         tool_call_id=tool_call_id,
         created_at=get_utc_time(),
         step_id=step_id,
+        run_id=run_id,
     )
     if pre_computed_assistant_message_id:
         approval_message.id = pre_computed_assistant_message_id
@@ -216,82 +228,130 @@
 def create_letta_messages_from_llm_response(
     agent_id: str,
     model: str,
-    function_name: str,
-    function_arguments: Dict,
-    tool_execution_result: ToolExecutionResult,
-    tool_call_id: str,
-    function_call_success: bool,
+    function_name: Optional[str],
+    function_arguments: Optional[Dict],
+    tool_execution_result: Optional[ToolExecutionResult],
+    tool_call_id: Optional[str],
     function_response: Optional[str],
     timezone: str,
-    actor: User,
+    run_id: str | None = None,
+    step_id: str = None,
     continue_stepping: bool = False,
     heartbeat_reason: Optional[str] = None,
-    reasoning_content: Optional[List[Union[TextContent, ReasoningContent, RedactedReasoningContent, OmittedReasoningContent]]] = None,
+    reasoning_content: Optional[
+        List[Union[TextContent, ReasoningContent, RedactedReasoningContent, OmittedReasoningContent | SummarizedReasoningContent]]
+    ] = None,
     pre_computed_assistant_message_id: Optional[str] = None,
     llm_batch_item_id: Optional[str] = None,
-    step_id: str | None = None,
     is_approval_response: bool | None = None,
+    # force set request_heartbeat, useful for v2 loop to ensure matching tool rules
+    force_set_request_heartbeat: bool = True,
+    add_heartbeat_on_continue: bool = True,
 ) -> List[Message]:
     messages = []
-    if not is_approval_response:
-        # Construct the tool call with the assistant's message
-        # Force set request_heartbeat in tool_args to calculated continue_stepping
-        function_arguments[REQUEST_HEARTBEAT_PARAM] = continue_stepping
-        tool_call = OpenAIToolCall(
-            id=tool_call_id,
-            function=OpenAIFunction(
-                name=function_name,
-                arguments=json.dumps(function_arguments),
-            ),
-            type="function",
-        )
-        # TODO: Use ToolCallContent instead of tool_calls
-        # TODO: This helps preserve ordering
-        assistant_message = Message(
-            role=MessageRole.assistant,
-            content=reasoning_content if reasoning_content else [],
+    if not is_approval_response:  # Skip approval responses (omit them)
+        if function_name is not None:
+            # Construct the tool call with the assistant's message
+            # Force set request_heartbeat in tool_args to calculated continue_stepping
+            if force_set_request_heartbeat:
+                function_arguments[REQUEST_HEARTBEAT_PARAM] = continue_stepping
+            tool_call = OpenAIToolCall(
+                id=tool_call_id,
+                function=OpenAIFunction(
+                    name=function_name,
+                    arguments=json.dumps(function_arguments),
+                ),
+                type="function",
+            )
+            # TODO: Use ToolCallContent instead of tool_calls
+            # TODO: This helps preserve ordering
+
+            # Safeguard against empty text messages
+            content = []
+            if reasoning_content:
+                for content_part in reasoning_content:
+                    if isinstance(content_part, TextContent) and content_part.text == "":
+                        continue
+                    content.append(content_part)
+
+            assistant_message = Message(
+                role=MessageRole.assistant,
+                content=content,
+                agent_id=agent_id,
+                model=model,
+                tool_calls=[tool_call],
+                tool_call_id=tool_call_id,
+                created_at=get_utc_time(),
+                batch_item_id=llm_batch_item_id,
+                run_id=run_id,
+            )
+        else:
+            # Safeguard against empty text messages
+            content = []
+            if reasoning_content:
+                for content_part in reasoning_content:
+                    if isinstance(content_part, TextContent) and content_part.text == "":
+                        continue
+                    content.append(content_part)
+
+            # Should only hit this if using react agents
+            if content and len(content) > 0:
+                assistant_message = Message(
+                    role=MessageRole.assistant,
+                    # NOTE: weird that this is called "reasoning_content" here, since it's not
+                    content=content,
+                    agent_id=agent_id,
+                    model=model,
+                    tool_calls=None,
+                    tool_call_id=None,
+                    created_at=get_utc_time(),
+                    batch_item_id=llm_batch_item_id,
+                    run_id=run_id,
+                )
+            else:
+                assistant_message = None
+
+        if assistant_message:
+            if pre_computed_assistant_message_id:
+                assistant_message.id = pre_computed_assistant_message_id
+            messages.append(assistant_message)
+
+    # TODO: Use ToolReturnContent instead of TextContent
+    # TODO: This helps preserve ordering
+    if tool_execution_result is not None:
+        packaged_function_response = package_function_response(tool_execution_result.success_flag, function_response, timezone)
+        tool_message = Message(
+            role=MessageRole.tool,
+            content=[TextContent(text=packaged_function_response)],
             agent_id=agent_id,
             model=model,
-            tool_calls=[tool_call],
+            tool_calls=[],
             tool_call_id=tool_call_id,
             created_at=get_utc_time(),
+            name=function_name,
             batch_item_id=llm_batch_item_id,
+            tool_returns=[
+                ToolReturn(
+                    tool_call_id=tool_call_id,
+                    status=tool_execution_result.status,
+                    stderr=tool_execution_result.stderr,
+                    stdout=tool_execution_result.stdout,
+                    func_response=packaged_function_response,
+                )
+            ],
+            run_id=run_id,
         )
-        if pre_computed_assistant_message_id:
-            assistant_message.id = pre_computed_assistant_message_id
-        messages.append(assistant_message)
+        messages.append(tool_message)
 
-    # TODO: Use ToolReturnContent instead of TextContent
-    # TODO: This helps preserve ordering
-    tool_message = Message(
-        role=MessageRole.tool,
-        content=[TextContent(text=package_function_response(function_call_success, function_response, timezone))],
-        agent_id=agent_id,
-        model=model,
-        tool_calls=[],
-        tool_call_id=tool_call_id,
-        created_at=get_utc_time(),
-        name=function_name,
-        batch_item_id=llm_batch_item_id,
-        tool_returns=[
-            ToolReturn(
-                status=tool_execution_result.status,
-                stderr=tool_execution_result.stderr,
-                stdout=tool_execution_result.stdout,
-                # func_return=tool_execution_result.func_return,
-            )
-        ],
-    )
-    messages.append(tool_message)
-
-    if continue_stepping:
+    if continue_stepping and add_heartbeat_on_continue:
+        # TODO skip this for react agents, instead we just force looping
         heartbeat_system_message = create_heartbeat_system_message(
             agent_id=agent_id,
             model=model,
-            function_call_success=function_call_success,
-            actor=actor,
+            function_call_success=(tool_execution_result.success_flag if tool_execution_result is not None else True),
             timezone=timezone,
             heartbeat_reason=heartbeat_reason,
+            run_id=run_id,
         )
         messages.append(heartbeat_system_message)
 
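Since function_name, tool_call_id, and tool_execution_result are now Optional, a react-style step that produces only assistant text can reuse this helper; a hedged sketch of such a call (all argument values are illustrative, and the flag settings reflect the v3 behavior suggested by the comments above):

    messages = create_letta_messages_from_llm_response(
        agent_id=agent_state.id,
        model="gpt-4.1",
        function_name=None,  # no tool call this step
        function_arguments=None,
        tool_execution_result=None,  # also skips the tool return message
        tool_call_id=None,
        function_response=None,
        timezone="UTC",
        run_id=run.id,
        reasoning_content=[TextContent(text="final answer")],
        continue_stepping=False,
        force_set_request_heartbeat=False,  # v3 loop: no request_heartbeat injection
        add_heartbeat_on_continue=False,
    )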
@@ -306,9 +366,9 @@ def create_heartbeat_system_message(
     model: str,
     function_call_success: bool,
     timezone: str,
-    actor: User,
     llm_batch_item_id: Optional[str] = None,
     heartbeat_reason: Optional[str] = None,
+    run_id: Optional[str] = None,
 ) -> Message:
     if heartbeat_reason:
         text_content = heartbeat_reason
@@ -324,6 +384,7 @@
         tool_call_id=None,
         created_at=get_utc_time(),
         batch_item_id=llm_batch_item_id,
+        run_id=run_id,
     )
     return heartbeat_system_message
 
@@ -332,7 +393,6 @@ def create_assistant_messages_from_openai_response(
     response_text: str,
     agent_id: str,
     model: str,
-    actor: User,
     timezone: str,
 ) -> List[Message]:
     """
@@ -348,10 +408,8 @@
         function_arguments={DEFAULT_MESSAGE_TOOL_KWARG: response_text},  # Avoid raw string manipulation
         tool_execution_result=ToolExecutionResult(status="success"),
         tool_call_id=tool_call_id,
-        function_call_success=True,
         function_response=None,
         timezone=timezone,
-        actor=actor,
         continue_stepping=False,
     )