letta-nightly 0.11.7.dev20251007104119__py3-none-any.whl → 0.11.7.dev20251008104128__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- letta/adapters/letta_llm_adapter.py +1 -0
- letta/adapters/letta_llm_request_adapter.py +0 -1
- letta/adapters/letta_llm_stream_adapter.py +7 -2
- letta/adapters/simple_llm_request_adapter.py +88 -0
- letta/adapters/simple_llm_stream_adapter.py +192 -0
- letta/agents/agent_loop.py +6 -0
- letta/agents/ephemeral_summary_agent.py +2 -1
- letta/agents/helpers.py +142 -6
- letta/agents/letta_agent.py +13 -33
- letta/agents/letta_agent_batch.py +2 -4
- letta/agents/letta_agent_v2.py +87 -77
- letta/agents/letta_agent_v3.py +899 -0
- letta/agents/voice_agent.py +2 -6
- letta/constants.py +8 -4
- letta/errors.py +40 -0
- letta/functions/function_sets/base.py +84 -4
- letta/functions/function_sets/multi_agent.py +0 -3
- letta/functions/schema_generator.py +113 -71
- letta/groups/dynamic_multi_agent.py +3 -2
- letta/groups/helpers.py +1 -2
- letta/groups/round_robin_multi_agent.py +3 -2
- letta/groups/sleeptime_multi_agent.py +3 -2
- letta/groups/sleeptime_multi_agent_v2.py +1 -1
- letta/groups/sleeptime_multi_agent_v3.py +17 -17
- letta/groups/supervisor_multi_agent.py +84 -80
- letta/helpers/converters.py +3 -0
- letta/helpers/message_helper.py +4 -0
- letta/helpers/tool_rule_solver.py +92 -5
- letta/interfaces/anthropic_streaming_interface.py +409 -0
- letta/interfaces/gemini_streaming_interface.py +296 -0
- letta/interfaces/openai_streaming_interface.py +752 -1
- letta/llm_api/anthropic_client.py +126 -16
- letta/llm_api/bedrock_client.py +4 -2
- letta/llm_api/deepseek_client.py +4 -1
- letta/llm_api/google_vertex_client.py +123 -42
- letta/llm_api/groq_client.py +4 -1
- letta/llm_api/llm_api_tools.py +11 -4
- letta/llm_api/llm_client_base.py +6 -2
- letta/llm_api/openai.py +32 -2
- letta/llm_api/openai_client.py +423 -18
- letta/llm_api/xai_client.py +4 -1
- letta/main.py +9 -5
- letta/memory.py +1 -0
- letta/orm/__init__.py +1 -1
- letta/orm/agent.py +10 -0
- letta/orm/block.py +7 -16
- letta/orm/blocks_agents.py +8 -2
- letta/orm/files_agents.py +2 -0
- letta/orm/job.py +7 -5
- letta/orm/mcp_oauth.py +1 -0
- letta/orm/message.py +21 -6
- letta/orm/organization.py +2 -0
- letta/orm/provider.py +6 -2
- letta/orm/run.py +71 -0
- letta/orm/sandbox_config.py +7 -1
- letta/orm/sqlalchemy_base.py +0 -306
- letta/orm/step.py +6 -5
- letta/orm/step_metrics.py +5 -5
- letta/otel/tracing.py +28 -3
- letta/plugins/defaults.py +4 -4
- letta/prompts/system_prompts/__init__.py +2 -0
- letta/prompts/system_prompts/letta_v1.py +25 -0
- letta/schemas/agent.py +3 -2
- letta/schemas/agent_file.py +9 -3
- letta/schemas/block.py +23 -10
- letta/schemas/enums.py +21 -2
- letta/schemas/job.py +17 -4
- letta/schemas/letta_message_content.py +71 -2
- letta/schemas/letta_stop_reason.py +5 -5
- letta/schemas/llm_config.py +53 -3
- letta/schemas/memory.py +1 -1
- letta/schemas/message.py +504 -117
- letta/schemas/openai/responses_request.py +64 -0
- letta/schemas/providers/__init__.py +2 -0
- letta/schemas/providers/anthropic.py +16 -0
- letta/schemas/providers/ollama.py +115 -33
- letta/schemas/providers/openrouter.py +52 -0
- letta/schemas/providers/vllm.py +2 -1
- letta/schemas/run.py +48 -42
- letta/schemas/step.py +2 -2
- letta/schemas/step_metrics.py +1 -1
- letta/schemas/tool.py +15 -107
- letta/schemas/tool_rule.py +88 -5
- letta/serialize_schemas/marshmallow_agent.py +1 -0
- letta/server/db.py +86 -408
- letta/server/rest_api/app.py +61 -10
- letta/server/rest_api/dependencies.py +14 -0
- letta/server/rest_api/redis_stream_manager.py +19 -8
- letta/server/rest_api/routers/v1/agents.py +364 -292
- letta/server/rest_api/routers/v1/blocks.py +14 -20
- letta/server/rest_api/routers/v1/identities.py +45 -110
- letta/server/rest_api/routers/v1/internal_templates.py +21 -0
- letta/server/rest_api/routers/v1/jobs.py +23 -6
- letta/server/rest_api/routers/v1/messages.py +1 -1
- letta/server/rest_api/routers/v1/runs.py +126 -85
- letta/server/rest_api/routers/v1/sandbox_configs.py +10 -19
- letta/server/rest_api/routers/v1/tools.py +281 -594
- letta/server/rest_api/routers/v1/voice.py +1 -1
- letta/server/rest_api/streaming_response.py +29 -29
- letta/server/rest_api/utils.py +122 -64
- letta/server/server.py +160 -887
- letta/services/agent_manager.py +236 -919
- letta/services/agent_serialization_manager.py +16 -0
- letta/services/archive_manager.py +0 -100
- letta/services/block_manager.py +211 -168
- letta/services/file_manager.py +1 -1
- letta/services/files_agents_manager.py +24 -33
- letta/services/group_manager.py +0 -142
- letta/services/helpers/agent_manager_helper.py +7 -2
- letta/services/helpers/run_manager_helper.py +85 -0
- letta/services/job_manager.py +96 -411
- letta/services/lettuce/__init__.py +6 -0
- letta/services/lettuce/lettuce_client_base.py +86 -0
- letta/services/mcp_manager.py +38 -6
- letta/services/message_manager.py +165 -362
- letta/services/organization_manager.py +0 -36
- letta/services/passage_manager.py +0 -345
- letta/services/provider_manager.py +0 -80
- letta/services/run_manager.py +301 -0
- letta/services/sandbox_config_manager.py +0 -234
- letta/services/step_manager.py +62 -39
- letta/services/summarizer/summarizer.py +9 -7
- letta/services/telemetry_manager.py +0 -16
- letta/services/tool_executor/builtin_tool_executor.py +35 -0
- letta/services/tool_executor/core_tool_executor.py +397 -2
- letta/services/tool_executor/files_tool_executor.py +3 -3
- letta/services/tool_executor/multi_agent_tool_executor.py +30 -15
- letta/services/tool_executor/tool_execution_manager.py +6 -8
- letta/services/tool_executor/tool_executor_base.py +3 -3
- letta/services/tool_manager.py +85 -339
- letta/services/tool_sandbox/base.py +24 -13
- letta/services/tool_sandbox/e2b_sandbox.py +16 -1
- letta/services/tool_schema_generator.py +123 -0
- letta/services/user_manager.py +0 -99
- letta/settings.py +20 -4
- {letta_nightly-0.11.7.dev20251007104119.dist-info → letta_nightly-0.11.7.dev20251008104128.dist-info}/METADATA +3 -5
- {letta_nightly-0.11.7.dev20251007104119.dist-info → letta_nightly-0.11.7.dev20251008104128.dist-info}/RECORD +140 -132
- letta/agents/temporal/activities/__init__.py +0 -4
- letta/agents/temporal/activities/example_activity.py +0 -7
- letta/agents/temporal/activities/prepare_messages.py +0 -10
- letta/agents/temporal/temporal_agent_workflow.py +0 -56
- letta/agents/temporal/types.py +0 -25
- {letta_nightly-0.11.7.dev20251007104119.dist-info → letta_nightly-0.11.7.dev20251008104128.dist-info}/WHEEL +0 -0
- {letta_nightly-0.11.7.dev20251007104119.dist-info → letta_nightly-0.11.7.dev20251008104128.dist-info}/entry_points.txt +0 -0
- {letta_nightly-0.11.7.dev20251007104119.dist-info → letta_nightly-0.11.7.dev20251008104128.dist-info}/licenses/LICENSE +0 -0
letta/schemas/message.py
CHANGED
@@ -11,9 +11,10 @@ from enum import Enum
|
|
11
11
|
from typing import Annotated, Any, Dict, List, Literal, Optional, Union
|
12
12
|
|
13
13
|
from openai.types.chat.chat_completion_message_tool_call import ChatCompletionMessageToolCall as OpenAIToolCall, Function as OpenAIFunction
|
14
|
+
from openai.types.responses import ResponseReasoningItem
|
14
15
|
from pydantic import BaseModel, Field, field_validator, model_validator
|
15
16
|
|
16
|
-
from letta.constants import DEFAULT_MESSAGE_TOOL, DEFAULT_MESSAGE_TOOL_KWARG, TOOL_CALL_ID_MAX_LEN
|
17
|
+
from letta.constants import DEFAULT_MESSAGE_TOOL, DEFAULT_MESSAGE_TOOL_KWARG, REQUEST_HEARTBEAT_PARAM, TOOL_CALL_ID_MAX_LEN
|
17
18
|
from letta.helpers.datetime_helpers import get_utc_time, is_utc_datetime
|
18
19
|
from letta.helpers.json_helpers import json_dumps
|
19
20
|
from letta.local_llm.constants import INNER_THOUGHTS_KWARG, INNER_THOUGHTS_KWARG_VERTEX
|
@@ -25,6 +26,7 @@ from letta.schemas.letta_message import (
|
|
25
26
|
AssistantMessage,
|
26
27
|
HiddenReasoningMessage,
|
27
28
|
LettaMessage,
|
29
|
+
MessageType,
|
28
30
|
ReasoningMessage,
|
29
31
|
SystemMessage,
|
30
32
|
ToolCall,
|
@@ -38,7 +40,9 @@ from letta.schemas.letta_message_content import (
|
|
38
40
|
OmittedReasoningContent,
|
39
41
|
ReasoningContent,
|
40
42
|
RedactedReasoningContent,
|
43
|
+
SummarizedReasoningContent,
|
41
44
|
TextContent,
|
45
|
+
ToolCallContent,
|
42
46
|
ToolReturnContent,
|
43
47
|
get_letta_message_content_union_str_json_schema,
|
44
48
|
)
|
@@ -192,6 +196,7 @@ class Message(BaseMessage):
|
|
192
196
|
tool_call_id: Optional[str] = Field(default=None, description="The ID of the tool call. Only applicable for role tool.")
|
193
197
|
# Extras
|
194
198
|
step_id: Optional[str] = Field(default=None, description="The id of the step that this message was created in.")
|
199
|
+
run_id: Optional[str] = Field(default=None, description="The id of the run that this message was created in.")
|
195
200
|
otid: Optional[str] = Field(default=None, description="The offline threading id associated with this message")
|
196
201
|
tool_returns: Optional[List[ToolReturn]] = Field(default=None, description="Tool execution return information for prior tool calls")
|
197
202
|
group_id: Optional[str] = Field(default=None, description="The multi-agent group that the message was sent in")
|
@@ -208,6 +213,13 @@ class Message(BaseMessage):
|
|
208
213
|
# This overrides the optional base orm schema, created_at MUST exist on all messages objects
|
209
214
|
created_at: datetime = Field(default_factory=get_utc_time, description="The timestamp when the object was created.")
|
210
215
|
|
216
|
+
# validate that run_id is set
|
217
|
+
# @model_validator(mode="after")
|
218
|
+
# def validate_run_id(self):
|
219
|
+
# if self.run_id is None:
|
220
|
+
# raise ValueError("Run ID is required")
|
221
|
+
# return self
|
222
|
+
|
211
223
|
@field_validator("role")
|
212
224
|
@classmethod
|
213
225
|
def validate_role(cls, v: str) -> str:
|
@@ -239,6 +251,7 @@ class Message(BaseMessage):
|
|
239
251
|
assistant_message_tool_kwarg: str = DEFAULT_MESSAGE_TOOL_KWARG,
|
240
252
|
reverse: bool = True,
|
241
253
|
include_err: Optional[bool] = None,
|
254
|
+
text_is_assistant_message: bool = False,
|
242
255
|
) -> List[LettaMessage]:
|
243
256
|
if use_assistant_message:
|
244
257
|
message_ids_to_remove = []
|
@@ -270,6 +283,7 @@ class Message(BaseMessage):
|
|
270
283
|
assistant_message_tool_kwarg=assistant_message_tool_kwarg,
|
271
284
|
reverse=reverse,
|
272
285
|
include_err=include_err,
|
286
|
+
text_is_assistant_message=text_is_assistant_message,
|
273
287
|
)
|
274
288
|
]
|
275
289
|
|
@@ -280,12 +294,15 @@ class Message(BaseMessage):
|
|
280
294
|
assistant_message_tool_kwarg: str = DEFAULT_MESSAGE_TOOL_KWARG,
|
281
295
|
reverse: bool = True,
|
282
296
|
include_err: Optional[bool] = None,
|
297
|
+
text_is_assistant_message: bool = False,
|
283
298
|
) -> List[LettaMessage]:
|
284
299
|
"""Convert message object (in DB format) to the style used by the original Letta API"""
|
300
|
+
|
285
301
|
messages = []
|
286
302
|
if self.role == MessageRole.assistant:
|
287
303
|
if self.content:
|
288
|
-
messages.extend(self._convert_reasoning_messages())
|
304
|
+
messages.extend(self._convert_reasoning_messages(text_is_assistant_message=text_is_assistant_message))
|
305
|
+
|
289
306
|
if self.tool_calls is not None:
|
290
307
|
messages.extend(
|
291
308
|
self._convert_tool_call_messages(
|
@@ -296,14 +313,14 @@ class Message(BaseMessage):
|
|
296
313
|
),
|
297
314
|
)
|
298
315
|
elif self.role == MessageRole.tool:
|
299
|
-
messages.
|
316
|
+
messages.extend(self._convert_tool_return_message())
|
300
317
|
elif self.role == MessageRole.user:
|
301
318
|
messages.append(self._convert_user_message())
|
302
319
|
elif self.role == MessageRole.system:
|
303
320
|
messages.append(self._convert_system_message())
|
304
321
|
elif self.role == MessageRole.approval:
|
305
322
|
if self.content:
|
306
|
-
messages.extend(self._convert_reasoning_messages())
|
323
|
+
messages.extend(self._convert_reasoning_messages(text_is_assistant_message=text_is_assistant_message))
|
307
324
|
if self.tool_calls is not None:
|
308
325
|
tool_calls = self._convert_tool_call_messages()
|
309
326
|
assert len(tool_calls) == 1
|
@@ -317,6 +334,7 @@ class Message(BaseMessage):
|
|
317
334
|
approve=self.approve,
|
318
335
|
approval_request_id=self.approval_request_id,
|
319
336
|
reason=self.denial_reason,
|
337
|
+
run_id=self.run_id,
|
320
338
|
)
|
321
339
|
messages.append(approval_response_message)
|
322
340
|
else:
|
@@ -324,30 +342,37 @@ class Message(BaseMessage):
|
|
324
342
|
|
325
343
|
return messages[::-1] if reverse else messages
|
326
344
|
|
327
|
-
def _convert_reasoning_messages(
|
345
|
+
def _convert_reasoning_messages(
|
346
|
+
self,
|
347
|
+
current_message_count: int = 0,
|
348
|
+
text_is_assistant_message: bool = False, # For v3 loop, set to True
|
349
|
+
) -> List[LettaMessage]:
|
328
350
|
messages = []
|
329
|
-
|
330
|
-
|
351
|
+
|
352
|
+
for content_part in self.content:
|
331
353
|
otid = Message.generate_otid_from_id(self.id, current_message_count + len(messages))
|
332
|
-
|
333
|
-
|
334
|
-
|
335
|
-
|
336
|
-
|
337
|
-
|
338
|
-
|
339
|
-
|
340
|
-
|
341
|
-
|
342
|
-
|
343
|
-
|
344
|
-
|
345
|
-
|
346
|
-
|
347
|
-
|
348
|
-
|
349
|
-
|
350
|
-
|
354
|
+
|
355
|
+
if isinstance(content_part, TextContent):
|
356
|
+
if text_is_assistant_message:
|
357
|
+
# .content is assistant message
|
358
|
+
if messages and messages[-1].message_type == MessageType.assistant_message:
|
359
|
+
messages[-1].content += content_part.text
|
360
|
+
else:
|
361
|
+
messages.append(
|
362
|
+
AssistantMessage(
|
363
|
+
id=self.id,
|
364
|
+
date=self.created_at,
|
365
|
+
content=content_part.text,
|
366
|
+
name=self.name,
|
367
|
+
otid=otid,
|
368
|
+
sender_id=self.sender_id,
|
369
|
+
step_id=self.step_id,
|
370
|
+
is_err=self.is_err,
|
371
|
+
run_id=self.run_id,
|
372
|
+
)
|
373
|
+
)
|
374
|
+
else:
|
375
|
+
# .content is COT
|
351
376
|
messages.append(
|
352
377
|
ReasoningMessage(
|
353
378
|
id=self.id,
|
@@ -358,10 +383,15 @@ class Message(BaseMessage):
|
|
358
383
|
sender_id=self.sender_id,
|
359
384
|
step_id=self.step_id,
|
360
385
|
is_err=self.is_err,
|
386
|
+
run_id=self.run_id,
|
361
387
|
)
|
362
388
|
)
|
363
|
-
|
364
|
-
|
389
|
+
|
390
|
+
elif isinstance(content_part, ReasoningContent):
|
391
|
+
# "native" COT
|
392
|
+
if messages and messages[-1].message_type == MessageType.reasoning_message:
|
393
|
+
messages[-1].reasoning += content_part.reasoning
|
394
|
+
else:
|
365
395
|
messages.append(
|
366
396
|
ReasoningMessage(
|
367
397
|
id=self.id,
|
@@ -373,41 +403,87 @@ class Message(BaseMessage):
|
|
373
403
|
otid=otid,
|
374
404
|
step_id=self.step_id,
|
375
405
|
is_err=self.is_err,
|
406
|
+
run_id=self.run_id,
|
376
407
|
)
|
377
408
|
)
|
378
|
-
|
379
|
-
|
409
|
+
|
410
|
+
elif isinstance(content_part, SummarizedReasoningContent):
|
411
|
+
# TODO remove the cast and just return the native type
|
412
|
+
casted_content_part = content_part.to_reasoning_content()
|
413
|
+
if casted_content_part is not None:
|
380
414
|
messages.append(
|
381
|
-
|
415
|
+
ReasoningMessage(
|
382
416
|
id=self.id,
|
383
417
|
date=self.created_at,
|
384
|
-
|
385
|
-
|
418
|
+
reasoning=casted_content_part.reasoning,
|
419
|
+
source="reasoner_model", # TODO do we want to tag like this?
|
420
|
+
signature=casted_content_part.signature,
|
386
421
|
name=self.name,
|
387
422
|
otid=otid,
|
388
|
-
sender_id=self.sender_id,
|
389
423
|
step_id=self.step_id,
|
390
424
|
is_err=self.is_err,
|
425
|
+
run_id=self.run_id,
|
391
426
|
)
|
392
427
|
)
|
393
|
-
|
394
|
-
|
395
|
-
|
396
|
-
|
397
|
-
|
398
|
-
|
399
|
-
|
400
|
-
|
401
|
-
|
402
|
-
|
403
|
-
|
404
|
-
|
405
|
-
|
428
|
+
|
429
|
+
elif isinstance(content_part, RedactedReasoningContent):
|
430
|
+
# "native" redacted/hidden COT
|
431
|
+
messages.append(
|
432
|
+
HiddenReasoningMessage(
|
433
|
+
id=self.id,
|
434
|
+
date=self.created_at,
|
435
|
+
state="redacted",
|
436
|
+
hidden_reasoning=content_part.data,
|
437
|
+
name=self.name,
|
438
|
+
otid=otid,
|
439
|
+
sender_id=self.sender_id,
|
440
|
+
step_id=self.step_id,
|
441
|
+
is_err=self.is_err,
|
442
|
+
run_id=self.run_id,
|
406
443
|
)
|
407
|
-
|
408
|
-
|
444
|
+
)
|
445
|
+
|
446
|
+
elif isinstance(content_part, OmittedReasoningContent):
|
447
|
+
# Special case for "hidden reasoning" models like o1/o3
|
448
|
+
# NOTE: we also have to think about how to return this during streaming
|
449
|
+
messages.append(
|
450
|
+
HiddenReasoningMessage(
|
451
|
+
id=self.id,
|
452
|
+
date=self.created_at,
|
453
|
+
state="omitted",
|
454
|
+
name=self.name,
|
455
|
+
otid=otid,
|
456
|
+
step_id=self.step_id,
|
457
|
+
is_err=self.is_err,
|
458
|
+
run_id=self.run_id,
|
459
|
+
)
|
460
|
+
)
|
461
|
+
|
462
|
+
else:
|
463
|
+
warnings.warn(f"Unrecognized content part in assistant message: {content_part}")
|
464
|
+
|
409
465
|
return messages
|
410
466
|
|
467
|
+
def _convert_assistant_message(
|
468
|
+
self,
|
469
|
+
) -> AssistantMessage:
|
470
|
+
if self.content and len(self.content) == 1 and isinstance(self.content[0], TextContent):
|
471
|
+
text_content = self.content[0].text
|
472
|
+
else:
|
473
|
+
raise ValueError(f"Invalid assistant message (no text object on message): {self.content}")
|
474
|
+
|
475
|
+
return AssistantMessage(
|
476
|
+
id=self.id,
|
477
|
+
date=self.created_at,
|
478
|
+
content=text_content,
|
479
|
+
name=self.name,
|
480
|
+
otid=self.otid,
|
481
|
+
sender_id=self.sender_id,
|
482
|
+
step_id=self.step_id,
|
483
|
+
# is_err=self.is_err,
|
484
|
+
run_id=self.run_id,
|
485
|
+
)
|
486
|
+
|
411
487
|
def _convert_tool_call_messages(
|
412
488
|
self,
|
413
489
|
current_message_count: int = 0,
|
@@ -438,6 +514,7 @@ class Message(BaseMessage):
|
|
438
514
|
sender_id=self.sender_id,
|
439
515
|
step_id=self.step_id,
|
440
516
|
is_err=self.is_err,
|
517
|
+
run_id=self.run_id,
|
441
518
|
)
|
442
519
|
)
|
443
520
|
else:
|
@@ -455,49 +532,135 @@ class Message(BaseMessage):
|
|
455
532
|
sender_id=self.sender_id,
|
456
533
|
step_id=self.step_id,
|
457
534
|
is_err=self.is_err,
|
535
|
+
run_id=self.run_id,
|
458
536
|
)
|
459
537
|
)
|
460
538
|
return messages
|
461
539
|
|
462
|
-
def _convert_tool_return_message(self) -> ToolReturnMessage:
|
463
|
-
"""Convert tool role message to ToolReturnMessage
|
540
|
+
def _convert_tool_return_message(self) -> List[ToolReturnMessage]:
|
541
|
+
"""Convert tool role message to ToolReturnMessage.
|
464
542
|
|
465
|
-
|
543
|
+
The tool return is packaged as follows:
|
466
544
|
packaged_message = {
|
467
545
|
"status": "OK" if was_success else "Failed",
|
468
546
|
"message": response_string,
|
469
547
|
"time": formatted_time,
|
470
548
|
}
|
549
|
+
|
550
|
+
Returns:
|
551
|
+
List[ToolReturnMessage]: Converted tool return messages
|
552
|
+
|
553
|
+
Raises:
|
554
|
+
ValueError: If message role is not 'tool', parsing fails, or no valid content exists
|
471
555
|
"""
|
472
|
-
if self.
|
473
|
-
|
474
|
-
|
475
|
-
|
556
|
+
if self.role != MessageRole.tool:
|
557
|
+
raise ValueError(f"Cannot convert message of type {self.role} to ToolReturnMessage")
|
558
|
+
|
559
|
+
if self.tool_returns:
|
560
|
+
return self._convert_explicit_tool_returns()
|
476
561
|
|
562
|
+
return self._convert_legacy_tool_return()
|
563
|
+
|
564
|
+
def _convert_explicit_tool_returns(self) -> List[ToolReturnMessage]:
|
565
|
+
"""Convert explicit tool returns to ToolReturnMessage list."""
|
566
|
+
tool_returns = []
|
567
|
+
|
568
|
+
for index, tool_return in enumerate(self.tool_returns):
|
569
|
+
parsed_data = self._parse_tool_response(tool_return.func_response)
|
570
|
+
|
571
|
+
tool_returns.append(
|
572
|
+
self._create_tool_return_message(
|
573
|
+
message_text=parsed_data["message"],
|
574
|
+
status=parsed_data["status"],
|
575
|
+
tool_call_id=tool_return.tool_call_id,
|
576
|
+
stdout=tool_return.stdout,
|
577
|
+
stderr=tool_return.stderr,
|
578
|
+
otid_index=index,
|
579
|
+
)
|
580
|
+
)
|
581
|
+
|
582
|
+
return tool_returns
|
583
|
+
|
584
|
+
def _convert_legacy_tool_return(self) -> List[ToolReturnMessage]:
|
585
|
+
"""Convert legacy single text content to ToolReturnMessage."""
|
586
|
+
if not self._has_single_text_content():
|
587
|
+
raise ValueError(f"No valid tool returns to convert: {self}")
|
588
|
+
|
589
|
+
text_content = self.content[0].text
|
590
|
+
parsed_data = self._parse_tool_response(text_content)
|
591
|
+
|
592
|
+
return [
|
593
|
+
self._create_tool_return_message(
|
594
|
+
message_text=parsed_data["message"],
|
595
|
+
status=parsed_data["status"],
|
596
|
+
tool_call_id=self.tool_call_id,
|
597
|
+
stdout=None,
|
598
|
+
stderr=None,
|
599
|
+
otid_index=0,
|
600
|
+
)
|
601
|
+
]
|
602
|
+
|
603
|
+
def _has_single_text_content(self) -> bool:
|
604
|
+
"""Check if message has exactly one text content item."""
|
605
|
+
return self.content and len(self.content) == 1 and isinstance(self.content[0], TextContent)
|
606
|
+
|
607
|
+
def _parse_tool_response(self, response_text: str) -> dict:
|
608
|
+
"""Parse tool response JSON and extract message and status.
|
609
|
+
|
610
|
+
Args:
|
611
|
+
response_text: Raw JSON response text
|
612
|
+
|
613
|
+
Returns:
|
614
|
+
Dictionary with 'message' and 'status' keys
|
615
|
+
|
616
|
+
Raises:
|
617
|
+
ValueError: If JSON parsing fails
|
618
|
+
"""
|
477
619
|
try:
|
478
|
-
function_return = parse_json(
|
479
|
-
|
480
|
-
|
481
|
-
|
482
|
-
|
620
|
+
function_return = parse_json(response_text)
|
621
|
+
return {
|
622
|
+
"message": str(function_return.get("message", response_text)),
|
623
|
+
"status": self._parse_tool_status(function_return.get("status", "OK")),
|
624
|
+
}
|
625
|
+
except json.JSONDecodeError as e:
|
626
|
+
raise ValueError(f"Failed to decode function return: {response_text}") from e
|
483
627
|
|
484
|
-
|
485
|
-
|
486
|
-
|
628
|
+
def _create_tool_return_message(
|
629
|
+
self,
|
630
|
+
message_text: str,
|
631
|
+
status: str,
|
632
|
+
tool_call_id: Optional[str],
|
633
|
+
stdout: Optional[str],
|
634
|
+
stderr: Optional[str],
|
635
|
+
otid_index: int,
|
636
|
+
) -> ToolReturnMessage:
|
637
|
+
"""Create a ToolReturnMessage with common attributes.
|
487
638
|
|
639
|
+
Args:
|
640
|
+
message_text: The tool return message text
|
641
|
+
status: Tool execution status
|
642
|
+
tool_call_id: Optional tool call identifier
|
643
|
+
stdout: Optional standard output
|
644
|
+
stderr: Optional standard error
|
645
|
+
otid_index: Index for OTID generation
|
646
|
+
|
647
|
+
Returns:
|
648
|
+
Configured ToolReturnMessage instance
|
649
|
+
"""
|
488
650
|
return ToolReturnMessage(
|
489
651
|
id=self.id,
|
490
652
|
date=self.created_at,
|
491
653
|
tool_return=message_text,
|
492
|
-
status=
|
493
|
-
tool_call_id=
|
494
|
-
stdout=
|
495
|
-
stderr=
|
654
|
+
status=status,
|
655
|
+
tool_call_id=tool_call_id,
|
656
|
+
stdout=stdout,
|
657
|
+
stderr=stderr,
|
496
658
|
name=self.name,
|
497
|
-
otid=Message.generate_otid_from_id(self.id,
|
659
|
+
otid=Message.generate_otid_from_id(self.id, otid_index),
|
498
660
|
sender_id=self.sender_id,
|
499
661
|
step_id=self.step_id,
|
500
662
|
is_err=self.is_err,
|
663
|
+
run_id=self.run_id,
|
501
664
|
)
|
502
665
|
|
503
666
|
@staticmethod
|
@@ -531,6 +694,7 @@ class Message(BaseMessage):
|
|
531
694
|
sender_id=self.sender_id,
|
532
695
|
step_id=self.step_id,
|
533
696
|
is_err=self.is_err,
|
697
|
+
run_id=self.run_id,
|
534
698
|
)
|
535
699
|
|
536
700
|
def _convert_system_message(self) -> SystemMessage:
|
@@ -548,6 +712,7 @@ class Message(BaseMessage):
|
|
548
712
|
otid=self.otid,
|
549
713
|
sender_id=self.sender_id,
|
550
714
|
step_id=self.step_id,
|
715
|
+
run_id=self.run_id,
|
551
716
|
)
|
552
717
|
|
553
718
|
@staticmethod
|
@@ -561,6 +726,7 @@ class Message(BaseMessage):
|
|
561
726
|
name: Optional[str] = None,
|
562
727
|
group_id: Optional[str] = None,
|
563
728
|
tool_returns: Optional[List[ToolReturn]] = None,
|
729
|
+
run_id: Optional[str] = None,
|
564
730
|
) -> Message:
|
565
731
|
"""Convert a ChatCompletion message object into a Message object (synced to DB)"""
|
566
732
|
if not created_at:
|
@@ -622,6 +788,7 @@ class Message(BaseMessage):
|
|
622
788
|
id=str(id),
|
623
789
|
tool_returns=tool_returns,
|
624
790
|
group_id=group_id,
|
791
|
+
run_id=run_id,
|
625
792
|
)
|
626
793
|
else:
|
627
794
|
return Message(
|
@@ -636,6 +803,7 @@ class Message(BaseMessage):
|
|
636
803
|
created_at=created_at,
|
637
804
|
tool_returns=tool_returns,
|
638
805
|
group_id=group_id,
|
806
|
+
run_id=run_id,
|
639
807
|
)
|
640
808
|
|
641
809
|
elif "function_call" in openai_message_dict and openai_message_dict["function_call"] is not None:
|
@@ -671,6 +839,7 @@ class Message(BaseMessage):
|
|
671
839
|
id=str(id),
|
672
840
|
tool_returns=tool_returns,
|
673
841
|
group_id=group_id,
|
842
|
+
run_id=run_id,
|
674
843
|
)
|
675
844
|
else:
|
676
845
|
return Message(
|
@@ -685,6 +854,7 @@ class Message(BaseMessage):
|
|
685
854
|
created_at=created_at,
|
686
855
|
tool_returns=tool_returns,
|
687
856
|
group_id=group_id,
|
857
|
+
run_id=run_id,
|
688
858
|
)
|
689
859
|
|
690
860
|
else:
|
@@ -720,6 +890,7 @@ class Message(BaseMessage):
|
|
720
890
|
id=str(id),
|
721
891
|
tool_returns=tool_returns,
|
722
892
|
group_id=group_id,
|
893
|
+
run_id=run_id,
|
723
894
|
)
|
724
895
|
else:
|
725
896
|
return Message(
|
@@ -734,6 +905,7 @@ class Message(BaseMessage):
|
|
734
905
|
created_at=created_at,
|
735
906
|
tool_returns=tool_returns,
|
736
907
|
group_id=group_id,
|
908
|
+
run_id=run_id,
|
737
909
|
)
|
738
910
|
|
739
911
|
def to_openai_dict_search_results(self, max_tool_id_length: int = TOOL_CALL_ID_MAX_LEN) -> dict:
|
@@ -746,8 +918,13 @@ class Message(BaseMessage):
|
|
746
918
|
max_tool_id_length: int = TOOL_CALL_ID_MAX_LEN,
|
747
919
|
put_inner_thoughts_in_kwargs: bool = False,
|
748
920
|
use_developer_message: bool = False,
|
921
|
+
# if true, then treat the content field as AssistantMessage
|
922
|
+
native_content: bool = False,
|
923
|
+
strip_request_heartbeat: bool = False,
|
749
924
|
) -> dict | None:
|
750
925
|
"""Go from Message class to ChatCompletion message object"""
|
926
|
+
assert not (native_content and put_inner_thoughts_in_kwargs), "native_content and put_inner_thoughts_in_kwargs cannot both be true"
|
927
|
+
|
751
928
|
if self.role == "approval" and self.tool_calls is None:
|
752
929
|
return None
|
753
930
|
|
@@ -763,8 +940,8 @@ class Message(BaseMessage):
|
|
763
940
|
# Otherwise, check if we have TextContent and multiple other parts
|
764
941
|
elif self.content and len(self.content) > 1:
|
765
942
|
text = [content for content in self.content if isinstance(content, TextContent)]
|
766
|
-
assert len(text) == 1, f"multiple text content parts found in a single message: {self.content}"
|
767
|
-
text_content =
|
943
|
+
# assert len(text) == 1, f"multiple text content parts found in a single message: {self.content}"
|
944
|
+
text_content = "\n\n".join([t.text for t in text])
|
768
945
|
parse_content_parts = True
|
769
946
|
else:
|
770
947
|
text_content = None
|
@@ -788,11 +965,22 @@ class Message(BaseMessage):
|
|
788
965
|
}
|
789
966
|
|
790
967
|
elif self.role == "assistant" or self.role == "approval":
|
791
|
-
assert self.tool_calls is not None or text_content is not None
|
792
|
-
|
793
|
-
|
794
|
-
|
795
|
-
|
968
|
+
assert self.tool_calls is not None or text_content is not None, vars(self)
|
969
|
+
|
970
|
+
# if native content, then put it directly inside the content
|
971
|
+
if native_content:
|
972
|
+
openai_message = {
|
973
|
+
# TODO support listed content (if it's possible for role assistant?)
|
974
|
+
# "content": self.content,
|
975
|
+
"content": text_content, # here content is not reasoning, it's assistant message
|
976
|
+
"role": "assistant",
|
977
|
+
}
|
978
|
+
# otherwise, if inner_thoughts_in_kwargs, hold it for the tool calls
|
979
|
+
else:
|
980
|
+
openai_message = {
|
981
|
+
"content": None if (put_inner_thoughts_in_kwargs and self.tool_calls is not None) else text_content,
|
982
|
+
"role": "assistant",
|
983
|
+
}
|
796
984
|
|
797
985
|
if self.tool_calls is not None:
|
798
986
|
if put_inner_thoughts_in_kwargs:
|
@@ -807,6 +995,11 @@ class Message(BaseMessage):
|
|
807
995
|
]
|
808
996
|
else:
|
809
997
|
openai_message["tool_calls"] = [tool_call.model_dump() for tool_call in self.tool_calls]
|
998
|
+
|
999
|
+
if strip_request_heartbeat:
|
1000
|
+
for tool_call_dict in openai_message["tool_calls"]:
|
1001
|
+
tool_call_dict.pop(REQUEST_HEARTBEAT_PARAM, None)
|
1002
|
+
|
810
1003
|
if max_tool_id_length:
|
811
1004
|
for tool_call_dict in openai_message["tool_calls"]:
|
812
1005
|
tool_call_dict["id"] = tool_call_dict["id"][:max_tool_id_length]
|
@@ -858,10 +1051,116 @@ class Message(BaseMessage):
|
|
858
1051
|
result = [m for m in result if m is not None]
|
859
1052
|
return result
|
860
1053
|
|
1054
|
+
def to_openai_responses_dicts(
|
1055
|
+
self,
|
1056
|
+
max_tool_id_length: int = TOOL_CALL_ID_MAX_LEN,
|
1057
|
+
) -> List[dict]:
|
1058
|
+
"""Go from Message class to ChatCompletion message object"""
|
1059
|
+
|
1060
|
+
if self.role == "approval" and self.tool_calls is None:
|
1061
|
+
return []
|
1062
|
+
|
1063
|
+
message_dicts = []
|
1064
|
+
|
1065
|
+
if self.role == "system":
|
1066
|
+
assert len(self.content) == 1 and isinstance(self.content[0], TextContent), vars(self)
|
1067
|
+
message_dicts.append(
|
1068
|
+
{
|
1069
|
+
"role": "developer",
|
1070
|
+
"content": self.content[0].text,
|
1071
|
+
}
|
1072
|
+
)
|
1073
|
+
|
1074
|
+
elif self.role == "user":
|
1075
|
+
# TODO do we need to do a swap to placeholder text here for images?
|
1076
|
+
assert all([isinstance(c, TextContent) or isinstance(c, ImageContent) for c in self.content]), vars(self)
|
1077
|
+
|
1078
|
+
user_dict = {
|
1079
|
+
"role": self.role.value if hasattr(self.role, "value") else self.role,
|
1080
|
+
# TODO support multi-modal
|
1081
|
+
"content": self.content[0].text,
|
1082
|
+
}
|
1083
|
+
|
1084
|
+
# Optional field, do not include if null or invalid
|
1085
|
+
if self.name is not None:
|
1086
|
+
if bool(re.match(r"^[^\s<|\\/>]+$", self.name)):
|
1087
|
+
user_dict["name"] = self.name
|
1088
|
+
else:
|
1089
|
+
warnings.warn(f"Using OpenAI with invalid 'name' field (name={self.name} role={self.role}).")
|
1090
|
+
|
1091
|
+
message_dicts.append(user_dict)
|
1092
|
+
|
1093
|
+
elif self.role == "assistant" or self.role == "approval":
|
1094
|
+
assert self.tool_calls is not None or (self.content is not None and len(self.content) > 0)
|
1095
|
+
|
1096
|
+
# A few things may be in here, firstly reasoning content, secondly assistant messages, thirdly tool calls
|
1097
|
+
# TODO check if OpenAI Responses is capable of R->A->T like Anthropic?
|
1098
|
+
|
1099
|
+
if self.content is not None:
|
1100
|
+
for content_part in self.content:
|
1101
|
+
if isinstance(content_part, SummarizedReasoningContent):
|
1102
|
+
message_dicts.append(
|
1103
|
+
{
|
1104
|
+
"type": "reasoning",
|
1105
|
+
"id": content_part.id,
|
1106
|
+
"summary": [{"type": "summary_text", "text": s.text} for s in content_part.summary],
|
1107
|
+
"encrypted_content": content_part.encrypted_content,
|
1108
|
+
}
|
1109
|
+
)
|
1110
|
+
elif isinstance(content_part, TextContent):
|
1111
|
+
message_dicts.append(
|
1112
|
+
{
|
1113
|
+
"role": "assistant",
|
1114
|
+
"content": content_part.text,
|
1115
|
+
}
|
1116
|
+
)
|
1117
|
+
# else skip
|
1118
|
+
|
1119
|
+
if self.tool_calls is not None:
|
1120
|
+
for tool_call in self.tool_calls:
|
1121
|
+
message_dicts.append(
|
1122
|
+
{
|
1123
|
+
"type": "function_call",
|
1124
|
+
"call_id": tool_call.id[:max_tool_id_length] if max_tool_id_length else tool_call.id,
|
1125
|
+
"name": tool_call.function.name,
|
1126
|
+
"arguments": tool_call.function.arguments,
|
1127
|
+
"status": "completed", # TODO check if needed?
|
1128
|
+
}
|
1129
|
+
)
|
1130
|
+
|
1131
|
+
elif self.role == "tool":
|
1132
|
+
assert self.tool_call_id is not None, vars(self)
|
1133
|
+
assert len(self.content) == 1 and isinstance(self.content[0], TextContent), vars(self)
|
1134
|
+
message_dicts.append(
|
1135
|
+
{
|
1136
|
+
"type": "function_call_output",
|
1137
|
+
"call_id": self.tool_call_id[:max_tool_id_length] if max_tool_id_length else self.tool_call_id,
|
1138
|
+
"output": self.content[0].text,
|
1139
|
+
}
|
1140
|
+
)
|
1141
|
+
|
1142
|
+
else:
|
1143
|
+
raise ValueError(self.role)
|
1144
|
+
|
1145
|
+
return message_dicts
|
1146
|
+
|
1147
|
+
@staticmethod
def to_openai_responses_dicts_from_list(
    messages: List[Message],
    max_tool_id_length: int = TOOL_CALL_ID_MAX_LEN,
) -> List[dict]:
    """Convert a sequence of Messages into one flat list of OpenAI Responses API dicts.

    A single Message can expand into several dicts (e.g. reasoning, assistant
    text, tool calls, tool outputs); the per-message lists are concatenated in
    the original message order.

    Args:
        messages: Messages to convert.
        max_tool_id_length: Truncation limit forwarded to each message's
            ``to_openai_responses_dicts`` for tool-call IDs.
    """
    flattened: List[dict] = []
    for msg in messages:
        flattened += msg.to_openai_responses_dicts(max_tool_id_length=max_tool_id_length)
    return flattened
|
1156
|
+
|
861
1157
|
def to_anthropic_dict(
|
862
1158
|
self,
|
863
1159
|
inner_thoughts_xml_tag="thinking",
|
864
1160
|
put_inner_thoughts_in_kwargs: bool = False,
|
1161
|
+
# if true, then treat the content field as AssistantMessage
|
1162
|
+
native_content: bool = False,
|
1163
|
+
strip_request_heartbeat: bool = False,
|
865
1164
|
) -> dict | None:
|
866
1165
|
"""
|
867
1166
|
Convert to an Anthropic message dictionary
|
@@ -869,6 +1168,8 @@ class Message(BaseMessage):
|
|
869
1168
|
Args:
|
870
1169
|
inner_thoughts_xml_tag (str): The XML tag to wrap around inner thoughts
|
871
1170
|
"""
|
1171
|
+
assert not (native_content and put_inner_thoughts_in_kwargs), "native_content and put_inner_thoughts_in_kwargs cannot both be true"
|
1172
|
+
|
872
1173
|
if self.role == "approval" and self.tool_calls is None:
|
873
1174
|
return None
|
874
1175
|
|
@@ -929,43 +1230,76 @@ class Message(BaseMessage):
|
|
929
1230
|
}
|
930
1231
|
|
931
1232
|
elif self.role == "assistant" or self.role == "approval":
|
932
|
-
assert self.tool_calls is not None or text_content is not None
|
1233
|
+
# assert self.tool_calls is not None or text_content is not None, vars(self)
|
1234
|
+
assert self.tool_calls is not None or len(self.content) > 0
|
933
1235
|
anthropic_message = {
|
934
1236
|
"role": "assistant",
|
935
1237
|
}
|
936
1238
|
content = []
|
937
|
-
|
938
|
-
|
939
|
-
|
940
|
-
|
941
|
-
|
942
|
-
|
943
|
-
|
944
|
-
|
945
|
-
|
946
|
-
|
947
|
-
|
948
|
-
|
949
|
-
|
950
|
-
|
951
|
-
|
952
|
-
|
953
|
-
|
954
|
-
|
955
|
-
|
956
|
-
|
957
|
-
|
958
|
-
|
959
|
-
|
960
|
-
|
961
|
-
|
962
|
-
|
963
|
-
|
964
|
-
|
965
|
-
|
966
|
-
|
967
|
-
|
968
|
-
|
1239
|
+
if native_content:
|
1240
|
+
# No special handling for TextContent
|
1241
|
+
if self.content is not None:
|
1242
|
+
for content_part in self.content:
|
1243
|
+
# TextContent, ImageContent, ToolCallContent, ToolReturnContent, ReasoningContent, RedactedReasoningContent, OmittedReasoningContent
|
1244
|
+
if isinstance(content_part, ReasoningContent):
|
1245
|
+
content.append(
|
1246
|
+
{
|
1247
|
+
"type": "thinking",
|
1248
|
+
"thinking": content_part.reasoning,
|
1249
|
+
"signature": content_part.signature,
|
1250
|
+
}
|
1251
|
+
)
|
1252
|
+
elif isinstance(content_part, RedactedReasoningContent):
|
1253
|
+
content.append(
|
1254
|
+
{
|
1255
|
+
"type": "redacted_thinking",
|
1256
|
+
"data": content_part.data,
|
1257
|
+
}
|
1258
|
+
)
|
1259
|
+
elif isinstance(content_part, TextContent):
|
1260
|
+
content.append(
|
1261
|
+
{
|
1262
|
+
"type": "text",
|
1263
|
+
"text": content_part.text,
|
1264
|
+
}
|
1265
|
+
)
|
1266
|
+
else:
|
1267
|
+
# Skip unsupported types, e.g. OmittedReasoningContent
|
1268
|
+
pass
|
1269
|
+
|
1270
|
+
else:
|
1271
|
+
# COT / reasoning / thinking
|
1272
|
+
if self.content is not None and len(self.content) >= 1:
|
1273
|
+
for content_part in self.content:
|
1274
|
+
if isinstance(content_part, ReasoningContent):
|
1275
|
+
content.append(
|
1276
|
+
{
|
1277
|
+
"type": "thinking",
|
1278
|
+
"thinking": content_part.reasoning,
|
1279
|
+
"signature": content_part.signature,
|
1280
|
+
}
|
1281
|
+
)
|
1282
|
+
if isinstance(content_part, RedactedReasoningContent):
|
1283
|
+
content.append(
|
1284
|
+
{
|
1285
|
+
"type": "redacted_thinking",
|
1286
|
+
"data": content_part.data,
|
1287
|
+
}
|
1288
|
+
)
|
1289
|
+
if isinstance(content_part, TextContent):
|
1290
|
+
content.append(
|
1291
|
+
{
|
1292
|
+
"type": "text",
|
1293
|
+
"text": content_part.text,
|
1294
|
+
}
|
1295
|
+
)
|
1296
|
+
elif text_content is not None:
|
1297
|
+
content.append(
|
1298
|
+
{
|
1299
|
+
"type": "text",
|
1300
|
+
"text": add_xml_tag(string=text_content, xml_tag=inner_thoughts_xml_tag),
|
1301
|
+
}
|
1302
|
+
)
|
969
1303
|
# Tool calling
|
970
1304
|
if self.tool_calls is not None:
|
971
1305
|
for tool_call in self.tool_calls:
|
@@ -978,6 +1312,9 @@ class Message(BaseMessage):
|
|
978
1312
|
else:
|
979
1313
|
tool_call_input = parse_json(tool_call.function.arguments)
|
980
1314
|
|
1315
|
+
if strip_request_heartbeat:
|
1316
|
+
tool_call_input.pop(REQUEST_HEARTBEAT_PARAM, None)
|
1317
|
+
|
981
1318
|
content.append(
|
982
1319
|
{
|
983
1320
|
"type": "tool_use",
|
@@ -987,8 +1324,6 @@ class Message(BaseMessage):
|
|
987
1324
|
}
|
988
1325
|
)
|
989
1326
|
|
990
|
-
# If the only content was text, unpack it back into a singleton
|
991
|
-
# TODO support multi-modal
|
992
1327
|
anthropic_message["content"] = content
|
993
1328
|
|
994
1329
|
elif self.role == "tool":
|
@@ -1016,21 +1351,34 @@ class Message(BaseMessage):
|
|
1016
1351
|
messages: List[Message],
|
1017
1352
|
inner_thoughts_xml_tag: str = "thinking",
|
1018
1353
|
put_inner_thoughts_in_kwargs: bool = False,
|
1354
|
+
# if true, then treat the content field as AssistantMessage
|
1355
|
+
native_content: bool = False,
|
1356
|
+
strip_request_heartbeat: bool = False,
|
1019
1357
|
) -> List[dict]:
|
1020
1358
|
result = [
|
1021
1359
|
m.to_anthropic_dict(
|
1022
1360
|
inner_thoughts_xml_tag=inner_thoughts_xml_tag,
|
1023
1361
|
put_inner_thoughts_in_kwargs=put_inner_thoughts_in_kwargs,
|
1362
|
+
native_content=native_content,
|
1363
|
+
strip_request_heartbeat=strip_request_heartbeat,
|
1024
1364
|
)
|
1025
1365
|
for m in messages
|
1026
1366
|
]
|
1027
1367
|
result = [m for m in result if m is not None]
|
1028
1368
|
return result
|
1029
1369
|
|
1030
|
-
def to_google_dict(
|
1370
|
+
def to_google_dict(
|
1371
|
+
self,
|
1372
|
+
put_inner_thoughts_in_kwargs: bool = True,
|
1373
|
+
# if true, then treat the content field as AssistantMessage
|
1374
|
+
native_content: bool = False,
|
1375
|
+
strip_request_heartbeat: bool = False,
|
1376
|
+
) -> dict | None:
|
1031
1377
|
"""
|
1032
1378
|
Go from Message class to Google AI REST message object
|
1033
1379
|
"""
|
1380
|
+
assert not (native_content and put_inner_thoughts_in_kwargs), "native_content and put_inner_thoughts_in_kwargs cannot both be true"
|
1381
|
+
|
1034
1382
|
if self.role == "approval" and self.tool_calls is None:
|
1035
1383
|
return None
|
1036
1384
|
|
@@ -1080,7 +1428,7 @@ class Message(BaseMessage):
|
|
1080
1428
|
}
|
1081
1429
|
|
1082
1430
|
elif self.role == "assistant" or self.role == "approval":
|
1083
|
-
assert self.tool_calls is not None or text_content is not None
|
1431
|
+
assert self.tool_calls is not None or text_content is not None or len(self.content) > 1
|
1084
1432
|
google_ai_message = {
|
1085
1433
|
"role": "model", # NOTE: different
|
1086
1434
|
}
|
@@ -1088,7 +1436,12 @@ class Message(BaseMessage):
|
|
1088
1436
|
# NOTE: Google AI API doesn't allow non-null content + function call
|
1089
1437
|
# To get around this, just two a two part message, inner thoughts first then
|
1090
1438
|
parts = []
|
1091
|
-
|
1439
|
+
|
1440
|
+
if native_content and text_content is not None:
|
1441
|
+
# TODO support multi-part assistant content
|
1442
|
+
parts.append({"text": text_content})
|
1443
|
+
|
1444
|
+
elif not put_inner_thoughts_in_kwargs and text_content is not None:
|
1092
1445
|
# NOTE: ideally we do multi-part for CoT / inner thoughts + function call, but Google AI API doesn't allow it
|
1093
1446
|
raise NotImplementedError
|
1094
1447
|
parts.append({"text": text_content})
|
@@ -1110,6 +1463,9 @@ class Message(BaseMessage):
|
|
1110
1463
|
assert len(self.tool_calls) == 1
|
1111
1464
|
function_args[INNER_THOUGHTS_KWARG_VERTEX] = text_content
|
1112
1465
|
|
1466
|
+
if strip_request_heartbeat:
|
1467
|
+
function_args.pop(REQUEST_HEARTBEAT_PARAM, None)
|
1468
|
+
|
1113
1469
|
parts.append(
|
1114
1470
|
{
|
1115
1471
|
"functionCall": {
|
@@ -1119,8 +1475,36 @@ class Message(BaseMessage):
|
|
1119
1475
|
}
|
1120
1476
|
)
|
1121
1477
|
else:
|
1122
|
-
|
1123
|
-
|
1478
|
+
if not native_content:
|
1479
|
+
assert text_content is not None
|
1480
|
+
parts.append({"text": text_content})
|
1481
|
+
|
1482
|
+
if self.content and len(self.content) > 1:
|
1483
|
+
native_google_content_parts = []
|
1484
|
+
for content in self.content:
|
1485
|
+
if isinstance(content, TextContent):
|
1486
|
+
native_part = {"text": content.text}
|
1487
|
+
if content.signature:
|
1488
|
+
native_part["thought_signature"] = content.signature
|
1489
|
+
native_google_content_parts.append(native_part)
|
1490
|
+
elif isinstance(content, ReasoningContent):
|
1491
|
+
native_google_content_parts.append({"text": content.reasoning, "thought": True})
|
1492
|
+
elif isinstance(content, ToolCallContent):
|
1493
|
+
native_part = {
|
1494
|
+
"function_call": {
|
1495
|
+
"name": content.name,
|
1496
|
+
"args": content.input,
|
1497
|
+
},
|
1498
|
+
}
|
1499
|
+
if content.signature:
|
1500
|
+
native_part["thought_signature"] = content.signature
|
1501
|
+
native_google_content_parts.append(native_part)
|
1502
|
+
else:
|
1503
|
+
# silently drop other content types
|
1504
|
+
pass
|
1505
|
+
if native_google_content_parts:
|
1506
|
+
parts = native_google_content_parts
|
1507
|
+
|
1124
1508
|
google_ai_message["parts"] = parts
|
1125
1509
|
|
1126
1510
|
elif self.role == "tool":
|
@@ -1171,10 +1555,12 @@ class Message(BaseMessage):
|
|
1171
1555
|
def to_google_dicts_from_list(
|
1172
1556
|
messages: List[Message],
|
1173
1557
|
put_inner_thoughts_in_kwargs: bool = True,
|
1558
|
+
native_content: bool = False,
|
1174
1559
|
):
|
1175
1560
|
result = [
|
1176
1561
|
m.to_google_dict(
|
1177
1562
|
put_inner_thoughts_in_kwargs=put_inner_thoughts_in_kwargs,
|
1563
|
+
native_content=native_content,
|
1178
1564
|
)
|
1179
1565
|
for m in messages
|
1180
1566
|
]
|
@@ -1200,10 +1586,11 @@ class Message(BaseMessage):
|
|
1200
1586
|
|
1201
1587
|
|
1202
1588
|
class ToolReturn(BaseModel):
    """Outcome of a single tool invocation: status plus any captured output streams."""

    # ID correlating this return with the originating tool call.
    # NOTE(review): typed as Any upstream — presumably a string ID; confirm before narrowing.
    tool_call_id: Optional[Any] = Field(None, description="The ID for the tool call")
    # Whether the tool invocation completed successfully or errored.
    status: Literal["success", "error"] = Field(..., description="The status of the tool call")
    # Captured stdout lines (prints, logs) emitted while the tool ran, if collected.
    stdout: Optional[List[str]] = Field(default=None, description="Captured stdout (e.g. prints, logs) from the tool invocation")
    # Captured stderr lines from the tool run, if collected.
    stderr: Optional[List[str]] = Field(default=None, description="Captured stderr from the tool invocation")
    # Raw string form of the tool's return value, when available.
    func_response: Optional[str] = Field(None, description="The function response string")
|
1207
1594
|
|
1208
1595
|
|
1209
1596
|
class MessageSearchRequest(BaseModel):
|