lite-agent 0.9.0__py3-none-any.whl → 0.10.0__py3-none-any.whl
This diff shows the changes between publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
Potentially problematic release.
- lite_agent/agent.py +17 -314
- lite_agent/chat_display.py +10 -4
- lite_agent/client.py +15 -8
- lite_agent/runner.py +56 -101
- lite_agent/types/__init__.py +1 -1
- lite_agent/types/messages.py +2 -1
- lite_agent/utils/advanced_message_builder.py +201 -0
- lite_agent/utils/message_builder.py +1 -1
- lite_agent/utils/message_converter.py +232 -0
- lite_agent/utils/message_state_manager.py +152 -0
- {lite_agent-0.9.0.dist-info → lite_agent-0.10.0.dist-info}/METADATA +1 -1
- {lite_agent-0.9.0.dist-info → lite_agent-0.10.0.dist-info}/RECORD +13 -10
- {lite_agent-0.9.0.dist-info → lite_agent-0.10.0.dist-info}/WHEEL +0 -0
lite_agent/runner.py
CHANGED
@@ -1,5 +1,4 @@
 import json
-import warnings
 from collections.abc import AsyncGenerator, Sequence
 from datetime import datetime, timedelta, timezone
 from os import PathLike
@@ -28,8 +27,9 @@ from lite_agent.types import (
     UserInput,
     UserTextContent,
 )
-from lite_agent.types.events import AssistantMessageEvent, FunctionCallOutputEvent
+from lite_agent.types.events import AssistantMessageEvent, FunctionCallOutputEvent
 from lite_agent.utils.message_builder import MessageBuilder
+from lite_agent.utils.message_state_manager import MessageStateManager


 class Runner:
@@ -38,50 +38,40 @@ class Runner:
         self.messages: list[FlexibleRunnerMessage] = []
         self.api = api
         self.streaming = streaming
-        self.
+        self._message_state_manager = MessageStateManager()
         self.usage = MessageUsage(input_tokens=0, output_tokens=0, total_tokens=0)

-    def _start_assistant_message(self, content: str = "", meta: AssistantMessageMeta | None = None) -> None:
+    async def _start_assistant_message(self, content: str = "", meta: AssistantMessageMeta | None = None) -> None:
         """Start a new assistant message."""
         # Create meta with model information if not provided
         if meta is None:
             meta = AssistantMessageMeta()
             if hasattr(self.agent.client, "model"):
                 meta.model = self.agent.client.model
-        self.
-            content=[AssistantTextContent(text=content)],
-            meta=meta,
-        )
+        await self._message_state_manager.start_message(content, meta)

-    def _ensure_current_assistant_message(self) -> NewAssistantMessage:
+    async def _ensure_current_assistant_message(self) -> NewAssistantMessage:
         """Ensure current assistant message exists and return it."""
-
-            self._start_assistant_message()
-        if self._current_assistant_message is None:
-            msg = "Failed to create current assistant message"
-            raise RuntimeError(msg)
-        return self._current_assistant_message
-
-    def _add_to_current_assistant_message(self, content_item: AssistantTextContent | AssistantToolCall | AssistantToolCallResult) -> None:
-        """Add content to the current assistant message."""
-        self._ensure_current_assistant_message().content.append(content_item)
+        return await self._message_state_manager.ensure_message_exists()

-    def
+    async def _add_to_current_assistant_message(self, content_item: AssistantTextContent | AssistantToolCall | AssistantToolCallResult) -> None:
+        """Add content to the current assistant message."""
+        if isinstance(content_item, AssistantTextContent):
+            await self._message_state_manager.add_text_delta(content_item.text)
+        elif isinstance(content_item, AssistantToolCall):
+            await self._message_state_manager.add_tool_call(content_item)
+        elif isinstance(content_item, AssistantToolCallResult):
+            await self._message_state_manager.add_tool_result(content_item)
+
+    async def _add_text_content_to_current_assistant_message(self, delta: str) -> None:
         """Add text delta to the current assistant message's text content."""
-
-        # Find the first text content item and append the delta
-        for content_item in message.content:
-            if content_item.type == "text":
-                content_item.text += delta
-                return
-        # If no text content found, add new text content
-        message.content.append(AssistantTextContent(text=delta))
+        await self._message_state_manager.add_text_delta(delta)

-    def _finalize_assistant_message(self) -> None:
+    async def _finalize_assistant_message(self) -> None:
         """Finalize the current assistant message and add it to messages."""
-
-
-        self.
+        finalized_message = await self._message_state_manager.finalize_message()
+        if finalized_message is not None:
+            self.messages.append(finalized_message)

     def _add_tool_call_result(self, call_id: str, output: str, execution_time_ms: int | None = None) -> None:
         """Add a tool call result to the last assistant message, or create a new one if needed."""
@@ -127,10 +117,12 @@
         # Check for transfer_to_agent calls first
         transfer_calls = [tc for tc in tool_calls if tc.function.name == ToolName.TRANSFER_TO_AGENT]
         if transfer_calls:
+            logger.info(f"Processing {len(transfer_calls)} transfer_to_agent calls")
             # Handle all transfer calls but only execute the first one
             for i, tool_call in enumerate(transfer_calls):
                 if i == 0:
                     # Execute the first transfer
+                    logger.info(f"Executing agent transfer: {tool_call.function.arguments}")
                     call_id, output = await self._handle_agent_transfer(tool_call)
                     # Generate function_call_output event if in includes
                     if "function_call_output" in includes:
@@ -205,8 +197,6 @@
         last_message = cast("NewAssistantMessage", self.messages[-1])
         last_message.content.append(tool_result)

-        # Note: For completion API compatibility, the conversion happens when sending to LLM
-
     async def _collect_all_chunks(self, stream: AsyncGenerator[AgentChunk, None]) -> list[AgentChunk]:
         """Collect all chunks from an async generator into a list."""
         return [chunk async for chunk in stream]
@@ -218,7 +208,6 @@
         includes: Sequence[AgentChunkType] | None = None,
         context: "Any | None" = None,  # noqa: ANN401
         record_to: PathLike | str | None = None,
-        agent_kwargs: dict[str, Any] | None = None,
     ) -> AsyncGenerator[AgentChunk, None]:
         """Run the agent and return a RunResponse object that can be asynchronously iterated for each chunk.

@@ -253,7 +242,7 @@
             # Handle single message (BaseModel, TypedDict, or dict)
             self.append_message(user_input)  # type: ignore[arg-type]
         logger.debug("Messages prepared, calling _run")
-        return self._run(max_steps, includes, self._normalize_record_path(record_to), context=context
+        return self._run(max_steps, includes, self._normalize_record_path(record_to), context=context)

     async def _run(
         self,
@@ -261,7 +250,6 @@
         includes: Sequence[AgentChunkType],
         record_to: Path | None = None,
         context: Any | None = None,  # noqa: ANN401
-        agent_kwargs: dict[str, Any] | None = None,
     ) -> AsyncGenerator[AgentChunk, None]:
         """Run the agent and return a RunResponse object that can be asynchronously iterated for each chunk."""
         logger.debug(f"Running agent with messages: {self.messages}")
@@ -293,21 +281,13 @@

         while not is_finish() and steps < max_steps:
             logger.debug(f"Step {steps}: finish_reason={finish_reason}, is_finish()={is_finish()}")
-
-            # This allows us to keep the new format internally but ensures compatibility
-            # Extract agent kwargs for reasoning configuration
-            reasoning = None
-            if agent_kwargs:
-                reasoning = agent_kwargs.get("reasoning")
-
-            logger.debug(f"Using API: {self.api}, streaming: {self.streaming}")
+            logger.info(f"Making LLM request: API={self.api}, streaming={self.streaming}, messages={len(self.messages)}")
             match self.api:
                 case "completion":
                     logger.debug("Calling agent.completion")
                     resp = await self.agent.completion(
                         self.messages,
                         record_to_file=record_to,
-                        reasoning=reasoning,
                         streaming=self.streaming,
                     )
                 case "responses":
@@ -315,7 +295,6 @@
                     resp = await self.agent.responses(
                         self.messages,
                         record_to_file=record_to,
-                        reasoning=reasoning,
                         streaming=self.streaming,
                     )
                 case _:
@@ -331,48 +310,55 @@
                        logger.debug(f"Assistant message chunk: {len(chunk.message.content) if chunk.message.content else 0} content items")
                        # Start or update assistant message in new format
                        # If we already have a current assistant message, just update its metadata
-
+                        current_message = await self._message_state_manager.get_current_message()
+                        if current_message is not None:
                            # Preserve all existing metadata and only update specific fields
-
-
-
-                            original_meta.latency_ms = chunk.message.meta.latency_ms
-                            if hasattr(chunk.message.meta, "output_time_ms"):
-                                original_meta.total_time_ms = chunk.message.meta.output_time_ms
+                            meta_updates = {"sent_at": chunk.message.meta.sent_at}
+                            # Only include fields of type datetime in meta_updates
+                            # Update int fields separately after update_meta
                            # Preserve other metadata fields like model, usage, etc.
                            for attr in ["model", "usage", "input_tokens", "output_tokens"]:
                                if hasattr(chunk.message.meta, attr):
-
+                                    meta_updates[attr] = getattr(chunk.message.meta, attr)
+                            await self._message_state_manager.update_meta(**meta_updates)
+                            # Now update int fields directly if present
+                            if hasattr(chunk.message.meta, "latency_ms"):
+                                await self._message_state_manager.update_meta(latency_ms=chunk.message.meta.latency_ms)
+                            if hasattr(chunk.message.meta, "output_time_ms"):
+                                await self._message_state_manager.update_meta(total_time_ms=chunk.message.meta.output_time_ms)
                        else:
-                            # For non-streaming mode,
-                            self.
+                            # For non-streaming mode, start with complete message
+                            await self._start_assistant_message(meta=chunk.message.meta)
+                            # Add all content from the chunk message
+                            for content_item in chunk.message.content:
+                                await self._add_to_current_assistant_message(content_item)

                        # If model is None, try to get it from agent client
-
-
+                        current_message = await self._message_state_manager.get_current_message()
+                        if current_message is not None and current_message.meta.model is None and hasattr(self.agent.client, "model"):
+                            await self._message_state_manager.update_meta(model=self.agent.client.model)
                        # Only yield assistant_message chunk if it's in includes and has content
-                        if chunk.type in includes and
+                        if chunk.type in includes and current_message is not None:
                            # Create a new chunk with the current assistant message content
                            updated_chunk = AssistantMessageEvent(
-                                message=
+                                message=current_message,
                            )
                            yield updated_chunk
                    case "content_delta":
                        # Accumulate text content to current assistant message
-                        self._add_text_content_to_current_assistant_message(chunk.delta)
+                        await self._add_text_content_to_current_assistant_message(chunk.delta)
                        # Always yield content_delta chunk if it's in includes
                        if chunk.type in includes:
                            yield chunk
                    case "function_call":
                        logger.debug(f"Function call: {chunk.name}({chunk.arguments or '{}'})")
                        # Add tool call to current assistant message
-                        # Keep arguments as string for compatibility with funcall library
                        tool_call = AssistantToolCall(
                            call_id=chunk.call_id,
                            name=chunk.name,
                            arguments=chunk.arguments or "{}",
                        )
-                        self._add_to_current_assistant_message(tool_call)
+                        await self._add_to_current_assistant_message(tool_call)
                        # Always yield function_call chunk if it's in includes
                        if chunk.type in includes:
                            yield chunk
@@ -390,9 +376,8 @@
                        target_message = None

                        # First check if we have a current assistant message
-
-
-                        else:
+                        target_message = await self._message_state_manager.get_current_message()
+                        if target_message is None:
                            # Otherwise, look for the last assistant message in the list
                            for i in range(len(self.messages) - 1, -1, -1):
                                current_message = self.messages[i]
@@ -422,9 +407,9 @@
                            yield chunk
                    case "timing":
                        # Update timing information in current assistant message
-
-
-                        self.
+                        current_message = await self._message_state_manager.get_current_message()
+                        if current_message is not None:
+                            await self._message_state_manager.update_meta(latency_ms=chunk.timing.latency_ms, total_time_ms=chunk.timing.output_time_ms)
                        # Also try to update the last assistant message if no current message
                        elif self.messages and isinstance(self.messages[-1], NewAssistantMessage):
                            last_message = cast("NewAssistantMessage", self.messages[-1])
@@ -437,7 +422,7 @@
                            yield chunk

            # Finalize assistant message so it can be found in pending function calls
-            self._finalize_assistant_message()
+            await self._finalize_assistant_message()

            # Check for pending tool calls after processing current assistant message
            pending_tool_calls = self._find_pending_tool_calls()
@@ -463,36 +448,6 @@
        require_confirm_tools = await self.agent.list_require_confirm_tools(tool_calls)
        return bool(require_confirm_tools)

-    async def run_continue_until_complete(
-        self,
-        max_steps: int = 20,
-        includes: list[AgentChunkType] | None = None,
-        record_to: PathLike | str | None = None,
-    ) -> list[AgentChunk]:
-        """Deprecated: Use run_until_complete(None) instead."""
-        warnings.warn(
-            "run_continue_until_complete is deprecated. Use run_until_complete(None) instead.",
-            DeprecationWarning,
-            stacklevel=2,
-        )
-        resp = self.run_continue_stream(max_steps, includes, record_to=record_to)
-        return await self._collect_all_chunks(resp)
-
-    def run_continue_stream(
-        self,
-        max_steps: int = 20,
-        includes: list[AgentChunkType] | None = None,
-        record_to: PathLike | str | None = None,
-        context: "Any | None" = None,  # noqa: ANN401
-    ) -> AsyncGenerator[AgentChunk, None]:
-        """Deprecated: Use run(None) instead."""
-        warnings.warn(
-            "run_continue_stream is deprecated. Use run(None) instead.",
-            DeprecationWarning,
-            stacklevel=2,
-        )
-        return self._run_continue_stream(max_steps, includes, record_to=record_to, context=context)
-
    async def _run_continue_stream(
        self,
        max_steps: int = 20,
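Assistant-message bookkeeping in Runner now goes through the new MessageStateManager (lite_agent/utils/message_state_manager.py, +152 lines, not expanded in this view). Since the manager itself is not shown, the sketch below only restates the interface implied by the call sites in the runner.py diff above: the method names and their async nature come from the diff, while parameter and return annotations are assumptions.

# Sketch of the MessageStateManager surface as used by Runner in 0.10.0.
# Method names and awaits match the runner.py call sites; everything else is inferred.
from lite_agent.types import (
    AssistantMessageMeta,
    AssistantToolCall,
    AssistantToolCallResult,
    NewAssistantMessage,
)


class MessageStateManagerSketch:
    async def start_message(self, content: str = "", meta: AssistantMessageMeta | None = None) -> None: ...
    async def ensure_message_exists(self) -> NewAssistantMessage: ...
    async def add_text_delta(self, delta: str) -> None: ...
    async def add_tool_call(self, tool_call: AssistantToolCall) -> None: ...
    async def add_tool_result(self, result: AssistantToolCallResult) -> None: ...
    async def get_current_message(self) -> NewAssistantMessage | None: ...
    async def update_meta(self, **updates) -> None: ...
    async def finalize_message(self) -> NewAssistantMessage | None: ...

Note also that the deprecated run_continue_until_complete and run_continue_stream wrappers are removed in this release; their own docstrings pointed callers at run_until_complete(None) and run(None), and the agent_kwargs/reasoning plumbing is dropped from run/_run as well.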
lite_agent/types/__init__.py
CHANGED
lite_agent/types/messages.py
CHANGED
@@ -211,7 +211,7 @@ class UserMessageContentItemImageURL(BaseModel):
    image_url: UserMessageContentItemImageURLImageURL


-#
+# Message wrapper classes for backward compatibility
 class AgentUserMessage(NewUserMessage):
    def __init__(
        self,
@@ -250,6 +250,7 @@ class AgentAssistantMessage(NewAssistantMessage):
        )


+# AgentSystemMessage is now an alias to NewSystemMessage
 AgentSystemMessage = NewSystemMessage
 RunnerMessage = NewMessage

lite_agent/utils/advanced_message_builder.py
ADDED
@@ -0,0 +1,201 @@
+"""Advanced message builder with fluent interface for complex message construction."""
+
+from datetime import datetime, timezone
+from typing import Any
+
+from lite_agent.loggers import logger
+from lite_agent.types import (
+    AssistantMessageMeta,
+    AssistantTextContent,
+    AssistantToolCall,
+    AssistantToolCallResult,
+    MessageMeta,
+    MessageUsage,
+    NewAssistantMessage,
+    NewSystemMessage,
+    NewUserMessage,
+    UserImageContent,
+    UserTextContent,
+)
+
+
+class FluentMessageBuilder:
+    """Fluent interface for building complex messages step by step."""
+
+    def __init__(self):
+        self._reset()
+
+    def _reset(self) -> None:
+        """Reset builder state."""
+        self._message_type = None
+        self._content_items = []
+        self._meta = None
+
+    def user_message(self) -> "FluentMessageBuilder":
+        """Start building a user message."""
+        self._reset()
+        self._message_type = "user"
+        self._meta = MessageMeta()
+        logger.debug("Started building user message")
+        return self
+
+    def assistant_message(self, model: str | None = None) -> "FluentMessageBuilder":
+        """Start building an assistant message."""
+        self._reset()
+        self._message_type = "assistant"
+        self._meta = AssistantMessageMeta(model=model)
+        logger.debug(f"Started building assistant message (model: {model})")
+        return self
+
+    def system_message(self) -> "FluentMessageBuilder":
+        """Start building a system message."""
+        self._reset()
+        self._message_type = "system"
+        self._meta = MessageMeta()
+        self._content = ""
+        logger.debug("Started building system message")
+        return self
+
+    def add_text(self, text: str) -> "FluentMessageBuilder":
+        """Add text content."""
+        if self._message_type == "user":
+            self._content_items.append(UserTextContent(text=text))
+        elif self._message_type == "assistant":
+            self._content_items.append(AssistantTextContent(text=text))
+        elif self._message_type == "system":
+            self._content = text
+        else:
+            msg = "Message type not set. Call user_message(), assistant_message(), or system_message() first."
+            raise ValueError(msg)
+
+        logger.debug(f"Added text content (length: {len(text)})")
+        return self
+
+    def add_image(self, image_url: str | None = None, file_id: str | None = None, detail: str = "auto") -> "FluentMessageBuilder":
+        """Add image content to user message."""
+        if self._message_type != "user":
+            msg = "Images can only be added to user messages"
+            raise ValueError(msg)
+
+        self._content_items.append(UserImageContent(image_url=image_url, file_id=file_id, detail=detail))
+        logger.debug(f"Added image content (url: {bool(image_url)}, file_id: {bool(file_id)})")
+        return self
+
+    def add_tool_call(self, call_id: str, name: str, arguments: dict[str, Any] | str) -> "FluentMessageBuilder":
+        """Add tool call to assistant message."""
+        if self._message_type != "assistant":
+            msg = "Tool calls can only be added to assistant messages"
+            raise ValueError(msg)
+
+        self._content_items.append(AssistantToolCall(call_id=call_id, name=name, arguments=arguments))
+        logger.debug(f"Added tool call: {name} (call_id: {call_id})")
+        return self
+
+    def add_tool_result(self, call_id: str, output: str, execution_time_ms: int | None = None) -> "FluentMessageBuilder":
+        """Add tool call result to assistant message."""
+        if self._message_type != "assistant":
+            msg = "Tool results can only be added to assistant messages"
+            raise ValueError(msg)
+
+        self._content_items.append(AssistantToolCallResult(call_id=call_id, output=output, execution_time_ms=execution_time_ms))
+        logger.debug(f"Added tool result for call: {call_id}")
+        return self
+
+    def with_timestamp(self, timestamp: datetime | None = None) -> "FluentMessageBuilder":
+        """Set message timestamp."""
+        if self._meta is None:
+            msg = "Message type not set. Call user_message(), assistant_message(), or system_message() first."
+            raise ValueError(msg)
+        if timestamp is None:
+            timestamp = datetime.now(timezone.utc)
+        self._meta.sent_at = timestamp
+        return self
+
+    def with_usage(self, input_tokens: int | None = None, output_tokens: int | None = None) -> "FluentMessageBuilder":
+        """Set usage information (assistant messages only)."""
+        if self._message_type != "assistant":
+            msg = "Usage information can only be set for assistant messages"
+            raise ValueError(msg)
+
+        if self._meta is None:
+            msg = "Message type not set. Call user_message(), assistant_message(), or system_message() first."
+            raise ValueError(msg)
+
+        if not hasattr(self._meta, "usage"):
+            self._meta.usage = MessageUsage()
+
+        if self._meta.usage is not None:
+            if input_tokens is not None:
+                self._meta.usage.input_tokens = input_tokens
+            if output_tokens is not None:
+                self._meta.usage.output_tokens = output_tokens
+            if input_tokens is not None and output_tokens is not None:
+                self._meta.usage.total_tokens = input_tokens + output_tokens
+
+        return self
+
+    def with_timing(self, latency_ms: int | None = None, total_time_ms: int | None = None) -> "FluentMessageBuilder":
+        """Set timing information (assistant messages only)."""
+        if self._message_type != "assistant":
+            msg = "Timing information can only be set for assistant messages"
+            raise ValueError(msg)
+
+        if self._meta is None:
+            msg = "Message type not set. Call user_message(), assistant_message(), or system_message() first."
+            raise ValueError(msg)
+
+        if latency_ms is not None:
+            self._meta.latency_ms = latency_ms  # type: ignore[attr-defined]
+        if total_time_ms is not None:
+            self._meta.total_time_ms = total_time_ms  # type: ignore[attr-defined]
+
+        return self
+
+    def build(self) -> NewUserMessage | NewAssistantMessage | NewSystemMessage:
+        """Build the final message."""
+        if self._message_type == "user":
+            message = NewUserMessage(content=self._content_items, meta=self._meta)
+        elif self._message_type == "assistant":
+            message = NewAssistantMessage(content=self._content_items, meta=self._meta)
+        elif self._message_type == "system":
+            message = NewSystemMessage(content=self._content, meta=self._meta)
+        else:
+            msg = "Message type not set"
+            raise ValueError(msg)
+
+        logger.debug(f"Built {self._message_type} message with {len(getattr(self, '_content_items', []))} content items")
+        return message
+
+
+class MessageBuilderFactory:
+    """Factory for creating common message types quickly."""
+
+    @staticmethod
+    def create_simple_user_message(text: str) -> NewUserMessage:
+        """Create a simple user text message."""
+        return FluentMessageBuilder().user_message().add_text(text).build()
+
+    @staticmethod
+    def create_simple_assistant_message(text: str, model: str | None = None) -> NewAssistantMessage:
+        """Create a simple assistant text message."""
+        return FluentMessageBuilder().assistant_message(model).add_text(text).build()
+
+    @staticmethod
+    def create_system_message(text: str) -> NewSystemMessage:
+        """Create a system message."""
+        return FluentMessageBuilder().system_message().add_text(text).build()
+
+    @staticmethod
+    def create_user_message_with_image(text: str, image_url: str) -> NewUserMessage:
+        """Create a user message with text and image."""
+        return FluentMessageBuilder().user_message().add_text(text).add_image(image_url=image_url).build()
+
+    @staticmethod
+    def create_assistant_with_tool_call(text: str, call_id: str, tool_name: str, arguments: dict[str, Any], model: str | None = None) -> NewAssistantMessage:
+        """Create an assistant message with text and a tool call."""
+        return FluentMessageBuilder().assistant_message(model).add_text(text).add_tool_call(call_id, tool_name, arguments).build()
+
+    @staticmethod
+    def create_assistant_with_tool_result(call_id: str, result: str, execution_time_ms: int | None = None, model: str | None = None) -> NewAssistantMessage:
+        """Create an assistant message with just a tool result."""
+        return FluentMessageBuilder().assistant_message(model).add_tool_result(call_id, result, execution_time_ms).build()
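Because the new advanced_message_builder.py module is added in full above, a usage sketch can be written directly against it. The message text, call_id, tool name, model string, and token counts below are made-up illustrative values.

# Building messages with the new fluent builder and factory (values are illustrative).
from lite_agent.utils.advanced_message_builder import FluentMessageBuilder, MessageBuilderFactory

assistant_msg = (
    FluentMessageBuilder()
    .assistant_message(model="some-model")
    .add_text("Here is the weather report.")
    .add_tool_call(call_id="call_1", name="get_weather", arguments={"city": "Tokyo"})
    .add_tool_result(call_id="call_1", output="22C and clear", execution_time_ms=120)
    .with_usage(input_tokens=100, output_tokens=40)
    .with_timing(latency_ms=300, total_time_ms=800)
    .build()
)

# Factory shortcuts for the common shapes.
user_msg = MessageBuilderFactory.create_simple_user_message("What's the weather in Tokyo?")
system_msg = MessageBuilderFactory.create_system_message("You are a helpful assistant.")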
lite_agent/utils/message_builder.py
CHANGED
@@ -190,7 +190,7 @@ class MessageBuilder:
            # Fallback for other content types
            assistant_content_items = [AssistantTextContent(text=str(content))]

-        # Handle tool calls if present
+        # Handle tool calls if present
        if "tool_calls" in message:
            for tool_call in message.get("tool_calls", []):
                try: