lite-agent 0.8.0__py3-none-any.whl → 0.10.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of lite-agent might be problematic.

lite_agent/agent.py CHANGED
@@ -6,23 +6,20 @@ from typing import Any, Optional
 from funcall import Funcall
 from jinja2 import Environment, FileSystemLoader
 
-from lite_agent.client import BaseLLMClient, LiteLLMClient, ReasoningConfig
+from lite_agent.client import BaseLLMClient, LiteLLMClient
 from lite_agent.constants import CompletionMode, ToolName
 from lite_agent.loggers import logger
 from lite_agent.response_handlers import CompletionResponseHandler, ResponsesAPIHandler
 from lite_agent.types import (
     AgentChunk,
-    AssistantTextContent,
-    AssistantToolCall,
-    AssistantToolCallResult,
     FunctionCallEvent,
     FunctionCallOutputEvent,
     RunnerMessages,
     ToolCall,
-    message_to_llm_dict,
     system_message_to_llm_dict,
 )
-from lite_agent.types.messages import NewAssistantMessage, NewSystemMessage, NewUserMessage
+from lite_agent.types.messages import NewSystemMessage
+from lite_agent.utils.message_converter import MessageFormatConverter, ResponsesFormatConverter
 
 TEMPLATES_DIR = Path(__file__).parent / "templates"
 jinja_env = Environment(loader=FileSystemLoader(str(TEMPLATES_DIR)), autoescape=True)
@@ -43,12 +40,10 @@ class Agent:
         handoffs: list["Agent"] | None = None,
         message_transfer: Callable[[RunnerMessages], RunnerMessages] | None = None,
         completion_condition: str = "stop",
-        reasoning: ReasoningConfig = None,
         stop_before_tools: list[str] | list[Callable] | None = None,
     ) -> None:
         self.name = name
         self.instructions = instructions
-        self.reasoning = reasoning
         # Convert stop_before_functions to function names
         if stop_before_tools:
             self.stop_before_functions = set()
@@ -70,7 +65,6 @@ class Agent:
             # Otherwise, create a LitellmClient instance
             self.client = LiteLLMClient(
                 model=model,
-                reasoning=reasoning,
             )
         self.completion_condition = completion_condition
         self.handoffs = handoffs if handoffs else []
@@ -194,9 +188,8 @@ class Agent:
         # Regenerate transfer tools to include the new agent
         self._add_transfer_tools(self.handoffs)
 
-    def prepare_completion_messages(self, messages: RunnerMessages) -> list[dict]:
-        """Prepare messages for completions API (with conversion)."""
-        converted_messages = self._convert_responses_to_completions_format(messages)
+    def _build_instructions(self) -> str:
+        """Build complete instructions with templates."""
         instructions = self.instructions
         if self.handoffs:
             instructions = HANDOFFS_SOURCE_INSTRUCTIONS_TEMPLATE.render(extra_instructions=None) + "\n\n" + instructions
@@ -204,6 +197,12 @@ class Agent:
             instructions = HANDOFFS_TARGET_INSTRUCTIONS_TEMPLATE.render(extra_instructions=None) + "\n\n" + instructions
         if self.completion_condition == "call":
             instructions = WAIT_FOR_USER_INSTRUCTIONS_TEMPLATE.render(extra_instructions=None) + "\n\n" + instructions
+        return instructions
+
+    def prepare_completion_messages(self, messages: RunnerMessages) -> list[dict]:
+        """Prepare messages for completions API (with conversion)."""
+        converted_messages = MessageFormatConverter.to_completion_format(messages)
+        instructions = self._build_instructions()
         return [
             system_message_to_llm_dict(
                 NewSystemMessage(
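
Note: the hunk above moves the completions-format conversion out of Agent and into MessageFormatConverter, and factors the shared template handling into _build_instructions(). A minimal sketch of calling the converter directly, assuming it accepts plain dict messages in responses-API form; the import path and the to_completion_format call are taken from the diff, while the sample messages below are illustrative and not from the package:

    from lite_agent.utils.message_converter import MessageFormatConverter

    # Hypothetical responses-format history (shapes are illustrative, not from the package):
    messages = [
        {"role": "user", "content": [{"type": "input_text", "text": "What is 2 + 2?"}]},
        {"type": "function_call_output", "call_id": "call_1", "output": "4"},
    ]

    # The same call the new prepare_completion_messages makes, before it prepends the
    # system message built by _build_instructions().
    completion_messages = MessageFormatConverter.to_completion_format(messages)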
@@ -215,96 +214,24 @@ class Agent:
 
     def prepare_responses_messages(self, messages: RunnerMessages) -> list[dict[str, Any]]:
         """Prepare messages for responses API (no conversion, just add system message if needed)."""
-        instructions = self.instructions
-        if self.handoffs:
-            instructions = HANDOFFS_SOURCE_INSTRUCTIONS_TEMPLATE.render(extra_instructions=None) + "\n\n" + instructions
-        if self.parent:
-            instructions = HANDOFFS_TARGET_INSTRUCTIONS_TEMPLATE.render(extra_instructions=None) + "\n\n" + instructions
-        if self.completion_condition == "call":
-            instructions = WAIT_FOR_USER_INSTRUCTIONS_TEMPLATE.render(extra_instructions=None) + "\n\n" + instructions
-        res: list[dict[str, Any]] = [
+        instructions = self._build_instructions()
+        converted_messages = ResponsesFormatConverter.to_responses_format(messages)
+        return [
             {
                 "role": "system",
                 "content": f"You are {self.name}. {instructions}",
             },
+            *converted_messages,
         ]
-        for message in messages:
-            if isinstance(message, NewAssistantMessage):
-                for item in message.content:
-                    if isinstance(item, AssistantTextContent):
-                        res.append(
-                            {
-                                "role": "assistant",
-                                "content": item.text,
-                            },
-                        )
-                    elif isinstance(item, AssistantToolCall):
-                        res.append(
-                            {
-                                "type": "function_call",
-                                "call_id": item.call_id,
-                                "name": item.name,
-                                "arguments": item.arguments,
-                            },
-                        )
-                    elif isinstance(item, AssistantToolCallResult):
-                        res.append(
-                            {
-                                "type": "function_call_output",
-                                "call_id": item.call_id,
-                                "output": item.output,
-                            },
-                        )
-            elif isinstance(message, NewSystemMessage):
-                res.append(
-                    {
-                        "role": "system",
-                        "content": message.content,
-                    },
-                )
-            elif isinstance(message, NewUserMessage):
-                contents = []
-                for item in message.content:
-                    match item.type:
-                        case "text":
-                            contents.append(
-                                {
-                                    "type": "input_text",
-                                    "text": item.text,
-                                },
-                            )
-                        case "image":
-                            contents.append(
-                                {
-                                    "type": "input_image",
-                                    "image_url": item.image_url,
-                                },
-                            )
-                        case "file":
-                            contents.append(
-                                {
-                                    "type": "input_file",
-                                    "file_id": item.file_id,
-                                    "file_name": item.file_name,
-                                },
-                            )
-                res.append(
-                    {
-                        "role": message.role,
-                        "content": contents,
-                    },
-                )
-        return res
 
     async def completion(
         self,
         messages: RunnerMessages,
         record_to_file: Path | None = None,
-        reasoning: ReasoningConfig = None,
         *,
         streaming: bool = True,
     ) -> AsyncGenerator[AgentChunk, None]:
-        # Apply message transfer callback if provided - always use legacy format for LLM compatibility
+        # Apply message transfer callback if provided
         processed_messages = messages
         if self.message_transfer:
             logger.debug(f"Applying message transfer callback for agent {self.name}")
@@ -318,7 +245,6 @@ class Agent:
             messages=self.message_histories,
             tools=tools,
             tool_choice="auto", # TODO: make this configurable
-            reasoning=reasoning,
             streaming=streaming,
         )
 
@@ -330,11 +256,10 @@ class Agent:
         self,
         messages: RunnerMessages,
         record_to_file: Path | None = None,
-        reasoning: ReasoningConfig = None,
         *,
         streaming: bool = True,
     ) -> AsyncGenerator[AgentChunk, None]:
-        # Apply message transfer callback if provided - always use legacy format for LLM compatibility
+        # Apply message transfer callback if provided
         processed_messages = messages
         if self.message_transfer:
             logger.debug(f"Applying message transfer callback for agent {self.name}")
@@ -347,7 +272,6 @@ class Agent:
             messages=self.message_histories,
             tools=tools,
             tool_choice="auto", # TODO: make this configurable
-            reasoning=reasoning,
             streaming=streaming,
         )
         # Use response handler for unified processing
@@ -416,176 +340,6 @@ class Agent:
         execution_time_ms=execution_time_ms,
     )
 
-    def _convert_responses_to_completions_format(self, messages: RunnerMessages) -> list[dict]:
-        """Convert messages from responses API format to completions API format."""
-        converted_messages = []
-        i = 0
-
-        while i < len(messages):
-            message = messages[i]
-            message_dict = message_to_llm_dict(message) if isinstance(message, (NewUserMessage, NewSystemMessage, NewAssistantMessage)) else message
-
-            message_type = message_dict.get("type")
-            role = message_dict.get("role")
-
-            if role == "assistant":
-                # Extract tool_calls from content if present
-                tool_calls = []
-                content = message_dict.get("content", [])
-
-                # Handle both string and array content
-                if isinstance(content, list):
-                    # Extract tool_calls from content array and filter out non-text content
-                    filtered_content = []
-                    for item in content:
-                        if isinstance(item, dict):
-                            if item.get("type") == "tool_call":
-                                tool_call = {
-                                    "id": item.get("call_id", ""),
-                                    "type": "function",
-                                    "function": {
-                                        "name": item.get("name", ""),
-                                        "arguments": item.get("arguments", "{}"),
-                                    },
-                                    "index": len(tool_calls),
-                                }
-                                tool_calls.append(tool_call)
-                            elif item.get("type") == "text":
-                                filtered_content.append(item)
-                            # Skip tool_call_result - they should be handled by separate function_call_output messages
-
-                    # Update content to only include text items
-                    if filtered_content:
-                        message_dict = message_dict.copy()
-                        message_dict["content"] = filtered_content
-                    elif tool_calls:
-                        # If we have tool_calls but no text content, set content to None per OpenAI API spec
-                        message_dict = message_dict.copy()
-                        message_dict["content"] = None
-
-                # Look ahead for function_call messages (legacy support)
-                j = i + 1
-                while j < len(messages):
-                    next_message = messages[j]
-                    next_dict = message_to_llm_dict(next_message) if isinstance(next_message, (NewUserMessage, NewSystemMessage, NewAssistantMessage)) else next_message
-
-                    if next_dict.get("type") == "function_call":
-                        tool_call = {
-                            "id": next_dict["call_id"], # type: ignore
-                            "type": "function",
-                            "function": {
-                                "name": next_dict["name"], # type: ignore
-                                "arguments": next_dict["arguments"], # type: ignore
-                            },
-                            "index": len(tool_calls),
-                        }
-                        tool_calls.append(tool_call)
-                        j += 1
-                    else:
-                        break
-
-                # Create assistant message with tool_calls if any
-                assistant_msg = message_dict.copy()
-                if tool_calls:
-                    assistant_msg["tool_calls"] = tool_calls # type: ignore
-
-                # Convert content format for OpenAI API compatibility
-                content = assistant_msg.get("content", [])
-                if isinstance(content, list):
-                    # Extract text content and convert to string using list comprehension
-                    text_parts = [item.get("text", "") for item in content if isinstance(item, dict) and item.get("type") == "text"]
-                    assistant_msg["content"] = " ".join(text_parts) if text_parts else None
-
-                converted_messages.append(assistant_msg)
-                i = j # Skip the function_call messages we've processed
-
-            elif message_type == "function_call_output":
-                # Convert to tool message
-                converted_messages.append(
-                    {
-                        "role": "tool",
-                        "tool_call_id": message_dict["call_id"], # type: ignore
-                        "content": message_dict["output"], # type: ignore
-                    },
-                )
-                i += 1
-
-            elif message_type == "function_call":
-                # This should have been processed with the assistant message
-                # Skip it if we encounter it standalone
-                i += 1
-
-            else:
-                # Regular message (user, system)
-                converted_msg = message_dict.copy()
-
-                # Handle new Response API format for user messages
-                content = message_dict.get("content")
-                if role == "user" and isinstance(content, list):
-                    converted_msg["content"] = self._convert_user_content_to_completions_format(content) # type: ignore
-
-                converted_messages.append(converted_msg)
-                i += 1
-
-        return converted_messages
-
-    def _convert_user_content_to_completions_format(self, content: list) -> list:
-        """Convert user message content from Response API format to Completion API format."""
-        # Handle the case where content might not actually be a list due to test mocking
-        if type(content) is not list: # Use type() instead of isinstance() to avoid test mocking issues
-            return content
-
-        converted_content = []
-        for item in content:
-            # Convert Pydantic objects to dict first
-            if hasattr(item, "model_dump"):
-                item_dict = item.model_dump()
-            elif hasattr(item, "dict"): # For older Pydantic versions
-                item_dict = item.dict()
-            elif isinstance(item, dict):
-                item_dict = item
-            else:
-                # Handle non-dict items (shouldn't happen, but just in case)
-                converted_content.append(item)
-                continue
-
-            item_type = item_dict.get("type")
-            if item_type in ["input_text", "text"]:
-                # Convert ResponseInputText or new text format to completion API format
-                converted_content.append(
-                    {
-                        "type": "text",
-                        "text": item_dict["text"],
-                    },
-                )
-            elif item_type in ["input_image", "image"]:
-                # Convert ResponseInputImage to completion API format
-                if item_dict.get("file_id"):
-                    msg = "File ID input is not supported for Completion API"
-                    raise ValueError(msg)
-
-                if not item_dict.get("image_url"):
-                    msg = "ResponseInputImage must have either file_id or image_url"
-                    raise ValueError(msg)
-
-                # Build image_url object with detail inside
-                image_data = {"url": item_dict["image_url"]}
-                detail = item_dict.get("detail", "auto")
-                if detail: # Include detail if provided
-                    image_data["detail"] = detail
-
-                converted_content.append(
-                    {
-                        "type": "image_url",
-                        "image_url": image_data,
-                    },
-                )
-            else:
-                # Keep existing format (text, image_url)
-                converted_content.append(item_dict)
-
-        return converted_content
-
     def set_message_transfer(self, message_transfer: Callable[[RunnerMessages], RunnerMessages] | None) -> None:
         """Set or update the message transfer callback function.