lite-agent 0.3.0__py3-none-any.whl → 0.4.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of lite-agent might be problematic. See the registry's advisory page for more details.

lite_agent/__init__.py CHANGED
@@ -1,8 +1,8 @@
1
1
  """Lite Agent - A lightweight AI agent framework."""
2
2
 
3
3
  from .agent import Agent
4
+ from .chat_display import display_chat_summary, display_messages
4
5
  from .message_transfers import consolidate_history_transfer
5
- from .rich_helpers import print_chat_history, print_chat_summary
6
6
  from .runner import Runner
7
7
 
8
- __all__ = ["Agent", "Runner", "consolidate_history_transfer", "print_chat_history", "print_chat_summary"]
8
+ __all__ = ["Agent", "Runner", "consolidate_history_transfer", "display_chat_summary", "display_messages"]
lite_agent/agent.py CHANGED
@@ -1,3 +1,4 @@
1
+ import time
1
2
  from collections.abc import AsyncGenerator, Callable, Sequence
2
3
  from pathlib import Path
3
4
  from typing import Any, Optional
@@ -5,12 +6,12 @@ from typing import Any, Optional
5
6
  from funcall import Funcall
6
7
  from jinja2 import Environment, FileSystemLoader
7
8
  from litellm import CustomStreamWrapper
8
- from pydantic import BaseModel
9
9
 
10
10
  from lite_agent.client import BaseLLMClient, LiteLLMClient
11
11
  from lite_agent.loggers import logger
12
- from lite_agent.stream_handlers import litellm_stream_handler
13
- from lite_agent.types import AgentChunk, AgentSystemMessage, RunnerMessages, ToolCall, ToolCallChunk, ToolCallResultChunk
12
+ from lite_agent.stream_handlers import litellm_completion_stream_handler, litellm_response_stream_handler
13
+ from lite_agent.types import AgentChunk, FunctionCallEvent, FunctionCallOutputEvent, RunnerMessages, ToolCall, message_to_llm_dict, system_message_to_llm_dict
14
+ from lite_agent.types.messages import NewAssistantMessage, NewSystemMessage, NewUserMessage
14
15
 
15
16
  TEMPLATES_DIR = Path(__file__).parent / "templates"
16
17
  jinja_env = Environment(loader=FileSystemLoader(str(TEMPLATES_DIR)), autoescape=True)
@@ -162,41 +163,122 @@ class Agent:
162
163
  # Regenerate transfer tools to include the new agent
163
164
  self._add_transfer_tools(self.handoffs)
164
165
 
165
- def prepare_completion_messages(self, messages: RunnerMessages) -> list[dict[str, str]]:
166
- # Convert from responses format to completions format
166
+ def prepare_completion_messages(self, messages: RunnerMessages) -> list[dict]:
167
+ """Prepare messages for completions API (with conversion)."""
167
168
  converted_messages = self._convert_responses_to_completions_format(messages)
168
-
169
- # Prepare instructions with handoff-specific additions
170
169
  instructions = self.instructions
171
-
172
- # Add source instructions if this agent can handoff to others
173
170
  if self.handoffs:
174
171
  instructions = HANDOFFS_SOURCE_INSTRUCTIONS_TEMPLATE.render(extra_instructions=None) + "\n\n" + instructions
175
-
176
- # Add target instructions if this agent can be handed off to (has a parent)
177
172
  if self.parent:
178
173
  instructions = HANDOFFS_TARGET_INSTRUCTIONS_TEMPLATE.render(extra_instructions=None) + "\n\n" + instructions
179
-
180
- # Add wait_for_user instructions if completion condition is "call"
181
174
  if self.completion_condition == "call":
182
175
  instructions = WAIT_FOR_USER_INSTRUCTIONS_TEMPLATE.render(extra_instructions=None) + "\n\n" + instructions
183
-
184
176
  return [
185
- AgentSystemMessage(
186
- role="system",
187
- content=f"You are {self.name}. {instructions}",
188
- ).model_dump(),
177
+ system_message_to_llm_dict(
178
+ NewSystemMessage(
179
+ content=f"You are {self.name}. {instructions}",
180
+ ),
181
+ ),
189
182
  *converted_messages,
190
183
  ]
191
184
 
185
+ def prepare_responses_messages(self, messages: RunnerMessages) -> list[dict[str, Any]]:
186
+ """Prepare messages for responses API (no conversion, just add system message if needed)."""
187
+ instructions = self.instructions
188
+ if self.handoffs:
189
+ instructions = HANDOFFS_SOURCE_INSTRUCTIONS_TEMPLATE.render(extra_instructions=None) + "\n\n" + instructions
190
+ if self.parent:
191
+ instructions = HANDOFFS_TARGET_INSTRUCTIONS_TEMPLATE.render(extra_instructions=None) + "\n\n" + instructions
192
+ if self.completion_condition == "call":
193
+ instructions = WAIT_FOR_USER_INSTRUCTIONS_TEMPLATE.render(extra_instructions=None) + "\n\n" + instructions
194
+ res: list[dict[str, Any]] = [
195
+ {
196
+ "role": "system",
197
+ "content": f"You are {self.name}. {instructions}",
198
+ },
199
+ ]
200
+ for message in messages:
201
+ if isinstance(message, NewAssistantMessage):
202
+ for item in message.content:
203
+ match item.type:
204
+ case "text":
205
+ res.append(
206
+ {
207
+ "role": "assistant",
208
+ "content": item.text,
209
+ },
210
+ )
211
+ case "tool_call":
212
+ res.append(
213
+ {
214
+ "type": "function_call",
215
+ "call_id": item.call_id,
216
+ "name": item.name,
217
+ "arguments": item.arguments,
218
+ },
219
+ )
220
+ case "tool_call_result":
221
+ res.append(
222
+ {
223
+ "type": "function_call_output",
224
+ "call_id": item.call_id,
225
+ "output": item.output,
226
+ },
227
+ )
228
+ elif isinstance(message, NewSystemMessage):
229
+ res.append(
230
+ {
231
+ "role": "system",
232
+ "content": message.content,
233
+ },
234
+ )
235
+ elif isinstance(message, NewUserMessage):
236
+ contents = []
237
+ for item in message.content:
238
+ match item.type:
239
+ case "text":
240
+ contents.append(
241
+ {
242
+ "type": "input_text",
243
+ "text": item.text,
244
+ },
245
+ )
246
+ case "image":
247
+ contents.append(
248
+ {
249
+ "type": "input_image",
250
+ "image_url": item.image_url,
251
+ },
252
+ )
253
+ case "file":
254
+ contents.append(
255
+ {
256
+ "type": "input_file",
257
+ "file_id": item.file_id,
258
+ "file_name": item.file_name,
259
+ },
260
+ )
261
+ res.append(
262
+ {
263
+ "role": message.role,
264
+ "content": contents,
265
+ },
266
+ )
267
+ # Handle dict messages (legacy format)
268
+ elif isinstance(message, dict):
269
+ res.append(message)
270
+ return res
271
+
192
272
  async def completion(self, messages: RunnerMessages, record_to_file: Path | None = None) -> AsyncGenerator[AgentChunk, None]:
193
- # Apply message transfer callback if provided
273
+ # Apply message transfer callback if provided - always use legacy format for LLM compatibility
194
274
  processed_messages = messages
195
275
  if self.message_transfer:
196
276
  logger.debug(f"Applying message transfer callback for agent {self.name}")
197
277
  processed_messages = self.message_transfer(messages)
198
278
 
279
+ # For completions API, use prepare_completion_messages
199
280
  self.message_histories = self.prepare_completion_messages(processed_messages)
281
+
200
282
  tools = self.fc.get_tools(target="completion")
201
283
  resp = await self.client.completion(
202
284
  messages=self.message_histories,
@@ -206,10 +288,27 @@ class Agent:
206
288
 
207
289
  # Ensure resp is a CustomStreamWrapper
208
290
  if isinstance(resp, CustomStreamWrapper):
209
- return litellm_stream_handler(resp, record_to=record_to_file)
291
+ return litellm_completion_stream_handler(resp, record_to=record_to_file)
210
292
  msg = "Response is not a CustomStreamWrapper, cannot stream chunks."
211
293
  raise TypeError(msg)
212
294
 
295
+ async def responses(self, messages: RunnerMessages, record_to_file: Path | None = None) -> AsyncGenerator[AgentChunk, None]:
296
+ # Apply message transfer callback if provided - always use legacy format for LLM compatibility
297
+ processed_messages = messages
298
+ if self.message_transfer:
299
+ logger.debug(f"Applying message transfer callback for agent {self.name}")
300
+ processed_messages = self.message_transfer(messages)
301
+
302
+ # For responses API, use prepare_responses_messages (no conversion)
303
+ self.message_histories = self.prepare_responses_messages(processed_messages)
304
+ tools = self.fc.get_tools()
305
+ resp = await self.client.responses(
306
+ messages=self.message_histories,
307
+ tools=tools,
308
+ tool_choice="auto", # TODO: make this configurable
309
+ )
310
+ return litellm_response_stream_handler(resp, record_to=record_to_file)
311
+
213
312
  async def list_require_confirm_tools(self, tool_calls: Sequence[ToolCall] | None) -> Sequence[ToolCall]:
214
313
  if not tool_calls:
215
314
  return []
@@ -225,7 +324,7 @@ class Agent:
225
324
  results.append(tool_call)
226
325
  return results
227
326
 
228
- async def handle_tool_calls(self, tool_calls: Sequence[ToolCall] | None, context: Any | None = None) -> AsyncGenerator[ToolCallChunk | ToolCallResultChunk, None]: # noqa: ANN401
327
+ async def handle_tool_calls(self, tool_calls: Sequence[ToolCall] | None, context: Any | None = None) -> AsyncGenerator[FunctionCallEvent | FunctionCallOutputEvent, None]: # noqa: ANN401
229
328
  if not tool_calls:
230
329
  return
231
330
  if tool_calls:
@@ -236,26 +335,31 @@ class Agent:
236
335
  continue
237
336
 
238
337
  for tool_call in tool_calls:
338
+ yield FunctionCallEvent(
339
+ call_id=tool_call.id,
340
+ name=tool_call.function.name,
341
+ arguments=tool_call.function.arguments or "",
342
+ )
343
+ start_time = time.time()
239
344
  try:
240
- yield ToolCallChunk(
241
- type="tool_call",
242
- name=tool_call.function.name,
243
- arguments=tool_call.function.arguments or "",
244
- )
245
345
  content = await self.fc.call_function_async(tool_call.function.name, tool_call.function.arguments or "", context)
246
- yield ToolCallResultChunk(
247
- type="tool_call_result",
346
+ end_time = time.time()
347
+ execution_time_ms = int((end_time - start_time) * 1000)
348
+ yield FunctionCallOutputEvent(
248
349
  tool_call_id=tool_call.id,
249
350
  name=tool_call.function.name,
250
351
  content=str(content),
352
+ execution_time_ms=execution_time_ms,
251
353
  )
252
- except Exception as e: # noqa: PERF203
354
+ except Exception as e:
253
355
  logger.exception("Tool call %s failed", tool_call.id)
254
- yield ToolCallResultChunk(
255
- type="tool_call_result",
356
+ end_time = time.time()
357
+ execution_time_ms = int((end_time - start_time) * 1000)
358
+ yield FunctionCallOutputEvent(
256
359
  tool_call_id=tool_call.id,
257
360
  name=tool_call.function.name,
258
361
  content=str(e),
362
+ execution_time_ms=execution_time_ms,
259
363
  )
260
364
 
261
365
  def _convert_responses_to_completions_format(self, messages: RunnerMessages) -> list[dict]:
@@ -265,7 +369,7 @@ class Agent:
265
369
 
266
370
  while i < len(messages):
267
371
  message = messages[i]
268
- message_dict = message.model_dump() if isinstance(message, BaseModel) else message
372
+ message_dict = message_to_llm_dict(message) if isinstance(message, (NewUserMessage, NewSystemMessage, NewAssistantMessage)) else message
269
373
 
270
374
  message_type = message_dict.get("type")
271
375
  role = message_dict.get("role")
@@ -277,11 +381,11 @@ class Agent:
277
381
 
278
382
  while j < len(messages):
279
383
  next_message = messages[j]
280
- next_dict = next_message.model_dump() if isinstance(next_message, BaseModel) else next_message
384
+ next_dict = message_to_llm_dict(next_message) if isinstance(next_message, (NewUserMessage, NewSystemMessage, NewAssistantMessage)) else next_message
281
385
 
282
386
  if next_dict.get("type") == "function_call":
283
387
  tool_call = {
284
- "id": next_dict["function_call_id"], # type: ignore
388
+ "id": next_dict["call_id"], # type: ignore
285
389
  "type": "function",
286
390
  "function": {
287
391
  "name": next_dict["name"], # type: ignore
@@ -340,44 +444,52 @@ class Agent:
340
444
 
341
445
  converted_content = []
342
446
  for item in content:
343
- if isinstance(item, dict):
344
- item_type = item.get("type")
345
- if item_type == "input_text":
346
- # Convert ResponseInputText to completion API format
347
- converted_content.append(
348
- {
349
- "type": "text",
350
- "text": item["text"],
351
- },
352
- )
353
- elif item_type == "input_image":
354
- # Convert ResponseInputImage to completion API format
355
- if item.get("file_id"):
356
- msg = "File ID input is not supported for Completion API. Please use image_url instead of file_id for image input."
357
- raise ValueError(msg)
358
-
359
- if not item.get("image_url"):
360
- msg = "ResponseInputImage must have either file_id or image_url, but image_url is required for Completion API."
361
- raise ValueError(msg)
362
-
363
- # Build image_url object with detail inside
364
- image_data = {"url": item["image_url"]}
365
- detail = item.get("detail", "auto")
366
- if detail: # Include detail if provided
367
- image_data["detail"] = detail
368
-
369
- converted_content.append(
370
- {
371
- "type": "image_url",
372
- "image_url": image_data,
373
- },
374
- )
375
- else:
376
- # Keep existing format (text, image_url)
377
- converted_content.append(item)
447
+ # Convert Pydantic objects to dict first
448
+ if hasattr(item, "model_dump"):
449
+ item_dict = item.model_dump()
450
+ elif hasattr(item, "dict"): # For older Pydantic versions
451
+ item_dict = item.dict()
452
+ elif isinstance(item, dict):
453
+ item_dict = item
378
454
  else:
379
455
  # Handle non-dict items (shouldn't happen, but just in case)
380
456
  converted_content.append(item)
457
+ continue
458
+
459
+ item_type = item_dict.get("type")
460
+ if item_type in ["input_text", "text"]:
461
+ # Convert ResponseInputText or new text format to completion API format
462
+ converted_content.append(
463
+ {
464
+ "type": "text",
465
+ "text": item_dict["text"],
466
+ },
467
+ )
468
+ elif item_type in ["input_image", "image"]:
469
+ # Convert ResponseInputImage to completion API format
470
+ if item_dict.get("file_id"):
471
+ msg = "File ID input is not supported for Completion API"
472
+ raise ValueError(msg)
473
+
474
+ if not item_dict.get("image_url"):
475
+ msg = "ResponseInputImage must have either file_id or image_url"
476
+ raise ValueError(msg)
477
+
478
+ # Build image_url object with detail inside
479
+ image_data = {"url": item_dict["image_url"]}
480
+ detail = item_dict.get("detail", "auto")
481
+ if detail: # Include detail if provided
482
+ image_data["detail"] = detail
483
+
484
+ converted_content.append(
485
+ {
486
+ "type": "image_url",
487
+ "image_url": image_data,
488
+ },
489
+ )
490
+ else:
491
+ # Keep existing format (text, image_url)
492
+ converted_content.append(item_dict)
381
493
 
382
494
  return converted_content
383
495