lite-agent 0.2.0__py3-none-any.whl → 0.4.0__py3-none-any.whl

This diff shows the contents of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between versions exactly as they appear in their public registries.

Potentially problematic release: this version of lite-agent might be problematic.

@@ -1,5 +1,6 @@
-from lite_agent.stream_handlers.litellm import litellm_stream_handler
+from lite_agent.stream_handlers.litellm import litellm_completion_stream_handler, litellm_response_stream_handler
 
 __all__ = [
-    "litellm_stream_handler",
+    "litellm_completion_stream_handler",
+    "litellm_response_stream_handler",
 ]
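
In 0.4.0 the single `litellm_stream_handler` export is split into `litellm_completion_stream_handler` and `litellm_response_stream_handler`. A minimal migration sketch for downstream callers, assuming this hunk is the package's `stream_handlers/__init__.py` and that `litellm.acompletion(..., stream=True)` still returns the `CustomStreamWrapper` the handler expects (the model name and messages are placeholders):

```python
import asyncio

import litellm

# 0.2.0: from lite_agent.stream_handlers import litellm_stream_handler
from lite_agent.stream_handlers import litellm_completion_stream_handler


async def main() -> None:
    # Chat-completions style streaming; the handler normalizes raw litellm
    # chunks into lite-agent events.
    resp = await litellm.acompletion(
        model="gpt-4o-mini",  # placeholder
        messages=[{"role": "user", "content": "hello"}],
        stream=True,
    )
    async for event in litellm_completion_stream_handler(resp):
        print(event.type)


asyncio.run(main())
```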
@@ -1,14 +1,19 @@
 from collections.abc import AsyncGenerator
 from pathlib import Path
+from typing import TYPE_CHECKING
 
 import aiofiles
 import litellm
-from aiofiles.threadpool.text import AsyncTextIOWrapper
-from litellm.types.utils import Delta, ModelResponseStream, StreamingChoices
+from litellm.types.llms.openai import ResponsesAPIStreamingResponse
+from litellm.types.utils import ModelResponseStream
+from pydantic import BaseModel
 
 from lite_agent.loggers import logger
-from lite_agent.processors import StreamChunkProcessor
-from lite_agent.types import AgentChunk, CompletionRawChunk, ContentDeltaChunk, FinalMessageChunk, ToolCallDeltaChunk, UsageChunk
+from lite_agent.processors import CompletionEventProcessor, ResponseEventProcessor
+from lite_agent.types import AgentChunk
+
+if TYPE_CHECKING:
+    from aiofiles.threadpool.text import AsyncTextIOWrapper
 
 
 def ensure_record_file(record_to: Path | None) -> Path | None:
@@ -20,75 +25,14 @@ def ensure_record_file(record_to: Path | None) -> Path | None:
     return record_to
 
 
-async def process_chunk(
-    processor: StreamChunkProcessor,
-    chunk: ModelResponseStream,
-    record_file: AsyncTextIOWrapper | None = None,
-) -> AsyncGenerator[AgentChunk, None]:
-    if record_file:
-        await record_file.write(chunk.model_dump_json() + "\n")
-        await record_file.flush()
-    yield CompletionRawChunk(type="completion_raw", raw=chunk)
-    usage_chunk = await handle_usage_chunk(processor, chunk)
-    if usage_chunk:
-        yield usage_chunk
-        return
-    if not chunk.choices:
-        return
-    choice = chunk.choices[0]
-    delta = choice.delta
-    for result in await handle_content_and_tool_calls(processor, chunk, choice, delta):
-        yield result
-    if choice.finish_reason:
-        current_message = processor.current_message
-        yield FinalMessageChunk(type="final_message", message=current_message, finish_reason=choice.finish_reason)
-
-
-async def handle_usage_chunk(processor: StreamChunkProcessor, chunk: ModelResponseStream) -> UsageChunk | None:
-    usage = processor.handle_usage_info(chunk)
-    if usage:
-        return UsageChunk(type="usage", usage=usage)
-    return None
-
-
-async def handle_content_and_tool_calls(
-    processor: StreamChunkProcessor,
-    chunk: ModelResponseStream,
-    choice: StreamingChoices,
-    delta: Delta,
-) -> list[AgentChunk]:
-    results: list[AgentChunk] = []
-    if not processor.is_initialized:
-        processor.initialize_message(chunk, choice)
-    if delta.content:
-        results.append(ContentDeltaChunk(type="content_delta", delta=delta.content))
-        processor.update_content(delta.content)
-    if delta.tool_calls is not None:
-        processor.update_tool_calls(delta.tool_calls)
-    if delta.tool_calls and processor.current_message.tool_calls:
-        results.extend(
-            [
-                ToolCallDeltaChunk(
-                    type="tool_call_delta",
-                    tool_call_id=processor.current_message.tool_calls[-1].id,
-                    name=processor.current_message.tool_calls[-1].function.name,
-                    arguments_delta=tool_call.function.arguments or "",
-                )
-                for tool_call in delta.tool_calls
-                if tool_call.function.arguments
-            ],
-        )
-    return results
-
-
-async def litellm_stream_handler(
+async def litellm_completion_stream_handler(
     resp: litellm.CustomStreamWrapper,
     record_to: Path | None = None,
 ) -> AsyncGenerator[AgentChunk, None]:
     """
     Optimized chunk handler
     """
-    processor = StreamChunkProcessor()
+    processor = CompletionEventProcessor()
     record_file: AsyncTextIOWrapper | None = None
     record_path = ensure_record_file(record_to)
     if record_path:
@@ -99,7 +43,32 @@ async def litellm_stream_handler(
                 logger.warning("unexpected chunk type: %s", type(chunk))
                 logger.warning("chunk content: %s", chunk)
                 continue
-            async for result in process_chunk(processor, chunk, record_file):
+            async for result in processor.process_chunk(chunk, record_file):
+                yield result
+    finally:
+        if record_file:
+            await record_file.close()
+
+
+async def litellm_response_stream_handler(
+    resp: AsyncGenerator[ResponsesAPIStreamingResponse, None],
+    record_to: Path | None = None,
+) -> AsyncGenerator[AgentChunk, None]:
+    """
+    Response API stream handler for processing ResponsesAPIStreamingResponse chunks
+    """
+    processor = ResponseEventProcessor()
+    record_file: AsyncTextIOWrapper | None = None
+    record_path = ensure_record_file(record_to)
+    if record_path:
+        record_file = await aiofiles.open(record_path, "w", encoding="utf-8")
+    try:
+        async for chunk in resp:
+            if not isinstance(chunk, BaseModel):
+                logger.warning("unexpected chunk type: %s", type(chunk))
+                logger.warning("chunk content: %s", chunk)
+                continue
+            async for result in processor.process_chunk(chunk, record_file):
                 yield result
     finally:
         if record_file:
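
The new `litellm_response_stream_handler` mirrors the completion handler but consumes Responses API chunks. A hedged sketch of driving it: `litellm.aresponses(..., stream=True)` is assumed here to yield `ResponsesAPIStreamingResponse` objects; any async iterator of those would work equally well.

```python
import asyncio
from pathlib import Path

import litellm

from lite_agent.stream_handlers import litellm_response_stream_handler


async def main() -> None:
    # Assumption: litellm exposes an async Responses API entry point that
    # streams ResponsesAPIStreamingResponse chunks when stream=True.
    resp = await litellm.aresponses(
        model="gpt-4o-mini",  # placeholder
        input="hello",
        stream=True,
    )
    # record_to optionally records raw chunks to a file, as in 0.2.0.
    async for event in litellm_response_stream_handler(resp, record_to=Path("run.jsonl")):
        if event.type == "content_delta":
            print(event.delta, end="", flush=True)


asyncio.run(main())
```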
@@ -0,0 +1,10 @@
+<HandoffsGuide>
+You are a parent agent that can assign tasks to sub-agents.
+
+You can transfer conversations to other agents for specific tasks.
+If you need to assign tasks to multiple agents, you should break down the tasks and assign them one by one.
+You need to wait for one sub-agent to finish before assigning the task to the next sub-agent.
+{% if extra_instructions %}
+{{ extra_instructions }}
+{% endif %}
+</HandoffsGuide>
@@ -0,0 +1,9 @@
+<TransferToParentGuide>
+You are a sub-agent that is assigned to a specific task by your parent agent.
+
+Everything you output is intended for your parent agent to read.
+When you finish your task, you should call `transfer_to_parent` to transfer back to parent agent.
+{% if extra_instructions %}
+{{ extra_instructions }}
+{% endif %}
+</TransferToParentGuide>
@@ -0,0 +1,6 @@
+<WaitForUserGuide>
+When you have completed your assigned task or need more information from the user, you must call the `wait_for_user` function.
+{% if extra_instructions %}
+{{ extra_instructions }}
+{% endif %}
+</WaitForUserGuide>
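
All three new prompt files are plain Jinja templates whose only variable is an optional `extra_instructions`. How lite-agent loads and renders them is not visible in this diff; the sketch below only demonstrates the conditional block with stock `jinja2`, using the `WaitForUserGuide` text verbatim:

```python
from jinja2 import Template

WAIT_FOR_USER_GUIDE = """\
<WaitForUserGuide>
When you have completed your assigned task or need more information from the user, you must call the `wait_for_user` function.
{% if extra_instructions %}
{{ extra_instructions }}
{% endif %}
</WaitForUserGuide>
"""

# With extra_instructions set, the guarded block is rendered; without it,
# the {% if %} block disappears from the output.
print(Template(WAIT_FOR_USER_GUIDE).render(extra_instructions="Answer in English."))
print(Template(WAIT_FOR_USER_GUIDE).render())
```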
@@ -1,29 +1,70 @@
 # Export all types from submodules
-from .chunks import (
+from .events import (
     AgentChunk,
     AgentChunkType,
-    CompletionRawChunk,
-    ContentDeltaChunk,
-    FinalMessageChunk,
-    ToolCallChunk,
-    ToolCallDeltaChunk,
-    ToolCallResultChunk,
-    UsageChunk,
+    AssistantMessageEvent,
+    CompletionRawEvent,
+    ContentDeltaEvent,
+    FunctionCallDeltaEvent,
+    FunctionCallEvent,
+    FunctionCallOutputEvent,
+    ResponseRawEvent,
+    Timing,
+    TimingEvent,
+    UsageEvent,
+)
+from .events import (
+    Usage as EventUsage,
 )
 from .messages import (
+    # Legacy compatibility aliases (temporary)
     AgentAssistantMessage,
-    AgentFunctionCallOutput,
-    AgentFunctionToolCallMessage,
-    AgentMessage,
     AgentSystemMessage,
     AgentUserMessage,
     AssistantMessage,
-    Message,
+    # New types
+    AssistantMessageContent,
+    AssistantMessageDict,
+    AssistantMessageMeta,
+    AssistantTextContent,
+    AssistantToolCall,
+    AssistantToolCallResult,
+    BasicMessageMeta,
+    FlexibleRunnerMessage,
+    FunctionCallDict,
+    FunctionCallOutputDict,
+    LLMResponseMeta,
+    MessageDict,
+    # New metadata types
+    MessageMeta,
+    MessageUsage,
+    NewAssistantMessage,
+    NewMessage,
+    NewMessages,
+    NewSystemMessage,
+    # New structured message types
+    NewUserMessage,
+    ResponseInputImage,
+    ResponseInputText,
     RunnerMessage,
     RunnerMessages,
+    SystemMessageDict,
+    UserFileContent,
+    UserImageContent,
+    UserInput,
+    # New content types
+    UserMessageContent,
     UserMessageContentItemImageURL,
     UserMessageContentItemImageURLImageURL,
     UserMessageContentItemText,
+    UserMessageDict,
+    UserTextContent,
+    # Utility functions
+    assistant_message_to_llm_dict,
+    message_to_llm_dict,
+    messages_to_llm_format,
+    system_message_to_llm_dict,
+    user_message_to_llm_dict,
 )
 from .tool_calls import ToolCall, ToolCallFunction
 
@@ -31,25 +72,58 @@ __all__ = [
     "AgentAssistantMessage",
     "AgentChunk",
     "AgentChunkType",
-    "AgentFunctionCallOutput",
-    "AgentFunctionToolCallMessage",
-    "AgentMessage",
     "AgentSystemMessage",
     "AgentUserMessage",
     "AssistantMessage",
-    "CompletionRawChunk",
-    "ContentDeltaChunk",
-    "FinalMessageChunk",
-    "Message",
+    "AssistantMessageContent",
+    "AssistantMessageDict",
+    "AssistantMessageEvent",
+    "AssistantMessageMeta",
+    "AssistantTextContent",
+    "AssistantToolCall",
+    "AssistantToolCallResult",
+    "BasicMessageMeta",
+    "CompletionRawEvent",
+    "ContentDeltaEvent",
+    "EventUsage",
+    "FlexibleRunnerMessage",
+    "FunctionCallDeltaEvent",
+    "FunctionCallDict",
+    "FunctionCallEvent",
+    "FunctionCallOutputDict",
+    "FunctionCallOutputEvent",
+    "LLMResponseMeta",
+    "MessageDict",
+    "MessageMeta",
+    "MessageUsage",
+    "NewAssistantMessage",
+    "NewMessage",
+    "NewMessages",
+    "NewSystemMessage",
+    "NewUserMessage",
+    "ResponseInputImage",
+    "ResponseInputText",
+    "ResponseRawEvent",
     "RunnerMessage",
     "RunnerMessages",
+    "SystemMessageDict",
+    "Timing",
+    "TimingEvent",
     "ToolCall",
-    "ToolCallChunk",
-    "ToolCallDeltaChunk",
     "ToolCallFunction",
-    "ToolCallResultChunk",
-    "UsageChunk",
+    "UsageEvent",
+    "UserFileContent",
+    "UserImageContent",
+    "UserInput",
+    "UserMessageContent",
     "UserMessageContentItemImageURL",
     "UserMessageContentItemImageURLImageURL",
     "UserMessageContentItemText",
+    "UserMessageDict",
+    "UserTextContent",
+    "assistant_message_to_llm_dict",
+    "message_to_llm_dict",
+    "messages_to_llm_format",
+    "system_message_to_llm_dict",
+    "user_message_to_llm_dict",
 ]
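
For downstream imports, the practical effect of this shuffle is that the `*Chunk` names are gone while the import path stays the same. The renames are not spelled out in the diff, so treat the mapping below as inferred (e.g. `ContentDeltaChunk` → `ContentDeltaEvent`, `UsageChunk` → `UsageEvent`, `ToolCallDeltaChunk` → `FunctionCallDeltaEvent`):

```python
# 0.2.0
# from lite_agent.types import ContentDeltaChunk, ToolCallDeltaChunk, UsageChunk

# 0.4.0 — same import path, event-style names
from lite_agent.types import (
    AgentChunk,
    ContentDeltaEvent,
    FunctionCallDeltaEvent,
    UsageEvent,
)
```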
@@ -0,0 +1,119 @@
+from typing import Literal
+
+from litellm.types.utils import ModelResponseStream
+from pydantic import BaseModel
+
+from .messages import NewAssistantMessage
+
+
+class Usage(BaseModel):
+    input_tokens: int
+    output_tokens: int
+
+
+class Timing(BaseModel):
+    latency_ms: int
+    output_time_ms: int
+
+
+class CompletionRawEvent(BaseModel):
+    """
+    Define the type of chunk
+    """
+
+    type: Literal["completion_raw"] = "completion_raw"
+    raw: ModelResponseStream
+
+
+class ResponseRawEvent(BaseModel):
+    """
+    Define the type of response raw chunk
+    """
+
+    type: Literal["response_raw"] = "response_raw"
+    raw: object
+
+
+class UsageEvent(BaseModel):
+    """
+    Define the type of usage info chunk
+    """
+
+    type: Literal["usage"] = "usage"
+    usage: Usage
+
+
+class TimingEvent(BaseModel):
+    """
+    Define the type of timing info chunk
+    """
+
+    type: Literal["timing"] = "timing"
+    timing: Timing
+
+
+class AssistantMessageEvent(BaseModel):
+    """
+    Define the type of assistant message chunk
+    """
+
+    type: Literal["assistant_message"] = "assistant_message"
+    message: NewAssistantMessage
+
+
+class FunctionCallEvent(BaseModel):
+    """
+    Define the type of tool call chunk
+    """
+
+    type: Literal["function_call"] = "function_call"
+    call_id: str
+    name: str
+    arguments: str
+
+
+class FunctionCallOutputEvent(BaseModel):
+    """
+    Define the type of tool call result chunk
+    """
+
+    type: Literal["function_call_output"] = "function_call_output"
+    tool_call_id: str
+    name: str
+    content: str
+    execution_time_ms: int | None = None
+
+
+class ContentDeltaEvent(BaseModel):
+    """
+    Define the type of message chunk
+    """
+
+    type: Literal["content_delta"] = "content_delta"
+    delta: str
+
+
+class FunctionCallDeltaEvent(BaseModel):
+    """
+    Define the type of tool call delta chunk
+    """
+
+    type: Literal["function_call_delta"] = "function_call_delta"
+    tool_call_id: str
+    name: str
+    arguments_delta: str
+
+
+AgentChunk = CompletionRawEvent | ResponseRawEvent | UsageEvent | TimingEvent | FunctionCallEvent | FunctionCallOutputEvent | ContentDeltaEvent | FunctionCallDeltaEvent | AssistantMessageEvent
+
+AgentChunkType = Literal[
+    "completion_raw",
+    "response_raw",
+    "usage",
+    "timing",
+    "function_call",
+    "function_call_output",
+    "content_delta",
+    "function_call_delta",
+    "assistant_message",
+]
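
Since `AgentChunk` is now a plain union of Pydantic models discriminated by a `Literal` `type` field, consumers can branch on `event.type`. The field names below come straight from the definitions above; the dispatch helper itself is only illustrative:

```python
from lite_agent.types import AgentChunk


def describe(event: AgentChunk) -> str:
    """Render a one-line summary of a streamed event."""
    if event.type == "content_delta":
        return event.delta
    if event.type == "function_call":
        return f"call {event.name}({event.arguments}) [{event.call_id}]"
    if event.type == "function_call_output":
        return f"{event.name} -> {event.content}"
    if event.type == "usage":
        return f"tokens in={event.usage.input_tokens} out={event.usage.output_tokens}"
    if event.type == "timing":
        return f"latency={event.timing.latency_ms}ms, output={event.timing.output_time_ms}ms"
    # completion_raw / response_raw / function_call_delta / assistant_message
    return event.type
```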