lite-agent-0.1.0.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of lite-agent might be problematic.

@@ -0,0 +1,10 @@
+ # Python-generated files
+ __pycache__/
+ *.py[oc]
+ build/
+ dist/
+ wheels/
+ *.egg-info
+
+ # Virtual environments
+ .venv
@@ -0,0 +1 @@
+ 3.11
@@ -0,0 +1,16 @@
+ {
+     // Use IntelliSense to learn about possible attributes.
+     // Hover to view descriptions of existing attributes.
+     // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
+     "version": "0.2.0",
+     "configurations": [
+         {
+             "name": "Python main",
+             "type": "debugpy",
+             "request": "launch",
+             "program": "${workspaceFolder}/src/easy_agent/__main__.py",
+             "console": "integratedTerminal",
+             "justMyCode": false
+         }
+     ]
+ }
@@ -0,0 +1,22 @@
+ Metadata-Version: 2.4
+ Name: lite-agent
+ Version: 0.1.0
+ Summary: A lightweight, extensible framework for building AI agents.
+ Author-email: Jianqi Pan <jannchie@gmail.com>
+ License: MIT
+ Keywords: AI,agent framework,assistant,chatbot,function call,openai,pydantic,rich,tooling
+ Classifier: Intended Audience :: Developers
+ Classifier: Intended Audience :: Science/Research
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Operating System :: OS Independent
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Topic :: Communications :: Chat
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
+ Requires-Python: >=3.10
+ Requires-Dist: funcall>=0.6.0
+ Requires-Dist: prompt-toolkit>=3.0.51
+ Requires-Dist: rich>=14.0.0
File without changes
@@ -0,0 +1,77 @@
+ [project]
+ name = "lite-agent"
+ version = "0.1.0"
+ description = "A lightweight, extensible framework for building AI agents."
+ readme = "README.md"
+ authors = [{ name = "Jianqi Pan", email = "jannchie@gmail.com" }]
+ requires-python = ">=3.10"
+ dependencies = ["funcall>=0.6.0", "prompt-toolkit>=3.0.51", "rich>=14.0.0"]
+ keywords = [
+     "function call",
+     "openai",
+     "pydantic",
+     "rich",
+     "AI",
+     "tooling",
+     "agent framework",
+     "assistant",
+     "chatbot",
+ ]
+ license = { text = "MIT" }
+ classifiers = [
+     "Programming Language :: Python :: 3",
+     "Programming Language :: Python :: 3.10",
+     "Programming Language :: Python :: 3.11",
+     "Programming Language :: Python :: 3.12",
+     "License :: OSI Approved :: MIT License",
+     "Operating System :: OS Independent",
+     "Intended Audience :: Developers",
+     "Intended Audience :: Science/Research",
+     "Topic :: Software Development :: Libraries :: Python Modules",
+     "Topic :: Scientific/Engineering :: Artificial Intelligence",
+     "Topic :: Communications :: Chat",
+ ]
+
+ [build-system]
+ requires = ["hatchling"]
+ build-backend = "hatchling.build"
+
+
+ [tool.ruff]
+ line-length = 200
+
+ [tool.ruff.lint]
+ select = ["ALL"]
+ ignore = [
+     "PGH",
+     "RUF002",
+     "RUF003",
+     "BLE001",
+     "ERA001",
+     "FIX002",
+     "TD002",
+     "TD003",
+     "D",
+     "PLR2004",
+     "INP001",
+     "N812",
+     "FBT003",
+     "S311",
+     "ANN201",
+     "RUF012",
+     "T201",
+     "PT009",
+ ]
+
+ [tool.ruff.lint.per-file-ignores]
+ "**/tests/**/*" = ["S101"]
+
+ [tool.uv]
+ upgrade = true
+ package = true
+
+ [dependency-groups]
+ dev = ["pytest>=8.3.5", "pytest-cov>=6.1.1", "ruff>=0.11.10"]
+
+ [tool.coverage.run]
+ omit = ["tests/*"]
File without changes
@@ -0,0 +1,111 @@
+ import asyncio
+ import logging
+
+ from prompt_toolkit import PromptSession
+ from prompt_toolkit.validation import Validator
+ from rich.console import Console
+ from rich.logging import RichHandler
+
+ from open_agents.agent import Agent
+ from open_agents.loggers import logger
+ from open_agents.runner import Runner
+ from open_agents.types import AgentChunk, ContentDeltaChunk
+
+ logging.basicConfig(level=logging.WARNING, handlers=[RichHandler()], format="%(message)s")
+ logger.setLevel(logging.INFO)
+
+
+ # --- Tool functions ---
+ async def get_weather(city: str) -> str:
+     """Get the weather for a city."""
+     await asyncio.sleep(1)  # Simulate network latency
+     return f"The weather in {city} is sunny with a few clouds."
+
+
+ async def get_temperature(city: str) -> str:
+     """Get the temperature for a city."""
+     await asyncio.sleep(1)  # Simulate network latency
+     return f"The temperature in {city} is 25°C."
+
+
+ class RichChannel:
+     def __init__(self) -> None:
+         self.console = Console()
+         self.map = {
+             "final_message": self.handle_final_message,
+             "tool_call": self.handle_tool_call,
+             "tool_call_result": self.handle_tool_call_result,
+             "tool_call_delta": self.handle_tool_call_delta,
+             "content_delta": self.handle_content_delta,
+             "usage": self.handle_usage,
+         }
+         self.new_turn = True
+         self.show_usage = False  # set to True to print token usage after each turn
+
+     def handle(self, chunk: AgentChunk):
+         handler = self.map[chunk["type"]]
+         handler(chunk)
+
+     def handle_final_message(self, _chunk: AgentChunk):
+         print()
+         self.new_turn = True
+
+     def handle_tool_call(self, chunk: AgentChunk):
+         name = chunk.get("name", "<unknown>")
+         arguments = chunk.get("arguments", "")
+         self.console.print(f"🛠️ [green]{name}[/green]([yellow]{arguments}[/yellow])")
+
+     def handle_tool_call_result(self, chunk: AgentChunk):
+         name = chunk.get("name", "<unknown>")
+         content = chunk.get("content", "")
+         self.console.print(f"🛠️ [green]{name}[/green] → [yellow]{content}[/yellow]")
+
+     def handle_tool_call_delta(self, chunk: AgentChunk): ...
+     def handle_content_delta(self, chunk: ContentDeltaChunk):
+         if self.new_turn:
+             self.console.print("🤖 ", end="")
+             self.new_turn = False
+         print(chunk["delta"], end="", flush=True)
+
+     def handle_usage(self, chunk: AgentChunk):
+         if self.show_usage:
+             usage = chunk["usage"]
+             self.console.print(f"In: {usage.prompt_tokens}, Out: {usage.completion_tokens}, Total: {usage.total_tokens}")
+
+
+ async def main():
+     agent = Agent(
+         model="gpt-4.1",
+         name="Weather Assistant",
+         instructions="You are a helpful weather assistant. Before using tools, briefly explain what you are going to do. Provide friendly and informative responses.",
+         tools=[get_weather, get_temperature],
+     )
+     session = PromptSession()
+     rich_channel = RichChannel()
+     runner = Runner(agent)
+     not_empty_validator = Validator.from_callable(
+         lambda text: bool(text.strip()),
+         error_message="Input cannot be empty.",
+         move_cursor_to_end=True,
+     )
+     while True:
+         try:
+             user_input = await session.prompt_async(
+                 "👤 ",
+                 default="",
+                 complete_while_typing=True,
+                 validator=not_empty_validator,
+                 validate_while_typing=False,
+             )
+             if user_input.lower() in {"exit", "quit"}:
+                 break
+             response = runner.run_stream(user_input)
+             async for chunk in response:
+                 rich_channel.handle(chunk)
+
+         except (EOFError, KeyboardInterrupt):
+             break
+
+
+ if __name__ == "__main__":
+     asyncio.run(main())
@@ -0,0 +1,36 @@
+ from collections.abc import AsyncGenerator, Callable
+
+ import litellm
+ from funcall import Funcall
+
+ from open_agents.chunk_handler import AgentChunk, chunk_handler
+ from open_agents.types import RunnerMessages
+
+
+ class Agent:
+     def __init__(self, *, model: str, name: str, instructions: str, tools: list[Callable] | None = None) -> None:
+         self.name = name
+         self.instructions = instructions
+         self.fc = Funcall(tools)
+         self.model = model
+
+     def prepare_messages(self, messages: RunnerMessages) -> list[dict]:
+         return [
+             {
+                 "role": "system",
+                 "content": f"You are {self.name}. {self.instructions}",
+             },
+             *messages,
+         ]
+
+     async def stream_async(self, messages: RunnerMessages) -> AsyncGenerator[AgentChunk, None]:
+         self.message_histories = self.prepare_messages(messages)
+         tools = self.fc.get_tools(target="litellm")
+         resp = await litellm.acompletion(
+             model=self.model,
+             messages=self.message_histories,
+             tools=tools,
+             tool_choice="auto",
+             stream=True,
+         )
+         return chunk_handler(resp, self.fc)
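
For orientation, here is a minimal usage sketch, not shipped in the package: it drives Agent.stream_async directly and assumes a litellm-compatible credential for the named model is configured in the environment; the model name mirrors the package's own example script.

import asyncio

from open_agents.agent import Agent


async def demo() -> None:
    # tools may be omitted; Agent defaults them to None.
    agent = Agent(model="gpt-4.1", name="Echo Bot", instructions="Answer briefly.")
    stream = await agent.stream_async([{"role": "user", "content": "Hello!"}])
    async for chunk in stream:
        if chunk["type"] == "content_delta":
            print(chunk["delta"], end="", flush=True)


asyncio.run(demo())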
@@ -0,0 +1,152 @@
+ from collections.abc import AsyncGenerator
+ from typing import Literal, TypedDict
+
+ import litellm
+ from funcall import Funcall
+
+ from open_agents.loggers import logger
+ from open_agents.processors import StreamChunkProcessor
+ from open_agents.processors.stream_chunk_processor import AssistantMessage
+
+
+ class LiteLLMRawChunk(TypedDict):
+     """Raw chunk passed through unchanged from litellm."""
+
+     type: Literal["litellm_raw"]
+     raw: litellm.ModelResponseStream
+
+
+ class UsageChunk(TypedDict):
+     """Token usage info chunk."""
+
+     type: Literal["usage"]
+     usage: litellm.Usage
+
+
+ class FinalMessageChunk(TypedDict):
+     """Finalized assistant message chunk."""
+
+     type: Literal["final_message"]
+     message: AssistantMessage
+     finish_reason: Literal["stop", "tool_calls"]
+
+
+ class ToolCallChunk(TypedDict):
+     """Tool call chunk."""
+
+     type: Literal["tool_call"]
+     name: str
+     arguments: str
+
+
+ class ToolCallResultChunk(TypedDict):
+     """Tool call result chunk."""
+
+     type: Literal["tool_call_result"]
+     tool_call_id: str
+     name: str
+     content: str
+
+
+ class ContentDeltaChunk(TypedDict):
+     """Streamed assistant content delta chunk."""
+
+     type: Literal["content_delta"]
+     delta: str
+
+
+ class ToolCallDeltaChunk(TypedDict):
+     """Streamed tool call arguments delta chunk."""
+
+     type: Literal["tool_call_delta"]
+     tool_call_id: str
+     name: str
+     arguments_delta: str
+
+
+ AgentChunk = LiteLLMRawChunk | UsageChunk | FinalMessageChunk | ToolCallChunk | ToolCallResultChunk | ContentDeltaChunk | ToolCallDeltaChunk
+
+
+ async def chunk_handler(
+     resp: litellm.CustomStreamWrapper,
+     fc: Funcall,
+ ) -> AsyncGenerator[AgentChunk, None]:
+     """
+     Convert a litellm stream into typed AgentChunk events and execute tool calls.
+
+     Args:
+         resp: LiteLLM streaming response wrapper
+         fc: function call handler
+
+     Yields:
+         AgentChunk: processed response chunk
+
+     Raises:
+         Exception: various exceptions during processing
+     """
+     processor = StreamChunkProcessor(fc)
+     async for chunk in resp:
+         if not isinstance(chunk, litellm.ModelResponseStream):
+             logger.debug("unexpected chunk type: %s", type(chunk))
+             logger.debug("chunk content: %s", chunk)
+             continue
+
+         # Handle usage info
+         if usage := processor.handle_usage_info(chunk):
+             yield UsageChunk(type="usage", usage=usage)
+             continue
+
+         # Get choice and delta data
+         if not chunk.choices:
+             yield LiteLLMRawChunk(type="litellm_raw", raw=chunk)
+             continue
+
+         choice = chunk.choices[0]
+         delta = choice.delta
+         if not processor.current_message:
+             processor.initialize_message(chunk, choice)
+         if delta.content:
+             yield ContentDeltaChunk(type="content_delta", delta=delta.content)
+             processor.update_content(delta.content)
+         processor.update_tool_calls(delta.tool_calls)
+         if delta.tool_calls:
+             for tool_call in delta.tool_calls:
+                 if tool_call.function.arguments:
+                     yield ToolCallDeltaChunk(
+                         type="tool_call_delta",
+                         tool_call_id=processor.current_message.tool_calls[-1].id,
+                         name=processor.current_message.tool_calls[-1].function.name,
+                         arguments_delta=tool_call.function.arguments,
+                     )
+         # Check if finished
+         if choice.finish_reason and processor.current_message:
+             current_message = processor.finalize_message()
+             yield FinalMessageChunk(type="final_message", message=current_message, finish_reason=choice.finish_reason)
+             # Check tool_calls and handle them
+             tool_calls = current_message.tool_calls
+             if tool_calls:
+                 # Execute each tool_call and yield its result
+                 for tool_call in tool_calls:
+                     try:
+                         yield ToolCallChunk(
+                             type="tool_call",
+                             name=tool_call.function.name,
+                             arguments=tool_call.function.arguments,
+                         )
+                         content = await fc.call_function_async(tool_call.function.name, tool_call.function.arguments)
+                         yield ToolCallResultChunk(
+                             type="tool_call_result",
+                             tool_call_id=tool_call.id,
+                             name=tool_call.function.name,
+                             content=str(content),
+                         )
+                     except Exception as e:  # noqa: PERF203
+                         logger.exception("Tool call %s failed", tool_call.id)
+                         yield ToolCallResultChunk(
+                             type="tool_call_result",
+                             tool_call_id=tool_call.id,
+                             name=tool_call.function.name,
+                             content=str(e),
+                         )
+             continue
+         yield LiteLLMRawChunk(type="litellm_raw", raw=chunk)
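
Because every chunk type carries a literal "type" tag, a consumer can dispatch over the AgentChunk union; a hypothetical sketch, not part of the package (the match form needs Python 3.10+, the package's stated floor):

from open_agents.chunk_handler import AgentChunk


def describe(chunk: AgentChunk) -> str:
    # Each TypedDict in the union is discriminated by its literal "type" field.
    match chunk["type"]:
        case "content_delta":
            return chunk["delta"]
        case "tool_call":
            return f"{chunk['name']}({chunk['arguments']})"
        case "tool_call_result":
            return f"{chunk['name']} -> {chunk['content']}"
        case other:
            return other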
@@ -0,0 +1,3 @@
+ import logging
+
+ logger = logging.getLogger("easy_agent")
@@ -0,0 +1,3 @@
+ from open_agents.processors.stream_chunk_processor import StreamChunkProcessor
+
+ __all__ = ["StreamChunkProcessor"]
@@ -0,0 +1,87 @@
+ import litellm
+ from funcall import Funcall
+ from litellm.types.utils import ChatCompletionDeltaToolCall, StreamingChoices
+
+ from open_agents.loggers import logger
+ from open_agents.types import AssistantMessage, ToolCall, ToolCallFunction
+
+
+ class StreamChunkProcessor:
+     """Processor for handling streaming responses"""
+
+     def __init__(self, fc: Funcall) -> None:
+         self.fc = fc
+         self.current_message: AssistantMessage | None = None
+
+     def initialize_message(self, chunk: litellm.ModelResponseStream, choice: StreamingChoices) -> None:
+         """Initialize the message object"""
+         delta = choice.delta
+         self.current_message = AssistantMessage(
+             id=chunk.id,
+             index=choice.index,
+             role=delta.role,
+             content="",
+         )
+         logger.debug("Initialized new message: %s", self.current_message.id)
+
+     def update_content(self, content: str) -> None:
+         """Update message content"""
+         if self.current_message and content:
+             self.current_message.content += content
+
+     def _initialize_tool_calls(self, tool_calls: list[litellm.ChatCompletionMessageToolCall]) -> None:
+         """Initialize tool calls"""
+         if not self.current_message:
+             return
+
+         self.current_message.tool_calls = []
+         for call in tool_calls:
+             logger.debug("Create new tool call: %s", call.id)
+
+     def _update_tool_calls(self, tool_calls: list[litellm.ChatCompletionMessageToolCall]) -> None:
+         """Update existing tool calls"""
+         if not self.current_message or not self.current_message.tool_calls:
+             return
+
+         for current_call, new_call in zip(self.current_message.tool_calls, tool_calls, strict=False):
+             if new_call.function.arguments:
+                 current_call.function.arguments += new_call.function.arguments
+             if new_call.type:
+                 current_call.type = new_call.type
+
+     def update_tool_calls(self, tool_calls: list[ChatCompletionDeltaToolCall]) -> None:
+         """Handle tool call updates"""
+         if not tool_calls:
+             return
+         for call in tool_calls:
+             if call.id:
+                 # A delta carrying an id marks the start of a new tool call
+                 new_tool_call = ToolCall(
+                     id=call.id,
+                     type=call.type,
+                     function=ToolCallFunction(
+                         name=call.function.name or "",
+                         arguments=call.function.arguments,
+                     ),
+                     index=call.index,
+                 )
+                 if self.current_message.tool_calls is None:
+                     self.current_message.tool_calls = []
+                 self.current_message.tool_calls.append(new_tool_call)
+             else:
+                 # Otherwise append the arguments fragment to the call at the same index
+                 existing_call = self.current_message.tool_calls[call.index]
+                 if call.function.arguments:
+                     existing_call.function.arguments += call.function.arguments
+
+     def handle_usage_info(self, chunk: litellm.ModelResponseStream) -> litellm.Usage | None:
+         """Return the chunk's usage info if present, else None"""
+         usage = getattr(chunk, "usage", None)
+         if usage:
+             logger.debug("Model usage: %s", usage)
+         return usage
+
+     def finalize_message(self) -> AssistantMessage:
+         """Finalize message processing"""
+         logger.debug("Message finalized: %s", self.current_message)
+         return self.current_message
File without changes
@@ -0,0 +1,51 @@
+ from collections.abc import AsyncGenerator
+ from typing import Literal
+
+ from open_agents.agent import Agent
+ from open_agents.chunk_handler import AgentChunk
+ from open_agents.types import AgentToolCallMessage, RunnerMessages
+
+
+ class Runner:
+     def __init__(self, agent: Agent) -> None:
+         self.agent = agent
+         self.messages: RunnerMessages = []
+
+     def run_stream(
+         self,
+         user_input: RunnerMessages | str,
+         max_steps: int = 20,
+         includes: list[Literal["usage", "final_message", "tool_call", "tool_call_result", "tool_call_delta", "content_delta"]] | None = None,
+     ) -> AsyncGenerator[AgentChunk, None]:
+         """Run the agent and return an async generator that yields each chunk."""
+         if includes is None:
+             includes = ["final_message", "usage", "tool_call", "tool_call_result", "tool_call_delta", "content_delta"]
+         if isinstance(user_input, str):
+             self.messages.append({"role": "user", "content": user_input})
+         else:
+             self.messages = user_input
+
+         return self._run_aiter(max_steps, includes)
+
+     async def _run_aiter(self, max_steps: int, includes: list[Literal["usage", "final_message", "tool_call", "tool_call_result", "tool_call_delta", "content_delta"]]) -> AsyncGenerator[AgentChunk, None]:
+         """Step the agent until it stops or max_steps is reached, yielding only the chunk types listed in includes."""
+         steps = 0
+         finish_reason = None
+         while finish_reason != "stop" and steps < max_steps:
+             resp = await self.agent.stream_async(self.messages)
+             async for chunk in resp:
+                 if chunk["type"] == "final_message":
+                     message = chunk["message"]
+                     self.messages.append(message.model_dump())
+                     finish_reason = chunk["finish_reason"]
+                 elif chunk["type"] == "tool_call_result":
+                     self.messages.append(
+                         AgentToolCallMessage(
+                             role="tool",
+                             tool_call_id=chunk["tool_call_id"],
+                             content=chunk["content"],
+                         ),
+                     )
+                 if chunk["type"] in includes:
+                     yield chunk
+             steps += 1
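
Putting the pieces together, a minimal end-to-end sketch, not shipped with the package: it assumes litellm can reach the named model and uses the includes filter above to surface only finalized assistant messages.

import asyncio

from open_agents.agent import Agent
from open_agents.runner import Runner


async def demo() -> None:
    agent = Agent(model="gpt-4.1", name="Helper", instructions="Be brief.")
    runner = Runner(agent)
    # Tool call and delta chunks are filtered out; only final messages arrive.
    async for chunk in runner.run_stream("What can you do?", includes=["final_message"]):
        print(chunk["message"].content)


asyncio.run(demo())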