lite-agent 0.1.0__py3-none-any.whl → 0.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of lite-agent might be problematic. Click here for more details.

@@ -0,0 +1,89 @@
1
+ from typing import Literal
2
+
3
+ from litellm import Usage
4
+ from litellm.types.utils import ModelResponseStream
5
+ from pydantic import BaseModel
6
+
7
+ from .messages import AssistantMessage
8
+
9
+
10
+ class CompletionRawChunk(BaseModel):
11
+ """
12
+ Define the type of chunk
13
+ """
14
+
15
+ type: Literal["completion_raw"]
16
+ raw: ModelResponseStream
17
+
18
+
19
+ class UsageChunk(BaseModel):
20
+ """
21
+ Define the type of usage info chunk
22
+ """
23
+
24
+ type: Literal["usage"]
25
+ usage: Usage
26
+
27
+
28
+ class FinalMessageChunk(BaseModel):
29
+ """
30
+ Define the type of final message chunk
31
+ """
32
+
33
+ type: Literal["final_message"]
34
+ message: AssistantMessage
35
+ finish_reason: str | None = None # Literal["stop", "tool_calls"]
36
+
37
+
38
+ class ToolCallChunk(BaseModel):
39
+ """
40
+ Define the type of tool call chunk
41
+ """
42
+
43
+ type: Literal["tool_call"]
44
+ name: str
45
+ arguments: str
46
+
47
+
48
+ class ToolCallResultChunk(BaseModel):
49
+ """
50
+ Define the type of tool call result chunk
51
+ """
52
+
53
+ type: Literal["tool_call_result"]
54
+ tool_call_id: str
55
+ name: str
56
+ content: str
57
+
58
+
59
+ class ContentDeltaChunk(BaseModel):
60
+ """
61
+ Define the type of message chunk
62
+ """
63
+
64
+ type: Literal["content_delta"]
65
+ delta: str
66
+
67
+
68
+ class ToolCallDeltaChunk(BaseModel):
69
+ """
70
+ Define the type of tool call delta chunk
71
+ """
72
+
73
+ type: Literal["tool_call_delta"]
74
+ tool_call_id: str
75
+ name: str
76
+ arguments_delta: str
77
+
78
+
79
+ AgentChunk = CompletionRawChunk | UsageChunk | FinalMessageChunk | ToolCallChunk | ToolCallResultChunk | ContentDeltaChunk | ToolCallDeltaChunk
80
+
81
+ AgentChunkType = Literal[
82
+ "completion_raw",
83
+ "usage",
84
+ "final_message",
85
+ "tool_call",
86
+ "tool_call_result",
87
+ "content_delta",
88
+ "tool_call_delta",
89
+ ]
@@ -0,0 +1,68 @@
1
+ from collections.abc import Sequence
2
+ from typing import Literal
3
+
4
+ from pydantic import BaseModel
5
+ from rich import Any
6
+
7
+ from .tool_calls import ToolCall
8
+
9
+
10
+ class AssistantMessage(BaseModel):
11
+ id: str
12
+ index: int
13
+ role: Literal["assistant"] = "assistant"
14
+ content: str = ""
15
+ tool_calls: list[ToolCall] | None = None
16
+
17
+
18
+ class Message(BaseModel):
19
+ role: str
20
+ content: str
21
+
22
+
23
+ class UserMessageContentItemText(BaseModel):
24
+ type: Literal["text"]
25
+ text: str
26
+
27
+
28
+ class UserMessageContentItemImageURLImageURL(BaseModel):
29
+ url: str
30
+
31
+
32
+ class UserMessageContentItemImageURL(BaseModel):
33
+ type: Literal["image_url"]
34
+ image_url: UserMessageContentItemImageURLImageURL
35
+
36
+
37
+ class AgentUserMessage(BaseModel):
38
+ role: Literal["user"]
39
+ content: str | Sequence[UserMessageContentItemText | UserMessageContentItemImageURL]
40
+
41
+
42
+ class AgentAssistantMessage(BaseModel):
43
+ role: Literal["assistant"]
44
+ content: str
45
+
46
+
47
+ class AgentSystemMessage(BaseModel):
48
+ role: Literal["system"]
49
+ content: str
50
+
51
+
52
+ class AgentFunctionToolCallMessage(BaseModel):
53
+ arguments: str
54
+ type: Literal["function_call"]
55
+ function_call_id: str
56
+ name: str
57
+ content: str
58
+
59
+
60
+ class AgentFunctionCallOutput(BaseModel):
61
+ call_id: str
62
+ output: str
63
+ type: Literal["function_call_output"]
64
+
65
+
66
+ RunnerMessage = AgentUserMessage | AgentAssistantMessage | AgentSystemMessage | AgentFunctionToolCallMessage | AgentFunctionCallOutput
67
+ AgentMessage = RunnerMessage | AgentSystemMessage
68
+ RunnerMessages = Sequence[RunnerMessage | dict[str, Any]]
@@ -0,0 +1,15 @@
1
+ from typing import Literal
2
+
3
+ from pydantic import BaseModel
4
+
5
+
6
+ class ToolCallFunction(BaseModel):
7
+ name: str
8
+ arguments: str | None = None
9
+
10
+
11
+ class ToolCall(BaseModel):
12
+ type: Literal["function"]
13
+ function: ToolCallFunction
14
+ id: str
15
+ index: int
@@ -0,0 +1,111 @@
1
+ Metadata-Version: 2.4
2
+ Name: lite-agent
3
+ Version: 0.2.0
4
+ Summary: A lightweight, extensible framework for building AI agents.
5
+ Author-email: Jianqi Pan <jannchie@gmail.com>
6
+ License: MIT
7
+ Keywords: AI,agent framework,assistant,chatbot,function call,openai,pydantic,rich,tooling
8
+ Classifier: Intended Audience :: Developers
9
+ Classifier: Intended Audience :: Science/Research
10
+ Classifier: License :: OSI Approved :: MIT License
11
+ Classifier: Operating System :: OS Independent
12
+ Classifier: Programming Language :: Python :: 3
13
+ Classifier: Programming Language :: Python :: 3.10
14
+ Classifier: Programming Language :: Python :: 3.11
15
+ Classifier: Programming Language :: Python :: 3.12
16
+ Classifier: Topic :: Communications :: Chat
17
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
18
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
19
+ Requires-Python: >=3.10
20
+ Requires-Dist: aiofiles>=24.1.0
21
+ Requires-Dist: funcall>=0.7.0
22
+ Requires-Dist: prompt-toolkit>=3.0.51
23
+ Requires-Dist: rich>=14.0.0
24
+ Description-Content-Type: text/markdown
25
+
26
+ # LiteAgent
27
+
28
+ [![codecov](https://codecov.io/gh/Jannchie/lite-agent/graph/badge.svg?token=SJW89Z1VAZ)](https://codecov.io/gh/Jannchie/lite-agent)
29
+
30
+ ## Introduction
31
+
32
+ LiteAgent is an easy-to-learn, lightweight, and extensible AI agent framework built on top of [LiteLLM](https://github.com/BerriAI/litellm). It is designed as a minimal yet practical implementation for quickly building intelligent assistants and chatbots with robust tool-calling capabilities. The codebase is intentionally simple, making it ideal for learning, extension, and rapid prototyping.
33
+
34
+ **Key Advantages:**
35
+
36
+ - **Minimal and approachable:** The simplest agent implementation for fast learning and hacking.
37
+ - **Accurate and complete type hints:** All function signatures are fully type-hinted and never faked, ensuring reliable developer experience and static analysis.
38
+ - **Flexible parameter definition:** Supports defining tool function parameters using basic types, Pydantic models, or Python dataclasses—even in combination.
39
+ - **Streaming responses:** Seamless support for LiteLLM streaming output.
40
+ - **Custom tool functions:** Easily integrate your own Python functions (e.g., weather, temperature queries).
41
+ - **Rich type annotations, Pydantic-based.**
42
+ - **Easy to extend and test.**
43
+
44
+ ## Installation
45
+
46
+ You can install LiteAgent directly from PyPI:
47
+
48
+ ```bash
49
+ pip install lite-agent
50
+ ```
51
+
52
+ Or use [uv](https://github.com/astral-sh/uv):
53
+
54
+ ```bash
55
+ uv pip install lite-agent
56
+ ```
57
+
58
+ If you want to install from source for development:
59
+
60
+ ```bash
61
+ uv pip install -e .
62
+ # or
63
+ pip install -e .
64
+ ```
65
+
66
+ ## Quick Start
67
+
68
+ ### Code Example
69
+
70
+ See `examples/basic.py`:
71
+
72
+ ```python
73
+ import asyncio
74
+ from lite_agent.agent import Agent
75
+ from lite_agent.runner import Runner
76
+
77
+ async def get_whether(city: str) -> str:
78
+ await asyncio.sleep(1)
79
+ return f"The weather in {city} is sunny with a few clouds."
80
+
81
+ async def main():
82
+ agent = Agent(
83
+ model="gpt-4.1",
84
+ name="Weather Assistant",
85
+ instructions="You are a helpful weather assistant.",
86
+ tools=[get_whether],
87
+ )
88
+ runner = Runner(agent)
89
+ resp = await runner.run_until_complete("What's the weather in New York?")
90
+ for chunk in resp:
91
+ print(chunk)
92
+
93
+ if __name__ == "__main__":
94
+ asyncio.run(main())
95
+ ```
96
+
97
+ See `pyproject.toml` for details.
98
+
99
+ ## Testing
100
+
101
+ ```bash
102
+ pytest
103
+ ```
104
+
105
+ ## License
106
+
107
+ MIT License
108
+
109
+ ## Author
110
+
111
+ Jianqi Pan ([jannchie@gmail.com](mailto:jannchie@gmail.com))
@@ -0,0 +1,17 @@
1
+ lite_agent/__init__.py,sha256=T-jxY0aHwqPz5gs-ZzH1zvFgFdYVmOd15fnEc5A2H6U,229
2
+ lite_agent/agent.py,sha256=UE06H1Huk4gHGovbh9DuAJAH2zC0hqf1YXgrVa59_Os,13546
3
+ lite_agent/loggers.py,sha256=XkNkdqwD_nQGfhQJ-bBWT7koci_mMkNw3aBpyMhOICw,57
4
+ lite_agent/message_transfers.py,sha256=nT7-tID20RK2yoN-rDiEE6sSclluSlhYSkayCzmPwk8,3984
5
+ lite_agent/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
6
+ lite_agent/runner.py,sha256=lO8Z5R2RxjjgYzDAeYQbO2niQeieXOEzqUEN484G7QU,21367
7
+ lite_agent/processors/__init__.py,sha256=X78GKL_IWwW2lg8w4DD6GOFWLzAR2wTfKxHlvOkcuUQ,114
8
+ lite_agent/processors/stream_chunk_processor.py,sha256=nMA_cW7FDpXwJvm4F8vFwBXmHHsSELQFcoNEjH3xvn8,4751
9
+ lite_agent/stream_handlers/__init__.py,sha256=2GSiG0VUgcQlFMl6JkGAqikXMII1a43Hr-J5NIct6dk,115
10
+ lite_agent/stream_handlers/litellm.py,sha256=NNMAl8Bvoc2xe-qWKtfqvJQA2yr3sz1IUU90rQ_9iBw,3976
11
+ lite_agent/types/__init__.py,sha256=sc2cdX1tPisfQwu2-apZtUa3u_Q6WDEqzNglfXhwCJo,1295
12
+ lite_agent/types/chunks.py,sha256=Ro5BtrrdsYGkKrEekIhs9vIrBM7HljtgOkHherH8B3k,1697
13
+ lite_agent/types/messages.py,sha256=cmMcj_r1_R9Pgu6ixmBIq2uwwqi5KzIllxGoxpcxF3w,1531
14
+ lite_agent/types/tool_calls.py,sha256=Xnut8-2-Ld9vgA2GKJY6BbFlBaAv_n4W7vo7Jx21A-E,260
15
+ lite_agent-0.2.0.dist-info/METADATA,sha256=egjWZCc9UgPAHMCJqZrjBXlXrG39f9LoW9k6DzntMas,3455
16
+ lite_agent-0.2.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
17
+ lite_agent-0.2.0.dist-info/RECORD,,
lite_agent/__main__.py DELETED
@@ -1,110 +0,0 @@
1
- import asyncio
2
- import logging
3
-
4
- from prompt_toolkit import PromptSession
5
- from prompt_toolkit.validation import Validator
6
- from rich.console import Console
7
- from rich.logging import RichHandler
8
-
9
- from open_agents.agent import Agent
10
- from open_agents.loggers import logger
11
- from open_agents.runner import Runner
12
- from open_agents.types import AgentChunk, ContentDeltaChunk
13
-
14
- logging.basicConfig(level=logging.WARNING, handlers=[RichHandler()], format="%(message)s")
15
- logger.setLevel(logging.INFO)
16
-
17
-
18
- # --- Tool functions (English) ---
19
- async def get_whether(city: str) -> str:
20
- """Get the weather for a city."""
21
- await asyncio.sleep(1) # 模擬網路延遲
22
- return f"The weather in {city} is sunny with a few clouds."
23
-
24
-
25
- async def get_temperature(city: str) -> str:
26
- """Get the temperature for a city."""
27
- await asyncio.sleep(1) # 模擬網路延遲
28
- return f"The temperature in {city} is 25°C."
29
-
30
-
31
- class RichChannel:
32
- def __init__(self) -> None:
33
- self.console = Console()
34
- self.map = {
35
- "final_message": self.handle_final_message,
36
- "tool_call": self.handle_tool_call,
37
- "tool_call_result": self.handle_tool_call_result,
38
- "tool_call_delta": self.handle_tool_call_delta,
39
- "content_delta": self.handle_content_delta,
40
- "usage": self.handle_usage,
41
- }
42
- self.new_turn = True
43
-
44
- def handle(self, chunk: AgentChunk):
45
- handler = self.map[chunk["type"]]
46
- handler(chunk)
47
-
48
- def handle_final_message(self, _chunk: AgentChunk):
49
- print()
50
- self.new_turn = True
51
-
52
- def handle_tool_call(self, chunk: AgentChunk):
53
- name = chunk.get("name", "<unknown>")
54
- arguments = chunk.get("arguments", "")
55
- self.console.print(f"🛠️ [green]{name}[/green]([yellow]{arguments}[/yellow])")
56
-
57
- def handle_tool_call_result(self, chunk: AgentChunk):
58
- name = chunk.get("name", "<unknown>")
59
- content = chunk.get("content", "")
60
- self.console.print(f"🛠️ [green]{name}[/green] → [yellow]{content}[/yellow]")
61
-
62
- def handle_tool_call_delta(self, chunk: AgentChunk): ...
63
- def handle_content_delta(self, chunk: ContentDeltaChunk):
64
- if self.new_turn:
65
- self.console.print("🤖 ", end="")
66
- self.new_turn = False
67
- print(chunk["delta"], end="", flush=True)
68
-
69
- def handle_usage(self, chunk: AgentChunk):
70
- if False:
71
- usage = chunk["usage"]
72
- self.console.print(f"In: {usage.prompt_tokens}, Out: {usage.completion_tokens}, Total: {usage.total_tokens}")
73
-
74
-
75
- async def main():
76
- agent = Agent(
77
- model="gpt-4.1",
78
- name="Weather Assistant",
79
- instructions="You are a helpful weather assistant. Before using tools, briefly explain what you are going to do. Provide friendly and informative responses.",
80
- tools=[get_whether, get_temperature],
81
- )
82
- session = PromptSession()
83
- rich_channel = RichChannel()
84
- runner = Runner(agent)
85
- not_empty_validator = Validator.from_callable(
86
- lambda text: bool(text.strip()),
87
- error_message="Input cannot be empty.",
88
- move_cursor_to_end=True,
89
- )
90
- while True:
91
- try:
92
- user_input = await session.prompt_async(
93
- "👤 ",
94
- default="",
95
- complete_while_typing=True,
96
- validator=not_empty_validator,
97
- validate_while_typing=False,
98
- )
99
- if user_input.lower() in {"exit", "quit"}:
100
- break
101
- response = runner.run_stream(user_input)
102
- async for chunk in response:
103
- rich_channel.handle(chunk)
104
-
105
- except (EOFError, KeyboardInterrupt):
106
- break
107
-
108
-
109
- if __name__ == "__main__":
110
- asyncio.run(main())
@@ -1,166 +0,0 @@
1
- from collections.abc import AsyncGenerator
2
- from typing import Literal, TypedDict
3
-
4
- import litellm
5
- from funcall import Funcall
6
-
7
- from open_agents.loggers import logger
8
- from open_agents.processors import StreamChunkProcessor
9
- from open_agents.processors.stream_chunk_processor import AssistantMessage
10
-
11
-
12
- class LiteLLMRawChunk(TypedDict):
13
- """
14
- Define the type of chunk
15
- """
16
-
17
- type: Literal["litellm_raw"]
18
- raw: litellm.ModelResponseStream
19
-
20
-
21
- class UsageChunk(TypedDict):
22
- """
23
- Define the type of usage info chunk
24
- """
25
-
26
- type: Literal["usage"]
27
- usage: litellm.Usage
28
-
29
-
30
- class FinalMessageChunk(TypedDict):
31
- """
32
- Define the type of final message chunk
33
- """
34
-
35
- type: Literal["final_message"]
36
- message: AssistantMessage
37
- finish_reason: Literal["stop", "tool_calls"]
38
-
39
-
40
- class ToolCallChunk(TypedDict):
41
- """
42
- Define the type of tool call chunk
43
- """
44
-
45
- type: Literal["tool_call"]
46
- name: str
47
- arguments: str
48
-
49
-
50
- class ToolCallResultChunk(TypedDict):
51
- """
52
- Define the type of tool call result chunk
53
- """
54
-
55
- type: Literal["tool_call_result"]
56
- tool_call_id: str
57
- name: str
58
- content: str
59
-
60
-
61
- class ContentDeltaChunk(TypedDict):
62
- """
63
- Define the type of message chunk
64
- """
65
-
66
- type: Literal["content_delta"]
67
- delta: str
68
-
69
-
70
- class ToolCallDeltaChunk(TypedDict):
71
- """
72
- Define the type of tool call delta chunk
73
- """
74
-
75
- type: Literal["tool_call_delta"]
76
- tool_call_id: str
77
- name: str
78
- arguments_delta: str
79
-
80
-
81
- AgentChunk = LiteLLMRawChunk | UsageChunk | FinalMessageChunk | ToolCallChunk | ToolCallResultChunk | ContentDeltaChunk
82
-
83
-
84
- async def chunk_handler(
85
- resp: litellm.CustomStreamWrapper,
86
- fc: Funcall,
87
- ) -> AsyncGenerator[AgentChunk, None]:
88
- """
89
- Optimized chunk handler
90
-
91
- Args:
92
- resp: LiteLLM streaming response wrapper
93
- fc: function call handler
94
-
95
- Yields:
96
- litellm.ModelResponseStream: processed response chunk
97
-
98
- Raises:
99
- Exception: various exceptions during processing
100
- """
101
- processor = StreamChunkProcessor(fc)
102
- async for chunk in resp:
103
- if not isinstance(chunk, litellm.ModelResponseStream):
104
- logger.debug("unexpected chunk type: %s", type(chunk))
105
- logger.debug("chunk content: %s", chunk)
106
- continue
107
-
108
- # Handle usage info
109
- if usage := processor.handle_usage_info(chunk):
110
- yield UsageChunk(type="usage", usage=usage)
111
- continue
112
-
113
- # Get choice and delta data
114
- if not chunk.choices:
115
- yield LiteLLMRawChunk(type="litellm_raw", raw=chunk)
116
- continue
117
-
118
- choice = chunk.choices[0]
119
- delta = choice.delta
120
- if not processor.current_message:
121
- processor.initialize_message(chunk, choice)
122
- if delta.content:
123
- yield ContentDeltaChunk(type="content_delta", delta=delta.content)
124
- processor.update_content(delta.content)
125
- processor.update_tool_calls(delta.tool_calls)
126
- if delta.tool_calls:
127
- for tool_call in delta.tool_calls:
128
- if tool_call.function.arguments:
129
- yield ToolCallDeltaChunk(
130
- type="tool_call_delta",
131
- tool_call_id=processor.current_message.tool_calls[-1].id,
132
- name=processor.current_message.tool_calls[-1].function.name,
133
- arguments_delta=tool_call.function.arguments,
134
- )
135
- # Check if finished
136
- if choice.finish_reason and processor.current_message:
137
- current_message = processor.finalize_message()
138
- yield FinalMessageChunk(type="final_message", message=current_message, finish_reason=choice.finish_reason)
139
- # New: check tool_calls and handle
140
- tool_calls = current_message.tool_calls
141
- if tool_calls:
142
- # Execute each tool_call and yield result
143
- for tool_call in tool_calls:
144
- try:
145
- yield ToolCallChunk(
146
- type="tool_call",
147
- name=tool_call.function.name,
148
- arguments=tool_call.function.arguments,
149
- )
150
- content = await fc.call_function_async(tool_call.function.name, tool_call.function.arguments)
151
- yield ToolCallResultChunk(
152
- type="tool_call_result",
153
- tool_call_id=tool_call.id,
154
- name=tool_call.function.name,
155
- content=str(content),
156
- )
157
- except Exception as e: # noqa: PERF203
158
- logger.exception("Tool call %s failed", tool_call.id)
159
- yield ToolCallResultChunk(
160
- type="tool_call_result",
161
- tool_call_id=tool_call.id,
162
- name=tool_call.function.name,
163
- content=str(e),
164
- )
165
- continue
166
- yield LiteLLMRawChunk(type="litellm_raw", raw=chunk)