lite-agent 0.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


lite_agent/__init__.py ADDED
File without changes
lite_agent/__main__.py ADDED
@@ -0,0 +1,110 @@
+ import asyncio
+ import logging
+
+ from prompt_toolkit import PromptSession
+ from prompt_toolkit.validation import Validator
+ from rich.console import Console
+ from rich.logging import RichHandler
+
+ from lite_agent.agent import Agent
+ from lite_agent.loggers import logger
+ from lite_agent.runner import Runner
+ from lite_agent.types import AgentChunk, ContentDeltaChunk
+
+ logging.basicConfig(level=logging.WARNING, handlers=[RichHandler()], format="%(message)s")
+ logger.setLevel(logging.INFO)
+
+
+ # --- Tool functions ---
+ async def get_weather(city: str) -> str:
+     """Get the weather for a city."""
+     await asyncio.sleep(1)  # simulate network latency
+     return f"The weather in {city} is sunny with a few clouds."
+
+
+ async def get_temperature(city: str) -> str:
+     """Get the temperature for a city."""
+     await asyncio.sleep(1)  # simulate network latency
+     return f"The temperature in {city} is 25°C."
+
+
+ class RichChannel:
+     def __init__(self) -> None:
+         self.console = Console()
+         self.map = {
+             "final_message": self.handle_final_message,
+             "tool_call": self.handle_tool_call,
+             "tool_call_result": self.handle_tool_call_result,
+             "tool_call_delta": self.handle_tool_call_delta,
+             "content_delta": self.handle_content_delta,
+             "usage": self.handle_usage,
+         }
+         self.new_turn = True
+
+     def handle(self, chunk: AgentChunk):
+         handler = self.map[chunk["type"]]
+         handler(chunk)
+
+     def handle_final_message(self, _chunk: AgentChunk):
+         print()
+         self.new_turn = True
+
+     def handle_tool_call(self, chunk: AgentChunk):
+         name = chunk.get("name", "<unknown>")
+         arguments = chunk.get("arguments", "")
+         self.console.print(f"🛠️ [green]{name}[/green]([yellow]{arguments}[/yellow])")
+
+     def handle_tool_call_result(self, chunk: AgentChunk):
+         name = chunk.get("name", "<unknown>")
+         content = chunk.get("content", "")
+         self.console.print(f"🛠️ [green]{name}[/green] → [yellow]{content}[/yellow]")
+
+     def handle_tool_call_delta(self, chunk: AgentChunk): ...  # deltas are ignored; the full call is printed on completion
+
+     def handle_content_delta(self, chunk: ContentDeltaChunk):
+         if self.new_turn:
+             self.console.print("🤖 ", end="")
+             self.new_turn = False
+         print(chunk["delta"], end="", flush=True)
+
+     def handle_usage(self, chunk: AgentChunk):
+         usage = chunk["usage"]
+         self.console.print(f"In: {usage.prompt_tokens}, Out: {usage.completion_tokens}, Total: {usage.total_tokens}")
+
+
+ async def main():
+     agent = Agent(
+         model="gpt-4.1",
+         name="Weather Assistant",
+         instructions="You are a helpful weather assistant. Before using tools, briefly explain what you are going to do. Provide friendly and informative responses.",
+         tools=[get_weather, get_temperature],
+     )
+     session = PromptSession()
+     rich_channel = RichChannel()
+     runner = Runner(agent)
+     not_empty_validator = Validator.from_callable(
+         lambda text: bool(text.strip()),
+         error_message="Input cannot be empty.",
+         move_cursor_to_end=True,
+     )
+     while True:
+         try:
+             user_input = await session.prompt_async(
+                 "👤 ",
+                 default="",
+                 complete_while_typing=True,
+                 validator=not_empty_validator,
+                 validate_while_typing=False,
+             )
+             if user_input.lower() in {"exit", "quit"}:
+                 break
+             response = runner.run_stream(user_input)
+             async for chunk in response:
+                 rich_channel.handle(chunk)
+         except (EOFError, KeyboardInterrupt):
+             break
+
+
+ if __name__ == "__main__":
+     asyncio.run(main())
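Because the package ships a lite_agent/__main__.py, the demo above can be launched with python -m lite_agent. The "gpt-4.1" model string is resolved by litellm, so suitable provider credentials (for example, OPENAI_API_KEY) must be present in the environment.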
lite_agent/agent.py ADDED
@@ -0,0 +1,36 @@
+ from collections.abc import AsyncGenerator, Callable
+
+ import litellm
+ from funcall import Funcall
+
+ from lite_agent.chunk_handler import AgentChunk, chunk_handler
+ from lite_agent.types import RunnerMessages
+
+
+ class Agent:
+     def __init__(self, *, model: str, name: str, instructions: str, tools: list[Callable] | None = None) -> None:
+         self.name = name
+         self.instructions = instructions
+         self.fc = Funcall(tools)
+         self.model = model
+
+     def prepare_messages(self, messages: RunnerMessages) -> list[dict]:
+         return [
+             {
+                 "role": "system",
+                 "content": f"You are {self.name}. {self.instructions}",
+             },
+             *messages,
+         ]
+
+     async def stream_async(self, messages: RunnerMessages) -> AsyncGenerator[AgentChunk, None]:
+         self.message_histories = self.prepare_messages(messages)
+         tools = self.fc.get_tools(target="litellm")
+         resp = await litellm.acompletion(
+             model=self.model,
+             messages=self.message_histories,
+             tools=tools,
+             tool_choice="auto",
+             stream=True,
+         )
+         return chunk_handler(resp, self.fc)
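For reference, prepare_messages simply prepends a synthesized system message to the caller's history. A minimal sketch of the shape it returns, assuming Funcall accepts the constructor's default tools=None (the agent arguments here are illustrative, not from the package):

from lite_agent.agent import Agent

agent = Agent(model="gpt-4.1", name="Weather Assistant", instructions="Answer weather questions.")
messages = agent.prepare_messages([{"role": "user", "content": "Hi"}])
# messages ==
# [
#     {"role": "system", "content": "You are Weather Assistant. Answer weather questions."},
#     {"role": "user", "content": "Hi"},
# ]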
lite_agent/chunk_handler.py ADDED
@@ -0,0 +1,166 @@
+ from collections.abc import AsyncGenerator
+ from typing import Literal, TypedDict
+
+ import litellm
+ from funcall import Funcall
+
+ from lite_agent.loggers import logger
+ from lite_agent.processors import StreamChunkProcessor
+ from lite_agent.processors.stream_chunk_processor import AssistantMessage
+
+
+ class LiteLLMRawChunk(TypedDict):
+     """A raw LiteLLM stream chunk passed through unchanged."""
+
+     type: Literal["litellm_raw"]
+     raw: litellm.ModelResponseStream
+
+
+ class UsageChunk(TypedDict):
+     """Token usage reported for the request."""
+
+     type: Literal["usage"]
+     usage: litellm.Usage
+
+
+ class FinalMessageChunk(TypedDict):
+     """The fully assembled assistant message for one turn."""
+
+     type: Literal["final_message"]
+     message: AssistantMessage
+     finish_reason: Literal["stop", "tool_calls"]
+
+
+ class ToolCallChunk(TypedDict):
+     """A tool call about to be executed."""
+
+     type: Literal["tool_call"]
+     name: str
+     arguments: str
+
+
+ class ToolCallResultChunk(TypedDict):
+     """The result (or error text) of an executed tool call."""
+
+     type: Literal["tool_call_result"]
+     tool_call_id: str
+     name: str
+     content: str
+
+
+ class ContentDeltaChunk(TypedDict):
+     """An incremental piece of assistant message content."""
+
+     type: Literal["content_delta"]
+     delta: str
+
+
+ class ToolCallDeltaChunk(TypedDict):
+     """An incremental piece of a tool call's arguments."""
+
+     type: Literal["tool_call_delta"]
+     tool_call_id: str
+     name: str
+     arguments_delta: str
+
+
+ AgentChunk = LiteLLMRawChunk | UsageChunk | FinalMessageChunk | ToolCallChunk | ToolCallResultChunk | ContentDeltaChunk | ToolCallDeltaChunk
+
+
+ async def chunk_handler(
+     resp: litellm.CustomStreamWrapper,
+     fc: Funcall,
+ ) -> AsyncGenerator[AgentChunk, None]:
+     """
+     Convert a LiteLLM stream into typed agent chunks and execute tool calls.
+
+     Args:
+         resp: LiteLLM streaming response wrapper
+         fc: function call handler
+
+     Yields:
+         AgentChunk: processed response chunks
+
+     Raises:
+         Exception: various exceptions during processing
+     """
+     processor = StreamChunkProcessor(fc)
+     async for chunk in resp:
+         if not isinstance(chunk, litellm.ModelResponseStream):
+             logger.debug("unexpected chunk type: %s", type(chunk))
+             logger.debug("chunk content: %s", chunk)
+             continue
+
+         # Handle usage info
+         if usage := processor.handle_usage_info(chunk):
+             yield UsageChunk(type="usage", usage=usage)
+             continue
+
+         # Get choice and delta data
+         if not chunk.choices:
+             yield LiteLLMRawChunk(type="litellm_raw", raw=chunk)
+             continue
+
+         choice = chunk.choices[0]
+         delta = choice.delta
+         if not processor.current_message:
+             processor.initialize_message(chunk, choice)
+         if delta.content:
+             yield ContentDeltaChunk(type="content_delta", delta=delta.content)
+             processor.update_content(delta.content)
+         processor.update_tool_calls(delta.tool_calls)
+         if delta.tool_calls:
+             for tool_call in delta.tool_calls:
+                 if tool_call.function.arguments:
+                     yield ToolCallDeltaChunk(
+                         type="tool_call_delta",
+                         tool_call_id=processor.current_message.tool_calls[-1].id,
+                         name=processor.current_message.tool_calls[-1].function.name,
+                         arguments_delta=tool_call.function.arguments,
+                     )
+         # Check if finished
+         if choice.finish_reason and processor.current_message:
+             current_message = processor.finalize_message()
+             yield FinalMessageChunk(type="final_message", message=current_message, finish_reason=choice.finish_reason)
+             # Check tool_calls and handle them
+             tool_calls = current_message.tool_calls
+             if tool_calls:
+                 # Execute each tool_call and yield the result
+                 for tool_call in tool_calls:
+                     try:
+                         yield ToolCallChunk(
+                             type="tool_call",
+                             name=tool_call.function.name,
+                             arguments=tool_call.function.arguments,
+                         )
+                         content = await fc.call_function_async(tool_call.function.name, tool_call.function.arguments)
+                         yield ToolCallResultChunk(
+                             type="tool_call_result",
+                             tool_call_id=tool_call.id,
+                             name=tool_call.function.name,
+                             content=str(content),
+                         )
+                     except Exception as e:  # noqa: PERF203
+                         logger.exception("Tool call %s failed", tool_call.id)
+                         yield ToolCallResultChunk(
+                             type="tool_call_result",
+                             tool_call_id=tool_call.id,
+                             name=tool_call.function.name,
+                             content=str(e),
+                         )
+             continue
+         yield LiteLLMRawChunk(type="litellm_raw", raw=chunk)
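Since every chunk type carries a literal type key, AgentChunk behaves as a discriminated union, and consumers can dispatch on it with a match statement (Python 3.10+, per the package's Requires-Python). A minimal sketch; the handler bodies are placeholders, not part of the package:

from lite_agent.chunk_handler import AgentChunk


def dispatch(chunk: AgentChunk) -> None:
    match chunk["type"]:
        case "content_delta":
            print(chunk["delta"], end="")  # incremental assistant text
        case "tool_call":
            print(f'\ncalling {chunk["name"]}({chunk["arguments"]})')
        case "tool_call_result":
            print(f'\n{chunk["name"]} -> {chunk["content"]}')
        case "final_message":
            print()  # the turn is complete
        case _:
            pass  # usage / raw / tool_call_delta chunks ignored here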
lite_agent/loggers.py ADDED
@@ -0,0 +1,3 @@
+ import logging
+
+ logger = logging.getLogger("lite_agent")
lite_agent/processors/__init__.py ADDED
@@ -0,0 +1,3 @@
+ from lite_agent.processors.stream_chunk_processor import StreamChunkProcessor
+
+ __all__ = ["StreamChunkProcessor"]
lite_agent/processors/stream_chunk_processor.py ADDED
@@ -0,0 +1,85 @@
+ import litellm
+ from funcall import Funcall
+ from litellm.types.utils import ChatCompletionDeltaToolCall, StreamingChoices
+
+ from lite_agent.loggers import logger
+ from lite_agent.types import AssistantMessage, ToolCall, ToolCallFunction
+
+
+ class StreamChunkProcessor:
+     """Processor for handling streaming responses"""
+
+     def __init__(self, fc: Funcall) -> None:
+         self.fc = fc
+         self.current_message: AssistantMessage | None = None
+
+     def initialize_message(self, chunk: litellm.ModelResponseStream, choice: StreamingChoices) -> None:
+         """Initialize the message object"""
+         delta = choice.delta
+         self.current_message = AssistantMessage(
+             id=chunk.id,
+             index=choice.index,
+             role=delta.role,
+             content="",
+         )
+         logger.debug("Initialized new message: %s", self.current_message.id)
+
+     def update_content(self, content: str) -> None:
+         """Update message content"""
+         if self.current_message and content:
+             self.current_message.content += content
+
+     def _initialize_tool_calls(self, tool_calls: list[litellm.ChatCompletionMessageToolCall]) -> None:
+         """Initialize tool calls"""
+         if not self.current_message:
+             return
+
+         self.current_message.tool_calls = []
+         for call in tool_calls:
+             logger.debug("Create new tool call: %s", call.id)
+
+     def _update_tool_calls(self, tool_calls: list[litellm.ChatCompletionMessageToolCall]) -> None:
+         """Update existing tool calls"""
+         if not self.current_message or not self.current_message.tool_calls:
+             return
+
+         for current_call, new_call in zip(self.current_message.tool_calls, tool_calls, strict=False):
+             if new_call.function.arguments:
+                 current_call.function.arguments += new_call.function.arguments
+             if new_call.type:
+                 current_call.type = new_call.type
+
+     def update_tool_calls(self, tool_calls: list[ChatCompletionDeltaToolCall]) -> None:
+         """Handle tool call updates"""
+         if not tool_calls or not self.current_message:
+             return
+         for call in tool_calls:
+             if call.id:
+                 # A delta that carries an id starts a new tool call.
+                 new_tool_call = ToolCall(
+                     id=call.id,
+                     type=call.type,
+                     function=ToolCallFunction(
+                         name=call.function.name or "",
+                         arguments=call.function.arguments or "",
+                     ),
+                     index=call.index,
+                 )
+                 if self.current_message.tool_calls is None:
+                     self.current_message.tool_calls = []
+                 self.current_message.tool_calls.append(new_tool_call)
+             else:
+                 # Argument-only deltas are appended to the call at the same index.
+                 existing_call = self.current_message.tool_calls[call.index]
+                 if call.function.arguments:
+                     existing_call.function.arguments += call.function.arguments
+
+     def handle_usage_info(self, chunk: litellm.ModelResponseStream) -> litellm.Usage | None:
+         """Return the chunk's usage info if present, else None"""
+         usage = getattr(chunk, "usage", None)
+         if usage:
+             logger.debug("Model usage: %s", usage)
+         return usage
+
+     def finalize_message(self) -> AssistantMessage:
+         """Finalize message processing"""
+         logger.debug("Message finalized: %s", self.current_message)
+         return self.current_message
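The id/index protocol in update_tool_calls mirrors how OpenAI-style streams split a tool call across deltas: the first fragment carries the id and function name, later fragments carry only argument text. The same merge on plain dicts (every delta value below is fabricated for illustration):

deltas = [
    {"id": "call_1", "index": 0, "name": "get_weather", "arguments": '{"ci'},
    {"id": None, "index": 0, "name": None, "arguments": 'ty": "Tokyo"}'},
]
calls: list[dict] = []
for d in deltas:
    if d["id"]:  # first fragment of a call: open a new entry
        calls.append({"id": d["id"], "name": d["name"] or "", "arguments": d["arguments"] or ""})
    else:  # continuation: append the argument fragment to the call at this index
        calls[d["index"]]["arguments"] += d["arguments"]
assert calls[0]["arguments"] == '{"city": "Tokyo"}'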
lite_agent/py.typed ADDED
File without changes
lite_agent/runner.py ADDED
@@ -0,0 +1,51 @@
+ from collections.abc import AsyncGenerator
+ from typing import Literal
+
+ from lite_agent.agent import Agent
+ from lite_agent.chunk_handler import AgentChunk
+ from lite_agent.types import AgentToolCallMessage, RunnerMessages
+
+
+ class Runner:
+     def __init__(self, agent: Agent) -> None:
+         self.agent = agent
+         self.messages: RunnerMessages = []
+
+     def run_stream(
+         self,
+         user_input: RunnerMessages | str,
+         max_steps: int = 20,
+         includes: list[Literal["usage", "final_message", "tool_call", "tool_call_result", "tool_call_delta", "content_delta"]] | None = None,
+     ) -> AsyncGenerator[AgentChunk, None]:
+         """Run the agent and return an async generator that yields the selected chunk types."""
+         if includes is None:
+             includes = ["final_message", "usage", "tool_call", "tool_call_result", "tool_call_delta", "content_delta"]
+         if isinstance(user_input, str):
+             self.messages.append({"role": "user", "content": user_input})
+         else:
+             self.messages = list(user_input)
+
+         return self._run_aiter(max_steps, includes)
+
+     async def _run_aiter(self, max_steps: int, includes: list[Literal["usage", "final_message", "tool_call", "tool_call_result", "tool_call_delta", "content_delta"]]) -> AsyncGenerator[AgentChunk, None]:
+         """Step the agent until it stops or max_steps is reached, yielding the selected chunk types."""
+         steps = 0
+         finish_reason = None
+         while finish_reason != "stop" and steps < max_steps:
+             resp = await self.agent.stream_async(self.messages)
+             async for chunk in resp:
+                 if chunk["type"] == "final_message":
+                     message = chunk["message"]
+                     self.messages.append(message.model_dump())
+                     finish_reason = chunk["finish_reason"]
+                 elif chunk["type"] == "tool_call_result":
+                     self.messages.append(
+                         AgentToolCallMessage(
+                             role="tool",
+                             tool_call_id=chunk["tool_call_id"],
+                             content=chunk["content"],
+                         ),
+                     )
+                 if chunk["type"] in includes:
+                     yield chunk
+             steps += 1
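A minimal sketch of driving the Runner outside the interactive demo, printing only assistant text via the includes filter (the tool, prompt, and credentials setup here are illustrative):

import asyncio

from lite_agent.agent import Agent
from lite_agent.runner import Runner


async def get_weather(city: str) -> str:
    """Get the weather for a city."""
    return f"The weather in {city} is sunny."


async def main() -> None:
    agent = Agent(
        model="gpt-4.1",
        name="Weather Assistant",
        instructions="Answer weather questions.",
        tools=[get_weather],
    )
    runner = Runner(agent)
    # Tool calls still execute internally; only content deltas are yielded.
    async for chunk in runner.run_stream("Weather in Tokyo?", includes=["content_delta"]):
        print(chunk["delta"], end="", flush=True)
    print()


asyncio.run(main())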
lite_agent/types.py ADDED
@@ -0,0 +1,152 @@
+ from typing import Literal, TypedDict
+
+ import litellm
+ from pydantic import BaseModel
+
+
+ class ToolCallFunction(BaseModel):
+     name: str
+     arguments: str | None = None
+
+
+ class ToolCall(BaseModel):
+     type: Literal["function"]
+     function: ToolCallFunction
+     id: str
+     index: int | None = None
+
+
+ class AssistantMessage(BaseModel):
+     id: str
+     index: int | None = None
+     role: Literal["assistant"] = "assistant"
+     content: str = ""
+     tool_calls: list[ToolCall] | None = None
+
+
+ class Message(TypedDict):
+     role: str
+     content: str
+
+
+ class UserMessageContentItemText(TypedDict):
+     type: Literal["text"]
+     text: str
+
+
+ class UserMessageContentItemImageURLImageURL(TypedDict):
+     url: str
+
+
+ class UserMessageContentItemImageURL(TypedDict):
+     type: Literal["image_url"]
+     image_url: UserMessageContentItemImageURLImageURL
+
+
+ class AgentUserMessage(TypedDict):
+     role: Literal["user"]
+     content: str | list[UserMessageContentItemText | UserMessageContentItemImageURL]
+
+
+ class AssistantMessageToolCallFunction(TypedDict):
+     name: str
+     arguments: str
+
+
+ class AssistantMessageToolCall(TypedDict):
+     id: str
+     type: Literal["function"]
+     function: AssistantMessageToolCallFunction
+     tool_call_id: str
+
+
+ class AgentAssistantMessage(TypedDict):
+     role: Literal["assistant"]
+     content: str
+     tool_calls: list[AssistantMessageToolCall] | None
+
+
+ class AgentSystemMessage(TypedDict):
+     role: Literal["system"]
+     content: str
+
+
+ class AgentToolCallMessage(TypedDict):
+     role: Literal["tool"]
+     tool_call_id: str
+     content: str
+
+
+ RunnerMessage = AgentUserMessage | AgentAssistantMessage | AgentToolCallMessage
+ AgentMessage = RunnerMessage | AgentSystemMessage
+ RunnerMessages = list[RunnerMessage]
+
+
+ class LiteLLMRawChunk(TypedDict):
+     """A raw LiteLLM stream chunk passed through unchanged."""
+
+     type: Literal["litellm_raw"]
+     raw: litellm.ModelResponseStream
+
+
+ class UsageChunk(TypedDict):
+     """Token usage reported for the request."""
+
+     type: Literal["usage"]
+     usage: litellm.Usage
+
+
+ class FinalMessageChunk(TypedDict):
+     """The fully assembled assistant message for one turn."""
+
+     type: Literal["final_message"]
+     message: AssistantMessage
+     finish_reason: Literal["stop", "tool_calls"]
+
+
+ class ToolCallChunk(TypedDict):
+     """A tool call about to be executed."""
+
+     type: Literal["tool_call"]
+     name: str
+     arguments: str
+
+
+ class ToolCallResultChunk(TypedDict):
+     """The result (or error text) of an executed tool call."""
+
+     type: Literal["tool_call_result"]
+     tool_call_id: str
+     name: str
+     content: str
+
+
+ class ContentDeltaChunk(TypedDict):
+     """An incremental piece of assistant message content."""
+
+     type: Literal["content_delta"]
+     delta: str
+
+
+ class ToolCallDeltaChunk(TypedDict):
+     """An incremental piece of a tool call's arguments."""
+
+     type: Literal["tool_call_delta"]
+     tool_call_id: str
+     name: str
+     arguments_delta: str
+
+
+ AgentChunk = LiteLLMRawChunk | UsageChunk | FinalMessageChunk | ToolCallChunk | ToolCallResultChunk | ContentDeltaChunk | ToolCallDeltaChunk
lite_agent-0.1.0.dist-info/METADATA ADDED
@@ -0,0 +1,22 @@
+ Metadata-Version: 2.4
+ Name: lite-agent
+ Version: 0.1.0
+ Summary: A lightweight, extensible framework for building AI agents.
+ Author-email: Jianqi Pan <jannchie@gmail.com>
+ License: MIT
+ Keywords: AI,agent framework,assistant,chatbot,function call,openai,pydantic,rich,tooling
+ Classifier: Intended Audience :: Developers
+ Classifier: Intended Audience :: Science/Research
+ Classifier: License :: OSI Approved :: MIT License
+ Classifier: Operating System :: OS Independent
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Classifier: Programming Language :: Python :: 3.12
+ Classifier: Topic :: Communications :: Chat
+ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+ Classifier: Topic :: Software Development :: Libraries :: Python Modules
+ Requires-Python: >=3.10
+ Requires-Dist: funcall>=0.6.0
+ Requires-Dist: prompt-toolkit>=3.0.51
+ Requires-Dist: rich>=14.0.0
lite_agent-0.1.0.dist-info/RECORD ADDED
@@ -0,0 +1,13 @@
+ lite_agent/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ lite_agent/__main__.py,sha256=D4Yg5a6hDYMq6aCEGg3MjILF4pVWD6mBKjri-qs7Rzk,3766
+ lite_agent/agent.py,sha256=LmC5aqMVWsq5AkE18Od0FEVUdiZN1GTs5OlO6mrvxUM,1187
+ lite_agent/chunk_handler.py,sha256=MDk9rW_axZ412VOPnITbQGVr-mCGAe2jy3khUXkYbvM,5135
+ lite_agent/loggers.py,sha256=0-yb_1Ec8TodfTI_nZWeCJSpSZZM9R53UUOE5--L2iw,57
+ lite_agent/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ lite_agent/runner.py,sha256=TaBm6pCFwdJtygNnmVZDlFbkK3-Z1s52YD9XviwrA6o,2257
+ lite_agent/types.py,sha256=NUpoSDaaiWWYezWluDdWBb-DsYFvZFoNd4tLWqBzCZ4,3058
+ lite_agent/processors/__init__.py,sha256=AIxxS9uLcNws1Erf-b8z8zZKaKA5NQ7ooZ7HaCIJTq4,115
+ lite_agent/processors/stream_chunk_processor.py,sha256=WlVcfc4kdiCz0Fc1Ybe5GapN2stK0C_0DIymbiGpZfI,3420
+ lite_agent-0.1.0.dist-info/METADATA,sha256=oFRlVtYwsr1P9RPOP2JNu309-Jpqc95f-TYrExmrFBc,972
+ lite_agent-0.1.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+ lite_agent-0.1.0.dist-info/RECORD,,
lite_agent-0.1.0.dist-info/WHEEL ADDED
@@ -0,0 +1,4 @@
+ Wheel-Version: 1.0
+ Generator: hatchling 1.27.0
+ Root-Is-Purelib: true
+ Tag: py3-none-any