lite-agent 0.5.0__py3-none-any.whl → 0.6.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of lite-agent might be problematic.

lite_agent/agent.py CHANGED
@@ -5,11 +5,10 @@ from typing import Any, Optional
 
 from funcall import Funcall
 from jinja2 import Environment, FileSystemLoader
-from litellm import CustomStreamWrapper
 
 from lite_agent.client import BaseLLMClient, LiteLLMClient, ReasoningConfig
 from lite_agent.loggers import logger
-from lite_agent.stream_handlers import litellm_completion_stream_handler, litellm_response_stream_handler
+from lite_agent.response_handlers import CompletionResponseHandler, ResponsesAPIHandler
 from lite_agent.types import AgentChunk, FunctionCallEvent, FunctionCallOutputEvent, RunnerMessages, ToolCall, message_to_llm_dict, system_message_to_llm_dict
 from lite_agent.types.messages import NewAssistantMessage, NewSystemMessage, NewUserMessage
 
@@ -22,7 +21,7 @@ WAIT_FOR_USER_INSTRUCTIONS_TEMPLATE = jinja_env.get_template("wait_for_user_inst
 
 
 class Agent:
-    def __init__(  # noqa: PLR0913
+    def __init__(
         self,
         *,
         model: str | BaseLLMClient,
@@ -280,6 +279,7 @@ class Agent:
         messages: RunnerMessages,
         record_to_file: Path | None = None,
         reasoning: ReasoningConfig = None,
+        streaming: bool = True,
     ) -> AsyncGenerator[AgentChunk, None]:
         # Apply message transfer callback if provided - always use legacy format for LLM compatibility
         processed_messages = messages
@@ -296,19 +296,19 @@
             tools=tools,
             tool_choice="auto",  # TODO: make this configurable
             reasoning=reasoning,
+            streaming=streaming,
         )
 
-        # Ensure resp is a CustomStreamWrapper
-        if isinstance(resp, CustomStreamWrapper):
-            return litellm_completion_stream_handler(resp, record_to=record_to_file)
-        msg = "Response is not a CustomStreamWrapper, cannot stream chunks."
-        raise TypeError(msg)
+        # Use response handler for unified processing
+        handler = CompletionResponseHandler()
+        return handler.handle(resp, streaming, record_to_file)
 
     async def responses(
         self,
         messages: RunnerMessages,
         record_to_file: Path | None = None,
         reasoning: ReasoningConfig = None,
+        streaming: bool = True,
     ) -> AsyncGenerator[AgentChunk, None]:
         # Apply message transfer callback if provided - always use legacy format for LLM compatibility
         processed_messages = messages
@@ -324,8 +324,11 @@ class Agent:
             tools=tools,
             tool_choice="auto",  # TODO: make this configurable
             reasoning=reasoning,
+            streaming=streaming,
         )
-        return litellm_response_stream_handler(resp, record_to=record_to_file)
+        # Use response handler for unified processing
+        handler = ResponsesAPIHandler()
+        return handler.handle(resp, streaming, record_to_file)
 
     async def list_require_confirm_tools(self, tool_calls: Sequence[ToolCall] | None) -> Sequence[ToolCall]:
         if not tool_calls:
@@ -539,3 +542,4 @@ class Agent:
             required=[],
             handler=wait_for_user_handler,
         )
+
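For orientation, a minimal usage sketch of the new streaming flag on Agent.completion. The agent constructor arguments and the message format below are assumptions for illustration, not taken from this diff; only the model parameter and the streaming keyword are confirmed above.

    import asyncio

    from lite_agent.agent import Agent

    async def main() -> None:
        # Hypothetical minimal setup; only `model` is confirmed by the constructor diff above.
        agent = Agent(model="gpt-4o-mini")

        # streaming=True (the default) keeps the old behaviour: chunks come from the stream handlers.
        # streaming=False routes the full response through CompletionResponseHandler._handle_non_streaming,
        # which emits a single assistant_message chunk.
        stream = await agent.completion([{"role": "user", "content": "Hello"}], streaming=False)
        async for chunk in stream:
            print(chunk.type)

    asyncio.run(main())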
lite_agent/chat_display.py CHANGED
@@ -437,7 +437,7 @@ def display_messages(
     )
 
 
-def _display_single_message_compact(  # noqa: PLR0913
+def _display_single_message_compact(
     message: FlexibleRunnerMessage,
     *,
     index: int | None = None,
lite_agent/client.py CHANGED
@@ -5,6 +5,7 @@ from typing import Any, Literal
 import litellm
 from openai.types.chat import ChatCompletionToolParam
 from openai.types.responses import FunctionToolParam
+from pydantic import BaseModel
 
 ReasoningEffort = Literal["minimal", "low", "medium", "high"]
 ThinkingConfig = dict[str, Any] | None
@@ -18,6 +19,17 @@ ReasoningConfig = (
 )
 
 
+class LLMConfig(BaseModel):
+    """LLM generation parameters configuration."""
+
+    temperature: float | None = None
+    max_tokens: int | None = None
+    top_p: float | None = None
+    frequency_penalty: float | None = None
+    presence_penalty: float | None = None
+    stop: list[str] | str | None = None
+
+
 def parse_reasoning_config(reasoning: ReasoningConfig) -> tuple[ReasoningEffort | None, ThinkingConfig]:
     """
     Parse the unified reasoning configuration and return reasoning_effort and thinking_config.
@@ -36,7 +48,10 @@ def parse_reasoning_config(reasoning: ReasoningConfig) -> tuple[ReasoningEffort
         return None, None
     if isinstance(reasoning, str):
         # String type: use reasoning_effort
-        return reasoning, None
+        # Make sure the string is a valid ReasoningEffort value
+        if reasoning in ("minimal", "low", "medium", "high"):
+            return reasoning, None  # type: ignore[return-value]
+        return None, None
     if isinstance(reasoning, dict):
         # Dict type: use thinking_config
         return None, reasoning
@@ -58,13 +73,24 @@ class BaseLLMClient(abc.ABC):
         api_base: str | None = None,
         api_version: str | None = None,
         reasoning: ReasoningConfig = None,
+        llm_config: LLMConfig | None = None,
+        **llm_params: Any,  # noqa: ANN401
     ):
         self.model = model
         self.api_key = api_key
         self.api_base = api_base
         self.api_version = api_version
 
+        # Handle LLM generation parameters
+        if llm_config is not None:
+            self.llm_config = llm_config
+        else:
+            # Build the config from **llm_params
+            self.llm_config = LLMConfig(**llm_params)
+
         # Handle reasoning configuration
+        self.reasoning_effort: ReasoningEffort | None
+        self.thinking_config: ThinkingConfig
         self.reasoning_effort, self.thinking_config = parse_reasoning_config(reasoning)
 
     @abc.abstractmethod
@@ -74,6 +100,7 @@ class BaseLLMClient(abc.ABC):
         tools: list[ChatCompletionToolParam] | None = None,
         tool_choice: str = "auto",
         reasoning: ReasoningConfig = None,
+        streaming: bool = True,
         **kwargs: Any,  # noqa: ANN401
     ) -> Any:  # noqa: ANN401
         """Perform a completion request to the LLM."""
@@ -85,6 +112,7 @@ class BaseLLMClient(abc.ABC):
         tools: list[FunctionToolParam] | None = None,
         tool_choice: Literal["none", "auto", "required"] = "auto",
         reasoning: ReasoningConfig = None,
+        streaming: bool = True,
         **kwargs: Any,  # noqa: ANN401
     ) -> Any:  # noqa: ANN401
         """Perform a response request to the LLM."""
@@ -108,6 +136,7 @@ class LiteLLMClient(BaseLLMClient):
         tools: list[ChatCompletionToolParam] | None = None,
         tool_choice: str = "auto",
         reasoning: ReasoningConfig = None,
+        streaming: bool = True,
         **kwargs: Any,  # noqa: ANN401
     ) -> Any:  # noqa: ANN401
         """Perform a completion request to the Litellm API."""
@@ -126,10 +155,24 @@ class LiteLLMClient(BaseLLMClient):
             "api_version": self.api_version,
             "api_key": self.api_key,
             "api_base": self.api_base,
-            "stream": True,
+            "stream": streaming,
             **kwargs,
         }
 
+        # Add LLM generation parameters if specified
+        if self.llm_config.temperature is not None:
+            completion_params["temperature"] = self.llm_config.temperature
+        if self.llm_config.max_tokens is not None:
+            completion_params["max_tokens"] = self.llm_config.max_tokens
+        if self.llm_config.top_p is not None:
+            completion_params["top_p"] = self.llm_config.top_p
+        if self.llm_config.frequency_penalty is not None:
+            completion_params["frequency_penalty"] = self.llm_config.frequency_penalty
+        if self.llm_config.presence_penalty is not None:
+            completion_params["presence_penalty"] = self.llm_config.presence_penalty
+        if self.llm_config.stop is not None:
+            completion_params["stop"] = self.llm_config.stop
+
         # Add reasoning parameters if specified
         if final_reasoning_effort is not None:
             completion_params["reasoning_effort"] = final_reasoning_effort
@@ -144,6 +187,7 @@ class LiteLLMClient(BaseLLMClient):
         tools: list[FunctionToolParam] | None = None,
         tool_choice: Literal["none", "auto", "required"] = "auto",
         reasoning: ReasoningConfig = None,
+        streaming: bool = True,
         **kwargs: Any,  # noqa: ANN401
     ) -> Any:  # type: ignore[return]  # noqa: ANN401
         """Perform a response request to the Litellm API."""
@@ -164,11 +208,25 @@ class LiteLLMClient(BaseLLMClient):
             "api_version": self.api_version,
             "api_key": self.api_key,
             "api_base": self.api_base,
-            "stream": True,
+            "stream": streaming,
             "store": False,
             **kwargs,
         }
 
+        # Add LLM generation parameters if specified
+        if self.llm_config.temperature is not None:
+            response_params["temperature"] = self.llm_config.temperature
+        if self.llm_config.max_tokens is not None:
+            response_params["max_tokens"] = self.llm_config.max_tokens
+        if self.llm_config.top_p is not None:
+            response_params["top_p"] = self.llm_config.top_p
+        if self.llm_config.frequency_penalty is not None:
+            response_params["frequency_penalty"] = self.llm_config.frequency_penalty
+        if self.llm_config.presence_penalty is not None:
+            response_params["presence_penalty"] = self.llm_config.presence_penalty
+        if self.llm_config.stop is not None:
+            response_params["stop"] = self.llm_config.stop
+
         # Add reasoning parameters if specified
         if final_reasoning_effort is not None:
             response_params["reasoning_effort"] = final_reasoning_effort
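A quick sketch of how the new generation parameters might be supplied, based on the BaseLLMClient constructor shown above. It assumes LiteLLMClient keeps that constructor signature; the model name and API key are placeholders.

    from lite_agent.client import LiteLLMClient, LLMConfig

    # Explicit config object:
    client = LiteLLMClient(
        model="gpt-4o-mini",
        api_key="your-api-key",
        llm_config=LLMConfig(temperature=0.2, max_tokens=512),
    )

    # Or loose keyword arguments, collected into LLMConfig(**llm_params):
    client = LiteLLMClient(
        model="gpt-4o-mini",
        api_key="your-api-key",
        temperature=0.2,
        top_p=0.9,
    )

Either way, only the fields that are not None are forwarded to litellm in completion() and responses().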
lite_agent/response_handlers/__init__.py ADDED
@@ -0,0 +1,10 @@
+"""Response handlers for unified streaming and non-streaming processing."""
+from lite_agent.response_handlers.base import ResponseHandler
+from lite_agent.response_handlers.completion import CompletionResponseHandler
+from lite_agent.response_handlers.responses import ResponsesAPIHandler
+
+__all__ = [
+    "CompletionResponseHandler",
+    "ResponseHandler",
+    "ResponsesAPIHandler",
+]
lite_agent/response_handlers/base.py ADDED
@@ -0,0 +1,46 @@
+"""Base response handler for unified streaming and non-streaming response processing."""
+from abc import ABC, abstractmethod
+from collections.abc import AsyncGenerator
+from pathlib import Path
+from typing import Any
+
+from lite_agent.types import AgentChunk
+
+
+class ResponseHandler(ABC):
+    """Base class for handling both streaming and non-streaming responses."""
+
+    async def handle(
+        self,
+        response: Any,
+        streaming: bool,
+        record_to: Path | None = None,
+    ) -> AsyncGenerator[AgentChunk, None]:
+        """Handle a response in either streaming or non-streaming mode.
+
+        Args:
+            response: The LLM response object
+            streaming: Whether to process as streaming or non-streaming
+            record_to: Optional file path to record the conversation
+
+        Yields:
+            AgentChunk: Processed chunks from the response
+        """
+        if streaming:
+            async for chunk in self._handle_streaming(response, record_to):
+                yield chunk
+        else:
+            async for chunk in self._handle_non_streaming(response, record_to):
+                yield chunk
+
+    @abstractmethod
+    async def _handle_streaming(
+        self, response: Any, record_to: Path | None = None,
+    ) -> AsyncGenerator[AgentChunk, None]:
+        """Handle streaming response."""
+
+    @abstractmethod
+    async def _handle_non_streaming(
+        self, response: Any, record_to: Path | None = None,
+    ) -> AsyncGenerator[AgentChunk, None]:
+        """Handle non-streaming response."""
lite_agent/response_handlers/completion.py ADDED
@@ -0,0 +1,50 @@
+"""Completion API response handler."""
+from collections.abc import AsyncGenerator
+from datetime import datetime, timezone
+from pathlib import Path
+from typing import Any
+
+from litellm import CustomStreamWrapper
+
+from lite_agent.response_handlers.base import ResponseHandler
+from lite_agent.stream_handlers import litellm_completion_stream_handler
+from lite_agent.types import AgentChunk
+from lite_agent.types.events import AssistantMessageEvent
+from lite_agent.types.messages import AssistantMessageMeta, AssistantTextContent, NewAssistantMessage
+
+
+class CompletionResponseHandler(ResponseHandler):
+    """Handler for Completion API responses."""
+
+    async def _handle_streaming(
+        self, response: Any, record_to: Path | None = None,
+    ) -> AsyncGenerator[AgentChunk, None]:
+        """Handle streaming completion response."""
+        if isinstance(response, CustomStreamWrapper):
+            async for chunk in litellm_completion_stream_handler(response, record_to):
+                yield chunk
+        else:
+            msg = "Response is not a CustomStreamWrapper, cannot stream chunks."
+            raise TypeError(msg)
+
+    async def _handle_non_streaming(
+        self, response: Any, record_to: Path | None = None,
+    ) -> AsyncGenerator[AgentChunk, None]:
+        """Handle non-streaming completion response."""
+        # Convert completion response to chunks
+        if hasattr(response, "choices") and response.choices:
+            choice = response.choices[0]
+            content_items = []
+
+            # Add text content
+            if choice.message and choice.message.content:
+                content_items.append(AssistantTextContent(text=choice.message.content))
+
+            # TODO: Handle tool calls in the future
+
+            if content_items:
+                message = NewAssistantMessage(
+                    content=content_items,
+                    meta=AssistantMessageMeta(sent_at=datetime.now(timezone.utc)),
+                )
+                yield AssistantMessageEvent(message=message)
lite_agent/response_handlers/responses.py ADDED
@@ -0,0 +1,42 @@
+"""Responses API response handler."""
+from collections.abc import AsyncGenerator
+from datetime import datetime, timezone
+from pathlib import Path
+from typing import Any
+
+from lite_agent.response_handlers.base import ResponseHandler
+from lite_agent.stream_handlers import litellm_response_stream_handler
+from lite_agent.types import AgentChunk
+from lite_agent.types.events import AssistantMessageEvent
+from lite_agent.types.messages import AssistantMessageMeta, AssistantTextContent, NewAssistantMessage
+
+
+class ResponsesAPIHandler(ResponseHandler):
+    """Handler for Responses API responses."""
+
+    async def _handle_streaming(
+        self, response: Any, record_to: Path | None = None,
+    ) -> AsyncGenerator[AgentChunk, None]:
+        """Handle streaming responses API response."""
+        async for chunk in litellm_response_stream_handler(response, record_to):
+            yield chunk
+
+    async def _handle_non_streaming(
+        self, response: Any, record_to: Path | None = None,
+    ) -> AsyncGenerator[AgentChunk, None]:
+        """Handle non-streaming responses API response."""
+        # Convert ResponsesAPIResponse to chunks
+        if hasattr(response, "output") and response.output:
+            for output_message in response.output:
+                if hasattr(output_message, "content") and output_message.content:
+                    content_text = ""
+                    for content_item in output_message.content:
+                        if hasattr(content_item, "text"):
+                            content_text += content_item.text
+
+                    if content_text:
+                        message = NewAssistantMessage(
+                            content=[AssistantTextContent(text=content_text)],
+                            meta=AssistantMessageMeta(sent_at=datetime.now(timezone.utc)),
+                        )
+                        yield AssistantMessageEvent(message=message)
lite_agent/runner.py CHANGED
@@ -44,10 +44,11 @@ DEFAULT_INCLUDES: tuple[AgentChunkType, ...] = (
 
 
 class Runner:
-    def __init__(self, agent: Agent, api: Literal["completion", "responses"] = "responses") -> None:
+    def __init__(self, agent: Agent, api: Literal["completion", "responses"] = "responses", streaming: bool = True) -> None:
         self.agent = agent
         self.messages: list[NewMessage] = []
         self.api = api
+        self.streaming = streaming
         self._current_assistant_message: NewAssistantMessage | None = None
 
     @property
@@ -168,7 +169,7 @@ class Runner:
         """Collect all chunks from an async generator into a list."""
         return [chunk async for chunk in stream]
 
-    def run(  # noqa: PLR0913
+    def run(
         self,
         user_input: UserInput,
         max_steps: int = 20,
@@ -178,6 +179,7 @@ class Runner:
         agent_kwargs: dict[str, Any] | None = None,
     ) -> AsyncGenerator[AgentChunk, None]:
         """Run the agent and return a RunResponse object that can be asynchronously iterated for each chunk."""
+        logger.debug(f"Runner.run called with streaming={self.streaming}, api={self.api}")
         includes = self._normalize_includes(includes)
         match user_input:
             case str():
@@ -189,6 +191,7 @@ class Runner:
             case _:
                 # Handle single message (BaseModel, TypedDict, or dict)
                 self.append_message(user_input)  # type: ignore[arg-type]
+        logger.debug("Messages prepared, calling _run")
         return self._run(max_steps, includes, self._normalize_record_path(record_to), context=context, agent_kwargs=agent_kwargs)
 
     async def _run(
@@ -226,22 +229,28 @@ class Runner:
         if agent_kwargs:
             reasoning = agent_kwargs.get("reasoning")
 
+        logger.debug(f"Using API: {self.api}, streaming: {self.streaming}")
         match self.api:
             case "completion":
+                logger.debug("Calling agent.completion")
                 resp = await self.agent.completion(
                     self.messages,
                     record_to_file=record_to,
                     reasoning=reasoning,
+                    streaming=self.streaming,
                 )
             case "responses":
+                logger.debug("Calling agent.responses")
                 resp = await self.agent.responses(
                     self.messages,
                     record_to_file=record_to,
                     reasoning=reasoning,
+                    streaming=self.streaming,
                 )
             case _:
                 msg = f"Unknown API type: {self.api}"
                 raise ValueError(msg)
+        logger.debug(f"Received response from agent: {type(resp)}")
         async for chunk in resp:
             match chunk.type:
                 case "assistant_message":
lite_agent-0.5.0.dist-info/METADATA → lite_agent-0.6.0.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lite-agent
-Version: 0.5.0
+Version: 0.6.0
 Summary: A lightweight, extensible framework for building AI agent.
 Author-email: Jianqi Pan <jannchie@gmail.com>
 License: MIT
lite_agent-0.5.0.dist-info/RECORD → lite_agent-0.6.0.dist-info/RECORD RENAMED
@@ -1,14 +1,18 @@
 lite_agent/__init__.py,sha256=Swuefee0etSiaDnn30K2hBNV9UI3hIValW3A-pRE7e0,338
-lite_agent/agent.py,sha256=M0U59KpMy6OGFje6yZuQCYVGr4oBboRwbtImPF59o2w,23314
-lite_agent/chat_display.py,sha256=b0sUH3fkutc4e_KAKH7AtPu2msyLloNIAiWqCNavdds,30533
-lite_agent/client.py,sha256=HG-NbTIUSFAUAPjRow3TFYJxvTc6Y4bdT2oJWIJNEEk,5963
+lite_agent/agent.py,sha256=9stxur0iqdG9NUDXdk1ElxenjYsRsurt36hGhZcz_-c,23323
+lite_agent/chat_display.py,sha256=Pfg6ZgTeIuzRZMVxOUzlwZU18rfOLD9-8I1lqUd_fXc,30516
+lite_agent/client.py,sha256=QhtZZGX54ha9-HKHcbx0qUsaAUi4-TAO-YckCH_itQY,8633
 lite_agent/loggers.py,sha256=XkNkdqwD_nQGfhQJ-bBWT7koci_mMkNw3aBpyMhOICw,57
 lite_agent/message_transfers.py,sha256=9qucjc-uSIXvVfhcmVRC_0lp0Q8sWp99dV4ReCh6ZlI,4428
 lite_agent/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-lite_agent/runner.py,sha256=U7eVNAJ_VLwgbPPpn-vggSgvBmFl8wMMFWn3mWCsDow,40423
+lite_agent/runner.py,sha256=B0KYE0Wfta4X85kPm_hMMGrLz8o1-TmGUnOG1cUZeBM,40985
 lite_agent/processors/__init__.py,sha256=ybpAzpMBIE9v5I24wIBZRXeaOaPNTmoKH13aofgNI6Q,234
 lite_agent/processors/completion_event_processor.py,sha256=8fQYRofgBd8t0V3oUakTOmZdv5Q9tCuzADGCGvVgy0k,13442
 lite_agent/processors/response_event_processor.py,sha256=CElJMUzLs8mklVqJtoLiVu-NTq0Dz2NNd9YdAKpjgE0,8088
+lite_agent/response_handlers/__init__.py,sha256=2xe8YngMpjdp1B5tE8C3IiimYI30TnqQHj9KTtg6wCI,385
+lite_agent/response_handlers/base.py,sha256=amQSnhUdoMyaacL7BlGfIUJDYDgqH6seYlfOl6loy-w,1566
+lite_agent/response_handlers/completion.py,sha256=X-sBM-ZBxodppcCXAwoN8Lslda5QYSoK7DdKEdgaYnM,2026
+lite_agent/response_handlers/responses.py,sha256=KEKnnsel8HLiF2Ob8TzVSXuRjudCpvyZ_GMrg3ME2g0,1915
 lite_agent/stream_handlers/__init__.py,sha256=a5s1GZr42uvndtcQqEhK2cnjGkK8ZFTAZCj3J61Bb5E,209
 lite_agent/stream_handlers/litellm.py,sha256=3D0u7R2ADA8kDwpFImZlw20o-CsmFXVLvq4nvwwD0Rk,2922
 lite_agent/templates/handoffs_source_instructions.xml.j2,sha256=2XsXQlBzk38qbxGrfyt8y2b0KlZmsV_1xavLufcdkHc,428
@@ -18,6 +22,6 @@ lite_agent/types/__init__.py,sha256=QKuhjFWRcpAlsBK9JYgoCABpoQExwhuyGudJoiiqQfs,
 lite_agent/types/events.py,sha256=mFMqV55WWJbPDyb_P61nd3qMLpEnwZgVY6NTKFkINkg,2389
 lite_agent/types/messages.py,sha256=c7nTIWqXNo562het_vaWcZvsoy-adkARwAYn4JNqm0c,9897
 lite_agent/types/tool_calls.py,sha256=Xnut8-2-Ld9vgA2GKJY6BbFlBaAv_n4W7vo7Jx21A-E,260
-lite_agent-0.5.0.dist-info/METADATA,sha256=20K2Xirnyawl1uN_I8TLcuGlgRjNhs04hz2BtDDRnbM,3456
-lite_agent-0.5.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
-lite_agent-0.5.0.dist-info/RECORD,,
+lite_agent-0.6.0.dist-info/METADATA,sha256=_gfjiwA85XKoQdB9TCJx3BI2D21gNkw-C5pL3CaiZz8,3456
+lite_agent-0.6.0.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+lite_agent-0.6.0.dist-info/RECORD,,