lite-agent 0.4.0__tar.gz → 0.5.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (86)
  1. {lite_agent-0.4.0 → lite_agent-0.5.0}/CHANGELOG.md +24 -0
  2. {lite_agent-0.4.0 → lite_agent-0.5.0}/CLAUDE.md +47 -6
  3. {lite_agent-0.4.0 → lite_agent-0.5.0}/PKG-INFO +1 -1
  4. lite_agent-0.5.0/examples/reasoning_example.py +113 -0
  5. {lite_agent-0.4.0 → lite_agent-0.5.0}/pyproject.toml +1 -1
  6. {lite_agent-0.4.0 → lite_agent-0.5.0}/src/lite_agent/agent.py +27 -7
  7. lite_agent-0.5.0/src/lite_agent/client.py +178 -0
  8. {lite_agent-0.4.0 → lite_agent-0.5.0}/src/lite_agent/runner.py +153 -137
  9. {lite_agent-0.4.0 → lite_agent-0.5.0}/src/lite_agent/stream_handlers/litellm.py +16 -7
  10. lite_agent-0.5.0/tests/unit/test_agent_additional.py +182 -0
  11. {lite_agent-0.4.0 → lite_agent-0.5.0}/tests/unit/test_agent_handoffs.py +5 -5
  12. lite_agent-0.5.0/tests/unit/test_chat_display.py +247 -0
  13. lite_agent-0.5.0/tests/unit/test_chat_display_additional.py +304 -0
  14. lite_agent-0.5.0/tests/unit/test_message_transfers_additional.py +334 -0
  15. lite_agent-0.5.0/tests/unit/test_response_event_processor.py +635 -0
  16. {lite_agent-0.4.0 → lite_agent-0.5.0}/tests/unit/test_runner.py +0 -15
  17. lite_agent-0.5.0/tests/unit/test_simple_stream_handlers.py +56 -0
  18. lite_agent-0.5.0/tests/unit/test_stream_handlers_additional.py +262 -0
  19. {lite_agent-0.4.0 → lite_agent-0.5.0}/uv.lock +260 -245
  20. lite_agent-0.4.0/src/lite_agent/client.py +0 -69
  21. lite_agent-0.4.0/tests/unit/test_chat_display.py +0 -38
  22. {lite_agent-0.4.0 → lite_agent-0.5.0}/.claude/settings.local.json +0 -0
  23. {lite_agent-0.4.0 → lite_agent-0.5.0}/.github/workflows/ci.yml +0 -0
  24. {lite_agent-0.4.0 → lite_agent-0.5.0}/.gitignore +0 -0
  25. {lite_agent-0.4.0 → lite_agent-0.5.0}/.python-version +0 -0
  26. {lite_agent-0.4.0 → lite_agent-0.5.0}/.vscode/launch.json +0 -0
  27. {lite_agent-0.4.0 → lite_agent-0.5.0}/README.md +0 -0
  28. {lite_agent-0.4.0 → lite_agent-0.5.0}/examples/basic.py +0 -0
  29. {lite_agent-0.4.0 → lite_agent-0.5.0}/examples/basic_agent.py +0 -0
  30. {lite_agent-0.4.0 → lite_agent-0.5.0}/examples/channels/rich_channel.py +0 -0
  31. {lite_agent-0.4.0 → lite_agent-0.5.0}/examples/chat_display_demo.py +0 -0
  32. {lite_agent-0.4.0 → lite_agent-0.5.0}/examples/confirm_and_continue.py +0 -0
  33. {lite_agent-0.4.0 → lite_agent-0.5.0}/examples/consolidate_history.py +0 -0
  34. {lite_agent-0.4.0 → lite_agent-0.5.0}/examples/context.py +0 -0
  35. {lite_agent-0.4.0 → lite_agent-0.5.0}/examples/handoffs.py +0 -0
  36. {lite_agent-0.4.0 → lite_agent-0.5.0}/examples/image.py +0 -0
  37. {lite_agent-0.4.0 → lite_agent-0.5.0}/examples/message_transfer_example.py +0 -0
  38. {lite_agent-0.4.0 → lite_agent-0.5.0}/examples/message_transfer_example_new.py +0 -0
  39. {lite_agent-0.4.0 → lite_agent-0.5.0}/examples/new_message_structure_demo.py +0 -0
  40. {lite_agent-0.4.0 → lite_agent-0.5.0}/examples/response_api_example.py +0 -0
  41. {lite_agent-0.4.0 → lite_agent-0.5.0}/examples/responses.py +0 -0
  42. {lite_agent-0.4.0 → lite_agent-0.5.0}/examples/set_chat_history_example.py +0 -0
  43. {lite_agent-0.4.0 → lite_agent-0.5.0}/examples/stop_with_tool_call.py +0 -0
  44. {lite_agent-0.4.0 → lite_agent-0.5.0}/examples/terminal.py +0 -0
  45. {lite_agent-0.4.0 → lite_agent-0.5.0}/examples/translate/main.py +0 -0
  46. {lite_agent-0.4.0 → lite_agent-0.5.0}/examples/translate/prompts/translation_system.md.j2 +0 -0
  47. {lite_agent-0.4.0 → lite_agent-0.5.0}/examples/translate.py +0 -0
  48. {lite_agent-0.4.0 → lite_agent-0.5.0}/examples/type_system_example.py +0 -0
  49. {lite_agent-0.4.0 → lite_agent-0.5.0}/scripts/record_chat_messages.py +0 -0
  50. {lite_agent-0.4.0 → lite_agent-0.5.0}/src/lite_agent/__init__.py +0 -0
  51. {lite_agent-0.4.0 → lite_agent-0.5.0}/src/lite_agent/chat_display.py +0 -0
  52. {lite_agent-0.4.0 → lite_agent-0.5.0}/src/lite_agent/loggers.py +0 -0
  53. {lite_agent-0.4.0 → lite_agent-0.5.0}/src/lite_agent/message_transfers.py +0 -0
  54. {lite_agent-0.4.0 → lite_agent-0.5.0}/src/lite_agent/processors/__init__.py +0 -0
  55. {lite_agent-0.4.0 → lite_agent-0.5.0}/src/lite_agent/processors/completion_event_processor.py +0 -0
  56. {lite_agent-0.4.0 → lite_agent-0.5.0}/src/lite_agent/processors/response_event_processor.py +0 -0
  57. {lite_agent-0.4.0 → lite_agent-0.5.0}/src/lite_agent/py.typed +0 -0
  58. {lite_agent-0.4.0 → lite_agent-0.5.0}/src/lite_agent/stream_handlers/__init__.py +0 -0
  59. {lite_agent-0.4.0 → lite_agent-0.5.0}/src/lite_agent/templates/handoffs_source_instructions.xml.j2 +0 -0
  60. {lite_agent-0.4.0 → lite_agent-0.5.0}/src/lite_agent/templates/handoffs_target_instructions.xml.j2 +0 -0
  61. {lite_agent-0.4.0 → lite_agent-0.5.0}/src/lite_agent/templates/wait_for_user_instructions.xml.j2 +0 -0
  62. {lite_agent-0.4.0 → lite_agent-0.5.0}/src/lite_agent/types/__init__.py +0 -0
  63. {lite_agent-0.4.0 → lite_agent-0.5.0}/src/lite_agent/types/events.py +0 -0
  64. {lite_agent-0.4.0 → lite_agent-0.5.0}/src/lite_agent/types/messages.py +0 -0
  65. {lite_agent-0.4.0 → lite_agent-0.5.0}/src/lite_agent/types/tool_calls.py +0 -0
  66. {lite_agent-0.4.0 → lite_agent-0.5.0}/tests/integration/test_agent_with_mocks.py +0 -0
  67. {lite_agent-0.4.0 → lite_agent-0.5.0}/tests/integration/test_basic.py +0 -0
  68. {lite_agent-0.4.0 → lite_agent-0.5.0}/tests/integration/test_mock_litellm.py +0 -0
  69. {lite_agent-0.4.0 → lite_agent-0.5.0}/tests/mocks/basic/1.jsonl +0 -0
  70. {lite_agent-0.4.0 → lite_agent-0.5.0}/tests/mocks/confirm_and_continue/1.jsonl +0 -0
  71. {lite_agent-0.4.0 → lite_agent-0.5.0}/tests/mocks/confirm_and_continue/2.jsonl +0 -0
  72. {lite_agent-0.4.0 → lite_agent-0.5.0}/tests/mocks/context/1.jsonl +0 -0
  73. {lite_agent-0.4.0 → lite_agent-0.5.0}/tests/mocks/handoffs/1.jsonl +0 -0
  74. {lite_agent-0.4.0 → lite_agent-0.5.0}/tests/performance/test_set_chat_history_performance.py +0 -0
  75. {lite_agent-0.4.0 → lite_agent-0.5.0}/tests/test_new_messages.py +0 -0
  76. {lite_agent-0.4.0 → lite_agent-0.5.0}/tests/unit/test_agent.py +0 -0
  77. {lite_agent-0.4.0 → lite_agent-0.5.0}/tests/unit/test_append_message.py +0 -0
  78. {lite_agent-0.4.0 → lite_agent-0.5.0}/tests/unit/test_completion_condition.py +0 -0
  79. {lite_agent-0.4.0 → lite_agent-0.5.0}/tests/unit/test_file_recording.py +0 -0
  80. {lite_agent-0.4.0 → lite_agent-0.5.0}/tests/unit/test_litellm_stream_handler.py +0 -0
  81. {lite_agent-0.4.0 → lite_agent-0.5.0}/tests/unit/test_message_transfer.py +0 -0
  82. {lite_agent-0.4.0 → lite_agent-0.5.0}/tests/unit/test_message_transfers.py +0 -0
  83. {lite_agent-0.4.0 → lite_agent-0.5.0}/tests/unit/test_response_api_format.py +0 -0
  84. {lite_agent-0.4.0 → lite_agent-0.5.0}/tests/unit/test_set_chat_history.py +0 -0
  85. {lite_agent-0.4.0 → lite_agent-0.5.0}/tests/unit/test_stream_chunk_processor.py +0 -0
  86. {lite_agent-0.4.0 → lite_agent-0.5.0}/tests/utils/mock_litellm.py +0 -0
{lite_agent-0.4.0 → lite_agent-0.5.0}/CHANGELOG.md
@@ -1,3 +1,27 @@
+ ## v0.5.0
+
+ [v0.4.1...v0.5.0](https://github.com/Jannchie/lite-agent/compare/v0.4.1...v0.5.0)
+
+ ### :sparkles: Features
+
+ - **reasoning**: add unified reasoning config for agent and runner - By [Jannchie](mailto:jannchie@gmail.com) in [1ede077](https://github.com/Jannchie/lite-agent/commit/1ede077)
+
+ ### :adhesive_bandage: Fixes
+
+ - **runner**: remove redundant last message check - By [Jannchie](mailto:jannchie@gmail.com) in [0037b96](https://github.com/Jannchie/lite-agent/commit/0037b96)
+
+ ### :memo: Documentation
+
+ - **claude-guide**: expand testing linting and dev instructions - By [Jannchie](mailto:jannchie@gmail.com) in [d9136c2](https://github.com/Jannchie/lite-agent/commit/d9136c2)
+
+ ## v0.4.1
+
+ [v0.4.0...v0.4.1](https://github.com/Jannchie/lite-agent/compare/v0.4.0...v0.4.1)
+
+ ### :art: Refactors
+
+ - **runner**: replace if-elif with match-case for chunk handling - By [Jannchie](mailto:jannchie@gmail.com) in [e9a8464](https://github.com/Jannchie/lite-agent/commit/e9a8464)
+
  ## v0.4.0

  [v0.3.0...v0.4.0](https://github.com/Jannchie/lite-agent/compare/v0.3.0...v0.4.0)
{lite_agent-0.4.0 → lite_agent-0.5.0}/CLAUDE.md
@@ -10,21 +10,40 @@ This file provides guidance to Claude Code (claude.ai/code) when working with co
  pytest # Run all tests
  pytest tests/unit/ # Run only unit tests
  pytest tests/integration/ # Run only integration tests
+ pytest tests/performance/ # Run performance tests
  pytest --cov # Run with coverage
+ pytest -v # Verbose output
+ pytest -k "test_name" # Run specific test
  ```

  ### Linting and Formatting

  ```bash
  ruff check # Run linter
+ ruff check --fix # Run linter and auto-fix issues
  ruff format # Format code
+ pyright # Type checking (optional)
  ```

  ### Package Management

  ```bash
- uv add lite-agent # Install from PyPI
- uv add --dev lite-agent # Install dev package
+ uv install # Install all dependencies
+ uv add <package> # Add a new dependency
+ uv add --dev <package> # Add a development dependency
+ uv sync # Sync dependencies from lock file
+ uv run <command> # Run command in project environment
+ ```
+
+ ### Running Examples
+
+ ```bash
+ uv run python examples/basic.py
+ uv run python examples/handoffs.py
+ uv run python examples/chat_display_demo.py
+ uv run python examples/context.py
+ uv run python examples/terminal.py
+ uv run python examples/translate/main.py
  ```

  ## Project Architecture
@@ -92,9 +111,31 @@ Examples demonstrate various usage patterns:

  ### Testing Architecture

- - **Unit tests**: Test individual components in isolation
- - **Integration tests**: Test full agent workflows with mocked LLM responses
- - **Performance tests**: Test memory usage and performance characteristics
- - **Mock system**: JSONL-based conversation recording/playback for deterministic testing
+ - **Unit tests**: Test individual components in isolation (`tests/unit/`)
+ - **Integration tests**: Test full agent workflows with mocked LLM responses (`tests/integration/`)
+ - **Performance tests**: Test memory usage and performance characteristics (`tests/performance/`)
+ - **Mock system**: JSONL-based conversation recording/playback for deterministic testing (`tests/mocks/`)
+
+ **Recording Mock Conversations**: Use `scripts/record_chat_messages.py` to capture real LLM interactions for test scenarios
+
+ ### API Compatibility
+
+ The framework supports two OpenAI API modes:
+
+ - **Response API** (default): Modern structured response format
+ - **Completion API**: Legacy completion format for backward compatibility
+
+ Set via `Runner(agent, api="completion")` or `Runner(agent, api="responses")`.
+
+ ### Development Notes
+
+ - Project uses strict ruff linting with `select = ["ALL"]` and specific ignores
+ - All functions require full type annotations
+ - Uses `uv` for package management and dependency resolution
+ - Mock conversations stored in `tests/mocks/` as JSONL files for reproducible testing
+ - Examples in `examples/` directory demonstrate various usage patterns
+ - Template system uses Jinja2 for dynamic instruction generation (`src/lite_agent/templates/`)
+ - Requires `litellm` as core dependency for LLM interactions
+ - Chat display functionality uses `rich` library for formatted console output

  The framework emphasizes simplicity and extensibility while maintaining full type safety and comprehensive streaming support.
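
The API Compatibility notes added to CLAUDE.md above only name the switch. As a minimal sketch of what selecting the mode looks like (assuming lite-agent 0.5.0 is installed and following the `Runner(agent, api=...)` form quoted in that file; model name and instructions are placeholders):

```python
from lite_agent.agent import Agent
from lite_agent.runner import Runner

agent = Agent(
    model="gpt-4o-mini",  # placeholder model name
    name="Assistant",
    instructions="You are a helpful assistant.",
)

# Response API (the default): modern structured response format.
responses_runner = Runner(agent, api="responses")

# Completion API: legacy format kept for backward compatibility.
completion_runner = Runner(agent, api="completion")
```

Both runners drive the same agent; only the request format used for the underlying LLM call differs.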
{lite_agent-0.4.0 → lite_agent-0.5.0}/PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: lite-agent
- Version: 0.4.0
+ Version: 0.5.0
  Summary: A lightweight, extensible framework for building AI agent.
  Author-email: Jianqi Pan <jannchie@gmail.com>
  License: MIT
lite_agent-0.5.0/examples/reasoning_example.py (new file)
@@ -0,0 +1,113 @@
+ import asyncio
+ import logging
+
+ from rich.logging import RichHandler
+
+ from lite_agent.agent import Agent
+ from lite_agent.runner import Runner
+
+ logging.basicConfig(
+     level=logging.WARNING,
+     format="%(message)s",
+     datefmt="[%X]",
+     handlers=[RichHandler(rich_tracebacks=True)],
+ )
+
+ logger = logging.getLogger("lite_agent")
+ logger.setLevel(logging.DEBUG)
+
+
+ async def analyze_complex_problem(problem_description: str) -> str:
+     """Analyze a complex problem and return insights."""
+     return f"Analysis for: {problem_description}\n- Key factors identified\n- Potential solutions outlined\n- Risk assessment completed"
+
+
+ async def demo_reasoning_configurations():
+     """Demonstrate the different reasoning configuration options."""
+     print("=== Reasoning Configuration Demo ===\n")
+
+     # 1. Set reasoning parameters when initializing the Agent
+     print("1. Setting reasoning_effort at Agent initialization:")
+     agent_with_reasoning = Agent(
+         model="gpt-4o-mini",
+         name="Reasoning Assistant",
+         instructions="You are a deep-analysis assistant that uses careful reasoning to provide thorough analyses.",
+         reasoning_effort="high",  # high-effort reasoning
+     )
+     print(f"   Agent reasoning effort: {agent_with_reasoning.reasoning_effort}")
+     print(f"   Client reasoning effort: {agent_with_reasoning.client.reasoning_effort}")
+
+     # 2. Use thinking_config for finer-grained control
+     print("\n2. Fine-grained control with thinking_config:")
+     agent_with_thinking = Agent(
+         model="claude-3-5-sonnet-20241022",  # Anthropic models support thinking
+         name="Thinking Assistant",
+         instructions="You are a thoughtful, deliberate assistant.",
+         thinking_config={"type": "enabled", "budget_tokens": 2048},
+     )
+     print(f"   Agent thinking config: {agent_with_thinking.thinking_config}")
+     print(f"   Client thinking config: {agent_with_thinking.client.thinking_config}")
+
+     # 3. Set reasoning_effort and thinking_config together
+     print("\n3. Setting several reasoning parameters at once:")
+     agent_full_config = Agent(
+         model="o1-mini",  # OpenAI reasoning model
+         name="Fully Configured Reasoning Assistant",
+         instructions="You are an advanced reasoning assistant.",
+         reasoning_effort="medium",
+         thinking_config={"type": "enabled", "budget_tokens": 1024},
+     )
+     print(f"   Reasoning effort: {agent_full_config.reasoning_effort}")
+     print(f"   Thinking config: {agent_full_config.thinking_config}")
+
+     # 4. Demonstrate overriding reasoning parameters at runtime
+     print("\n4. Overriding reasoning parameters at runtime:")
+     runner = Runner(agent_with_reasoning)
+     print("   - The agent defaults to reasoning_effort='high'")
+     print("   - It can be overridden at run time via agent_kwargs:")
+     print("     runner.run(query, agent_kwargs={'reasoning_effort': 'minimal'})")
+
+     # Note: no real API calls are made here because no API key is configured
+     print("\n✓ All reasoning configuration options were set up successfully!")
+
+
+ async def main():
+     """Main demo entry point."""
+     await demo_reasoning_configurations()
+
+     print("\n" + "=" * 60)
+     print("Reasoning configuration usage notes:")
+     print("=" * 60)
+     print("""
+ 1. reasoning_effort parameter (OpenAI-compatible):
+    - "minimal": minimal reasoning, fastest responses
+    - "low": low-effort reasoning
+    - "medium": medium reasoning (recommended)
+    - "high": high-effort reasoning for deeper analysis
+
+ 2. thinking_config parameter (Anthropic-compatible):
+    - {"type": "enabled", "budget_tokens": N}
+    - N can be 1024, 2048, 4096, and so on
+
+ 3. Usage:
+    a) Set at Agent initialization: Agent(..., reasoning_effort="high")
+    b) Override at run time: runner.run(query, agent_kwargs={"reasoning_effort": "low"})
+
+ 4. Model compatibility:
+    - OpenAI: o1, o3, o4-mini series
+    - Anthropic: claude-3.5-sonnet, etc.
+    - Others: converted automatically via LiteLLM
+
+ 5. Example code:
+    ```python
+    agent = Agent(
+        model="gpt-4o-mini",
+        reasoning_effort="medium",
+        thinking_config={"type": "enabled", "budget_tokens": 2048}
+    )
+    ```
+ """)
+
+
+ if __name__ == "__main__":
+     asyncio.run(main())
{lite_agent-0.4.0 → lite_agent-0.5.0}/pyproject.toml
@@ -1,6 +1,6 @@
  [project]
  name = "lite-agent"
- version = "0.4.0"
+ version = "0.5.0"
  description = "A lightweight, extensible framework for building AI agent."
  readme = "README.md"
  authors = [{ name = "Jianqi Pan", email = "jannchie@gmail.com" }]
{lite_agent-0.4.0 → lite_agent-0.5.0}/src/lite_agent/agent.py
@@ -7,7 +7,7 @@ from funcall import Funcall
  from jinja2 import Environment, FileSystemLoader
  from litellm import CustomStreamWrapper

- from lite_agent.client import BaseLLMClient, LiteLLMClient
+ from lite_agent.client import BaseLLMClient, LiteLLMClient, ReasoningConfig
  from lite_agent.loggers import logger
  from lite_agent.stream_handlers import litellm_completion_stream_handler, litellm_response_stream_handler
  from lite_agent.types import AgentChunk, FunctionCallEvent, FunctionCallOutputEvent, RunnerMessages, ToolCall, message_to_llm_dict, system_message_to_llm_dict
@@ -32,15 +32,21 @@ class Agent:
          handoffs: list["Agent"] | None = None,
          message_transfer: Callable[[RunnerMessages], RunnerMessages] | None = None,
          completion_condition: str = "stop",
+         reasoning: ReasoningConfig = None,
      ) -> None:
          self.name = name
          self.instructions = instructions
+         self.reasoning = reasoning
+
          if isinstance(model, BaseLLMClient):
              # If model is a BaseLLMClient instance, use it directly
              self.client = model
          else:
              # Otherwise, create a LitellmClient instance
-             self.client = LiteLLMClient(model=model)
+             self.client = LiteLLMClient(
+                 model=model,
+                 reasoning=reasoning,
+             )
          self.completion_condition = completion_condition
          self.handoffs = handoffs if handoffs else []
          self._parent: Agent | None = None
@@ -174,9 +180,11 @@
          if self.completion_condition == "call":
              instructions = WAIT_FOR_USER_INSTRUCTIONS_TEMPLATE.render(extra_instructions=None) + "\n\n" + instructions
          return [
-             system_message_to_llm_dict(NewSystemMessage(
-                 content=f"You are {self.name}. {instructions}",
-             )),
+             system_message_to_llm_dict(
+                 NewSystemMessage(
+                     content=f"You are {self.name}. {instructions}",
+                 ),
+             ),
              *converted_messages,
          ]

@@ -267,7 +275,12 @@
              res.append(message)
          return res

-     async def completion(self, messages: RunnerMessages, record_to_file: Path | None = None) -> AsyncGenerator[AgentChunk, None]:
+     async def completion(
+         self,
+         messages: RunnerMessages,
+         record_to_file: Path | None = None,
+         reasoning: ReasoningConfig = None,
+     ) -> AsyncGenerator[AgentChunk, None]:
          # Apply message transfer callback if provided - always use legacy format for LLM compatibility
          processed_messages = messages
          if self.message_transfer:
@@ -282,6 +295,7 @@
              messages=self.message_histories,
              tools=tools,
              tool_choice="auto",  # TODO: make this configurable
+             reasoning=reasoning,
          )

          # Ensure resp is a CustomStreamWrapper
@@ -290,7 +304,12 @@
              msg = "Response is not a CustomStreamWrapper, cannot stream chunks."
              raise TypeError(msg)

-     async def responses(self, messages: RunnerMessages, record_to_file: Path | None = None) -> AsyncGenerator[AgentChunk, None]:
+     async def responses(
+         self,
+         messages: RunnerMessages,
+         record_to_file: Path | None = None,
+         reasoning: ReasoningConfig = None,
+     ) -> AsyncGenerator[AgentChunk, None]:
          # Apply message transfer callback if provided - always use legacy format for LLM compatibility
          processed_messages = messages
          if self.message_transfer:
@@ -304,6 +323,7 @@
              messages=self.message_histories,
              tools=tools,
              tool_choice="auto",  # TODO: make this configurable
+             reasoning=reasoning,
          )
          return litellm_response_stream_handler(resp, record_to=record_to_file)
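
The agent.py hunks above thread a single `reasoning` value from `Agent.__init__` into the underlying `LiteLLMClient`, and add a per-call `reasoning` override to `completion()` and `responses()`. A rough sketch of that wiring, assuming lite-agent 0.5.0 is installed (model names are placeholders, and nothing below issues an LLM request):

```python
from lite_agent.agent import Agent

# A plain string becomes the client's reasoning_effort.
analyst = Agent(
    model="gpt-4o-mini",  # placeholder model name
    name="Deep Analyst",
    instructions="Reason carefully before answering.",
    reasoning="high",
)
print(analyst.client.reasoning_effort)  # "high"
print(analyst.client.thinking_config)   # None

# A dict becomes an Anthropic-style thinking config instead.
thinker = Agent(
    model="claude-3-5-sonnet-20241022",  # placeholder model name
    name="Thinker",
    instructions="Think step by step.",
    reasoning={"type": "enabled", "budget_tokens": 2048},
)
print(thinker.client.thinking_config)  # {"type": "enabled", "budget_tokens": 2048}

# Per-call override: the new keyword on completion()/responses() takes
# precedence over the agent-level default for that single request, e.g.
#   analyst.completion(messages, reasoning="low")
```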
lite_agent-0.5.0/src/lite_agent/client.py (new file)
@@ -0,0 +1,178 @@
+ import abc
+ import os
+ from typing import Any, Literal
+
+ import litellm
+ from openai.types.chat import ChatCompletionToolParam
+ from openai.types.responses import FunctionToolParam
+
+ ReasoningEffort = Literal["minimal", "low", "medium", "high"]
+ ThinkingConfig = dict[str, Any] | None
+
+ # Unified reasoning configuration type
+ ReasoningConfig = (
+     str
+     | dict[str, Any]  # {"type": "enabled", "budget_tokens": 2048} or similar config
+     | bool  # simple True/False switch
+     | None  # reasoning disabled
+ )
+
+
+ def parse_reasoning_config(reasoning: ReasoningConfig) -> tuple[ReasoningEffort | None, ThinkingConfig]:
+     """
+     Parse the unified reasoning config into reasoning_effort and thinking_config.
+
+     Args:
+         reasoning: Unified reasoning configuration
+             - str: "minimal", "low", "medium", "high" -> reasoning_effort
+             - dict: {"type": "enabled", "budget_tokens": N} -> thinking_config
+             - bool: True -> "medium", False -> None
+             - None: reasoning disabled
+
+     Returns:
+         tuple: (reasoning_effort, thinking_config)
+     """
+     if reasoning is None:
+         return None, None
+     if isinstance(reasoning, str):
+         # String value maps to reasoning_effort
+         return reasoning, None
+     if isinstance(reasoning, dict):
+         # Dict value maps to thinking_config
+         return None, reasoning
+     if isinstance(reasoning, bool):
+         # Boolean: True enables the default "medium" effort, False disables reasoning
+         return "medium" if reasoning else None, None
+     # Any other type: reasoning disabled
+     return None, None
+
+
+ class BaseLLMClient(abc.ABC):
+     """Base class for LLM clients."""
+
+     def __init__(
+         self,
+         *,
+         model: str,
+         api_key: str | None = None,
+         api_base: str | None = None,
+         api_version: str | None = None,
+         reasoning: ReasoningConfig = None,
+     ):
+         self.model = model
+         self.api_key = api_key
+         self.api_base = api_base
+         self.api_version = api_version
+
+         # Resolve the reasoning configuration
+         self.reasoning_effort, self.thinking_config = parse_reasoning_config(reasoning)
+
+     @abc.abstractmethod
+     async def completion(
+         self,
+         messages: list[Any],
+         tools: list[ChatCompletionToolParam] | None = None,
+         tool_choice: str = "auto",
+         reasoning: ReasoningConfig = None,
+         **kwargs: Any,  # noqa: ANN401
+     ) -> Any:  # noqa: ANN401
+         """Perform a completion request to the LLM."""
+
+     @abc.abstractmethod
+     async def responses(
+         self,
+         messages: list[dict[str, Any]],  # Changed from ResponseInputParam
+         tools: list[FunctionToolParam] | None = None,
+         tool_choice: Literal["none", "auto", "required"] = "auto",
+         reasoning: ReasoningConfig = None,
+         **kwargs: Any,  # noqa: ANN401
+     ) -> Any:  # noqa: ANN401
+         """Perform a response request to the LLM."""
+
+
+ class LiteLLMClient(BaseLLMClient):
+     def _resolve_reasoning_params(
+         self,
+         reasoning: ReasoningConfig,
+     ) -> tuple[ReasoningEffort | None, ThinkingConfig]:
+         """Resolve reasoning configuration parameters."""
+         if reasoning is not None:
+             return parse_reasoning_config(reasoning)
+
+         # Fall back to the instance defaults
+         return self.reasoning_effort, self.thinking_config
+
+     async def completion(
+         self,
+         messages: list[Any],
+         tools: list[ChatCompletionToolParam] | None = None,
+         tool_choice: str = "auto",
+         reasoning: ReasoningConfig = None,
+         **kwargs: Any,  # noqa: ANN401
+     ) -> Any:  # noqa: ANN401
+         """Perform a completion request to the Litellm API."""
+
+         # Resolve the reasoning configuration parameters
+         final_reasoning_effort, final_thinking_config = self._resolve_reasoning_params(
+             reasoning,
+         )
+
+         # Prepare completion parameters
+         completion_params = {
+             "model": self.model,
+             "messages": messages,
+             "tools": tools,
+             "tool_choice": tool_choice,
+             "api_version": self.api_version,
+             "api_key": self.api_key,
+             "api_base": self.api_base,
+             "stream": True,
+             **kwargs,
+         }
+
+         # Add reasoning parameters if specified
+         if final_reasoning_effort is not None:
+             completion_params["reasoning_effort"] = final_reasoning_effort
+         if final_thinking_config is not None:
+             completion_params["thinking"] = final_thinking_config
+
+         return await litellm.acompletion(**completion_params)
+
+     async def responses(
+         self,
+         messages: list[dict[str, Any]],  # Changed from ResponseInputParam
+         tools: list[FunctionToolParam] | None = None,
+         tool_choice: Literal["none", "auto", "required"] = "auto",
+         reasoning: ReasoningConfig = None,
+         **kwargs: Any,  # noqa: ANN401
+     ) -> Any:  # type: ignore[return]  # noqa: ANN401
+         """Perform a response request to the Litellm API."""
+
+         os.environ["DISABLE_AIOHTTP_TRANSPORT"] = "True"
+
+         # Resolve the reasoning configuration parameters
+         final_reasoning_effort, final_thinking_config = self._resolve_reasoning_params(
+             reasoning,
+         )
+
+         # Prepare response parameters
+         response_params = {
+             "model": self.model,
+             "input": messages,  # type: ignore[arg-type]
+             "tools": tools,
+             "tool_choice": tool_choice,
+             "api_version": self.api_version,
+             "api_key": self.api_key,
+             "api_base": self.api_base,
+             "stream": True,
+             "store": False,
+             **kwargs,
+         }
+
+         # Add reasoning parameters if specified
+         if final_reasoning_effort is not None:
+             response_params["reasoning_effort"] = final_reasoning_effort
+         if final_thinking_config is not None:
+             response_params["thinking"] = final_thinking_config
+
+         return await litellm.aresponses(**response_params)  # type: ignore[return-value]
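
For reference, the mapping implemented by `parse_reasoning_config` above can be exercised directly (a small sketch, assuming the 0.5.0 package is importable; no LLM is contacted):

```python
from lite_agent.client import parse_reasoning_config

# A string selects an OpenAI-style reasoning_effort.
assert parse_reasoning_config("high") == ("high", None)

# A dict is passed through as an Anthropic-style thinking config.
assert parse_reasoning_config({"type": "enabled", "budget_tokens": 2048}) == (
    None,
    {"type": "enabled", "budget_tokens": 2048},
)

# True enables the default "medium" effort; False and None disable reasoning.
assert parse_reasoning_config(True) == ("medium", None)
assert parse_reasoning_config(False) == (None, None)
assert parse_reasoning_config(None) == (None, None)
```

The resolved pair is then attached to each request as the `reasoning_effort` and `thinking` parameters in `LiteLLMClient.completion()`/`responses()`, with a per-call `reasoning` argument taking precedence over the client default via `_resolve_reasoning_params`.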