lite-agent 0.10.0__tar.gz → 0.12.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of lite-agent might be problematic according to the registry's automated checks.
- {lite_agent-0.10.0 → lite_agent-0.12.0}/.claude/settings.local.json +4 -1
- {lite_agent-0.10.0 → lite_agent-0.12.0}/CHANGELOG.md +21 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/PKG-INFO +2 -1
- {lite_agent-0.10.0 → lite_agent-0.12.0}/examples/basic.py +1 -1
- {lite_agent-0.10.0 → lite_agent-0.12.0}/examples/context.py +12 -3
- lite_agent-0.12.0/examples/custom_termination.py +119 -0
- lite_agent-0.12.0/examples/history_context_demo.py +198 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/pyproject.toml +3 -5
- {lite_agent-0.10.0 → lite_agent-0.12.0}/src/lite_agent/agent.py +15 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/src/lite_agent/client.py +50 -24
- lite_agent-0.12.0/src/lite_agent/context.py +37 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/src/lite_agent/runner.py +30 -3
- {lite_agent-0.10.0 → lite_agent-0.12.0}/src/lite_agent/utils/advanced_message_builder.py +49 -16
- lite_agent-0.12.0/tests/mocks/context/1.jsonl +19 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/uv.lock +1 -1
- lite_agent-0.10.0/tests/mocks/context/1.jsonl +0 -19
- {lite_agent-0.10.0 → lite_agent-0.12.0}/.github/workflows/ci.yml +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/.gitignore +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/.python-version +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/.vscode/launch.json +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/CLAUDE.md +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/README.md +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/examples/basic_agent.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/examples/basic_model.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/examples/cancel_and_transfer_demo.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/examples/channels/rich_channel.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/examples/chat_display_demo.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/examples/confirm_and_continue.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/examples/consolidate_history.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/examples/debug_non_streaming.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/examples/debug_with_logging.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/examples/handoffs.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/examples/image.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/examples/knowledge/main.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/examples/llm_config_demo.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/examples/message_transfer_example.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/examples/message_transfer_example_new.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/examples/new_message_structure_demo.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/examples/non_streaming.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/examples/reasoning_example.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/examples/response_api_example.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/examples/responses.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/examples/set_chat_history_example.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/examples/simple_debug.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/examples/simple_debug2.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/examples/stop_before_functions.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/examples/stop_with_tool_call.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/examples/streaming_demo.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/examples/terminal.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/examples/translate/main.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/examples/translate/prompts/translation_system.md.j2 +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/examples/translate.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/examples/type_system_example.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/htmlcov/.gitignore +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/htmlcov/class_index.html +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/htmlcov/coverage_html_cb_6fb7b396.js +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/htmlcov/favicon_32_cb_58284776.png +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/htmlcov/function_index.html +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/htmlcov/index.html +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/htmlcov/keybd_closed_cb_ce680311.png +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/htmlcov/status.json +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/htmlcov/style_cb_6b508a39.css +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/htmlcov/z_02adcf90d0f2eeb1___init___py.html +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/htmlcov/z_02adcf90d0f2eeb1_events_py.html +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/htmlcov/z_02adcf90d0f2eeb1_messages_py.html +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/htmlcov/z_02adcf90d0f2eeb1_tool_calls_py.html +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/htmlcov/z_1d32cdd6b7b66bed___init___py.html +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/htmlcov/z_1d32cdd6b7b66bed_message_builder_py.html +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/htmlcov/z_1d32cdd6b7b66bed_metrics_py.html +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/htmlcov/z_40b804173f68aa9e___init___py.html +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/htmlcov/z_40b804173f68aa9e_agent_py.html +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/htmlcov/z_40b804173f68aa9e_chat_display_py.html +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/htmlcov/z_40b804173f68aa9e_client_py.html +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/htmlcov/z_40b804173f68aa9e_constants_py.html +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/htmlcov/z_40b804173f68aa9e_loggers_py.html +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/htmlcov/z_40b804173f68aa9e_message_transfers_py.html +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/htmlcov/z_40b804173f68aa9e_runner_py.html +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/htmlcov/z_71ac9935daa08879___init___py.html +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/htmlcov/z_71ac9935daa08879_base_py.html +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/htmlcov/z_71ac9935daa08879_completion_py.html +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/htmlcov/z_71ac9935daa08879_responses_py.html +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/htmlcov/z_c8357a9ef7e20b45___init___py.html +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/htmlcov/z_c8357a9ef7e20b45_litellm_py.html +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/htmlcov/z_f01690d2832086e5___init___py.html +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/htmlcov/z_f01690d2832086e5_completion_event_processor_py.html +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/htmlcov/z_f01690d2832086e5_response_event_processor_py.html +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/pyrightconfig.json +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/scripts/record_chat_messages.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/src/lite_agent/__init__.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/src/lite_agent/chat_display.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/src/lite_agent/constants.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/src/lite_agent/loggers.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/src/lite_agent/message_transfers.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/src/lite_agent/processors/__init__.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/src/lite_agent/processors/completion_event_processor.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/src/lite_agent/processors/response_event_processor.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/src/lite_agent/py.typed +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/src/lite_agent/response_handlers/__init__.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/src/lite_agent/response_handlers/base.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/src/lite_agent/response_handlers/completion.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/src/lite_agent/response_handlers/responses.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/src/lite_agent/stream_handlers/__init__.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/src/lite_agent/stream_handlers/litellm.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/src/lite_agent/templates/handoffs_source_instructions.xml.j2 +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/src/lite_agent/templates/handoffs_target_instructions.xml.j2 +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/src/lite_agent/templates/wait_for_user_instructions.xml.j2 +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/src/lite_agent/types/__init__.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/src/lite_agent/types/events.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/src/lite_agent/types/messages.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/src/lite_agent/types/tool_calls.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/src/lite_agent/utils/__init__.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/src/lite_agent/utils/message_builder.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/src/lite_agent/utils/message_converter.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/src/lite_agent/utils/message_state_manager.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/src/lite_agent/utils/metrics.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/temp/main.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/tests/integration/test_agent_with_mocks.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/tests/integration/test_basic.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/tests/integration/test_mock_litellm.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/tests/mocks/basic/1.jsonl +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/tests/mocks/confirm_and_continue/1.jsonl +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/tests/mocks/confirm_and_continue/2.jsonl +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/tests/mocks/handoffs/1.jsonl +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/tests/performance/test_set_chat_history_performance.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/tests/test_new_messages.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/tests/unit/test_agent.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/tests/unit/test_agent_additional.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/tests/unit/test_agent_handoffs.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/tests/unit/test_append_message.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/tests/unit/test_cancel_pending_tools.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/tests/unit/test_chat_display.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/tests/unit/test_chat_display_additional.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/tests/unit/test_chat_display_simple.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/tests/unit/test_client.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/tests/unit/test_completion_condition.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/tests/unit/test_file_recording.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/tests/unit/test_litellm_stream_handler.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/tests/unit/test_message_builder.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/tests/unit/test_message_transfer.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/tests/unit/test_message_transfers.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/tests/unit/test_message_transfers_additional.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/tests/unit/test_response_api_format.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/tests/unit/test_response_event_processor.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/tests/unit/test_response_handlers.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/tests/unit/test_runner.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/tests/unit/test_set_chat_history.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/tests/unit/test_simple_stream_handlers.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/tests/unit/test_stream_chunk_processor.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/tests/unit/test_stream_handlers_additional.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/tests/unit/test_streaming_config.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/tests/unit/test_utils_extended.py +0 -0
- {lite_agent-0.10.0 → lite_agent-0.12.0}/tests/utils/mock_litellm.py +0 -0
{lite_agent-0.10.0 → lite_agent-0.12.0}/CHANGELOG.md

@@ -1,3 +1,24 @@
+## v0.12.0
+
+[v0.11.0...v0.12.0](https://github.com/Jannchie/lite-agent/compare/v0.11.0...v0.12.0)
+
+### :sparkles: Features
+
+- **context**: add history context injection for tools - By [Jannchie](mailto:jannchie@gmail.com) in [7649170](https://github.com/Jannchie/lite-agent/commit/7649170)
+- **termination**: add custom termination tools support - By [Jannchie](mailto:jannchie@gmail.com) in [5e65e38](https://github.com/Jannchie/lite-agent/commit/5e65e38)
+
+## v0.11.0
+
+[v0.10.0...v0.11.0](https://github.com/Jannchie/lite-agent/compare/v0.10.0...v0.11.0)
+
+### :sparkles: Features
+
+- **client**: add typed dicts for reasoning config and refactor parsing logic - By [Jannchie](mailto:jannchie@gmail.com) in [8d125b4](https://github.com/Jannchie/lite-agent/commit/8d125b4)
+
+### :adhesive_bandage: Fixes
+
+- **message-builder**: ensure correct meta types and safer field access - By [Jannchie](mailto:jannchie@gmail.com) in [6c29c43](https://github.com/Jannchie/lite-agent/commit/6c29c43)
+
 ## v0.10.0
 
 [v0.9.0...v0.10.0](https://github.com/Jannchie/lite-agent/compare/v0.9.0...v0.10.0)
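The two v0.12.0 features above are exercised in full by the new example files further down in this diff (examples/custom_termination.py and examples/history_context_demo.py). As a condensed sketch only, assuming lite-agent 0.12.0 and its funcall dependency are installed and using the Agent/Runner API exactly as it appears in those diffs (the tool bodies here are placeholders):

```python
from funcall import Context

from lite_agent.agent import Agent
from lite_agent.context import HistoryContext


async def count_messages(ctx: Context[HistoryContext[None]]) -> str:
    """History context injection: tools can read the prior conversation."""
    return f"{len(ctx.value.history_messages)} messages so far."


async def mark_task_complete(task: str) -> str:
    """Custom termination: calling this tool ends the run."""
    return f"Task '{task}' has been marked as complete!"


agent = Agent(
    model="gpt-4.1",
    name="Sketch Assistant",
    instructions="Answer the question, then mark the task complete.",
    tools=[count_messages, mark_task_complete],
    completion_condition="call",
    termination_tools=[mark_task_complete],  # only this tool terminates the run
)
```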
{lite_agent-0.10.0 → lite_agent-0.12.0}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lite-agent
-Version: 0.10.0
+Version: 0.12.0
 Summary: A lightweight, extensible framework for building AI agent.
 Author-email: Jianqi Pan <jannchie@gmail.com>
 License: MIT
@@ -13,6 +13,7 @@ Classifier: Programming Language :: Python :: 3
 Classifier: Programming Language :: Python :: 3.10
 Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
 Classifier: Topic :: Communications :: Chat
 Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
 Classifier: Topic :: Software Development :: Libraries :: Python Modules
{lite_agent-0.10.0 → lite_agent-0.12.0}/examples/basic.py

@@ -33,7 +33,7 @@ agent = Agent(
 
 
 async def main():
-    runner = Runner(agent
+    runner = Runner(agent)
     await runner.run_until_complete(
         "What is the temperature in New York?",
         includes=["usage", "assistant_message", "function_call", "function_call_output", "timing"],
{lite_agent-0.10.0 → lite_agent-0.12.0}/examples/context.py

@@ -6,6 +6,7 @@ from pydantic import BaseModel
 from rich.logging import RichHandler
 
 from lite_agent.agent import Agent
+from lite_agent.context import HistoryContext
 from lite_agent.runner import Runner
 
 
@@ -27,13 +28,21 @@ logger = logging.getLogger("lite_agent")
 logger.setLevel(logging.DEBUG)
 
 
-async def get_current_city_temperature(context: Context[WeatherContext]) -> str:
+async def get_current_city_temperature(context: Context[HistoryContext[WeatherContext]]) -> str:
     """Get the temperature for the current city specified in the context."""
     await asyncio.sleep(1)
-
+
+    # Access user data
+    if not context.value.data:
         msg = "City must be specified in the context."
         raise ValueError(msg)
-
+    city = context.value.data.city
+
+    # Access conversation history
+    messages = context.value.history_messages
+    previous_questions = sum(1 for msg in messages if hasattr(msg, "role") and getattr(msg, "role", None) == "user")
+
+    return f"The temperature in {city} is 25°C. (This is your {previous_questions + 1} question today.)"
 
 
 agent = Agent(
lite_agent-0.12.0/examples/custom_termination.py (new file)

@@ -0,0 +1,119 @@
+import asyncio
+import logging
+
+from rich.logging import RichHandler
+
+from lite_agent.agent import Agent
+from lite_agent.chat_display import display_messages
+from lite_agent.runner import Runner
+
+logging.basicConfig(
+    level=logging.WARNING,
+    format="%(message)s",
+    datefmt="[%X]",
+    handlers=[RichHandler(rich_tracebacks=True)],
+)
+
+logger = logging.getLogger("lite_agent")
+logger.setLevel(logging.DEBUG)
+
+
+async def get_weather(city: str) -> str:
+    """Get the weather for a city."""
+    return f"The weather in {city} is sunny, 25°C."
+
+
+async def mark_task_complete(task: str) -> str:
+    """Mark a task as complete. This will terminate the agent."""
+    return f"Task '{task}' has been marked as complete!"
+
+
+async def save_report(content: str) -> str:
+    """Save a report to file. This will also terminate the agent."""
+    return f"Report saved: {content[:50]}..."
+
+
+# Example 1: Single custom termination tool
+agent1 = Agent(
+    model="gpt-4.1",
+    name="Task Assistant",
+    instructions="You are a task assistant. Use the weather tool if needed, then mark the task complete when done.",
+    tools=[get_weather, mark_task_complete],
+    completion_condition="call",
+    termination_tools=[mark_task_complete],  # Only this tool will terminate
+)
+
+# Example 2: Multiple termination tools
+agent2 = Agent(
+    model="gpt-4.1",
+    name="Report Assistant",
+    instructions="You are a report assistant. Get weather data and either save a report or mark the task complete.",
+    tools=[get_weather, mark_task_complete, save_report],
+    completion_condition="call",
+    termination_tools=[mark_task_complete, save_report],  # Either tool will terminate
+)
+
+# Example 3: Using string names for termination tools
+agent3 = Agent(
+    model="gpt-4.1",
+    name="String-based Assistant",
+    instructions="You are an assistant. Get weather data and mark task complete when done.",
+    tools=[get_weather, mark_task_complete],
+    completion_condition="call",
+    termination_tools=["mark_task_complete"],  # Using string name
+)
+
+
+async def test_single_termination():
+    print("\n=== Testing Single Custom Termination Tool ===")
+    runner = Runner(agent1)
+    resp = runner.run(
+        "Check the weather in Tokyo and mark the task as complete",
+        includes=["assistant_message", "function_call", "function_call_output"],
+    )
+    async for chunk in resp:
+        if chunk.type == "assistant_message":
+            print(f"Assistant: {chunk.message.content}")
+
+    print("\nFinal messages:")
+    display_messages(runner.messages)
+
+
+async def test_multiple_termination():
+    print("\n=== Testing Multiple Termination Tools ===")
+    runner = Runner(agent2)
+    resp = runner.run(
+        "Check weather in London and save a weather report",
+        includes=["assistant_message", "function_call", "function_call_output"],
+    )
+    async for chunk in resp:
+        if chunk.type == "assistant_message":
+            print(f"Assistant: {chunk.message.content}")
+
+    print("\nFinal messages:")
+    display_messages(runner.messages)
+
+
+async def test_string_termination():
+    print("\n=== Testing String-based Termination ===")
+    runner = Runner(agent3)
+    resp = runner.run(
+        "What's the weather like in Paris? Mark complete when done.",
+        includes=["assistant_message", "function_call", "function_call_output"],
+    )
+    async for chunk in resp:
+        if chunk.type == "assistant_message":
+            print(f"Assistant: {chunk.message.content}")
+
+    print("\nFinal messages:")
+    display_messages(runner.messages)
+
+
+async def main():
+    await test_single_termination()
+    await test_multiple_termination()
+    await test_string_termination()
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
lite_agent-0.12.0/examples/history_context_demo.py (new file)

@@ -0,0 +1,198 @@
+"""Demo showing how to access history messages in tool functions."""
+
+import asyncio
+import logging
+
+from funcall import Context
+from pydantic import BaseModel
+from rich.logging import RichHandler
+
+from lite_agent.agent import Agent
+from lite_agent.context import HistoryContext
+from lite_agent.runner import Runner
+
+logging.basicConfig(
+    level=logging.WARNING,
+    format="%(message)s",
+    datefmt="[%X]",
+    handlers=[RichHandler(rich_tracebacks=True)],
+)
+
+logger = logging.getLogger("lite_agent")
+logger.setLevel(logging.DEBUG)
+
+
+# User-defined context data
+class UserContext(BaseModel):
+    user_id: str
+    city: str
+
+
+# Tool that only needs history messages
+async def count_messages(ctx: Context[HistoryContext[None]]) -> str:
+    """Count the number of messages in the conversation history."""
+    messages = ctx.value.history_messages
+    return f"The conversation has {len(messages)} messages in total."
+
+
+# Tool that needs both history and user data
+async def analyze_conversation(ctx: Context[HistoryContext[UserContext]]) -> str:
+    """Analyze conversation history with user context."""
+    messages = ctx.value.history_messages
+    user_data = ctx.value.data
+
+    if user_data is None:
+        return f"Found {len(messages)} messages, but no user data available."
+
+    user_id = user_data.user_id
+    city = user_data.city
+
+    # Analyze message content
+    user_message_count = sum(1 for msg in messages if hasattr(msg, "role") and getattr(msg, "role", None) == "user")
+
+    return f"Analysis for user {user_id} from {city}:\n- Total messages: {len(messages)}\n- User messages: {user_message_count}\n- Assistant messages: {len(messages) - user_message_count}"
+
+
+# Tool that provides conversation summary
+async def summarize_recent(ctx: Context[HistoryContext]) -> str:
+    """Summarize the most recent messages."""
+    messages = ctx.value.history_messages
+
+    if len(messages) == 0:
+        return "No conversation history available."
+
+    # Get the last few messages
+    recent_messages = messages[-3:]
+    summary = f"Recent activity ({len(recent_messages)} messages):\n"
+
+    for i, msg in enumerate(recent_messages, 1):
+        # Handle different message types
+        if hasattr(msg, "content"):
+            content = getattr(msg, "content", "")
+            if isinstance(content, list) and content:
+                # Extract text from content list
+                text_parts = []
+                for item in content:
+                    if hasattr(item, "text"):
+                        text_parts.append(item.text)
+                    elif isinstance(item, str):
+                        text_parts.append(item)
+                content_text = " ".join(text_parts)[:50] + "..."
+            elif isinstance(content, str):
+                content_text = content[:50] + "..."
+            else:
+                content_text = str(content)[:50] + "..."
+        else:
+            content_text = "No content"
+
+        role = getattr(msg, "role", "unknown")
+        summary += f"{i}. {role}: {content_text}\n"
+
+    return summary
+
+
+agent = Agent(
+    model="gpt-4o-mini",
+    name="History Assistant",
+    instructions="You are an assistant that can analyze conversation history. Use the provided tools to help users understand their conversation patterns.",
+    tools=[count_messages, analyze_conversation, summarize_recent],
+)
+
+
+async def demo_without_user_context():
+    """Demo using tools without providing user context."""
+    print("\n=== Demo 1: No user context (history only) ===")
+
+    runner = Runner(agent)
+
+    # Add some initial conversation
+    runner.add_user_message("Hello, how are you?")
+    runner.add_assistant_message("I'm doing well, thank you! How can I help you today?")
+    runner.add_user_message("Can you count our messages?")
+
+    resp = runner.run(
+        "Please count how many messages we have exchanged so far.",
+        includes=["function_call_output"],
+    )
+
+    async for chunk in resp:
+        if chunk.type == "function_call_output":
+            print(f"Tool result: {chunk.content}")
+
+
+async def demo_with_user_context():
+    """Demo using tools with user context data."""
+    print("\n=== Demo 2: With user context ===")
+
+    runner = Runner(agent)
+
+    # Add some conversation history
+    runner.add_user_message("Hi there!")
+    runner.add_assistant_message("Hello! Nice to meet you.")
+    runner.add_user_message("I'm from Beijing.")
+    runner.add_assistant_message("That's great! Beijing is a wonderful city.")
+    runner.add_user_message("Can you analyze our conversation?")
+
+    # Provide user context
+    user_ctx = UserContext(user_id="alice_123", city="Beijing")
+
+    resp = runner.run(
+        "Please analyze our conversation with my user information.",
+        context=Context(user_ctx),
+        includes=["function_call_output"],
+    )
+
+    async for chunk in resp:
+        if chunk.type == "function_call_output":
+            print(f"Tool result: {chunk.content}")
+
+
+async def demo_conversation_summary():
+    """Demo conversation summarization."""
+    print("\n=== Demo 3: Conversation summary ===")
+
+    runner = Runner(agent)
+
+    # Build up a longer conversation
+    conversation = [
+        ("user", "Hello, I need help with Python."),
+        ("assistant", "I'd be happy to help with Python! What specifically do you need help with?"),
+        ("user", "I'm trying to understand async/await."),
+        ("assistant", "Async/await is used for asynchronous programming in Python. It allows you to write concurrent code."),
+        ("user", "Can you give me an example?"),
+        ("assistant", "Sure! Here's a simple example: async def my_function(): await some_task()"),
+        ("user", "That's helpful, thanks!"),
+    ]
+
+    for role, content in conversation:
+        if role == "user":
+            runner.add_user_message(content)
+        else:
+            runner.add_assistant_message(content)
+
+    resp = runner.run(
+        "Can you summarize our recent conversation?",
+        includes=["function_call_output"],
+    )
+
+    async for chunk in resp:
+        if chunk.type == "function_call_output":
+            print(f"Tool result:\n{chunk.content}")
+
+
+async def main():
+    """Run all demos."""
+    await demo_without_user_context()
+    await demo_with_user_context()
+    await demo_conversation_summary()
+
+    print("\n=== Demos completed! ===")
+    print("Key takeaways:")
+    print("1. Tools automatically receive history_messages in context")
+    print("2. Use Context[HistoryContext[None]] for history-only tools")
+    print("3. Use Context[HistoryContext[YourDataType]] for tools that need both")
+    print("4. Full type safety and IDE completion support")
+
+
+if __name__ == "__main__":
+    asyncio.run(main())
{lite_agent-0.10.0 → lite_agent-0.12.0}/pyproject.toml

@@ -1,6 +1,6 @@
 [project]
 name = "lite-agent"
-version = "0.10.0"
+version = "0.12.0"
 description = "A lightweight, extensible framework for building AI agent."
 readme = "README.md"
 authors = [{ name = "Jianqi Pan", email = "jannchie@gmail.com" }]
@@ -29,6 +29,7 @@ classifiers = [
     "Programming Language :: Python :: 3.10",
     "Programming Language :: Python :: 3.11",
     "Programming Language :: Python :: 3.12",
+    "Programming Language :: Python :: 3.13",
     "License :: OSI Approved :: MIT License",
     "Operating System :: OS Independent",
     "Intended Audience :: Developers",
@@ -90,10 +91,7 @@ ignore = [
     "ARG002",
     "S110",
 ]
-"examples/**/*" = [
-    "SLF001",
-    "S110",
-]
+"examples/**/*" = ["SLF001", "S110"]
 
 
 [tool.uv]
{lite_agent-0.10.0 → lite_agent-0.12.0}/src/lite_agent/agent.py

@@ -41,6 +41,7 @@ class Agent:
         message_transfer: Callable[[RunnerMessages], RunnerMessages] | None = None,
         completion_condition: str = "stop",
         stop_before_tools: list[str] | list[Callable] | None = None,
+        termination_tools: list[str] | list[Callable] | None = None,
     ) -> None:
         self.name = name
         self.instructions = instructions
@@ -58,6 +59,20 @@ class Agent:
         else:
             self.stop_before_functions = set()
 
+        # Convert termination_tools to function names
+        if termination_tools:
+            self.termination_tools = set()
+            for func in termination_tools:
+                if isinstance(func, str):
+                    self.termination_tools.add(func)
+                elif callable(func):
+                    self.termination_tools.add(func.__name__)
+                else:
+                    msg = f"termination_tools must contain strings or callables, got {type(func)}"
+                    raise TypeError(msg)
+        else:
+            self.termination_tools = set()
+
         if isinstance(model, BaseLLMClient):
             # If model is a BaseLLMClient instance, use it directly
             self.client = model
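A standalone sketch of the normalization performed by the constructor change above. `normalize_termination_tools` is a hypothetical helper used only to illustrate the inline loop in `Agent.__init__`; the real logic is not factored out like this.

```python
from collections.abc import Callable


def normalize_termination_tools(termination_tools: list[str] | list[Callable] | None) -> set[str]:
    """Mirror of the inline logic: strings pass through, callables contribute their __name__."""
    names: set[str] = set()
    for func in termination_tools or []:
        if isinstance(func, str):
            names.add(func)
        elif callable(func):
            names.add(func.__name__)
        else:
            msg = f"termination_tools must contain strings or callables, got {type(func)}"
            raise TypeError(msg)
    return names


async def mark_task_complete(task: str) -> str:
    return f"Task '{task}' has been marked as complete!"


print(normalize_termination_tools([mark_task_complete, "save_report"]))
# {'mark_task_complete', 'save_report'}
```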
{lite_agent-0.10.0 → lite_agent-0.12.0}/src/lite_agent/client.py

@@ -1,6 +1,6 @@
 import abc
 import os
-from typing import Any, Literal
+from typing import Any, Literal, NotRequired, TypedDict
 
 import litellm
 from openai.types.chat import ChatCompletionToolParam
@@ -8,12 +8,28 @@ from openai.types.responses import FunctionToolParam
 from pydantic import BaseModel
 
 ReasoningEffort = Literal["minimal", "low", "medium", "high"]
-
+
+
+class ThinkingConfigDict(TypedDict):
+    """Thinking configuration for reasoning models like Claude."""
+
+    type: Literal["enabled"]  # enable thinking
+    budget_tokens: NotRequired[int]  # optional thinking token budget
+
+
+class ReasoningEffortDict(TypedDict):
+    """Reasoning effort configuration."""
+
+    effort: ReasoningEffort
+
+
+ThinkingConfig = ThinkingConfigDict | None
 
 # Unified reasoning configuration type
 ReasoningConfig = (
-
-
+    ReasoningEffort  # "minimal", "low", "medium", "high"
+    | ReasoningEffortDict  # {"effort": "minimal"}
+    | ThinkingConfigDict  # {"type": "enabled", "budget_tokens": 2048}
     | bool  # simple True/False switch
     | None  # reasoning disabled
 )
@@ -36,8 +52,9 @@ def parse_reasoning_config(reasoning: ReasoningConfig) -> tuple[ReasoningEffort
 
     Args:
         reasoning: Unified reasoning configuration
-            -
-            -
+            - ReasoningEffort: "minimal", "low", "medium", "high" -> reasoning_effort
+            - ReasoningEffortDict: {"effort": "minimal"} -> reasoning_effort
+            - ThinkingConfigDict: {"type": "enabled", "budget_tokens": 2048} -> thinking_config
             - bool: True -> "medium", False -> None
             - None: reasoning disabled
 
@@ -47,28 +64,38 @@ def parse_reasoning_config(reasoning: ReasoningConfig) -> tuple[ReasoningEffort
     if reasoning is None:
         return None, None
 
-    if isinstance(reasoning, str):
-        #
-
-
-        return reasoning, None  # type: ignore[return-value]
-    elif isinstance(reasoning, dict):
-        # Check for the {"effort": "value"} format
-        if "effort" in reasoning and len(reasoning) == 1:
-            effort = reasoning["effort"]
-            if isinstance(effort, str) and effort in ("minimal", "low", "medium", "high"):
-                return effort, None  # type: ignore[return-value]
-        else:
-            # Other dict formats are used as thinking_config
-            return None, reasoning
-    elif isinstance(reasoning, bool):
-        # Boolean: True uses the default "medium", False disables reasoning
+    if isinstance(reasoning, str) and reasoning in ("minimal", "low", "medium", "high"):
+        return reasoning, None  # type: ignore[return-value]
+
+    if isinstance(reasoning, bool):
         return ("medium", None) if reasoning else (None, None)
 
+    if isinstance(reasoning, dict):
+        return _parse_dict_reasoning_config(reasoning)
+
     # Other types or invalid formats: reasoning disabled by default
     return None, None
 
 
+def _parse_dict_reasoning_config(reasoning: ReasoningEffortDict | ThinkingConfigDict | dict[str, Any]) -> tuple[ReasoningEffort | None, ThinkingConfig]:
+    """Parse a dict-form reasoning configuration."""
+    # Check for the {"effort": "value"} format (ReasoningEffortDict)
+    if "effort" in reasoning and len(reasoning) == 1:
+        effort = reasoning["effort"]
+        if isinstance(effort, str) and effort in ("minimal", "low", "medium", "high"):
+            return effort, None  # type: ignore[return-value]
+
+    # Check for the ThinkingConfigDict format
+    if "type" in reasoning and reasoning.get("type") == "enabled":
+        # Validate the structure of ThinkingConfigDict
+        valid_keys = {"type", "budget_tokens"}
+        if all(key in valid_keys for key in reasoning):
+            return None, reasoning  # type: ignore[return-value]
+
+    # Unknown dict formats are still tried as thinking_config
+    return None, reasoning  # type: ignore[return-value]
+
+
 class BaseLLMClient(abc.ABC):
     """Base class for LLM clients."""
 
@@ -240,8 +267,7 @@ class LiteLLMClient(BaseLLMClient):
 
         # Add reasoning parameters if specified
         if final_reasoning_effort is not None:
-            response_params["
+            response_params["reasoning"] = {"effort": final_reasoning_effort}
         if final_thinking_config is not None:
             response_params["thinking"] = final_thinking_config
-
         return await litellm.aresponses(**response_params)  # type: ignore[return-value]
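Assuming lite-agent 0.12.0 is installed and `parse_reasoning_config` is importable from `lite_agent.client` as the diff above suggests, the refactored parser maps the documented input shapes as follows:

```python
from lite_agent.client import parse_reasoning_config

print(parse_reasoning_config("low"))                                       # ('low', None)
print(parse_reasoning_config({"effort": "minimal"}))                       # ('minimal', None)
print(parse_reasoning_config({"type": "enabled", "budget_tokens": 2048}))  # (None, {'type': 'enabled', 'budget_tokens': 2048})
print(parse_reasoning_config(True))                                        # ('medium', None)
print(parse_reasoning_config(None))                                        # (None, None)
```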
lite_agent-0.12.0/src/lite_agent/context.py (new file)

@@ -0,0 +1,37 @@
+"""Context utilities for injecting history messages into tool calls."""
+
+from __future__ import annotations
+
+from typing import Any, Generic, TypeVar
+
+from pydantic import BaseModel
+
+T = TypeVar("T")
+
+
+class HistoryContext(BaseModel, Generic[T]):
+    """Context container that carries the conversation history.
+
+    The Runner automatically uses this class to wrap the user's context so that tool functions can access history messages.
+
+    Attributes:
+        history_messages: All history messages recorded before the tool call
+        data: Optional user-defined context data
+
+    Examples:
+        >>> # A tool that only needs the history messages
+        >>> async def count_messages(ctx: Context[HistoryContext[None]]) -> str:
+        ...     return f"{len(ctx.value.history_messages)} messages in total"
+
+        >>> # A tool that needs both history messages and user data
+        >>> class UserData(BaseModel):
+        ...     user_id: str
+        >>>
+        >>> async def analyze_user(ctx: Context[HistoryContext[UserData]]) -> str:
+        ...     messages = ctx.value.history_messages
+        ...     user_id = ctx.value.data.user_id
+        ...     return f"User {user_id} has {len(messages)} messages"
+    """
+
+    history_messages: list[Any]
+    data: T | None = None