lite-agent 0.8.0__tar.gz → 0.10.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of lite-agent might be problematic.
- {lite_agent-0.8.0 → lite_agent-0.10.0}/.claude/settings.local.json +2 -1
- {lite_agent-0.8.0 → lite_agent-0.10.0}/CHANGELOG.md +40 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/CLAUDE.md +22 -8
- {lite_agent-0.8.0 → lite_agent-0.10.0}/PKG-INFO +2 -2
- {lite_agent-0.8.0 → lite_agent-0.10.0}/examples/basic.py +6 -4
- {lite_agent-0.8.0 → lite_agent-0.10.0}/examples/basic_agent.py +4 -4
- {lite_agent-0.8.0 → lite_agent-0.10.0}/examples/cancel_and_transfer_demo.py +21 -19
- lite_agent-0.10.0/examples/knowledge/main.py +67 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/examples/non_streaming.py +2 -3
- lite_agent-0.10.0/examples/reasoning_example.py +127 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/examples/response_api_example.py +2 -2
- {lite_agent-0.8.0 → lite_agent-0.10.0}/examples/simple_debug.py +1 -2
- {lite_agent-0.8.0 → lite_agent-0.10.0}/examples/stop_before_functions.py +3 -3
- {lite_agent-0.8.0 → lite_agent-0.10.0}/examples/terminal.py +2 -2
- lite_agent-0.10.0/htmlcov/.gitignore +2 -0
- lite_agent-0.10.0/htmlcov/class_index.html +771 -0
- lite_agent-0.10.0/htmlcov/coverage_html_cb_6fb7b396.js +733 -0
- lite_agent-0.10.0/htmlcov/favicon_32_cb_58284776.png +0 -0
- lite_agent-0.10.0/htmlcov/function_index.html +1475 -0
- lite_agent-0.10.0/htmlcov/index.html +272 -0
- lite_agent-0.10.0/htmlcov/keybd_closed_cb_ce680311.png +0 -0
- lite_agent-0.10.0/htmlcov/status.json +1 -0
- lite_agent-0.10.0/htmlcov/style_cb_6b508a39.css +377 -0
- lite_agent-0.10.0/htmlcov/z_02adcf90d0f2eeb1___init___py.html +228 -0
- lite_agent-0.10.0/htmlcov/z_02adcf90d0f2eeb1_events_py.html +216 -0
- lite_agent-0.10.0/htmlcov/z_02adcf90d0f2eeb1_messages_py.html +441 -0
- lite_agent-0.10.0/htmlcov/z_02adcf90d0f2eeb1_tool_calls_py.html +112 -0
- lite_agent-0.10.0/htmlcov/z_1d32cdd6b7b66bed___init___py.html +97 -0
- lite_agent-0.10.0/htmlcov/z_1d32cdd6b7b66bed_message_builder_py.html +308 -0
- lite_agent-0.10.0/htmlcov/z_1d32cdd6b7b66bed_metrics_py.html +147 -0
- lite_agent-0.10.0/htmlcov/z_40b804173f68aa9e___init___py.html +105 -0
- lite_agent-0.10.0/htmlcov/z_40b804173f68aa9e_agent_py.html +777 -0
- lite_agent-0.10.0/htmlcov/z_40b804173f68aa9e_chat_display_py.html +884 -0
- lite_agent-0.10.0/htmlcov/z_40b804173f68aa9e_client_py.html +337 -0
- lite_agent-0.10.0/htmlcov/z_40b804173f68aa9e_constants_py.html +127 -0
- lite_agent-0.10.0/htmlcov/z_40b804173f68aa9e_loggers_py.html +100 -0
- lite_agent-0.10.0/htmlcov/z_40b804173f68aa9e_message_transfers_py.html +216 -0
- lite_agent-0.10.0/htmlcov/z_40b804173f68aa9e_runner_py.html +993 -0
- lite_agent-0.10.0/htmlcov/z_71ac9935daa08879___init___py.html +108 -0
- lite_agent-0.10.0/htmlcov/z_71ac9935daa08879_base_py.html +151 -0
- lite_agent-0.10.0/htmlcov/z_71ac9935daa08879_completion_py.html +175 -0
- lite_agent-0.10.0/htmlcov/z_71ac9935daa08879_responses_py.html +173 -0
- lite_agent-0.10.0/htmlcov/z_c8357a9ef7e20b45___init___py.html +103 -0
- lite_agent-0.10.0/htmlcov/z_c8357a9ef7e20b45_litellm_py.html +181 -0
- lite_agent-0.10.0/htmlcov/z_f01690d2832086e5___init___py.html +101 -0
- lite_agent-0.10.0/htmlcov/z_f01690d2832086e5_completion_event_processor_py.html +397 -0
- lite_agent-0.10.0/htmlcov/z_f01690d2832086e5_response_event_processor_py.html +310 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/pyproject.toml +7 -2
- lite_agent-0.10.0/pyrightconfig.json +12 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/scripts/record_chat_messages.py +1 -1
- {lite_agent-0.8.0 → lite_agent-0.10.0}/src/lite_agent/agent.py +17 -263
- {lite_agent-0.8.0 → lite_agent-0.10.0}/src/lite_agent/chat_display.py +304 -41
- {lite_agent-0.8.0 → lite_agent-0.10.0}/src/lite_agent/client.py +15 -8
- {lite_agent-0.8.0 → lite_agent-0.10.0}/src/lite_agent/message_transfers.py +21 -2
- {lite_agent-0.8.0 → lite_agent-0.10.0}/src/lite_agent/processors/response_event_processor.py +4 -2
- {lite_agent-0.8.0 → lite_agent-0.10.0}/src/lite_agent/runner.py +90 -104
- {lite_agent-0.8.0 → lite_agent-0.10.0}/src/lite_agent/types/__init__.py +1 -1
- {lite_agent-0.8.0 → lite_agent-0.10.0}/src/lite_agent/types/messages.py +2 -1
- lite_agent-0.10.0/src/lite_agent/utils/advanced_message_builder.py +201 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/src/lite_agent/utils/message_builder.py +4 -2
- lite_agent-0.10.0/src/lite_agent/utils/message_converter.py +232 -0
- lite_agent-0.10.0/src/lite_agent/utils/message_state_manager.py +152 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/tests/integration/test_agent_with_mocks.py +1 -1
- {lite_agent-0.8.0 → lite_agent-0.10.0}/tests/unit/test_agent_additional.py +6 -12
- {lite_agent-0.8.0 → lite_agent-0.10.0}/tests/unit/test_cancel_pending_tools.py +1 -1
- {lite_agent-0.8.0 → lite_agent-0.10.0}/tests/unit/test_chat_display_additional.py +2 -3
- lite_agent-0.10.0/tests/unit/test_chat_display_simple.py +155 -0
- lite_agent-0.10.0/tests/unit/test_client.py +326 -0
- lite_agent-0.10.0/tests/unit/test_message_builder.py +422 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/tests/unit/test_message_transfers.py +27 -20
- lite_agent-0.10.0/tests/unit/test_response_handlers.py +270 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/tests/unit/test_runner.py +4 -4
- lite_agent-0.10.0/tests/unit/test_utils_extended.py +97 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/uv.lock +373 -348
- lite_agent-0.8.0/examples/reasoning_example.py +0 -113
- {lite_agent-0.8.0 → lite_agent-0.10.0}/.github/workflows/ci.yml +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/.gitignore +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/.python-version +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/.vscode/launch.json +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/README.md +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/examples/basic_model.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/examples/channels/rich_channel.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/examples/chat_display_demo.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/examples/confirm_and_continue.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/examples/consolidate_history.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/examples/context.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/examples/debug_non_streaming.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/examples/debug_with_logging.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/examples/handoffs.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/examples/image.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/examples/llm_config_demo.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/examples/message_transfer_example.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/examples/message_transfer_example_new.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/examples/new_message_structure_demo.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/examples/responses.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/examples/set_chat_history_example.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/examples/simple_debug2.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/examples/stop_with_tool_call.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/examples/streaming_demo.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/examples/translate/main.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/examples/translate/prompts/translation_system.md.j2 +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/examples/translate.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/examples/type_system_example.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/src/lite_agent/__init__.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/src/lite_agent/constants.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/src/lite_agent/loggers.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/src/lite_agent/processors/__init__.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/src/lite_agent/processors/completion_event_processor.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/src/lite_agent/py.typed +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/src/lite_agent/response_handlers/__init__.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/src/lite_agent/response_handlers/base.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/src/lite_agent/response_handlers/completion.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/src/lite_agent/response_handlers/responses.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/src/lite_agent/stream_handlers/__init__.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/src/lite_agent/stream_handlers/litellm.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/src/lite_agent/templates/handoffs_source_instructions.xml.j2 +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/src/lite_agent/templates/handoffs_target_instructions.xml.j2 +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/src/lite_agent/templates/wait_for_user_instructions.xml.j2 +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/src/lite_agent/types/events.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/src/lite_agent/types/tool_calls.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/src/lite_agent/utils/__init__.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/src/lite_agent/utils/metrics.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/temp/main.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/tests/integration/test_basic.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/tests/integration/test_mock_litellm.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/tests/mocks/basic/1.jsonl +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/tests/mocks/confirm_and_continue/1.jsonl +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/tests/mocks/confirm_and_continue/2.jsonl +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/tests/mocks/context/1.jsonl +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/tests/mocks/handoffs/1.jsonl +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/tests/performance/test_set_chat_history_performance.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/tests/test_new_messages.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/tests/unit/test_agent.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/tests/unit/test_agent_handoffs.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/tests/unit/test_append_message.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/tests/unit/test_chat_display.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/tests/unit/test_completion_condition.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/tests/unit/test_file_recording.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/tests/unit/test_litellm_stream_handler.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/tests/unit/test_message_transfer.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/tests/unit/test_message_transfers_additional.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/tests/unit/test_response_api_format.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/tests/unit/test_response_event_processor.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/tests/unit/test_set_chat_history.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/tests/unit/test_simple_stream_handlers.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/tests/unit/test_stream_chunk_processor.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/tests/unit/test_stream_handlers_additional.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/tests/unit/test_streaming_config.py +0 -0
- {lite_agent-0.8.0 → lite_agent-0.10.0}/tests/utils/mock_litellm.py +0 -0

{lite_agent-0.8.0 → lite_agent-0.10.0}/CHANGELOG.md

@@ -1,3 +1,43 @@
+## v0.10.0
+
+[v0.9.0...v0.10.0](https://github.com/Jannchie/lite-agent/compare/v0.9.0...v0.10.0)
+
+### :rocket: Breaking Changes
+
+- **reasoning-config**: streamline client reasoning config and improve error handling - By [Jannchie](mailto:jannchie@gmail.com) in [23c5d01](https://github.com/Jannchie/lite-agent/commit/23c5d01)
+- **runner**: remove deprecated run_continue methods - By [Jannchie](mailto:jannchie@gmail.com) in [37a8675](https://github.com/Jannchie/lite-agent/commit/37a8675)
+
+### :adhesive_bandage: Fixes
+
+- **chat-display**: separate label and content rows in messages - By [Jannchie](mailto:jannchie@gmail.com) in [06a6405](https://github.com/Jannchie/lite-agent/commit/06a6405)
+- **runner**: update meta fields handling in update_meta - By [Jannchie](mailto:jannchie@gmail.com) in [254f610](https://github.com/Jannchie/lite-agent/commit/254f610)
+
+### :art: Refactors
+
+- **messages**: modularize message handling and conversion - By [Jannchie](mailto:jannchie@gmail.com) in [acad106](https://github.com/Jannchie/lite-agent/commit/acad106)
+
+### :memo: Documentation
+
+- **docs**: expand examples and clarify message types - By [Jannchie](mailto:jannchie@gmail.com) in [9f3a5d7](https://github.com/Jannchie/lite-agent/commit/9f3a5d7)
+
+## v0.9.0
+
+[v0.8.0...v0.9.0](https://github.com/Jannchie/lite-agent/compare/v0.8.0...v0.9.0)
+
+### :sparkles: Features
+
+- **agent**: improve assistant message tool_call handling - By [Jannchie](mailto:jannchie@gmail.com) in [45e6de8](https://github.com/Jannchie/lite-agent/commit/45e6de8)
+- **chat-display**: add column-style message display and show model info - By [Jannchie](mailto:jannchie@gmail.com) in [3559137](https://github.com/Jannchie/lite-agent/commit/3559137)
+- **tests**: add tests for client message_builder and response handlers - By [Jannchie](mailto:jannchie@gmail.com) in [ceec7d7](https://github.com/Jannchie/lite-agent/commit/ceec7d7)
+
+### :art: Refactors
+
+- **examples**: update examples to use new message and content types - By [Jannchie](mailto:jannchie@gmail.com) in [59b4618](https://github.com/Jannchie/lite-agent/commit/59b4618)
+
+### :wrench: Chores
+
+- **deps**: bump funcall to 0.11.0 - By [Jannchie](mailto:jannchie@gmail.com) in [eb5e7ae](https://github.com/Jannchie/lite-agent/commit/eb5e7ae)
+
 ## v0.8.0
 
 [v0.7.0...v0.8.0](https://github.com/Jannchie/lite-agent/compare/v0.7.0...v0.8.0)
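
The "remove deprecated run_continue methods" entry corresponds to the example diffs further down, where continuation calls become `runner.run(None, ...)`. The following is a minimal editorial migration sketch, not code from the package; it assumes a `Runner` whose previous turn stopped before executing tools, and the pre-0.10.0 call in the comment is inferred from the changelog entry (the old line is truncated in this diff).

```python
from lite_agent.runner import Runner


async def resume_pending_turn(runner: Runner) -> None:
    """Resume a turn that stopped before tool execution, using the 0.10.0 API."""
    # 0.8.0 (removed; name inferred from the changelog entry above):
    # resp = runner.run_continue(includes=[...])
    #
    # 0.10.0: pass None as the user input to continue the current turn,
    # as in examples/stop_before_functions.py below.
    resp = runner.run(
        None,
        includes=["usage", "assistant_message", "function_call", "function_call_output"],
    )
    async for chunk in resp:
        print(chunk.type)
```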

{lite_agent-0.8.0 → lite_agent-0.10.0}/CLAUDE.md

@@ -38,17 +38,20 @@ uv run <command> # Run command in project environment
 ### Running Examples
 
 ```bash
-uv run python examples/basic.py
-uv run python examples/handoffs.py
-uv run python examples/chat_display_demo.py
-uv run python examples/context.py
-uv run python examples/terminal.py
-uv run python examples/translate/main.py
+uv run python examples/basic.py # Simple agent with tool calling
+uv run python examples/handoffs.py # Agent-to-agent transfers
+uv run python examples/chat_display_demo.py # Rich console output
+uv run python examples/context.py # Context passing to tools
+uv run python examples/terminal.py # Terminal-based interaction
+uv run python examples/translate/main.py # Translation agent example
+uv run python examples/streaming_demo.py # Streaming responses demo
+uv run python examples/response_api_example.py # Response API format demo
+uv run python scripts/record_chat_messages.py # Record conversations for testing
 ```
 
 ## Project Architecture
 
-LiteAgent is a lightweight AI agent framework
+LiteAgent is a lightweight AI agent framework designed for flexibility with any LLM provider. The core architecture consists of:
 
 ### Core Components
 
@@ -127,6 +130,16 @@ The framework supports two OpenAI API modes:
 
 Set via `Runner(agent, api="completion")` or `Runner(agent, api="responses")`.
 
+### Message Types and Streaming
+
+The framework provides rich message types supporting both text and structured content:
+
+- **Text messages**: Simple string content for basic interactions
+- **Tool calls**: Structured function calls with parameters and results
+- **Agent transfers**: Built-in support for handoffs between specialized agents
+- **Rich content**: Support for complex message structures via Pydantic models
+- **Streaming chunks**: Real-time processing of LLM responses with granular event types
+
 ### Development Notes
 
 - Project uses strict ruff linting with `select = ["ALL"]` and specific ignores
@@ -135,7 +148,8 @@ Set via `Runner(agent, api="completion")` or `Runner(agent, api="responses")`.
 - Mock conversations stored in `tests/mocks/` as JSONL files for reproducible testing
 - Examples in `examples/` directory demonstrate various usage patterns
 - Template system uses Jinja2 for dynamic instruction generation (`src/lite_agent/templates/`)
--
+- Does NOT directly depend on `litellm` - works with any compatible LLM client via `BaseLLMClient` interface
 - Chat display functionality uses `rich` library for formatted console output
+- Uses `pyright` for type checking with custom configuration excluding examples and temp directories
 
 The framework emphasizes simplicity and extensibility while maintaining full type safety and comprehensive streaming support.
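
The new "Message Types and Streaming" section is backed by concrete types in `lite_agent.types`, which appear in the example diffs below. The sketch that follows is an editorial illustration built only from the constructors visible in the examples/basic_agent.py hunk; the assistant-side call mirrors the user-side one by analogy and is an assumption, not documented API.

```python
from lite_agent.types import (
    AssistantTextContent,
    NewAssistantMessage,
    NewUserMessage,
    UserTextContent,
)

# Structured user message, as in the updated examples/basic_agent.py:
# content is a list of typed content parts rather than a plain string.
user_msg = NewUserMessage(
    content=[UserTextContent(text="What is the temperature in New York?")],
)

# Assumption: NewAssistantMessage follows the same content-list pattern;
# only its import is shown in examples/cancel_and_transfer_demo.py.
assistant_msg = NewAssistantMessage(
    content=[AssistantTextContent(text="Let me check the weather for you.")],
)

print(user_msg, assistant_msg)
```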

{lite_agent-0.8.0 → lite_agent-0.10.0}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: lite-agent
-Version: 0.
+Version: 0.10.0
 Summary: A lightweight, extensible framework for building AI agent.
 Author-email: Jianqi Pan <jannchie@gmail.com>
 License: MIT
@@ -18,7 +18,7 @@ Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
 Classifier: Topic :: Software Development :: Libraries :: Python Modules
 Requires-Python: >=3.10
 Requires-Dist: aiofiles>=24.1.0
-Requires-Dist: funcall>=0.
+Requires-Dist: funcall>=0.11.0
 Requires-Dist: openai<=1.99.5
 Requires-Dist: prompt-toolkit>=3.0.51
 Requires-Dist: rich>=14.0.0

{lite_agent-0.8.0 → lite_agent-0.10.0}/examples/basic.py

@@ -4,6 +4,8 @@ import logging
 from rich.logging import RichHandler
 
 from lite_agent.agent import Agent
+from lite_agent.chat_display import display_messages
+from lite_agent.client import LiteLLMClient
 from lite_agent.runner import Runner
 
 logging.basicConfig(
@@ -23,7 +25,7 @@ async def get_temperature(city: str) -> str:
 
 
 agent = Agent(
-    model="gpt-
+    model=LiteLLMClient(model="gpt-5-nano", reasoning={"effort": "minimal"}),
     name="Weather Assistant",
     instructions="You are a helpful weather assistant. Before using tools, briefly explain what you are going to do. Provide friendly and informative responses.",
     tools=[get_temperature],
@@ -31,12 +33,12 @@ agent = Agent(
 
 
 async def main():
-    runner = Runner(agent, streaming=
-
+    runner = Runner(agent, streaming=True, api="completion")
+    await runner.run_until_complete(
         "What is the temperature in New York?",
         includes=["usage", "assistant_message", "function_call", "function_call_output", "timing"],
     )
-
+    display_messages(runner.messages)
 
 
 if __name__ == "__main__":
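
Assembled from the three examples/basic.py hunks above, the updated example reads roughly as follows. The `get_temperature` body and the logging setup are outside the diff hunks, so the tool body below is a placeholder and the logging setup is omitted; everything else mirrors the lines shown above.

```python
import asyncio

from lite_agent.agent import Agent
from lite_agent.chat_display import display_messages
from lite_agent.client import LiteLLMClient
from lite_agent.runner import Runner


async def get_temperature(city: str) -> str:
    """Placeholder body; the real tool implementation is not shown in the diff."""
    return f"The temperature in {city} is 20°C."


agent = Agent(
    model=LiteLLMClient(model="gpt-5-nano", reasoning={"effort": "minimal"}),
    name="Weather Assistant",
    instructions="You are a helpful weather assistant. Before using tools, briefly explain what you are going to do. Provide friendly and informative responses.",
    tools=[get_temperature],
)


async def main() -> None:
    # Streaming completion-API run, then a rich-formatted dump of the transcript.
    runner = Runner(agent, streaming=True, api="completion")
    await runner.run_until_complete(
        "What is the temperature in New York?",
        includes=["usage", "assistant_message", "function_call", "function_call_output", "timing"],
    )
    display_messages(runner.messages)


if __name__ == "__main__":
    asyncio.run(main())
```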

{lite_agent-0.8.0 → lite_agent-0.10.0}/examples/basic_agent.py

@@ -4,6 +4,7 @@ import logging
 from rich.logging import RichHandler
 
 from lite_agent.agent import Agent
+from lite_agent.types import NewUserMessage, UserTextContent
 
 logging.basicConfig(
     level=logging.WARNING,
@@ -39,10 +40,9 @@ agent = Agent(
 async def main():
     resp = await agent.completion(
         [
-
-            "
-
-            },
+            NewUserMessage(
+                content=[UserTextContent(text="What is the temperature and whether in New York?")],
+            ),
         ],
     )
     async for chunk in resp:

{lite_agent-0.8.0 → lite_agent-0.10.0}/examples/cancel_and_transfer_demo.py

@@ -6,6 +6,7 @@ Demo showing two new features:
 """
 
 import asyncio
+import json
 import logging
 
 from funcall.decorators import tool
@@ -13,7 +14,8 @@ from rich.logging import RichHandler
 
 from lite_agent.agent import Agent
 from lite_agent.runner import Runner
-from lite_agent.types import AssistantTextContent, AssistantToolCall, NewAssistantMessage
+from lite_agent.types import AssistantTextContent, AssistantToolCall, AssistantToolCallResult, NewAssistantMessage, ToolCall, ToolCallFunction
+from lite_agent.types.events import FunctionCallOutputEvent
 
 logging.basicConfig(
     level=logging.WARNING,
@@ -106,9 +108,9 @@ async def demo_1_cancel_pending_tools():
            chunks.append(chunk)
            if len(chunks) >= 3:  # Stop after getting cancellation events
                break
-    except Exception:
+    except Exception as e:
         # Expected - no real LLM configured
-
+        logger.debug("Expected exception in demo: %s", e)
 
     print(f"Events from run() method: {len(chunks)}")
     for i, chunk in enumerate(chunks):
@@ -156,7 +158,7 @@
         print(f"Content items in assistant message: {len(assistant_msg.content)}")
 
         # Show the cancellation results
-        cancellation_results = [item for item in assistant_msg.content if item
+        cancellation_results = [item for item in assistant_msg.content if isinstance(item, AssistantToolCallResult)]
         print(f"Cancellation results added: {len(cancellation_results)}")
         for result in cancellation_results:
             print(f"  - {result.call_id}: {result.output}")
@@ -194,9 +196,6 @@ async def demo_2_transfer_events():
     print(f"Current agent: {runner.agent.name}")
 
     # Simulate transfer_to_agent call
-    import json
-
-    from lite_agent.types import ToolCall, ToolCallFunction
 
     transfer_call = ToolCall(
         type="function",
@@ -209,13 +208,14 @@
     )
 
     # Handle transfer and collect events
-    chunks = []
-    async for chunk in runner._handle_tool_calls([transfer_call], ["function_call_output"]):
-        chunks.append(chunk)
+    chunks = [chunk async for chunk in runner._handle_tool_calls([transfer_call], ["function_call_output"])]
 
     print(f"Events generated: {len(chunks)}")
     for i, chunk in enumerate(chunks):
-
+        if isinstance(chunk, FunctionCallOutputEvent):
+            print(f" Event {i + 1}: {chunk.type} - {chunk.content}")
+        else:
+            print(f" Event {i + 1}: {chunk.type}")
 
     print(f"Agent after transfer: {runner.agent.name}")
 
@@ -232,13 +232,14 @@
     )
 
     # Handle parent transfer and collect events
-    chunks = []
-    async for chunk in runner._handle_tool_calls([parent_transfer_call], ["function_call_output"]):
-        chunks.append(chunk)
+    chunks = [chunk async for chunk in runner._handle_tool_calls([parent_transfer_call], ["function_call_output"])]
 
     print(f"Events generated: {len(chunks)}")
     for i, chunk in enumerate(chunks):
-
+        if isinstance(chunk, FunctionCallOutputEvent):
+            print(f" Event {i + 1}: {chunk.type} - {chunk.content}")
+        else:
+            print(f" Event {i + 1}: {chunk.type}")
 
     print(f"Agent after parent transfer: {runner.agent.name}")
 
@@ -265,13 +266,14 @@
         index=1,
     )
 
-    chunks = []
-    async for chunk in runner._handle_tool_calls([transfer_call_1, transfer_call_2], ["function_call_output"]):
-        chunks.append(chunk)
+    chunks = [chunk async for chunk in runner._handle_tool_calls([transfer_call_1, transfer_call_2], ["function_call_output"])]
 
     print(f"Events generated: {len(chunks)}")
     for i, chunk in enumerate(chunks):
-
+        if isinstance(chunk, FunctionCallOutputEvent):
+            print(f" Event {i + 1}: {chunk.content}")
+        else:
+            print(f" Event {i + 1}: {chunk.type}")
 
     print(f"Final agent: {runner.agent.name}")
 

lite_agent-0.10.0/examples/knowledge/main.py

@@ -0,0 +1,67 @@
+import asyncio
+import logging
+
+from rich.logging import RichHandler
+
+from lite_agent.agent import Agent
+from lite_agent.chat_display import display_messages
+from lite_agent.client import LiteLLMClient
+from lite_agent.runner import Runner
+
+logging.basicConfig(
+    level=logging.WARNING,
+    format="%(message)s",
+    datefmt="[%X]",
+    handlers=[RichHandler(rich_tracebacks=True)],
+)
+
+logger = logging.getLogger("lite_agent")
+logger.setLevel(logging.DEBUG)
+
+
+def list_knowledges() -> list[str]:
+    """
+    List the knowledge entries in the system, returning their titles.
+    """
+    return ["How the apple price is calculated", "The banana price", "The eggplant price", "The mango price", "Prices of other fruits and vegetables"]
+
+
+def read_knowledge(knowledge: str) -> str:
+    """
+    Fetch the content of a knowledge entry by its title.
+    """
+    match knowledge:
+        case "How the apple price is calculated":
+            return "The apple price is twice the banana price minus the eggplant price"
+        case "The banana price":
+            return "The banana price is 3 yuan"
+        case "Prices of other fruits and vegetables":
+            return "The eggplant price is 5 yuan"
+        case "The eggplant price":
+            return "For the eggplant price, see the other knowledge entries"
+        case "The mango price":
+            return "The mango price is 7 yuan"
+        case _:
+            return "No such knowledge entry"
+
+
+agent = Agent(
+    model=LiteLLMClient(model="gpt-5-nano", reasoning={"effort": "minimal"}),
+    name="Assistant",
+    instructions="You are a helpful assistant.",
+    tools=[list_knowledges, read_knowledge],
+)
+
+
+async def main():
+    runner = Runner(agent, streaming=True)
+    await runner.run_until_complete(
+        "What is the apple price? Check the knowledge base carefully and tell me the answer.",
+        includes=["usage", "assistant_message", "function_call", "function_call_output", "timing"],
+    )
+
+    display_messages(runner.messages)
+
+
+if __name__ == "__main__":
+    asyncio.run(main())

{lite_agent-0.8.0 → lite_agent-0.10.0}/examples/non_streaming.py

@@ -3,6 +3,7 @@ Simple example demonstrating non-streaming mode in LiteAgent.
 """
 
 import asyncio
+from datetime import datetime, timezone
 
 from lite_agent import Agent, Runner
 
@@ -33,9 +34,7 @@ async def main():
     # Example with a simple tool
     def get_time() -> str:
         """Get the current time."""
-
-
-        return f"Current time: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}"
+        return f"Current time: {datetime.now(timezone.utc).strftime('%Y-%m-%d %H:%M:%S')}"
 
     agent_with_tools = Agent(
         name="TimeAgent",

lite_agent-0.10.0/examples/reasoning_example.py

@@ -0,0 +1,127 @@
+import asyncio
+import logging
+
+from rich.logging import RichHandler
+
+from lite_agent.agent import Agent
+from lite_agent.client import LiteLLMClient
+
+logging.basicConfig(
+    level=logging.WARNING,
+    format="%(message)s",
+    datefmt="[%X]",
+    handlers=[RichHandler(rich_tracebacks=True)],
+)
+
+logger = logging.getLogger("lite_agent")
+logger.setLevel(logging.DEBUG)
+
+
+async def analyze_complex_problem(problem_description: str) -> str:
+    """Analyze a complex problem and return insights."""
+    return f"Analysis for: {problem_description}\n- Key factors identified\n- Potential solutions outlined\n- Risk assessment completed"
+
+
+async def demo_reasoning_configurations():
+    """Demonstrate the different ways to configure reasoning."""
+    print("=== Reasoning configuration demo ===\n")
+
+    # 1. Set the reasoning effort via LiteLLMClient (string form)
+    print("1. Setting reasoning effort via LiteLLMClient:")
+    agent_with_reasoning = Agent(
+        model=LiteLLMClient(model="gpt-4o-mini", reasoning="high"),
+        name="Reasoning Assistant",
+        instructions="You are an in-depth analysis assistant; use careful reasoning to provide thorough analyses.",
+    )
+    print(f" Client reasoning effort: {agent_with_reasoning.client.reasoning_effort}")
+    print(f" Client thinking config: {agent_with_reasoning.client.thinking_config}")
+
+    # 2. Finer-grained control via LiteLLMClient (dict form)
+    print("\n2. Fine-grained control via LiteLLMClient:")
+    agent_with_thinking = Agent(
+        model=LiteLLMClient(
+            model="claude-3-5-sonnet-20241022",  # Anthropic models support thinking
+            reasoning={"type": "enabled", "budget_tokens": 2048},
+        ),
+        name="Thinking Assistant",
+        instructions="You are a thoughtful assistant.",
+    )
+    print(f" Client reasoning effort: {agent_with_thinking.client.reasoning_effort}")
+    print(f" Client thinking config: {agent_with_thinking.client.thinking_config}")
+
+    # 3. Use the {"effort": "value"} format (recommended)
+    print('\n3. Using the {"effort": "value"} format:')
+    agent_effort_reasoning = Agent(
+        model=LiteLLMClient(
+            model="o1-mini",  # OpenAI reasoning model
+            reasoning={"effort": "medium"},
+        ),
+        name="Effort Reasoning Assistant",
+        instructions="You are an advanced reasoning assistant.",
+    )
+    print(f" Client reasoning effort: {agent_effort_reasoning.client.reasoning_effort}")
+    print(f" Client thinking config: {agent_effort_reasoning.client.thinking_config}")
+
+    # 4. Enable reasoning with a boolean (defaults to the medium level)
+    print("\n4. Enabling reasoning with a boolean:")
+    agent_bool_reasoning = Agent(
+        model=LiteLLMClient(model="o1-mini", reasoning=True),  # Boolean value; the default medium level is used
+        name="Boolean Reasoning Assistant",
+        instructions="You are an advanced reasoning assistant.",
+    )
+    print(f" Client reasoning effort: {agent_bool_reasoning.client.reasoning_effort}")
+    print(f" Client thinking config: {agent_bool_reasoning.client.thinking_config}")
+
+    # 5. Runtime reasoning parameter configuration
+    print("\n5. Runtime reasoning configuration:")
+    print(" - Reasoning config can now only be set when the LiteLLMClient is initialized")
+    print(" - To adjust it dynamically, create separate Agent instances")
+
+    # Note: since no real API key is configured, no real API calls are made
+    print("\n✓ All reasoning configurations were set up successfully!")
+
+
+async def main():
+    """Main demo entry point."""
+    await demo_reasoning_configurations()
+
+    print("\n" + "=" * 60)
+    print("Reasoning configuration usage notes:")
+    print("=" * 60)
+    print("""
+    1. reasoning parameter types (set on LiteLLMClient):
+       - String: "minimal", "low", "medium", "high" -> reasoning_effort
+       - {"effort": "value"}: {"effort": "minimal"} -> reasoning_effort (recommended)
+       - Dict: {"type": "enabled", "budget_tokens": N} -> thinking_config
+       - Boolean: True -> "medium", False -> disabled
+
+    2. Usage:
+    ```python
+    # Option 1: string form
+    agent = Agent(
+        model=LiteLLMClient(model="gpt-4o-mini", reasoning="high")
+    )
+
+    # Option 2: {"effort": "value"} form (recommended)
+    agent = Agent(
+        model=LiteLLMClient(model="gpt-4o-mini", reasoning={"effort": "minimal"})
+    )
+
+    # Option 3: dict form (Anthropic models)
+    agent = Agent(
+        model=LiteLLMClient(
+            model="claude-3-5-sonnet-20241022",
+            reasoning={"type": "enabled", "budget_tokens": 2048}
+        )
+    )
+    ```
+
+    3. Model compatibility:
+       - OpenAI: o1, o3, gpt-4o-mini, etc. support reasoning_effort
+       - Anthropic: claude-3.5-sonnet, etc. support thinking
+       - Others: converted automatically via LiteLLM
+    """)
+
+
+if __name__ == "__main__":
+    asyncio.run(main())

{lite_agent-0.8.0 → lite_agent-0.10.0}/examples/response_api_example.py

@@ -86,7 +86,7 @@ async def main():
         )
 
         # This will raise an error when trying to convert
-        converted_messages = agent._convert_responses_to_completions_format(runner.messages)
+        converted_messages = agent._convert_responses_to_completions_format(runner.messages)
         print("❌ This should not be reached!")
 
     except ValueError as e:
@@ -108,7 +108,7 @@ async def main():
     print("\n=== Converted messages for LLM API ===")
     # Note: In a real application, this conversion happens automatically
     # We're accessing the private method here just for demonstration
-    converted_messages = agent._convert_responses_to_completions_format(runner.messages)
+    converted_messages = agent._convert_responses_to_completions_format(runner.messages)
     for i, message in enumerate(converted_messages):
         print(f"Converted Message {i + 1}:")
         print(f"  {message}")

{lite_agent-0.8.0 → lite_agent-0.10.0}/examples/simple_debug.py

@@ -3,6 +3,7 @@ Simple debug to check non-streaming response.
 """
 
 import asyncio
+import traceback
 
 from lite_agent import Agent, Runner
 
@@ -29,8 +30,6 @@ async def main():
             print(f"First chunk: {chunks[0]}")
     except Exception as e:
         print(f"Error: {e}")
-        import traceback
-
         traceback.print_exc()
 
 

{lite_agent-0.8.0 → lite_agent-0.10.0}/examples/stop_before_functions.py

@@ -60,7 +60,7 @@ async def main():
     print(f"\nHas require confirm tools: {await runner.has_require_confirm_tools()}")
 
     # Continue execution
-    resp = runner.
+    resp = runner.run(None,
         includes=["usage", "assistant_message", "function_call", "function_call_output"],
     )
     async for chunk in resp:
@@ -82,7 +82,7 @@ async def main():
     print(f"\nHas require confirm tools: {await runner2.has_require_confirm_tools()}")
 
     # Continue execution
-    resp = runner2.
+    resp = runner2.run(None,
         includes=["usage", "assistant_message", "function_call", "function_call_output"],
     )
     async for chunk in resp:
@@ -103,7 +103,7 @@ async def main():
     print(f"\nHas require confirm tools (decorator): {await runner3.has_require_confirm_tools()}")
 
     # Continue execution
-    resp = runner3.
+    resp = runner3.run(None,
         includes=["usage", "assistant_message", "function_call", "function_call_output"],
     )
     async for chunk in resp:

{lite_agent-0.8.0 → lite_agent-0.10.0}/examples/terminal.py

@@ -60,11 +60,11 @@ async def main():
                 validate_while_typing=False,
             )
             if user_input.lower() in {"y", "yes"}:
-                response = runner.
+                response = runner.run(None)
                 async for chunk in response:
                     await rich_channel.handle(chunk)
             else:
-                response = runner.
+                response = runner.run(None)
                 rich_channel.new_turn()
         except (EOFError, KeyboardInterrupt):
             break