hdsp-jupyter-extension 2.0.7__py3-none-any.whl → 2.0.10__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agent_server/core/embedding_service.py +67 -46
- agent_server/core/rag_manager.py +40 -17
- agent_server/core/retriever.py +12 -6
- agent_server/core/vllm_embedding_service.py +246 -0
- agent_server/langchain/ARCHITECTURE.md +7 -51
- agent_server/langchain/agent.py +39 -20
- agent_server/langchain/custom_middleware.py +206 -62
- agent_server/langchain/hitl_config.py +6 -9
- agent_server/langchain/llm_factory.py +85 -1
- agent_server/langchain/logging_utils.py +52 -13
- agent_server/langchain/prompts.py +85 -45
- agent_server/langchain/tools/__init__.py +14 -10
- agent_server/langchain/tools/file_tools.py +266 -40
- agent_server/langchain/tools/file_utils.py +334 -0
- agent_server/langchain/tools/jupyter_tools.py +0 -1
- agent_server/langchain/tools/lsp_tools.py +264 -0
- agent_server/langchain/tools/resource_tools.py +12 -12
- agent_server/langchain/tools/search_tools.py +3 -158
- agent_server/main.py +7 -0
- agent_server/routers/langchain_agent.py +207 -102
- agent_server/routers/rag.py +8 -3
- hdsp_agent_core/models/rag.py +15 -1
- hdsp_agent_core/services/rag_service.py +6 -1
- {hdsp_jupyter_extension-2.0.7.data → hdsp_jupyter_extension-2.0.10.data}/data/share/jupyter/labextensions/hdsp-agent/build_log.json +1 -1
- {hdsp_jupyter_extension-2.0.7.data → hdsp_jupyter_extension-2.0.10.data}/data/share/jupyter/labextensions/hdsp-agent/package.json +3 -2
- hdsp_jupyter_extension-2.0.7.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.4770ec0fb2d173b6deb4.js → hdsp_jupyter_extension-2.0.10.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.2d9fb488c82498c45c2d.js +251 -5
- hdsp_jupyter_extension-2.0.10.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.2d9fb488c82498c45c2d.js.map +1 -0
- hdsp_jupyter_extension-2.0.7.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.29cf4312af19e86f82af.js → hdsp_jupyter_extension-2.0.10.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.dc6434bee96ab03a0539.js +1831 -274
- hdsp_jupyter_extension-2.0.10.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.dc6434bee96ab03a0539.js.map +1 -0
- hdsp_jupyter_extension-2.0.7.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.61343eb4cf0577e74b50.js → hdsp_jupyter_extension-2.0.10.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.4a252df3ade74efee8d6.js +11 -9
- hdsp_jupyter_extension-2.0.10.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.4a252df3ade74efee8d6.js.map +1 -0
- jupyter_ext/labextension/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js-node_modules-782ee5.d9ed8645ef1d311657d8.js → hdsp_jupyter_extension-2.0.10.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js.24edcc52a1c014a8a5f0.js +2 -209
- hdsp_jupyter_extension-2.0.10.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js.24edcc52a1c014a8a5f0.js.map +1 -0
- jupyter_ext/labextension/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.36b49c71871f98d4f549.js → hdsp_jupyter_extension-2.0.10.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.19ecf6babe00caff6b8a.js +209 -2
- hdsp_jupyter_extension-2.0.10.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.19ecf6babe00caff6b8a.js.map +1 -0
- jupyter_ext/labextension/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.2e13df4ea61496e95d45.js → hdsp_jupyter_extension-2.0.10.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.1f5038488cdfd8b3a85d.js +212 -3
- hdsp_jupyter_extension-2.0.10.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.1f5038488cdfd8b3a85d.js.map +1 -0
- {hdsp_jupyter_extension-2.0.7.dist-info → hdsp_jupyter_extension-2.0.10.dist-info}/METADATA +1 -3
- hdsp_jupyter_extension-2.0.10.dist-info/RECORD +144 -0
- jupyter_ext/__init__.py +18 -0
- jupyter_ext/_version.py +1 -1
- jupyter_ext/handlers.py +176 -1
- jupyter_ext/labextension/build_log.json +1 -1
- jupyter_ext/labextension/package.json +3 -2
- jupyter_ext/labextension/static/{frontend_styles_index_js.4770ec0fb2d173b6deb4.js → frontend_styles_index_js.2d9fb488c82498c45c2d.js} +251 -5
- jupyter_ext/labextension/static/frontend_styles_index_js.2d9fb488c82498c45c2d.js.map +1 -0
- jupyter_ext/labextension/static/{lib_index_js.29cf4312af19e86f82af.js → lib_index_js.dc6434bee96ab03a0539.js} +1831 -274
- jupyter_ext/labextension/static/lib_index_js.dc6434bee96ab03a0539.js.map +1 -0
- jupyter_ext/labextension/static/{remoteEntry.61343eb4cf0577e74b50.js → remoteEntry.4a252df3ade74efee8d6.js} +11 -9
- jupyter_ext/labextension/static/remoteEntry.4a252df3ade74efee8d6.js.map +1 -0
- hdsp_jupyter_extension-2.0.7.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js-node_modules-782ee5.d9ed8645ef1d311657d8.js → jupyter_ext/labextension/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js.24edcc52a1c014a8a5f0.js +2 -209
- jupyter_ext/labextension/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js.24edcc52a1c014a8a5f0.js.map +1 -0
- hdsp_jupyter_extension-2.0.7.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.36b49c71871f98d4f549.js → jupyter_ext/labextension/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.19ecf6babe00caff6b8a.js +209 -2
- jupyter_ext/labextension/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.19ecf6babe00caff6b8a.js.map +1 -0
- hdsp_jupyter_extension-2.0.7.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.2e13df4ea61496e95d45.js → jupyter_ext/labextension/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.1f5038488cdfd8b3a85d.js +212 -3
- jupyter_ext/labextension/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.1f5038488cdfd8b3a85d.js.map +1 -0
- hdsp_jupyter_extension-2.0.7.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.4770ec0fb2d173b6deb4.js.map +0 -1
- hdsp_jupyter_extension-2.0.7.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.29cf4312af19e86f82af.js.map +0 -1
- hdsp_jupyter_extension-2.0.7.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.61343eb4cf0577e74b50.js.map +0 -1
- hdsp_jupyter_extension-2.0.7.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js-node_modules-782ee5.d9ed8645ef1d311657d8.js.map +0 -1
- hdsp_jupyter_extension-2.0.7.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.36b49c71871f98d4f549.js.map +0 -1
- hdsp_jupyter_extension-2.0.7.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.2e13df4ea61496e95d45.js.map +0 -1
- hdsp_jupyter_extension-2.0.7.dist-info/RECORD +0 -141
- jupyter_ext/labextension/static/frontend_styles_index_js.4770ec0fb2d173b6deb4.js.map +0 -1
- jupyter_ext/labextension/static/lib_index_js.29cf4312af19e86f82af.js.map +0 -1
- jupyter_ext/labextension/static/remoteEntry.61343eb4cf0577e74b50.js.map +0 -1
- jupyter_ext/labextension/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js-node_modules-782ee5.d9ed8645ef1d311657d8.js.map +0 -1
- jupyter_ext/labextension/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.36b49c71871f98d4f549.js.map +0 -1
- jupyter_ext/labextension/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.2e13df4ea61496e95d45.js.map +0 -1
- {hdsp_jupyter_extension-2.0.7.data → hdsp_jupyter_extension-2.0.10.data}/data/etc/jupyter/jupyter_server_config.d/hdsp_jupyter_extension.json +0 -0
- {hdsp_jupyter_extension-2.0.7.data → hdsp_jupyter_extension-2.0.10.data}/data/share/jupyter/labextensions/hdsp-agent/install.json +0 -0
- {hdsp_jupyter_extension-2.0.7.data → hdsp_jupyter_extension-2.0.10.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b80.c095373419d05e6f141a.js +0 -0
- {hdsp_jupyter_extension-2.0.7.data → hdsp_jupyter_extension-2.0.10.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b80.c095373419d05e6f141a.js.map +0 -0
- {hdsp_jupyter_extension-2.0.7.data → hdsp_jupyter_extension-2.0.10.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b81.61e75fb98ecff46cf836.js +0 -0
- {hdsp_jupyter_extension-2.0.7.data → hdsp_jupyter_extension-2.0.10.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b81.61e75fb98ecff46cf836.js.map +0 -0
- {hdsp_jupyter_extension-2.0.7.data → hdsp_jupyter_extension-2.0.10.data}/data/share/jupyter/labextensions/hdsp-agent/static/style.js +0 -0
- {hdsp_jupyter_extension-2.0.7.data → hdsp_jupyter_extension-2.0.10.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_babel_runtime_helpers_esm_extends_js-node_modules_emotion_serialize_dist-051195.e2553aab0c3963b83dd7.js +0 -0
- {hdsp_jupyter_extension-2.0.7.data → hdsp_jupyter_extension-2.0.10.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_babel_runtime_helpers_esm_extends_js-node_modules_emotion_serialize_dist-051195.e2553aab0c3963b83dd7.js.map +0 -0
- {hdsp_jupyter_extension-2.0.7.data → hdsp_jupyter_extension-2.0.10.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_styled_dist_emotion-styled_browser_development_esm_js.661fb5836f4978a7c6e1.js +0 -0
- {hdsp_jupyter_extension-2.0.7.data → hdsp_jupyter_extension-2.0.10.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_styled_dist_emotion-styled_browser_development_esm_js.661fb5836f4978a7c6e1.js.map +0 -0
- {hdsp_jupyter_extension-2.0.7.data → hdsp_jupyter_extension-2.0.10.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_index_js.985697e0162d8d088ca2.js +0 -0
- {hdsp_jupyter_extension-2.0.7.data → hdsp_jupyter_extension-2.0.10.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_index_js.985697e0162d8d088ca2.js.map +0 -0
- {hdsp_jupyter_extension-2.0.7.dist-info → hdsp_jupyter_extension-2.0.10.dist-info}/WHEEL +0 -0
- {hdsp_jupyter_extension-2.0.7.dist-info → hdsp_jupyter_extension-2.0.10.dist-info}/licenses/LICENSE +0 -0
@@ -151,13 +151,10 @@ jupyter_ext/
 ```python
 - jupyter_cell_tool # Python 코드 실행
 - markdown_tool # 마크다운 셀 추가
-- final_answer_tool # 작업 완료 및 요약
 - read_file_tool # 파일 읽기
 - write_file_tool # 파일 쓰기
-- list_files_tool # 디렉토리 목록
-- search_workspace_tool # 워크스페이스 검색 (grep/rg)
 - search_notebook_cells_tool # 노트북 셀 검색
-- execute_command_tool # 쉘 명령 실행
+- execute_command_tool # 쉘 명령 실행 (파일 검색은 find/grep 사용)
 - check_resource_tool # 리소스 확인
 ```
 
@@ -441,8 +438,6 @@ non-HITL 도구 실행 후 continuation 프롬프트를 주입합니다.
 NON_HITL_TOOLS = {
     "markdown_tool",
     "read_file_tool",
-    "list_files_tool",
-    "search_workspace_tool",
     "search_notebook_cells_tool",
     "write_todos",
 }
@@ -508,8 +503,7 @@ LLM 호출 횟수를 제한합니다.
 
 **설정**:
 ```python
-- write_todos: run_limit=
-- list_files_tool: run_limit=5, exit_behavior="continue"
+- write_todos: run_limit=20, exit_behavior="continue"
 ```
 
 ### 9. `SummarizationMiddleware` (LangChain 내장)
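
For reference, the corresponding wiring in `agent.py` (see the agent.py hunk further down) amounts to the following sketch. The constructor arguments are taken from that hunk; the import path and the surrounding `middleware` list are assumptions for illustration only.

```python
from langchain.agents.middleware import ToolCallLimitMiddleware  # import path assumed

middleware = []  # assembled inside create_simple_chat_agent (assumed context)

# Allow at most 20 write_todos calls per user message; "continue" lets the
# agent keep using other tools once the limit is reached.
write_todos_limit = ToolCallLimitMiddleware(
    tool_name="write_todos",
    run_limit=20,
    exit_behavior="continue",
)
middleware.append(write_todos_limit)
```
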
@@ -634,52 +628,14 @@ Python 코드를 Jupyter 셀에서 실행합니다.
 **특징**:
 - HITL 대상 (사용자 승인 필요)
 
-#### `list_files_tool`
-디렉토리 목록을 가져옵니다.
-
-**파라미터**:
-- `path`: 디렉토리 경로 (기본 ".")
-- `recursive`: 재귀 탐색 여부 (기본 False)
-
-**반환**:
-```python
-{
-    "tool": "list_files",
-    "parameters": {"path": ".", "recursive": False},
-    "status": "completed",
-    "files": ["file1.py", "file2.csv", ...]
-}
-```
-
 ---
 
 ### Search Tools (`search_tools.py`)
 
-
-
-
-
-- `pattern`: 정규식 패턴
-- `file_types`: 파일 타입 필터 (예: ["py", "md"])
-- `path`: 검색 경로 (기본 ".")
-
-**반환**:
-```python
-{
-    "tool": "search_workspace",
-    "parameters": {"pattern": "...", "file_types": ["py"], "path": "."},
-    "status": "completed",
-    "results": [
-        {"file": "file1.py", "line_number": 10, "line": "..."},
-        ...
-    ],
-    "command": "rg ... (또는 grep ...)"
-}
-```
-
-**특징**:
-- ripgrep 우선 사용 (속도)
-- 없으면 grep 사용
+> **Note**: 파일 검색 기능은 `execute_command_tool`을 통해 `find`/`grep` 명령을 직접 사용합니다.
+>
+> - 파일명 검색: `execute_command_tool(command="find . -iname '*pattern*' 2>/dev/null")`
+> - 파일 내용 검색: `execute_command_tool(command="grep -rn 'pattern' --include='*.py' .")`
 
 #### `search_notebook_cells_tool`
 Jupyter 노트북 셀에서 패턴을 검색합니다.
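
With `list_files_tool` and `search_workspace_tool` removed, file discovery now goes through `execute_command_tool`. As a hedged illustration, the two commands quoted in the note map onto tool-call payloads of roughly this shape; the `{"tool": ..., "arguments": ...}` format mirrors the JSON examples in the middleware code below, and the variable names are hypothetical.

```python
# Hypothetical payloads; the shell commands are the ones quoted in the note above.
find_by_name = {
    "tool": "execute_command_tool",
    "arguments": {"command": "find . -iname '*pattern*' 2>/dev/null"},
}
grep_contents = {
    "tool": "execute_command_tool",
    "arguments": {"command": "grep -rn 'pattern' --include='*.py' ."},
}
```
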
@@ -961,7 +917,7 @@ return
 - **HITL**: 사용자 승인 필요
   - `jupyter_cell_tool`, `execute_command_tool`, `write_file_tool`
 - **non-HITL**: 즉시 실행
-  - `markdown_tool`, `read_file_tool`, `
+  - `markdown_tool`, `read_file_tool`, `search_*_tool`
 - **클라이언트 실행**: 서버에서 실행하지 않음
   - `check_resource_tool`: CheckResourceHandler에서 처리
 
agent_server/langchain/agent.py CHANGED

@@ -23,14 +23,15 @@ from agent_server.langchain.prompts import (
 )
 from agent_server.langchain.tools import (
     check_resource_tool,
+    diagnostics_tool,
+    edit_file_tool,
     execute_command_tool,
-    final_answer_tool,
     jupyter_cell_tool,
-    list_files_tool,
     markdown_tool,
+    multiedit_file_tool,
     read_file_tool,
+    references_tool,
     search_notebook_cells_tool,
-    search_workspace_tool,
     write_file_tool,
 )
 
@@ -42,14 +43,15 @@ def _get_all_tools():
     return [
         jupyter_cell_tool,
         markdown_tool,
-        final_answer_tool,
         read_file_tool,
         write_file_tool,
-
-
+        edit_file_tool,
+        multiedit_file_tool,
         search_notebook_cells_tool,
         execute_command_tool,
         check_resource_tool,
+        diagnostics_tool,
+        references_tool,
     ]
 
 
@@ -107,7 +109,6 @@ def create_simple_chat_agent(
 
     # Configure middleware
     middleware = []
-
     # Add empty response handler middleware
     handle_empty_response = create_handle_empty_response_middleware(wrap_model_call)
     middleware.append(handle_empty_response)
@@ -117,7 +118,9 @@ def create_simple_chat_agent(
     middleware.append(limit_tool_calls)
 
     # Add tool args normalization middleware (convert list args to strings based on schema)
-    normalize_tool_args = create_normalize_tool_args_middleware(
+    normalize_tool_args = create_normalize_tool_args_middleware(
+        wrap_model_call, tools=tools
+    )
     middleware.append(normalize_tool_args)
 
     # Add continuation prompt middleware
@@ -156,22 +159,14 @@ def create_simple_chat_agent(
     logger.info("Added ModelCallLimitMiddleware with run_limit=30")
 
     # ToolCallLimitMiddleware: Prevent specific tools from being called too many times
-    #
+    # run_limit resets automatically per user message
     write_todos_limit = ToolCallLimitMiddleware(
         tool_name="write_todos",
-        run_limit=
-        exit_behavior="continue",  # Let agent continue with other tools
-    )
-    middleware.append(write_todos_limit)
-
-    # Limit list_files_tool to prevent excessive directory listing
-    list_files_limit = ToolCallLimitMiddleware(
-        tool_name="list_files_tool",
-        run_limit=5,  # Max 5 list_files calls per user message
+        run_limit=20,  # Max 20 write_todos calls per user message
         exit_behavior="continue",
     )
-    middleware.append(
-    logger.info("Added ToolCallLimitMiddleware for write_todos
+    middleware.append(write_todos_limit)
+    logger.info("Added ToolCallLimitMiddleware for write_todos (20/msg)")
 
     # Add SummarizationMiddleware to maintain context across cycles
     summary_llm = create_summarization_llm(llm_config)
@@ -210,6 +205,30 @@ Example: "데이터를 로드하겠습니다." then call jupyter_cell_tool.
         system_prompt = system_prompt + "\n" + gemini_content_prompt
         logger.info("Added Gemini 2.5 Flash specific prompt for content inclusion")
 
+    # Add vLLM/gpt-oss specific prompt for Korean responses and proper todo structure
+    provider = llm_config.get("provider", "")
+    if provider == "vllm":
+        vllm_prompt = """
+## 🔴 중요: 한국어로 응답하세요
+- 모든 응답, 설명, todo 항목은 반드시 한국어로 작성하세요.
+- 코드 주석과 출력 설명도 한국어로 작성하세요.
+- 영어로 응답하지 마세요.
+
+## 🔴 MANDATORY: Todo List Structure
+When creating todos with write_todos, you MUST:
+1. Write all todo items in Korean
+2. ALWAYS include "작업 요약 및 다음단계 제시" as the LAST todo item
+3. Example structure:
+   - 데이터 로드 및 확인
+   - 데이터 분석 수행
+   - 작업 요약 및 다음단계 제시 ← 반드시 마지막에 포함!
+
+## 🔴 IMPORTANT: Never return empty responses
+If you have nothing to say, call a tool instead. NEVER return an empty response.
+"""
+        system_prompt = system_prompt + "\n" + vllm_prompt
+        logger.info("Added vLLM/gpt-oss specific prompt for Korean responses")
+
     logger.info("SimpleChatAgent system_prompt: %s", system_prompt)
 
     # Create agent with checkpointer (required for HITL)
@@ -78,6 +78,20 @@ def parse_json_tool_call(text) -> Optional[Dict[str, Any]]:
     return None
 
 
+def normalize_tool_name(tool_name: str) -> str:
+    """Normalize tool name to match registered tool names.
+
+    Rules:
+    - write_todos_tool → write_todos (TodoListMiddleware exception)
+    - other tools without _tool suffix → add _tool suffix
+    """
+    if tool_name == "write_todos_tool":
+        return "write_todos"
+    if not tool_name.endswith("_tool") and tool_name != "write_todos":
+        return f"{tool_name}_tool"
+    return tool_name
+
+
 def create_tool_call_message(tool_name: str, arguments: Dict[str, Any]) -> AIMessage:
     """Create AIMessage with tool_calls from parsed JSON.
 
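
For reference, the normalization rules above behave as follows; this is an illustrative usage example, not part of the package diff:

```python
# Expected behaviour of normalize_tool_name as defined above.
assert normalize_tool_name("write_todos_tool") == "write_todos"         # TodoListMiddleware exception
assert normalize_tool_name("write_todos") == "write_todos"              # already correct
assert normalize_tool_name("read_file") == "read_file_tool"             # missing suffix appended
assert normalize_tool_name("jupyter_cell_tool") == "jupyter_cell_tool"  # left unchanged
```
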
@@ -88,9 +102,7 @@ def create_tool_call_message(tool_name: str, arguments: Dict[str, Any]) -> AIMessage:
     Returns:
         AIMessage with properly formatted tool_calls
     """
-
-    if not tool_name.endswith("_tool"):
-        tool_name = f"{tool_name}_tool"
+    tool_name = normalize_tool_name(tool_name)
 
     return AIMessage(
         content="",
@@ -127,6 +139,19 @@ def create_handle_empty_response_middleware(wrap_model_call):
     def handle_empty_response(request, handler):
         max_retries = 2
 
+        # Check if all todos are completed - if so, skip processing entirely
+        todos = request.state.get("todos", [])
+        if todos:
+            pending_todos = [
+                t for t in todos if t.get("status") in ("pending", "in_progress")
+            ]
+            if not pending_todos:
+                logger.info(
+                    "All %d todos completed - skipping handle_empty_response middleware",
+                    len(todos),
+                )
+                return handler(request)
+
         # Check if last message is final_answer_tool result - if so, don't retry/synthesize
         # This allows agent to naturally terminate after final_answer_tool
         messages = request.messages
@@ -194,6 +219,25 @@ def create_handle_empty_response_middleware(wrap_model_call):
             # Invalid response - retry with JSON schema prompt
             if response_message and attempt < max_retries:
                 reason = "text-only" if has_content else "empty"
+
+                json_prompt = _build_json_prompt(request, response_message, has_content)
+
+                # If _build_json_prompt returns None, skip retry and synthesize write_todos
+                # This happens when: all todos completed OR current todo is summary/next_steps
+                if json_prompt is None:
+                    logger.info(
+                        "Skipping retry for %s response, synthesizing write_todos with content",
+                        reason,
+                    )
+                    # Synthesize write_todos while preserving the content (summary)
+                    synthetic_message = _create_synthetic_final_answer(
+                        request, response_message, has_content
+                    )
+                    response = _replace_ai_message_in_response(
+                        response, synthetic_message
+                    )
+                    return response
+
                 logger.warning(
                     "Invalid AIMessage (%s) detected (attempt %d/%d). "
                     "Retrying with JSON schema prompt...",
@@ -202,16 +246,26 @@
                     max_retries + 1,
                 )
 
-                json_prompt = _build_json_prompt(request, response_message, has_content)
                 request = request.override(
                     messages=request.messages + [HumanMessage(content=json_prompt)]
                 )
                 continue
 
-        # Max retries exhausted - synthesize
+        # Max retries exhausted - synthesize write_todos to complete
         if response_message:
+            # Check if todos are already all completed - if so, just return
+            todos = request.state.get("todos", [])
+            pending_todos = [
+                t for t in todos if t.get("status") in ("pending", "in_progress")
+            ]
+            if todos and not pending_todos:
+                logger.info(
+                    "Max retries exhausted but all todos completed - returning response as-is"
+                )
+                return response
+
             logger.warning(
-                "Max retries exhausted. Synthesizing
+                "Max retries exhausted. Synthesizing write_todos to complete."
             )
             synthetic_message = _create_synthetic_final_answer(
                 request, response_message, has_content
@@ -262,14 +316,33 @@ def _build_json_prompt(request, response_message, has_content):
     """Build JSON-forcing prompt based on context."""
     todos = request.state.get("todos", [])
     pending_todos = [t for t in todos if t.get("status") in ("pending", "in_progress")]
+    in_progress_todos = [t for t in todos if t.get("status") == "in_progress"]
 
     if has_content:
-
+        # If all todos completed, don't force another tool call
+        if todos and not pending_todos:
+            return None  # Signal to skip retry
+
+        # If current in_progress todo is "작업 요약 및 다음단계 제시", accept text-only response
+        # The LLM is outputting the summary, we'll synthesize write_todos
+        if in_progress_todos:
+            current_todo = in_progress_todos[0].get("content", "")
+            if (
+                "작업 요약" in current_todo
+                or "다음단계" in current_todo
+                or "다음 단계" in current_todo
+            ):
+                logger.info(
+                    "Current todo is summary/next steps ('%s'), accepting text-only response",
+                    current_todo[:30],
+                )
+                return None  # Signal to skip retry - will synthesize write_todos with content
+
         return (
             f"{JSON_TOOL_SCHEMA}\n\n"
             f"Your previous response was text, not JSON. "
-            f"
-            f'{{"tool": "
+            f"Call the next appropriate tool to continue.\n"
+            f'Example: {{"tool": "jupyter_cell_tool", "arguments": {{"code": "print(\'hello\')"}}}}'
        )
     elif pending_todos:
         todo_list = ", ".join(t.get("content", "")[:20] for t in pending_todos[:3])
@@ -280,39 +353,62 @@ def _build_json_prompt(request, response_message, has_content):
             f"Call jupyter_cell_tool with Python code to complete the next task.\n"
             f"Example: {example_json}"
         )
+    elif not todos:
+        # No todos yet = new task starting, LLM must create todos or call a tool
+        # This happens when LLM returns empty response at the start of a new task
+        logger.info("No todos exist yet - forcing retry to create todos or call tool")
+        return (
+            f"{JSON_TOOL_SCHEMA}\n\n"
+            f"Your response was empty. You MUST call a tool to proceed.\n"
+            f"한국어로 응답하고, write_todos로 작업 목록을 만들거나 jupyter_cell_tool/read_file_tool을 호출하세요.\n"
+            f'Example: {{"tool": "write_todos", "arguments": {{"todos": [{{"content": "데이터 분석", "status": "in_progress"}}]}}}}'
+        )
     else:
+        # Todos exist but all completed - ask for summary
+        logger.info("All todos completed but response empty - asking for summary")
         return (
             f"{JSON_TOOL_SCHEMA}\n\n"
-            f"All tasks completed. Call
-            f
+            f"All tasks completed. Call markdown_tool to provide a summary in Korean.\n"
+            f"한국어로 작업 요약을 작성하세요.\n"
+            f'Example: {{"tool": "markdown_tool", "arguments": {{"content": "작업이 완료되었습니다."}}}}'
         )
 
 
 def _create_synthetic_final_answer(request, response_message, has_content):
-    """Create synthetic
-
-
+    """Create synthetic write_todos call to mark all todos as completed.
+
+    This triggers automatic session termination via router's all_todos_completed check.
+    Preserves the LLM's text content (summary) if present.
+    """
+    todos = request.state.get("todos", [])
+
+    # Mark all todos as completed
+    completed_todos = (
+        [{**todo, "status": "completed"} for todo in todos]
+        if todos
+        else [{"content": "작업 완료", "status": "completed"}]
+    )
+
+    # Preserve original content (summary JSON) if present
+    original_content = ""
+    if has_content and response_message and response_message.content:
+        original_content = response_message.content
         logger.info(
-            "
-            len(
+            "Creating synthetic write_todos with preserved content (length=%d)",
+            len(original_content),
         )
     else:
-
-
-
-        ]
-        summary = (
-            f"작업이 완료되었습니다. 완료된 항목: {', '.join(completed_todos[:5])}"
-            if completed_todos
-            else "작업이 완료되었습니다."
+        logger.info(
+            "Creating synthetic write_todos to mark %d todos as completed",
+            len(completed_todos),
         )
 
     return AIMessage(
-        content=
+        content=original_content,  # Preserve the summary content for UI
         tool_calls=[
             {
-                "name": "
-                "args": {"
+                "name": "write_todos",
+                "args": {"todos": completed_todos},
                 "id": str(uuid.uuid4()),
                 "type": "tool_call",
             }
@@ -363,70 +459,70 @@ def create_limit_tool_calls_middleware(wrap_model_call):
 
 def _get_string_params_from_tools(tools) -> Dict[str, set]:
     """Extract string parameter names from tool schemas.
-
+
     Analyzes each tool's Pydantic args_schema to determine which parameters
     should be strings (not arrays).
-
+
     Args:
         tools: List of LangChain tools
-
+
     Returns:
         Dict mapping tool names to sets of string parameter names
     """
     from typing import get_args, get_origin
-
+
     tool_string_params: Dict[str, set] = {}
-
+
     for tool in tools:
-        tool_name = getattr(tool,
+        tool_name = getattr(tool, "name", None)
         if not tool_name:
             continue
-
-        args_schema = getattr(tool,
+
+        args_schema = getattr(tool, "args_schema", None)
         if not args_schema:
             continue
-
+
         string_params = set()
-
+
         # Get field annotations from Pydantic model
        try:
-            annotations = getattr(args_schema,
+            annotations = getattr(args_schema, "__annotations__", {})
            for field_name, field_type in annotations.items():
                origin = get_origin(field_type)
-
+
                # Check if it's a simple str type
                if field_type is str:
                    string_params.add(field_name)
                # Check if it's Optional[str] (Union[str, None])
-                elif origin is type(None) or str(origin) ==
+                elif origin is type(None) or str(origin) == "typing.Union":
                    args = get_args(field_type)
                    if str in args:
                        string_params.add(field_name)
        except Exception as e:
            logger.debug("Failed to analyze schema for tool %s: %s", tool_name, e)
-
+
        if string_params:
            tool_string_params[tool_name] = string_params
            logger.debug("Tool %s string params: %s", tool_name, string_params)
-
+
    return tool_string_params
 
 
 def create_normalize_tool_args_middleware(wrap_model_call, tools=None):
     """Create middleware to normalize tool call arguments.
-
+
     Gemini sometimes returns tool call arguments with list values instead of strings.
     This middleware converts list arguments to strings ONLY for parameters that
     are defined as str in the tool's Pydantic schema.
-
+
     Args:
         wrap_model_call: LangChain's wrap_model_call decorator
         tools: Optional list of tools to analyze for type information
-
+
     Returns:
         Middleware function
     """
-
+
     # Build tool -> string params mapping from tool schemas
     tool_string_params: Dict[str, set] = {}
     if tools:
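
The schema analysis above uses standard `typing` introspection; a minimal standalone sketch of the `Optional[str]` case it targets (illustrative only):

```python
from typing import Optional, Union, get_args, get_origin

# Optional[str] is Union[str, None]: get_origin() yields typing.Union and
# str appears in get_args(), which is exactly what the middleware checks for.
field_type = Optional[str]
print(get_origin(field_type) is Union)  # True
print(str in get_args(field_type))      # True
print(get_origin(str))                  # None for a bare str annotation
```
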
@@ -436,25 +532,37 @@ def create_normalize_tool_args_middleware(wrap_model_call, tools=None):
             len(tool_string_params),
             {k: list(v) for k, v in tool_string_params.items()},
         )
-
+
     @wrap_model_call
     @_with_middleware_logging("normalize_tool_args")
     def normalize_tool_args(request, handler):
         response = handler(request)
-
+
         if hasattr(response, "result"):
             result = response.result
             messages = result if isinstance(result, list) else [result]
-
+
             for msg in messages:
                 if isinstance(msg, AIMessage) and hasattr(msg, "tool_calls"):
                     tool_calls = msg.tool_calls
                     if tool_calls:
                         for tool_call in tool_calls:
                             tool_name = tool_call.get("name", "")
+                            # Normalize tool name (e.g., write_todos_tool → write_todos)
+                            normalized_name = normalize_tool_name(tool_name)
+                            if normalized_name != tool_name:
+                                logger.info(
+                                    "Normalized tool name: %s → %s",
+                                    tool_name,
+                                    normalized_name,
+                                )
+                                tool_call["name"] = normalized_name
+                                tool_name = normalized_name
                             string_params = tool_string_params.get(tool_name, set())
-
-                            if "args" in tool_call and isinstance(
+
+                            if "args" in tool_call and isinstance(
+                                tool_call["args"], dict
+                            ):
                                 args = tool_call["args"]
                                 # Normalize list arguments to strings for str-typed params
                                 for key, value in args.items():
@@ -464,9 +572,12 @@ def create_normalize_tool_args_middleware(wrap_model_call, tools=None):
                                     for part in value:
                                         if isinstance(part, str):
                                             text_parts.append(part)
-                                        elif
+                                        elif (
+                                            isinstance(part, dict)
+                                            and part.get("type") == "text"
+                                        ):
                                             text_parts.append(part.get("text", ""))
-
+
                                     if text_parts:
                                         normalized_value = "\n".join(text_parts)
                                         logger.info(
@@ -476,9 +587,33 @@ def create_normalize_tool_args_middleware(wrap_model_call, tools=None):
                                             tool_name,
                                         )
                                         args[key] = normalized_value
-
+
+                                # Ensure write_todos includes summary todo as last item
+                                if tool_name == "write_todos" and "todos" in args:
+                                    todos = args["todos"]
+                                    if isinstance(todos, list) and len(todos) > 0:
+                                        # Check if any todo contains summary keywords
+                                        summary_keywords = ["작업 요약", "다음단계", "다음 단계", "요약 및"]
+                                        has_summary = any(
+                                            any(kw in todo.get("content", "") for kw in summary_keywords)
+                                            for todo in todos
+                                            if isinstance(todo, dict)
+                                        )
+
+                                        if not has_summary:
+                                            # Add summary todo as last item
+                                            summary_todo = {
+                                                "content": "작업 요약 및 다음단계 제시",
+                                                "status": "pending"
+                                            }
+                                            todos.append(summary_todo)
+                                            logger.info(
+                                                "Auto-added '작업 요약 및 다음단계 제시' to write_todos (total: %d todos)",
+                                                len(todos),
+                                            )
+
         return response
-
+
     return normalize_tool_args
 
 
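
Illustratively, the keyword check above only appends the summary item when none of the existing todos already mentions it; a small, self-contained example of that logic (not part of the diff):

```python
summary_keywords = ["작업 요약", "다음단계", "다음 단계", "요약 및"]
todos = [
    {"content": "데이터 로드 및 확인", "status": "pending"},
    {"content": "작업 요약 및 다음단계 제시", "status": "pending"},
]
has_summary = any(
    any(kw in todo.get("content", "") for kw in summary_keywords)
    for todo in todos
    if isinstance(todo, dict)
)
print(has_summary)  # True -> no extra summary todo is appended
```
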
@@ -516,16 +651,24 @@ def create_inject_continuation_middleware(wrap_model_call):
                 pass
 
         if tool_name in NON_HITL_TOOLS:
-            logger.info(
-                "Injecting continuation prompt after non-HITL tool: %s",
-                tool_name,
-            )
-
             todos = request.state.get("todos", [])
             pending_todos = [
                 t for t in todos if t.get("status") in ("pending", "in_progress")
             ]
 
+            # If all todos are completed, don't inject continuation - let router handle termination
+            if not pending_todos and todos:
+                logger.info(
+                    "All todos completed, skipping continuation for tool: %s",
+                    tool_name,
+                )
+                return handler(request)
+
+            logger.info(
+                "Injecting continuation prompt after non-HITL tool: %s",
+                tool_name,
+            )
+
             if pending_todos:
                 pending_list = ", ".join(
                     t.get("content", "")[:30] for t in pending_todos[:3]
@@ -536,9 +679,10 @@ def create_inject_continuation_middleware(wrap_model_call):
                     f"Call jupyter_cell_tool or the next appropriate tool."
                 )
             else:
+                # No todos yet - let agent create them
                 continuation = (
-                    f"Tool '{tool_name}' completed.
-                    f"
+                    f"Tool '{tool_name}' completed. "
+                    f"Create a todo list with write_todos if needed."
                 )
 
             new_messages = list(messages) + [