hdsp-jupyter-extension 2.0.27__py3-none-any.whl → 2.0.28__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agent_server/context_providers/__init__.py +4 -2
- agent_server/context_providers/actions.py +73 -7
- agent_server/context_providers/file.py +23 -23
- agent_server/langchain/__init__.py +2 -2
- agent_server/langchain/agent.py +18 -251
- agent_server/langchain/agent_factory.py +26 -4
- agent_server/langchain/agent_prompts/planner_prompt.py +22 -31
- agent_server/langchain/custom_middleware.py +268 -43
- agent_server/langchain/llm_factory.py +102 -54
- agent_server/langchain/logging_utils.py +1 -1
- agent_server/langchain/middleware/__init__.py +5 -0
- agent_server/langchain/middleware/content_injection_middleware.py +110 -0
- agent_server/langchain/middleware/subagent_events.py +88 -9
- agent_server/langchain/middleware/subagent_middleware.py +501 -245
- agent_server/langchain/prompts.py +5 -22
- agent_server/langchain/state_schema.py +44 -0
- agent_server/langchain/tools/jupyter_tools.py +4 -5
- agent_server/langchain/tools/tool_registry.py +6 -0
- agent_server/routers/chat.py +305 -2
- agent_server/routers/config.py +193 -8
- agent_server/routers/config_schema.py +254 -0
- agent_server/routers/context.py +31 -8
- agent_server/routers/langchain_agent.py +276 -155
- hdsp_agent_core/managers/config_manager.py +100 -1
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/build_log.json +1 -1
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/package.json +2 -2
- hdsp_jupyter_extension-2.0.27.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.b5e4416b4e07ec087aad.js → hdsp_jupyter_extension-2.0.28.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.55727265b00191e68d9a.js +479 -15
- hdsp_jupyter_extension-2.0.28.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.55727265b00191e68d9a.js.map +1 -0
- jupyter_ext/labextension/static/lib_index_js.67505497667f9c0a763d.js → hdsp_jupyter_extension-2.0.28.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.df05d90f366bfd5fa023.js +1287 -190
- hdsp_jupyter_extension-2.0.28.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.df05d90f366bfd5fa023.js.map +1 -0
- hdsp_jupyter_extension-2.0.27.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.4ab73bb5068405670214.js → hdsp_jupyter_extension-2.0.28.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.08fce819ee32e9d25175.js +3 -3
- jupyter_ext/labextension/static/remoteEntry.4ab73bb5068405670214.js.map → hdsp_jupyter_extension-2.0.28.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.08fce819ee32e9d25175.js.map +1 -1
- {hdsp_jupyter_extension-2.0.27.dist-info → hdsp_jupyter_extension-2.0.28.dist-info}/METADATA +1 -1
- {hdsp_jupyter_extension-2.0.27.dist-info → hdsp_jupyter_extension-2.0.28.dist-info}/RECORD +65 -63
- jupyter_ext/_version.py +1 -1
- jupyter_ext/handlers.py +41 -0
- jupyter_ext/labextension/build_log.json +1 -1
- jupyter_ext/labextension/package.json +2 -2
- jupyter_ext/labextension/static/{frontend_styles_index_js.b5e4416b4e07ec087aad.js → frontend_styles_index_js.55727265b00191e68d9a.js} +479 -15
- jupyter_ext/labextension/static/frontend_styles_index_js.55727265b00191e68d9a.js.map +1 -0
- hdsp_jupyter_extension-2.0.27.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.67505497667f9c0a763d.js → jupyter_ext/labextension/static/lib_index_js.df05d90f366bfd5fa023.js +1287 -190
- jupyter_ext/labextension/static/lib_index_js.df05d90f366bfd5fa023.js.map +1 -0
- jupyter_ext/labextension/static/{remoteEntry.4ab73bb5068405670214.js → remoteEntry.08fce819ee32e9d25175.js} +3 -3
- hdsp_jupyter_extension-2.0.27.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.4ab73bb5068405670214.js.map → jupyter_ext/labextension/static/remoteEntry.08fce819ee32e9d25175.js.map +1 -1
- agent_server/langchain/middleware/description_injector.py +0 -150
- hdsp_jupyter_extension-2.0.27.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.b5e4416b4e07ec087aad.js.map +0 -1
- hdsp_jupyter_extension-2.0.27.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.67505497667f9c0a763d.js.map +0 -1
- jupyter_ext/labextension/static/frontend_styles_index_js.b5e4416b4e07ec087aad.js.map +0 -1
- jupyter_ext/labextension/static/lib_index_js.67505497667f9c0a763d.js.map +0 -1
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/etc/jupyter/jupyter_server_config.d/hdsp_jupyter_extension.json +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/install.json +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b80.c095373419d05e6f141a.js +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b80.c095373419d05e6f141a.js.map +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b81.61e75fb98ecff46cf836.js +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b81.61e75fb98ecff46cf836.js.map +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/static/style.js +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_babel_runtime_helpers_esm_extends_js-node_modules_emotion_serialize_dist-051195.e2553aab0c3963b83dd7.js +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_babel_runtime_helpers_esm_extends_js-node_modules_emotion_serialize_dist-051195.e2553aab0c3963b83dd7.js.map +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js.24edcc52a1c014a8a5f0.js +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js.24edcc52a1c014a8a5f0.js.map +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.19ecf6babe00caff6b8a.js +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.19ecf6babe00caff6b8a.js.map +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_styled_dist_emotion-styled_browser_development_esm_js.661fb5836f4978a7c6e1.js +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_styled_dist_emotion-styled_browser_development_esm_js.661fb5836f4978a7c6e1.js.map +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_index_js.985697e0162d8d088ca2.js +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_index_js.985697e0162d8d088ca2.js.map +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.1f5038488cdfd8b3a85d.js +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.1f5038488cdfd8b3a85d.js.map +0 -0
- {hdsp_jupyter_extension-2.0.27.dist-info → hdsp_jupyter_extension-2.0.28.dist-info}/WHEEL +0 -0
- {hdsp_jupyter_extension-2.0.27.dist-info → hdsp_jupyter_extension-2.0.28.dist-info}/licenses/LICENSE +0 -0
agent_server/langchain/agent_prompts/planner_prompt.py
@@ -5,15 +5,16 @@ Main Agent (Supervisor) System Prompt for Multi-Agent Mode
 PLANNER_SYSTEM_PROMPT = """당신은 작업을 조율하는 Main Agent입니다. 한국어로 응답하세요.
 
 # 핵심 원칙
-1.
-2.
-3.
-4.
+1. **간단한 작업 (1-2단계)**: write_todos 사용 금지 → 바로 실행하고 종료
+2. **복잡한 작업 (3단계+)**: write_todos로 계획 → 순차 실행 → 완료 시 final_summary_tool 호출
+3. **직접 코드, 쿼리 작성 금지** - 모든 코드/쿼리 생성은 task_tool로 서브에이전트에게 위임
+4. 서브에이전트가 반환한 코드를 적절한 도구로 실행
 
 # 작업 흐름
 
 ## Step 1: 계획 수립
-
+- **간단한 작업 (1-2단계)**: write_todos 없이 바로 실행. 완료 후 추가 도구 호출 없이 종료.
+- **복잡한 작업 (3단계+)**: write_todos로 작업 목록 생성 (실제 작업만 포함, 요약은 시스템이 자동 처리)
 
 ## Step 2: 코드/쿼리 생성 요청
 필요한 경우, task_tool을 호출하여 서브에이전트에게 위임:
@@ -25,18 +26,20 @@ PLANNER_SYSTEM_PROMPT = """당신은 작업을 조율하는 Main Agent입니다.
 | researcher | 정보 검색 | task_tool(agent_name="researcher", description="관련 문서 검색") |
 
 ## Step 3: 결과 실행/적용 (필수!)
-**task_tool
+**task_tool 호출 후 반드시 결과를 처리해야 함. 코드/SQL은 자동 주입됩니다:**
 
-| 서브에이전트 | 작업 유형 | 처리 방법 |
-
-| python_developer | 코드 실행 (데이터 분석, 시각화) | jupyter_cell_tool | jupyter_cell_tool(
-| python_developer | **파일 생성/수정** | **write_file_tool
-| athena_query | SQL 표시 | markdown_tool | markdown_tool(
+| 서브에이전트 | 작업 유형 | 처리 방법 | 호출 방법 |
+|-------------|----------|----------|----------|
+| python_developer | 코드 실행 (데이터 분석, 시각화) | jupyter_cell_tool | jupyter_cell_tool() ← 코드 자동 주입, code 파라미터 불필요 |
+| python_developer | **파일 생성/수정** | **write_file_tool** | write_file_tool(path="파일경로") ← content 자동 주입 |
+| athena_query | SQL 표시 | markdown_tool | markdown_tool() ← SQL 자동 주입, content 파라미터 불필요 |
 | researcher | 텍스트 요약 | 직접 응답 | - |
 
-**🔴 중요:
--
--
+**🔴 중요: 코드/SQL 자동 주입**
+- task_tool이 생성한 코드/SQL은 **State를 통해 자동 주입**됩니다
+- **코드를 직접 복사하거나 인자로 전달할 필요 없음** — 도구만 호출하면 됨
+- **파일 생성/수정 요청** → `write_file_tool(path=...)` 사용 (content 자동 주입)
+- **코드 실행 요청** (데이터 분석, 차트 등) → `jupyter_cell_tool()` 사용 (code 자동 주입)
 - **❌ markdown_tool은 코드 저장용이 아님!** (표시 전용)
 
 **중요**: task_tool 결과를 받은 후 바로 write_todos로 완료 처리하지 말고, 반드시 위 도구로 결과를 먼저 적용!
@@ -49,34 +52,22 @@ PLANNER_SYSTEM_PROMPT = """당신은 작업을 조율하는 Main Agent입니다.
 - **🔴 기존 todo 절대 삭제 금지**: 전체 리스트를 항상 포함하고 status만 변경
 - **🔴 상태 전환 순서 필수**: pending → in_progress → completed (건너뛰기 금지!)
 - **🔴 초기 생성 규칙**: 첫 write_todos 호출 시 첫 번째 todo만 in_progress, 나머지는 모두 pending
-- 올바른 초기 예: [{"content": "작업1", "status": "in_progress"}, {"content": "작업2", "status": "pending"}
+- 올바른 초기 예: [{"content": "작업1", "status": "in_progress"}, {"content": "작업2", "status": "pending"}]
 - 잘못된 초기 예: [{"content": "작업1", "status": "completed"}, ...] ← 실제 작업 없이 completed 금지!
 - **🔴 completed 전환 조건**: 실제 도구(task_tool, jupyter_cell_tool 등)로 작업 수행 후에만 completed로 변경
 - in_progress 상태는 **동시에 1개만** 허용 (completed, pending todo는 삭제하지 않고 모두 유지)
 - content에 도구(tool)명 언급 금지
-- **
-
-# "작업 요약 및 다음 단계 제시" todo 완료 시 [필수]
-1. "작업 요약 및 다음 단계 제시"를 **in_progress**로 변경 (write_todos 호출)
-2. **반드시 final_summary_tool 호출**:
-   final_summary_tool(
-   summary="완료된 작업 요약",
-   next_items=[{"subject": "제목", "description": "설명"}, ...]
-   )
-
-3. final_summary_tool 호출 후 "작업 요약 및 다음 단계 제시"를 **completed**로 변경
-
-- next_items 3개 이상 필수
-- **final_summary_tool 호출 없이 종료 금지**
+- **"작업 요약" todo 추가 금지**: 실제 작업만 todo로 생성 (요약은 시스템이 자동 처리)
 
 # 도구 사용시 주의할 점
 ## 파일 위치 모를 때 탐색 순서: search_files_tool → list_workspace_tool → 재검색 → ask_user_tool 순서로!)
-## list_workspace_tool로 전체 디렉토리 파일 목록 검색 금지! 최대한 pattern 으로 drill down 해서 검색할 것
+## list_workspace_tool로 전체 디렉토리 파일 목록 검색 금지! 최대한 pattern 으로 drill down 해서 검색할 것
 
 # 금지 사항
 - 직접 코드/SQL 작성 (반드시 task_tool 사용)
 - task_tool 없이 jupyter_cell_tool 호출
-- **task_tool 결과를
+- **task_tool 결과를 처리하지 않고 바로 완료** (python_developer → jupyter_cell_tool, athena_query → markdown_tool 필수)
+- jupyter_cell_tool에 code 인자를 직접 전달 (자동 주입되므로 불필요)
 - 빈 응답
 """
 
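The rewritten prompt assumes that code and SQL produced by task_tool travel through agent state rather than through tool arguments (the wiring lives in the new content_injection_middleware.py and state_schema.py, which are not shown in this excerpt). A toy sketch of that contract, with hypothetical names such as `generated_code` and deliberately simplified tool signatures:

```python
# Toy sketch only: `generated_code`, `task_tool`, and `jupyter_cell_tool` here are
# simplified stand-ins; the packaged middleware may use different state keys and APIs.
from typing import Optional

state = {"generated_code": None}

def task_tool(description: str) -> str:
    # The subagent generates code and the middleware stores it in state,
    # so the supervisor never copies it into another tool call.
    state["generated_code"] = "print('hello from subagent')"
    return "code generated"

def jupyter_cell_tool(code: Optional[str] = None) -> str:
    # Per the prompt above, the supervisor calls jupyter_cell_tool() with no
    # code argument; the injected code is read from state instead.
    code = code or state["generated_code"]
    print(f"executing cell:\n{code}")  # stand-in for running a notebook cell
    return "executed"

task_tool("analyze the dataset")
jupyter_cell_tool()  # no code parameter needed
```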
agent_server/langchain/custom_middleware.py
@@ -12,7 +12,9 @@ import uuid
 from typing import Any, Dict, Optional
 
 from json_repair import repair_json
-from
+from langchain.agents.middleware import AgentMiddleware
+from langchain_core.messages import AIMessage, HumanMessage, ToolMessage
+from langgraph.types import Command
 
 from agent_server.langchain.logging_utils import (
     _format_middleware_marker,
@@ -25,6 +27,92 @@ from agent_server.langchain.prompts import JSON_TOOL_SCHEMA, NON_HITL_TOOLS
 logger = logging.getLogger(__name__)
 
 
+# ---------------------------------------------------------------------------
+# TodoActiveMiddleware — manages todo_active state field
+# ---------------------------------------------------------------------------
+
+
+class TodoActiveMiddleware(AgentMiddleware):
+    """Middleware that manages the `todo_active` state field.
+
+    Intercepts write_todos and final_summary_tool calls to set/clear
+    the todo_active flag in LangGraph state via Command.
+
+    - write_todos called → todo_active = True
+    - final_summary_tool called → todo_active = False
+
+    This flag is checked by handle_empty_response and continuation_control
+    middlewares to decide whether to force continuation or let the LLM
+    terminate naturally (for simple 1-2 step tasks).
+    """
+
+    def wrap_tool_call(self, request, handler):
+        """Intercept tool calls to manage todo_active state."""
+        result = handler(request)
+        tool_name = request.tool_call.get("name", "")
+
+        if tool_name == "write_todos":
+            return self._wrap_with_todo_active(request, result, active=True)
+        elif tool_name in ("final_summary_tool", "final_summary"):
+            return self._wrap_with_todo_active(request, result, active=False)
+
+        return result
+
+    def _wrap_with_todo_active(self, request, result, active: bool):
+        """Wrap tool result in a Command that updates todo_active state.
+
+        Handles two cases:
+        1. Result is already a Command (e.g., from TodoListMiddleware) → merge
+        2. Result is a ToolMessage → wrap in new Command
+        """
+        try:
+            if isinstance(result, Command):
+                # Merge todo_active into existing Command's update dict
+                existing_update = (
+                    result.update if hasattr(result, "update") and result.update else {}
+                )
+                merged_update = {**existing_update, "todo_active": active}
+                logger.info(
+                    "[TodoActive] Merged todo_active=%s into Command for tool '%s'",
+                    active,
+                    request.tool_call.get("name", ""),
+                )
+                return Command(update=merged_update)
+            elif isinstance(result, ToolMessage):
+                # Wrap ToolMessage in a new Command
+                logger.info(
+                    "[TodoActive] Wrapped ToolMessage in Command with todo_active=%s for tool '%s'",
+                    active,
+                    request.tool_call.get("name", ""),
+                )
+                return Command(
+                    update={
+                        "todo_active": active,
+                        "messages": [result],
+                    }
+                )
+            else:
+                # Unknown result type — wrap as ToolMessage
+                tool_call_id = request.tool_call.get("id", "")
+                content = str(result) if result else ""
+                logger.info(
+                    "[TodoActive] Wrapped unknown result type (%s) in Command with todo_active=%s",
+                    type(result).__name__,
+                    active,
+                )
+                return Command(
+                    update={
+                        "todo_active": active,
+                        "messages": [
+                            ToolMessage(content=content, tool_call_id=tool_call_id)
+                        ],
+                    }
+                )
+        except Exception as e:
+            logger.warning("[TodoActive] Failed to set todo_active=%s: %s", active, e)
+            return result
+
+
 def parse_json_tool_call(text) -> Optional[Dict[str, Any]]:
     """Parse JSON tool call from text response.
 
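A quick way to see the new middleware in isolation, assuming TodoActiveMiddleware is importable from the module shown above; the request object here is a simplified stand-in for the real tool-call request, which carries more fields:

```python
from types import SimpleNamespace

from langchain_core.messages import ToolMessage
from langgraph.types import Command

from agent_server.langchain.custom_middleware import TodoActiveMiddleware

mw = TodoActiveMiddleware()

# Minimal stand-in for the tool-call request the middleware receives.
request = SimpleNamespace(tool_call={"name": "write_todos", "id": "call_1"})

def handler(_request):
    # Pretend the underlying tool executed and returned a ToolMessage.
    return ToolMessage(content="3 todos written", tool_call_id="call_1")

result = mw.wrap_tool_call(request, handler)
assert isinstance(result, Command)
assert result.update["todo_active"] is True            # write_todos sets the flag
assert isinstance(result.update["messages"][0], ToolMessage)

# final_summary_tool flips the flag back off.
request.tool_call["name"] = "final_summary_tool"
assert mw.wrap_tool_call(request, handler).update["todo_active"] is False
```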
@@ -262,6 +350,31 @@ def create_handle_empty_response_middleware(wrap_model_call):
     def handle_empty_response(request, handler):
         max_retries = 2
 
+        # Guard: If final_summary_tool was already called, stop the agent immediately.
+        # This is independent of todo status (LLM may call final_summary before
+        # marking all todos as completed).
+        todo_active = request.state.get("todo_active", False)
+        if not todo_active:
+            messages = request.messages
+            # Find last REAL HumanMessage index
+            _last_human = -1
+            for _i, _msg in enumerate(messages):
+                _mtype = getattr(_msg, "type", "") or type(_msg).__name__
+                if _mtype in ("human", "HumanMessage"):
+                    _mcontent = getattr(_msg, "content", "") or ""
+                    if not _mcontent.startswith("[SYSTEM]"):
+                        _last_human = _i
+            _msgs_after = (
+                messages[_last_human + 1 :] if _last_human >= 0 else messages[-10:]
+            )
+            for _msg in _msgs_after:
+                _name = getattr(_msg, "name", "") or ""
+                if _name in ("final_summary_tool", "final_summary"):
+                    logger.info(
+                        "final_summary_tool already executed and todo_active=False - stopping agent (no LLM call)"
+                    )
+                    return AIMessage(content="", tool_calls=[])
+
         # Check if all todos are completed - if so, return empty response to stop agent
         # Method 1: Check state.todos
         todos = request.state.get("todos", [])
@@ -297,8 +410,15 @@ def create_handle_empty_response_middleware(wrap_model_call):
                 else messages[-10:]
             )
             for msg in messages_to_check:
+                # Check ToolMessage name for final_summary_tool
+                msg_name = getattr(msg, "name", "") or ""
+                if msg_name in ("final_summary_tool", "final_summary"):
+                    summary_exists = True
+                    break
                 content = getattr(msg, "content", "") or ""
-                if '"summary"' in content and '"next_items"' in content
+                if ('"summary"' in content and '"next_items"' in content) or (
+                    "'summary'" in content and "'next_items'" in content
+                ):
                     summary_exists = True
                     break
 
@@ -343,8 +463,15 @@ def create_handle_empty_response_middleware(wrap_model_call):
         messages = request.messages
         summary_exists = False
         for msg in messages[-15:]:
+            # Check ToolMessage name for final_summary_tool
+            msg_name = getattr(msg, "name", "") or ""
+            if msg_name in ("final_summary_tool", "final_summary"):
+                summary_exists = True
+                break
             msg_content = getattr(msg, "content", "") or ""
-            if '"summary"' in msg_content and '"next_items"' in msg_content
+            if ('"summary"' in msg_content and '"next_items"' in msg_content) or (
+                "'summary'" in msg_content and "'next_items'" in msg_content
+            ):
                 summary_exists = True
                 break
         if any(
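The two hunks above apply the same detection rule in different code paths. A condensed restatement as a standalone predicate may help when reading them; this is a sketch for clarity, not a helper that exists in the package:

```python
def final_summary_already_emitted(messages) -> bool:
    """Return True if final_summary_tool appears to have run already (sketch)."""
    for msg in messages:
        # New check: the ToolMessage produced by the tool carries its name.
        if (getattr(msg, "name", "") or "") in ("final_summary_tool", "final_summary"):
            return True
        content = getattr(msg, "content", "") or ""
        # Old check, widened: the summary payload may appear as JSON (double quotes)
        # or as a repr-style Python string (single quotes) in message content.
        if ('"summary"' in content and '"next_items"' in content) or (
            "'summary'" in content and "'next_items'" in content
        ):
            return True
    return False
```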
@@ -583,6 +710,14 @@ def create_handle_empty_response_middleware(wrap_model_call):
 
         # Invalid response - retry with JSON schema prompt
         if response_message and attempt < max_retries:
+            # todo_active=False → LLM can terminate naturally (simple tasks)
+            todo_active = request.state.get("todo_active", False)
+            if not todo_active:
+                logger.info(
+                    "todo_active=False - skipping retry, allowing LLM natural termination"
+                )
+                return response
+
             reason = "text-only" if has_content else "empty"
 
             json_prompt = _build_json_prompt(request, response_message, has_content)
@@ -776,23 +911,38 @@ def _build_json_prompt(request, response_message, has_content):
             f"Example: {example_json}"
         )
     elif not todos:
-        # No todos
-        # This
-
-
-
-        f"Your response was empty. You MUST call a tool to proceed.\n"
-        f"한국어로 응답하고, write_todos로 작업 목록을 만들거나 jupyter_cell_tool/read_file_tool을 호출하세요.\n"
-        f'Example: {{"tool": "write_todos", "arguments": {{"todos": [{{"content": "데이터 분석", "status": "in_progress"}}]}}}}'
+        # No todos → simple task (1-2 steps), don't force write_todos creation
+        # This was the DIRECT CAUSE of the simple-task infinite loop:
+        # LLM completes simple task → empty response → forced to create todos → loop
+        logger.info(
+            "No todos exist - simple task, skipping retry (no write_todos forcing)"
         )
+        return None  # Signal to skip retry — LLM terminates naturally
     else:
-        # Todos exist but all completed
-
+        # Todos exist but all completed
+        # Check if final_summary_tool was already called in message history
+        messages = getattr(request, "messages", [])
+        final_summary_already_called = any(
+            getattr(msg, "name", "") in ("final_summary_tool", "final_summary")
+            for msg in messages
+        )
+        if final_summary_already_called:
+            logger.info(
+                "All todos completed and final_summary_tool already called - "
+                "signaling skip (no more retries needed)"
+            )
+            return None  # Signal to skip retry and synthesize completion
+
+        logger.info(
+            "All todos completed but response empty - asking for final_summary_tool"
+        )
         return (
             f"{JSON_TOOL_SCHEMA}\n\n"
-            f"All tasks completed. Call
-            f"
-            f
+            f"All tasks completed. Call final_summary_tool to provide a summary.\n"
+            f"final_summary_tool(summary='완료된 작업 요약', "
+            f"next_items=[{{'subject': '제목', 'description': '설명'}}, ...]) "
+            f"(next_items 3개 이상 필수).\n"
+            f"텍스트로 JSON을 출력하지 말고, 반드시 도구 호출로 실행하세요."
         )
 
 
@@ -1020,8 +1170,31 @@ def create_normalize_tool_args_middleware(wrap_model_call, tools=None):
                     tool_call["args"], dict
                 ):
                     args = tool_call["args"]
-                    # Normalize
+                    # Normalize non-string arguments for str-typed params
                     for key, value in args.items():
+                        # Convert dict to string/None for str-typed params
+                        # LLM sometimes sends {} instead of null for Optional[str]
+                        if key in string_params and isinstance(value, dict):
+                            if not value:  # Empty dict {}
+                                logger.info(
+                                    "Converted empty dict to None for '%s' in tool '%s'",
+                                    key,
+                                    tool_name,
+                                )
+                                args[key] = None
+                            else:
+                                # Non-empty dict → JSON string
+                                json_str = json.dumps(
+                                    value, ensure_ascii=False
+                                )
+                                logger.info(
+                                    "Converted dict to JSON string for '%s' in tool '%s': %s",
+                                    key,
+                                    tool_name,
+                                    json_str[:100],
+                                )
+                                args[key] = json_str
+
                         if key in string_params and isinstance(value, list):
                             # Join list items into a single string
                             text_parts = []
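The added branch handles models that send `{}` (or a populated object) where a tool parameter is typed str. A standalone sketch of that rule, assuming string_params is the set of str-typed parameter names the middleware collected earlier:

```python
import json

def coerce_str_params(args: dict, string_params: set) -> dict:
    """Sketch of the dict-to-string normalization added above."""
    for key, value in list(args.items()):
        if key in string_params and isinstance(value, dict):
            # Empty dict {} is treated as "not provided" and becomes None;
            # a non-empty dict is serialized to a JSON string.
            args[key] = None if not value else json.dumps(value, ensure_ascii=False)
    return args

print(coerce_str_params({"query": {}}, {"query"}))        # {'query': None}
print(coerce_str_params({"query": {"k": 1}}, {"query"}))  # {'query': '{"k": 1}'}
```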
@@ -1150,10 +1323,18 @@ def create_continuation_control_middleware(wrap_model_call):
                 else messages[-15:]
             )
             for msg in messages_to_check:
+                # Check if this is a ToolMessage from final_summary_tool
+                msg_name = getattr(msg, "name", "") or ""
+                if msg_name in ("final_summary_tool", "final_summary"):
+                    return True
+
                 msg_content = getattr(msg, "content", "") or ""
-                # Check for summary JSON
+                # Check for summary JSON (double quotes)
                 if '"summary"' in msg_content and '"next_items"' in msg_content:
                     return True
+                # Check for summary Python str (single quotes from tool output)
+                if "'summary'" in msg_content and "'next_items'" in msg_content:
+                    return True
                 # Check for markdown summary (common patterns)
                 if any(
                     kw in msg_content
@@ -1203,6 +1384,24 @@ def create_continuation_control_middleware(wrap_model_call):
                 pass
 
         if tool_name in NON_HITL_TOOLS:
+            # GUARD: Skip forcing when final_summary_tool already ran
+            if tool_name in ("final_summary_tool", "final_summary"):
+                logger.info(
+                    "final_summary_tool already executed - "
+                    "skipping continuation (preventing infinite loop)"
+                )
+                return handler(request)
+
+            # GUARD: todo_active=False → simple task, skip continuation
+            todo_active = request.state.get("todo_active", False)
+            if not todo_active:
+                logger.info(
+                    "todo_active=False after tool '%s' - "
+                    "simple task, skipping continuation",
+                    tool_name,
+                )
+                return handler(request)
+
             todos = request.state.get("todos", [])
 
             last_real_human_idx = _find_last_real_human_idx(messages)
@@ -1237,36 +1436,60 @@ def create_continuation_control_middleware(wrap_model_call):
                     tool_name,
                 )
 
-                #
-                #
-                #
-
+                # === State-based branching: todos 유무로 분기 ===
+                #
+                # (1) todos 없음 → 간단한 1~2단계 작업 → continuation 불필요
+                # (2) todos 있음 + 미완료 → 다음 작업 유도
+                # (3) todos 있음 + 전부 완료 → final_summary_tool 호출 유도
+                #
+                if not todos:
+                    # No todos in state → simple task (1~2 steps)
+                    # Don't inject any continuation — LLM finishes naturally.
                     logger.info(
-                        "
-                        "
-
+                        "No todos in state after tool: %s - "
+                        "simple task, skipping continuation",
+                        tool_name,
                     )
-                # Don't inject continuation - let agent naturally continue or stop
                 elif pending_todos:
-
-
-
-
-
-
-
-
-
-
-
-
+                    # Todos exist with pending items → guide to next task
+                    if tool_name == "write_todos":
+                        # write_todos with pending items → agent manages its own flow
+                        logger.info(
+                            "write_todos with %d pending todos - "
+                            "agent manages own flow",
+                            len(pending_todos),
+                        )
+                    else:
+                        pending_list = ", ".join(
+                            t.get("content", "")[:30] for t in pending_todos[:3]
+                        )
+                        continuation = (
+                            f"Tool '{tool_name}' completed. "
+                            f"Continue with pending tasks: {pending_list}. "
+                            f"Call jupyter_cell_tool or the next appropriate tool."
+                        )
+                        new_messages = list(messages) + [
+                            HumanMessage(content=f"[SYSTEM] {continuation}")
+                        ]
+                        request = request.override(messages=new_messages)
                 else:
+                    # All todos completed → prompt for final_summary_tool
+                    logger.info(
+                        "All %d todos completed after tool: %s - "
+                        "prompting for final_summary_tool",
+                        len(todos),
+                        tool_name,
+                    )
                     continuation = (
-
-
+                        "[SYSTEM] 모든 작업이 완료되었습니다. "
+                        "반드시 final_summary_tool을 호출하여 작업 요약과 다음 단계를 제시하세요. "
+                        "final_summary_tool(summary='완료된 작업 요약', "
+                        "next_items=[{'subject': '제목', 'description': '설명'}, ...]) "
+                        "(next_items 3개 이상 필수). "
+                        "텍스트로 JSON을 출력하지 말고, 반드시 도구 호출로 실행하세요."
                     )
                     new_messages = list(messages) + [
-                        HumanMessage(content=
+                        HumanMessage(content=continuation)
                     ]
                     request = request.override(messages=new_messages)
 
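The branching this hunk introduces boils down to a three-way decision after each non-HITL tool call. A sketch of that decision as a pure function (assuming a todo counts as pending when its status is not "completed"; the real code also rewrites request.messages and logs, and the exact wording comes from the hunk above):

```python
from typing import Optional

def continuation_message(todos: list, tool_name: str) -> Optional[str]:
    """Return the [SYSTEM] nudge to append, or None to let the LLM finish (sketch)."""
    if not todos:
        return None  # (1) no todos: simple 1-2 step task, no continuation
    pending = [t for t in todos if t.get("status") != "completed"]
    if pending:
        if tool_name == "write_todos":
            return None  # the agent just planned and manages its own flow
        names = ", ".join(t.get("content", "")[:30] for t in pending[:3])
        return (
            f"[SYSTEM] Tool '{tool_name}' completed. "
            f"Continue with pending tasks: {names}."
        )
    # (3) all todos completed: push the agent to call final_summary_tool
    return "[SYSTEM] 모든 작업이 완료되었습니다. 반드시 final_summary_tool을 호출하세요."
```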
@@ -1287,8 +1510,10 @@ def create_continuation_control_middleware(wrap_model_call):
                 if isinstance(p, (str, dict))
             )
 
-            # Check if content contains summary JSON pattern
-            has_summary_json = '"summary"' in content and '"next_items"' in content
+            # Check if content contains summary JSON pattern (double or single quotes)
+            has_summary_json = ('"summary"' in content and '"next_items"' in content) or (
+                "'summary'" in content and "'next_items'" in content
+            )
 
             if has_summary_json:
                 tool_calls = getattr(response_message, "tool_calls", []) or []