hdsp-jupyter-extension 2.0.27__py3-none-any.whl → 2.0.28__py3-none-any.whl
This diff shows the changes between publicly released versions of this package as they appear in their public registries. It is provided for informational purposes only.
- agent_server/context_providers/__init__.py +4 -2
- agent_server/context_providers/actions.py +73 -7
- agent_server/context_providers/file.py +23 -23
- agent_server/langchain/__init__.py +2 -2
- agent_server/langchain/agent.py +18 -251
- agent_server/langchain/agent_factory.py +26 -4
- agent_server/langchain/agent_prompts/planner_prompt.py +22 -31
- agent_server/langchain/custom_middleware.py +268 -43
- agent_server/langchain/llm_factory.py +102 -54
- agent_server/langchain/logging_utils.py +1 -1
- agent_server/langchain/middleware/__init__.py +5 -0
- agent_server/langchain/middleware/content_injection_middleware.py +110 -0
- agent_server/langchain/middleware/subagent_events.py +88 -9
- agent_server/langchain/middleware/subagent_middleware.py +501 -245
- agent_server/langchain/prompts.py +5 -22
- agent_server/langchain/state_schema.py +44 -0
- agent_server/langchain/tools/jupyter_tools.py +4 -5
- agent_server/langchain/tools/tool_registry.py +6 -0
- agent_server/routers/chat.py +305 -2
- agent_server/routers/config.py +193 -8
- agent_server/routers/config_schema.py +254 -0
- agent_server/routers/context.py +31 -8
- agent_server/routers/langchain_agent.py +276 -155
- hdsp_agent_core/managers/config_manager.py +100 -1
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/build_log.json +1 -1
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/package.json +2 -2
- hdsp_jupyter_extension-2.0.27.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.b5e4416b4e07ec087aad.js → hdsp_jupyter_extension-2.0.28.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.55727265b00191e68d9a.js +479 -15
- hdsp_jupyter_extension-2.0.28.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.55727265b00191e68d9a.js.map +1 -0
- jupyter_ext/labextension/static/lib_index_js.67505497667f9c0a763d.js → hdsp_jupyter_extension-2.0.28.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.df05d90f366bfd5fa023.js +1287 -190
- hdsp_jupyter_extension-2.0.28.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.df05d90f366bfd5fa023.js.map +1 -0
- hdsp_jupyter_extension-2.0.27.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.4ab73bb5068405670214.js → hdsp_jupyter_extension-2.0.28.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.08fce819ee32e9d25175.js +3 -3
- jupyter_ext/labextension/static/remoteEntry.4ab73bb5068405670214.js.map → hdsp_jupyter_extension-2.0.28.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.08fce819ee32e9d25175.js.map +1 -1
- {hdsp_jupyter_extension-2.0.27.dist-info → hdsp_jupyter_extension-2.0.28.dist-info}/METADATA +1 -1
- {hdsp_jupyter_extension-2.0.27.dist-info → hdsp_jupyter_extension-2.0.28.dist-info}/RECORD +65 -63
- jupyter_ext/_version.py +1 -1
- jupyter_ext/handlers.py +41 -0
- jupyter_ext/labextension/build_log.json +1 -1
- jupyter_ext/labextension/package.json +2 -2
- jupyter_ext/labextension/static/{frontend_styles_index_js.b5e4416b4e07ec087aad.js → frontend_styles_index_js.55727265b00191e68d9a.js} +479 -15
- jupyter_ext/labextension/static/frontend_styles_index_js.55727265b00191e68d9a.js.map +1 -0
- hdsp_jupyter_extension-2.0.27.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.67505497667f9c0a763d.js → jupyter_ext/labextension/static/lib_index_js.df05d90f366bfd5fa023.js +1287 -190
- jupyter_ext/labextension/static/lib_index_js.df05d90f366bfd5fa023.js.map +1 -0
- jupyter_ext/labextension/static/{remoteEntry.4ab73bb5068405670214.js → remoteEntry.08fce819ee32e9d25175.js} +3 -3
- hdsp_jupyter_extension-2.0.27.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.4ab73bb5068405670214.js.map → jupyter_ext/labextension/static/remoteEntry.08fce819ee32e9d25175.js.map +1 -1
- agent_server/langchain/middleware/description_injector.py +0 -150
- hdsp_jupyter_extension-2.0.27.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.b5e4416b4e07ec087aad.js.map +0 -1
- hdsp_jupyter_extension-2.0.27.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.67505497667f9c0a763d.js.map +0 -1
- jupyter_ext/labextension/static/frontend_styles_index_js.b5e4416b4e07ec087aad.js.map +0 -1
- jupyter_ext/labextension/static/lib_index_js.67505497667f9c0a763d.js.map +0 -1
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/etc/jupyter/jupyter_server_config.d/hdsp_jupyter_extension.json +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/install.json +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b80.c095373419d05e6f141a.js +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b80.c095373419d05e6f141a.js.map +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b81.61e75fb98ecff46cf836.js +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b81.61e75fb98ecff46cf836.js.map +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/static/style.js +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_babel_runtime_helpers_esm_extends_js-node_modules_emotion_serialize_dist-051195.e2553aab0c3963b83dd7.js +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_babel_runtime_helpers_esm_extends_js-node_modules_emotion_serialize_dist-051195.e2553aab0c3963b83dd7.js.map +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js.24edcc52a1c014a8a5f0.js +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js.24edcc52a1c014a8a5f0.js.map +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.19ecf6babe00caff6b8a.js +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.19ecf6babe00caff6b8a.js.map +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_styled_dist_emotion-styled_browser_development_esm_js.661fb5836f4978a7c6e1.js +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_styled_dist_emotion-styled_browser_development_esm_js.661fb5836f4978a7c6e1.js.map +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_index_js.985697e0162d8d088ca2.js +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_index_js.985697e0162d8d088ca2.js.map +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.1f5038488cdfd8b3a85d.js +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.1f5038488cdfd8b3a85d.js.map +0 -0
- {hdsp_jupyter_extension-2.0.27.dist-info → hdsp_jupyter_extension-2.0.28.dist-info}/WHEEL +0 -0
- {hdsp_jupyter_extension-2.0.27.dist-info → hdsp_jupyter_extension-2.0.28.dist-info}/licenses/LICENSE +0 -0
The hunks below are from agent_server/routers/langchain_agent.py (+276/-155 in the file list above).

```diff
@@ -19,10 +19,7 @@ from langgraph.checkpoint.memory import InMemorySaver
 from pydantic import BaseModel, ConfigDict, Field
 from sse_starlette.sse import EventSourceResponse
 
-from agent_server.langchain.agent import (
-    _get_all_tools,
-    create_agent_system,
-)
+from agent_server.langchain.agent import create_agent_system
 from agent_server.langchain.llm_factory import create_llm
 from agent_server.langchain.logging_utils import (
     LOG_RESPONSE_END,
```
```diff
@@ -33,7 +30,10 @@ from agent_server.langchain.middleware.code_history_middleware import (
 )
 
 # Note: Subagent middleware is used by agent_factory, not directly by router
-from agent_server.langchain.middleware.subagent_events import drain_subagent_events
+from agent_server.langchain.middleware.subagent_events import (
+    drain_subagent_events,
+    drain_summarization_events,
+)
 
 logger = logging.getLogger(__name__)
 router = APIRouter(prefix="/langchain", tags=["langchain-agent"])
```
```diff
@@ -63,11 +63,43 @@ def get_subagent_debug_events():
     """
     Drain subagent events and convert to SSE debug events.
 
+    If any subagent_complete event is found, appends a "LLM 응답 대기 중"
+    event so the UI doesn't show "완료" during the next LLM call.
+
     Returns:
         List of SSE event dicts for debug display
     """
     events = drain_subagent_events()
     sse_events = []
+    has_complete = False
+    for event in events:
+        sse_events.append(
+            {
+                "event": "debug",
+                "data": json.dumps(event.to_status_dict()),
+            }
+        )
+        if event.event_type == "subagent_complete":
+            has_complete = True
+    if has_complete:
+        sse_events.append(
+            {
+                "event": "debug",
+                "data": json.dumps({"status": "LLM 응답 대기 중", "icon": "thinking"}),
+            }
+        )
+    return sse_events
+
+
+def get_summarization_debug_events():
+    """
+    Drain summarization events and convert to SSE debug events.
+
+    Returns:
+        List of SSE event dicts for debug display
+    """
+    events = drain_summarization_events()
+    sse_events = []
     for event in events:
         sse_events.append(
             {
```
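The new get_summarization_debug_events mirrors the existing subagent helper: middleware queues events while the graph runs, and the SSE generator drains the queue between stream steps. Below is a minimal sketch of that drain pattern, assuming a lock-guarded module-level queue; the real implementation lives in agent_server/langchain/middleware/subagent_events.py (also changed in this release) and may differ in detail:

```python
# Hypothetical sketch of the emit/drain pattern behind
# drain_subagent_events() / drain_summarization_events().
import threading
from dataclasses import dataclass, field
from typing import Any, Dict, List


@dataclass
class SubagentEvent:
    event_type: str  # e.g. "subagent_start" or "subagent_complete"
    payload: Dict[str, Any] = field(default_factory=dict)

    def to_status_dict(self) -> Dict[str, Any]:
        # Shape consumed by the router's SSE "debug" events
        return {"status": self.event_type, **self.payload}


_events: List[SubagentEvent] = []
_lock = threading.Lock()


def emit_subagent_event(event: SubagentEvent) -> None:
    """Middleware side: queue an event while a subagent runs."""
    with _lock:
        _events.append(event)


def drain_subagent_events() -> List[SubagentEvent]:
    """Router side: take everything queued so far and clear the queue."""
    with _lock:
        drained = list(_events)
        _events.clear()
        return drained
```

Draining rather than subscribing keeps the generator pull-based: each stream step flushes whatever accumulated since the previous one.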
```diff
@@ -104,15 +136,19 @@ def _get_tool_status_message(
         }
     elif tool_name_normalized in ("task", "task_tool"):
         # Show subagent delegation details with expand support
-        agent_name = tool_args.get("
+        agent_name = tool_args.get("subagent_type") or tool_args.get(
+            "agent_name", "unknown"
+        )
         description = tool_args.get("description", "")
         short_desc = description[:50] + "..." if len(description) > 50 else description
         return {
-            "status": f"{agent_name}
+            "status": f"Subagent-{agent_name} 실행: {short_desc}",
             "icon": "agent",
             "expandable": len(description) > 50,
-            "full_text": f"{agent_name}
+            "full_text": f"Subagent-{agent_name} 실행: {description}",
         }
+    elif tool_name_normalized in ("final_summary_tool", "final_summary"):
+        return {"status": "작업 마무리중...", "icon": "summary"}
     elif tool_name_normalized in ("list_workspace_tool", "list_workspace"):
         path = tool_args.get("path", ".")
         pattern = tool_args.get("pattern", "*")
```
```diff
@@ -224,12 +260,12 @@ class LLMConfig(BaseModel):
     system_prompt: Optional[str] = Field(
         default=None,
         alias="systemPrompt",
-        description="Override system prompt
+        description="Override system prompt",
     )
     agent_prompts: Optional[AgentPromptsConfig] = Field(
         default=None,
         alias="agentPrompts",
-        description="Per-agent system prompts
+        description="Per-agent system prompts",
     )
     resource_context: Optional[Union[Dict[str, Any], str]] = Field(
         default=None,
```
```diff
@@ -273,10 +309,6 @@ class AgentRequest(BaseModel):
         default=None,
         description="Thread ID for conversation persistence (required for HITL)",
     )
-    agentMode: Optional[str] = Field(
-        default="single",
-        description="Agent mode: 'single' (all tools) or 'multi' (Planner + Subagents)",
-    )
 
 
 class ResumeDecision(BaseModel):
```
```diff
@@ -304,10 +336,6 @@ class ResumeRequest(BaseModel):
     workspaceRoot: Optional[str] = Field(
         default=".", description="Workspace root directory"
     )
-    agentMode: Optional[str] = Field(
-        default="single",
-        description="Agent mode: 'single' (all tools) or 'multi' (Planner + Subagents)",
-    )
 
 
 class ExecutionResult(BaseModel):
```
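With agentMode gone from both AgentRequest and ResumeRequest (and from the internals in the hunks below), mode selection disappears from this public API. Clients that still send the field will most likely see it silently ignored, assuming the models keep Pydantic's default extra-field handling; the models' ConfigDict settings are not visible in this diff.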
```diff
@@ -354,20 +382,18 @@ def _get_agent_cache_key(
     llm_config: Dict[str, Any],
     workspace_root: str,
     system_prompt_override: Optional[str] = None,
-    agent_mode: str = "single",
     agent_prompts: Optional[Dict[str, str]] = None,
 ) -> str:
     """Generate cache key for agent instance.
 
     Agent instances are cached based on LLM config, workspace root, system prompt,
-
+    and agent prompts. Different configurations require different agent instances.
 
     Args:
         llm_config: LLM configuration dictionary
         workspace_root: Workspace root directory
         system_prompt_override: Optional custom system prompt
-
-        agent_prompts: Optional dict of per-agent prompts (for multi-agent mode)
+        agent_prompts: Optional dict of per-agent prompts
 
     Returns:
         MD5 hash of the configuration as cache key
```
```diff
@@ -382,15 +408,23 @@ def _get_agent_cache_key(
     )
 
     cache_data = (
-        f"{config_str}|{workspace_root}|{prompt_str}|{
+        f"{config_str}|{workspace_root}|{prompt_str}|{agent_prompts_str}"
     )
     cache_key = hashlib.md5(cache_data.encode()).hexdigest()
 
     return cache_key
 
 
-def _normalize_action_request(action: Dict[str, Any]) -> Dict[str, Any]:
-    """Normalize HITL action request payload across LangChain versions."""
+def _normalize_action_request(
+    action: Dict[str, Any],
+    state: Optional[Dict[str, Any]] = None,
+) -> Dict[str, Any]:
+    """Normalize HITL action request payload across LangChain versions.
+
+    Also injects generated_content from state into tool args for display
+    (ContentInjectionMiddleware.wrap_tool_call runs at execution time,
+    but HITL interrupts before execution — so we inject here for display).
+    """
     logger.info(f"[_normalize_action_request] Called with action: {str(action)[:200]}")
     name = (
         action.get("name")
```
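Dropping agent_mode from the signature also drops it from the hashed string, so requests that previously differed only by mode now resolve to the same cached agent. A self-contained sketch of the resulting key derivation; config_str, prompt_str, and agent_prompts_str are built above this hunk, so their exact construction here is an assumption:

```python
# Sketch of the post-change cache key; only the hashed layout
# f"{config_str}|{workspace_root}|{prompt_str}|{agent_prompts_str}"
# is taken from the hunk, the helper strings are assumed.
import hashlib
import json
from typing import Any, Dict, Optional


def cache_key(
    llm_config: Dict[str, Any],
    workspace_root: str,
    system_prompt_override: Optional[str] = None,
    agent_prompts: Optional[Dict[str, str]] = None,
) -> str:
    config_str = json.dumps(llm_config, sort_keys=True)
    prompt_str = system_prompt_override or ""
    agent_prompts_str = json.dumps(agent_prompts or {}, sort_keys=True)
    cache_data = f"{config_str}|{workspace_root}|{prompt_str}|{agent_prompts_str}"
    return hashlib.md5(cache_data.encode()).hexdigest()


print(cache_key({"provider": "gemini"}, "/workspace"))  # stable per config
```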
````diff
@@ -407,39 +441,32 @@ def _normalize_action_request(action: Dict[str, Any]) -> Dict[str, Any]:
         or action.get("parameters")
         or {}
     )
+
+    # Inject generated_content from state into tool args for HITL display
+    if state and isinstance(args, dict):
+        content = state.get("generated_content")
+        content_type = state.get("generated_content_type")
+        desc = state.get("content_description")
+        if content and content_type:
+            if content_type == "python":
+                if name == "jupyter_cell_tool" and not args.get("code"):
+                    args = {**args, "code": content}
+                    if desc and not args.get("description"):
+                        args["description"] = desc
+                elif name == "write_file_tool" and not args.get("content"):
+                    args = {**args, "content": content}
+            elif content_type == "sql":
+                if name == "markdown_tool" and not args.get("content"):
+                    sql_md = f"```sql\n{content}\n```"
+                    if desc:
+                        sql_md = f"{desc}\n\n{sql_md}"
+                    args = {**args, "content": sql_md}
+
     # Try to get description from action first, then from args (for jupyter_cell_tool etc)
     description = action.get("description", "") or (
         args.get("description", "") if isinstance(args, dict) else ""
     )
 
-    # Auto-inject description for jupyter_cell_tool from python_developer's response
-    # Only inject into args.description, keep top-level description as HITL default
-    if name == "jupyter_cell_tool":
-        logger.info(
-            f"[HITL] jupyter_cell_tool detected, current description: '{description[:50] if description else 'None'}'"
-        )
-        try:
-            from agent_server.langchain.middleware.description_injector import (
-                clear_pending_description,
-                get_pending_description,
-            )
-
-            pending = get_pending_description()
-            if pending:
-                # Inject into args.description only (for detailed description display)
-                # Keep top-level description as HITL approval message
-                if isinstance(args, dict):
-                    args = dict(args)
-                    args["description"] = pending
-                clear_pending_description()
-                logger.info(
-                    f"[HITL] Auto-injected description into args: {pending[:80]}..."
-                )
-            else:
-                logger.info("[HITL] No pending description from python_developer")
-        except Exception as e:
-            logger.warning(f"Failed to inject description: {e}")
-
     return {"name": name, "arguments": args, "description": description}
 
 
````
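The deleted block pulled a pending description from the description_injector module, which this release removes entirely (+0/-150 in the file list); the replacement reads generated content out of graph state instead. A standalone sketch of just the python-content rule, with the SQL and write_file_tool branches omitted (hypothetical helper name; the real logic is inline in _normalize_action_request):

```python
from typing import Any, Dict, Optional


def inject_generated_content(
    name: str, args: Dict[str, Any], state: Optional[Dict[str, Any]]
) -> Dict[str, Any]:
    """Copy generated_content from state into tool args so the HITL
    approval card can show the code before the tool actually runs."""
    state = state or {}
    content = state.get("generated_content")
    desc = state.get("content_description")
    if content and state.get("generated_content_type") == "python":
        if name == "jupyter_cell_tool" and not args.get("code"):
            args = {**args, "code": content}
            if desc and not args.get("description"):
                args["description"] = desc
    return args


args = inject_generated_content(
    "jupyter_cell_tool",
    {},
    {"generated_content": "df.describe()", "generated_content_type": "python"},
)
assert args == {"code": "df.describe()"}
```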
```diff
@@ -695,9 +722,9 @@ async def stream_agent(request: AgentRequest):
                 thread_id,
             )
 
-            # Handle
-            if "
-                logger.info(f"
+            # Handle /reset command
+            if "/reset" in request.request:
+                logger.info(f"/reset command detected for thread: {thread_id}")
                 from agent_server.langchain.middleware.code_history_middleware import (
                     clear_code_history,
                 )
```
```diff
@@ -763,57 +790,37 @@ async def stream_agent(request: AgentRequest):
 
         resolved_workspace_root = _resolve_workspace_root(request.workspaceRoot)
 
-        # Get agent
-        agent_mode = getattr(request, "agentMode", "single") or "single"
-        logger.info("Agent mode: %s", agent_mode)
-
-        # Get agent prompts (for multi-agent mode)
+        # Get agent prompts for per-agent customization
         agent_prompts = None
-        if
-            # Multi-agent mode: Use agentPrompts for per-agent customization
-            # systemPrompt is for single-agent mode only (DEFAULT_SYSTEM_PROMPT)
-            if request.llmConfig and request.llmConfig.agent_prompts:
-                agent_prompts = {
-                    "planner": request.llmConfig.agent_prompts.planner,
-                    "python_developer": (
-                        request.llmConfig.agent_prompts.python_developer
-                    ),
-                    "researcher": request.llmConfig.agent_prompts.researcher,
-                    "athena_query": request.llmConfig.agent_prompts.athena_query,
-                }
-                agent_prompts = {k: v for k, v in agent_prompts.items() if v}
-                logger.info(
-                    "Multi-agent mode: Using agentPrompts (%s)",
-                    list(agent_prompts.keys()),
-                )
-            # In multi-agent mode, DON'T use systemPrompt as override
-            # (systemPrompt = single-agent prompt, not planner prompt)
-            # Use agentPrompts.planner instead (handled by agent_factory)
-            if system_prompt_override:
-                logger.info(
-                    "Multi-agent mode: Ignoring systemPrompt override (len=%d) - "
-                    "use agentPrompts.planner instead",
-                    len(system_prompt_override),
-                )
-                system_prompt_override = None
-        elif request.llmConfig and request.llmConfig.agent_prompts:
-            # Single-agent mode: can use custom prompts (not applicable currently)
+        if request.llmConfig and request.llmConfig.agent_prompts:
             agent_prompts = {
                 "planner": request.llmConfig.agent_prompts.planner,
-                "python_developer":
+                "python_developer": (
+                    request.llmConfig.agent_prompts.python_developer
+                ),
                 "researcher": request.llmConfig.agent_prompts.researcher,
                 "athena_query": request.llmConfig.agent_prompts.athena_query,
             }
             agent_prompts = {k: v for k, v in agent_prompts.items() if v}
+            logger.info(
+                "Using agentPrompts (%s)",
+                list(agent_prompts.keys()),
+            )
+            # Don't use systemPrompt as override — use agentPrompts.planner instead
+            if system_prompt_override:
+                logger.info(
+                    "Ignoring systemPrompt override (len=%d) - "
+                    "use agentPrompts.planner instead",
+                    len(system_prompt_override),
+                )
+                system_prompt_override = None
 
         # Get or create cached agent
-        # DEBUG: Log cache key components
         logger.info(
-            "
+            "Cache key components - provider=%s, workspace=%s, "
             "has_system_prompt=%s, has_agent_prompts=%s",
             config_dict.get("provider"),
             resolved_workspace_root[:50] if resolved_workspace_root else None,
-            agent_mode,
             bool(system_prompt_override),
             bool(agent_prompts),
         )
```
```diff
@@ -822,23 +829,20 @@ async def stream_agent(request: AgentRequest):
             llm_config=config_dict,
             workspace_root=resolved_workspace_root,
             system_prompt_override=system_prompt_override,
-            agent_mode=agent_mode,
             agent_prompts=agent_prompts,
         )
 
         if agent_cache_key in _simple_agent_instances:
             agent = _simple_agent_instances[agent_cache_key]
             logger.info(
-                "Using cached agent for key %s (
+                "Using cached agent for key %s (total cached: %d)",
                 agent_cache_key[:8],
-                agent_mode,
                 len(_simple_agent_instances),
             )
         else:
             logger.info(
-                "Creating new agent for key %s
+                "Creating new agent for key %s",
                 agent_cache_key[:8],
-                agent_mode,
             )
             agent = create_agent_system(
                 llm_config=config_dict,
```
```diff
@@ -846,14 +850,12 @@ async def stream_agent(request: AgentRequest):
                 enable_hitl=True,
                 checkpointer=checkpointer,
                 system_prompt_override=system_prompt_override,
-                agent_mode=agent_mode,
                 agent_prompts=agent_prompts,
             )
             _simple_agent_instances[agent_cache_key] = agent
             logger.info(
-                "Agent cached for key %s (
+                "Agent cached for key %s (total cached: %d)",
                 agent_cache_key[:8],
-                agent_mode,
                 len(_simple_agent_instances),
             )
 
```
```diff
@@ -900,8 +902,11 @@ async def stream_agent(request: AgentRequest):
         previous_todos_context = None
         if should_reset_todos:
             try:
-                agent.update_state(config, {"todos": []})
-                logger.info(
+                agent.update_state(config, {"todos": [], "todo_active": False})
+                logger.info(
+                    "Reset todos and todo_active in agent state for thread %s",
+                    thread_id,
+                )
                 # Prepare event to notify frontend (will be yielded after function setup)
                 todos_reset_event = {
                     "event": "todos",
```
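update_state is LangGraph's API for editing a thread's checkpointed state without running the graph; the keys being written (todos, todo_active) come from this package's own schema (note the new state_schema.py in the file list). Schematic usage:

```python
# Schematic only: `agent` is a compiled LangGraph graph with a checkpointer,
# and thread_id identifies the conversation whose todo state is being reset.
config = {"configurable": {"thread_id": thread_id}}
agent.update_state(config, {"todos": [], "todo_active": False})
```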
```diff
@@ -920,7 +925,8 @@ async def stream_agent(request: AgentRequest):
                     items_summary += "..."
                 previous_todos_context = (
                     f"[SYSTEM] 이전 todo list가 완료 혹은 취소되었습니다. 완료된 작업: {items_summary}. "
-                    f"새 작업을 시작합니다. 이전 todo list
+                    f"새 작업을 시작합니다. 이전 todo list는 초기화되었습니다. "
+                    f"간단한 작업(1-2단계)이면 write_todos 없이 바로 실행하세요."
                 )
                 logger.info(
                     "Injecting previous todos context: %s",
```
```diff
@@ -959,6 +965,11 @@ async def stream_agent(request: AgentRequest):
             "data": json.dumps({"status": "LLM 응답 대기 중", "icon": "thinking"}),
         }
 
+        # Track message count for summarization detection
+        # SummarizationMiddleware keeps ~3 messages after compression
+        previous_message_count = 0
+        summarization_detected = False
+
         # Main streaming loop
         async for step in _async_stream_wrapper(
             agent, agent_input, config, stream_mode="values"
```
```diff
@@ -969,6 +980,11 @@ async def stream_agent(request: AgentRequest):
                     f"Thread {thread_id} cancelled by user, stopping stream"
                 )
                 clear_cancelled_thread(thread_id)
+                # Reset todo_active on cancellation
+                try:
+                    agent.update_state(config, {"todo_active": False})
+                except Exception:
+                    pass
                 yield {
                     "event": "cancelled",
                     "data": json.dumps(
```
```diff
@@ -1005,6 +1021,30 @@ async def stream_agent(request: AgentRequest):
             # Process messages (no continue statements to ensure interrupt check always runs)
             if isinstance(step, dict) and "messages" in step:
                 messages = step["messages"]
+                current_message_count = len(messages) if messages else 0
+
+                # Detect summarization by checking for lc_source: "summarization" marker
+                # SummarizationMiddleware injects summary into system prompt with this marker
+                if not summarization_detected and messages:
+                    for msg in messages:
+                        # Check additional_kwargs for lc_source
+                        additional_kwargs = getattr(msg, "additional_kwargs", {}) or {}
+                        if additional_kwargs.get("lc_source") == "summarization":
+                            summarization_detected = True
+                            logger.info(
+                                f"[Agent] Summarization detected via lc_source marker"
+                            )
+                            yield {
+                                "event": "debug",
+                                "data": json.dumps({
+                                    "status": "대화가 자동으로 압축되었습니다.",
+                                    "icon": "check"
+                                }),
+                            }
+                            break
+
+                previous_message_count = current_message_count
+
                 should_process_message = False
                 if messages:
                     last_message = messages[-1]
```
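Detection is purely message-driven: per the comment in the hunk, SummarizationMiddleware tags the summary it injects with lc_source: "summarization" in additional_kwargs, and the router scans each streamed state for that tag (the previous_message_count bookkeeping is kept alongside, but only the marker triggers the debug event here). A runnable reduction of the check using a stand-in message object:

```python
from types import SimpleNamespace


def summarization_occurred(messages) -> bool:
    # Mirrors the lc_source scan added in the hunk above
    for msg in messages:
        kwargs = getattr(msg, "additional_kwargs", {}) or {}
        if kwargs.get("lc_source") == "summarization":
            return True
    return False


msgs = [
    SimpleNamespace(additional_kwargs={}),
    SimpleNamespace(additional_kwargs={"lc_source": "summarization"}),
]
assert summarization_occurred(msgs)
```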
```diff
@@ -1147,10 +1187,12 @@ async def stream_agent(request: AgentRequest):
                             }
                             return  # Exit the generator
                         else:
-                            logger.
-                                "All %d todos completed
+                            logger.info(
+                                "All %d todos completed (no summary in step) - "
+                                "continuing to wait for final_summary_tool",
                                 len(todos),
                             )
+                            # Don't auto-terminate — let agent call final_summary_tool
 
                     tool_name = getattr(last_message, "name", "") or ""
                     logger.info(
```
```diff
@@ -1338,7 +1380,14 @@ async def stream_agent(request: AgentRequest):
                         has_summary_json = (
                             '"summary"' in msg_content
                             and '"next_items"' in msg_content
+                        ) or (
+                            "'summary'" in msg_content
+                            and "'next_items'" in msg_content
                         )
+                        # Check if last_message is a ToolMessage from final_summary_tool
+                        is_final_summary_tool_msg = (
+                            getattr(last_message, "name", "") or ""
+                        ) in ("final_summary_tool", "final_summary")
                         # Also check for markdown summary format
                         has_markdown_summary = any(
                             kw in msg_content
```
```diff
@@ -1352,15 +1401,18 @@ async def stream_agent(request: AgentRequest):
                             ]
                         )
                         has_summary = (
-                            has_summary_json
+                            has_summary_json
+                            or has_markdown_summary
+                            or is_final_summary_tool_msg
                         )
 
                         # Only check current AIMessage for summary (not history, to avoid false positives)
                         if not has_summary:
-                            logger.
-                                "All todos completed
+                            logger.info(
+                                "All todos completed (no summary in message) - "
+                                "continuing to wait for final_summary_tool"
                             )
-                            # Don't terminate
+                            # Don't auto-terminate — let agent call final_summary_tool
                         else:
                             logger.info(
                                 "All %d todos completed and summary exists in current message, auto-terminating",
```
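Together, the last two hunks widen what counts as a final summary: the JSON marker in either quote style, the markdown keywords (whose list sits outside the diff), or a ToolMessage from the new final_summary_tool. Reduced to a pure function for clarity, with has_markdown_summary passed in because its keyword list is not visible here:

```python
def summary_present(
    msg_content: str, last_message_name: str, has_markdown_summary: bool
) -> bool:
    # Names mirror the hunk above
    has_summary_json = (
        '"summary"' in msg_content and '"next_items"' in msg_content
    ) or ("'summary'" in msg_content and "'next_items'" in msg_content)
    is_final_summary_tool_msg = last_message_name in (
        "final_summary_tool",
        "final_summary",
    )
    return has_summary_json or has_markdown_summary or is_final_summary_tool_msg


assert summary_present("{'summary': 1, 'next_items': []}", "", False)
assert summary_present("", "final_summary_tool", False)
```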
```diff
@@ -1616,6 +1668,10 @@ async def stream_agent(request: AgentRequest):
             for subagent_event in get_subagent_debug_events():
                 yield subagent_event
 
+            # Drain and emit any summarization events (context compression)
+            for summarization_event in get_summarization_debug_events():
+                yield summarization_event
+
             # Check for interrupt AFTER processing todos and messages
             # This ensures todos/debug events are emitted even in interrupt steps
             if isinstance(step, dict) and "__interrupt__" in step:
```
```diff
@@ -1650,7 +1706,10 @@ async def stream_agent(request: AgentRequest):
                     f"[INTERRUPT] action_requests count: {len(action_requests)}, first: {str(action_requests[0])[:200] if action_requests else 'none'}"
                 )
                 normalized_actions = [
-                    _normalize_action_request(
+                    _normalize_action_request(
+                        a, state=step if isinstance(step, dict) else None
+                    )
+                    for a in action_requests
                 ]
                 if normalized_actions:
                     _simple_agent_pending_actions[thread_id] = (
```
```diff
@@ -1711,7 +1770,27 @@ async def stream_agent(request: AgentRequest):
             )
 
             llm = create_llm(fallback_config)
-            tools
+            from agent_server.langchain.tools import (
+                jupyter_cell_tool,
+                markdown_tool,
+                ask_user_tool,
+                read_file_tool,
+                write_file_tool,
+                edit_file_tool,
+                multiedit_file_tool,
+                search_notebook_cells_tool,
+                execute_command_tool,
+                check_resource_tool,
+                diagnostics_tool,
+                references_tool,
+            )
+            tools = [
+                jupyter_cell_tool, markdown_tool, ask_user_tool,
+                read_file_tool, write_file_tool, edit_file_tool,
+                multiedit_file_tool, search_notebook_cells_tool,
+                execute_command_tool, check_resource_tool,
+                diagnostics_tool, references_tool,
+            ]
             # Force tool calling - use tool_config for Gemini, tool_choice for others
             provider = config_dict.get("provider", "gemini")
             if provider == "gemini":
```
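The truncated removed line (`tools`) suggests this fallback previously built its toolset via _get_all_tools, whose import the first hunk deletes; the fallback now imports and lists its tools explicitly rather than taking whatever the agent module exports.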
```diff
@@ -2074,56 +2153,48 @@ async def resume_agent(request: ResumeRequest):
 
     checkpointer = _simple_agent_checkpointers.get(request.threadId)
 
-    # Get agent
-    agent_mode = getattr(request, "agentMode", "single") or "single"
-    logger.info("Resume: Agent mode: %s", agent_mode)
-
-    # Get agent prompts (for multi-agent mode)
+    # Get agent prompts for per-agent customization
     agent_prompts = None
-    if
-
-        agent_prompts
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-        system_prompt_override = None
+    if request.llmConfig and request.llmConfig.agent_prompts:
+        agent_prompts = {
+            "planner": request.llmConfig.agent_prompts.planner,
+            "python_developer": (
+                request.llmConfig.agent_prompts.python_developer
+            ),
+            "researcher": request.llmConfig.agent_prompts.researcher,
+            "athena_query": request.llmConfig.agent_prompts.athena_query,
+        }
+        agent_prompts = {k: v for k, v in agent_prompts.items() if v}
+        logger.info(
+            "Resume: Using agentPrompts (%s)",
+            list(agent_prompts.keys()),
+        )
+        # Don't use systemPrompt as override — use agentPrompts.planner instead
+        if system_prompt_override:
+            logger.info(
+                "Resume: Ignoring systemPrompt (len=%d)",
+                len(system_prompt_override),
+            )
+            system_prompt_override = None
 
     agent_cache_key = _get_agent_cache_key(
         llm_config=config_dict,
         workspace_root=resolved_workspace_root,
         system_prompt_override=system_prompt_override,
-        agent_mode=agent_mode,
         agent_prompts=agent_prompts,
     )
 
     if agent_cache_key in _simple_agent_instances:
         agent = _simple_agent_instances[agent_cache_key]
         logger.info(
-            "Resume: Using cached agent for key %s (
+            "Resume: Using cached agent for key %s (total cached: %d)",
            agent_cache_key[:8],
-            agent_mode,
             len(_simple_agent_instances),
         )
    else:
         logger.info(
-            "Resume: Creating new agent for key %s
+            "Resume: Creating new agent for key %s",
             agent_cache_key[:8],
-            agent_mode,
         )
         agent = create_agent_system(
             llm_config=config_dict,
```
```diff
@@ -2131,14 +2202,12 @@ async def resume_agent(request: ResumeRequest):
         enable_hitl=True,
         checkpointer=checkpointer,
         system_prompt_override=system_prompt_override,
-        agent_mode=agent_mode,
         agent_prompts=agent_prompts,
     )
     _simple_agent_instances[agent_cache_key] = agent
     logger.info(
-        "Resume: Agent cached for key %s (
+        "Resume: Agent cached for key %s (total cached: %d)",
         agent_cache_key[:8],
-        agent_mode,
         len(_simple_agent_instances),
     )
 
```
```diff
@@ -2241,6 +2310,10 @@ async def resume_agent(request: ResumeRequest):
 
     step_count = 0
 
+    # Track message count for summarization detection
+    previous_message_count = 0
+    summarization_detected = False
+
     async for step in _async_stream_wrapper(
         agent,
         Command(resume={"decisions": langgraph_decisions}),
```
```diff
@@ -2253,6 +2326,11 @@ async def resume_agent(request: ResumeRequest):
             f"Thread {request.threadId} cancelled by user, stopping resume stream"
         )
         clear_cancelled_thread(request.threadId)
+        # Reset todo_active on cancellation
+        try:
+            agent.update_state(config, {"todo_active": False})
+        except Exception:
+            pass
         yield {
             "event": "cancelled",
             "data": json.dumps(
```
```diff
@@ -2288,6 +2366,30 @@ async def resume_agent(request: ResumeRequest):
         # Process messages (no continue statements to ensure interrupt check always runs)
         if isinstance(step, dict) and "messages" in step:
             messages = step["messages"]
+            current_message_count = len(messages) if messages else 0
+
+            # Detect summarization by checking for lc_source: "summarization" marker
+            # SummarizationMiddleware injects summary into system prompt with this marker
+            if not summarization_detected and messages:
+                for msg in messages:
+                    # Check additional_kwargs for lc_source
+                    additional_kwargs = getattr(msg, "additional_kwargs", {}) or {}
+                    if additional_kwargs.get("lc_source") == "summarization":
+                        summarization_detected = True
+                        logger.info(
+                            f"[Agent-Resume] Summarization detected via lc_source marker"
+                        )
+                        yield {
+                            "event": "debug",
+                            "data": json.dumps({
+                                "status": "대화가 자동으로 압축되었습니다.",
+                                "icon": "check"
+                            }),
+                        }
+                        break
+
+            previous_message_count = current_message_count
+
             should_process_message = False
             if messages:
                 last_message = messages[-1]
```
```diff
@@ -2457,10 +2559,12 @@ async def resume_agent(request: ResumeRequest):
                     }
                     return  # Exit the generator
                 else:
-                    logger.
-                        "Resume: All %d todos completed
+                    logger.info(
+                        "Resume: All %d todos completed (no summary in step) - "
+                        "continuing to wait for final_summary_tool",
                         len(todos),
                     )
+                    # Don't auto-terminate — let agent call final_summary_tool
 
             tool_name = getattr(last_message, "name", "") or ""
             logger.info(
```
```diff
@@ -2664,7 +2768,14 @@ async def resume_agent(request: ResumeRequest):
                 has_summary_json = (
                     '"summary"' in msg_content
                     and '"next_items"' in msg_content
+                ) or (
+                    "'summary'" in msg_content
+                    and "'next_items'" in msg_content
                 )
+                # Check if last_message is a ToolMessage from final_summary_tool
+                is_final_summary_tool_msg = (
+                    getattr(last_message, "name", "") or ""
+                ) in ("final_summary_tool", "final_summary")
                 # Also check for markdown summary format
                 has_markdown_summary = any(
                     kw in msg_content
```
```diff
@@ -2678,15 +2789,18 @@ async def resume_agent(request: ResumeRequest):
                     ]
                 )
                 has_summary = (
-                    has_summary_json
+                    has_summary_json
+                    or has_markdown_summary
+                    or is_final_summary_tool_msg
                 )
 
                 # Only check current AIMessage for summary (not history, to avoid false positives)
                 if not has_summary:
-                    logger.
-                        "Resume: All todos completed
+                    logger.info(
+                        "Resume: All todos completed (no summary in message) - "
+                        "continuing to wait for final_summary_tool"
                     )
-                    # Don't terminate
+                    # Don't auto-terminate — let agent call final_summary_tool
                 else:
                     logger.info(
                         "Resume: All %d todos completed and summary exists in current message, auto-terminating",
```
```diff
@@ -2841,6 +2955,10 @@ async def resume_agent(request: ResumeRequest):
         for subagent_event in get_subagent_debug_events():
             yield subagent_event
 
+        # Drain and emit any summarization events (context compression)
+        for summarization_event in get_summarization_debug_events():
+            yield summarization_event
+
         # Check for interrupt AFTER processing todos and messages
         # This ensures todos/debug events are emitted even in interrupt steps
         if isinstance(step, dict) and "__interrupt__" in step:
```
```diff
@@ -2864,7 +2982,10 @@ async def resume_agent(request: ResumeRequest):
             f"[RESUME INTERRUPT] action_requests count: {len(action_requests)}, first: {str(action_requests[0])[:200] if action_requests else 'none'}"
         )
         normalized_actions = [
-            _normalize_action_request(
+            _normalize_action_request(
+                a, state=step if isinstance(step, dict) else None
+            )
+            for a in action_requests
         ]
         if normalized_actions:
             _simple_agent_pending_actions[request.threadId] = (
```