hdsp-jupyter-extension 2.0.8__py3-none-any.whl → 2.0.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (63)
  1. agent_server/core/rag_manager.py +12 -3
  2. agent_server/core/retriever.py +2 -1
  3. agent_server/core/vllm_embedding_service.py +8 -5
  4. agent_server/langchain/ARCHITECTURE.md +7 -51
  5. agent_server/langchain/agent.py +31 -20
  6. agent_server/langchain/custom_middleware.py +148 -31
  7. agent_server/langchain/hitl_config.py +0 -8
  8. agent_server/langchain/llm_factory.py +85 -1
  9. agent_server/langchain/logging_utils.py +7 -7
  10. agent_server/langchain/prompts.py +45 -36
  11. agent_server/langchain/tools/__init__.py +1 -10
  12. agent_server/langchain/tools/file_tools.py +9 -61
  13. agent_server/langchain/tools/jupyter_tools.py +0 -1
  14. agent_server/langchain/tools/lsp_tools.py +8 -8
  15. agent_server/langchain/tools/resource_tools.py +12 -12
  16. agent_server/langchain/tools/search_tools.py +3 -158
  17. agent_server/routers/langchain_agent.py +122 -113
  18. {hdsp_jupyter_extension-2.0.8.data → hdsp_jupyter_extension-2.0.10.data}/data/share/jupyter/labextensions/hdsp-agent/build_log.json +1 -1
  19. {hdsp_jupyter_extension-2.0.8.data → hdsp_jupyter_extension-2.0.10.data}/data/share/jupyter/labextensions/hdsp-agent/package.json +2 -2
  20. hdsp_jupyter_extension-2.0.8.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.8740a527757068814573.js → hdsp_jupyter_extension-2.0.10.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.2d9fb488c82498c45c2d.js +93 -4
  21. hdsp_jupyter_extension-2.0.10.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.2d9fb488c82498c45c2d.js.map +1 -0
  22. hdsp_jupyter_extension-2.0.8.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.e4ff4b5779b5e049f84c.js → hdsp_jupyter_extension-2.0.10.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.dc6434bee96ab03a0539.js +90 -71
  23. hdsp_jupyter_extension-2.0.10.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.dc6434bee96ab03a0539.js.map +1 -0
  24. hdsp_jupyter_extension-2.0.8.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.020cdb0b864cfaa4e41e.js → hdsp_jupyter_extension-2.0.10.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.4a252df3ade74efee8d6.js +6 -6
  25. hdsp_jupyter_extension-2.0.10.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.4a252df3ade74efee8d6.js.map +1 -0
  26. {hdsp_jupyter_extension-2.0.8.dist-info → hdsp_jupyter_extension-2.0.10.dist-info}/METADATA +1 -3
  27. {hdsp_jupyter_extension-2.0.8.dist-info → hdsp_jupyter_extension-2.0.10.dist-info}/RECORD +57 -57
  28. jupyter_ext/_version.py +1 -1
  29. jupyter_ext/labextension/build_log.json +1 -1
  30. jupyter_ext/labextension/package.json +2 -2
  31. jupyter_ext/labextension/static/{frontend_styles_index_js.8740a527757068814573.js → frontend_styles_index_js.2d9fb488c82498c45c2d.js} +93 -4
  32. jupyter_ext/labextension/static/frontend_styles_index_js.2d9fb488c82498c45c2d.js.map +1 -0
  33. jupyter_ext/labextension/static/{lib_index_js.e4ff4b5779b5e049f84c.js → lib_index_js.dc6434bee96ab03a0539.js} +90 -71
  34. jupyter_ext/labextension/static/lib_index_js.dc6434bee96ab03a0539.js.map +1 -0
  35. jupyter_ext/labextension/static/{remoteEntry.020cdb0b864cfaa4e41e.js → remoteEntry.4a252df3ade74efee8d6.js} +6 -6
  36. jupyter_ext/labextension/static/remoteEntry.4a252df3ade74efee8d6.js.map +1 -0
  37. hdsp_jupyter_extension-2.0.8.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.8740a527757068814573.js.map +0 -1
  38. hdsp_jupyter_extension-2.0.8.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.e4ff4b5779b5e049f84c.js.map +0 -1
  39. hdsp_jupyter_extension-2.0.8.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.020cdb0b864cfaa4e41e.js.map +0 -1
  40. jupyter_ext/labextension/static/frontend_styles_index_js.8740a527757068814573.js.map +0 -1
  41. jupyter_ext/labextension/static/lib_index_js.e4ff4b5779b5e049f84c.js.map +0 -1
  42. jupyter_ext/labextension/static/remoteEntry.020cdb0b864cfaa4e41e.js.map +0 -1
  43. {hdsp_jupyter_extension-2.0.8.data → hdsp_jupyter_extension-2.0.10.data}/data/etc/jupyter/jupyter_server_config.d/hdsp_jupyter_extension.json +0 -0
  44. {hdsp_jupyter_extension-2.0.8.data → hdsp_jupyter_extension-2.0.10.data}/data/share/jupyter/labextensions/hdsp-agent/install.json +0 -0
  45. {hdsp_jupyter_extension-2.0.8.data → hdsp_jupyter_extension-2.0.10.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b80.c095373419d05e6f141a.js +0 -0
  46. {hdsp_jupyter_extension-2.0.8.data → hdsp_jupyter_extension-2.0.10.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b80.c095373419d05e6f141a.js.map +0 -0
  47. {hdsp_jupyter_extension-2.0.8.data → hdsp_jupyter_extension-2.0.10.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b81.61e75fb98ecff46cf836.js +0 -0
  48. {hdsp_jupyter_extension-2.0.8.data → hdsp_jupyter_extension-2.0.10.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b81.61e75fb98ecff46cf836.js.map +0 -0
  49. {hdsp_jupyter_extension-2.0.8.data → hdsp_jupyter_extension-2.0.10.data}/data/share/jupyter/labextensions/hdsp-agent/static/style.js +0 -0
  50. {hdsp_jupyter_extension-2.0.8.data → hdsp_jupyter_extension-2.0.10.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_babel_runtime_helpers_esm_extends_js-node_modules_emotion_serialize_dist-051195.e2553aab0c3963b83dd7.js +0 -0
  51. {hdsp_jupyter_extension-2.0.8.data → hdsp_jupyter_extension-2.0.10.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_babel_runtime_helpers_esm_extends_js-node_modules_emotion_serialize_dist-051195.e2553aab0c3963b83dd7.js.map +0 -0
  52. {hdsp_jupyter_extension-2.0.8.data → hdsp_jupyter_extension-2.0.10.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js.24edcc52a1c014a8a5f0.js +0 -0
  53. {hdsp_jupyter_extension-2.0.8.data → hdsp_jupyter_extension-2.0.10.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js.24edcc52a1c014a8a5f0.js.map +0 -0
  54. {hdsp_jupyter_extension-2.0.8.data → hdsp_jupyter_extension-2.0.10.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.19ecf6babe00caff6b8a.js +0 -0
  55. {hdsp_jupyter_extension-2.0.8.data → hdsp_jupyter_extension-2.0.10.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.19ecf6babe00caff6b8a.js.map +0 -0
  56. {hdsp_jupyter_extension-2.0.8.data → hdsp_jupyter_extension-2.0.10.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_styled_dist_emotion-styled_browser_development_esm_js.661fb5836f4978a7c6e1.js +0 -0
  57. {hdsp_jupyter_extension-2.0.8.data → hdsp_jupyter_extension-2.0.10.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_styled_dist_emotion-styled_browser_development_esm_js.661fb5836f4978a7c6e1.js.map +0 -0
  58. {hdsp_jupyter_extension-2.0.8.data → hdsp_jupyter_extension-2.0.10.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_index_js.985697e0162d8d088ca2.js +0 -0
  59. {hdsp_jupyter_extension-2.0.8.data → hdsp_jupyter_extension-2.0.10.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_index_js.985697e0162d8d088ca2.js.map +0 -0
  60. {hdsp_jupyter_extension-2.0.8.data → hdsp_jupyter_extension-2.0.10.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.1f5038488cdfd8b3a85d.js +0 -0
  61. {hdsp_jupyter_extension-2.0.8.data → hdsp_jupyter_extension-2.0.10.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.1f5038488cdfd8b3a85d.js.map +0 -0
  62. {hdsp_jupyter_extension-2.0.8.dist-info → hdsp_jupyter_extension-2.0.10.dist-info}/WHEEL +0 -0
  63. {hdsp_jupyter_extension-2.0.8.dist-info → hdsp_jupyter_extension-2.0.10.dist-info}/licenses/LICENSE +0 -0
agent_server/langchain/tools/search_tools.py

@@ -1,19 +1,16 @@
  """
  Search Tools for LangChain Agent

- Provides tools for searching code in workspace and notebooks.
- These tools return pending_execution status and are executed on the client (Jupyter) side
- using subprocess (find/grep/ripgrep).
+ Provides tools for searching notebook cells.
+ For file searching, use execute_command_tool with find/grep commands.

  Key features:
  - Returns command info for client-side execution via subprocess
- - Supports ripgrep (rg) if available, falls back to grep
  - Executes immediately without user approval
  - Shows the command being executed in status messages
  """

  import logging
- import shutil
  from typing import Any, Dict, List, Optional

  from langchain_core.tools import tool
@@ -22,23 +19,6 @@ from pydantic import BaseModel, Field
  logger = logging.getLogger(__name__)


- class SearchWorkspaceInput(BaseModel):
- """Input schema for search_workspace tool"""
-
- pattern: str = Field(description="Search pattern (regex or text)")
- file_types: List[str] = Field(
- default=["*.py", "*.ipynb"],
- description="File patterns to search (e.g., ['*.py', '*.ipynb'])",
- )
- path: str = Field(default=".", description="Directory to search in")
- max_results: int = Field(default=50, description="Maximum number of results")
- case_sensitive: bool = Field(default=False, description="Case-sensitive search")
- execution_result: Optional[Dict[str, Any]] = Field(
- default=None,
- description="Execution result payload from the client",
- )
-
-
  class SearchNotebookCellsInput(BaseModel):
  """Input schema for search_notebook_cells tool"""

@@ -58,72 +38,6 @@ class SearchNotebookCellsInput(BaseModel):
  )


- def _is_ripgrep_available() -> bool:
- """Check if ripgrep (rg) is installed and available."""
- return shutil.which("rg") is not None
-
-
- def _build_grep_command(
- pattern: str,
- file_types: List[str],
- path: str,
- case_sensitive: bool,
- max_results: int,
- ) -> tuple[str, str]:
- """
- Build a grep/ripgrep command for searching files.
-
- Returns:
- Tuple of (command_string, tool_name) where tool_name is 'rg' or 'grep'
- """
- # Check ripgrep availability (this check will also be done on client)
- use_ripgrep = _is_ripgrep_available()
-
- if use_ripgrep:
- # Build ripgrep command
- cmd_parts = ["rg", "--line-number", "--with-filename"]
-
- if not case_sensitive:
- cmd_parts.append("--ignore-case")
-
- # Add file type filters using glob patterns
- for ft in file_types:
- cmd_parts.extend(["--glob", ft])
-
- # Limit results
- cmd_parts.extend(["--max-count", str(max_results)])
-
- # Escape pattern for shell
- escaped_pattern = pattern.replace("'", "'\\''")
- cmd_parts.append(f"'{escaped_pattern}'")
- cmd_parts.append(path)
-
- return " ".join(cmd_parts), "rg"
- else:
- # Build find + grep command for cross-platform compatibility
- find_parts = ["find", path, "-type", "f", "("]
-
- for i, ft in enumerate(file_types):
- if i > 0:
- find_parts.append("-o")
- find_parts.extend(["-name", f"'{ft}'"])
-
- find_parts.append(")")
-
- # Add grep with proper flags
- grep_flags = "-n" # Line numbers
- if not case_sensitive:
- grep_flags += "i"
-
- # Escape pattern for shell
- escaped_pattern = pattern.replace("'", "'\\''")
-
- # Combine with xargs for efficiency
- cmd = f"{' '.join(find_parts)} 2>/dev/null | xargs grep -{grep_flags} '{escaped_pattern}' 2>/dev/null | head -n {max_results}"
-
- return cmd, "grep"
-
-
  def _build_notebook_search_command(
  pattern: str,
  notebook_path: Optional[str],
@@ -139,74 +53,6 @@ def _build_notebook_search_command(
  )


- @tool(args_schema=SearchWorkspaceInput)
- def search_workspace_tool(
- pattern: str,
- file_types: List[str] = None,
- path: str = ".",
- max_results: int = 50,
- case_sensitive: bool = False,
- execution_result: Optional[Dict[str, Any]] = None,
- workspace_root: str = ".",
- ) -> Dict[str, Any]:
- """
- Search for a pattern across files in the workspace.
-
- This tool is executed on the client side using subprocess (grep/ripgrep).
- Searches both regular files and Jupyter notebooks.
-
- Args:
- pattern: Search pattern (regex or text)
- file_types: File patterns to search (default: ['*.py', '*.ipynb'])
- path: Directory to search in (relative to workspace)
- max_results: Maximum number of results to return
- case_sensitive: Whether search is case-sensitive
-
- Returns:
- Dict with search results or pending_execution status
- """
- if file_types is None:
- file_types = ["*.py", "*.ipynb"]
-
- # Build the search command
- command, tool_used = _build_grep_command(
- pattern=pattern,
- file_types=file_types,
- path=path,
- case_sensitive=case_sensitive,
- max_results=max_results,
- )
-
- response: Dict[str, Any] = {
- "tool": "search_workspace_tool",
- "parameters": {
- "pattern": pattern,
- "file_types": file_types,
- "path": path,
- "max_results": max_results,
- "case_sensitive": case_sensitive,
- },
- "command": command,
- "tool_used": tool_used,
- "status": "pending_execution",
- "message": "Search queued for execution by client",
- }
-
- if execution_result is not None:
- response["execution_result"] = execution_result
- response["status"] = "complete"
- response["message"] = "Search executed with client-reported results"
- # Parse the execution result to extract search results
- if isinstance(execution_result, dict):
- response["success"] = execution_result.get("success", False)
- response["results"] = execution_result.get("results", [])
- response["total_results"] = execution_result.get("total_results", 0)
- if "error" in execution_result:
- response["error"] = execution_result["error"]
-
- return response
-
-
  @tool(args_schema=SearchNotebookCellsInput)
  def search_notebook_cells_tool(
  pattern: str,
@@ -281,11 +127,10 @@ def create_search_tools(workspace_root: str = ".") -> List:
  Note: workspace_root is not used since tools return pending_execution
  and actual execution happens on the client side.
  """
- return [search_workspace_tool, search_notebook_cells_tool]
+ return [search_notebook_cells_tool]


  # Export all tools
  SEARCH_TOOLS = [
- search_workspace_tool,
  search_notebook_cells_tool,
  ]
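With search_workspace_tool and its grep/ripgrep command builder removed, the updated module docstring points file searches at execute_command_tool with find/grep. The sketch below rebuilds an equivalent find + grep pipeline for reference; build_grep_command is an illustrative helper written for this note, not part of the released package, and the exact execute_command_tool signature is not shown in this diff.

import shlex

def build_grep_command(pattern, path=".", file_types=("*.py", "*.ipynb"),
                       case_sensitive=False, max_results=50):
    # Mirrors the removed find+grep fallback: find <path> -type f \( -name '*.py' -o ... \)
    name_filters = []
    for i, ft in enumerate(file_types):
        if i > 0:
            name_filters.append("-o")
        name_filters += ["-name", shlex.quote(ft)]
    find_part = f"find {shlex.quote(path)} -type f \\( {' '.join(name_filters)} \\)"
    grep_flags = "-n" if case_sensitive else "-ni"
    return (
        f"{find_part} 2>/dev/null"
        f" | xargs grep {grep_flags} {shlex.quote(pattern)} 2>/dev/null"
        f" | head -n {max_results}"
    )

# Example command string an agent could hand to execute_command_tool:
print(build_grep_command("def stream_agent", path="agent_server"))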
agent_server/routers/langchain_agent.py

@@ -702,6 +702,27 @@ async def stream_agent(request: AgentRequest):
  "event": "todos",
  "data": json.dumps({"todos": todos}),
  }
+ # Check if all todos are completed - auto terminate
+ all_completed = all(
+ t.get("status") == "completed" for t in todos
+ )
+ if all_completed and len(todos) > 0:
+ logger.info(
+ "All %d todos completed, auto-terminating agent",
+ len(todos),
+ )
+ yield {
+ "event": "debug_clear",
+ "data": json.dumps({}),
+ }
+ yield {
+ "event": "done",
+ "data": json.dumps(
+ {"reason": "all_todos_completed"}
+ ),
+ }
+ return # Exit the generator
+
  tool_name = getattr(last_message, "name", "") or ""
  logger.info(
  "SimpleAgent ToolMessage name attribute: %s", tool_name
@@ -727,7 +748,7 @@
  final_answer = tool_result.get(
  "answer"
  ) or tool_result.get("parameters", {}).get("answer")
-
+
  # Check for next_items in answer field (LLM may put JSON here)
  if final_answer:
  try:
@@ -735,25 +756,41 @@
  if "next_items" in answer_json:
  next_items_block = f"\n\n```json\n{json.dumps(answer_json, ensure_ascii=False, indent=2)}\n```"
  # Get summary for the main text
- summary_text = tool_result.get(
- "summary"
- ) or tool_result.get("parameters", {}).get("summary") or ""
- final_answer = summary_text + next_items_block
- logger.info("Extracted next_items from answer field")
+ summary_text = (
+ tool_result.get("summary")
+ or tool_result.get(
+ "parameters", {}
+ ).get("summary")
+ or ""
+ )
+ final_answer = (
+ summary_text + next_items_block
+ )
+ logger.info(
+ "Extracted next_items from answer field"
+ )
  except (json.JSONDecodeError, TypeError):
  pass
-
+
  # Check for next_items in summary field (Gemini puts JSON here)
  summary = tool_result.get(
  "summary"
- ) or tool_result.get("parameters", {}).get("summary")
- if summary and "next_items" not in (final_answer or ""):
+ ) or tool_result.get("parameters", {}).get(
+ "summary"
+ )
+ if summary and "next_items" not in (
+ final_answer or ""
+ ):
  try:
  summary_json = json.loads(summary)
  if "next_items" in summary_json:
  next_items_block = f"\n\n```json\n{json.dumps(summary_json, ensure_ascii=False, indent=2)}\n```"
- final_answer = (final_answer or "") + next_items_block
- logger.info("Extracted next_items from summary field")
+ final_answer = (
+ final_answer or ""
+ ) + next_items_block
+ logger.info(
+ "Extracted next_items from summary field"
+ )
  except (json.JSONDecodeError, TypeError):
  pass
  if final_answer:
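The reformatted block above (and its twin in resume_agent further down) looks for a next_items JSON payload that the model may have placed in either the answer or the summary field and appends it to the final answer as a fenced json block. A condensed sketch of that logic; extract_next_items is an illustrative helper, not a function in the package:

import json

def extract_next_items(tool_result: dict) -> str:
    # Condensed sketch of the next_items handling in stream_agent / resume_agent
    params = tool_result.get("parameters", {})
    answer = tool_result.get("answer") or params.get("answer")
    summary = tool_result.get("summary") or params.get("summary")

    def as_block(payload: dict) -> str:
        # Re-emit the parsed JSON as a fenced block appended to the visible text
        return f"\n\n```json\n{json.dumps(payload, ensure_ascii=False, indent=2)}\n```"

    # Case 1: the model put the next_items JSON into the answer field
    try:
        answer_json = json.loads(answer) if answer else {}
        if "next_items" in answer_json:
            return (summary or "") + as_block(answer_json)
    except (json.JSONDecodeError, TypeError):
        pass

    # Case 2: the JSON landed in the summary field instead (seen with Gemini)
    try:
        summary_json = json.loads(summary) if summary else {}
        if "next_items" in summary_json and "next_items" not in (answer or ""):
            return (answer or "") + as_block(summary_json)
    except (json.JSONDecodeError, TypeError):
        pass

    return answer or ""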
@@ -878,13 +915,6 @@ async def stream_agent(request: AgentRequest):

  # Create detailed status message for search tools
  if tool_name in (
- "search_workspace_tool",
- "search_workspace",
- ):
- pattern = tool_args.get("pattern", "")
- path = tool_args.get("path", ".")
- status_msg = f"🔍 검색 실행: grep/rg '{pattern}' in {path}"
- elif tool_name in (
  "search_notebook_cells_tool",
  "search_notebook_cells",
  ):
@@ -950,34 +980,6 @@ async def stream_agent(request: AgentRequest):
  }
  ),
  }
- elif tool_name in (
- "search_workspace_tool",
- "search_workspace",
- ):
- # Search workspace - emit tool_call for client-side execution
- produced_output = True
- yield {
- "event": "tool_call",
- "data": json.dumps(
- {
- "tool": "search_workspace",
- "pattern": tool_args.get(
- "pattern", ""
- ),
- "file_types": tool_args.get(
- "file_types",
- ["*.py", "*.ipynb"],
- ),
- "path": tool_args.get("path", "."),
- "max_results": tool_args.get(
- "max_results", 50
- ),
- "case_sensitive": tool_args.get(
- "case_sensitive", False
- ),
- }
- ),
- }
  elif tool_name in (
  "search_notebook_cells_tool",
  "search_notebook_cells",
@@ -1161,7 +1163,7 @@ async def stream_agent(request: AgentRequest):
  content=(
  "You MUST respond with a valid tool call. "
  "Available tools: jupyter_cell_tool (for Python code), markdown_tool (for text), "
- "list_files_tool (to list files), read_file_tool (to read files). "
+ "execute_command_tool (to search files with find/grep), read_file_tool (to read files). "
  "Choose the most appropriate tool and provide valid JSON arguments."
  )
  ),
@@ -1280,11 +1282,7 @@ async def stream_agent(request: AgentRequest):
  }
  ),
  }
- elif tool_name in (
- "read_file_tool",
- "list_files_tool",
- "search_workspace_tool",
- ):
+ elif tool_name == "read_file_tool":
  # For file operations, generate code with the LLM
  logger.info(
  "Fallback: Generating code for %s via LLM",
@@ -1447,11 +1445,13 @@ async def resume_agent(request: ResumeRequest):
  )
  yield {
  "event": "error",
- "data": json.dumps({
- "error": "Session expired or not found",
- "code": "CHECKPOINT_NOT_FOUND",
- "message": "이전 세션을 찾을 수 없습니다. 서버가 재시작되었거나 세션이 만료되었습니다. 새로운 대화를 시작해주세요.",
- }),
+ "data": json.dumps(
+ {
+ "error": "Session expired or not found",
+ "code": "CHECKPOINT_NOT_FOUND",
+ "message": "이전 세션을 찾을 수 없습니다. 서버가 재시작되었거나 세션이 만료되었습니다. 새로운 대화를 시작해주세요.",
+ }
+ ),
  }
  return

@@ -1471,7 +1471,9 @@ async def resume_agent(request: ResumeRequest):
  len(_simple_agent_instances),
  )
  else:
- logger.info("Resume: Creating new agent for key %s", agent_cache_key[:8])
+ logger.info(
+ "Resume: Creating new agent for key %s", agent_cache_key[:8]
+ )
  agent = create_simple_chat_agent(
  llm_config=config_dict,
  workspace_root=resolved_workspace_root,
@@ -1654,6 +1656,27 @@ async def resume_agent(request: ResumeRequest):
  "event": "todos",
  "data": json.dumps({"todos": todos}),
  }
+ # Check if all todos are completed - auto terminate
+ all_completed = all(
+ t.get("status") == "completed" for t in todos
+ )
+ if all_completed and len(todos) > 0:
+ logger.info(
+ "Resume: All %d todos completed, auto-terminating agent",
+ len(todos),
+ )
+ yield {
+ "event": "debug_clear",
+ "data": json.dumps({}),
+ }
+ yield {
+ "event": "done",
+ "data": json.dumps(
+ {"reason": "all_todos_completed"}
+ ),
+ }
+ return # Exit the generator
+
  tool_name = getattr(last_message, "name", "") or ""
  logger.info(
  "Resume ToolMessage name attribute: %s", tool_name
@@ -1677,7 +1700,7 @@ async def resume_agent(request: ResumeRequest):
  final_answer = tool_result.get(
  "answer"
  ) or tool_result.get("parameters", {}).get("answer")
-
+
  # Check for next_items in answer field (LLM may put JSON here)
  if final_answer:
  try:
@@ -1685,25 +1708,41 @@ async def resume_agent(request: ResumeRequest):
  if "next_items" in answer_json:
  next_items_block = f"\n\n```json\n{json.dumps(answer_json, ensure_ascii=False, indent=2)}\n```"
  # Get summary for the main text
- summary_text = tool_result.get(
- "summary"
- ) or tool_result.get("parameters", {}).get("summary") or ""
- final_answer = summary_text + next_items_block
- logger.info("Resume: Extracted next_items from answer field")
+ summary_text = (
+ tool_result.get("summary")
+ or tool_result.get(
+ "parameters", {}
+ ).get("summary")
+ or ""
+ )
+ final_answer = (
+ summary_text + next_items_block
+ )
+ logger.info(
+ "Resume: Extracted next_items from answer field"
+ )
  except (json.JSONDecodeError, TypeError):
  pass
-
+
  # Check for next_items in summary field (Gemini puts JSON here)
  summary = tool_result.get(
  "summary"
- ) or tool_result.get("parameters", {}).get("summary")
- if summary and "next_items" not in (final_answer or ""):
+ ) or tool_result.get("parameters", {}).get(
+ "summary"
+ )
+ if summary and "next_items" not in (
+ final_answer or ""
+ ):
  try:
  summary_json = json.loads(summary)
  if "next_items" in summary_json:
  next_items_block = f"\n\n```json\n{json.dumps(summary_json, ensure_ascii=False, indent=2)}\n```"
- final_answer = (final_answer or "") + next_items_block
- logger.info("Resume: Extracted next_items from summary field")
+ final_answer = (
+ final_answer or ""
+ ) + next_items_block
+ logger.info(
+ "Resume: Extracted next_items from summary field"
+ )
  except (json.JSONDecodeError, TypeError):
  pass
  if final_answer:
@@ -1850,13 +1889,6 @@ async def resume_agent(request: ResumeRequest):

  # Create detailed status message for search tools
  if tool_name in (
- "search_workspace_tool",
- "search_workspace",
- ):
- pattern = tool_args.get("pattern", "")
- path = tool_args.get("path", ".")
- status_msg = f"🔍 검색 실행: grep/rg '{pattern}' in {path}"
- elif tool_name in (
  "search_notebook_cells_tool",
  "search_notebook_cells",
  ):
@@ -1914,33 +1946,6 @@ async def resume_agent(request: ResumeRequest):
  }
  ),
  }
- elif tool_name in (
- "search_workspace_tool",
- "search_workspace",
- ):
- # Search workspace - emit tool_call for client-side execution
- yield {
- "event": "tool_call",
- "data": json.dumps(
- {
- "tool": "search_workspace",
- "pattern": tool_args.get(
- "pattern", ""
- ),
- "file_types": tool_args.get(
- "file_types",
- ["*.py", "*.ipynb"],
- ),
- "path": tool_args.get("path", "."),
- "max_results": tool_args.get(
- "max_results", 50
- ),
- "case_sensitive": tool_args.get(
- "case_sensitive", False
- ),
- }
- ),
- }
  elif tool_name in (
  "search_notebook_cells_tool",
  "search_notebook_cells",
@@ -2057,20 +2062,24 @@ async def resume_agent(request: ResumeRequest):
  )
  yield {
  "event": "error",
- "data": json.dumps({
- "error": "Session state lost",
- "code": "CONTENTS_NOT_SPECIFIED",
- "error_type": type(e).__name__,
- "message": "세션 상태가 손실되었습니다. 서버가 재시작되었거나 세션이 만료되었습니다. 새로운 대화를 시작해주세요.",
- }),
+ "data": json.dumps(
+ {
+ "error": "Session state lost",
+ "code": "CONTENTS_NOT_SPECIFIED",
+ "error_type": type(e).__name__,
+ "message": "세션 상태가 손실되었습니다. 서버가 재시작되었거나 세션이 만료되었습니다. 새로운 대화를 시작해주세요.",
+ }
+ ),
  }
  else:
  yield {
  "event": "error",
- "data": json.dumps({
- "error": error_msg,
- "error_type": type(e).__name__,
- }),
+ "data": json.dumps(
+ {
+ "error": error_msg,
+ "error_type": type(e).__name__,
+ }
+ ),
  }

  return EventSourceResponse(event_generator())
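Both stream_agent and resume_agent emit their output as server-sent events wrapped in EventSourceResponse, including the new done event with reason all_todos_completed and structured error payloads carrying code and message fields. A rough client-side sketch of consuming that stream, assuming httpx and an illustrative endpoint URL (the real consumer is the JupyterLab frontend):

import json
import httpx  # assumption: any streaming HTTP client works; the endpoint path below is illustrative

def consume_agent_stream(url, payload):
    event = None
    with httpx.stream("POST", url, json=payload, timeout=None) as resp:
        for line in resp.iter_lines():
            if line.startswith("event:"):
                event = line.split(":", 1)[1].strip()
            elif line.startswith("data:"):
                data = json.loads(line.split(":", 1)[1].strip() or "{}")
                if event == "todos":
                    print("todos:", data.get("todos"))
                elif event == "done":
                    print("finished:", data.get("reason"))  # e.g. "all_todos_completed"
                    break
                elif event == "error":
                    print("error:", data.get("code"), data.get("message"))
                    break

# consume_agent_stream("http://localhost:8000/agent/stream", {"message": "..."})  # illustrative URL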
labextension build_log.json

@@ -722,7 +722,7 @@
  "@mui/material": {},
  "react-markdown": {},
  "hdsp-agent": {
- "version": "2.0.8",
+ "version": "2.0.10",
  "singleton": true,
  "import": "/Users/a421721/Desktop/hdsp/hdsp_agent/extensions/jupyter/lib/index.js"
  }
labextension package.json

@@ -1,6 +1,6 @@
  {
  "name": "hdsp-agent",
- "version": "2.0.8",
+ "version": "2.0.10",
  "description": "HDSP Agent JupyterLab Extension - Thin client for Agent Server",
  "keywords": [
  "jupyter",
@@ -127,7 +127,7 @@
  }
  },
  "_build": {
- "load": "static/remoteEntry.020cdb0b864cfaa4e41e.js",
+ "load": "static/remoteEntry.4a252df3ade74efee8d6.js",
  "extension": "./extension",
  "style": "./style"
  }