hdsp-jupyter-extension 2.0.6__py3-none-any.whl → 2.0.8__py3-none-any.whl

This diff compares publicly available package versions as they appear in their respective public registries. It is provided for informational purposes only.
Files changed (88)
  1. agent_server/core/embedding_service.py +67 -46
  2. agent_server/core/rag_manager.py +31 -17
  3. agent_server/core/reflection_engine.py +0 -1
  4. agent_server/core/retriever.py +13 -8
  5. agent_server/core/vllm_embedding_service.py +243 -0
  6. agent_server/knowledge/watchdog_service.py +1 -1
  7. agent_server/langchain/ARCHITECTURE.md +1193 -0
  8. agent_server/langchain/agent.py +82 -588
  9. agent_server/langchain/custom_middleware.py +663 -0
  10. agent_server/langchain/executors/__init__.py +2 -7
  11. agent_server/langchain/executors/notebook_searcher.py +46 -38
  12. agent_server/langchain/hitl_config.py +71 -0
  13. agent_server/langchain/llm_factory.py +166 -0
  14. agent_server/langchain/logging_utils.py +223 -0
  15. agent_server/langchain/prompts.py +150 -0
  16. agent_server/langchain/state.py +16 -6
  17. agent_server/langchain/tools/__init__.py +19 -0
  18. agent_server/langchain/tools/file_tools.py +354 -114
  19. agent_server/langchain/tools/file_utils.py +334 -0
  20. agent_server/langchain/tools/jupyter_tools.py +18 -18
  21. agent_server/langchain/tools/lsp_tools.py +264 -0
  22. agent_server/langchain/tools/resource_tools.py +161 -0
  23. agent_server/langchain/tools/search_tools.py +198 -216
  24. agent_server/langchain/tools/shell_tools.py +54 -0
  25. agent_server/main.py +11 -1
  26. agent_server/routers/health.py +1 -1
  27. agent_server/routers/langchain_agent.py +1040 -289
  28. agent_server/routers/rag.py +8 -3
  29. hdsp_agent_core/models/rag.py +15 -1
  30. hdsp_agent_core/prompts/auto_agent_prompts.py +3 -3
  31. hdsp_agent_core/services/rag_service.py +6 -1
  32. {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.8.data}/data/share/jupyter/labextensions/hdsp-agent/build_log.json +1 -1
  33. {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.8.data}/data/share/jupyter/labextensions/hdsp-agent/package.json +3 -2
  34. hdsp_jupyter_extension-2.0.6.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.02d346171474a0fb2dc1.js → hdsp_jupyter_extension-2.0.8.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.8740a527757068814573.js +470 -7
  35. hdsp_jupyter_extension-2.0.8.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.8740a527757068814573.js.map +1 -0
  36. hdsp_jupyter_extension-2.0.6.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.a223ea20056954479ae9.js → hdsp_jupyter_extension-2.0.8.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.e4ff4b5779b5e049f84c.js +3196 -441
  37. hdsp_jupyter_extension-2.0.8.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.e4ff4b5779b5e049f84c.js.map +1 -0
  38. hdsp_jupyter_extension-2.0.6.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.addf2fa038fa60304aa2.js → hdsp_jupyter_extension-2.0.8.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.020cdb0b864cfaa4e41e.js +9 -7
  39. hdsp_jupyter_extension-2.0.8.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.020cdb0b864cfaa4e41e.js.map +1 -0
  40. {hdsp_jupyter_extension-2.0.6.dist-info → hdsp_jupyter_extension-2.0.8.dist-info}/METADATA +2 -1
  41. {hdsp_jupyter_extension-2.0.6.dist-info → hdsp_jupyter_extension-2.0.8.dist-info}/RECORD +75 -69
  42. jupyter_ext/__init__.py +18 -0
  43. jupyter_ext/_version.py +1 -1
  44. jupyter_ext/handlers.py +1351 -58
  45. jupyter_ext/labextension/build_log.json +1 -1
  46. jupyter_ext/labextension/package.json +3 -2
  47. jupyter_ext/labextension/static/{frontend_styles_index_js.02d346171474a0fb2dc1.js → frontend_styles_index_js.8740a527757068814573.js} +470 -7
  48. jupyter_ext/labextension/static/frontend_styles_index_js.8740a527757068814573.js.map +1 -0
  49. jupyter_ext/labextension/static/{lib_index_js.a223ea20056954479ae9.js → lib_index_js.e4ff4b5779b5e049f84c.js} +3196 -441
  50. jupyter_ext/labextension/static/lib_index_js.e4ff4b5779b5e049f84c.js.map +1 -0
  51. jupyter_ext/labextension/static/{remoteEntry.addf2fa038fa60304aa2.js → remoteEntry.020cdb0b864cfaa4e41e.js} +9 -7
  52. jupyter_ext/labextension/static/remoteEntry.020cdb0b864cfaa4e41e.js.map +1 -0
  53. jupyter_ext/resource_usage.py +180 -0
  54. jupyter_ext/tests/test_handlers.py +58 -0
  55. agent_server/langchain/executors/jupyter_executor.py +0 -429
  56. agent_server/langchain/middleware/__init__.py +0 -36
  57. agent_server/langchain/middleware/code_search_middleware.py +0 -278
  58. agent_server/langchain/middleware/error_handling_middleware.py +0 -338
  59. agent_server/langchain/middleware/jupyter_execution_middleware.py +0 -301
  60. agent_server/langchain/middleware/rag_middleware.py +0 -227
  61. agent_server/langchain/middleware/validation_middleware.py +0 -240
  62. hdsp_jupyter_extension-2.0.6.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.02d346171474a0fb2dc1.js.map +0 -1
  63. hdsp_jupyter_extension-2.0.6.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.a223ea20056954479ae9.js.map +0 -1
  64. hdsp_jupyter_extension-2.0.6.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.addf2fa038fa60304aa2.js.map +0 -1
  65. jupyter_ext/labextension/static/frontend_styles_index_js.02d346171474a0fb2dc1.js.map +0 -1
  66. jupyter_ext/labextension/static/lib_index_js.a223ea20056954479ae9.js.map +0 -1
  67. jupyter_ext/labextension/static/remoteEntry.addf2fa038fa60304aa2.js.map +0 -1
  68. {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.8.data}/data/etc/jupyter/jupyter_server_config.d/hdsp_jupyter_extension.json +0 -0
  69. {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.8.data}/data/share/jupyter/labextensions/hdsp-agent/install.json +0 -0
  70. {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.8.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b80.c095373419d05e6f141a.js +0 -0
  71. {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.8.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b80.c095373419d05e6f141a.js.map +0 -0
  72. {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.8.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b81.61e75fb98ecff46cf836.js +0 -0
  73. {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.8.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b81.61e75fb98ecff46cf836.js.map +0 -0
  74. {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.8.data}/data/share/jupyter/labextensions/hdsp-agent/static/style.js +0 -0
  75. {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.8.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_babel_runtime_helpers_esm_extends_js-node_modules_emotion_serialize_dist-051195.e2553aab0c3963b83dd7.js +0 -0
  76. {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.8.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_babel_runtime_helpers_esm_extends_js-node_modules_emotion_serialize_dist-051195.e2553aab0c3963b83dd7.js.map +0 -0
  77. {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.8.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js.24edcc52a1c014a8a5f0.js +0 -0
  78. {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.8.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js.24edcc52a1c014a8a5f0.js.map +0 -0
  79. {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.8.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.19ecf6babe00caff6b8a.js +0 -0
  80. {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.8.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.19ecf6babe00caff6b8a.js.map +0 -0
  81. {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.8.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_styled_dist_emotion-styled_browser_development_esm_js.661fb5836f4978a7c6e1.js +0 -0
  82. {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.8.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_styled_dist_emotion-styled_browser_development_esm_js.661fb5836f4978a7c6e1.js.map +0 -0
  83. {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.8.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_index_js.985697e0162d8d088ca2.js +0 -0
  84. {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.8.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_index_js.985697e0162d8d088ca2.js.map +0 -0
  85. {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.8.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.1f5038488cdfd8b3a85d.js +0 -0
  86. {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.8.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.1f5038488cdfd8b3a85d.js.map +0 -0
  87. {hdsp_jupyter_extension-2.0.6.dist-info → hdsp_jupyter_extension-2.0.8.dist-info}/WHEEL +0 -0
  88. {hdsp_jupyter_extension-2.0.6.dist-info → hdsp_jupyter_extension-2.0.8.dist-info}/licenses/LICENSE +0 -0
agent_server/langchain/prompts.py (new file)
@@ -0,0 +1,150 @@
+ """
+ Prompt templates for LangChain agent.
+
+ Contains system prompts, JSON schema for fallback tool calling,
+ and middleware-specific prompts.
+ """
+
+ DEFAULT_SYSTEM_PROMPT = """You are an expert Python data scientist and Jupyter notebook assistant.
+ Your role is to help users with data analysis, visualization, and Python coding tasks in Jupyter notebooks. You can use only Korean
+
+ # Core Behavior
+ Be concise and direct. Answer in fewer than 4 lines unless the user asks for detail.
+ After working on a file, just stop - don't explain what you did unless asked.
+ Avoid unnecessary introductions or conclusions.
+
+ ## Task Management
+ Use write_todos for complex multi-step tasks (3+ steps). Mark tasks in_progress before starting, completed immediately after finishing.
+ For simple 1-2 step tasks, just do them directly without todos.
+
+ You MUST ALWAYS call a tool in every response. After any tool result, you MUST:
+ 1. Check your todo list - are there pending or in_progress items?
+ 2. If YES → call the next appropriate tool (jupyter_cell_tool, markdown_tool, etc.)
+ 3. When you suggest next steps for todo item '다음 단계 제시', you MUST create next steps in json format matching this schema:
+ {
+   "next_items": [
+     {
+       "subject": "<subject for next step>",
+       "description": "<detailed description for the next step>"
+     }, ...
+   ]
+ }
+ 4. If ALL todos are completed → call final_answer_tool with a summary
+
+ ## 🔴 MANDATORY: Resource Check Before Data Hanlding
+ **ALWAYS call check_resource_tool FIRST** when the task involves:
+ - Loading files: .csv, .parquet, .json, .xlsx, .pickle, .h5, .feather
+ - Handling datasets(dataframe) with pandas, polars, dask, or similar libraries
+ - Training ML models on data files
+
+ ## Mandatory Workflow
+ 1. After EVERY tool result, immediately call the next tool
+ 2. Continue until ALL todos show status: "completed"
+ 3. ONLY THEN call final_answer_tool to summarize
+ 4. Only use jupyter_cell_tool for Python code or when the user explicitly asks to run in a notebook cell
+ 5. For plots and charts, use English text only.
+
+ ## ❌ FORBIDDEN (will break the workflow)
+ - Producing an empty response (no tool call, no content)
+ - Stopping after any tool without calling the next tool
+ - Ending without calling final_answer_tool
+ - Leaving todos in "in_progress" or "pending" state without continuing
+
+ ## 📖 File Reading Best Practices
+ **CRITICAL**: When exploring codebases or reading files, use pagination to prevent context overflow.
+
+ **Pattern for codebase exploration:**
+ 1. First scan: `read_file_tool(path, limit=100)` - See file structure and key sections
+ 2. Targeted read: `read_file_tool(path, offset=100, limit=200)` - Read specific sections if needed
+ 3. Full read: Only read without limit when necessary for immediate editing
+
+ **When to paginate (use offset/limit):**
+ - Reading any file >500 lines
+ - Exploring unfamiliar codebases (always start with limit=100)
+ - Reading multiple files in sequence
+ - Any research or investigation task
+
+ **When full read is OK:**
+ - Small files (<500 lines)
+ - Files you need to edit immediately after reading
+ - After confirming file size with first scan
+
+ ## 🔧 Code Development
+ For code generation/refactoring, use LSP tools (diagnostics_tool, references_tool) to check errors and find symbol usages. Use multiedit_file_tool for multiple changes in one file.
+ """
+
+ JSON_TOOL_SCHEMA = """You MUST respond with ONLY valid JSON matching this schema:
+ {
+   "tool": "<tool_name>",
+   "arguments": {"arg1": "value1", ...}
+ }
+
+ Available tools:
+ - jupyter_cell_tool: Execute Python code. Arguments: {"code": "<python_code>"}
+ - markdown_tool: Add markdown cell. Arguments: {"content": "<markdown>"}
+ - final_answer_tool: Complete task. Arguments: {"answer": "<summary>"}
+ - write_todos: Update task list. Arguments: {"todos": [{"content": "...", "status": "pending|in_progress|completed"}]}
+ - read_file_tool: Read file with pagination. Arguments: {"path": "<file_path>", "offset": 0, "limit": 500}
+ - write_file_tool: Write file. Arguments: {"path": "<path>", "content": "<content>", "overwrite": false}
+ - list_files_tool: List directory. Arguments: {"path": ".", "recursive": false}
+ - search_workspace_tool: Search files. Arguments: {"pattern": "<regex>", "file_types": ["py"], "path": "."}
+ - search_notebook_cells_tool: Search notebook cells. Arguments: {"pattern": "<regex>"}
+ - execute_command_tool: Execute shell command. Arguments: {"command": "<command>", "stdin": "<input_for_prompts>"}
+ - check_resource_tool: Check resources before data processing. Arguments: {"files": ["<path>"], "dataframes": ["<var_name>"]}
+
+ Output ONLY the JSON object, no markdown, no explanation."""
+
+ TODO_LIST_SYSTEM_PROMPT = """
+ ## CRITICAL WORKFLOW RULES - MUST FOLLOW:
+ - NEVER stop after calling write_todos - ALWAYS make another tool call immediately
+ - For simple 1-2 step tasks, just do them directly without todos.
+
+ ## 🔴 NEW USER MESSAGE = FRESH START:
+ - When user sends a NEW message, treat it as a COMPLETELY NEW TASK
+ - IGNORE any previous todo completion history - start fresh
+ - Do NOT assume any work was already done based on past conversations
+ - Create a NEW todo list for the new request, even if similar items existed before
+ - "다음 단계 제시" from a previous task is NOT completed for the new task
+
+ ## Todo List Management:
+ - Before complex tasks, use write_todos to create a task list
+ - Update todos as you complete each step (mark 'in_progress' → 'completed')
+ - Each todo item should be specific and descriptive
+ - All todo items must be written in Korean
+ - ALWAYS include "다음 단계 제시" as the LAST item
+
+ ## Task Completion Flow:
+ 1. When current task is done → mark it 'completed' with write_todos
+ 2. For "다음 단계 제시" → mark completed, then call final_answer_tool with suggestions
+
+ ## FORBIDDEN PATTERNS:
+ ❌ Calling write_todos and then stopping
+ ❌ Updating todo status without doing the actual work
+ ❌ Ending turn without calling final_answer_tool when all tasks are done
+ ❌ Marking a todo as 'completed' without actually executing it in THIS conversation
+ """
+
+ TODO_LIST_TOOL_DESCRIPTION = """Update the task list for tracking progress.
+ ⚠️ CRITICAL: This tool is ONLY for tracking - it does NOT do any actual work.
+ After calling this tool, you MUST IMMEDIATELY call another tool (jupyter_cell_tool, markdown_tool, or final_answer_tool).
+ NEVER end your response after calling write_todos - always continue with the next action tool."""
+
+ # Non-HITL tools that execute immediately without user approval
+ NON_HITL_TOOLS = {
+     "markdown_tool",
+     "markdown",
+     "read_file_tool",
+     "read_file",
+     "list_files_tool",
+     "list_files",
+     "search_workspace_tool",
+     "search_workspace",
+     "search_notebook_cells_tool",
+     "search_notebook_cells",
+     "write_todos",
+     # LSP tools (read-only)
+     "diagnostics_tool",
+     "diagnostics",
+     "references_tool",
+     "references",
+ }
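
For orientation only (not part of the diff): JSON_TOOL_SCHEMA above is the fallback path for models without native tool calling, where the model must emit a bare JSON object naming one tool. A minimal parsing sketch, assuming the response text already contains only that object; the helper name parse_fallback_tool_call is hypothetical and not defined by the package:

import json

def parse_fallback_tool_call(raw: str) -> tuple[str, dict]:
    """Return (tool_name, arguments) from a response that follows JSON_TOOL_SCHEMA."""
    payload = json.loads(raw)
    tool = payload["tool"]
    arguments = payload.get("arguments", {})
    if not isinstance(arguments, dict):
        raise ValueError("'arguments' must be a JSON object")
    return tool, arguments

# Example response shaped like the read_file_tool entry listed in the schema.
tool, args = parse_fallback_tool_call(
    '{"tool": "read_file_tool", "arguments": {"path": "train.py", "offset": 0, "limit": 100}}'
)
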
agent_server/langchain/state.py
@@ -6,7 +6,7 @@ This state is passed through the agent execution and updated by middleware.
  """
 
  from dataclasses import dataclass
- from typing import Annotated, Any, Dict, List, Optional, TypedDict
+ from typing import Annotated, Any, Dict, List, Optional, TypedDict, Union
 
  from langchain_core.messages import BaseMessage
  from langgraph.graph.message import add_messages
@@ -14,6 +14,7 @@ from langgraph.graph.message import add_messages
 
  class NotebookContext(TypedDict, total=False):
      """Current notebook context"""
+
      notebook_path: str
      cell_count: int
      imported_libraries: List[str]
@@ -24,6 +25,7 @@ class NotebookContext(TypedDict, total=False):
 
  class ExecutionResult(TypedDict, total=False):
      """Result of code execution"""
+
      success: bool
      output: str
      error_type: Optional[str]
@@ -35,6 +37,7 @@ class ExecutionResult(TypedDict, total=False):
 
  class SearchResult(TypedDict, total=False):
      """Result of code search"""
+
      file_path: str
      cell_index: Optional[int]
      line_number: Optional[int]
@@ -51,6 +54,7 @@ class AgentState(TypedDict, total=False):
      - Tool executions (execution results, search results)
      - Agent decisions (current step, plan updates)
      """
+
      # Message history - uses add_messages reducer to accumulate messages
      messages: Annotated[List[BaseMessage], add_messages]
 
@@ -87,6 +91,9 @@ class AgentState(TypedDict, total=False):
      # Detected libraries for knowledge injection
      detected_libraries: List[str]
 
+     # Resource usage context (legacy, use check_resource_tool instead)
+     resource_context: Optional[Union[Dict[str, Any], str]]
+
      # Final answer
      final_answer: Optional[str]
      is_complete: bool
@@ -96,9 +103,10 @@
  class AgentRuntime:
      """
      Runtime context passed to middleware.
-
+
      Contains references to executors and services needed by middleware.
      """
+
      jupyter_executor: Any = None
      notebook_searcher: Any = None
      rag_manager: Any = None
@@ -107,7 +115,7 @@ class AgentRuntime:
      workspace_root: str = "."
 
      # Execution mode
-     embedded_mode: bool = True
+     embedded_mode: bool = False
 
      # Configuration
      max_retries: int = 3
@@ -123,19 +131,20 @@ def create_initial_state(
  ) -> AgentState:
      """
      Create initial agent state from user request.
-
+
      Args:
          user_request: Natural language request from user
          notebook_context: Current notebook state
          llm_config: LLM configuration
-
+
      Returns:
          Initialized AgentState
      """
      return AgentState(
          messages=[],
          user_request=user_request,
-         notebook_context=notebook_context or NotebookContext(
+         notebook_context=notebook_context
+         or NotebookContext(
              notebook_path="",
              cell_count=0,
              imported_libraries=[],
@@ -154,6 +163,7 @@ def create_initial_state(
          recovery_strategy=None,
          llm_config=llm_config or {},
          detected_libraries=[],
+         resource_context=None,
          final_answer=None,
          is_complete=False,
      )
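
For orientation only (not part of the diff): the state changes above add a resource_context slot to AgentState and default it to None in create_initial_state. A minimal usage sketch based on the signature shown here; the notebook values and the llm_config keys are made up for illustration:

from agent_server.langchain.state import NotebookContext, create_initial_state

context = NotebookContext(
    notebook_path="analysis.ipynb",
    cell_count=12,
    imported_libraries=["pandas", "matplotlib"],
)
state = create_initial_state(
    user_request="Plot the distribution of the age column",
    notebook_context=context,
    llm_config={"model": "example-model"},
)
# The new field starts empty; per the comment above it is expected to be
# populated later via check_resource_tool rather than set by callers.
assert state["resource_context"] is None
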
agent_server/langchain/tools/__init__.py
@@ -7,13 +7,20 @@ Tools available:
  - final_answer: Complete the task
  - read_file: Read file content
  - write_file: Write file content
+ - edit_file: Edit file with string replacement
  - list_files: List directory contents
  - search_workspace: Search files in workspace
  - search_notebook_cells: Search cells in notebooks
+ - execute_command_tool: Run shell commands (client-executed)
+ - check_resource_tool: Check resources before data processing (client-executed)
+ - diagnostics_tool: Get LSP diagnostics (errors, warnings)
+ - references_tool: Find symbol references via LSP
  """
 
  from agent_server.langchain.tools.file_tools import (
+     edit_file_tool,
      list_files_tool,
+     multiedit_file_tool,
      read_file_tool,
      write_file_tool,
  )
@@ -22,10 +29,16 @@ from agent_server.langchain.tools.jupyter_tools import (
      jupyter_cell_tool,
      markdown_tool,
  )
+ from agent_server.langchain.tools.lsp_tools import (
+     diagnostics_tool,
+     references_tool,
+ )
+ from agent_server.langchain.tools.resource_tools import check_resource_tool
  from agent_server.langchain.tools.search_tools import (
      search_notebook_cells_tool,
      search_workspace_tool,
  )
+ from agent_server.langchain.tools.shell_tools import execute_command_tool
 
  __all__ = [
      "jupyter_cell_tool",
@@ -33,7 +46,13 @@ __all__ = [
      "final_answer_tool",
      "read_file_tool",
      "write_file_tool",
+     "edit_file_tool",
+     "multiedit_file_tool",
      "list_files_tool",
      "search_workspace_tool",
      "search_notebook_cells_tool",
+     "execute_command_tool",
+     "check_resource_tool",
+     "diagnostics_tool",
+     "references_tool",
  ]
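
For orientation only (not part of the diff): prompts.py's NON_HITL_TOOLS set and the expanded tool exports above suggest a simple approval gate, where read-only tools run immediately and everything else goes through human-in-the-loop review. A sketch under that assumption; needs_approval is a hypothetical helper, not a function shipped in the package:

from agent_server.langchain.prompts import NON_HITL_TOOLS

def needs_approval(tool_name: str) -> bool:
    """Tools outside NON_HITL_TOOLS (file writes, shell, code execution) require approval."""
    return tool_name not in NON_HITL_TOOLS

assert needs_approval("execute_command_tool")   # shell commands are gated
assert not needs_approval("read_file_tool")     # read-only tools run immediately
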