hdsp-jupyter-extension 2.0.7__py3-none-any.whl → 2.0.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (84)
  1. agent_server/core/embedding_service.py +67 -46
  2. agent_server/core/rag_manager.py +40 -17
  3. agent_server/core/retriever.py +12 -6
  4. agent_server/core/vllm_embedding_service.py +246 -0
  5. agent_server/langchain/ARCHITECTURE.md +7 -51
  6. agent_server/langchain/agent.py +39 -20
  7. agent_server/langchain/custom_middleware.py +206 -62
  8. agent_server/langchain/hitl_config.py +6 -9
  9. agent_server/langchain/llm_factory.py +85 -1
  10. agent_server/langchain/logging_utils.py +52 -13
  11. agent_server/langchain/prompts.py +85 -45
  12. agent_server/langchain/tools/__init__.py +14 -10
  13. agent_server/langchain/tools/file_tools.py +266 -40
  14. agent_server/langchain/tools/file_utils.py +334 -0
  15. agent_server/langchain/tools/jupyter_tools.py +0 -1
  16. agent_server/langchain/tools/lsp_tools.py +264 -0
  17. agent_server/langchain/tools/resource_tools.py +12 -12
  18. agent_server/langchain/tools/search_tools.py +3 -158
  19. agent_server/main.py +7 -0
  20. agent_server/routers/langchain_agent.py +207 -102
  21. agent_server/routers/rag.py +8 -3
  22. hdsp_agent_core/models/rag.py +15 -1
  23. hdsp_agent_core/services/rag_service.py +6 -1
  24. {hdsp_jupyter_extension-2.0.7.data → hdsp_jupyter_extension-2.0.10.data}/data/share/jupyter/labextensions/hdsp-agent/build_log.json +1 -1
  25. {hdsp_jupyter_extension-2.0.7.data → hdsp_jupyter_extension-2.0.10.data}/data/share/jupyter/labextensions/hdsp-agent/package.json +3 -2
  26. hdsp_jupyter_extension-2.0.7.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.4770ec0fb2d173b6deb4.js → hdsp_jupyter_extension-2.0.10.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.2d9fb488c82498c45c2d.js +251 -5
  27. hdsp_jupyter_extension-2.0.10.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.2d9fb488c82498c45c2d.js.map +1 -0
  28. hdsp_jupyter_extension-2.0.7.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.29cf4312af19e86f82af.js → hdsp_jupyter_extension-2.0.10.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.dc6434bee96ab03a0539.js +1831 -274
  29. hdsp_jupyter_extension-2.0.10.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.dc6434bee96ab03a0539.js.map +1 -0
  30. hdsp_jupyter_extension-2.0.7.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.61343eb4cf0577e74b50.js → hdsp_jupyter_extension-2.0.10.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.4a252df3ade74efee8d6.js +11 -9
  31. hdsp_jupyter_extension-2.0.10.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.4a252df3ade74efee8d6.js.map +1 -0
  32. jupyter_ext/labextension/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js-node_modules-782ee5.d9ed8645ef1d311657d8.js → hdsp_jupyter_extension-2.0.10.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js.24edcc52a1c014a8a5f0.js +2 -209
  33. hdsp_jupyter_extension-2.0.10.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js.24edcc52a1c014a8a5f0.js.map +1 -0
  34. jupyter_ext/labextension/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.36b49c71871f98d4f549.js → hdsp_jupyter_extension-2.0.10.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.19ecf6babe00caff6b8a.js +209 -2
  35. hdsp_jupyter_extension-2.0.10.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.19ecf6babe00caff6b8a.js.map +1 -0
  36. jupyter_ext/labextension/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.2e13df4ea61496e95d45.js → hdsp_jupyter_extension-2.0.10.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.1f5038488cdfd8b3a85d.js +212 -3
  37. hdsp_jupyter_extension-2.0.10.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.1f5038488cdfd8b3a85d.js.map +1 -0
  38. {hdsp_jupyter_extension-2.0.7.dist-info → hdsp_jupyter_extension-2.0.10.dist-info}/METADATA +1 -3
  39. hdsp_jupyter_extension-2.0.10.dist-info/RECORD +144 -0
  40. jupyter_ext/__init__.py +18 -0
  41. jupyter_ext/_version.py +1 -1
  42. jupyter_ext/handlers.py +176 -1
  43. jupyter_ext/labextension/build_log.json +1 -1
  44. jupyter_ext/labextension/package.json +3 -2
  45. jupyter_ext/labextension/static/{frontend_styles_index_js.4770ec0fb2d173b6deb4.js → frontend_styles_index_js.2d9fb488c82498c45c2d.js} +251 -5
  46. jupyter_ext/labextension/static/frontend_styles_index_js.2d9fb488c82498c45c2d.js.map +1 -0
  47. jupyter_ext/labextension/static/{lib_index_js.29cf4312af19e86f82af.js → lib_index_js.dc6434bee96ab03a0539.js} +1831 -274
  48. jupyter_ext/labextension/static/lib_index_js.dc6434bee96ab03a0539.js.map +1 -0
  49. jupyter_ext/labextension/static/{remoteEntry.61343eb4cf0577e74b50.js → remoteEntry.4a252df3ade74efee8d6.js} +11 -9
  50. jupyter_ext/labextension/static/remoteEntry.4a252df3ade74efee8d6.js.map +1 -0
  51. hdsp_jupyter_extension-2.0.7.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js-node_modules-782ee5.d9ed8645ef1d311657d8.js → jupyter_ext/labextension/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js.24edcc52a1c014a8a5f0.js +2 -209
  52. jupyter_ext/labextension/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js.24edcc52a1c014a8a5f0.js.map +1 -0
  53. hdsp_jupyter_extension-2.0.7.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.36b49c71871f98d4f549.js → jupyter_ext/labextension/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.19ecf6babe00caff6b8a.js +209 -2
  54. jupyter_ext/labextension/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.19ecf6babe00caff6b8a.js.map +1 -0
  55. hdsp_jupyter_extension-2.0.7.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.2e13df4ea61496e95d45.js → jupyter_ext/labextension/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.1f5038488cdfd8b3a85d.js +212 -3
  56. jupyter_ext/labextension/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.1f5038488cdfd8b3a85d.js.map +1 -0
  57. hdsp_jupyter_extension-2.0.7.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.4770ec0fb2d173b6deb4.js.map +0 -1
  58. hdsp_jupyter_extension-2.0.7.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.29cf4312af19e86f82af.js.map +0 -1
  59. hdsp_jupyter_extension-2.0.7.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.61343eb4cf0577e74b50.js.map +0 -1
  60. hdsp_jupyter_extension-2.0.7.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js-node_modules-782ee5.d9ed8645ef1d311657d8.js.map +0 -1
  61. hdsp_jupyter_extension-2.0.7.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.36b49c71871f98d4f549.js.map +0 -1
  62. hdsp_jupyter_extension-2.0.7.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.2e13df4ea61496e95d45.js.map +0 -1
  63. hdsp_jupyter_extension-2.0.7.dist-info/RECORD +0 -141
  64. jupyter_ext/labextension/static/frontend_styles_index_js.4770ec0fb2d173b6deb4.js.map +0 -1
  65. jupyter_ext/labextension/static/lib_index_js.29cf4312af19e86f82af.js.map +0 -1
  66. jupyter_ext/labextension/static/remoteEntry.61343eb4cf0577e74b50.js.map +0 -1
  67. jupyter_ext/labextension/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js-node_modules-782ee5.d9ed8645ef1d311657d8.js.map +0 -1
  68. jupyter_ext/labextension/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.36b49c71871f98d4f549.js.map +0 -1
  69. jupyter_ext/labextension/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.2e13df4ea61496e95d45.js.map +0 -1
  70. {hdsp_jupyter_extension-2.0.7.data → hdsp_jupyter_extension-2.0.10.data}/data/etc/jupyter/jupyter_server_config.d/hdsp_jupyter_extension.json +0 -0
  71. {hdsp_jupyter_extension-2.0.7.data → hdsp_jupyter_extension-2.0.10.data}/data/share/jupyter/labextensions/hdsp-agent/install.json +0 -0
  72. {hdsp_jupyter_extension-2.0.7.data → hdsp_jupyter_extension-2.0.10.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b80.c095373419d05e6f141a.js +0 -0
  73. {hdsp_jupyter_extension-2.0.7.data → hdsp_jupyter_extension-2.0.10.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b80.c095373419d05e6f141a.js.map +0 -0
  74. {hdsp_jupyter_extension-2.0.7.data → hdsp_jupyter_extension-2.0.10.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b81.61e75fb98ecff46cf836.js +0 -0
  75. {hdsp_jupyter_extension-2.0.7.data → hdsp_jupyter_extension-2.0.10.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b81.61e75fb98ecff46cf836.js.map +0 -0
  76. {hdsp_jupyter_extension-2.0.7.data → hdsp_jupyter_extension-2.0.10.data}/data/share/jupyter/labextensions/hdsp-agent/static/style.js +0 -0
  77. {hdsp_jupyter_extension-2.0.7.data → hdsp_jupyter_extension-2.0.10.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_babel_runtime_helpers_esm_extends_js-node_modules_emotion_serialize_dist-051195.e2553aab0c3963b83dd7.js +0 -0
  78. {hdsp_jupyter_extension-2.0.7.data → hdsp_jupyter_extension-2.0.10.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_babel_runtime_helpers_esm_extends_js-node_modules_emotion_serialize_dist-051195.e2553aab0c3963b83dd7.js.map +0 -0
  79. {hdsp_jupyter_extension-2.0.7.data → hdsp_jupyter_extension-2.0.10.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_styled_dist_emotion-styled_browser_development_esm_js.661fb5836f4978a7c6e1.js +0 -0
  80. {hdsp_jupyter_extension-2.0.7.data → hdsp_jupyter_extension-2.0.10.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_styled_dist_emotion-styled_browser_development_esm_js.661fb5836f4978a7c6e1.js.map +0 -0
  81. {hdsp_jupyter_extension-2.0.7.data → hdsp_jupyter_extension-2.0.10.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_index_js.985697e0162d8d088ca2.js +0 -0
  82. {hdsp_jupyter_extension-2.0.7.data → hdsp_jupyter_extension-2.0.10.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_index_js.985697e0162d8d088ca2.js.map +0 -0
  83. {hdsp_jupyter_extension-2.0.7.dist-info → hdsp_jupyter_extension-2.0.10.dist-info}/WHEEL +0 -0
  84. {hdsp_jupyter_extension-2.0.7.dist-info → hdsp_jupyter_extension-2.0.10.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,334 @@
1
+ """
2
+ File Utilities for LangChain Agent
3
+
4
+ Provides utility functions for file operations:
5
+ - perform_string_replacement: String replacement with occurrence validation
6
+ - compute_unified_diff: Generate unified diff between before/after content
7
+ - count_diff_changes: Count additions/deletions from diff
8
+ - format_content_with_line_numbers: Format file content with line numbers (cat -n style)
9
+ - check_empty_content: Check if content is empty and return warning message
10
+ """
11
+
12
+ import difflib
13
+ from typing import List, Optional, Tuple, Union
14
+
15
# Constants for file reading (aligned with DeepAgents)
# Warning returned by check_empty_content() / format_read_response() for blank files.
EMPTY_CONTENT_WARNING = "System reminder: File exists but has empty contents"
MAX_LINE_LENGTH = 10000  # Chunk lines longer than this
LINE_NUMBER_WIDTH = 6  # Width for line number padding
20
+
21
def check_empty_content(content: str) -> Optional[str]:
    """
    Return a standard warning when *content* is empty or whitespace-only.

    Args:
        content: Text to inspect.

    Returns:
        EMPTY_CONTENT_WARNING for empty/blank content, otherwise None.
    """
    # Content with at least one non-whitespace character is considered non-empty.
    if content and content.strip():
        return None
    return EMPTY_CONTENT_WARNING
34
+
35
+
36
def format_content_with_line_numbers(
    content: Union[str, List[str]],
    start_line: int = 1,
) -> str:
    """
    Render file content in ``cat -n`` style with right-aligned line numbers.

    Lines longer than MAX_LINE_LENGTH are emitted in several pieces; the
    first piece keeps the normal line number and the rest carry decimal
    continuation markers such as ``5.1``, ``5.2``.

    Args:
        content: Full text or a pre-split list of lines.
        start_line: Number assigned to the first line (default: 1).

    Returns:
        Numbered text, one output row per line or per chunk of a long line.
    """
    if isinstance(content, str):
        rows = content.split("\n")
        # A trailing newline yields one spurious empty entry; drop it.
        if rows and rows[-1] == "":
            rows = rows[:-1]
    else:
        rows = content

    rendered: List[str] = []
    for number, row in enumerate(rows, start=start_line):
        # Slice the row into MAX_LINE_LENGTH-sized pieces; ensure at least
        # one piece so empty lines still produce a numbered entry.
        pieces = [
            row[pos : pos + MAX_LINE_LENGTH]
            for pos in range(0, len(row), MAX_LINE_LENGTH)
        ] or [""]

        rendered.append(f"{number:{LINE_NUMBER_WIDTH}d}\t{pieces[0]}")
        for part, piece in enumerate(pieces[1:], start=1):
            # Continuation chunks reuse the line number with a decimal suffix.
            marker = f"{number}.{part}"
            rendered.append(f"{marker:>{LINE_NUMBER_WIDTH}}\t{piece}")

    return "\n".join(rendered)
84
+
85
+
86
def format_read_response(
    content: str,
    offset: int = 0,
    limit: int = 500,
) -> str:
    """
    Produce a paginated, line-numbered view of *content* for a read tool.

    Args:
        content: Full file content.
        offset: 0-indexed line to start from.
        limit: Maximum number of lines in the page.

    Returns:
        Numbered text, possibly followed by a continuation hint, or an
        error/warning message string.
    """
    # Empty files get the standard warning instead of numbered output.
    warning = check_empty_content(content)
    if warning:
        return warning

    all_lines = content.splitlines()
    line_count = len(all_lines)

    # Reject offsets past the end of the file.
    if offset >= line_count:
        return f"Error: Line offset {offset} exceeds file length ({line_count} lines)"

    page_end = min(offset + limit, line_count)
    page = all_lines[offset:page_end]

    # Line numbers in the output are 1-indexed.
    body = format_content_with_line_numbers(page, start_line=offset + 1)

    # Tell the caller how to fetch the next page when output was truncated.
    if page_end < line_count:
        left_over = line_count - page_end
        body += f"\n\n[... {left_over} more lines. Use offset={page_end} to continue reading]"

    return body
130
+
131
+
132
+ def _normalize_whitespace(text: str) -> str:
133
+ """Normalize line endings and trailing whitespace per line."""
134
+ lines = text.replace("\r\n", "\n").replace("\r", "\n").split("\n")
135
+ return "\n".join(line.rstrip() for line in lines)
136
+
137
+
138
def perform_string_replacement(
    content: str,
    old_string: str,
    new_string: str,
    replace_all: bool = False,
) -> Union[Tuple[str, int], str]:
    """
    Perform string replacement with occurrence validation.

    Includes fallback strategies for more robust matching, tried in order:
    1. Exact match
    2. Strip leading/trailing newlines from old_string
    3. Normalize whitespace (line endings, trailing spaces)

    Args:
        content: Original file content
        old_string: String to replace
        new_string: Replacement string
        replace_all: Whether to replace all occurrences

    Returns:
        Tuple of (new_content, occurrences) on success,
        or error message string on failure (callers distinguish by type).
    """
    # Strategy 1: Exact match
    occurrences = content.count(old_string)

    if occurrences == 0:
        # Strategy 2: Strip leading/trailing newlines from old_string
        stripped_old = old_string.strip("\n")
        occurrences = content.count(stripped_old)
        if occurrences > 0:
            old_string = stripped_old
            # Also strip new_string's leading/trailing newlines to match
            new_string = new_string.strip("\n")

    if occurrences == 0:
        # Strategy 3: Normalize whitespace (line endings, trailing spaces)
        normalized_content = _normalize_whitespace(content)
        normalized_old = _normalize_whitespace(old_string.strip("\n"))
        occurrences = normalized_content.count(normalized_old)

        if occurrences > 0:
            # Find the original text in content that matches normalized version
            # We need to do replacement on the normalized content first
            normalized_new = _normalize_whitespace(new_string.strip("\n"))
            if occurrences > 1 and not replace_all:
                preview = (
                    old_string[:50] + "..." if len(old_string) > 50 else old_string
                )
                return (
                    f"Error: String '{preview}' appears {occurrences} times in file. "
                    "Use replace_all=True to replace all instances, "
                    "or provide a more specific string with surrounding context."
                )
            # Replace on normalized content, then return.
            # NOTE(review): this returns the *normalized* file — CRLF endings
            # and trailing spaces are rewritten everywhere, not just in the
            # replaced span. Looks intentional (best-effort match), but confirm
            # callers tolerate whole-file whitespace normalization.
            new_content = normalized_content.replace(normalized_old, normalized_new)
            return new_content, occurrences

    if occurrences == 0:
        # All strategies failed
        preview = old_string[:100] + "..." if len(old_string) > 100 else old_string
        return f"Error: String not found in file: '{preview}'"

    # Ambiguous match: refuse to guess which occurrence the caller meant.
    if occurrences > 1 and not replace_all:
        preview = old_string[:50] + "..." if len(old_string) > 50 else old_string
        return (
            f"Error: String '{preview}' appears {occurrences} times in file. "
            "Use replace_all=True to replace all instances, "
            "or provide a more specific string with surrounding context."
        )

    # str.replace with no count replaces every occurrence; safe here because
    # either occurrences == 1 or replace_all is True.
    new_content = content.replace(old_string, new_string)
    return new_content, occurrences
212
+
213
+
214
def compute_unified_diff(
    before: str,
    after: str,
    filepath: str,
    max_lines: int = 100,
    context_lines: int = 3,
) -> Union[str, None]:
    """
    Compute a unified diff between *before* and *after* content.

    Args:
        before: Original content
        after: New content
        filepath: Path shown in the diff headers
        max_lines: Maximum number of diff lines (None/0 for unlimited)
        context_lines: Number of context lines around changes (default 3)

    Returns:
        Unified diff string, or None when the contents are identical.
    """
    diff_iter = difflib.unified_diff(
        before.splitlines(),
        after.splitlines(),
        fromfile=f"{filepath} (before)",
        tofile=f"{filepath} (after)",
        lineterm="",
        n=context_lines,
    )
    all_lines = list(diff_iter)

    if not all_lines:
        return None

    # Truncate oversized diffs, replacing the tail with a summary marker.
    if max_lines and len(all_lines) > max_lines:
        omitted = len(all_lines) - max_lines + 1
        shown = all_lines[: max_lines - 1]
        shown.append(f"... [{omitted} more lines truncated]")
        return "\n".join(shown)

    return "\n".join(all_lines)
259
+
260
+
261
def count_diff_changes(diff: str) -> Tuple[int, int]:
    """
    Count added and removed lines in a unified diff.

    Args:
        diff: Unified diff string (may be empty/None).

    Returns:
        Tuple of (additions, deletions); header lines (+++/---) are excluded.
    """
    if not diff:
        return 0, 0

    added = 0
    removed = 0
    for entry in diff.splitlines():
        if entry.startswith("+") and not entry.startswith("+++"):
            added += 1
        elif entry.startswith("-") and not entry.startswith("---"):
            removed += 1
    return added, removed
285
+
286
+
287
def build_edit_preview(
    original_content: str,
    old_string: str,
    new_string: str,
    replace_all: bool,
    filepath: str,
) -> dict:
    """
    Build a preview payload for an edit_file operation, including a diff.

    Args:
        original_content: Current file content
        old_string: String to replace
        new_string: Replacement string
        replace_all: Whether to replace all occurrences
        filepath: File path for display in the diff headers

    Returns:
        Dict with success flag, diff, occurrence and line-change counts;
        on success it also carries the resulting "new_content".
    """
    replacement = perform_string_replacement(
        original_content, old_string, new_string, replace_all
    )

    # perform_string_replacement signals failure by returning an error string.
    if isinstance(replacement, str):
        return {
            "success": False,
            "error": replacement,
            "diff": None,
            "occurrences": 0,
            "lines_added": 0,
            "lines_removed": 0,
        }

    updated_content, match_count = replacement
    diff_text = compute_unified_diff(original_content, updated_content, filepath)
    if diff_text:
        added, removed = count_diff_changes(diff_text)
    else:
        added, removed = 0, 0

    return {
        "success": True,
        "error": None,
        "diff": diff_text,
        "occurrences": match_count,
        "lines_added": added,
        "lines_removed": removed,
        "new_content": updated_content,
    }
@@ -139,5 +139,4 @@ def final_answer_tool(answer: str, summary: Optional[str] = None) -> Dict[str, A
139
139
# Jupyter tools exported to the agent.
# NOTE(review): final_answer_tool was removed from this list in the
# 2.0.7 -> 2.0.10 diff — confirm no caller still expects it here.
JUPYTER_TOOLS = [
    jupyter_cell_tool,
    markdown_tool,
]
@@ -0,0 +1,264 @@
1
+ """
2
+ LSP Tools for LangChain Agent
3
+
4
+ Provides tools for LSP (Language Server Protocol) integration:
5
+ - diagnostics_tool: Get code diagnostics (errors, warnings)
6
+ - references_tool: Find symbol references
7
+
8
+ Crush 패턴 적용:
9
+ - 진단 결과 포맷팅 (severity 기반 정렬)
10
+ - 출력 제한 (최대 10개 + 요약)
11
+ - Grep-then-LSP 패턴 (references)
12
+ """
13
+
14
+ from typing import Any, Dict, List, Optional
15
+
16
+ from langchain_core.tools import tool
17
+ from pydantic import BaseModel, Field
18
+
19
+
20
class DiagnosticsInput(BaseModel):
    """Input schema for diagnostics tool"""

    # Target file; None requests project-wide diagnostics.
    path: Optional[str] = Field(
        default=None,
        description="File path to get diagnostics for. If not provided, returns project-wide diagnostics.",
    )
    # Restrict output to one severity level; None keeps everything.
    severity_filter: Optional[str] = Field(
        default=None,
        description="Filter by severity: 'error', 'warning', 'hint', or None for all",
    )
    # Filled in on the second round-trip with the client-side LSP result;
    # None means the request has not been executed by the client yet.
    execution_result: Optional[Dict[str, Any]] = Field(
        default=None,
        description="LSP diagnostics result from client",
    )
36
+
37
class ReferencesInput(BaseModel):
    """Input schema for references tool"""

    # Symbol to look up; the only required field.
    symbol: str = Field(description="Symbol name to find references for")
    path: Optional[str] = Field(
        default=None,
        description="File path where the symbol is located (optional)",
    )
    line: Optional[int] = Field(
        default=None, description="Line number (1-indexed, optional)"
    )
    character: Optional[int] = Field(
        default=None, description="Character position (optional)"
    )
    # Filled in on the second round-trip with the client-side LSP result;
    # None means the request has not been executed by the client yet.
    execution_result: Optional[Dict[str, Any]] = Field(
        default=None,
        description="LSP references result from client",
    )
55
+
56
+
57
@tool(args_schema=DiagnosticsInput)
def diagnostics_tool(
    path: Optional[str] = None,
    severity_filter: Optional[str] = None,
    execution_result: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
    """
    Get LSP diagnostics (errors, warnings) for a file or the entire project.

    Use this tool to:
    - Check for syntax errors before running code
    - Find type errors in Python/TypeScript files
    - Identify unused imports or variables
    - Verify code quality issues after editing

    The diagnostics are provided by language servers (pylsp, etc.)
    and are more accurate than simple linting.

    **Best Practice**: Always check diagnostics after editing code:
    1. edit_file_tool(...) - make changes
    2. diagnostics_tool(path="file.py") - verify no new errors

    Args:
        path: Optional file path. None = project-wide diagnostics
        severity_filter: Optional filter ('error', 'warning', 'hint')

    Returns:
        Formatted diagnostics with severity, location, and message
    """
    if execution_result is None:
        # First pass: no client result yet, so hand back a request descriptor
        # for the client-side LSP bridge to execute and feed back in.
        return {
            "tool": "diagnostics_tool",
            "parameters": {
                "path": path,
                "severity_filter": severity_filter,
            },
            "status": "pending_execution",
            "message": "Diagnostics request queued for LSP bridge execution",
        }

    # Process client result (Crush pattern)
    diagnostics = execution_result.get("diagnostics", [])
    lsp_available = execution_result.get("lsp_available", False)

    if not lsp_available:
        return {
            "tool": "diagnostics_tool",
            "success": True,
            "output": "LSP not available. Install jupyterlab-lsp for code diagnostics.\nUse execute_command_tool with grep for text-based code search instead.",
            "counts": {"errors": 0, "warnings": 0, "total": 0},
        }

    # Severity ordering (errors first)
    severity_order = {"error": 0, "warning": 1, "information": 2, "hint": 3}

    # Sort diagnostics by severity, then file, then line
    sorted_diags = sorted(
        diagnostics,
        key=lambda d: (
            severity_order.get(d.get("severity", "hint"), 3),
            d.get("file", ""),
            d.get("line", 0),
        ),
    )

    # Filter by severity if specified
    if severity_filter:
        sorted_diags = [d for d in sorted_diags if d.get("severity") == severity_filter]

    # Format output (Crush's formatDiagnostics pattern)
    formatted_lines = []
    for d in sorted_diags[:10]:  # at most 10 entries
        severity = d.get("severity", "hint").upper()
        line = d.get("line", 0)
        col = d.get("character", 0)
        source = d.get("source", "")
        code = d.get("code", "")
        message = d.get("message", "")
        file = d.get("file", path or "")

        location = f"{file}:{line}:{col}" if file else f"L{line}:{col}"
        source_info = f"[{source}]" if source else ""
        code_info = f"[{code}]" if code else ""

        formatted_lines.append(
            f"{severity} {location} {source_info}{code_info} {message}"
        )

    # Calculate counts.
    # NOTE(review): counts and the ">10" summary are computed from the full,
    # unfiltered diagnostics list, so with severity_filter set they may not
    # match the number of lines actually shown — confirm this is intended.
    total = len(diagnostics)
    errors = sum(1 for d in diagnostics if d.get("severity") == "error")
    warnings = sum(1 for d in diagnostics if d.get("severity") == "warning")

    # Add summary
    summary = f"\n--- Summary: {errors} errors, {warnings} warnings, {total} total"
    if total > 10:
        summary += " (showing first 10)"

    # lsp_available is always True here (early return above), so the else
    # branch's conditional suffix always renders " LSP is available.".
    output = (
        "\n".join(formatted_lines) + summary
        if formatted_lines
        else f"No diagnostics found.{' LSP is available.' if lsp_available else ''}"
    )

    return {
        "tool": "diagnostics_tool",
        "success": True,
        "output": output,
        "counts": {"errors": errors, "warnings": warnings, "total": total},
    }
168
+
169
+
170
@tool(args_schema=ReferencesInput)
def references_tool(
    symbol: str,
    path: Optional[str] = None,
    line: Optional[int] = None,
    character: Optional[int] = None,
    execution_result: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
    """
    Find all references to a symbol across the codebase.

    Use this tool to:
    - Check if a function/class is used before renaming/deleting
    - Understand how a variable is used throughout the code
    - Find all usages before refactoring

    If LSP is not available, falls back to execute_command_tool with grep.

    Args:
        symbol: Symbol name (function, class, variable)
        path: Optional file path where symbol is located
        line: Optional line number (1-indexed)
        character: Optional character position
        execution_result: Client-side LSP/grep result; None triggers a
            pending_execution round-trip.

    Returns:
        List of locations where the symbol is referenced
    """
    if execution_result is None:
        # First pass: return a request descriptor for the client to execute.
        return {
            "tool": "references_tool",
            "parameters": {
                "symbol": symbol,
                "path": path,
                "line": line,
                "character": character,
            },
            "status": "pending_execution",
            "message": "References search queued for execution",
        }

    # Assumed result shape: {"locations": [{"file", "line", "character",
    # "preview"}, ...], "lsp_available": bool, "used_grep": bool} — TODO
    # confirm against the client-side bridge.
    locations = execution_result.get("locations", [])
    lsp_available = execution_result.get("lsp_available", False)
    used_grep = execution_result.get("used_grep", False)

    if not locations:
        if not lsp_available:
            return {
                "tool": "references_tool",
                "success": True,
                "output": f"LSP not available. Use execute_command_tool with grep with pattern='{symbol}' for text-based search.",
                "count": 0,
            }
        return {
            "tool": "references_tool",
            "success": True,
            "output": f"No references found for '{symbol}'",
            "count": 0,
        }

    # Group by file (Crush pattern)
    by_file: Dict[str, List] = {}
    for loc in locations:
        file = loc.get("file", "unknown")
        if file not in by_file:
            by_file[file] = []
        by_file[file].append(loc)

    # Format output: one header per file, locations sorted by line
    method_note = " (grep-based)" if used_grep else " (LSP)"
    formatted_lines = [
        f"Found {len(locations)} references to '{symbol}'{method_note}:\n"
    ]

    for file, locs in sorted(by_file.items()):
        formatted_lines.append(f"\n📄 {file}")
        for loc in sorted(locs, key=lambda x: x.get("line", 0)):
            line_num = loc.get("line", 0)
            col = loc.get("character", 0)
            # Clamp previews to keep the listing compact
            preview = (loc.get("preview", "") or "")[:60]
            formatted_lines.append(f" L{line_num}:{col} {preview}")

    return {
        "tool": "references_tool",
        "success": True,
        "output": "\n".join(formatted_lines),
        "count": len(locations),
        "by_file": {f: len(locs) for f, locs in by_file.items()},
    }
258
+
259
+
260
# Export all LSP tools
# Consumed by the tools package to register LSP capabilities with the agent.
LSP_TOOLS = [
    diagnostics_tool,
    references_tool,
]
@@ -46,7 +46,7 @@ def _build_file_size_command(files: List[str]) -> str:
46
46
  """
47
47
  if not files:
48
48
  return ""
49
-
49
+
50
50
  # Use stat with format that works on both macOS and Linux
51
51
  # macOS: stat -f "%z %N"
52
52
  # Linux: stat -c "%s %n"
@@ -62,10 +62,10 @@ def _build_dataframe_check_code(dataframes: List[str]) -> str:
62
62
  """
63
63
  if not dataframes:
64
64
  return ""
65
-
65
+
66
66
  df_checks = []
67
67
  for df_name in dataframes:
68
- df_checks.append(f'''
68
+ df_checks.append(f"""
69
69
  try:
70
70
  _df = {df_name}
71
71
  _info = {{
@@ -79,14 +79,14 @@ try:
79
79
  except NameError:
80
80
  _info = {{"name": "{df_name}", "exists": False}}
81
81
  _results.append(_info)
82
- ''')
83
-
84
- code = f'''
82
+ """)
83
+
84
+ code = f"""
85
85
  import json
86
86
  _results = []
87
87
  {chr(10).join(df_checks)}
88
88
  print(json.dumps(_results))
89
- '''
89
+ """
90
90
  return code.strip()
91
91
 
92
92
 
@@ -137,20 +137,20 @@ def check_resource_tool(
137
137
  response["execution_result"] = execution_result
138
138
  response["status"] = "complete"
139
139
  response["message"] = "Resource check completed"
140
-
140
+
141
141
  # Parse the execution result
142
142
  if isinstance(execution_result, dict):
143
143
  response["success"] = execution_result.get("success", False)
144
-
144
+
145
145
  # System resources
146
146
  response["system"] = execution_result.get("system", {})
147
-
147
+
148
148
  # File sizes
149
149
  response["files"] = execution_result.get("files", [])
150
-
150
+
151
151
  # DataFrame info
152
152
  response["dataframes"] = execution_result.get("dataframes", [])
153
-
153
+
154
154
  if "error" in execution_result:
155
155
  response["error"] = execution_result["error"]
156
156