hdsp-jupyter-extension 2.0.6-py3-none-any.whl → 2.0.7-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agent_server/core/reflection_engine.py +0 -1
- agent_server/knowledge/watchdog_service.py +1 -1
- agent_server/langchain/ARCHITECTURE.md +1193 -0
- agent_server/langchain/agent.py +74 -588
- agent_server/langchain/custom_middleware.py +636 -0
- agent_server/langchain/executors/__init__.py +2 -7
- agent_server/langchain/executors/notebook_searcher.py +46 -38
- agent_server/langchain/hitl_config.py +66 -0
- agent_server/langchain/llm_factory.py +166 -0
- agent_server/langchain/logging_utils.py +184 -0
- agent_server/langchain/prompts.py +119 -0
- agent_server/langchain/state.py +16 -6
- agent_server/langchain/tools/__init__.py +6 -0
- agent_server/langchain/tools/file_tools.py +91 -129
- agent_server/langchain/tools/jupyter_tools.py +18 -18
- agent_server/langchain/tools/resource_tools.py +161 -0
- agent_server/langchain/tools/search_tools.py +198 -216
- agent_server/langchain/tools/shell_tools.py +54 -0
- agent_server/main.py +4 -1
- agent_server/routers/health.py +1 -1
- agent_server/routers/langchain_agent.py +940 -285
- hdsp_agent_core/prompts/auto_agent_prompts.py +3 -3
- {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.7.data}/data/share/jupyter/labextensions/hdsp-agent/build_log.json +1 -1
- {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.7.data}/data/share/jupyter/labextensions/hdsp-agent/package.json +2 -2
- hdsp_jupyter_extension-2.0.6.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.02d346171474a0fb2dc1.js → hdsp_jupyter_extension-2.0.7.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.4770ec0fb2d173b6deb4.js +312 -6
- hdsp_jupyter_extension-2.0.7.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.4770ec0fb2d173b6deb4.js.map +1 -0
- hdsp_jupyter_extension-2.0.6.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.a223ea20056954479ae9.js → hdsp_jupyter_extension-2.0.7.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.29cf4312af19e86f82af.js +1547 -330
- hdsp_jupyter_extension-2.0.7.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.29cf4312af19e86f82af.js.map +1 -0
- hdsp_jupyter_extension-2.0.6.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.addf2fa038fa60304aa2.js → hdsp_jupyter_extension-2.0.7.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.61343eb4cf0577e74b50.js +8 -8
- hdsp_jupyter_extension-2.0.7.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.61343eb4cf0577e74b50.js.map +1 -0
- hdsp_jupyter_extension-2.0.6.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js.24edcc52a1c014a8a5f0.js → hdsp_jupyter_extension-2.0.7.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js-node_modules-782ee5.d9ed8645ef1d311657d8.js +209 -2
- hdsp_jupyter_extension-2.0.7.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js-node_modules-782ee5.d9ed8645ef1d311657d8.js.map +1 -0
- jupyter_ext/labextension/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.19ecf6babe00caff6b8a.js → hdsp_jupyter_extension-2.0.7.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.36b49c71871f98d4f549.js +2 -209
- hdsp_jupyter_extension-2.0.7.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.36b49c71871f98d4f549.js.map +1 -0
- hdsp_jupyter_extension-2.0.6.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.1f5038488cdfd8b3a85d.js → hdsp_jupyter_extension-2.0.7.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.2e13df4ea61496e95d45.js +3 -212
- hdsp_jupyter_extension-2.0.7.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.2e13df4ea61496e95d45.js.map +1 -0
- {hdsp_jupyter_extension-2.0.6.dist-info → hdsp_jupyter_extension-2.0.7.dist-info}/METADATA +2 -1
- {hdsp_jupyter_extension-2.0.6.dist-info → hdsp_jupyter_extension-2.0.7.dist-info}/RECORD +71 -68
- jupyter_ext/_version.py +1 -1
- jupyter_ext/handlers.py +1176 -58
- jupyter_ext/labextension/build_log.json +1 -1
- jupyter_ext/labextension/package.json +2 -2
- jupyter_ext/labextension/static/{frontend_styles_index_js.02d346171474a0fb2dc1.js → frontend_styles_index_js.4770ec0fb2d173b6deb4.js} +312 -6
- jupyter_ext/labextension/static/frontend_styles_index_js.4770ec0fb2d173b6deb4.js.map +1 -0
- jupyter_ext/labextension/static/{lib_index_js.a223ea20056954479ae9.js → lib_index_js.29cf4312af19e86f82af.js} +1547 -330
- jupyter_ext/labextension/static/lib_index_js.29cf4312af19e86f82af.js.map +1 -0
- jupyter_ext/labextension/static/{remoteEntry.addf2fa038fa60304aa2.js → remoteEntry.61343eb4cf0577e74b50.js} +8 -8
- jupyter_ext/labextension/static/remoteEntry.61343eb4cf0577e74b50.js.map +1 -0
- jupyter_ext/labextension/static/{vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js.24edcc52a1c014a8a5f0.js → vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js-node_modules-782ee5.d9ed8645ef1d311657d8.js} +209 -2
- jupyter_ext/labextension/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js-node_modules-782ee5.d9ed8645ef1d311657d8.js.map +1 -0
- hdsp_jupyter_extension-2.0.6.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.19ecf6babe00caff6b8a.js → jupyter_ext/labextension/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.36b49c71871f98d4f549.js +2 -209
- jupyter_ext/labextension/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.36b49c71871f98d4f549.js.map +1 -0
- jupyter_ext/labextension/static/{vendors-node_modules_mui_material_utils_createSvgIcon_js.1f5038488cdfd8b3a85d.js → vendors-node_modules_mui_material_utils_createSvgIcon_js.2e13df4ea61496e95d45.js} +3 -212
- jupyter_ext/labextension/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.2e13df4ea61496e95d45.js.map +1 -0
- jupyter_ext/resource_usage.py +180 -0
- jupyter_ext/tests/test_handlers.py +58 -0
- agent_server/langchain/executors/jupyter_executor.py +0 -429
- agent_server/langchain/middleware/__init__.py +0 -36
- agent_server/langchain/middleware/code_search_middleware.py +0 -278
- agent_server/langchain/middleware/error_handling_middleware.py +0 -338
- agent_server/langchain/middleware/jupyter_execution_middleware.py +0 -301
- agent_server/langchain/middleware/rag_middleware.py +0 -227
- agent_server/langchain/middleware/validation_middleware.py +0 -240
- hdsp_jupyter_extension-2.0.6.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.02d346171474a0fb2dc1.js.map +0 -1
- hdsp_jupyter_extension-2.0.6.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.a223ea20056954479ae9.js.map +0 -1
- hdsp_jupyter_extension-2.0.6.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.addf2fa038fa60304aa2.js.map +0 -1
- hdsp_jupyter_extension-2.0.6.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js.24edcc52a1c014a8a5f0.js.map +0 -1
- hdsp_jupyter_extension-2.0.6.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.19ecf6babe00caff6b8a.js.map +0 -1
- hdsp_jupyter_extension-2.0.6.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.1f5038488cdfd8b3a85d.js.map +0 -1
- jupyter_ext/labextension/static/frontend_styles_index_js.02d346171474a0fb2dc1.js.map +0 -1
- jupyter_ext/labextension/static/lib_index_js.a223ea20056954479ae9.js.map +0 -1
- jupyter_ext/labextension/static/remoteEntry.addf2fa038fa60304aa2.js.map +0 -1
- jupyter_ext/labextension/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js.24edcc52a1c014a8a5f0.js.map +0 -1
- jupyter_ext/labextension/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.19ecf6babe00caff6b8a.js.map +0 -1
- jupyter_ext/labextension/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.1f5038488cdfd8b3a85d.js.map +0 -1
- {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.7.data}/data/etc/jupyter/jupyter_server_config.d/hdsp_jupyter_extension.json +0 -0
- {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.7.data}/data/share/jupyter/labextensions/hdsp-agent/install.json +0 -0
- {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.7.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b80.c095373419d05e6f141a.js +0 -0
- {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.7.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b80.c095373419d05e6f141a.js.map +0 -0
- {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.7.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b81.61e75fb98ecff46cf836.js +0 -0
- {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.7.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b81.61e75fb98ecff46cf836.js.map +0 -0
- {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.7.data}/data/share/jupyter/labextensions/hdsp-agent/static/style.js +0 -0
- {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.7.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_babel_runtime_helpers_esm_extends_js-node_modules_emotion_serialize_dist-051195.e2553aab0c3963b83dd7.js +0 -0
- {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.7.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_babel_runtime_helpers_esm_extends_js-node_modules_emotion_serialize_dist-051195.e2553aab0c3963b83dd7.js.map +0 -0
- {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.7.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_styled_dist_emotion-styled_browser_development_esm_js.661fb5836f4978a7c6e1.js +0 -0
- {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.7.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_styled_dist_emotion-styled_browser_development_esm_js.661fb5836f4978a7c6e1.js.map +0 -0
- {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.7.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_index_js.985697e0162d8d088ca2.js +0 -0
- {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.7.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_index_js.985697e0162d8d088ca2.js.map +0 -0
- {hdsp_jupyter_extension-2.0.6.dist-info → hdsp_jupyter_extension-2.0.7.dist-info}/WHEEL +0 -0
- {hdsp_jupyter_extension-2.0.6.dist-info → hdsp_jupyter_extension-2.0.7.dist-info}/licenses/LICENSE +0 -0
**agent_server/langchain/executors/notebook_searcher.py**

```diff
@@ -22,6 +22,7 @@ logger = logging.getLogger(__name__)
 @dataclass
 class SearchMatch:
     """Single search match result"""
+
     file_path: str
     cell_index: Optional[int] = None
     cell_type: Optional[str] = None
@@ -47,6 +48,7 @@ class SearchMatch:
 @dataclass
 class SearchResults:
     """Collection of search results"""
+
     query: str
     total_matches: int
     files_searched: int
@@ -66,14 +68,14 @@ class SearchResults:
 class NotebookSearcher:
     """
     Searches notebooks and workspace files for patterns.
-
+
     Features:
     - Search across all files in workspace
     - Search within specific notebooks
     - Filter by cell type (code/markdown)
     - Regex or literal text matching
     - Context lines around matches
-
+
     Usage:
         searcher = NotebookSearcher(workspace_root="/path/to/workspace")
         results = searcher.search_workspace("import pandas")
@@ -137,7 +139,7 @@ class NotebookSearcher:
         end = min(len(lines), line_idx + context_lines + 1)
 
         before = "\n".join(lines[start:line_idx])
-        after = "\n".join(lines[line_idx + 1:end])
+        after = "\n".join(lines[line_idx + 1 : end])
 
         return before, after
 
@@ -153,7 +155,7 @@ class NotebookSearcher:
     ) -> SearchResults:
         """
         Search within a specific notebook.
-
+
         Args:
             notebook_path: Path to notebook (relative to workspace)
             pattern: Search pattern
@@ -162,7 +164,7 @@ class NotebookSearcher:
             is_regex: Treat pattern as regex
             max_results: Maximum matches to return
             context_lines: Context lines around matches
-
+
         Returns:
             SearchResults with matches
         """
@@ -200,16 +202,18 @@ class NotebookSearcher:
                 if compiled.search(line):
                     before, after = self._get_context(lines, line_idx, context_lines)
 
-                    matches.append(
-
-
-
-
-
-
-
-
+                    matches.append(
+                        SearchMatch(
+                            file_path=notebook_path,
+                            cell_index=idx,
+                            cell_type=current_type,
+                            line_number=line_idx + 1,
+                            content=line.strip()[:200],
+                            context_before=before[:100],
+                            context_after=after[:100],
+                            match_type="line",
+                        )
+                    )
 
                 if len(matches) >= max_results:
                     break
@@ -238,7 +242,7 @@ class NotebookSearcher:
     ) -> SearchResults:
         """
         Search across workspace files.
-
+
         Args:
             pattern: Search pattern
             file_patterns: File glob patterns to include (e.g., ["*.py", "*.ipynb"])
@@ -248,7 +252,7 @@ class NotebookSearcher:
             max_results: Maximum matches to return
             include_notebooks: Search in .ipynb files
             include_python: Search in .py files
-
+
         Returns:
             SearchResults with matches
         """
@@ -335,14 +339,16 @@ class NotebookSearcher:
                 if line_idx < len(lines) - 1:
                     after = lines[line_idx + 1].strip()[:100]
 
-                matches.append(
-
-
-
-
-
-
+                matches.append(
+                    SearchMatch(
+                        file_path=rel_path,
+                        line_number=line_idx + 1,
+                        content=line.strip()[:200],
+                        context_before=before,
+                        context_after=after,
+                        match_type="line",
+                    )
+                )
 
                 if len(matches) >= max_results:
                     break
@@ -360,14 +366,14 @@ class NotebookSearcher:
     ) -> List[Dict[str, Any]]:
         """
         Search cells in the current notebook.
-
+
         Convenience method for quick cell search in active notebook.
-
+
         Args:
             notebook_path: Current notebook path
             pattern: Search pattern
             cell_type: Optional cell type filter
-
+
         Returns:
             List of matching cells with their indices and content
         """
@@ -390,22 +396,24 @@ class NotebookSearcher:
                     "matching_lines": [],
                 }
 
-                cells_by_index[idx]["matching_lines"].append(
-
-
+                cells_by_index[idx]["matching_lines"].append(
+                    {
+                        "line_number": match.line_number,
+                        "content": match.content,
+                    }
+                )
 
         return list(cells_by_index.values())
 
     def get_notebook_structure(self, notebook_path: str) -> Dict[str, Any]:
         """
         Get structural overview of a notebook.
-
+
         Returns information about cells, imports, and defined symbols.
-
+
         Args:
             notebook_path: Path to notebook
-
+
         Returns:
             Dict with notebook structure information
         """
@@ -420,9 +428,9 @@ class NotebookSearcher:
        imports = set()
         definitions = set()
 
-        import_pattern = re.compile(r
-        def_pattern = re.compile(r
-        var_pattern = re.compile(r
+        import_pattern = re.compile(r"^(?:import|from)\s+([\w.]+)", re.MULTILINE)
+        def_pattern = re.compile(r"^(?:def|class)\s+(\w+)", re.MULTILINE)
+        var_pattern = re.compile(r"^(\w+)\s*=", re.MULTILINE)
 
         for idx, cell in enumerate(cells):
             cell_type = cell.get("cell_type", "code")
```
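The three structure-scan patterns added in `get_notebook_structure` are self-contained, so their behavior can be checked in isolation. A minimal sketch in plain Python; the sample cell source below is invented purely to exercise the patterns:

```python
import re

# The patterns exactly as added above in get_notebook_structure()
import_pattern = re.compile(r"^(?:import|from)\s+([\w.]+)", re.MULTILINE)
def_pattern = re.compile(r"^(?:def|class)\s+(\w+)", re.MULTILINE)
var_pattern = re.compile(r"^(\w+)\s*=", re.MULTILINE)

# Hypothetical notebook cell source, for illustration only
source = """import pandas as pd
from pathlib import Path

def load(path):
    df = pd.read_csv(path)  # indented, so var_pattern ignores it
    return df

RESULT = load("data.csv")
"""

print(import_pattern.findall(source))  # ['pandas', 'pathlib']
print(def_pattern.findall(source))     # ['load']
print(var_pattern.findall(source))     # ['RESULT']
```

Because all three patterns anchor on `^` with `re.MULTILINE`, only top-level (unindented) statements are counted, which is why the assignment inside `load` is not reported as a definition.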
**agent_server/langchain/hitl_config.py** (new file, +66)

```python
"""
Human-in-the-Loop (HITL) configuration for LangChain agent.

Defines which tools require user approval and their approval settings.
"""

from typing import Any, Dict


def get_hitl_interrupt_config() -> Dict[str, Any]:
    """Return HITL interrupt config for client-side tool execution.

    Returns:
        Dictionary mapping tool names to their HITL configuration:
        - False: No approval needed, execute immediately
        - Dict with allowed_decisions and description: Require approval

    The allowed_decisions can include:
    - "approve": Execute the tool as requested
    - "edit": Modify the tool arguments before execution
    - "reject": Cancel the tool execution
    """
    return {
        # Require approval before executing code
        "jupyter_cell_tool": {
            "allowed_decisions": ["approve", "edit", "reject"],
            "description": "🔍 Code execution requires approval",
        },
        # Safe operations - no approval needed
        "markdown_tool": False,
        "read_file_tool": {
            "allowed_decisions": ["approve", "edit"],
            "description": "📄 파일 읽기 실행 중",  # "Reading file"
        },
        "list_files_tool": {
            "allowed_decisions": ["approve", "edit"],
            "description": "📂 파일 목록 조회 중",  # "Listing files"
        },
        "write_todos": False,  # Todo updates don't need approval
        # Search tools need HITL for client-side execution (auto-approved by frontend)
        # Uses 'edit' decision to pass execution_result back
        "search_workspace_tool": {
            "allowed_decisions": ["approve", "edit"],
            "description": "🔍 Searching workspace files",
        },
        "search_notebook_cells_tool": {
            "allowed_decisions": ["approve", "edit"],
            "description": "🔍 Searching notebook cells",
        },
        # Resource check tool for client-side execution (auto-approved by frontend)
        "check_resource_tool": {
            "allowed_decisions": ["approve", "edit"],
            "description": "📊 Checking system resources",
        },
        "execute_command_tool": {
            "allowed_decisions": ["approve", "edit", "reject"],
            "description": "🖥️ Shell command requires approval",
        },
        # File write requires approval
        "write_file_tool": {
            "allowed_decisions": ["approve", "edit", "reject"],
            "description": "⚠️ File write requires approval",
        },
        # Final answer doesn't need approval
        "final_answer_tool": False,
    }
```
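The consumers of this mapping (agent.py and custom_middleware.py) are not shown in full on this page, so the following is only a plausible reading of the contract, with hypothetical helper names (`needs_approval`, `decisions_for`) that are not part of the package:

```python
from agent_server.langchain.hitl_config import get_hitl_interrupt_config

_HITL = get_hitl_interrupt_config()


def needs_approval(tool_name: str) -> bool:
    """A tool is interrupted unless it is explicitly mapped to False."""
    # Unknown tools default to requiring approval (the conservative choice).
    return _HITL.get(tool_name, True) is not False


def decisions_for(tool_name: str) -> list:
    """Decisions the user may take for a tool, e.g. ["approve", "edit", "reject"]."""
    entry = _HITL.get(tool_name)
    return entry["allowed_decisions"] if isinstance(entry, dict) else []


assert needs_approval("jupyter_cell_tool")       # code execution is gated
assert not needs_approval("markdown_tool")       # rendering markdown is not
assert decisions_for("write_file_tool") == ["approve", "edit", "reject"]
```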
**agent_server/langchain/llm_factory.py** (new file, +166)

```python
"""
LLM Factory for LangChain agent.

Provides functions to create LangChain LLM instances from configuration.
"""

import logging
from typing import Any, Dict

from agent_server.langchain.logging_utils import LLMTraceLogger

logger = logging.getLogger(__name__)


def create_llm(llm_config: Dict[str, Any]):
    """Create LangChain LLM from config.

    Args:
        llm_config: Configuration dictionary containing:
            - provider: "gemini", "openai", or "vllm"
            - gemini: {apiKey, model} for Gemini
            - openai: {apiKey, model} for OpenAI
            - vllm: {endpoint, model, apiKey} for vLLM

    Returns:
        Configured LangChain LLM instance

    Raises:
        ValueError: If provider is unsupported or API key is missing
    """
    provider = llm_config.get("provider", "gemini")
    callbacks = [LLMTraceLogger()]

    if provider == "gemini":
        return _create_gemini_llm(llm_config, callbacks)
    elif provider == "openai":
        return _create_openai_llm(llm_config, callbacks)
    elif provider == "vllm":
        return _create_vllm_llm(llm_config, callbacks)
    else:
        raise ValueError(f"Unsupported LLM provider: {provider}")


def _create_gemini_llm(llm_config: Dict[str, Any], callbacks):
    """Create Gemini LLM instance."""
    from langchain_google_genai import ChatGoogleGenerativeAI

    gemini_config = llm_config.get("gemini", {})
    api_key = gemini_config.get("apiKey")
    model = gemini_config.get("model", "gemini-2.5-pro")

    if not api_key:
        raise ValueError("Gemini API key not configured")

    logger.info(f"Creating Gemini LLM with model: {model}")

    # Gemini 2.5 Flash has issues with tool calling in LangChain
    # Use convert_system_message_to_human for better compatibility
    return ChatGoogleGenerativeAI(
        model=model,
        google_api_key=api_key,
        temperature=0.0,
        max_output_tokens=8192,
        convert_system_message_to_human=True,  # Better tool calling support
        callbacks=callbacks,
    )


def _create_openai_llm(llm_config: Dict[str, Any], callbacks):
    """Create OpenAI LLM instance."""
    from langchain_openai import ChatOpenAI

    openai_config = llm_config.get("openai", {})
    api_key = openai_config.get("apiKey")
    model = openai_config.get("model", "gpt-4")

    if not api_key:
        raise ValueError("OpenAI API key not configured")

    logger.info(f"Creating OpenAI LLM with model: {model}")

    return ChatOpenAI(
        model=model,
        api_key=api_key,
        temperature=0.0,
        max_tokens=4096,
        callbacks=callbacks,
    )


def _create_vllm_llm(llm_config: Dict[str, Any], callbacks):
    """Create vLLM-compatible LLM instance."""
    from langchain_openai import ChatOpenAI

    vllm_config = llm_config.get("vllm", {})
    endpoint = vllm_config.get("endpoint", "http://localhost:8000")
    model = vllm_config.get("model", "default")
    api_key = vllm_config.get("apiKey", "dummy")

    logger.info(f"Creating vLLM LLM with model: {model}, endpoint: {endpoint}")

    return ChatOpenAI(
        model=model,
        api_key=api_key,
        base_url=f"{endpoint}/v1",
        temperature=0.0,
        max_tokens=4096,
        callbacks=callbacks,
    )


def create_summarization_llm(llm_config: Dict[str, Any]):
    """Create LLM for summarization middleware.

    Uses the same provider as the main LLM but with simpler configuration.

    Args:
        llm_config: Configuration dictionary

    Returns:
        LLM instance suitable for summarization, or None if unavailable
    """
    provider = llm_config.get("provider", "gemini")

    try:
        if provider == "gemini":
            from langchain_google_genai import ChatGoogleGenerativeAI

            gemini_config = llm_config.get("gemini", {})
            api_key = gemini_config.get("apiKey")
            if api_key:
                return ChatGoogleGenerativeAI(
                    model="gemini-2.5-flash",
                    google_api_key=api_key,
                    temperature=0.0,
                )
        elif provider == "openai":
            from langchain_openai import ChatOpenAI

            openai_config = llm_config.get("openai", {})
            api_key = openai_config.get("apiKey")
            if api_key:
                return ChatOpenAI(
                    model="gpt-4o-mini",
                    api_key=api_key,
                    temperature=0.0,
                )
        elif provider == "vllm":
            from langchain_openai import ChatOpenAI

            vllm_config = llm_config.get("vllm", {})
            endpoint = vllm_config.get("endpoint", "http://localhost:8000")
            model = vllm_config.get("model", "default")
            api_key = vllm_config.get("apiKey", "dummy")

            return ChatOpenAI(
                model=model,
                api_key=api_key,
                base_url=f"{endpoint}/v1",
                temperature=0.0,
            )
    except Exception as e:
        logger.warning(f"Failed to create summarization LLM: {e}")
        return None

    return None
```
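Going by the `create_llm` docstring, a caller supplies a nested config keyed by provider. A minimal sketch for the vLLM path; the endpoint, model, and key values mirror the defaults in `_create_vllm_llm` and are placeholders, not production settings:

```python
from agent_server.langchain.llm_factory import create_llm

# Config shape follows the create_llm docstring above.
llm = create_llm(
    {
        "provider": "vllm",
        "vllm": {
            "endpoint": "http://localhost:8000",  # OpenAI-compatible server
            "model": "default",
            "apiKey": "dummy",  # vLLM servers typically ignore the key
        },
    }
)
print(llm.invoke("Hello").content)  # standard LangChain chat interface
```

Because the vLLM path is just `ChatOpenAI` pointed at `{endpoint}/v1`, any OpenAI-compatible server works here, and the `LLMTraceLogger` callback is attached automatically.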
**agent_server/langchain/logging_utils.py** (new file, +184)

```python
"""
Logging utilities for LangChain agent.

Provides helper functions for structured logging of LLM interactions,
messages, and middleware execution.
"""

import json
import logging
from functools import wraps
from typing import Any, Dict

from langchain_core.callbacks import BaseCallbackHandler

logger = logging.getLogger(__name__)

LOG_SEPARATOR = "=" * 96
LOG_SUBSECTION = "-" * 96


def _format_system_prompt_for_log(messages) -> tuple[int, int, str]:
    """Extract and format system messages for logging."""
    from langchain_core.messages import SystemMessage

    system_contents = [
        str(getattr(msg, "content", ""))
        for msg in messages
        if isinstance(msg, SystemMessage)
    ]
    combined = "\n\n".join(system_contents)
    return len(system_contents), len(combined), combined


def _pretty_json(value: Any) -> str:
    """Format value as pretty-printed JSON."""
    try:
        return json.dumps(value, indent=2, ensure_ascii=False, sort_keys=True)
    except TypeError:
        return json.dumps(str(value), indent=2, ensure_ascii=False)


def _serialize_message(message) -> Dict[str, Any]:
    """Serialize a LangChain message to a dictionary."""
    data: Dict[str, Any] = {"type": message.__class__.__name__}
    content = getattr(message, "content", None)
    if content is not None:
        data["content"] = content
    name = getattr(message, "name", None)
    if name:
        data["name"] = name
    tool_call_id = getattr(message, "tool_call_id", None)
    if tool_call_id:
        data["tool_call_id"] = tool_call_id
    tool_calls = getattr(message, "tool_calls", None)
    if tool_calls:
        data["tool_calls"] = tool_calls
    additional_kwargs = getattr(message, "additional_kwargs", None)
    if additional_kwargs:
        data["additional_kwargs"] = additional_kwargs
    response_metadata = getattr(message, "response_metadata", None)
    if response_metadata:
        data["response_metadata"] = response_metadata
    return data


def _format_messages_block(title: str, messages) -> str:
    """Format a list of messages as a log block."""
    lines = [LOG_SEPARATOR, title, LOG_SEPARATOR]
    if not messages:
        lines.append("<empty>")
        lines.append(LOG_SEPARATOR)
        return "\n".join(lines)

    for idx, message in enumerate(messages):
        lines.append(f"[{idx}] {message.__class__.__name__}")
        lines.append(_pretty_json(_serialize_message(message)))
        if idx < len(messages) - 1:
            lines.append(LOG_SUBSECTION)
    lines.append(LOG_SEPARATOR)
    return "\n".join(lines)


def _format_json_block(title: str, payload: Any) -> str:
    """Format a JSON payload as a log block."""
    return "\n".join(
        [
            LOG_SEPARATOR,
            title,
            LOG_SEPARATOR,
            _pretty_json(payload),
            LOG_SEPARATOR,
        ]
    )


def _format_middleware_marker(name: str, stage: str) -> str:
    """Format a middleware execution marker."""
    return "\n".join([LOG_SEPARATOR, f"MIDDLEWARE {stage}: {name}", LOG_SEPARATOR])


def _with_middleware_logging(name: str):
    """Decorator to add logging around middleware execution."""

    def decorator(func):
        @wraps(func)
        def wrapped(request, handler):
            logger.info("%s", _format_middleware_marker(name, "START"))
            response = func(request, handler)
            logger.info("%s", _format_middleware_marker(name, "END"))
            return response

        return wrapped

    return decorator


class LLMTraceLogger(BaseCallbackHandler):
    """Log prompts, responses, tool calls, and tool messages."""

    def _normalize_batches(self, messages):
        if not messages:
            return []
        if isinstance(messages[0], (list, tuple)):
            return messages
        return [messages]

    def _log_prompt_batches(self, title: str, messages) -> None:
        for batch_idx, batch in enumerate(self._normalize_batches(messages)):
            header = f"{title} (batch={batch_idx}, messages={len(batch)})"
            logger.info("%s", _format_messages_block(header, batch))

            tool_messages = [
                msg
                for msg in batch
                if getattr(msg, "type", "") == "tool"
                or msg.__class__.__name__ == "ToolMessage"
            ]
            if tool_messages:
                tool_header = f"{title} TOOL MESSAGES (batch={batch_idx})"
                logger.info("%s", _format_messages_block(tool_header, tool_messages))

    def on_chat_model_start(self, serialized, messages, **kwargs) -> None:
        if not messages:
            logger.info(
                "%s",
                _format_messages_block("AGENT -> LLM PROMPT (<none>)", []),
            )
            return
        self._log_prompt_batches("AGENT -> LLM PROMPT", messages)

    def on_chat_model_end(self, response, **kwargs) -> None:
        generations = getattr(response, "generations", None) or []
        if generations and isinstance(generations[0], list):
            batches = generations
        else:
            batches = [generations]

        for batch_idx, batch in enumerate(batches):
            for gen_idx, generation in enumerate(batch):
                message = getattr(generation, "message", None)
                if not message:
                    continue

                title = (
                    f"LLM -> AGENT RESPONSE (batch={batch_idx}, generation={gen_idx})"
                )
                logger.info("%s", _format_messages_block(title, [message]))

                tool_calls = getattr(message, "tool_calls", None)
                if tool_calls:
                    tool_title = (
                        "LLM -> AGENT TOOL CALLS "
                        f"(batch={batch_idx}, generation={gen_idx})"
                    )
                    logger.info("%s", _format_json_block(tool_title, tool_calls))

    def on_llm_start(self, serialized, prompts, **kwargs) -> None:
        if not prompts:
            logger.info("%s", _format_json_block("LLM PROMPT (<none>)", ""))
            return

        for idx, prompt in enumerate(prompts):
            title = f"LLM PROMPT (batch={idx}, length={len(prompt)})"
            logger.info("%s", _format_json_block(title, prompt))
```