hdsp-jupyter-extension 2.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agent_server/__init__.py +8 -0
- agent_server/core/__init__.py +92 -0
- agent_server/core/api_key_manager.py +427 -0
- agent_server/core/code_validator.py +1238 -0
- agent_server/core/context_condenser.py +308 -0
- agent_server/core/embedding_service.py +254 -0
- agent_server/core/error_classifier.py +577 -0
- agent_server/core/llm_client.py +95 -0
- agent_server/core/llm_service.py +649 -0
- agent_server/core/notebook_generator.py +274 -0
- agent_server/core/prompt_builder.py +35 -0
- agent_server/core/rag_manager.py +742 -0
- agent_server/core/reflection_engine.py +489 -0
- agent_server/core/retriever.py +248 -0
- agent_server/core/state_verifier.py +452 -0
- agent_server/core/summary_generator.py +484 -0
- agent_server/core/task_manager.py +198 -0
- agent_server/knowledge/__init__.py +9 -0
- agent_server/knowledge/watchdog_service.py +352 -0
- agent_server/main.py +160 -0
- agent_server/prompts/__init__.py +60 -0
- agent_server/prompts/file_action_prompts.py +113 -0
- agent_server/routers/__init__.py +9 -0
- agent_server/routers/agent.py +591 -0
- agent_server/routers/chat.py +188 -0
- agent_server/routers/config.py +100 -0
- agent_server/routers/file_resolver.py +293 -0
- agent_server/routers/health.py +42 -0
- agent_server/routers/rag.py +163 -0
- agent_server/schemas/__init__.py +60 -0
- hdsp_agent_core/__init__.py +158 -0
- hdsp_agent_core/factory.py +252 -0
- hdsp_agent_core/interfaces.py +203 -0
- hdsp_agent_core/knowledge/__init__.py +31 -0
- hdsp_agent_core/knowledge/chunking.py +356 -0
- hdsp_agent_core/knowledge/libraries/dask.md +188 -0
- hdsp_agent_core/knowledge/libraries/matplotlib.md +164 -0
- hdsp_agent_core/knowledge/libraries/polars.md +68 -0
- hdsp_agent_core/knowledge/loader.py +337 -0
- hdsp_agent_core/llm/__init__.py +13 -0
- hdsp_agent_core/llm/service.py +556 -0
- hdsp_agent_core/managers/__init__.py +22 -0
- hdsp_agent_core/managers/config_manager.py +133 -0
- hdsp_agent_core/managers/session_manager.py +251 -0
- hdsp_agent_core/models/__init__.py +115 -0
- hdsp_agent_core/models/agent.py +316 -0
- hdsp_agent_core/models/chat.py +41 -0
- hdsp_agent_core/models/common.py +95 -0
- hdsp_agent_core/models/rag.py +368 -0
- hdsp_agent_core/prompts/__init__.py +63 -0
- hdsp_agent_core/prompts/auto_agent_prompts.py +1260 -0
- hdsp_agent_core/prompts/cell_action_prompts.py +98 -0
- hdsp_agent_core/services/__init__.py +18 -0
- hdsp_agent_core/services/agent_service.py +438 -0
- hdsp_agent_core/services/chat_service.py +205 -0
- hdsp_agent_core/services/rag_service.py +262 -0
- hdsp_agent_core/tests/__init__.py +1 -0
- hdsp_agent_core/tests/conftest.py +102 -0
- hdsp_agent_core/tests/test_factory.py +251 -0
- hdsp_agent_core/tests/test_services.py +326 -0
- hdsp_jupyter_extension-2.0.0.data/data/etc/jupyter/jupyter_server_config.d/hdsp_jupyter_extension.json +7 -0
- hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/build_log.json +738 -0
- hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/install.json +5 -0
- hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/package.json +134 -0
- hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.2607ff74c74acfa83158.js +4369 -0
- hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.2607ff74c74acfa83158.js.map +1 -0
- hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.622c1a5918b3aafb2315.js +12496 -0
- hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.622c1a5918b3aafb2315.js.map +1 -0
- hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b80.c095373419d05e6f141a.js +94 -0
- hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b80.c095373419d05e6f141a.js.map +1 -0
- hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b81.61e75fb98ecff46cf836.js +94 -0
- hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b81.61e75fb98ecff46cf836.js.map +1 -0
- hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.dae97cde171e13b8c834.js +623 -0
- hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.dae97cde171e13b8c834.js.map +1 -0
- hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/style.js +4 -0
- hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_babel_runtime_helpers_esm_extends_js-node_modules_emotion_serialize_dist-051195.e2553aab0c3963b83dd7.js +507 -0
- hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_babel_runtime_helpers_esm_extends_js-node_modules_emotion_serialize_dist-051195.e2553aab0c3963b83dd7.js.map +1 -0
- hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js-node_modules-782ee5.d9ed8645ef1d311657d8.js +2071 -0
- hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js-node_modules-782ee5.d9ed8645ef1d311657d8.js.map +1 -0
- hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.36b49c71871f98d4f549.js +1059 -0
- hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.36b49c71871f98d4f549.js.map +1 -0
- hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_styled_dist_emotion-styled_browser_development_esm_js.661fb5836f4978a7c6e1.js +376 -0
- hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_styled_dist_emotion-styled_browser_development_esm_js.661fb5836f4978a7c6e1.js.map +1 -0
- hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_index_js.985697e0162d8d088ca2.js +60336 -0
- hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_index_js.985697e0162d8d088ca2.js.map +1 -0
- hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.2e13df4ea61496e95d45.js +7132 -0
- hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.2e13df4ea61496e95d45.js.map +1 -0
- hdsp_jupyter_extension-2.0.0.dist-info/METADATA +152 -0
- hdsp_jupyter_extension-2.0.0.dist-info/RECORD +121 -0
- hdsp_jupyter_extension-2.0.0.dist-info/WHEEL +4 -0
- hdsp_jupyter_extension-2.0.0.dist-info/licenses/LICENSE +21 -0
- jupyter_ext/__init__.py +233 -0
- jupyter_ext/_version.py +4 -0
- jupyter_ext/config.py +111 -0
- jupyter_ext/etc/jupyter/jupyter_server_config.d/hdsp_jupyter_extension.json +7 -0
- jupyter_ext/handlers.py +632 -0
- jupyter_ext/labextension/build_log.json +738 -0
- jupyter_ext/labextension/package.json +134 -0
- jupyter_ext/labextension/static/frontend_styles_index_js.2607ff74c74acfa83158.js +4369 -0
- jupyter_ext/labextension/static/frontend_styles_index_js.2607ff74c74acfa83158.js.map +1 -0
- jupyter_ext/labextension/static/lib_index_js.622c1a5918b3aafb2315.js +12496 -0
- jupyter_ext/labextension/static/lib_index_js.622c1a5918b3aafb2315.js.map +1 -0
- jupyter_ext/labextension/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b80.c095373419d05e6f141a.js +94 -0
- jupyter_ext/labextension/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b80.c095373419d05e6f141a.js.map +1 -0
- jupyter_ext/labextension/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b81.61e75fb98ecff46cf836.js +94 -0
- jupyter_ext/labextension/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b81.61e75fb98ecff46cf836.js.map +1 -0
- jupyter_ext/labextension/static/remoteEntry.dae97cde171e13b8c834.js +623 -0
- jupyter_ext/labextension/static/remoteEntry.dae97cde171e13b8c834.js.map +1 -0
- jupyter_ext/labextension/static/style.js +4 -0
- jupyter_ext/labextension/static/vendors-node_modules_babel_runtime_helpers_esm_extends_js-node_modules_emotion_serialize_dist-051195.e2553aab0c3963b83dd7.js +507 -0
- jupyter_ext/labextension/static/vendors-node_modules_babel_runtime_helpers_esm_extends_js-node_modules_emotion_serialize_dist-051195.e2553aab0c3963b83dd7.js.map +1 -0
- jupyter_ext/labextension/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js-node_modules-782ee5.d9ed8645ef1d311657d8.js +2071 -0
- jupyter_ext/labextension/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js-node_modules-782ee5.d9ed8645ef1d311657d8.js.map +1 -0
- jupyter_ext/labextension/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.36b49c71871f98d4f549.js +1059 -0
- jupyter_ext/labextension/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.36b49c71871f98d4f549.js.map +1 -0
- jupyter_ext/labextension/static/vendors-node_modules_emotion_styled_dist_emotion-styled_browser_development_esm_js.661fb5836f4978a7c6e1.js +376 -0
- jupyter_ext/labextension/static/vendors-node_modules_emotion_styled_dist_emotion-styled_browser_development_esm_js.661fb5836f4978a7c6e1.js.map +1 -0
- jupyter_ext/labextension/static/vendors-node_modules_mui_material_index_js.985697e0162d8d088ca2.js +60336 -0
- jupyter_ext/labextension/static/vendors-node_modules_mui_material_index_js.985697e0162d8d088ca2.js.map +1 -0
- jupyter_ext/labextension/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.2e13df4ea61496e95d45.js +7132 -0
- jupyter_ext/labextension/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.2e13df4ea61496e95d45.js.map +1 -0
|
@@ -0,0 +1,591 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Agent Router - Core agent functionality endpoints
|
|
3
|
+
|
|
4
|
+
Handles plan generation, refinement, replanning, and state verification.
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import json
|
|
8
|
+
import logging
|
|
9
|
+
import re
|
|
10
|
+
from typing import Any, Dict, List
|
|
11
|
+
|
|
12
|
+
from fastapi import APIRouter, HTTPException
|
|
13
|
+
from hdsp_agent_core.knowledge.loader import get_knowledge_base, get_library_detector
|
|
14
|
+
from hdsp_agent_core.managers.config_manager import ConfigManager
|
|
15
|
+
from hdsp_agent_core.models.agent import (
|
|
16
|
+
PlanRequest,
|
|
17
|
+
PlanResponse,
|
|
18
|
+
RefineRequest,
|
|
19
|
+
RefineResponse,
|
|
20
|
+
ReflectRequest,
|
|
21
|
+
ReflectResponse,
|
|
22
|
+
ReplanRequest,
|
|
23
|
+
ReplanResponse,
|
|
24
|
+
ReportExecutionRequest,
|
|
25
|
+
ReportExecutionResponse,
|
|
26
|
+
ValidateRequest,
|
|
27
|
+
ValidateResponse,
|
|
28
|
+
VerifyStateRequest,
|
|
29
|
+
VerifyStateResponse,
|
|
30
|
+
)
|
|
31
|
+
from hdsp_agent_core.prompts.auto_agent_prompts import (
|
|
32
|
+
format_plan_prompt,
|
|
33
|
+
format_refine_prompt,
|
|
34
|
+
format_reflection_prompt,
|
|
35
|
+
)
|
|
36
|
+
|
|
37
|
+
from agent_server.core.code_validator import CodeValidator
|
|
38
|
+
from agent_server.core.error_classifier import get_error_classifier
|
|
39
|
+
from agent_server.core.llm_service import LLMService
|
|
40
|
+
from agent_server.core.rag_manager import get_rag_manager
|
|
41
|
+
from agent_server.core.state_verifier import get_state_verifier
|
|
42
|
+
|
|
43
|
+
router = APIRouter()
|
|
44
|
+
logger = logging.getLogger(__name__)
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
# ============ Helper Functions ============
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
def _get_config() -> Dict[str, Any]:
    """Return the server-side configuration (fallback when the client sends none)."""
    manager = ConfigManager.get_instance()
    return manager.get_config()
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
def _build_llm_config(llm_config) -> Dict[str, Any]:
|
|
56
|
+
"""
|
|
57
|
+
Build LLM config dict from client-provided LLMConfig.
|
|
58
|
+
Falls back to server config if not provided.
|
|
59
|
+
"""
|
|
60
|
+
if llm_config is None:
|
|
61
|
+
# Fallback to server config (backward compatibility)
|
|
62
|
+
return _get_config()
|
|
63
|
+
|
|
64
|
+
# Build config from client-provided data
|
|
65
|
+
config = {"provider": llm_config.provider}
|
|
66
|
+
|
|
67
|
+
if llm_config.gemini:
|
|
68
|
+
config["gemini"] = {
|
|
69
|
+
"apiKey": llm_config.gemini.apiKey,
|
|
70
|
+
"model": llm_config.gemini.model,
|
|
71
|
+
}
|
|
72
|
+
|
|
73
|
+
if llm_config.openai:
|
|
74
|
+
config["openai"] = {
|
|
75
|
+
"apiKey": llm_config.openai.apiKey,
|
|
76
|
+
"model": llm_config.openai.model,
|
|
77
|
+
}
|
|
78
|
+
|
|
79
|
+
if llm_config.vllm:
|
|
80
|
+
config["vllm"] = {
|
|
81
|
+
"endpoint": llm_config.vllm.endpoint,
|
|
82
|
+
"apiKey": llm_config.vllm.apiKey,
|
|
83
|
+
"model": llm_config.vllm.model,
|
|
84
|
+
}
|
|
85
|
+
|
|
86
|
+
return config
|
|
87
|
+
|
|
88
|
+
|
|
89
|
+
async def _call_llm(prompt: str, llm_config=None) -> str:
    """Send *prompt* to the LLM selected by the (client-provided) config."""
    service = LLMService(_build_llm_config(llm_config))
    return await service.generate_response(prompt)
|
|
94
|
+
|
|
95
|
+
|
|
96
|
+
def _parse_json_response(response: str) -> Dict[str, Any]:
|
|
97
|
+
"""Extract JSON from LLM response"""
|
|
98
|
+
# Try direct JSON parsing first
|
|
99
|
+
try:
|
|
100
|
+
return json.loads(response)
|
|
101
|
+
except json.JSONDecodeError:
|
|
102
|
+
pass
|
|
103
|
+
|
|
104
|
+
# Try extracting from markdown code blocks
|
|
105
|
+
json_patterns = [
|
|
106
|
+
r"```json\s*([\s\S]*?)\s*```",
|
|
107
|
+
r"```\s*([\s\S]*?)\s*```",
|
|
108
|
+
r"\{[\s\S]*\}",
|
|
109
|
+
]
|
|
110
|
+
|
|
111
|
+
for pattern in json_patterns:
|
|
112
|
+
matches = re.findall(pattern, response)
|
|
113
|
+
for match in matches:
|
|
114
|
+
try:
|
|
115
|
+
return json.loads(match)
|
|
116
|
+
except json.JSONDecodeError:
|
|
117
|
+
continue
|
|
118
|
+
|
|
119
|
+
return {}
|
|
120
|
+
|
|
121
|
+
|
|
122
|
+
def _sanitize_tool_calls(data: Dict[str, Any]) -> Dict[str, Any]:
|
|
123
|
+
"""Remove markdown code blocks from tool call code parameters"""
|
|
124
|
+
|
|
125
|
+
def clean_code(code: str) -> str:
|
|
126
|
+
if not code:
|
|
127
|
+
return code
|
|
128
|
+
# Remove ```python ... ``` wrapper
|
|
129
|
+
code = re.sub(r"^```(?:python)?\s*\n?", "", code)
|
|
130
|
+
code = re.sub(r"\n?```\s*$", "", code)
|
|
131
|
+
return code.strip()
|
|
132
|
+
|
|
133
|
+
if "plan" in data and "steps" in data["plan"]:
|
|
134
|
+
for step in data["plan"]["steps"]:
|
|
135
|
+
for tc in step.get("toolCalls", []):
|
|
136
|
+
if tc.get("tool") == "jupyter_cell":
|
|
137
|
+
params = tc.get("parameters", {})
|
|
138
|
+
if "code" in params:
|
|
139
|
+
params["code"] = clean_code(params["code"])
|
|
140
|
+
|
|
141
|
+
if "toolCalls" in data:
|
|
142
|
+
for tc in data["toolCalls"]:
|
|
143
|
+
if tc.get("tool") == "jupyter_cell":
|
|
144
|
+
params = tc.get("parameters", {})
|
|
145
|
+
if "code" in params:
|
|
146
|
+
params["code"] = clean_code(params["code"])
|
|
147
|
+
|
|
148
|
+
return data
|
|
149
|
+
|
|
150
|
+
|
|
151
|
+
def _detect_required_libraries(
    request: str, imported_libraries: List[str]
) -> List[str]:
    """
    Keyword/pattern-based library detection — no LLM round-trip.

    Returns the subset of knowledge-base libraries the request appears
    to need; empty when no library docs are installed.
    """
    available = get_knowledge_base().list_available_libraries()
    if not available:
        return []

    detector = get_library_detector()
    return detector.detect(
        request=request,
        available_libraries=available,
        imported_libraries=imported_libraries,
    )
|
|
172
|
+
|
|
173
|
+
|
|
174
|
+
def _get_installed_packages() -> List[str]:
|
|
175
|
+
"""Get list of installed Python packages"""
|
|
176
|
+
import subprocess
|
|
177
|
+
|
|
178
|
+
try:
|
|
179
|
+
result = subprocess.run(
|
|
180
|
+
["pip", "list", "--format=freeze"],
|
|
181
|
+
capture_output=True,
|
|
182
|
+
text=True,
|
|
183
|
+
timeout=10,
|
|
184
|
+
)
|
|
185
|
+
packages = []
|
|
186
|
+
for line in result.stdout.strip().split("\n"):
|
|
187
|
+
if "==" in line:
|
|
188
|
+
packages.append(line.split("==")[0].lower())
|
|
189
|
+
return packages[:100] # Limit to prevent token explosion
|
|
190
|
+
except Exception:
|
|
191
|
+
return []
|
|
192
|
+
|
|
193
|
+
|
|
194
|
+
# ============ Endpoints ============
|
|
195
|
+
|
|
196
|
+
|
|
197
|
+
@router.post("/plan", response_model=PlanResponse)
async def generate_plan(request: PlanRequest) -> Dict[str, Any]:
    """
    Generate an execution plan from a natural language request.

    Takes a user request and notebook context, returns a structured plan
    with steps and tool calls.

    RAG context is automatically injected if available.

    Raises:
        HTTPException: 400 when the request text is empty; 500 when the
            LLM response cannot be parsed into a plan or any other
            unexpected error occurs.
    """
    logger.info(f"Plan request received: {request.request[:100]}...")

    if not request.request:
        raise HTTPException(status_code=400, detail="request is required")

    try:
        # Deterministic library detection (keyword-based, no LLM call)
        imported_libs = request.notebookContext.importedLibraries
        detected_libraries = _detect_required_libraries(request.request, imported_libs)
        logger.info(f"Detected libraries: {detected_libraries}")

        # Get RAG context if available (with library prioritization)
        rag_context = None
        try:
            rag_manager = get_rag_manager()
            if rag_manager.is_ready:
                # Pass detected_libraries to prioritize relevant API guides
                rag_context = await rag_manager.get_context_for_query(
                    query=request.request, detected_libraries=detected_libraries
                )
                if rag_context:
                    logger.info(
                        f"RAG context injected: {len(rag_context)} chars (libs: {detected_libraries})"
                    )
        except Exception as e:
            # RAG is best-effort: log the failure and plan without it.
            logger.warning(f"RAG context retrieval failed: {e}")
            # Continue without RAG context

        # Build prompt
        prompt = format_plan_prompt(
            request=request.request,
            cell_count=request.notebookContext.cellCount,
            imported_libraries=imported_libs,
            defined_variables=request.notebookContext.definedVariables,
            recent_cells=request.notebookContext.recentCells,
            available_libraries=_get_installed_packages(),
            detected_libraries=detected_libraries,
            rag_context=rag_context,
        )

        # Call LLM with client-provided config
        response = await _call_llm(prompt, request.llmConfig)
        logger.info(f"LLM response length: {len(response)}")

        # Parse response
        plan_data = _parse_json_response(response)

        if not plan_data or "plan" not in plan_data:
            raise HTTPException(
                status_code=500,
                detail=f"Failed to parse plan from LLM response: {response[:200]}",
            )

        # Sanitize code blocks
        plan_data = _sanitize_tool_calls(plan_data)

        # Ensure goal field exists (use user request if not provided by LLM)
        if "goal" not in plan_data["plan"]:
            plan_data["plan"]["goal"] = request.request

        return {
            "plan": plan_data["plan"],
            "reasoning": plan_data.get("reasoning", ""),
        }

    except HTTPException:
        # Re-raise our own 4xx/5xx responses unchanged.
        raise
    except Exception as e:
        logger.error(f"Plan generation failed: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
|
|
277
|
+
|
|
278
|
+
|
|
279
|
+
@router.post("/refine", response_model=RefineResponse)
async def refine_code(request: RefineRequest) -> Dict[str, Any]:
    """
    Refine code after an execution error.

    Takes the failed step and error information, returns refined tool calls.

    Raises:
        HTTPException: 400 when no error information is supplied; 500
            when no refined code could be produced.
    """
    logger.info(f"Refine request: attempt {request.attempt}")

    if not request.error:
        raise HTTPException(status_code=400, detail="error is required")

    try:
        # Extract previous code (fall back to the first jupyter_cell
        # tool call of the failed step when not sent explicitly)
        previous_code = request.previousCode or ""
        if not previous_code and request.step.get("toolCalls"):
            for tc in request.step["toolCalls"]:
                if tc.get("tool") == "jupyter_cell":
                    previous_code = tc.get("parameters", {}).get("code", "")
                    break

        # Process traceback (clients may send a list of frames or a string)
        traceback_data = request.error.traceback or []
        traceback_str = (
            "\n".join(traceback_data)
            if isinstance(traceback_data, list)
            else str(traceback_data)
        )

        # Build prompt
        prompt = format_refine_prompt(
            original_code=previous_code,
            error_type=request.error.type,
            error_message=request.error.message,
            traceback=traceback_str,
            attempt=request.attempt,
            max_attempts=3,
            available_libraries=_get_installed_packages(),
            defined_variables=[],
        )

        # Call LLM with client-provided config
        response = await _call_llm(prompt, request.llmConfig)

        # Parse response
        refine_data = _parse_json_response(response)

        if not refine_data or "toolCalls" not in refine_data:
            # Try extracting code directly from a fenced block as a last resort
            code_match = re.search(r"```(?:python)?\s*([\s\S]*?)\s*```", response)
            if code_match:
                refine_data = {
                    "toolCalls": [
                        {
                            "tool": "jupyter_cell",
                            "parameters": {"code": code_match.group(1).strip()},
                        }
                    ],
                    "reasoning": "",
                }
            else:
                raise HTTPException(
                    status_code=500, detail="Failed to generate refined code"
                )

        # Sanitize code blocks
        refine_data = _sanitize_tool_calls(refine_data)

        return {
            "toolCalls": refine_data["toolCalls"],
            "reasoning": refine_data.get("reasoning", ""),
        }

    except HTTPException:
        # Re-raise our own 4xx/5xx responses unchanged.
        raise
    except Exception as e:
        logger.error(f"Refine failed: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
|
|
357
|
+
|
|
358
|
+
|
|
359
|
+
@router.post("/replan", response_model=ReplanResponse)
async def replan(request: ReplanRequest) -> Dict[str, Any]:
    """
    Determine how to handle a failed step.

    Uses deterministic error classification first.
    LLM fallback is triggered when:
    1. Same error fails 2+ times (previousAttempts >= 2)
    2. Unknown error type not in pattern mapping
    3. Complex error (2+ exceptions in traceback)

    Raises:
        HTTPException: 500 on any classification failure.
    """
    logger.info(
        f"Replan request for step {request.currentStepIndex} "
        f"(attempts: {request.previousAttempts}, useLlmFallback: {request.useLlmFallback})"
    )

    try:
        classifier = get_error_classifier()

        # Normalize traceback to a single string for the classifier.
        traceback_data = request.error.traceback or []
        traceback_str = (
            "\n".join(traceback_data)
            if isinstance(traceback_data, list)
            else str(traceback_data)
        )

        # Check if LLM fallback should be used
        should_use_llm, fallback_reason = classifier.should_use_llm_fallback(
            error_type=request.error.type,
            traceback=traceback_str,
            previous_attempts=request.previousAttempts,
        )

        if should_use_llm and request.useLlmFallback:
            logger.info(f"LLM fallback triggered: {fallback_reason}")
            # For now, still use pattern matching but log the fallback trigger
            # TODO: Enable LLM fallback when LLM client is configured
            analysis = classifier.classify(
                error_type=request.error.type,
                error_message=request.error.message,
                traceback=traceback_str,
            )
            # Mark that LLM fallback was triggered but not used (no client)
            analysis.reasoning += f" (LLM fallback 조건 충족: {fallback_reason})"
        else:
            # Use deterministic error classification
            analysis = classifier.classify(
                error_type=request.error.type,
                error_message=request.error.message,
                traceback=traceback_str,
            )

        return {
            "decision": analysis.decision.value,
            "analysis": analysis.to_dict()["analysis"],
            "reasoning": analysis.reasoning,
            "changes": analysis.changes,
            "usedLlm": analysis.used_llm,
            "confidence": analysis.confidence,
        }

    except Exception as e:
        logger.error(f"Replan failed: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
|
|
423
|
+
|
|
424
|
+
|
|
425
|
+
@router.post("/verify-state", response_model=VerifyStateResponse)
async def verify_state(request: VerifyStateRequest) -> Dict[str, Any]:
    """
    Verify execution state after a step completes.

    Compares the step's actual output against the expected changes and
    reports any discrepancies together with a confidence score.
    """
    logger.info(f"Verify state for step {request.stepIndex}")

    try:
        outcome = get_state_verifier().verify(
            step_index=request.stepIndex,
            expected_changes=request.expectedChanges,
            actual_output=request.actualOutput,
            execution_result=request.executionResult,
        )
        return {
            "verified": outcome.verified,
            "discrepancies": outcome.discrepancies,
            "confidence": outcome.confidence,
        }
    except Exception as e:
        logger.error(f"State verification failed: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
|
|
453
|
+
|
|
454
|
+
|
|
455
|
+
@router.post("/report-execution", response_model=ReportExecutionResponse)
async def report_execution(request: ReportExecutionRequest) -> Dict[str, Any]:
    """
    Accept a tool-execution report from the client.

    Tools run locally in the IDE extension; the client posts each result
    back so the server can process it (state verification, session
    updates, and similar bookkeeping).
    """
    logger.info(f"Execution report for step {request.stepId}")

    # Currently a plain acknowledgement; nothing is derived from the
    # report yet.  "nextAction" is reserved for a suggested follow-up.
    return {"acknowledged": True, "nextAction": None}
|
|
472
|
+
|
|
473
|
+
|
|
474
|
+
@router.post("/validate", response_model=ValidateResponse)
async def validate_code(request: ValidateRequest) -> Dict[str, Any]:
    """
    Validate code before execution.

    Performs static analysis using AST, Ruff, and Pyflakes to detect:
    - Syntax errors
    - Undefined variables
    - Unused imports
    - Code style issues
    - Security vulnerabilities

    Returns validation results with automatic fixes when possible.
    """
    logger.info(f"Validate request for {len(request.code)} chars of code")

    try:
        # Share whatever notebook state the client sent so checks can
        # account for variables/imports defined in earlier cells.
        notebook_ctx = {}
        if request.notebookContext:
            notebook_ctx = {
                "definedVariables": request.notebookContext.definedVariables,
                "importedLibraries": request.notebookContext.importedLibraries,
            }

        outcome = CodeValidator(notebook_context=notebook_ctx).full_validation(
            request.code
        )

        # Map the internal ValidationResult onto the response schema.
        deps = outcome.dependencies.to_dict() if outcome.dependencies else None
        return {
            "valid": outcome.is_valid,
            "issues": [issue.to_dict() for issue in outcome.issues],
            "dependencies": deps,
            "hasErrors": outcome.has_errors,
            "hasWarnings": outcome.has_warnings,
            "summary": outcome.summary,
        }

    except Exception as e:
        logger.error(f"Code validation failed: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
|
|
518
|
+
|
|
519
|
+
|
|
520
|
+
@router.post("/reflect", response_model=ReflectResponse)
async def reflect_on_step(request: ReflectRequest) -> Dict[str, Any]:
    """
    Reflect on step execution results.

    Analyzes the execution result of a single step to determine:
    - Whether the step succeeded and met checkpoint criteria
    - Impact on remaining steps
    - Recommended next actions (continue/adjust/retry/replan)

    This is called after each step execution to guide adaptive planning.

    Raises:
        HTTPException: 500 on any unexpected failure.
    """
    logger.info(
        f"Reflect request for step {request.stepNumber}: {request.stepDescription[:50]}..."
    )

    try:
        # Build reflection prompt
        prompt = format_reflection_prompt(
            step_number=request.stepNumber,
            step_description=request.stepDescription,
            executed_code=request.executedCode,
            execution_status=request.executionStatus,
            execution_output=request.executionOutput,
            error_message=request.errorMessage or "",
            expected_outcome=request.expectedOutcome or "",
            validation_criteria=request.validationCriteria or [],
            remaining_steps=request.remainingSteps or [],
        )

        # Call LLM (using server config since ReflectRequest doesn't have llmConfig)
        response = await _call_llm(prompt)

        # Parse JSON response
        reflection_data = _parse_json_response(response)

        if not reflection_data:
            # Fallback: Simple heuristic when LLM fails — treat a clean
            # execution as a pass and recommend continue, otherwise retry.
            is_success = request.executionStatus == "success"
            return {
                "reflection": {
                    "evaluation": {
                        "checkpoint_passed": is_success,
                        "output_matches_expected": is_success,
                        "confidence_score": 0.5,
                    },
                    "analysis": {
                        "success_factors": ["실행 완료"] if is_success else [],
                        "failure_factors": [] if is_success else ["에러 발생"],
                        "unexpected_outcomes": [],
                    },
                    "impact_on_remaining": {
                        "affected_steps": [],
                        "severity": "none" if is_success else "minor",
                        "description": "영향 없음"
                        if is_success
                        else "다음 단계 확인 필요",
                    },
                    "recommendations": {
                        "action": "continue" if is_success else "retry",
                        "adjustments": [],
                        "reasoning": "기본 휴리스틱 기반 판단",
                    },
                }
            }

        # Return structured reflection result
        return {"reflection": reflection_data}

    except Exception as e:
        logger.error(f"Reflection failed: {e}", exc_info=True)
        raise HTTPException(status_code=500, detail=str(e))
|