hdsp-jupyter-extension 2.0.10__py3-none-any.whl → 2.0.11__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agent_server/core/notebook_generator.py +4 -4
- agent_server/langchain/custom_middleware.py +95 -9
- agent_server/langchain/hitl_config.py +5 -0
- agent_server/langchain/llm_factory.py +1 -85
- agent_server/langchain/prompts.py +105 -128
- agent_server/prompts/file_action_prompts.py +8 -8
- agent_server/routers/langchain_agent.py +78 -12
- hdsp_agent_core/__init__.py +46 -47
- hdsp_agent_core/factory.py +6 -10
- hdsp_agent_core/interfaces.py +4 -2
- hdsp_agent_core/knowledge/__init__.py +5 -5
- hdsp_agent_core/knowledge/chunking.py +87 -61
- hdsp_agent_core/knowledge/loader.py +103 -101
- hdsp_agent_core/llm/service.py +192 -107
- hdsp_agent_core/managers/config_manager.py +16 -22
- hdsp_agent_core/managers/session_manager.py +5 -4
- hdsp_agent_core/models/__init__.py +12 -12
- hdsp_agent_core/models/agent.py +15 -8
- hdsp_agent_core/models/common.py +1 -2
- hdsp_agent_core/models/rag.py +48 -111
- hdsp_agent_core/prompts/__init__.py +12 -12
- hdsp_agent_core/prompts/cell_action_prompts.py +9 -7
- hdsp_agent_core/services/agent_service.py +10 -8
- hdsp_agent_core/services/chat_service.py +10 -6
- hdsp_agent_core/services/rag_service.py +3 -6
- hdsp_agent_core/tests/conftest.py +4 -1
- hdsp_agent_core/tests/test_factory.py +2 -2
- hdsp_agent_core/tests/test_services.py +12 -19
- {hdsp_jupyter_extension-2.0.10.data → hdsp_jupyter_extension-2.0.11.data}/data/share/jupyter/labextensions/hdsp-agent/build_log.json +1 -1
- {hdsp_jupyter_extension-2.0.10.data → hdsp_jupyter_extension-2.0.11.data}/data/share/jupyter/labextensions/hdsp-agent/package.json +2 -2
- hdsp_jupyter_extension-2.0.10.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.dc6434bee96ab03a0539.js → hdsp_jupyter_extension-2.0.11.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.58c1e128ba0b76f41f04.js +81 -77
- hdsp_jupyter_extension-2.0.11.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.58c1e128ba0b76f41f04.js.map +1 -0
- hdsp_jupyter_extension-2.0.10.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.4a252df3ade74efee8d6.js → hdsp_jupyter_extension-2.0.11.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.9da31d1134a53b0c4af5.js +3 -3
- jupyter_ext/labextension/static/remoteEntry.4a252df3ade74efee8d6.js.map → hdsp_jupyter_extension-2.0.11.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.9da31d1134a53b0c4af5.js.map +1 -1
- {hdsp_jupyter_extension-2.0.10.dist-info → hdsp_jupyter_extension-2.0.11.dist-info}/METADATA +1 -1
- {hdsp_jupyter_extension-2.0.10.dist-info → hdsp_jupyter_extension-2.0.11.dist-info}/RECORD +68 -68
- jupyter_ext/__init__.py +21 -11
- jupyter_ext/_version.py +1 -1
- jupyter_ext/handlers.py +69 -50
- jupyter_ext/labextension/build_log.json +1 -1
- jupyter_ext/labextension/package.json +2 -2
- jupyter_ext/labextension/static/{lib_index_js.dc6434bee96ab03a0539.js → lib_index_js.58c1e128ba0b76f41f04.js} +81 -77
- jupyter_ext/labextension/static/lib_index_js.58c1e128ba0b76f41f04.js.map +1 -0
- jupyter_ext/labextension/static/{remoteEntry.4a252df3ade74efee8d6.js → remoteEntry.9da31d1134a53b0c4af5.js} +3 -3
- hdsp_jupyter_extension-2.0.10.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.4a252df3ade74efee8d6.js.map → jupyter_ext/labextension/static/remoteEntry.9da31d1134a53b0c4af5.js.map +1 -1
- hdsp_jupyter_extension-2.0.10.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.dc6434bee96ab03a0539.js.map +0 -1
- jupyter_ext/labextension/static/lib_index_js.dc6434bee96ab03a0539.js.map +0 -1
- {hdsp_jupyter_extension-2.0.10.data → hdsp_jupyter_extension-2.0.11.data}/data/etc/jupyter/jupyter_server_config.d/hdsp_jupyter_extension.json +0 -0
- {hdsp_jupyter_extension-2.0.10.data → hdsp_jupyter_extension-2.0.11.data}/data/share/jupyter/labextensions/hdsp-agent/install.json +0 -0
- {hdsp_jupyter_extension-2.0.10.data → hdsp_jupyter_extension-2.0.11.data}/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.2d9fb488c82498c45c2d.js +0 -0
- {hdsp_jupyter_extension-2.0.10.data → hdsp_jupyter_extension-2.0.11.data}/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.2d9fb488c82498c45c2d.js.map +0 -0
- {hdsp_jupyter_extension-2.0.10.data → hdsp_jupyter_extension-2.0.11.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b80.c095373419d05e6f141a.js +0 -0
- {hdsp_jupyter_extension-2.0.10.data → hdsp_jupyter_extension-2.0.11.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b80.c095373419d05e6f141a.js.map +0 -0
- {hdsp_jupyter_extension-2.0.10.data → hdsp_jupyter_extension-2.0.11.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b81.61e75fb98ecff46cf836.js +0 -0
- {hdsp_jupyter_extension-2.0.10.data → hdsp_jupyter_extension-2.0.11.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b81.61e75fb98ecff46cf836.js.map +0 -0
- {hdsp_jupyter_extension-2.0.10.data → hdsp_jupyter_extension-2.0.11.data}/data/share/jupyter/labextensions/hdsp-agent/static/style.js +0 -0
- {hdsp_jupyter_extension-2.0.10.data → hdsp_jupyter_extension-2.0.11.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_babel_runtime_helpers_esm_extends_js-node_modules_emotion_serialize_dist-051195.e2553aab0c3963b83dd7.js +0 -0
- {hdsp_jupyter_extension-2.0.10.data → hdsp_jupyter_extension-2.0.11.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_babel_runtime_helpers_esm_extends_js-node_modules_emotion_serialize_dist-051195.e2553aab0c3963b83dd7.js.map +0 -0
- {hdsp_jupyter_extension-2.0.10.data → hdsp_jupyter_extension-2.0.11.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js.24edcc52a1c014a8a5f0.js +0 -0
- {hdsp_jupyter_extension-2.0.10.data → hdsp_jupyter_extension-2.0.11.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js.24edcc52a1c014a8a5f0.js.map +0 -0
- {hdsp_jupyter_extension-2.0.10.data → hdsp_jupyter_extension-2.0.11.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.19ecf6babe00caff6b8a.js +0 -0
- {hdsp_jupyter_extension-2.0.10.data → hdsp_jupyter_extension-2.0.11.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.19ecf6babe00caff6b8a.js.map +0 -0
- {hdsp_jupyter_extension-2.0.10.data → hdsp_jupyter_extension-2.0.11.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_styled_dist_emotion-styled_browser_development_esm_js.661fb5836f4978a7c6e1.js +0 -0
- {hdsp_jupyter_extension-2.0.10.data → hdsp_jupyter_extension-2.0.11.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_styled_dist_emotion-styled_browser_development_esm_js.661fb5836f4978a7c6e1.js.map +0 -0
- {hdsp_jupyter_extension-2.0.10.data → hdsp_jupyter_extension-2.0.11.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_index_js.985697e0162d8d088ca2.js +0 -0
- {hdsp_jupyter_extension-2.0.10.data → hdsp_jupyter_extension-2.0.11.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_index_js.985697e0162d8d088ca2.js.map +0 -0
- {hdsp_jupyter_extension-2.0.10.data → hdsp_jupyter_extension-2.0.11.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.1f5038488cdfd8b3a85d.js +0 -0
- {hdsp_jupyter_extension-2.0.10.data → hdsp_jupyter_extension-2.0.11.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.1f5038488cdfd8b3a85d.js.map +0 -0
- {hdsp_jupyter_extension-2.0.10.dist-info → hdsp_jupyter_extension-2.0.11.dist-info}/WHEEL +0 -0
- {hdsp_jupyter_extension-2.0.10.dist-info → hdsp_jupyter_extension-2.0.11.dist-info}/licenses/LICENSE +0 -0

hdsp_agent_core/services/agent_service.py:

```diff
@@ -179,11 +179,11 @@ class EmbeddedAgentService(IAgentService):
         rag_context = None
         try:
             from hdsp_agent_core.factory import get_service_factory
+
             rag_service = get_service_factory().get_rag_service()
             if rag_service.is_ready():
                 rag_context = await rag_service.get_context_for_query(
-                    query=request.request,
-                    detected_libraries=detected_libraries
+                    query=request.request, detected_libraries=detected_libraries
                 )
         except Exception as e:
             logger.warning(f"RAG context retrieval failed: {e}")
@@ -208,7 +208,9 @@ class EmbeddedAgentService(IAgentService):
         plan_data = self._parse_json_response(response)
 
         if not plan_data or "plan" not in plan_data:
-            raise ValueError(f"Failed to parse plan from LLM response: {response[:200]}")
+            raise ValueError(
+                f"Failed to parse plan from LLM response: {response[:200]}"
+            )
 
         # Sanitize and ensure goal
         plan_data = self._sanitize_tool_calls(plan_data)
@@ -291,6 +293,7 @@ class EmbeddedAgentService(IAgentService):
         # Use error classifier for deterministic classification
         try:
             from hdsp_agent_core.managers.error_classifier import get_error_classifier
+
             classifier = get_error_classifier()
         except ImportError:
             # Fallback if error_classifier not yet migrated
@@ -346,7 +349,9 @@ class EmbeddedAgentService(IAgentService):
         return {
             "valid": result.is_valid,
             "issues": [issue.to_dict() for issue in result.issues],
-            "dependencies": result.dependencies.to_dict()
+            "dependencies": result.dependencies.to_dict()
+            if result.dependencies
+            else None,
             "hasErrors": result.has_errors,
             "hasWarnings": result.has_warnings,
             "summary": result.summary,
@@ -379,10 +384,7 @@ class ProxyAgentService(IAgentService):
         logger.info(f"ProxyAgentService initialized (server: {self._base_url})")
 
     async def _request(
-        self,
-        method: str,
-        path: str,
-        data: Optional[Dict] = None
+        self, method: str, path: str, data: Optional[Dict] = None
     ) -> Dict[str, Any]:
         """Make HTTP request to agent server"""
         url = f"{self._base_url}{path}"
```
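
The first hunk wires RAG lookups through the service factory and deliberately swallows failures so plan generation can continue without context. A minimal sketch of that call pattern, using only the names visible in the diff (`get_service_factory`, `get_rag_service`, `is_ready`, `get_context_for_query`); the `fetch_rag_context` wrapper itself is hypothetical:

```python
# Sketch of the guarded RAG lookup shown in the first hunk above. Only the
# calls visible in the diff are used; fetch_rag_context is a hypothetical wrapper.
import logging
from typing import List, Optional

logger = logging.getLogger(__name__)


async def fetch_rag_context(query: str, detected_libraries: List[str]) -> Optional[str]:
    """Return formatted RAG context, or None when RAG is unavailable or fails."""
    rag_context = None
    try:
        from hdsp_agent_core.factory import get_service_factory

        rag_service = get_service_factory().get_rag_service()
        if rag_service.is_ready():
            rag_context = await rag_service.get_context_for_query(
                query=query, detected_libraries=detected_libraries
            )
    except Exception as e:  # a RAG failure must not break plan generation
        logger.warning(f"RAG context retrieval failed: {e}")
    return rag_context
```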

hdsp_agent_core/services/chat_service.py:

```diff
@@ -69,7 +69,9 @@ class EmbeddedChatService(IChatService):
         session = self._session_manager.get_or_create_session(conversation_id)
         return session.id
 
-    def _build_context(self, conversation_id: str, max_messages: int = 5) -> Optional[str]:
+    def _build_context(
+        self, conversation_id: str, max_messages: int = 5
+    ) -> Optional[str]:
         """Build conversation context from history"""
         return self._session_manager.build_context(conversation_id, max_messages)
 
@@ -88,7 +90,9 @@ class EmbeddedChatService(IChatService):
         config = self._build_llm_config(request.llmConfig)
 
         if not config or not config.get("provider"):
-            raise ValueError("LLM not configured. Please provide llmConfig with API keys.")
+            raise ValueError(
+                "LLM not configured. Please provide llmConfig with API keys."
+            )
 
         # Get or create conversation
         conversation_id = self._get_or_create_conversation(request.conversationId)
@@ -98,9 +102,7 @@ class EmbeddedChatService(IChatService):
 
         # Call LLM
         llm_service = LLMService(config)
-        response = await llm_service.generate_response(
-            request.message, context=context
-        )
+        response = await llm_service.generate_response(request.message, context=context)
 
         # Store messages
         self._store_messages(conversation_id, request.message, response)
@@ -124,7 +126,9 @@ class EmbeddedChatService(IChatService):
         config = self._build_llm_config(request.llmConfig)
 
         if not config or not config.get("provider"):
-            yield {"error": "LLM not configured. Please provide llmConfig with API keys."}
+            yield {
+                "error": "LLM not configured. Please provide llmConfig with API keys."
+            }
             return
 
         # Get or create conversation
```
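
Both chat entry points above guard on `config.get("provider")` before constructing `LLMService`; only the error formatting changes here. A condensed sketch of that flow, where `answer` is a hypothetical helper and the `LLMService` import path is assumed from `hdsp_agent_core/llm/service.py` in the file list:

```python
# Hedged sketch of the provider guard plus LLM call shown above. The import
# path is assumed from the package's file list; answer() is a hypothetical helper.
from typing import Optional

from hdsp_agent_core.llm.service import LLMService


async def answer(message: str, config: Optional[dict], context: Optional[str] = None) -> str:
    # Same guard as both chat paths in the hunks above.
    if not config or not config.get("provider"):
        raise ValueError(
            "LLM not configured. Please provide llmConfig with API keys."
        )
    llm_service = LLMService(config)
    return await llm_service.generate_response(message, context=context)
```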

hdsp_agent_core/services/rag_service.py:

```diff
@@ -91,7 +91,7 @@ class EmbeddedRAGService(IRAGService):
         self,
         query: str,
         detected_libraries: Optional[List[str]] = None,
-        max_results: int = 5
+        max_results: int = 5,
     ) -> Optional[str]:
         """Get formatted context for a query"""
         logger.info(f"[Embedded] Get context for query: {query[:100]}...")
@@ -186,10 +186,7 @@ class ProxyRAGService(IRAGService):
         return self._ready
 
     async def _request(
-        self,
-        method: str,
-        path: str,
-        data: Optional[Dict] = None
+        self, method: str, path: str, data: Optional[Dict] = None
     ) -> Dict[str, Any]:
         """Make HTTP request to RAG server"""
         url = f"{self._base_url}{path}"
@@ -218,7 +215,7 @@ class ProxyRAGService(IRAGService):
         self,
         query: str,
         detected_libraries: Optional[List[str]] = None,
-        max_results: int = 5
+        max_results: int = 5,
     ) -> Optional[str]:
         """Get context via proxy"""
         logger.info(f"[Proxy] Get context for query: {query[:100]}...")
```
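
All three hunks only touch the formatting of the `get_context_for_query` and `_request` signatures, so the call shape is identical for the embedded and proxy implementations. A hypothetical caller against that signature; the `IRAGService` import path is an assumption based on `hdsp_agent_core/interfaces.py` in the file list, and the argument values are illustrative:

```python
# Hypothetical usage of the get_context_for_query signature shown above; the
# IRAGService import path is an assumption based on the file list.
from typing import Optional

from hdsp_agent_core.interfaces import IRAGService


async def lookup_pandas_context(rag: IRAGService) -> Optional[str]:
    # Works the same against EmbeddedRAGService or ProxyRAGService.
    return await rag.get_context_for_query(
        query="How do I pivot a pandas DataFrame?",
        detected_libraries=["pandas"],
        max_results=5,
    )
```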

hdsp_agent_core/tests/conftest.py:

```diff
@@ -5,7 +5,6 @@ Provides pytest fixtures for testing ServiceFactory and service implementations.
 """
 
 import os
-from typing import Generator
 from unittest.mock import AsyncMock, MagicMock, patch
 
 import pytest
@@ -15,6 +14,7 @@ import pytest
 def reset_factory():
     """Reset ServiceFactory singleton before each test"""
     from hdsp_agent_core.factory import ServiceFactory
+
     ServiceFactory.reset_instance()
     yield
     ServiceFactory.reset_instance()
@@ -76,6 +76,7 @@ def sample_plan_request():
     """Sample plan request for testing"""
     from hdsp_agent_core.models.agent import PlanRequest
     from hdsp_agent_core.models.common import NotebookContext
+
     return PlanRequest(
         request="Create a simple plot",
         notebookContext=NotebookContext(),
@@ -86,6 +87,7 @@ def sample_plan_request():
 def sample_chat_request():
     """Sample chat request for testing"""
     from hdsp_agent_core.models.chat import ChatRequest
+
     return ChatRequest(
         message="Hello, how can I analyze this data?",
         conversationId="test-conversation",
@@ -96,6 +98,7 @@ def sample_chat_request():
 def sample_search_request():
     """Sample search request for testing"""
     from hdsp_agent_core.models.rag import SearchRequest
+
     return SearchRequest(
         query="pandas dataframe operations",
         top_k=5,
```
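
The conftest changes are cosmetic (an unused `Generator` import dropped, blank lines added after local imports), but the `reset_factory` fixture they touch is what keeps the singleton factory from leaking state between tests. A self-contained sketch of that pattern with a stand-in `Factory` class, since the real `ServiceFactory` API is only partially visible in this diff:

```python
# Stand-in demonstration of the singleton-reset fixture pattern used by the
# conftest above. Factory is a dummy; the real ServiceFactory API is only
# partially visible in this diff.
import pytest


class Factory:
    _instance = None

    @classmethod
    def get_instance(cls) -> "Factory":
        if cls._instance is None:
            cls._instance = cls()
        return cls._instance

    @classmethod
    def reset_instance(cls) -> None:
        cls._instance = None


@pytest.fixture(autouse=True)
def reset_factory():
    """Give every test a fresh singleton, mirroring the fixture in the hunk."""
    Factory.reset_instance()
    yield
    Factory.reset_instance()


def test_each_test_sees_a_fresh_instance():
    assert Factory.get_instance() is Factory.get_instance()
```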

hdsp_agent_core/tests/test_factory.py:

```diff
@@ -137,7 +137,7 @@ class TestServiceFactoryInitialization:
         # Mock the RAG service initialization
         with patch(
             "hdsp_agent_core.services.rag_service.EmbeddedRAGService.initialize",
-            new_callable=AsyncMock
+            new_callable=AsyncMock,
         ):
             factory = ServiceFactory.get_instance()
             await factory.initialize()
@@ -241,7 +241,7 @@ class TestServiceFactoryModeSpecificServices:
         # Mock RAG initialization
         with patch(
             "hdsp_agent_core.services.rag_service.EmbeddedRAGService.initialize",
-            new_callable=AsyncMock
+            new_callable=AsyncMock,
         ):
             factory = ServiceFactory.get_instance()
             await factory.initialize()
```
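
Both test_factory hunks change only a trailing comma after `new_callable=AsyncMock`. The underlying pattern, patching an async initializer so `initialize()` never touches a real backend, looks roughly like this generic version; `Service` and `boot` are stand-ins, not the real `ServiceFactory` API:

```python
# Generic version of the AsyncMock patch pattern used in the hunks above.
# Service and boot are stand-ins for illustration only.
import asyncio
from unittest.mock import AsyncMock, patch


class Service:
    async def initialize(self) -> None:
        raise RuntimeError("would hit the real backend")


async def boot() -> str:
    await Service().initialize()
    return "ready"


def test_boot_with_mocked_initialize():
    # new_callable=AsyncMock replaces the coroutine method with an awaitable mock.
    with patch.object(Service, "initialize", new_callable=AsyncMock):
        assert asyncio.run(boot()) == "ready"
```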

hdsp_agent_core/tests/test_services.py:

```diff
@@ -18,10 +18,8 @@ class TestProxyAgentService:
     def proxy_agent_service(self):
         """Create ProxyAgentService instance"""
         from hdsp_agent_core.services.agent_service import ProxyAgentService
-        return ProxyAgentService(
-            base_url="http://localhost:8000",
-            timeout=30.0
-        )
+
+        return ProxyAgentService(base_url="http://localhost:8000", timeout=30.0)
 
     def test_implements_interface(self, proxy_agent_service):
         """Test ProxyAgentService implements IAgentService"""
@@ -41,12 +39,8 @@ class TestProxyAgentService:
     ):
         """Test generate_plan makes HTTP POST request"""
         mock_response_data = {
-            "plan": {
-                "goal": "Create a simple plot",
-                "totalSteps": 1,
-                "steps": []
-            },
-            "reasoning": "Test reasoning"
+            "plan": {"goal": "Create a simple plot", "totalSteps": 1, "steps": []},
+            "reasoning": "Test reasoning",
         }
 
         with patch(
@@ -76,10 +70,8 @@ class TestProxyChatService:
     def proxy_chat_service(self):
         """Create ProxyChatService instance"""
         from hdsp_agent_core.services.chat_service import ProxyChatService
-        return ProxyChatService(
-            base_url="http://localhost:8000",
-            timeout=30.0
-        )
+
+        return ProxyChatService(base_url="http://localhost:8000", timeout=30.0)
 
     def test_implements_interface(self, proxy_chat_service):
         """Test ProxyChatService implements IChatService"""
@@ -119,10 +111,8 @@ class TestProxyRAGService:
     def proxy_rag_service(self):
         """Create ProxyRAGService instance"""
         from hdsp_agent_core.services.rag_service import ProxyRAGService
-        return ProxyRAGService(
-            base_url="http://localhost:8000",
-            timeout=30.0
-        )
+
+        return ProxyRAGService(base_url="http://localhost:8000", timeout=30.0)
 
     def test_implements_interface(self, proxy_rag_service):
         """Test ProxyRAGService implements IRAGService"""
@@ -189,6 +179,7 @@ class TestEmbeddedAgentService:
     def embedded_agent_service(self):
         """Create EmbeddedAgentService instance"""
         from hdsp_agent_core.services.agent_service import EmbeddedAgentService
+
         return EmbeddedAgentService()
 
     def test_implements_interface(self, embedded_agent_service):
@@ -202,7 +193,7 @@ class TestEmbeddedAgentService:
         """Test generate_plan calls LLM service"""
         # Mock the LLM service
         with patch.object(
-            embedded_agent_service,
+            embedded_agent_service, "_llm_service", create=True
         ) as mock_llm:
             mock_llm.generate_response = AsyncMock(
                 return_value='{"steps": [], "plan_id": "test"}'
@@ -219,6 +210,7 @@ class TestEmbeddedChatService:
     def embedded_chat_service(self):
         """Create EmbeddedChatService instance"""
        from hdsp_agent_core.services.chat_service import EmbeddedChatService
+
         return EmbeddedChatService()
 
     def test_implements_interface(self, embedded_chat_service):
@@ -233,6 +225,7 @@ class TestEmbeddedRAGService:
     def embedded_rag_service(self):
         """Create EmbeddedRAGService instance with mocked RAG manager"""
         from hdsp_agent_core.services.rag_service import EmbeddedRAGService
+
         service = EmbeddedRAGService()
         return service
 
```
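
One hunk above patches a private `_llm_service` attribute with `create=True`, which lets `patch.object` attach an attribute that may not exist on the instance yet. A stand-alone version of that pattern with a dummy `Agent` class (the real service's attribute layout is not shown in this diff):

```python
# Generic illustration of patch.object(..., create=True) as used above.
# Agent is a dummy class, not the package's EmbeddedAgentService.
from unittest.mock import AsyncMock, patch


class Agent:
    pass  # no _llm_service attribute until one is injected


def test_patch_missing_attribute():
    agent = Agent()
    with patch.object(agent, "_llm_service", create=True) as mock_llm:
        mock_llm.generate_response = AsyncMock(
            return_value='{"steps": [], "plan_id": "test"}'
        )
        assert agent._llm_service is mock_llm  # attribute exists only inside the patch
```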

package.json (labextension metadata):

```diff
@@ -1,6 +1,6 @@
 {
   "name": "hdsp-agent",
-  "version": "2.0.10",
+  "version": "2.0.11",
   "description": "HDSP Agent JupyterLab Extension - Thin client for Agent Server",
   "keywords": [
     "jupyter",
@@ -127,7 +127,7 @@
     }
   },
   "_build": {
-    "load": "static/remoteEntry.4a252df3ade74efee8d6.js",
+    "load": "static/remoteEntry.9da31d1134a53b0c4af5.js",
     "extension": "./extension",
     "style": "./style"
   }
```

lib_index_js.*.js (bundled labextension frontend):

```diff
@@ -3261,9 +3261,10 @@ SyntaxError: '(' was never closed
             const interruptAction = msg.metadata?.interrupt?.action;
             const isWriteFile = interruptAction === 'write_file_tool';
             const isEditFile = interruptAction === 'edit_file_tool';
+            const isMultiEditFile = interruptAction === 'multiedit_file_tool';
             const writePath = (isWriteFile
                 && typeof msg.metadata?.interrupt?.args?.path === 'string') ? msg.metadata?.interrupt?.args?.path : '';
-            const editPath = (isEditFile
+            const editPath = ((isEditFile || isMultiEditFile)
                 && typeof msg.metadata?.interrupt?.args?.path === 'string') ? msg.metadata?.interrupt?.args?.path : '';
             const autoApproved = msg.metadata?.interrupt?.autoApproved;
             const headerRole = msg.role === 'user'
@@ -3283,10 +3284,26 @@ SyntaxError: '(' was never closed
             react__WEBPACK_IMPORTED_MODULE_0___default().createElement("div", { className: "jp-agent-interrupt-action-args" }, (() => {
                 const command = msg.metadata?.interrupt?.args?.command;
                 const code = msg.metadata?.interrupt?.args?.code || msg.metadata?.interrupt?.args?.content || '';
-                // Handle edit_file_tool with diff preview
+                // Handle edit_file_tool and multiedit_file_tool with diff preview
                 let snippet;
                 let language;
-                if (isEditFile) {
+                if (isMultiEditFile) {
+                    // Handle multiedit_file_tool - show all edits
+                    const edits = msg.metadata?.interrupt?.args?.edits || [];
+                    const editsPreview = edits.slice(0, 5).map((edit, idx) => {
+                        const oldStr = edit.old_string || '';
+                        const newStr = edit.new_string || '';
+                        const oldPreview = oldStr.length > 100 ? oldStr.slice(0, 100) + '...' : oldStr;
+                        const newPreview = newStr.length > 100 ? newStr.slice(0, 100) + '...' : newStr;
+                        return `# Edit ${idx + 1}${edit.replace_all ? ' (replace_all)' : ''}\n` +
+                            oldPreview.split('\n').map((line) => `-${line}`).join('\n') + '\n' +
+                            newPreview.split('\n').map((line) => `+${line}`).join('\n');
+                    }).join('\n\n');
+                    const moreEdits = edits.length > 5 ? `\n\n... and ${edits.length - 5} more edits` : '';
+                    snippet = `--- ${editPath} (${edits.length} edits)\n+++ ${editPath} (after)\n\n${editsPreview}${moreEdits}`;
+                    language = 'diff';
+                }
+                else if (isEditFile) {
                     const oldStr = msg.metadata?.interrupt?.args?.old_string || '';
                     const newStr = msg.metadata?.interrupt?.args?.new_string || '';
                     const replaceAll = msg.metadata?.interrupt?.args?.replace_all;
@@ -3325,9 +3342,10 @@ SyntaxError: '(' was never closed
                 const safePath = escapeHtml(writePath);
                 html = html.replace(/<span class="code-block-language">[^<]*<\/span>/, `<span class="code-block-language jp-agent-interrupt-path">${safePath}</span>`);
             }
-            if (isEditFile && editPath) {
+            if ((isEditFile || isMultiEditFile) && editPath) {
                 const safePath = escapeHtml(editPath);
-                html = html.replace(/<span class="code-block-language">[^<]*<\/span>/, `<span class="code-block-language jp-agent-interrupt-path">${safePath}</span>`);
+                const editIcon = isMultiEditFile ? '📝' : '✏️';
+                html = html.replace(/<span class="code-block-language">[^<]*<\/span>/, `<span class="code-block-language jp-agent-interrupt-path">${editIcon} ${safePath}</span>`);
             }
             // actionHtml이 비어있지 않을 때만 추가
             return actionHtml ? html.replace('</div>', `${actionHtml}</div>`) : html;
```
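
The new `isMultiEditFile` branch above builds a unified-diff-style preview string for multiedit interrupts (per-edit `-`/`+` blocks, capped at five edits, 100-character previews). A rough Python re-rendering of that string building, run on a made-up payload, shows what the UI ends up displaying; it is an illustration only, not code from the package:

```python
# Python re-rendering of the multiedit preview logic added above, applied to a
# made-up payload; purely illustrative.
edits = [
    {"old_string": "x = 1", "new_string": "x = 2"},
    {"old_string": "print(x)", "new_string": "print(x, flush=True)", "replace_all": True},
]
edit_path = "analysis/util.py"  # hypothetical path


def preview(edit: dict, idx: int) -> str:
    old, new = edit.get("old_string", ""), edit.get("new_string", "")
    old = old[:100] + "..." if len(old) > 100 else old
    new = new[:100] + "..." if len(new) > 100 else new
    header = f"# Edit {idx + 1}{' (replace_all)' if edit.get('replace_all') else ''}"
    minus = "\n".join(f"-{line}" for line in old.split("\n"))
    plus = "\n".join(f"+{line}" for line in new.split("\n"))
    return f"{header}\n{minus}\n{plus}"


body = "\n\n".join(preview(e, i) for i, e in enumerate(edits[:5]))
more = f"\n\n... and {len(edits) - 5} more edits" if len(edits) > 5 else ""
snippet = f"--- {edit_path} ({len(edits)} edits)\n+++ {edit_path} (after)\n\n{body}{more}"
print(snippet)
```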

lib_index_js.*.js (bundled labextension frontend, continued):

```diff
@@ -9034,77 +9052,63 @@ __webpack_require__.r(__webpack_exports__);
  * - Key rotation on rate limit (429) is handled by frontend
  */
 const STORAGE_KEY = 'hdsp-agent-llm-config';
-const DEFAULT_LANGCHAIN_SYSTEM_PROMPT = `You are an expert Python data scientist and Jupyter notebook assistant.
-… (most of the body of the previous prompt is not preserved in this diff rendering) …
-**When to paginate (use offset/limit):**
-- Reading any file >500 lines
-- Exploring unfamiliar codebases (always start with limit=100)
-- Reading multiple files in sequence
-- Any research or investigation task
-
-**When full read is OK:**
-- Small files (<500 lines)
-- Files you need to edit immediately after reading
-- After confirming file size with first scan
-
-## 🔧 Code Development
-For code generation/refactoring, use LSP tools (diagnostics_tool, references_tool) to check errors and find symbol usages. Use multiedit_file_tool for multiple changes in one file.
+const DEFAULT_LANGCHAIN_SYSTEM_PROMPT = `You are an expert Python data scientist and Jupyter notebook assistant. Respond in Korean only.
+
+# Core Rules
+1. Be concise (≤4 lines unless detail requested)
+2. ALWAYS call a tool in every response - never respond with text only
+3. ALWAYS include a brief Korean explanation before tool calls
+
+# Task Workflow
+
+## Simple Tasks (1-2 steps)
+Execute directly without todos.
+
+## Complex Tasks (3+ steps)
+1. Create todos with write_todos (all items in Korean)
+2. ALWAYS include "작업 요약 및 다음단계 제시" as the LAST item
+3. After each tool result: check todos → call next tool → repeat
+4. **Final todo ("작업 요약 및 다음단계 제시")**:
+   - FIRST: Output summary JSON in your content (REQUIRED!)
+   - THEN: Call write_todos to mark all as completed
+   - Both must be in the SAME response
+
+### Summary JSON Format (MUST output before marking complete)
+\`\`\`json
+{"summary": "실행된 작업 요약", "next_items": [{"subject": "제목", "description": "설명"}]}
+\`\`\`
+Suggest 3-5 next items. **You CANNOT mark "작업 요약" as completed without outputting this JSON first.**
+
+# Mandatory Checks
+
+## Resource Check (BEFORE data operations)
+Call \`check_resource_tool\` FIRST when:
+- Loading files (.csv, .parquet, .json, .xlsx, .pickle, .h5, .feather)
+- Using pandas/polars/dask for dataframes
+- Training ML models
+
+# Tool Usage
+
+## File Search (execute_command_tool)
+\`\`\`bash
+find . -iname '*filename*.csv' 2>/dev/null  # Find by name
+grep -rn 'pattern' --include='*.py' .       # Search contents
+\`\`\`
+
+## File Reading (read_file_tool)
+- Large files: \`read_file_tool(path, limit=100)\` first
+- Use \`offset\` for pagination
+- Small files (<500 lines): Read without limit
+
+## Code Output
+- For plots/charts: Use English labels only
+- Use LSP tools for error checking and symbol lookup
+- Use multiedit_file_tool for multiple changes
+
+# Forbidden
+- Empty responses (no tool call AND no content)
+- Tool calls without Korean explanation
+- Stopping with pending/in_progress todos
 `;
 // ═══════════════════════════════════════════════════════════════════════════
 // Key Rotation State (in-memory, not persisted)
```
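
The rewritten prompt requires the model to emit a fenced summary JSON (`{"summary": ..., "next_items": [...]}`) before it may close the final todo. How the backend consumes that block is not shown in this diff; the sketch below is a hypothetical consumer-side check of the payload shape only:

```python
# Hypothetical validator for the summary payload the new prompt mandates.
# Nothing here is taken from the package's actual parsing code.
import json
from typing import Optional


def parse_summary(content: str) -> Optional[dict]:
    """Extract the summary/next_items payload from a fenced ```json block, if any."""
    if "```json" not in content:
        return None
    block = content.split("```json", 1)[1].split("```", 1)[0]
    try:
        data = json.loads(block)
    except json.JSONDecodeError:
        return None
    if "summary" not in data or not isinstance(data.get("next_items"), list):
        return None
    return data


example = 'Done.\n```json\n{"summary": "EDA 완료", "next_items": [{"subject": "모델링", "description": "baseline 모델 학습"}]}\n```'
print(parse_summary(example))
```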

lib_index_js.*.js (bundled labextension frontend, continued):

```diff
@@ -16003,4 +16007,4 @@ __webpack_require__.r(__webpack_exports__);
 /***/ }
 
 }]);
-//# sourceMappingURL=lib_index_js.dc6434bee96ab03a0539.js.map
+//# sourceMappingURL=lib_index_js.58c1e128ba0b76f41f04.js.map
```