hdsp-jupyter-extension 2.0.27__py3-none-any.whl → 2.0.28__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agent_server/context_providers/__init__.py +4 -2
- agent_server/context_providers/actions.py +73 -7
- agent_server/context_providers/file.py +23 -23
- agent_server/langchain/__init__.py +2 -2
- agent_server/langchain/agent.py +18 -251
- agent_server/langchain/agent_factory.py +26 -4
- agent_server/langchain/agent_prompts/planner_prompt.py +22 -31
- agent_server/langchain/custom_middleware.py +268 -43
- agent_server/langchain/llm_factory.py +102 -54
- agent_server/langchain/logging_utils.py +1 -1
- agent_server/langchain/middleware/__init__.py +5 -0
- agent_server/langchain/middleware/content_injection_middleware.py +110 -0
- agent_server/langchain/middleware/subagent_events.py +88 -9
- agent_server/langchain/middleware/subagent_middleware.py +501 -245
- agent_server/langchain/prompts.py +5 -22
- agent_server/langchain/state_schema.py +44 -0
- agent_server/langchain/tools/jupyter_tools.py +4 -5
- agent_server/langchain/tools/tool_registry.py +6 -0
- agent_server/routers/chat.py +305 -2
- agent_server/routers/config.py +193 -8
- agent_server/routers/config_schema.py +254 -0
- agent_server/routers/context.py +31 -8
- agent_server/routers/langchain_agent.py +276 -155
- hdsp_agent_core/managers/config_manager.py +100 -1
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/build_log.json +1 -1
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/package.json +2 -2
- hdsp_jupyter_extension-2.0.27.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.b5e4416b4e07ec087aad.js → hdsp_jupyter_extension-2.0.28.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.55727265b00191e68d9a.js +479 -15
- hdsp_jupyter_extension-2.0.28.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.55727265b00191e68d9a.js.map +1 -0
- jupyter_ext/labextension/static/lib_index_js.67505497667f9c0a763d.js → hdsp_jupyter_extension-2.0.28.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.df05d90f366bfd5fa023.js +1287 -190
- hdsp_jupyter_extension-2.0.28.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.df05d90f366bfd5fa023.js.map +1 -0
- hdsp_jupyter_extension-2.0.27.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.4ab73bb5068405670214.js → hdsp_jupyter_extension-2.0.28.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.08fce819ee32e9d25175.js +3 -3
- jupyter_ext/labextension/static/remoteEntry.4ab73bb5068405670214.js.map → hdsp_jupyter_extension-2.0.28.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.08fce819ee32e9d25175.js.map +1 -1
- {hdsp_jupyter_extension-2.0.27.dist-info → hdsp_jupyter_extension-2.0.28.dist-info}/METADATA +1 -1
- {hdsp_jupyter_extension-2.0.27.dist-info → hdsp_jupyter_extension-2.0.28.dist-info}/RECORD +65 -63
- jupyter_ext/_version.py +1 -1
- jupyter_ext/handlers.py +41 -0
- jupyter_ext/labextension/build_log.json +1 -1
- jupyter_ext/labextension/package.json +2 -2
- jupyter_ext/labextension/static/{frontend_styles_index_js.b5e4416b4e07ec087aad.js → frontend_styles_index_js.55727265b00191e68d9a.js} +479 -15
- jupyter_ext/labextension/static/frontend_styles_index_js.55727265b00191e68d9a.js.map +1 -0
- hdsp_jupyter_extension-2.0.27.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.67505497667f9c0a763d.js → jupyter_ext/labextension/static/lib_index_js.df05d90f366bfd5fa023.js +1287 -190
- jupyter_ext/labextension/static/lib_index_js.df05d90f366bfd5fa023.js.map +1 -0
- jupyter_ext/labextension/static/{remoteEntry.4ab73bb5068405670214.js → remoteEntry.08fce819ee32e9d25175.js} +3 -3
- hdsp_jupyter_extension-2.0.27.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.4ab73bb5068405670214.js.map → jupyter_ext/labextension/static/remoteEntry.08fce819ee32e9d25175.js.map +1 -1
- agent_server/langchain/middleware/description_injector.py +0 -150
- hdsp_jupyter_extension-2.0.27.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.b5e4416b4e07ec087aad.js.map +0 -1
- hdsp_jupyter_extension-2.0.27.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.67505497667f9c0a763d.js.map +0 -1
- jupyter_ext/labextension/static/frontend_styles_index_js.b5e4416b4e07ec087aad.js.map +0 -1
- jupyter_ext/labextension/static/lib_index_js.67505497667f9c0a763d.js.map +0 -1
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/etc/jupyter/jupyter_server_config.d/hdsp_jupyter_extension.json +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/install.json +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b80.c095373419d05e6f141a.js +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b80.c095373419d05e6f141a.js.map +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b81.61e75fb98ecff46cf836.js +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b81.61e75fb98ecff46cf836.js.map +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/static/style.js +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_babel_runtime_helpers_esm_extends_js-node_modules_emotion_serialize_dist-051195.e2553aab0c3963b83dd7.js +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_babel_runtime_helpers_esm_extends_js-node_modules_emotion_serialize_dist-051195.e2553aab0c3963b83dd7.js.map +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js.24edcc52a1c014a8a5f0.js +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js.24edcc52a1c014a8a5f0.js.map +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.19ecf6babe00caff6b8a.js +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.19ecf6babe00caff6b8a.js.map +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_styled_dist_emotion-styled_browser_development_esm_js.661fb5836f4978a7c6e1.js +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_styled_dist_emotion-styled_browser_development_esm_js.661fb5836f4978a7c6e1.js.map +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_index_js.985697e0162d8d088ca2.js +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_index_js.985697e0162d8d088ca2.js.map +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.1f5038488cdfd8b3a85d.js +0 -0
- {hdsp_jupyter_extension-2.0.27.data → hdsp_jupyter_extension-2.0.28.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.1f5038488cdfd8b3a85d.js.map +0 -0
- {hdsp_jupyter_extension-2.0.27.dist-info → hdsp_jupyter_extension-2.0.28.dist-info}/WHEEL +0 -0
- {hdsp_jupyter_extension-2.0.27.dist-info → hdsp_jupyter_extension-2.0.28.dist-info}/licenses/LICENSE +0 -0
agent_server/langchain/llm_factory.py:

```diff
@@ -93,7 +93,6 @@ def _create_vllm_llm(llm_config: Dict[str, Any], callbacks):
     from langchain_openai import ChatOpenAI
 
     vllm_config = llm_config.get("vllm", {})
-    # User provides full base URL (e.g., https://openrouter.ai/api/v1)
    endpoint = vllm_config.get("endpoint", "http://localhost:8000/v1")
    model = vllm_config.get("model", "default")
    api_key = vllm_config.get("apiKey", "dummy")
```
```diff
@@ -140,9 +139,11 @@ def _create_vllm_llm(llm_config: Dict[str, Any], callbacks):
 
 
 def create_summarization_llm(llm_config: Dict[str, Any]):
-    """Create LLM for summarization middleware.
+    """Create LLM for summarization middleware and /compact feature.
 
-
+    Priority:
+    1. If llm_config["summarization"]["enabled"] is True, use that config
+    2. Otherwise, fall back to main provider with default summarization model
 
     Args:
         llm_config: Configuration dictionary
```
```diff
@@ -150,60 +151,107 @@ def create_summarization_llm(llm_config: Dict[str, Any]):
     Returns:
         LLM instance suitable for summarization, or None if unavailable
     """
-    provider = llm_config.get("provider", "gemini")
-
     try:
-        if provider == "gemini":
-            from langchain_google_genai import ChatGoogleGenerativeAI
-
-            gemini_config = llm_config.get("gemini", {})
-            api_key = gemini_config.get("apiKey")
-            if api_key:
-                return ChatGoogleGenerativeAI(
-                    model="gemini-2.5-flash",
-                    google_api_key=api_key,
-                    temperature=0.0,
-                )
-        elif provider == "openai":
-            from langchain_openai import ChatOpenAI
-
-            openai_config = llm_config.get("openai", {})
-            api_key = openai_config.get("apiKey")
-            if api_key:
-                return ChatOpenAI(
-                    model="gpt-4o-mini",
-                    api_key=api_key,
-                    temperature=0.0,
-                )
-        elif provider == "vllm":
-            vllm_config = llm_config.get("vllm", {})
-            # User provides full base URL (e.g., https://openrouter.ai/api/v1)
-            endpoint = vllm_config.get("endpoint", "http://localhost:8000/v1")
-            model = vllm_config.get("model", "default")
-            api_key = vllm_config.get("apiKey", "dummy")
-
-            # Use ChatGPTOSS for gpt-oss models (but not via OpenRouter)
-            is_openrouter = "openrouter" in endpoint.lower()
-            if "gpt-oss" in model.lower() and not is_openrouter:
-                from agent_server.langchain.models import ChatGPTOSS
-
-                return ChatGPTOSS(
-                    model=model,
-                    base_url=endpoint,
-                    api_key=api_key,
-                    temperature=0.0,
-                )
-
-            from langchain_openai import ChatOpenAI
-
-            return ChatOpenAI(
-                model=model,
-                api_key=api_key,
-                base_url=endpoint,  # Use endpoint as-is
-                temperature=0.0,
+        # 1. Check for dedicated summarization config
+        summarization_config = llm_config.get("summarization", {})
+        if summarization_config.get("enabled"):
+            sum_provider = summarization_config.get("provider", "gemini")
+            sum_model = summarization_config.get("model")
+            logger.info(
+                f"Using dedicated summarization LLM: provider={sum_provider}, model={sum_model or 'default'}"
+            )
+            return _create_llm_for_provider(
+                llm_config, sum_provider, sum_model, for_summarization=True
             )
+
+        # 2. Fall back to main provider with default summarization model
+        provider = llm_config.get("provider", "gemini")
+        logger.info(f"Using main provider for summarization: {provider}")
+        return _create_llm_for_provider(
+            llm_config, provider, None, for_summarization=True
+        )
+
     except Exception as e:
         logger.warning(f"Failed to create summarization LLM: {e}")
         return None
 
-
+
+def _create_llm_for_provider(
+    llm_config: Dict[str, Any],
+    provider: str,
+    model_override: str = None,
+    for_summarization: bool = False,
+):
+    """Create LLM instance for a specific provider.
+
+    Args:
+        llm_config: Full configuration dictionary (for credentials)
+        provider: Provider to use ('gemini', 'openai', 'vllm')
+        model_override: Optional model name override
+        for_summarization: If True, use lightweight default models
+
+    Returns:
+        LLM instance or None
+    """
+    if provider == "gemini":
+        from langchain_google_genai import ChatGoogleGenerativeAI
+
+        gemini_config = llm_config.get("gemini", {})
+        api_key = gemini_config.get("apiKey")
+        if not api_key:
+            logger.warning("No Gemini API key found")
+            return None
+
+        model = model_override or ("gemini-2.5-flash" if for_summarization else gemini_config.get("model", "gemini-2.5-flash"))
+        return ChatGoogleGenerativeAI(
+            model=model,
+            google_api_key=api_key,
+            temperature=0.0,
+        )
+
+    elif provider == "openai":
+        from langchain_openai import ChatOpenAI
+
+        openai_config = llm_config.get("openai", {})
+        api_key = openai_config.get("apiKey")
+        if not api_key:
+            logger.warning("No OpenAI API key found")
+            return None
+
+        model = model_override or ("gpt-4o-mini" if for_summarization else openai_config.get("model", "gpt-4"))
+        return ChatOpenAI(
+            model=model,
+            api_key=api_key,
+            temperature=0.0,
+        )
+
+    elif provider == "vllm":
+        vllm_config = llm_config.get("vllm", {})
+        endpoint = vllm_config.get("endpoint", "http://localhost:8000/v1")
+        api_key = vllm_config.get("apiKey", "dummy")
+        model = model_override or vllm_config.get("model", "default")
+
+        # Use ChatGPTOSS for gpt-oss models (but not via OpenRouter)
+        is_openrouter = "openrouter" in endpoint.lower()
+        if "gpt-oss" in model.lower() and not is_openrouter:
+            from agent_server.langchain.models import ChatGPTOSS
+
+            return ChatGPTOSS(
+                model=model,
+                base_url=endpoint,
+                api_key=api_key,
+                temperature=0.0,
+            )
+
+        from langchain_openai import ChatOpenAI
+
+        return ChatOpenAI(
+            model=model,
+            api_key=api_key,
+            base_url=endpoint,
+            temperature=0.0,
+        )
+
+    else:
+        logger.warning(f"Unknown provider: {provider}")
+        return None
```
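For orientation, here is the config shape the new routing reads. The key names follow the hunk above; every concrete value (provider choice, endpoint, model names, keys) is a placeholder, not taken from the package:

```python
# Hypothetical llm_config; keys mirror the diff above, values are invented.
llm_config = {
    "provider": "vllm",  # main provider the agent runs on
    "vllm": {
        "endpoint": "https://openrouter.ai/api/v1",  # full base URL, used as-is
        "model": "default",
        "apiKey": "dummy",
    },
    # Priority 1: dedicated summarization config, used only while enabled.
    "summarization": {
        "enabled": True,
        "provider": "gemini",
        "model": "gemini-2.5-flash",
    },
    # Per-provider credentials are looked up from the same dict.
    "gemini": {"apiKey": "<your-key>"},
}

# With "enabled": True, create_summarization_llm(llm_config) resolves to
# _create_llm_for_provider(llm_config, "gemini", "gemini-2.5-flash",
# for_summarization=True). With it disabled, the call falls back to the
# main "vllm" provider and its lightweight summarization default.
```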
agent_server/langchain/logging_utils.py:

```diff
@@ -37,7 +37,7 @@ def disable_langchain_logging():
 
 
 # Auto-disable on import (comment this line to re-enable all logs)
-disable_langchain_logging()
+# disable_langchain_logging()  # TEMPORARILY ENABLED FOR DEBUGGING
 
 LOG_SEPARATOR = "=" * 96
 LOG_SUBSECTION = "-" * 96
```
agent_server/langchain/middleware/__init__.py:

```diff
@@ -3,10 +3,14 @@ Middleware Module
 
 Custom middleware for the multi-agent architecture:
 - SubAgentMiddleware: Handles subagent delegation via task tool
+- ContentInjectionMiddleware: Injects generated code/SQL into tool args
 - SkillMiddleware: Progressive skill loading for code generation agents
 - Existing middleware from custom_middleware.py is also available
 """
 
+from agent_server.langchain.middleware.content_injection_middleware import (
+    ContentInjectionMiddleware,
+)
 from agent_server.langchain.middleware.skill_middleware import (
     SkillMiddleware,
     get_skill_middleware,
@@ -18,6 +22,7 @@ from agent_server.langchain.middleware.subagent_middleware import (
 
 __all__ = [
     "SubAgentMiddleware",
+    "ContentInjectionMiddleware",
     "create_task_tool",
     "SkillMiddleware",
     "get_skill_middleware",
```
agent_server/langchain/middleware/content_injection_middleware.py (new file):

```diff
@@ -0,0 +1,110 @@
+"""
+ContentInjectionMiddleware
+
+Injects generated_content from LangGraph state into target tool args.
+This eliminates JSON escaping issues when passing code/SQL between agents.
+
+Runs BEFORE HumanInTheLoopMiddleware so HITL shows the full injected content.
+
+Flow:
+1. Subagent generates code/SQL → stored in state via Command
+2. Main Agent calls target tool (e.g., jupyter_cell_tool) without args
+3. This middleware reads state and injects content into tool args
+4. HITL middleware sees full content for user approval
+
+content_type → tool injection mapping:
+- "python" → jupyter_cell_tool(code=...), write_file_tool(content=...)
+- "sql" → markdown_tool(content="```sql\\n...\\n```")
+"""
+
+import logging
+from typing import Any, Callable, Union
+
+from langchain.agents.middleware import AgentMiddleware
+from langchain_core.messages import ToolMessage
+
+logger = logging.getLogger(__name__)
+
+
+class ContentInjectionMiddleware(AgentMiddleware):
+    """Inject state's generated_content into target tool call args.
+
+    When a subagent generates code/SQL via task_tool, it's stored in
+    LangGraph state (generated_content, generated_content_type, content_description).
+    This middleware reads the state and injects the content into the
+    appropriate tool's arguments before execution.
+
+    This ensures:
+    1. Code/SQL bypasses LLM JSON serialization (no escaping issues)
+    2. HITL middleware sees the full injected content for approval
+    3. Main Agent doesn't need to copy code into tool args
+
+    Usage in agent_factory.py:
+        middleware = [
+            ContentInjectionMiddleware(),  # BEFORE HITL
+            ...,
+            hitl_middleware,  # Sees injected content
+        ]
+    """
+
+    def wrap_tool_call(self, request, handler):
+        """Intercept tool calls and inject generated content from state.
+
+        Args:
+            request: ToolCallRequest with tool_call, state, runtime
+            handler: Next handler in middleware chain
+
+        Returns:
+            ToolMessage or Command from handler
+        """
+        state = request.state
+        if not state:
+            return handler(request)
+
+        content = state.get("generated_content") if isinstance(state, dict) else getattr(state, "generated_content", None)
+        content_type = state.get("generated_content_type") if isinstance(state, dict) else getattr(state, "generated_content_type", None)
+        desc = state.get("content_description") if isinstance(state, dict) else getattr(state, "content_description", None)
+
+        if not content or not content_type:
+            return handler(request)
+
+        tool_call = request.tool_call
+        tool_name = tool_call["name"]
+        args = tool_call.get("args", {})
+
+        new_args = None
+
+        # Python code injection
+        if content_type == "python":
+            if tool_name == "jupyter_cell_tool" and not args.get("code"):
+                new_args = {**args, "code": content}
+                if desc and not args.get("description"):
+                    new_args["description"] = desc
+                logger.info(
+                    "[ContentInjection] Injected python code (%d chars) into jupyter_cell_tool",
+                    len(content),
+                )
+            elif tool_name == "write_file_tool" and not args.get("content"):
+                new_args = {**args, "content": content}
+                logger.info(
+                    "[ContentInjection] Injected python code (%d chars) into write_file_tool",
+                    len(content),
+                )
+
+        # SQL query injection
+        elif content_type == "sql":
+            if tool_name == "markdown_tool" and not args.get("content"):
+                sql_markdown = f"```sql\n{content}\n```"
+                if desc:
+                    sql_markdown = f"{desc}\n\n{sql_markdown}"
+                new_args = {**args, "content": sql_markdown}
+                logger.info(
+                    "[ContentInjection] Injected SQL (%d chars) into markdown_tool",
+                    len(content),
+                )
+
+        if new_args is not None:
+            modified_call = {**tool_call, "args": new_args}
+            request = request.override(tool_call=modified_call)
+
+        return handler(request)
```
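Since the injection is just a dict merge on the tool call, the "sql" branch can be sketched standalone; the query and description below are invented sample data:

```python
# Standalone rendition of the content_type == "sql" branch above.
content = "SELECT id, name FROM users WHERE active = 1"  # from state["generated_content"]
desc = "Active users query"                              # from state["content_description"]

# Wrap the SQL in a fenced block, prepending the description when present.
sql_markdown = f"```sql\n{content}\n```"
if desc:
    sql_markdown = f"{desc}\n\n{sql_markdown}"

# Merge into the tool call without the LLM ever serializing the SQL as JSON.
tool_call = {"name": "markdown_tool", "args": {}}
modified_call = {**tool_call, "args": {**tool_call["args"], "content": sql_markdown}}
print(modified_call["args"]["content"])
```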
agent_server/langchain/middleware/subagent_events.py:

```diff
@@ -42,35 +42,38 @@ class SubagentEvent:
         """Convert to debug message for UI display (legacy, for logging)."""
         if self.event_type == "subagent_start":
             desc_preview = self.description[:80] + "..." if self.description and len(self.description) > 80 else self.description
-            return f"Subagent
+            return f"Subagent-{self.subagent_name} 작업 시작: {desc_preview}"
         elif self.event_type == "subagent_tool_call":
-            return f"Subagent
+            return f"Subagent-{self.subagent_name} Tool 실행: {self.tool_name}"
         elif self.event_type == "subagent_complete":
-            return f"Subagent
+            return f"Subagent-{self.subagent_name} 완료"
         else:
-            return f"Subagent
+            return f"Subagent-{self.subagent_name} {self.event_type}"
 
     def to_status_dict(self) -> Dict[str, Any]:
         """Convert to status dict with icon for SSE streaming."""
         if self.event_type == "subagent_start":
             desc_preview = self.description[:80] + "..." if self.description and len(self.description) > 80 else self.description
+            full_status = f"Subagent-{self.subagent_name} 작업 시작: {self.description}"
             return {
-                "status": f"Subagent
-                "icon": "subagentStart"
+                "status": f"Subagent-{self.subagent_name} 작업 시작: {desc_preview}",
+                "icon": "subagentStart",
+                "expandable": bool(self.description and len(self.description) > 80),
+                "full_text": full_status,
             }
         elif self.event_type == "subagent_tool_call":
             return {
-                "status": f"Subagent
+                "status": f"Subagent-{self.subagent_name} Tool 실행: {self.tool_name}",
                 "icon": "tool"
             }
         elif self.event_type == "subagent_complete":
             return {
-                "status": f"Subagent
+                "status": f"Subagent-{self.subagent_name} 완료",
                 "icon": "subagentComplete"
             }
         else:
             return {
-                "status": f"Subagent
+                "status": f"Subagent-{self.subagent_name} {self.event_type}",
                 "icon": "info"
             }
 
```
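A quick standalone check of the 80-character preview rule behind the new expandable and full_text fields; the subagent name and description here are invented:

```python
# Mirrors the to_status_dict logic above with made-up inputs.
subagent_name = "coder"  # hypothetical subagent name
description = (
    "Load sales.csv, clean null rows, aggregate revenue by month, "
    "and draw a bar chart with matplotlib"
)

desc_preview = description[:80] + "..." if description and len(description) > 80 else description
status = {
    "status": f"Subagent-{subagent_name} 작업 시작: {desc_preview}",
    "icon": "subagentStart",
    # Over 80 chars: the UI gets a truncated preview plus the full text.
    "expandable": bool(description and len(description) > 80),
    "full_text": f"Subagent-{subagent_name} 작업 시작: {description}",
}
print(status["expandable"], status["status"])
```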
agent_server/langchain/middleware/subagent_events.py:

```diff
@@ -169,3 +172,79 @@ def drain_subagent_events() -> List[SubagentEvent]:
 def get_pending_event_count() -> int:
     """Get the number of pending events in the queue."""
     return _event_queue.qsize()
+
+
+# ═════════════════════════════════════════════════════════════════════════
+# Summarization Events - For context compression status display
+# ═════════════════════════════════════════════════════════════════════════
+
+@dataclass
+class SummarizationEvent:
+    """Represents a summarization event for UI display."""
+
+    event_type: str  # summarization_start, summarization_complete
+    message_count_before: Optional[int] = None
+    message_count_after: Optional[int] = None
+    timestamp: datetime = field(default_factory=datetime.now)
+
+    def to_status_dict(self) -> Dict[str, Any]:
+        """Convert to status dict with icon for SSE streaming."""
+        if self.event_type == "summarization_start":
+            return {
+                "status": "대화 컨텍스트 요약 중...",
+                "icon": "thinking"
+            }
+        elif self.event_type == "summarization_complete":
+            if self.message_count_before and self.message_count_after:
+                return {
+                    "status": f"대화가 자동으로 압축되었습니다. ({self.message_count_before} → {self.message_count_after} 메시지)",
+                    "icon": "check"
+                }
+            return {
+                "status": "대화가 자동으로 압축되었습니다.",
+                "icon": "check"
+            }
+        else:
+            return {
+                "status": f"Summarization {self.event_type}",
+                "icon": "info"
+            }
+
+
+# Separate queue for summarization events (to avoid mixing with subagent events)
+_summarization_queue: Queue = Queue()
+
+
+def emit_summarization_start() -> None:
+    """Emit a summarization start event."""
+    event = SummarizationEvent(event_type="summarization_start")
+    _summarization_queue.put(event)
+    logger.info("Summarization event: starting context compression")
+
+
+def emit_summarization_complete(before_count: int = 0, after_count: int = 0) -> None:
+    """Emit a summarization complete event."""
+    event = SummarizationEvent(
+        event_type="summarization_complete",
+        message_count_before=before_count,
+        message_count_after=after_count,
+    )
+    _summarization_queue.put(event)
+    logger.info(f"Summarization event: complete ({before_count} → {after_count} messages)")
+
+
+def drain_summarization_events() -> List[SummarizationEvent]:
+    """
+    Drain all pending summarization events from the queue.
+
+    Returns:
+        List of SummarizationEvent objects
+    """
+    events = []
+    while True:
+        try:
+            event = _summarization_queue.get_nowait()
+            events.append(event)
+        except Empty:
+            break
+    return events
```