hdsp-jupyter-extension 2.0.6__py3-none-any.whl → 2.0.8__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package, as published to their public registry. It is provided for informational purposes only.
- agent_server/core/embedding_service.py +67 -46
- agent_server/core/rag_manager.py +31 -17
- agent_server/core/reflection_engine.py +0 -1
- agent_server/core/retriever.py +13 -8
- agent_server/core/vllm_embedding_service.py +243 -0
- agent_server/knowledge/watchdog_service.py +1 -1
- agent_server/langchain/ARCHITECTURE.md +1193 -0
- agent_server/langchain/agent.py +82 -588
- agent_server/langchain/custom_middleware.py +663 -0
- agent_server/langchain/executors/__init__.py +2 -7
- agent_server/langchain/executors/notebook_searcher.py +46 -38
- agent_server/langchain/hitl_config.py +71 -0
- agent_server/langchain/llm_factory.py +166 -0
- agent_server/langchain/logging_utils.py +223 -0
- agent_server/langchain/prompts.py +150 -0
- agent_server/langchain/state.py +16 -6
- agent_server/langchain/tools/__init__.py +19 -0
- agent_server/langchain/tools/file_tools.py +354 -114
- agent_server/langchain/tools/file_utils.py +334 -0
- agent_server/langchain/tools/jupyter_tools.py +18 -18
- agent_server/langchain/tools/lsp_tools.py +264 -0
- agent_server/langchain/tools/resource_tools.py +161 -0
- agent_server/langchain/tools/search_tools.py +198 -216
- agent_server/langchain/tools/shell_tools.py +54 -0
- agent_server/main.py +11 -1
- agent_server/routers/health.py +1 -1
- agent_server/routers/langchain_agent.py +1040 -289
- agent_server/routers/rag.py +8 -3
- hdsp_agent_core/models/rag.py +15 -1
- hdsp_agent_core/prompts/auto_agent_prompts.py +3 -3
- hdsp_agent_core/services/rag_service.py +6 -1
- {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.8.data}/data/share/jupyter/labextensions/hdsp-agent/build_log.json +1 -1
- {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.8.data}/data/share/jupyter/labextensions/hdsp-agent/package.json +3 -2
- hdsp_jupyter_extension-2.0.6.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.02d346171474a0fb2dc1.js → hdsp_jupyter_extension-2.0.8.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.8740a527757068814573.js +470 -7
- hdsp_jupyter_extension-2.0.8.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.8740a527757068814573.js.map +1 -0
- hdsp_jupyter_extension-2.0.6.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.a223ea20056954479ae9.js → hdsp_jupyter_extension-2.0.8.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.e4ff4b5779b5e049f84c.js +3196 -441
- hdsp_jupyter_extension-2.0.8.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.e4ff4b5779b5e049f84c.js.map +1 -0
- hdsp_jupyter_extension-2.0.6.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.addf2fa038fa60304aa2.js → hdsp_jupyter_extension-2.0.8.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.020cdb0b864cfaa4e41e.js +9 -7
- hdsp_jupyter_extension-2.0.8.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.020cdb0b864cfaa4e41e.js.map +1 -0
- {hdsp_jupyter_extension-2.0.6.dist-info → hdsp_jupyter_extension-2.0.8.dist-info}/METADATA +2 -1
- {hdsp_jupyter_extension-2.0.6.dist-info → hdsp_jupyter_extension-2.0.8.dist-info}/RECORD +75 -69
- jupyter_ext/__init__.py +18 -0
- jupyter_ext/_version.py +1 -1
- jupyter_ext/handlers.py +1351 -58
- jupyter_ext/labextension/build_log.json +1 -1
- jupyter_ext/labextension/package.json +3 -2
- jupyter_ext/labextension/static/{frontend_styles_index_js.02d346171474a0fb2dc1.js → frontend_styles_index_js.8740a527757068814573.js} +470 -7
- jupyter_ext/labextension/static/frontend_styles_index_js.8740a527757068814573.js.map +1 -0
- jupyter_ext/labextension/static/{lib_index_js.a223ea20056954479ae9.js → lib_index_js.e4ff4b5779b5e049f84c.js} +3196 -441
- jupyter_ext/labextension/static/lib_index_js.e4ff4b5779b5e049f84c.js.map +1 -0
- jupyter_ext/labextension/static/{remoteEntry.addf2fa038fa60304aa2.js → remoteEntry.020cdb0b864cfaa4e41e.js} +9 -7
- jupyter_ext/labextension/static/remoteEntry.020cdb0b864cfaa4e41e.js.map +1 -0
- jupyter_ext/resource_usage.py +180 -0
- jupyter_ext/tests/test_handlers.py +58 -0
- agent_server/langchain/executors/jupyter_executor.py +0 -429
- agent_server/langchain/middleware/__init__.py +0 -36
- agent_server/langchain/middleware/code_search_middleware.py +0 -278
- agent_server/langchain/middleware/error_handling_middleware.py +0 -338
- agent_server/langchain/middleware/jupyter_execution_middleware.py +0 -301
- agent_server/langchain/middleware/rag_middleware.py +0 -227
- agent_server/langchain/middleware/validation_middleware.py +0 -240
- hdsp_jupyter_extension-2.0.6.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.02d346171474a0fb2dc1.js.map +0 -1
- hdsp_jupyter_extension-2.0.6.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.a223ea20056954479ae9.js.map +0 -1
- hdsp_jupyter_extension-2.0.6.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.addf2fa038fa60304aa2.js.map +0 -1
- jupyter_ext/labextension/static/frontend_styles_index_js.02d346171474a0fb2dc1.js.map +0 -1
- jupyter_ext/labextension/static/lib_index_js.a223ea20056954479ae9.js.map +0 -1
- jupyter_ext/labextension/static/remoteEntry.addf2fa038fa60304aa2.js.map +0 -1
- {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.8.data}/data/etc/jupyter/jupyter_server_config.d/hdsp_jupyter_extension.json +0 -0
- {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.8.data}/data/share/jupyter/labextensions/hdsp-agent/install.json +0 -0
- {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.8.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b80.c095373419d05e6f141a.js +0 -0
- {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.8.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b80.c095373419d05e6f141a.js.map +0 -0
- {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.8.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b81.61e75fb98ecff46cf836.js +0 -0
- {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.8.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b81.61e75fb98ecff46cf836.js.map +0 -0
- {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.8.data}/data/share/jupyter/labextensions/hdsp-agent/static/style.js +0 -0
- {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.8.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_babel_runtime_helpers_esm_extends_js-node_modules_emotion_serialize_dist-051195.e2553aab0c3963b83dd7.js +0 -0
- {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.8.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_babel_runtime_helpers_esm_extends_js-node_modules_emotion_serialize_dist-051195.e2553aab0c3963b83dd7.js.map +0 -0
- {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.8.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js.24edcc52a1c014a8a5f0.js +0 -0
- {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.8.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js.24edcc52a1c014a8a5f0.js.map +0 -0
- {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.8.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.19ecf6babe00caff6b8a.js +0 -0
- {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.8.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.19ecf6babe00caff6b8a.js.map +0 -0
- {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.8.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_styled_dist_emotion-styled_browser_development_esm_js.661fb5836f4978a7c6e1.js +0 -0
- {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.8.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_styled_dist_emotion-styled_browser_development_esm_js.661fb5836f4978a7c6e1.js.map +0 -0
- {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.8.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_index_js.985697e0162d8d088ca2.js +0 -0
- {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.8.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_index_js.985697e0162d8d088ca2.js.map +0 -0
- {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.8.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.1f5038488cdfd8b3a85d.js +0 -0
- {hdsp_jupyter_extension-2.0.6.data → hdsp_jupyter_extension-2.0.8.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.1f5038488cdfd8b3a85d.js.map +0 -0
- {hdsp_jupyter_extension-2.0.6.dist-info → hdsp_jupyter_extension-2.0.8.dist-info}/WHEEL +0 -0
- {hdsp_jupyter_extension-2.0.6.dist-info → hdsp_jupyter_extension-2.0.8.dist-info}/licenses/LICENSE +0 -0
agent_server/langchain/custom_middleware.py (new file, +663 -0)

@@ -0,0 +1,663 @@
+"""
+Custom middleware for LangChain agent.
+
+Provides middleware for handling empty responses, limiting tool calls,
+injecting continuation prompts, and patching dangling tool calls.
+"""
+
+import json
+import logging
+import re
+import uuid
+from typing import Any, Dict, Optional
+
+from langchain_core.messages import AIMessage, HumanMessage
+
+from agent_server.langchain.logging_utils import (
+    _format_middleware_marker,
+    _with_middleware_logging,
+)
+from agent_server.langchain.prompts import JSON_TOOL_SCHEMA, NON_HITL_TOOLS
+
+logger = logging.getLogger(__name__)
+
+
+def parse_json_tool_call(text) -> Optional[Dict[str, Any]]:
+    """Parse JSON tool call from text response.
+
+    Args:
+        text: Raw text that may contain a JSON tool call (str or list)
+
+    Returns:
+        Parsed dictionary with 'tool' and 'arguments' keys, or None
+    """
+    if not text:
+        return None
+
+    # Handle list content (multimodal responses from Gemini)
+    if isinstance(text, list):
+        text_parts = []
+        for part in text:
+            if isinstance(part, str):
+                text_parts.append(part)
+            elif isinstance(part, dict) and part.get("type") == "text":
+                text_parts.append(part.get("text", ""))
+        text = "\n".join(text_parts)
+
+    if not isinstance(text, str) or not text:
+        return None
+
+    # Clean up response
+    text = text.strip()
+    if text.startswith("```json"):
+        text = text[7:]
+    elif text.startswith("```"):
+        text = text[3:]
+    if text.endswith("```"):
+        text = text[:-3]
+    text = text.strip()
+
+    # Try direct JSON parse
+    try:
+        data = json.loads(text)
+        if "tool" in data:
+            return data
+    except json.JSONDecodeError:
+        pass
+
+    # Try to find JSON object in response
+    json_match = re.search(r"\{[\s\S]*\}", text)
+    if json_match:
+        try:
+            data = json.loads(json_match.group())
+            if "tool" in data:
+                return data
+        except json.JSONDecodeError:
+            pass
+
+    return None
+
+
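A minimal usage sketch of `parse_json_tool_call` with hypothetical inputs (not part of the package), one per parsing path: fenced JSON, multimodal list content, and the regex fallback:

```python
from agent_server.langchain.custom_middleware import parse_json_tool_call

# Fenced JSON: the ```json wrapper is stripped before parsing.
fenced = '```json\n{"tool": "final_answer_tool", "arguments": {"answer": "done"}}\n```'
assert parse_json_tool_call(fenced)["tool"] == "final_answer_tool"

# Multimodal list content (e.g., from Gemini) is flattened to text first.
parts = [{"type": "text", "text": '{"tool": "search_tool", "arguments": {}}'}]
assert parse_json_tool_call(parts)["tool"] == "search_tool"

# Surrounding prose is tolerated via the regex fallback.
noisy = 'Here is the call: {"tool": "shell_tool", "arguments": {"cmd": "ls"}}'
assert parse_json_tool_call(noisy)["tool"] == "shell_tool"
```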
+def normalize_tool_name(tool_name: str) -> str:
+    """Normalize tool name to match registered tool names.
+
+    Rules:
+    - write_todos_tool → write_todos (TodoListMiddleware exception)
+    - other tools without _tool suffix → add _tool suffix
+    """
+    if tool_name == "write_todos_tool":
+        return "write_todos"
+    if not tool_name.endswith("_tool") and tool_name != "write_todos":
+        return f"{tool_name}_tool"
+    return tool_name
+
+
+def create_tool_call_message(tool_name: str, arguments: Dict[str, Any]) -> AIMessage:
+    """Create AIMessage with tool_calls from parsed JSON.
+
+    Args:
+        tool_name: Name of the tool to call
+        arguments: Tool arguments dictionary
+
+    Returns:
+        AIMessage with properly formatted tool_calls
+    """
+    tool_name = normalize_tool_name(tool_name)
+
+    return AIMessage(
+        content="",
+        tool_calls=[
+            {
+                "name": tool_name,
+                "args": arguments,
+                "id": str(uuid.uuid4()),
+                "type": "tool_call",
+            }
+        ],
+    )
+
+
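Together, `normalize_tool_name` and `create_tool_call_message` turn a parsed JSON payload into a well-formed `AIMessage`; a small sketch:

```python
from agent_server.langchain.custom_middleware import (
    create_tool_call_message,
    normalize_tool_name,
)

assert normalize_tool_name("write_todos_tool") == "write_todos"    # the one exception
assert normalize_tool_name("final_answer") == "final_answer_tool"  # suffix added

msg = create_tool_call_message("final_answer", {"answer": "done"})
assert msg.content == ""
assert msg.tool_calls[0]["name"] == "final_answer_tool"
```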
+def create_handle_empty_response_middleware(wrap_model_call):
+    """Create middleware to detect and handle empty LLM responses with JSON fallback.
+
+    For models that don't support native tool calling well (e.g., Gemini 2.5 Flash),
+    this middleware:
+    1. Detects empty or text-only responses (no tool_calls)
+    2. Retries with JSON schema prompt to force structured output
+    3. Parses JSON response and injects tool_calls into AIMessage
+    4. Falls back to synthetic final_answer if all else fails
+
+    Args:
+        wrap_model_call: LangChain's wrap_model_call decorator
+
+    Returns:
+        Middleware function
+    """
+
+    @wrap_model_call
+    @_with_middleware_logging("handle_empty_response")
+    def handle_empty_response(request, handler):
+        max_retries = 2
+
+        # Check if last message is final_answer_tool result - if so, don't retry/synthesize
+        # This allows agent to naturally terminate after final_answer_tool
+        messages = request.messages
+        if messages:
+            last_msg = messages[-1]
+            if getattr(last_msg, "type", "") == "tool":
+                tool_name = getattr(last_msg, "name", "") or ""
+                if not tool_name:
+                    try:
+                        content_json = json.loads(last_msg.content)
+                        tool_name = content_json.get("tool", "")
+                    except (json.JSONDecodeError, TypeError, AttributeError):
+                        pass
+                if tool_name in ("final_answer_tool", "final_answer"):
+                    logger.info(
+                        "Last message is final_answer_tool result - allowing natural termination"
+                    )
+                    # Just call handler and return response as-is (no retry/synthesize)
+                    return handler(request)
+
+        for attempt in range(max_retries + 1):
+            response = handler(request)
+
+            # Extract AIMessage from response
+            response_message = _extract_ai_message(response)
+
+            has_content = (
+                bool(getattr(response_message, "content", None))
+                if response_message
+                else False
+            )
+            has_tool_calls = (
+                bool(getattr(response_message, "tool_calls", None))
+                if response_message
+                else False
+            )
+
+            logger.info(
+                "handle_empty_response: attempt=%d, type=%s, content=%s, tool_calls=%s",
+                attempt + 1,
+                type(response_message).__name__ if response_message else None,
+                has_content,
+                has_tool_calls,
+            )
+
+            # Valid response with tool_calls
+            if has_tool_calls:
+                return response
+
+            # Try to parse JSON from content
+            if has_content and response_message:
+                parsed = parse_json_tool_call(response_message.content)
+                if parsed:
+                    tool_name = parsed.get("tool", "")
+                    arguments = parsed.get("arguments", {})
+                    logger.info(
+                        "Parsed JSON tool call from content: tool=%s",
+                        tool_name,
+                    )
+
+                    new_message = create_tool_call_message(tool_name, arguments)
+                    response = _replace_ai_message_in_response(response, new_message)
+                    return response
+
+            # Invalid response - retry with JSON schema prompt
+            if response_message and attempt < max_retries:
+                reason = "text-only" if has_content else "empty"
+                logger.warning(
+                    "Invalid AIMessage (%s) detected (attempt %d/%d). "
+                    "Retrying with JSON schema prompt...",
+                    reason,
+                    attempt + 1,
+                    max_retries + 1,
+                )
+
+                json_prompt = _build_json_prompt(request, response_message, has_content)
+                request = request.override(
+                    messages=request.messages + [HumanMessage(content=json_prompt)]
+                )
+                continue
+
+            # Max retries exhausted - synthesize final_answer
+            if response_message:
+                logger.warning(
+                    "Max retries exhausted. Synthesizing final_answer response."
+                )
+                synthetic_message = _create_synthetic_final_answer(
+                    request, response_message, has_content
+                )
+                response = _replace_ai_message_in_response(response, synthetic_message)
+                return response
+
+            return response
+
+        return response
+
+    return handle_empty_response
+
+
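The file never shows how this factory is wired into an agent; a plausible sketch against LangChain 1.x's decorator-based middleware API (the import paths, the `create_agent` signature, and the model string are assumptions, not confirmed by this diff):

```python
from langchain.agents import create_agent                # assumed import path
from langchain.agents.middleware import wrap_model_call  # assumed import path

from agent_server.langchain.custom_middleware import (
    create_handle_empty_response_middleware,
    create_limit_tool_calls_middleware,
)

agent = create_agent(
    model="google_genai:gemini-2.5-flash",  # hypothetical model id
    tools=[],                               # the package's tool list would go here
    middleware=[
        create_handle_empty_response_middleware(wrap_model_call),
        create_limit_tool_calls_middleware(wrap_model_call),
    ],
)
```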
+def _extract_ai_message(response):
+    """Extract AIMessage from various response formats."""
+    if hasattr(response, "result"):
+        result = response.result
+        if isinstance(result, list):
+            for msg in reversed(result):
+                if isinstance(msg, AIMessage):
+                    return msg
+        elif isinstance(result, AIMessage):
+            return result
+    elif hasattr(response, "message"):
+        return response.message
+    elif hasattr(response, "messages") and response.messages:
+        return response.messages[-1]
+    elif isinstance(response, AIMessage):
+        return response
+    return None
+
+
+def _replace_ai_message_in_response(response, new_message):
+    """Replace AIMessage in response with a new one."""
+    if hasattr(response, "result"):
+        if isinstance(response.result, list):
+            new_result = [
+                new_message if isinstance(m, AIMessage) else m for m in response.result
+            ]
+            response.result = new_result
+        else:
+            response.result = new_message
+    return response
+
+
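These two helpers are the glue that lets the retry loop swap a bad `AIMessage` for a synthesized one; a self-contained sketch using a stand-in response object:

```python
from types import SimpleNamespace

from langchain_core.messages import AIMessage, HumanMessage

from agent_server.langchain.custom_middleware import (
    _extract_ai_message,
    _replace_ai_message_in_response,
    create_tool_call_message,
)

resp = SimpleNamespace(result=[HumanMessage(content="hi"), AIMessage(content="text only")])
assert _extract_ai_message(resp).content == "text only"

fixed = create_tool_call_message("final_answer", {"answer": "done"})
resp = _replace_ai_message_in_response(resp, fixed)
assert resp.result[-1].tool_calls[0]["name"] == "final_answer_tool"
```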
+def _build_json_prompt(request, response_message, has_content):
+    """Build JSON-forcing prompt based on context."""
+    todos = request.state.get("todos", [])
+    pending_todos = [t for t in todos if t.get("status") in ("pending", "in_progress")]
+
+    if has_content:
+        content_preview = response_message.content[:300]
+        return (
+            f"{JSON_TOOL_SCHEMA}\n\n"
+            f"Your previous response was text, not JSON. "
+            f"Wrap your answer in final_answer_tool:\n"
+            f'{{"tool": "final_answer_tool", "arguments": {{"answer": "{content_preview}..."}}}}'
+        )
+    elif pending_todos:
+        todo_list = ", ".join(t.get("content", "")[:20] for t in pending_todos[:3])
+        example_json = '{"tool": "jupyter_cell_tool", "arguments": {"code": "import pandas as pd\\ndf = pd.read_csv(\'titanic.csv\')\\nprint(df.head())"}}'
+        return (
+            f"{JSON_TOOL_SCHEMA}\n\n"
+            f"Pending tasks: {todo_list}\n"
+            f"Call jupyter_cell_tool with Python code to complete the next task.\n"
+            f"Example: {example_json}"
+        )
+    else:
+        return (
+            f"{JSON_TOOL_SCHEMA}\n\n"
+            f"All tasks completed. Call final_answer_tool:\n"
+            f'{{"tool": "final_answer_tool", "arguments": {{"answer": "작업이 완료되었습니다."}}}}'
+        )
+
+
+def _create_synthetic_final_answer(request, response_message, has_content):
+    """Create synthetic final_answer message."""
+    if has_content and response_message.content:
+        summary = response_message.content
+        logger.info(
+            "Using LLM's text content as final answer (length=%d)",
+            len(summary),
+        )
+    else:
+        todos = request.state.get("todos", [])
+        completed_todos = [
+            t.get("content", "") for t in todos if t.get("status") == "completed"
+        ]
+        summary = (
+            f"작업이 완료되었습니다. 완료된 항목: {', '.join(completed_todos[:5])}"
+            if completed_todos
+            else "작업이 완료되었습니다."
+        )
+
+    return AIMessage(
+        content="",
+        tool_calls=[
+            {
+                "name": "final_answer_tool",
+                "args": {"answer": summary},
+                "id": str(uuid.uuid4()),
+                "type": "tool_call",
+            }
+        ],
+    )
+
+
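`_create_synthetic_final_answer` needs only `request.state`, so its fallback branch is easy to pin down with a stub; the Korean literal "작업이 완료되었습니다." means "The task has been completed." (the todo content below is hypothetical):

```python
from types import SimpleNamespace

from agent_server.langchain.custom_middleware import _create_synthetic_final_answer

req = SimpleNamespace(state={"todos": [{"content": "load data", "status": "completed"}]})
msg = _create_synthetic_final_answer(req, response_message=None, has_content=False)

assert msg.tool_calls[0]["name"] == "final_answer_tool"
# Summary reads: "작업이 완료되었습니다. 완료된 항목: load data"
assert "load data" in msg.tool_calls[0]["args"]["answer"]
```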
+def create_limit_tool_calls_middleware(wrap_model_call):
+    """Create middleware to limit model to one tool call at a time.
+
+    Some models (like vLLM GPT) return multiple tool calls in a single response.
+    This causes conflicts with TodoListMiddleware when processing multiple decisions.
+    By limiting to one tool call, we ensure the agent processes actions sequentially.
+
+    Args:
+        wrap_model_call: LangChain's wrap_model_call decorator
+
+    Returns:
+        Middleware function
+    """
+
+    @wrap_model_call
+    @_with_middleware_logging("limit_tool_calls_to_one")
+    def limit_tool_calls_to_one(request, handler):
+        response = handler(request)
+
+        if hasattr(response, "result"):
+            result = response.result
+            messages = result if isinstance(result, list) else [result]
+
+            for msg in messages:
+                if isinstance(msg, AIMessage) and hasattr(msg, "tool_calls"):
+                    tool_calls = msg.tool_calls
+                    if tool_calls and len(tool_calls) > 1:
+                        logger.info(
+                            "Limiting tool calls from %d to 1 (keeping first: %s)",
+                            len(tool_calls),
+                            tool_calls[0].get("name", "unknown")
+                            if tool_calls
+                            else "none",
+                        )
+                        msg.tool_calls = [tool_calls[0]]
+
+        return response
+
+    return limit_tool_calls_to_one
+
+
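A test-style harness for the limiter, assuming `_with_middleware_logging` preserves the `(request, handler)` call signature (its use throughout this file implies it does) and substituting an identity function for `wrap_model_call`:

```python
from types import SimpleNamespace

from langchain_core.messages import AIMessage

from agent_server.langchain.custom_middleware import create_limit_tool_calls_middleware

mw = create_limit_tool_calls_middleware(lambda f: f)  # identity stand-in

msg = AIMessage(content="", tool_calls=[
    {"name": "a_tool", "args": {}, "id": "1", "type": "tool_call"},
    {"name": "b_tool", "args": {}, "id": "2", "type": "tool_call"},
])
response = SimpleNamespace(result=[msg])

mw(None, lambda request: response)  # handler returns the canned response
assert [c["name"] for c in msg.tool_calls] == ["a_tool"]  # second call dropped
```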
+def _get_string_params_from_tools(tools) -> Dict[str, set]:
+    """Extract string parameter names from tool schemas.
+
+    Analyzes each tool's Pydantic args_schema to determine which parameters
+    should be strings (not arrays).
+
+    Args:
+        tools: List of LangChain tools
+
+    Returns:
+        Dict mapping tool names to sets of string parameter names
+    """
+    from typing import get_args, get_origin
+
+    tool_string_params: Dict[str, set] = {}
+
+    for tool in tools:
+        tool_name = getattr(tool, "name", None)
+        if not tool_name:
+            continue
+
+        args_schema = getattr(tool, "args_schema", None)
+        if not args_schema:
+            continue
+
+        string_params = set()
+
+        # Get field annotations from Pydantic model
+        try:
+            annotations = getattr(args_schema, "__annotations__", {})
+            for field_name, field_type in annotations.items():
+                origin = get_origin(field_type)
+
+                # Check if it's a simple str type
+                if field_type is str:
+                    string_params.add(field_name)
+                # Check if it's Optional[str] (Union[str, None])
+                elif origin is type(None) or str(origin) == "typing.Union":
+                    args = get_args(field_type)
+                    if str in args:
+                        string_params.add(field_name)
+        except Exception as e:
+            logger.debug("Failed to analyze schema for tool %s: %s", tool_name, e)
+
+        if string_params:
+            tool_string_params[tool_name] = string_params
+            logger.debug("Tool %s string params: %s", tool_name, string_params)
+
+    return tool_string_params
+
+
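What the schema analysis yields for a hand-written Pydantic model attached to a stand-in tool object (all names hypothetical):

```python
from types import SimpleNamespace
from typing import List, Optional

from pydantic import BaseModel

from agent_server.langchain.custom_middleware import _get_string_params_from_tools

class WriteFileArgs(BaseModel):
    path: str                      # plain str -> detected
    content: Optional[str] = None  # Optional[str] -> detected
    lines: List[str] = []          # list -> ignored

fake_tool = SimpleNamespace(name="write_file_tool", args_schema=WriteFileArgs)
assert _get_string_params_from_tools([fake_tool]) == {"write_file_tool": {"path", "content"}}
```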
+def create_normalize_tool_args_middleware(wrap_model_call, tools=None):
+    """Create middleware to normalize tool call arguments.
+
+    Gemini sometimes returns tool call arguments with list values instead of strings.
+    This middleware converts list arguments to strings ONLY for parameters that
+    are defined as str in the tool's Pydantic schema.
+
+    Args:
+        wrap_model_call: LangChain's wrap_model_call decorator
+        tools: Optional list of tools to analyze for type information
+
+    Returns:
+        Middleware function
+    """
+
+    # Build tool -> string params mapping from tool schemas
+    tool_string_params: Dict[str, set] = {}
+    if tools:
+        tool_string_params = _get_string_params_from_tools(tools)
+        logger.info(
+            "Initialized normalize_tool_args with %d tools: %s",
+            len(tool_string_params),
+            {k: list(v) for k, v in tool_string_params.items()},
+        )
+
+    @wrap_model_call
+    @_with_middleware_logging("normalize_tool_args")
+    def normalize_tool_args(request, handler):
+        response = handler(request)
+
+        if hasattr(response, "result"):
+            result = response.result
+            messages = result if isinstance(result, list) else [result]
+
+            for msg in messages:
+                if isinstance(msg, AIMessage) and hasattr(msg, "tool_calls"):
+                    tool_calls = msg.tool_calls
+                    if tool_calls:
+                        for tool_call in tool_calls:
+                            tool_name = tool_call.get("name", "")
+                            # Normalize tool name (e.g., write_todos_tool → write_todos)
+                            normalized_name = normalize_tool_name(tool_name)
+                            if normalized_name != tool_name:
+                                logger.info(
+                                    "Normalized tool name: %s → %s",
+                                    tool_name,
+                                    normalized_name,
+                                )
+                                tool_call["name"] = normalized_name
+                                tool_name = normalized_name
+                            string_params = tool_string_params.get(tool_name, set())
+
+                            if "args" in tool_call and isinstance(
+                                tool_call["args"], dict
+                            ):
+                                args = tool_call["args"]
+                                # Normalize list arguments to strings for str-typed params
+                                for key, value in args.items():
+                                    if key in string_params and isinstance(value, list):
+                                        # Join list items into a single string
+                                        text_parts = []
+                                        for part in value:
+                                            if isinstance(part, str):
+                                                text_parts.append(part)
+                                            elif (
+                                                isinstance(part, dict)
+                                                and part.get("type") == "text"
+                                            ):
+                                                text_parts.append(part.get("text", ""))
+
+                                        if text_parts:
+                                            normalized_value = "\n".join(text_parts)
+                                            logger.info(
+                                                "Normalized list argument '%s' to string (length=%d) for tool '%s'",
+                                                key,
+                                                len(normalized_value),
+                                                tool_name,
+                                            )
+                                            args[key] = normalized_value
+
+        return response
+
+    return normalize_tool_args
+
+
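The net effect on a single Gemini-style tool call, assuming the (hypothetical) `write_file_tool` schema declares `content: str`:

```python
before = {
    "name": "write_file",
    "args": {"content": [{"type": "text", "text": "line 1"}, "line 2"]},
}
# After the middleware runs on the response containing this call:
after = {
    "name": "write_file_tool",              # suffix added by normalize_tool_name
    "args": {"content": "line 1\nline 2"},  # list flattened to a newline-joined str
}
```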
+def create_inject_continuation_middleware(wrap_model_call):
+    """Create middleware to inject continuation prompt after non-HITL tool execution.
+
+    Non-HITL tools execute immediately without user approval, which can cause
+    Gemini to produce empty responses. This middleware injects a system message
+    to remind the LLM to continue with the next action.
+
+    Args:
+        wrap_model_call: LangChain's wrap_model_call decorator
+
+    Returns:
+        Middleware function
+    """
+
+    @wrap_model_call
+    @_with_middleware_logging("inject_continuation_after_non_hitl_tool")
+    def inject_continuation_after_non_hitl_tool(request, handler):
+        messages = request.messages
+        if not messages:
+            return handler(request)
+
+        last_msg = messages[-1]
+        if getattr(last_msg, "type", "") == "tool":
+            tool_name = getattr(last_msg, "name", "") or ""
+
+            # Try to extract tool name from content
+            if not tool_name:
+                try:
+                    content_json = json.loads(last_msg.content)
+                    tool_name = content_json.get("tool", "")
+                except (json.JSONDecodeError, TypeError, AttributeError):
+                    pass
+
+            if tool_name in NON_HITL_TOOLS:
+                logger.info(
+                    "Injecting continuation prompt after non-HITL tool: %s",
+                    tool_name,
+                )
+
+                todos = request.state.get("todos", [])
+                pending_todos = [
+                    t for t in todos if t.get("status") in ("pending", "in_progress")
+                ]
+
+                if pending_todos:
+                    pending_list = ", ".join(
+                        t.get("content", "")[:30] for t in pending_todos[:3]
+                    )
+                    continuation = (
+                        f"Tool '{tool_name}' completed. "
+                        f"Continue with pending tasks: {pending_list}. "
+                        f"Call jupyter_cell_tool or the next appropriate tool."
+                    )
+                else:
+                    continuation = (
+                        f"Tool '{tool_name}' completed. All tasks done. "
+                        f"Call final_answer_tool with a summary NOW."
+                    )
+
+                new_messages = list(messages) + [
+                    HumanMessage(content=f"[SYSTEM] {continuation}")
+                ]
+                request = request.override(messages=new_messages)
+
+        return handler(request)
+
+    return inject_continuation_after_non_hitl_tool
+
+
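What the injection looks like in the transcript, assuming `read_file_tool` is listed in `NON_HITL_TOOLS` and two todos are still pending (both assumptions, for illustration only):

```python
# Last message before the model call:
#   ToolMessage(name="read_file_tool", content="...")
#
# Message the middleware appends before invoking the model:
#   HumanMessage(content=
#       "[SYSTEM] Tool 'read_file_tool' completed. "
#       "Continue with pending tasks: load data, clean columns. "
#       "Call jupyter_cell_tool or the next appropriate tool.")
```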
+def create_patch_tool_calls_middleware(AgentMiddleware, ToolMessage, Overwrite):
+    """Create middleware to patch dangling tool calls.
+
+    When a new user message arrives before a tool call completes, we need to
+    add synthetic ToolMessage responses for those dangling calls so the
+    conversation can continue properly.
+
+    Args:
+        AgentMiddleware: LangChain's AgentMiddleware base class
+        ToolMessage: LangChain's ToolMessage class
+        Overwrite: LangGraph's Overwrite type
+
+    Returns:
+        PatchToolCallsMiddleware class instance
+    """
+
+    class PatchToolCallsMiddleware(AgentMiddleware):
+        """Patch dangling tool calls so the agent can continue."""
+
+        def before_agent(self, state, runtime):
+            logger.info(
+                "%s",
+                _format_middleware_marker(
+                    "PatchToolCallsMiddleware.before_agent", "START"
+                ),
+            )
+            messages = state.get("messages", [])
+            if not messages:
+                logger.info(
+                    "%s",
+                    _format_middleware_marker(
+                        "PatchToolCallsMiddleware.before_agent", "NOOP"
+                    ),
+                )
+                return None
+
+            patched = []
+            for i, msg in enumerate(messages):
+                patched.append(msg)
+                if getattr(msg, "type", "") == "ai" and getattr(
+                    msg, "tool_calls", None
+                ):
+                    for tool_call in msg.tool_calls:
+                        tool_call_id = tool_call.get("id")
+                        if not tool_call_id:
+                            continue
+                        has_tool_msg = any(
+                            (
+                                getattr(m, "type", "") == "tool"
+                                and getattr(m, "tool_call_id", None) == tool_call_id
+                            )
+                            for m in messages[i:]
+                        )
+                        if not has_tool_msg:
+                            tool_msg = (
+                                f"Tool call {tool_call.get('name', 'unknown')} with id {tool_call_id} "
+                                "was cancelled - another message came in before it could be completed."
+                            )
+                            patched.append(
+                                ToolMessage(
+                                    content=tool_msg,
+                                    name=tool_call.get("name", "unknown"),
+                                    tool_call_id=tool_call_id,
+                                )
+                            )
+
+            if patched == messages:
+                logger.info(
+                    "%s",
+                    _format_middleware_marker(
+                        "PatchToolCallsMiddleware.before_agent", "NOOP"
+                    ),
+                )
+                return None
+            logger.info(
+                "%s",
+                _format_middleware_marker(
+                    "PatchToolCallsMiddleware.before_agent", "PATCHED"
+                ),
+            )
+            return {"messages": Overwrite(patched)}
+
+    return PatchToolCallsMiddleware()
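A sketch of the patch in action: the user interrupts before `shell_tool` returns, so `before_agent` inserts a synthetic `ToolMessage` directly after the dangling `AIMessage` (IDs hypothetical; the content follows the template above):

```python
from langchain_core.messages import AIMessage, HumanMessage

history = [
    AIMessage(content="", tool_calls=[
        {"name": "shell_tool", "args": {}, "id": "call-1", "type": "tool_call"},
    ]),
    HumanMessage(content="Actually, stop and summarize instead."),
]
# before_agent() returns {"messages": Overwrite(patched)} where patched is:
#   [AIMessage(...),
#    ToolMessage(name="shell_tool", tool_call_id="call-1",
#                content="Tool call shell_tool with id call-1 was cancelled - another "
#                        "message came in before it could be completed."),
#    HumanMessage(...)]
```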
agent_server/langchain/executors/__init__.py (+2 -7)

@@ -1,15 +1,10 @@
 """
-Jupyter Executors
-
-Provides direct access to Jupyter kernel for code execution
-when running inside JupyterLab server.
+Jupyter Executors
 
 Components:
-- JupyterExecutor: Execute code in Jupyter kernel
 - NotebookSearcher: Search notebooks and cells
 """
 
-from agent_server.langchain.executors.jupyter_executor import JupyterExecutor
 from agent_server.langchain.executors.notebook_searcher import NotebookSearcher
 
-__all__ = ["
+__all__ = ["NotebookSearcher"]