hdsp-jupyter-extension 2.0.5__py3-none-any.whl → 2.0.7__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (90) hide show
  1. agent_server/core/reflection_engine.py +0 -1
  2. agent_server/knowledge/watchdog_service.py +1 -1
  3. agent_server/langchain/ARCHITECTURE.md +1193 -0
  4. agent_server/langchain/agent.py +74 -551
  5. agent_server/langchain/custom_middleware.py +636 -0
  6. agent_server/langchain/executors/__init__.py +2 -7
  7. agent_server/langchain/executors/notebook_searcher.py +46 -38
  8. agent_server/langchain/hitl_config.py +66 -0
  9. agent_server/langchain/llm_factory.py +166 -0
  10. agent_server/langchain/logging_utils.py +184 -0
  11. agent_server/langchain/prompts.py +119 -0
  12. agent_server/langchain/state.py +16 -6
  13. agent_server/langchain/tools/__init__.py +6 -0
  14. agent_server/langchain/tools/file_tools.py +91 -129
  15. agent_server/langchain/tools/jupyter_tools.py +18 -18
  16. agent_server/langchain/tools/resource_tools.py +161 -0
  17. agent_server/langchain/tools/search_tools.py +198 -216
  18. agent_server/langchain/tools/shell_tools.py +54 -0
  19. agent_server/main.py +4 -1
  20. agent_server/routers/health.py +1 -1
  21. agent_server/routers/langchain_agent.py +941 -305
  22. hdsp_agent_core/prompts/auto_agent_prompts.py +3 -3
  23. {hdsp_jupyter_extension-2.0.5.data → hdsp_jupyter_extension-2.0.7.data}/data/share/jupyter/labextensions/hdsp-agent/build_log.json +1 -1
  24. {hdsp_jupyter_extension-2.0.5.data → hdsp_jupyter_extension-2.0.7.data}/data/share/jupyter/labextensions/hdsp-agent/package.json +2 -2
  25. hdsp_jupyter_extension-2.0.5.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.8cc4873c413ed56ff485.js → hdsp_jupyter_extension-2.0.7.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.4770ec0fb2d173b6deb4.js +314 -8
  26. hdsp_jupyter_extension-2.0.7.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.4770ec0fb2d173b6deb4.js.map +1 -0
  27. hdsp_jupyter_extension-2.0.5.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.a223ea20056954479ae9.js → hdsp_jupyter_extension-2.0.7.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.29cf4312af19e86f82af.js +1547 -330
  28. hdsp_jupyter_extension-2.0.7.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.29cf4312af19e86f82af.js.map +1 -0
  29. hdsp_jupyter_extension-2.0.5.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.37299706f55c6d46099d.js → hdsp_jupyter_extension-2.0.7.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.61343eb4cf0577e74b50.js +8 -8
  30. hdsp_jupyter_extension-2.0.7.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.61343eb4cf0577e74b50.js.map +1 -0
  31. hdsp_jupyter_extension-2.0.5.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js.24edcc52a1c014a8a5f0.js → hdsp_jupyter_extension-2.0.7.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js-node_modules-782ee5.d9ed8645ef1d311657d8.js +209 -2
  32. hdsp_jupyter_extension-2.0.7.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js-node_modules-782ee5.d9ed8645ef1d311657d8.js.map +1 -0
  33. jupyter_ext/labextension/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.19ecf6babe00caff6b8a.js → hdsp_jupyter_extension-2.0.7.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.36b49c71871f98d4f549.js +2 -209
  34. hdsp_jupyter_extension-2.0.7.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.36b49c71871f98d4f549.js.map +1 -0
  35. hdsp_jupyter_extension-2.0.5.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.1f5038488cdfd8b3a85d.js → hdsp_jupyter_extension-2.0.7.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.2e13df4ea61496e95d45.js +3 -212
  36. hdsp_jupyter_extension-2.0.7.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.2e13df4ea61496e95d45.js.map +1 -0
  37. {hdsp_jupyter_extension-2.0.5.dist-info → hdsp_jupyter_extension-2.0.7.dist-info}/METADATA +2 -1
  38. {hdsp_jupyter_extension-2.0.5.dist-info → hdsp_jupyter_extension-2.0.7.dist-info}/RECORD +71 -68
  39. jupyter_ext/_version.py +1 -1
  40. jupyter_ext/handlers.py +1176 -58
  41. jupyter_ext/labextension/build_log.json +1 -1
  42. jupyter_ext/labextension/package.json +2 -2
  43. jupyter_ext/labextension/static/{frontend_styles_index_js.8cc4873c413ed56ff485.js → frontend_styles_index_js.4770ec0fb2d173b6deb4.js} +314 -8
  44. jupyter_ext/labextension/static/frontend_styles_index_js.4770ec0fb2d173b6deb4.js.map +1 -0
  45. jupyter_ext/labextension/static/{lib_index_js.a223ea20056954479ae9.js → lib_index_js.29cf4312af19e86f82af.js} +1547 -330
  46. jupyter_ext/labextension/static/lib_index_js.29cf4312af19e86f82af.js.map +1 -0
  47. jupyter_ext/labextension/static/{remoteEntry.37299706f55c6d46099d.js → remoteEntry.61343eb4cf0577e74b50.js} +8 -8
  48. jupyter_ext/labextension/static/remoteEntry.61343eb4cf0577e74b50.js.map +1 -0
  49. jupyter_ext/labextension/static/{vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js.24edcc52a1c014a8a5f0.js → vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js-node_modules-782ee5.d9ed8645ef1d311657d8.js} +209 -2
  50. jupyter_ext/labextension/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js-node_modules-782ee5.d9ed8645ef1d311657d8.js.map +1 -0
  51. hdsp_jupyter_extension-2.0.5.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.19ecf6babe00caff6b8a.js → jupyter_ext/labextension/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.36b49c71871f98d4f549.js +2 -209
  52. jupyter_ext/labextension/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.36b49c71871f98d4f549.js.map +1 -0
  53. jupyter_ext/labextension/static/{vendors-node_modules_mui_material_utils_createSvgIcon_js.1f5038488cdfd8b3a85d.js → vendors-node_modules_mui_material_utils_createSvgIcon_js.2e13df4ea61496e95d45.js} +3 -212
  54. jupyter_ext/labextension/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.2e13df4ea61496e95d45.js.map +1 -0
  55. jupyter_ext/resource_usage.py +180 -0
  56. jupyter_ext/tests/test_handlers.py +58 -0
  57. agent_server/langchain/executors/jupyter_executor.py +0 -429
  58. agent_server/langchain/middleware/__init__.py +0 -36
  59. agent_server/langchain/middleware/code_search_middleware.py +0 -278
  60. agent_server/langchain/middleware/error_handling_middleware.py +0 -338
  61. agent_server/langchain/middleware/jupyter_execution_middleware.py +0 -301
  62. agent_server/langchain/middleware/rag_middleware.py +0 -227
  63. agent_server/langchain/middleware/validation_middleware.py +0 -240
  64. hdsp_jupyter_extension-2.0.5.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.8cc4873c413ed56ff485.js.map +0 -1
  65. hdsp_jupyter_extension-2.0.5.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.a223ea20056954479ae9.js.map +0 -1
  66. hdsp_jupyter_extension-2.0.5.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.37299706f55c6d46099d.js.map +0 -1
  67. hdsp_jupyter_extension-2.0.5.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js.24edcc52a1c014a8a5f0.js.map +0 -1
  68. hdsp_jupyter_extension-2.0.5.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.19ecf6babe00caff6b8a.js.map +0 -1
  69. hdsp_jupyter_extension-2.0.5.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.1f5038488cdfd8b3a85d.js.map +0 -1
  70. jupyter_ext/labextension/static/frontend_styles_index_js.8cc4873c413ed56ff485.js.map +0 -1
  71. jupyter_ext/labextension/static/lib_index_js.a223ea20056954479ae9.js.map +0 -1
  72. jupyter_ext/labextension/static/remoteEntry.37299706f55c6d46099d.js.map +0 -1
  73. jupyter_ext/labextension/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js.24edcc52a1c014a8a5f0.js.map +0 -1
  74. jupyter_ext/labextension/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.19ecf6babe00caff6b8a.js.map +0 -1
  75. jupyter_ext/labextension/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.1f5038488cdfd8b3a85d.js.map +0 -1
  76. {hdsp_jupyter_extension-2.0.5.data → hdsp_jupyter_extension-2.0.7.data}/data/etc/jupyter/jupyter_server_config.d/hdsp_jupyter_extension.json +0 -0
  77. {hdsp_jupyter_extension-2.0.5.data → hdsp_jupyter_extension-2.0.7.data}/data/share/jupyter/labextensions/hdsp-agent/install.json +0 -0
  78. {hdsp_jupyter_extension-2.0.5.data → hdsp_jupyter_extension-2.0.7.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b80.c095373419d05e6f141a.js +0 -0
  79. {hdsp_jupyter_extension-2.0.5.data → hdsp_jupyter_extension-2.0.7.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b80.c095373419d05e6f141a.js.map +0 -0
  80. {hdsp_jupyter_extension-2.0.5.data → hdsp_jupyter_extension-2.0.7.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b81.61e75fb98ecff46cf836.js +0 -0
  81. {hdsp_jupyter_extension-2.0.5.data → hdsp_jupyter_extension-2.0.7.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b81.61e75fb98ecff46cf836.js.map +0 -0
  82. {hdsp_jupyter_extension-2.0.5.data → hdsp_jupyter_extension-2.0.7.data}/data/share/jupyter/labextensions/hdsp-agent/static/style.js +0 -0
  83. {hdsp_jupyter_extension-2.0.5.data → hdsp_jupyter_extension-2.0.7.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_babel_runtime_helpers_esm_extends_js-node_modules_emotion_serialize_dist-051195.e2553aab0c3963b83dd7.js +0 -0
  84. {hdsp_jupyter_extension-2.0.5.data → hdsp_jupyter_extension-2.0.7.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_babel_runtime_helpers_esm_extends_js-node_modules_emotion_serialize_dist-051195.e2553aab0c3963b83dd7.js.map +0 -0
  85. {hdsp_jupyter_extension-2.0.5.data → hdsp_jupyter_extension-2.0.7.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_styled_dist_emotion-styled_browser_development_esm_js.661fb5836f4978a7c6e1.js +0 -0
  86. {hdsp_jupyter_extension-2.0.5.data → hdsp_jupyter_extension-2.0.7.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_styled_dist_emotion-styled_browser_development_esm_js.661fb5836f4978a7c6e1.js.map +0 -0
  87. {hdsp_jupyter_extension-2.0.5.data → hdsp_jupyter_extension-2.0.7.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_index_js.985697e0162d8d088ca2.js +0 -0
  88. {hdsp_jupyter_extension-2.0.5.data → hdsp_jupyter_extension-2.0.7.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_index_js.985697e0162d8d088ca2.js.map +0 -0
  89. {hdsp_jupyter_extension-2.0.5.dist-info → hdsp_jupyter_extension-2.0.7.dist-info}/WHEEL +0 -0
  90. {hdsp_jupyter_extension-2.0.5.dist-info → hdsp_jupyter_extension-2.0.7.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,636 @@
1
+ """
2
+ Custom middleware for LangChain agent.
3
+
4
+ Provides middleware for handling empty responses, limiting tool calls,
5
+ injecting continuation prompts, and patching dangling tool calls.
6
+ """
7
+
8
+ import json
9
+ import logging
10
+ import re
11
+ import uuid
12
+ from typing import Any, Dict, Optional
13
+
14
+ from langchain_core.messages import AIMessage, HumanMessage
15
+
16
+ from agent_server.langchain.logging_utils import (
17
+ _format_middleware_marker,
18
+ _with_middleware_logging,
19
+ )
20
+ from agent_server.langchain.prompts import JSON_TOOL_SCHEMA, NON_HITL_TOOLS
21
+
22
+ logger = logging.getLogger(__name__)
23
+
24
+
25
def parse_json_tool_call(text) -> Optional[Dict[str, Any]]:
    """Parse a JSON tool call from a raw model response.

    Args:
        text: Raw text that may contain a JSON tool call. May be a str or a
            list of content parts (multimodal responses from Gemini).

    Returns:
        Parsed dictionary containing a 'tool' key (and usually 'arguments'),
        or None if no JSON tool call could be extracted.
    """
    if not text:
        return None

    # Handle list content (multimodal responses from Gemini): keep plain
    # strings and the text of {"type": "text"} parts, joined by newlines.
    if isinstance(text, list):
        text_parts = []
        for part in text:
            if isinstance(part, str):
                text_parts.append(part)
            elif isinstance(part, dict) and part.get("type") == "text":
                text_parts.append(part.get("text", ""))
        text = "\n".join(text_parts)

    if not isinstance(text, str) or not text:
        return None

    # Strip markdown code fences the model may have wrapped the JSON in.
    text = text.strip()
    if text.startswith("```json"):
        text = text[7:]
    elif text.startswith("```"):
        text = text[3:]
    if text.endswith("```"):
        text = text[:-3]
    text = text.strip()

    # Try direct JSON parse first.
    try:
        data = json.loads(text)
        # BUGFIX: require a dict — '"tool" in data' is also truthy for a JSON
        # string or list containing "tool", which would return a non-dict.
        if isinstance(data, dict) and "tool" in data:
            return data
    except json.JSONDecodeError:
        pass

    # Fall back to the first {...} span embedded in surrounding prose.
    json_match = re.search(r"\{[\s\S]*\}", text)
    if json_match:
        try:
            data = json.loads(json_match.group())
            if isinstance(data, dict) and "tool" in data:
                return data
        except json.JSONDecodeError:
            pass

    return None
79
+
80
+
81
def create_tool_call_message(tool_name: str, arguments: Dict[str, Any]) -> AIMessage:
    """Build an AIMessage whose tool_calls carry the given call.

    Args:
        tool_name: Name of the tool to call; a ``_tool`` suffix is appended
            when missing so names match the registered tool set.
        arguments: Tool arguments dictionary.

    Returns:
        AIMessage with a single properly formatted tool call.
    """
    # Normalize the tool name to the registered "*_tool" convention.
    normalized = tool_name if tool_name.endswith("_tool") else f"{tool_name}_tool"

    call = {
        "name": normalized,
        "args": arguments,
        "id": str(uuid.uuid4()),
        "type": "tool_call",
    }
    return AIMessage(content="", tool_calls=[call])
106
+
107
+
108
def create_handle_empty_response_middleware(wrap_model_call):
    """Create middleware to detect and handle empty LLM responses with JSON fallback.

    For models that don't support native tool calling well (e.g., Gemini 2.5 Flash),
    this middleware:
    1. Detects empty or text-only responses (no tool_calls)
    2. Retries with JSON schema prompt to force structured output
    3. Parses JSON response and injects tool_calls into AIMessage
    4. Falls back to synthetic final_answer if all else fails

    Args:
        wrap_model_call: LangChain's wrap_model_call decorator

    Returns:
        Middleware function
    """

    @wrap_model_call
    @_with_middleware_logging("handle_empty_response")
    def handle_empty_response(request, handler):
        max_retries = 2

        # Check if last message is final_answer_tool result - if so, don't retry/synthesize.
        # This allows the agent to naturally terminate after final_answer_tool.
        messages = request.messages
        if messages:
            last_msg = messages[-1]
            if getattr(last_msg, "type", "") == "tool":
                tool_name = getattr(last_msg, "name", "") or ""
                if not tool_name:
                    # Some providers omit the name; fall back to the JSON payload.
                    try:
                        content_json = json.loads(last_msg.content)
                        tool_name = content_json.get("tool", "")
                    except (json.JSONDecodeError, TypeError, AttributeError):
                        pass
                if tool_name in ("final_answer_tool", "final_answer"):
                    logger.info(
                        "Last message is final_answer_tool result - allowing natural termination"
                    )
                    # Just call handler and return response as-is (no retry/synthesize)
                    return handler(request)

        for attempt in range(max_retries + 1):
            response = handler(request)

            # Extract AIMessage from response (shape varies by provider/version).
            response_message = _extract_ai_message(response)

            has_content = (
                bool(getattr(response_message, "content", None))
                if response_message
                else False
            )
            has_tool_calls = (
                bool(getattr(response_message, "tool_calls", None))
                if response_message
                else False
            )

            logger.info(
                "handle_empty_response: attempt=%d, type=%s, content=%s, tool_calls=%s",
                attempt + 1,
                type(response_message).__name__ if response_message else None,
                has_content,
                has_tool_calls,
            )

            # Valid response with tool_calls
            if has_tool_calls:
                return response

            # Try to parse a JSON tool call out of plain text content.
            if has_content and response_message:
                parsed = parse_json_tool_call(response_message.content)
                if parsed:
                    tool_name = parsed.get("tool", "")
                    arguments = parsed.get("arguments", {})
                    logger.info(
                        "Parsed JSON tool call from content: tool=%s",
                        tool_name,
                    )

                    new_message = create_tool_call_message(tool_name, arguments)
                    response = _replace_ai_message_in_response(response, new_message)
                    return response

            # Invalid response - retry with JSON schema prompt
            if response_message and attempt < max_retries:
                reason = "text-only" if has_content else "empty"
                logger.warning(
                    "Invalid AIMessage (%s) detected (attempt %d/%d). "
                    "Retrying with JSON schema prompt...",
                    reason,
                    attempt + 1,
                    max_retries + 1,
                )

                json_prompt = _build_json_prompt(request, response_message, has_content)
                request = request.override(
                    messages=request.messages + [HumanMessage(content=json_prompt)]
                )
                continue

            # Max retries exhausted - synthesize final_answer
            if response_message:
                logger.warning(
                    "Max retries exhausted. Synthesizing final_answer response."
                )
                synthetic_message = _create_synthetic_final_answer(
                    request, response_message, has_content
                )
                response = _replace_ai_message_in_response(response, synthetic_message)
                return response

            # No AIMessage could be extracted at all: return the raw response.
            # BUGFIX: removed a duplicated `return response` after the loop —
            # every path in the final iteration already returns, so it was dead.
            return response

    return handle_empty_response
227
+
228
+
229
+ def _extract_ai_message(response):
230
+ """Extract AIMessage from various response formats."""
231
+ if hasattr(response, "result"):
232
+ result = response.result
233
+ if isinstance(result, list):
234
+ for msg in reversed(result):
235
+ if isinstance(msg, AIMessage):
236
+ return msg
237
+ elif isinstance(result, AIMessage):
238
+ return result
239
+ elif hasattr(response, "message"):
240
+ return response.message
241
+ elif hasattr(response, "messages") and response.messages:
242
+ return response.messages[-1]
243
+ elif isinstance(response, AIMessage):
244
+ return response
245
+ return None
246
+
247
+
248
+ def _replace_ai_message_in_response(response, new_message):
249
+ """Replace AIMessage in response with a new one."""
250
+ if hasattr(response, "result"):
251
+ if isinstance(response.result, list):
252
+ new_result = [
253
+ new_message if isinstance(m, AIMessage) else m for m in response.result
254
+ ]
255
+ response.result = new_result
256
+ else:
257
+ response.result = new_message
258
+ return response
259
+
260
+
261
def _build_json_prompt(request, response_message, has_content):
    """Compose a prompt that forces the model to answer with a JSON tool call.

    Three cases, checked in order: the model produced plain text (ask it to
    wrap that text in final_answer_tool), there are unfinished todos (push it
    toward the next task), or everything is done (ask for final_answer_tool).
    """
    all_todos = request.state.get("todos", [])
    unfinished = [
        t for t in all_todos if t.get("status") in ("pending", "in_progress")
    ]

    if has_content:
        # Echo a preview of the model's own text so it can wrap it as the answer.
        preview = response_message.content[:300]
        return (
            f"{JSON_TOOL_SCHEMA}\n\n"
            "Your previous response was text, not JSON. "
            "Wrap your answer in final_answer_tool:\n"
            f'{{"tool": "final_answer_tool", "arguments": {{"answer": "{preview}..."}}}}'
        )

    if unfinished:
        task_summary = ", ".join(t.get("content", "")[:20] for t in unfinished[:3])
        sample = '{"tool": "jupyter_cell_tool", "arguments": {"code": "import pandas as pd\\ndf = pd.read_csv(\'titanic.csv\')\\nprint(df.head())"}}'
        return (
            f"{JSON_TOOL_SCHEMA}\n\n"
            f"Pending tasks: {task_summary}\n"
            "Call jupyter_cell_tool with Python code to complete the next task.\n"
            f"Example: {sample}"
        )

    # Nothing pending: ask for the closing final_answer_tool call.
    return (
        f"{JSON_TOOL_SCHEMA}\n\n"
        "All tasks completed. Call final_answer_tool:\n"
        '{"tool": "final_answer_tool", "arguments": {"answer": "작업이 완료되었습니다."}}'
    )
289
+
290
+
291
def _create_synthetic_final_answer(request, response_message, has_content):
    """Fabricate a final_answer_tool call when the model refuses to emit one.

    Prefers the model's own text content as the answer; otherwise summarizes
    the completed todo items from agent state. The Korean user-facing strings
    are intentional and preserved verbatim.
    """
    if has_content and response_message.content:
        answer_text = response_message.content
        logger.info(
            "Using LLM's text content as final answer (length=%d)",
            len(answer_text),
        )
    else:
        state_todos = request.state.get("todos", [])
        done = [
            t.get("content", "")
            for t in state_todos
            if t.get("status") == "completed"
        ]
        if done:
            answer_text = f"작업이 완료되었습니다. 완료된 항목: {', '.join(done[:5])}"
        else:
            answer_text = "작업이 완료되었습니다."

    return AIMessage(
        content="",
        tool_calls=[
            {
                "name": "final_answer_tool",
                "args": {"answer": answer_text},
                "id": str(uuid.uuid4()),
                "type": "tool_call",
            }
        ],
    )
321
+
322
+
323
def create_limit_tool_calls_middleware(wrap_model_call):
    """Create middleware that trims each AIMessage to a single tool call.

    Some models (like vLLM GPT) emit several tool calls in one response, which
    conflicts with TodoListMiddleware's sequential decision handling. Keeping
    only the first call forces the agent to act one step at a time.

    Args:
        wrap_model_call: LangChain's wrap_model_call decorator

    Returns:
        Middleware function
    """

    @wrap_model_call
    @_with_middleware_logging("limit_tool_calls_to_one")
    def limit_tool_calls_to_one(request, handler):
        response = handler(request)

        if not hasattr(response, "result"):
            return response

        result = response.result
        candidates = result if isinstance(result, list) else [result]
        for candidate in candidates:
            if not (isinstance(candidate, AIMessage) and hasattr(candidate, "tool_calls")):
                continue
            calls = candidate.tool_calls
            if calls and len(calls) > 1:
                logger.info(
                    "Limiting tool calls from %d to 1 (keeping first: %s)",
                    len(calls),
                    calls[0].get("name", "unknown") if calls else "none",
                )
                # Mutate in place: drop everything after the first call.
                candidate.tool_calls = [calls[0]]

        return response

    return limit_tool_calls_to_one
362
+
363
+
364
+ def _get_string_params_from_tools(tools) -> Dict[str, set]:
365
+ """Extract string parameter names from tool schemas.
366
+
367
+ Analyzes each tool's Pydantic args_schema to determine which parameters
368
+ should be strings (not arrays).
369
+
370
+ Args:
371
+ tools: List of LangChain tools
372
+
373
+ Returns:
374
+ Dict mapping tool names to sets of string parameter names
375
+ """
376
+ from typing import get_args, get_origin
377
+
378
+ tool_string_params: Dict[str, set] = {}
379
+
380
+ for tool in tools:
381
+ tool_name = getattr(tool, 'name', None)
382
+ if not tool_name:
383
+ continue
384
+
385
+ args_schema = getattr(tool, 'args_schema', None)
386
+ if not args_schema:
387
+ continue
388
+
389
+ string_params = set()
390
+
391
+ # Get field annotations from Pydantic model
392
+ try:
393
+ annotations = getattr(args_schema, '__annotations__', {})
394
+ for field_name, field_type in annotations.items():
395
+ origin = get_origin(field_type)
396
+
397
+ # Check if it's a simple str type
398
+ if field_type is str:
399
+ string_params.add(field_name)
400
+ # Check if it's Optional[str] (Union[str, None])
401
+ elif origin is type(None) or str(origin) == 'typing.Union':
402
+ args = get_args(field_type)
403
+ if str in args:
404
+ string_params.add(field_name)
405
+ except Exception as e:
406
+ logger.debug("Failed to analyze schema for tool %s: %s", tool_name, e)
407
+
408
+ if string_params:
409
+ tool_string_params[tool_name] = string_params
410
+ logger.debug("Tool %s string params: %s", tool_name, string_params)
411
+
412
+ return tool_string_params
413
+
414
+
415
def create_normalize_tool_args_middleware(wrap_model_call, tools=None):
    """Create middleware that coerces list-valued tool arguments to strings.

    Gemini sometimes emits tool-call arguments as lists of content parts
    instead of plain strings. For parameters whose Pydantic schema declares
    str, this middleware joins those parts back into a single string.

    Args:
        wrap_model_call: LangChain's wrap_model_call decorator
        tools: Optional list of tools to analyze for type information

    Returns:
        Middleware function
    """

    # Map tool name -> set of parameter names declared as str in its schema.
    tool_string_params: Dict[str, set] = {}
    if tools:
        tool_string_params = _get_string_params_from_tools(tools)
        logger.info(
            "Initialized normalize_tool_args with %d tools: %s",
            len(tool_string_params),
            {k: list(v) for k, v in tool_string_params.items()},
        )

    def _flatten_parts(value):
        # Keep plain strings and the text of {"type": "text"} parts; None
        # signals that nothing joinable was found.
        pieces = [
            p if isinstance(p, str) else p.get("text", "")
            for p in value
            if isinstance(p, str)
            or (isinstance(p, dict) and p.get("type") == "text")
        ]
        return "\n".join(pieces) if pieces else None

    @wrap_model_call
    @_with_middleware_logging("normalize_tool_args")
    def normalize_tool_args(request, handler):
        response = handler(request)

        if not hasattr(response, "result"):
            return response

        result = response.result
        for msg in (result if isinstance(result, list) else [result]):
            if not (isinstance(msg, AIMessage) and hasattr(msg, "tool_calls")):
                continue
            for tool_call in msg.tool_calls or []:
                tool_name = tool_call.get("name", "")
                string_params = tool_string_params.get(tool_name, set())

                args = tool_call.get("args")
                if not isinstance(args, dict):
                    continue
                for key, value in args.items():
                    if key not in string_params or not isinstance(value, list):
                        continue
                    flattened = _flatten_parts(value)
                    if flattened is not None:
                        logger.info(
                            "Normalized list argument '%s' to string (length=%d) for tool '%s'",
                            key,
                            len(flattened),
                            tool_name,
                        )
                        args[key] = flattened

        return response

    return normalize_tool_args
483
+
484
+
485
def create_inject_continuation_middleware(wrap_model_call):
    """Create middleware to inject a continuation prompt after non-HITL tools.

    Non-HITL tools execute immediately without user approval, which can cause
    Gemini to produce empty responses. After such a tool result, a synthetic
    "[SYSTEM]" human message is appended reminding the LLM what to do next.

    Args:
        wrap_model_call: LangChain's wrap_model_call decorator

    Returns:
        Middleware function
    """

    @wrap_model_call
    @_with_middleware_logging("inject_continuation_after_non_hitl_tool")
    def inject_continuation_after_non_hitl_tool(request, handler):
        messages = request.messages
        if not messages:
            return handler(request)

        last_msg = messages[-1]
        if getattr(last_msg, "type", "") != "tool":
            return handler(request)

        tool_name = getattr(last_msg, "name", "") or ""
        if not tool_name:
            # Some providers omit the name; fall back to the JSON payload.
            try:
                tool_name = json.loads(last_msg.content).get("tool", "")
            except (json.JSONDecodeError, TypeError, AttributeError):
                pass

        if tool_name in NON_HITL_TOOLS:
            logger.info(
                "Injecting continuation prompt after non-HITL tool: %s",
                tool_name,
            )

            todos = request.state.get("todos", [])
            pending = [
                t for t in todos if t.get("status") in ("pending", "in_progress")
            ]

            if pending:
                pending_list = ", ".join(
                    t.get("content", "")[:30] for t in pending[:3]
                )
                continuation = (
                    f"Tool '{tool_name}' completed. "
                    f"Continue with pending tasks: {pending_list}. "
                    f"Call jupyter_cell_tool or the next appropriate tool."
                )
            else:
                continuation = (
                    f"Tool '{tool_name}' completed. All tasks done. "
                    f"Call final_answer_tool with a summary NOW."
                )

            request = request.override(
                messages=list(messages)
                + [HumanMessage(content=f"[SYSTEM] {continuation}")]
            )

        return handler(request)

    return inject_continuation_after_non_hitl_tool
552
+
553
+
554
def create_patch_tool_calls_middleware(AgentMiddleware, ToolMessage, Overwrite):
    """Create middleware to patch dangling tool calls.

    When a new user message arrives before a tool call completes, we need to
    add synthetic ToolMessage responses for those dangling calls so the
    conversation can continue properly.

    The dependencies are injected as parameters (rather than imported) so the
    class definition binds to whatever LangChain/LangGraph versions the
    caller supplies.

    Args:
        AgentMiddleware: LangChain's AgentMiddleware base class
        ToolMessage: LangChain's ToolMessage class
        Overwrite: LangGraph's Overwrite type

    Returns:
        PatchToolCallsMiddleware class instance
    """

    class PatchToolCallsMiddleware(AgentMiddleware):
        """Patch dangling tool calls so the agent can continue."""

        def before_agent(self, state, runtime):
            # Runs before each agent turn; returns None when nothing needs
            # patching, or a state update that overwrites the message list.
            logger.info(
                "%s",
                _format_middleware_marker(
                    "PatchToolCallsMiddleware.before_agent", "START"
                ),
            )
            messages = state.get("messages", [])
            if not messages:
                logger.info(
                    "%s",
                    _format_middleware_marker(
                        "PatchToolCallsMiddleware.before_agent", "NOOP"
                    ),
                )
                return None

            # Rebuild the message list, inserting a synthetic ToolMessage
            # immediately after any AI message whose tool call never got a
            # matching tool result later in the conversation.
            patched = []
            for i, msg in enumerate(messages):
                patched.append(msg)
                if getattr(msg, "type", "") == "ai" and getattr(
                    msg, "tool_calls", None
                ):
                    for tool_call in msg.tool_calls:
                        tool_call_id = tool_call.get("id")
                        if not tool_call_id:
                            # Cannot match a result without an id; skip.
                            continue
                        # Search only from this AI message onward: a tool
                        # result always follows the call that produced it.
                        has_tool_msg = any(
                            (
                                getattr(m, "type", "") == "tool"
                                and getattr(m, "tool_call_id", None) == tool_call_id
                            )
                            for m in messages[i:]
                        )
                        if not has_tool_msg:
                            tool_msg = (
                                f"Tool call {tool_call.get('name', 'unknown')} with id {tool_call_id} "
                                "was cancelled - another message came in before it could be completed."
                            )
                            patched.append(
                                ToolMessage(
                                    content=tool_msg,
                                    name=tool_call.get("name", "unknown"),
                                    tool_call_id=tool_call_id,
                                )
                            )

            # No synthetic messages were added: leave state untouched.
            if patched == messages:
                logger.info(
                    "%s",
                    _format_middleware_marker(
                        "PatchToolCallsMiddleware.before_agent", "NOOP"
                    ),
                )
                return None
            logger.info(
                "%s",
                _format_middleware_marker(
                    "PatchToolCallsMiddleware.before_agent", "PATCHED"
                ),
            )
            # Overwrite replaces (not appends to) the stored message list.
            return {"messages": Overwrite(patched)}

    return PatchToolCallsMiddleware()
"""
Jupyter Executors

Components:
- NotebookSearcher: Search notebooks and cells
"""

from agent_server.langchain.executors.notebook_searcher import NotebookSearcher

# Public API of the executors package (JupyterExecutor was removed in 2.0.7).
__all__ = ["NotebookSearcher"]