hdsp-jupyter-extension 2.0.0-py3-none-any.whl → 2.0.2-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (79)
  1. agent_server/langchain/__init__.py +18 -0
  2. agent_server/langchain/agent.py +694 -0
  3. agent_server/langchain/executors/__init__.py +15 -0
  4. agent_server/langchain/executors/jupyter_executor.py +429 -0
  5. agent_server/langchain/executors/notebook_searcher.py +477 -0
  6. agent_server/langchain/middleware/__init__.py +36 -0
  7. agent_server/langchain/middleware/code_search_middleware.py +278 -0
  8. agent_server/langchain/middleware/error_handling_middleware.py +338 -0
  9. agent_server/langchain/middleware/jupyter_execution_middleware.py +301 -0
  10. agent_server/langchain/middleware/rag_middleware.py +227 -0
  11. agent_server/langchain/middleware/validation_middleware.py +240 -0
  12. agent_server/langchain/state.py +159 -0
  13. agent_server/langchain/tools/__init__.py +39 -0
  14. agent_server/langchain/tools/file_tools.py +279 -0
  15. agent_server/langchain/tools/jupyter_tools.py +143 -0
  16. agent_server/langchain/tools/search_tools.py +309 -0
  17. agent_server/main.py +13 -0
  18. agent_server/routers/health.py +14 -0
  19. agent_server/routers/langchain_agent.py +1368 -0
  20. {hdsp_jupyter_extension-2.0.0.data → hdsp_jupyter_extension-2.0.2.data}/data/share/jupyter/labextensions/hdsp-agent/build_log.json +1 -1
  21. {hdsp_jupyter_extension-2.0.0.data → hdsp_jupyter_extension-2.0.2.data}/data/share/jupyter/labextensions/hdsp-agent/package.json +2 -2
  22. hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.2607ff74c74acfa83158.js → hdsp_jupyter_extension-2.0.2.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.634cf0ae0f3592d0882f.js +408 -4
  23. hdsp_jupyter_extension-2.0.2.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.634cf0ae0f3592d0882f.js.map +1 -0
  24. hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.622c1a5918b3aafb2315.js → hdsp_jupyter_extension-2.0.2.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.1366019c413f1d68467f.js +753 -65
  25. hdsp_jupyter_extension-2.0.2.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.1366019c413f1d68467f.js.map +1 -0
  26. hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.dae97cde171e13b8c834.js → hdsp_jupyter_extension-2.0.2.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.3379c4b222c042de2b01.js +11 -11
  27. hdsp_jupyter_extension-2.0.2.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.3379c4b222c042de2b01.js.map +1 -0
  28. jupyter_ext/labextension/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js-node_modules-782ee5.d9ed8645ef1d311657d8.js → hdsp_jupyter_extension-2.0.2.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js.24edcc52a1c014a8a5f0.js +2 -209
  29. hdsp_jupyter_extension-2.0.2.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js.24edcc52a1c014a8a5f0.js.map +1 -0
  30. jupyter_ext/labextension/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.36b49c71871f98d4f549.js → hdsp_jupyter_extension-2.0.2.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.19ecf6babe00caff6b8a.js +209 -2
  31. hdsp_jupyter_extension-2.0.2.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.19ecf6babe00caff6b8a.js.map +1 -0
  32. hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.2e13df4ea61496e95d45.js → hdsp_jupyter_extension-2.0.2.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.1f5038488cdfd8b3a85d.js +212 -3
  33. hdsp_jupyter_extension-2.0.2.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.1f5038488cdfd8b3a85d.js.map +1 -0
  34. {hdsp_jupyter_extension-2.0.0.dist-info → hdsp_jupyter_extension-2.0.2.dist-info}/METADATA +2 -6
  35. {hdsp_jupyter_extension-2.0.0.dist-info → hdsp_jupyter_extension-2.0.2.dist-info}/RECORD +67 -50
  36. jupyter_ext/__init__.py +1 -1
  37. jupyter_ext/_version.py +1 -1
  38. jupyter_ext/handlers.py +126 -1
  39. jupyter_ext/labextension/build_log.json +1 -1
  40. jupyter_ext/labextension/package.json +2 -2
  41. jupyter_ext/labextension/static/{frontend_styles_index_js.2607ff74c74acfa83158.js → frontend_styles_index_js.634cf0ae0f3592d0882f.js} +408 -4
  42. jupyter_ext/labextension/static/frontend_styles_index_js.634cf0ae0f3592d0882f.js.map +1 -0
  43. jupyter_ext/labextension/static/{lib_index_js.622c1a5918b3aafb2315.js → lib_index_js.1366019c413f1d68467f.js} +753 -65
  44. jupyter_ext/labextension/static/lib_index_js.1366019c413f1d68467f.js.map +1 -0
  45. jupyter_ext/labextension/static/{remoteEntry.dae97cde171e13b8c834.js → remoteEntry.3379c4b222c042de2b01.js} +11 -11
  46. jupyter_ext/labextension/static/remoteEntry.3379c4b222c042de2b01.js.map +1 -0
  47. hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js-node_modules-782ee5.d9ed8645ef1d311657d8.js → jupyter_ext/labextension/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js.24edcc52a1c014a8a5f0.js +2 -209
  48. jupyter_ext/labextension/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js.24edcc52a1c014a8a5f0.js.map +1 -0
  49. hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.36b49c71871f98d4f549.js → jupyter_ext/labextension/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.19ecf6babe00caff6b8a.js +209 -2
  50. jupyter_ext/labextension/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.19ecf6babe00caff6b8a.js.map +1 -0
  51. jupyter_ext/labextension/static/{vendors-node_modules_mui_material_utils_createSvgIcon_js.2e13df4ea61496e95d45.js → vendors-node_modules_mui_material_utils_createSvgIcon_js.1f5038488cdfd8b3a85d.js} +212 -3
  52. jupyter_ext/labextension/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.1f5038488cdfd8b3a85d.js.map +1 -0
  53. hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.2607ff74c74acfa83158.js.map +0 -1
  54. hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.622c1a5918b3aafb2315.js.map +0 -1
  55. hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.dae97cde171e13b8c834.js.map +0 -1
  56. hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js-node_modules-782ee5.d9ed8645ef1d311657d8.js.map +0 -1
  57. hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.36b49c71871f98d4f549.js.map +0 -1
  58. hdsp_jupyter_extension-2.0.0.data/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.2e13df4ea61496e95d45.js.map +0 -1
  59. jupyter_ext/labextension/static/frontend_styles_index_js.2607ff74c74acfa83158.js.map +0 -1
  60. jupyter_ext/labextension/static/lib_index_js.622c1a5918b3aafb2315.js.map +0 -1
  61. jupyter_ext/labextension/static/remoteEntry.dae97cde171e13b8c834.js.map +0 -1
  62. jupyter_ext/labextension/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js-node_modules-782ee5.d9ed8645ef1d311657d8.js.map +0 -1
  63. jupyter_ext/labextension/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.36b49c71871f98d4f549.js.map +0 -1
  64. jupyter_ext/labextension/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.2e13df4ea61496e95d45.js.map +0 -1
  65. {hdsp_jupyter_extension-2.0.0.data → hdsp_jupyter_extension-2.0.2.data}/data/etc/jupyter/jupyter_server_config.d/hdsp_jupyter_extension.json +0 -0
  66. {hdsp_jupyter_extension-2.0.0.data → hdsp_jupyter_extension-2.0.2.data}/data/share/jupyter/labextensions/hdsp-agent/install.json +0 -0
  67. {hdsp_jupyter_extension-2.0.0.data → hdsp_jupyter_extension-2.0.2.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b80.c095373419d05e6f141a.js +0 -0
  68. {hdsp_jupyter_extension-2.0.0.data → hdsp_jupyter_extension-2.0.2.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b80.c095373419d05e6f141a.js.map +0 -0
  69. {hdsp_jupyter_extension-2.0.0.data → hdsp_jupyter_extension-2.0.2.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b81.61e75fb98ecff46cf836.js +0 -0
  70. {hdsp_jupyter_extension-2.0.0.data → hdsp_jupyter_extension-2.0.2.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b81.61e75fb98ecff46cf836.js.map +0 -0
  71. {hdsp_jupyter_extension-2.0.0.data → hdsp_jupyter_extension-2.0.2.data}/data/share/jupyter/labextensions/hdsp-agent/static/style.js +0 -0
  72. {hdsp_jupyter_extension-2.0.0.data → hdsp_jupyter_extension-2.0.2.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_babel_runtime_helpers_esm_extends_js-node_modules_emotion_serialize_dist-051195.e2553aab0c3963b83dd7.js +0 -0
  73. {hdsp_jupyter_extension-2.0.0.data → hdsp_jupyter_extension-2.0.2.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_babel_runtime_helpers_esm_extends_js-node_modules_emotion_serialize_dist-051195.e2553aab0c3963b83dd7.js.map +0 -0
  74. {hdsp_jupyter_extension-2.0.0.data → hdsp_jupyter_extension-2.0.2.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_styled_dist_emotion-styled_browser_development_esm_js.661fb5836f4978a7c6e1.js +0 -0
  75. {hdsp_jupyter_extension-2.0.0.data → hdsp_jupyter_extension-2.0.2.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_styled_dist_emotion-styled_browser_development_esm_js.661fb5836f4978a7c6e1.js.map +0 -0
  76. {hdsp_jupyter_extension-2.0.0.data → hdsp_jupyter_extension-2.0.2.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_index_js.985697e0162d8d088ca2.js +0 -0
  77. {hdsp_jupyter_extension-2.0.0.data → hdsp_jupyter_extension-2.0.2.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_index_js.985697e0162d8d088ca2.js.map +0 -0
  78. {hdsp_jupyter_extension-2.0.0.dist-info → hdsp_jupyter_extension-2.0.2.dist-info}/WHEEL +0 -0
  79. {hdsp_jupyter_extension-2.0.0.dist-info → hdsp_jupyter_extension-2.0.2.dist-info}/licenses/LICENSE +0 -0
agent_server/routers/langchain_agent.py
@@ -0,0 +1,1368 @@
+ """
+ LangChain Agent Router
+
+ FastAPI router for the LangChain-based Jupyter agent.
+ Provides streaming and non-streaming endpoints for agent execution.
+ """
+
+ import asyncio
+ import json
+ import logging
+ import uuid
+ from typing import Any, Dict, List, Optional
+
+ from fastapi import APIRouter, HTTPException
+ from langchain_core.messages import AIMessage, HumanMessage, SystemMessage, ToolMessage
+ from langgraph.checkpoint.memory import InMemorySaver
+ from pydantic import BaseModel, ConfigDict, Field
+ from sse_starlette.sse import EventSourceResponse
+
+ from agent_server.langchain.agent import (
+     _create_llm,
+     _get_all_tools,
+     create_simple_chat_agent,
+ )
+
+ logger = logging.getLogger(__name__)
+ router = APIRouter(prefix="/langchain", tags=["langchain-agent"])
+
+
+ # ============ Request/Response Models ============
+
+
+ class LLMConfig(BaseModel):
+     """LLM configuration"""
+
+     provider: str = Field(default="gemini", description="LLM provider")
+     gemini: Optional[Dict[str, Any]] = Field(default=None)
+     openai: Optional[Dict[str, Any]] = Field(default=None)
+     vllm: Optional[Dict[str, Any]] = Field(default=None)
+
+
+ class NotebookContext(BaseModel):
+     """Current notebook context"""
+
+     model_config = ConfigDict(populate_by_name=True)
+
+     notebookPath: Optional[str] = Field(default=None, alias="notebook_path")
+     kernelId: Optional[str] = Field(default=None, alias="kernel_id")
+     cellCount: int = Field(default=0, alias="cell_count")
+     importedLibraries: List[str] = Field(
+         default_factory=list, alias="imported_libraries"
+     )
+     definedVariables: List[str] = Field(default_factory=list, alias="defined_variables")
+     recentCells: List[Dict[str, Any]] = Field(
+         default_factory=list, alias="recent_cells"
+     )
+
+
+ class AgentRequest(BaseModel):
+     """Request for agent execution"""
+
+     request: str = Field(description="User's natural language request")
+     notebookContext: Optional[NotebookContext] = Field(
+         default=None, description="Current notebook state"
+     )
+     llmConfig: Optional[LLMConfig] = Field(
+         default=None, description="LLM configuration"
+     )
+     stream: bool = Field(default=False, description="Enable streaming response")
+     workspaceRoot: Optional[str] = Field(
+         default=".", description="Workspace root directory"
+     )
+     threadId: Optional[str] = Field(
+         default=None,
+         description="Thread ID for conversation persistence (required for HITL)",
+     )
+
+
+ class ResumeDecision(BaseModel):
+     """Decision for resuming interrupted execution"""
+
+     type: str = Field(description="Decision type: approve, edit, or reject")
+     args: Optional[Dict[str, Any]] = Field(
+         default=None, description="Modified tool arguments (for edit)"
+     )
+     feedback: Optional[str] = Field(
+         default=None, description="Rejection feedback (for reject)"
+     )
+
+
+ class ResumeRequest(BaseModel):
+     """Request to resume interrupted execution"""
+
+     threadId: str = Field(description="Thread ID of interrupted execution")
+     decisions: List[ResumeDecision] = Field(
+         description="List of decisions for each interrupted action"
+     )
+     llmConfig: Optional[LLMConfig] = Field(
+         default=None, description="LLM configuration"
+     )
+     workspaceRoot: Optional[str] = Field(
+         default=".", description="Workspace root directory"
+     )
+
+
+ class ExecutionResult(BaseModel):
+     """Single execution result"""
+
+     model_config = ConfigDict(populate_by_name=True)
+
+     success: bool
+     output: Optional[str] = None
+     error: Optional[str] = None
+     errorType: Optional[str] = Field(default=None, alias="error_type")
+     cellIndex: Optional[int] = Field(default=None, alias="cell_index")
+
+
+ class AgentResponse(BaseModel):
+     """Response from agent execution"""
+
+     model_config = ConfigDict(populate_by_name=True)
+
+     success: bool
+     finalAnswer: Optional[str] = Field(default=None, alias="final_answer")
+     executionHistory: List[ExecutionResult] = Field(
+         default_factory=list, alias="execution_history"
+     )
+     isComplete: bool = Field(default=False, alias="is_complete")
+     error: Optional[str] = None
+     errorType: Optional[str] = Field(default=None, alias="error_type")
+
+
+ # ============ Agent Instance Cache ============
+
+
+ _simple_agent_checkpointers: Dict[str, Any] = {}
+ _simple_agent_pending_actions: Dict[str, List[Dict[str, Any]]] = {}
+
+
+ def _normalize_action_request(action: Dict[str, Any]) -> Dict[str, Any]:
+     """Normalize HITL action request payload across LangChain versions."""
+     name = (
+         action.get("name")
+         or action.get("tool")
+         or action.get("tool_name")
+         or action.get("action")
+         or "unknown"
+     )
+     args = (
+         action.get("arguments")
+         or action.get("args")
+         or action.get("tool_input")
+         or action.get("input")
+         or action.get("parameters")
+         or {}
+     )
+     description = action.get("description", "")
+     return {"name": name, "arguments": args, "description": description}
+
+
+ def _extract_todos(payload: Any) -> Optional[List[Dict[str, Any]]]:
+     """Extract todos list from various payload shapes."""
+     if isinstance(payload, str):
+         if "Updated todo list to" in payload:
+             try:
+                 import ast
+
+                 list_text = payload.split("Updated todo list to", 1)[1].strip()
+                 payload = {"todos": ast.literal_eval(list_text)}
+             except Exception:
+                 pass
+     if isinstance(payload, str):
+         try:
+             payload = json.loads(payload)
+         except json.JSONDecodeError:
+             try:
+                 import ast
+
+                 payload = ast.literal_eval(payload)
+             except Exception:
+                 return None
+     if not isinstance(payload, dict):
+         return None
+
+     for key in ("todos", "todo_list", "todoList"):
+         todos = payload.get(key)
+         if isinstance(todos, list) and todos:
+             return todos
+     return None
+
+
+ def _emit_todos_from_tool_calls(
+     tool_calls: List[Dict[str, Any]],
+ ) -> Optional[List[Dict[str, Any]]]:
+     """Extract todos from AIMessage tool calls if present."""
+     for tool_call in tool_calls:
+         name = tool_call.get("name") or tool_call.get("tool") or ""
+         if name in ("write_todos", "write_todos_tool", "todos"):
+             todos = _extract_todos(
+                 tool_call.get("args")
+                 or tool_call.get("arguments")
+                 or tool_call.get("input")
+             )
+             if todos:
+                 return todos
+     return None
+
+
+ def _normalize_tool_calls(raw_tool_calls: Any) -> List[Dict[str, Any]]:
+     """Normalize tool calls from provider-specific payloads."""
+     if not raw_tool_calls:
+         return []
+     if isinstance(raw_tool_calls, dict):
+         raw_tool_calls = [raw_tool_calls]
+     if not isinstance(raw_tool_calls, list):
+         return []
+
+     normalized: List[Dict[str, Any]] = []
+     for call in raw_tool_calls:
+         if not isinstance(call, dict):
+             continue
+         if "name" in call and "args" in call:
+             normalized.append({"name": call.get("name"), "args": call.get("args")})
+             continue
+         if call.get("type") == "function" and "function" in call:
+             fn = call.get("function", {})
+             args = fn.get("arguments", {})
+             if isinstance(args, str):
+                 try:
+                     args = json.loads(args)
+                 except json.JSONDecodeError:
+                     args = {}
+             normalized.append({"name": fn.get("name"), "args": args})
+             continue
+         if "function_call" in call:
+             fn = call.get("function_call", {})
+             args = fn.get("arguments", {})
+             if isinstance(args, str):
+                 try:
+                     args = json.loads(args)
+                 except json.JSONDecodeError:
+                     args = {}
+             normalized.append({"name": fn.get("name"), "args": args})
+             continue
+     return normalized
+
+
+ def _message_signature(message: Any) -> str:
+     """Create a stable signature to de-duplicate repeated streamed messages."""
+     content = getattr(message, "content", "") or ""
+     tool_calls = getattr(message, "tool_calls", None)
+     if tool_calls:
+         try:
+             tool_calls = json.dumps(tool_calls, ensure_ascii=False, sort_keys=True)
+         except TypeError:
+             tool_calls = str(tool_calls)
+     else:
+         tool_calls = ""
+     return f"{type(message).__name__}:{content}:{tool_calls}"
+
+
+ def _complete_todos(todos: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
+     """Mark all todos as completed to close out the run."""
+     return [
+         {**todo, "status": "completed"} if todo.get("status") != "completed" else todo
+         for todo in todos
+     ]
+
+
+ async def _generate_fallback_code(
+     llm: Any,
+     tool_name: str,
+     tool_args: Dict[str, Any],
+ ) -> str:
+     """Generate Python code for a tool intent when tool calls are malformed."""
+     system_prompt = (
+         "You generate Python code to run in a Jupyter notebook. "
+         "Return ONLY Python code with no markdown fences or extra text."
+     )
+     user_prompt = (
+         "Tool intent:\n"
+         f"- tool_name: {tool_name}\n"
+         f"- tool_args: {json.dumps(tool_args, ensure_ascii=False)}\n\n"
+         "Write minimal, safe Python code that accomplishes the intent. "
+         "Use only standard library unless pandas is clearly required."
+     )
+     response = await llm.ainvoke(
+         [
+             SystemMessage(content=system_prompt),
+             HumanMessage(content=user_prompt),
+         ]
+     )
+     return (getattr(response, "content", "") or "").strip()
+
+
+ # ============ Endpoints ============
+
+
+ @router.post("/run", response_model=AgentResponse)
+ async def run_agent(request: AgentRequest) -> Dict[str, Any]:
+     """
+     Non-streaming agent execution (not supported).
+
+     The simple agent only supports streaming; this endpoint always
+     returns HTTP 400 and directs callers to /langchain/stream.
+     """
+     logger.info(f"Agent run request: {request.request[:100]}...")
+
+     if not request.request:
+         raise HTTPException(status_code=400, detail="Request is required")
+
+     raise HTTPException(
+         status_code=400,
+         detail="Non-streaming execution is not supported for the simple agent. Use /langchain/stream.",
+     )
+
+
+ @router.post("/stream")
+ async def stream_agent(request: AgentRequest):
+     """
+     Execute agent with streaming response.
+
+     Returns Server-Sent Events (SSE) with:
+     - debug: Debug status (middleware, tool call, LLM call)
+     - token: LLM response tokens
+     - tool_call: Tool invocation events
+     - tool_result: Tool execution results
+     - interrupt: Human-in-the-loop approval required
+     - complete: Final answer and completion
+     - error: Error events
+     """
+
+     logger.info(f"Agent stream request: {request.request[:100]}...")
+
+     if not request.request:
+         raise HTTPException(status_code=400, detail="Request is required")
+
+     # Generate thread_id if not provided
+     thread_id = request.threadId or str(uuid.uuid4())
+
+     async def event_generator():
+         try:
+             # Use simple agent with HITL
+             provider = request.llmConfig.provider if request.llmConfig else "gemini"
+             model_name = None
+             if request.llmConfig:
+                 if request.llmConfig.gemini:
+                     model_name = request.llmConfig.gemini.get("model")
+                 elif request.llmConfig.openai:
+                     model_name = request.llmConfig.openai.get("model")
+                 elif request.llmConfig.vllm:
+                     model_name = request.llmConfig.vllm.get("model")
+             logger.info("SimpleAgent LLM provider=%s model=%s", provider, model_name)
+             # Convert LLMConfig to dict
+             config_dict = {
+                 "provider": request.llmConfig.provider
+                 if request.llmConfig
+                 else "gemini",
+             }
+             if request.llmConfig:
+                 if request.llmConfig.gemini:
+                     config_dict["gemini"] = request.llmConfig.gemini
+                 if request.llmConfig.openai:
+                     config_dict["openai"] = request.llmConfig.openai
+                 if request.llmConfig.vllm:
+                     config_dict["vllm"] = request.llmConfig.vllm
+
+             agent = create_simple_chat_agent(
+                 llm_config=config_dict,
+                 workspace_root=request.workspaceRoot or ".",
+                 enable_hitl=True,
+                 checkpointer=_simple_agent_checkpointers.setdefault(
+                     thread_id, InMemorySaver()
+                 ),
+             )
+
+             # Prepare config with thread_id
+             config = {"configurable": {"thread_id": thread_id}}
+
+             # Prepare input
+             agent_input = {"messages": [{"role": "user", "content": request.request}]}
+
+             # Stream with interrupt handling
+             logger.info(
+                 "SimpleAgent input: %s", json.dumps(agent_input, ensure_ascii=False)
+             )
+             produced_output = False
+             last_finish_reason = None
+             last_signature = None
+             latest_todos: Optional[List[Dict[str, Any]]] = None
+
+             # Initial status: waiting for LLM
+             yield {
+                 "event": "debug",
+                 "data": json.dumps({"status": "🤔 Waiting for LLM response..."}),
+             }
+
+             for step in agent.stream(agent_input, config, stream_mode="values"):
+                 if isinstance(step, dict):
+                     logger.info(
+                         "SimpleAgent step keys: %s", ",".join(sorted(step.keys()))
+                     )
+                 # Check for interrupt
+                 if isinstance(step, dict) and "__interrupt__" in step:
+                     interrupts = step["__interrupt__"]
+
+                     yield {
+                         "event": "debug",
+                         "data": json.dumps({"status": "⏸️ Waiting for user approval..."}),
+                     }
+
+                     # Process interrupts
+                     for interrupt in interrupts:
+                         interrupt_value = (
+                             interrupt.value
+                             if hasattr(interrupt, "value")
+                             else interrupt
+                         )
+
+                         # Extract action requests
+                         action_requests = interrupt_value.get("action_requests", [])
+                         normalized_actions = [
+                             _normalize_action_request(a) for a in action_requests
+                         ]
+                         if normalized_actions:
+                             _simple_agent_pending_actions[thread_id] = (
+                                 normalized_actions
+                             )
+
+                         for action in normalized_actions:
+                             yield {
+                                 "event": "interrupt",
+                                 "data": json.dumps(
+                                     {
+                                         "thread_id": thread_id,
+                                         "action": action.get("name", "unknown"),
+                                         "args": action.get("arguments", {}),
+                                         "description": action.get("description", ""),
+                                     }
+                                 ),
+                             }
+
+                     # Stop streaming - wait for resume
+                     return
+
+                 # Check for todos in state and stream them
+                 if isinstance(step, dict) and "todos" in step:
+                     todos = step["todos"]
+                     if todos:
+                         latest_todos = todos
+                         yield {
+                             "event": "todos",
+                             "data": json.dumps({"todos": todos}),
+                         }
+                 elif isinstance(step, dict):
+                     todos = _extract_todos(step)
+                     if todos:
+                         latest_todos = todos
+                         yield {
+                             "event": "todos",
+                             "data": json.dumps({"todos": todos}),
+                         }
+
+                 # Process messages
+                 if isinstance(step, dict) and "messages" in step:
+                     messages = step["messages"]
+                     if messages:
+                         last_message = messages[-1]
+                         signature = _message_signature(last_message)
+                         if signature == last_signature:
+                             continue
+                         last_signature = signature
+                         logger.info(
+                             "SimpleAgent last_message type=%s has_content=%s tool_calls=%s",
+                             type(last_message).__name__,
+                             bool(getattr(last_message, "content", None)),
+                             bool(getattr(last_message, "tool_calls", None)),
+                         )
+
+                         # Skip HumanMessage - don't echo user's input back
+                         if isinstance(last_message, HumanMessage):
+                             continue
+
+                         # Handle ToolMessage - extract final_answer result
+                         if isinstance(last_message, ToolMessage):
+                             logger.info(
+                                 "SimpleAgent ToolMessage content: %s",
+                                 last_message.content,
+                             )
+                             todos = _extract_todos(last_message.content)
+                             if todos:
+                                 latest_todos = todos
+                                 yield {
+                                     "event": "todos",
+                                     "data": json.dumps({"todos": todos}),
+                                 }
+                             tool_name = getattr(last_message, "name", "") or ""
+                             logger.info(
+                                 "SimpleAgent ToolMessage name attribute: %s", tool_name
+                             )
+
+                             # Also check content for tool name if name attribute is empty
+                             if not tool_name:
+                                 try:
+                                     content_json = json.loads(last_message.content)
+                                     tool_name = content_json.get("tool", "")
+                                     logger.info(
+                                         "SimpleAgent ToolMessage tool from content: %s",
+                                         tool_name,
+                                     )
+                                 except (json.JSONDecodeError, TypeError):
+                                     pass
+
+                             if tool_name in ("final_answer_tool", "final_answer"):
+                                 # Extract the final answer from the tool result
+                                 try:
+                                     tool_result = json.loads(last_message.content)
+                                     # Check both direct "answer" and "parameters.answer"
+                                     final_answer = tool_result.get(
+                                         "answer"
+                                     ) or tool_result.get("parameters", {}).get("answer")
+                                     if final_answer:
+                                         yield {
+                                             "event": "token",
+                                             "data": json.dumps({"content": final_answer}),
+                                         }
+                                     else:
+                                         # Fallback to raw content if no answer found
+                                         yield {
+                                             "event": "token",
+                                             "data": json.dumps(
+                                                 {"content": last_message.content}
+                                             ),
+                                         }
+                                 except json.JSONDecodeError:
+                                     # If not JSON, use content directly
+                                     if last_message.content:
+                                         yield {
+                                             "event": "token",
+                                             "data": json.dumps(
+                                                 {"content": last_message.content}
+                                             ),
+                                         }
+                                 if latest_todos:
+                                     yield {
+                                         "event": "todos",
+                                         "data": json.dumps(
+                                             {"todos": _complete_todos(latest_todos)}
+                                         ),
+                                     }
+                                 # End stream after final answer
+                                 yield {"event": "debug_clear", "data": json.dumps({})}
+                                 yield {
+                                     "event": "complete",
+                                     "data": json.dumps(
+                                         {"success": True, "thread_id": thread_id}
+                                     ),
+                                 }
+                                 return
+                             # Skip other tool messages (jupyter_cell, markdown results)
+                             continue
+
+                         # Handle AIMessage
+                         if isinstance(last_message, AIMessage):
+                             logger.info(
+                                 "SimpleAgent AIMessage content: %s",
+                                 last_message.content or "",
+                             )
+                             logger.info(
+                                 "SimpleAgent AIMessage tool_calls: %s",
+                                 json.dumps(last_message.tool_calls, ensure_ascii=False)
+                                 if hasattr(last_message, "tool_calls")
+                                 else "[]",
+                             )
+                             logger.info(
+                                 "SimpleAgent AIMessage additional_kwargs: %s",
+                                 json.dumps(
+                                     getattr(last_message, "additional_kwargs", {})
+                                     or {},
+                                     ensure_ascii=False,
+                                 ),
+                             )
+                             logger.info(
+                                 "SimpleAgent AIMessage response_metadata: %s",
+                                 json.dumps(
+                                     getattr(last_message, "response_metadata", {})
+                                     or {},
+                                     ensure_ascii=False,
+                                 ),
+                             )
+                             logger.info(
+                                 "SimpleAgent AIMessage usage_metadata: %s",
+                                 json.dumps(
+                                     getattr(last_message, "usage_metadata", {}) or {},
+                                     ensure_ascii=False,
+                                 ),
+                             )
+                             last_finish_reason = (
+                                 getattr(last_message, "response_metadata", {}) or {}
+                             ).get("finish_reason")
+                             # Check for tool calls first (display debug)
+                             tool_calls = []
+                             if (
+                                 hasattr(last_message, "tool_calls")
+                                 and last_message.tool_calls
+                             ):
+                                 tool_calls = last_message.tool_calls
+                             else:
+                                 raw_tool_calls = (
+                                     getattr(last_message, "additional_kwargs", {}) or {}
+                                 ).get("tool_calls")
+                                 if not raw_tool_calls:
+                                     raw_tool_calls = (
+                                         getattr(last_message, "additional_kwargs", {})
+                                         or {}
+                                     ).get("function_call")
+                                 tool_calls = _normalize_tool_calls(raw_tool_calls)
+
+                             if tool_calls:
+                                 todos = _emit_todos_from_tool_calls(tool_calls)
+                                 if todos:
+                                     latest_todos = todos
+                                     yield {
+                                         "event": "todos",
+                                         "data": json.dumps({"todos": todos}),
+                                     }
+                                 for tool_call in tool_calls:
+                                     tool_name = tool_call.get("name", "unknown")
+                                     tool_args = tool_call.get("args", {})
+
+                                     yield {
+                                         "event": "debug",
+                                         "data": json.dumps(
+                                             {"status": f"🔧 Running tool: {tool_name}"}
+                                         ),
+                                     }
+
+                                     # Send tool_call event with details for frontend to execute
+                                     if tool_name in (
+                                         "jupyter_cell_tool",
+                                         "jupyter_cell",
+                                     ):
+                                         produced_output = True
+                                         yield {
+                                             "event": "tool_call",
+                                             "data": json.dumps(
+                                                 {
+                                                     "tool": "jupyter_cell",
+                                                     "code": tool_args.get("code", ""),
+                                                     "description": tool_args.get(
+                                                         "description", ""
+                                                     ),
+                                                 }
+                                             ),
+                                         }
+                                     elif tool_name in ("markdown_tool", "markdown"):
+                                         produced_output = True
+                                         yield {
+                                             "event": "tool_call",
+                                             "data": json.dumps(
+                                                 {
+                                                     "tool": "markdown",
+                                                     "content": tool_args.get(
+                                                         "content", ""
+                                                     ),
+                                                 }
+                                             ),
+                                         }
+
+                             # Only display content if it's not empty and not a JSON tool response
+                             if (
+                                 hasattr(last_message, "content")
+                                 and last_message.content
+                             ):
+                                 content = last_message.content
+
+                                 # Filter out raw JSON tool responses
+                                 if not (
+                                     content.strip().startswith('{"tool":')
+                                     or content.strip().startswith('{"status":')
+                                     or '"pending_execution"' in content
+                                     or '"status": "complete"' in content
+                                 ):
+                                     produced_output = True
+                                     yield {
+                                         "event": "token",
+                                         "data": json.dumps({"content": content}),
+                                     }
+
+             if not produced_output and last_finish_reason == "MALFORMED_FUNCTION_CALL":
+                 logger.info(
+                     "SimpleAgent fallback: retrying tool call generation after malformed function call"
+                 )
+                 try:
+                     fallback_config = json.loads(json.dumps(config_dict))
+                     if fallback_config.get(
+                         "provider"
+                     ) == "gemini" and fallback_config.get("gemini", {}).get(
+                         "model", ""
+                     ).endswith("flash"):
+                         fallback_config.setdefault("gemini", {})["model"] = (
+                             "gemini-2.5-pro"
+                         )
+                         logger.info(
+                             "SimpleAgent fallback: switching model to gemini-2.5-pro"
+                         )
+
+                     llm = _create_llm(fallback_config)
+                     tools = _get_all_tools()
+                     # Force tool calling - use tool_config for Gemini, tool_choice for others
+                     provider = config_dict.get("provider", "gemini")
+                     if provider == "gemini":
+                         # Gemini uses tool_config with function_calling_config
+                         llm_with_tools = llm.bind_tools(
+                             tools,
+                             tool_config={"function_calling_config": {"mode": "ANY"}},
+                         )
+                     else:
+                         # OpenAI and others use tool_choice
+                         llm_with_tools = llm.bind_tools(tools, tool_choice="any")
+                     fallback_messages = [
+                         SystemMessage(
+                             content=(
+                                 "You MUST respond with a valid tool call. "
+                                 "Available tools: jupyter_cell_tool (for Python code), markdown_tool (for text), "
+                                 "list_files_tool (to list files), read_file_tool (to read files). "
+                                 "Choose the most appropriate tool and provide valid JSON arguments."
+                             )
+                         ),
+                         HumanMessage(content=request.request),
+                     ]
+                     logger.info(
+                         "SimpleAgent fallback: calling LLM with tool_choice=any"
+                     )
+                     fallback_response = await asyncio.wait_for(
+                         llm_with_tools.ainvoke(fallback_messages),
+                         timeout=30,
+                     )
+                     logger.info("SimpleAgent fallback: LLM response received")
+                     logger.info(
+                         "SimpleAgent fallback response type: %s",
+                         type(fallback_response).__name__,
+                     )
+                     if hasattr(fallback_response, "tool_calls"):
+                         logger.info(
+                             "SimpleAgent fallback tool_calls: %s",
+                             json.dumps(
+                                 fallback_response.tool_calls or [],
+                                 ensure_ascii=False,
+                             ),
+                         )
+                     if hasattr(fallback_response, "content"):
+                         logger.info(
+                             "SimpleAgent fallback content: %s",
+                             fallback_response.content or "",
+                         )
+                 except asyncio.TimeoutError:
+                     logger.error("SimpleAgent fallback timed out after 30s")
+                     yield {
+                         "event": "token",
+                         "data": json.dumps(
+                             {
+                                 "content": "The model did not produce a tool call. Please try again."
+                             }
+                         ),
+                     }
+                     produced_output = True
+                     fallback_response = None
+                 except Exception as fallback_error:
+                     logger.error(
+                         "SimpleAgent fallback error: %s",
+                         fallback_error,
+                         exc_info=True,
+                     )
+                     yield {
+                         "event": "token",
+                         "data": json.dumps(
+                             {"content": f"An error occurred: {str(fallback_error)}"}
+                         ),
+                     }
+                     produced_output = True
+                     fallback_response = None
+                 if isinstance(fallback_response, AIMessage) and getattr(
+                     fallback_response, "tool_calls", None
+                 ):
+                     for tool_call in fallback_response.tool_calls:
+                         tool_name = tool_call.get("name", "unknown")
+                         tool_args = tool_call.get("args", {})
+
+                         logger.info("Fallback processing tool: %s", tool_name)
+
+                         if tool_name in ("jupyter_cell_tool", "jupyter_cell"):
+                             produced_output = True
+                             yield {
+                                 "event": "debug",
+                                 "data": json.dumps(
+                                     {"status": f"🔧 Running tool: {tool_name}"}
+                                 ),
+                             }
+                             yield {
+                                 "event": "tool_call",
+                                 "data": json.dumps(
+                                     {
+                                         "tool": "jupyter_cell",
+                                         "code": tool_args.get("code", ""),
+                                         "description": tool_args.get("description", ""),
+                                     }
+                                 ),
+                             }
+                         elif tool_name in ("markdown_tool", "markdown"):
+                             produced_output = True
+                             yield {
+                                 "event": "debug",
+                                 "data": json.dumps(
+                                     {"status": f"🔧 Running tool: {tool_name}"}
+                                 ),
+                             }
+                             yield {
+                                 "event": "tool_call",
+                                 "data": json.dumps(
+                                     {
+                                         "tool": "markdown",
+                                         "content": tool_args.get("content", ""),
+                                     }
+                                 ),
+                             }
+                         elif tool_name in (
+                             "read_file_tool",
+                             "list_files_tool",
+                             "search_workspace_tool",
+                         ):
+                             # For file operations, generate code with the LLM
+                             logger.info(
+                                 "Fallback: Generating code for %s via LLM",
+                                 tool_name,
+                             )
+                             produced_output = True
+                             try:
+                                 code = await asyncio.wait_for(
+                                     _generate_fallback_code(
+                                         llm=llm,
+                                         tool_name=tool_name,
+                                         tool_args=tool_args,
+                                     ),
+                                     timeout=30,
+                                 )
+                             except asyncio.TimeoutError:
+                                 code = ""
+                                 logger.error(
+                                     "Fallback code generation timed out for %s",
+                                     tool_name,
+                                 )
+                             except Exception as code_error:
+                                 code = ""
+                                 logger.error(
+                                     "Fallback code generation error: %s",
+                                     code_error,
+                                     exc_info=True,
+                                 )
+
+                             if not code:
+                                 yield {
+                                     "event": "token",
+                                     "data": json.dumps(
+                                         {
+                                             "content": "Failed to generate code for the tool call. Please try again."
+                                         }
+                                     ),
+                                 }
+                                 produced_output = True
+                                 continue
+
+                             yield {
+                                 "event": "debug",
+                                 "data": json.dumps(
+                                     {"status": "🔄 Converting to a Jupyter cell..."}
+                                 ),
+                             }
+                             yield {
+                                 "event": "tool_call",
+                                 "data": json.dumps(
+                                     {
+                                         "tool": "jupyter_cell",
+                                         "code": code,
+                                         "description": f"Converted from {tool_name}",
+                                     }
+                                 ),
+                             }
+                         else:
+                             # Unknown tool - skip and show message
+                             logger.warning(
+                                 "Fallback: Unknown tool %s, skipping", tool_name
+                             )
+                             yield {
+                                 "event": "token",
+                                 "data": json.dumps(
+                                     {
+                                         "content": f"Unknown tool '{tool_name}'. Please use jupyter_cell_tool."
+                                     }
+                                 ),
+                             }
+                             produced_output = True
+                 elif (
+                     isinstance(fallback_response, AIMessage)
+                     and fallback_response.content
+                 ):
+                     produced_output = True
+                     yield {
+                         "event": "token",
+                         "data": json.dumps({"content": fallback_response.content}),
+                     }
+                 elif fallback_response is not None and not produced_output:
+                     yield {
+                         "event": "token",
+                         "data": json.dumps(
+                             {
+                                 "content": "The model did not produce a tool call. Please try again."
+                             }
+                         ),
+                     }
+                     produced_output = True
+
+             # Clear debug status before completion
+             yield {"event": "debug_clear", "data": json.dumps({})}
+
+             # No interrupt - execution completed
+             yield {
+                 "event": "complete",
+                 "data": json.dumps({"success": True, "thread_id": thread_id}),
+             }
+
+         except Exception as e:
+             logger.error(f"Stream error: {e}", exc_info=True)
+             yield {
+                 "event": "error",
+                 "data": json.dumps(
+                     {
+                         "error": str(e),
+                         "error_type": type(e).__name__,
+                     }
+                 ),
+             }
+
+     return EventSourceResponse(event_generator())
+
+
+ @router.post("/resume")
+ async def resume_agent(request: ResumeRequest):
+     """
+     Resume interrupted agent execution with user decisions.
+
+     Takes user decisions (approve/edit/reject) and resumes the agent
+     execution from the interrupt point.
+
+     Returns Server-Sent Events (SSE) with the same format as /stream.
+     """
+     from langgraph.types import Command
+
+     logger.info(f"Resume request for thread: {request.threadId}")
+
+     async def event_generator():
+         try:
+             # Convert LLMConfig to dict
+             config_dict = {
+                 "provider": request.llmConfig.provider
+                 if request.llmConfig
+                 else "gemini",
+             }
+             if request.llmConfig:
+                 if request.llmConfig.gemini:
+                     config_dict["gemini"] = request.llmConfig.gemini
+                 if request.llmConfig.openai:
+                     config_dict["openai"] = request.llmConfig.openai
+                 if request.llmConfig.vllm:
+                     config_dict["vllm"] = request.llmConfig.vllm
+
+             # Create agent (will use same checkpointer)
+             agent = create_simple_chat_agent(
+                 llm_config=config_dict,
+                 workspace_root=request.workspaceRoot or ".",
+                 enable_hitl=True,
+                 checkpointer=_simple_agent_checkpointers.setdefault(
+                     request.threadId, InMemorySaver()
+                 ),
+             )
+
+             # Prepare config with thread_id
+             config = {"configurable": {"thread_id": request.threadId}}
+
+             pending_actions = _simple_agent_pending_actions.get(request.threadId, [])
+
+             # Convert decisions to LangChain format
+             langgraph_decisions = []
+             for index, decision in enumerate(request.decisions):
+                 if decision.type == "approve":
+                     langgraph_decisions.append({"type": "approve"})
+                 elif decision.type == "edit":
+                     action = (
+                         pending_actions[index] if index < len(pending_actions) else {}
+                     )
+                     edited_action = {
+                         "name": action.get("name", "unknown"),
+                         "args": decision.args or action.get("arguments", {}) or {},
+                     }
+                     langgraph_decisions.append(
+                         {
+                             "type": "edit",
+                             "edited_action": edited_action,
+                         }
+                     )
+                 elif decision.type == "reject":
+                     langgraph_decisions.append(
+                         {
+                             "type": "reject",
+                             "feedback": decision.feedback
+                             or "User rejected this action",
+                         }
+                     )
+
+             # Resume execution
+             yield {
+                 "event": "debug",
+                 "data": json.dumps({"status": "▶️ Resuming execution..."}),
+             }
+
+             _simple_agent_pending_actions.pop(request.threadId, None)
+
+             # Track processed tool calls to avoid duplicates (middleware can yield the same step multiple times)
+             processed_tool_call_ids: set[str] = set()
+             latest_todos: Optional[List[Dict[str, Any]]] = None
+
+             # Resume with Command
+             last_signature = None
+
+             # Status: waiting for LLM response
+             yield {
+                 "event": "debug",
+                 "data": json.dumps({"status": "🤔 Waiting for LLM response..."}),
+             }
+
+             step_count = 0
+             for step in agent.stream(
+                 Command(resume={"decisions": langgraph_decisions}),
+                 config,
+                 stream_mode="values",
+             ):
+                 step_count += 1
+                 step_keys = sorted(step.keys()) if isinstance(step, dict) else []
+                 logger.info(
+                     "Resume stream step %d: type=%s, keys=%s",
+                     step_count,
+                     type(step).__name__,
+                     step_keys,
+                 )
+
+                 # Check for another interrupt
+                 if isinstance(step, dict) and "__interrupt__" in step:
+                     interrupts = step["__interrupt__"]
+
+                     yield {
+                         "event": "debug",
+                         "data": json.dumps({"status": "⏸️ Waiting for user approval..."}),
+                     }
+
+                     for interrupt in interrupts:
+                         interrupt_value = (
+                             interrupt.value
+                             if hasattr(interrupt, "value")
+                             else interrupt
+                         )
+                         action_requests = interrupt_value.get("action_requests", [])
+                         normalized_actions = [
+                             _normalize_action_request(a) for a in action_requests
+                         ]
+                         if normalized_actions:
+                             _simple_agent_pending_actions[request.threadId] = (
+                                 normalized_actions
+                             )
+
+                         for action in normalized_actions:
+                             yield {
+                                 "event": "interrupt",
+                                 "data": json.dumps(
+                                     {
+                                         "thread_id": request.threadId,
+                                         "action": action.get("name", "unknown"),
+                                         "args": action.get("arguments", {}),
+                                         "description": action.get("description", ""),
+                                     }
+                                 ),
+                             }
+
+                     return
+
+                 # Check for todos in state and stream them
+                 if isinstance(step, dict) and "todos" in step:
+                     todos = step["todos"]
+                     if todos:
+                         latest_todos = todos
+                         yield {"event": "todos", "data": json.dumps({"todos": todos})}
+                 elif isinstance(step, dict):
+                     todos = _extract_todos(step)
+                     if todos:
+                         latest_todos = todos
+                         yield {"event": "todos", "data": json.dumps({"todos": todos})}
+
+                 # Process messages
+                 if isinstance(step, dict) and "messages" in step:
+                     messages = step["messages"]
+                     if messages:
+                         last_message = messages[-1]
+                         signature = _message_signature(last_message)
+                         if signature == last_signature:
+                             continue
+                         last_signature = signature
+
+                         if isinstance(last_message, ToolMessage):
+                             logger.info(
+                                 "Resume ToolMessage content: %s", last_message.content
+                             )
+                             todos = _extract_todos(last_message.content)
+                             if todos:
+                                 latest_todos = todos
+                                 yield {
+                                     "event": "todos",
+                                     "data": json.dumps({"todos": todos}),
+                                 }
+                             tool_name = getattr(last_message, "name", "") or ""
+                             logger.info(
+                                 "Resume ToolMessage name attribute: %s", tool_name
+                             )
+
+                             # Also check content for tool name if name attribute is empty
+                             if not tool_name:
+                                 try:
+                                     content_json = json.loads(last_message.content)
+                                     tool_name = content_json.get("tool", "")
+                                     logger.info(
+                                         "Resume ToolMessage tool from content: %s",
+                                         tool_name,
+                                     )
+                                 except (json.JSONDecodeError, TypeError):
+                                     pass
+
+                             if tool_name in ("final_answer_tool", "final_answer"):
+                                 try:
+                                     tool_result = json.loads(last_message.content)
+                                     final_answer = tool_result.get(
+                                         "answer"
+                                     ) or tool_result.get("parameters", {}).get("answer")
+                                     if final_answer:
+                                         yield {
+                                             "event": "token",
+                                             "data": json.dumps(
+                                                 {"content": final_answer}
+                                             ),
+                                         }
+                                     else:
+                                         yield {
+                                             "event": "token",
+                                             "data": json.dumps(
+                                                 {"content": last_message.content}
+                                             ),
+                                         }
+                                 except json.JSONDecodeError:
+                                     yield {
+                                         "event": "token",
+                                         "data": json.dumps(
+                                             {"content": last_message.content}
+                                         ),
+                                     }
+                                 if latest_todos:
+                                     yield {
+                                         "event": "todos",
+                                         "data": json.dumps(
+                                             {"todos": _complete_todos(latest_todos)}
+                                         ),
+                                     }
+                                 yield {"event": "debug_clear", "data": json.dumps({})}
+                                 yield {
+                                     "event": "complete",
+                                     "data": json.dumps(
+                                         {"success": True, "thread_id": request.threadId}
+                                     ),
+                                 }
+                                 return
+                             # Skip other ToolMessages (jupyter_cell, markdown, etc.) - don't emit their content
+                             continue
+
+                         if hasattr(last_message, "content") and last_message.content:
+                             content = last_message.content
+
+                             # Filter out raw JSON tool responses
+                             if not (
+                                 content.strip().startswith('{"tool":')
+                                 or content.strip().startswith('{"status":')
+                                 or '"pending_execution"' in content
+                                 or '"status": "complete"' in content
+                             ):
+                                 yield {
+                                     "event": "token",
+                                     "data": json.dumps({"content": content}),
+                                 }
+
+                         if (
+                             hasattr(last_message, "tool_calls")
+                             and last_message.tool_calls
+                         ):
+                             # Filter out already processed tool calls (avoid duplicates from middleware)
+                             new_tool_calls = [
+                                 tc
+                                 for tc in last_message.tool_calls
+                                 if tc.get("id") not in processed_tool_call_ids
+                             ]
+
+                             if not new_tool_calls:
+                                 # All tool calls already processed, skip
+                                 continue
+
+                             # Mark these tool calls as processed
+                             for tc in new_tool_calls:
+                                 if tc.get("id"):
+                                     processed_tool_call_ids.add(tc["id"])
+
+                             logger.info(
+                                 "Resume AIMessage tool_calls: %s",
+                                 json.dumps(new_tool_calls, ensure_ascii=False),
+                             )
+                             todos = _emit_todos_from_tool_calls(new_tool_calls)
+                             if todos:
+                                 latest_todos = todos
+                                 yield {
+                                     "event": "todos",
+                                     "data": json.dumps({"todos": todos}),
+                                 }
+                             for tool_call in new_tool_calls:
+                                 tool_name = tool_call.get("name", "unknown")
+                                 tool_args = tool_call.get("args", {})
+                                 if tool_args.get("execution_result"):
+                                     logger.info(
+                                         "Resume tool_call includes execution_result; skipping client execution for %s",
+                                         tool_name,
+                                     )
+                                     continue
+
+                                 yield {
+                                     "event": "debug",
+                                     "data": json.dumps(
+                                         {"status": f"🔧 Running tool: {tool_name}"}
+                                     ),
+                                 }
+
+                                 if tool_name in ("jupyter_cell_tool", "jupyter_cell"):
+                                     yield {
+                                         "event": "tool_call",
+                                         "data": json.dumps(
+                                             {
+                                                 "tool": "jupyter_cell",
+                                                 "code": tool_args.get("code", ""),
+                                                 "description": tool_args.get(
+                                                     "description", ""
+                                                 ),
+                                             }
+                                         ),
+                                     }
+                                 elif tool_name in ("markdown_tool", "markdown"):
+                                     yield {
+                                         "event": "tool_call",
+                                         "data": json.dumps(
+                                             {
+                                                 "tool": "markdown",
+                                                 "content": tool_args.get("content", ""),
+                                             }
+                                         ),
+                                     }
+
+             # Clear debug status before completion
+             yield {"event": "debug_clear", "data": json.dumps({})}
+
+             # Execution completed - stream ended without final_answer
+             logger.warning(
+                 "Resume stream ended without final_answer_tool after %d steps. "
+                 "Last signature: %s, Latest todos: %s",
+                 step_count,
+                 last_signature,
+                 latest_todos,
+             )
+             yield {
+                 "event": "complete",
+                 "data": json.dumps({"success": True, "thread_id": request.threadId}),
+             }
+
+         except Exception as e:
+             logger.error(f"Resume error: {e}", exc_info=True)
+             yield {
+                 "event": "error",
+                 "data": json.dumps(
+                     {
+                         "error": str(e),
+                         "error_type": type(e).__name__,
+                     }
+                 ),
+             }
+
+     return EventSourceResponse(event_generator())
+
+
+ @router.post("/search")
+ async def search_workspace(
+     pattern: str,
+     path: str = ".",
+     file_types: Optional[List[str]] = None,
+     notebook_path: Optional[str] = None,
+     workspace_root: str = ".",
+ ) -> Dict[str, Any]:
+     """
+     Search for patterns in workspace files and notebooks.
+
+     Args:
+         pattern: Search pattern (text or regex)
+         path: Directory to search
+         file_types: File patterns to include
+         notebook_path: Specific notebook to search
+         workspace_root: Workspace root directory
+     """
+     from agent_server.langchain.executors.notebook_searcher import NotebookSearcher
+
+     searcher = NotebookSearcher(workspace_root)
+
+     if notebook_path:
+         results = searcher.search_notebook(
+             notebook_path,
+             pattern,
+             max_results=50,
+         )
+     else:
+         results = searcher.search_workspace(
+             pattern,
+             file_patterns=file_types,
+             path=path,
+             max_results=100,
+         )
+
+     return results.to_dict()
+
+
+ @router.get("/health")
+ async def health_check() -> Dict[str, Any]:
+     """Health check for LangChain agent router"""
+     return {
+         "status": "ok",
+         "router": "langchain-agent",
+         "version": "1.0.0",
+     }
+
+
+ @router.delete("/cache")
+ async def clear_agent_cache() -> Dict[str, Any]:
+     """Clear the agent instance cache"""
+     count = len(_simple_agent_checkpointers)
+     _simple_agent_checkpointers.clear()
+     _simple_agent_pending_actions.clear()
+
+     return {
+         "status": "ok",
+         "cleared": count,
+         "message": f"Cleared {count} cached agent instances",
+     }
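
For orientation, the human-in-the-loop flow this file introduces can be exercised end to end: POST /langchain/stream, render token/tool_call/todos events as they arrive, and when an interrupt event appears, POST the user's decisions to /langchain/resume with the same thread_id. The sketch below is illustrative only (it is not shipped in the package); it assumes the agent server listens on http://localhost:8000 and uses httpx as the HTTP client, while the endpoint paths, event names, and decision shapes come from the router above.

    import json

    import httpx  # assumed client library; any HTTP client that can stream works

    BASE = "http://localhost:8000/langchain"  # assumed host/port


    def consume_sse(path: str, payload: dict) -> None:
        """POST a JSON payload and parse the Server-Sent Events reply line by line."""
        with httpx.Client(timeout=None) as client:
            with client.stream("POST", BASE + path, json=payload) as response:
                event = None
                for line in response.iter_lines():
                    if line.startswith("event:"):
                        event = line.split(":", 1)[1].strip()
                    elif line.startswith("data:"):
                        handle(event, json.loads(line.split(":", 1)[1].strip()))


    def handle(event: str, data: dict) -> None:
        if event == "token":
            print(data["content"], end="", flush=True)
        elif event == "tool_call":
            # The frontend is expected to execute jupyter_cell/markdown payloads.
            print(f"\n[tool_call] {data.get('tool')}")
        elif event == "interrupt":
            # HITL pause: send one decision per pending action, then the run
            # continues on /resume under the same thread_id. A decision is one of
            # {"type": "approve"}, {"type": "edit", "args": {...}}, or
            # {"type": "reject", "feedback": "..."}.
            consume_sse(
                "/resume",
                {"threadId": data["thread_id"], "decisions": [{"type": "approve"}]},
            )
        elif event == "complete":
            print("\n[complete]", data)


    consume_sse("/stream", {"request": "Load data.csv and show the first 5 rows"})

Note that the checkpointers are held in process-local dicts keyed by thread_id (InMemorySaver per thread), so a /resume call only works against the same server process that emitted the interrupt.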