hdsp-jupyter-extension 2.0.18__py3-none-any.whl → 2.0.20__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (52)
  1. agent_server/langchain/agent_prompts/planner_prompt.py +22 -11
  2. agent_server/langchain/custom_middleware.py +97 -65
  3. agent_server/langchain/llm_factory.py +37 -5
  4. agent_server/langchain/logging_utils.py +41 -16
  5. agent_server/langchain/models/__init__.py +5 -0
  6. agent_server/langchain/models/gpt_oss_chat.py +351 -0
  7. agent_server/langchain/prompts.py +12 -7
  8. agent_server/routers/langchain_agent.py +10 -0
  9. {hdsp_jupyter_extension-2.0.18.data → hdsp_jupyter_extension-2.0.20.data}/data/share/jupyter/labextensions/hdsp-agent/build_log.json +1 -1
  10. {hdsp_jupyter_extension-2.0.18.data → hdsp_jupyter_extension-2.0.20.data}/data/share/jupyter/labextensions/hdsp-agent/package.json +2 -2
  11. hdsp_jupyter_extension-2.0.18.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.037b3c8e5d6a92b63b16.js → hdsp_jupyter_extension-2.0.20.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.96745acc14125453fba8.js +36 -2
  12. hdsp_jupyter_extension-2.0.20.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.96745acc14125453fba8.js.map +1 -0
  13. jupyter_ext/labextension/static/lib_index_js.8f72c63cdf542389aa9d.js → hdsp_jupyter_extension-2.0.20.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.90f80cb80187de8c5ae5.js +91 -8
  14. hdsp_jupyter_extension-2.0.20.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.90f80cb80187de8c5ae5.js.map +1 -0
  15. hdsp_jupyter_extension-2.0.18.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.5099145cc2b28312d170.js → hdsp_jupyter_extension-2.0.20.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.586bf5521d043cdd37b8.js +3 -3
  16. jupyter_ext/labextension/static/remoteEntry.5099145cc2b28312d170.js.map → hdsp_jupyter_extension-2.0.20.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.586bf5521d043cdd37b8.js.map +1 -1
  17. {hdsp_jupyter_extension-2.0.18.dist-info → hdsp_jupyter_extension-2.0.20.dist-info}/METADATA +1 -1
  18. {hdsp_jupyter_extension-2.0.18.dist-info → hdsp_jupyter_extension-2.0.20.dist-info}/RECORD +48 -46
  19. jupyter_ext/_version.py +1 -1
  20. jupyter_ext/labextension/build_log.json +1 -1
  21. jupyter_ext/labextension/package.json +2 -2
  22. jupyter_ext/labextension/static/{frontend_styles_index_js.037b3c8e5d6a92b63b16.js → frontend_styles_index_js.96745acc14125453fba8.js} +36 -2
  23. jupyter_ext/labextension/static/frontend_styles_index_js.96745acc14125453fba8.js.map +1 -0
  24. hdsp_jupyter_extension-2.0.18.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.8f72c63cdf542389aa9d.js → jupyter_ext/labextension/static/lib_index_js.90f80cb80187de8c5ae5.js +91 -8
  25. jupyter_ext/labextension/static/lib_index_js.90f80cb80187de8c5ae5.js.map +1 -0
  26. jupyter_ext/labextension/static/{remoteEntry.5099145cc2b28312d170.js → remoteEntry.586bf5521d043cdd37b8.js} +3 -3
  27. hdsp_jupyter_extension-2.0.18.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.5099145cc2b28312d170.js.map → jupyter_ext/labextension/static/remoteEntry.586bf5521d043cdd37b8.js.map +1 -1
  28. hdsp_jupyter_extension-2.0.18.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.037b3c8e5d6a92b63b16.js.map +0 -1
  29. hdsp_jupyter_extension-2.0.18.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.8f72c63cdf542389aa9d.js.map +0 -1
  30. jupyter_ext/labextension/static/frontend_styles_index_js.037b3c8e5d6a92b63b16.js.map +0 -1
  31. jupyter_ext/labextension/static/lib_index_js.8f72c63cdf542389aa9d.js.map +0 -1
  32. {hdsp_jupyter_extension-2.0.18.data → hdsp_jupyter_extension-2.0.20.data}/data/etc/jupyter/jupyter_server_config.d/hdsp_jupyter_extension.json +0 -0
  33. {hdsp_jupyter_extension-2.0.18.data → hdsp_jupyter_extension-2.0.20.data}/data/share/jupyter/labextensions/hdsp-agent/install.json +0 -0
  34. {hdsp_jupyter_extension-2.0.18.data → hdsp_jupyter_extension-2.0.20.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b80.c095373419d05e6f141a.js +0 -0
  35. {hdsp_jupyter_extension-2.0.18.data → hdsp_jupyter_extension-2.0.20.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b80.c095373419d05e6f141a.js.map +0 -0
  36. {hdsp_jupyter_extension-2.0.18.data → hdsp_jupyter_extension-2.0.20.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b81.61e75fb98ecff46cf836.js +0 -0
  37. {hdsp_jupyter_extension-2.0.18.data → hdsp_jupyter_extension-2.0.20.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b81.61e75fb98ecff46cf836.js.map +0 -0
  38. {hdsp_jupyter_extension-2.0.18.data → hdsp_jupyter_extension-2.0.20.data}/data/share/jupyter/labextensions/hdsp-agent/static/style.js +0 -0
  39. {hdsp_jupyter_extension-2.0.18.data → hdsp_jupyter_extension-2.0.20.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_babel_runtime_helpers_esm_extends_js-node_modules_emotion_serialize_dist-051195.e2553aab0c3963b83dd7.js +0 -0
  40. {hdsp_jupyter_extension-2.0.18.data → hdsp_jupyter_extension-2.0.20.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_babel_runtime_helpers_esm_extends_js-node_modules_emotion_serialize_dist-051195.e2553aab0c3963b83dd7.js.map +0 -0
  41. {hdsp_jupyter_extension-2.0.18.data → hdsp_jupyter_extension-2.0.20.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js.24edcc52a1c014a8a5f0.js +0 -0
  42. {hdsp_jupyter_extension-2.0.18.data → hdsp_jupyter_extension-2.0.20.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js.24edcc52a1c014a8a5f0.js.map +0 -0
  43. {hdsp_jupyter_extension-2.0.18.data → hdsp_jupyter_extension-2.0.20.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.19ecf6babe00caff6b8a.js +0 -0
  44. {hdsp_jupyter_extension-2.0.18.data → hdsp_jupyter_extension-2.0.20.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.19ecf6babe00caff6b8a.js.map +0 -0
  45. {hdsp_jupyter_extension-2.0.18.data → hdsp_jupyter_extension-2.0.20.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_styled_dist_emotion-styled_browser_development_esm_js.661fb5836f4978a7c6e1.js +0 -0
  46. {hdsp_jupyter_extension-2.0.18.data → hdsp_jupyter_extension-2.0.20.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_styled_dist_emotion-styled_browser_development_esm_js.661fb5836f4978a7c6e1.js.map +0 -0
  47. {hdsp_jupyter_extension-2.0.18.data → hdsp_jupyter_extension-2.0.20.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_index_js.985697e0162d8d088ca2.js +0 -0
  48. {hdsp_jupyter_extension-2.0.18.data → hdsp_jupyter_extension-2.0.20.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_index_js.985697e0162d8d088ca2.js.map +0 -0
  49. {hdsp_jupyter_extension-2.0.18.data → hdsp_jupyter_extension-2.0.20.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.1f5038488cdfd8b3a85d.js +0 -0
  50. {hdsp_jupyter_extension-2.0.18.data → hdsp_jupyter_extension-2.0.20.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.1f5038488cdfd8b3a85d.js.map +0 -0
  51. {hdsp_jupyter_extension-2.0.18.dist-info → hdsp_jupyter_extension-2.0.20.dist-info}/WHEEL +0 -0
  52. {hdsp_jupyter_extension-2.0.18.dist-info → hdsp_jupyter_extension-2.0.20.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,351 @@
+ """
+ ChatGPTOSS: Custom ChatModel for gpt-oss (Harmony format).
+
+ gpt-oss uses a different instruction hierarchy:
+ - developer: behavioral rules/instructions (highest priority)
+ - system: metadata (date, cutoff, tools)
+ - user: actual questions
+
+ LangChain's ChatOpenAI sends everything as 'system', which gpt-oss treats as
+ low-priority metadata. This class converts SystemMessage to the 'developer'
+ role for proper instruction following.
+ """
+
+ import json
+ import logging
+ import uuid
+ from typing import Any, Dict, Iterator, List, Optional, Union
+
+ from langchain_core.callbacks import CallbackManagerForLLMRun
+ from langchain_core.language_models.chat_models import BaseChatModel
+ from langchain_core.messages import (
+     AIMessage,
+     AIMessageChunk,
+     BaseMessage,
+     HumanMessage,
+     SystemMessage,
+     ToolMessage,
+ )
+ from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
+ from langchain_core.tools import BaseTool
+ from openai import OpenAI
+ from pydantic import Field
+
+ logger = logging.getLogger(__name__)
+
+
+ class ChatGPTOSS(BaseChatModel):
+     """ChatModel for gpt-oss with developer role support.
+
+     Converts SystemMessage to the 'developer' role for proper instruction
+     hierarchy in gpt-oss (Harmony format) models.
+     """
+
+     client: Any = Field(default=None, exclude=True)
+     model: str = Field(default="openai/gpt-oss-120b")
+     base_url: str = Field(default="http://localhost:8000/v1")
+     api_key: str = Field(default="dummy")
+     temperature: float = Field(default=0.0)
+     max_tokens: int = Field(default=8192)
+     streaming: bool = Field(default=False)
+
+     # Tool-related fields (private, not exposed to pydantic)
+     _tools: Optional[List[Dict[str, Any]]] = None
+     _tool_choice: Optional[Union[str, Dict[str, Any]]] = None
+
+     def __init__(self, callbacks=None, **kwargs):
+         # Pass callbacks through to super().__init__;
+         # BaseChatModel handles callbacks through its own mechanism.
+         super().__init__(callbacks=callbacks, **kwargs)
+         # Initialize the OpenAI-compatible client
+         self.client = OpenAI(
+             base_url=self.base_url,
+             api_key=self.api_key,
+         )
+         self._tools = None
+         self._tool_choice = None
+
+     @property
+     def _llm_type(self) -> str:
+         return "gpt-oss"
+
+     @property
+     def _identifying_params(self) -> Dict[str, Any]:
+         return {
+             "model": self.model,
+             "base_url": self.base_url,
+             "temperature": self.temperature,
+             "max_tokens": self.max_tokens,
+         }
+
+     def bind_tools(
+         self,
+         tools: List[Union[BaseTool, Dict[str, Any]]],
+         *,
+         tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
+         **kwargs,
+     ) -> "ChatGPTOSS":
+         """Bind tools to the model.
+
+         Returns a new instance with tools bound.
+         """
+         # Convert tools to OpenAI format
+         formatted_tools = []
+         for tool in tools:
+             if isinstance(tool, BaseTool):
+                 # Convert LangChain tool to OpenAI format
+                 tool_schema = {
+                     "type": "function",
+                     "function": {
+                         "name": tool.name,
+                         "description": tool.description or "",
+                         "parameters": tool.args_schema.schema() if tool.args_schema else {"type": "object", "properties": {}},
+                     },
+                 }
+                 formatted_tools.append(tool_schema)
+             elif isinstance(tool, dict):
+                 # Already in dict format; ensure it has the correct structure
+                 if "type" not in tool:
+                     tool = {"type": "function", "function": tool}
+                 formatted_tools.append(tool)
+
+         # Create a new instance with tools bound
+         new_instance = ChatGPTOSS(
+             model=self.model,
+             base_url=self.base_url,
+             api_key=self.api_key,
+             temperature=self.temperature,
+             max_tokens=self.max_tokens,
+             streaming=self.streaming,
+         )
+         new_instance._tools = formatted_tools
+         new_instance._tool_choice = tool_choice
+         return new_instance
+
+     def _convert_messages(self, messages: List[BaseMessage]) -> List[Dict[str, Any]]:
+         """Convert LangChain messages to OpenAI format with developer role.
+
+         Key conversion: SystemMessage -> role=developer
+         """
+         result = []
+
+         for msg in messages:
+             if isinstance(msg, SystemMessage):
+                 # Convert system to developer for gpt-oss instruction hierarchy
+                 result.append({
+                     "role": "developer",
+                     "content": msg.content,
+                 })
+             elif isinstance(msg, HumanMessage):
+                 result.append({
+                     "role": "user",
+                     "content": msg.content,
+                 })
+             elif isinstance(msg, AIMessage):
+                 ai_msg: Dict[str, Any] = {
+                     "role": "assistant",
+                     "content": msg.content or "",
+                 }
+                 # Include tool calls if present
+                 tool_calls = getattr(msg, "tool_calls", None)
+                 if tool_calls:
+                     ai_msg["tool_calls"] = [
+                         {
+                             "id": tc.get("id", str(uuid.uuid4())[:8]),
+                             "type": "function",
+                             "function": {
+                                 "name": tc["name"],
+                                 "arguments": json.dumps(tc["args"]) if isinstance(tc["args"], dict) else tc["args"],
+                             },
+                         }
+                         for tc in tool_calls
+                     ]
+                 result.append(ai_msg)
+             elif isinstance(msg, ToolMessage):
+                 result.append({
+                     "role": "tool",
+                     "tool_call_id": msg.tool_call_id,
+                     "content": msg.content,
+                 })
+             else:
+                 # Fallback for other message types
+                 role = getattr(msg, "role", "user")
+                 result.append({
+                     "role": role,
+                     "content": msg.content,
+                 })
+
+         return result
+
+     def _create_chat_result(self, response) -> ChatResult:
+         """Convert an OpenAI response to a LangChain ChatResult."""
+         choice = response.choices[0]
+         message = choice.message
+
+         # Build AIMessage
+         content = message.content or ""
+         additional_kwargs: Dict[str, Any] = {}
+         tool_calls_list = []
+
+         if message.tool_calls:
+             additional_kwargs["tool_calls"] = [
+                 {
+                     "id": tc.id,
+                     "type": "function",
+                     "function": {
+                         "name": tc.function.name,
+                         "arguments": tc.function.arguments,
+                     },
+                 }
+                 for tc in message.tool_calls
+             ]
+             # Also convert to LangChain tool_calls format
+             for tc in message.tool_calls:
+                 try:
+                     args = json.loads(tc.function.arguments)
+                 except json.JSONDecodeError:
+                     args = {"raw": tc.function.arguments}
+                 tool_calls_list.append({
+                     "name": tc.function.name,
+                     "args": args,
+                     "id": tc.id,
+                     "type": "tool_call",
+                 })
+
+         ai_message = AIMessage(
+             content=content,
+             additional_kwargs=additional_kwargs,
+             tool_calls=tool_calls_list if tool_calls_list else [],
+             response_metadata={
+                 "model_name": response.model,
+                 "finish_reason": choice.finish_reason,
+                 "id": response.id,
+             },
+         )
+
+         # Add usage metadata if available
+         if response.usage:
+             ai_message.usage_metadata = {
+                 "input_tokens": response.usage.prompt_tokens,
+                 "output_tokens": response.usage.completion_tokens,
+                 "total_tokens": response.usage.total_tokens,
+             }
+
+         generation = ChatGeneration(message=ai_message)
+         return ChatResult(generations=[generation])
+
+     def _generate(
+         self,
+         messages: List[BaseMessage],
+         stop: Optional[List[str]] = None,
+         run_manager: Optional[CallbackManagerForLLMRun] = None,
+         **kwargs,
+     ) -> ChatResult:
+         """Generate a response from the model."""
+         openai_messages = self._convert_messages(messages)
+
+         # Build request kwargs
+         request_kwargs: Dict[str, Any] = {
+             "model": self.model,
+             "messages": openai_messages,
+             "temperature": self.temperature,
+             "max_tokens": self.max_tokens,
+         }
+
+         if stop:
+             request_kwargs["stop"] = stop
+
+         if self._tools:
+             request_kwargs["tools"] = self._tools
+         if self._tool_choice:
+             request_kwargs["tool_choice"] = self._tool_choice
+
+         # Make the API call
+         logger.debug(f"ChatGPTOSS request: model={self.model}, messages_count={len(openai_messages)}")
+         response = self.client.chat.completions.create(**request_kwargs)
+
+         return self._create_chat_result(response)
+
+     def _stream(
+         self,
+         messages: List[BaseMessage],
+         stop: Optional[List[str]] = None,
+         run_manager: Optional[CallbackManagerForLLMRun] = None,
+         **kwargs,
+     ) -> Iterator[ChatGenerationChunk]:
+         """Stream responses from the model."""
+         openai_messages = self._convert_messages(messages)
+
+         # Build request kwargs
+         request_kwargs: Dict[str, Any] = {
+             "model": self.model,
+             "messages": openai_messages,
+             "temperature": self.temperature,
+             "max_tokens": self.max_tokens,
+             "stream": True,
+         }
+
+         if stop:
+             request_kwargs["stop"] = stop
+
+         if self._tools:
+             request_kwargs["tools"] = self._tools
+         if self._tool_choice:
+             request_kwargs["tool_choice"] = self._tool_choice
+
+         # Make the streaming API call
+         response = self.client.chat.completions.create(**request_kwargs)
+
+         # Accumulate tool calls across chunks
+         tool_calls_accum: Dict[int, Dict[str, Any]] = {}
+
+         for chunk in response:
+             if not chunk.choices:
+                 continue
+
+             choice = chunk.choices[0]
+             delta = choice.delta
+
+             content = delta.content or ""
+             additional_kwargs: Dict[str, Any] = {}
+             tool_call_chunks = []
+
+             # Handle tool calls in streaming
+             if delta.tool_calls:
+                 for tc in delta.tool_calls:
+                     idx = tc.index
+                     if idx not in tool_calls_accum:
+                         tool_calls_accum[idx] = {
+                             "id": tc.id or "",
+                             "name": "",
+                             "arguments": "",
+                         }
+                     if tc.id:
+                         tool_calls_accum[idx]["id"] = tc.id
+                     if tc.function:
+                         if tc.function.name:
+                             tool_calls_accum[idx]["name"] = tc.function.name
+                         if tc.function.arguments:
+                             tool_calls_accum[idx]["arguments"] += tc.function.arguments
+
+                     # Build tool call chunk for LangChain
+                     tool_call_chunks.append({
+                         "index": idx,
+                         "id": tool_calls_accum[idx]["id"],
+                         "name": tool_calls_accum[idx]["name"],
+                         "args": tool_calls_accum[idx]["arguments"],
+                     })
+
+             # Create chunk message
+             chunk_message = AIMessageChunk(
+                 content=content,
+                 additional_kwargs=additional_kwargs,
+                 tool_call_chunks=tool_call_chunks if tool_call_chunks else [],
+             )
+
+             # Add finish reason on the last chunk
+             if choice.finish_reason:
+                 chunk_message.response_metadata = {
+                     "finish_reason": choice.finish_reason,
+                 }
+
+             yield ChatGenerationChunk(message=chunk_message)
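For orientation, here is a minimal usage sketch of the new ChatGPTOSS class (not part of the diff). The local endpoint URL and the get_weather tool are hypothetical illustrations; the import path follows the new module added above.

from langchain_core.messages import HumanMessage, SystemMessage

from agent_server.langchain.models.gpt_oss_chat import ChatGPTOSS

# Point at any OpenAI-compatible server hosting gpt-oss (assumed local vLLM here).
llm = ChatGPTOSS(
    model="openai/gpt-oss-120b",
    base_url="http://localhost:8000/v1",
    api_key="dummy",
)

# SystemMessage is sent as role=developer, so gpt-oss treats it as a
# high-priority behavioral instruction rather than low-priority metadata.
messages = [
    SystemMessage(content="Answer in exactly one sentence."),
    HumanMessage(content="Why does gpt-oss distinguish developer from system?"),
]
result = llm.invoke(messages)
print(result.content)

# bind_tools accepts OpenAI-style dicts (or LangChain BaseTools) and returns
# a new bound instance; a dict without "type" is wrapped automatically.
get_weather = {  # hypothetical tool, for illustration only
    "name": "get_weather",
    "description": "Get the current weather for a city",
    "parameters": {"type": "object", "properties": {"city": {"type": "string"}}},
}
llm_with_tools = llm.bind_tools([get_weather], tool_choice="auto")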
@@ -19,14 +19,15 @@ DEFAULT_SYSTEM_PROMPT = """You are an expert Python data scientist and Jupyter n
  # write_todos rules [required]
  - Write in Korean
  - **🔴 Never delete existing todos**: always include the full list and change only the status
- - Wrong example: [{"content": "Task summary", "status": "completed"}] ← the existing todos got deleted!
- - Correct example: [{"content": "Existing task 1", "status": "completed"}, {"content": "Existing task 2", "status": "completed"}, {"content": "Task summary", "status": "completed"}]
- - **Batch updates**: handle consecutively completed todos in a single write_todos call
- - Keep **only one** todo in_progress
+ - **🔴 State transitions must follow the order**: pending → in_progress → completed (no skipping!)
+ - **🔴 Initial creation rule**: on the first write_todos call, only the first todo is in_progress; all the rest are pending
+ - Correct initial example: [{"content": "Task 1", "status": "in_progress"}, {"content": "Task 2", "status": "pending"}, {"content": "Summarize work and present next steps", "status": "pending"}]
+ - Wrong initial example: [{"content": "Task 1", "status": "completed"}, ...] ← never mark completed without doing the actual work!
+ - **🔴 Condition for completed**: switch to completed only after the work has actually been done with tools
+ - Keep **only one** todo in_progress at a time
  - **[Required] The last todo must be created as "Summarize work and present next steps"**
  - **🔴 [Execution order required]**: "Summarize work and present next steps" must be **executed last of all**
  - Change this todo to in_progress only after every other todo is completed
- - Do not confuse it with other, similarly named tasks (report review, result cleanup, etc.)
  - **[Important] Mark "Summarize work and present next steps" completed only after the summary JSON has been emitted**
  
  # After all tasks are complete [required]
@@ -41,6 +42,7 @@ DEFAULT_SYSTEM_PROMPT = """You are an expert Python data scientist and Jupyter n
  - check_resource_tool: required before working with large files/DataFrames
  - read_file_tool: for large files, inspect with limit=100 first
  - jupyter_cell_tool: chart labels in English
+ - **On KeyboardInterrupt**: confirm the reason for the interruption with ask_user_tool (e.g., "Code execution was interrupted. It would help if you let me know why.")
  - **After editing a file**: checking for errors with diagnostics_tool is required
  
  # Requesting user input [important]
@@ -85,8 +87,11 @@ TODO_LIST_TOOL_DESCRIPTION = """Todo list management tool.
  - When progress tracking is needed
  
  Rules:
- - Only one todo in_progress
- - Change to completed as soon as a todo is done
+ - **🔴 Never delete existing todos**: change only the status and keep the full list
+ - **🔴 State transitions must follow the order**: pending → in_progress → completed (no skipping!)
+ - **🔴 Initial creation**: on the first call, only the first todo is in_progress; the rest are pending
+ - **🔴 Condition for completed**: switch to completed only after the work has actually been done with tools
+ - **Only one** todo may be in_progress at a time
  - **[Required] The last todo must be created as "Summarize work and present next steps"**
  - **🔴 [Execution order]**: execute todos strictly in list order, with "Summarize work and present next steps" executed last
  - In this "Summarize work and present next steps" todo, output the overall work summary and suggested next steps as JSON:
@@ -1082,6 +1082,11 @@ async def stream_agent(request: AgentRequest):
  
          # Handle AIMessage
          elif isinstance(last_message, AIMessage):
+             # LLM Response separator for easy log reading
+             print("\n" + "🔵" * 48, flush=True)
+             print("=" * 96, flush=True)
+             print(" ✨ LLM RESPONSE", flush=True)
+             print("=" * 96, flush=True)
              logger.info(
                  "SimpleAgent AIMessage content: %s",
                  last_message.content or "",
@@ -1115,6 +1120,11 @@ async def stream_agent(request: AgentRequest):
                      ensure_ascii=False,
                  ),
              )
+             # LLM Response end separator
+             print("=" * 96, flush=True)
+             print(" ✅ LLM RESPONSE END", flush=True)
+             print("=" * 96, flush=True)
+             print("🔵" * 48 + "\n", flush=True)
              last_finish_reason = (
                  getattr(last_message, "response_metadata", {}) or {}
              ).get("finish_reason")
@@ -722,7 +722,7 @@
      "@mui/material": {},
      "react-markdown": {},
      "hdsp-agent": {
-       "version": "2.0.18",
+       "version": "2.0.20",
        "singleton": true,
        "import": "/Users/a421721/Desktop/hdsp/hdsp_agent/extensions/jupyter/lib/index.js"
      }
@@ -1,6 +1,6 @@
  {
    "name": "hdsp-agent",
-   "version": "2.0.18",
+   "version": "2.0.20",
    "description": "HDSP Agent JupyterLab Extension - Thin client for Agent Server",
    "keywords": [
      "jupyter",
@@ -132,7 +132,7 @@
      }
    },
    "_build": {
-     "load": "static/remoteEntry.5099145cc2b28312d170.js",
+     "load": "static/remoteEntry.586bf5521d043cdd37b8.js",
      "extension": "./extension",
      "style": "./style"
    }