hdsp-jupyter-extension: 2.0.19 (py3-none-any.whl) → 2.0.21 (py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (49)
  1. agent_server/langchain/agent_prompts/planner_prompt.py +3 -0
  2. agent_server/langchain/custom_middleware.py +0 -17
  3. agent_server/langchain/llm_factory.py +45 -5
  4. agent_server/langchain/logging_utils.py +108 -30
  5. agent_server/langchain/middleware/subagent_middleware.py +80 -11
  6. agent_server/langchain/models/__init__.py +5 -0
  7. agent_server/langchain/models/gpt_oss_chat.py +351 -0
  8. agent_server/langchain/prompts.py +1 -0
  9. agent_server/routers/langchain_agent.py +10 -0
  10. {hdsp_jupyter_extension-2.0.19.data → hdsp_jupyter_extension-2.0.21.data}/data/share/jupyter/labextensions/hdsp-agent/build_log.json +1 -1
  11. {hdsp_jupyter_extension-2.0.19.data → hdsp_jupyter_extension-2.0.21.data}/data/share/jupyter/labextensions/hdsp-agent/package.json +2 -2
  12. jupyter_ext/labextension/static/lib_index_js.1917fbaea37d75dc69b3.js → hdsp_jupyter_extension-2.0.21.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.90f80cb80187de8c5ae5.js +71 -6
  13. hdsp_jupyter_extension-2.0.21.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.90f80cb80187de8c5ae5.js.map +1 -0
  14. hdsp_jupyter_extension-2.0.19.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.d686ab71eb65b5ef8f15.js → hdsp_jupyter_extension-2.0.21.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.93b1c499786ecd47b837.js +3 -3
  15. hdsp_jupyter_extension-2.0.19.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.d686ab71eb65b5ef8f15.js.map → hdsp_jupyter_extension-2.0.21.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.93b1c499786ecd47b837.js.map +1 -1
  16. {hdsp_jupyter_extension-2.0.19.dist-info → hdsp_jupyter_extension-2.0.21.dist-info}/METADATA +1 -1
  17. {hdsp_jupyter_extension-2.0.19.dist-info → hdsp_jupyter_extension-2.0.21.dist-info}/RECORD +47 -45
  18. jupyter_ext/_version.py +1 -1
  19. jupyter_ext/labextension/build_log.json +1 -1
  20. jupyter_ext/labextension/package.json +2 -2
  21. hdsp_jupyter_extension-2.0.19.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.1917fbaea37d75dc69b3.js → jupyter_ext/labextension/static/lib_index_js.90f80cb80187de8c5ae5.js +71 -6
  22. jupyter_ext/labextension/static/lib_index_js.90f80cb80187de8c5ae5.js.map +1 -0
  23. jupyter_ext/labextension/static/{remoteEntry.d686ab71eb65b5ef8f15.js → remoteEntry.93b1c499786ecd47b837.js} +3 -3
  24. jupyter_ext/labextension/static/{remoteEntry.d686ab71eb65b5ef8f15.js.map → remoteEntry.93b1c499786ecd47b837.js.map} +1 -1
  25. hdsp_jupyter_extension-2.0.19.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.1917fbaea37d75dc69b3.js.map +0 -1
  26. jupyter_ext/labextension/static/lib_index_js.1917fbaea37d75dc69b3.js.map +0 -1
  27. {hdsp_jupyter_extension-2.0.19.data → hdsp_jupyter_extension-2.0.21.data}/data/etc/jupyter/jupyter_server_config.d/hdsp_jupyter_extension.json +0 -0
  28. {hdsp_jupyter_extension-2.0.19.data → hdsp_jupyter_extension-2.0.21.data}/data/share/jupyter/labextensions/hdsp-agent/install.json +0 -0
  29. {hdsp_jupyter_extension-2.0.19.data → hdsp_jupyter_extension-2.0.21.data}/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.96745acc14125453fba8.js +0 -0
  30. {hdsp_jupyter_extension-2.0.19.data → hdsp_jupyter_extension-2.0.21.data}/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.96745acc14125453fba8.js.map +0 -0
  31. {hdsp_jupyter_extension-2.0.19.data → hdsp_jupyter_extension-2.0.21.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b80.c095373419d05e6f141a.js +0 -0
  32. {hdsp_jupyter_extension-2.0.19.data → hdsp_jupyter_extension-2.0.21.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b80.c095373419d05e6f141a.js.map +0 -0
  33. {hdsp_jupyter_extension-2.0.19.data → hdsp_jupyter_extension-2.0.21.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b81.61e75fb98ecff46cf836.js +0 -0
  34. {hdsp_jupyter_extension-2.0.19.data → hdsp_jupyter_extension-2.0.21.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b81.61e75fb98ecff46cf836.js.map +0 -0
  35. {hdsp_jupyter_extension-2.0.19.data → hdsp_jupyter_extension-2.0.21.data}/data/share/jupyter/labextensions/hdsp-agent/static/style.js +0 -0
  36. {hdsp_jupyter_extension-2.0.19.data → hdsp_jupyter_extension-2.0.21.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_babel_runtime_helpers_esm_extends_js-node_modules_emotion_serialize_dist-051195.e2553aab0c3963b83dd7.js +0 -0
  37. {hdsp_jupyter_extension-2.0.19.data → hdsp_jupyter_extension-2.0.21.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_babel_runtime_helpers_esm_extends_js-node_modules_emotion_serialize_dist-051195.e2553aab0c3963b83dd7.js.map +0 -0
  38. {hdsp_jupyter_extension-2.0.19.data → hdsp_jupyter_extension-2.0.21.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js.24edcc52a1c014a8a5f0.js +0 -0
  39. {hdsp_jupyter_extension-2.0.19.data → hdsp_jupyter_extension-2.0.21.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js.24edcc52a1c014a8a5f0.js.map +0 -0
  40. {hdsp_jupyter_extension-2.0.19.data → hdsp_jupyter_extension-2.0.21.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.19ecf6babe00caff6b8a.js +0 -0
  41. {hdsp_jupyter_extension-2.0.19.data → hdsp_jupyter_extension-2.0.21.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.19ecf6babe00caff6b8a.js.map +0 -0
  42. {hdsp_jupyter_extension-2.0.19.data → hdsp_jupyter_extension-2.0.21.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_styled_dist_emotion-styled_browser_development_esm_js.661fb5836f4978a7c6e1.js +0 -0
  43. {hdsp_jupyter_extension-2.0.19.data → hdsp_jupyter_extension-2.0.21.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_styled_dist_emotion-styled_browser_development_esm_js.661fb5836f4978a7c6e1.js.map +0 -0
  44. {hdsp_jupyter_extension-2.0.19.data → hdsp_jupyter_extension-2.0.21.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_index_js.985697e0162d8d088ca2.js +0 -0
  45. {hdsp_jupyter_extension-2.0.19.data → hdsp_jupyter_extension-2.0.21.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_index_js.985697e0162d8d088ca2.js.map +0 -0
  46. {hdsp_jupyter_extension-2.0.19.data → hdsp_jupyter_extension-2.0.21.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.1f5038488cdfd8b3a85d.js +0 -0
  47. {hdsp_jupyter_extension-2.0.19.data → hdsp_jupyter_extension-2.0.21.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.1f5038488cdfd8b3a85d.js.map +0 -0
  48. {hdsp_jupyter_extension-2.0.19.dist-info → hdsp_jupyter_extension-2.0.21.dist-info}/WHEEL +0 -0
  49. {hdsp_jupyter_extension-2.0.19.dist-info → hdsp_jupyter_extension-2.0.21.dist-info}/licenses/LICENSE +0 -0
@@ -0,0 +1,351 @@
+ """
+ ChatGPTOSS: Custom ChatModel for gpt-oss (Harmony format).
+
+ gpt-oss uses a different instruction hierarchy:
+ - developer: behavioral rules/instructions (highest priority)
+ - system: metadata (date, cutoff, tools)
+ - user: actual questions
+
+ LangChain's ChatOpenAI sends everything as 'system', which gpt-oss treats as low-priority metadata.
+ This class converts SystemMessage to 'developer' role for proper instruction following.
+ """
+
+ import json
+ import logging
+ import uuid
+ from typing import Any, Dict, Iterator, List, Optional, Union
+
+ from langchain_core.callbacks import CallbackManagerForLLMRun
+ from langchain_core.language_models.chat_models import BaseChatModel
+ from langchain_core.messages import (
+     AIMessage,
+     AIMessageChunk,
+     BaseMessage,
+     HumanMessage,
+     SystemMessage,
+     ToolMessage,
+ )
+ from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
+ from langchain_core.tools import BaseTool
+ from openai import OpenAI
+ from pydantic import Field
+
+ logger = logging.getLogger(__name__)
+
+
+ class ChatGPTOSS(BaseChatModel):
+     """ChatModel for gpt-oss with developer role support.
+
+     Converts SystemMessage to 'developer' role for proper instruction hierarchy
+     in gpt-oss (Harmony format) models.
+     """
+
+     client: Any = Field(default=None, exclude=True)
+     model: str = Field(default="openai/gpt-oss-120b")
+     base_url: str = Field(default="http://localhost:8000/v1")
+     api_key: str = Field(default="dummy")
+     temperature: float = Field(default=0.0)
+     max_tokens: int = Field(default=8192)
+     streaming: bool = Field(default=False)
+
+     # Tool-related fields (private, not exposed to pydantic)
+     _tools: Optional[List[Dict[str, Any]]] = None
+     _tool_choice: Optional[Union[str, Dict[str, Any]]] = None
+
+     def __init__(self, callbacks=None, **kwargs):
+         # Remove callbacks from kwargs before super().__init__ if present
+         # BaseChatModel handles callbacks through its own mechanism
+         super().__init__(callbacks=callbacks, **kwargs)
+         # Initialize OpenAI client
+         self.client = OpenAI(
+             base_url=self.base_url,
+             api_key=self.api_key,
+         )
+         self._tools = None
+         self._tool_choice = None
+
+     @property
+     def _llm_type(self) -> str:
+         return "gpt-oss"
+
+     @property
+     def _identifying_params(self) -> Dict[str, Any]:
+         return {
+             "model": self.model,
+             "base_url": self.base_url,
+             "temperature": self.temperature,
+             "max_tokens": self.max_tokens,
+         }
+
+     def bind_tools(
+         self,
+         tools: List[Union[BaseTool, Dict[str, Any]]],
+         *,
+         tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
+         **kwargs,
+     ) -> "ChatGPTOSS":
+         """Bind tools to the model.
+
+         Returns a new instance with tools bound.
+         """
+         # Convert tools to OpenAI format
+         formatted_tools = []
+         for tool in tools:
+             if isinstance(tool, BaseTool):
+                 # Convert LangChain tool to OpenAI format
+                 tool_schema = {
+                     "type": "function",
+                     "function": {
+                         "name": tool.name,
+                         "description": tool.description or "",
+                         "parameters": tool.args_schema.schema() if tool.args_schema else {"type": "object", "properties": {}},
+                     },
+                 }
+                 formatted_tools.append(tool_schema)
+             elif isinstance(tool, dict):
+                 # Already in dict format, ensure it has correct structure
+                 if "type" not in tool:
+                     tool = {"type": "function", "function": tool}
+                 formatted_tools.append(tool)
+
+         # Create new instance with tools bound
+         new_instance = ChatGPTOSS(
+             model=self.model,
+             base_url=self.base_url,
+             api_key=self.api_key,
+             temperature=self.temperature,
+             max_tokens=self.max_tokens,
+             streaming=self.streaming,
+         )
+         new_instance._tools = formatted_tools
+         new_instance._tool_choice = tool_choice
+         return new_instance
+
+     def _convert_messages(self, messages: List[BaseMessage]) -> List[Dict[str, Any]]:
+         """Convert LangChain messages to OpenAI format with developer role.
+
+         Key conversion: SystemMessage -> role=developer
+         """
+         result = []
+
+         for msg in messages:
+             if isinstance(msg, SystemMessage):
+                 # Convert system to developer for gpt-oss instruction hierarchy
+                 result.append({
+                     "role": "developer",
+                     "content": msg.content,
+                 })
+             elif isinstance(msg, HumanMessage):
+                 result.append({
+                     "role": "user",
+                     "content": msg.content,
+                 })
+             elif isinstance(msg, AIMessage):
+                 ai_msg: Dict[str, Any] = {
+                     "role": "assistant",
+                     "content": msg.content or "",
+                 }
+                 # Include tool calls if present
+                 tool_calls = getattr(msg, "tool_calls", None)
+                 if tool_calls:
+                     ai_msg["tool_calls"] = [
+                         {
+                             "id": tc.get("id", str(uuid.uuid4())[:8]),
+                             "type": "function",
+                             "function": {
+                                 "name": tc["name"],
+                                 "arguments": json.dumps(tc["args"]) if isinstance(tc["args"], dict) else tc["args"],
+                             },
+                         }
+                         for tc in tool_calls
+                     ]
+                 result.append(ai_msg)
+             elif isinstance(msg, ToolMessage):
+                 result.append({
+                     "role": "tool",
+                     "tool_call_id": msg.tool_call_id,
+                     "content": msg.content,
+                 })
+             else:
+                 # Fallback for other message types
+                 role = getattr(msg, "role", "user")
+                 result.append({
+                     "role": role,
+                     "content": msg.content,
+                 })
+
+         return result
+
+     def _create_chat_result(self, response) -> ChatResult:
+         """Convert OpenAI response to LangChain ChatResult."""
+         choice = response.choices[0]
+         message = choice.message
+
+         # Build AIMessage
+         content = message.content or ""
+         additional_kwargs: Dict[str, Any] = {}
+         tool_calls_list = []
+
+         if message.tool_calls:
+             additional_kwargs["tool_calls"] = [
+                 {
+                     "id": tc.id,
+                     "type": "function",
+                     "function": {
+                         "name": tc.function.name,
+                         "arguments": tc.function.arguments,
+                     },
+                 }
+                 for tc in message.tool_calls
+             ]
+             # Also convert to LangChain tool_calls format
+             for tc in message.tool_calls:
+                 try:
+                     args = json.loads(tc.function.arguments)
+                 except json.JSONDecodeError:
+                     args = {"raw": tc.function.arguments}
+                 tool_calls_list.append({
+                     "name": tc.function.name,
+                     "args": args,
+                     "id": tc.id,
+                     "type": "tool_call",
+                 })
+
+         ai_message = AIMessage(
+             content=content,
+             additional_kwargs=additional_kwargs,
+             tool_calls=tool_calls_list if tool_calls_list else [],
+             response_metadata={
+                 "model_name": response.model,
+                 "finish_reason": choice.finish_reason,
+                 "id": response.id,
+             },
+         )
+
+         # Add usage metadata if available
+         if response.usage:
+             ai_message.usage_metadata = {
+                 "input_tokens": response.usage.prompt_tokens,
+                 "output_tokens": response.usage.completion_tokens,
+                 "total_tokens": response.usage.total_tokens,
+             }
+
+         generation = ChatGeneration(message=ai_message)
+         return ChatResult(generations=[generation])
+
+     def _generate(
+         self,
+         messages: List[BaseMessage],
+         stop: Optional[List[str]] = None,
+         run_manager: Optional[CallbackManagerForLLMRun] = None,
+         **kwargs,
+     ) -> ChatResult:
+         """Generate a response from the model."""
+         openai_messages = self._convert_messages(messages)
+
+         # Build request kwargs
+         request_kwargs: Dict[str, Any] = {
+             "model": self.model,
+             "messages": openai_messages,
+             "temperature": self.temperature,
+             "max_tokens": self.max_tokens,
+         }
+
+         if stop:
+             request_kwargs["stop"] = stop
+
+         if self._tools:
+             request_kwargs["tools"] = self._tools
+             if self._tool_choice:
+                 request_kwargs["tool_choice"] = self._tool_choice
+
+         # Make API call
+         logger.debug(f"ChatGPTOSS request: model={self.model}, messages_count={len(openai_messages)}")
+         response = self.client.chat.completions.create(**request_kwargs)
+
+         return self._create_chat_result(response)
+
+     def _stream(
+         self,
+         messages: List[BaseMessage],
+         stop: Optional[List[str]] = None,
+         run_manager: Optional[CallbackManagerForLLMRun] = None,
+         **kwargs,
+     ) -> Iterator[ChatGenerationChunk]:
+         """Stream responses from the model."""
+         openai_messages = self._convert_messages(messages)
+
+         # Build request kwargs
+         request_kwargs: Dict[str, Any] = {
+             "model": self.model,
+             "messages": openai_messages,
+             "temperature": self.temperature,
+             "max_tokens": self.max_tokens,
+             "stream": True,
+         }
+
+         if stop:
+             request_kwargs["stop"] = stop
+
+         if self._tools:
+             request_kwargs["tools"] = self._tools
+             if self._tool_choice:
+                 request_kwargs["tool_choice"] = self._tool_choice
+
+         # Make streaming API call
+         response = self.client.chat.completions.create(**request_kwargs)
+
+         # Accumulate tool calls across chunks
+         tool_calls_accum: Dict[int, Dict[str, Any]] = {}
+
+         for chunk in response:
+             if not chunk.choices:
+                 continue
+
+             choice = chunk.choices[0]
+             delta = choice.delta
+
+             content = delta.content or ""
+             additional_kwargs: Dict[str, Any] = {}
+             tool_call_chunks = []
+
+             # Handle tool calls in streaming
+             if delta.tool_calls:
+                 for tc in delta.tool_calls:
+                     idx = tc.index
+                     if idx not in tool_calls_accum:
+                         tool_calls_accum[idx] = {
+                             "id": tc.id or "",
+                             "name": "",
+                             "arguments": "",
+                         }
+                     if tc.id:
+                         tool_calls_accum[idx]["id"] = tc.id
+                     if tc.function:
+                         if tc.function.name:
+                             tool_calls_accum[idx]["name"] = tc.function.name
+                         if tc.function.arguments:
+                             tool_calls_accum[idx]["arguments"] += tc.function.arguments
+
+                     # Build tool call chunk for LangChain
+                     tool_call_chunks.append({
+                         "index": idx,
+                         "id": tool_calls_accum[idx]["id"],
+                         "name": tool_calls_accum[idx]["name"],
+                         "args": tool_calls_accum[idx]["arguments"],
+                     })
+
+             # Create chunk message
+             chunk_message = AIMessageChunk(
+                 content=content,
+                 additional_kwargs=additional_kwargs,
+                 tool_call_chunks=tool_call_chunks if tool_call_chunks else [],
+             )
+
+             # Add finish reason on last chunk
+             if choice.finish_reason:
+                 chunk_message.response_metadata = {
+                     "finish_reason": choice.finish_reason,
+                 }
+
+             yield ChatGenerationChunk(message=chunk_message)
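Note: since ChatGPTOSS subclasses BaseChatModel, it can stand in wherever ChatOpenAI is used today. A minimal usage sketch follows; the endpoint URL and `echo_tool` are illustrative assumptions, not part of the package:

```python
# Minimal sketch: using ChatGPTOSS against a local server that hosts gpt-oss.
# The base_url and echo_tool below are assumptions for illustration only.
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_core.tools import tool

from agent_server.langchain.models.gpt_oss_chat import ChatGPTOSS


@tool
def echo_tool(text: str) -> str:
    """Echo the input back unchanged (hypothetical example tool)."""
    return text


llm = ChatGPTOSS(
    model="openai/gpt-oss-120b",
    base_url="http://localhost:8000/v1",  # assumed local endpoint
    api_key="dummy",
    temperature=0.0,
)

# bind_tools returns a fresh instance with OpenAI-format tool schemas attached.
llm_with_tools = llm.bind_tools([echo_tool])

# The SystemMessage below is sent as role="developer", so gpt-oss treats it
# as high-priority instructions rather than low-priority metadata.
result = llm_with_tools.invoke([
    SystemMessage(content="Always answer in a single sentence."),
    HumanMessage(content="Repeat the word 'harmony' back to me."),
])
print(result.content, result.tool_calls)
```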
@@ -42,6 +42,7 @@ DEFAULT_SYSTEM_PROMPT = """You are an expert Python data scientist and Jupyter n
 - check_resource_tool: 대용량 파일/데이터프레임 작업 전 필수
 - read_file_tool: 대용량 파일은 limit=100으로 먼저 확인
 - jupyter_cell_tool: 차트 라벨은 영어로
+- **KeyboardInterrupt 발생 시**: ask_user_tool로 중단 사유를 사용자에게 확인 (예: "코드 실행이 중단되었습니다. 중단 사유를 알려주시면 도움이 됩니다.")
 - **파일 수정 후**: diagnostics_tool로 오류 확인 필수
 
 # 사용자 입력 요청 [중요]
@@ -1082,6 +1082,11 @@ async def stream_agent(request: AgentRequest):
 
             # Handle AIMessage
             elif isinstance(last_message, AIMessage):
+                # LLM Response separator for easy log reading
+                print("\n" + "🔵" * 48, flush=True)
+                print("=" * 96, flush=True)
+                print(" ✨ LLM RESPONSE", flush=True)
+                print("=" * 96, flush=True)
                 logger.info(
                     "SimpleAgent AIMessage content: %s",
                     last_message.content or "",
@@ -1115,6 +1120,11 @@ async def stream_agent(request: AgentRequest):
                         ensure_ascii=False,
                     ),
                 )
+                # LLM Response end separator
+                print("=" * 96, flush=True)
+                print(" ✅ LLM RESPONSE END", flush=True)
+                print("=" * 96, flush=True)
+                print("🔵" * 48 + "\n", flush=True)
                 last_finish_reason = (
                     getattr(last_message, "response_metadata", {}) or {}
                 ).get("finish_reason")
@@ -722,7 +722,7 @@
       "@mui/material": {},
       "react-markdown": {},
       "hdsp-agent": {
-        "version": "2.0.19",
+        "version": "2.0.21",
         "singleton": true,
         "import": "/Users/a421721/Desktop/hdsp/hdsp_agent/extensions/jupyter/lib/index.js"
       }
@@ -1,6 +1,6 @@
 {
   "name": "hdsp-agent",
-  "version": "2.0.19",
+  "version": "2.0.21",
   "description": "HDSP Agent JupyterLab Extension - Thin client for Agent Server",
   "keywords": [
     "jupyter",
@@ -132,7 +132,7 @@
     }
   },
   "_build": {
-    "load": "static/remoteEntry.d686ab71eb65b5ef8f15.js",
+    "load": "static/remoteEntry.93b1c499786ecd47b837.js",
     "extension": "./extension",
     "style": "./style"
   }
@@ -2295,7 +2295,7 @@ const ChatPanel = (0,react__WEBPACK_IMPORTED_MODULE_0__.forwardRef)(({ apiServic
         }
     }
     const formatted = files.map((file) => {
-        const icon = file.isDir ? _utils_icons__WEBPACK_IMPORTED_MODULE_8__.Icons.folder : _utils_icons__WEBPACK_IMPORTED_MODULE_8__.Icons.file;
+        const icon = file.isDir ? '[DIR]' : '[FILE]';
        const sizeInfo = file.isDir ? '' : ` (${file.size} bytes)`;
        return `${icon} ${file.path}${file.isDir ? '/' : sizeInfo}`;
    }).join('\n');
@@ -4240,6 +4240,8 @@ const SettingsPanel = ({ onClose, onSave, currentConfig }) => {
     const [vllmEndpoint, setVllmEndpoint] = (0,react__WEBPACK_IMPORTED_MODULE_0__.useState)(initConfig.vllm?.endpoint || 'http://localhost:8000/v1');
     const [vllmApiKey, setVllmApiKey] = (0,react__WEBPACK_IMPORTED_MODULE_0__.useState)(initConfig.vllm?.apiKey || '');
     const [vllmModel, setVllmModel] = (0,react__WEBPACK_IMPORTED_MODULE_0__.useState)(initConfig.vllm?.model || 'meta-llama/Llama-2-7b-chat-hf');
+    const [vllmUseResponsesApi, setVllmUseResponsesApi] = (0,react__WEBPACK_IMPORTED_MODULE_0__.useState)(Boolean(initConfig.vllm?.useResponsesApi));
+    const [vllmTemperature, setVllmTemperature] = (0,react__WEBPACK_IMPORTED_MODULE_0__.useState)(initConfig.vllm?.temperature ?? 0.0);
     // OpenAI settings
     const [openaiApiKey, setOpenaiApiKey] = (0,react__WEBPACK_IMPORTED_MODULE_0__.useState)(initConfig.openai?.apiKey || '');
     const [openaiModel, setOpenaiModel] = (0,react__WEBPACK_IMPORTED_MODULE_0__.useState)(initConfig.openai?.model || 'gpt-4');
@@ -4282,6 +4284,8 @@ const SettingsPanel = ({ onClose, onSave, currentConfig }) => {
         setVllmEndpoint(currentConfig.vllm?.endpoint || 'http://localhost:8000/v1');
         setVllmApiKey(currentConfig.vllm?.apiKey || '');
         setVllmModel(currentConfig.vllm?.model || 'meta-llama/Llama-2-7b-chat-hf');
+        setVllmUseResponsesApi(Boolean(currentConfig.vllm?.useResponsesApi));
+        setVllmTemperature(currentConfig.vllm?.temperature ?? 0.0);
         setOpenaiApiKey(currentConfig.openai?.apiKey || '');
         setOpenaiModel(currentConfig.openai?.model || 'gpt-4');
         setSystemPrompt(currentConfig.systemPrompt || (0,_services_ApiKeyManager__WEBPACK_IMPORTED_MODULE_10__.getDefaultLLMConfig)().systemPrompt || '');
@@ -4310,7 +4314,9 @@ const SettingsPanel = ({ onClose, onSave, currentConfig }) => {
             vllm: {
                 endpoint: vllmEndpoint,
                 apiKey: vllmApiKey,
-                model: vllmModel
+                model: vllmModel,
+                useResponsesApi: vllmUseResponsesApi,
+                temperature: vllmTemperature
             },
             openai: {
                 apiKey: openaiApiKey,
@@ -4472,7 +4478,16 @@ const SettingsPanel = ({ onClose, onSave, currentConfig }) => {
         react__WEBPACK_IMPORTED_MODULE_0___default().createElement("input", { type: "password", className: "jp-agent-settings-input", value: vllmApiKey, onChange: (e) => setVllmApiKey(e.target.value), placeholder: "API \uD0A4\uAC00 \uD544\uC694\uD55C \uACBD\uC6B0 \uC785\uB825" })),
     react__WEBPACK_IMPORTED_MODULE_0___default().createElement("div", { className: "jp-agent-settings-group" },
         react__WEBPACK_IMPORTED_MODULE_0___default().createElement("label", { className: "jp-agent-settings-label" }, "\uBAA8\uB378 \uC774\uB984"),
-        react__WEBPACK_IMPORTED_MODULE_0___default().createElement("input", { type: "text", className: "jp-agent-settings-input", value: vllmModel, onChange: (e) => setVllmModel(e.target.value), placeholder: "meta-llama/Llama-2-7b-chat-hf" })))),
+        react__WEBPACK_IMPORTED_MODULE_0___default().createElement("input", { type: "text", className: "jp-agent-settings-input", value: vllmModel, onChange: (e) => setVllmModel(e.target.value), placeholder: "meta-llama/Llama-2-7b-chat-hf" })),
+    react__WEBPACK_IMPORTED_MODULE_0___default().createElement("div", { className: "jp-agent-settings-group" },
+        react__WEBPACK_IMPORTED_MODULE_0___default().createElement("label", { className: "jp-agent-settings-label" }, "Temperature"),
+        react__WEBPACK_IMPORTED_MODULE_0___default().createElement("input", { type: "number", className: "jp-agent-settings-input", value: vllmTemperature, onChange: (e) => setVllmTemperature(Math.max(0, Math.min(2, parseFloat(e.target.value) || 0))), min: 0, max: 2, step: 0.1, style: { width: '100px' } }),
+        react__WEBPACK_IMPORTED_MODULE_0___default().createElement("small", { style: { color: '#666', marginLeft: '8px' } }, "0.0 = \uACB0\uC815\uC801, 1.0+ = \uCC3D\uC758\uC801")),
+    react__WEBPACK_IMPORTED_MODULE_0___default().createElement("div", { className: "jp-agent-settings-group" },
+        react__WEBPACK_IMPORTED_MODULE_0___default().createElement("label", { className: "jp-agent-settings-checkbox" },
+            react__WEBPACK_IMPORTED_MODULE_0___default().createElement("input", { type: "checkbox", checked: vllmUseResponsesApi, onChange: (e) => setVllmUseResponsesApi(e.target.checked) }),
+            react__WEBPACK_IMPORTED_MODULE_0___default().createElement("span", null, "Use Responses API (/v1/responses)")),
+        react__WEBPACK_IMPORTED_MODULE_0___default().createElement("small", { style: { color: '#666', display: 'block', marginTop: '4px' } }, "OpenAI Responses API\uB97C \uC9C0\uC6D0\uD558\uB294 \uC5D4\uB4DC\uD3EC\uC778\uD2B8\uC5D0\uC11C \uC0AC\uC6A9")))),
     provider === 'openai' && (react__WEBPACK_IMPORTED_MODULE_0___default().createElement("div", { className: "jp-agent-settings-provider" },
         react__WEBPACK_IMPORTED_MODULE_0___default().createElement("h3", null, "OpenAI \uC124\uC815"),
         react__WEBPACK_IMPORTED_MODULE_0___default().createElement("div", { className: "jp-agent-settings-group" },
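Note: taken together, the SettingsPanel hunks persist two new fields under the vllm section of the LLM config. A sketch of the resulting shape, with key names taken from the diff and values merely placeholders:

```python
# Sketch of the persisted LLM config after this release (abbreviated).
# Key names come from the diff; the values are illustrative.
llm_config = {
    "vllm": {
        "endpoint": "http://localhost:8000/v1",
        "apiKey": "",
        "model": "meta-llama/Llama-2-7b-chat-hf",
        "useResponsesApi": False,  # new: opt into the /v1/responses endpoint
        "temperature": 0.0,        # new: clamped to [0, 2] by the number input
    },
    "openai": {"apiKey": "", "model": "gpt-4"},
}
```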
@@ -9338,7 +9353,34 @@ class ApiService {
             }
             // Handle token events (streaming LLM response)
             if (data.content) {
-                onChunk(data.content);
+                // Filter out raw todos JSON that shouldn't be displayed to users
+                // Pattern: {"todos": [{"content": "...", "status": "..."}, ...]}
+                const contentStr = String(data.content).trim();
+                const isSummaryJson = contentStr.includes('"summary"') && contentStr.includes('"next_items"');
+                // Check if this is a raw todos JSON object (not summary JSON)
+                let isRawTodosJson = false;
+                if (!isSummaryJson && contentStr.includes('"todos"')) {
+                    try {
+                        const parsed = JSON.parse(contentStr);
+                        // Check structure: {todos: [{content, status}, ...]}
+                        if (parsed && Array.isArray(parsed.todos) && parsed.todos.length > 0) {
+                            const firstTodo = parsed.todos[0];
+                            if (firstTodo && 'content' in firstTodo && 'status' in firstTodo) {
+                                isRawTodosJson = true;
+                            }
+                        }
+                    }
+                    catch {
+                        // Not valid JSON, check for partial pattern
+                        isRawTodosJson = /"todos"\s*:\s*\[\s*\{[^}]*"content"\s*:/.test(contentStr);
+                    }
+                }
+                if (isRawTodosJson) {
+                    console.log('[ApiService] Filtered raw todos JSON from display');
+                }
+                else {
+                    onChunk(data.content);
+                }
             }
             // Handle tool_call events - pass to handler for cell creation
             if (currentEventType === 'tool_call' && data.tool && onToolCall) {
@@ -9508,7 +9550,30 @@ class ApiService {
             }
             // Content chunks
             if (data.content && onChunk) {
-                onChunk(data.content);
+                // Filter out raw todos JSON that shouldn't be displayed to users
+                const contentStr = String(data.content).trim();
+                const isSummaryJson = contentStr.includes('"summary"') && contentStr.includes('"next_items"');
+                let isRawTodosJson = false;
+                if (!isSummaryJson && contentStr.includes('"todos"')) {
+                    try {
+                        const parsed = JSON.parse(contentStr);
+                        if (parsed && Array.isArray(parsed.todos) && parsed.todos.length > 0) {
+                            const firstTodo = parsed.todos[0];
+                            if (firstTodo && 'content' in firstTodo && 'status' in firstTodo) {
+                                isRawTodosJson = true;
+                            }
+                        }
+                    }
+                    catch {
+                        isRawTodosJson = /"todos"\s*:\s*\[\s*\{[^}]*"content"\s*:/.test(contentStr);
+                    }
+                }
+                if (isRawTodosJson) {
+                    console.log('[ApiService] Filtered raw todos JSON from display');
+                }
+                else {
+                    onChunk(data.content);
+                }
             }
             // Tool call events during resume
             if (currentEventType === 'tool_call' && data.tool && onToolCall) {
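Note: both streaming paths (initial stream and resume) now apply the same gate before forwarding content to onChunk: planner summary JSON passes through, while complete or partial todos JSON is suppressed. A Python restatement of that heuristic as a sketch (the helper name is ours; the checks mirror the bundled JS):

```python
import json
import re

# Partial-pattern fallback for mid-stream chunks that are not yet valid JSON.
_PARTIAL_TODOS = re.compile(r'"todos"\s*:\s*\[\s*\{[^}]*"content"\s*:')


def is_raw_todos_json(content: str) -> bool:
    """Return True when a chunk is raw todos JSON that should be hidden."""
    s = content.strip()
    if '"summary"' in s and '"next_items"' in s:
        return False  # summary JSON is allowed to render
    if '"todos"' not in s:
        return False
    try:
        parsed = json.loads(s)
    except json.JSONDecodeError:
        # Not parseable yet: fall back to the partial-pattern check
        return bool(_PARTIAL_TODOS.search(s))
    todos = parsed.get("todos") if isinstance(parsed, dict) else None
    if isinstance(todos, list) and todos:
        first = todos[0]
        return isinstance(first, dict) and "content" in first and "status" in first
    return False
```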
@@ -11813,4 +11878,4 @@ __webpack_require__.r(__webpack_exports__);
 /***/ }
 
 }]);
-//# sourceMappingURL=lib_index_js.1917fbaea37d75dc69b3.js.map
+//# sourceMappingURL=lib_index_js.90f80cb80187de8c5ae5.js.map