hdsp-jupyter-extension 2.0.19__py3-none-any.whl → 2.0.20__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. agent_server/langchain/agent_prompts/planner_prompt.py +3 -0
  2. agent_server/langchain/custom_middleware.py +0 -17
  3. agent_server/langchain/llm_factory.py +37 -5
  4. agent_server/langchain/logging_utils.py +41 -16
  5. agent_server/langchain/models/__init__.py +5 -0
  6. agent_server/langchain/models/gpt_oss_chat.py +351 -0
  7. agent_server/langchain/prompts.py +1 -0
  8. agent_server/routers/langchain_agent.py +10 -0
  9. {hdsp_jupyter_extension-2.0.19.data → hdsp_jupyter_extension-2.0.20.data}/data/share/jupyter/labextensions/hdsp-agent/build_log.json +1 -1
  10. {hdsp_jupyter_extension-2.0.19.data → hdsp_jupyter_extension-2.0.20.data}/data/share/jupyter/labextensions/hdsp-agent/package.json +2 -2
  11. jupyter_ext/labextension/static/lib_index_js.1917fbaea37d75dc69b3.js → hdsp_jupyter_extension-2.0.20.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.90f80cb80187de8c5ae5.js +71 -6
  12. hdsp_jupyter_extension-2.0.20.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.90f80cb80187de8c5ae5.js.map +1 -0
  13. hdsp_jupyter_extension-2.0.19.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.d686ab71eb65b5ef8f15.js → hdsp_jupyter_extension-2.0.20.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.586bf5521d043cdd37b8.js +3 -3
  14. jupyter_ext/labextension/static/remoteEntry.d686ab71eb65b5ef8f15.js.map → hdsp_jupyter_extension-2.0.20.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.586bf5521d043cdd37b8.js.map +1 -1
  15. {hdsp_jupyter_extension-2.0.19.dist-info → hdsp_jupyter_extension-2.0.20.dist-info}/METADATA +1 -1
  16. {hdsp_jupyter_extension-2.0.19.dist-info → hdsp_jupyter_extension-2.0.20.dist-info}/RECORD +46 -44
  17. jupyter_ext/_version.py +1 -1
  18. jupyter_ext/labextension/build_log.json +1 -1
  19. jupyter_ext/labextension/package.json +2 -2
  20. hdsp_jupyter_extension-2.0.19.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.1917fbaea37d75dc69b3.js → jupyter_ext/labextension/static/lib_index_js.90f80cb80187de8c5ae5.js +71 -6
  21. jupyter_ext/labextension/static/lib_index_js.90f80cb80187de8c5ae5.js.map +1 -0
  22. jupyter_ext/labextension/static/{remoteEntry.d686ab71eb65b5ef8f15.js → remoteEntry.586bf5521d043cdd37b8.js} +3 -3
  23. hdsp_jupyter_extension-2.0.19.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.d686ab71eb65b5ef8f15.js.map → jupyter_ext/labextension/static/remoteEntry.586bf5521d043cdd37b8.js.map +1 -1
  24. hdsp_jupyter_extension-2.0.19.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.1917fbaea37d75dc69b3.js.map +0 -1
  25. jupyter_ext/labextension/static/lib_index_js.1917fbaea37d75dc69b3.js.map +0 -1
  26. {hdsp_jupyter_extension-2.0.19.data → hdsp_jupyter_extension-2.0.20.data}/data/etc/jupyter/jupyter_server_config.d/hdsp_jupyter_extension.json +0 -0
  27. {hdsp_jupyter_extension-2.0.19.data → hdsp_jupyter_extension-2.0.20.data}/data/share/jupyter/labextensions/hdsp-agent/install.json +0 -0
  28. {hdsp_jupyter_extension-2.0.19.data → hdsp_jupyter_extension-2.0.20.data}/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.96745acc14125453fba8.js +0 -0
  29. {hdsp_jupyter_extension-2.0.19.data → hdsp_jupyter_extension-2.0.20.data}/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.96745acc14125453fba8.js.map +0 -0
  30. {hdsp_jupyter_extension-2.0.19.data → hdsp_jupyter_extension-2.0.20.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b80.c095373419d05e6f141a.js +0 -0
  31. {hdsp_jupyter_extension-2.0.19.data → hdsp_jupyter_extension-2.0.20.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b80.c095373419d05e6f141a.js.map +0 -0
  32. {hdsp_jupyter_extension-2.0.19.data → hdsp_jupyter_extension-2.0.20.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b81.61e75fb98ecff46cf836.js +0 -0
  33. {hdsp_jupyter_extension-2.0.19.data → hdsp_jupyter_extension-2.0.20.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b81.61e75fb98ecff46cf836.js.map +0 -0
  34. {hdsp_jupyter_extension-2.0.19.data → hdsp_jupyter_extension-2.0.20.data}/data/share/jupyter/labextensions/hdsp-agent/static/style.js +0 -0
  35. {hdsp_jupyter_extension-2.0.19.data → hdsp_jupyter_extension-2.0.20.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_babel_runtime_helpers_esm_extends_js-node_modules_emotion_serialize_dist-051195.e2553aab0c3963b83dd7.js +0 -0
  36. {hdsp_jupyter_extension-2.0.19.data → hdsp_jupyter_extension-2.0.20.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_babel_runtime_helpers_esm_extends_js-node_modules_emotion_serialize_dist-051195.e2553aab0c3963b83dd7.js.map +0 -0
  37. {hdsp_jupyter_extension-2.0.19.data → hdsp_jupyter_extension-2.0.20.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js.24edcc52a1c014a8a5f0.js +0 -0
  38. {hdsp_jupyter_extension-2.0.19.data → hdsp_jupyter_extension-2.0.20.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js.24edcc52a1c014a8a5f0.js.map +0 -0
  39. {hdsp_jupyter_extension-2.0.19.data → hdsp_jupyter_extension-2.0.20.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.19ecf6babe00caff6b8a.js +0 -0
  40. {hdsp_jupyter_extension-2.0.19.data → hdsp_jupyter_extension-2.0.20.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.19ecf6babe00caff6b8a.js.map +0 -0
  41. {hdsp_jupyter_extension-2.0.19.data → hdsp_jupyter_extension-2.0.20.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_styled_dist_emotion-styled_browser_development_esm_js.661fb5836f4978a7c6e1.js +0 -0
  42. {hdsp_jupyter_extension-2.0.19.data → hdsp_jupyter_extension-2.0.20.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_styled_dist_emotion-styled_browser_development_esm_js.661fb5836f4978a7c6e1.js.map +0 -0
  43. {hdsp_jupyter_extension-2.0.19.data → hdsp_jupyter_extension-2.0.20.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_index_js.985697e0162d8d088ca2.js +0 -0
  44. {hdsp_jupyter_extension-2.0.19.data → hdsp_jupyter_extension-2.0.20.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_index_js.985697e0162d8d088ca2.js.map +0 -0
  45. {hdsp_jupyter_extension-2.0.19.data → hdsp_jupyter_extension-2.0.20.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.1f5038488cdfd8b3a85d.js +0 -0
  46. {hdsp_jupyter_extension-2.0.19.data → hdsp_jupyter_extension-2.0.20.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.1f5038488cdfd8b3a85d.js.map +0 -0
  47. {hdsp_jupyter_extension-2.0.19.dist-info → hdsp_jupyter_extension-2.0.20.dist-info}/WHEEL +0 -0
  48. {hdsp_jupyter_extension-2.0.19.dist-info → hdsp_jupyter_extension-2.0.20.dist-info}/licenses/LICENSE +0 -0
@@ -40,6 +40,9 @@ PLANNER_SYSTEM_PROMPT = """You are the Main Agent that coordinates tasks.
 
 **Important**: After receiving a task_tool result, do not immediately mark it complete with write_todos; always apply the result with the tools above first!
 
+**🔴 On KeyboardInterrupt**: If a KeyboardInterrupt occurs while jupyter_cell_tool is running, use ask_user_tool to ask the user why execution was interrupted
+- e.g., ask_user_tool(question="Code execution was interrupted. Please share the reason so it can inform the next steps.", input_type="text")
+
 # write_todos rules [required]
 - Write in Korean
 - **🔴 Never delete existing todos**: always include the entire list and change only each item's status
@@ -1053,23 +1053,6 @@ def create_normalize_tool_args_middleware(wrap_model_call, tools=None):
                         break
                 break
 
-            # Clean AIMessage content when write_todos is called
-            # Remove redundant todos JSON from content (keep summary JSON)
-            if tool_name == "write_todos":
-                msg_content = getattr(msg, "content", "") or ""
-                if msg_content and '"todos"' in msg_content:
-                    # Keep content only if it's summary JSON
-                    is_summary_json = (
-                        '"summary"' in msg_content
-                        and '"next_items"' in msg_content
-                    )
-                    if not is_summary_json:
-                        # Clear redundant todos content
-                        msg.content = ""
-                        logger.info(
-                            "Cleared redundant todos JSON from AIMessage content (write_todos tool_call exists)"
-                        )
-
         return response
 
     return normalize_tool_args
@@ -97,16 +97,37 @@ def _create_vllm_llm(llm_config: Dict[str, Any], callbacks):
     endpoint = vllm_config.get("endpoint", "http://localhost:8000/v1")
     model = vllm_config.get("model", "default")
     api_key = vllm_config.get("apiKey", "dummy")
+    use_responses_api = vllm_config.get("useResponsesApi", False)
+    temperature = vllm_config.get("temperature", 0.0)
 
-    logger.info(f"Creating vLLM LLM with model: {model}, endpoint: {endpoint}")
+    logger.info(
+        f"Creating vLLM LLM with model: {model}, endpoint: {endpoint}, "
+        f"use_responses_api: {use_responses_api}, temperature: {temperature}"
+    )
+
+    # Use ChatGPTOSS for gpt-oss models (Harmony format with developer role)
+    if "gpt-oss" in model.lower():
+        from agent_server.langchain.models import ChatGPTOSS
+
+        logger.info(f"Using ChatGPTOSS for gpt-oss model (developer role support)")
+        return ChatGPTOSS(
+            model=model,
+            base_url=endpoint,
+            api_key=api_key,
+            temperature=temperature,
+            max_tokens=8192,
+            streaming=False,
+            callbacks=callbacks,
+        )
 
     return ChatOpenAI(
         model=model,
         api_key=api_key,
         base_url=endpoint,  # Use endpoint as-is (no /v1 suffix added)
        streaming=False,  # Agent mode: disable LLM streaming (SSE handled by agent server)
-        temperature=0.0,
-        max_tokens=32768,
+        temperature=temperature,
+        max_tokens=8192,
+        use_responses_api=use_responses_api,  # Use /v1/responses endpoint if True
         callbacks=callbacks,
     )
 
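
A minimal sketch of the dispatch above, assuming a config dict shaped like the one the factory reads; the values are illustrative:

llm_config = {
    "vllm": {
        "endpoint": "http://localhost:8000/v1",
        "model": "openai/gpt-oss-120b",   # contains "gpt-oss" -> ChatGPTOSS
        "apiKey": "dummy",
        "useResponsesApi": False,
        "temperature": 0.0,
    }
}

vllm_config = llm_config.get("vllm", {})
model = vllm_config.get("model", "default")
# Same substring check as in _create_vllm_llm: gpt-oss models get the custom
# Harmony-format class, everything else falls through to ChatOpenAI.
backend = "ChatGPTOSS" if "gpt-oss" in model.lower() else "ChatOpenAI"
print(backend)  # -> ChatGPTOSS
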
@@ -148,14 +169,25 @@ def create_summarization_llm(llm_config: Dict[str, Any]):
             temperature=0.0,
         )
     elif provider == "vllm":
-        from langchain_openai import ChatOpenAI
-
         vllm_config = llm_config.get("vllm", {})
         # User provides full base URL (e.g., https://openrouter.ai/api/v1)
         endpoint = vllm_config.get("endpoint", "http://localhost:8000/v1")
         model = vllm_config.get("model", "default")
         api_key = vllm_config.get("apiKey", "dummy")
 
+        # Use ChatGPTOSS for gpt-oss models
+        if "gpt-oss" in model.lower():
+            from agent_server.langchain.models import ChatGPTOSS
+
+            return ChatGPTOSS(
+                model=model,
+                base_url=endpoint,
+                api_key=api_key,
+                temperature=0.0,
+            )
+
+        from langchain_openai import ChatOpenAI
+
         return ChatOpenAI(
             model=model,
             api_key=api_key,
@@ -14,8 +14,36 @@ from langchain_core.callbacks import BaseCallbackHandler
 
 logger = logging.getLogger(__name__)
 
+# Dedicated logger for LLM responses - always enabled with its own handler
+llm_response_logger = logging.getLogger("agent_server.llm_response")
+llm_response_logger.setLevel(logging.INFO)
+llm_response_logger.propagate = True  # Propagate to root logger
+
+# Ensure it has a handler if running standalone
+if not llm_response_logger.handlers and not logging.getLogger().handlers:
+    _handler = logging.StreamHandler()
+    _handler.setFormatter(logging.Formatter('%(message)s'))
+    llm_response_logger.addHandler(_handler)
+
+
+def disable_langchain_logging():
+    """Disable all langchain logging except LLM responses."""
+    # Set all langchain loggers to CRITICAL
+    for name in list(logging.Logger.manager.loggerDict.keys()):
+        if "langchain" in name.lower() or name.startswith("agent_server.langchain"):
+            logging.getLogger(name).setLevel(logging.CRITICAL)
+    # Keep LLM response logger at INFO
+    llm_response_logger.setLevel(logging.INFO)
+
+
+# Auto-disable on import (comment this line to re-enable all logs)
+disable_langchain_logging()
+
 LOG_SEPARATOR = "=" * 96
 LOG_SUBSECTION = "-" * 96
+LOG_EMOJI_LINE = "🔵" * 48
+LOG_RESPONSE_START = f"\n\n{LOG_EMOJI_LINE}\n{'=' * 96}\n ✨ LLM RESPONSE START\n{'=' * 96}"
+LOG_RESPONSE_END = f"{'=' * 96}\n ✅ LLM RESPONSE END\n{'=' * 96}\n{LOG_EMOJI_LINE}\n"
 
 
 def _format_system_prompt_for_log(messages) -> tuple[int, int, str]:
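
disable_langchain_logging() above mutes every registered langchain-related logger while keeping the dedicated response logger at INFO. A standalone sketch of the same pattern (the "langchain.core" logger name is illustrative, not taken from the package):

import logging

noisy = logging.getLogger("langchain.core")
llm_response_logger = logging.getLogger("agent_server.llm_response")

# Raise every "langchain" logger to CRITICAL, then pin the response logger back to INFO.
for name in list(logging.Logger.manager.loggerDict.keys()):
    if "langchain" in name.lower():
        logging.getLogger(name).setLevel(logging.CRITICAL)
llm_response_logger.setLevel(logging.INFO)

print(noisy.getEffectiveLevel())                # 50 (CRITICAL): silenced
print(llm_response_logger.getEffectiveLevel())  # 20 (INFO): still logging
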
@@ -179,15 +207,15 @@ class LLMTraceLogger(BaseCallbackHandler):
         logger.info("%s", "\n".join(lines))
 
     def on_chat_model_start(self, serialized, messages, **kwargs) -> None:
-        if not messages:
-            logger.info(
-                "%s",
-                _format_messages_block("AGENT -> LLM PROMPT (<none>)", []),
-            )
-            return
-        self._log_prompt_batches("AGENT -> LLM PROMPT", messages)
+        # Request logging disabled - only log responses
+        pass
 
     def on_chat_model_end(self, response, **kwargs) -> None:
+        # Debug: Check if callback is even called
+        print("[DEBUG] on_chat_model_end CALLED!", flush=True)
+        # Use print for guaranteed visibility
+        print(LOG_RESPONSE_START, flush=True)
+
         generations = getattr(response, "generations", None) or []
         if generations and isinstance(generations[0], list):
             batches = generations
@@ -203,7 +231,7 @@ class LLMTraceLogger(BaseCallbackHandler):
             title = (
                 f"LLM -> AGENT RESPONSE (batch={batch_idx}, generation={gen_idx})"
             )
-            logger.info("%s", _format_messages_block(title, [message]))
+            print(_format_messages_block(title, [message]), flush=True)
 
             tool_calls = getattr(message, "tool_calls", None)
             if tool_calls:
@@ -211,13 +239,10 @@ class LLMTraceLogger(BaseCallbackHandler):
                     "LLM -> AGENT TOOL CALLS "
                    f"(batch={batch_idx}, generation={gen_idx})"
                 )
-                logger.info("%s", _format_json_block(tool_title, tool_calls))
+                print(_format_json_block(tool_title, tool_calls), flush=True)
 
-    def on_llm_start(self, serialized, prompts, **kwargs) -> None:
-        if not prompts:
-            logger.info("%s", _format_json_block("LLM PROMPT (<none>)", ""))
-            return
+        print(LOG_RESPONSE_END, flush=True)
 
-        for idx, prompt in enumerate(prompts):
-            title = f"LLM PROMPT (batch={idx}, length={len(prompt)})"
-            logger.info("%s", _format_json_block(title, prompt))
+    def on_llm_start(self, serialized, prompts, **kwargs) -> None:
+        # Request logging disabled - only log responses
+        pass
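
Callback handlers such as LLMTraceLogger are attached when the chat model is constructed; a hedged usage sketch (assumes the package is installed and a vLLM-compatible endpoint is reachable; the endpoint and model values are placeholders):

from langchain_openai import ChatOpenAI

from agent_server.langchain.logging_utils import LLMTraceLogger

# After this change, on_chat_model_end prints the framed LLM RESPONSE block
# for every call, while the request-side hooks are no-ops.
llm = ChatOpenAI(
    model="default",
    base_url="http://localhost:8000/v1",
    api_key="dummy",
    callbacks=[LLMTraceLogger()],
)
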
@@ -0,0 +1,5 @@
+"""Custom LangChain chat models."""
+
+from agent_server.langchain.models.gpt_oss_chat import ChatGPTOSS
+
+__all__ = ["ChatGPTOSS"]
@@ -0,0 +1,351 @@
+"""
+ChatGPTOSS: Custom ChatModel for gpt-oss (Harmony format).
+
+gpt-oss uses a different instruction hierarchy:
+- developer: behavioral rules/instructions (highest priority)
+- system: metadata (date, cutoff, tools)
+- user: actual questions
+
+LangChain's ChatOpenAI sends everything as 'system', which gpt-oss treats as low-priority metadata.
+This class converts SystemMessage to 'developer' role for proper instruction following.
+"""
+
+import json
+import logging
+import uuid
+from typing import Any, Dict, Iterator, List, Optional, Union
+
+from langchain_core.callbacks import CallbackManagerForLLMRun
+from langchain_core.language_models.chat_models import BaseChatModel
+from langchain_core.messages import (
+    AIMessage,
+    AIMessageChunk,
+    BaseMessage,
+    HumanMessage,
+    SystemMessage,
+    ToolMessage,
+)
+from langchain_core.outputs import ChatGeneration, ChatGenerationChunk, ChatResult
+from langchain_core.tools import BaseTool
+from openai import OpenAI
+from pydantic import Field
+
+logger = logging.getLogger(__name__)
+
+
+class ChatGPTOSS(BaseChatModel):
+    """ChatModel for gpt-oss with developer role support.
+
+    Converts SystemMessage to 'developer' role for proper instruction hierarchy
+    in gpt-oss (Harmony format) models.
+    """
+
+    client: Any = Field(default=None, exclude=True)
+    model: str = Field(default="openai/gpt-oss-120b")
+    base_url: str = Field(default="http://localhost:8000/v1")
+    api_key: str = Field(default="dummy")
+    temperature: float = Field(default=0.0)
+    max_tokens: int = Field(default=8192)
+    streaming: bool = Field(default=False)
+
+    # Tool-related fields (private, not exposed to pydantic)
+    _tools: Optional[List[Dict[str, Any]]] = None
+    _tool_choice: Optional[Union[str, Dict[str, Any]]] = None
+
+    def __init__(self, callbacks=None, **kwargs):
+        # Remove callbacks from kwargs before super().__init__ if present
+        # BaseChatModel handles callbacks through its own mechanism
+        super().__init__(callbacks=callbacks, **kwargs)
+        # Initialize OpenAI client
+        self.client = OpenAI(
+            base_url=self.base_url,
+            api_key=self.api_key,
+        )
+        self._tools = None
+        self._tool_choice = None
+
+    @property
+    def _llm_type(self) -> str:
+        return "gpt-oss"
+
+    @property
+    def _identifying_params(self) -> Dict[str, Any]:
+        return {
+            "model": self.model,
+            "base_url": self.base_url,
+            "temperature": self.temperature,
+            "max_tokens": self.max_tokens,
+        }
+
+    def bind_tools(
+        self,
+        tools: List[Union[BaseTool, Dict[str, Any]]],
+        *,
+        tool_choice: Optional[Union[str, Dict[str, Any]]] = None,
+        **kwargs,
+    ) -> "ChatGPTOSS":
+        """Bind tools to the model.
+
+        Returns a new instance with tools bound.
+        """
+        # Convert tools to OpenAI format
+        formatted_tools = []
+        for tool in tools:
+            if isinstance(tool, BaseTool):
+                # Convert LangChain tool to OpenAI format
+                tool_schema = {
+                    "type": "function",
+                    "function": {
+                        "name": tool.name,
+                        "description": tool.description or "",
+                        "parameters": tool.args_schema.schema() if tool.args_schema else {"type": "object", "properties": {}},
+                    },
+                }
+                formatted_tools.append(tool_schema)
+            elif isinstance(tool, dict):
+                # Already in dict format, ensure it has correct structure
+                if "type" not in tool:
+                    tool = {"type": "function", "function": tool}
+                formatted_tools.append(tool)
+
+        # Create new instance with tools bound
+        new_instance = ChatGPTOSS(
+            model=self.model,
+            base_url=self.base_url,
+            api_key=self.api_key,
+            temperature=self.temperature,
+            max_tokens=self.max_tokens,
+            streaming=self.streaming,
+        )
+        new_instance._tools = formatted_tools
+        new_instance._tool_choice = tool_choice
+        return new_instance
+
+    def _convert_messages(self, messages: List[BaseMessage]) -> List[Dict[str, Any]]:
+        """Convert LangChain messages to OpenAI format with developer role.
+
+        Key conversion: SystemMessage -> role=developer
+        """
+        result = []
+
+        for msg in messages:
+            if isinstance(msg, SystemMessage):
+                # Convert system to developer for gpt-oss instruction hierarchy
+                result.append({
+                    "role": "developer",
+                    "content": msg.content,
+                })
+            elif isinstance(msg, HumanMessage):
+                result.append({
+                    "role": "user",
+                    "content": msg.content,
+                })
+            elif isinstance(msg, AIMessage):
+                ai_msg: Dict[str, Any] = {
+                    "role": "assistant",
+                    "content": msg.content or "",
+                }
+                # Include tool calls if present
+                tool_calls = getattr(msg, "tool_calls", None)
+                if tool_calls:
+                    ai_msg["tool_calls"] = [
+                        {
+                            "id": tc.get("id", str(uuid.uuid4())[:8]),
+                            "type": "function",
+                            "function": {
+                                "name": tc["name"],
+                                "arguments": json.dumps(tc["args"]) if isinstance(tc["args"], dict) else tc["args"],
+                            },
+                        }
+                        for tc in tool_calls
+                    ]
+                result.append(ai_msg)
+            elif isinstance(msg, ToolMessage):
+                result.append({
+                    "role": "tool",
+                    "tool_call_id": msg.tool_call_id,
+                    "content": msg.content,
+                })
+            else:
+                # Fallback for other message types
+                role = getattr(msg, "role", "user")
+                result.append({
+                    "role": role,
+                    "content": msg.content,
+                })
+
+        return result
+
+    def _create_chat_result(self, response) -> ChatResult:
+        """Convert OpenAI response to LangChain ChatResult."""
+        choice = response.choices[0]
+        message = choice.message
+
+        # Build AIMessage
+        content = message.content or ""
+        additional_kwargs: Dict[str, Any] = {}
+        tool_calls_list = []
+
+        if message.tool_calls:
+            additional_kwargs["tool_calls"] = [
+                {
+                    "id": tc.id,
+                    "type": "function",
+                    "function": {
+                        "name": tc.function.name,
+                        "arguments": tc.function.arguments,
+                    },
+                }
+                for tc in message.tool_calls
+            ]
+            # Also convert to LangChain tool_calls format
+            for tc in message.tool_calls:
+                try:
+                    args = json.loads(tc.function.arguments)
+                except json.JSONDecodeError:
+                    args = {"raw": tc.function.arguments}
+                tool_calls_list.append({
+                    "name": tc.function.name,
+                    "args": args,
+                    "id": tc.id,
+                    "type": "tool_call",
+                })
+
+        ai_message = AIMessage(
+            content=content,
+            additional_kwargs=additional_kwargs,
+            tool_calls=tool_calls_list if tool_calls_list else [],
+            response_metadata={
+                "model_name": response.model,
+                "finish_reason": choice.finish_reason,
+                "id": response.id,
+            },
+        )
+
+        # Add usage metadata if available
+        if response.usage:
+            ai_message.usage_metadata = {
+                "input_tokens": response.usage.prompt_tokens,
+                "output_tokens": response.usage.completion_tokens,
+                "total_tokens": response.usage.total_tokens,
+            }
+
+        generation = ChatGeneration(message=ai_message)
+        return ChatResult(generations=[generation])
+
+    def _generate(
+        self,
+        messages: List[BaseMessage],
+        stop: Optional[List[str]] = None,
+        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        **kwargs,
+    ) -> ChatResult:
+        """Generate a response from the model."""
+        openai_messages = self._convert_messages(messages)
+
+        # Build request kwargs
+        request_kwargs: Dict[str, Any] = {
+            "model": self.model,
+            "messages": openai_messages,
+            "temperature": self.temperature,
+            "max_tokens": self.max_tokens,
+        }
+
+        if stop:
+            request_kwargs["stop"] = stop
+
+        if self._tools:
+            request_kwargs["tools"] = self._tools
+        if self._tool_choice:
+            request_kwargs["tool_choice"] = self._tool_choice
+
+        # Make API call
+        logger.debug(f"ChatGPTOSS request: model={self.model}, messages_count={len(openai_messages)}")
+        response = self.client.chat.completions.create(**request_kwargs)
+
+        return self._create_chat_result(response)
+
+    def _stream(
+        self,
+        messages: List[BaseMessage],
+        stop: Optional[List[str]] = None,
+        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        **kwargs,
+    ) -> Iterator[ChatGenerationChunk]:
+        """Stream responses from the model."""
+        openai_messages = self._convert_messages(messages)
+
+        # Build request kwargs
+        request_kwargs: Dict[str, Any] = {
+            "model": self.model,
+            "messages": openai_messages,
+            "temperature": self.temperature,
+            "max_tokens": self.max_tokens,
+            "stream": True,
+        }
+
+        if stop:
+            request_kwargs["stop"] = stop
+
+        if self._tools:
+            request_kwargs["tools"] = self._tools
+        if self._tool_choice:
+            request_kwargs["tool_choice"] = self._tool_choice
+
+        # Make streaming API call
+        response = self.client.chat.completions.create(**request_kwargs)
+
+        # Accumulate tool calls across chunks
+        tool_calls_accum: Dict[int, Dict[str, Any]] = {}
+
+        for chunk in response:
+            if not chunk.choices:
+                continue
+
+            choice = chunk.choices[0]
+            delta = choice.delta
+
+            content = delta.content or ""
+            additional_kwargs: Dict[str, Any] = {}
+            tool_call_chunks = []
+
+            # Handle tool calls in streaming
+            if delta.tool_calls:
+                for tc in delta.tool_calls:
+                    idx = tc.index
+                    if idx not in tool_calls_accum:
+                        tool_calls_accum[idx] = {
+                            "id": tc.id or "",
+                            "name": "",
+                            "arguments": "",
+                        }
+                    if tc.id:
+                        tool_calls_accum[idx]["id"] = tc.id
+                    if tc.function:
+                        if tc.function.name:
+                            tool_calls_accum[idx]["name"] = tc.function.name
+                        if tc.function.arguments:
+                            tool_calls_accum[idx]["arguments"] += tc.function.arguments
+
+                    # Build tool call chunk for LangChain
+                    tool_call_chunks.append({
+                        "index": idx,
+                        "id": tool_calls_accum[idx]["id"],
+                        "name": tool_calls_accum[idx]["name"],
+                        "args": tool_calls_accum[idx]["arguments"],
+                    })
+
+            # Create chunk message
+            chunk_message = AIMessageChunk(
+                content=content,
+                additional_kwargs=additional_kwargs,
+                tool_call_chunks=tool_call_chunks if tool_call_chunks else [],
+            )
+
+            # Add finish reason on last chunk
+            if choice.finish_reason:
+                chunk_message.response_metadata = {
+                    "finish_reason": choice.finish_reason,
+                }
+
+            yield ChatGenerationChunk(message=chunk_message)
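
The heart of the new class is the role mapping in _convert_messages: SystemMessage content is sent as Harmony's developer role, so gpt-oss treats it as binding instructions rather than low-priority metadata. A dependency-free sketch of just that mapping (the helper name is hypothetical):

def to_harmony_roles(messages):
    # Map OpenAI-style message dicts: "system" becomes "developer";
    # all other roles pass through unchanged.
    return [
        {"role": "developer" if m["role"] == "system" else m["role"],
         "content": m["content"]}
        for m in messages
    ]

print(to_harmony_roles([
    {"role": "system", "content": "Always answer in JSON."},
    {"role": "user", "content": "What is 2 + 2?"},
]))
# [{'role': 'developer', 'content': 'Always answer in JSON.'},
#  {'role': 'user', 'content': 'What is 2 + 2?'}]
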
@@ -42,6 +42,7 @@ DEFAULT_SYSTEM_PROMPT = """You are an expert Python data scientist and Jupyter n
 - check_resource_tool: required before working with large files/DataFrames
 - read_file_tool: inspect large files with limit=100 first
 - jupyter_cell_tool: chart labels in English
+- **On KeyboardInterrupt**: use ask_user_tool to ask the user why execution was interrupted (e.g., "Code execution was interrupted. Knowing the reason would help.")
 - **After editing a file**: checking for errors with diagnostics_tool is required
 
 # Requesting user input [important]
@@ -1082,6 +1082,11 @@ async def stream_agent(request: AgentRequest):
 
             # Handle AIMessage
             elif isinstance(last_message, AIMessage):
+                # LLM Response separator for easy log reading
+                print("\n" + "🔵" * 48, flush=True)
+                print("=" * 96, flush=True)
+                print(" ✨ LLM RESPONSE", flush=True)
+                print("=" * 96, flush=True)
                 logger.info(
                     "SimpleAgent AIMessage content: %s",
                     last_message.content or "",
@@ -1115,6 +1120,11 @@ async def stream_agent(request: AgentRequest):
                         ensure_ascii=False,
                     ),
                 )
+                # LLM Response end separator
+                print("=" * 96, flush=True)
+                print(" ✅ LLM RESPONSE END", flush=True)
+                print("=" * 96, flush=True)
+                print("🔵" * 48 + "\n", flush=True)
                 last_finish_reason = (
                     getattr(last_message, "response_metadata", {}) or {}
                 ).get("finish_reason")
@@ -722,7 +722,7 @@
     "@mui/material": {},
     "react-markdown": {},
     "hdsp-agent": {
-      "version": "2.0.19",
+      "version": "2.0.20",
       "singleton": true,
       "import": "/Users/a421721/Desktop/hdsp/hdsp_agent/extensions/jupyter/lib/index.js"
     }
@@ -1,6 +1,6 @@
 {
   "name": "hdsp-agent",
-  "version": "2.0.19",
+  "version": "2.0.20",
   "description": "HDSP Agent JupyterLab Extension - Thin client for Agent Server",
   "keywords": [
     "jupyter",
@@ -132,7 +132,7 @@
     }
   },
   "_build": {
-    "load": "static/remoteEntry.d686ab71eb65b5ef8f15.js",
+    "load": "static/remoteEntry.586bf5521d043cdd37b8.js",
     "extension": "./extension",
     "style": "./style"
   }