hdsp-jupyter-extension 2.0.3__py3-none-any.whl → 2.0.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- agent_server/langchain/agent.py +44 -37
- agent_server/routers/langchain_agent.py +49 -9
- hdsp_agent_core/models/common.py +8 -1
- {hdsp_jupyter_extension-2.0.3.data → hdsp_jupyter_extension-2.0.5.data}/data/share/jupyter/labextensions/hdsp-agent/build_log.json +1 -1
- {hdsp_jupyter_extension-2.0.3.data → hdsp_jupyter_extension-2.0.5.data}/data/share/jupyter/labextensions/hdsp-agent/package.json +2 -2
- hdsp_jupyter_extension-2.0.3.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.634cf0ae0f3592d0882f.js → hdsp_jupyter_extension-2.0.5.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.8cc4873c413ed56ff485.js +22 -4
- hdsp_jupyter_extension-2.0.5.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.8cc4873c413ed56ff485.js.map +1 -0
- hdsp_jupyter_extension-2.0.3.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.1366019c413f1d68467f.js → hdsp_jupyter_extension-2.0.5.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.a223ea20056954479ae9.js +53 -5
- hdsp_jupyter_extension-2.0.5.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.a223ea20056954479ae9.js.map +1 -0
- hdsp_jupyter_extension-2.0.3.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.b6d91b150c0800bddfa4.js → hdsp_jupyter_extension-2.0.5.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.37299706f55c6d46099d.js +3 -3
- jupyter_ext/labextension/static/remoteEntry.b6d91b150c0800bddfa4.js.map → hdsp_jupyter_extension-2.0.5.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.37299706f55c6d46099d.js.map +1 -1
- {hdsp_jupyter_extension-2.0.3.dist-info → hdsp_jupyter_extension-2.0.5.dist-info}/METADATA +1 -1
- {hdsp_jupyter_extension-2.0.3.dist-info → hdsp_jupyter_extension-2.0.5.dist-info}/RECORD +43 -43
- jupyter_ext/_version.py +1 -1
- jupyter_ext/labextension/build_log.json +1 -1
- jupyter_ext/labextension/package.json +2 -2
- jupyter_ext/labextension/static/{frontend_styles_index_js.634cf0ae0f3592d0882f.js → frontend_styles_index_js.8cc4873c413ed56ff485.js} +22 -4
- jupyter_ext/labextension/static/frontend_styles_index_js.8cc4873c413ed56ff485.js.map +1 -0
- jupyter_ext/labextension/static/{lib_index_js.1366019c413f1d68467f.js → lib_index_js.a223ea20056954479ae9.js} +53 -5
- jupyter_ext/labextension/static/lib_index_js.a223ea20056954479ae9.js.map +1 -0
- jupyter_ext/labextension/static/{remoteEntry.b6d91b150c0800bddfa4.js → remoteEntry.37299706f55c6d46099d.js} +3 -3
- hdsp_jupyter_extension-2.0.3.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.b6d91b150c0800bddfa4.js.map → jupyter_ext/labextension/static/remoteEntry.37299706f55c6d46099d.js.map +1 -1
- hdsp_jupyter_extension-2.0.3.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.634cf0ae0f3592d0882f.js.map +0 -1
- hdsp_jupyter_extension-2.0.3.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.1366019c413f1d68467f.js.map +0 -1
- jupyter_ext/labextension/static/frontend_styles_index_js.634cf0ae0f3592d0882f.js.map +0 -1
- jupyter_ext/labextension/static/lib_index_js.1366019c413f1d68467f.js.map +0 -1
- {hdsp_jupyter_extension-2.0.3.data → hdsp_jupyter_extension-2.0.5.data}/data/etc/jupyter/jupyter_server_config.d/hdsp_jupyter_extension.json +0 -0
- {hdsp_jupyter_extension-2.0.3.data → hdsp_jupyter_extension-2.0.5.data}/data/share/jupyter/labextensions/hdsp-agent/install.json +0 -0
- {hdsp_jupyter_extension-2.0.3.data → hdsp_jupyter_extension-2.0.5.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b80.c095373419d05e6f141a.js +0 -0
- {hdsp_jupyter_extension-2.0.3.data → hdsp_jupyter_extension-2.0.5.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b80.c095373419d05e6f141a.js.map +0 -0
- {hdsp_jupyter_extension-2.0.3.data → hdsp_jupyter_extension-2.0.5.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b81.61e75fb98ecff46cf836.js +0 -0
- {hdsp_jupyter_extension-2.0.3.data → hdsp_jupyter_extension-2.0.5.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b81.61e75fb98ecff46cf836.js.map +0 -0
- {hdsp_jupyter_extension-2.0.3.data → hdsp_jupyter_extension-2.0.5.data}/data/share/jupyter/labextensions/hdsp-agent/static/style.js +0 -0
- {hdsp_jupyter_extension-2.0.3.data → hdsp_jupyter_extension-2.0.5.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_babel_runtime_helpers_esm_extends_js-node_modules_emotion_serialize_dist-051195.e2553aab0c3963b83dd7.js +0 -0
- {hdsp_jupyter_extension-2.0.3.data → hdsp_jupyter_extension-2.0.5.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_babel_runtime_helpers_esm_extends_js-node_modules_emotion_serialize_dist-051195.e2553aab0c3963b83dd7.js.map +0 -0
- {hdsp_jupyter_extension-2.0.3.data → hdsp_jupyter_extension-2.0.5.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js.24edcc52a1c014a8a5f0.js +0 -0
- {hdsp_jupyter_extension-2.0.3.data → hdsp_jupyter_extension-2.0.5.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js.24edcc52a1c014a8a5f0.js.map +0 -0
- {hdsp_jupyter_extension-2.0.3.data → hdsp_jupyter_extension-2.0.5.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.19ecf6babe00caff6b8a.js +0 -0
- {hdsp_jupyter_extension-2.0.3.data → hdsp_jupyter_extension-2.0.5.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.19ecf6babe00caff6b8a.js.map +0 -0
- {hdsp_jupyter_extension-2.0.3.data → hdsp_jupyter_extension-2.0.5.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_styled_dist_emotion-styled_browser_development_esm_js.661fb5836f4978a7c6e1.js +0 -0
- {hdsp_jupyter_extension-2.0.3.data → hdsp_jupyter_extension-2.0.5.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_styled_dist_emotion-styled_browser_development_esm_js.661fb5836f4978a7c6e1.js.map +0 -0
- {hdsp_jupyter_extension-2.0.3.data → hdsp_jupyter_extension-2.0.5.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_index_js.985697e0162d8d088ca2.js +0 -0
- {hdsp_jupyter_extension-2.0.3.data → hdsp_jupyter_extension-2.0.5.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_index_js.985697e0162d8d088ca2.js.map +0 -0
- {hdsp_jupyter_extension-2.0.3.data → hdsp_jupyter_extension-2.0.5.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.1f5038488cdfd8b3a85d.js +0 -0
- {hdsp_jupyter_extension-2.0.3.data → hdsp_jupyter_extension-2.0.5.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.1f5038488cdfd8b3a85d.js.map +0 -0
- {hdsp_jupyter_extension-2.0.3.dist-info → hdsp_jupyter_extension-2.0.5.dist-info}/WHEEL +0 -0
- {hdsp_jupyter_extension-2.0.3.dist-info → hdsp_jupyter_extension-2.0.5.dist-info}/licenses/LICENSE +0 -0
agent_server/langchain/agent.py
CHANGED
```diff
@@ -20,6 +20,43 @@ from agent_server.langchain.tools import (
 
 logger = logging.getLogger(__name__)
 
+DEFAULT_SYSTEM_PROMPT = """You are an expert Python data scientist and Jupyter notebook assistant.
+Your role is to help users with data analysis, visualization, and Python coding tasks in Jupyter notebooks.
+
+## ⚠️ CRITICAL RULE: NEVER produce an empty response
+
+You MUST ALWAYS call a tool in every response. After any tool result, you MUST:
+1. Check your todo list - are there pending or in_progress items?
+2. If YES → call the next appropriate tool (jupyter_cell_tool, markdown_tool, etc.)
+3. If ALL todos are completed → call final_answer_tool with a summary
+
+NEVER end your turn without calling a tool. NEVER produce an empty response.
+
+## Available Tools
+1. **jupyter_cell_tool**: Execute Python code in a new notebook cell
+2. **markdown_tool**: Add a markdown explanation cell
+3. **final_answer_tool**: Complete the task with a summary - REQUIRED when done
+4. **read_file_tool**: Read file contents
+5. **write_file_tool**: Write file contents
+6. **list_files_tool**: List directory contents
+7. **search_workspace_tool**: Search for patterns in workspace files
+8. **search_notebook_cells_tool**: Search for patterns in notebook cells
+9. **write_todos**: Create and update task list for complex multi-step tasks
+
+## Mandatory Workflow
+1. After EVERY tool result, immediately call the next tool
+2. Continue until ALL todos show status: "completed"
+3. ONLY THEN call final_answer_tool to summarize
+4. If `!pip install` fails, use `!pip3 install` instead
+5. For plots and charts, use English text only
+
+## ❌ FORBIDDEN (will break the workflow)
+- Producing an empty response (no tool call, no content)
+- Stopping after any tool without calling the next tool
+- Ending without calling final_answer_tool
+- Leaving todos in "in_progress" or "pending" state without continuing
+"""
+
 
 def _create_llm(llm_config: Dict[str, Any]):
     """Create LangChain LLM from config"""
@@ -107,6 +144,7 @@ create_simple_chat_agent(
     enable_hitl: bool = True,
     enable_todo_list: bool = True,
     checkpointer: Optional[object] = None,
+    system_prompt_override: Optional[str] = None,
 ):
     """
     Create a simple chat agent using LangChain's create_agent with Human-in-the-Loop.
@@ -642,43 +680,12 @@ NEVER end your response after calling write_todos - always continue with the nex
         middleware.append(list_files_limit)
         logger.info("Added ToolCallLimitMiddleware for write_todos and list_files_tool")
 
-    # System prompt for the agent
-
-
-
-
-
-You MUST ALWAYS call a tool in every response. After any tool result, you MUST:
-1. Check your todo list - are there pending or in_progress items?
-2. If YES → call the next appropriate tool (jupyter_cell_tool, markdown_tool, etc.)
-3. If ALL todos are completed → call final_answer_tool with a summary
-
-NEVER end your turn without calling a tool. NEVER produce an empty response.
-
-## Available Tools
-1. **jupyter_cell_tool**: Execute Python code in a new notebook cell
-2. **markdown_tool**: Add a markdown explanation cell
-3. **final_answer_tool**: Complete the task with a summary - REQUIRED when done
-4. **read_file_tool**: Read file contents
-5. **write_file_tool**: Write file contents
-6. **list_files_tool**: List directory contents
-7. **search_workspace_tool**: Search for patterns in workspace files
-8. **search_notebook_cells_tool**: Search for patterns in notebook cells
-9. **write_todos**: Create and update task list for complex multi-step tasks
-
-## Mandatory Workflow
-1. After EVERY tool result, immediately call the next tool
-2. Continue until ALL todos show status: "completed"
-3. ONLY THEN call final_answer_tool to summarize
-4. If `!pip install` fails, use `!pip3 install` instead
-5. For plots and charts, use English text only
-
-## ❌ FORBIDDEN (will break the workflow)
-- Producing an empty response (no tool call, no content)
-- Stopping after any tool without calling the next tool
-- Ending without calling final_answer_tool
-- Leaving todos in "in_progress" or "pending" state without continuing
-"""
+    # System prompt for the agent (override applies only to LangChain agent)
+    if system_prompt_override and system_prompt_override.strip():
+        system_prompt = system_prompt_override.strip()
+        logger.info("SimpleChatAgent using custom system prompt override")
+    else:
+        system_prompt = DEFAULT_SYSTEM_PROMPT
 
     logger.info("SimpleChatAgent system_prompt: %s", system_prompt)
 
```
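Net effect of these hunks: the system prompt that 2.0.3 built inline inside `create_simple_chat_agent` is now the module-level `DEFAULT_SYSTEM_PROMPT`, and a per-request override can replace it. A minimal sketch of the selection rule the new code applies (the helper name `resolve_system_prompt` is ours, for illustration only; the abridged default is a stand-in):

```python
from typing import Optional

DEFAULT_SYSTEM_PROMPT = "You are an expert Python data scientist..."  # abridged stand-in

def resolve_system_prompt(override: Optional[str]) -> str:
    """Mirrors the 2.0.5 fallback: a non-blank override wins (stripped);
    otherwise the module-level default is used."""
    if override and override.strip():
        return override.strip()
    return DEFAULT_SYSTEM_PROMPT

assert resolve_system_prompt(None) == DEFAULT_SYSTEM_PROMPT
assert resolve_system_prompt("   ") == DEFAULT_SYSTEM_PROMPT    # whitespace-only falls back
assert resolve_system_prompt(" Be terse. ") == "Be terse."      # override is stripped
```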
agent_server/routers/langchain_agent.py
CHANGED

```diff
@@ -33,10 +33,17 @@ router = APIRouter(prefix="/langchain", tags=["langchain-agent"])
 class LLMConfig(BaseModel):
     """LLM configuration"""
 
+    model_config = ConfigDict(populate_by_name=True)
+
     provider: str = Field(default="gemini", description="LLM provider")
     gemini: Optional[Dict[str, Any]] = Field(default=None)
     openai: Optional[Dict[str, Any]] = Field(default=None)
     vllm: Optional[Dict[str, Any]] = Field(default=None)
+    system_prompt: Optional[str] = Field(
+        default=None,
+        alias="systemPrompt",
+        description="Override system prompt for LangChain agent",
+    )
 
 
 class NotebookContext(BaseModel):
```
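The new `model_config = ConfigDict(populate_by_name=True)` line is what lets the camelCase alias and the snake_case field name coexist; without it, pydantic v2 would only accept `systemPrompt`. A trimmed, self-contained sketch of that behavior (only the two relevant fields are reproduced here):

```python
from typing import Optional
from pydantic import BaseModel, ConfigDict, Field

class LLMConfig(BaseModel):
    # Trimmed to the fields relevant to the alias behavior.
    model_config = ConfigDict(populate_by_name=True)

    provider: str = Field(default="gemini")
    system_prompt: Optional[str] = Field(default=None, alias="systemPrompt")

# The frontend's camelCase payload and server-side snake_case construction
# both populate the same attribute.
a = LLMConfig.model_validate({"provider": "openai", "systemPrompt": "Be terse."})
b = LLMConfig(provider="openai", system_prompt="Be terse.")
assert a.system_prompt == b.system_prompt == "Be terse."
```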
```diff
@@ -364,6 +371,9 @@ async def stream_agent(request: AgentRequest):
         config_dict["openai"] = request.llmConfig.openai
     if request.llmConfig.vllm:
         config_dict["vllm"] = request.llmConfig.vllm
+    system_prompt_override = (
+        request.llmConfig.system_prompt if request.llmConfig else None
+    )
 
     agent = create_simple_chat_agent(
         llm_config=config_dict,
@@ -372,6 +382,7 @@ async def stream_agent(request: AgentRequest):
         checkpointer=_simple_agent_checkpointers.setdefault(
             thread_id, InMemorySaver()
         ),
+        system_prompt_override=system_prompt_override,
     )
 
     # Prepare config with thread_id
@@ -392,7 +403,7 @@ async def stream_agent(request: AgentRequest):
             # Initial status: waiting for LLM
             yield {
                 "event": "debug",
-                "data": json.dumps({"status": "🤔 LLM 응답 대기
+                "data": json.dumps({"status": "🤔 LLM 응답 대기 중"}),
             }
 
             for step in agent.stream(agent_input, config, stream_mode="values"):
@@ -406,7 +417,7 @@ async def stream_agent(request: AgentRequest):
 
                 yield {
                     "event": "debug",
-                    "data": json.dumps({"status": "⏸️ 사용자 승인 대기
+                    "data": json.dumps({"status": "⏸️ 사용자 승인 대기 중"}),
                 }
 
                 # Process interrupts
@@ -427,7 +438,8 @@ async def stream_agent(request: AgentRequest):
                     normalized_actions
                 )
 
-
+                total_actions = len(normalized_actions)
+                for idx, action in enumerate(normalized_actions):
                     yield {
                         "event": "interrupt",
                         "data": json.dumps(
@@ -436,6 +448,8 @@ async def stream_agent(request: AgentRequest):
                                 "action": action.get("name", "unknown"),
                                 "args": action.get("arguments", {}),
                                 "description": action.get("description", ""),
+                                "action_index": idx,
+                                "total_actions": total_actions,
                             }
                         ),
                     }
```
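For SSE clients, each `interrupt` event's `data` field now carries ordering metadata. An illustrative decode of one event, assuming only the keys visible in the hunk above (real payloads may include additional keys outside the diff context, and all values here are made up):

```python
import json

# Hypothetical data payload for the first of two pending actions.
raw = json.dumps({
    "action": "jupyter_cell_tool",        # tool the agent wants to run
    "args": {"code": "print('hello')"},   # proposed tool arguments
    "description": "",                    # optional human-readable summary
    "action_index": 0,                    # new in 2.0.5: position of this action
    "total_actions": 2,                   # new in 2.0.5: pending actions in this interrupt
})
event = json.loads(raw)
assert 0 <= event["action_index"] < event["total_actions"]
```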
```diff
@@ -873,7 +887,7 @@ async def stream_agent(request: AgentRequest):
             yield {
                 "event": "debug",
                 "data": json.dumps(
-                    {"status": "🔄 Jupyter Cell로 변환
+                    {"status": "🔄 Jupyter Cell로 변환 중"}
                 ),
             }
             yield {
@@ -973,6 +987,9 @@ async def resume_agent(request: ResumeRequest):
         config_dict["openai"] = request.llmConfig.openai
     if request.llmConfig.vllm:
         config_dict["vllm"] = request.llmConfig.vllm
+    system_prompt_override = (
+        request.llmConfig.system_prompt if request.llmConfig else None
+    )
 
     # Create agent (will use same checkpointer)
     agent = create_simple_chat_agent(
@@ -982,16 +999,36 @@ async def resume_agent(request: ResumeRequest):
         checkpointer=_simple_agent_checkpointers.setdefault(
             request.threadId, InMemorySaver()
         ),
+        system_prompt_override=system_prompt_override,
     )
 
     # Prepare config with thread_id
     config = {"configurable": {"thread_id": request.threadId}}
 
     pending_actions = _simple_agent_pending_actions.get(request.threadId, [])
+    num_pending = len(pending_actions)
+    num_decisions = len(request.decisions)
+
+    # If user provides fewer decisions than pending actions,
+    # reject remaining actions to avoid multiple state updates per step
+    # This prevents "Can receive only one value per step" errors for todos
+    decisions_to_process = list(request.decisions)
+    if num_decisions < num_pending and num_decisions > 0:
+        logger.info(
+            f"Have {num_decisions} decision(s) but {num_pending} pending action(s). "
+            f"Auto-rejecting remaining {num_pending - num_decisions} action(s)."
+        )
+        # Create reject decisions for remaining actions
+        for i in range(num_pending - num_decisions):
+            reject_decision = ResumeDecision(
+                type="reject",
+                feedback="Auto-rejected: only one action can be processed at a time"
+            )
+            decisions_to_process.append(reject_decision)
 
     # Convert decisions to LangChain format
     langgraph_decisions = []
-    for index, decision in enumerate(
+    for index, decision in enumerate(decisions_to_process):
         if decision.type == "approve":
             langgraph_decisions.append({"type": "approve"})
         elif decision.type == "edit":
```
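The padding rule exists because LangGraph raises "Can receive only one value per step" when several state updates (for example, todo writes) land in a single resume. A standalone sketch of the rule (`Decision` here is a stand-in for the package's `ResumeDecision` model):

```python
from dataclasses import dataclass
from typing import List, Optional

@dataclass
class Decision:
    # Stand-in for the package's ResumeDecision pydantic model.
    type: str
    feedback: Optional[str] = None

def pad_decisions(decisions: List[Decision], num_pending: int) -> List[Decision]:
    """Mirror the 2.0.5 resume logic: if at least one decision arrived but
    fewer than there are pending actions, auto-reject the remainder."""
    padded = list(decisions)
    if 0 < len(decisions) < num_pending:
        padded += [
            Decision(
                type="reject",
                feedback="Auto-rejected: only one action can be processed at a time",
            )
            for _ in range(num_pending - len(decisions))
        ]
    return padded

# One approval against three pending actions -> approve plus two auto-rejects.
kinds = [d.type for d in pad_decisions([Decision(type="approve")], num_pending=3)]
assert kinds == ["approve", "reject", "reject"]
```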
```diff
@@ -1020,7 +1057,7 @@ async def resume_agent(request: ResumeRequest):
             # Resume execution
             yield {
                 "event": "debug",
-                "data": json.dumps({"status": "▶️ 실행 재개
+                "data": json.dumps({"status": "▶️ 실행 재개 중"}),
             }
 
             _simple_agent_pending_actions.pop(request.threadId, None)
@@ -1035,7 +1072,7 @@ async def resume_agent(request: ResumeRequest):
             # Status: waiting for LLM response
             yield {
                 "event": "debug",
-                "data": json.dumps({"status": "🤔 LLM 응답 대기
+                "data": json.dumps({"status": "🤔 LLM 응답 대기 중"}),
             }
 
             step_count = 0
@@ -1059,7 +1096,7 @@ async def resume_agent(request: ResumeRequest):
 
                 yield {
                     "event": "debug",
-                    "data": json.dumps({"status": "⏸️ 사용자 승인 대기
+                    "data": json.dumps({"status": "⏸️ 사용자 승인 대기 중"}),
                 }
 
                 for interrupt in interrupts:
@@ -1077,7 +1114,8 @@ async def resume_agent(request: ResumeRequest):
                     normalized_actions
                 )
 
-
+                total_actions = len(normalized_actions)
+                for idx, action in enumerate(normalized_actions):
                     yield {
                         "event": "interrupt",
                         "data": json.dumps(
@@ -1086,6 +1124,8 @@ async def resume_agent(request: ResumeRequest):
                                 "action": action.get("name", "unknown"),
                                 "args": action.get("arguments", {}),
                                 "description": action.get("description", ""),
+                                "action_index": idx,
+                                "total_actions": total_actions,
                             }
                         ),
                     }
```
hdsp_agent_core/models/common.py
CHANGED
```diff
@@ -6,7 +6,7 @@ Shared data models used across all agent services.
 
 from typing import Any, Dict, List, Optional
 
-from pydantic import BaseModel, Field
+from pydantic import BaseModel, ConfigDict, Field
 
 
 class ToolCall(BaseModel):
@@ -87,9 +87,16 @@ class LLMConfig(BaseModel):
     API keys are managed client-side and passed with each request.
     """
 
+    model_config = ConfigDict(populate_by_name=True)
+
     provider: str = Field(
         default="gemini", description="LLM provider (gemini, openai, vllm)"
    )
     gemini: Optional[GeminiConfig] = Field(default=None, description="Gemini config")
     openai: Optional[OpenAIConfig] = Field(default=None, description="OpenAI config")
     vllm: Optional[VLLMConfig] = Field(default=None, description="vLLM config")
+    system_prompt: Optional[str] = Field(
+        default=None,
+        alias="systemPrompt",
+        description="LangChain system prompt override",
+    )
```
package.json
CHANGED

```diff
@@ -1,6 +1,6 @@
 {
   "name": "hdsp-agent",
-  "version": "2.0.3",
+  "version": "2.0.5",
   "description": "HDSP Agent JupyterLab Extension - Thin client for Agent Server",
   "keywords": [
     "jupyter",
@@ -126,7 +126,7 @@
     }
   },
   "_build": {
-    "load": "static/remoteEntry.b6d91b150c0800bddfa4.js",
+    "load": "static/remoteEntry.37299706f55c6d46099d.js",
     "extension": "./extension",
     "style": "./style"
   }
```