hdsp-jupyter-extension 2.0.28__py3-none-any.whl → 2.0.29__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. agent_server/config/__init__.py +5 -0
  2. agent_server/config/server_config.py +213 -0
  3. agent_server/core/__init__.py +2 -2
  4. agent_server/core/llm_service.py +2 -3
  5. agent_server/main.py +4 -4
  6. agent_server/routers/agent.py +2 -2
  7. agent_server/routers/chat.py +31 -28
  8. agent_server/routers/config.py +8 -7
  9. agent_server/routers/langchain_agent.py +97 -79
  10. hdsp_agent_core/managers/config_manager.py +37 -87
  11. {hdsp_jupyter_extension-2.0.28.data → hdsp_jupyter_extension-2.0.29.data}/data/share/jupyter/labextensions/hdsp-agent/build_log.json +1 -1
  12. {hdsp_jupyter_extension-2.0.28.data → hdsp_jupyter_extension-2.0.29.data}/data/share/jupyter/labextensions/hdsp-agent/package.json +2 -2
  13. hdsp_jupyter_extension-2.0.28.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.55727265b00191e68d9a.js → hdsp_jupyter_extension-2.0.29.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.f2eca2f8fa682eb21f72.js +11 -12
  14. hdsp_jupyter_extension-2.0.29.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.f2eca2f8fa682eb21f72.js.map +1 -0
  15. jupyter_ext/labextension/static/lib_index_js.df05d90f366bfd5fa023.js → hdsp_jupyter_extension-2.0.29.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.cc0a7158a5e3de7f22f7.js +125 -949
  16. hdsp_jupyter_extension-2.0.29.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.cc0a7158a5e3de7f22f7.js.map +1 -0
  17. hdsp_jupyter_extension-2.0.28.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.08fce819ee32e9d25175.js → hdsp_jupyter_extension-2.0.29.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.bfff374b5cc6a57e16d2.js +3 -3
  18. jupyter_ext/labextension/static/remoteEntry.08fce819ee32e9d25175.js.map → hdsp_jupyter_extension-2.0.29.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.bfff374b5cc6a57e16d2.js.map +1 -1
  19. {hdsp_jupyter_extension-2.0.28.dist-info → hdsp_jupyter_extension-2.0.29.dist-info}/METADATA +1 -1
  20. {hdsp_jupyter_extension-2.0.28.dist-info → hdsp_jupyter_extension-2.0.29.dist-info}/RECORD +50 -48
  21. jupyter_ext/_version.py +1 -1
  22. jupyter_ext/labextension/build_log.json +1 -1
  23. jupyter_ext/labextension/package.json +2 -2
  24. jupyter_ext/labextension/static/{frontend_styles_index_js.55727265b00191e68d9a.js → frontend_styles_index_js.f2eca2f8fa682eb21f72.js} +11 -12
  25. jupyter_ext/labextension/static/frontend_styles_index_js.f2eca2f8fa682eb21f72.js.map +1 -0
  26. hdsp_jupyter_extension-2.0.28.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.df05d90f366bfd5fa023.js → jupyter_ext/labextension/static/lib_index_js.cc0a7158a5e3de7f22f7.js +125 -949
  27. jupyter_ext/labextension/static/lib_index_js.cc0a7158a5e3de7f22f7.js.map +1 -0
  28. jupyter_ext/labextension/static/{remoteEntry.08fce819ee32e9d25175.js → remoteEntry.bfff374b5cc6a57e16d2.js} +3 -3
  29. hdsp_jupyter_extension-2.0.28.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.08fce819ee32e9d25175.js.map → jupyter_ext/labextension/static/remoteEntry.bfff374b5cc6a57e16d2.js.map +1 -1
  30. hdsp_jupyter_extension-2.0.28.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.55727265b00191e68d9a.js.map +0 -1
  31. hdsp_jupyter_extension-2.0.28.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.df05d90f366bfd5fa023.js.map +0 -1
  32. jupyter_ext/labextension/static/frontend_styles_index_js.55727265b00191e68d9a.js.map +0 -1
  33. jupyter_ext/labextension/static/lib_index_js.df05d90f366bfd5fa023.js.map +0 -1
  34. {hdsp_jupyter_extension-2.0.28.data → hdsp_jupyter_extension-2.0.29.data}/data/etc/jupyter/jupyter_server_config.d/hdsp_jupyter_extension.json +0 -0
  35. {hdsp_jupyter_extension-2.0.28.data → hdsp_jupyter_extension-2.0.29.data}/data/share/jupyter/labextensions/hdsp-agent/install.json +0 -0
  36. {hdsp_jupyter_extension-2.0.28.data → hdsp_jupyter_extension-2.0.29.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b80.c095373419d05e6f141a.js +0 -0
  37. {hdsp_jupyter_extension-2.0.28.data → hdsp_jupyter_extension-2.0.29.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b80.c095373419d05e6f141a.js.map +0 -0
  38. {hdsp_jupyter_extension-2.0.28.data → hdsp_jupyter_extension-2.0.29.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b81.61e75fb98ecff46cf836.js +0 -0
  39. {hdsp_jupyter_extension-2.0.28.data → hdsp_jupyter_extension-2.0.29.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b81.61e75fb98ecff46cf836.js.map +0 -0
  40. {hdsp_jupyter_extension-2.0.28.data → hdsp_jupyter_extension-2.0.29.data}/data/share/jupyter/labextensions/hdsp-agent/static/style.js +0 -0
  41. {hdsp_jupyter_extension-2.0.28.data → hdsp_jupyter_extension-2.0.29.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_babel_runtime_helpers_esm_extends_js-node_modules_emotion_serialize_dist-051195.e2553aab0c3963b83dd7.js +0 -0
  42. {hdsp_jupyter_extension-2.0.28.data → hdsp_jupyter_extension-2.0.29.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_babel_runtime_helpers_esm_extends_js-node_modules_emotion_serialize_dist-051195.e2553aab0c3963b83dd7.js.map +0 -0
  43. {hdsp_jupyter_extension-2.0.28.data → hdsp_jupyter_extension-2.0.29.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js.24edcc52a1c014a8a5f0.js +0 -0
  44. {hdsp_jupyter_extension-2.0.28.data → hdsp_jupyter_extension-2.0.29.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js.24edcc52a1c014a8a5f0.js.map +0 -0
  45. {hdsp_jupyter_extension-2.0.28.data → hdsp_jupyter_extension-2.0.29.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.19ecf6babe00caff6b8a.js +0 -0
  46. {hdsp_jupyter_extension-2.0.28.data → hdsp_jupyter_extension-2.0.29.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.19ecf6babe00caff6b8a.js.map +0 -0
  47. {hdsp_jupyter_extension-2.0.28.data → hdsp_jupyter_extension-2.0.29.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_styled_dist_emotion-styled_browser_development_esm_js.661fb5836f4978a7c6e1.js +0 -0
  48. {hdsp_jupyter_extension-2.0.28.data → hdsp_jupyter_extension-2.0.29.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_styled_dist_emotion-styled_browser_development_esm_js.661fb5836f4978a7c6e1.js.map +0 -0
  49. {hdsp_jupyter_extension-2.0.28.data → hdsp_jupyter_extension-2.0.29.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_index_js.985697e0162d8d088ca2.js +0 -0
  50. {hdsp_jupyter_extension-2.0.28.data → hdsp_jupyter_extension-2.0.29.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_index_js.985697e0162d8d088ca2.js.map +0 -0
  51. {hdsp_jupyter_extension-2.0.28.data → hdsp_jupyter_extension-2.0.29.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.1f5038488cdfd8b3a85d.js +0 -0
  52. {hdsp_jupyter_extension-2.0.28.data → hdsp_jupyter_extension-2.0.29.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.1f5038488cdfd8b3a85d.js.map +0 -0
  53. {hdsp_jupyter_extension-2.0.28.dist-info → hdsp_jupyter_extension-2.0.29.dist-info}/WHEEL +0 -0
  54. {hdsp_jupyter_extension-2.0.28.dist-info → hdsp_jupyter_extension-2.0.29.dist-info}/licenses/LICENSE +0 -0
agent_server/config/__init__.py ADDED
@@ -0,0 +1,5 @@
+ """Agent Server Configuration Module"""
+
+ from .server_config import ServerConfigManager, get_server_config_manager
+
+ __all__ = ["ServerConfigManager", "get_server_config_manager"]
agent_server/config/server_config.py ADDED
@@ -0,0 +1,213 @@
+ """
+ Server Configuration Manager - Agent Server specific settings
+
+ This config manager handles all LLM and server settings.
+ Stored in ~/.hdsp_agent/server_config.json
+
+ This is separate from the client-side config which only stores agentServerUrl.
+ """
+
+ import json
+ import os
+ import tempfile
+ from pathlib import Path
+ from typing import Any, Dict
+
+
+ class ServerConfigManager:
+     """Manage Agent Server configuration persistence"""
+
+     _instance = None
+     _config_file = None
+
+     def __init__(self):
+         try:
+             # Priority 1: environment variable
+             config_dir = os.environ.get("HDSP_AGENT_CONFIG_DIR")
+
+             # Priority 2: home directory ~/.hdsp_agent
+             if not config_dir:
+                 try:
+                     config_dir = os.path.expanduser("~/.hdsp_agent")
+                 except Exception:
+                     config_dir = None
+
+             # Check that the path is usable; try to create it if missing
+             if config_dir:
+                 config_path = Path(config_dir)
+                 try:
+                     config_path.mkdir(parents=True, exist_ok=True)
+                     self._config_file = config_path / "server_config.json"
+                 except Exception as e:
+                     print(f"Warning: Cannot write to {config_dir}: {e}")
+                     self._config_file = None
+
+             # Priority 3 (last resort): use /tmp if the path is missing or not writable
+             if not self._config_file:
+                 tmp_dir = Path(tempfile.gettempdir()) / "hdsp_agent_server"
+                 tmp_dir.mkdir(parents=True, exist_ok=True)
+                 self._config_file = tmp_dir / "server_config.json"
+                 print(f"Using temporary config path: {self._config_file}")
+
+         except Exception as e:
+             print(f"Critical Error in ServerConfigManager init: {e}")
+             self._config_file = Path("/tmp/hdsp_agent_server_config_fallback.json")
+
+         self._config = self._load_config()
+         print(f"ServerConfigManager initialized: {self._config_file}")
+
+     @classmethod
+     def get_instance(cls):
+         """Get singleton instance"""
+         if cls._instance is None:
+             cls._instance = ServerConfigManager()
+         return cls._instance
+
+     @classmethod
+     def reset_instance(cls):
+         """Reset singleton instance (for testing)"""
+         cls._instance = None
+
+     def _load_config(self) -> Dict[str, Any]:
+         """Load configuration from file"""
+         if not self._config_file.exists():
+             return self._default_config()
+
+         try:
+             with open(self._config_file, "r") as f:
+                 loaded = json.load(f)
+             # Merge with defaults to ensure all keys exist
+             defaults = self._default_config()
+             defaults.update(loaded)
+             return defaults
+         except (json.JSONDecodeError, IOError) as e:
+             print(f"Error loading server config: {e}")
+             return self._default_config()
+
+     def _default_config(self) -> Dict[str, Any]:
+         """Get default server configuration"""
+         return {
+             # LLM Provider Settings
+             "provider": "gemini",
+             "gemini": {
+                 "apiKey": "",
+                 "apiKeys": [],
+                 "model": "gemini-2.5-flash"
+             },
+             "openai": {
+                 "apiKey": "",
+                 "model": "gpt-4o"
+             },
+             "vllm": {
+                 "endpoint": "https://openrouter.ai/api/v1",
+                 "apiKey": "",
+                 "model": "openai/gpt-4o",
+                 "useResponsesApi": False
+             },
+             # Summarization LLM
+             "summarization": {
+                 "enabled": False,
+                 "provider": "gemini",
+                 "model": None,
+                 "endpoint": "",
+                 "apiKey": ""
+             },
+             # Embedding
+             "embedding": {
+                 "provider": "openai",
+                 "model": "text-embedding-3-small",
+                 "apiKey": "",
+                 "endpoint": ""
+             },
+             # RAG
+             "rag": {
+                 "qdrantUrl": "http://localhost:6333",
+                 "collectionName": "hdsp_docs"
+             },
+             # Server Settings
+             "agentServerTimeout": 120.0,
+             "idleTimeout": 3600,  # 1 hour default
+             # Prompts (None = use defaults)
+             "prompts": {
+                 "planner": None,
+                 "python_developer": None,
+                 "researcher": None,
+                 "athena_query": None
+             },
+             # Feature Toggles
+             "useResponsesApi": False
+         }
+
+     def get_config(self) -> Dict[str, Any]:
+         """Get current configuration"""
+         self._config = self._load_config()
+         return self._config.copy()
+
+     def save_config(self, config: Dict[str, Any]):
+         """Save configuration to file"""
+         self._config.update(config)
+         self._config_file.parent.mkdir(parents=True, exist_ok=True)
+
+         try:
+             with open(self._config_file, "w") as f:
+                 json.dump(self._config, f, indent=2)
+         except IOError as e:
+             raise RuntimeError(f"Failed to save server config: {e}")
+
+     def get(self, key: str, default=None):
+         """Get specific config value"""
+         return self._config.get(key, default)
+
+     def set(self, key: str, value: Any):
+         """Set specific config value"""
+         self._config[key] = value
+         self.save_config(self._config)
+
+     def update_config(self, updates: Dict[str, Any]):
+         """Update configuration with partial updates (deep merge for nested dicts)"""
+         for key, value in updates.items():
+             if (
+                 key in self._config
+                 and isinstance(self._config[key], dict)
+                 and isinstance(value, dict)
+             ):
+                 self._config[key].update(value)
+             else:
+                 self._config[key] = value
+         self.save_config(self._config)
+
+     def get_admin_config(self) -> Dict[str, Any]:
+         """Get all server configuration (for admin API)"""
+         config = self.get_config()
+         # Return everything except internal keys
+         return config.copy()
+
+     def update_admin_config(self, updates: Dict[str, Any]):
+         """Update server configuration"""
+         self.update_config(updates)
+
+     def get_user_config(self) -> Dict[str, Any]:
+         """Get user configuration.
+
+         NOTE: User preferences (workspaceRoot, autoApprove) are now stored
+         in browser localStorage. This method returns defaults for backward
+         compatibility with the API.
+         """
+         return {
+             "workspaceRoot": "",
+             "temperature": 0.7,
+             "autoApprove": False
+         }
+
+     def update_user_config(self, updates: Dict[str, Any]):
+         """Update user configuration.
+
+         NOTE: User preferences are stored in browser localStorage.
+         This is a no-op for backward compatibility.
+         """
+         pass  # No-op: user config is in localStorage
+
+
+ def get_server_config_manager() -> ServerConfigManager:
+     """Get the singleton ServerConfigManager instance"""
+     return ServerConfigManager.get_instance()
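For orientation, a minimal usage sketch of the new manager, based only on the methods added above (the file path, key names, and deep-merge behavior come from the diff; the gemini-2.5-pro value is an arbitrary example). Running this against a real install would create or modify ~/.hdsp_agent/server_config.json (or the /tmp fallback):

    from agent_server.config import get_server_config_manager

    mgr = get_server_config_manager()  # singleton; resolves the config dir on first use
    print(mgr.get("provider"))         # "gemini" until an admin overrides it

    # update_config() deep-merges nested dicts, so this changes only gemini.model
    # and leaves gemini.apiKey / gemini.apiKeys untouched:
    mgr.update_config({"gemini": {"model": "gemini-2.5-pro"}})

    # set() writes one top-level key and persists to server_config.json immediately:
    mgr.set("idleTimeout", 7200)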
agent_server/core/__init__.py CHANGED
@@ -2,7 +2,7 @@
  Core services for HDSP Agent Server
  """
  
- from hdsp_agent_core.managers.config_manager import ConfigManager
+ from agent_server.config import ServerConfigManager
  from hdsp_agent_core.managers.session_manager import (
      ChatMessage,
      Session,
@@ -47,7 +47,7 @@ from .state_verifier import (
  from .summary_generator import SummaryGenerator, TaskType, get_summary_generator
  
  __all__ = [
-     "ConfigManager",
+     "ServerConfigManager",
      "LLMClient",
      "LLMService",
      "PromptBuilder",
agent_server/core/llm_service.py CHANGED
@@ -28,11 +28,10 @@ class LLMService:
              return self._key_manager
          if self.provider == "gemini":
              try:
-                 from hdsp_agent_core.managers.config_manager import ConfigManager
-
+                 from agent_server.config import ServerConfigManager
                  from agent_server.core.api_key_manager import get_key_manager
  
-                 return get_key_manager(ConfigManager.get_instance())
+                 return get_key_manager(ServerConfigManager.get_instance())
              except ImportError:
                  # Fallback for standalone usage
                  return None
agent_server/main.py CHANGED
@@ -95,12 +95,12 @@ async def _legacy_startup():
      logger.info("Using legacy startup (hdsp_agent_core not available)")
  
      try:
-         from hdsp_agent_core.managers.config_manager import ConfigManager
+         from agent_server.config import ServerConfigManager
  
-         ConfigManager.get_instance()
-         logger.info("Configuration loaded successfully")
+         ServerConfigManager.get_instance()
+         logger.info("Server configuration loaded successfully")
      except Exception as e:
-         logger.warning(f"Failed to load configuration: {e}")
+         logger.warning(f"Failed to load server configuration: {e}")
  
      # Initialize RAG system
      try:
agent_server/routers/agent.py CHANGED
@@ -11,7 +11,7 @@ from typing import Any, Dict, List
  
  from fastapi import APIRouter, HTTPException
  from hdsp_agent_core.knowledge.loader import get_knowledge_base, get_library_detector
- from hdsp_agent_core.managers.config_manager import ConfigManager
+ from agent_server.config import ServerConfigManager
  from hdsp_agent_core.models.agent import (
      PlanRequest,
      PlanResponse,
@@ -49,7 +49,7 @@ logger = logging.getLogger(__name__)
  
  def _get_config() -> Dict[str, Any]:
      """Get current configuration (fallback only)"""
-     return ConfigManager.get_instance().get_config()
+     return ServerConfigManager.get_instance().get_config()
  
  
  def _build_llm_config(llm_config) -> Dict[str, Any]:
agent_server/routers/chat.py CHANGED
@@ -12,7 +12,7 @@ from typing import Any, AsyncGenerator, Dict, Optional
  
  from fastapi import APIRouter, HTTPException
  from fastapi.responses import StreamingResponse
- from hdsp_agent_core.managers.config_manager import ConfigManager
+ from agent_server.config import ServerConfigManager
  from hdsp_agent_core.managers.session_manager import ChatMessage, get_session_manager
  from hdsp_agent_core.models.chat import ChatRequest, ChatResponse
  from pydantic import BaseModel
@@ -105,39 +105,42 @@ logger = logging.getLogger(__name__)
  
  
  def _get_config() -> Dict[str, Any]:
-     """Get current configuration (fallback only)"""
-     return ConfigManager.get_instance().get_config()
+     """Get current configuration from server AdminConfig."""
+     return ServerConfigManager.get_instance().get_admin_config()
  
  
  def _build_llm_config(llm_config) -> Dict[str, Any]:
      """
-     Build LLM config dict from client-provided LLMConfig.
-     Falls back to server config if not provided.
+     Get LLM config from server AdminConfig.
+
+     NOTE: Client-provided llmConfig is IGNORED for LLM settings.
+     All LLM configuration (provider, API keys, models) comes from
+     the server's AdminConfig managed by administrators.
+
+     Args:
+         llm_config: Ignored (kept for backward compatibility)
+
+     Returns:
+         Dict with provider, gemini, openai, vllm from server config
      """
-     if llm_config is None:
-         return _get_config()
+     # Always use server config - ignore client-provided config
+     admin_config = ServerConfigManager.get_instance().get_admin_config()
  
-     config = {"provider": llm_config.provider}
+     config = {"provider": admin_config.get("provider", "gemini")}
  
-     if llm_config.gemini:
-         config["gemini"] = {
-             "apiKey": llm_config.gemini.apiKey,
-             "model": llm_config.gemini.model,
-         }
+     if admin_config.get("gemini"):
+         config["gemini"] = admin_config["gemini"]
  
-     if llm_config.openai:
-         config["openai"] = {
-             "apiKey": llm_config.openai.apiKey,
-             "model": llm_config.openai.model,
-         }
+     if admin_config.get("openai"):
+         config["openai"] = admin_config["openai"]
  
-     if llm_config.vllm:
-         config["vllm"] = {
-             "endpoint": llm_config.vllm.endpoint,
-             "apiKey": llm_config.vllm.apiKey,
-             "model": llm_config.vllm.model,
-         }
+     if admin_config.get("vllm"):
+         config["vllm"] = admin_config["vllm"]
+
+     if admin_config.get("summarization"):
+         config["summarization"] = admin_config["summarization"]
  
+     logger.info("Using LLM config from server AdminConfig: provider=%s", config.get("provider"))
      return config
  
  
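The practical effect of the rewrite above, as a short sketch (values are hypothetical): whatever llmConfig a client attaches to a request, the resolved config mirrors the server's AdminConfig, so rotating an API key or switching models in the Admin panel takes effect on the next request with no client change:

    # Client-supplied llm_config is accepted for backward compatibility but ignored:
    config = _build_llm_config(llm_config=None)

    # config now mirrors the server AdminConfig, e.g.:
    # {"provider": "gemini",
    #  "gemini": {"apiKey": "...", "apiKeys": [...], "model": "gemini-2.5-flash"},
    #  ...}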
@@ -274,7 +277,7 @@ async def chat_message(request: ChatRequest) -> Dict[str, Any]:
      if not config or not config.get("provider"):
          raise HTTPException(
              status_code=400,
-             detail="LLM not configured. Please provide llmConfig with API keys.",
+             detail="LLM not configured. Please configure LLM settings in Admin panel.",
          )
  
      # Get or create conversation
@@ -360,7 +363,7 @@ async def chat_stream(request: ChatRequest) -> StreamingResponse:
        config = _build_llm_config(request.llmConfig)
  
        if not config or not config.get("provider"):
-           yield f"data: {json.dumps({'error': 'LLM not configured. Please provide llmConfig with API keys.'})}\n\n"
+           yield f"data: {json.dumps({'error': 'LLM not configured. Please configure LLM settings in Admin panel.'})}\n\n"
            return
  
        # Get or create conversation
@@ -485,8 +488,8 @@ async def compact_conversation(request: CompactRequest) -> CompactResponse:
          ]
      )
  
-     # Create summarization LLM
-     llm_config = request.llmConfig or _get_config()
+     # Create summarization LLM (always use server config)
+     llm_config = _get_config()
      summarization_llm = create_summarization_llm(llm_config)
  
      if not summarization_llm:
agent_server/routers/config.py CHANGED
@@ -12,9 +12,10 @@ Provides:
  from typing import Any, Dict, Optional
  
  from fastapi import APIRouter, HTTPException
- from hdsp_agent_core.managers.config_manager import ConfigManager
  from pydantic import BaseModel
  
+ from agent_server.config import ServerConfigManager
+
  from agent_server.langchain.agent_prompts.athena_query_prompt import (
      ATHENA_QUERY_SYSTEM_PROMPT,
  )
@@ -67,7 +68,7 @@ async def get_config() -> ConfigResponse:
      Returns the current LLM provider settings (API keys are masked).
      """
      try:
-         config_manager = ConfigManager.get_instance()
+         config_manager = ServerConfigManager.get_instance()
          config = config_manager.get_config()
  
          # Mask API keys for security
@@ -91,7 +92,7 @@ async def update_config(request: ConfigUpdateRequest) -> Dict[str, Any]:
      Updates LLM provider settings.
      """
      try:
-         config_manager = ConfigManager.get_instance()
+         config_manager = ServerConfigManager.get_instance()
  
          # Build update dict from request
          updates = {}
@@ -162,7 +163,7 @@ async def get_admin_config() -> AdminConfigResponse:
      API keys are masked for security.
      """
      try:
-         config_manager = ConfigManager.get_instance()
+         config_manager = ServerConfigManager.get_instance()
          config = config_manager.get_admin_config()
  
          # Mask API keys
@@ -207,7 +208,7 @@ async def update_admin_config(request: AdminConfigUpdateRequest) -> Dict[str, Any]:
      Only updates provided fields.
      """
      try:
-         config_manager = ConfigManager.get_instance()
+         config_manager = ServerConfigManager.get_instance()
  
          # Build update dict from request
          updates = {}
@@ -255,7 +256,7 @@ async def get_user_config() -> UserConfigResponse:
      Get user configuration (preferences).
      """
      try:
-         config_manager = ConfigManager.get_instance()
+         config_manager = ServerConfigManager.get_instance()
          config = config_manager.get_user_config()
  
          return UserConfigResponse(
@@ -275,7 +276,7 @@ async def update_user_config(request: UserConfigUpdateRequest) -> Dict[str, Any]:
      Only updates provided fields.
      """
      try:
-         config_manager = ConfigManager.get_instance()
+         config_manager = ServerConfigManager.get_instance()
  
          # Build update dict from request
          updates = {}
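The masking referenced in the context lines ("# Mask API keys") sits outside these hunks; a helper along these lines is plausible (hypothetical, not taken from the diff):

    def _mask_key(key: str) -> str:
        # Hypothetical sketch: keep a short prefix so admins can recognize the key,
        # hide the rest before returning it through the API.
        if not key:
            return ""
        return key[:4] + "****"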
agent_server/routers/langchain_agent.py CHANGED
@@ -34,8 +34,70 @@ from agent_server.langchain.middleware.subagent_events import (
      drain_subagent_events,
      drain_summarization_events,
  )
+ from agent_server.config import ServerConfigManager
  
  logger = logging.getLogger(__name__)
+
+
+ def _get_llm_config_from_server() -> Dict[str, Any]:
+     """
+     Load LLM configuration from server's AdminConfig.
+
+     This replaces client-provided llmConfig - all LLM settings now come from
+     the server configuration managed by administrators.
+
+     Returns:
+         Dict with provider, gemini, openai, vllm, prompts, etc.
+     """
+     config_manager = ServerConfigManager.get_instance()
+     admin_config = config_manager.get_admin_config()
+
+     config_dict = {
+         "provider": admin_config.get("provider", "gemini"),
+     }
+
+     # Copy LLM provider configs
+     if admin_config.get("gemini"):
+         config_dict["gemini"] = admin_config["gemini"]
+     if admin_config.get("openai"):
+         config_dict["openai"] = admin_config["openai"]
+     if admin_config.get("vllm"):
+         config_dict["vllm"] = admin_config["vllm"]
+     if admin_config.get("summarization"):
+         config_dict["summarization"] = admin_config["summarization"]
+
+     logger.info(
+         "Loaded LLM config from server: provider=%s",
+         config_dict.get("provider")
+     )
+     return config_dict
+
+
+ def _get_agent_prompts_from_server() -> Optional[Dict[str, str]]:
+     """
+     Load agent prompts from server's AdminConfig.
+
+     Returns:
+         Dict with planner, python_developer, researcher, athena_query prompts
+         or None if not configured.
+     """
+     config_manager = ServerConfigManager.get_instance()
+     admin_config = config_manager.get_admin_config()
+
+     prompts = admin_config.get("prompts", {})
+     if not prompts:
+         return None
+
+     agent_prompts = {
+         "planner": prompts.get("planner"),
+         "python_developer": prompts.get("python_developer"),
+         "researcher": prompts.get("researcher"),
+         "athena_query": prompts.get("athena_query"),
+     }
+     # Filter out None values
+     agent_prompts = {k: v for k, v in agent_prompts.items() if v}
+
+     return agent_prompts if agent_prompts else None
  router = APIRouter(prefix="/langchain", tags=["langchain-agent"])
  
  # Track cancelled thread IDs to stop ongoing executions
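Taken together, request handlers can now resolve everything server-side. A condensed sketch using only the two helpers defined above:

    config_dict = _get_llm_config_from_server()       # provider plus per-provider settings
    agent_prompts = _get_agent_prompts_from_server()  # None unless an admin set prompts

    # _get_agent_prompts_from_server() drops unset entries, so a stored config of
    # {"prompts": {"planner": "You are a planner.", "researcher": None}}
    # resolves to {"planner": "You are a planner."}.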
@@ -737,35 +799,24 @@ async def stream_agent(request: AgentRequest):
  
      async def event_generator():
          try:
-             # Use simple agent with HITL
-             provider = request.llmConfig.provider if request.llmConfig else "gemini"
+             # Load LLM config from server (not from client request)
+             config_dict = _get_llm_config_from_server()
+             provider = config_dict.get("provider", "gemini")
              model_name = None
-             if request.llmConfig:
-                 if request.llmConfig.gemini:
-                     model_name = request.llmConfig.gemini.get("model")
-                 elif request.llmConfig.openai:
-                     model_name = request.llmConfig.openai.get("model")
-                 elif request.llmConfig.vllm:
-                     model_name = request.llmConfig.vllm.get("model")
-             logger.info("SimpleAgent LLM provider=%s model=%s", provider, model_name)
-             # Convert LLMConfig to dict
-             config_dict = {
-                 "provider": request.llmConfig.provider
-                 if request.llmConfig
-                 else "gemini",
-             }
-             if request.llmConfig:
-                 if request.llmConfig.gemini:
-                     config_dict["gemini"] = request.llmConfig.gemini
-                 if request.llmConfig.openai:
-                     config_dict["openai"] = request.llmConfig.openai
-                 if request.llmConfig.vllm:
-                     config_dict["vllm"] = request.llmConfig.vllm
-             if request.llmConfig.resource_context:
-                 config_dict["resource_context"] = request.llmConfig.resource_context
-             system_prompt_override = (
-                 request.llmConfig.system_prompt if request.llmConfig else None
-             )
+             if config_dict.get("gemini"):
+                 model_name = config_dict["gemini"].get("model")
+             elif config_dict.get("openai"):
+                 model_name = config_dict["openai"].get("model")
+             elif config_dict.get("vllm"):
+                 model_name = config_dict["vllm"].get("model")
+             logger.info("SimpleAgent LLM provider=%s model=%s (from server config)", provider, model_name)
+
+             # resource_context can still come from client if provided
+             if request.llmConfig and request.llmConfig.resource_context:
+                 config_dict["resource_context"] = request.llmConfig.resource_context
+
+             # system_prompt_override is no longer used from client
+             system_prompt_override = None
  
              # Get or create checkpointer for this thread
              is_existing_thread = thread_id in _simple_agent_checkpointers
@@ -790,20 +841,11 @@
  
              resolved_workspace_root = _resolve_workspace_root(request.workspaceRoot)
  
-             # Get agent prompts for per-agent customization
-             agent_prompts = None
-             if request.llmConfig and request.llmConfig.agent_prompts:
-                 agent_prompts = {
-                     "planner": request.llmConfig.agent_prompts.planner,
-                     "python_developer": (
-                         request.llmConfig.agent_prompts.python_developer
-                     ),
-                     "researcher": request.llmConfig.agent_prompts.researcher,
-                     "athena_query": request.llmConfig.agent_prompts.athena_query,
-                 }
-                 agent_prompts = {k: v for k, v in agent_prompts.items() if v}
+             # Get agent prompts from server config
+             agent_prompts = _get_agent_prompts_from_server()
+             if agent_prompts:
                  logger.info(
-                     "Using agentPrompts (%s)",
+                     "Using agentPrompts from server config (%s)",
                      list(agent_prompts.keys()),
                  )
              # Don't use systemPrompt as override — use agentPrompts.planner instead
@@ -2110,24 +2152,16 @@ async def resume_agent(request: ResumeRequest):
  
      async def event_generator():
          try:
-             # Convert LLMConfig to dict
-             config_dict = {
-                 "provider": request.llmConfig.provider
-                 if request.llmConfig
-                 else "gemini",
-             }
-             if request.llmConfig:
-                 if request.llmConfig.gemini:
-                     config_dict["gemini"] = request.llmConfig.gemini
-                 if request.llmConfig.openai:
-                     config_dict["openai"] = request.llmConfig.openai
-                 if request.llmConfig.vllm:
-                     config_dict["vllm"] = request.llmConfig.vllm
-             if request.llmConfig.resource_context:
-                 config_dict["resource_context"] = request.llmConfig.resource_context
-             system_prompt_override = (
-                 request.llmConfig.system_prompt if request.llmConfig else None
-             )
+             # Load LLM config from server (not from client request)
+             config_dict = _get_llm_config_from_server()
+
+             # resource_context can still come from client if provided
+             if request.llmConfig and request.llmConfig.resource_context:
+                 config_dict["resource_context"] = request.llmConfig.resource_context
+
+             # system_prompt_override is no longer used from client
+             system_prompt_override = None
+
              # Get or create cached agent
              resolved_workspace_root = _resolve_workspace_root(request.workspaceRoot)
  
@@ -2153,29 +2187,13 @@
  
              checkpointer = _simple_agent_checkpointers.get(request.threadId)
  
-             # Get agent prompts for per-agent customization
-             agent_prompts = None
-             if request.llmConfig and request.llmConfig.agent_prompts:
-                 agent_prompts = {
-                     "planner": request.llmConfig.agent_prompts.planner,
-                     "python_developer": (
-                         request.llmConfig.agent_prompts.python_developer
-                     ),
-                     "researcher": request.llmConfig.agent_prompts.researcher,
-                     "athena_query": request.llmConfig.agent_prompts.athena_query,
-                 }
-                 agent_prompts = {k: v for k, v in agent_prompts.items() if v}
+             # Get agent prompts from server config
+             agent_prompts = _get_agent_prompts_from_server()
+             if agent_prompts:
                  logger.info(
-                     "Resume: Using agentPrompts (%s)",
+                     "Resume: Using agentPrompts from server config (%s)",
                      list(agent_prompts.keys()),
                  )
-             # Don't use systemPrompt as override — use agentPrompts.planner instead
-             if system_prompt_override:
-                 logger.info(
-                     "Resume: Ignoring systemPrompt (len=%d)",
-                     len(system_prompt_override),
-                 )
-                 system_prompt_override = None
  
              agent_cache_key = _get_agent_cache_key(
                  llm_config=config_dict,