hdsp-jupyter-extension 2.0.22__py3-none-any.whl → 2.0.25__py3-none-any.whl
This diff compares the contents of two publicly released versions of the package as they appear in their public registry. It is provided for informational purposes only.
- agent_server/context_providers/__init__.py +22 -0
- agent_server/context_providers/actions.py +45 -0
- agent_server/context_providers/base.py +231 -0
- agent_server/context_providers/file.py +316 -0
- agent_server/context_providers/processor.py +150 -0
- agent_server/langchain/models/gpt_oss_chat.py +51 -32
- agent_server/main.py +2 -1
- agent_server/routers/chat.py +61 -10
- agent_server/routers/context.py +168 -0
- agent_server/routers/langchain_agent.py +609 -182
- {hdsp_jupyter_extension-2.0.22.data → hdsp_jupyter_extension-2.0.25.data}/data/share/jupyter/labextensions/hdsp-agent/build_log.json +1 -1
- {hdsp_jupyter_extension-2.0.22.data → hdsp_jupyter_extension-2.0.25.data}/data/share/jupyter/labextensions/hdsp-agent/package.json +2 -2
- hdsp_jupyter_extension-2.0.22.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.96745acc14125453fba8.js → hdsp_jupyter_extension-2.0.25.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.b5e4416b4e07ec087aad.js +245 -121
- hdsp_jupyter_extension-2.0.25.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.b5e4416b4e07ec087aad.js.map +1 -0
- jupyter_ext/labextension/static/lib_index_js.90f80cb80187de8c5ae5.js → hdsp_jupyter_extension-2.0.25.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.67505497667f9c0a763d.js +589 -45
- hdsp_jupyter_extension-2.0.25.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.67505497667f9c0a763d.js.map +1 -0
- hdsp_jupyter_extension-2.0.22.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.8496e8475f1bd164669b.js → hdsp_jupyter_extension-2.0.25.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.ffc2b4bc8e6cb300e1e1.js +3 -3
- jupyter_ext/labextension/static/remoteEntry.8496e8475f1bd164669b.js.map → hdsp_jupyter_extension-2.0.25.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.ffc2b4bc8e6cb300e1e1.js.map +1 -1
- {hdsp_jupyter_extension-2.0.22.dist-info → hdsp_jupyter_extension-2.0.25.dist-info}/METADATA +1 -1
- {hdsp_jupyter_extension-2.0.22.dist-info → hdsp_jupyter_extension-2.0.25.dist-info}/RECORD +51 -45
- jupyter_ext/_version.py +1 -1
- jupyter_ext/handlers.py +29 -0
- jupyter_ext/labextension/build_log.json +1 -1
- jupyter_ext/labextension/package.json +2 -2
- jupyter_ext/labextension/static/{frontend_styles_index_js.96745acc14125453fba8.js → frontend_styles_index_js.b5e4416b4e07ec087aad.js} +245 -121
- jupyter_ext/labextension/static/frontend_styles_index_js.b5e4416b4e07ec087aad.js.map +1 -0
- hdsp_jupyter_extension-2.0.22.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.90f80cb80187de8c5ae5.js → jupyter_ext/labextension/static/lib_index_js.67505497667f9c0a763d.js +589 -45
- jupyter_ext/labextension/static/lib_index_js.67505497667f9c0a763d.js.map +1 -0
- jupyter_ext/labextension/static/{remoteEntry.8496e8475f1bd164669b.js → remoteEntry.ffc2b4bc8e6cb300e1e1.js} +3 -3
- hdsp_jupyter_extension-2.0.22.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.8496e8475f1bd164669b.js.map → jupyter_ext/labextension/static/remoteEntry.ffc2b4bc8e6cb300e1e1.js.map +1 -1
- hdsp_jupyter_extension-2.0.22.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.96745acc14125453fba8.js.map +0 -1
- hdsp_jupyter_extension-2.0.22.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.90f80cb80187de8c5ae5.js.map +0 -1
- jupyter_ext/labextension/static/frontend_styles_index_js.96745acc14125453fba8.js.map +0 -1
- jupyter_ext/labextension/static/lib_index_js.90f80cb80187de8c5ae5.js.map +0 -1
- {hdsp_jupyter_extension-2.0.22.data → hdsp_jupyter_extension-2.0.25.data}/data/etc/jupyter/jupyter_server_config.d/hdsp_jupyter_extension.json +0 -0
- {hdsp_jupyter_extension-2.0.22.data → hdsp_jupyter_extension-2.0.25.data}/data/share/jupyter/labextensions/hdsp-agent/install.json +0 -0
- {hdsp_jupyter_extension-2.0.22.data → hdsp_jupyter_extension-2.0.25.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b80.c095373419d05e6f141a.js +0 -0
- {hdsp_jupyter_extension-2.0.22.data → hdsp_jupyter_extension-2.0.25.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b80.c095373419d05e6f141a.js.map +0 -0
- {hdsp_jupyter_extension-2.0.22.data → hdsp_jupyter_extension-2.0.25.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b81.61e75fb98ecff46cf836.js +0 -0
- {hdsp_jupyter_extension-2.0.22.data → hdsp_jupyter_extension-2.0.25.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b81.61e75fb98ecff46cf836.js.map +0 -0
- {hdsp_jupyter_extension-2.0.22.data → hdsp_jupyter_extension-2.0.25.data}/data/share/jupyter/labextensions/hdsp-agent/static/style.js +0 -0
- {hdsp_jupyter_extension-2.0.22.data → hdsp_jupyter_extension-2.0.25.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_babel_runtime_helpers_esm_extends_js-node_modules_emotion_serialize_dist-051195.e2553aab0c3963b83dd7.js +0 -0
- {hdsp_jupyter_extension-2.0.22.data → hdsp_jupyter_extension-2.0.25.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_babel_runtime_helpers_esm_extends_js-node_modules_emotion_serialize_dist-051195.e2553aab0c3963b83dd7.js.map +0 -0
- {hdsp_jupyter_extension-2.0.22.data → hdsp_jupyter_extension-2.0.25.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js.24edcc52a1c014a8a5f0.js +0 -0
- {hdsp_jupyter_extension-2.0.22.data → hdsp_jupyter_extension-2.0.25.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js.24edcc52a1c014a8a5f0.js.map +0 -0
- {hdsp_jupyter_extension-2.0.22.data → hdsp_jupyter_extension-2.0.25.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.19ecf6babe00caff6b8a.js +0 -0
- {hdsp_jupyter_extension-2.0.22.data → hdsp_jupyter_extension-2.0.25.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.19ecf6babe00caff6b8a.js.map +0 -0
- {hdsp_jupyter_extension-2.0.22.data → hdsp_jupyter_extension-2.0.25.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_styled_dist_emotion-styled_browser_development_esm_js.661fb5836f4978a7c6e1.js +0 -0
- {hdsp_jupyter_extension-2.0.22.data → hdsp_jupyter_extension-2.0.25.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_styled_dist_emotion-styled_browser_development_esm_js.661fb5836f4978a7c6e1.js.map +0 -0
- {hdsp_jupyter_extension-2.0.22.data → hdsp_jupyter_extension-2.0.25.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_index_js.985697e0162d8d088ca2.js +0 -0
- {hdsp_jupyter_extension-2.0.22.data → hdsp_jupyter_extension-2.0.25.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_index_js.985697e0162d8d088ca2.js.map +0 -0
- {hdsp_jupyter_extension-2.0.22.data → hdsp_jupyter_extension-2.0.25.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.1f5038488cdfd8b3a85d.js +0 -0
- {hdsp_jupyter_extension-2.0.22.data → hdsp_jupyter_extension-2.0.25.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.1f5038488cdfd8b3a85d.js.map +0 -0
- {hdsp_jupyter_extension-2.0.22.dist-info → hdsp_jupyter_extension-2.0.25.dist-info}/WHEEL +0 -0
- {hdsp_jupyter_extension-2.0.22.dist-info → hdsp_jupyter_extension-2.0.25.dist-info}/licenses/LICENSE +0 -0
agent_server/context_providers/processor.py
ADDED

@@ -0,0 +1,150 @@
+"""
+Context Processor
+
+Processes context commands in user input, injecting file contents and other
+context into the LLM prompt.
+"""
+
+import logging
+from typing import Optional
+
+from .base import BaseContextProvider, ContextProviderException, find_commands
+from .file import FileContextProvider
+
+logger = logging.getLogger(__name__)
+
+
+class ContextProcessor:
+    """
+    Processes context commands (@file, etc.) in user input.
+
+    This class:
+    1. Finds context commands in the user's message
+    2. Builds context content from the commands
+    3. Cleans the original message by replacing commands with references
+    """
+
+    def __init__(self, base_dir: str = "."):
+        """
+        Initialize the context processor.
+
+        Args:
+            base_dir: Base directory for resolving relative file paths
+        """
+        self.base_dir = base_dir
+        self.providers: dict[str, BaseContextProvider] = {
+            "@file": FileContextProvider(base_dir=base_dir),
+        }
+
+    def process_message(self, message: str) -> tuple[str, str, list[str]]:
+        """
+        Process context commands in a message.
+
+        Args:
+            message: The user's message containing context commands
+
+        Returns:
+            Tuple of:
+            - context: Combined context string to prepend to the prompt
+            - cleaned_message: Message with commands replaced by references
+            - errors: List of error messages (if any commands failed)
+        """
+        context_parts = []
+        errors = []
+        cleaned_message = message
+
+        # Process each provider
+        for provider_id, provider in self.providers.items():
+            commands = find_commands(provider, message)
+
+            for command in commands:
+                try:
+                    # Generate context for this command
+                    context = provider.make_context(command)
+                    context_parts.append(context)
+
+                    # Replace command in message
+                    replacement = provider.replace_command(command)
+                    cleaned_message = cleaned_message.replace(
+                        command.cmd, replacement, 1
+                    )
+
+                    logger.info(f"Processed context command: {command.cmd}")
+
+                except ContextProviderException as e:
+                    errors.append(str(e))
+                    logger.warning(f"Context command failed: {command.cmd} - {e}")
+
+        # Combine all context
+        combined_context = "\n\n".join(context_parts) if context_parts else ""
+
+        return combined_context, cleaned_message, errors
+
+    def has_context_commands(self, message: str) -> bool:
+        """
+        Check if a message contains any context commands.
+
+        Args:
+            message: The message to check
+
+        Returns:
+            True if message contains context commands
+        """
+        for provider in self.providers.values():
+            if find_commands(provider, message):
+                return True
+        return False
+
+    def build_prompt_with_context(
+        self,
+        message: str,
+        system_context: Optional[str] = None,
+    ) -> tuple[str, list[str]]:
+        """
+        Build a complete prompt with injected context.
+
+        Args:
+            message: The user's message
+            system_context: Optional additional system context
+
+        Returns:
+            Tuple of:
+            - prompt: Complete prompt with context
+            - errors: List of any errors that occurred
+        """
+        context, cleaned_message, errors = self.process_message(message)
+
+        # Build the final prompt
+        parts = []
+
+        if context:
+            parts.append("### Provided Context ###")
+            parts.append(context)
+            parts.append("### End of Context ###\n")
+
+        if system_context:
+            parts.append(system_context)
+
+        parts.append(cleaned_message)
+
+        prompt = "\n\n".join(parts)
+
+        return prompt, errors
+
+
+def process_context_commands(
+    message: str,
+    base_dir: str = ".",
+) -> tuple[str, str, list[str]]:
+    """
+    Convenience function to process context commands.
+
+    Args:
+        message: User's message
+        base_dir: Base directory for file resolution
+
+    Returns:
+        Tuple of (context, cleaned_message, errors)
+    """
+    processor = ContextProcessor(base_dir=base_dir)
+    return processor.process_message(message)
agent_server/langchain/models/gpt_oss_chat.py
CHANGED

@@ -98,7 +98,9 @@ class ChatGPTOSS(BaseChatModel):
                 "function": {
                     "name": tool.name,
                     "description": tool.description or "",
-                    "parameters": tool.args_schema.schema()
+                    "parameters": tool.args_schema.schema()
+                    if tool.args_schema
+                    else {"type": "object", "properties": {}},
                 },
             }
             formatted_tools.append(tool_schema)

@@ -116,6 +118,7 @@ class ChatGPTOSS(BaseChatModel):
             temperature=self.temperature,
             max_tokens=self.max_tokens,
             streaming=self.streaming,
+            callbacks=self.callbacks,  # Preserve callbacks (e.g., LLMTraceLogger)
         )
         new_instance._tools = formatted_tools
         new_instance._tool_choice = tool_choice

@@ -131,15 +134,19 @@ class ChatGPTOSS(BaseChatModel):
         for msg in messages:
             if isinstance(msg, SystemMessage):
                 # Convert system to developer for gpt-oss instruction hierarchy
-                result.append(
-
-
-
+                result.append(
+                    {
+                        "role": "developer",
+                        "content": msg.content,
+                    }
+                )
             elif isinstance(msg, HumanMessage):
-                result.append(
-
-
-
+                result.append(
+                    {
+                        "role": "user",
+                        "content": msg.content,
+                    }
+                )
             elif isinstance(msg, AIMessage):
                 ai_msg: Dict[str, Any] = {
                     "role": "assistant",

@@ -154,25 +161,31 @@ class ChatGPTOSS(BaseChatModel):
                         "type": "function",
                         "function": {
                             "name": tc["name"],
-                            "arguments": json.dumps(tc["args"])
+                            "arguments": json.dumps(tc["args"])
+                            if isinstance(tc["args"], dict)
+                            else tc["args"],
                         },
                     }
                     for tc in tool_calls
                 ]
                 result.append(ai_msg)
             elif isinstance(msg, ToolMessage):
-                result.append(
-
-
-
-
+                result.append(
+                    {
+                        "role": "tool",
+                        "tool_call_id": msg.tool_call_id,
+                        "content": msg.content,
+                    }
+                )
             else:
                 # Fallback for other message types
                 role = getattr(msg, "role", "user")
-                result.append(
-
-
-
+                result.append(
+                    {
+                        "role": role,
+                        "content": msg.content,
+                    }
+                )
 
         return result
 

@@ -204,12 +217,14 @@ class ChatGPTOSS(BaseChatModel):
                     args = json.loads(tc.function.arguments)
                 except json.JSONDecodeError:
                     args = {"raw": tc.function.arguments}
-                tool_calls_list.append(
-
-
-
-
-
+                tool_calls_list.append(
+                    {
+                        "name": tc.function.name,
+                        "args": args,
+                        "id": tc.id,
+                        "type": "tool_call",
+                    }
+                )
 
         ai_message = AIMessage(
             content=content,

@@ -260,7 +275,9 @@ class ChatGPTOSS(BaseChatModel):
             request_kwargs["tool_choice"] = self._tool_choice
 
         # Make API call
-        logger.debug(
+        logger.debug(
+            f"ChatGPTOSS request: model={self.model}, messages_count={len(openai_messages)}"
+        )
         response = self.client.chat.completions.create(**request_kwargs)
 
         return self._create_chat_result(response)

@@ -328,12 +345,14 @@ class ChatGPTOSS(BaseChatModel):
                     tool_calls_accum[idx]["arguments"] += tc.function.arguments
 
                     # Build tool call chunk for LangChain
-                    tool_call_chunks.append(
-
-
-
-
-
+                    tool_call_chunks.append(
+                        {
+                            "index": idx,
+                            "id": tool_calls_accum[idx]["id"],
+                            "name": tool_calls_accum[idx]["name"],
+                            "args": tool_calls_accum[idx]["arguments"],
+                        }
+                    )
 
                 # Create chunk message
                 chunk_message = AIMessageChunk(
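
The key behavioural change in bind_tools above is the schema fallback for tools that have no args_schema; a minimal standalone sketch of that logic (not the package's code, the helper name is hypothetical):

def tool_parameters(tool) -> dict:
    """Mirror of the fallback introduced above: a tool without an args_schema
    yields an empty JSON-schema object instead of raising AttributeError."""
    return (
        tool.args_schema.schema()
        if getattr(tool, "args_schema", None)
        else {"type": "object", "properties": {}}
    )

The same hunk also carries self.callbacks over to the rebound instance, so callback handlers such as the LLMTraceLogger mentioned in the comment survive bind_tools.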
agent_server/main.py
CHANGED

@@ -14,7 +14,7 @@ from contextlib import asynccontextmanager
 from fastapi import FastAPI
 from fastapi.middleware.cors import CORSMiddleware
 
-from agent_server.routers import agent, chat, config, file_resolver, health, rag
+from agent_server.routers import agent, chat, config, context, file_resolver, health, rag
 
 # Optional LangChain router (requires langchain dependencies)
 try:

@@ -158,6 +158,7 @@ app.include_router(agent.router, prefix="/agent", tags=["Agent"])
 app.include_router(chat.router, prefix="/chat", tags=["Chat"])
 app.include_router(rag.router, prefix="/rag", tags=["RAG"])
 app.include_router(file_resolver.router, prefix="/file", tags=["File Resolution"])
+app.include_router(context.router, prefix="/context", tags=["Context Providers"])
 
 # Register LangChain agent router if available
 if LANGCHAIN_AVAILABLE:
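
A quick smoke check of the newly mounted router could look like this hypothetical snippet (not part of the package; the route itself is defined in agent_server/routers/context.py later in this diff):

from fastapi.testclient import TestClient

from agent_server.main import app

client = TestClient(app)
print(client.get("/context/providers").json())  # metadata for @file and @reset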
agent_server/routers/chat.py
CHANGED

@@ -2,10 +2,12 @@
 Chat Router - Chat and streaming endpoints
 
 Handles conversational interactions with the LLM.
+Supports @file context injection for including file contents in prompts.
 """
 
 import json
 import logging
+import os
 from typing import Any, AsyncGenerator, Dict
 
 from fastapi import APIRouter, HTTPException

@@ -14,6 +16,7 @@ from hdsp_agent_core.managers.config_manager import ConfigManager
 from hdsp_agent_core.managers.session_manager import get_session_manager
 from hdsp_agent_core.models.chat import ChatRequest, ChatResponse
 
+from agent_server.context_providers import ContextProcessor
 from agent_server.core.llm_service import LLMService
 
 router = APIRouter()

@@ -83,6 +86,7 @@ async def chat_message(request: ChatRequest) -> Dict[str, Any]:
     """
     Send a chat message and get a response.
 
+    Supports @file context injection (e.g., "@file:path/to/file.py").
     Maintains conversation context across messages using conversation ID.
     """
     logger.info(f"Chat message received: {request.message[:100]}...")

@@ -104,25 +108,52 @@
         conversation_id = _get_or_create_conversation(request.conversationId)
 
         # Build context from history
-
+        history_context = _build_context(conversation_id)
+
+        # Process @file and other context commands
+        base_dir = getattr(request, 'baseDir', None) or os.getcwd()
+        context_processor = ContextProcessor(base_dir=base_dir)
+        file_context, cleaned_message, context_errors = context_processor.process_message(
+            request.message
+        )
+
+        # Log any context processing errors
+        if context_errors:
+            logger.warning(f"Context processing errors: {context_errors}")
+
+        # Combine all context
+        all_context_parts = []
+        if file_context:
+            all_context_parts.append(file_context)
+        if history_context:
+            all_context_parts.append(history_context)
+        combined_context = "\n\n".join(all_context_parts) if all_context_parts else None
 
         # Call LLM with client-provided config
         llm_service = LLMService(config)
-        response = await llm_service.generate_response(
+        response = await llm_service.generate_response(
+            cleaned_message, context=combined_context
+        )
 
-        # Store messages
-        _store_messages(conversation_id,
+        # Store messages (use cleaned message for history)
+        _store_messages(conversation_id, cleaned_message, response)
 
         # Get model info
         provider = config.get("provider", "unknown")
         model = config.get(provider, {}).get("model", "unknown")
 
-
+        result = {
             "response": response,
             "conversationId": conversation_id,
             "model": f"{provider}/{model}",
         }
 
+        # Include context errors if any
+        if context_errors:
+            result["contextErrors"] = context_errors
+
+        return result
+
     except HTTPException:
         raise
     except Exception as e:

@@ -135,6 +166,7 @@ async def chat_stream(request: ChatRequest) -> StreamingResponse:
     """
     Send a chat message and get a streaming response.
 
+    Supports @file context injection (e.g., "@file:path/to/file.py").
     Returns Server-Sent Events (SSE) with partial responses.
     """
     logger.info(f"Stream chat request: {request.message[:100]}...")

@@ -154,21 +186,40 @@
         # Get or create conversation
         conversation_id = _get_or_create_conversation(request.conversationId)
 
-        # Build context
-
+        # Build context from history
+        history_context = _build_context(conversation_id)
+
+        # Process @file and other context commands
+        base_dir = getattr(request, 'baseDir', None) or os.getcwd()
+        context_processor = ContextProcessor(base_dir=base_dir)
+        file_context, cleaned_message, context_errors = context_processor.process_message(
+            request.message
+        )
+
+        # Send context errors as a warning event
+        if context_errors:
+            yield f"data: {json.dumps({'warning': 'Context errors: ' + '; '.join(context_errors)})}\n\n"
+
+        # Combine all context
+        all_context_parts = []
+        if file_context:
+            all_context_parts.append(file_context)
+        if history_context:
+            all_context_parts.append(history_context)
+        combined_context = "\n\n".join(all_context_parts) if all_context_parts else None
 
         # Stream LLM response with client-provided config
         llm_service = LLMService(config)
         full_response = ""
 
         async for chunk in llm_service.generate_response_stream(
-
+            cleaned_message, context=combined_context
         ):
             full_response += chunk
             yield f"data: {json.dumps({'content': chunk, 'done': False})}\n\n"
 
-        # Store messages after streaming complete
-        _store_messages(conversation_id,
+        # Store messages after streaming complete (use cleaned message)
+        _store_messages(conversation_id, cleaned_message, full_response)
 
         # Send final chunk with conversation ID
         yield f"data: {json.dumps({'content': '', 'done': True, 'conversationId': conversation_id})}\n\n"
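
For illustration, a client call that pulls a file into the prompt might look like the following hypothetical snippet (the concrete route path under the /chat prefix, the host/port, and the optional baseDir field are assumptions not shown in this diff):

import requests

payload = {
    "message": "Explain what @file:src/utils.py does",
    "conversationId": None,
    "baseDir": "/path/to/project",  # only honoured if ChatRequest defines this field
}
resp = requests.post("http://localhost:8000/chat/message", json=payload)
body = resp.json()
print(body["response"])
print(body.get("contextErrors", []))  # present only when a context command failed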
agent_server/routers/context.py
ADDED

@@ -0,0 +1,168 @@
+"""
+Context Router - Context provider autocomplete and resolution
+
+Handles @file and other context command autocomplete and processing.
+Inspired by jupyter-ai's autocomplete API.
+"""
+
+import logging
+import os
+from typing import Optional
+
+from fastapi import APIRouter, Query
+from pydantic import BaseModel
+
+from agent_server.context_providers import FileContextProvider, ResetContextProvider
+from agent_server.context_providers.base import ListOptionsResponse
+
+router = APIRouter()
+logger = logging.getLogger(__name__)
+
+
+# All available providers (context and action)
+def _get_all_providers(base_dir: str):
+    """Get all available context/action providers."""
+    return {
+        "@file": FileContextProvider(base_dir=base_dir),
+        "@reset": ResetContextProvider(base_dir=base_dir),
+    }
+
+
+class ContextOptionsRequest(BaseModel):
+    """Request for context options."""
+
+    partial_command: Optional[str] = None  # e.g., "@file:src/"
+    base_dir: Optional[str] = None  # Base directory for file resolution
+
+
+class FileContentRequest(BaseModel):
+    """Request to get file content."""
+
+    filepath: str
+    base_dir: Optional[str] = None
+
+
+class FileContentResponse(BaseModel):
+    """Response with file content."""
+
+    content: str
+    filepath: str
+    error: Optional[str] = None
+
+
+@router.get("/autocomplete", response_model=ListOptionsResponse)
+async def get_autocomplete_options(
+    partial_command: Optional[str] = Query(None, alias="partialCommand"),
+    base_dir: Optional[str] = Query(None, alias="baseDir"),
+) -> ListOptionsResponse:
+    """
+    Get autocomplete options for context commands.
+
+    Query Parameters:
+        partialCommand: Partial command to complete
+            - "@" -> list all available commands
+            - "@fi" -> list commands starting with "@fi"
+            - "@file:" -> list files in current directory
+            - "@file:src/" -> list files in src directory
+        baseDir: Base directory for file resolution (default: cwd)
+    """
+    base_directory = base_dir or os.getcwd()
+    providers = _get_all_providers(base_directory)
+
+    if not partial_command:
+        # Return all available context commands
+        options = [p.get_provider_option() for p in providers.values()]
+        return ListOptionsResponse(options=options)
+
+    # Check if it's just "@" or a partial command name (no colon)
+    if ":" not in partial_command:
+        # Filter providers that match the partial command
+        options = []
+        for cmd_id, provider in providers.items():
+            if cmd_id.startswith(partial_command):
+                options.append(provider.get_provider_option())
+        return ListOptionsResponse(options=options)
+
+    # Has colon - parse the partial command
+    cmd_id, _, arg_prefix = partial_command.partition(":")
+
+    # Find matching provider
+    if cmd_id in providers:
+        provider = providers[cmd_id]
+        if provider.requires_arg:
+            options = provider.get_arg_options(arg_prefix)
+            return ListOptionsResponse(options=options)
+        else:
+            # For commands without args (like @reset), return the command itself
+            return ListOptionsResponse(options=[provider.get_provider_option()])
+
+    # No matching provider
+    return ListOptionsResponse(options=[])
+
+
+@router.post("/file/content", response_model=FileContentResponse)
+async def get_file_content(request: FileContentRequest) -> FileContentResponse:
+    """
+    Get the content of a file for context injection.
+
+    This is used when processing @file commands to include file contents
+    in the LLM prompt.
+    """
+    try:
+        base_directory = request.base_dir or os.getcwd()
+        file_provider = FileContextProvider(base_dir=base_directory)
+
+        # Create a command object
+        from agent_server.context_providers.base import ContextCommand
+
+        cmd = ContextCommand(cmd=f"@file:{request.filepath}")
+        content = file_provider.make_context(cmd)
+
+        return FileContentResponse(
+            content=content,
+            filepath=request.filepath,
+        )
+    except Exception as e:
+        logger.error(f"Failed to read file {request.filepath}: {e}")
+        return FileContentResponse(
+            content="",
+            filepath=request.filepath,
+            error=str(e),
+        )
+
+
+@router.get("/providers")
+async def list_context_providers():
+    """
+    List all available context providers.
+
+    Returns information about each context provider including:
+    - ID (e.g., "@file")
+    - Description
+    - Whether it requires an argument
+    - Whether it's an action command
+    """
+    providers = [
+        {
+            "id": "@file",
+            "description": "Include file contents in the prompt",
+            "requires_arg": True,
+            "is_action": False,
+            "usage": "@file:path/to/file.py",
+            "examples": [
+                "@file:main.py",
+                "@file:src/utils.py",
+                "@file:'path with spaces/file.py'",
+            ],
+        },
+        {
+            "id": "@reset",
+            "description": "Clear session and reset agent",
+            "requires_arg": False,
+            "is_action": True,
+            "usage": "@reset",
+            "examples": ["@reset"],
+        },
+    ]
+
+    return {"providers": providers}