hdsp-jupyter-extension 2.0.23__py3-none-any.whl → 2.0.26__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (60)
  1. agent_server/context_providers/__init__.py +22 -0
  2. agent_server/context_providers/actions.py +45 -0
  3. agent_server/context_providers/base.py +231 -0
  4. agent_server/context_providers/file.py +316 -0
  5. agent_server/context_providers/processor.py +150 -0
  6. agent_server/langchain/agent_factory.py +14 -14
  7. agent_server/langchain/agent_prompts/planner_prompt.py +13 -19
  8. agent_server/langchain/custom_middleware.py +73 -17
  9. agent_server/langchain/models/gpt_oss_chat.py +26 -13
  10. agent_server/langchain/prompts.py +11 -8
  11. agent_server/langchain/tools/jupyter_tools.py +43 -0
  12. agent_server/main.py +2 -1
  13. agent_server/routers/chat.py +61 -10
  14. agent_server/routers/context.py +168 -0
  15. agent_server/routers/langchain_agent.py +806 -203
  16. {hdsp_jupyter_extension-2.0.23.data → hdsp_jupyter_extension-2.0.26.data}/data/share/jupyter/labextensions/hdsp-agent/build_log.json +1 -1
  17. {hdsp_jupyter_extension-2.0.23.data → hdsp_jupyter_extension-2.0.26.data}/data/share/jupyter/labextensions/hdsp-agent/package.json +2 -2
  18. hdsp_jupyter_extension-2.0.23.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.96745acc14125453fba8.js → hdsp_jupyter_extension-2.0.26.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.b5e4416b4e07ec087aad.js +245 -121
  19. hdsp_jupyter_extension-2.0.26.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.b5e4416b4e07ec087aad.js.map +1 -0
  20. jupyter_ext/labextension/static/lib_index_js.2d5ea542350862f7c531.js → hdsp_jupyter_extension-2.0.26.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.67505497667f9c0a763d.js +583 -39
  21. hdsp_jupyter_extension-2.0.26.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.67505497667f9c0a763d.js.map +1 -0
  22. hdsp_jupyter_extension-2.0.23.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.f0127d8744730f2092c1.js → hdsp_jupyter_extension-2.0.26.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.0fe2dcbbd176ee0efceb.js +3 -3
  23. hdsp_jupyter_extension-2.0.23.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.f0127d8744730f2092c1.js.map → hdsp_jupyter_extension-2.0.26.data/data/share/jupyter/labextensions/hdsp-agent/static/remoteEntry.0fe2dcbbd176ee0efceb.js.map +1 -1
  24. {hdsp_jupyter_extension-2.0.23.dist-info → hdsp_jupyter_extension-2.0.26.dist-info}/METADATA +1 -1
  25. {hdsp_jupyter_extension-2.0.23.dist-info → hdsp_jupyter_extension-2.0.26.dist-info}/RECORD +56 -50
  26. jupyter_ext/_version.py +1 -1
  27. jupyter_ext/handlers.py +29 -0
  28. jupyter_ext/labextension/build_log.json +1 -1
  29. jupyter_ext/labextension/package.json +2 -2
  30. jupyter_ext/labextension/static/{frontend_styles_index_js.96745acc14125453fba8.js → frontend_styles_index_js.b5e4416b4e07ec087aad.js} +245 -121
  31. jupyter_ext/labextension/static/frontend_styles_index_js.b5e4416b4e07ec087aad.js.map +1 -0
  32. hdsp_jupyter_extension-2.0.23.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.2d5ea542350862f7c531.js → jupyter_ext/labextension/static/lib_index_js.67505497667f9c0a763d.js +583 -39
  33. jupyter_ext/labextension/static/lib_index_js.67505497667f9c0a763d.js.map +1 -0
  34. jupyter_ext/labextension/static/{remoteEntry.f0127d8744730f2092c1.js → remoteEntry.0fe2dcbbd176ee0efceb.js} +3 -3
  35. jupyter_ext/labextension/static/{remoteEntry.f0127d8744730f2092c1.js.map → remoteEntry.0fe2dcbbd176ee0efceb.js.map} +1 -1
  36. hdsp_jupyter_extension-2.0.23.data/data/share/jupyter/labextensions/hdsp-agent/static/frontend_styles_index_js.96745acc14125453fba8.js.map +0 -1
  37. hdsp_jupyter_extension-2.0.23.data/data/share/jupyter/labextensions/hdsp-agent/static/lib_index_js.2d5ea542350862f7c531.js.map +0 -1
  38. jupyter_ext/labextension/static/frontend_styles_index_js.96745acc14125453fba8.js.map +0 -1
  39. jupyter_ext/labextension/static/lib_index_js.2d5ea542350862f7c531.js.map +0 -1
  40. {hdsp_jupyter_extension-2.0.23.data → hdsp_jupyter_extension-2.0.26.data}/data/etc/jupyter/jupyter_server_config.d/hdsp_jupyter_extension.json +0 -0
  41. {hdsp_jupyter_extension-2.0.23.data → hdsp_jupyter_extension-2.0.26.data}/data/share/jupyter/labextensions/hdsp-agent/install.json +0 -0
  42. {hdsp_jupyter_extension-2.0.23.data → hdsp_jupyter_extension-2.0.26.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b80.c095373419d05e6f141a.js +0 -0
  43. {hdsp_jupyter_extension-2.0.23.data → hdsp_jupyter_extension-2.0.26.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b80.c095373419d05e6f141a.js.map +0 -0
  44. {hdsp_jupyter_extension-2.0.23.data → hdsp_jupyter_extension-2.0.26.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b81.61e75fb98ecff46cf836.js +0 -0
  45. {hdsp_jupyter_extension-2.0.23.data → hdsp_jupyter_extension-2.0.26.data}/data/share/jupyter/labextensions/hdsp-agent/static/node_modules_emotion_use-insertion-effect-with-fallbacks_dist_emotion-use-insertion-effect-wi-3ba6b81.61e75fb98ecff46cf836.js.map +0 -0
  46. {hdsp_jupyter_extension-2.0.23.data → hdsp_jupyter_extension-2.0.26.data}/data/share/jupyter/labextensions/hdsp-agent/static/style.js +0 -0
  47. {hdsp_jupyter_extension-2.0.23.data → hdsp_jupyter_extension-2.0.26.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_babel_runtime_helpers_esm_extends_js-node_modules_emotion_serialize_dist-051195.e2553aab0c3963b83dd7.js +0 -0
  48. {hdsp_jupyter_extension-2.0.23.data → hdsp_jupyter_extension-2.0.26.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_babel_runtime_helpers_esm_extends_js-node_modules_emotion_serialize_dist-051195.e2553aab0c3963b83dd7.js.map +0 -0
  49. {hdsp_jupyter_extension-2.0.23.data → hdsp_jupyter_extension-2.0.26.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js.24edcc52a1c014a8a5f0.js +0 -0
  50. {hdsp_jupyter_extension-2.0.23.data → hdsp_jupyter_extension-2.0.26.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_cache_dist_emotion-cache_browser_development_esm_js.24edcc52a1c014a8a5f0.js.map +0 -0
  51. {hdsp_jupyter_extension-2.0.23.data → hdsp_jupyter_extension-2.0.26.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.19ecf6babe00caff6b8a.js +0 -0
  52. {hdsp_jupyter_extension-2.0.23.data → hdsp_jupyter_extension-2.0.26.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_react_dist_emotion-react_browser_development_esm_js.19ecf6babe00caff6b8a.js.map +0 -0
  53. {hdsp_jupyter_extension-2.0.23.data → hdsp_jupyter_extension-2.0.26.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_styled_dist_emotion-styled_browser_development_esm_js.661fb5836f4978a7c6e1.js +0 -0
  54. {hdsp_jupyter_extension-2.0.23.data → hdsp_jupyter_extension-2.0.26.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_emotion_styled_dist_emotion-styled_browser_development_esm_js.661fb5836f4978a7c6e1.js.map +0 -0
  55. {hdsp_jupyter_extension-2.0.23.data → hdsp_jupyter_extension-2.0.26.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_index_js.985697e0162d8d088ca2.js +0 -0
  56. {hdsp_jupyter_extension-2.0.23.data → hdsp_jupyter_extension-2.0.26.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_index_js.985697e0162d8d088ca2.js.map +0 -0
  57. {hdsp_jupyter_extension-2.0.23.data → hdsp_jupyter_extension-2.0.26.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.1f5038488cdfd8b3a85d.js +0 -0
  58. {hdsp_jupyter_extension-2.0.23.data → hdsp_jupyter_extension-2.0.26.data}/data/share/jupyter/labextensions/hdsp-agent/static/vendors-node_modules_mui_material_utils_createSvgIcon_js.1f5038488cdfd8b3a85d.js.map +0 -0
  59. {hdsp_jupyter_extension-2.0.23.dist-info → hdsp_jupyter_extension-2.0.26.dist-info}/WHEEL +0 -0
  60. {hdsp_jupyter_extension-2.0.23.dist-info → hdsp_jupyter_extension-2.0.26.dist-info}/licenses/LICENSE +0 -0
agent_server/routers/chat.py
@@ -2,10 +2,12 @@
 Chat Router - Chat and streaming endpoints
 
 Handles conversational interactions with the LLM.
+Supports @file context injection for including file contents in prompts.
 """
 
 import json
 import logging
+import os
 from typing import Any, AsyncGenerator, Dict
 
 from fastapi import APIRouter, HTTPException
@@ -14,6 +16,7 @@ from hdsp_agent_core.managers.config_manager import ConfigManager
 from hdsp_agent_core.managers.session_manager import get_session_manager
 from hdsp_agent_core.models.chat import ChatRequest, ChatResponse
 
+from agent_server.context_providers import ContextProcessor
 from agent_server.core.llm_service import LLMService
 
 router = APIRouter()
@@ -83,6 +86,7 @@ async def chat_message(request: ChatRequest) -> Dict[str, Any]:
     """
     Send a chat message and get a response.
 
+    Supports @file context injection (e.g., "@file:path/to/file.py").
     Maintains conversation context across messages using conversation ID.
     """
     logger.info(f"Chat message received: {request.message[:100]}...")
@@ -104,25 +108,52 @@ async def chat_message(request: ChatRequest) -> Dict[str, Any]:
         conversation_id = _get_or_create_conversation(request.conversationId)
 
         # Build context from history
-        context = _build_context(conversation_id)
+        history_context = _build_context(conversation_id)
+
+        # Process @file and other context commands
+        base_dir = getattr(request, 'baseDir', None) or os.getcwd()
+        context_processor = ContextProcessor(base_dir=base_dir)
+        file_context, cleaned_message, context_errors = context_processor.process_message(
+            request.message
+        )
+
+        # Log any context processing errors
+        if context_errors:
+            logger.warning(f"Context processing errors: {context_errors}")
+
+        # Combine all context
+        all_context_parts = []
+        if file_context:
+            all_context_parts.append(file_context)
+        if history_context:
+            all_context_parts.append(history_context)
+        combined_context = "\n\n".join(all_context_parts) if all_context_parts else None
 
         # Call LLM with client-provided config
         llm_service = LLMService(config)
-        response = await llm_service.generate_response(request.message, context=context)
+        response = await llm_service.generate_response(
+            cleaned_message, context=combined_context
+        )
 
-        # Store messages
-        _store_messages(conversation_id, request.message, response)
+        # Store messages (use cleaned message for history)
+        _store_messages(conversation_id, cleaned_message, response)
 
         # Get model info
         provider = config.get("provider", "unknown")
         model = config.get(provider, {}).get("model", "unknown")
 
-        return {
+        result = {
             "response": response,
             "conversationId": conversation_id,
             "model": f"{provider}/{model}",
         }
 
+        # Include context errors if any
+        if context_errors:
+            result["contextErrors"] = context_errors
+
+        return result
+
     except HTTPException:
         raise
     except Exception as e:
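
Both chat handlers depend on ContextProcessor.process_message() returning a (file_context, cleaned_message, errors) triple. The processor itself lands in agent_server/context_providers/processor.py (file 5 above, +150 lines) and is not expanded in this diff; the following is a minimal sketch of the contract the router code appears to assume, not the shipped implementation (quoted paths with spaces are omitted for brevity):

import os
import re
from typing import List, Tuple


class ContextProcessor:
    """Sketch only: resolves @file commands against base_dir."""

    FILE_CMD = re.compile(r"@file:(\S+)")

    def __init__(self, base_dir: str):
        self.base_dir = base_dir

    def process_message(self, message: str) -> Tuple[str, str, List[str]]:
        # Returns (file_context, cleaned_message, errors).
        contexts: List[str] = []
        errors: List[str] = []
        for path in self.FILE_CMD.findall(message):
            full_path = os.path.join(self.base_dir, path)
            try:
                with open(full_path, encoding="utf-8") as f:
                    contexts.append(f"File: {path}\n{f.read()}")
            except OSError as exc:
                errors.append(f"{path}: {exc}")
        cleaned = self.FILE_CMD.sub("", message).strip()
        return "\n\n".join(contexts), cleaned, errors

Note the design choice visible in the hunk above: storing cleaned_message rather than request.message keeps raw @file: commands out of the conversation history, so later _build_context() calls never re-inject stale commands into subsequent turns.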
@@ -135,6 +166,7 @@ async def chat_stream(request: ChatRequest) -> StreamingResponse:
     """
     Send a chat message and get a streaming response.
 
+    Supports @file context injection (e.g., "@file:path/to/file.py").
     Returns Server-Sent Events (SSE) with partial responses.
     """
     logger.info(f"Stream chat request: {request.message[:100]}...")
@@ -154,21 +186,40 @@ async def chat_stream(request: ChatRequest) -> StreamingResponse:
         # Get or create conversation
         conversation_id = _get_or_create_conversation(request.conversationId)
 
-        # Build context
-        context = _build_context(conversation_id)
+        # Build context from history
+        history_context = _build_context(conversation_id)
+
+        # Process @file and other context commands
+        base_dir = getattr(request, 'baseDir', None) or os.getcwd()
+        context_processor = ContextProcessor(base_dir=base_dir)
+        file_context, cleaned_message, context_errors = context_processor.process_message(
+            request.message
+        )
+
+        # Send context errors as a warning event
+        if context_errors:
+            yield f"data: {json.dumps({'warning': 'Context errors: ' + '; '.join(context_errors)})}\n\n"
+
+        # Combine all context
+        all_context_parts = []
+        if file_context:
+            all_context_parts.append(file_context)
+        if history_context:
+            all_context_parts.append(history_context)
+        combined_context = "\n\n".join(all_context_parts) if all_context_parts else None
 
         # Stream LLM response with client-provided config
         llm_service = LLMService(config)
         full_response = ""
 
         async for chunk in llm_service.generate_response_stream(
-            request.message, context=context
+            cleaned_message, context=combined_context
        ):
             full_response += chunk
             yield f"data: {json.dumps({'content': chunk, 'done': False})}\n\n"
 
-        # Store messages after streaming complete
-        _store_messages(conversation_id, request.message, full_response)
+        # Store messages after streaming complete (use cleaned message)
+        _store_messages(conversation_id, cleaned_message, full_response)
 
         # Send final chunk with conversation ID
         yield f"data: {json.dumps({'content': '', 'done': True, 'conversationId': conversation_id})}\n\n"
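
For reference, a hypothetical client for the new streaming behavior. The mount path ("/api/chat/stream" below) is an assumption; the actual prefix is set where main.py registers the router. The handler emits three event shapes: a warning event when @file targets fail, content chunks with done: false, and a final done: true event carrying the conversation ID.

import json

import httpx  # assumed available; any SSE-capable HTTP client works


def stream_chat(message: str, base_dir: str = ".") -> str:
    full = ""
    payload = {"message": message, "baseDir": base_dir}
    url = "http://localhost:8888/api/chat/stream"  # hypothetical mount path
    with httpx.stream("POST", url, json=payload, timeout=None) as resp:
        for line in resp.iter_lines():
            if not line.startswith("data: "):
                continue  # skip blank SSE separators
            event = json.loads(line[len("data: "):])
            if "warning" in event:
                print("context warning:", event["warning"])
            elif event.get("done"):
                print("conversation:", event.get("conversationId"))
            else:
                full += event.get("content", "")
    return full


print(stream_chat("@file:main.py Explain this module."))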
agent_server/routers/context.py (new file)
@@ -0,0 +1,168 @@
+"""
+Context Router - Context provider autocomplete and resolution
+
+Handles @file and other context command autocomplete and processing.
+Inspired by jupyter-ai's autocomplete API.
+"""
+
+import logging
+import os
+from typing import Optional
+
+from fastapi import APIRouter, Query
+from pydantic import BaseModel
+
+from agent_server.context_providers import FileContextProvider, ResetContextProvider
+from agent_server.context_providers.base import ListOptionsResponse
+
+router = APIRouter()
+logger = logging.getLogger(__name__)
+
+
+# All available providers (context and action)
+def _get_all_providers(base_dir: str):
+    """Get all available context/action providers."""
+    return {
+        "@file": FileContextProvider(base_dir=base_dir),
+        "@reset": ResetContextProvider(base_dir=base_dir),
+    }
+
+
+class ContextOptionsRequest(BaseModel):
+    """Request for context options."""
+
+    partial_command: Optional[str] = None  # e.g., "@file:src/"
+    base_dir: Optional[str] = None  # Base directory for file resolution
+
+
+class FileContentRequest(BaseModel):
+    """Request to get file content."""
+
+    filepath: str
+    base_dir: Optional[str] = None
+
+
+class FileContentResponse(BaseModel):
+    """Response with file content."""
+
+    content: str
+    filepath: str
+    error: Optional[str] = None
+
+
+@router.get("/autocomplete", response_model=ListOptionsResponse)
+async def get_autocomplete_options(
+    partial_command: Optional[str] = Query(None, alias="partialCommand"),
+    base_dir: Optional[str] = Query(None, alias="baseDir"),
+) -> ListOptionsResponse:
+    """
+    Get autocomplete options for context commands.
+
+    Query Parameters:
+        partialCommand: Partial command to complete
+            - "@" -> list all available commands
+            - "@fi" -> list commands starting with "@fi"
+            - "@file:" -> list files in current directory
+            - "@file:src/" -> list files in src directory
+        baseDir: Base directory for file resolution (default: cwd)
+    """
+    base_directory = base_dir or os.getcwd()
+    providers = _get_all_providers(base_directory)
+
+    if not partial_command:
+        # Return all available context commands
+        options = [p.get_provider_option() for p in providers.values()]
+        return ListOptionsResponse(options=options)
+
+    # Check if it's just "@" or a partial command name (no colon)
+    if ":" not in partial_command:
+        # Filter providers that match the partial command
+        options = []
+        for cmd_id, provider in providers.items():
+            if cmd_id.startswith(partial_command):
+                options.append(provider.get_provider_option())
+        return ListOptionsResponse(options=options)
+
+    # Has colon - parse the partial command
+    cmd_id, _, arg_prefix = partial_command.partition(":")
+
+    # Find matching provider
+    if cmd_id in providers:
+        provider = providers[cmd_id]
+        if provider.requires_arg:
+            options = provider.get_arg_options(arg_prefix)
+            return ListOptionsResponse(options=options)
+        else:
+            # For commands without args (like @reset), return the command itself
+            return ListOptionsResponse(options=[provider.get_provider_option()])
+
+    # No matching provider
+    return ListOptionsResponse(options=[])
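
A hypothetical exercise of the endpoint above; the "/api/context" prefix is an assumption about how main.py mounts this router:

import httpx

BASE = "http://localhost:8888/api/context"  # hypothetical mount path

# "@" lists every registered command (here: @file and @reset)
print(httpx.get(f"{BASE}/autocomplete", params={"partialCommand": "@"}).json())

# "@file:src/" lists entries under src/, resolved against baseDir
print(httpx.get(
    f"{BASE}/autocomplete",
    params={"partialCommand": "@file:src/", "baseDir": "/workspace/project"},
).json())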
+
+
+@router.post("/file/content", response_model=FileContentResponse)
+async def get_file_content(request: FileContentRequest) -> FileContentResponse:
+    """
+    Get the content of a file for context injection.
+
+    This is used when processing @file commands to include file contents
+    in the LLM prompt.
+    """
+    try:
+        base_directory = request.base_dir or os.getcwd()
+        file_provider = FileContextProvider(base_dir=base_directory)
+
+        # Create a command object
+        from agent_server.context_providers.base import ContextCommand
+
+        cmd = ContextCommand(cmd=f"@file:{request.filepath}")
+        content = file_provider.make_context(cmd)
+
+        return FileContentResponse(
+            content=content,
+            filepath=request.filepath,
+        )
+    except Exception as e:
+        logger.error(f"Failed to read file {request.filepath}: {e}")
+        return FileContentResponse(
+            content="",
+            filepath=request.filepath,
+            error=str(e),
+        )
+
+
+@router.get("/providers")
+async def list_context_providers():
+    """
+    List all available context providers.
+
+    Returns information about each context provider including:
+    - ID (e.g., "@file")
+    - Description
+    - Whether it requires an argument
+    - Whether it's an action command
+    """
+    providers = [
+        {
+            "id": "@file",
+            "description": "Include file contents in the prompt",
+            "requires_arg": True,
+            "is_action": False,
+            "usage": "@file:path/to/file.py",
+            "examples": [
+                "@file:main.py",
+                "@file:src/utils.py",
+                "@file:'path with spaces/file.py'",
+            ],
+        },
+        {
+            "id": "@reset",
+            "description": "Clear session and reset agent",
+            "requires_arg": False,
+            "is_action": True,
+            "usage": "@reset",
+            "examples": ["@reset"],
+        },
+    ]
+
+    return {"providers": providers}
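
All three handlers lean on the provider interface from agent_server/context_providers/base.py (file 3 above, +231 lines), which this diff does not expand. The rough shape below is inferred from the call sites (get_provider_option, get_arg_options, requires_arg, make_context, ContextCommand); names and fields beyond those call sites are guesses, not the shipped definitions:

from typing import List

from pydantic import BaseModel


class ContextCommand(BaseModel):
    cmd: str  # full command text, e.g. "@file:src/utils.py"

    @property
    def arg(self) -> str:
        # everything after the first colon, "" if none
        return self.cmd.partition(":")[2]


class ContextOption(BaseModel):
    # name is a guess; this is whatever ListOptionsResponse.options holds
    id: str            # text autocomplete inserts, e.g. "@file:src/"
    label: str         # text the dropdown displays
    description: str = ""


class ListOptionsResponse(BaseModel):
    options: List[ContextOption]


class BaseContextProvider:
    id: str = "@base"
    description: str = ""
    requires_arg: bool = False

    def __init__(self, base_dir: str):
        self.base_dir = base_dir

    def get_provider_option(self) -> ContextOption:
        return ContextOption(id=self.id, label=self.id,
                             description=self.description)

    def get_arg_options(self, arg_prefix: str) -> List[ContextOption]:
        # e.g. FileContextProvider lists directory entries under arg_prefix
        return []

    def make_context(self, cmd: ContextCommand) -> str:
        # e.g. FileContextProvider reads cmd.arg and formats its contents
        raise NotImplementedError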