kg_mcp-0.1.8-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kg_mcp/__init__.py +5 -0
- kg_mcp/__main__.py +8 -0
- kg_mcp/cli/__init__.py +3 -0
- kg_mcp/cli/setup.py +1100 -0
- kg_mcp/cli/status.py +344 -0
- kg_mcp/codegraph/__init__.py +3 -0
- kg_mcp/codegraph/indexer.py +296 -0
- kg_mcp/codegraph/model.py +170 -0
- kg_mcp/config.py +83 -0
- kg_mcp/kg/__init__.py +3 -0
- kg_mcp/kg/apply_schema.py +93 -0
- kg_mcp/kg/ingest.py +253 -0
- kg_mcp/kg/neo4j.py +155 -0
- kg_mcp/kg/repo.py +756 -0
- kg_mcp/kg/retrieval.py +225 -0
- kg_mcp/kg/schema.cypher +176 -0
- kg_mcp/llm/__init__.py +4 -0
- kg_mcp/llm/client.py +291 -0
- kg_mcp/llm/prompts/__init__.py +8 -0
- kg_mcp/llm/prompts/extractor.py +84 -0
- kg_mcp/llm/prompts/linker.py +117 -0
- kg_mcp/llm/schemas.py +248 -0
- kg_mcp/main.py +195 -0
- kg_mcp/mcp/__init__.py +3 -0
- kg_mcp/mcp/change_schemas.py +140 -0
- kg_mcp/mcp/prompts.py +223 -0
- kg_mcp/mcp/resources.py +218 -0
- kg_mcp/mcp/tools.py +537 -0
- kg_mcp/security/__init__.py +3 -0
- kg_mcp/security/auth.py +121 -0
- kg_mcp/security/origin.py +112 -0
- kg_mcp/utils.py +100 -0
- kg_mcp-0.1.8.dist-info/METADATA +86 -0
- kg_mcp-0.1.8.dist-info/RECORD +36 -0
- kg_mcp-0.1.8.dist-info/WHEEL +4 -0
- kg_mcp-0.1.8.dist-info/entry_points.txt +4 -0
kg_mcp/mcp/tools.py
ADDED
@@ -0,0 +1,537 @@
"""
MCP Tool definitions for the Knowledge Graph Memory Server.

This module exposes ONLY 2 tools to AI agents:
- kg_autopilot: Call at the START of every task
- kg_track_changes: Call AFTER every file modification

All other functionality is internal and not exposed via MCP.
"""

import logging
from typing import Any, Dict, List, Optional

from mcp.server.fastmcp import FastMCP

from kg_mcp.kg.ingest import get_ingest_pipeline
from kg_mcp.kg.retrieval import get_context_builder
from kg_mcp.kg.repo import get_repository
from kg_mcp.utils import serialize_response

logger = logging.getLogger(__name__)


# =============================================================================
# INTERNAL HELPER FUNCTIONS (Not exposed via MCP)
# =============================================================================


async def _ingest_message(
    project_id: str,
    user_text: str,
    files: Optional[List[str]] = None,
    diff: Optional[str] = None,
    symbols: Optional[List[str]] = None,
    tags: Optional[List[str]] = None,
) -> Dict[str, Any]:
    """
    Internal: Analyze and save a user request to the knowledge graph.
    Called by kg_autopilot.
    """
    logger.info(f"_ingest_message called for project {project_id}")

    try:
        pipeline = get_ingest_pipeline()
        result = await pipeline.process_message(
            project_id=project_id,
            user_text=user_text,
            files=files,
            diff=diff,
            symbols=symbols,
            tags=tags,
        )
        return serialize_response(result)
    except Exception as e:
        logger.error(f"_ingest_message failed: {e}")
        return {
            "error": str(e),
            "interaction_id": None,
            "extracted": {},
            "created_entities": {},
        }


async def _context_pack(
    project_id: str,
    focus_goal_id: Optional[str] = None,
    query: Optional[str] = None,
    k_hops: int = 2,
) -> Dict[str, Any]:
    """
    Internal: Build a comprehensive context pack from the knowledge graph.
    Called by kg_autopilot.
    """
    logger.info(f"_context_pack called for project {project_id}")

    try:
        builder = get_context_builder()
        result = await builder.build_context_pack(
            project_id=project_id,
            focus_goal_id=focus_goal_id,
            query=query,
            k_hops=k_hops,
        )
        return serialize_response(result)
    except Exception as e:
        logger.error(f"_context_pack failed: {e}")
        return {
            "error": str(e),
            "markdown": f"# Error\n\nFailed to build context: {e}",
            "entities": {},
        }


async def _search(
    project_id: str,
    query: str,
    filters: Optional[List[str]] = None,
    limit: int = 20,
) -> Dict[str, Any]:
    """
    Internal: Search the knowledge graph using fulltext + traversal.
    Called by kg_autopilot when search_query is provided.
    """
    logger.info(f"_search called: '{query}' in project {project_id}")

    try:
        repo = get_repository()
        results = await repo.fulltext_search(
            project_id=project_id,
            query=query,
            node_types=filters,
            limit=limit,
        )
        return serialize_response({
            "results": results,
            "total": len(results),
            "query": query,
        })
    except Exception as e:
        logger.error(f"_search failed: {e}")
        return {
            "error": str(e),
            "results": [],
            "total": 0,
        }


async def _link_code_artifact(
    project_id: str,
    path: str,
    kind: str = "file",
    language: Optional[str] = None,
    symbol_fqn: Optional[str] = None,
    start_line: Optional[int] = None,
    end_line: Optional[int] = None,
    git_commit: Optional[str] = None,
    content_hash: Optional[str] = None,
    related_goal_ids: Optional[List[str]] = None,
) -> Dict[str, Any]:
    """
    Internal: Link a code artifact to the knowledge graph.
    Called by kg_track_changes.
    """
    logger.info(f"_link_code_artifact called: {path}")

    try:
        repo = get_repository()
        artifact = await repo.upsert_code_artifact(
            project_id=project_id,
            path=path,
            kind=kind,
            language=language,
            symbol_fqn=symbol_fqn,
            start_line=start_line,
            end_line=end_line,
            git_commit=git_commit,
            content_hash=content_hash,
            related_goal_ids=related_goal_ids,
        )
        return {
            "artifact_id": artifact.get("id"),
            "path": path,
            "linked_goals": len(related_goal_ids) if related_goal_ids else 0,
        }
    except Exception as e:
        logger.error(f"_link_code_artifact failed: {e}")
        return {
            "error": str(e),
            "artifact_id": None,
        }


async def _impact_analysis(
    project_id: str,
    changed_paths: Optional[List[str]] = None,
    changed_symbols: Optional[List[str]] = None,
) -> Dict[str, Any]:
    """
    Internal: Analyze the impact of code changes.
    Called by kg_track_changes when check_impact=True.
    """
    logger.info(f"_impact_analysis called for project {project_id}")

    if not changed_paths and not changed_symbols:
        return {
            "error": "At least one of changed_paths or changed_symbols is required",
            "goals_to_retest": [],
            "tests_to_run": [],
            "strategies_to_review": [],
            "artifacts_related": [],
        }

    try:
        repo = get_repository()
        paths = changed_paths or []

        result = await repo.get_impact_for_artifacts(project_id, paths)
        return serialize_response(result)
    except Exception as e:
        logger.error(f"_impact_analysis failed: {e}")
        return {
            "error": str(e),
            "goals_to_retest": [],
            "tests_to_run": [],
            "strategies_to_review": [],
            "artifacts_related": [],
        }


# =============================================================================
# MCP TOOL REGISTRATION (Only 2 tools exposed)
# =============================================================================


def register_tools(mcp: FastMCP) -> None:
    """
    Register MCP tools with the server.

    Only 2 tools are exposed:
    - kg_autopilot: For starting tasks
    - kg_track_changes: For tracking file modifications
    """

    @mcp.tool()
    async def kg_autopilot(
        project_id: str,
        user_text: str,
        search_query: Optional[str] = None,
        files: Optional[List[str]] = None,
        diff: Optional[str] = None,
        symbols: Optional[List[str]] = None,
        tags: Optional[List[str]] = None,
        k_hops: int = 2,
    ) -> Dict[str, Any]:
        """
        🚀 CALL THIS TOOL AT THE START OF EVERY TASK.

        ⚠️ DO NOT CALL THIS TOOL AFTER CREATING/MODIFYING FILES!
        Use kg_track_changes instead for file changes.

        WHEN TO USE THIS TOOL:
        ✅ Starting a new task or user request
        ✅ User asks a question (to retrieve past context)
        ✅ Resuming work on an existing project
        ✅ Need to understand goals, constraints, preferences

        WHEN NOT TO USE THIS TOOL:
        ❌ After creating a file → use kg_track_changes
        ❌ After modifying a file → use kg_track_changes
        ❌ To "save" or "record" task completion → NOT NEEDED
        ❌ To update goal status → NOT SUPPORTED HERE

        It automatically:
        1. Ingests and analyzes the user request (extracts goals, constraints, etc.)
        2. Returns the full context pack with active goals, preferences, pain points
        3. Optionally searches existing knowledge if search_query is provided

        Args:
            project_id: Project identifier (use workspace folder name)
            user_text: The user's message or request
            search_query: Optional query to search existing knowledge
            files: Optional list of file paths involved
            diff: Optional code diff
            symbols: Optional list of code symbols
            tags: Optional tags for categorization
            k_hops: Graph traversal depth (1-5, default 2)

        Returns:
            markdown: Formatted context pack (READ THIS CAREFULLY)
            interaction_id: ID of the ingested interaction
            extracted: Extracted entities (goals, constraints, etc.)
            search_results: Search results if search_query was provided
        """
        logger.info(f"kg_autopilot called for project {project_id}")

        result: Dict[str, Any] = {
            "markdown": "",
            "interaction_id": None,
            "extracted": {},
            "search_results": [],
        }

        try:
            # Step 1: Ingest the message
            pipeline = get_ingest_pipeline()
            ingest_result = await pipeline.process_message(
                project_id=project_id,
                user_text=user_text,
                files=files,
                diff=diff,
                symbols=symbols,
                tags=tags,
            )
            result["interaction_id"] = ingest_result.get("interaction_id")
            result["extracted"] = ingest_result.get("extracted", {})

            # Step 2: Build context pack
            builder = get_context_builder()
            context_result = await builder.build_context_pack(
                project_id=project_id,
                query=search_query,
                k_hops=k_hops,
            )
            result["markdown"] = context_result.get("markdown", "")
            # Add reminder about kg_track_changes
            result["markdown"] += "\n\n---\n*📝 REMINDER: Call `kg_track_changes` after EVERY file you create or modify to keep the knowledge graph updated.*"
            result["entities"] = context_result.get("entities", {})

            # Step 3: Optional search
            if search_query:
                repo = get_repository()
                search_results = await repo.fulltext_search(
                    project_id=project_id,
                    query=search_query,
                    limit=10,
                )
                result["search_results"] = search_results

            return serialize_response(result)

        except Exception as e:
            logger.error(f"kg_autopilot failed: {e}")
            result["error"] = str(e)
            result["markdown"] = f"# Error\n\nFailed to build context: {e}"
            return result

    @mcp.tool()
    async def kg_track_changes(
        project_id: str,
        changes: List[Dict[str, Any]],
        check_impact: bool = True,
    ) -> Dict[str, Any]:
        """
        🔗 CALL THIS TOOL AFTER EVERY FILE MODIFICATION.

        ⚠️ DO NOT use kg_autopilot for tracking file changes!

        ═══════════════════════════════════════════════════════════════════════════
        REQUIRED INPUT FORMAT
        ═══════════════════════════════════════════════════════════════════════════

        Each item in `changes` must be a dict with this structure:

        {
            "path": "/absolute/path/to/file.py",                 # REQUIRED
            "change_type": "created" | "modified" | "deleted",   # REQUIRED
            "language": "python",                                # Optional, auto-detected
            "symbols": [                                         # Optional but RECOMMENDED
                {
                    "name": "function_name",                     # e.g. "calculate_tax"
                    "kind": "function" | "method" | "class",     # REQUIRED
                    "line_start": 10,                            # REQUIRED
                    "line_end": 25,                              # REQUIRED
                    "signature": "def calculate_tax(...)",       # Optional
                    "change_type": "added" | "modified" | "deleted"  # REQUIRED
                }
            ]
        }

        ═══════════════════════════════════════════════════════════════════════════
        EXAMPLE CALLS
        ═══════════════════════════════════════════════════════════════════════════

        Example 1 - Creating a new file with a function:
        ```
        kg_track_changes(
            project_id="my-project",
            changes=[{
                "path": "/project/src/utils.py",
                "change_type": "created",
                "symbols": [{
                    "name": "format_currency",
                    "kind": "function",
                    "line_start": 1,
                    "line_end": 15,
                    "signature": "def format_currency(amount: float, currency: str = 'EUR') -> str",
                    "change_type": "added"
                }]
            }]
        )
        ```

        Example 2 - Modifying a class method:
        ```
        kg_track_changes(
            project_id="my-project",
            changes=[{
                "path": "/project/src/services/auth.py",
                "change_type": "modified",
                "symbols": [{
                    "name": "AuthService.validate_token",
                    "kind": "method",
                    "line_start": 45,
                    "line_end": 78,
                    "signature": "async def validate_token(self, token: str) -> bool",
                    "change_type": "modified"
                }]
            }]
        )
        ```

        Example 3 - Simple file tracking (no symbols):
        ```
        kg_track_changes(
            project_id="my-project",
            changes=[{"path": "/project/README.md", "change_type": "modified"}]
        )
        ```

        ═══════════════════════════════════════════════════════════════════════════
        WHAT THIS TOOL DOES
        ═══════════════════════════════════════════════════════════════════════════

        1. Creates/updates CodeArtifact node for each file
        2. Creates Symbol nodes for each symbol, linked via CONTAINS relationship
        3. AUTO-LINKS to ALL active goals (no need to specify goal IDs!)
        4. Stores line ranges and signatures for semantic queries
        5. Runs impact analysis to find affected tests/strategies

        ═══════════════════════════════════════════════════════════════════════════

        Args:
            project_id: Project identifier (use workspace folder name)
            changes: List of file changes with optional symbols (see format above)
            check_impact: Whether to run impact analysis (default: True)

        Returns:
            artifacts_linked: Number of files tracked
            symbols_linked: Number of symbols tracked
            auto_linked_goals: Goals automatically linked
            impact_analysis: Affected tests and strategies
        """
        logger.info(f"kg_track_changes called for {len(changes)} files")

        if not changes:
            return {
                "error": "changes is required and cannot be empty",
                "artifacts_linked": 0,
                "symbols_linked": 0,
                "impact_analysis": {},
            }

        result: Dict[str, Any] = {
            "artifacts_linked": 0,
            "symbols_linked": 0,
            "linked_paths": [],
            "linked_symbols": [],
            "auto_linked_goals": [],
            "impact_analysis": {},
        }

        try:
            repo = get_repository()

            # Step 1: Auto-link to active goals
            try:
                active_goals = await repo.get_active_goals(project_id)
                related_goal_ids = [g["id"] for g in active_goals if g.get("id")]
                result["auto_linked_goals"] = [
                    {"id": g["id"], "title": g.get("title", "Unknown")}
                    for g in active_goals if g.get("id")
                ]
                logger.info(f"Auto-linking to {len(related_goal_ids)} active goals")
            except Exception as goal_error:
                logger.warning(f"Could not fetch active goals: {goal_error}")
                related_goal_ids = []

            # Step 2: Process each file change
            all_paths = []
            for change in changes:
                path = change.get("path")
                if not path:
                    logger.warning("Skipping change without path")
                    continue

                all_paths.append(path)
                change_type = change.get("change_type", "modified")
                language = change.get("language")
                symbols = change.get("symbols", [])

                try:
                    # Create/update CodeArtifact
                    artifact = await repo.upsert_code_artifact(
                        project_id=project_id,
                        path=path,
                        kind="file",
                        language=language,
                        related_goal_ids=related_goal_ids,
                    )
                    artifact_id = artifact.get("id")
                    result["artifacts_linked"] += 1
                    result["linked_paths"].append(path)

                    # Create symbols if provided
                    if artifact_id and symbols:
                        for sym in symbols:
                            sym_name = sym.get("name")
                            if not sym_name:
                                continue

                            # Generate FQN: path:symbol_name
                            fqn = f"{path}:{sym_name}"

                            await repo.upsert_symbol(
                                artifact_id=artifact_id,
                                fqn=fqn,
                                name=sym_name,
                                kind=sym.get("kind", "function"),
                                line_start=sym.get("line_start"),
                                line_end=sym.get("line_end"),
                                signature=sym.get("signature"),
                                change_type=sym.get("change_type", "modified"),
                            )
                            result["symbols_linked"] += 1
                            result["linked_symbols"].append({
                                "fqn": fqn,
                                "name": sym_name,
                                "kind": sym.get("kind"),
                                "lines": f"{sym.get('line_start')}-{sym.get('line_end')}",
                            })

                except Exception as link_error:
                    logger.warning(f"Failed to link {path}: {link_error}")

            # Step 3: Impact analysis
            if check_impact and all_paths:
                impact = await repo.get_impact_for_artifacts(project_id, all_paths)
                result["impact_analysis"] = impact

            return serialize_response(result)

        except Exception as e:
            logger.error(f"kg_track_changes failed: {e}")
            result["error"] = str(e)
            return result

    logger.info("MCP tools registered: kg_autopilot, kg_track_changes (2 tools only)")
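
For orientation, the sketch below shows how `register_tools` would typically be wired into a FastMCP server. It is a minimal, hypothetical example: the actual entry point lives in `kg_mcp/main.py`, which is not shown in this section, and the server name and transport choice here are assumptions.

```python
# Hypothetical wiring sketch; the real startup code is in kg_mcp/main.py (not shown here).
from mcp.server.fastmcp import FastMCP

from kg_mcp.mcp.tools import register_tools

mcp = FastMCP("kg-mcp")  # server name is illustrative
register_tools(mcp)      # exposes only kg_autopilot and kg_track_changes

if __name__ == "__main__":
    mcp.run()  # defaults to the stdio transport in the MCP Python SDK
```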
kg_mcp/security/auth.py
ADDED
@@ -0,0 +1,121 @@
"""
Authentication middleware for MCP server.
Validates Bearer tokens for incoming requests.
"""

import logging
from typing import Callable, Optional

from kg_mcp.config import get_settings

logger = logging.getLogger(__name__)


class AuthenticationError(Exception):
    """Raised when authentication fails."""

    pass


def validate_bearer_token(token: str) -> bool:
    """
    Validate a bearer token against the configured token.

    Args:
        token: The token to validate

    Returns:
        True if valid, False otherwise
    """
    settings = get_settings()

    if not settings.kg_mcp_token:
        # No token configured - authentication disabled
        logger.warning("No authentication token configured - all requests allowed")
        return True

    return token == settings.kg_mcp_token


def extract_bearer_token(authorization_header: Optional[str]) -> Optional[str]:
    """
    Extract the bearer token from an Authorization header.

    Args:
        authorization_header: The full Authorization header value

    Returns:
        The token if present and properly formatted, None otherwise
    """
    if not authorization_header:
        return None

    parts = authorization_header.split()
    if len(parts) != 2:
        return None

    scheme, token = parts
    if scheme.lower() != "bearer":
        return None

    return token


def create_auth_middleware() -> Callable:
    """
    Create an authentication middleware function.

    Returns:
        A middleware function that validates requests
    """

    async def auth_middleware(request, call_next):
        """Middleware to check authentication on all requests."""
        settings = get_settings()

        # Skip auth if no token configured
        if not settings.kg_mcp_token:
            return await call_next(request)

        # Extract and validate token
        auth_header = request.headers.get("authorization")
        token = extract_bearer_token(auth_header)

        # Imported locally so starlette is only needed when auth is enforced;
        # used by both the 401 and 403 responses below.
        from starlette.responses import JSONResponse

        if not token:
            logger.warning(f"Missing or invalid Authorization header from {request.client.host}")
            # Return 401 Unauthorized
            return JSONResponse(
                status_code=401,
                content={"error": "Missing or invalid Authorization header"},
            )

        if not validate_bearer_token(token):
            logger.warning(f"Invalid token from {request.client.host}")
            # Return 403 Forbidden
            return JSONResponse(
                status_code=403,
                content={"error": "Invalid authentication token"},
            )

        return await call_next(request)

    return auth_middleware


def require_auth(func: Callable) -> Callable:
    """
    Decorator to require authentication for a function.

    This is a simple decorator for individual functions,
    not for use with the full middleware system.
    """
    import functools

    @functools.wraps(func)
    async def wrapper(*args, **kwargs):
        # In the context of MCP tools, auth is handled at the transport level
        # This decorator is a placeholder for additional auth checks if needed
        return await func(*args, **kwargs)

    return wrapper
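
To illustrate how the helpers above compose (the header and token values here are hypothetical): `extract_bearer_token` only accepts a two-part `Bearer <token>` header, and `validate_bearer_token` compares the result against the configured `kg_mcp_token` setting, allowing every request when no token is configured.

```python
# Hypothetical usage sketch; values are illustrative only.
from kg_mcp.security.auth import extract_bearer_token, validate_bearer_token

token = extract_bearer_token("Bearer s3cr3t-token")  # -> "s3cr3t-token"
missing = extract_bearer_token(None)                 # -> None
wrong_scheme = extract_bearer_token("Token abc")     # -> None (scheme must be "bearer")

# validate_bearer_token reads the configured token via get_settings();
# it returns True for any token when no kg_mcp_token is configured.
if token is not None and validate_bearer_token(token):
    print("request would be accepted")
else:
    print("request would be rejected with 401/403")
```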