glide-mcp 0.1.1__py3-none-any.whl → 0.1.8__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- glide_mcp-0.1.8.dist-info/METADATA +91 -0
- glide_mcp-0.1.8.dist-info/RECORD +12 -0
- glide_mcp-0.1.8.dist-info/entry_points.txt +2 -0
- src/conflicts/fibonnaci.py +64 -0
- src/core/LLM/cerebras_inference.py +1 -1
- src/kite_exclusive/commit_splitter/services/voyage_service.py +69 -49
- src/kite_exclusive/resolve_conflicts/breeze_inference.py +70 -0
- src/kite_exclusive/resolve_conflicts/morph_service.py +106 -0
- src/mcp/app.py +488 -106
- glide_mcp-0.1.1.dist-info/METADATA +0 -67
- glide_mcp-0.1.1.dist-info/RECORD +0 -9
- glide_mcp-0.1.1.dist-info/entry_points.txt +0 -2
- {glide_mcp-0.1.1.dist-info → glide_mcp-0.1.8.dist-info}/WHEEL +0 -0
- {glide_mcp-0.1.1.dist-info → glide_mcp-0.1.8.dist-info}/licenses/LICENSE +0 -0
src/mcp/app.py
CHANGED
|
@@ -1,18 +1,19 @@
|
|
|
1
1
|
from src.kite_exclusive.commit_splitter.services.voyage_service import embed_code
|
|
2
2
|
from src.core.LLM.cerebras_inference import complete
|
|
3
|
-
from
|
|
3
|
+
from src.kite_exclusive.resolve_conflicts.breeze_inference import resolve_merge_conflict
|
|
4
|
+
from src.kite_exclusive.resolve_conflicts.morph_service import apply_code_edit
|
|
5
|
+
from typing import Dict, List, Tuple, Optional
|
|
4
6
|
import subprocess
|
|
5
7
|
import json
|
|
6
8
|
import os
|
|
7
9
|
import asyncio
|
|
10
|
+
import re
|
|
8
11
|
from dotenv import load_dotenv
|
|
9
12
|
import helix
|
|
10
13
|
from fastmcp import FastMCP
|
|
11
14
|
load_dotenv()
|
|
12
15
|
|
|
13
|
-
mcp = FastMCP
|
|
14
|
-
|
|
15
|
-
HELIX_API_ENDPOINT = os.getenv("HELIX_API_ENDPOINT", "")
|
|
16
|
+
mcp = FastMCP("glide")
|
|
16
17
|
|
|
17
18
|
|
|
18
19
|
async def find_git_root(start_path: str = None) -> str:
|
|
@@ -25,8 +26,6 @@ async def find_git_root(start_path: str = None) -> str:
|
|
|
25
26
|
Returns:
|
|
26
27
|
Path to the git repository root, or None if not in a git repository
|
|
27
28
|
"""
|
|
28
|
-
# First, try to get workspace root from common environment variables
|
|
29
|
-
# MCP clients like Cursor might set these
|
|
30
29
|
env_vars = [
|
|
31
30
|
"MCP_WORKSPACE_ROOT",
|
|
32
31
|
"CURSOR_WORKSPACE_ROOT",
|
|
@@ -38,15 +37,12 @@ async def find_git_root(start_path: str = None) -> str:
|
|
|
38
37
|
for env_var in env_vars:
|
|
39
38
|
workspace_from_env = os.getenv(env_var)
|
|
40
39
|
if workspace_from_env and os.path.isdir(workspace_from_env):
|
|
41
|
-
# Try to find git root starting from this directory
|
|
42
40
|
start_path = workspace_from_env
|
|
43
41
|
break
|
|
44
42
|
|
|
45
43
|
if start_path is None:
|
|
46
44
|
start_path = os.getcwd()
|
|
47
45
|
|
|
48
|
-
# Use git rev-parse --show-toplevel to find the git root
|
|
49
|
-
# Use asyncio.create_subprocess_exec directly to avoid circular dependency
|
|
50
46
|
try:
|
|
51
47
|
process = await asyncio.create_subprocess_exec(
|
|
52
48
|
"git",
|
|
@@ -64,40 +60,25 @@ async def find_git_root(start_path: str = None) -> str:
|
|
|
64
60
|
if git_root:
|
|
65
61
|
return git_root
|
|
66
62
|
except (FileNotFoundError, OSError):
|
|
67
|
-
# Git not found or other OS error
|
|
68
63
|
pass
|
|
69
64
|
|
|
70
65
|
return None
|
|
71
66
|
|
|
72
67
|
|
|
73
|
-
# Helper function to run subprocess calls asynchronously to avoid blocking stdio
|
|
74
68
|
async def run_subprocess(args: List[str], **kwargs) -> subprocess.CompletedProcess:
|
|
75
69
|
"""Run subprocess calls asynchronously to avoid blocking stdio transport."""
|
|
76
|
-
# Use asyncio.create_subprocess_exec instead of subprocess.run to avoid blocking
|
|
77
70
|
capture_output = kwargs.pop('capture_output', False)
|
|
78
71
|
text = kwargs.pop('text', False)
|
|
79
|
-
check = kwargs.pop('check', False)
|
|
72
|
+
check = kwargs.pop('check', False)
|
|
80
73
|
|
|
81
|
-
# CRITICAL: Set stdin to DEVNULL to prevent subprocess from inheriting
|
|
82
|
-
# the MCP stdio stdin, which causes deadlocks
|
|
83
74
|
stdin = kwargs.pop('stdin', asyncio.subprocess.DEVNULL)
|
|
84
|
-
|
|
85
|
-
# CRITICAL: Always capture stdout/stderr to PIPE to prevent subprocess output
|
|
86
|
-
# from leaking into the MCP stdio communication channel (which breaks JSON parsing)
|
|
87
|
-
# In stdio mode, parent's stdout/stderr IS the MCP communication channel, so we must
|
|
88
|
-
# always capture subprocess output to prevent git messages from breaking JSON protocol
|
|
89
75
|
stdout = asyncio.subprocess.PIPE
|
|
90
76
|
stderr = asyncio.subprocess.PIPE
|
|
91
|
-
# Remove any stdout/stderr from kwargs since we're overriding them
|
|
92
77
|
kwargs.pop('stdout', None)
|
|
93
78
|
kwargs.pop('stderr', None)
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
|
|
97
|
-
# Explicitly remove check and other invalid params to prevent errors
|
|
98
|
-
kwargs.pop('check', None) # Extra safety: ensure check is removed
|
|
99
|
-
kwargs.pop('timeout', None) # timeout handled by asyncio.wait_for elsewhere
|
|
100
|
-
kwargs.pop('input', None) # input not supported in async subprocess
|
|
79
|
+
kwargs.pop('check', None)
|
|
80
|
+
kwargs.pop('timeout', None)
|
|
81
|
+
kwargs.pop('input', None)
|
|
101
82
|
|
|
102
83
|
valid_exec_kwargs = {}
|
|
103
84
|
allowed_params = {'cwd', 'env', 'start_new_session', 'shell', 'preexec_fn',
|
|
@@ -106,10 +87,6 @@ async def run_subprocess(args: List[str], **kwargs) -> subprocess.CompletedProce
|
|
|
106
87
|
for key, value in kwargs.items():
|
|
107
88
|
if key in allowed_params:
|
|
108
89
|
valid_exec_kwargs[key] = value
|
|
109
|
-
# Silently ignore other parameters
|
|
110
|
-
|
|
111
|
-
# Final safety check: ensure check is not in valid_exec_kwargs
|
|
112
|
-
assert 'check' not in valid_exec_kwargs, "check parameter should not be passed to subprocess"
|
|
113
90
|
|
|
114
91
|
process = await asyncio.create_subprocess_exec(
|
|
115
92
|
*args,
|
|
@@ -121,7 +98,6 @@ async def run_subprocess(args: List[str], **kwargs) -> subprocess.CompletedProce
|
|
|
121
98
|
|
|
122
99
|
stdout_data, stderr_data = await process.communicate()
|
|
123
100
|
|
|
124
|
-
# Create a CompletedProcess-like object
|
|
125
101
|
result = subprocess.CompletedProcess(
|
|
126
102
|
args=args,
|
|
127
103
|
returncode=process.returncode,
|
|
@@ -129,7 +105,6 @@ async def run_subprocess(args: List[str], **kwargs) -> subprocess.CompletedProce
|
|
|
129
105
|
stderr=stderr_data.decode('utf-8') if text and stderr_data else stderr_data,
|
|
130
106
|
)
|
|
131
107
|
|
|
132
|
-
# If check=True, raise CalledProcessError on non-zero return code
|
|
133
108
|
if check and result.returncode != 0:
|
|
134
109
|
raise subprocess.CalledProcessError(
|
|
135
110
|
result.returncode, args, result.stdout, result.stderr
|
|
@@ -137,19 +112,6 @@ async def run_subprocess(args: List[str], **kwargs) -> subprocess.CompletedProce
|
|
|
137
112
|
|
|
138
113
|
return result
|
|
139
114
|
|
|
140
|
-
@mcp.tool
|
|
141
|
-
async def draft_pr():
|
|
142
|
-
instructions = [
|
|
143
|
-
"step 1: grep for CONTRIBUTING.md or similar documentation in the repository. If unable to find it, look for any contributing guidelines in the repository.",
|
|
144
|
-
"step 2: if not found, follow best practices for writing a pull request.",
|
|
145
|
-
"step 3: use the edit file tool to write a new PR_DRAFT.md file for the project.",
|
|
146
|
-
]
|
|
147
|
-
result = "draft pr instructions: \n\n"
|
|
148
|
-
for i, instruction in enumerate(instructions, 1):
|
|
149
|
-
result += f"{i}. {instruction}\n\n"
|
|
150
|
-
return result
|
|
151
|
-
|
|
152
|
-
|
|
153
115
|
@mcp.tool(
|
|
154
116
|
name="split_commit",
|
|
155
117
|
description="Splits a large unified diff / commit into smaller semantically-grouped commits.",
|
|
@@ -163,18 +125,13 @@ async def split_commit(workspace_root: str = None):
|
|
|
163
125
|
If not provided, will attempt to detect from environment variables or current directory.
|
|
164
126
|
"""
|
|
165
127
|
try:
|
|
166
|
-
# Detect the git repository root
|
|
167
128
|
if workspace_root:
|
|
168
|
-
# If provided, use it directly
|
|
169
129
|
detected_root = await find_git_root(workspace_root)
|
|
170
130
|
if detected_root:
|
|
171
131
|
workspace_root = detected_root
|
|
172
132
|
elif not os.path.isdir(workspace_root):
|
|
173
133
|
return f"error: provided workspace_root '{workspace_root}' does not exist or is not a directory."
|
|
174
|
-
# If workspace_root is provided but not a git repo, we'll still try to use it
|
|
175
|
-
# (git commands will fail with a clear error if it's not a git repo)
|
|
176
134
|
else:
|
|
177
|
-
# Try to auto-detect
|
|
178
135
|
workspace_root = await find_git_root()
|
|
179
136
|
if not workspace_root:
|
|
180
137
|
cwd = os.getcwd()
|
|
@@ -186,8 +143,6 @@ async def split_commit(workspace_root: str = None):
|
|
|
186
143
|
f" 2. Provide the workspace_root parameter with the path to your git repository root."
|
|
187
144
|
)
|
|
188
145
|
|
|
189
|
-
# 1) Collect changed files and per-file unified diffs
|
|
190
|
-
# Check staged, unstaged, and untracked files
|
|
191
146
|
staged_proc = await run_subprocess(
|
|
192
147
|
["git", "diff", "--cached", "--name-only"],
|
|
193
148
|
capture_output=True,
|
|
@@ -207,9 +162,6 @@ async def split_commit(workspace_root: str = None):
|
|
|
207
162
|
cwd=workspace_root
|
|
208
163
|
)
|
|
209
164
|
|
|
210
|
-
# Check if git commands failed (might indicate not a git repo)
|
|
211
|
-
# Note: git commands can return non-zero even in valid repos (e.g., no changes)
|
|
212
|
-
# Only error if we get explicit "not a git repository" messages
|
|
213
165
|
error_messages = []
|
|
214
166
|
if staged_proc.returncode != 0 and staged_proc.stderr:
|
|
215
167
|
error_messages.append(staged_proc.stderr)
|
|
@@ -238,7 +190,6 @@ async def split_commit(workspace_root: str = None):
|
|
|
238
190
|
|
|
239
191
|
file_to_diff: Dict[str, str] = {}
|
|
240
192
|
for path in changed_files:
|
|
241
|
-
# Try staged diff first, then unstaged
|
|
242
193
|
p = await run_subprocess(
|
|
243
194
|
["git", "diff", "--cached", "--", path],
|
|
244
195
|
capture_output=True,
|
|
@@ -257,69 +208,54 @@ async def split_commit(workspace_root: str = None):
|
|
|
257
208
|
if p.returncode == 0 and p.stdout.strip():
|
|
258
209
|
file_to_diff[path] = p.stdout
|
|
259
210
|
else:
|
|
260
|
-
# For untracked/new files, read the entire file content as the "diff"
|
|
261
|
-
# Paths from git are relative to workspace root, so join them
|
|
262
211
|
file_path = os.path.join(workspace_root, path) if not os.path.isabs(path) else path
|
|
263
212
|
try:
|
|
264
213
|
with open(file_path, "r", encoding="utf-8") as f:
|
|
265
214
|
content = f.read()
|
|
266
|
-
# Format as a new file addition diff
|
|
267
215
|
file_to_diff[path] = (
|
|
268
216
|
f"diff --git a/{path} b/{path}\nnew file mode 100644\n--- /dev/null\n+++ b/{path}\n@@ -0,0 +1,{len(content.splitlines())} @@\n+{chr(10).join('+'+line for line in content.splitlines())}"
|
|
269
217
|
)
|
|
270
218
|
except (FileNotFoundError, UnicodeDecodeError):
|
|
271
|
-
# File might not exist or not be text
|
|
272
219
|
continue
|
|
273
220
|
|
|
274
221
|
if not file_to_diff:
|
|
275
222
|
return "no per-file diffs produced"
|
|
276
223
|
|
|
277
|
-
|
|
278
|
-
suggestions: List[Tuple[str, str]] = [] # (file_path, suggested_message)
|
|
224
|
+
suggestions: List[Tuple[str, str]] = []
|
|
279
225
|
|
|
280
|
-
# Connect Helix client - supports both local and cloud via environment variables
|
|
281
226
|
use_local = os.getenv("HELIX_LOCAL", "false").lower() == "true"
|
|
282
227
|
|
|
283
228
|
if use_local:
|
|
284
229
|
db = helix.Client(local=True)
|
|
285
230
|
else:
|
|
286
|
-
# Use cloud deployment from helix.toml (production.fly)
|
|
287
|
-
# Helix SDK automatically reads helix.toml and uses the configured deployment
|
|
288
231
|
api_endpoint = os.getenv("HELIX_API_ENDPOINT", "")
|
|
289
|
-
if not
|
|
290
|
-
return "error:
|
|
232
|
+
if not api_endpoint:
|
|
233
|
+
return "error: HELIX_API_ENDPOINT is not set"
|
|
291
234
|
db = helix.Client(local=False, api_endpoint=api_endpoint)
|
|
292
235
|
|
|
293
236
|
for file_path, diff_text in file_to_diff.items():
|
|
294
|
-
# 2a) Embed with timeout (5 seconds)
|
|
295
237
|
try:
|
|
296
238
|
vec_batch = await asyncio.wait_for(
|
|
297
239
|
asyncio.to_thread(embed_code, diff_text, file_path=file_path),
|
|
298
240
|
timeout=5
|
|
299
241
|
)
|
|
300
242
|
except asyncio.TimeoutError:
|
|
301
|
-
return f"error: embedding timed out for {file_path}
|
|
243
|
+
return f"error: embedding timed out for {file_path}"
|
|
302
244
|
except Exception as embed_exc:
|
|
303
|
-
return f"error: embedding failed for {file_path}: {str(embed_exc)}
|
|
245
|
+
return f"error: embedding failed for {file_path}: {str(embed_exc)}"
|
|
304
246
|
|
|
305
247
|
if not vec_batch:
|
|
306
248
|
return f"error: embedding returned empty result for {file_path}"
|
|
307
249
|
vec = vec_batch[0]
|
|
308
250
|
|
|
309
251
|
try:
|
|
310
|
-
# 3) ANN search for similar diffs; k kept small to keep it snappy
|
|
311
|
-
# Add timeout to database query (5 seconds)
|
|
312
252
|
res = await asyncio.wait_for(
|
|
313
253
|
asyncio.to_thread(db.query, "getSimilarDiffsByVector", {"vec": vec, "k": 8}),
|
|
314
254
|
timeout=5
|
|
315
255
|
)
|
|
316
|
-
except asyncio.TimeoutError:
|
|
317
|
-
# If database query times out, continue without examples
|
|
256
|
+
except (asyncio.TimeoutError, Exception):
|
|
318
257
|
res = []
|
|
319
|
-
|
|
320
|
-
# If database query fails, continue without examples
|
|
321
|
-
res = []
|
|
322
|
-
# Result rows include commit_message, summary, file_path
|
|
258
|
+
|
|
323
259
|
examples = []
|
|
324
260
|
if isinstance(res, list):
|
|
325
261
|
for row in res[:5]:
|
|
@@ -334,12 +270,18 @@ async def split_commit(workspace_root: str = None):
|
|
|
334
270
|
|
|
335
271
|
example_block = "\n\n".join(examples) if examples else ""
|
|
336
272
|
|
|
337
|
-
# Helper function to detect and reject generic messages
|
|
338
273
|
def is_generic_message(msg: str) -> bool:
|
|
339
274
|
"""Check if a commit message is too generic."""
|
|
340
275
|
if not msg:
|
|
341
276
|
return True
|
|
342
277
|
msg_lower = msg.lower().strip()
|
|
278
|
+
|
|
279
|
+
# Reject reasoning tag patterns
|
|
280
|
+
if ("redacted_reasoning" in msg_lower or
|
|
281
|
+
"<think>" in msg_lower or
|
|
282
|
+
"</think>" in msg_lower):
|
|
283
|
+
return True
|
|
284
|
+
|
|
343
285
|
generic_patterns = [
|
|
344
286
|
"update ",
|
|
345
287
|
"fix bug",
|
|
@@ -352,11 +294,9 @@ async def split_commit(workspace_root: str = None):
|
|
|
352
294
|
"minor",
|
|
353
295
|
"temporary",
|
|
354
296
|
]
|
|
355
|
-
# Check if message starts with generic patterns
|
|
356
297
|
for pattern in generic_patterns:
|
|
357
298
|
if msg_lower.startswith(pattern):
|
|
358
299
|
return True
|
|
359
|
-
# Check if message is just a filename (e.g., "Update app.py")
|
|
360
300
|
if msg_lower.startswith("update ") and len(msg_lower.split()) <= 3:
|
|
361
301
|
return True
|
|
362
302
|
return False
|
|
@@ -397,47 +337,55 @@ EXAMPLES OF GOOD MESSAGES:
|
|
|
397
337
|
- "docs(readme): add installation instructions"
|
|
398
338
|
|
|
399
339
|
EXAMPLES OF BAD MESSAGES (DO NOT USE):
|
|
400
|
-
- "Update app.py"
|
|
401
|
-
- "Fix bug"
|
|
402
|
-
- "Refactor code"
|
|
403
|
-
- "Changes"
|
|
340
|
+
- "Update app.py"
|
|
341
|
+
- "Fix bug"
|
|
342
|
+
- "Refactor code"
|
|
343
|
+
- "Changes"
|
|
404
344
|
|
|
405
345
|
Remember: Your output must be SPECIFIC and describe WHAT changed, not generic file operations."""
|
|
406
346
|
)
|
|
407
347
|
user_prompt = (
|
|
408
|
-
"
|
|
348
|
+
"/no_think\n\nGenerate a commit message for this diff. Consider similar past changes if given.\n\n"
|
|
409
349
|
f"DIFF (truncated if long):\n{diff_text}\n\n"
|
|
410
350
|
f"SIMILAR EXAMPLES:\n{example_block}\n\n"
|
|
411
351
|
"Output ONLY the commit message title, nothing else."
|
|
412
352
|
)
|
|
413
353
|
|
|
414
|
-
# Call Cerebras inference - should always work
|
|
415
354
|
try:
|
|
416
355
|
raw_response = await asyncio.wait_for(
|
|
417
|
-
complete(user_prompt, system=system_prompt,
|
|
356
|
+
complete(user_prompt, system=system_prompt, temperature=0.0),
|
|
418
357
|
timeout=30.0
|
|
419
358
|
)
|
|
420
359
|
except asyncio.TimeoutError:
|
|
421
|
-
return f"error: Cerebras inference timed out for {file_path}
|
|
360
|
+
return f"error: Cerebras inference timed out for {file_path}"
|
|
422
361
|
except Exception as llm_exc:
|
|
423
|
-
return f"error: Cerebras inference failed for {file_path}: {str(llm_exc)}
|
|
362
|
+
return f"error: Cerebras inference failed for {file_path}: {str(llm_exc)}"
|
|
424
363
|
|
|
425
364
|
if not raw_response:
|
|
426
365
|
return f"error: Cerebras inference returned empty response for {file_path}"
|
|
427
366
|
|
|
428
|
-
|
|
367
|
+
# Strip reasoning tags from response (e.g., <think>, </think>, <think>, etc.)
|
|
368
|
+
cleaned_response = raw_response.strip()
|
|
369
|
+
# Remove XML-like reasoning tags
|
|
370
|
+
cleaned_response = re.sub(r'<[^>]*think[^>]*>', '', cleaned_response, flags=re.IGNORECASE)
|
|
371
|
+
cleaned_response = re.sub(r'<[^>]*reasoning[^>]*>', '', cleaned_response, flags=re.IGNORECASE)
|
|
372
|
+
cleaned_response = re.sub(r'<[^>]*redacted[^>]*>', '', cleaned_response, flags=re.IGNORECASE)
|
|
373
|
+
|
|
374
|
+
# Extract first non-empty line after cleaning
|
|
375
|
+
lines = [line.strip() for line in cleaned_response.splitlines() if line.strip()]
|
|
376
|
+
if not lines:
|
|
377
|
+
return f"error: No valid commit message found in response for {file_path} after cleaning reasoning tags"
|
|
378
|
+
|
|
379
|
+
commit_message = lines[0]
|
|
429
380
|
|
|
430
|
-
# Remove quotes if present
|
|
431
381
|
if commit_message.startswith('"') and commit_message.endswith('"'):
|
|
432
382
|
commit_message = commit_message[1:-1]
|
|
433
383
|
if commit_message.startswith("'") and commit_message.endswith("'"):
|
|
434
384
|
commit_message = commit_message[1:-1]
|
|
435
385
|
|
|
436
|
-
# Validate the message is not generic - fail if it is
|
|
437
386
|
if not commit_message or is_generic_message(commit_message):
|
|
438
387
|
return (
|
|
439
|
-
f"error: Cerebras inference generated generic message '{commit_message}' for {file_path}
|
|
440
|
-
f"Please improve the system prompt or check the inference output."
|
|
388
|
+
f"error: Cerebras inference generated generic message '{commit_message}' for {file_path}"
|
|
441
389
|
)
|
|
442
390
|
|
|
443
391
|
suggestions.append((file_path, commit_message))
|
|
@@ -445,7 +393,6 @@ Remember: Your output must be SPECIFIC and describe WHAT changed, not generic fi
|
|
|
445
393
|
if not suggestions:
|
|
446
394
|
return "no commit suggestions could be generated"
|
|
447
395
|
|
|
448
|
-
# 4) Commit each file separately with its suggested message
|
|
449
396
|
for file_path, message in suggestions:
|
|
450
397
|
try:
|
|
451
398
|
await run_subprocess(
|
|
@@ -465,21 +412,456 @@ Remember: Your output must be SPECIFIC and describe WHAT changed, not generic fi
|
|
|
465
412
|
"Ensure the file exists, is not conflicted, and git is functioning properly."
|
|
466
413
|
)
|
|
467
414
|
|
|
468
|
-
# 5) Return a compact report of what was committed
|
|
469
415
|
report = {"commits": [{"file": f, "message": m} for f, m in suggestions]}
|
|
470
416
|
return json.dumps(report, indent=2)
|
|
471
417
|
|
|
472
418
|
except Exception as e:
|
|
473
|
-
return (
|
|
474
|
-
|
|
475
|
-
|
|
476
|
-
|
|
419
|
+
return f"failed to split commit: {str(e)}"
|
|
420
|
+
|
|
421
|
+
|
|
422
|
+
_resolved_conflicts: Dict[str, Dict[str, str]] = {}
|
|
423
|
+
|
|
424
|
+
|
|
425
|
+
async def get_conflicted_files(workspace_root: str) -> List[str]:
|
|
426
|
+
"""
|
|
427
|
+
Find all files with merge conflicts in the git repository.
|
|
428
|
+
|
|
429
|
+
Checks both git's merge state and files with conflict markers directly.
|
|
430
|
+
|
|
431
|
+
Args:
|
|
432
|
+
workspace_root: Path to the git repository root
|
|
433
|
+
|
|
434
|
+
Returns:
|
|
435
|
+
List of file paths with merge conflicts
|
|
436
|
+
"""
|
|
437
|
+
conflicted_files = set()
|
|
438
|
+
|
|
439
|
+
# Method 1: Check git's merge state (for active merges)
|
|
440
|
+
try:
|
|
441
|
+
proc = await run_subprocess(
|
|
442
|
+
["git", "ls-files", "-u"],
|
|
443
|
+
capture_output=True,
|
|
444
|
+
text=True,
|
|
445
|
+
cwd=workspace_root
|
|
477
446
|
)
|
|
447
|
+
|
|
448
|
+
if proc.returncode == 0 and proc.stdout.strip():
|
|
449
|
+
# Extract unique file paths (git ls-files -u shows multiple entries per stage)
|
|
450
|
+
for line in proc.stdout.splitlines():
|
|
451
|
+
if line.strip():
|
|
452
|
+
# Format: stage_number mode hash filename
|
|
453
|
+
parts = line.split()
|
|
454
|
+
if len(parts) >= 4:
|
|
455
|
+
file_path = ' '.join(parts[3:]) # Handle filenames with spaces
|
|
456
|
+
conflicted_files.add(file_path)
|
|
457
|
+
except Exception:
|
|
458
|
+
pass
|
|
459
|
+
|
|
460
|
+
# Method 2: Scan files for conflict markers (works even if not in active merge)
|
|
461
|
+
try:
|
|
462
|
+
# Get all tracked files (or modified files)
|
|
463
|
+
proc = await run_subprocess(
|
|
464
|
+
["git", "ls-files"],
|
|
465
|
+
capture_output=True,
|
|
466
|
+
text=True,
|
|
467
|
+
cwd=workspace_root
|
|
468
|
+
)
|
|
469
|
+
|
|
470
|
+
if proc.returncode == 0:
|
|
471
|
+
all_files = [line.strip() for line in proc.stdout.splitlines() if line.strip()]
|
|
472
|
+
|
|
473
|
+
# Also check untracked/modified files
|
|
474
|
+
proc_modified = await run_subprocess(
|
|
475
|
+
["git", "ls-files", "--others", "--exclude-standard"],
|
|
476
|
+
capture_output=True,
|
|
477
|
+
text=True,
|
|
478
|
+
cwd=workspace_root
|
|
479
|
+
)
|
|
480
|
+
if proc_modified.returncode == 0:
|
|
481
|
+
all_files.extend([line.strip() for line in proc_modified.stdout.splitlines() if line.strip()])
|
|
482
|
+
|
|
483
|
+
# Pattern to match conflict markers
|
|
484
|
+
conflict_pattern = re.compile(r'^<<<<<<<', re.MULTILINE)
|
|
485
|
+
|
|
486
|
+
for file_path in all_files:
|
|
487
|
+
full_path = os.path.join(workspace_root, file_path) if not os.path.isabs(file_path) else file_path
|
|
488
|
+
|
|
489
|
+
# Skip if already found via git ls-files -u
|
|
490
|
+
if file_path in conflicted_files:
|
|
491
|
+
continue
|
|
492
|
+
|
|
493
|
+
try:
|
|
494
|
+
# Only check text files (skip binary files)
|
|
495
|
+
with open(full_path, "r", encoding="utf-8", errors="ignore") as f:
|
|
496
|
+
content = f.read()
|
|
497
|
+
# Check for conflict markers
|
|
498
|
+
if conflict_pattern.search(content):
|
|
499
|
+
conflicted_files.add(file_path)
|
|
500
|
+
except (FileNotFoundError, UnicodeDecodeError, PermissionError):
|
|
501
|
+
# Skip files we can't read
|
|
502
|
+
continue
|
|
503
|
+
except Exception:
|
|
504
|
+
pass
|
|
505
|
+
|
|
506
|
+
return sorted(list(conflicted_files))
|
|
507
|
+
|
|
508
|
+
|
|
509
|
+
def extract_conflict_content(file_path: str, workspace_root: str) -> Tuple[str, str]:
|
|
510
|
+
"""
|
|
511
|
+
Extract conflict content from a file.
|
|
512
|
+
|
|
513
|
+
Args:
|
|
514
|
+
file_path: Relative path to the conflicted file
|
|
515
|
+
workspace_root: Path to the git repository root
|
|
516
|
+
|
|
517
|
+
Returns:
|
|
518
|
+
Tuple of (original_file_content, conflict_text_with_markers)
|
|
519
|
+
"""
|
|
520
|
+
full_path = os.path.join(workspace_root, file_path) if not os.path.isabs(file_path) else file_path
|
|
521
|
+
|
|
522
|
+
try:
|
|
523
|
+
with open(full_path, "r", encoding="utf-8") as f:
|
|
524
|
+
original_content = f.read()
|
|
525
|
+
|
|
526
|
+
# Extract conflict sections (between <<<<<<< and >>>>>>> markers)
|
|
527
|
+
conflict_pattern = re.compile(
|
|
528
|
+
r'<<<<<<<[^\n]*\n(.*?)\n=======\n(.*?)\n>>>>>>>[^\n]*',
|
|
529
|
+
re.DOTALL
|
|
530
|
+
)
|
|
531
|
+
|
|
532
|
+
conflicts = conflict_pattern.findall(original_content)
|
|
533
|
+
if not conflicts:
|
|
534
|
+
# If no conflicts found with standard pattern, return the whole file
|
|
535
|
+
# as the conflict (might be a different conflict format)
|
|
536
|
+
return original_content, original_content
|
|
537
|
+
|
|
538
|
+
# Combine all conflict sections
|
|
539
|
+
conflict_texts = []
|
|
540
|
+
for match in conflict_pattern.finditer(original_content):
|
|
541
|
+
conflict_texts.append(match.group(0))
|
|
542
|
+
|
|
543
|
+
conflict_text = "\n\n".join(conflict_texts)
|
|
544
|
+
return original_content, conflict_text
|
|
545
|
+
except (FileNotFoundError, UnicodeDecodeError) as e:
|
|
546
|
+
raise RuntimeError(f"Failed to read file {file_path}: {str(e)}")
|
|
547
|
+
|
|
548
|
+
|
|
549
|
+
def format_resolution_as_edit_snippet(original_content: str, conflict_text: str, resolution: str) -> Tuple[str, str]:
|
|
550
|
+
"""
|
|
551
|
+
Format the resolution as an edit snippet for morphllm.
|
|
552
|
+
|
|
553
|
+
Args:
|
|
554
|
+
original_content: Original file content with conflicts
|
|
555
|
+
conflict_text: The conflict section with markers
|
|
556
|
+
resolution: The resolved content from breeze model
|
|
557
|
+
|
|
558
|
+
Returns:
|
|
559
|
+
Tuple of (instructions, edit_snippet)
|
|
560
|
+
"""
|
|
561
|
+
# Find the conflict section in the original content
|
|
562
|
+
conflict_start = original_content.find(conflict_text)
|
|
563
|
+
if conflict_start == -1:
|
|
564
|
+
# If exact match not found, try to find by markers
|
|
565
|
+
conflict_start = original_content.find("<<<<<<<")
|
|
566
|
+
|
|
567
|
+
if conflict_start == -1:
|
|
568
|
+
# Fallback: replace the entire conflict text
|
|
569
|
+
instructions = "Replace the merge conflict section with the resolved code."
|
|
570
|
+
edit_snippet = resolution
|
|
571
|
+
return instructions, edit_snippet
|
|
572
|
+
|
|
573
|
+
# Find lines before and after conflict
|
|
574
|
+
lines_before = original_content[:conflict_start].splitlines()
|
|
575
|
+
lines_after = original_content[conflict_start + len(conflict_text):].splitlines()
|
|
576
|
+
|
|
577
|
+
# Get context lines (last 3 lines before, first 3 lines after)
|
|
578
|
+
context_before = "\n".join(lines_before[-3:]) if lines_before else ""
|
|
579
|
+
context_after = "\n".join(lines_after[:3]) if lines_after else ""
|
|
580
|
+
|
|
581
|
+
# Determine comment style based on file extension (simple heuristic)
|
|
582
|
+
# Default to // for most languages
|
|
583
|
+
comment_style = "//"
|
|
584
|
+
|
|
585
|
+
# Build edit snippet
|
|
586
|
+
edit_lines = []
|
|
587
|
+
if context_before:
|
|
588
|
+
edit_lines.append(context_before)
|
|
589
|
+
edit_lines.append(f"{comment_style} ... existing code ...")
|
|
590
|
+
edit_lines.append(resolution)
|
|
591
|
+
edit_lines.append(f"{comment_style} ... existing code ...")
|
|
592
|
+
if context_after:
|
|
593
|
+
edit_lines.append(context_after)
|
|
594
|
+
|
|
595
|
+
edit_snippet = "\n".join(edit_lines)
|
|
596
|
+
instructions = "Replace the merge conflict markers and conflicting code sections with the resolved code."
|
|
597
|
+
|
|
598
|
+
return instructions, edit_snippet
|
|
478
599
|
|
|
479
600
|
|
|
480
|
-
@mcp.tool
|
|
481
|
-
|
|
482
|
-
|
|
601
|
+
@mcp.tool(
|
|
602
|
+
name="resolve_conflict",
|
|
603
|
+
description="Detects merge conflicts in the repository, resolves them using AI, and applies the changes with MorphLLM. Changes are written to files immediately. Use revert_conflict_resolution to undo changes if needed.",
|
|
604
|
+
)
|
|
605
|
+
async def resolve_conflict(workspace_root: Optional[str] = None):
|
|
606
|
+
"""
|
|
607
|
+
Detect and resolve merge conflicts using AI. Changes are applied immediately to files.
|
|
608
|
+
Review the previews and confirm or revert as needed.
|
|
609
|
+
|
|
610
|
+
Args:
|
|
611
|
+
workspace_root: Optional path to the workspace root directory.
|
|
612
|
+
If not provided, will attempt to detect from environment variables or current directory.
|
|
613
|
+
|
|
614
|
+
Returns:
|
|
615
|
+
JSON string with applied resolutions and previews for review
|
|
616
|
+
"""
|
|
617
|
+
global _resolved_conflicts
|
|
618
|
+
|
|
619
|
+
try:
|
|
620
|
+
if workspace_root:
|
|
621
|
+
detected_root = await find_git_root(workspace_root)
|
|
622
|
+
if detected_root:
|
|
623
|
+
workspace_root = detected_root
|
|
624
|
+
elif not os.path.isdir(workspace_root):
|
|
625
|
+
return json.dumps({"error": f"provided workspace_root '{workspace_root}' does not exist or is not a directory."})
|
|
626
|
+
else:
|
|
627
|
+
workspace_root = await find_git_root()
|
|
628
|
+
if not workspace_root:
|
|
629
|
+
cwd = os.getcwd()
|
|
630
|
+
return json.dumps({
|
|
631
|
+
"error": "could not detect git repository root.",
|
|
632
|
+
"current_directory": cwd,
|
|
633
|
+
"message": "Please either run this tool from within a git repository, or provide the workspace_root parameter."
|
|
634
|
+
})
|
|
635
|
+
|
|
636
|
+
# Find conflicted files
|
|
637
|
+
conflicted_files = await get_conflicted_files(workspace_root)
|
|
638
|
+
|
|
639
|
+
if not conflicted_files:
|
|
640
|
+
return json.dumps({
|
|
641
|
+
"message": "No merge conflicts detected.",
|
|
642
|
+
"resolved_files": []
|
|
643
|
+
})
|
|
644
|
+
|
|
645
|
+
resolved_files = []
|
|
646
|
+
_resolved_conflicts = {} # Store for potential revert
|
|
647
|
+
|
|
648
|
+
for file_path in conflicted_files:
|
|
649
|
+
try:
|
|
650
|
+
# Extract conflict content
|
|
651
|
+
original_content, conflict_text = extract_conflict_content(file_path, workspace_root)
|
|
652
|
+
|
|
653
|
+
# Get resolution from breeze model
|
|
654
|
+
try:
|
|
655
|
+
resolution = await asyncio.wait_for(
|
|
656
|
+
resolve_merge_conflict(conflict_text),
|
|
657
|
+
timeout=60.0
|
|
658
|
+
)
|
|
659
|
+
except asyncio.TimeoutError:
|
|
660
|
+
resolved_files.append({
|
|
661
|
+
"file": file_path,
|
|
662
|
+
"status": "error",
|
|
663
|
+
"error": "Breeze model timeout"
|
|
664
|
+
})
|
|
665
|
+
continue
|
|
666
|
+
except Exception as e:
|
|
667
|
+
resolved_files.append({
|
|
668
|
+
"file": file_path,
|
|
669
|
+
"status": "error",
|
|
670
|
+
"error": f"Breeze model failed: {str(e)}"
|
|
671
|
+
})
|
|
672
|
+
continue
|
|
673
|
+
|
|
674
|
+
if not resolution:
|
|
675
|
+
resolved_files.append({
|
|
676
|
+
"file": file_path,
|
|
677
|
+
"status": "error",
|
|
678
|
+
"error": "Breeze model returned empty resolution"
|
|
679
|
+
})
|
|
680
|
+
continue
|
|
681
|
+
|
|
682
|
+
# Format resolution as edit snippet
|
|
683
|
+
instructions, edit_snippet = format_resolution_as_edit_snippet(
|
|
684
|
+
original_content, conflict_text, resolution
|
|
685
|
+
)
|
|
686
|
+
|
|
687
|
+
# Apply via morphllm
|
|
688
|
+
try:
|
|
689
|
+
final_content = await asyncio.wait_for(
|
|
690
|
+
apply_code_edit(original_content, instructions, edit_snippet),
|
|
691
|
+
timeout=60.0
|
|
692
|
+
)
|
|
693
|
+
except asyncio.TimeoutError:
|
|
694
|
+
resolved_files.append({
|
|
695
|
+
"file": file_path,
|
|
696
|
+
"status": "error",
|
|
697
|
+
"error": "MorphLLM timeout"
|
|
698
|
+
})
|
|
699
|
+
continue
|
|
700
|
+
except Exception as e:
|
|
701
|
+
resolved_files.append({
|
|
702
|
+
"file": file_path,
|
|
703
|
+
"status": "error",
|
|
704
|
+
"error": f"MorphLLM failed: {str(e)}"
|
|
705
|
+
})
|
|
706
|
+
continue
|
|
707
|
+
|
|
708
|
+
# Apply the change immediately by writing to file
|
|
709
|
+
full_path = os.path.join(workspace_root, file_path) if not os.path.isabs(file_path) else file_path
|
|
710
|
+
try:
|
|
711
|
+
with open(full_path, "w", encoding="utf-8") as f:
|
|
712
|
+
f.write(final_content)
|
|
713
|
+
except Exception as e:
|
|
714
|
+
resolved_files.append({
|
|
715
|
+
"file": file_path,
|
|
716
|
+
"status": "error",
|
|
717
|
+
"error": f"Failed to write resolved content: {str(e)}"
|
|
718
|
+
})
|
|
719
|
+
continue
|
|
720
|
+
|
|
721
|
+
# Store original content for potential revert
|
|
722
|
+
_resolved_conflicts[file_path] = {
|
|
723
|
+
"workspace_root": workspace_root,
|
|
724
|
+
"original_content": original_content,
|
|
725
|
+
"resolved_content": final_content,
|
|
726
|
+
}
|
|
727
|
+
|
|
728
|
+
# Create a simple diff preview (show before/after around conflict)
|
|
729
|
+
conflict_lines = conflict_text.splitlines()
|
|
730
|
+
conflict_preview = "\n".join(conflict_lines[:10])
|
|
731
|
+
if len(conflict_lines) > 10:
|
|
732
|
+
conflict_preview += f"\n... ({len(conflict_lines) - 10} more lines) ..."
|
|
733
|
+
|
|
734
|
+
resolved_lines = final_content.splitlines()
|
|
735
|
+
resolved_preview = "\n".join(resolved_lines[:30])
|
|
736
|
+
if len(resolved_lines) > 30:
|
|
737
|
+
resolved_preview += f"\n... ({len(resolved_lines) - 30} more lines) ..."
|
|
738
|
+
|
|
739
|
+
resolved_files.append({
|
|
740
|
+
"file": file_path,
|
|
741
|
+
"status": "applied",
|
|
742
|
+
"conflict_preview": conflict_preview,
|
|
743
|
+
"resolved_preview": resolved_preview,
|
|
744
|
+
"message": "Changes have been applied to the file. Review and confirm to stage, or revert if needed."
|
|
745
|
+
})
|
|
746
|
+
|
|
747
|
+
except Exception as e:
|
|
748
|
+
resolved_files.append({
|
|
749
|
+
"file": file_path,
|
|
750
|
+
"status": "error",
|
|
751
|
+
"error": str(e)
|
|
752
|
+
})
|
|
753
|
+
|
|
754
|
+
successful_count = len([f for f in resolved_files if f.get('status') == 'applied'])
|
|
755
|
+
result = {
|
|
756
|
+
"message": f"Applied resolutions to {successful_count} file(s). Changes have been written to files.",
|
|
757
|
+
"instruction": "Review the resolved files above. You can stage them yourself or use the commit splitter. If you want to undo the changes, call revert_conflict_resolution.",
|
|
758
|
+
"resolved_files": resolved_files
|
|
759
|
+
}
|
|
760
|
+
|
|
761
|
+
return json.dumps(result, indent=2)
|
|
762
|
+
|
|
763
|
+
except Exception as e:
|
|
764
|
+
return json.dumps({"error": f"failed to resolve conflict: {str(e)}"})
|
|
765
|
+
|
|
766
|
+
|
|
767
|
+
def _restore_original(workspace_root: str, path: str, resolution_data: Dict) -> None:
    """Write the stored pre-resolution (conflicted) content back to *path*.

    *path* is resolved against *workspace_root* unless it is already absolute.
    Any OSError from opening/writing propagates to the caller, which decides
    whether to record it per-file or fail the whole request.
    """
    full_path = path if os.path.isabs(path) else os.path.join(workspace_root, path)
    with open(full_path, "w", encoding="utf-8") as f:
        f.write(resolution_data["original_content"])


@mcp.tool(
    name="revert_conflict_resolution",
    description="Reverts the resolved conflict files back to their original state with conflicts. Use if you want to undo the changes applied by resolve_conflict.",
)
async def revert_conflict_resolution(file_path: Optional[str] = None, workspace_root: Optional[str] = None, revert_all: bool = False):
    """
    Revert resolved conflict files back to their original conflicted state.

    Args:
        file_path: Optional path to a specific file to revert (relative to workspace root).
            If not provided and revert_all=False, reverts all pending resolutions.
        workspace_root: Optional path to the workspace root directory.
            If not provided, will attempt to detect from environment variables or current directory.
        revert_all: If True, revert all pending resolutions at once

    Returns:
        JSON string with success/error status
    """
    global _resolved_conflicts

    try:
        # Normalize workspace_root: prefer the enclosing git root, but accept a
        # plain existing directory when no git root is detected.
        if workspace_root:
            detected_root = await find_git_root(workspace_root)
            if detected_root:
                workspace_root = detected_root
            elif not os.path.isdir(workspace_root):
                return json.dumps({"error": f"provided workspace_root '{workspace_root}' does not exist or is not a directory."})
        else:
            workspace_root = await find_git_root()
            if not workspace_root:
                return json.dumps({"error": "could not detect git repository root."})

        # NOTE: omitting file_path implies "revert everything", same as revert_all=True.
        if revert_all or not file_path:
            # Revert all pending resolutions
            if not _resolved_conflicts:
                return json.dumps({
                    "message": "No pending resolutions to revert."
                })

            reverted_files: List[str] = []
            errors: List[Dict] = []

            # Iterate over a snapshot since entries are deleted while looping.
            for path, resolution_data in list(_resolved_conflicts.items()):
                try:
                    # Skip entries recorded under a different workspace root.
                    if resolution_data["workspace_root"] != workspace_root:
                        errors.append({"file": path, "error": "Workspace root mismatch"})
                        continue

                    # Write original content back to file, then forget the entry.
                    _restore_original(workspace_root, path, resolution_data)
                    reverted_files.append(path)
                    del _resolved_conflicts[path]

                except Exception as e:
                    # Best-effort: record the failure and leave the entry pending.
                    errors.append({"file": path, "error": str(e)})

            result = {
                "message": f"Reverted {len(reverted_files)} file(s) back to conflicted state.",
                "reverted_files": reverted_files,
                "errors": errors if errors else None
            }
            return json.dumps(result, indent=2)
        else:
            # Revert single file
            if file_path not in _resolved_conflicts:
                return json.dumps({
                    "error": f"No pending resolution found for '{file_path}'."
                })

            resolution_data = _resolved_conflicts[file_path]

            # Verify workspace root matches the one recorded at resolve time.
            if resolution_data["workspace_root"] != workspace_root:
                return json.dumps({
                    "error": f"Workspace root mismatch. Expected '{resolution_data['workspace_root']}', got '{workspace_root}'."
                })

            # Write original content back to file.
            _restore_original(workspace_root, file_path, resolution_data)

            # Remove from pending resolutions only after a successful write.
            del _resolved_conflicts[file_path]

            return json.dumps({
                "message": f"Reverted '{file_path}' back to conflicted state.",
                "file": file_path
            })

    except Exception as e:
        return json.dumps({"error": f"failed to revert conflict resolution: {str(e)}"})
|
|
483
865
|
|
|
484
866
|
|
|
485
867
|
def main():
|