glide-mcp 0.1.1__py3-none-any.whl → 0.1.12__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- glide_mcp-0.1.12.dist-info/METADATA +92 -0
- glide_mcp-0.1.12.dist-info/RECORD +12 -0
- glide_mcp-0.1.12.dist-info/entry_points.txt +2 -0
- src/conflicts/fibonnaci.py +18 -0
- src/core/LLM/cerebras_inference.py +1 -1
- src/kite_exclusive/commit_splitter/services/voyage_service.py +69 -49
- src/kite_exclusive/resolve_conflicts/breeze_inference.py +70 -0
- src/kite_exclusive/resolve_conflicts/morph_service.py +106 -0
- src/mcp/app.py +488 -106
- glide_mcp-0.1.1.dist-info/METADATA +0 -67
- glide_mcp-0.1.1.dist-info/RECORD +0 -9
- glide_mcp-0.1.1.dist-info/entry_points.txt +0 -2
- {glide_mcp-0.1.1.dist-info → glide_mcp-0.1.12.dist-info}/WHEEL +0 -0
- {glide_mcp-0.1.1.dist-info → glide_mcp-0.1.12.dist-info}/licenses/LICENSE +0 -0
glide_mcp-0.1.12.dist-info/METADATA
@@ -0,0 +1,92 @@
+Metadata-Version: 2.4
+Name: glide-mcp
+Version: 0.1.12
+Summary: mcp server that tries to save you from git troubles
+License-File: LICENSE
+Requires-Python: >=3.13
+Requires-Dist: black>=25.9.0
+Requires-Dist: cerebras-cloud-sdk>=1.56.1
+Requires-Dist: fastmcp>=2.12.5
+Requires-Dist: google-genai>=1.31.0
+Requires-Dist: helix-py>=0.2.30
+Requires-Dist: numpy>=2.3.4
+Requires-Dist: ollama>=0.6.0
+Requires-Dist: openai>=1.0.0
+Requires-Dist: pytest-asyncio>=1.2.0
+Requires-Dist: pytest>=8.4.2
+Requires-Dist: python-dotenv>=1.1.1
+Description-Content-Type: text/markdown
+
+
+# Glide MCP
+
+> Note: **we're currently recording a demo video and finalizing features for this MCP. Stay tuned, and thank you for your patience :)**
+
+
+
+### Configure the MCP client of your choice to use Glide
+
+1. **Add to Cursor (preferred):**
+
+[](https://cursor.com/en-US/install-mcp?name=glide-mcp&config=eyJlbnYiOnsiVk9ZQUdFQUlfQVBJX0tFWSI6InBhLXQ5MGptcVlmZ1pQYzBXMDBfMW1MMmEwZjBCbjRrOG9sX25kRVkydEl2OEMiLCJIRUxJWF9BUElfRU5EUE9JTlQiOiJodHRwczovL2hlbGl4LWdsaWRlLXByb2R1Y3Rpb24uZmx5LmRldiIsIkNFUkVCUkFTX0FQSV9LRVkiOiJjc2std3ZrM2p3bmo1NXgzdzhtZXByZXRya2Y4azMydHhucGQ4dnJueTRldG1rdGtkY2U4IiwiQ0VSRUJSQVNfTU9ERUxfSUQiOiJxd2VuLTMtMzJiIiwiTU9SUEhMTE1fQVBJX0tFWSI6InNrLU1TemIzYVlyWDBjb1F1WWFQT1haMmpJOFFzY3JRYnR0UVdvTUY2Y3MtdmJtSkdHSSIsIkhFTElYX0xPQ0FMIjoiRmFsc2UifSwiY29tbWFuZCI6InV2eCAtLWZyb20gZ2xpZGUtbWNwIGdsaWRlICJ9)
+
+
+2. **Add to Claude Code:**
+Make sure to fill in the API keys correctly; no quotes are needed.
+```zsh
+claude mcp add --transport stdio glide-mcp --env VOYAGEAI_API_KEY= --env HELIX_API_ENDPOINT= --env CEREBRAS_API_KEY= --env CEREBRAS_MODEL_ID=qwen-3-32b --env HELIX_LOCAL= --env MORPHLLM_API_KEY= -- uvx --from glide-mcp glide
+```
+
+
+3. **Add to VSCode:**
+
+[](vscode:mcp/install?{\"name\":\"glide-mcp\",\"command\":\"uvx\",\"args\":[\"--from\",\"glide-mcp\",\"glide\"],\"env\":{\"VOYAGEAI_API_KEY\":\"\",\"HELIX_API_ENDPOINT\":\"\",\"CEREBRAS_API_KEY\":\"\",\"CEREBRAS_MODEL_ID\":\"qwen-3-32b\",\"HELIX_LOCAL\":\"\"}})
+
+
+You can add the required API keys by opening the command palette (Cmd+Shift+P) and searching for `"MCP: List MCP Servers"`. Make sure to fill in the API keys correctly.
+
+
+**Manual installation:**
+
+Add the following to the `mcp.json` configuration of your preferred editor / IDE:
+
+```json
+{
+  "mcpServers": {
+    "glide-mcp": {
+      "command": "uvx",
+      "args": ["--from", "glide-mcp", "glide"],
+      "env": {
+        "VOYAGEAI_API_KEY": "",
+        "HELIX_API_ENDPOINT": "",
+        "CEREBRAS_API_KEY": "",
+        "CEREBRAS_MODEL_ID": "qwen-3-32b",
+        "HELIX_LOCAL": "",
+        "MORPHLLM_API_KEY": ""
+      }
+    }
+  }
+}
+```
+
+
+
+## Working with the source
+
+### 1. Clone the source
+```bash
+git clone https://github.com/SoarAILabs/glide.git
+```
+
+### 2. Navigate to the project directory
+
+```bash
+cd glide
+```
+
+### 3. Start the server
+
+```bash
+uv run python -m src.mcp.app
+```
+
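The `env` block above enumerates every variable the server reads. As a quick sanity check before wiring up an MCP client, a minimal sketch like the following (a hypothetical helper, not shipped in the package) can confirm they are all set:

```python
import os

# Variables named in the README's mcp.json example above.
REQUIRED_VARS = [
    "VOYAGEAI_API_KEY",
    "HELIX_API_ENDPOINT",
    "CEREBRAS_API_KEY",
    "CEREBRAS_MODEL_ID",
    "HELIX_LOCAL",
    "MORPHLLM_API_KEY",
]

missing = [name for name in REQUIRED_VARS if not os.getenv(name)]
if missing:
    raise SystemExit(f"Missing environment variables: {', '.join(missing)}")
print("All Glide MCP environment variables are set.")
```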
glide_mcp-0.1.12.dist-info/RECORD
@@ -0,0 +1,12 @@
+src/conflicts/fibonnaci.py,sha256=A_BCc9JDzR4wBuTLIDa3a5Y2RKLQhfTdkCMUNGUmJ2Q,481
+src/core/LLM/cerebras_inference.py,sha256=aAQvsXnaENdjnt02H1Swt1U3RV922J619owm1z6pjT8,3488
+src/kite_exclusive/commit_splitter/prompts/prompt1.md,sha256=eOTX0H_n0vOuyfanEvbC8MAj4pF7W7FMiCdJw3zSL5g,1255
+src/kite_exclusive/commit_splitter/services/voyage_service.py,sha256=1vZ-1lfqnQqySh2xPqDmbvXexhagOL08ZsT3hJJYnEo,2170
+src/kite_exclusive/resolve_conflicts/breeze_inference.py,sha256=5ZJ8nosGUfLyKykayjHlbH6Y91eQix2XaGCvUQwrjDw,1821
+src/kite_exclusive/resolve_conflicts/morph_service.py,sha256=ip80YpVp2ugz8aPbNVGQRC_ahSQtX7GadtXlMeW7Q1E,2893
+src/mcp/app.py,sha256=fUontmZBBWTvleShGQoc5OkU3RKmnVRthcP2pmxrq-c,35816
+glide_mcp-0.1.12.dist-info/METADATA,sha256=uTQkLmA9_AoIpJAlOvumyRmfLFUUFXPYjWK50jDPCHc,3021
+glide_mcp-0.1.12.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
+glide_mcp-0.1.12.dist-info/entry_points.txt,sha256=Yuaw0eGqqTh9ZEUcL8B9Qvdzkc03kavXSEd5jzx9jck,43
+glide_mcp-0.1.12.dist-info/licenses/LICENSE,sha256=bqVuW787bFw2uBL31Xlee7ydibnr_8TkCWlHdi7LXEM,1067
+glide_mcp-0.1.12.dist-info/RECORD,,
src/conflicts/fibonnaci.py
@@ -0,0 +1,18 @@
+def fibonacci(n):
+    """Calculate the nth Fibonacci number using memoization"""
+    memo = {}
+    def fib_helper(k):
+        if k in memo:
+            return memo[k]
+        if k <= 1:
+            return k
+        memo[k] = fib_helper(k - 1) + fib_helper(k - 2)
+        return memo[k]
+    return fib_helper(n)
+
+def factorial(n):
+    """Calculate factorial iteratively"""
+    result = 1
+    for i in range(1, n + 1):
+        result = result * i
+    return result
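For reference, a quick usage sketch of the two helpers added above (assuming the repository root is on `sys.path` so `src.conflicts.fibonnaci` is importable):

```python
# Usage sketch for src/conflicts/fibonnaci.py (module path assumed importable).
from src.conflicts.fibonnaci import fibonacci, factorial

assert fibonacci(0) == 0
assert fibonacci(10) == 55    # 0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55
assert factorial(5) == 120    # 1 * 2 * 3 * 4 * 5
```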
src/core/LLM/cerebras_inference.py
@@ -7,7 +7,7 @@ from cerebras.cloud.sdk import AsyncCerebras
 load_dotenv()
 
 # Default model; override per-call via the `model` argument
-DEFAULT_MODEL_ID: str = os.getenv("CEREBRAS_MODEL_ID", "qwen-3-
+DEFAULT_MODEL_ID: str = os.getenv("CEREBRAS_MODEL_ID", "qwen-3-coder-480b")
 
 _async_client: Optional[AsyncCerebras] = None
 
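The comment above the changed line describes a default-with-override pattern: the env var beats the hard-coded fallback, and a per-call `model` argument beats both. A minimal sketch of that resolution order (illustrative only; `pick_model` is not a function in this module):

```python
import os
from typing import Optional

# Env var wins over the hard-coded fallback, as in cerebras_inference.py.
DEFAULT_MODEL_ID: str = os.getenv("CEREBRAS_MODEL_ID", "qwen-3-coder-480b")

def pick_model(model: Optional[str] = None) -> str:
    # A per-call `model` argument overrides the env-driven default.
    return model or DEFAULT_MODEL_ID

assert pick_model("qwen-3-32b") == "qwen-3-32b"
```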
src/kite_exclusive/commit_splitter/services/voyage_service.py
@@ -1,49 +1,69 @@
-from helix.embedding.voyageai_client import VoyageAIEmbedder
-from helix import Chunk
-import os
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+from helix.embedding.voyageai_client import VoyageAIEmbedder
+from helix import Chunk
+import os
+
+# Lazy-loaded embedder - only created when needed
+_voyage_embedder = None
+
+
+def _get_embedder():
+    """Get or create the voyage embedder instance (lazy initialization)."""
+    global _voyage_embedder
+    if _voyage_embedder is None:
+        _voyage_embedder = VoyageAIEmbedder()
+    return _voyage_embedder
+
+
+def embed_code(code: str, file_path: str = None):
+    """
+    Embed code or diff text using VoyageAI embeddings.
+
+    For diffs, uses token_chunk instead of code_chunk since diffs are text format
+    and code_chunk has API compatibility issues.
+
+    Args:
+        code: The code or diff text to embed
+        file_path: Optional file path to determine language for code_chunk
+
+    Returns:
+        List of embeddings from VoyageAI
+    """
+    try:
+        # Try code_chunk first if we have a valid language
+        if file_path:
+            ext = os.path.splitext(file_path)[1].lstrip(".")
+            lang_map = {
+                "py": "python",
+                "js": "javascript",
+                "ts": "typescript",
+                "jsx": "javascript",
+                "tsx": "typescript",
+                "java": "java",
+                "cpp": "cpp",
+                "c": "c",
+                "cs": "csharp",
+                "go": "go",
+                "rs": "rust",
+                "rb": "ruby",
+                "php": "php",
+                "swift": "swift",
+                "kt": "kotlin",
+                "scala": "scala",
+                "sh": "bash",
+                "hx": "python",
+            }
+            language = lang_map.get(ext.lower())
+            if language:
+                code_chunks = Chunk.code_chunk(code, language=language)
+            else:
+                code_chunks = Chunk.token_chunk(code)
+        else:
+            code_chunks = Chunk.token_chunk(code)
+    except Exception:
+        # Fallback to token_chunk if code_chunk fails
+        code_chunks = Chunk.token_chunk(code)
+
+    voyage_embedder = _get_embedder()
+    code_embeddings = voyage_embedder.embed_batch([f"{code_chunks}"])
+
+    return code_embeddings
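A hedged usage sketch for `embed_code`, assuming helix-py is installed and `VOYAGEAI_API_KEY` is set (the import path mirrors the RECORD entry; whether `src` is importable depends on the installed layout):

```python
from src.kite_exclusive.commit_splitter.services.voyage_service import embed_code

diff_text = """\
-def greet(): print("hi")
+def greet() -> None:
+    print("hello")
"""

# No file_path: the text is split with Chunk.token_chunk before embedding.
embeddings = embed_code(diff_text)

# A recognized extension routes through Chunk.code_chunk(..., language="python") first.
embeddings = embed_code('def greet():\n    return "hello"\n', file_path="greet.py")
```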
src/kite_exclusive/resolve_conflicts/breeze_inference.py
@@ -0,0 +1,70 @@
+import os
+import asyncio
+from typing import Optional
+from dotenv import load_dotenv
+import ollama
+
+load_dotenv()
+
+# Default model; override per-call via the `model` argument
+DEFAULT_MODEL_ID: str = os.getenv("BREEZE_MODEL_ID", "hf.co/SoarAILabs/breeze-3b:Q4_K_M")
+
+
+def _resolve_merge_conflict_sync(
+    conflict_text: str,
+    *,
+    model: Optional[str] = None,
+) -> str:
+    """
+    Resolve a merge conflict using the breeze model.
+
+    Args:
+        conflict_text: Merge conflict text with markers (<<<<<<<, =======, >>>>>>>)
+        model: Model name; defaults to DEFAULT_MODEL_ID
+
+    Returns:
+        Resolved content without conflict markers
+    """
+    model_id = model or DEFAULT_MODEL_ID
+
+    try:
+        response = ollama.generate(
+            model=model_id,
+            prompt=conflict_text,
+        )
+
+        if not response or 'response' not in response:
+            raise RuntimeError("Ollama returned empty or invalid response")
+
+        resolved_content = response['response']
+        return resolved_content.strip()
+    except Exception as e:
+        raise RuntimeError(f"Ollama error: {str(e)}")
+
+
+async def resolve_merge_conflict(
+    conflict_text: str,
+    *,
+    model: Optional[str] = None,
+) -> str:
+    """
+    Resolve a merge conflict using the breeze model.
+
+    Args:
+        conflict_text: Merge conflict text with markers (<<<<<<<, =======, >>>>>>>)
+        model: Model name; defaults to DEFAULT_MODEL_ID
+
+    Returns:
+        Resolved content without conflict markers
+    """
+    return await asyncio.to_thread(
+        _resolve_merge_conflict_sync,
+        conflict_text,
+        model=model,
+    )
+
+
+__all__ = [
+    "resolve_merge_conflict",
+    "DEFAULT_MODEL_ID",
+]
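A usage sketch for the async wrapper, assuming a local Ollama daemon with the breeze model already pulled (e.g. `ollama pull hf.co/SoarAILabs/breeze-3b:Q4_K_M`):

```python
import asyncio

from src.kite_exclusive.resolve_conflicts.breeze_inference import resolve_merge_conflict

conflict = """\
<<<<<<< HEAD
def greet():
    return "hello"
=======
def greet():
    return "hi there"
>>>>>>> feature/greeting
"""

# Runs the blocking ollama.generate call on a worker thread.
resolved = asyncio.run(resolve_merge_conflict(conflict))
print(resolved)  # resolved file content, conflict markers stripped
```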
src/kite_exclusive/resolve_conflicts/morph_service.py
@@ -0,0 +1,106 @@
+import os
+import asyncio
+from typing import Optional
+from dotenv import load_dotenv
+from openai import OpenAI
+
+load_dotenv()
+
+# MorphLLM API configuration
+MORPH_API_BASE = "https://api.morphllm.com/v1"
+MORPH_MODEL = "morph-v3-fast"
+
+_openai_client: Optional[OpenAI] = None
+
+
+def _get_api_key() -> str:
+    """Get MorphLLM API key from environment."""
+    api_key = os.getenv("MORPHLLM_API_KEY") or os.getenv("MORPH_API_KEY")
+    if not api_key:
+        raise RuntimeError(
+            "MORPHLLM_API_KEY or MORPH_API_KEY is not set. "
+            "Set it in the environment or .env file."
+        )
+    return api_key
+
+
+def _get_openai_client() -> OpenAI:
+    """Get or create the OpenAI client instance for MorphLLM (lazy initialization)."""
+    global _openai_client
+    if _openai_client is None:
+        _openai_client = OpenAI(
+            api_key=_get_api_key(),
+            base_url=MORPH_API_BASE,
+        )
+    return _openai_client
+
+
+def _apply_code_edit_sync(
+    original_code: str,
+    instructions: str,
+    edit_snippet: str,
+) -> str:
+    """
+    Apply a code edit using MorphLLM.
+
+    Args:
+        original_code: The original file content
+        instructions: Single sentence instruction describing what the edit does
+        edit_snippet: The edit snippet with // ... existing code ... markers
+
+    Returns:
+        Final code with edit applied
+    """
+    client = _get_openai_client()
+
+    # Format the prompt as MorphLLM expects:
+    # instructions + original_code + edit_snippet
+    prompt = f"{instructions}\n\n{original_code}\n\n{edit_snippet}"
+
+    try:
+        response = client.chat.completions.create(
+            model=MORPH_MODEL,
+            messages=[
+                {
+                    "role": "user",
+                    "content": prompt,
+                }
+            ],
+        )
+
+        if not response or not response.choices or not response.choices[0].message.content:
+            raise RuntimeError("MorphLLM returned empty or invalid response")
+
+        final_code = response.choices[0].message.content
+        return final_code
+    except Exception as e:
+        raise RuntimeError(f"MorphLLM error: {str(e)}")
+
+
+async def apply_code_edit(
+    original_code: str,
+    instructions: str,
+    edit_snippet: str,
+) -> str:
+    """
+    Apply a code edit using MorphLLM.
+
+    Args:
+        original_code: The original file content
+        instructions: Single sentence instruction describing what the edit does
+        edit_snippet: The edit snippet with // ... existing code ... markers
+
+    Returns:
+        Final code with edit applied
+    """
+    return await asyncio.to_thread(
+        _apply_code_edit_sync,
+        original_code,
+        instructions,
+        edit_snippet,
+    )
+
+
+__all__ = [
+    "apply_code_edit",
+]
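And a matching sketch for `apply_code_edit`, assuming `MORPHLLM_API_KEY` is set in the environment or a `.env` file:

```python
import asyncio

from src.kite_exclusive.resolve_conflicts.morph_service import apply_code_edit

original = 'def greet():\n    return "hello"\n'

# Edit snippet using the // ... existing code ... convention the docstring describes.
snippet = '// ... existing code ...\ndef greet():\n    return "hi there"\n'

final_code = asyncio.run(
    apply_code_edit(
        original_code=original,
        instructions="Change the greeting returned by greet().",
        edit_snippet=snippet,
    )
)
print(final_code)
```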