glide-mcp 0.1.1__py3-none-any.whl → 0.1.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,91 @@
1
+ Metadata-Version: 2.4
2
+ Name: glide-mcp
3
+ Version: 0.1.8
4
+ Summary: mcp server that tries to save you from git troubles
5
+ License-File: LICENSE
6
+ Requires-Python: >=3.13
7
+ Requires-Dist: black>=25.9.0
8
+ Requires-Dist: cerebras-cloud-sdk>=1.56.1
9
+ Requires-Dist: fastmcp>=2.12.5
10
+ Requires-Dist: helix-py>=0.2.30
11
+ Requires-Dist: numpy>=2.3.4
12
+ Requires-Dist: ollama>=0.6.0
13
+ Requires-Dist: openai>=1.0.0
14
+ Requires-Dist: pytest-asyncio>=1.2.0
15
+ Requires-Dist: pytest>=8.4.2
16
+ Requires-Dist: python-dotenv>=1.1.1
17
+ Description-Content-Type: text/markdown
18
+
19
+
20
+ # Glide MCP
21
+
22
+ > Note: **we're currently recording a demo video and finalizing features for this MCP. Stay tuned, and thank you for your patience :)**
23
+
24
+
25
+
26
+ ### Configure the MCP Client of your choice to use Glide
27
+
28
+ 1. **Add to Cursor (Preferred):**
29
+
30
+ [![Install MCP Server](https://cursor.com/deeplink/mcp-install-dark.svg)](https://cursor.com/en-US/install-mcp?name=glide-mcp&config=eyJlbnYiOnsiVk9ZQUdFQUlfQVBJX0tFWSI6IiIsIkhFTElYX0FQSV9FTkRQT0lOVCI6IiIsIkNFUkVCUkFTX0FQSV9LRVkiOiIiLCJDRVJFQlJBU19NT0RFTF9JRCI6InF3ZW4tMy0zMmIiLCJIRUxJWF9MT0NBTCI6IiJ9LCJjb21tYW5kIjoidXZ4IC0tZnJvbSBnbGlkZS1tY3AgZ2xpZGUifQ%3D%3D)
31
+
32
+
33
+
34
+ 2. **Add to Claude Code:**
35
+ make sure to fill in the api keys correctly, no quotes needed
36
+ ```zsh
37
+ claude mcp add --transport stdio glide-mcp --env VOYAGEAI_API_KEY= --env HELIX_API_ENDPOINT= --env CEREBRAS_API_KEY= --env CEREBRAS_MODEL_ID=qwen-3-32b --env HELIX_LOCAL= -- uvx --from glide-mcp glide
38
+ ```
39
+
40
+
41
+ 3. **Add to VSCode**:
42
+
43
+ [![Install MCP Server](https://img.shields.io/badge/add_to_VSCode-blue)](vscode:mcp/install?{\"name\":\"glide-mcp\",\"command\":\"uvx\",\"args\":[\"--from\",\"glide-mcp\",\"glide\"],\"env\":{\"VOYAGEAI_API_KEY\":\"\",\"HELIX_API_ENDPOINT\":\"\",\"CEREBRAS_API_KEY\":\"\",\"CEREBRAS_MODEL_ID\":\"qwen-3-32b\",\"HELIX_LOCAL\":\"\"}})
44
+
45
+
46
+ You can add the API keys needed by opening the command palette (Cmd+Shift+P) and searching for `"MCP: List MCP Servers"`. Make sure to fill in the API keys correctly.
47
+
48
+
49
+ **Manual Installation:**
50
+
51
+ Add the following to your `mcp.json` configuration in your preferred editor / IDE:
52
+
53
+ ```json
54
+ {
55
+ "mcpServers": {
56
+ "glide-mcp": {
57
+ "command": "uvx",
58
+ "args": ["--from", "glide-mcp", "glide"],
59
+ "env": {
60
+ "VOYAGEAI_API_KEY": "",
61
+ "HELIX_API_ENDPOINT": "",
62
+ "CEREBRAS_API_KEY": "",
63
+ "CEREBRAS_MODEL_ID": "qwen-3-32b",
64
+ "HELIX_LOCAL": ""
65
+ }
66
+ }
67
+ }
68
+ }
69
+ ```
70
+
71
+
72
+
73
+ ## Working with the source:
74
+
75
+ ### 1. You can also clone the source
76
+ ```bash
77
+ git clone https://github.com/SoarAILabs/glide.git
78
+ ```
79
+
80
+ ### 2. Navigate to the project directory
81
+
82
+ ```bash
83
+ cd glide
84
+ ```
85
+
86
+ ### 3. Start the server
87
+
88
+ ```bash
89
+ uv run python -m src.mcp.app
90
+ ```
91
+
@@ -0,0 +1,12 @@
1
+ src/conflicts/fibonnaci.py,sha256=lu-mz2sFXqdrwmvhRK9jJyMUAah4qN6qzvpFrapKM3c,1744
2
+ src/core/LLM/cerebras_inference.py,sha256=aAQvsXnaENdjnt02H1Swt1U3RV922J619owm1z6pjT8,3488
3
+ src/kite_exclusive/commit_splitter/prompts/prompt1.md,sha256=eOTX0H_n0vOuyfanEvbC8MAj4pF7W7FMiCdJw3zSL5g,1255
4
+ src/kite_exclusive/commit_splitter/services/voyage_service.py,sha256=1vZ-1lfqnQqySh2xPqDmbvXexhagOL08ZsT3hJJYnEo,2170
5
+ src/kite_exclusive/resolve_conflicts/breeze_inference.py,sha256=5ZJ8nosGUfLyKykayjHlbH6Y91eQix2XaGCvUQwrjDw,1821
6
+ src/kite_exclusive/resolve_conflicts/morph_service.py,sha256=ip80YpVp2ugz8aPbNVGQRC_ahSQtX7GadtXlMeW7Q1E,2893
7
+ src/mcp/app.py,sha256=fUontmZBBWTvleShGQoc5OkU3RKmnVRthcP2pmxrq-c,35816
8
+ glide_mcp-0.1.8.dist-info/METADATA,sha256=UVX2D_4EXuYEgRGn3Rhrwzg-uru0E7tCBoena2xvMF8,2643
9
+ glide_mcp-0.1.8.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87
10
+ glide_mcp-0.1.8.dist-info/entry_points.txt,sha256=Yuaw0eGqqTh9ZEUcL8B9Qvdzkc03kavXSEd5jzx9jck,43
11
+ glide_mcp-0.1.8.dist-info/licenses/LICENSE,sha256=bqVuW787bFw2uBL31Xlee7ydibnr_8TkCWlHdi7LXEM,1067
12
+ glide_mcp-0.1.8.dist-info/RECORD,,
@@ -0,0 +1,2 @@
1
+ [console_scripts]
2
+ glide = src.mcp.app:main
@@ -0,0 +1,64 @@
1
+ <<<<<<< HEAD
2
+ def fibonacci(n):
3
+ """Calculate the nth Fibonacci number using iteration"""
4
+ if n <= 1:
5
+ return n
6
+ a, b = 0, 1
7
+ for _ in range(2, n + 1):
8
+ a, b = b, a + b
9
+ return b
10
+
11
+ def factorial(n):
12
+ """Calculate factorial recursively"""
13
+ if n == 0 or n == 1:
14
+ return 1
15
+ return n * factorial(n - 1)
16
+ ||||||| base
17
+ def fibonacci(n):
18
+ """Calculate the nth Fibonacci number using memoization"""
19
+ memo = {}
20
+ def fib_helper(k):
21
+ if k in memo:
22
+ return memo[k]
23
+ if k <= 1:
24
+ return k
25
+ memo[k] = fib_helper(k - 1) + fib_helper(k - 2)
26
+ return memo[k]
27
+ return fib_helper(n)
28
+
29
+ def factorial(n):
30
+ """Calculate factorial iteratively"""
31
+ result = 1
32
+ for i in range(1, n + 1):
33
+ result *= i
34
+ return result
35
+ =======
36
+ def fibonacci(n):
37
+ """Calculate the nth Fibonacci number using matrix exponentiation"""
38
+ import numpy as np
39
+
40
+ def matrix_mult(A, B):
41
+ return [[A[0][0]*B[0][0] + A[0][1]*B[1][0], A[0][0]*B[0][1] + A[0][1]*B[1][1]],
42
+ [A[1][0]*B[0][0] + A[1][1]*B[1][0], A[1][0]*B[0][1] + A[1][1]*B[1][1]]]
43
+
44
+ def matrix_pow(M, exp):
45
+ result = [[1, 0], [0, 1]] # Identity matrix
46
+ while exp > 0:
47
+ if exp % 2 == 1:
48
+ result = matrix_mult(result, M)
49
+ M = matrix_mult(M, M)
50
+ exp //= 2
51
+ return result
52
+
53
+ if n <= 1:
54
+ return n
55
+
56
+ F = [[1, 1], [1, 0]]
57
+ result = matrix_pow(F, n - 1)
58
+ return result[0][0]
59
+
60
+ def factorial(n):
61
+ """Calculate factorial using reduce"""
62
+ from functools import reduce
63
+ return reduce(lambda x, y: x * y, range(1, n + 1), 1) if n > 0 else 1
64
+ >>>>>>> branch
@@ -7,7 +7,7 @@ from cerebras.cloud.sdk import AsyncCerebras
7
7
  load_dotenv()
8
8
 
9
9
  # Default model; override per-call via the `model` argument
10
- DEFAULT_MODEL_ID: str = os.getenv("CEREBRAS_MODEL_ID", "qwen-3-32b")
10
+ DEFAULT_MODEL_ID: str = os.getenv("CEREBRAS_MODEL_ID", "qwen-3-coder-480b")
11
11
 
12
12
  _async_client: Optional[AsyncCerebras] = None
13
13
 
@@ -1,49 +1,69 @@
1
- from helix.embedding.voyageai_client import VoyageAIEmbedder
2
- from helix import Chunk
3
- import os
4
-
5
- voyage_embedder = VoyageAIEmbedder()
6
-
7
-
8
- def embed_code(code: str, file_path: str = None):
9
-
10
- # For diffs, use token_chunk instead of code_chunk since diffs are text format
11
- # and code_chunk has API compatibility issues
12
- try:
13
- # Try code_chunk first if we have a valid language
14
- if file_path:
15
- ext = os.path.splitext(file_path)[1].lstrip(".")
16
- lang_map = {
17
- "py": "python",
18
- "js": "javascript",
19
- "ts": "typescript",
20
- "jsx": "javascript",
21
- "tsx": "typescript",
22
- "java": "java",
23
- "cpp": "cpp",
24
- "c": "c",
25
- "cs": "csharp",
26
- "go": "go",
27
- "rs": "rust",
28
- "rb": "ruby",
29
- "php": "php",
30
- "swift": "swift",
31
- "kt": "kotlin",
32
- "scala": "scala",
33
- "sh": "bash",
34
- "hx": "python",
35
- }
36
- language = lang_map.get(ext.lower())
37
- if language:
38
- code_chunks = Chunk.code_chunk(code, language=language)
39
- else:
40
- code_chunks = Chunk.token_chunk(code)
41
- else:
42
- code_chunks = Chunk.token_chunk(code)
43
- except Exception:
44
- # Fallback to token_chunk if code_chunk fails
45
- code_chunks = Chunk.token_chunk(code)
46
-
47
- code_embeddings = voyage_embedder.embed_batch([f"{code_chunks}"])
48
-
49
- return code_embeddings
1
+ from helix.embedding.voyageai_client import VoyageAIEmbedder
2
+ from helix import Chunk
3
+ import os
4
+
5
+ # Lazy-loaded embedder - only created when needed
6
+ _voyage_embedder = None
7
+
8
+
9
+ def _get_embedder():
10
+ """Get or create the voyage embedder instance (lazy initialization)."""
11
+ global _voyage_embedder
12
+ if _voyage_embedder is None:
13
+ _voyage_embedder = VoyageAIEmbedder()
14
+ return _voyage_embedder
15
+
16
+
17
+ def embed_code(code: str, file_path: str = None):
18
+ """
19
+ Embed code or diff text using VoyageAI embeddings.
20
+
21
+ For diffs, uses token_chunk instead of code_chunk since diffs are text format
22
+ and code_chunk has API compatibility issues.
23
+
24
+ Args:
25
+ code: The code or diff text to embed
26
+ file_path: Optional file path to determine language for code_chunk
27
+
28
+ Returns:
29
+ List of embeddings from VoyageAI
30
+ """
31
+ try:
32
+ # Try code_chunk first if we have a valid language
33
+ if file_path:
34
+ ext = os.path.splitext(file_path)[1].lstrip(".")
35
+ lang_map = {
36
+ "py": "python",
37
+ "js": "javascript",
38
+ "ts": "typescript",
39
+ "jsx": "javascript",
40
+ "tsx": "typescript",
41
+ "java": "java",
42
+ "cpp": "cpp",
43
+ "c": "c",
44
+ "cs": "csharp",
45
+ "go": "go",
46
+ "rs": "rust",
47
+ "rb": "ruby",
48
+ "php": "php",
49
+ "swift": "swift",
50
+ "kt": "kotlin",
51
+ "scala": "scala",
52
+ "sh": "bash",
53
+ "hx": "python",
54
+ }
55
+ language = lang_map.get(ext.lower())
56
+ if language:
57
+ code_chunks = Chunk.code_chunk(code, language=language)
58
+ else:
59
+ code_chunks = Chunk.token_chunk(code)
60
+ else:
61
+ code_chunks = Chunk.token_chunk(code)
62
+ except Exception:
63
+ # Fallback to token_chunk if code_chunk fails
64
+ code_chunks = Chunk.token_chunk(code)
65
+
66
+ voyage_embedder = _get_embedder()
67
+ code_embeddings = voyage_embedder.embed_batch([f"{code_chunks}"])
68
+
69
+ return code_embeddings
@@ -0,0 +1,70 @@
1
+ import os
2
+ import asyncio
3
+ from typing import Optional
4
+ from dotenv import load_dotenv
5
+ import ollama
6
+
7
+ load_dotenv()
8
+
9
+ # Default model; override per-call via the `model` argument
10
+ DEFAULT_MODEL_ID: str = os.getenv("BREEZE_MODEL_ID", "hf.co/SoarAILabs/breeze-3b:Q4_K_M")
11
+
12
+
13
+ def _resolve_merge_conflict_sync(
14
+ conflict_text: str,
15
+ *,
16
+ model: Optional[str] = None,
17
+ ) -> str:
18
+ """
19
+ Resolve a merge conflict using the breeze model.
20
+
21
+ Args:
22
+ conflict_text: Merge conflict text with markers (<<<<<<<, =======, >>>>>>>)
23
+ model: Model name; defaults to DEFAULT_MODEL_ID
24
+
25
+ Returns:
26
+ Resolved content without conflict markers
27
+ """
28
+ model_id = model or DEFAULT_MODEL_ID
29
+
30
+ try:
31
+ response = ollama.generate(
32
+ model=model_id,
33
+ prompt=conflict_text,
34
+ )
35
+
36
+ if not response or 'response' not in response:
37
+ raise RuntimeError("Ollama returned empty or invalid response")
38
+
39
+ resolved_content = response['response']
40
+ return resolved_content.strip()
41
+ except Exception as e:
42
+ raise RuntimeError(f"Ollama error: {str(e)}")
43
+
44
+
45
+ async def resolve_merge_conflict(
46
+ conflict_text: str,
47
+ *,
48
+ model: Optional[str] = None,
49
+ ) -> str:
50
+ """
51
+ Resolve a merge conflict using the breeze model.
52
+
53
+ Args:
54
+ conflict_text: Merge conflict text with markers (<<<<<<<, =======, >>>>>>>)
55
+ model: Model name; defaults to DEFAULT_MODEL_ID
56
+
57
+ Returns:
58
+ Resolved content without conflict markers
59
+ """
60
+ return await asyncio.to_thread(
61
+ _resolve_merge_conflict_sync,
62
+ conflict_text,
63
+ model=model,
64
+ )
65
+
66
+
67
+ __all__ = [
68
+ "resolve_merge_conflict",
69
+ "DEFAULT_MODEL_ID",
70
+ ]
@@ -0,0 +1,106 @@
1
+ import os
2
+ import asyncio
3
+ from typing import Optional
4
+ from dotenv import load_dotenv
5
+ from openai import OpenAI
6
+
7
+ load_dotenv()
8
+
9
+ # MorphLLM API configuration
10
+ MORPH_API_BASE = "https://api.morphllm.com/v1"
11
+ MORPH_MODEL = "morph-v3-fast"
12
+
13
+ _openai_client: Optional[OpenAI] = None
14
+
15
+
16
+ def _get_api_key() -> str:
17
+ """Get MorphLLM API key from environment."""
18
+ api_key = os.getenv("MORPHLLM_API_KEY")
19
+ if not api_key:
20
+ raise RuntimeError(
21
+ "MORPHLLM_API_KEY or MORPH_API_KEY is not set. "
22
+ "Set it in the environment or .env file."
23
+ )
24
+ return api_key
25
+
26
+
27
+ def _get_openai_client() -> OpenAI:
28
+ """Get or create the OpenAI client instance for MorphLLM (lazy initialization)."""
29
+ global _openai_client
30
+ if _openai_client is None:
31
+ _openai_client = OpenAI(
32
+ api_key=_get_api_key(),
33
+ base_url=MORPH_API_BASE,
34
+ )
35
+ return _openai_client
36
+
37
+
38
+ def _apply_code_edit_sync(
39
+ original_code: str,
40
+ instructions: str,
41
+ edit_snippet: str,
42
+ ) -> str:
43
+ """
44
+ Apply a code edit using MorphLLM.
45
+
46
+ Args:
47
+ original_code: The original file content
48
+ instructions: Single sentence instruction describing what the edit does
49
+ edit_snippet: The edit snippet with // ... existing code ... markers
50
+
51
+ Returns:
52
+ Final code with edit applied
53
+ """
54
+ client = _get_openai_client()
55
+
56
+ # Format the prompt as MorphLLM expects:
57
+ # instructions + original_code + edit_snippet
58
+ prompt = f"{instructions}\n\n{original_code}\n\n{edit_snippet}"
59
+
60
+ try:
61
+ response = client.chat.completions.create(
62
+ model=MORPH_MODEL,
63
+ messages=[
64
+ {
65
+ "role": "user",
66
+ "content": prompt,
67
+ }
68
+ ],
69
+ )
70
+
71
+ if not response or not response.choices or not response.choices[0].message.content:
72
+ raise RuntimeError("MorphLLM returned empty or invalid response")
73
+
74
+ final_code = response.choices[0].message.content
75
+ return final_code
76
+ except Exception as e:
77
+ raise RuntimeError(f"MorphLLM error: {str(e)}")
78
+
79
+
80
+ async def apply_code_edit(
81
+ original_code: str,
82
+ instructions: str,
83
+ edit_snippet: str,
84
+ ) -> str:
85
+ """
86
+ Apply a code edit using MorphLLM.
87
+
88
+ Args:
89
+ original_code: The original file content
90
+ instructions: Single sentence instruction describing what the edit does
91
+ edit_snippet: The edit snippet with // ... existing code ... markers
92
+
93
+ Returns:
94
+ Final code with edit applied
95
+ """
96
+ return await asyncio.to_thread(
97
+ _apply_code_edit_sync,
98
+ original_code,
99
+ instructions,
100
+ edit_snippet,
101
+ )
102
+
103
+
104
+ __all__ = [
105
+ "apply_code_edit",
106
+ ]