@voria/cli 0.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +439 -0
- package/bin/voria +730 -0
- package/docs/ARCHITECTURE.md +419 -0
- package/docs/CHANGELOG.md +189 -0
- package/docs/CONTRIBUTING.md +447 -0
- package/docs/DESIGN_DECISIONS.md +380 -0
- package/docs/DEVELOPMENT.md +535 -0
- package/docs/EXAMPLES.md +434 -0
- package/docs/INSTALL.md +335 -0
- package/docs/IPC_PROTOCOL.md +310 -0
- package/docs/LLM_INTEGRATION.md +416 -0
- package/docs/MODULES.md +470 -0
- package/docs/PERFORMANCE.md +346 -0
- package/docs/PLUGINS.md +432 -0
- package/docs/QUICKSTART.md +184 -0
- package/docs/README.md +133 -0
- package/docs/ROADMAP.md +346 -0
- package/docs/SECURITY.md +334 -0
- package/docs/TROUBLESHOOTING.md +565 -0
- package/docs/USER_GUIDE.md +700 -0
- package/package.json +63 -0
- package/python/voria/__init__.py +8 -0
- package/python/voria/__pycache__/__init__.cpython-312.pyc +0 -0
- package/python/voria/__pycache__/engine.cpython-312.pyc +0 -0
- package/python/voria/core/__init__.py +1 -0
- package/python/voria/core/__pycache__/__init__.cpython-312.pyc +0 -0
- package/python/voria/core/__pycache__/setup.cpython-312.pyc +0 -0
- package/python/voria/core/agent/__init__.py +9 -0
- package/python/voria/core/agent/__pycache__/__init__.cpython-312.pyc +0 -0
- package/python/voria/core/agent/__pycache__/loop.cpython-312.pyc +0 -0
- package/python/voria/core/agent/loop.py +343 -0
- package/python/voria/core/executor/__init__.py +19 -0
- package/python/voria/core/executor/__pycache__/__init__.cpython-312.pyc +0 -0
- package/python/voria/core/executor/__pycache__/executor.cpython-312.pyc +0 -0
- package/python/voria/core/executor/executor.py +431 -0
- package/python/voria/core/github/__init__.py +33 -0
- package/python/voria/core/github/__pycache__/__init__.cpython-312.pyc +0 -0
- package/python/voria/core/github/__pycache__/client.cpython-312.pyc +0 -0
- package/python/voria/core/github/client.py +438 -0
- package/python/voria/core/llm/__init__.py +55 -0
- package/python/voria/core/llm/__pycache__/__init__.cpython-312.pyc +0 -0
- package/python/voria/core/llm/__pycache__/base.cpython-312.pyc +0 -0
- package/python/voria/core/llm/__pycache__/claude_provider.cpython-312.pyc +0 -0
- package/python/voria/core/llm/__pycache__/gemini_provider.cpython-312.pyc +0 -0
- package/python/voria/core/llm/__pycache__/modal_provider.cpython-312.pyc +0 -0
- package/python/voria/core/llm/__pycache__/model_discovery.cpython-312.pyc +0 -0
- package/python/voria/core/llm/__pycache__/openai_provider.cpython-312.pyc +0 -0
- package/python/voria/core/llm/base.py +152 -0
- package/python/voria/core/llm/claude_provider.py +188 -0
- package/python/voria/core/llm/gemini_provider.py +148 -0
- package/python/voria/core/llm/modal_provider.py +228 -0
- package/python/voria/core/llm/model_discovery.py +289 -0
- package/python/voria/core/llm/openai_provider.py +146 -0
- package/python/voria/core/patcher/__init__.py +9 -0
- package/python/voria/core/patcher/__pycache__/__init__.cpython-312.pyc +0 -0
- package/python/voria/core/patcher/__pycache__/patcher.cpython-312.pyc +0 -0
- package/python/voria/core/patcher/patcher.py +375 -0
- package/python/voria/core/planner/__init__.py +1 -0
- package/python/voria/core/setup.py +201 -0
- package/python/voria/core/token_manager/__init__.py +29 -0
- package/python/voria/core/token_manager/__pycache__/__init__.cpython-312.pyc +0 -0
- package/python/voria/core/token_manager/__pycache__/manager.cpython-312.pyc +0 -0
- package/python/voria/core/token_manager/manager.py +241 -0
- package/python/voria/engine.py +1185 -0
- package/python/voria/plugins/__init__.py +1 -0
- package/python/voria/plugins/python/__init__.py +1 -0
- package/python/voria/plugins/typescript/__init__.py +1 -0
|
@@ -0,0 +1,188 @@
|
|
|
1
|
+
"""Anthropic Claude LLM Provider"""
|
|
2
|
+
|
|
3
|
+
import logging
|
|
4
|
+
from typing import List, Dict, Any, Optional
|
|
5
|
+
import httpx
|
|
6
|
+
|
|
7
|
+
from .base import BaseLLMProvider, Message, LLMResponse
|
|
8
|
+
|
|
9
|
+
logger = logging.getLogger(__name__)
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class ClaudeProvider(BaseLLMProvider):
    """Anthropic Claude LLM Provider.

    Talks to the Anthropic Messages API via httpx. System messages are
    split out of the conversation and sent through the top-level
    ``system`` field, as the Messages API requires.
    """

    API_ENDPOINT = "https://api.anthropic.com/v1/messages"
    DEFAULT_MODEL = "claude-3-opus-20240229"

    def __init__(self, api_key: str, model: str = DEFAULT_MODEL):
        """
        Initialize Claude provider

        Args:
            api_key: Anthropic API key
            model: Model (claude-3-opus, claude-3-sonnet, claude-3-haiku)
        """
        super().__init__(api_key, model)
        # Generous timeout: patch generation on large prompts can take minutes.
        self.client = httpx.AsyncClient(
            headers={
                "x-api-key": api_key,
                "anthropic-version": "2023-06-01",
                "content-type": "application/json",
            },
            timeout=300.0,
        )

    async def generate(
        self, messages: List[Message], max_tokens: int = 2000, temperature: float = 0.7
    ) -> LLMResponse:
        """Generate response using Claude.

        Args:
            messages: Conversation messages. If several "system" messages are
                supplied, only the last one is honored (last write wins).
            max_tokens: Maximum number of output tokens.
            temperature: Sampling temperature.

        Returns:
            LLMResponse whose ``tokens_used`` is input + output tokens.

        Raises:
            httpx.HTTPError: On transport errors or non-2xx API responses.
        """
        try:
            # The Messages API takes the system prompt as a top-level field,
            # not as a chat message, so separate it from the other turns.
            system_content = ""
            user_messages = []

            for msg in messages:
                if msg.role == "system":
                    system_content = msg.content
                else:
                    user_messages.append({"role": msg.role, "content": msg.content})

            payload = {
                "model": self.model,
                "max_tokens": max_tokens,
                "messages": user_messages,
                "temperature": temperature,
            }

            if system_content:
                payload["system"] = system_content

            logger.debug(f"Calling Claude API with {len(user_messages)} messages")

            response = await self.client.post(self.API_ENDPOINT, json=payload)
            response.raise_for_status()

            data = response.json()
            content = data["content"][0]["text"]

            # Claude returns token usage directly
            tokens_used = data.get("usage", {}).get("output_tokens", 0)
            input_tokens = data.get("usage", {}).get("input_tokens", 0)
            total_tokens = input_tokens + tokens_used

            logger.info(
                f"Claude API response: {tokens_used} output tokens "
                f"({input_tokens} input, {total_tokens} total)"
            )

            return LLMResponse(
                content=content,
                tokens_used=total_tokens,
                model=self.model,
                provider="Claude",
            )

        except httpx.HTTPError as e:
            logger.error(f"Claude API error: {e}")
            raise
        except Exception as e:
            logger.error(f"Error generating with Claude: {e}")
            raise

    async def plan(self, issue_description: str) -> str:
        """Generate an implementation plan for a GitHub issue.

        Args:
            issue_description: Raw issue text.

        Returns:
            Plain-text plan produced by the model.
        """
        system_message = Message(
            role="system",
            content="""You are an expert software architect with decades of experience.
Create a detailed, step-by-step implementation plan for fixing this GitHub issue.
Include analysis, approach, and testing strategy.""",
        )

        user_message = Message(
            role="user", content=f"GitHub Issue:\n{issue_description}"
        )

        response = await self.generate(
            [system_message, user_message], max_tokens=2000, temperature=0.7
        )

        return response.content

    async def generate_patch(
        self,
        issue_description: str,
        context_files: Dict[str, str],
        previous_errors: Optional[str] = None,
    ) -> str:
        """Generate a code patch in unified diff format.

        Args:
            issue_description: Issue text driving the fix.
            context_files: Mapping of filename -> file content to include
                as context for the model.
            previous_errors: Errors from a previous attempt, if retrying.

        Returns:
            Model output expected to be a unified diff.
        """
        system_message = Message(
            role="system",
            content="""You are an expert code generator and software engineer.
Generate a unified diff format patch to fix the issue.

Use this format exactly:
--- a/path/to/file
+++ b/path/to/file
@@ -line,count +line,count @@
 context line
-removed line
+added line

Generate complete, working patches that will fix the issue.""",
        )

        context = f"Issue:\n{issue_description}\n\n"
        context += "Current Code:\n"
        for filename, content in context_files.items():
            # Bug fix: the header previously hard-coded "(unknown)" and never
            # used `filename`; include the real name so the model can tell
            # the context files apart and target the right paths in the diff.
            context += f"\n--- {filename} ---\n{content}\n"

        if previous_errors:
            context += f"\nPrevious Attempt Errors:\n{previous_errors}"

        user_message = Message(role="user", content=context)

        # Lower temperature than plan(): patches should be deterministic-ish.
        response = await self.generate(
            [system_message, user_message], max_tokens=3000, temperature=0.5
        )

        return response.content

    async def analyze_test_failure(
        self, test_output: str, code_context: str
    ) -> Dict[str, Any]:
        """Analyze a test failure and suggest improvements.

        Args:
            test_output: Captured test output / error logs.
            code_context: Relevant code snippet.

        Returns:
            Dict with "analysis", "provider", and "tokens_used" keys.
        """
        system_message = Message(
            role="system",
            content="""Analyze the test failure in detail.
Provide:
1. Root cause analysis
2. Why the fix didn't work
3. Specific suggestions for improvement
4. Next approach to try

Be technical and precise.""",
        )

        user_message = Message(
            role="user",
            content=f"""Test Output:
{test_output}

Code Context:
{code_context}""",
        )

        response = await self.generate(
            [system_message, user_message], max_tokens=1500, temperature=0.7
        )

        return {
            "analysis": response.content,
            "provider": "Claude",
            "tokens_used": response.tokens_used,
        }

    async def close(self):
        """Close HTTP client"""
        await self.client.aclose()
|
@@ -0,0 +1,148 @@
|
|
|
1
|
+
"""Google Gemini LLM Provider"""
|
|
2
|
+
|
|
3
|
+
import logging
|
|
4
|
+
from typing import List, Dict, Any, Optional
|
|
5
|
+
import httpx
|
|
6
|
+
|
|
7
|
+
from .base import BaseLLMProvider, Message, LLMResponse
|
|
8
|
+
|
|
9
|
+
logger = logging.getLogger(__name__)
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class GeminiProvider(BaseLLMProvider):
    """Google Gemini Pro LLM Provider.

    Talks to the Generative Language REST API (v1beta) via httpx. The API
    key is passed as a query parameter, per that API's convention.
    """

    API_ENDPOINT = "https://generativelanguage.googleapis.com/v1beta/models"
    DEFAULT_MODEL = "gemini-pro"

    def __init__(self, api_key: str, model: str = DEFAULT_MODEL):
        """
        Initialize Gemini provider

        Args:
            api_key: Google API key
            model: Model (gemini-pro, gemini-pro-vision, etc)
        """
        super().__init__(api_key, model)
        self.client = httpx.AsyncClient(timeout=300.0)

    async def generate(
        self, messages: List[Message], max_tokens: int = 2000, temperature: float = 0.7
    ) -> LLMResponse:
        """Generate response using Gemini.

        Args:
            messages: Conversation messages.
            max_tokens: Maximum output tokens (maps to maxOutputTokens).
            temperature: Sampling temperature.

        Returns:
            LLMResponse. ``tokens_used`` is a rough word-count estimate of
            the OUTPUT only, since this code does not read a usage field
            from the response.

        Raises:
            httpx.HTTPError: On transport errors or non-2xx API responses.
        """
        try:
            # Convert messages to Gemini format.
            # NOTE(review): non-"user" roles (including "system") are mapped
            # to "model" here; v1beta also supports a systemInstruction
            # field — confirm this mapping is intentional.
            contents = []
            for msg in messages:
                contents.append(
                    {
                        "role": "user" if msg.role == "user" else "model",
                        "parts": [{"text": msg.content}],
                    }
                )

            payload = {
                "contents": contents,
                "generationConfig": {
                    "maxOutputTokens": max_tokens,
                    "temperature": temperature,
                },
            }

            # API key travels as a query parameter, not a header.
            url = f"{self.API_ENDPOINT}/{self.model}:generateContent?key={self.api_key}"

            logger.debug(f"Calling Gemini API with {len(messages)} messages")

            response = await self.client.post(url, json=payload)
            response.raise_for_status()

            data = response.json()
            content = data["candidates"][0]["content"]["parts"][0]["text"]

            # Estimate tokens (Gemini doesn't always return token count)
            tokens_used = len(content.split()) * 1.3  # Rough estimate

            logger.info(f"Gemini API response: ~{int(tokens_used)} tokens")

            return LLMResponse(
                content=content,
                tokens_used=int(tokens_used),
                model=self.model,
                provider="Gemini",
            )

        except httpx.HTTPError as e:
            logger.error(f"Gemini API error: {e}")
            raise
        except Exception as e:
            logger.error(f"Error generating with Gemini: {e}")
            raise

    async def plan(self, issue_description: str) -> str:
        """Generate an implementation plan for a GitHub issue.

        Args:
            issue_description: Raw issue text.

        Returns:
            Plain-text plan produced by the model.
        """
        system_message = Message(
            role="system",
            content="""You are an expert software architect.
Create a detailed implementation plan for fixing this GitHub issue.""",
        )

        user_message = Message(role="user", content=f"Issue:\n{issue_description}")

        response = await self.generate([system_message, user_message], max_tokens=2000)

        return response.content

    async def generate_patch(
        self,
        issue_description: str,
        context_files: Dict[str, str],
        previous_errors: Optional[str] = None,
    ) -> str:
        """Generate a code patch in unified diff format.

        Args:
            issue_description: Issue text driving the fix.
            context_files: Mapping of filename -> file content for context.
            previous_errors: Errors from a previous attempt, if retrying.

        Returns:
            Model output expected to be a unified diff.
        """
        system_message = Message(
            role="system", content="""Generate a unified diff format patch."""
        )

        context = f"Issue:\n{issue_description}\n\n"
        for filename, content in context_files.items():
            # Bug fix: the header previously hard-coded "(unknown)" and never
            # used `filename`; include the real name so the model can target
            # the correct file paths in the diff.
            context += f"\n--- {filename} ---\n{content}\n"

        if previous_errors:
            context += f"\nPrevious Errors:\n{previous_errors}"

        user_message = Message(role="user", content=context)

        response = await self.generate(
            [system_message, user_message], max_tokens=3000, temperature=0.5
        )

        return response.content

    async def analyze_test_failure(
        self, test_output: str, code_context: str
    ) -> Dict[str, Any]:
        """Analyze a test failure and suggest fixes.

        Args:
            test_output: Captured test output / error logs.
            code_context: Relevant code snippet.

        Returns:
            Dict with "analysis", "provider", and "tokens_used" keys.
        """
        system_message = Message(
            role="system", content="Analyze the test failure and suggest fixes."
        )

        user_message = Message(
            role="user",
            content=f"""Test Output:
{test_output}

Code:
{code_context}""",
        )

        response = await self.generate([system_message, user_message], max_tokens=1500)

        return {
            "analysis": response.content,
            "provider": "Gemini",
            "tokens_used": response.tokens_used,
        }

    async def close(self):
        """Close HTTP client"""
        await self.client.aclose()
|
|
@@ -0,0 +1,228 @@
|
|
|
1
|
+
"""Modal Z.ai GLM-5.1 LLM Provider Integration
|
|
2
|
+
|
|
3
|
+
Modal Research provides the Z.ai GLM-5.1-FP8 model via their API.
|
|
4
|
+
Docs: https://modal.com
|
|
5
|
+
Model: zai-org/GLM-5.1-FP8 (745B parameters)
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
import logging
|
|
9
|
+
from typing import List, Dict, Any, Optional
|
|
10
|
+
import httpx
|
|
11
|
+
import asyncio
|
|
12
|
+
|
|
13
|
+
from .base import BaseLLMProvider, Message, LLMResponse
|
|
14
|
+
|
|
15
|
+
logger = logging.getLogger(__name__)
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
class ModalProvider(BaseLLMProvider):
    """Modal Z.ai GLM-5.1 LLM Provider.

    Talks to Modal's OpenAI-compatible chat-completions endpoint via httpx
    using a Bearer token. Messages are forwarded as-is (system role
    included), matching the OpenAI wire format.
    """

    API_ENDPOINT = "https://api.us-west-2.modal.direct/v1/chat/completions"
    DEFAULT_MODEL = "zai-org/GLM-5.1-FP8"

    def __init__(self, api_key: str, model: str = DEFAULT_MODEL):
        """
        Initialize Modal provider

        Args:
            api_key: Modal API token
            model: Model identifier (default: GLM-5.1-FP8)
        """
        super().__init__(api_key, model)
        # Generous timeout: large-model completions can take minutes.
        self.client = httpx.AsyncClient(
            headers={
                "Authorization": f"Bearer {api_key}",
                "Content-Type": "application/json",
            },
            timeout=300.0,
        )

    async def generate(
        self, messages: List[Message], max_tokens: int = 2000, temperature: float = 0.7
    ) -> LLMResponse:
        """
        Generate response using Modal Z.ai

        Args:
            messages: Chat messages
            max_tokens: Maximum tokens in response
            temperature: Sampling temperature

        Returns:
            LLMResponse with content and token info

        Raises:
            httpx.HTTPError: On transport errors or non-2xx API responses.
        """
        try:
            # OpenAI-compatible payload: roles pass through unchanged.
            payload = {
                "model": self.model,
                "messages": [
                    {"role": msg.role, "content": msg.content} for msg in messages
                ],
                "max_tokens": max_tokens,
                "temperature": temperature,
            }

            logger.debug(f"Calling Modal API with {len(messages)} messages")

            response = await self.client.post(self.API_ENDPOINT, json=payload)
            response.raise_for_status()

            data = response.json()
            content = data["choices"][0]["message"]["content"]
            tokens_used = data.get("usage", {}).get("total_tokens", 0)

            logger.info(f"Modal API response: {tokens_used} tokens used")

            return LLMResponse(
                content=content,
                tokens_used=tokens_used,
                model=self.model,
                provider="Modal",
            )

        except httpx.HTTPError as e:
            logger.error(f"Modal API error: {e}")
            raise
        except Exception as e:
            logger.error(f"Error generating with Modal: {e}")
            raise

    async def plan(self, issue_description: str) -> str:
        """
        Generate implementation plan from issue

        Args:
            issue_description: GitHub issue text

        Returns:
            Implementation plan
        """
        system_message = Message(
            role="system",
            content="""You are an expert software architect analyzing GitHub issues.
Your task is to create a detailed implementation plan for fixing the issue.

Include:
1. Problem analysis
2. Root cause identification
3. Solution approach
4. Files to modify
5. Testing strategy

Be concise but thorough.""",
        )

        user_message = Message(
            role="user", content=f"GitHub Issue:\n{issue_description}"
        )

        response = await self.generate(
            [system_message, user_message], max_tokens=2000, temperature=0.7
        )

        return response.content

    async def generate_patch(
        self,
        issue_description: str,
        context_files: Dict[str, str],
        previous_errors: Optional[str] = None,
    ) -> str:
        """
        Generate code patch

        Args:
            issue_description: Issue description
            context_files: Dict of filename -> file content
            previous_errors: Errors from previous attempt

        Returns:
            Unified diff format patch
        """
        system_message = Message(
            role="system",
            content="""You are an expert code generator.
Generate a unified diff format patch to fix the issue.

Format:
--- a/path/to/file
+++ b/path/to/file
@@ -line,count +line,count @@
 unchanged line
-removed line
+added line

Generate complete, working patches.""",
        )

        context = f"Issue:\n{issue_description}\n\n"
        context += "Current Code:\n"
        for filename, content in context_files.items():
            # Bug fix: the header previously hard-coded "(unknown)" and never
            # used `filename`; include the real name so the model can target
            # the correct file paths in the diff.
            context += f"\n--- {filename} ---\n{content}\n"

        if previous_errors:
            context += f"\nPrevious Errors:\n{previous_errors}"

        user_message = Message(role="user", content=context)

        # Lower temperature than plan(): patches should be reproducible.
        response = await self.generate(
            [system_message, user_message], max_tokens=3000, temperature=0.5
        )

        return response.content

    async def analyze_test_failure(
        self, test_output: str, code_context: str
    ) -> Dict[str, Any]:
        """
        Analyze test failure and suggest fix

        Args:
            test_output: Test output/error logs
            code_context: Relevant code snippet

        Returns:
            Analysis dict with suggestions
        """
        system_message = Message(
            role="system",
            content="""Analyze the test failure and provide:
1. Root cause
2. Why the fix didn't work
3. Suggested improvements
4. Next approach

Be technical and specific.""",
        )

        user_message = Message(
            role="user",
            content=f"""Test Output:
{test_output}

Code Context:
{code_context}""",
        )

        response = await self.generate(
            [system_message, user_message], max_tokens=1500, temperature=0.7
        )

        return {
            "analysis": response.content,
            "provider": "Modal",
            "tokens_used": response.tokens_used,
        }

    async def close(self):
        """Close HTTP client"""
        await self.client.aclose()

    def __del__(self):
        """Cleanup on deletion — intentionally a no-op.

        Running async cleanup from __del__ is unsafe (there may be no
        running event loop); callers must close the client explicitly via
        close() or a context manager. The previous bare ``try/except: pass``
        around a plain ``return`` did nothing and has been removed.
        """
|