@voria/cli 0.0.2
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +439 -0
- package/bin/voria +730 -0
- package/docs/ARCHITECTURE.md +419 -0
- package/docs/CHANGELOG.md +189 -0
- package/docs/CONTRIBUTING.md +447 -0
- package/docs/DESIGN_DECISIONS.md +380 -0
- package/docs/DEVELOPMENT.md +535 -0
- package/docs/EXAMPLES.md +434 -0
- package/docs/INSTALL.md +335 -0
- package/docs/IPC_PROTOCOL.md +310 -0
- package/docs/LLM_INTEGRATION.md +416 -0
- package/docs/MODULES.md +470 -0
- package/docs/PERFORMANCE.md +346 -0
- package/docs/PLUGINS.md +432 -0
- package/docs/QUICKSTART.md +184 -0
- package/docs/README.md +133 -0
- package/docs/ROADMAP.md +346 -0
- package/docs/SECURITY.md +334 -0
- package/docs/TROUBLESHOOTING.md +565 -0
- package/docs/USER_GUIDE.md +700 -0
- package/package.json +63 -0
- package/python/voria/__init__.py +8 -0
- package/python/voria/__pycache__/__init__.cpython-312.pyc +0 -0
- package/python/voria/__pycache__/engine.cpython-312.pyc +0 -0
- package/python/voria/core/__init__.py +1 -0
- package/python/voria/core/__pycache__/__init__.cpython-312.pyc +0 -0
- package/python/voria/core/__pycache__/setup.cpython-312.pyc +0 -0
- package/python/voria/core/agent/__init__.py +9 -0
- package/python/voria/core/agent/__pycache__/__init__.cpython-312.pyc +0 -0
- package/python/voria/core/agent/__pycache__/loop.cpython-312.pyc +0 -0
- package/python/voria/core/agent/loop.py +343 -0
- package/python/voria/core/executor/__init__.py +19 -0
- package/python/voria/core/executor/__pycache__/__init__.cpython-312.pyc +0 -0
- package/python/voria/core/executor/__pycache__/executor.cpython-312.pyc +0 -0
- package/python/voria/core/executor/executor.py +431 -0
- package/python/voria/core/github/__init__.py +33 -0
- package/python/voria/core/github/__pycache__/__init__.cpython-312.pyc +0 -0
- package/python/voria/core/github/__pycache__/client.cpython-312.pyc +0 -0
- package/python/voria/core/github/client.py +438 -0
- package/python/voria/core/llm/__init__.py +55 -0
- package/python/voria/core/llm/__pycache__/__init__.cpython-312.pyc +0 -0
- package/python/voria/core/llm/__pycache__/base.cpython-312.pyc +0 -0
- package/python/voria/core/llm/__pycache__/claude_provider.cpython-312.pyc +0 -0
- package/python/voria/core/llm/__pycache__/gemini_provider.cpython-312.pyc +0 -0
- package/python/voria/core/llm/__pycache__/modal_provider.cpython-312.pyc +0 -0
- package/python/voria/core/llm/__pycache__/model_discovery.cpython-312.pyc +0 -0
- package/python/voria/core/llm/__pycache__/openai_provider.cpython-312.pyc +0 -0
- package/python/voria/core/llm/base.py +152 -0
- package/python/voria/core/llm/claude_provider.py +188 -0
- package/python/voria/core/llm/gemini_provider.py +148 -0
- package/python/voria/core/llm/modal_provider.py +228 -0
- package/python/voria/core/llm/model_discovery.py +289 -0
- package/python/voria/core/llm/openai_provider.py +146 -0
- package/python/voria/core/patcher/__init__.py +9 -0
- package/python/voria/core/patcher/__pycache__/__init__.cpython-312.pyc +0 -0
- package/python/voria/core/patcher/__pycache__/patcher.cpython-312.pyc +0 -0
- package/python/voria/core/patcher/patcher.py +375 -0
- package/python/voria/core/planner/__init__.py +1 -0
- package/python/voria/core/setup.py +201 -0
- package/python/voria/core/token_manager/__init__.py +29 -0
- package/python/voria/core/token_manager/__pycache__/__init__.cpython-312.pyc +0 -0
- package/python/voria/core/token_manager/__pycache__/manager.cpython-312.pyc +0 -0
- package/python/voria/core/token_manager/manager.py +241 -0
- package/python/voria/engine.py +1185 -0
- package/python/voria/plugins/__init__.py +1 -0
- package/python/voria/plugins/python/__init__.py +1 -0
- package/python/voria/plugins/typescript/__init__.py +1 -0
|
@@ -0,0 +1,289 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Dynamic model discovery for all LLM providers.
|
|
3
|
+
Fetches available models at runtime based on API keys.
|
|
4
|
+
"""
|
|
5
|
+
|
|
6
|
+
import asyncio
|
|
7
|
+
import httpx
|
|
8
|
+
from dataclasses import dataclass
|
|
9
|
+
from typing import List, Optional
|
|
10
|
+
import logging
|
|
11
|
+
|
|
12
|
+
logger = logging.getLogger(__name__)
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
@dataclass
class ModelInfo:
    """Information about an available model."""

    # Provider-specific model identifier, passed back verbatim to the API.
    name: str
    # Human-readable label for UIs; often identical to ``name``.
    display_name: str
    # Optional rate-limit hint (tokens/hour); None when the provider
    # does not publish one.
    tokens_per_hour: Optional[int] = None
    # Context-window / per-request token limit; None when unknown.
    max_tokens: Optional[int] = None
    # Free-form provider/description text for display.
    description: str = ""


class ModelDiscovery:
    """Fetch available models from LLM providers.

    Each ``fetch_*`` coroutine queries the provider's model-listing
    endpoint and degrades gracefully to a static fallback catalog when
    the request fails, returns a non-200 status, or yields no usable
    entries.  :meth:`discover_all` is the single dispatching entry point.
    """

    @staticmethod
    async def fetch_modal_models(api_key: str) -> List[ModelInfo]:
        """Fetch available models from Modal Z.ai API.

        Args:
            api_key: Bearer token for the Modal API.

        Returns:
            Models reported by the API, or the static fallback list on
            any failure or empty response.
        """
        try:
            async with httpx.AsyncClient() as client:
                response = await client.get(
                    "https://api.us-west-2.modal.direct/v1/models",
                    headers={"Authorization": f"Bearer {api_key}"},
                    timeout=10.0,
                )
                if response.status_code == 200:
                    data = response.json()
                    models = []
                    # Modal returns model data in "data" key
                    for model in data.get("data", []):
                        model_id = model.get("id", model.get("name"))
                        if not model_id:
                            # Robustness fix: skip malformed entries instead of
                            # emitting a ModelInfo whose name is None.
                            continue
                        models.append(
                            ModelInfo(
                                name=model_id,
                                display_name=model_id,
                                max_tokens=model.get("max_tokens", 4096),
                                description=f"Modal Z.ai - {model.get('created', 'N/A')}",
                            )
                        )
                    return (
                        models if models else await ModelDiscovery._get_modal_fallback()
                    )
                logger.warning(
                    f"Modal API returned {response.status_code}, using fallback models"
                )
                return await ModelDiscovery._get_modal_fallback()
        except Exception as e:
            logger.warning(f"Failed to fetch Modal models: {e}, using fallback")
            return await ModelDiscovery._get_modal_fallback()

    @staticmethod
    async def _get_modal_fallback() -> List[ModelInfo]:
        """Fallback models for Modal when API unavailable."""
        # NOTE(review): these model names/sizes are hard-coded and could not be
        # verified against a public Modal catalog — confirm before release.
        return [
            ModelInfo(
                name="zai-org/GLM-5.1-FP8",
                display_name="GLM-5.1-FP8 (745B, Latest)",
                max_tokens=4096,
                description="Latest Modal Z.ai model - 745B parameters",
            ),
            ModelInfo(
                name="zai-org/GLM-4",
                display_name="GLM-4 (370B, Legacy)",
                max_tokens=2048,
                description="Previous generation Modal model",
            ),
        ]

    @staticmethod
    async def fetch_openai_models(api_key: str) -> List[ModelInfo]:
        """Fetch available models from OpenAI API.

        Only chat-capable ``gpt-*`` families are kept; results are ordered
        newest-family-first.  Falls back to the static list on any failure.

        Args:
            api_key: OpenAI API key (Bearer token).
        """
        try:
            async with httpx.AsyncClient() as client:
                response = await client.get(
                    "https://api.openai.com/v1/models",
                    headers={"Authorization": f"Bearer {api_key}"},
                    timeout=10.0,
                )
                if response.status_code == 200:
                    data = response.json()
                    models = []
                    # Filter to only gpt models suitable for text generation.
                    # These act as *prefixes*, so e.g. "gpt-4o-mini" matches too.
                    suitable_models = {
                        "gpt-4o",
                        "gpt-4-turbo",
                        "gpt-4",
                        "gpt-3.5-turbo",
                    }
                    for model in data.get("data", []):
                        model_id = model.get("id", "")
                        # Match by prefix or exact name
                        if any(
                            model_id.startswith(prefix) for prefix in suitable_models
                        ):
                            models.append(
                                ModelInfo(
                                    name=model_id,
                                    display_name=model_id,
                                    description=f"OpenAI - {model.get('owned_by', 'N/A')}",
                                )
                            )
                    # Sort by recency (gpt-4o > gpt-4-turbo > gpt-4 > gpt-3.5-turbo):
                    # each key element is False (sorts first) when the prefix matches.
                    return (
                        sorted(
                            models,
                            key=lambda x: (
                                not x.name.startswith("gpt-4o"),
                                not x.name.startswith("gpt-4-turbo"),
                                not x.name.startswith("gpt-4"),
                            ),
                        )
                        if models
                        else await ModelDiscovery._get_openai_fallback()
                    )
                logger.warning(
                    f"OpenAI API returned {response.status_code}, using fallback models"
                )
                return await ModelDiscovery._get_openai_fallback()
        except Exception as e:
            logger.warning(f"Failed to fetch OpenAI models: {e}, using fallback")
            return await ModelDiscovery._get_openai_fallback()

    @staticmethod
    async def _get_openai_fallback() -> List[ModelInfo]:
        """Fallback models for OpenAI when API unavailable."""
        # NOTE(review): the "gpt-5.4" family names and pricing below do not
        # match any published OpenAI model list — verify against the current
        # OpenAI model catalog before shipping.
        return [
            ModelInfo(
                name="gpt-5.4",
                display_name="GPT-5.4 (Latest Frontier)",
                max_tokens=128000,
                description="Best intelligence at scale for agentic, coding, and professional workflows. $2.50 input, $15 output per 1M tokens",
            ),
            ModelInfo(
                name="gpt-5.4-mini",
                display_name="GPT-5.4-mini (Mini Model)",
                max_tokens=128000,
                description="Strongest mini model yet for coding, computer use, and agentic tasks. $0.75 input, $4.50 output per 1M tokens",
            ),
            ModelInfo(
                name="gpt-5.4-nano",
                display_name="GPT-5.4-nano (Cheapest)",
                max_tokens=128000,
                description="Cheapest GPT-5.4-class model for simple high-volume tasks. $0.20 input, $1.25 output per 1M tokens",
            ),
            ModelInfo(
                name="gpt-4o",
                display_name="GPT-4o (Previous High Quality)",
                max_tokens=128000,
                description="Previous latest model - optimized for speed and cost",
            ),
        ]

    @staticmethod
    async def fetch_gemini_models(api_key: str) -> List[ModelInfo]:
        """Fetch available models from Google Gemini API.

        Args:
            api_key: Google AI Studio API key.

        Returns:
            Gemini-family models reported by the API, or the static
            fallback list on any failure.
        """
        try:
            async with httpx.AsyncClient() as client:
                response = await client.get(
                    "https://generativelanguage.googleapis.com/v1/models",
                    # Security fix: send the key in the documented
                    # x-goog-api-key header instead of the URL query string,
                    # where it could leak into proxy/server logs.
                    headers={"x-goog-api-key": api_key},
                    timeout=10.0,
                )
                if response.status_code == 200:
                    data = response.json()
                    models = []
                    # Filter to generative models
                    for model in data.get("models", []):
                        model_name = model.get("name", "").replace("models/", "")
                        if "gemini" in model_name.lower():
                            models.append(
                                ModelInfo(
                                    name=model_name,
                                    display_name=model_name,
                                    description=f"Google Gemini - {model.get('displayName', 'N/A')}",
                                )
                            )
                    return (
                        models
                        if models
                        else await ModelDiscovery._get_gemini_fallback()
                    )
                logger.warning(
                    f"Gemini API returned {response.status_code}, using fallback models"
                )
                return await ModelDiscovery._get_gemini_fallback()
        except Exception as e:
            logger.warning(f"Failed to fetch Gemini models: {e}, using fallback")
            return await ModelDiscovery._get_gemini_fallback()

    @staticmethod
    async def _get_gemini_fallback() -> List[ModelInfo]:
        """Fallback models for Gemini when API unavailable."""
        # NOTE(review): "gemini-3.x" names below do not match Google's
        # published model ids — verify against the current Gemini model list.
        return [
            ModelInfo(
                name="gemini-3.1-pro",
                display_name="Gemini 3.1 Pro (Latest SOTA Reasoning)",
                max_tokens=200000,
                description="Latest SOTA reasoning model with unprecedented depth and nuance. $2 input, $12 output per context window",
            ),
            ModelInfo(
                name="gemini-3-flash",
                display_name="Gemini 3 Flash (Latest, Fastest)",
                max_tokens=200000,
                description="Most intelligent model built for speed, combining frontier intelligence with superior search and grounding",
            ),
            ModelInfo(
                name="gemini-3.1-flash-lite",
                display_name="Gemini 3.1 Flash Lite (Cheapest)",
                max_tokens=200000,
                description="Most cost-efficient model, optimized for high-volume agentic tasks. $0.25 input, $1.50 output",
            ),
            ModelInfo(
                name="gemini-2.0-flash",
                display_name="Gemini 2.0 Flash (Previous)",
                max_tokens=2000,
                description="Previous generation Gemini model",
            ),
        ]

    @staticmethod
    async def fetch_claude_models(api_key: str) -> List[ModelInfo]:
        """Fetch available models from Anthropic Claude API.

        The endpoint is only used to sanity-check the API key; the
        returned list always comes from the static catalog.

        Args:
            api_key: Anthropic API key.
        """
        try:
            async with httpx.AsyncClient() as client:
                response = await client.get(
                    "https://api.anthropic.com/v1/models",
                    headers={
                        "x-api-key": api_key,
                        # Anthropic rejects requests without a version header.
                        "anthropic-version": "2023-06-01",
                    },
                    timeout=10.0,
                )
                # Bug fix: the original never inspected the response, so an
                # invalid key (401) was silently treated as verified.
                if response.status_code != 200:
                    logger.warning(
                        f"Claude API returned {response.status_code}; "
                        "API key may be invalid, returning known models"
                    )
                return await ModelDiscovery._get_claude_fallback()
        except Exception as e:
            logger.warning(f"Failed to verify Claude API: {e}, returning known models")
            return await ModelDiscovery._get_claude_fallback()

    @staticmethod
    async def _get_claude_fallback() -> List[ModelInfo]:
        """Known Claude models (static catalog used for all Claude results)."""
        # NOTE(review): model ids and pricing below should be verified
        # against Anthropic's current model documentation.
        return [
            ModelInfo(
                name="claude-opus-4.6",
                display_name="Claude Opus 4.6 (Most Intelligent)",
                max_tokens=200000,
                description="Most intelligent broadly available model for complex reasoning. $5 input, $25 output per 1M tokens",
            ),
            ModelInfo(
                name="claude-sonnet-4.6",
                display_name="Claude Sonnet 4.6 (Best Value)",
                max_tokens=200000,
                description="Best balance of speed and intelligence. $3 input, $15 output per 1M tokens",
            ),
            ModelInfo(
                name="claude-haiku-4.5",
                display_name="Claude Haiku 4.5 (Fastest, Cheapest)",
                max_tokens=200000,
                description="Fast and cost-efficient for simpler tasks. $0.80 input, $4 output per 1M tokens",
            ),
        ]

    @staticmethod
    async def discover_all(provider: str, api_key: str) -> List[ModelInfo]:
        """Discover all models for a given provider.

        Args:
            provider: One of "modal", "openai", "gemini", "claude"
                (case-insensitive; surrounding whitespace ignored).
            api_key: Provider API key, forwarded to the fetcher.

        Raises:
            ValueError: If *provider* is not recognized.
        """
        provider = provider.lower().strip()
        fetchers = {
            "modal": ModelDiscovery.fetch_modal_models,
            "openai": ModelDiscovery.fetch_openai_models,
            "gemini": ModelDiscovery.fetch_gemini_models,
            "claude": ModelDiscovery.fetch_claude_models,
        }
        try:
            fetcher = fetchers[provider]
        except KeyError:
            raise ValueError(f"Unknown provider: {provider}") from None
        return await fetcher(api_key)
|
@@ -0,0 +1,146 @@
|
|
|
1
|
+
"""OpenAI GPT-4 and GPT-3.5 LLM Provider"""
|
|
2
|
+
|
|
3
|
+
import logging
|
|
4
|
+
from typing import List, Dict, Any, Optional
|
|
5
|
+
import httpx
|
|
6
|
+
|
|
7
|
+
from .base import BaseLLMProvider, Message, LLMResponse
|
|
8
|
+
|
|
9
|
+
logger = logging.getLogger(__name__)
|
|
10
|
+
|
|
11
|
+
|
|
12
|
+
class OpenAIProvider(BaseLLMProvider):
    """OpenAI GPT-4 and GPT-3.5-turbo Provider"""

    # Chat Completions endpoint; all methods below funnel through it.
    API_ENDPOINT = "https://api.openai.com/v1/chat/completions"
    DEFAULT_MODEL = "gpt-4"

    def __init__(self, api_key: str, model: str = DEFAULT_MODEL):
        """
        Initialize OpenAI provider

        Args:
            api_key: OpenAI API key
            model: Model (gpt-4, gpt-3.5-turbo, etc)
        """
        super().__init__(api_key, model)
        # Long-lived client reused across calls; callers must await
        # close() to release the underlying connection pool.
        self.client = httpx.AsyncClient(
            headers={
                "Authorization": f"Bearer {api_key}",
                "Content-Type": "application/json",
            },
            timeout=300.0,
        )

    async def generate(
        self, messages: List[Message], max_tokens: int = 2000, temperature: float = 0.7
    ) -> LLMResponse:
        """Generate response using OpenAI.

        Args:
            messages: Conversation history; each Message supplies role/content.
            max_tokens: Completion token cap passed to the API.
            temperature: Sampling temperature passed to the API.

        Returns:
            LLMResponse with the first choice's content and total token usage.

        Raises:
            httpx.HTTPError: On transport errors or non-2xx responses.
            Exception: Re-raised after logging for any other failure
                (e.g. an unexpected response shape).
        """
        try:
            payload = {
                "model": self.model,
                "messages": [
                    {"role": msg.role, "content": msg.content} for msg in messages
                ],
                "max_tokens": max_tokens,
                "temperature": temperature,
            }

            logger.debug(f"Calling OpenAI API with {len(messages)} messages")

            response = await self.client.post(self.API_ENDPOINT, json=payload)
            response.raise_for_status()

            data = response.json()
            content = data["choices"][0]["message"]["content"]
            tokens_used = data.get("usage", {}).get("total_tokens", 0)

            logger.info(f"OpenAI API response: {tokens_used} tokens used")

            return LLMResponse(
                content=content,
                tokens_used=tokens_used,
                model=self.model,
                provider="OpenAI",
            )

        except httpx.HTTPError as e:
            logger.error(f"OpenAI API error: {e}")
            raise
        except Exception as e:
            logger.error(f"Error generating with OpenAI: {e}")
            raise

    async def plan(self, issue_description: str) -> str:
        """Generate implementation plan for a GitHub issue.

        Args:
            issue_description: Free-form issue text.

        Returns:
            The model's plan as plain text.
        """
        system_message = Message(
            role="system",
            content="""You are an expert software architect.
Create a detailed implementation plan for fixing this GitHub issue.""",
        )

        user_message = Message(role="user", content=f"Issue:\n{issue_description}")

        response = await self.generate([system_message, user_message], max_tokens=2000)

        return response.content

    async def generate_patch(
        self,
        issue_description: str,
        context_files: Dict[str, str],
        previous_errors: Optional[str] = None,
    ) -> str:
        """Generate code patch in unified diff format.

        Args:
            issue_description: The issue being fixed.
            context_files: Mapping of file path -> file contents to show
                the model as context.
            previous_errors: Optional error output from an earlier attempt,
                appended so the model can self-correct.

        Returns:
            The model's patch text (expected to be a unified diff).
        """
        system_message = Message(
            role="system",
            content="""Generate a unified diff format patch.
Format:
--- a/path
+++ b/path
@@ -line,count +line,count @@""",
        )

        context = f"Issue:\n{issue_description}\n\n"
        for filename, content in context_files.items():
            # Bug fix: the original ignored the loop's `filename` and emitted a
            # literal "(unknown)" header for every file, so the model could not
            # attribute content to paths when producing the diff.
            context += f"\n--- {filename} ---\n{content}\n"

        if previous_errors:
            context += f"\nPrevious Errors:\n{previous_errors}"

        user_message = Message(role="user", content=context)

        response = await self.generate(
            [system_message, user_message], max_tokens=3000, temperature=0.5
        )

        return response.content

    async def analyze_test_failure(
        self, test_output: str, code_context: str
    ) -> Dict[str, Any]:
        """Analyze test failure and suggest fixes.

        Args:
            test_output: Raw output from the failing test run.
            code_context: Relevant source code for the model to inspect.

        Returns:
            Dict with "analysis" text, the "provider" name, and
            "tokens_used" for accounting.
        """
        system_message = Message(
            role="system", content="Analyze the test failure and suggest fixes."
        )

        user_message = Message(
            role="user",
            content=f"""Test Output:
{test_output}

Code:
{code_context}""",
        )

        response = await self.generate([system_message, user_message], max_tokens=1500)

        return {
            "analysis": response.content,
            "provider": "OpenAI",
            "tokens_used": response.tokens_used,
        }

    async def close(self):
        """Close HTTP client"""
        await self.client.aclose()
|
|
Binary file
|
|
Binary file
|