@voria/cli 0.0.4 → 0.0.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. package/README.md +75 -380
  2. package/bin/voria +625 -486
  3. package/docs/CHANGELOG.md +19 -0
  4. package/docs/USER_GUIDE.md +34 -5
  5. package/package.json +1 -1
  6. package/python/voria/__init__.py +1 -1
  7. package/python/voria/__pycache__/__init__.cpython-312.pyc +0 -0
  8. package/python/voria/__pycache__/engine.cpython-312.pyc +0 -0
  9. package/python/voria/core/__pycache__/__init__.cpython-312.pyc +0 -0
  10. package/python/voria/core/__pycache__/setup.cpython-312.pyc +0 -0
  11. package/python/voria/core/agent/__pycache__/__init__.cpython-312.pyc +0 -0
  12. package/python/voria/core/agent/__pycache__/loop.cpython-312.pyc +0 -0
  13. package/python/voria/core/executor/__pycache__/__init__.cpython-312.pyc +0 -0
  14. package/python/voria/core/executor/__pycache__/executor.cpython-312.pyc +0 -0
  15. package/python/voria/core/executor/executor.py +5 -0
  16. package/python/voria/core/github/__pycache__/__init__.cpython-312.pyc +0 -0
  17. package/python/voria/core/github/__pycache__/client.cpython-312.pyc +0 -0
  18. package/python/voria/core/llm/__init__.py +16 -0
  19. package/python/voria/core/llm/__pycache__/__init__.cpython-312.pyc +0 -0
  20. package/python/voria/core/llm/__pycache__/base.cpython-312.pyc +0 -0
  21. package/python/voria/core/llm/__pycache__/claude_provider.cpython-312.pyc +0 -0
  22. package/python/voria/core/llm/__pycache__/deepseek_provider.cpython-312.pyc +0 -0
  23. package/python/voria/core/llm/__pycache__/gemini_provider.cpython-312.pyc +0 -0
  24. package/python/voria/core/llm/__pycache__/kimi_provider.cpython-312.pyc +0 -0
  25. package/python/voria/core/llm/__pycache__/minimax_provider.cpython-312.pyc +0 -0
  26. package/python/voria/core/llm/__pycache__/modal_provider.cpython-312.pyc +0 -0
  27. package/python/voria/core/llm/__pycache__/model_discovery.cpython-312.pyc +0 -0
  28. package/python/voria/core/llm/__pycache__/openai_provider.cpython-312.pyc +0 -0
  29. package/python/voria/core/llm/__pycache__/siliconflow_provider.cpython-312.pyc +0 -0
  30. package/python/voria/core/llm/base.py +12 -0
  31. package/python/voria/core/llm/claude_provider.py +46 -0
  32. package/python/voria/core/llm/deepseek_provider.py +109 -0
  33. package/python/voria/core/llm/gemini_provider.py +44 -0
  34. package/python/voria/core/llm/kimi_provider.py +109 -0
  35. package/python/voria/core/llm/minimax_provider.py +187 -0
  36. package/python/voria/core/llm/modal_provider.py +33 -0
  37. package/python/voria/core/llm/model_discovery.py +58 -16
  38. package/python/voria/core/llm/openai_provider.py +33 -0
  39. package/python/voria/core/llm/siliconflow_provider.py +109 -0
  40. package/python/voria/core/patcher/__pycache__/__init__.cpython-312.pyc +0 -0
  41. package/python/voria/core/patcher/__pycache__/patcher.cpython-312.pyc +0 -0
  42. package/python/voria/core/setup.py +4 -1
  43. package/python/voria/core/testing/__pycache__/definitions.cpython-312.pyc +0 -0
  44. package/python/voria/core/testing/__pycache__/runner.cpython-312.pyc +0 -0
  45. package/python/voria/core/testing/definitions.py +87 -0
  46. package/python/voria/core/testing/runner.py +324 -0
  47. package/python/voria/engine.py +736 -232
@@ -0,0 +1,187 @@
1
+ """MiniMax LLM Provider via NVIDIA Integrate API"""
2
+
3
+ import logging
4
+ from typing import List, Dict, Any, Optional
5
+ import httpx
6
+
7
+ from .base import BaseLLMProvider, Message, LLMResponse
8
+
9
+ logger = logging.getLogger(__name__)
10
+
11
+
12
class MiniMaxProvider(BaseLLMProvider):
    """MiniMax LLM provider using NVIDIA's OpenAI-compatible Integrate API.

    All requests share one ``httpx.AsyncClient``; call :meth:`close` when
    finished to release the connection pool.
    """

    API_ENDPOINT = "https://integrate.api.nvidia.com/v1/chat/completions"
    DEFAULT_MODEL = "minimaxai/minimax-m2.7"

    def __init__(self, api_key: str, model: str = DEFAULT_MODEL):
        """
        Initialize MiniMax provider

        Args:
            api_key: NVIDIA API key
            model: Model (minimaxai/minimax-m2.7, etc)
        """
        super().__init__(api_key, model)
        # 300 s timeout: large generations can take minutes to complete.
        self.client = httpx.AsyncClient(
            headers={
                "Authorization": f"Bearer {api_key}",
                "Content-Type": "application/json",
            },
            timeout=300.0,
        )

    def _build_payload(
        self,
        messages: List[Message],
        max_tokens: int,
        temperature: float,
        stream: bool,
    ) -> Dict[str, Any]:
        """Build the chat-completions request body shared by generate/stream."""
        return {
            "model": self.model,
            "messages": [
                {"role": msg.role, "content": msg.content} for msg in messages
            ],
            "max_tokens": max_tokens,
            "temperature": temperature,
            "top_p": 0.95,
            "stream": stream,
        }

    async def generate(
        self, messages: List[Message], max_tokens: int = 2000, temperature: float = 0.7
    ) -> LLMResponse:
        """Generate a single non-streaming response using MiniMax.

        Args:
            messages: Conversation messages (system/user/assistant).
            max_tokens: Completion-length cap sent to the API.
            temperature: Sampling temperature.

        Returns:
            LLMResponse with the completion text and reported token usage.

        Raises:
            httpx.HTTPError: on transport or non-2xx HTTP failures.
        """
        try:
            payload = self._build_payload(
                messages, max_tokens, temperature, stream=False
            )

            logger.debug(f"Calling MiniMax API with {len(messages)} messages")
            logger.info(f"Sending generation request to MiniMax model {self.model}...")

            response = await self.client.post(self.API_ENDPOINT, json=payload)
            response.raise_for_status()

            data = response.json()
            content = data["choices"][0]["message"]["content"]
            # "usage" may be absent from a response; default token count to 0.
            tokens_used = data.get("usage", {}).get("total_tokens", 0)

            logger.info(f"MiniMax API response: {tokens_used} tokens used")

            return LLMResponse(
                content=content,
                tokens_used=tokens_used,
                model=self.model,
                provider="MiniMax",
            )

        except httpx.HTTPError as e:
            logger.error(f"MiniMax API error: {e}")
            raise
        except Exception as e:
            logger.error(f"Error generating with MiniMax: {e}")
            raise

    async def stream_generate(
        self, messages: List[Message], max_tokens: int = 2000, temperature: float = 0.7
    ):
        """Yield completion text chunks from MiniMax as they arrive (SSE)."""
        import json

        try:
            payload = self._build_payload(
                messages, max_tokens, temperature, stream=True
            )

            async with self.client.stream(
                "POST", self.API_ENDPOINT, json=payload
            ) as response:
                response.raise_for_status()
                async for line in response.aiter_lines():
                    if not line:
                        continue
                    # SSE frames: "data: <json>", terminated by "data: [DONE]"
                    if line.startswith("data: "):
                        data_str = line[6:]
                        if data_str == "[DONE]":
                            break
                        try:
                            data = json.loads(data_str)
                            delta = data["choices"][0].get("delta", {})
                            if "content" in delta:
                                yield delta["content"]
                        except Exception as e:
                            # Malformed chunk — log it and keep the stream alive.
                            logger.error(f"Error parsing stream chunk: {e}")
                            continue

        except Exception as e:
            logger.error(f"Error in MiniMax stream: {e}")
            raise

    async def plan(self, issue_description: str) -> str:
        """Generate an implementation plan for the given issue text."""
        system_message = Message(
            role="system",
            content="""You are an expert software architect.
Create a detailed implementation plan for fixing this GitHub issue.""",
        )

        user_message = Message(role="user", content=f"Issue:\n{issue_description}")

        response = await self.generate([system_message, user_message], max_tokens=2000)

        return response.content

    async def generate_patch(
        self,
        issue_description: str,
        context_files: Dict[str, str],
        previous_errors: Optional[str] = None,
    ) -> str:
        """Generate a code patch in unified diff format.

        Args:
            issue_description: The issue text to fix.
            context_files: Mapping of file path -> file contents for context.
            previous_errors: Errors from a prior attempt, if retrying.
        """
        system_message = Message(
            role="system",
            content="""Generate a unified diff format patch.
Format:
--- a/path
+++ b/path
@@ -line,count +line,count @@""",
        )

        context = f"Issue:\n{issue_description}\n\n"
        # Label each context file with its path so the model can target patches
        # (previously the loop bound `filename` but never used it).
        for filename, content in context_files.items():
            context += f"\n--- {filename} ---\n{content}\n"

        if previous_errors:
            context += f"\nPrevious Errors:\n{previous_errors}"

        user_message = Message(role="user", content=context)

        response = await self.generate(
            [system_message, user_message], max_tokens=3000, temperature=0.5
        )

        return response.content

    async def analyze_test_failure(
        self, test_output: str, code_context: str
    ) -> Dict[str, Any]:
        """Analyze a test failure; returns the analysis text plus usage info."""
        system_message = Message(
            role="system", content="Analyze the test failure and suggest fixes."
        )

        user_message = Message(
            role="user",
            content=f"""Test Output:
{test_output}

Code:
{code_context}""",
        )

        response = await self.generate([system_message, user_message], max_tokens=1500)

        return {
            "analysis": response.content,
            "provider": "MiniMax",
            "tokens_used": response.tokens_used,
        }

    async def close(self):
        """Close HTTP client"""
        await self.client.aclose()
@@ -214,6 +214,39 @@ Code Context:
214
214
  "tokens_used": response.tokens_used,
215
215
  }
216
216
 
217
+ async def stream_generate(
218
+ self, messages: List[Message], max_tokens: int = 2000, temperature: float = 0.7
219
+ ):
220
+ """Stream response tokens from Modal"""
221
+ import json as _json
222
+ try:
223
+ payload = {
224
+ "model": self.model,
225
+ "messages": [{"role": m.role, "content": m.content} for m in messages],
226
+ "max_tokens": max_tokens,
227
+ "temperature": temperature,
228
+ "stream": True,
229
+ }
230
+ async with self.client.stream("POST", self.API_ENDPOINT, json=payload) as response:
231
+ response.raise_for_status()
232
+ async for line in response.aiter_lines():
233
+ if not line:
234
+ continue
235
+ if line.startswith("data: "):
236
+ data_str = line[6:]
237
+ if data_str == "[DONE]":
238
+ break
239
+ try:
240
+ data = _json.loads(data_str)
241
+ delta = data["choices"][0].get("delta", {})
242
+ if "content" in delta:
243
+ yield delta["content"]
244
+ except Exception:
245
+ continue
246
+ except Exception as e:
247
+ logger.error(f"Modal stream error: {e}")
248
+ raise
249
+
217
250
  async def close(self):
218
251
  """Close HTTP client"""
219
252
  await self.client.aclose()
@@ -28,7 +28,9 @@ class ModelDiscovery:
28
28
  """Fetch available models from LLM providers."""
29
29
 
30
30
  @staticmethod
31
- async def fetch_generic_openai_compatible(api_key: str, base_url: str, provider_name: str) -> List[ModelInfo]:
31
+ async def fetch_generic_openai_compatible(
32
+ api_key: str, base_url: str, provider_name: str
33
+ ) -> List[ModelInfo]:
32
34
  """Fetch models from an OpenAI-compatible API."""
33
35
  try:
34
36
  async with httpx.AsyncClient() as client:
@@ -51,7 +53,9 @@ class ModelDiscovery:
51
53
  )
52
54
  return models
53
55
  else:
54
- logger.warning(f"{provider_name} API returned {response.status_code}")
56
+ logger.warning(
57
+ f"{provider_name} API returned {response.status_code}"
58
+ )
55
59
  return []
56
60
  except Exception as e:
57
61
  logger.warning(f"Failed to fetch {provider_name} models: {e}")
@@ -79,7 +83,9 @@ class ModelDiscovery:
79
83
  description=f"Modal Z.ai - {model.get('created', 'N/A')}",
80
84
  )
81
85
  )
82
- return models if models else await ModelDiscovery._get_modal_fallback()
86
+ return (
87
+ models if models else await ModelDiscovery._get_modal_fallback()
88
+ )
83
89
  return await ModelDiscovery._get_modal_fallback()
84
90
  except Exception:
85
91
  return await ModelDiscovery._get_modal_fallback()
@@ -108,8 +114,14 @@ class ModelDiscovery:
108
114
  for model in data.get("data", []):
109
115
  model_id = model.get("id", "")
110
116
  if any(model_id.startswith(p) for p in suitable_prefixes):
111
- models.append(ModelInfo(name=model_id, display_name=model_id))
112
- return models if models else await ModelDiscovery._get_openai_fallback()
117
+ models.append(
118
+ ModelInfo(name=model_id, display_name=model_id)
119
+ )
120
+ return (
121
+ models
122
+ if models
123
+ else await ModelDiscovery._get_openai_fallback()
124
+ )
113
125
  return await ModelDiscovery._get_openai_fallback()
114
126
  except Exception:
115
127
  return await ModelDiscovery._get_openai_fallback()
@@ -137,7 +149,11 @@ class ModelDiscovery:
137
149
  name = model.get("name", "").replace("models/", "")
138
150
  if "gemini" in name.lower():
139
151
  models.append(ModelInfo(name=name, display_name=name))
140
- return models if models else await ModelDiscovery._get_gemini_fallback()
152
+ return (
153
+ models
154
+ if models
155
+ else await ModelDiscovery._get_gemini_fallback()
156
+ )
141
157
  return await ModelDiscovery._get_gemini_fallback()
142
158
  except Exception:
143
159
  return await ModelDiscovery._get_gemini_fallback()
@@ -150,14 +166,30 @@ class ModelDiscovery:
150
166
  ]
151
167
 
152
168
    @staticmethod
    async def _get_claude_fallback() -> List[ModelInfo]:
        # Hardcoded list: Anthropic exposes no "list models" endpoint (see
        # fetch_claude_models). Declared async only so every fetch/fallback
        # path can be awaited uniformly by callers.
        return [
            ModelInfo(
                name="claude-3-5-sonnet-20240620", display_name="Claude 3.5 Sonnet"
            ),
            ModelInfo(name="claude-3-opus-20240229", display_name="Claude 3 Opus"),
            ModelInfo(name="claude-3-haiku-20240307", display_name="Claude 3 Haiku"),
        ]
160
177
 
178
    @staticmethod
    async def fetch_claude_models(api_key: str) -> List[ModelInfo]:
        """Return the known Claude models.

        Anthropic doesn't have a models endpoint, so this just returns the
        hardcoded fallback list; ``api_key`` is accepted only to keep the
        signature uniform with the other fetch helpers.
        """
        return await ModelDiscovery._get_claude_fallback()
182
+
183
    @staticmethod
    async def _get_minimax_fallback() -> List[ModelInfo]:
        # Static fallback used when live discovery is unavailable for MiniMax.
        # async to match the other *_fallback helpers, which callers await.
        return [
            ModelInfo(
                name="minimaxai/minimax-m2.7",
                display_name="MiniMax M2.7",
                description="MiniMax M2.7 via NVIDIA",
            ),
        ]
192
+
161
193
  @staticmethod
162
194
  async def discover_all(provider: str, api_key: str) -> List[ModelInfo]:
163
195
  provider = provider.lower().strip()
@@ -170,27 +202,37 @@ class ModelDiscovery:
170
202
  elif provider == "claude":
171
203
  return await ModelDiscovery.fetch_claude_models(api_key)
172
204
  elif provider == "deepseek":
173
- return await ModelDiscovery.fetch_generic_openai_compatible(api_key, "https://api.deepseek.com/v1", "DeepSeek")
205
+ return await ModelDiscovery.fetch_generic_openai_compatible(
206
+ api_key, "https://api.deepseek.com/v1", "DeepSeek"
207
+ )
174
208
  elif provider == "kimi":
175
- return await ModelDiscovery.fetch_generic_openai_compatible(api_key, "https://api.moonshot.cn/v1", "Kimi")
209
+ return await ModelDiscovery.fetch_generic_openai_compatible(
210
+ api_key, "https://api.moonshot.cn/v1", "Kimi"
211
+ )
176
212
  elif provider == "minimax":
177
- return await ModelDiscovery.fetch_generic_openai_compatible(api_key, "https://api.minimax.chat/v1", "Minimax")
213
+ return await ModelDiscovery.fetch_generic_openai_compatible(
214
+ api_key, "https://integrate.api.nvidia.com/v1", "MiniMax"
215
+ )
178
216
  elif provider == "siliconflow":
179
- return await ModelDiscovery.fetch_generic_openai_compatible(api_key, "https://api.siliconflow.cn/v1", "SiliconFlow")
217
+ return await ModelDiscovery.fetch_generic_openai_compatible(
218
+ api_key, "https://api.siliconflow.cn/v1", "SiliconFlow"
219
+ )
180
220
  else:
181
221
  return []
182
222
 
223
+
183
224
if __name__ == "__main__":
    # CLI entry point: `python model_discovery.py <provider> <api_key>` prints
    # the discovered models as a JSON array (an empty array on missing args).
    import sys

    if len(sys.argv) < 3:
        print(json.dumps([]))
        sys.exit(0)

    provider = sys.argv[1]
    api_key = sys.argv[2]

    async def main():
        # discover_all dispatches on the provider name and awaits the fetch.
        models = await ModelDiscovery.discover_all(provider, api_key)
        print(json.dumps([asdict(m) for m in models]))

    asyncio.run(main())
@@ -141,6 +141,39 @@ Code:
141
141
  "tokens_used": response.tokens_used,
142
142
  }
143
143
 
144
    async def stream_generate(
        self, messages: List[Message], max_tokens: int = 2000, temperature: float = 0.7
    ):
        """Stream response tokens from OpenAI.

        Yields:
            str: incremental completion text chunks as they arrive (SSE).
        """
        import json as _json
        try:
            payload = {
                "model": self.model,
                "messages": [{"role": m.role, "content": m.content} for m in messages],
                "max_tokens": max_tokens,
                "temperature": temperature,
                "stream": True,  # server pushes one "data: <json>" frame per chunk
            }
            async with self.client.stream("POST", self.API_ENDPOINT, json=payload) as response:
                response.raise_for_status()
                async for line in response.aiter_lines():
                    if not line:
                        continue
                    # SSE data frames; the stream is terminated by "data: [DONE]".
                    if line.startswith("data: "):
                        data_str = line[6:]
                        if data_str == "[DONE]":
                            break
                        try:
                            data = _json.loads(data_str)
                            delta = data["choices"][0].get("delta", {})
                            if "content" in delta:
                                yield delta["content"]
                        except Exception:
                            # Malformed chunk — skip it and keep streaming.
                            continue
        except Exception as e:
            logger.error(f"OpenAI stream error: {e}")
            raise
176
+
144
177
  async def close(self):
145
178
  """Close HTTP client"""
146
179
  await self.client.aclose()
@@ -0,0 +1,109 @@
1
+ """SiliconFlow LLM Provider
2
+
3
+ SiliconFlow provides access to multiple open-source models via OpenAI-compatible API.
4
+ """
5
+
6
+ import json
7
+ import logging
8
+ from typing import List, Dict, Any, Optional
9
+ import httpx
10
+
11
+ from .base import BaseLLMProvider, Message, LLMResponse
12
+
13
+ logger = logging.getLogger(__name__)
14
+
15
+
16
class SiliconFlowProvider(BaseLLMProvider):
    """SiliconFlow LLM Provider (OpenAI-compatible API).

    One shared ``httpx.AsyncClient`` serves all requests; call :meth:`close`
    when finished to release the connection pool.
    """

    API_ENDPOINT = "https://api.siliconflow.cn/v1/chat/completions"
    DEFAULT_MODEL = "deepseek-ai/DeepSeek-V2.5"

    def __init__(self, api_key: str, model: str = DEFAULT_MODEL):
        """
        Args:
            api_key: SiliconFlow API key.
            model: Model identifier (defaults to DeepSeek-V2.5).
        """
        super().__init__(api_key, model)
        # 300 s timeout: long generations can take minutes.
        self.client = httpx.AsyncClient(
            headers={
                "Authorization": f"Bearer {api_key}",
                "Content-Type": "application/json",
            },
            timeout=300.0,
        )

    def _build_payload(
        self, messages: List[Message], max_tokens: int, temperature: float
    ) -> Dict[str, Any]:
        """Base chat-completions request body shared by generate/stream."""
        return {
            "model": self.model,
            "messages": [
                {"role": msg.role, "content": msg.content} for msg in messages
            ],
            "max_tokens": max_tokens,
            "temperature": temperature,
        }

    async def generate(
        self, messages: List[Message], max_tokens: int = 2000, temperature: float = 0.7
    ) -> LLMResponse:
        """Run a non-streaming chat completion.

        Returns:
            LLMResponse with the completion text and reported token usage.

        Raises:
            Exception: re-raised after logging (includes httpx.HTTPError).
        """
        try:
            payload = self._build_payload(messages, max_tokens, temperature)
            response = await self.client.post(self.API_ENDPOINT, json=payload)
            response.raise_for_status()
            data = response.json()
            content = data["choices"][0]["message"]["content"]
            # "usage" may be absent from a response; default token count to 0.
            tokens_used = data.get("usage", {}).get("total_tokens", 0)
            return LLMResponse(
                content=content,
                tokens_used=tokens_used,
                model=self.model,
                provider="SiliconFlow",
            )
        except Exception as e:
            logger.error(f"SiliconFlow API error: {e}")
            raise

    async def stream_generate(
        self, messages: List[Message], max_tokens: int = 2000, temperature: float = 0.7
    ):
        """Yield completion text chunks as they arrive (SSE stream)."""
        try:
            payload = self._build_payload(messages, max_tokens, temperature)
            payload["stream"] = True
            async with self.client.stream(
                "POST", self.API_ENDPOINT, json=payload
            ) as response:
                response.raise_for_status()
                async for line in response.aiter_lines():
                    if not line:
                        continue
                    # SSE frames: "data: <json>", terminated by "data: [DONE]"
                    if line.startswith("data: "):
                        data_str = line[6:]
                        if data_str == "[DONE]":
                            break
                        try:
                            data = json.loads(data_str)
                            delta = data["choices"][0].get("delta", {})
                            if "content" in delta:
                                yield delta["content"]
                        except Exception:
                            # Malformed chunk — skip it and keep streaming.
                            continue
        except Exception as e:
            logger.error(f"SiliconFlow stream error: {e}")
            raise

    async def plan(self, issue_description: str) -> str:
        """Generate an implementation plan for the given issue text."""
        system_message = Message(role="system", content="You are an expert software architect. Create a detailed implementation plan.")
        user_message = Message(role="user", content=f"Issue:\n{issue_description}")
        response = await self.generate([system_message, user_message], max_tokens=2000)
        return response.content

    async def generate_patch(self, issue_description: str, context_files: Dict[str, str], previous_errors: Optional[str] = None) -> str:
        """Generate a unified-diff patch using the supplied context files."""
        system_message = Message(role="system", content="Generate a unified diff format patch.")
        context = f"Issue:\n{issue_description}\n\n"
        # Label each context file with its path so the model can target patches
        # (previously the loop bound `filename` but never used it).
        for filename, content in context_files.items():
            context += f"\n--- {filename} ---\n{content}\n"
        if previous_errors:
            context += f"\nPrevious Errors:\n{previous_errors}"
        user_message = Message(role="user", content=context)
        response = await self.generate([system_message, user_message], max_tokens=3000, temperature=0.5)
        return response.content

    async def analyze_test_failure(self, test_output: str, code_context: str) -> Dict[str, Any]:
        """Analyze a test failure; returns the analysis text plus usage info."""
        system_message = Message(role="system", content="Analyze the test failure and suggest fixes.")
        user_message = Message(role="user", content=f"Test Output:\n{test_output}\n\nCode:\n{code_context}")
        response = await self.generate([system_message, user_message], max_tokens=1500)
        return {"analysis": response.content, "provider": "SiliconFlow", "tokens_used": response.tokens_used}

    async def close(self):
        """Close HTTP client."""
        await self.client.aclose()
@@ -80,6 +80,8 @@ class ProviderSetup:
80
80
  models = await ModelDiscovery._get_gemini_fallback()
81
81
  elif provider_name == "claude":
82
82
  models = await ModelDiscovery._get_claude_fallback()
83
+ elif provider_name == "minimax":
84
+ models = await ModelDiscovery._get_minimax_fallback()
83
85
 
84
86
  # Step 4: Choose model
85
87
  chosen_model = await self._choose_model(models)
@@ -117,7 +119,7 @@ class ProviderSetup:
117
119
 
118
120
  while True:
119
121
  try:
120
- choice = input("\nEnter number (1-4): ").strip()
122
+ choice = input(f"\nEnter number (1-{len(providers)}): ").strip()
121
123
  idx = int(choice) - 1
122
124
  if 0 <= idx < len(providers):
123
125
  return providers[idx]
@@ -133,6 +135,7 @@ class ProviderSetup:
133
135
  "openai": ["OPENAI_API_KEY"],
134
136
  "gemini": ["GOOGLE_API_KEY", "GEMINI_API_KEY"],
135
137
  "claude": ["ANTHROPIC_API_KEY", "CLAUDE_API_KEY"],
138
+ "minimax": ["MINIMAX_API_KEY", "NVIDIA_API_KEY"],
136
139
  }
137
140
 
138
141
  for env_var in env_vars.get(provider_name, []):
@@ -0,0 +1,87 @@
1
+ """
2
+ Definitions for 50+ different types of testing supported by voria.
3
+ Combines security (pentesting) and production/reliability tests.
4
+ """
5
+
6
+ from dataclasses import dataclass
7
+ from enum import Enum
8
+ from typing import List, Dict, Any, Optional
9
+
10
class TestCategory(Enum):
    """High-level grouping used to organize the entries in TEST_DEFINITIONS."""

    SECURITY = "Security (Pentesting)"
    PRODUCTION = "Production & Reliability"
    PERFORMANCE = "Performance & Latency"
    STRESS = "Stress Testing"
    QUALITY = "Code Quality & Compliance"
16
+
17
@dataclass
class TestInfo:
    """Metadata describing one supported test type."""

    id: str  # stable machine-readable identifier, e.g. "sql_injection"
    name: str  # human-readable display name
    category: TestCategory
    description: str
    impact: str  # "Critical", "High", "Medium", or "Low" (see TEST_DEFINITIONS)
    type: str  # "static" (code analysis) or "dynamic" (runtime)
25
+
26
# The master list of all 52 tests.
# Positional TestInfo args: (id, name, category, description, impact, type).
TEST_DEFINITIONS: List[TestInfo] = [
    # --- SECURITY (25 tests) ---
    TestInfo("sql_injection", "SQL Injection Scan", TestCategory.SECURITY, "Checks for improper sanitization of database queries.", "Critical", "static"),
    TestInfo("xss", "Cross-Site Scripting (XSS)", TestCategory.SECURITY, "Checks for reflected or stored XSS vulnerabilities in web code.", "High", "static"),
    TestInfo("csrf", "CSRF Protection Audit", TestCategory.SECURITY, "Verifies presence of CSRF tokens in state-changing requests.", "High", "static"),
    TestInfo("path_traversal", "Path Traversal Probe", TestCategory.SECURITY, "Detects insecure file path handling that could allow unauthorized access.", "High", "static"),
    TestInfo("insecure_deserialization", "Insecure Deserialization", TestCategory.SECURITY, "Identifies unsafe decoding of serialized data.", "Critical", "static"),
    TestInfo("hardcoded_secrets", "Hardcoded Secret Detection", TestCategory.SECURITY, "Scans codebase for API keys, passwords, and private certificates.", "Critical", "static"),
    TestInfo("insecure_jwt", "Insecure JWT Handling", TestCategory.SECURITY, "Checks for weak JWT algorithms or lack of signature verification.", "High", "static"),
    TestInfo("broken_access_control", "Broken Access Control", TestCategory.SECURITY, "Analyzes authorization logic for potential bypasses.", "High", "static"),
    TestInfo("open_redirect", "Open Redirect Audit", TestCategory.SECURITY, "Checks for unsafe user-controlled redirection URLs.", "Medium", "static"),
    TestInfo("security_headers", "Security Headers Audit", TestCategory.SECURITY, "Verifies presence of CSP, HSTS, and X-Content-Type headers.", "Medium", "static"),
    TestInfo("clickjacking", "Clickjacking Vulnerability", TestCategory.SECURITY, "Checks for X-Frame-Options or suitable CSP directives.", "Low", "static"),
    TestInfo("bruteforce_protection", "Bruteforce Protection", TestCategory.SECURITY, "Identifies lack of rate limiting or account lockout logic.", "Medium", "static"),
    TestInfo("weak_crypto", "Weak Cryptography", TestCategory.SECURITY, "Detects use of MD5, SHA1, or other deprecated algorithms.", "High", "static"),
    TestInfo("sensitive_data_exposure", "Sensitive Data Exposure", TestCategory.SECURITY, "Checks for PII or sensitive info leaked in logs or error messages.", "High", "static"),
    TestInfo("xxe", "XML External Entity (XXE)", TestCategory.SECURITY, "Checks for insecure XML parsers allowed to resolve external entities.", "High", "static"),
    TestInfo("insecure_upload", "Insecure File Upload", TestCategory.SECURITY, "Analyzes file upload handling for potential malicious file execution.", "High", "static"),
    TestInfo("command_injection", "Command Injection Scan", TestCategory.SECURITY, "Checks for shell commands built using untrusted user input.", "Critical", "static"),
    TestInfo("directory_listing", "Directory Listing Probe", TestCategory.SECURITY, "Checks web config for inadvertent directory listing enablement.", "Medium", "static"),
    TestInfo("ssrf", "Server-Side Request Forgery", TestCategory.SECURITY, "Detects code that makes requests to user-controlled internal URLs.", "High", "static"),
    TestInfo("session_management", "Improper Session Management", TestCategory.SECURITY, "Analyzes session lifecycle, fixation, and timeout logic.", "Medium", "static"),
    TestInfo("rate_limiting", "Lack of Rate Limiting", TestCategory.SECURITY, "Checks for API endpoints vulnerable to abuse without throttling.", "Medium", "static"),
    TestInfo("info_leakage", "Information Leakage Scan", TestCategory.SECURITY, "Detects server versions or stack traces exposed to end users.", "Low", "static"),
    TestInfo("vulnerable_components", "Known Vulnerable Components", TestCategory.SECURITY, "Audit dependencies against known vulnerability databases.", "High", "static"),
    TestInfo("integrity_checks", "Lack of Integrity Checks", TestCategory.SECURITY, "Checks if downloaded assets or code lack checksum verification.", "Medium", "static"),
    TestInfo("error_handling_leak", "Error Handling Leakage", TestCategory.SECURITY, "Verifies that catch blocks don't expose system internals.", "Low", "static"),

    # --- PRODUCTION & RELIABILITY (10 tests) ---
    TestInfo("latency_baseline", "Latency Baseline Audit", TestCategory.PRODUCTION, "Establishes baseline response times for core functions.", "Medium", "dynamic"),
    TestInfo("deadlock_detection", "Potential Deadlock Scan", TestCategory.PRODUCTION, "Analyzes lock acquisition order for potential circular dependencies.", "High", "static"),
    TestInfo("race_condition", "Race Condition Check", TestCategory.PRODUCTION, "Identifies non-atomic operations on shared state.", "High", "static"),
    TestInfo("unhandled_exceptions", "Unhandled Exception Scan", TestCategory.PRODUCTION, "Checks for paths where exceptions could crash the process.", "High", "static"),
    TestInfo("memory_leak_static", "Memory Leak static Scan", TestCategory.PRODUCTION, "Identifies patterns like growing collections or unclosed resources.", "Medium", "static"),
    TestInfo("connection_exhaustion", "Conn Pool Exhaustion Probe", TestCategory.PRODUCTION, "Analyzes resource cleanup to prevent pool starvation.", "High", "static"),
    TestInfo("slow_query", "Slow Query Detection", TestCategory.PRODUCTION, "Scans for unoptimized DB queries without indices.", "Medium", "static"),
    TestInfo("cache_consistency", "Cache Inconsistency Scan", TestCategory.PRODUCTION, "Checks for missing cache invalidation after updates.", "Medium", "static"),
    TestInfo("timeout_handling", "Missing Timeout Logic", TestCategory.PRODUCTION, "Detects blocking calls without explicit timeouts.", "Medium", "static"),
    TestInfo("circular_dep", "Circular Dependency Audit", TestCategory.PRODUCTION, "Maps module imports for circularities that impair startup.", "Low", "static"),

    # --- PERFORMANCE & STRESS (10 tests) ---
    TestInfo("cpu_stress", "CPU Stress Resilience", TestCategory.STRESS, "Simulates heavy computational load to test stability.", "Medium", "dynamic"),
    TestInfo("mem_stress", "Memory Stress Resilience", TestCategory.STRESS, "Simulates high memory allocation to test GC and OOM handling.", "Medium", "dynamic"),
    TestInfo("concurrent_users", "High Concurrency Simulation", TestCategory.STRESS, "Simulates massive parallel user requests.", "High", "dynamic"),
    TestInfo("payload_stress", "Large Payload Resilience", TestCategory.STRESS, "Tests handling of extremely large input data.", "Medium", "dynamic"),
    TestInfo("network_latency", "Network Latency Simulation", TestCategory.PERFORMANCE, "Simulates slow network conditions (jitter/latency).", "Low", "dynamic"),
    TestInfo("p99_latency", "P99 Latency Audit", TestCategory.PERFORMANCE, "Measures tail latency under normal load.", "Medium", "dynamic"),
    TestInfo("throughput_max", "Max Throughput Benchmark", TestCategory.PERFORMANCE, "Determines the saturation point of the service.", "Medium", "dynamic"),
    TestInfo("bundle_size", "Asset Bundle Size Audit", TestCategory.PERFORMANCE, "Analyzes production assets for excessive size.", "Low", "static"),
    TestInfo("cold_start", "Cold Start Analysis", TestCategory.PERFORMANCE, "Measures startup time and initialization performance.", "Low", "dynamic"),
    TestInfo("db_index_audit", "DB Index Optimization", TestCategory.PERFORMANCE, "Suggests missing indices based on query patterns.", "Medium", "static"),

    # --- QUALITY & COMPLIANCE (7 tests) ---
    TestInfo("license_compliance", "License Compliance Audit", TestCategory.QUALITY, "Checks dependencies for copyleft or restrictive licenses.", "Medium", "static"),
    TestInfo("dep_graph", "Dependency Health Audit", TestCategory.QUALITY, "Analyzes depth and variety of project dependencies.", "Low", "static"),
    TestInfo("doc_completeness", "Documentation Completeness", TestCategory.QUALITY, "Checks for missing docstrings or exported API docs.", "Low", "static"),
    TestInfo("coverage_gap", "Test Coverage Gap Analysis", TestCategory.QUALITY, "Identifies critical paths missing automated tests.", "Medium", "static"),
    TestInfo("lint_security", "Security-focused Linting", TestCategory.QUALITY, "Runs specialized security linter rules.", "Medium", "static"),
    TestInfo("complexity_drift", "Complexity Drift Scan", TestCategory.QUALITY, "Detects increasing cyclomatic complexity over time.", "Low", "static"),
    TestInfo("redundant_calls", "Redundant API Call Detection", TestCategory.QUALITY, "Identifies duplicate data fetching patterns.", "Low", "static"),
]