@voria/cli 0.0.4 → 0.0.5

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (47)
  1. package/README.md +75 -380
  2. package/bin/voria +625 -486
  3. package/docs/CHANGELOG.md +19 -0
  4. package/docs/USER_GUIDE.md +34 -5
  5. package/package.json +1 -1
  6. package/python/voria/__init__.py +1 -1
  7. package/python/voria/__pycache__/__init__.cpython-312.pyc +0 -0
  8. package/python/voria/__pycache__/engine.cpython-312.pyc +0 -0
  9. package/python/voria/core/__pycache__/__init__.cpython-312.pyc +0 -0
  10. package/python/voria/core/__pycache__/setup.cpython-312.pyc +0 -0
  11. package/python/voria/core/agent/__pycache__/__init__.cpython-312.pyc +0 -0
  12. package/python/voria/core/agent/__pycache__/loop.cpython-312.pyc +0 -0
  13. package/python/voria/core/executor/__pycache__/__init__.cpython-312.pyc +0 -0
  14. package/python/voria/core/executor/__pycache__/executor.cpython-312.pyc +0 -0
  15. package/python/voria/core/executor/executor.py +5 -0
  16. package/python/voria/core/github/__pycache__/__init__.cpython-312.pyc +0 -0
  17. package/python/voria/core/github/__pycache__/client.cpython-312.pyc +0 -0
  18. package/python/voria/core/llm/__init__.py +16 -0
  19. package/python/voria/core/llm/__pycache__/__init__.cpython-312.pyc +0 -0
  20. package/python/voria/core/llm/__pycache__/base.cpython-312.pyc +0 -0
  21. package/python/voria/core/llm/__pycache__/claude_provider.cpython-312.pyc +0 -0
  22. package/python/voria/core/llm/__pycache__/deepseek_provider.cpython-312.pyc +0 -0
  23. package/python/voria/core/llm/__pycache__/gemini_provider.cpython-312.pyc +0 -0
  24. package/python/voria/core/llm/__pycache__/kimi_provider.cpython-312.pyc +0 -0
  25. package/python/voria/core/llm/__pycache__/minimax_provider.cpython-312.pyc +0 -0
  26. package/python/voria/core/llm/__pycache__/modal_provider.cpython-312.pyc +0 -0
  27. package/python/voria/core/llm/__pycache__/model_discovery.cpython-312.pyc +0 -0
  28. package/python/voria/core/llm/__pycache__/openai_provider.cpython-312.pyc +0 -0
  29. package/python/voria/core/llm/__pycache__/siliconflow_provider.cpython-312.pyc +0 -0
  30. package/python/voria/core/llm/base.py +12 -0
  31. package/python/voria/core/llm/claude_provider.py +46 -0
  32. package/python/voria/core/llm/deepseek_provider.py +109 -0
  33. package/python/voria/core/llm/gemini_provider.py +44 -0
  34. package/python/voria/core/llm/kimi_provider.py +109 -0
  35. package/python/voria/core/llm/minimax_provider.py +187 -0
  36. package/python/voria/core/llm/modal_provider.py +33 -0
  37. package/python/voria/core/llm/model_discovery.py +58 -16
  38. package/python/voria/core/llm/openai_provider.py +33 -0
  39. package/python/voria/core/llm/siliconflow_provider.py +109 -0
  40. package/python/voria/core/patcher/__pycache__/__init__.cpython-312.pyc +0 -0
  41. package/python/voria/core/patcher/__pycache__/patcher.cpython-312.pyc +0 -0
  42. package/python/voria/core/setup.py +4 -1
  43. package/python/voria/core/testing/__pycache__/definitions.cpython-312.pyc +0 -0
  44. package/python/voria/core/testing/__pycache__/runner.cpython-312.pyc +0 -0
  45. package/python/voria/core/testing/definitions.py +87 -0
  46. package/python/voria/core/testing/runner.py +324 -0
  47. package/python/voria/engine.py +736 -232
package/docs/CHANGELOG.md CHANGED
@@ -7,6 +7,25 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
7
7
 
8
8
  ---
9
9
 
10
+ ## [v0.0.5] - April 14, 2026 ✅ TIER 1: SECURITY, STREAMING & RELIABILITY
11
+
12
+ ### 🛡️ Pentesting CLI (Enhanced)
13
+ - **25+ Security Audits** running in parallel with `voria scan all`
14
+ - **SARIF Support:** Export results for GitHub Security tab integration (`voria ci`)
15
+ - **Security Diff:** Compare posture between git refs (`voria diff`)
16
+ - **Watch Mode:** Automatic re-testing on file changes (`voria watch`)
17
+
18
+ ### ⚡ Performance & Reliability
19
+ - **HTTP Benchmarking:** Real-world load testing with latency distribution (`voria benchmark`)
20
+ - **Improved Stress Tests:** Real-time metrics for CPU, Memory, and Network tests
21
+ - **NVIDIA Integrated API Support:** Native support for MiniMax model v2.7
22
+
23
+ ### 🤖 LLM & UX
24
+ - **Streaming Output:** Real-time token-by-token streaming for plan and fix commands
25
+ - **Auto-Fix:** AI-powered bug fixing with automatic patch application (`voria fix --auto`)
26
+ - **Premium Blue UI:** Completely revamped CLI aesthetic with professional formatting
27
+ - **Daemon-Ready IPC:** Infrastructure for persistent background engine acceleration
28
+
10
29
  ## [v0.0.3] - April 12, 2026 ✅ ADVANCED ANALYSIS & CHINESE MODELS
11
30
 
12
31
  ### 🔭 Advanced Code Analysis (New)
@@ -222,20 +222,49 @@ voria apply fix.patch -v
222
222
 
223
223
  ---
224
224
 
225
+ ### `voria test [NAME] [--list]`
226
+
227
+ Run codebase security audits, stress tests, and performance probes.
228
+
229
+ **Usage:**
230
+ ```bash
231
+ # List all 50+ available tests
232
+ voria test --list
233
+
234
+ # Run a specific security scan (e.g., SQL Injection)
235
+ voria test sql_injection
236
+
237
+ # Perform a stress test
238
+ voria test cpu_stress
239
+ ```
240
+
241
+ **Categories Include:**
242
+ - **Security (Pentesting):** SQLi, XSS, CSRF, JWT, SSRF, XXE, and 20+ more.
243
+ - **Production Resilience:** Deadlock detection, Race conditions, Unhandled exceptions.
244
+ - **Performance:** Latency baseline, P99 audits, Throughput benchmarks.
245
+ - **Stress Testing:** CPU/Memory saturation, concurrent user simulation.
246
+ - **Quality:** License compliance, dependency graph health.
247
+
248
+ **What it does:**
249
+ 1. **Identifies** the test type (static analysis or dynamic probing).
250
+ 2. **Analyzes** code context using LLMs for security patterns.
251
+ 3. **Executes** runtime stress simulations for performance audits.
252
+ 4. **Reports** detailed findings, severity, and recommended fixes.
253
+
254
+ ---
255
+
225
256
  ## Configuration
226
257
 
227
258
  voria stores configuration in `~/.voria/config.json`:
228
259
 
229
260
  ```json
230
261
  {
231
- "llm_provider": "modal",
232
- "modal_token": "sk-modal-...",
233
- "github_token": "ghp_...",
234
262
  "llm_provider": "openai",
235
263
  "llm_api_key": "sk-...",
236
- "llm_model": "gpt-4",
264
+ "llm_model": "gpt-4o",
265
+ "github_token": "ghp_...",
237
266
  "daily_budget": 10.0,
238
- "max_retries": 1
267
+ "test_framework": "pytest"
239
268
  }
240
269
  ```
241
270
 
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@voria/cli",
3
- "version": "0.0.4",
3
+ "version": "0.0.5",
4
4
  "description": "AI-powered CLI tool for automated bug fixing - initialize with voria --init",
5
5
  "main": "bin/voria",
6
6
  "type": "module",
@@ -1,6 +1,6 @@
1
1
  """voria: AI-powered CLI tool for open source contributors."""
2
2
 
3
- __version__ = "0.1.0"
3
+ __version__ = "0.0.5"
4
4
  __author__ = "voria Contributors"
5
5
 
6
6
  from . import engine
@@ -247,6 +247,10 @@ class JestParser:
247
247
  """Parse jest JSON output"""
248
248
 
249
249
  results = []
250
+ passed = 0
251
+ failed = 0
252
+ skipped = 0
253
+ duration = 0.0
250
254
 
251
255
  try:
252
256
  # Jest outputs JSON
@@ -289,6 +293,7 @@ class JestParser:
289
293
  # Fallback: parse text output
290
294
  passed = len(re.findall(r"✓", stdout))
291
295
  failed = len(re.findall(r"✕", stdout))
296
+ # skipped and duration keep their default values (0, 0.0)
292
297
 
293
298
  return TestSuiteResult(
294
299
  framework="jest",
@@ -5,6 +5,10 @@ Supports multiple LLM providers with dynamic model discovery:
5
5
  - OpenAI GPT-4 / GPT-3.5-turbo
6
6
  - Google Gemini Pro
7
7
  - Anthropic Claude 3
8
+ - DeepSeek
9
+ - SiliconFlow
10
+ - Kimi (Moonshot AI)
11
+ - MiniMax
8
12
 
9
13
  Dynamic Model Discovery:
10
14
  from voria.core.llm import LLMProviderFactory
@@ -34,12 +38,20 @@ from .modal_provider import ModalProvider
34
38
  from .openai_provider import OpenAIProvider
35
39
  from .gemini_provider import GeminiProvider
36
40
  from .claude_provider import ClaudeProvider
41
+ from .minimax_provider import MiniMaxProvider
42
+ from .deepseek_provider import DeepSeekProvider
43
+ from .siliconflow_provider import SiliconFlowProvider
44
+ from .kimi_provider import KimiProvider
37
45
 
38
46
  # Register all providers
39
47
  LLMProviderFactory.register("modal", ModalProvider)
40
48
  LLMProviderFactory.register("openai", OpenAIProvider)
41
49
  LLMProviderFactory.register("gemini", GeminiProvider)
42
50
  LLMProviderFactory.register("claude", ClaudeProvider)
51
+ LLMProviderFactory.register("minimax", MiniMaxProvider)
52
+ LLMProviderFactory.register("deepseek", DeepSeekProvider)
53
+ LLMProviderFactory.register("siliconflow", SiliconFlowProvider)
54
+ LLMProviderFactory.register("kimi", KimiProvider)
43
55
 
44
56
  __all__ = [
45
57
  "BaseLLMProvider",
@@ -52,4 +64,8 @@ __all__ = [
52
64
  "OpenAIProvider",
53
65
  "GeminiProvider",
54
66
  "ClaudeProvider",
67
+ "MiniMaxProvider",
68
+ "DeepSeekProvider",
69
+ "SiliconFlowProvider",
70
+ "KimiProvider",
55
71
  ]
@@ -59,6 +59,18 @@ class BaseLLMProvider(ABC):
59
59
  """
60
60
  pass
61
61
 
62
+ @abstractmethod
63
+ async def stream_generate(
64
+ self, messages: List[Message], max_tokens: int = 2000, temperature: float = 0.7
65
+ ):
66
+ """
67
+ Stream response tokens from LLM
68
+
69
+ Returns:
70
+ Async generator yielding string tokens
71
+ """
72
+ pass
73
+
62
74
  @abstractmethod
63
75
  async def plan(self, issue_description: str) -> str:
64
76
  """Generate implementation plan from issue"""
@@ -183,6 +183,52 @@ Code Context:
183
183
  "tokens_used": response.tokens_used,
184
184
  }
185
185
 
186
+ async def stream_generate(
187
+ self, messages: List[Message], max_tokens: int = 2000, temperature: float = 0.7
188
+ ):
189
+ """Stream response tokens from Claude"""
190
+ import json as _json
191
+ try:
192
+ system_content = ""
193
+ user_messages = []
194
+ for msg in messages:
195
+ if msg.role == "system":
196
+ system_content = msg.content
197
+ else:
198
+ user_messages.append({"role": msg.role, "content": msg.content})
199
+
200
+ payload = {
201
+ "model": self.model,
202
+ "max_tokens": max_tokens,
203
+ "messages": user_messages,
204
+ "temperature": temperature,
205
+ "stream": True,
206
+ }
207
+ if system_content:
208
+ payload["system"] = system_content
209
+
210
+ async with self.client.stream("POST", self.API_ENDPOINT, json=payload) as response:
211
+ response.raise_for_status()
212
+ async for line in response.aiter_lines():
213
+ if not line:
214
+ continue
215
+ if line.startswith("data: "):
216
+ data_str = line[6:]
217
+ try:
218
+ data = _json.loads(data_str)
219
+ event_type = data.get("type", "")
220
+ if event_type == "content_block_delta":
221
+ delta = data.get("delta", {})
222
+ if delta.get("type") == "text_delta" and "text" in delta:
223
+ yield delta["text"]
224
+ elif event_type == "message_stop":
225
+ break
226
+ except Exception:
227
+ continue
228
+ except Exception as e:
229
+ logger.error(f"Claude stream error: {e}")
230
+ raise
231
+
186
232
  async def close(self):
187
233
  """Close HTTP client"""
188
234
  await self.client.aclose()
@@ -0,0 +1,109 @@
1
+ """DeepSeek LLM Provider
2
+
3
+ DeepSeek provides powerful code-focused models via OpenAI-compatible API.
4
+ """
5
+
6
+ import json
7
+ import logging
8
+ from typing import List, Dict, Any, Optional
9
+ import httpx
10
+
11
+ from .base import BaseLLMProvider, Message, LLMResponse
12
+
13
+ logger = logging.getLogger(__name__)
14
+
15
+
16
+ class DeepSeekProvider(BaseLLMProvider):
17
+ """DeepSeek LLM Provider (OpenAI-compatible API)"""
18
+
19
+ API_ENDPOINT = "https://api.deepseek.com/v1/chat/completions"
20
+ DEFAULT_MODEL = "deepseek-chat"
21
+
22
+ def __init__(self, api_key: str, model: str = DEFAULT_MODEL):
23
+ super().__init__(api_key, model)
24
+ self.client = httpx.AsyncClient(
25
+ headers={
26
+ "Authorization": f"Bearer {api_key}",
27
+ "Content-Type": "application/json",
28
+ },
29
+ timeout=300.0,
30
+ )
31
+
32
+ async def generate(
33
+ self, messages: List[Message], max_tokens: int = 2000, temperature: float = 0.7
34
+ ) -> LLMResponse:
35
+ try:
36
+ payload = {
37
+ "model": self.model,
38
+ "messages": [
39
+ {"role": msg.role, "content": msg.content} for msg in messages
40
+ ],
41
+ "max_tokens": max_tokens,
42
+ "temperature": temperature,
43
+ }
44
+ response = await self.client.post(self.API_ENDPOINT, json=payload)
45
+ response.raise_for_status()
46
+ data = response.json()
47
+ content = data["choices"][0]["message"]["content"]
48
+ tokens_used = data.get("usage", {}).get("total_tokens", 0)
49
+ return LLMResponse(content=content, tokens_used=tokens_used, model=self.model, provider="DeepSeek")
50
+ except Exception as e:
51
+ logger.error(f"DeepSeek API error: {e}")
52
+ raise
53
+
54
+ async def stream_generate(
55
+ self, messages: List[Message], max_tokens: int = 2000, temperature: float = 0.7
56
+ ):
57
+ try:
58
+ payload = {
59
+ "model": self.model,
60
+ "messages": [{"role": m.role, "content": m.content} for m in messages],
61
+ "max_tokens": max_tokens,
62
+ "temperature": temperature,
63
+ "stream": True,
64
+ }
65
+ async with self.client.stream("POST", self.API_ENDPOINT, json=payload) as response:
66
+ response.raise_for_status()
67
+ async for line in response.aiter_lines():
68
+ if not line:
69
+ continue
70
+ if line.startswith("data: "):
71
+ data_str = line[6:]
72
+ if data_str == "[DONE]":
73
+ break
74
+ try:
75
+ data = json.loads(data_str)
76
+ delta = data["choices"][0].get("delta", {})
77
+ if "content" in delta:
78
+ yield delta["content"]
79
+ except Exception:
80
+ continue
81
+ except Exception as e:
82
+ logger.error(f"DeepSeek stream error: {e}")
83
+ raise
84
+
85
+ async def plan(self, issue_description: str) -> str:
86
+ system_message = Message(role="system", content="You are an expert software architect. Create a detailed implementation plan.")
87
+ user_message = Message(role="user", content=f"Issue:\n{issue_description}")
88
+ response = await self.generate([system_message, user_message], max_tokens=2000)
89
+ return response.content
90
+
91
+ async def generate_patch(self, issue_description: str, context_files: Dict[str, str], previous_errors: Optional[str] = None) -> str:
92
+ system_message = Message(role="system", content="Generate a unified diff format patch.")
93
+ context = f"Issue:\n{issue_description}\n\n"
94
+ for filename, content in context_files.items():
95
+ context += f"\n--- {filename} ---\n{content}\n"
96
+ if previous_errors:
97
+ context += f"\nPrevious Errors:\n{previous_errors}"
98
+ user_message = Message(role="user", content=context)
99
+ response = await self.generate([system_message, user_message], max_tokens=3000, temperature=0.5)
100
+ return response.content
101
+
102
+ async def analyze_test_failure(self, test_output: str, code_context: str) -> Dict[str, Any]:
103
+ system_message = Message(role="system", content="Analyze the test failure and suggest fixes.")
104
+ user_message = Message(role="user", content=f"Test Output:\n{test_output}\n\nCode:\n{code_context}")
105
+ response = await self.generate([system_message, user_message], max_tokens=1500)
106
+ return {"analysis": response.content, "provider": "DeepSeek", "tokens_used": response.tokens_used}
107
+
108
+ async def close(self):
109
+ await self.client.aclose()
@@ -143,6 +143,50 @@ Code:
143
143
  "tokens_used": response.tokens_used,
144
144
  }
145
145
 
146
+ async def stream_generate(
147
+ self, messages: List[Message], max_tokens: int = 2000, temperature: float = 0.7
148
+ ):
149
+ """Stream response tokens from Gemini"""
150
+ import json as _json
151
+ try:
152
+ contents = []
153
+ for msg in messages:
154
+ contents.append({
155
+ "role": "user" if msg.role == "user" else "model",
156
+ "parts": [{"text": msg.content}],
157
+ })
158
+
159
+ payload = {
160
+ "contents": contents,
161
+ "generationConfig": {
162
+ "maxOutputTokens": max_tokens,
163
+ "temperature": temperature,
164
+ },
165
+ }
166
+
167
+ url = f"{self.API_ENDPOINT}/{self.model}:streamGenerateContent?key={self.api_key}&alt=sse"
168
+
169
+ async with self.client.stream("POST", url, json=payload) as response:
170
+ response.raise_for_status()
171
+ async for line in response.aiter_lines():
172
+ if not line:
173
+ continue
174
+ if line.startswith("data: "):
175
+ data_str = line[6:]
176
+ try:
177
+ data = _json.loads(data_str)
178
+ candidates = data.get("candidates", [])
179
+ if candidates:
180
+ parts = candidates[0].get("content", {}).get("parts", [])
181
+ for part in parts:
182
+ if "text" in part:
183
+ yield part["text"]
184
+ except Exception:
185
+ continue
186
+ except Exception as e:
187
+ logger.error(f"Gemini stream error: {e}")
188
+ raise
189
+
146
190
  async def close(self):
147
191
  """Close HTTP client"""
148
192
  await self.client.aclose()
@@ -0,0 +1,109 @@
1
+ """Kimi (Moonshot AI) LLM Provider
2
+
3
+ Kimi provides large context window models via OpenAI-compatible API.
4
+ """
5
+
6
+ import json
7
+ import logging
8
+ from typing import List, Dict, Any, Optional
9
+ import httpx
10
+
11
+ from .base import BaseLLMProvider, Message, LLMResponse
12
+
13
+ logger = logging.getLogger(__name__)
14
+
15
+
16
+ class KimiProvider(BaseLLMProvider):
17
+ """Kimi (Moonshot AI) LLM Provider (OpenAI-compatible API)"""
18
+
19
+ API_ENDPOINT = "https://api.moonshot.cn/v1/chat/completions"
20
+ DEFAULT_MODEL = "moonshot-v1-8k"
21
+
22
+ def __init__(self, api_key: str, model: str = DEFAULT_MODEL):
23
+ super().__init__(api_key, model)
24
+ self.client = httpx.AsyncClient(
25
+ headers={
26
+ "Authorization": f"Bearer {api_key}",
27
+ "Content-Type": "application/json",
28
+ },
29
+ timeout=300.0,
30
+ )
31
+
32
+ async def generate(
33
+ self, messages: List[Message], max_tokens: int = 2000, temperature: float = 0.7
34
+ ) -> LLMResponse:
35
+ try:
36
+ payload = {
37
+ "model": self.model,
38
+ "messages": [
39
+ {"role": msg.role, "content": msg.content} for msg in messages
40
+ ],
41
+ "max_tokens": max_tokens,
42
+ "temperature": temperature,
43
+ }
44
+ response = await self.client.post(self.API_ENDPOINT, json=payload)
45
+ response.raise_for_status()
46
+ data = response.json()
47
+ content = data["choices"][0]["message"]["content"]
48
+ tokens_used = data.get("usage", {}).get("total_tokens", 0)
49
+ return LLMResponse(content=content, tokens_used=tokens_used, model=self.model, provider="Kimi")
50
+ except Exception as e:
51
+ logger.error(f"Kimi API error: {e}")
52
+ raise
53
+
54
+ async def stream_generate(
55
+ self, messages: List[Message], max_tokens: int = 2000, temperature: float = 0.7
56
+ ):
57
+ try:
58
+ payload = {
59
+ "model": self.model,
60
+ "messages": [{"role": m.role, "content": m.content} for m in messages],
61
+ "max_tokens": max_tokens,
62
+ "temperature": temperature,
63
+ "stream": True,
64
+ }
65
+ async with self.client.stream("POST", self.API_ENDPOINT, json=payload) as response:
66
+ response.raise_for_status()
67
+ async for line in response.aiter_lines():
68
+ if not line:
69
+ continue
70
+ if line.startswith("data: "):
71
+ data_str = line[6:]
72
+ if data_str == "[DONE]":
73
+ break
74
+ try:
75
+ data = json.loads(data_str)
76
+ delta = data["choices"][0].get("delta", {})
77
+ if "content" in delta:
78
+ yield delta["content"]
79
+ except Exception:
80
+ continue
81
+ except Exception as e:
82
+ logger.error(f"Kimi stream error: {e}")
83
+ raise
84
+
85
+ async def plan(self, issue_description: str) -> str:
86
+ system_message = Message(role="system", content="You are an expert software architect. Create a detailed implementation plan.")
87
+ user_message = Message(role="user", content=f"Issue:\n{issue_description}")
88
+ response = await self.generate([system_message, user_message], max_tokens=2000)
89
+ return response.content
90
+
91
+ async def generate_patch(self, issue_description: str, context_files: Dict[str, str], previous_errors: Optional[str] = None) -> str:
92
+ system_message = Message(role="system", content="Generate a unified diff format patch.")
93
+ context = f"Issue:\n{issue_description}\n\n"
94
+ for filename, content in context_files.items():
95
+ context += f"\n--- {filename} ---\n{content}\n"
96
+ if previous_errors:
97
+ context += f"\nPrevious Errors:\n{previous_errors}"
98
+ user_message = Message(role="user", content=context)
99
+ response = await self.generate([system_message, user_message], max_tokens=3000, temperature=0.5)
100
+ return response.content
101
+
102
+ async def analyze_test_failure(self, test_output: str, code_context: str) -> Dict[str, Any]:
103
+ system_message = Message(role="system", content="Analyze the test failure and suggest fixes.")
104
+ user_message = Message(role="user", content=f"Test Output:\n{test_output}\n\nCode:\n{code_context}")
105
+ response = await self.generate([system_message, user_message], max_tokens=1500)
106
+ return {"analysis": response.content, "provider": "Kimi", "tokens_used": response.tokens_used}
107
+
108
+ async def close(self):
109
+ await self.client.aclose()