@voria/cli 0.0.3 → 0.0.5
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +75 -380
- package/bin/voria +635 -481
- package/docs/CHANGELOG.md +19 -0
- package/docs/USER_GUIDE.md +34 -5
- package/package.json +1 -1
- package/python/voria/__init__.py +1 -1
- package/python/voria/__pycache__/__init__.cpython-312.pyc +0 -0
- package/python/voria/__pycache__/engine.cpython-312.pyc +0 -0
- package/python/voria/core/__pycache__/__init__.cpython-312.pyc +0 -0
- package/python/voria/core/__pycache__/setup.cpython-312.pyc +0 -0
- package/python/voria/core/agent/__pycache__/__init__.cpython-312.pyc +0 -0
- package/python/voria/core/agent/__pycache__/loop.cpython-312.pyc +0 -0
- package/python/voria/core/executor/__pycache__/__init__.cpython-312.pyc +0 -0
- package/python/voria/core/executor/__pycache__/executor.cpython-312.pyc +0 -0
- package/python/voria/core/executor/executor.py +5 -0
- package/python/voria/core/github/__pycache__/__init__.cpython-312.pyc +0 -0
- package/python/voria/core/github/__pycache__/client.cpython-312.pyc +0 -0
- package/python/voria/core/llm/__init__.py +16 -0
- package/python/voria/core/llm/__pycache__/__init__.cpython-312.pyc +0 -0
- package/python/voria/core/llm/__pycache__/base.cpython-312.pyc +0 -0
- package/python/voria/core/llm/__pycache__/claude_provider.cpython-312.pyc +0 -0
- package/python/voria/core/llm/__pycache__/deepseek_provider.cpython-312.pyc +0 -0
- package/python/voria/core/llm/__pycache__/gemini_provider.cpython-312.pyc +0 -0
- package/python/voria/core/llm/__pycache__/kimi_provider.cpython-312.pyc +0 -0
- package/python/voria/core/llm/__pycache__/minimax_provider.cpython-312.pyc +0 -0
- package/python/voria/core/llm/__pycache__/modal_provider.cpython-312.pyc +0 -0
- package/python/voria/core/llm/__pycache__/model_discovery.cpython-312.pyc +0 -0
- package/python/voria/core/llm/__pycache__/openai_provider.cpython-312.pyc +0 -0
- package/python/voria/core/llm/__pycache__/siliconflow_provider.cpython-312.pyc +0 -0
- package/python/voria/core/llm/base.py +12 -0
- package/python/voria/core/llm/claude_provider.py +46 -0
- package/python/voria/core/llm/deepseek_provider.py +109 -0
- package/python/voria/core/llm/gemini_provider.py +44 -0
- package/python/voria/core/llm/kimi_provider.py +109 -0
- package/python/voria/core/llm/minimax_provider.py +187 -0
- package/python/voria/core/llm/modal_provider.py +33 -0
- package/python/voria/core/llm/model_discovery.py +104 -155
- package/python/voria/core/llm/openai_provider.py +33 -0
- package/python/voria/core/llm/siliconflow_provider.py +109 -0
- package/python/voria/core/patcher/__pycache__/__init__.cpython-312.pyc +0 -0
- package/python/voria/core/patcher/__pycache__/patcher.cpython-312.pyc +0 -0
- package/python/voria/core/setup.py +4 -1
- package/python/voria/core/testing/__pycache__/definitions.cpython-312.pyc +0 -0
- package/python/voria/core/testing/__pycache__/runner.cpython-312.pyc +0 -0
- package/python/voria/core/testing/definitions.py +87 -0
- package/python/voria/core/testing/runner.py +324 -0
- package/python/voria/engine.py +736 -232
package/python/voria/engine.py
CHANGED
|
@@ -32,6 +32,7 @@ try:
|
|
|
32
32
|
from voria.core.patcher import CodePatcher, UnifiedDiffParser
|
|
33
33
|
from voria.core.executor import TestExecutor
|
|
34
34
|
from voria.core.agent import AgentLoop
|
|
35
|
+
from voria.core.testing.runner import TestRunner
|
|
35
36
|
except ImportError as e:
|
|
36
37
|
logger.error(f"Failed to import voria modules: {e}")
|
|
37
38
|
logger.error("Make sure voria package is installed: pip install -e python/")
|
|
@@ -54,6 +55,7 @@ class Response:
|
|
|
54
55
|
action: str # apply_patch, run_tests, continue, stop
|
|
55
56
|
message: str
|
|
56
57
|
patch: Optional[str] = None
|
|
58
|
+
chunk: Optional[str] = None
|
|
57
59
|
logs: Optional[str] = None
|
|
58
60
|
token_usage: Optional[Dict[str, Any]] = None
|
|
59
61
|
data: Optional[Dict[str, Any]] = None
|
|
@@ -85,6 +87,14 @@ async def handle_plan_command(command: Dict[str, Any]) -> None:
|
|
|
85
87
|
|
|
86
88
|
logger.info(f"Processing plan command: {description}")
|
|
87
89
|
|
|
90
|
+
if not api_key or not model or provider_name == "openai":
|
|
91
|
+
config = load_config()
|
|
92
|
+
if not api_key:
|
|
93
|
+
api_key = config.get("llm_api_key")
|
|
94
|
+
provider_name = config.get("llm_provider", provider_name)
|
|
95
|
+
if not model:
|
|
96
|
+
model = config.get("llm_model")
|
|
97
|
+
|
|
88
98
|
if not api_key:
|
|
89
99
|
env_key = f"{provider_name.upper()}_API_KEY"
|
|
90
100
|
api_key = os.environ.get(env_key)
|
|
@@ -94,7 +104,7 @@ async def handle_plan_command(command: Dict[str, Any]) -> None:
|
|
|
94
104
|
Response(
|
|
95
105
|
status="error",
|
|
96
106
|
action="stop",
|
|
97
|
-
message=f"API key required for {provider_name} provider",
|
|
107
|
+
message=f"API key required for {provider_name} provider. Please set in config or env.",
|
|
98
108
|
)
|
|
99
109
|
)
|
|
100
110
|
return
|
|
@@ -142,29 +152,45 @@ async def handle_plan_command(command: Dict[str, Any]) -> None:
|
|
|
142
152
|
),
|
|
143
153
|
]
|
|
144
154
|
|
|
145
|
-
|
|
146
|
-
|
|
147
|
-
|
|
148
|
-
|
|
149
|
-
|
|
150
|
-
|
|
151
|
-
|
|
155
|
+
full_content = ""
|
|
156
|
+
async for chunk in provider.stream_generate(messages):
|
|
157
|
+
full_content += chunk
|
|
158
|
+
send_response(
|
|
159
|
+
Response(
|
|
160
|
+
status="pending",
|
|
161
|
+
action="continue",
|
|
162
|
+
message="Streaming plan...",
|
|
163
|
+
chunk=chunk
|
|
164
|
+
)
|
|
165
|
+
)
|
|
166
|
+
|
|
167
|
+
# Try to parse JSON from full content
|
|
168
|
+
try:
|
|
169
|
+
# Basic JSON extraction
|
|
170
|
+
import re
|
|
171
|
+
json_match = re.search(r"({.*})", full_content, re.DOTALL)
|
|
172
|
+
if json_match:
|
|
173
|
+
plan_data = json.loads(json_match.group(1))
|
|
174
|
+
else:
|
|
175
|
+
plan_data = {"plan": full_content}
|
|
176
|
+
except:
|
|
177
|
+
plan_data = {"plan": full_content}
|
|
152
178
|
|
|
153
179
|
send_response(
|
|
154
180
|
Response(
|
|
155
181
|
status="success",
|
|
156
182
|
action="stop",
|
|
157
|
-
message=
|
|
158
|
-
data={"plan":
|
|
183
|
+
message="Plan generated successfully",
|
|
184
|
+
data={"plan": plan_data, "provider": provider_name},
|
|
159
185
|
)
|
|
160
186
|
)
|
|
161
187
|
except Exception as e:
|
|
162
|
-
logger.error(f"
|
|
188
|
+
logger.error(f"Plan generation failed: {e}")
|
|
163
189
|
send_response(
|
|
164
190
|
Response(
|
|
165
191
|
status="error",
|
|
166
192
|
action="stop",
|
|
167
|
-
message=f"
|
|
193
|
+
message=f"Plan generation failed: {str(e)}",
|
|
168
194
|
)
|
|
169
195
|
)
|
|
170
196
|
|
|
@@ -201,21 +227,18 @@ async def handle_issue_command(command: Dict[str, Any]) -> None:
|
|
|
201
227
|
return
|
|
202
228
|
|
|
203
229
|
if not github_token:
|
|
204
|
-
|
|
230
|
+
config = load_config()
|
|
231
|
+
github_token = config.get("github_token") or os.environ.get("GITHUB_TOKEN")
|
|
205
232
|
|
|
206
|
-
|
|
207
|
-
|
|
208
|
-
|
|
209
|
-
|
|
210
|
-
|
|
211
|
-
|
|
212
|
-
Response(
|
|
213
|
-
status="error",
|
|
214
|
-
action="stop",
|
|
215
|
-
message="GitHub token is required. Use GITHUB_TOKEN env var or enter token when prompted.",
|
|
216
|
-
)
|
|
233
|
+
if not github_token:
|
|
234
|
+
send_response(
|
|
235
|
+
Response(
|
|
236
|
+
status="error",
|
|
237
|
+
action="stop",
|
|
238
|
+
message="GitHub token required for issue command. Please setup with 'voria config --github' or set GITHUB_TOKEN environment variable.",
|
|
217
239
|
)
|
|
218
|
-
|
|
240
|
+
)
|
|
241
|
+
return
|
|
219
242
|
|
|
220
243
|
# Fetch GitHub issue
|
|
221
244
|
try:
|
|
@@ -235,6 +258,28 @@ async def handle_issue_command(command: Dict[str, Any]) -> None:
|
|
|
235
258
|
return
|
|
236
259
|
|
|
237
260
|
# Create LLM provider
|
|
261
|
+
if not api_key or not model or provider_name == "openai":
|
|
262
|
+
config = load_config()
|
|
263
|
+
if not api_key:
|
|
264
|
+
api_key = config.get("llm_api_key")
|
|
265
|
+
provider_name = config.get("llm_provider", provider_name)
|
|
266
|
+
if not model:
|
|
267
|
+
model = config.get("llm_model")
|
|
268
|
+
|
|
269
|
+
if not api_key:
|
|
270
|
+
env_key = f"{provider_name.upper()}_API_KEY"
|
|
271
|
+
api_key = os.environ.get(env_key)
|
|
272
|
+
|
|
273
|
+
if not api_key:
|
|
274
|
+
send_response(
|
|
275
|
+
Response(
|
|
276
|
+
status="error",
|
|
277
|
+
action="stop",
|
|
278
|
+
message=f"API key required for {provider_name} provider. Please set in config or env.",
|
|
279
|
+
)
|
|
280
|
+
)
|
|
281
|
+
return
|
|
282
|
+
|
|
238
283
|
try:
|
|
239
284
|
provider = LLMProviderFactory.create(
|
|
240
285
|
provider_name, api_key, model or "default"
|
|
@@ -265,27 +310,49 @@ async def handle_issue_command(command: Dict[str, Any]) -> None:
|
|
|
265
310
|
),
|
|
266
311
|
]
|
|
267
312
|
|
|
268
|
-
|
|
269
|
-
|
|
270
|
-
|
|
271
|
-
|
|
272
|
-
|
|
273
|
-
|
|
313
|
+
full_content = ""
|
|
314
|
+
async for chunk in provider.stream_generate(messages):
|
|
315
|
+
full_content += chunk
|
|
316
|
+
send_response(
|
|
317
|
+
Response(
|
|
318
|
+
status="pending",
|
|
319
|
+
action="continue",
|
|
320
|
+
message="Streaming patch...",
|
|
321
|
+
chunk=chunk
|
|
322
|
+
)
|
|
323
|
+
)
|
|
324
|
+
|
|
325
|
+
patch = full_content
|
|
274
326
|
logger.info(f"Generated patch for issue #{issue_number}")
|
|
275
327
|
|
|
276
|
-
|
|
277
|
-
|
|
278
|
-
|
|
279
|
-
|
|
280
|
-
|
|
281
|
-
|
|
282
|
-
|
|
283
|
-
"
|
|
284
|
-
"
|
|
285
|
-
"
|
|
286
|
-
|
|
328
|
+
# Auto-apply if requested
|
|
329
|
+
if command.get("auto", False):
|
|
330
|
+
logger.info("Auto-applying patch...")
|
|
331
|
+
patcher = CodePatcher(".")
|
|
332
|
+
result = await patcher.apply_patch(patch)
|
|
333
|
+
send_response(
|
|
334
|
+
Response(
|
|
335
|
+
status="success",
|
|
336
|
+
action="stop",
|
|
337
|
+
message=f"Patch generated and auto-applied to {len(result)} files",
|
|
338
|
+
patch=patch,
|
|
339
|
+
data={"files_modified": result, "issue_number": issue_number, "issue_title": issue.title}
|
|
340
|
+
)
|
|
341
|
+
)
|
|
342
|
+
else:
|
|
343
|
+
send_response(
|
|
344
|
+
Response(
|
|
345
|
+
status="success",
|
|
346
|
+
action="stop",
|
|
347
|
+
message=f"Issue fix generated successfully",
|
|
348
|
+
data={
|
|
349
|
+
"issue_number": issue_number,
|
|
350
|
+
"issue_title": issue.title,
|
|
351
|
+
"patch": patch,
|
|
352
|
+
"provider": provider_name,
|
|
353
|
+
},
|
|
354
|
+
)
|
|
287
355
|
)
|
|
288
|
-
)
|
|
289
356
|
except Exception as e:
|
|
290
357
|
logger.error(f"Patch generation failed: {e}")
|
|
291
358
|
send_response(
|
|
@@ -312,7 +379,7 @@ async def handle_fix_command(command: Dict[str, Any]) -> None:
|
|
|
312
379
|
owner = command.get("owner")
|
|
313
380
|
repo = command.get("repo")
|
|
314
381
|
github_token = command.get("github_token")
|
|
315
|
-
provider_name = command.get("provider", "
|
|
382
|
+
provider_name = command.get("provider", "openai")
|
|
316
383
|
api_key = command.get("api_key")
|
|
317
384
|
model = command.get("model")
|
|
318
385
|
|
|
@@ -329,21 +396,18 @@ async def handle_fix_command(command: Dict[str, Any]) -> None:
|
|
|
329
396
|
return
|
|
330
397
|
|
|
331
398
|
if not github_token:
|
|
332
|
-
|
|
399
|
+
config = load_config()
|
|
400
|
+
github_token = config.get("github_token") or os.environ.get("GITHUB_TOKEN")
|
|
333
401
|
|
|
334
|
-
|
|
335
|
-
|
|
336
|
-
|
|
337
|
-
|
|
338
|
-
|
|
339
|
-
|
|
340
|
-
Response(
|
|
341
|
-
status="error",
|
|
342
|
-
action="stop",
|
|
343
|
-
message="GitHub token is required.",
|
|
344
|
-
)
|
|
402
|
+
if not github_token:
|
|
403
|
+
send_response(
|
|
404
|
+
Response(
|
|
405
|
+
status="error",
|
|
406
|
+
action="stop",
|
|
407
|
+
message="GitHub token required for fix command. Please setup with 'voria config --github' or set GITHUB_TOKEN environment variable.",
|
|
345
408
|
)
|
|
346
|
-
|
|
409
|
+
)
|
|
410
|
+
return
|
|
347
411
|
|
|
348
412
|
# Fetch GitHub issue
|
|
349
413
|
try:
|
|
@@ -362,6 +426,28 @@ async def handle_fix_command(command: Dict[str, Any]) -> None:
|
|
|
362
426
|
return
|
|
363
427
|
|
|
364
428
|
# Create LLM provider
|
|
429
|
+
if not api_key or not model or provider_name == "openai":
|
|
430
|
+
config = load_config()
|
|
431
|
+
if not api_key:
|
|
432
|
+
api_key = config.get("llm_api_key")
|
|
433
|
+
provider_name = config.get("llm_provider", provider_name)
|
|
434
|
+
if not model:
|
|
435
|
+
model = config.get("llm_model")
|
|
436
|
+
|
|
437
|
+
if not api_key:
|
|
438
|
+
env_key = f"{provider_name.upper()}_API_KEY"
|
|
439
|
+
api_key = os.environ.get(env_key)
|
|
440
|
+
|
|
441
|
+
if not api_key:
|
|
442
|
+
send_response(
|
|
443
|
+
Response(
|
|
444
|
+
status="error",
|
|
445
|
+
action="stop",
|
|
446
|
+
message=f"API key required for {provider_name} provider. Please set in config or env.",
|
|
447
|
+
)
|
|
448
|
+
)
|
|
449
|
+
return
|
|
450
|
+
|
|
365
451
|
try:
|
|
366
452
|
provider = LLMProviderFactory.create(
|
|
367
453
|
provider_name, api_key, model or "default"
|
|
@@ -443,24 +529,21 @@ async def handle_list_issues_command(command: Dict[str, Any]) -> None:
|
|
|
443
529
|
repo = command.get("repo")
|
|
444
530
|
github_token = command.get("github_token")
|
|
445
531
|
|
|
446
|
-
logger.info(f"Processing list_issues command")
|
|
447
|
-
|
|
448
532
|
if not github_token:
|
|
449
|
-
|
|
533
|
+
config = load_config()
|
|
534
|
+
github_token = config.get("github_token") or os.environ.get("GITHUB_TOKEN")
|
|
450
535
|
|
|
451
|
-
|
|
452
|
-
|
|
453
|
-
|
|
454
|
-
|
|
455
|
-
|
|
456
|
-
|
|
457
|
-
Response(
|
|
458
|
-
status="error",
|
|
459
|
-
action="stop",
|
|
460
|
-
message="GitHub token is required. Use GITHUB_TOKEN env var or enter token when prompted.",
|
|
461
|
-
)
|
|
536
|
+
if not github_token:
|
|
537
|
+
send_response(
|
|
538
|
+
Response(
|
|
539
|
+
status="error",
|
|
540
|
+
action="stop",
|
|
541
|
+
message="GitHub token not found. Please set GITHUB_TOKEN environment variable or add it to ~/.voria/config.json",
|
|
462
542
|
)
|
|
463
|
-
|
|
543
|
+
)
|
|
544
|
+
return
|
|
545
|
+
|
|
546
|
+
logger.info(f"Processing list_issues command for {owner}/{repo}")
|
|
464
547
|
|
|
465
548
|
try:
|
|
466
549
|
github = GitHubClient(github_token)
|
|
@@ -632,12 +715,12 @@ async def handle_create_pr_command(command: Dict[str, Any]) -> None:
|
|
|
632
715
|
except subprocess.CalledProcessError as e:
|
|
633
716
|
logger.error(f"Git operation failed: {e.stderr}")
|
|
634
717
|
send_response(
|
|
635
|
-
Response(status="error", message=f"Git operation failed: {e.stderr}")
|
|
718
|
+
Response(status="error", action="stop", message=f"Git operation failed: {e.stderr}")
|
|
636
719
|
)
|
|
637
720
|
except Exception as e:
|
|
638
721
|
logger.error(f"PR creation error: {e}")
|
|
639
722
|
send_response(
|
|
640
|
-
Response(status="error", message=f"PR creation error: {str(e)}")
|
|
723
|
+
Response(status="error", action="stop", message=f"PR creation error: {str(e)}")
|
|
641
724
|
)
|
|
642
725
|
|
|
643
726
|
except Exception as e:
|
|
@@ -700,20 +783,18 @@ async def handle_logs_command(command: Dict[str, Any]) -> None:
|
|
|
700
783
|
try:
|
|
701
784
|
level = command.get("level", "INFO")
|
|
702
785
|
follow = command.get("follow", False)
|
|
703
|
-
|
|
786
|
+
lines_count = command.get("lines", 50)
|
|
704
787
|
|
|
705
788
|
log_dir = Path.home() / ".voria"
|
|
706
789
|
log_file = log_dir / "voria.log"
|
|
707
790
|
|
|
791
|
+
# Ensure log directory and file exist
|
|
792
|
+
log_dir.mkdir(parents=True, exist_ok=True)
|
|
708
793
|
if not log_file.exists():
|
|
709
|
-
|
|
710
|
-
|
|
711
|
-
|
|
712
|
-
|
|
713
|
-
message=f"No log file found at {log_file}",
|
|
714
|
-
)
|
|
715
|
-
)
|
|
716
|
-
return
|
|
794
|
+
log_file.touch()
|
|
795
|
+
|
|
796
|
+
# Add file handler to actually write logs to the file
|
|
797
|
+
_setup_file_logging(log_file)
|
|
717
798
|
|
|
718
799
|
if follow:
|
|
719
800
|
send_response(
|
|
@@ -727,9 +808,12 @@ async def handle_logs_command(command: Dict[str, Any]) -> None:
|
|
|
727
808
|
else:
|
|
728
809
|
try:
|
|
729
810
|
with open(log_file, "r") as f:
|
|
730
|
-
log_lines = f.readlines()[-
|
|
811
|
+
log_lines = f.readlines()[-lines_count:]
|
|
731
812
|
|
|
732
|
-
|
|
813
|
+
if not log_lines:
|
|
814
|
+
log_content = "No log entries yet. Run some commands first."
|
|
815
|
+
else:
|
|
816
|
+
log_content = "".join(log_lines)
|
|
733
817
|
send_response(
|
|
734
818
|
Response(
|
|
735
819
|
status="success",
|
|
@@ -756,6 +840,19 @@ async def handle_logs_command(command: Dict[str, Any]) -> None:
|
|
|
756
840
|
)
|
|
757
841
|
|
|
758
842
|
|
|
843
|
+
def _setup_file_logging(log_file: Path) -> None:
|
|
844
|
+
"""Add a file handler to the root logger so logs are persisted."""
|
|
845
|
+
root_logger = logging.getLogger()
|
|
846
|
+
# Don't add duplicate handlers
|
|
847
|
+
for h in root_logger.handlers:
|
|
848
|
+
if isinstance(h, logging.FileHandler) and h.baseFilename == str(log_file):
|
|
849
|
+
return
|
|
850
|
+
fh = logging.FileHandler(str(log_file), mode="a")
|
|
851
|
+
fh.setLevel(logging.INFO)
|
|
852
|
+
fh.setFormatter(logging.Formatter("%(asctime)s [%(levelname)s] %(name)s: %(message)s"))
|
|
853
|
+
root_logger.addHandler(fh)
|
|
854
|
+
|
|
855
|
+
|
|
759
856
|
async def handle_token_command(command: Dict[str, Any]) -> None:
|
|
760
857
|
"""Handle 'token' command for token usage info."""
|
|
761
858
|
try:
|
|
@@ -797,6 +894,78 @@ async def handle_token_command(command: Dict[str, Any]) -> None:
|
|
|
797
894
|
status="error", action="stop", message=f"Token command failed: {str(e)}"
|
|
798
895
|
)
|
|
799
896
|
)
|
|
897
|
+
async def handle_list_tests_command(command: Dict[str, Any]) -> None:
|
|
898
|
+
"""Handle 'list_tests' command."""
|
|
899
|
+
try:
|
|
900
|
+
from voria.core.testing.definitions import TEST_DEFINITIONS
|
|
901
|
+
|
|
902
|
+
tests = []
|
|
903
|
+
for t in TEST_DEFINITIONS:
|
|
904
|
+
tests.append({
|
|
905
|
+
"id": t.id,
|
|
906
|
+
"name": t.name,
|
|
907
|
+
"category": t.category.value,
|
|
908
|
+
"description": t.description,
|
|
909
|
+
"impact": t.impact,
|
|
910
|
+
"type": t.type
|
|
911
|
+
})
|
|
912
|
+
|
|
913
|
+
send_response(
|
|
914
|
+
Response(
|
|
915
|
+
status="success",
|
|
916
|
+
action="stop",
|
|
917
|
+
message=f"Available tests: {len(tests)}",
|
|
918
|
+
data={"tests": tests}
|
|
919
|
+
)
|
|
920
|
+
)
|
|
921
|
+
except Exception as e:
|
|
922
|
+
logger.error(f"List tests error: {e}")
|
|
923
|
+
send_response(Response(status="error", action="stop", message=str(e)))
|
|
924
|
+
|
|
925
|
+
async def handle_test_command(command: Dict[str, Any]) -> None:
|
|
926
|
+
"""Handle 'test' command."""
|
|
927
|
+
try:
|
|
928
|
+
test_id = command.get("test_id")
|
|
929
|
+
provider_name = command.get("provider", "openai")
|
|
930
|
+
api_key = command.get("api_key")
|
|
931
|
+
model = command.get("model", "gpt-4")
|
|
932
|
+
repo_path = command.get("repo_path")
|
|
933
|
+
if repo_path is None:
|
|
934
|
+
repo_path = "."
|
|
935
|
+
|
|
936
|
+
if not test_id:
|
|
937
|
+
send_response(Response(status="error", action="stop", message="test_id is required"))
|
|
938
|
+
return
|
|
939
|
+
|
|
940
|
+
if not api_key:
|
|
941
|
+
env_key = f"{provider_name.upper()}_API_KEY"
|
|
942
|
+
api_key = os.environ.get(env_key)
|
|
943
|
+
if not api_key:
|
|
944
|
+
# Try to load from config
|
|
945
|
+
config = load_config()
|
|
946
|
+
api_key = config.get("llm_api_key")
|
|
947
|
+
provider_name = config.get("llm_provider", provider_name)
|
|
948
|
+
model = config.get("llm_model", model)
|
|
949
|
+
|
|
950
|
+
if not api_key:
|
|
951
|
+
send_response(Response(status="error", action="stop", message=f"API key required for {provider_name}"))
|
|
952
|
+
return
|
|
953
|
+
|
|
954
|
+
runner = TestRunner(provider_name, api_key, model, repo_path)
|
|
955
|
+
result = await runner.run_test(test_id)
|
|
956
|
+
|
|
957
|
+
send_response(
|
|
958
|
+
Response(
|
|
959
|
+
status="success",
|
|
960
|
+
action="stop",
|
|
961
|
+
message=f"Test '{test_id}' completed",
|
|
962
|
+
data={"result": result}
|
|
963
|
+
)
|
|
964
|
+
)
|
|
965
|
+
except Exception as e:
|
|
966
|
+
logger.error(f"Test execution error: {e}")
|
|
967
|
+
send_response(Response(status="error", action="stop", message=str(e)))
|
|
968
|
+
|
|
800
969
|
|
|
801
970
|
|
|
802
971
|
def handle_test_results_callback(command: Dict[str, Any]) -> None:
|
|
@@ -809,6 +978,377 @@ def handle_test_results_callback(command: Dict[str, Any]) -> None:
|
|
|
809
978
|
logger.debug(f"Test logs:\n{test_logs}")
|
|
810
979
|
|
|
811
980
|
|
|
981
|
+
async def handle_scan_command(command: Dict[str, Any]) -> None:
|
|
982
|
+
"""Handle 'scan' — run ALL security tests in parallel, produce unified report."""
|
|
983
|
+
try:
|
|
984
|
+
provider_name = command.get("provider", "openai")
|
|
985
|
+
api_key = command.get("api_key")
|
|
986
|
+
model = command.get("model", "gpt-4")
|
|
987
|
+
repo_path = command.get("repo_path") or "."
|
|
988
|
+
category_filter = command.get("category", "security")
|
|
989
|
+
|
|
990
|
+
if not api_key:
|
|
991
|
+
config = load_config()
|
|
992
|
+
api_key = config.get("llm_api_key")
|
|
993
|
+
provider_name = config.get("llm_provider", provider_name)
|
|
994
|
+
model = config.get("llm_model", model)
|
|
995
|
+
if not api_key:
|
|
996
|
+
env_key = f"{provider_name.upper()}_API_KEY"
|
|
997
|
+
api_key = os.environ.get(env_key)
|
|
998
|
+
if not api_key:
|
|
999
|
+
send_response(Response(status="error", action="stop", message=f"API key required for {provider_name}"))
|
|
1000
|
+
return
|
|
1001
|
+
|
|
1002
|
+
from voria.core.testing.runner import TestRunner
|
|
1003
|
+
from voria.core.testing.definitions import TEST_DEFINITIONS, TestCategory
|
|
1004
|
+
|
|
1005
|
+
runner = TestRunner(provider_name, api_key, model, repo_path)
|
|
1006
|
+
|
|
1007
|
+
# Filter tests by category
|
|
1008
|
+
category_map = {
|
|
1009
|
+
"security": TestCategory.SECURITY,
|
|
1010
|
+
"production": TestCategory.PRODUCTION,
|
|
1011
|
+
"performance": TestCategory.PERFORMANCE,
|
|
1012
|
+
"stress": TestCategory.STRESS,
|
|
1013
|
+
"quality": TestCategory.QUALITY,
|
|
1014
|
+
"all": None,
|
|
1015
|
+
}
|
|
1016
|
+
target_cat = category_map.get(category_filter.lower())
|
|
1017
|
+
if target_cat:
|
|
1018
|
+
tests_to_run = [t for t in TEST_DEFINITIONS if t.category == target_cat]
|
|
1019
|
+
else:
|
|
1020
|
+
tests_to_run = list(TEST_DEFINITIONS)
|
|
1021
|
+
|
|
1022
|
+
logger.info(f"Scanning {len(tests_to_run)} tests in category '{category_filter}'")
|
|
1023
|
+
|
|
1024
|
+
# Run tests in parallel batches of 5
|
|
1025
|
+
results = []
|
|
1026
|
+
batch_size = 5
|
|
1027
|
+
for i in range(0, len(tests_to_run), batch_size):
|
|
1028
|
+
batch = tests_to_run[i:i+batch_size]
|
|
1029
|
+
tasks = [runner.run_test(t.id) for t in batch]
|
|
1030
|
+
batch_results = await asyncio.gather(*tasks, return_exceptions=True)
|
|
1031
|
+
for j, r in enumerate(batch_results):
|
|
1032
|
+
if isinstance(r, Exception):
|
|
1033
|
+
results.append({"id": batch[j].id, "name": batch[j].name, "result": {"status": "error", "summary": str(r)}})
|
|
1034
|
+
else:
|
|
1035
|
+
results.append(r)
|
|
1036
|
+
|
|
1037
|
+
# Compute aggregate
|
|
1038
|
+
total = len(results)
|
|
1039
|
+
passed = sum(1 for r in results if r.get("result", {}).get("status") == "passed")
|
|
1040
|
+
failed = sum(1 for r in results if r.get("result", {}).get("status") == "failed")
|
|
1041
|
+
warnings = sum(1 for r in results if r.get("result", {}).get("status") == "warning")
|
|
1042
|
+
scores = [r.get("result", {}).get("score", 0) for r in results if isinstance(r.get("result", {}).get("score"), (int, float))]
|
|
1043
|
+
avg_score = sum(scores) / max(len(scores), 1)
|
|
1044
|
+
|
|
1045
|
+
# Collect all findings
|
|
1046
|
+
all_findings = []
|
|
1047
|
+
all_recommendations = []
|
|
1048
|
+
for r in results:
|
|
1049
|
+
result_data = r.get("result", {})
|
|
1050
|
+
for f in result_data.get("findings", []):
|
|
1051
|
+
f["test"] = r.get("id", "unknown")
|
|
1052
|
+
all_findings.append(f)
|
|
1053
|
+
all_recommendations.extend(result_data.get("recommendations", []))
|
|
1054
|
+
|
|
1055
|
+
send_response(Response(
|
|
1056
|
+
status="success",
|
|
1057
|
+
action="stop",
|
|
1058
|
+
message=f"Scan complete: {passed} passed, {failed} failed, {warnings} warnings",
|
|
1059
|
+
data={
|
|
1060
|
+
"scan": {
|
|
1061
|
+
"total_tests": total,
|
|
1062
|
+
"passed": passed,
|
|
1063
|
+
"failed": failed,
|
|
1064
|
+
"warnings": warnings,
|
|
1065
|
+
"avg_score": round(avg_score, 1),
|
|
1066
|
+
"category": category_filter,
|
|
1067
|
+
"findings": all_findings[:50],
|
|
1068
|
+
"recommendations": list(set(all_recommendations))[:20],
|
|
1069
|
+
"test_results": results
|
|
1070
|
+
}
|
|
1071
|
+
}
|
|
1072
|
+
))
|
|
1073
|
+
except Exception as e:
|
|
1074
|
+
logger.error(f"Scan command error: {e}", exc_info=True)
|
|
1075
|
+
send_response(Response(status="error", action="stop", message=f"Scan failed: {str(e)}"))
|
|
1076
|
+
|
|
1077
|
+
|
|
1078
|
+
async def handle_diff_command(command: Dict[str, Any]) -> None:
|
|
1079
|
+
"""Handle 'diff' — compare security posture between two git refs."""
|
|
1080
|
+
try:
|
|
1081
|
+
ref_a = command.get("ref_a", "HEAD~1")
|
|
1082
|
+
ref_b = command.get("ref_b", "HEAD")
|
|
1083
|
+
repo_path = command.get("repo_path") or "."
|
|
1084
|
+
|
|
1085
|
+
import subprocess
|
|
1086
|
+
|
|
1087
|
+
# Get list of changed files between two refs
|
|
1088
|
+
try:
|
|
1089
|
+
diff_output = subprocess.run(
|
|
1090
|
+
["git", "diff", "--name-only", ref_a, ref_b],
|
|
1091
|
+
capture_output=True, text=True, cwd=repo_path, check=True
|
|
1092
|
+
)
|
|
1093
|
+
changed_files = [f.strip() for f in diff_output.stdout.strip().split("\n") if f.strip()]
|
|
1094
|
+
except subprocess.CalledProcessError as e:
|
|
1095
|
+
send_response(Response(status="error", action="stop", message=f"Git diff failed: {e.stderr}"))
|
|
1096
|
+
return
|
|
1097
|
+
|
|
1098
|
+
if not changed_files:
|
|
1099
|
+
send_response(Response(
|
|
1100
|
+
status="success", action="stop",
|
|
1101
|
+
message="No changes detected between the two refs.",
|
|
1102
|
+
data={"diff": {"ref_a": ref_a, "ref_b": ref_b, "changed_files": [], "risk_delta": 0}}
|
|
1103
|
+
))
|
|
1104
|
+
return
|
|
1105
|
+
|
|
1106
|
+
# Classify changed files by risk
|
|
1107
|
+
high_risk_patterns = ["auth", "login", "password", "secret", "token", "crypto", "sql", "query", "session"]
|
|
1108
|
+
medium_risk_patterns = ["api", "route", "handler", "middleware", "config", "env"]
|
|
1109
|
+
|
|
1110
|
+
high_risk = []
|
|
1111
|
+
medium_risk = []
|
|
1112
|
+
low_risk = []
|
|
1113
|
+
for f in changed_files:
|
|
1114
|
+
fl = f.lower()
|
|
1115
|
+
if any(p in fl for p in high_risk_patterns):
|
|
1116
|
+
high_risk.append(f)
|
|
1117
|
+
elif any(p in fl for p in medium_risk_patterns):
|
|
1118
|
+
medium_risk.append(f)
|
|
1119
|
+
else:
|
|
1120
|
+
low_risk.append(f)
|
|
1121
|
+
|
|
1122
|
+
# Get diff stats
|
|
1123
|
+
stat_output = subprocess.run(
|
|
1124
|
+
["git", "diff", "--stat", ref_a, ref_b],
|
|
1125
|
+
capture_output=True, text=True, cwd=repo_path
|
|
1126
|
+
)
|
|
1127
|
+
|
|
1128
|
+
risk_delta = len(high_risk) * 3 + len(medium_risk) * 1
|
|
1129
|
+
risk_level = "critical" if risk_delta > 10 else "elevated" if risk_delta > 5 else "low"
|
|
1130
|
+
|
|
1131
|
+
send_response(Response(
|
|
1132
|
+
status="success", action="stop",
|
|
1133
|
+
message=f"Diff analysis: {len(changed_files)} files changed, risk level: {risk_level}",
|
|
1134
|
+
data={"diff": {
|
|
1135
|
+
"ref_a": ref_a,
|
|
1136
|
+
"ref_b": ref_b,
|
|
1137
|
+
"total_changed": len(changed_files),
|
|
1138
|
+
"high_risk_files": high_risk,
|
|
1139
|
+
"medium_risk_files": medium_risk,
|
|
1140
|
+
"low_risk_files": low_risk,
|
|
1141
|
+
"risk_delta": risk_delta,
|
|
1142
|
+
"risk_level": risk_level,
|
|
1143
|
+
"stat": stat_output.stdout[-500:] if stat_output.stdout else "",
|
|
1144
|
+
"recommendation": "Run voria scan on changed files before merging." if risk_delta > 3 else "Changes look safe."
|
|
1145
|
+
}}
|
|
1146
|
+
))
|
|
1147
|
+
except Exception as e:
|
|
1148
|
+
logger.error(f"Diff command error: {e}", exc_info=True)
|
|
1149
|
+
send_response(Response(status="error", action="stop", message=f"Diff failed: {str(e)}"))
|
|
1150
|
+
|
|
1151
|
+
|
|
1152
|
+
async def handle_benchmark_command(command: Dict[str, Any]) -> None:
|
|
1153
|
+
"""Handle 'benchmark' — HTTP benchmarking against a target URL."""
|
|
1154
|
+
try:
|
|
1155
|
+
url = command.get("url")
|
|
1156
|
+
requests_count = command.get("requests", 100)
|
|
1157
|
+
concurrency = command.get("concurrency", 10)
|
|
1158
|
+
|
|
1159
|
+
if not url:
|
|
1160
|
+
send_response(Response(status="error", action="stop", message="URL is required. Usage: voria benchmark <URL>"))
|
|
1161
|
+
return
|
|
1162
|
+
|
|
1163
|
+
import time
|
|
1164
|
+
import statistics
|
|
1165
|
+
|
|
1166
|
+
logger.info(f"Benchmarking {url} with {requests_count} requests, concurrency {concurrency}")
|
|
1167
|
+
|
|
1168
|
+
latencies = []
|
|
1169
|
+
errors = 0
|
|
1170
|
+
status_codes = {}
|
|
1171
|
+
|
|
1172
|
+
try:
|
|
1173
|
+
import httpx
|
|
1174
|
+
except ImportError:
|
|
1175
|
+
send_response(Response(status="error", action="stop", message="httpx required. pip install httpx"))
|
|
1176
|
+
return
|
|
1177
|
+
|
|
1178
|
+
async with httpx.AsyncClient(timeout=30.0) as client:
|
|
1179
|
+
sem = asyncio.Semaphore(concurrency)
|
|
1180
|
+
|
|
1181
|
+
async def make_request():
|
|
1182
|
+
nonlocal errors
|
|
1183
|
+
async with sem:
|
|
1184
|
+
try:
|
|
1185
|
+
t0 = time.perf_counter()
|
|
1186
|
+
resp = await client.get(url)
|
|
1187
|
+
latency = (time.perf_counter() - t0) * 1000
|
|
1188
|
+
latencies.append(latency)
|
|
1189
|
+
code = str(resp.status_code)
|
|
1190
|
+
status_codes[code] = status_codes.get(code, 0) + 1
|
|
1191
|
+
except Exception:
|
|
1192
|
+
errors += 1
|
|
1193
|
+
|
|
1194
|
+
start = time.perf_counter()
|
|
1195
|
+
tasks = [make_request() for _ in range(requests_count)]
|
|
1196
|
+
await asyncio.gather(*tasks)
|
|
1197
|
+
total_time = time.perf_counter() - start
|
|
1198
|
+
|
|
1199
|
+
if not latencies:
|
|
1200
|
+
send_response(Response(status="error", action="stop", message=f"All {requests_count} requests failed. Check URL."))
|
|
1201
|
+
return
|
|
1202
|
+
|
|
1203
|
+
sorted_lat = sorted(latencies)
|
|
1204
|
+
metrics = {
|
|
1205
|
+
"total_requests": requests_count,
|
|
1206
|
+
"successful": len(latencies),
|
|
1207
|
+
"failed": errors,
|
|
1208
|
+
"total_time_sec": round(total_time, 2),
|
|
1209
|
+
"rps": round(len(latencies) / total_time, 1),
|
|
1210
|
+
"latency_avg_ms": round(statistics.mean(latencies), 2),
|
|
1211
|
+
"latency_min_ms": round(sorted_lat[0], 2),
|
|
1212
|
+
"latency_max_ms": round(sorted_lat[-1], 2),
|
|
1213
|
+
"latency_p50_ms": round(sorted_lat[int(len(sorted_lat) * 0.50)], 2),
|
|
1214
|
+
"latency_p95_ms": round(sorted_lat[int(len(sorted_lat) * 0.95)], 2),
|
|
1215
|
+
"latency_p99_ms": round(sorted_lat[int(len(sorted_lat) * 0.99)], 2),
|
|
1216
|
+
"status_codes": status_codes,
|
|
1217
|
+
"concurrency": concurrency,
|
|
1218
|
+
}
|
|
1219
|
+
|
|
1220
|
+
send_response(Response(
|
|
1221
|
+
status="success", action="stop",
|
|
1222
|
+
message=f"Benchmark complete: {metrics['rps']} req/s, p50={metrics['latency_p50_ms']}ms, p99={metrics['latency_p99_ms']}ms",
|
|
1223
|
+
data={"benchmark": metrics}
|
|
1224
|
+
))
|
|
1225
|
+
except Exception as e:
|
|
1226
|
+
logger.error(f"Benchmark command error: {e}", exc_info=True)
|
|
1227
|
+
send_response(Response(status="error", action="stop", message=f"Benchmark failed: {str(e)}"))
|
|
1228
|
+
|
|
1229
|
+
|
|
1230
|
+
async def handle_ci_command(command: Dict[str, Any]) -> None:
    """Handle 'ci' — run a security scan and emit SARIF v2.1.0 for GitHub's Security tab.

    Credential resolution order: explicit command payload -> saved config
    (~/.voria/config.json) -> ``<PROVIDER>_API_KEY`` environment variable.
    Runs up to 10 SECURITY-category tests for CI speed and converts their
    findings into a SARIF document returned under ``data["sarif"]``.
    """
    try:
        provider_name = command.get("provider", "openai")
        api_key = command.get("api_key")
        model = command.get("model", "gpt-4")
        repo_path = command.get("repo_path") or "."

        # Fall back to saved config, which may also override provider/model.
        if not api_key:
            config = load_config()
            api_key = config.get("llm_api_key")
            provider_name = config.get("llm_provider", provider_name)
            model = config.get("llm_model", model)
        if not api_key:
            env_key = f"{provider_name.upper()}_API_KEY"
            api_key = os.environ.get(env_key)
        if not api_key:
            # FIX: was a pointless f-string with no placeholders.
            send_response(Response(status="error", action="stop", message="API key required"))
            return

        from voria.core.testing.runner import TestRunner
        from voria.core.testing.definitions import TEST_DEFINITIONS, TestCategory

        runner = TestRunner(provider_name, api_key, model, repo_path)
        security_tests = [t for t in TEST_DEFINITIONS if t.category == TestCategory.SECURITY]

        # Run top 10 security tests for CI speed
        tests_to_run = security_tests[:10]
        results = []
        for t in tests_to_run:
            try:
                r = await runner.run_test(t.id)
                results.append(r)
            except Exception as e:
                # Record the failure as a result instead of aborting the whole scan.
                results.append({"id": t.id, "result": {"status": "error", "summary": str(e)}})

        # Build SARIF v2.1.0 — one rule per test id, one result per finding.
        sarif_results = []
        sarif_rules = []
        rule_ids = set()
        for r in results:
            test_id = r.get("id", "unknown")
            result_data = r.get("result", {})
            for f in result_data.get("findings", []):
                rule_id = f"voria/{test_id}"
                if rule_id not in rule_ids:
                    rule_ids.add(rule_id)
                    sarif_rules.append({
                        "id": rule_id,
                        "name": r.get("name", test_id),
                        "shortDescription": {"text": result_data.get("summary", "")[:200]},
                        # NOTE(review): the rule's default level is taken from the FIRST
                        # finding seen for this rule; later findings may carry a
                        # different severity — confirm this is acceptable.
                        "defaultConfiguration": {"level": "warning" if f.get("severity") != "high" else "error"}
                    })
                sarif_results.append({
                    "ruleId": rule_id,
                    "level": "error" if f.get("severity") == "high" else "warning",
                    "message": {"text": f.get("description", "Security finding")},
                    "locations": [{
                        "physicalLocation": {
                            "artifactLocation": {"uri": f.get("file", "unknown")},
                            "region": {"startLine": f.get("line", 1)}
                        }
                    }]
                })

        sarif = {
            "$schema": "https://json.schemastore.org/sarif-2.1.0.json",
            "version": "2.1.0",
            "runs": [{
                "tool": {
                    "driver": {
                        "name": "voria",
                        "version": "0.0.5",
                        "informationUri": "https://github.com/Srizdebnath/voria",
                        "rules": sarif_rules
                    }
                },
                "results": sarif_results
            }]
        }

        send_response(Response(
            status="success", action="stop",
            message=f"CI scan complete: {len(sarif_results)} findings in SARIF format",
            data={"sarif": sarif, "findings_count": len(sarif_results)}
        ))
    except Exception as e:
        logger.error(f"CI command error: {e}", exc_info=True)
        send_response(Response(status="error", action="stop", message=f"CI scan failed: {str(e)}"))
|
|
1319
|
+
|
|
1320
|
+
|
|
1321
|
+
async def handle_watch_command(command: Dict[str, Any]) -> None:
    """Handle 'watch' — file watcher that re-runs tests on changes."""
    try:
        repo_path = command.get("repo_path") or "."
        test_ids = command.get("test_ids", ["hardcoded_secrets", "xss", "sql_injection"])

        # Take an initial snapshot of modification times for watchable sources.
        watch_path = Path(repo_path)
        extensions = {".py", ".js", ".ts", ".go", ".rs", ".java"}
        skip_markers = ("node_modules", ".git", "venv")

        def get_snapshot():
            # Map of file path -> last-modified time, skipping vendored trees.
            snapshot = {}
            for candidate in watch_path.rglob("*"):
                if candidate.suffix not in extensions:
                    continue
                path_text = str(candidate)
                if any(marker in path_text for marker in skip_markers):
                    continue
                try:
                    snapshot[path_text] = candidate.stat().st_mtime
                except Exception:
                    # File may vanish between rglob() and stat(); ignore it.
                    pass
            return snapshot

        initial = get_snapshot()
        send_response(Response(
            status="success", action="stop",
            message=f"Watch mode: monitoring {len(initial)} files. Changes will trigger tests: {', '.join(test_ids)}",
            data={"watch": {"files_monitored": len(initial), "tests": test_ids, "status": "active"}}
        ))
    except Exception as e:
        logger.error(f"Watch command error: {e}", exc_info=True)
        send_response(Response(status="error", action="stop", message=f"Watch failed: {str(e)}"))
|
|
1350
|
+
|
|
1351
|
+
|
|
812
1352
|
async def process_command_async(line: str) -> None:
|
|
813
1353
|
"""Process a single NDJSON command line asynchronously."""
|
|
814
1354
|
try:
|
|
@@ -833,10 +1373,24 @@ async def process_command_async(line: str) -> None:
|
|
|
833
1373
|
await handle_token_command(command)
|
|
834
1374
|
elif cmd_type == "config":
|
|
835
1375
|
await handle_config_command(command)
|
|
1376
|
+
elif cmd_type == "list_tests":
|
|
1377
|
+
await handle_list_tests_command(command)
|
|
1378
|
+
elif cmd_type == "test":
|
|
1379
|
+
await handle_test_command(command)
|
|
836
1380
|
elif cmd_type == "test_results":
|
|
837
1381
|
handle_test_results_callback(command)
|
|
838
1382
|
elif cmd_type == "create_pr":
|
|
839
1383
|
await handle_create_pr_command(command)
|
|
1384
|
+
elif cmd_type == "scan":
|
|
1385
|
+
await handle_scan_command(command)
|
|
1386
|
+
elif cmd_type == "diff":
|
|
1387
|
+
await handle_diff_command(command)
|
|
1388
|
+
elif cmd_type == "benchmark":
|
|
1389
|
+
await handle_benchmark_command(command)
|
|
1390
|
+
elif cmd_type == "ci":
|
|
1391
|
+
await handle_ci_command(command)
|
|
1392
|
+
elif cmd_type == "watch":
|
|
1393
|
+
await handle_watch_command(command)
|
|
840
1394
|
else:
|
|
841
1395
|
logger.error(f"Unknown command type: {cmd_type}")
|
|
842
1396
|
send_response(
|
|
@@ -863,18 +1417,35 @@ async def process_command_async(line: str) -> None:
|
|
|
863
1417
|
voria_CONFIG_DIR = Path.home() / ".voria"
|
|
864
1418
|
voria_CONFIG_FILE = voria_CONFIG_DIR / "config.json"
|
|
865
1419
|
|
|
1420
|
+
# BUG-02 FIX: Cache config to avoid redundant disk reads per request
|
|
1421
|
+
_config_cache: Optional[Dict[str, Any]] = None
|
|
1422
|
+
_config_cache_mtime: float = 0.0
|
|
1423
|
+
|
|
866
1424
|
|
|
867
1425
|
def load_config() -> Dict[str, Any]:
    """Load voria configuration from ~/.voria/config.json (cached per mtime).

    Returns:
        A shallow copy of the parsed config dict, or {} when the file is
        missing or unreadable.

    BUG FIX: previously the cached dict object itself was returned, so any
    caller that mutated the result (e.g. ``cfg = load_config();
    cfg.update(...)``) silently poisoned the shared cache. Returning a copy
    keeps the cache authoritative until the file's mtime actually changes.
    """
    global _config_cache, _config_cache_mtime
    if voria_CONFIG_FILE.exists():
        try:
            mtime = voria_CONFIG_FILE.stat().st_mtime
            if _config_cache is not None and mtime == _config_cache_mtime:
                return dict(_config_cache)
            with open(voria_CONFIG_FILE, "r") as f:
                loaded = json.load(f)
            # Guard against a valid-JSON-but-not-an-object config file, which
            # would break every ``config.get(...)`` caller downstream.
            _config_cache = loaded if isinstance(loaded, dict) else {}
            _config_cache_mtime = mtime
            return dict(_config_cache)
        except Exception as e:
            logger.warning(f"Failed to load config: {e}")
    return {}
|
|
876
1440
|
|
|
877
1441
|
|
|
1442
|
+
def invalidate_config_cache() -> None:
    """Drop the cached config so the next load_config() re-reads the file."""
    global _config_cache, _config_cache_mtime
    # Resetting both the payload and its mtime guarantees a cache miss.
    _config_cache, _config_cache_mtime = None, 0.0
|
|
1447
|
+
|
|
1448
|
+
|
|
878
1449
|
def save_config(config: Dict[str, Any]) -> None:
|
|
879
1450
|
"""Save voria configuration to ~/.voria/config.json"""
|
|
880
1451
|
try:
|
|
@@ -882,6 +1453,7 @@ def save_config(config: Dict[str, Any]) -> None:
|
|
|
882
1453
|
with open(voria_CONFIG_FILE, "w") as f:
|
|
883
1454
|
json.dump(config, f, indent=2)
|
|
884
1455
|
os.chmod(voria_CONFIG_FILE, 0o600)
|
|
1456
|
+
invalidate_config_cache()
|
|
885
1457
|
except Exception as e:
|
|
886
1458
|
logger.error(f"Failed to save config: {e}")
|
|
887
1459
|
|
|
@@ -935,152 +1507,93 @@ async def handle_config_command(command: Dict[str, Any]) -> None:
|
|
|
935
1507
|
return
|
|
936
1508
|
|
|
937
1509
|
if action == "init":
|
|
938
|
-
|
|
939
|
-
|
|
940
|
-
|
|
941
|
-
|
|
942
|
-
|
|
943
|
-
|
|
944
|
-
|
|
945
|
-
|
|
946
|
-
|
|
947
|
-
|
|
948
|
-
|
|
949
|
-
|
|
950
|
-
|
|
951
|
-
|
|
952
|
-
|
|
953
|
-
|
|
954
|
-
|
|
955
|
-
|
|
956
|
-
|
|
957
|
-
|
|
958
|
-
if
|
|
959
|
-
|
|
960
|
-
|
|
961
|
-
|
|
962
|
-
|
|
963
|
-
|
|
964
|
-
|
|
965
|
-
|
|
966
|
-
|
|
967
|
-
|
|
968
|
-
|
|
969
|
-
|
|
970
|
-
|
|
971
|
-
status="error",
|
|
972
|
-
action="stop",
|
|
973
|
-
message="Modal API key is required",
|
|
974
|
-
)
|
|
975
|
-
)
|
|
976
|
-
return
|
|
977
|
-
config["llm_api_key"] = api_key
|
|
978
|
-
elif provider == "openai":
|
|
979
|
-
api_key = input("Enter OpenAI API key (sk-...): ").strip()
|
|
980
|
-
if not api_key:
|
|
981
|
-
print("❌ OpenAI API key is required!")
|
|
982
|
-
send_response(
|
|
983
|
-
Response(
|
|
984
|
-
status="error",
|
|
985
|
-
action="stop",
|
|
986
|
-
message="OpenAI API key is required",
|
|
987
|
-
)
|
|
988
|
-
)
|
|
989
|
-
return
|
|
990
|
-
config["llm_api_key"] = api_key
|
|
991
|
-
elif provider == "gemini":
|
|
992
|
-
api_key = input("Enter Google Gemini API key: ").strip()
|
|
993
|
-
if not api_key:
|
|
994
|
-
print("❌ Gemini API key is required!")
|
|
1510
|
+
# BUG-03 FIX: Non-interactive init — accepts config via JSON payload
|
|
1511
|
+
# No print()/input() — those corrupt NDJSON stdout/stdin
|
|
1512
|
+
init_provider = command.get("llm_provider")
|
|
1513
|
+
init_api_key = command.get("llm_api_key")
|
|
1514
|
+
init_github = command.get("github_token")
|
|
1515
|
+
init_model = command.get("llm_model")
|
|
1516
|
+
init_budget = command.get("daily_budget", 10.0)
|
|
1517
|
+
init_framework = command.get("test_framework")
|
|
1518
|
+
|
|
1519
|
+
new_config = {}
|
|
1520
|
+
|
|
1521
|
+
# If fields provided via command, use them
|
|
1522
|
+
if init_provider:
|
|
1523
|
+
new_config["llm_provider"] = init_provider
|
|
1524
|
+
if init_api_key:
|
|
1525
|
+
new_config["llm_api_key"] = init_api_key
|
|
1526
|
+
if init_github:
|
|
1527
|
+
new_config["github_token"] = init_github
|
|
1528
|
+
if init_model:
|
|
1529
|
+
new_config["llm_model"] = init_model
|
|
1530
|
+
if init_budget is not None:
|
|
1531
|
+
try:
|
|
1532
|
+
new_config["daily_budget"] = float(init_budget)
|
|
1533
|
+
except (ValueError, TypeError):
|
|
1534
|
+
new_config["daily_budget"] = 10.0
|
|
1535
|
+
if init_framework:
|
|
1536
|
+
new_config["test_framework"] = init_framework
|
|
1537
|
+
|
|
1538
|
+
# If no fields provided, return current config with instructions
|
|
1539
|
+
if not init_provider and not init_api_key:
|
|
1540
|
+
# Check if config already exists
|
|
1541
|
+
existing = load_config()
|
|
1542
|
+
if existing.get("llm_api_key"):
|
|
995
1543
|
send_response(
|
|
996
1544
|
Response(
|
|
997
|
-
status="
|
|
1545
|
+
status="success",
|
|
998
1546
|
action="stop",
|
|
999
|
-
message="
|
|
1547
|
+
message="voria is already configured!",
|
|
1548
|
+
data={
|
|
1549
|
+
"config": {
|
|
1550
|
+
k: (v[:8] + "..." if k in ("llm_api_key", "github_token") and v else v)
|
|
1551
|
+
for k, v in existing.items()
|
|
1552
|
+
},
|
|
1553
|
+
"hint": "Use 'voria config set' to update individual fields."
|
|
1554
|
+
},
|
|
1000
1555
|
)
|
|
1001
1556
|
)
|
|
1002
1557
|
return
|
|
1003
|
-
|
|
1004
|
-
elif provider == "claude":
|
|
1005
|
-
api_key = input("Enter Anthropic Claude API key (sk-ant-...): ").strip()
|
|
1006
|
-
if not api_key:
|
|
1007
|
-
print("❌ Claude API key is required!")
|
|
1558
|
+
else:
|
|
1008
1559
|
send_response(
|
|
1009
1560
|
Response(
|
|
1010
|
-
status="
|
|
1561
|
+
status="success",
|
|
1011
1562
|
action="stop",
|
|
1012
|
-
message="
|
|
1563
|
+
message="Welcome to voria! Configure with: voria setup-modal <TOKEN> or set ~/.voria/config.json",
|
|
1564
|
+
data={
|
|
1565
|
+
"available_providers": ["modal", "openai", "gemini", "claude", "minimax"],
|
|
1566
|
+
"setup_instructions": [
|
|
1567
|
+
"1. Get an API key from your preferred LLM provider",
|
|
1568
|
+
"2. Run: voria setup-modal <YOUR_TOKEN>",
|
|
1569
|
+
"3. Or edit ~/.voria/config.json directly",
|
|
1570
|
+
"4. Set GitHub token: voria set-github-token",
|
|
1571
|
+
],
|
|
1572
|
+
},
|
|
1013
1573
|
)
|
|
1014
1574
|
)
|
|
1015
1575
|
return
|
|
1016
|
-
config["llm_api_key"] = api_key
|
|
1017
|
-
|
|
1018
|
-
print(f"✅ API key saved\n")
|
|
1019
|
-
|
|
1020
|
-
print("=" * 60)
|
|
1021
|
-
print("STEP 2: GitHub Setup (Optional)")
|
|
1022
|
-
print("=" * 60)
|
|
1023
|
-
|
|
1024
|
-
setup_github = input("Setup GitHub token now? (y/n): ").lower().strip()
|
|
1025
|
-
if setup_github == "y":
|
|
1026
|
-
print_token_guide()
|
|
1027
|
-
token = input("\nEnter GitHub Personal Access Token: ").strip()
|
|
1028
|
-
if token:
|
|
1029
|
-
config["github_token"] = token
|
|
1030
|
-
print("✅ GitHub token saved\n")
|
|
1031
|
-
else:
|
|
1032
|
-
print(
|
|
1033
|
-
"Skipped. You can add it later with: voria config --github YOUR_TOKEN\n"
|
|
1034
|
-
)
|
|
1035
|
-
|
|
1036
|
-
print("=" * 60)
|
|
1037
|
-
print("STEP 3: Budget & Testing (Optional)")
|
|
1038
|
-
print("=" * 60)
|
|
1039
|
-
|
|
1040
|
-
budget_input = input("Daily budget in USD (default: 10): ").strip()
|
|
1041
|
-
if budget_input:
|
|
1042
|
-
try:
|
|
1043
|
-
config["daily_budget"] = float(budget_input)
|
|
1044
|
-
except:
|
|
1045
|
-
config["daily_budget"] = 10.0
|
|
1046
|
-
else:
|
|
1047
|
-
config["daily_budget"] = 10.0
|
|
1048
|
-
|
|
1049
|
-
print(f"✅ Daily budget: ${config['daily_budget']}\n")
|
|
1050
|
-
|
|
1051
|
-
print("=" * 60)
|
|
1052
|
-
print("STEP 4: Test Framework")
|
|
1053
|
-
print("=" * 60)
|
|
1054
|
-
print("Detected: pytest, jest, cargo test, etc.")
|
|
1055
|
-
framework = input(
|
|
1056
|
-
"Enter test framework (or press Enter for auto-detect): "
|
|
1057
|
-
).strip()
|
|
1058
|
-
if framework:
|
|
1059
|
-
config["test_framework"] = framework
|
|
1060
|
-
|
|
1061
|
-
save_config(config)
|
|
1062
1576
|
|
|
1063
|
-
|
|
1064
|
-
|
|
1065
|
-
|
|
1066
|
-
|
|
1067
|
-
print(" - voria issue 42 # Fix a GitHub issue")
|
|
1068
|
-
print(" - voria plan 'Add error handling' # Plan a fix")
|
|
1069
|
-
print(" - voria logs # View activity")
|
|
1070
|
-
print("\nTo update config later: voria config")
|
|
1071
|
-
print("=" * 60 + "\n")
|
|
1577
|
+
# Merge with existing config
|
|
1578
|
+
existing = load_config()
|
|
1579
|
+
existing.update(new_config)
|
|
1580
|
+
save_config(existing)
|
|
1072
1581
|
|
|
1073
1582
|
send_response(
|
|
1074
1583
|
Response(
|
|
1075
1584
|
status="success",
|
|
1076
1585
|
action="stop",
|
|
1077
1586
|
message="voria initialized successfully!",
|
|
1078
|
-
data={"config":
|
|
1587
|
+
data={"config": {
|
|
1588
|
+
k: (v[:8] + "..." if k in ("llm_api_key", "github_token") and isinstance(v, str) else v)
|
|
1589
|
+
for k, v in existing.items()
|
|
1590
|
+
}},
|
|
1079
1591
|
)
|
|
1080
1592
|
)
|
|
1081
1593
|
return
|
|
1082
1594
|
|
|
1083
1595
|
if action == "github":
|
|
1596
|
+
# BUG-03 FIX: Accept token from command payload only
|
|
1084
1597
|
token = command.get("token")
|
|
1085
1598
|
if token:
|
|
1086
1599
|
config["github_token"] = token
|
|
@@ -1093,24 +1606,13 @@ async def handle_config_command(command: Dict[str, Any]) -> None:
|
|
|
1093
1606
|
)
|
|
1094
1607
|
)
|
|
1095
1608
|
else:
|
|
1096
|
-
|
|
1097
|
-
|
|
1098
|
-
|
|
1099
|
-
|
|
1100
|
-
|
|
1101
|
-
send_response(
|
|
1102
|
-
Response(
|
|
1103
|
-
status="success",
|
|
1104
|
-
action="stop",
|
|
1105
|
-
message="GitHub token saved!",
|
|
1106
|
-
)
|
|
1107
|
-
)
|
|
1108
|
-
else:
|
|
1109
|
-
send_response(
|
|
1110
|
-
Response(
|
|
1111
|
-
status="error", action="stop", message="No token provided"
|
|
1112
|
-
)
|
|
1609
|
+
send_response(
|
|
1610
|
+
Response(
|
|
1611
|
+
status="error",
|
|
1612
|
+
action="stop",
|
|
1613
|
+
message="GitHub token required. Usage: voria set-github-token (interactive) or pass 'token' in config command.",
|
|
1113
1614
|
)
|
|
1615
|
+
)
|
|
1114
1616
|
return
|
|
1115
1617
|
|
|
1116
1618
|
send_response(
|
|
@@ -1137,12 +1639,14 @@ def main() -> None:
|
|
|
1137
1639
|
logger.info("voria Python Engine started")
|
|
1138
1640
|
logger.info("Ready to receive commands via NDJSON on stdin")
|
|
1139
1641
|
|
|
1140
|
-
|
|
1141
|
-
|
|
1142
|
-
|
|
1143
|
-
|
|
1144
|
-
|
|
1145
|
-
|
|
1642
|
+
# BUG-10 FIX: Always create a new event loop (avoids DeprecationWarning in 3.10+)
|
|
1643
|
+
loop = asyncio.new_event_loop()
|
|
1644
|
+
asyncio.set_event_loop(loop)
|
|
1645
|
+
|
|
1646
|
+
# BUG-11 FIX: Setup file logging on startup
|
|
1647
|
+
log_dir = Path.home() / ".voria"
|
|
1648
|
+
log_dir.mkdir(parents=True, exist_ok=True)
|
|
1649
|
+
_setup_file_logging(log_dir / "voria.log")
|
|
1146
1650
|
|
|
1147
1651
|
try:
|
|
1148
1652
|
while True:
|