superqode 0.1.5__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- superqode/__init__.py +33 -0
- superqode/acp/__init__.py +23 -0
- superqode/acp/client.py +913 -0
- superqode/acp/permission_screen.py +457 -0
- superqode/acp/types.py +480 -0
- superqode/acp_discovery.py +856 -0
- superqode/agent/__init__.py +22 -0
- superqode/agent/edit_strategies.py +334 -0
- superqode/agent/loop.py +892 -0
- superqode/agent/qe_report_templates.py +39 -0
- superqode/agent/system_prompts.py +353 -0
- superqode/agent_output.py +721 -0
- superqode/agent_stream.py +953 -0
- superqode/agents/__init__.py +59 -0
- superqode/agents/acp_registry.py +305 -0
- superqode/agents/client.py +249 -0
- superqode/agents/data/augmentcode.com.toml +51 -0
- superqode/agents/data/cagent.dev.toml +51 -0
- superqode/agents/data/claude.com.toml +60 -0
- superqode/agents/data/codeassistant.dev.toml +51 -0
- superqode/agents/data/codex.openai.com.toml +57 -0
- superqode/agents/data/fastagent.ai.toml +66 -0
- superqode/agents/data/geminicli.com.toml +77 -0
- superqode/agents/data/goose.block.xyz.toml +54 -0
- superqode/agents/data/junie.jetbrains.com.toml +56 -0
- superqode/agents/data/kimi.moonshot.cn.toml +57 -0
- superqode/agents/data/llmlingagent.dev.toml +51 -0
- superqode/agents/data/molt.bot.toml +49 -0
- superqode/agents/data/opencode.ai.toml +60 -0
- superqode/agents/data/stakpak.dev.toml +51 -0
- superqode/agents/data/vtcode.dev.toml +51 -0
- superqode/agents/discovery.py +266 -0
- superqode/agents/messaging.py +160 -0
- superqode/agents/persona.py +166 -0
- superqode/agents/registry.py +421 -0
- superqode/agents/schema.py +72 -0
- superqode/agents/unified.py +367 -0
- superqode/app/__init__.py +111 -0
- superqode/app/constants.py +314 -0
- superqode/app/css.py +366 -0
- superqode/app/models.py +118 -0
- superqode/app/suggester.py +125 -0
- superqode/app/widgets.py +1591 -0
- superqode/app_enhanced.py +399 -0
- superqode/app_main.py +17187 -0
- superqode/approval.py +312 -0
- superqode/atomic.py +296 -0
- superqode/commands/__init__.py +1 -0
- superqode/commands/acp.py +965 -0
- superqode/commands/agents.py +180 -0
- superqode/commands/auth.py +278 -0
- superqode/commands/config.py +374 -0
- superqode/commands/init.py +826 -0
- superqode/commands/providers.py +819 -0
- superqode/commands/qe.py +1145 -0
- superqode/commands/roles.py +380 -0
- superqode/commands/serve.py +172 -0
- superqode/commands/suggestions.py +127 -0
- superqode/commands/superqe.py +460 -0
- superqode/config/__init__.py +51 -0
- superqode/config/loader.py +812 -0
- superqode/config/schema.py +498 -0
- superqode/core/__init__.py +111 -0
- superqode/core/roles.py +281 -0
- superqode/danger.py +386 -0
- superqode/data/superqode-template.yaml +1522 -0
- superqode/design_system.py +1080 -0
- superqode/dialogs/__init__.py +6 -0
- superqode/dialogs/base.py +39 -0
- superqode/dialogs/model.py +130 -0
- superqode/dialogs/provider.py +870 -0
- superqode/diff_view.py +919 -0
- superqode/enterprise.py +21 -0
- superqode/evaluation/__init__.py +25 -0
- superqode/evaluation/adapters.py +93 -0
- superqode/evaluation/behaviors.py +89 -0
- superqode/evaluation/engine.py +209 -0
- superqode/evaluation/scenarios.py +96 -0
- superqode/execution/__init__.py +36 -0
- superqode/execution/linter.py +538 -0
- superqode/execution/modes.py +347 -0
- superqode/execution/resolver.py +283 -0
- superqode/execution/runner.py +642 -0
- superqode/file_explorer.py +811 -0
- superqode/file_viewer.py +471 -0
- superqode/flash.py +183 -0
- superqode/guidance/__init__.py +58 -0
- superqode/guidance/config.py +203 -0
- superqode/guidance/prompts.py +71 -0
- superqode/harness/__init__.py +54 -0
- superqode/harness/accelerator.py +291 -0
- superqode/harness/config.py +319 -0
- superqode/harness/validator.py +147 -0
- superqode/history.py +279 -0
- superqode/integrations/superopt_runner.py +124 -0
- superqode/logging/__init__.py +49 -0
- superqode/logging/adapters.py +219 -0
- superqode/logging/formatter.py +923 -0
- superqode/logging/integration.py +341 -0
- superqode/logging/sinks.py +170 -0
- superqode/logging/unified_log.py +417 -0
- superqode/lsp/__init__.py +26 -0
- superqode/lsp/client.py +544 -0
- superqode/main.py +1069 -0
- superqode/mcp/__init__.py +89 -0
- superqode/mcp/auth_storage.py +380 -0
- superqode/mcp/client.py +1236 -0
- superqode/mcp/config.py +319 -0
- superqode/mcp/integration.py +337 -0
- superqode/mcp/oauth.py +436 -0
- superqode/mcp/oauth_callback.py +385 -0
- superqode/mcp/types.py +290 -0
- superqode/memory/__init__.py +31 -0
- superqode/memory/feedback.py +342 -0
- superqode/memory/store.py +522 -0
- superqode/notifications.py +369 -0
- superqode/optimization/__init__.py +5 -0
- superqode/optimization/config.py +33 -0
- superqode/permissions/__init__.py +25 -0
- superqode/permissions/rules.py +488 -0
- superqode/plan.py +323 -0
- superqode/providers/__init__.py +33 -0
- superqode/providers/gateway/__init__.py +165 -0
- superqode/providers/gateway/base.py +228 -0
- superqode/providers/gateway/litellm_gateway.py +1170 -0
- superqode/providers/gateway/openresponses_gateway.py +436 -0
- superqode/providers/health.py +297 -0
- superqode/providers/huggingface/__init__.py +74 -0
- superqode/providers/huggingface/downloader.py +472 -0
- superqode/providers/huggingface/endpoints.py +442 -0
- superqode/providers/huggingface/hub.py +531 -0
- superqode/providers/huggingface/inference.py +394 -0
- superqode/providers/huggingface/transformers_runner.py +516 -0
- superqode/providers/local/__init__.py +100 -0
- superqode/providers/local/base.py +438 -0
- superqode/providers/local/discovery.py +418 -0
- superqode/providers/local/lmstudio.py +256 -0
- superqode/providers/local/mlx.py +457 -0
- superqode/providers/local/ollama.py +486 -0
- superqode/providers/local/sglang.py +268 -0
- superqode/providers/local/tgi.py +260 -0
- superqode/providers/local/tool_support.py +477 -0
- superqode/providers/local/vllm.py +258 -0
- superqode/providers/manager.py +1338 -0
- superqode/providers/models.py +1016 -0
- superqode/providers/models_dev.py +578 -0
- superqode/providers/openresponses/__init__.py +87 -0
- superqode/providers/openresponses/converters/__init__.py +17 -0
- superqode/providers/openresponses/converters/messages.py +343 -0
- superqode/providers/openresponses/converters/tools.py +268 -0
- superqode/providers/openresponses/schema/__init__.py +56 -0
- superqode/providers/openresponses/schema/models.py +585 -0
- superqode/providers/openresponses/streaming/__init__.py +5 -0
- superqode/providers/openresponses/streaming/parser.py +338 -0
- superqode/providers/openresponses/tools/__init__.py +21 -0
- superqode/providers/openresponses/tools/apply_patch.py +352 -0
- superqode/providers/openresponses/tools/code_interpreter.py +290 -0
- superqode/providers/openresponses/tools/file_search.py +333 -0
- superqode/providers/openresponses/tools/mcp_adapter.py +252 -0
- superqode/providers/registry.py +716 -0
- superqode/providers/usage.py +332 -0
- superqode/pure_mode.py +384 -0
- superqode/qr/__init__.py +23 -0
- superqode/qr/dashboard.py +781 -0
- superqode/qr/generator.py +1018 -0
- superqode/qr/templates.py +135 -0
- superqode/safety/__init__.py +41 -0
- superqode/safety/sandbox.py +413 -0
- superqode/safety/warnings.py +256 -0
- superqode/server/__init__.py +33 -0
- superqode/server/lsp_server.py +775 -0
- superqode/server/web.py +250 -0
- superqode/session/__init__.py +25 -0
- superqode/session/persistence.py +580 -0
- superqode/session/sharing.py +477 -0
- superqode/session.py +475 -0
- superqode/sidebar.py +2991 -0
- superqode/stream_view.py +648 -0
- superqode/styles/__init__.py +3 -0
- superqode/superqe/__init__.py +184 -0
- superqode/superqe/acp_runner.py +1064 -0
- superqode/superqe/constitution/__init__.py +62 -0
- superqode/superqe/constitution/evaluator.py +308 -0
- superqode/superqe/constitution/loader.py +432 -0
- superqode/superqe/constitution/schema.py +250 -0
- superqode/superqe/events.py +591 -0
- superqode/superqe/frameworks/__init__.py +65 -0
- superqode/superqe/frameworks/base.py +234 -0
- superqode/superqe/frameworks/e2e.py +263 -0
- superqode/superqe/frameworks/executor.py +237 -0
- superqode/superqe/frameworks/javascript.py +409 -0
- superqode/superqe/frameworks/python.py +373 -0
- superqode/superqe/frameworks/registry.py +92 -0
- superqode/superqe/mcp_tools/__init__.py +47 -0
- superqode/superqe/mcp_tools/core_tools.py +418 -0
- superqode/superqe/mcp_tools/registry.py +230 -0
- superqode/superqe/mcp_tools/testing_tools.py +167 -0
- superqode/superqe/noise.py +89 -0
- superqode/superqe/orchestrator.py +778 -0
- superqode/superqe/roles.py +609 -0
- superqode/superqe/session.py +713 -0
- superqode/superqe/skills/__init__.py +57 -0
- superqode/superqe/skills/base.py +106 -0
- superqode/superqe/skills/core_skills.py +899 -0
- superqode/superqe/skills/registry.py +90 -0
- superqode/superqe/verifier.py +101 -0
- superqode/superqe_cli.py +76 -0
- superqode/tool_call.py +358 -0
- superqode/tools/__init__.py +93 -0
- superqode/tools/agent_tools.py +496 -0
- superqode/tools/base.py +324 -0
- superqode/tools/batch_tool.py +133 -0
- superqode/tools/diagnostics.py +311 -0
- superqode/tools/edit_tools.py +653 -0
- superqode/tools/enhanced_base.py +515 -0
- superqode/tools/file_tools.py +269 -0
- superqode/tools/file_tracking.py +45 -0
- superqode/tools/lsp_tools.py +610 -0
- superqode/tools/network_tools.py +350 -0
- superqode/tools/permissions.py +400 -0
- superqode/tools/question_tool.py +324 -0
- superqode/tools/search_tools.py +598 -0
- superqode/tools/shell_tools.py +259 -0
- superqode/tools/todo_tools.py +121 -0
- superqode/tools/validation.py +80 -0
- superqode/tools/web_tools.py +639 -0
- superqode/tui.py +1152 -0
- superqode/tui_integration.py +875 -0
- superqode/tui_widgets/__init__.py +27 -0
- superqode/tui_widgets/widgets/__init__.py +18 -0
- superqode/tui_widgets/widgets/progress.py +185 -0
- superqode/tui_widgets/widgets/tool_display.py +188 -0
- superqode/undo_manager.py +574 -0
- superqode/utils/__init__.py +5 -0
- superqode/utils/error_handling.py +323 -0
- superqode/utils/fuzzy.py +257 -0
- superqode/widgets/__init__.py +477 -0
- superqode/widgets/agent_collab.py +390 -0
- superqode/widgets/agent_store.py +936 -0
- superqode/widgets/agent_switcher.py +395 -0
- superqode/widgets/animation_manager.py +284 -0
- superqode/widgets/code_context.py +356 -0
- superqode/widgets/command_palette.py +412 -0
- superqode/widgets/connection_status.py +537 -0
- superqode/widgets/conversation_history.py +470 -0
- superqode/widgets/diff_indicator.py +155 -0
- superqode/widgets/enhanced_status_bar.py +385 -0
- superqode/widgets/enhanced_toast.py +476 -0
- superqode/widgets/file_browser.py +809 -0
- superqode/widgets/file_reference.py +585 -0
- superqode/widgets/issue_timeline.py +340 -0
- superqode/widgets/leader_key.py +264 -0
- superqode/widgets/mode_switcher.py +445 -0
- superqode/widgets/model_picker.py +234 -0
- superqode/widgets/permission_preview.py +1205 -0
- superqode/widgets/prompt.py +358 -0
- superqode/widgets/provider_connect.py +725 -0
- superqode/widgets/pty_shell.py +587 -0
- superqode/widgets/qe_dashboard.py +321 -0
- superqode/widgets/resizable_sidebar.py +377 -0
- superqode/widgets/response_changes.py +218 -0
- superqode/widgets/response_display.py +528 -0
- superqode/widgets/rich_tool_display.py +613 -0
- superqode/widgets/sidebar_panels.py +1180 -0
- superqode/widgets/slash_complete.py +356 -0
- superqode/widgets/split_view.py +612 -0
- superqode/widgets/status_bar.py +273 -0
- superqode/widgets/superqode_display.py +786 -0
- superqode/widgets/thinking_display.py +815 -0
- superqode/widgets/throbber.py +87 -0
- superqode/widgets/toast.py +206 -0
- superqode/widgets/unified_output.py +1073 -0
- superqode/workspace/__init__.py +75 -0
- superqode/workspace/artifacts.py +472 -0
- superqode/workspace/coordinator.py +353 -0
- superqode/workspace/diff_tracker.py +429 -0
- superqode/workspace/git_guard.py +373 -0
- superqode/workspace/git_snapshot.py +526 -0
- superqode/workspace/manager.py +750 -0
- superqode/workspace/snapshot.py +357 -0
- superqode/workspace/watcher.py +535 -0
- superqode/workspace/worktree.py +440 -0
- superqode-0.1.5.dist-info/METADATA +204 -0
- superqode-0.1.5.dist-info/RECORD +288 -0
- superqode-0.1.5.dist-info/WHEEL +5 -0
- superqode-0.1.5.dist-info/entry_points.txt +3 -0
- superqode-0.1.5.dist-info/licenses/LICENSE +648 -0
- superqode-0.1.5.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,1064 @@
|
|
|
1
|
+
"""
|
|
2
|
+
ACP Runner - Execute QE roles using ACP-compatible agents.
|
|
3
|
+
|
|
4
|
+
Uses the existing AgentStreamClient to communicate with coding agents
|
|
5
|
+
like OpenCode for AI-powered quality engineering analysis.
|
|
6
|
+
|
|
7
|
+
Features:
|
|
8
|
+
- Real-time streaming of agent analysis
|
|
9
|
+
- Structured finding extraction from agent output
|
|
10
|
+
- Support for OpenCode free models
|
|
11
|
+
- Integration with QE noise controls
|
|
12
|
+
"""
|
|
13
|
+
|
|
14
|
+
from __future__ import annotations
|
|
15
|
+
|
|
16
|
+
import asyncio
|
|
17
|
+
import json
|
|
18
|
+
import os
|
|
19
|
+
import re
|
|
20
|
+
from dataclasses import dataclass, field
|
|
21
|
+
from datetime import datetime
|
|
22
|
+
from pathlib import Path
|
|
23
|
+
from typing import Any, Callable, Dict, List, Optional
|
|
24
|
+
import logging
|
|
25
|
+
import shutil
|
|
26
|
+
|
|
27
|
+
from superqode.agent_stream import (
|
|
28
|
+
AgentStreamClient,
|
|
29
|
+
StreamEvent,
|
|
30
|
+
StreamEventType,
|
|
31
|
+
StreamMessage,
|
|
32
|
+
StreamToolCall,
|
|
33
|
+
)
|
|
34
|
+
|
|
35
|
+
logger = logging.getLogger(__name__)
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
# Default OpenCode command (same invocation the TUI uses).
OPENCODE_COMMAND = "opencode run --format json"

# Maps each QE role name onto the OpenCode agent best suited to run it.
QE_ROLE_TO_OPENCODE_AGENT = {
    # Execution roles (not ACP-driven, but keep mapping for consistency)
    "smoke_tester": "deployment-readiness",
    "sanity_tester": "deployment-readiness",
    "regression_tester": "code-complexity",
    # Detection roles
    "unit_tester": "mutation-tester",
    "api_tester": "contract-tester",
    "security_tester": "mutation-tester",
    "performance_tester": "code-complexity",
    "e2e_tester": "visual-tester",
    # Heuristic role
    "fullstack": "mutation-tester",
}


def get_opencode_agent_for_role(role_name: str) -> str:
    """Return the OpenCode agent mapped to *role_name*.

    Roles without an explicit mapping fall back to the general-purpose
    "mutation-tester" agent.
    """
    try:
        return QE_ROLE_TO_OPENCODE_AGENT[role_name]
    except KeyError:
        return "mutation-tester"
|
|
61
|
+
|
|
62
|
+
|
|
63
|
+
@dataclass
class FixVerification:
    """Verification results for a suggested fix.

    Intended to record what happened after a candidate fix was applied
    and the relevant tests were re-run, so callers can judge whether the
    fix actually helped (see ACPFinding.has_verified_fix).
    """

    # True once the suggested patch has been applied.
    fix_applied: bool = False
    # Identifiers of the tests executed to validate the fix.
    tests_run: List[str] = field(default_factory=list)
    # Pass / total counts from the verification run.
    tests_passed: int = 0
    tests_total: int = 0
    # True when the verification run confirms the fix works.
    fix_verified: bool = False
    # Free-form summary of the verification outcome.
    outcome: str = ""
    # True when post-fix results improved on the pre-fix baseline.
    is_improvement: bool = False
|
|
74
|
+
|
|
75
|
+
|
|
76
|
+
@dataclass
class ACPFinding:
    """A finding extracted from ACP agent output."""

    # Unique finding identifier, e.g. "security_tester-001" (role + counter).
    id: str
    severity: str  # critical, high, medium, low, info
    title: str
    description: str
    # Location reported by the agent, when available.
    file_path: Optional[str] = None
    line_number: Optional[int] = None
    # Supporting snippet or output the agent cited for this finding.
    evidence: Optional[str] = None
    suggested_fix: Optional[str] = None
    # Agent-reported confidence; 0.8 is assumed when the agent omits it.
    confidence: float = 0.8
    # QE role name that produced this finding.
    category: str = ""

    # Fix verification data (populated when allow_suggestions is enabled)
    fix_verification: Optional[FixVerification] = None
    patch_file: Optional[str] = None  # Path to saved patch file

    @property
    def has_verified_fix(self) -> bool:
        """Check if this finding has a verified fix."""
        # A fix only counts when verification ran, passed, AND the
        # post-fix results were an improvement over the baseline.
        return (
            self.fix_verification is not None
            and self.fix_verification.fix_verified
            and self.fix_verification.is_improvement
        )
|
|
103
|
+
|
|
104
|
+
|
|
105
|
+
@dataclass
class ACPRunnerConfig:
    """Configuration for ACP runner."""

    # Agent command line (default: OPENCODE_COMMAND, i.e. "opencode run --format json")
    agent_command: str = OPENCODE_COMMAND

    # Model to use (for agents that support model selection)
    model: Optional[str] = None

    # Timeout in seconds for the whole agent run
    timeout_seconds: int = 300

    # Auto-approve file operations
    auto_approve: bool = True

    # Verbose output (echoes agent stdout/stderr and progress to the console)
    verbose: bool = False

    # Callback for streaming events
    on_event: Optional[Callable[[StreamEvent], None]] = None

    # Suggestion mode settings
    allow_suggestions: bool = False  # When True, ask agent to generate fixes
    verify_fixes: bool = True  # Run verification on suggested fixes
    max_fix_attempts: int = 3  # Max attempts per issue
|
|
131
|
+
|
|
132
|
+
|
|
133
|
+
@dataclass
class ACPRunnerResult:
    """Result from ACP runner execution."""

    # True only when no errors occurred AND at least one finding was produced
    # (see ACPQERunner.run).
    success: bool
    findings: List[ACPFinding]
    # Raw textual output captured from the agent process stdout.
    agent_output: str
    tool_calls: List[Dict[str, Any]]
    duration_seconds: float
    # Human-readable error messages accumulated during the run.
    errors: List[str]
|
|
143
|
+
|
|
144
|
+
|
|
145
|
+
class ACPQERunner:
|
|
146
|
+
"""
|
|
147
|
+
Runs QE analysis using an ACP-compatible coding agent.
|
|
148
|
+
|
|
149
|
+
Connects to agents like OpenCode and sends QE-specific prompts
|
|
150
|
+
to analyze code for issues, then extracts structured findings
|
|
151
|
+
from the agent's output.
|
|
152
|
+
"""
|
|
153
|
+
|
|
154
|
+
def __init__(
|
|
155
|
+
self,
|
|
156
|
+
project_root: Path,
|
|
157
|
+
config: Optional[ACPRunnerConfig] = None,
|
|
158
|
+
):
|
|
159
|
+
self.project_root = project_root.resolve()
|
|
160
|
+
self.config = config or ACPRunnerConfig()
|
|
161
|
+
|
|
162
|
+
self._client: Optional[AgentStreamClient] = None
|
|
163
|
+
self._collected_output: str = ""
|
|
164
|
+
self._collected_thoughts: str = ""
|
|
165
|
+
self._tool_calls: List[Dict[str, Any]] = []
|
|
166
|
+
self._findings: List[ACPFinding] = []
|
|
167
|
+
self._finding_counter = 0
|
|
168
|
+
|
|
169
|
+
async def run(self, prompt: str, role_name: str = "qe") -> ACPRunnerResult:
    """
    Run the QE analysis with the given prompt using OpenCode subprocess.

    Spawns the agent command, streams its stdout/stderr concurrently
    while printing periodic heartbeat messages, then parses the captured
    output (JSON, JSONL, or plain text) into ACPFinding objects.

    Args:
        prompt: The QE analysis prompt to send to the agent
        role_name: Name of the QE role (for finding IDs)

    Returns:
        ACPRunnerResult with findings and agent output
    """
    start_time = datetime.now()
    errors = []
    agent_logs = []
    collected_output = ""
    # NOTE(review): tool_calls is never populated in this path; it is
    # returned empty so ACPRunnerResult keeps a stable shape.
    tool_calls = []

    # Check if opencode is available
    if not self._check_agent_available():
        return ACPRunnerResult(
            success=False,
            findings=[],
            agent_output="",
            tool_calls=[],
            duration_seconds=0.0,
            errors=["OpenCode not found. Install with: npm i -g opencode-ai"],
        )

    agent_logs.append(
        f"[{start_time.strftime('%H:%M:%S')}] Starting OpenCode analysis for {role_name}"
    )
    agent_logs.append(
        f"[{start_time.strftime('%H:%M:%S')}] Command: {self._build_agent_command(role_name)}"
    )

    if self.config.verbose:
        print(f"🤖 Starting {role_name} analysis with OpenCode...")

    try:
        # Build the command with appropriate agent; the prompt is passed
        # as a single trailing argv element (not through a shell).
        cmd_parts = self._build_agent_command(role_name).split()
        cmd = cmd_parts + [prompt]

        agent_logs.append(f"[{datetime.now().strftime('%H:%M:%S')}] Executing: {' '.join(cmd)}")

        if self.config.verbose:
            model_info = f" using {self.config.model}" if self.config.model else ""
            print(f"🔧 Running OpenCode{model_info} for {role_name} analysis...")

        # Run OpenCode as subprocess, capturing both output streams.
        process = await asyncio.create_subprocess_exec(
            *cmd,
            cwd=str(self.project_root),
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE,
            # 50MB asyncio StreamReader buffer limit (this bounds pipe
            # buffering, not the subprocess's memory use).
            limit=50 * 1024 * 1024,
            env={**os.environ, "PYTHONUNBUFFERED": "1"},
        )

        agent_logs.append(
            f"[{datetime.now().strftime('%H:%M:%S')}] OpenCode process started (PID: {process.pid})"
        )

        if self.config.verbose:
            print(f"⚙️ OpenCode process started (PID: {process.pid})")
            print(f"⏳ Analyzing codebase... (timeout: {self.config.timeout_seconds}s)")

        try:
            # Stream stdout/stderr in real-time while collecting output
            stdout_chunks = []
            stderr_chunks = []
            # NOTE(review): last_heartbeat is assigned but never read;
            # the heartbeat task keeps its own clock below.
            last_heartbeat = datetime.now()
            heartbeat_interval = 10.0  # Show heartbeat every 10 seconds

            async def stream_output(stream, chunks, prefix="", is_stderr=False):
                """Stream output in real-time."""
                # Drain the pipe until EOF, accumulating raw bytes; in
                # verbose mode also echo non-event lines to the console.
                while True:
                    chunk = await stream.read(8192)  # Read 8KB at a time
                    if not chunk:
                        break
                    chunks.append(chunk)
                    if self.config.verbose:
                        try:
                            text = chunk.decode("utf-8", errors="replace").rstrip()
                            if text:
                                # Filter out verbose JSON lines (like step_finish, step_start events)
                                for line in text.split("\n"):
                                    if line.strip():
                                        # Skip JSON event lines that are too verbose
                                        line_stripped = line.strip()
                                        # Only show if it's not a JSON event line or if it's an error
                                        if (
                                            not line_stripped.startswith('{"type":"')
                                            or "error" in line_stripped.lower()
                                            or is_stderr
                                        ):
                                            print(f" {prefix}{line}")
                        except Exception:
                            # Console echo is best-effort; never let a
                            # decode/print failure kill the stream reader.
                            pass

            async def show_heartbeat():
                """Show periodic heartbeat with varied QA-related messages."""
                # Intentionally shadows the outer start_time: elapsed is
                # measured from heartbeat start, not method start.
                start_time = datetime.now()
                message_index = 0

                # Varied QA-related messages with emojis
                qa_messages = [
                    ("🔍", "Analyzing code quality..."),
                    ("🧪", "Running test suites..."),
                    ("🔐", "Scanning for security vulnerabilities..."),
                    ("⚡", "Checking performance issues..."),
                    ("📊", "Evaluating code metrics..."),
                    ("🛡️", "Validating code safety..."),
                    ("🎯", "Identifying code issues..."),
                    ("📝", "Reviewing code patterns..."),
                    ("🔎", "Inspecting code structure..."),
                    ("🧩", "Analyzing code complexity..."),
                    ("✅", "Verifying code standards..."),
                    ("🚨", "Detecting potential bugs..."),
                    ("🔧", "Examining code quality..."),
                    ("📈", "Assessing code health..."),
                    ("🎨", "Reviewing code style..."),
                    ("🔬", "Testing code functionality..."),
                    ("📋", "Checking code compliance..."),
                    ("🔄", "Running quality checks..."),
                    ("💡", "Analyzing best practices..."),
                    ("🌐", "Evaluating API quality..."),
                ]

                # Runs until cancelled by the completion/timeout paths.
                while True:
                    await asyncio.sleep(heartbeat_interval)
                    elapsed = (datetime.now() - start_time).total_seconds()
                    elapsed_str = f"{int(elapsed)}s"

                    # Cycle through messages
                    emoji, message = qa_messages[message_index % len(qa_messages)]
                    message_index += 1

                    # Always show heartbeat for user engagement
                    print(f" {emoji} {message} ({elapsed_str} elapsed)")
                    agent_logs.append(
                        f"[{datetime.now().strftime('%H:%M:%S')}] Heartbeat: {message} ({elapsed_str} elapsed)"
                    )

            # Create tasks for streaming and heartbeat
            stdout_task = asyncio.create_task(
                stream_output(process.stdout, stdout_chunks, prefix="[stdout] ")
            )
            stderr_task = asyncio.create_task(
                stream_output(process.stderr, stderr_chunks, prefix="[stderr] ", is_stderr=True)
            )
            heartbeat_task = asyncio.create_task(show_heartbeat())

            # Wait for process to complete with timeout
            try:
                # Wait for both streams to finish reading and process to complete
                async def wait_for_completion():
                    # Wait for streams to finish (EOF) first so no output
                    # is lost, then reap the process.
                    await asyncio.gather(stdout_task, stderr_task, return_exceptions=True)
                    # Wait for process to finish
                    return await process.wait()

                return_code = await asyncio.wait_for(
                    wait_for_completion(), timeout=self.config.timeout_seconds
                )
                heartbeat_task.cancel()  # Stop heartbeat
                try:
                    await heartbeat_task
                except asyncio.CancelledError:
                    pass
            except asyncio.TimeoutError:
                heartbeat_task.cancel()
                try:
                    await heartbeat_task
                except asyncio.CancelledError:
                    pass
                # Kill process on timeout
                try:
                    process.kill()
                    await process.wait()
                except Exception:
                    pass
                # Re-raise so the outer TimeoutError handler records the
                # error and prints the timeout message.
                raise asyncio.TimeoutError("Process timed out")

            # Combine chunks
            stdout = b"".join(stdout_chunks)
            stderr = b"".join(stderr_chunks)

            agent_logs.append(
                f"[{datetime.now().strftime('%H:%M:%S')}] Process completed with return code: {return_code}"
            )

            if return_code == 0:
                agent_logs.append(
                    f"[{datetime.now().strftime('%H:%M:%S')}] ✅ Analysis completed successfully"
                )

                if self.config.verbose:
                    print(f"✅ {role_name} analysis completed successfully")

                # Parse JSON output
                try:
                    output_text = stdout.decode("utf-8", errors="replace")
                    collected_output = output_text

                    # Try to parse as a single JSON object
                    try:
                        json_data = json.loads(output_text)
                        agent_logs.append(
                            f"[{datetime.now().strftime('%H:%M:%S')}] Parsed JSON response"
                        )

                        # Extract findings from JSON structure
                        self._findings = self._extract_findings_from_json(json_data, role_name)

                        # Fallback to text parsing if JSON contained only freeform analysis
                        if not self._findings:
                            json_text = self._extract_text_from_json(json_data)
                            if json_text:
                                self._findings = self._extract_findings_from_text(
                                    json_text, role_name
                                )

                    except json.JSONDecodeError:
                        # Try JSONL (one JSON object per line)
                        jsonl_objects = self._parse_jsonl(output_text)
                        if jsonl_objects:
                            agent_logs.append(
                                f"[{datetime.now().strftime('%H:%M:%S')}] Parsed JSONL response"
                            )
                            combined_text = []
                            for obj in jsonl_objects:
                                self._findings.extend(
                                    self._extract_findings_from_json(obj, role_name)
                                )
                                obj_text = self._extract_text_from_json(obj)
                                if obj_text:
                                    combined_text.append(obj_text)

                            if not self._findings and combined_text:
                                self._findings = self._extract_findings_from_text(
                                    "\n".join(combined_text),
                                    role_name,
                                )
                        else:
                            agent_logs.append(
                                f"[{datetime.now().strftime('%H:%M:%S')}] Output not valid JSON, treating as text"
                            )
                            # Extract findings from text output
                            self._findings = self._extract_findings_from_text(
                                output_text, role_name
                            )

                except UnicodeDecodeError as e:
                    # Unreachable with errors="replace" in practice, but
                    # kept as a defensive boundary for decode failures.
                    agent_logs.append(
                        f"[{datetime.now().strftime('%H:%M:%S')}] Failed to decode output: {e}"
                    )
                    errors.append(f"Failed to decode agent output: {e}")

            else:
                error_output = (
                    stderr.decode("utf-8", errors="replace") if stderr else "No stderr"
                )
                agent_logs.append(
                    f"[{datetime.now().strftime('%H:%M:%S')}] ❌ Process failed with error: {error_output}"
                )
                errors.append(f"OpenCode process failed: {error_output}")

        except asyncio.TimeoutError:
            agent_logs.append(
                f"[{datetime.now().strftime('%H:%M:%S')}] ❌ Process timed out after {self.config.timeout_seconds}s"
            )
            errors.append(f"Agent timed out after {self.config.timeout_seconds}s")

            if self.config.verbose:
                print(f"⏰ {role_name} analysis timed out after {self.config.timeout_seconds}s")
            # NOTE(review): the inner timeout handler already killed and
            # reaped the process; this second kill() can raise
            # ProcessLookupError, which would escape this handler and be
            # swallowed only by... nothing — verify and consider guarding.
            process.kill()

    except Exception as e:
        # Catch-all boundary for spawn failures and any unexpected error;
        # logged and converted into a failed ACPRunnerResult.
        agent_logs.append(f"[{datetime.now().strftime('%H:%M:%S')}] ❌ Unexpected error: {e}")
        errors.append(f"Agent execution failed: {e}")

        if self.config.verbose:
            print(f"❌ {role_name} analysis failed: {e}")
        logger.exception("ACP runner failed")

    duration = (datetime.now() - start_time).total_seconds()

    # Save the agent logs as an artifact (best-effort; failures only warn).
    try:
        from superqode.workspace.artifacts import ArtifactManager

        manager = ArtifactManager(self.project_root)
        manager.initialize("qe_logs")

        log_content = "\n".join(agent_logs)
        log_artifact = manager.save_log(
            name=f"QE Agent Analysis - {role_name}", content=log_content, log_type="qe_agent"
        )
        agent_logs.append(
            f"[{datetime.now().strftime('%H:%M:%S')}] Logs saved to: {log_artifact.path}"
        )
    except Exception as e:
        logger.warning(f"Failed to save agent logs: {e}")

    # Success requires a clean run AND at least one extracted finding.
    return ACPRunnerResult(
        success=len(errors) == 0 and len(self._findings) > 0,
        findings=self._findings,
        agent_output=collected_output,
        tool_calls=tool_calls,
        duration_seconds=duration,
        errors=errors,
    )
|
|
482
|
+
|
|
483
|
+
def _extract_findings_from_json(self, json_data: dict, role_name: str) -> List[ACPFinding]:
    """Extract findings from OpenCode JSON output.

    The JSON layout varies between OpenCode versions, so the known
    container keys ("findings", "issues", "results", nested
    "analysis.findings") are probed in priority order; when none is
    present, the payload itself is treated as a single finding if it
    carries a title or description.

    BUGFIX: the original expression chained ``a or b or ... or [json_data]
    if cond else []`` — the conditional binds looser than ``or`` in
    Python, so a payload with a "findings" list but no top-level
    title/description produced ``[]`` and silently dropped every finding.
    The container probe and the single-finding fallback are now separate.

    Args:
        json_data: Parsed JSON payload from the agent.
        role_name: QE role, used to namespace finding IDs and as category.

    Returns:
        List of ACPFinding objects (empty when nothing usable was found).
    """
    findings = []

    if isinstance(json_data, dict):
        # Probe the known container keys in priority order.
        analysis = json_data.get("analysis")
        nested = analysis.get("findings") if isinstance(analysis, dict) else None
        potential_findings = (
            json_data.get("findings")
            or json_data.get("issues")
            or json_data.get("results")
            or nested
        )
        if not potential_findings:
            # No container list — treat the payload itself as one finding
            # when it looks like one, otherwise nothing to extract.
            if json_data.get("title") or json_data.get("description"):
                potential_findings = [json_data]
            else:
                potential_findings = []

        for item in potential_findings:
            if isinstance(item, dict) and (item.get("title") or item.get("description")):
                self._finding_counter += 1
                finding = ACPFinding(
                    id=f"{role_name}-{self._finding_counter:03d}",
                    # `or "medium"` guards against explicit "severity": null,
                    # which would crash str.lower() on None.
                    severity=(item.get("severity") or "medium").lower(),
                    title=item.get("title") or item.get("description", "")[:50],
                    description=item.get("description") or item.get("title", ""),
                    file_path=item.get("file_path") or item.get("file") or item.get("location"),
                    line_number=item.get("line_number") or item.get("line"),
                    evidence=item.get("evidence"),
                    suggested_fix=item.get("suggested_fix") or item.get("fix"),
                    confidence=item.get("confidence", 0.8),
                    category=role_name,
                )
                findings.append(finding)

    return findings
|
|
519
|
+
|
|
520
|
+
def _extract_text_from_json(self, json_data: Any) -> str:
|
|
521
|
+
"""Extract freeform analysis text from a JSON payload."""
|
|
522
|
+
if isinstance(json_data, str):
|
|
523
|
+
return json_data
|
|
524
|
+
|
|
525
|
+
if not isinstance(json_data, dict):
|
|
526
|
+
return ""
|
|
527
|
+
|
|
528
|
+
# Common fields that may contain analysis text
|
|
529
|
+
for key in ("analysis", "text", "content", "message", "output", "response"):
|
|
530
|
+
value = json_data.get(key)
|
|
531
|
+
if isinstance(value, str):
|
|
532
|
+
return value
|
|
533
|
+
if isinstance(value, dict):
|
|
534
|
+
for nested_key in ("text", "content", "message", "analysis"):
|
|
535
|
+
nested_val = value.get(nested_key)
|
|
536
|
+
if isinstance(nested_val, str):
|
|
537
|
+
return nested_val
|
|
538
|
+
|
|
539
|
+
return ""
|
|
540
|
+
|
|
541
|
+
def _parse_jsonl(self, output_text: str) -> List[dict]:
|
|
542
|
+
"""Parse JSONL output into a list of JSON objects."""
|
|
543
|
+
objects = []
|
|
544
|
+
for line in output_text.splitlines():
|
|
545
|
+
line = line.strip()
|
|
546
|
+
if not line:
|
|
547
|
+
continue
|
|
548
|
+
try:
|
|
549
|
+
obj = json.loads(line)
|
|
550
|
+
except json.JSONDecodeError:
|
|
551
|
+
continue
|
|
552
|
+
if isinstance(obj, dict):
|
|
553
|
+
objects.append(obj)
|
|
554
|
+
return objects
|
|
555
|
+
|
|
556
|
+
def _extract_findings_from_text(self, text_output: str, role_name: str) -> List[ACPFinding]:
    """Extract findings from OpenCode text output.

    Delegates to the regex-based ``_extract_findings_from_output`` by
    staging *text_output* on ``self._collected_output`` first.

    NOTE(review): this returns ``self._findings`` — i.e. every finding
    accumulated on this runner instance so far, not only those parsed
    from *text_output*. Confirm callers expect the cumulative list.
    """
    findings = []

    # Use the existing text extraction logic
    self._collected_output = text_output
    self._extract_findings_from_output(role_name)

    return self._findings
|
|
565
|
+
|
|
566
|
+
def _check_agent_available(self) -> bool:
|
|
567
|
+
"""Check if the agent command is available."""
|
|
568
|
+
cmd = self.config.agent_command.split()[0]
|
|
569
|
+
return shutil.which(cmd) is not None
|
|
570
|
+
|
|
571
|
+
def _build_agent_command(self, role_name: str = "qe") -> str:
|
|
572
|
+
"""Build the agent command with model configuration."""
|
|
573
|
+
cmd_parts = self.config.agent_command.split()
|
|
574
|
+
|
|
575
|
+
# If model is specified and using opencode, add model flag
|
|
576
|
+
if self.config.model and "opencode" in self.config.agent_command:
|
|
577
|
+
cmd_parts.extend(["-m", f"opencode/{self.config.model}"])
|
|
578
|
+
|
|
579
|
+
return " ".join(cmd_parts)
|
|
580
|
+
|
|
581
|
+
def _handle_event(self, event: StreamEvent):
    """Route a streaming event from the agent.

    Message text and tool calls are accumulated on the runner and
    forwarded to the optional ``config.on_event`` callback; thought
    chunks are accumulated but not forwarded; errors are logged and
    forwarded.
    """
    kind = event.event_type

    if kind == StreamEventType.MESSAGE_CHUNK:
        chunk: StreamMessage = event.data
        self._collected_output += chunk.text
        if self.config.on_event:
            self.config.on_event(event)
        return

    if kind == StreamEventType.THOUGHT_CHUNK:
        # Thoughts are collected for diagnostics only — not forwarded.
        self._collected_thoughts += event.data.text
        return

    if kind == StreamEventType.TOOL_CALL:
        call: StreamToolCall = event.data
        self._tool_calls.append(
            {
                "id": call.tool_id,
                "title": call.title,
                "kind": call.kind.value,
                "status": call.status.value,
            }
        )
        if self.config.on_event:
            self.config.on_event(event)
        return

    if kind == StreamEventType.ERROR:
        logger.error(f"Agent error: {event.data}")
        if self.config.on_event:
            self.config.on_event(event)
|
|
613
|
+
|
|
614
|
+
def _extract_findings_from_output(self, role_name: str):
    """
    Extract structured findings from agent output.

    Looks for patterns like:
    - **CRITICAL**: ...
    - **HIGH**: ...
    - **MEDIUM**: ...
    - **LOW**: ...
    - Issue: ...
    - Bug: ...
    - Vulnerability: ...

    Three regex passes run in order (bold severity, bare severity,
    issue-keyword); matches are appended to ``self._findings`` and
    numbered via ``self._finding_counter``. The issue-keyword pass
    dedups against findings already collected by the earlier passes.
    Fenced ```json blocks are handled last by ``_extract_json_findings``.
    """
    output = self._collected_output

    # Pattern for severity-prefixed findings.
    # Lookahead ends a match at the next bold severity marker, a blank
    # line, or end of text, so each finding's body is captured whole.
    severity_pattern = (
        r"\*\*(CRITICAL|HIGH|MEDIUM|LOW|INFO)\*\*:\s*(.+?)(?=\n\*\*[A-Z]+\*\*:|\n\n|\Z)"
    )
    # Same severities without the bold markers, anchored to line start
    # via the (?m) multiline flag.
    severity_colon_pattern = (
        r"(?m)^(CRITICAL|HIGH|MEDIUM|LOW|INFO)\s*:\s*(.+?)"
        r"(?=\n(?:CRITICAL|HIGH|MEDIUM|LOW|INFO)\s*:|\n\n|\Z)"
    )

    for match in re.finditer(severity_pattern, output, re.IGNORECASE | re.DOTALL):
        severity = match.group(1).lower()
        content = match.group(2).strip()

        # Extract title (first line) and description (rest)
        lines = content.split("\n", 1)
        title = lines[0].strip()
        description = lines[1].strip() if len(lines) > 1 else title

        # Try to extract file path and line number
        file_path, line_number = self._extract_location(content)

        self._finding_counter += 1
        finding = ACPFinding(
            id=f"{role_name}-{self._finding_counter:03d}",
            severity=severity,
            title=title,
            description=description,
            file_path=file_path,
            line_number=line_number,
            confidence=0.8,
            category=role_name,
        )
        self._findings.append(finding)

    # Second pass: bare "SEVERITY: ..." lines (no bold markers).
    for match in re.finditer(severity_colon_pattern, output, re.IGNORECASE | re.DOTALL):
        severity = match.group(1).lower()
        content = match.group(2).strip()

        lines = content.split("\n", 1)
        title = lines[0].strip()
        description = lines[1].strip() if len(lines) > 1 else title

        file_path, line_number = self._extract_location(content)

        self._finding_counter += 1
        finding = ACPFinding(
            id=f"{role_name}-{self._finding_counter:03d}",
            severity=severity,
            title=title,
            description=description,
            file_path=file_path,
            line_number=line_number,
            confidence=0.8,
            category=role_name,
        )
        self._findings.append(finding)

    # Pattern for issue/bug/vulnerability prefixed findings
    issue_pattern = r"(?:Issue|Bug|Vulnerability|Problem|Warning):\s*(.+?)(?=\n(?:Issue|Bug|Vulnerability|Problem|Warning):|\n\n|\Z)"

    for match in re.finditer(issue_pattern, output, re.IGNORECASE | re.DOTALL):
        content = match.group(1).strip()

        # Skip if we already captured this as a severity-prefixed finding
        # (substring containment in either direction counts as a dup).
        if any(f.description in content or content in f.description for f in self._findings):
            continue

        lines = content.split("\n", 1)
        title = lines[0].strip()
        description = lines[1].strip() if len(lines) > 1 else title

        file_path, line_number = self._extract_location(content)

        # Infer severity from keywords
        severity = self._infer_severity(content)

        self._finding_counter += 1
        finding = ACPFinding(
            id=f"{role_name}-{self._finding_counter:03d}",
            severity=severity,
            title=title,
            description=description,
            file_path=file_path,
            line_number=line_number,
            # Lower than the explicit-severity passes: severity was inferred.
            confidence=0.7,
            category=role_name,
        )
        self._findings.append(finding)

    # Also look for JSON-formatted findings
    self._extract_json_findings(output, role_name)
|
|
720
|
+
|
|
721
|
+
def _extract_location(self, content: str) -> tuple:
|
|
722
|
+
"""Extract file path and line number from content."""
|
|
723
|
+
# Pattern: path/to/file.py:123 or path/to/file.py line 123
|
|
724
|
+
file_pattern = r"([a-zA-Z0-9_\-./\\]+\.[a-zA-Z]+)(?::(\d+)| line (\d+))?"
|
|
725
|
+
|
|
726
|
+
match = re.search(file_pattern, content)
|
|
727
|
+
if match:
|
|
728
|
+
file_path = match.group(1)
|
|
729
|
+
line_number = match.group(2) or match.group(3)
|
|
730
|
+
return file_path, int(line_number) if line_number else None
|
|
731
|
+
|
|
732
|
+
return None, None
|
|
733
|
+
|
|
734
|
+
def _infer_severity(self, content: str) -> str:
|
|
735
|
+
"""Infer severity from content keywords."""
|
|
736
|
+
content_lower = content.lower()
|
|
737
|
+
|
|
738
|
+
critical_keywords = ["critical", "security", "vulnerability", "injection", "xss", "exploit"]
|
|
739
|
+
high_keywords = ["high", "severe", "dangerous", "unsafe", "memory leak"]
|
|
740
|
+
medium_keywords = ["medium", "warning", "potential", "may cause"]
|
|
741
|
+
low_keywords = ["low", "minor", "style", "suggestion", "consider"]
|
|
742
|
+
|
|
743
|
+
for kw in critical_keywords:
|
|
744
|
+
if kw in content_lower:
|
|
745
|
+
return "critical"
|
|
746
|
+
|
|
747
|
+
for kw in high_keywords:
|
|
748
|
+
if kw in content_lower:
|
|
749
|
+
return "high"
|
|
750
|
+
|
|
751
|
+
for kw in medium_keywords:
|
|
752
|
+
if kw in content_lower:
|
|
753
|
+
return "medium"
|
|
754
|
+
|
|
755
|
+
for kw in low_keywords:
|
|
756
|
+
if kw in content_lower:
|
|
757
|
+
return "low"
|
|
758
|
+
|
|
759
|
+
return "medium" # Default
|
|
760
|
+
|
|
761
|
+
def _extract_json_findings(self, output: str, role_name: str):
    """Scan fenced JSON code blocks in *output* and register findings.

    Accepts a bare list of findings, a dict with a "findings" list, or a
    single finding object; malformed JSON blocks are skipped.
    """
    fenced_json_re = r"```json\s*([\s\S]*?)\s*```"

    for block in re.finditer(fenced_json_re, output):
        try:
            payload = json.loads(block.group(1))
        except json.JSONDecodeError:
            continue  # malformed block — skip and keep scanning

        if isinstance(payload, list):
            # Top-level array of findings.
            for entry in payload:
                self._add_json_finding(entry, role_name)
        elif isinstance(payload, dict):
            if "findings" in payload:
                for entry in payload["findings"]:
                    self._add_json_finding(entry, role_name)
            else:
                # Single finding object.
                self._add_json_finding(payload, role_name)
|
|
785
|
+
|
|
786
|
+
def _add_json_finding(self, data: Dict[str, Any], role_name: str):
    """Create an ACPFinding from a JSON dict and append it to self._findings.

    Silently ignores payloads that are not dicts or that carry neither a
    title nor a description. Numbers the finding via
    ``self._finding_counter``.
    """
    if not isinstance(data, dict):
        return

    # Skip if missing required fields
    if not data.get("title") and not data.get("description"):
        return

    self._finding_counter += 1
    finding = ACPFinding(
        id=f"{role_name}-{self._finding_counter:03d}",
        # `or "medium"` guards against explicit "severity": null in the
        # payload, which would crash str.lower() on None.
        severity=(data.get("severity") or "medium").lower(),
        # `or` (rather than get(..., default)) also covers explicit nulls;
        # at least one of title/description is truthy per the guard above.
        title=data.get("title") or data.get("description", "")[:50],
        description=data.get("description") or data.get("title", ""),
        file_path=data.get("file_path") or data.get("file") or data.get("location"),
        line_number=data.get("line_number") or data.get("line"),
        evidence=data.get("evidence"),
        suggested_fix=data.get("suggested_fix") or data.get("fix"),
        confidence=data.get("confidence", 0.8),
        category=role_name,
    )
    self._findings.append(finding)
|
|
809
|
+
|
|
810
|
+
|
|
811
|
+
# =============================================================================
|
|
812
|
+
# QE Prompts for Different Roles
|
|
813
|
+
# =============================================================================
|
|
814
|
+
|
|
815
|
+
# Role name -> analysis prompt sent to the agent. Each prompt instructs the
# agent to report findings in the "**SEVERITY**: Title" format that
# _extract_findings_from_output parses. These strings are runtime data —
# edit with care, since the parsing regexes depend on the reporting format.
QE_PROMPTS = {
    # API-focused review: security, contracts, error handling, rate limiting.
    "api_tester": """You are a Senior API Quality Engineer. Analyze this codebase for API issues.

Focus on:
1. API endpoint security (authentication, authorization, input validation)
2. API contract violations
3. Error handling in API routes
4. Rate limiting and throttling
5. Data validation and sanitization

For each issue found, report in this format:
**SEVERITY**: Title
Description of the issue
File: path/to/file.py:line_number
Evidence: code snippet or explanation

Start your analysis now. Be thorough but concise.""",
    # Security review: OWASP Top 10, injection, XSS, secrets.
    "security_tester": """You are a Senior Security Engineer. Analyze this codebase for security vulnerabilities.

Focus on:
1. OWASP Top 10 vulnerabilities
2. SQL/NoSQL injection points
3. XSS vulnerabilities
4. Authentication/authorization flaws
5. Sensitive data exposure
6. Hardcoded secrets or credentials

For each vulnerability found, report in this format:
**SEVERITY**: Title (e.g., **CRITICAL**: SQL Injection in user search)
Description and impact
File: path/to/file.py:line_number
Evidence: vulnerable code snippet

Start your security analysis now.""",
    # Unit-test coverage review: missing tests and untested paths.
    "unit_tester": """You are a Senior Test Engineer. Analyze this codebase for test coverage gaps.

Focus on:
1. Functions/methods lacking unit tests
2. Edge cases not covered by existing tests
3. Error handling paths not tested
4. Complex logic without test coverage
5. Public APIs without tests

For each gap found, report in this format:
**MEDIUM**: Missing tests for function_name
Description of what should be tested
File: path/to/file.py:line_number
Suggested test cases: brief description

Analyze the test coverage now.""",
    # End-to-end workflow review: user journeys and integration points.
    "e2e_tester": """You are a Senior E2E Test Engineer. Analyze this codebase for workflow testing gaps.

Focus on:
1. Critical user workflows not tested
2. Integration points between components
3. User journey edge cases
4. State management issues
5. Cross-component data flow

For each issue found, report in this format:
**SEVERITY**: Title
Description of the workflow issue
Files involved: list files
Suggested E2E test scenario

Analyze user workflows now.""",
    # Performance review: N+1 queries, leaks, algorithmic complexity.
    "performance_tester": """You are a Senior Performance Engineer. Analyze this codebase for performance issues.

Focus on:
1. N+1 query patterns
2. Memory leak patterns
3. Inefficient algorithms (O(n²) or worse)
4. Missing caching opportunities
5. Blocking I/O operations
6. Large data processing without pagination

For each issue found, report in this format:
**SEVERITY**: Title (e.g., **HIGH**: N+1 query in user listing)
Description and performance impact
File: path/to/file.py:line_number
Evidence: problematic code
Suggested optimization

Analyze performance now.""",
    # Comprehensive review covering all aspects; also the fallback role
    # used by get_qe_prompt for unknown role names.
    "fullstack": """You are a Senior QE Tech Lead with 15+ years of experience. Conduct a comprehensive code review.

Review all aspects:
1. Functional correctness - does the code do what it should?
2. Error handling - are errors handled properly?
3. Security - any security concerns?
4. Performance - any obvious bottlenecks?
5. Code quality - maintainability, readability
6. Test coverage - are there adequate tests?

For each issue found, report with severity:
**CRITICAL**: Blocks release, must fix
**HIGH**: Should fix before release
**MEDIUM**: Should fix soon
**LOW**: Nice to have

Format:
**SEVERITY**: Title
Description with business impact
File: path/to/file.py:line_number
Evidence and suggested fix

Be thorough but focus on real, practical issues that affect users and business.
Start your comprehensive review now.""",
}
|
|
924
|
+
|
|
925
|
+
|
|
926
|
+
def get_qe_prompt(role_name: str, allow_suggestions: bool = False) -> str:
    """Get the QE prompt for a role.

    Args:
        role_name: Name of the QE role; an optional "qe." prefix is stripped.
        allow_suggestions: If True, and the enterprise feature gate permits
            it, the prompt is enhanced to request applied-and-verified fixes.

    Returns:
        The role-specific prompt ("fullstack" is the fallback for unknown
        roles), enhanced for suggestions if enabled.
    """
    normalized = role_name[3:] if role_name.startswith("qe.") else role_name
    base_prompt = QE_PROMPTS.get(normalized, QE_PROMPTS["fullstack"])

    if not allow_suggestions:
        return base_prompt

    # Suggestion mode is an enterprise-gated feature.
    from superqode.enterprise import require_enterprise

    if require_enterprise("Fix suggestions and verification"):
        return _enhance_prompt_for_suggestions(base_prompt, normalized)
    return base_prompt
|
|
949
|
+
|
|
950
|
+
|
|
951
|
+
def _enhance_prompt_for_suggestions(base_prompt: str, role_name: str) -> str:
    """Enhance a QE prompt to request suggested fixes.

    When allow_suggestions is enabled, the agent should:
    1. Find issues as usual
    2. Generate a fix for each issue
    3. Apply the fix in sandbox
    4. Verify the fix works
    5. Report the outcome with evidence

    Note: *role_name* is currently unused here; it is kept for interface
    stability with the caller (get_qe_prompt).
    """
    # Appended verbatim to the role prompt; the leading blank lines
    # separate it from the base prompt's final line.
    suggestion_addendum = """

## SUGGESTION MODE ENABLED

For each issue you find, you MUST also:

1. **Generate a Fix**: Create a concrete code fix for the issue
2. **Apply the Fix**: Modify the relevant file(s) to implement the fix
3. **Verify the Fix**: Run any available tests to confirm the fix works
4. **Report Outcome**: Document what you did and whether it worked

For each finding with a fix, report in this enhanced format:

**SEVERITY**: Title
Description of the issue
File: path/to/file.py:line_number
Evidence: original problematic code

**SUGGESTED FIX**:
```diff
- old code
+ new code
```

**VERIFICATION**:
- Applied fix: Yes/No
- Tests run: list tests
- Tests passed: X/Y
- Fix verified: Yes/No

**OUTCOME**:
Brief description of what changed and confirmation the fix works.

IMPORTANT:
- Apply each fix to the actual files (you have permission in this sandbox)
- Run tests after each fix to verify
- If a fix doesn't work, try up to 3 alternative approaches
- Always report both the issue AND the fix with verification results
- The sandbox will be reverted after QE - your fixes are demonstrations only

Start your analysis with fix generation now."""

    return base_prompt + suggestion_addendum
|
|
1004
|
+
|
|
1005
|
+
|
|
1006
|
+
# Prompts specifically for suggestion verification
|
|
1007
|
+
# Prompt templates for the fix-verification loop. Each template uses
# str.format-style named placeholders; get_verification_prompt fills them in.
VERIFICATION_PROMPTS = {
    # Ask the agent to confirm an applied fix resolved the issue.
    "verify_fix": """You previously suggested a fix for the following issue:

Issue: {issue_title}
File: {file_path}:{line_number}
Fix applied: {fix_description}

Now verify that the fix works:

1. Run the relevant tests for this file/module
2. Check if the original issue is resolved
3. Check for any regressions (new failures)

Report your verification results:

**VERIFICATION RESULT**:
- Original issue resolved: Yes/No
- Tests run: [list of tests]
- Tests passed: X/Y
- New regressions: Yes/No (if yes, describe)
- Verification status: PASSED/FAILED/INCONCLUSIVE

If the fix caused problems, explain what went wrong.""",
    # Ask the agent to roll back a failed fix and attempt an alternative.
    "revert_and_retry": """The previous fix attempt failed verification.

Original issue: {issue_title}
Failed fix: {failed_fix}
Failure reason: {failure_reason}

Please:
1. Revert the previous fix
2. Analyze why it failed
3. Generate an alternative fix
4. Apply and verify the new fix

Report the new attempt in the standard format.""",
}
|
|
1044
|
+
|
|
1045
|
+
|
|
1046
|
+
def get_verification_prompt(
    prompt_type: str,
    issue_title: str = "",
    file_path: str = "",
    line_number: int = 0,
    fix_description: str = "",
    failed_fix: str = "",
    failure_reason: str = "",
) -> str:
    """Look up a verification prompt template and fill in the details.

    An unknown *prompt_type* yields an empty string (formatting "" with
    any keyword arguments is a no-op).
    """
    template = VERIFICATION_PROMPTS.get(prompt_type, "")
    substitutions = {
        "issue_title": issue_title,
        "file_path": file_path,
        "line_number": line_number,
        "fix_description": fix_description,
        "failed_fix": failed_fix,
        "failure_reason": failure_reason,
    }
    return template.format(**substitutions)
|