superqode-0.1.5-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- superqode/__init__.py +33 -0
- superqode/acp/__init__.py +23 -0
- superqode/acp/client.py +913 -0
- superqode/acp/permission_screen.py +457 -0
- superqode/acp/types.py +480 -0
- superqode/acp_discovery.py +856 -0
- superqode/agent/__init__.py +22 -0
- superqode/agent/edit_strategies.py +334 -0
- superqode/agent/loop.py +892 -0
- superqode/agent/qe_report_templates.py +39 -0
- superqode/agent/system_prompts.py +353 -0
- superqode/agent_output.py +721 -0
- superqode/agent_stream.py +953 -0
- superqode/agents/__init__.py +59 -0
- superqode/agents/acp_registry.py +305 -0
- superqode/agents/client.py +249 -0
- superqode/agents/data/augmentcode.com.toml +51 -0
- superqode/agents/data/cagent.dev.toml +51 -0
- superqode/agents/data/claude.com.toml +60 -0
- superqode/agents/data/codeassistant.dev.toml +51 -0
- superqode/agents/data/codex.openai.com.toml +57 -0
- superqode/agents/data/fastagent.ai.toml +66 -0
- superqode/agents/data/geminicli.com.toml +77 -0
- superqode/agents/data/goose.block.xyz.toml +54 -0
- superqode/agents/data/junie.jetbrains.com.toml +56 -0
- superqode/agents/data/kimi.moonshot.cn.toml +57 -0
- superqode/agents/data/llmlingagent.dev.toml +51 -0
- superqode/agents/data/molt.bot.toml +49 -0
- superqode/agents/data/opencode.ai.toml +60 -0
- superqode/agents/data/stakpak.dev.toml +51 -0
- superqode/agents/data/vtcode.dev.toml +51 -0
- superqode/agents/discovery.py +266 -0
- superqode/agents/messaging.py +160 -0
- superqode/agents/persona.py +166 -0
- superqode/agents/registry.py +421 -0
- superqode/agents/schema.py +72 -0
- superqode/agents/unified.py +367 -0
- superqode/app/__init__.py +111 -0
- superqode/app/constants.py +314 -0
- superqode/app/css.py +366 -0
- superqode/app/models.py +118 -0
- superqode/app/suggester.py +125 -0
- superqode/app/widgets.py +1591 -0
- superqode/app_enhanced.py +399 -0
- superqode/app_main.py +17187 -0
- superqode/approval.py +312 -0
- superqode/atomic.py +296 -0
- superqode/commands/__init__.py +1 -0
- superqode/commands/acp.py +965 -0
- superqode/commands/agents.py +180 -0
- superqode/commands/auth.py +278 -0
- superqode/commands/config.py +374 -0
- superqode/commands/init.py +826 -0
- superqode/commands/providers.py +819 -0
- superqode/commands/qe.py +1145 -0
- superqode/commands/roles.py +380 -0
- superqode/commands/serve.py +172 -0
- superqode/commands/suggestions.py +127 -0
- superqode/commands/superqe.py +460 -0
- superqode/config/__init__.py +51 -0
- superqode/config/loader.py +812 -0
- superqode/config/schema.py +498 -0
- superqode/core/__init__.py +111 -0
- superqode/core/roles.py +281 -0
- superqode/danger.py +386 -0
- superqode/data/superqode-template.yaml +1522 -0
- superqode/design_system.py +1080 -0
- superqode/dialogs/__init__.py +6 -0
- superqode/dialogs/base.py +39 -0
- superqode/dialogs/model.py +130 -0
- superqode/dialogs/provider.py +870 -0
- superqode/diff_view.py +919 -0
- superqode/enterprise.py +21 -0
- superqode/evaluation/__init__.py +25 -0
- superqode/evaluation/adapters.py +93 -0
- superqode/evaluation/behaviors.py +89 -0
- superqode/evaluation/engine.py +209 -0
- superqode/evaluation/scenarios.py +96 -0
- superqode/execution/__init__.py +36 -0
- superqode/execution/linter.py +538 -0
- superqode/execution/modes.py +347 -0
- superqode/execution/resolver.py +283 -0
- superqode/execution/runner.py +642 -0
- superqode/file_explorer.py +811 -0
- superqode/file_viewer.py +471 -0
- superqode/flash.py +183 -0
- superqode/guidance/__init__.py +58 -0
- superqode/guidance/config.py +203 -0
- superqode/guidance/prompts.py +71 -0
- superqode/harness/__init__.py +54 -0
- superqode/harness/accelerator.py +291 -0
- superqode/harness/config.py +319 -0
- superqode/harness/validator.py +147 -0
- superqode/history.py +279 -0
- superqode/integrations/superopt_runner.py +124 -0
- superqode/logging/__init__.py +49 -0
- superqode/logging/adapters.py +219 -0
- superqode/logging/formatter.py +923 -0
- superqode/logging/integration.py +341 -0
- superqode/logging/sinks.py +170 -0
- superqode/logging/unified_log.py +417 -0
- superqode/lsp/__init__.py +26 -0
- superqode/lsp/client.py +544 -0
- superqode/main.py +1069 -0
- superqode/mcp/__init__.py +89 -0
- superqode/mcp/auth_storage.py +380 -0
- superqode/mcp/client.py +1236 -0
- superqode/mcp/config.py +319 -0
- superqode/mcp/integration.py +337 -0
- superqode/mcp/oauth.py +436 -0
- superqode/mcp/oauth_callback.py +385 -0
- superqode/mcp/types.py +290 -0
- superqode/memory/__init__.py +31 -0
- superqode/memory/feedback.py +342 -0
- superqode/memory/store.py +522 -0
- superqode/notifications.py +369 -0
- superqode/optimization/__init__.py +5 -0
- superqode/optimization/config.py +33 -0
- superqode/permissions/__init__.py +25 -0
- superqode/permissions/rules.py +488 -0
- superqode/plan.py +323 -0
- superqode/providers/__init__.py +33 -0
- superqode/providers/gateway/__init__.py +165 -0
- superqode/providers/gateway/base.py +228 -0
- superqode/providers/gateway/litellm_gateway.py +1170 -0
- superqode/providers/gateway/openresponses_gateway.py +436 -0
- superqode/providers/health.py +297 -0
- superqode/providers/huggingface/__init__.py +74 -0
- superqode/providers/huggingface/downloader.py +472 -0
- superqode/providers/huggingface/endpoints.py +442 -0
- superqode/providers/huggingface/hub.py +531 -0
- superqode/providers/huggingface/inference.py +394 -0
- superqode/providers/huggingface/transformers_runner.py +516 -0
- superqode/providers/local/__init__.py +100 -0
- superqode/providers/local/base.py +438 -0
- superqode/providers/local/discovery.py +418 -0
- superqode/providers/local/lmstudio.py +256 -0
- superqode/providers/local/mlx.py +457 -0
- superqode/providers/local/ollama.py +486 -0
- superqode/providers/local/sglang.py +268 -0
- superqode/providers/local/tgi.py +260 -0
- superqode/providers/local/tool_support.py +477 -0
- superqode/providers/local/vllm.py +258 -0
- superqode/providers/manager.py +1338 -0
- superqode/providers/models.py +1016 -0
- superqode/providers/models_dev.py +578 -0
- superqode/providers/openresponses/__init__.py +87 -0
- superqode/providers/openresponses/converters/__init__.py +17 -0
- superqode/providers/openresponses/converters/messages.py +343 -0
- superqode/providers/openresponses/converters/tools.py +268 -0
- superqode/providers/openresponses/schema/__init__.py +56 -0
- superqode/providers/openresponses/schema/models.py +585 -0
- superqode/providers/openresponses/streaming/__init__.py +5 -0
- superqode/providers/openresponses/streaming/parser.py +338 -0
- superqode/providers/openresponses/tools/__init__.py +21 -0
- superqode/providers/openresponses/tools/apply_patch.py +352 -0
- superqode/providers/openresponses/tools/code_interpreter.py +290 -0
- superqode/providers/openresponses/tools/file_search.py +333 -0
- superqode/providers/openresponses/tools/mcp_adapter.py +252 -0
- superqode/providers/registry.py +716 -0
- superqode/providers/usage.py +332 -0
- superqode/pure_mode.py +384 -0
- superqode/qr/__init__.py +23 -0
- superqode/qr/dashboard.py +781 -0
- superqode/qr/generator.py +1018 -0
- superqode/qr/templates.py +135 -0
- superqode/safety/__init__.py +41 -0
- superqode/safety/sandbox.py +413 -0
- superqode/safety/warnings.py +256 -0
- superqode/server/__init__.py +33 -0
- superqode/server/lsp_server.py +775 -0
- superqode/server/web.py +250 -0
- superqode/session/__init__.py +25 -0
- superqode/session/persistence.py +580 -0
- superqode/session/sharing.py +477 -0
- superqode/session.py +475 -0
- superqode/sidebar.py +2991 -0
- superqode/stream_view.py +648 -0
- superqode/styles/__init__.py +3 -0
- superqode/superqe/__init__.py +184 -0
- superqode/superqe/acp_runner.py +1064 -0
- superqode/superqe/constitution/__init__.py +62 -0
- superqode/superqe/constitution/evaluator.py +308 -0
- superqode/superqe/constitution/loader.py +432 -0
- superqode/superqe/constitution/schema.py +250 -0
- superqode/superqe/events.py +591 -0
- superqode/superqe/frameworks/__init__.py +65 -0
- superqode/superqe/frameworks/base.py +234 -0
- superqode/superqe/frameworks/e2e.py +263 -0
- superqode/superqe/frameworks/executor.py +237 -0
- superqode/superqe/frameworks/javascript.py +409 -0
- superqode/superqe/frameworks/python.py +373 -0
- superqode/superqe/frameworks/registry.py +92 -0
- superqode/superqe/mcp_tools/__init__.py +47 -0
- superqode/superqe/mcp_tools/core_tools.py +418 -0
- superqode/superqe/mcp_tools/registry.py +230 -0
- superqode/superqe/mcp_tools/testing_tools.py +167 -0
- superqode/superqe/noise.py +89 -0
- superqode/superqe/orchestrator.py +778 -0
- superqode/superqe/roles.py +609 -0
- superqode/superqe/session.py +713 -0
- superqode/superqe/skills/__init__.py +57 -0
- superqode/superqe/skills/base.py +106 -0
- superqode/superqe/skills/core_skills.py +899 -0
- superqode/superqe/skills/registry.py +90 -0
- superqode/superqe/verifier.py +101 -0
- superqode/superqe_cli.py +76 -0
- superqode/tool_call.py +358 -0
- superqode/tools/__init__.py +93 -0
- superqode/tools/agent_tools.py +496 -0
- superqode/tools/base.py +324 -0
- superqode/tools/batch_tool.py +133 -0
- superqode/tools/diagnostics.py +311 -0
- superqode/tools/edit_tools.py +653 -0
- superqode/tools/enhanced_base.py +515 -0
- superqode/tools/file_tools.py +269 -0
- superqode/tools/file_tracking.py +45 -0
- superqode/tools/lsp_tools.py +610 -0
- superqode/tools/network_tools.py +350 -0
- superqode/tools/permissions.py +400 -0
- superqode/tools/question_tool.py +324 -0
- superqode/tools/search_tools.py +598 -0
- superqode/tools/shell_tools.py +259 -0
- superqode/tools/todo_tools.py +121 -0
- superqode/tools/validation.py +80 -0
- superqode/tools/web_tools.py +639 -0
- superqode/tui.py +1152 -0
- superqode/tui_integration.py +875 -0
- superqode/tui_widgets/__init__.py +27 -0
- superqode/tui_widgets/widgets/__init__.py +18 -0
- superqode/tui_widgets/widgets/progress.py +185 -0
- superqode/tui_widgets/widgets/tool_display.py +188 -0
- superqode/undo_manager.py +574 -0
- superqode/utils/__init__.py +5 -0
- superqode/utils/error_handling.py +323 -0
- superqode/utils/fuzzy.py +257 -0
- superqode/widgets/__init__.py +477 -0
- superqode/widgets/agent_collab.py +390 -0
- superqode/widgets/agent_store.py +936 -0
- superqode/widgets/agent_switcher.py +395 -0
- superqode/widgets/animation_manager.py +284 -0
- superqode/widgets/code_context.py +356 -0
- superqode/widgets/command_palette.py +412 -0
- superqode/widgets/connection_status.py +537 -0
- superqode/widgets/conversation_history.py +470 -0
- superqode/widgets/diff_indicator.py +155 -0
- superqode/widgets/enhanced_status_bar.py +385 -0
- superqode/widgets/enhanced_toast.py +476 -0
- superqode/widgets/file_browser.py +809 -0
- superqode/widgets/file_reference.py +585 -0
- superqode/widgets/issue_timeline.py +340 -0
- superqode/widgets/leader_key.py +264 -0
- superqode/widgets/mode_switcher.py +445 -0
- superqode/widgets/model_picker.py +234 -0
- superqode/widgets/permission_preview.py +1205 -0
- superqode/widgets/prompt.py +358 -0
- superqode/widgets/provider_connect.py +725 -0
- superqode/widgets/pty_shell.py +587 -0
- superqode/widgets/qe_dashboard.py +321 -0
- superqode/widgets/resizable_sidebar.py +377 -0
- superqode/widgets/response_changes.py +218 -0
- superqode/widgets/response_display.py +528 -0
- superqode/widgets/rich_tool_display.py +613 -0
- superqode/widgets/sidebar_panels.py +1180 -0
- superqode/widgets/slash_complete.py +356 -0
- superqode/widgets/split_view.py +612 -0
- superqode/widgets/status_bar.py +273 -0
- superqode/widgets/superqode_display.py +786 -0
- superqode/widgets/thinking_display.py +815 -0
- superqode/widgets/throbber.py +87 -0
- superqode/widgets/toast.py +206 -0
- superqode/widgets/unified_output.py +1073 -0
- superqode/workspace/__init__.py +75 -0
- superqode/workspace/artifacts.py +472 -0
- superqode/workspace/coordinator.py +353 -0
- superqode/workspace/diff_tracker.py +429 -0
- superqode/workspace/git_guard.py +373 -0
- superqode/workspace/git_snapshot.py +526 -0
- superqode/workspace/manager.py +750 -0
- superqode/workspace/snapshot.py +357 -0
- superqode/workspace/watcher.py +535 -0
- superqode/workspace/worktree.py +440 -0
- superqode-0.1.5.dist-info/METADATA +204 -0
- superqode-0.1.5.dist-info/RECORD +288 -0
- superqode-0.1.5.dist-info/WHEEL +5 -0
- superqode-0.1.5.dist-info/entry_points.txt +3 -0
- superqode-0.1.5.dist-info/licenses/LICENSE +648 -0
- superqode-0.1.5.dist-info/top_level.txt +1 -0
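
A wheel is a standard zip archive, so the inventory above can be reproduced locally. A minimal sketch, assuming the wheel has already been fetched (for example with `pip download superqode==0.1.5 --no-deps`):

```python
# List the files bundled in the superqode 0.1.5 wheel (a wheel is a zip archive).
# Assumes the wheel file sits in the current directory.
import zipfile

with zipfile.ZipFile("superqode-0.1.5-py3-none-any.whl") as whl:
    for info in whl.infolist():
        print(f"{info.file_size:>10}  {info.filename}")
```

Two of the added modules, the multi-framework test executor and the JavaScript framework adapters, appear in full below.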
superqode/superqe/frameworks/executor.py
@@ -0,0 +1,237 @@
```python
"""
Multi-Framework Executor - Execute tests across multiple frameworks.

Provides:
- Parallel execution across frameworks
- Result aggregation
- Unified reporting
"""

from dataclasses import dataclass, field
from datetime import datetime
from pathlib import Path
from typing import Any, Dict, List, Optional
import asyncio

from .base import FrameworkConfig, ExecutionResult
from .registry import detect_framework, get_framework


@dataclass
class ExecutorConfig:
    """Configuration for multi-framework execution."""

    project_root: Path = field(default_factory=Path.cwd)
    parallel_frameworks: bool = True
    timeout_seconds: int = 600
    fail_fast: bool = False
    coverage: bool = False
    workers_per_framework: int = 4


@dataclass
class MultiFrameworkResult:
    """Result of multi-framework test execution."""

    started_at: datetime
    ended_at: Optional[datetime] = None
    duration_seconds: float = 0.0
    framework_results: Dict[str, ExecutionResult] = field(default_factory=dict)
    total: int = 0
    passed: int = 0
    failed: int = 0
    skipped: int = 0
    errors: int = 0

    @property
    def success(self) -> bool:
        """Did all frameworks pass?"""
        return all(r.success for r in self.framework_results.values())

    @property
    def frameworks_run(self) -> List[str]:
        """List of frameworks that were run."""
        return list(self.framework_results.keys())

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary."""
        return {
            "started_at": self.started_at.isoformat(),
            "ended_at": self.ended_at.isoformat() if self.ended_at else None,
            "duration_seconds": self.duration_seconds,
            "success": self.success,
            "total": self.total,
            "passed": self.passed,
            "failed": self.failed,
            "skipped": self.skipped,
            "errors": self.errors,
            "frameworks": {
                name: result.to_dict() for name, result in self.framework_results.items()
            },
        }


class MultiFrameworkExecutor:
    """
    Executor for running tests across multiple frameworks.

    Supports parallel execution of up to 10,000+ concurrent tests
    by distributing work across frameworks and workers.
    """

    def __init__(self, config: Optional[ExecutorConfig] = None):
        """Initialize the executor."""
        self.config = config or ExecutorConfig()

    async def execute(
        self,
        frameworks: Optional[List[str]] = None,
        tests: Optional[Dict[str, List[str]]] = None,
        **kwargs,
    ) -> MultiFrameworkResult:
        """
        Execute tests across frameworks.

        Args:
            frameworks: List of framework names to run (None = auto-detect)
            tests: Dict of framework name to test list
            **kwargs: Additional options

        Returns:
            MultiFrameworkResult with aggregated results
        """
        started_at = datetime.now()

        result = MultiFrameworkResult(started_at=started_at)

        # Get frameworks to run
        framework_instances = []

        if frameworks:
            for name in frameworks:
                fw_config = FrameworkConfig(
                    project_root=self.config.project_root,
                    parallel=True,
                    workers=self.config.workers_per_framework,
                    timeout_seconds=self.config.timeout_seconds,
                    fail_fast=self.config.fail_fast,
                    coverage=self.config.coverage,
                )
                fw = get_framework(name, fw_config)
                if fw:
                    framework_instances.append(fw)
        else:
            # Auto-detect frameworks
            fw_config = FrameworkConfig(
                project_root=self.config.project_root,
                parallel=True,
                workers=self.config.workers_per_framework,
            )
            framework_instances = detect_framework(self.config.project_root, fw_config)

        if not framework_instances:
            result.ended_at = datetime.now()
            return result

        # Execute frameworks
        if self.config.parallel_frameworks:
            # Run all frameworks in parallel
            tasks = []
            for fw in framework_instances:
                fw_tests = tests.get(fw.NAME) if tests else None
                tasks.append(self._run_framework(fw, fw_tests))

            fw_results = await asyncio.gather(*tasks, return_exceptions=True)

            for fw, fw_result in zip(framework_instances, fw_results):
                if isinstance(fw_result, Exception):
                    # Create error result
                    result.framework_results[fw.NAME] = ExecutionResult(
                        framework=fw.NAME,
                        started_at=started_at,
                        ended_at=datetime.now(),
                        errors=1,
                        error_output=str(fw_result),
                    )
                else:
                    result.framework_results[fw.NAME] = fw_result
        else:
            # Run frameworks sequentially
            for fw in framework_instances:
                if self.config.fail_fast and result.failed > 0:
                    break

                fw_tests = tests.get(fw.NAME) if tests else None
                try:
                    fw_result = await self._run_framework(fw, fw_tests)
                    result.framework_results[fw.NAME] = fw_result
                except Exception as e:
                    result.framework_results[fw.NAME] = ExecutionResult(
                        framework=fw.NAME,
                        started_at=started_at,
                        ended_at=datetime.now(),
                        errors=1,
                        error_output=str(e),
                    )

        # Aggregate results
        for fw_result in result.framework_results.values():
            result.total += fw_result.total
            result.passed += fw_result.passed
            result.failed += fw_result.failed
            result.skipped += fw_result.skipped
            result.errors += fw_result.errors

        result.ended_at = datetime.now()
        result.duration_seconds = (result.ended_at - started_at).total_seconds()

        return result

    async def _run_framework(self, framework, tests: Optional[List[str]] = None) -> ExecutionResult:
        """Run a single framework."""
        return await framework.execute(tests=tests)

    async def discover_all(self) -> Dict[str, List[str]]:
        """Discover tests across all detected frameworks."""
        fw_config = FrameworkConfig(project_root=self.config.project_root)
        frameworks = detect_framework(self.config.project_root, fw_config)

        results = {}
        for fw in frameworks:
            suites = await fw.discover()
            tests = []
            for suite in suites:
                tests.extend(suite.tests)
            results[fw.NAME] = tests

        return results


async def execute_tests(
    project_root: Optional[Path] = None,
    frameworks: Optional[List[str]] = None,
    parallel: bool = True,
    coverage: bool = False,
    **kwargs,
) -> MultiFrameworkResult:
    """
    Execute tests across frameworks.

    Args:
        project_root: Project root directory
        frameworks: Frameworks to run (None = auto-detect)
        parallel: Run frameworks in parallel
        coverage: Collect coverage data
        **kwargs: Additional options

    Returns:
        MultiFrameworkResult
    """
    config = ExecutorConfig(
        project_root=project_root or Path.cwd(),
        parallel_frameworks=parallel,
        coverage=coverage,
    )

    executor = MultiFrameworkExecutor(config)
    return await executor.execute(frameworks=frameworks, **kwargs)
```
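
For orientation, a minimal usage sketch of the `execute_tests` convenience function defined above. The import path mirrors the wheel layout; whether the package intends this module to be imported directly (rather than via a re-export such as `superqode.superqe`) is an assumption.

```python
# Hypothetical usage sketch: run auto-detected test frameworks for a project
# and print the aggregated counts. Import path inferred from the wheel layout.
import asyncio
from pathlib import Path

from superqode.superqe.frameworks.executor import execute_tests


async def main() -> None:
    result = await execute_tests(
        project_root=Path("."),  # auto-detect frameworks under the current directory
        parallel=True,           # run detected frameworks concurrently
        coverage=False,
    )
    print(f"frameworks: {result.frameworks_run}")
    print(f"passed {result.passed}/{result.total}, failed {result.failed}, "
          f"skipped {result.skipped}, errors {result.errors}")


if __name__ == "__main__":
    asyncio.run(main())
```

Auto-detection defers to `detect_framework` from the sibling `registry` module; if nothing is detected, `execute` returns an empty result rather than raising.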
superqode/superqe/frameworks/javascript.py
@@ -0,0 +1,409 @@
```python
"""
JavaScript Test Framework Implementations.

Supports:
- Jest
- Mocha
- Vitest
- Jasmine
- AVA
"""

from datetime import datetime
from pathlib import Path
from typing import List, Optional
import json
import re

from .base import (
    TestFramework,
    FrameworkConfig,
    TestResult,
    TestSuite,
    ExecutionResult,
    TestStatus,
)


class JestFramework(TestFramework):
    """Jest test framework."""

    NAME = "jest"
    DISPLAY_NAME = "Jest"
    LANGUAGE = "javascript"
    FILE_PATTERNS = ["**/*.test.js", "**/*.test.ts", "**/*.spec.js", "**/*.spec.ts"]

    @classmethod
    def detect(cls, project_root: Path) -> bool:
        """Detect if Jest is used."""
        # Check package.json
        package_json = project_root / "package.json"
        if package_json.exists():
            try:
                data = json.loads(package_json.read_text())
                deps = {**data.get("dependencies", {}), **data.get("devDependencies", {})}
                if "jest" in deps:
                    return True
                # Check scripts
                scripts = data.get("scripts", {})
                if any("jest" in str(v) for v in scripts.values()):
                    return True
            except Exception:
                pass

        # Check for jest.config.js
        if (project_root / "jest.config.js").exists():
            return True
        if (project_root / "jest.config.ts").exists():
            return True

        return False

    async def discover(self) -> List[TestSuite]:
        """Discover Jest tests."""
        command = ["npx", "jest", "--listTests", "--json"]

        exit_code, stdout, stderr = await self.run_command(command, timeout=60)

        suites = []
        try:
            test_files = json.loads(stdout)
            for file_path in test_files:
                suites.append(
                    TestSuite(
                        name=Path(file_path).stem,
                        file_path=file_path,
                        tests=[],  # Jest doesn't list individual tests easily
                    )
                )
        except json.JSONDecodeError:
            pass

        return suites

    async def execute(self, tests: Optional[List[str]] = None, **kwargs) -> ExecutionResult:
        """Execute Jest tests."""
        started_at = datetime.now()

        command = ["npx", "jest", "--json"]

        if self.config.verbose:
            command.append("--verbose")
        if self.config.fail_fast:
            command.append("--bail")
        if self.config.coverage:
            command.append("--coverage")
        if self.config.parallel and self.config.workers > 1:
            command.extend(["--maxWorkers", str(self.config.workers)])

        if tests:
            command.extend(tests)

        exit_code, stdout, stderr = await self.run_command(command)

        ended_at = datetime.now()
        duration = (ended_at - started_at).total_seconds()

        # Parse JSON output
        test_results = []
        total = passed = failed = skipped = 0
        coverage = None

        try:
            # Find JSON in output (Jest outputs JSON after other text)
            json_match = re.search(r'\{[\s\S]*"numTotalTests"[\s\S]*\}', stdout)
            if json_match:
                data = json.loads(json_match.group())

                total = data.get("numTotalTests", 0)
                passed = data.get("numPassedTests", 0)
                failed = data.get("numFailedTests", 0)
                skipped = data.get("numPendingTests", 0)

                # Parse individual test results
                for result in data.get("testResults", []):
                    for assertion in result.get("assertionResults", []):
                        status_map = {
                            "passed": TestStatus.PASSED,
                            "failed": TestStatus.FAILED,
                            "pending": TestStatus.SKIPPED,
                            "skipped": TestStatus.SKIPPED,
                        }
                        status = status_map.get(assertion.get("status", "failed"), TestStatus.ERROR)

                        test_results.append(
                            TestResult(
                                name=assertion.get("fullName", ""),
                                status=status,
                                duration_ms=assertion.get("duration", 0),
                                file_path=result.get("name"),
                                error_message="\n".join(assertion.get("failureMessages", [])),
                            )
                        )

                # Get coverage if available
                cov_data = data.get("coverageMap", {})
                if cov_data:
                    # Calculate overall coverage
                    total_statements = 0
                    covered_statements = 0
                    for file_cov in cov_data.values():
                        s = file_cov.get("s", {})
                        total_statements += len(s)
                        covered_statements += sum(1 for v in s.values() if v > 0)
                    if total_statements > 0:
                        coverage = (covered_statements / total_statements) * 100

        except json.JSONDecodeError:
            pass

        return ExecutionResult(
            framework=self.NAME,
            started_at=started_at,
            ended_at=ended_at,
            duration_seconds=duration,
            total=total,
            passed=passed,
            failed=failed,
            skipped=skipped,
            errors=0,
            test_results=test_results,
            coverage_percentage=coverage,
            output=stdout,
            error_output=stderr,
        )

    def parse_results(self, output: str) -> List[TestResult]:
        """Parse Jest output."""
        # Handled in execute() with JSON parsing
        return []

    def get_command(self, tests: Optional[List[str]] = None) -> List[str]:
        """Get Jest command."""
        command = ["npx", "jest"]
        if tests:
            command.extend(tests)
        return command


class MochaFramework(TestFramework):
    """Mocha test framework."""

    NAME = "mocha"
    DISPLAY_NAME = "Mocha"
    LANGUAGE = "javascript"
    FILE_PATTERNS = ["**/*.test.js", "**/*.spec.js", "test/**/*.js"]

    @classmethod
    def detect(cls, project_root: Path) -> bool:
        """Detect if Mocha is used."""
        package_json = project_root / "package.json"
        if package_json.exists():
            try:
                data = json.loads(package_json.read_text())
                deps = {**data.get("dependencies", {}), **data.get("devDependencies", {})}
                if "mocha" in deps:
                    return True
            except Exception:
                pass

        if (project_root / ".mocharc.js").exists():
            return True
        if (project_root / ".mocharc.json").exists():
            return True

        return False

    async def discover(self) -> List[TestSuite]:
        """Discover Mocha tests."""
        suites = []
        for pattern in self.FILE_PATTERNS:
            for file_path in self.config.project_root.glob(pattern):
                suites.append(TestSuite(name=file_path.stem, file_path=str(file_path), tests=[]))
        return suites

    async def execute(self, tests: Optional[List[str]] = None, **kwargs) -> ExecutionResult:
        """Execute Mocha tests."""
        started_at = datetime.now()

        command = ["npx", "mocha", "--reporter", "json"]

        if self.config.fail_fast:
            command.append("--bail")
        if self.config.parallel and self.config.workers > 1:
            command.extend(["--parallel", "--jobs", str(self.config.workers)])

        if tests:
            command.extend(tests)

        exit_code, stdout, stderr = await self.run_command(command)

        ended_at = datetime.now()
        duration = (ended_at - started_at).total_seconds()

        test_results = []
        total = passed = failed = skipped = 0

        try:
            data = json.loads(stdout)
            stats = data.get("stats", {})
            total = stats.get("tests", 0)
            passed = stats.get("passes", 0)
            failed = stats.get("failures", 0)
            skipped = stats.get("pending", 0)

            for test in data.get("passes", []):
                test_results.append(
                    TestResult(
                        name=test.get("fullTitle", ""),
                        status=TestStatus.PASSED,
                        duration_ms=test.get("duration", 0),
                        file_path=test.get("file"),
                    )
                )

            for test in data.get("failures", []):
                test_results.append(
                    TestResult(
                        name=test.get("fullTitle", ""),
                        status=TestStatus.FAILED,
                        duration_ms=test.get("duration", 0),
                        file_path=test.get("file"),
                        error_message=test.get("err", {}).get("message"),
                        stack_trace=test.get("err", {}).get("stack"),
                    )
                )

            for test in data.get("pending", []):
                test_results.append(
                    TestResult(
                        name=test.get("fullTitle", ""),
                        status=TestStatus.SKIPPED,
                        duration_ms=0,
                        file_path=test.get("file"),
                    )
                )

        except json.JSONDecodeError:
            pass

        return ExecutionResult(
            framework=self.NAME,
            started_at=started_at,
            ended_at=ended_at,
            duration_seconds=duration,
            total=total,
            passed=passed,
            failed=failed,
            skipped=skipped,
            errors=0,
            test_results=test_results,
            output=stdout,
            error_output=stderr,
        )

    def parse_results(self, output: str) -> List[TestResult]:
        """Parse Mocha output."""
        return []

    def get_command(self, tests: Optional[List[str]] = None) -> List[str]:
        """Get Mocha command."""
        command = ["npx", "mocha"]
        if tests:
            command.extend(tests)
        return command


class VitestFramework(TestFramework):
    """Vitest test framework."""

    NAME = "vitest"
    DISPLAY_NAME = "Vitest"
    LANGUAGE = "javascript"
    FILE_PATTERNS = ["**/*.test.ts", "**/*.spec.ts", "**/*.test.js", "**/*.spec.js"]

    @classmethod
    def detect(cls, project_root: Path) -> bool:
        """Detect if Vitest is used."""
        package_json = project_root / "package.json"
        if package_json.exists():
            try:
                data = json.loads(package_json.read_text())
                deps = {**data.get("dependencies", {}), **data.get("devDependencies", {})}
                if "vitest" in deps:
                    return True
            except Exception:
                pass

        if (project_root / "vitest.config.ts").exists():
            return True
        if (project_root / "vitest.config.js").exists():
            return True

        return False

    async def discover(self) -> List[TestSuite]:
        """Discover Vitest tests."""
        suites = []
        for pattern in self.FILE_PATTERNS:
            for file_path in self.config.project_root.glob(pattern):
                suites.append(TestSuite(name=file_path.stem, file_path=str(file_path), tests=[]))
        return suites

    async def execute(self, tests: Optional[List[str]] = None, **kwargs) -> ExecutionResult:
        """Execute Vitest tests."""
        started_at = datetime.now()

        command = ["npx", "vitest", "run", "--reporter=json"]

        if self.config.coverage:
            command.append("--coverage")

        if tests:
            command.extend(tests)

        exit_code, stdout, stderr = await self.run_command(command)

        ended_at = datetime.now()
        duration = (ended_at - started_at).total_seconds()

        test_results = []
        total = passed = failed = skipped = 0

        try:
            # Find JSON output
            json_match = re.search(r'\{[\s\S]*"numTotalTests"[\s\S]*\}', stdout)
            if json_match:
                data = json.loads(json_match.group())
                total = data.get("numTotalTests", 0)
                passed = data.get("numPassedTests", 0)
                failed = data.get("numFailedTests", 0)
                skipped = data.get("numPendingTests", 0)
        except json.JSONDecodeError:
            pass

        return ExecutionResult(
            framework=self.NAME,
            started_at=started_at,
            ended_at=ended_at,
            duration_seconds=duration,
            total=total,
            passed=passed,
            failed=failed,
            skipped=skipped,
            errors=0,
            test_results=test_results,
            output=stdout,
            error_output=stderr,
        )

    def parse_results(self, output: str) -> List[TestResult]:
        """Parse Vitest output."""
        return []

    def get_command(self, tests: Optional[List[str]] = None) -> List[str]:
        """Get Vitest command."""
        command = ["npx", "vitest", "run"]
        if tests:
            command.extend(tests)
        return command
```
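
As a quick illustration of the `detect()` logic above, a sketch that builds a throwaway project with `jest` in `devDependencies` and checks that `JestFramework.detect` reports it; the import path is again inferred from the wheel layout and is an assumption.

```python
# Hypothetical sketch of the package.json-based detection used by JestFramework.
import json
import tempfile
from pathlib import Path

from superqode.superqe.frameworks.javascript import JestFramework

with tempfile.TemporaryDirectory() as tmp:
    root = Path(tmp)
    # A minimal package.json declaring jest as a dev dependency.
    (root / "package.json").write_text(json.dumps({
        "name": "demo",
        "devDependencies": {"jest": "^29.0.0"},
    }))
    print(JestFramework.detect(root))  # True: "jest" appears in devDependencies
```

The same pattern (dependency check first, then config-file fallback) applies to the Mocha and Vitest classes, which look for `.mocharc.*` and `vitest.config.*` respectively.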