portacode 0.3.4.dev0__py3-none-any.whl → 1.4.11.dev0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of portacode might be problematic. Click here for more details.
- portacode/_version.py +16 -3
- portacode/cli.py +155 -19
- portacode/connection/client.py +152 -12
- portacode/connection/handlers/WEBSOCKET_PROTOCOL.md +1577 -0
- portacode/connection/handlers/__init__.py +43 -1
- portacode/connection/handlers/base.py +122 -18
- portacode/connection/handlers/chunked_content.py +244 -0
- portacode/connection/handlers/diff_handlers.py +603 -0
- portacode/connection/handlers/file_handlers.py +902 -17
- portacode/connection/handlers/project_aware_file_handlers.py +226 -0
- portacode/connection/handlers/project_state/README.md +312 -0
- portacode/connection/handlers/project_state/__init__.py +92 -0
- portacode/connection/handlers/project_state/file_system_watcher.py +179 -0
- portacode/connection/handlers/project_state/git_manager.py +1502 -0
- portacode/connection/handlers/project_state/handlers.py +875 -0
- portacode/connection/handlers/project_state/manager.py +1331 -0
- portacode/connection/handlers/project_state/models.py +108 -0
- portacode/connection/handlers/project_state/utils.py +50 -0
- portacode/connection/handlers/project_state_handlers.py +45 -0
- portacode/connection/handlers/proxmox_infra.py +307 -0
- portacode/connection/handlers/registry.py +53 -10
- portacode/connection/handlers/session.py +705 -53
- portacode/connection/handlers/system_handlers.py +142 -8
- portacode/connection/handlers/tab_factory.py +389 -0
- portacode/connection/handlers/terminal_handlers.py +150 -11
- portacode/connection/handlers/update_handler.py +61 -0
- portacode/connection/multiplex.py +60 -2
- portacode/connection/terminal.py +695 -28
- portacode/keypair.py +63 -1
- portacode/link_capture/__init__.py +38 -0
- portacode/link_capture/__pycache__/__init__.cpython-311.pyc +0 -0
- portacode/link_capture/bin/__pycache__/link_capture_wrapper.cpython-311.pyc +0 -0
- portacode/link_capture/bin/elinks +3 -0
- portacode/link_capture/bin/gio-open +3 -0
- portacode/link_capture/bin/gnome-open +3 -0
- portacode/link_capture/bin/gvfs-open +3 -0
- portacode/link_capture/bin/kde-open +3 -0
- portacode/link_capture/bin/kfmclient +3 -0
- portacode/link_capture/bin/link_capture_exec.sh +11 -0
- portacode/link_capture/bin/link_capture_wrapper.py +75 -0
- portacode/link_capture/bin/links +3 -0
- portacode/link_capture/bin/links2 +3 -0
- portacode/link_capture/bin/lynx +3 -0
- portacode/link_capture/bin/mate-open +3 -0
- portacode/link_capture/bin/netsurf +3 -0
- portacode/link_capture/bin/sensible-browser +3 -0
- portacode/link_capture/bin/w3m +3 -0
- portacode/link_capture/bin/x-www-browser +3 -0
- portacode/link_capture/bin/xdg-open +3 -0
- portacode/logging_categories.py +140 -0
- portacode/pairing.py +103 -0
- portacode/service.py +6 -0
- portacode/static/js/test-ntp-clock.html +63 -0
- portacode/static/js/utils/ntp-clock.js +232 -0
- portacode/utils/NTP_ARCHITECTURE.md +136 -0
- portacode/utils/__init__.py +1 -0
- portacode/utils/diff_apply.py +456 -0
- portacode/utils/diff_renderer.py +371 -0
- portacode/utils/ntp_clock.py +65 -0
- portacode-1.4.11.dev0.dist-info/METADATA +298 -0
- portacode-1.4.11.dev0.dist-info/RECORD +97 -0
- {portacode-0.3.4.dev0.dist-info → portacode-1.4.11.dev0.dist-info}/WHEEL +1 -1
- portacode-1.4.11.dev0.dist-info/top_level.txt +3 -0
- test_modules/README.md +296 -0
- test_modules/__init__.py +1 -0
- test_modules/test_device_online.py +44 -0
- test_modules/test_file_operations.py +743 -0
- test_modules/test_git_status_ui.py +370 -0
- test_modules/test_login_flow.py +50 -0
- test_modules/test_navigate_testing_folder.py +361 -0
- test_modules/test_play_store_screenshots.py +294 -0
- test_modules/test_terminal_buffer_performance.py +261 -0
- test_modules/test_terminal_interaction.py +80 -0
- test_modules/test_terminal_loading_race_condition.py +95 -0
- test_modules/test_terminal_start.py +56 -0
- testing_framework/.env.example +21 -0
- testing_framework/README.md +334 -0
- testing_framework/__init__.py +17 -0
- testing_framework/cli.py +326 -0
- testing_framework/core/__init__.py +1 -0
- testing_framework/core/base_test.py +336 -0
- testing_framework/core/cli_manager.py +177 -0
- testing_framework/core/hierarchical_runner.py +577 -0
- testing_framework/core/playwright_manager.py +520 -0
- testing_framework/core/runner.py +447 -0
- testing_framework/core/shared_cli_manager.py +234 -0
- testing_framework/core/test_discovery.py +112 -0
- testing_framework/requirements.txt +12 -0
- portacode-0.3.4.dev0.dist-info/METADATA +0 -236
- portacode-0.3.4.dev0.dist-info/RECORD +0 -27
- portacode-0.3.4.dev0.dist-info/top_level.txt +0 -1
- {portacode-0.3.4.dev0.dist-info → portacode-1.4.11.dev0.dist-info}/entry_points.txt +0 -0
- {portacode-0.3.4.dev0.dist-info → portacode-1.4.11.dev0.dist-info/licenses}/LICENSE +0 -0
|
@@ -0,0 +1,334 @@
|
|
|
1
|
+
# Modular Testing Framework
|
|
2
|
+
|
|
3
|
+
A comprehensive, modular testing framework designed specifically for Portacode projects that combines CLI connection management with Playwright-based web automation testing.
|
|
4
|
+
|
|
5
|
+
## š Features
|
|
6
|
+
|
|
7
|
+
- **Modular Architecture**: Organize tests by categories and tags
|
|
8
|
+
- **CLI Integration**: Automatically connects to Portacode CLI in background threads
|
|
9
|
+
- **Playwright Automation**: Full web browser automation with comprehensive recording
|
|
10
|
+
- **Selective Execution**: Run all tests, specific categories, tags, or individual tests
|
|
11
|
+
- **Comprehensive Recording**: Screenshots, videos, network logs, console output, and traces
|
|
12
|
+
- **Rich Reporting**: HTML and JSON reports with detailed test results
|
|
13
|
+
|
|
14
|
+
## š Quick Start
|
|
15
|
+
|
|
16
|
+
### 1. Installation
|
|
17
|
+
|
|
18
|
+
```bash
|
|
19
|
+
# Install testing framework dependencies
|
|
20
|
+
pip install -r requirements-testing.txt
|
|
21
|
+
|
|
22
|
+
# Install Playwright browsers
|
|
23
|
+
python -m playwright install
|
|
24
|
+
```
|
|
25
|
+
|
|
26
|
+
### 2. Environment Setup
|
|
27
|
+
|
|
28
|
+
Copy the example environment file and configure it:
|
|
29
|
+
|
|
30
|
+
```bash
|
|
31
|
+
cp .env.example .env
|
|
32
|
+
```
|
|
33
|
+
|
|
34
|
+
Edit `.env` with your configuration:
|
|
35
|
+
|
|
36
|
+
```env
|
|
37
|
+
TEST_BASE_URL=http://192.168.1.188:8001/
|
|
38
|
+
TEST_USERNAME=your_username
|
|
39
|
+
TEST_PASSWORD=your_password
|
|
40
|
+
```
|
|
41
|
+
|
|
42
|
+
### 3. Run Tests
|
|
43
|
+
|
|
44
|
+
```bash
|
|
45
|
+
# List all available tests
|
|
46
|
+
python -m testing_framework.cli list-tests
|
|
47
|
+
|
|
48
|
+
# Run all tests
|
|
49
|
+
python -m testing_framework.cli run-all
|
|
50
|
+
|
|
51
|
+
# Run tests by category
|
|
52
|
+
python -m testing_framework.cli run-category smoke
|
|
53
|
+
|
|
54
|
+
# Run tests by tags
|
|
55
|
+
python -m testing_framework.cli run-tags login authentication
|
|
56
|
+
|
|
57
|
+
# Run specific tests
|
|
58
|
+
python -m testing_framework.cli run-tests login_flow_test device_connection_test
|
|
59
|
+
|
|
60
|
+
# Run tests matching a pattern
|
|
61
|
+
python -m testing_framework.cli run-pattern "login.*"
|
|
62
|
+
```
|
|
63
|
+
|
|
64
|
+
## š Project Structure
|
|
65
|
+
|
|
66
|
+
```
|
|
67
|
+
testing_framework/
|
|
68
|
+
āāā __init__.py # Framework exports
|
|
69
|
+
āāā cli.py # Command-line interface
|
|
70
|
+
āāā core/
|
|
71
|
+
āāā __init__.py
|
|
72
|
+
āāā base_test.py # Base test class and categories
|
|
73
|
+
āāā cli_manager.py # CLI connection management
|
|
74
|
+
āāā playwright_manager.py # Playwright session management
|
|
75
|
+
āāā test_discovery.py # Test discovery system
|
|
76
|
+
āāā runner.py # Test runner with reporting
|
|
77
|
+
|
|
78
|
+
test_modules/
|
|
79
|
+
āāā __init__.py
|
|
80
|
+
āāā test_login_flow.py # Login flow test example
|
|
81
|
+
āāā test_device_connection.py # Device connection test
|
|
82
|
+
āāā test_ui_navigation.py # UI navigation test
|
|
83
|
+
āāā test_performance_check.py # Performance test example
|
|
84
|
+
```
|
|
85
|
+
|
|
86
|
+
## š§ Framework Architecture
|
|
87
|
+
|
|
88
|
+
### Test Execution Flow
|
|
89
|
+
|
|
90
|
+
1. **CLI Connection**: Each test starts by connecting to Portacode CLI in a background thread
|
|
91
|
+
2. **Playwright Session**: Browser session starts and automatically logs in using provided credentials
|
|
92
|
+
3. **Test Execution**: Your test logic runs with access to both CLI and browser automation
|
|
93
|
+
4. **Recording**: Everything is recorded - screenshots, videos, network traffic, console logs
|
|
94
|
+
5. **Cleanup**: Resources are properly cleaned up and recordings are saved
|
|
95
|
+
|
|
96
|
+
### Test Categories
|
|
97
|
+
|
|
98
|
+
- `SMOKE`: Basic functionality tests
|
|
99
|
+
- `INTEGRATION`: Cross-system integration tests
|
|
100
|
+
- `UI`: User interface tests
|
|
101
|
+
- `API`: API endpoint tests
|
|
102
|
+
- `PERFORMANCE`: Performance and load tests
|
|
103
|
+
- `SECURITY`: Security-focused tests
|
|
104
|
+
- `CUSTOM`: Custom test categories
|
|
105
|
+
|
|
106
|
+
## āļø Writing Custom Tests
|
|
107
|
+
|
|
108
|
+
### Basic Test Structure
|
|
109
|
+
|
|
110
|
+
```python
|
|
111
|
+
from testing_framework.core.base_test import BaseTest, TestResult, TestCategory
|
|
112
|
+
|
|
113
|
+
class MyCustomTest(BaseTest):
|
|
114
|
+
def __init__(self):
|
|
115
|
+
super().__init__(
|
|
116
|
+
name="my_custom_test",
|
|
117
|
+
category=TestCategory.SMOKE,
|
|
118
|
+
description="Description of what this test does",
|
|
119
|
+
tags=["tag1", "tag2"]
|
|
120
|
+
)
|
|
121
|
+
|
|
122
|
+
async def run(self) -> TestResult:
|
|
123
|
+
# Your test logic here
|
|
124
|
+
page = self.playwright_manager.page
|
|
125
|
+
|
|
126
|
+
# Take screenshots
|
|
127
|
+
await self.playwright_manager.take_screenshot("test_step_1")
|
|
128
|
+
|
|
129
|
+
# Interact with the page
|
|
130
|
+
await page.click("button")
|
|
131
|
+
|
|
132
|
+
# Check CLI connection
|
|
133
|
+
if self.cli_manager.is_connection_active():
|
|
134
|
+
# CLI is connected and working
|
|
135
|
+
pass
|
|
136
|
+
|
|
137
|
+
# Return test result
|
|
138
|
+
return TestResult(self.name, True, "Test passed!")
|
|
139
|
+
|
|
140
|
+
async def setup(self):
|
|
141
|
+
# Optional setup code
|
|
142
|
+
pass
|
|
143
|
+
|
|
144
|
+
async def teardown(self):
|
|
145
|
+
# Optional cleanup code
|
|
146
|
+
pass
|
|
147
|
+
```
|
|
148
|
+
|
|
149
|
+
### Available Managers
|
|
150
|
+
|
|
151
|
+
#### CLI Manager (`self.cli_manager`)
|
|
152
|
+
|
|
153
|
+
- `is_connection_active()`: Check if CLI connection is active
|
|
154
|
+
- `get_connection_info()`: Get connection details
|
|
155
|
+
- `get_log_content()`: Get CLI output logs
|
|
156
|
+
|
|
157
|
+
#### Playwright Manager (`self.playwright_manager`)
|
|
158
|
+
|
|
159
|
+
- `page`: Direct access to Playwright page object
|
|
160
|
+
- `take_screenshot(name)`: Take named screenshot
|
|
161
|
+
- `log_action(type, details)`: Log custom actions
|
|
162
|
+
- `get_recordings_info()`: Get recording information
|
|
163
|
+
|
|
164
|
+
## š Test Output
|
|
165
|
+
|
|
166
|
+
### Directory Structure
|
|
167
|
+
|
|
168
|
+
```
|
|
169
|
+
test_results/
|
|
170
|
+
āāā run_20241201_143022/
|
|
171
|
+
āāā summary.json # Test run summary
|
|
172
|
+
āāā report.html # HTML report
|
|
173
|
+
āāā test_run.log # Framework logs
|
|
174
|
+
āāā cli_logs/ # CLI output logs
|
|
175
|
+
ā āāā test_name_timestamp_cli.log
|
|
176
|
+
āāā recordings/ # Playwright recordings
|
|
177
|
+
āāā test_name_timestamp/
|
|
178
|
+
āāā recording.webm # Video recording
|
|
179
|
+
āāā trace.zip # Playwright trace
|
|
180
|
+
āāā network.har # Network logs
|
|
181
|
+
āāā console.log # Browser console
|
|
182
|
+
āāā actions.json # Logged actions
|
|
183
|
+
āāā screenshots/ # All screenshots
|
|
184
|
+
āāā summary.json # Recording summary
|
|
185
|
+
```
|
|
186
|
+
|
|
187
|
+
### HTML Report
|
|
188
|
+
|
|
189
|
+
The framework generates comprehensive HTML reports with:
|
|
190
|
+
- Test run statistics and timeline
|
|
191
|
+
- Pass/fail status for each test
|
|
192
|
+
- Screenshots and recordings links
|
|
193
|
+
- Error messages and logs
|
|
194
|
+
- Performance metrics
|
|
195
|
+
|
|
196
|
+
## šļø Configuration Options
|
|
197
|
+
|
|
198
|
+
### Environment Variables
|
|
199
|
+
|
|
200
|
+
| Variable | Description | Default |
|
|
201
|
+
|----------|-------------|---------|
|
|
202
|
+
| `TEST_BASE_URL` | Application URL | `http://192.168.1.188:8001/` |
|
|
203
|
+
| `TEST_USERNAME` | Login username | Required |
|
|
204
|
+
| `TEST_PASSWORD` | Login password | Required |
|
|
205
|
+
| `TEST_BROWSER` | Browser type | `chromium` |
|
|
206
|
+
| `TEST_HEADLESS` | Headless mode | `false` |
|
|
207
|
+
| `TEST_RESULTS_DIR` | Results directory | `test_results` |
|
|
208
|
+
| `TEST_RECORDINGS_DIR` | Recordings directory | `test_recordings` |
|
|
209
|
+
| `TEST_LOGS_DIR` | Logs directory | `test_results` |
|
|
210
|
+
|
|
211
|
+
### Command Line Options
|
|
212
|
+
|
|
213
|
+
```bash
|
|
214
|
+
# Enable debug logging
|
|
215
|
+
python -m testing_framework.cli --debug run-all
|
|
216
|
+
|
|
217
|
+
# All commands support debug mode
|
|
218
|
+
python -m testing_framework.cli --debug list-tests
|
|
219
|
+
```
|
|
220
|
+
|
|
221
|
+
## š Debugging and Troubleshooting
|
|
222
|
+
|
|
223
|
+
### Common Issues
|
|
224
|
+
|
|
225
|
+
1. **CLI Connection Fails**
|
|
226
|
+
- Check that `portacode.cli` module is available
|
|
227
|
+
- Verify CLI credentials and connectivity
|
|
228
|
+
- Check CLI logs in `cli_logs/` directory
|
|
229
|
+
|
|
230
|
+
2. **Playwright Login Fails**
|
|
231
|
+
- Verify `TEST_USERNAME` and `TEST_PASSWORD` in `.env`
|
|
232
|
+
- Check login form selectors in `playwright_manager.py`
|
|
233
|
+
- Review screenshots in recordings directory
|
|
234
|
+
|
|
235
|
+
3. **Tests Not Discovered**
|
|
236
|
+
- Ensure test files are in `test_modules/` or `tests/` directories
|
|
237
|
+
- Test files must start with `test_` or end with `_test.py`
|
|
238
|
+
- Test classes must inherit from `BaseTest`
|
|
239
|
+
|
|
240
|
+
### Debug Mode
|
|
241
|
+
|
|
242
|
+
Run with debug logging to see detailed execution information:
|
|
243
|
+
|
|
244
|
+
```bash
|
|
245
|
+
python -m testing_framework.cli --debug run-all
|
|
246
|
+
```
|
|
247
|
+
|
|
248
|
+
### Manual Debugging
|
|
249
|
+
|
|
250
|
+
Access the recordings directory to inspect:
|
|
251
|
+
- Video recordings of test execution
|
|
252
|
+
- Screenshots at each step
|
|
253
|
+
- Network traffic logs
|
|
254
|
+
- Browser console output
|
|
255
|
+
- Detailed action logs
|
|
256
|
+
|
|
257
|
+
## š¤ Contributing
|
|
258
|
+
|
|
259
|
+
### Adding New Test Categories
|
|
260
|
+
|
|
261
|
+
1. Add new category to `TestCategory` enum in `base_test.py`
|
|
262
|
+
2. Update CLI command choices in `cli.py`
|
|
263
|
+
|
|
264
|
+
### Extending Managers
|
|
265
|
+
|
|
266
|
+
- **CLI Manager**: Add new CLI interaction methods
|
|
267
|
+
- **Playwright Manager**: Add new browser automation helpers
|
|
268
|
+
- **Test Runner**: Add new execution modes or reporting formats
|
|
269
|
+
|
|
270
|
+
### Example: Custom Test Category
|
|
271
|
+
|
|
272
|
+
```python
|
|
273
|
+
# In base_test.py
|
|
274
|
+
class TestCategory(Enum):
|
|
275
|
+
# ... existing categories ...
|
|
276
|
+
ACCESSIBILITY = "accessibility"
|
|
277
|
+
|
|
278
|
+
# In your test
|
|
279
|
+
class AccessibilityTest(BaseTest):
|
|
280
|
+
def __init__(self):
|
|
281
|
+
super().__init__(
|
|
282
|
+
name="accessibility_test",
|
|
283
|
+
category=TestCategory.ACCESSIBILITY,
|
|
284
|
+
description="Check accessibility compliance",
|
|
285
|
+
tags=["a11y", "compliance"]
|
|
286
|
+
)
|
|
287
|
+
```
|
|
288
|
+
|
|
289
|
+
## š Advanced Usage
|
|
290
|
+
|
|
291
|
+
### Programmatic Test Execution
|
|
292
|
+
|
|
293
|
+
```python
|
|
294
|
+
import asyncio
|
|
295
|
+
from testing_framework.core.runner import TestRunner
|
|
296
|
+
from testing_framework.core.base_test import TestCategory
|
|
297
|
+
|
|
298
|
+
async def run_custom_suite():
|
|
299
|
+
runner = TestRunner()
|
|
300
|
+
|
|
301
|
+
# Run specific category
|
|
302
|
+
results = await runner.run_tests_by_category(TestCategory.SMOKE)
|
|
303
|
+
|
|
304
|
+
# Process results
|
|
305
|
+
if results['statistics']['failed'] > 0:
|
|
306
|
+
print("Some tests failed!")
|
|
307
|
+
|
|
308
|
+
return results
|
|
309
|
+
|
|
310
|
+
# Run it
|
|
311
|
+
results = asyncio.run(run_custom_suite())
|
|
312
|
+
```
|
|
313
|
+
|
|
314
|
+
### Custom Reporting
|
|
315
|
+
|
|
316
|
+
```python
|
|
317
|
+
from testing_framework.core.runner import TestRunner
|
|
318
|
+
|
|
319
|
+
class CustomRunner(TestRunner):
|
|
320
|
+
async def _generate_custom_report(self, results):
|
|
321
|
+
# Your custom reporting logic
|
|
322
|
+
pass
|
|
323
|
+
```
|
|
324
|
+
|
|
325
|
+
## š Security Considerations
|
|
326
|
+
|
|
327
|
+
- Store credentials in `.env` file, never in code
|
|
328
|
+
- Add `.env` to `.gitignore`
|
|
329
|
+
- Use environment-specific credential management
|
|
330
|
+
- Review recordings before sharing (may contain sensitive data)
|
|
331
|
+
|
|
332
|
+
## š License
|
|
333
|
+
|
|
334
|
+
This testing framework is part of the Portacode project and follows the same license terms.
|
|
@@ -0,0 +1,17 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Modular Testing Framework for Portacode
|
|
3
|
+
|
|
4
|
+
A comprehensive testing framework that supports:
|
|
5
|
+
- CLI connection management with background threading
|
|
6
|
+
- Playwright-based web automation testing
|
|
7
|
+
- Modular test organization with categories
|
|
8
|
+
- Selective test execution
|
|
9
|
+
- Comprehensive recording and logging
|
|
10
|
+
"""
|
|
11
|
+
|
|
12
|
+
from .core.base_test import BaseTest, TestCategory
|
|
13
|
+
from .core.runner import TestRunner
|
|
14
|
+
from .core.cli_manager import CLIManager
|
|
15
|
+
from .core.playwright_manager import PlaywrightManager
|
|
16
|
+
|
|
17
|
+
__all__ = ['BaseTest', 'TestCategory', 'TestRunner', 'CLIManager', 'PlaywrightManager']
|
testing_framework/cli.py
ADDED
|
@@ -0,0 +1,326 @@
|
|
|
1
|
+
"""Command-line interface for the testing framework."""
|
|
2
|
+
|
|
3
|
+
import asyncio
|
|
4
|
+
import click
|
|
5
|
+
import logging
|
|
6
|
+
import sys
|
|
7
|
+
import os
|
|
8
|
+
from pathlib import Path
|
|
9
|
+
|
|
10
|
+
# Load environment variables from .env file if available
|
|
11
|
+
try:
|
|
12
|
+
from dotenv import load_dotenv
|
|
13
|
+
load_dotenv()
|
|
14
|
+
except ImportError:
|
|
15
|
+
pass
|
|
16
|
+
|
|
17
|
+
from .core.hierarchical_runner import HierarchicalTestRunner
|
|
18
|
+
from .core.base_test import TestCategory
|
|
19
|
+
|
|
20
|
+
# Use hierarchical runner as the default
|
|
21
|
+
TestRunner = HierarchicalTestRunner
|
|
22
|
+
|
|
23
|
+
|
|
24
|
+
def setup_logging(debug: bool = False):
    """Setup logging configuration - logs only to files, not console.

    Framework log records are routed to a NullHandler so they never pollute
    the CLI's own progress output; in debug mode everything is additionally
    mirrored into ``framework_debug.log`` in the current directory.
    """
    logging.basicConfig(
        level=logging.DEBUG if debug else logging.INFO,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        handlers=[logging.NullHandler()],
    )

    if debug:
        # Debug mode: attach a file handler so detailed logs are persisted.
        file_handler = logging.FileHandler('framework_debug.log')
        file_handler.setFormatter(
            logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        )
        logging.getLogger().addHandler(file_handler)
|
44
|
+
|
|
45
|
+
|
|
46
|
+
@click.group()
@click.option('--debug', is_flag=True, help='Enable debug logging')
@click.pass_context
def cli(ctx, debug):
    """Modular Testing Framework for Portacode"""
    # Stash the debug flag on the click context so subcommands can read it.
    ctx.ensure_object(dict)
    ctx.obj['debug'] = debug
    # Configure file-only logging before any subcommand executes.
    setup_logging(debug)
|
54
|
+
|
|
55
|
+
|
|
56
|
+
@cli.command()
@click.pass_context
async def list_tests(ctx):
    """List all available tests.

    Prints the total count, the known categories and tags, then one entry
    per discovered test (category, description, tags).
    """
    # NOTE(review): several emoji in the output strings appear
    # mojibake-garbled in this source (e.g. "š") — confirm intended
    # characters against the repository before changing them.
    runner = TestRunner()
    info = runner.list_available_tests()

    click.echo(f"š Found {click.style(str(info['total_tests']), fg='green')} tests")
    click.echo(f"Categories: {click.style(', '.join([cat.value for cat in info['categories']]), fg='blue')}")
    if info['tags']:
        click.echo(f"Tags: {click.style(', '.join(info['tags']), fg='cyan')}")

    click.echo("\nš Available Tests:")
    for name, test_info in info['tests'].items():
        click.echo(f" ⢠{click.style(name, fg='yellow')}")
        click.echo(f" Category: {click.style(test_info['category'], fg='blue')}")
        click.echo(f" Description: {test_info['description']}")
        if test_info['tags']:
            click.echo(f" Tags: {click.style(', '.join(test_info['tags']), fg='cyan')}")
        click.echo()
|
76
|
+
|
|
77
|
+
|
|
78
|
+
@cli.command()
@click.option('--clear', is_flag=True, help='Clear test_results directory before running tests')
@click.pass_context
async def run_all(ctx, clear):
    """Run all available tests with dependency resolution."""
    if clear:
        click.echo("šļø Clearing test_results directory...")
    click.echo("š Running all tests with dependency resolution...")
    # nl=False: the progress callback overwrites this line with '\r' once
    # the shared CLI connection is established.
    click.echo("š Starting shared CLI connection...", nl=False)
    # Actual directory clearing is delegated to the runner via clear_results.
    runner = TestRunner(clear_results=clear)
    results = await runner.run_all_tests(_create_progress_callback())
    _print_results(results)
|
90
|
+
|
|
91
|
+
|
|
92
|
+
@cli.command()
@click.argument('category', type=click.Choice([cat.value for cat in TestCategory]))
@click.option('--clear', is_flag=True, help='Clear test_results directory before running tests')
@click.pass_context
async def run_category(ctx, category, clear):
    """Run tests in a specific category with dependency resolution.

    CATEGORY must be one of the TestCategory enum values.
    """
    # click validated the string against the enum values, so this cannot raise.
    cat_enum = TestCategory(category)
    if clear:
        click.echo("šļø Clearing test_results directory...")
    click.echo(f"šÆ Running {category} tests with dependency resolution...")
    # nl=False: the progress callback overwrites this line via '\r'.
    click.echo("š Starting shared CLI connection...", nl=False)
    runner = TestRunner(clear_results=clear)
    results = await runner.run_tests_by_category(cat_enum, _create_progress_callback())
    _print_results(results)
|
106
|
+
|
|
107
|
+
|
|
108
|
+
@cli.command()
@click.argument('tags', nargs=-1, required=True)
@click.option('--clear', is_flag=True, help='Clear test_results directory before running tests')
@click.pass_context
async def run_tags(ctx, tags, clear):
    """Run tests with specific tags.

    TAGS: one or more tag names; a test matching any of them is selected
    (selection semantics are defined by the runner).
    """
    if clear:
        click.echo("šļø Clearing test_results directory...")
    click.echo(f"š·ļø Running tests with tags: {', '.join(tags)}...")
    # nl=False: the progress callback overwrites this line via '\r'.
    click.echo("š Starting shared CLI connection...", nl=False)
    runner = TestRunner(clear_results=clear)
    # Tags are deduplicated into a set before being handed to the runner.
    results = await runner.run_tests_by_tags(set(tags), _create_progress_callback())
    _print_results(results)
|
121
|
+
|
|
122
|
+
|
|
123
|
+
@cli.command()
@click.argument('names', nargs=-1, required=True)
@click.option('--clear', is_flag=True, help='Clear test_results directory before running tests')
@click.pass_context
async def run_tests(ctx, names, clear):
    """Run specific tests by name.

    NAMES: one or more exact test names as shown by ``list-tests``.
    """
    if clear:
        click.echo("šļø Clearing test_results directory...")
    click.echo(f"š Running tests: {', '.join(names)}...")
    # nl=False: the progress callback overwrites this line via '\r'.
    click.echo("š Starting shared CLI connection...", nl=False)
    runner = TestRunner(clear_results=clear)
    results = await runner.run_tests_by_names(list(names), _create_progress_callback())
    _print_results(results)
|
136
|
+
|
|
137
|
+
|
|
138
|
+
@cli.command()
@click.argument('pattern')
@click.option('--clear', is_flag=True, help='Clear test_results directory before running tests')
@click.pass_context
async def run_pattern(ctx, pattern, clear):
    """Run tests matching a name pattern.

    PATTERN: matched against test names by the runner — presumably a regular
    expression (e.g. ``"login.*"`` in the README); confirm against
    ``run_tests_by_pattern`` in the runner implementation.
    """
    if clear:
        click.echo("šļø Clearing test_results directory...")
    click.echo(f"š Running tests matching pattern: {pattern}...")
    # nl=False: the progress callback overwrites this line via '\r'.
    click.echo("š Starting shared CLI connection...", nl=False)
    runner = TestRunner(clear_results=clear)
    results = await runner.run_tests_by_pattern(pattern, _create_progress_callback())
    _print_results(results)
|
151
|
+
|
|
152
|
+
|
|
153
|
+
@cli.command()
@click.option('--clear', is_flag=True, help='Clear test_results directory before running tests')
@click.pass_context
async def run_hierarchical(ctx, clear):
    """Run all tests with hierarchical dependency resolution.

    Functionally close to ``run-all`` (which also uses
    HierarchicalTestRunner via the module-level alias) but additionally
    prints a summary of tests skipped due to failed dependencies.
    """
    if clear:
        click.echo("šļø Clearing test_results directory...")
    click.echo("š Running tests with dependency resolution...")
    click.echo("š Analyzing test dependencies...")

    runner = HierarchicalTestRunner(clear_results=clear)
    results = await runner.run_all_tests(_create_progress_callback())
    _print_results(results)

    # Show dependency information: skipped tests are identified by the
    # 'Skipped:' marker the runner embeds in the result message.
    if results.get('results'):
        skipped = [r for r in results['results'] if 'Skipped:' in r.get('message', '')]
        if skipped:
            click.echo(f"\nāļø Skipped Tests ({len(skipped)}):")
            for result in skipped:
                click.echo(f" ⢠{result['test_name']}: {result['message']}")
|
174
|
+
|
|
175
|
+
|
|
176
|
+
@cli.command()
@click.argument('names', nargs=-1, required=True)
@click.option('--clear', is_flag=True, help='Clear test_results directory before running tests')
@click.pass_context
async def run_hierarchical_tests(ctx, names, clear):
    """Run specific tests with dependency resolution.

    NAMES: one or more test names; their dependencies are resolved and run
    first by the hierarchical runner.

    Consistency fix: every other run-* command exposes ``--clear`` and
    forwards it as ``clear_results``; this command previously did not.
    The new flag defaults to False, matching the old behavior.
    """
    if clear:
        click.echo("šļø Clearing test_results directory...")
    click.echo(f"š Running tests with dependencies: {', '.join(names)}...")
    click.echo("š Analyzing test dependencies...")

    runner = HierarchicalTestRunner(clear_results=clear)
    results = await runner.run_tests_by_names(list(names), _create_progress_callback())
    _print_results(results)
|
187
|
+
|
|
188
|
+
|
|
189
|
+
def _create_progress_callback():
    """Create a progress callback for clean console output.

    Returns a closure the runner invokes as
    ``callback(event, test, current, total, result=None)``:
    - ``event == 'start'``: prints the one-time "CLI connection established"
      banner, then a no-newline "[i/n] test" line.
    - ``event == 'complete'``: rewrites that line (via '\\r') with the
      pass/fail icon and duration, plus the failure message if any.
    """
    # Closed-over flag: the connection banner is printed at most once.
    cli_connected_shown = False

    def progress_callback(event, test, current, total, result=None):
        nonlocal cli_connected_shown

        if event == 'start':
            # Show CLI connected message only once
            if not cli_connected_shown:
                # NOTE(review): the trailing icon is mojibake-garbled in this
                # source (likely originally a checkmark emoji) — confirm the
                # intended characters against the repository.
                click.echo("\rš Shared CLI connection established ā")
                cli_connected_shown = True
            # Clean one-line output for test start; nl=False so the
            # 'complete' branch can overwrite this line with '\r'.
            click.echo(f"[{current}/{total}] š {test.name}", nl=False)
        elif event == 'complete' and result:
            # Clear the line and show result.
            # NOTE(review): the success/failure icons are mojibake-garbled
            # here (likely originally check/cross emoji) — confirm.
            click.echo(f"\r[{current}/{total}] {'ā' if result.success else 'ā'} {test.name} ({result.duration:.2f}s)", nl=True)
            if not result.success and result.message:
                click.echo(f" āā {click.style(result.message, fg='red')}")

    return progress_callback
|
210
|
+
|
|
211
|
+
|
|
212
|
+
def _print_results(results):
    """Print test results summary with stylish stats formatting.

    Expects the runner's results dict with keys: ``results`` (list of
    per-test dicts), ``statistics`` (totals/success_rate), and ``run_info``
    (duration, run_directory). Prints a header, per-test timing breakdown
    with any numeric artifact stats, the results directory, and details for
    failed tests.

    NOTE(review): several emoji/box-drawing characters in the output strings
    are mojibake-garbled in this source — confirm intended characters
    against the repository before editing them.
    """
    if not results.get('results'):
        click.echo("ā No tests were run")
        return

    stats = results['statistics']
    duration = results['run_info']['duration']

    # Stylish header
    click.echo("\n" + "="*60)
    click.echo(f"{'š TEST RESULTS SUMMARY':^60}")
    click.echo("="*60)

    # Main stats with better formatting
    click.echo(f" š Total Tests: {click.style(str(stats['total_tests']), fg='cyan', bold=True)}")
    click.echo(f" ā±ļø Total Duration: {click.style(f'{duration:.2f}s', fg='blue', bold=True)}")
    click.echo(f" ā Passed: {click.style(str(stats['passed']), fg='green', bold=True)}")
    click.echo(f" ā Failed: {click.style(str(stats['failed']), fg='red', bold=True)}")

    # Success rate with color coding (>80% green, >50% yellow, else red)
    success_rate_text = f"{stats['success_rate']:.2f}%"
    success_rate_color = 'green' if stats['success_rate'] > 80 else 'yellow' if stats['success_rate'] > 50 else 'red'
    click.echo(f" š Success Rate: {click.style(success_rate_text, fg=success_rate_color, bold=True)}")

    # Individual test stats with timing
    click.echo(f"\n{'ā” PERFORMANCE BREAKDOWN':^60}")
    click.echo("-"*60)

    # Show timing stats for each test
    for result in results['results']:
        # NOTE(review): pass/fail icons garbled by mojibake — confirm.
        status_icon = "ā" if result['success'] else "ā"
        test_name = result['test_name'].replace('_test', '').replace('_', ' ').title()
        duration_text = f"{result['duration']:.2f}s"
        # Duration color coding: <5s green, <10s yellow, else red.
        duration_color = 'green' if result['duration'] < 5 else 'yellow' if result['duration'] < 10 else 'red'

        click.echo(f" {status_icon} {test_name:<30} {click.style(duration_text, fg=duration_color)}")

        # Show additional stats if available
        if result.get('artifacts') and isinstance(result['artifacts'], dict):
            # Check both direct artifacts and nested timings/stats
            all_stats = {}

            # Add direct numeric artifacts (nested keys may overwrite these).
            for key, value in result['artifacts'].items():
                if isinstance(value, (int, float)):
                    all_stats[key] = value

            # Add nested timings
            if 'timings' in result['artifacts'] and isinstance(result['artifacts']['timings'], dict):
                for key, value in result['artifacts']['timings'].items():
                    if isinstance(value, (int, float)):
                        all_stats[key] = value

            # Add nested stats
            if 'stats' in result['artifacts'] and isinstance(result['artifacts']['stats'], dict):
                for key, value in result['artifacts']['stats'].items():
                    if isinstance(value, (int, float)):
                        all_stats[key] = value

            # Display all stats
            for key, value in all_stats.items():
                # Heuristic unit choice: keys containing 'ms' or large values
                # are formatted as milliseconds, everything else as seconds.
                if 'ms' in key.lower() or value > 100:
                    formatted_value = f"{value:.2f}ms"
                else:
                    formatted_value = f"{value:.2f}s"

                # Clean up key name for display
                display_key = key.replace('_', ' ').replace('time ms', 'time').title()
                click.echo(f" āā {display_key}: {click.style(formatted_value, fg='blue')}")

    click.echo(f"\nš Results saved to: {click.style(results['run_info']['run_directory'], fg='blue', underline=True)}")

    # Show failed tests summary if any
    failed_tests = [r for r in results['results'] if not r['success']]
    if failed_tests:
        click.echo(f"\n{'ā FAILED TESTS DETAILS':^60}")
        click.echo("-"*60)
        for result in failed_tests:
            click.echo(f" ⢠{click.style(result['test_name'], fg='red', bold=True)}")
            if result.get('message'):
                # Handle multi-line error messages: tree-prefix on the first
                # line, plain indent on continuation lines.
                message_lines = result['message'].split('\n')
                for i, line in enumerate(message_lines):
                    prefix = " āā " if i == 0 else " "
                    click.echo(f"{prefix}{click.style(line, fg='red')}")

        # Add note about trace viewer
        click.echo(f" {click.style('š Trace viewer should open automatically for investigation', fg='yellow')}")

    # Summary footer
    click.echo("="*60)
|
305
|
+
|
|
306
|
+
|
|
307
|
+
# Async command wrapper
def async_command(f):
    """Wrap an async click callback so it can be invoked synchronously.

    The returned wrapper runs the coroutine to completion with
    ``asyncio.run``. ``functools.wraps`` is applied (fix: the original
    wrapper dropped ``__name__``/``__doc__``, which click uses to derive
    command names and help text).

    Args:
        f: an async function (a click command callback).

    Returns:
        A synchronous function with the same signature and metadata as *f*.
    """
    from functools import wraps  # local import keeps the module import block untouched

    @wraps(f)
    def wrapper(*args, **kwargs):
        return asyncio.run(f(*args, **kwargs))
    return wrapper
|
312
|
+
|
|
313
|
+
|
|
314
|
+
# Convert async commands: click invokes callbacks synchronously, so each
# async callback is replaced with a sync wrapper that drives the coroutine
# via async_command (asyncio.run).
for _command in (
    list_tests,
    run_all,
    run_category,
    run_tags,
    run_tests,
    run_pattern,
    run_hierarchical,
    run_hierarchical_tests,
):
    _command.callback = async_command(_command.callback)


if __name__ == '__main__':
    cli()
|
@@ -0,0 +1 @@
|
|
|
1
|
+
"""Core testing framework components."""
|