cursorflow-2.0.4-py3-none-any.whl → cursorflow-2.1.1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cursorflow/__init__.py +24 -17
- cursorflow/cli.py +44 -13
- cursorflow/core/browser_controller.py +119 -13
- cursorflow/core/browser_engine.py +17 -4
- cursorflow/core/cursorflow.py +259 -8
- cursorflow/install_cursorflow_rules.py +4 -4
- cursorflow-2.1.1.dist-info/METADATA +350 -0
- {cursorflow-2.0.4.dist-info → cursorflow-2.1.1.dist-info}/RECORD +12 -12
- cursorflow-2.0.4.dist-info/METADATA +0 -293
- {cursorflow-2.0.4.dist-info → cursorflow-2.1.1.dist-info}/WHEEL +0 -0
- {cursorflow-2.0.4.dist-info → cursorflow-2.1.1.dist-info}/entry_points.txt +0 -0
- {cursorflow-2.0.4.dist-info → cursorflow-2.1.1.dist-info}/licenses/LICENSE +0 -0
- {cursorflow-2.0.4.dist-info → cursorflow-2.1.1.dist-info}/top_level.txt +0 -0
cursorflow/__init__.py
CHANGED
@@ -18,38 +18,45 @@ from .core.log_monitor import LogMonitor
 from .core.error_correlator import ErrorCorrelator

 def _get_version():
-    """Get version from
+    """Get version from pyproject.toml or fallback to default"""
     try:
-
-
-
-
-
-
-
-
-
-
+        # Try to read from pyproject.toml first (most reliable)
+        import tomllib
+        pyproject_path = Path(__file__).parent.parent / "pyproject.toml"
+        if pyproject_path.exists():
+            with open(pyproject_path, 'rb') as f:
+                data = tomllib.load(f)
+                return data["project"]["version"]
+    except Exception:
+        pass
+
+    try:
+        # Try toml library as fallback (for older Python)
+        import toml
+        pyproject_path = Path(__file__).parent.parent / "pyproject.toml"
+        if pyproject_path.exists():
+            with open(pyproject_path, 'r') as f:
+                data = toml.load(f)
+                return data["project"]["version"]
     except Exception:
         pass

     try:
-        # Try
+        # Try git tag as final option
+        import subprocess
         result = subprocess.run(
-            ['git', 'describe', '--tags', '--
+            ['git', 'describe', '--tags', '--exact-match'],
             capture_output=True,
             text=True,
             cwd=Path(__file__).parent.parent
         )
         if result.returncode == 0:
-
-            # Add dev suffix if not on exact tag
-            return f"{tag}-dev"
+            return result.stdout.strip().lstrip('v')
     except Exception:
         pass

     # Fallback version - should match pyproject.toml
-    return "2.
+    return "2.1.1"

 __version__ = _get_version()
 __author__ = "GeekWarrior Development"
cursorflow/cli.py
CHANGED
@@ -46,7 +46,9 @@ def main():
               help='Run browser in headless mode')
 @click.option('--timeout', type=int, default=30,
               help='Timeout in seconds for actions')
-
+@click.option('--responsive', is_flag=True,
+              help='Test across multiple viewports (mobile, tablet, desktop)')
+def test(base_url, path, actions, output, logs, config, verbose, headless, timeout, responsive):
     """Test UI flows and interactions with real-time log monitoring"""

     if verbose:
@@ -113,18 +115,47 @@ def test(base_url, path, actions, output, logs, config, verbose, headless, timeo

     # Execute test actions
     try:
-
-
-
-
-
-
-
-
-
-
-
-        console.print(f"
+        if responsive:
+            # Define standard responsive viewports
+            viewports = [
+                {"width": 375, "height": 667, "name": "mobile"},
+                {"width": 768, "height": 1024, "name": "tablet"},
+                {"width": 1440, "height": 900, "name": "desktop"}
+            ]
+
+            console.print(f"📱 Executing responsive test across {len(viewports)} viewports...")
+            console.print(f"   📱 Mobile: 375x667")
+            console.print(f"   📟 Tablet: 768x1024")
+            console.print(f"   💻 Desktop: 1440x900")
+
+            results = asyncio.run(flow.test_responsive(viewports, test_actions))
+
+            # Display responsive results
+            console.print(f"✅ Responsive test completed: {test_description}")
+            execution_summary = results.get('execution_summary', {})
+            console.print(f"📊 Viewports tested: {execution_summary.get('successful_viewports', 0)}/{execution_summary.get('total_viewports', 0)}")
+            console.print(f"⏱️ Total execution time: {execution_summary.get('execution_time', 0):.2f}s")
+            console.print(f"📸 Screenshots: {len(results.get('artifacts', {}).get('screenshots', []))}")
+
+            # Show viewport performance
+            responsive_analysis = results.get('responsive_analysis', {})
+            if 'performance_analysis' in responsive_analysis:
+                perf = responsive_analysis['performance_analysis']
+                console.print(f"🏃 Fastest: {perf.get('fastest_viewport')}")
+                console.print(f"🐌 Slowest: {perf.get('slowest_viewport')}")
+        else:
+            console.print(f"🚀 Executing {len(test_actions)} actions...")
+            results = asyncio.run(flow.execute_and_collect(test_actions))
+
+            console.print(f"✅ Test completed: {test_description}")
+            console.print(f"📊 Browser events: {len(results.get('browser_events', []))}")
+            console.print(f"📋 Server logs: {len(results.get('server_logs', []))}")
+            console.print(f"📸 Screenshots: {len(results.get('artifacts', {}).get('screenshots', []))}")
+
+            # Show correlations if found
+            timeline = results.get('organized_timeline', [])
+            if timeline:
+                console.print(f"⏰ Timeline events: {len(timeline)}")

         # Save results to file for Cursor analysis
         if not output:
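Note: a rough programmatic equivalent of the new --responsive branch, assuming `flow` is an already-configured CursorFlow instance like the one this command builds earlier; the actions are illustrative:

async def run_responsive(flow):
    # Same default viewports the CLI uses when --responsive is passed.
    viewports = [
        {"width": 375, "height": 667, "name": "mobile"},
        {"width": 768, "height": 1024, "name": "tablet"},
        {"width": 1440, "height": 900, "name": "desktop"},
    ]
    actions = [{"navigate": "/"}, {"screenshot": "landing"}]  # illustrative actions
    results = await flow.test_responsive(viewports, actions)
    summary = results.get("execution_summary", {})
    print(f"{summary.get('successful_viewports', 0)}/{summary.get('total_viewports', 0)} viewports passed")
    return results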
cursorflow/core/browser_controller.py
CHANGED
@@ -398,24 +398,114 @@ class BrowserController:
             self.logger.error(f"Condition wait failed: {condition}, {e}")
             raise

-    async def screenshot(self, name: str,
-        """
+    async def screenshot(self, name: str, options: Optional[Dict] = None, capture_comprehensive_data: bool = True) -> Dict[str, Any]:
+        """
+        Take screenshot with comprehensive page analysis - universal
+
+        Args:
+            name: Screenshot name/identifier
+            options: Enhanced screenshot options {
+                "full_page": bool,       # Capture full page (default: False)
+                "clip": {                # Clip to specific region
+                    "x": int, "y": int,  # Top-left coordinates
+                    "width": int, "height": int
+                } OR {
+                    "selector": str      # Clip to element bounding box
+                },
+                "mask": [str],           # CSS selectors to hide/mask
+                "quality": int           # JPEG quality 0-100 (default: 80) - requires .jpg/.jpeg filename
+            }
+            capture_comprehensive_data: Whether to capture detailed page analysis
+        """
         try:
+            # Process options with defaults
+            screenshot_options = options or {}
+            full_page = screenshot_options.get("full_page", False)
+            clip_config = screenshot_options.get("clip")
+            mask_selectors = screenshot_options.get("mask", [])
+            quality = screenshot_options.get("quality", 80)
+
             timestamp = int(time.time())
             screenshot_filename = f".cursorflow/artifacts/screenshots/{name}_{timestamp}.png"

+            # Apply masking if requested (hide sensitive elements)
+            masked_elements = []
+            if mask_selectors:
+                for selector in mask_selectors:
+                    try:
+                        await self.page.add_style_tag(content=f"""
+                            {selector} {{
+                                visibility: hidden !important;
+                                opacity: 0 !important;
+                            }}
+                        """)
+                        masked_elements.append(selector)
+                        self.logger.debug(f"Masked element: {selector}")
+                    except Exception as e:
+                        self.logger.warning(f"Failed to mask {selector}: {e}")
+
+            # Prepare screenshot parameters
+            screenshot_params = {
+                "path": screenshot_filename,
+                "full_page": full_page
+            }
+
+            # Only add quality for JPEG screenshots
+            if screenshot_filename.lower().endswith(('.jpg', '.jpeg')):
+                screenshot_params["quality"] = quality
+                screenshot_params["type"] = "jpeg"
+            # PNG is default and doesn't support quality parameter
+
+            # Handle clipping options
+            if clip_config:
+                if "selector" in clip_config:
+                    # Clip to element bounding box
+                    try:
+                        element = await self.page.wait_for_selector(clip_config["selector"], timeout=5000)
+                        if element:
+                            bounding_box = await element.bounding_box()
+                            if bounding_box:
+                                screenshot_params["clip"] = bounding_box
+                                self.logger.debug(f"Clipping to element {clip_config['selector']}: {bounding_box}")
+                            else:
+                                self.logger.warning(f"Element {clip_config['selector']} has no bounding box")
+                        else:
+                            self.logger.warning(f"Element {clip_config['selector']} not found for clipping")
+                    except Exception as e:
+                        self.logger.warning(f"Failed to clip to element {clip_config['selector']}: {e}")
+
+                elif all(key in clip_config for key in ["x", "y", "width", "height"]):
+                    # Clip to specific coordinates
+                    screenshot_params["clip"] = {
+                        "x": clip_config["x"],
+                        "y": clip_config["y"],
+                        "width": clip_config["width"],
+                        "height": clip_config["height"]
+                    }
+                    self.logger.debug(f"Clipping to coordinates: {screenshot_params['clip']}")

             # Take the visual screenshot
-            await self.page.screenshot(
-
-
-
+            await self.page.screenshot(**screenshot_params)
+
+            # Remove masking styles
+            if masked_elements:
+                try:
+                    for selector in masked_elements:
+                        await self.page.add_style_tag(content=f"""
+                            {selector} {{
+                                visibility: visible !important;
+                                opacity: 1 !important;
+                            }}
+                        """)
+                except Exception as e:
+                    self.logger.warning(f"Failed to remove masking: {e}")

             # Always return structured data for consistency
             screenshot_data = {
                 "screenshot_path": screenshot_filename,
                 "timestamp": timestamp,
                 "name": name,
-                "
+                "options": screenshot_options,
                 "session_id": self.session_id,
                 "trace_info": self.trace_manager.get_trace_info() if self.trace_manager else None
             }
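Note: a hedged sketch of the two action shapes the new signature supports, matching the docstring above and the handling added in cursorflow.py further down; the selector strings and names are illustrative only:

actions = [
    # Simple format: just a screenshot name.
    {"screenshot": "dashboard"},
    # Enhanced format: name plus options.
    {"screenshot": {
        "name": "pricing_table",
        "options": {
            "full_page": True,
            "clip": {"selector": "#pricing"},     # or {"x": 0, "y": 0, "width": 800, "height": 600}
            "mask": [".user-email", ".api-key"],  # hidden via injected CSS while capturing
            "quality": 80,                        # only applied when the filename ends in .jpg/.jpeg
        },
    }},
]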
@@ -515,16 +605,32 @@ class BrowserController:
             raise

     async def get_performance_metrics(self) -> Dict:
-        """Get page performance metrics - universal"""
+        """Get page performance metrics - universal with proper null handling"""
         try:
             metrics = await self.page.evaluate("""
                 () => {
+                    // Helper function to safely calculate timing differences
+                    const safeTiming = (end, start) => {
+                        if (!end || !start || end === 0 || start === 0) return null;
+                        const diff = end - start;
+                        return diff >= 0 ? diff : null;
+                    };
+
                     const perf = performance.getEntriesByType('navigation')[0];
+                    const paint = performance.getEntriesByType('paint');
+                    const lcp = performance.getEntriesByType('largest-contentful-paint')[0];
+
                     return {
-                        loadTime: perf ? perf.loadEventEnd
-                        domContentLoaded: perf ? perf.domContentLoadedEventEnd
-                        firstPaint:
-                        largestContentfulPaint:
+                        loadTime: perf ? safeTiming(perf.loadEventEnd, perf.loadEventStart) : null,
+                        domContentLoaded: perf ? safeTiming(perf.domContentLoadedEventEnd, perf.domContentLoadedEventStart) : null,
+                        firstPaint: paint.find(p => p.name === 'first-paint')?.startTime || null,
+                        largestContentfulPaint: lcp?.startTime || null,
+                        _reliability: {
+                            navigation_available: perf !== undefined,
+                            paint_available: paint.length > 0,
+                            lcp_available: lcp !== undefined,
+                            note: "null values in headless mode are expected"
+                        }
                     };
                 }
             """)
@@ -533,7 +639,7 @@ class BrowserController:

         except Exception as e:
             self.logger.error(f"Performance metrics failed: {e}")
-            return {}
+            return {"error": str(e)}

     async def cleanup(self):
         """Clean up browser resources and stop trace recording"""
cursorflow/core/browser_engine.py
CHANGED
@@ -340,22 +340,35 @@ class BrowserEngine:
         return validation_result

     async def get_performance_metrics(self) -> Dict:
-        """Get browser performance metrics"""
+        """Get browser performance metrics with proper null handling"""

         metrics = await self.page.evaluate("""() => {
+            // Helper function to safely calculate timing differences
+            const safeTiming = (end, start) => {
+                if (!end || !start || end === 0 || start === 0) return null;
+                const diff = end - start;
+                return diff >= 0 ? diff : null;
+            };
+
             const timing = performance.timing;
             const navigation = performance.getEntriesByType('navigation')[0];

             return {
-                page_load_time: timing.loadEventEnd
-                dom_ready_time: timing.domContentLoadedEventEnd
+                page_load_time: safeTiming(timing.loadEventEnd, timing.navigationStart),
+                dom_ready_time: safeTiming(timing.domContentLoadedEventEnd, timing.navigationStart),
                 first_paint: navigation ? navigation.loadEventEnd : null,
                 resource_count: performance.getEntriesByType('resource').length,
                 memory_usage: performance.memory ? {
                     used: performance.memory.usedJSHeapSize,
                     total: performance.memory.totalJSHeapSize,
                     limit: performance.memory.jsHeapSizeLimit
-                } : null
+                } : null,
+                _reliability: {
+                    timing_available: timing !== undefined,
+                    navigation_available: navigation !== undefined,
+                    memory_available: performance.memory !== undefined,
+                    note: "null values in headless mode are expected"
+                }
             };
         }""")
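Note: both metric collectors can now legitimately report null timings (expected in headless runs), and the BrowserController variant returns {"error": ...} on failure. A minimal null-aware consumer sketch; the field names match the BrowserEngine dictionary above, the formatting is illustrative:

def summarize_metrics(metrics: dict) -> str:
    # The BrowserController variant returns {"error": ...} when evaluation fails.
    if "error" in metrics:
        return f"metrics unavailable: {metrics['error']}"
    note = metrics.get("_reliability", {}).get("note", "")

    def fmt(value):
        # Timings are milliseconds, or None when the browser did not report them.
        return f"{value:.0f} ms" if isinstance(value, (int, float)) else "n/a"

    return f"load: {fmt(metrics.get('page_load_time'))}, dom ready: {fmt(metrics.get('dom_ready_time'))} ({note})"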
cursorflow/core/cursorflow.py
CHANGED
@@ -716,9 +716,23 @@ class CursorFlow:

             # Capture actions
             elif "screenshot" in action:
-
-
-
+                screenshot_config = action["screenshot"]
+
+                # Handle both string and dict formats
+                if isinstance(screenshot_config, str):
+                    # Simple format: {"screenshot": "name"}
+                    name = screenshot_config
+                    options = None
+                elif isinstance(screenshot_config, dict):
+                    # Enhanced format: {"screenshot": {"name": "test", "options": {...}}}
+                    name = screenshot_config.get("name", "screenshot")
+                    options = screenshot_config.get("options")
+                else:
+                    name = "screenshot"
+                    options = None
+
+                screenshot_data = await self.browser.screenshot(name, options)
+                self.artifacts["screenshots"].append(screenshot_data)

             elif "authenticate" in action:
                 if self.auth_handler:
@@ -750,12 +764,249 @@ class CursorFlow:
         except Exception as e:
             self.logger.error(f"Session cleanup failed: {e}")

-    def
-        """
-
-
+    async def test_responsive(self, viewports: List[Dict], actions: List[Dict], session_options: Optional[Dict] = None) -> Dict[str, Any]:
+        """
+        Test the same actions across multiple viewports in parallel
+
+        Args:
+            viewports: List of viewport configurations [{"width": 375, "height": 667, "name": "mobile"}, ...]
+            actions: Actions to execute on each viewport
+            session_options: Optional session configuration
+
+        Returns:
+            Dict with results for each viewport plus comparison analysis
+        """
+        session_options = session_options or {}
+        responsive_session_id = session_options.get("session_id", f"responsive_{int(time.time())}")
+
+        self.logger.info(f"Starting responsive testing across {len(viewports)} viewports")
+
+        try:
+            # Execute tests in parallel across all viewports
+            viewport_tasks = []
+            for viewport in viewports:
+                task = self._test_single_viewport(viewport, actions, responsive_session_id)
+                viewport_tasks.append(task)
+
+            # Wait for all viewport tests to complete
+            viewport_results = await asyncio.gather(*viewport_tasks, return_exceptions=True)
+
+            # Process results and handle any exceptions
+            processed_results = {}
+            successful_viewports = []
+            failed_viewports = []
+
+            for i, result in enumerate(viewport_results):
+                viewport_name = viewports[i]["name"]
+                if isinstance(result, Exception):
+                    self.logger.error(f"Viewport {viewport_name} failed: {result}")
+                    failed_viewports.append({"name": viewport_name, "error": str(result)})
+                else:
+                    processed_results[viewport_name] = result
+                    successful_viewports.append(viewport_name)
+
+            # Create responsive analysis
+            responsive_analysis = self._analyze_responsive_results(processed_results, viewports)
+
+            return {
+                "session_id": responsive_session_id,
+                "timestamp": time.time(),
+                "viewport_results": processed_results,
+                "responsive_analysis": responsive_analysis,
+                "execution_summary": {
+                    "total_viewports": len(viewports),
+                    "successful_viewports": len(successful_viewports),
+                    "failed_viewports": len(failed_viewports),
+                    "success_rate": len(successful_viewports) / len(viewports),
+                    "execution_time": responsive_analysis.get("total_execution_time", 0)
+                },
+                "failed_viewports": failed_viewports if failed_viewports else None,
+                "artifacts": {
+                    "screenshots": self._collect_responsive_screenshots(processed_results),
+                    "comprehensive_data": self._collect_responsive_data(processed_results)
+                }
+            }
+
+        except Exception as e:
+            self.logger.error(f"Responsive testing failed: {e}")
+            return {
+                "session_id": responsive_session_id,
+                "error": str(e),
+                "timestamp": time.time()
+            }
+
+    async def _test_single_viewport(self, viewport: Dict, actions: List[Dict], session_id: str) -> Dict[str, Any]:
+        """Execute test actions for a single viewport"""
+        viewport_name = viewport["name"]
+        viewport_width = viewport["width"]
+        viewport_height = viewport["height"]
+
+        # Create a separate browser instance for this viewport
+        viewport_browser = BrowserController(
+            base_url=self.base_url,
+            config={
+                **self.browser_config,
+                "viewport": {"width": viewport_width, "height": viewport_height}
+            }
+        )
+
+        try:
+            # Initialize browser with specific viewport
+            await viewport_browser.initialize()
+
+            # Set the session ID for artifact organization
+            viewport_browser.session_id = f"{session_id}_{viewport_name}"
+
+            # Execute all actions for this viewport
+            viewport_artifacts = {"screenshots": [], "comprehensive_data": []}
+            viewport_timeline = []
+
+            for action in actions:
+                action_start = time.time()
+
+                # Use the same action handling as execute_and_collect for consistency
+                if "navigate" in action:
+                    await viewport_browser.navigate(action["navigate"])
+                elif "click" in action:
+                    selector = action["click"]
+                    if isinstance(selector, dict):
+                        selector = selector["selector"]
+                    await viewport_browser.click(selector)
+                elif "fill" in action:
+                    fill_config = action["fill"]
+                    await viewport_browser.fill(fill_config["selector"], fill_config["value"])
+                elif "wait_for" in action:
+                    selector = action["wait_for"]
+                    if isinstance(selector, dict):
+                        selector = selector["selector"]
+                    await viewport_browser.wait_for_element(selector)
+                elif "wait" in action:
+                    wait_time = action["wait"] * 1000  # Convert to milliseconds
+                    await viewport_browser.page.wait_for_timeout(wait_time)
+                elif "screenshot" in action:
+                    screenshot_config = action["screenshot"]
+
+                    # Handle both string and dict formats
+                    if isinstance(screenshot_config, str):
+                        name = f"{viewport_name}_{screenshot_config}"
+                        options = None
+                    elif isinstance(screenshot_config, dict):
+                        name = f"{viewport_name}_{screenshot_config.get('name', 'screenshot')}"
+                        options = screenshot_config.get("options")
+                    else:
+                        name = f"{viewport_name}_screenshot"
+                        options = None
+
+                    screenshot_data = await viewport_browser.screenshot(name, options)
+                    viewport_artifacts["screenshots"].append(screenshot_data)
+                elif "authenticate" in action:
+                    # Note: Auth handler would need to be passed to viewport browser
+                    # For now, log that auth is not supported in responsive mode
+                    self.logger.warning(f"Authentication action skipped in responsive mode for viewport {viewport_name}")
+                else:
+                    # Log unsupported actions
+                    action_type = list(action.keys())[0] if action else "unknown"
+                    self.logger.warning(f"Unsupported action '{action_type}' in responsive mode for viewport {viewport_name}")
+
+                # Record action in timeline
+                viewport_timeline.append({
+                    "timestamp": action_start,
+                    "type": "browser",
+                    "event": list(action.keys())[0],
+                    "data": action,
+                    "duration": time.time() - action_start,
+                    "viewport": viewport_name
+                })

-
+            return {
+                "viewport": viewport,
+                "artifacts": viewport_artifacts,
+                "timeline": viewport_timeline,
+                "success": True,
+                "execution_time": sum(event["duration"] for event in viewport_timeline)
+            }
+
+        except Exception as e:
+            self.logger.error(f"Viewport {viewport_name} test failed: {e}")
+            return {
+                "viewport": viewport,
+                "error": str(e),
+                "success": False
+            }
+        finally:
+            await viewport_browser.cleanup()
+
+    def _analyze_responsive_results(self, results: Dict, viewports: List[Dict]) -> Dict[str, Any]:
+        """Analyze responsive testing results for patterns and insights"""
+        if not results:
+            return {"error": "No successful viewport results to analyze"}
+
+        analysis = {
+            "viewport_comparison": {},
+            "responsive_insights": [],
+            "performance_analysis": {},
+            "total_execution_time": 0
+        }
+
+        # Compare viewports
+        viewport_names = list(results.keys())
+        for viewport_name in viewport_names:
+            result = results[viewport_name]
+            viewport_config = next(v for v in viewports if v["name"] == viewport_name)
+
+            analysis["viewport_comparison"][viewport_name] = {
+                "dimensions": f"{viewport_config['width']}x{viewport_config['height']}",
+                "screenshot_count": len(result.get("artifacts", {}).get("screenshots", [])),
+                "execution_time": result.get("execution_time", 0),
+                "actions_completed": len(result.get("timeline", [])),
+                "success": result.get("success", False)
+            }
+
+            analysis["total_execution_time"] += result.get("execution_time", 0)
+
+        # Generate responsive insights
+        if len(viewport_names) >= 2:
+            analysis["responsive_insights"].extend([
+                f"Tested across {len(viewport_names)} viewports: {', '.join(viewport_names)}",
+                f"Total execution time: {analysis['total_execution_time']:.2f}s",
+                f"Average time per viewport: {analysis['total_execution_time'] / len(viewport_names):.2f}s"
+            ])
+
+            # Performance comparison
+            execution_times = [results[vp].get("execution_time", 0) for vp in viewport_names]
+            fastest_vp = viewport_names[execution_times.index(min(execution_times))]
+            slowest_vp = viewport_names[execution_times.index(max(execution_times))]
+
+            analysis["performance_analysis"] = {
+                "fastest_viewport": fastest_vp,
+                "slowest_viewport": slowest_vp,
+                "time_difference": max(execution_times) - min(execution_times),
+                "performance_variance": "low" if max(execution_times) - min(execution_times) < 2 else "high"
+            }
+
+        return analysis
+
+    def _collect_responsive_screenshots(self, results: Dict) -> List[Dict]:
+        """Collect all screenshots from responsive testing"""
+        all_screenshots = []
+        for viewport_name, result in results.items():
+            if result.get("success") and "artifacts" in result:
+                for screenshot in result["artifacts"].get("screenshots", []):
+                    screenshot["viewport"] = viewport_name
+                    all_screenshots.append(screenshot)
+        return all_screenshots
+
+    def _collect_responsive_data(self, results: Dict) -> List[Dict]:
+        """Collect all comprehensive data from responsive testing"""
+        all_data = []
+        for viewport_name, result in results.items():
+            if result.get("success") and "artifacts" in result:
+                for data in result["artifacts"].get("comprehensive_data", []):
+                    data["viewport"] = viewport_name
+                    all_data.append(data)
+        return all_data
+
+    def _recommend_best_iteration(self, iterations: List[Dict]) -> Optional[str]:
         best_iteration = None
         best_score = -1

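Note: a short sketch of walking the structure test_responsive returns, using only keys assembled above (responsive_analysis.viewport_comparison and performance_analysis); the printed format is illustrative:

def print_responsive_summary(results: dict) -> None:
    analysis = results.get("responsive_analysis", {})
    for viewport_name, stats in analysis.get("viewport_comparison", {}).items():
        status = "ok" if stats.get("success") else "failed"
        print(f"{viewport_name} ({stats.get('dimensions')}): "
              f"{stats.get('actions_completed', 0)} actions, "
              f"{stats.get('screenshot_count', 0)} screenshots, {status}")
    perf = analysis.get("performance_analysis", {})
    if perf:
        print(f"fastest: {perf.get('fastest_viewport')}, slowest: {perf.get('slowest_viewport')}")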
cursorflow/install_cursorflow_rules.py
CHANGED
@@ -125,9 +125,9 @@ def create_config_template(project_path: Path, force: bool = False):
     # Get current version
     try:
         import cursorflow
-        current_version = getattr(cursorflow, '__version__', '2.
+        current_version = getattr(cursorflow, '__version__', '2.1.1')
     except ImportError:
-        current_version = '2.
+        current_version = '2.1.1'

     if config_path.exists():
         if not force:
@@ -302,9 +302,9 @@ def setup_update_checking(project_path: Path):
     # Create initial version tracking
     try:
         import cursorflow
-        current_version = getattr(cursorflow, '__version__', '2.
+        current_version = getattr(cursorflow, '__version__', '2.1.1')
     except ImportError:
-        current_version = '2.
+        current_version = '2.1.1'

     version_info = {
         "installed_version": current_version,