cursorflow 1.2.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,744 @@
+ """
+ Cursor Integration Layer
+
+ Transforms CursorFlow raw data into actionable insights for Cursor.
+ Provides structured recommendations and decision frameworks.
+ """
+
+ import time
+ import json
+ from typing import Dict, List, Optional, Any
+ from pathlib import Path
+ import logging
+
+
+ class CursorIntegration:
+     """
+     Bridge between CursorFlow data collection and Cursor decision-making
+
+     Provides structured analysis frameworks and actionable recommendations
+     without doing the thinking - just organizing data for Cursor to analyze.
+     """
+
+     def __init__(self):
+         self.logger = logging.getLogger(__name__)
+
+     def format_css_iteration_results(
+         self,
+         raw_results: Dict,
+         session_id: str,
+         project_context: Optional[Dict] = None
+     ) -> Dict[str, Any]:
+         """
+         Format CSS iteration results for Cursor analysis
+
+         Args:
+             raw_results: Raw CursorFlow css_iteration_session results
+             session_id: Unique session identifier
+             project_context: {"framework": "react", "component": "dashboard", ...}
+
+         Returns:
+             Structured data optimized for Cursor decision-making
+         """
+
+         # Create session-linked artifacts
+         session_artifacts = self._organize_session_artifacts(raw_results, session_id)
+
+         # Format for Cursor analysis
+         cursor_results = {
+             "session_id": session_id,
+             "timestamp": time.time(),
+             "test_type": "css_iteration",
+             "project_context": project_context or {},
+
+             # Visual comparison data
+             "visual_analysis": {
+                 "baseline": {
+                     "screenshot_path": session_artifacts["baseline_screenshot"],
+                     "layout_metrics": raw_results.get("baseline", {}).get("layout_metrics", {}),
+                     "computed_styles": raw_results.get("baseline", {}).get("computed_styles", {})
+                 },
+                 "iterations": self._format_iterations_for_cursor(
+                     raw_results.get("iterations", []),
+                     session_artifacts
+                 ),
+                 "comparison_framework": self._get_comparison_framework()
+             },
+
+             # Decision support data
+             "cursor_analysis_guide": {
+                 "evaluation_criteria": [
+                     "visual_hierarchy_improvement",
+                     "layout_stability",
+                     "responsive_behavior",
+                     "accessibility_impact",
+                     "performance_implications"
+                 ],
+                 "decision_questions": [
+                     "Which iteration best improves visual hierarchy?",
+                     "Are there any layout breaking changes?",
+                     "Do any iterations introduce console errors?",
+                     "Which approach aligns with design system patterns?"
+                 ],
+                 "implementation_readiness": self._assess_implementation_readiness(raw_results)
+             },
+
+             # File management
+             "artifact_management": {
+                 "session_directory": session_artifacts["session_directory"],
+                 "cleanup_after_decision": session_artifacts["cleanup_paths"],
+                 "permanent_assets": session_artifacts["keep_paths"]
+             },
+
+             # Next steps framework
+             "recommended_actions": self._generate_action_recommendations(raw_results, project_context)
+         }
+
+         # Save session data for Cursor reference
+         self._save_session_data(cursor_results)
+
+         return cursor_results
+
+     def format_ui_test_results(
+         self,
+         raw_results: Dict,
+         session_id: str,
+         test_intent: str,
+         project_context: Optional[Dict] = None
+     ) -> Dict[str, Any]:
+         """
+         Format UI test results for Cursor analysis
+
+         Args:
+             raw_results: Raw CursorFlow execute_and_collect results
+             session_id: Unique session identifier
+             test_intent: "debug_error", "validate_functionality", "explore_behavior"
+             project_context: Project and component context
+
+         Returns:
+             Structured debugging data for Cursor analysis
+         """
+
+         cursor_results = {
+             "session_id": session_id,
+             "timestamp": time.time(),
+             "test_type": "ui_testing",
+             "test_intent": test_intent,
+             "project_context": project_context or {},
+
+             # Debugging analysis
+             "debugging_analysis": {
+                 "timeline": self._format_timeline_for_cursor(raw_results.get("timeline", [])),
+                 "error_patterns": self._identify_error_patterns(raw_results),
+                 "correlation_insights": self._format_correlations_for_cursor(raw_results),
+                 "browser_diagnostics": self._format_browser_diagnostics(raw_results)
+             },
+
+             # Investigation framework
+             "cursor_investigation_guide": {
+                 "primary_questions": self._generate_investigation_questions(raw_results, test_intent),
+                 "follow_up_tests": self._suggest_follow_up_tests(raw_results, test_intent),
+                 "code_areas_to_examine": self._identify_code_areas(raw_results, project_context)
+             },
+
+             # Action framework
+             "recommended_actions": self._generate_debugging_actions(raw_results, test_intent)
+         }
+
+         self._save_session_data(cursor_results)
+         return cursor_results
+
+     def format_persistent_css_results(
+         self,
+         raw_results: Dict,
+         project_context: Optional[Dict] = None
+     ) -> Dict[str, Any]:
+         """
+         Format persistent CSS iteration results for Cursor analysis
+
+         Enhances standard CSS iteration formatting with persistent session data,
+         hot reload information, and session management recommendations.
+
+         Args:
+             raw_results: Raw CursorFlow css_iteration_persistent results
+             project_context: {"framework": "react", "hot_reload": True, ...}
+
+         Returns:
+             Enhanced analysis data optimized for persistent session workflows
+         """
+
+         session_id = raw_results.get("session_id", "unknown")
+         session_info = raw_results.get("session_info", {})
+
+         # Start with standard CSS formatting
+         base_results = self.format_css_iteration_results(
+             raw_results, session_id, project_context
+         )
+
+         # Enhance with persistent session data
+         enhanced_results = base_results.copy()
+         enhanced_results.update({
+
+             # Persistent session context
+             "session_context": {
+                 "session_id": session_id,
+                 "session_persistent": True,
+                 "hot_reload_available": session_info.get("hot_reload_available", False),
+                 "hot_reload_used": raw_results.get("hot_reload_used", False),
+                 "session_age_seconds": session_info.get("age_seconds", 0),
+                 "iteration_count": session_info.get("iteration_count", 0),
+                 "session_reused": raw_results.get("summary", {}).get("session_reused", False)
+             },
+
+             # Enhanced iteration analysis for hot reload
+             "persistent_analysis": {
+                 "hot_reload_effectiveness": self._analyze_hot_reload_effectiveness(raw_results),
+                 "session_state_consistency": self._assess_session_consistency(raw_results),
+                 "iteration_speed_metrics": self._calculate_iteration_metrics(raw_results),
+                 "session_optimization_opportunities": self._identify_optimization_opportunities(raw_results)
+             },
+
+             # Enhanced recommendations for persistent workflows
+             "persistent_workflow_guide": {
+                 "continue_session_criteria": [
+                     "hot_reload_working_effectively",
+                     "no_session_state_corruption",
+                     "development_server_stable",
+                     "browser_memory_usage_acceptable"
+                 ],
+                 "restart_session_triggers": [
+                     "hot_reload_failures_detected",
+                     "significant_console_errors",
+                     "session_state_inconsistency",
+                     "browser_performance_degradation"
+                 ],
+                 "optimization_suggestions": self._identify_optimization_opportunities(raw_results)
+             },
+
+             # Session management recommendations
+             "session_management": {
+                 "recommended_action": self._recommend_session_action(raw_results),
+                 "keep_session_alive": self._should_keep_session_alive(raw_results),
+                 "next_iteration_strategy": self._recommend_next_iteration_strategy(raw_results),
+                 "cleanup_recommendations": self._generate_cleanup_recommendations(raw_results)
+             }
+         })
+
+         # Update action recommendations for persistent context
+         enhanced_results["recommended_actions"] = self._generate_persistent_action_recommendations(
+             raw_results, project_context
+         )
+
+         # Save enhanced session data
+         self._save_persistent_session_data(enhanced_results)
+
+         return enhanced_results
+
+     def _analyze_hot_reload_effectiveness(self, raw_results: Dict) -> Dict[str, Any]:
+         """Analyze how effectively hot reload was used"""
+
+         iterations = raw_results.get("iterations", [])
+         hot_reload_iterations = len([i for i in iterations if i.get("hot_reload_used", False)])
+         total_iterations = len(iterations)
+
+         return {
+             "hot_reload_usage_rate": hot_reload_iterations / total_iterations if total_iterations > 0 else 0,
+             "hot_reload_successful": hot_reload_iterations > 0,
+             "potential_time_savings": self._estimate_time_savings(raw_results),
+             "hot_reload_quality": "excellent" if hot_reload_iterations == total_iterations else "partial"
+         }
+
+     def _assess_session_consistency(self, raw_results: Dict) -> Dict[str, Any]:
+         """Assess whether session state remained consistent"""
+
+         session_info = raw_results.get("session_info", {})
+         iterations = raw_results.get("iterations", [])
+
+         # Check for consistency indicators
+         console_error_trend = [len(i.get("console_errors", [])) for i in iterations]
+         performance_stability = self._check_performance_stability(iterations)
+
+         return {
+             "state_consistent": len(set(console_error_trend)) <= 1, # Error counts consistent
+             "performance_stable": performance_stability,
+             "session_age_impact": session_info.get("age_seconds", 0) < 1800, # Under 30 minutes
+             "navigation_stability": len(session_info.get("navigation_history", [])) < 10
+         }
+
+     def _calculate_iteration_metrics(self, raw_results: Dict) -> Dict[str, Any]:
+         """Calculate metrics specific to iteration speed and efficiency"""
+
+         execution_time = raw_results.get("execution_time", 0)
+         total_iterations = len(raw_results.get("iterations", []))
+
+         return {
+             "total_execution_time": execution_time,
+             "average_iteration_time": execution_time / total_iterations if total_iterations > 0 else 0,
+             "iterations_per_minute": (total_iterations / execution_time) * 60 if execution_time > 0 else 0,
+             "hot_reload_speed_advantage": raw_results.get("hot_reload_used", False)
+         }
+
+     def _identify_optimization_opportunities(self, raw_results: Dict) -> List[Dict]:
+         """Identify opportunities to optimize the iteration process"""
+
+         opportunities = []
+         session_info = raw_results.get("session_info", {})
+
+         # Hot reload not being used
+         if not raw_results.get("hot_reload_used", False):
+             opportunities.append({
+                 "optimization": "enable_hot_reload",
+                 "description": "Hot reload could speed up CSS iterations significantly",
+                 "potential_benefit": "3-5x faster iteration cycles",
+                 "implementation": "Configure webpack HMR or Vite hot reload"
+             })
+
+         # Session getting old
+         if session_info.get("age_seconds", 0) > 1800: # 30 minutes
+             opportunities.append({
+                 "optimization": "session_refresh",
+                 "description": "Long-running sessions may accumulate performance issues",
+                 "potential_benefit": "Better memory usage and performance",
+                 "implementation": "Restart session for fresh environment"
+             })
+
+         # Too many navigation events
+         if len(session_info.get("navigation_history", [])) > 10:
+             opportunities.append({
+                 "optimization": "reduce_navigation",
+                 "description": "Multiple navigations may slow down iterations",
+                 "potential_benefit": "Faster CSS application and testing",
+                 "implementation": "Use component-specific testing when possible"
+             })
+
+         return opportunities
+
+     def _recommend_session_action(self, raw_results: Dict) -> str:
+         """Recommend what to do with the current session"""
+
+         session_info = raw_results.get("session_info", {})
+         summary = raw_results.get("summary", {})
+
+         # Check for problems
+         failed_iterations = summary.get("failed_iterations", 0)
+         session_age = session_info.get("age_seconds", 0)
+         hot_reload_available = session_info.get("hot_reload_available", False)
+
+         if failed_iterations > 0:
+             return "restart_session_due_to_errors"
+         elif session_age > 3600: # 1 hour
+             return "restart_session_due_to_age"
+         elif hot_reload_available and raw_results.get("hot_reload_used", False):
+             return "continue_session_optimal"
+         else:
+             return "continue_session_standard"
+
+     def _should_keep_session_alive(self, raw_results: Dict) -> bool:
+         """Determine if session should be kept alive"""
+
+         session_info = raw_results.get("session_info", {})
+         summary = raw_results.get("summary", {})
+
+         # Keep alive if:
+         # - Hot reload is working
+         # - No major errors
+         # - Session is relatively fresh
+         # - Performance is good
+
+         return (
+             session_info.get("hot_reload_available", False) and
+             summary.get("failed_iterations", 0) == 0 and
+             session_info.get("age_seconds", 0) < 3600 and
+             summary.get("successful_iterations", 0) > 0
+         )
+
+     def _recommend_next_iteration_strategy(self, raw_results: Dict) -> Dict[str, Any]:
+         """Recommend strategy for next iteration cycle"""
+
+         session_info = raw_results.get("session_info", {})
+         hot_reload_used = raw_results.get("hot_reload_used", False)
+
+         if hot_reload_used and session_info.get("hot_reload_available", False):
+             return {
+                 "strategy": "continue_with_hot_reload",
+                 "session_reuse": True,
+                 "expected_performance": "fast",
+                 "preparation_needed": "none"
+             }
+         elif session_info.get("hot_reload_available", False):
+             return {
+                 "strategy": "optimize_for_hot_reload",
+                 "session_reuse": True,
+                 "expected_performance": "improved",
+                 "preparation_needed": "verify_hot_reload_configuration"
+             }
+         else:
+             return {
+                 "strategy": "standard_iteration",
+                 "session_reuse": False,
+                 "expected_performance": "standard",
+                 "preparation_needed": "fresh_session_recommended"
+             }
+
+     def _generate_cleanup_recommendations(self, raw_results: Dict) -> List[Dict]:
+         """Generate cleanup recommendations for the session"""
+
+         recommendations = []
+         session_info = raw_results.get("session_info", {})
+
+         # Session-specific cleanup
+         if session_info.get("age_seconds", 0) > 3600:
+             recommendations.append({
+                 "action": "restart_browser_session",
+                 "reason": "long_running_session",
+                 "priority": "medium"
+             })
+
+         if session_info.get("applied_css_count", 0) > 20:
+             recommendations.append({
+                 "action": "clear_injected_css",
+                 "reason": "too_many_css_injections",
+                 "priority": "low"
+             })
+
+         return recommendations
+
+     def _generate_persistent_action_recommendations(
+         self,
+         raw_results: Dict,
+         project_context: Optional[Dict]
+     ) -> List[Dict]:
+         """Generate action recommendations enhanced for persistent sessions"""
+
+         # Start with base recommendations
+         base_recommendations = self._generate_action_recommendations(raw_results, project_context)
+
+         # Add persistent session specific recommendations
+         session_recommendations = []
+
+         # Session management recommendations
+         session_action = self._recommend_session_action(raw_results)
+         if session_action == "continue_session_optimal":
+             session_recommendations.append({
+                 "action": "continue_persistent_session",
+                 "priority": "high",
+                 "description": "Session is performing optimally with hot reload",
+                 "implementation": "Keep session alive for next iteration cycle",
+                 "benefits": ["Faster iterations", "Maintained application state", "Hot reload advantages"]
+             })
+         elif session_action.startswith("restart_session"):
+             session_recommendations.append({
+                 "action": "restart_persistent_session",
+                 "priority": "medium",
+                 "description": f"Session restart recommended: {session_action}",
+                 "implementation": "Clean up current session and start fresh",
+                 "benefits": ["Clean environment", "Better performance", "Reduced memory usage"]
+             })
+
+         # Hot reload optimization
+         if not raw_results.get("hot_reload_used", False):
+             session_recommendations.append({
+                 "action": "configure_hot_reload",
+                 "priority": "high",
+                 "description": "Enable hot reload for faster CSS iterations",
+                 "implementation": "Set up webpack HMR, Vite HMR, or live reload",
+                 "benefits": ["3-5x faster iterations", "Maintained browser state", "Better development experience"]
+             })
+
+         return base_recommendations + session_recommendations
+
+     def _estimate_time_savings(self, raw_results: Dict) -> Dict[str, Any]:
+         """Estimate time savings from hot reload usage"""
+
+         total_iterations = len(raw_results.get("iterations", []))
+         hot_reload_iterations = len([i for i in raw_results.get("iterations", []) if i.get("hot_reload_used", False)])
+
+         # Estimate: standard reload ~2-3 seconds, hot reload ~0.1-0.2 seconds
+         standard_reload_time = total_iterations * 2.5 # seconds
+         hot_reload_time = hot_reload_iterations * 0.15 + (total_iterations - hot_reload_iterations) * 2.5
+
+         return {
+             "estimated_standard_time": standard_reload_time,
+             "actual_time_with_hot_reload": hot_reload_time,
+             "time_saved_seconds": max(0, standard_reload_time - hot_reload_time),
+             "efficiency_improvement": hot_reload_iterations / total_iterations if total_iterations > 0 else 0
+         }
+
+     def _check_performance_stability(self, iterations: List[Dict]) -> bool:
+         """Check if performance remained stable across iterations"""
+
+         render_times = []
+         for iteration in iterations:
+             perf = iteration.get("performance_impact", {}) or iteration.get("performance_metrics", {})
+             render_time = perf.get("renderTime", 0) or perf.get("render_time", 0)
+             if render_time > 0:
+                 render_times.append(render_time)
+
+         if len(render_times) < 2:
+             return True # Insufficient data, assume stable
+
+         # Check if performance degraded significantly
+         first_half = render_times[:len(render_times)//2]
+         second_half = render_times[len(render_times)//2:]
+
+         avg_first = sum(first_half) / len(first_half)
+         avg_second = sum(second_half) / len(second_half)
+
+         # Performance is stable if second half isn't significantly worse
+         return avg_second <= avg_first * 1.5 # Allow 50% degradation threshold
+
+     def _save_persistent_session_data(self, enhanced_results: Dict):
+         """Save enhanced persistent session data"""
+
+         session_id = enhanced_results["session_id"]
+         artifacts_base = Path.cwd() / ".cursorflow" / "artifacts"
+         session_file = artifacts_base / "sessions" / session_id / "persistent_analysis.json"
+         session_file.parent.mkdir(parents=True, exist_ok=True)
+
+         with open(session_file, 'w') as f:
+             json.dump(enhanced_results, f, indent=2)
+
+         self.logger.info(f"Enhanced persistent session data saved: {session_file}")
+
+     def _organize_session_artifacts(self, raw_results: Dict, session_id: str) -> Dict[str, Any]:
+         """Organize artifacts with clear session linking"""
+
+         # Create session-specific directory in user's project
+         artifacts_base = Path.cwd() / ".cursorflow" / "artifacts"
+         session_dir = artifacts_base / "sessions" / session_id
+         session_dir.mkdir(parents=True, exist_ok=True)
+
+         artifacts = {
+             "session_directory": str(session_dir),
+             "baseline_screenshot": None,
+             "iteration_screenshots": [],
+             "cleanup_paths": [],
+             "keep_paths": []
+         }
+
+         # Process baseline
+         baseline = raw_results.get("baseline", {})
+         if baseline.get("screenshot"):
+             baseline_path = session_dir / "baseline.png"
+             artifacts["baseline_screenshot"] = str(baseline_path)
+             artifacts["keep_paths"].append(str(baseline_path))
+
+         # Process iterations
+         for i, iteration in enumerate(raw_results.get("iterations", [])):
+             if iteration.get("screenshot"):
+                 iter_name = iteration.get("name", f"iteration_{i+1}")
+                 iter_path = session_dir / f"{iter_name}.png"
+                 artifacts["iteration_screenshots"].append({
+                     "name": iter_name,
+                     "path": str(iter_path),
+                     "css_applied": iteration.get("css_applied", ""),
+                     "has_errors": len(iteration.get("console_errors", [])) > 0
+                 })
+                 artifacts["keep_paths"].append(str(iter_path))
+
+         return artifacts
+
+     def _format_iterations_for_cursor(self, iterations: List[Dict], session_artifacts: Dict) -> List[Dict]:
+         """Format iteration data for Cursor analysis"""
+
+         formatted = []
+         for i, iteration in enumerate(iterations):
+             iteration_data = {
+                 "name": iteration.get("name", f"iteration_{i+1}"),
+                 "css_changes": iteration.get("css_applied", ""),
+                 "rationale": iteration.get("rationale", ""),
+                 "screenshot_path": session_artifacts["iteration_screenshots"][i]["path"] if i < len(session_artifacts["iteration_screenshots"]) else None,
+
+                 # Analysis-ready metrics
+                 "quality_indicators": {
+                     "has_console_errors": len(iteration.get("console_errors", [])) > 0,
+                     "error_count": len(iteration.get("console_errors", [])),
+                     "layout_changes": bool(iteration.get("changes", {}).get("layout_differences")),
+                     "style_changes": bool(iteration.get("changes", {}).get("style_differences"))
+                 },
+
+                 # Raw data for Cursor's detailed analysis
+                 "raw_console_errors": iteration.get("console_errors", []),
+                 "raw_layout_changes": iteration.get("changes", {}).get("layout_differences", {}),
+                 "raw_style_changes": iteration.get("changes", {}).get("style_differences", {}),
+                 "raw_performance_metrics": iteration.get("performance_metrics", {})
+             }
+             formatted.append(iteration_data)
+
+         return formatted
+
+     def _get_comparison_framework(self) -> Dict[str, Any]:
+         """Provide framework for Cursor to compare iterations"""
+
+         return {
+             "evaluation_dimensions": {
+                 "visual_improvement": {
+                     "description": "Does this iteration improve visual hierarchy and aesthetics?",
+                     "data_sources": ["screenshot_comparison", "layout_metrics"],
+                     "evaluation_method": "visual_inspection_and_metrics_comparison"
+                 },
+                 "technical_stability": {
+                     "description": "Does this iteration introduce technical issues?",
+                     "data_sources": ["console_errors", "performance_metrics"],
+                     "evaluation_method": "error_count_and_performance_analysis"
+                 },
+                 "layout_integrity": {
+                     "description": "Does this iteration maintain responsive layout integrity?",
+                     "data_sources": ["layout_changes", "computed_styles"],
+                     "evaluation_method": "layout_difference_analysis"
+                 }
+             },
+
+             "decision_process": [
+                 "1. Eliminate iterations with console errors",
+                 "2. Compare visual improvements via screenshots",
+                 "3. Validate layout stability via metrics",
+                 "4. Choose iteration with best visual/technical balance",
+                 "5. Apply chosen CSS to codebase"
+             ],
+
+             "red_flags": [
+                 "console_errors_introduced",
+                 "layout_breaking_changes",
+                 "significant_performance_degradation",
+                 "accessibility_violations"
+             ]
+         }
+
+     def _assess_implementation_readiness(self, raw_results: Dict) -> Dict[str, Any]:
+         """Assess which iterations are ready for implementation"""
+
+         readiness = {
+             "safe_to_implement": [],
+             "needs_review": [],
+             "not_recommended": []
+         }
+
+         for iteration in raw_results.get("iterations", []):
+             name = iteration.get("name", "unknown")
+             has_errors = len(iteration.get("console_errors", [])) > 0
+
+             if has_errors:
+                 readiness["not_recommended"].append({
+                     "name": name,
+                     "reason": "introduces_console_errors",
+                     "error_count": len(iteration.get("console_errors", []))
+                 })
+             elif iteration.get("changes", {}).get("layout_differences"):
+                 readiness["needs_review"].append({
+                     "name": name,
+                     "reason": "significant_layout_changes",
+                     "review_points": ["responsive_behavior", "cross_browser_compatibility"]
+                 })
+             else:
+                 readiness["safe_to_implement"].append({
+                     "name": name,
+                     "reason": "no_issues_detected"
+                 })
+
+         return readiness
+
+     def _generate_action_recommendations(self, raw_results: Dict, project_context: Optional[Dict]) -> List[Dict]:
+         """Generate specific action recommendations for Cursor"""
+
+         recommendations = []
+
+         # Analyze results and suggest actions
+         safe_iterations = [
+             iter for iter in raw_results.get("iterations", [])
+             if len(iter.get("console_errors", [])) == 0
+         ]
+
+         if safe_iterations:
+             best_iteration = safe_iterations[0] # Cursor should choose based on visual analysis
+             recommendations.append({
+                 "action": "implement_css_changes",
+                 "priority": "high",
+                 "iteration": best_iteration.get("name"),
+                 "css_to_apply": best_iteration.get("css_applied"),
+                 "target_files": self._identify_target_files(best_iteration, project_context),
+                 "implementation_notes": [
+                     "Test across multiple browsers",
+                     "Validate responsive behavior",
+                     "Run accessibility audit"
+                 ]
+             })
+
+         if any(len(iter.get("console_errors", [])) > 0 for iter in raw_results.get("iterations", [])):
+             recommendations.append({
+                 "action": "investigate_console_errors",
+                 "priority": "medium",
+                 "affected_iterations": [
+                     iter.get("name") for iter in raw_results.get("iterations", [])
+                     if len(iter.get("console_errors", [])) > 0
+                 ],
+                 "next_steps": [
+                     "Review CSS syntax and browser compatibility",
+                     "Test iterations individually",
+                     "Consider alternative CSS approaches"
+                 ]
+             })
+
+         return recommendations
+
+     def _identify_target_files(self, iteration: Dict, project_context: Optional[Dict]) -> List[str]:
+         """Identify which files should be modified for implementation"""
+
+         if not project_context:
+             return ["styles.css"] # Generic fallback
+
+         framework = project_context.get("framework", "")
+         component = project_context.get("component", "")
+
+         if framework == "react":
+             return [f"{component}.css", f"{component}.module.css", "globals.css"]
+         elif framework == "vue":
+             return [f"{component}.vue", "main.css"]
+         else:
+             return ["main.css", "styles.css"]
+
+     def _save_session_data(self, cursor_results: Dict):
+         """Save session data for Cursor reference and debugging"""
+
+         session_id = cursor_results["session_id"]
+         artifacts_base = Path.cwd() / ".cursorflow" / "artifacts"
+         session_file = artifacts_base / "sessions" / session_id / "cursor_analysis.json"
+         session_file.parent.mkdir(parents=True, exist_ok=True)
+
+         with open(session_file, 'w') as f:
+             json.dump(cursor_results, f, indent=2)
+
+         self.logger.info(f"Session data saved: {session_file}")
+
+     # Additional methods for UI testing results formatting...
+     def _format_timeline_for_cursor(self, timeline: List[Dict]) -> Dict:
+         """Format timeline for Cursor analysis"""
+         return {"formatted": True, "events": timeline} # Placeholder
+
+     def _identify_error_patterns(self, raw_results: Dict) -> Dict:
+         """Identify error patterns for Cursor analysis"""
+         return {"patterns": []} # Placeholder
+
+     def _format_correlations_for_cursor(self, raw_results: Dict) -> Dict:
+         """Format correlations for Cursor analysis"""
+         return {"correlations": []} # Placeholder
+
+     def _format_browser_diagnostics(self, raw_results: Dict) -> Dict:
+         """Format browser diagnostics for Cursor analysis"""
+         return {"diagnostics": {}} # Placeholder
+
+     def _generate_investigation_questions(self, raw_results: Dict, test_intent: str) -> List[str]:
+         """Generate investigation questions for Cursor"""
+         return [] # Placeholder
+
+     def _suggest_follow_up_tests(self, raw_results: Dict, test_intent: str) -> List[Dict]:
+         """Suggest follow-up tests for Cursor"""
+         return [] # Placeholder
+
+     def _identify_code_areas(self, raw_results: Dict, project_context: Optional[Dict]) -> List[str]:
+         """Identify code areas for Cursor to examine"""
+         return [] # Placeholder
+
+     def _generate_debugging_actions(self, raw_results: Dict, test_intent: str) -> List[Dict]:
+         """Generate debugging actions for Cursor"""
+         return [] # Placeholder
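
For orientation, a minimal sketch of how this module might be driven from a caller's side. The import path and the shape of raw_results are assumptions inferred from the keys the class reads above, not the package's documented API.

# Illustrative sketch only: the import path and the raw_results shape are
# assumptions inferred from the keys read by CursorIntegration, not from
# package documentation.
from cursorflow.cursor_integration import CursorIntegration  # assumed module path

raw_results = {
    "baseline": {"screenshot": True, "layout_metrics": {}, "computed_styles": {}},
    "iterations": [
        {
            "name": "tighter_spacing",
            "css_applied": ".card { gap: 8px; }",
            "screenshot": True,
            "console_errors": [],
            "changes": {},
        }
    ],
    "summary": {"successful_iterations": 1, "failed_iterations": 0},
}

integration = CursorIntegration()
report = integration.format_css_iteration_results(raw_results, session_id="demo-session")

# The formatter writes artifacts under .cursorflow/artifacts/sessions/<session_id>/
# and returns structured guidance, for example the recommended actions:
for action in report["recommended_actions"]:
    print(action["action"], action["priority"])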