cursorflow 1.2.0__py3-none-any.whl → 1.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -742,3 +742,791 @@ class CursorIntegration:
742
742
  def _generate_debugging_actions(self, raw_results: Dict, test_intent: str) -> List[Dict]:
743
743
  """Generate debugging actions for Cursor"""
744
744
  return [] # Placeholder
745
+
746
def format_mockup_comparison_results(
    self,
    raw_results: Dict,
    session_id: str,
    project_context: Optional[Dict] = None
) -> Dict[str, Any]:
    """
    Format mockup comparison results for Cursor analysis

    Args:
        raw_results: Raw MockupComparator comparison results
        session_id: Unique session identifier
        project_context: {"framework": "react", "component": "dashboard", ...}

    Returns:
        Structured comparison data optimized for Cursor decision-making
    """
    summary = raw_results.get("summary", {})

    # High-level comparison overview.
    overview = {
        "mockup_url": raw_results.get("mockup_url"),
        "implementation_url": raw_results.get("implementation_url"),
        "comparison_id": raw_results.get("comparison_id"),
        "viewports_tested": raw_results.get("viewports_tested", 0),
        "overall_similarity": summary.get("average_similarity", 0),
        "needs_improvement": summary.get("needs_improvement", True),
    }

    # Visual difference analysis across viewports.
    visual_analysis = {
        "similarity_breakdown": self._analyze_similarity_breakdown(raw_results),
        "major_differences": self._extract_major_differences(raw_results),
        "layout_discrepancies": self._analyze_layout_discrepancies(raw_results),
        "element_mismatches": self._analyze_element_mismatches(raw_results),
    }

    # Decision support framework for Cursor.
    analysis_guide = {
        "evaluation_criteria": [
            "visual_similarity_score",
            "layout_structure_match",
            "element_positioning_accuracy",
            "color_and_typography_consistency",
            "responsive_behavior_alignment",
        ],
        "decision_questions": [
            "Which viewport has the best mockup match?",
            "What are the most critical visual differences to address?",
            "Are there layout structure issues or just styling differences?",
            "Which differences impact user experience most?",
            "What's the priority order for fixing discrepancies?",
        ],
        "improvement_strategy": self._generate_improvement_strategy(raw_results),
    }

    # Paths to generated artifacts (diffs, screenshots, reports).
    artifacts = {
        "visual_diffs": self._organize_visual_diff_artifacts(raw_results),
        "screenshots": self._organize_screenshot_artifacts(raw_results),
        "comparison_reports": self._organize_comparison_artifacts(raw_results),
    }

    cursor_results = {
        "session_id": session_id,
        "timestamp": time.time(),
        "test_type": "mockup_comparison",
        "project_context": project_context or {},
        "comparison_overview": overview,
        "viewport_analysis": self._format_viewport_results_for_cursor(raw_results.get("results", [])),
        "visual_analysis": visual_analysis,
        "cursor_analysis_guide": analysis_guide,
        "recommended_actions": self._generate_mockup_comparison_actions(raw_results, project_context),
        "artifact_management": artifacts,
    }

    # Persist session data before handing results back to Cursor.
    self._save_session_data(cursor_results)

    return cursor_results
def format_iterative_mockup_results(
    self,
    raw_results: Dict,
    session_id: str,
    project_context: Optional[Dict] = None
) -> Dict[str, Any]:
    """
    Format iterative mockup matching results for Cursor analysis

    Args:
        raw_results: Raw MockupComparator iterative_ui_matching results
        session_id: Unique session identifier
        project_context: Project context information

    Returns:
        Structured iteration data optimized for Cursor decision-making
    """
    summary = raw_results.get("summary", {})

    # High-level iteration overview.
    overview = {
        "mockup_url": raw_results.get("mockup_url"),
        "implementation_url": raw_results.get("implementation_url"),
        "total_iterations": raw_results.get("total_iterations", 0),
        "successful_iterations": summary.get("successful_iterations", 0),
        "total_improvement": summary.get("total_improvement", 0),
        "best_similarity_achieved": self._get_best_similarity(raw_results),
    }

    # Where the implementation stood before any iterations ran.
    baseline = {
        "initial_similarity": self._get_baseline_similarity(raw_results),
        "major_issues_identified": self._extract_baseline_issues(raw_results),
        "improvement_potential": self._assess_improvement_potential(raw_results),
    }

    # How similarity evolved across the iteration cycle.
    iteration_analysis = {
        "progression": self._analyze_iteration_progression(raw_results),
        "successful_changes": self._extract_successful_changes(raw_results),
        "failed_attempts": self._extract_failed_attempts(raw_results),
        "best_iteration": self._format_best_iteration_for_cursor(raw_results),
    }

    # Concrete guidance for applying the winning CSS changes.
    guidance = {
        "css_changes_to_apply": self._extract_css_changes_to_apply(raw_results),
        "implementation_order": self._recommend_implementation_order(raw_results),
        "testing_requirements": self._generate_testing_requirements(raw_results),
        "rollback_strategy": self._generate_rollback_strategy(raw_results),
    }

    # Decision support framework for Cursor.
    analysis_guide = {
        "evaluation_criteria": [
            "improvement_magnitude",
            "css_change_complexity",
            "risk_of_breaking_changes",
            "alignment_with_design_system",
            "cross_browser_compatibility",
        ],
        "decision_questions": [
            "Which CSS changes provide the most visual improvement?",
            "Are there any changes that might break existing functionality?",
            "What's the optimal order for implementing changes?",
            "Which changes should be tested most thoroughly?",
            "Are there alternative approaches for failed iterations?",
        ],
        "next_steps": self._generate_next_steps(raw_results),
    }

    cursor_results = {
        "session_id": session_id,
        "timestamp": time.time(),
        "test_type": "iterative_mockup_matching",
        "project_context": project_context or {},
        "iteration_overview": overview,
        "baseline_analysis": baseline,
        "iteration_analysis": iteration_analysis,
        "implementation_guidance": guidance,
        "cursor_analysis_guide": analysis_guide,
        "recommended_actions": self._generate_iterative_mockup_actions(raw_results, project_context),
    }

    # Persist session data before handing results back to Cursor.
    self._save_session_data(cursor_results)

    return cursor_results
def _format_viewport_results_for_cursor(self, viewport_results: List[Dict]) -> List[Dict]:
    """Format viewport comparison results for Cursor analysis."""
    formatted = []
    for entry in viewport_results:
        vp = entry.get("viewport", {})
        diff = entry.get("visual_diff", {})
        layout = entry.get("layout_analysis", {})
        score = diff.get("similarity_score", 0)

        formatted.append({
            "viewport_name": vp.get("name", "unknown"),
            "viewport_size": f"{vp.get('width', 0)}x{vp.get('height', 0)}",
            "similarity_score": score,
            "similarity_grade": self._grade_similarity(score),
            "major_differences": diff.get("major_differences", []),
            "layout_issues": len(layout.get("differences", [])),
            "visual_diff_path": diff.get("highlighted_diff"),
            "mockup_screenshot": entry.get("mockup_screenshot"),
            "implementation_screenshot": entry.get("implementation_screenshot"),
            # Below 75% similarity the viewport is flagged for review.
            "needs_attention": score < 75,
        })
    return formatted
+ def _grade_similarity(self, similarity_score: float) -> str:
937
+ """Grade similarity score for easier interpretation"""
938
+ if similarity_score >= 90:
939
+ return "excellent"
940
+ elif similarity_score >= 80:
941
+ return "good"
942
+ elif similarity_score >= 70:
943
+ return "fair"
944
+ elif similarity_score >= 60:
945
+ return "poor"
946
+ else:
947
+ return "critical"
948
+
949
+ def _analyze_similarity_breakdown(self, raw_results: Dict) -> Dict[str, Any]:
950
+ """Analyze similarity scores across viewports"""
951
+
952
+ results = raw_results.get("results", [])
953
+ if not results:
954
+ return {"error": "No viewport results available"}
955
+
956
+ similarities = [r.get("visual_diff", {}).get("similarity_score", 0) for r in results]
957
+
958
+ return {
959
+ "best_similarity": max(similarities) if similarities else 0,
960
+ "worst_similarity": min(similarities) if similarities else 0,
961
+ "average_similarity": sum(similarities) / len(similarities) if similarities else 0,
962
+ "similarity_range": max(similarities) - min(similarities) if similarities else 0,
963
+ "consistent_across_viewports": (max(similarities) - min(similarities)) < 10 if similarities else False
964
+ }
965
+
966
+ def _extract_major_differences(self, raw_results: Dict) -> List[Dict]:
967
+ """Extract major visual differences across all viewports"""
968
+
969
+ major_differences = []
970
+ for result in raw_results.get("results", []):
971
+ viewport = result.get("viewport", {})
972
+ visual_diff = result.get("visual_diff", {})
973
+
974
+ for diff in visual_diff.get("major_differences", []):
975
+ major_differences.append({
976
+ "viewport": viewport.get("name", "unknown"),
977
+ "region": diff,
978
+ "severity": "high" if diff.get("area", 0) > 10000 else "medium"
979
+ })
980
+
981
+ return major_differences
982
+
983
+ def _analyze_layout_discrepancies(self, raw_results: Dict) -> Dict[str, Any]:
984
+ """Analyze layout structure discrepancies"""
985
+
986
+ layout_issues = []
987
+ for result in raw_results.get("results", []):
988
+ layout_analysis = result.get("layout_analysis", {})
989
+ viewport = result.get("viewport", {})
990
+
991
+ for diff in layout_analysis.get("differences", []):
992
+ layout_issues.append({
993
+ "viewport": viewport.get("name", "unknown"),
994
+ "type": diff.get("type"),
995
+ "element": diff.get("selector"),
996
+ "issue": diff
997
+ })
998
+
999
+ # Categorize issues
1000
+ missing_elements = [issue for issue in layout_issues if issue["type"] == "missing_in_implementation"]
1001
+ extra_elements = [issue for issue in layout_issues if issue["type"] == "missing_in_mockup"]
1002
+ property_differences = [issue for issue in layout_issues if issue["type"] == "property_differences"]
1003
+
1004
+ return {
1005
+ "missing_elements": len(missing_elements),
1006
+ "extra_elements": len(extra_elements),
1007
+ "property_differences": len(property_differences),
1008
+ "total_issues": len(layout_issues),
1009
+ "critical_missing": [issue for issue in missing_elements if issue["element"] in ["nav", "header", "main", "footer"]],
1010
+ "detailed_issues": layout_issues
1011
+ }
1012
+
1013
+ def _analyze_element_mismatches(self, raw_results: Dict) -> Dict[str, Any]:
1014
+ """Analyze element-level mismatches"""
1015
+
1016
+ # This could be enhanced with more detailed element analysis
1017
+ return {
1018
+ "analysis": "Element mismatch analysis placeholder",
1019
+ "common_issues": ["positioning", "sizing", "styling"],
1020
+ "recommendations": ["Review CSS selectors", "Check responsive breakpoints", "Validate design tokens"]
1021
+ }
1022
+
1023
def _generate_improvement_strategy(self, raw_results: Dict) -> Dict[str, Any]:
    """Generate improvement strategy based on comparison results."""
    avg_similarity = raw_results.get("summary", {}).get("average_similarity", 0)

    # Bands: >=85 fine-tune, >=70 targeted fixes, otherwise full redesign.
    if avg_similarity >= 85:
        strategy, priority = "fine_tuning", "low"
        approach = "Minor adjustments to achieve pixel-perfect match"
    elif avg_similarity >= 70:
        strategy, priority = "targeted_improvements", "medium"
        approach = "Focus on major visual differences and layout issues"
    else:
        strategy, priority = "comprehensive_redesign", "high"
        approach = "Significant changes needed to match mockup design"

    return {
        "strategy": strategy,
        "priority": priority,
        "approach": approach,
        "estimated_effort": self._estimate_improvement_effort(avg_similarity),
        "recommended_phases": self._recommend_improvement_phases(raw_results),
    }
+ def _estimate_improvement_effort(self, similarity: float) -> str:
1051
+ """Estimate effort required for improvement"""
1052
+ if similarity >= 85:
1053
+ return "low"
1054
+ elif similarity >= 70:
1055
+ return "medium"
1056
+ elif similarity >= 50:
1057
+ return "high"
1058
+ else:
1059
+ return "very_high"
1060
+
1061
def _recommend_improvement_phases(self, raw_results: Dict) -> List[Dict]:
    """Recommend phases for improvement implementation."""
    phases: List[Dict] = []

    # Phase 1 applies only when structural problems were detected.
    layout = self._analyze_layout_discrepancies(raw_results)
    if layout["missing_elements"] > 0 or layout["critical_missing"]:
        phases.append({
            "phase": 1,
            "name": "Fix Critical Layout Issues",
            "description": "Address missing elements and major layout structure problems",
            "priority": "high",
            "estimated_time": "2-4 hours",
        })

    # Phase 2 applies only when large visual regions differ.
    if self._extract_major_differences(raw_results):
        phases.append({
            "phase": 2,
            "name": "Visual Styling Improvements",
            "description": "Address major visual differences in colors, typography, spacing",
            "priority": "medium",
            "estimated_time": "1-3 hours",
        })

    # Phase 3 is always recommended as the finishing pass.
    phases.append({
        "phase": 3,
        "name": "Fine-tuning and Polish",
        "description": "Minor adjustments for pixel-perfect match",
        "priority": "low",
        "estimated_time": "1-2 hours",
    })

    return phases
def _generate_mockup_comparison_actions(self, raw_results: Dict, project_context: Optional[Dict]) -> List[Dict]:
    """Generate specific actions for mockup comparison results."""
    actions: List[Dict] = []

    # Overall similarity below 70% warrants a broad rework action.
    if raw_results.get("summary", {}).get("average_similarity", 0) < 70:
        actions.append({
            "action": "address_critical_differences",
            "priority": "high",
            "description": "Significant visual differences detected - major improvements needed",
            "implementation": "Review layout structure and major styling differences",
            "estimated_time": "3-6 hours",
        })

    # Missing elements get a dedicated action listing the affected selectors.
    layout_issues = self._analyze_layout_discrepancies(raw_results)
    if layout_issues["missing_elements"] > 0:
        missing_selectors = [
            issue["element"]
            for issue in layout_issues["detailed_issues"]
            if issue["type"] == "missing_in_implementation"
        ]
        actions.append({
            "action": "add_missing_elements",
            "priority": "high",
            "description": f"Add {layout_issues['missing_elements']} missing elements to match mockup",
            "implementation": "Review mockup for missing components and add to implementation",
            "affected_elements": missing_selectors,
        })

    # Viewports below 60% similarity indicate responsive-design problems.
    poor_viewports = [
        r for r in raw_results.get("results", [])
        if r.get("visual_diff", {}).get("similarity_score", 0) < 60
    ]
    if poor_viewports:
        actions.append({
            "action": "fix_responsive_issues",
            "priority": "medium",
            "description": f"Poor similarity on {len(poor_viewports)} viewport(s) - responsive design issues",
            "implementation": "Review CSS media queries and responsive design patterns",
            "affected_viewports": [r.get("viewport", {}).get("name", "unknown") for r in poor_viewports],
        })

    return actions
+ def _get_best_similarity(self, raw_results: Dict) -> float:
1141
+ """Get the best similarity score achieved across all iterations"""
1142
+
1143
+ best_similarity = 0
1144
+ for iteration in raw_results.get("iterations", []):
1145
+ similarity = iteration.get("improvement_metrics", {}).get("improved_similarity", 0)
1146
+ best_similarity = max(best_similarity, similarity)
1147
+
1148
+ return best_similarity
1149
+
1150
+ def _get_baseline_similarity(self, raw_results: Dict) -> float:
1151
+ """Get baseline similarity before iterations"""
1152
+
1153
+ baseline_comparison = raw_results.get("baseline_comparison", {})
1154
+ if "results" in baseline_comparison and baseline_comparison["results"]:
1155
+ return baseline_comparison["results"][0].get("visual_diff", {}).get("similarity_score", 0)
1156
+ return 0
1157
+
1158
+ def _extract_baseline_issues(self, raw_results: Dict) -> List[Dict]:
1159
+ """Extract major issues identified in baseline comparison"""
1160
+
1161
+ baseline_comparison = raw_results.get("baseline_comparison", {})
1162
+ recommendations = baseline_comparison.get("recommendations", [])
1163
+
1164
+ return [
1165
+ {
1166
+ "type": rec.get("type", "unknown"),
1167
+ "description": rec.get("description", ""),
1168
+ "priority": rec.get("priority", "medium")
1169
+ }
1170
+ for rec in recommendations
1171
+ ]
1172
+
1173
def _assess_improvement_potential(self, raw_results: Dict) -> Dict[str, Any]:
    """Assess potential for improvement based on baseline."""
    baseline = self._get_baseline_similarity(raw_results)

    # Higher baselines leave less headroom for improvement.
    if baseline >= 80:
        potential, description = "low", "Already close to mockup - minor improvements possible"
    elif baseline >= 60:
        potential, description = "medium", "Good foundation - moderate improvements achievable"
    else:
        potential, description = "high", "Significant room for improvement"

    return {
        "potential": potential,
        "description": description,
        "baseline_similarity": baseline,
        "target_similarity": min(95, baseline + 20),  # Realistic target
    }
+ def _analyze_iteration_progression(self, raw_results: Dict) -> Dict[str, Any]:
1196
+ """Analyze how similarity improved across iterations"""
1197
+
1198
+ iterations = raw_results.get("iterations", [])
1199
+ progression = []
1200
+
1201
+ for i, iteration in enumerate(iterations):
1202
+ metrics = iteration.get("improvement_metrics", {})
1203
+ progression.append({
1204
+ "iteration": i + 1,
1205
+ "name": iteration.get("css_change", {}).get("name", "unnamed"),
1206
+ "similarity": metrics.get("improved_similarity", 0),
1207
+ "improvement": metrics.get("improvement", 0),
1208
+ "is_improvement": metrics.get("is_improvement", False)
1209
+ })
1210
+
1211
+ # Calculate trends
1212
+ improvements = [p["improvement"] for p in progression]
1213
+ total_improvement = sum(improvements)
1214
+ positive_iterations = len([p for p in progression if p["is_improvement"]])
1215
+
1216
+ return {
1217
+ "progression_data": progression,
1218
+ "total_improvement": total_improvement,
1219
+ "positive_iterations": positive_iterations,
1220
+ "success_rate": positive_iterations / len(progression) if progression else 0,
1221
+ "trend": "improving" if total_improvement > 0 else "declining"
1222
+ }
1223
+
1224
+ def _extract_successful_changes(self, raw_results: Dict) -> List[Dict]:
1225
+ """Extract CSS changes that improved similarity"""
1226
+
1227
+ successful_changes = []
1228
+ for iteration in raw_results.get("iterations", []):
1229
+ if iteration.get("improvement_metrics", {}).get("is_improvement", False):
1230
+ css_change = iteration.get("css_change", {})
1231
+ successful_changes.append({
1232
+ "name": css_change.get("name", "unnamed"),
1233
+ "css": css_change.get("css", ""),
1234
+ "rationale": css_change.get("rationale", ""),
1235
+ "improvement": iteration.get("improvement_metrics", {}).get("improvement", 0),
1236
+ "final_similarity": iteration.get("improvement_metrics", {}).get("improved_similarity", 0)
1237
+ })
1238
+
1239
+ return successful_changes
1240
+
1241
def _extract_failed_attempts(self, raw_results: Dict) -> List[Dict]:
    """Extract CSS changes that didn't improve similarity."""
    failures = []
    for iteration in raw_results.get("iterations", []):
        metrics = iteration.get("improvement_metrics", {})
        if metrics.get("is_improvement", False):
            continue  # Only non-improving iterations belong here.
        change = iteration.get("css_change", {})
        failures.append({
            "name": change.get("name", "unnamed"),
            "css": change.get("css", ""),
            "rationale": change.get("rationale", ""),
            "impact": metrics.get("improvement", 0),
            "reason_for_failure": self._analyze_failure_reason(iteration),
        })
    return failures
+ def _analyze_failure_reason(self, iteration: Dict) -> str:
1259
+ """Analyze why an iteration failed to improve similarity"""
1260
+
1261
+ # Check for console errors
1262
+ css_result = iteration.get("css_result", {})
1263
+ if css_result.get("console_errors"):
1264
+ return "introduced_console_errors"
1265
+
1266
+ # Check for negative impact
1267
+ improvement = iteration.get("improvement_metrics", {}).get("improvement", 0)
1268
+ if improvement < -5:
1269
+ return "significantly_worsened_appearance"
1270
+ elif improvement < 0:
1271
+ return "minor_negative_impact"
1272
+ else:
1273
+ return "no_measurable_improvement"
1274
+
1275
+ def _format_best_iteration_for_cursor(self, raw_results: Dict) -> Optional[Dict]:
1276
+ """Format the best iteration for Cursor analysis"""
1277
+
1278
+ best_iteration_data = raw_results.get("best_iteration")
1279
+ if not best_iteration_data:
1280
+ return None
1281
+
1282
+ return {
1283
+ "iteration_number": best_iteration_data.get("iteration_number"),
1284
+ "name": best_iteration_data.get("css_change", {}).get("name", "unnamed"),
1285
+ "css_changes": best_iteration_data.get("css_change", {}).get("css", ""),
1286
+ "rationale": best_iteration_data.get("css_change", {}).get("rationale", ""),
1287
+ "similarity_achieved": best_iteration_data.get("similarity_achieved", 0),
1288
+ "improvement_amount": best_iteration_data.get("improvement", 0),
1289
+ "implementation_ready": True, # Best iteration is typically safe to implement
1290
+ "testing_notes": [
1291
+ "Validate across all target browsers",
1292
+ "Test responsive behavior",
1293
+ "Check for accessibility impact"
1294
+ ]
1295
+ }
1296
+
1297
def _extract_css_changes_to_apply(self, raw_results: Dict) -> List[Dict]:
    """Extract CSS changes that should be applied to the codebase.

    Args:
        raw_results: Raw iterative matching results with "iterations"
            and an optional "best_iteration" entry.

    Returns:
        Deduplicated change dicts, best iteration first.

    Fix: the best-iteration entry now carries its "improvement" value
    (available on raw_results["best_iteration"]), so downstream ordering
    and rationale strings no longer treat the highest-impact change as a
    0% improvement. Adding the key is backward-compatible.
    """
    successful_changes = self._extract_successful_changes(raw_results)
    best_iteration = raw_results.get("best_iteration")

    changes_to_apply: List[Dict] = []

    # Always include the best iteration first when it exists.
    if best_iteration:
        css_change = best_iteration.get("css_change", {})
        changes_to_apply.append({
            "name": css_change.get("name", "best_iteration"),
            "css": css_change.get("css", ""),
            "rationale": css_change.get("rationale", ""),
            "improvement": best_iteration.get("improvement", 0),
            "priority": "high",
            "reason": "best_overall_improvement",
        })

    # Include remaining winners, skipping any name already queued
    # (covers both the best iteration and duplicates among winners).
    queued_names = {c["name"] for c in changes_to_apply}
    for change in successful_changes:
        if change["name"] not in queued_names:
            changes_to_apply.append({
                **change,
                "priority": "medium",
                "reason": "positive_improvement",
            })
            queued_names.add(change["name"])

    return changes_to_apply
def _recommend_implementation_order(self, raw_results: Dict) -> List[Dict]:
    """Recommend order for implementing CSS changes.

    Args:
        raw_results: Raw iterative matching results.

    Returns:
        Ordered change descriptors with per-change validation steps.

    Fix: the original sorted purely on change.get("improvement", 0), which
    pushed the "high"-priority best-iteration entry (whose dict carries no
    "improvement" key) behind every lower-impact change. Sorting now ranks
    high-priority entries first, then by improvement amount.
    """
    changes = self._extract_css_changes_to_apply(raw_results)

    # High-priority entries first, then by improvement amount (highest first).
    changes.sort(
        key=lambda c: (c.get("priority") == "high", c.get("improvement", 0)),
        reverse=True,
    )

    implementation_order = []
    for position, change in enumerate(changes, start=1):
        implementation_order.append({
            "order": position,
            "name": change["name"],
            "css": change["css"],
            "rationale": f"Implement {change['name']} - provides {change.get('improvement', 0):.1f}% improvement",
            "validation_steps": [
                "Apply CSS changes",
                "Test visual appearance",
                "Validate responsive behavior",
                "Run mockup comparison again",
            ],
        })

    return implementation_order
+ def _generate_testing_requirements(self, raw_results: Dict) -> List[Dict]:
1354
+ """Generate testing requirements for implementation"""
1355
+
1356
+ requirements = []
1357
+
1358
+ # Viewport testing
1359
+ viewports_tested = raw_results.get("viewports_tested", 0)
1360
+ if viewports_tested > 1:
1361
+ requirements.append({
1362
+ "type": "responsive_testing",
1363
+ "description": f"Test across {viewports_tested} viewport sizes",
1364
+ "priority": "high",
1365
+ "tools": ["Browser dev tools", "Responsive design mode"]
1366
+ })
1367
+
1368
+ # Visual regression testing
1369
+ requirements.append({
1370
+ "type": "visual_regression",
1371
+ "description": "Compare implementation to mockup after changes",
1372
+ "priority": "high",
1373
+ "tools": ["CursorFlow mockup comparison", "Visual diff tools"]
1374
+ })
1375
+
1376
+ # Cross-browser testing
1377
+ requirements.append({
1378
+ "type": "cross_browser",
1379
+ "description": "Validate appearance across major browsers",
1380
+ "priority": "medium",
1381
+ "tools": ["Chrome", "Firefox", "Safari", "Edge"]
1382
+ })
1383
+
1384
+ return requirements
1385
+
1386
+ def _generate_rollback_strategy(self, raw_results: Dict) -> Dict[str, Any]:
1387
+ """Generate rollback strategy in case of issues"""
1388
+
1389
+ return {
1390
+ "backup_recommendation": "Create git branch before implementing changes",
1391
+ "rollback_triggers": [
1392
+ "Visual regression in production",
1393
+ "Responsive layout breaks",
1394
+ "Accessibility issues introduced",
1395
+ "Performance degradation"
1396
+ ],
1397
+ "rollback_steps": [
1398
+ "Revert CSS changes",
1399
+ "Clear browser cache",
1400
+ "Test original functionality",
1401
+ "Run mockup comparison to confirm baseline"
1402
+ ],
1403
+ "monitoring": "Monitor user feedback and analytics for visual issues"
1404
+ }
1405
+
1406
def _generate_next_steps(self, raw_results: Dict) -> List[Dict]:
    """Generate next steps based on iteration results."""
    next_steps: List[Dict] = []

    # Step 1: apply whatever already worked.
    successful_changes = self._extract_successful_changes(raw_results)
    if successful_changes:
        next_steps.append({
            "step": "implement_successful_changes",
            "description": f"Apply {len(successful_changes)} successful CSS changes to codebase",
            "priority": "high",
            "estimated_time": "1-2 hours",
        })

    # Step 2: below 85% similarity, another improvement pass is worthwhile.
    best_similarity = self._get_best_similarity(raw_results)
    if best_similarity < 85:
        next_steps.append({
            "step": "additional_iterations",
            "description": f"Current best similarity: {best_similarity}% - consider additional improvements",
            "priority": "medium",
            "estimated_time": "2-4 hours",
        })

    # Step 3: validation is always required.
    next_steps.append({
        "step": "validate_implementation",
        "description": "Test implementation across browsers and devices",
        "priority": "high",
        "estimated_time": "1 hour",
    })

    return next_steps
def _generate_iterative_mockup_actions(self, raw_results: Dict, project_context: Optional[Dict]) -> List[Dict]:
    """Generate specific actions for iterative mockup matching results."""
    actions: List[Dict] = []

    # Apply the changes that demonstrably improved similarity.
    changes_to_apply = self._extract_css_changes_to_apply(raw_results)
    if changes_to_apply:
        actions.append({
            "action": "apply_successful_css_changes",
            "priority": "high",
            "description": f"Apply {len(changes_to_apply)} successful CSS changes",
            "implementation": "Follow recommended implementation order",
            "changes": changes_to_apply,
            "estimated_time": "1-3 hours",
        })

    # Below 90% there is still measurable headroom for further iterations.
    best_similarity = self._get_best_similarity(raw_results)
    if best_similarity < 90:
        actions.append({
            "action": "continue_improvement_iterations",
            "priority": "medium",
            "description": f"Best similarity achieved: {best_similarity}% - room for improvement",
            "implementation": "Design additional CSS improvements and run another iteration cycle",
            "target_similarity": "90%+",
            "estimated_time": "2-4 hours",
        })

    # Validation is always the closing action.
    actions.append({
        "action": "comprehensive_testing",
        "priority": "high",
        "description": "Validate implementation across browsers and devices",
        "implementation": "Follow generated testing requirements",
        "testing_requirements": self._generate_testing_requirements(raw_results),
        "estimated_time": "1-2 hours",
    })

    return actions
+ def _organize_visual_diff_artifacts(self, raw_results: Dict) -> List[Dict]:
1483
+ """Organize visual diff artifacts for easy access"""
1484
+
1485
+ artifacts = []
1486
+ for result in raw_results.get("results", []):
1487
+ viewport = result.get("viewport", {})
1488
+ visual_diff = result.get("visual_diff", {})
1489
+
1490
+ if visual_diff.get("highlighted_diff"):
1491
+ artifacts.append({
1492
+ "type": "visual_diff",
1493
+ "viewport": viewport.get("name", "unknown"),
1494
+ "path": visual_diff.get("highlighted_diff"),
1495
+ "similarity_score": visual_diff.get("similarity_score", 0)
1496
+ })
1497
+
1498
+ return artifacts
1499
+
1500
+ def _organize_screenshot_artifacts(self, raw_results: Dict) -> List[Dict]:
1501
+ """Organize screenshot artifacts for easy access"""
1502
+
1503
+ artifacts = []
1504
+ for result in raw_results.get("results", []):
1505
+ viewport = result.get("viewport", {})
1506
+
1507
+ # Mockup screenshot
1508
+ if result.get("mockup_screenshot"):
1509
+ artifacts.append({
1510
+ "type": "mockup_screenshot",
1511
+ "viewport": viewport.get("name", "unknown"),
1512
+ "path": result.get("mockup_screenshot")
1513
+ })
1514
+
1515
+ # Implementation screenshot
1516
+ if result.get("implementation_screenshot"):
1517
+ artifacts.append({
1518
+ "type": "implementation_screenshot",
1519
+ "viewport": viewport.get("name", "unknown"),
1520
+ "path": result.get("implementation_screenshot")
1521
+ })
1522
+
1523
+ return artifacts
1524
+
1525
+ def _organize_comparison_artifacts(self, raw_results: Dict) -> List[Dict]:
1526
+ """Organize comparison report artifacts"""
1527
+
1528
+ return [{
1529
+ "type": "comparison_report",
1530
+ "comparison_id": raw_results.get("comparison_id"),
1531
+ "path": f".cursorflow/artifacts/mockup_comparisons/{raw_results.get('comparison_id')}.json"
1532
+ }]