cursorflow 2.6.3__py3-none-any.whl → 2.7.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,523 @@
+ """
+ Output Manager - Multi-File Result Organization
+
+ Transforms monolithic JSON results into organized multi-file structure
+ optimized for AI consumption. Pure data organization without analysis.
+ """
+
+ import json
+ import shutil
+ from pathlib import Path
+ from typing import Dict, Any, Optional
+ import logging
+ from datetime import datetime
+
+
+ class OutputManager:
+     """
+     Manages structured output generation for CursorFlow test results.
+
+     Splits comprehensive test data into organized files:
+     - summary.json: Core metrics and counts
+     - errors.json: Console errors with context
+     - network.json: Network requests/responses
+     - console.json: All console messages
+     - dom_analysis.json: Complete DOM and element data
+     - performance.json: Performance and timing metrics
+     - timeline.json: Complete event timeline
+     - server_logs.json: Server logs grouped by severity and source
+     - screenshots.json: Screenshot metadata index
+     """
+
+     def __init__(self, artifacts_base_dir: str = ".cursorflow/artifacts"):
+         self.artifacts_base_dir = Path(artifacts_base_dir)
+         self.logger = logging.getLogger(__name__)
+
+     def save_structured_results(
+         self,
+         results: Dict[str, Any],
+         session_id: str,
+         test_description: str = "test"
+     ) -> Dict[str, str]:
+         """
+         Save test results in structured multi-file format.
+
+         Args:
+             results: Complete test results dictionary
+             session_id: Unique session identifier
+             test_description: Brief description of test
+
+         Returns:
+             Dictionary mapping file types to their paths
+         """
+         # Create session directory
+         session_dir = self.artifacts_base_dir / "sessions" / session_id
+         session_dir.mkdir(parents=True, exist_ok=True)
+
+         file_paths = {}
+
+         # 1. Summary - Core metrics and counts
+         summary_data = self._extract_summary(results)
+         summary_path = session_dir / "summary.json"
+         self._write_json(summary_path, summary_data)
+         file_paths['summary'] = str(summary_path)
+
+         # 2. Errors - Console errors with context
+         errors_data = self._extract_errors(results)
+         errors_path = session_dir / "errors.json"
+         self._write_json(errors_path, errors_data)
+         file_paths['errors'] = str(errors_path)
+
+         # 3. Network - Requests and responses
+         network_data = self._extract_network(results)
+         network_path = session_dir / "network.json"
+         self._write_json(network_path, network_data)
+         file_paths['network'] = str(network_path)
+
+         # 4. Console - All console messages
+         console_data = self._extract_console(results)
+         console_path = session_dir / "console.json"
+         self._write_json(console_path, console_data)
+         file_paths['console'] = str(console_path)
+
+         # 5. DOM Analysis - Complete DOM and element data
+         dom_data = self._extract_dom_analysis(results)
+         dom_path = session_dir / "dom_analysis.json"
+         self._write_json(dom_path, dom_data)
+         file_paths['dom_analysis'] = str(dom_path)
+
+         # 6. Performance - Performance and timing metrics
+         performance_data = self._extract_performance(results)
+         performance_path = session_dir / "performance.json"
+         self._write_json(performance_path, performance_data)
+         file_paths['performance'] = str(performance_path)
+
+         # 7. Timeline - Complete event timeline
+         timeline_data = self._extract_timeline(results)
+         timeline_path = session_dir / "timeline.json"
+         self._write_json(timeline_path, timeline_data)
+         file_paths['timeline'] = str(timeline_path)
+
+         # 8. Server Logs - Dedicated server log file
+         server_logs_data = self._extract_server_logs(results)
+         server_logs_path = session_dir / "server_logs.json"
+         self._write_json(server_logs_path, server_logs_data)
+         file_paths['server_logs'] = str(server_logs_path)
+
+         # 9. Screenshots - Screenshot metadata index
+         screenshots_data = self._extract_screenshots_metadata(results)
+         screenshots_meta_path = session_dir / "screenshots.json"
+         self._write_json(screenshots_meta_path, screenshots_data)
+         file_paths['screenshots_metadata'] = str(screenshots_meta_path)
+
+         # 10. Mockup Comparison - If present
+         if 'mockup_comparison' in results:
+             mockup_data = self._extract_mockup_comparison(results)
+             mockup_path = session_dir / "mockup_comparison.json"
+             self._write_json(mockup_path, mockup_data)
+             file_paths['mockup_comparison'] = str(mockup_path)
+
+         # 11. Responsive Results - If present
+         if 'responsive_results' in results:
+             responsive_data = self._extract_responsive_results(results)
+             responsive_path = session_dir / "responsive_results.json"
+             self._write_json(responsive_path, responsive_data)
+             file_paths['responsive_results'] = str(responsive_path)
+
+         # 12. CSS Iterations - If present
+         if 'css_iterations' in results or 'iterations' in results:
+             css_data = self._extract_css_iterations(results)
+             css_path = session_dir / "css_iterations.json"
+             self._write_json(css_path, css_data)
+             file_paths['css_iterations'] = str(css_path)
+
+         # 13. Copy screenshot files into the session directory if they exist
+         screenshots_dir = session_dir / "screenshots"
+         screenshots_dir.mkdir(exist_ok=True)
+         self._organize_screenshots(results, screenshots_dir)
+         file_paths['screenshots'] = str(screenshots_dir)
+
+         # 14. Copy traces into the session directory if they exist
+         traces_dir = session_dir / "traces"
+         traces_dir.mkdir(exist_ok=True)
+         self._organize_traces(results, traces_dir)
+         file_paths['traces'] = str(traces_dir)
+
+         self.logger.info(f"Structured results saved to: {session_dir}")
+         return file_paths
+
+     def _extract_summary(self, results: Dict) -> Dict:
+         """Extract high-level summary data"""
+         comprehensive = results.get('comprehensive_data', {})
+         artifacts = results.get('artifacts', {})
+
+         # Count console errors and warnings
+         error_count = 0
+         warning_count = 0
+         console_screenshots = artifacts.get('console_errors', [])
+         for screenshot in console_screenshots:
+             if isinstance(screenshot, dict):
+                 console_data = screenshot.get('console_data', {})
+                 errors = console_data.get('errors', {})
+                 error_count += len(errors.get('logs', []))
+                 warnings = console_data.get('warnings', {})
+                 warning_count += len(warnings.get('logs', []))
+
+         # Count network requests
+         network_count = 0
+         failed_network_count = 0
+         for screenshot in artifacts.get('screenshots', []):
+             if isinstance(screenshot, dict):
+                 network_data = screenshot.get('network_data', {})
+                 requests = network_data.get('requests', [])
+                 network_count += len(requests)
+                 failed_requests = network_data.get('failed_requests', {})
+                 failed_network_count += len(failed_requests.get('requests', []))
+
+         # Count DOM elements
+         dom_element_count = 0
+         if comprehensive:
+             dom_analysis = comprehensive.get('dom_analysis', {})
+             dom_element_count = len(dom_analysis.get('elements', []))
+
+         return {
+             "session_id": results.get('session_id', 'unknown'),
+             "timestamp": datetime.now().isoformat(),
+             "success": results.get('success', False),
+             "execution_time": results.get('execution_time', 0),
+             "test_description": results.get('test_description', 'test'),
+             "metrics": {
+                 "total_errors": error_count,
+                 "total_warnings": warning_count,
+                 "total_network_requests": network_count,
+                 "failed_network_requests": failed_network_count,
+                 "total_dom_elements": dom_element_count,
+                 "total_screenshots": len(artifacts.get('screenshots', [])),
+                 "total_timeline_events": len(results.get('timeline', []))
+             },
+             "status": {
+                 "has_errors": error_count > 0,
+                 "has_network_failures": failed_network_count > 0,
+                 "has_warnings": warning_count > 0
+             }
+         }
+
+     def _extract_errors(self, results: Dict) -> Dict:
+         """Extract all error data with context"""
+         artifacts = results.get('artifacts', {})
+         errors = []
+
+         # Collect errors from all screenshots
+         for screenshot in artifacts.get('screenshots', []):
+             if isinstance(screenshot, dict):
+                 console_data = screenshot.get('console_data', {})
+                 error_logs = console_data.get('errors', {}).get('logs', [])
+
+                 for error in error_logs:
+                     errors.append({
+                         "message": error.get('message', ''),
+                         "source": error.get('source', ''),
+                         "line": error.get('line', 0),
+                         "column": error.get('column', 0),
+                         "stack_trace": error.get('stack_trace', ''),
+                         "timestamp": screenshot.get('timestamp', 0),
+                         "screenshot_name": screenshot.get('name', 'unknown'),
+                         "url": screenshot.get('url', '')
+                     })
+
+         # Organize by error type
+         error_types = {}
+         for error in errors:
+             error_type = self._categorize_error_type(error['message'])
+             if error_type not in error_types:
+                 error_types[error_type] = []
+             error_types[error_type].append(error)
+
+         return {
+             "total_errors": len(errors),
+             "errors_by_type": error_types,
+             "all_errors": errors,
+             "summary": {
+                 "has_critical_errors": len(errors) > 0,
+                 "unique_error_types": len(error_types)
+             }
+         }
+
+     def _extract_network(self, results: Dict) -> Dict:
+         """Extract network request/response data"""
+         artifacts = results.get('artifacts', {})
+         all_requests = []
+         failed_requests = []
+
+         for screenshot in artifacts.get('screenshots', []):
+             if isinstance(screenshot, dict):
+                 network_data = screenshot.get('network_data', {})
+                 requests = network_data.get('requests', [])
+
+                 for request in requests:
+                     all_requests.append({
+                         **request,
+                         "screenshot_name": screenshot.get('name', 'unknown'),
+                         "timestamp": screenshot.get('timestamp', 0)
+                     })
+
+                 failed = network_data.get('failed_requests', {}).get('requests', [])
+                 for request in failed:
+                     failed_requests.append({
+                         **request,
+                         "screenshot_name": screenshot.get('name', 'unknown'),
+                         "timestamp": screenshot.get('timestamp', 0)
+                     })
+
+         # Organize by status code
+         by_status_code = {}
+         for request in all_requests:
+             status = request.get('status_code', 0)
+             if status not in by_status_code:
+                 by_status_code[status] = []
+             by_status_code[status].append(request)
+
+         return {
+             "total_requests": len(all_requests),
+             "failed_requests": failed_requests,
+             "requests_by_status_code": by_status_code,
+             "all_requests": all_requests,
+             "summary": {
+                 "total_failed": len(failed_requests),
+                 "success_rate": (len(all_requests) - len(failed_requests)) / len(all_requests) * 100 if all_requests else 100
+             }
+         }
+
+     def _extract_console(self, results: Dict) -> Dict:
+         """Extract all console messages"""
+         artifacts = results.get('artifacts', {})
+         all_messages = []
+
+         for screenshot in artifacts.get('screenshots', []):
+             if isinstance(screenshot, dict):
+                 console_data = screenshot.get('console_data', {})
+
+                 # Collect all message types
+                 for msg_type in ['errors', 'warnings', 'logs', 'info']:
+                     messages = console_data.get(msg_type, {}).get('logs', [])
+                     for msg in messages:
+                         all_messages.append({
+                             "type": msg_type,
+                             "message": msg.get('message', ''),
+                             "source": msg.get('source', ''),
+                             "timestamp": screenshot.get('timestamp', 0),
+                             "screenshot_name": screenshot.get('name', 'unknown')
+                         })
+
+         # Organize by type
+         by_type = {}
+         for msg in all_messages:
+             msg_type = msg['type']
+             if msg_type not in by_type:
+                 by_type[msg_type] = []
+             by_type[msg_type].append(msg)
+
+         return {
+             "total_messages": len(all_messages),
+             "messages_by_type": by_type,
+             "all_messages": all_messages
+         }
+
+     def _extract_dom_analysis(self, results: Dict) -> Dict:
+         """Extract DOM and element data"""
+         comprehensive = results.get('comprehensive_data', {})
+         dom_analysis = comprehensive.get('dom_analysis', {})
+
+         return {
+             "total_elements": len(dom_analysis.get('elements', [])),
+             "elements": dom_analysis.get('elements', []),
+             "page_structure": dom_analysis.get('page_structure', {}),
+             "accessibility": comprehensive.get('accessibility', {})
+         }
+
+     def _extract_performance(self, results: Dict) -> Dict:
+         """Extract performance metrics"""
+         artifacts = results.get('artifacts', {})
+         performance_data = []
+
+         for screenshot in artifacts.get('screenshots', []):
+             if isinstance(screenshot, dict):
+                 perf = screenshot.get('performance_data', {})
+                 if perf:
+                     performance_data.append({
+                         "screenshot_name": screenshot.get('name', 'unknown'),
+                         "timestamp": screenshot.get('timestamp', 0),
+                         "metrics": perf
+                     })
+
+         return {
+             "execution_time": results.get('execution_time', 0),
+             "performance_snapshots": performance_data,
+             "summary": self._calculate_performance_summary(performance_data)
+         }
+
+     def _extract_timeline(self, results: Dict) -> Dict:
+         """Extract complete timeline data"""
+         return {
+             "organized_timeline": results.get('timeline', []),
+             "browser_events": results.get('browser_events', []),
+             "server_logs": results.get('server_logs', [])
+         }
+
+     def _extract_server_logs(self, results: Dict) -> Dict:
+         """Extract server logs with categorization"""
+         server_logs = results.get('server_logs', [])
+
+         # Organize by severity and source
+         by_severity = {}
+         by_source = {}
+
+         for log in server_logs:
+             # Group by severity
+             severity = log.get('severity', 'info')
+             if severity not in by_severity:
+                 by_severity[severity] = []
+             by_severity[severity].append(log)
+
+             # Group by source
+             source = log.get('source', 'unknown')
+             if source not in by_source:
+                 by_source[source] = []
+             by_source[source].append(log)
+
+         return {
+             "total_logs": len(server_logs),
+             "logs_by_severity": by_severity,
+             "logs_by_source": by_source,
+             "all_logs": server_logs
+         }
+
+     def _extract_screenshots_metadata(self, results: Dict) -> Dict:
+         """Extract screenshot metadata and index"""
+         artifacts = results.get('artifacts', {})
+         screenshots = artifacts.get('screenshots', [])
+
+         metadata = []
+         for idx, screenshot in enumerate(screenshots):
+             if isinstance(screenshot, dict):
+                 metadata.append({
+                     "index": idx,
+                     "name": screenshot.get('name', f'screenshot_{idx}'),
+                     "timestamp": screenshot.get('timestamp', 0),
+                     "url": screenshot.get('url', ''),
+                     "path": screenshot.get('path', ''),
+                     "has_errors": len(screenshot.get('console_data', {}).get('errors', {}).get('logs', [])) > 0,
+                     "has_network_failures": len(screenshot.get('network_data', {}).get('failed_requests', {}).get('requests', [])) > 0,
+                     "element_count": len(screenshot.get('dom_analysis', {}).get('elements', [])) if 'dom_analysis' in screenshot else 0
+                 })
+
+         return {
+             "total_screenshots": len(metadata),
+             "screenshots": metadata
+         }
+
+     def _extract_mockup_comparison(self, results: Dict) -> Dict:
+         """Extract mockup comparison results"""
+         mockup_data = results.get('mockup_comparison', {})
+
+         return {
+             "mockup_url": mockup_data.get('mockup_url', ''),
+             "implementation_url": mockup_data.get('implementation_url', ''),
+             "similarity_score": mockup_data.get('similarity_score', 0),
+             "differences": mockup_data.get('differences', []),
+             "iterations": mockup_data.get('iterations', [])
+         }
+
+     def _extract_responsive_results(self, results: Dict) -> Dict:
+         """Extract responsive testing results"""
+         responsive_data = results.get('responsive_results', {})
+
+         viewports = responsive_data.get('viewports', {})
+         comparison = responsive_data.get('comparison', {})
+
+         return {
+             "viewports": viewports,
+             "comparison": comparison,
+             "performance_by_viewport": responsive_data.get('performance', {})
+         }
+
+     def _extract_css_iterations(self, results: Dict) -> Dict:
+         """Extract CSS iteration results"""
+         iterations = results.get('iterations', results.get('css_iterations', []))
+
+         return {
+             "total_iterations": len(iterations) if isinstance(iterations, list) else 0,
+             "iterations": iterations,
+             "session_context": results.get('session_context', {})
+         }
+
+     def _organize_screenshots(self, results: Dict, screenshots_dir: Path):
+         """Copy screenshot files into the organized session directory"""
+         artifacts = results.get('artifacts', {})
+         screenshots = artifacts.get('screenshots', [])
+
+         for screenshot in screenshots:
+             if isinstance(screenshot, dict):
+                 screenshot_path = screenshot.get('path')
+                 if screenshot_path and Path(screenshot_path).exists():
+                     dest = screenshots_dir / Path(screenshot_path).name
+                     shutil.copy2(screenshot_path, dest)
+
+     def _organize_traces(self, results: Dict, traces_dir: Path):
+         """Copy trace files into the organized session directory"""
+         artifacts = results.get('artifacts', {})
+         traces = artifacts.get('traces', [])
+
+         for trace_path in traces:
+             if Path(trace_path).exists():
+                 dest = traces_dir / Path(trace_path).name
+                 shutil.copy2(trace_path, dest)
+
+     def _categorize_error_type(self, error_message: str) -> str:
+         """Categorize error by type based on message content"""
+         error_message_lower = error_message.lower()
+
+         if 'syntaxerror' in error_message_lower or 'unexpected' in error_message_lower:
+             return 'syntax_error'
+         elif 'referenceerror' in error_message_lower or 'not defined' in error_message_lower:
+             return 'reference_error'
+         elif 'typeerror' in error_message_lower:
+             return 'type_error'
+         elif 'networkerror' in error_message_lower or 'failed to fetch' in error_message_lower:
+             return 'network_error'
+         elif 'load' in error_message_lower:
+             return 'load_error'
+         else:
+             return 'other_error'
+
+     def _calculate_performance_summary(self, performance_data: list) -> Dict:
+         """Calculate aggregate performance metrics"""
+         if not performance_data:
+             return {}
+
+         # Extract metrics from snapshots
+         load_times = []
+         memory_usage = []
+
+         for snapshot in performance_data:
+             metrics = snapshot.get('metrics', {})
+             summary = metrics.get('performance_summary', {})
+
+             if 'page_load_time' in summary:
+                 load_times.append(summary['page_load_time'])
+             if 'memory_usage_mb' in summary:
+                 memory_usage.append(summary['memory_usage_mb'])
+
+         return {
+             "average_page_load_time": sum(load_times) / len(load_times) if load_times else 0,
+             "max_memory_usage": max(memory_usage) if memory_usage else 0,
+             "min_memory_usage": min(memory_usage) if memory_usage else 0
+         }
+
+     def _write_json(self, path: Path, data: Dict):
+         """Write JSON data to file with proper formatting"""
+         with open(path, 'w', encoding='utf-8') as f:
+             json.dump(data, f, indent=2, default=str, ensure_ascii=False)
+
+     def get_session_path(self, session_id: str) -> Path:
+         """Get path to session directory"""
+         return self.artifacts_base_dir / "sessions" / session_id
+
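
For orientation, the sketch below shows how the new OutputManager might be driven end to end. It is a minimal usage example, not code from the package: the import path cursorflow.core.output_manager and the shape of the results dictionary (keys such as artifacts, screenshots, server_logs, timeline) are assumptions inferred from what save_structured_results and the extractor methods above read.

# Minimal usage sketch (assumptions noted above; not part of the released package).
from cursorflow.core.output_manager import OutputManager  # module path is assumed

# A hand-built results dictionary whose keys mirror what the extractors read.
results = {
    "session_id": "demo-session",
    "success": True,
    "execution_time": 1.42,
    "artifacts": {
        "screenshots": [
            {
                "name": "after_login",
                "timestamp": 1700000000,
                "url": "http://localhost:3000/login",
                "path": "/tmp/after_login.png",
                "console_data": {"errors": {"logs": []}, "warnings": {"logs": []}},
                "network_data": {"requests": [], "failed_requests": {"requests": []}},
            }
        ],
        "traces": [],
    },
    "server_logs": [],
    "timeline": [],
}

manager = OutputManager(artifacts_base_dir=".cursorflow/artifacts")
file_paths = manager.save_structured_results(results, session_id="demo-session")

# save_structured_results returns a mapping such as
#   {"summary": ".cursorflow/artifacts/sessions/demo-session/summary.json", ...}
# alongside the screenshots/ and traces/ subdirectories it creates.
for kind, path in sorted(file_paths.items()):
    print(f"{kind}: {path}")

Because every extractor reads with dict.get and a default, sections missing from results simply produce empty JSON files rather than raising, which is what makes a partial dictionary like the one above workable.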