cursorflow 2.6.3__py3-none-any.whl → 2.7.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,639 @@
+ """
+ Output Manager - Multi-File Result Organization
+
+ Transforms monolithic JSON results into organized multi-file structure
+ optimized for AI consumption. Pure data organization without analysis.
+ """
+
+ import json
+ import shutil
+ from pathlib import Path
+ from typing import Dict, Any, Optional
+ import logging
+ from datetime import datetime
+
+
+ class OutputManager:
+     """
+     Manages structured output generation for CursorFlow test results.
+
+     Splits comprehensive test data into organized files:
+     - summary.json: Core metrics and counts
+     - errors.json: Console errors with context
+     - network.json: Network requests/responses
+     - console.json: All console messages
+     - dom_analysis.json: Complete DOM and element data
+     - performance.json: Performance and timing metrics
+     - data_digest.md: AI-optimized data presentation
+     """
+
+     def __init__(self, artifacts_base_dir: str = ".cursorflow/artifacts"):
+         self.artifacts_base_dir = Path(artifacts_base_dir)
+         self.logger = logging.getLogger(__name__)
+
+     def save_structured_results(
+         self,
+         results: Dict[str, Any],
+         session_id: str,
+         test_description: str = "test"
+     ) -> Dict[str, str]:
+         """
+         Save test results in structured multi-file format.
+
+         Args:
+             results: Complete test results dictionary
+             session_id: Unique session identifier
+             test_description: Brief description of test
+
+         Returns:
+             Dictionary mapping file types to their paths
+         """
+         # Create session directory
+         session_dir = self.artifacts_base_dir / "sessions" / session_id
+         session_dir.mkdir(parents=True, exist_ok=True)
+
+         file_paths = {}
+
+         # 1. Summary - Core metrics and counts
+         summary_data = self._extract_summary(results)
+         summary_path = session_dir / "summary.json"
+         self._write_json(summary_path, summary_data)
+         file_paths['summary'] = str(summary_path)
+
+         # 2. Errors - Console errors with context
+         errors_data = self._extract_errors(results)
+         errors_path = session_dir / "errors.json"
+         self._write_json(errors_path, errors_data)
+         file_paths['errors'] = str(errors_path)
+
+         # 3. Network - Requests and responses
+         network_data = self._extract_network(results)
+         network_path = session_dir / "network.json"
+         self._write_json(network_path, network_data)
+         file_paths['network'] = str(network_path)
+
+         # 4. Console - All console messages
+         console_data = self._extract_console(results)
+         console_path = session_dir / "console.json"
+         self._write_json(console_path, console_data)
+         file_paths['console'] = str(console_path)
+
+         # 5. DOM Analysis - Complete DOM and element data
+         dom_data = self._extract_dom_analysis(results)
+         dom_path = session_dir / "dom_analysis.json"
+         self._write_json(dom_path, dom_data)
+         file_paths['dom_analysis'] = str(dom_path)
+
+         # 6. Performance - Performance and timing metrics
+         performance_data = self._extract_performance(results)
+         performance_path = session_dir / "performance.json"
+         self._write_json(performance_path, performance_data)
+         file_paths['performance'] = str(performance_path)
+
+         # 7. Timeline - Complete event timeline
+         timeline_data = self._extract_timeline(results)
+         timeline_path = session_dir / "timeline.json"
+         self._write_json(timeline_path, timeline_data)
+         file_paths['timeline'] = str(timeline_path)
+
+         # 8. Server Logs - Dedicated server log file
+         server_logs_data = self._extract_server_logs(results)
+         server_logs_path = session_dir / "server_logs.json"
+         self._write_json(server_logs_path, server_logs_data)
+         file_paths['server_logs'] = str(server_logs_path)
+
+         # 9. Screenshots - Screenshot metadata index
+         screenshots_data = self._extract_screenshots_metadata(results)
+         screenshots_meta_path = session_dir / "screenshots.json"
+         self._write_json(screenshots_meta_path, screenshots_data)
+         file_paths['screenshots_metadata'] = str(screenshots_meta_path)
+
+         # 10. Mockup Comparison - If present
+         if 'mockup_comparison' in results:
+             mockup_data = self._extract_mockup_comparison(results)
+             mockup_path = session_dir / "mockup_comparison.json"
+             self._write_json(mockup_path, mockup_data)
+             file_paths['mockup_comparison'] = str(mockup_path)
+
+         # 11. Responsive Results - If present
+         if 'responsive_results' in results:
+             responsive_data = self._extract_responsive_results(results)
+             responsive_path = session_dir / "responsive_results.json"
+             self._write_json(responsive_path, responsive_data)
+             file_paths['responsive_results'] = str(responsive_path)
+
+         # 12. CSS Iterations - If present
+         if 'css_iterations' in results or 'iterations' in results:
+             css_data = self._extract_css_iterations(results)
+             css_path = session_dir / "css_iterations.json"
+             self._write_json(css_path, css_data)
+             file_paths['css_iterations'] = str(css_path)
+
+         # 13. Copy screenshot files to session directory if they exist
+         screenshots_dir = session_dir / "screenshots"
+         screenshots_dir.mkdir(exist_ok=True)
+         self._organize_screenshots(results, screenshots_dir)
+         file_paths['screenshots'] = str(screenshots_dir)
+
+         # 14. Copy traces to session directory if they exist
+         traces_dir = session_dir / "traces"
+         traces_dir.mkdir(exist_ok=True)
+         self._organize_traces(results, traces_dir)
+         file_paths['traces'] = str(traces_dir)
+
+         self.logger.info(f"Structured results saved to: {session_dir}")
+         return file_paths
+
+     def _extract_summary(self, results: Dict) -> Dict:
+         """Extract high-level summary data"""
+         # Load comprehensive data from disk if available
+         comprehensive = self._load_comprehensive_data(results)
+         artifacts = results.get('artifacts', {})
+
+         # Count errors from comprehensive_data
+         error_count = 0
+         warning_count = 0
+         if comprehensive:
+             console_data = comprehensive.get('console_data', {})
+             all_console_logs = console_data.get('all_console_logs', [])
+             error_count = len([log for log in all_console_logs if log.get('type') == 'error'])
+             warning_count = len([log for log in all_console_logs if log.get('type') == 'warning'])
+
+         # Count network requests from comprehensive_data
+         network_count = 0
+         failed_network_count = 0
+         if comprehensive:
+             network_data = comprehensive.get('network_data', {})
+             all_network_events = network_data.get('all_network_events', [])
+             network_count = len(all_network_events)
+             # Count failures (4xx, 5xx)
+             failed_network_count = len([req for req in all_network_events if req.get('status', 0) >= 400])
+
+         # Count DOM elements
+         dom_element_count = 0
+         if comprehensive:
+             dom_analysis = comprehensive.get('dom_analysis', {})
+             dom_element_count = len(dom_analysis.get('elements', []))
+
+         return {
+             "session_id": results.get('session_id', 'unknown'),
+             "timestamp": datetime.now().isoformat(),
+             "success": results.get('success', False),
+             "execution_time": results.get('execution_time', 0),
+             "test_description": results.get('test_description', 'test'),
+             "metrics": {
+                 "total_errors": error_count,
+                 "total_warnings": warning_count,
+                 "total_network_requests": network_count,
+                 "failed_network_requests": failed_network_count,
+                 "total_dom_elements": dom_element_count,
+                 "total_screenshots": len(artifacts.get('screenshots', [])),
+                 "total_timeline_events": len(results.get('timeline', []))
+             },
+             "status": {
+                 "has_errors": error_count > 0,
+                 "has_network_failures": failed_network_count > 0,
+                 "has_warnings": warning_count > 0
+             }
+         }
+
+     def _extract_errors(self, results: Dict) -> Dict:
+         """Extract all error data with context"""
+         errors = []
+
+         # Load comprehensive data from disk if available
+         comprehensive_data = self._load_comprehensive_data(results)
+
+         if comprehensive_data:
+             console_data = comprehensive_data.get('console_data', {})
+             all_console_logs = console_data.get('all_console_logs', [])
+
+             # Filter for errors
+             for log in all_console_logs:
+                 if log.get('type') == 'error':
+                     # Extract location info if present
+                     location = log.get('location', {})
+                     errors.append({
+                         "message": log.get('text', ''),
+                         "source": location.get('url', ''),
+                         "line": location.get('lineNumber', 0),
+                         "column": location.get('columnNumber', 0),
+                         "stack_trace": log.get('stackTrace', {}).get('callFrames', []),
+                         "timestamp": log.get('timestamp', 0),
+                         "screenshot_name": 'comprehensive',
+                         "url": location.get('url', '')
+                     })
+         else:
+             # Fallback: Collect errors from screenshot artifacts (old structure)
+             # Only if comprehensive_data not available
+             artifacts = results.get('artifacts', {})
+             for screenshot in artifacts.get('screenshots', []):
+                 if isinstance(screenshot, dict):
+                     console_data = screenshot.get('console_data', {})
+                     error_logs = console_data.get('errors', {}).get('logs', [])
+
+                     for error in error_logs:
+                         errors.append({
+                             "message": error.get('message', ''),
+                             "source": error.get('source', ''),
+                             "line": error.get('line', 0),
+                             "column": error.get('column', 0),
+                             "stack_trace": error.get('stack_trace', ''),
+                             "timestamp": screenshot.get('timestamp', 0),
+                             "screenshot_name": screenshot.get('name', 'unknown'),
+                             "url": screenshot.get('url', '')
+                         })
+
+         # Organize by error type
+         error_types = {}
+         for error in errors:
+             error_type = self._categorize_error_type(error['message'])
+             if error_type not in error_types:
+                 error_types[error_type] = []
+             error_types[error_type].append(error)
+
+         return {
+             "total_errors": len(errors),
+             "errors_by_type": error_types,
+             "all_errors": errors,
+             "summary": {
+                 "has_critical_errors": len(errors) > 0,
+                 "unique_error_types": len(error_types)
+             }
+         }
+
+     def _extract_network(self, results: Dict) -> Dict:
+         """Extract network request/response data"""
+         all_requests = []
+         failed_requests = []
+
+         # Load comprehensive data from disk if available
+         comprehensive_data = self._load_comprehensive_data(results)
+
+         if comprehensive_data:
+             network_data = comprehensive_data.get('network_data', {})
+             all_network_events = network_data.get('all_network_events', [])
+
+             # Add all network events and identify failures
+             for event in all_network_events:
+                 request_data = {
+                     "url": event.get('url', ''),
+                     "method": event.get('method', 'GET'),
+                     "status_code": event.get('status', 0),
+                     "timestamp": event.get('timestamp', 0),
+                     "timing": event.get('timing', {}),
+                     "screenshot_name": 'comprehensive'
+                 }
+                 all_requests.append(request_data)
+
+                 # Identify failed requests (4xx, 5xx status codes)
+                 status = event.get('status', 0)
+                 if status >= 400:
+                     failed_requests.append(request_data)
+         else:
+             # Fallback: Collect from screenshot artifacts (old structure)
+             # Only if comprehensive_data not available
+             artifacts = results.get('artifacts', {})
+             for screenshot in artifacts.get('screenshots', []):
+                 if isinstance(screenshot, dict):
+                     network_data = screenshot.get('network_data', {})
+                     requests = network_data.get('requests', [])
+
+                     for request in requests:
+                         all_requests.append({
+                             **request,
+                             "screenshot_name": screenshot.get('name', 'unknown'),
+                             "timestamp": screenshot.get('timestamp', 0)
+                         })
+
+                     failed = network_data.get('failed_requests', {}).get('requests', [])
+                     for request in failed:
+                         failed_requests.append({
+                             **request,
+                             "screenshot_name": screenshot.get('name', 'unknown'),
+                             "timestamp": screenshot.get('timestamp', 0)
+                         })
+
+         # Organize by status code
+         by_status_code = {}
+         for request in all_requests:
+             status = request.get('status_code', 0)
+             if status not in by_status_code:
+                 by_status_code[status] = []
+             by_status_code[status].append(request)
+
+         return {
+             "total_requests": len(all_requests),
+             "failed_requests": failed_requests,
+             "requests_by_status_code": by_status_code,
+             "all_requests": all_requests,
+             "summary": {
+                 "total_failed": len(failed_requests),
+                 "success_rate": (len(all_requests) - len(failed_requests)) / len(all_requests) * 100 if all_requests else 100
+             }
+         }
+
+     def _extract_console(self, results: Dict) -> Dict:
+         """Extract all console messages"""
+         all_messages = []
+
+         # Load comprehensive data from disk if available
+         comprehensive_data = self._load_comprehensive_data(results)
+
+         if comprehensive_data:
+             console_data = comprehensive_data.get('console_data', {})
+             all_console_logs = console_data.get('all_console_logs', [])
+
+             for log in all_console_logs:
+                 all_messages.append({
+                     "type": log.get('type', 'log'),
+                     "message": log.get('text', ''),
+                     "source": log.get('location', {}).get('url', ''),
+                     "timestamp": log.get('timestamp', 0),
+                     "screenshot_name": 'comprehensive'
+                 })
+         else:
+             # Fallback: Collect from screenshot artifacts (old structure)
+             # Only if comprehensive_data not available
+             artifacts = results.get('artifacts', {})
+             for screenshot in artifacts.get('screenshots', []):
+                 if isinstance(screenshot, dict):
+                     console_data = screenshot.get('console_data', {})
+
+                     # Collect all message types
+                     for msg_type in ['errors', 'warnings', 'logs', 'info']:
+                         messages = console_data.get(msg_type, {}).get('logs', [])
+                         for msg in messages:
+                             all_messages.append({
+                                 "type": msg_type,
+                                 "message": msg.get('message', ''),
+                                 "source": msg.get('source', ''),
+                                 "timestamp": screenshot.get('timestamp', 0),
+                                 "screenshot_name": screenshot.get('name', 'unknown')
+                             })
+
+         # Organize by type
+         by_type = {}
+         for msg in all_messages:
+             msg_type = msg['type']
+             if msg_type not in by_type:
+                 by_type[msg_type] = []
+             by_type[msg_type].append(msg)
+
+         return {
+             "total_messages": len(all_messages),
+             "messages_by_type": by_type,
+             "all_messages": all_messages
+         }
+
+     def _extract_dom_analysis(self, results: Dict) -> Dict:
+         """Extract DOM and element data"""
+         comprehensive = results.get('comprehensive_data', {})
+         dom_analysis = comprehensive.get('dom_analysis', {})
+
+         return {
+             "total_elements": len(dom_analysis.get('elements', [])),
+             "elements": dom_analysis.get('elements', []),
+             "page_structure": dom_analysis.get('page_structure', {}),
+             "accessibility": comprehensive.get('accessibility', {})
+         }
+
+     def _extract_performance(self, results: Dict) -> Dict:
+         """Extract performance metrics"""
+         performance_data = []
+
+         # Load comprehensive data from disk if available
+         comprehensive = self._load_comprehensive_data(results)
+
+         if comprehensive and 'performance_data' in comprehensive:
+             perf = comprehensive.get('performance_data', {})
+             if perf:
+                 performance_data.append({
+                     "screenshot_name": 'comprehensive',
+                     "timestamp": results.get('execution_time', 0),
+                     "metrics": perf
+                 })
+
+         # Fallback: from screenshot artifacts
+         artifacts = results.get('artifacts', {})
+         for screenshot in artifacts.get('screenshots', []):
+             if isinstance(screenshot, dict):
+                 perf = screenshot.get('performance_data', {})
+                 if perf:
+                     performance_data.append({
+                         "screenshot_name": screenshot.get('name', 'unknown'),
+                         "timestamp": screenshot.get('timestamp', 0),
+                         "metrics": perf
+                     })
+
+         return {
+             "execution_time": results.get('execution_time', 0),
+             "performance_snapshots": performance_data,
+             "summary": self._calculate_performance_summary(performance_data)
+         }
+
+     def _extract_timeline(self, results: Dict) -> Dict:
+         """Extract complete timeline data"""
+         return {
+             "organized_timeline": results.get('timeline', []),
+             "browser_events": results.get('browser_events', []),
+             "server_logs": results.get('server_logs', [])
+         }
+
+     def _extract_server_logs(self, results: Dict) -> Dict:
+         """Extract server logs with categorization"""
+         server_logs = results.get('server_logs', [])
+
+         # Organize by severity
+         by_severity = {}
+         by_source = {}
+
+         for log in server_logs:
+             # Group by severity
+             severity = log.get('severity', 'info')
+             if severity not in by_severity:
+                 by_severity[severity] = []
+             by_severity[severity].append(log)
+
+             # Group by source
+             source = log.get('source', 'unknown')
+             if source not in by_source:
+                 by_source[source] = []
+             by_source[source].append(log)
+
+         return {
+             "total_logs": len(server_logs),
+             "logs_by_severity": by_severity,
+             "logs_by_source": by_source,
+             "all_logs": server_logs
+         }
+
+     def _extract_screenshots_metadata(self, results: Dict) -> Dict:
+         """Extract screenshot metadata and index"""
+         artifacts = results.get('artifacts', {})
+         screenshots = artifacts.get('screenshots', [])
+
+         metadata = []
+         for idx, screenshot in enumerate(screenshots):
+             if isinstance(screenshot, dict):
+                 metadata.append({
+                     "index": idx,
+                     "name": screenshot.get('name', f'screenshot_{idx}'),
+                     "timestamp": screenshot.get('timestamp', 0),
+                     "url": screenshot.get('url', ''),
+                     "path": screenshot.get('path', ''),
+                     "has_errors": len(screenshot.get('console_data', {}).get('errors', {}).get('logs', [])) > 0,
+                     "has_network_failures": len(screenshot.get('network_data', {}).get('failed_requests', {}).get('requests', [])) > 0,
+                     "element_count": len(screenshot.get('dom_analysis', {}).get('elements', [])) if 'dom_analysis' in screenshot else 0
+                 })
+
+         return {
+             "total_screenshots": len(metadata),
+             "screenshots": metadata
+         }
+
+     def _extract_mockup_comparison(self, results: Dict) -> Dict:
+         """Extract mockup comparison results"""
+         mockup_data = results.get('mockup_comparison', {})
+
+         return {
+             "mockup_url": mockup_data.get('mockup_url', ''),
+             "implementation_url": mockup_data.get('implementation_url', ''),
+             "similarity_score": mockup_data.get('similarity_score', 0),
+             "differences": mockup_data.get('differences', []),
+             "iterations": mockup_data.get('iterations', [])
+         }
+
+     def _extract_responsive_results(self, results: Dict) -> Dict:
+         """Extract responsive testing results"""
+         responsive_data = results.get('responsive_results', {})
+
+         viewports = responsive_data.get('viewports', {})
+         comparison = responsive_data.get('comparison', {})
+
+         return {
+             "viewports": viewports,
+             "comparison": comparison,
+             "performance_by_viewport": responsive_data.get('performance', {})
+         }
+
+     def _extract_css_iterations(self, results: Dict) -> Dict:
+         """Extract CSS iteration results"""
+         iterations = results.get('iterations', results.get('css_iterations', []))
+
+         return {
+             "total_iterations": len(iterations) if isinstance(iterations, list) else 0,
+             "iterations": iterations,
+             "session_context": results.get('session_context', {})
+         }
+
+     def _organize_screenshots(self, results: Dict, screenshots_dir: Path):
+         """Copy screenshots to organized directory"""
+         artifacts = results.get('artifacts', {})
+         screenshots = artifacts.get('screenshots', [])
+
+         for screenshot in screenshots:
+             if isinstance(screenshot, dict):
+                 screenshot_path = screenshot.get('path')
+                 if screenshot_path and Path(screenshot_path).exists():
+                     dest = screenshots_dir / Path(screenshot_path).name
+                     shutil.copy2(screenshot_path, dest)
+
+     def _organize_traces(self, results: Dict, traces_dir: Path):
+         """Copy traces to organized directory"""
+         artifacts = results.get('artifacts', {})
+         traces = artifacts.get('traces', [])
+
+         for trace_path in traces:
+             if Path(trace_path).exists():
+                 dest = traces_dir / Path(trace_path).name
+                 shutil.copy2(trace_path, dest)
+
+     def _categorize_error_type(self, error_message: str) -> str:
+         """Categorize error by type based on message content"""
+         error_message_lower = error_message.lower()
+
+         if 'syntaxerror' in error_message_lower or 'unexpected' in error_message_lower:
+             return 'syntax_error'
+         elif 'referenceerror' in error_message_lower or 'not defined' in error_message_lower:
+             return 'reference_error'
+         elif 'typeerror' in error_message_lower:
+             return 'type_error'
+         elif 'networkerror' in error_message_lower or 'failed to fetch' in error_message_lower:
+             return 'network_error'
+         elif 'load' in error_message_lower:
+             return 'load_error'
+         else:
+             return 'other_error'
+
+     def _calculate_performance_summary(self, performance_data: list) -> Dict:
+         """Calculate aggregate performance metrics"""
+         if not performance_data:
+             return {}
+
+         # Extract metrics from snapshots
+         load_times = []
+         memory_usage = []
+
+         for snapshot in performance_data:
+             metrics = snapshot.get('metrics', {})
+             summary = metrics.get('performance_summary', {})
+
+             if 'page_load_time' in summary:
+                 load_times.append(summary['page_load_time'])
+             if 'memory_usage_mb' in summary:
+                 memory_usage.append(summary['memory_usage_mb'])
+
+         return {
+             "average_page_load_time": sum(load_times) / len(load_times) if load_times else 0,
+             "max_memory_usage": max(memory_usage) if memory_usage else 0,
+             "min_memory_usage": min(memory_usage) if memory_usage else 0
+         }
+
+     def _load_comprehensive_data(self, results: Dict) -> Dict:
+         """Load comprehensive data from disk or from results dict"""
+         # First try to get from results (if already loaded)
+         comprehensive = results.get('comprehensive_data', {})
+         if comprehensive:
+             return comprehensive
+
+         # Try to load from disk via screenshot artifacts
+         artifacts = results.get('artifacts', {})
+         screenshots = artifacts.get('screenshots', [])
+
+         if screenshots:
+             last_screenshot = screenshots[-1]
+             # Check if comprehensive_data_path is set
+             if isinstance(last_screenshot, dict) and 'comprehensive_data_path' in last_screenshot:
+                 comp_path = Path(last_screenshot['comprehensive_data_path'])
+                 if comp_path.exists():
+                     try:
+                         with open(comp_path, 'r', encoding='utf-8') as f:
+                             return json.load(f)
+                     except Exception as e:
+                         self.logger.warning(f"Could not load comprehensive data from {comp_path}: {e}")
+
+             # Try to find comprehensive data file by naming convention
+             if isinstance(last_screenshot, dict) and 'path' in last_screenshot:
+                 screenshot_path = Path(last_screenshot['path'])
+                 if screenshot_path.exists():
+                     # Look for companion comprehensive data file
+                     comp_path = screenshot_path.parent / f"{screenshot_path.stem}_comprehensive_data.json"
+                     if comp_path.exists():
+                         try:
+                             with open(comp_path, 'r', encoding='utf-8') as f:
+                                 return json.load(f)
+                         except Exception as e:
+                             self.logger.warning(f"Could not load comprehensive data from {comp_path}: {e}")
+
+         return {}
+
+     def _write_json(self, path: Path, data: Dict):
+         """Write JSON data to file with proper formatting"""
+         with open(path, 'w', encoding='utf-8') as f:
+             json.dump(data, f, indent=2, default=str, ensure_ascii=False)
+
+     def get_session_path(self, session_id: str) -> Path:
+         """Get path to session directory"""
+         return self.artifacts_base_dir / "sessions" / session_id
+
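
Not part of the published diff: below is a minimal usage sketch of the new module, inferred only from the class and method signatures shown above. The import path and the shape of the results dictionary are assumptions for illustration; only OutputManager, save_structured_results, and get_session_path come from the released code.

    # Hypothetical usage sketch; the import path is an assumption, not confirmed by the diff.
    from cursorflow.output_manager import OutputManager  # assumed module path

    manager = OutputManager(artifacts_base_dir=".cursorflow/artifacts")

    # Minimal results payload using only keys the module reads
    # (session_id, success, execution_time, timeline, server_logs, artifacts).
    results = {
        "session_id": "session_001",
        "success": True,
        "execution_time": 12.4,
        "timeline": [],
        "server_logs": [],
        "artifacts": {"screenshots": [], "traces": []},
    }

    # Writes summary.json, errors.json, network.json, console.json, dom_analysis.json,
    # performance.json, timeline.json, server_logs.json and screenshots.json under
    # .cursorflow/artifacts/sessions/session_001/ and returns a mapping of file types to paths.
    file_paths = manager.save_structured_results(results, session_id="session_001")
    print(file_paths["summary"])
    print(manager.get_session_path("session_001"))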