cursorflow-1.2.0-py3-none-any.whl

This diff shows the content of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only and reflects the changes between package versions.
@@ -0,0 +1,322 @@
"""
Universal Error Correlator

Correlates browser events with server log entries to provide intelligent
debugging insights. Works across all web frameworks.
"""

import re
from typing import Dict, List, Any, Optional, Tuple
from datetime import datetime, timedelta
import logging


class ErrorCorrelator:
    """Universal error correlation engine"""

    def __init__(self, error_patterns: Dict[str, Dict]):
        """
        Initialize error correlator with framework-specific patterns

        Args:
            error_patterns: Framework-specific error patterns from adapter
        """
        self.error_patterns = error_patterns
        self.correlations = []

        self.logger = logging.getLogger(__name__)

    def correlate_events(
        self,
        browser_events: List[Dict],
        server_logs: List[Dict],
        time_window: int = 5
    ) -> Dict[str, Any]:
        """
        Correlate browser events with server log entries

        Args:
            browser_events: List of browser events (clicks, AJAX, errors)
            server_logs: List of server log entries
            time_window: Time window in seconds for correlation

        Returns:
            Correlation results with matched events and recommendations
        """

        self.logger.info(f"Correlating {len(browser_events)} browser events with {len(server_logs)} server logs")

        correlations = []
        critical_issues = []

        # Categorize server logs by error patterns
        categorized_logs = self._categorize_server_logs(server_logs)

        # Find correlations between browser events and server errors
        for browser_event in browser_events:
            if browser_event.get('type') in ['action_start', 'action_complete']:
                related_logs = self._find_related_logs(
                    browser_event, categorized_logs['errors'], time_window
                )

                if related_logs:
                    correlation = {
                        'browser_event': browser_event,
                        'server_logs': related_logs,
                        'correlation_confidence': self._calculate_confidence(browser_event, related_logs),
                        'time_window': time_window,
                        'recommended_fixes': self._generate_fix_recommendations(related_logs)
                    }
                    correlations.append(correlation)

                    # Check for critical issues
                    if any(log.get('severity') == 'critical' for log in related_logs):
                        critical_issues.append(correlation)

        # Analyze standalone server errors (not correlated with browser events)
        standalone_errors = self._find_standalone_errors(categorized_logs['errors'], correlations)

        # Generate overall recommendations
        all_errors = categorized_logs['errors'] + categorized_logs['warnings']
        overall_recommendations = self._generate_overall_recommendations(all_errors)

        results = {
            'correlations': correlations,
            'critical_issues': critical_issues,
            'standalone_errors': standalone_errors,
            'categorized_logs': categorized_logs,
            'recommendations': overall_recommendations,
            'summary': {
                'total_browser_events': len(browser_events),
                'total_server_logs': len(server_logs),
                'correlations_found': len(correlations),
                'critical_issues': len(critical_issues),
                'error_count': len(categorized_logs['errors']),
                'warning_count': len(categorized_logs['warnings'])
            }
        }

        return results

    def _categorize_server_logs(self, server_logs: List[Dict]) -> Dict[str, List[Dict]]:
        """Categorize server logs using error patterns"""

        categorized = {
            'errors': [],
            'warnings': [],
            'info': [],
            'unknown': []
        }

        for log_entry in server_logs:
            content = log_entry['content']
            enhanced_entry = log_entry.copy()

            # Try to match against error patterns
            matched = False
            for pattern_name, pattern_config in self.error_patterns.items():
                if re.search(pattern_config['regex'], content, re.IGNORECASE):
                    enhanced_entry.update({
                        'error_type': pattern_name,
                        'severity': pattern_config['severity'],
                        'description': pattern_config['description'],
                        'suggested_fix': pattern_config['suggested_fix'],
                        'pattern_matched': pattern_name
                    })

                    # Categorize by severity
                    if pattern_config['severity'] in ['critical', 'high']:
                        categorized['errors'].append(enhanced_entry)
                    elif pattern_config['severity'] == 'medium':
                        categorized['warnings'].append(enhanced_entry)
                    else:
                        categorized['info'].append(enhanced_entry)

                    matched = True
                    break

            if not matched:
                # Basic heuristic categorization
                content_lower = content.lower()
                if any(word in content_lower for word in ['error', 'failed', 'exception', 'critical', 'fatal']):
                    enhanced_entry['severity'] = 'high'
                    categorized['errors'].append(enhanced_entry)
                elif any(word in content_lower for word in ['warning', 'warn']):
                    enhanced_entry['severity'] = 'medium'
                    categorized['warnings'].append(enhanced_entry)
                else:
                    enhanced_entry['severity'] = 'low'
                    categorized['info'].append(enhanced_entry)

        return categorized

    def _find_related_logs(
        self,
        browser_event: Dict,
        server_errors: List[Dict],
        time_window: int
    ) -> List[Dict]:
        """Find server logs related to a browser event"""

        related = []
        browser_time = datetime.fromtimestamp(browser_event['timestamp'])

        for server_log in server_errors:
            server_time = server_log['timestamp']

            # Calculate time difference
            time_diff = abs((browser_time - server_time).total_seconds())

            if time_diff <= time_window:
                # Additional correlation logic based on event type
                correlation_strength = self._calculate_event_correlation(browser_event, server_log)

                if correlation_strength > 0.3:  # Threshold for correlation
                    server_log_enhanced = server_log.copy()
                    server_log_enhanced['correlation_strength'] = correlation_strength
                    server_log_enhanced['time_diff'] = time_diff
                    related.append(server_log_enhanced)

        # Sort by correlation strength
        related.sort(key=lambda x: x.get('correlation_strength', 0), reverse=True)

        return related

    def _calculate_event_correlation(self, browser_event: Dict, server_log: Dict) -> float:
        """Calculate correlation strength between browser event and server log"""

        correlation = 0.0

        # Time proximity (closer = higher correlation)
        browser_time = datetime.fromtimestamp(browser_event['timestamp'])
        server_time = server_log['timestamp']
        time_diff = abs((browser_time - server_time).total_seconds())

        if time_diff <= 1:
            correlation += 0.4
        elif time_diff <= 3:
            correlation += 0.2
        elif time_diff <= 5:
            correlation += 0.1

        # Event type correlation
        browser_action = browser_event.get('action', browser_event.get('type', ''))
        server_content = server_log['content'].lower()

        action_correlations = {
            'click': ['ajax', 'post', 'submit'],
            'ajax_call': ['ajax', 'perl', 'function'],
            'navigation': ['get', 'request', 'page'],
            'form_submit': ['post', 'form', 'submit']
        }

        if browser_action in action_correlations:
            for keyword in action_correlations[browser_action]:
                if keyword in server_content:
                    correlation += 0.2
                    break

        # URL/component correlation
        if 'url' in browser_event:
            browser_url = browser_event['url']
            if any(part in server_content for part in browser_url.split('/') if len(part) > 3):
                correlation += 0.3

        # AJAX function correlation (mod_perl specific)
        if browser_event.get('type') == 'ajax_call' and 'fn' in browser_event.get('data', {}):
            function_name = browser_event['data']['fn']
            if function_name in server_content:
                correlation += 0.5

        return min(correlation, 1.0)  # Cap at 1.0

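    # Worked scoring example (illustrative, using only the weights above): a click
    # at t+0.8s against a server log that contains "post" and shares a URL segment
    # longer than 3 characters scores 0.4 (time proximity) + 0.2 (action keyword)
    # + 0.3 (URL match) = 0.9, well above the 0.3 threshold applied in
    # _find_related_logs.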
    def _calculate_confidence(self, browser_event: Dict, related_logs: List[Dict]) -> float:
        """Calculate overall confidence in correlation"""

        if not related_logs:
            return 0.0

        # Average correlation strength
        avg_correlation = sum(log.get('correlation_strength', 0) for log in related_logs) / len(related_logs)

        # Boost confidence if multiple logs correlate
        multi_log_boost = min(len(related_logs) * 0.1, 0.3)

        # Boost confidence for critical errors
        critical_boost = 0.2 if any(log.get('severity') == 'critical' for log in related_logs) else 0

        confidence = avg_correlation + multi_log_boost + critical_boost

        return min(confidence, 1.0)

    def _generate_fix_recommendations(self, related_logs: List[Dict]) -> List[str]:
        """Generate fix recommendations based on correlated logs"""

        recommendations = []
        seen_fixes = set()

        for log in related_logs:
            suggested_fix = log.get('suggested_fix')
            if suggested_fix and suggested_fix not in seen_fixes:
                recommendations.append(suggested_fix)
                seen_fixes.add(suggested_fix)

        return recommendations

    def _find_standalone_errors(self, server_errors: List[Dict], correlations: List[Dict]) -> List[Dict]:
        """Find server errors that don't correlate with browser events"""

        # Get server logs that are already correlated
        correlated_log_ids = set()
        for correlation in correlations:
            for log in correlation['server_logs']:
                log_id = f"{log['timestamp']}_{log['content']}"
                correlated_log_ids.add(log_id)

        # Find standalone errors
        standalone = []
        for log in server_errors:
            log_id = f"{log['timestamp']}_{log['content']}"
            if log_id not in correlated_log_ids:
                standalone.append(log)

        return standalone

    def _generate_overall_recommendations(self, all_errors: List[Dict]) -> List[Dict]:
        """Generate overall recommendations based on all errors"""

        recommendations = []

        # Group errors by type
        error_types = {}
        for error in all_errors:
            error_type = error.get('error_type', 'unknown')
            if error_type not in error_types:
                error_types[error_type] = []
            error_types[error_type].append(error)

        # Generate recommendations for each error type
        for error_type, errors in error_types.items():
            if len(errors) > 1:
                recommendations.append({
                    'type': 'pattern',
                    'priority': 'high',
                    'title': f"Multiple {error_type} errors detected",
                    'description': f"Found {len(errors)} instances of {error_type}",
                    'action': f"Review and fix {error_type} pattern across the application",
                    'affected_areas': [error.get('source', 'unknown') for error in errors]
                })

        # Critical error recommendations
        critical_errors = [error for error in all_errors if error.get('severity') == 'critical']
        if critical_errors:
            recommendations.append({
                'type': 'critical',
                'priority': 'critical',
                'title': f"{len(critical_errors)} critical errors require immediate attention",
                'description': "These errors prevent normal application functionality",
                'action': "Fix critical errors before continuing development",
                'details': [error.get('description', error.get('content', '')) for error in critical_errors[:3]]
            })

        return recommendations
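For reference, a minimal usage sketch of ErrorCorrelator (not part of the package). The pattern schema is inferred from _categorize_server_logs, which reads the 'regex', 'severity', 'description', and 'suggested_fix' keys; the timestamp types follow _find_related_logs, where browser events carry epoch floats and server log entries carry datetime objects. The pattern name and sample log lines below are hypothetical.

from datetime import datetime

# Hypothetical pattern set; only the four keys the correlator reads matter here.
error_patterns = {
    "internal_server_error": {
        "regex": r"500 Internal Server Error",
        "severity": "critical",
        "description": "Request handler raised an unhandled exception",
        "suggested_fix": "Check the server traceback for the failing handler",
    }
}

correlator = ErrorCorrelator(error_patterns)  # assumes the class above is in scope
results = correlator.correlate_events(
    browser_events=[
        {"type": "action_start", "action": "click",
         "timestamp": 1700000000.0, "url": "/checkout/submit"},
    ],
    server_logs=[
        {"timestamp": datetime.fromtimestamp(1700000001.0),
         "content": "POST /checkout/submit -> 500 Internal Server Error"},
    ],
    time_window=5,
)
print(results["summary"]["correlations_found"])  # 1 for this example

Because the click and the 500 land within one second, share the "post" keyword, and share a URL segment, _calculate_event_correlation scores the pair well above the 0.3 threshold, so one correlation is reported.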
@@ -0,0 +1,182 @@
"""
Universal Event Correlator

Simple data organizer that timestamps and structures browser events
and server logs for Cursor's analysis. NO PROCESSING - just organization.
"""

import time
from typing import Dict, List, Optional, Any
import logging


class EventCorrelator:
    """
    Simple data organizer - NO ANALYSIS, just clean data for Cursor

    Organizes browser events and server logs in chronological timeline
    format for Cursor to analyze. Does NOT process or interpret data.
    """

    def __init__(self):
        """Initialize simple data organizer"""
        self.logger = logging.getLogger(__name__)

    async def correlate_events(
        self,
        browser_timeline: List[Dict],
        server_logs: List[Dict]
    ) -> Dict[str, Any]:
        """
        Organize browser events and server logs for Cursor's analysis

        Args:
            browser_timeline: List of browser events with timestamps
            server_logs: List of server log entries with timestamps

        Returns:
            Simple organized data structure for Cursor to analyze:
            {
                "timeline": [events sorted by timestamp],
                "browser_events": [structured browser events],
                "server_events": [structured server logs],
                "summary": {basic counts}
            }
        """
        try:
            # Just organize the data - NO ANALYSIS
            organized_data = {
                "timeline": self._create_chronological_timeline(browser_timeline, server_logs),
                "browser_events": self._structure_browser_events(browser_timeline),
                "server_events": self._structure_server_events(server_logs),
                "summary": self._create_basic_summary(browser_timeline, server_logs)
            }

            self.logger.info(f"Organized {len(browser_timeline)} browser events and {len(server_logs)} server logs")
            return organized_data

        except Exception as e:
            self.logger.error(f"Data organization failed: {e}")
            return {
                "timeline": [],
                "browser_events": [],
                "server_events": [],
                "summary": {"error": str(e)}
            }

    def _create_chronological_timeline(self, browser_events: List[Dict], server_logs: List[Dict]) -> List[Dict]:
        """Create chronological timeline of all events - NO ANALYSIS"""
        all_events = []

        # Add browser events to timeline
        for event in browser_events:
            all_events.append({
                "timestamp": event.get("timestamp", 0),
                "source": "browser",
                "event_type": event.get("event", "unknown"),
                "data": event.get("data", {}),
                "raw_event": event
            })

        # Add server logs to timeline
        for log in server_logs:
            all_events.append({
                "timestamp": log.get("timestamp", 0),
                "source": "server",
                "event_type": "log_entry",
                "level": log.get("level", "info"),
                "content": log.get("content", ""),
                "raw_log": log
            })

        # Sort chronologically - that's it, no analysis
        all_events.sort(key=lambda x: x.get("timestamp", 0))

        return all_events

    def _structure_browser_events(self, browser_timeline: List[Dict]) -> List[Dict]:
        """Structure browser events for easy Cursor analysis - NO PROCESSING"""
        structured = []

        for event in browser_timeline:
            structured_event = {
                "timestamp": event.get("timestamp", 0),
                "type": event.get("event", "unknown"),
                "data": event.get("data", {}),
                "duration": event.get("duration", 0),
                "raw": event
            }
            structured.append(structured_event)

        return structured

    def _structure_server_events(self, server_logs: List[Dict]) -> List[Dict]:
        """Structure server logs for easy Cursor analysis - NO PROCESSING"""
        structured = []

        for log in server_logs:
            structured_log = {
                "timestamp": log.get("timestamp", 0),
                "level": log.get("level", "info"),
                "content": log.get("content", ""),
                "source": log.get("source", "unknown"),
                "source_type": log.get("source_type", "local"),
                "error_type": log.get("error_type", None),  # From log collector classification
                "raw": log
            }
            structured.append(structured_log)

        return structured

    def _create_basic_summary(self, browser_events: List[Dict], server_logs: List[Dict]) -> Dict[str, Any]:
        """Create basic counts for Cursor - NO ANALYSIS"""

        # Basic counts only
        browser_counts = {}
        server_counts = {}

        # Count browser event types
        for event in browser_events:
            event_type = event.get("event", "unknown")
            browser_counts[event_type] = browser_counts.get(event_type, 0) + 1

        # Count server log levels
        for log in server_logs:
            level = log.get("level", "info")
            server_counts[level] = server_counts.get(level, 0) + 1

        # Time range
        all_timestamps = []
        all_timestamps.extend([e.get("timestamp", 0) for e in browser_events])
        all_timestamps.extend([l.get("timestamp", 0) for l in server_logs])

        time_range = {}
        if all_timestamps:
            all_timestamps.sort()
            time_range = {
                "start": all_timestamps[0],
                "end": all_timestamps[-1],
                "duration": all_timestamps[-1] - all_timestamps[0]
            }

        return {
            "total_browser_events": len(browser_events),
            "total_server_logs": len(server_logs),
            "browser_event_types": browser_counts,
            "server_log_levels": server_counts,
            "time_range": time_range
        }

    def organize_for_time_window(self, timeline: List[Dict], start_time: float, end_time: float) -> List[Dict]:
        """Get events within specific time window - simple filtering"""
        return [
            event for event in timeline
            if start_time <= event.get("timestamp", 0) <= end_time
        ]

    def get_events_by_source(self, timeline: List[Dict], source: str) -> List[Dict]:
        """Filter events by source - simple filtering"""
        return [
            event for event in timeline
            if event.get("source") == source
        ]
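Likewise, a minimal sketch of how the EventCorrelator organizer might be driven (not part of the package). correlate_events is a coroutine, so it is run here with asyncio.run; the field names ("timestamp", "event", "data", "level", "content") mirror what the organizer reads above, and the sample events are hypothetical.

import asyncio
import time

async def main():
    correlator = EventCorrelator()  # assumes the class above is in scope
    now = time.time()
    organized = await correlator.correlate_events(
        browser_timeline=[
            {"timestamp": now, "event": "click", "data": {"selector": "#save"}},
            {"timestamp": now + 0.4, "event": "ajax_call", "data": {"url": "/api/save"}},
        ],
        server_logs=[
            {"timestamp": now + 0.5, "level": "error",
             "content": "POST /api/save failed: validation error"},
        ],
    )
    print(organized["summary"]["total_browser_events"])          # 2
    print([entry["source"] for entry in organized["timeline"]])  # ['browser', 'browser', 'server']

asyncio.run(main())

As the docstrings state, the organizer only merges and sorts the two streams; any correlation or interpretation of the resulting timeline is left to the consumer.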