cursorflow-1.2.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,282 @@
+ """
+ Universal Report Generator
+
+ Creates comprehensive, Cursor-friendly test reports with actionable debugging
+ information. Works across all web frameworks.
+ """
+
+ import json
+ import os
+ from typing import Dict, List, Any, Optional
+ from datetime import datetime
+ from pathlib import Path
+ import logging
+
+ class ReportGenerator:
+     """Universal test report generator"""
+
+     def __init__(self, output_dir: str = "test_reports"):
+         self.output_dir = Path(output_dir)
+         self.output_dir.mkdir(exist_ok=True)
+
+         self.logger = logging.getLogger(__name__)
+
+     def create_markdown_report(self, results: Dict) -> str:
+         """Create comprehensive markdown report for Cursor"""
+
+         timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+         framework = results.get('framework', 'unknown')
+         component = results.get('component', 'unknown')
+         success = results.get('success', False)
+
+         # Build report content
+         report = f"""# Test Report - {component} ({framework})
+
+ **Generated**: {timestamp}
+ **Status**: {'✅ PASSED' if success else '❌ FAILED'}
+ **Framework**: {framework}
+ **Component**: {component}
+
+ ## 🎯 **Test Summary**
+
+ """
+
+         # Add summary table
+         correlations = results.get('correlations', {})
+         summary = correlations.get('summary', {})
+
+         report += f"""| Metric | Value |
+ |--------|-------|
+ | Browser Events | {summary.get('total_browser_events', 0)} |
+ | Server Log Entries | {summary.get('total_server_logs', 0)} |
+ | Correlations Found | {summary.get('correlations_found', 0)} |
+ | Critical Issues | {summary.get('critical_issues', 0)} |
+ | Errors | {summary.get('error_count', 0)} |
+ | Warnings | {summary.get('warning_count', 0)} |
+
+ """
+
+         # Critical Issues Section
+         critical_issues = correlations.get('critical_issues', [])
+         if critical_issues:
+             report += f"""## 🚨 **Critical Issues ({len(critical_issues)})**
+
+ """
+             for i, issue in enumerate(critical_issues, 1):
+                 browser_event = issue['browser_event']
+                 server_logs = issue['server_logs']
+                 confidence = issue.get('correlation_confidence', 0)
+
+                 report += f"""### Issue {i}: {browser_event.get('action', 'Unknown Action')}
+ **Confidence**: {confidence:.1%}
+ **Browser Action**: {browser_event.get('action', 'N/A')} at {datetime.fromtimestamp(browser_event['timestamp']).strftime('%H:%M:%S')}
+ **Server Errors**: {len(server_logs)} related log entries
+
+ **Server Log Details**:
+ """
+
+                 for log in server_logs[:3]:  # Show top 3 most relevant
+                     report += f"""```
+ {log['content']}
+ ```
+ **Error Type**: {log.get('error_type', 'Unknown')}
+ **Suggested Fix**: {log.get('suggested_fix', 'No specific recommendation')}
+
+ """
+
+         # Recommendations Section
+         recommendations = correlations.get('recommendations', [])
+         if recommendations:
+             report += f"""## 💡 **Recommendations ({len(recommendations)})**
+
+ """
+             for rec in recommendations:
+                 priority_emoji = {
+                     'critical': '🚨',
+                     'high': '⚠️',
+                     'medium': '📋',
+                     'low': '💭'
+                 }.get(rec.get('priority', 'medium'), '📋')
+
+                 report += f"""### {priority_emoji} {rec.get('title', 'Recommendation')}
+ **Priority**: {rec.get('priority', 'medium')}
+ **Description**: {rec.get('description', 'No description')}
+ **Action**: {rec.get('action', 'No specific action')}
+
+ """
+
+         # Browser Events Section
+         browser_results = results.get('browser_results', {})
+         workflows = browser_results.get('workflows', {})
+
+         if workflows:
+             report += f"""## 🔄 **Workflow Results**
+
+ """
+             for workflow_name, workflow_result in workflows.items():
+                 success_emoji = '✅' if workflow_result.get('success', False) else '❌'
+                 report += f"""### {success_emoji} {workflow_name}
+ **Duration**: {workflow_result.get('duration', 0):.2f}s
+ **Actions**: {len(workflow_result.get('actions', []))}
+ **Errors**: {len(workflow_result.get('errors', []))}
+
+ """
+
+         # Performance Metrics
+         performance = browser_results.get('performance_metrics', {})
+         if performance:
+             report += f"""## ⚡ **Performance Metrics**
+
+ | Metric | Value |
+ |--------|-------|
+ | Page Load Time | {performance.get('page_load_time', 0)}ms |
+ | DOM Ready Time | {performance.get('dom_ready_time', 0)}ms |
+ | Resource Count | {performance.get('resource_count', 0)} |
+ | Memory Usage | {performance.get('memory_usage', {}).get('used', 0) / 1024 / 1024:.1f}MB |
+
+ """
+
+         # Console Errors
+         console_errors = browser_results.get('console_errors', [])
+         if console_errors:
+             report += f"""## 🖥️ **Browser Console Errors ({len(console_errors)})**
+
+ """
+             for error in console_errors[:5]:  # Show top 5
+                 report += f"""**{error.get('level', 'error').upper()}**: {error.get('text', 'Unknown error')}
+ **Location**: {error.get('location', {}).get('url', 'Unknown')}:{error.get('location', {}).get('lineNumber', '?')}
+
+ """
+
+         # Network Requests
+         network_requests = browser_results.get('network_requests', [])
+         failed_requests = [req for req in network_requests if req.get('type') == 'response' and req.get('status', 0) >= 400]
+
+         if failed_requests:
+             report += f"""## 🌐 **Failed Network Requests ({len(failed_requests)})**
+
+ """
+             for req in failed_requests[:5]:
+                 report += f"""**{req.get('status', '?')}**: {req.get('url', 'Unknown URL')}
+ **Method**: {req.get('method', 'Unknown')}
+
+ """
+
+         # Raw Data Section (for debugging)
+         report += f"""## 🔍 **Debug Information**
+
+ <details>
+ <summary>Click to expand raw test data</summary>
+
+ ```json
+ {json.dumps(results, indent=2, default=str)}
+ ```
+
+ </details>
+
+ ---
+
+ **Generated by Cursor Testing Agent v1.0.0**
+ **Framework Adapter**: {framework}
+ **Report Time**: {timestamp}
+ """
+
+         return report
+
+     def save_report(self, results: Dict, filename: Optional[str] = None) -> str:
+         """Save report to file and return path"""
+
+         if filename is None:
+             timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
+             component = results.get('component', 'unknown')
+             filename = f"test_report_{component}_{timestamp}.md"
+
+         report_content = self.create_markdown_report(results)
+         report_path = self.output_dir / filename
+
+         with open(report_path, 'w', encoding='utf-8') as f:
+             f.write(report_content)
+
+         self.logger.info(f"Report saved to: {report_path}")
+         return str(report_path)
+
+     def create_summary_report(self, multiple_results: Dict[str, Dict]) -> str:
+         """Create summary report for multiple test runs"""
+
+         timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+         total_tests = len(multiple_results)
+         passed_tests = sum(1 for result in multiple_results.values() if result.get('success', False))
+         # Guard against division by zero when no results were supplied
+         success_rate = (passed_tests / total_tests * 100) if total_tests else 0.0
+
+         report = f"""# Multi-Component Test Summary
+
+ **Generated**: {timestamp}
+ **Total Tests**: {total_tests}
+ **Passed**: {passed_tests}
+ **Failed**: {total_tests - passed_tests}
+ **Success Rate**: {success_rate:.1f}%
+
+ ## 📊 **Test Results Overview**
+
+ | Component | Framework | Status | Errors | Warnings |
+ |-----------|-----------|--------|--------|----------|
+ """
+
+         for test_name, result in multiple_results.items():
+             framework = result.get('framework', 'unknown')
+             success = '✅ PASS' if result.get('success', False) else '❌ FAIL'
+
+             correlations = result.get('correlations', {})
+             summary = correlations.get('summary', {})
+             errors = summary.get('error_count', 0)
+             warnings = summary.get('warning_count', 0)
+
+             report += f"| {test_name} | {framework} | {success} | {errors} | {warnings} |\n"
+
+         # Critical Issues Across All Tests
+         all_critical = []
+         for result in multiple_results.values():
+             critical = result.get('correlations', {}).get('critical_issues', [])
+             all_critical.extend(critical)
+
+         if all_critical:
+             report += f"""
+ ## 🚨 **Critical Issues Requiring Attention**
+
+ """
+             for i, issue in enumerate(all_critical[:5], 1):
+                 browser_event = issue['browser_event']
+                 report += f"{i}. **{browser_event.get('action', 'Unknown')}** - {len(issue['server_logs'])} server errors\n"
+
+         # Overall Recommendations
+         report += f"""
+ ## 💡 **Overall Recommendations**
+
+ """
+         if passed_tests == total_tests:
+             report += "🎉 All tests passed! No immediate action required.\n"
+         elif passed_tests == 0:
+             report += "🚨 All tests failed. Review critical issues and check basic connectivity.\n"
+         else:
+             report += f"⚠️ {total_tests - passed_tests} tests failed. Focus on critical issues first.\n"
+
+         return report
+
+     def create_json_report(self, results: Dict) -> str:
+         """Create machine-readable JSON report"""
+
+         # Clean up results for JSON serialization
+         json_results = self._clean_for_json(results)
+
+         return json.dumps(json_results, indent=2, default=str)
+
+     def _clean_for_json(self, data: Any) -> Any:
+         """Clean data for JSON serialization"""
+
+         if isinstance(data, dict):
+             return {k: self._clean_for_json(v) for k, v in data.items()}
+         elif isinstance(data, list):
+             return [self._clean_for_json(item) for item in data]
+         elif isinstance(data, datetime):
+             return data.isoformat()
+         else:
+             return data
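
For context, a minimal usage sketch of the `ReportGenerator` class above. The shape of the `results` dictionary and the import path are assumptions inferred from the keys the report reads; neither is documented in this diff.

```python
# Hedged usage sketch (not part of the package): the `results` shape below is
# assumed from the keys create_markdown_report() reads, not from a documented schema.
from cursorflow.report_generator import ReportGenerator  # hypothetical import path

results = {
    "framework": "nextjs",
    "component": "LoginForm",
    "success": False,
    "correlations": {"summary": {"error_count": 2}, "critical_issues": [], "recommendations": []},
    "browser_results": {"workflows": {}, "console_errors": [], "network_requests": []},
}

generator = ReportGenerator(output_dir="test_reports")
report_path = generator.save_report(results)   # writes test_reports/test_report_LoginForm_<timestamp>.md
print(generator.create_json_report(results))   # machine-readable variant of the same data
```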
@@ -0,0 +1,198 @@
+ """
+ Local File Log Source
+
+ Monitors local log files by tailing them with `tail -F` in a subprocess.
+ Perfect for development environments and local testing.
+ """
+
+ import subprocess
+ import threading
+ import queue
+ import os
+ import logging
+ from typing import Dict, List
+ from datetime import datetime
+
+ class LocalFileLogSource:
+     """Local log file monitoring"""
+
+     def __init__(self, log_paths: Dict[str, str]):
+         """
+         Initialize local log monitoring
+
+         Args:
+             log_paths: Dictionary mapping log names to local file paths
+                 {
+                     'app_server': 'logs/app.log',
+                     'database': 'logs/db.log',
+                     'next_server': '.next/trace.log'
+                 }
+         """
+         self.log_paths = log_paths
+         self.log_queue = queue.Queue()
+         self.tail_processes = {}
+         self.monitoring = False
+
+         self.logger = logging.getLogger(__name__)
+
+     async def start_monitoring(self) -> bool:
+         """Start monitoring all configured log files"""
+
+         if self.monitoring:
+             self.logger.warning("Log monitoring already started")
+             return False
+
+         self.monitoring = True
+         self.logger.info(f"Starting local log monitoring for {len(self.log_paths)} log files")
+
+         # Verify all log files exist or can be created
+         for log_name, log_path in self.log_paths.items():
+             if not os.path.exists(log_path):
+                 # Create the parent directory, but only if the path has one
+                 # (os.makedirs('') raises for bare filenames like 'app.log')
+                 parent_dir = os.path.dirname(log_path)
+                 if parent_dir:
+                     os.makedirs(parent_dir, exist_ok=True)
+                 # Touch file if it doesn't exist
+                 if not os.path.exists(log_path):
+                     with open(log_path, 'a'):
+                         pass
+
+         # Start tail process for each log file
+         for log_name, log_path in self.log_paths.items():
+             success = self._start_tail_process(log_name, log_path)
+             if not success:
+                 self.logger.error(f"Failed to start monitoring {log_name}")
+
+         return len(self.tail_processes) > 0
+
+     def _start_tail_process(self, log_name: str, log_path: str) -> bool:
+         """Start tail process for a single log file"""
+
+         try:
+             # Use tail -F to follow file even if it's rotated
+             process = subprocess.Popen(
+                 ['tail', '-F', log_path],
+                 stdout=subprocess.PIPE,
+                 stderr=subprocess.PIPE,
+                 universal_newlines=True,
+                 bufsize=1
+             )
+
+             # Start thread to read output
+             thread = threading.Thread(
+                 target=self._read_tail_output,
+                 args=(log_name, log_path, process),
+                 daemon=True
+             )
+             thread.start()
+
+             self.tail_processes[log_name] = {
+                 'process': process,
+                 'thread': thread,
+                 'log_path': log_path
+             }
+
+             self.logger.info(f"Started tail process for {log_name}")
+             return True
+
+         except Exception as e:
+             self.logger.error(f"Failed to start tail for {log_name}: {e}")
+             return False
+
+     def _read_tail_output(self, log_name: str, log_path: str, process: subprocess.Popen):
+         """Read output from tail process"""
+
+         try:
+             for line in iter(process.stdout.readline, ''):
+                 if not self.monitoring:
+                     break
+
+                 if line.strip():  # Skip empty lines
+                     log_entry = {
+                         'timestamp': datetime.now(),
+                         'source': log_name,
+                         'content': line.strip(),
+                         'log_path': log_path,
+                         'source_type': 'local_file'
+                     }
+
+                     self.log_queue.put(log_entry)
+
+         except Exception as e:
+             self.logger.error(f"Error reading tail output for {log_name}: {e}")
+
+         finally:
+             process.terminate()
+
+     async def stop_monitoring(self) -> List[Dict]:
+         """Stop monitoring and return collected logs"""
+
+         if not self.monitoring:
+             return []
+
+         self.monitoring = False
+         self.logger.info("Stopping local log monitoring...")
+
+         # Stop all tail processes
+         for log_name, process_info in self.tail_processes.items():
+             process_info['process'].terminate()
+             process_info['thread'].join(timeout=2.0)
+
+         # Collect all queued log entries
+         logs = []
+         while not self.log_queue.empty():
+             try:
+                 log_entry = self.log_queue.get_nowait()
+                 logs.append(log_entry)
+             except queue.Empty:
+                 break
+
+         self.tail_processes.clear()
+         self.logger.info(f"Local log monitoring stopped. Collected {len(logs)} log entries")
+
+         return logs
+
+     def get_recent_logs(self, seconds: int = 10) -> List[Dict]:
+         """Get logs from the last N seconds"""
+         from datetime import timedelta
+
+         cutoff_time = datetime.now() - timedelta(seconds=seconds)
+         recent_logs = []
+
+         # Non-destructive queue iteration
+         temp_logs = []
+         while not self.log_queue.empty():
+             try:
+                 log_entry = self.log_queue.get_nowait()
+                 temp_logs.append(log_entry)
+             except queue.Empty:
+                 break
+
+         # Filter and restore
+         for log_entry in temp_logs:
+             if log_entry['timestamp'] >= cutoff_time:
+                 recent_logs.append(log_entry)
+             self.log_queue.put(log_entry)
+
+         return recent_logs
+
+     def tail_file_directly(self, file_path: str, lines: int = 50) -> List[str]:
+         """Get last N lines from a file directly (one-time read)"""
+
+         try:
+             result = subprocess.run(
+                 ['tail', '-n', str(lines), file_path],
+                 capture_output=True,
+                 text=True
+             )
+
+             if result.returncode == 0:
+                 return result.stdout.strip().split('\n')
+             else:
+                 self.logger.error(f"tail command failed: {result.stderr}")
+                 return []
+
+         except Exception as e:
+             self.logger.error(f"Error reading file {file_path}: {e}")
+             return []
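
A minimal usage sketch for `LocalFileLogSource`, assuming a Unix-like environment with `tail` available and an asyncio caller (the start/stop methods are coroutines); the import path is a guess, not taken from this diff.

```python
# Hedged usage sketch (not part of the package); the import path is assumed.
import asyncio
from cursorflow.log_sources.local_file import LocalFileLogSource  # hypothetical path

async def main():
    source = LocalFileLogSource({"app_server": "logs/app.log"})
    await source.start_monitoring()        # spawns one `tail -F` per configured file
    await asyncio.sleep(5)                 # ... exercise the app while logs accumulate ...
    print(source.get_recent_logs(seconds=10))
    logs = await source.stop_monitoring()  # terminates the tail processes and drains the queue
    print(f"collected {len(logs)} entries")

asyncio.run(main())
```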
@@ -0,0 +1,210 @@
+ """
+ SSH Remote Log Source
+
+ Monitors log files on remote servers via SSH connection.
+ Supports real-time tailing of multiple log files simultaneously.
+ """
+
+ import paramiko
+ import re
+ import threading
+ import queue
+ import logging
+ from typing import Dict, List, Optional
+ from datetime import datetime, timedelta
+
+ class SSHRemoteLogSource:
+     """Remote log monitoring via SSH"""
+
+     def __init__(self, ssh_config: Dict, log_paths: Dict[str, str]):
+         """
+         Initialize SSH log monitoring
+
+         Args:
+             ssh_config: SSH connection configuration
+                 {
+                     'hostname': 'server.example.com',
+                     'username': 'deploy',
+                     'key_filename': '/path/to/key',
+                     'password': 'optional_password',
+                     'port': 22
+                 }
+             log_paths: Dictionary mapping log names to paths
+                 {
+                     'apache_error': '/var/log/httpd/error_log',
+                     'apache_access': '/var/log/httpd/access_log',
+                     'app_debug': '/tmp/app_debug.log'
+                 }
+         """
+         self.ssh_config = ssh_config
+         self.log_paths = log_paths
+         self.log_queue = queue.Queue()
+         self.monitoring_threads = []
+         self.monitoring = False
+
+         self.logger = logging.getLogger(__name__)
+
+     async def start_monitoring(self) -> bool:
+         """Start monitoring all configured log files"""
+
+         if self.monitoring:
+             self.logger.warning("Log monitoring already started")
+             return False
+
+         self.monitoring = True
+         self.logger.info(f"Starting SSH log monitoring for {len(self.log_paths)} log files")
+
+         # Test SSH connection first
+         if not self._test_ssh_connection():
+             self.logger.error("Failed to establish SSH connection")
+             return False
+
+         # Start monitoring thread for each log file
+         for log_name, log_path in self.log_paths.items():
+             thread = threading.Thread(
+                 target=self._monitor_single_log,
+                 args=(log_name, log_path),
+                 daemon=True
+             )
+             thread.start()
+             self.monitoring_threads.append(thread)
+
+         self.logger.info("All log monitoring threads started")
+         return True
+
+     def _test_ssh_connection(self) -> bool:
+         """Test SSH connection before starting monitoring"""
+         try:
+             ssh = paramiko.SSHClient()
+             ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+             ssh.connect(**self.ssh_config)
+
+             # Test basic command
+             stdin, stdout, stderr = ssh.exec_command('echo "test"')
+             result = stdout.read().decode().strip()
+             ssh.close()
+
+             return result == 'test'
+
+         except Exception as e:
+             self.logger.error(f"SSH connection test failed: {e}")
+             return False
+
+     def _monitor_single_log(self, log_name: str, log_path: str):
+         """Monitor a single log file via SSH"""
+
+         self.logger.info(f"Starting monitoring for {log_name}: {log_path}")
+
+         ssh = None
+         try:
+             ssh = paramiko.SSHClient()
+             ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+             ssh.connect(**self.ssh_config)
+
+             # Use tail -F to follow log file
+             stdin, stdout, stderr = ssh.exec_command(f'tail -F {log_path}')
+
+             # Read lines as they come in
+             for line in iter(stdout.readline, ""):
+                 if not self.monitoring:
+                     break
+
+                 log_entry = {
+                     'timestamp': datetime.now(),
+                     'source': log_name,
+                     'content': line.strip(),
+                     'log_path': log_path,
+                     'source_type': 'ssh_remote'
+                 }
+
+                 self.log_queue.put(log_entry)
+
+         except Exception as e:
+             self.logger.error(f"Error monitoring {log_name}: {e}")
+
+         finally:
+             if ssh:
+                 ssh.close()
+             self.logger.info(f"Stopped monitoring {log_name}")
+
+     async def stop_monitoring(self) -> List[Dict]:
+         """Stop monitoring and return collected logs"""
+
+         if not self.monitoring:
+             return []
+
+         self.monitoring = False
+         self.logger.info("Stopping log monitoring...")
+
+         # Wait for threads to finish (with timeout)
+         for thread in self.monitoring_threads:
+             thread.join(timeout=2.0)
+
+         # Collect all queued log entries
+         logs = []
+         while not self.log_queue.empty():
+             try:
+                 log_entry = self.log_queue.get_nowait()
+                 logs.append(log_entry)
+             except queue.Empty:
+                 break
+
+         self.monitoring_threads.clear()
+         self.logger.info(f"Log monitoring stopped. Collected {len(logs)} log entries")
+
+         return logs
+
+     def get_recent_logs(self, seconds: int = 10) -> List[Dict]:
+         """Get logs from the last N seconds without stopping monitoring"""
+
+         cutoff_time = datetime.now() - timedelta(seconds=seconds)
+         recent_logs = []
+
+         # Create temporary list to avoid modifying queue during iteration
+         temp_logs = []
+         while not self.log_queue.empty():
+             try:
+                 log_entry = self.log_queue.get_nowait()
+                 temp_logs.append(log_entry)
+             except queue.Empty:
+                 break
+
+         # Filter recent logs and put back in queue
+         for log_entry in temp_logs:
+             if log_entry['timestamp'] >= cutoff_time:
+                 recent_logs.append(log_entry)
+             # Put back in queue
+             self.log_queue.put(log_entry)
+
+         return recent_logs
+
+     def search_logs(self, pattern: str, log_source: Optional[str] = None) -> List[Dict]:
+         """Search for pattern in collected logs"""
+
+         # Get all current logs
+         temp_logs = []
+         while not self.log_queue.empty():
+             try:
+                 log_entry = self.log_queue.get_nowait()
+                 temp_logs.append(log_entry)
+             except queue.Empty:
+                 break
+
+         # Search and restore
+         matches = []
+         for log_entry in temp_logs:
+             # Filter by log source if specified
+             if log_source and log_entry['source'] != log_source:
+                 self.log_queue.put(log_entry)
+                 continue
+
+             # Search for pattern
+             if re.search(pattern, log_entry['content'], re.IGNORECASE):
+                 matches.append(log_entry)
+
+             # Put back in queue
+             self.log_queue.put(log_entry)
+
+         return matches
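
And a matching sketch for `SSHRemoteLogSource`; the import path, hostname, key location, and log paths are placeholders, not values from this package.

```python
# Hedged usage sketch (not part of the package); import path and credentials are placeholders.
import asyncio
from cursorflow.log_sources.ssh_remote import SSHRemoteLogSource  # hypothetical path

ssh_config = {"hostname": "server.example.com", "username": "deploy",
              "key_filename": "/home/deploy/.ssh/id_rsa", "port": 22}
log_paths = {"apache_error": "/var/log/httpd/error_log"}

async def main():
    source = SSHRemoteLogSource(ssh_config, log_paths)
    if await source.start_monitoring():                    # opens one SSH channel per log file
        await asyncio.sleep(10)
        errors = source.search_logs(r"error|exception")    # regex search over buffered entries
        logs = await source.stop_monitoring()
        print(f"{len(errors)} matches in {len(logs)} collected entries")

asyncio.run(main())
```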