cursorflow 2.7.1__py3-none-any.whl → 2.7.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- cursorflow/cli.py +19 -14
- cursorflow/core/output_manager.py +186 -70
- cursorflow/core/query_engine.py +2 -5
- cursorflow/rules/cursorflow-usage.mdc +109 -0
- {cursorflow-2.7.1.dist-info → cursorflow-2.7.3.dist-info}/METADATA +72 -1
- {cursorflow-2.7.1.dist-info → cursorflow-2.7.3.dist-info}/RECORD +10 -10
- {cursorflow-2.7.1.dist-info → cursorflow-2.7.3.dist-info}/WHEEL +0 -0
- {cursorflow-2.7.1.dist-info → cursorflow-2.7.3.dist-info}/entry_points.txt +0 -0
- {cursorflow-2.7.1.dist-info → cursorflow-2.7.3.dist-info}/licenses/LICENSE +0 -0
- {cursorflow-2.7.1.dist-info → cursorflow-2.7.3.dist-info}/top_level.txt +0 -0
cursorflow/cli.py
CHANGED
@@ -337,9 +337,6 @@ def test(base_url, path, actions, output, logs, config, verbose, headless, timeo
         with open(last_test_file, 'w') as f:
             json.dump(last_test_data, f, indent=2, default=str)

-        console.print(f"💾 Full results saved to: [cyan]{output}[/cyan]")
-        console.print(f"📁 Artifacts stored in: [cyan].cursorflow/artifacts/[/cyan]")
-
         # Phase 3.4: Auto-open trace
         if open_trace and 'artifacts' in results and 'trace' in results['artifacts']:
             trace_path = results['artifacts']['trace']
@@ -1151,6 +1148,8 @@ def inspect(base_url, path, selector, verbose):
         results = asyncio.run(flow.execute_and_collect([
             {"navigate": path},
             {"wait_for_selector": "body"},
+            {"wait_for_load_state": "networkidle"}, # Wait for dynamic content
+            {"wait_for_timeout": 1000}, # Additional buffer for JS rendering (1s)
             {"screenshot": "inspection"}
         ]))

@@ -1200,7 +1199,7 @@ def inspect(base_url, path, selector, verbose):
         console.print(f"Classes: [blue].{classes}[/blue]")

         # Selectors
-        unique_selector = element.get('uniqueSelector', 'N/A')
+        unique_selector = escape(str(element.get('uniqueSelector', 'N/A')))
         console.print(f"Unique: [cyan]{unique_selector}[/cyan]")

         # Dimensions
@@ -1216,20 +1215,22 @@ def inspect(base_url, path, selector, verbose):
         console.print(f"\n🎨 Key CSS Properties:")

         # Layout
-        display = computed.get('display', 'N/A')
-        position = computed.get('position', 'N/A')
+        display = escape(str(computed.get('display', 'N/A')))
+        position = escape(str(computed.get('position', 'N/A')))
         console.print(f" display: {display}")
         console.print(f" position: {position}")

         # Flexbox
         if 'flex' in computed:
-            …
+            flex_value = escape(str(computed.get('flex', 'N/A')))
+            console.print(f" flex: {flex_value}")
         if 'flexBasis' in computed:
-            …
+            flex_basis = escape(str(computed.get('flexBasis', 'N/A')))
+            console.print(f" flex-basis: {flex_basis}")

         # Dimensions
-        width = computed.get('width', 'N/A')
-        height = computed.get('height', 'N/A')
+        width = escape(str(computed.get('width', 'N/A')))
+        height = escape(str(computed.get('height', 'N/A')))
         console.print(f" width: {width}")
         console.print(f" height: {height}")

@@ -1237,20 +1238,23 @@ def inspect(base_url, path, selector, verbose):
         margin = computed.get('margin', 'N/A')
         padding = computed.get('padding', 'N/A')
         if margin != 'N/A':
+            margin = escape(str(margin))
             console.print(f" margin: {margin}")
         if padding != 'N/A':
+            padding = escape(str(padding))
             console.print(f" padding: {padding}")

         # Show all styles in verbose mode
         if verbose:
             console.print(f"\n📋 All Computed Styles:")
             for prop, value in sorted(computed.items())[:30]: # Limit to 30
-                …
+                safe_value = escape(str(value))
+                console.print(f" {prop}: {safe_value}")

         # Accessibility info
         accessibility = element.get('accessibility', {})
         if accessibility:
-            role = accessibility.get('role', 'N/A')
+            role = escape(str(accessibility.get('role', 'N/A')))
             is_interactive = accessibility.get('isInteractive', False)
             console.print(f"\n♿ Accessibility:")
             console.print(f" Role: {role}")
@@ -1293,8 +1297,9 @@ def inspect(base_url, path, selector, verbose):

     except Exception as e:
         console.print(f"[red]❌ Inspection failed: {escape(str(e))}[/red]")
-        …
+        if verbose:
+            import traceback
+            console.print(escape(traceback.format_exc()))

 def _element_matches_selector(element: Dict, selector: str) -> bool:
     """Check if element matches the given selector"""
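Most of the `inspect` changes above follow one pattern: every page-derived value (selectors, computed CSS values, accessibility roles, tracebacks) is wrapped in `rich.markup.escape` before being interpolated into a `console.print` f-string. A minimal sketch of why that matters, assuming Rich's console API (the selector value below is made up):

```python
from rich.console import Console
from rich.markup import escape

console = Console()

# CSS selectors and computed values often contain square brackets, which
# Rich's markup parser would otherwise try to interpret as style tags.
unique_selector = "div[data-role='card'] > span[aria-hidden]"

# escape() keeps the brackets literal instead of triggering a markup error
# or having them silently swallowed from the printed output.
console.print(f"Unique: [cyan]{escape(unique_selector)}[/cyan]")
```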
cursorflow/core/output_manager.py
CHANGED
@@ -146,31 +146,28 @@ class OutputManager:

     def _extract_summary(self, results: Dict) -> Dict:
         """Extract high-level summary data"""
-        comprehensive …
+        # Load comprehensive data from disk if available
+        comprehensive = self._load_comprehensive_data(results)
         artifacts = results.get('artifacts', {})

-        # Count errors
+        # Count errors from comprehensive_data
         error_count = 0
         warning_count = 0
-            …
-            error_count += len(errors.get('logs', []))
-            warnings = console_data.get('warnings', {})
-            warning_count += len(warnings.get('logs', []))
+        if comprehensive:
+            console_data = comprehensive.get('console_data', {})
+            all_console_logs = console_data.get('all_console_logs', [])
+            error_count = len([log for log in all_console_logs if log.get('type') == 'error'])
+            warning_count = len([log for log in all_console_logs if log.get('type') == 'warning'])

-        # Count network requests
+        # Count network requests from comprehensive_data
         network_count = 0
         failed_network_count = 0
-            …
-            failed_network_count += len(failed_requests.get('requests', []))
+        if comprehensive:
+            network_data = comprehensive.get('network_data', {})
+            all_network_events = network_data.get('all_network_events', [])
+            network_count = len(all_network_events)
+            # Count failures (4xx, 5xx)
+            failed_network_count = len([req for req in all_network_events if req.get('status', 0) >= 400])

         # Count DOM elements
         dom_element_count = 0
@@ -202,26 +199,50 @@ class OutputManager:

     def _extract_errors(self, results: Dict) -> Dict:
         """Extract all error data with context"""
-        artifacts = results.get('artifacts', {})
         errors = []

-        # …
-        …
+        # Load comprehensive data from disk if available
+        comprehensive_data = self._load_comprehensive_data(results)
+
+        if comprehensive_data:
+            console_data = comprehensive_data.get('console_data', {})
+            all_console_logs = console_data.get('all_console_logs', [])
+
+            # Filter for errors
+            for log in all_console_logs:
+                if log.get('type') == 'error':
+                    # Extract location info if present
+                    location = log.get('location', {})
                     errors.append({
-                        "message": …
-                        "source": …
-                        "line": …
-                        "column": …
-                        "stack_trace": …
-                        "timestamp": …
-                        "screenshot_name": …
-                        "url": …
+                        "message": log.get('text', ''),
+                        "source": location.get('url', ''),
+                        "line": location.get('lineNumber', 0),
+                        "column": location.get('columnNumber', 0),
+                        "stack_trace": log.get('stackTrace', {}).get('callFrames', []),
+                        "timestamp": log.get('timestamp', 0),
+                        "screenshot_name": 'comprehensive',
+                        "url": location.get('url', '')
                     })
+        else:
+            # Fallback: Collect errors from screenshot artifacts (old structure)
+            # Only if comprehensive_data not available
+            artifacts = results.get('artifacts', {})
+            for screenshot in artifacts.get('screenshots', []):
+                if isinstance(screenshot, dict):
+                    console_data = screenshot.get('console_data', {})
+                    error_logs = console_data.get('errors', {}).get('logs', [])
+
+                    for error in error_logs:
+                        errors.append({
+                            "message": error.get('message', ''),
+                            "source": error.get('source', ''),
+                            "line": error.get('line', 0),
+                            "column": error.get('column', 0),
+                            "stack_trace": error.get('stack_trace', ''),
+                            "timestamp": screenshot.get('timestamp', 0),
+                            "screenshot_name": screenshot.get('name', 'unknown'),
+                            "url": screenshot.get('url', '')
+                        })

         # Organize by error type
         error_types = {}
@@ -243,29 +264,55 @@ class OutputManager:

     def _extract_network(self, results: Dict) -> Dict:
         """Extract network request/response data"""
-        artifacts = results.get('artifacts', {})
         all_requests = []
         failed_requests = []

-        …
+        # Load comprehensive data from disk if available
+        comprehensive_data = self._load_comprehensive_data(results)
+
+        if comprehensive_data:
+            network_data = comprehensive_data.get('network_data', {})
+            all_network_events = network_data.get('all_network_events', [])
+
+            # Add all network events and identify failures
+            for event in all_network_events:
+                request_data = {
+                    "url": event.get('url', ''),
+                    "method": event.get('method', 'GET'),
+                    "status_code": event.get('status', 0),
+                    "timestamp": event.get('timestamp', 0),
+                    "timing": event.get('timing', {}),
+                    "screenshot_name": 'comprehensive'
+                }
+                all_requests.append(request_data)

-                failed …
-                …
+                # Identify failed requests (4xx, 5xx status codes)
+                status = event.get('status', 0)
+                if status >= 400:
+                    failed_requests.append(request_data)
+        else:
+            # Fallback: Collect from screenshot artifacts (old structure)
+            # Only if comprehensive_data not available
+            artifacts = results.get('artifacts', {})
+            for screenshot in artifacts.get('screenshots', []):
+                if isinstance(screenshot, dict):
+                    network_data = screenshot.get('network_data', {})
+                    requests = network_data.get('requests', [])
+
+                    for request in requests:
+                        all_requests.append({
+                            **request,
+                            "screenshot_name": screenshot.get('name', 'unknown'),
+                            "timestamp": screenshot.get('timestamp', 0)
+                        })
+
+                    failed = network_data.get('failed_requests', {}).get('requests', [])
+                    for request in failed:
+                        failed_requests.append({
+                            **request,
+                            "screenshot_name": screenshot.get('name', 'unknown'),
+                            "timestamp": screenshot.get('timestamp', 0)
+                        })

         # Organize by status code
         by_status_code = {}
@@ -288,24 +335,42 @@ class OutputManager:

     def _extract_console(self, results: Dict) -> Dict:
         """Extract all console messages"""
-        artifacts = results.get('artifacts', {})
         all_messages = []

-        …
+        # Load comprehensive data from disk if available
+        comprehensive_data = self._load_comprehensive_data(results)
+
+        if comprehensive_data:
+            console_data = comprehensive_data.get('console_data', {})
+            all_console_logs = console_data.get('all_console_logs', [])
+
+            for log in all_console_logs:
+                all_messages.append({
+                    "type": log.get('type', 'log'),
+                    "message": log.get('text', ''),
+                    "source": log.get('location', {}).get('url', ''),
+                    "timestamp": log.get('timestamp', 0),
+                    "screenshot_name": 'comprehensive'
+                })
+        else:
+            # Fallback: Collect from screenshot artifacts (old structure)
+            # Only if comprehensive_data not available
+            artifacts = results.get('artifacts', {})
+            for screenshot in artifacts.get('screenshots', []):
+                if isinstance(screenshot, dict):
+                    console_data = screenshot.get('console_data', {})
+
+                    # Collect all message types
+                    for msg_type in ['errors', 'warnings', 'logs', 'info']:
+                        messages = console_data.get(msg_type, {}).get('logs', [])
+                        for msg in messages:
+                            all_messages.append({
+                                "type": msg_type,
+                                "message": msg.get('message', ''),
+                                "source": msg.get('source', ''),
+                                "timestamp": screenshot.get('timestamp', 0),
+                                "screenshot_name": screenshot.get('name', 'unknown')
+                            })

         # Organize by type
         by_type = {}
@@ -335,9 +400,22 @@ class OutputManager:

     def _extract_performance(self, results: Dict) -> Dict:
         """Extract performance metrics"""
-        artifacts = results.get('artifacts', {})
         performance_data = []

+        # Load comprehensive data from disk if available
+        comprehensive = self._load_comprehensive_data(results)
+
+        if comprehensive and 'performance_data' in comprehensive:
+            perf = comprehensive.get('performance_data', {})
+            if perf:
+                performance_data.append({
+                    "screenshot_name": 'comprehensive',
+                    "timestamp": results.get('execution_time', 0),
+                    "metrics": perf
+                })
+
+        # Fallback: from screenshot artifacts
+        artifacts = results.get('artifacts', {})
         for screenshot in artifacts.get('screenshots', []):
             if isinstance(screenshot, dict):
                 perf = screenshot.get('performance_data', {})
@@ -512,6 +590,44 @@ class OutputManager:
             "min_memory_usage": min(memory_usage) if memory_usage else 0
         }

+    def _load_comprehensive_data(self, results: Dict) -> Dict:
+        """Load comprehensive data from disk or from results dict"""
+        # First try to get from results (if already loaded)
+        comprehensive = results.get('comprehensive_data', {})
+        if comprehensive:
+            return comprehensive
+
+        # Try to load from disk via screenshot artifacts
+        artifacts = results.get('artifacts', {})
+        screenshots = artifacts.get('screenshots', [])
+
+        if screenshots:
+            last_screenshot = screenshots[-1]
+            # Check if comprehensive_data_path is set
+            if isinstance(last_screenshot, dict) and 'comprehensive_data_path' in last_screenshot:
+                comp_path = Path(last_screenshot['comprehensive_data_path'])
+                if comp_path.exists():
+                    try:
+                        with open(comp_path, 'r', encoding='utf-8') as f:
+                            return json.load(f)
+                    except Exception as e:
+                        self.logger.warning(f"Could not load comprehensive data from {comp_path}: {e}")
+
+            # Try to find comprehensive data file by naming convention
+            if isinstance(last_screenshot, dict) and 'path' in last_screenshot:
+                screenshot_path = Path(last_screenshot['path'])
+                if screenshot_path.exists():
+                    # Look for companion comprehensive data file
+                    comp_path = screenshot_path.parent / f"{screenshot_path.stem}_comprehensive_data.json"
+                    if comp_path.exists():
+                        try:
+                            with open(comp_path, 'r', encoding='utf-8') as f:
+                                return json.load(f)
+                        except Exception as e:
+                            self.logger.warning(f"Could not load comprehensive data from {comp_path}: {e}")
+
+        return {}
+
     def _write_json(self, path: Path, data: Dict):
         """Write JSON data to file with proper formatting"""
         with open(path, 'w', encoding='utf-8') as f:
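The new `_load_comprehensive_data` helper is what the rewritten extractors lean on: it prefers an in-memory `comprehensive_data` dict, then a `comprehensive_data_path` recorded on the last screenshot, and finally a companion JSON file named after the screenshot. A rough sketch of that fallback and of the flat payload shape the extractors read; the session path and sample values are made up, only the key names and the `<stem>_comprehensive_data.json` convention come from the diff:

```python
import json
from pathlib import Path

# Hypothetical screenshot from a run; only the naming convention is taken from the diff.
screenshot_path = Path(".cursorflow/artifacts/sessions/session_001/inspection.png")
comp_path = screenshot_path.parent / f"{screenshot_path.stem}_comprehensive_data.json"
# -> .cursorflow/artifacts/sessions/session_001/inspection_comprehensive_data.json

# Made-up payload mirroring the keys _extract_summary / _extract_errors / _extract_network use.
comprehensive = {
    "console_data": {
        "all_console_logs": [
            {"type": "error", "text": "Uncaught TypeError: x is undefined"},
            {"type": "log", "text": "app booted"},
        ]
    },
    "network_data": {
        "all_network_events": [
            {"url": "/api/items", "status": 200},
            {"url": "/api/broken", "status": 500},
        ]
    },
}

logs = comprehensive["console_data"]["all_console_logs"]
events = comprehensive["network_data"]["all_network_events"]
error_count = len([log for log in logs if log.get("type") == "error"])    # 1
failed_count = len([e for e in events if e.get("status", 0) >= 400])      # 1

if comp_path.exists():  # same guard the helper applies before reading the file
    comprehensive = json.loads(comp_path.read_text(encoding="utf-8"))
```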
cursorflow/core/query_engine.py
CHANGED
@@ -317,11 +317,8 @@ class QueryEngine:

         # Filter failed requests only
         if 'failed' in filters and filters['failed']:
-            …
-            filtered_requests = [
-                req for req in all_requests
-                if req.get('url') in [f.get('url') for f in failed_reqs]
-            ]
+            # Use failed_requests array directly (already filtered)
+            filtered_requests = network.get('failed_requests', [])

         # Phase 1: Enhanced network filtering

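The removed filter rebuilt the failed list by URL membership against `all_requests`, so a URL that failed once and then succeeded on a retry would surface both attempts as failures; the replacement simply returns the already-filtered `failed_requests` list. A made-up illustration of the difference:

```python
all_requests = [
    {"url": "/api/items", "status_code": 500},
    {"url": "/api/items", "status_code": 200},  # retry that succeeded
]
failed_reqs = [{"url": "/api/items", "status_code": 500}]

# Old approach: URL membership marks both attempts as failed.
old_filtered = [r for r in all_requests
                if r.get("url") in [f.get("url") for f in failed_reqs]]
assert len(old_filtered) == 2

# New approach: trust the pre-filtered list.
new_filtered = failed_reqs
assert len(new_filtered) == 1
```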
cursorflow/rules/cursorflow-usage.mdc
CHANGED
@@ -6,6 +6,115 @@ alwaysApply: true

 # CursorFlow Usage Rules for Cursor AI

+## 🎯 **AI AGENT QUICK START - Read This First**
+
+### Instant Decision Tree
+
+**User reports error?**
+→ `cursorflow test --base-url URL --path /page`
+→ `cat .cursorflow/artifacts/sessions/session_XXX/data_digest.md`
+→ `cursorflow query session_XXX --errors --with-server-logs --with-network`
+→ Analyze correlated data, make fix
+
+**User wants CSS/styling change?**
+→ Python API: `css_iteration_persistent()`
+→ Test 2-3 CSS variations
+→ Apply best result
+
+**User reports slow page?**
+→ `cursorflow query session_XXX --network --over 500ms`
+→ `cursorflow query session_XXX --performance`
+→ Identify bottlenecks
+
+**User debugging network failure?**
+→ `cursorflow query session_XXX --network --failed --with-errors`
+→ See errors that happened around network failure
+→ `cursorflow query session_XXX --server-logs --pattern "api"`
+
+---
+
+## 🔍 **Common Debugging Patterns - Copy These**
+
+### Pattern 1: "Page has JavaScript error"
+```bash
+# 1. Capture current state
+cursorflow test --base-url http://localhost:3000 --path /page
+
+# 2. Get error context
+cursorflow query session_XXX --errors --export markdown
+
+# 3. Find what triggered it
+cursorflow query session_XXX --context-for-error 0 --window 10
+
+# 4. Check server logs
+cursorflow query session_XXX --server-logs --level error
+```
+
+### Pattern 2: "API call failing"
+```bash
+# 1. Find failed request
+cursorflow query session_XXX --network --failed
+
+# 2. Get full context
+cursorflow query session_XXX --network --url-contains "/api/" --with-errors
+
+# 3. Check server side
+cursorflow query session_XXX --server-logs --pattern "api" --level error
+```
+
+### Pattern 3: "Feature broken after deployment"
+```bash
+# 1. Test current version
+cursorflow test --base-url https://staging.com --path /feature
+
+# 2. Compare with baseline
+cursorflow query session_new --compare-with session_baseline --errors
+cursorflow query session_new --compare-with session_baseline --network
+
+# 3. Identify regressions
+# Shows: new_errors, fixed_errors, timing_changes
+```
+
+### Pattern 4: "Element not visible/clickable"
+```bash
+# 1. Inspect element
+cursorflow inspect --base-url URL --path /page --selector ".element"
+
+# 2. Query DOM
+cursorflow query session_XXX --dom --selector ".element"
+
+# 3. Check if hidden
+cursorflow query session_XXX --dom --visible
+```
+
+### Pattern 5: "Slow page load"
+```bash
+# 1. Find slow requests
+cursorflow query session_XXX --network --over 1000ms
+
+# 2. Group by URL
+cursorflow query session_XXX --group-by-url "/api/slow"
+
+# 3. Check performance
+cursorflow query session_XXX --performance
+```
+
+---
+
+## 💡 **What Each Query Reveals - AI Agent Guide**
+
+**`--errors`** → Error message, source file, line number, stack trace
+**`--errors --with-network`** → Errors + network requests within ±5s (find API failures)
+**`--errors --with-server-logs`** → Errors + backend logs (complete picture)
+**`--network --failed`** → All 4xx/5xx responses with headers and timing
+**`--network --over 500ms`** → Slow requests (bottleneck identification)
+**`--server-logs --level error`** → Backend errors with timestamps
+**`--context-for-error 0`** → Everything within ±5s of error (deep debugging)
+**`--group-by-url "/api/X"`** → All data for specific endpoint
+**`session_A --compare-with session_B`** → New/fixed errors, timing changes
+
+---
+
 ## 🔥 **CursorFlow: AI-Native Testing with Optimized Output**

 **CursorFlow v2.7.0 provides comprehensive data collection + AI-optimized output:**
{cursorflow-2.7.1.dist-info → cursorflow-2.7.3.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: cursorflow
-Version: 2.7.1
+Version: 2.7.3
 Summary: 🔥 Complete page intelligence for AI-driven development with Hot Reload Intelligence - captures DOM, network, console, performance, HMR events, and comprehensive page analysis
 Author-email: GeekWarrior Development <rbush@cooltheory.com>
 License-Expression: MIT
@@ -256,6 +256,77 @@ cursorflow query session_A --compare-with session_B --errors

 ---

+## 🤖 **For AI Agents: Start Here**
+
+### 3-Second Decision Guide
+
+| User Says | Command | What You Get |
+|-----------|---------|--------------|
+| "Debug this error" | `cursorflow test` → `query session_X --errors --with-network` | Error + network + server logs correlated |
+| "Fix this CSS" | Python: `css_iteration_persistent()` | Visual comparison of CSS variations |
+| "Why is this slow?" | `query session_X --network --over 500ms` | Slow requests identified |
+| "Test login flow" | `cursorflow test --actions '[...]'` | Complete flow with server correlation |
+
+### Complete Debugging Example
+
+```bash
+# User: "There's a JavaScript error on the dashboard"
+
+# 1. Capture (5 seconds)
+cursorflow test --base-url http://localhost:3000 --path /dashboard
+
+# 2. Diagnose (10 seconds)
+cat .cursorflow/artifacts/sessions/session_XXX/data_digest.md
+cursorflow query session_XXX --errors
+
+# 3. Get context (5 seconds)
+cursorflow query session_XXX --context-for-error 0 --export markdown
+
+# 4. Fix based on correlated data
+# (You now have: error message, stack trace, network requests, server logs)
+```
+
+**Total time: 20 seconds from problem to actionable diagnosis**
+
+### When User Mentions...
+
+**"Error", "Broken", "Not working"**
+1. `cursorflow test` to capture
+2. `query --errors` to identify
+3. `query --context-for-error` to understand
+4. Fix based on correlated data
+
+**"Slow", "Performance", "Takes forever"**
+1. `cursorflow test` to baseline
+2. `query --network --over 500ms` to find bottlenecks
+3. `query --performance` for metrics
+4. Optimize slow requests
+
+**"CSS", "Styling", "Layout", "Spacing"**
+1. Python API: `css_iteration_persistent()`
+2. Test 2-3 CSS approaches
+3. Compare screenshots
+4. Apply best result
+
+**"Login", "Auth", "Session"**
+1. `cursorflow test --actions '[...]'` with login flow
+2. `--save-session` to preserve auth
+3. `--use-session` for subsequent tests
+4. Check auth errors with `query --errors --contains "auth"`
+
+**"Responsive", "Mobile", "Tablet"**
+1. `cursorflow test --responsive`
+2. `query --responsive --viewport mobile`
+3. Compare viewport results
+4. Fix mobile-specific issues
+
+**"Compare", "Regression", "What changed"**
+1. `query session_before --compare-with session_after`
+2. Focus on `new_errors` and `timing_changes`
+3. Investigate specific regressions
+
+---
+
 ## 🚀 Quick Start

 ### Step 1: Install CursorFlow Package
{cursorflow-2.7.1.dist-info → cursorflow-2.7.3.dist-info}/RECORD
CHANGED
@@ -1,7 +1,7 @@
 cursorflow/__init__.py,sha256=2V9xzG2tYxVWOTmSw2v9Jdbr7lSrMi_y2SMUMuNZdvw,2990
 cursorflow/auto_init.py,sha256=dXQaXXiXe4wkUP-jd8fcJ5fYVt7ASdTb47b7SzXymOM,6122
 cursorflow/auto_updater.py,sha256=oQ12TIMZ6Cm3HF-x9iRWFtvOLkRh-JWPqitS69-4roE,7851
-cursorflow/cli.py,sha256=…
+cursorflow/cli.py,sha256=5uObhh0ggolMX9DBHtQ8F74ewpzEcUxj9XkwoWyfCBA,80095
 cursorflow/install_cursorflow_rules.py,sha256=DsZ0680y9JMuTKFXjdgYtOKIEAjBMsdwL8LmA9WEb5A,11864
 cursorflow/post_install.py,sha256=WieBiKWG0qBAQpF8iMVWUyb9Fr2Xky9qECTMPrlAbpE,2678
 cursorflow/updater.py,sha256=SroSQHQi5cYyzcOK_bf-WzmQmE7yeOs8qo3r__j-Z6E,19583
@@ -24,19 +24,19 @@ cursorflow/core/json_utils.py,sha256=ofqRP1twDSHQx-SERgLTO5y2ZXkoirsEw8OOiJkGh2w
 cursorflow/core/log_collector.py,sha256=CYh41SLDI_t-hgzJqG3fTrSTDhEPFDktjO_mOQwucnE,15198
 cursorflow/core/log_monitor.py,sha256=pONMu_JHEnT0T62OA5KRZ4nClzKgNpifPyrfN5w_RM8,6704
 cursorflow/core/mockup_comparator.py,sha256=ttcXdueZz9dwcUGBQ2sft4i66zRdkZkFPu41s6JlEwU,63472
-cursorflow/core/output_manager.py,sha256=…
+cursorflow/core/output_manager.py,sha256=PT73Awq4htYVwbBN3EadXGvuy3Wreip0Y35sCg7Pz5s,28013
 cursorflow/core/persistent_session.py,sha256=FsEHj4wKkycmdp6PFRHv3g333Y74yqra0x_qhUTQpik,36075
-cursorflow/core/query_engine.py,sha256=…
+cursorflow/core/query_engine.py,sha256=138mEdd0D5w2zzYrcBEegDcxbY1hS9hio-U121Tj2wA,53984
 cursorflow/core/report_generator.py,sha256=-vosfyrnfVyWDbAIMlMurl90xOXqBae8d6aLd9sEqiY,10113
 cursorflow/core/trace_manager.py,sha256=Jj9ultZrL1atiZXfcRVI6ynCnnfqZM-X0_taxt-llJ0,7189
 cursorflow/log_sources/local_file.py,sha256=GVnhsaifIdc41twXwbxRM9-fBeRDsknDpk5IEGulnhQ,8318
 cursorflow/log_sources/ssh_remote.py,sha256=_Kwh0bhRpKgq-0c98oaX2hN6h9cT-wCHlqY5NiWVCoY,8388
 cursorflow/rules/__init__.py,sha256=gPcA-IkhXj03sl7cvZV0wwo7CtEkcyuKs4y0F5oQbqE,458
 cursorflow/rules/cursorflow-installation.mdc,sha256=D55pzzDPAVVbE3gAtKPUGoT-2fvB-FI2l6yrTdzUIEo,10208
-cursorflow/rules/cursorflow-usage.mdc,sha256=…
-cursorflow-2.7.1.dist-info/…
-cursorflow-2.7.1.dist-info/…
-cursorflow-2.7.1.dist-info/…
-cursorflow-2.7.1.dist-info/…
-cursorflow-2.7.1.dist-info/…
-cursorflow-2.7.1.dist-info/…
+cursorflow/rules/cursorflow-usage.mdc,sha256=OYsqF1OKeGP5Bl8yR5TJ92dDXH7C3yp-SKAs9aZfBAw,34744
+cursorflow-2.7.3.dist-info/licenses/LICENSE,sha256=e4QbjAsj3bW-xgQOvQelr8sGLYDoqc48k6cKgCr_pBU,1080
+cursorflow-2.7.3.dist-info/METADATA,sha256=J4eWtDeDZ2xaXpLgk9th-ku0Ysmalil-vcjICY8C9D0,19844
+cursorflow-2.7.3.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+cursorflow-2.7.3.dist-info/entry_points.txt,sha256=-Ed_n4Uff7wClEtWS-Py6xmQabecB9f0QAOjX0w7ljA,51
+cursorflow-2.7.3.dist-info/top_level.txt,sha256=t1UZwRyZP4u-ng2CEcNHmk_ZT4ibQxoihB2IjTF7ovc,11
+cursorflow-2.7.3.dist-info/RECORD,,
{cursorflow-2.7.1.dist-info → cursorflow-2.7.3.dist-info}/WHEEL
File without changes
{cursorflow-2.7.1.dist-info → cursorflow-2.7.3.dist-info}/entry_points.txt
File without changes
{cursorflow-2.7.1.dist-info → cursorflow-2.7.3.dist-info}/licenses/LICENSE
File without changes
{cursorflow-2.7.1.dist-info → cursorflow-2.7.3.dist-info}/top_level.txt
File without changes