cursorflow 2.6.0-py3-none-any.whl → 2.6.2-py3-none-any.whl
This diff represents the content of publicly available package versions released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their respective public registries.
- cursorflow/cli.py +18 -6
- cursorflow/core/browser_controller.py +204 -151
- {cursorflow-2.6.0.dist-info → cursorflow-2.6.2.dist-info}/METADATA +1 -1
- {cursorflow-2.6.0.dist-info → cursorflow-2.6.2.dist-info}/RECORD +8 -8
- {cursorflow-2.6.0.dist-info → cursorflow-2.6.2.dist-info}/WHEEL +0 -0
- {cursorflow-2.6.0.dist-info → cursorflow-2.6.2.dist-info}/entry_points.txt +0 -0
- {cursorflow-2.6.0.dist-info → cursorflow-2.6.2.dist-info}/licenses/LICENSE +0 -0
- {cursorflow-2.6.0.dist-info → cursorflow-2.6.2.dist-info}/top_level.txt +0 -0
cursorflow/cli.py
CHANGED
@@ -357,8 +357,8 @@ def test(base_url, path, actions, output, logs, config, verbose, headless, timeo
               help='JSON array of viewports to test: [{"width": 1440, "height": 900, "name": "desktop"}]')
 @click.option('--diff-threshold', '-t', type=float, default=0.1,
               help='Visual difference threshold (0.0-1.0)')
-@click.option('--output', '-o',
-              help='Output file for comparison results')
+@click.option('--output', '-o',
+              help='Output file for comparison results (auto-generated in .cursorflow/artifacts/ if not specified)')
 @click.option('--verbose', is_flag=True,
               help='Verbose output')
 def compare_mockup(mockup_url, base_url, mockup_actions, implementation_actions, viewports, diff_threshold, output, verbose):
@@ -439,7 +439,13 @@ def compare_mockup(mockup_url, base_url, mockup_actions, implementation_actions,
         if similarity_range:
             console.print(f"📈 Similarity range: {similarity_range.get('min', 0)}% - {similarity_range.get('max', 0)}%")
 
-        # Save results
+        # Save results (in artifacts directory by default for consistency)
+        if not output:
+            # Auto-generate filename in artifacts directory
+            comparison_id = results.get('comparison_id', f"comparison_{int(time.time())}")
+            output = f".cursorflow/artifacts/{comparison_id}.json"
+            Path('.cursorflow/artifacts').mkdir(parents=True, exist_ok=True)
+
         from .core.json_utils import safe_json_dump
         safe_json_dump(results, output)
 
@@ -463,8 +469,8 @@ def compare_mockup(mockup_url, base_url, mockup_actions, implementation_actions,
               help='JSON file with base actions to perform before each test')
 @click.option('--diff-threshold', '-t', type=float, default=0.1,
               help='Visual difference threshold (0.0-1.0)')
-@click.option('--output', '-o',
-              help='Output file for iteration results')
+@click.option('--output', '-o',
+              help='Output file for iteration results (auto-generated in .cursorflow/artifacts/ if not specified)')
 @click.option('--verbose', is_flag=True,
               help='Verbose output')
 def iterate_mockup(mockup_url, base_url, css_improvements, base_actions, diff_threshold, output, verbose):
@@ -542,7 +548,13 @@ def iterate_mockup(mockup_url, base_url, css_improvements, base_actions, diff_th
         for i, rec in enumerate(recommendations[:3]):
             console.print(f" {i+1}. {rec.get('description', 'No description')}")
 
-        # Save results
+        # Save results (in artifacts directory by default for consistency)
+        if not output:
+            # Auto-generate filename in artifacts directory
+            session_id = results.get('session_id', f"iteration_{int(time.time())}")
+            output = f".cursorflow/artifacts/{session_id}.json"
+            Path('.cursorflow/artifacts').mkdir(parents=True, exist_ok=True)
+
         from cursorflow.core.json_utils import safe_json_serialize
         with open(output, 'w') as f:
             json.dump(results, f, indent=2, default=safe_json_serialize)
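Both --output fallbacks follow the same pattern: when the flag is omitted, derive a filename from an ID carried in the results payload and write it under .cursorflow/artifacts/. A minimal standalone sketch of that logic (resolve_output_path is an illustrative name, not a function shipped in cursorflow):

    from pathlib import Path
    from typing import Optional
    import time

    def resolve_output_path(results: dict, output: Optional[str], prefix: str) -> str:
        """Return the explicit --output value, or an auto-generated artifact path."""
        if output:
            return output
        # Prefer an ID from the results payload, fall back to a timestamped name
        run_id = results.get("comparison_id") or results.get("session_id") or f"{prefix}_{int(time.time())}"
        artifacts = Path(".cursorflow/artifacts")
        artifacts.mkdir(parents=True, exist_ok=True)  # same directory the CLI now creates
        return str(artifacts / f"{run_id}.json")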
cursorflow/core/browser_controller.py
CHANGED
@@ -189,165 +189,205 @@ class BrowserController:
 
     def _handle_console_message(self, msg):
         """Handle console messages from any framework with enhanced error context collection"""
-        …
-        # Enhanced logging for better correlation
-        if msg.type == "error":
-            self.logger.error(f"Console Error: {msg.text} at {msg.location}")
-
-            # v2.0 Enhancement: Trigger error context collection for console errors
-            if self.error_context_collector:
-                error_event = {
-                    'type': 'console_error',
-                    'message': msg.text,
-                    'location': log_entry['location'],
-                    'stack_trace': log_entry['stack_trace'],
-                    'timestamp': log_entry['timestamp']
-                }
-                # Capture context asynchronously (don't block the event handler)
-                asyncio.create_task(self._collect_error_context_async(error_event))
-
-        elif msg.type == "warning":
-            self.logger.warning(f"Console Warning: {msg.text}")
-        elif msg.type in ["log", "info"] and any(keyword in msg.text.lower() for keyword in ["error", "failed", "exception"]):
-            # Catch application logs that indicate errors
-            self.logger.warning(f"App Error Log: {msg.text}")
+        try:
+            log_entry = {
+                "timestamp": time.time(),
+                "type": msg.type,
+                "text": msg.text,
+                "location": {
+                    "url": msg.location.get("url", "") if msg.location else "",
+                    "line": msg.location.get("lineNumber", 0) if msg.location else 0,
+                    "column": msg.location.get("columnNumber", 0) if msg.location else 0
+                },
+                "args": [str(arg) for arg in msg.args] if msg.args else [],
+                "stack_trace": getattr(msg, 'stackTrace', None)
+            }
+            self.console_logs.append(log_entry)
 
-        …
+            # Enhanced logging for better correlation
+            if msg.type == "error":
+                self.logger.error(f"Console Error: {msg.text} at {msg.location}")
+
+                # v2.0 Enhancement: Trigger error context collection for console errors
+                if self.error_context_collector:
+                    error_event = {
+                        'type': 'console_error',
+                        'message': msg.text,
+                        'location': log_entry['location'],
+                        'stack_trace': log_entry['stack_trace'],
+                        'timestamp': log_entry['timestamp']
+                    }
+                    # Capture context asynchronously (don't block the event handler)
+                    asyncio.create_task(self._collect_error_context_async(error_event))
+
+            elif msg.type == "warning":
+                self.logger.warning(f"Console Warning: {msg.text}")
+            elif msg.type in ["log", "info"] and any(keyword in msg.text.lower() for keyword in ["error", "failed", "exception"]):
+                # Catch application logs that indicate errors
+                self.logger.warning(f"App Error Log: {msg.text}")
+
+                # v2.0 Enhancement: Collect context for application error logs too
+                if self.error_context_collector:
+                    error_event = {
+                        'type': 'app_error_log',
+                        'message': msg.text,
+                        'timestamp': log_entry['timestamp']
+                    }
+                    asyncio.create_task(self._collect_error_context_async(error_event))
+        except Exception as e:
+            # Defensive error handling - don't let console message parsing break tests
+            self.logger.debug(f"Console message handler error: {e}")
+            # Continue test execution despite error
 
     def _handle_request(self, request):
         """Handle network requests - framework agnostic"""
-        …
-        # Capture complete payload data for all request types
-        if request.post_data:
-            request_data["post_data"] = request.post_data
-            request_data["post_data_size"] = len(request.post_data)
+        try:
+            # Capture complete request data
+            request_data = {
+                "timestamp": time.time(),
+                "type": "request",
+                "url": request.url,
+                "method": request.method,
+                "headers": dict(request.headers),
+                "resource_type": request.resource_type,  # document, xhr, fetch, etc.
+                "is_navigation_request": request.is_navigation_request()
+            }
 
-        …
+            # Capture complete payload data for all request types
+            # Wrap in try/except to handle gzip-compressed data gracefully
+            try:
+                if request.post_data:
+                    request_data["post_data"] = request.post_data
+                    request_data["post_data_size"] = len(request.post_data)
+
+                    # Try to parse JSON payloads for better debugging
+                    content_type = request.headers.get("content-type", "")
+                    if "application/json" in content_type:
+                        try:
+                            import json
+                            request_data["parsed_json"] = json.loads(request.post_data)
+                        except:
+                            pass
+                    elif "application/x-www-form-urlencoded" in content_type:
+                        try:
+                            from urllib.parse import parse_qs
+                            request_data["parsed_form"] = parse_qs(request.post_data)
+                        except:
+                            pass
+            except UnicodeDecodeError:
+                # Handle gzip-compressed or binary data gracefully
+                # This happens when Playwright can't decode the post_data (e.g., gzip magic bytes 0x1f 0x8b)
+                request_data["post_data"] = "[binary/compressed data]"
+                request_data["post_data_size"] = 0
+                self.logger.debug(f"Binary/compressed POST data detected for {request.url}")
+            except Exception as e:
+                # Graceful degradation - don't let post_data parsing break request tracking
+                request_data["post_data"] = None
+                request_data["post_data_size"] = 0
+                self.logger.debug(f"Could not capture post_data for {request.url}: {e}")
+
+            # Capture query parameters
+            from urllib.parse import urlparse, parse_qs
+            parsed_url = urlparse(request.url)
+            if parsed_url.query:
+                request_data["query_params"] = parse_qs(parsed_url.query)
+
+            # Capture file uploads
+            if "multipart/form-data" in request.headers.get("content-type", ""):
+                request_data["has_file_upload"] = True
+                # Note: Actual file content not captured for performance/privacy
+
+            self.network_requests.append(request_data)
+
+            # Enhanced logging for correlation
+            if request.resource_type in ["xhr", "fetch"] or "/api/" in request.url:
+                payload_info = ""
+                post_data_value = request_data.get("post_data")
+                post_data_size = request_data.get("post_data_size", 0)
+                if post_data_value and post_data_size > 0:
+                    payload_info = f" (payload: {post_data_size} bytes)"
+                self.logger.debug(f"API Request: {request.method} {request.url}{payload_info}")
+
+                # Log critical data for immediate debugging
+                if post_data_value and post_data_size > 0 and post_data_size < 500:  # Only log small payloads
+                    self.logger.debug(f"Request payload: {post_data_value}")
+        except Exception as e:
+            # Top-level defensive error handling - don't let request handler break event listeners
+            self.logger.debug(f"Request handler error: {e}")
+            # Continue test execution despite error
 
319
|
def _handle_response(self, response):
|
295
320
|
"""Handle network responses with enhanced error context collection"""
|
296
|
-
|
297
|
-
|
298
|
-
|
299
|
-
|
300
|
-
|
301
|
-
|
302
|
-
|
303
|
-
|
304
|
-
|
305
|
-
|
306
|
-
|
307
|
-
|
308
|
-
# Capture response body asynchronously (Phase 1.4: Network Response Body Capture)
|
309
|
-
asyncio.create_task(self._capture_response_body_async(response, response_data))
|
310
|
-
|
311
|
-
# Log failed requests for correlation
|
312
|
-
if response.status >= 400:
|
313
|
-
self.logger.warning(f"Failed Response: {response.status} {response.url}")
|
321
|
+
try:
|
322
|
+
response_data = {
|
323
|
+
"timestamp": time.time(),
|
324
|
+
"type": "response",
|
325
|
+
"url": response.url,
|
326
|
+
"status": response.status,
|
327
|
+
"status_text": response.status_text,
|
328
|
+
"headers": dict(response.headers),
|
329
|
+
"size": 0, # Will be populated by _capture_response_body if needed
|
330
|
+
"from_cache": response.from_service_worker or False
|
331
|
+
}
|
332
|
+
self.network_requests.append(response_data)
|
314
333
|
|
315
|
-
#
|
316
|
-
|
317
|
-
|
318
|
-
|
319
|
-
|
320
|
-
|
321
|
-
|
322
|
-
|
323
|
-
|
324
|
-
|
325
|
-
|
326
|
-
|
327
|
-
|
328
|
-
|
329
|
-
|
330
|
-
|
331
|
-
|
332
|
-
|
333
|
-
|
334
|
-
|
335
|
-
|
336
|
-
|
334
|
+
# Capture response body asynchronously (Phase 1.4: Network Response Body Capture)
|
335
|
+
asyncio.create_task(self._capture_response_body_async(response, response_data))
|
336
|
+
|
337
|
+
# Log failed requests for correlation
|
338
|
+
if response.status >= 400:
|
339
|
+
self.logger.warning(f"Failed Response: {response.status} {response.url}")
|
340
|
+
|
341
|
+
# v2.0 Enhancement: Trigger error context collection for failed requests
|
342
|
+
if self.error_context_collector:
|
343
|
+
error_event = {
|
344
|
+
'type': 'network_error',
|
345
|
+
'url': response.url,
|
346
|
+
'status': response.status,
|
347
|
+
'status_text': response.status_text,
|
348
|
+
'headers': dict(response.headers),
|
349
|
+
'timestamp': response_data['timestamp']
|
350
|
+
}
|
351
|
+
# Capture context asynchronously
|
352
|
+
asyncio.create_task(self._collect_error_context_async(error_event))
|
353
|
+
|
354
|
+
# Capture response body for important requests
|
355
|
+
should_capture_body = (
|
356
|
+
response.status >= 400 or # All error responses
|
357
|
+
any(api_path in response.url for api_path in ["/api/", "/ajax", ".json"]) or # API calls
|
358
|
+
"application/json" in response.headers.get("content-type", "") # JSON responses
|
359
|
+
)
|
360
|
+
|
361
|
+
if should_capture_body:
|
362
|
+
asyncio.create_task(self._capture_response_body(response))
|
363
|
+
except Exception as e:
|
364
|
+
# Defensive error handling - don't let response handler break event listeners
|
365
|
+
self.logger.debug(f"Response handler error: {e}")
|
366
|
+
# Continue test execution despite error
|
337
367
|
|
     def _handle_page_error(self, error):
         """Handle page errors from any framework"""
-        …
+        try:
+            self.console_logs.append({
+                "timestamp": time.time(),
+                "type": "pageerror",
+                "text": str(error),
+                "location": None
+            })
+            self.logger.error(f"Page error: {error}")
+        except Exception as e:
+            # Defensive error handling - don't let page error handler break event listeners
+            self.logger.debug(f"Page error handler error: {e}")
+            # Continue test execution despite error
 
     def _handle_page_crash(self, page):
         """Handle page crashes"""
-        …
+        try:
+            self.logger.error("Page crashed - attempting recovery")
+        except Exception as e:
+            # Defensive error handling - don't let crash handler break event listeners
+            self.logger.debug(f"Page crash handler error: {e}")
+            # Continue test execution despite error
 
     async def navigate(self, path: str, wait_for_load: bool = True):
         """Navigate to URL - works with any web framework"""
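The common thread in these handler changes is that every Playwright event callback is now wrapped in a broad try/except, so a malformed console message or an undecodable request body degrades to a debug log instead of breaking the listener and aborting the test run. A minimal, self-contained sketch of the same pattern using Playwright's sync API (the handler names and captured fields are illustrative, not cursorflow's internals):

    import logging
    from playwright.sync_api import sync_playwright

    log = logging.getLogger("event-capture")
    console_logs, network_requests = [], []

    def on_console(msg):
        # Defensive wrapper: a malformed message must not propagate out of the callback
        try:
            console_logs.append({"type": msg.type, "text": msg.text, "location": msg.location})
            if msg.type == "error":
                log.error("Console Error: %s at %s", msg.text, msg.location)
        except Exception as exc:
            log.debug("console handler error: %s", exc)

    def on_request(request):
        try:
            entry = {"url": request.url, "method": request.method}
            try:
                if request.post_data:  # per the diff, this can raise UnicodeDecodeError on gzip/binary bodies
                    entry["post_data"] = request.post_data
            except UnicodeDecodeError:
                entry["post_data"] = "[binary/compressed data]"
            network_requests.append(entry)
        except Exception as exc:
            log.debug("request handler error: %s", exc)

    with sync_playwright() as p:
        page = p.chromium.launch(headless=True).new_page()
        page.on("console", on_console)
        page.on("request", on_request)
        page.goto("https://example.com")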
@@ -2481,15 +2521,28 @@ class BrowserController:
                                     resource_analysis: Dict = None, storage_analysis: Dict = None) -> Dict[str, Any]:
         """Enhanced analysis summary with v2.0 comprehensive data"""
         try:
+            # Safe value extraction with null-safety
+            error_count = console_data.get("console_summary", {}).get("error_count") or 0
+            failed_requests = network_data.get("failed_requests", {}).get("count") or 0
+            page_load_time = performance_data.get("performance_summary", {}).get("page_load_time") or 0
+
+            # Ensure numeric values are actually numeric (not None)
+            if not isinstance(error_count, (int, float)):
+                error_count = 0
+            if not isinstance(failed_requests, (int, float)):
+                failed_requests = 0
+            if not isinstance(page_load_time, (int, float)):
+                page_load_time = 0
+
             # Base summary (v1.x compatibility)
             summary = {
                 "page_health": {
                     "dom_elements_count": dom_analysis.get("totalElements", 0),
-                    "has_errors": …
-                    "error_count": …
+                    "has_errors": error_count > 0,
+                    "error_count": error_count,
                     "warning_count": console_data.get("console_summary", {}).get("warning_count", 0),
                     "failed_requests": network_data.get("network_summary", {}).get("failed_requests", 0),
-                    "page_load_time_ms": …
+                    "page_load_time_ms": page_load_time
                 },
                 "interaction_readiness": {
                     "interactive_elements": dom_analysis.get("pageStructure", {}).get("interactiveElements", 0),
@@ -2505,12 +2558,12 @@ class BrowserController:
                 },
                 "quality_indicators": {
                     "has_console_errors": console_data.get("console_summary", {}).get("has_recent_errors", False),
-                    "has_failed_requests": …
+                    "has_failed_requests": failed_requests > 0,
                     "performance_score": self._calculate_performance_score(performance_data),
                     "overall_health": "good" if (
-                        …
+                        error_count == 0 and
+                        failed_requests == 0 and
+                        page_load_time < 3000
                     ) else "needs_attention"
                 }
             }
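The new extraction block exists because dict.get(key, default) only covers a missing key; a key that is present with an explicit None still comes back as None and would later be compared with > or folded into the health check. A small illustration of the guard on made-up data (safe_count is a hypothetical helper, not part of the package):

    def safe_count(container: dict, *path, default=0):
        """Walk nested dicts, treating missing keys and explicit None values as the default."""
        value = container
        for key in path:
            value = (value or {}).get(key)
        return value if isinstance(value, (int, float)) else default

    console_data = {"console_summary": {"error_count": None}}   # key present, value None
    assert console_data.get("console_summary", {}).get("error_count", 0) is None   # plain .get() is not enough
    assert safe_count(console_data, "console_summary", "error_count") == 0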
{cursorflow-2.6.0.dist-info → cursorflow-2.6.2.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: cursorflow
-Version: 2.6.0
+Version: 2.6.2
 Summary: 🔥 Complete page intelligence for AI-driven development with Hot Reload Intelligence - captures DOM, network, console, performance, HMR events, and comprehensive page analysis
 Author-email: GeekWarrior Development <rbush@cooltheory.com>
 License-Expression: MIT
{cursorflow-2.6.0.dist-info → cursorflow-2.6.2.dist-info}/RECORD
CHANGED
@@ -1,14 +1,14 @@
 cursorflow/__init__.py,sha256=2V9xzG2tYxVWOTmSw2v9Jdbr7lSrMi_y2SMUMuNZdvw,2990
 cursorflow/auto_init.py,sha256=dXQaXXiXe4wkUP-jd8fcJ5fYVt7ASdTb47b7SzXymOM,6122
 cursorflow/auto_updater.py,sha256=oQ12TIMZ6Cm3HF-x9iRWFtvOLkRh-JWPqitS69-4roE,7851
-cursorflow/cli.py,sha256=…
+cursorflow/cli.py,sha256=ySz4bmn-vpLzS4rDBefGON93oTBTqR2ZopLfXZ5nVno,65478
 cursorflow/install_cursorflow_rules.py,sha256=DsZ0680y9JMuTKFXjdgYtOKIEAjBMsdwL8LmA9WEb5A,11864
 cursorflow/post_install.py,sha256=WieBiKWG0qBAQpF8iMVWUyb9Fr2Xky9qECTMPrlAbpE,2678
 cursorflow/updater.py,sha256=SroSQHQi5cYyzcOK_bf-WzmQmE7yeOs8qo3r__j-Z6E,19583
 cursorflow/core/action_validator.py,sha256=SCk3w_62D1y0cCRDOajK8L44-abSj_KpnUBgR_yNVW4,6846
 cursorflow/core/agent.py,sha256=f3lecgEzDRDdGTVccAtorpLGfNJJ49bbsQAmgr0vNGg,10136
 cursorflow/core/auth_handler.py,sha256=oRafO6ZdxoHryBIvHsrNV8TECed4GXpJsdEiH0KdPPk,17149
-cursorflow/core/browser_controller.py,sha256=…
+cursorflow/core/browser_controller.py,sha256=RDIqIwmSAJxtOgGplHjr5h2s6LdL1I3ZVTEvrNM4h9s,150282
 cursorflow/core/browser_engine.py,sha256=7N9hPOyDrEhLWYgZW2981N9gKlHF6Lbp7D7h0zBzuz8,14851
 cursorflow/core/config_validator.py,sha256=HRtONSOmM0Xxt3-ok3xwnBADRiNnI0nNOMaS2OqOkDk,7286
 cursorflow/core/css_iterator.py,sha256=whLCIwbHZEWaH1HCbmqhNX5zrh_fL-r3hsxKjYsukcE,16478
@@ -31,9 +31,9 @@ cursorflow/log_sources/ssh_remote.py,sha256=_Kwh0bhRpKgq-0c98oaX2hN6h9cT-wCHlqY5
 cursorflow/rules/__init__.py,sha256=gPcA-IkhXj03sl7cvZV0wwo7CtEkcyuKs4y0F5oQbqE,458
 cursorflow/rules/cursorflow-installation.mdc,sha256=D55pzzDPAVVbE3gAtKPUGoT-2fvB-FI2l6yrTdzUIEo,10208
 cursorflow/rules/cursorflow-usage.mdc,sha256=W56Qydfb4jqSBTrki7cNyFPfOe_b89mzniRtKSrMlz4,24138
-cursorflow-2.6.0.dist-info/…
-cursorflow-2.6.0.dist-info/…
-cursorflow-2.6.0.dist-info/…
-cursorflow-2.6.0.dist-info/…
-cursorflow-2.6.0.dist-info/…
-cursorflow-2.6.0.dist-info/…
+cursorflow-2.6.2.dist-info/licenses/LICENSE,sha256=e4QbjAsj3bW-xgQOvQelr8sGLYDoqc48k6cKgCr_pBU,1080
+cursorflow-2.6.2.dist-info/METADATA,sha256=wT7QRz67l6T69kWE8kFRlGbane6AgmEp1J3mb-56C5g,15011
+cursorflow-2.6.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+cursorflow-2.6.2.dist-info/entry_points.txt,sha256=-Ed_n4Uff7wClEtWS-Py6xmQabecB9f0QAOjX0w7ljA,51
+cursorflow-2.6.2.dist-info/top_level.txt,sha256=t1UZwRyZP4u-ng2CEcNHmk_ZT4ibQxoihB2IjTF7ovc,11
+cursorflow-2.6.2.dist-info/RECORD,,
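For reference, each RECORD row is path,sha256=<digest>,<size>, where the digest is the unpadded URL-safe base64 encoding of the file's SHA-256, so the updated hashes above can be re-checked against an extracted wheel. A small verification sketch (the wheel filename is a placeholder):

    import base64, csv, hashlib, zipfile

    def verify_record(wheel_path: str) -> None:
        """Recompute the hash of every file in the wheel and compare it with RECORD."""
        with zipfile.ZipFile(wheel_path) as wheel:
            record_name = next(n for n in wheel.namelist() if n.endswith(".dist-info/RECORD"))
            for path, hash_spec, _size in csv.reader(wheel.read(record_name).decode().splitlines()):
                if not hash_spec:  # the RECORD entry itself carries no hash
                    continue
                algo, _, expected = hash_spec.partition("=")
                digest = hashlib.new(algo, wheel.read(path)).digest()
                actual = base64.urlsafe_b64encode(digest).rstrip(b"=").decode()
                assert actual == expected, f"hash mismatch for {path}"

    verify_record("cursorflow-2.6.2-py3-none-any.whl")  # placeholder path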
File without changes: {cursorflow-2.6.0.dist-info → cursorflow-2.6.2.dist-info}/WHEEL, entry_points.txt, licenses/LICENSE, top_level.txt