selenium-selector-autocorrect 0.1.1__py3-none-any.whl → 0.1.2__py3-none-any.whl

This diff shows the changes between publicly released versions of the package as they appear in their public registries, and is provided for informational purposes only.
@@ -15,7 +15,7 @@ Environment Variables:
  SELENIUM_AUTO_UPDATE_TESTS: Auto-update test files with corrections (default: "0")
  """

- __version__ = "0.1.1"
+ __version__ = "0.1.2"

  from .ai_providers import AIProvider, LocalAIProvider, configure_provider, get_provider
  from .auto_correct import (
@@ -48,14 +48,24 @@ class LocalAIProvider(AIProvider):
  if self._available is not None:
  return self._available
  try:
- response = requests.post(
- f"{self.base_url}/v1/chat/completions",
- json={"messages": [{"role": "user", "content": "test"}], "max_tokens": 1},
- timeout=5
- )
+ url = f"{self.base_url}/v1/chat/completions"
+ payload = {"messages": [{"role": "user", "content": "test"}], "max_tokens": 1}
+
+ logger.debug(f"[AI-REQUEST] POST {url}")
+ logger.debug(f"[AI-REQUEST] Payload: {payload}")
+
+ response = requests.post(url, json=payload, timeout=30)
+
+ logger.debug(f"[AI-RESPONSE] Status: {response.status_code}")
+ logger.debug(f"[AI-RESPONSE] Headers: {dict(response.headers)}")
+ logger.debug(f"[AI-RESPONSE] Body length: {len(response.text)} chars")
+ if response.text:
+ logger.debug(f"[AI-RESPONSE] Body preview: {response.text[:500]}")
+
  self._available = response.status_code in (200, 400)
  except Exception as e:
  logger.info(f"Local AI service not available at {self.base_url}: {e}")
+ logger.debug(f"[AI-ERROR] Exception details: {type(e).__name__}: {str(e)}")
  self._available = False
  return self._available
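As an aside, the availability probe changed in this hunk reduces to a cheap one-token POST against the chat endpoint, with 200 or 400 both counted as "service is up". A minimal standalone sketch of that idea (the URL and the 200/400 acceptance rule come from the diff; the function name and logging setup are illustrative):

```python
import logging

import requests

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("availability-probe")


def probe_chat_endpoint(base_url: str = "http://localhost:8765") -> bool:
    """Send a tiny chat request; treat HTTP 200 or 400 as 'the service is reachable'."""
    url = f"{base_url}/v1/chat/completions"
    payload = {"messages": [{"role": "user", "content": "test"}], "max_tokens": 1}
    try:
        logger.debug("POST %s payload=%s", url, payload)
        response = requests.post(url, json=payload, timeout=30)
        logger.debug("status=%s body_len=%d", response.status_code, len(response.text))
        return response.status_code in (200, 400)
    except requests.RequestException as exc:
        logger.debug("probe failed: %s: %s", type(exc).__name__, exc)
        return False


if __name__ == "__main__":
    print("available:", probe_chat_endpoint())
```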
 
@@ -70,23 +80,37 @@ class LocalAIProvider(AIProvider):
  AI response text or None if request fails
  """
  try:
- response = requests.post(
- f"{self.base_url}/v1/chat/completions",
- json={
- "messages": [
- {"role": "system", "content": system_prompt},
- {"role": "user", "content": user_prompt}
- ],
- "temperature": 0.3,
- "max_tokens": 500
- },
- timeout=30
- )
+ url = f"{self.base_url}/v1/chat/completions"
+ payload = {
+ "messages": [
+ {"role": "system", "content": system_prompt},
+ {"role": "user", "content": user_prompt}
+ ],
+ "temperature": 0.3,
+ "max_tokens": 500
+ }
+
+ logger.debug(f"[AI-REQUEST] POST {url}")
+ logger.debug(f"[AI-REQUEST] System prompt ({len(system_prompt)} chars): {system_prompt[:200]}...")
+ logger.debug(f"[AI-REQUEST] User prompt ({len(user_prompt)} chars): {user_prompt[:200]}...")
+ logger.debug(f"[AI-REQUEST] Full payload: {payload}")
+
+ response = requests.post(url, json=payload, timeout=30)
+
+ logger.debug(f"[AI-RESPONSE] Status: {response.status_code}")
+ logger.debug(f"[AI-RESPONSE] Headers: {dict(response.headers)}")
+ logger.debug(f"[AI-RESPONSE] Body length: {len(response.text)} chars")
+ logger.debug(f"[AI-RESPONSE] Full body: {response.text[:2000]}")
+
  response.raise_for_status()
  data: Dict[str, Any] = response.json()
  content: Optional[str] = data.get("choices", [{}])[0].get("message", {}).get("content", "")
+ logger.debug(f"[AI-PARSED] Content length: {len(content) if content else 0} chars")
+ logger.debug(f"[AI-PARSED] Content preview: {content[:500] if content else 'None'}")
  return content
  except requests.exceptions.HTTPError as e:
+ logger.error(f"[AI-ERROR] HTTP Error {e.response.status_code}")
+ logger.error(f"[AI-ERROR] Response body: {e.response.text[:1000]}")
  if e.response.status_code == 503:
  logger.info(f"Local AI service unavailable (503). Disabling auto-correction.")
  self._available = False
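The suggestion path above is a plain OpenAI-style chat call whose reply is read from `choices[0].message.content`. A rough standalone equivalent with the same payload shape as the diff, assuming a service is listening (the prompts are illustrative):

```python
from typing import Any, Dict, Optional

import requests


def suggest(base_url: str, system_prompt: str, user_prompt: str) -> Optional[str]:
    """POST an OpenAI-compatible chat request and pull the first choice's text."""
    payload = {
        "messages": [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt},
        ],
        "temperature": 0.3,
        "max_tokens": 500,
    }
    response = requests.post(f"{base_url}/v1/chat/completions", json=payload, timeout=30)
    response.raise_for_status()
    data: Dict[str, Any] = response.json()
    return data.get("choices", [{}])[0].get("message", {}).get("content", "")


if __name__ == "__main__":
    print(suggest(
        "http://localhost:8765",
        "You fix Selenium selectors.",
        "Suggest a CSS selector for the login button.",
    ))
```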
@@ -94,7 +118,8 @@ class LocalAIProvider(AIProvider):
  logger.warning(f"Local AI HTTP error: {e}")
  return None
  except Exception as e:
- logger.warning(f"Local AI request failed: {e}")
+ logger.error(f"[AI-ERROR] Request failed: {type(e).__name__}: {str(e)}")
+ logger.debug(f"[AI-ERROR] Exception details: {e}", exc_info=True)
  self._available = False
  return None

@@ -3,8 +3,10 @@
  import json
  import logging
  import os
+ import re
  import traceback
  from datetime import datetime
+ from pathlib import Path
  from typing import Any, Dict, List, Optional, TypedDict

  import requests
@@ -39,6 +41,8 @@ class CorrectionTracker:
  self._corrections: List[CorrectionRecord] = []
  self._local_ai_url: str = os.environ.get("LOCAL_AI_API_URL", "http://localhost:8765")
  self._auto_update_enabled: bool = os.environ.get("SELENIUM_AUTO_UPDATE_TESTS", "0").lower() in ("1", "true", "yes")
+ # Configurable import pattern - set via environment variable for project-specific structure
+ self._import_pattern: str = os.environ.get("SELENIUM_IMPORT_PATTERN", r'from\s+([\w.]+)\s+import')

  def record_correction(
  self,
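The new `SELENIUM_IMPORT_PATTERN` knob above is just a regex whose first capture group must be the dotted module path. A quick illustration of what the default pattern extracts (the sample imports are made up):

```python
import os
import re

# Default mirrors the diff; override SELENIUM_IMPORT_PATTERN for unusual project layouts.
pattern = os.environ.get("SELENIUM_IMPORT_PATTERN", r"from\s+([\w.]+)\s+import")

sample = (
    "from pages.login_page import LoginPage\n"
    "from utils.driver_factory import make_driver\n"
)

print([m.group(1) for m in re.finditer(pattern, sample)])
# ['pages.login_page', 'utils.driver_factory']
```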
@@ -65,16 +69,11 @@ class CorrectionTracker:
  '/selenium_selector_autocorrect/' in filename_lower or
  '\\selenium_selector_autocorrect\\' in filename):
  continue
- # Prioritize test files, then page objects, then ui_client
- if ('test_library' in filename or
- 'test_' in filename or
- 'page_factory' in filename_lower or
- 'ui_client' in filename_lower):
+ # Prioritize test files first
+ if 'test_' in filename:
  test_file = filename
  test_line = frame.lineno
- # Don't break - keep looking for test files specifically
- if 'test_' in filename or 'test_library' in filename:
- break
+ break
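For reference, the simplified prioritisation above ("first test_ frame wins") can be pictured as a short walk over the captured stack. A rough sketch under that assumption (the helper name and iteration order are ours, not the package's):

```python
import traceback
from typing import Optional, Tuple


def first_test_frame() -> Tuple[Optional[str], Optional[int]]:
    """Return (filename, lineno) of the first frame whose file looks like a test module."""
    for frame in traceback.extract_stack():
        filename = frame.filename
        if "selenium_selector_autocorrect" in filename.lower():
            continue  # skip the library's own frames
        if "test_" in filename:
            return filename, frame.lineno
    return None, None


if __name__ == "__main__":
    print(first_test_frame())
```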

  correction: CorrectionRecord = {
  "original_by": original_by,
@@ -113,20 +112,349 @@ class CorrectionTracker:
  test_file = correction.get("test_file")
  if not test_file:
  return
- result = self.update_test_file_via_service(
+
+ # Get all files that need to be updated (test file + referenced files)
+ files_to_update = self._find_files_with_selector(
  test_file,
- correction["original_by"],
- correction["original_value"],
- correction["corrected_by"],
- correction["corrected_value"]
+ correction["original_value"]
  )
- if result.get("success"):
- logger.info(f"[AUTO-UPDATE] Successfully updated {test_file}")
- else:
- logger.warning(f"[AUTO-UPDATE] Failed to update {test_file}: {result.get('errors', [])}")
+
+ updated_count = 0
+ failed_count = 0
+
+ for file_path in files_to_update:
+ result = self.update_test_file_via_service(
+ file_path,
+ correction["original_by"],
+ correction["original_value"],
+ correction["corrected_by"],
+ correction["corrected_value"]
+ )
+ if result.get("success"):
+ updated_count += 1
+ logger.info(f"[AUTO-UPDATE] Successfully updated {file_path}")
+ else:
+ failed_count += 1
+ logger.warning(f"[AUTO-UPDATE] Failed to update {file_path}: {result.get('errors', [])}")
+
+ if updated_count > 0:
+ logger.info(f"[AUTO-UPDATE] Updated {updated_count} file(s), {failed_count} failed")
  except Exception as e:
  logger.warning(f"[AUTO-UPDATE] Error updating test file: {e}")

+ def _find_files_with_selector(self, test_file: str, selector_value: str) -> List[str]:
+ """Find all files that contain the selector and are used by the test file.
+
+ Backward search strategy:
+ 1. Search for selector text in all Python files (fast workspace grep)
+ 2. Extract all imports from test file (with recursion for nested imports)
+ 3. Return only files that contain the selector AND are imported by the test
+
+ This is efficient because:
+ - Workspace search is very fast (indexed)
+ - We only verify imports for files that actually have the selector
+ - Typically finds 1-5 files instead of checking 100s
+ """
+ files_with_selector: List[str] = []
+
+ try:
+ logger.debug(f"[BACKWARD SEARCH] Searching for selector: {selector_value}")
+
+ # Step 1: Search for files containing the selector (fast workspace search)
+ # Try different search queries since literal string search may fail with special characters
+ workspace_files = []
+ search_queries = [
+ selector_value, # Try full selector first
+ # Strip common CSS selector wrappers and search for cleaner text
+ selector_value.replace('[', '').replace(']', '').replace('"', '').replace("'", '').strip(),
+ ]
+
+ logger.debug(f"[BACKWARD SEARCH] Will try {len(search_queries)} search queries")
+ for i, search_query in enumerate(search_queries):
+ logger.debug(f"[BACKWARD SEARCH] Query {i+1}: {search_query[:100]}")
+ if search_query and not workspace_files:
+ files = self._workspace_search_for_selector(search_query)
+ if files:
+ workspace_files = files
+ logger.debug(f"[BACKWARD SEARCH] ✓ Found {len(files)} matches with query {i+1}")
+ break
+ else:
+ logger.debug(f"[BACKWARD SEARCH] ✗ No matches with query {i+1}")
+
+ logger.info(f"[BACKWARD SEARCH] Workspace search found {len(workspace_files)} file(s)")
+
+ if not workspace_files:
+ logger.debug(f"[BACKWARD SEARCH] No matches found with workspace search")
+ logger.info(f"[AUTO-UPDATE] Found selector in 0 file(s): []")
+ return files_with_selector
+
+ # Step 2: Extract all imports from test file (with recursion for nested imports)
+ all_imports = self._extract_all_imports_from_test(test_file)
+ logger.debug(f"[BACKWARD SEARCH] Test file imports {len(all_imports)} files")
+
+ # Normalize paths for comparison
+ test_file_normalized = os.path.normpath(test_file)
+ all_imports_normalized = {os.path.normpath(f) for f in all_imports}
+
+ # Step 3: Verify which matched files are actually used by the test
+ for file_path in workspace_files:
+ file_path_normalized = os.path.normpath(file_path)
+
+ # Check if this is the test file itself
+ if file_path_normalized == test_file_normalized:
+ logger.debug(f"[BACKWARD SEARCH] ✓ Selector in test file: {file_path}")
+ files_with_selector.append(file_path)
+ continue
+
+ # Check if this file is in the imports (direct match)
+ if file_path_normalized in all_imports_normalized:
+ logger.debug(f"[BACKWARD SEARCH] ✓ Selector in imported file: {file_path}")
+ files_with_selector.append(file_path)
+ continue
+
+ # Check if the file path matches any import by filename
+ # (handles different path separators and relative vs absolute paths)
+ file_name = os.path.basename(file_path)
+ for imported_file in all_imports:
+ if os.path.basename(imported_file) == file_name:
+ # Verify it's the same file by checking if paths end the same way
+ imported_parts = imported_file.replace('\\', '/').split('/')
+ file_parts = file_path.replace('\\', '/').split('/')
+
+ # Compare the last N parts of the path
+ min_parts = min(len(imported_parts), len(file_parts))
+ if imported_parts[-min_parts:] == file_parts[-min_parts:]:
+ logger.debug(f"[BACKWARD SEARCH] ✓ Selector in imported file: {file_path}")
+ files_with_selector.append(imported_file) # Use the full path from imports
+ break
+ else:
+ logger.debug(f"[BACKWARD SEARCH] ✗ Selector in unrelated file: {file_path}")
+
+ except Exception as e:
+ logger.debug(f"[BACKWARD SEARCH] Error during search: {e}")
+
+ logger.info(f"[AUTO-UPDATE] Found selector in {len(files_with_selector)} file(s): {files_with_selector}")
+ return files_with_selector
+
+ def _workspace_search_for_selector(self, selector_value: str) -> List[str]:
+ """Try to find files using workspace search API. Returns empty list if not found or API unavailable."""
+ try:
+ # Use dedicated search endpoint (not the unified AI endpoint)
+ search_url = f"{self._local_ai_url}/v1/workspace/files/search"
+ # Prefer a narrower search first (avoids huge workspaces like venv), then fall back.
+ preferred_pattern = os.environ.get("SELENIUM_WORKSPACE_SEARCH_FILE_PATTERN", "src/**/*.py")
+ patterns_to_try = [preferred_pattern, "**/*.py"]
+ seen_patterns = set()
+
+ for file_pattern in patterns_to_try:
+ if not file_pattern or file_pattern in seen_patterns:
+ continue
+ seen_patterns.add(file_pattern)
+
+ search_payload = {
+ "query": selector_value,
+ "filePattern": file_pattern,
+ "maxResults": 50
+ }
+
+ logger.debug(f"[WORKSPACE-SEARCH-REQUEST] URL: {search_url}")
+ logger.debug(f"[WORKSPACE-SEARCH-REQUEST] Payload: {search_payload}")
+
+ response = requests.post(search_url, json=search_payload, timeout=30)
+
+ logger.debug(f"[WORKSPACE-SEARCH-RESPONSE] Status: {response.status_code}")
+ logger.debug(f"[WORKSPACE-SEARCH-RESPONSE] Headers: {dict(response.headers)}")
+ logger.debug(f"[WORKSPACE-SEARCH-RESPONSE] Body length: {len(response.text)} chars")
+
+ if not response.ok:
+ logger.debug(f"[WORKSPACE-SEARCH-RESPONSE] Body: {response.text[:500]}")
+ logger.debug(f"[WORKSPACE-SEARCH] Request failed: {response.status_code}")
+ continue
+
+ search_results = response.json()
+ logger.debug(f"[WORKSPACE-SEARCH-RESPONSE] Parsed JSON keys: {list(search_results.keys())}")
+ logger.debug(f"[WORKSPACE-SEARCH-RESPONSE] Full response: {search_results}")
+
+ file_paths: List[str] = []
+
+ # Handle dedicated endpoint 'results' markdown format
+ results_text = search_results.get("results", "")
+ logger.debug(f"[WORKSPACE-SEARCH] Results text length: {len(results_text)} chars")
+ logger.debug(f"[WORKSPACE-SEARCH] Results text preview: {results_text[:300]}")
+
+ if results_text and "No matches found" not in results_text:
+ logger.debug(f"[WORKSPACE-SEARCH] Parsing markdown results ({len(results_text)} chars)")
+ for line in results_text.split('\n'):
+ if line.startswith('## ') and line.endswith('.py'):
+ file_path = line[3:].strip()
+ if '__pycache__' not in file_path and not file_path.endswith('.pyc') and file_path not in file_paths:
+ file_paths.append(file_path)
+ logger.debug(f"[WORKSPACE-SEARCH] Added file: {file_path}")
+ else:
+ logger.debug(f"[WORKSPACE-SEARCH] No results in response")
+
+ if file_paths:
+ logger.info(
+ f"[WORKSPACE-SEARCH] Found {len(file_paths)} file(s) with selector (pattern={file_pattern})"
+ )
+ return file_paths
+
+ logger.debug(f"[WORKSPACE-SEARCH] No files found (pattern={file_pattern})")
+
+ return []
+
+ except Exception as e:
+ logger.error(f"[WORKSPACE-SEARCH-ERROR] Failed: {type(e).__name__}: {str(e)}")
+ logger.debug(f"[WORKSPACE-SEARCH-ERROR] Details: {e}", exc_info=True)
+ return []
+
+ def _extract_all_imports_from_test(self, test_file: str, max_depth: int = 3, visited: Optional[set] = None) -> List[str]: # noqa: A006
+ """Extract all imported files from test file recursively.
+
+ Used by backward search to verify if a file is actually imported by the test.
+ Only recurses into page objects (not utilities) for efficiency.
+ """
+ if visited is None:
+ visited = set()
+
+ if test_file in visited or max_depth <= 0:
+ return []
+
+ visited.add(test_file)
+ all_imports = []
+
+ try:
+ direct_imports = self._extract_imported_files(test_file)
+ all_imports.extend(direct_imports)
+
+ # Recursively extract imports from page objects only
+ if max_depth > 1:
+ for imported_file in direct_imports:
+ if imported_file not in visited and self._is_page_object_file(imported_file):
+ nested = self._extract_all_imports_from_test(imported_file, max_depth - 1, visited)
+ all_imports.extend(nested)
+
+ except Exception as e:
+ logger.debug(f"[IMPORT EXTRACTION] Error: {e}")
+
+ return all_imports
+
+ def _is_page_object_file(self, file_path: str) -> bool:
+ """Check if a file is likely a page object or steps file (not a utility or base class)."""
+ file_lower = file_path.lower()
+
+ # Include: Page objects, Dialogs, Modals, Components, Steps
+ if any(pattern in file_lower for pattern in ['page.py', 'dialog.py', 'modal.py', 'section.py', 'steps.py', 'step.py']):
+ return True
+
+ # Include: Files in component/header/steps directories
+ if any(pattern in file_lower for pattern in ['component', 'header', 'footer', 'sidebar', '/steps/', '\\steps\\']):
+ return True
+
+ # Exclude: Utilities, base classes, helpers, drivers, clients
+ if any(pattern in file_lower for pattern in ['utility.py', 'helper.py', 'base.py', 'util.py', '__init__.py', 'driver.py', 'client.py']):
+ return False
+
+ return False
+
+ def _extract_imported_files(self, test_file: str) -> List[str]:
+ """Extract imported page object file paths from a test file."""
+ imported_files: List[str] = []
+
+ try:
+ # Read the test file content via dedicated endpoint
+ read_url = f"{self._local_ai_url}/v1/workspace/files/read"
+ read_payload = {"filePath": test_file}
+
+ logger.debug(f"[FILE-READ-REQUEST] URL: {read_url}")
+ logger.debug(f"[FILE-READ-REQUEST] Payload: {read_payload}")
+
+ read_response = requests.post(read_url, json=read_payload, timeout=30)
+
+ logger.debug(f"[FILE-READ-RESPONSE] Status: {read_response.status_code}")
+ logger.debug(f"[FILE-READ-RESPONSE] Headers: {dict(read_response.headers)}")
+ logger.debug(f"[FILE-READ-RESPONSE] Body length: {len(read_response.text)} chars")
+
+ if not read_response.ok:
+ logger.debug(f"[FILE-READ-RESPONSE] Body: {read_response.text[:500]}")
+ return imported_files
+
+ file_content = read_response.json()
+ logger.debug(f"[FILE-READ-RESPONSE] Parsed JSON keys: {list(file_content.keys())}")
+ logger.debug(f"[FILE-READ-RESPONSE] Full response: {file_content}")
+
+ if not file_content.get("success"):
+ logger.debug(f"[FILE-READ] Read failed: success={file_content.get('success')}")
+ return imported_files
+
+ content = file_content.get("content", "")
+ logger.debug(f"[FILE-READ] Content length: {len(content)} chars")
+
+ # Pattern to match imports - configurable via SELENIUM_IMPORT_PATTERN env var
+ for match in re.finditer(self._import_pattern, content):
+ module_path = match.group(1)
+ # Convert module path to file path
+ file_path = self._module_to_file_path(module_path, test_file)
+ if file_path:
+ imported_files.append(file_path)
+
+ # Also extract from imports in step functions and page objects
+ # Pattern: from <path> import <class>
+ step_import_pattern = r'from\s+([\w.]+)\s+import\s+([\w,\s]+)'
+ for match in re.finditer(step_import_pattern, content):
+ module_path = match.group(1)
+ # Include Page classes and step files (configurable via environment variable)
+ keywords = os.environ.get("SELENIUM_IMPORT_KEYWORDS", "Page,.steps.,steps").split(",")
+ if any(keyword.strip() in module_path for keyword in keywords):
+ file_path = self._module_to_file_path(module_path, test_file)
+ if file_path:
+ imported_files.append(file_path)
+
+ except Exception as e:
+ logger.debug(f"[IMPORT EXTRACTION] Error: {e}")
+
+ return imported_files
+
+ def _module_to_file_path(self, module_path: str, reference_file: str) -> Optional[str]:
+ """Convert a Python module path to a file path."""
+ try:
+ # Extract the root package name from module_path
+ parts = module_path.split('.')
+ if not parts:
+ return None
+
+ root_package = parts[0]
+ ref_path = Path(reference_file)
+
+ # Find the root package directory by going up from the reference file
+ current = ref_path.parent
+ package_root = None
+
+ while current.parent != current:
+ if (current / root_package).exists():
+ package_root = current / root_package
+ break
+ current = current.parent
+
+ if not package_root:
+ return None
+
+ # Convert module path to relative path, removing the root package
+ relative_parts = parts[1:] # Remove root package prefix
+ if not relative_parts:
+ return None
+
+ relative_path = Path(*relative_parts)
+ file_path = package_root / relative_path.with_suffix('.py')
+
+ if file_path.exists():
+ return str(file_path).replace('\\', '/')
+
+ except Exception as e:
+ logger.debug(f"[MODULE CONVERSION] Error converting {module_path}: {e}")
+
+ return None
+
  def update_test_file_via_service(
  self,
  file_path: str,
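The net effect of the backward search added in this hunk is an intersection: files that contain the broken selector, filtered down to the test file itself plus its (transitively) imported page objects. A compact sketch of that filtering step, with made-up paths:

```python
import os
from typing import List


def filter_to_imported(test_file: str, hits: List[str], imports: List[str]) -> List[str]:
    """Keep only search hits that are the test itself or one of its imports."""
    test_norm = os.path.normpath(test_file)
    imports_norm = {os.path.normpath(p) for p in imports}
    return [
        p for p in hits
        if os.path.normpath(p) == test_norm or os.path.normpath(p) in imports_norm
    ]


if __name__ == "__main__":
    print(filter_to_imported(
        "tests/test_login.py",
        ["tests/test_login.py", "pages/login_page.py", "other/unrelated_page.py"],
        ["pages/login_page.py"],
    ))
    # ['tests/test_login.py', 'pages/login_page.py']
```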
@@ -136,45 +464,152 @@ class CorrectionTracker:
  corrected_value: str
  ) -> Dict[str, Any]:
  try:
+ # Read the file using dedicated endpoint
  read_url = f"{self._local_ai_url}/v1/workspace/files/read"
- read_response = requests.post(read_url, json={"filePath": file_path}, timeout=30)
+ read_payload = {"filePath": file_path}
+
+ logger.debug(f"[FILE-EDIT-READ-REQUEST] URL: {read_url}")
+ logger.debug(f"[FILE-EDIT-READ-REQUEST] Payload: {read_payload}")
+
+ read_response = requests.post(read_url, json=read_payload, timeout=30)
+
+ logger.debug(f"[FILE-EDIT-READ-RESPONSE] Status: {read_response.status_code}")
+ logger.debug(f"[FILE-EDIT-READ-RESPONSE] Headers: {dict(read_response.headers)}")
+ logger.debug(f"[FILE-EDIT-READ-RESPONSE] Body length: {len(read_response.text)} chars")
+
  read_response.raise_for_status()
  file_content = read_response.json()
+
+ logger.debug(f"[FILE-EDIT-READ-RESPONSE] Parsed JSON keys: {list(file_content.keys())}")

  if not file_content.get("success"):
+ logger.error(f"[FILE-EDIT] Read failed: {file_content}")
  return {"success": False, "errors": ["Could not read file"]}

  content = file_content.get("content", "")
- old_patterns = [
- f'"{original_value}"',
- f"'{original_value}'",
- ]
+ logger.debug(f"[FILE-EDIT] Read {len(content)} chars from {file_path}")
+
+ def _strategy_to_by_token(strategy: str) -> Optional[str]:
+ s = (strategy or "").strip().lower()
+ mapping = {
+ "css selector": "CSS_SELECTOR",
+ "css": "CSS_SELECTOR",
+ "xpath": "XPATH",
+ "id": "ID",
+ "name": "NAME",
+ "class name": "CLASS_NAME",
+ "class": "CLASS_NAME",
+ "tag name": "TAG_NAME",
+ "tag": "TAG_NAME",
+ "link text": "LINK_TEXT",
+ "partial link text": "PARTIAL_LINK_TEXT",
+ }
+ return mapping.get(s)
+
+ corrected_by_token = _strategy_to_by_token(corrected_by)
+
+ # Prefer strategy-aware replacements like: By.XPATH, '<old>' -> By.ID, '<new>'
+ # This prevents invalid updates such as leaving By.XPATH with an id value.
+ replacements: List[Dict[str, str]] = []
+ if corrected_by_token:
+ # Find all occurrences of the selector value inside a By.<TOKEN>, '<value>' pair.
+ # We intentionally allow any existing By token to be replaced.
+ locator_pattern = re.compile(
+ r"By\.[A-Z_]+(\s*,\s*)(['\"])" + re.escape(original_value) + r"\2"
+ )
+
+ for match in locator_pattern.finditer(content):
+ quote = match.group(2)
+ escaped_corrected_value = corrected_value.replace(quote, f"\\{quote}")
+ old_substring = match.group(0)
+ new_substring = f"By.{corrected_by_token}{match.group(1)}{quote}{escaped_corrected_value}{quote}"
+ if old_substring != new_substring:
+ replacements.append({"oldString": old_substring, "newString": new_substring})
+
+ if replacements:
+ logger.debug(f"[FILE-EDIT] Prepared {len(replacements)} strategy-aware replacement(s)")
+ else:
+ logger.debug("[FILE-EDIT] No strategy-aware matches found")
+
+ # If we couldn't find a By.<TOKEN>, '<value>' match, fall back to value-only replacement
+ # ONLY when the strategy does not change (or we don't know the corrected strategy).
+ if not replacements:
+ corrected_by_normalized = (corrected_by or "").strip().lower()
+ original_by_normalized = (original_by or "").strip().lower()
+
+ if corrected_by_token and corrected_by_normalized != original_by_normalized:
+ logger.warning(
+ "[FILE-EDIT] Strategy changed but no locator match found; refusing unsafe value-only update"
+ )
+ return {
+ "success": False,
+ "errors": [
+ "Strategy changed (e.g. xpath -> id) but locator tuple not found in file; skipping unsafe edit"
+ ],
+ }
+
+ old_patterns = [
+ f'"{original_value}"',
+ f"'{original_value}'",
+ ]
+
+ found_pattern = None
+ new_pattern = None
+ for old_pattern in old_patterns:
+ if old_pattern in content:
+ found_pattern = old_pattern
+ logger.debug(f"[FILE-EDIT] Found value-only pattern: {old_pattern[:100]}")
+
+ # Choose quote style based on what's in the corrected value
+ if "'" in corrected_value and '"' not in corrected_value:
+ new_pattern = f'"{corrected_value}"'
+ elif '"' in corrected_value and "'" not in corrected_value:
+ new_pattern = f"'{corrected_value}'"
+ elif "'" in corrected_value and '"' in corrected_value:
+ if old_pattern.startswith('"'):
+ escaped_value = corrected_value.replace('"', '\\"')
+ new_pattern = f'"{escaped_value}"'
+ else:
+ escaped_value = corrected_value.replace("'", "\\'")
+ new_pattern = f"'{escaped_value}'"
+ else:
+ new_pattern = f'"{corrected_value}"' if old_pattern.startswith('"') else f"'{corrected_value}'"

- found_pattern = None
- new_pattern = None
- for old_pattern in old_patterns:
- if old_pattern in content:
- found_pattern = old_pattern
- new_pattern = f'"{corrected_value}"' if old_pattern.startswith('"') else f"'{corrected_value}'"
- break
+ break

- if not found_pattern:
- return {"success": False, "errors": [f"Could not find selector: {original_value[:50]}..."]}
+ if not found_pattern or new_pattern is None:
+ logger.warning(f"[FILE-EDIT] Could not find selector: {original_value[:50]}")
+ return {"success": False, "errors": [f"Could not find selector: {original_value[:50]}..."]}

+ replacements = [{"oldString": found_pattern, "newString": new_pattern}]
+
+ # Use dedicated endpoint for edit (supports multiple replacements)
  edit_url = f"{self._local_ai_url}/v1/workspace/files/edit"
- edit_response = requests.post(
- edit_url,
- json={"filePath": file_path, "oldString": found_pattern, "newString": new_pattern},
- timeout=30
- )
+ edit_payload = {"filePath": file_path, "replacements": replacements}
+
+ logger.debug(f"[FILE-EDIT-REQUEST] URL: {edit_url}")
+ logger.debug(f"[FILE-EDIT-REQUEST] Payload: {edit_payload}")
+
+ edit_response = requests.post(edit_url, json=edit_payload, timeout=30)
+
+ logger.debug(f"[FILE-EDIT-RESPONSE] Status: {edit_response.status_code}")
+ logger.debug(f"[FILE-EDIT-RESPONSE] Headers: {dict(edit_response.headers)}")
+ logger.debug(f"[FILE-EDIT-RESPONSE] Body length: {len(edit_response.text)} chars")
+ logger.debug(f"[FILE-EDIT-RESPONSE] Body: {edit_response.text[:1000]}")
+
  edit_response.raise_for_status()
  result: Dict[str, Any] = edit_response.json()
+
+ logger.debug(f"[FILE-EDIT-RESPONSE] Parsed JSON: {result}")
+ logger.info(f"[FILE-EDIT] File update result: success={result.get('success')}")
  return result
- except requests.exceptions.ConnectionError:
+ except requests.exceptions.ConnectionError as e:
+ logger.error(f"[FILE-EDIT-ERROR] Connection failed: {e}")
  logger.warning(f"[LOCAL AI SERVICE] Not available at {self._local_ai_url}")
  return {"success": False, "errors": ["Local AI service not available"]}
  except Exception as e:
- logger.warning(f"[UPDATE ERROR] {e}")
+ logger.error(f"[FILE-EDIT-ERROR] {type(e).__name__}: {str(e)}")
+ logger.debug(f"[FILE-EDIT-ERROR] Details: {e}", exc_info=True)
  return {"success": False, "errors": [str(e)]}

  def export_corrections_report(self, output_file: str = "selector_corrections.json") -> None:
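The core of the new strategy-aware edit is a regex that captures the whole `By.<TOKEN>, '<value>'` tuple so both the strategy and the value can be swapped together. A standalone sketch of that rewrite, using the single-backslash form of the pattern shown above (the locator values are illustrative):

```python
import re


def rewrite_locator(source: str, old_value: str, new_token: str, new_value: str) -> str:
    """Replace By.<ANY>, '<old_value>' with By.<new_token>, '<new_value>', preserving quotes."""
    pattern = re.compile(r"By\.[A-Z_]+(\s*,\s*)(['\"])" + re.escape(old_value) + r"\2")

    def repl(match: re.Match) -> str:
        quote = match.group(2)
        return f"By.{new_token}{match.group(1)}{quote}{new_value}{quote}"

    return pattern.sub(repl, source)


if __name__ == "__main__":
    line = "element = driver.find_element(By.XPATH, \"//button[@id='save']\")"
    print(rewrite_locator(line, "//button[@id='save']", "ID", "save"))
    # element = driver.find_element(By.ID, "save")
```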
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: selenium-selector-autocorrect
- Version: 0.1.1
+ Version: 0.1.2
  Summary: Automatic Selenium selector correction using AI when elements fail to be found
  Author-email: Marty Zhou <marty.zhou@example.com>
  Maintainer-email: Marty Zhou <marty.zhou@example.com>
@@ -75,22 +75,6 @@ element = WebDriverWait(driver, 10).until(
  )
  ```

- ## AI Service Setup
-
- This package requires a local AI service with an OpenAI-compatible API. We recommend using **[VS Code Copilot as Service](https://marketplace.visualstudio.com/items?itemName=MartyZhou.vscode-copilot-as-service)**, which exposes GitHub Copilot through a local HTTP server.
-
- ### Installing VS Code Copilot as Service
-
- 1. Install from VS Code Marketplace or run:
- ```bash
- code --install-extension MartyZhou.vscode-copilot-as-service
- ```
-
- 2. The extension automatically starts a server on `http://localhost:8765`
-
- 3. Requires an active GitHub Copilot subscription
-
-
  ## Configuration

  Configure via environment variables:
@@ -124,67 +108,28 @@ install_auto_correct_hook()

  ```python
  from selenium_selector_autocorrect import (
- install_auto_correct_hook,
  get_auto_correct,
  get_correction_tracker,
- export_corrections_report
+ export_corrections_report,
  )

- install_auto_correct_hook()
-
  auto_correct = get_auto_correct()
  auto_correct.enabled = True
- auto_correct.suggest_better_selectors = False

- # Export corrections report at end of test run
  tracker = get_correction_tracker()
  export_corrections_report("corrections_report.json")
- tracker = get_correction_tracker()
- export_corrections_report("corrections_report.json")
-
- print(f"Total corrections: {len(tracker.get_corrections())}")
- print(f"Successful corrections: {len(tracker.get_successful_corrections())}")
  ```

- ### Custom AI Provider
-
- ```python
- from selenium_selector_autocorrect import AIProvider, configure_provider
-
- class CustomAIProvider(AIProvider):
- def is_available(self) -> bool:
- return True
-
- def suggest_selector(self, system_prompt: str, user_prompt: str):))
- ```
-
- ## How It Works
-
- 1. **Hook Installation**: Patches `WebDriverWait.until()` to add auto-correction
- 2. **Timeout Detection**: When a selector times out, the original exception is caught
- 3. **Page Analysis**: JavaScript extracts visible elements and their attributes
- 4. **AI Suggestion**: Sends page context to AI provider for selector suggestion
- 5. **Verification**: Tests the suggested selector
- 6. **Success Handling**: If successful, records the correction and optionally updates the test file
- 7. **Fallback**: If correction fails, raises the original TimeoutException
-
- ## AI Provider Setup
-
- ### Local AI Service
-
- The package requires a local AI service with OpenAI-compatible API:
+ ## AI Service Setup

- ```bash
- POST http://localhost:8765/v1/chat/completions
- ```
+ This package requires a local AI service with an OpenAI-compatible API. The following endpoints are used:

- For file auto-updates:
- ```bash
- POST http://localhost:8765/v1/workspace/files/read
- POST http://localhost:8765/v1/workspace/files/edit
- ## Correction Reports
+ - `POST {LOCAL_AI_API_URL}/v1/chat/completions` — chat completions for suggestions
+ - `POST {LOCAL_AI_API_URL}/v1/workspace/files/read` — read file content
+ - `POST {LOCAL_AI_API_URL}/v1/workspace/files/edit` — apply edits to files
+ - `POST {LOCAL_AI_API_URL}/v1/workspace/files/search` — search workspace
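To make the endpoint list above concrete, here is a hedged sketch of the read and edit calls as the package issues them; the payload keys (`filePath`, `replacements`, `oldString`, `newString`) come from the diff, while the file path and replacement strings are placeholders and a service must be running at `LOCAL_AI_API_URL`:

```python
import os

import requests

base = os.environ.get("LOCAL_AI_API_URL", "http://localhost:8765")

# Read a workspace file.
read = requests.post(
    f"{base}/v1/workspace/files/read",
    json={"filePath": "tests/test_login.py"},
    timeout=30,
)
read.raise_for_status()
body = read.json()
print(body.get("success"), len(body.get("content", "")))

# Apply one string replacement via the edit endpoint.
edit = requests.post(
    f"{base}/v1/workspace/files/edit",
    json={
        "filePath": "tests/test_login.py",
        "replacements": [{"oldString": "'#old-id'", "newString": "'#new-id'"}],
    },
    timeout=30,
)
edit.raise_for_status()
print(edit.json())
```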

- Export correction reports in JSON format:
+ ## Exporting Reports

  ```python
  from selenium_selector_autocorrect import export_corrections_report
@@ -193,6 +138,7 @@ export_corrections_report("corrections_report.json")
  ```

  Report format:
+
  ```json
  {
  "corrections": [
@@ -215,13 +161,13 @@ Report format:
  }
  ```

- ## Best Practices
+ ## Troubleshooting
+
+ **AI service not available**: Ensure the local AI service is running and reachable via `LOCAL_AI_API_URL`.

- 1. **Install Once**: Call `install_auto_correct_hook()` once at test suite startup (e.g., in `conftest.py`)
- 2. **Review Corrections**: Regularly review correction reports to identify brittle selectors
- 3. **Update Tests**: Use auto-update sparingly and review changes before committing
- 4. **Monitor AI Service**: Ensure your AI service is running and responsive
- 5. **Use Strong Selectors**: The tool helps with failures but writing robust selectors is still preferred
+ **Auto-update not running**: Verify `SELENIUM_AUTO_UPDATE_TESTS` is set to `"1"`.
+
+ **Selector strings not found when updating**: Check quote styles in your source files match those used in the correction.
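A minimal wiring sketch that matches the troubleshooting items above: set the environment before the package is imported, then install the documented hook once (the values and file layout are illustrative):

```python
# conftest.py (illustrative)
import os

# Configure before importing the package so the tracker picks these up.
os.environ.setdefault("LOCAL_AI_API_URL", "http://localhost:8765")
os.environ.setdefault("SELENIUM_AUTO_UPDATE_TESTS", "1")

from selenium_selector_autocorrect import install_auto_correct_hook

install_auto_correct_hook()
```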

  ## Requirements

@@ -231,47 +177,45 @@ Report format:

  ## License

- MITInstall hook once at test suite startup (e.g., in conftest.py)
- 2. Review correction reports regularly to identify brittle selectors
- 3. Use auto-update sparingly and review changes before committing
- 4. Ensure your AI service is running and responsive
- 5. Write robust selectors - the tool helps with failures but prevention is better
+ MIT

- When contributing:
- 1. Follow PEP 8 style guidelines
- 2. Add tests for new features
- 3. Update documentation
- 4. No emojis in code or documentation
+ ## Contributing

- ## Troubleshooting
+ Please follow PEP 8, add tests for new features, and update documentation when changing behavior.

- ### AI Service Not Available
+ See [CHANGELOG.md](CHANGELOG.md) for release notes and version history.

- Contributions are welcome! Please:
- 1. Follow PEP 8 style guidelines
- 2. Add tests for new features
- 3. Update documentation
- 4. Maintain consistency with existing code
+ ## Publishing to PyPI

- **Possible causes**:
- - `SELENIUM_AUTO_UPDATE_TESTS` not set to `"1"`
- - Test file path not detected correctly
- - Selector string not found in source file (check quotes)
+ Create a PyPI API token at https://pypi.org/manage/account/#api-tokens (recommended scope: project or account) and keep the token secret. PyPI no longer accepts username/password uploads — use the token as the password and `__token__` as the username.

- ### No Corrections Happening
- Solution: Ensure your local AI service is running on the configured port.
+ PowerShell (Windows) example:

- ### Test File Not Updated
+ ```powershell
+ $env:TWINE_USERNAME='__token__'
+ $env:TWINE_PASSWORD='pypi-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
+ .\venv\Scripts\python.exe -m pip install --upgrade build twine
+ .\venv\Scripts\python.exe -m build
+ .\venv\Scripts\python.exe -m twine upload dist/*
+ ```

- Possible causes:
- - `SELENIUM_AUTO_UPDATE_TESTS` not set to "1"
- - Test file path not detected correctly
- - Selector string not found in source file
+ Unix / macOS example:

- ### No Corrections Happening
+ ```bash
+ export TWINE_USERNAME='__token__'
+ export TWINE_PASSWORD='pypi-XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
+ python3 -m pip install --upgrade build twine
+ python3 -m build
+ python3 -m twine upload dist/*
+ ```
+
+ To publish to TestPyPI (verify release first):
+
+ ```bash
+ python3 -m twine upload --repository-url https://test.pypi.org/legacy/ dist/*
+ ```

- Check:
- 1. Hook is installed - look for log message
- 2. AI service is available - check `get_auto_correct().is_service_available()`
- 3. Auto-correct is enabled - c
- See CHANGELOG.md for version history and changes.
+ Notes:
+ - Use an API token (not account password). If using a CI system, store the token in secure secrets.
+ - You can add a persistent `~/.pypirc` for repeated uploads; see PyPI documentation for details.
+ - If upload fails with a 403, verify the token is correct and has the required scope.
@@ -0,0 +1,10 @@
+ selenium_selector_autocorrect/__init__.py,sha256=aP02rJbuJxwqMDuc1Y-LoNA3AMYeoCe0_0q_xYNKnTE,1665
+ selenium_selector_autocorrect/ai_providers.py,sha256=MIsnwIgTSQQxL4uex4f_4l886EvGLLXvojbheyMgjuI,5966
+ selenium_selector_autocorrect/auto_correct.py,sha256=ME2dxkN4liNC797x1nFn-HAKvyXxl7q2ePv5SNon42w,11335
+ selenium_selector_autocorrect/correction_tracker.py,sha256=VXmu-9Qgcknl6DOuIuawG_LQJNr3B_N0eHQDghygQEk,32927
+ selenium_selector_autocorrect/wait_hook.py,sha256=Vs1FRtKG73i1Cphgf4a3_P5MTdlrWdeOnmzG1ewnlS4,7659
+ selenium_selector_autocorrect-0.1.2.dist-info/licenses/LICENSE,sha256=VRPy6YXF2wA_3MeTDnpa_-6Zgjt8c2C0D_iIyhDkduc,1095
+ selenium_selector_autocorrect-0.1.2.dist-info/METADATA,sha256=Oh8ha_xAm2MGYGx_lguQ2_citrtVBoaLSM83u236qGk,7318
+ selenium_selector_autocorrect-0.1.2.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
+ selenium_selector_autocorrect-0.1.2.dist-info/top_level.txt,sha256=nQ78Mk-XHDhYBckP0tMZvoFAZmZGO4Ec4-e1i61Fdz0,30
+ selenium_selector_autocorrect-0.1.2.dist-info/RECORD,,
@@ -1,10 +0,0 @@
- selenium_selector_autocorrect/__init__.py,sha256=v1Up86il_XXBgXbZLZiXm_hFlTO1MO5EXKCPQfb0pAs,1665
- selenium_selector_autocorrect/ai_providers.py,sha256=CtoLGYvzKWsZBzr5DW6H8vWnTpb5HIAmPhmi_BYddsU,4237
- selenium_selector_autocorrect/auto_correct.py,sha256=ME2dxkN4liNC797x1nFn-HAKvyXxl7q2ePv5SNon42w,11335
- selenium_selector_autocorrect/correction_tracker.py,sha256=XG3U3tSoGxwmLoBrPi8tc3eWtMDqKG6qJa0K4W7pK54,9984
- selenium_selector_autocorrect/wait_hook.py,sha256=Vs1FRtKG73i1Cphgf4a3_P5MTdlrWdeOnmzG1ewnlS4,7659
- selenium_selector_autocorrect-0.1.1.dist-info/licenses/LICENSE,sha256=VRPy6YXF2wA_3MeTDnpa_-6Zgjt8c2C0D_iIyhDkduc,1095
- selenium_selector_autocorrect-0.1.1.dist-info/METADATA,sha256=EGdbgP_v7OyZDGcNLeFafxY_DcjRoiZRf-mxbjGK4fo,9221
- selenium_selector_autocorrect-0.1.1.dist-info/WHEEL,sha256=wUyA8OaulRlbfwMtmQsvNngGrxQHAvkKcvRmdizlJi0,92
- selenium_selector_autocorrect-0.1.1.dist-info/top_level.txt,sha256=nQ78Mk-XHDhYBckP0tMZvoFAZmZGO4Ec4-e1i61Fdz0,30
- selenium_selector_autocorrect-0.1.1.dist-info/RECORD,,