selenium-selector-autocorrect 0.1.0__py3-none-any.whl → 0.1.2__py3-none-any.whl
This diff shows the changes between publicly available package versions as published to a supported registry. It is provided for informational purposes only.
- selenium_selector_autocorrect/__init__.py +1 -1
- selenium_selector_autocorrect/ai_providers.py +52 -26
- selenium_selector_autocorrect/auto_correct.py +39 -19
- selenium_selector_autocorrect/correction_tracker.py +517 -57
- selenium_selector_autocorrect/wait_hook.py +50 -19
- {selenium_selector_autocorrect-0.1.0.dist-info → selenium_selector_autocorrect-0.1.2.dist-info}/METADATA +48 -104
- selenium_selector_autocorrect-0.1.2.dist-info/RECORD +10 -0
- selenium_selector_autocorrect/py.typed +0 -1
- selenium_selector_autocorrect-0.1.0.dist-info/RECORD +0 -11
- {selenium_selector_autocorrect-0.1.0.dist-info → selenium_selector_autocorrect-0.1.2.dist-info}/WHEEL +0 -0
- {selenium_selector_autocorrect-0.1.0.dist-info → selenium_selector_autocorrect-0.1.2.dist-info}/licenses/LICENSE +0 -0
- {selenium_selector_autocorrect-0.1.0.dist-info → selenium_selector_autocorrect-0.1.2.dist-info}/top_level.txt +0 -0
@@ -3,22 +3,46 @@
 import json
 import logging
 import os
+import re
 import traceback
 from datetime import datetime
-from
+from pathlib import Path
+from typing import Any, Dict, List, Optional, TypedDict
 
 import requests
 
 logger = logging.getLogger(__name__)
 
 
+class CorrectionRecord(TypedDict, total=False):
+    """Type definition for a correction record."""
+    original_by: str
+    original_value: str
+    corrected_by: str
+    corrected_value: str
+    success: bool
+    test_file: Optional[str]
+    test_line: Optional[int]
+    timestamp: str
+
+
+class ApplyCorrectionsResult(TypedDict):
+    """Type definition for apply_all_corrections result."""
+    total: int
+    success: int
+    failed: int
+    details: List[Dict[str, Any]]
+
+
 class CorrectionTracker:
     """Tracks selector corrections and manages test file updates."""
 
-    def __init__(self):
-        self._corrections: List[
-        self._local_ai_url = os.environ.get("LOCAL_AI_API_URL", "http://localhost:8765")
-        self._auto_update_enabled = os.environ.get("SELENIUM_AUTO_UPDATE_TESTS", "0").lower() in ("1", "true", "yes")
+    def __init__(self) -> None:
+        self._corrections: List[CorrectionRecord] = []
+        self._local_ai_url: str = os.environ.get("LOCAL_AI_API_URL", "http://localhost:8765")
+        self._auto_update_enabled: bool = os.environ.get("SELENIUM_AUTO_UPDATE_TESTS", "0").lower() in ("1", "true", "yes")
+        # Configurable import pattern - set via environment variable for project-specific structure
+        self._import_pattern: str = os.environ.get("SELENIUM_IMPORT_PATTERN", r'from\s+([\w.]+)\s+import')
 
     def record_correction(
         self,
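To make the new typed records concrete, here is a small usage sketch (an editor's illustration, not part of the diff; the selector values are invented, and the import path follows the package layout in the file list above):

import os

# Configuration read by CorrectionTracker.__init__ (defaults shown in the hunk above)
os.environ["SELENIUM_AUTO_UPDATE_TESTS"] = "1"  # opt in to automatic test-file updates
os.environ["LOCAL_AI_API_URL"] = "http://localhost:8765"
os.environ["SELENIUM_IMPORT_PATTERN"] = r"from\s+([\w.]+)\s+import"

from selenium_selector_autocorrect.correction_tracker import CorrectionTracker

tracker = CorrectionTracker()
tracker.record_correction(
    original_by="xpath",
    original_value="//button[@id='save']",  # selector that stopped matching
    corrected_by="id",
    corrected_value="save",                 # selector that worked instead
)
print(tracker.get_corrections())  # -> [{'original_by': 'xpath', ...}]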
@@ -29,7 +53,7 @@ class CorrectionTracker:
         success: bool = True,
         test_file: Optional[str] = None,
         test_line: Optional[int] = None
-    ):
+    ) -> None:
         if test_file is None or test_line is None:
             # Extract from stack trace, prioritizing actual test files
             for frame in traceback.extract_stack():
@@ -45,18 +69,13 @@
                         '/selenium_selector_autocorrect/' in filename_lower or
                         '\\selenium_selector_autocorrect\\' in filename):
                     continue
-                # Prioritize test files
-                if
-                    'test_' in filename or
-                    'page_factory' in filename_lower or
-                    'ui_client' in filename_lower):
+                # Prioritize test files first
+                if 'test_' in filename:
                     test_file = filename
                     test_line = frame.lineno
-
-                    if 'test_' in filename or 'test_library' in filename:
-                        break
+                    break
 
-        correction = {
+        correction: CorrectionRecord = {
             "original_by": original_by,
             "original_value": original_value,
             "corrected_by": corrected_by,
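The stack walk above now stops at the first frame whose filename contains 'test_'. A standalone sketch of that filtering logic (illustrative only, mirroring the hunk):

import traceback

def find_calling_test_frame():
    """Return (filename, lineno) of the first test frame on the stack, or (None, None)."""
    for frame in traceback.extract_stack():
        filename = frame.filename
        filename_lower = filename.lower()
        # Skip frames that belong to the library itself
        if ('/selenium_selector_autocorrect/' in filename_lower or
                '\\selenium_selector_autocorrect\\' in filename):
            continue
        if 'test_' in filename:
            return filename, frame.lineno
    return None, None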
@@ -79,34 +98,363 @@
             logger.info(f"[AUTO-UPDATE] Attempting to update {test_file}...")
             self._auto_update_test_file(correction)
 
-    def get_corrections(self) -> List[
+    def get_corrections(self) -> List[CorrectionRecord]:
         return self._corrections.copy()
 
-    def get_successful_corrections(self) -> List[
+    def get_successful_corrections(self) -> List[CorrectionRecord]:
         return [c for c in self._corrections if c.get("success", False)]
 
-    def clear_corrections(self):
+    def clear_corrections(self) -> None:
         self._corrections.clear()
 
-    def _auto_update_test_file(self, correction:
+    def _auto_update_test_file(self, correction: CorrectionRecord) -> None:
         try:
             test_file = correction.get("test_file")
             if not test_file:
                 return
-
+
+            # Get all files that need to be updated (test file + referenced files)
+            files_to_update = self._find_files_with_selector(
                 test_file,
-                correction["
-                correction["original_value"],
-                correction["corrected_by"],
-                correction["corrected_value"]
+                correction["original_value"]
             )
-
-
-
-
+
+            updated_count = 0
+            failed_count = 0
+
+            for file_path in files_to_update:
+                result = self.update_test_file_via_service(
+                    file_path,
+                    correction["original_by"],
+                    correction["original_value"],
+                    correction["corrected_by"],
+                    correction["corrected_value"]
+                )
+                if result.get("success"):
+                    updated_count += 1
+                    logger.info(f"[AUTO-UPDATE] Successfully updated {file_path}")
+                else:
+                    failed_count += 1
+                    logger.warning(f"[AUTO-UPDATE] Failed to update {file_path}: {result.get('errors', [])}")
+
+            if updated_count > 0:
+                logger.info(f"[AUTO-UPDATE] Updated {updated_count} file(s), {failed_count} failed")
         except Exception as e:
             logger.warning(f"[AUTO-UPDATE] Error updating test file: {e}")
 
+    def _find_files_with_selector(self, test_file: str, selector_value: str) -> List[str]:
+        """Find all files that contain the selector and are used by the test file.
+
+        Backward search strategy:
+        1. Search for selector text in all Python files (fast workspace grep)
+        2. Extract all imports from test file (with recursion for nested imports)
+        3. Return only files that contain the selector AND are imported by the test
+
+        This is efficient because:
+        - Workspace search is very fast (indexed)
+        - We only verify imports for files that actually have the selector
+        - Typically finds 1-5 files instead of checking 100s
+        """
+        files_with_selector: List[str] = []
+
+        try:
+            logger.debug(f"[BACKWARD SEARCH] Searching for selector: {selector_value}")
+
+            # Step 1: Search for files containing the selector (fast workspace search)
+            # Try different search queries since literal string search may fail with special characters
+            workspace_files = []
+            search_queries = [
+                selector_value,  # Try full selector first
+                # Strip common CSS selector wrappers and search for cleaner text
+                selector_value.replace('[', '').replace(']', '').replace('"', '').replace("'", '').strip(),
+            ]
+
+            logger.debug(f"[BACKWARD SEARCH] Will try {len(search_queries)} search queries")
+            for i, search_query in enumerate(search_queries):
+                logger.debug(f"[BACKWARD SEARCH] Query {i+1}: {search_query[:100]}")
+                if search_query and not workspace_files:
+                    files = self._workspace_search_for_selector(search_query)
+                    if files:
+                        workspace_files = files
+                        logger.debug(f"[BACKWARD SEARCH] ✓ Found {len(files)} matches with query {i+1}")
+                        break
+                    else:
+                        logger.debug(f"[BACKWARD SEARCH] ✗ No matches with query {i+1}")
+
+            logger.info(f"[BACKWARD SEARCH] Workspace search found {len(workspace_files)} file(s)")
+
+            if not workspace_files:
+                logger.debug(f"[BACKWARD SEARCH] No matches found with workspace search")
+                logger.info(f"[AUTO-UPDATE] Found selector in 0 file(s): []")
+                return files_with_selector
+
+            # Step 2: Extract all imports from test file (with recursion for nested imports)
+            all_imports = self._extract_all_imports_from_test(test_file)
+            logger.debug(f"[BACKWARD SEARCH] Test file imports {len(all_imports)} files")
+
+            # Normalize paths for comparison
+            test_file_normalized = os.path.normpath(test_file)
+            all_imports_normalized = {os.path.normpath(f) for f in all_imports}
+
+            # Step 3: Verify which matched files are actually used by the test
+            for file_path in workspace_files:
+                file_path_normalized = os.path.normpath(file_path)
+
+                # Check if this is the test file itself
+                if file_path_normalized == test_file_normalized:
+                    logger.debug(f"[BACKWARD SEARCH] ✓ Selector in test file: {file_path}")
+                    files_with_selector.append(file_path)
+                    continue
+
+                # Check if this file is in the imports (direct match)
+                if file_path_normalized in all_imports_normalized:
+                    logger.debug(f"[BACKWARD SEARCH] ✓ Selector in imported file: {file_path}")
+                    files_with_selector.append(file_path)
+                    continue
+
+                # Check if the file path matches any import by filename
+                # (handles different path separators and relative vs absolute paths)
+                file_name = os.path.basename(file_path)
+                for imported_file in all_imports:
+                    if os.path.basename(imported_file) == file_name:
+                        # Verify it's the same file by checking if paths end the same way
+                        imported_parts = imported_file.replace('\\', '/').split('/')
+                        file_parts = file_path.replace('\\', '/').split('/')
+
+                        # Compare the last N parts of the path
+                        min_parts = min(len(imported_parts), len(file_parts))
+                        if imported_parts[-min_parts:] == file_parts[-min_parts:]:
+                            logger.debug(f"[BACKWARD SEARCH] ✓ Selector in imported file: {file_path}")
+                            files_with_selector.append(imported_file)  # Use the full path from imports
+                            break
+                else:
+                    logger.debug(f"[BACKWARD SEARCH] ✗ Selector in unrelated file: {file_path}")
+
+        except Exception as e:
+            logger.debug(f"[BACKWARD SEARCH] Error during search: {e}")
+
+        logger.info(f"[AUTO-UPDATE] Found selector in {len(files_with_selector)} file(s): {files_with_selector}")
+        return files_with_selector
+
+    def _workspace_search_for_selector(self, selector_value: str) -> List[str]:
+        """Try to find files using workspace search API. Returns empty list if not found or API unavailable."""
+        try:
+            # Use dedicated search endpoint (not the unified AI endpoint)
+            search_url = f"{self._local_ai_url}/v1/workspace/files/search"
+            # Prefer a narrower search first (avoids huge workspaces like venv), then fall back.
+            preferred_pattern = os.environ.get("SELENIUM_WORKSPACE_SEARCH_FILE_PATTERN", "src/**/*.py")
+            patterns_to_try = [preferred_pattern, "**/*.py"]
+            seen_patterns = set()
+
+            for file_pattern in patterns_to_try:
+                if not file_pattern or file_pattern in seen_patterns:
+                    continue
+                seen_patterns.add(file_pattern)
+
+                search_payload = {
+                    "query": selector_value,
+                    "filePattern": file_pattern,
+                    "maxResults": 50
+                }
+
+                logger.debug(f"[WORKSPACE-SEARCH-REQUEST] URL: {search_url}")
+                logger.debug(f"[WORKSPACE-SEARCH-REQUEST] Payload: {search_payload}")
+
+                response = requests.post(search_url, json=search_payload, timeout=30)
+
+                logger.debug(f"[WORKSPACE-SEARCH-RESPONSE] Status: {response.status_code}")
+                logger.debug(f"[WORKSPACE-SEARCH-RESPONSE] Headers: {dict(response.headers)}")
+                logger.debug(f"[WORKSPACE-SEARCH-RESPONSE] Body length: {len(response.text)} chars")
+
+                if not response.ok:
+                    logger.debug(f"[WORKSPACE-SEARCH-RESPONSE] Body: {response.text[:500]}")
+                    logger.debug(f"[WORKSPACE-SEARCH] Request failed: {response.status_code}")
+                    continue
+
+                search_results = response.json()
+                logger.debug(f"[WORKSPACE-SEARCH-RESPONSE] Parsed JSON keys: {list(search_results.keys())}")
+                logger.debug(f"[WORKSPACE-SEARCH-RESPONSE] Full response: {search_results}")
+
+                file_paths: List[str] = []
+
+                # Handle dedicated endpoint 'results' markdown format
+                results_text = search_results.get("results", "")
+                logger.debug(f"[WORKSPACE-SEARCH] Results text length: {len(results_text)} chars")
+                logger.debug(f"[WORKSPACE-SEARCH] Results text preview: {results_text[:300]}")
+
+                if results_text and "No matches found" not in results_text:
+                    logger.debug(f"[WORKSPACE-SEARCH] Parsing markdown results ({len(results_text)} chars)")
+                    for line in results_text.split('\n'):
+                        if line.startswith('## ') and line.endswith('.py'):
+                            file_path = line[3:].strip()
+                            if '__pycache__' not in file_path and not file_path.endswith('.pyc') and file_path not in file_paths:
+                                file_paths.append(file_path)
+                                logger.debug(f"[WORKSPACE-SEARCH] Added file: {file_path}")
+                else:
+                    logger.debug(f"[WORKSPACE-SEARCH] No results in response")
+
+                if file_paths:
+                    logger.info(
+                        f"[WORKSPACE-SEARCH] Found {len(file_paths)} file(s) with selector (pattern={file_pattern})"
+                    )
+                    return file_paths
+
+                logger.debug(f"[WORKSPACE-SEARCH] No files found (pattern={file_pattern})")
+
+            return []
+
+        except Exception as e:
+            logger.error(f"[WORKSPACE-SEARCH-ERROR] Failed: {type(e).__name__}: {str(e)}")
+            logger.debug(f"[WORKSPACE-SEARCH-ERROR] Details: {e}", exc_info=True)
+            return []
+
+    def _extract_all_imports_from_test(self, test_file: str, max_depth: int = 3, visited: Optional[set] = None) -> List[str]:  # noqa: A006
+        """Extract all imported files from test file recursively.
+
+        Used by backward search to verify if a file is actually imported by the test.
+        Only recurses into page objects (not utilities) for efficiency.
+        """
+        if visited is None:
+            visited = set()
+
+        if test_file in visited or max_depth <= 0:
+            return []
+
+        visited.add(test_file)
+        all_imports = []
+
+        try:
+            direct_imports = self._extract_imported_files(test_file)
+            all_imports.extend(direct_imports)
+
+            # Recursively extract imports from page objects only
+            if max_depth > 1:
+                for imported_file in direct_imports:
+                    if imported_file not in visited and self._is_page_object_file(imported_file):
+                        nested = self._extract_all_imports_from_test(imported_file, max_depth - 1, visited)
+                        all_imports.extend(nested)
+
+        except Exception as e:
+            logger.debug(f"[IMPORT EXTRACTION] Error: {e}")
+
+        return all_imports
+
+    def _is_page_object_file(self, file_path: str) -> bool:
+        """Check if a file is likely a page object or steps file (not a utility or base class)."""
+        file_lower = file_path.lower()
+
+        # Include: Page objects, Dialogs, Modals, Components, Steps
+        if any(pattern in file_lower for pattern in ['page.py', 'dialog.py', 'modal.py', 'section.py', 'steps.py', 'step.py']):
+            return True
+
+        # Include: Files in component/header/steps directories
+        if any(pattern in file_lower for pattern in ['component', 'header', 'footer', 'sidebar', '/steps/', '\\steps\\']):
+            return True
+
+        # Exclude: Utilities, base classes, helpers, drivers, clients
+        if any(pattern in file_lower for pattern in ['utility.py', 'helper.py', 'base.py', 'util.py', '__init__.py', 'driver.py', 'client.py']):
+            return False
+
+        return False
+
+    def _extract_imported_files(self, test_file: str) -> List[str]:
+        """Extract imported page object file paths from a test file."""
+        imported_files: List[str] = []
+
+        try:
+            # Read the test file content via dedicated endpoint
+            read_url = f"{self._local_ai_url}/v1/workspace/files/read"
+            read_payload = {"filePath": test_file}
+
+            logger.debug(f"[FILE-READ-REQUEST] URL: {read_url}")
+            logger.debug(f"[FILE-READ-REQUEST] Payload: {read_payload}")
+
+            read_response = requests.post(read_url, json=read_payload, timeout=30)
+
+            logger.debug(f"[FILE-READ-RESPONSE] Status: {read_response.status_code}")
+            logger.debug(f"[FILE-READ-RESPONSE] Headers: {dict(read_response.headers)}")
+            logger.debug(f"[FILE-READ-RESPONSE] Body length: {len(read_response.text)} chars")
+
+            if not read_response.ok:
+                logger.debug(f"[FILE-READ-RESPONSE] Body: {read_response.text[:500]}")
+                return imported_files
+
+            file_content = read_response.json()
+            logger.debug(f"[FILE-READ-RESPONSE] Parsed JSON keys: {list(file_content.keys())}")
+            logger.debug(f"[FILE-READ-RESPONSE] Full response: {file_content}")
+
+            if not file_content.get("success"):
+                logger.debug(f"[FILE-READ] Read failed: success={file_content.get('success')}")
+                return imported_files
+
+            content = file_content.get("content", "")
+            logger.debug(f"[FILE-READ] Content length: {len(content)} chars")
+
+            # Pattern to match imports - configurable via SELENIUM_IMPORT_PATTERN env var
+            for match in re.finditer(self._import_pattern, content):
+                module_path = match.group(1)
+                # Convert module path to file path
+                file_path = self._module_to_file_path(module_path, test_file)
+                if file_path:
+                    imported_files.append(file_path)
+
+            # Also extract from imports in step functions and page objects
+            # Pattern: from <path> import <class>
+            step_import_pattern = r'from\s+([\w.]+)\s+import\s+([\w,\s]+)'
+            for match in re.finditer(step_import_pattern, content):
+                module_path = match.group(1)
+                # Include Page classes and step files (configurable via environment variable)
+                keywords = os.environ.get("SELENIUM_IMPORT_KEYWORDS", "Page,.steps.,steps").split(",")
+                if any(keyword.strip() in module_path for keyword in keywords):
+                    file_path = self._module_to_file_path(module_path, test_file)
+                    if file_path:
+                        imported_files.append(file_path)
+
+        except Exception as e:
+            logger.debug(f"[IMPORT EXTRACTION] Error: {e}")
+
+        return imported_files
+
+    def _module_to_file_path(self, module_path: str, reference_file: str) -> Optional[str]:
+        """Convert a Python module path to a file path."""
+        try:
+            # Extract the root package name from module_path
+            parts = module_path.split('.')
+            if not parts:
+                return None
+
+            root_package = parts[0]
+            ref_path = Path(reference_file)
+
+            # Find the root package directory by going up from the reference file
+            current = ref_path.parent
+            package_root = None
+
+            while current.parent != current:
+                if (current / root_package).exists():
+                    package_root = current / root_package
+                    break
+                current = current.parent
+
+            if not package_root:
+                return None
+
+            # Convert module path to relative path, removing the root package
+            relative_parts = parts[1:]  # Remove root package prefix
+            if not relative_parts:
+                return None
+
+            relative_path = Path(*relative_parts)
+            file_path = package_root / relative_path.with_suffix('.py')
+
+            if file_path.exists():
+                return str(file_path).replace('\\', '/')
+
+        except Exception as e:
+            logger.debug(f"[MODULE CONVERSION] Error converting {module_path}: {e}")
+
+        return None
+
     def update_test_file_via_service(
         self,
         file_path: str,
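The heart of this hunk is the backward search: grep the workspace for the selector, then keep only hits that the failing test actually imports. A self-contained sketch of that intersection (hypothetical inputs; the real code gets its grep hits from the local AI service's /v1/workspace/files/search endpoint):

import os

def backward_search(grep_hits, test_file, imports):
    # Keep a grep hit only if it is the test file itself or one of its imports.
    test_norm = os.path.normpath(test_file)
    import_norms = {os.path.normpath(p) for p in imports}
    return [
        hit for hit in grep_hits
        if os.path.normpath(hit) == test_norm or os.path.normpath(hit) in import_norms
    ]

print(backward_search(
    grep_hits=["src/pages/login_page.py", "src/unrelated/other.py"],
    test_file="tests/test_login.py",
    imports=["src/pages/login_page.py"],
))  # -> ['src/pages/login_page.py']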
@@ -116,47 +464,155 @@
         corrected_value: str
     ) -> Dict[str, Any]:
         try:
+            # Read the file using dedicated endpoint
             read_url = f"{self._local_ai_url}/v1/workspace/files/read"
-
+            read_payload = {"filePath": file_path}
+
+            logger.debug(f"[FILE-EDIT-READ-REQUEST] URL: {read_url}")
+            logger.debug(f"[FILE-EDIT-READ-REQUEST] Payload: {read_payload}")
+
+            read_response = requests.post(read_url, json=read_payload, timeout=30)
+
+            logger.debug(f"[FILE-EDIT-READ-RESPONSE] Status: {read_response.status_code}")
+            logger.debug(f"[FILE-EDIT-READ-RESPONSE] Headers: {dict(read_response.headers)}")
+            logger.debug(f"[FILE-EDIT-READ-RESPONSE] Body length: {len(read_response.text)} chars")
+
             read_response.raise_for_status()
             file_content = read_response.json()
+
+            logger.debug(f"[FILE-EDIT-READ-RESPONSE] Parsed JSON keys: {list(file_content.keys())}")
 
             if not file_content.get("success"):
+                logger.error(f"[FILE-EDIT] Read failed: {file_content}")
                 return {"success": False, "errors": ["Could not read file"]}
 
             content = file_content.get("content", "")
-
-
-
-
+            logger.debug(f"[FILE-EDIT] Read {len(content)} chars from {file_path}")
+
+            def _strategy_to_by_token(strategy: str) -> Optional[str]:
+                s = (strategy or "").strip().lower()
+                mapping = {
+                    "css selector": "CSS_SELECTOR",
+                    "css": "CSS_SELECTOR",
+                    "xpath": "XPATH",
+                    "id": "ID",
+                    "name": "NAME",
+                    "class name": "CLASS_NAME",
+                    "class": "CLASS_NAME",
+                    "tag name": "TAG_NAME",
+                    "tag": "TAG_NAME",
+                    "link text": "LINK_TEXT",
+                    "partial link text": "PARTIAL_LINK_TEXT",
+                }
+                return mapping.get(s)
+
+            corrected_by_token = _strategy_to_by_token(corrected_by)
+
+            # Prefer strategy-aware replacements like: By.XPATH, '<old>' -> By.ID, '<new>'
+            # This prevents invalid updates such as leaving By.XPATH with an id value.
+            replacements: List[Dict[str, str]] = []
+            if corrected_by_token:
+                # Find all occurrences of the selector value inside a By.<TOKEN>, '<value>' pair.
+                # We intentionally allow any existing By token to be replaced.
+                locator_pattern = re.compile(
+                    r"By\.[A-Z_]+(\s*,\s*)(['\"])" + re.escape(original_value) + r"\2"
+                )
+
+                for match in locator_pattern.finditer(content):
+                    quote = match.group(2)
+                    escaped_corrected_value = corrected_value.replace(quote, f"\\{quote}")
+                    old_substring = match.group(0)
+                    new_substring = f"By.{corrected_by_token}{match.group(1)}{quote}{escaped_corrected_value}{quote}"
+                    if old_substring != new_substring:
+                        replacements.append({"oldString": old_substring, "newString": new_substring})
+
+            if replacements:
+                logger.debug(f"[FILE-EDIT] Prepared {len(replacements)} strategy-aware replacement(s)")
+            else:
+                logger.debug("[FILE-EDIT] No strategy-aware matches found")
+
+            # If we couldn't find a By.<TOKEN>, '<value>' match, fall back to value-only replacement
+            # ONLY when the strategy does not change (or we don't know the corrected strategy).
+            if not replacements:
+                corrected_by_normalized = (corrected_by or "").strip().lower()
+                original_by_normalized = (original_by or "").strip().lower()
+
+                if corrected_by_token and corrected_by_normalized != original_by_normalized:
+                    logger.warning(
+                        "[FILE-EDIT] Strategy changed but no locator match found; refusing unsafe value-only update"
+                    )
+                    return {
+                        "success": False,
+                        "errors": [
+                            "Strategy changed (e.g. xpath -> id) but locator tuple not found in file; skipping unsafe edit"
+                        ],
+                    }
+
+                old_patterns = [
+                    f'"{original_value}"',
+                    f"'{original_value}'",
+                ]
+
+                found_pattern = None
+                new_pattern = None
+                for old_pattern in old_patterns:
+                    if old_pattern in content:
+                        found_pattern = old_pattern
+                        logger.debug(f"[FILE-EDIT] Found value-only pattern: {old_pattern[:100]}")
+
+                        # Choose quote style based on what's in the corrected value
+                        if "'" in corrected_value and '"' not in corrected_value:
+                            new_pattern = f'"{corrected_value}"'
+                        elif '"' in corrected_value and "'" not in corrected_value:
+                            new_pattern = f"'{corrected_value}'"
+                        elif "'" in corrected_value and '"' in corrected_value:
+                            if old_pattern.startswith('"'):
+                                escaped_value = corrected_value.replace('"', '\\"')
+                                new_pattern = f'"{escaped_value}"'
+                            else:
+                                escaped_value = corrected_value.replace("'", "\\'")
+                                new_pattern = f"'{escaped_value}'"
+                        else:
+                            new_pattern = f'"{corrected_value}"' if old_pattern.startswith('"') else f"'{corrected_value}'"
 
-
-
-
-
-
-            new_pattern = f'"{corrected_value}"' if old_pattern.startswith('"') else f"'{corrected_value}'"
-                break
+                        break
+
+            if not found_pattern or new_pattern is None:
+                logger.warning(f"[FILE-EDIT] Could not find selector: {original_value[:50]}")
+                return {"success": False, "errors": [f"Could not find selector: {original_value[:50]}..."]}
 
-
-            return {"success": False, "errors": [f"Could not find selector: {original_value[:50]}..."]}
+            replacements = [{"oldString": found_pattern, "newString": new_pattern}]
 
+            # Use dedicated endpoint for edit (supports multiple replacements)
             edit_url = f"{self._local_ai_url}/v1/workspace/files/edit"
-
-
-
-
-
+            edit_payload = {"filePath": file_path, "replacements": replacements}
+
+            logger.debug(f"[FILE-EDIT-REQUEST] URL: {edit_url}")
+            logger.debug(f"[FILE-EDIT-REQUEST] Payload: {edit_payload}")
+
+            edit_response = requests.post(edit_url, json=edit_payload, timeout=30)
+
+            logger.debug(f"[FILE-EDIT-RESPONSE] Status: {edit_response.status_code}")
+            logger.debug(f"[FILE-EDIT-RESPONSE] Headers: {dict(edit_response.headers)}")
+            logger.debug(f"[FILE-EDIT-RESPONSE] Body length: {len(edit_response.text)} chars")
+            logger.debug(f"[FILE-EDIT-RESPONSE] Body: {edit_response.text[:1000]}")
+
             edit_response.raise_for_status()
-
-
+            result: Dict[str, Any] = edit_response.json()
+
+            logger.debug(f"[FILE-EDIT-RESPONSE] Parsed JSON: {result}")
+            logger.info(f"[FILE-EDIT] File update result: success={result.get('success')}")
+            return result
+        except requests.exceptions.ConnectionError as e:
+            logger.error(f"[FILE-EDIT-ERROR] Connection failed: {e}")
             logger.warning(f"[LOCAL AI SERVICE] Not available at {self._local_ai_url}")
             return {"success": False, "errors": ["Local AI service not available"]}
         except Exception as e:
-            logger.
+            logger.error(f"[FILE-EDIT-ERROR] {type(e).__name__}: {str(e)}")
+            logger.debug(f"[FILE-EDIT-ERROR] Details: {e}", exc_info=True)
             return {"success": False, "errors": [str(e)]}
 
-    def export_corrections_report(self, output_file: str = "selector_corrections.json"):
+    def export_corrections_report(self, output_file: str = "selector_corrections.json") -> None:
         with open(output_file, "w") as f:
             json.dump({
                 "corrections": self._corrections,
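The strategy-aware replacement is the most consequential change in this hunk: the By token and the value are rewritten together, so a correction from xpath to id can no longer leave By.XPATH pointing at an id. A standalone demonstration of that pattern (an editor's sketch with invented inputs):

import re

content = "driver.find_element(By.XPATH, \"//button[@id='save']\")"
original_value = "//button[@id='save']"
corrected_token, corrected_value = "ID", "save"

# Same shape as the locator_pattern in the hunk: By.<TOKEN>, '<value>' with a
# backreference so the opening and closing quotes must match.
pattern = re.compile(r"By\.[A-Z_]+(\s*,\s*)(['\"])" + re.escape(original_value) + r"\2")

match = pattern.search(content)
if match:
    quote = match.group(2)
    new_tuple = f"By.{corrected_token}{match.group(1)}{quote}{corrected_value}{quote}"
    print(content.replace(match.group(0), new_tuple))
    # -> driver.find_element(By.ID, "save")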
@@ -168,8 +624,8 @@
         }, f, indent=2)
         logger.info(f"[CORRECTIONS REPORT] Exported to {output_file}")
 
-    def apply_all_corrections_to_files(self) ->
-        results = {"total": 0, "success": 0, "failed": 0, "details": []}
+    def apply_all_corrections_to_files(self) -> ApplyCorrectionsResult:
+        results: ApplyCorrectionsResult = {"total": 0, "success": 0, "failed": 0, "details": []}
         for correction in self.get_successful_corrections():
             test_file = correction.get("test_file")
             if not test_file:
@@ -208,19 +664,23 @@ def get_correction_tracker() -> CorrectionTracker:
 
 
 def record_correction(
-    original_by: str,
-
+    original_by: str,
+    original_value: str,
+    corrected_by: str,
+    corrected_value: str,
+    success: bool = True
+) -> None:
     """Record a selector correction."""
     get_correction_tracker().record_correction(
         original_by, original_value, corrected_by, corrected_value, success
     )
 
 
-def apply_corrections_to_test_files() ->
+def apply_corrections_to_test_files() -> ApplyCorrectionsResult:
     """Apply all successful corrections to their source test files."""
     return get_correction_tracker().apply_all_corrections_to_files()
 
 
-def export_corrections_report(output_file: str = "selector_corrections.json"):
+def export_corrections_report(output_file: str = "selector_corrections.json") -> None:
     """Export corrections report to JSON file."""
     get_correction_tracker().export_corrections_report(output_file)
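Finally, a possible end-of-run wiring for these module-level helpers (a sketch; the pytest hook is one common place to call them, not something the package mandates):

from selenium_selector_autocorrect.correction_tracker import (
    apply_corrections_to_test_files,
    export_corrections_report,
)

def pytest_sessionfinish(session, exitstatus):
    # Apply every successful correction back to its source files, then dump a report.
    results = apply_corrections_to_test_files()  # ApplyCorrectionsResult
    print(f"Applied {results['success']}/{results['total']} selector corrections")
    export_corrections_report("selector_corrections.json")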