Kea2-python 0.2.1-py3-none-any.whl → 0.2.3-py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their public registry, and is provided for informational purposes only.
Potentially problematic release.
This version of Kea2-python might be problematic.
- kea2/adbUtils.py +3 -1
- kea2/bug_report_generator.py +458 -229
- kea2/fastbotManager.py +30 -3
- kea2/keaUtils.py +72 -64
- kea2/kea_launcher.py +19 -1
- kea2/logWatcher.py +4 -2
- kea2/templates/bug_report_template.html +499 -152
- kea2/u2Driver.py +5 -3
- kea2/utils.py +14 -0
- {kea2_python-0.2.1.dist-info → kea2_python-0.2.3.dist-info}/METADATA +2 -2
- {kea2_python-0.2.1.dist-info → kea2_python-0.2.3.dist-info}/RECORD +15 -15
- {kea2_python-0.2.1.dist-info → kea2_python-0.2.3.dist-info}/WHEEL +0 -0
- {kea2_python-0.2.1.dist-info → kea2_python-0.2.3.dist-info}/entry_points.txt +0 -0
- {kea2_python-0.2.1.dist-info → kea2_python-0.2.3.dist-info}/licenses/LICENSE +0 -0
- {kea2_python-0.2.1.dist-info → kea2_python-0.2.3.dist-info}/top_level.txt +0 -0
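
The bulk of this release is a rework of the bug report generator: path setup moves into a new `_setup_paths()` helper, `generate_report()` accepts an optional result directory and returns the report path, screenshot annotation runs on a `ThreadPoolExecutor`, and per-property error details are loaded from a new `property_exec_info_*.json` file. A minimal usage sketch inferred from the diff below (the result directory name is a placeholder, not taken from a real run):

from kea2.bug_report_generator import BugReportGenerator

# In 0.2.3 the result directory no longer has to be supplied at construction time;
# it can be passed to generate_report() instead (placeholder path shown).
generator = BugReportGenerator()
report_path = generator.generate_report("output/res_2025010100_0000000000")
print(f"bug report generated: {report_path}")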
kea2/bug_report_generator.py
CHANGED
@@ -1,31 +1,78 @@
 import json
-import datetime
+from datetime import datetime
 from dataclasses import dataclass
 from pathlib import Path
-from typing import Dict, TypedDict,
+from typing import Dict, TypedDict, List, Deque, NewType, Union
 from collections import deque
+from concurrent.futures import ThreadPoolExecutor

-from PIL import Image, ImageDraw
+from PIL import Image, ImageDraw, ImageFont
 from jinja2 import Environment, FileSystemLoader, select_autoescape, PackageLoader
-from .utils import getLogger
-
+from kea2.utils import getLogger

 logger = getLogger(__name__)


 class StepData(TypedDict):
+    # The type of the action (Monkey / Script / Script Info)
     Type: str
+    # The steps of monkey event when the action happened
+    # ps: since we insert script actions into monkey actions. Total actions count >= Monkey actions count
     MonkeyStepsCount: int
+    # The time stamp of the action
     Time: str
+    # The execution info of the action
     Info: Dict
+    # The screenshot of the action
     Screenshot: str

+
+class CovData(TypedDict):
+    stepsCount: int  # The MonkeyStepsCount when profiling the Coverage data
+    coverage: float
+    totalActivitiesCount: int
+    testedActivitiesCount: int
+    totalActivities: List[str]
+    testedActivities: List[str]
+
+
+class ReportData(TypedDict):
+    timestamp: str
+    bugs_found: int
+    executed_events: int
+    total_testing_time: float
+    coverage: float
+    total_activities_count: int
+    tested_activities_count: int
+    total_activities: List
+    tested_activities: List
+    all_properties_count: int
+    executed_properties_count: int
+    property_violations: List[Dict]
+    property_stats: List
+    property_error_details: Dict[str, List[Dict]]  # Support multiple errors per property
+    screenshot_info: Dict
+    coverage_trend: List
+
+
+class PropertyExecResult(TypedDict):
+    precond_satisfied: int
+    executed: int
+    fail: int
+    error: int
+
+
+PropertyName = NewType("PropertyName", str)
+TestResult = NewType("TestResult", Dict[PropertyName, PropertyExecResult])
+
+
 @dataclass
 class DataPath:
     steps_log: Path
     result_json: Path
     coverage_log: Path
     screenshots_dir: Path
+    property_exec_info: Path


 class BugReportGenerator:
@@ -33,26 +80,66 @@ class BugReportGenerator:
     Generate HTML format bug reports
     """

-
+    _cov_trend: Deque[CovData] = None
+    _test_result: TestResult = None
+    _take_screenshots: bool = None
+    _data_path: DataPath = None
+
+    @property
+    def cov_trend(self):
+        if self._cov_trend is not None:
+            return self._cov_trend
+
+        # Parse coverage data
+        if not self.data_path.coverage_log.exists():
+            logger.error(f"{self.data_path.coverage_log} not exists")
+
+        cov_trend = list()
+
+        with open(self.data_path.coverage_log, "r", encoding="utf-8") as f:
+            for line in f:
+                if not line.strip():
+                    continue
+
+                coverage_data = json.loads(line)
+                cov_trend.append(coverage_data)
+        self._cov_trend = cov_trend
+        return self._cov_trend
+
+    @property
+    def take_screenshots(self) -> bool:
+        """Whether the `--take-screenshots` enabled. Should we report the screenshots?
+
+        Returns:
+            bool: Whether the `--take-screenshots` enabled.
+        """
+        if self._take_screenshots is None:
+            self._take_screenshots = self.data_path.screenshots_dir.exists()
+        return self._take_screenshots
+
+    @property
+    def test_result(self) -> TestResult:
+        if self._test_result is not None:
+            return self._test_result
+
+        if not self.data_path.result_json.exists():
+            logger.error(f"{self.data_path.result_json} not found")
+        with open(self.data_path.result_json, "r", encoding="utf-8") as f:
+            self._test_result: TestResult = json.load(f)
+
+        return self._test_result
+
+    def __init__(self, result_dir=None):
         """
         Initialize the bug report generator

         Args:
             result_dir: Directory path containing test results
         """
-
-
-
-        self.data_path: DataPath = DataPath(
-            steps_log=self.result_dir / f"output_{self.log_timestamp}" / "steps.log",
-            result_json=self.result_dir / f"result_{self.log_timestamp}.json",
-            coverage_log=self.result_dir / f"output_{self.log_timestamp}" / "coverage.log",
-            screenshots_dir=self.result_dir / f"output_{self.log_timestamp}" / "screenshots"
-        )
+        if result_dir is not None:
+            self._setup_paths(result_dir)

-        self.
-
-        self.take_screenshots = self._detect_screenshots_setting()
+        self.executor = ThreadPoolExecutor(max_workers=128)

         # Set up Jinja2 environment
         # First try to load templates from the package
@@ -75,16 +162,48 @@ class BugReportGenerator:
             autoescape=select_autoescape(['html', 'xml'])
         )

+    def _setup_paths(self, result_dir):
+        """
+        Setup paths for a given result directory
+
+        Args:
+            result_dir: Directory path containing test results
+        """
+        self.result_dir = Path(result_dir)
+        self.log_timestamp = self.result_dir.name.split("_", 1)[1]
+
+        self.data_path: DataPath = DataPath(
+            steps_log=self.result_dir / f"output_{self.log_timestamp}" / "steps.log",
+            result_json=self.result_dir / f"result_{self.log_timestamp}.json",
+            coverage_log=self.result_dir / f"output_{self.log_timestamp}" / "coverage.log",
+            screenshots_dir=self.result_dir / f"output_{self.log_timestamp}" / "screenshots",
+            property_exec_info=self.result_dir / f"property_exec_info_{self.log_timestamp}.json"
+        )

-
+        self.screenshots = deque()
+
+    def generate_report(self, result_dir_path=None):
         """
         Generate bug report and save to result directory
+
+        Args:
+            result_dir_path: Directory path containing test results (optional)
+                             If not provided, uses the path from initialization
         """
         try:
+            # Setup paths if result_dir_path is provided
+            if result_dir_path is not None:
+                self._setup_paths(result_dir_path)
+
+            # Check if paths are properly set up
+            if not hasattr(self, 'result_dir') or self.result_dir is None:
+                raise ValueError(
+                    "No result directory specified. Please provide result_dir_path or initialize with a directory.")
+
             logger.debug("Starting bug report generation")

             # Collect test data
-            test_data = self._collect_test_data()
+            test_data: ReportData = self._collect_test_data()

             # Generate HTML report
             html_content = self._generate_html_report(test_data)
@@ -95,170 +214,191 @@ class BugReportGenerator:
                 f.write(html_content)

             logger.debug(f"Bug report saved to: {report_path}")
+            return str(report_path)

         except Exception as e:
             logger.error(f"Error generating bug report: {e}")
+        finally:
+            self.executor.shutdown()

-    def _collect_test_data(self):
+    def _collect_test_data(self) -> ReportData:
         """
         Collect test data, including results, coverage, etc.
         """
-        data = {
+        data: ReportData = {
             "timestamp": self.log_timestamp,
             "bugs_found": 0,
             "executed_events": 0,
             "total_testing_time": 0,
-            "first_bug_time": 0,
-            "first_precondition_time": 0,
             "coverage": 0,
             "total_activities": [],
             "tested_activities": [],
+            "all_properties_count": 0,
+            "executed_properties_count": 0,
             "property_violations": [],
             "property_stats": [],
-            "
-            "
+            "property_error_details": {},
+            "screenshot_info": {},
+            "coverage_trend": []
         }

         # Parse steps.log file to get test step numbers and screenshot mappings
-        steps_log_path = self.data_path.steps_log
         property_violations = {}  # Store multiple violation records for each property
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                            current_property, current_test, property_violations
-                        )
-                    except Exception as e:
-                        logger.error(f"Error processing ScriptInfo step {step_index}: {e}")
-
-                # Store first and last step for time calculation
-                if step_index == 1:
-                    first_step_time = step_data["Time"]
-                last_step_time = step_data["Time"]
-
-        # Set the monkey events count
-        data["executed_events"] = monkey_events_count
-
-        # Calculate test time
-        if step_index > 0:
+
+        if not self.data_path.steps_log.exists():
+            logger.error(f"{self.data_path.steps_log} not exists")
+            return
+
+        current_property = None
+        current_test = {}
+        step_index = 0
+        monkey_events_count = 0  # Track monkey events separately
+
+        with open(self.data_path.steps_log, "r", encoding="utf-8") as f:
+            # Track current test state
+
+            for step_index, line in enumerate(f, start=1):
+                step_data = self._parse_step_data(line)
+
+                if not step_data:
+                    continue
+
+                step_type = step_data.get("Type", "")
+                screenshot = step_data.get("Screenshot", "")
+                info = step_data.get("Info", {})
+
+                # Count Monkey events separately
+                if step_type == "Monkey":
+                    monkey_events_count += 1
+
+                # If screenshots are enabled, mark the screenshot
+                if self.take_screenshots and step_data["Screenshot"]:
+                    self.executor.submit(self._mark_screenshot, step_data)
+
+                # Collect detailed information for each screenshot
+                if screenshot and screenshot not in data["screenshot_info"]:
+                    self._add_screenshot_info(step_data, step_index, data)
+
+                # Process ScriptInfo for property violations
+                if step_type == "ScriptInfo":
                     try:
-
-
-
+                        property_name = info.get("propName", "")
+                        state = info.get("state", "")
+                        current_property, current_test = self._process_script_info(
+                            property_name, state, step_index, screenshot,
+                            current_property, current_test, property_violations
+                        )
                     except Exception as e:
-                        logger.error(f"Error
+                        logger.error(f"Error processing ScriptInfo step {step_index}: {e}")

-
-
-
-
-
-
+                # Store first and last step for time calculation
+                if step_index == 1:
+                    first_step_time = step_data["Time"]
+                last_step_time = step_data["Time"]
+
+        # Set the monkey events count correctly
+        data["executed_events"] = monkey_events_count

-        # Calculate
-
-
-
-            data["bugs_found"] += 1
+        # Calculate test time
+        if first_step_time and last_step_time:
+            def _get_datetime(raw_datetime) -> datetime:
+                return datetime.strptime(raw_datetime, r"%Y-%m-%d %H:%M:%S.%f")

-
-
+            test_time = _get_datetime(last_step_time) - _get_datetime(first_step_time)
+
+            total_seconds = int(test_time.total_seconds())
+            hours, remainder = divmod(total_seconds, 3600)
+            minutes, seconds = divmod(remainder, 60)
+            data["total_testing_time"] = f"{hours:02d}:{minutes:02d}:{seconds:02d}"
+
+        # Calculate bug count directly from result data
+        for property_name, test_result in self.test_result.items():
+            # Check if failed or error
+            if test_result["fail"] > 0 or test_result["error"] > 0:
+                data["bugs_found"] += 1
+
+        # Store the raw result data for direct use in HTML template
+        data["property_stats"] = self.test_result
+
+        # Calculate properties statistics
+        data["all_properties_count"] = len(self.test_result)
+        data["executed_properties_count"] = sum(1 for result in self.test_result.values() if result.get("executed", 0) > 0)

         # Process coverage data
-
-
-
-
-
-
-
-
-
-            data["total_activities"] = coverage_data.get("totalActivities", [])
-            data["tested_activities"] = coverage_data.get("testedActivities", [])
-        except Exception as e:
-            logger.error(f"Error parsing final coverage data: {e}")
+        data["coverage_trend"] = self.cov_trend
+
+        if self.cov_trend:
+            final_trend = self.cov_trend[-1]
+            data["coverage"] = final_trend["coverage"]
+            data["total_activities"] = final_trend["totalActivities"]
+            data["tested_activities"] = final_trend["testedActivities"]
+            data["total_activities_count"] = final_trend["totalActivitiesCount"]
+            data["tested_activities_count"] = final_trend["testedActivitiesCount"]

         # Generate Property Violations list
         self._generate_property_violations_list(property_violations, data)

+        # Load error details for properties with fail/error state
+        data["property_error_details"] = self._load_property_error_details()
+
         return data

     def _parse_step_data(self, raw_step_info: str) -> StepData:
-        step_data = json.loads(raw_step_info)
-        step_data["Info"] = json.loads(step_data
+        step_data: StepData = json.loads(raw_step_info)
+        step_data["Info"] = json.loads(step_data["Info"])
         return step_data

     def _mark_screenshot(self, step_data: StepData):
-
-
+        try:
+            step_type = step_data["Type"]
+            screenshot_name = step_data["Screenshot"]
+            if not screenshot_name:
+                return
+
+            if step_type == "Monkey":
                 act = step_data["Info"].get("act")
                 pos = step_data["Info"].get("pos")
-                screenshot_name = step_data["Screenshot"]
                 if act in ["CLICK", "LONG_CLICK"] or act.startswith("SCROLL"):
-
-
-
-
-
+                    self._mark_screenshot_interaction(step_type, screenshot_name, act, pos)
+
+            elif step_type == "Script":
+                act = step_data["Info"].get("method")
+                pos = step_data["Info"].get("params")
+                if act in ["click", "setText", "swipe"]:
+                    self._mark_screenshot_interaction(step_type, screenshot_name, act, pos)
+
+        except Exception as e:
+            logger.error(f"Error when marking screenshots: {e}")


-    def _mark_screenshot_interaction(self,
+    def _mark_screenshot_interaction(self, step_type: str, screenshot_name: str, action_type: str, position: Union[List, tuple]) -> bool:
         """
         Mark interaction on screenshot with colored rectangle

         Args:
-
-
-
+            step_type (str): Type of the step (Monkey or Script)
+            screenshot_name (str): Name of the screenshot file
+            action_type (str): Type of action (CLICK/LONG_CLICK/SCROLL for Monkey, click/setText/swipe for Script)
+            position: Position coordinates or parameters (format varies by action type)

         Returns:
             bool: True if marking was successful, False otherwise
         """
-
-
-
+        screenshot_path: Path = self.data_path.screenshots_dir / screenshot_name
+        if not screenshot_path.exists():
+            logger.error(f"Screenshot file {screenshot_path} not exists.")
+            return False

-
-
-
+        img = Image.open(screenshot_path).convert("RGB")
+        draw = ImageDraw.Draw(img)
+        line_width = 5

-
+        if step_type == "Monkey":
+            if len(position) < 4:
+                logger.warning(f"Monkey action requires 4 coordinates, got {len(position)}. Skip drawing.")
+                return False

-
+            x1, y1, x2, y2 = map(int, position[:4])

             if action_type == "CLICK":
                 for i in range(line_width):
@@ -270,56 +410,64 @@ class BugReportGenerator:
                 for i in range(line_width):
                     draw.rectangle([x1 - i, y1 - i, x2 + i, y2 + i], outline=(0, 255, 0))

-
-
+        elif step_type == "Script":
+            if action_type == "click":

-
-
-
-
-
-
-        """
-        Detect if screenshots were enabled during test run.
-        Returns True if screenshots were taken, False otherwise.
-        """
-        return self.data_path.screenshots_dir.exists()
+                if len(position) < 2:
+                    logger.warning(f"Script click action requires 2 coordinates, got {len(position)}. Skip drawing.")
+                    return False
+
+                x, y = map(float, position[:2])
+                x1, y1, x2, y2 = x - 50, y - 50, x + 50, y + 50

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-                "tested_activities_count": coverage_data.get("testedActivitiesCount", 0)
-            })
-            last_line = line
-        except Exception as e:
-            logger.error(f"Error parsing coverage data: {e}")
-            continue
-        return cov_trend, last_line
+                for i in range(line_width):
+                    draw.rectangle([x1 - i, y1 - i, x2 + i, y2 + i], outline=(255, 0, 0))
+
+            elif action_type == "swipe":
+
+                if len(position) < 4:
+                    logger.warning(f"Script swipe action requires 4 coordinates, got {len(position)}. Skip drawing.")
+                    return False
+
+                x1, y1, x2, y2 = map(float, position[:4])
+
+                # mark start and end positions with rectangles
+                start_x1, start_y1, start_x2, start_y2 = x1 - 50, y1 - 50, x1 + 50, y1 + 50
+                for i in range(line_width):
+                    draw.rectangle([start_x1 - i, start_y1 - i, start_x2 + i, start_y2 + i], outline=(255, 0, 0))

-
+                end_x1, end_y1, end_x2, end_y2 = x2 - 50, y2 - 50, x2 + 50, y2 + 50
+                for i in range(line_width):
+                    draw.rectangle([end_x1 - i, end_y1 - i, end_x2 + i, end_y2 + i], outline=(255, 0, 0))
+
+                # draw line between start and end positions
+                draw.line([(x1, y1), (x2, y2)], fill=(255, 0, 0), width=line_width)
+
+                # add text labels for start and end positions
+                font = ImageFont.truetype("arial.ttf", 80)
+
+                # draw "start" at start position
+                draw.text((x1 - 20, y1 - 70), "start", fill=(255, 0, 0), font=font)
+
+                # draw "end" at end position
+                draw.text((x2 - 15, y2 - 70), "end", fill=(255, 0, 0), font=font)
+
+        img.save(screenshot_path)
+        return True
+
+    def _generate_html_report(self, data: ReportData):
         """
         Generate HTML format bug report
         """
         try:
             # Format timestamp for display
-            timestamp = datetime.
+            timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

             # Ensure coverage_trend has data
             if not data["coverage_trend"]:
                 logger.warning("No coverage trend data")
-
+                # Use the same field names as in coverage.log file
+                data["coverage_trend"] = [{"stepsCount": 0, "coverage": 0, "testedActivitiesCount": 0}]

             # Convert coverage_trend to JSON string, ensuring all data points are included
             coverage_trend_json = json.dumps(data["coverage_trend"])
@@ -332,16 +480,17 @@ class BugReportGenerator:
                 'total_testing_time': data["total_testing_time"],
                 'executed_events': data["executed_events"],
                 'coverage_percent': round(data["coverage"], 2),
-                '
-                '
-                '
-                '
-                '
-                '
+                'total_activities_count': data["total_activities_count"],
+                'tested_activities_count': data["tested_activities_count"],
+                'tested_activities': data["tested_activities"],
+                'total_activities': data["total_activities"],
+                'all_properties_count': data["all_properties_count"],
+                'executed_properties_count': data["executed_properties_count"],
                 'items_per_page': 10,  # Items to display per page
                 'screenshots': self.screenshots,
                 'property_violations': data["property_violations"],
                 'property_stats': data["property_stats"],
+                'property_error_details': data["property_error_details"],
                 'coverage_data': coverage_trend_json,
                 'take_screenshots': self.take_screenshots  # Pass screenshot setting to template
             }
@@ -361,66 +510,50 @@ class BugReportGenerator:
             logger.error(f"Error rendering template: {e}")
             raise

-    def _add_screenshot_info(self,
+    def _add_screenshot_info(self, step_data: StepData, step_index: int, data: Dict):
         """
         Add screenshot information to data structure
-
+
         Args:
-
-            step_type: Type of step (Monkey, Script, ScriptInfo)
-            info: Step information dictionary
+            step_data: data for the current step
             step_index: Current step index
-            relative_path: Relative path to screenshots directory
             data: Data dictionary to update
         """
-
-        caption = ""
+        caption = ""

-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            "step_index": step_index
-        }
-
-        screenshot_caption = data["screenshot_info"][screenshot].get('caption', '')
-        self.screenshots.append({
-            'id': step_index,
-            'path': f"{relative_path}/{screenshot}",
-            'caption': f"{step_index}. {screenshot_caption}"
-        })
-
-    def _process_script_info(self, property_name: str, state: str, step_index: int, screenshot: str,
-                             current_property: str, current_test: Dict, property_violations: Dict) -> tuple:
+        if step_data["Type"] == "Monkey":
+            # Extract 'act' attribute for Monkey type and convert to lowercase
+            caption = f"{step_data['Info'].get('act', 'N/A')}"
+        elif step_data["Type"] == "Script":
+            # Extract 'method' attribute for Script type
+            caption = f"{step_data['Info'].get('method', 'N/A')}"
+        elif step_data["Type"] == "ScriptInfo":
+            # Extract 'propName' and 'state' attributes for ScriptInfo type
+            prop_name = step_data["Info"].get('propName', '')
+            state = step_data["Info"].get('state', 'N/A')
+            caption = f"{prop_name}: {state}" if prop_name else f"{state}"
+
+        screenshot_name = step_data["Screenshot"]
+        # Use relative path string instead of Path object
+        relative_screenshot_path = f"output_{self.log_timestamp}/screenshots/{screenshot_name}"
+
+        data["screenshot_info"][screenshot_name] = {
+            "type": step_data["Type"],
+            "caption": caption,
+            "step_index": step_index
+        }
+
+        self.screenshots.append({
+            'id': step_index,
+            'path': relative_screenshot_path,  # Now using string path
+            'caption': f"{step_index}. {caption}"
+        })
+
+    def _process_script_info(self, property_name: str, state: str, step_index: int, screenshot: str,
+                             current_property: str, current_test: Dict, property_violations: Dict) -> tuple:
         """
         Process ScriptInfo step for property violations tracking
-
+
         Args:
             property_name: Property name from ScriptInfo
             state: State from ScriptInfo (start, pass, fail, error)
@@ -429,7 +562,7 @@ class BugReportGenerator:
             current_property: Currently tracked property
             current_test: Current test data
             property_violations: Dictionary to store violations
-
+
         Returns:
             tuple: (updated_current_property, updated_current_test)
         """
@@ -464,13 +597,13 @@ class BugReportGenerator:
            # Reset current test
            current_property = None
            current_test = {}
-
+
        return current_property, current_test

    def _generate_property_violations_list(self, property_violations: Dict, data: Dict):
        """
        Generate property violations list from collected violation data
-
+
        Args:
            property_violations: Dictionary containing property violations
            data: Data dictionary to update with property violations list
@@ -489,3 +622,99 @@ class BugReportGenerator:
                 "postcondition_page": end_step
             })
             index += 1
+
+    def _load_property_error_details(self) -> Dict[str, List[Dict]]:
+        """
+        Load property execution error details from property_exec_info file
+
+        Returns:
+            Dict[str, List[Dict]]: Mapping of property names to their error tracebacks with context
+        """
+        error_details = {}
+
+        if not self.data_path.property_exec_info.exists():
+            logger.warning(f"Property exec info file {self.data_path.property_exec_info} not found")
+            return error_details
+
+        try:
+            with open(self.data_path.property_exec_info, "r", encoding="utf-8") as f:
+                # Use hash map for efficient deduplication
+                error_hash_map = {}  # property_name -> {error_hash: error_data}
+
+                for line_number, line in enumerate(f, 1):
+                    line = line.strip()
+                    if not line:
+                        continue
+
+                    try:
+                        exec_info = json.loads(line)
+                        prop_name = exec_info.get("propName", "")
+                        state = exec_info.get("state", "")
+                        tb = exec_info.get("tb", "")
+
+                        # Only process error details for failed or error states
+                        if prop_name and state in ["fail", "error"] and tb:
+                            if prop_name not in error_hash_map:
+                                error_hash_map[prop_name] = {}
+
+                            # Create hash key for this specific error (state + traceback)
+                            error_hash = hash((state, tb))
+
+                            if error_hash in error_hash_map[prop_name]:
+                                # Error already exists, increment count
+                                error_hash_map[prop_name][error_hash]["occurrence_count"] += 1
+                            else:
+                                # New error, create entry
+                                short_desc = self._extract_error_summary(tb)
+                                error_hash_map[prop_name][error_hash] = {
+                                    "state": state,
+                                    "traceback": tb,
+                                    "occurrence_count": 1,
+                                    "short_description": short_desc
+                                }
+
+                    except json.JSONDecodeError as e:
+                        logger.warning(f"Failed to parse property exec info line {line_number}: {line[:100]}... Error: {e}")
+                        continue
+
+                # Convert hash map to list format for template compatibility
+                for prop_name, hash_dict in error_hash_map.items():
+                    error_details[prop_name] = list(hash_dict.values())
+                    # Sort by occurrence count (descending) to show most frequent errors first
+                    error_details[prop_name].sort(key=lambda x: x["occurrence_count"], reverse=True)
+
+        except Exception as e:
+            logger.error(f"Error reading property exec info file: {e}")
+
+        return error_details
+
+    def _extract_error_summary(self, traceback: str) -> str:
+        """
+        Extract a short error summary from the full traceback
+
+        Args:
+            traceback: Full error traceback string
+
+        Returns:
+            str: Short error summary
+        """
+        try:
+            lines = traceback.strip().split('\n')
+
+            for line in reversed(lines):
+                line = line.strip()
+                if line and not line.startswith(' '):
+                    return line
+            return "Unknown error"
+        except Exception:
+            return "Error parsing traceback"
+
+
+if __name__ == "__main__":
+    print("Generating bug report")
+    # OUTPUT_PATH = "<Your output path>"
+    OUTPUT_PATH = "P:/Python/Kea2/output/res_2025062921_4535312225"
+
+    report_generator = BugReportGenerator()
+    report_path = report_generator.generate_report(OUTPUT_PATH)
+    print(f"bug report generated: {report_path}")
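
For reference, the reworked generator consumes two JSON-lines inputs: coverage.log (one CovData record per line, read by the cov_trend property) and property_exec_info_<timestamp>.json (one record per property execution, read by _load_property_error_details). A sketch of what such records might look like, with purely illustrative values (the real files are written by a Kea2 test run):

import json

# Illustrative records only; field names follow CovData and _load_property_error_details above.
coverage_record = {                       # one line of coverage.log
    "stepsCount": 120,
    "coverage": 35.0,
    "totalActivitiesCount": 40,
    "testedActivitiesCount": 14,
    "totalActivities": ["com.example.MainActivity", "com.example.SettingsActivity"],
    "testedActivities": ["com.example.MainActivity"],
}
property_exec_record = {                  # one line of property_exec_info_<timestamp>.json
    "propName": "test_example_property",  # hypothetical property name
    "state": "fail",                      # only "fail"/"error" records with a "tb" reach the report
    "tb": "Traceback (most recent call last): ...",
}
print(json.dumps(coverage_record))
print(json.dumps(property_exec_record))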