Kea2-python 1.1.0b1 (kea2_python-1.1.0b1-py3-none-any.whl)
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- kea2/__init__.py +8 -0
- kea2/absDriver.py +56 -0
- kea2/adbUtils.py +554 -0
- kea2/assets/config_version.json +16 -0
- kea2/assets/fastbot-thirdpart.jar +0 -0
- kea2/assets/fastbot_configs/abl.strings +2 -0
- kea2/assets/fastbot_configs/awl.strings +3 -0
- kea2/assets/fastbot_configs/max.config +7 -0
- kea2/assets/fastbot_configs/max.fuzzing.strings +699 -0
- kea2/assets/fastbot_configs/max.schema.strings +1 -0
- kea2/assets/fastbot_configs/max.strings +3 -0
- kea2/assets/fastbot_configs/max.tree.pruning +27 -0
- kea2/assets/fastbot_configs/teardown.py +18 -0
- kea2/assets/fastbot_configs/widget.block.py +38 -0
- kea2/assets/fastbot_libs/arm64-v8a/libfastbot_native.so +0 -0
- kea2/assets/fastbot_libs/armeabi-v7a/libfastbot_native.so +0 -0
- kea2/assets/fastbot_libs/x86/libfastbot_native.so +0 -0
- kea2/assets/fastbot_libs/x86_64/libfastbot_native.so +0 -0
- kea2/assets/framework.jar +0 -0
- kea2/assets/kea2-thirdpart.jar +0 -0
- kea2/assets/monkeyq.jar +0 -0
- kea2/assets/quicktest.py +126 -0
- kea2/cli.py +216 -0
- kea2/fastbotManager.py +269 -0
- kea2/kea2_api.py +166 -0
- kea2/keaUtils.py +926 -0
- kea2/kea_launcher.py +299 -0
- kea2/logWatcher.py +92 -0
- kea2/mixin.py +0 -0
- kea2/report/__init__.py +0 -0
- kea2/report/bug_report_generator.py +879 -0
- kea2/report/mixin.py +496 -0
- kea2/report/report_merger.py +1066 -0
- kea2/report/templates/bug_report_template.html +4028 -0
- kea2/report/templates/merged_bug_report_template.html +3602 -0
- kea2/report/utils.py +10 -0
- kea2/result.py +257 -0
- kea2/resultSyncer.py +65 -0
- kea2/state.py +22 -0
- kea2/typedefs.py +32 -0
- kea2/u2Driver.py +612 -0
- kea2/utils.py +192 -0
- kea2/version_manager.py +102 -0
- kea2_python-1.1.0b1.dist-info/METADATA +447 -0
- kea2_python-1.1.0b1.dist-info/RECORD +49 -0
- kea2_python-1.1.0b1.dist-info/WHEEL +5 -0
- kea2_python-1.1.0b1.dist-info/entry_points.txt +2 -0
- kea2_python-1.1.0b1.dist-info/licenses/LICENSE +16 -0
- kea2_python-1.1.0b1.dist-info/top_level.txt +1 -0
--- /dev/null
+++ kea2/report/bug_report_generator.py
@@ -0,0 +1,879 @@
import json
from datetime import datetime
from dataclasses import dataclass
from pathlib import Path
from typing import Dict, Tuple, TypedDict, List, Deque, NewType, Union, Optional
from collections import deque
from concurrent.futures import ThreadPoolExecutor

from jinja2 import Environment, FileSystemLoader, select_autoescape, PackageLoader
from ..utils import getLogger, catchException
from .mixin import CrashAnrMixin, PathParserMixin, ScreenshotsMixin
from .utils import thread_pool

logger = getLogger(__name__)


class StepData(TypedDict):
    # The type of the action (Monkey / Script / ScriptInfo)
    Type: str
    # The count of monkey events when the action happened.
    # Note: script actions are inserted into the monkey action stream,
    # so the total actions count >= the monkey actions count.
    MonkeyStepsCount: int
    # The timestamp of the action
    Time: str
    # The execution info of the action
    Info: Dict
    # The screenshot of the action
    Screenshot: str
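
For orientation, each non-empty line of steps.log is expected to decode into one record with the StepData shape above (the `_parse_step_data` helper further down does exactly that). The record below is a minimal illustrative sketch, not output from a real run: the field values, the "CLICK" info string, and the screenshot file name are invented; only the field names and the timestamp format come from this file.

```python
import json

# Hypothetical steps.log line; the values are made up for illustration only.
sample_line = (
    '{"Type": "Monkey", "MonkeyStepsCount": 42, '
    '"Time": "2024-01-01 12:00:00.000", '
    '"Info": "CLICK", "Screenshot": "screen-42.png"}'
)

step = json.loads(sample_line)  # -> dict matching the StepData shape
print(step["Type"], step["MonkeyStepsCount"], step["Screenshot"])
```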

class CovData(TypedDict):
    stepsCount: int
    coverage: float
    totalActivitiesCount: int
    testedActivitiesCount: int
    totalActivities: List[str]
    testedActivities: List[str]
    activityCountHistory: Dict[str, int]


class ReportData(TypedDict):
    timestamp: str
    bugs_found: int
    invariant_violations_count: int
    executed_events: int
    total_testing_time: float
    coverage: float
    total_activities_count: int
    tested_activities_count: int
    total_activities: List
    tested_activities: List
    all_properties_count: int
    executed_properties_count: int
    property_violations: List[Dict]
    property_stats: List
    property_error_details: Dict[str, List[Dict]]  # Support multiple errors per property
    property_kind_summary: Dict[str, int]
    screenshot_info: Dict
    coverage_trend: List
    property_execution_trend: List  # Track executed properties count over steps
    activity_count_history: Dict[str, int]  # Activity traversal count from final coverage data
    crash_events: List[Dict]  # Crash events from crash-dump.log
    anr_events: List[Dict]  # ANR events from crash-dump.log
    kill_apps_events: List[Dict]  # kill_apps info events from steps.log


class PropertyExecResult(TypedDict):
    precond_satisfied: int
    executed: int
    fail: int
    error: int


@dataclass
class PropertyExecInfo:
    """Class representing property execution information from property_exec_info file"""
    prop_name: str
    state: str  # start, pass, fail, error
    traceback: str
    start_steps_count: int
    occurrence_count: int = 1
    short_description: str = ""
    start_steps_count_list: List[int] = None

    def __post_init__(self):
        if self.start_steps_count_list is None:
            self.start_steps_count_list = [self.start_steps_count]
        if not self.short_description and self.traceback:
            self.short_description = self._extract_error_summary(self.traceback)

    def _extract_error_summary(self, traceback: str) -> str:
        """Extract a short error summary from the full traceback"""
        try:
            lines = traceback.strip().split('\n')
            for line in reversed(lines):
                line = line.strip()
                if line and not line.startswith(' '):
                    return line
            return "Unknown error"
        except Exception:
            return "Error parsing traceback"

    def get_error_hash(self) -> int:
        """Generate hash key for error deduplication"""
        return hash((self.state, self.traceback))

    def is_error_state(self) -> bool:
        """Check if this is an error or fail state"""
        return self.state in ["fail", "error"]

    def add_occurrence(self, start_steps_count: int):
        """Add another occurrence of the same error"""
        self.occurrence_count += 1
        self.start_steps_count_list.append(start_steps_count)


PropertyName = NewType("PropertyName", str)
TestResult = NewType("TestResult", Dict[PropertyName, PropertyExecResult])
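
A short sketch of how the deduplication helpers on PropertyExecInfo are meant to be used together (see `_group_errors_by_property` further down): two records with the same state and traceback hash to the same key, so the second one is folded into the first. This is not part of the diffed file; the property name and traceback text are invented for illustration.

```python
from kea2.report.bug_report_generator import PropertyExecInfo

a = PropertyExecInfo(prop_name="prop_demo", state="fail",
                     traceback="AssertionError: button missing", start_steps_count=10)
b = PropertyExecInfo(prop_name="prop_demo", state="fail",
                     traceback="AssertionError: button missing", start_steps_count=25)

# Identical (state, traceback) pairs produce the same deduplication key ...
assert a.get_error_hash() == b.get_error_hash()

# ... so the second occurrence is folded into the first record.
a.add_occurrence(b.start_steps_count)
assert a.occurrence_count == 2
assert a.start_steps_count_list == [10, 25]
assert a.short_description == "AssertionError: button missing"
```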

class BugReportGenerator(CrashAnrMixin, PathParserMixin, ScreenshotsMixin):
    """
    Generate HTML format bug reports
    """

    _cov_trend: Deque[CovData] = None
    _test_result: TestResult = None

    @property
    def cov_trend(self):
        if self._cov_trend is not None:
            return self._cov_trend

        # Parse coverage data
        if not self.data_path.coverage_log.exists():
            logger.error(f"{self.data_path.coverage_log} does not exist")
            # Fall back to an empty trend instead of failing on the missing file
            self._cov_trend = []
            return self._cov_trend

        cov_trend = list()

        with open(self.data_path.coverage_log, "r", encoding="utf-8") as f:
            for line in f:
                if not line.strip():
                    continue

                coverage_data = json.loads(line)
                cov_trend.append(coverage_data)
        self._cov_trend = cov_trend
        return self._cov_trend

    @property
    def test_result(self) -> TestResult:
        if self._test_result is not None:
            return self._test_result

        if not self.data_path.result_json.exists():
            logger.error(f"{self.data_path.result_json} not found")
            # Fall back to an empty result instead of failing on the missing file
            self._test_result = {}
            return self._test_result

        with open(self.data_path.result_json, "r", encoding="utf-8") as f:
            self._test_result: TestResult = json.load(f)

        return self._test_result

    @property
    def config(self) -> Dict:
        if not hasattr(self, '_config'):
            with open(self.result_dir / "bug_report_config.json", "r", encoding="utf-8") as fp:
                self._config = json.load(fp)
        return self._config
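
The cov_trend property reads one JSON object per line from the coverage log (a comment in `_generate_html_report` below refers to it as coverage.log), and the final object drives the summary numbers collected later. A hedged sketch of what such a line might contain, based only on the CovData fields declared above; the concrete activity names and numbers are invented.

```python
import json

# Hypothetical coverage log line (values invented for illustration).
sample_cov_line = json.dumps({
    "stepsCount": 100,
    "coverage": 12.5,
    "totalActivitiesCount": 40,
    "testedActivitiesCount": 5,
    "totalActivities": ["MainActivity", "SettingsActivity"],
    "testedActivities": ["MainActivity"],
    "activityCountHistory": {"MainActivity": 60},
})

cov = json.loads(sample_cov_line)  # -> dict matching the CovData shape
print(f'{cov["testedActivitiesCount"]}/{cov["totalActivitiesCount"]} activities, {cov["coverage"]}%')
```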

    def __init__(self, result_dir=None):
        """
        Initialize the bug report generator

        Args:
            result_dir: Directory path containing test results
        """
        if result_dir is None:
            raise RuntimeError("Result directory must be provided to generate report.")
        self.result_dir = Path(result_dir)

    def __set_up_jinja_env(self):
        """Set up Jinja2 environment for HTML template rendering"""
        try:
            self.jinja_env = Environment(
                loader=PackageLoader("kea2.report", "templates"),
                autoescape=select_autoescape(['html', 'xml'])
            )
        except (ImportError, ValueError):
            # If unable to load from the package, load from the templates folder next to this file
            current_dir = Path(__file__).parent
            templates_dir = current_dir / "templates"

            # Ensure the template directory exists
            if not templates_dir.exists():
                templates_dir.mkdir(parents=True, exist_ok=True)

            self.jinja_env = Environment(
                loader=FileSystemLoader(templates_dir),
                autoescape=select_autoescape(['html', 'xml'])
            )

    @catchException("Error generating bug report")
    def generate_report(self) -> Optional[str]:
        """
        Generate the bug report and save it to the result directory
        passed at initialization.

        Returns:
            The path of the generated HTML report as a string.
        """
        self.__set_up_jinja_env()

        self.screenshots = deque()
        with thread_pool(max_workers=128) as executor:
            logger.debug("Starting bug report generation")

            # Collect test data
            test_data: ReportData = self._collect_test_data(executor)

            # Generate HTML report
            html_content = self._generate_html_report(test_data)

            # Save report
            report_path = self.result_dir / "bug_report.html"
            with open(report_path, "w", encoding="utf-8") as f:
                f.write(html_content)

            logger.info(f"Bug report saved to: {report_path}")
            return str(report_path)
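
A minimal usage sketch of the class, assuming a result directory produced by a previous Kea2 run that already contains bug_report_config.json and the log files referenced above; the directory name here is made up.

```python
from kea2.report.bug_report_generator import BugReportGenerator

# Hypothetical results directory from a previous Kea2 run.
generator = BugReportGenerator(result_dir="results/res_20240101_120000")
report_path = generator.generate_report()  # writes <result_dir>/bug_report.html
print(report_path)
```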

    @catchException("Error when collecting test data")
    def _collect_test_data(self, executor: "ThreadPoolExecutor" = None) -> ReportData:
        """
        Collect test data, including results, coverage, etc.
        """
        data: ReportData = {
            "timestamp": self.config.get("log_stamp", ""),
            "test_time": self.config.get("test_time", ""),
            "bugs_found": 0,
            "invariant_violations_count": 0,
            "executed_events": 0,
            "total_testing_time": 0,
            "coverage": 0,
            "total_activities": [],
            "tested_activities": [],
            "all_properties_count": 0,
            "executed_properties_count": 0,
            "property_violations": [],
            "property_stats": [],
            "property_error_details": {},
            "property_kind_summary": {},
            "screenshot_info": {},
            "coverage_trend": [],
            "property_execution_trend": [],
            "activity_count_history": {},
            "crash_events": [],
            "anr_events": [],
            "kill_apps_events": [],
        }

        # Parse the steps.log file to get test step numbers and screenshot mappings
        property_violations = {}  # Store multiple violation records for each property
        executed_properties_by_step = {}  # Track executed properties at each step: {step_count: set()}
        executed_properties = set()  # Track unique executed properties
        property_kinds = {}
        for prop_name, result in self.test_result.items():
            raw_kind = result.get("kind", "unknown")
            if isinstance(raw_kind, str) and raw_kind.strip():
                property_kinds[prop_name] = raw_kind.strip().lower()
            else:
                property_kinds[prop_name] = "unknown"

        if not self.data_path.steps_log.exists():
            logger.error(f"{self.data_path.steps_log} does not exist")
            # Return the empty skeleton so report generation can still proceed
            return data

        current_property = None
        current_test = {}
        step_index = 0
        monkey_events_count = 0  # Track monkey events separately
        # Initialize the time markers so the later check never hits unbound names
        first_step_time = None
        last_step_time = None

        with open(self.data_path.steps_log, "r", encoding="utf-8") as f:
            # Track current test state
            step_index = 0
            _last_screenshot_file = ""
            for line in f:
                step_data = self._parse_step_data(line)

                if not step_data:
                    continue

                step_type = step_data.get("Type", "")
                screenshot = step_data.get("Screenshot", "")
                if screenshot and screenshot != _last_screenshot_file:
                    step_index += 1
                    _last_screenshot_file = screenshot

                info = step_data.get("Info", {})

                # Count Monkey events separately
                if step_type == "Monkey" or step_type == "Fuzz":
                    monkey_events_count += 1

                # Record restart-app marker events (no screenshot expected)
                if step_type == "Monkey" and info == "kill_apps":
                    monkey_steps_count = step_data.get("MonkeyStepsCount", "N/A")
                    caption = f"Monkey Step {monkey_steps_count}: restart app"

                    data["kill_apps_events"].append({
                        "step_index": step_index,
                        "monkey_steps_count": monkey_steps_count,
                    })

                    # Show this info event in the Test Screenshots timeline
                    self.screenshots.append({
                        "id": step_index,
                        "path": "",
                        "caption": f"{step_index}. {caption}",
                        "kind": "info",
                        "info": "kill_apps",
                    })

                # If screenshots are enabled, mark the screenshot
                if self.take_screenshots and step_data["Screenshot"]:
                    executor.submit(self._mark_screenshot, step_data)

                # Collect detailed information for each screenshot
                if screenshot and screenshot not in data["screenshot_info"]:
                    self._add_screenshot_info(step_data, step_index, data)

                # Process ScriptInfo for property violations and execution tracking
                if step_type == "ScriptInfo":
                    property_name = info.get("propName", "")
                    state = info.get("state", "")
                    kind = info.get("kind", "")
                    normalized_kind = ""
                    if isinstance(kind, str) and kind:
                        normalized_kind = kind.strip().lower()
                    if not normalized_kind:
                        normalized_kind = property_kinds.get(property_name, "unknown")

                    # Track executed properties (properties that have been started)
                    if property_name and state == "start" and normalized_kind != "invariant":
                        executed_properties.add(property_name)
                        # Record the monkey steps count for this property execution
                        executed_properties_by_step[monkey_events_count] = executed_properties.copy()

                    if normalized_kind == "invariant" and screenshot and screenshot in data["screenshot_info"]:
                        self._add_screenshot_info(step_data, step_index, data, force_append=True)

                    current_property, current_test = self._process_script_info(
                        property_name, state, normalized_kind, step_index, screenshot,
                        current_property, current_test, property_violations
                    )

                # Store first and last step for time calculation
                if step_index == 1:
                    first_step_time = step_data["Time"]
                last_step_time = step_data["Time"]

        # Set the monkey events count correctly
        data["executed_events"] = monkey_events_count

        # Calculate test time
        if first_step_time and last_step_time:
            def _get_datetime(raw_datetime) -> datetime:
                return datetime.strptime(raw_datetime, r"%Y-%m-%d %H:%M:%S.%f")

            test_time = _get_datetime(last_step_time) - _get_datetime(first_step_time)

            total_seconds = int(test_time.total_seconds())
            hours, remainder = divmod(total_seconds, 3600)
            minutes, seconds = divmod(remainder, 60)
            data["total_testing_time"] = f"{hours:02d}:{minutes:02d}:{seconds:02d}"

        # Enrich property statistics with derived metrics and calculate bug count
        enriched_property_stats = {}
        for property_name, test_result in self.test_result.items():
            executed_count = test_result.get("executed", 0)
            fail_count = test_result.get("fail", 0)
            error_count = test_result.get("error", 0)
            pass_count = max(executed_count - fail_count - error_count, 0)

            enriched_property_stats[property_name] = {
                **test_result,
                "pass_count": pass_count,
                "kind": property_kinds.get(property_name, "unknown"),
            }

        # Count property violations (exclude invariants)
        data["bugs_found"] = sum(
            1
            for stats in enriched_property_stats.values()
            if stats.get("kind") != "invariant"
            and (stats.get("fail", 0) > 0 or stats.get("error", 0) > 0)
        )

        # Store the enriched result data for direct use in the HTML template
        data["property_stats"] = enriched_property_stats

        data["invariant_violations_count"] = sum(
            1
            for stats in enriched_property_stats.values()
            if stats.get("kind") == "invariant"
            and (stats.get("fail", 0) > 0 or stats.get("error", 0) > 0)
        )

        property_kind_summary = {
            "all": 0,
            "property": 0,
            "invariant": 0,
            "unknown": 0,
        }
        for stats in enriched_property_stats.values():
            kind = stats.get("kind", "unknown")
            if kind not in {"property", "invariant"}:
                kind = "unknown"
            property_kind_summary[kind] += 1
            property_kind_summary["all"] += 1
        data["property_kind_summary"] = property_kind_summary

        # Calculate properties statistics (exclude invariants from summary counts)
        data["all_properties_count"] = sum(
            1 for result in enriched_property_stats.values()
            if result.get("kind") != "invariant"
        )
        data["executed_properties_count"] = sum(
            1 for result in enriched_property_stats.values()
            if result.get("kind") != "invariant" and result.get("executed", 0) > 0
        )

        # Calculate detailed property statistics for table headers
        property_stats_summary = self._calculate_property_stats_summary(enriched_property_stats)
        data["property_stats_summary"] = property_stats_summary

        # Process coverage data
        data["coverage_trend"] = self.cov_trend

        if self.cov_trend:
            final_trend = self.cov_trend[-1]
            data["coverage"] = final_trend["coverage"]
            data["total_activities"] = final_trend["totalActivities"]
            data["tested_activities"] = final_trend["testedActivities"]
            data["total_activities_count"] = final_trend["totalActivitiesCount"]
            data["tested_activities_count"] = final_trend["testedActivitiesCount"]
            data["activity_count_history"] = final_trend["activityCountHistory"]

        # Generate the property execution trend aligned with the coverage trend
        data["property_execution_trend"] = self._generate_property_execution_trend(executed_properties_by_step)

        # Generate the Property Violations list
        self._generate_property_violations_list(property_violations, data)

        # Load error details for properties with fail/error state
        data["property_error_details"] = self._load_property_error_details()

        # Load crash and ANR events from crash-dump.log
        crash_events, anr_events = self._load_crash_dump_data()

        # Add screenshot ID information to crash and ANR events
        self._add_screenshot_ids_to_events(crash_events)
        self._add_screenshot_ids_to_events(anr_events)

        data["crash_events"] = crash_events
        data["anr_events"] = anr_events

        return data

    def _parse_step_data(self, raw_step_info: str) -> Optional[StepData]:
        """Decode one line of steps.log into a StepData dict; return None for unparseable lines."""
        try:
            step_data: StepData = json.loads(raw_step_info)
        except json.JSONDecodeError:
            # Blank or malformed lines are skipped via the caller's falsiness check
            return None
        if step_data.get("Type") in {"Monkey", "Script", "ScriptInfo"}:
            info = step_data.get("Info")
            if isinstance(info, str):
                stripped = info.strip()
                if stripped and stripped[0] in "{[":
                    step_data["Info"] = json.loads(stripped)
        return step_data
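
_parse_step_data tolerates an Info field that arrives either as an object or as a JSON document embedded in a string; the sketch below reproduces that normalization on a hypothetical ScriptInfo record (all values invented, not taken from a real log).

```python
import json

# Hypothetical ScriptInfo record whose Info payload is itself JSON-encoded as a string.
inner = {"propName": "prop_demo", "state": "start", "kind": "property"}
raw_line = json.dumps({
    "Type": "ScriptInfo",
    "MonkeyStepsCount": 7,
    "Time": "2024-01-01 12:00:01.000",
    "Info": json.dumps(inner),
    "Screenshot": "",
})

step = json.loads(raw_line)
info = step["Info"]
if isinstance(info, str):
    stripped = info.strip()
    if stripped and stripped[0] in "{[":
        info = json.loads(stripped)  # second decode, as _parse_step_data does

assert info == inner
```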

    @catchException("Error rendering template")
    def _generate_html_report(self, data: ReportData):
        """
        Generate HTML format bug report
        """
        # Format timestamp for display
        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")

        # Ensure coverage_trend has data
        if not data.get("coverage_trend"):
            logger.warning("No coverage trend data")
            # Use the same field names as in the coverage.log file
            data["coverage_trend"] = [{"stepsCount": 0, "coverage": 0, "testedActivitiesCount": 0}]

        # Convert coverage_trend to a JSON string, ensuring all data points are included
        coverage_trend_json = json.dumps(data["coverage_trend"])
        logger.debug(f"Number of coverage trend data points: {len(data['coverage_trend'])}")

        # Prepare template data
        template_data = {
            'timestamp': timestamp,
            'test_time': data.get("test_time", ""),
            'log_stamp': data.get("timestamp", ""),
            'bugs_found': data["bugs_found"],
            'invariant_violations_count': data["invariant_violations_count"],
            'total_testing_time': data["total_testing_time"],
            'executed_events': data["executed_events"],
            'coverage_percent': round(data["coverage"], 2),
            'total_activities_count': data["total_activities_count"],
            'tested_activities_count': data["tested_activities_count"],
            'tested_activities': data["tested_activities"],
            'total_activities': data["total_activities"],
            'all_properties_count': data["all_properties_count"],
            'executed_properties_count': data["executed_properties_count"],
            'items_per_page': 10,  # Items to display per page
            'screenshots': self.screenshots,
            'property_violations': data["property_violations"],
            'property_stats': data["property_stats"],
            'property_error_details': data["property_error_details"],
            'property_kind_summary': data.get("property_kind_summary", {}),
            'coverage_data': coverage_trend_json,
            'take_screenshots': self.take_screenshots,  # Pass screenshot setting to template
            'property_execution_trend': data["property_execution_trend"],
            'property_execution_data': json.dumps(data["property_execution_trend"]),
            'activity_count_history': data["activity_count_history"],
            'crash_events': data["crash_events"],
            'anr_events': data["anr_events"],
            'triggered_crash_count': len(data["crash_events"]),
            'triggered_anr_count': len(data["anr_events"]),
            'property_stats_summary': data["property_stats_summary"],
            'kill_apps_events': data.get("kill_apps_events", []),
        }

        # Warn if the bundled template file is missing
        template_path = Path(__file__).parent / "templates" / "bug_report_template.html"
        if not template_path.exists():
            logger.warning("Template file does not exist")

        # Use Jinja2 to render the template
        template = self.jinja_env.get_template("bug_report_template.html")
        html_content = template.render(**template_data)

        return html_content

    def _process_script_info(
        self,
        property_name: str,
        state: str,
        kind: str,
        step_index: int,
        screenshot: str,
        current_property: str,
        current_test: Dict,
        property_violations: Dict,
    ) -> Tuple:
        """
        Process a ScriptInfo step for property violations tracking

        Args:
            property_name: Property name from ScriptInfo
            state: State from ScriptInfo (start, pass, fail, error)
            kind: Kind from ScriptInfo (property, invariant)
            step_index: Current step index
            screenshot: Screenshot filename
            current_property: Currently tracked property
            current_test: Current test data
            property_violations: Dictionary to store violations

        Returns:
            tuple: (updated_current_property, updated_current_test)
        """
        if property_name and state:
            if state == "start":
                # Record new test start
                current_property = property_name
                current_test = {
                    "start": step_index,
                    "end": None,
                    "screenshot_start": screenshot
                }
            elif state in ["pass", "fail", "error"]:
                if current_property == property_name:
                    # Update test end information
                    current_test["end"] = step_index
                    current_test["screenshot_end"] = screenshot

                    if state == "fail" or state == "error":
                        # Record failed/error test
                        if property_name not in property_violations:
                            property_violations[property_name] = []

                        property_violations[property_name].append({
                            "start": current_test["start"],
                            "end": current_test["end"],
                            "screenshot_start": current_test["screenshot_start"],
                            "screenshot_end": screenshot,
                            "state": state
                        })

                    # Reset current test
                    current_property = None
                    current_test = {}
                elif state in ["fail", "error"] and kind == "invariant":
                    if property_name not in property_violations:
                        property_violations[property_name] = []

                    property_violations[property_name].append({
                        "start": step_index,
                        "end": step_index,
                        "screenshot_start": screenshot,
                        "screenshot_end": screenshot,
                        "state": state
                    })

        return current_property, current_test
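
A sketch of how _process_script_info pairs a start event with a later fail for the same property into one violation record. It pokes the private helper directly, purely for illustration; the property name, step indices, screenshot names, and result directory are invented, and the directory does not need to exist because __init__ only stores the path.

```python
from kea2.report.bug_report_generator import BugReportGenerator

gen = BugReportGenerator(result_dir="results/res_demo")  # hypothetical directory
violations = {}

# "start" of a property at step 3 ...
cur_prop, cur_test = gen._process_script_info(
    "prop_demo", "start", "property", 3, "screen-3.png", None, {}, violations)

# ... followed by a "fail" of the same property at step 5.
cur_prop, cur_test = gen._process_script_info(
    "prop_demo", "fail", "property", 5, "screen-5.png", cur_prop, cur_test, violations)

assert violations == {"prop_demo": [{
    "start": 3, "end": 5,
    "screenshot_start": "screen-3.png", "screenshot_end": "screen-5.png",
    "state": "fail",
}]}
assert cur_prop is None and cur_test == {}
```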

    def _generate_property_violations_list(self, property_violations: Dict, data: Dict):
        """
        Generate property violations list from collected violation data

        Args:
            property_violations: Dictionary containing property violations
            data: Data dictionary to update with property violations list
        """
        if property_violations:
            index = 1
            for property_name, violations in property_violations.items():
                kind = "unknown"
                property_stats = data.get("property_stats", {})
                if isinstance(property_stats, dict):
                    kind = property_stats.get(property_name, {}).get("kind", "unknown")
                for violation in violations:
                    start_step = violation["start"]
                    end_step = violation["end"]
                    data["property_violations"].append({
                        "index": index,
                        "property_name": property_name,
                        "interaction_pages": [start_step, end_step],
                        "state": violation.get("state", "fail"),
                        "kind": kind,
                    })
                    index += 1

    def _load_property_error_details(self) -> Dict[str, List[Dict]]:
        """
        Load property execution error details from property_exec_info file

        Returns:
            Dict[str, List[Dict]]: Mapping of property names to their error tracebacks with context
        """
        if not self.data_path.property_exec_info.exists():
            logger.warning(f"Property exec info file {self.data_path.property_exec_info} not found")
            return {}

        try:
            property_exec_infos = self._parse_property_exec_infos()
            return self._group_errors_by_property(property_exec_infos)

        except Exception as e:
            logger.error(f"Error reading property exec info file: {e}")
            return {}

    def _parse_property_exec_infos(self) -> List[PropertyExecInfo]:
        """Parse property execution info from file"""
        exec_infos = []

        with open(self.data_path.property_exec_info, "r", encoding="utf-8") as f:
            for line_number, line in enumerate(f, 1):
                line = line.strip()
                if not line:
                    continue

                try:
                    exec_info_data = json.loads(line)
                    prop_name = exec_info_data.get("propName", "")
                    state = exec_info_data.get("state", "")
                    tb = exec_info_data.get("tb", "")
                    start_steps_count = exec_info_data.get("startStepsCount", 0)

                    exec_info = PropertyExecInfo(
                        prop_name=prop_name,
                        state=state,
                        traceback=tb,
                        start_steps_count=start_steps_count
                    )

                    if exec_info.is_error_state() and prop_name and tb:
                        exec_infos.append(exec_info)

                except json.JSONDecodeError as e:
                    logger.warning(f"Failed to parse property exec info line {line_number}: {line[:100]}... Error: {e}")
                    continue

        return exec_infos

    def _group_errors_by_property(self, exec_infos: List[PropertyExecInfo]) -> Dict[str, List[Dict]]:
        """Group errors by property name and deduplicate"""
        error_details = {}

        for exec_info in exec_infos:
            prop_name = exec_info.prop_name

            if prop_name not in error_details:
                error_details[prop_name] = {}

            error_hash = exec_info.get_error_hash()

            if error_hash in error_details[prop_name]:
                # Error already exists, add occurrence
                error_details[prop_name][error_hash].add_occurrence(exec_info.start_steps_count)
            else:
                # New error, create entry
                error_details[prop_name][error_hash] = exec_info

        # Convert to template-compatible format
        result = {}
        for prop_name, hash_dict in error_details.items():
            result[prop_name] = []
            for exec_info in hash_dict.values():
                result[prop_name].append({
                    "state": exec_info.state,
                    "traceback": exec_info.traceback,
                    "occurrence_count": exec_info.occurrence_count,
                    "short_description": exec_info.short_description,
                    "startStepsCountList": exec_info.start_steps_count_list
                })

            # Sort by earliest startStepsCount, then by occurrence count (descending)
            result[prop_name].sort(key=lambda x: (min(x["startStepsCountList"]), -x["occurrence_count"]))

        return result

    def _generate_property_execution_trend(self, executed_properties_by_step: Dict[int, set]) -> List[Dict]:
        """
        Generate property execution trend aligned with coverage trend

        Args:
            executed_properties_by_step: Dictionary containing executed properties at each step

        Returns:
            List[Dict]: Property execution trend data aligned with coverage trend
        """
        property_execution_trend = []

        # Get step points from coverage trend to ensure alignment
        coverage_step_points = []
        if self.cov_trend:
            coverage_step_points = [cov_data["stepsCount"] for cov_data in self.cov_trend]

        # If no coverage data, use property execution data points
        if not coverage_step_points and executed_properties_by_step:
            coverage_step_points = sorted(executed_properties_by_step.keys())

        # Generate property execution data for each coverage step point
        for step_count in coverage_step_points:
            # Find the latest executed properties count up to this step
            executed_count = 0
            latest_step = 0

            for exec_step in executed_properties_by_step.keys():
                if exec_step <= step_count and exec_step >= latest_step:
                    latest_step = exec_step
                    executed_count = len(executed_properties_by_step[exec_step])

            property_execution_trend.append({
                "stepsCount": step_count,
                "executedPropertiesCount": executed_count
            })

        return property_execution_trend
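
The alignment performed by _generate_property_execution_trend can be illustrated with invented numbers: coverage snapshots exist at steps 100 and 200, properties were recorded as executed by steps 50 and 150, and each coverage step point picks up the latest count at or before it. As above, this pokes private attributes on a throwaway instance purely for illustration.

```python
from kea2.report.bug_report_generator import BugReportGenerator

gen = BugReportGenerator(result_dir="results/res_demo")      # hypothetical directory
gen._cov_trend = [{"stepsCount": 100}, {"stepsCount": 200}]  # pretend coverage snapshots

trend = gen._generate_property_execution_trend({
    50: {"prop_a"},             # one property executed by step 50
    150: {"prop_a", "prop_b"},  # two executed by step 150
})

assert trend == [
    {"stepsCount": 100, "executedPropertiesCount": 1},
    {"stepsCount": 200, "executedPropertiesCount": 2},
]
```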

    def _calculate_property_stats_summary(self, test_result: TestResult) -> Dict[str, int]:
        """
        Calculate summary statistics for property checking table headers

        Args:
            test_result: Test result data containing property statistics

        Returns:
            Dict: Summary statistics for each column
        """
        stats_summary = {
            "total_properties": 0,
            "total_precond_satisfied": 0,
            "total_executed": 0,
            "total_passes": 0,
            "total_fails": 0,
            "total_errors": 0,
            "properties_with_errors": 0
        }

        for property_name, result in test_result.items():
            executed_count = result.get("executed", result.get("executed_total", 0))
            fail_count = result.get("fail", 0)
            error_count = result.get("error", 0)
            pass_count = result.get("pass_count",
                                    max(executed_count - fail_count - error_count, 0))

            stats_summary["total_properties"] += 1
            stats_summary["total_precond_satisfied"] += result.get("precond_satisfied", 0)
            stats_summary["total_executed"] += executed_count
            stats_summary["total_passes"] += pass_count
            stats_summary["total_fails"] += fail_count
            stats_summary["total_errors"] += error_count

            # Count properties that have errors or fails
            if fail_count > 0 or error_count > 0:
                stats_summary["properties_with_errors"] += 1

        return stats_summary

    def _load_crash_dump_data(self) -> Tuple[List[Dict], List[Dict]]:
        """
        Load crash and ANR events from crash-dump.log file

        Returns:
            tuple: (crash_events, anr_events) - Lists of crash and ANR event dictionaries
        """
        crash_events = []
        anr_events = []

        if not self.data_path.crash_dump_log.exists():
            logger.info("No crash was found in this run.")
            return crash_events, anr_events

        try:
            with open(self.data_path.crash_dump_log, "r", encoding="utf-8") as f:
                content = f.read()

            # Parse crash events with screenshot mapping
            crash_events = self._parse_crash_events_with_screenshots(content)

            # Parse ANR events with screenshot mapping
            anr_events = self._parse_anr_events_with_screenshots(content)

            logger.debug(f"Found {len(crash_events)} crash events and {len(anr_events)} ANR events")

            return crash_events, anr_events

        except Exception as e:
            logger.error(f"Error reading crash dump file: {e}")
            return crash_events, anr_events

    def _find_screenshot_id_by_filename(self, screenshot_filename: str) -> str:
        """
        Find screenshot ID by filename in the screenshots list

        Args:
            screenshot_filename: Name of the screenshot file

        Returns:
            str: Screenshot ID if found, empty string otherwise
        """
        if not screenshot_filename:
            return ""

        for screenshot in self.screenshots:
            # Extract filename from path
            screenshot_path = screenshot.get('path', '')
            if screenshot_path.endswith(screenshot_filename):
                return str(screenshot.get('id', ''))

        return ""

    def _add_screenshot_ids_to_events(self, events: List[Dict]):
        """
        Add screenshot ID information to crash/ANR events

        Args:
            events: List of crash or ANR event dictionaries
        """
        for event in events:
            crash_screen = event.get('crash_screen')
            if crash_screen:
                screenshot_id = self._find_screenshot_id_by_filename(crash_screen)
                event['screenshot_id'] = screenshot_id
            else:
                event['screenshot_id'] = ""