Kea2-python 0.1.0b0__py3-none-any.whl → 0.1.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of Kea2-python might be problematic. Click here for more details.

Binary file
kea2/assets/monkeyq.jar CHANGED
Binary file
kea2/assets/quicktest.py CHANGED
@@ -81,7 +81,9 @@ if __name__ == "__main__":
81
81
  Driver=U2Driver,
82
82
  packageNames=[PACKAGE_NAME],
83
83
  # serial="emulator-5554", # specify the serial
84
- maxStep=5000,
84
+ maxStep=50,
85
+ profile_period=10,
86
+ take_screenshots=True, # whether to take screenshots, default is False
85
87
  # running_mins=10, # specify the maximal running time in minutes, default value is 10m
86
88
  # throttle=200, # specify the throttle in milliseconds, default value is 200ms
87
89
  agent="u2" # 'native' for running the vanilla Fastbot, 'u2' for running Kea2
@@ -0,0 +1,479 @@
1
+ import os
2
+ import json
3
+ import datetime
4
+ import re
5
+ from pathlib import Path
6
+ import shutil
7
+ from jinja2 import Environment, FileSystemLoader, select_autoescape, PackageLoader
8
+ from .utils import getLogger
9
+
10
+ logger = getLogger(__name__)
11
+
12
+
13
class BugReportGenerator:
    """
    Generate an HTML bug report from a Kea2/Fastbot test result directory.

    Expected directory layout (names derived from the run timestamp)::

        res_<timestamp>/
            fastbot_*.log
            result_*.json
            output_<timestamp>/
                steps.log
                coverage.log
                screenshots/screenshot-<n>.png
    """

    def __init__(self, result_dir):
        """
        Initialize the bug report generator.

        Args:
            result_dir: Directory path containing test results. The directory
                name is expected to end with "_<timestamp>".
        """
        self.result_dir = Path(result_dir)
        # Extract the timestamp suffix defensively: fall back to the whole
        # directory name instead of raising IndexError when no "_" is present.
        name_parts = self.result_dir.name.split("_", 1)
        self.log_timestamp = name_parts[1] if len(name_parts) > 1 else name_parts[0]
        self.screenshots_dir = self.result_dir / f"output_{self.log_timestamp}" / "screenshots"
        self.take_screenshots = self._detect_screenshots_setting()

        # Set up the Jinja2 environment. Prefer templates bundled with the
        # installed "kea2" package; fall back to a "templates" folder next to
        # this file (created on demand) when the package loader is unusable.
        try:
            self.jinja_env = Environment(
                loader=PackageLoader("kea2", "templates"),
                autoescape=select_autoescape(['html', 'xml'])
            )
        except (ImportError, ValueError):
            current_dir = Path(__file__).parent
            templates_dir = current_dir / "templates"
            if not templates_dir.exists():
                templates_dir.mkdir(parents=True, exist_ok=True)
            self.jinja_env = Environment(
                loader=FileSystemLoader(templates_dir),
                autoescape=select_autoescape(['html', 'xml'])
            )

    def generate_report(self):
        """
        Generate the bug report and save it as ``bug_report.html`` in the
        result directory.

        Errors are logged rather than raised so report generation can never
        break the surrounding test run.
        """
        try:
            logger.debug("Starting bug report generation")

            test_data = self._collect_test_data()
            html_content = self._generate_html_report(test_data)

            report_path = self.result_dir / "bug_report.html"
            with open(report_path, "w", encoding="utf-8") as f:
                f.write(html_content)

            logger.info(f"Bug report saved to: {report_path}")
        except Exception as e:
            logger.error(f"Error generating bug report: {e}")

    def _collect_test_data(self):
        """
        Collect test data: executed events, property violations, timing,
        per-property statistics, coverage trend and screenshot metadata.

        Returns:
            dict with all fields required by the HTML template; every field
            has a safe default so a missing/corrupt input file only degrades
            the report instead of aborting it.
        """
        data = {
            "timestamp": self.log_timestamp,
            "bugs_found": 0,
            "preconditions_satisfied": 0,
            "executed_events": 0,
            "total_testing_time": 0,
            "first_bug_time": 0,
            "first_precondition_time": 0,
            "coverage": 0,
            "total_activities": [],
            "tested_activities": [],
            "property_violations": [],
            "property_stats": [],
            "screenshots_count": 0,
            "screenshot_info": {},  # per-screenshot {type, caption}
            "coverage_trend": []    # list of {steps, coverage, tested_activities_count}
        }

        # Count screenshots (sorted numerically by the index in the file name).
        if self.screenshots_dir.exists():
            screenshots = sorted(self.screenshots_dir.glob("screenshot-*.png"),
                                 key=lambda x: int(x.name.split("-")[1].split(".")[0]))
            data["screenshots_count"] = len(screenshots)

        # Parse steps.log to map test step numbers to screenshots.
        steps_log_path = self.result_dir / f"output_{self.log_timestamp}" / "steps.log"
        # {property_name: [{start, end, screenshot_start, screenshot_end}, ...]}
        property_violations = {}

        first_precond_time = None  # Time of first ScriptInfo entry with state=start
        first_fail_time = None     # Time of first ScriptInfo entry with state=fail

        if steps_log_path.exists():
            steps = []
            with open(steps_log_path, "r", encoding="utf-8") as f:
                for line in f:
                    try:
                        step_data = json.loads(line)
                    except json.JSONDecodeError:
                        # Skip malformed lines; never abort the whole report.
                        continue
                    steps.append(step_data)

                    # Extract timing info from ScriptInfo entries.
                    if step_data.get("Type") == "ScriptInfo":
                        try:
                            raw_info = step_data.get("Info", "{}")
                            info = json.loads(raw_info) if isinstance(raw_info, str) else (raw_info or {})
                            state = info.get("state", "")

                            # First state=start marks the first satisfied precondition.
                            if state == "start" and first_precond_time is None:
                                first_precond_time = step_data.get("Time")
                            # First state=fail marks the first found bug.
                            elif state == "fail" and first_fail_time is None:
                                first_fail_time = step_data.get("Time")
                        except Exception as e:
                            logger.error(f"Error parsing ScriptInfo: {e}")

            # Number of executed Monkey events.
            data["executed_events"] = sum(1 for step in steps if step.get("Type") == "Monkey")

            # Collect a caption for each screenshot (first occurrence wins).
            for step in steps:
                step_type = step.get("Type", "")
                screenshot = step.get("Screenshot", "")
                info = step.get("Info", "{}")

                if screenshot and screenshot not in data["screenshot_info"]:
                    try:
                        info_obj = json.loads(info) if isinstance(info, str) else info
                        caption = ""

                        if step_type == "Monkey":
                            # 'act' attribute, lower-cased, e.g. "click"
                            caption = f"{info_obj.get('act', 'N/A').lower()}"
                        elif step_type == "Script":
                            caption = f"{info_obj.get('method', 'N/A')}"
                        elif step_type == "ScriptInfo":
                            prop_name = info_obj.get('propName', '')
                            state = info_obj.get('state', 'N/A')
                            caption = f"{prop_name} {state}" if prop_name else f"{state}"

                        data["screenshot_info"][screenshot] = {
                            "type": step_type,
                            "caption": caption
                        }
                    except Exception as e:
                        logger.error(f"Error parsing screenshot info: {e}")
                        data["screenshot_info"][screenshot] = {
                            "type": step_type,
                            "caption": step_type
                        }

            # Pair up start/fail ScriptInfo entries into violation records.
            current_property = None
            current_test = {}

            # 1-based so step indices line up with screenshot numbering.
            for i, step in enumerate(steps, 1):
                if step.get("Type") != "ScriptInfo":
                    continue
                try:
                    info = json.loads(step.get("Info", "{}"))
                    property_name = info.get("propName", "")
                    state = info.get("state", "")
                    screenshot = step.get("Screenshot", "")

                    if property_name and state:
                        if state == "start":
                            # A new property check begins here.
                            current_property = property_name
                            current_test = {
                                "start": i,
                                "end": None,
                                "screenshot_start": screenshot
                            }
                        elif state == "fail" or state == "pass":
                            if current_property == property_name:
                                current_test["end"] = i
                                current_test["screenshot_end"] = screenshot

                                if state == "fail":
                                    # Record the failed check as a violation.
                                    property_violations.setdefault(property_name, []).append({
                                        "start": current_test["start"],
                                        "end": current_test["end"],
                                        "screenshot_start": current_test["screenshot_start"],
                                        "screenshot_end": screenshot
                                    })

                            # Reset tracking after any terminal state.
                            current_property = None
                            current_test = {}
                except Exception as e:
                    # Malformed entry: skip it, never abort the report.
                    logger.debug(f"Skipping malformed ScriptInfo entry: {e}")

        # Derive total testing time from the fastbot log timestamps.
        start_time = None
        fastbot_log_path = list(self.result_dir.glob("fastbot_*.log"))
        if fastbot_log_path:
            try:
                with open(fastbot_log_path[0], "r", encoding="utf-8") as f:
                    log_content = f.read()

                # Test start time: the "@Version" banner timestamp.
                start_match = re.search(
                    r'\[Fastbot\]\[(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3})\] @Version',
                    log_content)
                if start_match:
                    start_time = datetime.datetime.strptime(
                        start_match.group(1), "%Y-%m-%d %H:%M:%S.%f")

                # Test end time: the last [Fastbot] timestamp in the log.
                end_matches = re.findall(
                    r'\[Fastbot\]\[(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3})\]',
                    log_content)
                end_time = None
                if end_matches:
                    end_time = datetime.datetime.strptime(
                        end_matches[-1], "%Y-%m-%d %H:%M:%S.%f")

                if start_time and end_time:
                    data["total_testing_time"] = int((end_time - start_time).total_seconds())
            except Exception as e:
                logger.error(f"Error parsing fastbot log file: {e}")

        # first_bug_time / first_precondition_time are offsets from start_time.
        if start_time:
            if first_precond_time:
                try:
                    precond_time = datetime.datetime.strptime(
                        first_precond_time, "%Y-%m-%d %H:%M:%S.%f")
                    data["first_precondition_time"] = int((precond_time - start_time).total_seconds())
                except Exception as e:
                    logger.error(f"Error parsing precond_time: {e}")

            if first_fail_time:
                try:
                    fail_time = datetime.datetime.strptime(
                        first_fail_time, "%Y-%m-%d %H:%M:%S.%f")
                    data["first_bug_time"] = int((fail_time - start_time).total_seconds())
                except Exception as e:
                    logger.error(f"Error parsing fail_time: {e}")

        # Parse result_*.json for per-property statistics.
        result_json_path = list(self.result_dir.glob("result_*.json"))
        property_stats = {}  # {property_name: counters dict}

        if result_json_path:
            try:
                with open(result_json_path[0], "r", encoding="utf-8") as f:
                    result_data = json.load(f)
            except (OSError, json.JSONDecodeError) as e:
                # A corrupt result file degrades the report instead of killing it.
                logger.error(f"Error reading result file: {e}")
                result_data = {}

            for property_name, test_result in result_data.items():
                stats = property_stats.setdefault(property_name, {
                    "precond_satisfied": 0,
                    "precond_checked": 0,
                    "postcond_violated": 0,
                    "error": 0
                })

                # Counters come straight from the result_*.json entries.
                stats["precond_satisfied"] += test_result.get("precond_satisfied", 0)
                stats["precond_checked"] += test_result.get("executed", 0)
                stats["postcond_violated"] += test_result.get("fail", 0)
                stats["error"] += test_result.get("error", 0)

                # A property counts as a found bug when it failed or errored.
                if test_result.get("fail", 0) > 0 or test_result.get("error", 0) > 0:
                    data["bugs_found"] += 1

                data["preconditions_satisfied"] += test_result.get("precond_satisfied", 0)

        # Parse coverage trend data.
        coverage_log_path = self.result_dir / f"output_{self.log_timestamp}" / "coverage.log"
        if coverage_log_path.exists():
            with open(coverage_log_path, "r", encoding="utf-8") as f:
                lines = f.readlines()
            if lines:
                for line in lines:
                    if not line.strip():
                        continue
                    try:
                        coverage_data = json.loads(line)
                        data["coverage_trend"].append({
                            "steps": coverage_data.get("stepsCount", 0),
                            "coverage": coverage_data.get("coverage", 0),
                            "tested_activities_count": len(coverage_data.get("testedActivities", []))
                        })
                    except Exception as e:
                        logger.error(f"Error parsing coverage data: {e}")
                        continue

                # Ensure trend points are sorted by step count.
                data["coverage_trend"].sort(key=lambda x: x["steps"])

                # Final (cumulative) coverage: scan backwards so a trailing
                # blank or truncated line does not lose the final numbers.
                for raw in reversed(lines):
                    raw = raw.strip()
                    if not raw:
                        continue
                    try:
                        coverage_data = json.loads(raw)
                    except json.JSONDecodeError as e:
                        logger.error(f"Error parsing final coverage data: {e}")
                        continue
                    data["coverage"] = coverage_data.get("coverage", 0)
                    data["total_activities"] = coverage_data.get("totalActivities", [])
                    data["tested_activities"] = coverage_data.get("testedActivities", [])
                    break

        # Flatten violation records into template rows.
        index = 1
        for property_name, violations in property_violations.items():
            for violation in violations:
                start_step = violation["start"]
                end_step = violation["end"]
                data["property_violations"].append({
                    "index": index,
                    "property_name": property_name,
                    "precondition_page": start_step,
                    "interaction_pages": [start_step, end_step],
                    "postcondition_page": end_step
                })
                index += 1

        # Flatten per-property statistics into template rows.
        for index, (property_name, stats) in enumerate(property_stats.items(), 1):
            data["property_stats"].append({
                "index": index,
                "property_name": property_name,
                "precond_satisfied": stats["precond_satisfied"],
                "precond_checked": stats["precond_checked"],
                "postcond_violated": stats["postcond_violated"],
                "error": stats["error"]
            })

        return data

    def _detect_screenshots_setting(self):
        """
        Detect whether screenshots were enabled during the test run.

        Returns:
            True if screenshots were taken, False otherwise.
        """
        # Method 1: the screenshots directory exists and has content.
        if self.screenshots_dir.exists() and any(self.screenshots_dir.glob("screenshot-*.png")):
            return True

        # Method 2: the fastbot log recorded a takeScreenshots=true config.
        fastbot_log_path = list(self.result_dir.glob("fastbot_*.log"))
        if fastbot_log_path:
            try:
                with open(fastbot_log_path[0], "r", encoding="utf-8") as f:
                    log_content = f.read()
                if '"takeScreenshots": true' in log_content:
                    return True
            except Exception:
                # Unreadable log: fall through and assume screenshots are off.
                pass

        return False

    def _generate_html_report(self, data):
        """
        Render the collected data into HTML via the Jinja2 template.

        Args:
            data: dict produced by :meth:`_collect_test_data`.

        Returns:
            Rendered HTML string.

        Raises:
            Exception: template lookup/render errors are logged and re-raised
                so generate_report() can report the failure.
        """
        try:
            # Prepare screenshot entries with paths relative to the report file.
            screenshots = []
            relative_path = f"output_{self.log_timestamp}/screenshots"

            if self.screenshots_dir.exists():
                screenshot_files = sorted(self.screenshots_dir.glob("screenshot-*.png"),
                                          key=lambda x: int(x.name.split("-")[1].split(".")[0]))

                for i, screenshot in enumerate(screenshot_files, 1):
                    screenshot_name = screenshot.name

                    # Default caption is the index; enrich with step info if known.
                    caption = f"{i}"
                    if screenshot_name in data["screenshot_info"]:
                        info = data["screenshot_info"][screenshot_name]
                        caption = f"{i}. {info.get('caption', '')}"

                    screenshots.append({
                        'id': i,
                        'path': f"{relative_path}/{screenshot_name}",
                        'caption': caption
                    })

            # Report generation time (not the test run time).
            timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")

            # The coverage chart needs at least one data point.
            if not data["coverage_trend"]:
                logger.warning("No coverage trend data")
                data["coverage_trend"] = [{"steps": 0, "coverage": 0, "tested_activities_count": 0}]

            # Serialize the full trend for the template's chart script.
            coverage_trend_json = json.dumps(data["coverage_trend"])
            logger.debug(f"Number of coverage trend data points: {len(data['coverage_trend'])}")

            template_data = {
                'timestamp': timestamp,
                'bugs_found': data["bugs_found"],
                'total_testing_time': data["total_testing_time"],
                'executed_events': data["executed_events"],
                'coverage_percent': round(data["coverage"], 2),
                'first_bug_time': data["first_bug_time"],
                'first_precondition_time': data["first_precondition_time"],
                'total_activities_count': len(data["total_activities"]),
                'tested_activities_count': len(data["tested_activities"]),
                'tested_activities': data["tested_activities"],  # list of tested Activities
                'total_activities': data["total_activities"],    # list of all Activities
                'items_per_page': 10,  # items to display per page
                'screenshots': screenshots,
                'property_violations': data["property_violations"],
                'property_stats': data["property_stats"],
                'coverage_data': coverage_trend_json,
                'take_screenshots': self.take_screenshots  # toggles gallery in template
            }

            # NOTE(review): nothing here actually creates a default template;
            # if the file is missing, get_template() below raises.
            template_path = Path(__file__).parent / "templates" / "bug_report_template.html"
            if not template_path.exists():
                logger.warning("Template file does not exist, creating default template...")

            template = self.jinja_env.get_template("bug_report_template.html")
            html_content = template.render(**template_data)

            return html_content

        except Exception as e:
            logger.error(f"Error rendering template: {e}")
            raise
kea2/fastbotManager.py ADDED
@@ -0,0 +1,155 @@
1
+ from dataclasses import asdict
2
+ import subprocess
3
+ import threading
4
+ import requests
5
+ from time import sleep
6
+ from .adbUtils import push_file
7
+ from pathlib import Path
8
+ from .utils import getLogger
9
+
10
+ from typing import IO, TYPE_CHECKING
11
+ if TYPE_CHECKING:
12
+ from .keaUtils import Options
13
+
14
+
15
+ logger = getLogger(__name__)
16
+
17
+
18
class FastbotManager:
    """
    Push the Fastbot/monkey assets to a device and run Fastbot via
    ``adb shell`` in a background thread, logging its output to a file.
    """

    def __init__(self, options: "Options", log_file: str):
        """
        Args:
            options: the running settings for fastbot (packages, serial,
                agent, throttle, running time, ...).
            log_file: path of the file that receives the fastbot output.
        """
        self.options: "Options" = options
        self.log_file: str = log_file
        self.port = None
        self.thread = None
        # Exit code of the fastbot process; None until it has terminated.
        # Initialized here so get_return_code() cannot raise AttributeError
        # when start() was never called.
        self.return_code = None

    def _activateFastbot(self) -> threading.Thread:
        """
        Activate fastbot: push all required jars and native libraries to the
        device, then launch the Fastbot service.

        :return: the fastbot daemon thread
        """
        options = self.options
        cur_dir = Path(__file__).parent

        # Jars referenced by the CLASSPATH in _startFastbotService().
        jar_assets = (
            ("assets/monkeyq.jar", "/sdcard/monkeyq.jar"),
            ("assets/fastbot-thirdpart.jar", "/sdcard/fastbot-thirdpart.jar"),
            ("assets/kea2-thirdpart.jar", "/sdcard/kea2-thirdpart.jar"),
            ("assets/framework.jar", "/sdcard/framework.jar"),
        )
        for src, dst in jar_assets:
            push_file(cur_dir / src, dst, device=options.serial)

        # Native libraries for every supported ABI.
        for abi in ("arm64-v8a", "armeabi-v7a", "x86", "x86_64"):
            push_file(
                cur_dir / f"assets/fastbot_libs/{abi}",
                "/data/local/tmp",
                device=options.serial,
            )

        t = self._startFastbotService()
        logger.info("Running Fastbot...")

        return t

    def check_alive(self, port):
        """
        Check if the script driver and proxy server are alive by polling
        the local /ping endpoint (up to 10 tries, ~20 seconds total).

        :param port: the listening port of the script driver.
        :raises RuntimeError: if the server never becomes reachable.
        """
        for _ in range(10):
            sleep(2)
            try:
                # Bounded timeout so a wedged server cannot hang us forever.
                requests.get(f"http://localhost:{port}/ping", timeout=5)
                return
            except (requests.ConnectionError, requests.Timeout):
                logger.info("waiting for connection.")
        raise RuntimeError("Failed to connect fastbot")

    def _startFastbotService(self) -> threading.Thread:
        """
        Build the adb/monkey command line, spawn the fastbot process with its
        output redirected to self.log_file, and start a daemon thread that
        waits for the process to exit.

        :return: the daemon thread watching the process.
        """
        shell_command = [
            # All pushed jars must be on the CLASSPATH of app_process.
            "CLASSPATH="
            "/sdcard/monkeyq.jar:"
            "/sdcard/framework.jar:"
            "/sdcard/fastbot-thirdpart.jar:"
            "/sdcard/kea2-thirdpart.jar",

            "exec", "app_process",
            "/system/bin", "com.android.commands.monkey.Monkey",
            "-p", *self.options.packageNames,
            # u2 agent enables the Kea2 script driver; otherwise vanilla Fastbot.
            "--agent-u2" if self.options.agent == "u2" else "--agent",
            "reuseq",
            "--running-minutes", f"{self.options.running_mins}",
            "--throttle", f"{self.options.throttle}",
            "--bugreport",
        ]

        if self.options.profile_period:
            shell_command += ["--profile-period", f"{self.options.profile_period}"]

        shell_command += ["-v", "-v", "-v"]

        full_cmd = ["adb"] + (["-s", self.options.serial] if self.options.serial else []) + ["shell"] + shell_command

        # Line-buffered so the log is readable while fastbot is running.
        outfile = open(self.log_file, "w", encoding="utf-8", buffering=1)

        logger.info("Options info: {}".format(asdict(self.options)))
        logger.info("Launching fastbot with shell command:\n{}".format(" ".join(full_cmd)))
        logger.info("Fastbot log will be saved to {}".format(outfile.name))

        # Spawn the process; the watcher thread owns the log file handle.
        proc = subprocess.Popen(full_cmd, stdout=outfile, stderr=outfile)
        t = threading.Thread(target=self.close_on_exit, args=(proc, outfile), daemon=True)
        t.start()

        return t

    def close_on_exit(self, proc: subprocess.Popen, f: IO):
        """
        Wait for the fastbot process, close its log file, and raise inside
        this watcher thread when fastbot exited with a non-zero code.
        """
        self.return_code = proc.wait()
        f.close()
        if self.return_code != 0:
            raise RuntimeError(f"Fastbot Error: Terminated with [code {self.return_code}] See {self.log_file} for details.")

    def get_return_code(self):
        """
        Block until fastbot has exited and return its exit code
        (None when start() was never called).
        """
        if self.thread:
            logger.info("Waiting for Fastbot to exit.")
            self.thread.join()
        return self.return_code

    def start(self):
        """Push assets and launch fastbot; keeps the watcher thread handle."""
        self.thread = self._activateFastbot()

    def join(self):
        """Wait for the fastbot watcher thread to finish, if it was started."""
        if self.thread:
            self.thread.join()