Kea2-python 0.1.0b0__py3-none-any.whl → 0.1.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of Kea2-python might be problematic; see the registry's advisory page for more details.

kea2/assets/monkeyq.jar CHANGED
Binary file
@@ -0,0 +1,477 @@
1
+ import os
2
+ import json
3
+ import datetime
4
+ import re
5
+ from pathlib import Path
6
+ import shutil
7
+ from jinja2 import Environment, FileSystemLoader, select_autoescape, PackageLoader
8
+ from .utils import getLogger
9
+
10
+ logger = getLogger(__name__)
11
+
12
+
13
+ class BugReportGenerator:
14
+ """
15
+ Generate HTML format bug reports
16
+ """
17
+
18
    def __init__(self, result_dir):
        """
        Initialize the bug report generator.

        Args:
            result_dir: Directory path containing test results. The directory
                name is expected to contain an underscore followed by a
                timestamp; the part after the first underscore becomes the
                log timestamp used to locate the ``output_<timestamp>`` folder.
        """
        self.result_dir = Path(result_dir)
        # NOTE(review): assumes the directory name contains at least one "_";
        # a name without one raises IndexError here — confirm callers guarantee it.
        self.log_timestamp = self.result_dir.name.split("_", 1)[1]
        self.screenshots_dir = self.result_dir / f"output_{self.log_timestamp}" / "screenshots"
        self.take_screenshots = self._detect_screenshots_setting()

        # Set up Jinja2 environment.
        # First try to load templates bundled with the installed "kea2" package.
        try:
            self.jinja_env = Environment(
                loader=PackageLoader("kea2", "templates"),
                autoescape=select_autoescape(['html', 'xml'])
            )
        except (ImportError, ValueError):
            # If unable to load from the package, fall back to the "templates"
            # folder next to this module.
            current_dir = Path(__file__).parent
            templates_dir = current_dir / "templates"

            # Ensure the template directory exists so FileSystemLoader has a root.
            if not templates_dir.exists():
                templates_dir.mkdir(parents=True, exist_ok=True)

            self.jinja_env = Environment(
                loader=FileSystemLoader(templates_dir),
                autoescape=select_autoescape(['html', 'xml'])
            )

        # If template file doesn't exist, it will be created on first report generation
53
+ def generate_report(self):
54
+ """
55
+ Generate bug report and save to result directory
56
+ """
57
+ try:
58
+ logger.debug("Starting bug report generation")
59
+
60
+ # Collect test data
61
+ test_data = self._collect_test_data()
62
+
63
+ # Generate HTML report
64
+ html_content = self._generate_html_report(test_data)
65
+
66
+ # Save report
67
+ report_path = self.result_dir / "bug_report.html"
68
+ with open(report_path, "w", encoding="utf-8") as f:
69
+ f.write(html_content)
70
+
71
+ logger.debug(f"Bug report generated: {report_path}")
72
+
73
+ except Exception as e:
74
+ logger.error(f"Error generating bug report: {e}")
75
+
76
    def _collect_test_data(self):
        """
        Collect test data, including results, coverage, etc.

        Aggregates into a single dict information read from the run's
        artifacts inside ``self.result_dir``:

        - ``steps.log``      -> executed events, screenshot captions,
                                property-violation intervals, timing markers
        - ``fastbot_*.log``  -> overall start/end time of the run
        - ``result_*.json``  -> per-property statistics and bug count
        - ``coverage.log``   -> activity-coverage trend and final coverage

        Returns:
            dict: the template data later consumed by ``_generate_html_report``.
        """
        data = {
            "timestamp": self.log_timestamp,
            "bugs_found": 0,
            "preconditions_satisfied": 0,
            "executed_events": 0,
            "total_testing_time": 0,
            "first_bug_time": 0,
            "first_precondition_time": 0,
            "coverage": 0,
            "total_activities": [],
            "tested_activities": [],
            "property_violations": [],
            "property_stats": [],
            "screenshots_count": 0,
            "screenshot_info": {},  # Store detailed information for each screenshot
            "coverage_trend": []  # Store coverage trend data
        }

        # Get screenshot count (files are named screenshot-<n>.png; sort numerically)
        if self.screenshots_dir.exists():
            screenshots = sorted(self.screenshots_dir.glob("screenshot-*.png"),
                                 key=lambda x: int(x.name.split("-")[1].split(".")[0]))
            data["screenshots_count"] = len(screenshots)

        # Parse steps.log file to get test step numbers and screenshot mappings
        steps_log_path = self.result_dir / f"output_{self.log_timestamp}" / "steps.log"
        property_violations = {}  # {property_name: [{start, end, screenshot_start, screenshot_end}, ...]}
        start_screenshot = None  # Screenshot name at the start of testing
        fail_screenshot = None  # Screenshot name at test failure

        # For storing time data
        first_precond_time = None  # Time of the first ScriptInfo entry with state=start
        first_fail_time = None  # Time of the first ScriptInfo entry with state=fail

        if steps_log_path.exists():
            with open(steps_log_path, "r", encoding="utf-8") as f:
                # First read all steps (one JSON object per line)
                steps = []

                for line in f:
                    try:
                        step_data = json.loads(line)
                        steps.append(step_data)

                        # Extract time from ScriptInfo entries
                        if step_data.get("Type") == "ScriptInfo":
                            try:
                                # "Info" may be a JSON string or an already-decoded dict
                                info = json.loads(step_data.get("Info", "{}")) if isinstance(step_data.get("Info"), str) else step_data.get("Info", {})
                                state = info.get("state", "")

                                # Record the first ScriptInfo with state=start as precondition time
                                if state == "start" and first_precond_time is None:
                                    first_precond_time = step_data.get("Time")

                                # Record the first ScriptInfo with state=fail as fail time
                                elif state == "fail" and first_fail_time is None:
                                    first_fail_time = step_data.get("Time")
                            except Exception as e:
                                logger.error(f"Error parsing ScriptInfo: {e}")
                    except:
                        # Malformed log lines are skipped silently
                        pass

            # Calculate number of Monkey events
            monkey_events_count = sum(1 for step in steps if step.get("Type") == "Monkey")
            data["executed_events"] = monkey_events_count

            # Track current test state while walking the steps
            current_property = None
            current_test = {}

            # Collect detailed information (type + caption) for each screenshot
            for step in steps:
                step_type = step.get("Type", "")
                screenshot = step.get("Screenshot", "")
                info = step.get("Info", "{}")

                if screenshot and screenshot not in data["screenshot_info"]:
                    try:
                        info_obj = json.loads(info) if isinstance(info, str) else info
                        caption = ""

                        if step_type == "Monkey":
                            # Extract 'act' attribute for Monkey type and convert to lowercase
                            caption = f"{info_obj.get('act', 'N/A').lower()}"
                        elif step_type == "Script":
                            # Extract 'method' attribute for Script type
                            caption = f"{info_obj.get('method', 'N/A')}"
                        elif step_type == "ScriptInfo":
                            # Extract 'propName' and 'state' attributes for ScriptInfo type
                            prop_name = info_obj.get('propName', '')
                            state = info_obj.get('state', 'N/A')
                            caption = f"{prop_name} {state}" if prop_name else f"{state}"

                        data["screenshot_info"][screenshot] = {
                            "type": step_type,
                            "caption": caption
                        }
                    except Exception as e:
                        # Fall back to the bare step type when Info can't be parsed
                        logger.error(f"Error parsing screenshot info: {e}")
                        data["screenshot_info"][screenshot] = {
                            "type": step_type,
                            "caption": step_type
                        }

            # Find start and end step numbers and corresponding screenshots for all tests
            for i, step in enumerate(steps, 1):  # Start counting from 1 to match screenshot numbering
                if step.get("Type") == "ScriptInfo":
                    try:
                        info = json.loads(step.get("Info", "{}"))
                        property_name = info.get("propName", "")
                        state = info.get("state", "")
                        screenshot = step.get("Screenshot", "")

                        if property_name and state:
                            if state == "start":
                                # Record new test start
                                current_property = property_name
                                current_test = {
                                    "start": i,
                                    "end": None,
                                    "screenshot_start": screenshot
                                }
                                # Record screenshot at test start
                                if not start_screenshot and screenshot:
                                    start_screenshot = screenshot

                            elif state == "fail" or state == "pass":
                                if current_property == property_name:
                                    # Update test end information
                                    current_test["end"] = i
                                    current_test["screenshot_end"] = screenshot

                                    if state == "fail":
                                        # Record failed test
                                        if property_name not in property_violations:
                                            property_violations[property_name] = []

                                        property_violations[property_name].append({
                                            "start": current_test["start"],
                                            "end": current_test["end"],
                                            "screenshot_start": current_test["screenshot_start"],
                                            "screenshot_end": screenshot
                                        })

                                        # Record screenshot at test failure
                                        if not fail_screenshot and screenshot:
                                            fail_screenshot = screenshot

                                    # Reset current test
                                    current_property = None
                                    current_test = {}
                    except:
                        # Malformed ScriptInfo entries are ignored
                        pass

        # Calculate test time
        start_time = None

        # Parse fastbot log file to get start time
        fastbot_log_path = list(self.result_dir.glob("fastbot_*.log"))
        if fastbot_log_path:
            try:
                with open(fastbot_log_path[0], "r", encoding="utf-8") as f:
                    log_content = f.read()

                # Extract test start time (timestamp of the @Version banner line)
                start_match = re.search(r'\[Fastbot\]\[(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3})\] @Version',
                                        log_content)
                if start_match:
                    start_time_str = start_match.group(1)
                    start_time = datetime.datetime.strptime(start_time_str, "%Y-%m-%d %H:%M:%S.%f")

                # Extract test end time (last timestamp in the log)
                end_matches = re.findall(r'\[Fastbot\]\[(\d{4}-\d{2}-\d{2} \d{2}:\d{2}:\d{2}\.\d{3})\]',
                                         log_content)
                end_time = None
                if end_matches:
                    end_time_str = end_matches[-1]
                    end_time = datetime.datetime.strptime(end_time_str, "%Y-%m-%d %H:%M:%S.%f")

                # Calculate total test time (in seconds)
                if start_time and end_time:
                    data["total_testing_time"] = int((end_time - start_time).total_seconds())
            except Exception as e:
                logger.error(f"Error parsing fastbot log file: {e}")
                logger.error(f"Error details: {str(e)}")

        # Calculate first_bug_time and first_precondition_time from steps.log data
        if start_time:
            # If first_precond_time exists, calculate first_precondition_time
            if first_precond_time:
                try:
                    precond_time = datetime.datetime.strptime(first_precond_time, "%Y-%m-%d %H:%M:%S.%f")
                    data["first_precondition_time"] = int((precond_time - start_time).total_seconds())
                except Exception as e:
                    logger.error(f"Error parsing precond_time: {e}")

            # If first_fail_time exists, calculate first_bug_time
            if first_fail_time:
                try:
                    fail_time = datetime.datetime.strptime(first_fail_time, "%Y-%m-%d %H:%M:%S.%f")
                    data["first_bug_time"] = int((fail_time - start_time).total_seconds())
                except Exception as e:
                    logger.error(f"Error parsing fail_time: {e}")

        # Parse result file
        result_json_path = list(self.result_dir.glob("result_*.json"))
        property_stats = {}  # Store property names and corresponding statistics

        if result_json_path:
            with open(result_json_path[0], "r", encoding="utf-8") as f:
                result_data = json.load(f)

                # Calculate bug count and get property names
                for property_name, test_result in result_data.items():
                    # Extract property name (last part of test name)

                    # Initialize property statistics
                    if property_name not in property_stats:
                        property_stats[property_name] = {
                            "precond_satisfied": 0,
                            "precond_checked": 0,
                            "postcond_violated": 0,
                            "error": 0
                        }

                    # Extract statistics directly from result_*.json file
                    property_stats[property_name]["precond_satisfied"] += test_result.get("precond_satisfied", 0)
                    property_stats[property_name]["precond_checked"] += test_result.get("executed", 0)
                    property_stats[property_name]["postcond_violated"] += test_result.get("fail", 0)
                    property_stats[property_name]["error"] += test_result.get("error", 0)

                    # A property counts as a bug when it failed or errored
                    if test_result.get("fail", 0) > 0 or test_result.get("error", 0) > 0:
                        data["bugs_found"] += 1

                    data["preconditions_satisfied"] += test_result.get("precond_satisfied", 0)
                    # data["executed_events"] += test_result.get("executed", 0)

        # Parse coverage data
        coverage_log_path = self.result_dir / f"output_{self.log_timestamp}" / "coverage.log"
        if coverage_log_path.exists():
            with open(coverage_log_path, "r", encoding="utf-8") as f:
                lines = f.readlines()
                if lines:
                    # Collect coverage trend data (one JSON object per line)
                    for line in lines:
                        try:
                            coverage_data = json.loads(line)
                            data["coverage_trend"].append({
                                "steps": coverage_data.get("stepsCount", 0),
                                "coverage": coverage_data.get("coverage", 0),
                                "tested_activities_count": len(coverage_data.get("testedActivities", []))
                            })
                        except Exception as e:
                            logger.error(f"Error parsing coverage data: {e}")
                            continue

                    # Ensure sorted by steps
                    data["coverage_trend"].sort(key=lambda x: x["steps"])

                    try:
                        # Read last line to get final coverage data
                        coverage_data = json.loads(lines[-1])
                        data["coverage"] = coverage_data.get("coverage", 0)
                        data["total_activities"] = coverage_data.get("totalActivities", [])
                        data["tested_activities"] = coverage_data.get("testedActivities", [])
                    except Exception as e:
                        logger.error(f"Error parsing final coverage data: {e}")

        # Generate Property Violations list
        if property_violations:
            index = 1
            for property_name, violations in property_violations.items():
                for violation in violations:
                    start_step = violation["start"]
                    end_step = violation["end"]
                    data["property_violations"].append({
                        "index": index,
                        "property_name": property_name,
                        "precondition_page": start_step,
                        "interaction_pages": [start_step, end_step],
                        "postcondition_page": end_step
                    })
                    index += 1

        # Generate Property Stats list
        if property_stats:
            index = 1
            for property_name, stats in property_stats.items():
                data["property_stats"].append({
                    "index": index,
                    "property_name": property_name,
                    "precond_satisfied": stats["precond_satisfied"],
                    "precond_checked": stats["precond_checked"],
                    "postcond_violated": stats["postcond_violated"],
                    "error": stats["error"]
                })
                index += 1

        return data
381
+ def _detect_screenshots_setting(self):
382
+ """
383
+ Detect if screenshots were enabled during test run.
384
+ Returns True if screenshots were taken, False otherwise.
385
+ """
386
+ # Method 1: Check if screenshots directory exists and has content
387
+ if self.screenshots_dir.exists() and any(self.screenshots_dir.glob("screenshot-*.png")):
388
+ return True
389
+
390
+ # Method 2: Try to read init config from logs
391
+ fastbot_log_path = list(self.result_dir.glob("fastbot_*.log"))
392
+ if fastbot_log_path:
393
+ try:
394
+ with open(fastbot_log_path[0], "r", encoding="utf-8") as f:
395
+ log_content = f.read()
396
+ if '"takeScreenshots": true' in log_content:
397
+ return True
398
+ except Exception:
399
+ pass
400
+
401
+ return False
402
+
403
    def _generate_html_report(self, data):
        """
        Generate the HTML bug report from the collected test data.

        Args:
            data: dict produced by ``_collect_test_data``.

        Returns:
            str: the rendered HTML document.

        Raises:
            Exception: re-raised after logging if template rendering fails.
        """
        try:
            # Prepare screenshot data (paths are relative to the report file)
            screenshots = []
            relative_path = f"output_{self.log_timestamp}/screenshots"

            if self.screenshots_dir.exists():
                # Sort numerically by the <n> in screenshot-<n>.png
                screenshot_files = sorted(self.screenshots_dir.glob("screenshot-*.png"),
                                          key=lambda x: int(x.name.split("-")[1].split(".")[0]))

                for i, screenshot in enumerate(screenshot_files, 1):
                    screenshot_name = screenshot.name

                    # Get caption information for this screenshot, falling back
                    # to the bare sequence number when none was collected.
                    caption = f"{i}"
                    if screenshot_name in data["screenshot_info"]:
                        info = data["screenshot_info"][screenshot_name]
                        caption = f"{i}. {info.get('caption', '')}"

                    screenshots.append({
                        'id': i,
                        'path': f"{relative_path}/{screenshot_name}",
                        'caption': caption
                    })

            # Format report-generation timestamp for display
            timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")

            # Ensure coverage_trend has data (chart code expects at least one point)
            if not data["coverage_trend"]:
                logger.warning("No coverage trend data")
                data["coverage_trend"] = [{"steps": 0, "coverage": 0, "tested_activities_count": 0}]

            # Convert coverage_trend to a JSON string, ensuring all data points are included
            coverage_trend_json = json.dumps(data["coverage_trend"])
            logger.debug(f"Number of coverage trend data points: {len(data['coverage_trend'])}")

            # Prepare template data
            template_data = {
                'timestamp': timestamp,
                'bugs_found': data["bugs_found"],
                'total_testing_time': data["total_testing_time"],
                'executed_events': data["executed_events"],
                'coverage_percent': round(data["coverage"], 2),
                'first_bug_time': data["first_bug_time"],
                'first_precondition_time': data["first_precondition_time"],
                'total_activities_count': len(data["total_activities"]),
                'tested_activities_count': len(data["tested_activities"]),
                'tested_activities': data["tested_activities"],  # Pass list of tested Activities
                'total_activities': data["total_activities"],  # Pass list of all Activities
                'items_per_page': 10,  # Items to display per page
                'screenshots': screenshots,
                'property_violations': data["property_violations"],
                'property_stats': data["property_stats"],
                'coverage_data': coverage_trend_json,
                'take_screenshots': self.take_screenshots  # Pass screenshot setting to template
            }

            # Check if template exists, if not create it
            # NOTE(review): despite the message below, nothing here actually
            # creates a default template — rendering will fail in get_template
            # when the file is truly missing. Confirm whether creation was
            # intended to happen elsewhere.
            template_path = Path(__file__).parent / "templates" / "bug_report_template.html"
            if not template_path.exists():
                logger.warning("Template file does not exist, creating default template...")

            # Use Jinja2 to render the template
            template = self.jinja_env.get_template("bug_report_template.html")
            html_content = template.render(**template_data)

            return html_content

        except Exception as e:
            logger.error(f"Error rendering template: {e}")
            raise
kea2/fastbotManager.py ADDED
@@ -0,0 +1,145 @@
1
+ from dataclasses import asdict
2
+ import subprocess
3
+ import threading
4
+ import requests
5
+ from time import sleep
6
+ from .adbUtils import push_file
7
+ from pathlib import Path
8
+ from .utils import getLogger
9
+
10
+ from typing import IO, TYPE_CHECKING
11
+ if TYPE_CHECKING:
12
+ from .keaUtils import Options
13
+
14
+
15
+ logger = getLogger(__name__)
16
+
17
+
18
+ class FastbotManager:
19
+ def __init__(self, options: "Options", log_file: str):
20
+ self.options:"Options" = options
21
+ self.log_file: str = log_file
22
+ self.port = None
23
+ self.thread = None
24
+
25
+
26
+ def _activateFastbot(self) -> threading.Thread:
27
+ """
28
+ activate fastbot.
29
+ :params: options: the running setting for fastbot
30
+ :params: port: the listening port for script driver
31
+ :return: the fastbot daemon thread
32
+ """
33
+ options = self.options
34
+ cur_dir = Path(__file__).parent
35
+ push_file(
36
+ Path.joinpath(cur_dir, "assets/monkeyq.jar"),
37
+ "/sdcard/monkeyq.jar",
38
+ device=options.serial
39
+ )
40
+ push_file(
41
+ Path.joinpath(cur_dir, "assets/fastbot-thirdpart.jar"),
42
+ "/sdcard/fastbot-thirdpart.jar",
43
+ device=options.serial,
44
+ )
45
+ push_file(
46
+ Path.joinpath(cur_dir, "assets/framework.jar"),
47
+ "/sdcard/framework.jar",
48
+ device=options.serial
49
+ )
50
+ push_file(
51
+ Path.joinpath(cur_dir, "assets/fastbot_libs/arm64-v8a"),
52
+ "/data/local/tmp",
53
+ device=options.serial
54
+ )
55
+ push_file(
56
+ Path.joinpath(cur_dir, "assets/fastbot_libs/armeabi-v7a"),
57
+ "/data/local/tmp",
58
+ device=options.serial
59
+ )
60
+ push_file(
61
+ Path.joinpath(cur_dir, "assets/fastbot_libs/x86"),
62
+ "/data/local/tmp",
63
+ device=options.serial
64
+ )
65
+ push_file(
66
+ Path.joinpath(cur_dir, "assets/fastbot_libs/x86_64"),
67
+ "/data/local/tmp",
68
+ device=options.serial
69
+ )
70
+
71
+ t = self._startFastbotService()
72
+ logger.info("Running Fastbot...")
73
+
74
+ return t
75
+
76
+
77
+ def check_alive(self, port):
78
+ """
79
+ check if the script driver and proxy server are alive.
80
+ """
81
+ for _ in range(10):
82
+ sleep(2)
83
+ try:
84
+ requests.get(f"http://localhost:{port}/ping")
85
+ return
86
+ except requests.ConnectionError:
87
+ logger.info("waiting for connection.")
88
+ pass
89
+ raise RuntimeError("Failed to connect fastbot")
90
+
91
+
92
    def _startFastbotService(self) -> threading.Thread:
        """
        Build the adb shell command that runs fastbot on the device and
        launch it as a subprocess.

        The subprocess's stdout/stderr are redirected into ``self.log_file``.
        A daemon thread is started that waits for the process to exit and
        closes the log file (see ``close_on_exit``).

        Returns:
            threading.Thread: the watcher daemon thread.
        """
        # The on-device command: run the Monkey entry point from the jars
        # previously pushed by _activateFastbot.
        shell_command = [
            "CLASSPATH=/sdcard/monkeyq.jar:/sdcard/framework.jar:/sdcard/fastbot-thirdpart.jar",
            "exec", "app_process",
            "/system/bin", "com.android.commands.monkey.Monkey",
            "-p", *self.options.packageNames,
            # NOTE: "reuseq" follows whichever agent flag was chosen above.
            "--agent-u2" if self.options.agent == "u2" else "--agent",
            "reuseq",
            "--running-minutes", f"{self.options.running_mins}",
            "--throttle", f"{self.options.throttle}",
            "--bugreport",
        ]

        if self.options.profile_period:
            shell_command += ["--profile-period", f"{self.options.profile_period}"]

        # Maximum verbosity.
        shell_command += ["-v", "-v", "-v"]

        # Target a specific device only when a serial was configured.
        full_cmd = ["adb"] + (["-s", self.options.serial] if self.options.serial else []) + ["shell"] + shell_command

        # Line-buffered so the log can be tailed while fastbot is running.
        outfile = open(self.log_file, "w", encoding="utf-8", buffering=1)

        logger.info("Options info: {}".format(asdict(self.options)))
        logger.info("Launching fastbot with shell command:\n{}".format(" ".join(full_cmd)))
        logger.info("Fastbot log will be saved to {}".format(outfile.name))

        # process handler
        proc = subprocess.Popen(full_cmd, stdout=outfile, stderr=outfile)
        t = threading.Thread(target=self.close_on_exit, args=(proc, outfile), daemon=True)
        t.start()

        return t
124
+
125
+ def close_on_exit(self, proc: subprocess.Popen, f: IO):
126
+ self.return_code = proc.wait()
127
+ f.close()
128
+ if self.return_code != 0:
129
+ raise RuntimeError(f"Fastbot Error: Terminated with [code {self.return_code}]")
130
+
131
+ def get_return_code(self):
132
+ if self.thread:
133
+ logger.info("Waiting for Fastbot to exit.")
134
+ self.thread.join()
135
+ return self.return_code
136
+
137
    def start(self):
        """Push assets to the device and launch fastbot; keeps the watcher thread."""
        self.thread = self._activateFastbot()
139
+
140
+ def join(self):
141
+ if self.thread:
142
+ self.thread.join()
143
+
144
+
145
+