robotframework-pabot 5.1.0__py3-none-any.whl → 5.2.0rc1__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.
- pabot/ProcessManager.py +415 -0
- pabot/__init__.py +1 -1
- pabot/arguments.py +83 -26
- pabot/pabot.py +603 -218
- pabot/result_merger.py +13 -3
- pabot/robotremoteserver.py +27 -7
- pabot/writer.py +235 -0
- {robotframework_pabot-5.1.0.dist-info → robotframework_pabot-5.2.0rc1.dist-info}/METADATA +70 -21
- robotframework_pabot-5.2.0rc1.dist-info/RECORD +23 -0
- {robotframework_pabot-5.1.0.dist-info → robotframework_pabot-5.2.0rc1.dist-info}/WHEEL +1 -1
- robotframework_pabot-5.1.0.dist-info/RECORD +0 -22
- robotframework_pabot-5.1.0.dist-info/licenses/LICENSE.txt +0 -202
- {robotframework_pabot-5.1.0.dist-info → robotframework_pabot-5.2.0rc1.dist-info}/entry_points.txt +0 -0
- {robotframework_pabot-5.1.0.dist-info → robotframework_pabot-5.2.0rc1.dist-info}/top_level.txt +0 -0
pabot/pabot.py
CHANGED
@@ -48,6 +48,7 @@ from glob import glob
 from io import BytesIO, StringIO
 from multiprocessing.pool import ThreadPool
 from natsort import natsorted
+from pathlib import Path
 
 from robot import __version__ as ROBOT_VERSION
 from robot import rebot
@@ -83,6 +84,7 @@ from .execution_items import (
     create_dependency_tree,
 )
 from .result_merger import merge
+from .writer import get_writer, get_stdout_writer, get_stderr_writer, ThreadSafeWriter, MessageWriter
 
 try:
     import queue  # type: ignore
@@ -100,21 +102,27 @@ try:
 except ImportError:
     METADATA_AVAILABLE = False
 
-from typing import
+from typing import Any, Dict, List, Optional, Tuple, Union
 
 CTRL_C_PRESSED = False
-MESSAGE_QUEUE = queue.Queue()
-EXECUTION_POOL_IDS = []  # type: List[int]
-EXECUTION_POOL_ID_LOCK = threading.Lock()
-POPEN_LOCK = threading.Lock()
 _PABOTLIBURI = "127.0.0.1:8270"
 _PABOTLIBPROCESS = None  # type: Optional[subprocess.Popen]
+_PABOTWRITER = None  # type: Optional[MessageWriter]
 _NUMBER_OF_ITEMS_TO_BE_EXECUTED = 0
 _ABNORMAL_EXIT_HAPPENED = False
+_PABOTCONSOLE = "verbose"  # type: str
 
 _COMPLETED_LOCK = threading.Lock()
 _NOT_COMPLETED_INDEXES = []  # type: List[int]
 
+# Thread-local storage for tracking executor number assigned to each thread
+_EXECUTOR_THREAD_LOCAL = threading.local()
+# Next executor number to assign (incremented each time a task is submitted)
+_EXECUTOR_COUNTER = 0
+_EXECUTOR_COUNTER_LOCK = threading.Lock()
+# Maximum number of executors (workers in the thread pool)
+_MAX_EXECUTORS = 1
+
 _ROBOT_EXTENSIONS = [
     ".html",
     ".htm",
@@ -130,6 +138,15 @@ _ALL_ELAPSED = [] # type: List[Union[int, float]]
 # Python version check for supporting importlib.metadata (requires Python 3.8+)
 IS_PYTHON_3_8_OR_NEWER = sys.version_info >= (3, 8)
 
+_PROCESS_MANAGER = None
+
+def _ensure_process_manager():
+    global _PROCESS_MANAGER
+    if _PROCESS_MANAGER is None:
+        from pabot.ProcessManager import ProcessManager
+        _PROCESS_MANAGER = ProcessManager()
+    return _PROCESS_MANAGER
+
 
 def read_args_from_readme():
     """Reads a specific section from package METADATA or development README.md if available."""
@@ -214,8 +231,34 @@ class Color:
     YELLOW = "\033[93m"
 
 
+def _get_next_executor_num():
+    """Get the next executor number in round-robin fashion."""
+    global _EXECUTOR_COUNTER, _MAX_EXECUTORS
+    with _EXECUTOR_COUNTER_LOCK:
+        executor_num = _EXECUTOR_COUNTER % _MAX_EXECUTORS
+        _EXECUTOR_COUNTER += 1
+    return executor_num
+
+
+def _set_executor_num(executor_num):
+    """Set the executor number for the current thread."""
+    _EXECUTOR_THREAD_LOCAL.executor_num = executor_num
+
+
+def _get_executor_num():
+    """Get the executor number for the current thread."""
+    return getattr(_EXECUTOR_THREAD_LOCAL, 'executor_num', 0)
+
+
+def _execute_item_with_executor_tracking(item):
+    """Wrapper to track executor number and call execute_and_wait_with."""
+    executor_num = _get_next_executor_num()
+    _set_executor_num(executor_num)
+    return execute_and_wait_with(item)
+
+
 def execute_and_wait_with(item):
-    # type: ('QueueItem') ->
+    # type: ('QueueItem') -> int
     global CTRL_C_PRESSED, _NUMBER_OF_ITEMS_TO_BE_EXECUTED
     is_last = _NUMBER_OF_ITEMS_TO_BE_EXECUTED == 1
     _NUMBER_OF_ITEMS_TO_BE_EXECUTED -= 1
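The block above replaces the old `_make_id` (removed further down in this diff), which derived a pool id from the worker's thread identity, with an explicit round-robin counter remembered in thread-local state. A standalone sketch of the same pattern, with illustrative names (`next_slot`, `MAX_SLOTS` are not pabot's):

    import threading
    from multiprocessing.pool import ThreadPool

    _local = threading.local()      # each worker thread sees its own slot
    _counter = 0
    _counter_lock = threading.Lock()
    MAX_SLOTS = 4                   # assumed pool size

    def next_slot():
        # Hand out slot numbers round-robin, one per submitted task.
        global _counter
        with _counter_lock:
            slot = _counter % MAX_SLOTS
            _counter += 1
        return slot

    def run_task(name):
        _local.slot = next_slot()   # remember this task's slot on the thread
        return f"{name} ran on executor {getattr(_local, 'slot', 0)}"

    pool = ThreadPool(MAX_SLOTS)
    print(pool.map(run_task, ["a", "b", "c", "d"]))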
@@ -234,6 +277,7 @@ def execute_and_wait_with(item):
         run_cmd, run_options = _create_command_for_execution(
             caller_id, datasources, is_last, item, outs_dir
         )
+        rc = 0
         if item.hive:
             _hived_execute(
                 item.hive,
@@ -241,18 +285,18 @@ def execute_and_wait_with(item):
                 outs_dir,
                 name,
                 item.verbose,
-
+                _get_executor_num(),
                 caller_id,
                 item.index,
             )
         else:
-            _try_execute_and_wait(
+            rc = _try_execute_and_wait(
                 run_cmd,
                 run_options,
                 outs_dir,
                 name,
                 item.verbose,
-
+                _get_executor_num(),
                 caller_id,
                 item.index,
                 item.execution_item.type != "test",
@@ -260,10 +304,11 @@ def execute_and_wait_with(item):
             sleep_before_start=item.sleep_before_start
         )
         outputxml_preprocessing(
-            item.options, outs_dir, name, item.verbose,
+            item.options, outs_dir, name, item.verbose, _get_executor_num(), caller_id, item.index
         )
     except:
-        _write(traceback.format_exc())
+        _write(traceback.format_exc(), level="error")
+    return rc
 
 
 def _create_command_for_execution(caller_id, datasources, is_last, item, outs_dir):
@@ -281,6 +326,7 @@ def _create_command_for_execution(caller_id, datasources, is_last, item, outs_di
             item.index,
             item.last_level,
             item.processes,
+            item.skip,
         )
         + datasources
     )
@@ -299,7 +345,7 @@ def _hived_execute(
     try:
         make_order(hive, " ".join(cmd), outs_dir)
     except:
-        _write(traceback.format_exc())
+        _write(traceback.format_exc(), level="error")
     if plib:
         _increase_completed(plib, my_index)
 
@@ -317,7 +363,7 @@ def _try_execute_and_wait(
     process_timeout=None,
     sleep_before_start=0
 ):
-    # type: (List[str], List[str], str, str, bool, int, str, int, bool, Optional[int], int) ->
+    # type: (List[str], List[str], str, str, bool, int, str, int, bool, Optional[int], int) -> int
     plib = None
     is_ignored = False
     if _pabotlib_in_use():
@@ -339,7 +385,7 @@ def _try_execute_and_wait(
             sleep_before_start
         )
     except:
-        _write(traceback.format_exc())
+        _write(traceback.format_exc(), level="error")
     if plib:
         _increase_completed(plib, my_index)
         is_ignored = _is_ignored(plib, caller_id)
@@ -360,6 +406,7 @@ def _try_execute_and_wait(
         )
     if is_ignored and os.path.isdir(outs_dir):
         _rmtree_with_path(outs_dir)
+    return rc
 
 
 def _result_to_stdout(
@@ -381,6 +428,7 @@ def _result_to_stdout(
             pool_id,
             my_index,
             _execution_ignored_message(item_name, stdout, stderr, elapsed, verbose),
+            level="info_ignored",
         )
     elif rc != 0:
         _write_with_id(
@@ -391,6 +439,7 @@ def _result_to_stdout(
                 item_name, stdout, stderr, rc, verbose or show_stdout_on_failure
             ),
             Color.RED,
+            level="info_failed",
         )
     else:
         _write_with_id(
@@ -399,6 +448,7 @@ def _result_to_stdout(
             my_index,
             _execution_passed_message(item_name, stdout, stderr, elapsed, verbose),
             Color.GREEN,
+            level="info_passed",
         )
 
 
@@ -474,25 +524,16 @@ def outputxml_preprocessing(options, outs_dir, item_name, verbose, pool_id, call
         print(sys.exc_info())
 
 
-def _write_with_id(process, pool_id, item_index, message, color=None, timestamp=None):
+def _write_with_id(process, pool_id, item_index, message, color=None, timestamp=None, level="debug"):
     timestamp = timestamp or datetime.datetime.now()
     _write(
         "%s [PID:%s] [%s] [ID:%s] %s"
         % (timestamp, process.pid, pool_id, item_index, message),
         color,
+        level=level,
     )
 
 
-def _make_id():  # type: () -> int
-    global EXECUTION_POOL_IDS, EXECUTION_POOL_ID_LOCK
-    thread_id = threading.current_thread().ident
-    assert thread_id is not None
-    with EXECUTION_POOL_ID_LOCK:
-        if thread_id not in EXECUTION_POOL_IDS:
-            EXECUTION_POOL_IDS += [thread_id]
-        return EXECUTION_POOL_IDS.index(thread_id)
-
-
 def _increase_completed(plib, my_index):
     # type: (Remote, int) -> None
     global _COMPLETED_LOCK, _NOT_COMPLETED_INDEXES
@@ -556,88 +597,39 @@ def _run(
     process_timeout,
     sleep_before_start,
 ):
-    # type: (List[str], List[str], IO[Any], IO[Any], str, bool, int, int, str, Optional[int], int) -> Tuple[Union[subprocess.Popen[bytes], subprocess.Popen], Tuple[int, float]]
     timestamp = datetime.datetime.now()
+
     if sleep_before_start > 0:
-        _write(
-            "%s [%s] [ID:%s] SLEEPING %s SECONDS BEFORE STARTING %s"
-            % (timestamp, pool_id, item_index, sleep_before_start, item_name),
-        )
+        _write(f"{timestamp} [{pool_id}] [ID:{item_index}] SLEEPING {sleep_before_start} SECONDS BEFORE STARTING {item_name}")
         time.sleep(sleep_before_start)
-
+
     command_name = run_command[-1].replace(" ", "_")
     argfile_path = os.path.join(outs_dir, f"{command_name}_argfile.txt")
     _write_internal_argument_file(run_options, filename=argfile_path)
-    cmd = ' '.join(run_command + ['-A'] + [argfile_path])
-    if PY2:
-        cmd = cmd.decode("utf-8").encode(SYSTEM_ENCODING)
-    # avoid hitting https://bugs.python.org/issue10394
-    with POPEN_LOCK:
-        my_env = os.environ.copy()
-        syslog_file = my_env.get("ROBOT_SYSLOG_FILE", None)
-        if syslog_file:
-            my_env["ROBOT_SYSLOG_FILE"] = os.path.join(
-                outs_dir, os.path.basename(syslog_file)
-            )
-        process = subprocess.Popen(
-            cmd, shell=True, stderr=stderr, stdout=stdout, env=my_env
-        )
-    if verbose:
-        _write_with_id(
-            process,
-            pool_id,
-            item_index,
-            "EXECUTING PARALLEL %s with command:\n%s" % (item_name, cmd),
-            timestamp=timestamp,
-        )
-    else:
-        _write_with_id(
-            process,
-            pool_id,
-            item_index,
-            "EXECUTING %s" % item_name,
-            timestamp=timestamp,
-        )
-    return process, _wait_for_return_code(
-        process, item_name, pool_id, item_index, process_timeout
-    )
-
-
-def _wait_for_return_code(process, item_name, pool_id, item_index, process_timeout):
-    rc = None
-    elapsed = 0
-    ping_time = ping_interval = 150
-    while rc is None:
-        rc = process.poll()
-        time.sleep(0.1)
-        elapsed += 1
-
-        if process_timeout and elapsed / 10.0 >= process_timeout:
-            process.terminate()
-            process.wait()
-            rc = (
-                -1
-            )  # Set a return code indicating that the process was killed due to timeout
-            _write_with_id(
-                process,
-                pool_id,
-                item_index,
-                "Process %s killed due to exceeding the maximum timeout of %s seconds"
-                % (item_name, process_timeout),
-            )
-            break
 
-
-
-
-
-
-
-
-
+    cmd = run_command + ['-A', argfile_path]
+    my_env = os.environ.copy()
+    syslog_file = my_env.get("ROBOT_SYSLOG_FILE", None)
+    if syslog_file:
+        my_env["ROBOT_SYSLOG_FILE"] = os.path.join(outs_dir, os.path.basename(syslog_file))
+
+    log_path = os.path.join(outs_dir, f"{command_name}_{item_index}.log")
+
+    manager = _ensure_process_manager()
+    process, (rc, elapsed) = manager.run(
+        cmd,
+        env=my_env,
+        stdout=stdout,
+        stderr=stderr,
+        timeout=process_timeout,
+        verbose=verbose,
+        item_name=item_name,
+        log_file=log_path,
+        pool_id=pool_id,
+        item_index=item_index,
+    )
 
-    return rc, elapsed
+    return process, (rc, elapsed)
 
 
 def _read_file(file_handle):
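The polling loop and timeout handling that used to live in `_wait_for_return_code` now move into the new `pabot/ProcessManager.py` (+415 lines, not shown in this file's diff). Assuming an interface like the `manager.run(cmd, env=..., timeout=...)` call above, the core of such a helper could look roughly like this sketch; it is an assumption, not the shipped class:

    import subprocess
    import time

    def run_with_timeout(cmd, env=None, timeout=None, stdout=None, stderr=None):
        # Start the child and poll until it exits or the timeout elapses.
        start = time.time()
        process = subprocess.Popen(cmd, env=env, stdout=stdout, stderr=stderr)
        while process.poll() is None:
            if timeout and time.time() - start >= timeout:
                process.terminate()
                process.wait()
                # rc -1 marks a timeout kill, matching the removed code's convention
                return process, (-1, time.time() - start)
            time.sleep(0.1)
        return process, (process.returncode, time.time() - start)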
@@ -697,6 +689,7 @@ def _options_for_executor(
     queueIndex,
     last_level,
     processes,
+    skip,
 ):
     options = options.copy()
     options["log"] = "NONE"
@@ -712,7 +705,7 @@ def _options_for_executor(
     # Prevent multiple appending of PABOTLIBURI variable setting
     if pabotLibURIVar not in options["variable"]:
         options["variable"].append(pabotLibURIVar)
-    pabotExecutionPoolId = "PABOTEXECUTIONPOOLID:%d" %
+    pabotExecutionPoolId = "PABOTEXECUTIONPOOLID:%d" % _get_executor_num()
     if pabotExecutionPoolId not in options["variable"]:
         options["variable"].append(pabotExecutionPoolId)
     pabotIsLast = "PABOTISLASTEXECUTIONINPOOL:%s" % ("1" if is_last else "0")
@@ -733,6 +726,11 @@ def _options_for_executor(
     options["argumentfile"] = argfile
     if options.get("test", False) and options.get("include", []):
         del options["include"]
+    if skip:
+        this_dir = os.path.dirname(os.path.abspath(__file__))
+        listener_path = os.path.join(this_dir, "listener", "skip_listener.py")
+        options["dryrun"] = True
+        options["listener"].append(listener_path)
     return _set_terminal_coloring_options(options)
 
 
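The skip path combines Robot Framework's `--dryrun` with a bundled listener (`pabot/listener/skip_listener.py`, not shown in this diff) so that dependent tests report as skipped rather than silently passing. A hypothetical reconstruction of what such a listener could look like, using the listener v3 module API; the shipped file may differ:

    # Hypothetical reconstruction; the shipped pabot/listener/skip_listener.py may differ.
    ROBOT_LISTENER_API_VERSION = 3

    def end_test(data, result):
        # Under --dryrun every test "passes"; flip the status to SKIP instead.
        result.status = "SKIP"
        result.message = "Skipped because a dependency failed."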
@@ -1234,7 +1232,7 @@ def store_suite_names(hashes, suite_names):
         _write(
             "[ "
             + _wrap_with(Color.YELLOW, "WARNING")
-            + " ]: storing .pabotsuitenames failed"
+            + " ]: storing .pabotsuitenames failed", level="warning",
         )
 
 
@@ -1299,13 +1297,13 @@ def generate_suite_names_with_builder(outs_dir, datasources, options):
     if stdout_value:
         _write(
             "[STDOUT] from suite search:\n" + stdout_value + "[STDOUT] end",
-            Color.YELLOW,
+            Color.YELLOW, level="warning",
         )
     stderr_value = opts["stderr"].getvalue()
     if stderr_value:
         _write(
             "[STDERR] from suite search:\n" + stderr_value + "[STDERR] end",
-            Color.RED,
+            Color.RED, level="error",
         )
     return list(sorted(set(suite_names)))
 
@@ -1415,9 +1413,11 @@ def _now():
 def _print_elapsed(start, end):
     _write(
         "Total testing: "
-        + _time_string(sum(_ALL_ELAPSED))
-
-
+        + _time_string(sum(_ALL_ELAPSED)), level="info"
+    )
+    _write(
+        "Elapsed time: "
+        + _time_string(end - start), level="info"
     )
 
 
@@ -1444,14 +1444,202 @@ def _time_string(elapsed):
 def keyboard_interrupt(*args):
     global CTRL_C_PRESSED
     CTRL_C_PRESSED = True
+    # Notify ProcessManager to interrupt running processes
+    if _PROCESS_MANAGER:
+        _PROCESS_MANAGER.set_interrupted()
+    if _PABOTWRITER:
+        _write("[ INTERRUPT ] Ctrl+C pressed - initiating graceful shutdown...", Color.YELLOW, level="warning")
+    else:
+        print("[ INTERRUPT ] Ctrl+C pressed - initiating graceful shutdown...")
+
+
+def _get_depends(item):
+    return getattr(item.execution_item, "depends", [])
+
+
+def _dependencies_satisfied(item, completed):
+    """
+    Check if all dependencies for an item are satisfied (completed).
+    Uses unique names that include argfile_index when applicable.
+    """
+    for dep in _get_depends(item):
+        # Build unique name for dependency with same argfile_index as the item
+        if hasattr(item, 'argfile_index') and item.argfile_index:
+            # Item has an argfile index, so check for dependency with same argfile index
+            dep_unique_name = f"{item.argfile_index}:{dep}"
+            if dep_unique_name not in completed:
+                return False
+        else:
+            # No argfile index (single argumentfile case)
+            if dep not in completed:
+                return False
+
+    return True
+
+
+def _collect_transitive_dependents(failed_name, pending_items):
+    """
+    Returns all pending items that (directly or indirectly) depend on failed_name.
+    Handles both regular names and unique names (with argfile_index).
+
+    When failed_name is "1:Suite", it means Suite failed in argumentfile 1.
+    We should only skip items in argumentfile 1 that depend on Suite,
+    not items in other argumentfiles.
+    """
+    to_skip = set()
+    queue = [failed_name]
+
+    # Extract argfile_index from failed_name if it has one
+    if ":" in failed_name:
+        argfile_index, base_name = failed_name.split(":", 1)
+    else:
+        argfile_index = ""
+        base_name = failed_name
+
+    # Build dependency map: item unique name -> set of dependency base names
+    depends_map = {
+        _get_unique_execution_name(item): set(_get_depends(item))
+        for item in pending_items
+    }
+
+    while queue:
+        current = queue.pop(0)
+
+        # Extract base name from current (e.g., "1:Suite" -> "Suite")
+        if ":" in current:
+            current_argfile, current_base = current.split(":", 1)
+        else:
+            current_argfile = ""
+            current_base = current
+
+        for item_name, deps in depends_map.items():
+            # Only skip items from the same argumentfile
+            # Check if item_name corresponds to the same argumentfile
+            if ":" in item_name:
+                item_argfile, _ = item_name.split(":", 1)
+            else:
+                item_argfile = ""
+
+            # Only process if same argumentfile
+            if item_argfile != argfile_index:
+                continue
+
+            # Check if this item depends on the current failed item
+            if current_base in deps and item_name not in to_skip:
+                to_skip.add(item_name)
+                queue.append(item_name)
+
+    return to_skip
+
+
+def _get_unique_execution_name(item):
+    """
+    Create a unique identifier for an execution item that includes argfile index.
+    This ensures that the same test run with different argumentfiles are treated as distinct items.
+    """
+    if item.argfile_index:
+        return f"{item.argfile_index}:{item.execution_item.name}"
+    return item.execution_item.name
+
+
+def _parallel_execute_dynamic(
+    items,
+    processes,
+    datasources,
+    outs_dir,
+    opts_for_run,
+    pabot_args,
+):
+    # Signal handler is already set in main_program, no need to set it again
+    # Just use the thread pool without managing signals
+    global _MAX_EXECUTORS, _EXECUTOR_COUNTER
+
+    max_processes = processes or len(items)
+    _MAX_EXECUTORS = max_processes
+    _EXECUTOR_COUNTER = 0  # Reset executor counter for each parallel execution batch
+    pool = ThreadPool(max_processes)
+
+    pending = set(items)
+    running = {}
+    completed = set()
+    failed = set()
+
+    failure_policy = pabot_args.get("ordering", {}).get("failure_policy", "run_all")
+    lock = threading.Lock()
+
+    def on_complete(it, rc):
+        nonlocal pending, running, completed, failed
+
+        with lock:
+            running.pop(it, None)
+            unique_name = _get_unique_execution_name(it)
+            completed.add(unique_name)
+
+            if rc != 0:
+                failed.add(unique_name)
+
+                if failure_policy == "skip":
+                    to_skip_names = _collect_transitive_dependents(
+                        unique_name,
+                        pending,
+                    )
+
+                    for other in list(pending):
+                        other_unique_name = _get_unique_execution_name(other)
+                        if other_unique_name in to_skip_names:
+                            # Only log skip once when first marking it as skipped
+                            if not other.skip:
+                                _write(
+                                    f"Skipping '{other_unique_name}' because dependency "
+                                    f"'{unique_name}' failed (transitive).",
+                                    Color.YELLOW, level="debug"
+                                )
+                            other.skip = True
+
+    try:
+        while pending or running:
+            with lock:
+                ready = [
+                    item for item in list(pending)
+                    if _dependencies_satisfied(item, completed)
+                ]
+
+                while ready and len(running) < max_processes:
+                    item = ready.pop(0)
+                    pending.remove(item)
+
+                    result = pool.apply_async(
+                        _execute_item_with_executor_tracking,
+                        (item,),
+                        callback=lambda rc, it=item: on_complete(it, rc),
+                    )
+                    running[item] = result
+
+            dynamic_items = _get_dynamically_created_execution_items(
+                datasources, outs_dir, opts_for_run, pabot_args
+            )
+            if dynamic_items:
+                with lock:
+                    for di in dynamic_items:
+                        pending.add(di)
+
+            time.sleep(0.1)
+
+    finally:
+        pool.close()
+        # Signal handler was set in main_program and will be restored there
 
 
 def _parallel_execute(
     items, processes, datasources, outs_dir, opts_for_run, pabot_args
 ):
-
-
-
+    # Signal handler is already set in main_program, no need to set it again
+    global _MAX_EXECUTORS, _EXECUTOR_COUNTER
+    max_workers = len(items) if processes is None else processes
+    _MAX_EXECUTORS = max_workers
+    _EXECUTOR_COUNTER = 0  # Reset executor counter for each parallel execution batch
+    pool = ThreadPool(max_workers)
+    results = [pool.map_async(_execute_item_with_executor_tracking, items, 1)]
     delayed_result_append = 0
     new_items = []
     while not all(result.ready() for result in results) or delayed_result_append > 0:
@@ -1471,10 +1659,10 @@ def _parallel_execute(
         delayed_result_append = max(0, delayed_result_append - 1)
         if new_items and delayed_result_append == 0:
             _construct_last_levels([new_items])
-            results.append(pool.map_async(
+            results.append(pool.map_async(_execute_item_with_executor_tracking, new_items, 1))
             new_items = []
     pool.close()
-
+    # Signal handler will be restored in main_program's finally block
 
 
 def _output_dir(options, cleanup=True):
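`_parallel_execute_dynamic`, introduced in the hunk above, is essentially a ready-set scheduler: it submits whatever has all of its dependencies completed, records completions in a callback, and (under the `skip` failure policy) marks transitive dependents. Stripped of pabot's argfile scoping and skip handling, the core loop reduces to this standalone sketch:

    import threading
    import time
    from multiprocessing.pool import ThreadPool

    def schedule(items, deps, run, workers=2):
        # items: names to run; deps: name -> set of prerequisite names
        pending, running, done = set(items), set(), set()
        lock = threading.Lock()
        pool = ThreadPool(workers)

        def finish(name):
            with lock:
                running.discard(name)
                done.add(name)

        while pending or running:
            with lock:
                ready = [n for n in pending if deps.get(n, set()) <= done]
                for name in ready[: workers - len(running)]:
                    pending.discard(name)
                    running.add(name)
                    pool.apply_async(run, (name,), callback=lambda _, n=name: finish(n))
            time.sleep(0.05)
        pool.close()
        pool.join()

    schedule(["a", "b", "c"], {"c": {"a", "b"}}, print)  # a and b run before c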
@@ -1540,16 +1728,34 @@ def _copy_output_artifacts(options, timestamp_id=None, file_extensions=None, inc
 
 
 def _check_pabot_results_for_missing_xml(base_dir, command_name, output_xml_name):
+    """
+    Check for missing Robot Framework output XML files in pabot result directories,
+    taking into account the optional timestamp added by the -T option.
+
+    Args:
+        base_dir: The root directory containing pabot subdirectories
+        command_name: Name of the command that generated the output (used for fallback stderr filename)
+        output_xml_name: Expected XML filename, e.g., 'output.xml'
+
+    Returns:
+        List of paths to stderr output files for directories where the XML is missing.
+    """
     missing = []
+    # Prepare regex to match timestamped filenames like output-YYYYMMDD-hhmmss.xml
+    name_stem = os.path.splitext(output_xml_name)[0]
+    name_suffix = os.path.splitext(output_xml_name)[1]
+    pattern = re.compile(rf"^{re.escape(name_stem)}(-\d{{8}}-\d{{6}})?{re.escape(name_suffix)}$")
+
     for root, dirs, _ in os.walk(base_dir):
         if root == base_dir:
             for subdir in dirs:
                 subdir_path = os.path.join(base_dir, subdir)
-
+                # Check if any file matches the expected XML name or timestamped variant
+                has_xml = any(pattern.match(fname) for fname in os.listdir(subdir_path))
                 if not has_xml:
-
-                    missing.append(os.path.join(subdir_path, f
-            break
+                    sanitized_cmd = command_name.replace(" ", "_")
+                    missing.append(os.path.join(subdir_path, f"{sanitized_cmd}_stderr.out"))
+            break  # only check immediate subdirectories
     return missing
 
 
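The new pattern accepts both the plain XML name and the `-T`-style timestamped variant. A quick standalone check of the same regex construction:

    import os
    import re

    output_xml_name = "output.xml"
    stem, suffix = os.path.splitext(output_xml_name)
    pattern = re.compile(rf"^{re.escape(stem)}(-\d{{8}}-\d{{6}})?{re.escape(suffix)}$")

    for name in ["output.xml", "output-20251222-152233.xml", "output-1.xml", "report.xml"]:
        print(name, bool(pattern.match(name)))  # True, True, False, False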
@@ -1591,7 +1797,9 @@ def _report_results(outs_dir, pabot_args, options, start_time_string, tests_root
         if "output" not in options:
             options["output"] = "output.xml"
         _write_stats(stats)
-
+        stdout_writer = get_stdout_writer()
+        stderr_writer = get_stderr_writer(original_stderr_name='Internal Rebot')
+        exit_code = rebot(*outputs, **_options_for_rebot(options, start_time_string, _now()), stdout=stdout_writer, stderr=stderr_writer)
     else:
         exit_code = _report_results_for_one_run(
             outs_dir, pabot_args, options, start_time_string, tests_root_name, stats
@@ -1601,12 +1809,12 @@ def _report_results(outs_dir, pabot_args, options, start_time_string, tests_root
         _write(("[ " + _wrap_with(Color.YELLOW, 'WARNING') + " ] "
             "One or more subprocesses encountered an error and the "
             "internal .xml files could not be generated. Please check the "
-            "following stderr files to identify the cause:"))
+            "following stderr files to identify the cause:"), level="warning")
         for missing in missing_outputs:
-            _write(repr(missing))
+            _write(repr(missing), level="warning")
         _write((f"[ " + _wrap_with(Color.RED, 'ERROR') + " ] "
             "The output, log and report files produced by Pabot are "
-            "incomplete and do not contain all test cases."))
+            "incomplete and do not contain all test cases."), level="error")
     return exit_code if not missing_outputs else 252
 
 
@@ -1616,24 +1824,39 @@ def _write_stats(stats):
         al = stats["all"]
         _write(
             "%d critical tests, %d passed, %d failed"
-            % (crit["total"], crit["passed"], crit["failed"])
+            % (crit["total"], crit["passed"], crit["failed"]), level="info"
         )
         _write(
             "%d tests total, %d passed, %d failed"
-            % (al["total"], al["passed"], al["failed"])
+            % (al["total"], al["passed"], al["failed"]), level="info"
         )
     else:
         _write(
             "%d tests, %d passed, %d failed, %d skipped."
-            % (stats["total"], stats["passed"], stats["failed"], stats["skipped"])
+            % (stats["total"], stats["passed"], stats["failed"], stats["skipped"]), level="info"
         )
-    _write("===================================================")
+    _write("===================================================", level="info")
+
+
+def add_timestamp_to_filename(file_path: str, timestamp: str) -> str:
+    """
+    Rename the given file by inserting a timestamp before the extension.
+    Format: YYYYMMDD-hhmmss
+    Example: output.xml -> output-20251222-152233.xml
+    """
+    file_path = Path(file_path)
+    if not file_path.exists():
+        raise FileNotFoundError(f"{file_path} does not exist")
+
+    new_name = f"{file_path.stem}-{timestamp}{file_path.suffix}"
+    new_path = file_path.with_name(new_name)
+    file_path.rename(new_path)
+    return str(new_path)
 
 
 def _report_results_for_one_run(
     outs_dir, pabot_args, options, start_time_string, tests_root_name, stats
 ):
-    _write(pabot_args)
     copied_artifacts = _copy_output_artifacts(
         options, _get_timestamp_id(start_time_string, pabot_args["artifactstimestamps"]), pabot_args["artifacts"], pabot_args["artifactsinsubfolders"]
     )
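`add_timestamp_to_filename` above is a pure path transformation plus a rename; used on a scratch file it behaves like this (illustrative paths):

    import os
    import tempfile
    from pathlib import Path

    # Create a scratch file and rename it the same way the helper does.
    d = tempfile.mkdtemp()
    p = Path(d, "output.xml")
    p.write_text("<robot/>")
    p.rename(p.with_name(f"{p.stem}-20251222-152233{p.suffix}"))
    print(os.listdir(d))  # ['output-20251222-152233.xml']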
@@ -1641,6 +1864,9 @@ def _report_results_for_one_run(
         outs_dir, options, tests_root_name, stats, copied_artifacts, _get_timestamp_id(start_time_string, pabot_args["artifactstimestamps"])
     )
     _write_stats(stats)
+    ts = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
+    if "timestampoutputs" in options and options["timestampoutputs"]:
+        output_path = add_timestamp_to_filename(output_path, ts)
     if (
         "report" in options
         and options["report"].upper() == "NONE"
@@ -1651,9 +1877,12 @@ def _report_results_for_one_run(
             "output"
         ] = output_path  # REBOT will return error 252 if nothing is written
     else:
-        _write("Output: %s" % output_path)
+        _write("Output: %s" % output_path, level="info")
         options["output"] = None  # Do not write output again with rebot
-
+    stdout_writer = get_stdout_writer()
+    stderr_writer = get_stderr_writer(original_stderr_name="Internal Rebot")
+    exit_code = rebot(output_path, **_options_for_rebot(options, start_time_string, ts), stdout=stdout_writer, stderr=stderr_writer)
+    return exit_code
 
 
 def _merge_one_run(
@@ -1664,9 +1893,18 @@ def _merge_one_run(
         os.path.join(options.get("outputdir", "."), outputfile)
     )
     filename = options.get("output") or "output.xml"
-
+    base_name, ext = os.path.splitext(filename)
+    # Glob all candidates
+    candidate_files = glob(os.path.join(outs_dir, "**", f"*{base_name}*{ext}"), recursive=True)
+
+    # Regex: basename or basename-YYYYMMDD-hhmmss.ext
+    ts_pattern = re.compile(rf"^{re.escape(base_name)}(?:-\d{{8}}-\d{{6}})?{re.escape(ext)}$")
+
+    files = [f for f in candidate_files if ts_pattern.search(os.path.basename(f))]
+    files = natsorted(files)
+
     if not files:
-        _write('
+        _write('[ WARNING ]: No output files in "%s"' % outs_dir, Color.YELLOW, level="warning")
         return ""
 
     def invalid_xml_callback():
@@ -1715,19 +1953,9 @@ def _glob_escape(pathname):
     return drive + pathname
 
 
-def
-
-
-        if message is None:
-            MESSAGE_QUEUE.task_done()
-            return
-        print(message)
-        sys.stdout.flush()
-        MESSAGE_QUEUE.task_done()
-
-
-def _write(message, color=None):
-    MESSAGE_QUEUE.put(_wrap_with(color, message))
+def _write(message, color=None, level="debug"):
+    writer = get_writer()
+    writer.write(message, color=color, level=level)
 
 
 def _wrap_with(color, message):
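`_write` now delegates to the new `pabot/writer.py` (+235 lines, not part of this file's diff) instead of the removed queue-and-thread printer. Judging only from the calls in this file (`get_writer().write(message, color=..., level=...)`, plus `flush()` and `stop()`), a minimal writer of that shape could be sketched as follows; this is an assumption about the interface, not the shipped implementation:

    import sys
    import threading

    _LEVELS = ["debug", "info", "warning", "error"]

    class MessageWriter:
        # Hypothetical stand-in for pabot.writer.MessageWriter.
        def __init__(self, min_level="debug"):
            self._min = _LEVELS.index(min_level)
            self._lock = threading.Lock()

        def write(self, message, color=None, level="debug"):
            base = level.split("_")[0]  # "info_failed" -> "info"
            if _LEVELS.index(base) < self._min:
                return
            with self._lock:            # serialize output from worker threads
                print(message)
                sys.stdout.flush()

        def flush(self):
            sys.stdout.flush()

        def stop(self):
            self.flush()

    _writer = MessageWriter()

    def get_writer(**_ignored):         # accepts log_dir/console_type keywords
        return _writer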
@@ -1740,16 +1968,6 @@ def _is_output_coloring_supported():
     return sys.stdout.isatty() and os.name in Color.SUPPORTED_OSES
 
 
-def _start_message_writer():
-    t = threading.Thread(target=_writer)
-    t.start()
-
-
-def _stop_message_writer():
-    MESSAGE_QUEUE.put(None)
-    MESSAGE_QUEUE.join()
-
-
 def _is_port_available(port):
     """Check if a given port on localhost is available."""
     with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
@@ -1783,7 +2001,7 @@ def _start_remote_library(pabot_args): # type: (dict) -> Optional[subprocess.Po
         _write(
             f"Warning: specified pabotlibport {port} is already in use. "
             "A free port will be assigned automatically.",
-            Color.YELLOW,
+            Color.YELLOW, level="warning"
         )
         port = _get_free_port()
 
@@ -1797,7 +2015,7 @@ def _start_remote_library(pabot_args): # type: (dict) -> Optional[subprocess.Po
         _write(
             "Warning: specified resource file doesn't exist."
             " Some tests may fail or continue forever.",
-            Color.YELLOW,
+            Color.YELLOW, level="warning"
         )
         resourcefile = ""
     cmd = [
@@ -1807,28 +2025,73 @@ def _start_remote_library(pabot_args): # type: (dict) -> Optional[subprocess.Po
         pabot_args["pabotlibhost"],
         str(port),
     ]
-
+    # Start PabotLib in isolation so it doesn't receive CTRL+C when the main process is interrupted.
+    # This allows graceful shutdown in finally block.
+    kwargs = {
+        "stdout": subprocess.PIPE,
+        "stderr": subprocess.STDOUT,
+        "text": True,
+        "bufsize": 1,
+        "env": {**os.environ, "PYTHONUNBUFFERED": "1"},
+    }
+    if sys.platform.startswith('win'):
+        # Windows: use CREATE_NEW_PROCESS_GROUP
+        kwargs["creationflags"] = subprocess.CREATE_NEW_PROCESS_GROUP
+    else:
+        # Unix/Linux/macOS: use preexec_fn to create new session
+        import os as os_module
+        kwargs["preexec_fn"] = os_module.setsid
+
+    process = subprocess.Popen(cmd, **kwargs)
+
+    def _read_output(proc, writer):
+        for line in proc.stdout:
+            if line.strip():  # Skip empty lines
+                writer.write(line.rstrip('\n') + '\n', level="info")
+                writer.flush()
+        proc.stdout.close()
+
+    pabotlib_writer = ThreadSafeWriter(get_writer())
+    thread = threading.Thread(
+        target=_read_output,
+        args=(process, pabotlib_writer),
+        daemon=False,  # Non-daemon so output is captured before exit
+    )
+    thread.start()
+
     return process
 
 
 def _stop_remote_library(process):  # type: (subprocess.Popen) -> None
-    _write("Stopping PabotLib process")
+    _write("Stopping PabotLib process", level="debug")
     try:
         remoteLib = Remote(_PABOTLIBURI)
         remoteLib.run_keyword("stop_remote_libraries", [], {})
         remoteLib.run_keyword("stop_remote_server", [], {})
     except RuntimeError:
-        _write("Could not connect to PabotLib - assuming stopped already")
-
+        _write("Could not connect to PabotLib - assuming stopped already", level="info")
+
+    # Always wait for graceful shutdown, regardless of remote connection status
     i = 50
     while i > 0 and process.poll() is None:
         time.sleep(0.1)
         i -= 1
-
+
+    # If still running after remote stop attempt, terminate it
+    if process.poll() is None:
         _write(
             "Could not stop PabotLib Process in 5 seconds " "- calling terminate",
-            Color.YELLOW,
+            Color.YELLOW, level="warning"
         )
         process.terminate()
+        # Give it a moment to respond to SIGTERM
+        time.sleep(0.5)
+        if process.poll() is None:
+            _write(
+                "PabotLib Process did not respond to terminate - calling kill",
+                Color.RED, level="error"
+            )
+            process.kill()
     else:
         _write("PabotLib process stopped")
 
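Detaching the PabotLib child from the parent's process group is what keeps a terminal Ctrl+C (which signals the whole foreground group) from killing it before the `finally` block can stop it gracefully. The portable pattern in isolation:

    import os
    import subprocess
    import sys

    def spawn_isolated(cmd):
        # The child will not receive the SIGINT sent to the parent's process group.
        kwargs = {}
        if sys.platform.startswith("win"):
            kwargs["creationflags"] = subprocess.CREATE_NEW_PROCESS_GROUP
        else:
            kwargs["preexec_fn"] = os.setsid  # new session -> new process group
        return subprocess.Popen(cmd, **kwargs)

    proc = spawn_isolated([sys.executable, "-c", "import time; time.sleep(1)"])
    proc.wait()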
@@ -1855,8 +2118,9 @@ class QueueItem(object):
         hive=None,
         processes=0,
         timeout=None,
+        skip=False,
     ):
-        # type: (List[str], str, Dict[str, object], ExecutionItem, List[str], bool, Tuple[str, Optional[str]], Optional[str], int, Optional[int]) -> None
+        # type: (List[str], str, Dict[str, object], ExecutionItem, List[str], bool, Tuple[str, Optional[str]], Optional[str], int, Optional[int], bool) -> None
         self.datasources = datasources
         self.outs_dir = (
             outs_dir.encode("utf-8") if PY2 and is_unicode(outs_dir) else outs_dir
@@ -1876,6 +2140,7 @@ class QueueItem(object):
         self.processes = processes
         self.timeout = timeout
         self.sleep_before_start = execution_item.get_sleep()
+        self.skip = skip
 
     @property
     def index(self):
@@ -1941,7 +2206,9 @@ def _create_execution_items_for_run(
     return all_items
 
 
-def _create_items(datasources, opts_for_run, outs_dir, pabot_args, suite_group):
+def _create_items(datasources, opts_for_run, outs_dir, pabot_args, suite_group, argfile=None):
+    # If argfile is provided, use only that one. Otherwise, loop through all argumentfiles.
+    argumentfiles = [argfile] if argfile is not None else (pabot_args["argumentfiles"] or [("", None)])
     return [
         QueueItem(
             datasources,
@@ -1950,13 +2217,13 @@ def _create_items(datasources, opts_for_run, outs_dir, pabot_args, suite_group):
             suite,
             pabot_args["command"],
             pabot_args["verbose"],
-
+            af,
             pabot_args.get("hive"),
             pabot_args["processes"],
             pabot_args["processtimeout"],
         )
         for suite in suite_group
-        for
+        for af in argumentfiles
     ]
 
 
@@ -1983,31 +2250,20 @@ def _create_execution_items_for_dry_run(
 def _chunk_items(items, chunk_size):
     for i in range(0, len(items), chunk_size):
         chunked_items = items[i : i + chunk_size]
-
-        if not base_item:
+        if not chunked_items:
             continue
+        # For TestItem execution items, yield each item separately
+        # For Suite items, combine them into one item
+        base_item = chunked_items[0]
         if isinstance(base_item.execution_item, TestItem):
             for item in chunked_items:
-
-                yield chunked_item
+                yield item
         else:
+            # For suites, create a combined execution item with all suite execution items
             execution_items = SuiteItems([item.execution_item for item in chunked_items])
-
-
-
-
-def _queue_item(base_item, execution_items):
-    return QueueItem(
-        base_item.datasources,
-        base_item.outs_dir,
-        base_item.options,
-        execution_items,
-        base_item.command,
-        base_item.verbose,
-        (base_item.argfile_index, base_item.argfile),
-        processes=base_item.processes,
-        timeout=base_item.timeout,
-    )
+            # Reuse the base item but update its execution_item to the combined one
+            base_item.execution_item = execution_items
+            yield base_item
 
 
 def _find_ending_level(name, group):
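The rewritten generator yields test items one by one but folds each chunk of suites into a single combined item by mutating the first `QueueItem` in place, instead of rebuilding one through the removed `_queue_item` helper; the trade-off is fewer object copies at the cost of mutating an input item. The chunking itself is the usual slice-based generator:

    def chunk(items, size):
        # Yield lists of up to `size` consecutive items.
        for i in range(0, len(items), size):
            part = items[i : i + size]
            if part:
                yield part

    print(list(chunk(list("abcdefg"), 3)))  # [['a', 'b', 'c'], ['d', 'e', 'f'], ['g']]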
@@ -2070,7 +2326,16 @@ def _get_dynamically_created_execution_items(
     if not _pabotlib_in_use():
         return None
     plib = Remote(_PABOTLIBURI)
-
+    try:
+        new_suites = plib.run_keyword("get_added_suites", [], {})
+    except RuntimeError as err:
+        _write(
+            "[ WARNING ] PabotLib unreachable during post-run phase, "
+            "assuming no dynamically added suites. "
+            "Original error: %s",
+            err, level="warning"
+        )
+        new_suites = []
     if len(new_suites) == 0:
         return None
     suite_group = [DynamicSuiteItem(s, v) for s, v in new_suites]
@@ -2101,7 +2366,8 @@ def main(args=None):
 
 
 def main_program(args):
-    global _PABOTLIBPROCESS
+    global _PABOTLIBPROCESS, _PABOTCONSOLE, _PABOTWRITER
+    outs_dir = None
     args = args or sys.argv[1:]
     if len(args) == 0:
         print(
@@ -2114,52 +2380,81 @@ def main_program(args):
     start_time = time.time()
     start_time_string = _now()
     # NOTE: timeout option
+    original_signal_handler = signal.default_int_handler  # Save default handler in case of early exit
     try:
-        _start_message_writer()
         options, datasources, pabot_args, opts_for_run = parse_args(args)
+        _PABOTCONSOLE = pabot_args.get("pabotconsole", "verbose")
         if pabot_args["help"]:
             help_print = __doc__.replace(
                 "PLACEHOLDER_README.MD",
                 read_args_from_readme()
             )
             print(help_print.replace("[PABOT_VERSION]", PABOT_VERSION))
-            return
+            return 251
         if len(datasources) == 0:
             print("[ " + _wrap_with(Color.RED, "ERROR") + " ]: No datasources given.")
             print("Try --help for usage information.")
             return 252
+        outs_dir = _output_dir(options)
+
+        # These ensure MessageWriter and ProcessManager are ready before any parallel execution.
+        _PABOTWRITER = get_writer(log_dir=outs_dir, console_type=_PABOTCONSOLE)
+        _ensure_process_manager()
+        _write(f"Initialized logging in {outs_dir}", level="info")
+
         _PABOTLIBPROCESS = _start_remote_library(pabot_args)
+        # Set up signal handler to keep PabotLib alive during CTRL+C
+        # This ensures graceful shutdown in the finally block
+        original_signal_handler = signal.signal(signal.SIGINT, keyboard_interrupt)
         if _pabotlib_in_use():
             _initialize_queue_index()
-
+
         suite_groups = _group_suites(outs_dir, datasources, options, pabot_args)
         if pabot_args["verbose"]:
             _write("Suite names resolved in %s seconds" % str(time.time() - start_time))
         if not suite_groups or suite_groups == [[]]:
-            _write("No tests to execute")
+            _write("No tests to execute", level="info")
             if not options.get("runemptysuite", False):
                 return 252
-
+
+        # Create execution items for all argumentfiles at once
+        all_execution_items = _create_execution_items(
             suite_groups, datasources, outs_dir, options, opts_for_run, pabot_args
         )
-
-
-
-
+
+        # Now execute all items from all argumentfiles in parallel
+        if pabot_args.get("ordering", {}).get("mode") == "dynamic":
+            # flatten stages
+            flattened_items = []
+            for stage in all_execution_items:
+                flattened_items.extend(stage)
+            _parallel_execute_dynamic(
+                flattened_items,
                 pabot_args["processes"],
                 datasources,
                 outs_dir,
                 opts_for_run,
                 pabot_args,
             )
+        else:
+            while all_execution_items:
+                items = all_execution_items.pop(0)
+                _parallel_execute(
+                    items,
+                    pabot_args["processes"],
+                    datasources,
+                    outs_dir,
+                    opts_for_run,
+                    pabot_args,
+                )
         if pabot_args["no-rebot"]:
             _write((
                 "All tests were executed, but the --no-rebot argument was given, "
                 "so the results were not compiled, and no summary was generated. "
                 f"All results have been saved in the {outs_dir} folder."
-            ))
-            _write("===================================================")
-            return
+            ), level="info")
+            _write("===================================================", level="info")
+            return 253
         result_code = _report_results(
             outs_dir,
             pabot_args,
@@ -2167,29 +2462,119 @@ def main_program(args):
             start_time_string,
             _get_suite_root_name(suite_groups),
         )
+        # If CTRL+C was pressed during execution, raise KeyboardInterrupt now.
+        # This can happen without previous errors if test are for example almost ready.
+        if CTRL_C_PRESSED:
+            raise KeyboardInterrupt()
         return result_code if not _ABNORMAL_EXIT_HAPPENED else 252
     except Information as i:
         version_print = __doc__.replace("\nPLACEHOLDER_README.MD\n", "")
         print(version_print.replace("[PABOT_VERSION]", PABOT_VERSION))
-
+        if _PABOTWRITER:
+            _write(i.message, level="info")
+        else:
+            print(i.message)
+        return 251
     except DataError as err:
-
+        if _PABOTWRITER:
+            _write(err.message, Color.RED, level="error")
+        else:
+            print(err.message)
         return 252
-    except Exception:
-
-
-
-
-
-
-
-
-
+    except (Exception, KeyboardInterrupt):
+        if not CTRL_C_PRESSED:
+            if _PABOTWRITER:
+                _write("[ ERROR ] EXCEPTION RAISED DURING PABOT EXECUTION", Color.RED, level="error")
+                _write(
+                    "[ ERROR ] PLEASE CONSIDER REPORTING THIS ISSUE TO https://github.com/mkorpela/pabot/issues",
+                    Color.RED, level="error"
+                )
+                _write("Pabot: %s" % PABOT_VERSION, level="info")
+                _write("Python: %s" % sys.version, level="info")
+                _write("Robot Framework: %s" % ROBOT_VERSION, level="info")
+            else:
+                print("[ ERROR ] EXCEPTION RAISED DURING PABOT EXECUTION")
+                print("[ ERROR ] PLEASE CONSIDER REPORTING THIS ISSUE TO https://github.com/mkorpela/pabot/issues")
+                print("Pabot: %s" % PABOT_VERSION)
+                print("Python: %s" % sys.version)
+                print("Robot Framework: %s" % ROBOT_VERSION)
+            import traceback
+            traceback.print_exc()
+            return 255
+        else:
+            if _PABOTWRITER:
+                _write("[ ERROR ] Execution stopped by user (Ctrl+C)", Color.RED, level="error")
+            else:
+                print("[ ERROR ] Execution stopped by user (Ctrl+C)")
+            return 253
     finally:
-        if
-
-
-
+        if _PABOTWRITER:
+            _write("Finalizing Pabot execution...", level="debug")
+        else:
+            print("Finalizing Pabot execution...")
+
+        # Restore original signal handler
+        try:
+            signal.signal(signal.SIGINT, original_signal_handler)
+        except Exception as e:
+            if _PABOTWRITER:
+                _write(f"[ WARNING ] Could not restore signal handler: {e}", Color.YELLOW, level="warning")
+            else:
+                print(f"[ WARNING ] Could not restore signal handler: {e}")
+
+        # First: Terminate all test subprocesses gracefully
+        # This must happen BEFORE stopping PabotLib so test processes
+        # can cleanly disconnect from the remote library
+        try:
+            if _PROCESS_MANAGER:
+                _PROCESS_MANAGER.terminate_all()
+        except Exception as e:
+            if _PABOTWRITER:
+                _write(f"[ WARNING ] Could not terminate test subprocesses: {e}", Color.YELLOW, level="warning")
+            else:
+                print(f"[ WARNING ] Could not terminate test subprocesses: {e}")
+
+        # Then: Stop PabotLib after all test processes are gone
+        # This ensures clean shutdown with no orphaned remote connections
+        try:
+            if _PABOTLIBPROCESS:
+                _stop_remote_library(_PABOTLIBPROCESS)
+        except Exception as e:
+            if _PABOTWRITER:
+                _write(f"[ WARNING ] Failed to stop remote library cleanly: {e}", Color.YELLOW, level="warning")
+            else:
+                print(f"[ WARNING ] Failed to stop remote library cleanly: {e}")
+
+        # Print elapsed time
+        try:
+            _print_elapsed(start_time, time.time())
+        except Exception as e:
+            if _PABOTWRITER:
+                _write(f"[ WARNING ] Failed to print elapsed time: {e}", Color.YELLOW, level="warning")
+            else:
+                print(f"[ WARNING ] Failed to print elapsed time: {e}")
+
+        # Flush and stop writer
+        try:
+            if _PABOTWRITER:
+                _PABOTWRITER.flush()
+                _PABOTWRITER.write("Logs flushed successfully.", level="debug")
+            else:
+                writer = get_writer()
+                if writer:
+                    writer.flush()
+        except Exception as e:
+            print(f"[ WARNING ] Could not flush writer: {e}")
+
+        try:
+            if _PABOTWRITER:
+                _PABOTWRITER.stop()
+            else:
+                writer = get_writer()
+                if writer:
+                    writer.stop()
+        except Exception as e:
+            print(f"[ WARNING ] Could not stop writer: {e}")
 
 
 def _parse_ordering(filename):  # type: (str) -> List[ExecutionItem]
@@ -2225,19 +2610,19 @@ def _check_ordering(ordering_file, suite_names): # type: (List[ExecutionItem],
             duplicates.append(f"{item.type.title()} item: '{item.name}'")
         suite_and_test_names.append(item.name)
     if skipped_runnable_items:
-        _write("Note: The ordering file contains test or suite items that are not included in the current test run. The following items will be ignored/skipped:")
+        _write("Note: The ordering file contains test or suite items that are not included in the current test run. The following items will be ignored/skipped:", level="info")
         for item in skipped_runnable_items:
-            _write(f"  - {item}")
+            _write(f"  - {item}", level="info")
     if duplicates:
-        _write("Note: The ordering file contains duplicate suite or test items. Only the first occurrence is taken into account. These are duplicates:")
+        _write("Note: The ordering file contains duplicate suite or test items. Only the first occurrence is taken into account. These are duplicates:", level="info")
         for item in duplicates:
-            _write(f"  - {item}")
+            _write(f"  - {item}", level="info")
 
 
 def _group_suites(outs_dir, datasources, options, pabot_args):
     suite_names = solve_suite_names(outs_dir, datasources, options, pabot_args)
     _verify_depends(suite_names)
-    ordering_arg = _parse_ordering(pabot_args.get("ordering")) if (pabot_args.get("ordering")) is not None else None
+    ordering_arg = _parse_ordering(pabot_args.get("ordering").get("file")) if (pabot_args.get("ordering")) is not None else None
     if ordering_arg:
         _verify_depends(ordering_arg)
     if options.get("name"):
|