robotframework-pabot 5.2.0b1__py3-none-any.whl → 5.2.0rc1__py3-none-any.whl

This diff compares the contents of two publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their respective public registries.
pabot/pabot.py CHANGED
@@ -84,7 +84,7 @@ from .execution_items import (
     create_dependency_tree,
 )
 from .result_merger import merge
-from .writer import get_writer
+from .writer import get_writer, get_stdout_writer, get_stderr_writer, ThreadSafeWriter, MessageWriter

 try:
     import queue  # type: ignore
@@ -105,18 +105,24 @@ except ImportError:
 from typing import Any, Dict, List, Optional, Tuple, Union

 CTRL_C_PRESSED = False
-#MESSAGE_QUEUE = queue.Queue()
-EXECUTION_POOL_IDS = []  # type: List[int]
-EXECUTION_POOL_ID_LOCK = threading.Lock()
-POPEN_LOCK = threading.Lock()
 _PABOTLIBURI = "127.0.0.1:8270"
 _PABOTLIBPROCESS = None  # type: Optional[subprocess.Popen]
+_PABOTWRITER = None  # type: Optional[MessageWriter]
 _NUMBER_OF_ITEMS_TO_BE_EXECUTED = 0
 _ABNORMAL_EXIT_HAPPENED = False
+_PABOTCONSOLE = "verbose"  # type: str

 _COMPLETED_LOCK = threading.Lock()
 _NOT_COMPLETED_INDEXES = []  # type: List[int]

+# Thread-local storage for tracking executor number assigned to each thread
+_EXECUTOR_THREAD_LOCAL = threading.local()
+# Next executor number to assign (incremented each time a task is submitted)
+_EXECUTOR_COUNTER = 0
+_EXECUTOR_COUNTER_LOCK = threading.Lock()
+# Maximum number of executors (workers in the thread pool)
+_MAX_EXECUTORS = 1
+
 _ROBOT_EXTENSIONS = [
     ".html",
     ".htm",
@@ -225,6 +231,32 @@ class Color:
     YELLOW = "\033[93m"


+def _get_next_executor_num():
+    """Get the next executor number in round-robin fashion."""
+    global _EXECUTOR_COUNTER, _MAX_EXECUTORS
+    with _EXECUTOR_COUNTER_LOCK:
+        executor_num = _EXECUTOR_COUNTER % _MAX_EXECUTORS
+        _EXECUTOR_COUNTER += 1
+    return executor_num
+
+
+def _set_executor_num(executor_num):
+    """Set the executor number for the current thread."""
+    _EXECUTOR_THREAD_LOCAL.executor_num = executor_num
+
+
+def _get_executor_num():
+    """Get the executor number for the current thread."""
+    return getattr(_EXECUTOR_THREAD_LOCAL, 'executor_num', 0)
+
+
+def _execute_item_with_executor_tracking(item):
+    """Wrapper to track executor number and call execute_and_wait_with."""
+    executor_num = _get_next_executor_num()
+    _set_executor_num(executor_num)
+    return execute_and_wait_with(item)
+
+
 def execute_and_wait_with(item):
     # type: ('QueueItem') -> int
     global CTRL_C_PRESSED, _NUMBER_OF_ITEMS_TO_BE_EXECUTED
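
Note: these helpers replace pabot's earlier _make_id() bookkeeping (removed further down in this diff), which derived a pool ID from the thread's ident. The new scheme hands out numbers round-robin from a locked counter and parks each number in thread-local storage, so code deeper in the call stack can read it back without threading it through every signature. A self-contained sketch of the same pattern (illustrative names, not pabot's API):

    import threading
    from multiprocessing.pool import ThreadPool

    _local = threading.local()        # per-thread slot for the executor number
    _counter = 0
    _counter_lock = threading.Lock()  # guards the shared counter across pool threads
    MAX_EXECUTORS = 4                 # pool size; numbers wrap at this value

    def next_num():
        global _counter
        with _counter_lock:
            num = _counter % MAX_EXECUTORS
            _counter += 1
        return num

    def run_tracked(task):
        _local.num = next_num()                  # assign on task entry
        return task(getattr(_local, "num", 0))   # any callee can read it back

    pool = ThreadPool(MAX_EXECUTORS)
    print(sorted(pool.map(run_tracked, [lambda n: n] * 8)))  # [0, 0, 1, 1, 2, 2, 3, 3]
    pool.close()

Unlike the ident-based approach, the counter is reset per batch, so IDs stay within [0, pool size) even if the pool recreates threads.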
@@ -253,7 +285,7 @@ def execute_and_wait_with(item):
                 outs_dir,
                 name,
                 item.verbose,
-                _make_id(),
+                _get_executor_num(),
                 caller_id,
                 item.index,
             )
@@ -264,7 +296,7 @@ def execute_and_wait_with(item):
                 outs_dir,
                 name,
                 item.verbose,
-                _make_id(),
+                _get_executor_num(),
                 caller_id,
                 item.index,
                 item.execution_item.type != "test",
@@ -272,10 +304,10 @@ def execute_and_wait_with(item):
                 sleep_before_start=item.sleep_before_start
             )
         outputxml_preprocessing(
-            item.options, outs_dir, name, item.verbose, _make_id(), caller_id, item.index
+            item.options, outs_dir, name, item.verbose, _get_executor_num(), caller_id, item.index
         )
     except:
-        _write(traceback.format_exc())
+        _write(traceback.format_exc(), level="error")
     return rc


@@ -313,7 +345,7 @@ def _hived_execute(
     try:
         make_order(hive, " ".join(cmd), outs_dir)
     except:
-        _write(traceback.format_exc())
+        _write(traceback.format_exc(), level="error")
     if plib:
         _increase_completed(plib, my_index)

@@ -353,7 +385,7 @@ def _try_execute_and_wait(
                 sleep_before_start
             )
     except:
-        _write(traceback.format_exc())
+        _write(traceback.format_exc(), level="error")
     if plib:
         _increase_completed(plib, my_index)
     is_ignored = _is_ignored(plib, caller_id)
@@ -396,6 +428,7 @@ def _result_to_stdout(
             pool_id,
             my_index,
             _execution_ignored_message(item_name, stdout, stderr, elapsed, verbose),
+            level="info_ignored",
         )
     elif rc != 0:
         _write_with_id(
@@ -406,6 +439,7 @@ def _result_to_stdout(
                 item_name, stdout, stderr, rc, verbose or show_stdout_on_failure
             ),
            Color.RED,
+            level="info_failed",
        )
     else:
         _write_with_id(
@@ -414,6 +448,7 @@ def _result_to_stdout(
             my_index,
             _execution_passed_message(item_name, stdout, stderr, elapsed, verbose),
             Color.GREEN,
+            level="info_passed",
         )


@@ -489,25 +524,16 @@ def outputxml_preprocessing(options, outs_dir, item_name, verbose, pool_id, call
         print(sys.exc_info())


-def _write_with_id(process, pool_id, item_index, message, color=None, timestamp=None):
+def _write_with_id(process, pool_id, item_index, message, color=None, timestamp=None, level="debug"):
     timestamp = timestamp or datetime.datetime.now()
     _write(
         "%s [PID:%s] [%s] [ID:%s] %s"
         % (timestamp, process.pid, pool_id, item_index, message),
         color,
+        level=level,
     )


-def _make_id():  # type: () -> int
-    global EXECUTION_POOL_IDS, EXECUTION_POOL_ID_LOCK
-    thread_id = threading.current_thread().ident
-    assert thread_id is not None
-    with EXECUTION_POOL_ID_LOCK:
-        if thread_id not in EXECUTION_POOL_IDS:
-            EXECUTION_POOL_IDS += [thread_id]
-        return EXECUTION_POOL_IDS.index(thread_id)
-
-
 def _increase_completed(plib, my_index):
     # type: (Remote, int) -> None
     global _COMPLETED_LOCK, _NOT_COMPLETED_INDEXES
@@ -679,7 +705,7 @@ def _options_for_executor(
     # Prevent multiple appending of PABOTLIBURI variable setting
     if pabotLibURIVar not in options["variable"]:
         options["variable"].append(pabotLibURIVar)
-    pabotExecutionPoolId = "PABOTEXECUTIONPOOLID:%d" % _make_id()
+    pabotExecutionPoolId = "PABOTEXECUTIONPOOLID:%d" % _get_executor_num()
     if pabotExecutionPoolId not in options["variable"]:
         options["variable"].append(pabotExecutionPoolId)
     pabotIsLast = "PABOTISLASTEXECUTIONINPOOL:%s" % ("1" if is_last else "0")
@@ -702,7 +728,7 @@
         del options["include"]
     if skip:
         this_dir = os.path.dirname(os.path.abspath(__file__))
-        listener_path = os.path.join(this_dir, "skip_listener.py")
+        listener_path = os.path.join(this_dir, "listener", "skip_listener.py")
         options["dryrun"] = True
         options["listener"].append(listener_path)
     return _set_terminal_coloring_options(options)
@@ -1206,7 +1232,7 @@ def store_suite_names(hashes, suite_names):
         _write(
             "[ "
             + _wrap_with(Color.YELLOW, "WARNING")
-            + " ]: storing .pabotsuitenames failed"
+            + " ]: storing .pabotsuitenames failed", level="warning",
         )


@@ -1271,13 +1297,13 @@ def generate_suite_names_with_builder(outs_dir, datasources, options):
     if stdout_value:
         _write(
             "[STDOUT] from suite search:\n" + stdout_value + "[STDOUT] end",
-            Color.YELLOW,
+            Color.YELLOW, level="warning",
         )
     stderr_value = opts["stderr"].getvalue()
     if stderr_value:
         _write(
             "[STDERR] from suite search:\n" + stderr_value + "[STDERR] end",
-            Color.RED,
+            Color.RED, level="error",
         )
     return list(sorted(set(suite_names)))

@@ -1387,9 +1413,11 @@ def _now():
 def _print_elapsed(start, end):
     _write(
         "Total testing: "
-        + _time_string(sum(_ALL_ELAPSED))
-        + "\nElapsed time: "
-        + _time_string(end - start)
+        + _time_string(sum(_ALL_ELAPSED)), level="info"
+    )
+    _write(
+        "Elapsed time: "
+        + _time_string(end - start), level="info"
     )


@@ -1416,6 +1444,13 @@ def _time_string(elapsed):
 def keyboard_interrupt(*args):
     global CTRL_C_PRESSED
     CTRL_C_PRESSED = True
+    # Notify ProcessManager to interrupt running processes
+    if _PROCESS_MANAGER:
+        _PROCESS_MANAGER.set_interrupted()
+    if _PABOTWRITER:
+        _write("[ INTERRUPT ] Ctrl+C pressed - initiating graceful shutdown...", Color.YELLOW, level="warning")
+    else:
+        print("[ INTERRUPT ] Ctrl+C pressed - initiating graceful shutdown...")


 def _get_depends(item):
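
Note: the handler deliberately does not raise. It records the interrupt in a flag and tells the process manager to wind down, leaving actual cleanup to the finally block in main_program (shown later in this diff). A minimal sketch of that cooperative-interrupt pattern, assuming a polling main loop:

    import signal
    import time

    interrupted = False

    def on_sigint(signum, frame):
        # Record the interrupt instead of raising KeyboardInterrupt mid-task,
        # so in-flight work can finish and cleanup code still runs.
        global interrupted
        interrupted = True
        print("[ INTERRUPT ] Ctrl+C pressed - initiating graceful shutdown...")

    previous = signal.signal(signal.SIGINT, on_sigint)
    try:
        while not interrupted:
            time.sleep(0.1)  # stand-in for dispatching and monitoring subprocesses
        print("draining...")  # orderly teardown would happen here
    finally:
        signal.signal(signal.SIGINT, previous)  # restore, as main_program's finally does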
@@ -1423,32 +1458,90 @@ def _get_depends(item):


 def _dependencies_satisfied(item, completed):
-    return all(dep in completed for dep in _get_depends(item))
+    """
+    Check if all dependencies for an item are satisfied (completed).
+    Uses unique names that include argfile_index when applicable.
+    """
+    for dep in _get_depends(item):
+        # Build unique name for dependency with same argfile_index as the item
+        if hasattr(item, 'argfile_index') and item.argfile_index:
+            # Item has an argfile index, so check for dependency with same argfile index
+            dep_unique_name = f"{item.argfile_index}:{dep}"
+            if dep_unique_name not in completed:
+                return False
+        else:
+            # No argfile index (single argumentfile case)
+            if dep not in completed:
+                return False
+
+    return True


 def _collect_transitive_dependents(failed_name, pending_items):
     """
     Returns all pending items that (directly or indirectly) depend on failed_name.
+    Handles both regular names and unique names (with argfile_index).
+
+    When failed_name is "1:Suite", it means Suite failed in argumentfile 1.
+    We should only skip items in argumentfile 1 that depend on Suite,
+    not items in other argumentfiles.
     """
     to_skip = set()
     queue = [failed_name]

-    # Build dependency map once
+    # Extract argfile_index from failed_name if it has one
+    if ":" in failed_name:
+        argfile_index, base_name = failed_name.split(":", 1)
+    else:
+        argfile_index = ""
+        base_name = failed_name
+
+    # Build dependency map: item unique name -> set of dependency base names
     depends_map = {
-        item.execution_item.name: set(_get_depends(item))
+        _get_unique_execution_name(item): set(_get_depends(item))
         for item in pending_items
     }

     while queue:
         current = queue.pop(0)
+
+        # Extract base name from current (e.g., "1:Suite" -> "Suite")
+        if ":" in current:
+            current_argfile, current_base = current.split(":", 1)
+        else:
+            current_argfile = ""
+            current_base = current
+
         for item_name, deps in depends_map.items():
-            if current in deps and item_name not in to_skip:
+            # Only skip items from the same argumentfile
+            # Check if item_name corresponds to the same argumentfile
+            if ":" in item_name:
+                item_argfile, _ = item_name.split(":", 1)
+            else:
+                item_argfile = ""
+
+            # Only process if same argumentfile
+            if item_argfile != argfile_index:
+                continue
+
+            # Check if this item depends on the current failed item
+            if current_base in deps and item_name not in to_skip:
                 to_skip.add(item_name)
                 queue.append(item_name)

     return to_skip


+def _get_unique_execution_name(item):
+    """
+    Create a unique identifier for an execution item that includes argfile index.
+    This ensures that the same test run with different argumentfiles are treated as distinct items.
+    """
+    if item.argfile_index:
+        return f"{item.argfile_index}:{item.execution_item.name}"
+    return item.execution_item.name
+
+
 def _parallel_execute_dynamic(
     items,
     processes,
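
Note: with multiple argumentfiles, the same suite runs once per argumentfile, so completion and failure tracking now key on "<argfile_index>:<name>" and a failure only cascades within its own argumentfile. A worked example of the traversal, using plain dicts in place of pabot's QueueItem objects (the dependency data is invented for illustration):

    from collections import deque

    # unique name -> base names of its declared dependencies (illustrative data)
    depends_map = {
        "1:Login": set(),
        "1:Checkout": {"Login"},    # depends on Login within argumentfile 1
        "1:Report": {"Checkout"},   # transitively depends on Login
        "2:Checkout": {"Login"},    # same suite, argumentfile 2: unaffected
    }

    def collect_transitive_dependents(failed_name):
        argfile = failed_name.split(":", 1)[0] if ":" in failed_name else ""
        to_skip, queue = set(), deque([failed_name])
        while queue:
            current = queue.popleft()
            current_base = current.split(":", 1)[1] if ":" in current else current
            for name, deps in depends_map.items():
                name_argfile = name.split(":", 1)[0] if ":" in name else ""
                if name_argfile != argfile:
                    continue  # a failure only cascades within its own argumentfile
                if current_base in deps and name not in to_skip:
                    to_skip.add(name)
                    queue.append(name)
        return to_skip

    print(collect_transitive_dependents("1:Login"))  # {'1:Checkout', '1:Report'}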
@@ -1457,9 +1550,13 @@ def _parallel_execute_dynamic(
     opts_for_run,
     pabot_args,
 ):
-    original_signal_handler = signal.signal(signal.SIGINT, keyboard_interrupt)
+    # Signal handler is already set in main_program, no need to set it again
+    # Just use the thread pool without managing signals
+    global _MAX_EXECUTORS, _EXECUTOR_COUNTER

     max_processes = processes or len(items)
+    _MAX_EXECUTORS = max_processes
+    _EXECUTOR_COUNTER = 0  # Reset executor counter for each parallel execution batch
     pool = ThreadPool(max_processes)

     pending = set(items)
@@ -1475,24 +1572,28 @@

         with lock:
             running.pop(it, None)
-            completed.add(it.execution_item.name)
+            unique_name = _get_unique_execution_name(it)
+            completed.add(unique_name)

             if rc != 0:
-                failed.add(it.execution_item.name)
+                failed.add(unique_name)

                 if failure_policy == "skip":
                     to_skip_names = _collect_transitive_dependents(
-                        it.execution_item.name,
+                        unique_name,
                         pending,
                     )

                     for other in list(pending):
-                        if other.execution_item.name in to_skip_names:
-                            _write(
-                                f"Skipping '{other.execution_item.name}' because dependency "
-                                f"'{it.execution_item.name}' failed (transitive).",
-                                Color.YELLOW,
-                            )
+                        other_unique_name = _get_unique_execution_name(other)
+                        if other_unique_name in to_skip_names:
+                            # Only log skip once when first marking it as skipped
+                            if not other.skip:
+                                _write(
+                                    f"Skipping '{other_unique_name}' because dependency "
+                                    f"'{unique_name}' failed (transitive).",
+                                    Color.YELLOW, level="debug"
+                                )
                             other.skip = True

     try:
@@ -1508,7 +1609,7 @@
                 pending.remove(item)

                 result = pool.apply_async(
-                    execute_and_wait_with,
+                    _execute_item_with_executor_tracking,
                     (item,),
                     callback=lambda rc, it=item: on_complete(it, rc),
                 )
@@ -1526,15 +1627,19 @@
     finally:
         pool.close()
-        signal.signal(signal.SIGINT, original_signal_handler)
+        # Signal handler was set in main_program and will be restored there


 def _parallel_execute(
     items, processes, datasources, outs_dir, opts_for_run, pabot_args
 ):
-    original_signal_handler = signal.signal(signal.SIGINT, keyboard_interrupt)
-    pool = ThreadPool(len(items) if processes is None else processes)
-    results = [pool.map_async(execute_and_wait_with, items, 1)]
+    # Signal handler is already set in main_program, no need to set it again
+    global _MAX_EXECUTORS, _EXECUTOR_COUNTER
+    max_workers = len(items) if processes is None else processes
+    _MAX_EXECUTORS = max_workers
+    _EXECUTOR_COUNTER = 0  # Reset executor counter for each parallel execution batch
+    pool = ThreadPool(max_workers)
+    results = [pool.map_async(_execute_item_with_executor_tracking, items, 1)]
     delayed_result_append = 0
     new_items = []
     while not all(result.ready() for result in results) or delayed_result_append > 0:
@@ -1554,10 +1659,10 @@
         delayed_result_append = max(0, delayed_result_append - 1)
         if new_items and delayed_result_append == 0:
             _construct_last_levels([new_items])
-            results.append(pool.map_async(execute_and_wait_with, new_items, 1))
+            results.append(pool.map_async(_execute_item_with_executor_tracking, new_items, 1))
             new_items = []
     pool.close()
-    signal.signal(signal.SIGINT, original_signal_handler)
+    # Signal handler will be restored in main_program's finally block


 def _output_dir(options, cleanup=True):
@@ -1692,7 +1797,9 @@ def _report_results(outs_dir, pabot_args, options, start_time_string, tests_root
         if "output" not in options:
             options["output"] = "output.xml"
         _write_stats(stats)
-        exit_code = rebot(*outputs, **_options_for_rebot(options, start_time_string, _now()))
+        stdout_writer = get_stdout_writer()
+        stderr_writer = get_stderr_writer(original_stderr_name='Internal Rebot')
+        exit_code = rebot(*outputs, **_options_for_rebot(options, start_time_string, _now()), stdout=stdout_writer, stderr=stderr_writer)
     else:
         exit_code = _report_results_for_one_run(
             outs_dir, pabot_args, options, start_time_string, tests_root_name, stats
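
Note: routing rebot's console output through pabot's writers relies on Robot Framework's programmatic API: robot.rebot(*outputs, **options) accepts the special keyword arguments stdout and stderr, which may be any file-like objects. A minimal standalone illustration (assumes an existing output.xml):

    from io import StringIO
    from robot import rebot

    out, err = StringIO(), StringIO()
    # Console messages from report/log generation land in the buffers
    # instead of the real terminal.
    rc = rebot("output.xml", outputdir="merged", stdout=out, stderr=err)
    print("rebot rc:", rc)
    print(out.getvalue())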
@@ -1702,12 +1809,12 @@ def _report_results(outs_dir, pabot_args, options, start_time_string, tests_root
         _write(("[ " + _wrap_with(Color.YELLOW, 'WARNING') + " ] "
                 "One or more subprocesses encountered an error and the "
                 "internal .xml files could not be generated. Please check the "
-                "following stderr files to identify the cause:"))
+                "following stderr files to identify the cause:"), level="warning")
         for missing in missing_outputs:
-            _write(repr(missing))
+            _write(repr(missing), level="warning")
         _write((f"[ " + _wrap_with(Color.RED, 'ERROR') + " ] "
                 "The output, log and report files produced by Pabot are "
-                "incomplete and do not contain all test cases."))
+                "incomplete and do not contain all test cases."), level="error")
     return exit_code if not missing_outputs else 252


@@ -1717,18 +1824,18 @@ def _write_stats(stats):
         al = stats["all"]
         _write(
             "%d critical tests, %d passed, %d failed"
-            % (crit["total"], crit["passed"], crit["failed"])
+            % (crit["total"], crit["passed"], crit["failed"]), level="info"
         )
         _write(
             "%d tests total, %d passed, %d failed"
-            % (al["total"], al["passed"], al["failed"])
+            % (al["total"], al["passed"], al["failed"]), level="info"
         )
     else:
         _write(
             "%d tests, %d passed, %d failed, %d skipped."
-            % (stats["total"], stats["passed"], stats["failed"], stats["skipped"])
+            % (stats["total"], stats["passed"], stats["failed"], stats["skipped"]), level="info"
        )
-    _write("===================================================")
+    _write("===================================================", level="info")


 def add_timestamp_to_filename(file_path: str, timestamp: str) -> str:
@@ -1770,9 +1877,12 @@ def _report_results_for_one_run(
             "output"
         ] = output_path  # REBOT will return error 252 if nothing is written
     else:
-        _write("Output: %s" % output_path)
+        _write("Output: %s" % output_path, level="info")
     options["output"] = None  # Do not write output again with rebot
-    return rebot(output_path, **_options_for_rebot(options, start_time_string, ts))
+    stdout_writer = get_stdout_writer()
+    stderr_writer = get_stderr_writer(original_stderr_name="Internal Rebot")
+    exit_code = rebot(output_path, **_options_for_rebot(options, start_time_string, ts), stdout=stdout_writer, stderr=stderr_writer)
+    return exit_code


 def _merge_one_run(
@@ -1794,7 +1904,7 @@ def _merge_one_run(
     files = natsorted(files)

     if not files:
-        _write('WARN: No output files in "%s"' % outs_dir, Color.YELLOW)
+        _write('[ WARNING ]: No output files in "%s"' % outs_dir, Color.YELLOW, level="warning")
         return ""

     def invalid_xml_callback():
@@ -1843,9 +1953,9 @@ def _glob_escape(pathname):
     return drive + pathname


-def _write(message, color=None):
+def _write(message, color=None, level="debug"):
     writer = get_writer()
-    writer.write(message, color=color)
+    writer.write(message, color=color, level=level)


 def _wrap_with(color, message):
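
Note: the writer module itself is not part of this diff, so the exact semantics of level are not visible here. A plausible minimal shape, purely as a sketch of how a level-aware writer could log everything while letting console_type (the new _PABOTCONSOLE setting) gate what reaches the terminal; the class below is hypothetical, not pabot's MessageWriter:

    import sys

    LEVELS = {"debug": 0, "info": 1, "warning": 2, "error": 3}

    class LeveledWriter:
        """Hypothetical stand-in for pabot's MessageWriter."""

        def __init__(self, log_sink, console_type="verbose"):
            self.log_sink = log_sink  # a real writer would use a file in the output dir
            # "verbose" echoes everything; anything else, warnings and up only.
            self.console_min = 0 if console_type == "verbose" else 2

        def write(self, message, color=None, level="debug"):
            self.log_sink.write("[%s] %s\n" % (level.upper(), message))  # always logged
            base = level.split("_", 1)[0]  # "info_passed"/"info_failed" map onto "info"
            if LEVELS.get(base, 0) >= self.console_min:
                print(message)

    w = LeveledWriter(sys.stderr, console_type="quiet")
    w.write("Skipping suite X", level="debug")                   # log sink only
    w.write("storing .pabotsuitenames failed", level="warning")  # log sink and console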
@@ -1891,7 +2001,7 @@ def _start_remote_library(pabot_args):  # type: (dict) -> Optional[subprocess.Po
         _write(
             f"Warning: specified pabotlibport {port} is already in use. "
             "A free port will be assigned automatically.",
-            Color.YELLOW,
+            Color.YELLOW, level="warning"
         )
         port = _get_free_port()

@@ -1905,7 +2015,7 @@ def _start_remote_library(pabot_args):  # type: (dict) -> Optional[subprocess.Po
         _write(
             "Warning: specified resource file doesn't exist."
             " Some tests may fail or continue forever.",
-            Color.YELLOW,
+            Color.YELLOW, level="warning"
         )
         resourcefile = ""
     cmd = [
@@ -1915,28 +2025,73 @@
         pabot_args["pabotlibhost"],
         str(port),
     ]
-    return subprocess.Popen(cmd)
+    # Start PabotLib in isolation so it doesn't receive CTRL+C when the main process is interrupted.
+    # This allows graceful shutdown in finally block.
+    kwargs = {
+        "stdout": subprocess.PIPE,
+        "stderr": subprocess.STDOUT,
+        "text": True,
+        "bufsize": 1,
+        "env": {**os.environ, "PYTHONUNBUFFERED": "1"},
+    }
+    if sys.platform.startswith('win'):
+        # Windows: use CREATE_NEW_PROCESS_GROUP
+        kwargs["creationflags"] = subprocess.CREATE_NEW_PROCESS_GROUP
+    else:
+        # Unix/Linux/macOS: use preexec_fn to create new session
+        import os as os_module
+        kwargs["preexec_fn"] = os_module.setsid
+
+    process = subprocess.Popen(cmd, **kwargs)
+
+    def _read_output(proc, writer):
+        for line in proc.stdout:
+            if line.strip():  # Skip empty lines
+                writer.write(line.rstrip('\n') + '\n', level="info")
+                writer.flush()
+        proc.stdout.close()
+
+    pabotlib_writer = ThreadSafeWriter(get_writer())
+    thread = threading.Thread(
+        target=_read_output,
+        args=(process, pabotlib_writer),
+        daemon=False,  # Non-daemon so output is captured before exit
+    )
+    thread.start()
+
+    return process


 def _stop_remote_library(process):  # type: (subprocess.Popen) -> None
-    _write("Stopping PabotLib process")
+    _write("Stopping PabotLib process", level="debug")
     try:
         remoteLib = Remote(_PABOTLIBURI)
         remoteLib.run_keyword("stop_remote_libraries", [], {})
         remoteLib.run_keyword("stop_remote_server", [], {})
     except RuntimeError:
-        _write("Could not connect to PabotLib - assuming stopped already")
-        return
+        _write("Could not connect to PabotLib - assuming stopped already", level="info")
+
+    # Always wait for graceful shutdown, regardless of remote connection status
     i = 50
     while i > 0 and process.poll() is None:
         time.sleep(0.1)
         i -= 1
-    if i == 0:
+
+    # If still running after remote stop attempt, terminate it
+    if process.poll() is None:
         _write(
             "Could not stop PabotLib Process in 5 seconds " "- calling terminate",
-            Color.YELLOW,
+            Color.YELLOW, level="warning"
         )
         process.terminate()
+        # Give it a moment to respond to SIGTERM
+        time.sleep(0.5)
+        if process.poll() is None:
+            _write(
+                "PabotLib Process did not respond to terminate - calling kill",
+                Color.RED, level="error"
+            )
+            process.kill()
     else:
         _write("PabotLib process stopped")
@@ -2051,7 +2206,9 @@ def _create_execution_items_for_run(
     return all_items


-def _create_items(datasources, opts_for_run, outs_dir, pabot_args, suite_group):
+def _create_items(datasources, opts_for_run, outs_dir, pabot_args, suite_group, argfile=None):
+    # If argfile is provided, use only that one. Otherwise, loop through all argumentfiles.
+    argumentfiles = [argfile] if argfile is not None else (pabot_args["argumentfiles"] or [("", None)])
     return [
         QueueItem(
             datasources,
@@ -2060,13 +2217,13 @@ def _create_items(datasources, opts_for_run, outs_dir, pabot_args, suite_group):
             suite,
             pabot_args["command"],
             pabot_args["verbose"],
-            argfile,
+            af,
             pabot_args.get("hive"),
             pabot_args["processes"],
             pabot_args["processtimeout"],
         )
         for suite in suite_group
-        for argfile in pabot_args["argumentfiles"] or [("", None)]
+        for af in argumentfiles
     ]


@@ -2093,31 +2250,20 @@ def _create_execution_items_for_dry_run(
 def _chunk_items(items, chunk_size):
     for i in range(0, len(items), chunk_size):
         chunked_items = items[i : i + chunk_size]
-        base_item = chunked_items[0]
-        if not base_item:
+        if not chunked_items:
             continue
+        # For TestItem execution items, yield each item separately
+        # For Suite items, combine them into one item
+        base_item = chunked_items[0]
         if isinstance(base_item.execution_item, TestItem):
             for item in chunked_items:
-                chunked_item = _queue_item(base_item, item.execution_item)
-                yield chunked_item
+                yield item
         else:
+            # For suites, create a combined execution item with all suite execution items
             execution_items = SuiteItems([item.execution_item for item in chunked_items])
-            chunked_item = _queue_item(base_item, execution_items)
-            yield chunked_item
-
-
-def _queue_item(base_item, execution_items):
-    return QueueItem(
-        base_item.datasources,
-        base_item.outs_dir,
-        base_item.options,
-        execution_items,
-        base_item.command,
-        base_item.verbose,
-        (base_item.argfile_index, base_item.argfile),
-        processes=base_item.processes,
-        timeout=base_item.timeout,
-    )
+            # Reuse the base item but update its execution_item to the combined one
+            base_item.execution_item = execution_items
+            yield base_item


 def _find_ending_level(name, group):
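
Note: the rewritten generator yields test chunks item-by-item and collapses suite chunks by mutating the first QueueItem in place, instead of building a fresh one via the removed _queue_item helper. A toy model of the two branches (generic stand-in classes, not pabot's real QueueItem/SuiteItems):

    class Item:
        # Minimal stand-in carrying only the field the generator touches.
        def __init__(self, execution_item):
            self.execution_item = execution_item

    def chunk_items(items, chunk_size, is_test):
        for i in range(0, len(items), chunk_size):
            part = items[i : i + chunk_size]
            if not part:
                continue
            if is_test:
                yield from part  # tests stay one item per test
            else:
                base = part[0]
                # suites collapse: the first item is reused, its payload replaced
                base.execution_item = [it.execution_item for it in part]
                yield base

    suites = [Item(f"Suite{i}") for i in range(5)]
    print([it.execution_item for it in chunk_items(suites, 2, is_test=False)])
    # [['Suite0', 'Suite1'], ['Suite2', 'Suite3'], ['Suite4']]

Reusing the first item avoids copying options and timeouts, at the cost of mutating an object that may still be referenced elsewhere.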
@@ -2184,10 +2330,10 @@ def _get_dynamically_created_execution_items(
         new_suites = plib.run_keyword("get_added_suites", [], {})
     except RuntimeError as err:
         _write(
-            "[WARN] PabotLib unreachable during post-run phase, "
+            "[ WARNING ] PabotLib unreachable during post-run phase, "
             "assuming no dynamically added suites. "
             "Original error: %s",
-            err,
+            err, level="warning"
         )
         new_suites = []
     if len(new_suites) == 0:
@@ -2220,7 +2366,7 @@ def main(args=None):


 def main_program(args):
-    global _PABOTLIBPROCESS
+    global _PABOTLIBPROCESS, _PABOTCONSOLE, _PABOTWRITER
     outs_dir = None
     args = args or sys.argv[1:]
     if len(args) == 0:
@@ -2234,8 +2380,10 @@
     start_time = time.time()
     start_time_string = _now()
     # NOTE: timeout option
+    original_signal_handler = signal.default_int_handler  # Save default handler in case of early exit
     try:
         options, datasources, pabot_args, opts_for_run = parse_args(args)
+        _PABOTCONSOLE = pabot_args.get("pabotconsole", "verbose")
         if pabot_args["help"]:
             help_print = __doc__.replace(
                 "PLACEHOLDER_README.MD",
@@ -2250,11 +2398,14 @@
         outs_dir = _output_dir(options)

         # These ensure MessageWriter and ProcessManager are ready before any parallel execution.
-        writer = get_writer(log_dir=outs_dir)
+        _PABOTWRITER = get_writer(log_dir=outs_dir, console_type=_PABOTCONSOLE)
         _ensure_process_manager()
-        _write(f"Initialized logging in {outs_dir}")
+        _write(f"Initialized logging in {outs_dir}", level="info")

         _PABOTLIBPROCESS = _start_remote_library(pabot_args)
+        # Set up signal handler to keep PabotLib alive during CTRL+C
+        # This ensures graceful shutdown in the finally block
+        original_signal_handler = signal.signal(signal.SIGINT, keyboard_interrupt)
         if _pabotlib_in_use():
             _initialize_queue_index()

@@ -2262,19 +2413,23 @@
         if pabot_args["verbose"]:
             _write("Suite names resolved in %s seconds" % str(time.time() - start_time))
         if not suite_groups or suite_groups == [[]]:
-            _write("No tests to execute")
+            _write("No tests to execute", level="info")
             if not options.get("runemptysuite", False):
                 return 252
-        execution_items = _create_execution_items(
+
+        # Create execution items for all argumentfiles at once
+        all_execution_items = _create_execution_items(
             suite_groups, datasources, outs_dir, options, opts_for_run, pabot_args
         )
+
+        # Now execute all items from all argumentfiles in parallel
         if pabot_args.get("ordering", {}).get("mode") == "dynamic":
             # flatten stages
-            all_items = []
-            for stage in execution_items:
-                all_items.extend(stage)
+            flattened_items = []
+            for stage in all_execution_items:
+                flattened_items.extend(stage)
             _parallel_execute_dynamic(
-                all_items,
+                flattened_items,
                 pabot_args["processes"],
                 datasources,
                 outs_dir,
@@ -2282,8 +2437,8 @@
                 pabot_args,
             )
         else:
-            while execution_items:
-                items = execution_items.pop(0)
+            while all_execution_items:
+                items = all_execution_items.pop(0)
                 _parallel_execute(
                     items,
                     pabot_args["processes"],
@@ -2297,8 +2452,8 @@
                 "All tests were executed, but the --no-rebot argument was given, "
                 "so the results were not compiled, and no summary was generated. "
                 f"All results have been saved in the {outs_dir} folder."
-            ))
-            _write("===================================================")
+            ), level="info")
+            _write("===================================================", level="info")
             return 253
         result_code = _report_results(
             outs_dir,
@@ -2307,67 +2462,119 @@
             start_time_string,
             _get_suite_root_name(suite_groups),
         )
+        # If CTRL+C was pressed during execution, raise KeyboardInterrupt now.
+        # This can happen without previous errors if tests are for example almost ready.
+        if CTRL_C_PRESSED:
+            raise KeyboardInterrupt()
         return result_code if not _ABNORMAL_EXIT_HAPPENED else 252
     except Information as i:
         version_print = __doc__.replace("\nPLACEHOLDER_README.MD\n", "")
         print(version_print.replace("[PABOT_VERSION]", PABOT_VERSION))
-        print(i.message)
+        if _PABOTWRITER:
+            _write(i.message, level="info")
+        else:
+            print(i.message)
         return 251
     except DataError as err:
-        print(err.message)
+        if _PABOTWRITER:
+            _write(err.message, Color.RED, level="error")
+        else:
+            print(err.message)
         return 252
-    except Exception:
+    except (Exception, KeyboardInterrupt):
         if not CTRL_C_PRESSED:
-            _write("[ERROR] EXCEPTION RAISED DURING PABOT EXECUTION", Color.RED)
-            _write(
-                "[ERROR] PLEASE CONSIDER REPORTING THIS ISSUE TO https://github.com/mkorpela/pabot/issues",
-                Color.RED,
-            )
-            _write("Pabot: %s" % PABOT_VERSION)
-            _write("Python: %s" % sys.version)
-            _write("Robot Framework: %s" % ROBOT_VERSION)
+            if _PABOTWRITER:
+                _write("[ ERROR ] EXCEPTION RAISED DURING PABOT EXECUTION", Color.RED, level="error")
+                _write(
+                    "[ ERROR ] PLEASE CONSIDER REPORTING THIS ISSUE TO https://github.com/mkorpela/pabot/issues",
+                    Color.RED, level="error"
+                )
+                _write("Pabot: %s" % PABOT_VERSION, level="info")
+                _write("Python: %s" % sys.version, level="info")
+                _write("Robot Framework: %s" % ROBOT_VERSION, level="info")
+            else:
+                print("[ ERROR ] EXCEPTION RAISED DURING PABOT EXECUTION")
+                print("[ ERROR ] PLEASE CONSIDER REPORTING THIS ISSUE TO https://github.com/mkorpela/pabot/issues")
+                print("Pabot: %s" % PABOT_VERSION)
+                print("Python: %s" % sys.version)
+                print("Robot Framework: %s" % ROBOT_VERSION)
             import traceback
             traceback.print_exc()
-            sys.exit(255)
+            return 255
         else:
-            _write("[ERROR] Execution stopped by user (Ctrl+C)", Color.RED)
-            sys.exit(253)
+            if _PABOTWRITER:
+                _write("[ ERROR ] Execution stopped by user (Ctrl+C)", Color.RED, level="error")
+            else:
+                print("[ ERROR ] Execution stopped by user (Ctrl+C)")
+            return 253
     finally:
-        # Ensure that writer exists
-        writer = None
+        if _PABOTWRITER:
+            _write("Finalizing Pabot execution...", level="debug")
+        else:
+            print("Finalizing Pabot execution...")
+
+        # Restore original signal handler
         try:
-            if outs_dir is not None:
-                writer = get_writer(log_dir=outs_dir)
+            signal.signal(signal.SIGINT, original_signal_handler)
         except Exception as e:
-            print(f"[WARN] Could not initialize writer in finally: {e}")
-        # Try to stop remote library
+            if _PABOTWRITER:
+                _write(f"[ WARNING ] Could not restore signal handler: {e}", Color.YELLOW, level="warning")
+            else:
+                print(f"[ WARNING ] Could not restore signal handler: {e}")
+
+        # First: Terminate all test subprocesses gracefully
+        # This must happen BEFORE stopping PabotLib so test processes
+        # can cleanly disconnect from the remote library
+        try:
+            if _PROCESS_MANAGER:
+                _PROCESS_MANAGER.terminate_all()
+        except Exception as e:
+            if _PABOTWRITER:
+                _write(f"[ WARNING ] Could not terminate test subprocesses: {e}", Color.YELLOW, level="warning")
+            else:
+                print(f"[ WARNING ] Could not terminate test subprocesses: {e}")
+
+        # Then: Stop PabotLib after all test processes are gone
+        # This ensures clean shutdown with no orphaned remote connections
         try:
             if _PABOTLIBPROCESS:
                 _stop_remote_library(_PABOTLIBPROCESS)
         except Exception as e:
-            if writer:
-                writer.write(f"[WARN] Failed to stop remote library cleanly: {e}", Color.YELLOW)
+            if _PABOTWRITER:
+                _write(f"[ WARNING ] Failed to stop remote library cleanly: {e}", Color.YELLOW, level="warning")
             else:
-                print(f"[WARN] Failed to stop remote library cleanly: {e}")
-        # print elapsed time
+                print(f"[ WARNING ] Failed to stop remote library cleanly: {e}")
+
+        # Print elapsed time
        try:
             _print_elapsed(start_time, time.time())
         except Exception as e:
-            if writer:
-                writer.write(f"[WARN] Failed to print elapsed time: {e}", Color.YELLOW)
+            if _PABOTWRITER:
+                _write(f"[ WARNING ] Failed to print elapsed time: {e}", Color.YELLOW, level="warning")
             else:
-                print(f"[WARN] Failed to print elapsed time: {e}")
+                print(f"[ WARNING ] Failed to print elapsed time: {e}")
+
         # Flush and stop writer
-        if writer:
-            try:
-                writer.flush()
-                writer.write("Logs flushed successfully.")
-            except Exception as e:
-                print(f"[WARN] Could not flush writer: {e}")
-            try:
-                writer.stop()
-            except Exception as e:
-                print(f"[WARN] Could not stop writer: {e}")
+        try:
+            if _PABOTWRITER:
+                _PABOTWRITER.flush()
+                _PABOTWRITER.write("Logs flushed successfully.", level="debug")
+            else:
+                writer = get_writer()
+                if writer:
+                    writer.flush()
+        except Exception as e:
+            print(f"[ WARNING ] Could not flush writer: {e}")
+
+        try:
+            if _PABOTWRITER:
+                _PABOTWRITER.stop()
+            else:
+                writer = get_writer()
+                if writer:
+                    writer.stop()
+        except Exception as e:
+            print(f"[ WARNING ] Could not stop writer: {e}")


 def _parse_ordering(filename):  # type: (str) -> List[ExecutionItem]
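
Note: the finally block now runs a fixed teardown sequence (restore the SIGINT handler, terminate test subprocesses, stop PabotLib, print timing, then flush and stop the writer), with each step wrapped in its own try/except so one failure cannot block the rest. The same step-isolation idiom in miniature, with hypothetical step names:

    def run_teardown(steps):
        # Run every step even if an earlier one fails; report instead of raising.
        for name, step in steps:
            try:
                step()
            except Exception as e:
                print(f"[ WARNING ] {name} failed: {e}")

    def failing_step():
        raise RuntimeError("boom")

    run_teardown([
        ("restore signal handler", lambda: None),
        ("terminate subprocesses", failing_step),  # failure is contained here...
        ("stop PabotLib", lambda: None),           # ...and later steps still run
        ("flush writer", lambda: None),
    ])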
@@ -2403,13 +2610,13 @@ def _check_ordering(ordering_file, suite_names):  # type: (List[ExecutionItem],
             duplicates.append(f"{item.type.title()} item: '{item.name}'")
         suite_and_test_names.append(item.name)
     if skipped_runnable_items:
-        _write("Note: The ordering file contains test or suite items that are not included in the current test run. The following items will be ignored/skipped:")
+        _write("Note: The ordering file contains test or suite items that are not included in the current test run. The following items will be ignored/skipped:", level="info")
         for item in skipped_runnable_items:
-            _write(f" - {item}")
+            _write(f" - {item}", level="info")
     if duplicates:
-        _write("Note: The ordering file contains duplicate suite or test items. Only the first occurrence is taken into account. These are duplicates:")
+        _write("Note: The ordering file contains duplicate suite or test items. Only the first occurrence is taken into account. These are duplicates:", level="info")
         for item in duplicates:
-            _write(f" - {item}")
+            _write(f" - {item}", level="info")


 def _group_suites(outs_dir, datasources, options, pabot_args):