robotframework-pabot 4.3.2__py3-none-any.whl → 5.0.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pabot/__init__.py CHANGED
@@ -7,4 +7,4 @@ try:
  except ImportError:
  pass

- __version__ = "4.3.2"
+ __version__ = "5.0.0"
pabot/execution_items.py CHANGED
@@ -1,5 +1,5 @@
  from functools import total_ordering
- from typing import Dict, List, Optional, Tuple, Union
+ from typing import Dict, List, Optional, Tuple, Union, Set

  from robot import __version__ as ROBOT_VERSION
  from robot.errors import DataError
@@ -8,36 +8,68 @@ from robot.utils import PY2, is_unicode
  import re


- def create_dependency_tree(items):
+ def create_dependency_tree(items):
  # type: (List[ExecutionItem]) -> List[List[ExecutionItem]]
- independent_tests = list(filter(lambda item: not item.depends, items))
- dependency_tree = [independent_tests]
- dependent_tests = list(filter(lambda item: item.depends, items))
- unknown_dependent_tests = dependent_tests
- while len(unknown_dependent_tests) > 0:
- run_in_this_stage, run_later = [], []
- for d in unknown_dependent_tests:
- stage_indexes = []
- for i, stage in enumerate(dependency_tree):
- for test in stage:
- if test.name in d.depends:
- stage_indexes.append(i)
- # All #DEPENDS test are already run:
- if len(stage_indexes) == len(d.depends):
- run_in_this_stage.append(d)
+ dependency_tree = [] # type: List[List[ExecutionItem]]
+ scheduled = set() # type: Set[str]
+ name_to_item = {item.name: item for item in items} # type: Dict[str, ExecutionItem]
+
+ while items:
+ stage = [] #type: List[ExecutionItem]
+ stage_names = set() # type: Set[str]
+
+ for item in items:
+ if all(dep in scheduled for dep in item.depends):
+ stage.append(item)
+ stage_names.add(item.name)
  else:
- run_later.append(d)
- unknown_dependent_tests = run_later
- if len(run_in_this_stage) == 0:
- text = "There are circular or unmet dependencies using #DEPENDS. Check this/these test(s): " + str(run_later)
- raise DataError(text)
- else:
- dependency_tree.append(run_in_this_stage)
- flattened_dependency_tree = sum(dependency_tree, [])
- if len(flattened_dependency_tree) != len(items):
- raise DataError(
- "Invalid test configuration: Circular or unmet dependencies detected between test suites. Please check your #DEPENDS definitions."
- )
+ break # Preserve input order
+
+ if not stage:
+ # Try to find any schedulable item even if it's out of order
+ for item in items:
+ if all(dep in scheduled for dep in item.depends):
+ stage = [item]
+ stage_names = {item.name}
+ break
+
+ if not stage:
+ # Prepare a detailed error message
+ unscheduled_items = [item.name for item in items]
+ unsatisfied_deps = {
+ item.name: [d for d in item.depends if d not in scheduled and d not in name_to_item]
+ for item in items
+ }
+ potential_cycles = {
+ item.name: [d for d in item.depends if d in unscheduled_items]
+ for item in items if item.depends
+ }
+
+ message = ["Invalid test configuration:"]
+
+ message_unsatisfied = []
+ for item, deps in unsatisfied_deps.items():
+ if deps:
+ message_unsatisfied.append(f" - {item} depends on missing: {', '.join(deps)}")
+ if message_unsatisfied:
+ message.append(" Unsatisfied dependencies:")
+ message.extend(message_unsatisfied)
+ message.append(" For these tests, check that there is not #WAIT between them and that they are not inside different groups { }")
+
+ message_cycles = []
+ for item, deps in potential_cycles.items():
+ if deps:
+ message_cycles.append(f" - {item} <-> {', '.join(deps)}")
+ if message_cycles:
+ message.append(" Possible circular dependencies:")
+ message.extend(message_cycles)
+
+ raise DataError("\n".join(message))
+
+ dependency_tree.append(stage)
+ scheduled.update(stage_names)
+ items = [item for item in items if item.name not in stage_names]
+
  return dependency_tree


@@ -47,6 +79,7 @@ class ExecutionItem(object):
  type = None # type: str
  name = None # type: str
  sleep = 0 # type: int
+ depends = [] # type: List[str] # Note that depends is used by RunnableItems.

  def top_name(self):
  # type: () -> str
@@ -156,7 +189,6 @@ class GroupItem(ExecutionItem):
  class RunnableItem(ExecutionItem):
  pass

- depends = None # type: List[str]
  depends_keyword = "#DEPENDS"

  def _split_dependencies(self, line_name, depends_indexes):
@@ -182,7 +214,7 @@ class RunnableItem(ExecutionItem):
  self.depends = (
  self._split_dependencies(line_name, depends_indexes)
  if len(depends_indexes) != 0
- else None
+ else []
  )

  def line(self):
@@ -243,6 +275,10 @@ class SuiteItem(RunnableItem):
  # TODO Make this happen
  return []

+ def modify_options_for_executor(self, options):
+ if not(options.get("runemptysuite") and options.get("suite")):
+ options[self.type] = self.name
+

  class TestItem(RunnableItem):
  type = "test"
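
The rewritten `create_dependency_tree` above schedules runnable items in stages: an item enters the current stage only once every name in its `#DEPENDS` list has already been scheduled, input order is preserved where possible, and a detailed `DataError` is raised when nothing can be scheduled. Below is a minimal, self-contained sketch of the same staging idea, using a hypothetical `Item` stand-in instead of pabot's `ExecutionItem` and omitting the order-preserving break/fallback and the detailed error report:

```python
from collections import namedtuple

# Hypothetical stand-in for ExecutionItem: a name plus its #DEPENDS names.
Item = namedtuple("Item", ["name", "depends"])

def stage_items(items):
    """Group items into stages so every dependency runs in an earlier stage."""
    stages, scheduled = [], set()
    while items:
        stage = [i for i in items if all(d in scheduled for d in i.depends)]
        if not stage:
            raise ValueError("Circular or unmet #DEPENDS: %s" % [i.name for i in items])
        stages.append(stage)
        scheduled.update(i.name for i in stage)
        items = [i for i in items if i.name not in scheduled]
    return stages

items = [
    Item("Suite.T3", depends=["Suite.T1", "Suite.T2"]),
    Item("Suite.T1", depends=[]),
    Item("Suite.T2", depends=["Suite.T1"]),
]
# Three stages: ['Suite.T1'], then ['Suite.T2'], then ['Suite.T3']
print([[i.name for i in stage] for stage in stage_items(items)])
```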
pabot/pabot.py CHANGED
@@ -109,10 +109,6 @@ EXECUTION_POOL_ID_LOCK = threading.Lock()
  POPEN_LOCK = threading.Lock()
  _PABOTLIBURI = "127.0.0.1:8270"
  _PABOTLIBPROCESS = None # type: Optional[subprocess.Popen]
- _BOURNELIKE_SHELL_BAD_CHARS_WITHOUT_DQUOTE = (
- "!#$^&*?[(){}<>~;'`\\|= \t\n" # does not contain '"'
- )
- _BAD_CHARS_SET = set(_BOURNELIKE_SHELL_BAD_CHARS_WITHOUT_DQUOTE)
  _NUMBER_OF_ITEMS_TO_BE_EXECUTED = 0
  _ABNORMAL_EXIT_HAPPENED = False

@@ -213,16 +209,6 @@ class Color:
  YELLOW = "\033[93m"


- def _mapOptionalQuote(command_args):
- # type: (List[str]) -> List[str]
- if os.name == "posix":
- return [quote(arg) for arg in command_args]
- return [
- arg if set(arg).isdisjoint(_BAD_CHARS_SET) else '"%s"' % arg
- for arg in command_args
- ]
-
-
  def execute_and_wait_with(item):
  # type: ('QueueItem') -> None
  global CTRL_C_PRESSED, _NUMBER_OF_ITEMS_TO_BE_EXECUTED
@@ -240,13 +226,13 @@ def execute_and_wait_with(item):
  name = item.display_name
  outs_dir = os.path.join(item.outs_dir, item.argfile_index, str(item.index))
  os.makedirs(outs_dir)
- cmd = _create_command_for_execution(
+ run_cmd, run_options = _create_command_for_execution(
  caller_id, datasources, is_last, item, outs_dir
  )
  if item.hive:
  _hived_execute(
  item.hive,
- cmd,
+ run_cmd + run_options,
  outs_dir,
  name,
  item.verbose,
@@ -256,7 +242,8 @@ def execute_and_wait_with(item):
  )
  else:
  _try_execute_and_wait(
- cmd,
+ run_cmd,
+ run_options,
  outs_dir,
  name,
  item.verbose,
@@ -268,7 +255,7 @@ def execute_and_wait_with(item):
  sleep_before_start=item.sleep_before_start
  )
  outputxml_preprocessing(
- item.options, outs_dir, name, item.verbose, _make_id(), caller_id
+ item.options, outs_dir, name, item.verbose, _make_id(), caller_id, item.index
  )
  except:
  _write(traceback.format_exc())
@@ -278,9 +265,8 @@ def _create_command_for_execution(caller_id, datasources, is_last, item, outs_di
  options = item.options.copy()
  if item.command == ["robot"] and not options["listener"]:
  options["listener"] = ["RobotStackTracer"]
- cmd = (
- item.command
- + _options_for_custom_executor(
+ run_options = (
+ _options_for_custom_executor(
  options,
  outs_dir,
  item.execution_item,
@@ -291,12 +277,9 @@ def _create_command_for_execution(caller_id, datasources, is_last, item, outs_di
  item.last_level,
  item.processes,
  )
- # If the datasource ends with a backslash '\', it is deleted to ensure
- # correct handling of the escape character later on.
- + [os.path.normpath(s) for s in datasources]
+ + datasources
  )
- return _mapOptionalQuote(cmd)
-
+ return item.command, run_options

  def _pabotlib_in_use():
  return _PABOTLIBPROCESS or _PABOTLIBURI != "127.0.0.1:8270"
@@ -317,7 +300,8 @@ def _hived_execute(


  def _try_execute_and_wait(
- cmd,
+ run_cmd,
+ run_options,
  outs_dir,
  item_name,
  verbose,
@@ -328,16 +312,17 @@ def _try_execute_and_wait(
  process_timeout=None,
  sleep_before_start=0
  ):
- # type: (List[str], str, str, bool, int, str, int, bool, Optional[int], int) -> None
+ # type: (List[str], List[str], str, str, bool, int, str, int, bool, Optional[int], int) -> None
  plib = None
  is_ignored = False
  if _pabotlib_in_use():
  plib = Remote(_PABOTLIBURI)
  try:
- with open(os.path.join(outs_dir, cmd[0] + "_stdout.out"), "w") as stdout:
- with open(os.path.join(outs_dir, cmd[0] + "_stderr.out"), "w") as stderr:
+ with open(os.path.join(outs_dir, run_cmd[-1] + "_stdout.out"), "w") as stdout:
+ with open(os.path.join(outs_dir, run_cmd[-1] + "_stderr.out"), "w") as stderr:
  process, (rc, elapsed) = _run(
- cmd,
+ run_cmd,
+ run_options,
  stderr,
  stdout,
  item_name,
@@ -418,8 +403,8 @@ def _is_ignored(plib, caller_id): # type: (Remote, str) -> bool

  # optionally invoke rebot for output.xml preprocessing to get --RemoveKeywords
  # and --flattenkeywords applied => result: much smaller output.xml files + faster merging + avoid MemoryErrors
- def outputxml_preprocessing(options, outs_dir, item_name, verbose, pool_id, caller_id):
- # type: (Dict[str, Any], str, str, bool, int, str) -> None
+ def outputxml_preprocessing(options, outs_dir, item_name, verbose, pool_id, caller_id, item_id):
+ # type: (Dict[str, Any], str, str, bool, int, str, int) -> None
  try:
  remove_keywords = options["removekeywords"]
  flatten_keywords = options["flattenkeywords"]
@@ -432,11 +417,15 @@ def outputxml_preprocessing(options, outs_dir, item_name, verbose, pool_id, call
  remove_keywords_args += ["--removekeywords", k]
  for k in flatten_keywords:
  flatten_keywords_args += ["--flattenkeywords", k]
- outputxmlfile = os.path.join(outs_dir, "output.xml")
+ output_name = options.get("output", "output.xml")
+ outputxmlfile = os.path.join(outs_dir, output_name)
+ if not os.path.isfile(outputxmlfile):
+ raise DataError(f"Preprosessing cannot be done because file {outputxmlfile} not exists.")
  oldsize = os.path.getsize(outputxmlfile)
- cmd = (
+ process_empty = ["--processemptysuite"] if options.get("runemptysuite") else []
+ run_cmd = ["rebot"]
+ run_options = (
  [
- "rebot",
  "--log",
  "NONE",
  "--report",
@@ -447,18 +436,20 @@
  "off",
  "--NoStatusRC",
  ]
+ + process_empty
  + remove_keywords_args
  + flatten_keywords_args
  + ["--output", outputxmlfile, outputxmlfile]
  )
- cmd = _mapOptionalQuote(cmd)
  _try_execute_and_wait(
- cmd,
+ run_cmd,
+ run_options,
  outs_dir,
- "preprocessing output.xml on " + item_name,
+ f"preprocessing {output_name} on " + item_name,
  verbose,
  pool_id,
  caller_id,
+ item_id,
  )
  newsize = os.path.getsize(outputxmlfile)
  perc = 100 * newsize / oldsize
@@ -519,8 +510,37 @@ def _increase_completed(plib, my_index):
  )


+ def _write_internal_argument_file(cmd_args, filename):
+ # type: (List[str], str) -> None
+ """
+ Writes a list of command-line arguments to a file.
+ If an argument starts with '-' or '--', its value (the next item) is written on the same line.
+
+ Example:
+ ['--name', 'value', '--flag', '--other', 'x']
+ becomes:
+ --name value
+ --flag
+ --other x
+
+ :param cmd_args: List of argument strings to write
+ :param filename: Target filename
+ """
+ with open(filename, "w", encoding="utf-8") as f:
+ i = 0
+ while i < len(cmd_args):
+ current = cmd_args[i]
+ if current.startswith("-") and i + 1 < len(cmd_args) and not cmd_args[i + 1].startswith("-"):
+ f.write(f"{current} {cmd_args[i + 1]}\n")
+ i += 2
+ else:
+ f.write(f"{current}\n")
+ i += 1
+
+
  def _run(
- command,
+ run_command,
+ run_options,
  stderr,
  stdout,
  item_name,
@@ -531,7 +551,7 @@ def _run(
  process_timeout,
  sleep_before_start,
  ):
- # type: (List[str], IO[Any], IO[Any], str, bool, int, int, str, Optional[int], int) -> Tuple[Union[subprocess.Popen[bytes], subprocess.Popen], Tuple[int, float]]
+ # type: (List[str], List[str], IO[Any], IO[Any], str, bool, int, int, str, Optional[int], int) -> Tuple[Union[subprocess.Popen[bytes], subprocess.Popen], Tuple[int, float]]
  timestamp = datetime.datetime.now()
  if sleep_before_start > 0:
  _write(
@@ -540,7 +560,10 @@
  )
  time.sleep(sleep_before_start)
  timestamp = datetime.datetime.now()
- cmd = " ".join(command)
+ command_name = run_command[-1].replace(" ", "_")
+ argfile_path = os.path.join(outs_dir, f"{command_name}_argfile.txt")
+ _write_internal_argument_file(run_options, filename=argfile_path)
+ cmd = ' '.join(run_cmd + ['-A'] + [argfile_path])
  if PY2:
  cmd = cmd.decode("utf-8").encode(SYSTEM_ENCODING)
  # avoid hitting https://bugs.python.org/issue10394
@@ -701,14 +724,14 @@ def _options_for_executor(
  if pabotLastLevel not in options["variable"]:
  options["variable"].append(pabotLastLevel)
  if argfile:
- _modify_options_for_argfile_use(argfile, options, execution_item.top_name())
+ _modify_options_for_argfile_use(argfile, options)
  options["argumentfile"] = argfile
  if options.get("test", False) and options.get("include", []):
  del options["include"]
  return _set_terminal_coloring_options(options)


- def _modify_options_for_argfile_use(argfile, options, root_name):
+ def _modify_options_for_argfile_use(argfile, options):
  argfile_opts, _ = ArgumentParser(
  USAGE,
  **_filter_argument_parser_options(
@@ -717,21 +740,20 @@
  env_options="ROBOT_OPTIONS",
  ),
  ).parse_args(["--argumentfile", argfile])
- old_name = options.get("name", root_name)
  if argfile_opts["name"]:
  new_name = argfile_opts["name"]
- _replace_base_name(new_name, old_name, options, "suite")
+ _replace_base_name(new_name, options, "suite")
  if not options["suite"]:
- _replace_base_name(new_name, old_name, options, "test")
+ _replace_base_name(new_name, options, "test")
  if "name" in options:
  del options["name"]


- def _replace_base_name(new_name, old_name, options, key):
+ def _replace_base_name(new_name, options, key):
  if isinstance(options.get(key, None), str):
- options[key] = new_name + options[key][len(old_name) :]
+ options[key] = new_name + '.' + options[key].split('.', 1)[1]
  elif key in options:
- options[key] = [new_name + s[len(old_name) :] for s in options.get(key, [])]
+ options[key] = [new_name + '.' + s.split('.', 1)[1] for s in options.get(key, [])]


  def _set_terminal_coloring_options(options):
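
With the argument-file handling above, `_replace_base_name` no longer needs the old root name: it swaps everything before the first `.` of a `--suite`/`--test` value for the new top-level name. A small sketch of that transformation with hypothetical names:

```python
def replace_base_name(new_name, value):
    # Mirrors the new one-liner in _replace_base_name: keep everything after
    # the first dot and prepend the new top-level name.
    # Assumes the value contains at least one dot, as in the diff.
    return new_name + '.' + value.split('.', 1)[1]

# "Old Root.Sub Suite.Test 1" becomes "New Root.Sub Suite.Test 1"
print(replace_base_name("New Root", "Old Root.Sub Suite.Test 1"))
```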
@@ -985,7 +1007,9 @@ def _levelsplit(
  tests = [] # type: List[ExecutionItem]
  for s in suites:
  tests.extend(s.tests)
- return tests
+ # If there are no tests, it may be that --runemptysuite option is used, so fallback suites
+ if tests:
+ return tests
  return list(suites)


@@ -1453,16 +1477,23 @@ def _output_dir(options, cleanup=True):
  return outpath


- def _copy_output_artifacts(options, file_extensions=None, include_subfolders=False):
+ def _get_timestamp_id(timestamp_str):
+ return datetime.datetime.strptime(timestamp_str, "%Y-%m-%d %H:%M:%S.%f").strftime("%Y%m%d_%H%M%S")
+
+
+ def _copy_output_artifacts(options, timestamp_id, file_extensions=None, include_subfolders=False, index=None):
  file_extensions = file_extensions or ["png"]
  pabot_outputdir = _output_dir(options, cleanup=False)
  outputdir = options.get("outputdir", ".")
  copied_artifacts = []
- for location, _, file_names in os.walk(pabot_outputdir):
+ one_run_outputdir = pabot_outputdir
+ if index: # For argumentfileN option:
+ one_run_outputdir = os.path.join(pabot_outputdir, index)
+ for location, _, file_names in os.walk(one_run_outputdir):
  for file_name in file_names:
  file_ext = file_name.split(".")[-1]
  if file_ext in file_extensions:
- rel_path = os.path.relpath(location, pabot_outputdir)
+ rel_path = os.path.relpath(location, one_run_outputdir)
  prefix = rel_path.split(os.sep)[0] # folders named "process-id"
  dst_folder_path = outputdir
  # if it is a file from sub-folders of "location"
@@ -1474,7 +1505,9 @@ def _copy_output_artifacts(options, file_extensions=None, include_subfolders=Fal
  dst_folder_path = os.path.join(outputdir, subfolder_path)
  if not os.path.isdir(dst_folder_path):
  os.makedirs(dst_folder_path)
- dst_file_name = "-".join([prefix, file_name])
+ dst_file_name = "-".join([timestamp_id, prefix, file_name])
+ if index:
+ dst_file_name = "-".join([timestamp_id, index, prefix, file_name])
  shutil.copy2(
  os.path.join(location, file_name),
  os.path.join(dst_folder_path, dst_file_name),
@@ -1483,20 +1516,22 @@ def _copy_output_artifacts(options, file_extensions=None, include_subfolders=Fal
  return copied_artifacts


- def _check_pabot_results_for_missing_xml(base_dir):
+ def _check_pabot_results_for_missing_xml(base_dir, command_name, output_xml_name):
  missing = []
  for root, dirs, _ in os.walk(base_dir):
  if root == base_dir:
  for subdir in dirs:
  subdir_path = os.path.join(base_dir, subdir)
- has_xml = any(fname.endswith('.xml') for fname in os.listdir(subdir_path))
+ has_xml = any(fname.endswith(output_xml_name) for fname in os.listdir(subdir_path))
  if not has_xml:
- missing.append(os.path.join(subdir_path, 'robot_stderr.out'))
+ command_name = command_name.replace(" ", "_")
+ missing.append(os.path.join(subdir_path, f'{command_name}_stderr.out'))
  break
  return missing


  def _report_results(outs_dir, pabot_args, options, start_time_string, tests_root_name):
+ output_xml_name = options.get("output") or "output.xml"
  if "pythonpath" in options:
  del options["pythonpath"]
  if ROBOT_VERSION < "4.0":
@@ -1516,7 +1551,7 @@ def _report_results(outs_dir, pabot_args, options, start_time_string, tests_root
  outputs = [] # type: List[str]
  for index, _ in pabot_args["argumentfiles"]:
  copied_artifacts = _copy_output_artifacts(
- options, pabot_args["artifacts"], pabot_args["artifactsinsubfolders"]
+ options, _get_timestamp_id(start_time_string), pabot_args["artifacts"], pabot_args["artifactsinsubfolders"], index
  )
  outputs += [
  _merge_one_run(
@@ -1525,10 +1560,11 @@
  tests_root_name,
  stats,
  copied_artifacts,
+ timestamp_id=_get_timestamp_id(start_time_string),
  outputfile=os.path.join("pabot_results", "output%s.xml" % index),
  )
  ]
- missing_outputs.extend(_check_pabot_results_for_missing_xml(os.path.join(outs_dir, index)))
+ missing_outputs.extend(_check_pabot_results_for_missing_xml(os.path.join(outs_dir, index), pabot_args.get('command')[-1], output_xml_name))
  if "output" not in options:
  options["output"] = "output.xml"
  _write_stats(stats)
@@ -1537,7 +1573,7 @@
  exit_code = _report_results_for_one_run(
  outs_dir, pabot_args, options, start_time_string, tests_root_name, stats
  )
- missing_outputs.extend(_check_pabot_results_for_missing_xml(outs_dir))
+ missing_outputs.extend(_check_pabot_results_for_missing_xml(outs_dir, pabot_args.get('command')[-1], output_xml_name))
  if missing_outputs:
  _write(("[ " + _wrap_with(Color.YELLOW, 'WARNING') + " ] "
  "One or more subprocesses encountered an error and the "
@@ -1575,10 +1611,10 @@ def _report_results_for_one_run(
  outs_dir, pabot_args, options, start_time_string, tests_root_name, stats
  ):
  copied_artifacts = _copy_output_artifacts(
- options, pabot_args["artifacts"], pabot_args["artifactsinsubfolders"]
+ options, _get_timestamp_id(start_time_string), pabot_args["artifacts"], pabot_args["artifactsinsubfolders"]
  )
  output_path = _merge_one_run(
- outs_dir, options, tests_root_name, stats, copied_artifacts
+ outs_dir, options, tests_root_name, stats, copied_artifacts, _get_timestamp_id(start_time_string)
  )
  _write_stats(stats)
  if (
@@ -1597,13 +1633,14 @@


  def _merge_one_run(
- outs_dir, options, tests_root_name, stats, copied_artifacts, outputfile=None
+ outs_dir, options, tests_root_name, stats, copied_artifacts, timestamp_id, outputfile=None
  ):
  outputfile = outputfile or options.get("output", "output.xml")
  output_path = os.path.abspath(
  os.path.join(options.get("outputdir", "."), outputfile)
  )
- files = natsorted(glob(os.path.join(_glob_escape(outs_dir), "**/*.xml")))
+ filename = options.get("output") or "output.xml"
+ files = natsorted(glob(os.path.join(_glob_escape(outs_dir), f"**/*{filename}"), recursive=True))
  if not files:
  _write('WARN: No output files in "%s"' % outs_dir, Color.YELLOW)
  return ""
@@ -1615,7 +1652,7 @@
  if PY2:
  files = [f.decode(SYSTEM_ENCODING) if not is_unicode(f) else f for f in files]
  resu = merge(
- files, options, tests_root_name, copied_artifacts, invalid_xml_callback
+ files, options, tests_root_name, copied_artifacts, timestamp_id, invalid_xml_callback
  )
  _update_stats(resu, stats)
  if ROBOT_VERSION >= "7.0" and options.get("legacyoutput"):
@@ -2122,10 +2159,11 @@ def _parse_ordering(filename): # type: (str) -> List[ExecutionItem]
  raise DataError("Error parsing ordering file '%s'" % filename)


- # TODO: After issue #646, it seems necessary to thoroughly rethink how this functionality should work.
  def _check_ordering(ordering_file, suite_names): # type: (List[ExecutionItem], List[ExecutionItem]) -> None
  list_of_suite_names = [s.name for s in suite_names]
  skipped_runnable_items = []
+ suite_and_test_names = []
+ duplicates = []
  if ordering_file:
  for item in ordering_file:
  if item.type in ['suite', 'test']:
@@ -2135,10 +2173,17 @@ def _check_ordering(ordering_file, suite_names): # type: (List[ExecutionItem],
  # the --suite option, and the given name is part of the full name of any test or suite.
  if item.name != ' Invalid' and not (item.type == 'suite' and any((s == item.name or s.startswith(item.name + ".")) for s in list_of_suite_names)):
  skipped_runnable_items.append(f"{item.type.title()} item: '{item.name}'")
+ if item.name in suite_and_test_names:
+ duplicates.append(f"{item.type.title()} item: '{item.name}'")
+ suite_and_test_names.append(item.name)
  if skipped_runnable_items:
  _write("Note: The ordering file contains test or suite items that are not included in the current test run. The following items will be ignored/skipped:")
  for item in skipped_runnable_items:
  _write(f" - {item}")
+ if duplicates:
+ _write("Note: The ordering file contains duplicate suite or test items. Only the first occurrence is taken into account. These are duplicates:")
+ for item in duplicates:
+ _write(f" - {item}")


  def _group_suites(outs_dir, datasources, options, pabot_args):
@@ -2147,8 +2192,15 @@ def _group_suites(outs_dir, datasources, options, pabot_args):
  ordering_arg = _parse_ordering(pabot_args.get("ordering")) if (pabot_args.get("ordering")) is not None else None
  if ordering_arg:
  _verify_depends(ordering_arg)
- # TODO: After issue #646, it seems necessary to thoroughly rethink how this functionality should work.
- #_check_ordering(ordering_arg, suite_names)
+ if options.get("name"):
+ ordering_arg = _update_ordering_names(ordering_arg, options['name'])
+ _check_ordering(ordering_arg, suite_names)
+ if pabot_args.get("testlevelsplit") and ordering_arg and any(item.type == 'suite' for item in ordering_arg):
+ reduced_suite_names = _reduce_items(suite_names, ordering_arg)
+ if options.get("runemptysuite") and not reduced_suite_names:
+ return [suite_names]
+ if reduced_suite_names:
+ suite_names = reduced_suite_names
  ordering_arg_with_sleep = _set_sleep_times(ordering_arg)
  ordered_suites = _preserve_order(suite_names, ordering_arg_with_sleep)
  shard_suites = solve_shard_suites(ordered_suites, pabot_args)
@@ -2161,6 +2213,58 @@ def _group_suites(outs_dir, datasources, options, pabot_args):
  return grouped_by_depend


+ def _update_ordering_names(ordering, new_top_name):
+ # type: (List[ExecutionItem], str) -> List[ExecutionItem]
+ output = []
+ for item in ordering:
+ if item.type in ['suite', 'test']:
+ splitted_name = item.name.split('.')
+ splitted_name[0] = new_top_name
+ item.name = '.'.join(splitted_name)
+ output.append(item)
+ return output
+
+
+ def _reduce_items(items, selected_suites):
+ # type: (List[ExecutionItem], List[ExecutionItem]) -> List[ExecutionItem]
+ """
+ Reduce a list of test items by replacing covered test cases with suite items from selected_suites.
+ Raises DataError if:
+ - Any test is covered by more than one selected suite.
+ """
+ reduced = []
+ suite_coverage = {}
+ test_to_suite = {}
+
+ for suite in selected_suites:
+ if suite.type == 'suite':
+ suite_name = str(suite.name)
+ covered_tests = [
+ item for item in items
+ if item.type == "test" and str(item.name).startswith(suite_name + ".")
+ ]
+
+ if covered_tests:
+ for test in covered_tests:
+ test_name = str(test.name)
+ if test_name in test_to_suite:
+ raise DataError(
+ f"Invalid test configuration: Test '{test_name}' is matched by multiple suites: "
+ f"'{test_to_suite[test_name]}' and '{suite_name}'."
+ )
+ test_to_suite[test_name] = suite_name
+
+ suite_coverage[suite_name] = set(str(t.name) for t in covered_tests)
+ reduced.append(suite)
+
+ # Add tests not covered by any suite
+ for item in items:
+ if item.type == "test" and str(item.name) not in test_to_suite:
+ reduced.append(item)
+
+ return reduced
+
+
  def _set_sleep_times(ordering_arg):
  # type: (List[ExecutionItem]) -> List[ExecutionItem]
  set_sleep_value = 0
@@ -2227,23 +2331,11 @@ def _verify_depends(suite_names):
  )


- def _group_by_depend(suite_names):
- # type: (List[ExecutionItem]) -> List[List[ExecutionItem]]
- group_items = list(filter(lambda suite: isinstance(suite, GroupItem), suite_names))
- runnable_suites = list(
- filter(lambda suite: isinstance(suite, RunnableItem), suite_names)
- )
- dependency_tree = create_dependency_tree(runnable_suites)
- # Since groups cannot depend on others, they are placed at the beginning.
- dependency_tree[0][0:0] = group_items
- return dependency_tree
-
-
  def _all_grouped_suites_by_depend(grouped_suites):
  # type: (List[List[ExecutionItem]]) -> List[List[ExecutionItem]]
  grouped_by_depend = []
  for group_suite in grouped_suites: # These groups are divided by #WAIT
- grouped_by_depend.extend(_group_by_depend(group_suite))
+ grouped_by_depend.extend(create_dependency_tree(group_suite))
  return grouped_by_depend

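Taken together, the pabot.py changes above drop the shell-quoting helpers: `_create_command_for_execution` now returns the executable command and its options separately, `_write_internal_argument_file` writes the options to `<command>_argfile.txt`, and `_run` launches the command with `-A <argfile>`. The following is a rough, self-contained sketch of that flow, with hypothetical paths and options rather than pabot's real option set:

```python
import os
import tempfile

def write_argument_file(args, filename):
    """Write options one per line; a value that follows an option shares its line."""
    with open(filename, "w", encoding="utf-8") as f:
        i = 0
        while i < len(args):
            if args[i].startswith("-") and i + 1 < len(args) and not args[i + 1].startswith("-"):
                f.write(f"{args[i]} {args[i + 1]}\n")
                i += 2
            else:
                f.write(f"{args[i]}\n")
                i += 1

outs_dir = tempfile.mkdtemp()
run_cmd = ["robot"]                      # executable part, kept separate from the options
run_options = ["--outputdir", outs_dir,  # hypothetical options plus a datasource
               "--loglevel", "TRACE", "tests"]

argfile_path = os.path.join(outs_dir, f"{run_cmd[-1]}_argfile.txt")
write_argument_file(run_options, argfile_path)
cmd = " ".join(run_cmd + ["-A", argfile_path])
# _run would hand this string to subprocess; option values with spaces no
# longer need shell quoting because they live inside the argument file.
print(cmd)
```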
pabot/result_merger.py CHANGED
@@ -35,7 +35,7 @@ from robot.model import SuiteVisitor


  class ResultMerger(SuiteVisitor):
- def __init__(self, result, tests_root_name, out_dir, copied_artifacts, legacy_output):
+ def __init__(self, result, tests_root_name, out_dir, copied_artifacts, timestamp_id, legacy_output):
  self.root = result.suite
  self.errors = result.errors
  self.current = None
@@ -44,6 +44,7 @@ class ResultMerger(SuiteVisitor):
  self._prefix = ""
  self._out_dir = out_dir
  self.legacy_output = legacy_output
+ self.timestamp_id = timestamp_id

  self._patterns = []
  regexp_template = (
@@ -65,7 +66,7 @@ class ResultMerger(SuiteVisitor):
  raise

  def _set_prefix(self, source):
- self._prefix = prefix(source)
+ self._prefix = prefix(source, self.timestamp_id)

  def start_suite(self, suite):
  if self._skip_until and self._skip_until != suite:
@@ -194,9 +195,19 @@ class ResultsCombiner(CombinedResult):
  self.errors.add(other.errors)


- def prefix(source):
+ def prefix(source, timestamp_id):
  try:
- return os.path.split(os.path.dirname(source))[1]
+ path_without_id, id = os.path.split(os.path.dirname(source))
+ if not id:
+ return ""
+ if os.path.split(path_without_id)[1] == 'pabot_results':
+ return "-".join([timestamp_id, id])
+ else:
+ # --argumentfileN in use: (there should be one subdir level more)
+ _, index = os.path.split(path_without_id)
+ if not index:
+ return ""
+ return "-".join([timestamp_id, index, id])
  except:
  return ""

@@ -225,6 +236,7 @@ def merge_groups(
  invalid_xml_callback,
  out_dir,
  copied_artifacts,
+ timestamp_id,
  legacy_output
  ):
  merged = []
@@ -232,7 +244,7 @@ def merge_groups(
  results, critical_tags, non_critical_tags, invalid_xml_callback
  ).values():
  base = group[0]
- merger = ResultMerger(base, tests_root_name, out_dir, copied_artifacts, legacy_output)
+ merger = ResultMerger(base, tests_root_name, out_dir, copied_artifacts, timestamp_id, legacy_output)
  for out in group:
  merger.merge(out)
  merged.append(base)
@@ -244,6 +256,7 @@ def merge(
  rebot_options,
  tests_root_name,
  copied_artifacts,
+ timestamp_id,
  invalid_xml_callback=None,
  ):
  assert len(result_files) > 0
@@ -263,6 +276,7 @@ def merge(
  invalid_xml_callback,
  settings.output_directory,
  copied_artifacts,
+ timestamp_id,
  rebot_options.get('legacyoutput')
  )
  if len(merged) == 1:
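
The extended `prefix` helper above derives a subprocess prefix from the path of its output file and now prepends the run's timestamp id, plus the `--argumentfileN` index when one extra directory level is present. A quick illustration of the two path shapes it distinguishes, using hypothetical paths and a hypothetical timestamp id:

```python
import os

def prefix(source, timestamp_id):
    # Mirrors result_merger.prefix from the diff above.
    try:
        path_without_id, id = os.path.split(os.path.dirname(source))
        if not id:
            return ""
        if os.path.split(path_without_id)[1] == 'pabot_results':
            return "-".join([timestamp_id, id])
        else:
            # --argumentfileN in use: one subdirectory level more
            _, index = os.path.split(path_without_id)
            if not index:
                return ""
            return "-".join([timestamp_id, index, id])
    except Exception:
        return ""

ts = "20250101_120000"  # hypothetical timestamp id
print(prefix("out/pabot_results/3/output.xml", ts))    # -> 20250101_120000-3
print(prefix("out/pabot_results/1/3/output.xml", ts))  # -> 20250101_120000-1-3
```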
robotframework_pabot-4.3.2.dist-info/METADATA → robotframework_pabot-5.0.0.dist-info/METADATA CHANGED
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: robotframework-pabot
- Version: 4.3.2
+ Version: 5.0.0
  Summary: Parallel test runner for Robot Framework
  Home-page: https://pabot.org
  Download-URL: https://pypi.python.org/pypi/robotframework-pabot
@@ -39,6 +39,22 @@ A parallel executor for [Robot Framework](http://www.robotframework.org) tests.

  [![Pabot presentation at robocon.io 2018](http://img.youtube.com/vi/i0RV6SJSIn8/0.jpg)](https://youtu.be/i0RV6SJSIn8 "Pabot presentation at robocon.io 2018")

+ ## Table of Contents
+
+ - [Installation](#installation)
+ - [Basic use](#basic-use)
+ - [Contact](#contact)
+ - [Contributing](#contributing-to-the-project)
+ - [Command-line options](#command-line-options)
+ - [PabotLib](#pabotlib)
+ - [Controlling execution order](#controlling-execution-order-and-level-of-parallelism)
+ - [Programmatic use](#programmatic-use)
+ - [Global variables](#global-variables)
+ - [Output Files Generated by Pabot](#output-files-generated-by-pabot)
+ - [Artifacts Handling and Parallel Execution Notes](#artifacts-handling-and-parallel-execution-notes)
+
+ ----
+
  ## Installation:

  From PyPi:
@@ -271,11 +287,24 @@ Note: The `--ordering` file is intended only for defining the execution order of
  There different possibilities to influence the execution:

  * The order of suites can be changed.
- * If a directory (or a directory structure) should be executed sequentially, add the directory suite name to a row as a ```--suite``` option.
- * If the base suite name is changing with robot option [```--name / -N```](https://robotframework.org/robotframework/latest/RobotFrameworkUserGuide.html#setting-the-name) you can also give partial suite name without the base suite.
+ * If a directory (or a directory structure) should be executed sequentially, add the directory suite name to a row as a ```--suite``` option. This usage is also supported when `--testlevelsplit` is enabled. As an alternative to using `--suite` options, you can also group tests into sequential batches using `{}` braces. (See below for details.) Note that if multiple `--suite` options are used, they must not reference the same test case. This means you cannot specify both parent and child suite names at the same time. For instance:
+
+ ```
+ --suite Top Suite.Sub Suite
+ --suite Top Suite
+ ```
+
+ * If the base suite name is changing with robot option [```--name / -N```](https://robotframework.org/robotframework/latest/RobotFrameworkUserGuide.html#setting-the-name) you can use either the new or old full test path. For example:
+
+ ```
+ --test New Suite Name.Sub Suite.Test 1
+ OR
+ --test Old Suite Name.Sub Suite.Test 1
+ ```
+
  * You can add a line with text `#WAIT` to force executor to wait until all previous suites have been executed.
  * You can group suites and tests together to same executor process by adding line `{` before the group and `}` after. Note that `#WAIT` cannot be used inside a group.
- * You can introduce dependencies using the word `#DEPENDS` after a test declaration. This keyword can be used several times if it is necessary to refer to several different tests. Please take care that in case of circular dependencies an exception will be thrown. Note that each `#WAIT` splits suites into separate execution blocks, and it's not possible to define dependencies for suites or tests that are inside another `#WAIT` block or inside another `{}` brackets.
+ * You can introduce dependencies using the word `#DEPENDS` after a test declaration. This keyword can be used several times if it is necessary to refer to several different tests. The ordering algorithm is designed to preserve the exact user-defined order as closely as possible. However, if a test's execution dependencies are not yet satisfied, the test is postponed and moved to the earliest possible stage where all its dependencies are fulfilled. Please take care that in case of circular dependencies an exception will be thrown. Note that each `#WAIT` splits suites into separate execution blocks, and it's not possible to define dependencies for suites or tests that are inside another `#WAIT` block or inside another `{}` braces.
  * Note: Within a group `{}`, neither execution order nor the `#DEPENDS` keyword currently works. This is due to limitations in Robot Framework, which is invoked within Pabot subprocesses. These limitations may be addressed in a future release of Robot Framework. For now, tests or suites within a group will be executed in the order Robot Framework discovers them — typically in alphabetical order.
  * An example could be:

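The README's own example follows in the unchanged part of the file. As a purely illustrative sketch with hypothetical suite and test names, an ordering file combining the directives listed above might look like this:

```
--suite Top Suite.Sequential Folder
{
--test Top Suite.Grouped.Test A
--test Top Suite.Grouped.Test B
}
#WAIT
--test Top Suite.Other.Test 1
--test Top Suite.Other.Test 2 #DEPENDS Top Suite.Other.Test 1
```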
@@ -369,4 +398,68 @@ Pabot will insert following global variables to Robot Framework namespace. These
  PABOTEXECUTIONPOOLID - this contains the pool id (an integer) for the current Robot Framework executor. This is helpful for example when visualizing the execution flow from your own listener.
  PABOTNUMBEROFPROCESSES - max number of concurrent processes that pabot may use in execution.
  CALLER_ID - a universally unique identifier for this execution.
-
+
+
+ ### Output Files Generated by Pabot
+
+ Pabot generates several output files and folders during execution, both for internal use and for analysis purposes.
+
+ #### Internal File: `.pabotsuitenames`
+
+ Pabot creates a `.pabotsuitenames` file in the working directory. This is an internal hash file used to speed up execution in certain scenarios.
+ This file can also be used as a base for the `--ordering` file as described earlier. Although technically it can be modified, it will be overwritten during the next execution.
+ Therefore, it is **recommended** to maintain a separate file for the `--ordering` option if needed.
+
+ #### Output Directory Structure
+
+ In addition to the standard `log.html`, `report.html`, and `output.xml` files, the specified `--outputdir` will contain:
+
+ - A folder named `pabot_results`, and
+ - All defined artifacts (default: `.png` files)
+ - Optionally, artifacts from subfolders if `--artifactsinsubfolders` is used
+
+ Artifacts are **copied** into the output directory and renamed with the following structure:
+
+ ```
+ TIMESTAMP-ARGUMENT_INDEX-PABOTQUEUEINDEX
+ ```
+
+ - **TIMESTAMP** = Time of `pabot` command invocation (not the screenshot's actual timestamp), format: `YYYYmmdd_HHMMSS`
+ - **ARGUMENT_INDEX** = Optional index number, only used if `--argumentfileN` options are given
+ - **PABOTQUEUEINDEX** = Process queue index (see section [Global Variables](#global-variables))
+
+ #### `pabot_results` Folder Structure
+
+ The structure of the `pabot_results` folder is as follows:
+
+ ```
+ pabot_results/
+ ├── [N]/ # Optional: N = argument file index (if --argumentfileN is used)
+ │ └── PABOTQUEUEINDEX/ # One per subprocess
+ │ ├── output.xml
+ │ ├── robot_argfile.txt
+ │ ├── robot_stdout.out
+ │ ├── robot_stderr.out
+ │ └── artifacts...
+ ```
+
+ Each `PABOTQUEUEINDEX` folder contains as default:
+
+ - `robot_argfile.txt` – Arguments used in that subprocess
+ - `robot_stdout.out` and `robot_stderr.out` – Stdout and stderr of the subprocess
+ - `output.xml` – The partial output file to be merged later
+ - Artifacts – Screenshots or other files copied from subprocess folders
+
+ > **Note:** The entire `pabot_results` folder is considered temporary and will be **deleted/overwritten** on the next `pabot` run using the same `--outputdir`.
+
+
+ ### Artifacts Handling and Parallel Execution Notes
+
+ Due to parallel execution, artifacts like screenshots should ideally be:
+
+ - Embedded directly into the XML using tools like [SeleniumLibrary](https://robotframework.org/SeleniumLibrary/SeleniumLibrary.html#Set%20Screenshot%20Directory) with the `EMBED` option
+ _Example:_
+ `Library SeleniumLibrary screenshot_root_directory=EMBED`
+ - Or saved to the subprocess’s working directory (usually default behavior), ensuring separation across processes
+
+ If you manually specify a shared screenshot directory in your test code, **all processes will write to it concurrently**, which may cause issues such as overwriting or missing files if screenshots are taken simultaneously.
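
As a small sketch of how the artifact names described above are assembled: the timestamp id conversion matches `_get_timestamp_id` in the pabot.py diff, while the folder names and file name below are hypothetical:

```python
import datetime

def timestamp_id(start_time_string):
    # Same conversion as _get_timestamp_id in the pabot.py diff.
    return datetime.datetime.strptime(
        start_time_string, "%Y-%m-%d %H:%M:%S.%f"
    ).strftime("%Y%m%d_%H%M%S")

ts = timestamp_id("2025-01-01 09:30:00.123456")  # -> "20250101_093000"
argfile_index = "1"                     # present only when --argumentfileN is used
queue_index = "0"                       # subprocess folder name (PABOTQUEUEINDEX)
file_name = "selenium-screenshot-1.png" # hypothetical artifact

# Without --argumentfileN: TIMESTAMP-PABOTQUEUEINDEX-<original name>
print("-".join([ts, queue_index, file_name]))
# With --argumentfileN: TIMESTAMP-ARGUMENT_INDEX-PABOTQUEUEINDEX-<original name>
print("-".join([ts, argfile_index, queue_index, file_name]))
```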
robotframework_pabot-4.3.2.dist-info/RECORD → robotframework_pabot-5.0.0.dist-info/RECORD CHANGED
@@ -1,12 +1,12 @@
  pabot/SharedLibrary.py,sha256=mIipGs3ZhKYEakKprcbrMI4P_Un6qI8gE7086xpHaLY,2552
- pabot/__init__.py,sha256=h6cCOibZvyBUm_rgLAWdrpXF7RiLsoI65Y3VqVm_WrM,200
+ pabot/__init__.py,sha256=QKXV5e-W0g2tAcx-NzbFZscMZr1HtBUzDrpQDrURXBk,200
  pabot/arguments.py,sha256=m38y8mXKJ5BHlxSrsEI0gXlkzR5hv88G9i-FL_BouQ4,9168
  pabot/clientwrapper.py,sha256=yz7battGs0exysnDeLDWJuzpb2Q-qSjitwxZMO2TlJw,231
  pabot/coordinatorwrapper.py,sha256=nQQ7IowD6c246y8y9nsx0HZbt8vS2XODhPVDjm-lyi0,195
- pabot/execution_items.py,sha256=HCd54LsIEZJjnL0TZC_tuac2DSVL4JHes6veJlpCE94,12058
- pabot/pabot.py,sha256=zRhp1bgvXmMpB1H8DU-Y0rMGsZ8Z_oTkOsXDZv_qrGU,75356
+ pabot/execution_items.py,sha256=zDVGW0AAeVbM-scC3Yui2TxvIPx1wYyFKHTPU2BkJkY,13329
+ pabot/pabot.py,sha256=j_CtB8S8gZ5qBSlXKuv_7Mhj7i5RVHm7X6tg1l_Fp6o,79549
  pabot/pabotlib.py,sha256=FRZKaKy1ybyRkE-0SpaCsUWzxZAzNNU5dAywSm1QoPk,22324
- pabot/result_merger.py,sha256=8iIptBn5MdgiW-OdhwVR2DZ0hUYuQeQXwIHAEPkMTuw,9095
+ pabot/result_merger.py,sha256=ST2szeXoaD3ipwr_vhIUH2SihlxhoXiOQu2Zj2VowyA,9674
  pabot/robotremoteserver.py,sha256=L3O2QRKSGSE4ux5M1ip5XJMaelqaxQWJxd9wLLdtpzM,22272
  pabot/workerwrapper.py,sha256=BdELUVDs5BmEkdNBcYTlnP22Cj0tUpZEunYQMAKyKWU,185
  pabot/py3/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
@@ -14,9 +14,9 @@ pabot/py3/client.py,sha256=Od9L4vZ0sozMHq_W_ITQHBBt8kAej40DG58wnxmbHGM,1434
  pabot/py3/coordinator.py,sha256=kBshCzA_1QX_f0WNk42QBJyDYSwSlNM-UEBxOReOj6E,2313
  pabot/py3/messages.py,sha256=7mFr4_0x1JHm5sW8TvKq28Xs_JoeIGku2bX7AyO0kng,2557
  pabot/py3/worker.py,sha256=5rfp4ZiW6gf8GRz6eC0-KUkfx847A91lVtRYpLAv2sg,1612
- robotframework_pabot-4.3.2.dist-info/licenses/LICENSE.txt,sha256=WNHhf_5RCaeuKWyq_K39vmp9F28LxKsB4SpomwSZ2L0,11357
- robotframework_pabot-4.3.2.dist-info/METADATA,sha256=T30IE6XvBzbaKaEQl9YenlrkWTGNOnB8iqWGUsw3CSE,16417
- robotframework_pabot-4.3.2.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
- robotframework_pabot-4.3.2.dist-info/entry_points.txt,sha256=JpAIFADTeFOQWdwmn56KpAil8V3-41ZC5ICXCYm3Ng0,43
- robotframework_pabot-4.3.2.dist-info/top_level.txt,sha256=t3OwfEAsSxyxrhjy_GCJYHKbV_X6AIsgeLhYeHvObG4,6
- robotframework_pabot-4.3.2.dist-info/RECORD,,
+ robotframework_pabot-5.0.0.dist-info/licenses/LICENSE.txt,sha256=WNHhf_5RCaeuKWyq_K39vmp9F28LxKsB4SpomwSZ2L0,11357
+ robotframework_pabot-5.0.0.dist-info/METADATA,sha256=-vtvV7PlcmXw8p1RmrDuLGw0LNcC_jwzMVVlQvyDuPE,20880
+ robotframework_pabot-5.0.0.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+ robotframework_pabot-5.0.0.dist-info/entry_points.txt,sha256=JpAIFADTeFOQWdwmn56KpAil8V3-41ZC5ICXCYm3Ng0,43
+ robotframework_pabot-5.0.0.dist-info/top_level.txt,sha256=t3OwfEAsSxyxrhjy_GCJYHKbV_X6AIsgeLhYeHvObG4,6
+ robotframework_pabot-5.0.0.dist-info/RECORD,,