robotframework-pabot 3.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pabot/SharedLibrary.py +62 -0
- pabot/__init__.py +4 -0
- pabot/arguments.py +236 -0
- pabot/clientwrapper.py +10 -0
- pabot/coordinatorwrapper.py +8 -0
- pabot/execution_items.py +320 -0
- pabot/pabot.py +2072 -0
- pabot/pabotlib.py +578 -0
- pabot/py3/__init__.py +0 -0
- pabot/py3/client.py +40 -0
- pabot/py3/coordinator.py +63 -0
- pabot/py3/messages.py +104 -0
- pabot/py3/worker.py +52 -0
- pabot/result_merger.py +272 -0
- pabot/robotremoteserver.py +632 -0
- pabot/workerwrapper.py +8 -0
- robotframework_pabot-3.1.0.dist-info/LICENSE.txt +202 -0
- robotframework_pabot-3.1.0.dist-info/METADATA +24 -0
- robotframework_pabot-3.1.0.dist-info/RECORD +22 -0
- robotframework_pabot-3.1.0.dist-info/WHEEL +5 -0
- robotframework_pabot-3.1.0.dist-info/entry_points.txt +2 -0
- robotframework_pabot-3.1.0.dist-info/top_level.txt +1 -0
pabot/pabot.py
ADDED
@@ -0,0 +1,2072 @@
#!/usr/bin/env python

# Copyright 2014->future! Mikko Korpela
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# partly based on work by Nokia Solutions and Networks Oyj
"""A parallel executor for Robot Framework test cases.
Version [PABOT_VERSION]

Supports all Robot Framework command line options and also the following
options (these must come before the normal RF options):

--verbose
  more output

--command [ACTUAL COMMANDS TO START ROBOT EXECUTOR] --end-command
  RF script for situations where pybot is not used directly

--processes [NUMBER OF PROCESSES]
  How many parallel executors to use (default max of 2 and cpu count).
  Special option "all" will use as many processes as there are
  executable suites or tests.

--testlevelsplit
  Split execution on test level instead of the default suite level.
  If .pabotsuitenames contains both tests and suites, this will only
  affect new suites and split only them. Leaving this flag out when
  both suites and tests are in the .pabotsuitenames file will likewise
  only affect new suites and add them as suite files.

--resourcefile [FILEPATH]
  Indicator for a file that can contain shared variables for
  distributing resources.

--pabotlib
  Start the PabotLib remote server. This enables locking and resource
  distribution between parallel test executions.

--pabotlibhost [HOSTNAME]
  Host name of the PabotLib remote server (default is 127.0.0.1)

--pabotlibport [PORT]
  Port number of the PabotLib remote server (default is 8270)

--processtimeout [TIMEOUT]
  Maximum time in seconds to wait for a process before killing it.
  If not set, there is no timeout.

--ordering [FILE PATH]
  Optionally give the execution order from a file.

--suitesfrom [FILEPATH TO OUTPUTXML]
  Optionally read suites from an output.xml file. Failed suites will run
  first and longer running ones will be executed before shorter ones.

--argumentfile[INTEGER] [FILEPATH]
  Run the same suite with multiple argumentfile options.
  For example "--argumentfile1 arg1.txt --argumentfile2 arg2.txt".

--shard [SHARD]/[SHARD COUNT]
  Optionally split execution into smaller pieces. This can
  be used for distributing testing to multiple machines.

--chunk
  Optionally chunk tests to PROCESSES number of robot runs.

Copyright 2022 Mikko Korpela - Apache 2 License
"""

from __future__ import absolute_import, print_function

import datetime
import hashlib
import os
import random
import re
import shutil
import signal
import socket
import subprocess
import sys
import threading
import time
import traceback
import uuid
from collections import namedtuple
from contextlib import closing
from glob import glob
from io import BytesIO, StringIO
from multiprocessing.pool import ThreadPool
from natsort import natsorted

from robot import __version__ as ROBOT_VERSION
from robot import rebot
from robot.api import ExecutionResult
from robot.conf import RobotSettings
from robot.errors import DataError, Information
from robot.libraries.Remote import Remote
from robot.model import ModelModifier
from robot.result.visitor import ResultVisitor
from robot.run import USAGE
from robot.running import TestSuiteBuilder
from robot.utils import PY2, SYSTEM_ENCODING, ArgumentParser, is_unicode

from . import pabotlib, __version__ as PABOT_VERSION
from .arguments import (
    parse_args,
    parse_execution_item_line,
    _filter_argument_parser_options,
)
from .clientwrapper import make_order
from .execution_items import (
    DynamicSuiteItem,
    ExecutionItem,
    GroupEndItem,
    GroupItem,
    GroupStartItem,
    HivedItem,
    SuiteItem,
    SuiteItems,
    TestItem,
    RunnableItem,
)
from .result_merger import merge

try:
    import queue  # type: ignore
except ImportError:
    import Queue as queue  # type: ignore

try:
    from shlex import quote  # type: ignore
except ImportError:
    from pipes import quote  # type: ignore

from typing import IO, Any, Dict, List, Optional, Tuple, Union

CTRL_C_PRESSED = False
MESSAGE_QUEUE = queue.Queue()
EXECUTION_POOL_IDS = []  # type: List[int]
EXECUTION_POOL_ID_LOCK = threading.Lock()
POPEN_LOCK = threading.Lock()
_PABOTLIBURI = "127.0.0.1:8270"
_PABOTLIBPROCESS = None  # type: Optional[subprocess.Popen]
_BOURNELIKE_SHELL_BAD_CHARS_WITHOUT_DQUOTE = (
    "!#$^&*?[(){}<>~;'`\\|= \t\n"  # does not contain '"'
)
_BAD_CHARS_SET = set(_BOURNELIKE_SHELL_BAD_CHARS_WITHOUT_DQUOTE)
_NUMBER_OF_ITEMS_TO_BE_EXECUTED = 0
_ABNORMAL_EXIT_HAPPENED = False

_COMPLETED_LOCK = threading.Lock()
_NOT_COMPLETED_INDEXES = []  # type: List[int]

_ROBOT_EXTENSIONS = [
    ".html",
    ".htm",
    ".xhtml",
    ".tsv",
    ".rst",
    ".rest",
    ".txt",
    ".robot",
]
_ALL_ELAPSED = []  # type: List[Union[int, float]]


class Color:
    SUPPORTED_OSES = ["posix"]

    GREEN = "\033[92m"
    RED = "\033[91m"
    ENDC = "\033[0m"
    YELLOW = "\033[93m"


def _mapOptionalQuote(command_args):
    # type: (List[str]) -> List[str]
    if os.name == "posix":
        return [quote(arg) for arg in command_args]
    return [
        arg if set(arg).isdisjoint(_BAD_CHARS_SET) else '"%s"' % arg
        for arg in command_args
    ]
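
# Example of the quoting above: on POSIX, shlex.quote turns
# ["robot", "--name", "My Suite"] into ["robot", "--name", "'My Suite'"];
# elsewhere, arguments containing shell-special characters are wrapped in
# double quotes instead (My Suite -> "My Suite").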


def execute_and_wait_with(item):
    # type: ('QueueItem') -> None
    global CTRL_C_PRESSED, _NUMBER_OF_ITEMS_TO_BE_EXECUTED
    is_last = _NUMBER_OF_ITEMS_TO_BE_EXECUTED == 1
    _NUMBER_OF_ITEMS_TO_BE_EXECUTED -= 1
    if CTRL_C_PRESSED:
        # Keyboard interrupt has happened!
        return
    time.sleep(0)
    try:
        datasources = [
            d.encode("utf-8") if PY2 and is_unicode(d) else d for d in item.datasources
        ]
        caller_id = uuid.uuid4().hex
        name = item.display_name
        outs_dir = os.path.join(item.outs_dir, item.argfile_index, str(item.index))
        os.makedirs(outs_dir)
        cmd = _create_command_for_execution(
            caller_id, datasources, is_last, item, outs_dir
        )
        if item.hive:
            _hived_execute(
                item.hive,
                cmd,
                outs_dir,
                name,
                item.verbose,
                _make_id(),
                caller_id,
                item.index,
            )
        else:
            _try_execute_and_wait(
                cmd,
                outs_dir,
                name,
                item.verbose,
                _make_id(),
                caller_id,
                item.index,
                item.execution_item.type != "test",
                process_timeout=item.timeout
            )
        outputxml_preprocessing(
            item.options, outs_dir, name, item.verbose, _make_id(), caller_id
        )
    except:
        _write(traceback.format_exc())


def _create_command_for_execution(caller_id, datasources, is_last, item, outs_dir):
    options = item.options.copy()
    if item.command == ["robot"] and not options["listener"]:
        options["listener"] = ["RobotStackTracer"]
    cmd = (
        item.command
        + _options_for_custom_executor(
            options,
            outs_dir,
            item.execution_item,
            item.argfile,
            caller_id,
            is_last,
            item.index,
            item.last_level,
            item.processes,
        )
        + datasources
    )
    return _mapOptionalQuote(cmd)


def _pabotlib_in_use():
    return _PABOTLIBPROCESS or _PABOTLIBURI != "127.0.0.1:8270"


def _hived_execute(
    hive, cmd, outs_dir, item_name, verbose, pool_id, caller_id, my_index=-1
):
    plib = None
    if _pabotlib_in_use():
        plib = Remote(_PABOTLIBURI)
    try:
        make_order(hive, " ".join(cmd), outs_dir)
    except:
        _write(traceback.format_exc())
    if plib:
        _increase_completed(plib, my_index)


def _try_execute_and_wait(
    cmd,
    outs_dir,
    item_name,
    verbose,
    pool_id,
    caller_id,
    my_index=-1,
    show_stdout_on_failure=False,
    process_timeout=None
):
    # type: (List[str], str, str, bool, int, str, int, bool, Optional[int]) -> None
    plib = None
    is_ignored = False
    if _pabotlib_in_use():
        plib = Remote(_PABOTLIBURI)
    try:
        with open(os.path.join(outs_dir, cmd[0] + "_stdout.out"), "w") as stdout:
            with open(os.path.join(outs_dir, cmd[0] + "_stderr.out"), "w") as stderr:
                process, (rc, elapsed) = _run(
                    cmd, stderr, stdout, item_name, verbose, pool_id, my_index, outs_dir, process_timeout
                )
    except:
        _write(traceback.format_exc())
    if plib:
        _increase_completed(plib, my_index)
        is_ignored = _is_ignored(plib, caller_id)
    # Thread-safe list append
    _ALL_ELAPSED.append(elapsed)
    _result_to_stdout(
        elapsed,
        is_ignored,
        item_name,
        my_index,
        pool_id,
        process,
        rc,
        stderr,
        stdout,
        verbose,
        show_stdout_on_failure,
    )
    if is_ignored and os.path.isdir(outs_dir):
        shutil.rmtree(outs_dir)


def _result_to_stdout(
    elapsed,
    is_ignored,
    item_name,
    my_index,
    pool_id,
    process,
    rc,
    stderr,
    stdout,
    verbose,
    show_stdout_on_failure,
):
    if is_ignored:
        _write_with_id(
            process,
            pool_id,
            my_index,
            _execution_ignored_message(item_name, stdout, stderr, elapsed, verbose),
        )
    elif rc != 0:
        _write_with_id(
            process,
            pool_id,
            my_index,
            _execution_failed_message(
                item_name, stdout, stderr, rc, verbose or show_stdout_on_failure
            ),
            Color.RED,
        )
    else:
        _write_with_id(
            process,
            pool_id,
            my_index,
            _execution_passed_message(item_name, stdout, stderr, elapsed, verbose),
            Color.GREEN,
        )


def _is_ignored(plib, caller_id):  # type: (Remote, str) -> bool
    return plib.run_keyword("is_ignored_execution", [caller_id], {})
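
# Note on the ignore mechanism used above: each parallel run gets a unique
# CALLER_ID variable (appended in _options_for_executor below). After the
# subprocess finishes, PabotLib is asked whether that caller was marked as
# ignored; ignored runs are reported as IGNORED and their output directory
# is deleted so their results do not end up in the merged output.xml.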


# optionally invoke rebot for output.xml preprocessing to get --RemoveKeywords
# and --flattenkeywords applied => result: much smaller output.xml files + faster merging + avoid MemoryErrors
def outputxml_preprocessing(options, outs_dir, item_name, verbose, pool_id, caller_id):
    # type: (Dict[str, Any], str, str, bool, int, str) -> None
    try:
        remove_keywords = options["removekeywords"]
        flatten_keywords = options["flattenkeywords"]
        if not remove_keywords and not flatten_keywords:
            # => no preprocessing needed if no removekeywords or flattenkeywords present
            return
        remove_keywords_args = []  # type: List[str]
        flatten_keywords_args = []  # type: List[str]
        for k in remove_keywords:
            remove_keywords_args += ["--removekeywords", k]
        for k in flatten_keywords:
            flatten_keywords_args += ["--flattenkeywords", k]
        outputxmlfile = os.path.join(outs_dir, "output.xml")
        oldsize = os.path.getsize(outputxmlfile)
        cmd = (
            [
                "rebot",
                "--log",
                "NONE",
                "--report",
                "NONE",
                "--xunit",
                "NONE",
                "--consolecolors",
                "off",
                "--NoStatusRC",
            ]
            + remove_keywords_args
            + flatten_keywords_args
            + ["--output", outputxmlfile, outputxmlfile]
        )
        cmd = _mapOptionalQuote(cmd)
        _try_execute_and_wait(
            cmd,
            outs_dir,
            "preprocessing output.xml on " + item_name,
            verbose,
            pool_id,
            caller_id,
        )
        newsize = os.path.getsize(outputxmlfile)
        perc = 100 * newsize / oldsize
        if verbose:
            _write(
                "%s [main] [%s] Filesize reduced from %s to %s (%0.2f%%) for file %s"
                % (
                    datetime.datetime.now(),
                    pool_id,
                    oldsize,
                    newsize,
                    perc,
                    outputxmlfile,
                )
            )
    except:
        print(sys.exc_info())


def _write_with_id(process, pool_id, item_index, message, color=None, timestamp=None):
    timestamp = timestamp or datetime.datetime.now()
    _write(
        "%s [PID:%s] [%s] [ID:%s] %s"
        % (timestamp, process.pid, pool_id, item_index, message),
        color,
    )


def _make_id():  # type: () -> int
    global EXECUTION_POOL_IDS, EXECUTION_POOL_ID_LOCK
    thread_id = threading.current_thread().ident
    assert thread_id is not None
    with EXECUTION_POOL_ID_LOCK:
        if thread_id not in EXECUTION_POOL_IDS:
            EXECUTION_POOL_IDS += [thread_id]
        return EXECUTION_POOL_IDS.index(thread_id)
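
# Example: with three worker threads, the first thread to call _make_id() is
# assigned pool id 0, the second 1, and the third 2; later calls from the
# same thread keep returning the same id.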


def _increase_completed(plib, my_index):
    # type: (Remote, int) -> None
    global _COMPLETED_LOCK, _NOT_COMPLETED_INDEXES
    with _COMPLETED_LOCK:
        if my_index not in _NOT_COMPLETED_INDEXES:
            return
        _NOT_COMPLETED_INDEXES.remove(my_index)
        if _NOT_COMPLETED_INDEXES:
            plib.run_keyword(
                "set_parallel_value_for_key",
                [
                    pabotlib.PABOT_MIN_QUEUE_INDEX_EXECUTING_PARALLEL_VALUE,
                    _NOT_COMPLETED_INDEXES[0],
                ],
                {},
            )
        if len(_NOT_COMPLETED_INDEXES) == 1:
            plib.run_keyword(
                "set_parallel_value_for_key", ["pabot_only_last_executing", 1], {}
            )


def _run(command, stderr, stdout, item_name, verbose, pool_id, item_index, outs_dir, process_timeout):
    # type: (List[str], IO[Any], IO[Any], str, bool, int, int, str, Optional[int]) -> Tuple[Union[subprocess.Popen[bytes], subprocess.Popen], Tuple[int, float]]
    timestamp = datetime.datetime.now()
    cmd = " ".join(command)
    if PY2:
        cmd = cmd.decode("utf-8").encode(SYSTEM_ENCODING)
    # avoid hitting https://bugs.python.org/issue10394
    with POPEN_LOCK:
        my_env = os.environ.copy()
        syslog_file = my_env.get('ROBOT_SYSLOG_FILE', None)
        if syslog_file:
            my_env['ROBOT_SYSLOG_FILE'] = os.path.join(outs_dir, os.path.basename(syslog_file))
        process = subprocess.Popen(cmd, shell=True, stderr=stderr, stdout=stdout, env=my_env)
    if verbose:
        _write_with_id(
            process,
            pool_id,
            item_index,
            "EXECUTING PARALLEL %s with command:\n%s" % (item_name, cmd),
            timestamp=timestamp,
        )
    else:
        _write_with_id(
            process,
            pool_id,
            item_index,
            "EXECUTING %s" % item_name,
            timestamp=timestamp,
        )
    return process, _wait_for_return_code(process, item_name, pool_id, item_index, process_timeout)


def _wait_for_return_code(process, item_name, pool_id, item_index, process_timeout):
    rc = None
    elapsed = 0
    ping_time = ping_interval = 150
    while rc is None:
        rc = process.poll()
        time.sleep(0.1)
        elapsed += 1

        if process_timeout and elapsed / 10.0 >= process_timeout:
            process.terminate()
            process.wait()
            rc = -1  # Set a return code indicating that the process was killed due to timeout
            _write_with_id(
                process,
                pool_id,
                item_index,
                "Process %s killed due to exceeding the maximum timeout of %s seconds" % (item_name, process_timeout),
            )
            break

        if elapsed == ping_time:
            ping_interval += 50
            ping_time += ping_interval
            _write_with_id(
                process,
                pool_id,
                item_index,
                "still running %s after %s seconds" % (item_name, elapsed / 10.0),
            )

    return rc, elapsed / 10.0
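
# Timing note: the loop above polls every 0.1 s and counts ticks, so
# elapsed / 10.0 is the elapsed time in seconds. The "still running" pings
# follow a widening schedule: after 15 s (150 ticks), then 35 s, 60 s, 90 s,
# and so on.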


def _read_file(file_handle):
    try:
        with open(file_handle.name, "r") as content_file:
            content = content_file.read()
        return content
    except:
        return "Unable to read file %s" % file_handle


def _execution_failed_message(suite_name, stdout, stderr, rc, verbose):
    if not verbose:
        return "FAILED %s" % suite_name
    return "Execution failed in %s with %d failing test(s)\n%s\n%s" % (
        suite_name,
        rc,
        _read_file(stdout),
        _read_file(stderr),
    )


def _execution_passed_message(suite_name, stdout, stderr, elapsed, verbose):
    if not verbose:
        return "PASSED %s in %s seconds" % (suite_name, elapsed)
    return "PASSED %s in %s seconds\n%s\n%s" % (
        suite_name,
        elapsed,
        _read_file(stdout),
        _read_file(stderr),
    )


def _execution_ignored_message(suite_name, stdout, stderr, elapsed, verbose):
    if not verbose:
        return "IGNORED %s" % suite_name
    return "IGNORED %s in %s seconds\n%s\n%s" % (
        suite_name,
        elapsed,
        _read_file(stdout),
        _read_file(stderr),
    )


def _options_for_custom_executor(*args):
    # type: (Any) -> List[str]
    return _options_to_cli_arguments(_options_for_executor(*args))


def _options_for_executor(
    options,
    outs_dir,
    execution_item,
    argfile,
    caller_id,
    is_last,
    queueIndex,
    last_level,
    processes,
):
    options = options.copy()
    options["log"] = "NONE"
    options["report"] = "NONE"
    options["xunit"] = "NONE"
    options["test"] = options.get("test", [])[:]
    options["suite"] = options.get("suite", [])[:]
    execution_item.modify_options_for_executor(options)
    options["outputdir"] = "%OUTPUTDIR%" if execution_item.type == "hived" else outs_dir
    options["variable"] = options.get("variable", [])[:]
    options["variable"].append("CALLER_ID:%s" % caller_id)
    pabotLibURIVar = "PABOTLIBURI:%s" % _PABOTLIBURI
    # Prevent multiple appending of PABOTLIBURI variable setting
    if pabotLibURIVar not in options["variable"]:
        options["variable"].append(pabotLibURIVar)
    pabotExecutionPoolId = "PABOTEXECUTIONPOOLID:%d" % _make_id()
    if pabotExecutionPoolId not in options["variable"]:
        options["variable"].append(pabotExecutionPoolId)
    pabotIsLast = "PABOTISLASTEXECUTIONINPOOL:%s" % ("1" if is_last else "0")
    if pabotIsLast not in options["variable"]:
        options["variable"].append(pabotIsLast)
    pabotProcesses = "PABOTNUMBEROFPROCESSES:%s" % str(processes)
    if pabotProcesses not in options["variable"]:
        options["variable"].append(pabotProcesses)
    pabotIndex = pabotlib.PABOT_QUEUE_INDEX + ":" + str(queueIndex)
    if pabotIndex not in options["variable"]:
        options["variable"].append(pabotIndex)
    if last_level is not None:
        pabotLastLevel = pabotlib.PABOT_LAST_LEVEL + ":" + str(last_level)
        if pabotLastLevel not in options["variable"]:
            options["variable"].append(pabotLastLevel)
    if argfile:
        _modify_options_for_argfile_use(argfile, options, execution_item.top_name())
        options["argumentfile"] = argfile
    if options.get("test", False) and options.get("include", []):
        del options["include"]
    return _set_terminal_coloring_options(options)


def _modify_options_for_argfile_use(argfile, options, root_name):
    argfile_opts, _ = ArgumentParser(
        USAGE,
        **_filter_argument_parser_options(
            auto_pythonpath=False,
            auto_argumentfile=True,
            env_options="ROBOT_OPTIONS",
        ),
    ).parse_args(["--argumentfile", argfile])
    old_name = options.get("name", root_name)
    if argfile_opts["name"]:
        new_name = argfile_opts["name"]
        _replace_base_name(new_name, old_name, options, "suite")
        if not options["suite"]:
            _replace_base_name(new_name, old_name, options, "test")
        if "name" in options:
            del options["name"]


def _replace_base_name(new_name, old_name, options, key):
    if isinstance(options.get(key, None), str):
        options[key] = new_name + options[key][len(old_name) :]
    elif key in options:
        options[key] = [new_name + s[len(old_name) :] for s in options.get(key, [])]


def _set_terminal_coloring_options(options):
    if ROBOT_VERSION >= "2.9":
        options["consolecolors"] = "off"
        options["consolemarkers"] = "off"
    else:
        options["monitorcolors"] = "off"
    if ROBOT_VERSION >= "2.8" and ROBOT_VERSION < "2.9":
        options["monitormarkers"] = "off"
    return options


def _options_to_cli_arguments(opts):  # type: (dict) -> List[str]
    res = []  # type: List[str]
    for k, v in opts.items():
        if isinstance(v, str):
            res += ["--" + str(k), str(v)]
        elif PY2 and is_unicode(v):
            res += ["--" + str(k), v.encode("utf-8")]
        elif isinstance(v, bool) and (v is True):
            res += ["--" + str(k)]
        elif isinstance(v, list):
            for value in v:
                if PY2 and is_unicode(value):
                    res += ["--" + str(k), value.encode("utf-8")]
                else:
                    res += ["--" + str(k), str(value)]
    return res
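
# Example of the option-to-argument mapping above (a hypothetical opts dict):
#   {"loglevel": "DEBUG", "dryrun": True, "variable": ["A:1", "B:2"]}
# becomes
#   ["--loglevel", "DEBUG", "--dryrun", "--variable", "A:1", "--variable", "B:2"]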


def _group_by_groups(tokens):
    result = []
    group = None
    for token in tokens:
        if isinstance(token, GroupStartItem):
            if group is not None:
                raise DataError(
                    "Ordering: Group can not contain a group. Encountered '{'"
                )
            group = GroupItem()
            result.append(group)
            continue
        if isinstance(token, GroupEndItem):
            if group is None:
                raise DataError(
                    "Ordering: Group end tag '}' encountered before start '{'"
                )
            group = None
            continue
        if group is not None:
            group.add(token)
        else:
            result.append(token)
    return result


def hash_directory(digest, path):
    if os.path.isfile(path):
        digest.update(_digest(_norm_path(path)))
        get_hash_of_file(path, digest)
        return
    for root, _, files in os.walk(path):
        for name in sorted(files):
            file_path = os.path.join(root, name)
            if os.path.isfile(file_path) and any(
                file_path.endswith(p) for p in _ROBOT_EXTENSIONS
            ):
                # DO NOT ALLOW CHANGE TO FILE LOCATION
                digest.update(_digest(_norm_path(root)))
                # DO THESE IN TWO PHASES BECAUSE SEPARATOR DIFFERS IN DIFFERENT OS
                digest.update(_digest(name))
                get_hash_of_file(file_path, digest)


def _norm_path(path):
    return "/".join(os.path.normpath(path).split(os.path.sep))


def _digest(text):
    text = text.decode("utf-8") if PY2 and not is_unicode(text) else text
    return hashlib.sha1(text.encode("utf-8")).digest()


def get_hash_of_file(filename, digest):
    if not os.path.isfile(filename):
        return
    with open(filename, "rb") as f_obj:
        while True:
            buf = f_obj.read(1024 * 1024)
            if not buf:
                break
            digest.update(buf)


def get_hash_of_dirs(directories):
    digest = hashlib.sha1()
    for directory in directories:
        hash_directory(digest, directory)
    return digest.hexdigest()
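
# The directory hash above mixes in each data file's normalized directory
# path, its file name, and its contents, so moving, renaming, or editing any
# recognized test data file (see _ROBOT_EXTENSIONS) changes the hash and
# invalidates the cached .pabotsuitenames suite list.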


IGNORED_OPTIONS = [
    "pythonpath",
    "outputdir",
    "output",
    "log",
    "report",
    "removekeywords",
    "flattenkeywords",
    "tagstatinclude",
    "tagstatexclude",
    "tagstatcombine",
    "critical",
    "noncritical",
    "tagstatlink",
    "metadata",
    "tagdoc",
]


def get_hash_of_command(options, pabot_args):
    digest = hashlib.sha1()
    hopts = dict(options)
    for option in options:
        if option in IGNORED_OPTIONS or options[option] == []:
            del hopts[option]
    if pabot_args.get("testlevelsplit"):
        hopts["testlevelsplit"] = True
    digest.update(repr(sorted(hopts.items())).encode("utf-8"))
    return digest.hexdigest()


Hashes = namedtuple("Hashes", ["dirs", "cmd", "suitesfrom"])


def _suitesfrom_hash(pabot_args):
    if "suitesfrom" in pabot_args:
        digest = hashlib.sha1()
        get_hash_of_file(pabot_args["suitesfrom"], digest)
        return digest.hexdigest()
    else:
        return "no-suites-from-option"


if PY2:

    def _open_pabotsuitenames(mode):
        return open(".pabotsuitenames", mode)

else:

    def _open_pabotsuitenames(mode):
        return open(".pabotsuitenames", mode, encoding="utf-8")


def solve_shard_suites(suite_names, pabot_args):
    if pabot_args.get("shardcount", 1) <= 1:
        return suite_names
    if "shardindex" not in pabot_args:
        return suite_names
    shard_index = pabot_args["shardindex"]
    shard_count = pabot_args["shardcount"]
    if shard_index > shard_count:
        raise DataError(
            f"Shard index ({shard_index}) greater than shard count ({shard_count})."
        )
    items_count = len(suite_names)
    if items_count < shard_count:
        raise DataError(
            f"Not enough items ({items_count}) for shard count ({shard_count})."
        )
    q, r = divmod(items_count, shard_count)
    return suite_names[
        (shard_index - 1) * q
        + min(shard_index - 1, r) : shard_index * q
        + min(shard_index, r)
    ]
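
# Worked example of the slicing above: 10 items over 3 shards divide as
# divmod(10, 3) == (3, 1), giving shard sizes 4, 3, 3:
#   --shard 1/3 -> items[0:4], --shard 2/3 -> items[4:7], --shard 3/3 -> items[7:10]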


def solve_suite_names(outs_dir, datasources, options, pabot_args):
    h = Hashes(
        dirs=get_hash_of_dirs(datasources),
        cmd=get_hash_of_command(options, pabot_args),
        suitesfrom=_suitesfrom_hash(pabot_args),
    )
    try:
        if not os.path.isfile(".pabotsuitenames"):
            suite_names = generate_suite_names(
                outs_dir, datasources, options, pabot_args
            )
            store_suite_names(h, suite_names)
            return suite_names
        with _open_pabotsuitenames("r") as suitenamesfile:
            lines = [line.strip() for line in suitenamesfile.readlines()]
        corrupted = len(lines) < 5
        file_h = None  # type: Optional[Hashes]
        file_hash = None  # type: Optional[str]
        hash_of_file = None  # type: Optional[str]
        if not corrupted:
            file_h = Hashes(
                dirs=lines[0][len("datasources:") :],
                cmd=lines[1][len("commandlineoptions:") :],
                suitesfrom=lines[2][len("suitesfrom:") :],
            )
            file_hash = lines[3][len("file:") :]
            hash_of_file = _file_hash(lines)
        corrupted = corrupted or any(
            not l.startswith("--suite ")
            and not l.startswith("--test ")
            and l != "#WAIT"
            and l != "{"
            and l != "}"
            for l in lines[4:]
        )
        execution_item_lines = [parse_execution_item_line(l) for l in lines[4:]]
        if corrupted or h != file_h or file_hash != hash_of_file:
            return _regenerate(
                file_h,
                h,
                pabot_args,
                outs_dir,
                datasources,
                options,
                execution_item_lines,
            )
        return execution_item_lines
    except IOError:
        return _levelsplit(
            generate_suite_names_with_builder(outs_dir, datasources, options),
            pabot_args,
        )


def _levelsplit(
    suites, pabot_args
):  # type: (List[SuiteItem], Dict[str, str]) -> List[ExecutionItem]
    if pabot_args.get("testlevelsplit"):
        tests = []  # type: List[ExecutionItem]
        for s in suites:
            tests.extend(s.tests)
        return tests
    return list(suites)


def _group_by_wait(lines):
    suites = [[]]  # type: List[List[ExecutionItem]]
    for suite in lines:
        if not suite.isWait:
            if suite:
                suites[-1].append(suite)
        else:
            suites.append([])
    return suites
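
# The .pabotsuitenames cache parsed above has four header lines
# ("datasources:", "commandlineoptions:", "suitesfrom:", and a "file:"
# checksum) followed by one execution item per line: "--suite <name>",
# "--test <name>", "#WAIT", or the group markers "{" and "}".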


def _regenerate(
    file_h, h, pabot_args, outs_dir, datasources, options, lines
):  # type: (Optional[Hashes], Hashes, Dict[str, str], str, List[str], Dict[str, str], List[ExecutionItem]) -> List[ExecutionItem]
    assert all(isinstance(s, ExecutionItem) for s in lines)
    if (
        (file_h is None or file_h.suitesfrom != h.suitesfrom)
        and "suitesfrom" in pabot_args
        and os.path.isfile(pabot_args["suitesfrom"])
    ):
        suites = _suites_from_file(
            file_h, h, pabot_args, outs_dir, datasources, options, lines
        )
    else:
        suites = _suites_from_wrong_or_empty_file(
            pabot_args, outs_dir, datasources, options, lines
        )
    if suites:
        store_suite_names(h, suites)
    assert all(isinstance(s, ExecutionItem) for s in suites)
    return suites


def _suites_from_file(file_h, h, pabot_args, outs_dir, datasources, options, lines):
    suites = _suites_from_outputxml(pabot_args["suitesfrom"])
    if file_h is None or file_h.dirs != h.dirs:
        all_suites = generate_suite_names_with_builder(outs_dir, datasources, options)
    else:
        all_suites = [suite for suite in lines if suite]
    return _preserve_order(all_suites, suites)


def _suites_from_wrong_or_empty_file(pabot_args, outs_dir, datasources, options, lines):
    suites = _levelsplit(
        generate_suite_names_with_builder(outs_dir, datasources, options),
        pabot_args,
    )
    return _preserve_order(suites, [suite for suite in lines if suite])


def _contains_suite_and_test(suites):
    return any(isinstance(s, SuiteItem) for s in suites) and any(
        isinstance(t, TestItem) for t in suites
    )


def _preserve_order(new_items, old_items):
    assert all(isinstance(s, ExecutionItem) for s in new_items)
    if not old_items:
        return new_items
    assert all(isinstance(s, ExecutionItem) for s in old_items)
    old_contains_tests = any(isinstance(t, TestItem) for t in old_items)
    old_contains_suites = any(isinstance(s, SuiteItem) for s in old_items)
    old_items = _fix_items(old_items)
    new_contains_tests = any(isinstance(t, TestItem) for t in new_items)
    if old_contains_tests and old_contains_suites and not new_contains_tests:
        new_items = _split_partially_to_tests(new_items, old_items)
    # TODO: Preserving order when suites => tests OR tests => suites
    preserve, ignorable = _get_preserve_and_ignore(
        new_items, old_items, old_contains_tests and old_contains_suites
    )
    exists_in_old_and_new = [
        s for s in old_items if (s in new_items and s not in ignorable) or s in preserve
    ]
    exists_only_in_new = [
        s for s in new_items if s not in old_items and s not in ignorable
    ]
    return _fix_items(exists_in_old_and_new + exists_only_in_new)


def _fix_items(items):  # type: (List[ExecutionItem]) -> List[ExecutionItem]
    assert all(isinstance(s, ExecutionItem) for s in items)
    to_be_removed = []  # type: List[int]
    for i in range(len(items)):
        for j in range(i + 1, len(items)):
            if items[i].contains(items[j]):
                to_be_removed.append(j)
    items = [item for i, item in enumerate(items) if i not in to_be_removed]
    result = []  # type: List[ExecutionItem]
    to_be_splitted = {}  # type: Dict[int, List[ExecutionItem]]
    for i in range(len(items)):
        if i in to_be_splitted:
            result.extend(items[i].difference(to_be_splitted[i]))
        else:
            result.append(items[i])
        for j in range(i + 1, len(items)):
            if items[j].contains(items[i]):
                if j not in to_be_splitted:
                    to_be_splitted[j] = []
                to_be_splitted[j].append(items[i])
    _remove_double_waits(result)
    _remove_empty_groups(result)
    if result and result[0].isWait:
        result = result[1:]
    if result and result[-1].isWait:
        result = result[:-1]
    return result


def _get_preserve_and_ignore(new_items, old_items, old_contains_suites_and_tests):
    ignorable = []
    preserve = []
    for old_item in old_items:
        for new_item in new_items:
            if (
                old_item.contains(new_item)
                and new_item != old_item
                and (isinstance(new_item, SuiteItem) or old_contains_suites_and_tests)
            ):
                preserve.append(old_item)
                ignorable.append(new_item)
        if (
            old_item.isWait
            or isinstance(old_item, GroupStartItem)
            or isinstance(old_item, GroupEndItem)
        ):
            preserve.append(old_item)
    preserve = [
        new_item
        for new_item in preserve
        if not any([i.contains(new_item) and i != new_item for i in preserve])
    ]
    return preserve, ignorable


def _remove_double_waits(exists_in_old_and_new):  # type: (List[ExecutionItem]) -> None
    doubles = []
    for i, (j, k) in enumerate(zip(exists_in_old_and_new, exists_in_old_and_new[1:])):
        if j.isWait and k == j:
            doubles.append(i)
    for i in reversed(doubles):
        del exists_in_old_and_new[i]


def _remove_empty_groups(exists_in_old_and_new):  # type: (List[ExecutionItem]) -> None
    removables = []
    for i, (j, k) in enumerate(zip(exists_in_old_and_new, exists_in_old_and_new[1:])):
        if isinstance(j, GroupStartItem) and isinstance(k, GroupEndItem):
            removables.extend([i, i + 1])
    for i in reversed(removables):
        del exists_in_old_and_new[i]


def _split_partially_to_tests(
    new_suites, old_suites
):  # type: (List[SuiteItem], List[ExecutionItem]) -> List[ExecutionItem]
    suits = []  # type: List[ExecutionItem]
    for s in new_suites:
        split = False
        for old_test in old_suites:
            if isinstance(old_test, TestItem) and s.contains(old_test):
                split = True
        if split:
            suits.extend(s.tests)
        else:
            suits.append(s)
    return suits
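
# _file_hash below digests the three header lines in order and then XORs the
# SHA1 of every item line, so the item part of the checksum is independent of
# line order: reordering suites in .pabotsuitenames keeps the file hash valid.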


def _file_hash(lines):
    digest = hashlib.sha1()
    digest.update(lines[0].encode())
    digest.update(lines[1].encode())
    digest.update(lines[2].encode())
    hashes = 0
    for line in lines[4:]:
        if line not in ("#WAIT", "{", "}"):
            line = line.decode("utf-8") if PY2 else line
            hashes ^= int(hashlib.sha1(line.encode("utf-8")).hexdigest(), 16)
    digest.update(str(hashes).encode())
    return digest.hexdigest()


def store_suite_names(hashes, suite_names):
    # type: (Hashes, List[ExecutionItem]) -> None
    assert all(isinstance(s, ExecutionItem) for s in suite_names)
    suite_lines = [s.line() for s in suite_names]
    _write("Storing .pabotsuitenames file")
    try:
        with _open_pabotsuitenames("w") as suitenamesfile:
            suitenamesfile.write("datasources:" + hashes.dirs + "\n")
            suitenamesfile.write("commandlineoptions:" + hashes.cmd + "\n")
            suitenamesfile.write("suitesfrom:" + hashes.suitesfrom + "\n")
            suitenamesfile.write(
                "file:"
                + _file_hash(
                    [
                        "datasources:" + hashes.dirs,
                        "commandlineoptions:" + hashes.cmd,
                        "suitesfrom:" + hashes.suitesfrom,
                        None,
                    ]
                    + suite_lines
                )
                + "\n"
            )
            suitenamesfile.writelines(
                (d + "\n").encode("utf-8") if PY2 and is_unicode(d) else d + "\n"
                for d in suite_lines
            )
    except IOError:
        _write(
            "[ "
            + _wrap_with(Color.YELLOW, "WARNING")
            + " ]: storing .pabotsuitenames failed"
        )


def generate_suite_names(
    outs_dir, datasources, options, pabot_args
):  # type: (object, object, object, Dict[str, str]) -> List[ExecutionItem]
    suites = []  # type: List[SuiteItem]
    if "suitesfrom" in pabot_args and os.path.isfile(pabot_args["suitesfrom"]):
        suites = _suites_from_outputxml(pabot_args["suitesfrom"])
    else:
        suites = generate_suite_names_with_builder(outs_dir, datasources, options)
    if pabot_args.get("testlevelsplit"):
        tests = []  # type: List[ExecutionItem]
        for s in suites:
            tests.extend(s.tests)
        return tests
    return list(suites)


def generate_suite_names_with_builder(outs_dir, datasources, options):
    opts = _options_for_dryrun(options, outs_dir)
    if "pythonpath" in opts:
        del opts["pythonpath"]
    settings = RobotSettings(opts)

    # Note: first argument (included_suites) is deprecated from RobotFramework 6.1
    if ROBOT_VERSION >= "6.1":
        builder = TestSuiteBuilder(
            included_extensions=settings.extension, rpa=settings.rpa, lang=opts.get("language")
        )
    else:
        builder = TestSuiteBuilder(
            settings["SuiteNames"], settings.extension, rpa=settings.rpa
        )

    suite = builder.build(*datasources)
    settings.rpa = builder.rpa
    suite.configure(**settings.suite_config)
    if settings.pre_run_modifiers:
        _write.error = _write.warn = _write.info = _write.debug = _write.trace = _write
        suite.visit(
            ModelModifier(settings.pre_run_modifiers, settings.run_empty_suite, _write)
        )
    all_suites = (
        get_all_suites_from_main_suite(suite.suites) if suite.suites else [suite]
    )
    suite_names = [
        SuiteItem(
            suite.longname,
            tests=[test.longname for test in suite.tests],
            suites=suite.suites,
        )
        for suite in all_suites
    ]
    if not suite_names and not options.get("runemptysuite", False):
        stdout_value = opts["stdout"].getvalue()
        if stdout_value:
            _write(
                "[STDOUT] from suite search:\n" + stdout_value + "[STDOUT] end",
                Color.YELLOW,
            )
        stderr_value = opts["stderr"].getvalue()
        if stderr_value:
            _write(
                "[STDERR] from suite search:\n" + stderr_value + "[STDERR] end",
                Color.RED,
            )
    return list(sorted(set(suite_names)))


def get_all_suites_from_main_suite(suites):
    all_suites = []
    for suite in suites:
        if suite.suites:
            all_suites.extend(get_all_suites_from_main_suite(suite.suites))
        else:
            all_suites.append(suite)
    return all_suites
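
# Suite discovery above builds the model with Robot Framework's
# TestSuiteBuilder using the dry-run option set (no tests are executed) and
# keeps only leaf suites, i.e. suites that directly contain tests.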


class SuiteNotPassingsAndTimes(ResultVisitor):
    def __init__(self):
        self.suites = []  # type: List[Tuple[bool, int, str]]

    def start_suite(self, suite):
        if len(suite.tests) > 0:
            self.suites.append((not suite.passed, suite.elapsedtime, suite.longname))


def _suites_from_outputxml(outputxml):
    res = ExecutionResult(outputxml)
    suite_times = SuiteNotPassingsAndTimes()
    res.visit(suite_times)
    return [SuiteItem(suite) for (_, _, suite) in reversed(sorted(suite_times.suites))]
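
# Ordering note: sorting (not passed, elapsed time, name) tuples in reverse
# puts failed suites first and, within each group, longer-running suites
# before shorter ones, matching the --suitesfrom documentation.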


def _options_for_dryrun(options, outs_dir):
    options = options.copy()
    options["log"] = "NONE"
    options["report"] = "NONE"
    options["xunit"] = "NONE"
    options["variable"] = options.get("variable", [])[:]
    options["variable"].append(pabotlib.PABOT_QUEUE_INDEX + ":-1")
    if ROBOT_VERSION >= "2.8":
        options["dryrun"] = True
    else:
        options["runmode"] = "DryRun"
    options["output"] = "suite_names.xml"
    # --timestampoutputs is not compatible with hard-coded suite_names.xml
    options["timestampoutputs"] = False
    options["outputdir"] = outs_dir
    if PY2:
        options["stdout"] = BytesIO()
        options["stderr"] = BytesIO()
    else:
        options["stdout"] = StringIO()
        options["stderr"] = StringIO()
    options["listener"] = []
    return _set_terminal_coloring_options(options)


def _options_for_rebot(options, start_time_string, end_time_string):
    rebot_options = options.copy()
    rebot_options["starttime"] = start_time_string
    rebot_options["endtime"] = end_time_string
    rebot_options["monitorcolors"] = "off"
    rebot_options["suite"] = []
    rebot_options["test"] = []
    rebot_options["exclude"] = []
    rebot_options["include"] = []
    if ROBOT_VERSION >= "2.8":
        options["monitormarkers"] = "off"
    for key in [
        "console",
        "consolemarkers",
        "consolewidth",
        "debugfile",
        "dotted",
        "dryrun",
        "exitonerror",
        "exitonfailure",
        "extension",
        "listener",
        "loglevel",
        "language",
        "maxassignlength",
        "maxerrorlines",
        "monitorcolors",
        "parser",
        "prerunmodifier",
        "quiet",
        "randomize",
        "runemptysuite",
        "rerunfailed",
        "skip",
        "skiponfailure",
        "skipteardownonexit",
        "variable",
        "variablefile",
    ]:
        if key in rebot_options:
            del rebot_options[key]
    return rebot_options


def _now():
    return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")


def _print_elapsed(start, end):
    _write(
        "Total testing: "
        + _time_string(sum(_ALL_ELAPSED))
        + "\nElapsed time: "
        + _time_string(end - start)
    )


def _time_string(elapsed):
    millis = int((elapsed * 100) % 100)
    seconds = int(elapsed) % 60
    elapsed_minutes = (int(elapsed) - seconds) / 60
    minutes = elapsed_minutes % 60
    elapsed_hours = (elapsed_minutes - minutes) / 60
    elapsed_string = ""
    if elapsed_hours > 0:
        plural = ""
        if elapsed_hours > 1:
            plural = "s"
        elapsed_string += ("%d hour" % elapsed_hours) + plural + " "
    if minutes > 0:
        plural = ""
        if minutes > 1:
            plural = "s"
        elapsed_string += ("%d minute" % minutes) + plural + " "
    return elapsed_string + "%d.%d seconds" % (seconds, millis)
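
# Example: _time_string(3725.5) returns "1 hour 2 minutes 5.50 seconds".
# Despite the name, "millis" holds hundredths of a second.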


def keyboard_interrupt(*args):
    global CTRL_C_PRESSED
    CTRL_C_PRESSED = True


def _parallel_execute(
    items, processes, datasources, outs_dir, opts_for_run, pabot_args
):
    original_signal_handler = signal.signal(signal.SIGINT, keyboard_interrupt)
    pool = ThreadPool(len(items) if processes is None else processes)
    results = [pool.map_async(execute_and_wait_with, items, 1)]
    delayed_result_append = 0
    new_items = []
    while not all(result.ready() for result in results) or delayed_result_append > 0:
        # keyboard interrupt is executed in main thread
        # and needs this loop to get time to get executed
        try:
            time.sleep(0.1)
        except IOError:
            keyboard_interrupt()
        dynamic_items = _get_dynamically_created_execution_items(
            datasources, outs_dir, opts_for_run, pabot_args
        )
        if dynamic_items:
            new_items += dynamic_items
            # Because of last level construction, wait for more.
            delayed_result_append = 3
        delayed_result_append = max(0, delayed_result_append - 1)
        if new_items and delayed_result_append == 0:
            _construct_last_levels([new_items])
            results.append(pool.map_async(execute_and_wait_with, new_items, 1))
            new_items = []
    pool.close()
    signal.signal(signal.SIGINT, original_signal_handler)
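
# The loop above also polls for execution items created dynamically while
# tests run (e.g. suites added through PabotLib's execution queue); newly
# found items wait a few ticks so their last-level markers can be
# constructed before they are scheduled on the pool.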


def _output_dir(options, cleanup=True):
    outputdir = options.get("outputdir", ".")
    outpath = os.path.join(outputdir, "pabot_results")
    if cleanup and os.path.isdir(outpath):
        shutil.rmtree(outpath)
    return outpath


def _copy_output_artifacts(options, file_extensions=None, include_subfolders=False):
    file_extensions = file_extensions or ["png"]
    pabot_outputdir = _output_dir(options, cleanup=False)
    outputdir = options.get("outputdir", ".")
    copied_artifacts = []
    for location, _, file_names in os.walk(pabot_outputdir):
        for file_name in file_names:
            file_ext = file_name.split(".")[-1]
            if file_ext in file_extensions:
                rel_path = os.path.relpath(location, pabot_outputdir)
                prefix = rel_path.split(os.sep)[0]  # folders named "process-id"
                dst_folder_path = outputdir
                # if it is a file from sub-folders of "location"
                if os.sep in rel_path:
                    if not include_subfolders:
                        continue
                    # create destination sub-folder
                    subfolder_path = rel_path[rel_path.index(os.sep) + 1 :]
                    dst_folder_path = os.path.join(outputdir, subfolder_path)
                    if not os.path.isdir(dst_folder_path):
                        os.makedirs(dst_folder_path)
                dst_file_name = "-".join([prefix, file_name])
                shutil.copy2(
                    os.path.join(location, file_name),
                    os.path.join(dst_folder_path, dst_file_name),
                )
                copied_artifacts.append(file_name)
    return copied_artifacts
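
# Copied artifacts are renamed "<process-folder>-<original name>" (the first
# path component under pabot_results) so files from parallel runs cannot
# overwrite each other in the final output directory.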


def _report_results(outs_dir, pabot_args, options, start_time_string, tests_root_name):
    if "pythonpath" in options:
        del options["pythonpath"]
    if ROBOT_VERSION < "4.0":
        stats = {
            "critical": {"total": 0, "passed": 0, "failed": 0},
            "all": {"total": 0, "passed": 0, "failed": 0},
        }
    else:
        stats = {
            "total": 0,
            "passed": 0,
            "failed": 0,
            "skipped": 0,
        }
    if pabot_args["argumentfiles"]:
        outputs = []  # type: List[str]
        for index, _ in pabot_args["argumentfiles"]:
            copied_artifacts = _copy_output_artifacts(
                options, pabot_args["artifacts"], pabot_args["artifactsinsubfolders"]
            )
            outputs += [
                _merge_one_run(
                    os.path.join(outs_dir, index),
                    options,
                    tests_root_name,
                    stats,
                    copied_artifacts,
                    outputfile=os.path.join("pabot_results", "output%s.xml" % index),
                )
            ]
        if "output" not in options:
            options["output"] = "output.xml"
        _write_stats(stats)
        return rebot(*outputs, **_options_for_rebot(options, start_time_string, _now()))
    else:
        return _report_results_for_one_run(
            outs_dir, pabot_args, options, start_time_string, tests_root_name, stats
        )


def _write_stats(stats):
    if ROBOT_VERSION < "4.0":
        crit = stats["critical"]
        al = stats["all"]
        _write(
            "%d critical tests, %d passed, %d failed"
            % (crit["total"], crit["passed"], crit["failed"])
        )
        _write(
            "%d tests total, %d passed, %d failed"
            % (al["total"], al["passed"], al["failed"])
        )
    else:
        _write(
            "%d tests, %d passed, %d failed, %d skipped."
            % (stats["total"], stats["passed"], stats["failed"], stats["skipped"])
        )
    _write("===================================================")
|
|
1460
|
+
|
|
1461
|
+
|
|
1462
|
+
def _report_results_for_one_run(
    outs_dir, pabot_args, options, start_time_string, tests_root_name, stats
):
    copied_artifacts = _copy_output_artifacts(
        options, pabot_args["artifacts"], pabot_args["artifactsinsubfolders"]
    )
    output_path = _merge_one_run(
        outs_dir, options, tests_root_name, stats, copied_artifacts
    )
    _write_stats(stats)
    if (
        "report" in options
        and options["report"] == "NONE"
        and "log" in options
        and options["log"] == "NONE"
    ):
        options["output"] = output_path  # REBOT will return error 252 if nothing is written
    else:
        _write("Output: %s" % output_path)
        options["output"] = None  # Do not write output again with rebot
    return rebot(output_path, **_options_for_rebot(options, start_time_string, _now()))


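# Merge every output XML found under outs_dir into a single result file.
# A child process that produced invalid XML flips _ABNORMAL_EXIT_HAPPENED,
# which makes the whole run exit with code 252.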
def _merge_one_run(
    outs_dir, options, tests_root_name, stats, copied_artifacts, outputfile=None
):
    outputfile = outputfile or options.get("output", "output.xml")
    output_path = os.path.abspath(
        os.path.join(options.get("outputdir", "."), outputfile)
    )
    files = natsorted(glob(os.path.join(_glob_escape(outs_dir), "**/*.xml")))
    if not files:
        _write('WARN: No output files in "%s"' % outs_dir, Color.YELLOW)
        return ""

    def invalid_xml_callback():
        global _ABNORMAL_EXIT_HAPPENED
        _ABNORMAL_EXIT_HAPPENED = True

    if PY2:
        files = [f.decode(SYSTEM_ENCODING) if not is_unicode(f) else f for f in files]
    resu = merge(
        files, options, tests_root_name, copied_artifacts, invalid_xml_callback
    )
    _update_stats(resu, stats)
    if ROBOT_VERSION >= "7.0" and options.get("legacyoutput"):
        resu.save(output_path, legacy_output=True)
    else:
        resu.save(output_path)
    return output_path


def _update_stats(result, stats):
    s = result.statistics
    if ROBOT_VERSION < "4.0":
        stats["critical"]["total"] += s.total.critical.total
        stats["critical"]["passed"] += s.total.critical.passed
        stats["critical"]["failed"] += s.total.critical.failed
        stats["all"]["total"] += s.total.all.total
        stats["all"]["passed"] += s.total.all.passed
        stats["all"]["failed"] += s.total.all.failed
    else:
        stats["total"] += s.total.total
        stats["passed"] += s.total.passed
        stats["failed"] += s.total.failed
        stats["skipped"] += s.total.skipped


# This is from https://github.com/django/django/blob/master/django/utils/glob.py
_magic_check = re.compile("([*?[])")


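# Without escaping, a directory such as "outputs [2]" would be read as a
# glob character class; e.g. _glob_escape("outputs [2]") returns
# "outputs [[]2]".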
def _glob_escape(pathname):
    """
    Escape all special characters.
    """
    drive, pathname = os.path.splitdrive(pathname)
    pathname = _magic_check.sub(r"[\1]", pathname)
    return drive + pathname


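# All console output is funneled through MESSAGE_QUEUE and printed by a
# single writer thread, so messages from parallel executors do not
# interleave mid-line. A None message tells the writer thread to stop.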
def _writer():
    while True:
        message = MESSAGE_QUEUE.get()
        if message is None:
            MESSAGE_QUEUE.task_done()
            return
        print(message)
        sys.stdout.flush()
        MESSAGE_QUEUE.task_done()


def _write(message, color=None):
    MESSAGE_QUEUE.put(_wrap_with(color, message))


def _wrap_with(color, message):
    if _is_output_coloring_supported() and color:
        return "%s%s%s" % (color, message, Color.ENDC)
    return message


def _is_output_coloring_supported():
    return sys.stdout.isatty() and os.name in Color.SUPPORTED_OSES


def _start_message_writer():
    t = threading.Thread(target=_writer)
    t.start()


def _stop_message_writer():
    MESSAGE_QUEUE.put(None)
    MESSAGE_QUEUE.join()


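# Binding to port 0 makes the OS pick a free port; used unless a fixed
# port was requested with --pabotlibport.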
def _get_free_port(pabot_args):
    if pabot_args["pabotlibport"] != 0:
        return pabot_args["pabotlibport"]
    with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
        s.bind(("localhost", 0))
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        return s.getsockname()[1]


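# Start the PabotLib remote server as a separate Python process when
# --pabotlib is enabled. _PABOTLIBURI is recorded in any case, so that
# executors can still reach an externally started server.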
def _start_remote_library(pabot_args):  # type: (dict) -> Optional[subprocess.Popen]
    global _PABOTLIBURI
    free_port = _get_free_port(pabot_args)
    _PABOTLIBURI = "%s:%s" % (pabot_args["pabotlibhost"], free_port)
    if not pabot_args["pabotlib"]:
        return None
    if pabot_args.get("resourcefile") and not os.path.exists(
        pabot_args["resourcefile"]
    ):
        _write(
            "Warning: specified resource file doesn't exist."
            " Some tests may fail or continue forever.",
            Color.YELLOW,
        )
        pabot_args["resourcefile"] = None
    return subprocess.Popen(
        '"{python}" -m {pabotlibname} {resourcefile} {pabotlibhost} {pabotlibport}'.format(
            python=sys.executable,
            pabotlibname=pabotlib.__name__,
            resourcefile=pabot_args.get("resourcefile"),
            pabotlibhost=pabot_args["pabotlibhost"],
            pabotlibport=free_port,
        ),
        shell=True,
    )


def _stop_remote_library(process):  # type: (subprocess.Popen) -> None
    _write("Stopping PabotLib process")
    try:
        remoteLib = Remote(_PABOTLIBURI)
        remoteLib.run_keyword("stop_remote_libraries", [], {})
        remoteLib.run_keyword("stop_remote_server", [], {})
    except RuntimeError:
        _write("Could not connect to PabotLib - assuming stopped already")
        return
    i = 50
    while i > 0 and process.poll() is None:
        time.sleep(0.1)
        i -= 1
    if i == 0:
        _write(
            "Could not stop PabotLib Process in 5 seconds - calling terminate",
            Color.YELLOW,
        )
        process.terminate()
    else:
        _write("PabotLib process stopped")


def _get_suite_root_name(suite_names):
    top_names = [x.top_name() for group in suite_names for x in group]
    if top_names and top_names.count(top_names[0]) == len(top_names):
        return top_names[0]
    return ""


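# One unit of work for a parallel executor: the execution item (a suite,
# test, group, or chunk) together with everything needed to build its
# command line. Instances get a monotonically increasing index that the
# completion bookkeeping relies on.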
class QueueItem(object):
    _queue_index = 0

    def __init__(
        self,
        datasources,
        outs_dir,
        options,
        execution_item,
        command,
        verbose,
        argfile,
        hive=None,
        processes=0,
        timeout=None
    ):
        # type: (List[str], str, Dict[str, object], ExecutionItem, List[str], bool, Tuple[str, Optional[str]], Optional[str], int, Optional[int]) -> None
        self.datasources = datasources
        self.outs_dir = (
            outs_dir.encode("utf-8") if PY2 and is_unicode(outs_dir) else outs_dir
        )
        self.options = options
        self.execution_item = (
            execution_item if not hive else HivedItem(execution_item, hive)
        )
        self.command = command
        self.verbose = verbose
        self.argfile_index = argfile[0]
        self.argfile = argfile[1]
        self._index = QueueItem._queue_index
        QueueItem._queue_index += 1
        self.last_level = None
        self.hive = hive
        self.processes = processes
        self.timeout = timeout

    @property
    def index(self):
        # type: () -> int
        return self._index

    @property
    def display_name(self):
        # type: () -> str
        if self.argfile:
            return "%s {%s}" % (self.execution_item.name, self.argfile)
        return self.execution_item.name


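# Build the work queue. Dry runs execute almost instantly, so their items
# are bundled into larger chunks; normal runs keep one item per suite (or
# test) so the load can be balanced across processes.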
def _create_execution_items(
    suite_groups, datasources, outs_dir, options, opts_for_run, pabot_args
):
    is_dry_run = (
        options.get("dryrun")
        if ROBOT_VERSION >= "2.8"
        else options.get("runmode") == "DryRun"
    )
    if is_dry_run:
        all_items = _create_execution_items_for_dry_run(
            suite_groups, datasources, outs_dir, opts_for_run, pabot_args
        )
    else:
        all_items = _create_execution_items_for_run(
            suite_groups, datasources, outs_dir, options, opts_for_run, pabot_args
        )
    _construct_index_and_completed_index(all_items)
    _construct_last_levels(all_items)
    return all_items


def _construct_index_and_completed_index(all_items):
    # type: (List[List[QueueItem]]) -> None
    global _COMPLETED_LOCK, _NOT_COMPLETED_INDEXES
    with _COMPLETED_LOCK:
        for item_group in all_items:
            for item in item_group:
                _NOT_COMPLETED_INDEXES.append(item.index)


def _create_execution_items_for_run(
    suite_groups, datasources, outs_dir, options, opts_for_run, pabot_args
):
    global _NUMBER_OF_ITEMS_TO_BE_EXECUTED
    all_items = []  # type: List[List[QueueItem]]
    _NUMBER_OF_ITEMS_TO_BE_EXECUTED = 0
    for suite_group in suite_groups:
        # TODO: Fix this better
        if (
            options.get("randomize") in ["all", "suites"]
            and "suitesfrom" not in pabot_args
        ):
            random.shuffle(suite_group)
        items = _create_items(
            datasources, opts_for_run, outs_dir, pabot_args, suite_group
        )
        _NUMBER_OF_ITEMS_TO_BE_EXECUTED += len(items)
        all_items.append(items)
    return all_items


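# Note the cross product: N suites combined with M argument files yield
# N * M queue items.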
def _create_items(datasources, opts_for_run, outs_dir, pabot_args, suite_group):
    return [
        QueueItem(
            datasources,
            outs_dir,
            opts_for_run,
            suite,
            pabot_args["command"],
            pabot_args["verbose"],
            argfile,
            pabot_args.get("hive"),
            pabot_args["processes"],
            pabot_args["processtimeout"]
        )
        for suite in suite_group
        for argfile in pabot_args["argumentfiles"] or [("", None)]
    ]


def _create_execution_items_for_dry_run(
    suite_groups, datasources, outs_dir, opts_for_run, pabot_args
):
    global _NUMBER_OF_ITEMS_TO_BE_EXECUTED
    all_items = []  # type: List[List[QueueItem]]
    _NUMBER_OF_ITEMS_TO_BE_EXECUTED = 0
    processes_count = pabot_args["processes"]
    for suite_group in suite_groups:
        items = _create_items(
            datasources, opts_for_run, outs_dir, pabot_args, suite_group
        )
        chunk_size = (
            round(len(items) / processes_count)
            if len(items) > processes_count
            else 1
        )
        chunked_items = list(_chunk_items(items, chunk_size))
        _NUMBER_OF_ITEMS_TO_BE_EXECUTED += len(chunked_items)
        all_items.append(chunked_items)
    return all_items


def _chunk_items(items, chunk_size):
    for i in range(0, len(items), chunk_size):
        chunked_items = items[i : i + chunk_size]
        base_item = chunked_items[0]
        if not base_item:
            continue
        execution_items = SuiteItems([item.execution_item for item in chunked_items])
        chunked_item = QueueItem(
            base_item.datasources,
            base_item.outs_dir,
            base_item.options,
            execution_items,
            base_item.command,
            base_item.verbose,
            (base_item.argfile_index, base_item.argfile),
            processes=base_item.processes,
            timeout=base_item.timeout
        )
        yield chunked_item


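# Find where a dotted suite name stops sharing a prefix with the names
# executed after it; e.g. _find_ending_level("a.b.c", ["a.b.d"]) returns
# "a.b.c" (the names differ at the third part), while a name that is a
# pure prefix of a later one returns name + ".PABOT_noend".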
def _find_ending_level(name, group):
    n = name.split(".")
    level = -1
    for other in group:
        o = other.split(".")
        dif = [i for i in range(min(len(o), len(n))) if o[i] != n[i]]
        if dif:
            level = max(dif[0], level)
        else:
            return name + ".PABOT_noend"
    return ".".join(n[: (level + 1)])


def _construct_last_levels(all_items):
    names = []
    for items in all_items:
        for item in items:
            if isinstance(item.execution_item, SuiteItems):
                for suite in item.execution_item.suites:
                    names.append(suite.name)
            else:
                names.append(item.execution_item.name)
    index = 0
    for items in all_items:
        for item in items:
            if isinstance(item.execution_item, SuiteItems):
                for suite in item.execution_item.suites:
                    item.last_level = _find_ending_level(suite.name, names[index + 1 :])
            else:
                item.last_level = _find_ending_level(
                    item.execution_item.name, names[index + 1 :]
                )
            index += 1


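# Retry for up to ~30 seconds (300 attempts, 0.1 s apart) while the newly
# spawned PabotLib server is still starting up.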
def _initialize_queue_index():
    global _PABOTLIBURI
    plib = Remote(_PABOTLIBURI)
    # INITIALISE PARALLEL QUEUE MIN INDEX
    for i in range(300):
        try:
            plib.run_keyword(
                "set_parallel_value_for_key",
                [pabotlib.PABOT_MIN_QUEUE_INDEX_EXECUTING_PARALLEL_VALUE, 0],
                {},
            )
            return
        except RuntimeError:
            # REMOTE LIB NOT YET CONNECTED
            time.sleep(0.1)
    raise RuntimeError("Can not connect to PabotLib at %s" % _PABOTLIBURI)


def _get_dynamically_created_execution_items(
    datasources, outs_dir, opts_for_run, pabot_args
):
    global _COMPLETED_LOCK, _NOT_COMPLETED_INDEXES, _NUMBER_OF_ITEMS_TO_BE_EXECUTED
    if not _pabotlib_in_use():
        return None
    plib = Remote(_PABOTLIBURI)
    new_suites = plib.run_keyword("get_added_suites", [], {})
    if len(new_suites) == 0:
        return None
    suite_group = [DynamicSuiteItem(s, v) for s, v in new_suites]
    items = [
        QueueItem(
            datasources,
            outs_dir,
            opts_for_run,
            suite,
            pabot_args["command"],
            pabot_args["verbose"],
            ("", None),
            pabot_args.get("hive"),
            pabot_args["processes"],
            pabot_args["processtimeout"]
        )
        for suite in suite_group
    ]
    with _COMPLETED_LOCK:
        _NUMBER_OF_ITEMS_TO_BE_EXECUTED += len(items)
        for item in items:
            _NOT_COMPLETED_INDEXES.append(item.index)
    return items


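# Exit code 252 follows Robot Framework's convention for invalid test data
# or command line options; it is also used when a child process produced
# invalid output XML (_ABNORMAL_EXIT_HAPPENED).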
def main(args=None):
    return sys.exit(main_program(args))


def main_program(args):
    global _PABOTLIBPROCESS
    args = args or sys.argv[1:]
    if len(args) == 0:
        print(
            "[ "
            + _wrap_with(Color.RED, "ERROR")
            + " ]: Expected at least 1 argument, got 0."
        )
        print("Try --help for usage information.")
        return 252
    start_time = time.time()
    start_time_string = _now()
    # NOTE: timeout option
    try:
        _start_message_writer()
        options, datasources, pabot_args, opts_for_run = parse_args(args)
        if pabot_args["help"]:
            print(__doc__.replace("[PABOT_VERSION]", PABOT_VERSION))
            return 0
        if len(datasources) == 0:
            print("[ " + _wrap_with(Color.RED, "ERROR") + " ]: No datasources given.")
            print("Try --help for usage information.")
            return 252
        _PABOTLIBPROCESS = _start_remote_library(pabot_args)
        if _pabotlib_in_use():
            _initialize_queue_index()
        outs_dir = _output_dir(options)
        suite_groups = _group_suites(outs_dir, datasources, options, pabot_args)
        if pabot_args["verbose"]:
            _write("Suite names resolved in %s seconds" % str(time.time() - start_time))
        if not suite_groups or suite_groups == [[]]:
            _write("No tests to execute")
            if not options.get("runemptysuite", False):
                return 252
        execution_items = _create_execution_items(
            suite_groups, datasources, outs_dir, options, opts_for_run, pabot_args
        )
        while execution_items:
            items = execution_items.pop(0)
            _parallel_execute(
                items,
                pabot_args["processes"],
                datasources,
                outs_dir,
                opts_for_run,
                pabot_args,
            )
        result_code = _report_results(
            outs_dir,
            pabot_args,
            options,
            start_time_string,
            _get_suite_root_name(suite_groups),
        )
        return result_code if not _ABNORMAL_EXIT_HAPPENED else 252
    except Information as i:
        print(__doc__.replace("[PABOT_VERSION]", PABOT_VERSION))
        print(i.message)
    except DataError as err:
        print(err.message)
        return 252
    except Exception:
        _write("[ERROR] EXCEPTION RAISED DURING PABOT EXECUTION", Color.RED)
        _write(
            "[ERROR] PLEASE CONSIDER REPORTING THIS ISSUE TO https://github.com/mkorpela/pabot/issues",
            Color.RED,
        )
        _write("Pabot: %s" % PABOT_VERSION)
        _write("Python: %s" % sys.version)
        _write("Robot Framework: %s" % ROBOT_VERSION)
        raise
    finally:
        if _PABOTLIBPROCESS:
            _stop_remote_library(_PABOTLIBPROCESS)
        _print_elapsed(start_time, time.time())
        _stop_message_writer()


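# Suite grouping pipeline: resolve suite names, validate #DEPENDS, apply
# the requested ordering, shard, then either pack everything into
# process-count chunks (--chunk) or split on wait/group markers, and
# finally arrange each group into dependency tiers.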
def _group_suites(outs_dir, datasources, options, pabot_args):
    suite_names = solve_suite_names(outs_dir, datasources, options, pabot_args)
    _verify_depends(suite_names)
    ordered_suites = _preserve_order(suite_names, pabot_args.get("ordering"))
    shard_suites = solve_shard_suites(ordered_suites, pabot_args)
    grouped_suites = (
        _chunked_suite_names(shard_suites, pabot_args["processes"])
        if pabot_args["chunk"]
        else _group_by_wait(_group_by_groups(ordered_suites))
    )
    grouped_by_depend = _all_grouped_suites_by_depend(grouped_suites)
    return grouped_by_depend


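# Spread the suites as evenly as possible: e.g. 10 suites over 4 processes
# gives divmod(10, 4) == (2, 2) and chunk sizes 3, 3, 2, 2.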
def _chunked_suite_names(suite_names, processes):
    q, r = divmod(len(suite_names), processes)
    result = []
    for index in range(processes):
        chunk = suite_names[
            (index) * q + min(index, r) : (index + 1) * q + min((index + 1), r)
        ]
        if len(chunk) == 0:
            continue
        grouped = GroupItem()
        for item in chunk:
            grouped.add(item)
        result.append(grouped)
    return [result]


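# Fail fast on bad #DEPENDS declarations: every dependency must name a
# known runnable suite, a suite may not depend on itself, and #DEPENDS
# cannot be mixed with grouped suites. Longer dependency cycles are
# caught later in _group_by_depend.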
def _verify_depends(suite_names):
    runnable_suites = list(
        filter(lambda suite: isinstance(suite, RunnableItem), suite_names)
    )
    suites_with_depends = list(filter(lambda suite: suite.depends, runnable_suites))
    suites_with_found_dependencies = list(
        filter(
            lambda suite: any(
                runnable_suite.name == suite.depends
                for runnable_suite in runnable_suites
            ),
            suites_with_depends,
        )
    )
    if suites_with_depends != suites_with_found_dependencies:
        raise Exception("There are unmet dependencies using #DEPENDS")
    suites_with_circular_dependencies = list(
        filter(lambda suite: suite.depends == suite.name, suites_with_depends)
    )
    if suites_with_circular_dependencies:
        raise Exception("There are suites with circular dependencies using #DEPENDS")
    grouped_suites = list(
        filter(lambda suite: isinstance(suite, GroupItem), suite_names)
    )
    if grouped_suites and suites_with_depends:
        raise Exception("#DEPENDS and grouped suites are incompatible")


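# Build dependency tiers: the first tier holds suites without #DEPENDS,
# and each following tier holds the suites whose dependency is in the tier
# before it. Suites that never land in a tier indicate a cycle or an
# unmet dependency.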
def _group_by_depend(suite_names):
    group_items = list(filter(lambda suite: isinstance(suite, GroupItem), suite_names))
    runnable_suites = list(
        filter(lambda suite: isinstance(suite, RunnableItem), suite_names)
    )
    if group_items or not runnable_suites:
        return [suite_names]
    independent_tests = list(filter(lambda suite: not suite.depends, runnable_suites))
    dependency_tree = [independent_tests]
    while True:
        dependent_tests = list(filter(lambda suite: suite.depends, runnable_suites))
        dependent_on_last_stage = list(
            filter(
                lambda suite: any(
                    test_in_tier_before.name == suite.depends
                    for test_in_tier_before in dependency_tree[-1]
                ),
                dependent_tests,
            )
        )
        if not dependent_on_last_stage:
            break
        dependency_tree += [dependent_on_last_stage]
    flattened_dependency_tree = sum(dependency_tree, [])
    if len(flattened_dependency_tree) != len(runnable_suites):
        raise Exception("There are circular or unmet dependencies using #DEPENDS")
    return dependency_tree


def _all_grouped_suites_by_depend(grouped_suites):
    grouped_by_depend = []
    for group_suite in grouped_suites:
        grouped_by_depend += _group_by_depend(group_suite)
    return grouped_by_depend


if __name__ == "__main__":
    main()