robotframework-pabot 5.0.0__py3-none-any.whl → 5.2.0b1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pabot/ProcessManager.py +376 -0
- pabot/__init__.py +1 -1
- pabot/arguments.py +80 -24
- pabot/pabot.py +392 -166
- pabot/pabotlib.py +1 -1
- pabot/result_merger.py +2 -2
- pabot/robotremoteserver.py +27 -7
- pabot/skip_listener.py +7 -0
- pabot/timeout_listener.py +5 -0
- pabot/writer.py +110 -0
- {robotframework_pabot-5.0.0.dist-info → robotframework_pabot-5.2.0b1.dist-info}/METADATA +93 -42
- robotframework_pabot-5.2.0b1.dist-info/RECORD +25 -0
- robotframework_pabot-5.0.0.dist-info/RECORD +0 -22
- robotframework_pabot-5.0.0.dist-info/licenses/LICENSE.txt +0 -202
- {robotframework_pabot-5.0.0.dist-info → robotframework_pabot-5.2.0b1.dist-info}/WHEEL +0 -0
- {robotframework_pabot-5.0.0.dist-info → robotframework_pabot-5.2.0b1.dist-info}/entry_points.txt +0 -0
- {robotframework_pabot-5.0.0.dist-info → robotframework_pabot-5.2.0b1.dist-info}/top_level.txt +0 -0
pabot/ProcessManager.py
ADDED
@@ -0,0 +1,376 @@
import os
import sys
import time
import signal
import threading
import subprocess
import datetime
import queue
import locale

try:
    import psutil
except ImportError:
    psutil = None

from .writer import get_writer, Color


def split_on_first(lst, value):
    for i, x in enumerate(lst):
        if x == value:
            return lst[:i], lst[i+1:]
    return lst, []


class ProcessManager:
    def __init__(self):
        self.processes = []
        self.lock = threading.Lock()
        self.writer = get_writer()

        # Install SIGINT only in main thread
        try:
            if threading.current_thread() is threading.main_thread():
                signal.signal(signal.SIGINT, self._handle_sigint)
            else:
                self.writer.write(
                    "[ProcessManager] (test mode) signal handlers disabled (not in main thread)"
                )
        except Exception as e:
            self.writer.write(f"[WARN] Could not register signal handler: {e}")

    # -------------------------------
    # SIGNAL HANDLING
    # -------------------------------

    def _handle_sigint(self, signum, frame):
        self.writer.write("[ProcessManager] Ctrl+C detected — terminating all subprocesses", color=Color.RED)
        self.terminate_all()
        sys.exit(130)

    # -------------------------------
    # OUTPUT STREAM READERS
    # -------------------------------

    def _enqueue_output(self, pipe, q):
        """
        Reads lines from `pipe` and puts them into queue `q`.
        When pipe is exhausted, pushes `None` sentinel.
        """
        try:
            with pipe:
                for line in iter(pipe.readline, b""):
                    q.put(line)
        finally:
            q.put(None)  # sentinel → "this stream is finished"

    def _safe_write_to_stream(self, stream, text):
        """
        Writes text safely to an output stream.
        If encoding errors occur, fall back to bytes/replace.
        """
        try:
            stream.write(text)
            try:
                stream.flush()
            except Exception:
                pass
            return
        except UnicodeEncodeError:
            pass
        except Exception:
            pass

        enc = getattr(stream, "encoding", None) or locale.getpreferredencoding(False) or "utf-8"

        try:
            b = text.encode(enc, errors="replace")
            if hasattr(stream, "buffer"):
                try:
                    stream.buffer.write(b)
                    stream.buffer.write(b"\n")
                    stream.buffer.flush()
                    return
                except Exception:
                    pass

            safe = b.decode(enc, errors="replace")
            stream.write(safe + "\n")
            stream.flush()
        except Exception:
            try:
                print(text)
            except Exception:
                pass

    # -------------------------------
    # STREAM OUTPUT MERGER
    # -------------------------------

    def _stream_output(self, process, stdout=None, stderr=None,
                       item_name="process", log_file=None):

        q_out = queue.Queue()
        q_err = queue.Queue()

        t_out = None
        t_err = None

        if process.stdout:
            t_out = threading.Thread(target=self._enqueue_output, args=(process.stdout, q_out))
            t_out.daemon = True
            t_out.start()

        if process.stderr:
            t_err = threading.Thread(target=self._enqueue_output, args=(process.stderr, q_err))
            t_err.daemon = True
            t_err.start()

        stdout_done = False
        stderr_done = False

        log_handle = None
        if log_file:
            os.makedirs(os.path.dirname(log_file), exist_ok=True)
            log_handle = open(log_file, "a", encoding="utf-8")

        try:
            while True:
                now = datetime.datetime.now()

                # STDOUT
                if not stdout_done:
                    try:
                        line = q_out.get(timeout=0.05)
                        if line is None:
                            stdout_done = True
                        else:
                            msg = line.decode(errors="replace").rstrip()
                            self._safe_write_to_stream(stdout or sys.stdout, msg + "\n")
                            if log_handle:
                                log_handle.write(f"{now} {msg}\n")
                    except queue.Empty:
                        pass

                # STDERR
                if not stderr_done:
                    try:
                        line = q_err.get_nowait()
                        if line is None:
                            stderr_done = True
                        else:
                            msg = line.decode(errors="replace").rstrip()
                            self._safe_write_to_stream(stderr or sys.stderr, msg + "\n")
                            if log_handle:
                                log_handle.write(f"{now} {msg}\n")
                    except queue.Empty:
                        pass

                # Terminate when both streams finished
                if stdout_done and stderr_done:
                    break

        finally:
            if t_out:
                t_out.join()
            if t_err:
                t_err.join()
            if log_handle:
                log_handle.close()

    # -------------------------------
    # PROCESS CREATION
    # -------------------------------

    def _start_process(self, cmd, env=None):
        if sys.platform == "win32":
            return subprocess.Popen(
                cmd,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                env=env,
                shell=False,
                creationflags=subprocess.CREATE_NEW_PROCESS_GROUP,
            )
        else:
            return subprocess.Popen(
                cmd,
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
                env=env,
                shell=False,
                preexec_fn=os.setsid,
            )

    # -------------------------------
    # PROCESS TREE TERMINATION
    # -------------------------------

    def _terminate_tree(self, process):
        if process.poll() is not None:
            return

        self.writer.write(
            f"[ProcessManager] Terminating process tree PID={process.pid}",
            color=Color.YELLOW
        )

        # PRIMARY: psutil (best reliability)
        if psutil:
            try:
                parent = psutil.Process(process.pid)
                children = parent.children(recursive=True)
                for c in children:
                    try:
                        c.terminate()
                    except Exception:
                        pass
                psutil.wait_procs(children, timeout=5)

                for c in children:
                    if c.is_running():
                        try:
                            c.kill()
                        except Exception:
                            pass

                try:
                    parent.terminate()
                except Exception:
                    pass

                try:
                    parent.wait(timeout=5)
                except psutil.TimeoutExpired:
                    try:
                        parent.kill()
                    except Exception:
                        pass

                return
            except Exception:
                pass

        # FALLBACK — Windows
        if sys.platform == "win32":
            subprocess.run(
                ["taskkill", "/PID", str(process.pid), "/T", "/F"],
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL,
            )
            return

        # FALLBACK — Linux / macOS
        try:
            os.killpg(os.getpgid(process.pid), signal.SIGTERM)
            time.sleep(2)
            if process.poll() is None:
                os.killpg(os.getpgid(process.pid), signal.SIGKILL)
        except Exception:
            if process.poll() is None:
                try:
                    process.kill()
                except Exception:
                    pass

        try:
            process.wait(timeout=5)
        except Exception:
            pass

    # -------------------------------
    # PUBLIC API
    # -------------------------------

    def terminate_all(self):
        with self.lock:
            for p in list(self.processes):
                self._terminate_tree(p)
            self.processes.clear()

    def run(self, cmd, *, env=None, stdout=None, stderr=None,
            timeout=None, verbose=False, item_name="process",
            log_file=None, pool_id=0, item_index=0):

        start = time.time()
        process = self._start_process(cmd, env)

        with self.lock:
            self.processes.append(process)

        ts = datetime.datetime.now()

        if verbose:
            self.writer.write(
                f"{ts} [PID:{process.pid}] [{pool_id}] [ID:{item_index}] "
                f"EXECUTING PARALLEL {item_name}:\n{' '.join(cmd)}"
            )
        else:
            self.writer.write(
                f"{ts} [PID:{process.pid}] [{pool_id}] [ID:{item_index}] EXECUTING {item_name}"
            )

        # Start logging thread
        log_thread = threading.Thread(
            target=self._stream_output,
            args=(process, stdout, stderr, item_name, log_file),
        )
        log_thread.daemon = True
        log_thread.start()

        rc = None
        ping_interval = 50  # 5s
        next_ping = ping_interval
        counter = 0

        while rc is None:
            rc = process.poll()

            # TIMEOUT CHECK
            if timeout and (time.time() - start > timeout):
                ts = datetime.datetime.now()
                self.writer.write(
                    f"{ts} [PID:{process.pid}] [{pool_id}] [ID:{item_index}] "
                    f"Process {item_name} killed due to exceeding the maximum timeout of {timeout} seconds"
                )
                self._terminate_tree(process)
                rc = -1

                # Dryrun process to mark all tests as failed due to timeout
                this_dir = os.path.dirname(os.path.abspath(__file__))
                listener_path = os.path.join(this_dir, "timeout_listener.py")
                dry_run_env = env.copy() if env else os.environ.copy()
                before, after = split_on_first(cmd, "-A")
                dryrun_cmd = before + ["--dryrun", '--listener', listener_path, '-A'] + after

                self.writer.write(
                    f"{ts} [PID:{process.pid}] [{pool_id}] [ID:{item_index}] "
                    f"Starting dry run to mark tests as failed due to timeout: {' '.join(dryrun_cmd)}"
                )
                subprocess.run(dryrun_cmd, env=dry_run_env, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

                break

            # Progress ping
            if counter == next_ping:
                ts = datetime.datetime.now()
                self.writer.write(
                    f"{ts} [PID:{process.pid}] [{pool_id}] [ID:{item_index}] still running "
                    f"{item_name} after {(counter * 0.1):.1f}s"
                )
                ping_interval += 50
                next_ping += ping_interval

            time.sleep(0.1)
            counter += 1

        log_thread.join()

        elapsed = round(time.time() - start, 1)

        with self.lock:
            if process in self.processes:
                self.processes.remove(process)

        return process, (rc, elapsed)
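For illustration only (not part of the diff): a minimal usage sketch of the new ProcessManager. The call pattern simply mirrors the run() signature shown above; the command line, log path, and pool/item identifiers are made-up values, not a documented public API.

# Hypothetical usage of pabot.ProcessManager added in 5.2.0b1; values are illustrative.
from pabot.ProcessManager import ProcessManager

pm = ProcessManager()
proc, (rc, elapsed) = pm.run(
    ["robot", "--outputdir", "out", "tests"],  # any subprocess command line
    timeout=600,                 # terminate the whole process tree after 10 minutes
    verbose=False,
    item_name="example suite",   # label used in the EXECUTING / still-running messages
    log_file="out/example.log",  # per-item log of the merged stdout/stderr
    pool_id=1,
    item_index=0,
)
print(f"rc={rc} elapsed={elapsed}s")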
pabot/__init__.py
CHANGED
pabot/arguments.py
CHANGED
@@ -142,7 +142,20 @@ def _parse_shard(arg):
     return int(parts[0]), int(parts[1])


+def _parse_artifacts(arg):
+    # type: (str) -> Tuple[List[str], bool]
+    artifacts = arg.split(',')
+    if artifacts[-1] == 'notimestamps':
+        return (artifacts[:-1], False)
+    return (artifacts, True)
+
+
 def _parse_pabot_args(args):  # type: (List[str]) -> Tuple[List[str], Dict[str, object]]
+    """
+    Parse pabot-specific command line arguments.
+    Supports new --ordering syntax:
+        --ordering <file> [static|dynamic] [skip|run_all]
+    """
     pabot_args = {
         "command": ["pybot" if ROBOT_VERSION < "3.1" else "robot"],
         "verbose": False,
@@ -155,13 +168,15 @@ def _parse_pabot_args(args):
         "processes": _processes_count(),
         "processtimeout": None,
         "artifacts": ["png"],
+        "artifactstimestamps": True,
         "artifactsinsubfolders": False,
         "shardindex": 0,
         "shardcount": 1,
         "chunk": False,
         "no-rebot": False,
     }
-
+
+    # Arguments that are flags (boolean)
     flag_args = {
         "verbose",
         "help",
@@ -169,8 +184,10 @@ def _parse_pabot_args(args):
         "pabotlib",
         "artifactsinsubfolders",
         "chunk",
-        "no-rebot"
+        "no-rebot",
     }
+
+    # Arguments that expect values
     value_args = {
         "hive": str,
         "processes": lambda x: int(x) if x != "all" else None,
@@ -179,9 +196,9 @@ def _parse_pabot_args(args):
         "pabotlibport": int,
         "pabotprerunmodifier": str,
         "processtimeout": int,
-        "ordering": str,
+        "ordering": str,  # special handling below
         "suitesfrom": str,
-        "artifacts":
+        "artifacts": _parse_artifacts,
         "shard": _parse_shard,
     }

@@ -189,7 +206,7 @@ def _parse_pabot_args(args):
     remaining_args = []
     i = 0

-    # Track conflicting options
+    # Track conflicting pabotlib options
     saw_pabotlib_flag = False
     saw_no_pabotlib = False

@@ -200,19 +217,20 @@ def _parse_pabot_args(args):
             i += 1
             continue

-        arg_name = arg[2:] #
+        arg_name = arg[2:]  # remove leading '--'

+        # Handle mutually exclusive pabotlib flags
         if arg_name == "no-pabotlib":
             saw_no_pabotlib = True
-            pabot_args["pabotlib"] = False
-
+            pabot_args["pabotlib"] = False
+            i += 1
             continue
         if arg_name == "pabotlib":
             saw_pabotlib_flag = True
-
+            i += 1
             continue

-        # Special
+        # Special handling for --command ... --end-command
         if arg_name == "command":
             try:
                 end_index = args.index("--end-command", i)
@@ -222,7 +240,7 @@ def _parse_pabot_args(args):
             except ValueError:
                 raise DataError("--command requires matching --end-command")

-        # Handle
+        # Handle boolean flags
         if arg_name in flag_args:
             pabot_args[arg_name] = True
             i += 1
@@ -233,20 +251,57 @@ def _parse_pabot_args(args):
             if i + 1 >= len(args):
                 raise DataError(f"--{arg_name} requires a value")
             try:
-
-                if arg_name == "
-
-
-
-
+                # Special parsing for --ordering <file> [mode] [failure_policy]
+                if arg_name == "ordering":
+                    if i + 1 >= len(args):
+                        raise DataError("--ordering requires at least a file path")
+
+                    ordering_file = args[i + 1]
+                    mode = "static"  # default
+                    failure_policy = "run_all"  # default
+
+                    # optional mode
+                    if i + 2 < len(args) and args[i + 2] in ("static", "dynamic"):
+                        mode = args[i + 2]
+                        i_mode_offset = 1
+                    else:
+                        i_mode_offset = 0
+
+                    # optional failure policy, only for dynamic mode
+                    if mode == "dynamic" and i + 2 + i_mode_offset < len(args) and args[i + 2 + i_mode_offset] in ("skip", "run_all"):
+                        failure_policy = args[i + 2 + i_mode_offset]
+                        i_failure_offset = 1
+                    else:
+                        i_failure_offset = 0
+
+                    # store
+                    pabot_args["ordering"] = {
+                        "file": ordering_file,
+                        "mode": mode,
+                        "failure_policy": failure_policy,
+                    }
+
+                    # move index past ordering args only
+                    i += 2 + i_mode_offset + i_failure_offset
+                    continue
                 else:
-
-
-
-
+                    value = value_args[arg_name](args[i + 1])
+                    if arg_name == "shard":
+                        pabot_args["shardindex"], pabot_args["shardcount"] = value
+                    elif arg_name == "pabotlibhost":
+                        pabot_args["pabotlib"] = False
+                        pabot_args[arg_name] = value
+                    elif arg_name == "artifacts":
+                        pabot_args["artifacts"] = value[0]
+                        pabot_args["artifactstimestamps"] = value[1]
+                    else:
+                        pabot_args[arg_name] = value
+                    i += 2
+                    continue
+            except (ValueError, TypeError):
                 raise DataError(f"Invalid value for --{arg_name}: {args[i + 1]}")
-
-        # Handle
+
+        # Handle argumentfiles like --argumentfile1
         match = ARGSMATCHER.match(arg)
         if match:
             if i + 1 >= len(args):
@@ -255,10 +310,11 @@ def _parse_pabot_args(args):
             i += 2
             continue

-        #
+        # Any other non-pabot argument
         remaining_args.append(arg)
         i += 1

+    # Check for conflicting pabotlib flags
     if saw_pabotlib_flag and saw_no_pabotlib:
         raise DataError("Cannot use both --pabotlib and --no-pabotlib options together")

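For illustration only (not part of the diff): a sketch of how the reworked parser handles the new --ordering and --artifacts values, assuming _parse_pabot_args still returns the (remaining args, pabot args) pair indicated by its type comment; the file name and option values below are made up.

# Hypothetical example of the new argument parsing in pabot.arguments; values are illustrative.
from pabot.arguments import _parse_pabot_args

remaining, pabot_args = _parse_pabot_args(
    ["--ordering", "order.dat", "dynamic", "skip",
     "--artifacts", "png,mp4,notimestamps",
     "--processes", "4", "tests"]
)
# pabot_args["ordering"]            -> {"file": "order.dat", "mode": "dynamic", "failure_policy": "skip"}
# pabot_args["artifacts"]           -> ["png", "mp4"]
# pabot_args["artifactstimestamps"] -> False
# pabot_args["processes"]           -> 4
# remaining                         -> ["tests"]  (non-pabot arguments passed through to Robot Framework)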