metaflow 2.12.35__py2.py3-none-any.whl → 2.12.37__py2.py3-none-any.whl

This diff compares the contents of two publicly released versions of the package as they appear in their public registries. It is provided for informational purposes only.
Files changed (46)
  1. metaflow/__init__.py +3 -0
  2. metaflow/cli.py +84 -697
  3. metaflow/cli_args.py +17 -0
  4. metaflow/cli_components/__init__.py +0 -0
  5. metaflow/cli_components/dump_cmd.py +96 -0
  6. metaflow/cli_components/init_cmd.py +51 -0
  7. metaflow/cli_components/run_cmds.py +358 -0
  8. metaflow/cli_components/step_cmd.py +189 -0
  9. metaflow/cli_components/utils.py +140 -0
  10. metaflow/cmd/develop/stub_generator.py +9 -2
  11. metaflow/decorators.py +54 -2
  12. metaflow/extension_support/plugins.py +41 -27
  13. metaflow/flowspec.py +156 -16
  14. metaflow/includefile.py +50 -22
  15. metaflow/metaflow_config.py +1 -1
  16. metaflow/package.py +17 -3
  17. metaflow/parameters.py +80 -23
  18. metaflow/plugins/__init__.py +4 -0
  19. metaflow/plugins/airflow/airflow_cli.py +1 -0
  20. metaflow/plugins/argo/argo_workflows.py +41 -1
  21. metaflow/plugins/argo/argo_workflows_cli.py +1 -0
  22. metaflow/plugins/aws/batch/batch_decorator.py +2 -2
  23. metaflow/plugins/aws/step_functions/step_functions.py +32 -0
  24. metaflow/plugins/aws/step_functions/step_functions_cli.py +1 -0
  25. metaflow/plugins/datatools/s3/s3op.py +3 -3
  26. metaflow/plugins/kubernetes/kubernetes_cli.py +1 -1
  27. metaflow/plugins/kubernetes/kubernetes_decorator.py +2 -2
  28. metaflow/plugins/parallel_decorator.py +4 -1
  29. metaflow/plugins/pypi/conda_decorator.py +22 -0
  30. metaflow/plugins/pypi/pypi_decorator.py +1 -0
  31. metaflow/plugins/timeout_decorator.py +2 -2
  32. metaflow/runner/click_api.py +73 -19
  33. metaflow/runtime.py +199 -105
  34. metaflow/sidecar/sidecar_worker.py +1 -1
  35. metaflow/user_configs/__init__.py +0 -0
  36. metaflow/user_configs/config_decorators.py +563 -0
  37. metaflow/user_configs/config_options.py +495 -0
  38. metaflow/user_configs/config_parameters.py +386 -0
  39. metaflow/util.py +17 -0
  40. metaflow/version.py +1 -1
  41. {metaflow-2.12.35.dist-info → metaflow-2.12.37.dist-info}/METADATA +3 -2
  42. {metaflow-2.12.35.dist-info → metaflow-2.12.37.dist-info}/RECORD +46 -36
  43. {metaflow-2.12.35.dist-info → metaflow-2.12.37.dist-info}/LICENSE +0 -0
  44. {metaflow-2.12.35.dist-info → metaflow-2.12.37.dist-info}/WHEEL +0 -0
  45. {metaflow-2.12.35.dist-info → metaflow-2.12.37.dist-info}/entry_points.txt +0 -0
  46. {metaflow-2.12.35.dist-info → metaflow-2.12.37.dist-info}/top_level.txt +0 -0
metaflow/runtime.py CHANGED
@@ -6,9 +6,12 @@ using local / remote processes
 """
 
 from __future__ import print_function
+import json
 import os
 import sys
 import fcntl
+import re
+import tempfile
 import time
 import subprocess
 from datetime import datetime
@@ -31,6 +34,7 @@ from . import procpoll
 from .datastore import TaskDataStoreSet
 from .debug import debug
 from .decorators import flow_decorators
+from .flowspec import _FlowState
 from .mflog import mflog, RUNTIME_LOG_SOURCE
 from .util import to_unicode, compress_list, unicode_type
 from .clone_util import clone_task_helper
@@ -39,6 +43,10 @@ from .unbounded_foreach import (
     UBF_CONTROL,
     UBF_TASK,
 )
+
+from .user_configs.config_options import ConfigInput
+from .user_configs.config_parameters import dump_config_values
+
 import metaflow.tracing as tracing
 
 MAX_WORKERS = 16
@@ -49,7 +57,13 @@ PROGRESS_INTERVAL = 300 # s
 # The following is a list of the (data) artifacts used by the runtime while
 # executing a flow. These are prefetched during the resume operation by
 # leveraging the TaskDataStoreSet.
-PREFETCH_DATA_ARTIFACTS = ["_foreach_stack", "_task_ok", "_transition"]
+PREFETCH_DATA_ARTIFACTS = [
+    "_foreach_stack",
+    "_task_ok",
+    "_transition",
+    "_control_mapper_tasks",
+    "_control_task_is_mapper_zero",
+]
 RESUME_POLL_SECONDS = 60
 
 # Runtime must use logsource=RUNTIME_LOG_SOURCE for all loglines that it
@@ -269,6 +283,8 @@ class NativeRuntime(object):
         step_name,
         task_id,
         pathspec_index,
+        cloned_task_pathspec_index,
+        finished_tuple,
         ubf_context,
         generate_task_obj,
         verbose=False,
@@ -281,8 +297,13 @@ class NativeRuntime(object):
                 task.ubf_context = ubf_context
             new_task_id = task.task_id
             self._cloned_tasks.append(task)
-            self._cloned_task_index.add(task.task_index)
-
+            self._cloned_task_index.add(cloned_task_pathspec_index)
+            task_pathspec = "{}/{}/{}".format(self._run_id, step_name, new_task_id)
+        else:
+            task_pathspec = "{}/{}/{}".format(self._run_id, step_name, new_task_id)
+            Task.clone_pathspec_mapping[task_pathspec] = "{}/{}/{}".format(
+                self._clone_run_id, step_name, task_id
+            )
         if verbose:
             self._logger(
                 "Cloning task from {}/{}/{}/{} to {}/{}/{}/{}".format(
@@ -308,6 +329,8 @@ class NativeRuntime(object):
                 self._metadata,
                 origin_ds_set=self._origin_ds_set,
             )
+            self._finished[(step_name, finished_tuple)] = task_pathspec
+            self._is_cloned[task_pathspec] = True
         except Exception as e:
             self._logger(
                 "Cloning {}/{}/{}/{} failed with error: {}".format(
@@ -323,7 +346,8 @@ class NativeRuntime(object):
 
         inputs = []
 
-        ubf_mapper_tasks_to_clone = []
+        ubf_mapper_tasks_to_clone = set()
+        ubf_control_tasks = set()
         # We only clone ubf mapper tasks if the control task is complete.
         # Here we need to check which control tasks are complete, and then get the corresponding
         # mapper tasks.
@@ -331,13 +355,25 @@
             _, step_name, task_id = task_ds.pathspec.split("/")
             pathspec_index = task_ds.pathspec_index
             if task_ds["_task_ok"] and step_name != "_parameters":
-                # Only control task can have _control_mapper_tasks. We then store the corresponding mapepr task pathspecs.
+                # Control task contains "_control_mapper_tasks" but, in the case of
+                # @parallel decorator, the control task is also a mapper task so we
+                # need to distinguish this using _control_task_is_mapper_zero
                 control_mapper_tasks = (
                     []
                     if "_control_mapper_tasks" not in task_ds
                     else task_ds["_control_mapper_tasks"]
                 )
-                ubf_mapper_tasks_to_clone.extend(control_mapper_tasks)
+                if control_mapper_tasks:
+                    if task_ds.get("_control_task_is_mapper_zero", False):
+                        # Strip out the control task of list of mapper tasks
+                        ubf_control_tasks.add(control_mapper_tasks[0])
+                        ubf_mapper_tasks_to_clone.update(control_mapper_tasks[1:])
+                    else:
+                        ubf_mapper_tasks_to_clone.update(control_mapper_tasks)
+                    # Since we only add mapper tasks here, if we are not in the list
+                    # we are a control task
+                    if task_ds.pathspec not in ubf_mapper_tasks_to_clone:
+                        ubf_control_tasks.add(task_ds.pathspec)
 
         for task_ds in self._origin_ds_set:
             _, step_name, task_id = task_ds.pathspec.split("/")
@@ -350,33 +386,54 @@
             ):
                 # "_unbounded_foreach" is a special flag to indicate that the transition is an unbounded foreach.
                 # Both parent and splitted children tasks will have this flag set. The splitted control/mapper tasks
-                # have no "foreach_param" because UBF is always followed by a join step.
+                # are not foreach types because UBF is always followed by a join step.
                 is_ubf_task = (
                     "_unbounded_foreach" in task_ds and task_ds["_unbounded_foreach"]
-                ) and (self._graph[step_name].foreach_param is None)
+                ) and (self._graph[step_name].type != "foreach")
 
-                # Only the control task has "_control_mapper_tasks" artifact.
-                is_ubf_control_task = (
-                    is_ubf_task
-                    and ("_control_mapper_tasks" in task_ds)
-                    and task_ds["_control_mapper_tasks"]
-                )
-                is_ubf_mapper_tasks = is_ubf_task and (not is_ubf_control_task)
-                if is_ubf_mapper_tasks and (
+                is_ubf_control_task = task_ds.pathspec in ubf_control_tasks
+
+                is_ubf_mapper_task = is_ubf_task and (not is_ubf_control_task)
+
+                if is_ubf_mapper_task and (
                     task_ds.pathspec not in ubf_mapper_tasks_to_clone
                 ):
-                    # Skip copying UBF mapper tasks if control tasks is incomplete.
+                    # Skip copying UBF mapper tasks if control task is incomplete.
                     continue
 
                 ubf_context = None
                 if is_ubf_task:
-                    ubf_context = "ubf_test" if is_ubf_mapper_tasks else "ubf_control"
+                    ubf_context = "ubf_test" if is_ubf_mapper_task else "ubf_control"
+
+                finished_tuple = tuple(
+                    [s._replace(value=0) for s in task_ds.get("_foreach_stack", ())]
+                )
+                cloned_task_pathspec_index = pathspec_index.split("/")[1]
+                if task_ds.get("_control_task_is_mapper_zero", False):
+                    # Replace None with index 0 for control task as it is part of the
+                    # UBF (as a mapper as well)
+                    finished_tuple = finished_tuple[:-1] + (
+                        finished_tuple[-1]._replace(index=0),
+                    )
+                    # We need this reverse override though because when we check
+                    # if a task has been cloned in _queue_push, the index will be None
+                    # because the _control_task_is_mapper_zero is set in the control
+                    # task *itself* and *not* in the one that is launching the UBF nest.
+                    # This means that _translate_index will use None.
+                    cloned_task_pathspec_index = re.sub(
+                        r"(\[(?:\d+, ?)*)0\]",
+                        lambda m: (m.group(1) or "[") + "None]",
+                        cloned_task_pathspec_index,
+                    )
+
                 inputs.append(
                     (
                         step_name,
                         task_id,
                         pathspec_index,
-                        is_ubf_mapper_tasks,
+                        cloned_task_pathspec_index,
+                        finished_tuple,
+                        is_ubf_mapper_task,
                         ubf_context,
                     )
                 )
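
A standalone sketch of the re.sub above (rewrite_index is an illustrative name, not a Metaflow helper): it rewrites a trailing foreach index 0 back to None so that the clone check in _queue_push, which sees the untranslated index, still matches.

import re

# Illustrative wrapper around the rewrite performed in the hunk above.
def rewrite_index(pathspec_index):
    return re.sub(
        r"(\[(?:\d+, ?)*)0\]",
        lambda m: (m.group(1) or "[") + "None]",
        pathspec_index,
    )

print(rewrite_index("step_a[0]"))     # -> step_a[None]
print(rewrite_index("step_a[2, 0]"))  # -> step_a[2, None]
print(rewrite_index("step_a[2, 1]"))  # -> step_a[2, 1]  (only a trailing 0 is rewritten)
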
@@ -388,15 +445,19 @@
                 step_name,
                 task_id,
                 pathspec_index,
+                cloned_task_pathspec_index,
+                finished_tuple,
                 ubf_context=ubf_context,
-                generate_task_obj=generate_task_obj and (not is_ubf_mapper_tasks),
+                generate_task_obj=generate_task_obj and (not is_ubf_mapper_task),
                 verbose=verbose,
             )
             for (
                 step_name,
                 task_id,
                 pathspec_index,
-                is_ubf_mapper_tasks,
+                cloned_task_pathspec_index,
+                finished_tuple,
+                is_ubf_mapper_task,
                 ubf_context,
             ) in inputs
         ]
@@ -417,82 +478,95 @@
         else:
             self._queue_push("start", {})
         progress_tstamp = time.time()
-        try:
-            # main scheduling loop
-            exception = None
-            while self._run_queue or self._active_tasks[0] > 0 or self._cloned_tasks:
-                # 1. are any of the current workers finished?
-                if self._cloned_tasks:
-                    finished_tasks = self._cloned_tasks
-                    # reset the list of cloned tasks and let poll_workers handle
-                    # the remaining transition
-                    self._cloned_tasks = []
-                else:
-                    finished_tasks = list(self._poll_workers())
-                # 2. push new tasks triggered by the finished tasks to the queue
-                self._queue_tasks(finished_tasks)
-                # 3. if there are available worker slots, pop and start tasks
-                # from the queue.
-                self._launch_workers()
-
-                if time.time() - progress_tstamp > PROGRESS_INTERVAL:
-                    progress_tstamp = time.time()
-                    tasks_print = ", ".join(
-                        [
-                            "%s (%d running; %d done)" % (k, v[0], v[1])
-                            for k, v in self._active_tasks.items()
-                            if k != 0 and v[0] > 0
-                        ]
-                    )
-                    if self._active_tasks[0] == 0:
-                        msg = "No tasks are running."
+        with tempfile.NamedTemporaryFile(mode="w", encoding="utf-8") as config_file:
+            # Configurations are passed through a file to avoid overloading the
+            # command-line. We only need to create this file once and it can be reused
+            # for any task launch
+            config_value = dump_config_values(self._flow)
+            if config_value:
+                json.dump(config_value, config_file)
+                config_file.flush()
+                self._config_file_name = config_file.name
+            else:
+                self._config_file_name = None
+            try:
+                # main scheduling loop
+                exception = None
+                while (
+                    self._run_queue or self._active_tasks[0] > 0 or self._cloned_tasks
+                ):
+                    # 1. are any of the current workers finished?
+                    if self._cloned_tasks:
+                        finished_tasks = self._cloned_tasks
+                        # reset the list of cloned tasks and let poll_workers handle
+                        # the remaining transition
+                        self._cloned_tasks = []
                     else:
-                        if self._active_tasks[0] == 1:
-                            msg = "1 task is running: "
+                        finished_tasks = list(self._poll_workers())
+                    # 2. push new tasks triggered by the finished tasks to the queue
+                    self._queue_tasks(finished_tasks)
+                    # 3. if there are available worker slots, pop and start tasks
+                    # from the queue.
+                    self._launch_workers()
+
+                    if time.time() - progress_tstamp > PROGRESS_INTERVAL:
+                        progress_tstamp = time.time()
+                        tasks_print = ", ".join(
+                            [
+                                "%s (%d running; %d done)" % (k, v[0], v[1])
+                                for k, v in self._active_tasks.items()
+                                if k != 0 and v[0] > 0
+                            ]
+                        )
+                        if self._active_tasks[0] == 0:
+                            msg = "No tasks are running."
                         else:
-                            msg = "%d tasks are running: " % self._active_tasks[0]
-                        msg += "%s." % tasks_print
+                            if self._active_tasks[0] == 1:
+                                msg = "1 task is running: "
+                            else:
+                                msg = "%d tasks are running: " % self._active_tasks[0]
+                            msg += "%s." % tasks_print
 
-                    self._logger(msg, system_msg=True)
+                        self._logger(msg, system_msg=True)
 
-                    if len(self._run_queue) == 0:
-                        msg = "No tasks are waiting in the queue."
-                    else:
-                        if len(self._run_queue) == 1:
-                            msg = "1 task is waiting in the queue: "
+                        if len(self._run_queue) == 0:
+                            msg = "No tasks are waiting in the queue."
                         else:
-                            msg = "%d tasks are waiting in the queue." % len(
-                                self._run_queue
-                            )
+                            if len(self._run_queue) == 1:
+                                msg = "1 task is waiting in the queue: "
+                            else:
+                                msg = "%d tasks are waiting in the queue." % len(
+                                    self._run_queue
+                                )
 
-                    self._logger(msg, system_msg=True)
-                    if len(self._unprocessed_steps) > 0:
-                        if len(self._unprocessed_steps) == 1:
-                            msg = "%s step has not started" % (
-                                next(iter(self._unprocessed_steps)),
-                            )
-                        else:
-                            msg = "%d steps have not started: " % len(
-                                self._unprocessed_steps
-                            )
-                        msg += "%s." % ", ".join(self._unprocessed_steps)
                         self._logger(msg, system_msg=True)
-
-        except KeyboardInterrupt as ex:
-            self._logger("Workflow interrupted.", system_msg=True, bad=True)
-            self._killall()
-            exception = ex
-            raise
-        except Exception as ex:
-            self._logger("Workflow failed.", system_msg=True, bad=True)
-            self._killall()
-            exception = ex
-            raise
-        finally:
-            # on finish clean tasks
-            for step in self._flow:
-                for deco in step.decorators:
-                    deco.runtime_finished(exception)
+                        if len(self._unprocessed_steps) > 0:
+                            if len(self._unprocessed_steps) == 1:
+                                msg = "%s step has not started" % (
+                                    next(iter(self._unprocessed_steps)),
+                                )
+                            else:
+                                msg = "%d steps have not started: " % len(
+                                    self._unprocessed_steps
+                                )
+                            msg += "%s." % ", ".join(self._unprocessed_steps)
+                            self._logger(msg, system_msg=True)
+
+            except KeyboardInterrupt as ex:
+                self._logger("Workflow interrupted.", system_msg=True, bad=True)
+                self._killall()
+                exception = ex
+                raise
+            except Exception as ex:
+                self._logger("Workflow failed.", system_msg=True, bad=True)
+                self._killall()
+                exception = ex
+                raise
+            finally:
+                # on finish clean tasks
+                for step in self._flow:
+                    for deco in step.decorators:
+                        deco.runtime_finished(exception)
 
         # assert that end was executed and it was successful
         if ("end", ()) in self._finished:
@@ -546,7 +620,6 @@
     # Given the current task information (task_index), the type of transition,
    # and the split index, return the new task index.
     def _translate_index(self, task, next_step, type, split_index=None):
-        import re
 
         match = re.match(r"^(.+)\[(.*)\]$", task.task_index)
         if match:
@@ -640,15 +713,18 @@
             # If the control task is cloned, all mapper tasks should have been cloned
             # as well, so we no longer need to handle cloning of mapper tasks in runtime.
 
-            # Update _finished since these tasks were successfully
-            # run elsewhere so that join will be unblocked.
-            _, foreach_stack = task.finished_id
-            top = foreach_stack[-1]
-            bottom = list(foreach_stack[:-1])
-            for i in range(num_splits):
-                s = tuple(bottom + [top._replace(index=i)])
-                self._finished[(task.step, s)] = mapper_tasks[i]
-                self._is_cloned[mapper_tasks[i]] = False
+            # Update _finished if we are not cloned. If we were cloned, we already
+            # updated _finished with the new tasks. Note that the *value* of mapper
+            # tasks is incorrect and contains the pathspec of the *cloned* run
+            # but we don't use it for anything. We could look to clean it up though
+            if not task.is_cloned:
+                _, foreach_stack = task.finished_id
+                top = foreach_stack[-1]
+                bottom = list(foreach_stack[:-1])
+                for i in range(num_splits):
+                    s = tuple(bottom + [top._replace(index=i)])
+                    self._finished[(task.step, s)] = mapper_tasks[i]
+                    self._is_cloned[mapper_tasks[i]] = False
 
             # Find and check status of control task and retrieve its pathspec
             # for retrieving unbounded foreach cardinality.
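
The loop above rebuilds one _finished key per mapper split via namedtuple._replace on the top foreach frame. A minimal self-contained illustration, using a hypothetical Frame namedtuple in place of Metaflow's internal foreach-frame type (the real type has at least index and value fields, which is all _replace needs here):

from collections import namedtuple

# Hypothetical stand-in for the runtime's foreach-stack frame.
Frame = namedtuple("Frame", ["step", "index", "value"])

foreach_stack = (Frame("outer", 1, 3), Frame("inner", None, 5))
top = foreach_stack[-1]
bottom = list(foreach_stack[:-1])

# One _finished key per mapper split, as in the loop above.
for i in range(3):
    s = tuple(bottom + [top._replace(index=i)])
    print(s)
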
@@ -901,7 +977,7 @@
                 )
                 return
 
-            worker = Worker(task, self._max_log_size)
+            worker = Worker(task, self._max_log_size, self._config_file_name)
             for fd in worker.fds():
                 self._workers[fd] = worker
                 self._poll.add(fd)
@@ -1080,7 +1156,7 @@
         # To avoid the edge case where the resume leader is selected but has not
         # yet written the _resume_leader metadata, we will wait for a few seconds.
         # We will wait for resume leader for at most 3 times.
-        for resume_leader_wait_retry in range(3):
+        for _ in range(3):
             if ds.has_metadata("_resume_leader", add_attempt=False):
                 resume_leader = ds.load_metadata(
                     ["_resume_leader"], add_attempt=False
@@ -1181,7 +1257,6 @@
         # Open the output datastore only if the task is not being cloned.
         if not self._is_cloned:
             self.new_attempt()
-
         for deco in decos:
             deco.runtime_task_created(
                 self._ds,
@@ -1448,6 +1523,15 @@ class CLIArgs(object):
         for deco in flow_decorators(self.task.flow):
             self.top_level_options.update(deco.get_top_level_options())
 
+        # We also pass configuration options using the kv.<name> syntax which will cause
+        # the configuration options to be loaded from the CONFIG file (or local-config-file
+        # in the case of the local runtime)
+        configs = self.task.flow._flow_state.get(_FlowState.CONFIGS)
+        if configs:
+            self.top_level_options["config-value"] = [
+                (k, ConfigInput.make_key_name(k)) for k in configs
+            ]
+
         self.commands = ["step"]
         self.command_args = [self.task.step]
         self.command_options = {
@@ -1481,12 +1565,15 @@ class CLIArgs(object):
                 for value in v:
                     yield "--%s" % k
                     if not isinstance(value, bool):
-                        yield to_unicode(value)
+                        value = value if isinstance(value, tuple) else (value,)
+                        for vv in value:
+                            yield to_unicode(vv)
 
         args = list(self.entrypoint)
         args.extend(_options(self.top_level_options))
         args.extend(self.commands)
         args.extend(self.command_args)
+
         args.extend(_options(self.command_options))
         return args
 
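This tuple expansion is what turns the (name, kv.name) pairs stored in top_level_options["config-value"] above into paired command-line arguments. A simplified sketch of the generator (the real implementation does additional filtering of values), assuming make_key_name("cfg") yields "kv.cfg" per the kv.<name> syntax mentioned earlier:

# Simplified sketch of the _options generator after this change.
def _options(mapping):
    for k, v in mapping.items():
        for value in v if isinstance(v, list) else [v]:
            yield "--%s" % k
            if not isinstance(value, bool):
                value = value if isinstance(value, tuple) else (value,)
                for vv in value:
                    yield str(vv)

print(list(_options({"config-value": [("cfg", "kv.cfg")]})))
# -> ['--config-value', 'cfg', 'kv.cfg']
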
@@ -1498,8 +1585,9 @@
 
 
 class Worker(object):
-    def __init__(self, task, max_logs_size):
+    def __init__(self, task, max_logs_size, config_file_name):
         self.task = task
+        self._config_file_name = config_file_name
         self._proc = self._launch()
 
         if task.retries > task.user_code_retries:
@@ -1551,6 +1639,12 @@
             self.task.user_code_retries,
             self.task.ubf_context,
         )
+
+        # Add user configurations using a file to avoid using up too much space on the
+        # command line
+        if self._config_file_name:
+            args.top_level_options["local-config-file"] = self._config_file_name
+        # Pass configuration options
         env.update(args.get_env())
         env["PYTHONUNBUFFERED"] = "x"
         tracing.inject_tracing_vars(env)
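
Together with the execute() change earlier, the mechanism is: dump the flow's config values to a temporary file once, then point each task subprocess at that file instead of inlining the values on its command line. A minimal sketch of the pattern, with placeholder data standing in for dump_config_values(flow):

import json
import tempfile

# Placeholder config data; in Metaflow this comes from dump_config_values(flow).
config_values = {"cfg": {"alpha": 0.1}}

with tempfile.NamedTemporaryFile(mode="w", encoding="utf-8") as config_file:
    if config_values:
        json.dump(config_values, config_file)
        config_file.flush()  # make the contents visible to subprocesses
        config_file_name = config_file.name
    else:
        config_file_name = None
    # ... launch task subprocesses here, passing the file via
    # --local-config-file <config_file_name>; the file is deleted
    # automatically when the with-block exits.
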
metaflow/sidecar/sidecar_worker.py CHANGED
@@ -48,8 +48,8 @@ def process_messages(worker_type, worker):
         pass
 
 
-@tracing.cli_entrypoint("sidecar")
 @click.command(help="Initialize workers")
+@tracing.cli_entrypoint("sidecar")
 @click.argument("worker-type")
 def main(worker_type):
     sidecar_type = SIDECARS.get(worker_type)
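
A plausible reading of this one-line swap: Python applies decorators bottom-up, so with @click.command outermost the tracing wrapper now wraps the plain callback before click turns it into a Command, rather than wrapping the Command object itself. An illustrative reconstruction with a stand-in traced wrapper (not Metaflow's actual tracing code):

import functools

import click

def traced(func):
    # Stand-in for a tracing wrapper; functools.wraps also carries over the
    # parameter metadata that @click.argument attaches to the function.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        print("tracing: entering %s" % func.__name__)
        return func(*args, **kwargs)
    return wrapper

@click.command(help="Initialize workers")
@traced
@click.argument("worker-type")
def main(worker_type):
    print("worker type: %s" % worker_type)

if __name__ == "__main__":
    main()
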