ob-metaflow 2.17.1.0__py2.py3-none-any.whl → 2.18.0.1__py2.py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of ob-metaflow might be problematic. See the package's registry page for more details.

Files changed (29):
  1. metaflow/cli_components/run_cmds.py +15 -0
  2. metaflow/datastore/task_datastore.py +3 -0
  3. metaflow/flowspec.py +91 -1
  4. metaflow/graph.py +154 -13
  5. metaflow/lint.py +94 -3
  6. metaflow/plugins/argo/argo_workflows.py +367 -11
  7. metaflow/plugins/argo/argo_workflows_decorator.py +9 -0
  8. metaflow/plugins/argo/conditional_input_paths.py +21 -0
  9. metaflow/plugins/aws/step_functions/step_functions.py +6 -0
  10. metaflow/plugins/cards/card_modules/basic.py +14 -2
  11. metaflow/plugins/cards/card_modules/main.css +1 -0
  12. metaflow/plugins/cards/card_modules/main.js +31 -31
  13. metaflow/plugins/catch_decorator.py +9 -0
  14. metaflow/plugins/package_cli.py +1 -1
  15. metaflow/plugins/parallel_decorator.py +7 -0
  16. metaflow/runtime.py +217 -34
  17. metaflow/task.py +129 -34
  18. metaflow/user_configs/config_parameters.py +3 -1
  19. metaflow/user_decorators/user_step_decorator.py +31 -6
  20. metaflow/version.py +1 -1
  21. {ob_metaflow-2.17.1.0.dist-info → ob_metaflow-2.18.0.1.dist-info}/METADATA +2 -2
  22. {ob_metaflow-2.17.1.0.dist-info → ob_metaflow-2.18.0.1.dist-info}/RECORD +29 -27
  23. {ob_metaflow-2.17.1.0.data → ob_metaflow-2.18.0.1.data}/data/share/metaflow/devtools/Makefile +0 -0
  24. {ob_metaflow-2.17.1.0.data → ob_metaflow-2.18.0.1.data}/data/share/metaflow/devtools/Tiltfile +0 -0
  25. {ob_metaflow-2.17.1.0.data → ob_metaflow-2.18.0.1.data}/data/share/metaflow/devtools/pick_services.sh +0 -0
  26. {ob_metaflow-2.17.1.0.dist-info → ob_metaflow-2.18.0.1.dist-info}/WHEEL +0 -0
  27. {ob_metaflow-2.17.1.0.dist-info → ob_metaflow-2.18.0.1.dist-info}/entry_points.txt +0 -0
  28. {ob_metaflow-2.17.1.0.dist-info → ob_metaflow-2.18.0.1.dist-info}/licenses/LICENSE +0 -0
  29. {ob_metaflow-2.17.1.0.dist-info → ob_metaflow-2.18.0.1.dist-info}/top_level.txt +0 -0
@@ -52,6 +52,15 @@ class CatchDecorator(StepDecorator):
52
52
  "split steps." % step
53
53
  )
54
54
 
55
+ # Do not support catch on switch steps for now.
56
+ # When applying @catch to a switch step, we can not guarantee that the flow attribute used for the switching condition gets properly recorded.
57
+ if graph[step].type == "split-switch":
58
+ raise MetaflowException(
59
+ "@catch is defined for the step *%s* "
60
+ "but @catch is not supported in conditional "
61
+ "switch steps." % step
62
+ )
63
+
55
64
  def _print_exception(self, step, flow):
56
65
  self.logger(head="@catch caught an exception from %s" % flow, timestamp=False)
57
66
  for line in traceback.format_exc().splitlines():
@@ -60,7 +60,7 @@ def list(obj, archive=False):
60
60
  @click.pass_obj
61
61
  def save(obj, path):
62
62
  with open(path, "wb") as f:
63
- f.write(obj.package.blob())
63
+ f.write(obj.package.blob)
64
64
  obj.echo(
65
65
  "Code package saved in *%s* with metadata: %s"
66
66
  % (path, obj.package.package_metadata),
@@ -53,6 +53,13 @@ class ParallelDecorator(StepDecorator):
53
53
  def step_init(
54
54
  self, flow, graph, step_name, decorators, environment, flow_datastore, logger
55
55
  ):
56
+ # TODO: This can be supported in the future, but for the time being we disable the transition as it leads to
57
+ # a UBF exception during runtime when the actual parallel-join step is conditional (switching between different join implementations from the @parallel step).
58
+ if graph[step_name].type == "split-switch":
59
+ raise MetaflowException(
60
+ "A @parallel step can not be a conditional switch step. Please add a join step after *%s*"
61
+ % step_name
62
+ )
56
63
  self.environment = environment
57
64
  # Previously, the `parallel` property was a hardcoded, static property within `current`.
58
65
  # Whenever `current.parallel` was called, it returned a named tuple with values coming from
metaflow/runtime.py CHANGED
@@ -15,11 +15,13 @@ import tempfile
15
15
  import time
16
16
  import subprocess
17
17
  from datetime import datetime
18
+ from enum import Enum
18
19
  from io import BytesIO
19
20
  from itertools import chain
20
21
  from functools import partial
21
22
  from concurrent import futures
22
23
 
24
+ from typing import Dict, Tuple
23
25
  from metaflow.datastore.exceptions import DataException
24
26
  from contextlib import contextmanager
25
27
 
@@ -60,6 +62,7 @@ PROGRESS_INTERVAL = 300 # s
60
62
  # leveraging the TaskDataStoreSet.
61
63
  PREFETCH_DATA_ARTIFACTS = [
62
64
  "_foreach_stack",
65
+ "_iteration_stack",
63
66
  "_task_ok",
64
67
  "_transition",
65
68
  "_control_mapper_tasks",
@@ -67,6 +70,14 @@ PREFETCH_DATA_ARTIFACTS = [
67
70
  ]
68
71
  RESUME_POLL_SECONDS = 60
69
72
 
73
+
74
+ class LoopBehavior(Enum):
75
+ NONE = "none"
76
+ ENTERING = "entering"
77
+ EXITING = "exiting"
78
+ LOOPING = "looping"
79
+
80
+
70
81
  # Runtime must use logsource=RUNTIME_LOG_SOURCE for all loglines that it
71
82
  # formats according to mflog. See a comment in mflog.__init__
72
83
  mflog_msg = partial(mflog.decorate, RUNTIME_LOG_SOURCE)
@@ -290,6 +301,7 @@ class NativeRuntime(object):
290
301
  pathspec_index,
291
302
  cloned_task_pathspec_index,
292
303
  finished_tuple,
304
+ iteration_tuple,
293
305
  ubf_context,
294
306
  generate_task_obj,
295
307
  verbose=False,
@@ -334,7 +346,7 @@ class NativeRuntime(object):
334
346
  self._metadata,
335
347
  origin_ds_set=self._origin_ds_set,
336
348
  )
337
- self._finished[(step_name, finished_tuple)] = task_pathspec
349
+ self._finished[(step_name, finished_tuple, iteration_tuple)] = task_pathspec
338
350
  self._is_cloned[task_pathspec] = True
339
351
  except Exception as e:
340
352
  self._logger(
@@ -415,6 +427,7 @@ class NativeRuntime(object):
415
427
  finished_tuple = tuple(
416
428
  [s._replace(value=0) for s in task_ds.get("_foreach_stack", ())]
417
429
  )
430
+ iteration_tuple = tuple(task_ds.get("_iteration_stack", ()))
418
431
  cloned_task_pathspec_index = pathspec_index.split("/")[1]
419
432
  if task_ds.get("_control_task_is_mapper_zero", False):
420
433
  # Replace None with index 0 for control task as it is part of the
@@ -440,6 +453,7 @@ class NativeRuntime(object):
440
453
  pathspec_index,
441
454
  cloned_task_pathspec_index,
442
455
  finished_tuple,
456
+ iteration_tuple,
443
457
  is_ubf_mapper_task,
444
458
  ubf_context,
445
459
  )
@@ -454,6 +468,7 @@ class NativeRuntime(object):
454
468
  pathspec_index,
455
469
  cloned_task_pathspec_index,
456
470
  finished_tuple,
471
+ iteration_tuple,
457
472
  ubf_context=ubf_context,
458
473
  generate_task_obj=generate_task_obj and (not is_ubf_mapper_task),
459
474
  verbose=verbose,
@@ -464,6 +479,7 @@ class NativeRuntime(object):
464
479
  pathspec_index,
465
480
  cloned_task_pathspec_index,
466
481
  finished_tuple,
482
+ iteration_tuple,
467
483
  is_ubf_mapper_task,
468
484
  ubf_context,
469
485
  ) in inputs
@@ -484,6 +500,7 @@ class NativeRuntime(object):
484
500
  self._queue_push("start", {"input_paths": [self._params_task.path]})
485
501
  else:
486
502
  self._queue_push("start", {})
503
+
487
504
  progress_tstamp = time.time()
488
505
  with tempfile.NamedTemporaryFile(mode="w", encoding="utf-8") as config_file:
489
506
  # Configurations are passed through a file to avoid overloading the
@@ -504,7 +521,74 @@ class NativeRuntime(object):
504
521
  ):
505
522
  # 1. are any of the current workers finished?
506
523
  if self._cloned_tasks:
507
- finished_tasks = self._cloned_tasks
524
+ finished_tasks = []
525
+
526
+ # For loops (right now just recursive steps), we need to find
527
+ # the exact frontier because if we queue all "successors" to all
528
+ # the finished iterations, we would incorrectly launch multiple
529
+ # successors. We therefore have to strip out all non-last
530
+ # iterations *per* foreach branch.
531
+ idx_per_finished_id = (
532
+ {}
533
+ ) # type: Dict[Tuple[str, Tuple[int, ...], Tuple[int, Tuple[int, ...]]]]
534
+ for task in self._cloned_tasks:
535
+ step_name, foreach_stack, iteration_stack = task.finished_id
536
+ existing_task_idx = idx_per_finished_id.get(
537
+ (step_name, foreach_stack), None
538
+ )
539
+ if existing_task_idx is not None:
540
+ len_diff = len(iteration_stack) - len(
541
+ existing_task_idx[1]
542
+ )
543
+ # In this case, we need to keep only the latest iteration
544
+ if (
545
+ len_diff == 0
546
+ and iteration_stack > existing_task_idx[1]
547
+ ) or len_diff == -1:
548
+ # We remove the one we currently have and replace
549
+ # by this one. The second option means that we are
550
+ # adding the finished iteration marker.
551
+ existing_task = finished_tasks[existing_task_idx[0]]
552
+ # These are the first two lines of _queue_tasks
553
+ # We still consider the tasks finished so we need
554
+ # to update state to be clean.
555
+ self._finished[existing_task.finished_id] = (
556
+ existing_task.path
557
+ )
558
+ self._is_cloned[existing_task.path] = (
559
+ existing_task.is_cloned
560
+ )
561
+
562
+ finished_tasks[existing_task_idx[0]] = task
563
+ idx_per_finished_id[(step_name, foreach_stack)] = (
564
+ existing_task_idx[0],
565
+ iteration_stack,
566
+ )
567
+ elif (
568
+ len_diff == 0
569
+ and iteration_stack < existing_task_idx[1]
570
+ ) or len_diff == 1:
571
+ # The second option is when we have already marked
572
+ # the end of the iteration in self._finished and
573
+ # are now seeing a previous iteration.
574
+ # We just mark the task as finished but we don't
575
+ # put it in the finished_tasks list to pass to
576
+ # the _queue_tasks function
577
+ self._finished[task.finished_id] = task.path
578
+ self._is_cloned[task.path] = task.is_cloned
579
+ else:
580
+ raise MetaflowInternalError(
581
+ "Unexpected recursive cloned tasks -- "
582
+ "this is a bug, please report it."
583
+ )
584
+ else:
585
+ # New entry
586
+ finished_tasks.append(task)
587
+ idx_per_finished_id[(step_name, foreach_stack)] = (
588
+ len(finished_tasks) - 1,
589
+ iteration_stack,
590
+ )
591
+
508
592
  # reset the list of cloned tasks and let poll_workers handle
509
593
  # the remaining transition
510
594
  self._cloned_tasks = []
@@ -578,7 +662,7 @@ class NativeRuntime(object):
578
662
  self._run_exit_hooks()
579
663
 
580
664
  # assert that end was executed and it was successful
581
- if ("end", ()) in self._finished:
665
+ if ("end", (), ()) in self._finished:
582
666
  if self._run_url:
583
667
  self._logger(
584
668
  "Done! See the run in the UI at %s" % self._run_url,
@@ -604,7 +688,7 @@ class NativeRuntime(object):
604
688
  if not exit_hook_decos:
605
689
  return
606
690
 
607
- successful = ("end", ()) in self._finished or self._clone_only
691
+ successful = ("end", (), ()) in self._finished or self._clone_only
608
692
  pathspec = f"{self._graph.name}/{self._run_id}"
609
693
  flow_file = self._environment.get_environment_info()["script"]
610
694
 
@@ -672,29 +756,60 @@ class NativeRuntime(object):
672
756
 
673
757
  # Given the current task information (task_index), the type of transition,
674
758
  # and the split index, return the new task index.
675
- def _translate_index(self, task, next_step, type, split_index=None):
676
- match = re.match(r"^(.+)\[(.*)\]$", task.task_index)
759
+ def _translate_index(
760
+ self, task, next_step, type, split_index=None, loop_mode=LoopBehavior.NONE
761
+ ):
762
+ match = re.match(r"^(.+)\[(.*)\]\[(.*)\]$", task.task_index)
677
763
  if match:
678
- _, foreach_index = match.groups()
764
+ _, foreach_index, iteration_index = match.groups()
679
765
  # Convert foreach_index to a list of integers
680
766
  if len(foreach_index) > 0:
681
767
  foreach_index = foreach_index.split(",")
682
768
  else:
683
769
  foreach_index = []
770
+ # Ditto for iteration_index
771
+ if len(iteration_index) > 0:
772
+ iteration_index = iteration_index.split(",")
773
+ else:
774
+ iteration_index = []
684
775
  else:
685
776
  raise ValueError(
686
- "Index not in the format of {run_id}/{step_name}[{foreach_index}]"
777
+ "Index not in the format of {run_id}/{step_name}[{foreach_index}][{iteration_index}]"
687
778
  )
779
+ if loop_mode == LoopBehavior.NONE:
780
+ # Check if we are entering a looping construct. Right now, only recursive
781
+ # steps are looping constructs
782
+ next_step_node = self._graph[next_step]
783
+ if (
784
+ next_step_node.type == "split-switch"
785
+ and next_step in next_step_node.out_funcs
786
+ ):
787
+ loop_mode = LoopBehavior.ENTERING
788
+
789
+ # Update iteration_index
790
+ if loop_mode == LoopBehavior.ENTERING:
791
+ # We are entering a loop, so we add a new iteration level
792
+ iteration_index.append("0")
793
+ elif loop_mode == LoopBehavior.EXITING:
794
+ iteration_index = iteration_index[:-1]
795
+ elif loop_mode == LoopBehavior.LOOPING:
796
+ if len(iteration_index) == 0:
797
+ raise MetaflowInternalError(
798
+ "In looping mode but there is no iteration index"
799
+ )
800
+ iteration_index[-1] = str(int(iteration_index[-1]) + 1)
801
+ iteration_index = ",".join(iteration_index)
802
+
688
803
  if type == "linear":
689
- return "%s[%s]" % (next_step, ",".join(foreach_index))
804
+ return "%s[%s][%s]" % (next_step, ",".join(foreach_index), iteration_index)
690
805
  elif type == "join":
691
806
  indices = []
692
807
  if len(foreach_index) > 0:
693
808
  indices = foreach_index[:-1]
694
- return "%s[%s]" % (next_step, ",".join(indices))
809
+ return "%s[%s][%s]" % (next_step, ",".join(indices), iteration_index)
695
810
  elif type == "split":
696
811
  foreach_index.append(str(split_index))
697
- return "%s[%s]" % (next_step, ",".join(foreach_index))
812
+ return "%s[%s][%s]" % (next_step, ",".join(foreach_index), iteration_index)
698
813
 
699
814
  # Store the parameters needed for task creation, so that pushing on items
700
815
  # onto the run_queue is an inexpensive operation.
@@ -778,17 +893,19 @@ class NativeRuntime(object):
778
893
  # tasks is incorrect and contains the pathspec of the *cloned* run
779
894
  # but we don't use it for anything. We could look to clean it up though
780
895
  if not task.is_cloned:
781
- _, foreach_stack = task.finished_id
896
+ _, foreach_stack, iteration_stack = task.finished_id
782
897
  top = foreach_stack[-1]
783
898
  bottom = list(foreach_stack[:-1])
784
899
  for i in range(num_splits):
785
900
  s = tuple(bottom + [top._replace(index=i)])
786
- self._finished[(task.step, s)] = mapper_tasks[i]
901
+ self._finished[(task.step, s, iteration_stack)] = mapper_tasks[
902
+ i
903
+ ]
787
904
  self._is_cloned[mapper_tasks[i]] = False
788
905
 
789
906
  # Find and check status of control task and retrieve its pathspec
790
907
  # for retrieving unbounded foreach cardinality.
791
- _, foreach_stack = task.finished_id
908
+ _, foreach_stack, iteration_stack = task.finished_id
792
909
  top = foreach_stack[-1]
793
910
  bottom = list(foreach_stack[:-1])
794
911
  s = tuple(bottom + [top._replace(index=None)])
@@ -797,7 +914,7 @@ class NativeRuntime(object):
797
914
  # it will have index=0 instead of index=None.
798
915
  if task.results.get("_control_task_is_mapper_zero", False):
799
916
  s = tuple(bottom + [top._replace(index=0)])
800
- control_path = self._finished.get((task.step, s))
917
+ control_path = self._finished.get((task.step, s, iteration_stack))
801
918
  if control_path:
802
919
  # Control task was successful.
803
920
  # Additionally check the state of (sibling) mapper tasks as well
@@ -806,7 +923,9 @@ class NativeRuntime(object):
806
923
  required_tasks = []
807
924
  for i in range(num_splits):
808
925
  s = tuple(bottom + [top._replace(index=i)])
809
- required_tasks.append(self._finished.get((task.step, s)))
926
+ required_tasks.append(
927
+ self._finished.get((task.step, s, iteration_stack))
928
+ )
810
929
 
811
930
  if all(required_tasks):
812
931
  index = self._translate_index(task, next_step, "join")
@@ -819,10 +938,12 @@ class NativeRuntime(object):
819
938
  else:
820
939
  # matching_split is the split-parent of the finished task
821
940
  matching_split = self._graph[self._graph[next_step].split_parents[-1]]
822
- _, foreach_stack = task.finished_id
823
- index = ""
941
+ _, foreach_stack, iteration_stack = task.finished_id
942
+
943
+ direct_parents = set(self._graph[next_step].in_funcs)
944
+
945
+ # next step is a foreach join
824
946
  if matching_split.type == "foreach":
825
- # next step is a foreach join
826
947
 
827
948
  def siblings(foreach_stack):
828
949
  top = foreach_stack[-1]
@@ -831,29 +952,56 @@ class NativeRuntime(object):
831
952
  yield tuple(bottom + [top._replace(index=index)])
832
953
 
833
954
  # required tasks are all split-siblings of the finished task
834
- required_tasks = [
835
- self._finished.get((task.step, s)) for s in siblings(foreach_stack)
836
- ]
955
+ required_tasks = list(
956
+ filter(
957
+ lambda x: x is not None,
958
+ [
959
+ self._finished.get((p, s, iteration_stack))
960
+ for p in direct_parents
961
+ for s in siblings(foreach_stack)
962
+ ],
963
+ )
964
+ )
965
+ required_count = task.finished_id[1][-1].num_splits
837
966
  join_type = "foreach"
838
967
  index = self._translate_index(task, next_step, "join")
839
968
  else:
840
969
  # next step is a split
841
- # required tasks are all branches joined by the next step
842
- required_tasks = [
843
- self._finished.get((step, foreach_stack))
844
- for step in self._graph[next_step].in_funcs
845
- ]
970
+ required_tasks = list(
971
+ filter(
972
+ lambda x: x is not None,
973
+ [
974
+ self._finished.get((p, foreach_stack, iteration_stack))
975
+ for p in direct_parents
976
+ ],
977
+ )
978
+ )
979
+
980
+ required_count = len(matching_split.out_funcs)
846
981
  join_type = "linear"
847
982
  index = self._translate_index(task, next_step, "linear")
848
-
849
- if all(required_tasks):
850
- # all tasks to be joined are ready. Schedule the next join step.
983
+ if len(required_tasks) == required_count:
984
+ # We have all the required previous tasks to schedule a join
851
985
  self._queue_push(
852
986
  next_step,
853
987
  {"input_paths": required_tasks, "join_type": join_type},
854
988
  index,
855
989
  )
856
990
 
991
+ def _queue_task_switch(self, task, next_steps, is_recursive):
992
+ chosen_step = next_steps[0]
993
+
994
+ loop_mode = LoopBehavior.NONE
995
+ if is_recursive:
996
+ if chosen_step != task.step:
997
+ # We are exiting a loop
998
+ loop_mode = LoopBehavior.EXITING
999
+ else:
1000
+ # We are staying in the loop
1001
+ loop_mode = LoopBehavior.LOOPING
1002
+ index = self._translate_index(task, chosen_step, "linear", None, loop_mode)
1003
+ self._queue_push(chosen_step, {"input_paths": [task.path]}, index)
1004
+
857
1005
  def _queue_task_foreach(self, task, next_steps):
858
1006
  # CHECK: this condition should be enforced by the linter but
859
1007
  # let's assert that the assumption holds
@@ -930,7 +1078,39 @@ class NativeRuntime(object):
930
1078
  next_steps = []
931
1079
  foreach = None
932
1080
  expected = self._graph[task.step].out_funcs
933
- if next_steps != expected:
1081
+
1082
+ if self._graph[task.step].type == "split-switch":
1083
+ is_recursive = task.step in self._graph[task.step].out_funcs
1084
+ if len(next_steps) != 1:
1085
+ msg = (
1086
+ "Switch step *{step}* should transition to exactly "
1087
+ "one step at runtime, but got: {actual}"
1088
+ )
1089
+ raise MetaflowInternalError(
1090
+ msg.format(step=task.step, actual=", ".join(next_steps))
1091
+ )
1092
+ if next_steps[0] not in expected:
1093
+ msg = (
1094
+ "Switch step *{step}* transitioned to unexpected "
1095
+ "step *{actual}*. Expected one of: {expected}"
1096
+ )
1097
+ raise MetaflowInternalError(
1098
+ msg.format(
1099
+ step=task.step,
1100
+ actual=next_steps[0],
1101
+ expected=", ".join(expected),
1102
+ )
1103
+ )
1104
+ # When exiting a recursive loop, we mark that the loop itself has
1105
+ # finished by adding a special entry in self._finished which has
1106
+ # an iteration stack that is shorter (ie: we are out of the loop) so
1107
+ # that we can then find it when looking at successor tasks to launch.
1108
+ if is_recursive and next_steps[0] != task.step:
1109
+ step_name, finished_tuple, iteration_tuple = task.finished_id
1110
+ self._finished[
1111
+ (step_name, finished_tuple, iteration_tuple[:-1])
1112
+ ] = task.path
1113
+ elif next_steps != expected:
934
1114
  msg = (
935
1115
  "Based on static analysis of the code, step *{step}* "
936
1116
  "was expected to transition to step(s) *{expected}*. "
@@ -954,6 +1134,9 @@ class NativeRuntime(object):
954
1134
  elif foreach:
955
1135
  # Next step is a foreach child
956
1136
  self._queue_task_foreach(task, next_steps)
1137
+ elif self._graph[task.step].type == "split-switch":
1138
+ # Current step is switch - queue the chosen step
1139
+ self._queue_task_switch(task, next_steps, is_recursive)
957
1140
  else:
958
1141
  # Next steps are normal linear steps
959
1142
  for step in next_steps:
@@ -1494,13 +1677,13 @@ class Task(object):
1494
1677
  @property
1495
1678
  def finished_id(self):
1496
1679
  # note: id is not available before the task has finished.
1497
- # Index already identifies the task within the foreach,
1498
- # we will remove foreach value so that it is easier to
1680
+ # Index already identifies the task within the foreach and loop.
1681
+ # We will remove foreach value so that it is easier to
1499
1682
  # identify siblings within a foreach.
1500
1683
  foreach_stack_tuple = tuple(
1501
1684
  [s._replace(value=0) for s in self.results["_foreach_stack"]]
1502
1685
  )
1503
- return (self.step, foreach_stack_tuple)
1686
+ return (self.step, foreach_stack_tuple, tuple(self.results["_iteration_stack"]))
1504
1687
 
1505
1688
  @property
1506
1689
  def is_cloned(self):