ddeutil-workflow 0.0.64__py3-none-any.whl → 0.0.65__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ddeutil/workflow/__about__.py +1 -1
- ddeutil/workflow/__init__.py +1 -1
- ddeutil/workflow/api/routes/job.py +2 -2
- ddeutil/workflow/conf.py +0 -4
- ddeutil/workflow/{exceptions.py → errors.py} +49 -11
- ddeutil/workflow/job.py +249 -118
- ddeutil/workflow/params.py +11 -11
- ddeutil/workflow/result.py +86 -10
- ddeutil/workflow/reusables.py +15 -17
- ddeutil/workflow/stages.py +676 -450
- ddeutil/workflow/utils.py +33 -0
- ddeutil/workflow/workflow.py +163 -664
- {ddeutil_workflow-0.0.64.dist-info → ddeutil_workflow-0.0.65.dist-info}/METADATA +14 -12
- ddeutil_workflow-0.0.65.dist-info/RECORD +28 -0
- {ddeutil_workflow-0.0.64.dist-info → ddeutil_workflow-0.0.65.dist-info}/WHEEL +1 -1
- ddeutil_workflow-0.0.64.dist-info/RECORD +0 -28
- {ddeutil_workflow-0.0.64.dist-info → ddeutil_workflow-0.0.65.dist-info}/entry_points.txt +0 -0
- {ddeutil_workflow-0.0.64.dist-info → ddeutil_workflow-0.0.65.dist-info}/licenses/LICENSE +0 -0
- {ddeutil_workflow-0.0.64.dist-info → ddeutil_workflow-0.0.65.dist-info}/top_level.txt +0 -0
ddeutil/workflow/job.py
CHANGED
@@ -12,7 +12,7 @@ for execute on target machine instead of the current local machine.
 making matrix values before execution parallelism stage execution.
 
 The Job model does not implement `handler_execute` same as Stage model
-because the job should raise only `
+because the job should raise only `JobError` class from the execution
 method.
 """
 from __future__ import annotations
@@ -39,13 +39,20 @@ from pydantic import BaseModel, Discriminator, Field, SecretStr, Tag
 from pydantic.functional_validators import field_validator, model_validator
 from typing_extensions import Self
 
+from . import JobSkipError
 from .__types import DictData, DictStr, Matrix, StrOrNone
-from .
-
-
-
+from .errors import JobCancelError, JobError, to_dict
+from .result import (
+    CANCEL,
+    FAILED,
+    SKIP,
+    SUCCESS,
+    WAIT,
+    Result,
+    Status,
+    get_status_from_error,
+    validate_statuses,
 )
-from .result import CANCEL, FAILED, SKIP, SUCCESS, WAIT, Result, Status
 from .reusables import has_template, param2template
 from .stages import Stage
 from .utils import cross_product, filter_func, gen_id
@@ -238,6 +245,7 @@ class SelfHostedArgs(BaseModel):
     """Self-Hosted arguments."""
 
     host: str = Field(description="A host URL of the target self-hosted.")
+    token: SecretStr = Field(description="An API or Access token.")
 
 
 class OnSelfHosted(BaseRunsOn):  # pragma: no cov
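`SelfHostedArgs` now carries an access token next to the host URL. A minimal sketch of constructing the model directly, with made-up host and token values; how the workflow config feeds these fields is not shown in this diff:

```python
from pydantic import SecretStr

from ddeutil.workflow.job import SelfHostedArgs

# `token` is new in 0.0.65; SecretStr keeps the value masked in reprs and logs.
args = SelfHostedArgs(host="https://runner.internal:8080", token="s3cr3t")
assert isinstance(args.token, SecretStr)
assert args.token.get_secret_value() == "s3cr3t"
```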
@@ -250,6 +258,8 @@ class OnSelfHosted(BaseRunsOn):  # pragma: no cov
 
 
 class AzBatchArgs(BaseModel):
+    """Azure Batch arguments."""
+
     batch_account_name: str
     batch_account_key: SecretStr
     batch_account_url: str
@@ -431,11 +441,10 @@ class Job(BaseModel):
             return stage
         raise ValueError(f"Stage {stage_id!r} does not exists in this job.")
 
-    def check_needs(
-        self, jobs: dict[str, DictData]
-    ) -> Status:  # pragma: no cov
+    def check_needs(self, jobs: dict[str, DictData]) -> Status:
         """Return trigger status from checking job's need trigger rule logic was
-        valid. The return status should be SUCCESS
+        valid. The return status should be `SUCCESS`, `FAILED`, `WAIT`, or
+        `SKIP` status.
 
         :param jobs: (dict[str, DictData]) A mapping of job ID and its context
             data that return from execution process.
@@ -450,43 +459,98 @@ class Job(BaseModel):
         def make_return(result: bool) -> Status:
             return SUCCESS if result else FAILED
 
+        # NOTE: Filter all job result context only needed in this job.
         need_exist: dict[str, Any] = {
-            need: jobs[need]
+            need: jobs[need] or {"status": SUCCESS}
+            for need in self.needs
+            if need in jobs
         }
-
+
+        # NOTE: Return WAIT status if result context not complete, or it has any
+        # waiting status.
+        if len(need_exist) < len(self.needs) or any(
+            need_exist[job].get("status", SUCCESS) == WAIT for job in need_exist
+        ):
             return WAIT
-
+
+        # NOTE: Return SKIP status if all status are SKIP.
+        elif all(
+            need_exist[job].get("status", SUCCESS) == SKIP for job in need_exist
+        ):
             return SKIP
+
+        # NOTE: Return CANCEL status if any status is CANCEL.
+        elif any(
+            need_exist[job].get("status", SUCCESS) == CANCEL
+            for job in need_exist
+        ):
+            return CANCEL
+
+        # NOTE: Return SUCCESS if all status not be WAIT or all SKIP.
         elif self.trigger_rule == Rule.ALL_DONE:
             return SUCCESS
+
         elif self.trigger_rule == Rule.ALL_SUCCESS:
             rs = all(
                 (
                     "errors" not in need_exist[job]
-                    and
+                    and need_exist[job].get("status", SUCCESS) == SUCCESS
                 )
                 for job in need_exist
             )
         elif self.trigger_rule == Rule.ALL_FAILED:
-            rs = all(
-        elif self.trigger_rule == Rule.ONE_SUCCESS:
-            rs = sum(
+            rs = all(
                 (
-                    "errors"
-
+                    "errors" in need_exist[job]
+                    or need_exist[job].get("status", SUCCESS) == FAILED
                 )
                 for job in need_exist
-            )
+            )
+
+        elif self.trigger_rule == Rule.ONE_SUCCESS:
+            rs = (
+                sum(
+                    (
+                        "errors" not in need_exist[job]
+                        and need_exist[job].get("status", SUCCESS) == SUCCESS
+                    )
+                    for job in need_exist
+                )
+                == 1
+            )
+
         elif self.trigger_rule == Rule.ONE_FAILED:
-            rs =
+            rs = (
+                sum(
+                    (
+                        "errors" in need_exist[job]
+                        or need_exist[job].get("status", SUCCESS) == FAILED
+                    )
+                    for job in need_exist
+                )
+                == 1
+            )
+
         elif self.trigger_rule == Rule.NONE_SKIPPED:
             rs = all(
-
+                need_exist[job].get("status", SUCCESS) != SKIP
+                for job in need_exist
             )
+
         elif self.trigger_rule == Rule.NONE_FAILED:
-            rs = all(
+            rs = all(
+                (
+                    "errors" not in need_exist[job]
+                    and need_exist[job].get("status", SUCCESS) != FAILED
+                )
+                for job in need_exist
+            )
+
         else:  # pragma: no cov
-
+            raise NotImplementedError(
+                f"Trigger rule {self.trigger_rule} does not implement on this "
+                f"`check_needs` method yet."
+            )
         return make_return(rs)
 
     def is_skipped(self, params: DictData) -> bool:
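`check_needs` now reads the trigger decision from the `status` (and `errors`) keys of each upstream job's context, so the rule logic can be probed with plain dictionaries. A minimal sketch, assuming a `Job` can be built with only `needs` and `trigger_rule` set and that `Rule` is importable from the same module where it is used in this hunk:

```python
from ddeutil.workflow.job import Job, Rule
from ddeutil.workflow.result import FAILED, SUCCESS, WAIT

job = Job(needs=["extract", "load"], trigger_rule=Rule.ALL_SUCCESS)

# Upstream context incomplete -> WAIT.
assert job.check_needs({"extract": {"status": SUCCESS}}) == WAIT

# Every needed job succeeded -> SUCCESS under ALL_SUCCESS.
done = {"extract": {"status": SUCCESS}, "load": {"status": SUCCESS}}
assert job.check_needs(done) == SUCCESS

# One needed job failed -> FAILED under ALL_SUCCESS.
done["load"] = {"status": FAILED, "errors": {"name": "JobError"}}
assert job.check_needs(done) == FAILED
```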
@@ -496,9 +560,9 @@ class Job(BaseModel):
         :param params: (DictData) A parameter value that want to pass to condition
             template.
 
-        :raise
+        :raise JobError: When it has any error raise from the eval
             condition statement.
-        :raise
+        :raise JobError: When return type of the eval condition statement
             does not return with boolean type.
 
         :rtype: bool
@@ -519,7 +583,7 @@ class Job(BaseModel):
                 raise TypeError("Return type of condition does not be boolean")
             return not rs
         except Exception as e:
-            raise
+            raise JobError(f"{e.__class__.__name__}: {e}") from e
 
     def set_outputs(
         self,
@@ -561,7 +625,7 @@ class Job(BaseModel):
         extract from the result context if it exists. If it does not found, it
         will not set on the received context.
 
-        :raise
+        :raise JobError: If the job's ID does not set and the setting
             default job ID flag does not set.
 
         :param output: (DictData) A result data context that want to extract
@@ -575,34 +639,51 @@ class Job(BaseModel):
             to["jobs"] = {}
 
         if self.id is None and job_id is None:
-            raise
+            raise JobError(
                 "This job do not set the ID before setting execution output."
             )
 
         _id: str = self.id or job_id
-        output: DictData =
+        output: DictData = copy.deepcopy(output)
         errors: DictData = (
-            {"errors": output.pop("errors"
+            {"errors": output.pop("errors")} if "errors" in output else {}
         )
-
-        {"
-        if "skipped" in output
-        else {}
+        status: dict[str, Status] = (
+            {"status": output.pop("status")} if "status" in output else {}
         )
-
         if self.strategy.is_set():
-            to["jobs"][_id] = {"strategies": output
+            to["jobs"][_id] = {"strategies": output} | errors | status
         elif len(k := output.keys()) > 1:  # pragma: no cov
-            raise
+            raise JobError(
                 "Strategy output from execution return more than one ID while "
                 "this job does not set strategy."
             )
         else:
             _output: DictData = {} if len(k) == 0 else output[list(k)[0]]
             _output.pop("matrix", {})
-            to["jobs"][_id] =
+            to["jobs"][_id] = _output | errors | status
         return to
 
+    def get_outputs(
+        self,
+        output: DictData,
+        *,
+        job_id: StrOrNone = None,
+    ) -> DictData:
+        """Get the outputs from jobs data. It will get this job ID or passing
+        custom ID from the job outputs mapping.
+
+        :param output: (DictData) A job outputs data that want to extract
+        :param job_id: (StrOrNone) A job ID if the `id` field does not set.
+
+        :rtype: DictData
+        """
+        _id: str = self.id or job_id
+        if self.strategy.is_set():
+            return output.get("jobs", {}).get(_id, {}).get("strategies", {})
+        else:
+            return output.get("jobs", {}).get(_id, {})
+
     def execute(
         self,
         params: DictData,
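The new `get_outputs` is the read-side counterpart of `set_outputs`: it pulls a job's saved slot back out of the `jobs` mapping, drilling into the nested `strategies` mapping when a matrix strategy is set. A minimal sketch of the non-strategy case, with a made-up job ID and stage payload, assuming a `Job` can be constructed with just an `id` and an empty stage list:

```python
from ddeutil.workflow.job import Job

# A hypothetical job without a matrix strategy; "transform" is illustrative.
job = Job(id="transform", stages=[])

context = {
    "jobs": {
        "transform": {
            "stages": {"first-stage": {"outputs": {"records": 42}}},
        },
    },
}

# Without a strategy the job slot is returned directly; with one, the
# nested "strategies" mapping would be returned instead.
assert job.get_outputs(context) == context["jobs"]["transform"]
```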
@@ -634,10 +715,11 @@ class Job(BaseModel):
         )
 
         result.trace.info(
-            f"[JOB]:
+            f"[JOB]: Routing for "
             f"{''.join(self.runs_on.type.value.split('_')).title()}: "
             f"{self.id!r}"
         )
+
         if self.runs_on.type == RunsOn.LOCAL:
             return local_execute(
                 self,
@@ -658,13 +740,13 @@ class Job(BaseModel):
             )
 
         result.trace.error(
-            f"[JOB]:
+            f"[JOB]: Execution not support runs-on: {self.runs_on.type.value!r} "
             f"yet."
         )
         return result.catch(
             status=FAILED,
             context={
-                "errors":
+                "errors": JobError(
                     f"Execute runs-on type: {self.runs_on.type.value!r} does "
                     f"not support yet."
                 ).to_dict(),
@@ -672,6 +754,19 @@
         )
 
 
+def mark_errors(context: DictData, error: JobError) -> None:
+    """Make the errors context result with the refs value depends on the nested
+    execute func.
+
+    :param context: (DictData) A context data.
+    :param error: (JobError) A stage exception object.
+    """
+    if "errors" in context:
+        context["errors"][error.refs] = error.to_dict()
+    else:
+        context["errors"] = error.to_dict(with_refs=True)
+
+
 def local_execute_strategy(
     job: Job,
     strategy: DictData,
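`mark_errors` centralizes how strategy failures are folded into the shared context: the first error creates the `errors` mapping, and later errors are appended under their own `refs` key. A minimal sketch of that accumulation, assuming `JobError.to_dict(with_refs=True)` nests the payload under the error's `refs` value:

```python
from ddeutil.workflow.errors import JobError
from ddeutil.workflow.job import mark_errors

context: dict = {}

# First failure establishes the "errors" mapping keyed by its refs value.
mark_errors(context, JobError("matrix A failed", refs="strategy-a"))
# A later failure from another strategy is appended under its own refs.
mark_errors(context, JobError("matrix B failed", refs="strategy-b"))

assert set(context["errors"]) == {"strategy-a", "strategy-b"}
```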
@@ -700,9 +795,9 @@ def local_execute_strategy(
     :param event: (Event) An Event manager instance that use to cancel this
         execution if it forces stopped by parent execution.
 
-    :raise
-    :raise
-    :raise
+    :raise JobError: If event was set.
+    :raise JobError: If stage execution raise any error as `StageError`.
+    :raise JobError: If the result from execution has `FAILED` status.
 
     :rtype: Result
     """
@@ -719,81 +814,94 @@ def local_execute_strategy(
 
     context: DictData = copy.deepcopy(params)
     context.update({"matrix": strategy, "stages": {}})
-
+    total_stage: int = len(job.stages)
+    skips: list[bool] = [False] * total_stage
+    for i, stage in enumerate(job.stages, start=0):
 
         if job.extras:
             stage.extras = job.extras
 
-        if stage.is_skipped(params=context):
-            result.trace.info(f"[JOB]: Skip Stage: {stage.iden!r}")
-            stage.set_outputs(output={"skipped": True}, to=context)
-            continue
-
         if event and event.is_set():
-            error_msg: str =
+            error_msg: str = (
+                "Strategy execution was canceled from the event before "
+                "start stage execution."
+            )
             result.catch(
                 status=CANCEL,
                 context={
                     strategy_id: {
+                        "status": CANCEL,
                         "matrix": strategy,
                         "stages": filter_func(context.pop("stages", {})),
-                        "errors":
+                        "errors": JobCancelError(error_msg).to_dict(),
                     },
                 },
             )
-            raise
+            raise JobCancelError(error_msg, refs=strategy_id)
+
+        result.trace.info(f"[JOB]: Execute Stage: {stage.iden!r}")
+        rs: Result = stage.handler_execute(
+            params=context,
+            run_id=result.run_id,
+            parent_run_id=result.parent_run_id,
+            event=event,
+        )
+        stage.set_outputs(rs.context, to=context)
 
-
-
-
-
-
-
+        if rs.status == SKIP:
+            skips[i] = True
+            continue
+
+        if rs.status == FAILED:
+            error_msg: str = (
+                f"Strategy execution was break because its nested-stage, "
+                f"{stage.iden!r}, failed."
             )
-            stage.set_outputs(rs.context, to=context)
-        except StageException as e:
             result.catch(
                 status=FAILED,
                 context={
                     strategy_id: {
+                        "status": FAILED,
                         "matrix": strategy,
                         "stages": filter_func(context.pop("stages", {})),
-                        "errors":
+                        "errors": JobError(error_msg).to_dict(),
                     },
                 },
             )
-            raise
-                message=f"Handler Error: {e.__class__.__name__}: {e}",
-                refs=strategy_id,
-            ) from e
+            raise JobError(error_msg, refs=strategy_id)
 
-
+        elif rs.status == CANCEL:
             error_msg: str = (
-
-
+                "Strategy execution was canceled from the event after "
+                "end stage execution."
             )
             result.catch(
-                status=
+                status=CANCEL,
                 context={
                     strategy_id: {
+                        "status": CANCEL,
                         "matrix": strategy,
                         "stages": filter_func(context.pop("stages", {})),
-                        "errors":
+                        "errors": JobCancelError(error_msg).to_dict(),
                     },
                 },
             )
-            raise
+            raise JobCancelError(error_msg, refs=strategy_id)
 
-
-
+    status: Status = SKIP if sum(skips) == total_stage else SUCCESS
+    result.catch(
+        status=status,
         context={
             strategy_id: {
+                "status": status,
                 "matrix": strategy,
                 "stages": filter_func(context.pop("stages", {})),
             },
         },
     )
+    if status == SKIP:
+        raise JobSkipError("All stage was skipped.")
+    return result
 
 
 def local_execute(
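`local_execute_strategy` now raises `JobSkipError` after recording a `SKIP` status when every stage in the strategy was skipped, and the caller keeps that error out of the `errors` context. A minimal sketch of the same handling pattern; `run_one_strategy` is a stand-in for the real call, and the sketch assumes `JobSkipError` subclasses `JobError`, which the `except JobError` / `isinstance` pair in `local_execute` implies:

```python
from ddeutil.workflow import JobSkipError
from ddeutil.workflow.errors import JobError


def run_one_strategy() -> None:
    """Stand-in for local_execute_strategy(...) on an all-skipped matrix."""
    raise JobSkipError("All stage was skipped.")


errors: dict = {}
try:
    run_one_strategy()
except JobError as e:
    # A skip is not a failure, so it stays out of the errors context,
    # mirroring the isinstance check in local_execute.
    if not isinstance(e, JobSkipError):
        errors["strategy"] = str(e)

assert errors == {}
```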
@@ -809,7 +917,7 @@ def local_execute(
     step and run multithread on this metrics to the `stages` field of this job.
 
     Important:
-        This method does not raise any `
+        This method does not raise any `JobError` because it allows run
         parallel mode. If it raises error from strategy execution, it will catch
         that error and store it in the `errors` key with list of error.
 
@@ -835,12 +943,22 @@ def local_execute(
         extras=job.extras,
     )
 
+    result.trace.info("[JOB]: Start Local executor.")
+
+    if job.desc:
+        result.trace.debug(f"[JOB]: Description:||{job.desc}||")
+
+    if job.is_skipped(params=params):
+        result.trace.info("[JOB]: Skip because job condition was valid.")
+        return result.catch(status=SKIP)
+
     event: Event = event or Event()
-
-    ls: str = "Fail-Fast" if fail_fast_flag else "All-Completed"
+    ls: str = "Fail-Fast" if job.strategy.fail_fast else "All-Completed"
     workers: int = job.strategy.max_parallel
+    strategies: list[DictStr] = job.strategy.make()
+    len_strategy: int = len(strategies)
     result.trace.info(
-        f"[JOB]:
+        f"[JOB]: ... Mode {ls}: {job.id!r} with {workers} "
         f"worker{'s' if workers > 1 else ''}."
     )
 
@@ -848,17 +966,14 @@ def local_execute(
         return result.catch(
             status=CANCEL,
             context={
-                "errors":
-                    "
+                "errors": JobCancelError(
+                    "Execution was canceled from the event before start "
                     "local job execution."
                 ).to_dict()
             },
         )
 
-    with ThreadPoolExecutor(
-        max_workers=workers, thread_name_prefix="job_strategy_exec_"
-    ) as executor:
-
+    with ThreadPoolExecutor(workers, "jb_stg") as executor:
         futures: list[Future] = [
             executor.submit(
                 local_execute_strategy,
@@ -868,50 +983,58 @@ def local_execute(
                 result=result,
                 event=event,
             )
-            for strategy in
+            for strategy in strategies
         ]
 
         context: DictData = {}
-
+        statuses: list[Status] = [WAIT] * len_strategy
+        fail_fast: bool = False
 
-        if not
+        if not job.strategy.fail_fast:
             done: Iterator[Future] = as_completed(futures)
         else:
             done, not_done = wait(futures, return_when=FIRST_EXCEPTION)
             if len(list(done)) != len(futures):
                 result.trace.warning(
-                    "[JOB]:
+                    "[JOB]: Set the event for stop pending job-execution."
                 )
                 event.set()
                 for future in not_done:
                     future.cancel()
-                time.sleep(0.075)
 
-
-            (
-
-
+                time.sleep(0.025)
+            nd: str = (
+                (
+                    f", {len(not_done)} strateg"
+                    f"{'ies' if len(not_done) > 1 else 'y'} not run!!!"
+                )
+                if not_done
+                else ""
             )
-
-
-
-            result.trace.debug(f"[JOB]: ... Job was set Fail-Fast{nd}")
-            done: Iterator[Future] = as_completed(futures)
+            result.trace.debug(f"[JOB]: ... Job was set Fail-Fast{nd}")
+            done: Iterator[Future] = as_completed(futures)
+            fail_fast: bool = True
 
-        for future in done:
+        for i, future in enumerate(done, start=0):
             try:
-                future.result()
-            except
-
+                statuses[i] = future.result().status
+            except JobError as e:
+                statuses[i] = get_status_from_error(e)
                 result.trace.error(
-                    f"[JOB]: {ls} Error Handler:||{e.__class__.__name__}
+                    f"[JOB]: {ls} Error Handler:||{e.__class__.__name__}: {e}"
                 )
-                if
-                    context
-                else:
-                    context["errors"] = e.to_dict(with_refs=True)
+                if not isinstance(e, JobSkipError):
+                    mark_errors(context, e)
             except CancelledError:
                 pass
+
+        status: Status = validate_statuses(statuses)
+
+        # NOTE: Prepare status because it does not cancel from parent event but
+        # cancel from failed item execution.
+        if fail_fast and status == CANCEL:
+            status = FAILED
+
         return result.catch(status=status, context=context)
 
 
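Strategy results are now gathered into a `statuses` list (one `WAIT` slot per submitted strategy) and reduced with `validate_statuses`; when the fail-fast branch stopped the pool, a `CANCEL` rollup is downgraded to `FAILED` because the cancellation came from a failed sibling, not from the parent event. A minimal sketch of that final reduction, assuming `validate_statuses` returns the most severe status in the list:

```python
from ddeutil.workflow.result import CANCEL, FAILED, SUCCESS, validate_statuses

# Two strategies finished; a third was cancelled after a sibling failed fast.
statuses = [SUCCESS, FAILED, CANCEL]
status = validate_statuses(statuses)

# Fail-fast triggered the cancellation, so a CANCEL rollup really means FAILED.
fail_fast = True
if fail_fast and status == CANCEL:
    status = FAILED

print(f"final job status: {status}")
```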
@@ -943,12 +1066,14 @@ def self_hosted_execute(
         extras=job.extras,
     )
 
+    result.trace.info("[JOB]: Start self-hosted executor.")
+
     if event and event.is_set():
         return result.catch(
             status=CANCEL,
             context={
-                "errors":
-                    "
+                "errors": JobCancelError(
+                    "Execution was canceled from the event before start "
                     "self-hosted execution."
                 ).to_dict()
             },
@@ -970,8 +1095,8 @@ def self_hosted_execute(
         return result.catch(status=FAILED, context={"errors": to_dict(e)})
 
     if resp.status_code != 200:
-        raise
-            f"Job execution error from
+        raise JobError(
+            f"Job execution got error response from self-hosted: "
             f"{job.runs_on.args.host!r}"
         )
 
@@ -1018,12 +1143,15 @@ def azure_batch_execute(
         id_logic=(job.id or "EMPTY"),
         extras=job.extras,
     )
+
+    result.trace.info("[JOB]: Start Azure Batch executor.")
+
     if event and event.is_set():
         return result.catch(
             status=CANCEL,
             context={
-                "errors":
-                    "
+                "errors": JobCancelError(
+                    "Execution was canceled from the event before start "
                     "azure-batch execution."
                 ).to_dict()
             },
@@ -1053,13 +1181,16 @@ def docker_execution(
         id_logic=(job.id or "EMPTY"),
         extras=job.extras,
     )
+
+    result.trace.info("[JOB]: Start Docker executor.")
+
     if event and event.is_set():
         return result.catch(
             status=CANCEL,
             context={
-                "errors":
-                    "
-                    "
+                "errors": JobCancelError(
+                    "Execution was canceled from the event before start "
+                    "start docker execution."
                 ).to_dict()
             },
         )