fluidattacks_batch_client 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- fluidattacks_batch_client/__init__.py +9 -0
- fluidattacks_batch_client/_cli.py +171 -0
- fluidattacks_batch_client/_logger.py +17 -0
- fluidattacks_batch_client/_utils.py +102 -0
- fluidattacks_batch_client/api/__init__.py +11 -0
- fluidattacks_batch_client/api/_client_1/__init__.py +36 -0
- fluidattacks_batch_client/api/_client_1/_get_command.py +68 -0
- fluidattacks_batch_client/api/_client_1/_list_jobs.py +123 -0
- fluidattacks_batch_client/api/_client_1/_send_job.py +189 -0
- fluidattacks_batch_client/api/_core.py +33 -0
- fluidattacks_batch_client/core.py +310 -0
- fluidattacks_batch_client/decode.py +175 -0
- fluidattacks_batch_client/py.typed +0 -0
- fluidattacks_batch_client/sender/__init__.py +15 -0
- fluidattacks_batch_client/sender/_core.py +26 -0
- fluidattacks_batch_client/sender/_send.py +87 -0
- fluidattacks_batch_client-0.1.0.dist-info/METADATA +9 -0
- fluidattacks_batch_client-0.1.0.dist-info/RECORD +20 -0
- fluidattacks_batch_client-0.1.0.dist-info/WHEEL +4 -0
- fluidattacks_batch_client-0.1.0.dist-info/entry_points.txt +3 -0
|
@@ -0,0 +1,189 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from mypy_boto3_batch.type_defs import (
|
|
4
|
+
ContainerOverridesTypeDef,
|
|
5
|
+
SubmitJobRequestTypeDef,
|
|
6
|
+
)
|
|
7
|
+
|
|
8
|
+
from fluidattacks_batch_client import (
|
|
9
|
+
_utils,
|
|
10
|
+
)
|
|
11
|
+
from fluidattacks_batch_client.core import (
|
|
12
|
+
ContainerOverride,
|
|
13
|
+
DependentJobRequest,
|
|
14
|
+
EnvVars,
|
|
15
|
+
JobArn,
|
|
16
|
+
JobDefOverride,
|
|
17
|
+
JobDependencies,
|
|
18
|
+
JobId,
|
|
19
|
+
ResourceRequirement,
|
|
20
|
+
)
|
|
21
|
+
from fa_purity import (
|
|
22
|
+
Cmd,
|
|
23
|
+
NewFrozenList,
|
|
24
|
+
PureIterFactory,
|
|
25
|
+
)
|
|
26
|
+
import logging
|
|
27
|
+
|
|
28
|
+
from typing import (
|
|
29
|
+
TYPE_CHECKING,
|
|
30
|
+
List,
|
|
31
|
+
Tuple,
|
|
32
|
+
)
|
|
33
|
+
|
|
34
|
+
if TYPE_CHECKING:
|
|
35
|
+
from mypy_boto3_batch.client import (
|
|
36
|
+
BatchClient,
|
|
37
|
+
)
|
|
38
|
+
from mypy_boto3_batch.type_defs import (
|
|
39
|
+
JobDependencyTypeDef,
|
|
40
|
+
KeyValuePairTypeDef,
|
|
41
|
+
ResourceRequirementTypeDef,
|
|
42
|
+
SubmitJobResponseTypeDef,
|
|
43
|
+
)
|
|
44
|
+
LOG = logging.getLogger(__name__)
|
|
45
|
+
|
|
46
|
+
|
|
47
|
+
def _encode_req(req: ResourceRequirement) -> ResourceRequirementTypeDef:
    """Translate a domain `ResourceRequirement` into the boto3 TypedDict shape.

    The numeric value is rendered as a string because the AWS Batch API
    expects `resourceRequirements[].value` to be a string.
    """
    return {
        "type": req.resource.value,
        "value": _utils.int_to_str(req.value.to_int),
    }
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
def _to_pair(key: str, val: str) -> KeyValuePairTypeDef:
|
|
55
|
+
return {
|
|
56
|
+
"name": key,
|
|
57
|
+
"value": val,
|
|
58
|
+
}
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
def _encode_env_vars(environment: EnvVars) -> NewFrozenList[KeyValuePairTypeDef]:
    """Encode every (name, value) entry of `environment` as a boto3 key/value pair."""
    return NewFrozenList(tuple(environment.items.items())).map(lambda t: _to_pair(*t))
|
|
63
|
+
|
|
64
|
+
|
|
65
|
+
def _decode_respose(
    response: SubmitJobResponseTypeDef,
) -> Tuple[JobId, JobArn]:
    """Extract the (JobId, JobArn) pair from a `submit_job` response.

    NOTE(review): the function name is misspelled ("respose" instead of
    "response"); it is module-private but renaming it would also touch its
    caller, so the name is preserved here.
    """
    return (JobId(response["jobId"]), JobArn(response["jobArn"]))
|
|
69
|
+
|
|
70
|
+
|
|
71
|
+
def _encode_deps(deps: JobDependencies) -> List[JobDependencyTypeDef]:
    """Encode each dependency's job id into the boto3 `dependsOn` item shape."""

    def _to_dep(job_id: JobId) -> JobDependencyTypeDef:
        return {"jobId": job_id.raw}

    encoded = PureIterFactory.from_list(tuple(deps.items)).map(_to_dep).to_list()
    # boto3 wants a mutable list, not the frozen sequence the pure iterator yields.
    return list(encoded)
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
def _apply_container_overrides(
    override: ContainerOverride,
) -> ContainerOverridesTypeDef:
    """Build the boto3 `containerOverrides` dict from an optional-field override.

    Each present field (command, environment, resources) contributes a
    one-key partial dict; absent fields contribute an empty dict. The
    partials are then folded together with a shallow merge. The three keys
    are disjoint, so the fold order has no observable effect.
    """

    def _merge(
        base: ContainerOverridesTypeDef, override: ContainerOverridesTypeDef
    ) -> ContainerOverridesTypeDef:
        # Shallow dict merge; keys from `override` win on collision.
        return ContainerOverridesTypeDef({**base, **override})

    empty: ContainerOverridesTypeDef = {}
    command_override: ContainerOverridesTypeDef = override.command.map(
        lambda c: ContainerOverridesTypeDef({"command": c.raw})
    ).value_or(empty)
    environment_override: ContainerOverridesTypeDef = override.environment.map(
        lambda e: ContainerOverridesTypeDef({"environment": _encode_env_vars(e).items})
    ).value_or(empty)
    resources_override: ContainerOverridesTypeDef = override.resources.map(
        lambda r: ContainerOverridesTypeDef(
            {"resourceRequirements": r.map(_encode_req).items}
        )
    ).value_or(empty)
    overrides = NewFrozenList[ContainerOverridesTypeDef].new(
        command_override,
        environment_override,
        resources_override,
    )
    return PureIterFactory.from_list(
        overrides.items,
    ).reduce(lambda b, c: _merge(b, c), empty)
|
|
107
|
+
|
|
108
|
+
|
|
109
|
+
def _apply_overrides(
    base: SubmitJobRequestTypeDef, override: JobDefOverride
) -> SubmitJobRequestTypeDef:
    """Apply every present field of `override` on top of the `base` request.

    Each optional field yields either `base` unchanged (when absent) or a
    copy of `base` with that one key set. All candidates are then folded
    with a shallow merge starting from `base`; since every candidate is a
    superset of `base` and the override keys are disjoint, the result is
    `base` plus every present override.
    """

    def _merge(
        base: SubmitJobRequestTypeDef, override: SubmitJobRequestTypeDef
    ) -> SubmitJobRequestTypeDef:
        # Shallow dict merge; keys from `override` win on collision.
        return SubmitJobRequestTypeDef({**base, **override})

    container_override: SubmitJobRequestTypeDef = override.container.map(
        lambda c: SubmitJobRequestTypeDef(
            {**base, "containerOverrides": _apply_container_overrides(c)}
        )
    ).value_or(base)
    timeout_override: SubmitJobRequestTypeDef = override.timeout.map(
        lambda t: SubmitJobRequestTypeDef(
            {
                **base,
                "timeout": {"attemptDurationSeconds": t.seconds.to_int},
            }
        )
    ).value_or(base)
    attempts_override: SubmitJobRequestTypeDef = override.retries.map(
        lambda r: SubmitJobRequestTypeDef(
            {
                **base,
                "retryStrategy": {"attempts": r.maximum.to_int},
            }
        )
    ).value_or(base)
    tags_override: SubmitJobRequestTypeDef = override.tags.map(
        lambda t: SubmitJobRequestTypeDef(
            {
                **base,
                "tags": dict(t.items),
            }
        )
    ).value_or(base)
    propagate_override: SubmitJobRequestTypeDef = override.propagate_tags.map(
        lambda p: SubmitJobRequestTypeDef(
            {
                **base,
                "propagateTags": p,
            }
        )
    ).value_or(base)
    overrides = NewFrozenList[SubmitJobRequestTypeDef].new(
        container_override,
        timeout_override,
        attempts_override,
        tags_override,
        propagate_override,
    )
    return PureIterFactory.from_list(
        overrides.items,
    ).reduce(lambda b, c: _merge(b, c), base)
|
|
164
|
+
|
|
165
|
+
|
|
166
|
+
def send_job(
    client: BatchClient, job_request: DependentJobRequest
) -> Cmd[Tuple[JobId, JobArn]]:
    """Return a `Cmd` that submits `job_request` to AWS Batch when executed.

    The request is built from the job's name, definition, queue, parallel
    size and dependencies; any `JobDefOverride` present is merged on top
    via `_apply_overrides`. Returns the submitted job's (JobId, JobArn).
    """
    job = job_request.job

    def _action() -> Tuple[JobId, JobArn]:
        # Fixed log-message typo: "Submiting" -> "Submitting".
        LOG.info("Submitting job: %s", job.name.raw)
        args: SubmitJobRequestTypeDef = {
            # Array job only when more than one parallel instance is requested.
            # NOTE(review): when size <= 1 an empty `arrayProperties` dict is
            # still passed through — confirm boto3 tolerates the empty mapping.
            "arrayProperties": {"size": job.parallel.size.to_int}
            if job.parallel.size.to_int > 1
            else {},
            "jobDefinition": job.job_def.raw,
            "jobName": job.name.raw,
            "jobQueue": job.queue.raw,
            "dependsOn": job_request.dependencies.map(_encode_deps).value_or([]),
        }
        response = client.submit_job(
            **job.override.map(lambda o: _apply_overrides(args, o)).value_or(args)
        )
        result = _decode_respose(response)
        LOG.info("Job sent! id=%s arn=%s", result[0].raw, result[1].raw)
        return result

    return Cmd.wrap_impure(_action)
|
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
from fluidattacks_batch_client.core import (
|
|
2
|
+
BatchJobObj,
|
|
3
|
+
Command,
|
|
4
|
+
DependentJobRequest,
|
|
5
|
+
JobArn,
|
|
6
|
+
JobDefinitionName,
|
|
7
|
+
JobId,
|
|
8
|
+
JobName,
|
|
9
|
+
JobStatus,
|
|
10
|
+
QueueName,
|
|
11
|
+
)
|
|
12
|
+
from collections.abc import (
|
|
13
|
+
Callable,
|
|
14
|
+
)
|
|
15
|
+
from dataclasses import (
|
|
16
|
+
dataclass,
|
|
17
|
+
)
|
|
18
|
+
from fa_purity import (
|
|
19
|
+
Cmd,
|
|
20
|
+
Maybe,
|
|
21
|
+
ResultE,
|
|
22
|
+
Stream,
|
|
23
|
+
)
|
|
24
|
+
from typing import (
|
|
25
|
+
Tuple,
|
|
26
|
+
)
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
@dataclass(frozen=True)
class ApiClient:
    """Bundle of the batch API operations exposed by this package.

    Each attribute is a callable so concrete implementations (or test
    doubles) can be injected without subclassing.
    """

    # Stream the jobs with a given name on a queue, filtered by status set.
    list_jobs: Callable[[JobName, QueueName, frozenset[JobStatus]], Stream[BatchJobObj]]
    # Submit a job (with optional dependencies); yields its (JobId, JobArn).
    send_job: Callable[[DependentJobRequest], Cmd[Tuple[JobId, JobArn]]]
    # Fetch the command configured on a job definition, when one exists.
    get_job_def_command: Callable[[JobDefinitionName], Cmd[ResultE[Maybe[Command]]]]
|
|
@@ -0,0 +1,310 @@
|
|
|
1
|
+
from __future__ import (
|
|
2
|
+
annotations,
|
|
3
|
+
)
|
|
4
|
+
|
|
5
|
+
from fluidattacks_batch_client import (
|
|
6
|
+
_utils,
|
|
7
|
+
)
|
|
8
|
+
from dataclasses import (
|
|
9
|
+
dataclass,
|
|
10
|
+
field,
|
|
11
|
+
)
|
|
12
|
+
from enum import (
|
|
13
|
+
Enum,
|
|
14
|
+
)
|
|
15
|
+
from fa_purity import (
|
|
16
|
+
Cmd,
|
|
17
|
+
FrozenDict,
|
|
18
|
+
FrozenList,
|
|
19
|
+
Maybe,
|
|
20
|
+
NewFrozenList,
|
|
21
|
+
Result,
|
|
22
|
+
ResultE,
|
|
23
|
+
PureIterFactory,
|
|
24
|
+
PureIterTransform,
|
|
25
|
+
Unsafe,
|
|
26
|
+
)
|
|
27
|
+
import os
|
|
28
|
+
from typing import (
|
|
29
|
+
FrozenSet,
|
|
30
|
+
)
|
|
31
|
+
|
|
32
|
+
from fluidattacks_batch_client._utils import LibraryBug
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
@dataclass(frozen=True)
class Natural:
    """A non-negative integer, obtainable only through its smart constructors."""

    @dataclass(frozen=True)
    class _Private:
        # Token type: requiring an instance as the first field prevents
        # direct construction from outside this class.
        pass

    _private: Natural._Private = field(repr=False, hash=False, compare=False)
    # The wrapped value; >= 0 by construction.
    to_int: int

    @staticmethod
    def assert_natural(raw: int) -> ResultE[Natural]:
        """Validate that `raw` is >= 0; failure result otherwise."""
        if raw >= 0:
            return Result.success(Natural(Natural._Private(), raw))
        err = ValueError("The supplied integer is not a natural number")
        return Result.failure(Exception(err))

    @classmethod
    def abs(cls, raw: int) -> Natural:
        """Build a Natural from any int via absolute value (total function).

        A validation failure here would indicate a bug in this library,
        hence the `LibraryBug` wrap and unconditional raise.
        """
        return (
            cls.assert_natural(abs(raw))
            .alt(LibraryBug)
            .alt(Unsafe.raise_exception)
            .to_union()
        )
|
|
59
|
+
|
|
60
|
+
|
|
61
|
+
@dataclass(frozen=True)
class QueueName:
    """Wrapper for an AWS Batch job queue name."""

    raw: str
|
|
64
|
+
|
|
65
|
+
|
|
66
|
+
@dataclass(frozen=True)
class Attempts:
    """Retry-attempt count for a job: a Natural bounded above by 10."""

    @dataclass(frozen=True)
    class _Private:
        pass

    _private: Attempts._Private = field(repr=False, hash=False, compare=False)
    maximum: Natural

    @staticmethod
    def new(raw: Natural) -> ResultE[Attempts]:
        """Validate the upper bound of 10 attempts."""
        if raw.to_int > 10:
            err = ValueError("Attempts must be a Natural <= 10")
            return Result.failure(Exception(err))
        return Result.success(Attempts(Attempts._Private(), raw))
|
|
81
|
+
|
|
82
|
+
|
|
83
|
+
@dataclass(frozen=True)
class Timeout:
    """Job timeout in seconds: a Natural of at least 60."""

    @dataclass(frozen=True)
    class _Private:
        pass

    _private: Timeout._Private = field(repr=False, hash=False, compare=False)
    seconds: Natural

    @staticmethod
    def new(raw: Natural) -> ResultE[Timeout]:
        """Validate the lower bound of 60 seconds."""
        if raw.to_int < 60:
            err = ValueError("Timeout must be a Natural >= 60")
            return Result.failure(Exception(err))
        return Result.success(Timeout(Timeout._Private(), raw))
|
|
98
|
+
|
|
99
|
+
|
|
100
|
+
@dataclass(frozen=True)
class Command:
    """Container command: the argv list passed to the job's container."""

    raw: FrozenList[str]
|
|
103
|
+
|
|
104
|
+
|
|
105
|
+
@dataclass(frozen=True)
class JobDefinitionName:
    """Wrapper for an AWS Batch job definition name (or name:revision)."""

    raw: str
|
|
108
|
+
|
|
109
|
+
|
|
110
|
+
@dataclass(frozen=True)
class EnvVarPointer:
    """Reference to a process environment variable, read lazily."""

    name: str

    def get_value(self) -> Cmd[Maybe[str]]:
        """Return a Cmd that reads the variable; empty `Maybe` when unset."""
        return Cmd.wrap_impure(lambda: Maybe.from_optional(os.environ.get(self.name)))
|
|
116
|
+
|
|
117
|
+
|
|
118
|
+
class ResourceType(Enum):
    """Resource dimensions an AWS Batch job can request."""

    VCPU = "VCPU"
    MEMORY = "MEMORY"

    @staticmethod
    def to_req_type(raw: str) -> ResultE[ResourceType]:
        """Parse a (case-insensitive) string into a ResourceType."""
        return _utils.handle_value_error(lambda: ResourceType(raw.upper()))
|
|
125
|
+
|
|
126
|
+
|
|
127
|
+
@dataclass(frozen=True)
class ResourceRequirement:
    """A single resource request: how much of one resource type the job needs."""

    resource: ResourceType
    value: Natural
|
|
131
|
+
|
|
132
|
+
|
|
133
|
+
@dataclass(frozen=True)
class Tags:
    """Immutable mapping of AWS tags (key -> value)."""

    items: FrozenDict[str, str]
|
|
136
|
+
|
|
137
|
+
|
|
138
|
+
@dataclass(frozen=True)
class EnvVar:
    """A single environment variable (name, value) pair."""

    name: str
    value: str
|
|
142
|
+
|
|
143
|
+
|
|
144
|
+
@dataclass(frozen=True)
class EnvVars:
    """Immutable mapping of environment variables (name -> value)."""

    items: FrozenDict[str, str]
|
|
147
|
+
|
|
148
|
+
|
|
149
|
+
@dataclass(frozen=True)
class JobSize:
    """Array-job parallelism: a Natural between 1 and 10000 inclusive."""

    @dataclass(frozen=True)
    class _Private:
        pass

    _private: JobSize._Private = field(repr=False, hash=False, compare=False)
    size: Natural

    @staticmethod
    def new(raw: Natural) -> ResultE[JobSize]:
        """Validate the AWS Batch array-size bounds [1, 10000]."""
        if 1 <= raw.to_int <= 10000:
            return Result.success(JobSize(JobSize._Private(), raw))
        err = ValueError("JobSize must be a Natural between 1 and 10000")
        return Result.failure(Exception(err))
|
|
164
|
+
|
|
165
|
+
|
|
166
|
+
class JobStatus(Enum):
    """Lifecycle states of an AWS Batch job."""

    SUBMITTED = "SUBMITTED"
    PENDING = "PENDING"
    RUNNABLE = "RUNNABLE"
    STARTING = "STARTING"
    RUNNING = "RUNNING"
    SUCCEEDED = "SUCCEEDED"
    FAILED = "FAILED"

    @staticmethod
    def to_status(raw: str) -> ResultE[JobStatus]:
        """Parse a (case-insensitive) string into a JobStatus."""
        return _utils.handle_value_error(lambda: JobStatus(raw.upper()))
|
|
178
|
+
|
|
179
|
+
|
|
180
|
+
@dataclass(frozen=True)
class JobName:
    """Validated AWS Batch job name.

    Rules enforced: first character alphanumeric, remaining characters
    alphanumeric / `_` / `-`, and at most 128 characters total.
    """

    @dataclass(frozen=True)
    class _Private:
        pass

    _private: JobName._Private = field(repr=False, hash=False, compare=False)
    raw: str

    @staticmethod
    def new(raw: str) -> ResultE[JobName]:
        """Validate `raw` against the naming rules; failure result otherwise."""

        def _check(index: int, char: str) -> bool:
            # Indexing starts at 1, so index == 1 is the first character.
            if index == 1:
                return char.isalnum()
            return char.isalnum() or char in ["_", "-"]

        validation = (
            PureIterFactory.from_list(tuple(raw)).enumerate(1).map(lambda t: _check(*t))
        )
        if len(raw) <= 128 and all(validation):
            return Result.success(JobName(JobName._Private(), raw))
        err = ValueError("JobName does not fulfill naming rules")
        return Result.failure(Exception(err))

    @staticmethod
    def normalize(raw: str) -> JobName:
        """Coerce any string into a valid JobName (total function).

        A non-alphanumeric first character becomes "X"; other invalid
        characters become "-"; the result is truncated to 128 characters.
        Note `-` itself is normalized to `-` only via the invalid-char
        branch, since the keep-list here is alphanumerics plus "_".
        """

        def _normalize(index: int, char: str) -> str:
            if index == 1 and not char.isalnum():
                return "X"
            if char.isalnum() or char in ["_"]:
                return char
            else:
                return "-"

        # Pair each normalized character with its 1-based index so the
        # truncation step below can cut at position 128.
        text = (
            PureIterFactory.from_list(tuple(raw))
            .enumerate(1)
            .map(lambda t: (t[0], _normalize(*t)))
        )
        truncated = PureIterTransform.until_none(
            text.map(lambda t: t[1] if t[0] <= 128 else None)
        )
        return JobName(JobName._Private(), "".join(truncated))
|
|
223
|
+
|
|
224
|
+
|
|
225
|
+
@dataclass(frozen=True)
class JobId:
    """Wrapper for an AWS Batch job id."""

    raw: str
|
|
228
|
+
|
|
229
|
+
|
|
230
|
+
@dataclass(frozen=True)
class JobArn:
    """Wrapper for an AWS Batch job ARN."""

    raw: str
|
|
233
|
+
|
|
234
|
+
|
|
235
|
+
@dataclass(frozen=True)
class JobDependencies:
    """Non-empty set of job ids a job depends on; at most 20 entries."""

    @dataclass(frozen=True)
    class _Private:
        pass

    _private: JobDependencies._Private = field(repr=False, hash=False, compare=False)
    items: FrozenSet[JobId]

    @staticmethod
    def new(items: FrozenSet[JobId]) -> ResultE[JobDependencies]:
        """Validate the size bounds [1, 20] on the dependency set."""
        if 1 <= len(items) <= 20:
            return Result.success(JobDependencies(JobDependencies._Private(), items))
        # Fixed typo in the error message: "maximun" -> "maximum".
        err = ValueError("The maximum number of dependencies for a job is 20")
        return Result.failure(Exception(err))
|
|
250
|
+
|
|
251
|
+
|
|
252
|
+
@dataclass(frozen=True)
class BatchJob:
    """Observed state of a batch job as reported by the API."""

    # Creation timestamp (integer; unit not evident here — presumably epoch
    # millis as returned by AWS Batch; TODO confirm against the list decoder).
    created_at: int
    status: JobStatus
    status_reason: Maybe[str]
    started_at: Maybe[int]
    # NOTE(review): field name is misspelled ("stoped_at"); kept as-is since
    # renaming would break every consumer of this public dataclass.
    stoped_at: Maybe[int]
|
|
259
|
+
|
|
260
|
+
|
|
261
|
+
@dataclass(frozen=True)
class BatchJobObj:
    """A batch job together with its identifiers (id, ARN, name)."""

    job_id: JobId
    arn: JobArn
    name: JobName
    job: BatchJob
|
|
267
|
+
|
|
268
|
+
|
|
269
|
+
@dataclass(frozen=True)
class ContainerOverride:
    """Optional container-level overrides applied at job submission."""

    command: Maybe[Command]
    environment: Maybe[EnvVars]
    resources: Maybe[NewFrozenList[ResourceRequirement]]
|
|
274
|
+
|
|
275
|
+
|
|
276
|
+
@dataclass(frozen=True)
class JobDefOverride:
    """Optional job-definition overrides applied at job submission."""

    retries: Maybe[Attempts]
    timeout: Maybe[Timeout]
    container: Maybe[ContainerOverride]
    tags: Maybe[Tags]
    propagate_tags: Maybe[bool]
|
|
283
|
+
|
|
284
|
+
|
|
285
|
+
@dataclass(frozen=True)
class JobRequest:
    """Everything needed to submit one job (without dependencies)."""

    name: JobName
    job_def: JobDefinitionName
    queue: QueueName
    # Array-job size; a size of 1 means a plain (non-array) job.
    parallel: JobSize
    override: Maybe[JobDefOverride]
|
|
292
|
+
|
|
293
|
+
|
|
294
|
+
@dataclass(frozen=True)
class DependentJobRequest:
    """A job request plus the jobs it must wait for (when any)."""

    job: JobRequest
    dependencies: Maybe[JobDependencies]
|
|
298
|
+
|
|
299
|
+
|
|
300
|
+
@dataclass(frozen=True)
class AllowDuplicates:
    """Flag: whether submitting a job duplicating a running one is allowed."""

    value: bool
|
|
303
|
+
|
|
304
|
+
|
|
305
|
+
@dataclass(frozen=True)
class JobPipeline:
    """Ordered sequence of job requests forming a pipeline."""

    jobs: NewFrozenList[JobRequest]

    def __repr__(self) -> str:
        # Custom repr labels the jobs as "drafts" (not yet submitted).
        return f"JobPipeline(drafts={self.jobs.items})"
|
|
@@ -0,0 +1,175 @@
|
|
|
1
|
+
from __future__ import (
|
|
2
|
+
annotations,
|
|
3
|
+
)
|
|
4
|
+
from dataclasses import dataclass
|
|
5
|
+
|
|
6
|
+
from fluidattacks_batch_client.core import (
|
|
7
|
+
Attempts,
|
|
8
|
+
Command,
|
|
9
|
+
ContainerOverride,
|
|
10
|
+
EnvVar,
|
|
11
|
+
EnvVars,
|
|
12
|
+
JobDefOverride,
|
|
13
|
+
JobDefinitionName,
|
|
14
|
+
JobRequest,
|
|
15
|
+
JobName,
|
|
16
|
+
JobPipeline,
|
|
17
|
+
JobSize,
|
|
18
|
+
QueueName,
|
|
19
|
+
Natural,
|
|
20
|
+
ResourceRequirement,
|
|
21
|
+
ResourceType,
|
|
22
|
+
Tags,
|
|
23
|
+
Timeout,
|
|
24
|
+
)
|
|
25
|
+
from fa_purity import (
|
|
26
|
+
FrozenDict,
|
|
27
|
+
NewFrozenList,
|
|
28
|
+
Result,
|
|
29
|
+
ResultE,
|
|
30
|
+
ResultSmash,
|
|
31
|
+
ResultTransform,
|
|
32
|
+
)
|
|
33
|
+
from fa_purity.json import (
|
|
34
|
+
JsonObj,
|
|
35
|
+
JsonPrimitiveUnfolder,
|
|
36
|
+
JsonUnfolder,
|
|
37
|
+
JsonValue,
|
|
38
|
+
Unfolder,
|
|
39
|
+
)
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
def _to_str(raw: JsonValue) -> ResultE[str]:
    """Unfold a JSON value into a `str`, failing on any other shape."""
    return Unfolder.to_primitive(raw).bind(JsonPrimitiveUnfolder.to_str)
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
def _to_int(raw: JsonValue) -> ResultE[int]:
    """Unfold a JSON value into an `int`, failing on any other shape."""
    return Unfolder.to_primitive(raw).bind(JsonPrimitiveUnfolder.to_int)
|
|
48
|
+
|
|
49
|
+
|
|
50
|
+
def _to_bool(raw: JsonValue) -> ResultE[bool]:
    """Unfold a JSON value into a `bool`, failing on any other shape."""
    return Unfolder.to_primitive(raw).bind(JsonPrimitiveUnfolder.to_bool)
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
@dataclass(frozen=True)
class JobDefDecoder:
    """Decode job/pipeline definitions from JSON objects.

    Environment-variable entries without an explicit "value" are resolved
    against `external_env_vars` (typically the process environment captured
    by the caller).
    """

    external_env_vars: FrozenDict[str, str]

    def _get_env_var(self, name: str) -> ResultE[str]:
        """Look up `name` in the external vars; failure when missing."""
        try:
            return Result.success(self.external_env_vars[name])
        except KeyError:
            return Result.failure(
                KeyError(f"Environment variable `{name}` is not present")
            )

    def _decode_env_var(self, raw: JsonObj) -> ResultE[EnvVar]:
        """Decode {"name": ..., "value"?: ...}; a missing value falls back
        to the external environment lookup."""
        _name = JsonUnfolder.require(raw, "name", _to_str)

        def _get_value(name: str) -> ResultE[str]:
            # Coproduct: left = explicit value present, right = absent ->
            # resolve from the external environment.
            return JsonUnfolder.optional(raw, "value", _to_str).bind(
                lambda m: m.to_coproduct().map(
                    lambda s: Result.success(s),
                    lambda _: self._get_env_var(name),
                )
            )

        return _name.bind(
            lambda name: _get_value(name).map(lambda value: EnvVar(name, value))
        )

    def _decode_vars(self, raw: JsonValue) -> ResultE[EnvVars]:
        """Decode a JSON list of env-var objects into an EnvVars mapping.

        Later duplicates of the same name win, since the pairs are folded
        into a dict.
        """
        return (
            Unfolder.to_list_of(
                raw, lambda v: Unfolder.to_json(v).bind(self._decode_env_var)
            )
            .map(NewFrozenList)
            .map(lambda i: i.map(lambda e: (e.name, e.value)))
            .map(lambda i: EnvVars(FrozenDict(dict(i))))
        )

    def _decode_resource_req(self, raw: JsonObj) -> ResultE[ResourceRequirement]:
        """Decode {"resource": <type>, "value": <natural int>}."""
        _resource = JsonUnfolder.require(raw, "resource", _to_str).bind(
            ResourceType.to_req_type
        )
        _value = JsonUnfolder.require(
            raw, "value", lambda v: _to_int(v).bind(Natural.assert_natural)
        )
        return ResultSmash.smash_result_2(
            _resource,
            _value,
        ).map(lambda t: ResourceRequirement(*t))

    def _decode_container_override(self, raw: JsonObj) -> ResultE[ContainerOverride]:
        """Decode the optional "command" / "environment" / "resources" keys."""
        _command = JsonUnfolder.optional(
            raw, "command", lambda v: Unfolder.to_list_of(v, _to_str).map(Command)
        )
        _environment = JsonUnfolder.optional(raw, "environment", self._decode_vars)
        _resources = JsonUnfolder.optional(
            raw,
            "resources",
            lambda v: Unfolder.to_list_of(
                v, lambda i: Unfolder.to_json(i).bind(self._decode_resource_req)
            ).map(NewFrozenList),
        )
        return ResultSmash.smash_result_3(
            _command,
            _environment,
            _resources,
        ).map(lambda t: ContainerOverride(*t))

    def _decode_override(self, raw: JsonObj) -> ResultE[JobDefOverride]:
        """Decode an optional-field JobDefOverride object."""
        _retries = JsonUnfolder.optional(
            raw,
            "retries",
            lambda v: _to_int(v).bind(Natural.assert_natural).bind(Attempts.new),
        )
        _timeout = JsonUnfolder.optional(
            raw,
            "timeout",
            lambda v: _to_int(v).bind(Natural.assert_natural).bind(Timeout.new),
        )
        _container = JsonUnfolder.optional(
            raw,
            "container",
            lambda v: Unfolder.to_json(v).bind(self._decode_container_override),
        )
        _tags = JsonUnfolder.optional(
            raw, "tags", lambda v: Unfolder.to_dict_of(v, _to_str).map(Tags)
        )
        _propagate_tags = JsonUnfolder.optional(raw, "propagate_tags", _to_bool)
        return ResultSmash.smash_result_5(
            _retries,
            _timeout,
            _container,
            _tags,
            _propagate_tags,
        ).map(lambda t: JobDefOverride(*t))

    def decode_job(self, raw: JsonObj) -> ResultE[JobRequest]:
        """Decode one JobRequest: requires "name", "definition", "queue",
        "parallel"; "override" is optional."""
        _name = JsonUnfolder.require(raw, "name", _to_str).bind(JobName.new)
        _job_def = JsonUnfolder.require(raw, "definition", _to_str).map(
            JobDefinitionName
        )
        _queue = JsonUnfolder.require(raw, "queue", _to_str).map(QueueName)
        _size = (
            JsonUnfolder.require(raw, "parallel", _to_int)
            .bind(Natural.assert_natural)
            .bind(JobSize.new)
        )
        _override = JsonUnfolder.optional(
            raw, "override", lambda v: Unfolder.to_json(v).bind(self._decode_override)
        )
        return ResultSmash.smash_result_5(
            _name,
            _job_def,
            _queue,
            _size,
            _override,
        ).map(lambda t: JobRequest(*t))

    def decode_pipeline(
        self,
        raw: NewFrozenList[JsonObj],
    ) -> ResultE[JobPipeline]:
        """Decode every element of `raw` as a job; fail if any fails."""
        return ResultTransform.all_ok_2(raw.map(self.decode_job)).map(JobPipeline)
|
|
File without changes
|