compose-runner 0.6.3rc1-py2.py3-none-any.whl → 0.6.4rc2-py2.py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- compose_runner/_version.py +2 -2
- compose_runner/aws_lambda/run_handler.py +71 -1
- compose_runner/ecs_task.py +3 -0
- compose_runner/tests/cassettes/test_lambda_handlers/test_select_task_size_uses_large_for_montecarlo.yaml +60 -0
- compose_runner/tests/cassettes/test_lambda_handlers/test_select_task_size_uses_standard_for_fdr.yaml +55 -0
- compose_runner/tests/test_lambda_handlers.py +51 -0
- {compose_runner-0.6.3rc1.dist-info → compose_runner-0.6.4rc2.dist-info}/METADATA +2 -2
- {compose_runner-0.6.3rc1.dist-info → compose_runner-0.6.4rc2.dist-info}/RECORD +11 -9
- {compose_runner-0.6.3rc1.dist-info → compose_runner-0.6.4rc2.dist-info}/WHEEL +0 -0
- {compose_runner-0.6.3rc1.dist-info → compose_runner-0.6.4rc2.dist-info}/entry_points.txt +0 -0
- {compose_runner-0.6.3rc1.dist-info → compose_runner-0.6.4rc2.dist-info}/licenses/LICENSE +0 -0
compose_runner/_version.py
CHANGED
@@ -28,7 +28,7 @@ version_tuple: VERSION_TUPLE
 commit_id: COMMIT_ID
 __commit_id__: COMMIT_ID

-__version__ = version = '0.6.3rc1'
-__version_tuple__ = version_tuple = (0, 6, 3, 'rc1')
+__version__ = version = '0.6.4rc2'
+__version_tuple__ = version_tuple = (0, 6, 4, 'rc2')

 __commit_id__ = commit_id = None

compose_runner/aws_lambda/run_handler.py
CHANGED
@@ -4,6 +4,8 @@ import json
 import logging
 import os
 import uuid
+import urllib.error
+import urllib.request
 from typing import Any, Dict, Optional

 import boto3
@@ -22,6 +24,8 @@ RESULTS_PREFIX_ENV = "RESULTS_PREFIX"
 NSC_KEY_ENV = "NSC_KEY"
 NV_KEY_ENV = "NV_KEY"

+DEFAULT_TASK_SIZE = "standard"
+

 def _log(job_id: str, message: str, **details: Any) -> None:
     payload = {"job_id": job_id, "message": message, **details}
@@ -29,6 +33,67 @@ def _log(job_id: str, message: str, **details: Any) -> None:
     logger.info(json.dumps(payload))


+def _compose_api_base_url(environment: str) -> str:
+    env = (environment or "production").lower()
+    if env == "staging":
+        return "https://synth.neurostore.xyz/api"
+    if env == "local":
+        return "http://localhost:81/api"
+    return "https://compose.neurosynth.org/api"
+
+
+def _fetch_meta_analysis(meta_analysis_id: str, environment: str) -> Optional[Dict[str, Any]]:
+    base_url = _compose_api_base_url(environment).rstrip("/")
+    url = f"{base_url}/meta-analyses/{meta_analysis_id}?nested=true"
+    request = urllib.request.Request(url, headers={"User-Agent": "compose-runner/submit"})
+    try:
+        with urllib.request.urlopen(request, timeout=10) as response:
+            return json.load(response)
+    except (urllib.error.URLError, urllib.error.HTTPError, json.JSONDecodeError) as exc:
+        logger.warning("Failed to fetch meta-analysis %s: %s", meta_analysis_id, exc)
+        return None
+
+
+def _requires_large_task(specification: Dict[str, Any]) -> bool:
+    if not isinstance(specification, dict):
+        return False
+    corrector = specification.get("corrector")
+    if not isinstance(corrector, dict):
+        return False
+    if corrector.get("type") != "FWECorrector":
+        return False
+    args = corrector.get("args")
+    if not isinstance(args, dict):
+        return False
+    method = args.get("method")
+    if method is None:
+        kwargs = args.get("**kwargs")
+        if isinstance(kwargs, dict):
+            method = kwargs.get("method")
+    if isinstance(method, str) and method.lower() == "montecarlo":
+        return True
+    return False
+
+
+def _select_task_size(meta_analysis_id: str, environment: str, artifact_prefix: str) -> str:
+    doc = _fetch_meta_analysis(meta_analysis_id, environment)
+    if not doc:
+        return DEFAULT_TASK_SIZE
+    specification = doc.get("specification")
+    try:
+        if _requires_large_task(specification):
+            _log(
+                artifact_prefix,
+                "workflow.task_size_selected",
+                task_size="large",
+                reason="montecarlo_fwe",
+            )
+            return "large"
+    except Exception as exc:  # noqa: broad-except
+        logger.warning("Failed to evaluate specification for %s: %s", meta_analysis_id, exc)
+    return DEFAULT_TASK_SIZE
+
+
 def _job_input(
     payload: Dict[str, Any],
     artifact_prefix: str,
@@ -36,6 +101,7 @@ def _job_input(
     prefix: Optional[str],
     nsc_key: Optional[str],
     nv_key: Optional[str],
+    task_size: str,
 ) -> Dict[str, Any]:
     no_upload_flag = bool(payload.get("no_upload", False))
     doc: Dict[str, Any] = {
@@ -44,6 +110,7 @@ def _job_input(
         "environment": payload.get("environment", "production"),
         "no_upload": "true" if no_upload_flag else "false",
         "results": {"bucket": bucket or "", "prefix": prefix or ""},
+        "task_size": task_size,
     }
     n_cores = payload.get("n_cores")
     doc["n_cores"] = str(n_cores) if n_cores is not None else ""
@@ -76,7 +143,10 @@ def handler(event: Dict[str, Any], context: Any) -> Dict[str, Any]:
     nsc_key = payload.get("nsc_key") or os.environ.get(NSC_KEY_ENV)
     nv_key = payload.get("nv_key") or os.environ.get(NV_KEY_ENV)

-    job_input = _job_input(payload, artifact_prefix, bucket, prefix, nsc_key, nv_key)
+    environment = payload.get("environment", "production")
+    task_size = _select_task_size(payload["meta_analysis_id"], environment, artifact_prefix)
+
+    job_input = _job_input(payload, artifact_prefix, bucket, prefix, nsc_key, nv_key, task_size)
     params = {
         "stateMachineArn": os.environ[STATE_MACHINE_ARN_ENV],
         "name": artifact_prefix,
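
For reference, a minimal usage sketch of the new corrector check (assuming compose-runner 0.6.4rc2 is installed; the specification dicts below are illustrative, not pulled from a live meta-analysis):

```python
from compose_runner.aws_lambda import run_handler

# An FWECorrector with an explicit montecarlo method selects the large task size.
fwe_spec = {"corrector": {"type": "FWECorrector", "args": {"method": "montecarlo"}}}
assert run_handler._requires_large_task(fwe_spec) is True

# The method may also be nested under "**kwargs" in the specification args.
nested_spec = {"corrector": {"type": "FWECorrector", "args": {"**kwargs": {"method": "montecarlo"}}}}
assert run_handler._requires_large_task(nested_spec) is True

# Anything else (e.g. an FDRCorrector) keeps the default "standard" size.
fdr_spec = {"corrector": {"type": "FDRCorrector", "args": {"method": "indep"}}}
assert run_handler._requires_large_task(fdr_spec) is False
```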
compose_runner/ecs_task.py
CHANGED
@@ -93,6 +93,7 @@ def main() -> None:
     nv_key = os.environ.get(NV_KEY_ENV) or None
     no_upload = _bool_from_env(os.environ.get(NO_UPLOAD_ENV))
     n_cores = _resolve_n_cores(os.environ.get(N_CORES_ENV))
+    compose_runner_version = os.environ.get("COMPOSE_RUNNER_VERSION", "unknown")

     bucket = os.environ.get(RESULTS_BUCKET_ENV)
     prefix = os.environ.get(RESULTS_PREFIX_ENV)
@@ -106,6 +107,7 @@ def main() -> None:
         meta_analysis_id=meta_analysis_id,
         environment=environment,
         no_upload=no_upload,
+        compose_runner_version=compose_runner_version,
     )
     try:
         url, _ = run_compose(
@@ -125,6 +127,7 @@ def main() -> None:
         "result_url": url,
         "artifacts_bucket": bucket,
         "artifacts_prefix": prefix,
+        "compose_runner_version": compose_runner_version,
     }

     if bucket:
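
A small sketch of how the new version stamping behaves (the `COMPOSE_RUNNER_VERSION` name comes from the diff above; the injected value here is hypothetical, e.g. set by the ECS task definition):

```python
import os

# Mirrors the lookup added in ecs_task.main(): when the variable is unset,
# the task reports "unknown".
os.environ.pop("COMPOSE_RUNNER_VERSION", None)
print(os.environ.get("COMPOSE_RUNNER_VERSION", "unknown"))  # -> unknown

# With the variable injected, the same value is threaded into the kwargs and
# the result document shown in the hunks above.
os.environ["COMPOSE_RUNNER_VERSION"] = "0.6.4rc2"
print(os.environ.get("COMPOSE_RUNNER_VERSION", "unknown"))  # -> 0.6.4rc2
```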

compose_runner/tests/cassettes/test_lambda_handlers/test_select_task_size_uses_large_for_montecarlo.yaml
ADDED
@@ -0,0 +1,60 @@
+interactions:
+- request:
+    method: GET
+    uri: https://synth.neurostore.xyz/api/meta-analyses/ZPSvyvhZAopz?nested=true
+    body: null
+    headers:
+      Accept:
+      - '*/*'
+      Accept-Encoding:
+      - gzip, deflate
+      Connection:
+      - keep-alive
+      User-Agent:
+      - python-requests/2.32.4
+  response:
+    status:
+      code: 200
+      message: OK
+    headers:
+      Server:
+      - nginx/1.21.6
+      Date:
+      - Tue, 21 Oct 2025 14:08:45 GMT
+      Content-Type:
+      - application/json
+      Transfer-Encoding:
+      - chunked
+      Connection:
+      - keep-alive
+      Vary:
+      - Accept-Encoding
+      Content-Encoding:
+      - gzip
+      Strict-Transport-Security:
+      - max-age=31536000
+    body:
+      string: '{"id": "ZPSvyvhZAopz", "created_at": "2025-10-21T04:57:40.236536+00:00",
+        "updated_at": null, "user": "github|12564882", "username": "James Kent", "name":
+        "Untitled MKDADensity Meta Analysis: included", "description": "MKDADensity
+        meta analysis with FWECorrector", "provenance": null, "specification": {"id":
+        "zQdMa4uAaYYU", "created_at": "2025-10-21T04:57:39.888528+00:00", "updated_at":
+        null, "user": "github|12564882", "username": "James Kent", "type": "CBMA",
+        "estimator": {"type": "MKDADensity", "args": {"null_method": "approximate",
+        "n_iters": 5000, "**kwargs": {}, "kernel__r": 10, "kernel__value": 1}}, "database_studyset":
+        null, "filter": "included", "corrector": {"type": "FWECorrector", "args":
+        {"voxel_thresh": 0.001, "n_iters": 5000, "vfwe_only": false, "method": "montecarlo"}},
+        "conditions": [true], "weights": [1.0]}, "neurostore_analysis": {"id": "8S5xRedCGRkz",
+        "created_at": "2025-10-21T04:57:40.255480+00:00", "updated_at": null, "neurostore_id":
+        null, "exception": null, "traceback": null, "status": "PENDING"}, "studyset":
+        {"id": "9jPvdkuRufUP", "created_at": "2025-10-21T04:57:40.008456+00:00", "updated_at":
+        null, "user": "github|12564882", "username": "James Kent", "snapshot": null,
+        "neurostore_id": "3EmvH2LELwR2", "version": null, "url": "https://neurostore.org/api/studysets/3EmvH2LELwR2"},
+        "annotation": {"id": "YVLt6DRFKdd5", "created_at": "2025-10-21T04:57:40.121637+00:00",
+        "updated_at": null, "user": "github|12564882", "username": "James Kent", "snapshot":
+        null, "neurostore_id": "TebrRstj8ofh", "studyset": "3EmvH2LELwR2", "url":
+        "https://neurostore.org/api/annotations/TebrRstj8ofh"}, "project": "D2cTfoxNfpLy",
+        "cached_studyset": "9jPvdkuRufUP", "cached_annotation": "YVLt6DRFKdd5", "run_key":
+        "PDeDnh_8MXc88xoVJySz3w", "results": [], "neurostore_url": null}'
+    http_version: HTTP/1.1
+version: 1
compose_runner/tests/cassettes/test_lambda_handlers/test_select_task_size_uses_standard_for_fdr.yaml
ADDED
@@ -0,0 +1,55 @@
+interactions:
+- request:
+    body: null
+    headers:
+      Connection:
+      - close
+      Host:
+      - synth.neurostore.xyz
+      User-Agent:
+      - compose-runner/submit
+    method: GET
+    uri: https://synth.neurostore.xyz/api/meta-analyses/VtFZJFniCKvG?nested=true
+  response:
+    body:
+      string: '{"id": "VtFZJFniCKvG", "created_at": "2025-10-21T14:10:35.309383+00:00",
+        "updated_at": null, "user": "github|12564882", "username": "James Kent", "name":
+        "Untitled MKDADensity Meta Analysis: included (1)", "description": "MKDADensity
+        meta analysis with FDRCorrector", "provenance": null, "specification": {"id":
+        "DtVzKEKGaXLu", "created_at": "2025-10-21T14:10:34.564365+00:00", "updated_at":
+        null, "user": "github|12564882", "username": "James Kent", "type": "CBMA",
+        "estimator": {"type": "MKDADensity", "args": {"null_method": "approximate",
+        "n_iters": 5000, "**kwargs": {}, "kernel__r": 10, "kernel__value": 1}}, "database_studyset":
+        null, "filter": "included", "corrector": {"type": "FDRCorrector", "args":
+        {"method": "indep", "alpha": 0.05}}, "conditions": [true], "weights": [1.0]},
+        "neurostore_analysis": {"id": "564c8kRnJVT4", "created_at": "2025-10-21T14:10:35.325173+00:00",
+        "updated_at": null, "neurostore_id": null, "exception": null, "traceback":
+        null, "status": "PENDING"}, "studyset": {"id": "FA3BDBdGRZ5d", "created_at":
+        "2025-10-21T14:10:34.821625+00:00", "updated_at": null, "user": "github|12564882",
+        "username": "James Kent", "snapshot": null, "neurostore_id": "3EmvH2LELwR2",
+        "version": null, "url": "https://neurostore.org/api/studysets/3EmvH2LELwR2"},
+        "annotation": {"id": "XELVYV7ftp7e", "created_at": "2025-10-21T14:10:35.183354+00:00",
+        "updated_at": null, "user": "github|12564882", "username": "James Kent", "snapshot":
+        null, "neurostore_id": "TebrRstj8ofh", "studyset": "3EmvH2LELwR2", "url":
+        "https://neurostore.org/api/annotations/TebrRstj8ofh"}, "project": "D2cTfoxNfpLy",
+        "cached_studyset": "FA3BDBdGRZ5d", "cached_annotation": "XELVYV7ftp7e", "run_key":
+        "V_jTcP2zfNlWD4KhwKKcJw", "results": [], "neurostore_url": null}'
+    headers:
+      Connection:
+      - close
+      Content-Length:
+      - '1750'
+      Content-Type:
+      - application/json
+      Date:
+      - Tue, 21 Oct 2025 14:14:50 GMT
+      Server:
+      - nginx/1.21.6
+      Strict-Transport-Security:
+      - max-age=31536000
+      Vary:
+      - Accept-Encoding
+    status:
+      code: 200
+      message: OK
+version: 1

compose_runner/tests/test_lambda_handlers.py
CHANGED
@@ -4,6 +4,8 @@ import json
 from datetime import datetime, timezone
 from typing import Any, Dict

+import pytest
+
 from compose_runner.aws_lambda import log_poll_handler, results_handler, run_handler, status_handler


@@ -23,6 +25,28 @@ def _make_http_event(payload: Dict[str, Any]) -> Dict[str, Any]:
     }


+def test_requires_large_task_detection():
+    spec = {"corrector": {"type": "FWECorrector", "args": {"method": "montecarlo"}}}
+    assert run_handler._requires_large_task(spec)
+
+
+def test_requires_large_task_false_when_method_differs():
+    spec = {"corrector": {"type": "FWECorrector", "args": {"method": "bonferroni"}}}
+    assert run_handler._requires_large_task(spec) is False
+
+
+@pytest.mark.vcr(record_mode="once")
+def test_select_task_size_uses_large_for_montecarlo():
+    task_size = run_handler._select_task_size("ZPSvyvhZAopz", "staging", "artifact-test")
+    assert task_size == "large"
+
+
+@pytest.mark.vcr(record_mode="once")
+def test_select_task_size_uses_standard_for_fdr():
+    task_size = run_handler._select_task_size("VtFZJFniCKvG", "staging", "artifact-test")
+    assert task_size == "standard"
+
+
 def test_run_handler_http_success(monkeypatch, tmp_path):
     captured = {}

@@ -36,6 +60,7 @@ def test_run_handler_http_success(monkeypatch, tmp_path):
                 ...

     monkeypatch.setattr(run_handler, "_SFN_CLIENT", FakeSFN())
+    monkeypatch.setattr(run_handler, "_select_task_size", lambda *args: "standard")
     monkeypatch.setenv("STATE_MACHINE_ARN", "arn:aws:states:state-machine")
     monkeypatch.setenv("RESULTS_BUCKET", "bucket")
     monkeypatch.setenv("RESULTS_PREFIX", "prefix")
@@ -63,6 +88,32 @@ def test_run_handler_http_success(monkeypatch, tmp_path):
     assert input_doc["results"]["prefix"] == "prefix"
     assert input_doc["nsc_key"] == "nsc"
     assert input_doc["nv_key"] == "nv"
+    assert input_doc["task_size"] == "standard"
+
+
+def test_run_handler_http_uses_large_task(monkeypatch):
+    captured = {}
+
+    class FakeSFN:
+        def start_execution(self, **kwargs):
+            captured.update(kwargs)
+            return {"executionArn": "arn:aws:states:us-east-1:123:execution:state-machine:run-456"}
+
+        class exceptions:
+            class ExecutionAlreadyExists(Exception):
+                ...
+
+    monkeypatch.setattr(run_handler, "_SFN_CLIENT", FakeSFN())
+    monkeypatch.setattr(run_handler, "_select_task_size", lambda *args: "large")
+    monkeypatch.setenv("STATE_MACHINE_ARN", "arn:aws:states:state-machine")
+    monkeypatch.setenv("RESULTS_BUCKET", "bucket")
+    monkeypatch.setenv("RESULTS_PREFIX", "prefix")
+
+    event = _make_http_event({"meta_analysis_id": "abc123"})
+    response = run_handler.handler(event, DummyContext())
+    assert response["statusCode"] == 202
+    input_doc = json.loads(captured["input"])
+    assert input_doc["task_size"] == "large"


 def test_run_handler_missing_meta_analysis(monkeypatch):
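
Alongside the recorded-cassette tests above, a hedged sketch (not part of the released test suite) showing how `_select_task_size` can be exercised offline by stubbing `_fetch_meta_analysis`:

```python
from unittest import mock

from compose_runner.aws_lambda import run_handler

# A pared-down document containing only the field _select_task_size inspects.
fwe_doc = {"specification": {"corrector": {"type": "FWECorrector", "args": {"method": "montecarlo"}}}}

# A montecarlo FWE corrector yields the large task size.
with mock.patch.object(run_handler, "_fetch_meta_analysis", return_value=fwe_doc):
    assert run_handler._select_task_size("any-id", "staging", "artifact-test") == "large"

# When the fetch fails (returns None), selection falls back to "standard".
with mock.patch.object(run_handler, "_fetch_meta_analysis", return_value=None):
    assert run_handler._select_task_size("any-id", "staging", "artifact-test") == "standard"
```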

{compose_runner-0.6.3rc1.dist-info → compose_runner-0.6.4rc2.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: compose-runner
-Version: 0.6.3rc1
+Version: 0.6.4rc2
 Summary: A package for running neurosynth-compose analyses
 Project-URL: Repository, https://github.com/neurostuff/compose-runner
 Author-email: James Kent <jamesdkent21@gmail.com>
@@ -67,7 +67,7 @@ The deployed architecture works like this:
 Pass `-c resultsBucketName=<bucket>` to use an existing S3 bucket, or omit it
 to let the stack create and retain a dedicated bucket. Additional knobs:

-
+- `-c stateMachineTimeoutSeconds=32400` to control the max wall clock per run
 - `-c submitTimeoutSeconds` / `-c statusTimeoutSeconds` / `-c pollTimeoutSeconds`
   to tune Lambda timeouts
 - `-c taskEphemeralStorageGiB` if the default 21 GiB scratch volume is insufficient

{compose_runner-0.6.3rc1.dist-info → compose_runner-0.6.4rc2.dist-info}/RECORD
CHANGED
@@ -1,27 +1,29 @@
 compose_runner/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-compose_runner/_version.py,sha256=
+compose_runner/_version.py,sha256=cybk8XYVdWK2Je29E0k4lwZfcW6ZxTZItOHcu6hpJqk,714
 compose_runner/cli.py,sha256=1tkxFgEe8Yk7VkzE8qxGmCGqLU7UbGin2VaP0AiZkVg,1101
-compose_runner/ecs_task.py,sha256=
+compose_runner/ecs_task.py,sha256=15CyLsaf2xrvWM-gGPDOXj_Hq8eJJPiH7xyRas93zn0,5352
 compose_runner/run.py,sha256=yIh8Fj8dfVKvahRl483qGOsDUoAS1FdsYrKZp_HknGo,18525
 compose_runner/sentry.py,sha256=pjqwsZrXrKB0cCy-TL-_2eYJIqUU0aV-8e0SWUk-9Xw,320
 compose_runner/aws_lambda/__init__.py,sha256=yZNXXv7gCPSrtLCEX5Qf4cnzSTS3fHPV6k-SyZwiZIA,48
 compose_runner/aws_lambda/common.py,sha256=cA2G5lO4P8uVBqJaYcU6Y3P3t3syoTmk4SpLKZhAFo8,1688
 compose_runner/aws_lambda/log_poll_handler.py,sha256=eEU-Ra_-17me3e4eqSTd2Nv_qoaOl7zi3kIxD58Tbek,1905
 compose_runner/aws_lambda/results_handler.py,sha256=vSxs4nbWyBmkFFKRGIp5-T4W2hPh9zgj7uNH-e18aW8,2107
-compose_runner/aws_lambda/run_handler.py,sha256=
+compose_runner/aws_lambda/run_handler.py,sha256=WbaoRp5hsWNqAgDamW9BPihp1tfVVTM94_HKHL4Uawg,6562
 compose_runner/aws_lambda/status_handler.py,sha256=K_VDyPYY3ExiyalDyf35nXi3UZzqj4AenWmlxkzWNXo,3423
 compose_runner/tests/conftest.py,sha256=ijb1iw724izKMxrvclt5x7LljTGoBfHwSS-jIEUe-sQ,191
 compose_runner/tests/test_cli.py,sha256=G3Kz7Nbl2voJ_luXPL7E6slkRNF9lmcpZ-nHBAqeL-M,290
 compose_runner/tests/test_ecs_task.py,sha256=kY_3mPXjUTiKlLMiJpCmNIQP7cWWw047aKVB8-Rr8Ws,463
-compose_runner/tests/test_lambda_handlers.py,sha256=
+compose_runner/tests/test_lambda_handlers.py,sha256=WDoOFR5r-RXTXzasAqGcpVI_G736g1YGdB90WetIZBo,9028
 compose_runner/tests/test_run.py,sha256=Nhx7wz8XxQuxy3kT5yoE_S1Hw0Mgmfn8TWYOZXm1_Gg,1795
+compose_runner/tests/cassettes/test_lambda_handlers/test_select_task_size_uses_large_for_montecarlo.yaml,sha256=Btgzi8zzN9AwK7EbjJYdLIwv2ep3IiJlIHimcyCfrYw,2748
+compose_runner/tests/cassettes/test_lambda_handlers/test_select_task_size_uses_standard_for_fdr.yaml,sha256=40wd30GbtwmhWgfz9cxPdRC8fZiSZOy9Fdc8Eiovacg,2591
 compose_runner/tests/cassettes/test_run/test_download_bundle.yaml,sha256=vgdGDqirjBHosQsspkaN5Ty6XqJbkYUAbGtdImym5xI,79304
 compose_runner/tests/cassettes/test_run/test_run_database_workflow.yaml,sha256=ay0aHtU-nmVWvbmN_EIgO9MMkC4ZeQljKU8nkTXOoDw,8724312
 compose_runner/tests/cassettes/test_run/test_run_group_comparison_workflow.yaml,sha256=FaZpMdcaM7TMgyueyZBGftm6ywUh1HhtGmCegXUmRFA,4029712
 compose_runner/tests/cassettes/test_run/test_run_string_group_comparison_workflow.yaml,sha256=pcn6tQwrimhDtP8yJ3jFlsfEOnk8FWybYQr9IQ5A_KA,3233839
 compose_runner/tests/cassettes/test_run/test_run_workflow.yaml,sha256=0Nk7eJWAmgYALG2ODrezbRhpYsc00JiuYVjXt3TUm5c,3857234
-compose_runner-0.6.
-compose_runner-0.6.
-compose_runner-0.6.
-compose_runner-0.6.
-compose_runner-0.6.
+compose_runner-0.6.4rc2.dist-info/METADATA,sha256=Q2Eb4lIUAgE88_wxf5xjxcBeL2r3EEvKqHzP7pC2CWc,3435
+compose_runner-0.6.4rc2.dist-info/WHEEL,sha256=tkmg4JIqwd9H8mL30xA7crRmoStyCtGp0VWshokd1Jc,105
+compose_runner-0.6.4rc2.dist-info/entry_points.txt,sha256=TyPmB9o2tSWw8L3mcach9r2EL7inRVXE9ew3_XReMIY,55
+compose_runner-0.6.4rc2.dist-info/licenses/LICENSE,sha256=PeiWxrrRme2rIpPMV9vjgGe7UHEKCIcTb0KagYhnyqo,1313
+compose_runner-0.6.4rc2.dist-info/RECORD,,

{compose_runner-0.6.3rc1.dist-info → compose_runner-0.6.4rc2.dist-info}/WHEEL
File without changes

{compose_runner-0.6.3rc1.dist-info → compose_runner-0.6.4rc2.dist-info}/entry_points.txt
File without changes

{compose_runner-0.6.3rc1.dist-info → compose_runner-0.6.4rc2.dist-info}/licenses/LICENSE
File without changes