parsl 2024.10.14__py3-none-any.whl → 2024.10.28__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- parsl/channels/base.py +0 -11
- parsl/channels/errors.py +0 -17
- parsl/channels/local/local.py +3 -16
- parsl/channels/ssh/ssh.py +0 -11
- parsl/dataflow/dflow.py +1 -1
- parsl/executors/high_throughput/executor.py +16 -9
- parsl/executors/high_throughput/interchange.py +8 -5
- parsl/executors/high_throughput/manager_selector.py +30 -0
- parsl/executors/high_throughput/process_worker_pool.py +1 -9
- parsl/monitoring/db_manager.py +1 -1
- parsl/monitoring/monitoring.py +5 -5
- parsl/monitoring/radios.py +2 -2
- parsl/monitoring/router.py +4 -7
- parsl/monitoring/types.py +3 -6
- parsl/providers/base.py +0 -16
- parsl/providers/kubernetes/kube.py +35 -28
- parsl/tests/{integration/test_channels → test_channels}/test_local_channel.py +4 -8
- parsl/tests/test_htex/test_block_manager_selector_unit.py +20 -0
- parsl/tests/test_htex/test_drain.py +6 -4
- parsl/tests/test_htex/test_manager_selector_by_block.py +53 -0
- parsl/tests/test_htex/test_resource_spec_validation.py +7 -0
- parsl/tests/test_providers/test_kubernetes_provider.py +102 -0
- parsl/tests/test_scaling/test_worker_interchange_bad_messages_3262.py +92 -0
- parsl/tests/test_serialization/test_3495_deserialize_managerlost.py +1 -1
- parsl/tests/test_utils/test_sanitize_dns.py +76 -0
- parsl/utils.py +78 -0
- parsl/version.py +1 -1
- {parsl-2024.10.14.data → parsl-2024.10.28.data}/scripts/interchange.py +8 -5
- {parsl-2024.10.14.data → parsl-2024.10.28.data}/scripts/process_worker_pool.py +1 -9
- {parsl-2024.10.14.dist-info → parsl-2024.10.28.dist-info}/METADATA +2 -2
- {parsl-2024.10.14.dist-info → parsl-2024.10.28.dist-info}/RECORD +37 -33
- parsl/tests/integration/test_channels/test_channels.py +0 -17
- {parsl-2024.10.14.data → parsl-2024.10.28.data}/scripts/exec_parsl_function.py +0 -0
- {parsl-2024.10.14.data → parsl-2024.10.28.data}/scripts/parsl_coprocess.py +0 -0
- {parsl-2024.10.14.dist-info → parsl-2024.10.28.dist-info}/LICENSE +0 -0
- {parsl-2024.10.14.dist-info → parsl-2024.10.28.dist-info}/WHEEL +0 -0
- {parsl-2024.10.14.dist-info → parsl-2024.10.28.dist-info}/entry_points.txt +0 -0
- {parsl-2024.10.14.dist-info → parsl-2024.10.28.dist-info}/top_level.txt +0 -0
parsl/tests/test_htex/test_drain.py
CHANGED
@@ -13,7 +13,9 @@ from parsl.providers import LocalProvider
 # based around the expected drain period: the drain period
 # is TIME_CONST seconds, and the single executed task will
 # last twice that many number of seconds.
-TIME_CONST =
+TIME_CONST = 4
+
+CONNECTED_MANAGERS_POLL_MS = 100
 
 
 def local_config():
@@ -52,7 +54,7 @@ def test_drain(try_assert):
 
     # wait till we have a block running...
 
-    try_assert(lambda: len(htex.connected_managers()) == 1)
+    try_assert(lambda: len(htex.connected_managers()) == 1, check_period_ms=CONNECTED_MANAGERS_POLL_MS)
 
     managers = htex.connected_managers()
     assert managers[0]['active'], "The manager should be active"
@@ -63,7 +65,7 @@ def test_drain(try_assert):
     time.sleep(TIME_CONST)
 
     # this assert should happen *very fast* after the above delay...
-    try_assert(lambda: htex.connected_managers()[0]['draining'], timeout_ms=500)
+    try_assert(lambda: htex.connected_managers()[0]['draining'], timeout_ms=500, check_period_ms=CONNECTED_MANAGERS_POLL_MS)
 
     # and the test task should still be running...
     assert not fut.done(), "The test task should still be running"
@@ -76,4 +78,4 @@ def test_drain(try_assert):
     # connected managers.
     # As with the above draining assert, this should happen very fast after
    # the task ends.
-    try_assert(lambda: len(htex.connected_managers()) == 0, timeout_ms=500)
+    try_assert(lambda: len(htex.connected_managers()) == 0, timeout_ms=500, check_period_ms=CONNECTED_MANAGERS_POLL_MS)
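The two changes in this test work together: a longer TIME_CONST gives the drain window more slack, and every try_assert now polls connected_managers() at a fixed 100 ms period instead of the fixture's default. try_assert is a fixture from parsl's test suite; as a rough sketch of the polling pattern it implements (assumed semantics, not parsl's actual fixture code):

# Hypothetical sketch of a polling assert in the spirit of parsl's
# try_assert fixture -- illustrative only, not the real implementation.
import time


def try_assert(predicate, timeout_ms=5000, check_period_ms=20):
    """Re-evaluate predicate every check_period_ms until it holds or
    timeout_ms elapses, then fail."""
    deadline = time.monotonic() + timeout_ms / 1000
    while time.monotonic() < deadline:
        if predicate():
            return
        time.sleep(check_period_ms / 1000)  # poll interval under test control
    raise AssertionError("condition did not become true before timeout")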
parsl/tests/test_htex/test_manager_selector_by_block.py
ADDED
@@ -0,0 +1,53 @@
+import time
+
+import pytest
+
+import parsl
+from parsl.app.app import bash_app, python_app
+from parsl.channels import LocalChannel
+from parsl.config import Config
+from parsl.executors import HighThroughputExecutor
+from parsl.executors.high_throughput.manager_selector import (
+    BlockIdManagerSelector,
+    ManagerSelector,
+)
+from parsl.launchers import WrappedLauncher
+from parsl.providers import LocalProvider
+from parsl.usage_tracking.levels import LEVEL_1
+
+BLOCK_COUNT = 2
+
+
+@parsl.python_app
+def get_worker_pid():
+    import os
+    return os.environ.get('PARSL_WORKER_BLOCK_ID')
+
+
+@pytest.mark.local
+def test_block_id_selection(try_assert):
+    htex = HighThroughputExecutor(
+        label="htex_local",
+        max_workers_per_node=1,
+        manager_selector=BlockIdManagerSelector(),
+        provider=LocalProvider(
+            channel=LocalChannel(),
+            init_blocks=BLOCK_COUNT,
+            max_blocks=BLOCK_COUNT,
+            min_blocks=BLOCK_COUNT,
+        ),
+    )
+
+    config = Config(
+        executors=[htex],
+        usage_tracking=LEVEL_1,
+    )
+
+    with parsl.load(config):
+        blockids = []
+        try_assert(lambda: len(htex.connected_managers()) == BLOCK_COUNT, timeout_ms=20000)
+        for i in range(10):
+            future = get_worker_pid()
+            blockids.append(future.result())
+
+        assert all(blockid == "1" for blockid in blockids)
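This test drives the new BlockIdManagerSelector (added in parsl/executors/high_throughput/manager_selector.py, +30 lines in the file list above): with two blocks of one worker each, every task should land on the manager from the highest block id ("1"). The selector interface itself is not shown in this diff; a hypothetical sketch of a block-id-preferring selector, assuming a sort_managers()-style hook and a 'block_id' field in the interchange's manager records:

# Hypothetical sketch only -- the real interface is defined in
# parsl/executors/high_throughput/manager_selector.py, not shown here.
from typing import Dict, List, Set


class BlockIdManagerSelectorSketch:
    def sort_managers(self, ready_managers: Dict[bytes, dict],
                      manager_list: Set[bytes]) -> List[bytes]:
        # Prefer managers from higher-numbered blocks, so with two blocks
        # ("0" and "1") all tasks go to block "1", as the test asserts.
        return sorted(manager_list,
                      key=lambda m: ready_managers[m]['block_id'],
                      reverse=True)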
parsl/tests/test_htex/test_resource_spec_validation.py
CHANGED
@@ -30,6 +30,13 @@ def test_resource_spec_validation():
     assert ret_val is None
 
 
+@pytest.mark.local
+def test_resource_spec_validation_one_key():
+    htex = HighThroughputExecutor()
+    ret_val = htex.validate_resource_spec({"priority": 2})
+    assert ret_val is None
+
+
 @pytest.mark.local
 def test_resource_spec_validation_bad_keys():
     htex = HighThroughputExecutor()
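The new case pins down that a resource spec containing only a "priority" key validates cleanly. Hypothetical usage, inferred from the test names (the bad-keys test body is truncated above, and "num_nodes" is an invented example of an unknown key):

from parsl.executors import HighThroughputExecutor

htex = HighThroughputExecutor()
assert htex.validate_resource_spec({"priority": 2}) is None   # accepted, per the test
# htex.validate_resource_spec({"num_nodes": 2})  # expected to raise for unknown keys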
parsl/tests/test_providers/test_kubernetes_provider.py
ADDED
@@ -0,0 +1,102 @@
+import re
+from unittest import mock
+
+import pytest
+
+from parsl.providers.kubernetes.kube import KubernetesProvider
+from parsl.tests.test_utils.test_sanitize_dns import DNS_SUBDOMAIN_REGEX
+
+_MOCK_BASE = "parsl.providers.kubernetes.kube"
+
+
+@pytest.fixture(autouse=True)
+def mock_kube_config():
+    with mock.patch(f"{_MOCK_BASE}.config") as mock_config:
+        mock_config.load_kube_config.return_value = None
+        yield mock_config
+
+
+@pytest.fixture
+def mock_kube_client():
+    mock_client = mock.MagicMock()
+    with mock.patch(f"{_MOCK_BASE}.client.CoreV1Api") as mock_api:
+        mock_api.return_value = mock_client
+        yield mock_client
+
+
+@pytest.mark.local
+def test_submit_happy_path(mock_kube_client: mock.MagicMock):
+    image = "test-image"
+    namespace = "test-namespace"
+    cmd_string = "test-command"
+    volumes = [("test-volume", "test-mount-path")]
+    service_account_name = "test-service-account"
+    annotations = {"test-annotation": "test-value"}
+    max_cpu = 2
+    max_mem = "2Gi"
+    init_cpu = 1
+    init_mem = "1Gi"
+    provider = KubernetesProvider(
+        image=image,
+        persistent_volumes=volumes,
+        namespace=namespace,
+        service_account_name=service_account_name,
+        annotations=annotations,
+        max_cpu=max_cpu,
+        max_mem=max_mem,
+        init_cpu=init_cpu,
+        init_mem=init_mem,
+    )
+
+    job_name = "test.job.name"
+    job_id = provider.submit(cmd_string=cmd_string, tasks_per_node=1, job_name=job_name)
+
+    assert job_id in provider.resources
+    assert mock_kube_client.create_namespaced_pod.call_count == 1
+
+    call_args = mock_kube_client.create_namespaced_pod.call_args[1]
+    pod = call_args["body"]
+    container = pod.spec.containers[0]
+    volume = container.volume_mounts[0]
+
+    assert image == container.image
+    assert namespace == call_args["namespace"]
+    assert any(cmd_string in arg for arg in container.args)
+    assert volumes[0] == (volume.name, volume.mount_path)
+    assert service_account_name == pod.spec.service_account_name
+    assert annotations == pod.metadata.annotations
+    assert str(max_cpu) == container.resources.limits["cpu"]
+    assert max_mem == container.resources.limits["memory"]
+    assert str(init_cpu) == container.resources.requests["cpu"]
+    assert init_mem == container.resources.requests["memory"]
+    assert job_id == pod.metadata.labels["parsl-job-id"]
+    assert job_id == container.name
+    assert f"{job_name}.{job_id}" == pod.metadata.name
+
+
+@pytest.mark.local
+@mock.patch(f"{_MOCK_BASE}.KubernetesProvider._create_pod")
+@pytest.mark.parametrize("char", (".", "-"))
+def test_submit_pod_name_includes_job_id(mock_create_pod: mock.MagicMock, char: str):
+    provider = KubernetesProvider(image="test-image")
+
+    job_name = "a." * 121 + f"a{char}" + "a" * 9
+    assert len(job_name) == 253  # Max length for pod name
+    job_id = provider.submit(cmd_string="test-command", tasks_per_node=1, job_name=job_name)
+
+    expected_pod_name = job_name[:253 - len(job_id) - 2] + f".{job_id}"
+    actual_pod_name = mock_create_pod.call_args[1]["pod_name"]
+    assert re.match(DNS_SUBDOMAIN_REGEX, actual_pod_name)
+    assert expected_pod_name == actual_pod_name
+
+
+@pytest.mark.local
+@mock.patch(f"{_MOCK_BASE}.KubernetesProvider._create_pod")
+@mock.patch(f"{_MOCK_BASE}.logger")
+@pytest.mark.parametrize("job_name", ("", ".", "-", "a.-.a", "$$$"))
+def test_submit_invalid_job_name(mock_logger: mock.MagicMock, mock_create_pod: mock.MagicMock, job_name: str):
+    provider = KubernetesProvider(image="test-image")
+    job_id = provider.submit(cmd_string="test-command", tasks_per_node=1, job_name=job_name)
+    assert mock_logger.warning.call_count == 1
+    assert f"Invalid pod name '{job_name}' for job '{job_id}'" in mock_logger.warning.call_args[0][0]
+    assert f"parsl.kube.{job_id}" == mock_create_pod.call_args[1]["pod_name"]
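The pod-name test encodes the truncation rule: keep enough of the job name that name, separator, and job id still fit within Kubernetes' 253-character limit for pod names. A worked example of that arithmetic (the 8-character job id is an assumption for illustration):

# Illustrative values only; the job id length is assumed.
job_id = "abcd1234"                        # assumed 8-char id
job_name = "a" * 253                       # already at the 253-char cap
keep = 253 - len(job_id) - 2               # 243 chars of the name survive
pod_name = job_name[:keep] + f".{job_id}"  # name + "." + id
assert len(pod_name) == 252                # within the 253-char limit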
parsl/tests/test_scaling/test_worker_interchange_bad_messages_3262.py
ADDED
@@ -0,0 +1,92 @@
+import os
+import signal
+import time
+
+import pytest
+import zmq
+
+import parsl
+from parsl.channels import LocalChannel
+from parsl.config import Config
+from parsl.executors import HighThroughputExecutor
+from parsl.launchers import SimpleLauncher
+from parsl.providers import LocalProvider
+
+T_s = 1
+
+
+def fresh_config():
+    htex = HighThroughputExecutor(
+        heartbeat_period=1 * T_s,
+        heartbeat_threshold=3 * T_s,
+        label="htex_local",
+        worker_debug=True,
+        cores_per_worker=1,
+        encrypted=False,
+        provider=LocalProvider(
+            channel=LocalChannel(),
+            init_blocks=0,
+            min_blocks=0,
+            max_blocks=0,
+            launcher=SimpleLauncher(),
+        ),
+    )
+    c = Config(
+        executors=[htex],
+        strategy='none',
+        strategy_period=0.5,
+    )
+    return c, htex
+
+
+@parsl.python_app
+def app():
+    return 7
+
+
+@pytest.mark.local
+@pytest.mark.parametrize("msg",
+                         (b'FuzzyByte\rSTREAM',  # not JSON
+                          b'{}',  # missing fields
+                          b'{"type":"heartbeat"}',  # regression test #3262
+                          )
+                         )
+def test_bad_messages(try_assert, msg):
+    """This tests that the interchange is resilient to a few different bad
+    messages: malformed messages caused by implementation errors, and
+    heartbeat messages from managers that are not registered.
+
+    The heartbeat test is a regression test for issues #3262, #3632
+    """
+
+    c, htex = fresh_config()
+
+    with parsl.load(c):
+
+        # send a bad message into the interchange on the task_outgoing worker
+        # channel, and then check that the interchange is still alive enough
+        # that we can scale out a block and run a task.
+
+        (task_port, result_port) = htex.command_client.run("WORKER_PORTS")
+
+        context = zmq.Context()
+        channel_timeout = 10000  # in milliseconds
+        task_channel = context.socket(zmq.DEALER)
+        task_channel.setsockopt(zmq.LINGER, 0)
+        task_channel.setsockopt(zmq.IDENTITY, b'testid')
+
+        task_channel.set_hwm(0)
+        task_channel.setsockopt(zmq.SNDTIMEO, channel_timeout)
+        task_channel.connect(f"tcp://localhost:{task_port}")
+
+        task_channel.send(msg)
+
+        # If the interchange exits, it's likely that this test will hang rather
+        # than raise an error, because the interchange interaction code
+        # assumes the interchange is always there.
+        # In the case of issue #3262, an exception message goes to stderr, and
+        # no error goes to the interchange log file.
+        htex.scale_out_facade(1)
+        try_assert(lambda: len(htex.connected_managers()) == 1, timeout_ms=10000)
+
+        assert app().result() == 7
parsl/tests/test_serialization/test_3495_deserialize_managerlost.py
CHANGED
@@ -32,7 +32,7 @@ def test_manager_lost_system_failure(tmpd_cwd):
         cores_per_worker=1,
         worker_logdir_root=str(tmpd_cwd),
         heartbeat_period=1,
-        heartbeat_threshold=
+        heartbeat_threshold=3,
     )
     c = Config(executors=[hte], strategy='simple', strategy_period=0.1)
 
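For context on the tuned value: following parsl's documented semantics for these two knobs, heartbeat_period is how often the worker pool heartbeats and heartbeat_threshold is how long the interchange waits in silence before declaring a manager lost (the old threshold value is truncated in the diff above). Roughly:

heartbeat_period = 1     # seconds between heartbeats from the worker pool
heartbeat_threshold = 3  # seconds of silence before the manager is marked lost
missed_heartbeats_tolerated = heartbeat_threshold // heartbeat_period  # == 3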
parsl/tests/test_utils/test_sanitize_dns.py
ADDED
@@ -0,0 +1,76 @@
+import random
+import re
+
+import pytest
+
+from parsl.utils import sanitize_dns_label_rfc1123, sanitize_dns_subdomain_rfc1123
+
+# Ref: https://datatracker.ietf.org/doc/html/rfc1123
+DNS_LABEL_REGEX = r'^[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?$'
+DNS_SUBDOMAIN_REGEX = r'^[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?(\.[a-z0-9]([-a-z0-9]{0,61}[a-z0-9])?)*$'
+
+test_labels = [
+    "example-label-123",  # Valid label
+    "EXAMPLE",  # Case sensitivity
+    "!@#example*",  # Remove invalid characters
+    "--leading-and-trailing--",  # Leading and trailing hyphens
+    "..leading.and.trailing..",  # Leading and trailing dots
+    "multiple..dots",  # Consecutive dots
+    "valid--label",  # Consecutive hyphens
+    "a" * random.randint(64, 70),  # Longer than 63 characters
+    f"{'a' * 62}-a",  # Trailing hyphen at max length
+]
+
+
+def _generate_test_subdomains(num_subdomains: int):
+    subdomains = []
+    for _ in range(num_subdomains):
+        num_labels = random.randint(1, 5)
+        labels = [test_labels[random.randint(0, num_labels - 1)] for _ in range(num_labels)]
+        subdomain = ".".join(labels)
+        subdomains.append(subdomain)
+    return subdomains
+
+
+@pytest.mark.local
+@pytest.mark.parametrize("raw_string", test_labels)
+def test_sanitize_dns_label_rfc1123(raw_string: str):
+    print(sanitize_dns_label_rfc1123(raw_string))
+    assert re.match(DNS_LABEL_REGEX, sanitize_dns_label_rfc1123(raw_string))
+
+
+@pytest.mark.local
+@pytest.mark.parametrize("raw_string", ("", "-", "@", "$$$"))
+def test_sanitize_dns_label_rfc1123_empty(raw_string: str):
+    with pytest.raises(ValueError) as e_info:
+        sanitize_dns_label_rfc1123(raw_string)
+    assert str(e_info.value) == f"Sanitized DNS label is empty for input '{raw_string}'"
+
+
+@pytest.mark.local
+@pytest.mark.parametrize("raw_string", _generate_test_subdomains(10))
+def test_sanitize_dns_subdomain_rfc1123(raw_string: str):
+    assert re.match(DNS_SUBDOMAIN_REGEX, sanitize_dns_subdomain_rfc1123(raw_string))
+
+
+@pytest.mark.local
+@pytest.mark.parametrize("char", ("-", "."))
+def test_sanitize_dns_subdomain_rfc1123_trailing_non_alphanumeric_at_max_length(char: str):
+    raw_string = (f"{'a' * 61}." * 4) + f".aaaa{char}a"
+    assert re.match(DNS_SUBDOMAIN_REGEX, sanitize_dns_subdomain_rfc1123(raw_string))
+
+
+@pytest.mark.local
+@pytest.mark.parametrize("raw_string", ("", ".", "..."))
+def test_sanitize_dns_subdomain_rfc1123_empty(raw_string: str):
+    with pytest.raises(ValueError) as e_info:
+        sanitize_dns_subdomain_rfc1123(raw_string)
+    assert str(e_info.value) == f"Sanitized DNS subdomain is empty for input '{raw_string}'"
+
+
+@pytest.mark.local
+@pytest.mark.parametrize(
+    "raw_string", ("a" * 253, "a" * random.randint(254, 300)), ids=("254 chars", ">253 chars")
+)
+def test_sanitize_dns_subdomain_rfc1123_max_length(raw_string: str):
+    assert len(sanitize_dns_subdomain_rfc1123(raw_string)) <= 253
parsl/utils.py
CHANGED
@@ -1,6 +1,7 @@
 import inspect
 import logging
 import os
+import re
 import shlex
 import subprocess
 import threading
@@ -380,3 +381,80 @@ class AutoCancelTimer(threading.Timer):
         exc_tb: Optional[TracebackType]
     ) -> None:
         self.cancel()
+
+
+def sanitize_dns_label_rfc1123(raw_string: str) -> str:
+    """Convert input string to a valid RFC 1123 DNS label.
+
+    Parameters
+    ----------
+    raw_string : str
+        String to sanitize.
+
+    Returns
+    -------
+    str
+        Sanitized string.
+
+    Raises
+    ------
+    ValueError
+        If the string is empty after sanitization.
+    """
+    # Convert to lowercase and replace non-alphanumeric characters with hyphen
+    sanitized = re.sub(r'[^a-z0-9]', '-', raw_string.lower())
+
+    # Remove consecutive hyphens
+    sanitized = re.sub(r'-+', '-', sanitized)
+
+    # DNS label cannot exceed 63 characters
+    sanitized = sanitized[:63]
+
+    # Strip after trimming to avoid trailing hyphens
+    sanitized = sanitized.strip("-")
+
+    if not sanitized:
+        raise ValueError(f"Sanitized DNS label is empty for input '{raw_string}'")
+
+    return sanitized
+
+
+def sanitize_dns_subdomain_rfc1123(raw_string: str) -> str:
+    """Convert input string to a valid RFC 1123 DNS subdomain.
+
+    Parameters
+    ----------
+    raw_string : str
+        String to sanitize.
+
+    Returns
+    -------
+    str
+        Sanitized string.
+
+    Raises
+    ------
+    ValueError
+        If the string is empty after sanitization.
+    """
+    segments = raw_string.split('.')
+
+    sanitized_segments = []
+    for segment in segments:
+        if not segment:
+            continue
+        sanitized_segment = sanitize_dns_label_rfc1123(segment)
+        sanitized_segments.append(sanitized_segment)
+
+    sanitized = '.'.join(sanitized_segments)
+
+    # DNS subdomain cannot exceed 253 characters
+    sanitized = sanitized[:253]
+
+    # Strip after trimming to avoid trailing dots or hyphens
+    sanitized = sanitized.strip(".-")
+
+    if not sanitized:
+        raise ValueError(f"Sanitized DNS subdomain is empty for input '{raw_string}'")
+
+    return sanitized
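Example behaviour of the two new helpers, worked out directly from the implementation above:

from parsl.utils import sanitize_dns_label_rfc1123, sanitize_dns_subdomain_rfc1123

print(sanitize_dns_label_rfc1123("My_Worker!!Pool"))
# -> "my-worker-pool"  (lowercased, invalid chars become "-", runs collapsed)

print(sanitize_dns_subdomain_rfc1123("..Parsl..Kube__Job.."))
# -> "parsl.kube-job"  (each label sanitized separately, empty labels dropped)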
parsl/version.py
CHANGED
parsl/executors/high_throughput/interchange.py
CHANGED
@@ -66,7 +66,7 @@ class Interchange:
         If specified the interchange will only listen on this address for connections from workers
         else, it binds to all addresses.
 
-    client_ports :
+    client_ports : tuple(int, int, int)
         The ports at which the client can be reached
 
     worker_ports : tuple(int, int)
@@ -104,7 +104,6 @@ class Interchange:
         os.makedirs(self.logdir, exist_ok=True)
 
        start_file_logger("{}/interchange.log".format(self.logdir), level=logging_level)
-        logger.propagate = False
         logger.debug("Initializing Interchange process")
 
         self.client_address = client_address
@@ -437,9 +436,13 @@ class Interchange:
             logger.info(f"Manager {manager_id!r} has compatible Parsl version {msg['parsl_v']}")
             logger.info(f"Manager {manager_id!r} has compatible Python version {msg['python_v'].rsplit('.', 1)[0]}")
         elif msg['type'] == 'heartbeat':
-
-
-
+            manager = self._ready_managers.get(manager_id)
+            if manager:
+                manager['last_heartbeat'] = time.time()
+                logger.debug("Manager %r sent heartbeat via tasks connection", manager_id)
+                self.task_outgoing.send_multipart([manager_id, b'', PKL_HEARTBEAT_CODE])
+            else:
+                logger.warning("Received heartbeat via tasks connection for not-registered manager %r", manager_id)
         elif msg['type'] == 'drain':
             self._ready_managers[manager_id]['draining'] = True
             logger.debug("Manager %r requested drain", manager_id)
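The heartbeat fix replaces an unguarded dictionary lookup with dict.get(), so a heartbeat from a manager that never registered now logs a warning instead of raising KeyError (the crash behind issue #3262). The pattern in miniature:

import time

ready_managers = {}                        # no managers registered yet
manager = ready_managers.get(b'unknown')   # None, where [] would raise KeyError
if manager:
    manager['last_heartbeat'] = time.time()
else:
    print("ignoring heartbeat from unregistered manager")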
parsl/executors/high_throughput/process_worker_pool.py
CHANGED
@@ -362,7 +362,7 @@ class Manager:
             if tasks == HEARTBEAT_CODE:
                 logger.debug("Got heartbeat from interchange")
             elif tasks == DRAINED_CODE:
-                logger.info("Got
+                logger.info("Got fully drained message from interchange - setting kill flag")
                 kill_event.set()
             else:
                 task_recv_counter += len(tasks)
@@ -650,14 +650,6 @@ def worker(
         debug: bool,
         mpi_launcher: str,
 ):
-    """
-
-    Put request token into queue
-    Get task from task_queue
-    Pop request from queue
-    Put result into result_queue
-    """
-
     # override the global logger inherited from the __main__ process (which
     # usually logs to manager.log) with one specific to this worker.
     global logger
{parsl-2024.10.14.dist-info → parsl-2024.10.28.dist-info}/METADATA
CHANGED
@@ -1,9 +1,9 @@
 Metadata-Version: 2.1
 Name: parsl
-Version: 2024.10.14
+Version: 2024.10.28
 Summary: Simple data dependent workflows in Python
 Home-page: https://github.com/Parsl/parsl
-Download-URL: https://github.com/Parsl/parsl/archive/2024.10.14.tar.gz
+Download-URL: https://github.com/Parsl/parsl/archive/2024.10.28.tar.gz
 Author: The Parsl Team
 Author-email: parsl@googlegroups.com
 License: Apache 2.0