rucio-clients 37.0.0rc3__py3-none-any.whl → 37.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- rucio/cli/account.py +14 -14
- rucio/cli/command.py +9 -9
- rucio/cli/config.py +3 -3
- rucio/cli/did.py +13 -13
- rucio/cli/lifetime_exception.py +1 -1
- rucio/cli/replica.py +3 -3
- rucio/cli/rse.py +18 -18
- rucio/cli/rule.py +5 -5
- rucio/cli/scope.py +2 -2
- rucio/cli/subscription.py +4 -4
- rucio/client/baseclient.py +0 -3
- rucio/client/lifetimeclient.py +46 -13
- rucio/common/config.py +0 -26
- rucio/common/stomp_utils.py +119 -383
- rucio/common/utils.py +14 -17
- rucio/vcsversion.py +4 -4
- {rucio_clients-37.0.0rc3.dist-info → rucio_clients-37.1.0.dist-info}/METADATA +1 -1
- {rucio_clients-37.0.0rc3.dist-info → rucio_clients-37.1.0.dist-info}/RECORD +29 -29
- {rucio_clients-37.0.0rc3.data → rucio_clients-37.1.0.data}/data/etc/rse-accounts.cfg.template +0 -0
- {rucio_clients-37.0.0rc3.data → rucio_clients-37.1.0.data}/data/etc/rucio.cfg.atlas.client.template +0 -0
- {rucio_clients-37.0.0rc3.data → rucio_clients-37.1.0.data}/data/etc/rucio.cfg.template +0 -0
- {rucio_clients-37.0.0rc3.data → rucio_clients-37.1.0.data}/data/requirements.client.txt +0 -0
- {rucio_clients-37.0.0rc3.data → rucio_clients-37.1.0.data}/data/rucio_client/merge_rucio_configs.py +0 -0
- {rucio_clients-37.0.0rc3.data → rucio_clients-37.1.0.data}/scripts/rucio +0 -0
- {rucio_clients-37.0.0rc3.data → rucio_clients-37.1.0.data}/scripts/rucio-admin +0 -0
- {rucio_clients-37.0.0rc3.dist-info → rucio_clients-37.1.0.dist-info}/WHEEL +0 -0
- {rucio_clients-37.0.0rc3.dist-info → rucio_clients-37.1.0.dist-info}/licenses/AUTHORS.rst +0 -0
- {rucio_clients-37.0.0rc3.dist-info → rucio_clients-37.1.0.dist-info}/licenses/LICENSE +0 -0
- {rucio_clients-37.0.0rc3.dist-info → rucio_clients-37.1.0.dist-info}/top_level.txt +0 -0
rucio/client/lifetimeclient.py
CHANGED
@@ -39,10 +39,32 @@ class LifetimeClient(BaseClient):
             states: Optional['Sequence[LifetimeExceptionsState]'] = None
     ) -> 'Iterator[dict[str, Any]]':
         """
-
-
-
-
+        Lists lifetime model exceptions that allow extending data lifetimes beyond their configured policies.
+
+        The lifetime model exceptions are used to override the default lifecycle policies for data identifiers
+        (files, datasets, containers, or archives) that need to be kept longer than usual. These exceptions
+        can be filtered by their ID or approval state (this feature is not available yet).
+
+        :param exception_id: The unique identifier of a specific exception. If provided, returns only that exception.
+        :param states: Filter exceptions by their states. Possible values are:
+            - `A` (APPROVED): Exception was approved
+            - `R` (REJECTED): Exception was rejected
+            - `W` (WAITING): Exception is waiting for approval by an admin (or other authorized account)
+
+        :returns:
+            An iterator of dictionaries containing the exception details:
+            - `id`: The unique identifier of the exception
+            - `scope`: The scope of the data identifier
+            - `name`: The name of the data identifier
+            - `did_type`: Type of the data identifier:
+              `F` (file), `D` (dataset), `C` (container), `A` (archive),
+              `X` (deleted file), `Y` (deleted dataset), `Z` (deleted container)
+            - `account`: The account that requested the exception
+            - `pattern`: Pattern used for matching data identifiers
+            - `comments`: User provided comments explaining the exception
+            - `state`: Current state of the exception
+            - `created_at`: When the exception was created (returned as timestamp string)
+            - `expires_at`: When the exception expires (returned as timestamp string)
         """

         path = self.LIFETIME_BASEURL + '/'
@@ -70,16 +92,27 @@ class LifetimeClient(BaseClient):
             expires_at: 'datetime'
     ) -> dict[str, Any]:
         """
-
-
-
-
-
-
-        :param
-
-
+        Creates a lifetime model exception request to extend the expiration date of data identifiers (DIDs).
+
+        These exceptions allow requesting extensions to DIDs' lifetimes, subject to approval and configured
+        maximum extension periods. The request includes details about which DIDs should have extended
+        lifetimes, who is requesting it, and why it's needed.
+
+        :param dids: List of dictionaries containing the data identifiers to be excepted.
+            Each dictionary must contain:
+            - `scope`: The scope of the data identifier
+            - `name`: The name of the data identifier
+        :param account: The account requesting the exception
+        :param pattern: Associated pattern for the exception request
+        :param comments: Justification for why the exception is needed (e.g. "Needed for my XYZ analysis..")
+        :param expires_at: When the exception should expire (datetime object)
+
+        :returns: A dictionary containing:
+            - `exceptions`: Dictionary mapping exception IDs to lists of DIDs that were successfully added
+            - `unknown`: List of DIDs that could not be found
+            - `not_affected`: List of DIDs that did not qualify for an exception
         """
+
         path = self.LIFETIME_BASEURL + '/'
         url = build_url(choice(self.list_hosts), path=path)
         data = {'dids': dids, 'account': account, 'pattern': pattern, 'comments': comments, 'expires_at': expires_at}
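Taken together, the two hunks document the lifetime-exception workflow of the client: one call submits an extension request for a set of DIDs, the other iterates over existing exceptions. A minimal usage sketch follows; the method names `add_exception` and `list_exceptions` are not visible in the hunk headers above and are assumed from the Rucio client API, and the scope, dataset names and account are placeholders:

from datetime import datetime, timedelta

from rucio.client.lifetimeclient import LifetimeClient

client = LifetimeClient()

# Request an extension for two datasets; each dict carries the scope/name keys
# described in the new docstring. All identifiers here are placeholders.
result = client.add_exception(
    dids=[{'scope': 'user.jdoe', 'name': 'dataset_1'},
          {'scope': 'user.jdoe', 'name': 'dataset_2'}],
    account='jdoe',
    pattern=None,
    comments='Needed for my XYZ analysis',
    expires_at=datetime.utcnow() + timedelta(days=90),
)
print(result.get('exceptions'), result.get('unknown'), result.get('not_affected'))

# Iterate over all known exceptions; per the docstring, filtering by state is not available yet.
for exception in client.list_exceptions():
    print(exception['id'], exception['state'], exception['scope'], exception['name'])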
rucio/common/config.py
CHANGED
@@ -31,8 +31,6 @@ if TYPE_CHECKING:

    from sqlalchemy.orm import Session

-LEGACY_SECTION_NAME = {}
-LEGACY_OPTION_NAME = {}


 def convert_to_any_type(value: str) -> Union[bool, int, float, str]:
@@ -194,12 +192,6 @@ def config_get(
     try:
         return convert_type_fnc(get_config().get(section, option))
     except (configparser.NoOptionError, configparser.NoSectionError, ConfigNotFound) as err:
-        try:
-            legacy_config = get_legacy_config(section, option)
-            if legacy_config is not None:
-                return convert_type_fnc(legacy_config)
-        except ConfigNotFound:
-            pass

         from rucio.common.client import is_client
         client_mode = is_client()
@@ -220,24 +212,6 @@ def config_get(
     return default


-def get_legacy_config(section: str, option: str):
-    """
-    Returns a legacy config value, if it is present.
-
-    :param section: The section of the new config.
-    :param option: The option of the new config.
-    :returns: The string value of the legacy option if one is found, None otherwise.
-    """
-
-    section = LEGACY_SECTION_NAME.get(section, section)
-    option = LEGACY_OPTION_NAME.get(option, option)
-
-    if config_has_option(section, option):
-        return get_config().get(section, option)
-
-    return None
-
-
 def config_has_section(section: str) -> bool:
     """
     Indicates whether the named section is present in the configuration. The DEFAULT section is not acknowledged.
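This change drops the legacy section/option fallback entirely: `config_get` now consults only the named section and option, then falls back to `default` or raises, depending on `raise_exception`. A small illustrative call, with a placeholder section and option name:

from rucio.common.config import config_get

# Lookups that miss the named section/option no longer consult legacy names;
# they return the supplied default (or raise when raise_exception=True).
brokers = config_get('messaging-hermes', 'brokers', raise_exception=False, default=None)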
rucio/common/stomp_utils.py
CHANGED
@@ -15,409 +15,145 @@
 """
 Common utility functions for stomp connections
 """
-
+
 import logging
-import random
 import socket
-from collections import namedtuple
-from copy import deepcopy
-from functools import partial
 from time import monotonic
-from typing import TYPE_CHECKING
-
-from stomp import Connection12
-from stomp.exception import ConnectFailedException, NotConnectedException
-from stomp.listener import HeartbeatListener
+from typing import TYPE_CHECKING

-from
-from rucio.common.logging import formatted_logger
-from rucio.core.monitor import MetricManager
+from stomp import Connection

 if TYPE_CHECKING:
-    from collections.abc import
-
-    from stomp.connect import Frame
-
-    from rucio.common.types import LoggerFunction
-
-
-METRICS = MetricManager(module=__name__)
-
-
-class Connection(Connection12):
-    """
-    Connection class.
-
-    Wraps Stomp Connection but knows the brokers without accessing
-    hidden variables from the Transport.
-    """
-    def __init__(self, host_and_ports: list[tuple[str, int]], **kwargs):
-        """
-        Initialise.
-
-        Args:
-            host_and_ports: brokers list
-
-        Kwargs:
-            Arguments to pass to the Constructor12 base class.
-        """
-        super().__init__(host_and_ports=host_and_ports, **kwargs)
-        self._brokers = host_and_ports
-
-    @property
-    def brokers(self) -> list[tuple[str, int]]:
-        """
-        List brokers.
-
-        Returns:
-            All assigned brokers in (host, port) format.
-        """
-        return self._brokers
-
+    from collections.abc import Callable, Sequence
+    from typing import Any

-
-    """Listener Base."""
+    LoggerFunction = Callable[..., Any]

-    _logger = formatted_logger(logging.log, 'ListenerBase %s')

-
-
-
-
-
-
-
-
-            conn: The connection object that is using this listener
-            logger: Logger to use. Defaults to logging.getLogger(__name__).getChild(__qualname__).
-
-        Kwargs:
-            Arguments to pass to the stomp.ConnectionListener base class.
-        """
-        super().__init__(transport=conn.transport, **kwargs)
-        self._conn = conn
-        if logger is not None:
-            self._logger = logger
-
-    @METRICS.count_it
-    def on_heartbeat_timeout(self):
-        self._conn.disconnect()
-
-    @METRICS.count_it
-    def on_error(self, frame: "Frame"):
-        """
-        on_error
-        """
-        self._logger(logging.ERROR, 'Message receive error: [%s] %s', self._conn.brokers[0][0], frame.body)
-
-
-StompConfig = namedtuple("StompConfig", ('brokers', 'use_ssl', 'port', 'vhost',
-                                         'destination', 'key_file', 'cert_file',
-                                         'username', 'password', 'nonssl_port',
-                                         'reconnect_attempts_max', 'timeout', 'heartbeats'))
+def resolve_ips(fqdns: "Sequence[str]", logger: "LoggerFunction" = logging.log):
+    logger(logging.DEBUG, 'resolving dns aliases: %s' % fqdns)
+    resolved = []
+    for fqdn in fqdns:
+        addrinfos = socket.getaddrinfo(fqdn, 0, socket.AF_INET, 0, socket.IPPROTO_TCP)
+        resolved.extend(ai[4][0] for ai in addrinfos)
+    logger(logging.DEBUG, 'dns aliases resolved to %s', resolved)
+    return resolved


 class StompConnectionManager:
-    """Stomp Connection Manager."""
-
-    _logger = formatted_logger(logging.log, 'StompConnectionManager %s')
-
-    def __init__(self,
-                 config_section: str,
-                 logger: "None | LoggerFunction" = None):
-        """
-        Initialise.
-
-        Args:
-            config_section: The name of the config section for this manager to parse for configuration.
-            logger: logger to use. Defaults to logging.getLogger(__name__).getChild(__qualname__).
-        """
-        if logger is not None:
-            self._logger = logger
-        self._config = self._parse_config(config_section)
-        self._listener_factory = None
-        self._conns = []
-        for broker in self._config.brokers:
-            conn = Connection(host_and_ports=[broker],
-                              vhost=self._config.vhost,
-                              reconnect_attempts_max=self._config.reconnect_attempts_max,
-                              timeout=self._config.timeout,
-                              heartbeats=self._config.heartbeats)
-            if self._config.use_ssl:
-                conn.set_ssl(cert_file=self._config.cert_file, key_file=self._config.key_file)
-            self._conns.append(conn)
-
-    @property
-    def config(self) -> StompConfig:
-        """
-        Get the config.
-
-        Returns:
-            config object.
-        """
-        return deepcopy(self._config)
-
-    def set_listener_factory(self, name: str, listener_cls: type, **kwargs) -> None:
-        """
-        Setup listener factory
-
-        This method will setup a factory to create a name and listener for the arguments to
-        connection.set_listener based on pre-defined argument values.

-
-
-
-
-
-
-        self.
-
-
+    def __init__(self):
+        self._brokers = None
+        self._port = None
+        self._use_ssl = None
+        self._vhost = None
+        self._reconnect_attempts = None
+        self._ssl_key_file = None
+        self._timeout = None
+        self._heartbeats = None

-
-        """
-        Parse config section.
+        self._connections = {}

-
-
+    def is_stalled(self, connection: Connection, *, logger: "LoggerFunction" = logging.log):
+        if not connection.is_connected():
+            return True

-
-
+        if self._heartbeats and getattr(connection, 'received_heartbeat') and connection.received_heartbeat:
+            heartbeat_period_seconds = max(0, self._heartbeats[0], self._heartbeats[1]) / 1000

-
-
-        """
-        try:
-            brokers = config_get(config_section, 'brokers')
-        except Exception as exc:
-            self._logger(logging.ERROR, "Could not load brokers from configuration")
-            raise RuntimeError('Could not load brokers from configuration') from exc
+            if not heartbeat_period_seconds:
+                return False

-
-
-
-
-            raise RuntimeError('could not find use_ssl in configuration -- please update your rucio.cfg') from exc
+            now = monotonic()
+            if connection.received_heartbeat + 10 * heartbeat_period_seconds < now:
+                logger(logging.WARNING, "Stomp connection missed heartbeats for a long time")
+                return True

-
-        vhost = config_get(config_section, 'broker_virtual_host', raise_exception=False)
-        destination = config_get(config_section, "destination")
-        key_file = config_get(config_section, 'ssl_key_file', default=None, raise_exception=False)
-        cert_file = config_get(config_section, 'ssl_cert_file', default=None, raise_exception=False)
-        username = config_get(config_section, 'username', default=None, raise_exception=False)
-        password = config_get(config_section, 'password', default=None, raise_exception=False)
-        nonssl_port = config_get_int(config_section, 'nonssl_port', default=0, raise_exception=False)
-        timeout = config_get_float(config_section, 'timeout', default=None, raise_exception=False)
-        heartbeats = tuple(config_get_list(config_section, 'heartbeats', default=[0., 0.], raise_exception=False))
-        reconnect_attempts = config_get_int(config_section, 'reconnect_attempts', default=100)
-        if use_ssl and (key_file is None or cert_file is None):
-            self._logger(logging.ERROR, "If use_ssl is True in config you must provide both 'ssl_cert_file' "
-                         "and 'ssl_key_file'")
-            raise RuntimeError("If use_ssl is True in config you must provide both 'ssl_cert_file' and 'ssl_key_file'")
-        if not use_ssl and (username is None or password is None or nonssl_port == 0):
-            self._logger(logging.ERROR, "If use_ssl is False in config you must provide "
-                         "'username', 'password' and 'nonssl_port'")
-            raise RuntimeError("If use_ssl is False in config you must provide "
-                               "'username', 'password' and 'nonssl_port'")
-        return StompConfig(brokers=self._resolve_host_and_port(brokers, port if use_ssl else nonssl_port),
-                           use_ssl=use_ssl,
-                           port=port, vhost=vhost,
-                           destination=destination, key_file=key_file, cert_file=cert_file,
-                           username=username, password=password, nonssl_port=nonssl_port,
-                           reconnect_attempts_max=reconnect_attempts, timeout=timeout, heartbeats=heartbeats)
+        return False

-    def
-
-        Resolve host and port.
-
-        Args:
-            fqdns: fully qualified domain name(s)
-            port: port
-
-        Returns:
-            list of (host, port) tuples.
-        """
-        if isinstance(fqdns, str):
-            fqdns = fqdns.split(',')
-
-        hosts_and_ports = []
-        for fqdn in fqdns:
-            try:
-                addrinfos = socket.getaddrinfo(fqdn.strip(), port, socket.AF_INET, 0, socket.IPPROTO_TCP)
-            except socket.gaierror as exc:
-                self._logger(logging.ERROR, "[broker] Cannot resolve domain name %s (%s)", fqdn.strip(), str(exc))
-                continue
-
-            hosts_and_ports.extend(addrinfo[4] for addrinfo in addrinfos)
-        if not hosts_and_ports:
-            self._logger(logging.WARNING, "[broker] No resolved brokers")
-        return hosts_and_ports
-
-    def _is_stalled(self, conn: Connection) -> bool:
-        """
-        Determine if a connection is stalled.
-
-        Args:
-            conn: The Connection object
-
-        Returns:
-            Whether the connection has stalled.
-        """
-        received_heartbeat = getattr(conn, 'received_heartbeat', None)
-        if received_heartbeat is None or not any(self._config.heartbeats):
-            return False
-
-        heartbeat_period_seconds = max(0, self._config.heartbeats[0], self._config.heartbeats[1]) / 1000
-        if heartbeat_period_seconds == 0.:
-            return False
-
-        now = monotonic()
-        if received_heartbeat + 10 * heartbeat_period_seconds >= now:
-            return False
-
-        return True
-
-    def connect(self) -> "Iterator[Connection]":
-        """
-        Connect.
-
-        Yields:
-            Each connection object after ensuring it's connected.
-        """
-        config = self._config
-        params = {'wait': True, "heartbeats": self._config.heartbeats}
-        self._logger(logging.WARNING, 'heartbeats: %s', self._config.heartbeats)
-        if not config.use_ssl:
-            params.update(username=config.username, password=config.password)
-
-        for conn in self._conns:
-            if self._is_stalled(conn):
-                try:
-                    conn.disconnect()
-                except Exception:
-                    self._logger(logging.ERROR, "[broker] Stalled connection could not be disconnected")
+    def disconnect(self):
+        for conn in self._connections.values():
             if not conn.is_connected():
-
-                METRICS.counter('reconnect.{host}').labels(host=conn.brokers[0][0]).inc()
-                if self._listener_factory is not None:
-                    conn.set_listener(*self._listener_factory(conn=conn))
-
-                try:
-                    conn.connect(**params)
-                except ConnectFailedException as error:
-                    self._logger(logging.WARNING, "[broker] Could not deliver message due to "
-                                 "ConnectFailedException: %s", str(error))
-                    continue
-                except Exception as error:
-                    self._logger(logging.ERROR, "[broker] Could not connect: %s", str(error))
-                    continue
-            try:
-                yield conn
-            except Exception:
-                self._logger(logging.ERROR, "[broker] Error in yielded code, skipping to next connection.")
-
-    def deliver_messages(self, messages: "Iterable[dict[str, Any]]") -> list[int]:
-        """
-        Deliver messages.
-
-        Args:
-            messages: Messages to deliver.
-
-        Returns:
-            delivered message ids, ready for deletion.
-        """
-        config = self._config
-        conn = random.sample(list(self.connect()), 1)[0]
-        to_delete = []
-        for message in messages:
-            try:
-                body = json.dumps({"event_type": str(message["event_type"]).lower(),
-                                   "payload": message["payload"],
-                                   "created_at": str(message["created_at"])})
-            except ValueError:
-                self._logger(logging.ERROR, "[broker] Cannot serialize payload to JSON: %s", str(message["payload"]))
-                to_delete.append(message["id"])
-                continue
-
-            try:
-                conn.send(
-                    body=body,
-                    destination=config.destination,
-                    headers={"persistent": "true",
-                             "event_type": str(message["event_type"]).lower()}
-                )
-                to_delete.append(message["id"])
-            except NotConnectedException as error:
-                self._logger(logging.WARNING, "[broker] Could not deliver message due to NotConnectedException: %s",
-                             str(error))
-                continue
-            except Exception as error:
-                self._logger(logging.ERROR, "[broker] Could not deliver message: %s", str(error))
-                continue
-
-            msg_event_type = str(message["event_type"]).lower()
-            msg_payload = message.get("payload", {})
-            if msg_event_type.startswith("transfer") or msg_event_type.startswith("stagein"):
-                self._logger(logging.DEBUG,
-                             "[broker] - event_type: %s, scope: %s, name: %s, rse: %s, request-id: %s, "
-                             "transfer-id: %s, created_at: %s",
-                             msg_event_type,
-                             msg_payload.get("scope", None),
-                             msg_payload.get("name", None),
-                             msg_payload.get("dst-rse", None),
-                             msg_payload.get("request-id", None),
-                             msg_payload.get("transfer-id", None),
-                             str(message["created_at"]))
-
-            elif msg_event_type.startswith("dataset"):
-                self._logger(logging.DEBUG,
-                             "[broker] - event_type: %s, scope: %s, name: %s, rse: %s, rule-id: %s, created_at: %s)",
-                             msg_event_type,
-                             msg_payload.get("scope", None),
-                             msg_payload.get("name", None),
-                             msg_payload.get("rse", None),
-                             msg_payload.get("rule_id", None),
-                             str(message["created_at"]))
-
-            elif msg_event_type.startswith("deletion"):
-                if "url" not in msg_payload:
-                    msg_payload["url"] = "unknown"
-                self._logger(logging.DEBUG,
-                             "[broker] - event_type: %s, scope: %s, name: %s, rse: %s, url: %s, created_at: %s)",
-                             msg_event_type,
-                             msg_payload.get("scope", None),
-                             msg_payload.get("name", None),
-                             msg_payload.get("rse", None),
-                             msg_payload.get("url", None),
-                             str(message["created_at"]))
-            else:
-                self._logger(logging.DEBUG, "[broker] Other message: %s", message)
-
-        return to_delete
-
-    def subscribe(self, id_: str, ack: str, destination: "None | str" = None, **kwargs) -> None:
-        """
-        Subscribe
-
-        Args:
-            id_: The identifier to uniquely identify the subscription
-            ack: Either auto, client or client-individual
-            destination: The topic or queue to subscribe to. If None then
-                destination is taken from the rucio config Defaults to None.
-
-        Kwargs:
-            Arguments to pass to the Construction objects subscribe method.
-        """
-        if destination is None:
-            destination = self._config.destination
-        for conn in self.connect():
-            conn.subscribe(destination=destination,
-                           id=id_, ack=ack, **kwargs)
+                conn.disconnect()

-    def
-
-
-
+    def re_configure(
+            self,
+            brokers: "Sequence[str]",
+            port: int,
+            use_ssl: bool,
+            vhost,
+            reconnect_attempts: int,
+            ssl_key_file,
+            ssl_cert_file,
+            timeout,
+            heartbeats=(0, 1000),
+            *,
+            logger: "LoggerFunction" = logging.log
+    ) -> tuple[list, list]:
+
+        configuration_changed = any([
+            self._brokers != brokers,
+            self._port != port,
+            self._use_ssl != use_ssl,
+            self._vhost != vhost,
+            self._reconnect_attempts != reconnect_attempts,
+            self._ssl_key_file != ssl_key_file,
+            self._timeout != timeout,
+            self._heartbeats != heartbeats,
+        ])
+        if configuration_changed:
+            self._brokers = brokers
+            self._port = port
+            self._use_ssl = use_ssl
+            self._vhost = vhost
+            self._reconnect_attempts = reconnect_attempts
+            self._ssl_key_file = ssl_key_file
+            self._timeout = timeout
+            self._heartbeats = heartbeats
+
+        current_remotes = set(self._connections)
+        desired_remotes = set((ip, port) for ip in resolve_ips(brokers, logger=logger))
+
+        if configuration_changed:
+            # Re-create all connections
+            to_delete = current_remotes
+            to_create = desired_remotes
+        else:
+            to_delete = current_remotes.difference(desired_remotes)
+            to_create = desired_remotes.difference(current_remotes)
+
+        for remote in current_remotes.intersection(desired_remotes):
+            conn = self._connections[remote]
+
+            if self.is_stalled(conn, logger=logger):
+                # Re-create stalled connections
+                to_delete.add(remote)
+                to_create.add(remote)
+
+        deleted_conns = []
+        for remote in to_delete:
+            conn = self._connections.pop(remote)
+            if conn.is_connected():
                 conn.disconnect()
-
-
+            deleted_conns.append(to_delete)
+
+        created_conns = []
+        for remote in to_create:
+            conn = Connection(
+                host_and_ports=[remote],
+                vhost=vhost,
+                timeout=timeout,
+                heartbeats=heartbeats,
+                reconnect_attempts_max=reconnect_attempts
+            )
+            if use_ssl:
+                conn.set_ssl(key_file=ssl_key_file, cert_file=ssl_cert_file)
+            self._connections[remote] = conn
+            created_conns.append(conn)
+
+        if not to_delete and not to_create:
+            logger(logging.INFO, "Stomp connections didn't change")
+        else:
+            logger(logging.INFO, f"Stomp connections refreshed. Deleted: {list(to_delete)}. Added: {list(to_create)}")
+
+        return created_conns, deleted_conns
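The rewritten manager no longer parses its own config section or builds connections in __init__; a daemon is now expected to resolve its settings itself and hand them to re_configure, which reconciles the connection pool and returns the created and dropped connections. A rough usage sketch under those assumptions (broker, port and SSL settings are placeholders):

import logging

from rucio.common.stomp_utils import StompConnectionManager

conn_mgr = StompConnectionManager()

# Values would normally come from the daemon's config section; these are placeholders.
created, deleted = conn_mgr.re_configure(
    brokers=['localhost'],
    port=61613,
    use_ssl=False,
    vhost=None,
    reconnect_attempts=100,
    ssl_key_file=None,
    ssl_cert_file=None,
    timeout=None,
    heartbeats=(0, 1000),
    logger=logging.log,
)

# Freshly created connections still need to be connected and subscribed by the caller;
# stalled or obsolete ones have already been disconnected and dropped from the pool.
for conn in created:
    pass

conn_mgr.disconnect()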