rucio 37.7.1__py3-none-any.whl → 38.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of rucio has been flagged by the registry.
- rucio/alembicrevision.py +1 -1
- rucio/cli/bin_legacy/rucio.py +51 -107
- rucio/cli/bin_legacy/rucio_admin.py +26 -26
- rucio/cli/command.py +1 -0
- rucio/cli/did.py +2 -2
- rucio/cli/opendata.py +132 -0
- rucio/cli/replica.py +15 -5
- rucio/cli/rule.py +7 -2
- rucio/cli/scope.py +3 -2
- rucio/cli/utils.py +28 -4
- rucio/client/baseclient.py +9 -1
- rucio/client/client.py +2 -0
- rucio/client/diracclient.py +73 -12
- rucio/client/opendataclient.py +249 -0
- rucio/client/subscriptionclient.py +30 -0
- rucio/client/uploadclient.py +10 -13
- rucio/common/constants.py +4 -1
- rucio/common/exception.py +55 -0
- rucio/common/plugins.py +45 -8
- rucio/common/schema/generic.py +5 -3
- rucio/common/schema/generic_multi_vo.py +4 -2
- rucio/common/types.py +8 -7
- rucio/common/utils.py +176 -11
- rucio/core/dirac.py +5 -5
- rucio/core/opendata.py +744 -0
- rucio/core/rule.py +63 -8
- rucio/core/transfer.py +1 -1
- rucio/daemons/common.py +1 -1
- rucio/daemons/conveyor/finisher.py +2 -2
- rucio/daemons/conveyor/poller.py +2 -2
- rucio/daemons/conveyor/preparer.py +1 -1
- rucio/daemons/conveyor/submitter.py +2 -2
- rucio/daemons/conveyor/throttler.py +1 -1
- rucio/db/sqla/constants.py +6 -0
- rucio/db/sqla/migrate_repo/versions/a62db546a1f1_opendata_initial_model.py +85 -0
- rucio/db/sqla/models.py +69 -0
- rucio/db/sqla/session.py +8 -1
- rucio/db/sqla/util.py +2 -2
- rucio/gateway/dirac.py +1 -1
- rucio/gateway/opendata.py +190 -0
- rucio/gateway/subscription.py +5 -3
- rucio/rse/protocols/protocol.py +9 -5
- rucio/rse/translation.py +17 -6
- rucio/tests/common.py +64 -12
- rucio/transfertool/fts3.py +1 -0
- rucio/transfertool/fts3_plugins.py +6 -1
- rucio/vcsversion.py +4 -4
- rucio/web/rest/flaskapi/v1/auth.py +11 -2
- rucio/web/rest/flaskapi/v1/common.py +34 -14
- rucio/web/rest/flaskapi/v1/config.py +1 -1
- rucio/web/rest/flaskapi/v1/dids.py +447 -160
- rucio/web/rest/flaskapi/v1/heartbeats.py +1 -1
- rucio/web/rest/flaskapi/v1/identities.py +1 -1
- rucio/web/rest/flaskapi/v1/lifetime_exceptions.py +1 -1
- rucio/web/rest/flaskapi/v1/locks.py +1 -1
- rucio/web/rest/flaskapi/v1/main.py +3 -7
- rucio/web/rest/flaskapi/v1/meta_conventions.py +1 -16
- rucio/web/rest/flaskapi/v1/nongrid_traces.py +1 -1
- rucio/web/rest/flaskapi/v1/opendata.py +391 -0
- rucio/web/rest/flaskapi/v1/opendata_public.py +146 -0
- rucio/web/rest/flaskapi/v1/requests.py +1 -1
- rucio/web/rest/flaskapi/v1/rses.py +1 -1
- rucio/web/rest/flaskapi/v1/rules.py +1 -1
- rucio/web/rest/flaskapi/v1/scopes.py +1 -1
- rucio/web/rest/flaskapi/v1/subscriptions.py +6 -9
- rucio/web/rest/flaskapi/v1/traces.py +1 -1
- rucio/web/rest/flaskapi/v1/vos.py +1 -1
- {rucio-37.7.1.data → rucio-38.0.0.data}/data/rucio/etc/alembic.ini.template +1 -1
- {rucio-37.7.1.data → rucio-38.0.0.data}/data/rucio/etc/alembic_offline.ini.template +1 -1
- {rucio-37.7.1.data → rucio-38.0.0.data}/data/rucio/etc/rucio.cfg.template +2 -2
- {rucio-37.7.1.data → rucio-38.0.0.data}/data/rucio/etc/rucio_multi_vo.cfg.template +3 -3
- {rucio-37.7.1.data → rucio-38.0.0.data}/data/rucio/requirements.server.txt +6 -3
- rucio-38.0.0.data/data/rucio/tools/reset_database.py +87 -0
- {rucio-37.7.1.data → rucio-38.0.0.data}/scripts/rucio +2 -1
- {rucio-37.7.1.dist-info → rucio-38.0.0.dist-info}/METADATA +37 -36
- {rucio-37.7.1.dist-info → rucio-38.0.0.dist-info}/RECORD +128 -122
- {rucio-37.7.1.dist-info → rucio-38.0.0.dist-info}/licenses/AUTHORS.rst +1 -0
- rucio/client/fileclient.py +0 -57
- rucio-37.7.1.data/data/rucio/tools/reset_database.py +0 -40
- {rucio-37.7.1.data → rucio-38.0.0.data}/data/rucio/etc/globus-config.yml.template +0 -0
- {rucio-37.7.1.data → rucio-38.0.0.data}/data/rucio/etc/ldap.cfg.template +0 -0
- {rucio-37.7.1.data → rucio-38.0.0.data}/data/rucio/etc/mail_templates/rule_approval_request.tmpl +0 -0
- {rucio-37.7.1.data → rucio-38.0.0.data}/data/rucio/etc/mail_templates/rule_approved_admin.tmpl +0 -0
- {rucio-37.7.1.data → rucio-38.0.0.data}/data/rucio/etc/mail_templates/rule_approved_user.tmpl +0 -0
- {rucio-37.7.1.data → rucio-38.0.0.data}/data/rucio/etc/mail_templates/rule_denied_admin.tmpl +0 -0
- {rucio-37.7.1.data → rucio-38.0.0.data}/data/rucio/etc/mail_templates/rule_denied_user.tmpl +0 -0
- {rucio-37.7.1.data → rucio-38.0.0.data}/data/rucio/etc/mail_templates/rule_ok_notification.tmpl +0 -0
- {rucio-37.7.1.data → rucio-38.0.0.data}/data/rucio/etc/rse-accounts.cfg.template +0 -0
- {rucio-37.7.1.data → rucio-38.0.0.data}/data/rucio/etc/rucio.cfg.atlas.client.template +0 -0
- {rucio-37.7.1.data → rucio-38.0.0.data}/data/rucio/tools/bootstrap.py +0 -0
- {rucio-37.7.1.data → rucio-38.0.0.data}/data/rucio/tools/merge_rucio_configs.py +0 -0
- {rucio-37.7.1.data → rucio-38.0.0.data}/scripts/rucio-abacus-account +0 -0
- {rucio-37.7.1.data → rucio-38.0.0.data}/scripts/rucio-abacus-collection-replica +0 -0
- {rucio-37.7.1.data → rucio-38.0.0.data}/scripts/rucio-abacus-rse +0 -0
- {rucio-37.7.1.data → rucio-38.0.0.data}/scripts/rucio-admin +0 -0
- {rucio-37.7.1.data → rucio-38.0.0.data}/scripts/rucio-atropos +0 -0
- {rucio-37.7.1.data → rucio-38.0.0.data}/scripts/rucio-auditor +0 -0
- {rucio-37.7.1.data → rucio-38.0.0.data}/scripts/rucio-automatix +0 -0
- {rucio-37.7.1.data → rucio-38.0.0.data}/scripts/rucio-bb8 +0 -0
- {rucio-37.7.1.data → rucio-38.0.0.data}/scripts/rucio-cache-client +0 -0
- {rucio-37.7.1.data → rucio-38.0.0.data}/scripts/rucio-cache-consumer +0 -0
- {rucio-37.7.1.data → rucio-38.0.0.data}/scripts/rucio-conveyor-finisher +0 -0
- {rucio-37.7.1.data → rucio-38.0.0.data}/scripts/rucio-conveyor-poller +0 -0
- {rucio-37.7.1.data → rucio-38.0.0.data}/scripts/rucio-conveyor-preparer +0 -0
- {rucio-37.7.1.data → rucio-38.0.0.data}/scripts/rucio-conveyor-receiver +0 -0
- {rucio-37.7.1.data → rucio-38.0.0.data}/scripts/rucio-conveyor-stager +0 -0
- {rucio-37.7.1.data → rucio-38.0.0.data}/scripts/rucio-conveyor-submitter +0 -0
- {rucio-37.7.1.data → rucio-38.0.0.data}/scripts/rucio-conveyor-throttler +0 -0
- {rucio-37.7.1.data → rucio-38.0.0.data}/scripts/rucio-dark-reaper +0 -0
- {rucio-37.7.1.data → rucio-38.0.0.data}/scripts/rucio-dumper +0 -0
- {rucio-37.7.1.data → rucio-38.0.0.data}/scripts/rucio-follower +0 -0
- {rucio-37.7.1.data → rucio-38.0.0.data}/scripts/rucio-hermes +0 -0
- {rucio-37.7.1.data → rucio-38.0.0.data}/scripts/rucio-judge-cleaner +0 -0
- {rucio-37.7.1.data → rucio-38.0.0.data}/scripts/rucio-judge-evaluator +0 -0
- {rucio-37.7.1.data → rucio-38.0.0.data}/scripts/rucio-judge-injector +0 -0
- {rucio-37.7.1.data → rucio-38.0.0.data}/scripts/rucio-judge-repairer +0 -0
- {rucio-37.7.1.data → rucio-38.0.0.data}/scripts/rucio-kronos +0 -0
- {rucio-37.7.1.data → rucio-38.0.0.data}/scripts/rucio-minos +0 -0
- {rucio-37.7.1.data → rucio-38.0.0.data}/scripts/rucio-minos-temporary-expiration +0 -0
- {rucio-37.7.1.data → rucio-38.0.0.data}/scripts/rucio-necromancer +0 -0
- {rucio-37.7.1.data → rucio-38.0.0.data}/scripts/rucio-oauth-manager +0 -0
- {rucio-37.7.1.data → rucio-38.0.0.data}/scripts/rucio-reaper +0 -0
- {rucio-37.7.1.data → rucio-38.0.0.data}/scripts/rucio-replica-recoverer +0 -0
- {rucio-37.7.1.data → rucio-38.0.0.data}/scripts/rucio-rse-decommissioner +0 -0
- {rucio-37.7.1.data → rucio-38.0.0.data}/scripts/rucio-storage-consistency-actions +0 -0
- {rucio-37.7.1.data → rucio-38.0.0.data}/scripts/rucio-transmogrifier +0 -0
- {rucio-37.7.1.data → rucio-38.0.0.data}/scripts/rucio-undertaker +0 -0
- {rucio-37.7.1.dist-info → rucio-38.0.0.dist-info}/WHEEL +0 -0
- {rucio-37.7.1.dist-info → rucio-38.0.0.dist-info}/licenses/LICENSE +0 -0
- {rucio-37.7.1.dist-info → rucio-38.0.0.dist-info}/top_level.txt +0 -0
rucio/core/rule.py
CHANGED
@@ -100,21 +100,22 @@ class AutoApprove(PolicyPackageAlgorithms):
 
     _algorithm_type = 'auto_approve'
 
-    def __init__(self, rule: models.ReplicationRule, did: models.DataIdentifier, session: 'Session') -> None:
+    def __init__(self, rule: models.ReplicationRule, did: models.DataIdentifier, session: 'Session', vo: str = DEFAULT_VO) -> None:
         super().__init__()
         self.rule = rule
         self.did = did
         self.session = session
+        self.vo = vo
         self.register("default", self.default)
 
     def evaluate(self) -> bool:
         """
         Evaluate the auto-approve algorithm
         """
-        return self.get_configured_algorithm()(self.rule, self.did, self.session)
+        return self.get_configured_algorithm(self.vo)(self.rule, self.did, self.session)
 
     @classmethod
-    def get_configured_algorithm(cls: type[AutoApproveT]) -> "Callable[[models.ReplicationRule, models.DataIdentifier, Session], bool]":
+    def get_configured_algorithm(cls: type[AutoApproveT], vo: str) -> "Callable[[models.ReplicationRule, models.DataIdentifier, Session], bool]":
         """
         Get the configured auto-approve algorithm
         """
@@ -123,7 +124,12 @@ class AutoApprove(PolicyPackageAlgorithms):
         except (NoOptionError, NoSectionError, RuntimeError):
             configured_algorithm = 'default'
 
-        return super()._get_one_algorithm(cls._algorithm_type, configured_algorithm)
+        result = None
+        if configured_algorithm == 'default':
+            result = super()._get_default_algorithm(cls._algorithm_type, vo)
+        if result is None:
+            result = super()._get_one_algorithm(cls._algorithm_type, configured_algorithm)
+        return result
 
     @classmethod
     def register(cls: type[AutoApproveT], name: str, fn_auto_approve: "Callable[[models.ReplicationRule, models.DataIdentifier, Session], bool]") -> None:
@@ -390,7 +396,7 @@ def add_rule(
         if ask_approval:
             new_rule.state = RuleState.WAITING_APPROVAL
             # Use the new rule as the argument here
-            auto_approver = AutoApprove(new_rule, did, session=session)
+            auto_approver = AutoApprove(new_rule, did, session=session, vo=account.vo)
             if auto_approver.evaluate():
                 logger(logging.DEBUG, "Auto approving rule %s", str(new_rule.id))
                 logger(logging.DEBUG, "Created rule %s for injection", str(new_rule.id))
@@ -1261,6 +1267,7 @@ def repair_rule(
     # created.
     # (C) Transfers fail and mark locks (and the rule) as STUCK. All STUCK locks have to be repaired.
     # (D) Files are declared as BAD.
+    # (E) Stuck locks are found on RSEs that do not belong to the target RSEs.
 
     # start_time = time.time()
     try:
@@ -1314,6 +1321,43 @@ def repair_rule(
         logger(logging.DEBUG, '%s while repairing rule %s', str(error), rule_id)
         return
 
+    # Get all stuck locks for this rule ID
+    stmt = select(
+        models.ReplicaLock.rse_id,
+        func.count().label('lock_count')
+    ).where(
+        and_(models.ReplicaLock.rule_id == rule.id,
+             models.ReplicaLock.state == LockState.STUCK)
+    ).group_by(
+        models.ReplicaLock.rse_id
+    )
+    stuck_locks_by_rse = session.execute(stmt).all()
+
+    stuck_locks_on_nontarget_rses = []
+
+    # Check if any of the locks found are not on our target RSEs
+    target_rse_ids = {rse['id'] for rse in target_rses}
+    for stuck_lock in stuck_locks_by_rse:
+        if stuck_lock.rse_id not in target_rse_ids:
+            rse_name = get_rse_name(rse_id=stuck_lock.rse_id, session=session)
+            stuck_locks_on_nontarget_rses.append({
+                'rse_id': stuck_lock.rse_id,
+                'rse_name': rse_name,
+                'lock_count': stuck_lock.lock_count
+            })
+
+    # Add to rule error if found
+    if stuck_locks_on_nontarget_rses:
+        error_msg = "Found stuck locks on RSEs not matching target expression: "
+        error_msg += ", ".join([f"{rse['rse_name']} ({rse['lock_count']})" for rse in stuck_locks_on_nontarget_rses])
+
+        if rule.error:
+            error_msg = rule.error + '|' + error_msg
+
+        rule.error = (error_msg[:245] + '...') if len(error_msg) > 245 else error_msg
+
+        logger(logging.WARNING, "Rule %s: %s", str(rule.id), error_msg)
+
     # Create the RSESelector
     try:
         rseselector = RSESelector(account=rule.account,
@@ -1324,7 +1368,12 @@ def repair_rule(
                                   session=session)
     except (InvalidRuleWeight, InsufficientTargetRSEs, InsufficientAccountLimit) as error:
         rule.state = RuleState.STUCK
-
+
+        error_msg = str(error)
+        if rule.error:
+            error_msg = rule.error + '|' + error_msg
+        rule.error = (error_msg[:245] + '...') if len(error_msg) > 245 else error_msg
+
         rule.save(session=session)
         # Insert rule history
         insert_rule_history(rule=rule, recent=True, longterm=False, session=session)
@@ -1410,7 +1459,10 @@ def repair_rule(
                             session=session)
     except (InsufficientAccountLimit, InsufficientTargetRSEs) as error:
         rule.state = RuleState.STUCK
-
+        error_msg = str(error)
+        if rule.error:
+            error_msg = rule.error + '|' + error_msg
+        rule.error = (error_msg[:245] + '...') if len(error_msg) > 245 else error_msg
         rule.save(session=session)
         # Insert rule history
         insert_rule_history(rule=rule, recent=True, longterm=False, session=session)
@@ -1450,7 +1502,10 @@ def repair_rule(
                             session=session)
    except (InsufficientAccountLimit, InsufficientTargetRSEs) as error:
         rule.state = RuleState.STUCK
-
+        error_msg = str(error)
+        if rule.error:
+            error_msg = rule.error + '|' + error_msg
+        rule.error = (error_msg[:245] + '...') if len(error_msg) > 245 else error_msg
         rule.save(session=session)
         # Insert rule history
         insert_rule_history(rule=rule, recent=True, longterm=False, session=session)
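Editorial note on the AutoApprove change: add_rule() now threads the rule owner's VO into AutoApprove, so a policy package can supply a per-VO default algorithm that wins over the generic one. A minimal sketch of how a custom algorithm could hook into the register()/evaluate() interface shown above; the function name and body are hypothetical, only the callable signature (rule, did, session) -> bool comes from the diff:

from rucio.core.rule import AutoApprove

def approve_small_dids(rule, did, session) -> bool:
    # Hypothetical policy: auto-approve rules on DIDs smaller than 1 TiB.
    return did.bytes is not None and did.bytes < 1024**4

# Make the algorithm selectable by name; get_configured_algorithm() resolves
# the configured name (or a VO-specific default) to one of these callables.
AutoApprove.register('approve_small_dids', approve_small_dids)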
rucio/core/transfer.py
CHANGED
@@ -239,7 +239,7 @@ class DirectTransferImplementation(DirectTransfer):
             # DQ2 path always starts with /, but prefix might not end with /
             naming_convention = dst.rse.attributes.get(RseAttr.NAMING_CONVENTION, None)
             if rws.scope.external is not None:
-                dest_path = construct_non_deterministic_pfn(dsn, rws.scope.external, rws.name, naming_convention)
+                dest_path = construct_non_deterministic_pfn(dsn, rws.scope.external, rws.name, naming_convention, rws.scope.vo)
             if dst.rse.is_tape():
                 if rws.retry_count or rws.activity == 'Recovery':
                     dest_path = '%s_%i' % (dest_path, int(time.time()))
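The only change here is the call site: non-deterministic PFN construction now receives the scope's VO, so VO-specific naming-convention algorithms can be resolved. A hedged sketch of the widened call, mirroring the positional arguments of the diff; the values are invented and the exact parameter names in rucio.common.utils are an assumption:

from rucio.common.utils import construct_non_deterministic_pfn

# dsn, scope, name, naming convention, vo; the trailing VO argument is new in 38.0.0.
pfn = construct_non_deterministic_pfn('tests.dsn', 'tests', 'file.0001.root', None, 'def')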
rucio/daemons/common.py
CHANGED
@@ -118,7 +118,7 @@ def _activity_looper(
         sleep_time: int,
         activities: Optional['Sequence[str]'],
         heartbeat_handler: HeartbeatHandler,
-) -> 'Generator[tuple[str, float], tuple[float, bool], None]':
+) -> 'Generator[tuple[Optional[str], float], tuple[float, bool], None]':
     """
     Generator which loops (either once, or indefinitely) over all activities while ensuring that `sleep_time`
     passes between handling twice the same activity.

rucio/daemons/conveyor/finisher.py
CHANGED

@@ -66,7 +66,7 @@ def _fetch_requests(
         set_last_processed_by: bool,
         cached_topology: Optional[ExpiringObjectCache],
         heartbeat_handler: "HeartbeatHandler",
-        activity: str,
+        activity: Optional[str],
 ) -> tuple[bool, tuple[list[dict[str, Any]], Topology]]:
     worker_number, total_workers, logger = heartbeat_handler.live()
 
@@ -184,7 +184,7 @@ def finisher(
     )
     def _db_producer(
             *,
-            activity: str,
+            activity: Optional[str],
             heartbeat_handler: "HeartbeatHandler"
     ) -> tuple[bool, tuple[list[dict[str, Any]], Topology]]:
         return _fetch_requests(
rucio/daemons/conveyor/poller.py
CHANGED
@@ -66,7 +66,7 @@ def _fetch_requests(
         transfertool: Optional[str],
         filter_transfertool: Optional[str],
         cached_topology: Optional[ExpiringObjectCache],
-        activity: str,
+        activity: Optional[str],
         set_last_processed_by: bool,
         heartbeat_handler: "HeartbeatHandler"
 ) -> tuple[bool, list[dict[str, Any]]]:
@@ -196,7 +196,7 @@ def poller(
     )
     def _db_producer(
             *,
-            activity: str,
+            activity: Optional[str],
             heartbeat_handler: "HeartbeatHandler"
     ) -> tuple[bool, list[dict[str, Any]]]:
         return _fetch_requests(

rucio/daemons/conveyor/preparer.py
CHANGED

@@ -100,7 +100,7 @@ def preparer(
         sleep_time=sleep_time)
     def _db_producer(
             *,
-            activity: str,
+            activity: Optional[str],
             heartbeat_handler: "HeartbeatHandler"
     ) -> tuple[bool, tuple[Topology, dict[str, RequestWithSources]]]:
         return _fetch_requests(

rucio/daemons/conveyor/submitter.py
CHANGED

@@ -55,7 +55,7 @@ TRANSFER_TYPE = config_get('conveyor', 'transfertype', False, 'single')
 def _fetch_requests(
         partition_hash_var: Optional[str],
         bulk: int,
-        activity: str,
+        activity: Optional[str],
         rse_ids: Optional[list[str]],
         request_type: list[RequestType],
         ignore_availability: bool,
@@ -285,7 +285,7 @@ def submitter(
         activities=activities)
     def _db_producer(
             *,
-            activity: str,
+            activity: Optional[str],
             heartbeat_handler: "HeartbeatHandler"
     ) -> tuple[bool, tuple[Topology, dict[str, RequestWithSources]]]:
         return _fetch_requests(

rucio/daemons/conveyor/throttler.py
CHANGED

@@ -102,7 +102,7 @@ def throttler(
         sleep_time=sleep_time)
     def _db_producer(
             *,
-            activity: str,
+            activity: Optional[str],
             heartbeat_handler: "HeartbeatHandler"
     ) -> tuple[bool, Optional["ReleaseGroupsDict"]]:
         worker_number, total_workers, logger = heartbeat_handler.live()
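The same `activity: str` to `activity: Optional[str]` widening recurs across finisher, poller, preparer, submitter, and throttler above, matching the new `Generator[tuple[Optional[str], float], ...]` return type of `_activity_looper`. A standalone sketch of the idea with illustrative names, not the daemons' actual internals: when no per-activity split is configured, the loop hands the DB producer a single None slot meaning "all activities":

from collections.abc import Sequence
from typing import Optional

def activity_slots(activities: Optional[Sequence[str]]) -> list[Optional[str]]:
    # No configured activities -> one pass over everything, keyed by None.
    return list(activities) if activities else [None]

assert activity_slots(None) == [None]
assert activity_slots(['User Subscriptions', 'Production']) == ['User Subscriptions', 'Production']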
rucio/db/sqla/migrate_repo/versions/a62db546a1f1_opendata_initial_model.py
ADDED
@@ -0,0 +1,85 @@
+# Copyright European Organization for Nuclear Research (CERN) since 2012
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Opendata initial model"""  # noqa: D400, D415
+
+import sqlalchemy as sa
+from alembic import op
+
+from rucio.common.schema import get_schema_value
+from rucio.db.sqla.constants import OpenDataDIDState
+from rucio.db.sqla.types import JSON
+
+# Alembic revision identifiers
+revision = 'a62db546a1f1'
+down_revision = '30d5206e9cad'
+
+
+def upgrade():
+    op.create_table(
+        'dids_opendata',
+        sa.Column('scope', sa.String(length=get_schema_value('SCOPE_LENGTH')), nullable=False),
+        sa.Column('name', sa.String(length=get_schema_value('NAME_LENGTH')), nullable=False),
+        sa.Column('state', sa.Enum(OpenDataDIDState, name='DID_OPENDATA_STATE_CHK',
+                                   values_callable=lambda obj: [e.value for e in obj]), nullable=True,
+                  server_default=OpenDataDIDState.DRAFT.value),
+        sa.Column('created_at', sa.DateTime(), nullable=True),
+        sa.Column('updated_at', sa.DateTime(), nullable=True),
+        sa.PrimaryKeyConstraint('scope', 'name', name='OPENDATA_DID_PK'),
+        sa.ForeignKeyConstraint(['scope', 'name'], ['dids.scope', 'dids.name'],
+                                ondelete='CASCADE', name='OPENDATA_DID_FK')
+    )
+    op.create_index('OPENDATA_DID_UPDATED_AT_IDX', 'dids_opendata', ['updated_at'])
+    op.create_index('OPENDATA_DID_CREATED_AT_IDX', 'dids_opendata', ['created_at'])
+    op.create_index('OPENDATA_DID_STATE_IDX', 'dids_opendata', ['state'])
+    op.create_index('OPENDATA_DID_STATE_UPDATED_AT_IDX', 'dids_opendata', ['state', 'updated_at'])
+
+    op.create_table(
+        'dids_opendata_doi',
+        sa.Column('scope', sa.String(length=get_schema_value('SCOPE_LENGTH')), nullable=False),
+        sa.Column('name', sa.String(length=get_schema_value('NAME_LENGTH')), nullable=False),
+        sa.Column('doi', sa.String(length=255), nullable=False, unique=True),
+        sa.Column('created_at', sa.DateTime(), nullable=True),
+        sa.Column('updated_at', sa.DateTime(), nullable=True),
+        sa.PrimaryKeyConstraint('scope', 'name', name='OPENDATA_DOI_PK'),
+        sa.ForeignKeyConstraint(['scope', 'name'], ['dids_opendata.scope', 'dids_opendata.name'],
+                                ondelete='CASCADE', name='OPENDATA_DOI_FK')
+    )
+    op.create_index('OPENDATA_DOI_UPDATED_AT_IDX', 'dids_opendata_doi', ['updated_at'])
+    op.create_index('OPENDATA_DOI_CREATED_AT_IDX', 'dids_opendata_doi', ['created_at'])
+
+    op.create_table(
+        'dids_opendata_meta',
+        sa.Column('scope', sa.String(length=get_schema_value('SCOPE_LENGTH')), nullable=False),
+        sa.Column('name', sa.String(length=get_schema_value('NAME_LENGTH')), nullable=False),
+        sa.Column('meta', JSON(), nullable=False),
+        sa.Column('created_at', sa.DateTime(), nullable=True),
+        sa.Column('updated_at', sa.DateTime(), nullable=True),
+        sa.PrimaryKeyConstraint('scope', 'name', name='OPENDATA_META_PK'),
+        sa.ForeignKeyConstraint(['scope', 'name'], ['dids_opendata.scope', 'dids_opendata.name'],
+                                ondelete='CASCADE', name='OPENDATA_META_FK')
+    )
+
+
+def downgrade():
+    op.drop_table('dids_opendata_meta')
+    op.drop_table('dids_opendata_doi')
+    op.drop_index('OPENDATA_DID_STATE_UPDATED_AT_IDX', table_name='dids_opendata')
+    op.drop_index('OPENDATA_DID_STATE_IDX', table_name='dids_opendata')
+    op.drop_index('OPENDATA_DID_CREATED_AT_IDX', table_name='dids_opendata')
+    op.drop_index('OPENDATA_DID_UPDATED_AT_IDX', table_name='dids_opendata')
+    op.drop_table('dids_opendata')
+
+    # Drop enum if created in this migration
+    sa.Enum(name='DID_OPENDATA_STATE_CHK').drop(op.get_bind(), checkfirst=True)
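A hedged sketch of exercising this revision through Alembic's Python API; the config path is illustrative (Rucio ships etc/alembic.ini.template, also touched in this release):

from alembic import command
from alembic.config import Config

cfg = Config('/opt/rucio/etc/alembic.ini')  # illustrative path
command.upgrade(cfg, 'a62db546a1f1')        # create the three opendata tables
# command.downgrade(cfg, '30d5206e9cad')    # revert to the previous revision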
rucio/db/sqla/models.py
CHANGED
@@ -43,6 +43,7 @@ from rucio.db.sqla.constants import (
     KeyType,
     LifetimeExceptionsState,
     LockState,
+    OpenDataDIDState,
     ReplicaState,
     RequestState,
     RequestType,
@@ -468,6 +469,74 @@ class DataIdentifier(BASE, ModelBase):
                     Index('DIDS_EXPIRED_AT_IDX', 'expired_at'))
 
 
+class OpenDataDid(BASE, ModelBase):
+    """DIDs which are part of OpenData"""
+    __tablename__ = 'dids_opendata'
+
+    scope: Mapped[InternalScope] = mapped_column(InternalScopeString(common_schema.get_schema_value('SCOPE_LENGTH')))
+    name: Mapped[str] = mapped_column(String(common_schema.get_schema_value('NAME_LENGTH')))
+    state: Mapped[Optional[OpenDataDIDState]] = mapped_column(Enum(OpenDataDIDState, name='DID_OPENDATA_STATE_CHK',
+                                                                   create_constraint=True,
+                                                                   values_callable=lambda obj: [e.value for e in obj]),
+                                                              default=OpenDataDIDState.DRAFT)
+
+    __table_args__ = (
+        PrimaryKeyConstraint('scope', 'name', name='OPENDATA_DID_PK'),
+        ForeignKeyConstraint(
+            ['scope', 'name'],
+            ['dids.scope', 'dids.name'],
+            name='OPENDATA_DID_FK',
+            ondelete='CASCADE',
+        ),
+        Index('OPENDATA_DID_UPDATED_AT_IDX', 'updated_at'),
+        Index('OPENDATA_DID_CREATED_AT_IDX', 'created_at'),
+        Index('OPENDATA_DID_STATE_IDX', 'state'),
+        Index('OPENDATA_DID_STATE_UPDATED_AT_IDX', 'state', 'updated_at'),
+    )
+
+
+class OpenDataDOI(BASE, ModelBase):
+    """Mapping between OpenData DIDs and DOIs"""
+    __tablename__ = 'dids_opendata_doi'
+
+    scope: Mapped[InternalScope] = mapped_column(InternalScopeString(common_schema.get_schema_value('SCOPE_LENGTH')))
+    name: Mapped[str] = mapped_column(String(common_schema.get_schema_value('NAME_LENGTH')))
+    doi: Mapped[str] = mapped_column(String(255), unique=True)
+
+    __table_args__ = (
+        PrimaryKeyConstraint('scope', 'name', name='OPENDATA_DOI_PK'),
+        ForeignKeyConstraint(
+            ['scope', 'name'],
+            ['dids_opendata.scope', 'dids_opendata.name'],
+            name='OPENDATA_DOI_FK',
+            ondelete='CASCADE',
+        ),
+        # Not working on all DB, we add the constraint on insert
+        # CheckConstraint("doi ~* '^10\\.[0-9]{4,9}/[-._;()/:A-Z0-9]+$'", name='OPENDATA_DOI_FORMAT_CHK'),
+        Index('OPENDATA_DOI_UPDATED_AT_IDX', 'updated_at'),
+        Index('OPENDATA_DOI_CREATED_AT_IDX', 'created_at'),
+    )
+
+
+class OpenDataMeta(BASE, ModelBase):
+    """Mapping between OpenData DIDs and DOIs"""
+    __tablename__ = 'dids_opendata_meta'
+
+    scope: Mapped[InternalScope] = mapped_column(InternalScopeString(common_schema.get_schema_value('SCOPE_LENGTH')))
+    name: Mapped[str] = mapped_column(String(common_schema.get_schema_value('NAME_LENGTH')))
+    meta = mapped_column(JSON(), nullable=False)
+
+    __table_args__ = (
+        PrimaryKeyConstraint('scope', 'name', name='OPENDATA_META_PK'),
+        ForeignKeyConstraint(
+            ['scope', 'name'],
+            ['dids_opendata.scope', 'dids_opendata.name'],
+            name='OPENDATA_META_FK',
+            ondelete='CASCADE',
+        ),
+    )
+
+
 class VirtualPlacements(BASE, ModelBase):
     """Represents virtual placements"""
     __tablename__ = 'virtual_placements'
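With the new ORM models in place, opendata entries can be read with a plain SQLAlchemy 2.0 select. A minimal sketch, assuming a session obtained through Rucio's usual machinery; DRAFT is the only enum member this diff confirms:

from sqlalchemy import select

from rucio.db.sqla.constants import OpenDataDIDState
from rucio.db.sqla.models import OpenDataDid

def list_draft_opendata_dids(session):
    # Served by the OPENDATA_DID_STATE_IDX index created by the migration above.
    stmt = select(OpenDataDid).where(OpenDataDid.state == OpenDataDIDState.DRAFT)
    return session.execute(stmt).scalars().all()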
rucio/db/sqla/session.py
CHANGED
@@ -38,6 +38,8 @@ from rucio.db.sqla.constants import DatabaseOperationType
 
 EXTRA_MODULES = import_extras(['MySQLdb', 'pymysql'])
 
+LOG = logging.getLogger(__name__)
+
 if TYPE_CHECKING:
     from collections.abc import Callable, Iterator
     from typing import Optional, ParamSpec, TypeVar
@@ -71,7 +73,6 @@ DEFAULT_SCHEMA_NAME = config_get(DATABASE_SECTION, 'schema',
 _METADATA = MetaData(schema=DEFAULT_SCHEMA_NAME)
 _MAKER, _ENGINE, _LOCK = None, None, Lock()
 
-
 SQLA_CONFIG_POOLCLASS_MAPPING = {
     'queuepool': QueuePool,
     'singletonthreadpool': SingletonThreadPool,
@@ -218,6 +219,12 @@ def get_engine() -> 'Engine':
     if 'mysql' in sql_connection:
         conv = mysql_convert_decimal_to_float(pymysql=sql_connection.startswith('mysql+pymysql'))
         params['connect_args'] = {'conv': conv}
+    elif 'oracle' in sql_connection:
+        try:
+            import oracledb  # pylint: disable=import-error
+            oracledb.init_oracle_client()
+        except Exception as err:
+            LOG.warning('Could not start Oracle thick mode; falling back to thin: %s', err)
     for param, param_type in config_params:
         try:
             params[param] = param_type(config_get(DATABASE_SECTION, param, check_config_table=False))
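Background for the Oracle branch above: python-oracledb starts in "thin" mode and only enters the Instant Client based "thick" mode once init_oracle_client() succeeds, so the engine setup attempts it and logs a warning instead of failing. The same fallback in isolation, as a hedged standalone sketch:

import logging

import oracledb

log = logging.getLogger(__name__)

try:
    oracledb.init_oracle_client()  # needs Oracle Instant Client libraries on the host
except Exception as err:  # missing libraries -> stay in thin mode
    log.warning('Could not start Oracle thick mode; falling back to thin: %s', err)

print('thin mode' if oracledb.is_thin_mode() else 'thick mode')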
rucio/db/sqla/util.py
CHANGED
@@ -81,7 +81,7 @@ def dump_schema() -> None:
     models.register_models(engine)
 
 
-def destroy_database() -> None:
+def drop_orm_tables() -> None:
     """ Removes the schema from the database. Only useful for test cases or malicious intents. """
     engine = get_engine()
 
@@ -91,7 +91,7 @@ def destroy_database() -> None:
         print('Cannot destroy schema -- assuming already gone, continuing:', e)
 
 
-def drop_everything() -> None:
+def purge_db() -> None:
     """
     Pre-gather all named constraints and table names, and drop everything.
     This is better than using metadata.reflect(); metadata.drop_all()
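The renames make these destructive test helpers more explicit. A hedged usage sketch against a disposable test database, assuming build_database() keeps its name from earlier releases; the rewritten tools/reset_database.py in this wheel drives a sequence along these lines:

from rucio.db.sqla.util import build_database, drop_orm_tables

drop_orm_tables()  # renamed in 38.0.0; drops the ORM-managed schema
build_database()   # recreate the schema from the current models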
rucio/gateway/opendata.py
ADDED
@@ -0,0 +1,190 @@
+# Copyright European Organization for Nuclear Research (CERN) since 2012
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+from typing import TYPE_CHECKING, Any, Optional
+
+from rucio.common.constants import DEFAULT_VO
+from rucio.common.types import InternalScope
+from rucio.common.utils import gateway_update_return_dict
+from rucio.core import opendata
+from rucio.core.opendata import opendata_state_str_to_enum, validate_opendata_did_state
+from rucio.db.sqla.constants import DatabaseOperationType
+from rucio.db.sqla.session import db_session
+
+if TYPE_CHECKING:
+    from rucio.common.constants import OPENDATA_DID_STATE_LITERAL
+
+
+def list_opendata_dids(
+        *,
+        limit: Optional[int] = None,
+        offset: Optional[int] = None,
+        state: Optional["OPENDATA_DID_STATE_LITERAL"] = None,
+) -> dict[str, list[dict[str, Any]]]:
+    """
+    List Opendata DIDs from the Opendata catalog.
+
+    Parameters:
+        limit: Maximum number of DIDs to return.
+        offset: Number of DIDs to skip before starting to collect the result set.
+        state: Filter DIDs by their state.
+
+    Returns:
+        A dictionary with a list of DIDs matching the criteria.
+    """
+
+    state_enum = None
+    if state is not None:
+        state = validate_opendata_did_state(state)
+        state_enum = opendata_state_str_to_enum(state)
+    with db_session(DatabaseOperationType.READ) as session:
+        result = opendata.list_opendata_dids(limit=limit, offset=offset, state=state_enum, session=session)
+        return result
+
+
+def get_opendata_did(
+        *,
+        scope: str,
+        name: str,
+        state: Optional["OPENDATA_DID_STATE_LITERAL"] = None,
+        include_files: bool = True,
+        include_metadata: bool = False,
+        include_doi: bool = True,
+        vo: str = DEFAULT_VO,
+) -> dict[str, Any]:
+    """
+    Retrieve a specific Opendata DID from the Opendata catalog.
+
+    Parameters:
+        scope: The scope of the DID.
+        name: The name of the DID.
+        state: Optional state to filter the DID.
+        include_files: Whether to include files in the result.
+        include_metadata: Whether to include metadata in the result.
+        include_doi: Whether to include DOI information in the result.
+        vo: The virtual organization.
+
+    Returns:
+        A dictionary containing the details of the requested DID.
+    """
+
+    internal_scope = InternalScope(scope, vo=vo)
+    state_enum = None
+    if state is not None:
+        state = validate_opendata_did_state(state)
+        state_enum = opendata_state_str_to_enum(state)
+
+    with db_session(DatabaseOperationType.READ) as session:
+        result = opendata.get_opendata_did(scope=internal_scope,
+                                           name=name,
+                                           state=state_enum,
+                                           include_files=include_files,
+                                           include_metadata=include_metadata,
+                                           include_doi=include_doi,
+                                           session=session)
+        return gateway_update_return_dict(result, session=session)
+
+
+def add_opendata_did(
+        *,
+        scope: str,
+        name: str,
+        vo: str = DEFAULT_VO,
+) -> None:
+    """
+    Add a new Opendata DID to the Opendata catalog.
+
+    Parameters:
+        scope: The scope of the DID.
+        name: The name of the DID.
+        vo: The virtual organization.
+
+    Returns:
+        None
+    """
+
+    internal_scope = InternalScope(scope, vo=vo)
+    with db_session(DatabaseOperationType.WRITE) as session:
+        return opendata.add_opendata_did(scope=internal_scope, name=name, session=session)
+
+
+def delete_opendata_did(
+        *,
+        scope: str,
+        name: str,
+        vo: str = DEFAULT_VO,
+) -> None:
+    """
+    Delete an Opendata DID from the Opendata catalog.
+
+    Parameters:
+        scope: The scope of the DID.
+        name: The name of the DID.
+        vo: The virtual organization.
+
+    Returns:
+        None
+    """
+
+    internal_scope = InternalScope(scope, vo=vo)
+    with db_session(DatabaseOperationType.WRITE) as session:
+        return opendata.delete_opendata_did(scope=internal_scope, name=name, session=session)
+
+
+def update_opendata_did(
+        *,
+        scope: str,
+        name: str,
+        state: Optional["OPENDATA_DID_STATE_LITERAL"] = None,
+        meta: Optional[dict] = None,
+        doi: Optional[str] = None,
+        vo: str = DEFAULT_VO,
+) -> None:
+    """
+    Update an existing Opendata DID in the Opendata catalog.
+
+    Parameters:
+        scope: The scope of the DID.
+        name: The name of the DID.
+        state: Optional new state for the DID.
+        meta: Optional metadata dictionary or JSON string.
+        doi: Optional DOI string.
+        vo: The virtual organization.
+
+    Returns:
+        None
+
+    Raises:
+        ValueError: If meta is a string and cannot be parsed as valid JSON.
+    """
+
+    internal_scope = InternalScope(scope, vo=vo)
+    state_enum = None
+    if state is not None:
+        state = validate_opendata_did_state(state)
+        state_enum = opendata_state_str_to_enum(state)
+    if isinstance(meta, str):
+        try:
+            meta = json.loads(meta)
+        except ValueError as error:
+            raise ValueError(f"Invalid JSON: {error}")
+
+    with db_session(DatabaseOperationType.WRITE) as session:
+        return opendata.update_opendata_did(scope=internal_scope,
+                                            name=name,
+                                            state=state_enum,
+                                            meta=meta,
+                                            doi=doi,
+                                            session=session)
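A hedged sketch of driving the new gateway surface end to end, using only the keyword-only signatures shown above; the scope, name, and DOI values are invented, and the 'public' state literal is an assumption (valid literals are checked by validate_opendata_did_state):

from rucio.gateway import opendata

opendata.add_opendata_did(scope='data', name='dataset.opendata.2025', vo='def')
opendata.update_opendata_did(
    scope='data',
    name='dataset.opendata.2025',
    state='public',                           # assumed state literal
    doi='10.1234/example-doi',                # invented DOI
    meta={'description': 'Open data sample'},
    vo='def',
)
record = opendata.get_opendata_did(scope='data', name='dataset.opendata.2025', vo='def')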